Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_pad.c22
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acevents.h51
-rw-r--r--drivers/acpi/acpica/acglobal.h25
-rw-r--r--drivers/acpi/acpica/acinterp.h9
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/actables.h4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c10
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c13
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c167
-rw-r--r--drivers/acpi/acpica/evgpeblk.c766
-rw-r--r--drivers/acpi/acpica/evgpeinit.c653
-rw-r--r--drivers/acpi/acpica/evgpeutil.c337
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evxface.c24
-rw-r--r--drivers/acpi/acpica/evxfevnt.c224
-rw-r--r--drivers/acpi/acpica/exconfig.c21
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c4
-rw-r--r--drivers/acpi/acpica/exdebug.c261
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c16
-rw-r--r--drivers/acpi/acpica/exmisc.c8
-rw-r--r--drivers/acpi/acpica/exmutex.c46
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c18
-rw-r--r--drivers/acpi/acpica/exoparg2.c37
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c4
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c11
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c218
-rw-r--r--drivers/acpi/acpica/exsystem.c10
-rw-r--r--drivers/acpi/acpica/hwacpi.c20
-rw-r--r--drivers/acpi/acpica/hwregs.c6
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/psargs.c4
-rw-r--r--drivers/acpi/acpica/psloop.c3
-rw-r--r--drivers/acpi/acpica/psxface.c5
-rw-r--r--drivers/acpi/acpica/rscreate.c14
-rw-r--r--drivers/acpi/acpica/rslist.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c69
-rw-r--r--drivers/acpi/acpica/tbutils.c101
-rw-r--r--drivers/acpi/acpica/tbxface.c80
-rw-r--r--drivers/acpi/acpica/tbxfroot.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c14
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utmisc.c6
-rw-r--r--drivers/acpi/acpica/utmutex.c4
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/apei/Kconfig30
-rw-r--r--drivers/acpi/apei/Makefile5
-rw-r--r--drivers/acpi/apei/apei-base.c593
-rw-r--r--drivers/acpi/apei/apei-internal.h114
-rw-r--r--drivers/acpi/apei/cper.c84
-rw-r--r--drivers/acpi/apei/einj.c548
-rw-r--r--drivers/acpi/apei/erst.c855
-rw-r--r--drivers/acpi/apei/ghes.c427
-rw-r--r--drivers/acpi/apei/hest.c173
-rw-r--r--drivers/acpi/atomicio.c360
-rw-r--r--drivers/acpi/bus.c53
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/hed.c112
-rw-r--r--drivers/acpi/hest.c139
-rw-r--r--drivers/acpi/osl.c13
-rw-r--r--drivers/acpi/pci_irq.c8
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/processor_driver.c15
-rw-r--r--drivers/acpi/processor_idle.c58
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c157
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/system.c7
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video.c118
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/ata/Kconfig521
-rw-r--r--drivers/ata/Makefile86
-rw-r--r--drivers/ata/ahci.c2544
-rw-r--r--drivers/ata/ahci.h343
-rw-r--r--drivers/ata/ahci_platform.c192
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c6
-rw-r--r--drivers/ata/libahci.c2216
-rw-r--r--drivers/ata/libata-core.c232
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c1799
-rw-r--r--drivers/ata/libata.h29
-rw-r--r--drivers/ata/pata_acpi.c10
-rw-r--r--drivers/ata/pata_ali.c7
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c6
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_bf54x.c89
-rw-r--r--drivers/ata/pata_cmd640.c13
-rw-r--r--drivers/ata/pata_cmd64x.c2
-rw-r--r--drivers/ata/pata_cs5520.c4
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c4
-rw-r--r--drivers/ata/pata_hpt37x.c6
-rw-r--r--drivers/ata/pata_hpt3x2n.c4
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_icside.c7
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c8
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c94
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_ns87415.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c30
-rw-r--r--drivers/ata/pata_of_platform.c9
-rw-r--r--drivers/ata/pata_oldpiix.c4
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_pcmcia.c49
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_pdc202xx_old.c4
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rdc.c4
-rw-r--r--drivers/ata/pata_sc1200.c6
-rw-r--r--drivers/ata/pata_scc.c84
-rw-r--r--drivers/ata/pata_sch.c12
-rw-r--r--drivers/ata/pata_serverworks.c8
-rw-r--r--drivers/ata/pata_sil680.c34
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c8
-rw-r--r--drivers/ata/pdc_adma.c74
-rw-r--r--drivers/ata/sata_fsl.c11
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c51
-rw-r--r--drivers/ata/sata_nv.c269
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c104
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/ata/sata_sil24.c9
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c6
-rw-r--r--drivers/ata/sata_via.c8
-rw-r--r--drivers/ata/sata_vsc.c12
-rw-r--r--drivers/atm/Kconfig1
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/fore200e.c23
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c8
-rw-r--r--drivers/base/Kconfig7
-rw-r--r--drivers/base/class.c9
-rw-r--r--drivers/base/core.c130
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/devtmpfs.c5
-rw-r--r--drivers/base/firmware_class.c206
-rw-r--r--drivers/base/iommu.c43
-rw-r--r--drivers/base/module.c4
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/platform.c38
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/power/sysfs.c65
-rw-r--r--drivers/base/topology.c2
-rw-r--r--drivers/block/Kconfig22
-rw-r--r--drivers/block/amiflop.c47
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/drbd/drbd_bitmap.c21
-rw-r--r--drivers/block/drbd/drbd_int.h151
-rw-r--r--drivers/block/drbd/drbd_main.c158
-rw-r--r--drivers/block/drbd/drbd_nl.c52
-rw-r--r--drivers/block/drbd/drbd_proc.c19
-rw-r--r--drivers/block/drbd/drbd_receiver.c666
-rw-r--r--drivers/block/drbd/drbd_req.c40
-rw-r--r--drivers/block/drbd/drbd_strings.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c206
-rw-r--r--drivers/block/drbd/drbd_wrappers.h16
-rw-r--r--drivers/block/hd.c4
-rw-r--r--drivers/block/loop.c14
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/virtio_blk.c46
-rw-r--r--drivers/block/xsysace.c13
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c11
-rw-r--r--drivers/bluetooth/btmrvl_drv.h8
-rw-r--r--drivers/bluetooth/btmrvl_main.c92
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c7
-rw-r--r--drivers/bluetooth/btuart_cs.c11
-rw-r--r--drivers/bluetooth/dtl1_cs.c11
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ll.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/agp.h80
-rw-r--r--drivers/char/agp/ali-agp.c1
-rw-r--r--drivers/char/agp/amd-k7-agp.c9
-rw-r--r--drivers/char/agp/amd64-agp.c56
-rw-r--r--drivers/char/agp/ati-agp.c8
-rw-r--r--drivers/char/agp/efficeon-agp.c1
-rw-r--r--drivers/char/agp/generic.c2
-rw-r--r--drivers/char/agp/intel-agp.c1883
-rw-r--r--drivers/char/agp/intel-agp.h239
-rw-r--r--drivers/char/agp/intel-gtt.c1516
-rw-r--r--drivers/char/agp/nvidia-agp.c1
-rw-r--r--drivers/char/agp/sis-agp.c9
-rw-r--r--drivers/char/agp/uninorth-agp.c16
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/amiserial.c61
-rw-r--r--drivers/char/apm-emulation.c8
-rw-r--r--drivers/char/applicom.c22
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/ds1620.c16
-rw-r--r--drivers/char/dtlk.c15
-rw-r--r--drivers/char/generic_nvram.c17
-rw-r--r--drivers/char/genrtc.c16
-rw-r--r--drivers/char/hangcheck-timer.c20
-rw-r--r--drivers/char/hpet.c14
-rw-r--r--drivers/char/hvsi.c6
-rw-r--r--drivers/char/hw_random/n2-drv.c9
-rw-r--r--drivers/char/hw_random/nomadik-rng.c17
-rw-r--r--drivers/char/hw_random/pasemi-rng.c9
-rw-r--r--drivers/char/hw_random/virtio-rng.c6
-rw-r--r--drivers/char/i8k.c21
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c26
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c15
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c479
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c17
-rw-r--r--drivers/char/isicom.c8
-rw-r--r--drivers/char/keyboard.c325
-rw-r--r--drivers/char/misc.c1
-rw-r--r--drivers/char/n_gsm.c2763
-rw-r--r--drivers/char/nvram.c10
-rw-r--r--drivers/char/nwflash.c7
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c5
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h1
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c22
-rw-r--r--drivers/char/ppdev.c4
-rw-r--r--drivers/char/ps3flash.c3
-rw-r--r--drivers/char/ramoops.c162
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/raw.c42
-rw-r--r--drivers/char/serial167.c225
-rw-r--r--drivers/char/sysrq.c245
-rw-r--r--drivers/char/tpm/Kconfig6
-rw-r--r--drivers/char/tpm/tpm.c47
-rw-r--r--drivers/char/tpm/tpm_tis.c40
-rw-r--r--drivers/char/tty_buffer.c2
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/char/virtio_console.c700
-rw-r--r--drivers/char/vt.c10
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c11
-rw-r--r--drivers/clocksource/cs5535-clockevt.c8
-rw-r--r--drivers/clocksource/sh_cmt.c45
-rw-r--r--drivers/clocksource/sh_mtu2.c37
-rw-r--r--drivers/clocksource/sh_tmu.c41
-rw-r--r--drivers/cpufreq/cpufreq.c40
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c48
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c115
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c16
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c62
-rw-r--r--drivers/cpuidle/sysfs.c5
-rw-r--r--drivers/crypto/Kconfig21
-rw-r--r--drivers/crypto/Makefile4
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c7
-rw-r--r--drivers/crypto/geode-aes.c36
-rw-r--r--drivers/crypto/hifn_795x.c18
-rw-r--r--drivers/crypto/mv_cesa.c692
-rw-r--r--drivers/crypto/mv_cesa.h40
-rw-r--r--drivers/crypto/n2_asm.S95
-rw-r--r--drivers/crypto/n2_core.c2083
-rw-r--r--drivers/crypto/n2_core.h231
-rw-r--r--drivers/crypto/omap-sham.c1259
-rw-r--r--drivers/crypto/talitos.c708
-rw-r--r--drivers/crypto/talitos.h12
-rw-r--r--drivers/dma/Kconfig23
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac.c35
-rw-r--r--drivers/dma/coh901318.c263
-rw-r--r--drivers/dma/dmaengine.c22
-rw-r--r--drivers/dma/dw_dmac.c24
-rw-r--r--drivers/dma/fsldma.c45
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h19
-rw-r--r--drivers/dma/ioat/dma_v2.c186
-rw-r--r--drivers/dma/ioat/dma_v2.h33
-rw-r--r--drivers/dma/ioat/dma_v3.c143
-rw-r--r--drivers/dma/ioat/pci.c7
-rw-r--r--drivers/dma/iop-adma.c39
-rw-r--r--drivers/dma/ipu/ipu_idmac.c34
-rw-r--r--drivers/dma/mpc512x_dma.c15
-rw-r--r--drivers/dma/mv_xor.c25
-rw-r--r--drivers/dma/pl330.c866
-rw-r--r--drivers/dma/ppc4xx/adma.c21
-rw-r--r--drivers/dma/shdma.c57
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c2657
-rw-r--r--drivers/dma/ste_dma40_ll.c454
-rw-r--r--drivers/dma/ste_dma40_ll.h354
-rw-r--r--drivers/dma/timb_dma.c860
-rw-r--r--drivers/dma/txx9dmac.c23
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c20
-rw-r--r--drivers/edac/i5400_edac.c20
-rw-r--r--drivers/edac/i82443bxgx_edac.c24
-rw-r--r--drivers/edac/mpc85xx_edac.c30
-rw-r--r--drivers/edac/ppc4xx_edac.c10
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/firewire/core-card.c22
-rw-r--r--drivers/firewire/core-cdev.c8
-rw-r--r--drivers/firewire/core-transaction.c96
-rw-r--r--drivers/firewire/core.h6
-rw-r--r--drivers/firewire/ohci.c190
-rw-r--r--drivers/firewire/ohci.h10
-rw-r--r--drivers/firmware/dcdbas.c4
-rw-r--r--drivers/firmware/dell_rbu.c10
-rw-r--r--drivers/firmware/efivars.c4
-rw-r--r--drivers/gpio/Kconfig37
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/cs5535-gpio.c2
-rw-r--r--drivers/gpio/gpiolib.c51
-rw-r--r--drivers/gpio/it8761e_gpio.c5
-rw-r--r--drivers/gpio/janz-ttl.c258
-rw-r--r--drivers/gpio/langwell_gpio.c83
-rw-r--r--drivers/gpio/max732x.c368
-rw-r--r--drivers/gpio/pca953x.c4
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c246
-rw-r--r--drivers/gpio/tc35892-gpio.c381
-rw-r--r--drivers/gpio/wm8994-gpio.c12
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_auth.c3
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c9
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c490
-rw-r--r--drivers/gpu/drm/drm_dma.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c807
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c908
-rw-r--r--drivers/gpu/drm/drm_fops.c3
-rw-r--r--drivers/gpu/drm/drm_gem.c49
-rw-r--r--drivers/gpu/drm/drm_modes.c105
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h10
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c46
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c44
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c38
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c32
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h38
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c154
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c23
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h143
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c41
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h88
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c93
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1068
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c256
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h31
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c103
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c217
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c71
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c111
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c21
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1009
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c185
-rw-r--r--drivers/gpu/drm/nouveau/Makefile3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c500
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c116
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c259
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c58
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c566
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_calc.c87
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c15
-rw-r--r--drivers/gpu/drm/radeon/atombios.h80
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c23
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1549
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h556
-rw-r--r--drivers/gpu/drm/radeon/r100.c729
-rw-r--r--drivers/gpu/drm/radeon/r100d.h164
-rw-r--r--drivers/gpu/drm/radeon/r300.c151
-rw-r--r--drivers/gpu/drm/radeon/r300d.h47
-rw-r--r--drivers/gpu/drm/radeon/r420.c36
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/r520.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c600
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c58
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h57
-rw-r--r--drivers/gpu/drm/radeon/radeon.h259
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c140
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h44
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c255
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c129
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c358
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_fixed.h67
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c814
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c122
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c231
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h80
-rw-r--r--drivers/gpu/drm/radeon/rs690.c289
-rw-r--r--drivers/gpu/drm/radeon/rv515.c287
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h46
-rw-r--r--drivers/gpu/drm/radeon/rv770.c28
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c98
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c122
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c845
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c50
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/vga/Kconfig6
-rw-r--r--drivers/hid/Kconfig159
-rw-r--r--drivers/hid/Makefile7
-rw-r--r--drivers/hid/hid-3m-pct.c31
-rw-r--r--drivers/hid/hid-cando.c272
-rw-r--r--drivers/hid/hid-core.c54
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-egalax.c281
-rw-r--r--drivers/hid/hid-gyration.c1
-rw-r--r--drivers/hid/hid-ids.h26
-rw-r--r--drivers/hid/hid-lg.c9
-rw-r--r--drivers/hid/hid-magicmouse.c5
-rw-r--r--drivers/hid/hid-ntrig.c526
-rw-r--r--drivers/hid/hid-picolcd.c2631
-rw-r--r--drivers/hid/hid-prodikeys.c910
-rw-r--r--drivers/hid/hid-roccat-kone.c1043
-rw-r--r--drivers/hid/hid-roccat-kone.h233
-rw-r--r--drivers/hid/hid-roccat.c428
-rw-r--r--drivers/hid/hid-roccat.h31
-rw-r--r--drivers/hid/hid-samsung.c95
-rw-r--r--drivers/hid/hid-topseed.c38
-rw-r--r--drivers/hid/hid-wacom.c229
-rw-r--r--drivers/hid/hid-zydacron.c237
-rw-r--r--drivers/hid/hidraw.c50
-rw-r--r--drivers/hid/usbhid/hid-core.c93
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c19
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/usbhid/usbkbd.c17
-rw-r--r--drivers/hid/usbhid/usbmouse.c6
-rw-r--r--drivers/hwmon/Kconfig43
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adm1031.c68
-rw-r--r--drivers/hwmon/ads7871.c253
-rw-r--r--drivers/hwmon/applesmc.c186
-rw-r--r--drivers/hwmon/asus_atk0110.c7
-rw-r--r--drivers/hwmon/coretemp.c93
-rw-r--r--drivers/hwmon/dme1737.c328
-rw-r--r--drivers/hwmon/emc1403.c344
-rw-r--r--drivers/hwmon/f71882fg.c170
-rw-r--r--drivers/hwmon/fschmd.c9
-rw-r--r--drivers/hwmon/lis3lv02d.c245
-rw-r--r--drivers/hwmon/lis3lv02d.h11
-rw-r--r--drivers/hwmon/lm63.c16
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm90.c3
-rw-r--r--drivers/hwmon/ltc4245.c18
-rw-r--r--drivers/hwmon/tmp102.c321
-rw-r--r--drivers/hwmon/tmp401.c255
-rw-r--r--drivers/hwmon/ultra45_env.c7
-rw-r--r--drivers/hwmon/w83793.c10
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c36
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c168
-rw-r--r--drivers/i2c/busses/i2c-cpm.c32
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c5
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c59
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c27
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c3
-rw-r--r--drivers/i2c/busses/i2c-mpc.c31
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c3
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c265
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c28
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c21
-rw-r--r--drivers/i2c/busses/i2c-s6000.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c3
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-stub.c9
-rw-r--r--drivers/i2c/busses/i2c-versatile.c3
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c4
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/i2c-core.c259
-rw-r--r--drivers/i2c/i2c-dev.c36
-rw-r--r--drivers/ide/cmd640.c6
-rw-r--r--drivers/ide/gayle.c147
-rw-r--r--drivers/ide/ide-cs.c20
-rw-r--r--drivers/ide/ide-disk.c40
-rw-r--r--drivers/ide/ide-gd.c11
-rw-r--r--drivers/ide/ide_platform.c1
-rw-r--r--drivers/ide/pdc202xx_old.c5
-rw-r--r--drivers/ide/pmac.c10
-rw-r--r--drivers/idle/Kconfig11
-rw-r--r--drivers/idle/Makefile1
-rwxr-xr-xdrivers/idle/intel_idle.c461
-rw-r--r--drivers/ieee1394/dv1394.c11
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/ieee1394/video1394.c5
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/Makefile2
-rw-r--r--drivers/infiniband/core/cma.c74
-rw-r--r--drivers/infiniband/core/core_priv.h4
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/core/sysfs.c21
-rw-r--r--drivers/infiniband/core/ucm.c14
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/user_mad.c12
-rw-r--r--drivers/infiniband/core/uverbs_main.c11
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c133
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig18
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2374
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c886
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c544
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h746
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c812
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c520
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1576
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c417
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h548
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h829
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h66
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c20
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig8
-rw-r--r--drivers/infiniband/hw/ipath/Makefile6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c28
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c1862
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c2631
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c22
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c95
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/Kconfig7
-rw-r--r--drivers/infiniband/hw/qib/Makefile15
-rw-r--r--drivers/infiniband/hw/qib/qib.h1439
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h156
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h758
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c484
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c894
-rw-r--r--drivers/infiniband/hw/qib/qib_dma.c182
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c665
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c451
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2317
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c618
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3576
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4618
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7645
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1586
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c236
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c328
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2173
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h373
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c174
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c503
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c738
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c1255
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c564
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h184
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2288
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c817
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220.c)859
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220_img.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220_img.c)19
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c973
-rw-r--r--drivers/infiniband/hw/qib/qib_srq.c375
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c691
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c498
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c557
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c555
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c607
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c157
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c897
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2248
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1100
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c368
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c (renamed from drivers/infiniband/hw/ipath/ipath_7220.h)49
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c171
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c115
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/joydev.c10
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/keyboard/Kconfig66
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/amikbd.c97
-rw-r--r--drivers/input/keyboard/corgikbd.c414
-rw-r--r--drivers/input/keyboard/lm8323.c6
-rw-r--r--drivers/input/keyboard/spitzkbd.c496
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c349
-rw-r--r--drivers/input/keyboard/tosakbd.c431
-rw-r--r--drivers/input/misc/88pm860x_onkey.c1
-rw-r--r--drivers/input/misc/Kconfig50
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/ad714x-i2c.c140
-rw-r--r--drivers/input/misc/ad714x-spi.c103
-rw-r--r--drivers/input/misc/ad714x.c1347
-rw-r--r--drivers/input/misc/ad714x.h26
-rw-r--r--drivers/input/misc/ati_remote.c12
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/cm109.c28
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c34
-rw-r--r--drivers/input/misc/keyspan_remote.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c148
-rw-r--r--drivers/input/misc/pcf8574_keypad.c227
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/powermate.c17
-rw-r--r--drivers/input/misc/sparcspkr.c14
-rw-r--r--drivers/input/misc/twl4030-vibra.c2
-rw-r--r--drivers/input/misc/uinput.c4
-rw-r--r--drivers/input/misc/wistron_btns.c4
-rw-r--r--drivers/input/misc/yealink.c25
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/amimouse.c98
-rw-r--r--drivers/input/mouse/appletouch.c12
-rw-r--r--drivers/input/mouse/bcm5974.c24
-rw-r--r--drivers/input/mouse/elantech.c87
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/logips2pp.c98
-rw-r--r--drivers/input/mouse/psmouse-base.c72
-rw-r--r--drivers/input/mouse/synaptics.c53
-rw-r--r--drivers/input/mouse/synaptics.h6
-rw-r--r--drivers/input/serio/Kconfig16
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ams_delta_serio.c177
-rw-r--r--drivers/input/serio/i8042-sparcio.h9
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/serio/xilinx_ps2.c15
-rw-r--r--drivers/input/tablet/acecad.c90
-rw-r--r--drivers/input/tablet/aiptek.c14
-rw-r--r--drivers/input/tablet/gtco.c12
-rw-r--r--drivers/input/tablet/kbtab.c55
-rw-r--r--drivers/input/tablet/wacom.h36
-rw-r--r--drivers/input/tablet/wacom_sys.c324
-rw-r--r--drivers/input/tablet/wacom_wac.c1168
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/Kconfig45
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/corgi_ts.c385
-rw-r--r--drivers/input/touchscreen/hampshire.c205
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c40
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c400
-rw-r--r--drivers/input/touchscreen/tsc2007.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c20
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/isdn/capi/capi.c17
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/isdn/divert/divert_procfs.c18
-rw-r--r--drivers/isdn/gigaset/capi.c41
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c76
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c63
-rw-r--r--drivers/isdn/hisax/elsa_cs.c40
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c3
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c60
-rw-r--r--drivers/isdn/hisax/teles_cs.c50
-rw-r--r--drivers/isdn/i4l/isdn_common.c18
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c17
-rw-r--r--drivers/isdn/mISDN/timerdev.c12
-rw-r--r--drivers/leds/Kconfig19
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/leds-88pm860x.c11
-rw-r--r--drivers/leds/leds-gpio.c34
-rw-r--r--drivers/leds/leds-lp3944.c9
-rw-r--r--drivers/leds/leds-mc13783.c403
-rw-r--r--drivers/leds/leds-net5501.c94
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/macintosh/macio-adb.c1
-rw-r--r--drivers/macintosh/macio_asic.c28
-rw-r--r--drivers/macintosh/macio_sysfs.c6
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/macintosh/nvram.c2
-rw-r--r--drivers/macintosh/rack-meter.c4
-rw-r--r--drivers/macintosh/smu.c11
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c9
-rw-r--r--drivers/macintosh/therm_windtunnel.c7
-rw-r--r--drivers/macintosh/via-pmu.c17
-rw-r--r--drivers/macintosh/windfarm_pm81.c8
-rw-r--r--drivers/macintosh/windfarm_pm91.c9
-rw-r--r--drivers/md/Kconfig4
-rw-r--r--drivers/md/bitmap.c47
-rw-r--r--drivers/md/bitmap.h2
-rw-r--r--drivers/md/faulty.c9
-rw-r--r--drivers/md/linear.c36
-rw-r--r--drivers/md/md.c543
-rw-r--r--drivers/md/md.h16
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/raid0.c251
-rw-r--r--drivers/md/raid0.h3
-rw-r--r--drivers/md/raid1.c114
-rw-r--r--drivers/md/raid10.c300
-rw-r--r--drivers/md/raid10.h12
-rw-r--r--drivers/md/raid5.c233
-rw-r--r--drivers/media/IR/Kconfig59
-rw-r--r--drivers/media/IR/Makefile14
-rw-r--r--drivers/media/IR/imon.c2396
-rw-r--r--drivers/media/IR/ir-core-priv.h126
-rw-r--r--drivers/media/IR/ir-functions.c1
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c320
-rw-r--r--drivers/media/IR/ir-keymaps.c3494
-rw-r--r--drivers/media/IR/ir-keytable.c687
-rw-r--r--drivers/media/IR/ir-nec-decoder.c328
-rw-r--r--drivers/media/IR/ir-raw-event.c251
-rw-r--r--drivers/media/IR/ir-rc5-decoder.c324
-rw-r--r--drivers/media/IR/ir-rc6-decoder.c419
-rw-r--r--drivers/media/IR/ir-sony-decoder.c312
-rw-r--r--drivers/media/IR/ir-sysfs.c202
-rw-r--r--drivers/media/IR/keymaps/Kconfig15
-rw-r--r--drivers/media/IR/keymaps/Makefile67
-rw-r--r--drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c89
-rw-r--r--drivers/media/IR/keymaps/rc-apac-viewcomp.c80
-rw-r--r--drivers/media/IR/keymaps/rc-asus-pc39.c91
-rw-r--r--drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c69
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-a16d.c75
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-cardbus.c97
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-dvbt.c78
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c90
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia.c86
-rw-r--r--drivers/media/IR/keymaps/rc-avertv-303.c85
-rw-r--r--drivers/media/IR/keymaps/rc-behold-columbus.c108
-rw-r--r--drivers/media/IR/keymaps/rc-behold.c141
-rw-r--r--drivers/media/IR/keymaps/rc-budget-ci-old.c92
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy-1400.c84
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dm1105-nec.c76
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c97
-rw-r--r--drivers/media/IR/keymaps/rc-em-terratec.c69
-rw-r--r--drivers/media/IR/keymaps/rc-empty.c44
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv-fm53.c81
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv.c112
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv2.c90
-rw-r--r--drivers/media/IR/keymaps/rc-evga-indtube.c61
-rw-r--r--drivers/media/IR/keymaps/rc-eztv.c96
-rw-r--r--drivers/media/IR/keymaps/rc-flydvb.c77
-rw-r--r--drivers/media/IR/keymaps/rc-flyvideo.c70
-rw-r--r--drivers/media/IR/keymaps/rc-fusionhdtv-mce.c98
-rw-r--r--drivers/media/IR/keymaps/rc-gadmei-rm008z.c81
-rw-r--r--drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c84
-rw-r--r--drivers/media/IR/keymaps/rc-gotview7135.c79
-rw-r--r--drivers/media/IR/keymaps/rc-hauppauge-new.c100
-rw-r--r--drivers/media/IR/keymaps/rc-imon-mce.c142
-rw-r--r--drivers/media/IR/keymaps/rc-imon-pad.c156
-rw-r--r--drivers/media/IR/keymaps/rc-iodata-bctv7e.c88
-rw-r--r--drivers/media/IR/keymaps/rc-kaiomy.c87
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-315u.c83
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c99
-rw-r--r--drivers/media/IR/keymaps/rc-manli.c135
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c123
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere.c69
-rw-r--r--drivers/media/IR/keymaps/rc-nebula.c96
-rw-r--r--drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c105
-rw-r--r--drivers/media/IR/keymaps/rc-norwood.c85
-rw-r--r--drivers/media/IR/keymaps/rc-npgtech.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pctv-sedna.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-color.c94
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-grey.c89
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c73
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-mk12.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-new.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview.c82
-rw-r--r--drivers/media/IR/keymaps/rc-powercolor-real-angel.c81
-rw-r--r--drivers/media/IR/keymaps/rc-proteus-2309.c69
-rw-r--r--drivers/media/IR/keymaps/rc-purpletv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-pv951.c78
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c103
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-tv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c78
-rw-r--r--drivers/media/IR/keymaps/rc-tbs-nec.c73
-rw-r--r--drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c92
-rw-r--r--drivers/media/IR/keymaps/rc-tevii-nec.c88
-rw-r--r--drivers/media/IR/keymaps/rc-tt-1500.c82
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-s350.c85
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-tv-pvr.c87
-rw-r--r--drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c82
-rw-r--r--drivers/media/IR/keymaps/rc-winfast.c102
-rw-r--r--drivers/media/IR/rc-map.c84
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c25
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c25
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c31
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c17
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c11
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c55
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c29
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c17
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h11
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c6
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c16
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c8
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h4
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c43
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h18
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c6
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c114
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c51
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c100
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c18
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h7
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c44
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c4
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c18
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c18
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c16
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c7
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c12
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c12
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c5
-rw-r--r--drivers/media/dvb/frontends/atbm8830_priv.h2
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c26
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h5
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c35
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c36
-rw-r--r--drivers/media/dvb/frontends/dib8000.h1
-rw-r--r--drivers/media/dvb/frontends/ds3000.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c16
-rw-r--r--drivers/media/dvb/frontends/stv090x.c94
-rw-r--r--drivers/media/dvb/frontends/stv090x.h1
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c34
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h1
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c10
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/Makefile4
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c328
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c518
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c172
-rw-r--r--drivers/media/dvb/ngene/ngene-i2c.c179
-rw-r--r--drivers/media/dvb/ngene/ngene.h30
-rw-r--r--drivers/media/dvb/pt1/pt1.c271
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.c32
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.h8
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.c31
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.h8
-rw-r--r--drivers/media/dvb/ttpci/av7110.c4
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c5
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/dvb/ttpci/budget.c47
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c6
-rw-r--r--drivers/media/radio/radio-mr800.c19
-rw-r--r--drivers/media/video/Kconfig52
-rw-r--r--drivers/media/video/Makefile14
-rw-r--r--drivers/media/video/ak881x.c368
-rw-r--r--drivers/media/video/arv.c524
-rw-r--r--drivers/media/video/au0828/au0828-video.c6
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c27
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c30
-rw-r--r--drivers/media/video/bw-qcam.c452
-rw-r--r--drivers/media/video/c-qcam.c483
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c54
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h24
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c42
-rw-r--r--drivers/media/video/cx18/cx18-cards.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.h4
-rw-r--r--drivers/media/video/cx18/cx18-controls.c2
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c22
-rw-r--r--drivers/media/video/cx18/cx18-streams.c5
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c27
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c52
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c9
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx2341x.c14
-rw-r--r--drivers/media/video/cx23885/cimax2.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c8
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h5
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c40
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c149
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c11
-rw-r--r--drivers/media/video/cx88/cx88-video.c20
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c131
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc_regs.h10
-rw-r--r--drivers/media/video/davinci/isif_regs.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c83
-rw-r--r--drivers/media/video/davinci/vpif_capture.c8
-rw-r--r--drivers/media/video/davinci/vpif_display.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c56
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c66
-rw-r--r--drivers/media/video/em28xx/em28xx.h7
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c6
-rw-r--r--drivers/media/video/font.h407
-rw-r--r--drivers/media/video/gspca/Kconfig6
-rw-r--r--drivers/media/video/gspca/benq.c4
-rw-r--r--drivers/media/video/gspca/cpia1.c36
-rw-r--r--drivers/media/video/gspca/gspca.c152
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/ov534.c563
-rw-r--r--drivers/media/video/gspca/pac207.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c359
-rw-r--r--drivers/media/video/gspca/sonixj.c585
-rw-r--r--drivers/media/video/gspca/spca561.c58
-rw-r--r--drivers/media/video/gspca/t613.c172
-rw-r--r--drivers/media/video/gspca/vc032x.c747
-rw-r--r--drivers/media/video/gspca/zc3xx.c95
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c33
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c26
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h5
-rw-r--r--drivers/media/video/ir-kbd-i2c.c28
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h10
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c55
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c119
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c18
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c1052
-rw-r--r--drivers/media/video/meye.c78
-rw-r--r--drivers/media/video/meye.h10
-rw-r--r--drivers/media/video/mt9t031.c66
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/media/video/omap/Kconfig11
-rw-r--r--drivers/media/video/omap/Makefile7
-rw-r--r--drivers/media/video/omap/omap_vout.c2643
-rw-r--r--drivers/media/video/omap/omap_voutdef.h147
-rw-r--r--drivers/media/video/omap/omap_voutlib.c293
-rw-r--r--drivers/media/video/omap/omap_voutlib.h34
-rw-r--r--drivers/media/video/omap24xxcam.c4
-rw-r--r--drivers/media/video/ov511.c11
-rw-r--r--drivers/media/video/ov7670.c286
-rw-r--r--drivers/media/video/ov9640.c6
-rw-r--r--drivers/media/video/pms.c18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c28
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pxa_camera.c4
-rw-r--r--drivers/media/video/rj54n1cb0c.c18
-rw-r--r--drivers/media/video/s2255drv.c1161
-rw-r--r--drivers/media/video/saa7115.c74
-rw-r--r--drivers/media/video/saa7127.c31
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c111
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c283
-rw-r--r--drivers/media/video/saa7134/saa7134-reg.h24
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c25
-rw-r--r--drivers/media/video/saa7134/saa7134.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c613
-rw-r--r--drivers/media/video/sh_vou.c1476
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c6
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h18
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131d.c2
-rw-r--r--drivers/media/video/soc_camera.c21
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c9
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c21
-rw-r--r--drivers/media/video/tlg2300/pd-video.c32
-rw-r--r--drivers/media/video/tvp514x.c13
-rw-r--r--drivers/media/video/tvp5150.c67
-rw-r--r--drivers/media/video/tvp7002.c53
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c16
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c82
-rw-r--r--drivers/media/video/usbvision/usbvision.h6
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c50
-rw-r--r--drivers/media/video/uvc/uvc_driver.c25
-rw-r--r--drivers/media/video/uvc/uvc_queue.c8
-rw-r--r--drivers/media/video/uvc/uvc_video.c4
-rw-r--r--drivers/media/video/uvc/uvcvideo.h4
-rw-r--r--drivers/media/video/v4l2-common.c101
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c3
-rw-r--r--drivers/media/video/v4l2-dev.c6
-rw-r--r--drivers/media/video/v4l2-device.c11
-rw-r--r--drivers/media/video/v4l2-event.c292
-rw-r--r--drivers/media/video/v4l2-fh.c79
-rw-r--r--drivers/media/video/v4l2-ioctl.c103
-rw-r--r--drivers/media/video/v4l2-mem2mem.c633
-rw-r--r--drivers/media/video/videobuf-core.c244
-rw-r--r--drivers/media/video/videobuf-dma-contig.c115
-rw-r--r--drivers/media/video/videobuf-dma-sg.c383
-rw-r--r--drivers/media/video/videobuf-dvb.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c183
-rw-r--r--drivers/media/video/vivi.c806
-rw-r--r--drivers/media/video/w9966.c1149
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c6
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h2
-rw-r--r--drivers/media/video/zoran/zoran.h24
-rw-r--r--drivers/media/video/zoran/zoran_driver.c16
-rw-r--r--drivers/media/video/zr364xx.c4
-rw-r--r--drivers/message/fusion/mptbase.c177
-rw-r--r--drivers/message/fusion/mptbase.h5
-rw-r--r--drivers/message/fusion/mptctl.c181
-rw-r--r--drivers/message/fusion/mptfc.c22
-rw-r--r--drivers/message/fusion/mptsas.c55
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c33
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/message/i2o/i2o_config.c27
-rw-r--r--drivers/mfd/88pm860x-core.c42
-rw-r--r--drivers/mfd/88pm860x-i2c.c2
-rw-r--r--drivers/mfd/Kconfig101
-rw-r--r--drivers/mfd/Makefile16
-rw-r--r--drivers/mfd/ab3100-core.c99
-rw-r--r--drivers/mfd/ab3100-otp.c13
-rw-r--r--drivers/mfd/ab3550-core.c1401
-rw-r--r--drivers/mfd/ab4500-core.c209
-rw-r--r--drivers/mfd/ab8500-core.c444
-rw-r--r--drivers/mfd/ab8500-spi.c133
-rw-r--r--drivers/mfd/abx500-core.c157
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/davinci_voicecodec.c190
-rw-r--r--drivers/mfd/janz-cmodio.c304
-rw-r--r--drivers/mfd/max8925-core.c7
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c39
-rw-r--r--drivers/mfd/pcf50633-core.c348
-rw-r--r--drivers/mfd/pcf50633-irq.c318
-rw-r--r--drivers/mfd/rdc321x-southbridge.c123
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c28
-rw-r--r--drivers/mfd/t7l66xb.c3
-rw-r--r--drivers/mfd/tc35892.c347
-rw-r--r--drivers/mfd/timberdale.c156
-rw-r--r--drivers/mfd/timberdale.h16
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6507x.c159
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/twl4030-irq.c11
-rw-r--r--drivers/mfd/wm831x-core.c112
-rw-r--r--drivers/mfd/wm831x-irq.c48
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c4
-rw-r--r--drivers/mfd/wm8994-core.c43
-rw-r--r--drivers/mfd/wm8994-irq.c310
-rw-r--r--drivers/misc/Kconfig32
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c134
-rw-r--r--drivers/misc/ad525x_dpot-spi.c172
-rw-r--r--drivers/misc/ad525x_dpot.c1016
-rw-r--r--drivers/misc/ad525x_dpot.h202
-rw-r--r--drivers/misc/c2port/core.c4
-rw-r--r--drivers/misc/ds1682.c6
-rw-r--r--drivers/misc/eeprom/at24.c65
-rw-r--r--drivers/misc/eeprom/at25.c6
-rw-r--r--drivers/misc/eeprom/eeprom.c39
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c39
-rw-r--r--drivers/misc/eeprom/max6875.c54
-rw-r--r--drivers/misc/hdpuftrs/Makefile1
-rw-r--r--drivers/misc/hdpuftrs/hdpu_cpustate.c256
-rw-r--r--drivers/misc/hdpuftrs/hdpu_nexus.c149
-rw-r--r--drivers/misc/lkdtm.c20
-rw-r--r--drivers/misc/vmware_balloon.c4
-rw-r--r--drivers/mmc/core/core.c3
-rw-r--r--drivers/mmc/core/sd_ops.c2
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/sdio_io.c30
-rw-r--r--drivers/mmc/host/Kconfig20
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/at91_mci.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c66
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/davinci_mmc.c111
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmci.c21
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/msm_sdcc.c474
-rw-r--r--drivers/mmc/host/msm_sdcc.h15
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c116
-rw-r--r--drivers/mmc/host/of_mmc_spi.c4
-rw-r--r--drivers/mmc/host/omap.c64
-rw-r--r--drivers/mmc/host/omap_hsmmc.c279
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-of-core.c11
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c12
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c12
-rw-r--r--drivers/mmc/host/sdhci-pci.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c26
-rw-r--r--drivers/mmc/host/sdhci-s3c.c10
-rw-r--r--drivers/mmc/host/sdhci-spear.c298
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/mmc/host/sdhci.h42
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c965
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.c369
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c137
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c344
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c136
-rw-r--r--drivers/mtd/chips/cfi_probe.c56
-rw-r--r--drivers/mtd/chips/cfi_util.c3
-rw-r--r--drivers/mtd/chips/fwh_lock.h6
-rw-r--r--drivers/mtd/chips/gen_probe.c15
-rw-r--r--drivers/mtd/chips/jedec_probe.c288
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/pmc551.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/ftl.c1
-rw-r--r--drivers/mtd/inftlcore.c1
-rw-r--r--drivers/mtd/inftlmount.c7
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c79
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c7
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c3
-rw-r--r--drivers/mtd/maps/ixp4xx.c7
-rw-r--r--drivers/mtd/maps/pcmciamtd.c91
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c68
-rw-r--r--drivers/mtd/maps/pismo.c8
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c9
-rw-r--r--drivers/mtd/mtd_blkdevs.c335
-rw-r--r--drivers/mtd/mtdblock.c72
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c124
-rw-r--r--drivers/mtd/mtdconcat.c3
-rw-r--r--drivers/mtd/mtdcore.c285
-rw-r--r--drivers/mtd/mtdcore.h7
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdsuper.c18
-rw-r--r--drivers/mtd/nand/Kconfig69
-rw-r--r--drivers/mtd/nand/Makefile10
-rw-r--r--drivers/mtd/nand/alauda.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c12
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c3
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c29
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/denali.c2134
-rw-r--r--drivers/mtd/nand/denali.h816
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c15
-rw-r--r--drivers/mtd/nand/fsl_upm.c16
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c917
-rw-r--r--drivers/mtd/nand/mxc_nand.c146
-rw-r--r--drivers/mtd/nand/nand_base.c387
-rw-r--r--drivers/mtd/nand/nand_bbt.c29
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h71
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c17
-rw-r--r--drivers/mtd/nand/ndfc.c15
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/omap2.c16
-rw-r--r--drivers/mtd/nand/orion_nand.c13
-rw-r--r--drivers/mtd/nand/pasemi_nand.c11
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c11
-rw-r--r--drivers/mtd/nand/r852.c1140
-rw-r--r--drivers/mtd/nand/r852.h163
-rw-r--r--drivers/mtd/nand/s3c2410.c12
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c148
-rw-r--r--drivers/mtd/nand/sm_common.h61
-rw-r--r--drivers/mtd/nand/socrates_nand.c11
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/Makefile1
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/onenand/onenand_base.c63
-rw-r--r--drivers/mtd/onenand/samsung.c1071
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c1284
-rw-r--r--drivers/mtd/sm_ftl.h94
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c3
-rw-r--r--drivers/mtd/tests/mtd_readtest.c3
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c3
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c3
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c3
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/build.c60
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/io.c6
-rw-r--r--drivers/mtd/ubi/kapi.c6
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c4
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c503.c44
-rw-r--r--drivers/net/3c505.c14
-rw-r--r--drivers/net/3c507.c8
-rw-r--r--drivers/net/3c509.c4
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c11
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c11
-rw-r--r--drivers/net/7990.c12
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig46
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/a2065.c11
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/acenic.c44
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c8
-rw-r--r--drivers/net/apne.c1
-rw-r--r--drivers/net/appletalk/cops.c9
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c4
-rw-r--r--drivers/net/ariadne.c3
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c7
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c9
-rw-r--r--drivers/net/arm/ks8695net.c13
-rw-r--r--drivers/net/arm/w90p910_ether.c7
-rw-r--r--drivers/net/at1700.c11
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c7
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c10
-rw-r--r--drivers/net/au1000_eth.c262
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/ax88796.c1
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bcm63xx_enet.c14
-rw-r--r--drivers/net/benet/be.h13
-rw-r--r--drivers/net/benet/be_cmds.c35
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c5
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c316
-rw-r--r--drivers/net/bfin_mac.c561
-rw-r--r--drivers/net/bfin_mac.h18
-rw-r--r--drivers/net/bmac.c15
-rw-r--r--drivers/net/bnx2.c102
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x.h66
-rw-r--r--drivers/net/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1878
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c275
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c449
-rw-r--r--drivers/net/can/Kconfig10
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/janz-ican3.c1830
-rw-r--r--drivers/net/can/mcp251x.c16
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c9
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c25
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c13
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c48
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c22
-rw-r--r--drivers/net/cassini.c15
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cnic.c90
-rw-r--r--drivers/net/cnic.h10
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/cpmac.c17
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h9
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c102
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c117
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c10
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c15
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c50
-rw-r--r--drivers/net/dnet.c4
-rw-r--r--drivers/net/e100.c190
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c89
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c429
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c112
-rw-r--r--drivers/net/e1000e/82571.c29
-rw-r--r--drivers/net/e1000e/defines.h9
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/es2lan.c11
-rw-r--r--drivers/net/e1000e/ethtool.c48
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c391
-rw-r--r--drivers/net/e1000e/lib.c60
-rw-r--r--drivers/net/e1000e/netdev.c866
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/e1000e/phy.c21
-rw-r--r--drivers/net/e2100.c1
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c11
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c99
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_main.c354
-rw-r--r--drivers/net/enic/enic_res.c5
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_dev.c110
-rw-r--r--drivers/net/enic/vnic_dev.h10
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_vic.c73
-rw-r--r--drivers/net/enic/vnic_vic.h59
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c13
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/es3210.c2
-rw-r--r--drivers/net/eth16i.c5
-rw-r--r--drivers/net/ethoc.c42
-rw-r--r--drivers/net/ewrk3.c14
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c1162
-rw-r--r--drivers/net/fec.h2
-rw-r--r--drivers/net/fec_mpc52xx.c28
-rw-r--r--drivers/net/fec_mpc52xx_phy.c11
-rw-r--r--drivers/net/forcedeth.c251
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/fs_enet/mac-fcc.c14
-rw-r--r--drivers/net/fs_enet/mac-fec.c10
-rw-r--r--drivers/net/fs_enet/mac-scc.c12
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c7
-rw-r--r--drivers/net/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/fsl_pq_mdio.c15
-rw-r--r--drivers/net/gianfar.c229
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c10
-rw-r--r--drivers/net/hamachi.c11
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hamradio/yam.c3
-rw-r--r--drivers/net/hp-plus.c4
-rw-r--r--drivers/net/hp.c3
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/hydra.c1
-rw-r--r--drivers/net/ibm_newemac/core.c33
-rw-r--r--drivers/net/ibm_newemac/debug.c9
-rw-r--r--drivers/net/ibm_newemac/debug.h4
-rw-r--r--drivers/net/ibm_newemac/mal.c36
-rw-r--r--drivers/net/ibm_newemac/rgmii.c20
-rw-r--r--drivers/net/ibm_newemac/tah.c15
-rw-r--r--drivers/net/ibm_newemac/zmii.c17
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c24
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.c35
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h5
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c58
-rw-r--r--drivers/net/igb/igb_main.c613
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c88
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/mcs7780.c4
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c24
-rw-r--r--drivers/net/ixgb/ixgb_hw.c164
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c159
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c9
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c419
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c539
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h22
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c146
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c723
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c72
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h54
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c125
-rw-r--r--drivers/net/ixgbevf/vf.c27
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/jme.c12
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c61
-rw-r--r--drivers/net/ks8851.c448
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c86
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lib82596.c11
-rw-r--r--drivers/net/lib8390.c22
-rw-r--r--drivers/net/ll_temac.h19
-rw-r--r--drivers/net/ll_temac_main.c253
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/lp486e.c8
-rw-r--r--drivers/net/mac8390.c2
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c9
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c7
-rw-r--r--drivers/net/macvlan.c32
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/meth.c4
-rw-r--r--drivers/net/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/icm.c36
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c54
-rw-r--r--drivers/net/myri_sbus.c11
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/ne-h8300.c1
-rw-r--r--drivers/net/ne.c1
-rw-r--r--drivers/net/ne2.c1
-rw-r--r--drivers/net/ne2k-pci.c1
-rw-r--r--drivers/net/ne3210.c2
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c9
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h8
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c131
-rw-r--r--drivers/net/netxen/netxen_nic_init.c169
-rw-r--r--drivers/net/netxen/netxen_nic_main.c85
-rw-r--r--drivers/net/ni5010.c5
-rw-r--r--drivers/net/ni52.c13
-rw-r--r--drivers/net/ni65.c5
-rw-r--r--drivers/net/niu.c74
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c66
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c7
-rw-r--r--drivers/net/pcmcia/3c574_cs.c21
-rw-r--r--drivers/net/pcmcia/3c589_cs.c303
-rw-r--r--drivers/net/pcmcia/axnet_cs.c31
-rw-r--r--drivers/net/pcmcia/com20020_cs.c29
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c27
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c18
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c26
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c16
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c31
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c45
-rw-r--r--drivers/net/pcnet32.c15
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio-gpio.c13
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/phy/national.c10
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/plip.c4
-rw-r--r--drivers/net/ppp_generic.c23
-rw-r--r--drivers/net/pppoe.c10
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c13
-rw-r--r--drivers/net/ps3_gelic_wireless.c76
-rw-r--r--drivers/net/qla3xxx.c72
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c34
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h60
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c136
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c101
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c46
-rw-r--r--drivers/net/r8169.c158
-rw-r--r--drivers/net/rrunner.c1
-rw-r--r--drivers/net/s2io.c15
-rw-r--r--drivers/net/s6gmac.c3
-rw-r--r--drivers/net/sb1000.c5
-rw-r--r--drivers/net/sb1250-mac.c275
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/efx.c137
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c16
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h3
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c19
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c6
-rw-r--r--drivers/net/sh_eth.c5
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c25
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c15
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c46
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/sky2.c215
-rw-r--r--drivers/net/sky2.h41
-rw-r--r--drivers/net/slhc.c1
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/smc-mca.c1
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/smc-ultra32.c1
-rw-r--r--drivers/net/smc911x.c21
-rw-r--r--drivers/net/smc9194.c61
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smsc911x.c11
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/sonic.c10
-rw-r--r--drivers/net/spider_net.c8
-rw-r--r--drivers/net/starfire.c16
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c41
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c338
-rw-r--r--drivers/net/stmmac/dwmac100_core.c196
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c134
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c19
-rw-r--r--drivers/net/stmmac/enh_desc.c337
-rw-r--r--drivers/net/stmmac/norm_desc.c236
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/stmmac/stmmac_timer.c6
-rw-r--r--drivers/net/stnic.c1
-rw-r--r--drivers/net/sun3_82586.c13
-rw-r--r--drivers/net/sun3lance.c8
-rw-r--r--drivers/net/sunbmac.c25
-rw-r--r--drivers/net/sundance.c14
-rw-r--r--drivers/net/sungem.c9
-rw-r--r--drivers/net/sunhme.c37
-rw-r--r--drivers/net/sunlance.c24
-rw-r--r--drivers/net/sunqe.c20
-rw-r--r--drivers/net/sunvnet.c9
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c839
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c13
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c13
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/madgemc.c12
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/smctr.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c65
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c13
-rw-r--r--drivers/net/tulip/de4x5.c87
-rw-r--r--drivers/net/tulip/dmfe.c17
-rw-r--r--drivers/net/tulip/media.c2
-rw-r--r--drivers/net/tulip/pnic.c2
-rw-r--r--drivers/net/tulip/tulip_core.c31
-rw-r--r--drivers/net/tulip/uli526x.c10
-rw-r--r--drivers/net/tulip/winbond-840.c18
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c58
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c21
-rw-r--r--drivers/net/usb/asix.c53
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c113
-rw-r--r--drivers/net/usb/dm9601.c9
-rw-r--r--drivers/net/usb/hso.c7
-rw-r--r--drivers/net/usb/ipheth.c24
-rw-r--r--drivers/net/usb/kaweth.c13
-rw-r--r--drivers/net/usb/mcs7830.c10
-rw-r--r--drivers/net/usb/pegasus.c9
-rw-r--r--drivers/net/usb/pegasus.h2
-rw-r--r--drivers/net/usb/rndis_host.c18
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c121
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c95
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vxge/vxge-config.c41
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c245
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c79
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/pc300_drv.c5
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wd.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c27
-rw-r--r--drivers/net/wimax/i2400m/driver.c167
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h82
-rw-r--r--drivers/net/wimax/i2400m/netdev.c14
-rw-r--r--drivers/net/wimax/i2400m/rx.c116
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c35
-rw-r--r--drivers/net/wimax/i2400m/sdio.c7
-rw-r--r--drivers/net/wimax/i2400m/tx.c155
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c14
-rw-r--r--drivers/net/wireless/Kconfig92
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c52
-rw-r--r--drivers/net/wireless/airo_cs.c72
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h52
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c587
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c25
-rw-r--r--drivers/net/wireless/ath/ath.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c336
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c379
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c77
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2292
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c802
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1838
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1024
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c392
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1014
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h465
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c255
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c834
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1775
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c707
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h245
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1913
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h275
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c571
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c130
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h596
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c550
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h183
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c336
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h139
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c591
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/atmel_cs.c70
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/pcmcia.c5
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c38
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c60
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c190
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c361
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1493
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c331
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c850
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c276
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c308
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1530
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1340
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c425
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c912
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c826
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c557
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c901
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c406
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c79
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c19
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c5
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_cs.c21
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c127
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/rx.c51
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c252
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c106
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c96
-rw-r--r--drivers/net/wireless/mwl8k.c30
-rw-r--r--drivers/net/wireless/orinoco/Kconfig20
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c91
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c102
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/main.c307
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h38
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1795
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c32
-rw-r--r--drivers/net/wireless/orinoco/wext.c273
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c10
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c257
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c390
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c65
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c130
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c676
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c318
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c299
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c102
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c135
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c115
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c14
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c73
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c144
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c46
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c337
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h139
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1272
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c80
-rw-r--r--drivers/net/wireless/zd1201.c12
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/xilinx_emaclite.c27
-rw-r--r--drivers/net/yellowfin.c13
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/net/zorro8390.c2
-rw-r--r--drivers/of/device.c25
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/of_i2c.c4
-rw-r--r--drivers/of/of_mdio.c6
-rw-r--r--drivers/of/of_spi.c2
-rw-r--r--drivers/of/platform.c13
-rw-r--r--drivers/oprofile/cpu_buffer.c75
-rw-r--r--drivers/oprofile/oprof.c12
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/timer_int.c78
-rw-r--r--drivers/parport/parport_amiga.c64
-rw-r--r--drivers/parport/parport_cs.c13
-rw-r--r--drivers/parport/parport_sunbpp.c7
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/access.c41
-rw-r--r--drivers/pci/dmar.c82
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c5
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c5
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c17
-rw-r--r--drivers/pci/intel-iommu.c151
-rw-r--r--drivers/pci/intr_remapping.c6
-rw-r--r--drivers/pci/pci-sysfs.c89
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c179
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h23
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c566
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/pci/quirks.c21
-rw-r--r--drivers/pci/slot.c48
-rw-r--r--drivers/pcmcia/Kconfig25
-rw-r--r--drivers/pcmcia/Makefile10
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c125
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/cs_internal.h22
-rw-r--r--drivers/pcmcia/ds.c34
-rw-r--r--drivers/pcmcia/electra_cf.c9
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c7
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_cis.c356
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c40
-rw-r--r--drivers/pcmcia/pcmcia_resource.c634
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c229
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c172
-rw-r--r--drivers/pcmcia/rsrc_mgr.c112
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c164
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/classmate-laptop.c170
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c6
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c829
-rw-r--r--drivers/platform/x86/msi-laptop.c163
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c600
-rw-r--r--drivers/platform/x86/wmi.c103
-rw-r--r--drivers/power/Kconfig15
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ds2760_battery.c64
-rw-r--r--drivers/power/ds2782_battery.c194
-rw-r--r--drivers/power/olpc_battery.c2
-rw-r--r--drivers/power/pda_power.c10
-rw-r--r--drivers/power/power_supply.h7
-rw-r--r--drivers/power/power_supply_core.c48
-rw-r--r--drivers/power/power_supply_sysfs.c147
-rw-r--r--drivers/power/test_power.c163
-rw-r--r--drivers/power/tosa_battery.c4
-rw-r--r--drivers/power/wm831x_power.c33
-rw-r--r--drivers/power/wm97xx_battery.c3
-rw-r--r--drivers/power/z2_battery.c328
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/rapidio/Kconfig24
-rw-r--r--drivers/rapidio/Makefile4
-rw-r--r--drivers/rapidio/rio-scan.c424
-rw-r--r--drivers/rapidio/rio-sysfs.c6
-rw-r--r--drivers/rapidio/rio.c433
-rw-r--r--drivers/rapidio/rio.h44
-rw-r--r--drivers/rapidio/switches/Kconfig28
-rw-r--r--drivers/rapidio/switches/Makefile9
-rw-r--r--drivers/rapidio/switches/idtcps.c137
-rw-r--r--drivers/rapidio/switches/tsi500.c20
-rw-r--r--drivers/rapidio/switches/tsi568.c146
-rw-r--r--drivers/rapidio/switches/tsi57x.c315
-rw-r--r--drivers/regulator/88pm8607.c533
-rw-r--r--drivers/regulator/ab3100.c45
-rw-r--r--drivers/regulator/bq24022.c1
-rw-r--r--drivers/regulator/core.c85
-rw-r--r--drivers/regulator/mc13783-regulator.c6
-rw-r--r--drivers/regulator/tps6507x-regulator.c373
-rw-r--r--drivers/regulator/twl-regulator.c138
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ab3100.c41
-rw-r--r--drivers/rtc/rtc-ab8500.c363
-rw-r--r--drivers/rtc/rtc-cmos.c94
-rw-r--r--drivers/rtc/rtc-davinci.c673
-rw-r--r--drivers/rtc/rtc-ds1302.c85
-rw-r--r--drivers/rtc/rtc-ds1305.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1511.c10
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1742.c4
-rw-r--r--drivers/rtc/rtc-isl1208.c45
-rw-r--r--drivers/rtc/rtc-m41t80.c22
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-mxc.c25
-rw-r--r--drivers/rtc/rtc-rx8581.c6
-rw-r--r--drivers/rtc/rtc-s3c.c107
-rw-r--r--drivers/rtc/rtc-stk17ta8.c8
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/rtc/rtc-wm831x.c16
-rw-r--r--drivers/s390/block/dasd.c45
-rw-r--r--drivers/s390/block/dasd_3990_erp.c20
-rw-r--r--drivers/s390/block/dasd_alias.c125
-rw-r--r--drivers/s390/block/dasd_devmap.c174
-rw-r--r--drivers/s390/block/dasd_eckd.c117
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_int.h50
-rw-r--r--drivers/s390/char/Kconfig3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/keyboard.c21
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/vmcp.c38
-rw-r--r--drivers/s390/char/zcore.c4
-rw-r--r--drivers/s390/cio/ccwgroup.c7
-rw-r--r--drivers/s390/cio/ccwreq.c15
-rw-r--r--drivers/s390/cio/chp.c5
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/cio.c3
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/cio/ioasm.h15
-rw-r--r--drivers/s390/cio/qdio.h15
-rw-r--r--drivers/s390/cio/qdio_main.c67
-rw-r--r--drivers/s390/cio/qdio_setup.c8
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/net/ctcm_main.c6
-rw-r--r--drivers/s390/net/ctcm_mpc.c6
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_core.h32
-rw-r--r--drivers/s390/net/qeth_core_main.c254
-rw-r--r--drivers/s390/net/qeth_core_mpc.h10
-rw-r--r--drivers/s390/net/qeth_core_sys.c151
-rw-r--r--drivers/s390/net/qeth_l2_main.c47
-rw-r--r--drivers/s390/net/qeth_l3_main.c108
-rw-r--r--drivers/s390/net/qeth_l3_sys.c244
-rw-r--r--drivers/s390/scsi/zfcp_aux.c7
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c1
-rw-r--r--drivers/s390/scsi/zfcp_def.h19
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h6
-rw-r--r--drivers/s390/scsi/zfcp_fc.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c246
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c108
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h104
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c23
-rw-r--r--drivers/sbus/char/bbc_envctrl.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c11
-rw-r--r--drivers/sbus/char/display7seg.c9
-rw-r--r--drivers/sbus/char/envctrl.c9
-rw-r--r--drivers/sbus/char/flash.c17
-rw-r--r--drivers/sbus/char/openprom.c44
-rw-r--r--drivers/sbus/char/uctrl.c9
-rw-r--r--drivers/scsi/3w-9xxx.c34
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-sas.c12
-rw-r--r--drivers/scsi/3w-xxxx.c34
-rw-r--r--drivers/scsi/3w-xxxx.h8
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c398
-rw-r--r--drivers/scsi/a2091.h46
-rw-r--r--drivers/scsi/a3000.c391
-rw-r--r--drivers/scsi/a3000.h50
-rw-r--r--drivers/scsi/a4000t.c101
-rw-r--r--drivers/scsi/aacraid/aachba.c67
-rw-r--r--drivers/scsi/aacraid/aacraid.h4
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/aacraid/commsup.c18
-rw-r--r--drivers/scsi/aacraid/linit.c11
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h29
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c684
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h29
-rw-r--r--drivers/scsi/bfa/bfa_core.c22
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c22
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h23
-rw-r--r--drivers/scsi/bfa/bfad.c21
-rw-r--r--drivers/scsi/bfa/bfad_attr.c201
-rw-r--r--drivers/scsi/bfa/bfad_drv.h4
-rw-r--r--drivers/scsi/bfa/bfad_im.c67
-rw-r--r--drivers/scsi/bfa/bfad_im.h6
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h2
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c11
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/dpt_i2o.c20
-rw-r--r--drivers/scsi/fcoe/fcoe.c214
-rw-r--r--drivers/scsi/fcoe/libfcoe.c111
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/gdth.c22
-rw-r--r--drivers/scsi/gvp11.c582
-rw-r--r--drivers/scsi/gvp11.h49
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa_cmd.h15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c13
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ipr.c236
-rw-r--r--drivers/scsi/ipr.h31
-rw-r--r--drivers/scsi/iscsi_tcp.c12
-rw-r--r--drivers/scsi/iscsi_tcp.h1
-rw-r--r--drivers/scsi/libfc/fc_disc.c8
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c51
-rw-r--r--drivers/scsi/libfc/fc_fcp.c103
-rw-r--r--drivers/scsi/libfc/fc_libfc.h8
-rw-r--r--drivers/scsi/libfc/fc_lport.c58
-rw-r--r--drivers/scsi/libfc/fc_npiv.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c195
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c18
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c498
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c79
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h190
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c99
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c269
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid.c20
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c22
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c92
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h36
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c10
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c46
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1055
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c89
-rw-r--r--drivers/scsi/mvme147.c178
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c25
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c201
-rw-r--r--drivers/scsi/mvsas/mv_sas.h11
-rw-r--r--drivers/scsi/osst.c23
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c9
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c9
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c23
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h1
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c13
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c804
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c1212
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h135
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c61
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h343
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h126
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c724
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c879
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c540
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c266
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3636
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h889
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c543
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c149
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h48
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h46
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c374
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c23
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c337
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c135
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/qlogicpti.c17
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c89
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_scan.c38
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_trace.c284
-rw-r--r--drivers/scsi/scsi_transport_fc.c30
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/sg.c17
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/sun_esp.c23
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/scsi/wd33c93.h1
-rw-r--r--drivers/scsi/zorro7xx.c1
-rw-r--r--drivers/serial/68328serial.c2
-rw-r--r--drivers/serial/8250.c4
-rw-r--r--drivers/serial/Kconfig105
-rw-r--r--drivers/serial/Makefile3
-rw-r--r--drivers/serial/altera_jtaguart.c504
-rw-r--r--drivers/serial/altera_uart.c570
-rw-r--r--drivers/serial/amba-pl011.c6
-rw-r--r--drivers/serial/apbuart.c10
-rw-r--r--drivers/serial/atmel_serial.c207
-rw-r--r--drivers/serial/bfin_sport_uart.c209
-rw-r--r--drivers/serial/bfin_sport_uart.h27
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/serial/kgdboc.c94
-rw-r--r--drivers/serial/mpc52xx_uart.c84
-rw-r--r--drivers/serial/mpsc.c1
-rw-r--r--drivers/serial/nwpserial.c2
-rw-r--r--drivers/serial/of_serial.c12
-rw-r--r--drivers/serial/pmac_zilog.c2
-rw-r--r--drivers/serial/s5pv210.c8
-rw-r--r--drivers/serial/serial_cs.c36
-rw-r--r--drivers/serial/sh-sci.c202
-rw-r--r--drivers/serial/sunhv.c9
-rw-r--r--drivers/serial/sunsab.c13
-rw-r--r--drivers/serial/sunsu.c13
-rw-r--r--drivers/serial/sunzilog.c65
-rw-r--r--drivers/serial/timbuart.c25
-rw-r--r--drivers/serial/uartlite.c43
-rw-r--r--drivers/serial/ucc_uart.c10
-rw-r--r--drivers/sfi/sfi_acpi.c41
-rw-r--r--drivers/sfi/sfi_core.c105
-rw-r--r--drivers/sfi/sfi_core.h8
-rw-r--r--drivers/sh/Kconfig24
-rw-r--r--drivers/sh/Makefile2
-rw-r--r--drivers/sh/clk-cpg.c298
-rw-r--r--drivers/sh/clk.c545
-rw-r--r--drivers/sh/intc.c333
-rw-r--r--drivers/spi/Kconfig23
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/amba-pl022.c250
-rw-r--r--drivers/spi/davinci_spi.c12
-rw-r--r--drivers/spi/ep93xx_spi.c938
-rw-r--r--drivers/spi/mpc512x_psc_spi.c576
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c15
-rw-r--r--drivers/spi/mpc52xx_spi.c22
-rw-r--r--drivers/spi/omap2_mcspi.c153
-rw-r--r--drivers/spi/pxa2xx_spi.c25
-rw-r--r--drivers/spi/spi_bitbang_txrx.h93
-rw-r--r--drivers/spi/spi_butterfly.c3
-rw-r--r--drivers/spi/spi_gpio.c3
-rw-r--r--drivers/spi/spi_lm70llp.c3
-rw-r--r--drivers/spi/spi_mpc8xxx.c127
-rw-r--r--drivers/spi/spi_ppc4xx.c2
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c3
-rw-r--r--drivers/spi/spi_sh_sci.c3
-rw-r--r--drivers/spi/xilinx_spi_of.c10
-rw-r--r--drivers/ssb/driver_chipcommon.c3
-rw-r--r--drivers/ssb/main.c5
-rw-r--r--drivers/ssb/pci.c14
-rw-r--r--drivers/ssb/sprom.c14
-rw-r--r--drivers/staging/Kconfig20
-rw-r--r--drivers/staging/Makefile12
-rw-r--r--drivers/staging/adis16255/Kconfig11
-rw-r--r--drivers/staging/adis16255/Makefile1
-rw-r--r--drivers/staging/adis16255/adis16255.c466
-rw-r--r--drivers/staging/adis16255/adis16255.h12
-rw-r--r--drivers/staging/arlan/Kconfig15
-rw-r--r--drivers/staging/arlan/Makefile3
-rw-r--r--drivers/staging/arlan/TODO7
-rw-r--r--drivers/staging/arlan/arlan-main.c1882
-rw-r--r--drivers/staging/arlan/arlan-proc.c1210
-rw-r--r--drivers/staging/arlan/arlan.h535
-rw-r--r--drivers/staging/asus_oled/asus_oled.c2
-rw-r--r--drivers/staging/batman-adv/CHANGELOG14
-rw-r--r--drivers/staging/batman-adv/Makefile4
-rw-r--r--drivers/staging/batman-adv/README255
-rw-r--r--drivers/staging/batman-adv/TODO23
-rw-r--r--drivers/staging/batman-adv/aggregation.c45
-rw-r--r--drivers/staging/batman-adv/aggregation.h7
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c484
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.h29
-rw-r--r--drivers/staging/batman-adv/bitarray.c76
-rw-r--r--drivers/staging/batman-adv/bitarray.h2
-rw-r--r--drivers/staging/batman-adv/device.c37
-rw-r--r--drivers/staging/batman-adv/device.h2
-rw-r--r--drivers/staging/batman-adv/hard-interface.c530
-rw-r--r--drivers/staging/batman-adv/hard-interface.h20
-rw-r--r--drivers/staging/batman-adv/hash.c2
-rw-r--r--drivers/staging/batman-adv/hash.h2
-rw-r--r--drivers/staging/batman-adv/main.c60
-rw-r--r--drivers/staging/batman-adv/main.h23
-rw-r--r--drivers/staging/batman-adv/originator.c295
-rw-r--r--drivers/staging/batman-adv/originator.h7
-rw-r--r--drivers/staging/batman-adv/packet.h2
-rw-r--r--drivers/staging/batman-adv/proc.c670
-rw-r--r--drivers/staging/batman-adv/proc.h40
-rw-r--r--drivers/staging/batman-adv/ring_buffer.c2
-rw-r--r--drivers/staging/batman-adv/ring_buffer.h2
-rw-r--r--drivers/staging/batman-adv/routing.c221
-rw-r--r--drivers/staging/batman-adv/routing.h2
-rw-r--r--drivers/staging/batman-adv/send.c149
-rw-r--r--drivers/staging/batman-adv/send.h6
-rw-r--r--drivers/staging/batman-adv/soft-interface.c30
-rw-r--r--drivers/staging/batman-adv/soft-interface.h2
-rw-r--r--drivers/staging/batman-adv/translation-table.c92
-rw-r--r--drivers/staging/batman-adv/translation-table.h8
-rw-r--r--drivers/staging/batman-adv/types.h54
-rw-r--r--drivers/staging/batman-adv/vis.c353
-rw-r--r--drivers/staging/batman-adv/vis.h19
-rw-r--r--drivers/staging/comedi/Kconfig1284
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi.h202
-rw-r--r--drivers/staging/comedi/comedi_compat32.c3
-rw-r--r--drivers/staging/comedi/comedi_fops.c242
-rw-r--r--drivers/staging/comedi/comedi_fops.h1
-rw-r--r--drivers/staging/comedi/comedi_ksyms.c69
-rw-r--r--drivers/staging/comedi/comedidev.h34
-rw-r--r--drivers/staging/comedi/comedilib.h170
-rw-r--r--drivers/staging/comedi/drivers.c112
-rw-r--r--drivers/staging/comedi/drivers/8253.h3
-rw-r--r--drivers/staging/comedi/drivers/8255.c16
-rw-r--r--drivers/staging/comedi/drivers/8255.h22
-rw-r--r--drivers/staging/comedi/drivers/Makefile245
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c13
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h16
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c94
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c191
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c206
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c41
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c857
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c27
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c153
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c36
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c116
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c132
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c61
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c94
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c13
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c10
-rw-r--r--drivers/staging/comedi/drivers/das08.c156
-rw-r--r--drivers/staging/comedi/drivers/das08.h2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c40
-rw-r--r--drivers/staging/comedi/drivers/das16.c159
-rw-r--r--drivers/staging/comedi/drivers/das1800.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c2
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c191
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c38
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c225
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c21
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.h3
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/mite.h2
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c185
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c47
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c46
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c46
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c24
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c25
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c107
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c88
-rw-r--r--drivers/staging/comedi/drivers/skel.c21
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c86
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c13
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c49
-rw-r--r--drivers/staging/comedi/internal.h12
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile9
-rw-r--r--drivers/staging/comedi/kcomedilib/data.c92
-rw-r--r--drivers/staging/comedi/kcomedilib/dio.c95
-rw-r--r--drivers/staging/comedi/kcomedilib/get.c293
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c523
-rw-r--r--drivers/staging/comedi/kcomedilib/ksyms.c146
-rw-r--r--drivers/staging/comedi/pci_ids.h31
-rw-r--r--drivers/staging/comedi/proc.c23
-rw-r--r--drivers/staging/comedi/range.c36
-rw-r--r--drivers/staging/comedi/wrapper.h25
-rw-r--r--drivers/staging/crystalhd/TODO1
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h72
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h119
-rw-r--r--drivers/staging/crystalhd/bc_dts_types.h25
-rw-r--r--drivers/staging/crystalhd/bcm_70012_regs.h24
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c178
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h21
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h60
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c212
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h121
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c76
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h10
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c100
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h55
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c103
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c152
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c84
-rw-r--r--drivers/staging/cx25821/cx25821-cards.c4
-rw-r--r--drivers/staging/cx25821/cx25821-core.c160
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.c25
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c14
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c260
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c149
-rw-r--r--drivers/staging/cx25821/cx25821-video.c145
-rw-r--r--drivers/staging/cx25821/cx25821-video.h78
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c88
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c84
-rw-r--r--drivers/staging/cxt1e1/Kconfig22
-rw-r--r--drivers/staging/cxt1e1/Makefile19
-rw-r--r--drivers/staging/cxt1e1/comet.c568
-rw-r--r--drivers/staging/cxt1e1/comet.h366
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c561
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h85
-rw-r--r--drivers/staging/cxt1e1/functions.c368
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c402
-rw-r--r--drivers/staging/cxt1e1/libsbew.h581
-rw-r--r--drivers/staging/cxt1e1/linux.c1356
-rw-r--r--drivers/staging/cxt1e1/musycc.c2185
-rw-r--r--drivers/staging/cxt1e1/musycc.h460
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c39
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c561
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h60
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h155
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h124
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h82
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c1860
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h81
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h296
-rw-r--r--drivers/staging/cxt1e1/pmcc4_sysdep.h62
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h61
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h157
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h310
-rw-r--r--drivers/staging/cxt1e1/sbecrc.c137
-rw-r--r--drivers/staging/cxt1e1/sbeid.c217
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c358
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h52
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h136
-rw-r--r--drivers/staging/dream/Kconfig11
-rw-r--r--drivers/staging/dream/Makefile3
-rw-r--r--drivers/staging/dream/TODO1
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x_proc.c2
-rw-r--r--drivers/staging/dream/pmem.c27
-rw-r--r--drivers/staging/dream/qdsp5/audio_out.c9
-rw-r--r--drivers/staging/dream/smd/Kconfig26
-rw-r--r--drivers/staging/dream/smd/Makefile7
-rw-r--r--drivers/staging/dream/smd/rpc_server_dog_keepalive.c68
-rw-r--r--drivers/staging/dream/smd/rpc_server_time_remote.c77
-rw-r--r--drivers/staging/dream/smd/smd.c1330
-rw-r--r--drivers/staging/dream/smd/smd_private.h171
-rw-r--r--drivers/staging/dream/smd/smd_qmi.c855
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.c1261
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.h193
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_device.c377
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_servers.c230
-rw-r--r--drivers/staging/dream/smd/smd_tty.c208
-rw-r--r--drivers/staging/dream/synaptics_i2c_rmi.c28
-rw-r--r--drivers/staging/dt3155/allocator.c16
-rw-r--r--drivers/staging/dt3155/allocator.h4
-rw-r--r--drivers/staging/dt3155/dt3155.h44
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c380
-rw-r--r--drivers/staging/dt3155/dt3155_io.c24
-rw-r--r--drivers/staging/dt3155/dt3155_isr.c297
-rw-r--r--drivers/staging/dt3155/dt3155_isr.h2
-rw-r--r--drivers/staging/dt3155v4l/Kconfig20
-rw-r--r--drivers/staging/dt3155v4l/Makefile1
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1200
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.h224
-rw-r--r--drivers/staging/echo/echo.c2
-rw-r--r--drivers/staging/et131x/et1310_address_map.h7
-rw-r--r--drivers/staging/et131x/et1310_eeprom.c8
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et1310_rx.c55
-rw-r--r--drivers/staging/et131x/et1310_rx.h5
-rw-r--r--drivers/staging/et131x/et1310_tx.c2
-rw-r--r--drivers/staging/et131x/et131x_initpci.c12
-rw-r--r--drivers/staging/et131x/et131x_isr.c6
-rw-r--r--drivers/staging/et131x/et131x_netdev.c20
-rw-r--r--drivers/staging/frontier/alphatrack.c34
-rw-r--r--drivers/staging/frontier/tranzport.c36
-rw-r--r--drivers/staging/go7007/go7007-fw.c12
-rw-r--r--drivers/staging/go7007/go7007-usb.c3
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c4
-rw-r--r--drivers/staging/go7007/saa7134-go7007.c11
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/hv/Kconfig8
-rw-r--r--drivers/staging/hv/Makefile11
-rw-r--r--drivers/staging/hv/TODO3
-rw-r--r--drivers/staging/hv/blkvsc.c (renamed from drivers/staging/hv/BlkVsc.c)4
-rw-r--r--drivers/staging/hv/blkvsc_drv.c45
-rw-r--r--drivers/staging/hv/channel.c (renamed from drivers/staging/hv/Channel.c)195
-rw-r--r--drivers/staging/hv/channel.h (renamed from drivers/staging/hv/Channel.h)2
-rw-r--r--drivers/staging/hv/channel_interface.c (renamed from drivers/staging/hv/ChannelInterface.c)2
-rw-r--r--drivers/staging/hv/channel_interface.h (renamed from drivers/staging/hv/ChannelInterface.h)2
-rw-r--r--drivers/staging/hv/channel_mgmt.c (renamed from drivers/staging/hv/ChannelMgmt.c)228
-rw-r--r--drivers/staging/hv/channel_mgmt.h (renamed from drivers/staging/hv/ChannelMgmt.h)6
-rw-r--r--drivers/staging/hv/connection.c (renamed from drivers/staging/hv/Connection.c)31
-rw-r--r--drivers/staging/hv/hv.c (renamed from drivers/staging/hv/Hv.c)28
-rw-r--r--drivers/staging/hv/hv.h (renamed from drivers/staging/hv/Hv.h)0
-rw-r--r--drivers/staging/hv/hv_utils.c295
-rw-r--r--drivers/staging/hv/logging.h7
-rw-r--r--drivers/staging/hv/netvsc.c (renamed from drivers/staging/hv/NetVsc.c)104
-rw-r--r--drivers/staging/hv/netvsc.h (renamed from drivers/staging/hv/NetVsc.h)7
-rw-r--r--drivers/staging/hv/netvsc_api.h (renamed from drivers/staging/hv/NetVscApi.h)9
-rw-r--r--drivers/staging/hv/netvsc_drv.c243
-rw-r--r--drivers/staging/hv/osd.c70
-rw-r--r--drivers/staging/hv/ring_buffer.c (renamed from drivers/staging/hv/RingBuffer.c)16
-rw-r--r--drivers/staging/hv/ring_buffer.h (renamed from drivers/staging/hv/RingBuffer.h)0
-rw-r--r--drivers/staging/hv/rndis.h2
-rw-r--r--drivers/staging/hv/rndis_filter.c (renamed from drivers/staging/hv/RndisFilter.c)56
-rw-r--r--drivers/staging/hv/rndis_filter.h (renamed from drivers/staging/hv/RndisFilter.h)2
-rw-r--r--drivers/staging/hv/storvsc.c (renamed from drivers/staging/hv/StorVsc.c)54
-rw-r--r--drivers/staging/hv/storvsc_api.h (renamed from drivers/staging/hv/StorVscApi.h)2
-rw-r--r--drivers/staging/hv/storvsc_drv.c54
-rw-r--r--drivers/staging/hv/utils.h119
-rw-r--r--drivers/staging/hv/version_info.h (renamed from drivers/staging/hv/VersionInfo.h)7
-rw-r--r--drivers/staging/hv/vmbus.c (renamed from drivers/staging/hv/Vmbus.c)33
-rw-r--r--drivers/staging/hv/vmbus.h2
-rw-r--r--drivers/staging/hv/vmbus_api.h (renamed from drivers/staging/hv/VmbusApi.h)18
-rw-r--r--drivers/staging/hv/vmbus_channel_interface.h (renamed from drivers/staging/hv/VmbusChannelInterface.h)0
-rw-r--r--drivers/staging/hv/vmbus_drv.c96
-rw-r--r--drivers/staging/hv/vmbus_packet_format.h (renamed from drivers/staging/hv/VmbusPacketFormat.h)1
-rw-r--r--drivers/staging/hv/vmbus_private.h (renamed from drivers/staging/hv/VmbusPrivate.h)12
-rw-r--r--drivers/staging/hv/vstorage.h2
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h266
-rw-r--r--drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c237
-rw-r--r--drivers/staging/iio/Documentation/sysfs-class-iio294
-rw-r--r--drivers/staging/iio/Documentation/userspace.txt2
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/Makefile2
-rw-r--r--drivers/staging/iio/accel/Kconfig25
-rw-r--r--drivers/staging/iio/accel/Makefile11
-rw-r--r--drivers/staging/iio/accel/accel.h12
-rw-r--r--drivers/staging/iio/accel/adis16209.h193
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c615
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c266
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c124
-rw-r--r--drivers/staging/iio/accel/adis16220.h147
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c670
-rw-r--r--drivers/staging/iio/accel/adis16240.h218
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c599
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c254
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c124
-rw-r--r--drivers/staging/iio/accel/inclinometer.h23
-rw-r--r--drivers/staging/iio/accel/kxsd9.c88
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h4
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c173
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c24
-rw-r--r--drivers/staging/iio/accel/sca3000.h2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c178
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c35
-rw-r--r--drivers/staging/iio/adc/Kconfig11
-rw-r--r--drivers/staging/iio/adc/Makefile2
-rw-r--r--drivers/staging/iio/adc/adc.h15
-rw-r--r--drivers/staging/iio/adc/max1363.h122
-rw-r--r--drivers/staging/iio/adc/max1363_core.c1107
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c82
-rw-r--r--drivers/staging/iio/chrdev.h2
-rw-r--r--drivers/staging/iio/gyro/Kconfig13
-rw-r--r--drivers/staging/iio/gyro/Makefile7
-rw-r--r--drivers/staging/iio/gyro/adis16260.h175
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c661
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c256
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c124
-rw-r--r--drivers/staging/iio/gyro/gyro.h43
-rw-r--r--drivers/staging/iio/iio.h47
-rw-r--r--drivers/staging/iio/imu/Kconfig33
-rw-r--r--drivers/staging/iio/imu/Makefile14
-rw-r--r--drivers/staging/iio/imu/adis16300.h194
-rw-r--r--drivers/staging/iio/imu/adis16300_core.c768
-rw-r--r--drivers/staging/iio/imu/adis16300_ring.c233
-rw-r--r--drivers/staging/iio/imu/adis16300_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16350.h193
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c736
-rw-r--r--drivers/staging/iio/imu/adis16350_ring.c286
-rw-r--r--drivers/staging/iio/imu/adis16350_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16400.h229
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c800
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c245
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c127
-rw-r--r--drivers/staging/iio/industrialio-core.c60
-rw-r--r--drivers/staging/iio/industrialio-ring.c75
-rw-r--r--drivers/staging/iio/industrialio-trigger.c20
-rw-r--r--drivers/staging/iio/light/tsl2563.c18
-rw-r--r--drivers/staging/iio/magnetometer/magnet.h31
-rw-r--r--drivers/staging/iio/ring_generic.h50
-rw-r--r--drivers/staging/iio/ring_sw.c50
-rw-r--r--drivers/staging/iio/sysfs.h15
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c131
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c4
-rw-r--r--drivers/staging/line6/control.h166
-rw-r--r--drivers/staging/line6/driver.c10
-rw-r--r--drivers/staging/line6/dumprequest.c3
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/variax.c5
-rw-r--r--drivers/staging/memrar/Kconfig15
-rw-r--r--drivers/staging/memrar/Makefile2
-rw-r--r--drivers/staging/memrar/TODO43
-rw-r--r--drivers/staging/memrar/memrar-abi89
-rw-r--r--drivers/staging/memrar/memrar.h155
-rw-r--r--drivers/staging/memrar/memrar_allocator.c432
-rw-r--r--drivers/staging/memrar/memrar_allocator.h149
-rw-r--r--drivers/staging/memrar/memrar_handler.c996
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c1369
-rw-r--r--drivers/staging/otus/80211core/cagg.c34
-rw-r--r--drivers/staging/otus/80211core/ccmd.c3
-rw-r--r--drivers/staging/otus/80211core/cfunc.c3
-rw-r--r--drivers/staging/otus/80211core/cic.c9
-rw-r--r--drivers/staging/otus/80211core/cinit.c3
-rw-r--r--drivers/staging/otus/80211core/cmm.c144
-rw-r--r--drivers/staging/otus/80211core/cmmap.c103
-rw-r--r--drivers/staging/otus/80211core/cmmsta.c123
-rw-r--r--drivers/staging/otus/80211core/coid.c3
-rw-r--r--drivers/staging/otus/80211core/cpsmgr.c3
-rw-r--r--drivers/staging/otus/80211core/ctxrx.c81
-rw-r--r--drivers/staging/otus/80211core/queue.c5
-rw-r--r--drivers/staging/otus/80211core/ratectrl.c3
-rw-r--r--drivers/staging/otus/hal/hpani.c33
-rw-r--r--drivers/staging/otus/hal/hpani.h7
-rw-r--r--drivers/staging/otus/hal/hpfw2.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_2k.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_BA.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_OTUS_RC.c2
-rw-r--r--drivers/staging/otus/hal/hpfwuinit.c2
-rw-r--r--drivers/staging/otus/hal/hpmain.c30
-rw-r--r--drivers/staging/otus/hal/hpreg.c1586
-rw-r--r--drivers/staging/otus/ioctl.c37
-rw-r--r--drivers/staging/otus/usbdrv.c6
-rw-r--r--drivers/staging/otus/wwrap.c1
-rw-r--r--drivers/staging/otus/zdusb.c6
-rw-r--r--drivers/staging/panel/panel.c5
-rw-r--r--drivers/staging/phison/phison.c4
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README136
-rw-r--r--drivers/staging/poch/poch.c1443
-rw-r--r--drivers/staging/poch/poch.h35
-rw-r--r--drivers/staging/pohmelfs/config.c41
-rw-r--r--drivers/staging/pohmelfs/crypto.c38
-rw-r--r--drivers/staging/pohmelfs/dir.c14
-rw-r--r--drivers/staging/pohmelfs/inode.c74
-rw-r--r--drivers/staging/pohmelfs/net.c38
-rw-r--r--drivers/staging/pohmelfs/netfs.h17
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c12
-rw-r--r--drivers/staging/ramzswap/TODO5
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c667
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.h51
-rw-r--r--drivers/staging/ramzswap/ramzswap_ioctl.h14
-rw-r--r--drivers/staging/rar_register/Kconfig28
-rw-r--r--drivers/staging/rar_register/rar_register.c610
-rw-r--r--drivers/staging/rar_register/rar_register.h62
-rw-r--r--drivers/staging/rt2860/chip/mac_pci.h41
-rw-r--r--drivers/staging/rt2860/chip/mac_usb.h55
-rw-r--r--drivers/staging/rt2860/chip/rtmp_mac.h52
-rw-r--r--drivers/staging/rt2860/chip/rtmp_phy.h338
-rw-r--r--drivers/staging/rt2860/chips/rt3070.c4
-rw-r--r--drivers/staging/rt2860/chips/rt3090.c4
-rw-r--r--drivers/staging/rt2860/chips/rt30xx.c9
-rw-r--r--drivers/staging/rt2860/common/cmm_aes.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_data.c8
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_pci.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_usb.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c18
-rw-r--r--drivers/staging/rt2860/common/rtmp_init.c15
-rw-r--r--drivers/staging/rt2860/common/spectrum.c7
-rw-r--r--drivers/staging/rt2860/iface/rtmp_usb.h4
-rw-r--r--drivers/staging/rt2860/mlme.h2
-rw-r--r--drivers/staging/rt2860/pci_main_dev.c37
-rw-r--r--drivers/staging/rt2860/rt_linux.c51
-rw-r--r--drivers/staging/rt2860/rt_linux.h14
-rw-r--r--drivers/staging/rt2860/rt_main_dev.c20
-rw-r--r--drivers/staging/rt2860/rt_pci_rbus.c24
-rw-r--r--drivers/staging/rt2860/rt_usb.c20
-rw-r--r--drivers/staging/rt2860/rtmp.h412
-rw-r--r--drivers/staging/rt2860/sta/assoc.c1
-rw-r--r--drivers/staging/rt2860/sta_ioctl.c15
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c5
-rw-r--r--drivers/staging/rt2870/Kconfig2
-rw-r--r--drivers/staging/rt2870/common/rtusb_bulk.c42
-rw-r--r--drivers/staging/rt2870/common/rtusb_data.c8
-rw-r--r--drivers/staging/rt2870/common/rtusb_io.c39
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c7
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c2076
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c129
-rw-r--r--drivers/staging/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h168
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c38
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.c6
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.h4
-rw-r--r--drivers/staging/rtl8192su/Kconfig1
-rw-r--r--drivers/staging/rtl8192su/Makefile2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h24
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h1
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c12
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.c146
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.h40
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.c2347
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.h93
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.c503
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.c194
-rw-r--r--drivers/staging/rtl8192su/r8192U.h41
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c626
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.c663
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.h366
-rw-r--r--drivers/staging/rtl8192u/dot11d.h52
-rw-r--r--drivers/staging/rtl8192u/ieee80211.h548
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c12
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c1
-rw-r--r--drivers/staging/sep/sep_driver.c1
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c26
-rw-r--r--drivers/staging/slicoss/slicoss.c6
-rw-r--r--drivers/staging/sm7xx/smtcfb.c18
-rw-r--r--drivers/staging/sm7xx/smtcfb.h2
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c2823
-rw-r--r--drivers/staging/ti-st/Kconfig25
-rw-r--r--drivers/staging/ti-st/Makefile7
-rw-r--r--drivers/staging/ti-st/TODO19
-rw-r--r--drivers/staging/ti-st/bt_drv.c502
-rw-r--r--drivers/staging/ti-st/bt_drv.h61
-rw-r--r--drivers/staging/ti-st/fm.h13
-rw-r--r--drivers/staging/ti-st/st.h90
-rw-r--r--drivers/staging/ti-st/st_core.c1062
-rw-r--r--drivers/staging/ti-st/st_core.h98
-rw-r--r--drivers/staging/ti-st/st_kim.c754
-rw-r--r--drivers/staging/ti-st/st_kim.h150
-rw-r--r--drivers/staging/ti-st/st_ll.c147
-rw-r--r--drivers/staging/ti-st/st_ll.h62
-rw-r--r--drivers/staging/ti-st/sysfs-uim16
-rw-r--r--drivers/staging/tm6000/Kconfig32
-rw-r--r--drivers/staging/tm6000/Makefile17
-rw-r--r--drivers/staging/tm6000/README22
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c414
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c973
-rw-r--r--drivers/staging/tm6000/tm6000-core.c602
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c346
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c370
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h541
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c873
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h53
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1557
-rw-r--r--drivers/staging/tm6000/tm6000.h301
-rw-r--r--drivers/staging/udlfb/udlfb.c73
-rw-r--r--drivers/staging/usbip/stub_rx.c6
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/usbip_common.h2
-rw-r--r--drivers/staging/usbip/vhci.h2
-rw-r--r--drivers/staging/usbip/vhci_hcd.c2
-rw-r--r--drivers/staging/usbip/vhci_tx.c2
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.c1
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c115
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c396
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.h26
-rw-r--r--drivers/staging/vme/devices/vme_user.c18
-rw-r--r--drivers/staging/vme/vme.c4
-rw-r--r--drivers/staging/vt6655/80211hdr.h46
-rw-r--r--drivers/staging/vt6655/80211mgr.c88
-rw-r--r--drivers/staging/vt6655/80211mgr.h88
-rw-r--r--drivers/staging/vt6655/IEEE11h.c6
-rw-r--r--drivers/staging/vt6655/IEEE11h.h2
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c10
-rw-r--r--drivers/staging/vt6655/baseband.c64
-rw-r--r--drivers/staging/vt6655/baseband.h52
-rw-r--r--drivers/staging/vt6655/bssdb.c214
-rw-r--r--drivers/staging/vt6655/bssdb.h148
-rw-r--r--drivers/staging/vt6655/card.c214
-rw-r--r--drivers/staging/vt6655/card.h158
-rw-r--r--drivers/staging/vt6655/datarate.c52
-rw-r--r--drivers/staging/vt6655/datarate.h36
-rw-r--r--drivers/staging/vt6655/desc.h12
-rw-r--r--drivers/staging/vt6655/device.h28
-rw-r--r--drivers/staging/vt6655/device_main.c99
-rw-r--r--drivers/staging/vt6655/dpc.c184
-rw-r--r--drivers/staging/vt6655/dpc.h6
-rw-r--r--drivers/staging/vt6655/hostap.c13
-rw-r--r--drivers/staging/vt6655/hostap.h4
-rw-r--r--drivers/staging/vt6655/ioctl.c32
-rw-r--r--drivers/staging/vt6655/ioctl.h10
-rw-r--r--drivers/staging/vt6655/iwctl.c31
-rw-r--r--drivers/staging/vt6655/key.c36
-rw-r--r--drivers/staging/vt6655/key.h30
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h52
-rw-r--r--drivers/staging/vt6655/mib.c6
-rw-r--r--drivers/staging/vt6655/mib.h2
-rw-r--r--drivers/staging/vt6655/michael.c24
-rw-r--r--drivers/staging/vt6655/michael.h8
-rw-r--r--drivers/staging/vt6655/power.c26
-rw-r--r--drivers/staging/vt6655/power.h28
-rw-r--r--drivers/staging/vt6655/rc4.c4
-rw-r--r--drivers/staging/vt6655/rc4.h2
-rw-r--r--drivers/staging/vt6655/rf.c22
-rw-r--r--drivers/staging/vt6655/rf.h14
-rw-r--r--drivers/staging/vt6655/rxtx.c524
-rw-r--r--drivers/staging/vt6655/rxtx.h84
-rw-r--r--drivers/staging/vt6655/srom.c54
-rw-r--r--drivers/staging/vt6655/srom.h2
-rw-r--r--drivers/staging/vt6655/tether.c2
-rw-r--r--drivers/staging/vt6655/tether.h31
-rw-r--r--drivers/staging/vt6655/tkip.c2
-rw-r--r--drivers/staging/vt6655/tkip.h2
-rw-r--r--drivers/staging/vt6655/ttype.h22
-rw-r--r--drivers/staging/vt6655/vntwifi.c142
-rw-r--r--drivers/staging/vt6655/vntwifi.h142
-rw-r--r--drivers/staging/vt6655/wcmd.c104
-rw-r--r--drivers/staging/vt6655/wcmd.h26
-rw-r--r--drivers/staging/vt6655/wctl.c4
-rw-r--r--drivers/staging/vt6655/wmgr.c672
-rw-r--r--drivers/staging/vt6655/wmgr.h96
-rw-r--r--drivers/staging/vt6655/wpa.c14
-rw-r--r--drivers/staging/vt6655/wpa.h14
-rw-r--r--drivers/staging/vt6655/wpa2.c16
-rw-r--r--drivers/staging/vt6655/wpa2.h14
-rw-r--r--drivers/staging/vt6655/wpactl.c19
-rw-r--r--drivers/staging/vt6655/wroute.c6
-rw-r--r--drivers/staging/vt6656/80211hdr.h83
-rw-r--r--drivers/staging/vt6656/80211mgr.c88
-rw-r--r--drivers/staging/vt6656/80211mgr.h136
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c564
-rw-r--r--drivers/staging/vt6656/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6656/baseband.c114
-rw-r--r--drivers/staging/vt6656/baseband.h59
-rw-r--r--drivers/staging/vt6656/bssdb.c402
-rw-r--r--drivers/staging/vt6656/bssdb.h252
-rw-r--r--drivers/staging/vt6656/card.c68
-rw-r--r--drivers/staging/vt6656/card.h62
-rw-r--r--drivers/staging/vt6656/channel.c12
-rw-r--r--drivers/staging/vt6656/channel.h10
-rw-r--r--drivers/staging/vt6656/control.c87
-rw-r--r--drivers/staging/vt6656/control.h31
-rw-r--r--drivers/staging/vt6656/datarate.c71
-rw-r--r--drivers/staging/vt6656/datarate.h39
-rw-r--r--drivers/staging/vt6656/desc.h12
-rw-r--r--drivers/staging/vt6656/device.h218
-rw-r--r--drivers/staging/vt6656/dpc.c305
-rw-r--r--drivers/staging/vt6656/dpc.h27
-rw-r--r--drivers/staging/vt6656/firmware.c6
-rw-r--r--drivers/staging/vt6656/firmware.h9
-rw-r--r--drivers/staging/vt6656/hostap.c27
-rw-r--r--drivers/staging/vt6656/hostap.h9
-rw-r--r--drivers/staging/vt6656/int.c251
-rw-r--r--drivers/staging/vt6656/int.h12
-rw-r--r--drivers/staging/vt6656/iocmd.h353
-rw-r--r--drivers/staging/vt6656/ioctl.c47
-rw-r--r--drivers/staging/vt6656/ioctl.h15
-rw-r--r--drivers/staging/vt6656/iowpa.h4
-rw-r--r--drivers/staging/vt6656/iwctl.c52
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/key.c87
-rw-r--r--drivers/staging/vt6656/key.h62
-rw-r--r--drivers/staging/vt6656/mac.c14
-rw-r--r--drivers/staging/vt6656/mac.h9
-rw-r--r--drivers/staging/vt6656/main_usb.c229
-rw-r--r--drivers/staging/vt6656/mib.c115
-rw-r--r--drivers/staging/vt6656/mib.h192
-rw-r--r--drivers/staging/vt6656/michael.c181
-rw-r--r--drivers/staging/vt6656/michael.h12
-rw-r--r--drivers/staging/vt6656/power.c58
-rw-r--r--drivers/staging/vt6656/power.h45
-rw-r--r--drivers/staging/vt6656/rc4.c84
-rw-r--r--drivers/staging/vt6656/rc4.h13
-rw-r--r--drivers/staging/vt6656/rf.c32
-rw-r--r--drivers/staging/vt6656/rf.h33
-rw-r--r--drivers/staging/vt6656/rndis.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c874
-rw-r--r--drivers/staging/vt6656/rxtx.h46
-rw-r--r--drivers/staging/vt6656/srom.h2
-rw-r--r--drivers/staging/vt6656/tcrc.c23
-rw-r--r--drivers/staging/vt6656/tcrc.h11
-rw-r--r--drivers/staging/vt6656/tether.c45
-rw-r--r--drivers/staging/vt6656/tether.h33
-rw-r--r--drivers/staging/vt6656/tkip.c2
-rw-r--r--drivers/staging/vt6656/tkip.h7
-rw-r--r--drivers/staging/vt6656/tmacro.h4
-rw-r--r--drivers/staging/vt6656/ttype.h57
-rw-r--r--drivers/staging/vt6656/upc.h8
-rw-r--r--drivers/staging/vt6656/usbpipe.c138
-rw-r--r--drivers/staging/vt6656/usbpipe.h51
-rw-r--r--drivers/staging/vt6656/wcmd.c172
-rw-r--r--drivers/staging/vt6656/wcmd.h36
-rw-r--r--drivers/staging/vt6656/wctl.c23
-rw-r--r--drivers/staging/vt6656/wctl.h13
-rw-r--r--drivers/staging/vt6656/wmgr.c960
-rw-r--r--drivers/staging/vt6656/wmgr.h180
-rw-r--r--drivers/staging/vt6656/wpa.c14
-rw-r--r--drivers/staging/vt6656/wpa.h16
-rw-r--r--drivers/staging/vt6656/wpa2.c50
-rw-r--r--drivers/staging/vt6656/wpa2.h20
-rw-r--r--drivers/staging/vt6656/wpactl.c25
-rw-r--r--drivers/staging/vt6656/wpactl.h9
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h413
-rw-r--r--drivers/staging/wavelan/wavelan.c4383
-rw-r--r--drivers/staging/wavelan/wavelan.h370
-rw-r--r--drivers/staging/wavelan/wavelan.p.h696
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c4610
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h386
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h766
-rw-r--r--drivers/staging/winbond/TODO (renamed from drivers/staging/winbond/README)3
-rw-r--r--drivers/staging/winbond/core.h14
-rw-r--r--drivers/staging/winbond/localpara.h452
-rw-r--r--drivers/staging/winbond/mac_structures.h342
-rw-r--r--drivers/staging/winbond/mds.c324
-rw-r--r--drivers/staging/winbond/mds_f.h20
-rw-r--r--drivers/staging/winbond/mds_s.h173
-rw-r--r--drivers/staging/winbond/mlme_s.h288
-rw-r--r--drivers/staging/winbond/mlmetxrx.c62
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h6
-rw-r--r--drivers/staging/winbond/mto.c299
-rw-r--r--drivers/staging/winbond/mto.h179
-rw-r--r--drivers/staging/winbond/phy_calibration.c9
-rw-r--r--drivers/staging/winbond/phy_calibration.h177
-rw-r--r--drivers/staging/winbond/reg.c3985
-rw-r--r--drivers/staging/winbond/scan_s.h152
-rw-r--r--drivers/staging/winbond/sme_api.h211
-rw-r--r--drivers/staging/winbond/sysdef.h18
-rw-r--r--drivers/staging/winbond/wb35reg.c618
-rw-r--r--drivers/staging/winbond/wb35reg_f.h104
-rw-r--r--drivers/staging/winbond/wb35reg_s.h221
-rw-r--r--drivers/staging/winbond/wb35rx.c258
-rw-r--r--drivers/staging/winbond/wb35tx_f.h18
-rw-r--r--drivers/staging/winbond/wbhal_f.h137
-rw-r--r--drivers/staging/winbond/wbhal_s.h502
-rw-r--r--drivers/staging/winbond/wblinux_f.h19
-rw-r--r--drivers/staging/winbond/wbusb.c193
-rw-r--r--drivers/staging/winbond/wbusb_s.h23
-rw-r--r--drivers/staging/wlags49_h2/Kconfig2
-rw-r--r--drivers/staging/wlags49_h2/README.wlags492
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c80
-rw-r--r--drivers/staging/wlags49_h2/debug.h104
-rw-r--r--drivers/staging/wlags49_h2/dhf.c143
-rw-r--r--drivers/staging/wlags49_h2/dhf.h56
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h118
-rw-r--r--drivers/staging/wlags49_h2/hcf.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c39
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h8
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c51
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c11
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c957
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c9
-rw-r--r--drivers/staging/wlags49_h25/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c154
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211req.c5
-rw-r--r--drivers/staging/wlan-ng/p80211wext.c399
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c36
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c71
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c65
-rw-r--r--drivers/telephony/ixj.c15
-rw-r--r--drivers/telephony/ixj_pcmcia.c3
-rw-r--r--drivers/usb/atm/speedtch.c5
-rw-r--r--drivers/usb/atm/ueagle-atm.c347
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/class/cdc-acm.c22
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/class/cdc-wdm.c38
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c214
-rw-r--r--drivers/usb/core/devices.c19
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/driver.c60
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c246
-rw-r--r--drivers/usb/core/hcd.h578
-rw-r--r--drivers/usb/core/hub.c30
-rw-r--r--drivers/usb/core/hub.h205
-rw-r--r--drivers/usb/core/inode.c2
-rw-r--r--drivers/usb/core/message.c133
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/sysfs.c25
-rw-r--r--drivers/usb/core/urb.c18
-rw-r--r--drivers/usb/core/usb.c96
-rw-r--r--drivers/usb/early/ehci-dbgp.c120
-rw-r--r--drivers/usb/gadget/Kconfig58
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/at91_udc.c7
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/composite.c60
-rw-r--r--drivers/usb/gadget/config.c4
-rw-r--r--drivers/usb/gadget/dummy_hcd.c4
-rw-r--r--drivers/usb/gadget/epautoconf.c12
-rw-r--r--drivers/usb/gadget/f_acm.c32
-rw-r--r--drivers/usb/gadget/f_ecm.c33
-rw-r--r--drivers/usb/gadget/f_fs.c2442
-rw-r--r--drivers/usb/gadget/f_hid.c673
-rw-r--r--drivers/usb/gadget/f_mass_storage.c138
-rw-r--r--drivers/usb/gadget/f_rndis.c33
-rw-r--r--drivers/usb/gadget/f_uvc.c661
-rw-r--r--drivers/usb/gadget/f_uvc.h376
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c (renamed from drivers/usb/gadget/fsl_mx3_udc.c)14
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c7
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/g_ffs.c426
-rw-r--r--drivers/usb/gadget/hid.c298
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/gadget/u_ether.c4
-rw-r--r--drivers/usb/gadget/uvc.h241
-rw-r--r--drivers/usb/gadget/uvc_queue.c583
-rw-r--r--drivers/usb/gadget/uvc_queue.h89
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c374
-rw-r--r--drivers/usb/gadget/uvc_video.c386
-rw-r--r--drivers/usb/gadget/webcam.c399
-rw-r--r--drivers/usb/host/Kconfig15
-rw-r--r--drivers/usb/host/ehci-au1xxx.c27
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/ehci-hub.c182
-rw-r--r--drivers/usb/host/ehci-mxc.c4
-rw-r--r--drivers/usb/host/ehci-omap.c23
-rw-r--r--drivers/usb/host/ehci-pci.c18
-rw-r--r--drivers/usb/host/ehci-ppc-of.c11
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c9
-rw-r--r--drivers/usb/host/ehci.h18
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c13
-rw-r--r--drivers/usb/host/fhci-hub.c2
-rw-r--r--drivers/usb/host/fhci-mem.c2
-rw-r--r--drivers/usb/host/fhci-q.c2
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/host/fhci-tds.c2
-rw-r--r--drivers/usb/host/fhci.h11
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/isp1760-hcd.c29
-rw-r--r--drivers/usb/host/isp1760-if.c22
-rw-r--r--drivers/usb/host/ohci-hcd.c33
-rw-r--r--drivers/usb/host/ohci-omap3.c735
-rw-r--r--drivers/usb/host/ohci-ppc-of.c15
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c31
-rw-r--r--drivers/usb/host/r8a66597-hcd.c39
-rw-r--r--drivers/usb/host/sl811-hcd.c60
-rw-r--r--drivers/usb/host/sl811_cs.c28
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/whci/debug.c2
-rw-r--r--drivers/usb/host/whci/qset.c6
-rw-r--r--drivers/usb/host/xhci-dbg.c24
-rw-r--r--drivers/usb/host/xhci-hub.c39
-rw-r--r--drivers/usb/host/xhci-mem.c489
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c329
-rw-r--r--drivers/usb/host/xhci.c416
-rw-r--r--drivers/usb/host/xhci.h112
-rw-r--r--drivers/usb/misc/appledisplay.c6
-rw-r--r--drivers/usb/misc/ftdi-elan.c20
-rw-r--r--drivers/usb/misc/iowarrior.c12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c13
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c8
-rw-r--r--drivers/usb/misc/usblcd.c8
-rw-r--r--drivers/usb/misc/usbtest.c17
-rw-r--r--drivers/usb/mon/mon_bin.c27
-rw-r--r--drivers/usb/mon/mon_main.c3
-rw-r--r--drivers/usb/mon/mon_stat.c3
-rw-r--r--drivers/usb/mon/mon_text.c6
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/Makefile14
-rw-r--r--drivers/usb/musb/blackfin.c96
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c147
-rw-r--r--drivers/usb/musb/musb_core.h10
-rw-r--r--drivers/usb/musb/musb_debug.h13
-rw-r--r--drivers/usb/musb/musb_debugfs.c294
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c25
-rw-r--r--drivers/usb/musb/musb_regs.h10
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/musb/musbhsdma.h16
-rw-r--r--drivers/usb/musb/omap2430.c29
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c108
-rw-r--r--drivers/usb/otg/ulpi.c50
-rw-r--r--drivers/usb/serial/Kconfig23
-rw-r--r--drivers/usb/serial/Makefile2
-rw-r--r--drivers/usb/serial/aircable.c499
-rw-r--r--drivers/usb/serial/ark3116.c111
-rw-r--r--drivers/usb/serial/belkin_sa.c130
-rw-r--r--drivers/usb/serial/belkin_sa.h10
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/console.c27
-rw-r--r--drivers/usb/serial/cp210x.c63
-rw-r--r--drivers/usb/serial/cypress_m8.c242
-rw-r--r--drivers/usb/serial/cypress_m8.h53
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/empeg.c401
-rw-r--r--drivers/usb/serial/ftdi_sio.c457
-rw-r--r--drivers/usb/serial/ftdi_sio.h126
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h42
-rw-r--r--drivers/usb/serial/generic.c330
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_edgeport.h16
-rw-r--r--drivers/usb/serial/io_ionsp.h95
-rw-r--r--drivers/usb/serial/io_ti.c228
-rw-r--r--drivers/usb/serial/io_ti.h92
-rw-r--r--drivers/usb/serial/io_usbvend.h87
-rw-r--r--drivers/usb/serial/ipaq.c357
-rw-r--r--drivers/usb/serial/ipaq.h54
-rw-r--r--drivers/usb/serial/ipw.c184
-rw-r--r--drivers/usb/serial/ir-usb.c272
-rw-r--r--drivers/usb/serial/iuu_phoenix.c30
-rw-r--r--drivers/usb/serial/kl5kusb105.c436
-rw-r--r--drivers/usb/serial/kl5kusb105.h47
-rw-r--r--drivers/usb/serial/kobil_sct.c3
-rw-r--r--drivers/usb/serial/kobil_sct.h75
-rw-r--r--drivers/usb/serial/mct_u232.c7
-rw-r--r--drivers/usb/serial/mct_u232.h254
-rw-r--r--drivers/usb/serial/mos7720.c1130
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/option.c841
-rw-r--r--drivers/usb/serial/oti6858.c254
-rw-r--r--drivers/usb/serial/pl2303.c430
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/serial/qcaux.c5
-rw-r--r--drivers/usb/serial/qcserial.c64
-rw-r--r--drivers/usb/serial/safe_serial.c231
-rw-r--r--drivers/usb/serial/spcp8x5.c407
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c179
-rw-r--r--drivers/usb/serial/usb-serial.c47
-rw-r--r--drivers/usb/serial/usb-wwan.h67
-rw-r--r--drivers/usb/serial/usb_debug.c12
-rw-r--r--drivers/usb/serial/usb_wwan.c665
-rw-r--r--drivers/usb/serial/visor.c344
-rw-r--r--drivers/usb/serial/visor.h9
-rw-r--r--drivers/usb/serial/zio.c64
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/onetouch.c12
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h24
-rw-r--r--drivers/usb/storage/usb.c87
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/usb-skeleton.c10
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c4
-rw-r--r--drivers/usb/wusbcore/wusbhc.h4
-rw-r--r--drivers/vhost/net.c18
-rw-r--r--drivers/vhost/vhost.c70
-rw-r--r--drivers/video/Kconfig17
-rw-r--r--drivers/video/amifb.c49
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/radeon_base.c4
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/Kconfig116
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp8860_bl.c838
-rw-r--r--drivers/video/backlight/adx_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c160
-rw-r--r--drivers/video/backlight/l4f00242t03.c11
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c47
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c190
-rw-r--r--drivers/video/backlight/s6e63m0.c920
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/bf54x-lq043fb.c7
-rw-r--r--drivers/video/bfin-lq035q1-fb.c252
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c7
-rw-r--r--drivers/video/bw2.c7
-rw-r--r--drivers/video/cg14.c7
-rw-r--r--drivers/video/cg3.c7
-rw-r--r--drivers/video/cg6.c9
-rw-r--r--drivers/video/cirrusfb.c1
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/da8xx-fb.c301
-rw-r--r--drivers/video/efifb.c11
-rw-r--r--drivers/video/fb_defio.c42
-rw-r--r--drivers/video/fbmem.c77
-rw-r--r--drivers/video/fbsysfs.c1
-rw-r--r--drivers/video/ffb.c9
-rw-r--r--drivers/video/fm2fb.c1
-rw-r--r--drivers/video/fsl-diu-fb.c10
-rw-r--r--drivers/video/hgafb.c10
-rw-r--r--drivers/video/hitfb.c8
-rw-r--r--drivers/video/intelfb/intelfb.h4
-rw-r--r--drivers/video/leo.c7
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c8
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/nuc900fb.c2
-rw-r--r--drivers/video/offb.c28
-rw-r--r--drivers/video/omap2/displays/Kconfig9
-rw-r--r--drivers/video/omap2/displays/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c819
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c78
-rw-r--r--drivers/video/omap2/displays/panel-taal.c143
-rw-r--r--drivers/video/omap2/dss/Kconfig6
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c85
-rw-r--r--drivers/video/omap2/dss/display.c9
-rw-r--r--drivers/video/omap2/dss/dss.c24
-rw-r--r--drivers/video/omap2/dss/dss.h50
-rw-r--r--drivers/video/omap2/dss/manager.c21
-rw-r--r--drivers/video/omap2/dss/sdi.c26
-rw-r--r--drivers/video/omap2/dss/venc.c15
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c5
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c25
-rw-r--r--drivers/video/p9100.c7
-rw-r--r--drivers/video/platinumfb.c9
-rw-r--r--drivers/video/s3c2410fb.c10
-rw-r--r--drivers/video/s3fb.c101
-rw-r--r--drivers/video/sgivwfb.c10
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sunxvr1000.c9
-rw-r--r--drivers/video/tcx.c7
-rw-r--r--drivers/video/vesafb.c11
-rw-r--r--drivers/video/vfb.c4
-rw-r--r--drivers/video/vga16fb.c36
-rw-r--r--drivers/video/via/Makefile4
-rw-r--r--drivers/video/via/accel.c137
-rw-r--r--drivers/video/via/accel.h40
-rw-r--r--drivers/video/via/chip.h8
-rw-r--r--drivers/video/via/dvi.c37
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c307
-rw-r--r--drivers/video/via/hw.h20
-rw-r--r--drivers/video/via/ioctl.h2
-rw-r--r--drivers/video/via/lcd.c31
-rw-r--r--drivers/video/via/lcd.h2
-rw-r--r--drivers/video/via/share.h20
-rw-r--r--drivers/video/via/via-core.c668
-rw-r--r--drivers/video/via/via-gpio.c285
-rw-r--r--drivers/video/via/via_i2c.c232
-rw-r--r--drivers/video/via/via_modesetting.c126
-rw-r--r--drivers/video/via/via_modesetting.h (renamed from drivers/video/via/via_i2c.h)42
-rw-r--r--drivers/video/via/via_utility.c1
-rw-r--r--drivers/video/via/viafbdev.c192
-rw-r--r--drivers/video/via/viafbdev.h14
-rw-r--r--drivers/video/via/viamode.c15
-rw-r--r--drivers/video/via/vt1636.c34
-rw-r--r--drivers/video/via/vt1636.h2
-rw-r--r--drivers/video/w100fb.c10
-rw-r--r--drivers/video/xilinxfb.c23
-rw-r--r--drivers/virtio/virtio_balloon.c17
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/virtio/virtio_ring.c44
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/w1/slaves/w1_ds2431.c4
-rw-r--r--drivers/w1/slaves/w1_ds2433.c4
-rw-r--r--drivers/w1/slaves/w1_ds2760.c2
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/Kconfig26
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bfin_wdt.c19
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/cpwd.c9
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c8
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c11
-rw-r--r--drivers/watchdog/iTCO_wdt.c29
-rw-r--r--drivers/watchdog/imx2_wdt.c358
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c10
-rw-r--r--drivers/watchdog/pc87413_wdt.c9
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pnx833x_wdt.c11
-rw-r--r--drivers/watchdog/rdc321x_wdt.c53
-rw-r--r--drivers/watchdog/riowd.c7
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/xen/manage.c28
-rw-r--r--drivers/zorro/proc.c6
-rw-r--r--drivers/zorro/zorro-driver.c24
-rw-r--r--drivers/zorro/zorro-sysfs.c13
-rw-r--r--drivers/zorro/zorro.c243
3685 files changed, 342606 insertions, 146652 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7..91874e04855 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/
+obj-y += idle/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
@@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-y += idle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
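The drivers/Makefile hunk above is purely a link-order change: Kbuild links built-in objects in the order their obj-y entries appear, and initcalls at the same level run in that link order, so listing idle/ ahead of the ACPI entries lets the idle drivers initialize first. A minimal sketch of that rule, using two hypothetical built-in drivers imagined to live in directories listed in this relative order (a_init and b_init are illustrative names, not part of this diff):

#include <linux/init.h>
#include <linux/kernel.h>

/* Assumed to be built into an object listed earlier in the Makefile. */
static int __init a_init(void)
{
	pr_info("a: same initcall level, linked earlier, so it runs first\n");
	return 0;
}
device_initcall(a_init);

/* Assumed to be built into an object listed later in the Makefile. */
static int __init b_init(void)
{
	pr_info("b: same initcall level, linked later, so it runs second\n");
	return 0;
}
device_initcall(b_init);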
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df..74641151880 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
+config ACPI_HED
+	tristate "Hardware Error Device"
+	help
+	  This driver supports the Hardware Error Device (PNP0C33),
+	  which is used to report some hardware errors notified via
+	  SCI, mainly the corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
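The new ACPI_HED entry describes a driver that binds to the ACPI Hardware Error Device (PNP0C33) and listens for the SCI notifications mentioned in the help text. A hedged sketch of the shape such a driver takes, using the standard acpi_bus_register_driver()/notify interface; this is an illustration only, not the actual hed.c added by this series, and the hed_sketch_* names are invented for the example:

#include <linux/kernel.h>
#include <linux/module.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

static const struct acpi_device_id hed_sketch_ids[] = {
	{"PNP0C33", 0},
	{"", 0},
};

static int hed_sketch_add(struct acpi_device *device)
{
	return 0;
}

/* Called for each SCI notification targeted at the PNP0C33 device. */
static void hed_sketch_notify(struct acpi_device *device, u32 event)
{
	/* A real consumer (e.g. APEI/GHES) would now go look for error records. */
	pr_info("hed sketch: notify event 0x%x\n", event);
}

static struct acpi_driver hed_sketch_driver = {
	.name = "hed_sketch",
	.class = "hardware_error_device",
	.ids = hed_sketch_ids,
	.ops = {
		.add = hed_sketch_add,
		.notify = hed_sketch_notify,
	},
};

static int __init hed_sketch_init(void)
{
	return acpi_bus_register_driver(&hed_sketch_driver);
}

static void __exit hed_sketch_exit(void)
{
	acpi_bus_unregister_driver(&hed_sketch_driver);
}

module_init(hed_sketch_init);
module_exit(hed_sketch_exit);
MODULE_LICENSE("GPL");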
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c..6ee33169e1d 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += hest.o
+acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
+obj-$(CONFIG_ACPI_HED) += hed.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693..d269a8f3329 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
static unsigned long power_saving_mwait_eax;
+
+static unsigned char tsc_detected_unstable;
+static unsigned char tsc_marked_unstable;
+
static void power_saving_mwait_init(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
/*FALL THROUGH*/
default:
- /* TSC could halt in idle, so notify users */
- mark_tsc_unstable("TSC halts in idle");
+ /* TSC could halt in idle */
+ tsc_detected_unstable = 1;
}
#endif
}
@@ -168,16 +172,14 @@ static int power_saving_thread(void *data)
do_sleep = 0;
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
expire_time = jiffies + HZ * (100 - idle_pct) / 100;
while (!need_resched()) {
+ if (tsc_detected_unstable && !tsc_marked_unstable) {
+ /* TSC could halt in idle, so notify users */
+ mark_tsc_unstable("TSC halts in idle");
+ tsc_marked_unstable = 1;
+ }
local_irq_disable();
cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
@@ -200,8 +202,6 @@ static int power_saving_thread(void *data)
}
}
- current_thread_info()->status |= TS_POLLING;
-
/*
* current sched_rt has threshold for rt task running time.
* When a rt task uses 95% CPU time, the rt thread will be
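The acpi_pad.c change above moves mark_tsc_unstable() out of the CPUID probe path: detection only records a flag, and the power-saving kthread performs the marking at most once from its own context. Restated as a minimal standalone sketch of that two-flag pattern, with simplified helper names (note_tsc_may_halt() and maybe_mark_tsc_unstable() are illustrative, not functions from this patch):

#include <asm/tsc.h>	/* mark_tsc_unstable(), x86 only */

static unsigned char tsc_detected_unstable;	/* condition seen during probe */
static unsigned char tsc_marked_unstable;	/* action already performed */

/* Probe path: just record that the TSC may halt in deep idle. */
static void note_tsc_may_halt(void)
{
	tsc_detected_unstable = 1;
}

/* Kthread loop: act once, from a context where the notifier work is safe. */
static void maybe_mark_tsc_unstable(void)
{
	if (tsc_detected_unstable && !tsc_marked_unstable) {
		mark_tsc_unstable("TSC halts in idle");
		tsc_marked_unstable = 1;
	}
}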
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7423052ece5..d93cc06f4bf 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 3e6ba99e405..64d1e5c2d4a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
- * evgpe - GPE handling and dispatch
+ * evgpe - Low-level GPE support
*/
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block);
+
/*
- * evgpeblk
+ * evgpeblk - Upper-level GPE block support
*/
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
-
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -116,12 +112,37 @@ u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
u32 gpe_number);
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+/*
+ * evgpeinit - GPE initialization and update
+ */
+acpi_status acpi_ev_gpe_initialize(void);
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
acpi_status
-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
-acpi_status acpi_ev_gpe_initialize(void);
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*
+ * evgpeutil - GPE utilities
+ */
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/*
* evregion - Address Space handling
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f8dd8f250ac..9070f1fe8f1 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+/*
+ * Optionally enable output from the AML Debug Object.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+
+/*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it). There are some BIOSes that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
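
The two globals added above are boolean switches that the host environment is expected to set before ACPICA table initialization. The sketch below is illustrative only: the direct assignments and the early call site are assumptions, not code from this patch (in the kernel these settings end up driven by boot-time options rather than hard-coded values).

#include <acpi/acpi.h>
#include "accommon.h"

/* Illustrative host-side setup, assumed to run before table loading */
acpi_status example_early_acpi_setup(void)
{
	/* Mirror AML Store(..., Debug) output to the system log */
	acpi_gbl_enable_aml_debug_object = TRUE;

	/* Cope with firmware known to corrupt the mapped DSDT */
	acpi_gbl_copy_dsdt_locally = TRUE;

	return acpi_initialize_tables(NULL, 0, FALSE);
}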
@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables found in the
- * RSDT/XSDT.
- *
+ * acpi_gbl_root_table_list is the master list of ACPI tables that were
+ * found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
+ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
/* These addresses are calculated from the FADT Event Block addresses */
@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+
/*
* Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6df3f842816..049e203bd62 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -121,6 +121,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
struct acpi_walk_state *walk_state);
/*
+ * exdebug - AML debug object
+ */
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index);
+
+/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
@@ -274,7 +281,7 @@ acpi_status
acpi_ex_system_do_notify_op(union acpi_operand_object *value,
union acpi_operand_object *obj_desc);
-acpi_status acpi_ex_system_do_suspend(u64 time);
+acpi_status acpi_ex_system_do_sleep(u64 time);
acpi_status acpi_ex_system_do_stall(u32 time);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 24b8faa5c39..147a7e6bd38 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
-/* One internal RSDT for table management */
+/* Internal ACPI table management - master table list */
-struct acpi_internal_rsdt {
- struct acpi_table_desc *tables;
- u32 count;
- u32 size;
+struct acpi_table_list {
+ struct acpi_table_desc *tables; /* Table descriptor array */
+ u32 current_table_count; /* Tables currently in the array */
+ u32 max_table_count; /* Max tables array will hold */
u8 flags;
};
@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
- u8 runtime_count;
- u8 wakeup_count;
+ u8 runtime_count; /* References to a run GPE */
+ u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
struct acpi_gpe_event_info *event_info; /* One for each GPE */
struct acpi_generic_address block_address; /* Base address of the block */
u32 register_count; /* Number of register pairs in block */
+ u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
};
@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
struct acpi_gpe_walk_info {
struct acpi_namespace_node *gpe_device;
struct acpi_gpe_block_info *gpe_block;
+ u16 count;
+ acpi_owner_id owner_id;
+ u8 enable_this_gpe;
+ u8 execute_by_owner_id;
};
struct acpi_gpe_device_info {
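
For the renamed table list, the two counters simply express occupancy versus capacity: current_table_count entries of the tables array are in use, out of max_table_count allocated slots. Likewise, the new gpe_count field is derived data: a block with register_count status/enable pairs covers register_count * ACPI_GPE_REGISTER_WIDTH (8) GPEs, so a two-register block based at 0x10 spans GPEs 0x10-0x1F. A hypothetical helper, shown only to make the counter semantics concrete:

#include <acpi/acpi.h>
#include "accommon.h"

/* Hypothetical helper (not in this patch): is the root table array full? */
static u8 example_table_list_is_full(struct acpi_table_list *table_list)
{
	return (u8)(table_list->current_table_count >=
		    table_list->max_table_count);
}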
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 8ff3b741df2..62a576e3436 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
acpi_status
acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
+void acpi_tb_check_dsdt_header(void);
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
+
void
acpi_tb_install_table(acpi_physical_address address,
char *signature, u32 table_index);
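
acpi_tb_check_dsdt_header pairs with the acpi_gbl_original_dsdt_header copy introduced in acglobal.h above: the header saved when the DSDT is first loaded is later compared against the live mapping to detect firmware that rewrites the table underneath the OS. The sketch below is a conceptual illustration of that comparison, an assumption rather than the function body from this patch:

#include <acpi/acpi.h>
#include "accommon.h"

/* Conceptual check only: has the mapped DSDT header changed since load? */
static u8 example_dsdt_header_changed(void)
{
	if (!acpi_gbl_DSDT) {
		return FALSE;
	}

	return (u8)(ACPI_MEMCMP(acpi_gbl_DSDT, &acpi_gbl_original_dsdt_header,
				sizeof(struct acpi_table_header)) != 0);
}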
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index bb13817e0c3..347bee1726f 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_ERROR((AE_INFO,
- "Invalid opcode in field list: %X",
+ "Invalid opcode in field list: 0x%X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 721039233aa..2a9a561c2f0 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
(walk_state->thread->current_sync_level >
obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
walk_state->thread->current_sync_level));
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index cc343b95954..f3d52f59250 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_ERROR((AE_INFO,
- "Local index %d is invalid (max %d)",
+ "Local index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_ERROR((AE_INFO,
- "Arg index %d is invalid (max %d)",
+ "Arg index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
break;
default:
- ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
return_ACPI_STATUS(AE_TYPE);
}
@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
case ACPI_REFCLASS_ARG:
ACPI_ERROR((AE_INFO,
- "Uninitialized Arg[%d] at node %p",
+ "Uninitialized Arg[%u] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
default:
ACPI_ERROR((AE_INFO,
- "Not a Arg/Local opcode: %X",
+ "Not a Arg/Local opcode: 0x%X",
type));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 891e08bf560..3607adcaf08 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
- "Expecting bytelist, got AML opcode %X in op %p",
+ "Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
+ "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
i, element_count));
}
@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown constant opcode %X",
+ "Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+ ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented reference type for AML opcode: %4.4X",
+ "Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+ ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bf980cadb1e..53a7e416f33 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in buffer obj %p",
+ "No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in package %p",
+ "No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown field creation opcode %02x", aml_opcode));
+ "Unknown field creation opcode 0x%02X",
+ aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+ "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+ ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b76c486d78..d555b374e31 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
obj_desc, walk_state, obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
- ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+ "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 050df816416..83155dd8671 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
if (!object) {
ACPI_ERROR((AE_INFO,
- "Null Object! Obj=%p State=%p Num=%X",
+ "Null Object! Obj=%p State=%p Num=%u",
object, walk_state, walk_state->result_count));
return (AE_BAD_PARAMETER);
}
@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
ACPI_RESULTS_OBJ_NUM_MAX) {
- ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+ ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
walk_state, walk_state->result_size));
return (AE_STACK_OVERFLOW);
}
@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_ERROR((AE_INFO,
- "Object stack overflow! Obj=%p State=%p #Ops=%X",
+ "Object stack overflow! Obj=%p State=%p #Ops=%u",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
if (walk_state->num_operands == 0) {
ACPI_ERROR((AE_INFO,
- "Object stack underflow! Count=%X State=%p #Ops=%X",
+ "Object stack underflow! Count=%X State=%p #Ops=%u",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
walk_state->operands[walk_state->num_operands] = NULL;
}
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
pop_count, walk_state, walk_state->num_operands));
return (AE_OK);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c1e6f472d43..f5795915a2e 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
- "No installed handler for fixed event [%08X]",
+ "No installed handler for fixed event [0x%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 78c55508aff..a221ad40416 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
*
* RETURN: Status
*
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ * DESCRIPTION: Updates GPE register enable masks based upon whether there are
+ * references (either wake or run) to this GPE
*
******************************************************************************/
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+ /* Clear the wake/run bits up front */
+
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
- if (gpe_event_info->runtime_count)
+ /* Set the mask bits only if there are references to this GPE */
+
+ if (gpe_event_info->runtime_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ }
- if (gpe_event_info->wakeup_count)
+ if (gpe_event_info->wakeup_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ }
return_ACPI_STATUS(AE_OK);
}
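
The register_bit arithmetic above deserves one concrete number: for a register whose base_gpe_number is 0x10, GPE 0x13 selects bit 3, so register_bit is 0x08, and that bit lands in enable_for_run and/or enable_for_wake according to the two reference counts. A small illustrative helper (hypothetical, mirroring the expression in the hunk above):

#include <acpi/acpi.h>
#include "accommon.h"

/* Illustrative only: the per-register bit for one GPE (0x13 - 0x10 -> 0x08) */
static u32 example_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *register_info =
	    gpe_event_info->register_info;

	return (u32)1 << (gpe_event_info->gpe_number -
			  register_info->base_gpe_number);
}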
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Enable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
+ * of type or number of references.
+ *
+ * Note: The GPE lock should already be acquired when this function is called.
*
******************************************************************************/
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
+
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
+ }
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
+
+ /* Clear the GPE (of stale events) */
- /* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/* Enable the requested GPE */
+
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_ACPI_STATUS(status);
}
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Disable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
+ * regardless of the type or number of references.
+ *
+ * Note: The GPE lock should already be acquired when this function is called.
*
******************************************************************************/
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_disable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+	 * Note: Always disable the GPE, even if we think that it is already
+ * disabled. It is possible that the AML or some other code has enabled
+ * the GPE behind our back.
+ */
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/*
- * Even if we don't know the GPE type, make sure that we always
- * disable it. low_disable_gpe will just clear the enable bit for this
- * GPE and write it. It will not write out the current GPE enable mask,
- * since this may inadvertently enable GPEs too early, if a rogue GPE has
- * come in during ACPICA initialization - possibly as a result of AML or
- * other code that has enabled the GPE.
+ * Always H/W disable this GPE, even if we don't know the GPE type.
+ * Simply clear the enable bit for this particular GPE, but do not
+ * write out the current GPE enable mask since this may inadvertently
+ * enable GPEs too early. An example is a rogue GPE that has arrived
+ * during ACPICA initialization - possibly because AML or other code
+ * has enabled the GPE.
*/
status = acpi_hw_low_disable_gpe(gpe_event_info);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_low_get_gpe_info
+ *
+ * PARAMETERS: gpe_number - Raw GPE number
+ * gpe_block - A GPE info block
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
+ * is not within the specified GPE block)
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
+ * the low-level implementation of ev_get_gpe_event_info.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block)
+{
+ u32 gpe_index;
+
+ /*
+ * Validate that the gpe_number is within the specified gpe_block.
+ * (Two steps)
+ */
+ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
+ return (NULL);
+ }
+
+ gpe_index = gpe_number - gpe_block->block_base_number;
+ if (gpe_index >= gpe_block->gpe_count) {
+ return (NULL);
+ }
+
+ return (&gpe_block->event_info[gpe_index]);
+}
+
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
@@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number)
{
union acpi_operand_object *obj_desc;
- struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_event_info *gpe_info;
u32 i;
ACPI_FUNCTION_ENTRY();
- /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+ /* A NULL gpe_device means use the FADT-defined GPE block(s) */
if (!gpe_device) {
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
- gpe_block = acpi_gbl_gpe_fadt_blocks[i];
- if (gpe_block) {
- if ((gpe_number >= gpe_block->block_base_number)
- && (gpe_number <
- gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number -
- gpe_block->
- block_base_number]);
- }
+ gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
+ acpi_gbl_gpe_fadt_blocks
+ [i]);
+ if (gpe_info) {
+ return (gpe_info);
}
}
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
return (NULL);
}
- gpe_block = obj_desc->device.gpe_block;
-
- if ((gpe_number >= gpe_block->block_base_number) &&
- (gpe_number <
- gpe_block->block_base_number + (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number - gpe_block->block_base_number]);
- }
-
- return (NULL);
+ return (acpi_ev_low_get_gpe_info
+ (gpe_number, obj_desc->device.gpe_block));
}
/*******************************************************************************
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
- /* Set the GPE flags for return to enabled state */
+ /* Update the GPE register masks for return to enabled state */
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
@@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE[%2X] - event disabled",
+ "Unable to queue handler for GPE[0x%2X] - event disabled",
gpe_number));
}
break;
default:
- /* No handler or method to run! */
-
+ /*
+ * No handler or method to run!
+ * 03/2010: This case should no longer be possible. We will not allow
+ * a GPE to be enabled if it has no handler or method.
+ */
ACPI_ERROR((AE_INFO,
- "No handler or method for GPE[%2X], disabling event",
+ "No handler or method for GPE[0x%2X], disabling event",
gpe_number));
/*
- * Disable the GPE. The GPE will remain disabled until the ACPICA
- * Core Subsystem is restarted, or a handler is installed.
+		 * Disable the GPE. The GPE will remain disabled until a handler
+ * is installed or ACPICA is restarted.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
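
The net effect of the lookup refactoring in this file is that both the FADT path and the GPE-device path of acpi_ev_get_gpe_event_info now funnel through acpi_ev_low_get_gpe_info, which returns NULL for any number outside [block_base_number, block_base_number + gpe_count). For example, with a block based at 0x10 holding 16 GPEs, GPE 0x1B resolves to event_info[0x0B] while GPE 0x22 yields NULL. A hypothetical caller-side wrapper, for illustration only:

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

/* Hypothetical wrapper: does this GPE number belong to the given block? */
static u8 example_gpe_in_block(u32 gpe_number,
			       struct acpi_gpe_block_info *gpe_block)
{
	return (u8)(acpi_ev_low_get_gpe_info(gpe_number, gpe_block) != NULL);
}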
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fef721917ea..7c28f2d9fd3 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -51,20 +51,6 @@ ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */
static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
u32 interrupt_number);
@@ -73,527 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
*
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[((acpi_size)
- gpe_block->
- register_count) * 8] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_walk_gpe_list
- *
- * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
- * Context - Value passed to callback
- *
- * RETURN: Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
-{
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_gpe_xrupt_info *gpe_xrupt_info;
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Walk the interrupt level descriptor list */
-
- gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_info) {
-
- /* Walk all Gpe Blocks attached to this interrupt level */
-
- gpe_block = gpe_xrupt_info->gpe_block_list_head;
- while (gpe_block) {
-
- /* One callback per GPE block */
-
- status =
- gpe_walk_callback(gpe_xrupt_info, gpe_block,
- context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_END) { /* Callback abort */
- status = AE_OK;
- }
- goto unlock_and_exit;
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_info = gpe_xrupt_info->next;
- }
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
- * gpe_block - Gpe Block info
- *
- * RETURN: Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- * Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context)
-{
- struct acpi_gpe_event_info *gpe_event_info;
- u32 i;
- u32 j;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
- /* Examine each GPE Register within the block */
-
- for (i = 0; i < gpe_block->register_count; i++) {
-
- /* Now look at the individual GPEs in this byte register */
-
- for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
- ACPI_GPE_REGISTER_WIDTH)
- + j];
-
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_FREE(gpe_event_info->dispatch.handler);
- gpe_event_info->dispatch.handler = NULL;
- gpe_event_info->flags &=
- ~ACPI_GPE_DISPATCH_MASK;
- }
- }
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_save_method_info
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * control method under the _GPE portion of the namespace.
- * Extract the name and GPE type from the object, saving this
- * information for quick lookup during GPE dispatch
- *
- * The name of each GPE control method is of the form:
- * "_Lxx" or "_Exx"
- * Where:
- * L - means that the GPE is level triggered
- * E - means that the GPE is edge triggered
- * xx - is the GPE number [in HEX]
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value)
-{
- struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
- struct acpi_gpe_event_info *gpe_event_info;
- u32 gpe_number;
- char name[ACPI_NAME_SIZE + 1];
- u8 type;
-
- ACPI_FUNCTION_TRACE(ev_save_method_info);
-
- /*
- * _Lxx and _Exx GPE method support
- *
- * 1) Extract the name from the object and convert to a string
- */
- ACPI_MOVE_32_TO_32(name,
- &((struct acpi_namespace_node *)obj_handle)->name.
- integer);
- name[ACPI_NAME_SIZE] = 0;
-
- /*
- * 2) Edge/Level determination is based on the 2nd character
- * of the method name
- *
- * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
- * if a _PRW object is found that points to this GPE.
- */
- switch (name[1]) {
- case 'L':
- type = ACPI_GPE_LEVEL_TRIGGERED;
- break;
-
- case 'E':
- type = ACPI_GPE_EDGE_TRIGGERED;
- break;
-
- default:
- /* Unknown method type, just ignore it! */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Ignoring unknown GPE method type: %s "
- "(name not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Convert the last two characters of the name to the GPE Number */
-
- gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
- if (gpe_number == ACPI_UINT32_MAX) {
-
- /* Conversion failed; invalid method, just ignore it */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Could not extract GPE number from name: %s "
- "(name is not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Ensure that we have a valid GPE number for this GPE block */
-
- if ((gpe_number < gpe_block->block_base_number) ||
- (gpe_number >= (gpe_block->block_base_number +
- (gpe_block->register_count * 8)))) {
- /*
- * Not valid for this GPE block, just ignore it. However, it may be
- * valid for a different GPE block, since GPE0 and GPE1 methods both
- * appear under \_GPE.
- */
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Now we can add this information to the gpe_event_info block for use
- * during dispatch of this GPE.
- */
- gpe_event_info =
- &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
-
- gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
-
- gpe_event_info->dispatch.method_node =
- (struct acpi_namespace_node *)obj_handle;
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Registered GPE method %s as GPE number 0x%.2X\n",
- name, gpe_number));
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
- * not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * Device. Run the _PRW method. If present, extract the GPE
- * number and mark the GPE as a WAKE GPE.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value)
-{
- struct acpi_gpe_walk_info *gpe_info = (void *)info;
- struct acpi_namespace_node *gpe_device;
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_namespace_node *target_gpe_device;
- struct acpi_gpe_event_info *gpe_event_info;
- union acpi_operand_object *pkg_desc;
- union acpi_operand_object *obj_desc;
- u32 gpe_number;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
- /* Check for a _PRW method under this device */
-
- status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
- ACPI_BTYPE_PACKAGE, &pkg_desc);
- if (ACPI_FAILURE(status)) {
-
- /* Ignore all errors from _PRW, we don't want to abort the subsystem */
-
- return_ACPI_STATUS(AE_OK);
- }
-
- /* The returned _PRW package must have at least two elements */
-
- if (pkg_desc->package.count < 2) {
- goto cleanup;
- }
-
- /* Extract pointers from the input context */
-
- gpe_device = gpe_info->gpe_device;
- gpe_block = gpe_info->gpe_block;
-
- /*
- * The _PRW object must return a package, we are only interested in the
- * first element
- */
- obj_desc = pkg_desc->package.elements[0];
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Use FADT-defined GPE device (from definition of _PRW) */
-
- target_gpe_device = acpi_gbl_fadt_gpe_device;
-
- /* Integer is the GPE number in the FADT described GPE blocks */
-
- gpe_number = (u32) obj_desc->integer.value;
- } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Package contains a GPE reference and GPE number within a GPE block */
-
- if ((obj_desc->package.count < 2) ||
- ((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_LOCAL_REFERENCE) ||
- ((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_INTEGER)) {
- goto cleanup;
- }
-
- /* Get GPE block reference and decode */
-
- target_gpe_device =
- obj_desc->package.elements[0]->reference.node;
- gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
- } else {
- /* Unknown type, just ignore it */
-
- goto cleanup;
- }
-
- /*
- * Is this GPE within this block?
- *
- * TRUE if and only if these conditions are true:
- * 1) The GPE devices match.
- * 2) The GPE index(number) is within the range of the Gpe Block
- * associated with the GPE device.
- */
- if ((gpe_device == target_gpe_device) &&
- (gpe_number >= gpe_block->block_base_number) &&
- (gpe_number < gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- gpe_event_info = &gpe_block->event_info[gpe_number -
- gpe_block->
- block_base_number];
-
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- }
-
- cleanup:
- acpi_ut_remove_reference(pkg_desc);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
- *
- * RETURN: A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- * block per unique interrupt level used for GPEs. Should be
- * called only when the GPE lists are semaphore locked and not
- * subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number)
-{
- struct acpi_gpe_xrupt_info *next_gpe_xrupt;
- struct acpi_gpe_xrupt_info *gpe_xrupt;
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
- /* No need for lock since we are not changing any list elements here */
-
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt) {
- if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
- }
-
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- /* Not found, must allocate a new xrupt descriptor */
-
- gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
- if (!gpe_xrupt) {
- return_PTR(NULL);
- }
-
- gpe_xrupt->interrupt_number = interrupt_number;
-
- /* Install new interrupt descriptor with spin lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (acpi_gbl_gpe_xrupt_list_head) {
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt->next) {
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- next_gpe_xrupt->next = gpe_xrupt;
- gpe_xrupt->previous = next_gpe_xrupt;
- } else {
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Install new interrupt handler if not SCI_INT */
-
- if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
- status = acpi_os_install_interrupt_handler(interrupt_number,
- acpi_ev_gpe_xrupt_handler,
- gpe_xrupt);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
- }
- }
-
- return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS: gpe_xrupt - A GPE interrupt info block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- * interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
- /* We never want to remove the SCI interrupt handler */
-
- if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
- gpe_xrupt->gpe_block_list_head = NULL;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Disable this interrupt */
-
- status =
- acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
- acpi_ev_gpe_xrupt_handler);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink the interrupt block with lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (gpe_xrupt->previous) {
- gpe_xrupt->previous->next = gpe_xrupt->next;
- } else {
- /* No previous, update list head */
-
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
- }
-
- if (gpe_xrupt->next) {
- gpe_xrupt->next->previous = gpe_xrupt->previous;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Free the block */
-
- ACPI_FREE(gpe_xrupt);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_gpe_block
*
* PARAMETERS: gpe_block - New GPE block
@@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
}
- acpi_current_gpe_count -=
- gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count -= gpe_block->gpe_count;
/* Free the gpe_block */
@@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
@@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
@@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */
gpe_block->node = gpe_device;
+ gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
@@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
- /* Find all GPE methods (_Lxx, _Exx) for this block */
+	/* Find all GPE methods (_Lxx or _Exx) for this block */
+
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.enable_this_gpe = FALSE;
+ walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_save_method_info, NULL,
- gpe_block, NULL);
+ acpi_ev_match_gpe_method, NULL,
+ &walk_info, NULL);
/* Return the new block */
@@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
- ((gpe_block->register_count *
- ACPI_GPE_REGISTER_WIDTH) - 1)),
+ (gpe_block->gpe_count - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Update global count of currently available GPEs */
- acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count += gpe_block->gpe_count;
return_ACPI_STATUS(AE_OK);
}
@@ -969,10 +437,13 @@ acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
+ acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_walk_info gpe_info;
+ struct acpi_gpe_walk_info walk_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
+ u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
- gpe_info.gpe_block = gpe_block;
- gpe_info.gpe_device = gpe_device;
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.execute_by_owner_id = FALSE;
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
- &gpe_info, NULL);
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
}
/*
- * Enable all GPEs that have a corresponding method and aren't
+ * Enable all GPEs that have a corresponding method and are not
* capable of generating wakeups. Any other GPEs within this block
- * must be enabled via the acpi_enable_gpe() interface.
+ * must be enabled via the acpi_enable_gpe interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device)
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
+ }
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- acpi_status status;
- acpi_size gpe_index;
- int gpe_number;
/* Get the info block for this particular GPE */
- gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
+
+ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
- if (acpi_gbl_leave_wake_gpes_disabled)
+ if (acpi_gbl_leave_wake_gpes_disabled) {
continue;
+ }
}
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
+ /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
+ }
+
+ /* Enable this GPE */
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
- if (ACPI_FAILURE(status))
- ACPI_ERROR((AE_INFO,
- "Failed to enable GPE %02X\n",
- gpe_number));
- else
- gpe_enabled_count++;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
- wake_gpe_count, gpe_enabled_count));
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_gpe_initialize
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
- u32 register_count0 = 0;
- u32 register_count1 = 0;
- u32 gpe_number_max = 0;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the GPE Block(s) defined in the FADT
- *
- * Why the GPE register block lengths are divided by 2: From the ACPI
- * Spec, section "General-Purpose Event Registers", we have:
- *
- * "Each register block contains two registers of equal length
- * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
- * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
- * The length of the GPE1_STS and GPE1_EN registers is equal to
- * half the GPE1_LEN. If a generic register block is not supported
- * then its respective block pointer and block length values in the
- * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
- * to be the same size."
- */
-
- /*
- * Determine the maximum GPE number for this machine.
- *
- * Note: both GPE0 and GPE1 are optional, and either can exist without
- * the other.
- *
- * If EITHER the register length OR the block address are zero, then that
- * particular block is not supported.
- */
- if (acpi_gbl_FADT.gpe0_block_length &&
- acpi_gbl_FADT.xgpe0_block.address) {
-
- /* GPE block 0 exists (has both length and address > 0) */
-
- register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
- gpe_number_max =
- (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
- /* Install GPE Block 0 */
-
- status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe0_block,
- register_count0, 0,
- acpi_gbl_FADT.sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks[0]);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 0"));
- }
- }
-
- if (acpi_gbl_FADT.gpe1_block_length &&
- acpi_gbl_FADT.xgpe1_block.address) {
-
- /* GPE block 1 exists (has both length and address > 0) */
-
- register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
- /* Check for GPE0/GPE1 overlap (if both banks exist) */
-
- if ((register_count0) &&
- (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
- ACPI_ERROR((AE_INFO,
- "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
- "(GPE %d to %d) - Ignoring GPE1",
- gpe_number_max, acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.gpe1_base +
- ((register_count1 *
- ACPI_GPE_REGISTER_WIDTH) - 1)));
-
- /* Ignore GPE1 block by setting the register count to zero */
-
- register_count1 = 0;
- } else {
- /* Install GPE Block 1 */
-
- status =
- acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe1_block,
- register_count1,
- acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.
- sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks
- [1]);
-
+ ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 1"));
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ continue;
}
- /*
- * GPE0 and GPE1 do not have to be contiguous in the GPE number
- * space. However, GPE0 always starts at GPE number zero.
- */
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ gpe_enabled_count++;
}
}
- /* Exit if there are no GPE registers */
-
- if ((register_count0 + register_count1) == 0) {
-
- /* GPEs are not required by ACPI, this is OK */
-
+ if (gpe_enabled_count || wake_gpe_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
- goto cleanup;
- }
-
- /* Check for Max GPE number out-of-range */
-
- if (gpe_number_max > ACPI_GPE_MAX) {
- ACPI_ERROR((AE_INFO,
- "Maximum GPE number from FADT is too large: 0x%X",
- gpe_number_max));
- status = AE_BAD_VALUE;
- goto cleanup;
+ "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
+ gpe_enabled_count, wake_gpe_count));
}
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
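
Before the initialization code that moves into the new evgpeinit.c below, the sizing arithmetic it relies on can be shown with one concrete case: a FADT GPE0 block length of 16 bytes holds GPE0_STS and GPE0_EN of 8 bytes each, i.e. 8 register pairs with 8 GPEs per pair, for 64 GPEs numbered 0x00-0x3F. A small illustrative function using assumed values but the same formulas as acpi_ev_gpe_initialize:

#include <acpi/acpi.h>
#include "accommon.h"

/* Illustrative sizing: FADT block length -> register pairs -> highest GPE */
static u32 example_fadt_gpe0_max(u32 gpe0_block_length)
{
	u32 register_count0 = gpe0_block_length / 2;	/* STS/EN pairs */
	u32 gpe_count = register_count0 * ACPI_GPE_REGISTER_WIDTH;

	return gpe_count - 1;	/* length 16 -> GPEs 0x00 through 0x3F */
}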
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
new file mode 100644
index 00000000000..3f6c2d26410
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -0,0 +1,653 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeinit - System GPE initialization and update
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
+ *
+ ******************************************************************************/
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI
+ * Spec, section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+ * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
+ "(GPE %u to %u) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
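
As a worked illustration of the register arithmetic above (editorial sketch, not part of the patch; the FADT values are hypothetical): an 8-byte GPE0 block yields 4 status/enable register pairs covering GPE 0x00-0x1F, while a 4-byte GPE1 block based at 0x40 covers GPE 0x40-0x4F.

#include <stdio.h>

int main(void)
{
	/* Hypothetical FADT values */
	unsigned int gpe0_block_length = 8;	/* bytes, GPEx_STS + GPEx_EN */
	unsigned int gpe1_block_length = 4;
	unsigned int gpe1_base = 0x40;
	unsigned int register_width = 8;	/* ACPI_GPE_REGISTER_WIDTH */

	unsigned int register_count0 = gpe0_block_length / 2;	/* 4 */
	unsigned int register_count1 = gpe1_block_length / 2;	/* 2 */

	/* GPE0 always starts at GPE number zero */
	printf("GPE0: 0x00 to 0x%02X\n",
	       (register_count0 * register_width) - 1);	/* 0x1F */

	/* GPE1 starts at gpe1_base from the FADT */
	printf("GPE1: 0x%02X to 0x%02X\n", gpe1_base,
	       gpe1_base + (register_count1 * register_width) - 1);	/* 0x4F */
	return 0;
}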
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpes
+ *
+ * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
+ * result of a Load() or load_table() operation. If new GPE
+ * methods have been installed, register the new methods and
+ * enable any runtime GPEs that are associated with them. Also,
+ * run any newly loaded _PRW methods in order to discover any
+ * new CAN_WAKE GPEs.
+ *
+ ******************************************************************************/
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
+ acpi_status status = AE_OK;
+ u32 new_wake_gpe_count = 0;
+
+ /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
+
+ walk_info.owner_id = table_owner_id;
+ walk_info.execute_by_owner_id = TRUE;
+ walk_info.count = 0;
+
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * 1) Run any newly-loaded _PRW methods to find any GPEs that
+ * can now be marked as CAN_WAKE GPEs. Note: We must run the
+ * _PRW methods before we process the _Lxx/_Exx methods because
+ * we will enable all runtime GPEs associated with the new
+ * _Lxx/_Exx methods at the time we process those methods.
+ *
+ * Unlock interpreter so that we can run the _PRW methods.
+ */
+ walk_info.gpe_block = NULL;
+ walk_info.gpe_device = NULL;
+
+ acpi_ex_exit_interpreter();
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_prw_and_gpe, NULL,
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
+
+ acpi_ex_enter_interpreter();
+ new_wake_gpe_count = walk_info.count;
+ }
+
+ /*
+ * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+ *
+ * Any GPEs that correspond to new _Lxx/_Exx methods and are not
+ * marked as CAN_WAKE are immediately enabled.
+ *
+ * Examine the namespace underneath each gpe_device within the
+ * gpe_block lists.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ walk_info.count = 0;
+ walk_info.enable_this_gpe = TRUE;
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_block->node;
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
+ walk_info.gpe_device,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method,
+ NULL, &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While decoding _Lxx/_Exx methods"));
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ if (walk_info.count || new_wake_gpe_count) {
+ ACPI_INFO((AE_INFO,
+ "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
+ walk_info.count, new_wake_gpe_count));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_gpe_method
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch. Allows a
+ * per-owner_id evaluation if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx", where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
+ * with that owner.
+ * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
+ * method is immediately enabled (Used for Load/load_table operators)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_namespace_node *method_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_namespace_node *gpe_device;
+ acpi_status status;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+
+ ACPI_FUNCTION_TRACE(ev_match_gpe_method);
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (method_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Match and decode the _Lxx and _Exx GPE method names
+ *
+ * 1) Extract the method name and null terminate it
+ */
+ ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /* 2) Name must begin with an underscore */
+
+ if (name[0] != '_') {
+ return_ACPI_STATUS(AE_OK); /* Ignore this method */
+ }
+
+ /*
+ * 3) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
+ * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s "
+ "(name not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 4) The last two characters of the name are the hex GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s "
+ "(name is not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
+ if (!gpe_event_info) {
+ /*
+ * This gpe_number is not valid for this GPE block, just ignore it.
+ * However, it may be valid for a different GPE block, since GPE0
+ * and GPE1 methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+
+ /* If there is already a handler, ignore this GPE method */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+ /*
+ * If there is already a method, ignore this method. But check
+ * for a type mismatch (if both the _Lxx AND _Exx exist)
+ */
+ if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
+ ACPI_ERROR((AE_INFO,
+ "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
+ gpe_number, gpe_number, gpe_number));
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Add the GPE information from above to the gpe_event_info block for
+ * use during dispatch of this GPE.
+ */
+ gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
+ gpe_event_info->dispatch.method_node = method_node;
+
+ /*
+ * Enable this GPE if requested. This only happens during the
+ * execution of a Load or load_table operator. We have found a new
+ * GPE method and want to immediately enable the GPE if it is a
+ * runtime GPE.
+ */
+ if (walk_info->enable_this_gpe) {
+
+ /* Ignore GPEs that can wake the system */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
+ !acpi_gbl_leave_wake_gpes_disabled) {
+ walk_info->count++;
+ gpe_device = walk_info->gpe_device;
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
+ gpe_device = NULL;
+ }
+
+ status = acpi_enable_gpe(gpe_device, gpe_number,
+ ACPI_GPE_TYPE_RUNTIME);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(AE_OK);
+}
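
For reference, a standalone sketch of the _Lxx/_Exx name decoding performed above (editorial, not part of the patch; ACPI_STRTOUL reduces to strtoul here and the method names are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *names[] = { "_L16", "_E02", "_Q11" };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		const char *name = names[i];
		unsigned long gpe_number;

		/* Must be _Lxx (level-triggered) or _Exx (edge-triggered) */
		if (name[0] != '_' || (name[1] != 'L' && name[1] != 'E')) {
			printf("%s: not a GPE method, ignored\n", name);
			continue;
		}

		/* Last two characters are the GPE number in hex */
		gpe_number = strtoul(&name[2], NULL, 16);
		printf("%s: %s-triggered GPE 0x%02lX\n", name,
		       (name[1] == 'L') ? "level" : "edge", gpe_number);
	}
	return 0;
}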
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a CAN_WAKE GPE. Allows a
+ * per-owner_id execution if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
+ * owner.
+ * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
+ * we only execute _PRWs that refer to the input gpe_device.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_namespace_node *prw_node;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
+ ACPI_NS_NO_UPSEARCH, &prw_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (prw_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Execute the _PRW */
+
+ status = acpi_ut_evaluate_object(prw_node, NULL,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = walk_info->gpe_device;
+ gpe_block = walk_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package; we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = NULL;
+ if (gpe_device) {
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+ }
+
+ /* Integer is the GPE number in the FADT-described GPE blocks */
+
+ gpe_number = (u32)obj_desc->integer.value;
+ } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ ((obj_desc->package.elements[0])->common.type !=
+ ACPI_TYPE_LOCAL_REFERENCE) ||
+ ((obj_desc->package.elements[1])->common.type !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /* Get the gpe_event_info for this GPE */
+
+ if (gpe_device) {
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE if and only if these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index(number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if (gpe_device != target_gpe_device) {
+ goto cleanup;
+ }
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
+ } else {
+ /* gpe_device is NULL, just match the target_device and gpe_number */
+
+ gpe_event_info =
+ acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
+ }
+
+ if (gpe_event_info) {
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+
+ /* This GPE can wake the system */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ walk_info->count++;
+ }
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
new file mode 100644
index 00000000000..19a0e513ea4
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -0,0 +1,337 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeutil - GPE utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeutil")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ * Context - Value passed to callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status =
+ gpe_walk_callback(gpe_xrupt_info, gpe_block,
+ context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_END) { /* Callback abort */
+ status = AE_OK;
+ }
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
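
A sketch of a walk callback that uses the AE_CTRL_END convention honored above (editorial; the counting policy is hypothetical, the signature matches the acpi_gpe_callback usage in this file, and the same acpica-internal headers are assumed):

/* Count GPE blocks; stop the walk cleanly after the eighth one */
static acpi_status
count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		 struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 *count = context;

	(*count)++;
	if (*count >= 8) {
		return (AE_CTRL_END);	/* Clean abort, reported as AE_OK */
	}

	return (AE_OK);
}

It would be invoked as acpi_ev_walk_gpe_list(count_gpe_blocks, &count).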
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->event_info[gpe_block->gpe_count] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs. Should be
+ * called only when the GPE lists are semaphore locked and not
+ * subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH)
+ + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 9a3cb7045a3..df0aea9a8cf 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
- "Could not disable fixed event %d",
+ "Could not disable fixed event %u",
(u32) i));
}
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index b40757955f9..cc825023012 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
event));
/* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register %X",
+ "Could not write to fixed event enable register 0x%X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Parameter validation */
- if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
- status = AE_BAD_PARAMETER;
- goto exit;
+ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
- goto exit;
+ return_ACPI_STATUS(status);
}
/* Ensure that we have a valid GPE number */
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE (status)) {
+ goto unlock_and_exit;
+ }
+
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- exit:
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Installing notify handler failed"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c78ea2..d5a5efc043b 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
- acpi_status status = AE_OK;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
- } else {
- /* Transition to ACPI mode */
+ return_ACPI_STATUS(AE_OK);
+ }
- status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not transition to ACPI mode"));
- return_ACPI_STATUS(status);
- }
+ /* Transition to ACPI mode */
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Transition to ACPI mode successful\n"));
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
}
- return_ACPI_STATUS(status);
+ /* Sanity check that transition succeeded */
+
+ if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+ ACPI_ERROR((AE_INFO,
+ "Hardware did not enter ACPI mode"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
@@ -203,21 +212,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_set_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * action - Enable or disable
- * Called from ISR or not
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable an ACPI event (general purpose)
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe and
+ * acpi_disable_gpe interfaces -- and should be used with care.
+ *
+ * Note: Typically used to disable a runtime GPE for a short period of time,
+ * then re-enable it, without disturbing the existing reference counts. This
+ * is useful, for example, in the Embedded Controller (EC) driver.
*
******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_set_gpe);
@@ -243,7 +257,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid action\n"));
status = AE_BAD_PARAMETER;
break;
}
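
A minimal usage sketch of the clarified acpi_set_gpe() semantics (editorial; GPE number 0x16 is hypothetical and error handling is trimmed):

#include <acpi/acpi.h>

/* Temporarily mask a runtime GPE without touching its reference counts */
static acpi_status mask_gpe_briefly(void)
{
	acpi_status status;

	status = acpi_set_gpe(NULL, 0x16, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	/* ... service the event source, e.g. an EC transaction ... */

	return acpi_set_gpe(NULL, 0x16, ACPI_GPE_ENABLE);
}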
@@ -259,25 +272,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* FUNCTION: acpi_enable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE will be used for
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Take a reference to a GPE and enable it if necessary
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled (for runtime GPEs), or the GPE register mask
+ * is updated (for wake GPEs).
*
******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -289,26 +308,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if (type & ACPI_GPE_TYPE_RUNTIME) {
- if (++gpe_event_info->runtime_count == 1) {
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count++;
+ if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
+ goto unlock_and_exit;
+ }
}
}
- if (type & ACPI_GPE_TYPE_WAKE) {
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ /* The GPE must have the ability to wake the system */
+
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_BAD_PARAMETER;
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
/*
- * Wake-up GPEs are only enabled right prior to putting the
- * system into a sleep state.
+ * Update the enable mask on the first wakeup reference. Wake GPEs
+ * are only hardware-enabled just before sleeping.
*/
- if (++gpe_event_info->wakeup_count == 1)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ gpe_event_info->wakeup_count++;
+ if (gpe_event_info->wakeup_count == 1) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
@@ -321,27 +357,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* FUNCTION: acpi_disable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE won't be used for any more
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Release a reference to a GPE and disable it if necessary
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ * removed, only then is the GPE disabled (for runtime GPEs), or
+ * the GPE mask bit disabled (for wake GPEs)
*
******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -350,18 +393,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
- if (--gpe_event_info->runtime_count == 0)
+ /* Hardware-disable a runtime GPE on removal of the last reference */
+
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (!gpe_event_info->runtime_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count--;
+ if (!gpe_event_info->runtime_count) {
status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count++;
+ goto unlock_and_exit;
+ }
+ }
}
- if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
- /*
- * Wake-up GPEs are not enabled after leaving system sleep
- * states, so we don't need to disable them here.
- */
- if (--gpe_event_info->wakeup_count == 0)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ /*
+ * Update masks for wake GPE on removal of the last reference.
+ * No need to hardware-disable wake GPEs here, they are not currently
+ * enabled.
+ */
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ if (!gpe_event_info->wakeup_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->wakeup_count--;
+ if (!gpe_event_info->wakeup_count) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
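
Taken together with acpi_enable_gpe() above, the reference counting now works as in this editorial sketch (hypothetical GPE number; only the first reference enables the hardware and only the matching last release disables it):

#include <acpi/acpi.h>

static void gpe_refcount_example(void)
{
	/* First reference hardware-enables the runtime GPE */
	if (ACPI_FAILURE(acpi_enable_gpe(NULL, 0x16, ACPI_GPE_TYPE_RUNTIME))) {
		return;
	}

	/* ... GPE is live; nested users may add/remove their own references ... */

	/* Matching release; hardware is disabled only when the count hits zero */
	(void)acpi_disable_gpe(NULL, 0x16, ACPI_GPE_TYPE_RUNTIME);
}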
@@ -465,30 +529,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
*
* FUNCTION: acpi_clear_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -501,9 +558,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -569,9 +624,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
*
* FUNCTION: acpi_get_gpe_status
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
* event_status - Where the current status of the event will
* be returned
*
@@ -582,21 +636,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
******************************************************************************/
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, u32 flags, acpi_event_status * event_status)
+ u32 gpe_number, acpi_event_status *event_status)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -614,9 +662,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
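
With the Flags argument gone, callers of acpi_get_gpe_status() and acpi_clear_gpe() now look like this editorial sketch (hypothetical GPE number; ACPI_EVENT_FLAG_SET is the existing status bit from actypes.h):

#include <acpi/acpi.h>

/* Both calls take the GPE spinlock internally, so this is ISR-safe */
static void query_and_clear_gpe(void)
{
	acpi_event_status event_status;

	if (ACPI_SUCCESS(acpi_get_gpe_status(NULL, 0x16, &event_status)) &&
	    (event_status & ACPI_EVENT_FLAG_SET)) {
		(void)acpi_clear_gpe(NULL, 0x16);
	}
}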
@@ -673,20 +719,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
- /* Run the _PRW methods and enable the GPEs */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
-
- /* Get the device_object attached to the node */
+ /* Install block in the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
- /* No object, create a new one */
-
+ /*
+ * No object, create a new one (Device nodes do not always have
+ * an attached object)
+ */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
if (!obj_desc) {
status = AE_NO_MEMORY;
@@ -705,10 +746,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
}
}
- /* Install the GPE block in the device_object */
+ /* Now install the GPE block in the device_object */
obj_desc->device.gpe_block = gpe_block;
+ /* Run the _PRW methods and enable the runtime GPEs in the new block */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
@@ -839,8 +884,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Increment Index by the number of GPEs in this block */
- info->next_block_base_index +=
- (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+ info->next_block_base_index += gpe_block->gpe_count;
if (info->index < info->next_block_base_index) {
/*
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7e8b3bedc37..008621c5ad8 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle)
{
- acpi_status status;
union acpi_operand_object *obj_desc;
+ acpi_status status;
+ acpi_owner_id owner_id;
ACPI_FUNCTION_TRACE(ex_add_table);
@@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- return_ACPI_STATUS(status);
+ /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_SUCCESS(status)) {
+ acpi_ev_update_gpes(owner_id);
+ }
+
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO,
- "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
- table->signature, table->oem_id,
- table->oem_table_id));
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table);
}
/* Invoke table handler if present */
@@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table_desc.pointer);
+
/* Remove the reference by added by acpi_ex_store above */
acpi_ut_remove_reference(ddb_handle);
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bda7aed0404..b73bc50c5b7 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Bad destination type during conversion: %X",
+ "Bad destination type during conversion: 0x%X",
destination_type));
status = AE_AML_INTERNAL;
break;
@@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+ "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 0aa57d93869..3c61b48c73f 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
acpi_ut_get_region_name(region_space), region_space));
/* Create the region descriptor */
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
new file mode 100644
index 00000000000..be8c98b480d
--- /dev/null
+++ b/drivers/acpi/acpica/exdebug.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Module Name: exdebug - Support for stores to the AML Debug Object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exdebug")
+
+#ifndef ACPI_NO_ERROR_MESSAGES
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_debug_object
+ *
+ * PARAMETERS: source_desc - Object to be output to "Debug Object"
+ * Level - Indentation level (used for packages)
+ * Index - Current package element, zero if not pkg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Handles stores to the AML Debug Object. For example:
+ * Store(INT1, Debug)
+ *
+ * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
+ *
+ * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
+ * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
+ * operational case, stores to the debug object are ignored but can be easily
+ * enabled if necessary.
+ *
+ ******************************************************************************/
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+ /* Output must be enabled via the debug_object global or the dbg_level */
+
+ if (!acpi_gbl_enable_aml_debug_object &&
+ !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
+ return_VOID;
+ }
+
+ /*
+ * Print line header as long as we are not in the middle of an
+ * object display
+ */
+ if (!((level > 0) && index == 0)) {
+ acpi_os_printf("[ACPI Debug] %*s", level, " ");
+ }
+
+ /* Display the index for package output only */
+
+ if (index > 0) {
+ acpi_os_printf("(%.2u) ", index - 1);
+ }
+
+ if (!source_desc) {
+ acpi_os_printf("[Null Object]\n");
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf("%s ",
+ acpi_ut_get_object_type_name(source_desc));
+
+ if (!acpi_ut_valid_internal_object(source_desc)) {
+ acpi_os_printf("%p, Invalid Internal Object!\n",
+ source_desc);
+ return_VOID;
+ }
+ } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf("%s: %p\n",
+ acpi_ut_get_type_name(((struct
+ acpi_namespace_node *)
+ source_desc)->type),
+ source_desc);
+ return_VOID;
+ } else {
+ return_VOID;
+ }
+
+ /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+ switch (source_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Output correct integer width */
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ acpi_os_printf("0x%8.8X\n",
+ (u32)source_desc->integer.value);
+ } else {
+ acpi_os_printf("0x%8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(source_desc->integer.
+ value));
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
+ acpi_ut_dump_buffer2(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("[0x%.2X] \"%s\"\n",
+ source_desc->string.length,
+ source_desc->string.pointer);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("[Contains 0x%.2X Elements]\n",
+ source_desc->package.count);
+
+ /* Output the entire contents of the package */
+
+ for (i = 0; i < source_desc->package.count; i++) {
+ acpi_ex_do_debug_object(source_desc->package.
+ elements[i], level + 4, i + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[%s] ",
+ acpi_ut_get_reference_name(source_desc));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ acpi_os_printf("0x%X\n", source_desc->reference.value);
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ /* Case for ddb_handle */
+
+ acpi_os_printf("Table Index 0x%X\n",
+ source_desc->reference.value);
+ return;
+
+ default:
+ break;
+ }
+
+ acpi_os_printf(" ");
+
+ /* Check for valid node first, then valid object */
+
+ if (source_desc->reference.node) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf
+ (" %p - Not a valid namespace node\n",
+ source_desc->reference.node);
+ } else {
+ acpi_os_printf("Node %p [%4.4s] ",
+ source_desc->reference.node,
+ (source_desc->reference.node)->
+ name.ascii);
+
+ switch ((source_desc->reference.node)->type) {
+
+ /* These types have no attached object */
+
+ case ACPI_TYPE_DEVICE:
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+ acpi_os_printf("Thermal Zone\n");
+ break;
+
+ default:
+ acpi_ex_do_debug_object((source_desc->
+ reference.
+ node)->object,
+ level + 4, 0);
+ break;
+ }
+ }
+ } else if (source_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_do_debug_object(((struct
+ acpi_namespace_node *)
+ source_desc->reference.
+ object)->object,
+ level + 4, 0);
+ } else {
+ acpi_ex_do_debug_object(source_desc->reference.
+ object, level + 4, 0);
+ }
+ }
+ break;
+
+ default:
+
+ acpi_os_printf("%p\n", source_desc);
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+ return_VOID;
+}
+#endif
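
As the header comment notes, the output is normally suppressed; an editorial sketch of the two switches that turn it on from host code (the AML side is simply Store (..., Debug)):

#include <acpi/acpi.h>

static void enable_aml_debug_output(void)
{
	/* Either of these enables the "[ACPI Debug]" prints above */
	acpi_gbl_enable_aml_debug_object = TRUE;
	acpi_dbg_level |= ACPI_LV_DEBUG_OBJECT;
}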
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6c79fecbee4..f17d2ff0031 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %X, found length %X",
+ "SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f68a216168b..a6dc26f0b3b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (rgn_desc->common.type != ACPI_TYPE_REGION) {
- ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+ ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
rgn_desc->common.type,
acpi_ut_get_object_type_name(rgn_desc)));
@@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+ "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
@@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* exceeds region length, indicate an error
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+ "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
@@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) not implemented",
+ "Region %s(0x%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) has no handler",
+ "Region %s(0x%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
@@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+ ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
obj_desc->common.type));
status = AE_AML_INTERNAL;
break;
@@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown UpdateRule value: %X",
+ "Unknown UpdateRule value: 0x%X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_ERROR((AE_INFO,
- "Field size %X (bits) is too large for buffer (%X)",
+ "Field size %u (bits) is too large for buffer (%u)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c5bb1eeed2d..95db4be0877 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+ ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
return_ACPI_STATUS(AE_TYPE);
}
@@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
}
@@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Invalid object type, should not happen here */
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7116bc86494..f73be97043c 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
/*
- * Migrate the previous sync level associated with this mutex to the
- * previous mutex on the list so that it may be preserved. This handles
- * the case where several mutexes have been acquired at the same level,
- * but are not released in opposite order.
+ * Migrate the previous sync level associated with this mutex to
+ * the previous mutex on the list so that it may be preserved.
+ * This handles the case where several mutexes have been acquired
+ * at the same level, but are not released in opposite order.
*/
(obj_desc->mutex.prev)->mutex.original_sync_level =
obj_desc->mutex.original_sync_level;
@@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* FUNCTION: acpi_ex_link_mutex
*
- * PARAMETERS: obj_desc - The mutex to be linked
- * Thread - Current executing thread object
+ * PARAMETERS: obj_desc - The mutex to be linked
+ * Thread - Current executing thread object
*
* RETURN: None
*
@@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
- * PARAMETERS: time_desc - Timeout in milliseconds
+ * PARAMETERS: Timeout - Timeout in milliseconds
* obj_desc - Mutex object
- * Thread - Current thread state
+ * thread_id - Current thread state
*
* RETURN: Status
*
@@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Must have a valid thread ID */
+ /* Must have a valid thread state struct */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
@@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(obj_desc->mutex.node),
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
@@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
u8 previous_sync_level;
+ struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
@@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ owner_thread = obj_desc->mutex.owner_thread;
+
/* The mutex must have been previously acquired in order to release it */
- if (!obj_desc->mutex.owner_thread) {
+ if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
- if ((obj_desc->mutex.owner_thread->thread_id !=
- walk_state->thread->thread_id)
- && (obj_desc != acpi_gbl_global_lock_mutex)) {
+ if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
+ (obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
ACPI_CAST_PTR(void, walk_state->thread->thread_id),
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void,
- obj_desc->mutex.owner_thread->
- thread_id)));
+ ACPI_CAST_PTR(void, owner_thread->thread_id)));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
- if (obj_desc->mutex.sync_level !=
- walk_state->thread->current_sync_level) {
+ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+ "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
@@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* acquired, but are not released in reverse order.
*/
previous_sync_level =
- walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+ owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
@@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Restore the previous sync_level */
- walk_state->thread->current_sync_level = previous_sync_level;
+ owner_thread->current_sync_level = previous_sync_level;
}
+
return_ACPI_STATUS(status);
}
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_release_all_mutexes
*
- * PARAMETERS: Thread - Current executing thread object
+ * PARAMETERS: Thread - Current executing thread object
*
* RETURN: Status
*
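
The exmutex.c changes above do two things: the release path caches obj_desc->mutex.owner_thread in a local owner_thread pointer so the sync-level checks and restore are done against the thread that actually owns the mutex, and it relies on the sync level that is migrated to the previous list entry when a mutex is unlinked, so mutexes acquired at the same SyncLevel may be released out of order. A minimal, self-contained sketch of that bookkeeping, using simplified stand-in types rather than the real ACPICA structures, could look like this:

#include <stdio.h>

/* Simplified stand-ins for the ACPICA mutex and thread structures */
struct mutex_obj {
        int sync_level;                 /* SyncLevel declared for this mutex */
        int original_sync_level;        /* Thread's level when it was acquired */
        struct mutex_obj *prev;
        struct mutex_obj *next;
};

struct thread_state {
        int current_sync_level;
        struct mutex_obj *acquired_mutex_list;  /* Most recently acquired first */
};

/* Link a freshly acquired mutex at the head of the thread's list */
static void link_mutex(struct thread_state *t, struct mutex_obj *m)
{
        m->original_sync_level = t->current_sync_level;
        m->prev = NULL;
        m->next = t->acquired_mutex_list;
        if (t->acquired_mutex_list)
                t->acquired_mutex_list->prev = m;
        t->acquired_mutex_list = m;
        t->current_sync_level = m->sync_level;
}

/*
 * Unlink a mutex that may not be at the head of the list. As in the
 * patch, the saved level is migrated to the previous entry so it is
 * preserved when mutexes are released out of order.
 */
static void unlink_mutex(struct thread_state *t, struct mutex_obj *m)
{
        if (m->next)
                m->next->prev = m->prev;
        if (m->prev) {
                m->prev->next = m->next;
                m->prev->original_sync_level = m->original_sync_level;
        } else {
                t->acquired_mutex_list = m->next;
        }
}

static void release_mutex(struct thread_state *t, struct mutex_obj *m)
{
        /* The previous level always comes from the head of the list */
        int previous = t->acquired_mutex_list->original_sync_level;

        unlink_mutex(t, m);
        t->current_sync_level = previous;
}

int main(void)
{
        struct thread_state t = { 0, NULL };
        struct mutex_obj a = { 4, 0, NULL, NULL };
        struct mutex_obj b = { 4, 0, NULL, NULL };

        link_mutex(&t, &a);
        link_mutex(&t, &b);
        release_mutex(&t, &a);  /* out-of-order release at the same level */
        release_mutex(&t, &b);
        printf("final sync level: %d\n", t.current_sync_level);  /* prints 0 */
        return 0;
}

Releasing a before b still ends at level 0 because a's saved level is migrated to b during the unlink, which is exactly the behavior the owner_thread-based release path depends on.
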
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 679f308c5a8..d11e539ef76 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
- "Could not allocate size %d", size_needed));
+ "Could not allocate size %u", size_needed));
return_PTR(NULL);
}
@@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
*/
status = AE_AML_BAD_NAME;
ACPI_ERROR((AE_INFO,
- "Bad character %02x in name, at %p",
+ "Bad character 0x%02x in name, at %p",
*aml_address, aml_address));
}
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 99adbab5acb..84e4d185aa2 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
case AML_SLEEP_OP: /* Sleep (msec_time) */
- status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+ status = acpi_ex_system_do_sleep(operand[0]->integer.value);
break;
case AML_STALL_OP: /* Stall (usec_time) */
@@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
if (digit > 0) {
ACPI_ERROR((AE_INFO,
- "Integer too large to convert to BCD: %8.8X%8.8X",
+ "Integer too large to convert to BCD: 0x%8.8X%8.8X",
ACPI_FORMAT_UINT64(operand[0]->
integer.value)));
status = AE_AML_NUMERIC_OVERFLOW;
@@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index TargetType %X in reference object %p",
+ "Unknown Index TargetType 0x%X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown class in reference(%p) - %2.2X",
+ "Unknown class in reference(%p) - 0x%2.2X",
operand[0],
operand[0]->reference.class));
@@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 22841bbbe63..10e104cf0fb 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
status = AE_AML_OPERAND_TYPE;
break;
}
-#ifdef ACPI_GPE_NOTIFY_CHECK
- /*
- * GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
- * GPE method during system runtime. If we do, the GPE is marked
- * as "wake-only" and disabled.
- *
- * 1) Is the Notify() value == device_wake?
- * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
- * 3) Did the original GPE happen at system runtime?
- * (versus during wake)
- *
- * If all three cases are true, this is a wake-only GPE that should
- * be disabled at runtime.
- */
- if (value == 2) { /* device_wake */
- status =
- acpi_ev_check_for_wake_only_gpe(walk_state->
- gpe_event_info);
- if (ACPI_FAILURE(status)) {
-
- /* AE_WAKE_ONLY_GPE only error, means ignore this notify */
-
- return_ACPI_STATUS(AE_OK)
- }
- }
-#endif
/*
* Dispatch the notify to the appropriate handler
@@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
}
@@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Index (%X%8.8X) is beyond end of object",
+ "Index (0x%8.8X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
@@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 8bb1012ef44..7a08d23befc 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f256b6a25f2..4b50730cf9a 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
index = operand[5]->integer.value;
if (index >= operand[0]->package.count) {
ACPI_ERROR((AE_INFO,
- "Index (%X%8.8X) beyond package end (%X)",
+ "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 2fbfe51fb14..25059dace0a 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
default:
/* Invalid field access type */
- ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+ ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
return_UINT32(0);
}
@@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type %X (%s)",
+ "Needed Region, found type 0x%X (%s)",
type, acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 486b2e5661b..531000fc77d 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X%8.8X, size %X",
+ "Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_NATIVE_UINT(address),
(u32) map_length));
mem_info->mapped_length = 0;
@@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
{
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
- /* Perform the memory read or write */
-
+ /*
+ * Perform the memory read or write. The bit_width was already
+ * validated.
+ */
switch (function) {
case ACPI_READ:
@@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
break;
case ACPI_WRITE:
+
+ ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ break;
+
default:
- return_ACPI_STATUS(AE_SUPPORT);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
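
The data-table handler change above adds the missing ACPI_WRITE branch (a byte copy of bit_width/8 bytes into the table image) and demotes an unrecognized function code from AE_SUPPORT to AE_BAD_PARAMETER. A reduced sketch of that dispatch in plain C, with memcpy standing in for ACPI_MEMCPY and an int return standing in for acpi_status (the names below are illustrative, not the ACPICA API):

#include <stdint.h>
#include <string.h>

enum { TBL_READ, TBL_WRITE };

/*
 * Access 'bit_width' bits at 'address' inside an already mapped table.
 * As the new comment in the patch notes, bit_width is assumed to have
 * been validated by the caller (8, 16, 32 or 64).
 */
static int data_table_access(int function, void *address,
                             uint32_t bit_width, uint64_t *value)
{
        size_t bytes = bit_width / 8;   /* ACPI_DIV_8() equivalent */

        switch (function) {
        case TBL_READ:
                *value = 0;
                memcpy(value, address, bytes);  /* low-order bytes */
                break;
        case TBL_WRITE:
                memcpy(address, value, bytes);
                break;
        default:
                return -1;              /* AE_BAD_PARAMETER in the patch */
        }
        return 0;
}

int main(void)
{
        uint8_t table_image[8] = { 0 };
        uint64_t value = 0x1234;

        data_table_access(TBL_WRITE, table_image, 16, &value);
        data_table_access(TBL_READ, table_image, 16, &value);
        return (int)value;      /* 0x1234 on little-endian hosts */
}
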
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fdc1b27999e..1fa4289a687 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* No named references are allowed here */
ACPI_ERROR((AE_INFO,
- "Unsupported Reference type %X",
+ "Unsupported Reference type 0x%X",
source_desc->reference.class));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* Default case is for unknown types */
ACPI_ERROR((AE_INFO,
- "Node %p - Unknown object type %X",
+ "Node %p - Unknown object type 0x%X",
node, entry_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fdd6a7079b9..7ca35ea8ace 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown TargetType %X in Index/Reference object %p",
+ "Unknown TargetType 0x%X in Index/Reference object %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference type %X in %p", ref_type,
- stack_desc));
+ "Unknown Reference type 0x%X in %p",
+ ref_type, stack_desc));
status = AE_AML_INTERNAL;
break;
}
@@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+ ACPI_ERROR((AE_INFO,
+ "Not a namespace node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
@@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X",
+ "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c5ecd615f14..8c97cfd6a0f 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
arg_types = op_info->runtime_args;
if (arg_types == ARGI_INVALID_OPCODE) {
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (!acpi_ut_valid_object_type(object_type)) {
ACPI_ERROR((AE_INFO,
- "Bad operand object type [%X]",
+ "Bad operand object type [0x%X]",
object_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X in %p",
+ "Unknown Reference Class 0x%2.2X in %p",
obj_desc->reference.class,
obj_desc));
@@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
/* Unknown type */
ACPI_ERROR((AE_INFO,
- "Internal - Unknown ARGI (required operand) type %X",
+ "Internal - Unknown ARGI (required operand) type 0x%X",
this_arg_type));
return_ACPI_STATUS(AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 702b9ecfd44..1624436ba4c 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstore - AML Interpreter object store support
@@ -53,10 +52,6 @@
ACPI_MODULE_NAME("exstore")
/* Local prototypes */
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index);
-
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
@@ -64,215 +59,6 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_do_debug_object
- *
- * PARAMETERS: source_desc - Value to be stored
- * Level - Indentation level (used for packages)
- * Index - Current package element, zero if not pkg
- *
- * RETURN: None
- *
- * DESCRIPTION: Handles stores to the Debug Object.
- *
- ******************************************************************************/
-
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index)
-{
- u32 i;
-
- ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
-
- /* Print line header as long as we are not in the middle of an object display */
-
- if (!((level > 0) && index == 0)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
- level, " "));
- }
-
- /* Display index for package output only */
-
- if (index > 0) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "(%.2u) ", index - 1));
- }
-
- if (!source_desc) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
- return_VOID;
- }
-
- if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
- acpi_ut_get_object_type_name
- (source_desc)));
-
- if (!acpi_ut_valid_internal_object(source_desc)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "%p, Invalid Internal Object!\n",
- source_desc));
- return_VOID;
- }
- } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
- acpi_ut_get_type_name(((struct
- acpi_namespace_node
- *)source_desc)->
- type),
- source_desc));
- return_VOID;
- } else {
- return_VOID;
- }
-
- /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
-
- switch (source_desc->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Output correct integer width */
-
- if (acpi_gbl_integer_byte_width == 4) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
- (u32) source_desc->integer.
- value));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "0x%8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(source_desc->
- integer.
- value)));
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
- (u32) source_desc->buffer.length));
- ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
- (source_desc->buffer.length <
- 256) ? source_desc->buffer.length : 256);
- break;
-
- case ACPI_TYPE_STRING:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
- source_desc->string.length,
- source_desc->string.pointer));
- break;
-
- case ACPI_TYPE_PACKAGE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "[Contains 0x%.2X Elements]\n",
- source_desc->package.count));
-
- /* Output the entire contents of the package */
-
- for (i = 0; i < source_desc->package.count; i++) {
- acpi_ex_do_debug_object(source_desc->package.
- elements[i], level + 4, i + 1);
- }
- break;
-
- case ACPI_TYPE_LOCAL_REFERENCE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
- acpi_ut_get_reference_name(source_desc)));
-
- /* Decode the reference */
-
- switch (source_desc->reference.class) {
- case ACPI_REFCLASS_INDEX:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
- source_desc->reference.value));
- break;
-
- case ACPI_REFCLASS_TABLE:
-
- /* Case for ddb_handle */
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Table Index 0x%X\n",
- source_desc->reference.value));
- return;
-
- default:
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
-
- /* Check for valid node first, then valid object */
-
- if (source_desc->reference.node) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- " %p - Not a valid namespace node\n",
- source_desc->reference.
- node));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Node %p [%4.4s] ",
- source_desc->reference.
- node,
- (source_desc->reference.
- node)->name.ascii));
-
- switch ((source_desc->reference.node)->type) {
-
- /* These types have no attached object */
-
- case ACPI_TYPE_DEVICE:
- acpi_os_printf("Device\n");
- break;
-
- case ACPI_TYPE_THERMAL:
- acpi_os_printf("Thermal Zone\n");
- break;
-
- default:
- acpi_ex_do_debug_object((source_desc->
- reference.
- node)->object,
- level + 4, 0);
- break;
- }
- }
- } else if (source_desc->reference.object) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.object) ==
- ACPI_DESC_TYPE_NAMED) {
- acpi_ex_do_debug_object(((struct
- acpi_namespace_node *)
- source_desc->reference.
- object)->object,
- level + 4, 0);
- } else {
- acpi_ex_do_debug_object(source_desc->reference.
- object, level + 4, 0);
- }
- }
- break;
-
- default:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
- source_desc));
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
@@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
source_desc,
acpi_ut_get_object_type_name(source_desc)));
- acpi_ex_do_debug_object(source_desc, 0, 0);
+ ACPI_DEBUG_OBJECT(source_desc, 0, 0);
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
ref_desc->reference.class));
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
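
exstore.c no longer carries its own acpi_ex_do_debug_object() implementation; stores to the Debug object now go through the ACPI_DEBUG_OBJECT() macro instead. A hedged sketch of how such a macro can forward to a dump routine in debug builds and compile away otherwise; the names below are hypothetical, not the actual ACPICA definitions:

#include <stdio.h>

/*
 * Hypothetical names: a debug-store hook that forwards to a dump routine
 * when debug output is compiled in, and expands to nothing otherwise.
 */
#ifdef MY_DEBUG_OUTPUT
static void my_do_debug_object(const void *obj, unsigned level, unsigned index)
{
        printf("[Debug] %p (level %u, index %u)\n", obj, level, index);
}

#define MY_DEBUG_OBJECT(obj, level, index) \
        my_do_debug_object((obj), (level), (index))
#else
#define MY_DEBUG_OBJECT(obj, level, index) \
        do { (void)(obj); (void)(level); (void)(index); } while (0)
#endif

static void store_to_debug_object(const void *source)
{
        /* In a non-debug build this call disappears entirely */
        MY_DEBUG_OBJECT(source, 0, 0);
}

int main(void)
{
        store_to_debug_object("example");
        return 0;
}
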
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index e11b6cb42a5..6d32e09327f 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+ ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
how_long));
status = AE_AML_OPERAND_VALUE;
} else {
@@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_system_do_suspend
+ * FUNCTION: acpi_ex_system_do_sleep
*
- * PARAMETERS: how_long - The amount of time to suspend,
+ * PARAMETERS: how_long - The amount of time to sleep,
* in milliseconds
*
* RETURN: None
*
- * DESCRIPTION: Suspend running thread for specified amount of time.
+ * DESCRIPTION: Sleep the running thread for specified amount of time.
*
******************************************************************************/
-acpi_status acpi_ex_system_do_suspend(u64 how_long)
+acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
ACPI_FUNCTION_ENTRY();
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d2..b44274a0b62 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
- u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
- /*
- * Some hardware takes a LONG time to switch modes. Give them 3 sec to
- * do so, but allow faster systems to proceed more quickly.
- */
- retry = 3000;
- while (retry) {
- if (acpi_hw_get_mode() == mode) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Mode %X successfully enabled\n",
- mode));
- return_ACPI_STATUS(AE_OK);
- }
- acpi_os_stall(1000);
- retry--;
- }
-
- ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
- return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ return_ACPI_STATUS(AE_OK);
}
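
acpi_hw_set_mode() now returns as soon as the mode-change command has been issued; the roughly three-second poll of acpi_hw_get_mode() shown in the removed lines is gone. For reference, that removed logic followed the bounded-poll pattern sketched below (stub helpers stand in for acpi_hw_get_mode() and acpi_os_stall()):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for acpi_hw_get_mode() and acpi_os_stall() */
static unsigned int current_mode = 1;
static unsigned int hw_get_mode(void) { return current_mode; }
static void stall_microseconds(unsigned int usec) { (void)usec; }

/*
 * Bounded poll: give slow hardware up to ~3 seconds (3000 x 1 ms) to
 * reach the requested mode, returning early as soon as it does. This
 * is the pattern the hunk above deletes from acpi_hw_set_mode().
 */
static bool wait_for_mode(unsigned int mode)
{
        unsigned int retry = 3000;

        while (retry--) {
                if (hw_get_mode() == mode)
                        return true;
                stall_microseconds(1000);
        }
        return false;
}

int main(void)
{
        printf("mode reached: %d\n", wait_for_mode(1));
        return 0;
}
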
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index ec7fc227b33..5d1273b660a 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
register_id));
return (NULL);
}
@@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
@@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 5e6d4dbb802..36eb803dd9d 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
- ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e26c17d4b71..c10d587c164 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: 0x%p/%X",
+ "Illegal I/O port address/length above 64K: %p/0x%X",
ACPI_CAST_PTR(void, address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index aa2b80132d0..3a2814676ac 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
default:
ACPI_ERROR((AE_INFO,
- "Unsupported initial type value %X",
+ "Unsupported initial type value 0x%X",
init_val->type));
acpi_ut_remove_reference(obj_desc);
obj_desc = NULL;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 0689d36638d..2110cc2360f 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/* Check the node type and name */
if (type > ACPI_TYPE_LOCAL_MAX) {
- ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
- type));
+ ACPI_WARNING((AE_INFO,
+ "Invalid ACPI Object Type 0x%08X", type));
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 95937245163..7dea0031605 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct external pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%u, size=%u, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 08f8b3f5cca..a8e42b5e946 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null parameter: Node %p Name %X ReturnNode %p",
+ "Null parameter: Node %p Name 0x%X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 24d05a87a2a..bab559712da 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
@@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/* type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 00493e108a0..7df1a4c9527 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
return_VOID;
}
@@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 59aabaeab1d..2f2e7760938 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_ERROR((AE_INFO,
- "Found unknown opcode %X at AML address %p offset %X, ignoring",
+ "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
walk_state->opcode, walk_state->parser_state.aml,
walk_state->aml_offset));
@@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
-
}
ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6064dd4e94c..c42f067cff9 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -46,6 +46,7 @@
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "actables.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
@@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
ACPI_FUNCTION_TRACE(ps_execute_method);
+ /* Quick validation of DSDT header */
+
+ acpi_tb_check_dsdt_header();
+
/* Validate the Info and method Node */
if (!info || !info->resolved_node) {
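
The new acpi_tb_check_dsdt_header() call makes every control-method execution start with a cheap comparison of the live DSDT header against the snapshot that acpi_tb_load_namespace() saves into acpi_gbl_original_dsdt_header later in this patch. The snapshot-and-compare idea, reduced to a generic header struct rather than the real struct acpi_table_header, is illustrated below:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Generic stand-in for struct acpi_table_header */
struct table_header {
        char signature[4];
        uint32_t length;
        uint8_t checksum;
};

static struct table_header original_header;    /* snapshot saved at load time */

static void save_header_snapshot(const struct table_header *live)
{
        memcpy(&original_header, live, sizeof(original_header));
}

/* Cheap check run at the start of each method execution */
static void check_header(const struct table_header *live)
{
        if (original_header.length != live->length ||
            original_header.checksum != live->checksum) {
                fprintf(stderr, "table corrupted or replaced after load\n");

                /* Re-sync the snapshot so the warning is printed only once */
                original_header.length = live->length;
                original_header.checksum = live->checksum;
        }
}

int main(void)
{
        struct table_header dsdt = { { 'D', 'S', 'D', 'T' }, 0x1000, 0x5a };

        save_header_snapshot(&dsdt);
        dsdt.checksum = 0;      /* simulate post-boot corruption */
        check_header(&dsdt);    /* warns once */
        check_header(&dsdt);    /* silent */
        return 0;
}
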
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f2ee3b54860..c80a2eea3a0 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need sub-package, found %s",
+ "(PRT[%u]) Need sub-package, found %s",
index,
acpi_ut_get_object_type_name
(*top_object_list)));
@@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need package of length 4, found length %d",
+ "(PRT[%u]) Need package of length 4, found length %u",
index, (*top_object_list)->package.count));
return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
}
@@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[0];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Address) Need Integer, found %s",
+ "(PRT[%u].Address) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[1];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Pin) Need Integer, found %s",
+ "(PRT[%u].Pin) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if (obj_desc->reference.class !=
ACPI_REFCLASS_NAME) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need name, found Reference Class %X",
+ "(PRT[%u].Source) Need name, found Reference Class 0x%X",
index,
obj_desc->reference.class));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
default:
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+ "(PRT[%u].Source) Need Ref/String/Integer, found %s",
index,
acpi_ut_get_object_type_name
(obj_desc)));
@@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[3];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].SourceIndex) Need Integer, found %s",
+ "(PRT[%u].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index fd057c72d25..7335f22aac2 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
[resource_index]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
+ "Could not convert AML resource (Type 0x%X)",
*aml));
return_ACPI_STATUS(status);
}
@@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
ACPI_ERROR((AE_INFO,
- "Invalid descriptor type (%X) in resource list",
+ "Invalid descriptor type (0x%X) in resource list",
resource->type));
return_ACPI_STATUS(AE_BAD_DATA);
}
@@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
[resource->type]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert resource (type %X) to AML",
+ "Could not convert resource (type 0x%X) to AML",
resource->type));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 07de352fa44..f8cd9e87d98 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
- "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+ "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
resource, resource->type, resource->length));
}
@@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
* "IRQ Format"), so 0x00 and 0x09 are illegal.
*/
ACPI_ERROR((AE_INFO,
- "Invalid interrupt polarity/trigger in resource list, %X",
+ "Invalid interrupt polarity/trigger in resource list, 0x%X",
aml->irq.flags));
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f43fbe0fc3f..1728cb9bf60 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
- "truncating length 0x%X to 0x%X",
+ "truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
if (address64->address && address32 &&
(address64->address != (u64) address32)) {
ACPI_ERROR((AE_INFO,
- "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32",
+ "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
fadt_info_table[i].name, address32,
ACPI_FORMAT_UINT64(address64->address)));
}
@@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
ACPI_WARNING((AE_INFO,
"32/64X FACS address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.facs,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
@@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
ACPI_WARNING((AE_INFO,
"32/64X DSDT address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.dsdt,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
@@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_WARNING((AE_INFO,
- "32/64X length mismatch in %s: %d/%d",
+ "32/64X length mismatch in %s: %u/%u",
name, ACPI_MUL_8(length),
address64->bit_width));
}
@@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
if (!address64->address || !length) {
ACPI_ERROR((AE_INFO,
"Required field %s has zero address and/or length:"
- " %8.8X%8.8X/%X",
+ " 0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
- "%8.8X%8.8X/%X",
+ "0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length !=
target64->bit_width)) {
ACPI_WARNING((AE_INFO,
- "Invalid length for %s: %d, using default %d",
+ "Invalid length for %s: %u, using default %u",
fadt_info_table[i].name,
target64->bit_width,
fadt_info_table[i].
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e252180ce61..989d5c86786 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
/* Search for the table */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
header.signature, ACPI_NAME_SIZE)) {
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 7ec02b0f69e..83d7af8d090 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/* Check if table is already registered */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (!acpi_gbl_root_table_list.tables[i].pointer) {
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
@@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- size +
+ max_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.size *
- sizeof(struct acpi_table_desc));
+ (acpi_size) acpi_gbl_root_table_list.
+ max_table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count +=
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
@@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
struct acpi_table_header *table,
u32 length, u8 flags, u32 *table_index)
{
- acpi_status status = AE_OK;
+ acpi_status status;
+ struct acpi_table_desc *new_table;
/* Ensure that there is room for the table in the Root Table List */
- if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
status = acpi_tb_resize_root_table_list();
if (ACPI_FAILURE(status)) {
return (status);
}
}
+ new_table =
+ &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count];
+
/* Initialize added table */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address = address;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- pointer = table;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
- length;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- owner_id = 0;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
- flags;
-
- ACPI_MOVE_32_TO_32(&
- (acpi_gbl_root_table_list.
- tables[acpi_gbl_root_table_list.count].signature),
- table->signature);
-
- *table_index = acpi_gbl_root_table_list.count;
- acpi_gbl_root_table_list.count++;
- return (status);
+ new_table->address = address;
+ new_table->pointer = table;
+ new_table->length = length;
+ new_table->owner_id = 0;
+ new_table->flags = flags;
+
+ ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
+
+ *table_index = acpi_gbl_root_table_list.current_table_count;
+ acpi_gbl_root_table_list.current_table_count++;
+ return (AE_OK);
}
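
acpi_tb_store_table() is rewritten to fill in the new entry through a local new_table pointer instead of repeating the long acpi_gbl_root_table_list.tables[...] expression for every field, and the root-table-list fields are renamed from count/size to current_table_count/max_table_count. The same append-with-resize pattern, condensed into stand-alone C with simplified types (not the ACPICA structures; the origin-flag handling from the patch is omitted):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define TABLE_SIZE_INCREMENT 4

struct table_desc {
        uint64_t address;
        void *pointer;
        uint32_t length;
};

struct table_list {
        struct table_desc *tables;
        unsigned int current_table_count;       /* entries in use */
        unsigned int max_table_count;           /* entries allocated */
};

/* Grow the array by a fixed increment, copying the old contents */
static int resize_table_list(struct table_list *list)
{
        unsigned int new_max = list->max_table_count + TABLE_SIZE_INCREMENT;
        struct table_desc *tables = calloc(new_max, sizeof(*tables));

        if (!tables)
                return -1;
        if (list->tables) {
                memcpy(tables, list->tables,
                       list->max_table_count * sizeof(*tables));
                free(list->tables);
        }
        list->tables = tables;
        list->max_table_count = new_max;
        return 0;
}

/* Append one descriptor, filling it in through a local pointer */
static int store_table(struct table_list *list, uint64_t address,
                       void *pointer, uint32_t length,
                       unsigned int *table_index)
{
        struct table_desc *new_table;

        if (list->current_table_count >= list->max_table_count &&
            resize_table_list(list))
                return -1;

        new_table = &list->tables[list->current_table_count];
        new_table->address = address;
        new_table->pointer = pointer;
        new_table->length = length;

        *table_index = list->current_table_count++;
        return 0;
}

int main(void)
{
        struct table_list list = { NULL, 0, 0 };
        unsigned int index;

        store_table(&list, 0x1000, NULL, 64, &index);
        free(list.tables);
        return (int)index;      /* 0 */
}
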
/*******************************************************************************
@@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
/* Delete the individual tables */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
}
@@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
acpi_gbl_root_table_list.tables = NULL;
acpi_gbl_root_table_list.flags = 0;
- acpi_gbl_root_table_list.count = 0;
+ acpi_gbl_root_table_list.current_table_count = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
return_ACPI_STATUS(status);
}
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
/* The table index does not exist */
@@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
status = acpi_ut_allocate_owner_id
(&(acpi_gbl_root_table_list.tables[table_index].owner_id));
}
@@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_release_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
acpi_ut_release_owner_id(&
(acpi_gbl_root_table_list.
tables[table_index].owner_id));
@@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
ACPI_FUNCTION_TRACE(tb_get_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
*owner_id =
acpi_gbl_root_table_list.tables[table_index].owner_id;
status = AE_OK;
@@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
u8 is_loaded = FALSE;
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
is_loaded = (u8)
(acpi_gbl_root_table_list.tables[table_index].flags &
ACPI_TABLE_IS_LOADED);
@@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
{
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
if (is_loaded) {
acpi_gbl_root_table_list.tables[table_index].flags |=
ACPI_TABLE_IS_LOADED;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 02723a9fb10..34f9c2bc5e1 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
- if (acpi_gbl_root_table_list.count >= 3) {
+ if (acpi_gbl_root_table_list.current_table_count >= 3) {
return (TRUE);
}
@@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
if (checksum) {
ACPI_WARNING((AE_INFO,
- "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+ "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
table->signature, table->checksum,
(u8) (table->checksum - checksum)));
@@ -349,6 +349,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_check_dsdt_header
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
+ * if the DSDT has been replaced from outside the OS and/or if
+ * the DSDT header has been corrupted.
+ *
+ ******************************************************************************/
+
+void acpi_tb_check_dsdt_header(void)
+{
+
+ /* Compare original length and checksum to current values */
+
+ if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
+ acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
+ ACPI_ERROR((AE_INFO,
+ "The DSDT has been corrupted or replaced - old, new headers below"));
+ acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
+ acpi_tb_print_table_header(0, acpi_gbl_DSDT);
+
+ ACPI_ERROR((AE_INFO,
+ "Please send DMI info to linux-acpi@vger.kernel.org\n"
+ "If system does not work as expected, please boot with acpi=copy_dsdt"));
+
+ /* Disable further error messages */
+
+ acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
+ acpi_gbl_original_dsdt_header.checksum =
+ acpi_gbl_DSDT->checksum;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_copy_dsdt
+ *
+ * PARAMETERS: table_index - Index of the installed DSDT in the root table list
+ *
+ * RETURN: Pointer to the local copy of the DSDT, or NULL on allocation failure
+ *
+ * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
+ * Some very bad BIOSs are known to either corrupt the DSDT or
+ * install a new, bad DSDT. This copy works around the problem.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
+{
+ struct acpi_table_header *new_table;
+ struct acpi_table_desc *table_desc;
+
+ table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+ new_table = ACPI_ALLOCATE(table_desc->length);
+ if (!new_table) {
+ ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
+ table_desc->length));
+ return (NULL);
+ }
+
+ ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+ acpi_tb_delete_table(table_desc);
+ table_desc->pointer = new_table;
+ table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ ACPI_INFO((AE_INFO,
+ "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
+ new_table->length));
+
+ return (new_table);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_install_table
*
* PARAMETERS: Address - Physical address of DSDT or FACS
@@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
ACPI_WARNING((AE_INFO,
- "64-bit Physical Address in XSDT is too large (%8.8X%8.8X),"
+ "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
" truncating",
ACPI_FORMAT_UINT64(address64)));
}
@@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
table_entry =
ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- acpi_gbl_root_table_list.count = 2;
+ acpi_gbl_root_table_list.current_table_count = 2;
/*
* Initialize the root table array from the RSDT/XSDT
*/
for (i = 0; i < table_count; i++) {
- if (acpi_gbl_root_table_list.count >=
- acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
/* There is no more room in the root table array, attempt resize */
@@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
"Truncating %u table entries!",
(unsigned) (table_count -
(acpi_gbl_root_table_list.
- count - 2))));
+ current_table_count -
+ 2))));
break;
}
}
/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address =
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count].address =
acpi_tb_get_root_table_entry(table_entry, table_entry_size);
table_entry += table_entry_size;
- acpi_gbl_root_table_list.count++;
+ acpi_gbl_root_table_list.current_table_count++;
}
/*
@@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* Complete the initialization of the root table array by examining
* the header of each table
*/
- for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
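
Of the two helpers added to tbutils.c, acpi_tb_copy_dsdt() is the workaround for BIOSes that corrupt or replace the DSDT after it has been mapped: it duplicates the installed table into OS-owned memory, releases the original mapping, and repoints the table descriptor at the copy. The core copy-and-swap step, reduced to portable C with a placeholder standing in for acpi_tb_delete_table()'s unmap, might look like this:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct table_desc {
        void *pointer;          /* currently installed table image */
        uint32_t length;
        uint8_t flags;
};

#define ORIGIN_ALLOCATED 0x01   /* table memory is now owned locally */

/* Placeholder for acpi_tb_delete_table(): release the original mapping */
static void release_original(struct table_desc *desc)
{
        desc->pointer = NULL;
}

/* Copy the installed table into local memory and swap the descriptor */
static void *copy_table_locally(struct table_desc *desc)
{
        void *new_table = malloc(desc->length);

        if (!new_table)
                return NULL;    /* keep using the original mapping */

        memcpy(new_table, desc->pointer, desc->length);
        release_original(desc);

        desc->pointer = new_table;
        desc->flags = ORIGIN_ALLOCATED;
        return new_table;
}

int main(void)
{
        uint8_t firmware_image[16] = { 0 };
        struct table_desc dsdt = { firmware_image, sizeof(firmware_image), 0 };

        free(copy_table_locally(&dsdt));
        return 0;
}

The caller, acpi_tb_load_namespace() in the tbxface.c hunks further down, only overrides acpi_gbl_DSDT when the copy succeeds, and only when acpi_gbl_copy_dsdt_locally is set.
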
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5217a6159a3..4a8b9e6ea57 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -72,7 +72,7 @@ static int no_auto_ssdt;
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());
@@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=
@@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
+ acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- new_size = ((acpi_size) acpi_gbl_root_table_list.count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ /*
+ * Get the current size of the root table and add the default
+ * increment to create the new table size.
+ */
+ current_size = (acpi_size)
+ acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
+ new_size = current_size +
+ (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
+
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);
@@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
- acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+ /*
+ * Update the root table descriptor. The new size will be the current
+ * number of tables plus the increment, independent of the reserved
+ * size of the original table list.
+ */
acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.max_table_count =
+ acpi_gbl_root_table_list.current_table_count +
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
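
The acpi_reallocate_root_table() rework above separates the bytes currently in use (current_size) from the bytes to allocate (new_size), so only the live descriptors are copied and the new capacity becomes current_table_count plus ACPI_ROOT_TABLE_SIZE_INCREMENT rather than being tied to the old reserved size. For example, with 12 installed tables and an increment of 4, the copy is 12 * sizeof(struct acpi_table_desc) bytes and the new capacity is 16 entries. A small stand-alone sketch of the same computation, with a simplified descriptor type:

#include <stdlib.h>
#include <string.h>

#define ROOT_TABLE_SIZE_INCREMENT 4

struct table_desc { void *pointer; unsigned int length; };

/*
 * Move the descriptor array into heap memory sized for the current
 * contents plus one increment of headroom, copying only the entries
 * that are actually in use.
 */
static struct table_desc *reallocate_root_table(const struct table_desc *old,
                                                unsigned int current_count,
                                                unsigned int *max_count)
{
        size_t current_size = (size_t)current_count * sizeof(*old);
        size_t new_size = current_size +
            ROOT_TABLE_SIZE_INCREMENT * sizeof(*old);
        struct table_desc *tables = calloc(1, new_size);

        if (!tables)
                return NULL;

        memcpy(tables, old, current_size);      /* live descriptors only */
        *max_count = current_count + ROOT_TABLE_SIZE_INCREMENT;
        return tables;
}

int main(void)
{
        struct table_desc initial[3] = { { NULL, 0 } };
        unsigned int max = 0;
        struct table_desc *heap = reallocate_root_table(initial, 3, &max);

        free(heap);
        return (int)max;        /* 7 */
}
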
@@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}
@@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Validate index */
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
+ struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
- * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
- * are optional.
+ * Load the namespace. The DSDT is required, but any SSDT and
+ * PSDT tables are optional. Verify the DSDT.
*/
- if (!acpi_gbl_root_table_list.count ||
+ if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
@@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
goto unlock_and_exit;
}
- /* A valid DSDT is required */
-
- status =
- acpi_tb_verify_table(&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]);
- if (ACPI_FAILURE(status)) {
+ /*
+ * Save the DSDT pointer for simple access. This is the mapped memory
+ * address. We must take care here because the address of the .Tables
+ * array can change dynamically as tables are loaded at run-time. Note:
+ * .Pointer field is not validated until after call to acpi_tb_verify_table.
+ */
+ acpi_gbl_DSDT =
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
- status = AE_NO_ACPI_TABLES;
- goto unlock_and_exit;
+ /*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+ if (acpi_gbl_copy_dsdt_locally) {
+ new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ if (new_dsdt) {
+ acpi_gbl_DSDT = new_dsdt;
+ }
}
+ /*
+ * Save the original DSDT header for detection of table corruption
+ * and/or replacement of the DSDT from outside the OS.
+ */
+ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
+
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
@@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index dda6e8c497d..fd2c07d1d3a 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_PTR_LENGTH);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_WINDOW_SIZE);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE));
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3d706b8fd44..8f089628156 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
/* Report allocation error */
ACPI_WARNING((module, line,
- "Could not allocate size %X", (u32) size));
+ "Could not allocate size %u", (u32) size));
return_PTR(NULL);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 97ec3621e71..6fef83f04bc 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
+ acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
- /* Copy the entire source object over the destination object */
+ /*
+ * Copy the entire source object over the destination object.
+ * Note: Source can be either an operand object or namespace node.
+ */
+ copy_size = sizeof(union acpi_operand_object);
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
+ copy_size = sizeof(struct acpi_namespace_node);
+ }
- ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
- sizeof(union acpi_operand_object));
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
+ ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 16b51c69606..ed794cd033e 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
default:
- ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+ ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}
@@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p", count,
- object));
+ "Large Reference Count (0x%X) in object %p",
+ count, object));
}
}
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7f5e734ce7f..6dfdeb65349 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
- "Type returned from %s was incorrect: %s, expected Btypes: %X",
+ "Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index eda3e656c4a..66116750a0f 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
/* Miscellaneous variables */
+ acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 32982e2ac38..e8d0724ee40 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/* Guard against multiple allocations of ID to the same location */
if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
*owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_iD */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
return_VOID;
}
@@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: %2.2X",
+ "Release of non-allocated OwnerId: 0x%2.2X",
owner_id + 1));
}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 55d014ed6d5..058b3df4827 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [%X]",
+ "Thread %p could not acquire Mutex [0x%X]",
ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}
@@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [%X] is not acquired, cannot release",
+ "Mutex [0x%X] is not acquired, cannot release",
mutex_id));
return (AE_NOT_ACQUIRED);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3356f0cb074..fd1fa2749ea 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) buffer_size));
acpi_ut_remove_reference(buffer_desc);
return_PTR(NULL);
@@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*/
string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) string_size));
acpi_ut_remove_reference(string_desc);
return_PTR(NULL);
@@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
*/
ACPI_ERROR((AE_INFO,
"Cannot convert to external object - "
- "unsupported Reference Class [%s] %X in object %p",
+ "unsupported Reference Class [%s] 0x%X in object %p",
acpi_ut_get_reference_name(internal_object),
internal_object->reference.class,
internal_object));
@@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
- "unsupported type [%s] %X in object %p",
+ "unsupported type [%s] 0x%X in object %p",
acpi_ut_get_object_type_name(internal_object),
internal_object->common.type, internal_object));
status = AE_TYPE;
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 00000000000..f8c668f27b5
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
+config ACPI_APEI
+ bool "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows errors (for example from the chipset) to be
+ reported to the operating system. This especially improves
+ NMI handling. In addition, it supports error serialization
+ and error injection.
+
+config ACPI_APEI_GHES
+ tristate "APEI Generic Hardware Error Source"
+ depends on ACPI_APEI && X86
+ select ACPI_HED
+ help
+ Generic Hardware Error Source provides a way to report
+ platform hardware errors (such as those from the chipset).
+ It works in so-called "Firmware First" mode, that is,
+ hardware errors are reported to the firmware first and then
+ passed on to Linux by the firmware. This way, some
+ non-standard hardware error registers or non-standard
+ hardware links can be checked by the firmware to produce
+ more valuable hardware error information for Linux.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism; it is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 00000000000..b13b03a1778
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 00000000000..db3946e9c66
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows errors (for example from the chipset) to be reported to
+ * the operating system. This especially improves NMI handling. In
+ * addition, it supports error serialization and error injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file contains common functions used by more than one APEI
+ * table, including the framework of the interpreter for ERST and
+ * EINJ, and resource management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table
+ * and execute all instructions that belong to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of the current instruction,
+ * "ctx->ip" specifies the next instruction to be executed, and
+ * an instruction's "run" function may change "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
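The interpreter above is driven by a per-table instruction array plus the action table taken from firmware. A minimal sketch of how a table driver plugs into it (the names here are illustrative; the real wiring for EINJ and ERST appears later in this series):

	/* Sketch only: map ACPI instruction codes to interpreter callbacks. */
	static struct apei_exec_ins_type example_ins_type[] = {
		[ACPI_EINJ_READ_REGISTER] = {
			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
			.run	= apei_exec_read_register,
		},
		[ACPI_EINJ_NOOP] = {
			.flags	= 0,
			.run	= apei_exec_noop,
		},
	};

	static int example_run_action(struct acpi_table_einj *tab, u8 action)
	{
		struct apei_exec_context ctx;

		/* Serialization entries follow the fixed ACPI table header */
		apei_exec_ctx_init(&ctx, example_ins_type,
				   ARRAY_SIZE(example_ins_type),
				   (struct acpi_whea_header *)(tab + 1),
				   tab->entries);
		return apei_exec_run(&ctx, action);
	}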
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in the action table so that they can be accessed
+ * from the NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GARs in the action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+/* Collect all resources requested, to avoid conflicts */
+struct apei_resources apei_resources_all = {
+ .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
+ .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
+};
+
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
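A note on the semantics of apei_res_add(), worked through as a sketch (the helper below is hypothetical and assumes it sits after apei_res_clean() in this file): overlapping or adjacent requests are coalesced into a single range entry, so repeated registration of the same GAR is harmless.

	/* Sketch: two overlapping additions collapse into one range. */
	static void example_res_add(void)
	{
		LIST_HEAD(example_list);

		apei_res_add(&example_list, 0x1000, 0x10);	/* [0x1000, 0x1010) */
		apei_res_add(&example_list, 0x1008, 0x18);	/* merged: [0x1000, 0x1020) */
		apei_res_clean(&example_list);
	}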
+
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+static int apei_resources_merge(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources2->iomem, list) {
+ rc = apei_res_add(&resources1->iomem, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+ list_for_each_entry(res, &resources2->ioport, list) {
+ rc = apei_res_add(&resources1->ioport, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * EINJ has two groups of GARs (EINJ table entry and trigger table
+ * entry), so common resources are subtracted from the trigger table
+ * resources before the second request.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal
+ * memory or the IO memory/ports of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ apei_resources_sub(resources, &apei_resources_all);
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ apei_resources_merge(&apei_resources_all, resources);
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ release_mem_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ release_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+
+ apei_resources_sub(&apei_resources_all, resources);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The same register may be used by multiple instructions in GARs, so
+ * resources are collected before they are requested.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
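Putting the resource helpers together, a typical bring-up path collects the GAR resources of a context, requests them, and pre-maps them, unwinding in reverse order on failure. The sketch below uses hypothetical names; the real sequence is visible in the EINJ and ERST init code later in this series.

	/* Sketch only: typical setup sequence for an APEI table driver. */
	static int example_setup(struct apei_exec_context *ctx,
				 struct apei_resources *res)
	{
		int rc;

		apei_resources_init(res);
		rc = apei_exec_collect_resources(ctx, res);
		if (rc)
			goto err_fini;
		rc = apei_resources_request(res, "APEI example");
		if (rc)
			goto err_fini;
		rc = apei_exec_pre_map_gars(ctx);
		if (rc)
			goto err_release;
		return 0;

	err_release:
		apei_resources_release(res);
	err_fini:
		apei_resources_fini(res);
		return rc;
	}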
+
+struct dentry *apei_get_debugfs_dir(void)
+{
+ static struct dentry *dapei;
+
+ if (!dapei)
+ dapei = debugfs_create_dir("apei", NULL);
+
+ return dapei;
+}
+EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 00000000000..18df1e94027
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+#include <linux/cper.h>
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+struct dentry *apei_get_debugfs_dir(void);
+
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)estatus < estatus->data_length; \
+ section = (void *)(section+1) + section->error_data_length)
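The iterator above walks the variable-length generic data sections that follow an acpi_hest_generic_status block. A minimal sketch of its use (the helper name is made up for illustration):

	/* Sketch: count the generic data sections in an error status block. */
	static inline unsigned int example_count_sections(
		struct acpi_hest_generic_status *estatus)
	{
		struct acpi_hest_generic_data *section;
		unsigned int n = 0;

		apei_estatus_for_each_section(estatus, section)
			n++;
		return n;
	}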
+
+static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->raw_data_length)
+ return estatus->raw_data_offset + \
+ estatus->raw_data_length;
+ else
+ return sizeof(*estatus) + estatus->data_length;
+}
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 00000000000..f4cf2fc4c8c
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various APEI tables, such as ERST, BERT, and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.3.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * CPER record IDs need to be unique even across reboots, because a
+ * record ID is used as the index into ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq))
+ atomic64_set(&seq, ((u64)get_seconds()) << 32);
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
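As a hedged illustration (record_id is assumed here to be the corresponding field of struct cper_record_header in <linux/cper.h>; the helper itself is hypothetical), a writer would stamp each record before handing it to the serialization code:

	/* Sketch: give a record a boot-unique ID before it is stored. */
	static void example_stamp_record(struct cper_record_header *hdr)
	{
		hdr->record_id = cper_next_record_id();
	}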
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int rc;
+
+ rc = apei_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ if (gedata_len > data_len - sizeof(*gdata))
+ return -EINVAL;
+ data_len -= gedata_len + sizeof(*gdata);
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 00000000000..465c885938e
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism; this is useful
+ * for debugging and testing other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009-2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+
+/*
+ * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
+ * EINJ table through an unpublished extension. Use with caution as
+ * most will ignore the parameter and make their own choice of address
+ * for error injection.
+ */
+struct einj_parameter {
+ u64 type;
+ u64 reserved1;
+ u64 reserved2;
+ u64 param1;
+ u64 param2;
+};
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent the EINJ interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static struct einj_parameter *einj_param;
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static int einj_timedout(u64 *t)
+{
+ if ((s64)*t < SPIN_UNIT) {
+ pr_warning(FW_WARN EINJ_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= SPIN_UNIT;
+ ndelay(SPIN_UNIT);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static u64 einj_get_parameter_address(void)
+{
+ int i;
+ u64 paddr = 0;
+ struct acpi_whea_header *entry;
+
+ entry = EINJ_TAB_ENTRY(einj_tab);
+ for (i = 0; i < einj_tab->entries; i++) {
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddr, &entry->register_region.address,
+ sizeof(paddr));
+ entry++;
+ }
+
+ return paddr;
+}
+
+/* Do a sanity check on the trigger table */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
+ iounmap(trigger_tab);
+ trigger_tab = ioremap_cache(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
+static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ struct apei_exec_context ctx;
+ u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ writeq(param1, &einj_param->param1);
+ writeq(param2, &einj_param->param2);
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!(val & EINJ_OP_BUSY))
+ break;
+ if (einj_timedout(&timeout))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type, param1, param2);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static u64 error_param1;
+static u64 error_param2;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type, error_param1, error_param2);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ u64 param_paddr;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ }
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_unmap:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ if (einj_param)
+ iounmap(einj_param);
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 00000000000..2ebc3911550
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
+/*
+ * APEI Error Record Serialization Table support
+ *
+ * ERST is a way provided by APEI to save hardware error information
+ * to and retrieve it from a persistent store.
+ *
+ * For more information about ERST, please refer to ACPI Specification
+ * version 4.0, section 17.4.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/cper.h>
+#include <linux/nmi.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define ERST_PFX "ERST: "
+
+/* ERST command status */
+#define ERST_STATUS_SUCCESS 0x0
+#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
+#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
+#define ERST_STATUS_FAILED 0x3
+#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
+#define ERST_STATUS_RECORD_NOT_FOUND 0x5
+
+#define ERST_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_erst)))
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define FIRMWARE_MAX_STALL 50 /* 50us */
+
+int erst_disable;
+EXPORT_SYMBOL_GPL(erst_disable);
+
+static struct acpi_table_erst *erst_tab;
+
+/* ERST Error Log Address Range attributes */
+#define ERST_RANGE_RESERVED 0x0001
+#define ERST_RANGE_NVRAM 0x0002
+#define ERST_RANGE_SLOW 0x0004
+
+/*
+ * ERST Error Log Address Range, used as buffer for reading/writing
+ * error records.
+ */
+static struct erst_erange {
+ u64 base;
+ u64 size;
+ void __iomem *vaddr;
+ u32 attr;
+} erst_erange;
+
+/*
+ * Prevent the ERST interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ *
+ * The lock also provides exclusive access to the ERST Error Log
+ * Address Range.
+ */
+static DEFINE_SPINLOCK(erst_lock);
+
+static inline int erst_errno(int command_status)
+{
+ switch (command_status) {
+ case ERST_STATUS_SUCCESS:
+ return 0;
+ case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
+ return -ENODEV;
+ case ERST_STATUS_NOT_ENOUGH_SPACE:
+ return -ENOSPC;
+ case ERST_STATUS_RECORD_STORE_EMPTY:
+ case ERST_STATUS_RECORD_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int erst_timedout(u64 *t, u64 spin_unit)
+{
+ if ((s64)*t < spin_unit) {
+ pr_warning(FW_WARN ERST_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= spin_unit;
+ ndelay(spin_unit);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static int erst_exec_load_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var1);
+}
+
+static int erst_exec_load_var2(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var2);
+}
+
+static int erst_exec_store_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->var1);
+}
+
+static int erst_exec_add(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 += ctx->var2;
+ return 0;
+}
+
+static int erst_exec_subtract(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 -= ctx->var2;
+ return 0;
+}
+
+static int erst_exec_add_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val += ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_subtract_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val -= ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_stall(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ u64 stall_time;
+
+ if (ctx->value > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall instruction: %llx.\n",
+ ctx->value);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->value;
+ udelay(stall_time);
+ return 0;
+}
+
+static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 stall_time;
+
+ if (ctx->var1 > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall while true instruction: %llx.\n",
+ ctx->var1);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->var1;
+
+ for (;;) {
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val != ctx->value)
+ break;
+ if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int erst_exec_skip_next_instruction_if_true(
+ struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val == ctx->value) {
+ ctx->ip += 2;
+ return APEI_EXEC_SET_IP;
+ }
+
+ return 0;
+}
+
+static int erst_exec_goto(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->ip = ctx->value;
+ return APEI_EXEC_SET_IP;
+}
+
+static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->src_base);
+}
+
+static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->dst_base);
+}
+
+static int erst_exec_move_data(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 offset;
+
+ rc = __apei_exec_read_register(entry, &offset);
+ if (rc)
+ return rc;
+ memmove((void *)ctx->dst_base + offset,
+ (void *)ctx->src_base + offset,
+ ctx->var2);
+
+ return 0;
+}
+
+static struct apei_exec_ins_type erst_ins_type[] = {
+ [ACPI_ERST_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_ERST_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_ERST_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_ERST_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_ERST_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+ [ACPI_ERST_LOAD_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var1,
+ },
+ [ACPI_ERST_LOAD_VAR2] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var2,
+ },
+ [ACPI_ERST_STORE_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_store_var1,
+ },
+ [ACPI_ERST_ADD] = {
+ .flags = 0,
+ .run = erst_exec_add,
+ },
+ [ACPI_ERST_SUBTRACT] = {
+ .flags = 0,
+ .run = erst_exec_subtract,
+ },
+ [ACPI_ERST_ADD_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_add_value,
+ },
+ [ACPI_ERST_SUBTRACT_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_subtract_value,
+ },
+ [ACPI_ERST_STALL] = {
+ .flags = 0,
+ .run = erst_exec_stall,
+ },
+ [ACPI_ERST_STALL_WHILE_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_stall_while_true,
+ },
+ [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_skip_next_instruction_if_true,
+ },
+ [ACPI_ERST_GOTO] = {
+ .flags = 0,
+ .run = erst_exec_goto,
+ },
+ [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_src_address_base,
+ },
+ [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_dst_address_base,
+ },
+ [ACPI_ERST_MOVE_DATA] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_move_data,
+ },
+};
+
+static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
+ ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
+}
+
+static int erst_get_erange(struct erst_erange *range)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
+ if (rc)
+ return rc;
+ range->base = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
+ if (rc)
+ return rc;
+ range->size = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
+ if (rc)
+ return rc;
+ range->attr = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+static ssize_t __erst_get_record_count(void)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
+ if (rc)
+ return rc;
+ return apei_exec_ctx_get_output(&ctx);
+}
+
+ssize_t erst_get_record_count(void)
+{
+ ssize_t count;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ count = __erst_get_record_count();
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_count);
+
+static int __erst_get_next_record_id(u64 *record_id)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
+ if (rc)
+ return rc;
+ *record_id = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/*
+ * Get the record ID of an existing error record on the persistent
+ * storage. If there is no error record on the persistent storage, the
+ * returned record_id is APEI_ERST_INVALID_RECORD_ID.
+ */
+int erst_get_next_record_id(u64 *record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_get_next_record_id);
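A minimal sketch of how a consumer might enumerate the persistent store with this API (APEI_ERST_INVALID_RECORD_ID is the sentinel mentioned in the comment above; the walker function is hypothetical and error handling is elided):

	/* Sketch: walk the IDs of the records currently held in ERST. */
	static void example_walk_records(void)
	{
		u64 record_id;

		for (;;) {
			if (erst_get_next_record_id(&record_id))
				break;
			if (record_id == APEI_ERST_INVALID_RECORD_ID)
				break;
			/* read, report, or clear record_id here */
		}
	}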
+
+static int __erst_write_to_storage(u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_read_from_storage(u64 record_id, u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_clear_from_storage(u64 record_id)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+/* NVRAM ERST Error Log Address Range is not supported yet */
+static void pr_unimpl_nvram(void)
+{
+ if (printk_ratelimit())
+ pr_warning(ERST_PFX
+ "NVRAM ERST Log Address Range is not implemented yet\n");
+}
+
+static int __erst_write_to_nvram(const struct cper_record_header *record)
+{
+ /* do not print message, because printk is not safe for NMI */
+ return -ENOSYS;
+}
+
+static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+static int __erst_clear_from_nvram(u64 record_id)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+int erst_write(const struct cper_record_header *record)
+{
+ int rc;
+ unsigned long flags;
+ struct cper_record_header *rcd_erange;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
+ return -EINVAL;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM) {
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ rc = __erst_write_to_nvram(record);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+
+ if (record->record_length > erst_erange.size)
+ return -EINVAL;
+
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ memcpy(erst_erange.vaddr, record, record->record_length);
+ rcd_erange = erst_erange.vaddr;
+ /* signature for serialization system */
+ memcpy(&rcd_erange->persistence_information, "ER", 2);
+
+ rc = __erst_write_to_storage(0);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_write);
+
+static int __erst_read_to_erange(u64 record_id, u64 *offset)
+{
+ int rc;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ return __erst_read_to_erange_from_nvram(
+ record_id, offset);
+
+ rc = __erst_read_from_storage(record_id, 0);
+ if (rc)
+ return rc;
+ *offset = 0;
+
+ return 0;
+}
+
+static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ int rc;
+ u64 offset, len = 0;
+ struct cper_record_header *rcd_tmp;
+
+ rc = __erst_read_to_erange(record_id, &offset);
+ if (rc)
+ return rc;
+ rcd_tmp = erst_erange.vaddr + offset;
+ len = rcd_tmp->record_length;
+ if (len <= buflen)
+ memcpy(record, rcd_tmp, len);
+
+ return len;
+}
+
+/*
+ * If the return value > buflen, the buffer is not big enough;
+ * else if the return value < 0, something went wrong;
+ * else everything is OK, and the return value is the record length.
+ */
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ ssize_t len;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read);
+
+/*
+ * If the return value > buflen, the buffer is not big enough;
+ * else if the return value = 0, there are no more records to read;
+ * else if the return value < 0, something went wrong;
+ * else everything is OK, and the return value is the record length.
+ */
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read_next);
+
+int erst_clear(u64 record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ rc = __erst_clear_from_nvram(record_id);
+ else
+ rc = __erst_clear_from_storage(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_clear);
+
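As a usage illustration only (not part of this patch), a hypothetical consumer of the exported ERST interface could drain persistent storage with erst_read_next() and erst_clear(). The example_ names and the 4096-byte buffer bound below are assumptions; the erst_* declarations are taken as coming from <acpi/apei.h> in this patch series.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cper.h>
#include <acpi/apei.h>

#define EXAMPLE_BUF_LEN 4096            /* assumed upper bound for one record */

static void example_drain_erst(void)
{
        struct cper_record_header *rcd;
        ssize_t len;

        rcd = kmalloc(EXAMPLE_BUF_LEN, GFP_KERNEL);
        if (!rcd)
                return;
        for (;;) {
                len = erst_read_next(rcd, EXAMPLE_BUF_LEN);
                if (len == 0)           /* no more records */
                        break;
                if (len < 0 || len > EXAMPLE_BUF_LEN)
                        break;          /* error, or buffer too small */
                pr_info("ERST record id %llx, length %zd\n",
                        (unsigned long long)rcd->record_id, len);
                erst_clear(rcd->record_id);     /* free the persistent slot */
        }
        kfree(rcd);
}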
+static int __init setup_erst_disable(char *str)
+{
+ erst_disable = 1;
+ return 0;
+}
+
+__setup("erst_disable", setup_erst_disable);
+
+static int erst_check_table(struct acpi_table_erst *erst_tab)
+{
+ if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->header.length < sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->entries !=
+ (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
+ sizeof(struct acpi_erst_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init erst_init(void)
+{
+ int rc = 0;
+ acpi_status status;
+ struct apei_exec_context ctx;
+ struct apei_resources erst_resources;
+ struct resource *r;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (erst_disable) {
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_ERST, 0,
+ (struct acpi_table_header **)&erst_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_err(ERST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = erst_check_table(erst_tab);
+ if (rc) {
+ pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ goto err;
+ }
+
+ apei_resources_init(&erst_resources);
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &erst_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&erst_resources, "APEI ERST");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ rc = erst_get_erange(&erst_erange);
+ if (rc) {
+ if (rc == -ENODEV)
+ pr_info(ERST_PFX
+ "The corresponding hardware device or firmware implementation "
+ "is not available.\n");
+ else
+ pr_err(ERST_PFX
+ "Failed to get Error Log Address Range.\n");
+ goto err_unmap_reg;
+ }
+
+ r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
+ if (!r) {
+ pr_err(ERST_PFX
+ "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size);
+ rc = -EIO;
+ goto err_unmap_reg;
+ }
+ rc = -ENOMEM;
+ erst_erange.vaddr = ioremap_cache(erst_erange.base,
+ erst_erange.size);
+ if (!erst_erange.vaddr)
+ goto err_release_erange;
+
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ return 0;
+
+err_release_erange:
+ release_mem_region(erst_erange.base, erst_erange.size);
+err_unmap_reg:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&erst_resources);
+err_fini:
+ apei_resources_fini(&erst_resources);
+err:
+ erst_disable = 1;
+ return rc;
+}
+
+device_initcall(erst_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 00000000000..fd0cc016a09
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
+/*
+ * APEI Generic Hardware Error Source support
+ *
+ * Generic Hardware Error Source provides a way to report platform
+ * hardware errors (such as those from the chipset). It works in the
+ * so-called "Firmware First" mode: hardware errors are reported to
+ * the firmware first, and the firmware then reports them to Linux.
+ * This way, non-standard hardware error registers or non-standard
+ * hardware links can be checked by the firmware to produce richer
+ * hardware error information for Linux.
+ *
+ * For more information about Generic Hardware Error Source, please
+ * refer to ACPI Specification version 4.0, section 17.3.2.6
+ *
+ * Currently, only the SCI notification type and memory errors are
+ * supported. More notification types and hardware error types will
+ * be added later.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/cper.h>
+#include <linux/kdebug.h>
+#include <acpi/apei.h>
+#include <acpi/atomicio.h>
+#include <acpi/hed.h>
+#include <asm/mce.h>
+
+#include "apei-internal.h"
+
+#define GHES_PFX "GHES: "
+
+#define GHES_ESTATUS_MAX_SIZE 65536
+
+/*
+ * One struct ghes is created for each generic hardware error
+ * source.
+ *
+ * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
+ * handler. The handler for one generic hardware error source is only
+ * triggered after the previous invocation is done, so the handler can
+ * use struct ghes without locking.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ struct list_head list;
+ u64 buffer_paddr;
+ unsigned long flags;
+};
+
+/*
+ * Error source lists, one list for each notification method. The
+ * list members are struct ghes instances.
+ *
+ * The list members are only added in HEST parsing and deleted during
+ * module_exit, that is, single-threaded. So no lock is needed for
+ * that.
+ *
+ * But mutual exclusion is needed between member addition/deletion
+ * and the timer/IRQ/SCI/NMI handlers, which may traverse the lists;
+ * RCU is used for that.
+ */
+static LIST_HEAD(ghes_sci);
+
+static struct ghes *ghes_new(struct acpi_hest_generic *generic)
+{
+ struct ghes *ghes;
+ unsigned int error_block_length;
+ int rc;
+
+ ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
+ if (!ghes)
+ return ERR_PTR(-ENOMEM);
+ ghes->generic = generic;
+ INIT_LIST_HEAD(&ghes->list);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
+ if (rc)
+ goto err_free;
+ error_block_length = generic->error_block_length;
+ if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
+ pr_warning(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
+ error_block_length = GHES_ESTATUS_MAX_SIZE;
+ }
+ ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
+ if (!ghes->estatus) {
+ rc = -ENOMEM;
+ goto err_unmap;
+ }
+
+ return ghes;
+
+err_unmap:
+ acpi_post_unmap_gar(&generic->error_status_address);
+err_free:
+ kfree(ghes);
+ return ERR_PTR(rc);
+}
+
+static void ghes_fini(struct ghes *ghes)
+{
+ kfree(ghes->estatus);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
+}
+
+enum {
+ GHES_SER_NO = 0x0,
+ GHES_SER_CORRECTED = 0x1,
+ GHES_SER_RECOVERABLE = 0x2,
+ GHES_SER_PANIC = 0x3,
+};
+
+static inline int ghes_severity(int severity)
+{
+ switch (severity) {
+ case CPER_SER_INFORMATIONAL:
+ return GHES_SER_NO;
+ case CPER_SER_CORRECTED:
+ return GHES_SER_CORRECTED;
+ case CPER_SER_RECOVERABLE:
+ return GHES_SER_RECOVERABLE;
+ case CPER_SER_FATAL:
+ return GHES_SER_PANIC;
+ default:
+ /* Unknown, go panic */
+ return GHES_SER_PANIC;
+ }
+}
+
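For illustration only (not part of this patch), a severity-based policy built on the mapping above could look like the sketch below. It assumes placement in this file (ghes_severity() and the GHES_SER_* values are file-local), and the example_ name is an assumption.

/* Sketch: act on the overall severity of an error status block,
 * reusing the ghes_severity() mapping defined above. */
static void example_act_on_severity(struct acpi_hest_generic_status *estatus)
{
        int ser = ghes_severity(estatus->error_severity);

        if (ser == GHES_SER_PANIC)
                panic("Fatal hardware error reported by firmware");
        if (ser == GHES_SER_CORRECTED)
                pr_info(GHES_PFX "Corrected hardware error reported\n");
}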
+/* The SCI handler runs in a work queue, so ioremap can be used here */
+static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
+{
+ void *vaddr;
+
+ vaddr = ioremap_cache(paddr, len);
+ if (!vaddr)
+ return -ENOMEM;
+ if (from_phys)
+ memcpy(buffer, vaddr, len);
+ else
+ memcpy(vaddr, buffer, len);
+ iounmap(vaddr);
+
+ return 0;
+}
+
+static int ghes_read_estatus(struct ghes *ghes, int silent)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ u64 buf_paddr;
+ u32 len;
+ int rc;
+
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ if (rc) {
+ if (!silent && printk_ratelimit())
+ pr_warning(FW_WARN GHES_PFX
+"Failed to read error status block address for hardware error source: %d.\n",
+ g->header.source_id);
+ return -EIO;
+ }
+ if (!buf_paddr)
+ return -ENOENT;
+
+ rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (!ghes->estatus->block_status)
+ return -ENOENT;
+
+ ghes->buffer_paddr = buf_paddr;
+ ghes->flags |= GHES_TO_CLEAR;
+
+ rc = -EIO;
+ len = apei_estatus_len(ghes->estatus);
+ if (len < sizeof(*ghes->estatus))
+ goto err_read_block;
+ if (len > ghes->generic->error_block_length)
+ goto err_read_block;
+ if (apei_estatus_check_header(ghes->estatus))
+ goto err_read_block;
+ rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (apei_estatus_check(ghes->estatus))
+ goto err_read_block;
+ rc = 0;
+
+err_read_block:
+ if (rc && !silent)
+ pr_warning(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return rc;
+}
+
+static void ghes_clear_estatus(struct ghes *ghes)
+{
+ ghes->estatus->block_status = 0;
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ return;
+ ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
+ sizeof(ghes->estatus->block_status), 0);
+ ghes->flags &= ~GHES_TO_CLEAR;
+}
+
+static void ghes_do_proc(struct ghes *ghes)
+{
+ int ser, processed = 0;
+ struct acpi_hest_generic_data *gdata;
+
+ ser = ghes_severity(ghes->estatus->error_severity);
+ apei_estatus_for_each_section(ghes->estatus, gdata) {
+#ifdef CONFIG_X86_MCE
+ if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PLATFORM_MEM)) {
+ apei_mce_report_mem_error(
+ ser == GHES_SER_CORRECTED,
+ (struct cper_sec_mem_err *)(gdata+1));
+ processed = 1;
+ }
+#endif
+ }
+
+ if (!processed && printk_ratelimit())
+ pr_warning(GHES_PFX
+ "Unknown error record from generic hardware error source: %d\n",
+ ghes->generic->header.source_id);
+}
+
+static int ghes_proc(struct ghes *ghes)
+{
+ int rc;
+
+ rc = ghes_read_estatus(ghes, 0);
+ if (rc)
+ goto out;
+ ghes_do_proc(ghes);
+
+out:
+ ghes_clear_estatus(ghes);
+ return 0;
+}
+
+static int ghes_notify_sci(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ghes *ghes;
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+ if (!ghes_proc(ghes))
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block ghes_notifier_sci = {
+ .notifier_call = ghes_notify_sci,
+};
+
+static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes = NULL;
+ int rc = 0;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ generic = (struct acpi_hest_generic *)hest_hdr;
+ if (!generic->enabled)
+ return 0;
+
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
+ generic->header.source_id);
+ goto err;
+ }
+ if (generic->records_to_preallocate == 0) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid records to preallocate: %u for generic hardware error source: %d\n",
+ generic->records_to_preallocate,
+ generic->header.source_id);
+ goto err;
+ }
+ ghes = ghes_new(generic);
+ if (IS_ERR(ghes)) {
+ rc = PTR_ERR(ghes);
+ ghes = NULL;
+ goto err;
+ }
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via POLL is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via IRQ is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
+ if (list_empty(&ghes_sci))
+ register_acpi_hed_notifier(&ghes_notifier_sci);
+ list_add_rcu(&ghes->list, &ghes_sci);
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via NMI is not supported!\n",
+ generic->header.source_id);
+ break;
+ default:
+ pr_warning(FW_WARN GHES_PFX
+ "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ break;
+ }
+
+ return 0;
+err:
+ if (ghes)
+ ghes_fini(ghes);
+ return rc;
+}
+
+static void ghes_cleanup(void)
+{
+ struct ghes *ghes, *nghes;
+
+ if (!list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
+ list_del(&ghes->list);
+ ghes_fini(ghes);
+ kfree(ghes);
+ }
+}
+
+static int __init ghes_init(void)
+{
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (hest_disable) {
+ pr_info(GHES_PFX "HEST is not enabled!\n");
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_ghes_parse, NULL);
+ if (rc) {
+ pr_err(GHES_PFX
+ "Error during parsing HEST generic hardware error sources.\n");
+ goto err_cleanup;
+ }
+
+ if (list_empty(&ghes_sci)) {
+ pr_info(GHES_PFX
+ "No functional generic hardware error sources.\n");
+ rc = -ENODEV;
+ goto err_cleanup;
+ }
+
+ pr_info(GHES_PFX
+ "Generic Hardware Error Source support is initialized.\n");
+
+ return 0;
+err_cleanup:
+ ghes_cleanup();
+ return rc;
+}
+
+static void __exit ghes_exit(void)
+{
+ ghes_cleanup();
+}
+
+module_init(ghes_init);
+module_exit(ghes_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 00000000000..e7f40d362cb
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
+/*
+ * APEI Hardware Error Source Table support
+ *
+ * HEST describes error sources in detail and communicates operational
+ * parameters (e.g. severity levels, masking bits, and threshold
+ * values) to Linux as necessary. It also allows the BIOS to report
+ * non-standard error sources to Linux (for example, chipset-specific
+ * error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+int hest_disable;
+EXPORT_SYMBOL_GPL(hest_disable);
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+}
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_warning(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_warning(FW_BUG HEST_PFX
+ "Table contents overflow for hardware error source: %d.\n",
+ hest_hdr->source_id);
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
+
+static int __init setup_hest_disable(char *str)
+{
+ hest_disable = 1;
+ return 0;
+}
+
+__setup("hest_disable", setup_hest_disable);
+
+static int __init hest_init(void)
+{
+ acpi_status status;
+ int rc = -ENODEV;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (hest_disable) {
+ pr_info(HEST_PFX "HEST table parsing is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(HEST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ goto err;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+err:
+ hest_disable = 1;
+ return rc;
+}
+
+subsys_initcall(hest_init);
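As an illustration of the apei_hest_parse() interface exported above (not part of this patch), a caller could walk the HEST with a callback such as the sketch below; the example_ names are assumptions.

/* Sketch: count enabled Generic Error Sources in the HEST. */
static int example_count_ghes(struct acpi_hest_header *hest_hdr, void *data)
{
        struct acpi_hest_generic *generic;
        int *count = data;

        if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
                return 0;               /* returning non-zero aborts the walk */
        generic = (struct acpi_hest_generic *)hest_hdr;
        if (generic->enabled)
                (*count)++;
        return 0;
}

static int example_hest_query(void)
{
        int count = 0;
        int rc;

        rc = apei_hest_parse(example_count_ghes, &count);
        if (rc)
                return rc;
        pr_info(HEST_PFX "%d enabled generic error source(s)\n", count);
        return 0;
}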
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 00000000000..814b1924961
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used by the NMI handler to access IO memory areas, because
+ * ioremap/iounmap cannot be used in an NMI handler. The IO memory
+ * area is pre-mapped in process context and accessed in the NMI
+ * handler.
+ *
+ * Copyright (C) 2009-2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of the acpi_iomaps list;
+ * for synchronization between readers and writers, RCU is used.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by the NMI handler; if the specified IO memory
+ * area is not pre-mapped, NULL is returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First check whether
+ * the area is already pre-mapped; if it is, increase the reference
+ * count (in __acpi_try_ioremap) and return. Otherwise, do the real
+ * ioremap and add the mapping to the acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only when the reference count drops to zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* When called from an NMI handler, silent should be set to 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock can only be released after the IO memory area has been
+ * accessed.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
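A minimal usage sketch (not part of this patch; the example_ names and the GAR pointer's origin are assumptions): pre-map a GAR once from process context, read it safely from atomic context, and unmap it on teardown, using only the interfaces exported above.

/* Sketch: GAR pre-mapped at init time, read from an NMI/IRQ handler. */
static struct acpi_generic_address *example_reg; /* assumed to point into a firmware table */

static int example_setup(struct acpi_generic_address *reg)
{
        int rc;

        rc = acpi_pre_map_gar(reg);     /* process context only, may sleep */
        if (rc)
                return rc;
        example_reg = reg;
        return 0;
}

static u64 example_read_in_handler(void)
{
        u64 val = 0;

        /* No ioremap here: uses the pre-mapped area under RCU. */
        acpi_atomic_read(&val, example_reg);
        return val;
}

static void example_teardown(void)
{
        acpi_post_unmap_gar(example_reg);       /* process context only */
}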
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 743576bf1bd..c1d23cd7165 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -69,6 +69,44 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
};
+#ifdef CONFIG_X86
+static int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE "%s detected - "
+ "force copy of DSDT to local memory\n", id->ident);
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+}
+
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+ * Insyde BIOS on some TOSHIBA machines corrupts the DSDT.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+ */
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite A505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
+ },
+ },
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite L505D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ },
+ },
+ {}
+};
+#else
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ {}
+};
+#endif
+
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
@@ -363,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle,
printk("\n");
}
-static u8 hex_val(unsigned char c)
-{
- return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
@@ -384,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
return AE_BAD_PARAMETER;
}
for (i = 0; i < 16; i++) {
- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+ uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
+ uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
}
return AE_OK;
}
@@ -813,6 +846,12 @@ void __init acpi_early_init(void)
acpi_gbl_permanent_mmap = 1;
+ /*
+ * If the machine falls into the DMI check table,
+ * DSDT will be copied to memory
+ */
+ dmi_check_system(dsdt_dmi_table);
+
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3f01f065b53..5f2027d782e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1037,10 +1037,9 @@ int __init acpi_ec_ecdt_probe(void)
/* Don't trust ECDT, which comes from ASUSTek */
if (!EC_FLAGS_VALIDATE_ECDT)
goto install;
- saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+ saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
if (!saved_ec)
return -ENOMEM;
- memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
/* fall through */
}
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 00000000000..d0c1967f759
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI Hardware Error Device (PNP0C33) Driver
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * ACPI Hardware Error Device is used to report some hardware errors
+ * notified via SCI, mainly the corrected errors.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/hed.h>
+
+static struct acpi_device_id acpi_hed_ids[] = {
+ {"PNP0C33", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
+
+static acpi_handle hed_handle;
+
+static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
+
+int register_acpi_hed_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
+
+void unregister_acpi_hed_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
+
+/*
+ * An SCI reporting a hardware error is forwarded to the listeners of
+ * HED; this is used by HEST Generic Hardware Error Sources with
+ * notification type SCI.
+ */
+static void acpi_hed_notify(struct acpi_device *device, u32 event)
+{
+ blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
+}
+
+static int __devinit acpi_hed_add(struct acpi_device *device)
+{
+ /* Only one hardware error device */
+ if (hed_handle)
+ return -EINVAL;
+ hed_handle = device->handle;
+ return 0;
+}
+
+static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+{
+ hed_handle = NULL;
+ return 0;
+}
+
+static struct acpi_driver acpi_hed_driver = {
+ .name = "hardware_error_device",
+ .class = "hardware_error",
+ .ids = acpi_hed_ids,
+ .ops = {
+ .add = acpi_hed_add,
+ .remove = acpi_hed_remove,
+ .notify = acpi_hed_notify,
+ },
+};
+
+static int __init acpi_hed_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_hed_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_hed_driver);
+}
+
+module_init(acpi_hed_init);
+module_exit(acpi_hed_exit);
+
+ACPI_MODULE_NAME("hed");
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+MODULE_LICENSE("GPL");
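As a usage sketch (not part of this patch), another driver could subscribe to hardware-error SCIs through the notifier interface exported above, wiring the functions into its own module_init/module_exit; the example_ names are assumptions.

/* Sketch: a listener on the HED notifier chain. */
static int example_hed_event(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        /* acpi_hed_notify() passes event == 0 and data == NULL */
        pr_info("hed: hardware error notification received\n");
        return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
        .notifier_call = example_hed_event,
};

static int __init example_init(void)
{
        return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
        unregister_acpi_hed_notifier(&example_hed_nb);
}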
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a19287..00000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <linux/acpi.h>
-#include <linux/pci.h>
-
-#define PREFIX "ACPI: "
-
-static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
-{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
-}
-
-static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
-{
- struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
- unsigned long rc=0;
- u8 pcie_type = 0;
- u8 bridge = 0;
- switch (type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- rc = sizeof(struct acpi_hest_aer_root);
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- rc = sizeof(struct acpi_hest_aer);
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- rc = sizeof(struct acpi_hest_aer_bridge);
- if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- }
-
- if (p->flags & ACPI_HEST_GLOBAL) {
- if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- }
- else
- if (hest_match_pci(p, pci))
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- return rc;
-}
-
-static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
-{
- struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
- void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
- struct acpi_hest_header *hdr = p;
-
- int i;
- int firmware_first = 0;
- static unsigned char printed_unused = 0;
- static unsigned char printed_reserved = 0;
-
- for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
- switch (hdr->type) {
- case ACPI_HEST_TYPE_IA32_CHECK:
- p += parse_acpi_hest_ia_machine_check(p);
- break;
- case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
- p += parse_acpi_hest_ia_corrected(p);
- break;
- case ACPI_HEST_TYPE_IA32_NMI:
- p += parse_acpi_hest_ia_nmi(p);
- break;
- /* These three should never appear */
- case ACPI_HEST_TYPE_NOT_USED3:
- case ACPI_HEST_TYPE_NOT_USED4:
- case ACPI_HEST_TYPE_NOT_USED5:
- if (!printed_unused) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
- printed_unused = 1;
- }
- break;
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
- break;
- case ACPI_HEST_TYPE_GENERIC_ERROR:
- p += parse_acpi_hest_generic(p);
- break;
- /* These should never appear either */
- case ACPI_HEST_TYPE_RESERVED:
- default:
- if (!printed_reserved) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
- printed_reserved = 1;
- }
- break;
- }
- }
- return firmware_first;
-}
-
-int acpi_hest_firmware_first_pci(struct pci_dev *pci)
-{
- acpi_status status = AE_NOT_FOUND;
- struct acpi_table_header *hest = NULL;
-
- if (acpi_disabled)
- return 0;
-
- status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
-
- if (ACPI_SUCCESS(status)) {
- if (acpi_hest_firmware_first(hest, pci)) {
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7594f65800c..78418ce4fc7 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
EXPORT_SYMBOL(acpi_check_mem_region);
/*
+ * Let drivers know whether the resource checks are effective
+ */
+int acpi_resources_are_enforced(void)
+{
+ return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
+}
+EXPORT_SYMBOL(acpi_resources_are_enforced);
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
@@ -1406,7 +1415,7 @@ acpi_os_invalidate_address(
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res.start = address;
res.end = address + length - 1;
@@ -1458,7 +1467,7 @@ acpi_os_validate_address (
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
if (!res)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee68..e4804fb05e2 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
+ u32 dev_gsi;
dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(&dev->dev, dev->irq,
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ printk(" - using ISA IRQ %d\n", dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a0..4eac59393ed 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ if ((root->segment == (u16) seg) &&
+ (root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
- int *busnr = data;
+ struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
- (address.resource_type == ACPI_BUS_NUMBER_RANGE))
- *busnr = address.minimum;
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
+ res->start = address.minimum;
+ res->end = address.minimum + address.address_length - 1;
+ }
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
- unsigned long long *bus)
+ struct resource *res)
{
acpi_status status;
- int busnum;
- busnum = -1;
+ res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, &busnum);
+ get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
- /* Check if we really get a bus number from _CRS */
- if (busnum == -1)
+ if (res->start == -1)
return AE_ERROR;
- *bus = busnum;
return AE_OK;
}
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
- bus = 0;
- status = try_get_root_bridge_busnr(device->handle, &bus);
+ root->secondary.flags = IORESOURCE_BUS;
+ status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
+ /*
+ * We need both the start and end of the downstream bus range
+ * to interpret _CBA (MMCONFIG base address), so it really is
+ * supposed to be in _CRS. If we don't find it there, all we
+ * can do is assume [_BBN-0xFF] or [0-0xFF].
+ */
+ root->secondary.end = 0xFF;
+ printk(KERN_WARNING FW_BUG PREFIX
+ "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX
- "no bus number in _CRS and can't evaluate _BBN\n");
- return -ENODEV;
+ if (ACPI_SUCCESS(status))
+ root->secondary.start = bus;
+ else if (status == AE_NOT_FOUND)
+ root->secondary.start = 0;
+ else {
+ printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
+ result = -ENODEV;
+ goto end;
}
}
- root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
- if (!root)
- return -ENOMEM;
-
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
- root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
- printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->segment, root->bus_nr);
+ root->segment, &root->secondary);
/*
* Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, segment, bus);
+ root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->segment, root->bus_nr);
+ root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ddc76787b84..f74d3b31e5c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -172,7 +172,6 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
- /* */
for (i = 0; i < list->count; i++) {
/*
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5675d9747e8..b1034a9ada4 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
acpi_processor_get_limit_info(pr);
- acpi_processor_power_init(pr, device);
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
@@ -920,9 +921,14 @@ static int __init acpi_processor_init(void)
if (!acpi_processor_dir)
return -ENOMEM;
#endif
- result = cpuidle_register_driver(&acpi_idle_driver);
- if (result < 0)
- goto out_proc;
+
+ if (!cpuidle_register_driver(&acpi_idle_driver)) {
+ printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+ acpi_idle_driver.name);
+ } else {
+ printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+ cpuidle_get_driver()->name);
+ }
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
@@ -941,7 +947,6 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
-out_proc:
#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e..2e8c27d48f2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"max_cstate: C%d\n"
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
- max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+ max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
@@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
break;
}
- if (pr->power.states[i].promotion.state)
- seq_printf(seq, "promotion[C%zd] ",
- (pr->power.states[i].promotion.state -
- pr->power.states));
- else
- seq_puts(seq, "promotion[--] ");
+ seq_puts(seq, "promotion[--] ");
- if (pr->power.states[i].demotion.state)
- seq_printf(seq, "demotion[C%zd] ",
- (pr->power.states[i].demotion.state -
- pr->power.states));
- else
- seq_puts(seq, "demotion[--] ");
+ seq_puts(seq, "demotion[--] ");
seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
pr->power.states[i].latency,
@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
@@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
/*
@@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
@@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
@@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
acpi_unlazy_tlb(smp_processor_id());
@@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time_ns = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0338f513a01..7f2e051ed4f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
- ACPI_NOT_ISR, &event_status);
+ &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 504a55edac4..3fb4bdea7e0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
-void __init acpi_set_sci_en_on_resume(void)
-{
- set_sci_en_on_resume = true;
-}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -256,11 +240,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
- /* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
+ /* This violates the spec but is required for bug compatibility. */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -357,12 +338,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -381,22 +356,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -405,94 +364,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201[s]",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
@@ -501,30 +372,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1558",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1557",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1555",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
- },
- },
{},
};
#endif /* CONFIG_SUSPEND */
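
The sleep.c hunks above drop the set_sci_en_on_resume flag and its DMI whitelist; on resume the kernel now always writes SCI_EN itself instead of preferring acpi_enable(), accepting the spec violation for bug compatibility with firmware that never re-enables ACPI. Condensed from the hunk, the resume step changes as follows (both forms appear verbatim above):

/* Before (removed above): prefer the spec-compliant acpi_enable(),
 * forcing SCI_EN only on DMI-whitelisted machines. */
if (set_sci_en_on_resume)
    acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
else
    acpi_enable();

/* After (added above): always force SCI_EN on resume. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
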
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a..25b8bd14928 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
extern u8 sleep_states[];
-extern int acpi_suspend (u32 state);
+extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4aaf2497613..c79e789ed03 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -71,7 +71,7 @@ struct acpi_table_attr {
struct list_head node;
};
-static ssize_t acpi_table_show(struct kobject *kobj,
+static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
"Invalid GPE 0x%x\n", index));
goto end;
}
- result = acpi_get_gpe_status(*handle, index,
- ACPI_NOT_ISR, status);
+ result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
- result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+ result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
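
The scan.c and system.c hunks above track an ACPICA interface change: acpi_get_gpe_status() and acpi_clear_gpe() no longer take the ACPI_NOT_ISR flag. A minimal kernel-style sketch of the updated call shape, built only from symbols visible in these hunks (the helper itself is hypothetical):

#include <linux/acpi.h>

/* Hypothetical helper illustrating the post-change call shape; the
 * ACPI_NOT_ISR argument seen on the removed lines is simply dropped. */
static acpi_status example_poke_gpe(acpi_handle handle, u32 index)
{
    acpi_event_status event_status;
    acpi_status status;

    status = acpi_get_gpe_status(handle, index, &event_status);
    if (ACPI_FAILURE(status))
        return status;

    if (event_status & ACPI_EVENT_FLAG_SET)
        status = acpi_clear_gpe(handle, index);

    return status;
}
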
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e6..f336bca7c45 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b32148..9865d46f49a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <acpi/video.h>
#define PREFIX "ACPI: "
@@ -65,11 +66,6 @@
#define MAX_NAME_LEN 20
-#define ACPI_VIDEO_DISPLAY_CRT 1
-#define ACPI_VIDEO_DISPLAY_TV 2
-#define ACPI_VIDEO_DISPLAY_DVI 3
-#define ACPI_VIDEO_DISPLAY_LCD 4
-
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
result = acpi_video_init_brightness(device);
if (result)
return;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
+ count++;
- sprintf(name, "acpi_video%d", count++);
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (device->cap._DCS && device->cap._DSS) {
static int count;
char *name;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
- sprintf(name, "acpi_video%d", count++);
+ count++;
device->output_dev = video_output_register(name,
NULL, device, &acpi_output_properties);
kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
}
static int
+acpi_video_get_device_type(struct acpi_video_bus *video,
+ unsigned long device_id)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if ((ids->value.int_val & 0xffff) == device_id)
+ return ids->value.int_val;
+ }
+
+ return 0;
+}
+
+static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
- int status;
+ int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
- } else
- data->flags.unknown = 1;
+ } else {
+ /* Check for legacy IDs */
+ device_type = acpi_video_get_device_type(video,
+ device_id);
+ /* Ignore bits 16 and 18-20 */
+ switch (device_type & 0xffe2ffff) {
+ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+ data->flags.lcd = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+ data->flags.tvout = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ }
+ }
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
return result;
}
+int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
+ void **edid)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ union acpi_object *buffer = NULL;
+ acpi_status status;
+ int i, length;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ length = 256;
+
+ if (!video_device)
+ continue;
+
+ if (type) {
+ switch (type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ if (!video_device->flags.crt)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ if (!video_device->flags.tvout)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ if (!video_device->flags.dvi)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ if (!video_device->flags.lcd)
+ continue;
+ break;
+ }
+ } else if (video_device->device_id != device_id) {
+ continue;
+ }
+
+ status = acpi_video_device_EDID(video_device, &buffer, length);
+
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ length = 128;
+ status = acpi_video_device_EDID(video_device, &buffer,
+ length);
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ continue;
+ }
+ }
+
+ *edid = buffer->buffer.pointer;
+ return length;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_video_get_edid);
+
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
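
acpi_video_get_edid(), added and exported above, lets another driver pull an EDID blob through the ACPI video bus: it selects an attached output either by display type or by exact device_id, retries the _DDC evaluation with a 128-byte length if the 256-byte request fails, and returns the blob length with *edid pointing at the buffer. A sketch of a hypothetical caller (the function name, and the assumption that the caller already holds the video bus acpi_device, are illustrative):

#include <linux/acpi.h>
#include <acpi/video.h>

/* Hypothetical caller: fetch the EDID of the first LCD output behind
 * the given ACPI video bus device.  Returns the EDID length or a
 * negative errno, exactly as acpi_video_get_edid() does above. */
static int example_fetch_lcd_edid(struct acpi_device *video_bus, void **edid)
{
    /* type != 0 selects by display class; device_id is then ignored */
    int len = acpi_video_get_edid(video_bus, ACPI_VIDEO_DISPLAY_LCD,
                                  0, edid);
    if (len < 0)
        return len;         /* -EINVAL or -ENODEV from the helper */

    /* *edid now points at 128 or 256 bytes of raw EDID data */
    return len;
}
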
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b40..c5fef01b3c9 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bd..73f883333a0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -57,6 +57,8 @@ config SATA_PMP
This option adds support for SATA Port Multipliers
(the SATA version of an ethernet hub, or SAS expander).
+comment "Controllers with non-SFF native interface"
+
config SATA_AHCI
tristate "AHCI SATA support"
depends on PCI
@@ -65,11 +67,11 @@ config SATA_AHCI
If unsure, say N.
-config SATA_SIL24
- tristate "Silicon Image 3124/3132 SATA support"
- depends on PCI
+config SATA_AHCI_PLATFORM
+ tristate "Platform AHCI SATA support"
help
- This option enables support for Silicon Image 3124/3132 Serial ATA.
+ This option enables support for Platform AHCI Serial ATA
+ controllers.
If unsure, say N.
@@ -82,6 +84,20 @@ config SATA_FSL
If unsure, say N.
+config SATA_INIC162X
+ tristate "Initio 162x SATA support"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+
+config SATA_SIL24
+ tristate "Silicon Image 3124/3132 SATA support"
+ depends on PCI
+ help
+ This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+ If unsure, say N.
+
config ATA_SFF
bool "ATA SFF support"
default y
@@ -102,15 +118,65 @@ config ATA_SFF
if ATA_SFF
-config SATA_SVW
- tristate "ServerWorks Frodo / Apple K2 SATA support"
+comment "SFF controllers with custom DMA interface"
+
+config PDC_ADMA
+ tristate "Pacific Digital ADMA support"
depends on PCI
help
- This option enables support for Broadcom/Serverworks/Apple K2
- SATA support.
+ This option enables support for Pacific Digital ADMA controllers
+
+ If unsure, say N.
+
+config PATA_MPC52xx
+ tristate "Freescale MPC52xx SoC internal IDE"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select PPC_BESTCOMM_ATA
+ help
+ This option enables support for integrated IDE controller
+ of the Freescale MPC52xx SoC.
+
+ If unsure, say N.
+
+config PATA_OCTEON_CF
+ tristate "OCTEON Boot Bus Compact Flash support"
+ depends on CPU_CAVIUM_OCTEON
+ help
+ This option enables a polled compact flash driver for use with
+ compact flash cards attached to the OCTEON boot bus.
+
+ If unsure, say N.
+
+config SATA_QSTOR
+ tristate "Pacific Digital SATA QStor support"
+ depends on PCI
+ help
+ This option enables support for Pacific Digital Serial ATA QStor.
+
+ If unsure, say N.
+
+config SATA_SX4
+ tristate "Promise SATA SX4 support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for Promise Serial ATA SX4.
If unsure, say N.
+config ATA_BMDMA
+ bool "ATA BMDMA support"
+ default y
+ help
+ This option adds support for SFF ATA controllers with BMDMA
+	  capability.  BMDMA stands for bus-master DMA and is the
+	  de facto DMA interface for SFF controllers.
+
+	  If unsure, say Y.
+
+if ATA_BMDMA
+
+comment "SATA SFF controllers with BMDMA"
+
config ATA_PIIX
tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
depends on PCI
@@ -138,22 +204,6 @@ config SATA_NV
If unsure, say N.
-config PDC_ADMA
- tristate "Pacific Digital ADMA support"
- depends on PCI
- help
- This option enables support for Pacific Digital ADMA controllers
-
- If unsure, say N.
-
-config SATA_QSTOR
- tristate "Pacific Digital SATA QStor support"
- depends on PCI
- help
- This option enables support for Pacific Digital Serial ATA QStor.
-
- If unsure, say N.
-
config SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on PCI
@@ -162,14 +212,6 @@ config SATA_PROMISE
If unsure, say N.
-config SATA_SX4
- tristate "Promise SATA SX4 support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for Promise Serial ATA SX4.
-
- If unsure, say N.
-
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
@@ -189,6 +231,15 @@ config SATA_SIS
enable the PATA_SIS driver in the config.
If unsure, say N.
+config SATA_SVW
+ tristate "ServerWorks Frodo / Apple K2 SATA support"
+ depends on PCI
+ help
+ This option enables support for Broadcom/Serverworks/Apple K2
+ SATA support.
+
+ If unsure, say N.
+
config SATA_ULI
tristate "ULi Electronics SATA support"
depends on PCI
@@ -213,20 +264,7 @@ config SATA_VITESSE
If unsure, say N.
-config SATA_INIC162X
- tristate "Initio 162x SATA support"
- depends on PCI
- help
- This option enables support for Initio 162x Serial ATA.
-
-config PATA_ACPI
- tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI
- help
- This option enables an ACPI method driver which drives
- motherboard PATA controller interfaces through the ACPI
- firmware in the BIOS. This driver can sometimes handle
- otherwise unsupported hardware.
+comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
@@ -254,40 +292,30 @@ config PATA_ARTOP
If unsure, say N.
-config PATA_ATP867X
- tristate "ARTOP/Acard ATP867X PATA support"
+config PATA_ATIIXP
+ tristate "ATI PATA support"
depends on PCI
help
- This option enables support for ARTOP/Acard ATP867X PATA
- controllers.
-
- If unsure, say N.
-
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
+ This option enables support for the ATI ATA interfaces
+ found on the many ATI chipsets.
If unsure, say N.
-config PATA_ATIIXP
- tristate "ATI PATA support"
+config PATA_ATP867X
+ tristate "ARTOP/Acard ATP867X PATA support"
depends on PCI
help
- This option enables support for the ATI ATA interfaces
- found on the many ATI chipsets.
+ This option enables support for ARTOP/Acard ATP867X PATA
+ controllers.
If unsure, say N.
-config PATA_CMD640_PCI
- tristate "CMD640 PCI PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_BF54X
+ tristate "Blackfin 54x ATAPI support"
+ depends on BF542 || BF548 || BF549
help
- This option enables support for the CMD640 PCI IDE
- interface chip. Only the primary channel is currently
- supported.
+ This option enables support for the built-in ATAPI controller on
+ Blackfin 54x family chips.
If unsure, say N.
@@ -354,15 +382,6 @@ config PATA_EFAR
If unsure, say N.
-config ATA_GENERIC
- tristate "Generic ATA support"
- depends on PCI
- help
- This option enables support for generic BIOS configured
- ATA controllers via the new ATA layer
-
- If unsure, say N.
-
config PATA_HPT366
tristate "HPT 366/368 PATA support"
depends on PCI
@@ -407,12 +426,20 @@ config PATA_HPT3X3_DMA
controllers. Enable with care as there are still some
problems with DMA on this chipset.
-config PATA_ISAPNP
- tristate "ISA Plug and Play PATA support"
- depends on ISAPNP
+config PATA_ICSIDE
+ tristate "Acorn ICS PATA support"
+ depends on ARM && ARCH_ACORN
help
- This option enables support for ISA plug & play ATA
- controllers such as those found on old soundcards.
+ On Acorn systems, say Y here if you wish to use the ICS PATA
+ interface card. This is not required for ICS partition support.
+ If you are unsure, say N to this.
+
+config PATA_IT8213
+ tristate "IT8213 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the ITE 821 PATA
+ controllers via the new ATA layer.
If unsure, say N.
@@ -426,15 +453,6 @@ config PATA_IT821X
If unsure, say N.
-config PATA_IT8213
- tristate "IT8213 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for the ITE 821 PATA
- controllers via the new ATA layer.
-
- If unsure, say N.
-
config PATA_JMICRON
tristate "JMicron PATA support"
depends on PCI
@@ -444,23 +462,14 @@ config PATA_JMICRON
If unsure, say N.
-config PATA_LEGACY
- tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI) && EXPERIMENTAL
- help
- This option enables support for ISA/VLB/PCI bus legacy PATA
- ports and allows them to be accessed via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_TRIFLEX
- tristate "Compaq Triflex PATA support"
- depends on PCI
+config PATA_MACIO
+ tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+ depends on PPC_PMAC
help
- Enable support for the Compaq 'Triflex' IDE controller as found
- on many Compaq Pentium-Pro systems, via the new ATA layer.
-
- If unsure, say N.
+ Most IDE capable PowerMacs have IDE busses driven by a variant
+ of this controller which is part of the Apple chipset used on
+ most PowerMac models. Some models have multiple busses using
+ different chipsets, though generally, MacIO is one of them.
config PATA_MARVELL
tristate "Marvell PATA support via legacy mode"
@@ -473,32 +482,6 @@ config PATA_MARVELL
If unsure, say N.
-config PATA_MPC52xx
- tristate "Freescale MPC52xx SoC internal IDE"
- depends on PPC_MPC52xx && PPC_BESTCOMM
- select PPC_BESTCOMM_ATA
- help
- This option enables support for integrated IDE controller
- of the Freescale MPC52xx SoC.
-
- If unsure, say N.
-
-config PATA_MPIIX
- tristate "Intel PATA MPIIX support"
- depends on PCI
- help
- This option enables support for MPIIX PATA support.
-
- If unsure, say N.
-
-config PATA_OLDPIIX
- tristate "Intel PATA old PIIX support"
- depends on PCI
- help
- This option enables support for early PIIX PATA support.
-
- If unsure, say N.
-
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
depends on PCI
@@ -517,15 +500,6 @@ config PATA_NINJA32
If unsure, say N.
-config PATA_NS87410
- tristate "Nat Semi NS87410 PATA support"
- depends on PCI
- help
- This option enables support for the National Semiconductor
- NS87410 PCI-IDE controller.
-
- If unsure, say N.
-
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support"
depends on PCI
@@ -535,12 +509,11 @@ config PATA_NS87415
If unsure, say N.
-config PATA_OPTI
- tristate "OPTI621/6215 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_OLDPIIX
+ tristate "Intel PATA old PIIX support"
+ depends on PCI
help
- This option enables full PIO support for the early Opti ATA
- controllers found on some old motherboards.
+ This option enables support for early PIIX PATA support.
If unsure, say N.
@@ -554,24 +527,6 @@ config PATA_OPTIDMA
If unsure, say N.
-config PATA_PALMLD
- tristate "Palm LifeDrive PATA support"
- depends on MACH_PALMLD
- help
- This option enables support for Palm LifeDrive's internal ATA
- port via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_PCMCIA
- tristate "PCMCIA PATA support"
- depends on PCMCIA
- help
- This option enables support for PCMCIA ATA interfaces, including
- compact flash card adapters via the new ATA layer.
-
- If unsure, say N.
-
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
@@ -589,12 +544,6 @@ config PATA_PDC_OLD
If unsure, say N.
-config PATA_QDI
- tristate "QDI VLB PATA support"
- depends on ISA
- help
- Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
@@ -604,15 +553,6 @@ config PATA_RADISYS
If unsure, say N.
-config PATA_RB532
- tristate "RouterBoard 532 PATA CompactFlash support"
- depends on MIKROTIK_RB532
- help
- This option enables support for the RouterBoard 532
- PATA CompactFlash controller.
-
- If unsure, say N.
-
config PATA_RDC
tristate "RDC PATA support"
depends on PCI
@@ -623,21 +563,30 @@ config PATA_RDC
If unsure, say N.
-config PATA_RZ1000
- tristate "PC Tech RZ1000 PATA support"
+config PATA_SC1200
+ tristate "SC1200 PATA support"
depends on PCI
help
- This option enables basic support for the PC Tech RZ1000/1
- PATA controllers via the new ATA layer
+ This option enables support for the NatSemi/AMD SC1200 SoC
+ companion chip used with the Geode processor family.
If unsure, say N.
-config PATA_SC1200
- tristate "SC1200 PATA support"
+config PATA_SCC
+ tristate "Toshiba's Cell Reference Set IDE support"
+ depends on PCI && PPC_CELLEB
+ help
+ This option enables support for the built-in IDE controller on
+ Toshiba Cell Reference Board.
+
+ If unsure, say N.
+
+config PATA_SCH
+ tristate "Intel SCH PATA support"
depends on PCI
help
- This option enables support for the NatSemi/AMD SC1200 SoC
- companion chip used with the Geode processor family.
+ This option enables support for Intel SCH PATA on the Intel
+ SCH (US15W, US15L, UL11L) series host controllers.
If unsure, say N.
@@ -675,6 +624,15 @@ config PATA_TOSHIBA
If unsure, say N.
+config PATA_TRIFLEX
+ tristate "Compaq Triflex PATA support"
+ depends on PCI
+ help
+ Enable support for the Compaq 'Triflex' IDE controller as found
+ on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+ If unsure, say N.
+
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
@@ -693,12 +651,99 @@ config PATA_WINBOND
If unsure, say N.
-config PATA_WINBOND_VLB
- tristate "Winbond W83759A VLB PATA support (Experimental)"
- depends on ISA && EXPERIMENTAL
+endif # ATA_BMDMA
+
+comment "PIO-only SFF controllers"
+
+config PATA_AT32
+ tristate "Atmel AVR32 PATA support (Experimental)"
+ depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
help
- Support for the Winbond W83759A controller on Vesa Local Bus
- systems.
+ This option enables support for the IDE devices on the
+ Atmel AT32AP platform.
+
+ If unsure, say N.
+
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
+config PATA_CMD640_PCI
+ tristate "CMD640 PCI PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the CMD640 PCI IDE
+ interface chip. Only the primary channel is currently
+ supported.
+
+ If unsure, say N.
+
+config PATA_ISAPNP
+ tristate "ISA Plug and Play PATA support"
+ depends on ISAPNP
+ help
+ This option enables support for ISA plug & play ATA
+ controllers such as those found on old soundcards.
+
+ If unsure, say N.
+
+config PATA_IXP4XX_CF
+ tristate "IXP4XX Compact Flash support"
+ depends on ARCH_IXP4XX
+ help
+ This option enables support for a Compact Flash connected on
+ the ixp4xx expansion bus. This driver had been written for
+ Loft/Avila boards in mind but can work with others.
+
+ If unsure, say N.
+
+config PATA_MPIIX
+ tristate "Intel PATA MPIIX support"
+ depends on PCI
+ help
+ This option enables support for MPIIX PATA support.
+
+ If unsure, say N.
+
+config PATA_NS87410
+ tristate "Nat Semi NS87410 PATA support"
+ depends on PCI
+ help
+ This option enables support for the National Semiconductor
+ NS87410 PCI-IDE controller.
+
+ If unsure, say N.
+
+config PATA_OPTI
+ tristate "OPTI621/6215 PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables full PIO support for the early Opti ATA
+ controllers found on some old motherboards.
+
+ If unsure, say N.
+
+config PATA_PALMLD
+ tristate "Palm LifeDrive PATA support"
+ depends on MACH_PALMLD
+ help
+ This option enables support for Palm LifeDrive's internal ATA
+ port via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_PCMCIA
+ tristate "PCMCIA PATA support"
+ depends on PCMCIA
+ help
+ This option enables support for PCMCIA ATA interfaces, including
+ compact flash card adapters via the new ATA layer.
+
+ If unsure, say N.
config HAVE_PATA_PLATFORM
bool
@@ -717,14 +762,6 @@ config PATA_PLATFORM
If unsure, say N.
-config PATA_AT91
- tristate "PATA support for AT91SAM9260"
- depends on ARM && ARCH_AT91
- help
- This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
-
- If unsure, say N.
-
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
@@ -735,69 +772,65 @@ config PATA_OF_PLATFORM
If unsure, say N.
-config PATA_ICSIDE
- tristate "Acorn ICS PATA support"
- depends on ARM && ARCH_ACORN
+config PATA_QDI
+ tristate "QDI VLB PATA support"
+ depends on ISA
help
- On Acorn systems, say Y here if you wish to use the ICS PATA
- interface card. This is not required for ICS partition support.
- If you are unsure, say N to this.
+ Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-config PATA_IXP4XX_CF
- tristate "IXP4XX Compact Flash support"
- depends on ARCH_IXP4XX
+config PATA_RB532
+ tristate "RouterBoard 532 PATA CompactFlash support"
+ depends on MIKROTIK_RB532
help
- This option enables support for a Compact Flash connected on
- the ixp4xx expansion bus. This driver had been written for
- Loft/Avila boards in mind but can work with others.
+ This option enables support for the RouterBoard 532
+ PATA CompactFlash controller.
If unsure, say N.
-config PATA_OCTEON_CF
- tristate "OCTEON Boot Bus Compact Flash support"
- depends on CPU_CAVIUM_OCTEON
+config PATA_RZ1000
+ tristate "PC Tech RZ1000 PATA support"
+ depends on PCI
help
- This option enables a polled compact flash driver for use with
- compact flash cards attached to the OCTEON boot bus.
+ This option enables basic support for the PC Tech RZ1000/1
+ PATA controllers via the new ATA layer
If unsure, say N.
-config PATA_SCC
- tristate "Toshiba's Cell Reference Set IDE support"
- depends on PCI && PPC_CELLEB
+config PATA_WINBOND_VLB
+ tristate "Winbond W83759A VLB PATA support (Experimental)"
+ depends on ISA && EXPERIMENTAL
help
- This option enables support for the built-in IDE controller on
- Toshiba Cell Reference Board.
+ Support for the Winbond W83759A controller on Vesa Local Bus
+ systems.
- If unsure, say N.
+comment "Generic fallback / legacy drivers"
-config PATA_SCH
- tristate "Intel SCH PATA support"
- depends on PCI
+config PATA_ACPI
+ tristate "ACPI firmware driver for PATA"
+ depends on ATA_ACPI && ATA_BMDMA
help
- This option enables support for Intel SCH PATA on the Intel
- SCH (US15W, US15L, UL11L) series host controllers.
-
- If unsure, say N.
+ This option enables an ACPI method driver which drives
+ motherboard PATA controller interfaces through the ACPI
+ firmware in the BIOS. This driver can sometimes handle
+ otherwise unsupported hardware.
-config PATA_BF54X
- tristate "Blackfin 54x ATAPI support"
- depends on BF542 || BF548 || BF549
+config ATA_GENERIC
+ tristate "Generic ATA support"
+ depends on PCI && ATA_BMDMA
help
- This option enables support for the built-in ATAPI controller on
- Blackfin 54x family chips.
+ This option enables support for generic BIOS configured
+ ATA controllers via the new ATA layer
If unsure, say N.
-config PATA_MACIO
- tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
- depends on PPC_PMAC
+config PATA_LEGACY
+ tristate "Legacy ISA PATA support (Experimental)"
+ depends on (ISA || PCI) && EXPERIMENTAL
help
- Most IDE capable PowerMacs have IDE busses driven by a variant
- of this controller which is part of the Apple chipset used on
- most PowerMac models. Some models have multiple busses using
- different chipsets, though generally, MacIO is one of them.
+ This option enables support for ISA/VLB/PCI bus legacy PATA
+ ports and allows them to be accessed via the new ATA layer.
+ If unsure, say N.
endif # ATA_SFF
endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d..7ef89d73df6 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,32 +1,39 @@
obj-$(CONFIG_ATA) += libata.o
-obj-$(CONFIG_SATA_AHCI) += ahci.o
-obj-$(CONFIG_SATA_SVW) += sata_svw.o
+# non-SFF interface
+obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+
+# SFF w/ custom DMA
+obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
+obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
+obj-$(CONFIG_SATA_SX4) += sata_sx4.o
+
+# SFF SATA w/ BMDMA
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
+obj-$(CONFIG_SATA_MV) += sata_mv.o
+obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
-obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
-obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
-obj-$(CONFIG_SATA_VIA) += sata_via.o
-obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
-obj-$(CONFIG_SATA_SX4) += sata_sx4.o
-obj-$(CONFIG_SATA_NV) += sata_nv.o
+obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_SATA_ULI) += sata_uli.o
-obj-$(CONFIG_SATA_MV) += sata_mv.o
-obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
-obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
-obj-$(CONFIG_SATA_FSL) += sata_fsl.o
-obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_SATA_VIA) += sata_via.o
+obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
+# SFF PATA w/ BMDMA
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
-obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
-obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
+obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
@@ -38,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
-obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
-obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
+obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
+obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
+obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
-obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
-obj-$(CONFIG_PATA_OPTI) += pata_opti.o
-obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
-obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
-obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
-obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
-obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
-obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
-obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
-obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RDC) += pata_rdc.o
-obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
+obj-$(CONFIG_PATA_SCC) += pata_scc.o
+obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
+obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
+obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
-obj-$(CONFIG_PATA_SIS) += pata_sis.o
-obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+
+# SFF PIO only
+obj-$(CONFIG_PATA_AT32) += pata_at32.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
+obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
-obj-$(CONFIG_PATA_SCC) += pata_scc.o
-obj-$(CONFIG_PATA_SCH) += pata_sch.o
-obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
-obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
+obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI) += pata_opti.o
+obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
-obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
-obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_QDI) += pata_qdi.o
+obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
+
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
# Should be last but one libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a41..8ca16f54e1e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,403 +46,48 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include "ahci.h"
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
-/* Enclosure Management Control */
-#define EM_CTRL_MSG_TYPE 0x000f0000
-
-/* Enclosure Management LED Message Type */
-#define EM_MSG_LED_HBA_PORT 0x0000000f
-#define EM_MSG_LED_PMP_SLOT 0x0000ff00
-#define EM_MSG_LED_VALUE 0xffff0000
-#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
-#define EM_MSG_LED_VALUE_OFF 0xfff80000
-#define EM_MSG_LED_VALUE_ON 0x00010000
-
-static int ahci_skip_host_reset;
-static int ahci_ignore_sss;
-
-module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
-MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
-
-module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
-MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy);
-static void ahci_disable_alpm(struct ata_port *ap);
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size);
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size);
-
enum {
AHCI_PCI_BAR = 5,
- AHCI_MAX_PORTS = 32,
- AHCI_MAX_SG = 168, /* hardware max is 64K */
- AHCI_DMA_BOUNDARY = 0xffffffff,
- AHCI_MAX_CMDS = 32,
- AHCI_CMD_SZ = 32,
- AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
- AHCI_RX_FIS_SZ = 256,
- AHCI_CMD_TBL_CDB = 0x40,
- AHCI_CMD_TBL_HDR_SZ = 0x80,
- AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
- AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
- AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
- AHCI_RX_FIS_SZ,
- AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
- AHCI_CMD_TBL_AR_SZ +
- (AHCI_RX_FIS_SZ * 16),
- AHCI_IRQ_ON_SG = (1 << 31),
- AHCI_CMD_ATAPI = (1 << 5),
- AHCI_CMD_WRITE = (1 << 6),
- AHCI_CMD_PREFETCH = (1 << 7),
- AHCI_CMD_RESET = (1 << 8),
- AHCI_CMD_CLR_BUSY = (1 << 10),
-
- RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
- RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
- RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
-
- board_ahci = 0,
- board_ahci_vt8251 = 1,
- board_ahci_ign_iferr = 2,
- board_ahci_sb600 = 3,
- board_ahci_mv = 4,
- board_ahci_sb700 = 5, /* for SB700 and SB800 */
- board_ahci_mcp65 = 6,
- board_ahci_nopmp = 7,
- board_ahci_yesncq = 8,
- board_ahci_nosntf = 9,
-
- /* global controller registers */
- HOST_CAP = 0x00, /* host capabilities */
- HOST_CTL = 0x04, /* global host control */
- HOST_IRQ_STAT = 0x08, /* interrupt status */
- HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
- HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
- HOST_EM_LOC = 0x1c, /* Enclosure Management location */
- HOST_EM_CTL = 0x20, /* Enclosure Management Control */
- HOST_CAP2 = 0x24, /* host capabilities, extended */
-
- /* HOST_CTL bits */
- HOST_RESET = (1 << 0), /* reset controller; self-clear */
- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
-
- /* HOST_CAP bits */
- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
- HOST_CAP_PART = (1 << 13), /* Partial state capable */
- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
- HOST_CAP_CLO = (1 << 24), /* Command List Override support */
- HOST_CAP_LED = (1 << 25), /* Supports activity LED */
- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
- HOST_CAP_SNTF = (1 << 29), /* SNotification register */
- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
-
- /* HOST_CAP2 bits */
- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
-
- /* registers for each SATA port */
- PORT_LST_ADDR = 0x00, /* command list DMA addr */
- PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
- PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
- PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
- PORT_IRQ_STAT = 0x10, /* interrupt status */
- PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
- PORT_CMD = 0x18, /* port command */
- PORT_TFDATA = 0x20, /* taskfile data */
- PORT_SIG = 0x24, /* device TF signature */
- PORT_CMD_ISSUE = 0x38, /* command issue */
- PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
- PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
- PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
- PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
- PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
- PORT_FBS = 0x40, /* FIS-based Switching */
-
- /* PORT_IRQ_{STAT,MASK} bits */
- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
-
- PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
- PORT_IRQ_IF_ERR |
- PORT_IRQ_CONNECT |
- PORT_IRQ_PHYRDY |
- PORT_IRQ_UNK_FIS |
- PORT_IRQ_BAD_PMP,
- PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
- PORT_IRQ_TF_ERR |
- PORT_IRQ_HBUS_DATA_ERR,
- DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
- PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
- PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
- /* PORT_CMD bits */
- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
- PORT_CMD_PMP = (1 << 17), /* PMP attached */
- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
- PORT_CMD_CLO = (1 << 3), /* Command list override */
- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
-
- PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
- PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
- PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
- PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
- PORT_FBS_SDE = (1 << 2), /* FBS single device error */
- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
- PORT_FBS_EN = (1 << 0), /* Enable FBS */
-
- /* hpriv->flags bits */
- AHCI_HFLAG_NO_NCQ = (1 << 0),
- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
- AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
- link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
-
- /* ap->flags bits */
-
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_IPM,
-
- ICH_MAP = 0x90, /* ICH MAP register */
-
- /* em constants */
- EM_MAX_SLOTS = 8,
- EM_MAX_RETRY = 5,
-
- /* em_ctl bits */
- EM_CTL_RST = (1 << 9), /* Reset */
- EM_CTL_TM = (1 << 8), /* Transmit Message */
- EM_CTL_ALHD = (1 << 26), /* Activity LED */
-};
-
-struct ahci_cmd_hdr {
- __le32 opts;
- __le32 status;
- __le32 tbl_addr;
- __le32 tbl_addr_hi;
- __le32 reserved[4];
-};
-
-struct ahci_sg {
- __le32 addr;
- __le32 addr_hi;
- __le32 reserved;
- __le32 flags_size;
-};
-
-struct ahci_em_priv {
- enum sw_activity blink_policy;
- struct timer_list timer;
- unsigned long saved_activity;
- unsigned long activity;
- unsigned long led_state;
-};
-
-struct ahci_host_priv {
- unsigned int flags; /* AHCI_HFLAG_* */
- u32 cap; /* cap to use */
- u32 cap2; /* cap2 to use */
- u32 port_map; /* port map to use */
- u32 saved_cap; /* saved initial cap */
- u32 saved_cap2; /* saved initial cap2 */
- u32 saved_port_map; /* saved initial port_map */
- u32 em_loc; /* enclosure management location */
};
-struct ahci_port_priv {
- struct ata_link *active_link;
- struct ahci_cmd_hdr *cmd_slot;
- dma_addr_t cmd_slot_dma;
- void *cmd_tbl;
- dma_addr_t cmd_tbl_dma;
- void *rx_fis;
- dma_addr_t rx_fis_dma;
- /* for NCQ spurious interrupt analysis */
- unsigned int ncq_saw_d2h:1;
- unsigned int ncq_saw_dmas:1;
- unsigned int ncq_saw_sdb:1;
- u32 intr_mask; /* interrupts to enable */
- bool fbs_supported; /* set iff FBS is supported */
- bool fbs_enabled; /* set iff FBS is enabled */
- int fbs_last_dev; /* save FBS.DEV of last FIS */
- /* enclosure management info per PM slot */
- struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
+ board_ahci_nosntf,
+
+ /* board IDs for specific chipsets in alphabetical order */
+ board_ahci_mcp65,
+ board_ahci_mcp77,
+ board_ahci_mcp89,
+ board_ahci_mv,
+ board_ahci_sb600,
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+ board_ahci_mcp73 = board_ahci_mcp65,
+ board_ahci_mcp79 = board_ahci_mcp77,
};
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_enable_fbs(struct ata_port *ap);
-static void ahci_disable_fbs(struct ata_port *ap);
-static void ahci_pmp_attach(struct ata_port *ap);
-static void ahci_pmp_detach(struct ata_port *ap);
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
-static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts);
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
-static ssize_t ahci_activity_store(struct ata_device *dev,
- enum sw_activity val);
-static void ahci_init_sw_activity(struct ata_link *link);
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
-static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
-static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
-static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
-
-static struct device_attribute *ahci_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- &dev_attr_em_message_type,
- &dev_attr_em_message,
- &dev_attr_ahci_host_caps,
- &dev_attr_ahci_host_cap2,
- &dev_attr_ahci_host_version,
- &dev_attr_ahci_port_cmd,
- NULL
-};
-
-static struct device_attribute *ahci_sdev_attrs[] = {
- &dev_attr_sw_activity,
- &dev_attr_unload_heads,
- NULL
-};
-
-static struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT(DRV_NAME),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-
-static struct ata_port_operations ahci_ops = {
- .inherits = &sata_pmp_port_ops,
-
- .qc_defer = ahci_pmp_qc_defer,
- .qc_prep = ahci_qc_prep,
- .qc_issue = ahci_qc_issue,
- .qc_fill_rtf = ahci_qc_fill_rtf,
-
- .freeze = ahci_freeze,
- .thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
- .error_handler = ahci_error_handler,
- .post_internal_cmd = ahci_post_internal_cmd,
- .dev_config = ahci_dev_config,
-
- .scr_read = ahci_scr_read,
- .scr_write = ahci_scr_write,
- .pmp_attach = ahci_pmp_attach,
- .pmp_detach = ahci_pmp_detach,
-
- .enable_pm = ahci_enable_alpm,
- .disable_pm = ahci_disable_alpm,
- .em_show = ahci_led_show,
- .em_store = ahci_led_store,
- .sw_activity_show = ahci_activity_show,
- .sw_activity_store = ahci_activity_store,
-#ifdef CONFIG_PM
- .port_suspend = ahci_port_suspend,
- .port_resume = ahci_port_resume,
-#endif
- .port_start = ahci_port_start,
- .port_stop = ahci_port_stop,
-};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
@@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
[board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
@@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_vt8251] =
+ [board_ahci_ign_iferr] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_vt8251_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_ign_iferr] =
+ [board_ahci_nosntf] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb600] =
+ /* by chipsets */
+ [board_ahci_mcp65] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
- AHCI_HFLAG_32BIT_ONLY),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mv] =
+ [board_ahci_mcp77] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb700] = /* for SB700 and SB800 */
+ [board_ahci_mcp89] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mcp65] =
+ [board_ahci_mv] =
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
- .flags = AHCI_FLAG_COMMON,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nopmp] =
+ [board_ahci_sb600] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+ AHCI_HFLAG_32BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_yesncq] =
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_nosntf] =
+ [board_ahci_vt8251] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_vt8251_ops,
},
};
@@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
- { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = {
#endif
};
-static int ahci_em_messages = 1;
-module_param(ahci_em_messages, int, 0444);
-/* add other LED protocol types when they become supported */
-MODULE_PARM_DESC(ahci_em_messages,
- "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
-
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
@@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
-static inline int ahci_nr_ports(u32 cap)
-{
- return (cap & 0x1f) + 1;
-}
-
-static inline void __iomem *__ahci_port_base(struct ata_host *host,
- unsigned int port_no)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- return mmio + 0x100 + (port_no * 0x80);
-}
-
-static inline void __iomem *ahci_port_base(struct ata_port *ap)
-{
- return __ahci_port_base(ap->host, ap->port_no);
-}
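For reference, the per-port register window computed by the two helpers above is fixed by the AHCI specification: ABAR + 0x100 + port * 0x80. A standalone sketch of that arithmetic (hypothetical helper name, plain user-space C, not driver code):

#include <assert.h>

/* Hypothetical stand-alone check of the ABAR-relative port offsets. */
static unsigned long ahci_port_offset(unsigned int port_no)
{
	return 0x100ul + (unsigned long)port_no * 0x80ul;
}

int main(void)
{
	assert(ahci_port_offset(0)  == 0x100);	/* first port  */
	assert(ahci_port_offset(2)  == 0x200);	/* third port  */
	assert(ahci_port_offset(31) == 0x1080);	/* last possible port */
	return 0;
}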
-
-static void ahci_enable_ahci(void __iomem *mmio)
-{
- int i;
- u32 tmp;
-
- /* turn on AHCI_EN */
- tmp = readl(mmio + HOST_CTL);
- if (tmp & HOST_AHCI_EN)
- return;
-
- /* Some controllers need AHCI_EN to be written multiple times.
- * Try a few times before giving up.
- */
- for (i = 0; i < 5; i++) {
- tmp |= HOST_AHCI_EN;
- writel(tmp, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
- if (tmp & HOST_AHCI_EN)
- return;
- msleep(10);
- }
-
- WARN_ON(1);
-}
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap);
-}
-
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap2);
-}
-
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
-
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
-}
-
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *port_mmio = ahci_port_base(ap);
-
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
-}
-
-/**
- * ahci_save_initial_config - Save and fixup initial config values
- * @pdev: target PCI device
- * @hpriv: host private area to store config values
- *
- * Some registers containing configuration info might be setup by
- * BIOS and might be cleared on reset. This function saves the
- * initial values of those registers into @hpriv such that they
- * can be restored after controller reset.
- *
- * If inconsistent, config values are fixed up by this function.
- *
- * LOCKING:
- * None.
- */
-static void ahci_save_initial_config(struct pci_dev *pdev,
- struct ahci_host_priv *hpriv)
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 cap, cap2, vers, port_map;
- int i;
- int mv;
-
- /* make sure AHCI mode is enabled before accessing CAP */
- ahci_enable_ahci(mmio);
+ unsigned int force_port_map = 0;
+ unsigned int mask_port_map = 0;
- /* Values prefixed with saved_ are written back to host after
- * reset. Values without are used for driver operation.
- */
- hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
- hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
-
- /* CAP2 register is only defined for AHCI 1.2 and later */
- vers = readl(mmio + HOST_VERSION);
- if ((vers >> 16) > 1 ||
- ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
- hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
- else
- hpriv->saved_cap2 = cap2 = 0;
-
- /* some chips have errata preventing 64bit use */
- if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
- cap &= ~HOST_CAP_64;
- }
-
- if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
- cap &= ~HOST_CAP_NCQ;
- }
-
- if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
- cap |= HOST_CAP_NCQ;
- }
-
- if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do PMP, turning off CAP_PMP\n");
- cap &= ~HOST_CAP_PMP;
- }
-
- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
- cap &= ~HOST_CAP_SNTF;
- }
-
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
- port_map != 1) {
- dev_printk(KERN_INFO, &pdev->dev,
- "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
- port_map, 1);
- port_map = 1;
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ force_port_map = 1;
}
/*
@@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
- mv = 0x3;
+ mask_port_map = 0x3;
else
- mv = 0xf;
- dev_printk(KERN_ERR, &pdev->dev,
- "MV_AHCI HACK: port_map %x -> %x\n",
- port_map,
- port_map & mv);
- dev_printk(KERN_ERR, &pdev->dev,
+ mask_port_map = 0xf;
+ dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
-
- port_map &= mv;
}
- /* cross check port_map and cap.n_ports */
- if (port_map) {
- int map_ports = 0;
-
- for (i = 0; i < AHCI_MAX_PORTS; i++)
- if (port_map & (1 << i))
- map_ports++;
-
- /* If PI has more ports than n_ports, whine, clear
- * port_map and let it be generated from n_ports.
- */
- if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
- port_map = 0;
- }
- }
-
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
- port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, &pdev->dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
-
- /* write the fixed up value to the PI register */
- hpriv->saved_port_map = port_map;
- }
-
- /* record values to use during operation */
- hpriv->cap = cap;
- hpriv->cap2 = cap2;
- hpriv->port_map = port_map;
-}
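The cross-check above boils down to two rules: CAP[4:0] encodes n_ports - 1, and an empty or over-populated PORTS_IMPL is replaced by a mask of n_ports contiguous bits. A minimal sketch of that fallback, using hypothetical helper names:

/* Hypothetical sketch of the PORTS_IMPL fallback described above. */
static unsigned int sketch_nr_ports(unsigned int cap)
{
	return (cap & 0x1f) + 1;		/* CAP[4:0] holds n_ports - 1 */
}

static unsigned int sketch_fallback_port_map(unsigned int cap)
{
	/* e.g. CAP[4:0] == 3 -> 4 ports -> port_map 0xf */
	return (1u << sketch_nr_ports(cap)) - 1;
}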
-
-/**
- * ahci_restore_initial_config - Restore initial config
- * @host: target ATA host
- *
- * Restore initial config stored by ahci_save_initial_config().
- *
- * LOCKING:
- * None.
- */
-static void ahci_restore_initial_config(struct ata_host *host)
-{
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- writel(hpriv->saved_cap, mmio + HOST_CAP);
- if (hpriv->saved_cap2)
- writel(hpriv->saved_cap2, mmio + HOST_CAP2);
- writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
- (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
-}
-
-static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
-{
- static const int offset[] = {
- [SCR_STATUS] = PORT_SCR_STAT,
- [SCR_CONTROL] = PORT_SCR_CTL,
- [SCR_ERROR] = PORT_SCR_ERR,
- [SCR_ACTIVE] = PORT_SCR_ACT,
- [SCR_NOTIFICATION] = PORT_SCR_NTF,
- };
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- if (sc_reg < ARRAY_SIZE(offset) &&
- (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
- return offset[sc_reg];
- return 0;
-}
-
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- *val = readl(port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- writel(val, port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static void ahci_start_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* start DMA */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
-}
-
-static int ahci_stop_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_CMD);
-
- /* check if the HBA is idle */
- if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
- return 0;
-
- /* setting HBA to idle */
- tmp &= ~PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for engine to stop. This could be as long as 500 msec */
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
- if (tmp & PORT_CMD_LIST_ON)
- return -EIO;
-
- return 0;
-}
-
-static void ahci_start_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- u32 tmp;
-
- /* set FIS registers */
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->cmd_slot_dma >> 16) >> 16,
- port_mmio + PORT_LST_ADDR_HI);
- writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->rx_fis_dma >> 16) >> 16,
- port_mmio + PORT_FIS_ADDR_HI);
- writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
-
- /* enable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* flush */
- readl(port_mmio + PORT_CMD);
+ ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
+ mask_port_map);
}
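The two hints passed to the libahci-provided ahci_save_initial_config() replace the open-coded JMicron and Marvell fixups that used to live here. Roughly speaking (a sketch of the intent, not the library code), they act on the saved PORTS_IMPL value like this:

/* Hypothetical illustration of how the force/mask hints act on PORTS_IMPL. */
static unsigned int sketch_apply_hints(unsigned int port_map,
				       unsigned int force_port_map,
				       unsigned int mask_port_map)
{
	if (force_port_map)		/* e.g. JMB361: exactly one port */
		port_map = force_port_map;
	if (mask_port_map)		/* e.g. hide the Marvell PATA port(s) */
		port_map &= mask_port_map;
	return port_map;
}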
-static int ahci_stop_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* disable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp &= ~PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for completion, spec says 500ms, give it 1000 */
- tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
- PORT_CMD_FIS_ON, 10, 1000);
- if (tmp & PORT_CMD_FIS_ON)
- return -EBUSY;
-
- return 0;
-}
-
-static void ahci_power_up(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
-
- /* spin up device */
- if (hpriv->cap & HOST_CAP_SSS) {
- cmd |= PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
- }
-
- /* wake up link */
- writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
-}
-
-static void ahci_disable_alpm(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* IPM bits should be disabled by libata-core */
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /* disable ALPM and ASP */
- cmd &= ~PORT_CMD_ASP;
- cmd &= ~PORT_CMD_ALPE;
-
- /* force the interface back to active */
- cmd |= PORT_CMD_ICC_ACTIVE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* wait 10ms to be sure we've come out of any low power state */
- msleep(10);
-
- /* clear out any PhyRdy stuff from interrupt status */
- writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
-
- /* go ahead and clean out PhyRdy Change from Serror too */
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
-
- /*
- * Clear flag to indicate that we should ignore all PhyRdy
- * state changes
- */
- hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
-
- /*
- * Enable interrupts on Phy Ready.
- */
- pp->intr_mask |= PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * don't change the link pm policy - we can be called
- * just to turn off link pm temporarily
- */
-}
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
- u32 asp;
-
- /* Make sure the host is capable of link power management */
- if (!(hpriv->cap & HOST_CAP_ALPM))
- return -EINVAL;
-
- switch (policy) {
- case MAX_PERFORMANCE:
- case NOT_AVAILABLE:
- /*
- * if we came here with NOT_AVAILABLE,
- * it just means this is the first time we
- * have tried to enable - default to max performance,
- * and let the user go to lower power modes on request.
- */
- ahci_disable_alpm(ap);
- return 0;
- case MIN_POWER:
- /* configure HBA to enter SLUMBER */
- asp = PORT_CMD_ASP;
- break;
- case MEDIUM_POWER:
- /* configure HBA to enter PARTIAL */
- asp = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Disable interrupts on Phy Ready. This keeps us from
- * getting woken up due to spurious phy ready interrupts
- * TBD - Hot plug should be done via polling now, is
- * that even supported?
- */
- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * Set a flag to indicate that we should ignore all PhyRdy
- * state changes since these can happen now whenever we
- * change link state
- */
- hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
-
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /*
- * Set ASP based on Policy
- */
- cmd |= asp;
-
- /*
- * Setting this bit will instruct the HBA to aggressively
- * enter a lower power link state when it's appropriate and
- * based on the value set above for ASP
- */
- cmd |= PORT_CMD_ALPE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* IPM bits should be set by libata-core */
- return 0;
-}
-
-#ifdef CONFIG_PM
-static void ahci_power_down(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd, scontrol;
-
- if (!(hpriv->cap & HOST_CAP_SSS))
- return;
-
- /* put device into listen mode, first set PxSCTL.DET to 0 */
- scontrol = readl(port_mmio + PORT_SCR_CTL);
- scontrol &= ~0xf;
- writel(scontrol, port_mmio + PORT_SCR_CTL);
-
- /* then set PxCMD.SUD to 0 */
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
- cmd &= ~PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
-}
-#endif
-
-static void ahci_start_port(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- ssize_t rc;
- int i;
-
- /* enable FIS reception */
- ahci_start_fis_rx(ap);
-
- /* enable DMA */
- ahci_start_engine(ap);
-
- /* turn on LEDs */
- if (ap->flags & ATA_FLAG_EM) {
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
-
- /* EM Transmit bit may be busy during init */
- for (i = 0; i < EM_MAX_RETRY; i++) {
- rc = ahci_transmit_led_message(ap,
- emp->led_state,
- 4);
- if (rc == -EBUSY)
- msleep(1);
- else
- break;
- }
- }
- }
-
- if (ap->flags & ATA_FLAG_SW_ACTIVITY)
- ata_for_each_link(link, ap, EDGE)
- ahci_init_sw_activity(link);
-
-}
-
-static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
-{
- int rc;
-
- /* disable DMA */
- rc = ahci_stop_engine(ap);
- if (rc) {
- *emsg = "failed to stop engine";
- return rc;
- }
-
- /* disable FIS reception */
- rc = ahci_stop_fis_rx(ap);
- if (rc) {
- *emsg = "failed stop FIS RX";
- return rc;
- }
-
- return 0;
-}
-
-static int ahci_reset_controller(struct ata_host *host)
+static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 tmp;
- /* we must be in AHCI mode, before using anything
- * AHCI-specific, such as HOST_RESET.
- */
- ahci_enable_ahci(mmio);
-
- /* global controller reset */
- if (!ahci_skip_host_reset) {
- tmp = readl(mmio + HOST_CTL);
- if ((tmp & HOST_RESET) == 0) {
- writel(tmp | HOST_RESET, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
-
- /*
- * to perform host reset, OS should set HOST_RESET
- * and poll until this bit is read to be "0".
- * reset must complete within 1 second, or
- * the hardware should be considered fried.
- */
- tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
- HOST_RESET, 10, 1000);
-
- if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
- return -EIO;
- }
-
- /* turn on AHCI mode */
- ahci_enable_ahci(mmio);
-
- /* Some registers might be cleared on reset. Restore
- * initial values.
- */
- ahci_restore_initial_config(host);
- } else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ ahci_reset_controller(host);
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ struct ahci_host_priv *hpriv = host->private_data;
u16 tmp16;
/* configure PCS */
@@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host)
return 0;
}
-static void ahci_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
- return;
-
- emp->activity++;
- if (!timer_pending(&emp->timer))
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void ahci_sw_activity_blink(unsigned long arg)
-{
- struct ata_link *link = (struct ata_link *)arg;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- unsigned long led_message = emp->led_state;
- u32 activity_led_state;
- unsigned long flags;
-
- led_message &= EM_MSG_LED_VALUE;
- led_message |= ap->port_no | (link->pmp << 8);
-
- /* check to see if we've had activity. If so,
- * toggle state of LED and reset timer. If not,
- * turn LED to desired idle state.
- */
- spin_lock_irqsave(ap->lock, flags);
- if (emp->saved_activity != emp->activity) {
- emp->saved_activity = emp->activity;
- /* get the current LED state */
- activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
-
- if (activity_led_state)
- activity_led_state = 0;
- else
- activity_led_state = 1;
-
- /* clear old state */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- /* toggle state */
- led_message |= (activity_led_state << 16);
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
- } else {
- /* switch to idle */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
- if (emp->blink_policy == BLINK_OFF)
- led_message |= (1 << 16);
- }
- spin_unlock_irqrestore(ap->lock, flags);
- ahci_transmit_led_message(ap, led_message, 4);
-}
-
-static void ahci_init_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* init activity stats, setup timer */
- emp->saved_activity = emp->activity = 0;
- setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
-
- /* check our blink policy and set flag for link if it's enabled */
- if (emp->blink_policy)
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
-}
-
-static int ahci_reset_em(struct ata_host *host)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
-
- em_ctl = readl(mmio + HOST_EM_CTL);
- if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
- return -EINVAL;
-
- writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
- return 0;
-}
-
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
- u32 message[] = {0, 0};
- unsigned long flags;
- int pmp;
- struct ahci_em_priv *emp;
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- spin_lock_irqsave(ap->lock, flags);
-
- /*
- * if we are still busy transmitting a previous message,
- * do not allow
- */
- em_ctl = readl(mmio + HOST_EM_CTL);
- if (em_ctl & EM_CTL_TM) {
- spin_unlock_irqrestore(ap->lock, flags);
- return -EBUSY;
- }
-
- /*
- * create message header - this is all zero except for
- * the message size, which is 4 bytes.
- */
- message[0] |= (4 << 8);
-
- /* ignore 0:4 of byte zero, fill in port info yourself */
- message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
-
- /* write message to EM_LOC */
- writel(message[0], mmio + hpriv->em_loc);
- writel(message[1], mmio + hpriv->em_loc+4);
-
- /* save off new led state for port/slot */
- emp->led_state = state;
-
- /*
- * tell hardware to transmit the message
- */
- writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
-
- spin_unlock_irqrestore(ap->lock, flags);
- return size;
-}
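The LED message written at EM_LOC above is two dwords: a header whose bits 8..15 carry the message size (always 4 here), and a body holding the LED state with the HBA port number patched into the low bits. A hedged sketch of that packing, with hypothetical names and the low-nibble mask assumed from the comments above:

/* Hypothetical sketch of the two-dword enclosure-management LED message. */
static void sketch_build_led_msg(unsigned int state, unsigned int port_no,
				 unsigned int msg[2])
{
	msg[0] = 4u << 8;				/* header: 4-byte message body   */
	msg[1] = (state & ~0xfu) | (port_no & 0xfu);	/* body: LED state + HBA port    */
}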
-
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- int rc = 0;
-
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
- rc += sprintf(buf, "%lx\n", emp->led_state);
- }
- return rc;
-}
-
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size)
-{
- int state;
- int pmp;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp;
-
- state = simple_strtoul(buf, NULL, 0);
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- /* mask off the activity bits if we are in sw_activity
- * mode, user should turn off sw_activity before setting
- * activity led through em_message
- */
- if (emp->blink_policy)
- state &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- return ahci_transmit_led_message(ap, state, size);
-}
-
-static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- u32 port_led_state = emp->led_state;
-
- /* save the desired Activity LED behavior */
- if (val == OFF) {
- /* clear LFLAG */
- link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
-
- /* set the LED to OFF */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- ahci_transmit_led_message(ap, port_led_state, 4);
- } else {
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
- if (val == BLINK_OFF) {
- /* set LED to ON for idle */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
- ahci_transmit_led_message(ap, port_led_state, 4);
- }
- }
- emp->blink_policy = val;
- return 0;
-}
-
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* display the saved value of activity behavior for this
- * disk.
- */
- return sprintf(buf, "%d\n", emp->blink_policy);
-}
-
-static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
- int port_no, void __iomem *mmio,
- void __iomem *port_mmio)
-{
- const char *emsg = NULL;
- int rc;
- u32 tmp;
-
- /* make sure port is not active */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- dev_printk(KERN_WARNING, &pdev->dev,
- "%s (%d)\n", emsg, rc);
-
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
-}
-
-static void ahci_init_controller(struct ata_host *host)
+static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- int i;
void __iomem *port_mmio;
u32 tmp;
int mv;
@@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- port_mmio = ahci_port_base(ap);
- if (ata_port_is_dummy(ap))
- continue;
-
- ahci_port_init(pdev, ap, i, mmio, port_mmio);
- }
-
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
- writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
-}
-
-static void ahci_dev_config(struct ata_device *dev)
-{
- struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
-
- if (hpriv->flags & AHCI_HFLAG_SECT255) {
- dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
- }
-}
-
-static unsigned int ahci_dev_classify(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_taskfile tf;
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_SIG);
- tf.lbah = (tmp >> 24) & 0xff;
- tf.lbam = (tmp >> 16) & 0xff;
- tf.lbal = (tmp >> 8) & 0xff;
- tf.nsect = (tmp) & 0xff;
-
- return ata_dev_classify(&tf);
-}
-
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts)
-{
- dma_addr_t cmd_tbl_dma;
-
- cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
-
- pp->cmd_slot[tag].opts = cpu_to_le32(opts);
- pp->cmd_slot[tag].status = 0;
- pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
- pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
-}
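The double 16-bit shift above is the usual trick for splitting a dma_addr_t into low and high dwords without performing a 32-bit shift, which would be undefined when dma_addr_t is only 32 bits wide. A standalone sketch of the same split (hypothetical helper, not driver code):

/* Hypothetical helper mirroring the (addr >> 16) >> 16 split used above. */
static void sketch_split_addr(unsigned long long addr,
			      unsigned int *lo, unsigned int *hi)
{
	*lo = (unsigned int)(addr & 0xffffffffu);
	*hi = (unsigned int)((addr >> 16) >> 16);
}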
-
-static int ahci_kick_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 tmp;
- int busy, rc;
-
- /* stop engine */
- rc = ahci_stop_engine(ap);
- if (rc)
- goto out_restart;
-
- /* need to do CLO?
- * always do CLO if PMP is attached (AHCI-1.3 9.2)
- */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !sata_pmp_attached(ap)) {
- rc = 0;
- goto out_restart;
- }
-
- if (!(hpriv->cap & HOST_CAP_CLO)) {
- rc = -EOPNOTSUPP;
- goto out_restart;
- }
-
- /* perform CLO */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_CLO;
- writel(tmp, port_mmio + PORT_CMD);
-
- rc = 0;
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
- if (tmp & PORT_CMD_CLO)
- rc = -EIO;
-
- /* restart engine */
- out_restart:
- ahci_start_engine(ap);
- return rc;
-}
-
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
- struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
-{
- const u32 cmd_fis_len = 5; /* five dwords */
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u8 *fis = pp->cmd_tbl;
- u32 tmp;
-
- /* prep the command */
- ata_tf_to_fis(tf, pmp, is_cmd, fis);
- ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
-
- /* issue & wait */
- writel(1, port_mmio + PORT_CMD_ISSUE);
-
- if (timeout_msec) {
- tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
- 1, timeout_msec);
- if (tmp & 0x1) {
- ahci_kick_engine(ap);
- return -EBUSY;
- }
- } else
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
- return 0;
-}
-
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
- int pmp, unsigned long deadline,
- int (*check_ready)(struct ata_link *link))
-{
- struct ata_port *ap = link->ap;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- const char *reason = NULL;
- unsigned long now, msecs;
- struct ata_taskfile tf;
- int rc;
-
- DPRINTK("ENTER\n");
-
- /* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap);
- if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
-
- ata_tf_init(link->device, &tf);
-
- /* issue the first D2H Register FIS */
- msecs = 0;
- now = jiffies;
- if (time_after(deadline, now))
- msecs = jiffies_to_msecs(deadline - now);
-
- tf.ctl |= ATA_SRST;
- if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
- AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
- rc = -EIO;
- reason = "1st FIS failed";
- goto fail;
- }
-
- /* spec says at least 5us, but be generous and sleep for 1ms */
- msleep(1);
-
- /* issue the second D2H Register FIS */
- tf.ctl &= ~ATA_SRST;
- ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
-
- /* wait for link to become ready */
- rc = ata_wait_after_reset(link, deadline, check_ready);
- if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
- /*
- * Workaround for cases where link online status can't
- * be trusted. Treat device readiness timeout as link
- * offline.
- */
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
- *class = ATA_DEV_NONE;
- } else if (rc) {
- /* link occupied, -ENODEV too is an error */
- reason = "device not ready";
- goto fail;
- } else
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, class=%u\n", *class);
- return 0;
-
- fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
- return rc;
-}
-
-static int ahci_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
- return ata_check_ready(status);
-}
-
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- int pmp = sata_srst_pmp(link);
-
- DPRINTK("ENTER\n");
-
- return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+ ahci_init_controller(host);
}
static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
- struct ata_taskfile tf;
- bool online;
- int rc;
-
- DPRINTK("ENTER\n");
-
- ahci_stop_engine(ap);
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = 0x80;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-
- rc = sata_link_hardreset(link, timing, deadline, &online,
- ahci_check_ready);
-
- ahci_start_engine(ap);
-
- if (online)
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static void ahci_postreset(struct ata_link *link, unsigned int *class)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 new_tmp, tmp;
-
- ata_std_postreset(link, class);
-
- /* Make sure port's ATAPI bit is set appropriately */
- new_tmp = tmp = readl(port_mmio + PORT_CMD);
- if (*class == ATA_DEV_ATAPI)
- new_tmp |= PORT_CMD_ATAPI;
- else
- new_tmp &= ~PORT_CMD_ATAPI;
- if (new_tmp != tmp) {
- writel(new_tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
- }
-}
-
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
-{
- struct scatterlist *sg;
- struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
- unsigned int si;
-
- VPRINTK("ENTER\n");
-
- /*
- * Next, the S/G list.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- dma_addr_t addr = sg_dma_address(sg);
- u32 sg_len = sg_dma_len(sg);
-
- ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
- ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
- }
-
- return si;
-}
-
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
-
- if (!sata_pmp_attached(ap) || pp->fbs_enabled)
- return ata_std_qc_defer(qc);
- else
- return sata_pmp_qc_defer_cmd_switch(qc);
-}
-
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
- int is_atapi = ata_is_atapi(qc->tf.protocol);
- void *cmd_tbl;
- u32 opts;
- const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
-
- /*
- * Fill in command table information. First, the header,
- * a SATA Register - Host to Device command FIS.
- */
- cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
-
- ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
- if (is_atapi) {
- memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
- memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
- }
-
- n_elem = 0;
- if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = ahci_fill_sg(qc, cmd_tbl);
-
- /*
- * Fill in command slot information.
- */
- opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- opts |= AHCI_CMD_WRITE;
- if (is_atapi)
- opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-
- ahci_fill_cmd_slot(pp, qc->tag, opts);
-}
-
-static void ahci_fbs_dec_intr(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int retries = 3;
-
- DPRINTK("ENTER\n");
- BUG_ON(!pp->fbs_enabled);
-
- /* time to wait for DEC is not specified by AHCI spec,
- * add a retry loop for safety.
- */
- writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- while ((fbs & PORT_FBS_DEC) && retries--) {
- udelay(1);
- fbs = readl(port_mmio + PORT_FBS);
- }
-
- if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
-}
-
-static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_eh_info *host_ehi = &ap->link.eh_info;
- struct ata_link *link = NULL;
- struct ata_queued_cmd *active_qc;
- struct ata_eh_info *active_ehi;
- bool fbs_need_dec = false;
- u32 serror;
-
- /* determine active link with error */
- if (pp->fbs_enabled) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int pmp = fbs >> PORT_FBS_DWE_OFFSET;
-
- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
- ata_link_online(&ap->pmp_link[pmp])) {
- link = &ap->pmp_link[pmp];
- fbs_need_dec = true;
- }
-
- } else
- ata_for_each_link(link, ap, EDGE)
- if (ata_link_active(link))
- break;
-
- if (!link)
- link = &ap->link;
-
- active_qc = ata_qc_from_tag(ap, link->active_tag);
- active_ehi = &link->eh_info;
-
- /* record irq stat */
- ata_ehi_clear_desc(host_ehi);
- ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
-
- /* AHCI needs SError cleared; otherwise, it might lock up */
- ahci_scr_read(&ap->link, SCR_ERROR, &serror);
- ahci_scr_write(&ap->link, SCR_ERROR, serror);
- host_ehi->serror |= serror;
-
- /* some controllers set IRQ_IF_ERR on device errors, ignore it */
- if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
- irq_stat &= ~PORT_IRQ_IF_ERR;
-
- if (irq_stat & PORT_IRQ_TF_ERR) {
- /* If qc is active, charge it; otherwise, the active
- * link. There's no active qc on NCQ errors. It will
- * be determined by EH by reading log page 10h.
- */
- if (active_qc)
- active_qc->err_mask |= AC_ERR_DEV;
- else
- active_ehi->err_mask |= AC_ERR_DEV;
-
- if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
- host_ehi->serror &= ~SERR_INTERNAL;
- }
-
- if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi,
- "unknown FIS %08x %08x %08x %08x" ,
- unk[0], unk[1], unk[2], unk[3]);
- }
-
- if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi, "incorrect PMP");
- }
-
- if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
- host_ehi->err_mask |= AC_ERR_HOST_BUS;
- host_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(host_ehi, "host bus error");
- }
-
- if (irq_stat & PORT_IRQ_IF_ERR) {
- if (fbs_need_dec)
- active_ehi->err_mask |= AC_ERR_DEV;
- else {
- host_ehi->err_mask |= AC_ERR_ATA_BUS;
- host_ehi->action |= ATA_EH_RESET;
- }
-
- ata_ehi_push_desc(host_ehi, "interface fatal error");
- }
-
- if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
- ata_ehi_hotplugged(host_ehi);
- ata_ehi_push_desc(host_ehi, "%s",
- irq_stat & PORT_IRQ_CONNECT ?
- "connection status changed" : "PHY RDY changed");
- }
-
- /* okay, let's hand over to EH */
-
- if (irq_stat & PORT_IRQ_FREEZE)
- ata_port_freeze(ap);
- else if (fbs_need_dec) {
- ata_link_abort(link);
- ahci_fbs_dec_intr(ap);
- } else
- ata_port_abort(ap);
-}
-
-static void ahci_port_intr(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_eh_info *ehi = &ap->link.eh_info;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
- int rc;
-
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
- /* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
- status &= ~PORT_IRQ_BAD_PMP;
-
- /* If we are getting PhyRdy, this is
- * just a power state change, we should
- * clear out this, plus the PhyRdy/Comm
- * Wake bits from Serror
- */
- if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
- (status & PORT_IRQ_PHYRDY)) {
- status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
- }
-
- if (unlikely(status & PORT_IRQ_ERROR)) {
- ahci_error_intr(ap, status);
- return;
- }
-
- if (status & PORT_IRQ_SDB_FIS) {
- /* If SNotification is available, leave notification
- * handling to sata_async_notification(). If not,
- * emulate it by snooping SDB FIS RX area.
- *
- * Snooping FIS RX area is probably cheaper than
- * poking SNotification but some controllers which
- * implement SNotification, ICH9 for example, don't
- * store AN SDB FIS into receive area.
- */
- if (hpriv->cap & HOST_CAP_SNTF)
- sata_async_notification(ap);
- else {
- /* If the 'N' bit in word 0 of the FIS is set,
- * we just received asynchronous notification.
- * Tell libata about it.
- *
- * Lack of SNotification should not appear in
- * ahci 1.2, so the workaround is unnecessary
- * when FBS is enabled.
- */
- if (pp->fbs_enabled)
- WARN_ON_ONCE(1);
- else {
- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- u32 f0 = le32_to_cpu(f[0]);
- if (f0 & (1 << 15))
- sata_async_notification(ap);
- }
- }
- }
-
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
-}
-
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int i, handled = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = host->iomap[AHCI_PCI_BAR];
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- if (!(irq_masked & (1 << i)))
- continue;
-
- ap = host->ports[i];
- if (ap) {
- ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
- } else {
- VPRINTK("port %u (no irq)\n", i);
- if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
- }
-
- handled = 1;
- }
-
- /* HOST_IRQ_STAT behaves as level triggered latch meaning that
- * it should be cleared after all the port events are cleared;
- * otherwise, it will raise a spurious interrupt after each
- * valid one. Please read section 10.6.2 of ahci 1.1 for more
- * information.
- *
- * Also, use the unmasked value to clear interrupt as spurious
- * pending event on a dummy port might cause screaming IRQ.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(handled);
-}
-
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
-
- /* Keep track of the currently active link. It will be used
- * in completion path to determine whether NCQ phase is in
- * progress.
- */
- pp->active_link = qc->dev->link;
-
- if (qc->tf.protocol == ATA_PROT_NCQ)
- writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
-
- if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
- u32 fbs = readl(port_mmio + PORT_FBS);
- fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
- fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
- writel(fbs, port_mmio + PORT_FBS);
- pp->fbs_last_dev = qc->dev->link->pmp;
- }
-
- writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
-
- ahci_sw_activity(qc->dev->link);
-
- return 0;
-}
-
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ahci_port_priv *pp = qc->ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
- if (pp->fbs_enabled)
- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
-
- ata_tf_from_fis(d2h_fis, &qc->result_tf);
- return true;
-}
-
-static void ahci_freeze(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
-
- /* turn IRQ off */
- writel(0, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_thaw(struct ata_port *ap)
-{
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* clear IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- writel(tmp, port_mmio + PORT_IRQ_STAT);
- writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
-
- /* turn IRQ back on */
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_error_handler(struct ata_port *ap)
-{
- if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
- /* restart engine */
- ahci_stop_engine(ap);
- ahci_start_engine(ap);
- }
-
- sata_pmp_error_handler(ap);
-}
-
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- /* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap);
-}
-
-static void ahci_enable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- } else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
-
- ahci_start_engine(ap);
-}
-
-static void ahci_disable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if ((fbs & PORT_FBS_EN) == 0) {
- pp->fbs_enabled = false;
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
- else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
- pp->fbs_enabled = false;
- }
-
- ahci_start_engine(ap);
-}
-
-static void ahci_pmp_attach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd |= PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- ahci_enable_fbs(ap);
-
- pp->intr_mask |= PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_pmp_detach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- ahci_disable_fbs(ap);
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd &= ~PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static int ahci_port_resume(struct ata_port *ap)
-{
- ahci_power_up(ap);
- ahci_start_port(ap);
-
- if (sata_pmp_attached(ap))
- ahci_pmp_attach(ap);
- else
- ahci_pmp_detach(ap);
-
- return 0;
-}
-
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
-{
- const char *emsg = NULL;
- int rc;
-
- rc = ahci_deinit_port(ap, &emsg);
- if (rc == 0)
- ahci_power_down(ap);
- else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
- }
-
- return rc;
-}
-
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
+ ahci_pci_init_controller(host);
}
ata_host_resume(host);
@@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
-static int ahci_port_start(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct device *dev = ap->host->dev;
- struct ahci_port_priv *pp;
- void *mem;
- dma_addr_t mem_dma;
- size_t dma_sz, rx_fis_sz;
-
- pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- /* check FBS capability */
- if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd = readl(port_mmio + PORT_CMD);
- if (cmd & PORT_CMD_FBSCP)
- pp->fbs_supported = true;
- else
- dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
- }
-
- if (pp->fbs_supported) {
- dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ * 16;
- } else {
- dma_sz = AHCI_PORT_PRIV_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ;
- }
-
- mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- memset(mem, 0, dma_sz);
-
- /*
- * First item in chunk of DMA memory: 32-slot command table,
- * 32 bytes each in size
- */
- pp->cmd_slot = mem;
- pp->cmd_slot_dma = mem_dma;
-
- mem += AHCI_CMD_SLOT_SZ;
- mem_dma += AHCI_CMD_SLOT_SZ;
-
- /*
- * Second item: Received-FIS area
- */
- pp->rx_fis = mem;
- pp->rx_fis_dma = mem_dma;
-
- mem += rx_fis_sz;
- mem_dma += rx_fis_sz;
-
- /*
- * Third item: data area for storing a single command
- * and its scatter-gather table
- */
- pp->cmd_tbl = mem;
- pp->cmd_tbl_dma = mem_dma;
-
- /*
- * Save off initial list of interrupts to be enabled.
- * This could be changed later
- */
- pp->intr_mask = DEF_PORT_IRQ;
-
- ap->private_data = pp;
-
- /* engage engines, captain */
- return ahci_port_resume(ap);
-}
-
-static void ahci_port_stop(struct ata_port *ap)
-{
- const char *emsg = NULL;
- int rc;
-
- /* de-initialize port */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
-}
-
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
return 0;
}
-static void ahci_print_info(struct ata_host *host)
+static void ahci_pci_print_info(struct ata_host *host)
{
- struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 vers, cap, cap2, impl, speed;
- const char *speed_s;
u16 cc;
const char *scc_s;
- vers = readl(mmio + HOST_VERSION);
- cap = hpriv->cap;
- cap2 = hpriv->cap2;
- impl = hpriv->port_map;
-
- speed = (cap >> 20) & 0xf;
- if (speed == 1)
- speed_s = "1.5";
- else if (speed == 2)
- speed_s = "3";
- else if (speed == 3)
- speed_s = "6";
- else
- speed_s = "?";
-
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
@@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host)
else
scc_s = "unknown";
- dev_printk(KERN_INFO, &pdev->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
- ,
-
- (vers >> 24) & 0xff,
- (vers >> 16) & 0xff,
- (vers >> 8) & 0xff,
- vers & 0xff,
-
- ((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
- speed_s,
- impl,
- scc_s);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "flags: "
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s\n"
- ,
-
- cap & HOST_CAP_64 ? "64bit " : "",
- cap & HOST_CAP_NCQ ? "ncq " : "",
- cap & HOST_CAP_SNTF ? "sntf " : "",
- cap & HOST_CAP_MPS ? "ilck " : "",
- cap & HOST_CAP_SSS ? "stag " : "",
- cap & HOST_CAP_ALPM ? "pm " : "",
- cap & HOST_CAP_LED ? "led " : "",
- cap & HOST_CAP_CLO ? "clo " : "",
- cap & HOST_CAP_ONLY ? "only " : "",
- cap & HOST_CAP_PMP ? "pmp " : "",
- cap & HOST_CAP_FBS ? "fbs " : "",
- cap & HOST_CAP_PIO_MULTI ? "pio " : "",
- cap & HOST_CAP_SSC ? "slum " : "",
- cap & HOST_CAP_PART ? "part " : "",
- cap & HOST_CAP_CCC ? "ccc " : "",
- cap & HOST_CAP_EMS ? "ems " : "",
- cap & HOST_CAP_SXS ? "sxs " : "",
- cap2 & HOST_CAP2_APST ? "apst " : "",
- cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
- cap2 & HOST_CAP2_BOH ? "boh " : ""
- );
+ ahci_print_info(host, scc_s);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3308,41 +1112,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
+ hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
/* save initial config */
- ahci_save_initial_config(pdev, hpriv);
+ ahci_pci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
- /* Auto-activate optimization is supposed to be supported on
- all AHCI controllers indicating NCQ support, but it seems
- to be broken at least on some NVIDIA MCP79 chipsets.
- Until we get info on which NVIDIA chipsets don't have this
- issue, if any, disable AA on all NVIDIA AHCIs. */
- if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ /*
+ * Auto-activate optimization is supposed to be
+ * supported on all AHCI controllers indicating NCQ
+ * capability, but it seems to be broken on some
+ * chipsets including NVIDIAs.
+ */
+ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
- u8 messages;
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 em_loc = readl(mmio + HOST_EM_LOC);
- u32 em_ctl = readl(mmio + HOST_EM_CTL);
-
- messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
-
- /* we only support LED message type right now */
- if ((messages & 0x01) && (ahci_em_messages == 1)) {
- /* store em_loc */
- hpriv->em_loc = ((em_loc >> 16) * 4);
- pi.flags |= ATA_FLAG_EM;
- if (!(em_ctl & EM_CTL_ALHD))
- pi.flags |= ATA_FLAG_SW_ACTIVITY;
- }
- }
+ ahci_set_em_messages(hpriv, &pi);
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1163,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
- host->iomap = pcim_iomap_table(pdev);
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3395,7 +1185,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
- ap->em_message_type = ahci_em_messages;
+ ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
@@ -3414,12 +1204,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
- ahci_print_info(host);
+ ahci_pci_init_controller(host);
+ ahci_pci_print_info(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 00000000000..7113c572447
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,343 @@
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#ifndef _AHCI_H
+#define _AHCI_H
+
+#include <linux/libata.h>
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* hpriv->flags bits */
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+
+ /* ap->flags bits */
+
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+	EM_CTL_MR		= (1 << 0), /* Message Received */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+ EM_CTL_XMT = (1 << 25), /* Transmit Only */
+ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+
+ /* em message type */
+ EM_MSG_TYPE_LED = (1 << 0), /* LED */
+ EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+};
+
+struct ahci_cmd_hdr {
+ __le32 opts;
+ __le32 status;
+ __le32 tbl_addr;
+ __le32 tbl_addr_hi;
+ __le32 reserved[4];
+};
+
+struct ahci_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 flags_size;
+};
+
+struct ahci_em_priv {
+ enum sw_activity blink_policy;
+ struct timer_list timer;
+ unsigned long saved_activity;
+ unsigned long activity;
+ unsigned long led_state;
+};
+
+struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+ dma_addr_t cmd_slot_dma;
+ void *cmd_tbl;
+ dma_addr_t cmd_tbl_dma;
+ void *rx_fis;
+ dma_addr_t rx_fis_dma;
+ /* for NCQ spurious interrupt analysis */
+ unsigned int ncq_saw_d2h:1;
+ unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
+ u32 intr_mask; /* interrupts to enable */
+ bool fbs_supported; /* set iff FBS is supported */
+ bool fbs_enabled; /* set iff FBS is enabled */
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+};
+
+struct ahci_host_priv {
+	void __iomem *		mmio;		/* bus-independent mem map */
+ unsigned int flags; /* AHCI_HFLAG_* */
+ u32 cap; /* cap to use */
+ u32 cap2; /* cap2 to use */
+ u32 port_map; /* port map to use */
+ u32 saved_cap; /* saved initial cap */
+ u32 saved_cap2; /* saved initial cap2 */
+ u32 saved_port_map; /* saved initial port_map */
+ u32 em_loc; /* enclosure management location */
+	u32			em_buf_sz;	/* EM buffer size in bytes */
+ u32 em_msg_type; /* EM message type */
+};
+
+extern int ahci_ignore_sss;
+
+extern struct scsi_host_template ahci_sht;
+extern struct ata_port_operations ahci_ops;
+
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map);
+void ahci_init_controller(struct ata_host *host);
+int ahci_reset_controller(struct ata_host *host);
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+
+int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_engine(struct ata_port *ap);
+int ahci_check_ready(struct ata_link *link);
+int ahci_kick_engine(struct ata_port *ap);
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi);
+int ahci_reset_em(struct ata_host *host);
+irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+void ahci_print_info(struct ata_host *host, const char *scc_s);
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+ unsigned int port_no)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+ return __ahci_port_base(ap->host, ap->port_no);
+}
+
+static inline int ahci_nr_ports(u32 cap)
+{
+ return (cap & 0x1f) + 1;
+}
+
+#endif /* _AHCI_H */
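
For orientation, here is a minimal sketch (not part of the patch) of how the accessors above map onto the AHCI register layout: global registers sit at the start of the ABAR, each implemented port's register block starts at 0x100 + port_no * 0x80 (as encoded in __ahci_port_base()), and ahci_nr_ports() decodes CAP.NP. The helper name ahci_dump_port_cmd() and its use of dev_printk() for debug output are illustrative assumptions; it would live in a driver .c that includes "ahci.h" alongside <linux/libata.h>.

/* Hypothetical debug helper, assuming hpriv->mmio and hpriv->port_map
 * have already been set up (e.g. by ahci_save_initial_config()).
 */
static void ahci_dump_port_cmd(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int i;

	dev_printk(KERN_DEBUG, host->dev, "CAP.NP reports %d ports, port_map 0x%x\n",
		   ahci_nr_ports(hpriv->cap), hpriv->port_map);

	for (i = 0; i < AHCI_MAX_PORTS; i++) {
		void __iomem *port_mmio;

		if (!(hpriv->port_map & (1 << i)))
			continue;		/* port not implemented */

		/* per-port register block: ABAR + 0x100 + i * 0x80 */
		port_mmio = __ahci_port_base(host, i);
		dev_printk(KERN_DEBUG, host->dev, "port %d: PORT_CMD 0x%x\n",
			   i, readl(port_mmio + PORT_CMD));
	}
}
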
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 00000000000..5e11b160f24
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,192 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+static int __init ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_port_info pi = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ };
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ struct resource *mem;
+ int irq;
+ int n_ports;
+ int i;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "no mmio space\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdata && pdata->ata_port_info)
+ pi = *pdata->ata_port_info;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+ if (!hpriv->mmio) {
+ dev_err(dev, "can't map %pR\n", mem);
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ ahci_save_initial_config(dev, hpriv,
+ pdata ? pdata->force_port_map : 0,
+ pdata ? pdata->mask_port_map : 0);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port, at other times that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR", mem);
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = hpriv->em_msg_type;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ goto err0;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_sht);
+ if (rc)
+ goto err0;
+
+ return 0;
+err0:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+ return rc;
+}
+
+static int __devexit ahci_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ return 0;
+}
+
+static struct platform_driver ahci_driver = {
+ .probe = ahci_probe,
+ .remove = __devexit_p(ahci_remove),
+ .driver = {
+ .name = "ahci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ahci_init(void)
+{
+ return platform_driver_probe(&ahci_driver, ahci_probe);
+}
+module_init(ahci_init);
+
+static void __exit ahci_exit(void)
+{
+ platform_driver_unregister(&ahci_driver);
+}
+module_exit(ahci_exit);
+
+MODULE_DESCRIPTION("AHCI SATA platform driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci");
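
A hedged example (not part of the patch) of how a board file might instantiate the new "ahci" platform driver. The base address, window size, and IRQ number are placeholder values, and the ahci_platform_data hooks (init/exit, port-map overrides, ata_port_info) are simply left unset; an SoC port would fill these in as needed.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>

#define BOARD_AHCI_BASE	0xffe08000		/* placeholder MMIO base */
#define BOARD_AHCI_IRQ	74			/* placeholder interrupt */

static struct resource board_ahci_resources[] = {
	{
		.start	= BOARD_AHCI_BASE,
		.end	= BOARD_AHCI_BASE + 0x1fff,	/* placeholder window */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= BOARD_AHCI_IRQ,
		.end	= BOARD_AHCI_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ahci_platform_data board_ahci_pdata;	/* all hooks optional */

static struct platform_device board_ahci_device = {
	.name		= "ahci",	/* matches ahci_driver.driver.name above */
	.id		= -1,
	.resource	= board_ahci_resources,
	.num_resources	= ARRAY_SIZE(board_ahci_resources),
	.dev		= {
		.platform_data	= &board_ahci_pdata,
	},
};

Registering board_ahci_device from the machine init code (platform_device_register(&board_ahci_device)) would then bind it to ahci_probe() above via the matching driver name.
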
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 33fb614f978..573158a9668 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return rc;
pcim_pin_device(dev);
}
- return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0);
}
static struct pci_device_id ata_generic[] = {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9b..7409f98d2ae 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
- * The chipsets all follow very much the same design. The orginal Triton
+ * The chipsets all follow very much the same design. The original Triton
 * series chipsets do _not_ support independent device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
@@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
hpriv->map = piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
}
static void piix_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 00000000000..1984a6e89e8
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2216 @@
+/*
+ * libahci.c - Common AHCI SATA low-level routines
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+static int ahci_skip_host_reset;
+int ahci_ignore_sss;
+EXPORT_SYMBOL_GPL(ahci_ignore_sss);
+
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
+
+module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
+MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static void ahci_dev_config(struct ata_device *dev);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+ enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size);
+
+static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+ ahci_read_em_buffer, ahci_store_em_buffer);
+
+static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+ &dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
+ &dev_attr_em_buffer,
+ NULL
+};
+
+static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
+ &dev_attr_unload_heads,
+ NULL
+};
+
+struct scsi_host_template ahci_sht = {
+ ATA_NCQ_SHT("ahci"),
+ .can_queue = AHCI_MAX_CMDS - 1,
+ .sg_tablesize = AHCI_MAX_SG,
+ .dma_boundary = AHCI_DMA_BOUNDARY,
+ .shost_attrs = ahci_shost_attrs,
+ .sdev_attrs = ahci_sdev_attrs,
+};
+EXPORT_SYMBOL_GPL(ahci_sht);
+
+struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ahci_pmp_qc_defer,
+ .qc_prep = ahci_qc_prep,
+ .qc_issue = ahci_qc_issue,
+ .qc_fill_rtf = ahci_qc_fill_rtf,
+
+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+ .softreset = ahci_softreset,
+ .hardreset = ahci_hardreset,
+ .postreset = ahci_postreset,
+ .pmp_softreset = ahci_softreset,
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+ .dev_config = ahci_dev_config,
+
+ .scr_read = ahci_scr_read,
+ .scr_write = ahci_scr_write,
+ .pmp_attach = ahci_pmp_attach,
+ .pmp_detach = ahci_pmp_detach,
+
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
+ .em_show = ahci_led_show,
+ .em_store = ahci_led_store,
+ .sw_activity_show = ahci_activity_show,
+ .sw_activity_store = ahci_activity_store,
+#ifdef CONFIG_PM
+ .port_suspend = ahci_port_suspend,
+ .port_resume = ahci_port_resume,
+#endif
+ .port_start = ahci_port_start,
+ .port_stop = ahci_port_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_ops);
+
+int ahci_em_messages = 1;
+EXPORT_SYMBOL_GPL(ahci_em_messages);
+module_param(ahci_em_messages, int, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+ "AHCI Enclosure Management Message control (0 = off, 1 = on)");
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+ int i;
+ u32 tmp;
+
+ /* turn on AHCI_EN */
+ tmp = readl(mmio + HOST_CTL);
+ if (tmp & HOST_AHCI_EN)
+ return;
+
+ /* Some controllers need AHCI_EN to be written multiple times.
+ * Try a few times before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
+ if (tmp & HOST_AHCI_EN)
+ return;
+ msleep(10);
+ }
+
+ WARN_ON(1);
+}
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap2);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ size_t count;
+ int i;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!(em_ctl & EM_CTL_MR)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!(em_ctl & EM_CTL_SMB))
+ em_mmio += hpriv->em_buf_sz;
+
+ count = hpriv->em_buf_sz;
+
+ /* the count should not be larger than PAGE_SIZE */
+ if (count > PAGE_SIZE) {
+ if (printk_ratelimit())
+ ata_port_printk(ap, KERN_WARNING,
+ "EM read buffer size too large: "
+ "buffer size %u, page size %lu\n",
+ hpriv->em_buf_sz, PAGE_SIZE);
+ count = PAGE_SIZE;
+ }
+
+ for (i = 0; i < count; i += 4) {
+ msg = readl(em_mmio + i);
+ buf[i] = msg & 0xff;
+ buf[i + 1] = (msg >> 8) & 0xff;
+ buf[i + 2] = (msg >> 16) & 0xff;
+ buf[i + 3] = (msg >> 24) & 0xff;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return i;
+}
+
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ int i;
+
+ /* check size validity */
+ if (!(ap->flags & ATA_FLAG_EM) ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
+ size % 4 || size > hpriv->em_buf_sz)
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < size; i += 4) {
+ msg = buf[i] | buf[i + 1] << 8 |
+ buf[i + 2] << 16 | buf[i + 3] << 24;
+ writel(msg, em_mmio + i);
+ }
+
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return size;
+}
+
+/**
+ * ahci_save_initial_config - Save and fixup initial config values
+ * @dev: target AHCI device
+ * @hpriv: host private area to store config values
+ * @force_port_map: force port map to a specified value
+ * @mask_port_map: mask out particular bits from port map
+ *
+ * Some registers containing configuration info might be setup by
+ * BIOS and might be cleared on reset. This function saves the
+ * initial values of those registers into @hpriv such that they
+ * can be restored after controller reset.
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
+ * LOCKING:
+ * None.
+ */
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ void __iomem *mmio = hpriv->mmio;
+ u32 cap, cap2, vers, port_map;
+ int i;
+
+ /* make sure AHCI mode is enabled before accessing CAP */
+ ahci_enable_ahci(mmio);
+
+ /* Values prefixed with saved_ are written back to host after
+ * reset. Values without are used for driver operation.
+ */
+ hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+ /* CAP2 register is only defined for AHCI 1.2 and later */
+ vers = readl(mmio + HOST_VERSION);
+ if ((vers >> 16) > 1 ||
+ ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
+ hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
+ else
+ hpriv->saved_cap2 = cap2 = 0;
+
+ /* some chips have errata preventing 64bit use */
+ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do 64bit DMA, forcing 32bit\n");
+ cap &= ~HOST_CAP_64;
+ }
+
+ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
+ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do NCQ, turning on CAP_NCQ\n");
+ cap |= HOST_CAP_NCQ;
+ }
+
+ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do PMP, turning off CAP_PMP\n");
+ cap &= ~HOST_CAP_PMP;
+ }
+
+ if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
+ cap &= ~HOST_CAP_SNTF;
+ }
+
+ if (force_port_map && port_map != force_port_map) {
+ dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+ port_map = force_port_map;
+ }
+
+ if (mask_port_map) {
+ dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
+ port_map &= mask_port_map;
+ }
+
+ /* cross check port_map and cap.n_ports */
+ if (port_map) {
+ int map_ports = 0;
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
+
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
+ */
+ if (map_ports > ahci_nr_ports(cap)) {
+ dev_printk(KERN_WARNING, dev,
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
+ port_map = 0;
+ }
+ }
+
+ /* fabricate port_map from cap.nr_ports */
+ if (!port_map) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_printk(KERN_WARNING, dev,
+ "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+ /* write the fixed up value to the PI register */
+ hpriv->saved_port_map = port_map;
+ }
+
+ /* record values to use during operation */
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
+}
+EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+/**
+ * ahci_restore_initial_config - Restore initial config
+ * @host: target ATA host
+ *
+ * Restore initial config stored by ahci_save_initial_config().
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ writel(hpriv->saved_cap, mmio + HOST_CAP);
+ if (hpriv->saved_cap2)
+ writel(hpriv->saved_cap2, mmio + HOST_CAP2);
+ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_is_device_present(void __iomem *port_mmio)
+{
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
+
+ /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return 0;
+
+ /* Make sure PxSSTS.DET is 3h */
+ status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
+ if (status != 3)
+ return 0;
+ return 1;
+}
+
+void ahci_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ if (!ahci_is_device_present(port_mmio))
+ return;
+
+ /* start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+}
+EXPORT_SYMBOL_GPL(ahci_start_engine);
+
+int ahci_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_stop_engine);
+
+static void ahci_start_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* set FIS registers */
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->cmd_slot_dma >> 16) >> 16,
+ port_mmio + PORT_LST_ADDR_HI);
+ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->rx_fis_dma >> 16) >> 16,
+ port_mmio + PORT_FIS_ADDR_HI);
+ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+ /* enable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* flush */
+ readl(port_mmio + PORT_CMD);
+}
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* disable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= ~PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for completion, spec says 500ms, give it 1000 */
+ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+ PORT_CMD_FIS_ON, 10, 1000);
+ if (tmp & PORT_CMD_FIS_ON)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+ /* spin up device */
+ if (hpriv->cap & HOST_CAP_SSS) {
+ cmd |= PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+ }
+
+ /* wake up link */
+ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+	 * just to turn off link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+		 * if we came here with NOT_AVAILABLE,
+		 * it just means this is the first time we
+		 * have tried to enable ALPM - default to max performance,
+		 * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd, scontrol;
+
+ if (!(hpriv->cap & HOST_CAP_SSS))
+ return;
+
+ /* put device into listen mode, first set PxSCTL.DET to 0 */
+ scontrol = readl(port_mmio + PORT_SCR_CTL);
+ scontrol &= ~0xf;
+ writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+ /* then set PxCMD.SUD to 0 */
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+ cmd &= ~PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ ssize_t rc;
+ int i;
+
+ /* enable FIS reception */
+ ahci_start_fis_rx(ap);
+
+ /* enable DMA */
+ ahci_start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+
+			/* EM Transmit bit may be busy during init */
+ for (i = 0; i < EM_MAX_RETRY; i++) {
+ rc = ahci_transmit_led_message(ap,
+ emp->led_state,
+ 4);
+ if (rc == -EBUSY)
+ msleep(1);
+ else
+ break;
+ }
+ }
+ }
+
+ if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+ ata_for_each_link(link, ap, EDGE)
+ ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+ int rc;
+
+ /* disable DMA */
+ rc = ahci_stop_engine(ap);
+ if (rc) {
+ *emsg = "failed to stop engine";
+ return rc;
+ }
+
+ /* disable FIS reception */
+ rc = ahci_stop_fis_rx(ap);
+ if (rc) {
+ *emsg = "failed stop FIS RX";
+ return rc;
+ }
+
+ return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 tmp;
+
+	/* We must be in AHCI mode before using anything
+	 * AHCI-specific, such as HOST_RESET.
+ */
+ ahci_enable_ahci(mmio);
+
+ /* global controller reset */
+ if (!ahci_skip_host_reset) {
+ tmp = readl(mmio + HOST_CTL);
+ if ((tmp & HOST_RESET) == 0) {
+ writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ /*
+		 * To perform a host reset, the OS should set HOST_RESET
+		 * and poll until the bit reads back as "0".
+		 * The reset must complete within 1 second, or
+		 * the hardware should be considered fried.
+ */
+ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
+ HOST_RESET, 10, 1000);
+
+ if (tmp & HOST_RESET) {
+ dev_printk(KERN_ERR, host->dev,
+ "controller reset failed (0x%x)\n", tmp);
+ return -EIO;
+ }
+
+ /* turn on AHCI mode */
+ ahci_enable_ahci(mmio);
+
+ /* Some registers might be cleared on reset. Restore
+ * initial values.
+ */
+ ahci_restore_initial_config(host);
+ } else
+ dev_printk(KERN_INFO, host->dev,
+ "skipping global host reset\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_controller);
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+ return;
+
+ emp->activity++;
+ if (!timer_pending(&emp->timer))
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(unsigned long arg)
+{
+ struct ata_link *link = (struct ata_link *)arg;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ unsigned long led_message = emp->led_state;
+ u32 activity_led_state;
+ unsigned long flags;
+
+ led_message &= EM_MSG_LED_VALUE;
+ led_message |= ap->port_no | (link->pmp << 8);
+
+ /* check to see if we've had activity. If so,
+ * toggle state of LED and reset timer. If not,
+ * turn LED to desired idle state.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ if (emp->saved_activity != emp->activity) {
+ emp->saved_activity = emp->activity;
+ /* get the current LED state */
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+ if (activity_led_state)
+ activity_led_state = 0;
+ else
+ activity_led_state = 1;
+
+ /* clear old state */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ /* toggle state */
+ led_message |= (activity_led_state << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+ } else {
+ /* switch to idle */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+ if (emp->blink_policy == BLINK_OFF)
+ led_message |= (1 << 16);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ahci_transmit_led_message(ap, led_message, 4);
+}
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* init activity stats, setup timer */
+ emp->saved_activity = emp->activity = 0;
+ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+
+ /* check our blink policy and set flag for link if it's enabled */
+ if (emp->blink_policy)
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+int ahci_reset_em(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+ return -EINVAL;
+
+ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_em);
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+ u32 message[] = {0, 0};
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+	 * if we are still busy transmitting a previous message,
+	 * do not allow a new one to be queued
+ */
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
+ /*
+ * create message header - this is all zero except for
+ * the message size, which is 4 bytes.
+ */
+ message[0] |= (4 << 8);
+
+ /* ignore 0:4 of byte zero, fill in port info yourself */
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+ /* write message to EM_LOC */
+ writel(message[0], mmio + hpriv->em_loc);
+ writel(message[1], mmio + hpriv->em_loc+4);
+
+ /*
+ * tell hardware to transmit the message
+ */
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+ }
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ return size;
+}
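
/*
 * Illustration only (not part of the patch): per the EM_MSG_LED_* masks
 * in ahci.h, the 32-bit state word passed to ahci_transmit_led_message()
 * carries the HBA port in bits 0-3, the PMP slot in bits 8-15 and the
 * LED value in bits 16-31.  The helper name below is hypothetical.
 */
static u32 ahci_em_led_message(unsigned int port_no, unsigned int pmp, bool on)
{
	u32 state = 0;

	state |= port_no & EM_MSG_LED_HBA_PORT;		/* bits 0-3: HBA port */
	state |= (pmp << 8) & EM_MSG_LED_PMP_SLOT;	/* bits 8-15: PMP slot */
	if (on)
		state |= EM_MSG_LED_VALUE_ON;		/* bit 16: LED on */

	return state;
}
/* e.g. ahci_transmit_led_message(ap,
 *		ahci_em_led_message(ap->port_no, link->pmp, true), 4);
 */
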
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ int rc = 0;
+
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+ rc += sprintf(buf, "%lx\n", emp->led_state);
+ }
+ return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size)
+{
+ int state;
+ int pmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp;
+
+ state = simple_strtoul(buf, NULL, 0);
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+ * activity led through em_message
+ */
+ if (emp->blink_policy)
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ return ahci_transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ u32 port_led_state = emp->led_state;
+
+ /* save the desired Activity LED behavior */
+ if (val == OFF) {
+ /* clear LFLAG */
+ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+ /* set the LED to OFF */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ } else {
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+ if (val == BLINK_OFF) {
+ /* set LED to ON for idle */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ }
+ }
+ emp->blink_policy = val;
+ return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* display the saved value of activity behavior for this
+ * disk.
+ */
+ return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ int port_no, void __iomem *mmio,
+ void __iomem *port_mmio)
+{
+ const char *emsg = NULL;
+ int rc;
+ u32 tmp;
+
+ /* make sure port is not active */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ dev_warn(dev, "%s (%d)\n", emsg, rc);
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
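+ /* ack any stale pending bit for this port in the global IRQ status */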
+ writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
+void ahci_init_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ port_mmio = ahci_port_base(ap);
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ahci_port_init(host->dev, ap, i, mmio, port_mmio);
+ }
+
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+EXPORT_SYMBOL_GPL(ahci_init_controller);
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ dev->max_sectors = 255;
+ ata_dev_printk(dev, KERN_INFO,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ }
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ u32 tmp;
+
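+ /* PORT_SIG latches the initial D2H signature FIS; unpack it into
+ * a taskfile so ata_dev_classify() can tell ATA from ATAPI
+ */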
+ tmp = readl(port_mmio + PORT_SIG);
+ tf.lbah = (tmp >> 24) & 0xff;
+ tf.lbam = (tmp >> 16) & 0xff;
+ tf.lbal = (tmp >> 8) & 0xff;
+ tf.nsect = (tmp) & 0xff;
+
+ return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
+{
+ dma_addr_t cmd_tbl_dma;
+
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
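+ /* split the 64-bit table address into low/high dwords; the
+ * double 16-bit shift avoids an oversized shift when
+ * dma_addr_t is only 32 bits wide
+ */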
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+
+int ahci_kick_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 tmp;
+ int busy, rc;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
+
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
+ rc = 0;
+ goto out_restart;
+ }
+
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ rc = 0;
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ rc = -EIO;
+
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+{
+ const u32 cmd_fis_len = 5; /* five dwords */
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* prepare for SRST (AHCI-1.1 10.4.1) */
+ rc = ahci_kick_engine(ap);
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING,
+ "failed to reset engine (errno=%d)\n", rc);
+
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+ msecs = 0;
+ now = jiffies;
+ if (time_after(deadline, now))
+ msecs = jiffies_to_msecs(deadline - now);
+
+ tf.ctl |= ATA_SRST;
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+ rc = -EIO;
+ reason = "1st FIS failed";
+ goto fail;
+ }
+
+ /* spec says at least 5us, but be generous and sleep for 1ms */
+ msleep(1);
+
+ /* issue the second D2H Register FIS */
+ tf.ctl &= ~ATA_SRST;
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, check_ready);
+ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+ /*
+ * Workaround for cases where link online status can't
+ * be trusted. Treat device readiness timeout as link
+ * offline.
+ */
+ ata_link_printk(link, KERN_INFO,
+ "device not ready, treating as offline\n");
+ *class = ATA_DEV_NONE;
+ } else if (rc) {
+ /* link occupied, -ENODEV too is an error */
+ reason = "device not ready";
+ goto fail;
+ } else
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ fail:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return rc;
+}
+
+int ahci_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ return ata_check_ready(status);
+}
+EXPORT_SYMBOL_GPL(ahci_check_ready);
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+
+ DPRINTK("ENTER\n");
+
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+EXPORT_SYMBOL_GPL(ahci_do_softreset);
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
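+ /* 0x80 is the BSY bit; the area reads busy until a real D2H FIS arrives */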
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 new_tmp, tmp;
+
+ ata_std_postreset(link, class);
+
+ /* Make sure port's ATAPI bit is set appropriately */
+ new_tmp = tmp = readl(port_mmio + PORT_CMD);
+ if (*class == ATA_DEV_ATAPI)
+ new_tmp |= PORT_CMD_ATAPI;
+ else
+ new_tmp &= ~PORT_CMD_ATAPI;
+ if (new_tmp != tmp) {
+ writel(new_tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+ }
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
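+ /* each PRD entry holds the buffer address split into low/high
+ * dwords plus the transfer byte count minus one
+ */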
+ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ }
+
+ return si;
+}
+
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+ return ata_std_qc_defer(qc);
+ else
+ return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ */
+ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int retries = 3;
+
+ DPRINTK("ENTER\n");
+ BUG_ON(!pp->fbs_enabled);
+
+ /* time to wait for DEC is not specified by AHCI spec,
+ * add a retry loop for safety.
+ */
+ writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ while ((fbs & PORT_FBS_DEC) && retries--) {
+ udelay(1);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
+ if (fbs & PORT_FBS_DEC)
+ dev_printk(KERN_ERR, ap->host->dev,
+ "failed to clear device error\n");
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *host_ehi = &ap->link.eh_info;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *active_qc;
+ struct ata_eh_info *active_ehi;
+ bool fbs_need_dec = false;
+ u32 serror;
+
+ /* determine active link with error */
+ if (pp->fbs_enabled) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
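+ /* PORT_FBS.DWE names the PMP device that raised the error */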
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+ ata_link_online(&ap->pmp_link[pmp])) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+
+ } else
+ ata_for_each_link(link, ap, EDGE)
+ if (ata_link_active(link))
+ break;
+
+ if (!link)
+ link = &ap->link;
+
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ active_ehi = &link->eh_info;
+
+ /* record irq stat */
+ ata_ehi_clear_desc(host_ehi);
+ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
+ host_ehi->serror |= serror;
+
+ /* some controllers set IRQ_IF_ERR on device errors, ignore it */
+ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+ irq_stat &= ~PORT_IRQ_IF_ERR;
+
+ if (irq_stat & PORT_IRQ_TF_ERR) {
+ /* If qc is active, charge it; otherwise, the active
+ * link. There's no active qc on NCQ errors. It will
+ * be determined by EH by reading log page 10h.
+ */
+ if (active_qc)
+ active_qc->err_mask |= AC_ERR_DEV;
+ else
+ active_ehi->err_mask |= AC_ERR_DEV;
+
+ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+ host_ehi->serror &= ~SERR_INTERNAL;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi,
+ "unknown FIS %08x %08x %08x %08x" ,
+ unk[0], unk[1], unk[2], unk[3]);
+ }
+
+ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi, "incorrect PMP");
+ }
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ host_ehi->err_mask |= AC_ERR_HOST_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "host bus error");
+ }
+
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ if (fbs_need_dec)
+ active_ehi->err_mask |= AC_ERR_DEV;
+ else {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ }
+
+ ata_ehi_push_desc(host_ehi, "interface fatal error");
+ }
+
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ ata_ehi_hotplugged(host_ehi);
+ ata_ehi_push_desc(host_ehi, "%s",
+ irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }
+
+ /* okay, let's hand over to EH */
+
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else if (fbs_need_dec) {
+ ata_link_abort(link);
+ ahci_fbs_dec_intr(ap);
+ } else
+ ata_port_abort(ap);
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+ u32 status, qc_active = 0;
+ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ /* ignore BAD_PMP while resetting */
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+ /* If hotplug is disabled and we are getting PhyRdy, this is
+ * just a power state change; ignore it and clear the
+ * PhyRdy/Comm Wake bits from SError
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
+ }
+
+ if (status & PORT_IRQ_SDB_FIS) {
+ /* If SNotification is available, leave notification
+ * handling to sata_async_notification(). If not,
+ * emulate it by snooping SDB FIS RX area.
+ *
+ * Snooping FIS RX area is probably cheaper than
+ * poking SNotification but some controllers which
+ * implement SNotification, ICH9 for example, don't
+ * store AN SDB FIS into receive area.
+ */
+ if (hpriv->cap & HOST_CAP_SNTF)
+ sata_async_notification(ap);
+ else {
+ /* If the 'N' bit in word 0 of the FIS is set,
+ * we just received asynchronous notification.
+ * Tell libata about it.
+ *
+ * Lack of SNotification should not appear in
+ * ahci 1.2, so the workaround is unnecessary
+ * when FBS is enabled.
+ */
+ if (pp->fbs_enabled)
+ WARN_ON_ONCE(1);
+ else {
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
+ }
+ }
+
+ /* pp->active_link is not reliable once FBS is enabled, both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+
+ /* while resetting, invalid completions are expected */
+ if (unlikely(rc < 0 && !resetting)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+irqreturn_t ahci_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int i, handled = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_port_intr(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+
+ handled = 1;
+ }
+
+ /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
+ * it should be cleared after all the port events are cleared;
+ * otherwise, it will raise a spurious interrupt after each
+ * valid one. Please read section 10.6.2 of ahci 1.1 for more
+ * information.
+ *
+ * Also, use the unmasked value to clear interrupt as spurious
+ * pending event on a dummy port might cause screaming IRQ.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+EXPORT_SYMBOL_GPL(ahci_interrupt);
+
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* Keep track of the currently active link. It will be used
+ * in completion path to determine whether NCQ phase is in
+ * progress.
+ */
+ pp->active_link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+
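+ /* with FBS, retarget the port's FBS DEV field at the PMP device
+ * we are about to issue to
+ */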
+ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(fbs, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = qc->dev->link->pmp;
+ }
+
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+
+ ahci_sw_activity(qc->dev->link);
+
+ return 0;
+}
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+ if (pp->fbs_enabled)
+ d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+ ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap);
+}
+
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ } else
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if ((fbs & PORT_FBS_EN) == 0) {
+ pp->fbs_enabled = false;
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN)
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ else {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ pp->fbs_enabled = false;
+ }
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd |= PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ ahci_enable_fbs(ap);
+
+ pp->intr_mask |= PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ ahci_disable_fbs(ap);
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd &= ~PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ if (sata_pmp_attached(ap))
+ ahci_pmp_attach(ap);
+ else
+ ahci_pmp_detach(ap);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc == 0)
+ ahci_power_down(ap);
+ else {
+ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ahci_start_port(ap);
+ }
+
+ return rc;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else
+ dev_printk(KERN_WARNING, dev,
+ "The port is not capable of FBS\n");
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, dma_sz);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ /* de-initialize port */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+}
+
+void ahci_print_info(struct ata_host *host, const char *scc_s)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 vers, cap, cap2, impl, speed;
+ const char *speed_s;
+
+ vers = readl(mmio + HOST_VERSION);
+ cap = hpriv->cap;
+ cap2 = hpriv->cap2;
+ impl = hpriv->port_map;
+
+ speed = (cap >> 20) & 0xf;
+ if (speed == 1)
+ speed_s = "1.5";
+ else if (speed == 2)
+ speed_s = "3";
+ else if (speed == 3)
+ speed_s = "6";
+ else
+ speed_s = "?";
+
+ dev_info(host->dev,
+ "AHCI %02x%02x.%02x%02x "
+ "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ ,
+
+ (vers >> 24) & 0xff,
+ (vers >> 16) & 0xff,
+ (vers >> 8) & 0xff,
+ vers & 0xff,
+
+ ((cap >> 8) & 0x1f) + 1,
+ (cap & 0x1f) + 1,
+ speed_s,
+ impl,
+ scc_s);
+
+ dev_info(host->dev,
+ "flags: "
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s\n"
+ ,
+
+ cap & HOST_CAP_64 ? "64bit " : "",
+ cap & HOST_CAP_NCQ ? "ncq " : "",
+ cap & HOST_CAP_SNTF ? "sntf " : "",
+ cap & HOST_CAP_MPS ? "ilck " : "",
+ cap & HOST_CAP_SSS ? "stag " : "",
+ cap & HOST_CAP_ALPM ? "pm " : "",
+ cap & HOST_CAP_LED ? "led " : "",
+ cap & HOST_CAP_CLO ? "clo " : "",
+ cap & HOST_CAP_ONLY ? "only " : "",
+ cap & HOST_CAP_PMP ? "pmp " : "",
+ cap & HOST_CAP_FBS ? "fbs " : "",
+ cap & HOST_CAP_PIO_MULTI ? "pio " : "",
+ cap & HOST_CAP_SSC ? "slum " : "",
+ cap & HOST_CAP_PART ? "part " : "",
+ cap & HOST_CAP_CCC ? "ccc " : "",
+ cap & HOST_CAP_EMS ? "ems " : "",
+ cap & HOST_CAP_SXS ? "sxs " : "",
+ cap2 & HOST_CAP2_APST ? "apst " : "",
+ cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
+ cap2 & HOST_CAP2_BOH ? "boh " : ""
+ );
+}
+EXPORT_SYMBOL_GPL(ahci_print_info);
+
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi)
+{
+ u8 messages;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_loc = readl(mmio + HOST_EM_LOC);
+ u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+ if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
+ return;
+
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+ if (messages) {
+ /* store em_loc */
+ hpriv->em_loc = ((em_loc >> 16) * 4);
+ hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
+ hpriv->em_msg_type = messages;
+ pi->flags |= ATA_FLAG_EM;
+ if (!(em_ctl & EM_CTL_ALHD))
+ pi->flags |= ATA_FLAG_SW_ACTIVITY;
+ }
+}
+EXPORT_SYMBOL_GPL(ahci_set_em_messages);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a..06b7e49e039 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
#include "libata.h"
@@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
-static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -1685,52 +1689,6 @@ unsigned long ata_id_xfermask(const u16 *id)
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
-/**
- * ata_pio_queue_task - Queue port_task
- * @ap: The ata_port to queue port_task for
- * @data: data for @fn to use
- * @delay: delay time in msecs for workqueue function
- *
- * Schedule @fn(@data) for execution after @delay jiffies using
- * port_task. There is one port_task per port and it's the
- * user(low level driver)'s responsibility to make sure that only
- * one task is active at any given time.
- *
- * libata core layer takes care of synchronization between
- * port_task and EH. ata_pio_queue_task() may be ignored for EH
- * synchronization.
- *
- * LOCKING:
- * Inherited from caller.
- */
-void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
-{
- ap->port_task_data = data;
-
- /* may fail if ata_port_flush_task() in progress */
- queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
-}
-
-/**
- * ata_port_flush_task - Flush port_task
- * @ap: The ata_port to flush port_task for
- *
- * After this function completes, port_task is guranteed not to
- * be running or scheduled.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_port_flush_task(struct ata_port *ap)
-{
- DPRINTK("ENTER\n");
-
- cancel_rearming_delayed_work(&ap->port_task);
-
- if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
-}
-
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
@@ -1852,7 +1810,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
- ata_port_flush_task(ap);
+ ata_sff_flush_pio_task(ap);
if (!rc) {
spin_lock_irqsave(ap->lock, flags);
@@ -1906,22 +1864,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = preempted_qc_active;
ap->nr_active_links = preempted_nr_active_links;
- /* XXX - Some LLDDs (sata_mv) disable port on command failure.
- * Until those drivers are fixed, we detect the condition
- * here, fail the command with AC_ERR_SYSTEM and reenable the
- * port.
- *
- * Note that this doesn't change any behavior as internal
- * command failure results in disabling the device in the
- * higher layer for LLDDs without new reset/EH callbacks.
- *
- * Kill the following code as soon as those drivers are fixed.
- */
- if (ap->flags & ATA_FLAG_DISABLED) {
- err_mask |= AC_ERR_SYSTEM;
- ata_port_probe(ap);
- }
-
spin_unlock_irqrestore(ap->lock, flags);
if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
@@ -2184,6 +2126,14 @@ retry:
goto err_out;
}
+ if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
+ "class=%d may_fallback=%d tried_spinup=%d\n",
+ class, may_fallback, tried_spinup);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
+ }
+
/* Falling back doesn't make sense if ID data was read
* successfully at least once.
*/
@@ -2572,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev)
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
@@ -2767,8 +2718,6 @@ int ata_bus_probe(struct ata_port *ap)
int rc;
struct ata_device *dev;
- ata_port_probe(ap);
-
ata_for_each_dev(dev, &ap->link, ALL)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;
@@ -2796,8 +2745,7 @@ int ata_bus_probe(struct ata_port *ap)
ap->ops->phy_reset(ap);
ata_for_each_dev(dev, &ap->link, ALL) {
- if (!(ap->flags & ATA_FLAG_DISABLED) &&
- dev->class != ATA_DEV_UNKNOWN)
+ if (dev->class != ATA_DEV_UNKNOWN)
classes[dev->devno] = dev->class;
else
classes[dev->devno] = ATA_DEV_NONE;
@@ -2805,8 +2753,6 @@ int ata_bus_probe(struct ata_port *ap)
dev->class = ATA_DEV_UNKNOWN;
}
- ata_port_probe(ap);
-
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
@@ -2856,8 +2802,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ENABLED)
return 0;
- /* no device present, disable port */
- ata_port_disable(ap);
return -ENODEV;
fail:
@@ -2889,22 +2833,6 @@ int ata_bus_probe(struct ata_port *ap)
}
/**
- * ata_port_probe - Mark port as enabled
- * @ap: Port for which we indicate enablement
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is enabled.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_probe(struct ata_port *ap)
-{
- ap->flags &= ~ATA_FLAG_DISABLED;
-}
-
-/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
@@ -2951,26 +2879,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
/**
- * ata_port_disable - Disable port.
- * @ap: Port to be disabled.
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is disabled, and should
- * never attempt to probe or communicate with devices
- * on this port.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_disable(struct ata_port *ap)
-{
- ap->link.device[0].class = ATA_DEV_NONE;
- ap->link.device[1].class = ATA_DEV_NONE;
- ap->flags |= ATA_FLAG_DISABLED;
-}
-
-/**
* sata_down_spd_limit - adjust SATA spd limit downward
* @link: Link to adjust SATA spd limit for
* @spd_limit: Additional limit
@@ -3631,9 +3539,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
- unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+ unsigned long nodev_deadline;
int warned = 0;
+ /* choose which 0xff timeout to use, read comment in libata.h */
+ if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+ else
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+
/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.
@@ -3651,12 +3565,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (ready > 0)
return 0;
- /* -ENODEV could be transient. Ignore -ENODEV if link
+ /*
+ * -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
- * time to clear 0xff after reset. For example,
- * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
- * GoVault needs even more than that. Wait for
- * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ * time to clear 0xff after reset. Wait for
+ * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+ * offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when
@@ -5558,30 +5472,6 @@ void ata_host_resume(struct ata_host *host)
#endif
/**
- * ata_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_port_start(struct ata_port *ap)
-{
- struct device *dev = ap->dev;
-
- ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
- GFP_KERNEL);
- if (!ap->prd)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
*
@@ -5709,12 +5599,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
- ap->flags = ATA_FLAG_DISABLED;
ap->print_id = -1;
- ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
ap->dev = host->dev;
- ap->last_ctl = 0xFF;
#if defined(ATA_VERBOSE_DEBUG)
/* turn on all debugging levels */
@@ -5725,11 +5612,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
-#ifdef CONFIG_ATA_SFF
- INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
-#else
- INIT_DELAYED_WORK(&ap->port_task, NULL);
-#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
@@ -5747,6 +5629,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
+ ata_sff_port_init(ap);
+
return ap;
}
@@ -6138,8 +6022,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
- ata_port_probe(ap);
-
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
@@ -6503,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur,
{ "3.0Gbps", .spd_limit = 2 },
{ "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
{ "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
{ "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
{ "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
{ "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
@@ -6663,62 +6546,43 @@ static void __init ata_parse_force_param(void)
static int __init ata_init(void)
{
- ata_parse_force_param();
+ int rc = -ENOMEM;
- /*
- * FIXME: In UP case, there is only one workqueue thread and if you
- * have more than one PIO device, latency is bloody awful, with
- * occasional multi-second "hiccups" as one PIO device waits for
- * another. It's an ugly wart that users DO occasionally complain
- * about; luckily most users have at most one PIO polled device.
- */
- ata_wq = create_workqueue("ata");
- if (!ata_wq)
- goto free_force_tbl;
+ ata_parse_force_param();
ata_aux_wq = create_singlethread_workqueue("ata_aux");
if (!ata_aux_wq)
- goto free_wq;
+ goto fail;
+
+ rc = ata_sff_init();
+ if (rc)
+ goto fail;
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
-free_wq:
- destroy_workqueue(ata_wq);
-free_force_tbl:
+fail:
kfree(ata_force_tbl);
- return -ENOMEM;
+ if (ata_aux_wq)
+ destroy_workqueue(ata_aux_wq);
+ return rc;
}
static void __exit ata_exit(void)
{
+ ata_sff_exit();
kfree(ata_force_tbl);
- destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}
subsys_initcall(ata_init);
module_exit(ata_exit);
-static unsigned long ratelimit_time;
-static DEFINE_SPINLOCK(ata_ratelimit_lock);
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
int ata_ratelimit(void)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&ata_ratelimit_lock, flags);
-
- if (time_after(jiffies, ratelimit_time)) {
- rc = 1;
- ratelimit_time = jiffies + (HZ/5);
- } else
- rc = 0;
-
- spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
-
- return rc;
+ return __ratelimit(&ratelimit);
}
/**
@@ -6826,11 +6690,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
@@ -6842,7 +6704,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6864,7 +6725,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 228740f356c..f77a67303f8 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)
DPRINTK("ENTER\n");
- /* synchronize with port task */
- ata_port_flush_task(ap);
+ /* make sure sff pio task is not running */
+ ata_sff_flush_pio_task(ap);
/* synchronize with host lock and sort out timeouts */
@@ -3684,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ops->hardreset;
/* ignore built-in hardreset if SCR access is not available */
- if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+ if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed8..224faabd7b7 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
return "<unknown>";
}
+#define PMP_GSCR_SII_POL 129
+
static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
+ u16 vendor = sata_pmp_gscr_vendor(gscr);
+ u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
goto fail;
}
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+ * host controller, 3726 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+ if (vendor == 0x1095 && devid == 0x3726) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to read Sil3726 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to write Sil3726 Private Register";
+ goto fail;
+ }
+ }
+
if (print_info) {
ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr),
- sata_pmp_gscr_vendor(gscr),
- sata_pmp_gscr_devid(gscr),
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0088cdeb0b1..cfa9dd3d725 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3345,9 +3345,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
struct ata_link *link;
struct ata_device *dev;
- if (ap->flags & ATA_FLAG_DISABLED)
- return;
-
repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c..efa4a18cfb9 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
#include "libata.h"
+static struct workqueue_struct *ata_sff_wq;
+
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_sff_qc_prep,
+ .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
.softreset = ata_sff_softreset,
.hardreset = sata_sff_hardreset,
.postreset = ata_sff_postreset,
- .drain_fifo = ata_sff_drain_fifo,
.error_handler = ata_sff_error_handler,
- .post_internal_cmd = ata_sff_post_internal_cmd,
.sff_dev_select = ata_sff_dev_select,
.sff_check_status = ata_sff_check_status,
@@ -63,178 +63,12 @@ const struct ata_port_operations ata_sff_port_ops = {
.sff_tf_read = ata_sff_tf_read,
.sff_exec_command = ata_sff_exec_command,
.sff_data_xfer = ata_sff_data_xfer,
- .sff_irq_on = ata_sff_irq_on,
- .sff_irq_clear = ata_sff_irq_clear,
+ .sff_drain_fifo = ata_sff_drain_fifo,
.lost_interrupt = ata_sff_lost_interrupt,
-
- .port_start = ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
-const struct ata_port_operations ata_bmdma_port_ops = {
- .inherits = &ata_sff_port_ops,
-
- .mode_filter = ata_bmdma_mode_filter,
-
- .bmdma_setup = ata_bmdma_setup,
- .bmdma_start = ata_bmdma_start,
- .bmdma_stop = ata_bmdma_stop,
- .bmdma_status = ata_bmdma_status,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-
-const struct ata_port_operations ata_bmdma32_port_ops = {
- .inherits = &ata_bmdma_port_ops,
-
- .sff_data_xfer = ata_sff_data_xfer32,
- .port_start = ata_sff_port_start32,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
-
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- ap->prd[pi].addr = cpu_to_le32(addr);
- ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len, blen;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- blen = len & 0xffff;
- ap->prd[pi].addr = cpu_to_le32(addr);
- if (blen == 0) {
- /* Some PATA chipsets like the CS5530 can't
- cope with 0x0000 meaning 64K as the spec
- says */
- ap->prd[pi].flags_len = cpu_to_le32(0x8000);
- blen = 0x8000;
- ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
- }
- ap->prd[pi].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
/**
* ata_sff_check_status - Read device status reg & clear interrupt
* @ap: port where the device is
@@ -446,6 +280,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
/**
+ * ata_sff_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ *
+ * Writes ATA taskfile device control register.
+ *
+ * Note: may NOT be used as the sff_set_devctl() entry in
+ * ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ if (ap->ops->sff_set_devctl)
+ ap->ops->sff_set_devctl(ap, ctl);
+ else
+ iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
* ata_sff_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
* @device: ATA device (numbered from zero) to select
@@ -491,7 +346,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
* LOCKING:
* caller.
*/
-void ata_dev_select(struct ata_port *ap, unsigned int device,
+static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
@@ -517,50 +372,34 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
* Enable interrupts on a legacy IDE device using MMIO or PIO,
* wait for idle, clear any pending interrupts.
*
+ * Note: may NOT be used as the sff_irq_on() entry in
+ * ata_port_operations.
+ *
* LOCKING:
* Inherited from caller.
*/
-u8 ata_sff_irq_on(struct ata_port *ap)
+void ata_sff_irq_on(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 tmp;
+
+ if (ap->ops->sff_irq_on) {
+ ap->ops->sff_irq_on(ap);
+ return;
+ }
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ioaddr->ctl_addr)
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- tmp = ata_wait_idle(ap);
-
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
+ ata_sff_set_devctl(ap, ap->ctl);
+ ata_wait_idle(ap);
- return tmp;
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
- * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
- * @ap: Port associated with this ATA transaction.
- *
- * Clear interrupt and error flags in DMA status register.
- *
- * May be used as the irq_clear() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_irq_clear(struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- if (!mmio)
- return;
-
- iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-
-/**
* ata_sff_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
@@ -579,7 +418,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -615,8 +453,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
-
- ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -894,7 +730,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
- if (!do_write)
+ if (!do_write && !PageSlab(page))
flush_dcache_page(page);
qc->curbytes += qc->sect_size;
@@ -962,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_LAST;
break;
+#ifdef CONFIG_ATA_BMDMA
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
ap->ops->bmdma_start(qc);
break;
+#endif /* CONFIG_ATA_BMDMA */
+ default:
+ BUG();
}
}
@@ -1165,7 +1005,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
} else
ata_port_freeze(ap);
@@ -1181,7 +1021,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} else {
if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
spin_unlock_irqrestore(ap->lock, flags);
} else
@@ -1293,7 +1133,7 @@ fsm_start:
if (in_wq)
spin_unlock_irqrestore(ap->lock, flags);
- /* if polling, ata_pio_task() handles the rest.
+ /* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
break;
@@ -1458,14 +1298,38 @@ fsm_start:
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_pio_task(struct work_struct *work)
+void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+{
+ /* may fail if ata_sff_flush_pio_task() in progress */
+ queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
+ msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ cancel_rearming_delayed_work(&ap->sff_pio_task);
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
+
+static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
- container_of(work, struct ata_port, port_task.work);
- struct ata_queued_cmd *qc = ap->port_task_data;
+ container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc)
+ return;
+
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1481,7 +1345,7 @@ fsm_start:
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
return;
}
}
@@ -1497,15 +1361,11 @@ fsm_start:
}
/**
- * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ * ata_sff_qc_issue - issue taskfile to a SFF controller
* @qc: command to issue to device
*
- * Using various libata functions and hooks, this function
- * starts an ATA command. ATA commands are grouped into
- * classes called "protocols", and issuing each type of protocol
- * is slightly different.
- *
- * May be used as the qc_issue() entry in ata_port_operations.
+ * This function issues a PIO or NODATA command to a SFF
+ * controller.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1520,23 +1380,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
*/
- if (ap->flags & ATA_FLAG_PIO_POLLING) {
- switch (qc->tf.protocol) {
- case ATA_PROT_PIO:
- case ATA_PROT_NODATA:
- case ATAPI_PROT_PIO:
- case ATAPI_PROT_NODATA:
- qc->tf.flags |= ATA_TFLAG_POLLING;
- break;
- case ATAPI_PROT_DMA:
- if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- /* see ata_dma_blacklisted() */
- BUG();
- break;
- default:
- break;
- }
- }
+ if (ap->flags & ATA_FLAG_PIO_POLLING)
+ qc->tf.flags |= ATA_TFLAG_POLLING;
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -1551,17 +1396,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
-
- break;
+ ata_sff_queue_pio_task(ap, 0);
- case ATA_PROT_DMA:
- WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->ops->bmdma_start(qc); /* initiate bmdma */
- ap->hsm_task_state = HSM_ST_LAST;
break;
case ATA_PROT_PIO:
@@ -1573,20 +1409,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
- /* always send first data block using
- * the ata_pio_task() codepath.
+ /* always send first data block using the
+ * ata_sff_pio_task() codepath.
*/
} else {
/* PIO data in protocol */
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
- /* if polling, ata_pio_task() handles the rest.
- * otherwise, interrupt handler takes over from here.
+ /* if polling, ata_sff_pio_task() handles the
+ * rest. otherwise, interrupt handler takes
+ * over from here.
*/
}
@@ -1604,19 +1441,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_pio_queue_task(ap, qc, 0);
- break;
-
- case ATAPI_PROT_DMA:
- WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->hsm_task_state = HSM_ST_FIRST;
-
- /* send cdb by polling if no cdb interrupt */
- if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
break;
default:
@@ -1648,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
-/**
- * ata_sff_host_intr - Handle host interrupt for given (port, task)
- * @ap: Port on which interrupt arrived (possibly...)
- * @qc: Taskfile currently active in engine
- *
- * Handle host interrupt for given queued command. Currently,
- * only DMA interrupts are handled. All other commands are
- * handled via polling with interrupts disabled (nIEN bit).
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * One if interrupt was handled, zero if not (shared irq).
- */
-unsigned int ata_sff_host_intr(struct ata_port *ap,
- struct ata_queued_cmd *qc)
+static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
- u8 status, host_stat = 0;
- bool bmdma_stopped = false;
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ap->ops->sff_check_status(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ return 1;
+ }
+#endif
+ return 0; /* irq not handled */
+}
+
+static unsigned int __ata_sff_port_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc,
+ bool hsmv_on_idle)
+{
+ u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1685,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
* need to check ata_is_atapi(qc->tf.protocol) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- goto idle_irq;
- break;
- case HSM_ST_LAST:
- if (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA) {
- /* check status of DMA engine */
- host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n",
- ap->print_id, host_stat);
-
- /* if it's not our irq... */
- if (!(host_stat & ATA_DMA_INTR))
- goto idle_irq;
-
- /* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(qc);
- bmdma_stopped = true;
-
- if (unlikely(host_stat & ATA_DMA_ERR)) {
- /* error when transfering data to/from memory */
- qc->err_mask |= AC_ERR_HOST_BUS;
- ap->hsm_task_state = HSM_ST_ERR;
- }
- }
+ return ata_sff_idle_irq(ap);
break;
case HSM_ST:
+ case HSM_ST_LAST:
break;
default:
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
-
/* check main status, clearing INTRQ if needed */
status = ata_sff_irq_status(ap);
if (status & ATA_BUSY) {
- if (bmdma_stopped) {
+ if (hsmv_on_idle) {
/* BMDMA engine is already stopped, we're screwed */
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
} else
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
- /* ack bmdma irq events */
- ap->ops->sff_irq_clear(ap);
+ /* clear irq events */
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
ata_sff_hsm_move(ap, qc, status, 0);
- if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA))
- ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
return 1; /* irq handled */
-
-idle_irq:
- ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
- if ((ap->stats.idle_irq % 1000) == 0) {
- ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
- return 1;
- }
-#endif
- return 0; /* irq not handled */
}
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
/**
- * ata_sff_interrupt - Default ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
+ * ata_sff_port_intr - Handle SFF port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
*
- * Default interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is not disabled.
+ * Handle port interrupt for given queued command.
*
* LOCKING:
- * Obtains host lock during operation.
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
+ * One if interrupt was handled, zero if not (shared irq).
*/
-irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ return __ata_sff_port_intr(ap, qc, false);
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_intr);
+
+static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
+ unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
struct ata_host *host = dev_instance;
bool retried = false;
@@ -1785,13 +1576,10 @@ retry:
struct ata_port *ap = host->ports[i];
struct ata_queued_cmd *qc;
- if (unlikely(ap->flags & ATA_FLAG_DISABLED))
- continue;
-
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ handled |= port_intr(ap, qc);
else
polling |= 1 << i;
} else
@@ -1818,7 +1606,8 @@ retry:
if (idle & (1 << i)) {
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
} else {
/* clear INTRQ and check if BUSY cleared */
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1840,6 +1629,25 @@ retry:
return IRQ_RETVAL(handled);
}
+
+/**
+ * ata_sff_interrupt - Default SFF ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_sff_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
+}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
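For reference, a PIO-only LLD hands this handler to the host-activation helper; the BMDMA counterpart added further down is used the same way. A minimal sketch under assumed names (my_pio_sht and my_activate are illustrative, not part of this patch; the usual <linux/libata.h> and <linux/interrupt.h> includes are assumed):

static struct scsi_host_template my_pio_sht = {
	ATA_PIO_SHT("my_pio"),		/* hypothetical driver name */
};

/* Sketch: activate an already-initialized host with the SFF handler. */
static int my_activate(struct ata_host *host, int irq)
{
	/* one shared IRQ is enough; the handler walks every port and
	 * dispatches to ata_sff_port_intr() for the active command
	 */
	return ata_host_activate(host, irq, ata_sff_interrupt,
				 IRQF_SHARED, &my_pio_sht);
}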
/**
@@ -1862,11 +1670,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
/* Only one outstanding command per SFF channel */
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- /* Check we have a live one.. */
- if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE))
- return;
- /* We cannot lose an interrupt on a polled command */
- if (qc->tf.flags & ATA_TFLAG_POLLING)
+ /* We cannot lose an interrupt on a non-existent or polled command */
+ if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
return;
/* See if the controller thinks it is still busy - if so the command
isn't a lost IRQ but is still in progress */
@@ -1880,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
- ata_sff_host_intr(ap, qc);
+ ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
@@ -1888,20 +1693,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
* ata_sff_freeze - Freeze SFF controller port
* @ap: port to freeze
*
- * Freeze BMDMA controller port.
+ * Freeze SFF controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_freeze(struct ata_port *ap)
{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ioaddr->ctl_addr)
- iowrite8(ap->ctl, ioaddr->ctl_addr);
+ if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
+ ata_sff_set_devctl(ap, ap->ctl);
/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1909,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap)
*/
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
@@ -1926,8 +1730,9 @@ void ata_sff_thaw(struct ata_port *ap)
{
/* clear & re-enable interrupts */
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- ap->ops->sff_irq_on(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
@@ -2301,8 +2106,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
}
/* set up device control */
- if (ap->ioaddr.ctl_addr) {
- iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+ if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
+ ata_sff_set_devctl(ap, ap->ctl);
ap->last_ctl = ap->ctl;
}
}
@@ -2342,7 +2147,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
/**
- * ata_sff_error_handler - Stock error handler for BMDMA controller
+ * ata_sff_error_handler - Stock error handler for SFF controller
* @ap: port to handle error for
*
* Stock error handler for SFF controller. It can handle both
@@ -2359,62 +2164,32 @@ void ata_sff_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
- int thaw = 0;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
qc = NULL;
- /* reset PIO HSM and stop DMA engine */
spin_lock_irqsave(ap->lock, flags);
- ap->hsm_task_state = HSM_ST_IDLE;
-
- if (ap->ioaddr.bmdma_addr &&
- qc && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA)) {
- u8 host_stat;
-
- host_stat = ap->ops->bmdma_status(ap);
-
- /* BMDMA controllers indicate host bus error by
- * setting DMA_ERR bit and timing out. As it wasn't
- * really a timeout event, adjust error mask and
- * cancel frozen state.
- */
- if (qc->err_mask == AC_ERR_TIMEOUT
- && (host_stat & ATA_DMA_ERR)) {
- qc->err_mask = AC_ERR_HOST_BUS;
- thaw = 1;
- }
-
- ap->ops->bmdma_stop(qc);
- }
-
- ata_sff_sync(ap); /* FIXME: We don't need this */
- ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- /* We *MUST* do FIFO draining before we issue a reset as several
- * devices helpfully clear their internal state and will lock solid
- * if we touch the data port post reset. Pass qc in case anyone wants
- * to do different PIO/DMA recovery or has per command fixups
+ /*
+ * We *MUST* do FIFO draining before we issue a reset as
+ * several devices helpfully clear their internal state and
+ * will lock solid if we touch the data port post reset. Pass
+ * qc in case anyone wants to do different PIO/DMA recovery or
+ * has per command fixups
*/
- if (ap->ops->drain_fifo)
- ap->ops->drain_fifo(qc);
+ if (ap->ops->sff_drain_fifo)
+ ap->ops->sff_drain_fifo(qc);
spin_unlock_irqrestore(ap->lock, flags);
- if (thaw)
- ata_eh_thaw_port(ap);
-
- /* PIO and DMA engines have been stopped, perform recovery */
-
- /* Ignore ata_sff_softreset if ctl isn't accessible and
- * built-in hardresets if SCR access isn't available.
- */
+ /* ignore ata_sff_softreset if ctl isn't accessible */
if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
softreset = NULL;
- if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+
+ /* ignore built-in hardresets if SCR access is not available */
+ if ((hardreset == sata_std_hardreset ||
+ hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2423,73 +2198,6 @@ void ata_sff_error_handler(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
/**
- * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
- * @qc: internal command to clean up
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned long flags;
-
- spin_lock_irqsave(ap->lock, flags);
-
- ap->hsm_task_state = HSM_ST_IDLE;
-
- if (ap->ioaddr.bmdma_addr)
- ap->ops->bmdma_stop(qc);
-
- spin_unlock_irqrestore(ap->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-
-/**
- * ata_sff_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start(struct ata_port *ap)
-{
- if (ap->ioaddr.bmdma_addr)
- return ata_port_start(ap);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-
-/**
- * ata_sff_port_start32 - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations for
- * devices that are capable of 32bit PIO.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start32(struct ata_port *ap)
-{
- ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
- if (ap->ioaddr.bmdma_addr)
- return ata_port_start(ap);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start32);
-
-/**
* ata_sff_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
*
@@ -2515,302 +2223,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
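ata_sff_std_ports() only derives the per-register offsets; the LLD still supplies the command/control base addresses. A sketch of the usual call pattern (function and parameter names are made up):

/* Sketch: fill an ata_ioports from two ioremapped windows, then let
 * ata_sff_std_ports() compute the taskfile register offsets.
 */
static void my_fill_ioaddr(struct ata_port *ap, void __iomem *cmd_base,
			   void __iomem *ctl_base)
{
	ap->ioaddr.cmd_addr = cmd_base;
	ap->ioaddr.altstatus_addr = ctl_base;
	ap->ioaddr.ctl_addr = ctl_base;
	ata_sff_std_ports(&ap->ioaddr);
}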
-unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
- unsigned long xfer_mask)
-{
- /* Filter out DMA modes if the device has been configured by
- the BIOS as PIO only */
-
- if (adev->link->ap->ioaddr.bmdma_addr == NULL)
- xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- return xfer_mask;
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-
-/**
- * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- u8 dmactl;
-
- /* load PRD table addr. */
- mb(); /* make sure PRD table writes are visible to controller */
- iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
- /* specify data direction, triple-check start bit is clear */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
- if (!rw)
- dmactl |= ATA_DMA_WR;
- iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* issue r/w command */
- ap->ops->sff_exec_command(ap, &qc->tf);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-
-/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- u8 dmactl;
-
- /* start host DMA transaction */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* Strictly, one may wish to issue an ioread8() here, to
- * flush the mmio write. However, control also passes
- * to the hardware at this point, and it will interrupt
- * us when we are to resume control. So, in effect,
- * we don't care when the mmio write flushes.
- * Further, a read of the DMA status register _immediately_
- * following the write may not be what certain flaky hardware
- * is expected, so I think it is best to not add a readb()
- * without first all the MMIO ATA cards/mobos.
- * Or maybe I'm just being paranoid.
- *
- * FIXME: The posting of this write means I/O starts are
- * unneccessarily delayed for MMIO
- */
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-
-/**
- * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @qc: Command we are ending DMA for
- *
- * Clears the ATA_DMA_START flag in the dma control register
- *
- * May be used as the bmdma_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_stop(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- /* clear start/stop bit */
- iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
- mmio + ATA_DMA_CMD);
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-
-/**
- * ata_bmdma_status - Read PCI IDE BMDMA status
- * @ap: Port associated with this ATA transaction.
- *
- * Read and return BMDMA status register.
- *
- * May be used as the bmdma_status() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-u8 ata_bmdma_status(struct ata_port *ap)
-{
- return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-
-/**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
- *
- * This is typically the first time we actually start issuing
- * commands to the ATA channel. We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result. Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register. Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
- *
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
- *
- * DEPRECATED:
- * This function is only for drivers which still use old EH and
- * will be removed soon.
- */
-void ata_bus_reset(struct ata_port *ap)
-{
- struct ata_device *device = ap->link.device;
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- u8 err;
- unsigned int dev0, dev1 = 0, devmask = 0;
- int rc;
-
- DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
- /* determine if device 0/1 are present */
- if (ap->flags & ATA_FLAG_SATA_RESET)
- dev0 = 1;
- else {
- dev0 = ata_devchk(ap, 0);
- if (slave_possible)
- dev1 = ata_devchk(ap, 1);
- }
-
- if (dev0)
- devmask |= (1 << 0);
- if (dev1)
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->sff_dev_select(ap, 0);
-
- /* issue bus reset */
- if (ap->flags & ATA_FLAG_SRST) {
- rc = ata_bus_softreset(ap, devmask,
- ata_deadline(jiffies, 40000));
- if (rc && rc != -ENODEV)
- goto err_out;
- }
-
- /*
- * determine by signature whether we have ATA or ATAPI devices
- */
- device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
- if ((slave_possible) && (err != 0x81))
- device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
-
- /* is double-select really necessary? */
- if (device[1].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 1);
- if (device[0].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 0);
-
- /* if no devices were detected, disable this port */
- if ((device[0].class == ATA_DEV_NONE) &&
- (device[1].class == ATA_DEV_NONE))
- goto err_out;
-
- if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
- /* set up device control for ATA_FLAG_SATA_RESET */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
- }
-
- DPRINTK("EXIT\n");
- return;
-
-err_out:
- ata_port_printk(ap, KERN_ERR, "disabling port\n");
- ata_port_disable(ap);
-
- DPRINTK("EXIT\n");
-}
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-
#ifdef CONFIG_PCI
-/**
- * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
- * @pdev: PCI device
- *
- * Some PCI ATA devices report simplex mode but in fact can be told to
- * enter non simplex mode. This implements the necessary logic to
- * perform the task on such devices. Calling it on other devices will
- * have -undefined- behaviour.
- */
-int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
-{
- unsigned long bmdma = pci_resource_start(pdev, 4);
- u8 simplex;
-
- if (bmdma == 0)
- return -ENOENT;
-
- simplex = inb(bmdma + 0x02);
- outb(simplex & 0x60, bmdma + 0x02);
- simplex = inb(bmdma + 0x02);
- if (simplex & 0x80)
- return -EOPNOTSUPP;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-
-/**
- * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
- * @host: target ATA host
- *
- * Acquire PCI BMDMA resources and initialize @host accordingly.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_pci_bmdma_init(struct ata_host *host)
-{
- struct device *gdev = host->dev;
- struct pci_dev *pdev = to_pci_dev(gdev);
- int i, rc;
-
- /* No BAR4 allocation: No DMA */
- if (pci_resource_start(pdev, 4) == 0)
- return 0;
-
- /* TODO: If we get no DMA mask we should fall back to PIO */
- rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- /* request and iomap DMA region */
- rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
- if (rc) {
- dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
- return -ENOMEM;
- }
- host->iomap = pcim_iomap_table(pdev);
-
- for (i = 0; i < 2; i++) {
- struct ata_port *ap = host->ports[i];
- void __iomem *bmdma = host->iomap[4] + 8 * i;
-
- if (ata_port_is_dummy(ap))
- continue;
-
- ap->ioaddr.bmdma_addr = bmdma;
- if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
- (ioread8(bmdma + 2) & 0x80))
- host->flags |= ATA_HOST_SIMPLEX;
-
- ata_port_desc(ap, "bmdma 0x%llx",
- (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-
static int ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
@@ -2905,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
/**
- * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
+ * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
*
- * Helper to allocate ATA host for @pdev, acquire all native PCI
- * resources and initialize it accordingly in one go.
+ * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
+ * all PCI resources and initialize it accordingly in one go.
*
* LOCKING:
* Inherited from calling layer (may sleep).
@@ -2941,22 +2355,10 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
if (rc)
goto err_out;
- /* init DMA related stuff */
- rc = ata_pci_bmdma_init(host);
- if (rc)
- goto err_bmdma;
-
devres_remove_group(&pdev->dev, NULL);
*r_host = host;
return 0;
-err_bmdma:
- /* This is necessary because PCI and iomap resources are
- * merged and releasing the top group won't release the
- * acquired resources if some of those have been acquired
- * before entering this function.
- */
- pcim_iounmap_regions(pdev, 0xf);
err_out:
devres_release_group(&pdev->dev, NULL);
return rc;
@@ -3057,8 +2459,21 @@ out:
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+static const struct ata_port_info *ata_sff_find_valid_pi(
+ const struct ata_port_info * const *ppi)
+{
+ int i;
+
+ /* look up the first valid port_info */
+ for (i = 0; i < 2 && ppi[i]; i++)
+ if (ppi[i]->port_ops != &ata_dummy_port_ops)
+ return ppi[i];
+
+ return NULL;
+}
+
/**
- * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
+ * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
* @pdev: Controller to be initialized
* @ppi: array of port_info, must be enough for two ports
* @sht: scsi_host_template to use when registering the host
@@ -3067,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
*
* This is a helper function which can be called from a driver's
* xxx_init_one() probe function if the hardware uses traditional
- * IDE taskfile registers.
- *
- * This function calls pci_enable_device(), reserves its register
- * regions, sets the dma mask, enables bus master mode, and calls
- * ata_device_add()
+ * IDE taskfile registers and is PIO only.
*
* ASSUMPTION:
* Nobody makes a single channel controller that appears solely as
@@ -3088,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv, int hflag)
{
struct device *dev = &pdev->dev;
- const struct ata_port_info *pi = NULL;
+ const struct ata_port_info *pi;
struct ata_host *host = NULL;
- int i, rc;
+ int rc;
DPRINTK("ENTER\n");
- /* look up the first valid port_info */
- for (i = 0; i < 2 && ppi[i]; i++) {
- if (ppi[i]->port_ops != &ata_dummy_port_ops) {
- pi = ppi[i];
- break;
- }
- }
-
+ pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_printk(KERN_ERR, &pdev->dev,
"no valid port_info specified\n");
@@ -3122,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflag;
- pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
@@ -3135,3 +2538,801 @@ out:
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
#endif /* CONFIG_PCI */
+
+/*
+ * BMDMA support
+ */
+
+#ifdef CONFIG_ATA_BMDMA
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+ .qc_prep = ata_bmdma_qc_prep,
+ .qc_issue = ata_bmdma_qc_issue,
+
+ .sff_irq_clear = ata_bmdma_irq_clear,
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
+
+ .port_start = ata_bmdma_port_start,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .sff_data_xfer = ata_sff_data_xfer32,
+ .port_start = ata_bmdma_port_start32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
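Drivers converted later in this patch pick these ops up by inheritance and override only what differs. A sketch of a typical LLD ops table built on the new base (my_set_piomode is a hypothetical placeholder):

/* Hypothetical timing hook; a real driver programs its chipset here. */
static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* program PIO timing registers for adev->pio_mode */
}

static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_bmdma_port_ops,	/* BMDMA + SFF defaults */
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= my_set_piomode,
};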
+
+/**
+ * ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command. Perform the fill
+ * so that we avoid writing any length 64K records for
+ * controllers that don't follow the spec.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len, blen;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ blen = len & 0xffff;
+ prd[pi].addr = cpu_to_le32(addr);
+ if (blen == 0) {
+ /* Some PATA chipsets like the CS5530 can't
+ cope with 0x0000 meaning 64K as the spec
+ says */
+ prd[pi].flags_len = cpu_to_le32(0x8000);
+ blen = 0x8000;
+ prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ }
+ prd[pi].flags_len = cpu_to_le32(blen);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
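The "dumb" variant is for controllers that mishandle a full 64K PRD entry; pata_atiixp and pata_cs5520 are switched to it further down. Plugging it in is a one-line override, e.g. this sketch:

/* Sketch: keep the stock BMDMA ops but split 64K PRD entries. */
static struct ata_port_operations my_dumb_bmdma_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.qc_prep	= ata_bmdma_dumb_qc_prep,
};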
+
+/**
+ * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
+ * @qc: command to issue to device
+ *
+ * This function issues a PIO, NODATA or DMA command to an
+ * SFF/BMDMA controller. PIO and NODATA are handled by
+ * ata_sff_qc_issue().
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* see ata_dma_blacklisted() */
+ BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
+ qc->tf.protocol == ATAPI_PROT_DMA);
+
+ /* defer PIO handling to sff_qc_issue */
+ if (!ata_is_dma(qc->tf.protocol))
+ return ata_sff_qc_issue(qc);
+
+ /* select the device */
+ ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+ /* start the command */
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+
+ case ATAPI_PROT_DMA:
+ WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ ata_sff_queue_pio_task(ap, 0);
+ break;
+
+ default:
+ WARN_ON(1);
+ return AC_ERR_SYSTEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
+
+/**
+ * ata_bmdma_port_intr - Handle BMDMA port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle port interrupt for given queued command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u8 host_stat = 0;
+ bool bmdma_stopped = false;
+ unsigned int handled;
+
+ if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ return ata_sff_idle_irq(ap);
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+ bmdma_stopped = true;
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+
+ handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
+
+ if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+ return handled;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
+
+/**
+ * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_bmdma_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
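Most PCI drivers receive this handler through ata_pci_sff_activate_host(), but an LLD that requests its own IRQs can pass it directly, as the pata_cs5520 hunk below does. A sketch, assuming pdev and host are already prepared (my_request_irq is illustrative):

/* Sketch: wire the BMDMA interrupt handler up by hand. */
static int my_request_irq(struct pci_dev *pdev, struct ata_host *host,
			  unsigned int irq)
{
	return devm_request_irq(&pdev->dev, irq, ata_bmdma_interrupt,
				IRQF_SHARED, "my_bmdma", host);
}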
+
+/**
+ * ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ * @ap: port to handle error for
+ *
+ * Stock error handler for BMDMA controller. It can handle both
+ * PATA and SATA controllers. Most BMDMA controllers should be
+ * able to use this EH as-is or with some added handling before
+ * and after.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ bool thaw = false;
+
+ qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ qc = NULL;
+
+ /* reset PIO HSM and stop DMA engine */
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (qc && ata_is_dma(qc->tf.protocol)) {
+ u8 host_stat;
+
+ host_stat = ap->ops->bmdma_status(ap);
+
+ /* BMDMA controllers indicate host bus error by
+ * setting DMA_ERR bit and timing out. As it wasn't
+ * really a timeout event, adjust error mask and
+ * cancel frozen state.
+ */
+ if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+ qc->err_mask = AC_ERR_HOST_BUS;
+ thaw = true;
+ }
+
+ ap->ops->bmdma_stop(qc);
+
+ /* if we're gonna thaw, make sure IRQ is clear */
+ if (thaw) {
+ ap->ops->sff_check_status(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ }
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if (thaw)
+ ata_eh_thaw_port(ap);
+
+ ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ spin_lock_irqsave(ap->lock, flags);
+ ap->ops->bmdma_stop(qc);
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
+ * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 dmactl;
+
+ /* start host DMA transaction */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* Strictly, one may wish to issue an ioread8() here, to
+ * flush the mmio write. However, control also passes
+ * to the hardware at this point, and it will interrupt
+ * us when we are to resume control. So, in effect,
+ * we don't care when the mmio write flushes.
+ * Further, a read of the DMA status register _immediately_
+ * following the write may not be what certain flaky hardware
+ * expects, so I think it is best to not add a readb()
+ * without first checking all the MMIO ATA cards/mobos.
+ * Or maybe I'm just being paranoid.
+ *
+ * FIXME: The posting of this write means I/O starts are
+ * unnecessarily delayed for MMIO
+ */
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+
+/**
+ * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ *
+ * Clears the ATA_DMA_START flag in the dma control register
+ *
+ * May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* clear start/stop bit */
+ iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+ mmio + ATA_DMA_CMD);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+
+/**
+ * ata_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Read and return BMDMA status register.
+ *
+ * May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+ return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+
+
+/**
+ * ata_bmdma_port_start - Set port up for bmdma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Allocates space for PRD table.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_bmdma_port_start(struct ata_port *ap)
+{
+ if (ap->mwdma_mask || ap->udma_mask) {
+ ap->bmdma_prd =
+ dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
+ &ap->bmdma_prd_dma, GFP_KERNEL);
+ if (!ap->bmdma_prd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
+
+/**
+ * ata_bmdma_port_start32 - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Enables 32bit PIO and allocates space for PRD
+ * table.
+ *
+ * May be used as the port_start() entry in ata_port_operations for
+ * devices that are capable of 32bit PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_bmdma_port_start32(struct ata_port *ap)
+{
+ ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+ return ata_bmdma_port_start(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
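Drivers with per-port private data allocate it and then chain to ata_bmdma_port_start(), which only allocates the PRD table when a DMA mode is enabled; pata_acpi's port_start below follows this shape. A sketch with a hypothetical private struct:

/* Hypothetical per-port state for the sketch below. */
struct my_port_priv {
	int cached_timing;
};

/* Sketch: LLD port_start that defers PRD allocation to the BMDMA core. */
static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;

	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	return ata_bmdma_port_start(ap);
}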
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
+ * @pdev: PCI device
+ *
+ * Some PCI ATA devices report simplex mode but in fact can be told to
+ * enter non simplex mode. This implements the necessary logic to
+ * perform the task on such devices. Calling it on other devices will
+ * have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
+{
+ unsigned long bmdma = pci_resource_start(pdev, 4);
+ u8 simplex;
+
+ if (bmdma == 0)
+ return -ENOENT;
+
+ simplex = inb(bmdma + 0x02);
+ outb(simplex & 0x60, bmdma + 0x02);
+ simplex = inb(bmdma + 0x02);
+ if (simplex & 0x80)
+ return -EOPNOTSUPP;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+
+static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
+{
+ int i;
+
+ dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
+ reason);
+
+ for (i = 0; i < 2; i++) {
+ host->ports[i]->mwdma_mask = 0;
+ host->ports[i]->udma_mask = 0;
+ }
+}
+
+/**
+ * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+void ata_pci_bmdma_init(struct ata_host *host)
+{
+ struct device *gdev = host->dev;
+ struct pci_dev *pdev = to_pci_dev(gdev);
+ int i, rc;
+
+ /* No BAR4 allocation: No DMA */
+ if (pci_resource_start(pdev, 4) == 0) {
+ ata_bmdma_nodma(host, "BAR4 is zero");
+ return;
+ }
+
+ /*
+ * Some controllers require BMDMA region to be initialized
+ * even if DMA is not in use to clear IRQ status via
+ * ->sff_irq_clear method. Try to initialize bmdma_addr
+ * regardless of dma masks.
+ */
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ ata_bmdma_nodma(host, "failed to set dma mask");
+ if (!rc) {
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ ata_bmdma_nodma(host,
+ "failed to set consistent dma mask");
+ }
+
+ /* request and iomap DMA region */
+ rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+ if (rc) {
+ ata_bmdma_nodma(host, "failed to request/iomap BAR4");
+ return;
+ }
+ host->iomap = pcim_iomap_table(pdev);
+
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ap->ioaddr.bmdma_addr = bmdma;
+ if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+ (ioread8(bmdma + 2) & 0x80))
+ host->flags |= ATA_HOST_SIMPLEX;
+
+ ata_port_desc(ap, "bmdma 0x%llx",
+ (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+ }
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+
+/**
+ * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
+ * @pdev: target PCI device
+ * @ppi: array of port_info, must be enough for two ports
+ * @r_host: out argument for the initialized ATA host
+ *
+ * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
+ * resources and initialize it accordingly in one go.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host)
+{
+ int rc;
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+ if (rc)
+ return rc;
+
+ ata_pci_bmdma_init(*r_host);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflags: host flags
+ *
+ * This function is similar to ata_pci_sff_init_one() but also
+ * takes care of BMDMA initialization.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht, void *host_priv,
+ int hflags)
+{
+ struct device *dev = &pdev->dev;
+ const struct ata_port_info *pi;
+ struct ata_host *host = NULL;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ pi = ata_sff_find_valid_pi(ppi);
+ if (!pi) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "no valid port_info specified\n");
+ return -EINVAL;
+ }
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ goto out;
+
+ /* prepare and activate BMDMA host */
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ if (rc)
+ goto out;
+ host->private_data = host_priv;
+ host->flags |= hflags;
+
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+ out:
+ if (rc == 0)
+ devres_remove_group(&pdev->dev, NULL);
+ else
+ devres_release_group(&pdev->dev, NULL);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
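The PCI driver conversions further down (pata_acpi, pata_amd, pata_artop, pata_atiixp, pata_cmd64x, ...) reduce to swapping ata_pci_sff_init_one() for this helper. A compressed sketch of such a probe, with illustrative names and transfer modes:

static struct scsi_host_template my_bmdma_sht = {
	ATA_BMDMA_SHT("my_pata"),	/* hypothetical driver name */
};

static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &ata_bmdma_port_ops,	/* or a driver-specific table */
};

/* Sketch: the whole probe collapses into the BMDMA init helper. */
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &my_bmdma_sht, NULL, 0);
}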
+
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ * ata_sff_port_init - Initialize SFF/BMDMA ATA port
+ * @ap: Port to initialize
+ *
+ * Called on port allocation to initialize SFF/BMDMA specific
+ * fields.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_sff_port_init(struct ata_port *ap)
+{
+ INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
+ ap->ctl = ATA_DEVCTL_OBS;
+ ap->last_ctl = 0xFF;
+}
+
+int __init ata_sff_init(void)
+{
+ /*
+ * FIXME: In UP case, there is only one workqueue thread and if you
+ * have more than one PIO device, latency is bloody awful, with
+ * occasional multi-second "hiccups" as one PIO device waits for
+ * another. It's an ugly wart that users DO occasionally complain
+ * about; luckily most users have at most one PIO polled device.
+ */
+ ata_sff_wq = create_workqueue("ata_sff");
+ if (!ata_sff_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit ata_sff_exit(void)
+{
+ destroy_workqueue(ata_sff_wq);
+}
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 823e6309636..4b84ed60324 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,17 +38,6 @@ struct ata_scsi_args {
void (*done)(struct scsi_cmnd *);
};
-static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
-{
- if (reset == sata_std_hardreset)
- return 1;
-#ifdef CONFIG_ATA_SFF
- if (reset == sata_sff_hardreset)
- return 1;
-#endif
- return 0;
-}
-
/* libata-core.c */
enum {
/* flags for ata_dev_read_id() */
@@ -79,7 +68,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
-extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -202,10 +190,19 @@ static inline int sata_pmp_attach(struct ata_device *dev)
/* libata-sff.c */
#ifdef CONFIG_ATA_SFF
-extern void ata_dev_select(struct ata_port *ap, unsigned int device,
- unsigned int wait, unsigned int can_sleep);
-extern u8 ata_irq_on(struct ata_port *ap);
-extern void ata_pio_task(struct work_struct *work);
+extern void ata_sff_flush_pio_task(struct ata_port *ap);
+extern void ata_sff_port_init(struct ata_port *ap);
+extern int ata_sff_init(void);
+extern void ata_sff_exit(void);
+#else /* CONFIG_ATA_SFF */
+static inline void ata_sff_flush_pio_task(struct ata_port *ap)
+{ }
+static inline void ata_sff_port_init(struct ata_port *ap)
+{ }
+static inline int ata_sff_init(void)
+{ return 0; }
+static inline void ata_sff_exit(void)
+{ }
#endif /* CONFIG_ATA_SFF */
#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 1ea2be0f4b9..c8d47034d5e 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
- return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
+ return mask & acpi->mask[adev->devno];
}
/**
@@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
struct pata_acpi *acpi = ap->private_data;
if (acpi->gtm.flags & 0x10)
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
if (adev != acpi->last) {
pacpi_set_piomode(ap, adev);
@@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
pacpi_set_dmamode(ap, adev);
acpi->last = adev;
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
- ret = ata_sff_port_start(ap);
+ ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pcim_pin_device(pdev);
}
- return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
}
static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index dc61b72f751..794ec6e3275 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[0] = &info_20_udma;
}
- return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
+ return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ else
+ return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d95eca9c547..620a07cabe3 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* And fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4d066d6c30f..ba43f0f8c88 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
BUG_ON(ppi[0] == NULL);
- return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static const struct pci_device_id artop_pci_tbl[] = {
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index c6a946aa252..0da0dcc7dd0 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
.sff_data_xfer = pata_at91_data_xfer_noirq,
.set_piomode = pata_at91_set_piomode,
.cable_detect = ata_cable_40wire,
- .port_start = ATA_OP_NULL,
};
static int __devinit pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index cbaf2eddac6..43755616dc5 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
@@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
ppi[i] = &ata_dummy_port_info;
- return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index bb6e0746e07..95295935dd9 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 02c81f12c70..9cae65de750 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -821,6 +821,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
}
/**
+ * bfin_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ */
+
+static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ write_atapi_register(base, ATA_REG_CTRL, ctl);
+}
+
+/**
* bfin_bmdma_setup - Set up IDE DMA transaction
* @qc: Info associated with this ATA transaction.
*
@@ -1202,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
* bfin_irq_clear - Clear ATAPI interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void bfin_irq_clear(struct ata_port *ap)
@@ -1216,56 +1228,6 @@ static void bfin_irq_clear(struct ata_port *ap)
}
/**
- * bfin_irq_on - Enable interrupts on a port.
- * @ap: Port on which interrupts are enabled.
- *
- * Note: Original code is ata_sff_irq_on().
- */
-
-static unsigned char bfin_irq_on(struct ata_port *ap)
-{
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- u8 tmp;
-
- dev_dbg(ap->dev, "in atapi irq on\n");
- ap->ctl &= ~ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
- tmp = ata_wait_idle(ap);
-
- bfin_irq_clear(ap);
-
- return tmp;
-}
-
-/**
- * bfin_freeze - Freeze DMA controller port
- * @ap: port to freeze
- *
- * Note: Original code is ata_sff_freeze().
- */
-
-static void bfin_freeze(struct ata_port *ap)
-{
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-
- dev_dbg(ap->dev, "in atapi dma freeze\n");
- ap->ctl |= ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
-
- /* Under certain circumstances, some controllers raise IRQ on
- * ATA_NIEN manipulation. Also, many controllers fail to mask
- * previously pending IRQ on ATA_NIEN assertion. Clear it.
- */
- ap->ops->sff_check_status(ap);
-
- bfin_irq_clear(ap);
-}
-
-/**
* bfin_thaw - Thaw DMA controller port
* @ap: port to thaw
*
@@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi dma thaw\n");
bfin_check_status(ap);
- bfin_irq_on(ap);
+ ata_sff_irq_on(ap);
}
/**
@@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes)
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
/* re-enable interrupts */
- bfin_irq_on(ap);
+ ata_sff_irq_on(ap);
/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)
@@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
+ struct ata_port *ap = host->ports[i];
+ struct ata_queued_cmd *qc;
- ap = host->ports[i];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
- handled |= bfin_ata_host_intr(ap, qc);
- }
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled |= bfin_ata_host_intr(ap, qc);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = {
};
static struct ata_port_operations bfin_pata_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.set_piomode = bfin_set_piomode,
.set_dmamode = bfin_set_dmamode,
@@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = {
.sff_check_status = bfin_check_status,
.sff_check_altstatus = bfin_check_altstatus,
.sff_dev_select = bfin_dev_select,
+ .sff_set_devctl = bfin_set_devctl,
.bmdma_setup = bfin_bmdma_setup,
.bmdma_start = bfin_bmdma_start,
@@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = {
.qc_prep = ata_noop_qc_prep,
- .freeze = bfin_freeze,
.thaw = bfin_thaw,
.softreset = bfin_softreset,
.postreset = bfin_postreset,
.sff_irq_clear = bfin_irq_clear,
- .sff_irq_on = bfin_irq_on,
.port_start = bfin_port_start,
.port_stop = bfin_port_stop,
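
The bfin conversion above drops the driver's private bfin_irq_on()/bfin_freeze() and instead supplies a sff_set_devctl hook, letting the generic SFF helpers drive the device-control register. A minimal sketch of how such a hook pairs with a generic irq-on path follows; it paraphrases the idea rather than quoting libata-sff, and generic_set_devctl/generic_irq_on are illustrative names, not kernel symbols.

/* Hedged sketch: how a sff_set_devctl-style hook can be consumed by a
 * generic irq-on path.  Only sff_set_devctl, sff_irq_clear and ATA_NIEN
 * come from the patch above; the rest is illustrative. */
static void generic_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)			/* driver override, e.g. bfin_set_devctl */
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);	/* default: write ctl/altstatus directly */
}

static void generic_irq_on(struct ata_port *ap)
{
	ap->ctl &= ~ATA_NIEN;				/* unmask device interrupts */
	ap->last_ctl = ap->ctl;
	generic_set_devctl(ap, ap->ctl);
	ap->ops->sff_irq_clear(ap);			/* drop anything already pending */
}
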
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 45896b3c653..e5f289f59ca 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;
- int ret = ata_sff_port_start(ap);
- if (ret < 0)
- return ret;
-
timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
- return ret;
+ return 0;
}
static struct scsi_host_template cmd640_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations cmd640_port_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
.sff_data_xfer = ata_sff_data_xfer_noirq,
.qc_issue = cmd640_qc_issue,
@@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = {
static void cmd640_hardware_init(struct pci_dev *pdev)
{
- u8 r;
u8 ctrl;
/* CMD640 detected, commiserations */
pci_write_config_byte(pdev, 0x5B, 0x00);
- /* Get version info */
- pci_read_config_byte(pdev, CFR, &r);
/* PIO0 command cycles */
pci_write_config_byte(pdev, CMDTIM, 0);
/* 512 byte bursts (sector) */
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 4c81a71b887..9f5da1c7454 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
- return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 95ebdac517f..030952f1f97 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
static struct ata_port_operations cs5520_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = ata_cable_40wire,
.set_piomode = cs5520_set_piomode,
};
@@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
continue;
rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
- ata_sff_interrupt, 0, DRV_NAME, host);
+ ata_bmdma_interrupt, 0, DRV_NAME, host);
if (rc)
return rc;
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 738ad2e14a9..f792330f0d8 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
cs5530_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct scsi_host_template cs5530_sht = {
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
static struct ata_port_operations cs5530_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = cs5530_qc_issue,
.cable_detect = ata_cable_40wire,
@@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[1] = &info_palmax_secondary;
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a02e6459fdc..03a93186aa1 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
rdmsr(ATAC_CH0D1_PIO, timings, dummy);
if (CS5535_BAD_PIO(timings))
wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
- return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 914ae3506ff..21ee23f89e8 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
}
static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 0fcc096b8da..6d915b063d9 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
- return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
}
static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3bac0e07969..a08834758ea 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index af49bfb5724..7688868557b 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
static int hpt36x_cable_detect(struct ata_port *ap)
@@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
break;
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
}
#ifdef CONFIG_PM
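
hpt366 above, and hpt37x, pdc2027x, scc, serverworks and via further down, stop chaining into ata_bmdma_mode_filter() and simply return the adjusted mask, since the BMDMA-level restrictions are now applied by the core. A converted driver filter therefore reduces to the shape below; the restriction shown is illustrative, not a claim about any particular chip.

/* Hedged sketch of a post-conversion ->mode_filter: mask out what the
 * hardware cannot do and hand the result straight back to the core. */
static unsigned long example_mode_filter(struct ata_device *adev,
					 unsigned long mask)
{
	if (adev->class == ATA_DEV_ATAPI)	/* illustrative restriction */
		mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return mask;				/* no ata_bmdma_mode_filter() tail call */
}
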
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 8839307a64c..9ae4c083057 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
}
static const struct pci_device_id hpt37x[] = {
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 01457b266f3..32f3463216b 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct scsi_host_template hpt3x2n_sht = {
@@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
}
static const struct pci_device_id hpt3x2n[] = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 727a81ce4c9..b63d5e2d462 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &hpt3x3_sht);
}
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index fa812e206ee..9f2889fe43b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
}
static struct ata_port_operations pata_icside_port_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
/* no need to build any PRD tables for DMA */
.qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer_noirq,
@@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
.postreset = pata_icside_postreset,
- .post_internal_cmd = pata_icside_bmdma_stop,
+
+ .port_start = ATA_OP_NULL, /* don't need PRD table */
};
static void __devinit
@@ -469,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
}
- return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
&pata_icside_sht);
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index f971f0de88e..4d142a2ab8f 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 5cb286fd839..bf88f71a21f 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
case 0xFC: /* Internal 'report rebuild state' */
/* Arguably should just no-op this one */
case ATA_CMD_SET_FEATURES:
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
return AC_ERR_DEV;
@@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
{
it821x_passthru_dev_select(qc->ap, qc->dev->devno);
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
struct it821x_dev *itdev;
u8 conf;
- int ret = ata_sff_port_start(ap);
+ int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
ppi[0] = &info_smart;
}
- return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 565e01e6ac7..cb3babbb703 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 211b6438b3a..76640ac7688 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
if (priv->dma_table_cpu == NULL) {
dev_err(priv->dev, "Unable to allocate DMA command list\n");
ap->ioaddr.bmdma_addr = NULL;
+ ap->mwdma_mask = 0;
+ ap->udma_mask = 0;
}
return 0;
}
@@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = {
};
static struct ata_port_operations pata_macio_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.freeze = pata_macio_freeze,
.set_piomode = pata_macio_set_timings,
@@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = {
.cable_detect = pata_macio_cable_detect,
.sff_dev_select = pata_macio_dev_select,
.qc_prep = pata_macio_qc_prep,
- .mode_filter = ata_bmdma_mode_filter,
.bmdma_setup = pata_macio_bmdma_setup,
.bmdma_start = pata_macio_bmdma_start,
.bmdma_stop = pata_macio_bmdma_stop,
@@ -1109,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
/* Start it up */
priv->irq = irq;
- return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
+ return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
&pata_macio_sht);
}
@@ -1139,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
"Failed to allocate private memory\n");
return -ENOMEM;
}
- priv->node = of_node_get(mdev->ofdev.node);
+ priv->node = of_node_get(mdev->ofdev.dev.of_node);
priv->mdev = mdev;
priv->dev = &mdev->ofdev.dev;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index e8ca02e5a71..dd38083dcbe 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
return -ENODEV;
}
#endif
- return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}
static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 9f5b053611d..f087ab55b1d 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {
/* ATAPI-4 PIO specs (in ns) */
-static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
-static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
-static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
-static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
-static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
-static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
-static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
+static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
+static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
+static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
+static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
+static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
+static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
+static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
@@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
/* ATAPI-4 MDMA specs (in clocks) */
struct mdmaspec {
- u32 t0M;
- u32 td;
- u32 th;
- u32 tj;
- u32 tkw;
- u32 tm;
- u32 tn;
+ u8 t0M;
+ u8 td;
+ u8 th;
+ u8 tj;
+ u8 tkw;
+ u8 tm;
+ u8 tn;
};
static const struct mdmaspec mdmaspec66[3] = {
@@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
/* ATAPI-4 UDMA specs (in clocks) */
struct udmaspec {
- u32 tcyc;
- u32 t2cyc;
- u32 tds;
- u32 tdh;
- u32 tdvs;
- u32 tdvh;
- u32 tfs;
- u32 tli;
- u32 tmli;
- u32 taz;
- u32 tzah;
- u32 tenv;
- u32 tsr;
- u32 trfs;
- u32 trp;
- u32 tack;
- u32 tss;
+ u8 tcyc;
+ u8 t2cyc;
+ u8 tds;
+ u8 tdh;
+ u8 tdvs;
+ u8 tdvh;
+ u8 tfs;
+ u8 tli;
+ u8 tmli;
+ u8 taz;
+ u8 tzah;
+ u8 tenv;
+ u8 tsr;
+ u8 trfs;
+ u8 trp;
+ u8 tack;
+ u8 tss;
};
static const struct udmaspec udmaspec66[6] = {
@@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
unsigned int ipb_period = priv->ipb_period;
- unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
+ u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
if ((pio < 0) || (pio > 4))
return -EINVAL;
@@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm);
- t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8);
+ t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
+ t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t->using_udma = 0;
return 0;
@@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh;
- t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli;
- t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr;
- t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack;
- t->udma5 = (s->tzah << 24);
+ t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
+ t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
+ t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
+ t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
+ t->udma5 = (u32)s->tzah << 24;
t->using_udma = 1;
return 0;
@@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
/* activate host */
- return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
&mpc52xx_ata_sht);
}
@@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
struct bcom_task *dmatsk = NULL;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+ ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
@@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
/* Get device base address from device tree, request the region
* and ioremap it. */
- rv = of_address_to_resource(op->node, 0, &res_mem);
+ rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
if (rv) {
dev_err(&op->dev, "could not determine device base address\n");
return rv;
@@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
* The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
* UDMA modes 0, 1 and 2.
*/
- prop = of_get_property(op->node, "mwdma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
if ((prop) && (proplen >= 4))
mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
- prop = of_get_property(op->node, "udma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
if ((prop) && (proplen >= 4))
udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
- ata_irq = irq_of_parse_and_map(op->node, 0);
+ ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (ata_irq == NO_IRQ) {
dev_err(&op->dev, "error mapping irq\n");
return -EINVAL;
@@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .match_table = mpc52xx_ata_of_match,
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM
@@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mpc52xx_ata_of_match,
},
};
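
With the mpc52xx timing fields narrowed to u8, the explicit (u32) casts added above keep each shifted field out of the signed int that the default integer promotions would otherwise use (a value of 0x80 or more shifted left by 24 would overflow int). The packing pattern in isolation, as a stand-alone sketch:

#include <stdint.h>

/* Hedged sketch, outside the driver: pack four 8-bit timing fields into a
 * 32-bit register word.  Casting to uint32_t before shifting keeps the
 * arithmetic unsigned for the field placed in bits 31:24. */
static uint32_t pack_timing(uint8_t t0, uint8_t td, uint8_t tkw, uint8_t tm)
{
	return ((uint32_t)t0 << 24) | ((uint32_t)td << 16) |
	       ((uint32_t)tkw << 8) | tm;
}
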
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 94f979a7f4f..3eb921c746a 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ata_pci_bmdma_clear_simplex(pdev);
/* And let the library code do the work */
- return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
}
static const struct pci_device_id netcell_pci_tbl[] = {
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index dd53a66b19e..cc50bd09aa2 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
- return ata_host_activate(host, dev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &ninja32_sht);
}
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 830431f036a..605f198f958 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
- iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+ iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
@@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ns87415_fixup(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
}
static const struct pci_device_id ns87415_pci_tbl[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 005a44483a7..06ddd91ffed 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
ata_wait_idle(ap);
}
-static u8 octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_irq_on(struct ata_port *ap)
{
- return 0;
}
static void octeon_cf_irq_clear(struct ata_port *ap)
@@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
ap = host->ports[i];
ocd = ap->dev->platform_data;
- if (ap->flags & ATA_FLAG_DISABLED)
- continue;
-
ocd = ap->dev->platform_data;
cf_port = ap->private_data;
dma_int.u64 =
@@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE)) {
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
if (dma_int.s.done && !dma_cfg.s.en) {
if (!sg_is_last(qc->cursg)) {
qc->cursg = sg_next(qc->cursg);
@@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
@@ -756,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev)
}
/*
- * Trap if driver tries to do standard bmdma commands. They are not
- * supported.
- */
-static void unreachable_qc(struct ata_queued_cmd *qc)
-{
- BUG();
-}
-
-static u8 unreachable_port(struct ata_port *ap)
-{
- BUG();
-}
-
-/*
* We don't do ATAPI DMA so return 0.
*/
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
@@ -810,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = {
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_irq_on,
.sff_irq_clear = octeon_cf_irq_clear,
- .bmdma_setup = unreachable_qc,
- .bmdma_start = unreachable_qc,
- .bmdma_stop = unreachable_qc,
- .bmdma_status = unreachable_port,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
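
octeon_cf above, like bfin earlier and pdc_adma, inic162x, sata_mv and sata_nv below, drops the ATA_FLAG_DISABLED and ATA_QCFLAG_ACTIVE tests from its interrupt path; with disabled ports gone from the core, the per-port check shrinks to the polling test. The shared loop shape, sketched here with ata_bmdma_port_intr() as the per-port service routine (as sata_nv below now uses) and locking omitted:

/* Hedged sketch of the simplified per-port interrupt walk used by the
 * converted drivers; host->lock handling is left out for brevity. */
static unsigned int example_walk_ports(struct ata_host *host)
{
	unsigned int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_bmdma_port_intr(ap, qc);
	}
	return handled;
}
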
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1f18ad9e4fe..5a1b82c08be 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
int ret;
- struct device_node *dn = ofdev->node;
+ struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
struct resource irq_res;
@@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = {
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct of_platform_driver pata_of_platform_driver = {
- .name = "pata_of_platform",
- .match_table = pata_of_platform_match,
+ .driver = {
+ .name = "pata_of_platform",
+ .owner = THIS_MODULE,
+ .of_match_table = pata_of_platform_match,
+ },
.probe = pata_of_platform_probe,
.remove = __devexit_p(pata_of_platform_remove),
};
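
pata_of_platform here, and mpc52xx and sata_fsl elsewhere in this patch, follow the same OF migration: the device node is reached through ofdev->dev.of_node and the match table moves under the embedded struct device_driver. The resulting registration shape, with example_* as placeholder names:

/* Hedged sketch of the converted of_platform_driver layout; the compatible
 * string and the probe/remove callbacks are placeholders. */
static int example_probe(struct of_device *ofdev, const struct of_device_id *match);
static int example_remove(struct of_device *ofdev);

static struct of_device_id example_of_match[] = {
	{ .compatible = "example,pata-controller" },
	{ /* sentinel */ },
};

static struct of_platform_driver example_of_driver = {
	.driver = {
		.name		= "example-pata",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,	/* was a top-level .match_table */
	},
	.probe	= example_probe,			/* uses ofdev->dev.of_node, not ofdev->node */
	.remove	= example_remove,
};
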
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 5f6aba7eb0d..b811c163620 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
if (ata_dma_enabled(adev))
oldpiix_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
@@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 76b7d12b1e8..0852cd07de0 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (optiplus_with_udma(dev))
ppi[0] = &info_82c700_udma;
- return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
}
static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index d94b8f0bd74..118c28e8aba 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -45,16 +45,6 @@
#define DRV_NAME "pata_pcmcia"
#define DRV_VERSION "0.3.5"
-/*
- * Private data structure to glue stuff together
- */
-
-struct ata_pcmcia_info {
- struct pcmcia_device *pdev;
- int ndev;
- dev_node_t node;
-};
-
/**
* pcmcia_set_mode - PCMCIA specific mode setup
* @link: link
@@ -175,7 +165,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
.sff_data_xfer = ata_data_xfer_8bit,
.cable_detect = ata_cable_40wire,
.set_mode = pcmcia_set_mode_8bit,
- .drain_fifo = pcmcia_8bit_drain_fifo,
+ .sff_drain_fifo = pcmcia_8bit_drain_fifo,
};
@@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
- struct ata_pcmcia_info *info;
struct pcmcia_config_check *stk = NULL;
int is_kme = 0, ret = -ENOMEM, p;
unsigned long io_base, ctl_base;
@@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
int n_ports = 1;
struct ata_port_operations *ops = &pcmcia_port_ops;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- /* Glue stuff together. FIXME: We may be able to get rid of info with care */
- info->pdev = pdev;
- pdev->priv = info;
-
/* Set up attributes in order to probe card and get resources */
pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
pdev->io.IOAddrLines = 3;
- pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
pdev->conf.Attributes = CONF_ENABLE_IRQ;
pdev->conf.IntType = INT_MEMORY_AND_IO;
@@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
io_base = pdev->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(pdev, &pdev->irq);
- if (ret)
+ if (!pdev->irq)
goto failed;
ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
/* activate */
- ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt,
+ ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
IRQF_SHARED, &pcmcia_sht);
if (ret)
goto failed;
- info->ndev = 1;
+ pdev->priv = host;
kfree(stk);
return 0;
failed:
kfree(stk);
- info->ndev = 0;
pcmcia_disable_device(pdev);
out1:
- kfree(info);
return ret;
}
@@ -372,20 +349,12 @@ out1:
static void pcmcia_remove_one(struct pcmcia_device *pdev)
{
- struct ata_pcmcia_info *info = pdev->priv;
- struct device *dev = &pdev->dev;
-
- if (info != NULL) {
- /* If we have attached the device to the ATA layer, detach it */
- if (info->ndev) {
- struct ata_host *host = dev_get_drvdata(dev);
- ata_host_detach(host);
- }
- info->ndev = 0;
- pdev->priv = NULL;
- }
+ struct ata_host *host = pdev->priv;
+
+ if (host)
+ ata_host_detach(host);
+
pcmcia_disable_device(pdev);
- kfree(info);
}
static struct pcmcia_device_id pcmcia_devices[] = {
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ca5cad0fd80..b1835112252 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
struct ata_device *pair = ata_dev_pair(adev);
if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
/* Check for slave of a Maxtor at UDMA6 */
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
return -EIO;
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &pdc2027x_sht);
}
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 9ac0897cf8b..c39f213e1bb 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
u8 burst = ioread8(bmdma + 0x1f);
iowrite8(burst | 0x01, bmdma + 0x1f);
}
- return ata_sff_port_start(ap);
+ return ata_bmdma_port_start(ap);
}
/**
@@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
return -ENODEV;
}
}
- return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 98161541484..cb01bf9496f 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
/* Just one port for the moment */
- return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
static struct pci_device_id ata_tosh[] = {
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 3f6ebc6c665..50400fa120f 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
.set_mode = pata_platform_set_mode,
- .port_start = ATA_OP_NULL,
};
static void pata_platform_setup_port(struct ata_ioports *ioaddr,
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index fc9602229ac..8574b31f177 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
radisys_set_piomode(ap, adev);
}
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
@@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 37092cfd7bc..5fbe9b166c6 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
*/
pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
static void rdc_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index dfecc6f964b..e2c18257adf 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
sc1200_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
static struct ata_port_operations sc1200_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = sc1200_qc_issue,
.qc_defer = sc1200_qc_defer,
.cable_detect = ata_cable_40wire,
@@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
}
static const struct pci_device_id sc1200[] = {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 4257d6b40af..d9db3f8d60e 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -416,6 +416,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device)
}
/**
+ * scc_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ */
+
+static void scc_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ out_be32(ap->ioaddr.ctl_addr, ctl);
+}
+
+/**
* scc_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
@@ -430,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc)
void __iomem *mmio = ap->ioaddr.bmdma_addr;
/* load PRD table addr */
- out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
+ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
/* specify data direction, triple-check start bit is clear */
dmactl = in_be32(mmio + SCC_DMA_CMD);
@@ -501,8 +512,8 @@ static unsigned int scc_devchk (struct ata_port *ap,
* Note: Original code is ata_sff_wait_after_reset
*/
-int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
- unsigned long deadline)
+static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -817,54 +828,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
}
/**
- * scc_irq_on - Enable interrupts on a port.
- * @ap: Port on which interrupts are enabled.
- *
- * Note: Original code is ata_sff_irq_on().
- */
-
-static u8 scc_irq_on (struct ata_port *ap)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 tmp;
-
- ap->ctl &= ~ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- out_be32(ioaddr->ctl_addr, ap->ctl);
- tmp = ata_wait_idle(ap);
-
- ap->ops->sff_irq_clear(ap);
-
- return tmp;
-}
-
-/**
- * scc_freeze - Freeze BMDMA controller port
- * @ap: port to freeze
- *
- * Note: Original code is ata_sff_freeze().
- */
-
-static void scc_freeze (struct ata_port *ap)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- ap->ctl |= ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- out_be32(ioaddr->ctl_addr, ap->ctl);
-
- /* Under certain circumstances, some controllers raise IRQ on
- * ATA_NIEN manipulation. Also, many controllers fail to mask
- * previously pending IRQ on ATA_NIEN assertion. Clear it.
- */
- ap->ops->sff_check_status(ap);
-
- ap->ops->sff_irq_clear(ap);
-}
-
-/**
* scc_pata_prereset - prepare for reset
* @ap: ATA port to be reset
* @deadline: deadline jiffies for the operation
@@ -903,8 +866,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
}
/* set up device control */
- if (ap->ioaddr.ctl_addr)
- out_be32(ap->ioaddr.ctl_addr, ap->ctl);
+ out_be32(ap->ioaddr.ctl_addr, ap->ctl);
DPRINTK("EXIT\n");
}
@@ -913,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
* scc_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void scc_irq_clear (struct ata_port *ap)
@@ -930,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap)
* scc_port_start - Set port up for dma.
* @ap: Port to initialize
*
- * Allocate space for PRD table using ata_port_start().
+ * Allocate space for PRD table using ata_bmdma_port_start().
* Set PRD table address for PTERADD. (PRD Transfer End Read)
*/
@@ -939,11 +901,11 @@ static int scc_port_start (struct ata_port *ap)
void __iomem *mmio = ap->ioaddr.bmdma_addr;
int rc;
- rc = ata_port_start(ap);
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
- out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
+ out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
return 0;
}
@@ -978,6 +940,7 @@ static struct ata_port_operations scc_pata_ops = {
.sff_check_status = scc_check_status,
.sff_check_altstatus = scc_check_altstatus,
.sff_dev_select = scc_dev_select,
+ .sff_set_devctl = scc_set_devctl,
.bmdma_setup = scc_bmdma_setup,
.bmdma_start = scc_bmdma_start,
@@ -985,14 +948,11 @@ static struct ata_port_operations scc_pata_ops = {
.bmdma_status = scc_bmdma_status,
.sff_data_xfer = scc_data_xfer,
- .freeze = scc_freeze,
.prereset = scc_pata_prereset,
.softreset = scc_softreset,
.postreset = scc_postreset,
- .post_internal_cmd = scc_bmdma_stop,
.sff_irq_clear = scc_irq_clear,
- .sff_irq_on = scc_irq_on,
.port_start = scc_port_start,
.port_stop = scc_port_stop,
@@ -1145,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &scc_sht);
}
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 99cceb458e2..e97b32f03a6 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
{
static int printed_version;
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
- struct ata_host *host;
- int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- /* enable device and prepare host */
- rc = pcim_enable_device(pdev);
- if (rc)
- return rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
- if (rc)
- return rc;
- pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
static int __init sch_init(void)
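
The sch conversion above collapses its open-coded bringup into a single ata_pci_bmdma_init_one() call. For reference, the sequence it subsumes is roughly the removed lines, re-assembled as a sketch using the bmdma-flavoured helpers this patch uses elsewhere (pata_rdc pairs ata_pci_bmdma_prepare_host with ata_pci_sff_activate_host in the same way):

/* Hedged sketch of the bringup sequence that ata_pci_bmdma_init_one()
 * replaces in sch_init_one(); mirrors the lines removed above. */
static int open_coded_bringup(struct pci_dev *pdev,
			      const struct ata_port_info **ppi,
			      struct scsi_host_template *sht)
{
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);		/* managed PCI enable */
	if (rc)
		return rc;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}
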
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9524d54035f..86dd714e3e1 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
@@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
/* Disk, UDMA */
if (adev->class != ATA_DEV_ATA)
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
/* Actually do need to check */
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
if (!strcmp(p, model_num))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
ata_pci_bmdma_clear_simplex(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c6c589c23ff..d3190d7ec30 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
pci_write_config_word(pdev, ua, ultra);
}
+/**
+ * sil680_sff_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA command, with proper synchronization with interrupt
+ * handler / other threads. Use our MMIO space for PCI posting to avoid
+ * a hideously slow cycle all the way to the device.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void sil680_sff_exec_command(struct ata_port *ap,
+				     const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+ iowrite8(tf->command, ap->ioaddr.command_addr);
+ ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
static struct scsi_host_template sil680_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
+
static struct ata_port_operations sil680_port_ops = {
- .inherits = &ata_bmdma32_port_ops,
- .cable_detect = sil680_cable_detect,
- .set_piomode = sil680_set_piomode,
- .set_dmamode = sil680_set_dmamode,
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_exec_command = sil680_sff_exec_command,
+ .cable_detect = sil680_cable_detect,
+ .set_piomode = sil680_set_piomode,
+ .set_dmamode = sil680_set_dmamode,
};
/**
@@ -352,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
- return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b6708032f32..60cea13cccc 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
sis_fixup(pdev, chipset);
- return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 733b042a746..98548f640c8 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(dev, 0x40, val);
- return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 48f50600ed2..0d1f89e571d 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 741e7cb69d8..5e659885de1 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
mask &= ~ ATA_MASK_UDMA;
}
}
- return ata_bmdma_mode_filter(dev, mask);
+ return mask;
}
/**
@@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
tf->lbam,
tf->lbah);
}
-
- ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
@@ -426,7 +424,7 @@ static int via_port_start(struct ata_port *ap)
struct via_port *vp;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int ret = ata_sff_port_start(ap);
+ int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -629,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* We have established the device type, now fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5904cfdb8db..adbe0426c8f 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("ENTER\n");
adma_enter_reg_mode(qc->ap);
- if (qc->tf.protocol != ATA_PROT_DMA) {
- ata_sff_qc_prep(qc);
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
- }
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
continue;
handled = 1;
adma_enter_reg_mode(ap);
- if (ap->flags & ATA_FLAG_DISABLED)
- continue;
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
continue;
@@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
- struct ata_port *ap;
- ap = host->ports[port_no];
- if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
- struct ata_queued_cmd *qc;
- struct adma_port_priv *pp = ap->private_data;
- if (!pp || pp->state != adma_state_mmio)
+ struct ata_port *ap = host->ports[port_no];
+ struct adma_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
+ if (!pp || pp->state != adma_state_mmio)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+
+ /* check main status, clearing INTRQ */
+ u8 status = ata_sff_check_status(ap);
+ if ((status & ATA_BUSY))
continue;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-
- /* check main status, clearing INTRQ */
- u8 status = ata_sff_check_status(ap);
- if ((status & ATA_BUSY))
- continue;
- DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, status);
-
- /* complete taskfile transaction */
- pp->state = adma_state_idle;
- qc->err_mask |= ac_err_mask(status);
- if (!qc->err_mask)
- ata_qc_complete(qc);
- else {
- struct ata_eh_info *ehi =
- &ap->link.eh_info;
- ata_ehi_clear_desc(ehi);
- ata_ehi_push_desc(ehi,
- "status 0x%02X", status);
-
- if (qc->err_mask == AC_ERR_DEV)
- ata_port_abort(ap);
- else
- ata_port_freeze(ap);
- }
- handled = 1;
+ DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+ ap->print_id, qc->tf.protocol, status);
+
+ /* complete taskfile transaction */
+ pp->state = adma_state_idle;
+ qc->err_mask |= ac_err_mask(status);
+ if (!qc->err_mask)
+ ata_qc_complete(qc);
+ else {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+ if (qc->err_mask == AC_ERR_DEV)
+ ata_port_abort(ap);
+ else
+ ata_port_freeze(ap);
}
+ handled = 1;
}
}
return handled;
@@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
- int rc;
- rc = ata_port_start(ap);
- if (rc)
- return rc;
adma_enter_reg_mode(ap);
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a69192b38b4..61c89b54ea2 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
dev_printk(KERN_INFO, &ofdev->dev,
"Sata FSL Platform/CSB Driver init\n");
- hcr_base = of_iomap(ofdev->node, 0);
+ hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
goto error_exit_with_cleanup;
@@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
- irq = irq_of_parse_and_map(ofdev->node, 0);
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (irq < 0) {
dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
@@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = {
MODULE_DEVICE_TABLE(of, fsl_sata_match);
static struct of_platform_driver fsl_sata_driver = {
- .name = "fsl-sata",
- .match_table = fsl_sata_match,
+ .driver = {
+ .name = "fsl-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_sata_match,
+ },
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 27dc6c86a4c..a36149ebf4a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
spin_lock(&host->lock);
- for (i = 0; i < NR_PORTS; i++) {
- struct ata_port *ap = host->ports[i];
-
- if (!(host_irq_stat & (HIRQ_PORT0 << i)))
- continue;
-
- if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
- inic_host_intr(ap);
+ for (i = 0; i < NR_PORTS; i++)
+ if (host_irq_stat & (HIRQ_PORT0 << i)) {
+ inic_host_intr(host->ports[i]);
handled++;
- } else {
- if (ata_ratelimit())
- dev_printk(KERN_ERR, host->dev, "interrupt "
- "from disabled port %d (0x%x)\n",
- i, host_irq_stat);
}
- }
spin_unlock(&host->lock);
@@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap)
memset(pp->pkt, 0, sizeof(struct inic_pkt));
memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
- /* setup PRD and CPB lookup table addresses */
- writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+ /* setup CPB lookup table addresses */
writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}
@@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct inic_port_priv *pp;
- int rc;
/* alloc and initialize private data */
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
ap->private_data = pp;
/* Alloc resources */
- rc = ata_port_start(ap);
- if (rc)
- return rc;
-
pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
&pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 71cc0d42f9e..a476cd99b95 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
- .error_handler = ata_std_error_handler, /* avoid SFF EH */
- .post_internal_cmd = ATA_OP_NULL,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -686,16 +684,27 @@ static struct ata_port_operations mv5_ops = {
};
static struct ata_port_operations mv6_ops = {
- .inherits = &mv5_ops,
+ .inherits = &ata_bmdma_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+
+ .qc_defer = mv_qc_defer,
+ .qc_prep = mv_qc_prep,
+ .qc_issue = mv_qc_issue,
+
.dev_config = mv6_dev_config,
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+ .hardreset = mv_hardreset,
+ .softreset = mv_softreset,
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
- .softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
+ .scr_read = mv_scr_read,
+ .scr_write = mv_scr_write,
+
.sff_check_status = mv_sff_check_status,
.sff_irq_clear = mv_sff_irq_clear,
.check_atapi_dma = mv_check_atapi_dma,
@@ -703,6 +712,9 @@ static struct ata_port_operations mv6_ops = {
.bmdma_start = mv_bmdma_start,
.bmdma_stop = mv_bmdma_stop,
.bmdma_status = mv_bmdma_status,
+
+ .port_start = mv_port_start,
+ .port_stop = mv_port_stop,
};
static struct ata_port_operations mv_iie_ops = {
@@ -2248,7 +2260,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
return 0;
}
@@ -2344,7 +2356,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
if (IS_GEN_II(hpriv))
return mv_qc_issue_fis(qc);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
@@ -2355,13 +2367,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return NULL;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc) {
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- qc = NULL;
- else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
- qc = NULL;
- }
- return qc;
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ return qc;
+ return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
@@ -2546,9 +2554,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
char *when = "idle";
ata_ehi_clear_desc(ehi);
- if (ap->flags & ATA_FLAG_DISABLED) {
- when = "disabled";
- } else if (edma_was_enabled) {
+ if (edma_was_enabled) {
when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -2782,10 +2788,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
struct mv_port_priv *pp;
int edma_was_enabled;
- if (ap->flags & ATA_FLAG_DISABLED) {
- mv_unexpected_intr(ap, 0);
- return;
- }
/*
* Grab a snapshot of the EDMA_EN flag setting,
* so that we have a consistent view for this port,
@@ -2809,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
- ata_sff_host_intr(ap, qc);
+ ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
@@ -3656,9 +3658,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
/* special case: control/altstatus doesn't have ATA_REG_ address */
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
- /* unused: */
- port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
-
/* Clear any currently outstanding port interrupt conditions */
serr = port_mmio + mv_scr_offset(SCR_ERROR);
writelfl(readl(serr), serr);
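The mv6_ops change above stops chaining to mv5_ops and inherits straight from ata_bmdma_port_ops, spelling out every method the GEN_II hardware overrides. libata resolves such tables by walking the .inherits chain and filling any slot the driver left unset. The user-space sketch below models that resolution step under those assumptions; the struct layout and the finalize_ops() helper are simplified stand-ins, not the kernel's own code.

#include <stdio.h>

/* Simplified model of libata-style ops inheritance (illustrative only). */
struct port_ops {
	const struct port_ops *inherits;
	void (*qc_issue)(void);
	void (*error_handler)(void);
	void (*port_start)(void);
};

static void generic_issue(void)  { puts("generic qc_issue"); }
static void generic_eh(void)     { puts("generic error_handler"); }
static void mv_issue(void)       { puts("mv qc_issue"); }

static const struct port_ops base_ops = {
	.qc_issue      = generic_issue,
	.error_handler = generic_eh,
};

static struct port_ops mv6_ops = {
	.inherits = &base_ops,
	.qc_issue = mv_issue,   /* explicit override */
	/* error_handler left unset: picked up from base_ops below */
};

/* Fill every NULL slot from the inherits chain; the most-derived table wins. */
static void finalize_ops(struct port_ops *ops)
{
	const struct port_ops *p;

	for (p = ops->inherits; p; p = p->inherits) {
		if (!ops->qc_issue)      ops->qc_issue      = p->qc_issue;
		if (!ops->error_handler) ops->error_handler = p->error_handler;
		if (!ops->port_start)    ops->port_start    = p->port_start;
		/* port_start stays NULL here: nobody in the chain provides it */
	}
}

int main(void)
{
	finalize_ops(&mv6_ops);
	mv6_ops.qc_issue();       /* mv qc_issue (override) */
	mv6_ops.error_handler();  /* generic error_handler (inherited) */
	return 0;
}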
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2a98b09ab73..6fd11478411 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -272,7 +272,7 @@ enum ncq_saw_flag_list {
};
struct nv_swncq_port_priv {
- struct ata_prd *prd; /* our SG list */
+ struct ata_bmdma_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
void __iomem *sactive_block;
void __iomem *irq_block;
@@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
}
/* handle interrupt */
- return ata_sff_host_intr(ap, qc);
+ return ata_bmdma_port_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -933,107 +933,108 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 status;
+ u32 gen_ctl;
+ u32 notifier, notifier_error;
+
notifier_clears[i] = 0;
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- struct nv_adma_port_priv *pp = ap->private_data;
- void __iomem *mmio = pp->ctl_block;
- u16 status;
- u32 gen_ctl;
- u32 notifier, notifier_error;
-
- /* if ADMA is disabled, use standard ata interrupt handler */
- if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
- u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
- >> (NV_INT_PORT_SHIFT * i);
- handled += nv_host_intr(ap, irq_stat);
- continue;
- }
+ /* if ADMA is disabled, use standard ata interrupt handler */
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ handled += nv_host_intr(ap, irq_stat);
+ continue;
+ }
- /* if in ATA register mode, check for standard interrupts */
- if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
- u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
- >> (NV_INT_PORT_SHIFT * i);
- if (ata_tag_valid(ap->link.active_tag))
- /** NV_INT_DEV indication seems unreliable at times
- at least in ADMA mode. Force it on always when a
- command is active, to prevent losing interrupts. */
- irq_stat |= NV_INT_DEV;
- handled += nv_host_intr(ap, irq_stat);
- }
+ /* if in ATA register mode, check for standard interrupts */
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ if (ata_tag_valid(ap->link.active_tag))
+ /** NV_INT_DEV indication seems unreliable
+ at times at least in ADMA mode. Force it
+ on always when a command is active, to
+ prevent losing interrupts. */
+ irq_stat |= NV_INT_DEV;
+ handled += nv_host_intr(ap, irq_stat);
+ }
+
+ notifier = readl(mmio + NV_ADMA_NOTIFIER);
+ notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+ notifier_clears[i] = notifier | notifier_error;
+
+ gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+ if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+ !notifier_error)
+ /* Nothing to do */
+ continue;
+
+ status = readw(mmio + NV_ADMA_STAT);
+
+ /*
+ * Clear status. Ensure the controller sees the
+ * clearing before we start looking at any of the CPB
+ * statuses, so that any CPB completions after this
+ * point in the handler will raise another interrupt.
+ */
+ writew(status, mmio + NV_ADMA_STAT);
+ readw(mmio + NV_ADMA_STAT); /* flush posted write */
+ rmb();
+
+ handled++; /* irq handled if we got here */
- notifier = readl(mmio + NV_ADMA_NOTIFIER);
- notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
- notifier_clears[i] = notifier | notifier_error;
-
- gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
-
- if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
- !notifier_error)
- /* Nothing to do */
- continue;
-
- status = readw(mmio + NV_ADMA_STAT);
-
- /* Clear status. Ensure the controller sees the clearing before we start
- looking at any of the CPB statuses, so that any CPB completions after
- this point in the handler will raise another interrupt. */
- writew(status, mmio + NV_ADMA_STAT);
- readw(mmio + NV_ADMA_STAT); /* flush posted write */
- rmb();
-
- handled++; /* irq handled if we got here */
-
- /* freeze if hotplugged or controller error */
- if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
- NV_ADMA_STAT_HOTUNPLUG |
- NV_ADMA_STAT_TIMEOUT |
- NV_ADMA_STAT_SERROR))) {
- struct ata_eh_info *ehi = &ap->link.eh_info;
-
- ata_ehi_clear_desc(ehi);
- __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
- if (status & NV_ADMA_STAT_TIMEOUT) {
- ehi->err_mask |= AC_ERR_SYSTEM;
- ata_ehi_push_desc(ehi, "timeout");
- } else if (status & NV_ADMA_STAT_HOTPLUG) {
- ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, "hotplug");
- } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
- ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, "hot unplug");
- } else if (status & NV_ADMA_STAT_SERROR) {
- /* let libata analyze SError and figure out the cause */
- ata_ehi_push_desc(ehi, "SError");
- } else
- ata_ehi_push_desc(ehi, "unknown");
- ata_port_freeze(ap);
- continue;
+ /* freeze if hotplugged or controller error */
+ if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+ NV_ADMA_STAT_HOTUNPLUG |
+ NV_ADMA_STAT_TIMEOUT |
+ NV_ADMA_STAT_SERROR))) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+ __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
+ if (status & NV_ADMA_STAT_TIMEOUT) {
+ ehi->err_mask |= AC_ERR_SYSTEM;
+ ata_ehi_push_desc(ehi, "timeout");
+ } else if (status & NV_ADMA_STAT_HOTPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hotplug");
+ } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hot unplug");
+ } else if (status & NV_ADMA_STAT_SERROR) {
+ /* let EH analyze SError and figure out cause */
+ ata_ehi_push_desc(ehi, "SError");
+ } else
+ ata_ehi_push_desc(ehi, "unknown");
+ ata_port_freeze(ap);
+ continue;
+ }
+
+ if (status & (NV_ADMA_STAT_DONE |
+ NV_ADMA_STAT_CPBERR |
+ NV_ADMA_STAT_CMD_COMPLETE)) {
+ u32 check_commands = notifier_clears[i];
+ int pos, error = 0;
+
+ if (status & NV_ADMA_STAT_CPBERR) {
+ /* check all active commands */
+ if (ata_tag_valid(ap->link.active_tag))
+ check_commands = 1 <<
+ ap->link.active_tag;
+ else
+ check_commands = ap->link.sactive;
}
- if (status & (NV_ADMA_STAT_DONE |
- NV_ADMA_STAT_CPBERR |
- NV_ADMA_STAT_CMD_COMPLETE)) {
- u32 check_commands = notifier_clears[i];
- int pos, error = 0;
-
- if (status & NV_ADMA_STAT_CPBERR) {
- /* Check all active commands */
- if (ata_tag_valid(ap->link.active_tag))
- check_commands = 1 <<
- ap->link.active_tag;
- else
- check_commands = ap->
- link.sactive;
- }
-
- /** Check CPBs for completed commands */
- while ((pos = ffs(check_commands)) && !error) {
- pos--;
- error = nv_adma_check_cpb(ap, pos,
+ /* check CPBs for completed commands */
+ while ((pos = ffs(check_commands)) && !error) {
+ pos--;
+ error = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos));
- check_commands &= ~(1 << pos);
- }
+ check_commands &= ~(1 << pos);
}
}
}
@@ -1099,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
u32 notifier_clears[2];
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
return;
}
@@ -1130,7 +1131,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
struct nv_adma_port_priv *pp = qc->ap->private_data;
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
- ata_sff_post_internal_cmd(qc);
+ ata_bmdma_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
@@ -1155,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap)
if (rc)
return rc;
- rc = ata_port_start(ap);
+ /* we might fallback to bmdma, allocate bmdma resources */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -1407,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
- ata_sff_qc_prep(qc);
+ ata_bmdma_qc_prep(qc);
return;
}
@@ -1466,7 +1468,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
} else
nv_adma_mode(qc->ap);
@@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- ap = host->ports[i];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
+ struct ata_port *ap = host->ports[i];
+ struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled += ata_sff_host_intr(ap, qc);
- else
- // No request pending? Clear interrupt status
- // anyway, in case there's one pending.
- ap->ops->sff_check_status(ap);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ handled += ata_bmdma_port_intr(ap, qc);
+ } else {
+ /*
+ * No request pending? Clear interrupt status
+ * anyway, in case there's one pending.
+ */
+ ap->ops->sff_check_status(ap);
}
-
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1526,11 +1525,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
int i, handled = 0;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- if (ap && !(ap->flags & ATA_FLAG_DISABLED))
- handled += nv_host_intr(ap, irq_stat);
-
+ handled += nv_host_intr(host->ports[i], irq_stat);
irq_stat >>= NV_INT_PORT_SHIFT;
}
@@ -1744,7 +1739,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
- ata_sff_error_handler(ap);
+ ata_bmdma_error_handler(ap);
}
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1870,7 +1865,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
ehc->i.action |= ATA_EH_RESET;
}
- ata_sff_error_handler(ap);
+ ata_bmdma_error_handler(ap);
}
#ifdef CONFIG_PM
@@ -1991,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
struct nv_swncq_port_priv *pp;
int rc;
- rc = ata_port_start(ap);
+ /* we might fallback to bmdma, allocate bmdma resources */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -2016,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
- ata_sff_qc_prep(qc);
+ ata_bmdma_qc_prep(qc);
return;
}
@@ -2031,7 +2027,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg;
struct nv_swncq_port_priv *pp = ap->private_data;
- struct ata_prd *prd;
+ struct ata_bmdma_prd *prd;
unsigned int si, idx;
prd = pp->prd + ATA_MAX_PRD * qc->tag;
@@ -2092,7 +2088,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
struct nv_swncq_port_priv *pp = ap->private_data;
if (qc->tf.protocol != ATA_PROT_NCQ)
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
DPRINTK("Enter\n");
@@ -2380,16 +2376,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- if (ap->link.sactive) {
- nv_swncq_host_interrupt(ap, (u16)irq_stat);
- handled = 1;
- } else {
- if (irq_stat) /* reserve Hotplug */
- nv_swncq_irq_clear(ap, 0xfff0);
+ if (ap->link.sactive) {
+ nv_swncq_host_interrupt(ap, (u16)irq_stat);
+ handled = 1;
+ } else {
+ if (irq_stat) /* reserve Hotplug */
+ nv_swncq_irq_clear(ap, 0xfff0);
- handled += nv_host_intr(ap, (u8)irq_stat);
- }
+ handled += nv_host_intr(ap, (u8)irq_stat);
}
irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
}
@@ -2436,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ppi[0] = &nv_port_info[type];
ipriv = ppi[0]->private_data;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -2479,8 +2473,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
- IRQF_SHARED, ipriv->sht);
+ return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
#ifdef CONFIG_PM
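In the reworked nv_adma_interrupt() above, completed commands arrive as set bits in check_commands, and the handler walks that mask lowest bit first with ffs(), clearing each bit as the matching CPB is checked. The stand-alone sketch below shows just that scan loop; the mask value and the handle_tag() helper are made up for the example.

#include <stdio.h>
#include <strings.h>   /* ffs() */

/* Illustrative per-tag handler: 0 means "no error, keep scanning". */
static int handle_tag(unsigned int tag)
{
	printf("completing command with tag %u\n", tag);
	return 0;
}

int main(void)
{
	unsigned int check_commands = 0x00000415;   /* tags 0, 2, 4, 10 done */
	int pos, error = 0;

	while ((pos = ffs(check_commands)) && !error) {
		pos--;                              /* ffs() is 1-based */
		error = handle_tag(pos);
		check_commands &= ~(1u << pos);     /* clear what we handled */
	}
	return 0;
}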
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5356ec00d2b..f03ad48273f 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
struct pdc_port_priv *pp;
int rc;
- rc = ata_port_start(ap);
+ /* we use the same prd table as bmdma, allocate it */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- dma_addr_t sg_table = ap->prd_dma;
+ dma_addr_t sg_table = ap->bmdma_prd_dma;
unsigned int cdb_len = qc->dev->cdb_len;
u8 *cdb = qc->cdb;
struct pdc_port_priv *pp = ap->private_data;
@@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
const u32 SG_COUNT_ASIC_BUG = 41*4;
unsigned int si, idx;
@@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
- ap->prd[idx].addr = cpu_to_le32(addr);
- ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len & 0xffff);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
@@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
}
}
- len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+ len = le32_to_cpu(prd[idx - 1].flags_len);
if (len > SG_COUNT_ASIC_BUG) {
u32 addr;
VPRINTK("Splitting last PRD.\n");
- addr = le32_to_cpu(ap->prd[idx - 1].addr);
- ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+ addr = le32_to_cpu(prd[idx - 1].addr);
+ prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
addr = addr + len - SG_COUNT_ASIC_BUG;
len = SG_COUNT_ASIC_BUG;
- ap->prd[idx].addr = cpu_to_le32(addr);
- ap->prd[idx].flags_len = cpu_to_le32(len);
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
}
- ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
pdc_fill_sg(qc);
/*FALLTHROUGH*/
case ATA_PROT_NODATA:
- i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
+ i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
qc->dev->devno, pp->pkt);
if (qc->tf.flags & ATA_TFLAG_LBA48)
i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
@@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN))
pdc_reset_port(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* check for a plug or unplug event */
ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
tmp = hotplug_status & (0x11 << ata_no);
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp) {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_hotplugged(ehi);
@@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* check for a packet interrupt */
tmp = mask & (1 << (i + 1));
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
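The sata_promise hunks above move the driver from ap->prd to the renamed ap->bmdma_prd table but keep the long-standing workaround in pdc_fill_sg(): if the final PRD entry exceeds SG_COUNT_ASIC_BUG bytes, it is split so that the very last entry is exactly that length. The sketch below reproduces only the split step with a toy prd struct; the address and length values are illustrative.

#include <stdio.h>
#include <stdint.h>

struct prd { uint32_t addr; uint32_t len; };

#define SG_COUNT_ASIC_BUG (41 * 4)

/* Split the last PRD entry when it is longer than the ASIC-bug threshold,
 * so the tail entry ends up exactly SG_COUNT_ASIC_BUG bytes long. */
static unsigned int split_last_prd(struct prd *prd, unsigned int idx)
{
	uint32_t len = prd[idx - 1].len;

	if (len > SG_COUNT_ASIC_BUG) {
		uint32_t addr = prd[idx - 1].addr;

		prd[idx - 1].len = len - SG_COUNT_ASIC_BUG;       /* shorten */
		prd[idx].addr    = addr + len - SG_COUNT_ASIC_BUG;
		prd[idx].len     = SG_COUNT_ASIC_BUG;             /* new tail */
		idx++;
	}
	return idx;   /* entries after the (optional) split */
}

int main(void)
{
	struct prd table[4] = { { 0x1000, 0x800 } };
	unsigned int i, n = split_last_prd(table, 1);

	for (i = 0; i < n; i++)
		printf("PRD[%u] = (0x%X, 0x%X)\n", i,
		       (unsigned)table[i].addr, (unsigned)table[i].len);
	return 0;
}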
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 92ba45e6689..daeebf19a6a 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
-static void qs_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
- .bmdma_stop = qs_bmdma_stop,
- .bmdma_status = qs_bmdma_status,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
@@ -147,7 +143,6 @@ static struct ata_port_operations qs_ata_ops = {
.prereset = qs_prereset,
.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
- .post_internal_cmd = ATA_OP_NULL,
.lost_interrupt = ATA_OP_NULL,
.scr_read = qs_scr_read,
@@ -191,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
return 1; /* ATAPI DMA not supported */
}
-static void qs_bmdma_stop(struct ata_queued_cmd *qc)
-{
- /* nothing */
-}
-
-static u8 qs_bmdma_status(struct ata_port *ap)
-{
- return 0;
-}
-
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -255,7 +240,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
static void qs_error_handler(struct ata_port *ap)
{
qs_enter_reg_mode(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
@@ -304,10 +289,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("ENTER\n");
qs_enter_reg_mode(qc->ap);
- if (qc->tf.protocol != ATA_PROT_DMA) {
- ata_sff_qc_prep(qc);
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
- }
nelem = qs_fill_sg(qc);
@@ -404,26 +387,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
u8 sHST = sff1 & 0x3f; /* host status */
unsigned int port_no = (sff1 >> 8) & 0x03;
struct ata_port *ap = host->ports[port_no];
+ struct qs_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
- struct qs_port_priv *pp = ap->private_data;
- if (!pp || pp->state != qs_state_pkt)
- continue;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
- switch (sHST) {
- case 0: /* successful CPB */
- case 3: /* device error */
- qs_enter_reg_mode(qc->ap);
- qs_do_or_die(qc, sDST);
- break;
- default:
- break;
- }
+ if (!pp || pp->state != qs_state_pkt)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ switch (sHST) {
+ case 0: /* successful CPB */
+ case 3: /* device error */
+ qs_enter_reg_mode(qc->ap);
+ qs_do_or_die(qc, sDST);
+ break;
+ default:
+ break;
}
}
}
@@ -436,33 +417,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
- struct ata_port *ap;
- ap = host->ports[port_no];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
- struct qs_port_priv *pp;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
- /*
- * The qstor hardware generates spurious
- * interrupts from time to time when switching
- * in and out of packet mode.
- * There's no obvious way to know if we're
- * here now due to that, so just ack the irq
- * and pretend we knew it was ours.. (ugh).
- * This does not affect packet mode.
- */
- ata_sff_check_status(ap);
- handled = 1;
- continue;
- }
- pp = ap->private_data;
- if (!pp || pp->state != qs_state_mmio)
- continue;
- if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ struct ata_port *ap = host->ports[port_no];
+ struct qs_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc) {
+ /*
+ * The qstor hardware generates spurious
+ * interrupts from time to time when switching
+ * in and out of packet mode. There's no
+ * obvious way to know if we're here now due
+ * to that, so just ack the irq and pretend we
+ * knew it was ours.. (ugh). This does not
+ * affect packet mode.
+ */
+ ata_sff_check_status(ap);
+ handled = 1;
+ continue;
}
+
+ if (!pp || pp->state != qs_state_mmio)
+ continue;
+ if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+ handled |= ata_sff_port_intr(ap, qc);
}
return handled;
}
@@ -509,11 +487,7 @@ static int qs_port_start(struct ata_port *ap)
void __iomem *mmio_base = qs_mmio_base(ap->host);
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
- int rc;
- rc = ata_port_start(ap);
- if (rc)
- return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3cb69d5fb81..3a4f8421971 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
- iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+ iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
@@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
- struct ata_prd *prd, *last_prd = NULL;
+ struct ata_bmdma_prd *prd, *last_prd = NULL;
unsigned int si;
- prd = &ap->prd[0];
+ prd = &ap->bmdma_prd[0];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
/* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
goto err_hsm;
/* ack bmdma irq events */
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* kick HSM in the ass */
ata_sff_hsm_move(ap, qc, status, 0);
@@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
- if (unlikely(ap->flags & ATA_FLAG_DISABLED))
- continue;
-
/* turn off SATA_IRQ if not supported */
if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
bmdma2 &= ~SIL_DMA_SATA_IRQ;
@@ -587,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
/* clear IRQ */
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* turn on SATA IRQ if supported */
if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 433b6b89c79..e9250514734 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1160,13 +1160,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++)
if (status & (1 << i)) {
- struct ata_port *ap = host->ports[i];
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- sil24_host_intr(ap);
- handled++;
- } else
- printk(KERN_ERR DRV_NAME
- ": interrupt from disabled port %d\n", i);
+ sil24_host_intr(host->ports[i]);
+ handled++;
}
spin_unlock(&host->lock);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a..2bfe3ae0397 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7257f2d5c52..7d9db4aaf07 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
- writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+ writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = readb(mmio + ATA_DMA_CMD);
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &k2_sata_sht);
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 232468f2ea9..bedd5188e5b 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
- int rc;
-
- rc = ata_port_start(ap);
- if (rc)
- return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
@@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
ap = host->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp && ap) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN))
pdc_reset_port(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 011e098590d..b8578c32d34 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ata_pci_bmdma_init(host);
- if (rc)
- return rc;
+ ata_pci_bmdma_init(host);
iomap = host->iomap;
@@ -244,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc8..101d8c219ca 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
* certain way. Leave it alone and just clear pending IRQ.
*/
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
}
/**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int i, rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
svia_configure(pdev);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &svia_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8b2a278b254..b777176ff49 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled = ata_sff_host_intr(ap, qc);
+ handled = ata_bmdma_port_intr(ap, qc);
/* We received an interrupt during a polled command,
* or some other spurious condition. Interrupt reporting
@@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
u8 port_status = (status >> (8 * i)) & 0xff;
if (port_status) {
- struct ata_port *ap = host->ports[i];
-
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- vsc_port_intr(port_status, ap);
- handled++;
- } else
- dev_printk(KERN_ERR, host->dev,
- "interrupt from disabled port %d\n", i);
+ vsc_port_intr(port_status, host->ports[i]);
+ handled++;
}
}
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 191b85e857e..f1a0a00b3b0 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -394,6 +394,7 @@ config ATM_HE_USE_SUNI
config ATM_SOLOS
tristate "Solos ADSL2+ PCI Multiport card driver"
depends on PCI
+ select FW_LOADER
help
Support for the Solos multiport ADSL2+ card.
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b86712167eb..b9101818b47 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
*(struct atm_vcc **) &new_msg->vcc = vcc;
old_test = test_bit(flag,&vcc->flags);
out_vcc->push(out_vcc,skb);
- add_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
while (test_bit(flag,&vcc->flags) == old_test) {
mb();
out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
return error;
}
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
msg->type);
return -EINVAL;
}
- wake_up(sk_atm(vcc)->sk_sleep);
+ wake_up(sk_sleep(sk_atm(vcc)));
return 0;
}
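The atmtcp changes above replace direct sk->sk_sleep dereferences with the sk_sleep() accessor, so the wait-queue location can be moved or made conditional behind a single helper without touching every caller. A minimal sketch of the same accessor pattern, with made-up struct and function names:

#include <stdio.h>

struct wait_head { int waiters; };

struct sock_like {
	struct wait_head *sleep_wq;   /* where sleepers park themselves */
};

/* Accessor: callers stop poking the field directly. */
static inline struct wait_head *sock_sleep(struct sock_like *sk)
{
	return sk->sleep_wq;
}

int main(void)
{
	struct wait_head wq = { 0 };
	struct sock_like sk = { &wq };

	sock_sleep(&sk)->waiters++;   /* instead of sk.sleep_wq->waiters++ */
	printf("waiters: %d\n", sock_sleep(&sk)->waiters);
	return 0;
}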
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 719ec5a0dca..90a5a7cac74 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
if (i == -1)
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb->data,
- skb->len - skb->data_len);
+ skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f7d6ebaa041..da8f176c051 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -789,7 +789,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
/* get the supported DVMA burst sizes */
- bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00);
+ bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
if (sbus_can_dma_64bit())
sbus_set_sbus64(&op->dev, bursts);
@@ -820,18 +820,20 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
const u8 *prop;
int len;
- prop = of_get_property(op->node, "madaddrlo2", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[4], prop, 4);
- prop = of_get_property(op->node, "madaddrhi4", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[2], prop, 4);
- prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0);
- prom->hw_revision = of_getintprop_default(op->node, "promversion", 0);
+ prom->serial_number = of_getintprop_default(op->dev.of_node,
+ "serialnumber", 0);
+ prom->hw_revision = of_getintprop_default(op->dev.of_node,
+ "promversion", 0);
return 0;
}
@@ -841,10 +843,10 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
struct of_device *op = fore200e->bus_dev;
const struct linux_prom_registers *regs;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
- (regs ? regs->which_io : 0), op->node->name);
+ (regs ? regs->which_io : 0), op->dev.of_node->name);
}
#endif /* CONFIG_SBUS */
@@ -2693,8 +2695,11 @@ static const struct of_device_id fore200e_sba_match[] = {
MODULE_DEVICE_TABLE(of, fore200e_sba_match);
static struct of_platform_driver fore200e_sba_driver = {
- .name = "fore_200e",
- .match_table = fore200e_sba_match,
+ .driver = {
+ .name = "fore_200e",
+ .owner = THIS_MODULE,
+ .of_match_table = fore200e_sba_match,
+ },
.probe = fore200e_sba_probe,
.remove = __devexit_p(fore200e_sba_remove),
};
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c213e0da034..56c2e99e458 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
- tpd->iovec[slot].len = skb->len - skb->data_len;
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ tpd->iovec[slot].len = skb_headlen(skb);
++slot;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
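Both the eni and he hunks replace the open-coded skb->len - skb->data_len with skb_headlen(), i.e. the number of bytes in the linear head of the buffer as opposed to its paged fragments. A tiny stand-in illustrating what that helper computes; the struct here is not the real sk_buff.

#include <stdio.h>

struct skb_like {
	unsigned int len;        /* total bytes in the buffer */
	unsigned int data_len;   /* bytes held in page frags  */
};

/* Linear head portion: everything not living in fragments. */
static unsigned int skb_like_headlen(const struct skb_like *skb)
{
	return skb->len - skb->data_len;
}

int main(void)
{
	struct skb_like skb = { .len = 1514, .data_len = 1024 };

	printf("linear head: %u bytes\n", skb_like_headlen(&skb));   /* 490 */
	return 0;
}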
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 3fecfb446d9..5ad3bad2b0a 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var __initdata = {
+static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
@@ -114,7 +114,7 @@ none:
return ret;
}
-static int cfag12864bfb_remove(struct platform_device *device)
+static int __devexit cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -128,7 +128,7 @@ static int cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = cfag12864bfb_remove,
+ .remove = __devexit_p(cfag12864bfb_remove),
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd52c48ee76..ef38aff737e 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -18,9 +18,9 @@ config UEVENT_HELPER_PATH
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG && SHMEM && TMPFS
+ depends on HOTPLUG
help
- This creates a tmpfs filesystem instance early at bootup.
+ This creates a tmpfs/ramfs filesystem instance early at bootup.
In this filesystem, the kernel driver core maintains device
nodes with their default names and permissions for all
registered devices with an assigned major/minor number.
@@ -33,6 +33,9 @@ config DEVTMPFS
functional /dev without any further help. It also allows simple
rescue systems, and reliably handles dynamic major/minor numbers.
+ Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+ file system will be used instead.
+
config DEVTMPFS_MOUNT
bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
depends on DEVTMPFS
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c6a0d6408e..8e231d05b40 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -63,6 +63,14 @@ static void class_release(struct kobject *kobj)
kfree(cp);
}
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+ struct class_private *cp = to_class(kobj);
+ struct class *class = cp->class;
+
+ return class->ns_type;
+}
+
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
@@ -71,6 +79,7 @@ static const struct sysfs_ops class_sysfs_ops = {
static struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
+ .child_ns_type = class_child_ns_type,
};
/* Hotplug events for classes go to the class class_subsys */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b56a0ba31d4..9630fbdf4e6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -20,7 +20,6 @@
#include <linux/notifier.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/async.h>
@@ -132,9 +131,21 @@ static void device_release(struct kobject *kobj)
kfree(p);
}
+static const void *device_namespace(struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
+ .namespace = device_namespace,
};
@@ -559,10 +570,10 @@ void device_initialize(struct device *dev)
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mutex);
+ lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
- device_init_wakeup(dev, 0);
device_pm_init(dev);
set_dev_node(dev, -1);
}
@@ -596,11 +607,59 @@ static struct kobject *virtual_device_parent(struct device *dev)
return virtual_dir;
}
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
+ struct class_dir *dir = to_class_dir(kobj);
+ return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+ .release = class_dir_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+ struct class_dir *dir;
int retval;
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+
+ dir->kobj.kset = &class->p->class_dirs;
+
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+ return NULL;
+ }
+ return &dir->kobj;
+}
+
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
if (dev->class) {
static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
@@ -635,18 +694,7 @@ static struct kobject *get_device_parent(struct device *dev,
}
/* or create a new class-directory at the parent device */
- k = kobject_create();
- if (!k) {
- mutex_unlock(&gdp_mutex);
- return NULL;
- }
- k->kset = &dev->class->p->class_dirs;
- retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
- if (retval < 0) {
- mutex_unlock(&gdp_mutex);
- kobject_put(k);
- return NULL;
- }
+ k = class_dir_create_and_add(dev->class, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
return k;
@@ -738,7 +786,7 @@ out_device:
out_busid:
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
/* link in the class directory pointing to the device */
@@ -756,7 +804,7 @@ out_busid:
return 0;
out_busid:
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
out_subsys:
@@ -784,13 +832,13 @@ static void device_remove_class_symlinks(struct device *dev)
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
sysfs_remove_link(&dev->kobj, "subsystem");
@@ -1372,7 +1420,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
return ERR_PTR(err);
}
-#ifdef CONFIG_MODULE /* gotta find a "cleaner" way to do this */
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
@@ -1576,6 +1624,14 @@ int device_rename(struct device *dev, char *new_name)
goto out;
}
+#ifndef CONFIG_SYSFS_DEPRECATED
+ if (dev->class) {
+ error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, old_device_name, new_name);
+ if (error)
+ goto out;
+ }
+#endif
error = kobject_rename(&dev->kobj, new_name);
if (error)
goto out;
@@ -1590,11 +1646,6 @@ int device_rename(struct device *dev, char *new_name)
new_class_name);
}
}
-#else
- if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, old_device_name, new_name);
- }
#endif
out:
@@ -1735,10 +1786,25 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev, *devn;
+ struct device *dev;
+
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+ * Beware that device unplug events may also start pulling
+ * devices offline, even as the system is shutting down.
+ */
+ while (!list_empty(&devices_kset->list)) {
+ dev = list_entry(devices_kset->list.prev, struct device,
+ kobj.entry);
+ get_device(dev);
+ /*
+ * Make sure the device is off the kset list, in the
+ * event that dev->*->shutdown() doesn't remove it.
+ */
+ list_del_init(&dev->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
- list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
- kobj.entry) {
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
dev->bus->shutdown(dev);
@@ -1746,6 +1812,10 @@ void device_shutdown(void)
dev_dbg(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
+ put_device(dev);
+
+ spin_lock(&devices_kset->list_lock);
}
+ spin_unlock(&devices_kset->list_lock);
async_synchronize_full();
}
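The device_shutdown() rewrite above stops iterating the devices list with list_for_each_entry_safe_reverse() and instead pops one entry at a time under devices_kset->list_lock, takes a reference, drops the lock around the ->shutdown() callback, and re-takes it before touching the list again, so unplug events racing with shutdown cannot trip over a stale cursor. A user-space sketch of that pop/unlock/callback/relock pattern, simplified to a singly linked list without refcounting:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static struct node *list_head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void shutdown_one(struct node *n)
{
	printf("shutting down node %d\n", n->id);   /* runs unlocked */
	free(n);
}

static void shutdown_all(void)
{
	pthread_mutex_lock(&list_lock);
	while (list_head) {
		struct node *n = list_head;

		list_head = n->next;              /* unlink while locked */
		pthread_mutex_unlock(&list_lock);

		shutdown_one(n);                  /* callback without the lock */

		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = list_head;
		list_head = n;
	}
	shutdown_all();
	return 0;
}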
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719aab3c..251acea3d35 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);
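The cpu.c hunk swaps cpumask_complement() for cpumask_andnot() against cpu_possible_mask: a plain complement of the online mask also sets bits for CPU ids that do not exist, while and-not keeps only CPUs that are possible but offline. The same arithmetic with small illustrative masks:

#include <stdio.h>

int main(void)
{
	unsigned char possible = 0x0F;   /* CPUs 0-3 exist      */
	unsigned char online   = 0x05;   /* CPUs 0 and 2 online */

	unsigned char complement = (unsigned char)~online;   /* 0xFA */
	unsigned char offline    = possible & ~online;       /* 0x0A */

	printf("~online          = 0x%02X (also claims nonexistent CPUs 4-7)\n",
	       complement);
	printf("possible&~online = 0x%02X (just offline CPUs 1 and 3)\n",
	       offline);
	return 0;
}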
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c89291f8a16..503c2620bbc 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,11 +40,11 @@ static void driver_bound(struct device *dev)
pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
__func__, dev->driver->name);
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
-
- klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
}
static int driver_sysfs_add(struct device *dev)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 057cf11326b..af0600143d1 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/init_task.h>
@@ -45,7 +46,11 @@ __setup("devtmpfs.mount=", mount_param);
static int dev_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
+#ifdef CONFIG_TMPFS
return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+#else
+ return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+#endif
}
static struct file_system_type dev_fs_type = {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 985da11174e..3f093b0dd21 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,52 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ return false;
+}
+#endif
+
enum {
FW_STATUS_LOADING,
FW_STATUS_DONE,
@@ -40,7 +86,6 @@ static int loading_timeout = 60; /* In seconds */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
- char *fw_id;
struct completion completion;
struct bin_attribute attr_data;
struct firmware *fw;
@@ -48,18 +93,11 @@ struct firmware_priv {
struct page **pages;
int nr_pages;
int page_array_size;
- const char *vdata;
struct timer_list timeout;
+ bool nowait;
+ char fw_id[];
};
-#ifdef CONFIG_FW_LOADER
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-#else /* Module case. Avoid ifdefs later; it'll all optimise out */
-static struct builtin_fw *__start_builtin_fw;
-static struct builtin_fw *__end_builtin_fw;
-#endif
-
static void
fw_load_abort(struct firmware_priv *fw_priv)
{
@@ -100,9 +138,25 @@ firmware_timeout_store(struct class *class,
return count;
}
-static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
+static struct class_attribute firmware_class_attrs[] = {
+ __ATTR(timeout, S_IWUSR | S_IRUGO,
+ firmware_timeout_show, firmware_timeout_store),
+ __ATTR_NULL
+};
+
+static void fw_dev_release(struct device *dev)
+{
+ struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ kfree(fw_priv);
+ kfree(dev);
-static void fw_dev_release(struct device *dev);
+ module_put(THIS_MODULE);
+}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -112,12 +166,15 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+ return -ENOMEM;
return 0;
}
static struct class firmware_class = {
.name = "firmware",
+ .class_attrs = firmware_class_attrs,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -130,6 +187,17 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
+static void firmware_free_data(const struct firmware *fw)
+{
+ int i;
+ vunmap(fw->data);
+ if (fw->pages) {
+ for (i = 0; i < PFN_UP(fw->size); i++)
+ __free_page(fw->pages[i]);
+ kfree(fw->pages);
+ }
+}
+
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -162,21 +230,21 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_unlock(&fw_lock);
break;
}
- vfree(fw_priv->fw->data);
- fw_priv->fw->data = NULL;
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
- fw_priv->fw->size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vfree(fw_priv->fw->data);
+ vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
@@ -184,7 +252,10 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
- /* Pages will be freed by vfree() */
+ /* Pages are now owned by 'struct firmware' */
+ fw_priv->fw->pages = fw_priv->pages;
+ fw_priv->pages = NULL;
+
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
@@ -207,8 +278,9 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
static ssize_t
-firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer, loff_t offset,
+ size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -291,6 +363,7 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
/**
* firmware_data_write - write method for firmware
+ * @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
@@ -301,8 +374,9 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* the driver as a firmware image.
**/
static ssize_t
-firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_write(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
+ loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -353,21 +427,6 @@ static struct bin_attribute firmware_attr_data_tmpl = {
.write = firmware_data_write,
};
-static void fw_dev_release(struct device *dev)
-{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- kfree(fw_priv->fw_id);
- kfree(fw_priv);
- kfree(dev);
-
- module_put(THIS_MODULE);
-}
-
static void
firmware_class_timeout(u_long data)
{
@@ -379,8 +438,8 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
struct device *device)
{
int retval;
- struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
- GFP_KERNEL);
+ struct firmware_priv *fw_priv =
+ kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
*dev_p = NULL;
@@ -391,16 +450,9 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
goto error_kfree;
}
+ strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
fw_priv->attr_data = firmware_attr_data_tmpl;
- fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
- if (!fw_priv->fw_id) {
- dev_err(device, "%s: Firmware name allocation failed\n",
- __func__);
- retval = -ENOMEM;
- goto error_kfree;
- }
-
fw_priv->timeout.function = firmware_class_timeout;
fw_priv->timeout.data = (u_long) fw_priv;
init_timer(&fw_priv->timeout);
@@ -427,7 +479,7 @@ error_kfree:
static int fw_setup_device(struct firmware *fw, struct device **dev_p,
const char *fw_name, struct device *device,
- int uevent)
+ int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
@@ -443,6 +495,8 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p,
fw_priv = dev_get_drvdata(f_dev);
+ fw_priv->nowait = nowait;
+
fw_priv->fw = fw;
sysfs_bin_attr_init(&fw_priv->attr_data);
retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
@@ -470,12 +524,11 @@ out:
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, int uevent)
+ struct device *device, int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
- struct builtin_fw *builtin;
int retval;
if (!firmware_p)
@@ -489,21 +542,16 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (strcmp(name, builtin->name))
- continue;
- dev_info(device, "firmware: using built-in firmware %s\n",
- name);
- firmware->size = builtin->size;
- firmware->data = builtin->data;
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
}
if (uevent)
- dev_info(device, "firmware: requesting %s\n", name);
+ dev_dbg(device, "firmware: requesting %s\n", name);
- retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
+ retval = fw_setup_device(firmware, &f_dev, name, device,
+ uevent, nowait);
if (retval)
goto error_kfree_fw;
@@ -560,26 +608,18 @@ request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int uevent = 1;
- return _request_firmware(firmware_p, name, device, uevent);
+ return _request_firmware(firmware_p, name, device, uevent, false);
}
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
-void
-release_firmware(const struct firmware *fw)
+void release_firmware(const struct firmware *fw)
{
- struct builtin_fw *builtin;
-
if (fw) {
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (fw->data == builtin->data)
- goto free_fw;
- }
- vfree(fw->data);
- free_fw:
+ if (!fw_is_builtin_firmware(fw))
+ firmware_free_data(fw);
kfree(fw);
}
}
@@ -606,7 +646,7 @@ request_firmware_work_func(void *arg)
return 0;
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent);
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
@@ -670,26 +710,12 @@ request_firmware_nowait(
return 0;
}
-static int __init
-firmware_class_init(void)
+static int __init firmware_class_init(void)
{
- int error;
- error = class_register(&firmware_class);
- if (error) {
- printk(KERN_ERR "%s: class_register failed\n", __func__);
- return error;
- }
- error = class_create_file(&firmware_class, &class_attr_timeout);
- if (error) {
- printk(KERN_ERR "%s: class_create_file failed\n",
- __func__);
- class_unregister(&firmware_class);
- }
- return error;
-
+ return class_register(&firmware_class);
}
-static void __exit
-firmware_class_exit(void)
+
+static void __exit firmware_class_exit(void)
{
class_unregister(&firmware_class);
}
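Among the firmware_class.c changes, struct firmware_priv now ends in a flexible array member (char fw_id[]) sized into the same kzalloc() as the struct, which is why the separate kstrdup() of the name and its matching kfree() in fw_dev_release() go away. A user-space sketch of that single-allocation idiom; the struct, helper, and firmware name below are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fw_priv_like {
	int  nr_pages;
	char fw_id[];          /* flexible array member, must be last */
};

static struct fw_priv_like *fw_priv_alloc(const char *name)
{
	/* One allocation covers the struct plus the name and its NUL. */
	struct fw_priv_like *p = calloc(1, sizeof(*p) + strlen(name) + 1);

	if (!p)
		return NULL;
	strcpy(p->fw_id, name);
	return p;
}

int main(void)
{
	struct fw_priv_like *p = fw_priv_alloc("example/board.bin");

	if (!p)
		return 1;
	printf("firmware id: %s\n", p->fw_id);
	free(p);               /* one free releases struct and string */
	return 0;
}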
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index 8ad4ffea692..6e6b6a11b3c 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
-int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return iommu_ops->map(domain, iova, paddr, size, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map_range);
-
-void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- iommu_ops->unmap(domain, iova, size);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap_range);
-
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
@@ -107,3 +93,32 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
return iommu_ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, int gfp_order, int prot)
+{
+ unsigned long invalid_mask;
+ size_t size;
+
+ size = 0x1000UL << gfp_order;
+ invalid_mask = size - 1;
+
+ BUG_ON((iova | paddr) & invalid_mask);
+
+ return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+ unsigned long invalid_mask;
+ size_t size;
+
+ size = 0x1000UL << gfp_order;
+ invalid_mask = size - 1;
+
+ BUG_ON(iova & invalid_mask);
+
+ return iommu_ops->unmap(domain, iova, gfp_order);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
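
The iommu.c hunk drops the size-based iommu_map_range()/iommu_unmap_range() pair in favour of order-based iommu_map()/iommu_unmap(): callers pass log2 of the number of 4 KB pages, and both iova and paddr must be aligned to that size or the BUG_ON() fires. A hedged caller sketch using the new calls; the example function and its domain are assumptions, only the map/unmap usage reflects the patch:

#include <linux/iommu.h>

/* Map one 2 MB region: 2 MB = 4 KB << 9, so gfp_order is 9.
 * iova and paddr must both be 2 MB aligned. */
static int example_map_2mb(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t paddr)
{
	int ret;

	ret = iommu_map(domain, iova, paddr, 9, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... DMA through the mapping ... */

	iommu_unmap(domain, iova, 9);	/* unmap with the same order */
	return 0;
}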
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f32f2f9b7be..db930d3ee31 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -15,12 +15,10 @@ static char *make_driver_name(struct device_driver *drv)
{
char *driver_name;
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
+ driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
if (!driver_name)
return NULL;
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
return driver_name;
}
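
The module.c change folds the kmalloc()-plus-sprintf() pair into a single kasprintf() call, which sizes and formats the "bus:driver" string in one allocation. A tiny sketch of the same pattern (function name is illustrative); the caller still owns and eventually kfree()s the result:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *example_make_name(const char *bus, const char *drv)
{
	/* kasprintf() returns NULL on allocation failure, like kmalloc(). */
	return kasprintf(GFP_KERNEL, "%s:%s", bus, drv);
}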
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 057979a19ee..2bdd8a94ec9 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,6 +9,7 @@
#include <linux/memory.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
+#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
@@ -246,6 +247,8 @@ int register_node(struct node *node, int num, struct node *parent)
scan_unevictable_register_node(node);
hugetlb_register_node(node);
+
+ compaction_register_node(node);
}
return error;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4b4b565c835..4d99c8bdfed 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
* released.
*/
int platform_device_add_resources(struct platform_device *pdev,
- struct resource *res, unsigned int num)
+ const struct resource *res, unsigned int num)
{
struct resource *r;
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*/
struct platform_device *platform_device_register_simple(const char *name,
int id,
- struct resource *res,
+ const struct resource *res,
unsigned int num)
{
struct platform_device *pdev;
@@ -735,7 +735,7 @@ static void platform_pm_complete(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int platform_pm_suspend(struct device *dev)
+int __weak platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -753,7 +753,7 @@ static int platform_pm_suspend(struct device *dev)
return ret;
}
-static int platform_pm_suspend_noirq(struct device *dev)
+int __weak platform_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -769,7 +769,7 @@ static int platform_pm_suspend_noirq(struct device *dev)
return ret;
}
-static int platform_pm_resume(struct device *dev)
+int __weak platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -787,7 +787,7 @@ static int platform_pm_resume(struct device *dev)
return ret;
}
-static int platform_pm_resume_noirq(struct device *dev)
+int __weak platform_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -967,17 +967,17 @@ static int platform_pm_restore_noirq(struct device *dev)
int __weak platform_pm_runtime_suspend(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_suspend(dev);
};
int __weak platform_pm_runtime_resume(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_resume(dev);
};
int __weak platform_pm_runtime_idle(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_idle(dev);
};
#else /* !CONFIG_PM_RUNTIME */
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name && slab_is_available()) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
if (epdrv->pdrv->probe(match))
pr_warning("%s: unable to probe %s early.\n",
class_str, match->name);
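
In platform.c, the system sleep callbacks become __weak (so an architecture can supply its own strong definitions) and the runtime PM stubs now fall back to the pm_generic_runtime_*() helpers instead of returning -ENOSYS. A hedged sketch of such an architecture override; the body is illustrative and only mirrors the shape of the generic version shown in the context above:

#include <linux/device.h>
#include <linux/platform_device.h>

/* Strong definition in platform code; it replaces the __weak one above
 * at link time. */
int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	/* platform-specific bookkeeping could happen here */

	if (drv && drv->pm && drv->pm->suspend)
		return drv->pm->suspend(dev);

	return 0;
}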
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 626dd147b75..b0ec0e9f27e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
if (retval) {
dev->power.runtime_status = RPM_ACTIVE;
- pm_runtime_cancel_pending(dev);
-
if (retval == -EAGAIN || retval == -EBUSY) {
- notify = true;
+ if (dev->power.timer_expires == 0)
+ notify = true;
dev->power.runtime_error = 0;
+ } else {
+ pm_runtime_cancel_pending(dev);
}
} else {
dev->power.runtime_status = RPM_SUSPENDED;
+ pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
else if (atomic_read(&dev->power.usage_count) > 0
|| dev->power.disable_depth > 0)
retval = -EAGAIN;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 86fd9373447..a4c33bc5125 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
+#include <asm/atomic.h>
#include "power.h"
/*
@@ -143,7 +144,59 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ return sprintf(buf, "disabled & forbidden\n");
+ else if (dev->power.disable_depth)
+ return sprintf(buf, "disabled\n");
+ else if (dev->power.runtime_auto == false)
+ return sprintf(buf, "forbidden\n");
+ return sprintf(buf, "enabled\n");
+}
+
+static ssize_t rtpm_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (dev->power.runtime_error)
+ return sprintf(buf, "error\n");
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ return sprintf(buf, "suspended\n");
+ case RPM_SUSPENDING:
+ return sprintf(buf, "suspending\n");
+ case RPM_RESUMING:
+ return sprintf(buf, "resuming\n");
+ case RPM_ACTIVE:
+ return sprintf(buf, "active\n");
+ }
+ return -EIO;
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -170,15 +223,21 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
-#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_control.attr,
#endif
&dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
#endif
NULL,
};
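
The new read-only attributes appear under each device's power/ directory (with CONFIG_PM_ADVANCED_DEBUG and CONFIG_PM_RUNTIME enabled) and simply mirror the runtime PM bookkeeping fields. A small userspace sketch reading one of them; the device path is an example, not taken from the patch:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/devices/pci0000:00/0000:00:1d.0/power/runtime_status", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("runtime_status: %s", buf);	/* e.g. "suspended" */
	fclose(f);
	return 0;
}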
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d0..9fc630ce1dd 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
topology_remove_dev(cpu);
break;
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(rc);
}
static int __cpuinit topology_sysfs_init(void)
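
topology.c switches from collapsing every failure to NOTIFY_BAD to notifier_from_errno(), which keeps the errno encoded in the notifier return value while still yielding NOTIFY_OK for rc == 0. A hedged sketch of the same pattern in a made-up notifier callback:

#include <linux/notifier.h>
#include <linux/errno.h>

static int example_callback(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	int rc = 0;

	if (action == 1)	/* placeholder for a real action code */
		rc = -ENOMEM;	/* pretend the setup step failed */

	/* 0 -> NOTIFY_OK, negative errno -> stop-with-error value */
	return notifier_from_errno(rc);
}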
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce52e9c..de277689da6 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@ config BLK_DEV_XD
It's pretty unlikely that you have one of these: say N.
+config GDROM
+ tristate "SEGA Dreamcast GD-ROM drive"
+ depends on SH_DREAMCAST
+ help
+ A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+ "GD-ROM" by SEGA to signify it is capable of reading special disks
+ with up to 1 GB of data. This drive will also read standard CD ROM
+ disks. Select this option to access any disks in your GD ROM drive.
+ Most users will want to say "Y" here.
+ You can also build this as a module which will be called gdrom.
+
config PARIDE
tristate "Parallel port IDE device support"
depends on PARPORT_PC
@@ -103,17 +114,6 @@ config PARIDE
"MicroSolutions backpack protocol", "DataStor Commuter protocol"
etc.).
-config GDROM
- tristate "SEGA Dreamcast GD-ROM drive"
- depends on SH_DREAMCAST
- help
- A standard SEGA Dreamcast comes with a modified CD ROM drive called a
- "GD-ROM" by SEGA to signify it is capable of reading special disks
- with up to 1 GB of data. This drive will also read standard CD ROM
- disks. Select this option to access any disks in your GD ROM drive.
- Most users will want to say "Y" here.
- You can also build this as a module which will be called gdrom.
-
source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0182a22c423..832798aa14f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -66,6 +66,7 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
@@ -1696,34 +1697,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
return get_disk(unit[drive].gendisk);
}
-static int __init amiga_floppy_init(void)
+static int __init amiga_floppy_probe(struct platform_device *pdev)
{
int i, ret;
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if (!AMIGAHW_PRESENT(AMI_FLOPPY))
- return -ENODEV;
-
if (register_blkdev(FLOPPY_MAJOR,"fd"))
return -EBUSY;
- /*
- * We request DSKPTR, DSKLEN and DSKDATA only, because the other
- * floppy registers are too spreaded over the custom register space
- */
- ret = -EBUSY;
- if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
- printk("fd: cannot get floppy registers\n");
- goto out_blkdev;
- }
-
ret = -ENOMEM;
if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
NULL) {
printk("fd: cannot get chip mem buffer\n");
- goto out_memregion;
+ goto out_blkdev;
}
ret = -EBUSY;
@@ -1792,18 +1777,13 @@ out_irq2:
free_irq(IRQ_AMIGA_DSKBLK, NULL);
out_irq:
amiga_chip_free(raw_buf);
-out_memregion:
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
out_blkdev:
unregister_blkdev(FLOPPY_MAJOR,"fd");
return ret;
}
-module_init(amiga_floppy_init);
-#ifdef MODULE
-
#if 0 /* not safe to unload */
-void cleanup_module(void)
+static int __exit amiga_floppy_remove(struct platform_device *pdev)
{
int i;
@@ -1820,12 +1800,25 @@ void cleanup_module(void)
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
blk_cleanup_queue(floppy_queue);
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
-#else
+static struct platform_driver amiga_floppy_driver = {
+ .driver = {
+ .name = "amiga-floppy",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_floppy_init(void)
+{
+ return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
+}
+
+module_init(amiga_floppy_init);
+
+#ifndef MODULE
static int __init amiga_floppy_setup (char *str)
{
int n;
@@ -1840,3 +1833,5 @@ static int __init amiga_floppy_setup (char *str)
__setup("floppy=", amiga_floppy_setup);
#endif
+
+MODULE_ALIAS("platform:amiga-floppy");
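
With the conversion to platform_driver_probe(), amiflop now binds against a platform device named "amiga-floppy" that the Amiga platform setup code is expected to register; that registration is not part of this hunk. A hedged sketch of what it could look like; the function name and call site are assumptions:

#include <linux/platform_device.h>
#include <linux/err.h>

static int __init example_register_amiga_floppy(void)
{
	struct platform_device *pdev;

	/* id -1 means "only instance"; no resources are passed because the
	 * driver allocates chip memory itself. */
	pdev = platform_device_register_simple("amiga-floppy", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}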
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index eb5ff0531cf..51ceaee98f9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1588,7 +1588,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
c->Request = ioc->Request;
if (ioc->buf_size > 0) {
- int i;
for (i = 0; i < sg_used; i++) {
temp64.val =
pci_map_single(host->pdev, buff[i],
@@ -2434,7 +2433,7 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
/* if it was the last disk, find the new hightest lun */
if (clear_all && recalculate_highest_lun) {
- int i, newhighest = -1;
+ int newhighest = -1;
for (i = 0; i <= h->highest_lun; i++) {
/* if the disk has size > 0, it is available */
if (h->drv[i] && h->drv[i]->heads)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3390716898d..e3f88d6e141 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -84,6 +84,9 @@ struct drbd_bitmap {
#define BM_MD_IO_ERROR 1
#define BM_P_VMALLOCED 2
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+ unsigned long e, int val, const enum km_type km);
+
static int bm_is_locked(struct drbd_bitmap *b)
{
return test_bit(BM_LOCKED, &b->bm_flags);
@@ -441,7 +444,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
+int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long bits, words, owords, obits, *p_addr, *bm;
@@ -516,7 +519,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
obits = b->bm_bits;
growing = bits > obits;
- if (opages)
+ if (opages && growing && set_new_bits)
bm_set_surplus(b);
b->bm_pages = npages;
@@ -526,8 +529,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
b->bm_dev_capacity = capacity;
if (growing) {
- bm_memset(b, owords, 0xff, words-owords);
- b->bm_set += bits - obits;
+ if (set_new_bits) {
+ bm_memset(b, owords, 0xff, words-owords);
+ b->bm_set += bits - obits;
+ } else
+ bm_memset(b, owords, 0x00, words-owords);
+
}
if (want < have) {
@@ -773,7 +780,7 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int
/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
-void bm_cpu_to_lel(struct drbd_bitmap *b)
+static void bm_cpu_to_lel(struct drbd_bitmap *b)
{
/* need to cpu_to_lel all the pages ...
* this may be optimized by using
@@ -1015,7 +1022,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
-int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned long e, int val, const enum km_type km)
{
struct drbd_bitmap *b = mdev->bitmap;
@@ -1053,7 +1060,7 @@ int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
-int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e5e86a78182..e9654c8d5b6 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -132,6 +132,7 @@ enum {
DRBD_FAULT_DT_RA = 6, /* data read ahead */
DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */
DRBD_FAULT_AL_EE = 8, /* alloc ee */
+ DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
DRBD_FAULT_MAX,
};
@@ -208,8 +209,11 @@ enum drbd_packets {
P_RS_IS_IN_SYNC = 0x22, /* meta socket */
P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
+ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
+ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
- P_MAX_CMD = 0x25,
+ P_MAX_CMD = 0x28,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -264,6 +268,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
[P_MAX_CMD] = NULL,
};
@@ -481,7 +486,8 @@ struct p_sizes {
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
u32 max_segment_size; /* Maximal size of a BIO */
- u32 queue_order_type;
+ u16 queue_order_type; /* not yet implemented in DRBD */
+ u16 dds_flags; /* use enum dds_flags here. */
} __packed;
struct p_state {
@@ -538,6 +544,18 @@ struct p_compressed_bm {
u8 code[0];
} __packed;
+struct p_delay_probe {
+ struct p_header head;
+ u32 seq_num; /* sequence number to match the two probe packets */
+ u32 offset; /* usecs the probe got sent after the reference time point */
+} __packed;
+
+struct delay_probe {
+ struct list_head list;
+ unsigned int seq_num;
+ struct timeval time;
+};
+
/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@@ -722,22 +740,6 @@ enum epoch_event {
EV_CLEANUP = 32, /* used as flag */
};
-struct drbd_epoch_entry {
- struct drbd_work w;
- struct drbd_conf *mdev;
- struct bio *private_bio;
- struct hlist_node colision;
- sector_t sector;
- unsigned int size;
- struct drbd_epoch *epoch;
-
- /* up to here, the struct layout is identical to drbd_request;
- * we might be able to use that to our advantage... */
-
- unsigned int flags;
- u64 block_id;
-};
-
struct drbd_wq_barrier {
struct drbd_work w;
struct completion done;
@@ -748,17 +750,49 @@ struct digest_info {
void *digest;
};
-/* ee flag bits */
+struct drbd_epoch_entry {
+ struct drbd_work w;
+ struct hlist_node colision;
+ struct drbd_epoch *epoch;
+ struct drbd_conf *mdev;
+ struct page *pages;
+ atomic_t pending_bios;
+ unsigned int size;
+ /* see comments on ee flag bits below */
+ unsigned long flags;
+ sector_t sector;
+ u64 block_id;
+};
+
+/* ee flag bits.
+ * While corresponding bios are in flight, the only modification will be
+ * set_bit WAS_ERROR, which has to be atomic.
+ * If no bios are in flight yet, or all have been completed,
+ * non-atomic modification to ee->flags is ok.
+ */
enum {
__EE_CALL_AL_COMPLETE_IO,
- __EE_CONFLICT_PENDING,
__EE_MAY_SET_IN_SYNC,
+
+ /* This epoch entry closes an epoch using a barrier.
+ * On successful completion, the epoch is released,
+ * and the P_BARRIER_ACK is sent. */
__EE_IS_BARRIER,
+
+ /* In case a barrier failed,
+ * we need to resubmit without the barrier flag. */
+ __EE_RESUBMITTED,
+
+ /* we may have several bios per epoch entry.
+ * if any of those fail, we set this flag atomically
+ * from the endio callback */
+ __EE_WAS_ERROR,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
-#define EE_CONFLICT_PENDING (1<<__EE_CONFLICT_PENDING)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
+#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
+#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
/* global flag bits */
enum {
@@ -908,9 +942,12 @@ struct drbd_conf {
unsigned int ko_count;
struct drbd_work resync_work,
unplug_work,
- md_sync_work;
+ md_sync_work,
+ delay_probe_work,
+ uuid_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
+ struct timer_list delay_probe_timer;
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
@@ -1026,6 +1063,13 @@ struct drbd_conf {
u64 ed_uuid; /* UUID of the exposed data */
struct mutex state_mutex;
char congestion_reason; /* Why we where congested... */
+ struct list_head delay_probes; /* protected by peer_seq_lock */
+ int data_delay; /* Delay of packets on the data-sock behind meta-sock */
+ unsigned int delay_seq; /* To generate sequence numbers of delay probes */
+ struct timeval dps_time; /* delay-probes-start-time */
+ unsigned int dp_volume_last; /* send_cnt of last delay probe */
+ int c_sync_rate; /* current resync rate after delay_probe magic */
+ atomic_t new_c_uuid;
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1081,6 +1125,11 @@ enum chg_state_flags {
CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
};
+enum dds_flags {
+ DDSF_FORCED = 1,
+ DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
+};
+
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
union drbd_state mask, union drbd_state val);
@@ -1113,7 +1162,7 @@ extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply);
+extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
@@ -1311,7 +1360,7 @@ struct bm_extent {
#define APP_R_HSIZE 15
extern int drbd_bm_init(struct drbd_conf *mdev);
-extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors);
+extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
@@ -1383,7 +1432,7 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1414,7 +1463,8 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
}
-extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
@@ -1438,6 +1488,8 @@ extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
+extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
u64 id,
@@ -1593,6 +1645,41 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
* inline helper functions
*************************/
+/* see also page_chain_add and friends in drbd_receiver.c */
+static inline struct page *page_chain_next(struct page *page)
+{
+ return (struct page *)page_private(page);
+}
+#define page_chain_for_each(page) \
+ for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
+ page = page_chain_next(page))
+#define page_chain_for_each_safe(page, n) \
+ for (; page && ({ n = page_chain_next(page); 1; }); page = n)
+
+static inline int drbd_bio_has_active_page(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ if (page_count(bvec->bv_page) > 1)
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ page_chain_for_each(page) {
+ if (page_count(page) > 1)
+ return 1;
+ }
+ return 0;
+}
+
+
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2132,13 +2219,15 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
return 0;
if (test_bit(BITMAP_IO, &mdev->flags))
return 0;
+ if (atomic_read(&mdev->new_c_uuid))
+ return 0;
return 1;
}
/* I'd like to use wait_event_lock_irq,
* but I'm not sure when it got introduced,
* and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
/* compare with after_state_ch,
* os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
@@ -2152,6 +2241,9 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
+ if (atomic_read(&mdev->new_c_uuid) && atomic_add_unless(&mdev->new_c_uuid, -1, 1))
+ drbd_queue_work_front(&mdev->data.work, &mdev->uuid_work);
+
spin_lock_irq(&mdev->req_lock);
while (!__inc_ap_bio_cond(mdev)) {
prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
@@ -2160,7 +2252,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
finish_wait(&mdev->misc_wait, &wait);
spin_lock_irq(&mdev->req_lock);
}
- atomic_add(one_or_two, &mdev->ap_bio_cnt);
+ atomic_add(count, &mdev->ap_bio_cnt);
spin_unlock_irq(&mdev->req_lock);
}
@@ -2251,7 +2343,8 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
if (test_bit(MD_NO_BARRIER, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (r) {
set_bit(MD_NO_BARRIER, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 93d1f9b469d..be2d2da9cdb 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -684,6 +684,9 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
rv = SS_NO_REMOTE_DISK;
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
else if ((ns.conn == C_CONNECTED ||
ns.conn == C_WF_BITMAP_S ||
ns.conn == C_SYNC_SOURCE ||
@@ -840,7 +843,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
break;
case C_WF_BITMAP_S:
case C_PAUSED_SYNC_S:
- ns.pdsk = D_OUTDATED;
+ /* remap any consistent state to D_OUTDATED,
+ * but disallow "upgrade" of not even consistent states.
+ */
+ ns.pdsk =
+ (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
+ ? os.pdsk : D_OUTDATED;
break;
case C_SYNC_SOURCE:
ns.pdsk = D_INCONSISTENT;
@@ -1205,21 +1213,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&& (ns.pdsk < D_INCONSISTENT ||
ns.pdsk == D_UNKNOWN ||
ns.pdsk == D_OUTDATED)) {
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
if (get_ldev(mdev)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE &&
+ !atomic_read(&mdev->new_c_uuid))
+ atomic_set(&mdev->new_c_uuid, 2);
put_ldev(mdev);
}
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
- drbd_uuid_new_current(mdev);
+ /* Diskless peer becomes primary or got connected to a diskless, primary peer. */
+ if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0 &&
+ !atomic_read(&mdev->new_c_uuid))
+ atomic_set(&mdev->new_c_uuid, 2);
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1232,7 +1239,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
- drbd_send_sizes(mdev, 0); /* to start sync... */
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
@@ -1343,6 +1350,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
drbd_md_sync(mdev);
}
+static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (get_ldev(mdev)) {
+ if (mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ drbd_uuid_new_current(mdev);
+ if (get_net_conf(mdev)) {
+ drbd_send_uuids(mdev);
+ put_net_conf(mdev);
+ }
+ drbd_md_sync(mdev);
+ }
+ put_ldev(mdev);
+ }
+ atomic_dec(&mdev->new_c_uuid);
+ wake_up(&mdev->misc_wait);
+
+ return 1;
+}
static int drbd_thread_setup(void *arg)
{
@@ -1755,7 +1780,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
(struct p_header *)&p, sizeof(p));
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
+int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
struct p_sizes p;
sector_t d_size, u_size;
@@ -1767,7 +1792,6 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
d_size = drbd_get_max_capacity(mdev->ldev);
u_size = mdev->ldev->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
- p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
put_ldev(mdev);
} else {
d_size = 0;
@@ -1779,7 +1803,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
- p.queue_order_type = cpu_to_be32(q_order_type);
+ p.queue_order_type = cpu_to_be16(q_order_type);
+ p.dds_flags = cpu_to_be16(flags);
ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
(struct p_header *)&p, sizeof(p));
@@ -2180,6 +2205,43 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
return ok;
}
+static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
+{
+ struct p_delay_probe dp;
+ int offset, ok = 0;
+ struct timeval now;
+
+ mutex_lock(&ds->mutex);
+ if (likely(ds->socket)) {
+ do_gettimeofday(&now);
+ offset = now.tv_usec - mdev->dps_time.tv_usec +
+ (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
+ dp.seq_num = cpu_to_be32(mdev->delay_seq);
+ dp.offset = cpu_to_be32(offset);
+
+ ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
+ (struct p_header *)&dp, sizeof(dp), 0);
+ }
+ mutex_unlock(&ds->mutex);
+
+ return ok;
+}
+
+static int drbd_send_delay_probes(struct drbd_conf *mdev)
+{
+ int ok;
+
+ mdev->delay_seq++;
+ do_gettimeofday(&mdev->dps_time);
+ ok = drbd_send_delay_probe(mdev, &mdev->meta);
+ ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
+
+ mdev->dp_volume_last = mdev->send_cnt;
+ mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
+
+ return ok;
+}
+
/* called on sndtimeo
* returns FALSE if we should retry,
* TRUE if we think connection is dead
@@ -2309,6 +2371,44 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
return 1;
}
+static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ unsigned len = e->size;
+ page_chain_for_each(page) {
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ if (!_drbd_send_page(mdev, page, 0, l))
+ return 0;
+ len -= l;
+ }
+ return 1;
+}
+
+static void consider_delay_probes(struct drbd_conf *mdev)
+{
+ if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
+ return;
+
+ if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
+ drbd_send_delay_probes(mdev);
+}
+
+static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
+ drbd_send_delay_probes(mdev);
+
+ return 1;
+}
+
+static void delay_probe_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ if (list_empty(&mdev->delay_probe_work.list))
+ drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
+}
+
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
@@ -2360,7 +2460,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
+ drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
}
if (ok) {
@@ -2371,6 +2471,10 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2409,13 +2513,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
sizeof(p), MSG_MORE);
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb);
+ drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
}
if (ok)
- ok = _drbd_send_zc_bio(mdev, e->private_bio);
+ ok = _drbd_send_zc_ee(mdev, e);
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2600,6 +2708,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->net_cnt, 0);
atomic_set(&mdev->packet_seq, 0);
atomic_set(&mdev->pp_in_use, 0);
+ atomic_set(&mdev->new_c_uuid, 0);
mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
@@ -2628,16 +2737,26 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->md_sync_work.list);
INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
+ INIT_LIST_HEAD(&mdev->delay_probes);
+ INIT_LIST_HEAD(&mdev->delay_probe_work.list);
+ INIT_LIST_HEAD(&mdev->uuid_work.list);
+
mdev->resync_work.cb = w_resync_inactive;
mdev->unplug_work.cb = w_send_write_hint;
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
+ mdev->delay_probe_work.cb = w_delay_probes;
+ mdev->uuid_work.cb = w_new_current_uuid;
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
+ init_timer(&mdev->delay_probe_timer);
mdev->resync_timer.function = resync_timer_fn;
mdev->resync_timer.data = (unsigned long) mdev;
mdev->md_sync_timer.function = md_sync_timer_fn;
mdev->md_sync_timer.data = (unsigned long) mdev;
+ mdev->delay_probe_timer.function = delay_probe_timer_fn;
+ mdev->delay_probe_timer.data = (unsigned long) mdev;
+
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
@@ -2680,7 +2799,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0);
+ drbd_bm_resize(mdev, 0, 1);
drbd_bm_cleanup(mdev);
}
@@ -3129,7 +3248,7 @@ int __init drbd_init(void)
if (err)
goto Enomem;
- drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops);
+ drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
goto Enomem;
@@ -3660,7 +3779,8 @@ _drbd_fault_str(unsigned int type) {
[DRBD_FAULT_DT_RD] = "Data read",
[DRBD_FAULT_DT_RA] = "Data read ahead",
[DRBD_FAULT_BM_ALLOC] = "BM allocation",
- [DRBD_FAULT_AL_EE] = "EE allocation"
+ [DRBD_FAULT_AL_EE] = "EE allocation",
+ [DRBD_FAULT_RECEIVE] = "receive data corruption",
};
return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
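
The delay-probe machinery added to drbd_main.c records a reference timestamp in mdev->dps_time and then sends, on both sockets, the time elapsed since that point in microseconds as the probe offset. The arithmetic in drbd_send_delay_probe() is just a timeval difference; a small stand-alone sketch of that computation (helper name is illustrative):

#include <linux/time.h>

/* Microseconds elapsed since the reference timestamp @ref. */
static int example_usec_since(const struct timeval *ref)
{
	struct timeval now;

	do_gettimeofday(&now);
	return (now.tv_sec - ref->tv_sec) * 1000000 +
	       (now.tv_usec - ref->tv_usec);
}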
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6429d2b19e0..632e3245d1b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -541,12 +541,12 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, force);
+ size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
int err;
- err = drbd_bm_resize(mdev, size);
+ err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
size = drbd_bm_capacity(mdev)>>1;
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
int max_segments = mdev->ldev->dc.max_bio_bvecs;
- if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
- max_seg_s = PAGE_SIZE;
-
max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
blk_queue_max_hw_sectors(q, max_seg_s >> 9);
@@ -1199,13 +1196,12 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
/* allocation not in the IO path, cqueue thread context */
- new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
goto fail;
}
- memset(new_conf, 0, sizeof(struct net_conf));
new_conf->timeout = DRBD_TIMEOUT_DEF;
new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
new_conf->ping_int = DRBD_PING_INT_DEF;
@@ -1477,8 +1473,8 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
struct resize rs;
int retcode = NO_ERROR;
- int ldsc = 0; /* local disk size changed */
enum determine_dev_size dd;
+ enum dds_flags ddsf;
memset(&rs, 0, sizeof(struct resize));
if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
@@ -1502,13 +1498,17 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- ldsc = 1;
+ if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ retcode = ERR_NEED_APV_93;
+ goto fail;
}
+ if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
+ mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+
mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
- dd = drbd_determin_dev_size(mdev, rs.resize_force);
+ ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
put_ldev(mdev);
if (dd == dev_size_error) {
@@ -1516,12 +1516,12 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
+ if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
set_bit(RESIZE_PENDING, &mdev->flags);
drbd_send_uuids(mdev);
- drbd_send_sizes(mdev, 1);
+ drbd_send_sizes(mdev, 1, ddsf);
}
fail:
@@ -1551,6 +1551,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
sc.rate = DRBD_RATE_DEF;
sc.after = DRBD_AFTER_DEF;
sc.al_extents = DRBD_AL_EXTENTS_DEF;
+ sc.dp_volume = DRBD_DP_VOLUME_DEF;
+ sc.dp_interval = DRBD_DP_INTERVAL_DEF;
+ sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
+ sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
} else
memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
@@ -2207,9 +2211,9 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
{
struct cn_msg *cn_reply;
struct drbd_nl_cfg_reply *reply;
- struct bio_vec *bvec;
unsigned short *tl;
- int i;
+ struct page *page;
+ unsigned len;
if (!e)
return;
@@ -2247,11 +2251,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
put_unaligned(T_ee_data, tl++);
put_unaligned(e->size, tl++);
- __bio_for_each_segment(bvec, e->private_bio, i, 0) {
- void *d = kmap(bvec->bv_page);
- memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
- kunmap(bvec->bv_page);
- tl=(unsigned short*)((char*)tl + bvec->bv_len);
+ len = e->size;
+ page = e->pages;
+ page_chain_for_each(page) {
+ void *d = kmap_atomic(page, KM_USER0);
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ memcpy(tl, d, l);
+ kunmap_atomic(d, KM_USER0);
+ tl = (unsigned short*)((char*)tl + l);
+ len -= l;
}
put_unaligned(TT_END, tl++); /* Close the tag list */
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be3374b6846..d0f1767ea4c 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -73,14 +73,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
/* if more than 1 GB display in MB */
if (mdev->rs_total > 0x100000L)
- seq_printf(seq, "(%lu/%lu)M\n\t",
+ seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
else
- seq_printf(seq, "(%lu/%lu)K\n\t",
+ seq_printf(seq, "(%lu/%lu)K",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
+ if (mdev->state.conn == C_SYNC_TARGET)
+ seq_printf(seq, " queue_delay: %d.%d ms\n\t",
+ mdev->data_delay / 1000,
+ (mdev->data_delay % 1000) / 100);
+ else if (mdev->state.conn == C_SYNC_SOURCE)
+ seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
+
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
* the * 100 / 100 trick are important. We do a +1 to be
@@ -128,6 +135,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
else
seq_printf(seq, " (%ld)", dbdt);
+ if (mdev->state.conn == C_SYNC_TARGET) {
+ if (mdev->c_sync_rate > 1000)
+ seq_printf(seq, " want: %d,%03d",
+ mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
+ else
+ seq_printf(seq, " want: %d", mdev->c_sync_rate);
+ }
+
seq_printf(seq, " K/sec\n");
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3f096e7959b..bc9ab7fb2cc 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -80,30 +80,128 @@ static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epo
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
+/*
+ * some helper functions to deal with single linked page lists,
+ * page->private being our "next" pointer.
+ */
+
+/* If at least n pages are linked at head, get n pages off.
+ * Otherwise, don't modify head, and return NULL.
+ * Locking is the responsibility of the caller.
+ */
+static struct page *page_chain_del(struct page **head, int n)
+{
+ struct page *page;
+ struct page *tmp;
+
+ BUG_ON(!n);
+ BUG_ON(!head);
+
+ page = *head;
+
+ if (!page)
+ return NULL;
+
+ while (page) {
+ tmp = page_chain_next(page);
+ if (--n == 0)
+ break; /* found sufficient pages */
+ if (tmp == NULL)
+ /* insufficient pages, don't use any of them. */
+ return NULL;
+ page = tmp;
+ }
+
+ /* add end of list marker for the returned list */
+ set_page_private(page, 0);
+ /* actual return value, and adjustment of head */
+ page = *head;
+ *head = tmp;
+ return page;
+}
+
+/* may be used outside of locks to find the tail of a (usually short)
+ * "private" page chain, before adding it back to a global chain head
+ * with page_chain_add() under a spinlock. */
+static struct page *page_chain_tail(struct page *page, int *len)
+{
+ struct page *tmp;
+ int i = 1;
+ while ((tmp = page_chain_next(page)))
+ ++i, page = tmp;
+ if (len)
+ *len = i;
+ return page;
+}
+
+static int page_chain_free(struct page *page)
+{
+ struct page *tmp;
+ int i = 0;
+ page_chain_for_each_safe(page, tmp) {
+ put_page(page);
+ ++i;
+ }
+ return i;
+}
+
+static void page_chain_add(struct page **head,
+ struct page *chain_first, struct page *chain_last)
+{
+#if 1
+ struct page *tmp;
+ tmp = page_chain_tail(chain_first, NULL);
+ BUG_ON(tmp != chain_last);
+#endif
+
+ /* add chain to head */
+ set_page_private(chain_last, (unsigned long)*head);
+ *head = chain_first;
+}
+
+static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
struct page *page = NULL;
+ struct page *tmp = NULL;
+ int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
- if (drbd_pp_vacant > 0) {
+ if (drbd_pp_vacant >= number) {
spin_lock(&drbd_pp_lock);
- page = drbd_pp_pool;
- if (page) {
- drbd_pp_pool = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- drbd_pp_vacant--;
- }
+ page = page_chain_del(&drbd_pp_pool, number);
+ if (page)
+ drbd_pp_vacant -= number;
spin_unlock(&drbd_pp_lock);
+ if (page)
+ return page;
}
+
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- if (!page)
- page = alloc_page(GFP_TRY);
- if (page)
- atomic_inc(&mdev->pp_in_use);
- return page;
+ for (i = 0; i < number; i++) {
+ tmp = alloc_page(GFP_TRY);
+ if (!tmp)
+ break;
+ set_page_private(tmp, (unsigned long)page);
+ page = tmp;
+ }
+
+ if (i == number)
+ return page;
+
+ /* Not enough pages immediately available this time.
+ * No need to jump around here, drbd_pp_alloc will retry this
+ * function "soon". */
+ if (page) {
+ tmp = page_chain_tail(page, NULL);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
+ }
+ return NULL;
}
/* kick lower level device, if we have more than (arbitrary number)
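
The receiver now manages its buffer pool as page chains: drbd_pp_first_pages_or_try_alloc() builds a chain by linking freshly allocated pages through page->private, and page_chain_del()/page_chain_add() splice chains in and out of the global pool. A reduced sketch of that linking step, with error handling trimmed (the real code above hands a partial chain back to the pool instead of leaking it):

#include <linux/mm.h>
#include <linux/gfp.h>

/* Build a chain of @n pages, newest first, linked via page->private. */
static struct page *example_alloc_chain(int n)
{
	struct page *chain = NULL, *tmp;
	int i;

	for (i = 0; i < n; i++) {
		tmp = alloc_page(GFP_KERNEL);
		if (!tmp)
			break;	/* sketch only: partial chain is leaked here */
		set_page_private(tmp, (unsigned long)chain);
		chain = tmp;
	}

	return chain;
}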
@@ -127,7 +225,7 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
list_for_each_safe(le, tle, &mdev->net_ee) {
e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_bio_has_active_page(e->private_bio))
+ if (drbd_ee_has_active_page(e))
break;
list_move(le, to_be_freed);
}
@@ -148,32 +246,34 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
}
/**
- * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
+ * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
- * @retry: whether or not to retry allocation forever (or until signalled)
+ * @number: number of pages requested
+ * @retry: whether to retry, if not enough pages are available right now
+ *
+ * Tries to allocate number pages, first from our own page pool, then from
+ * the kernel, unless this allocation would exceed the max_buffers setting.
+ * Possibly retry until DRBD frees sufficient pages somewhere else.
*
- * Tries to allocate a page, first from our own page pool, then from the
- * kernel, unless this allocation would exceed the max_buffers setting.
- * If @retry is non-zero, retry until DRBD frees a page somewhere else.
+ * Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
+static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
struct page *page = NULL;
DEFINE_WAIT(wait);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
- if (page)
- return page;
- }
+ /* Yes, we may run up to @number over max_buffers. If we
+ * follow it strictly, the admin will get it wrong anyways. */
+ if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
- for (;;) {
+ while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
if (page)
break;
}
@@ -190,62 +290,32 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
}
finish_wait(&drbd_pp_wait, &wait);
+ if (page)
+ atomic_add(number, &mdev->pp_in_use);
return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock) */
+ * Is also used from inside another spin_lock_irq(&mdev->req_lock);
+ * Either links the page chain back to the global pool,
+ * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
- int free_it;
-
- spin_lock(&drbd_pp_lock);
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- free_it = 1;
- } else {
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- drbd_pp_vacant++;
- free_it = 0;
- }
- spin_unlock(&drbd_pp_lock);
-
- atomic_dec(&mdev->pp_in_use);
-
- if (free_it)
- __free_page(page);
-
- wake_up(&drbd_pp_wait);
-}
-
-static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
-{
- struct page *p_to_be_freed = NULL;
- struct page *page;
- struct bio_vec *bvec;
int i;
-
- spin_lock(&drbd_pp_lock);
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
- p_to_be_freed = bvec->bv_page;
- } else {
- set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = bvec->bv_page;
- drbd_pp_vacant++;
- }
- }
- spin_unlock(&drbd_pp_lock);
- atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
-
- while (p_to_be_freed) {
- page = p_to_be_freed;
- p_to_be_freed = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- put_page(page);
+ if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+ i = page_chain_free(page);
+ else {
+ struct page *tmp;
+ tmp = page_chain_tail(page, &i);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
}
-
+ atomic_sub(i, &mdev->pp_in_use);
+ i = atomic_read(&mdev->pp_in_use);
+ if (i < 0)
+ dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
wake_up(&drbd_pp_wait);
}
@@ -270,11 +340,9 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
unsigned int data_size,
gfp_t gfp_mask) __must_hold(local)
{
- struct request_queue *q;
struct drbd_epoch_entry *e;
struct page *page;
- struct bio *bio;
- unsigned int ds;
+ unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
return NULL;
@@ -286,84 +354,32 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
return NULL;
}
- bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
- if (!bio) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
- goto fail1;
- }
-
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = sector;
-
- ds = data_size;
- while (ds) {
- page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
- if (!page) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
- goto fail2;
- }
- if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
- drbd_pp_free(mdev, page);
- dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
- "data_size=%u,ds=%u) failed\n",
- (unsigned long long)sector, data_size, ds);
-
- q = bdev_get_queue(bio->bi_bdev);
- if (q->merge_bvec_fn) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
- .bi_rw = bio->bi_rw,
- };
- int l = q->merge_bvec_fn(q, &bvm,
- &bio->bi_io_vec[bio->bi_vcnt]);
- dev_err(DEV, "merge_bvec_fn() = %d\n", l);
- }
-
- /* dump more of the bio. */
- dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
- dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
- dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
- dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
-
- goto fail2;
- break;
- }
- ds -= min_t(int, ds, PAGE_SIZE);
- }
-
- D_ASSERT(data_size == bio->bi_size);
-
- bio->bi_private = e;
- e->mdev = mdev;
- e->sector = sector;
- e->size = bio->bi_size;
+ page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ if (!page)
+ goto fail;
- e->private_bio = bio;
- e->block_id = id;
INIT_HLIST_NODE(&e->colision);
e->epoch = NULL;
+ e->mdev = mdev;
+ e->pages = page;
+ atomic_set(&e->pending_bios, 0);
+ e->size = data_size;
e->flags = 0;
+ e->sector = sector;
+ e->block_id = id;
return e;
- fail2:
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
- fail1:
+ fail:
mempool_free(e, drbd_ee_mempool);
-
return NULL;
}
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- struct bio *bio = e->private_bio;
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
+ drbd_pp_free(mdev, e->pages);
+ D_ASSERT(atomic_read(&e->pending_bios) == 0);
D_ASSERT(hlist_unhashed(&e->colision));
mempool_free(e, drbd_ee_mempool);
}
@@ -902,7 +918,7 @@ retry:
if (!drbd_send_protocol(mdev))
return -1;
drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
drbd_send_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
@@ -946,7 +962,8 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
int rv;
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
@@ -1120,6 +1137,101 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
}
/**
+ * drbd_submit_ee()
+ * @mdev: DRBD device.
+ * @e: epoch entry
+ * @rw: flag field, see bio->bi_rw
+ */
+/* TODO allocate from our own bio_set. */
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type)
+{
+ struct bio *bios = NULL;
+ struct bio *bio;
+ struct page *page = e->pages;
+ sector_t sector = e->sector;
+ unsigned ds = e->size;
+ unsigned n_bios = 0;
+ unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+
+ if (atomic_read(&mdev->new_c_uuid)) {
+ if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
+ drbd_uuid_new_current(mdev);
+ drbd_md_sync(mdev);
+
+ atomic_dec(&mdev->new_c_uuid);
+ wake_up(&mdev->misc_wait);
+ }
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
+ }
+
+ /* In most cases, we will only need one bio. But in case the lower
+ * level restrictions happen to be different at this offset on this
+ * side than those of the sending peer, we may need to submit the
+ * request in more than one bio. */
+next_bio:
+ bio = bio_alloc(GFP_NOIO, nr_pages);
+ if (!bio) {
+ dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+ goto fail;
+ }
+ /* > e->sector, unless this is the first bio */
+ bio->bi_sector = sector;
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+ /* we special case some flags in the multi-bio case, see below
+ * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+ bio->bi_rw = rw;
+ bio->bi_private = e;
+ bio->bi_end_io = drbd_endio_sec;
+
+ bio->bi_next = bios;
+ bios = bio;
+ ++n_bios;
+
+ page_chain_for_each(page) {
+ unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+ if (!bio_add_page(bio, page, len, 0)) {
+ /* a single page must always be possible! */
+ BUG_ON(bio->bi_vcnt == 0);
+ goto next_bio;
+ }
+ ds -= len;
+ sector += len >> 9;
+ --nr_pages;
+ }
+ D_ASSERT(page == NULL);
+ D_ASSERT(ds == 0);
+
+ atomic_set(&e->pending_bios, n_bios);
+ do {
+ bio = bios;
+ bios = bios->bi_next;
+ bio->bi_next = NULL;
+
+ /* strip off BIO_RW_UNPLUG unless it is the last bio */
+ if (bios)
+ bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+
+ drbd_generic_make_request(mdev, fault_type, bio);
+
+ /* strip off BIO_RW_BARRIER,
+ * unless it is the first or last bio */
+ if (bios && bios->bi_next)
+ bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ } while (bios);
+ maybe_kick_lo(mdev);
+ return 0;
+
+fail:
+ while (bios) {
+ bio = bios;
+ bios = bios->bi_next;
+ bio_put(bio);
+ }
+ return -ENOMEM;
+}
+
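For orientation, the packing loop in drbd_submit_ee() above can be read on its own: pages from the chain are added to the current bio until bio_add_page() refuses (because the lower-level limits on this side differ from the peer's), at which point a fresh bio is opened and the same page is retried. A minimal stand-alone sketch of that strategy, with a hypothetical MAX_PAGES_PER_BIO standing in for whatever restriction bio_add_page() would actually enforce, could look like this:

        #include <stdio.h>

        #define PAGE_SIZE 4096u
        /* hypothetical per-bio limit, standing in for bio_add_page() refusing */
        #define MAX_PAGES_PER_BIO 3

        int main(void)
        {
                unsigned ds = 10 * PAGE_SIZE + 512;     /* request size, like e->size */
                unsigned nr_pages = (ds + PAGE_SIZE - 1) / PAGE_SIZE;
                unsigned n_bios = 0, in_this_bio = 0;

                while (nr_pages) {
                        unsigned len = ds < PAGE_SIZE ? ds : PAGE_SIZE;

                        if (n_bios == 0 || in_this_bio == MAX_PAGES_PER_BIO) {
                                /* "bio_add_page() failed": open a new bio and retry
                                 * the same page, like the goto next_bio above */
                                n_bios++;
                                in_this_bio = 0;
                        }
                        in_this_bio++;
                        ds -= len;
                        nr_pages--;
                }
                /* this is the count drbd_submit_ee() stores in e->pending_bios */
                printf("split into %u bios\n", n_bios);
                return 0;
        }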
+/**
* w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
* @mdev: DRBD device.
* @w: work object.
@@ -1128,8 +1240,6 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- struct bio *bio = e->private_bio;
-
/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
(and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
so that we can finish that epoch in drbd_may_finish_epoch().
@@ -1143,33 +1253,17 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
if (previous_epoch(mdev, e->epoch))
dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
- /* prepare bio for re-submit,
- * re-init volatile members */
/* we still have a local reference,
* get_ldev was done in receive_Data. */
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = e->sector;
- bio->bi_size = e->size;
- bio->bi_idx = 0;
-
- bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- bio->bi_flags |= 1 << BIO_UPTODATE;
-
- /* don't know whether this is necessary: */
- bio->bi_phys_segments = 0;
- bio->bi_next = NULL;
-
- /* these should be unchanged: */
- /* bio->bi_end_io = drbd_endio_write_sec; */
- /* bio->bi_vcnt = whatever; */
e->w.cb = e_end_block;
-
- /* This is no longer a barrier request. */
- bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
-
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
-
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
+ /* drbd_submit_ee fails for one reason only:
+ * it was not able to allocate sufficient bios.
+ * requeue, try again later. */
+ e->w.cb = w_e_reissue;
+ drbd_queue_work(&mdev->data.work, &e->w);
+ }
return 1;
}
@@ -1261,13 +1355,13 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
+ const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
struct drbd_epoch_entry *e;
- struct bio_vec *bvec;
struct page *page;
- struct bio *bio;
- int dgs, ds, i, rr;
+ int dgs, ds, rr;
void *dig_in = mdev->int_dig_in;
void *dig_vv = mdev->int_dig_vv;
+ unsigned long *data;
dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
@@ -1286,29 +1380,44 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
ERR_IF(data_size & 0x1ff) return NULL;
ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+ /* even though we trust our peer,
+ * we sometimes have to double check. */
+ if (sector + (data_size>>9) > capacity) {
+ dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+ (unsigned long long)capacity,
+ (unsigned long long)sector, data_size);
+ return NULL;
+ }
+
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
if (!e)
return NULL;
- bio = e->private_bio;
+
ds = data_size;
- bio_for_each_segment(bvec, bio, i) {
- page = bvec->bv_page;
- rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
+ page = e->pages;
+ page_chain_for_each(page) {
+ unsigned len = min_t(int, ds, PAGE_SIZE);
+ data = kmap(page);
+ rr = drbd_recv(mdev, data, len);
+ if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+ dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+ data[0] = data[0] ^ (unsigned long)-1;
+ }
kunmap(page);
- if (rr != min_t(int, ds, PAGE_SIZE)) {
+ if (rr != len) {
drbd_free_ee(mdev, e);
dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, min_t(int, ds, PAGE_SIZE));
+ rr, len);
return NULL;
}
ds -= rr;
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED.\n");
drbd_bcast_ee(mdev, "digest failed",
@@ -1330,7 +1439,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
int rr, rv = 1;
void *data;
- page = drbd_pp_alloc(mdev, 1);
+ if (!data_size)
+ return TRUE;
+
+ page = drbd_pp_alloc(mdev, 1, 1);
data = kmap(page);
while (data_size) {
@@ -1394,7 +1506,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
return 0;
@@ -1415,7 +1527,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
D_ASSERT(hlist_unhashed(&e->colision));
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
drbd_set_in_sync(mdev, sector, e->size);
ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
} else {
@@ -1434,30 +1546,28 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
struct drbd_epoch_entry *e;
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e) {
- put_ldev(mdev);
- return FALSE;
- }
+ if (!e)
+ goto fail;
dec_rs_pending(mdev);
- e->private_bio->bi_end_io = drbd_endio_write_sec;
- e->private_bio->bi_rw = WRITE;
- e->w.cb = e_end_resync_block;
-
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
+ e->w.cb = e_end_resync_block;
+
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->sync_ee);
spin_unlock_irq(&mdev->req_lock);
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
- /* accounting done in endio */
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return TRUE;
- maybe_kick_lo(mdev);
- return TRUE;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return FALSE;
}
static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
@@ -1552,7 +1662,7 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
e->flags & EE_MAY_SET_IN_SYNC) ?
@@ -1698,7 +1808,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_end_io = drbd_endio_write_sec;
e->w.cb = e_end_block;
spin_lock(&mdev->epoch_lock);
@@ -1894,12 +2003,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
drbd_al_begin_io(mdev, e->sector);
}
- e->private_bio->bi_rw = rw;
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
- /* accounting done in endio */
-
- maybe_kick_lo(mdev);
- return TRUE;
+ if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+ return TRUE;
out_interrupted:
/* yes, the epoch_size now is imbalanced.
@@ -1945,7 +2050,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
"no local data.\n");
drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
P_NEG_RS_DREPLY , p);
- return TRUE;
+ return drbd_drain_block(mdev, h->length - brps);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
@@ -1957,9 +2062,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_rw = READ;
- e->private_bio->bi_end_io = drbd_endio_read_sec;
-
switch (h->command) {
case P_DATA_REQUEST:
e->w.cb = w_e_end_data_req;
@@ -2053,10 +2155,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
inc_unacked(mdev);
- drbd_generic_make_request(mdev, fault_type, e->private_bio);
- maybe_kick_lo(mdev);
-
- return TRUE;
+ if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+ return TRUE;
out_free_e:
kfree(di);
@@ -2473,6 +2573,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
hg > 0 ? "source" : "target");
}
+ if (abs(hg) == 100)
+ drbd_khelper(mdev, "initial-split-brain");
+
if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
@@ -2518,7 +2621,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
- dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
+ dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
drbd_khelper(mdev, "split-brain");
return C_MASK;
}
@@ -2849,7 +2952,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
unsigned int max_seg_s;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
- enum drbd_conns nconn;
+ enum dds_flags ddsf;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
if (drbd_recv(mdev, h->payload, h->length) != h->length)
@@ -2905,8 +3008,9 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
}
#undef min_not_zero
+ ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
- dd = drbd_determin_dev_size(mdev, 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
@@ -2916,33 +3020,21 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_set_my_capacity(mdev, p_size);
}
- if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- nconn = drbd_sync_handshake(mdev,
- mdev->state.peer, mdev->state.pdsk);
- put_ldev(mdev);
-
- if (nconn == C_MASK) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
-
- if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
- }
-
if (get_ldev(mdev)) {
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
ldsc = 1;
}
- max_seg_s = be32_to_cpu(p->max_segment_size);
+ if (mdev->agreed_pro_version < 94)
+ max_seg_s = be32_to_cpu(p->max_segment_size);
+ else /* drbd 8.3.8 onwards */
+ max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
drbd_setup_queue_param(mdev, max_seg_s);
- drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
+ drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
put_ldev(mdev);
}
@@ -2951,14 +3043,17 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_get_capacity(mdev->this_bdev) || ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, ddsf);
}
if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.disk >= D_INCONSISTENT)
- resync_after_online_grow(mdev);
- else
+ mdev->state.disk >= D_INCONSISTENT) {
+ if (ddsf & DDSF_NO_RESYNC)
+ dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+ else
+ resync_after_online_grow(mdev);
+ } else
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
}
}
@@ -3490,6 +3585,92 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static void timeval_sub_us(struct timeval* tv, unsigned int us)
+{
+ tv->tv_sec -= us / 1000000;
+ us = us % 1000000;
+ if (tv->tv_usec > us) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+ tv->tv_usec -= us;
+}
+
+static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+{
+ struct delay_probe *dp;
+ struct list_head *le;
+ struct timeval now;
+ int seq_num;
+ int offset;
+ int data_delay;
+
+ seq_num = be32_to_cpu(p->seq_num);
+ offset = be32_to_cpu(p->offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (!list_empty(&mdev->delay_probes)) {
+ if (from == USE_DATA_SOCKET)
+ le = mdev->delay_probes.next;
+ else
+ le = mdev->delay_probes.prev;
+
+ dp = list_entry(le, struct delay_probe, list);
+
+ if (dp->seq_num == seq_num) {
+ list_del(le);
+ spin_unlock(&mdev->peer_seq_lock);
+ do_gettimeofday(&now);
+ timeval_sub_us(&now, offset);
+ data_delay =
+ now.tv_usec - dp->time.tv_usec +
+ (now.tv_sec - dp->time.tv_sec) * 1000000;
+
+ if (data_delay > 0)
+ mdev->data_delay = data_delay;
+
+ kfree(dp);
+ return;
+ }
+
+ if (dp->seq_num > seq_num) {
+ spin_unlock(&mdev->peer_seq_lock);
+ dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
+ return; /* Do not allocate a struct delay_probe. */
+ }
+ }
+ spin_unlock(&mdev->peer_seq_lock);
+
+ dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
+ if (!dp) {
+ dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
+ return;
+ }
+
+ dp->seq_num = seq_num;
+ do_gettimeofday(&dp->time);
+ timeval_sub_us(&dp->time, offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (from == USE_DATA_SOCKET)
+ list_add(&dp->list, &mdev->delay_probes);
+ else
+ list_add_tail(&dp->list, &mdev->delay_probes);
+ spin_unlock(&mdev->peer_seq_lock);
+}
+
+static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
+ if (drbd_recv(mdev, h->payload, h->length) != h->length)
+ return FALSE;
+
+ got_delay_probe(mdev, USE_DATA_SOCKET, p);
+ return TRUE;
+}
+
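To put numbers on the delay computation in got_delay_probe(): with a stored entry of, hypothetically, dp->time = {tv_sec = 100, tv_usec = 200} and now = {tv_sec = 100, tv_usec = 4900} after timeval_sub_us(&now, offset), data_delay works out to (4900 - 200) + (100 - 100) * 1000000 = 4700 microseconds. That value lands in mdev->data_delay and later steers the resync rate through calc_resync_rate() further down in this patch.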
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
static drbd_cmd_handler_f drbd_default_handler[] = {
@@ -3513,6 +3694,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
[P_OV_REQUEST] = receive_DataRequest,
[P_OV_REPLY] = receive_DataRequest,
[P_CSUM_RS_REQUEST] = receive_DataRequest,
+ [P_DELAY_PROBE] = receive_delay_probe,
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
[P_MAX_CMD] = NULL,
@@ -3739,7 +3921,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use);
if (i)
- dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
+ dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->active_ee));
@@ -4232,7 +4414,6 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- D_ASSERT(p->block_id == ID_SYNCER);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4290,6 +4471,14 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ got_delay_probe(mdev, USE_META_SOCKET, p);
+ return TRUE;
+}
+
struct asender_cmd {
size_t pkt_size;
int (*process)(struct drbd_conf *mdev, struct p_header *h);
@@ -4314,6 +4503,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
+ [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_delay_probe_m },
[P_MAX_CMD] = { 0, NULL },
};
if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de81ab7b462..3397f11d0ba 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -722,6 +722,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
struct drbd_request *req;
int local, remote;
int err = -EIO;
+ int ret = 0;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -784,7 +785,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
(mdev->state.pdsk == D_INCONSISTENT &&
mdev->state.conn >= C_CONNECTED));
- if (!(local || remote)) {
+ if (!(local || remote) && !mdev->state.susp) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
goto fail_free_complete;
}
@@ -810,6 +811,16 @@ allocate_barrier:
/* GOOD, everything prepared, grab the spin_lock */
spin_lock_irq(&mdev->req_lock);
+ if (mdev->state.susp) {
+ /* If we got suspended, use the retry mechanism of
+ generic_make_request() to restart processing of this
+ bio. In the next call to drbd_make_request_26
+ we sleep in inc_ap_bio() */
+ ret = 1;
+ spin_unlock_irq(&mdev->req_lock);
+ goto fail_free_complete;
+ }
+
if (remote) {
remote = (mdev->state.pdsk == D_UP_TO_DATE ||
(mdev->state.pdsk == D_INCONSISTENT &&
@@ -947,12 +958,14 @@ fail_and_free_req:
req->private_bio = NULL;
put_ldev(mdev);
}
- bio_endio(bio, err);
+ if (!ret)
+ bio_endio(bio, err);
+
drbd_req_free(req);
dec_ap_bio(mdev);
kfree(b);
- return 0;
+ return ret;
}
/* helper function for drbd_make_request
@@ -962,11 +975,6 @@ fail_and_free_req:
*/
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
- /* Unconfigured */
- if (mdev->state.conn == C_DISCONNECTING &&
- mdev->state.disk == D_DISKLESS)
- return 1;
-
if (mdev->state.role != R_PRIMARY &&
(!allow_oos || is_write)) {
if (__ratelimit(&drbd_ratelimit_state)) {
@@ -1070,15 +1078,21 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
/* we need to get a "reference count" (ap_bio_cnt)
* to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get two references
+ * In case we need to split the bio here, we need to get three references
* atomically, otherwise we might deadlock when trying to submit the
* second one! */
- inc_ap_bio(mdev, 2);
+ inc_ap_bio(mdev, 3);
D_ASSERT(e_enr == s_enr + 1);
- drbd_make_request_common(mdev, &bp->bio1);
- drbd_make_request_common(mdev, &bp->bio2);
+ while (drbd_make_request_common(mdev, &bp->bio1))
+ inc_ap_bio(mdev, 1);
+
+ while (drbd_make_request_common(mdev, &bp->bio2))
+ inc_ap_bio(mdev, 1);
+
+ dec_ap_bio(mdev);
+
bio_pair_release(bp);
}
return 0;
@@ -1115,7 +1129,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
} else if (limit && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
- if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+ if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 76863e3f05b..85179e1fb50 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -70,7 +70,7 @@ static const char *drbd_disk_s_names[] = {
static const char *drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
- [-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk",
+ [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
[-SS_NO_REMOTE_DISK] = "Can not resync without remote disk",
[-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d48a1dfd7b2..727ff633975 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -47,8 +47,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
/* defined here:
drbd_md_io_complete
- drbd_endio_write_sec
- drbd_endio_read_sec
+ drbd_endio_sec
drbd_endio_pri
* more endio handlers:
@@ -85,27 +84,10 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
- e = bio->bi_private;
- mdev = e->mdev;
-
- if (error)
- dev_warn(DEV, "read: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "read: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
+ struct drbd_conf *mdev = e->mdev;
D_ASSERT(e->block_id != ID_VACANT);
@@ -114,49 +96,38 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
list_del(&e->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
+ __drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
- drbd_chk_io_error(mdev, error, FALSE);
drbd_queue_work(&mdev->data.work, &e->w);
put_ldev(mdev);
}
+static int is_failed_barrier(int ee_flags)
+{
+ return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
+ == (EE_IS_BARRIER|EE_WAS_ERROR);
+}
+
/* writes on behalf of the partner, or resync writes,
- * "submitted" by the receiver.
- */
-void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
+ * "submitted" by the receiver, final stage. */
+static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
+ struct drbd_conf *mdev = e->mdev;
sector_t e_sector;
int do_wake;
int is_syncer_req;
int do_al_complete_io;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
- int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
-
- e = bio->bi_private;
- mdev = e->mdev;
- if (error)
- dev_warn(DEV, "write: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "write: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
-
- /* error == -ENOTSUPP would be a better test,
- * alas it is not reliable */
- if (error && is_barrier && e->flags & EE_IS_BARRIER) {
+ /* if this is a failed barrier request, disable use of barriers,
+ * and schedule for resubmission */
+ if (is_failed_barrier(e->flags)) {
drbd_bump_write_ordering(mdev, WO_bdev_flush);
spin_lock_irqsave(&mdev->req_lock, flags);
list_del(&e->w.list);
+ e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
e->w.cb = w_e_reissue;
/* put_ldev actually happens below, once we come here again. */
__release(local);
@@ -167,17 +138,16 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
D_ASSERT(e->block_id != ID_VACANT);
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- is_syncer_req = is_syncer_block_id(e->block_id);
-
/* after we moved e to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
e_sector = e->sector;
do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
+ is_syncer_req = is_syncer_block_id(e->block_id);
+ spin_lock_irqsave(&mdev->req_lock, flags);
+ mdev->writ_cnt += e->size >> 9;
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
@@ -190,7 +160,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
? list_empty(&mdev->sync_ee)
: list_empty(&mdev->active_ee);
- if (error)
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -205,7 +175,42 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
wake_asender(mdev);
put_ldev(mdev);
+}
+
+/* writes on behalf of the partner, or resync writes,
+ * "submitted" by the receiver.
+ */
+void drbd_endio_sec(struct bio *bio, int error)
+{
+ struct drbd_epoch_entry *e = bio->bi_private;
+ struct drbd_conf *mdev = e->mdev;
+ int uptodate = bio_flagged(bio, BIO_UPTODATE);
+ int is_write = bio_data_dir(bio) == WRITE;
+
+ if (error)
+ dev_warn(DEV, "%s: error=%d s=%llus\n",
+ is_write ? "write" : "read", error,
+ (unsigned long long)e->sector);
+ if (!error && !uptodate) {
+ dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ is_write ? "write" : "read",
+ (unsigned long long)e->sector);
+ /* strange behavior of some lower level drivers...
+ * fail the request by clearing the uptodate flag,
+ * but do not return any error?! */
+ error = -EIO;
+ }
+
+ if (error)
+ set_bit(__EE_WAS_ERROR, &e->flags);
+ bio_put(bio); /* no need for the bio anymore */
+ if (atomic_dec_and_test(&e->pending_bios)) {
+ if (is_write)
+ drbd_endio_write_sec_final(e);
+ else
+ drbd_endio_read_sec_final(e);
+ }
}
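The split into a shared drbd_endio_sec() and per-direction *_final handlers hinges on e->pending_bios acting as a completion count: drbd_submit_ee() stores the number of bios it issued, every completing bio decrements it, and only the decrement that reaches zero runs the final stage. A small stand-alone sketch of that pattern, using C11 atomics in place of the kernel's atomic_t (names purely illustrative), might look like this:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int pending_bios;

        static void final_stage(void)
        {
                printf("all parts completed, running final stage\n");
        }

        /* stands in for drbd_endio_sec(): called once per completed part */
        static void part_completed(void)
        {
                /* like atomic_dec_and_test(): only the caller that hits zero wins */
                if (atomic_fetch_sub(&pending_bios, 1) == 1)
                        final_stage();
        }

        int main(void)
        {
                int n_bios = 3;

                atomic_store(&pending_bios, n_bios);    /* like atomic_set() at submit */
                for (int i = 0; i < n_bios; i++)
                        part_completed();
                return 0;
        }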
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
@@ -295,7 +300,34 @@ int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1; /* Simply ignore this! */
}
-void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+{
+ struct hash_desc desc;
+ struct scatterlist sg;
+ struct page *page = e->pages;
+ struct page *tmp;
+ unsigned len;
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_table(&sg, 1);
+ crypto_hash_init(&desc);
+
+ while ((tmp = page_chain_next(page))) {
+ /* all but the last page will be fully used */
+ sg_set_page(&sg, page, PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ page = tmp;
+ }
+ /* and now the last, possibly only partially used page */
+ len = e->size & (PAGE_SIZE - 1);
+ sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ crypto_hash_final(&desc, digest);
+}
+
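The length arithmetic at the end of drbd_csum_ee() deserves a concrete example: for, say, e->size = 12288 (exactly three 4096-byte pages) the expression e->size & (PAGE_SIZE - 1) is 0, so the ?: fallback hashes the last page in full; for e->size = 10240 it yields 2048, and only the first 2048 bytes of the final page enter the digest.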
+void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
@@ -329,11 +361,11 @@ static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev,
@@ -369,23 +401,21 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e) {
- put_ldev(mdev);
- return 2;
- }
+ if (!e)
+ goto fail;
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->read_ee);
spin_unlock_irq(&mdev->req_lock);
- e->private_bio->bi_end_io = drbd_endio_read_sec;
- e->private_bio->bi_rw = READ;
e->w.cb = w_e_send_csum;
+ if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ return 1;
- mdev->read_cnt += size >> 9;
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio);
-
- return 1;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return 2;
}
void resync_timer_fn(unsigned long data)
@@ -414,13 +444,25 @@ void resync_timer_fn(unsigned long data)
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
+static int calc_resync_rate(struct drbd_conf *mdev)
+{
+ int d = mdev->data_delay / 1000; /* us -> ms */
+ int td = mdev->sync_conf.throttle_th * 100; /* 0.1s -> ms */
+ int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */
+ int cr = mdev->sync_conf.rate;
+
+ return d <= td ? cr :
+ d >= hd ? 0 :
+ cr + (cr * (td - d) / (hd - td));
+}
+
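As a worked example of calc_resync_rate() with made-up settings rate = 10000, throttle_th = 1 (td = 100 ms) and hold_off_th = 2 (hd = 200 ms): a measured data delay of 50 ms keeps the full 10000; 150 ms gives 10000 + 10000 * (100 - 150) / (200 - 100) = 5000; anything at or above 200 ms drops the requested rate to 0. In other words, the rate is interpolated linearly from the configured value down to zero between the two thresholds.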
int w_make_resync_request(struct drbd_conf *mdev,
struct drbd_work *w, int cancel)
{
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+ int max_segment_size;
int number, i, size, pe, mx;
int align, queued, sndbuf;
@@ -446,7 +488,13 @@ int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
+ /* starting with drbd 8.3.8, we can handle multi-bio EEs,
+ * if it should be necessary */
+ max_segment_size = mdev->agreed_pro_version < 94 ?
+ queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
+ mdev->c_sync_rate = calc_resync_rate(mdev);
+ number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
pe = atomic_read(&mdev->rs_pending_cnt);
mutex_lock(&mdev->data.mutex);
@@ -509,12 +557,6 @@ next_sector:
*
* Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
- *
- * we _do_ care about the agreed-upon q->max_segment_size
- * here, as splitting up the requests on the other side is more
- * difficult. the consequence is, that on lvm and md and other
- * "indirect" devices, this is dead code, since
- * q->max_segment_size will be PAGE_SIZE.
*/
align = 1;
for (;;) {
@@ -806,7 +848,7 @@ out:
/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- if (drbd_bio_has_active_page(e->private_bio)) {
+ if (drbd_ee_has_active_page(e)) {
/* This might happen if sendpage() has not finished */
spin_lock_irq(&mdev->req_lock);
list_add_tail(&e->w.list, &mdev->net_ee);
@@ -832,7 +874,7 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
ok = drbd_send_block(mdev, P_DATA_REPLY, e);
} else {
if (__ratelimit(&drbd_ratelimit_state))
@@ -873,7 +915,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
put_ldev(mdev);
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -921,7 +963,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
@@ -931,7 +973,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
@@ -973,14 +1015,14 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- if (unlikely(!drbd_bio_uptodate(e->private_bio)))
+ if (unlikely((e->flags & EE_WAS_ERROR) != 0))
goto out;
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
/* FIXME if this allocation fails, online verify will not terminate! */
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
digest, digest_size, P_OV_REPLY);
@@ -1029,11 +1071,11 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index f93fa111ce5..defdb5013ea 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -18,23 +18,9 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_read_sec(struct bio *bio, int error);
-extern void drbd_endio_write_sec(struct bio *bio, int error);
+extern void drbd_endio_sec(struct bio *bio, int error);
extern void drbd_endio_pri(struct bio *bio, int error);
/*
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 034e6dfc878..81c78b3ce2d 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -164,12 +164,12 @@ unsigned long read_timer(void)
unsigned long t, flags;
int i;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return(t - i);
}
#endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8546d123b9a..6120922f459 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -485,7 +485,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret)) {
ret = -EIO;
goto out;
@@ -495,7 +495,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
ret = lo_send(lo, bio, pos);
if (barrier && !ret) {
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret))
ret = -EIO;
}
@@ -835,6 +835,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
set_blocksize(bdev, lo_blocksize);
@@ -858,6 +860,7 @@ out_clr:
set_capacity(lo->lo_disk, 0);
invalidate_bdev(bdev);
bd_set_size(bdev, 0);
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
lo->lo_state = Lo_unbound;
out_putf:
@@ -944,8 +947,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (bdev)
invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
- if (bdev)
+ if (bdev) {
bd_set_size(bdev, 0);
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ }
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
@@ -1189,6 +1195,8 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
sz <<= 9;
mutex_lock(&bdev->bd_mutex);
bd_set_size(bdev, sz);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mutex_unlock(&bdev->bd_mutex);
out:
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 59ca2b77b57..52f2d11bc7b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1004,7 +1004,7 @@ static const struct block_device_operations floppy_fops = {
static int swim3_add_device(struct macio_dev *mdev, int index)
{
- struct device_node *swim = mdev->ofdev.node;
+ struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2138a7ae050..83fa09a836c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
unsigned long flags;
spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
+ if (blk_special_request(vbr->req))
+ vbr->req->errors = (error != 0);
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
case REQ_TYPE_LINUX_BLOCK:
if (req->cmd[0] == REQ_LB_OP_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
}
if (issued)
- vblk->vq->vq_ops->kick(vblk->vq);
+ virtqueue_kick(vblk->vq);
}
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd[0] = REQ_LB_OP_FLUSH;
}
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct request *req;
+ struct bio *bio;
+
+ bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ bio_put(bio);
+ return PTR_ERR(req);
+ }
+
+ req->cmd_type = REQ_TYPE_SPECIAL;
+ return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ if (cmd == 0x56424944) { /* 'VBID' */
+ void __user *usr_data = (void __user *)data;
+ char id_str[VIRTIO_BLK_ID_BYTES];
+ int err;
+
+ err = virtblk_get_id(disk, id_str);
+ if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+ return err;
+ }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
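The 'VBID' ioctl added above exposes the virtio-blk serial to user space. A minimal user-space sketch is shown below; the device path is only an example, and the 20-byte buffer assumes the usual VIRTIO_BLK_ID_BYTES definition:

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        #define VBID_IOCTL      0x56424944      /* 'VBID', as checked in virtblk_ioctl() */
        #define VBID_LEN        20              /* assumed to match VIRTIO_BLK_ID_BYTES */

        int main(void)
        {
                char id[VBID_LEN + 1];
                int fd = open("/dev/vda", O_RDONLY);    /* example device node */

                if (fd < 0)
                        return 1;
                memset(id, 0, sizeof(id));
                if (ioctl(fd, VBID_IOCTL, id) == 0)
                        printf("serial: %s\n", id);
                close(fd);
                return 0;
        }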
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index e1c95e208a6..a7b83c0a7eb 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1198,10 +1198,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
/* device id */
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* physaddr */
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
@@ -1209,11 +1209,11 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
physaddr = res.start;
/* irq */
- irq = irq_of_parse_and_map(op->node, 0);
+ irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* bus width */
bus_width = ACE_BUS_WIDTH_16;
- if (of_find_property(op->node, "8-bit", NULL))
+ if (of_find_property(op->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
/* Call the bus-independent setup code */
@@ -1237,13 +1237,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ace_of_match);
static struct of_platform_driver ace_of_driver = {
- .owner = THIS_MODULE,
- .name = "xsysace",
- .match_table = ace_of_match,
.probe = ace_of_probe,
.remove = __devexit_p(ace_of_remove),
.driver = {
.name = "xsysace",
+ .owner = THIS_MODULE,
+ .of_match_table = ace_of_match,
},
};
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d9bf87ca9e8..6f907ebed2d 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("GPL");
typedef struct bluecard_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -869,9 +868,6 @@ static int bluecard_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bluecard_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -908,9 +904,9 @@ static int bluecard_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, bluecard_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -919,9 +915,6 @@ static int bluecard_config(struct pcmcia_device *link)
if (bluecard_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 027cb8bf650..21e05fdc912 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,6 @@ MODULE_FIRMWARE("BT3CPCC.bin");
typedef struct bt3c_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -661,9 +660,6 @@ static int bt3c_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bt3c_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -743,9 +739,9 @@ static int bt3c_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, &bt3c_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -754,9 +750,6 @@ found_port:
if (bt3c_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 204727586ee..bed0ba63023 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -42,6 +42,8 @@ struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
+ u8 dev_type;
+
u8 tx_dnld_rdy;
u8 psmode;
@@ -88,8 +90,11 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
-/* Sub-commands: Module Bringup/Shutdown Request */
+/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
+#define MODULE_BROUGHT_UP 0x00
+#define MODULE_ALREADY_UP 0x0C
+
#define MODULE_SHUTDOWN_REQ 0xF2
#define BT_EVENT_POWER_STATE 0x20
@@ -123,6 +128,7 @@ struct btmrvl_event {
/* Prototype of global function */
+int btmrvl_register_hdev(struct btmrvl_private *priv);
struct btmrvl_private *btmrvl_add_card(void *card);
int btmrvl_remove_card(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 53a43adf2e2..ee37ef0caee 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -66,7 +66,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct btmrvl_adapter *adapter = priv->adapter;
struct btmrvl_event *event;
- u8 ret = 0;
+ int ret = 0;
event = (struct btmrvl_event *) skb->data;
if (event->ec != 0xff) {
@@ -112,8 +112,17 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
case BT_CMD_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
- BT_DBG("EVENT:%s", (event->data[2]) ?
- "Bring-up failed" : "Bring-up succeed");
+ BT_DBG("EVENT:%s",
+ ((event->data[2] == MODULE_BROUGHT_UP) ||
+ (event->data[2] == MODULE_ALREADY_UP)) ?
+ "Bring-up succeed" : "Bring-up failed");
+
+ if (event->length > 3)
+ priv->btmrvl_dev.dev_type = event->data[3];
+ else
+ priv->btmrvl_dev.dev_type = HCI_BREDR;
+
+ BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -522,47 +531,20 @@ static int btmrvl_service_main_thread(void *data)
return 0;
}
-struct btmrvl_private *btmrvl_add_card(void *card)
+int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
- struct btmrvl_private *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- BT_ERR("Can not allocate priv");
- goto err_priv;
- }
-
- priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
- if (!priv->adapter) {
- BT_ERR("Allocate buffer for btmrvl_adapter failed!");
- goto err_adapter;
- }
-
- btmrvl_init_adapter(priv);
-
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can not allocate HCI device");
goto err_hdev;
}
- BT_DBG("Starting kthread...");
- priv->main_thread.priv = priv;
- spin_lock_init(&priv->driver_lock);
-
- init_waitqueue_head(&priv->main_thread.wait_q);
- priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
- &priv->main_thread, "btmrvl_main_service");
-
priv->btmrvl_dev.hcidev = hdev;
- priv->btmrvl_dev.card = card;
-
hdev->driver_data = priv;
- priv->btmrvl_dev.tx_dnld_rdy = true;
-
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
@@ -572,6 +554,10 @@ struct btmrvl_private *btmrvl_add_card(void *card)
hdev->ioctl = btmrvl_ioctl;
hdev->owner = THIS_MODULE;
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ hdev->dev_type = priv->btmrvl_dev.dev_type;
+
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
@@ -582,16 +568,52 @@ struct btmrvl_private *btmrvl_add_card(void *card)
btmrvl_debugfs_init(hdev);
#endif
- return priv;
+ return 0;
err_hci_register_dev:
- /* Stop the thread servicing the interrupts */
- kthread_stop(priv->main_thread.task);
-
hci_free_dev(hdev);
err_hdev:
+ /* Stop the thread servicing the interrupts */
+ kthread_stop(priv->main_thread.task);
+
btmrvl_free_adapter(priv);
+ kfree(priv);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(btmrvl_register_hdev);
+
+struct btmrvl_private *btmrvl_add_card(void *card)
+{
+ struct btmrvl_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ BT_ERR("Can not allocate priv");
+ goto err_priv;
+ }
+
+ priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
+ if (!priv->adapter) {
+ BT_ERR("Allocate buffer for btmrvl_adapter failed!");
+ goto err_adapter;
+ }
+
+ btmrvl_init_adapter(priv);
+
+ BT_DBG("Starting kthread...");
+ priv->main_thread.priv = priv;
+ spin_lock_init(&priv->driver_lock);
+
+ init_waitqueue_head(&priv->main_thread.wait_q);
+ priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
+ &priv->main_thread, "btmrvl_main_service");
+
+ priv->btmrvl_dev.card = card;
+ priv->btmrvl_dev.tx_dnld_rdy = true;
+
+ return priv;
err_adapter:
kfree(priv);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0dba76aa223..df0773ebd9e 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -931,7 +931,12 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ if (btmrvl_register_hdev(priv)) {
+ BT_ERR("Register hdev failed!");
+ ret = -ENODEV;
+ goto disable_host_int;
+ }
+
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 60c0953d7d0..4ed7288f99d 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct btuart_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -590,9 +589,6 @@ static int btuart_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = btuart_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -672,9 +668,9 @@ static int btuart_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, btuart_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -683,9 +679,6 @@ found_port:
if (btuart_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 17788317c51..ef044d55cb2 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct dtl1_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -575,9 +574,6 @@ static int dtl1_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = dtl1_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -621,9 +617,9 @@ static int dtl1_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, dtl1_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -632,9 +628,6 @@ static int dtl1_config(struct pcmcia_device *link)
if (dtl1_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index c0ce8134814..3f038f5308a 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -246,7 +246,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
h4->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 5c65014635b..fb8445c7365 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -402,7 +402,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_EVENT_HDR:
- eh = (struct hci_event_hdr *) ll->rx_skb->data;
+ eh = hci_event_hdr(ll->rx_skb);
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
@@ -410,7 +410,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_ACL_HDR:
- ah = (struct hci_acl_hdr *) ll->rx_skb->data;
+ ah = hci_acl_hdr(ll->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
BT_DBG("ACL header: dlen %d", dlen);
@@ -419,7 +419,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_SCO_HDR:
- sh = (struct hci_sco_hdr *) ll->rx_skb->data;
+ sh = hci_sco_hdr(ll->rx_skb);
BT_DBG("SCO header: dlen %d", sh->dlen);
@@ -491,7 +491,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
ll->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index bb0aefdb426..3aa7b2a54b6 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -157,7 +157,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
break;
case HCI_SCODATA_PKT:
- data->hdev->stat.cmd_tx++;
+ data->hdev->stat.sco_tx++;
break;
};
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index cc435be0bc1..451cd7071b1 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -567,7 +567,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct disk_info *d;
struct cdrom_device_info *c;
struct request_queue *q;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
deviceno = vdev->unit_address;
if (deviceno >= VIOCD_MAX_CD)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3141dd3b6e5..f09fc0e2062 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -276,11 +276,19 @@ config N_HDLC
Allows synchronous HDLC communications with tty device drivers that
support synchronous HDLC such as the Microgate SyncLink adapter.
- This driver can only be built as a module ( = code which can be
+ This driver can be built as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called n_hdlc. If you want to do that, say M
here.
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
config RISCOM8
tristate "SDL RISCom/8 card support"
depends on SERIAL_NONSTANDARD
@@ -1113,5 +1121,12 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
+config RAMOOPS
+ tristate "Log panic/oops to a RAM buffer"
+ default n
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in RAM where it can be read back at some later point.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f957edf7e45..88d6eac6975 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
@@ -107,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed9..12049094999 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
-/* Intel registers */
-#define INTEL_APSIZE 0xb4
-#define INTEL_ATTBASE 0xb8
-#define INTEL_AGPCTRL 0xb0
-#define INTEL_NBXCFG 0x50
-#define INTEL_ERRSTS 0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL 0x52
-#define I830_GMCH_ENABLED 0x4
-#define I830_GMCH_MEM_MASK 0x1
-#define I830_GMCH_MEM_64M 0x1
-#define I830_GMCH_MEM_128M 0
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_DISABLED 0x00
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-#define I830_RDRAM_CHANNEL_TYPE 0x03010
-#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS 0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I85X_CAPID 0x44
-#define I85X_VARIANT_MASK 0x7
-#define I85X_VARIANT_SHIFT 5
-#define I855_GME 0x0
-#define I855_GM 0x4
-#define I852_GME 0x2
-#define I852_GM 0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM 0x51
-#define INTEL_I845_ERRSTS 0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG 0x50
-#define INTEL_I860_ERRSTS 0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
-#define I810_PTE_BASE 0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL 0x00000002
-#define I810_PTE_VALID 0x00000001
-#define I830_PTE_SYSTEM_CACHED 0x00000006
-#define I810_SMRAM_MISCC 0x70
-#define I810_GFX_MEM_WIN_SIZE 0x00010000
-#define I810_GFX_MEM_WIN_32M 0x00010000
-#define I810_GMS 0x000000c0
-#define I810_GMS_DISABLE 0x00000000
-#define I810_PGETBL_CTL 0x2020
-#define I810_PGETBL_ENABLED 0x00000001
-#define I965_PGETBL_SIZE_MASK 0x0000000e
-#define I965_PGETBL_SIZE_512KB (0 << 1)
-#define I965_PGETBL_SIZE_256KB (1 << 1)
-#define I965_PGETBL_SIZE_128KB (2 << 1)
-#define I965_PGETBL_SIZE_1MB (3 << 1)
-#define I965_PGETBL_SIZE_2MB (4 << 1)
-#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK (3 << 8)
-#define G33_PGETBL_SIZE_1M (1 << 8)
-#define G33_PGETBL_SIZE_2M (2 << 8)
-
-#define I810_DRAM_CTL 0x3000
-#define I810_DRAM_ROW_0 0x00000001
-#define I810_DRAM_ROW_0_SDRAM 0x00000001
-
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index d2ce68f27e4..fd793519ea2 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index a7637d72cef..b6b1568314c 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
return 0;
}
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
.aperture_sizes = amd_irongate_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_irongate_configure,
.fetch_size = amd_irongate_fetch_size,
.cleanup = amd_irongate_cleanup,
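
The added loop in amd_create_gatt_table() (together with .needs_scratch_page = true) points every GATT entry at the bridge's scratch page, so a stray GART lookup before any real pages are inserted resolves to a known harmless page rather than to uninitialised table contents. A minimal sketch of that idea, with a plain array standing in for the GATT and made-up names (gatt, scratch, map_page):

/* Sketch only: a page table pre-filled with a scratch entry.
 * 'gatt', 'scratch' and 'map_page' are illustrative names, not AGP code. */
#include <stdio.h>

#define ENTRIES 8

static unsigned long gatt[ENTRIES];
static const unsigned long scratch = 0xdead000;	/* stand-in for the scratch page address */

static void map_page(unsigned int idx, unsigned long phys)
{
	gatt[idx] = phys;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < ENTRIES; i++)	/* same pattern as the new init loop above */
		gatt[i] = scratch;

	map_page(3, 0x1234000);

	for (i = 0; i < ENTRIES; i++)
		printf("entry %u -> %#lx%s\n", i, gatt[i],
		       gatt[i] == scratch ? " (scratch)" : "");
	return 0;
}
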
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead59c7..70312da4c96 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
.aperture_sizes = amd_8151_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_8151_configure,
.fetch_size = amd64_fetch_size,
.cleanup = amd64_cleanup,
@@ -383,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -399,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(uli_sizes)) {
dev_info(&pdev->dev, "no ULi size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
/* shadow x86-64 registers into ULi registers */
pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
- if ((httfea & 0x7fff) >> (32 - 25))
- return -ENODEV;
+ if ((httfea & 0x7fff) >> (32 - 25)) {
+ ret = -ENODEV;
+ goto put;
+ }
httfea = (httfea& 0x7fff) << 25;
@@ -419,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
enuscr= httfea+ (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
@@ -440,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
{
u32 tmp, apbase, apbar, aplimit;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -457,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(nforce3_sizes)) {
dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -471,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
dev_info(&pdev->dev, "aperture base > 4G\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
apbase = (apbase & 0x7fff) << 25;
@@ -487,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
static int __devinit agp_amd64_probe(struct pci_dev *pdev,
@@ -499,6 +508,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
u8 cap_ptr;
int err;
+ /* The Highlander principle */
+ if (agp_bridges_found)
+ return -ENODEV;
+
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
@@ -562,6 +575,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
+
+ agp_bridges_found--;
}
#ifdef CONFIG_PM
@@ -709,6 +724,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+ { PCI_DEVICE_CLASS(0, 0) },
+ { }
+};
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
@@ -734,7 +754,6 @@ int __init agp_amd64_init(void)
return err;
if (agp_bridges_found == 0) {
- struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
@@ -750,17 +769,10 @@ int __init agp_amd64_init(void)
return -ENODEV;
/* Look for any AGP bridge */
- dev = NULL;
- err = -ENODEV;
- for_each_pci_dev(dev) {
- if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
- continue;
- /* Only one bridge supported right now */
- if (agp_amd64_probe(dev, NULL) == 0) {
- err = 0;
- break;
- }
- }
+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+ err = driver_attach(&agp_amd64_pci_driver.driver);
+ if (err == 0 && agp_bridges_found == 0)
+ err = -ENODEV;
}
return err;
}
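
The amd64 init-path hunks above drop the hand-rolled for_each_pci_dev() scan in favour of a catch-all id table (PCI_DEVICE_CLASS(0, 0)) plus driver_attach(), letting the PCI core call agp_amd64_probe() for every device while probe itself refuses a second bridge. The catch-all table matches everything because class matching only compares the bits selected by class_mask, and a mask of 0 selects none. A small standalone sketch of that matching rule; the struct and field names below are illustrative, not the kernel's pci_device_id layout.

/* Sketch of PCI class/class_mask matching; struct names are illustrative. */
#include <stdio.h>

struct id { unsigned int class, class_mask; };

static int class_matches(const struct id *id, unsigned int dev_class)
{
	/* only the bits selected by class_mask are compared */
	return (dev_class & id->class_mask) == (id->class & id->class_mask);
}

int main(void)
{
	struct id host_bridge = { .class = 0x060000, .class_mask = 0xffff00 };
	struct id promisc     = { .class = 0,        .class_mask = 0 };
	unsigned int devices[] = { 0x060000 /* host bridge */, 0x030000 /* VGA */ };
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("dev %#x: bridge-only %d, promiscuous %d\n", devices[i],
		       class_matches(&host_bridge, devices[i]),
		       class_matches(&promisc, devices[i]));
	return 0;
}
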
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3b2ecbe86eb..dc30e224349 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
return 0;
}
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea961..aa109cbe0e6 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
+#include "intel-agp.h"
/*
* The real differences to the generic AGP code is
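
efficeon-agp.c now pulls in "intel-agp.h" so that the Intel register #defines removed from the shared agp.h earlier in this patch remain available to it from a single place. A trivial sketch of that header-split pattern; the file and macro names are made up, though the two offsets echo the INTEL_APSIZE/INTEL_ATTBASE values removed above.

/* Sketch of moving shared register definitions into a header; names here are
 * illustrative, not the real intel-agp.h contents. */

/* --- chip-regs.h (shared header) --- */
#ifndef CHIP_REGS_H
#define CHIP_REGS_H
#define CHIP_APSIZE	0xb4
#define CHIP_ATTBASE	0xb8
#endif

/* --- each driver .c file would then just do: #include "chip-regs.h" --- */
#include <stdio.h>

int main(void)
{
	printf("APSIZE register at %#x, ATTBASE at %#x\n",
	       CHIP_APSIZE, CHIP_ATTBASE);
	return 0;
}
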
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index fb86708e47e..4b51982fd23 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1214,7 +1214,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
return NULL;
for (i = 0; i < page_count; i++)
- new->pages[i] = 0;
+ new->pages[i] = NULL;
new->page_count = 0;
new->type = type;
new->num_scratch_pages = pages;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d..d836a71bf06 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
+#include "intel-agp.h"
+
+#include "intel-gtt.c"
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
-/*
- * If we have Intel graphics, we're not going to have anything other than
- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
- * Only newer chipsets need to bother with this, of course.
- */
-#ifdef CONFIG_DMAR
-#define USE_PCI_DMA_API 1
-#endif
-
-#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
-#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
-#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
-#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
-#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
-#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
-#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
-#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
-#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
-#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
-#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
-#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
-#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
-#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
-#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
-#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
-#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
-#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
-#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
-#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
-#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
-#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
-#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
-#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
-#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
-#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
-#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
-#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
-
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
- IS_SNB)
-
-extern int agp_memory_reserved;
-
-
-/* Intel 815 register */
-#define INTEL_815_APCONT 0x51
-#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
-
-/* Intel i820 registers */
-#define INTEL_I820_RDCR 0x51
-#define INTEL_I820_ERRSTS 0xc8
-
-/* Intel i840 registers */
-#define INTEL_I840_MCHCFG 0x50
-#define INTEL_I840_ERRSTS 0xc8
-
-/* Intel i850 registers */
-#define INTEL_I850_MCHCFG 0x50
-#define INTEL_I850_ERRSTS 0xc8
-
-/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define I915_IFPADDR 0x60
-
-/* Intel 965G registers */
-#define I965_MSAC 0x62
-#define I965_IFPADDR 0x70
-
-/* Intel 7505 registers */
-#define INTEL_I7505_APSIZE 0x74
-#define INTEL_I7505_NCAPID 0x60
-#define INTEL_I7505_NISTAT 0x6c
-#define INTEL_I7505_ATTBASE 0x78
-#define INTEL_I7505_ERRSTS 0x42
-#define INTEL_I7505_AGPCTRL 0x70
-#define INTEL_I7505_MCHCFG 0x50
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
- .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-static struct _intel_private {
- struct pci_dev *pcidev; /* device one */
- u8 __iomem *registers;
- u32 __iomem *gtt; /* I915G */
- int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
- struct page *i8xx_page;
- struct resource ifp_resource;
- int resource_valid;
-} intel_private;
-
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
- struct sg_table st;
-
- st.sgl = mem->sg_list;
- st.orig_nents = st.nents = mem->page_count;
-
- sg_free_table(&st);
-
- mem->sg_list = NULL;
- mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
-{
- struct sg_table st;
- struct scatterlist *sg;
- int i;
-
- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
-
- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
- return -ENOMEM;
-
- mem->sg_list = sg = st.sgl;
-
- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
-
- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg)) {
- intel_agp_free_sglist(mem);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void intel_agp_unmap_memory(struct agp_memory *mem)
-{
- DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
-
- pci_unmap_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- intel_agp_free_sglist(mem);
-}
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = I830_PTE_SYSTEM_CACHED;
- }
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- struct aper_size_info_fixed *values;
-
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values);
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- if (!intel_private.registers) {
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- dev_err(&intel_private.pcidev->dev,
- "can't remap memory\n");
- return -ENOMEM;
- }
- }
-
- if ((readl(intel_private.registers+I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- dev_info(&intel_private.pcidev->dev,
- "detected 4MB dedicated video ram\n");
- intel_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = 0; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
- }
- global_cache_flush();
- return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
- writel(0, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers); /* PCI Posting. */
- iounmap(intel_private.registers);
-}
-
-static void intel_i810_tlbflush(struct agp_memory *mem)
-{
- return;
-}
-
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
- return;
-}
-
-/* Exists to support ARGB cursors */
-static struct page *i8xx_alloc_pages(void)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
- if (page == NULL)
- return NULL;
-
- if (set_pages_uc(page, 4) < 0) {
- set_pages_wb(page, 4);
- __free_pages(page, 2);
- return NULL;
- }
- get_page(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page;
-}
-
-static void i8xx_destroy_pages(struct page *page)
-{
- if (page == NULL)
- return;
-
- set_pages_wb(page, 4);
- put_page(page);
- __free_pages(page, 2);
- atomic_dec(&agp_bridge->current_memory_agp);
-}
-
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
-
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
- ret = -EBUSY;
- goto out_err;
- }
- }
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- switch (mask_type) {
- case AGP_DCACHE_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
- intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
- break;
- case AGP_PHYS_MEMORY:
- case AGP_NORMAL_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- break;
- default:
- goto out_err;
- }
-
- agp_bridge->driver->tlb_flush(mem);
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/*
- * The i810/i830 requires a physical address to program its mouse
- * pointer into hardware.
- * However the Xserver still writes to it through the agp aperture.
- */
-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
-{
- struct agp_memory *new;
- struct page *page;
-
- switch (pg_count) {
- case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
- break;
- case 4:
- /* kludge to get 4 physical pages for ARGB cursor */
- page = i8xx_alloc_pages();
- break;
- default:
- return NULL;
- }
-
- if (page == NULL)
- return NULL;
-
- new = agp_create_memory(pg_count);
- if (new == NULL)
- return NULL;
-
- new->pages[0] = page;
- if (pg_count == 4) {
- /* kludge to get 4 physical pages for ARGB cursor */
- new->pages[1] = new->pages[0] + 1;
- new->pages[2] = new->pages[1] + 1;
- new->pages[3] = new->pages[2] + 1;
- }
- new->page_count = pg_count;
- new->num_scratch_pages = pg_count;
- new->type = AGP_PHYS_MEMORY;
- new->physical = page_to_phys(new->pages[0]);
- return new;
-}
-
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- struct agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_private.num_dcache_entries)
- return NULL;
-
- new = agp_create_memory(1);
- if (new == NULL)
- return NULL;
-
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- agp_free_page_array(new);
- return new;
- }
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- return NULL;
-}
-
-static void intel_i810_free_by_type(struct agp_memory *curr)
-{
- agp_free_key(curr->key);
- if (curr->type == AGP_PHYS_MEMORY) {
- if (curr->page_count == 4)
- i8xx_destroy_pages(curr->pages[0]);
- else {
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_UNMAP);
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_FREE);
- }
- agp_free_page_array(curr);
- }
- kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
- {128, 32768, 5},
- /* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
- {256, 65536, 6},
- {512, 131072, 7},
-};
-
-static void intel_i830_init_gtt_entries(void)
-{
- u16 gmch_ctrl;
- int gtt_entries = 0;
- u8 rdct;
- int local = 0;
- static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, ignore it in stolen gtt entries counting. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
- break;
- case I830_GMCH_GMS_LOCAL:
- rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
- MB(ddt[I830_RDRAM_DDT(rdct)]);
- local = 1;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
- break;
- }
- } else {
- switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
- break;
- case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- }
- if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
- } else {
- dev_info(&agp_bridge->dev->dev,
- "no pre-allocated video memory detected\n");
- gtt_entries = 0;
- }
-
- intel_private.gtt_entries = gtt_entries;
-}
-
-static void intel_i830_fini_flush(void)
-{
- kunmap(intel_private.i8xx_page);
- intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
-}
-
-/* The chipset_flush interface needs to get data that has already been
- * flushed out of the CPU all the way out to main memory, because the GPU
- * doesn't snoop those buffers.
- *
- * The 8xx series doesn't have the same lovely interface for flushing the
- * chipset write buffers that the later chips do. According to the 865
- * specs, it's 64 octwords, or 1KB. So, to get those previous things in
- * that buffer out, we just fill 1KB and clflush it out, on the assumption
- * that it'll push whatever was in there out. It appears to work.
- */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
- unsigned int *pg = intel_private.i8xx_flush_page;
-
- memset(pg, 0, 1024);
-
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-{
- return 0;
-}
-
-static int intel_i830_fetch_size(void)
-{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
- return 0;
-}
-
-static int intel_i830_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i830_setup_flush();
- return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
- iounmap(intel_private.registers);
-}
-
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- agp_bridge->driver->tlb_flush(mem);
-
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-{
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- /* always return NULL for other allocation types for now */
- return NULL;
-}
-
-static int intel_alloc_chipset_flush_resource(void)
-{
- int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
- PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
-
- return ret;
-}
-
-static void intel_i915_setup_chipset_flush(void)
-{
- int ret;
- u32 temp;
-
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
- if (!(temp & 0x1)) {
- intel_alloc_chipset_flush_resource();
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- temp &= ~1;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = temp;
- intel_private.ifp_resource.end = temp + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area in a pnp some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i965_g33_setup_chipset_flush(void)
-{
- u32 temp_hi, temp_lo;
- int ret;
-
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-
- if (!(temp_lo & 0x1)) {
-
- intel_alloc_chipset_flush_resource();
-
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
- upper_32_bits(intel_private.ifp_resource.start));
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- u64 l64;
-
- temp_lo &= ~0x1;
- l64 = ((u64)temp_hi << 32) | temp_lo;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = l64;
- intel_private.ifp_resource.end = l64 + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area in a pnp some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i9xx_setup_flush(void)
-{
- /* return if already configured */
- if (intel_private.ifp_resource.start)
- return;
-
- if (IS_SNB)
- return;
-
- /* setup a resource for this object */
- intel_private.ifp_resource.name = "Intel Flush Page";
- intel_private.ifp_resource.flags = IORESOURCE_MEM;
-
- /* Setup chipset flush for 915 */
- if (IS_I965 || IS_G33 || IS_G4X) {
- intel_i965_g33_setup_chipset_flush();
- } else {
- intel_i915_setup_chipset_flush();
- }
-
- if (intel_private.ifp_resource.start) {
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
-}
-
-static int intel_i915_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i9xx_setup_flush();
-
- return 0;
-}
-
-static void intel_i915_cleanup(void)
-{
- if (intel_private.i9xx_flush_page)
- iounmap(intel_private.i9xx_flush_page);
- if (intel_private.resource_valid)
- release_resource(&intel_private.ifp_resource);
- intel_private.ifp_resource.start = 0;
- intel_private.resource_valid = 0;
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
-}
-
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-{
- if (intel_private.i9xx_flush_page)
- writel(1, intel_private.i9xx_flush_page);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i915 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
- agp_bridge->driver->tlb_flush(mem);
-
- out:
- ret = 0;
- out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
- readl(intel_private.gtt+i-1);
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/* Return the aperture size by just checking the resource length. The effect
- * described in the spec of the MSAC registers is just changing of the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
-{
- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
- int aper_size; /* size in megabytes */
- int i;
-
- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
- for (i = 0; i < num_sizes; i++) {
- if (aper_size == intel_i830_sizes[i].size) {
- agp_bridge->current_size = intel_i830_sizes + i;
- agp_bridge->previous_size = agp_bridge->current_size;
- return aper_size;
- }
- }
-
- return 0;
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
- int gtt_map_size = 256 * 1024;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
- if (IS_G33)
- gtt_map_size = 1024 * 1024; /* 1M on G33 */
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
-
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xf0;
-
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
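
/*
 * Worked example for the 36-bit packing above (illustrative values only):
 * physical address bits 32..35 cannot fit in a 32-bit PTE, so they are
 * shifted down into PTE bits 4..7 before the type mask is ORed in.
 *
 *   addr         = 0x3_0000_1000   (bits 32 and 33 set)
 *   addr >> 28   = 0x30            (high bits now at bits 4..7)
 *   ... & 0xf0   = 0x30
 *   addr | 0x30  = 0x3_0000_1030   -> low 32 bits 0x0000_1030 go into the GTT
 *
 * A standalone check of the same arithmetic:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x300001000ULL;

	addr |= (addr >> 28) & 0xf0;
	printf("pte low word = 0x%08" PRIx32 "\n", (uint32_t)addr);	/* prints 0x00001030 */
	return 0;
}
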
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
- u16 snb_gmch_ctl;
-
- switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
- case PCI_DEVICE_ID_INTEL_Q45_HB:
- case PCI_DEVICE_ID_INTEL_G45_HB:
- case PCI_DEVICE_ID_INTEL_G41_HB:
- case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
- *gtt_offset = *gtt_size = MB(2);
- break;
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- *gtt_offset = MB(2);
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- *gtt_size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- *gtt_size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- *gtt_size = MB(2);
- break;
- }
- break;
- default:
- *gtt_offset = *gtt_size = KB(512);
- }
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
- int gtt_offset, gtt_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
- temp &= 0xfff00000;
-
- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_size / 4;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-
static int intel_fetch_size(void)
{
int i;
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = {
.aperture_sizes = intel_generic_sizes,
.size_type = U16_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_configure,
.fetch_size = intel_fetch_size,
.cleanup = intel_cleanup,
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_810_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i810_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
- .needs_scratch_page = true,
- .configure = intel_i810_configure,
- .fetch_size = intel_i810_fetch_size,
- .cleanup = intel_i810_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = agp_generic_create_gatt_table,
- .free_gatt_table = agp_generic_free_gatt_table,
- .insert_memory = intel_i810_insert_entries,
- .remove_memory = intel_i810_remove_entries,
- .alloc_by_type = intel_i810_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
static const struct agp_bridge_driver intel_815_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_815_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 2,
+ .needs_scratch_page = true,
.configure = intel_815_configure,
.fetch_size = intel_815_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_830_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i830_configure,
- .fetch_size = intel_i830_fetch_size,
- .cleanup = intel_i830_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i830_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i830_insert_entries,
- .remove_memory = intel_i830_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
-};
-
static const struct agp_bridge_driver intel_820_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_820_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_820_cleanup,
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = {
.aperture_sizes = intel_830mp_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 4,
+ .needs_scratch_page = true,
.configure = intel_830mp_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_840_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_845_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_850_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_860_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static const struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_7505_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_g33_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static int find_gmch(u16 device)
{
struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device)
static const struct intel_driver_description {
unsigned int chip_id;
unsigned int gmch_chip_id;
- unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
char *name;
const struct agp_bridge_driver *driver;
const struct agp_bridge_driver *gmch_driver;
} intel_agp_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
+ { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
&intel_815_driver, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
&intel_830mp_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+ { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
+ { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+ { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
+ { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
+ { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
+ { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
+ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
+ { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
+ { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
+ { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
+ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
+ { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
+ { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
"GM45", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
"Eaglelake", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
"Q45/Q43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
"G45/G43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { 0, 0, 0, NULL, NULL, NULL }
+ { 0, 0, NULL, NULL, NULL }
};
+static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i;
+ bridge->driver = NULL;
+
+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+ if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+ find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+ bridge->driver =
+ intel_agp_chipsets[i].gmch_driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver)
+ return 0;
+
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
+
+ return 1;
+}
+
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (!bridge)
return -ENOMEM;
+ bridge->capndx = cap_ptr;
+
+ if (intel_gmch_probe(pdev, bridge))
+ goto found_gmch;
+
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
 /* Multiple models of gfx chip may sit on the
 same host bridge type, so make sure we
 detect the right IGD. */
if (pdev->device == intel_agp_chipsets[i].chip_id) {
- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_agp_chipsets[i].gmch_driver;
- break;
- } else if (intel_agp_chipsets[i].multi_gmch_chip) {
- continue;
- } else {
- bridge->driver = intel_agp_chipsets[i].driver;
- break;
- }
+ bridge->driver = intel_agp_chipsets[i].driver;
+ break;
}
}
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (bridge->driver == NULL) {
- /* bridge has no AGP and no IGD detected */
+ if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
+ intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
bridge->dev = pdev;
- bridge->capndx = cap_ptr;
- bridge->dev_private_data = &intel_private;
+ bridge->dev_private_data = NULL;
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
&bridge->mode);
}
- if (bridge->driver->mask_memory == intel_i965_mask_memory) {
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask 36bit failed!\n");
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(36));
- }
-
+found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
if (!err)
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
- if (bridge->driver == &intel_generic_driver)
- intel_configure();
- else if (bridge->driver == &intel_850_driver)
- intel_850_configure();
- else if (bridge->driver == &intel_845_driver)
- intel_845_configure();
- else if (bridge->driver == &intel_830mp_driver)
- intel_830mp_configure();
- else if (bridge->driver == &intel_915_driver)
- intel_i915_configure();
- else if (bridge->driver == &intel_830_driver)
- intel_i830_configure();
- else if (bridge->driver == &intel_810_driver)
- intel_i810_configure();
- else if (bridge->driver == &intel_i965_driver)
- intel_i915_configure();
+ bridge->driver->configure();
ret_val = agp_rebind_memory();
if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 00000000000..2547465d465
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for the I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_PGETBL_SIZE_MASK (3 << 8)
+#define G33_PGETBL_SIZE_1M (1 << 8)
+#define G33_PGETBL_SIZE_2M (2 << 8)
+
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* Intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I915_IFPADDR 0x60
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
+/* PCI device ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+
+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+ IS_SNB)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 00000000000..e8ea6825822
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphic devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore; it just needlessly
+ * complicates the code. But as long as the old graphics stack is still
+ * supported, it's stuck here.
+ *
+ * /fairy-tale-mode off
+ */
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
+static const struct aper_size_info_fixed intel_i810_sizes[] =
+{
+ {64, 16384, 4},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+#define INTEL_AGP_CACHED_MEMORY 3
+
+static struct gatt_mask intel_i810_masks[] =
+{
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+ .type = INTEL_AGP_CACHED_MEMORY}
+};
+
+static struct _intel_private {
+ struct pci_dev *pcidev; /* device one */
+ u8 __iomem *registers;
+ u32 __iomem *gtt; /* I915G */
+ int num_dcache_entries;
+ /* gtt_entries is the number of gtt entries that are already mapped
+ * to stolen memory. Stolen memory is larger than the memory mapped
+ * through gtt_entries, as it includes some reserved space for the BIOS
+ * popup and for the GTT.
+ */
+ int gtt_entries; /* i830+ */
+ int gtt_total_size;
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
+} intel_private;
+
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+ *ret = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+ return -EINVAL;
+ return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+ pci_unmap_page(intel_private.pcidev, dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+ struct sg_table st;
+
+ st.sgl = mem->sg_list;
+ st.orig_nents = st.nents = mem->page_count;
+
+ sg_free_table(&st);
+
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ return -ENOMEM;
+
+ mem->sg_list = sg = st.sgl;
+
+ for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!mem->num_sg)) {
+ intel_agp_free_sglist(mem);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ struct scatterlist *sg;
+ int i, j;
+
+ j = pg_start;
+
+ WARN_ON(!mem->num_sg);
+
+ if (mem->num_sg == mem->page_count) {
+ for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg), mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ } else {
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
+ unsigned int len, m;
+
+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+ len = sg_dma_len(sg) / PAGE_SIZE;
+ for (m = 0; m < len; m++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg) + m * PAGE_SIZE,
+ mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ {
+ cache_bits = I830_PTE_SYSTEM_CACHED;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.gtt+j);
+ }
+
+ readl(intel_private.gtt+j-1);
+}
+
+#endif
+
+static int intel_i810_fetch_size(void)
+{
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ } else {
+ agp_bridge->current_size = (void *) (values);
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ return 0;
+}
+
+static int intel_i810_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ if (!intel_private.registers) {
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ dev_err(&intel_private.pcidev->dev,
+ "can't remap memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ /* This will need to be dynamically assigned */
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
+ }
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = 0; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
+ }
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers); /* PCI Posting. */
+ iounmap(intel_private.registers);
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ return NULL;
+ }
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+ if (page == NULL)
+ return;
+
+ set_pages_wb(page, 4);
+ put_page(page);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type < AGP_USER_TYPES)
+ return type;
+ else if (type == AGP_USER_CACHED_MEMORY)
+ return INTEL_AGP_CACHED_MEMORY;
+ else
+ return 0;
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ }
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ switch (mask_type) {
+ case AGP_DCACHE_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+ intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ break;
+ case AGP_PHYS_MEMORY:
+ case AGP_NORMAL_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+ break;
+ default:
+ goto out_err;
+ }
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However, the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ struct page *page;
+
+ switch (pg_count) {
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ page = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (page == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->pages[0] = page;
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = page_to_phys(new->pages[0]);
+ return new;
+}
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if (curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(curr->pages[0]);
+ else {
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_UNMAP);
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ agp_free_page_array(curr);
+ }
+ kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static void intel_i830_init_gtt_entries(void)
+{
+ u16 gmch_ctrl;
+ int gtt_entries = 0;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ int size; /* reserved space (in kb) at the top of stolen memory */
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if (IS_I965) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ /* The 965 has a field telling us the size of the GTT,
+ * which may be larger than what is necessary to map the
+ * aperture.
+ */
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = 128;
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = 256;
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = 512;
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = 1024;
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = 512;
+ }
+ size += 4; /* add in BIOS popup space */
+ } else if (IS_G33 && !IS_PINEVIEW) {
+ /* G33's GTT size defined in gmch_ctrl */
+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+ case G33_PGETBL_SIZE_1M:
+ size = 1024;
+ break;
+ case G33_PGETBL_SIZE_2M:
+ size = 2048;
+ break;
+ default:
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
+ (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+ size = 512;
+ }
+ size += 4;
+ } else if (IS_G4X || IS_PINEVIEW) {
+ /* On 4 series hardware, GTT stolen is separate from graphics
+ * stolen, ignore it in stolen gtt entries counting. However,
+ * 4KB of the stolen memory doesn't get mapped to the GTT.
+ */
+ size = 4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ size = agp_bridge->driver->fetch_size() + 4;
+ }
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ gtt_entries = KB(512) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ /*
+ * SandyBridge has a new memory control reg at 0x50.w
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ gtt_entries = MB(64) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ gtt_entries = MB(96) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ gtt_entries = MB(128) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ gtt_entries = MB(160) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ gtt_entries = MB(192) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ gtt_entries = MB(224) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ gtt_entries = MB(256) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ gtt_entries = MB(288) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ gtt_entries = MB(320) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ gtt_entries = MB(352) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ gtt_entries = MB(384) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ gtt_entries = MB(416) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ gtt_entries = MB(448) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ gtt_entries = MB(480) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ gtt_entries = MB(512) - KB(size);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ gtt_entries = MB(4) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ gtt_entries = MB(16) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(64) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(128) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(256) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(96) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(160) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(224) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(352) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ }
+ if (gtt_entries > 0) {
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ gtt_entries /= KB(4);
+ } else {
+ dev_info(&agp_bridge->dev->dev,
+ "no pre-allocated video memory detected\n");
+ gtt_entries = 0;
+ }
+
+ intel_private.gtt_entries = gtt_entries;
+}
+
+static void intel_i830_fini_flush(void)
+{
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+ unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ intel_i830_fini_flush();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, the buffer is 64 octwords, or 1KB. So, to force out whatever is
+ * already in that buffer, we just fill 1KB and clflush it, on the
+ * assumption that this pushes the previous contents out. It appears to work.
+ */
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+ unsigned int *pg = intel_private.i8xx_flush_page;
+
+ memset(pg, 0, 1024);
+
+ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+}
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ?? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+ u16 gmch_ctrl;
+ struct aper_size_info_fixed *values;
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+ /* 855GM/852GM/865G has 128MB aperture size */
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ } else {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ }
+
+ return 0;
+}
+
+static int intel_i830_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i830_setup_flush();
+ return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+ iounmap(intel_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i830 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+{
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, agp_bridge->dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a pnp resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ if (IS_SNB)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_I965 || IS_G33 || IS_G4X) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start) {
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+ }
+}
+
+static int intel_i915_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+ }
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i9xx_setup_flush();
+
+ return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+}
+
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+{
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i915 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
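+ /* Only generic, physical and cached GTT memory types are accepted here. */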
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+
+ out:
+ ret = 0;
+ out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+
+ readl(intel_private.gtt+i-1);
+
+ return 0;
+}
+
+/* Return the aperture size by just checking the resource length. The only
+ * effect of the MSAC registers described in the spec is a change in the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+ int aper_size; /* size in megabytes */
+ int i;
+
+ aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_i830_sizes[i].size) {
+ agp_bridge->current_size = intel_i830_sizes + i;
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp, temp2;
+ int gtt_map_size = 256 * 1024;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+
+ if (IS_G33)
+ gtt_map_size = 1024 * 1024; /* 1M on G33 */
+ intel_private.gtt = ioremap(temp2, gtt_map_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
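+ /* Each GTT entry is 4 bytes, so the entry count is the mapping size / 4. */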
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/*
+ * The i965 supports 36-bit physical addresses, but to keep
+ * the format of the GTT the same, the bits that don't fit
+ * in a 32-bit word are shifted down to bits 4..7.
+ *
+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+ * is always zero on 32-bit architectures, so no need to make
+ * this conditional.
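+ *
+ * Illustrative example: for a page at physical address 0x234567000
+ * (bit 33 set), bits 32..35 (0x2) are shifted down into bits 4..7,
+ * so the low 32 bits encode as 0x34567020 before the type mask is
+ * OR-ed in.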
+ */
+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Shift high bits down */
+ addr |= (addr >> 28) & 0xf0;
+
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+ u16 snb_gmch_ctl;
+
+ switch (agp_bridge->dev->device) {
+ case PCI_DEVICE_ID_INTEL_GM45_HB:
+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+ case PCI_DEVICE_ID_INTEL_Q45_HB:
+ case PCI_DEVICE_ID_INTEL_G45_HB:
+ case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_B43_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+ *gtt_offset = *gtt_size = MB(2);
+ break;
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ *gtt_offset = MB(2);
+
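+ /* On Sandybridge the GTT size is encoded in the GMCH control register. */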
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ *gtt_size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ *gtt_size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ *gtt_size = MB(2);
+ break;
+ }
+ break;
+ default:
+ *gtt_offset = *gtt_size = KB(512);
+ }
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+ int gtt_offset, gtt_size;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+
+ temp &= 0xfff00000;
+
+ intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+
+ intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
+
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_size / 4;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+static const struct agp_bridge_driver intel_810_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i810_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = true,
+ .configure = intel_i810_configure,
+ .fetch_size = intel_i810_fetch_size,
+ .cleanup = intel_i810_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = intel_i810_insert_entries,
+ .remove_memory = intel_i810_remove_entries,
+ .alloc_by_type = intel_i810_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i830_configure,
+ .fetch_size = intel_i830_fetch_size,
+ .cleanup = intel_i830_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i830_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i830_insert_entries,
+ .remove_memory = intel_i830_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
+};
+
+static const struct agp_bridge_driver intel_915_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_i965_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_g33_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 10f24e349a2..b9734a97818 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
.aperture_sizes = nvidia_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 5,
+ .needs_scratch_page = true,
.configure = nvidia_configure,
.fetch_size = nvidia_fetch_size,
.cleanup = nvidia_cleanup,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 6c3837a0184..29aacd81de7 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
.aperture_sizes = sis_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = sis_configure,
.fetch_size = sis_fetch_size,
.cleanup = sis_cleanup,
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_SI,
- .device = PCI_DEVICE_ID_SI_760,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
{ }
};
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 6f48931ac1c..95db71360d2 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -28,6 +28,7 @@
*/
static int uninorth_rev;
static int is_u3;
+static u32 scratch_value;
#define DEFAULT_APERTURE_SIZE 256
#define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
- if (gp[i]) {
+ if (gp[i] != scratch_value) {
dev_info(&agp_bridge->dev->dev,
"uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
- for (i = 0; i < mem->page_count; ++i)
- gp[i] = 0;
+ for (i = 0; i < mem->page_count; ++i) {
+ gp[i] = scratch_value;
+ }
mb();
uninorth_tlbflush(mem);
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_bus_addr = virt_to_phys(table);
+ if (is_u3)
+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+ 0x1UL);
for (i = 0; i < num_entries; i++)
- bridge->gatt_table[i] = 0;
+ bridge->gatt_table[i] = scratch_value;
return 0;
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
+ .needs_scratch_page = true,
};
const struct agp_bridge_driver u3_agp_driver = {
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index d3bd243867f..df67e80019d 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
.aperture_sizes = agp3_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 10,
+ .needs_scratch_page = true,
.configure = via_configure_agp3,
.fetch_size = via_fetch_size_agp3,
.cleanup = via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 9,
+ .needs_scratch_page = true,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc..4f8d60c25a9 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
/*
* The serial driver boot-time initialization code!
*/
-static int __init rs_init(void)
+static int __init amiga_serial_probe(struct platform_device *pdev)
{
unsigned long flags;
struct serial_state * state;
int error;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
- return -ENODEV;
-
serial_driver = alloc_tty_driver(1);
if (!serial_driver)
return -ENOMEM;
- /*
- * We request SERDAT and SERPER only, because the serial registers are
- * too spreaded over the custom register space
- */
- if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
- "amiserial [Paula]")) {
- error = -EBUSY;
- goto fail_put_tty_driver;
- }
-
IRQ_ports = NULL;
show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
error = tty_register_driver(serial_driver);
if (error)
- goto fail_release_mem_region;
+ goto fail_put_tty_driver;
state = rs_table;
state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
+ platform_set_drvdata(pdev, state);
+
return 0;
fail_free_irq:
free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
tty_unregister_driver(serial_driver);
-fail_release_mem_region:
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
fail_put_tty_driver:
put_tty_driver(serial_driver);
return error;
}
-static __exit void rs_exit(void)
+static int __exit amiga_serial_remove(struct platform_device *pdev)
{
int error;
- struct async_struct *info = rs_table[0].info;
+ struct serial_state *state = platform_get_drvdata(pdev);
+ struct async_struct *info = state->info;
/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
error);
put_tty_driver(serial_driver);
- if (info) {
- rs_table[0].info = NULL;
- kfree(info);
- }
+ rs_table[0].info = NULL;
+ kfree(info);
free_irq(IRQ_AMIGA_TBE, rs_table);
free_irq(IRQ_AMIGA_RBF, rs_table);
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
+ platform_set_drvdata(pdev, NULL);
+
+ return error;
+}
+
+static struct platform_driver amiga_serial_driver = {
+ .remove = __exit_p(amiga_serial_remove),
+ .driver = {
+ .name = "amiga-serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_serial_init(void)
+{
+ return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
+}
+
+module_init(amiga_serial_init);
+
+static void __exit amiga_serial_exit(void)
+{
+ platform_driver_unregister(&amiga_serial_driver);
}
-module_init(rs_init)
-module_exit(rs_exit)
+module_exit(amiga_serial_exit);
#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-serial");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 4f568cb9af3..033e1505fca 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait)
* Only when everyone who has opened /dev/apm_bios with write permission
* has acknowledged does the actual suspend happen.
*/
-static int
-apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
struct apm_user *as = filp->private_data;
int err = -EINVAL;
@@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
if (!as->suser || !as->writer)
return -EPERM;
+ lock_kernel();
switch (cmd) {
case APM_IOC_SUSPEND:
mutex_lock(&state_lock);
@@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
mutex_unlock(&state_lock);
break;
}
+ unlock_kernel();
return err;
}
@@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = {
.owner = THIS_MODULE,
.read = apm_read,
.poll = apm_poll,
- .ioctl = apm_ioctl,
+ .unlocked_ioctl = apm_ioctl,
.open = apm_open,
.release = apm_release,
};
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7424bf7eac..f4ae0e0fb63 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/wait.h>
@@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */
static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
-static int ac_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
@@ -115,7 +115,7 @@ static const struct file_operations ac_fops = {
.llseek = no_llseek,
.read = ac_read,
.write = ac_write,
- .ioctl = ac_ioctl,
+ .unlocked_ioctl = ac_ioctl,
};
static struct miscdevice ac_miscdev = {
@@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance)
-static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{ /* @ ADG or ATO, depending on the case */
int i;
@@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
/* In general, the device is only openable by root anyway, so we're not
particularly concerned that bogus ioctls can flood the console. */
- adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
- if (!adgl)
- return -ENOMEM;
+ adgl = memdup_user(argp, sizeof(struct st_ram_io));
+ if (IS_ERR(adgl))
+ return PTR_ERR(adgl);
- if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
- kfree(adgl);
- return -EFAULT;
- }
-
+ lock_kernel();
IndexCard = adgl->num_card-1;
if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
@@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
warncount--;
}
kfree(adgl);
+ unlock_kernel();
return -EINVAL;
}
@@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
Dummy = readb(apbs[IndexCard].RamIO + VERS);
kfree(adgl);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 7fef305774d..89d871ef8c2 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -253,7 +253,7 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
cur, cur->bsr_name);
- if (!cur->bsr_device) {
+ if (IS_ERR(cur->bsr_device)) {
printk(KERN_ERR "device_create failed for %s\n",
cur->bsr_name);
cdev_del(&cur->bsr_cdev);
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 61f0146e215..dbee8688f75 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
}
static int
-ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
union {
@@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
}
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ds1620_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef THERM_USE_PROC
static int
proc_therm_ds1620_read(char *buf, char **start, off_t offset,
@@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
.open = ds1620_open,
.read = ds1620_read,
- .ioctl = ds1620_ioctl,
+ .unlocked_ioctl = ds1620_unlocked_ioctl,
};
static struct miscdevice ds1620_miscdev = {
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 045c930e632..e3859d4eaea 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *,
static unsigned int dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
-static int dtlk_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static const struct file_operations dtlk_fops =
{
@@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops =
.read = dtlk_read,
.write = dtlk_write,
.poll = dtlk_poll,
- .ioctl = dtlk_ioctl,
+ .unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
};
@@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data)
wake_up_interruptible(&dtlk_process_list);
}
-static int dtlk_ioctl(struct inode *inode,
- struct file *file,
- unsigned int cmd,
- unsigned long arg)
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
{
char __user *argp = (char __user *)arg;
struct dtlk_settings *sp;
@@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode,
switch (cmd) {
case DTLK_INTERROGATE:
+ lock_kernel();
sp = dtlk_interrogate();
+ unlock_kernel();
if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
return -EINVAL;
return 0;
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index fda4181b5e6..82b5a88a82d 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -19,6 +19,7 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
@@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
return p - buf;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
#ifdef CONFIG_PPC_PMAC
@@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
return 0;
}
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = nvram_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_unlocked_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 31e7c91c2d9..b6c2cc167c1 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
#endif
}
-static int gen_rtc_ioctl(struct inode *inode, struct file *file,
+static int gen_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct rtc_time wtime;
@@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
+static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = gen_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
@@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = {
.read = gen_rtc_read,
.poll = gen_rtc_poll,
#endif
- .ioctl = gen_rtc_ioctl,
+ .unlocked_ioctl = gen_rtc_unlocked_ioctl,
.open = gen_rtc_open,
.release = gen_rtc_release,
};
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 712d9f271aa..e0249722d25 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,8 +49,9 @@
#include <asm/uaccess.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
+#include <linux/time.h>
-#define VERSION_STR "0.9.0"
+#define VERSION_STR "0.9.1"
#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
@@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#if defined(CONFIG_S390)
# define HAVE_MONOTONIC
# define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_IA64)
-# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
#else
-# define TIMER_FREQ (HZ*loops_per_jiffy)
+# define TIMER_FREQ 1000000000ULL
#endif
#ifdef HAVE_MONOTONIC
@@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void);
#else
static inline unsigned long long monotonic_clock(void)
{
- return get_cycles();
+ struct timespec ts;
+ getrawmonotonic(&ts);
+ return timespec_to_ns(&ts);
}
#endif /* HAVE_MONOTONIC */
@@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data)
printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
}
}
+#if 0
+ /*
+ * Enable to investigate delays in detail
+ */
+ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
hangcheck_tsc = monotonic_clock();
}
@@ -180,7 +188,7 @@ static int __init hangcheck_init(void)
#if defined (HAVE_MONOTONIC)
printk("Hangcheck: Using monotonic_clock().\n");
#else
- printk("Hangcheck: Using get_cycles().\n");
+ printk("Hangcheck: Using getrawmonotonic().\n");
#endif /* HAVE_MONOTONIC */
hangcheck_tsc_margin =
(unsigned long long)(hangcheck_margin + hangcheck_tick);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ded667625a..a0a1829d319 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file)
static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
-static int
-hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hpet_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct hpet_dev *devp;
+ int ret;
devp = file->private_data;
- return hpet_ioctl_common(devp, cmd, arg, 0);
+ lock_kernel();
+ ret = hpet_ioctl_common(devp, cmd, arg, 0);
+ unlock_kernel();
+
+ return ret;
}
static int hpet_ioctl_ieon(struct hpet_dev *devp)
@@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = {
.llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
- .ioctl = hpet_ioctl,
+ .unlocked_ioctl = hpet_ioctl,
.open = hpet_open,
.release = hpet_release,
.fasync = hpet_fasync,
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 793b236c926..d4b14ff1c4c 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp)
"HVSI_WAIT_FOR_MCTRL_RESPONSE",
"HVSI_FSP_DIED",
};
- const char *name = state_names[hp->state];
-
- if (hp->state > ARRAY_SIZE(state_names))
- name = "UNKNOWN";
+ const char *name = (hp->state < ARRAY_SIZE(state_names))
+ ? state_names[hp->state] : "UNKNOWN";
pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 10f868eefaa..0f9cbf1aaf1 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op,
np->hvapi_major);
goto out_hvapi_unregister;
}
- np->num_units = of_getintprop_default(op->node,
+ np->num_units = of_getintprop_default(op->dev.of_node,
"rng-#units", 0);
if (!np->num_units) {
dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
@@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = {
MODULE_DEVICE_TABLE(of, n2rng_match);
static struct of_platform_driver n2rng_driver = {
- .name = "n2rng",
- .match_table = n2rng_match,
+ .driver = {
+ .name = "n2rng",
+ .owner = THIS_MODULE,
+ .of_match_table = n2rng_match,
+ },
.probe = n2rng_probe,
.remove = __devexit_p(n2rng_remove),
};
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a8b4c401014..a348c7e9aa0 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -15,6 +15,10 @@
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static struct clk *rng_clk;
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
@@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
void __iomem *base;
int ret;
+ rng_clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_err(&dev->dev, "could not get rng clock\n");
+ ret = PTR_ERR(rng_clk);
+ return ret;
+ }
+
+ clk_enable(rng_clk);
+
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
return ret;
@@ -57,6 +70,8 @@ out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return ret;
}
@@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev)
hwrng_unregister(&nmk_rng);
iounmap(base);
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 7fa61dd1d9d..261ba8f22b8 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
void __iomem *rng_regs;
- struct device_node *rng_np = ofdev->node;
+ struct device_node *rng_np = ofdev->dev.of_node;
struct resource res;
int err = 0;
@@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = {
};
static struct of_platform_driver rng_driver = {
- .name = "pasemi-rng",
- .match_table = rng_match,
+ .driver = {
+ .name = "pasemi-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = rng_match,
+ },
.probe = rng_probe,
.remove = rng_remove,
};
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a793ef..75f1cbd61c1 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vq, &data_avail))
return;
complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index fc8cf7ac7f2..4cd8b227c11 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -23,6 +23,7 @@
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/capability.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -82,8 +83,7 @@ module_param(fan_mult, int, 0);
MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
static int i8k_open_fs(struct inode *inode, struct file *file);
-static int i8k_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long i8k_ioctl(struct file *, unsigned int, unsigned long);
static const struct file_operations i8k_fops = {
.owner = THIS_MODULE,
@@ -91,7 +91,7 @@ static const struct file_operations i8k_fops = {
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .ioctl = i8k_ioctl,
+ .unlocked_ioctl = i8k_ioctl,
};
struct smm_regs {
@@ -307,8 +307,8 @@ static int i8k_get_dell_signature(int req_fn)
return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
}
-static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static int
+i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
{
int val = 0;
int speed;
@@ -395,6 +395,17 @@ static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
return 0;
}
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = i8k_ioctl_unlocked(fp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Print the information for /proc/i8k.
*/
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 65545de3dbf..d8ec92a3898 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user,
return rv;
}
-static int ipmi_ioctl(struct inode *inode,
- struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd,
unsigned long data)
{
@@ -630,6 +629,23 @@ static int ipmi_ioctl(struct inode *inode,
return rv;
}
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ * not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, data);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
/*
@@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
if (copy_to_user(precv64, &recv64, sizeof(recv64)))
return -EFAULT;
- rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
+ rc = ipmi_ioctl(filep,
((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
? IPMICTL_RECEIVE_MSG
: IPMICTL_RECEIVE_MSG_TRUNC),
@@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
return rc;
}
default:
- return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
+ return ipmi_ioctl(filep, cmd, arg);
}
}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ipmi_ioctl,
#endif
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378..4f3f8c9ec26 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- printk(KERN_INFO
- "ipmi: Found new BMC (man_id: 0x%6.6x, "
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+ "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
}
/*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
static struct timer_list ipmi_timer;
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME 100
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4462b113ba3..35603dd4e6c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
+enum ipmi_addr_src {
+ SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+ SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
#define DEVICE_NAME "ipmi_si"
static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
- char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
@@ -300,6 +308,7 @@ static int num_max_busy_us;
static int unload_when_empty = 1;
+static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
start_disable_irq(smi_info);
smi_info->interrupt_disabled = 1;
+ if (!atomic_read(&smi_info->stop_operation))
+ mod_timer(&smi_info->si_timer,
+ jiffies + SI_TIMEOUT_JIFFIES);
}
}
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- printk(KERN_WARNING
- "ipmi_si: Error clearing flags: %2.2x\n",
- msg[2]);
+ dev_warn(smi_info->dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
}
if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed get, using polled mode.\n");
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed get, using polled mode.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed set, using polled mode.\n");
- }
+ if (msg[2] != 0)
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed set, using polled mode.\n");
+ else
+ smi_info->interrupt_disabled = 0;
smi_info->si_state = SI_NORMAL;
break;
}
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed get.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed get.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed set.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed set.\n");
}
smi_info->si_state = SI_NORMAL;
break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
+ else if (smi_result == SI_SM_IDLE)
+ schedule_timeout_interruptible(100);
else
schedule_timeout_interruptible(0);
}
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
+ long timeout;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_add_timer;
+ goto do_mod_timer;
}
/*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
- smi_info->si_timer.expires = jiffies + 1;
+ timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_add_timer:
- add_timer(&(smi_info->si_timer));
+ do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ mod_timer(&(smi_info->si_timer), timeout);
}
static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- printk(KERN_NOTICE "ipmi_si_intf: Could not start"
- " kernel thread due to error %ld, only using"
- " timers to drive the interface\n",
- PTR_ERR(new_smi->thread));
+ dev_notice(new_smi->dev, "Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
DEVICE_NAME,
info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
info->irq = 0;
} else {
info->irq_cleanup = std_irq_cleanup;
- printk(" Using irq %d\n", info->irq);
+ dev_info(info->dev, "Using irq %d\n", info->irq);
}
return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
info->io.outputb = port_outl;
break;
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
break;
#endif
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
goto out;
}
- info->addr_source = "hotmod";
+ info->addr_source = SI_HOTMOD;
info->si_type = si_type;
info->io.addr_data = addr;
info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
} else {
/* remove */
struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
if (!info)
return;
- info->addr_source = "hardcoded";
+ info->addr_source = SI_HARDCODED;
+ printk(KERN_INFO PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
+ printk(KERN_WARNING PFX "Interface type specified "
"for interface %d, was invalid: %s\n",
i, si_type[i]);
kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
- "for interface %d, "
- "but port and address were not set or "
- "set to zero.\n", i);
+ printk(KERN_WARNING PFX "Interface type specified "
+ "for interface %d, but port and address were "
+ "not set or set to zero.\n", i);
kfree(info);
continue;
}
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
info->irq_setup = std_irq_setup;
info->slave_addr = slave_addrs[i];
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
}
}
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
&ipmi_acpi_gpe,
info);
if (status != AE_OK) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim ACPI GPE %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+ " running polled\n", DEVICE_NAME, info->irq);
info->irq = 0;
return -EINVAL;
} else {
info->irq_cleanup = acpi_gpe_irq_cleanup;
- printk(" Using ACPI GPE %d\n", info->irq);
+ dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
return 0;
}
}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
u8 addr_space;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
+ printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
}
if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
- info->addr_source = "SPMI";
+ info->addr_source = SI_SPMI;
+ printk(KERN_INFO PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->si_type = SI_BT;
break;
default:
- printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown ACPI I/O Address type\n");
+ printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
- try_smi_init(info);
+ add_smi(info);
return 0;
}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
{
struct acpi_device *acpi_dev;
struct smi_info *info;
+ struct resource *res;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
if (!info)
return -ENOMEM;
- info->addr_source = "ACPI";
+ info->addr_source = SI_ACPI;
+ printk(KERN_INFO PFX "probing via ACPI\n");
handle = acpi_dev->handle;
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->si_type = SI_BT;
break;
default:
- dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+ dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
goto err_free;
}
- if (pnp_port_valid(dev, 0)) {
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
- info->io.addr_data = pnp_port_start(dev, 0);
- } else if (pnp_mem_valid(dev, 0)) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- info->io.addr_data = pnp_mem_start(dev, 0);
} else {
+ res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
dev_err(&dev->dev, "no I/O or memory address\n");
goto err_free;
}
+ info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->irq_setup = std_irq_setup;
}
- info->dev = &acpi_dev->dev;
+ info->dev = &dev->dev;
pnp_set_drvdata(dev, info);
- return try_smi_init(info);
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
err_free:
kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR
- "ipmi_si: Could not allocate SI data\n");
+ printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
}
- info->addr_source = "SMBIOS";
+ info->addr_source = SI_SMBIOS;
+ printk(KERN_INFO PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+ printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
ipmi_data->addr_space);
return;
}
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
if (info->irq)
info->irq_setup = std_irq_setup;
- try_smi_init(info);
+ add_smi(info);
}
static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
if (!info)
return -ENOMEM;
- info->addr_source = "PCI";
+ info->addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
switch (class_type) {
case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
default:
kfree(info);
- printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
- pci_name(pdev), class_type);
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
return -ENOMEM;
}
rv = pci_enable_device(pdev);
if (rv) {
- printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
kfree(info);
return rv;
}
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->dev = &pdev->dev;
pci_set_drvdata(pdev, info);
- return try_smi_init(info);
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
}
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
struct smi_info *info;
struct resource resource;
const int *regsize, *regspacing, *regshift;
- struct device_node *np = dev->node;
+ struct device_node *np = dev->dev.of_node;
int ret;
int proplen;
- dev_info(&dev->dev, PFX "probing via device tree\n");
+ dev_info(&dev->dev, "probing via device tree\n");
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
if (!info) {
dev_err(&dev->dev,
- PFX "could not allocate memory for OF probe\n");
+ "could not allocate memory for OF probe\n");
return -ENOMEM;
}
info->si_type = (enum si_type) match->data;
- info->addr_source = "device-tree";
+ info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;
if (resource.flags & IORESOURCE_IO) {
@@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
info->io.regshift = regshift ? *regshift : 0;
- info->irq = irq_of_parse_and_map(dev->node, 0);
+ info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
info->dev = &dev->dev;
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+ dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
dev_set_drvdata(&dev->dev, info);
- return try_smi_init(info);
+ return add_smi(info);
}
static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] =
};
static struct of_platform_driver ipmi_of_platform_driver = {
- .name = "ipmi",
- .match_table = ipmi_match,
+ .driver = {
+ .name = "ipmi",
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_match,
+ },
.probe = ipmi_of_probe,
.remove = __devexit_p(ipmi_of_remove),
};
@@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from get global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from get"
+ " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global"
- " enables command, cannot enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from set global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from set"
+		       " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global,"
- "enables command, not enable the event"
- " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from set global"
+		       " enables command, could not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void)
if (!info)
return;
- info->addr_source = NULL;
+ info->addr_source = SI_DEFAULT;
info->si_type = ipmi_defaults[i].type;
info->io_setup = port_setup;
@@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void)
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
- if (try_smi_init(info) == 0) {
- /* Found one... */
- printk(KERN_INFO "ipmi_si: Found default %s state"
- " machine at %s address 0x%lx\n",
- si_to_str[info->si_type],
- addr_space_to_str[info->io.addr_type],
- info->io.addr_data);
- return;
+ if (add_smi(info) == 0) {
+ if ((try_smi_init(info)) == 0) {
+ /* Found one... */
+ printk(KERN_INFO PFX "Found default %s"
+ " state machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ } else
+ cleanup_one_si(info);
}
}
}
@@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
return 1;
}
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
{
- int rv;
- int i;
-
- if (new_smi->addr_source) {
- printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- new_smi->addr_source,
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
- }
+ int rv = 0;
+ printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+ printk(KERN_CONT PFX "duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
+ printk(KERN_CONT "\n");
+
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
new_smi->si_sm = NULL;
new_smi->handlers = NULL;
+ list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ printk(KERN_INFO PFX "Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+
switch (new_smi->si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
@@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR "Could not allocate state machine memory\n");
+ printk(KERN_ERR PFX
+ "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR "Could not set up I/O space\n");
+ printk(KERN_ERR PFX "Could not set up I/O space\n");
goto out_err;
}
@@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: Interface detection"
- " failed\n");
+ printk(KERN_INFO PFX "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+ printk(KERN_INFO PFX "There appears to be no BMC"
" at this location\n");
goto out_err;
}
@@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
- new_smi->interrupt_disabled = 0;
+ new_smi->interrupt_disabled = 1;
atomic_set(&new_smi->stop_operation, 0);
new_smi->intf_num = smi_num;
smi_num++;
@@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (!new_smi->pdev) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to allocate platform device\n");
+ printk(KERN_ERR PFX
+ "Unable to allocate platform device\n");
goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
@@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to register system interface device:"
+ printk(KERN_ERR PFX
+ "Unable to register system interface device:"
" %d\n",
rv);
goto out_err;
@@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
"bmc",
new_smi->slave_addr);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to register device: error %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ rv);
goto out_err_stop_timer;
}
@@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
type_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
stat_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
param_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
- list_add_tail(&new_smi->link, &smi_infos);
-
- mutex_unlock(&smi_infos_lock);
-
- printk(KERN_INFO "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
return 0;
@@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
wait_for_timer_and_thread(new_smi);
out_err:
- if (new_smi->intf)
+ new_smi->interrupt_disabled = 1;
+
+ if (new_smi->intf) {
ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
- if (new_smi->irq_cleanup)
+ if (new_smi->irq_cleanup) {
new_smi->irq_cleanup(new_smi);
+ new_smi->irq_cleanup = NULL;
+ }
/*
* Wait until we know that we are out of any interrupt
@@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup)
+ if (new_smi->addr_source_cleanup) {
new_smi->addr_source_cleanup(new_smi);
- if (new_smi->io_cleanup)
+ new_smi->addr_source_cleanup = NULL;
+ }
+ if (new_smi->io_cleanup) {
new_smi->io_cleanup(new_smi);
+ new_smi->io_cleanup = NULL;
+ }
- if (new_smi->dev_registered)
+ if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
-
- kfree(new_smi);
-
- mutex_unlock(&smi_infos_lock);
+ new_smi->dev_registered = 0;
+ }
return rv;
}
@@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
int i;
char *str;
int rv;
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
/* Register the device drivers. */
rv = driver_register(&ipmi_driver.driver);
if (rv) {
- printk(KERN_ERR
- "init_ipmi_si: Unable to register driver: %d\n",
- rv);
+ printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
return rv;
}
@@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
hardcode_find_bmc();
-#ifdef CONFIG_DMI
- dmi_find_bmc();
-#endif
+ /* If the user gave us a device, they presumably want us to use it */
+ mutex_lock(&smi_infos_lock);
+ if (!list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+ mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_ACPI
- spmi_find_bmc();
+#ifdef CONFIG_PCI
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
#endif
+
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
#endif
-#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR
- "init_ipmi_si: Unable to register PCI driver: %d\n",
- rv);
+#ifdef CONFIG_DMI
+ dmi_find_bmc();
+#endif
+
+#ifdef CONFIG_ACPI
+ spmi_find_bmc();
#endif
#ifdef CONFIG_PPC_OF
of_register_platform_driver(&ipmi_of_platform_driver);
#endif
+	/* We prefer devices with interrupts, but in the case of a machine
+	   with multiple BMCs we assume that there will be several instances
+	   of a given type, so if we succeed in registering one type we also
+	   try to register everything else of the same type */
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /* Try to register a device if it has an IRQ and we either
+ haven't successfully registered a device yet or this
+ device has the same type as one we successfully registered */
+ if (e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+
+ /* Fall back to the preferred device */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
if (si_trydefaults) {
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
mutex_unlock(&smi_infos_lock);
default_find_bmc();
- } else {
+ } else
mutex_unlock(&smi_infos_lock);
- }
}
mutex_lock(&smi_infos_lock);
@@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
driver_unregister(&ipmi_driver.driver);
- printk(KERN_WARNING
- "ipmi_si: Unable to find any System Interface(s)\n");
+ printk(KERN_WARNING PFX
+ "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
@@ -3317,7 +3402,7 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
- int rv;
+ int rv = 0;
unsigned long flags;
if (!to_clean)
@@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
- rv = ipmi_unregister_smi(to_clean->intf);
+ if (to_clean->intf)
+ rv = ipmi_unregister_smi(to_clean->intf);
+
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to unregister device: errno=%d\n",
+ printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
rv);
}
- to_clean->handlers->cleanup(to_clean->si_sm);
+ if (to_clean->handlers)
+ to_clean->handlers->cleanup(to_clean->si_sm);
kfree(to_clean->si_sm);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f71..82bcdb262a3 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -659,7 +659,7 @@ static struct watchdog_info ident = {
.identity = "IPMI"
};
-static int ipmi_ioctl(struct inode *inode, struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
}
}
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static ssize_t ipmi_write(struct file *file,
const char __user *buf,
size_t len,
@@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
.read = ipmi_read,
.poll = ipmi_poll,
.write = ipmi_write,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index c1ab303455c..98310e1aae3 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1573,11 +1573,16 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
/* allot the first empty slot in the array */
- for (index = 0; index < BOARD_COUNT; index++)
+ for (index = 0; index < BOARD_COUNT; index++) {
if (isi_card[index].base == 0) {
board = &isi_card[index];
break;
}
+ }
+ if (index == BOARD_COUNT) {
+ retval = -ENODEV;
+ goto err_disable;
+ }
board->index = index;
board->base = pci_resource_start(pdev, 3);
@@ -1624,6 +1629,7 @@ errunrr:
errdec:
board->base = 0;
card_count--;
+err_disable:
pci_disable_device(pdev);
err:
return retval;
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index ada25bb8941..54109dc9240 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -24,6 +24,8 @@
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/consolemap.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -38,7 +40,6 @@
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/vt_kern.h>
-#include <linux/sysrq.h>
#include <linux/input.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
@@ -82,8 +83,7 @@ void compute_shiftstate(void);
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag);
static k_handler_fn K_HANDLERS;
-k_handler_fn *k_handler[16] = { K_HANDLERS };
-EXPORT_SYMBOL_GPL(k_handler);
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
#define FN_HANDLERS\
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
@@ -133,7 +133,7 @@ static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
-static int dead_key_next;
+static bool dead_key_next;
static int npadch = -1; /* -1 or number assembled on pad */
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -147,22 +147,6 @@ static struct ledptr {
unsigned char valid:1;
} ledptrs[3];
-/* Simple translation table for the SysRq keys */
-
-#ifdef CONFIG_MAGIC_SYSRQ
-unsigned char kbd_sysrq_xlate[KEY_MAX + 1] =
- "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
- "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
- "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
- "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
- "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
- "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
- "\r\000/"; /* 0x60 - 0x6f */
-static int sysrq_down;
-static int sysrq_alt_use;
-#endif
-static int sysrq_alt;
-
/*
* Notifier list for console keyboard events
*/
@@ -361,8 +345,8 @@ static void to_utf8(struct vc_data *vc, uint c)
/* 110***** 10****** */
put_queue(vc, 0xc0 | (c >> 6));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x10000) {
- if (c >= 0xD800 && c < 0xE000)
+ } else if (c < 0x10000) {
+ if (c >= 0xD800 && c < 0xE000)
return;
if (c == 0xFFFF)
return;
@@ -370,7 +354,7 @@ static void to_utf8(struct vc_data *vc, uint c)
put_queue(vc, 0xe0 | (c >> 12));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x110000) {
+ } else if (c < 0x110000) {
/* 11110*** 10****** 10****** 10****** */
put_queue(vc, 0xf0 | (c >> 18));
put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
@@ -469,6 +453,7 @@ static void fn_enter(struct vc_data *vc)
}
diacr = 0;
}
+
put_queue(vc, 13);
if (vc_kbd_mode(kbd, VC_CRLF))
put_queue(vc, 10);
@@ -478,6 +463,7 @@ static void fn_caps_toggle(struct vc_data *vc)
{
if (rep)
return;
+
chg_vc_kbd_led(kbd, VC_CAPSLOCK);
}
@@ -485,12 +471,14 @@ static void fn_caps_on(struct vc_data *vc)
{
if (rep)
return;
+
set_vc_kbd_led(kbd, VC_CAPSLOCK);
}
static void fn_show_ptregs(struct vc_data *vc)
{
struct pt_regs *regs = get_irq_regs();
+
if (regs)
show_regs(regs);
}
@@ -515,7 +503,7 @@ static void fn_hold(struct vc_data *vc)
static void fn_num(struct vc_data *vc)
{
- if (vc_kbd_mode(kbd,VC_APPLIC))
+ if (vc_kbd_mode(kbd, VC_APPLIC))
applkey(vc, 'P', 1);
else
fn_bare_num(vc);
@@ -610,7 +598,7 @@ static void fn_boot_it(struct vc_data *vc)
static void fn_compose(struct vc_data *vc)
{
- dead_key_next = 1;
+ dead_key_next = true;
}
static void fn_spawn_con(struct vc_data *vc)
@@ -657,7 +645,7 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
{
- printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n");
+ pr_err("k_lowercase was called - impossible\n");
}
static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
@@ -669,7 +657,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
value = handle_diacr(vc, value);
if (dead_key_next) {
- dead_key_next = 0;
+ dead_key_next = false;
diacr = value;
return;
}
@@ -691,6 +679,7 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return;
+
diacr = (diacr ? handle_diacr(vc, value) : value);
}
@@ -710,29 +699,28 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
- value = ret_diacr[value];
- k_deadunicode(vc, value, up_flag);
+
+ k_deadunicode(vc, ret_diacr[value], up_flag);
}
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
+
set_console(value);
}
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
- unsigned v;
-
if (up_flag)
return;
- v = value;
- if (v < ARRAY_SIZE(func_table)) {
+
+ if ((unsigned)value < ARRAY_SIZE(func_table)) {
if (func_table[value])
puts_queue(vc, func_table[value]);
} else
- printk(KERN_ERR "k_fn called with value=%d\n", value);
+ pr_err("k_fn called with value=%d\n", value);
}
static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
@@ -741,6 +729,7 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
if (up_flag)
return;
+
applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
}
@@ -758,43 +747,45 @@ static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
return;
}
- if (!vc_kbd_led(kbd, VC_NUMLOCK))
+ if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
+
switch (value) {
- case KVAL(K_PCOMMA):
- case KVAL(K_PDOT):
- k_fn(vc, KVAL(K_REMOVE), 0);
- return;
- case KVAL(K_P0):
- k_fn(vc, KVAL(K_INSERT), 0);
- return;
- case KVAL(K_P1):
- k_fn(vc, KVAL(K_SELECT), 0);
- return;
- case KVAL(K_P2):
- k_cur(vc, KVAL(K_DOWN), 0);
- return;
- case KVAL(K_P3):
- k_fn(vc, KVAL(K_PGDN), 0);
- return;
- case KVAL(K_P4):
- k_cur(vc, KVAL(K_LEFT), 0);
- return;
- case KVAL(K_P6):
- k_cur(vc, KVAL(K_RIGHT), 0);
- return;
- case KVAL(K_P7):
- k_fn(vc, KVAL(K_FIND), 0);
- return;
- case KVAL(K_P8):
- k_cur(vc, KVAL(K_UP), 0);
- return;
- case KVAL(K_P9):
- k_fn(vc, KVAL(K_PGUP), 0);
- return;
- case KVAL(K_P5):
- applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
- return;
+ case KVAL(K_PCOMMA):
+ case KVAL(K_PDOT):
+ k_fn(vc, KVAL(K_REMOVE), 0);
+ return;
+ case KVAL(K_P0):
+ k_fn(vc, KVAL(K_INSERT), 0);
+ return;
+ case KVAL(K_P1):
+ k_fn(vc, KVAL(K_SELECT), 0);
+ return;
+ case KVAL(K_P2):
+ k_cur(vc, KVAL(K_DOWN), 0);
+ return;
+ case KVAL(K_P3):
+ k_fn(vc, KVAL(K_PGDN), 0);
+ return;
+ case KVAL(K_P4):
+ k_cur(vc, KVAL(K_LEFT), 0);
+ return;
+ case KVAL(K_P6):
+ k_cur(vc, KVAL(K_RIGHT), 0);
+ return;
+ case KVAL(K_P7):
+ k_fn(vc, KVAL(K_FIND), 0);
+ return;
+ case KVAL(K_P8):
+ k_cur(vc, KVAL(K_UP), 0);
+ return;
+ case KVAL(K_P9):
+ k_fn(vc, KVAL(K_PGUP), 0);
+ return;
+ case KVAL(K_P5):
+ applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
+ return;
}
+ }
put_queue(vc, pad_chars[value]);
if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
@@ -880,6 +871,7 @@ static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag || rep)
return;
+
chg_vc_kbd_lock(kbd, value);
}
@@ -888,6 +880,7 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
k_shift(vc, value, up_flag);
if (up_flag || rep)
return;
+
chg_vc_kbd_slock(kbd, value);
/* try to make Alt, oops, AltGr and such work */
if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
@@ -925,12 +918,12 @@ static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
- static unsigned pressed,committing;
+ static unsigned pressed, committing;
static unsigned long releasestart;
if (kbd->kbdmode != VC_UNICODE) {
if (!up_flag)
- printk("keyboard mode must be unicode for braille patterns\n");
+ pr_warning("keyboard mode must be unicode for braille patterns\n");
return;
}
@@ -942,32 +935,28 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
if (value > 8)
return;
- if (up_flag) {
- if (brl_timeout) {
- if (!committing ||
- time_after(jiffies,
- releasestart + msecs_to_jiffies(brl_timeout))) {
- committing = pressed;
- releasestart = jiffies;
- }
- pressed &= ~(1 << (value - 1));
- if (!pressed) {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- }
- } else {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- pressed &= ~(1 << (value - 1));
- }
- } else {
+ if (!up_flag) {
pressed |= 1 << (value - 1);
if (!brl_timeout)
committing = pressed;
+ } else if (brl_timeout) {
+ if (!committing ||
+ time_after(jiffies,
+ releasestart + msecs_to_jiffies(brl_timeout))) {
+ committing = pressed;
+ releasestart = jiffies;
+ }
+ pressed &= ~(1 << (value - 1));
+ if (!pressed && committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ } else {
+ if (committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ pressed &= ~(1 << (value - 1));
}
}
@@ -988,6 +977,7 @@ void setledstate(struct kbd_struct *kbd, unsigned int led)
kbd->ledmode = LED_SHOW_IOCTL;
} else
kbd->ledmode = LED_SHOW_FLAGS;
+
set_leds();
}
@@ -1075,7 +1065,7 @@ static const unsigned short x86_keycodes[256] =
332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };
#ifdef CONFIG_SPARC
-static int sparc_l1_a_state = 0;
+static int sparc_l1_a_state;
extern void sun_do_break(void);
#endif
@@ -1085,52 +1075,54 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
int code;
switch (keycode) {
- case KEY_PAUSE:
- put_queue(vc, 0xe1);
- put_queue(vc, 0x1d | up_flag);
- put_queue(vc, 0x45 | up_flag);
- break;
- case KEY_HANGEUL:
- if (!up_flag)
- put_queue(vc, 0xf2);
- break;
+ case KEY_PAUSE:
+ put_queue(vc, 0xe1);
+ put_queue(vc, 0x1d | up_flag);
+ put_queue(vc, 0x45 | up_flag);
+ break;
- case KEY_HANJA:
- if (!up_flag)
- put_queue(vc, 0xf1);
- break;
+ case KEY_HANGEUL:
+ if (!up_flag)
+ put_queue(vc, 0xf2);
+ break;
- case KEY_SYSRQ:
- /*
- * Real AT keyboards (that's what we're trying
- * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
- * pressing PrtSc/SysRq alone, but simply 0x54
- * when pressing Alt+PrtSc/SysRq.
- */
- if (sysrq_alt) {
- put_queue(vc, 0x54 | up_flag);
- } else {
- put_queue(vc, 0xe0);
- put_queue(vc, 0x2a | up_flag);
- put_queue(vc, 0xe0);
- put_queue(vc, 0x37 | up_flag);
- }
- break;
+ case KEY_HANJA:
+ if (!up_flag)
+ put_queue(vc, 0xf1);
+ break;
- default:
- if (keycode > 255)
- return -1;
+ case KEY_SYSRQ:
+ /*
+ * Real AT keyboards (that's what we're trying
+ * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+			 * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
+ * when pressing Alt+PrtSc/SysRq.
+ */
+ if (test_bit(KEY_LEFTALT, key_down) ||
+ test_bit(KEY_RIGHTALT, key_down)) {
+ put_queue(vc, 0x54 | up_flag);
+ } else {
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x2a | up_flag);
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x37 | up_flag);
+ }
+ break;
- code = x86_keycodes[keycode];
- if (!code)
- return -1;
+ default:
+ if (keycode > 255)
+ return -1;
- if (code & 0x100)
- put_queue(vc, 0xe0);
- put_queue(vc, (code & 0x7f) | up_flag);
+ code = x86_keycodes[keycode];
+ if (!code)
+ return -1;
- break;
+ if (code & 0x100)
+ put_queue(vc, 0xe0);
+ put_queue(vc, (code & 0x7f) | up_flag);
+
+ break;
}
return 0;
@@ -1153,6 +1145,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char u
static void kbd_rawcode(unsigned char data)
{
struct vc_data *vc = vc_cons[fg_console].d;
+
kbd = kbd_table + vc->vc_num;
if (kbd->kbdmode == VC_RAW)
put_queue(vc, data);
@@ -1162,10 +1155,12 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned short keysym, *key_map;
- unsigned char type, raw_mode;
+ unsigned char type;
+ bool raw_mode;
struct tty_struct *tty;
int shift_final;
struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
+ int rc;
tty = vc->vc_tty;
@@ -1176,8 +1171,6 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
kbd = kbd_table + vc->vc_num;
- if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
- sysrq_alt = down ? keycode : 0;
#ifdef CONFIG_SPARC
if (keycode == KEY_STOP)
sparc_l1_a_state = down;
@@ -1185,29 +1178,16 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
rep = (down == 2);
- if ((raw_mode = (kbd->kbdmode == VC_RAW)) && !hw_raw)
+ raw_mode = (kbd->kbdmode == VC_RAW);
+ if (raw_mode && !hw_raw)
if (emulate_raw(vc, keycode, !down << 7))
if (keycode < BTN_MISC && printk_ratelimit())
- printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode);
+ pr_warning("can't emulate rawmode for keycode %d\n",
+ keycode);
-#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
- if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
- if (!sysrq_down) {
- sysrq_down = down;
- sysrq_alt_use = sysrq_alt;
- }
- return;
- }
- if (sysrq_down && !down && keycode == sysrq_alt_use)
- sysrq_down = 0;
- if (sysrq_down && down && !rep) {
- handle_sysrq(kbd_sysrq_xlate[keycode], tty);
- return;
- }
-#endif
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
- sparc_l1_a_state = 0;
+ sparc_l1_a_state = false;
sun_do_break();
}
#endif
@@ -1229,7 +1209,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
put_queue(vc, (keycode >> 7) | 0x80);
put_queue(vc, keycode | 0x80);
}
- raw_mode = 1;
+ raw_mode = true;
}
if (down)
@@ -1252,29 +1232,32 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
param.ledstate = kbd->ledflagstate;
key_map = key_maps[shift_final];
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, &param) == NOTIFY_STOP || !key_map) {
- atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNBOUND_KEYCODE, &param);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYCODE, &param);
+ if (rc == NOTIFY_STOP || !key_map) {
+ atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNBOUND_KEYCODE, &param);
compute_shiftstate();
kbd->slockstate = 0;
return;
}
- if (keycode >= NR_KEYS)
- if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
- keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
- else
- return;
- else
+ if (keycode < NR_KEYS)
keysym = key_map[keycode];
+ else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
+ keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
+ else
+ return;
type = KTYP(keysym);
if (type < 0xf0) {
param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, &param) == NOTIFY_STOP)
- return;
- if (down && !raw_mode)
- to_utf8(vc, keysym);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNICODE, &param);
+ if (rc != NOTIFY_STOP)
+ if (down && !raw_mode)
+ to_utf8(vc, keysym);
return;
}
@@ -1288,9 +1271,11 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
keysym = key_map[keycode];
}
}
- param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYSYM, &param) == NOTIFY_STOP)
+ param.value = keysym;
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYSYM, &param);
+ if (rc == NOTIFY_STOP)
return;
if (raw_mode && type != KT_SPEC && type != KT_SHIFT)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 92ab03d2829..cd650ca8c67 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file)
old_fops = file->f_op;
file->f_op = new_fops;
if (file->f_op->open) {
+ file->private_data = c;
err=file->f_op->open(inode,file);
if (err) {
fops_put(file->f_op);
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
new file mode 100644
index 00000000000..c4161d5e053
--- /dev/null
+++ b/drivers/char/n_gsm.c
@@ -0,0 +1,2763 @@
+/*
+ * n_gsm.c GSM 0710 tty multiplexor
+ * Copyright (c) 2009/10 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
+ *
+ * TO DO:
+ * Mostly done: ioctls for setting modes/timing
+ * Partly done: hooks so you can pull off frames to non tty devs
+ * Restart DLCI 0 when it closes ?
+ * Test basic encoding
+ * Improve the tx engine
+ * Resolve tx side locking by adding a queue_head and routing
+ * all control traffic via it
+ * General tidy/document
+ * Review the locking/move to refcounts more (mux now moved to an
+ * alloc/free model ready)
+ * Use newest tty open/close port helpers and install hooks
+ * What to do about power functions ?
+ * Termios setting and negotiation
+ * Do we need a 'which mux are you' ioctl to correlate mux and tty sets
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/skbuff.h>
+#include <linux/gsmmux.h>
+
+static int debug;
+module_param(debug, int, 0600);
+
+#define T1 (HZ/10)
+#define T2 (HZ/3)
+#define N2 3
+
+/* Use long timers for testing at low speed with debug on */
+#ifdef DEBUG_TIMING
+#define T1 HZ
+#define T2 (2 * HZ)
+#endif
+
+/* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+ limits so this is plenty */
+#define MAX_MRU 512
+#define MAX_MTU 512
+
+/*
+ * Each block of data we have queued to go out is in the form of
+ * a gsm_msg which holds everything we need in a link layer independent
+ * format
+ */
+
+struct gsm_msg {
+ struct gsm_msg *next;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+ unsigned char *data; /* Points into buffer but not at the start */
+ unsigned char buffer[0];
+};
+
+/*
+ * Each active data link has a gsm_dlci structure associated which ties
+ * the link layer to an optional tty (if the tty side is open). To avoid
+ * complexity right now these are only ever freed up when the mux is
+ * shut down.
+ *
+ * At the moment we don't free DLCI objects until the mux is torn down;
+ * this avoids object lifetime issues but might be worth reviewing later.
+ */
+
+struct gsm_dlci {
+ struct gsm_mux *gsm;
+ int addr;
+ int state;
+#define DLCI_CLOSED 0
+#define DLCI_OPENING 1 /* Sending SABM not seen UA */
+#define DLCI_OPEN 2 /* SABM/UA complete */
+#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+
+ /* Link layer */
+ spinlock_t lock; /* Protects the internal state */
+ struct timer_list t1; /* Retransmit timer for SABM and UA */
+ int retries;
+ /* Uplink tty if active */
+ struct tty_port port; /* The tty bound to this DLCI if there is one */
+ struct kfifo *fifo; /* Queue fifo for the DLCI */
+ struct kfifo _fifo; /* For new fifo API porting only */
+ int adaption; /* Adaption layer in use */
+ u32 modem_rx; /* Our incoming virtual modem lines */
+ u32 modem_tx; /* Our outgoing modem lines */
+ int dead; /* Refuse re-open */
+ /* Flow control */
+ int throttled; /* Private copy of throttle state */
+ int constipated; /* Throttle status for outgoing */
+ /* Packetised I/O */
+ struct sk_buff *skb; /* Frame being sent */
+ struct sk_buff_head skb_list; /* Queued frames */
+ /* Data handling callback */
+ void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+};
+
+/* DLCI 0, 62/63 are special or reserved; see gsmtty_open */
+
+#define NUM_DLCI 64
+
+/*
+ * DLCI 0 is used to pass control blocks out of band of the data
+ * flow (and with a higher link priority). One command can be outstanding
+ * at a time and we use this structure to manage them. They are created
+ * and destroyed by the user context, and updated by the receive paths
+ * and timers
+ */
+
+struct gsm_control {
+ u8 cmd; /* Command we are issuing */
+ u8 *data; /* Data for the command in case we retransmit */
+ int len; /* Length of block for retransmission */
+ int done; /* Done flag */
+ int error; /* Error if any */
+};
+
+/*
+ * Each GSM mux we have is represented by this structure. If we are
+ * operating as an ldisc then we use this structure as our ldisc
+ * state. We need to sort out lifetimes and locking with respect
+ * to the gsm mux array. For now we don't free DLCI objects that
+ * have been instantiated until the mux itself is terminated.
+ *
+ * To consider further: tty open versus mux shutdown.
+ */
+
+struct gsm_mux {
+ struct tty_struct *tty; /* The tty our ldisc is bound to */
+ spinlock_t lock;
+
+ /* Events on the GSM channel */
+ wait_queue_head_t event;
+
+ /* Bits for GSM mode decoding */
+
+ /* Framing Layer */
+ unsigned char *buf;
+ int state;
+#define GSM_SEARCH 0
+#define GSM_START 1
+#define GSM_ADDRESS 2
+#define GSM_CONTROL 3
+#define GSM_LEN 4
+#define GSM_DATA 5
+#define GSM_FCS 6
+#define GSM_OVERRUN 7
+ unsigned int len;
+ unsigned int address;
+ unsigned int count;
+ int escape;
+ int encoding;
+ u8 control;
+ u8 fcs;
+ u8 *txframe; /* TX framing buffer */
+
+ /* Methods for the receiver side */
+ void (*receive)(struct gsm_mux *gsm, u8 ch);
+ void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
+ /* And transmit side */
+ int (*output)(struct gsm_mux *mux, u8 *data, int len);
+
+ /* Link Layer */
+ unsigned int mru;
+ unsigned int mtu;
+ int initiator; /* Did we initiate connection */
+ int dead; /* Has the mux been shut down */
+ struct gsm_dlci *dlci[NUM_DLCI];
+ int constipated; /* Asked by remote to shut up */
+
+ spinlock_t tx_lock;
+ unsigned int tx_bytes; /* TX data outstanding */
+#define TX_THRESH_HI 8192
+#define TX_THRESH_LO 2048
+ struct gsm_msg *tx_head; /* Pending data packets */
+ struct gsm_msg *tx_tail;
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+ int cretries; /* Command retry counter */
+ struct gsm_control *pending_cmd;/* Our current pending command */
+ spinlock_t control_lock; /* Protects the pending command */
+
+ /* Configuration */
+ int adaption; /* 1 or 2 supported */
+ u8 ftype; /* UI or UIH */
+ int t1, t2; /* Timers in 1/100th of a sec */
+ int n2; /* Retry count */
+
+ /* Statistics (not currently exposed) */
+ unsigned long bad_fcs;
+ unsigned long malformed;
+ unsigned long io_error;
+ unsigned long bad_size;
+ unsigned long unsupported;
+};
+
+
+/*
+ * Mux objects - needed so that we can translate a tty index into the
+ * relevant mux and DLCI.
+ */
+
+#define MAX_MUX 4 /* 256 minors */
+static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
+static spinlock_t gsm_mux_lock;
+
+/*
+ * This section of the driver logic implements the GSM encodings
+ * both the basic and the 'advanced'. Reliable transport is not
+ * supported.
+ */
+
+#define CR 0x02
+#define EA 0x01
+#define PF 0x10
+
+/* I is special: the rest are: */
+#define RR 0x01
+#define UI 0x03
+#define RNR 0x05
+#define REJ 0x09
+#define DM 0x0F
+#define SABM 0x2F
+#define DISC 0x43
+#define UA 0x63
+#define UIH 0xEF
+
+/* Channel commands */
+#define CMD_NSC 0x09
+#define CMD_TEST 0x11
+#define CMD_PSC 0x21
+#define CMD_RLS 0x29
+#define CMD_FCOFF 0x31
+#define CMD_PN 0x41
+#define CMD_RPN 0x49
+#define CMD_FCON 0x51
+#define CMD_CLD 0x61
+#define CMD_SNC 0x69
+#define CMD_MSC 0x71
+
+/* Virtual modem bits */
+#define MDM_FC 0x01
+#define MDM_RTC 0x02
+#define MDM_RTR 0x04
+#define MDM_IC 0x20
+#define MDM_DV 0x40
+
+#define GSM0_SOF 0xF9
+#define GSM1_SOF 0x7E
+#define GSM1_ESCAPE 0x7D
+#define GSM1_ESCAPE_BITS 0x20
+#define XON 0x11
+#define XOFF 0x13
+
+static const struct tty_port_operations gsm_port_ops;
+
+/*
+ * CRC table for GSM 0710
+ */
+
+static const u8 gsm_fcs8[256] = {
+ 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
+ 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
+ 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
+ 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
+ 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
+ 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
+ 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
+ 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
+ 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
+ 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
+ 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
+ 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
+ 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
+ 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
+ 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
+ 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
+ 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
+ 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
+ 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
+ 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
+ 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
+ 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
+ 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
+ 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
+ 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
+ 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
+ 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
+ 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
+ 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
+ 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
+ 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
+ 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
+};
+
+#define INIT_FCS 0xFF
+#define GOOD_FCS 0xCF
+
+/**
+ * gsm_fcs_add - update FCS
+ * @fcs: Current FCS
+ * @c: Next data
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add(u8 fcs, u8 c)
+{
+ return gsm_fcs8[fcs ^ c];
+}
+
+/**
+ * gsm_fcs_add_block - update FCS for a block
+ * @fcs: Current FCS
+ * @c: buffer of data
+ * @len: length of buffer
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
+{
+ while (len--)
+ fcs = gsm_fcs8[fcs ^ *c++];
+ return fcs;
+}
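Editor's illustration (not part of this patch): the user-space sketch below ties INIT_FCS, the table and GOOD_FCS together. It assumes gsm_fcs8[] is the reflected CRC-8 table for x^8 + x^2 + x + 1 (reflected polynomial 0xE0), regenerates an equivalent table, and checks that a header followed by its complemented FCS leaves the 0xCF residue the receiver looks for.

#include <stdint.h>
#include <stdio.h>

static uint8_t fcs8[256];

/* Rebuild a table equivalent to gsm_fcs8[] from the reflected polynomial */
static void fcs8_init(void)
{
	int i, bit;

	for (i = 0; i < 256; i++) {
		uint8_t crc = i;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xE0 : crc >> 1;
		fcs8[i] = crc;
	}
}

/* Same per-byte step as gsm_fcs_add()/gsm_fcs_add_block() */
static uint8_t fcs_block(uint8_t fcs, const uint8_t *p, int len)
{
	while (len--)
		fcs = fcs8[fcs ^ *p++];
	return fcs;
}

int main(void)
{
	/* Header of a SABM|PF command to DLCI 0, as gsm_send() builds it */
	uint8_t hdr[3] = { 0x03, 0x3F, 0x01 };
	uint8_t wire, check;

	fcs8_init();
	wire = 0xFF - fcs_block(0xFF, hdr, 3);	/* INIT_FCS, then complement */

	/* Receiver folds the received FCS octet into its running check */
	check = fcs_block(0xFF, hdr, 3);
	check = fcs8[check ^ wire];
	printf("FCS on wire %02X, residue %02X\n", wire, check);
	return check == 0xCF ? 0 : 1;	/* 0xCF is GOOD_FCS */
}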
+
+/**
+ * gsm_read_ea - read a byte into an EA
+ * @val: variable holding value
+ * @c: byte going into the EA
+ *
+ * Processes one byte of an EA. Updates the passed variable
+ * and returns 1 if the EA is now completely read
+ */
+
+static int gsm_read_ea(unsigned int *val, u8 c)
+{
+ /* Add the next 7 bits into the value */
+ *val <<= 7;
+ *val |= c >> 1;
+	/* Was this the last byte of the EA? 1 = yes */
+ return c & EA;
+}
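Editor's illustration (not part of this patch): running the same shift-and-or step over a couple of hypothetical octets shows how an EA field accumulates seven payload bits per octet until an octet with bit 0 set terminates it.

#include <stdint.h>
#include <stdio.h>

/* Mirrors gsm_read_ea(): returns non-zero once the EA field is complete */
static int read_ea(unsigned int *val, uint8_t c)
{
	*val <<= 7;
	*val |= c >> 1;
	return c & 0x01;
}

int main(void)
{
	/* Hypothetical field: EA clear on the first octet, set on the second */
	uint8_t field[2] = { 0x20, 0x41 };
	unsigned int val = 0;
	unsigned int i = 0;

	while (!read_ea(&val, field[i++]))
		;
	printf("decoded 0x%X from %u octets\n", val, i);	/* 0x820 from 2 */
	return 0;
}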
+
+/**
+ * gsm_encode_modem - encode modem data bits
+ * @dlci: DLCI to encode from
+ *
+ * Returns the correct GSM encoded modem status bits (6 bit field) for
+ * the current status of the DLCI and attached tty object
+ */
+
+static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+{
+ u8 modembits = 0;
+ /* FC is true flow control not modem bits */
+ if (dlci->throttled)
+ modembits |= MDM_FC;
+ if (dlci->modem_tx & TIOCM_DTR)
+ modembits |= MDM_RTC;
+ if (dlci->modem_tx & TIOCM_RTS)
+ modembits |= MDM_RTR;
+ if (dlci->modem_tx & TIOCM_RI)
+ modembits |= MDM_IC;
+ if (dlci->modem_tx & TIOCM_CD)
+ modembits |= MDM_DV;
+ return modembits;
+}
+
+/**
+ * gsm_print_packet - display a frame for debug
+ * @hdr: header to print before decode
+ * @addr: address EA from the frame
+ * @cr: C/R bit from the frame
+ * @control: control including PF bit
+ * @data: following data bytes
+ * @dlen: length of data
+ *
+ * Displays a packet in human readable format for debugging purposes. The
+ * style is based on amateur radio LAP-B dump display.
+ */
+
+static void gsm_print_packet(const char *hdr, int addr, int cr,
+ u8 control, const u8 *data, int dlen)
+{
+ if (!(debug & 1))
+ return;
+
+ printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]);
+
+ switch (control & ~PF) {
+ case SABM:
+ printk(KERN_CONT "SABM");
+ break;
+ case UA:
+ printk(KERN_CONT "UA");
+ break;
+ case DISC:
+ printk(KERN_CONT "DISC");
+ break;
+ case DM:
+ printk(KERN_CONT "DM");
+ break;
+ case UI:
+ printk(KERN_CONT "UI");
+ break;
+ case UIH:
+ printk(KERN_CONT "UIH");
+ break;
+ default:
+ if (!(control & 0x01)) {
+ printk(KERN_CONT "I N(S)%d N(R)%d",
+			(control & 0x0E) >> 1, (control & 0xE0) >> 5);
+ } else switch (control & 0x0F) {
+ case RR:
+ printk("RR(%d)", (control & 0xE0) >> 5);
+ break;
+ case RNR:
+ printk("RNR(%d)", (control & 0xE0) >> 5);
+ break;
+ case REJ:
+ printk("REJ(%d)", (control & 0xE0) >> 5);
+ break;
+ default:
+ printk(KERN_CONT "[%02X]", control);
+ }
+ }
+
+ if (control & PF)
+ printk(KERN_CONT "(P)");
+ else
+ printk(KERN_CONT "(F)");
+
+ if (dlen) {
+ int ct = 0;
+ while (dlen--) {
+ if (ct % 8 == 0)
+ printk(KERN_CONT "\n ");
+ printk(KERN_CONT "%02X ", *data++);
+ ct++;
+ }
+ }
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * Link level transmission side
+ */
+
+/**
+ *	gsm_stuff_frame	-	bytestuff a frame
+ * @ibuf: input
+ * @obuf: output
+ * @len: length of input
+ *
+ * Expand a buffer by bytestuffing it. The worst case size change
+ * is doubling and the caller is responsible for handing out
+ * suitable sized buffers.
+ */
+
+static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+{
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+ || *input == XON || *input == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
+ } else
+ *output++ = *input++;
+ olen++;
+ }
+ return olen;
+}
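Editor's illustration (not part of this patch): a user-space run of the same escaping rule makes the worst case visible; each of the four reserved octets (GSM1_SOF, GSM1_ESCAPE, XON, XOFF) becomes an escape octet plus the original XORed with GSM1_ESCAPE_BITS, so a frame can double in size.

#include <stdint.h>
#include <stdio.h>

/* Same rule as gsm_stuff_frame() above, with the constants spelled out */
static int stuff(const uint8_t *in, uint8_t *out, int len)
{
	int olen = 0;

	while (len--) {
		if (*in == 0x7E || *in == 0x7D || *in == 0x11 || *in == 0x13) {
			out[olen++] = 0x7D;		/* GSM1_ESCAPE */
			out[olen++] = *in++ ^ 0x20;	/* GSM1_ESCAPE_BITS */
		} else
			out[olen++] = *in++;
	}
	return olen;
}

int main(void)
{
	uint8_t in[3] = { 0x41, 0x7E, 0x42 };	/* hypothetical payload */
	uint8_t out[6];				/* worst case doubles */
	int i, n = stuff(in, out, 3);

	for (i = 0; i < n; i++)
		printf("%02X ", out[i]);	/* prints: 41 7D 5E 42 */
	printf("\n");
	return 0;
}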
+
+static void hex_packet(const unsigned char *p, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ if (i && (i % 16) == 0)
+ printk("\n");
+ printk("%02X ", *p++);
+ }
+ printk("\n");
+}
+
+/**
+ * gsm_send - send a control frame
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @cr: command/response bit
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a control frame. These do not go via the
+ * queueing logic as they should be transmitted ahead of data when
+ * they are needed.
+ *
+ * FIXME: Lock versus data TX path
+ */
+
+static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+{
+ int len;
+ u8 cbuf[10];
+ u8 ibuf[3];
+
+ switch (gsm->encoding) {
+ case 0:
+ cbuf[0] = GSM0_SOF;
+ cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[2] = control;
+ cbuf[3] = EA; /* Length of data = 0 */
+ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
+ cbuf[5] = GSM0_SOF;
+ len = 6;
+ break;
+ case 1:
+ case 2:
+ /* Control frame + packing (but not frame stuffing) in mode 1 */
+ ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[1] = control;
+ ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
+ /* Stuffing may double the size worst case */
+ len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
+ /* Now add the SOF markers */
+ cbuf[0] = GSM1_SOF;
+ cbuf[len + 1] = GSM1_SOF;
+ /* FIXME: we can omit the lead one in many cases */
+ len += 2;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ gsm->output(gsm, cbuf, len);
+ gsm_print_packet("-->", addr, cr, control, NULL, 0);
+}
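Editor's note, worked by hand from the code and the table above (not part of this patch): in basic mode a SABM command with P/F set to DLCI 0 leaves gsm_send() as

	F9 03 3F 01 1C F9

that is SOF, address ((0 << 2) | C/R | EA = 0x03), control (SABM | PF = 0x3F), a length octet carrying only the EA bit (0x01), the complemented FCS over the three preceding octets (0x1C), and the closing SOF. In the advanced encodings the same address/control/FCS octets are byte-stuffed and wrapped in 0x7E flags instead.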
+
+/**
+ * gsm_response - send a control response
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level response frame.
+ */
+
+static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 0, control);
+}
+
+/**
+ * gsm_command - send a control command
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level command frame.
+ */
+
+static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 1, control);
+}
+
+/* Data transmission */
+
+#define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */
+
+/**
+ * gsm_data_alloc - allocate data frame
+ * @gsm: GSM mux
+ * @addr: DLCI address
+ * @len: length excluding header and FCS
+ * @ctrl: control byte
+ *
+ * Allocate a new data buffer for sending frames with data. Space is left
+ * at the front for header bytes but that is treated as an implementation
+ * detail and not for the high level code to use
+ */
+
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl)
+{
+ struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
+ GFP_ATOMIC);
+ if (m == NULL)
+ return NULL;
+ m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+ m->next = NULL;
+ return m;
+}
+
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+static void gsm_data_kick(struct gsm_mux *gsm)
+{
+ struct gsm_msg *msg = gsm->tx_head;
+ int len;
+ int skip_sof = 0;
+
+ /* FIXME: We need to apply this solely to data messages */
+ if (gsm->constipated)
+ return;
+
+ while (gsm->tx_head != NULL) {
+ msg = gsm->tx_head;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+ gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ } else {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1 , msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ }
+
+ if (debug & 4) {
+ printk("gsm_data_kick: \n");
+ hex_packet(gsm->txframe, len);
+ }
+
+ if (gsm->output(gsm, gsm->txframe + skip_sof,
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_head = msg->next;
+ if (gsm->tx_head == NULL)
+ gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+ kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
+ }
+}
+
+/**
+ * __gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. The Caller must hold
+ * the gsm tx lock.
+ */
+
+static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ u8 *dp = msg->data;
+ u8 *fcs = dp + msg->len;
+
+ /* Fill in the header */
+ if (gsm->encoding == 0) {
+ if (msg->len < 128)
+ *--dp = (msg->len << 1) | EA;
+ else {
+ *--dp = (msg->len >> 6) | EA;
+ *--dp = (msg->len & 127) << 1;
+ }
+ }
+
+ *--dp = msg->ctrl;
+ if (gsm->initiator)
+ *--dp = (msg->addr << 2) | 2 | EA;
+ else
+ *--dp = (msg->addr << 2) | EA;
+ *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp);
+ /* Ugly protocol layering violation */
+ if (msg->ctrl == UI || msg->ctrl == (UI|PF))
+ *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
+ *fcs = 0xFF - *fcs;
+
+ gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
+ msg->data, msg->len);
+
+ /* Move the header back and adjust the length, also allow for the FCS
+ now tacked on the end */
+ msg->len += (msg->data - dp) + 1;
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+ if (gsm->tx_tail)
+ gsm->tx_tail->next = msg;
+ else
+ gsm->tx_head = msg;
+ gsm->tx_tail = msg;
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+}
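Editor's note (not part of this patch): as a concrete example of the backwards header build, queueing a two-byte UIH payload for DLCI 1 from the initiator side in basic mode prepends, working backwards, the length octet ((2 << 1) | EA = 0x05), the control octet (UIH = 0xEF) and the address octet ((1 << 2) | 2 | EA = 0x07), so the frame body reads 07 EF 05 <data> <fcs>. For UIH the FCS covers only those three header octets; for UI it also covers the payload, which is the 'ugly protocol layering violation' flagged in the code.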
+
+/**
+ * gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. Take the
+ *	gsm tx lock and dlci lock.
+ */
+
+static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ __gsm_data_queue(dlci, msg);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_output - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles the usual tty
+ * interface which is a byte stream with optional modem data.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int h = dlci->adaption - 1;
+
+ len = kfifo_len(dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu)
+ len = gsm->mtu;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+	case 2: /* Unstructured with modem bits. Always one byte as we never
+ send inline break data */
+ *dp += gsm_encode_modem(dlci);
+ len--;
+ break;
+ }
+ WARN_ON(kfifo_out_locked(dlci->fifo, dp , len, &dlci->lock) != len);
+ __gsm_data_queue(dlci, msg);
+ /* Bytes of data we used up */
+ return size;
+}
+
+/**
+ * gsm_dlci_data_output_framed - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MTU of the mux. This path handles framed data
+ * queued as skbuffs to the DLCI.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int last = 0, first = 0;
+ int overhead = 0;
+
+ /* One byte per frame is used for B/F flags */
+ if (dlci->adaption == 4)
+ overhead = 1;
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+ dlci->skb = skb_dequeue(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+ }
+ len = dlci->skb->len + overhead;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+ kfree_skb(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+ len = gsm->mtu;
+ } else
+ last = 1;
+
+ size = len + overhead;
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+ /* Flag byte to carry the start/end info */
+ *dp++ = last << 7 | first << 6 | 1; /* EA */
+ len--;
+ }
+ memcpy(dp, skb_pull(dlci->skb, len), len);
+ __gsm_data_queue(dlci, msg);
+ if (last)
+ dlci->skb = NULL;
+ return size;
+}
+
+/**
+ * gsm_dlci_data_sweep - look for data to send
+ * @gsm: the GSM mux
+ *
+ * Sweep the GSM mux channels in priority order looking for ones with
+ * data to send. We could do with optimising this scan a bit. We aim
+ * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
+ * TX_THRESH_LO we get called again
+ *
+ * FIXME: We should round robin between groups and in theory you can
+ * renegotiate DLCI priorities with optional stuff. Needs optimising.
+ */
+
+static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+{
+ int len;
+ /* Priority ordering: We should do priority with RR of the groups */
+ int i = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ while (i < NUM_DLCI) {
+ struct gsm_dlci *dlci;
+
+ if (gsm->tx_bytes > TX_THRESH_HI)
+ break;
+ dlci = gsm->dlci[i];
+ if (dlci == NULL || dlci->constipated) {
+ i++;
+ continue;
+ }
+ if (dlci->adaption < 3)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ if (len < 0)
+ return;
+ /* DLCI empty - try the next */
+ if (len == 0)
+ i++;
+ }
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_kick - transmit if possible
+ * @dlci: DLCI to kick
+ *
+ * Transmit data from this DLCI if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+
+static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (dlci->gsm->tx_bytes == 0)
+ gsm_dlci_data_output(dlci->gsm, dlci);
+ else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/*
+ * Control message processing
+ */
+
+
+/**
+ * gsm_control_reply - send a response frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our response.
+ */
+
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg;
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_process_modem - process received modem status
+ * @tty: virtual tty bound to the DLCI
+ * @dlci: DLCI to affect
+ * @modem: modem bits (full EA)
+ *
+ * Used when a modem control message or a line state sent inline in
+ * adaption layer 2 is processed. Sorts out the local modem state and
+ * throttling.
+ */
+
+static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ u32 modem)
+{
+ int mlines = 0;
+ u8 brk = modem >> 6;
+
+ /* Flow control/ready to communicate */
+ if (modem & MDM_FC) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+ }
+ if (modem & MDM_RTC) {
+ mlines |= TIOCM_DSR | TIOCM_DTR;
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
+ /* Map modem bits */
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+ mlines |= TIOCM_RI;
+ if (modem & MDM_DV)
+ mlines |= TIOCM_CD;
+
+ /* Carrier drop -> hangup */
+ if (tty) {
+ if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
+ if (!(tty->termios->c_cflag & CLOCAL))
+ tty_hangup(tty);
+ if (brk & 0x01)
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ }
+ dlci->modem_rx = mlines;
+}
+
+/**
+ * gsm_control_modem - modem status received
+ * @gsm: GSM channel
+ * @data: data following command
+ * @clen: command length
+ *
+ * We have received a modem status control message. This is used by
+ * the GSM mux protocol to pass virtual modem line status and optionally
+ * to indicate break signals. Unpack it, convert to Linux representation
+ * and if need be stuff a break message down the tty.
+ */
+
+static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ unsigned int addr = 0;
+ unsigned int modem = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+ struct tty_struct *tty;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following the EA */
+ len--;
+ if (len <= 0)
+ return;
+
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ dlci = gsm->dlci[addr];
+
+ while (gsm_read_ea(&modem, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_MSC, data, clen);
+}
+
+/**
+ * gsm_control_rls - remote line status
+ * @gsm: GSM channel
+ * @data: data bytes
+ * @clen: data length
+ *
+ * The modem sends us a two byte message on the control channel whenever
+ * it wishes to send us an error state from the virtual link. Stuff
+ * this into the uplink tty if present
+ */
+
+static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ struct tty_struct *tty;
+ unsigned int addr = 0;
+ u8 bits;
+ int len = clen;
+ u8 *dp = data;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following the EA */
+ len--;
+ if (len <= 0)
+ return;
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ /* No error ? */
+ bits = *dp;
+ if ((bits & 1) == 0)
+ return;
+ /* See if we have an uplink tty */
+ tty = tty_port_tty_get(&gsm->dlci[addr]->port);
+
+ if (tty) {
+ if (bits & 2)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_RLS, data, clen);
+}
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
+
+/**
+ * gsm_control_message - DLCI 0 control processing
+ * @gsm: our GSM mux
+ * @command: the command EA
+ * @data: data beyond the command/length EAs
+ * @clen: length
+ *
+ * Input processor for control messages from the other end of the link.
+ * Processes the incoming request and queues a response frame or an
+ * NSC response if not supported
+ */
+
+static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ u8 buf[1];
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ /* Modem wishes to close down */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ }
+ }
+ break;
+ case CMD_TEST:
+ /* Modem wishes to test, reply with the data */
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+ /* Modem wants us to STFU */
+ gsm->constipated = 1;
+ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ break;
+ case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+ gsm_control_modem(gsm, data, clen);
+ break;
+ case CMD_RLS:
+ /* Out of band error reception for a DLCI */
+ gsm_control_rls(gsm, data, clen);
+ break;
+ case CMD_PSC:
+ /* Modem wishes to enter power saving state */
+ gsm_control_reply(gsm, CMD_PSC, NULL, 0);
+ break;
+ /* Optional unsupported commands */
+ case CMD_PN: /* Parameter negotiation */
+ case CMD_RPN: /* Remote port negotiation */
+ case CMD_SNC: /* Service negotiation command */
+ default:
+ /* Reply to bad commands with an NSC */
+ buf[0] = command;
+ gsm_control_reply(gsm, CMD_NSC, buf, 1);
+ break;
+ }
+}
+
+/**
+ * gsm_control_response - process a response to our control
+ * @gsm: our GSM mux
+ * @command: the command (response) EA
+ * @data: data beyond the command/length EA
+ * @clen: length
+ *
+ * Process a response to an outstanding command. We only allow a single
+ * control message in flight so this is fairly easy. All the clean up
+ * is done by the caller, we just update the fields, flag it as done
+ * and return
+ */
+
+static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ struct gsm_control *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->control_lock, flags);
+
+ ctrl = gsm->pending_cmd;
+ /* Does the reply match our command */
+ command |= 1;
+ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
+ /* Our command was replied to, kill the retry timer */
+ del_timer(&gsm->t2_timer);
+ gsm->pending_cmd = NULL;
+ /* Rejected by the other end */
+ if (command == CMD_NSC)
+ ctrl->error = -EOPNOTSUPP;
+ ctrl->done = 1;
+ wake_up(&gsm->event);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_transmit - send control packet
+ * @gsm: gsm mux
+ * @ctrl: frame to send
+ *
+ * Send out a pending control command (called under control lock)
+ */
+
+static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1,
+ gsm->ftype|PF);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
+ memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_control_retransmit - retransmit a control frame
+ * @data: pointer to our gsm object
+ *
+ * Called off the T2 timer expiry in order to retransmit control frames
+ * that have been lost in the system somewhere. The control_lock protects
+ * us from colliding with another sender or a receive completion event.
+ * In that situation the timer may still occur in a small window but
+ * gsm->pending_cmd will be NULL and we just let the timer expire.
+ */
+
+static void gsm_control_retransmit(unsigned long data)
+{
+ struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_control *ctrl;
+ unsigned long flags;
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ ctrl = gsm->pending_cmd;
+ if (ctrl) {
+ gsm->cretries--;
+ if (gsm->cretries == 0) {
+ gsm->pending_cmd = NULL;
+ ctrl->error = -ETIMEDOUT;
+ ctrl->done = 1;
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ wake_up(&gsm->event);
+ return;
+ }
+ gsm_control_transmit(gsm, ctrl);
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_send - send a control frame on DLCI 0
+ * @gsm: the GSM channel
+ * @command: command to send including CR bit
+ * @data: bytes of data (must be kmalloced)
+ * @clen: length of the block to send
+ *
+ * Queue and dispatch a control command. Only one command can be
+ * active at a time. In theory more can be outstanding but the matching
+ * gets really complicated so for now stick to one outstanding.
+ */
+
+static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+ unsigned int command, u8 *data, int clen)
+{
+ struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
+ GFP_KERNEL);
+ unsigned long flags;
+ if (ctrl == NULL)
+ return NULL;
+retry:
+ wait_event(gsm->event, gsm->pending_cmd == NULL);
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ if (gsm->pending_cmd != NULL) {
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ goto retry;
+ }
+ ctrl->cmd = command;
+ ctrl->data = data;
+ ctrl->len = clen;
+ gsm->pending_cmd = ctrl;
+ gsm->cretries = gsm->n2;
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ gsm_control_transmit(gsm, ctrl);
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ return ctrl;
+}
+
+/**
+ * gsm_control_wait - wait for a control to finish
+ * @gsm: GSM mux
+ * @control: control we are waiting on
+ *
+ * Waits for the control to complete or time out. Frees any used
+ * resources and returns 0 for success, or an error if the remote
+ * rejected or ignored the request.
+ */
+
+static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
+{
+ int err;
+ wait_event(gsm->event, control->done == 1);
+ err = control->error;
+ kfree(control);
+ return err;
+}
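+
+/*
+ * A minimal sketch of how the pair above is intended to be used; the
+ * helper name is hypothetical and the data buffer must stay valid until
+ * the command completes, because gsm_control_send() keeps a pointer to
+ * it rather than copying it:
+ *
+ *	static int example_send_test(struct gsm_mux *gsm, u8 *pat, int len)
+ *	{
+ *		struct gsm_control *ctrl;
+ *
+ *		ctrl = gsm_control_send(gsm, CMD_TEST, pat, len);
+ *		if (ctrl == NULL)
+ *			return -ENOMEM;
+ *		return gsm_control_wait(gsm, ctrl);
+ *	}
+ *
+ * gsmtty_modem_update() below follows this pattern for CMD_MSC.
+ */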
+
+
+/*
+ * DLCI level handling: Needs krefs
+ */
+
+/*
+ * State transitions and timers
+ */
+
+/**
+ * gsm_dlci_close - a DLCI has closed
+ * @dlci: DLCI that closed
+ *
+ * Perform processing when moving a DLCI into closed state. If there
+ * is an attached tty this is hung up
+ */
+
+static void gsm_dlci_close(struct gsm_dlci *dlci)
+{
+ del_timer(&dlci->t1);
+ if (debug & 8)
+ printk("DLCI %d goes closed.\n", dlci->addr);
+ dlci->state = DLCI_CLOSED;
+ if (dlci->addr != 0) {
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ kfifo_reset(dlci->fifo);
+ } else
+ dlci->gsm->dead = 1;
+ wake_up(&dlci->gsm->event);
+ /* A DLCI 0 close is a MUX termination so we need to kick that
+ back to userspace somehow */
+}
+
+/**
+ * gsm_dlci_open - a DLCI has opened
+ * @dlci: DLCI that opened
+ *
+ * Perform processing when moving a DLCI into open state.
+ */
+
+static void gsm_dlci_open(struct gsm_dlci *dlci)
+{
+ /* Note that SABM UA .. SABM UA first UA lost can mean that we go
+ open -> open */
+ del_timer(&dlci->t1);
+ /* This will let a tty open continue */
+ dlci->state = DLCI_OPEN;
+ if (debug & 8)
+ printk("DLCI %d goes open.\n", dlci->addr);
+ wake_up(&dlci->gsm->event);
+}
+
+/**
+ * gsm_dlci_t1 - T1 timer expiry
+ * @dlci: DLCI whose T1 timer expired
+ *
+ * The T1 timer handles retransmits of control frames (essentially of
+ * SABM and DISC). We resend the command until the retry count runs out
+ * in which case an opening port goes back to closed and a closing port
+ * is simply put into closed state (any further frames from the other
+ * end will get a DM response)
+ */
+
+static void gsm_dlci_t1(unsigned long data)
+{
+ struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ switch (dlci->state) {
+ case DLCI_OPENING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_CLOSING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ }
+}
+
+/**
+ * gsm_dlci_begin_open - start channel open procedure
+ * @dlci: DLCI to open
+ *
+ * Commence opening a DLCI from the Linux side. We issue SABM messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into open state. Opening is done asynchronously with retry
+ * running off timers and the responses.
+ */
+
+static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_OPENING;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_begin_close - start channel close procedure
+ * @dlci: DLCI to close
+ *
+ * Commence closing a DLCI from the Linux side. We issue DISC messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into closed state. Closing is done asynchronously with retry
+ * off timers. We may also receive a DM reply from the other end which
+ * indicates the channel was already closed.
+ */
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_CLOSING;
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_data - data arrived
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for a channel
+ * other than the control channel. If the relevant virtual tty is
+ * open we shovel the bits down it, if not we drop them.
+ */
+
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* krefs .. */
+ struct tty_port *port = &dlci->port;
+ struct tty_struct *tty = tty_port_tty_get(port);
+ unsigned int modem = 0;
+
+ if (debug & 16)
+ printk("%d bytes for tty %p\n", len, tty);
+ if (tty) {
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+ /* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ gsm_process_modem(tty, dlci, modem);
+ /* Line state will go via DLCI 0 controls only */
+ /* Fall through */
+ case 1:
+ default:
+ tty_insert_flip_string(tty, data, len);
+ tty_flip_buffer_push(tty);
+ }
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * gsm_dlci_command - data arrived on control channel
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for DLCI 0, the
+ * control channel. This should contain a command EA followed by
+ * control data bytes. The command EA contains a command/response bit
+ * and we divide up the work accordingly.
+ */
+
+static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* See what command is involved */
+ unsigned int command = 0;
+ while (len-- > 0) {
+ if (gsm_read_ea(&command, *data++) == 1) {
+ int clen = *data++;
+ len--;
+ /* FIXME: this is properly an EA */
+ clen >>= 1;
+ /* Malformed command ? */
+ if (clen > len)
+ return;
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command,
+ data, clen);
+ else
+ gsm_control_response(dlci->gsm, command,
+ data, clen);
+ return;
+ }
+ }
+}
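+
+/*
+ * For reference, the payload layout the parser above expects: one or
+ * more command EA bytes, a single length byte (treated as an EA), then
+ * "length" value bytes. Bit 0 of the decoded command is the C/R bit
+ * and selects between command and response handling.
+ */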
+
+/*
+ * Allocate/Free DLCI channels
+ */
+
+/**
+ * gsm_dlci_alloc - allocate a DLCI
+ * @gsm: GSM mux
+ * @addr: address of the DLCI
+ *
+ * Allocate and install a new DLCI object into the GSM mux.
+ *
+ * FIXME: review locking races
+ */
+
+static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+{
+ struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
+ if (dlci == NULL)
+ return NULL;
+ spin_lock_init(&dlci->lock);
+ dlci->fifo = &dlci->_fifo;
+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+ kfree(dlci);
+ return NULL;
+ }
+
+ skb_queue_head_init(&dlci->skb_list);
+ init_timer(&dlci->t1);
+ dlci->t1.function = gsm_dlci_t1;
+ dlci->t1.data = (unsigned long)dlci;
+ tty_port_init(&dlci->port);
+ dlci->port.ops = &gsm_port_ops;
+ dlci->gsm = gsm;
+ dlci->addr = addr;
+ dlci->adaption = gsm->adaption;
+ dlci->state = DLCI_CLOSED;
+ if (addr)
+ dlci->data = gsm_dlci_data;
+ else
+ dlci->data = gsm_dlci_command;
+ gsm->dlci[addr] = dlci;
+ return dlci;
+}
+
+/**
+ * gsm_dlci_free - release DLCI
+ * @dlci: DLCI to destroy
+ *
+ * Free up a DLCI. Currently to keep the lifetime rules sane we only
+ * clean up DLCI objects when the MUX closes rather than as the port
+ * is closed down on both the tty and mux levels.
+ *
+ * Can sleep.
+ */
+static void gsm_dlci_free(struct gsm_dlci *dlci)
+{
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ del_timer_sync(&dlci->t1);
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ kfree(dlci);
+}
+
+
+/*
+ * LAPBish link layer logic
+ */
+
+/**
+ * gsm_queue - a GSM frame is ready to process
+ * @gsm: pointer to our gsm mux
+ *
+ * At this point in time a frame has arrived and been demangled from
+ * the line encoding. All the differences between the encodings have
+ * been handled below us and the frame is unpacked into the structures.
+ * The fcs holds the header FCS but any data FCS must be added here.
+ */
+
+static void gsm_queue(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ u8 cr;
+ int address;
+ /* We have to sneak a look at the packet body to do the FCS.
+ A somewhat layering violation in the spec */
+
+ if ((gsm->control & ~PF) == UI)
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+ if (gsm->fcs != GOOD_FCS) {
+ gsm->bad_fcs++;
+ if (debug & 4)
+ printk("BAD FCS %02x\n", gsm->fcs);
+ return;
+ }
+ address = gsm->address >> 1;
+ if (address >= NUM_DLCI)
+ goto invalid;
+
+ cr = gsm->address & 1; /* C/R bit */
+
+ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
+
+ cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
+ dlci = gsm->dlci[address];
+
+ switch (gsm->control) {
+ case SABM|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, address);
+ if (dlci == NULL)
+ return;
+ if (dlci->dead)
+ gsm_response(gsm, address, DM);
+ else {
+ gsm_response(gsm, address, UA);
+ gsm_dlci_open(dlci);
+ }
+ break;
+ case DISC|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL || dlci->state == DLCI_CLOSED) {
+ gsm_response(gsm, address, DM);
+ return;
+ }
+ /* Real close complete */
+ gsm_response(gsm, address, UA);
+ gsm_dlci_close(dlci);
+ break;
+ case UA:
+ case UA|PF:
+ if (cr == 0 || dlci == NULL)
+ break;
+ switch (dlci->state) {
+ case DLCI_CLOSING:
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_OPENING:
+ gsm_dlci_open(dlci);
+ break;
+ }
+ break;
+ case DM: /* DM can be valid unsolicited */
+ case DM|PF:
+ if (cr)
+ goto invalid;
+ if (dlci == NULL)
+ return;
+ gsm_dlci_close(dlci);
+ break;
+ case UI:
+ case UI|PF:
+ case UIH:
+ case UIH|PF:
+#if 0
+ if (cr)
+ goto invalid;
+#endif
+ if (dlci == NULL || dlci->state != DLCI_OPEN) {
+ gsm_command(gsm, address, DM|PF);
+ return;
+ }
+ dlci->data(dlci, gsm->buf, gsm->len);
+ break;
+ default:
+ goto invalid;
+ }
+ return;
+invalid:
+ gsm->malformed++;
+ return;
+}
+
+
+/**
+ * gsm0_receive - perform processing for non-transparency
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in gsm mode 0
+ */
+
+static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ switch (gsm->state) {
+ case GSM_SEARCH: /* SOF marker */
+ if (c == GSM0_SOF) {
+ gsm->state = GSM_ADDRESS;
+ gsm->address = 0;
+ gsm->len = 0;
+ gsm->fcs = INIT_FCS;
+ }
+ break;
+ case GSM_ADDRESS: /* Address EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->state = GSM_LEN;
+ break;
+ case GSM_LEN: /* Length EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->len, c)) {
+ if (gsm->len > gsm->mru) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ }
+ break;
+ case GSM_DATA: /* Data */
+ gsm->buf[gsm->count++] = c;
+ if (gsm->count == gsm->len)
+ gsm->state = GSM_FCS;
+ break;
+ case GSM_FCS: /* FCS follows the packet */
+ gsm->fcs = c;
+ gsm_queue(gsm);
+ /* And then back for the next frame */
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+}
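+
+/*
+ * For reference, the byte sequence the state machine above consumes
+ * for one basic option frame (as implemented here, not a restatement
+ * of the specification):
+ *
+ *	GSM0_SOF, address EA byte(s), control byte, length EA byte(s),
+ *	"length" data bytes, FCS byte
+ *
+ * after which the search for the next GSM0_SOF begins again.
+ */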
+
+/**
+ * gsm1_receive - perform processing for the advanced option
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in mode 1 (Advanced option)
+ */
+
+static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ if (c == GSM1_SOF) {
+ /* EOF is only valid in frame if we have got to the data state
+ and received at least one byte (the FCS) */
+ if (gsm->state == GSM_DATA && gsm->count) {
+ /* Extract the FCS */
+ gsm->count--;
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ gsm->len = gsm->count;
+ gsm_queue(gsm);
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Any partial frame was a runt so go back to start */
+ if (gsm->state != GSM_START) {
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ }
+ /* A SOF in GSM_START means we are still reading idling or
+ framing bytes */
+ return;
+ }
+
+ if (c == GSM1_ESCAPE) {
+ gsm->escape = 1;
+ return;
+ }
+
+ /* Only an unescaped SOF gets us out of GSM search */
+ if (gsm->state == GSM_SEARCH)
+ return;
+
+ if (gsm->escape) {
+ c ^= GSM1_ESCAPE_BITS;
+ gsm->escape = 0;
+ }
+ switch (gsm->state) {
+ case GSM_START: /* First byte after SOF */
+ gsm->address = 0;
+ gsm->state = GSM_ADDRESS;
+ gsm->fcs = INIT_FCS;
+ /* Drop through */
+ case GSM_ADDRESS: /* Address continuation */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ break;
+ case GSM_DATA: /* Data */
+ if (gsm->count > gsm->mru) { /* Allow one for the FCS */
+ gsm->state = GSM_OVERRUN;
+ gsm->bad_size++;
+ } else
+ gsm->buf[gsm->count++] = c;
+ break;
+ case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
+ break;
+ }
+}
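+
+/*
+ * For reference, the advanced option wire form handled above: frames
+ * are delimited by GSM1_SOF, there is no length field, the last byte
+ * before the closing flag is the FCS, and an escaped byte arrives as
+ * GSM1_ESCAPE followed by the original byte XORed with
+ * GSM1_ESCAPE_BITS, which the escape handling above undoes.
+ */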
+
+/**
+ * gsm_error - handle tty error
+ * @gsm: ldisc data
+ * @data: byte received (may be invalid)
+ * @flag: error received
+ *
+ * Handle an error in the receipt of data for a frame. Currently we just
+ * go back to hunting for a SOF.
+ *
+ * FIXME: better diagnostics ?
+ */
+
+static void gsm_error(struct gsm_mux *gsm,
+ unsigned char data, unsigned char flag)
+{
+ gsm->state = GSM_SEARCH;
+ gsm->io_error++;
+}
+
+/**
+ * gsm_cleanup_mux - generic GSM protocol cleanup
+ * @gsm: our mux
+ *
+ * Clean up the bits of the mux which are the same for all framing
+ * protocols. Remove the mux from the mux table, stop all the timers
+ * and then shut down each device hanging up the channels as we go.
+ */
+
+void gsm_cleanup_mux(struct gsm_mux *gsm)
+{
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ struct gsm_msg *txq;
+
+ gsm->dead = 1;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == gsm) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ WARN_ON(i == MAX_MUX);
+
+ del_timer_sync(&gsm->t2_timer);
+ /* Now we are sure T2 has stopped */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ wait_event_interruptible(gsm->event,
+ dlci->state == DLCI_CLOSED);
+ }
+ /* Free up any link layer users */
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ gsm_dlci_free(gsm->dlci[i]);
+ /* Now wipe the queues */
+ for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+ gsm->tx_head = txq->next;
+ kfree(txq);
+ }
+ gsm->tx_tail = NULL;
+}
+EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+/**
+ * gsm_activate_mux - generic GSM setup
+ * @gsm: our mux
+ *
+ * Set up the bits of the mux which are the same for all framing
+ * protocols. Add the mux to the mux table so it can be opened and
+ * finally kick off connecting to DLCI 0 on the modem.
+ */
+
+int gsm_activate_mux(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ int i = 0;
+
+ init_timer(&gsm->t2_timer);
+ gsm->t2_timer.function = gsm_control_retransmit;
+ gsm->t2_timer.data = (unsigned long)gsm;
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
+ spin_lock_init(&gsm->tx_lock);
+
+ if (gsm->encoding == 0)
+ gsm->receive = gsm0_receive;
+ else
+ gsm->receive = gsm1_receive;
+ gsm->error = gsm_error;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == NULL) {
+ gsm_mux[i] = gsm;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX)
+ return -EBUSY;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ gsm->dead = 0; /* Tty opens are now permissible */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsm_activate_mux);
+
+/**
+ * gsm_free_mux - free up a mux
+ * @gsm: mux to free
+ *
+ * Dispose of allocated resources for a dead mux. No refcounting
+ * at present so the mux must be truly dead.
+ */
+void gsm_free_mux(struct gsm_mux *gsm)
+{
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(gsm_free_mux);
+
+/**
+ * gsm_alloc_mux - allocate a mux
+ *
+ * Creates a new mux ready for activation.
+ */
+
+struct gsm_mux *gsm_alloc_mux(void)
+{
+ struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
+ if (gsm == NULL)
+ return NULL;
+ gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
+ if (gsm->buf == NULL) {
+ kfree(gsm);
+ return NULL;
+ }
+ gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ if (gsm->txframe == NULL) {
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+ spin_lock_init(&gsm->lock);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+ gsm->n2 = N2;
+ gsm->ftype = UIH;
+ gsm->initiator = 0;
+ gsm->adaption = 1;
+ gsm->encoding = 1;
+ gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
+ gsm->mtu = 64;
+ gsm->dead = 1; /* Avoid early tty opens */
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(gsm_alloc_mux);
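+
+/*
+ * Rough lifecycle sketch tying the four exported calls together, as the
+ * line discipline below uses them (my_output_fn is hypothetical, see
+ * gsmld_output() for the real one):
+ *
+ *	struct gsm_mux *gsm = gsm_alloc_mux();
+ *	if (gsm == NULL)
+ *		return -ENOMEM;
+ *	gsm->tty = tty_kref_get(tty);
+ *	gsm->output = my_output_fn;
+ *	if (gsm_activate_mux(gsm) < 0)
+ *		goto err;
+ *	...
+ *	gsm_cleanup_mux(gsm);
+ *	gsm_free_mux(gsm);
+ */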
+
+
+
+
+/**
+ * gsmld_output - write to link
+ * @gsm: our mux
+ * @data: bytes to output
+ * @len: size
+ *
+ * Write a block of data from the GSM mux to the data channel. This
+ * will eventually be serialized from above but at the moment isn't.
+ */
+
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
+{
+ if (tty_write_room(gsm->tty) < len) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
+ return -ENOSPC;
+ }
+ if (debug & 4) {
+ printk("-->%d bytes out\n", len);
+ hex_packet(data, len);
+ }
+ gsm->tty->ops->write(gsm->tty, data, len);
+ return len;
+}
+
+/**
+ * gsmld_attach_gsm - mode set up
+ * @tty: our tty structure
+ * @gsm: our mux
+ *
+ * Set up the MUX for basic mode and commence connecting to the
+ * modem. Currently called from the line discipline set up but
+ * will need moving to an ioctl path.
+ */
+
+static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ int ret;
+
+ gsm->tty = tty_kref_get(tty);
+ gsm->output = gsmld_output;
+ ret = gsm_activate_mux(gsm);
+ if (ret != 0)
+ tty_kref_put(gsm->tty);
+ return ret;
+}
+
+
+/**
+ * gsmld_detach_gsm - stop doing 0710 mux
+ * @tty: tty attached to the mux
+ * @gsm: mux
+ *
+ * Shutdown and then clean up the resources used by the line discipline
+ */
+
+static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ WARN_ON(tty != gsm->tty);
+ gsm_cleanup_mux(gsm);
+ tty_kref_put(gsm->tty);
+ gsm->tty = NULL;
+}
+
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+ const unsigned char *dp;
+ char *f;
+ int i;
+ char buf[64];
+ char flags;
+
+ if (debug & 4) {
+ printk("Inbytes %d\n", count);
+ hex_packet(cp, count);
+ }
+
+ for (i = count, dp = cp, f = fp; i; i--, dp++) {
+ flags = *f++;
+ switch (flags) {
+ case TTY_NORMAL:
+ gsm->receive(gsm, *dp);
+ break;
+ case TTY_OVERRUN:
+ case TTY_BREAK:
+ case TTY_PARITY:
+ case TTY_FRAME:
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+ }
+ /* FASYNC if needed ? */
+ /* If clogged call tty_throttle(tty); */
+}
+
+/**
+ * gsmld_chars_in_buffer - report available bytes
+ * @tty: tty device
+ *
+ * Report the number of characters buffered to be delivered to user
+ * at this instant in time.
+ *
+ * Locking: gsm lock
+ */
+
+static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+/**
+ * gsmld_flush_buffer - clean input queue
+ * @tty: terminal device
+ *
+ * Flush the input buffer. Called when the line discipline is
+ * being closed, when the tty layer wants the buffer flushed (eg
+ * at hangup).
+ */
+
+static void gsmld_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/**
+ * gsmld_close - close the ldisc for this tty
+ * @tty: device
+ *
+ * Called from the terminal layer when this line discipline is
+ * being shut down, either because of a close or because of a
+ * discipline change. The function will not be called while other
+ * ldisc methods are in progress.
+ */
+
+static void gsmld_close(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ gsmld_detach_gsm(tty, gsm);
+
+ gsmld_flush_buffer(tty);
+ /* Do other clean up here */
+ gsm_free_mux(gsm);
+}
+
+/**
+ * gsmld_open - open an ldisc
+ * @tty: terminal to open
+ *
+ * Called when this line discipline is being attached to the
+ * terminal device. Can sleep. Called serialized so that no
+ * other events will occur in parallel. No further open will occur
+ * until a close.
+ */
+
+static int gsmld_open(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm;
+
+ if (tty->ops->write == NULL)
+ return -EINVAL;
+
+ /* Attach our ldisc data */
+ gsm = gsm_alloc_mux();
+ if (gsm == NULL)
+ return -ENOMEM;
+
+ tty->disc_data = gsm;
+ tty->receive_room = 65536;
+
+ /* Attach the initial passive connection */
+ gsm->encoding = 1;
+ return gsmld_attach_gsm(tty, gsm);
+}
+
+/**
+ * gsmld_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void gsmld_write_wakeup(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(gsm);
+}
+
+/**
+ * gsmld_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Perform reads for the line discipline. We are guaranteed that the
+ * line discipline will not be closed under us but we may get multiple
+ * parallel readers and must handle this ourselves. We may also get
+ * a hangup. Always called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * gsmld_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Called when the owner of the device wants to send a frame
+ * itself (or some other control data). The data is transferred
+ * as-is and must be properly framed and checksummed as appropriate
+ * by userspace. Frames are either sent whole or not at all as this
+ * avoids pain on the user side.
+ */
+
+static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ int space = tty_write_room(tty);
+ if (space >= nr)
+ return tty->ops->write(tty, buf, nr);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return -ENOBUFS;
+}
+
+/**
+ * gsmld_poll - poll method for N_GSM0710
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or
+ * for special events. This code is not serialized with respect to
+ * other events save open/close.
+ *
+ * This code must be sure never to sleep through a hangup.
+ * Called without the kernel lock held - fine
+ */
+
+static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ poll_wait(file, &tty->read_wait, wait);
+ poll_wait(file, &tty->write_wait, wait);
+ if (tty_hung_up_p(file))
+ mask |= POLLHUP;
+ if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
+ mask |= POLLOUT | POLLWRNORM;
+ if (gsm->dead)
+ mask |= POLLHUP;
+ return mask;
+}
+
+static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
+ struct gsm_config *c)
+{
+ int need_close = 0;
+ int need_restart = 0;
+
+ /* Stuff we don't support yet - UI or I frame transport, windowing */
+ if ((c->adaption != 1 && c->adaption != 2) || c->k)
+ return -EOPNOTSUPP;
+ /* Check the MRU/MTU range looks sane */
+ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+ return -EINVAL;
+ if (c->n2 < 3)
+ return -EINVAL;
+ if (c->encapsulation > 1) /* Basic, advanced, no I */
+ return -EINVAL;
+ if (c->initiator > 1)
+ return -EINVAL;
+ if (c->i == 0 || c->i > 2) /* UIH and UI only */
+ return -EINVAL;
+ /*
+ * See what is needed for reconfiguration
+ */
+
+ /* Timing fields */
+ if (c->t1 != 0 && c->t1 != gsm->t1)
+ need_restart = 1;
+ if (c->t2 != 0 && c->t2 != gsm->t2)
+ need_restart = 1;
+ if (c->encapsulation != gsm->encoding)
+ need_restart = 1;
+ if (c->adaption != gsm->adaption)
+ need_restart = 1;
+ /* Requires care */
+ if (c->initiator != gsm->initiator)
+ need_close = 1;
+ if (c->mru != gsm->mru)
+ need_restart = 1;
+ if (c->mtu != gsm->mtu)
+ need_restart = 1;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration
+ */
+
+ if (need_close || need_restart) {
+ gsm_dlci_begin_close(gsm->dlci[0]);
+ /* This will timeout if the link is down due to N2 expiring */
+ wait_event_interruptible(gsm->event,
+ gsm->dlci[0]->state == DLCI_CLOSED);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ if (need_restart)
+ gsm_cleanup_mux(gsm);
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+
+ if (c->i == 1)
+ gsm->ftype = UIH;
+ else if (c->i == 2)
+ gsm->ftype = UI;
+
+ if (c->t1)
+ gsm->t1 = c->t1;
+ if (c->t2)
+ gsm->t2 = c->t2;
+
+ /* FIXME: We need to separate activation/deactivation from adding
+ and removing from the mux array */
+ if (need_restart)
+ gsm_activate_mux(gsm);
+ if (gsm->initiator && need_close)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ return 0;
+}
+
+static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gsm_config c;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ switch (cmd) {
+ case GSMIOC_GETCONF:
+ memset(&c, 0, sizeof(c));
+ c.adaption = gsm->adaption;
+ c.encapsulation = gsm->encoding;
+ c.initiator = gsm->initiator;
+ c.t1 = gsm->t1;
+ c.t2 = gsm->t2;
+ c.t3 = 0; /* Not supported */
+ c.n2 = gsm->n2;
+ if (gsm->ftype == UIH)
+ c.i = 1;
+ else
+ c.i = 2;
+ printk("Ftype %d i %d\n", gsm->ftype, c.i);
+ c.mru = gsm->mru;
+ c.mtu = gsm->mtu;
+ c.k = 0;
+ if (copy_to_user((void *)arg, &c, sizeof(c)))
+ return -EFAULT;
+ return 0;
+ case GSMIOC_SETCONF:
+ if (copy_from_user(&c, (void *)arg, sizeof(c)))
+ return -EFAULT;
+ return gsmld_config(tty, gsm, &c);
+ default:
+ return n_tty_ioctl_helper(tty, file, cmd, arg);
+ }
+}
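+
+/*
+ * A minimal userspace sketch of driving this interface, assuming the
+ * GSMIOC_* numbers and struct gsm_config are exported through a header
+ * (shown here as linux/gsmmux.h) and that N_GSM0710 is visible from the
+ * tty headers; error handling omitted:
+ *
+ *	int ldisc = N_GSM0710;
+ *	struct gsm_config c;
+ *
+ *	ioctl(fd, TIOCSETD, &ldisc);	attach the mux line discipline
+ *	ioctl(fd, GSMIOC_GETCONF, &c);	read the current settings
+ *	c.initiator = 1;		we open the DLCIs
+ *	c.encapsulation = 0;		basic option framing
+ *	ioctl(fd, GSMIOC_SETCONF, &c);	apply, may restart DLCI 0
+ *
+ * The virtual channels then appear as the gsmtty devices registered in
+ * gsm_init() below.
+ */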
+
+
+/* Line discipline for real tty */
+struct tty_ldisc_ops tty_ldisc_packet = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_gsm",
+ .open = gsmld_open,
+ .close = gsmld_close,
+ .flush_buffer = gsmld_flush_buffer,
+ .chars_in_buffer = gsmld_chars_in_buffer,
+ .read = gsmld_read,
+ .write = gsmld_write,
+ .ioctl = gsmld_ioctl,
+ .poll = gsmld_poll,
+ .receive_buf = gsmld_receive_buf,
+ .write_wakeup = gsmld_write_wakeup
+};
+
+/*
+ * Virtual tty side
+ */
+
+#define TX_SIZE 512
+
+static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ u8 modembits[5];
+ struct gsm_control *ctrl;
+ int len = 2;
+
+ if (brk)
+ len++;
+
+ modembits[0] = len << 1 | EA; /* Data bytes */
+ modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
+ modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
+ if (brk)
+ modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ if (ctrl == NULL)
+ return -ENOMEM;
+ return gsm_control_wait(dlci->gsm, ctrl);
+}
+
+static int gsm_carrier_raised(struct tty_port *port)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ /* Not yet open so no carrier info */
+ if (dlci->state != DLCI_OPEN)
+ return 0;
+ if (debug & 2)
+ return 1;
+ return dlci->modem_rx & TIOCM_CD;
+}
+
+static void gsm_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ unsigned int modem_tx = dlci->modem_tx;
+ if (onoff)
+ modem_tx |= TIOCM_DTR | TIOCM_RTS;
+ else
+ modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ gsmtty_modem_update(dlci, 0);
+ }
+}
+
+static const struct tty_port_operations gsm_port_ops = {
+ .carrier_raised = gsm_carrier_raised,
+ .dtr_rts = gsm_dtr_rts,
+};
+
+
+static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_mux *gsm;
+ struct gsm_dlci *dlci;
+ struct tty_port *port;
+ unsigned int line = tty->index;
+ unsigned int mux = line >> 6;
+
+ line = line & 0x3F;
+
+ if (mux >= MAX_MUX)
+ return -ENXIO;
+ /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
+ if (gsm_mux[mux] == NULL)
+ return -EUNATCH;
+ if (line == 0 || line > 61) /* 62/63 reserved */
+ return -ECHRNG;
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+ if (dlci == NULL)
+ return -ENOMEM;
+ port = &dlci->port;
+ port->count++;
+ tty->driver_data = dlci;
+ tty_port_tty_set(port, tty);
+
+ dlci->modem_rx = 0;
+ /* We could in theory open and close before we wait - eg if we get
+ a DM straight back. This is ok as that will have caused a hangup */
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ /* Start sending off SABM messages */
+ gsm_dlci_begin_open(dlci);
+ /* And wait for virtual carrier */
+ return tty_port_block_til_ready(port, tty, filp);
+}
+
+static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci == NULL)
+ return;
+ if (tty_port_close_start(&dlci->port, tty, filp) == 0)
+ return;
+ gsm_dlci_begin_close(dlci);
+ tty_port_close_end(&dlci->port, tty);
+ tty_port_tty_set(&dlci->port, NULL);
+}
+
+static void gsmtty_hangup(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ tty_port_hangup(&dlci->port);
+ gsm_dlci_begin_close(dlci);
+}
+
+static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Stuff the bytes into the fifo queue */
+ int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ /* Need to kick the channel */
+ gsm_dlci_data_kick(dlci);
+ return sent;
+}
+
+static int gsmtty_write_room(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return TX_SIZE - kfifo_len(dlci->fifo);
+}
+
+static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return kfifo_len(dlci->fifo);
+}
+
+static void gsmtty_flush_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Caution needed: If we implement reliable transport classes
+ then the data being transmitted can't simply be junked once
+ it has first hit the stack. Until then we can just blow it
+ away */
+ kfifo_reset(dlci->fifo);
+ /* Need to unhook this DLCI from the transmit queue logic */
+}
+
+static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* The FIFO handles the queue so the kernel will do the right
+ thing waiting on chars_in_buffer before calling us. No work
+ to do here */
+}
+
+static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return dlci->modem_rx;
+}
+
+static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp,
+ unsigned int set, unsigned int clear)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ unsigned int modem_tx = dlci->modem_tx;
+
+ modem_tx &= ~clear;
+ modem_tx |= set;
+
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ return gsmtty_modem_update(dlci, 0);
+ }
+ return 0;
+}
+
+
+static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ /* For the moment it's fixed. In actual fact the speed information
+ for the virtual channel can be propagated in both directions by
+ the RPN control message. This however rapidly gets nasty as we
+ then have to remap modem signals each way according to whether
+ our virtual cable is null modem etc. */
+ tty_termios_copy_hw(tty->termios, old);
+}
+
+static void gsmtty_throttle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->throttled = 1;
+ /* Send an MSC with DTR cleared */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static void gsmtty_unthrottle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx |= TIOCM_DTR;
+ dlci->throttled = 0;
+ /* Send an MSC with DTR set */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ int encode = 0; /* Off */
+
+ if (state == -1) /* "On indefinitely" - we can't encode this
+ properly */
+ encode = 0x0F;
+ else if (state > 0) {
+ encode = state / 200; /* mS to encoding */
+ if (encode > 0x0F)
+ encode = 0x0F; /* Best effort */
+ }
+ return gsmtty_modem_update(dlci, encode);
+}
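+
+/*
+ * Worked example for the encoding above: a requested break of 500ms
+ * gives 500 / 200 = 2, so break lengths are reported to the modem in
+ * 200ms units and saturate at the four bit maximum of 0x0F (3 seconds).
+ */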
+
+static struct tty_driver *gsm_tty_driver;
+
+/* Virtual ttys for the demux */
+static const struct tty_operations gsmtty_ops = {
+ .open = gsmtty_open,
+ .close = gsmtty_close,
+ .write = gsmtty_write,
+ .write_room = gsmtty_write_room,
+ .chars_in_buffer = gsmtty_chars_in_buffer,
+ .flush_buffer = gsmtty_flush_buffer,
+ .ioctl = gsmtty_ioctl,
+ .throttle = gsmtty_throttle,
+ .unthrottle = gsmtty_unthrottle,
+ .set_termios = gsmtty_set_termios,
+ .hangup = gsmtty_hangup,
+ .wait_until_sent = gsmtty_wait_until_sent,
+ .tiocmget = gsmtty_tiocmget,
+ .tiocmset = gsmtty_tiocmset,
+ .break_ctl = gsmtty_break_ctl,
+};
+
+
+
+static int __init gsm_init(void)
+{
+ /* Fill in our line protocol discipline, and register it */
+ int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+ if (status != 0) {
+ printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
+ return status;
+ }
+
+ gsm_tty_driver = alloc_tty_driver(256);
+ if (!gsm_tty_driver) {
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty allocation failed.\n");
+ return -EINVAL;
+ }
+ gsm_tty_driver->owner = THIS_MODULE;
+ gsm_tty_driver->driver_name = "gsmtty";
+ gsm_tty_driver->name = "gsmtty";
+ gsm_tty_driver->major = 0; /* Dynamic */
+ gsm_tty_driver->minor_start = 0;
+ gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_HARDWARE_BREAK;
+ gsm_tty_driver->init_termios = tty_std_termios;
+ /* Fixme */
+ gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
+ tty_set_operations(gsm_tty_driver, &gsmtty_ops);
+
+ spin_lock_init(&gsm_mux_lock);
+
+ if (tty_register_driver(gsm_tty_driver)) {
+ put_tty_driver(gsm_tty_driver);
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty registration failed.\n");
+ return -EBUSY;
+ }
+ printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
+ return 0;
+}
+
+static void __exit gsm_exit(void)
+{
+ int status = tty_unregister_ldisc(N_GSM0710);
+ if (status != 0)
+ printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
+ tty_unregister_driver(gsm_tty_driver);
+ put_tty_driver(gsm_tty_driver);
+ printk(KERN_INFO "gsm_init: unloaded.\n");
+}
+
+module_init(gsm_init);
+module_exit(gsm_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 47e8f7b0e4c..66d2917b003 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -296,8 +296,8 @@ checksum_err:
return -EIO;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long nvram_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int i;
@@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
for (i = 0; i < NVRAM_BYTES; ++i)
@@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
case NVRAM_SETCKS:
@@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
default:
@@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = nvram_read,
.write = nvram_write,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
.open = nvram_open,
.release = nvram_release,
};
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index f80810901db..043a1c7b86b 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -94,8 +94,9 @@ static int get_flash_id(void)
return c2;
}
-static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg)
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
+ lock_kernel();
switch (cmd) {
case CMD_WRITE_DISABLE:
gbWriteBase64Enable = 0;
@@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm
default:
gbWriteBase64Enable = 0;
gbWriteEnable = 0;
+ unlock_kernel();
return -EINVAL;
}
+ unlock_kernel();
return 0;
}
@@ -631,7 +634,7 @@ static const struct file_operations flash_fops =
.llseek = flash_llseek,
.read = flash_read,
.write = flash_write,
- .ioctl = flash_ioctl,
+ .unlocked_ioctl = flash_ioctl,
};
static struct miscdevice flash_miscdev =
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 90b199f97be..e7956acf2ad 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -106,7 +106,6 @@ static int major; /* major number we get from the kernel */
struct cm4000_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* OS node (major,minor) */
unsigned char atr[MAX_ATR];
unsigned char rbuf[512];
@@ -884,8 +883,7 @@ static void monitor_card(unsigned long p)
/* slow down warning, but prompt immediately after insertion */
if (dev->cwarn == 0 || dev->cwarn == 10) {
set_bit(IS_BAD_CARD, &dev->flags);
- printk(KERN_WARNING MODULE_NAME ": device %s: ",
- dev->node.dev_name);
+ dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
if (test_bit(IS_BAD_CSUM, &dev->flags)) {
DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
"be zero) failed\n", dev->atr_csum);
@@ -1781,11 +1779,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
goto cs_release;
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = NULL;
- link->dev_node = &dev->node;
return 0;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a6a70e476be..c0775c844e0 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -72,7 +72,6 @@ static struct class *cmx_class;
struct reader_dev {
struct pcmcia_device *p_dev;
- dev_node_t node;
wait_queue_head_t devq;
wait_queue_head_t poll_wait;
wait_queue_head_t read_wait;
@@ -568,10 +567,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
}
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = &dev->node;
DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index dff24dae148..63c32e3f23b 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -195,9 +195,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = ipwireless_interrupt;
-
INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
@@ -205,8 +202,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
ipw->is_v2_card, signalled_reboot_callback,
ipw);
- ret = pcmcia_request_irq(link, &link->irq);
-
+ ret = pcmcia_request_irq(link, ipwireless_interrupt);
if (ret != 0)
goto exit;
@@ -217,7 +213,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
(unsigned int) link->io.BasePort1,
(unsigned int) (link->io.BasePort1 +
link->io.NumPorts1 - 1),
- (unsigned int) link->irq.AssignedIRQ);
+ (unsigned int) link->irq);
if (ipw->attr_memory && ipw->common_memory)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n",
@@ -232,8 +228,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (!ipw->network)
goto exit;
- ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
- ipw->nodes);
+ ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
if (!ipw->tty)
goto exit;
@@ -248,8 +243,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (ret != 0)
goto exit;
- link->dev_node = &ipw->nodes[0];
-
return 0;
exit:
@@ -271,8 +264,6 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
- pcmcia_disable_device(ipw->link);
-
if (ipw->common_memory) {
release_mem_region(ipw->request_common_memory.Base,
ipw->request_common_memory.Size);
@@ -288,7 +279,6 @@ static void release_ipwireless(struct ipw_dev *ipw)
if (ipw->attr_memory)
pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
- /* Break the link with Card Services */
pcmcia_disable_device(ipw->link);
}
@@ -313,9 +303,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
ipw->link = link;
link->priv = ipw;
- /* Link this device into our device list. */
- link->dev_node = &ipw->nodes[0];
-
ipw->hardware = ipwireless_hardware_create();
if (!ipw->hardware) {
kfree(ipw);
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
index 0e0363af9ab..96d0ef31b17 100644
--- a/drivers/char/pcmcia/ipwireless/main.h
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -54,7 +54,6 @@ struct ipw_dev {
void __iomem *common_memory;
win_req_t request_common_memory;
- dev_node_t nodes[2];
/* Reference to attribute memory, containing CIS data */
void *attribute_memory;
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 2bb7874a689..1a2c2c3b068 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -487,7 +487,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
return tty_mode_ioctl(linux_tty, file, cmd , arg);
}
-static int add_tty(dev_node_t *nodesp, int j,
+static int add_tty(int j,
struct ipw_hardware *hardware,
struct ipw_network *network, int channel_idx,
int secondary_channel_idx, int tty_type)
@@ -510,19 +510,13 @@ static int add_tty(dev_node_t *nodesp, int j,
ipwireless_associate_network_tty(network,
secondary_channel_idx,
ttys[j]);
- if (nodesp != NULL) {
- sprintf(nodesp->dev_name, "ttyIPWp%d", j);
- nodesp->major = ipw_tty_driver->major;
- nodesp->minor = j + ipw_tty_driver->minor_start;
- }
if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
report_registering(ttys[j]);
return 0;
}
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
- struct ipw_network *network,
- dev_node_t *nodes)
+ struct ipw_network *network)
{
int i, j;
@@ -539,26 +533,23 @@ struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
if (allfree) {
j = i;
- if (add_tty(&nodes[0], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
TTYTYPE_MODEM))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(&nodes[1], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, -1,
TTYTYPE_MONITOR))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(NULL, j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_RAS, -1,
TTYTYPE_RAS_RAW))
return NULL;
- nodes[0].next = &nodes[1];
- nodes[1].next = NULL;
-
return ttys[i];
}
}
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
index b0deb9168b6..4da6c201f72 100644
--- a/drivers/char/pcmcia/ipwireless/tty.h
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -34,8 +34,7 @@ int ipwireless_tty_init(void);
void ipwireless_tty_release(void);
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
- struct ipw_network *net,
- dev_node_t *nodes);
+ struct ipw_network *net);
void ipwireless_tty_free(struct ipw_tty *tty);
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index c31a0d913d3..308903ec8bf 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -220,7 +220,6 @@ typedef struct _mgslpc_info {
/* PCMCIA support */
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
/* SPPP/Cisco HDLC device parts */
@@ -552,10 +551,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
/* Initialize the struct pcmcia_device structure */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -608,9 +603,7 @@ static int mgslpc_config(struct pcmcia_device *link)
link->conf.ConfigIndex = 8;
link->conf.Present = PRESENT_OPTION;
- link->irq.Handler = mgslpc_isr;
-
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, mgslpc_isr);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -618,17 +611,12 @@ static int mgslpc_config(struct pcmcia_device *link)
goto failed;
info->io_base = link->io.BasePort1;
- info->irq_level = link->irq.AssignedIRQ;
-
- /* add to linked list of devices */
- sprintf(info->node.dev_name, "mgslpc0");
- info->node.major = info->node.minor = 0;
- link->dev_node = &info->node;
+ info->irq_level = link->irq;
- printk(KERN_INFO "%s: index 0x%02x:",
- info->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa7..02abfddce45 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
char *name;
int fl;
- name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
return -ENOMEM;
- sprintf (name, CHRDEV "%x", minor);
-
port = parport_find_number (minor);
if (!port) {
printk (KERN_WARNING "%s: no associated port!\n", name);
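The ppdev change above is the standard kasprintf() simplification: allocation and formatting collapse into one call, and the result is still kmalloc'd, so the caller's existing kfree() is unchanged. A minimal sketch, assuming CHRDEV expands to the "ppdev" name prefix:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_dev_name(int minor)
{
	/* allocates exactly enough space and formats in one step;
	 * returns NULL on allocation failure, caller must kfree() */
	return kasprintf(GFP_KERNEL, "ppdev%x", minor);
}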
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 606048b72bc..85c004a518e 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
return ps3flash_writeback(ps3flash_dev);
}
-static int ps3flash_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int ps3flash_fsync(struct file *file, int datasync)
{
return ps3flash_writeback(ps3flash_dev);
}
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 00000000000..74f00b5ffa3
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmsg_dump.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
+
+#define RECORD_SIZE 4096
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+ "start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+ "size of reserved RAM used to store oops/panic logs");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct ramoops_context {
+ struct kmsg_dumper dump;
+ void *virt_addr;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ int count;
+ int max_count;
+} oops_cxt;
+
+static void ramoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
+{
+ struct ramoops_context *cxt = container_of(dumper,
+ struct ramoops_context, dump);
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ int res;
+ char *buf;
+ struct timeval timestamp;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+ memset(buf, '\0', RECORD_SIZE);
+ res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
+ buf += res;
+ do_gettimeofday(&timestamp);
+ res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
+ buf += res;
+
+ l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
+ l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
+
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
+
+ memcpy(buf, s1 + s1_start, l1_cpy);
+ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
+
+ cxt->count = (cxt->count + 1) % cxt->max_count;
+}
+
+static int __init ramoops_init(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+ int err = -EINVAL;
+
+ if (!mem_size) {
+ printk(KERN_ERR "ramoops: invalid size specification");
+ goto fail3;
+ }
+
+ mem_size = rounddown_pow_of_two(mem_size);
+
+ if (mem_size < RECORD_SIZE) {
+ printk(KERN_ERR "ramoops: size too small");
+ goto fail3;
+ }
+
+ cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->count = 0;
+ cxt->size = mem_size;
+ cxt->phys_addr = mem_address;
+
+ if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
+ printk(KERN_ERR "ramoops: request mem region failed");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
+ if (!cxt->virt_addr) {
+ printk(KERN_ERR "ramoops: ioremap failed");
+ goto fail2;
+ }
+
+ cxt->dump.dump = ramoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ iounmap(cxt->virt_addr);
+fail2:
+ release_mem_region(cxt->phys_addr, cxt->size);
+fail3:
+ return err;
+}
+
+static void __exit ramoops_exit(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+
+ iounmap(cxt->virt_addr);
+ release_mem_region(cxt->phys_addr, cxt->size);
+}
+
+
+module_init(ramoops_init);
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
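For readers of the new driver above: each RECORD_SIZE slot in the reserved region starts with the "====" marker and a "seconds.microseconds" timestamp, followed by the tail of the kernel message buffer, and the slots are reused round-robin. A rough userspace sketch that scans a raw dump of that region and prints each record header; the file name and how the image is captured are assumptions, nothing this patch provides:

#include <stdio.h>
#include <string.h>

#define RECORD_SIZE 4096

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "ramoops.bin", "rb");
	char rec[RECORD_SIZE];

	if (!f)
		return 1;
	while (fread(rec, 1, RECORD_SIZE, f) == RECORD_SIZE) {
		if (strncmp(rec, "====", 4))
			continue;	/* slot never written */
		/* print the "====sec.usec" header of this record */
		printf("%.32s\n", rec);
	}
	fclose(f);
	return 0;
}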
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2fd3d39995d..8d85587b6d4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -257,6 +257,7 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
/*
* The minimum number of bits of entropy before we wake up a read on
@@ -414,7 +415,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
- __u8 *last_data;
+ __u8 last_data[EXTRACT_SIZE];
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
-#define EXTRACT_SIZE 10
-
/*********************************************************************
*
* Entropy extraction routines
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
while (nbytes) {
extract_buf(r, tmp);
- if (r->last_data) {
+ if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
- /* Enable continuous test in fips mode */
- if (fips_enabled)
- r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
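The random.c change above embeds last_data directly in the pool structure, so the FIPS "continuous test" no longer depends on a conditional kmalloc() and is simply gated on fips_enabled. The test itself compares every freshly extracted block against the previous one and treats a repeat as a hardware failure. A standalone sketch of that check; the function name is illustrative, not the kernel's:

#include <string.h>

#define EXTRACT_SIZE 10

/* returns 0 when the new block passes the continuous test, -1 on a repeat */
static int continuous_test(unsigned char last[EXTRACT_SIZE],
			   const unsigned char fresh[EXTRACT_SIZE])
{
	if (!memcmp(fresh, last, EXTRACT_SIZE))
		return -1;		/* duplicated output: fail hard */
	memcpy(last, fresh, EXTRACT_SIZE);
	return 0;
}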
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 8756ab0daa8..b38942f6bf3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp)
/*
* Forward ioctls to the underlying block device.
*/
-static int
-raw_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long
+raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
struct block_device *bdev = filp->private_data;
+ int ret;
+
+ lock_kernel();
+ ret = blkdev_ioctl(bdev, 0, command, arg);
+ unlock_kernel();
- return blkdev_ioctl(bdev, 0, command, arg);
+ return ret;
}
static void bind_device(struct raw_config_request *rq)
@@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq)
* Deal with ioctls against the raw-device control interface, to bind
* and unbind other raw devices.
*/
-static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long raw_ctl_ioctl(struct file *filp, unsigned int command,
+ unsigned long arg)
{
struct raw_config_request rq;
struct raw_device_data *rawdev;
int err = 0;
+ lock_kernel();
switch (command) {
case RAW_SETBIND:
case RAW_GETBIND:
@@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
break;
}
out:
+ unlock_kernel();
return err;
}
static const struct file_operations raw_fops = {
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .write = do_sync_write,
- .aio_write = blkdev_aio_write,
- .fsync = blkdev_fsync,
- .open = raw_open,
- .release= raw_release,
- .ioctl = raw_ioctl,
- .owner = THIS_MODULE,
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+ .write = do_sync_write,
+ .aio_write = blkdev_aio_write,
+ .fsync = blkdev_fsync,
+ .open = raw_open,
+ .release = raw_release,
+ .unlocked_ioctl = raw_ioctl,
+ .owner = THIS_MODULE,
};
static const struct file_operations raw_ctl_fops = {
- .ioctl = raw_ctl_ioctl,
- .open = raw_open,
- .owner = THIS_MODULE,
+ .unlocked_ioctl = raw_ctl_ioctl,
+ .open = raw_open,
+ .owner = THIS_MODULE,
};
static struct cdev raw_cdev;
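The raw driver conversion above is the usual BKL push-down recipe from this series: .ioctl (which the VFS called with the big kernel lock held) becomes .unlocked_ioctl, and the driver takes and drops the lock itself so behaviour is unchanged for now. A generic sketch of the pattern, with foo_do_ioctl() standing in for the existing command handling (a placeholder, not a real function in this tree):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static long foo_do_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg);	/* the old ioctl body */

static long foo_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the old BKL semantics, explicitly */
	ret = foo_do_ioctl(filp, cmd, arg);
	unlock_kernel();
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};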
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 8dfd24721a8..ecbe479c7d6 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -176,23 +176,6 @@ static void config_setup(struct cyclades_port *);
static void show_status(int);
#endif
-#ifdef CONFIG_REMOTE_DEBUG
-static void debug_setup(void);
-void queueDebugChar(int c);
-int getDebugChar(void);
-
-#define DEBUG_PORT 1
-#define DEBUG_LEN 256
-
-typedef struct {
- int in;
- int out;
- unsigned char buf[DEBUG_LEN];
-} debugq;
-
-debugq debugiq;
-#endif
-
/*
* I have my own version of udelay(), as it is needed when initialising
* the chip, before the delay loop has been calibrated. Should probably
@@ -515,11 +498,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- panic("TxInt on debug port!!!");
- }
-#endif
/* validate the port number (as configured and open) */
if ((channel < 0) || (NR_PORTS <= channel)) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
@@ -627,7 +605,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
char data;
int char_count;
int save_cnt;
- int len;
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
@@ -635,14 +612,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
info->last_active = jiffies;
save_cnt = char_count = base_addr[CyRFOC];
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- while (char_count--) {
- data = base_addr[CyRDR];
- queueDebugChar(data);
- }
- } else
-#endif
/* if there is nowhere to put the data, discard it */
if (info->tty == 0) {
while (char_count--) {
@@ -1528,7 +1497,6 @@ static int
cy_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- unsigned long val;
struct cyclades_port *info = tty->driver_data;
int ret_val = 0;
void __user *argp = (void __user *)arg;
@@ -2197,9 +2165,7 @@ static int __init serial167_init(void)
port_num++;
info++;
}
-#ifdef CONFIG_REMOTE_DEBUG
- debug_setup();
-#endif
+
ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0,
"cd2401_errors", cd2401_rxerr_interrupt);
if (ret) {
@@ -2520,193 +2486,4 @@ static int __init serial167_console_init(void)
console_initcall(serial167_console_init);
-#ifdef CONFIG_REMOTE_DEBUG
-void putDebugChar(int c)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
-
- local_irq_save(flags);
-
- /* Ensure transmitter is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_XMTR;
-
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyTxMpty;
-
- while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20) {
- /* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- base_addr[CyTDR] = c;
- base_addr[CyTEOIR] = 0;
- break;
- } else
- base_addr[CyTEOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-}
-
-int getDebugChar()
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
- int i, c;
-
- i = debugiq.out;
- if (i != debugiq.in) {
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- return c;
- }
- /* OK, nothing in queue, wait in poll loop */
-
- local_irq_save(flags);
-
- /* Ensure receiver is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
-#if 0
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_RCVR;
-#endif
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyRxData;
-
- while (1) {
- if (pcc2chip[PccSCCRICR] & 0x20) {
- /* We have a Rx int. Acknowledge it */
- sink = pcc2chip[PccRPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- int cnt = base_addr[CyRFOC];
- while (cnt-- > 0) {
- c = base_addr[CyRDR];
- if (c == 0)
- printk
- ("!! debug char is null (cnt=%d) !!",
- cnt);
- else
- queueDebugChar(c);
- }
- base_addr[CyREOIR] = 0;
- i = debugiq.out;
- if (i == debugiq.in)
- panic("Debug input queue empty!");
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- break;
- } else
- base_addr[CyREOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-
- return (c);
-}
-
-void queueDebugChar(int c)
-{
- int i;
-
- i = debugiq.in;
- debugiq.buf[i] = c;
- if (++i == DEBUG_LEN)
- i = 0;
- if (i != debugiq.out)
- debugiq.in = i;
-}
-
-static void debug_setup()
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int i, cflag;
-
- cflag = B19200;
-
- local_irq_save(flags);
-
- for (i = 0; i < 4; i++) {
- base_addr[CyCAR] = i;
- base_addr[CyLICR] = i << 2;
- }
-
- debugiq.in = debugiq.out = 0;
-
- base_addr[CyCAR] = DEBUG_PORT;
-
- /* baud rate */
- i = cflag & CBAUD;
-
- base_addr[CyIER] = 0;
-
- base_addr[CyCMR] = CyASYNC;
- base_addr[CyLICR] = DEBUG_PORT << 2;
- base_addr[CyLIVR] = 0x5c;
-
- /* tx and rx baud rate */
-
- base_addr[CyTCOR] = baud_co[i];
- base_addr[CyTBPR] = baud_bpr[i];
- base_addr[CyRCOR] = baud_co[i] >> 5;
- base_addr[CyRBPR] = baud_bpr[i];
-
- /* set line characteristics according configuration */
-
- base_addr[CySCHR1] = 0;
- base_addr[CySCHR2] = 0;
- base_addr[CySCRL] = 0;
- base_addr[CySCRH] = 0;
- base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE;
- base_addr[CyCOR2] = 0;
- base_addr[CyCOR3] = Cy_1_STOP;
- base_addr[CyCOR4] = baud_cor4[i];
- base_addr[CyCOR5] = 0;
- base_addr[CyCOR6] = 0;
- base_addr[CyCOR7] = 0;
-
- write_cy_cmd(base_addr, CyINIT_CHAN);
- write_cy_cmd(base_addr, CyENB_RCVR);
-
- base_addr[CyCAR] = DEBUG_PORT; /* !!! Is this needed? */
-
- base_addr[CyRTPRL] = 2;
- base_addr[CyRTPRH] = 0;
-
- base_addr[CyMSVR1] = CyRTS;
- base_addr[CyMSVR2] = CyDTR;
-
- base_addr[CyIER] = CyRxData;
-
- local_irq_restore(flags);
-
-} /* debug_setup */
-
-#endif
-
MODULE_LICENSE("GPL");
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d30..5d15630a583 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -1,7 +1,4 @@
-/* -*- linux-c -*-
- *
- * $Id: sysrq.c,v 1.15 1998/08/23 14:56:41 mj Exp $
- *
+/*
* Linux Magic System Request Key Hacks
*
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
@@ -10,8 +7,13 @@
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* overhauled to use key registration
* based upon discusions in irc://irc.openprojects.net/#kernelnewbies
+ *
+ * Copyright (c) 2010 Dmitry Torokhov
+ * Input handler conversion
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
@@ -39,33 +41,34 @@
#include <linux/hrtimer.h>
#include <linux/oom.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-int __read_mostly __sysrq_enabled = 1;
-
-static int __read_mostly sysrq_always_enabled;
+static int __read_mostly sysrq_enabled = 1;
+static bool __read_mostly sysrq_always_enabled;
-int sysrq_on(void)
+static bool sysrq_on(void)
{
- return __sysrq_enabled || sysrq_always_enabled;
+ return sysrq_enabled || sysrq_always_enabled;
}
/*
* A value of 1 means 'all', other nonzero values are an op mask:
*/
-static inline int sysrq_on_mask(int mask)
+static bool sysrq_on_mask(int mask)
{
- return sysrq_always_enabled || __sysrq_enabled == 1 ||
- (__sysrq_enabled & mask);
+ return sysrq_always_enabled ||
+ sysrq_enabled == 1 ||
+ (sysrq_enabled & mask);
}
static int __init sysrq_always_enabled_setup(char *str)
{
- sysrq_always_enabled = 1;
- printk(KERN_INFO "debug: sysrq always enabled.\n");
+ sysrq_always_enabled = true;
+ pr_info("sysrq always enabled.\n");
return 1;
}
@@ -76,6 +79,7 @@ __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
int i;
+
i = key - '0';
console_loglevel = 7;
printk("Loglevel set to %d\n", i);
@@ -101,7 +105,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_SAK_op (*(struct sysrq_key_op *)0)
+#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_VT
@@ -119,7 +123,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
+#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
static void sysrq_handle_crash(int key, struct tty_struct *tty)
@@ -195,7 +199,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
.action_msg = "Show Locks Held",
};
#else
-#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
+#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_SMP
@@ -289,7 +293,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
- ftrace_dump();
+ ftrace_dump(DUMP_ALL);
}
static struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
@@ -298,7 +302,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
-#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showmem(int key, struct tty_struct *tty)
@@ -477,6 +481,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key)
i = sysrq_key_table_key2index(key);
if (i != -1)
op_p = sysrq_key_table[i];
+
return op_p;
}
@@ -488,11 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-/*
- * This is the non-locking version of handle_sysrq. It must/can only be called
- * by sysrq key handlers, as they are inside of the lock
- */
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -544,10 +545,6 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-/*
- * This function is called by the keyboard handler when SysRq is pressed
- * and any other keycode arrives.
- */
void handle_sysrq(int key, struct tty_struct *tty)
{
if (sysrq_on())
@@ -555,10 +552,177 @@ void handle_sysrq(int key, struct tty_struct *tty)
}
EXPORT_SYMBOL(handle_sysrq);
+#ifdef CONFIG_INPUT
+
+/* Simple translation table for the SysRq keys */
+static const unsigned char sysrq_xlate[KEY_MAX + 1] =
+ "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
+ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
+ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
+ "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
+ "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
+ "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
+ "\r\000/"; /* 0x60 - 0x6f */
+
+static bool sysrq_down;
+static int sysrq_alt_use;
+static int sysrq_alt;
+
+static bool sysrq_filter(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ if (type != EV_KEY)
+ goto out;
+
+ switch (code) {
+
+ case KEY_LEFTALT:
+ case KEY_RIGHTALT:
+ if (value)
+ sysrq_alt = code;
+ else if (sysrq_down && code == sysrq_alt_use)
+ sysrq_down = false;
+ break;
+
+ case KEY_SYSRQ:
+ if (value == 1 && sysrq_alt) {
+ sysrq_down = true;
+ sysrq_alt_use = sysrq_alt;
+ }
+ break;
+
+ default:
+ if (sysrq_down && value && value != 2)
+ __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ break;
+ }
+
+out:
+ return sysrq_down;
+}
+
+static int sysrq_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ sysrq_down = false;
+ sysrq_alt = 0;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "sysrq";
+
+ error = input_register_handle(handle);
+ if (error) {
+ pr_err("Failed to register input sysrq handler, error %d\n",
+ error);
+ goto err_free;
+ }
+
+ error = input_open_device(handle);
+ if (error) {
+ pr_err("Failed to open input device, error %d\n", error);
+ goto err_unregister;
+ }
+
+ return 0;
+
+ err_unregister:
+ input_unregister_handle(handle);
+ err_free:
+ kfree(handle);
+ return error;
+}
+
+static void sysrq_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * We are matching on KEY_LEFTALT instead of KEY_SYSRQ because not all
+ * keyboards have the SysRq key predefined, and users may add it to their
+ * keymap later; we do expect all such keyboards to have a left Alt key.
+ */
+static const struct input_device_id sysrq_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { BIT_MASK(KEY_LEFTALT) },
+ },
+ { },
+};
+
+static struct input_handler sysrq_handler = {
+ .filter = sysrq_filter,
+ .connect = sysrq_connect,
+ .disconnect = sysrq_disconnect,
+ .name = "sysrq",
+ .id_table = sysrq_ids,
+};
+
+static bool sysrq_handler_registered;
+
+static inline void sysrq_register_handler(void)
+{
+ int error;
+
+ error = input_register_handler(&sysrq_handler);
+ if (error)
+ pr_err("Failed to register input handler, error %d", error);
+ else
+ sysrq_handler_registered = true;
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+ if (sysrq_handler_registered) {
+ input_unregister_handler(&sysrq_handler);
+ sysrq_handler_registered = false;
+ }
+}
+
+#else
+
+static inline void sysrq_register_handler(void)
+{
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+}
+
+#endif /* CONFIG_INPUT */
+
+int sysrq_toggle_support(int enable_mask)
+{
+ bool was_enabled = sysrq_on();
+
+ sysrq_enabled = enable_mask;
+
+ if (was_enabled != sysrq_on()) {
+ if (sysrq_on())
+ sysrq_register_handler();
+ else
+ sysrq_unregister_handler();
+ }
+
+ return 0;
+}
+
static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
struct sysrq_key_op *remove_op_p)
{
-
int retval;
unsigned long flags;
@@ -599,6 +763,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
return -EFAULT;
__handle_sysrq(c, NULL, 0);
}
+
return count;
}
@@ -606,10 +771,28 @@ static const struct file_operations proc_sysrq_trigger_operations = {
.write = write_sysrq_trigger,
};
+static void sysrq_init_procfs(void)
+{
+ if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
+ &proc_sysrq_trigger_operations))
+ pr_err("Failed to register proc interface\n");
+}
+
+#else
+
+static inline void sysrq_init_procfs(void)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
static int __init sysrq_init(void)
{
- proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
+ sysrq_init_procfs();
+
+ if (sysrq_on())
+ sysrq_register_handler();
+
return 0;
}
module_init(sysrq_init);
-#endif
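The sysrq rework above hooks SysRq directly into the input layer: a filter watches for Alt being held, latches on KEY_SYSRQ, and feeds the next key through __handle_sysrq() while swallowing the chord. Condensed to its registration skeleton, an input filter of this kind looks roughly as follows; the foo_ names are placeholders and the sysrq-specific state handling is omitted:

#include <linux/input.h>
#include <linux/slab.h>

static bool foo_filter(struct input_handle *handle, unsigned int type,
		       unsigned int code, int value)
{
	/* return true to swallow the event, false to let it through */
	return false;
}

static int foo_connect(struct input_handler *handler, struct input_dev *dev,
		       const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "foo";

	error = input_register_handle(handle);
	if (error)
		goto err_free;
	error = input_open_device(handle);
	if (error)
		goto err_unregister;
	return 0;

err_unregister:
	input_unregister_handle(handle);
err_free:
	kfree(handle);
	return error;
}

static void foo_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}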
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f5fc64f89c5..4dc338f3d1a 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -17,14 +17,16 @@ menuconfig TCG_TPM
obtained at: <http://sourceforge.net/projects/trousers>. To
compile this driver as a module, choose M here; the module
will be called tpm. If unsure, say N.
- Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+ Notes:
+ 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
and CONFIG_PNPACPI.
+ 2) Without ACPI enabled, the BIOS event log won't be accessible,
+ which is required to validate the PCR 0-7 values.
if TCG_TPM
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
- depends on PNP
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 068c816e694..05ad4a17a28 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev)
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
+#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define SAVESTATE_RESULT_SIZE 10
+
+static struct tpm_input_header savestate_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = TPM_ORD_SAVESTATE
+};
+
+/* Bug workaround - some TPMs don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+static int __init tpm_suspend_setup(char *str)
+{
+ get_option(&str, &tpm_suspend_pcr);
+ return 1;
+}
+__setup("tpm_suspend_pcr=", tpm_suspend_setup);
+
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware);
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- u8 savestate[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* blob length (in bytes) */
- 0, 0, 0, 152 /* TPM_ORD_SaveState */
- };
+ struct tpm_cmd_t cmd;
+ int rc;
+
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
- tpm_transmit(chip, savestate, sizeof(savestate));
- return 0;
+ /* for buggy tpm, flush pcrs with extend to selected dummy */
+ if (tpm_suspend_pcr) {
+ cmd.header.in = pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
+ memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
+ TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "extending dummy pcr before suspend");
+ }
+
+ /* now do the actual savestate */
+ cmd.header.in = savestate_header;
+ rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
+ "sending savestate before suspend");
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
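In the tpm.c hunk above, the hand-built 10-byte blob and the new savestate_header describe the same TPM 1.2 command, TPM_SaveState (ordinal 152); the optional tpm_suspend_pcr= boot parameter only adds a dummy PCR extend before it. For reference, a sketch of the raw encoding the old array spelled out byte by byte; the helper name is illustrative:

#include <stdint.h>
#include <string.h>

/* TPM 1.2 request header: 2-byte tag, 4-byte length, 4-byte ordinal,
 * all big-endian on the wire */
static void build_savestate(uint8_t buf[10])
{
	static const uint8_t cmd[10] = {
		0x00, 0xC1,		/* TPM_TAG_RQU_COMMAND (193) */
		0x00, 0x00, 0x00, 0x0A,	/* total length: 10 bytes    */
		0x00, 0x00, 0x00, 0x98,	/* TPM_ORD_SaveState (152)   */
	};

	memcpy(buf, cmd, sizeof(cmd));
}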
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 94345994f8a..24314a9cffe 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -598,7 +598,7 @@ out_err:
tpm_remove_hardware(chip->dev);
return rc;
}
-
+#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
{
@@ -663,7 +663,7 @@ static struct pnp_driver tis_pnp_driver = {
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
-
+#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
return tpm_pm_suspend(&dev->dev, msg);
@@ -690,21 +690,21 @@ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
{
int rc;
+#ifdef CONFIG_PNP
+ if (!force)
+ return pnp_register_driver(&tis_pnp_driver);
+#endif
- if (force) {
- rc = platform_driver_register(&tis_drv);
- if (rc < 0)
- return rc;
- if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
- return PTR_ERR(pdev);
- if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
- platform_device_unregister(pdev);
- platform_driver_unregister(&tis_drv);
- }
+ rc = platform_driver_register(&tis_drv);
+ if (rc < 0)
return rc;
+ if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
+ return PTR_ERR(pdev);
+ if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&tis_drv);
}
-
- return pnp_register_driver(&tis_pnp_driver);
+ return rc;
}
static void __exit cleanup_tis(void)
@@ -728,12 +728,14 @@ static void __exit cleanup_tis(void)
list_del(&i->list);
}
spin_unlock(&tis_lock);
-
- if (force) {
- platform_device_unregister(pdev);
- platform_driver_unregister(&tis_drv);
- } else
+#ifdef CONFIG_PNP
+ if (!force) {
pnp_unregister_driver(&tis_pnp_driver);
+ return;
+ }
+#endif
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&tis_drv);
}
module_init(init_tis);
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 7ee52164d47..cc1e9850d65 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
- * passed are marked as without error. Returns the number added.
+ * passed are marked with the supplied flag. Returns the number added.
*
* Locking: Called functions may take tty->buf.lock
*/
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 1144a04cda6..42f7fa442ff 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
int i = vdev->unit_address;
int j;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
if (i >= VIOTAPE_MAX_TAPE)
return -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c2287..8c99bf1b5e9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -33,35 +33,6 @@
#include <linux/workqueue.h>
#include "hvc_console.h"
-/* Moved here from .h file in order to disable MULTIPORT. */
-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-
-struct virtio_console_multiport_conf {
- struct virtio_console_config config;
- /* max. number of ports this device can hold */
- __u32 max_nr_ports;
- /* number of ports added so far */
- __u32 nr_ports;
-} __attribute__((packed));
-
-/*
- * A message that's passed between the Host and the Guest for a
- * particular port.
- */
-struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
-};
-
-/* Some events for control messages */
-#define VIRTIO_CONSOLE_PORT_READY 0
-#define VIRTIO_CONSOLE_CONSOLE_PORT 1
-#define VIRTIO_CONSOLE_RESIZE 2
-#define VIRTIO_CONSOLE_PORT_OPEN 3
-#define VIRTIO_CONSOLE_PORT_NAME 4
-#define VIRTIO_CONSOLE_PORT_REMOVE 5
-
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -107,6 +78,9 @@ struct console {
/* The hvc device associated with this console port */
struct hvc_struct *hvc;
+ /* The size of the console */
+ struct winsize ws;
+
/*
* This number identifies the number that we used to register
* with hvc in hvc_instantiate() and hvc_alloc(); this is the
@@ -139,7 +113,6 @@ struct ports_device {
* notification
*/
struct work_struct control_work;
- struct work_struct config_work;
struct list_head ports;
@@ -150,7 +123,7 @@ struct ports_device {
spinlock_t cvq_lock;
/* The current config space is stored here */
- struct virtio_console_multiport_conf config;
+ struct virtio_console_config config;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -189,6 +162,9 @@ struct port {
*/
spinlock_t inbuf_lock;
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
/* The IO vqs for this port */
struct virtqueue *in_vq, *out_vq;
@@ -214,6 +190,8 @@ struct port {
/* The 'id' to identify the port with the Host */
u32 id;
+ bool outvq_full;
+
/* Is the host device open */
bool host_connected;
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port)
unsigned int len;
vq = port->in_vq;
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
- vq->vq_ops->kick(vq);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
return ret;
}
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port)
if (port->inbuf)
buf = port->inbuf;
else
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
ret = 0;
while (buf) {
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port)
ret++;
free_buf(buf);
}
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
}
port->inbuf = NULL;
if (ret)
@@ -403,57 +381,96 @@ out:
return ret;
}
-static ssize_t send_control_msg(struct port *port, unsigned int event,
- unsigned int value)
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
- if (!use_multiport(port->portdev))
+ if (!use_multiport(portdev))
return 0;
- cpkt.id = port->id;
+ cpkt.id = port_id;
cpkt.event = event;
cpkt.value = value;
- vq = port->portdev->c_ovq;
+ vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
- vq->vq_ops->kick(vq);
- while (!vq->vq_ops->get_buf(vq, &len))
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
return 0;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ return __send_control_msg(port->portdev, port->id, event, value);
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
{
struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
+ unsigned long flags;
unsigned int len;
out_vq = port->out_vq;
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
sg_init_one(sg, in_buf, in_count);
- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
/* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
+ virtqueue_kick(out_vq);
if (ret < 0) {
in_count = 0;
- goto fail;
+ goto done;
}
- /* Wait till the host acknowledges it pushed out the data we sent. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for ports in blocking mode or for data
+ * from the hvc_console; the tty operations are performed with
+ * spinlocks held so we can't sleep here.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
-fail:
- /* We're expected to return the amount of data we wrote */
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
return in_count;
}
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
}
/* The condition that must be true for polling to end */
-static bool wait_is_over(struct port *port)
+static bool will_read_block(struct port *port)
+{
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
{
- return port_has_data(port) || !port->host_connected;
+ bool ret;
+
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return -EAGAIN;
ret = wait_event_interruptible(port->waitqueue,
- wait_is_over(port));
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
struct port *port;
char *buf;
ssize_t ret;
+ bool nonblock;
port = filp->private_data;
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+
count = min((size_t)(32 * 1024), count);
buf = kmalloc(count, GFP_KERNEL);
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
goto free_buf;
}
- ret = send_buf(port, buf, count);
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
free_buf:
kfree(buf);
+out:
return ret;
}
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
ret = 0;
if (port->inbuf)
ret |= POLLIN | POLLRDNORM;
- if (port->host_connected)
+ if (!will_write_block(port))
ret |= POLLOUT;
if (!port->host_connected)
ret |= POLLHUP;
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp)
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
return 0;
}
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp)
port->guest_connected = true;
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * There might be a chance that we missed reclaiming a few
+ * buffers in the window of the port getting previously closed
+ * and opening now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count)
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
- return send_buf(port, (void *)buf, count);
+ return send_buf(port, (void *)buf, count, false);
}
/*
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count)
{
struct port *port;
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count)
static void resize_console(struct port *port)
{
struct virtio_device *vdev;
- struct winsize ws;
/* The port could have been hot-unplugged */
- if (!port)
+ if (!port || !is_console_port(port))
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(port->cons.hvc, ws);
- }
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
@@ -804,6 +867,13 @@ int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"host_connected: %d\n", port->host_connected);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = {
.read = debugfs_read,
};
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (e.g., port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
/* Remove all port-specific data. */
static int remove_port(struct port *port)
{
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
+#if 0
+ /*
+ * hvc_remove() not called as removing one hvc port
+ * results in other hvc ports getting frozen.
+ *
+ * Once this is resolved in hvc, this functionality
+ * will be enabled. Till that is done, the -EPIPE
+ * return from get_chars() above will help
+ * hvc_console.c to clean up on ports we remove here.
+ */
hvc_remove(port->cons.hvc);
+#endif
}
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port)
/* Remove unused data this port might have received. */
discard_port_data(port);
+ reclaim_consumed_buffers(port);
+
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
kfree(port->name);
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
port = find_port_by_id(portdev, cpkt->id);
- if (!port) {
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev,
}
switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ remove_port(port);
+ break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
break;
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev,
* have to notify the host first.
*/
break;
- case VIRTIO_CONSOLE_RESIZE:
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
if (!is_console_port(port))
break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
port->cons.hvc->irq_requested = 1;
resize_console(port);
break;
+ }
case VIRTIO_CONSOLE_PORT_OPEN:
port->host_connected = cpkt->value;
wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev,
kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
}
break;
- case VIRTIO_CONSOLE_PORT_REMOVE:
- /*
- * Hot unplug the port. We don't decrement nr_ports
- * since we don't want to deal with extra complexities
- * of using the lowest-available port id: We can just
- * pick up the nr_ports number as the id and not have
- * userspace send it to us. This helps us in two
- * ways:
- *
- * - We don't need to have a 'port_id' field in the
- * config space when a port is hot-added. This is a
- * good thing as we might queue up multiple hotplug
- * requests issued in our workqueue.
- *
- * - Another way to deal with this would have been to
- * use a bitmap of the active ports and select the
- * lowest non-active port from that map. That
- * bloats the already tight config space and we
- * would end up artificially limiting the
- * max. number of ports to sizeof(bitmap). Right
- * now we can support 2^32 ports (as the port id is
- * stored in a u32 type).
- *
- */
- remove_port(port);
- break;
}
}
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work)
vq = portdev->c_ivq;
spin_lock(&portdev->cvq_lock);
- while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->cvq_lock);
buf->len = len;
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev)
struct ports_device *portdev;
portdev = vdev->priv;
- if (use_multiport(portdev)) {
- /* Handle port hot-add */
- schedule_work(&portdev->config_work);
- }
- /*
- * We'll use this way of resizing only for legacy support.
- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use
- * control messages to indicate console size changes so that
- * it can be done per-port
- */
- resize_console(find_port_by_id(portdev, 0));
-}
-
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
-{
- struct port_buffer *buf;
- unsigned int nr_added_bufs;
- int ret;
-
- nr_added_bufs = 0;
- do {
- buf = alloc_buf(PAGE_SIZE);
- if (!buf)
- break;
-
- spin_lock_irq(lock);
- ret = add_inbuf(vq, buf);
- if (ret < 0) {
- spin_unlock_irq(lock);
- free_buf(buf);
- break;
- }
- nr_added_bufs++;
- spin_unlock_irq(lock);
- } while (ret > 0);
-
- return nr_added_bufs;
-}
-
-static int add_port(struct ports_device *portdev, u32 id)
-{
- char debugfs_name[16];
- struct port *port;
- struct port_buffer *buf;
- dev_t devt;
- unsigned int nr_added_bufs;
- int err;
-
- port = kmalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto fail;
- }
-
- port->portdev = portdev;
- port->id = id;
-
- port->name = NULL;
- port->inbuf = NULL;
- port->cons.hvc = NULL;
-
- port->host_connected = port->guest_connected = false;
-
- port->in_vq = portdev->in_vqs[port->id];
- port->out_vq = portdev->out_vqs[port->id];
-
- cdev_init(&port->cdev, &port_fops);
-
- devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
- if (err < 0) {
- dev_err(&port->portdev->vdev->dev,
- "Error %d adding cdev for port %u\n", err, id);
- goto free_port;
- }
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
- devt, port, "vport%up%u",
- port->portdev->drv_index, id);
- if (IS_ERR(port->dev)) {
- err = PTR_ERR(port->dev);
- dev_err(&port->portdev->vdev->dev,
- "Error %d creating device for port %u\n",
- err, id);
- goto free_cdev;
- }
-
- spin_lock_init(&port->inbuf_lock);
- init_waitqueue_head(&port->waitqueue);
-
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
- dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
- goto free_device;
- }
-
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
- err = init_port_console(port);
- if (err)
- goto free_inbufs;
- }
-
- spin_lock_irq(&portdev->ports_lock);
- list_add_tail(&port->list, &port->portdev->ports);
- spin_unlock_irq(&portdev->ports_lock);
-
- /*
- * Tell the Host we're set so that it can send us various
- * configuration parameters for this port (eg, port name,
- * caching, whether this is a console port, etc.)
- */
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
-
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_ops);
- }
- return 0;
-
-free_inbufs:
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
- free_buf(buf);
-free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
-free_cdev:
- cdev_del(&port->cdev);
-free_port:
- kfree(port);
-fail:
- return err;
-}
-/*
- * The workhandler for config-space updates.
- *
- * This is called when ports are hot-added.
- */
-static void config_work_handler(struct work_struct *work)
-{
- struct virtio_console_multiport_conf virtconconf;
- struct ports_device *portdev;
- struct virtio_device *vdev;
- int err;
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
- portdev = container_of(work, struct ports_device, config_work);
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
- vdev = portdev->vdev;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &virtconconf.nr_ports,
- sizeof(virtconconf.nr_ports));
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
- if (portdev->config.nr_ports == virtconconf.nr_ports) {
/*
- * Port 0 got hot-added. Since we already did all the
- * other initialisation for it, just tell the Host
- * that the port is ready if we find the port. In
- * case the port was hot-removed earlier, we call
- * add_port to add the port.
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
*/
- struct port *port;
-
- port = find_port_by_id(portdev, 0);
- if (!port)
- add_port(portdev, 0);
- else
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- return;
- }
- if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports specified (%u) than allowed (%u)",
- portdev->config.nr_ports + 1,
- portdev->config.max_nr_ports);
- return;
- }
- if (virtconconf.nr_ports < portdev->config.nr_ports)
- return;
-
- /* Hot-add ports */
- while (virtconconf.nr_ports - portdev->config.nr_ports) {
- err = add_port(portdev, portdev->config.nr_ports);
- if (err)
- break;
- portdev->config.nr_ports++;
+ resize_console(port);
}
}
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = {
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
- u32 i;
int err;
bool multiport;
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.nr_ports = 1;
portdev->config.max_nr_ports = 1;
-#if 0 /* Multiport is not quite ready yet --RR */
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &portdev->config.nr_ports,
- sizeof(portdev->config.nr_ports));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- max_nr_ports),
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
- if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports (%u) specified than allowed (%u). Will init %u ports.",
- portdev->config.nr_ports,
- portdev->config.max_nr_ports,
- portdev->config.max_nr_ports);
-
- portdev->config.nr_ports = portdev->config.max_nr_ports;
- }
}
/* Let the Host know we support multiple ports.*/
vdev->config->finalize_features(vdev);
-#endif
err = init_vqs(portdev);
if (err < 0) {
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- INIT_WORK(&portdev->config_work, &config_work_handler);
nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto free_vqs;
}
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on older host.
+ */
+ add_port(portdev, 0);
}
- for (i = 0; i < portdev->config.nr_ports; i++)
- add_port(portdev, i);
-
- /* Start using the new console output. */
- early_put_chars = NULL;
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
free_vqs:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
vdev->config->del_vqs(vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev)
portdev = vdev->priv;
cancel_work_sync(&portdev->control_work);
- cancel_work_sync(&portdev->config_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
remove_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
free_buf(buf);
- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
free_buf(buf);
vdev->config->del_vqs(vdev);
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec..7cdb6ee569c 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
font.charcount = op->charcount;
font.height = op->height;
font.width = op->width;
- font.data = kmalloc(size, GFP_KERNEL);
- if (!font.data)
- return -ENOMEM;
- if (copy_from_user(font.data, op->data, size)) {
- kfree(font.data);
- return -EFAULT;
- }
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
acquire_console_sem();
if (vc->vc_sw->con_font_set)
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
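The vt.c hunk above replaces an open-coded kmalloc()/copy_from_user() pair with memdup_user(), which allocates the buffer and copies the userspace data in one call, returning an ERR_PTR-encoded error on failure. Below is a minimal userspace sketch of that pattern; ERR_PTR/IS_ERR/PTR_ERR and the copy itself are simplified stand-ins defined here purely for illustration, not the kernel's own helpers.

/* Illustrative userspace mock of the memdup_user() pattern used above.
 * The error-pointer macros and the "user" copy are simplified stand-ins;
 * the real helpers live in <linux/err.h> and <linux/string.h>. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Mock: allocate a buffer and copy "user" data into it in one call. */
static void *memdup_user_mock(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return ERR_PTR(-ENOMEM);
	memcpy(p, src, len);	/* stands in for copy_from_user() */
	return p;
}

int main(void)
{
	char user_font[16] = "glyph data";	/* hypothetical user buffer */
	void *data = memdup_user_mock(user_font, sizeof(user_font));

	if (IS_ERR(data))
		return (int)-PTR_ERR(data);	/* e.g. ENOMEM */
	printf("copied: %s\n", (char *)data);
	free(data);
	return 0;
}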
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 7261b8d9087..ed8a9cec2a0 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
- family = of_get_property(op->node, "xlnx,family", NULL);
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
if (!strcmp(family, "virtex2p")) {
@@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = {
MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct of_platform_driver hwicap_of_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = hwicap_of_match,
.probe = hwicap_of_probe,
.remove = __devexit_p(hwicap_of_remove),
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = hwicap_of_match,
},
};
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index b314a999aab..d7be69f1315 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -154,14 +154,14 @@ static int __init cs5535_mfgpt_init(void)
if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
timer_irq);
- return -EIO;
+ goto err_timer;
}
/* And register it with the kernel */
ret = setup_irq(timer_irq, &mfgptirq);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
- goto err;
+ goto err_irq;
}
/* Set the clock scale and enable the event mode for CMP2 */
@@ -184,8 +184,10 @@ static int __init cs5535_mfgpt_init(void)
return 0;
-err:
+err_irq:
cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+err_timer:
+ cs5535_mfgpt_free_timer(cs5535_event_clock);
printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
return -EIO;
}
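The cs5535 fix above is the usual goto-based unwinding idiom: each failure jumps to a label that releases only what was acquired before that point, and the labels fall through in reverse acquisition order (here, the previously missing cs5535_mfgpt_free_timer() call is added on the IRQ-setup failure path). A generic sketch of that structure follows; the acquire/release functions are hypothetical placeholders, not APIs from the patch.

/* Generic sketch of goto-based error unwinding: later failures jump to
 * labels that release earlier acquisitions in reverse order. */
#include <stdio.h>

static int acquire_timer(void)    { puts("timer acquired");    return 0; }
static void release_timer(void)   { puts("timer released");    }
static int acquire_irq(void)      { puts("irq request failed"); return -1; /* simulated failure */ }
static void release_irq(void)     { puts("irq released");      }
static int start_clockevent(void) { puts("clockevent running"); return 0; }

static int setup(void)
{
	int ret;

	ret = acquire_timer();
	if (ret)
		goto err_out;

	ret = acquire_irq();
	if (ret)
		goto err_timer;	/* only the timer exists at this point */

	ret = start_clockevent();
	if (ret)
		goto err_irq;	/* unwind the irq, then the timer */

	return 0;

err_irq:
	release_irq();
err_timer:
	release_timer();
err_out:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;
}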
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84..f6677cb1978 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
delay = 1;
if (!delay)
- pr_warning("sh_cmt: too long delay\n");
+ dev_warn(&p->pdev->dev, "too long delay\n");
} while (delay);
}
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
unsigned long flags;
if (delta > p->max_match_value)
- pr_warning("sh_cmt: delta out of range\n");
+ dev_warn(&p->pdev->dev, "delta out of range\n");
spin_lock_irqsave(&p->lock, flags);
p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_cmt: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_cmt_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_cmt: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_cmt_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
- pr_info("sh_cmt: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
}
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_cmt: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* request irq using setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->clear_bits = ~0xc000;
}
- ret = sh_cmt_register(p, cfg->name,
+ ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
- pr_err("sh_cmt: registration failed\n");
+ dev_err(&p->pdev->dev, "registration failed\n");
goto err1;
}
ret = setup_irq(irq, &p->irqaction);
if (ret) {
- pr_err("sh_cmt: failed to request irq %d\n", irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
goto err1;
}
@@ -654,11 +656,10 @@ err0:
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73b..ef7a5be8a09 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_mtu2: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_mtu2_enable(p);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
- pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_mtu2: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_mtu2: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
err1:
iounmap(p->mapbase);
err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b77..8e44e14ec4c 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_tmu: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_tmu_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_tmu: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_tmu_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
- pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_tmu: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_tmu: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_tmu_register(p, cfg->name,
+ return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 75d293eeb3e..063b2184caf 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define define_one_ro0400(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0400, show_##_name, NULL)
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_ro0400(cpuinfo_cur_freq);
-define_one_ro(cpuinfo_min_freq);
-define_one_ro(cpuinfo_max_freq);
-define_one_ro(cpuinfo_transition_latency);
-define_one_ro(scaling_available_governors);
-define_one_ro(scaling_driver);
-define_one_ro(scaling_cur_freq);
-define_one_ro(bios_limit);
-define_one_ro(related_cpus);
-define_one_ro(affected_cpus);
-define_one_rw(scaling_min_freq);
-define_one_rw(scaling_max_freq);
-define_one_rw(scaling_governor);
-define_one_rw(scaling_setspeed);
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3a147874a46..526bfbf6961 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
@@ -221,12 +217,8 @@ show_one_old(freq_step);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
-define_one_rw(up_threshold);
-define_one_rw(down_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(freq_step);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
write_one_old(ignore_nice_load);
write_one_old(freq_step);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(down_threshold_old, down_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(freq_step_old, freq_step);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(sampling_down_factor);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(down_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(freq_step);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf..e1314212d8d 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_iowait;
cputime64_t prev_cpu_wall;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
unsigned int down_differential;
unsigned int ignore_nice;
unsigned int powersave_bias;
+ unsigned int io_is_busy;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
return idle_time;
}
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+ if (iowait_time == -1ULL)
+ return 0;
+
+ return iowait_time;
+}
+
/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_jiffies,
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.io_is_busy = !!input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(up_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(powersave_bias);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(io_is_busy);
+define_one_global_rw(up_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
&up_threshold.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
+ &io_is_busy.attr,
NULL
};
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(powersave_bias_old, powersave_bias);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(powersave_bias);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
@@ -470,14 +486,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
unsigned int load, load_freq;
int freq_avg;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
wall_time = (unsigned int) cputime64_sub(cur_wall_time,
j_dbs_info->prev_cpu_wall);
@@ -487,6 +504,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ j_dbs_info->prev_cpu_iowait);
+ j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice;
unsigned long cur_nice_jiffies;
@@ -504,6 +525,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an
+ * indication that you're performance critical, and not that
+ * the system is actually idle. So subtract the iowait time
+ * from the cpu idle time.
+ */
+
+ if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+
if (unlikely(!wall_time || wall_time < idle_time))
continue;
@@ -617,6 +648,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
cancel_delayed_work_sync(&dbs_info->work);
}
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) says this is also not true for ARM.
+ * Because of this, whitelist specific known series of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
+}
+
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -679,6 +733,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_tuners_ins.sampling_rate =
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
+ dbs_tuners_ins.io_is_busy = should_io_be_busy();
}
mutex_unlock(&dbs_mutex);
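The io_is_busy tunable added above amounts to one adjustment in the ondemand load calculation: when it is set, time spent waiting on IO is no longer counted as idle, so an IO-bound CPU looks busy and is kept at a higher frequency. A standalone sketch of that arithmetic follows, using made-up sample numbers rather than real per-CPU statistics.

/* Standalone sketch of the "io_is_busy" load adjustment shown above:
 * subtract iowait from idle before computing load. Sample numbers are
 * hypothetical, not taken from the patch. */
#include <stdio.h>

static unsigned int compute_load(unsigned int wall_time,
				 unsigned int idle_time,
				 unsigned int iowait_time,
				 int io_is_busy)
{
	/* Mirror the guard in dbs_check_cpu(): only subtract when safe. */
	if (io_is_busy && idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (!wall_time || wall_time < idle_time)
		return 0;	/* the real code skips this CPU */

	return 100 * (wall_time - idle_time) / wall_time;
}

int main(void)
{
	/* 10 ms window, 6 ms idle of which 4 ms was spent waiting on IO. */
	unsigned int wall = 10000, idle = 6000, iowait = 4000;

	printf("io_is_busy=0 -> load %u%%\n",
	       compute_load(wall, idle, iowait, 0));	/* 40% */
	printf("io_is_busy=1 -> load %u%%\n",
	       compute_load(wall, idle, iowait, 1));	/* 80% */
	return 0;
}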
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 12fdd3987a3..199488576a0 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return -EIO;
if (!dev->state_count)
return -EINVAL;
@@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
{
if (!dev->enabled)
return;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return;
dev->enabled = 0;
@@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (!sys_dev)
return -EINVAL;
- if (!try_module_get(cpuidle_curr_driver->owner))
+ if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
init_completion(&dev->kobj_unregister);
@@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
return ret;
}
@@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
return;
@@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_resume_and_unlock();
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 9476ba33ee2..33e50d556f1 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -9,7 +9,6 @@
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
-extern struct cpuidle_driver *cpuidle_curr_driver;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 2257004fe33..fd1601e3d12 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,7 +14,7 @@
#include "cpuidle.h"
-struct cpuidle_driver *cpuidle_curr_driver;
+static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
/**
@@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ return cpuidle_curr_driver;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (!drv)
+ if (drv != cpuidle_curr_driver) {
+ WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
+ drv->name);
return;
+ }
spin_lock(&cpuidle_driver_lock);
cpuidle_curr_driver = NULL;
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218..12c98900dcf 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f8e57c6303f..52ff8aa63f8 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
#include <linux/math64.h>
#define BUCKETS 12
+#define INTERVALS 8
#define RESOLUTION 1024
-#define DECAY 4
+#define DECAY 8
#define MAX_INTERESTING 50000
+#define STDDEV_THRESH 400
+
/*
* Concepts and ideas behind the menu governor
@@ -64,6 +67,16 @@
* indexed based on the magnitude of the expected duration as well as the
* "is IO outstanding" property.
*
+ * Repeatable-interval-detector
+ * ----------------------------
+ * There are some cases where "next timer" is a completely unusable predictor:
+ * Those cases where the interval is fixed, for example due to hardware
+ * interrupt mitigation, but also due to fixed transfer rate devices such as
+ * mice.
+ * For this, we use a different predictor: We track the duration of the last 8
+ * intervals, and if the standard deviation of these 8 intervals is below a
+ * threshold value, we use the average of these intervals as prediction.
+ *
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
@@ -104,6 +117,8 @@ struct menu_device {
unsigned int exit_us;
unsigned int bucket;
u64 correction_factor[BUCKETS];
+ u32 intervals[INTERVALS];
+ int interval_ptr;
};
@@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/*
+ * Try detecting repeating patterns by keeping track of the last 8
+ * intervals, and checking if the standard deviation of that set
+ * of points is below a threshold. If it is... then use the
+ * average of these 8 points as the estimated value.
+ */
+static void detect_repeating_patterns(struct menu_device *data)
+{
+ int i;
+ uint64_t avg = 0;
+ uint64_t stddev = 0; /* contains the square of the std deviation */
+
+ /* first calculate average and standard deviation of the past */
+ for (i = 0; i < INTERVALS; i++)
+ avg += data->intervals[i];
+ avg = avg / INTERVALS;
+
+ /* if the avg is beyond the known next tick, it's worthless */
+ if (avg > data->expected_us)
+ return;
+
+ for (i = 0; i < INTERVALS; i++)
+ stddev += (data->intervals[i] - avg) *
+ (data->intervals[i] - avg);
+
+ stddev = stddev / INTERVALS;
+
+ /*
+ * now.. if stddev is small.. then assume we have a
+ * repeating pattern and predict we keep doing this.
+ */
+
+ if (avg && stddev < STDDEV_THRESH)
+ data->predicted_us = avg;
+}
+
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -182,7 +233,7 @@ static u64 div_round64(u64 dividend, u32 divisor)
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
int multiplier;
@@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
+ detect_repeating_patterns(data);
+
/*
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
@@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev)
new_factor = 1;
data->correction_factor[data->bucket] = new_factor;
+
+ /* update the repeating-pattern data */
+ data->intervals[data->interval_ptr++] = last_idle_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
}
/**
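The repeating-interval detector added to the menu governor above is simple enough to exercise on its own: average the last eight observed intervals and, if their variance is below a threshold and the average does not exceed the next known timer, use the average as the prediction. The standalone sketch below mirrors that logic; the interval samples and the surrounding struct are fabricated for illustration.

/* Standalone sketch of the detect_repeating_patterns() logic added above:
 * if the variance of the last 8 observed intervals is small, predict
 * their average instead of the next-timer estimate. */
#include <stdint.h>
#include <stdio.h>

#define INTERVALS	8
#define STDDEV_THRESH	400	/* threshold on the variance, as in the patch */

struct menu_sketch {
	uint32_t intervals[INTERVALS];
	uint64_t expected_us;
	uint64_t predicted_us;
};

static void detect_repeating_patterns(struct menu_sketch *data)
{
	uint64_t avg = 0, stddev = 0;	/* stddev holds the squared deviation */
	int i;

	for (i = 0; i < INTERVALS; i++)
		avg += data->intervals[i];
	avg /= INTERVALS;

	/* if the avg is beyond the known next tick, it's worthless */
	if (avg > data->expected_us)
		return;

	for (i = 0; i < INTERVALS; i++)
		stddev += (data->intervals[i] - avg) *
			  (data->intervals[i] - avg);
	stddev /= INTERVALS;

	if (avg && stddev < STDDEV_THRESH)
		data->predicted_us = avg;
}

int main(void)
{
	/* e.g. a device delivering interrupts roughly every 8 ms */
	struct menu_sketch data = {
		.intervals = { 8000, 8010, 7995, 8003, 8001, 7998, 8007, 8002 },
		.expected_us = 20000,	/* next timer is far away */
		.predicted_us = 20000,
	};

	detect_repeating_patterns(&data);
	printf("predicted_us = %llu\n",
	       (unsigned long long)data.predicted_us);	/* prints 8002 */
	return 0;
}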
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0ba9c8b8ee7..0310ffaec9d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class,
char *buf)
{
ssize_t ret;
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver)
- ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name);
+ if (cpuidle_driver)
+ ret = sprintf(buf, "%s\n", cpuidle_driver->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1c..fbf94cf496f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
Currently the driver supports AES in ECB and CBC mode without DMA.
+config CRYPTO_DEV_NIAGARA2
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_ALGAPI
+ depends on SPARC64
+ help
+ Each core of a Niagara2 processor contains a Stream
+ Processing Unit, which itself contains several cryptographic
+ sub-units. One set provides the Modular Arithmetic Unit,
+ used for SSL offload. The other set provides the Cipher
+ Group, which can perform encryption, decryption, hashing,
+ checksumming, and raw copies.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config CRYPTO_DEV_OMAP_SHAM
+ tristate "Support for OMAP SHA1/MD5 hw accelerator"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ help
+	  OMAP processors have a SHA1/MD5 hw accelerator. Select this if you
+ want to use the OMAP module for SHA1/MD5 algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f94..6dbbe00c452 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3a..9d65b371de6 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
};
static struct of_platform_driver crypto4xx_driver = {
- .name = "crypto4xx",
- .match_table = crypto4xx_match,
+ .driver = {
+ .name = "crypto4xx",
+ .owner = THIS_MODULE,
+ .of_match_table = crypto4xx_match,
+ },
.probe = crypto4xx_probe,
.remove = crypto4xx_remove,
};
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba69..09389dd2f96 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include "geode-aes.h"
/* Static structures */
-static void __iomem * _iobase;
+static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,7 +39,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
do {
status = ioread32(_iobase + AES_INTR_REG);
cpu_relax();
- } while(!(status & AES_INTRA_PENDING) && --counter);
+ } while (!(status & AES_INTRA_PENDING) && --counter);
/* Clear the event */
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
-
- if ((ret = pci_enable_device(dev)))
+ ret = pci_enable_device(dev);
+ if (ret)
return ret;
- if ((ret = pci_request_regions(dev, "geode-aes")))
+ ret = pci_request_regions(dev, "geode-aes");
+ if (ret)
goto eenable;
_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
- if ((ret = crypto_register_alg(&geode_alg)))
+ ret = crypto_register_alg(&geode_alg);
+ if (ret)
goto eiomap;
- if ((ret = crypto_register_alg(&geode_ecb_alg)))
+ ret = crypto_register_alg(&geode_ecb_alg);
+ if (ret)
goto ealg;
- if ((ret = crypto_register_alg(&geode_cbc_alg)))
+ ret = crypto_register_alg(&geode_cbc_alg);
+ if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b5..16fce3aadf4 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
#define ASYNC_FLAGS_MISALIGNED (1<<0)
-struct ablkcipher_walk
+struct hifn_cipher_walk
{
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
u32 flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
u8 *iv;
unsigned int ivsize;
u8 op, type, mode, unused;
- struct ablkcipher_walk walk;
+ struct hifn_cipher_walk walk;
};
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
return 0;
}
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
int num, gfp_t gfp_flags)
{
int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
return i;
}
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
{
int i;
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int ablkcipher_walk(struct ablkcipher_request *req,
- struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+ struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
}
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+ err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
if (err < 0)
return err;
}
- sg_num = ablkcipher_walk(req, &rctx->walk);
+ sg_num = hifn_cipher_walk(req, &rctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
kunmap_atomic(saddr, KM_SOFTIRQ0);
}
- ablkcipher_walk_exit(&rctx->walk);
+ hifn_cipher_walk_exit(&rctx->walk);
}
req->base.complete(&req->base, error);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc4..e095422b58d 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
#include "mv_cesa.h"
+
+#define MV_CESA "MV-CESA:"
+#define MAX_HW_HASH_SIZE 0xFFFF
+
/*
* STM:
* /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
* @dst_sg_it: sg iterator for dst
* @sg_src_left: bytes left in src to process (scatter list)
* @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current crypt process
+ * @crypt_len: length of current hw crypt/hash process
+ * @hw_nbytes: total bytes to process in hw for this request
+ * @copy_back: whether to copy data back (crypt) or not (hash)
* @sg_dst_left: bytes left dst to process in this scatter list
* @dst_start: offset to add to dst start position (scatter list)
- * @total_req_bytes: total number of bytes processed (request).
+ * @hw_processed_bytes: number of bytes processed by hw (request).
*
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
struct req_progress {
struct sg_mapping_iter src_sg_it;
struct sg_mapping_iter dst_sg_it;
+ void (*complete) (void);
+ void (*process) (int is_first);
/* src mostly */
int sg_src_left;
int src_start;
int crypt_len;
+ int hw_nbytes;
/* dst mostly */
+ int copy_back;
int sg_dst_left;
int dst_start;
- int total_req_bytes;
+ int hw_processed_bytes;
};
struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
- struct ablkcipher_request *cur_req;
+ struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
int sram_size;
+ int has_sha1;
+ int has_hmac_sha1;
};
static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
int decrypt;
};
+enum hash_op {
+ COP_SHA1,
+ COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+ struct crypto_shash *fallback;
+ struct crypto_shash *base_hash;
+ u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+ int count_add;
+ enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
+ int first_hash; /* marks that we don't have previous state */
+ int last_chunk; /* marks that this is the 'final' request */
+ int extra_bytes; /* unprocessed bytes in buffer */
+ enum hash_op op;
+ int count_add;
+ struct scatterlist dummysg;
+};
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
int ret;
- void *buf;
+ void *sbuf;
+ int copied = 0;
- if (!cpg->p.sg_src_left) {
- ret = sg_miter_next(&cpg->p.src_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_src_left = cpg->p.src_sg_it.length;
- cpg->p.src_start = 0;
- }
-
- cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
- buf = cpg->p.src_sg_it.addr;
- buf += cpg->p.src_start;
+ while (1) {
+ if (!p->sg_src_left) {
+ ret = sg_miter_next(&p->src_sg_it);
+ BUG_ON(!ret);
+ p->sg_src_left = p->src_sg_it.length;
+ p->src_start = 0;
+ }
- memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+ sbuf = p->src_sg_it.addr + p->src_start;
+
+ if (p->sg_src_left <= len - copied) {
+ memcpy(dbuf + copied, sbuf, p->sg_src_left);
+ copied += p->sg_src_left;
+ p->sg_src_left = 0;
+ if (copied >= len)
+ break;
+ } else {
+ int copy_len = len - copied;
+ memcpy(dbuf + copied, sbuf, copy_len);
+ p->src_start += copy_len;
+ p->sg_src_left -= copy_len;
+ break;
+ }
+ }
+}
- cpg->p.sg_src_left -= cpg->p.crypt_len;
- cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+ struct req_progress *p = &cpg->p;
+ int data_in_sram =
+ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+ data_in_sram - p->crypt_len);
+ p->crypt_len = data_in_sram;
}
static void mv_process_current_q(int first_block)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
break;
case COP_AES_CBC:
+ default:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
ENC_P_DST(SRAM_DATA_OUT_START);
op.enc_key_p = SRAM_DATA_KEY_P;
- setup_data_in(req);
+ setup_data_in();
op.enc_len = cpg->p.crypt_len;
memcpy(cpg->sram + SRAM_CONFIG, &op,
sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
static void mv_crypto_algo_completion(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+
if (req_ctx->op != COP_AES_CBC)
return ;
memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
+static void mv_process_hash_current(int first_block)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct req_progress *p = &cpg->p;
+ struct sec_accel_config op = { 0 };
+ int is_last;
+
+ switch (req_ctx->op) {
+ case COP_SHA1:
+ default:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+ break;
+ case COP_HMAC_SHA1:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+ break;
+ }
+
+ op.mac_src_p =
+ MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
+ req_ctx->
+ count);
+
+ setup_data_in();
+
+ op.mac_digest =
+ MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+ op.mac_iv =
+ MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+ MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+ is_last = req_ctx->last_chunk
+ && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+ && (req_ctx->count <= MAX_HW_HASH_SIZE);
+ if (req_ctx->first_hash) {
+ if (is_last)
+ op.config |= CFG_NOT_FRAG;
+ else
+ op.config |= CFG_FIRST_FRAG;
+
+ req_ctx->first_hash = 0;
+ } else {
+ if (is_last)
+ op.config |= CFG_LAST_FRAG;
+ else
+ op.config |= CFG_MID_FRAG;
+ }
+
+ memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+ struct shash_desc *desc)
+{
+ int i;
+ struct sha1_state shash_state;
+
+ shash_state.count = ctx->count + ctx->count_add;
+ for (i = 0; i < 5; i++)
+ shash_state.state[i] = ctx->state[i];
+ memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+ return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+ } desc;
+ int rc;
+
+ desc.shash.tfm = tfm_ctx->fallback;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ if (unlikely(req_ctx->first_hash)) {
+ crypto_shash_init(&desc.shash);
+ crypto_shash_update(&desc.shash, req_ctx->buffer,
+ req_ctx->extra_bytes);
+ } else {
+ /* only SHA1 for now....
+ */
+ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+ if (rc)
+ goto out;
+ }
+ rc = crypto_shash_final(&desc.shash, req->result);
+out:
+ return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+ if (ctx->extra_bytes)
+ copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+ sg_miter_stop(&cpg->p.src_sg_it);
+
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+ if (likely(ctx->last_chunk)) {
+ if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+ memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm
+ (req)));
+ } else
+ mv_hash_final_fallback(req);
+ }
+}
+
static void dequeue_complete_req(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct crypto_async_request *req = cpg->cur_req;
void *buf;
int ret;
+ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ if (cpg->p.copy_back) {
+ int need_copy_len = cpg->p.crypt_len;
+ int sram_offset = 0;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
- cpg->p.total_req_bytes += cpg->p.crypt_len;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
- dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
- memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+ memcpy(buf,
+ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+ dst_copy);
+ sram_offset += dst_copy;
+ cpg->p.sg_dst_left -= dst_copy;
+ need_copy_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (need_copy_len > 0);
+ }
- cpg->p.sg_dst_left -= dst_copy;
- cpg->p.crypt_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (cpg->p.crypt_len > 0);
+ cpg->p.crypt_len = 0;
BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.total_req_bytes < req->nbytes) {
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
/* process next scatter list entry */
cpg->eng_st = ENGINE_BUSY;
- mv_process_current_q(0);
+ cpg->p.process(0);
} else {
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
- mv_crypto_algo_completion();
+ cpg->p.complete();
cpg->eng_st = ENGINE_IDLE;
- req->base.complete(&req->base, 0);
+ local_bh_disable();
+ req->complete(req, 0);
+ local_bh_enable();
}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
int i = 0;
-
- do {
- total_bytes -= sl[i].length;
- i++;
-
- } while (total_bytes > 0);
+ size_t cur_len;
+
+ while (1) {
+ cur_len = sl[i].length;
+ ++i;
+ if (total_bytes > cur_len)
+ total_bytes -= cur_len;
+ else
+ break;
+ }
return i;
}
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
+ struct req_progress *p = &cpg->p;
int num_sgs;
- cpg->cur_req = req;
- memset(&cpg->p, 0, sizeof(struct req_progress));
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ p->hw_nbytes = req->nbytes;
+ p->complete = mv_crypto_algo_completion;
+ p->process = mv_process_current_q;
+ p->copy_back = 1;
num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
mv_process_current_q(1);
}
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+ struct req_progress *p = &cpg->p;
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ int num_sgs, hw_bytes, old_extra_bytes, rc;
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ hw_bytes = req->nbytes + ctx->extra_bytes;
+ old_extra_bytes = ctx->extra_bytes;
+
+ if (unlikely(ctx->extra_bytes)) {
+ memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+ ctx->extra_bytes);
+ p->crypt_len = ctx->extra_bytes;
+ }
+
+ memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+ if (unlikely(!ctx->first_hash)) {
+ writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
+
+ ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+ if (ctx->extra_bytes != 0
+ && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+ hw_bytes -= ctx->extra_bytes;
+ else
+ ctx->extra_bytes = 0;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ if (hw_bytes) {
+ p->hw_nbytes = hw_bytes;
+ p->complete = mv_hash_algo_completion;
+ p->process = mv_process_hash_current;
+
+ mv_process_hash_current(1);
+ } else {
+ copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+ ctx->extra_bytes - old_extra_bytes);
+ sg_miter_stop(&p->src_sg_it);
+ if (ctx->last_chunk)
+ rc = mv_hash_final_fallback(req);
+ else
+ rc = 0;
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+ req->base.complete(&req->base, rc);
+ local_bh_enable();
+ }
+}
+
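The hw_bytes/extra_bytes split above decides how much of a hash request the engine sees: intermediate updates only feed whole SHA-1 blocks to hardware and buffer the remainder, while a final chunk that fits the engine is passed through complete. A standalone sketch of the same arithmetic (the MAX_HW_HASH value below is an invented stand-in for the driver's MAX_HW_HASH_SIZE):

#include <stdio.h>

#define BLOCK_SIZE   64        /* SHA1_BLOCK_SIZE */
#define MAX_HW_HASH  0x7fffff  /* invented stand-in for MAX_HW_HASH_SIZE */

/* Mirrors the hw_bytes/extra_bytes computation in mv_start_new_hash_req():
 * hw_bytes is what the engine processes now, extra is buffered for later.
 */
static void split_request(unsigned int nbytes, unsigned int buffered,
                          int last_chunk, unsigned long count)
{
        unsigned int hw_bytes = nbytes + buffered;
        unsigned int extra = hw_bytes % BLOCK_SIZE;

        if (extra != 0 && (!last_chunk || count > MAX_HW_HASH))
                hw_bytes -= extra;
        else
                extra = 0;

        printf("hw_bytes=%u extra_bytes=%u\n", hw_bytes, extra);
}

int main(void)
{
        split_request(100, 20, 0, 120); /* update: 64 to hw, 56 buffered */
        split_request(100, 20, 1, 120); /* final:  all 120 bytes to hw   */
        return 0;
}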
static int queue_manag(void *data)
{
cpg->eng_st = ENGINE_IDLE;
do {
- struct ablkcipher_request *req;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog;
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
}
if (async_req) {
- req = container_of(async_req,
- struct ablkcipher_request, base);
- mv_enqueue_new_req(req);
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ struct ablkcipher_request *req =
+ container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ mv_start_new_crypt_req(req);
+ } else {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+ mv_start_new_hash_req(req);
+ }
async_req = NULL;
}
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
return 0;
}
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cpg->lock, flags);
- ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ ret = crypto_enqueue_request(&cpg->queue, req);
spin_unlock_irqrestore(&cpg->lock, flags);
wake_up_process(cpg->queue_th);
return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+ int is_last, unsigned int req_len,
+ int count_add)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->op = op;
+ ctx->count = req_len;
+ ctx->first_hash = 1;
+ ctx->last_chunk = is_last;
+ ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+ unsigned req_len)
+{
+ ctx->last_chunk = is_last;
+ ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+ tfm_ctx->count_add);
+ return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return 0;
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ /* dummy buffer of 4 bytes */
+ sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+ /* I think I'm allowed to do that... */
+ ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+ mv_update_hash_req_ctx(ctx, 1, 0);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return mv_hash_final(req);
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+ req->nbytes, tfm_ctx->count_add);
+ return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+ const void *ostate)
+{
+ const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+ int i;
+ for (i = 0; i < 5; i++) {
+ ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+ ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+ }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ int bs, ds, ss;
+
+ if (!ctx->base_hash)
+ return 0;
+
+ rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+ if (rc)
+ return rc;
+
+ /* Can't see a way to extract the ipad/opad from the fallback tfm
+ so I'm basically copying code from the hmac module */
+ bs = crypto_shash_blocksize(ctx->base_hash);
+ ds = crypto_shash_digestsize(ctx->base_hash);
+ ss = crypto_shash_statesize(ctx->base_hash);
+
+ {
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(ctx->base_hash)];
+ } desc;
+ unsigned int i;
+ char ipad[ss];
+ char opad[ss];
+
+ desc.shash.tfm = ctx->base_hash;
+ desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (keylen > bs) {
+ int err;
+
+ err =
+ crypto_shash_digest(&desc.shash, key, keylen, ipad);
+ if (err)
+ return err;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, key, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ rc = crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, ipad, bs) ? :
+ crypto_shash_export(&desc.shash, ipad) ? :
+ crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, opad, bs) ? :
+ crypto_shash_export(&desc.shash, opad);
+
+ if (rc == 0)
+ mv_hash_init_ivs(ctx, ipad, opad);
+
+ return rc;
+ }
+}
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+ enum hash_op op, int count_add)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm = NULL;
+ struct crypto_shash *base_hash = NULL;
+ int err = -ENOMEM;
+
+ ctx->op = op;
+ ctx->count_add = count_add;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ printk(KERN_WARNING MV_CESA
+ "Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+ ctx->fallback = fallback_tfm;
+
+ if (base_hash_name) {
+ /* Allocate a hash to compute the ipad/opad of hmac. */
+ base_hash = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(base_hash)) {
+ printk(KERN_WARNING MV_CESA
+ "Base driver '%s' could not be loaded!\n",
+ base_hash_name);
+ err = PTR_ERR(base_hash);
+ goto err_bad_base;
+ }
+ }
+ ctx->base_hash = base_hash;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct mv_req_hash_ctx) +
+ crypto_shash_descsize(ctx->fallback));
+ return 0;
+err_bad_base:
+ crypto_free_shash(fallback_tfm);
+out:
+ return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->fallback);
+ if (ctx->base_hash)
+ crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
+struct ahash_alg mv_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "mv-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .setkey = mv_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "mv-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_hmac_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
static int mv_probe(struct platform_device *pdev)
{
struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
int ret;
if (cpg) {
- printk(KERN_ERR "Second crypto dev?\n");
+ printk(KERN_ERR MV_CESA "Second crypto dev?\n");
return -EEXIST;
}
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
spin_lock_init(&cp->lock);
crypto_init_queue(&cp->queue, 50);
- cp->reg = ioremap(res->start, res->end - res->start + 1);
+ cp->reg = ioremap(res->start, resource_size(res));
if (!cp->reg) {
ret = -ENOMEM;
goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_unmap_reg;
}
- cp->sram_size = res->end - res->start + 1;
+ cp->sram_size = resource_size(res);
cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
cp->sram = ioremap(res->start, cp->sram_size);
if (!cp->sram) {
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
goto err_unreg_ecb;
+
+ ret = crypto_register_ahash(&mv_sha1_alg);
+ if (ret == 0)
+ cpg->has_sha1 = 1;
+ else
+ printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+ ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+ if (ret == 0) {
+ cpg->has_hmac_sha1 = 1;
+ } else {
+ printk(KERN_WARNING MV_CESA
+ "Could not register hmac-sha1 driver\n");
+ }
+
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
crypto_unregister_alg(&mv_aes_alg_ecb);
crypto_unregister_alg(&mv_aes_alg_cbc);
+ if (cp->has_sha1)
+ crypto_unregister_ahash(&mv_sha1_alg);
+ if (cp->has_hmac_sha1)
+ crypto_unregister_ahash(&mv_hmac_sha1_alg);
kthread_stop(cp->queue_th);
free_irq(cp->irq, cp);
memset(cp->sram, 0, cp->sram_size);
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb17..08fcb1116d9 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
#ifndef __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DIGEST_INITIAL_VAL_B 0xdd04
+#define DIGEST_INITIAL_VAL_C 0xdd08
+#define DIGEST_INITIAL_VAL_D 0xdd0c
+#define DIGEST_INITIAL_VAL_E 0xdd10
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
+#define CFG_NOT_FRAG (0 << 30)
+#define CFG_FIRST_FRAG (1 << 30)
+#define CFG_LAST_FRAG (2 << 30)
+#define CFG_MID_FRAG (3 << 30)
u32 enc_p;
#define ENC_P_SRC(x) (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
+#define MAC_DIGEST_P(x) (x)
+#define MAC_FRAG_LEN(x) ((x) << 16)
u32 mac_iv;
+#define MAC_INNER_IV_P(x) (x)
+#define MAC_OUTER_IV_P(x) ((x) << 16)
}__attribute__ ((packed));
/*
* /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
- * |-----------| 0x50
+ * |-----------| 0x80
* | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x50 (inplace operation)
+ * |-----------| 0x80 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
+
+ /* Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | Inner IV | 5 * 4
+ * |-----------| 0x34
+ * | Outer IV | 5 * 4
+ * |-----------| 0x48
+ * | Output BUF| 5 * 4
+ * |-----------| 0x80
+ * | DATA IN | 64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x50
-#define SRAM_DATA_OUT_START 0x50
+#define SRAM_DATA_IN_START 0x80
+#define SRAM_DATA_OUT_START 0x80
+
+#define SRAM_HMAC_IV_IN 0x20
+#define SRAM_HMAC_IV_OUT 0x34
+#define SRAM_DIGEST_BUF 0x48
-#define SRAM_CFG_SPACE 0x50
+#define SRAM_CFG_SPACE 0x80
#endif
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 00000000000..f7c793745a1
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,95 @@
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
+ /* o0: queue type
+ * o1: RA of queue
+ * o2: num entries in queue
+ * o3: address of queue handle return
+ */
+ENTRY(sun4v_ncs_qconf)
+ mov HV_FAST_NCS_QCONF, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qconf)
+
+ /* %o0: queue handle
+ * %o1: address of queue type return
+ * %o2: address of queue base address return
+ * %o3: address of queue num entries return
+ */
+ENTRY(sun4v_ncs_qinfo)
+ mov %o1, %g1
+ mov %o2, %g2
+ mov %o3, %g3
+ mov HV_FAST_NCS_QINFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ stx %o3, [%g3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qinfo)
+
+ /* %o0: queue handle
+ * %o1: address of head offset return
+ */
+ENTRY(sun4v_ncs_gethead)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETHEAD, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gethead)
+
+ /* %o0: queue handle
+ * %o1: address of tail offset return
+ */
+ENTRY(sun4v_ncs_gettail)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETTAIL, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gettail)
+
+ /* %o0: queue handle
+ * %o1: new tail offset
+ */
+ENTRY(sun4v_ncs_settail)
+ mov HV_FAST_NCS_SETTAIL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_settail)
+
+ /* %o0: queue handle
+ * %o1: address of devino return
+ */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+ mov %o1, %o2
+ mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+ /* %o0: queue handle
+ * %o1: new head offset
+ */
+ENTRY(sun4v_ncs_sethead_marker)
+ mov HV_FAST_NCS_SETHEAD_MARKER, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 00000000000..8566be832f5
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2083 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME "n2_crypto"
+#define DRV_MODULE_VERSION "0.1"
+#define DRV_MODULE_RELDATE "April 29, 2010"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY 300
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+ cpumask_t sharing;
+ unsigned long qhandle;
+
+ spinlock_t lock;
+ u8 q_type;
+ void *q;
+ unsigned long head;
+ unsigned long tail;
+ struct list_head jobs;
+
+ unsigned long devino;
+
+ char irq_name[32];
+ unsigned int irq;
+
+ struct list_head list;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+ if (q->q_type == HV_NCS_QTYPE_MAU) {
+ off += MAU_ENTRY_SIZE;
+ if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+ off = 0;
+ } else {
+ off += CWQ_ENTRY_SIZE;
+ if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+ off = 0;
+ }
+ return off;
+}
+
+struct n2_request_common {
+ struct list_head entry;
+ unsigned int offset;
+};
+#define OFFSET_NOT_RUNNING (~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset; test whether that offset falls in
+ * the range (old_head, new_head], i.e. exclusive of old_head and
+ * inclusive of new_head, allowing for queue wrap-around.
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+ unsigned long old_head, unsigned long new_head)
+{
+ if (old_head <= new_head) {
+ if (offset > old_head && offset <= new_head)
+ return true;
+ } else {
+ if (offset > old_head || offset <= new_head)
+ return true;
+ }
+ return false;
+}
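The wrap-around behaviour of the check above is easiest to see with concrete offsets; a standalone sketch (the offsets and the main() harness are invented for illustration):

#include <assert.h>
#include <stdbool.h>

/* Same predicate as job_finished(), minus the unused queue argument. */
static bool in_completed_range(unsigned int offset,
                               unsigned long old_head, unsigned long new_head)
{
        if (old_head <= new_head)
                return offset > old_head && offset <= new_head;
        return offset > old_head || offset <= new_head;
}

int main(void)
{
        /* No wrap: HEAD advanced from 0x40 to 0xc0. */
        assert(in_completed_range(0x80, 0x40, 0xc0));
        assert(!in_completed_range(0x40, 0x40, 0xc0)); /* old_head excluded */
        /* Wrap: HEAD advanced past the end of the ring and back to 0x40. */
        assert(in_completed_range(0x00, 0x3c0, 0x40));
        assert(!in_completed_range(0x200, 0x3c0, 0x40));
        return 0;
}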
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt. We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+ unsigned long off, new_head, hv_ret;
+ struct spu_queue *q = dev_id;
+
+ pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ spin_lock(&q->lock);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+ pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), new_head, hv_ret);
+
+ for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+ /* XXX ... XXX */
+ }
+
+ hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+ if (hv_ret == HV_EOK)
+ q->head = new_head;
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+ struct spu_queue *q = dev_id;
+ unsigned long head, hv_ret;
+
+ spin_lock(&q->lock);
+
+ pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+ pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), head, hv_ret);
+
+ sun4v_ncs_sethead_marker(q->qhandle, head);
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+ return q->q + spu_next_offset(q, cur - q->q);
+}
+
+static int spu_queue_num_free(struct spu_queue *q)
+{
+ unsigned long head = q->head;
+ unsigned long tail = q->tail;
+ unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+ unsigned long diff;
+
+ if (head > tail)
+ diff = head - tail;
+ else
+ diff = (end - tail) + head;
+
+ return (diff / CWQ_ENTRY_SIZE) - 1;
+}
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+ int avail = spu_queue_num_free(q);
+
+ if (avail >= num_entries)
+ return q->q + q->tail;
+
+ return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+ unsigned long hv_ret, new_tail;
+
+ new_tail = spu_next_offset(q, last - q->q);
+
+ hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+ if (hv_ret == HV_EOK)
+ q->tail = new_tail;
+ return hv_ret;
+}
+
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+ int enc_type, int auth_type,
+ unsigned int hash_len,
+ bool sfas, bool sob, bool eob, bool encrypt,
+ int opcode)
+{
+ u64 word = (len - 1) & CONTROL_LEN;
+
+ word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+ word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+ word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+ if (sfas)
+ word |= CONTROL_STORE_FINAL_AUTH_STATE;
+ if (sob)
+ word |= CONTROL_START_OF_BLOCK;
+ if (eob)
+ word |= CONTROL_END_OF_BLOCK;
+ if (encrypt)
+ word |= CONTROL_ENCRYPT;
+ if (hmac_key_len)
+ word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+ if (hash_len)
+ word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+ return word;
+}
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+ if (this_len >= 64 ||
+ qp->head != qp->tail)
+ return true;
+ return false;
+}
+#endif
+
+struct n2_base_ctx {
+ struct list_head list;
+};
+
+static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+{
+ INIT_LIST_HEAD(&ctx->list);
+}
+
+struct n2_hash_ctx {
+ struct n2_base_ctx base;
+
+ struct crypto_ahash *fallback;
+
+ /* These next three members must match the layout created by
+ * crypto_init_shash_ops_async. This allows us to properly
+ * plumb requests we can't do in hardware down to the fallback
+ * operation, providing all of the data structures and layouts
+ * expected by those paths.
+ */
+ struct ahash_request fallback_req;
+ struct shash_desc fallback_desc;
+ union {
+ struct md5_state md5;
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ } u;
+
+ unsigned char hash_key[64];
+ unsigned char keyed_zero_hash[32];
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&ctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&ctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&ctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&ctx->fallback_req);
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_ahash *fallback_tfm;
+ int err;
+
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warning("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+
+ ctx->fallback = fallback_tfm;
+ return 0;
+
+out:
+ return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ crypto_free_ahash(ctx->fallback);
+}
+
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+ unsigned long head, hv_ret;
+
+ do {
+ hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+ if (hv_ret != HV_EOK) {
+ pr_err("Hypervisor error on gethead\n");
+ break;
+ }
+ if (head == qp->tail) {
+ qp->head = head;
+ break;
+ }
+ } while (1);
+ return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+ struct cwq_initial_entry *ent)
+{
+ unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+ if (hv_ret == HV_EOK)
+ hv_ret = wait_for_tail(qp);
+
+ return hv_ret;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req,
+ unsigned int auth_type, unsigned int digest_size,
+ unsigned int result_size, void *hash_loc)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ struct crypto_hash_walk walk;
+ struct spu_queue *qp;
+ unsigned long flags;
+ int err = -ENODEV;
+ int nbytes, cpu;
+
+ /* The total effective length of the operation may not
+ * exceed 2^16.
+ */
+ if (unlikely(req->nbytes > (1 << 16))) {
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_digest(&ctx->fallback_req);
+ }
+
+ n2_base_ctx_init(&ctx->base);
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+
+ cpu = get_cpu();
+ qp = cpu_to_cwq[cpu];
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ /* XXX can do better, improve this later by doing a by-hand scatterlist
+ * XXX walk, etc.
+ */
+ ent = qp->q + qp->tail;
+
+ ent->control = control_word_base(nbytes, 0, 0,
+ auth_type, digest_size,
+ false, true, false, false,
+ OPCODE_INPLACE_BIT |
+ OPCODE_AUTH_MAC);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = __pa(hash_loc);
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = __pa(hash_loc);
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ while (nbytes > 0) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = (nbytes - 1);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+ err = -EINVAL;
+ else
+ err = 0;
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ if (!err)
+ memcpy(req->result, hash_loc, result_size);
+out:
+ put_cpu();
+
+ return err;
+}
+
+static int n2_md5_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct md5_state *m = &ctx->u.md5;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char md5_zero[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+ };
+
+ memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+ return 0;
+ }
+ m->hash[0] = cpu_to_le32(0x67452301);
+ m->hash[1] = cpu_to_le32(0xefcdab89);
+ m->hash[2] = cpu_to_le32(0x98badcfe);
+ m->hash[3] = cpu_to_le32(0x10325476);
+
+ return n2_hash_async_digest(req, AUTH_TYPE_MD5,
+ MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
+ m->hash);
+}
+
+static int n2_sha1_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha1_state *s = &ctx->u.sha1;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+ 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+ 0x07, 0x09
+ };
+
+ memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA1_H0;
+ s->state[1] = SHA1_H1;
+ s->state[2] = SHA1_H2;
+ s->state[3] = SHA1_H3;
+ s->state[4] = SHA1_H4;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha256_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha256_state *s = &ctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+ 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+ 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+ 0x1b, 0x78, 0x52, 0xb8, 0x55
+ };
+
+ memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA256_H0;
+ s->state[1] = SHA256_H1;
+ s->state[2] = SHA256_H2;
+ s->state[3] = SHA256_H3;
+ s->state[4] = SHA256_H4;
+ s->state[5] = SHA256_H5;
+ s->state[6] = SHA256_H6;
+ s->state[7] = SHA256_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha224_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha256_state *s = &ctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+ };
+
+ memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA224_H0;
+ s->state[1] = SHA224_H1;
+ s->state[2] = SHA224_H2;
+ s->state[3] = SHA224_H3;
+ s->state[4] = SHA224_H4;
+ s->state[5] = SHA224_H5;
+ s->state[6] = SHA224_H6;
+ s->state[7] = SHA224_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
+ s->state);
+}
+
+struct n2_cipher_context {
+ int key_len;
+ int enc_type;
+ union {
+ u8 aes[AES_MAX_KEY_SIZE];
+ u8 des[DES_KEY_SIZE];
+ u8 des3[3 * DES_KEY_SIZE];
+ u8 arc4[258]; /* S-box, X, Y */
+ } key;
+};
+
+#define N2_CHUNK_ARR_LEN 16
+
+struct n2_crypto_chunk {
+ struct list_head entry;
+ unsigned long iv_paddr : 44;
+ unsigned long arr_len : 20;
+ unsigned long dest_paddr;
+ unsigned long dest_final;
+ struct {
+ unsigned long src_paddr : 44;
+ unsigned long src_len : 20;
+ } arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+ struct ablkcipher_walk walk;
+ struct list_head chunk_list;
+ struct n2_crypto_chunk chunk;
+ u8 temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size. This means that a cipher block
+ * can span at most 2 descriptors. However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size). So, for example, assuming an 8 byte block size:
+ *
+ * 0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ * 0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
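To make the length rule concrete, here is a minimal standalone check (not part of this patch; the helper name is invented) that accepts or rejects a proposed descriptor length sequence for a given cipher block size:

#include <stdbool.h>
#include <stddef.h>

/* Every per-descriptor length must be at least the cipher block size,
 * and the lengths must add up to a whole number of blocks.  Purely
 * illustrative; the driver's cipher_descriptor_len() keeps each entry
 * a block multiple, which is stricter than this check.
 */
static bool descriptor_lengths_ok(const unsigned int *len, size_t n,
                                  unsigned int block_size)
{
        unsigned int total = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (len[i] < block_size)
                        return false;   /* rejects the trailing 0x7 above */
                total += len[i];
        }
        return n > 0 && (total % block_size) == 0;
}

With a block size of 8, {0xe, 0xa, 0x8} is accepted while {0xe, 0xb, 0x7} is rejected on its final entry, matching the two sequences in the comment above.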
+
+struct n2_cipher_alg {
+ struct list_head entry;
+ u8 enc_type;
+ struct crypto_alg alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+ struct ablkcipher_walk walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->enc_type |= ENC_TYPE_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->enc_type |= ENC_TYPE_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->enc_type |= ENC_TYPE_ALG_AES256;
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.aes, key, keylen);
+ return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int err;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ err = des_ekey(tmp, key);
+ if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des, key, keylen);
+ return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != (3 * DES_KEY_SIZE)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des3, key, keylen);
+ return 0;
+}
+
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u8 *s = ctx->key.arc4;
+ u8 *x = s + 256;
+ u8 *y = x + 1;
+ int i, j, k;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ j = k = 0;
+ *x = 0;
+ *y = 0;
+ for (i = 0; i < 256; i++)
+ s[i] = i;
+ for (i = 0; i < 256; i++) {
+ u8 a = s[i];
+ j = (j + key[k] + a) & 0xff;
+ s[i] = s[j];
+ s[j] = a;
+ if (++k >= keylen)
+ k = 0;
+ }
+
+ return 0;
+}
+
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+ int this_len = nbytes;
+
+ this_len -= (nbytes & (block_size - 1));
+ return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
+
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+ struct spu_queue *qp, bool encrypt)
+{
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ bool in_place;
+ int i;
+
+ ent = spu_queue_alloc(qp, cp->arr_len);
+ if (!ent) {
+ pr_info("queue_alloc() of %d fails\n",
+ cp->arr_len);
+ return -EBUSY;
+ }
+
+ in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+ ent->control = control_word_base(cp->arr[0].src_len,
+ 0, ctx->enc_type, 0, 0,
+ false, true, false, encrypt,
+ OPCODE_ENCRYPT |
+ (in_place ? OPCODE_INPLACE_BIT : 0));
+ ent->src_addr = cp->arr[0].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = __pa(&ctx->key);
+ ent->enc_iv_addr = cp->iv_paddr;
+ ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+ for (i = 1; i < cp->arr_len; i++) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = cp->arr[i].src_len - 1;
+ ent->src_addr = cp->arr[i].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_crypto_chunk *chunk;
+ unsigned long dest_prev;
+ unsigned int tot_len;
+ bool prev_in_place;
+ int err, nbytes;
+
+ ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+ err = ablkcipher_walk_phys(req, walk);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&rctx->chunk_list);
+
+ chunk = &rctx->chunk;
+ INIT_LIST_HEAD(&chunk->entry);
+
+ chunk->iv_paddr = 0UL;
+ chunk->arr_len = 0;
+ chunk->dest_paddr = 0UL;
+
+ prev_in_place = false;
+ dest_prev = ~0UL;
+ tot_len = 0;
+
+ while ((nbytes = walk->nbytes) != 0) {
+ unsigned long dest_paddr, src_paddr;
+ bool in_place;
+ int this_len;
+
+ src_paddr = (page_to_phys(walk->src.page) +
+ walk->src.offset);
+ dest_paddr = (page_to_phys(walk->dst.page) +
+ walk->dst.offset);
+ in_place = (src_paddr == dest_paddr);
+ this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+ if (chunk->arr_len != 0) {
+ if (in_place != prev_in_place ||
+ (!prev_in_place &&
+ dest_paddr != dest_prev) ||
+ chunk->arr_len == N2_CHUNK_ARR_LEN ||
+ tot_len + this_len > (1 << 16)) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry,
+ &rctx->chunk_list);
+ chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+ if (!chunk) {
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&chunk->entry);
+ }
+ }
+ if (chunk->arr_len == 0) {
+ chunk->dest_paddr = dest_paddr;
+ tot_len = 0;
+ }
+ chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+ chunk->arr[chunk->arr_len].src_len = this_len;
+ chunk->arr_len++;
+
+ dest_prev = dest_paddr + this_len;
+ prev_in_place = in_place;
+ tot_len += this_len;
+
+ err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ if (err)
+ break;
+ }
+ if (!err && chunk->arr_len != 0) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry, &rctx->chunk_list);
+ }
+
+ return err;
+}
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_crypto_chunk *c, *tmp;
+
+ if (final_iv)
+ memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+ ablkcipher_walk_complete(&rctx->walk);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ unsigned long flags, hv_ret;
+ struct spu_queue *qp;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, NULL);
+ return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, false);
+}
+
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned long flags, hv_ret, iv_paddr;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ struct spu_queue *qp;
+ void *final_iv_addr;
+
+ final_iv_addr = NULL;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ if (encrypt) {
+ iv_paddr = __pa(rctx->walk.iv);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+ entry) {
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, true);
+ if (err)
+ break;
+ iv_paddr = c->dest_final - rctx->walk.blocksize;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ final_iv_addr = __va(iv_paddr);
+ } else {
+ list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+ entry) {
+ if (c == &rctx->chunk) {
+ iv_paddr = __pa(rctx->walk.iv);
+ } else {
+ iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+ tmp->arr[tmp->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ }
+ if (!final_iv_addr) {
+ unsigned long pa;
+
+ pa = (c->arr[c->arr_len-1].src_paddr +
+ c->arr[c->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ final_iv_addr = rctx->temp_iv;
+ memcpy(rctx->temp_iv, __va(pa),
+ rctx->walk.blocksize);
+ }
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, false);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, err ? NULL : final_iv_addr);
+ return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+ const char *name;
+ const char *drv_name;
+ u8 block_size;
+ u8 enc_type;
+ struct ablkcipher_alg ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+ /* ARC4: only ECB is supported (chaining bits ignored) */
+ { .name = "ecb(arc4)",
+ .drv_name = "ecb-arc4",
+ .block_size = 1,
+ .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 256,
+ .setkey = n2_arc4_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+
+ /* DES: ECB CBC and CFB are supported */
+ { .name = "ecb(des)",
+ .drv_name = "ecb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des)",
+ .drv_name = "cbc-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des)",
+ .drv_name = "cfb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+
+ /* 3DES: ECB CBC and CFB are supported */
+ { .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des3_ede)",
+ .drv_name = "cfb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ /* AES: ECB CBC and CTR are supported */
+ { .name = "ecb(aes)",
+ .drv_name = "ecb-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(aes)",
+ .drv_name = "cbc-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "ctr(aes)",
+ .drv_name = "ctr-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_COUNTER),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_encrypt_chaining,
+ },
+ },
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+ const char *name;
+ int (*digest)(struct ahash_request *req);
+ u8 digest_size;
+ u8 block_size;
+};
+static const struct n2_hash_tmpl hash_tmpls[] = {
+ { .name = "md5",
+ .digest = n2_md5_async_digest,
+ .digest_size = MD5_DIGEST_SIZE,
+ .block_size = MD5_HMAC_BLOCK_SIZE },
+ { .name = "sha1",
+ .digest = n2_sha1_async_digest,
+ .digest_size = SHA1_DIGEST_SIZE,
+ .block_size = SHA1_BLOCK_SIZE },
+ { .name = "sha256",
+ .digest = n2_sha256_async_digest,
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE },
+ { .name = "sha224",
+ .digest = n2_sha224_async_digest,
+ .digest_size = SHA224_DIGEST_SIZE,
+ .block_size = SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+struct n2_ahash_alg {
+ struct list_head entry;
+ struct ahash_alg alg;
+};
+static LIST_HEAD(ahash_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+ struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_ahash_alg *alg, *alg_tmp;
+
+ list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+ crypto_unregister_alg(&cipher->alg);
+ list_del(&cipher->entry);
+ kfree(cipher);
+ }
+ list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+ crypto_unregister_ahash(&alg->alg);
+ list_del(&alg->entry);
+ kfree(alg);
+ }
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ return 0;
+}
+
+static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+ struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct crypto_alg *alg;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ alg = &p->alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->cra_priority = N2_CRA_PRIORITY;
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ alg->cra_blocksize = tmpl->block_size;
+ p->enc_type = tmpl->enc_type;
+ alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_u.ablkcipher = tmpl->ablkcipher;
+ alg->cra_init = n2_cipher_cra_init;
+ alg->cra_module = THIS_MODULE;
+
+ list_add(&p->entry, &cipher_algs);
+ err = crypto_register_alg(alg);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+ struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ struct ahash_alg *ahash;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ ahash = &p->alg;
+ ahash->init = n2_hash_async_init;
+ ahash->update = n2_hash_async_update;
+ ahash->final = n2_hash_async_final;
+ ahash->finup = n2_hash_async_finup;
+ ahash->digest = tmpl->digest;
+
+ halg = &ahash->halg;
+ halg->digestsize = tmpl->digest_size;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+ base->cra_priority = N2_CRA_PRIORITY;
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = tmpl->block_size;
+ base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+ base->cra_module = THIS_MODULE;
+ base->cra_init = n2_hash_cra_init;
+ base->cra_exit = n2_hash_cra_exit;
+
+ list_add(&p->entry, &ahash_algs);
+ err = crypto_register_ahash(ahash);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit n2_register_algs(void)
+{
+ int i, err = 0;
+
+ mutex_lock(&spu_lock);
+ if (algs_registered++)
+ goto out;
+
+ for (i = 0; i < NUM_HASH_TMPLS; i++) {
+ err = __n2_register_one_ahash(&hash_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+ for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+ err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&spu_lock);
+ return err;
+}
+
+static void __exit n2_unregister_algs(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--algs_registered)
+ __n2_unregister_algs();
+ mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino. This isn't very useful to us because all of the
+ * interrupts listed in the of_device node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching them with the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
+static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+ unsigned long dev_ino)
+{
+ const unsigned int *dev_intrs;
+ unsigned int intr;
+ int i;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ if (ip->ino_table[i].ino == dev_ino)
+ break;
+ }
+ if (i == ip->num_intrs)
+ return -ENODEV;
+
+ intr = ip->ino_table[i].intr;
+
+ dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+ if (!dev_intrs)
+ return -ENODEV;
+
+ for (i = 0; i < dev->num_irqs; i++) {
+ if (dev_intrs[i] == intr)
+ return i;
+ }
+
+ return -ENODEV;
+}
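As a concrete illustration of that back-translation, a toy standalone version with made-up table contents (all names and values here are invented):

#include <stdio.h>

struct ino_pair { unsigned long intr, ino; };

/* Toy reproduction of find_devino_index(): map a devino to an index
 * into the OF 'interrupts' list via the MDESC intr/ino tables. */
static int devino_to_irq_index(const struct ino_pair *tbl, int ntbl,
                               const unsigned int *of_intrs, int nirqs,
                               unsigned long dev_ino)
{
        int i, j;

        for (i = 0; i < ntbl; i++)
                if (tbl[i].ino == dev_ino)
                        break;
        if (i == ntbl)
                return -1;
        for (j = 0; j < nirqs; j++)
                if (of_intrs[j] == tbl[i].intr)
                        return j;
        return -1;
}

int main(void)
{
        const struct ino_pair tbl[] = { { 1, 0x2c }, { 2, 0x2d } };
        const unsigned int of_intrs[] = { 2, 1 };

        /* devino 0x2d -> MDESC intr 2 -> OF entry 0. */
        printf("%d\n", devino_to_irq_index(tbl, 2, of_intrs, 2, 0x2d));
        return 0;
}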
+
+static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+ const char *irq_name, struct spu_queue *p,
+ irq_handler_t handler)
+{
+ unsigned long herr;
+ int index;
+
+ herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+ if (herr)
+ return -EINVAL;
+
+ index = find_devino_index(dev, ip, p->devino);
+ if (index < 0)
+ return index;
+
+ p->irq = dev->irqs[index];
+
+ sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+ return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+ p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+ return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+ return kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ queue_cache[HV_NCS_QTYPE_MAU - 1] =
+ kmem_cache_create("mau_queue",
+ (MAU_NUM_ENTRIES *
+ MAU_ENTRY_SIZE),
+ MAU_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ return -ENOMEM;
+
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+ kmem_cache_create("cwq_queue",
+ (CWQ_NUM_ENTRIES *
+ CWQ_ENTRY_SIZE),
+ CWQ_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+}
+
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+ cpumask_var_t old_allowed;
+ unsigned long hv_ret;
+
+ if (cpumask_empty(&p->sharing))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(old_allowed, &current->cpus_allowed);
+
+ set_cpus_allowed_ptr(current, &p->sharing);
+
+ hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+ CWQ_NUM_ENTRIES, &p->qhandle);
+ if (!hv_ret)
+ sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+ set_cpus_allowed_ptr(current, old_allowed);
+
+ free_cpumask_var(old_allowed);
+
+ return (hv_ret ? -EINVAL : 0);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+ int err;
+
+ p->q = new_queue(p->q_type);
+ if (!p->q)
+ return -ENOMEM;
+
+ err = spu_queue_register(p, p->q_type);
+ if (err) {
+ free_queue(p->q, p->q_type);
+ p->q = NULL;
+ }
+
+ return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+ unsigned long hv_ret;
+
+ if (!p->q)
+ return;
+
+ hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+ if (!hv_ret)
+ free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+ struct spu_queue *p, *n;
+
+ list_for_each_entry_safe(p, n, list, list) {
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_to_cwq[i] == p)
+ cpu_to_cwq[i] = NULL;
+ }
+
+ if (p->irq) {
+ free_irq(p->irq, p);
+ p->irq = 0;
+ }
+ spu_queue_destroy(p);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ u64 node, struct spu_queue *p,
+ struct spu_queue **table)
+{
+ u64 arc;
+
+ mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+ u64 tgt = mdesc_arc_target(mdesc, arc);
+ const char *name = mdesc_node_name(mdesc, tgt);
+ const u64 *id;
+
+ if (strcmp(name, "cpu"))
+ continue;
+ id = mdesc_get_property(mdesc, tgt, "id", NULL);
+ if (table[*id] != NULL) {
+ dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
+ dev->node->full_name);
+ return -EINVAL;
+ }
+ cpu_set(*id, p->sharing);
+ table[*id] = p;
+ }
+ return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'. */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+ struct of_device *dev, struct mdesc_handle *mdesc,
+ u64 node, const char *iname, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ struct spu_queue *p;
+ int err;
+
+ p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+ if (!p) {
+ dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
+ dev->node->full_name);
+ return -ENOMEM;
+ }
+
+ cpus_clear(p->sharing);
+ spin_lock_init(&p->lock);
+ p->q_type = q_type;
+ INIT_LIST_HEAD(&p->jobs);
+ list_add(&p->list, list);
+
+ err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+ if (err)
+ return err;
+
+ err = spu_queue_setup(p);
+ if (err)
+ return err;
+
+ return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+ struct spu_mdesc_info *ip, struct list_head *list,
+ const char *exec_name, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ int err = 0;
+ u64 node;
+
+ mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+ const char *type;
+
+ type = mdesc_get_property(mdesc, node, "type", NULL);
+ if (!type || strcmp(type, exec_name))
+ continue;
+
+ err = handle_exec_unit(ip, list, dev, mdesc, node,
+ exec_name, q_type, handler, table);
+ if (err) {
+ spu_list_destroy(list);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
+ struct spu_mdesc_info *ip)
+{
+ const u64 *intr, *ino;
+ int intr_len, ino_len;
+ int i;
+
+ intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
+ if (!intr)
+ return -ENODEV;
+
+ ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+ if (!ino)
+ return -ENODEV;
+
+ if (intr_len != ino_len)
+ return -EINVAL;
+
+ ip->num_intrs = intr_len / sizeof(u64);
+ ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+ ip->num_intrs),
+ GFP_KERNEL);
+ if (!ip->ino_table)
+ return -ENOMEM;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ struct ino_blob *b = &ip->ino_table[i];
+ b->intr = intr[i];
+ b->ino = ino[i];
+ }
+
+ return 0;
+}
+
+static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ struct spu_mdesc_info *ip,
+ const char *node_name)
+{
+ const unsigned int *reg;
+ u64 node;
+
+ reg = of_get_property(dev->node, "reg", NULL);
+ if (!reg)
+ return -ENODEV;
+
+ mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+ const char *name;
+ const u64 *chdl;
+
+ name = mdesc_get_property(mdesc, node, "name", NULL);
+ if (!name || strcmp(name, node_name))
+ continue;
+ chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+ if (!chdl || (*chdl != *reg))
+ continue;
+ ip->cfg_handle = *chdl;
+ return get_irq_props(mdesc, node, ip);
+ }
+
+ return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int __devinit n2_spu_hvapi_register(void)
+{
+ int err;
+
+ n2_spu_hvapi_major = 2;
+ n2_spu_hvapi_minor = 0;
+
+ err = sun4v_hvapi_register(HV_GRP_NCS,
+ n2_spu_hvapi_major,
+ &n2_spu_hvapi_minor);
+
+ if (!err)
+ pr_info("Registered NCS HVAPI version %lu.%lu\n",
+ n2_spu_hvapi_major,
+ n2_spu_hvapi_minor);
+
+ return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+ sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int __devinit grab_global_resources(void)
+{
+ int err = 0;
+
+ mutex_lock(&spu_lock);
+
+ if (global_ref++)
+ goto out;
+
+ err = n2_spu_hvapi_register();
+ if (err)
+ goto out;
+
+ err = queue_cache_init();
+ if (err)
+ goto out_hvapi_release;
+
+ err = -ENOMEM;
+ cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_cwq)
+ goto out_queue_cache_destroy;
+
+ cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_mau)
+ goto out_free_cwq_table;
+
+ err = 0;
+
+out:
+ if (err)
+ global_ref--;
+ mutex_unlock(&spu_lock);
+ return err;
+
+out_free_cwq_table:
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+ queue_cache_destroy();
+
+out_hvapi_release:
+ n2_spu_hvapi_unregister();
+ goto out;
+}
+
+static void release_global_resources(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--global_ref) {
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+ kfree(cpu_to_mau);
+ cpu_to_mau = NULL;
+
+ queue_cache_destroy();
+ n2_spu_hvapi_unregister();
+ }
+ mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto * __devinit alloc_n2cp(void)
+{
+ struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+ if (np)
+ INIT_LIST_HEAD(&np->cwq_list);
+
+ return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+ if (np->cwq_info.ino_table) {
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
+ }
+
+ kfree(np);
+}
+
+static void __devinit n2_spu_driver_version(void)
+{
+ static int n2_spu_version_printed;
+
+ if (n2_spu_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static int __devinit n2_crypto_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_crypto *np;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->node->full_name;
+ pr_info("Found N2CP at %s\n", full_name);
+
+ np = alloc_n2cp();
+ if (!np) {
+ dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_n2cp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+ err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+ "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+ cpu_to_cwq);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ err = n2_register_algs();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
+ full_name);
+ goto out_free_spu_list;
+ }
+
+ dev_set_drvdata(&dev->dev, np);
+
+ return 0;
+
+out_free_spu_list:
+ spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+ release_global_resources();
+
+out_free_n2cp:
+ free_n2cp(np);
+
+ return err;
+}
+
+static int __devexit n2_crypto_remove(struct of_device *dev)
+{
+ struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+
+ n2_unregister_algs();
+
+ spu_list_destroy(&np->cwq_list);
+
+ release_global_resources();
+
+ free_n2cp(np);
+
+ return 0;
+}
+
+static struct n2_mau * __devinit alloc_ncp(void)
+{
+ struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+ if (mp)
+ INIT_LIST_HEAD(&mp->mau_list);
+
+ return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+ if (mp->mau_info.ino_table) {
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
+ }
+
+ kfree(mp);
+}
+
+static int __devinit n2_mau_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_mau *mp;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->node->full_name;
+ pr_info("Found NCP at %s\n", full_name);
+
+ mp = alloc_ncp();
+ if (!mp) {
+ dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_ncp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+
+ err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+ "mau", HV_NCS_QTYPE_MAU, mau_intr,
+ cpu_to_mau);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ dev_set_drvdata(&dev->dev, mp);
+
+ return 0;
+
+out_free_global:
+ release_global_resources();
+
+out_free_ncp:
+ free_ncp(mp);
+
+ return err;
+}
+
+static int __devexit n2_mau_remove(struct of_device *dev)
+{
+ struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+ spu_list_destroy(&mp->mau_list);
+
+ release_global_resources();
+
+ free_ncp(mp);
+
+ return 0;
+}
+
+static struct of_device_id n2_crypto_match[] = {
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,n2-cwq",
+ },
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,vf-cwq",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct of_platform_driver n2_crypto_driver = {
+ .name = "n2cp",
+ .match_table = n2_crypto_match,
+ .probe = n2_crypto_probe,
+ .remove = __devexit_p(n2_crypto_remove),
+};
+
+static struct of_device_id n2_mau_match[] = {
+ {
+ .name = "ncp",
+ .compatible = "SUNW,n2-mau",
+ },
+ {
+ .name = "ncp",
+ .compatible = "SUNW,vf-mau",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct of_platform_driver n2_mau_driver = {
+ .name = "ncp",
+ .match_table = n2_mau_match,
+ .probe = n2_mau_probe,
+ .remove = __devexit_p(n2_mau_remove),
+};
+
+static int __init n2_init(void)
+{
+ int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+
+ if (!err) {
+ err = of_register_driver(&n2_mau_driver, &of_bus_type);
+ if (err)
+ of_unregister_driver(&n2_crypto_driver);
+ }
+ return err;
+}
+
+static void __exit n2_exit(void)
+{
+ of_unregister_driver(&n2_mau_driver);
+ of_unregister_driver(&n2_crypto_driver);
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 00000000000..4bcbbeae98f
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,231 @@
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+ u64 intr;
+ u64 ino;
+};
+
+struct spu_mdesc_info {
+ u64 cfg_handle;
+ struct ino_blob *ino_table;
+ int num_intrs;
+};
+
+struct n2_crypto {
+ struct spu_mdesc_info cwq_info;
+ struct list_head cwq_list;
+};
+
+struct n2_mau {
+ struct spu_mdesc_info mau_info;
+ struct list_head mau_list;
+};
+
+#define CWQ_ENTRY_SIZE 64
+#define CWQ_NUM_ENTRIES 64
+
+#define MAU_ENTRY_SIZE 64
+#define MAU_NUM_ENTRIES 64
+
+struct cwq_initial_entry {
+ u64 control;
+ u64 src_addr;
+ u64 auth_key_addr;
+ u64 auth_iv_addr;
+ u64 final_auth_state_addr;
+ u64 enc_key_addr;
+ u64 enc_iv_addr;
+ u64 dest_addr;
+};
+
+struct cwq_ext_entry {
+ u64 len;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+struct cwq_final_entry {
+ u64 control;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+#define CONTROL_LEN 0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT 0
+#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT 16
+#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT 24
+#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
+#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
+#define ENC_TYPE_ALG_DES 0x08ULL
+#define ENC_TYPE_ALG_3DES 0x0cULL
+#define ENC_TYPE_ALG_AES128 0x10ULL
+#define ENC_TYPE_ALG_AES192 0x14ULL
+#define ENC_TYPE_ALG_AES256 0x18ULL
+#define ENC_TYPE_ALG_RESERVED 0x1cULL
+#define ENC_TYPE_ALG_MASK 0x1cULL
+#define ENC_TYPE_CHAINING_ECB 0x00ULL
+#define ENC_TYPE_CHAINING_CBC 0x01ULL
+#define ENC_TYPE_CHAINING_CFB 0x02ULL
+#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
+#define ENC_TYPE_CHAINING_MASK 0x03ULL
+#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT 32
+#define AUTH_TYPE_RESERVED 0x00ULL
+#define AUTH_TYPE_MD5 0x01ULL
+#define AUTH_TYPE_SHA1 0x02ULL
+#define AUTH_TYPE_SHA256 0x03ULL
+#define AUTH_TYPE_CRC32 0x04ULL
+#define AUTH_TYPE_HMAC_MD5 0x05ULL
+#define AUTH_TYPE_HMAC_SHA1 0x06ULL
+#define AUTH_TYPE_HMAC_SHA256 0x07ULL
+#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
+#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
+#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
+#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
+#define CONTROL_STRAND 0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT 37
+#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT 40
+#define CONTROL_INTERRUPT 0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
+#define CONTROL_RESERVED 0x001c000000000000ULL
+#define CONTROL_HV_DONE 0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
+#define CONTROL_ENCRYPT 0x0080000000000000ULL
+#define CONTROL_OPCODE 0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT 56
+#define OPCODE_INPLACE_BIT 0x80ULL
+#define OPCODE_SSL_KEYBLOCK 0x10ULL
+#define OPCODE_COPY 0x20ULL
+#define OPCODE_ENCRYPT 0x40ULL
+#define OPCODE_AUTH_MAC 0x41ULL
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU 0x01
+#define HV_NCS_QTYPE_CWQ 0x02
+
+/* ncs_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QCONF
+ * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1: Real address of queue, or handle for unconfigure
+ * ARG2: Number of entries in queue, zero for unconfigure
+ * RET0: status
+ * RET1: queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64. The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue. The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF 0x111
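+
+/*
+ * Usage sketch (illustration only, not part of the driver): configuring
+ * and later unconfiguring a CWQ with sun4v_ncs_qconf() per the protocol
+ * above.  The variable names and the -EIO mapping are assumptions made
+ * for this example.
+ *
+ * unsigned long qhandle, hv_ret;
+ *
+ * hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, queue_real_addr,
+ *                          CWQ_NUM_ENTRIES, &qhandle);
+ * if (hv_ret)
+ *         return -EIO;
+ *
+ * sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
+ */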
+
+/* ncs_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QINFO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2: Queue base address
+ * RET3: Number of entries
+ */
+#define HV_FAST_NCS_QINFO 0x112
+
+/* ncs_gethead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETHEAD
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD 0x113
+
+/* ncs_gettail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETTAIL
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL 0x114
+
+/* ncs_settail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETTAIL
+ * ARG0: Queue handle
+ * ARG1: New tail offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETTAIL 0x115
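+
+/*
+ * Submission sketch (illustration only): a producer typically reads the
+ * current tail, writes one entry at that offset in the queue's memory,
+ * then advances the tail via ncs_settail().  The modulo wrap-around shown
+ * here is an assumption for the example, not part of the documented ABI.
+ *
+ * unsigned long tail;
+ *
+ * sun4v_ncs_gettail(qhandle, &tail);
+ * ... copy one CWQ_ENTRY_SIZE entry to queue_base + tail ...
+ * tail = (tail + CWQ_ENTRY_SIZE) % (CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE);
+ * sun4v_ncs_settail(qhandle, tail);
+ */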
+
+/* ncs_qhandle_to_devino()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
+
+/* ncs_sethead_marker()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0: Queue handle
+ * ARG1: New head offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER 0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+ unsigned long queue_ra,
+ unsigned long num_entries,
+ unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+ unsigned long *queue_type,
+ unsigned long *queue_ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+ unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+ unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+ unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+ unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+ unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 00000000000..8b034337793
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE 16
+
+#define SHA_REG_DIGCNT 0x14
+
+#define SHA_REG_CTRL 0x18
+#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
+#define SHA_REG_CTRL_ALGO (1 << 2)
+#define SHA_REG_CTRL_INPUT_READY (1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
+
+#define SHA_REG_REV 0x5C
+#define SHA_REG_REV_MAJOR 0xF0
+#define SHA_REG_REV_MINOR 0x0F
+
+#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK_DMA_EN (1 << 3)
+#define SHA_REG_MASK_IT_EN (1 << 2)
+#define SHA_REG_MASK_SOFTRESET (1 << 1)
+#define SHA_REG_AUTOIDLE (1 << 0)
+
+#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL HZ
+
+#define FLAGS_FIRST 0x0001
+#define FLAGS_FINUP 0x0002
+#define FLAGS_FINAL 0x0004
+#define FLAGS_FAST 0x0008
+#define FLAGS_SHA1 0x0010
+#define FLAGS_DMA_ACTIVE 0x0020
+#define FLAGS_OUTPUT_READY 0x0040
+#define FLAGS_CLEAN 0x0080
+#define FLAGS_INIT 0x0100
+#define FLAGS_CPU 0x0200
+#define FLAGS_HMAC 0x0400
+
+/* 3rd byte: FLAGS_BUSY is a bit number for test/set/clear_bit, not a mask */
+#define FLAGS_BUSY 16
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+ struct omap_sham_dev *dd;
+ unsigned long flags;
+ unsigned long op;
+
+ size_t digcnt;
+ u8 *buffer;
+ size_t bufcnt;
+ size_t buflen;
+ dma_addr_t dma_addr;
+
+ /* walk state */
+ struct scatterlist *sg;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* total request */
+};
+
+struct omap_sham_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA1_MD5_BLOCK_SIZE];
+ u8 opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+ struct omap_sham_dev *dd;
+
+ unsigned long flags;
+
+ /* fallback stuff */
+ struct crypto_shash *fallback;
+
+ struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH 1
+
+struct omap_sham_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ struct device *dev;
+ void __iomem *io_base;
+ int irq;
+ struct clk *iclk;
+ spinlock_t lock;
+ int dma;
+ int dma_lch;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct omap_sham_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+ unsigned long flags;
+};
+
+static struct omap_sham_drv sham = {
+ .dev_list = LIST_HEAD_INIT(sham.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+ u32 value, u32 mask)
+{
+ u32 val;
+
+ val = omap_sham_read(dd, address);
+ val &= ~mask;
+ val |= value;
+ omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+ unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+ while (!(omap_sham_read(dd, offset) & bit)) {
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)req->result;
+ int i;
+
+ if (likely(ctx->flags & FLAGS_SHA1)) {
+ /* SHA1 results are in big endian */
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_be32(hash[i]));
+ } else {
+ /* MD5 results are in little endian */
+ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_le32(hash[i]));
+ }
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val = length << 5, mask;
+
+ if (unlikely(!ctx->digcnt)) {
+
+ clk_enable(dd->iclk);
+
+ if (!(dd->flags & FLAGS_INIT)) {
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+ if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+ SHA_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
+
+ dd->flags |= FLAGS_INIT;
+ }
+ } else {
+ omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ }
+
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+ /*
+ * Setting ALGO_CONST only for the first iteration
+ * and CLOSE_HASH only for the last one.
+ */
+ if (ctx->flags & FLAGS_SHA1)
+ val |= SHA_REG_CTRL_ALGO;
+ if (!ctx->digcnt)
+ val |= SHA_REG_CTRL_ALGO_CONST;
+ if (final)
+ val |= SHA_REG_CTRL_CLOSE_HASH;
+
+ mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+ SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+ return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ err = omap_sham_write_ctrl(dd, length, final, 0);
+ if (err)
+ return err;
+
+ if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ return -ETIMEDOUT;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ for (count = 0; count < len32; count++)
+ omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+ return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, len32;
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ /* flush cache entries related to our page */
+ if (dma_addr == ctx->dma_addr)
+ dma_sync_single_for_device(dd->dev, dma_addr, length,
+ DMA_TO_DEVICE);
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+ 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+ omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+
+ err = omap_sham_write_ctrl(dd, length, final, 1);
+ if (err)
+ return err;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= FLAGS_DMA_ACTIVE;
+
+ omap_start_dma(dd->dma_lch);
+
+ return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+ const u8 *data, size_t length)
+{
+ size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+ count = min(count, ctx->total);
+ if (count <= 0)
+ return 0;
+ memcpy(ctx->buffer + ctx->bufcnt, data, count);
+ ctx->bufcnt += count;
+
+ return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+ size_t count;
+
+ while (ctx->sg) {
+ count = omap_sham_append_buffer(ctx,
+ sg_virt(ctx->sg) + ctx->offset,
+ ctx->sg->length - ctx->offset);
+ if (!count)
+ break;
+ ctx->offset += count;
+ ctx->total -= count;
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int final;
+ size_t count;
+
+ if (!ctx->total)
+ return 0;
+
+ omap_sham_append_sg(ctx);
+
+ final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+ ctx->bufcnt, ctx->digcnt, final);
+
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int length;
+
+ ctx->flags |= FLAGS_FAST;
+
+ length = min(ctx->total, sg_dma_len(ctx->sg));
+ ctx->total = length;
+
+ if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->total -= length;
+
+ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int bufcnt;
+
+ omap_sham_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ omap_stop_dma(dd->dma_lch);
+ if (ctx->flags & FLAGS_FAST)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (ctx->flags & FLAGS_CLEAN) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return;
+ }
+ ctx->flags |= FLAGS_CLEAN;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (ctx->digcnt)
+ clk_disable(dd->iclk);
+
+ if (ctx->dma_addr)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ if (ctx->buffer)
+ free_page((unsigned long)ctx->buffer);
+
+ dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = NULL, *tmp;
+
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
+ ctx->dd = dd;
+
+ ctx->flags = 0;
+
+ ctx->flags |= FLAGS_FIRST;
+
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ ctx->flags |= FLAGS_SHA1;
+
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buffer = (void *)__get_free_page(
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ctx->buffer)
+ return -ENOMEM;
+
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+ free_page((unsigned long)ctx->buffer);
+ return -EINVAL;
+ }
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ ctx->flags |= FLAGS_HMAC;
+ }
+
+ return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err;
+
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+ ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+ if (ctx->flags & FLAGS_CPU)
+ err = omap_sham_update_cpu(dd);
+ else if (ctx->flags & FLAGS_FAST)
+ err = omap_sham_update_dma_fast(dd);
+ else
+ err = omap_sham_update_dma_slow(dd);
+
+ /* wait for DMA completion before we can take more data */
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+ return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0, use_dma = 1;
+
+ if (ctx->bufcnt <= 64)
+ /* faster to handle last block with cpu */
+ use_dma = 0;
+
+ if (use_dma)
+ err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+ else
+ err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+ ctx->bufcnt = 0;
+
+ if (err != -EINPROGRESS)
+ omap_sham_cleanup(req);
+
+ dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(bctx->shash)];
+ } desc;
+
+ desc.shash.tfm = bctx->shash;
+ desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+ crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!err) {
+ omap_sham_copy_hash(ctx->dd->req, 1);
+ if (ctx->flags & FLAGS_HMAC)
+ err = omap_sham_finish_req_hmac(req);
+ }
+
+ if (ctx->flags & FLAGS_FINAL)
+ omap_sham_cleanup(req);
+
+ clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_sham_reqctx *ctx;
+ struct ahash_request *req, *prev_req;
+ unsigned long flags;
+ int err = 0;
+
+ if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+ return 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+
+ prev_req = dd->req;
+ dd->req = req;
+
+ ctx = ahash_request_ctx(req);
+
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ ctx->op, req->nbytes);
+
+ if (req != prev_req && ctx->digcnt)
+ /* request has changed - restore hash */
+ omap_sham_copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ /* no final() after finup() */
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ err = omap_sham_final_req(dd);
+ }
+
+ if (err != -EINPROGRESS) {
+ /* done_task will not finish it, so do it here */
+ omap_sham_finish_req(req, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_dev *dd = tctx->dd;
+ unsigned long flags;
+ int err;
+
+ ctx->op = op;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ahash_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ omap_sham_handle_queue(dd);
+
+ return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ if (ctx->flags & FLAGS_FINUP) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ /*
+ * OMAP HW accel works only with buffers >= 9 bytes;
+ * will switch to the software fallback in final().
+ * final() sees the same request and data.
+ */
+ omap_sham_append_sg(ctx);
+ return 0;
+ } else if (ctx->bufcnt + ctx->total <= 64) {
+ ctx->flags |= FLAGS_CPU;
+ } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+ /* maybe we can use faster functions */
+ int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+ sizeof(u32));
+
+ if (aligned && (ctx->flags & FLAGS_FIRST))
+ /* digest: first and final */
+ ctx->flags |= FLAGS_FAST;
+
+ ctx->flags &= ~FLAGS_FIRST;
+ }
+ } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+ /* if not finup -> not fast */
+ omap_sham_append_sg(ctx);
+ return 0;
+ }
+
+ return omap_sham_enqueue(req, OP_UPDATE);
+}
+
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(shash)];
+ } desc;
+
+ desc.shash.tfm = shash;
+ desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ /* OMAP HW accel works only with buffers >= 9 bytes */
+ /* HMAC is always >= 9 bytes because of the ipad */
+ if ((ctx->digcnt + ctx->bufcnt) < 9)
+ err = omap_sham_final_shash(req);
+ else if (ctx->bufcnt)
+ return omap_sham_enqueue(req, OP_FINAL);
+
+ omap_sham_cleanup(req);
+
+ return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ err1 = omap_sham_update(req);
+ if (err1 == -EINPROGRESS)
+ return err1;
+ /*
+ * final() has to be always called to cleanup resources
+ * even if update() failed, except EINPROGRESS
+ */
+ err2 = omap_sham_final(req);
+
+ return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+ return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int err, i;
+
+ err = crypto_shash_setkey(tctx->fallback, key, keylen);
+ if (err)
+ return err;
+
+ if (keylen > bs) {
+ err = omap_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
+
+ return err;
+}
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("omap-sham: fallback driver '%s' "
+ "could not be loaded.\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct omap_sham_reqctx));
+
+ if (alg_base) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ tctx->flags |= FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ pr_err("omap-sham: base driver '%s' "
+ "could not be loaded.\n", alg_base);
+ crypto_free_shash(tctx->fallback);
+ return PTR_ERR(bctx->shash);
+ }
+
+ }
+
+ return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static struct ahash_alg algs[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "omap-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "omap-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "omap-hmac-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha1_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "omap-hmac-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_md5_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int ready = 0;
+
+ if (ctx->flags & FLAGS_OUTPUT_READY) {
+ ctx->flags &= ~FLAGS_OUTPUT_READY;
+ ready = 1;
+ }
+
+ if (dd->flags & FLAGS_DMA_ACTIVE) {
+ dd->flags &= ~FLAGS_DMA_ACTIVE;
+ omap_sham_update_dma_stop(dd);
+ omap_sham_update_dma_slow(dd);
+ }
+
+ if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+ dev_dbg(dd->dev, "update done\n");
+ /* finish current request */
+ omap_sham_finish_req(req, 0);
+ /* start new request */
+ omap_sham_handle_queue(dd);
+ }
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+ omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ if (!ctx) {
+ dev_err(dd->dev, "unknown interrupt.\n");
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(ctx->flags & FLAGS_FINAL))
+ /* final -> allow device to go to power-saving mode */
+ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+ SHA_REG_CTRL_OUTPUT_READY);
+ omap_sham_read(dd, SHA_REG_CTRL);
+
+ ctx->flags |= FLAGS_OUTPUT_READY;
+ tasklet_schedule(&dd->done_task);
+
+ return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_sham_dev *dd = data;
+
+ if (likely(lch == dd->dma_lch))
+ tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+ int err;
+
+ dd->dma_lch = -1;
+
+ err = omap_request_dma(dd->dma, dev_name(dd->dev),
+ omap_sham_dma_callback, dd, &dd->dma_lch);
+ if (err) {
+ dev_err(dd->dev, "Unable to request DMA channel\n");
+ return err;
+ }
+ omap_set_dma_dest_params(dd->dma_lch, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_16);
+
+ return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+ if (dd->dma_lch >= 0) {
+ omap_free_dma(dd->dma_lch);
+ dd->dma_lch = -1;
+ }
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int err, i, j;
+
+ dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto data_err;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ INIT_LIST_HEAD(&dd->list);
+ spin_lock_init(&dd->lock);
+ tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+ crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+ dd->irq = -1;
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->phys_base = res->start;
+
+ /* Get the DMA */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->dma = res->start;
+
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(dd->irq, omap_sham_irq,
+ IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ if (err) {
+ dev_err(dev, "unable to request irq.\n");
+ goto res_err;
+ }
+
+ err = omap_sham_dma_init(dd);
+ if (err)
+ goto dma_err;
+
+ /* Initializing the clock */
+ dd->iclk = clk_get(dev, "ick");
+ if (!dd->iclk) {
+ dev_err(dev, "clock intialization failed.\n");
+ err = -ENODEV;
+ goto clk_err;
+ }
+
+ dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto io_err;
+ }
+
+ clk_enable(dd->iclk);
+ dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+ (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+ omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+ clk_disable(dd->iclk);
+
+ spin_lock(&sham.lock);
+ list_add_tail(&dd->list, &sham.dev_list);
+ spin_unlock(&sham.lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ err = crypto_register_ahash(&algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ return 0;
+
+err_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&algs[j]);
+ iounmap(dd->io_base);
+io_err:
+ clk_put(dd->iclk);
+clk_err:
+ omap_sham_dma_cleanup(dd);
+dma_err:
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+res_err:
+ kfree(dd);
+ dd = NULL;
+data_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ int i;
+
+ dd = platform_get_drvdata(pdev);
+ if (!dd)
+ return -ENODEV;
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_ahash(&algs[i]);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ iounmap(dd->io_base);
+ clk_put(dd->iclk);
+ omap_sham_dma_cleanup(dd);
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+ kfree(dd);
+ dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+ .probe = omap_sham_probe,
+ .remove = omap_sham_remove,
+ .driver = {
+ .name = "omap-sham",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_sham_mod_init(void)
+{
+ pr_info("loading %s driver\n", "omap-sham");
+
+ if (!cpu_class_is_omap2() ||
+ omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+ pr_err("Unsupported cpu\n");
+ return -ENODEV;
+ }
+
+ return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+ platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc558a09731..637c105f53d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
+static const struct talitos_ptr zero_entry = {
+ .len = 0,
+ .j_extent = 0,
+ .eptr = 0,
+ .ptr = 0
+};
+
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
#define TALITOS_MAX_KEY_SIZE 64
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
struct talitos_ctx {
struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
unsigned int authsize;
};
+#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+ u64 count;
+ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+ unsigned int hw_context_size;
+ u8 buf[HASH_MAX_BLOCK_SIZE];
+ u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ unsigned int swinit;
+ unsigned int first;
+ unsigned int last;
+ unsigned int to_hash_later;
+ struct scatterlist bufsl[2];
+ struct scatterlist *psrc;
+};
+
static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- if (edesc->dst_is_chained)
- talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
- else
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst) {
+ if (edesc->dst_is_chained)
+ talitos_unmap_sg_chain(dev, dst,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(dev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
} else
if (edesc->src_is_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
return sg_nents;
}
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ * @skip: The number of bytes to skip before copying.
+ * Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, unsigned int skip)
+{
+ unsigned int offset = 0;
+ unsigned int boffset = 0;
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ size_t total_buffer = buflen + skip;
+
+ sg_flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ local_irq_save(flags);
+
+ while (sg_miter_next(&miter) && offset < total_buffer) {
+ unsigned int len;
+ unsigned int ignore;
+
+ if ((offset + miter.length) > skip) {
+ if (offset < skip) {
+ /* Copy part of this segment */
+ ignore = skip - offset;
+ len = miter.length - ignore;
+ memcpy(buf + boffset, miter.addr + ignore, len);
+ } else {
+ /* Copy all of this segment */
+ len = miter.length;
+ memcpy(buf + boffset, miter.addr, len);
+ }
+ boffset += len;
+ }
+ offset += miter.length;
+ }
+
+ sg_miter_stop(&miter);
+
+ local_irq_restore(flags);
+ return boffset;
+}
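+
+/*
+ * Usage sketch (illustration only; names are hypothetical): to capture the
+ * trailing 'tail' bytes of an SG list holding 'total' bytes, skip the first
+ * 'total - tail' bytes:
+ *
+ * sg_copy_end_to_buffer(sgl, nents, tailbuf, tail, total - tail);
+ */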
+
/*
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct scatterlist *src,
struct scatterlist *dst,
+ int hash_result,
unsigned int cryptlen,
unsigned int authsize,
int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (dst == src) {
- dst_nents = src_nents;
+ if (hash_result) {
+ dst_nents = 0;
} else {
- dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ if (dst == src) {
+ dst_nents = src_nents;
+ } else {
+ dst_nents = sg_count(dst, cryptlen + authsize,
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
}
/*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc->src_is_chained = src_chained;
edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ if (dma_len)
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
return edesc;
}
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
areq->cryptlen, ctx->authsize, icv_stashing,
areq->base.flags);
}
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
- 0, 0, areq->base.flags);
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+ areq->nbytes, 0, 0, areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
+static void common_nonsnoop_hash_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+ /* When using hashctx-in, must unmap it. */
+ if (edesc->desc.ptr[1].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ DMA_TO_DEVICE);
+
+ if (edesc->desc.ptr[2].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+ DMA_TO_DEVICE);
+
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+
+}
+
+static void ahash_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+{
+ struct ahash_request *areq = context;
+ struct talitos_edesc *edesc =
+ container_of(desc, struct talitos_edesc, desc);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ if (!req_ctx->last && req_ctx->to_hash_later) {
+ /* Position any partial block for next update/final/finup */
+ memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ }
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+ kfree(edesc);
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ struct ahash_request *areq, unsigned int length,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+ int sg_count, ret;
+
+ /* first DWORD empty */
+ desc->ptr[0] = zero_entry;
+
+ /* hash context in */
+ if (!req_ctx->first || req_ctx->swinit) {
+ map_single_talitos_ptr(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ (char *)req_ctx->hw_context, 0,
+ DMA_TO_DEVICE);
+ req_ctx->swinit = 0;
+ } else {
+ desc->ptr[1] = zero_entry;
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
+ }
+
+ /* HMAC key */
+ if (ctx->keylen)
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+ (char *)&ctx->key, 0, DMA_TO_DEVICE);
+ else
+ desc->ptr[2] = zero_entry;
+
+ /*
+ * data in
+ */
+ desc->ptr[3].len = cpu_to_be16(length);
+ desc->ptr[3].j_extent = 0;
+
+ sg_count = talitos_map_sg(dev, req_ctx->psrc,
+ edesc->src_nents ? : 1,
+ DMA_TO_DEVICE,
+ edesc->src_is_chained);
+
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+ } else {
+ sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+ &edesc->link_tbl[0]);
+ if (sg_count > 1) {
+ desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+ dma_sync_single_for_device(ctx->dev,
+ edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else {
+ /* Only one segment now, so no link tbl needed */
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(req_ctx->psrc));
+ }
+ }
+
+ /* fifth DWORD empty */
+ desc->ptr[4] = zero_entry;
+
+ /* hash/HMAC out -or- hash context out */
+ if (req_ctx->last)
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ crypto_ahash_digestsize(tfm),
+ areq->result, 0, DMA_FROM_DEVICE);
+ else
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+ /* last DWORD empty */
+ desc->ptr[6] = zero_entry;
+
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+ unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+ nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ /* Initialize the context */
+ req_ctx->count = 0;
+ req_ctx->first = 1; /* first indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ req_ctx->hw_context_size =
+ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
+
+ req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+ req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+ req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+ req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+ req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+ req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+ req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+ req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ return 0;
+}
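
SHA-224 shares the SHA-256 compression function; only the initial hash value and the output length differ, which is what makes the software-init trick above possible. A minimal standalone sketch of that idea, assuming a userspace build: sha224_swinit() and ctx_words[] are names invented for this illustration (the driver itself fills req_ctx->hw_context[] with the kernel's SHA224_H* constants), and htonl() stands in for cpu_to_be32().

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */

/*
 * Illustrative only: preload the SHA-224 initial hash value into a raw
 * context image so that SHA-256 hardware produces SHA-224 (after the
 * caller truncates the result to 28 bytes).
 */
static void sha224_swinit(uint32_t ctx_words[10])
{
	static const uint32_t sha224_iv[8] = {
		0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
		0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
	};
	int i;

	for (i = 0; i < 8; i++)
		ctx_words[i] = htonl(sha224_iv[i]);	/* big-endian words */
	ctx_words[8] = 0;				/* 64-bit message ... */
	ctx_words[9] = 0;				/* ... bit count = 0 */
}
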
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int nbytes_to_hash;
+ unsigned int to_hash_later;
+ unsigned int index;
+ int chained;
+
+ index = req_ctx->count & (blocksize - 1);
+ req_ctx->count += nbytes;
+
+ if (!req_ctx->last && (index + nbytes) < blocksize) {
+ /* Buffer the partial block */
+ sg_copy_to_buffer(areq->src,
+ sg_count(areq->src, nbytes, &chained),
+ req_ctx->buf + index, nbytes);
+ return 0;
+ }
+
+ if (index) {
+ /* partial block from previous update; chain it in. */
+ sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+ sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+ if (nbytes)
+ scatterwalk_sg_chain(req_ctx->bufsl, 2,
+ areq->src);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else {
+ req_ctx->psrc = areq->src;
+ }
+ nbytes_to_hash = index + nbytes;
+ if (!req_ctx->last) {
+ to_hash_later = (nbytes_to_hash & (blocksize - 1));
+ if (to_hash_later) {
+ int nents;
+ /* Must copy to_hash_later bytes from the end
+ * to bufnext (a partial block) for later.
+ */
+ nents = sg_count(areq->src, nbytes, &chained);
+ sg_copy_end_to_buffer(areq->src, nents,
+ req_ctx->bufnext,
+ to_hash_later,
+ nbytes - to_hash_later);
+
+ /* Adjust count for what will be hashed now */
+ nbytes_to_hash -= to_hash_later;
+ }
+ req_ctx->to_hash_later = to_hash_later;
+ }
+
+ /* allocate extended descriptor */
+ edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ edesc->desc.hdr = ctx->desc_hdr_template;
+
+ /* On last one, request SEC to pad; otherwise continue */
+ if (req_ctx->last)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+ else
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+ /* request SEC to INIT hash. */
+ if (req_ctx->first && !req_ctx->swinit)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+ /* When the tfm context has a keylen, it's an HMAC.
+ * A first or last (i.e. not middle) descriptor must request HMAC.
+ */
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ ahash_done);
+}
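
The splitting logic in ahash_process_req() boils down to block-boundary arithmetic: hash a whole number of blocks now and carry the remainder over to the next update, unless this is the final request. A small self-contained sketch of just that arithmetic follows; split_update() is a hypothetical helper written for the illustration, not part of the driver.

#include <stdio.h>

/*
 * "buffered" is what was carried over from the previous update, "nbytes"
 * is the new data and "last" marks a final()/finup() style request.
 */
static void split_update(unsigned int buffered, unsigned int nbytes,
			 unsigned int blocksize, int last)
{
	unsigned int nbytes_to_hash = buffered + nbytes;
	unsigned int to_hash_later = 0;

	if (!last)
		to_hash_later = nbytes_to_hash & (blocksize - 1);
	nbytes_to_hash -= to_hash_later;

	printf("hash now: %u, keep for later: %u\n",
	       nbytes_to_hash, to_hash_later);
}

int main(void)
{
	split_update(12, 100, 64, 0);	/* 112 bytes total: hash 64, keep 48 */
	split_update(48, 20, 64, 1);	/* final: hash all 68, keep 0 */
	return 0;
}
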
+
+static int ahash_update(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 0;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+ ahash->init(areq);
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
struct talitos_alg_template {
- struct crypto_alg alg;
+ u32 type;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
/* ABLKCIPHER algorithms. */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES,
- }
+ },
+ /* AHASH algorithms. */
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-talitos",
+ .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_MD5,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-talitos",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA1,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-talitos",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA224,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-talitos",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-talitos",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA384,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-talitos",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA512,
+ },
};
struct talitos_crypto_alg {
struct list_head entry;
struct device *dev;
- __be32 desc_hdr_template;
- struct crypto_alg crypto_alg;
+ struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
/* copy descriptor header template value */
- ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+ ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+ return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
/* random first IV */
get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
+
+ ctx->keylen = 0;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct talitos_ahash_req_ctx));
+
+ return 0;
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
int i;
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_alg(&t_alg->algt.alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&t_alg->algt.alg.hash);
+ break;
+ }
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_alg_template
*template)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!t_alg)
return ERR_PTR(-ENOMEM);
- alg = &t_alg->crypto_alg;
- *alg = template->alg;
+ t_alg->algt = *template;
+
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init_aead;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ alg = &t_alg->algt.alg.hash.halg.base;
+ alg->cra_init = talitos_cra_init_ahash;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strcmp(alg->cra_name, "sha224")) {
+ t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+ t_alg->algt.desc_hdr_template =
+ DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256;
+ }
+ break;
+ }
alg->cra_module = THIS_MODULE;
- alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
- t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
return t_alg;
@@ -1807,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
const unsigned int *prop;
int i, err;
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
if (of_device_is_compatible(np, "fsl,sec2.1"))
- priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+ priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+ TALITOS_FTR_SHA224_HWINIT;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
+ char *name = NULL;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
- err = crypto_register_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = crypto_register_alg(
+ &t_alg->algt.alg.crypto);
+ name = t_alg->algt.alg.crypto.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ err = crypto_register_ahash(
+ &t_alg->algt.alg.hash);
+ name =
+ t_alg->algt.alg.hash.halg.base.cra_driver_name;
+ break;
+ }
if (err) {
dev_err(dev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ name);
kfree(t_alg);
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
- dev_info(dev, "%s\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_info(dev, "%s\n", name);
}
}
}
@@ -1968,8 +2573,11 @@ static const struct of_device_id talitos_match[] = {
MODULE_DEVICE_TABLE(of, talitos_match);
static struct of_platform_driver talitos_driver = {
- .name = "talitos",
- .match_table = talitos_match,
+ .driver = {
+ .name = "talitos",
+ .owner = THIS_MODULE,
+ .of_match_table = talitos_match,
+ },
.probe = talitos_probe,
.remove = talitos_remove,
};
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a1450e14..0b746aca458 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
- * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -130,6 +130,9 @@
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48
+
/*
* talitos descriptor header (hdr) bits
*/
@@ -157,12 +160,16 @@
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000)
#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000)
#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -181,9 +188,12 @@
#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
+#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300)
#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c27f80e5d53..9e01e96fee9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -141,6 +141,13 @@ config COH901318
help
Enable support for ST-Ericsson COH 901 318 DMA.
+config STE_DMA40
+ bool "ST-Ericsson DMA40 support"
+ depends on ARCH_U8500
+ select DMA_ENGINE
+ help
+ Enable support for the ST-Ericsson DMA40 controller.
+
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
@@ -149,9 +156,25 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+ depends on MFD_TIMBERDALE || HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for the Timberdale FPGA DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
+config PL330_DMA
+ tristate "DMA API Driver for PL330"
+ select DMA_ENGINE
+ depends on PL330
+ help
+ Select if your platform has one or more PL330 DMACs.
+ You need to provide platform specific settings via
+ platform_data for a dma-pl330 device.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 22bba3d5e2b..0fe5ebbfda5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,3 +20,6 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 278cf5bceef..bd5250e8c00 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -760,13 +760,18 @@ err_desc_get:
return NULL;
}
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -790,32 +795,30 @@ static void atc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+
+ return 0;
}
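
This is the first of several conversions in the patch from a dedicated terminate-all hook to the generic device_control() callback, where an enum selects the action and unsupported commands return -ENXIO. A simplified sketch of the pattern, with stand-in types (struct dma_chan is left opaque and demo_control() is a made-up name):

#include <errno.h>

/* simplified stand-ins for the dmaengine types used above */
enum dma_ctrl_cmd { DMA_TERMINATE_ALL, DMA_PAUSE, DMA_RESUME };
struct dma_chan;

static int demo_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	(void)chan;
	(void)arg;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* stop the channel and flush active and queued descriptors */
		return 0;
	default:
		return -ENXIO;	/* command not implemented by this driver */
	}
}
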
/**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
* @chan: DMA channel
* @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL updated with transaction state
*
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
* internal state and can be used with dma_async_is_complete() to check
* the status of multiple cookies without re-checking hardware state.
*/
static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status ret;
- dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
- cookie, done ? *done : 0, used ? *used : 0);
-
spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
@@ -833,10 +836,10 @@ atc_is_tx_complete(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+ cookie, last_complete ? last_complete : 0,
+ last_used ? last_used : 0);
return ret;
}
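
Throughout the patch, the old done/used out-parameters are folded into a single struct dma_tx_state filled by dma_set_tx_state(). The sketch below shows the assumed semantics of that helper; the field names and widths are illustrative, not copied from the dmaengine header.

/* illustrative definitions only */
typedef int dma_cookie_t;

struct dma_tx_state {
	dma_cookie_t last;	/* last completed cookie */
	dma_cookie_t used;	/* last issued cookie */
	unsigned int residue;	/* bytes left in the in-flight transfer */
};

static inline void dma_set_tx_state(struct dma_tx_state *st,
				    dma_cookie_t last, dma_cookie_t used,
				    unsigned int residue)
{
	if (!st)	/* callers may pass NULL when only the status matters */
		return;
	st->last = last;
	st->used = used;
	st->residue = residue;
}
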
@@ -1082,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* set base routines */
atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
- atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+ atdma->dma_common.device_tx_status = atc_tx_status;
atdma->dma_common.device_issue_pending = atc_issue_pending;
atdma->dma_common.dev = &pdev->dev;
@@ -1092,7 +1095,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- atdma->dma_common.device_terminate_all = atc_terminate_all;
+ atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1656fdcdb6c..a724e6be1b4 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -37,7 +37,7 @@ struct coh901318_desc {
struct list_head node;
struct scatterlist *sg;
unsigned int sg_len;
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
};
@@ -283,7 +283,7 @@ static int coh901318_start(struct coh901318_chan *cohc)
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *data)
+ struct coh901318_lli *lli)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
@@ -292,18 +292,18 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
COH901318_CX_STAT_SPACING*channel) &
COH901318_CX_STAT_ACTIVE);
- writel(data->src_addr,
+ writel(lli->src_addr,
virtbase + COH901318_CX_SRC_ADDR +
COH901318_CX_SRC_ADDR_SPACING * channel);
- writel(data->dst_addr, virtbase +
+ writel(lli->dst_addr, virtbase +
COH901318_CX_DST_ADDR +
COH901318_CX_DST_ADDR_SPACING * channel);
- writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
COH901318_CX_LNK_ADDR_SPACING * channel);
- writel(data->control, virtbase + COH901318_CX_CTRL +
+ writel(lli->control, virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
@@ -408,33 +408,107 @@ coh901318_first_queued(struct coh901318_chan *cohc)
return d;
}
+static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
+{
+ struct coh901318_lli *lli = in_lli;
+ u32 bytes = 0;
+
+ while (lli) {
+ bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
+ lli = lli->virt_link_addr;
+ }
+ return bytes;
+}
+
/*
- * DMA start/stop controls
+ * Get the number of bytes left to transfer on this channel.
+ * It is unwise to call this before stopping the channel if an
+ * exact figure is needed, but it can still be called for a
+ * rough estimate.
*/
-u32 coh901318_get_bytes_left(struct dma_chan *chan)
+static u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
- unsigned long flags;
- u32 ret;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ struct list_head *pos;
+ unsigned long flags;
+ u32 left = 0;
+ int i = 0;
spin_lock_irqsave(&cohc->lock, flags);
- /* Read transfer count value */
- ret = readl(cohc->base->virtbase +
- COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+ /*
+ * If there are many queued jobs, we iterate and add the
+ * size of them all. We take a special look on the first
+ * job though, since it is probably active.
+ */
+ list_for_each(pos, &cohc->active) {
+ /*
+ * The first job in the list will be running on the
+ * hardware. The job can be stopped but still active,
+ * meaning the transfer counter is somewhere inside
+ * the buffer.
+ */
+ cohd = list_entry(pos, struct coh901318_desc, node);
+
+ if (i == 0) {
+ struct coh901318_lli *lli;
+ dma_addr_t ladd;
+
+ /* Read current transfer count value */
+ left = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * cohc->id) &
+ COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ /* See if the transfer is linked... */
+ ladd = readl(cohc->base->virtbase +
+ COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING *
+ cohc->id) &
+ ~COH901318_CX_LNK_LINK_IMMEDIATE;
+ /* Single transaction */
+ if (!ladd)
+ continue;
+
+ /*
+ * Linked transaction: follow the lli chain, find the
+ * lli currently being processed, and proceed to the next
+ */
+ lli = cohd->lli;
+ while (lli && lli->link_addr != ladd)
+ lli = lli->virt_link_addr;
+
+ if (lli)
+ lli = lli->virt_link_addr;
+
+ /*
+ * Follow remaining lli links around to count the total
+ * number of bytes left
+ */
+ left += coh901318_get_bytes_in_lli(lli);
+ } else {
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
+ i++;
+ }
+
+ /* Also count bytes in the queued jobs */
+ list_for_each(pos, &cohc->queue) {
+ cohd = list_entry(pos, struct coh901318_desc, node);
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
spin_unlock_irqrestore(&cohc->lock, flags);
- return ret;
+ return left;
}
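
To make the residue computation concrete, here is a self-contained illustration of summing a transfer-count field across a chain of linked-list items, the same shape as coh901318_get_bytes_in_lli() above; the node layout and the TC_VALUE_MASK value are invented for the example.

#include <stdio.h>

#define TC_VALUE_MASK 0x00000fffu	/* hypothetical transfer-count field */

struct lli {
	unsigned int control;
	struct lli *next;
};

static unsigned int bytes_in_chain(const struct lli *lli)
{
	unsigned int bytes = 0;

	for (; lli; lli = lli->next)
		bytes += lli->control & TC_VALUE_MASK;
	return bytes;
}

int main(void)
{
	struct lli c = { 0x100, NULL };
	struct lli b = { 0xfff, &c };
	struct lli a = { 0x200, &b };

	printf("%u bytes left\n", bytes_in_chain(&a));	/* 512 + 4095 + 256 */
	return 0;
}
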
-EXPORT_SYMBOL(coh901318_get_bytes_left);
-
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -475,12 +549,11 @@ void coh901318_stop(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been paused via coh901318_pause(..).
Power save is handled.
*/
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -506,7 +579,6 @@ void coh901318_continue(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
@@ -565,29 +637,30 @@ static int coh901318_config(struct coh901318_chan *cohc,
*/
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
- struct coh901318_desc *cohd_que;
+ struct coh901318_desc *cohd;
- /* start queued jobs, if any
+ /*
+ * start queued jobs, if any
* TODO: transmit all queued jobs in one go
*/
- cohd_que = coh901318_first_queued(cohc);
+ cohd = coh901318_first_queued(cohc);
- if (cohd_que != NULL) {
+ if (cohd != NULL) {
/* Remove from queue */
- coh901318_desc_remove(cohd_que);
+ coh901318_desc_remove(cohd);
/* initiate DMA job */
cohc->busy = 1;
- coh901318_desc_submit(cohc, cohd_que);
+ coh901318_desc_submit(cohc, cohd);
- coh901318_prep_linked_list(cohc, cohd_que->data);
+ coh901318_prep_linked_list(cohc, cohd->lli);
- /* start dma job */
+ /* start dma job on this channel */
coh901318_start(cohc);
}
- return cohd_que;
+ return cohd;
}
/*
@@ -622,7 +695,7 @@ static void dma_tasklet(unsigned long data)
cohc->completed = cohd_fin->desc.cookie;
/* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd_fin);
@@ -666,23 +739,44 @@ static void dma_tasklet(unsigned long data)
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
- BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
- list_empty(&cohc->queue)));
-
- if (!cohc->allocated)
+ /*
+ * If the channel is not allocated, then we shouldn't have
+ * any TC interrupts on it.
+ */
+ if (!cohc->allocated) {
+ dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
+ "unallocated channel\n");
return;
+ }
spin_lock(&cohc->lock);
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+ * cohc->active and run to completion; that, after all, is
+ * why we are getting a terminal count interrupt.
+ * If you get this BUG() the most probable cause is that
+ * the individual nodes in the lli chain have IRQ enabled,
+ * so check your platform config for lli chain ctrl.
+ */
+ BUG_ON(list_empty(&cohc->active));
+
cohc->nbr_active_done++;
+ /*
+ * This attempts to take a job from cohc->queue, put it
+ * into cohc->active and start it.
+ */
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- BUG_ON(list_empty(&cohc->active));
-
spin_unlock(&cohc->lock);
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminate them.
+ */
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -809,6 +903,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
__func__, cohc->id);
@@ -816,11 +911,15 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
if (chan->client_count > 1)
return -EBUSY;
+ spin_lock_irqsave(&cohc->lock, flags);
+
coh901318_config(cohc, NULL);
cohc->allocated = 1;
cohc->completed = chan->cookie = 1;
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
return 1;
}
@@ -843,7 +942,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
@@ -870,7 +969,7 @@ static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t size, unsigned long flags)
{
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
unsigned long flg;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -892,23 +991,23 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
lli_len++;
- data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
- if (data == NULL)
+ if (lli == NULL)
goto err;
ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
+ &cohc->base->pool, lli, src, size, dest,
cohc_chan_param(cohc)->ctrl_lli_chained,
ctrl_last);
if (ret)
goto err;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
- cohd->data = data;
+ cohd->lli = lli;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
@@ -926,7 +1025,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
const struct coh901318_params *params;
struct scatterlist *sg;
@@ -999,13 +1098,13 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
pr_debug("Allocate %d lli:s for this transfer\n", len);
- data = coh901318_lli_alloc(&cohc->base->pool, len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, len);
- if (data == NULL)
+ if (lli == NULL)
goto err_dma_alloc;
- /* initiate allocated data list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ /* initiate allocated lli list */
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc_dev_addr(cohc),
ctrl_chained,
ctrl,
@@ -1014,14 +1113,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->data = data;
+ cohd->lli = lli;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -1035,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
static enum dma_status
-coh901318_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done,
- dma_cookie_t *used)
+coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_cookie_t last_used;
@@ -1049,10 +1147,10 @@ coh901318_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used,
+ coh901318_get_bytes_left(chan));
+ if (ret == DMA_IN_PROGRESS && cohc->stopped)
+ ret = DMA_PAUSED;
return ret;
}
@@ -1065,23 +1163,42 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&cohc->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ /*
+ * Busy means that pending jobs are already being processed,
+ * in which case there is no point in starting the queue: the
+ * terminal count interrupt on the channel will take the next
+ * job on the queue and execute it anyway.
+ */
if (!cohc->busy)
coh901318_queue_start(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
- coh901318_stop(chan);
+ if (cmd == DMA_PAUSE) {
+ coh901318_pause(chan);
+ return 0;
+ }
+
+ if (cmd == DMA_RESUME) {
+ coh901318_resume(chan);
+ return 0;
+ }
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* The remainder of this function terminates the transfer */
+ coh901318_pause(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
@@ -1099,7 +1216,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_active_get(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1108,7 +1225,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_queued(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1120,6 +1237,8 @@ coh901318_terminate_all(struct dma_chan *chan)
cohc->busy = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
@@ -1235,9 +1354,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.device_control = coh901318_control;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
@@ -1255,9 +1374,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.device_control = coh901318_control;
base->dma_memcpy.dev = &pdev->dev;
/*
* This controller can only access address at even 32bit boundaries,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7..9d31d5eb95c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -515,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
break;
if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- chan->private = NULL;
chan = NULL;
}
}
@@ -537,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
- chan->private = NULL;
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -695,11 +693,11 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_terminate_all);
+ !device->device_control);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
- BUG_ON(!device->device_is_tx_complete);
+ BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
@@ -978,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
+ #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
+ #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
- struct dma_async_tx_descriptor *dep = tx->next;
+ struct dma_async_tx_descriptor *dep = txd_next(tx);
struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan;
@@ -1019,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
return;
/* we'll submit tx->next now, so clear the link */
- tx->next = NULL;
+ txd_clear_next(tx);
chan = dep->chan;
/* keep submitting up until a channel switch is detected
@@ -1027,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
* processing the interrupt from async_tx_channel_switch
*/
for (; dep; dep = dep_next) {
- spin_lock_bh(&dep->lock);
- dep->parent = NULL;
- dep_next = dep->next;
+ txd_lock(dep);
+ txd_clear_parent(dep);
+ dep_next = txd_next(dep);
if (dep_next && dep_next->chan == chan)
- dep->next = NULL; /* ->next will be submitted */
+ txd_clear_next(dep); /* ->next will be submitted */
else
dep_next = NULL; /* submit current dep and terminate */
- spin_unlock_bh(&dep->lock);
+ txd_unlock(dep);
dep->tx_submit(dep);
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd..a3991ab0d67 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,18 @@ err_desc_get:
return NULL;
}
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -810,12 +815,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+
+ return 0;
}
static enum dma_status
-dwc_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+dwc_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
dma_cookie_t last_used;
@@ -835,10 +842,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1338,9 +1342,9 @@ static int __init dw_probe(struct platform_device *pdev)
dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
- dw->dma.device_terminate_all = dwc_terminate_all;
+ dw->dma.device_control = dwc_control;
- dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+ dw->dma.device_tx_status = dwc_tx_status;
dw->dma.device_issue_pending = dwc_issue_pending;
dma_writel(dw, CFG, DW_CFG_DMA_EN);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 88f470f0d82..8088b14ba5f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -775,13 +775,18 @@ fail:
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct fsldma_chan *chan;
unsigned long flags;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!dchan)
- return;
+ return -EINVAL;
chan = to_fsl_chan(dchan);
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return 0;
}
/**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
}
/**
- * fsl_dma_is_complete - Determine the DMA status
+ * fsl_tx_status - Determine the DMA status
* @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
+static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
last_used = dchan->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1313,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
INIT_LIST_HEAD(&fdev->common.channels);
/* ioremap the registers for use */
- fdev->regs = of_iomap(op->node, 0);
+ fdev->regs = of_iomap(op->dev.of_node, 0);
if (!fdev->regs) {
dev_err(&op->dev, "unable to ioremap registers\n");
err = -ENOMEM;
@@ -1321,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
}
/* map the channel IRQ if it exists, but don't hookup the handler yet */
- fdev->irq = irq_of_parse_and_map(op->node, 0);
+ fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
- fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+ fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
- fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+ fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
dev_set_drvdata(&op->dev, fdev);
@@ -1343,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
* of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(op->node, child) {
+ for_each_child_of_node(op->dev.of_node, child) {
if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
@@ -1409,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = {
};
static struct of_platform_driver fsldma_of_driver = {
- .name = "fsl-elo-dma",
- .match_table = fsldma_of_ids,
- .probe = fsldma_of_probe,
- .remove = fsldma_of_remove,
+ .driver = {
+ .name = "fsl-elo-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = fsldma_of_ids,
+ },
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3e5a8005c62..c9213ead4a2 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
}
enum dma_status
-ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
device->cleanup_fn((unsigned long) c);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (tmo == 0 ||
- dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ dma->device_tx_status(dma_chan, cookie, NULL)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 86b97ac8774..6d3a73b57e5 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -96,6 +96,7 @@ struct ioat_chan_common {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
+ #define IOAT_RESHAPE_PENDING 4
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
@@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
}
/**
- * ioat_is_complete - poll the status of an ioat transaction
+ * ioat_tx_status - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
- * @done: if set, updated with last completed transaction
- * @used: if set, updated with last used transaction
+ * @txstate: if set, updated with the transaction state
*/
static inline enum dma_status
-ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
@@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
last_used = c->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
+enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b5ae56c211e..3c8b32a8379 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -56,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
- /* make descriptor updates globally visible before notifying channel */
- wmb();
writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -69,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
if (ioat2_ring_pending(ioat)) {
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
}
@@ -80,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c)
* @ioat: ioat2+ channel
*
* Check if the number of unsubmitted descriptors has exceeded the
- * watermark. Called with ring_lock held
+ * watermark. Called with prep_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
@@ -92,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- int idx;
if (ioat2_ring_space(ioat) < 1) {
dev_err(to_dev(&ioat->base),
@@ -102,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
- idx = ioat2_desc_alloc(ioat, 1);
- desc = ioat2_get_ring_ent(ioat, idx);
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
hw = desc->hw;
hw->ctl = 0;
@@ -117,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
async_tx_ack(&desc->txd);
ioat2_set_chainaddr(ioat, desc->txd.phys);
dump_desc_dbg(ioat, desc);
+ wmb();
+ ioat->head += 1;
__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_start_null_desc(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
@@ -134,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_ring_ent *desc;
bool seen_current = false;
u16 active;
- int i;
+ int idx = ioat->tail, i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
@@ -158,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
if (tx->phys == phys_complete)
seen_current = true;
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
@@ -179,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -287,12 +272,10 @@ void ioat2_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -311,26 +294,31 @@ void ioat2_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete)) {
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat2_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -338,7 +326,6 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -392,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
- spin_lock_init(&ioat->ring_lock);
+ spin_lock_init(&ioat->prep_lock);
if (device->reset_hw(&ioat->base)) {
i = 0;
break;
@@ -418,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ /* make descriptor updates visible before advancing ioat->head,
+ * this is purposefully not smp_wmb() since we are also
+ * publishing the descriptor updates to a dma device
+ */
+ wmb();
+
+ ioat->head += ioat->produce;
+
ioat2_update_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
return cookie;
}
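
ioat2_tx_submit_unlock() now publishes ioat->head only after a wmb(), and __cleanup() issues smp_mb() before publishing ioat->tail. A self-contained C11-atomics model of that pairing between a CPU producer and a CPU consumer (the real barriers additionally order writes against the DMA engine, which this sketch cannot show; the ring size and payloads are invented):

    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 8                      /* power of two, like 1 << alloc_order */
    static int ring[RING_SIZE];              /* stands in for the descriptor ring */
    static _Atomic unsigned head, tail;      /* free-running indices */

    /* producer: fill the slot first, then publish the new head */
    static void submit(int payload)
    {
            unsigned h = atomic_load_explicit(&head, memory_order_relaxed);
            ring[h & (RING_SIZE - 1)] = payload;
            /* release plays the role of the wmb(): slot contents become
             * visible no later than the advanced head */
            atomic_store_explicit(&head, h + 1, memory_order_release);
    }

    /* consumer: observe head, read the slots, then publish the new tail */
    static int cleanup(void)
    {
            unsigned h = atomic_load_explicit(&head, memory_order_acquire);
            unsigned t = atomic_load_explicit(&tail, memory_order_relaxed);
            int sum = 0;
            while (t != h)
                    sum += ring[t++ & (RING_SIZE - 1)];
            /* release plays the role of the smp_mb(): the reads above finish
             * before producers can observe the freed space */
            atomic_store_explicit(&tail, t, memory_order_release);
            return sum;
    }

    int main(void)
    {
            submit(1); submit(2); submit(3);
            printf("cleaned up sum=%d\n", cleanup());
            return 0;
    }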
@@ -531,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
if (!ring)
return -ENOMEM;
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
ioat->ring = ring;
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
@@ -553,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
- const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+ const u16 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
const u16 new_size = 1 << order;
struct ioat_ring_ent **ring;
@@ -653,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
}
/**
- * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
- * @idx: gets starting descriptor index on successful allocation
+ * ioat2_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat2,3 channel (ring) to operate on
* @num_descs: allocation length
*/
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
struct ioat_chan_common *chan = &ioat->base;
+ bool retry;
- spin_lock_bh(&ioat->ring_lock);
+ retry:
+ spin_lock_bh(&ioat->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
* resizing.
*/
- while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
- if (reshape_ring(ioat, ioat->alloc_order + 1) &&
- ioat2_ring_space(ioat) > num_descs)
- break;
-
- if (printk_ratelimit())
- dev_dbg(to_dev(chan),
- "%s: ring full! num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail,
- ioat->issued);
- spin_unlock_bh(&ioat->ring_lock);
-
- /* progress reclaim in the allocation failure case we
- * may be called under bh_disabled so we need to trigger
- * the timer event directly
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (jiffies > chan->timer.expires &&
- timer_pending(&chan->timer)) {
- struct ioatdma_device *device = chan->device;
-
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) &chan->common);
- } else
- spin_unlock_bh(&chan->cleanup_lock);
- return -ENOMEM;
+ if (likely(ioat2_ring_space(ioat) > num_descs)) {
+ dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ ioat->produce = num_descs;
+ return 0; /* with ioat->prep_lock held */
}
+ retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
- dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ /* is another cpu already trying to expand the ring? */
+ if (retry)
+ goto retry;
- *idx = ioat2_desc_alloc(ioat, num_descs);
- return 0; /* with ioat->ring_lock held */
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
+ retry = reshape_ring(ioat, ioat->alloc_order + 1);
+ clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+
+ /* if we were able to expand the ring retry the allocation */
+ if (retry)
+ goto retry;
+
+ if (printk_ratelimit())
+ dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+ /* progress reclaim in the allocation failure case; we may be
+ * called under bh_disabled so we need to trigger the timer
+ * event directly
+ */
+ if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+ struct ioatdma_device *device = chan->device;
+
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ device->timer_fn((unsigned long) &chan->common);
+ }
+
+ return -ENOMEM;
}
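
ioat2_check_space_lock() returns 0 with prep_lock still held and ioat->produce recording the reservation; on a full ring it drops the lock, grows the ring, and retries. A rough userspace model of that calling convention, using a pthread mutex and plain counters (the IOAT_RESHAPE_PENDING gating and the timer kick are reduced to a comment; all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t prep_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned ring_size = 4, active;
    static unsigned produce;                 /* like ioat->produce */

    /* returns 0 with prep_lock held, mirroring ioat2_check_space_lock() */
    static int check_space_lock(unsigned num)
    {
    retry:
            pthread_mutex_lock(&prep_lock);
            if (ring_size - active > num) {  /* always leave one slot free */
                    produce = num;
                    return 0;                /* caller fills descriptors, then unlocks */
            }
            pthread_mutex_unlock(&prep_lock);

            /* grow and retry; the real code gates this on IOAT_RESHAPE_PENDING
             * so only one cpu reshapes at a time */
            pthread_mutex_lock(&prep_lock);
            ring_size *= 2;
            pthread_mutex_unlock(&prep_lock);
            goto retry;
    }

    int main(void)
    {
            if (check_space_lock(5) == 0) {
                    active += produce;                 /* "write" the descriptors */
                    pthread_mutex_unlock(&prep_lock);  /* like tx_submit_unlock */
            }
            printf("ring_size=%u active=%u\n", ring_size, active);
            return 0;
    }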
struct dma_async_tx_descriptor *
@@ -713,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dst = dma_dest;
dma_addr_t src = dma_src;
size_t total_len = len;
- int num_descs;
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -777,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
descs = ioat2_ring_space(ioat);
dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
for (i = 0; i < descs; i++) {
@@ -800,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
ioat->alloc_order = 0;
pci_pool_free(device->completion_pool, chan->completion,
chan->completion_dma);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
chan->last_completion = 0;
chan->completion_dma = 0;
@@ -855,7 +859,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ef2871fd786..a2c413b2b8d 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -22,6 +22,7 @@
#define IOATDMA_V2_H
#include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
#include "dma.h"
#include "hw.h"
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
* @tail: cleanup index
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
* @ring: software ring buffer implementation of hardware ring
- * @ring_lock: protects ring attributes
+ * @prep_lock: serializes descriptor preparation (producers)
*/
struct ioat2_dma_chan {
struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
+ u16 produce;
struct ioat_ring_ent **ring;
- spinlock_t ring_lock;
+ spinlock_t prep_lock;
};
static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
return container_of(chan, struct ioat2_dma_chan, base);
}
-static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
- return (1 << ioat->alloc_order) - 1;
+ return 1 << ioat->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
- u16 num_descs = ioat2_ring_mask(ioat) + 1;
- u16 active = ioat2_ring_active(ioat);
-
- BUG_ON(active > num_descs);
-
- return num_descs - active;
-}
-
-/* assumes caller already checked space */
-static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
-{
- ioat->head += len;
- return ioat->head - len;
+ return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
}
static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
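
The helpers above switch from an open-coded mask to CIRC_CNT() from linux/circ_buf.h, with head and tail kept as free-running u16 counters and the ring size a power of two. A standalone sketch of that arithmetic; the macro is redefined locally so the snippet builds on its own, and the index values are made up:

    #include <stdio.h>
    #include <stdint.h>

    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    int main(void)
    {
            uint16_t head = 65534, tail = 65530;   /* free-running, about to wrap */
            uint16_t size = 1 << 4;                /* 1 << alloc_order */

            /* active descriptors and remaining space, as the helpers compute them */
            unsigned active = CIRC_CNT(head, tail, size);
            unsigned space  = size - active;
            printf("active=%u space=%u\n", active, space);  /* active=4 space=12 */

            head += 3;                             /* u16 wraps through 0 harmlessly */
            active = CIRC_CNT(head, tail, size);
            printf("after wrap: active=%u\n", active);      /* still 7, not garbage */
            return 0;
    }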
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
- return ioat->ring[idx & ioat2_ring_mask(ioat)];
+ return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
}
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6740e319c9c..1cdd22e1051 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -260,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
bool seen_current = false;
+ int idx = ioat->tail, i;
u16 active;
- int i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
@@ -270,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
tx = &desc->txd;
if (tx->cookie) {
chan->completed_cookie = tx->cookie;
- ioat3_dma_unmap(ioat, desc, ioat->tail + i);
+ ioat3_dma_unmap(ioat, desc, idx + i);
tx->cookie = 0;
if (tx->callback) {
tx->callback(tx->callback_param);
@@ -293,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
i++;
}
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- active = ioat2_ring_active(ioat);
- if (active == 0) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
/* 5 microsecond delay per pending descriptor */
- writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
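
The writew() above programs 5 microseconds of interrupt coalescing per descriptor still pending after the cleanup pass, clamped to the register field. A small arithmetic sketch (the mask value used here is illustrative, not the hardware's):

    #include <stdio.h>

    #define INTRDELAY_MASK 0x3FFF            /* illustrative field width */

    /* 5 us of delay per descriptor left pending after this cleanup pass */
    static unsigned int intr_delay(unsigned int active, unsigned int cleaned)
    {
            unsigned int delay = 5 * (active - cleaned);

            return delay < INTRDELAY_MASK ? delay : INTRDELAY_MASK;
    }

    int main(void)
    {
            printf("pending=%u -> delay=%u us\n", 14u - 4u, intr_delay(14, 4));
            printf("pending=%u -> delay=%u us\n", 4u - 4u,  intr_delay(4, 4));
            return 0;
    }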
-/* try to cleanup, but yield (via spin_trylock) to incoming submissions
- * with the expectation that we will immediately poll again shortly
- */
-static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-/* run cleanup now because we already delayed the interrupt via INTRDELAY */
-static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
-
- prefetch(chan->completion);
-
spin_lock_bh(&chan->cleanup_lock);
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
- spin_lock_bh(&ioat->ring_lock);
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -363,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup_sync(ioat);
+ ioat3_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -384,12 +346,10 @@ static void ioat3_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -408,26 +368,31 @@ static void ioat3_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
+ spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat3_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -435,21 +400,20 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
-ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup_poll(ioat);
+ ioat3_cleanup(ioat);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
@@ -460,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
struct ioat_ring_ent *desc;
size_t total_len = len;
struct ioat_fill_descriptor *fill;
- int num_descs;
u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -513,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
struct ioat_dma_descriptor *hw;
+ int num_descs, with_ext, idx, i;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i;
- u16 idx;
u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
BUG_ON(src_cnt < 2);
@@ -537,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
* (legacy) descriptor to ensure all completion writes arrive in
* order.
*/
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -657,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_pq_ext_descriptor *pq_ex = NULL;
struct ioat_dma_descriptor *hw;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i, s;
- u16 idx;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+ int i, s, idx, with_ext, num_descs;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
@@ -687,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -851,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- u16 idx;
- if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
- desc = ioat2_get_ring_ent(ioat, idx);
+ if (ioat2_check_space_lock(ioat, 1) == 0)
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
else
return NULL;
@@ -977,7 +930,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1031,7 +984,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1072,7 +1025,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1115,7 +1068,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1222,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1233,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_PQ) {
is_raid_device = true;
dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 2;
+ dma->pq_align = 6;
dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1243,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (!(cap & IOAT_CAP_XOR)) {
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_pqxor;
@@ -1259,11 +1212,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
- dma->device_is_tx_complete = ioat3_is_complete;
+ dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 99ec26725ba..fab37d1cf48 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
if (err)
return err;
- device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
- if (!device)
- return -ENOMEM;
-
- pci_set_master(pdev);
-
device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
if (!device)
return -ENOMEM;
+ pci_set_master(pdev);
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1ebc801678b..161c452923b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * iop_adma_is_complete - poll the status of an ADMA transaction
+ * iop_adma_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel or NULL
*/
-static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+static enum dma_status iop_adma_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
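
This hunk is the pattern repeated throughout the patch: the done/used cookie pointers become a single struct dma_tx_state filled by dma_set_tx_state(). A standalone sketch that mirrors the shape of those definitions (the real ones live in linux/dmaengine.h; the local copies exist only so the example compiles on its own):

    #include <stdio.h>

    typedef int dma_cookie_t;

    struct dma_tx_state {
            dma_cookie_t last;      /* last completed cookie */
            dma_cookie_t used;      /* last issued cookie */
            unsigned int residue;   /* bytes remaining, 0 when unknown/complete */
    };

    static void set_tx_state(struct dma_tx_state *st, dma_cookie_t last,
                             dma_cookie_t used, unsigned int residue)
    {
            if (!st)                /* callers may pass NULL, as the self-tests do */
                    return;
            st->last = last;
            st->used = used;
            st->residue = residue;
    }

    int main(void)
    {
            struct dma_tx_state st;

            set_tx_state(&st, 41, 43, 0);
            printf("last=%d used=%d residue=%u\n", st.last, st.used, st.residue);
            set_tx_state(NULL, 41, 43, 0);   /* NULL is tolerated */
            return 0;
    }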
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(1);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
- dma_dev->device_is_tx_complete = iop_adma_is_complete;
+ dma_dev->device_tx_status = iop_adma_status;
dma_dev->device_issue_pending = iop_adma_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c88..cb26ee9773d 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,18 @@ static void idmac_issue_pending(struct dma_chan *chan)
*/
}
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
unsigned long flags;
int i;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1510,23 @@ static void __idmac_terminate_all(struct dma_chan *chan)
tasklet_enable(&to_ipu(idmac)->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
+
+ return 0;
}
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
+ int ret;
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ ret = __idmac_control(chan, cmd, arg);
mutex_unlock(&ichan->chan_mutex);
+
+ return ret;
}
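
idmac_control() shows the new device_control entry point: one callback that dispatches on a command code and rejects anything it does not implement with -ENXIO. A compact model of that dispatch (the enum values are declared locally for the example; the real enum dma_ctrl_cmd is in linux/dmaengine.h):

    #include <errno.h>
    #include <stdio.h>

    enum dma_ctrl_cmd { DMA_TERMINATE_ALL, DMA_PAUSE, DMA_RESUME };

    /* the shape the converted drivers follow: one entry point, unsupported
     * commands rejected with -ENXIO */
    static int example_control(enum dma_ctrl_cmd cmd, unsigned long arg)
    {
            (void)arg;
            if (cmd != DMA_TERMINATE_ALL)
                    return -ENXIO;
            /* ... halt the channel and mark outstanding work done ... */
            return 0;
    }

    int main(void)
    {
            printf("terminate: %d\n", example_control(DMA_TERMINATE_ALL, 0));
            printf("pause:     %d\n", example_control(DMA_PAUSE, 0));
            return 0;
    }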
#ifdef DEBUG
@@ -1607,7 +1618,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ __idmac_control(chan, DMA_TERMINATE_ALL, 0);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
@@ -1637,15 +1648,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
tasklet_schedule(&to_ipu(idmac)->tasklet);
}
-static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status idmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
- if (done)
- *done = ichan->completed;
- if (used)
- *used = chan->cookie;
+ dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
if (cookie != chan->cookie)
return DMA_ERROR;
return DMA_SUCCESS;
@@ -1664,12 +1672,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
dma->dev = ipu->dev;
dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_is_tx_complete = idmac_is_tx_complete;
+ dma->device_tx_status = idmac_tx_status;
dma->device_issue_pending = idmac_issue_pending;
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_terminate_all = idmac_terminate_all;
+ dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1711,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
- idmac_terminate_all(&ichan->dma_chan);
+ idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index bbbd5856662..201e6e19c34 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
/* Check request completion status */
static enum dma_status
-mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -663,7 +658,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
}
regs_start = res.start;
- regs_size = res.end - res.start + 1;
+ regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
- dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e2fd34da64f..86c5ae9fde3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
/**
- * mv_xor_is_complete - poll the status of an XOR transaction
+ * mv_xor_status - poll the status of an XOR transaction
* @chan: XOR channel handle
* @cookie: XOR transaction identifier
+ * @txstate: XOR transactions state holder (or NULL)
*/
-static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+static enum dma_status mv_xor_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
mv_chan->is_complete_cookie = cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(1);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(8);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
- dma_dev->device_is_tx_complete = mv_xor_is_complete;
+ dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
new file mode 100644
index 00000000000..7c50f6dfd3f
--- /dev/null
+++ b/drivers/dma/pl330.c
@@ -0,0 +1,866 @@
+/* linux/drivers/dma/pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
+
+#define NR_DEFAULT_DESC 16
+
+enum desc_status {
+ /* In the DMAC pool */
+ FREE,
+ /*
+ * Allocated to some channel during prep_xxx
+ * Also may be sitting on the work_list.
+ */
+ PREP,
+ /*
+ * Sitting on the work_list and already submitted
+ * to the PL330 core. No more than two descriptors
+ * of a channel can be BUSY at any time.
+ */
+ BUSY,
+ /*
+ * Sitting on the channel work_list but xfer done
+ * by PL330 core
+ */
+ DONE,
+};
+
+struct dma_pl330_chan {
+ /* Schedule desc completion */
+ struct tasklet_struct task;
+
+ /* DMA-Engine Channel */
+ struct dma_chan chan;
+
+ /* Last completed cookie */
+ dma_cookie_t completed;
+
+ /* List of to be xfered descriptors */
+ struct list_head work_list;
+
+ /* Pointer to the DMAC that manages this channel,
+ * NULL if the channel is available to be acquired.
+ * As the parent, this DMAC also provides descriptors
+ * to the channel.
+ */
+ struct dma_pl330_dmac *dmac;
+
+ /* To protect channel manipulation */
+ spinlock_t lock;
+
+ /* Token of a hardware channel thread of PL330 DMAC
+ * NULL if the channel is available to be acquired.
+ */
+ void *pl330_chid;
+};
+
+struct dma_pl330_dmac {
+ struct pl330_info pif;
+
+ /* DMA-Engine Device */
+ struct dma_device ddma;
+
+ /* Pool of descriptors available for the DMAC's channels */
+ struct list_head desc_pool;
+ /* To protect desc_pool manipulation */
+ spinlock_t pool_lock;
+
+ /* Peripheral channels connected to this DMAC */
+ struct dma_pl330_chan peripherals[0]; /* keep at end */
+};
+
+struct dma_pl330_desc {
+ /* To attach to a queue as child */
+ struct list_head node;
+
+ /* Descriptor for the DMA Engine API */
+ struct dma_async_tx_descriptor txd;
+
+ /* Xfer for PL330 core */
+ struct pl330_xfer px;
+
+ struct pl330_reqcfg rqcfg;
+ struct pl330_req req;
+
+ enum desc_status status;
+
+ /* The channel which currently holds this desc */
+ struct dma_pl330_chan *pchan;
+};
+
+static inline struct dma_pl330_chan *
+to_pchan(struct dma_chan *ch)
+{
+ if (!ch)
+ return NULL;
+
+ return container_of(ch, struct dma_pl330_chan, chan);
+}
+
+static inline struct dma_pl330_desc *
+to_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct dma_pl330_desc, txd);
+}
+
+static inline void free_desc_list(struct list_head *list)
+{
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (list_empty(list))
+ return;
+
+ /* Finish off the work list */
+ list_for_each_entry(desc, list, node) {
+ dma_async_tx_callback callback;
+ void *param;
+
+ /* All desc in a list belong to same channel */
+ pch = desc->pchan;
+ callback = desc->txd.callback;
+ param = desc->txd.callback_param;
+
+ if (callback)
+ callback(param);
+
+ desc->pchan = NULL;
+ }
+
+ pdmac = pch->dmac;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+ list_splice_tail_init(list, &pdmac->desc_pool);
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
+static inline void fill_queue(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_desc *desc;
+ int ret;
+
+ list_for_each_entry(desc, &pch->work_list, node) {
+
+ /* If already submitted */
+ if (desc->status == BUSY)
+ break;
+
+ ret = pl330_submit_req(pch->pl330_chid,
+ &desc->req);
+ if (!ret) {
+ desc->status = BUSY;
+ break;
+ } else if (ret == -EAGAIN) {
+ /* QFull or DMAC Dying */
+ break;
+ } else {
+ /* Unacceptable request */
+ desc->status = DONE;
+ dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
+ __func__, __LINE__, desc->txd.cookie);
+ tasklet_schedule(&pch->task);
+ }
+ }
+}
+
+static void pl330_tasklet(unsigned long data)
+{
+ struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+ struct dma_pl330_desc *desc, *_dt;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Pick up ripe tomatoes */
+ list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
+ if (desc->status == DONE) {
+ pch->completed = desc->txd.cookie;
+ list_move_tail(&desc->node, &list);
+ }
+
+ /* Try to submit a req imm. next to the last completed cookie */
+ fill_queue(pch);
+
+ /* Make sure the PL330 Channel thread is active */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ free_desc_list(&list);
+}
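+
+/* pl330_tasklet() above moves completed descriptors onto a private list while
+ * holding the channel lock and only then lets free_desc_list() run their
+ * callbacks with the lock dropped.  A minimal userspace illustration of that
+ * collect-then-callback pattern follows (data and completion flags invented):
+ *
+ *     #include <pthread.h>
+ *     #include <stdio.h>
+ *
+ *     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+ *     static int work[4] = { 1, 2, 3, 4 };
+ *     static int done_mask = 0x5;      // pretend items 0 and 2 completed
+ *
+ *     static void callback(int item)
+ *     {
+ *             printf("completed %d\n", item);
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             int local[4], n = 0, i;
+ *
+ *             // move completed items to a private list under the lock...
+ *             pthread_mutex_lock(&lock);
+ *             for (i = 0; i < 4; i++)
+ *                     if (done_mask & (1 << i))
+ *                             local[n++] = work[i];
+ *             pthread_mutex_unlock(&lock);
+ *
+ *             // ...then invoke the callbacks with the lock dropped, so a
+ *             // callback that submits more work cannot deadlock
+ *             for (i = 0; i < n; i++)
+ *                     callback(local[i]);
+ *             return 0;
+ *     }
+ */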
+
+static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
+{
+ struct dma_pl330_desc *desc = token;
+ struct dma_pl330_chan *pch = desc->pchan;
+ unsigned long flags;
+
+ /* If desc aborted */
+ if (!pch)
+ return;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ tasklet_schedule(&pch->task);
+}
+
+static int pl330_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ pch->completed = chan->cookie = 1;
+
+ pch->pl330_chid = pl330_request_channel(&pdmac->pif);
+ if (!pch->pl330_chid) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ return 0;
+ }
+
+ tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return 1;
+}
+
+static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* FLUSH the PL330 Channel thread */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+ /* Mark all desc done */
+ list_for_each_entry(desc, &pch->work_list, node)
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long) pch);
+
+ return 0;
+}
+
+static void pl330_free_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ tasklet_kill(&pch->task);
+
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+}
+
+static enum dma_status
+pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ dma_cookie_t last_done, last_used;
+ int ret;
+
+ last_done = pch->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_done, last_used);
+
+ dma_set_tx_state(txstate, last_done, last_used, 0);
+
+ return ret;
+}
+
+static void pl330_issue_pending(struct dma_chan *chan)
+{
+ pl330_tasklet((unsigned long) to_pchan(chan));
+}
+
+/*
+ * We return the last descriptor of the circular list built in
+ * prep_xxx, so the argument to submit corresponds to the last
+ * descriptor of the list.
+ */
+static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_pl330_desc *desc, *last = to_desc(tx);
+ struct dma_pl330_chan *pch = to_pchan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Assign cookies to all nodes */
+ cookie = tx->chan->cookie;
+
+ while (!list_empty(&last->node)) {
+ desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+
+ if (++cookie < 0)
+ cookie = 1;
+ desc->txd.cookie = cookie;
+
+ list_move_tail(&desc->node, &pch->work_list);
+ }
+
+ if (++cookie < 0)
+ cookie = 1;
+ last->txd.cookie = cookie;
+
+ list_add_tail(&last->node, &pch->work_list);
+
+ tx->chan->cookie = cookie;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return cookie;
+}
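+
+/* pl330_tx_submit() above walks the descriptor chain handed back by prep_xxx
+ * and gives each node the next channel cookie, restarting at 1 if the
+ * increment ever goes negative.  A standalone sketch of that numbering
+ * (small starting values keep the example within defined signed arithmetic;
+ * for the real overflow case the kernel builds with wrap-tolerant flags):
+ *
+ *     #include <stdio.h>
+ *
+ *     typedef int dma_cookie_t;
+ *
+ *     static dma_cookie_t assign_cookies(dma_cookie_t chan_cookie,
+ *                                        dma_cookie_t *out, int n)
+ *     {
+ *             int i;
+ *
+ *             for (i = 0; i < n; i++) {
+ *                     if (++chan_cookie < 0)   // wrapped: restart at 1
+ *                             chan_cookie = 1;
+ *                     out[i] = chan_cookie;
+ *             }
+ *             return chan_cookie;              // new channel cookie
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             dma_cookie_t cookies[3];
+ *             dma_cookie_t chan = assign_cookies(5, cookies, 3);
+ *
+ *             printf("%d %d %d (chan=%d)\n",
+ *                    cookies[0], cookies[1], cookies[2], chan);  // 6 7 8 (chan=8)
+ *             return 0;
+ *     }
+ */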
+
+static inline void _init_desc(struct dma_pl330_desc *desc)
+{
+ desc->pchan = NULL;
+ desc->req.x = &desc->px;
+ desc->req.token = desc;
+ desc->rqcfg.swap = SWAP_NO;
+ desc->rqcfg.privileged = 0;
+ desc->rqcfg.insnaccess = 0;
+ desc->rqcfg.scctl = SCCTRL0;
+ desc->rqcfg.dcctl = DCCTRL0;
+ desc->req.cfg = &desc->rqcfg;
+ desc->req.xfer_cb = dma_pl330_rqcb;
+ desc->txd.tx_submit = pl330_tx_submit;
+
+ INIT_LIST_HEAD(&desc->node);
+}
+
+/* Returns the number of descriptors added to the DMAC pool */
+int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+{
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+ int i;
+
+ if (!pdmac)
+ return 0;
+
+ desc = kmalloc(count * sizeof(*desc), flg);
+ if (!desc)
+ return 0;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ for (i = 0; i < count; i++) {
+ _init_desc(&desc[i]);
+ list_add_tail(&desc[i].node, &pdmac->desc_pool);
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return count;
+}
+
+static struct dma_pl330_desc *
+pluck_desc(struct dma_pl330_dmac *pdmac)
+{
+ struct dma_pl330_desc *desc = NULL;
+ unsigned long flags;
+
+ if (!pdmac)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ if (!list_empty(&pdmac->desc_pool)) {
+ desc = list_entry(pdmac->desc_pool.next,
+ struct dma_pl330_desc, node);
+
+ list_del_init(&desc->node);
+
+ desc->status = PREP;
+ desc->txd.callback = NULL;
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return desc;
+}
+
+static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct dma_pl330_peri *peri = pch->chan.private;
+ struct dma_pl330_desc *desc;
+
+ /* Pluck one desc from the pool of DMAC */
+ desc = pluck_desc(pdmac);
+
+ /* If the DMAC pool is empty, alloc new */
+ if (!desc) {
+ if (!add_desc(pdmac, GFP_ATOMIC, 1))
+ return NULL;
+
+ /* Try again */
+ desc = pluck_desc(pdmac);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d ALERT!\n", __func__, __LINE__);
+ return NULL;
+ }
+ }
+
+ /* Initialize the descriptor */
+ desc->pchan = pch;
+ desc->txd.cookie = 0;
+ async_tx_ack(&desc->txd);
+
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+
+ dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
+
+ return desc;
+}
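+
+/* pl330_get_desc() above plucks a descriptor from the DMAC-wide pool and, if
+ * the pool is dry, tops it up with a GFP_ATOMIC allocation and plucks again.
+ * A simplified userspace model of that pool follows (a fixed array and malloc
+ * stand in for the kernel list and pool lock; capacity and names are
+ * arbitrary):
+ *
+ *     #include <pthread.h>
+ *     #include <stdio.h>
+ *     #include <stdlib.h>
+ *
+ *     static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
+ *     static int *pool[16];
+ *     static int pool_count;
+ *
+ *     static int *pluck(void)                  // like pluck_desc()
+ *     {
+ *             int *d = NULL;
+ *             pthread_mutex_lock(&pool_lock);
+ *             if (pool_count)
+ *                     d = pool[--pool_count];
+ *             pthread_mutex_unlock(&pool_lock);
+ *             return d;
+ *     }
+ *
+ *     static int add_descs(int count)          // like add_desc()
+ *     {
+ *             int i;
+ *             pthread_mutex_lock(&pool_lock);
+ *             for (i = 0; i < count && pool_count < 16; i++)
+ *                     pool[pool_count++] = malloc(sizeof(int));
+ *             pthread_mutex_unlock(&pool_lock);
+ *             return i;
+ *     }
+ *
+ *     static int *get_desc(void)               // like pl330_get_desc()
+ *     {
+ *             int *d = pluck();
+ *             if (!d) {
+ *                     if (!add_descs(1))       // pool empty: allocate one more
+ *                             return NULL;
+ *                     d = pluck();             // then try again
+ *             }
+ *             return d;
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             printf("%s\n", get_desc() ? "got a descriptor" : "allocation failed");
+ *             return 0;
+ *     }
+ */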
+
+static inline void fill_px(struct pl330_xfer *px,
+ dma_addr_t dst, dma_addr_t src, size_t len)
+{
+ px->next = NULL;
+ px->bytes = len;
+ px->dst_addr = dst;
+ px->src_addr = src;
+}
+
+static struct dma_pl330_desc *
+__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
+ dma_addr_t src, size_t len)
+{
+ struct dma_pl330_desc *desc = pl330_get_desc(pch);
+
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ /*
+ * Ideally we should look out for reqs bigger than
+ * those that can be programmed with 256 bytes of
+ * MC buffer, but considering a req size is seldom
+ * going to be word-unaligned and more than 200MB,
+ * we take it easy.
+ * Also, should the limit be reached we'd rather
+ * have the platform increase MC buffer size than
+ * complicate this API driver.
+ */
+ fill_px(&desc->px, dst, src, len);
+
+ return desc;
+}
+
+/* Call after fixing burst size */
+static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
+{
+ struct dma_pl330_chan *pch = desc->pchan;
+ struct pl330_info *pi = &pch->dmac->pif;
+ int burst_len;
+
+ burst_len = pi->pcfg.data_bus_width / 8;
+ burst_len *= pi->pcfg.data_buf_dep;
+ burst_len >>= desc->rqcfg.brst_size;
+
+ /* src/dst_burst_len can't be more than 16 */
+ if (burst_len > 16)
+ burst_len = 16;
+
+ while (burst_len > 1) {
+ if (!(len % (burst_len << desc->rqcfg.brst_size)))
+ break;
+ burst_len--;
+ }
+
+ return burst_len;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct pl330_info *pi;
+ int burst;
+
+ if (unlikely(!pch || !len || !peri))
+ return NULL;
+
+ if (peri->rqtype != MEMTOMEM)
+ return NULL;
+
+ pi = &pch->dmac->pif;
+
+ desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
+ if (!desc)
+ return NULL;
+
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 1;
+
+ /* Select max possible burst size */
+ burst = pi->pcfg.data_bus_width / 8;
+
+ while (burst > 1) {
+ if (!(len % burst))
+ break;
+ burst /= 2;
+ }
+
+ desc->rqcfg.brst_size = 0;
+ while (burst != (1 << desc->rqcfg.brst_size))
+ desc->rqcfg.brst_size++;
+
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
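+
+/* pl330_prep_dma_memcpy() above picks the widest power-of-two burst size that
+ * divides the transfer length, then get_burst_len() derives a burst count
+ * from the data buffer depth, capped at 16 and trimmed until it divides the
+ * length.  A standalone sketch of both calculations (bus width is passed in
+ * bytes here, whereas the hardware config reports bits; sample parameters
+ * are invented):
+ *
+ *     #include <stdio.h>
+ *
+ *     static void pick_burst(size_t len, int bus_width_bytes, int data_buf_dep)
+ *     {
+ *             int burst = bus_width_bytes, brst_size = 0, burst_len;
+ *
+ *             while (burst > 1 && (len % burst))       // widest divisor of len
+ *                     burst /= 2;
+ *             while (burst != (1 << brst_size))        // log2 of burst size
+ *                     brst_size++;
+ *
+ *             burst_len = (bus_width_bytes * data_buf_dep) >> brst_size;
+ *             if (burst_len > 16)                      // hardware limit
+ *                     burst_len = 16;
+ *             while (burst_len > 1 && (len % (burst_len << brst_size)))
+ *                     burst_len--;
+ *
+ *             printf("len=%zu -> brst_size=%d (%d bytes), brst_len=%d\n",
+ *                    len, brst_size, 1 << brst_size, burst_len);
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             pick_burst(4096, 8, 16);         // e.g. 64-bit bus, 16-deep buffer
+ *             pick_burst(100, 8, 16);
+ *             return 0;
+ *     }
+ */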
+
+static struct dma_async_tx_descriptor *
+pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flg)
+{
+ struct dma_pl330_desc *first, *desc = NULL;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int i, burst_size;
+ dma_addr_t addr;
+
+ if (unlikely(!pch || !sgl || !sg_len))
+ return NULL;
+
+ /* Make sure the direction is consistent */
+ if ((direction == DMA_TO_DEVICE &&
+ peri->rqtype != MEMTODEV) ||
+ (direction == DMA_FROM_DEVICE &&
+ peri->rqtype != DEVTOMEM)) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ addr = peri->fifo_addr;
+ burst_size = peri->burst_sz;
+
+ first = NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ if (!first)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ if (direction == DMA_TO_DEVICE) {
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ fill_px(&desc->px,
+ addr, sg_dma_address(sg), sg_dma_len(sg));
+ } else {
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ fill_px(&desc->px,
+ sg_dma_address(sg), addr, sg_dma_len(sg));
+ }
+
+ desc->rqcfg.brst_size = burst_size;
+ desc->rqcfg.brst_len = 1;
+ }
+
+ /* Return the last desc in the chain */
+ desc->txd.flags = flg;
+ return &desc->txd;
+}
+
+static irqreturn_t pl330_irq_handler(int irq, void *data)
+{
+ if (pl330_update(data))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static int __devinit
+pl330_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct dma_pl330_platdata *pdat;
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_chan *pch;
+ struct pl330_info *pi;
+ struct dma_device *pd;
+ struct resource *res;
+ int i, ret, irq;
+
+ pdat = adev->dev.platform_data;
+
+ if (!pdat || !pdat->nr_valid_peri) {
+ dev_err(&adev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ /* Allocate a new DMAC and its Channels */
+ pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
+ + sizeof(*pdmac), GFP_KERNEL);
+ if (!pdmac) {
+ dev_err(&adev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ pi = &pdmac->pif;
+ pi->dev = &adev->dev;
+ pi->pl330_data = NULL;
+ pi->mcbufsz = pdat->mcbuf_sz;
+
+ res = &adev->res;
+ request_mem_region(res->start, resource_size(res), "dma-pl330");
+
+ pi->base = ioremap(res->start, resource_size(res));
+ if (!pi->base) {
+ ret = -ENXIO;
+ goto probe_err1;
+ }
+
+ irq = adev->irq[0];
+ ret = request_irq(irq, pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ goto probe_err2;
+
+ ret = pl330_add(pi);
+ if (ret)
+ goto probe_err3;
+
+ INIT_LIST_HEAD(&pdmac->desc_pool);
+ spin_lock_init(&pdmac->pool_lock);
+
+ /* Create a descriptor pool of default size */
+ if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+ dev_warn(&adev->dev, "unable to allocate desc\n");
+
+ pd = &pdmac->ddma;
+ INIT_LIST_HEAD(&pd->channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < pdat->nr_valid_peri; i++) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+ pch = &pdmac->peripherals[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+
+ INIT_LIST_HEAD(&pch->work_list);
+ spin_lock_init(&pch->lock);
+ pch->pl330_chid = NULL;
+ pch->chan.private = peri;
+ pch->chan.device = pd;
+ pch->chan.chan_id = i;
+ pch->dmac = pdmac;
+
+ /* Add the channel to the DMAC list */
+ pd->chancnt++;
+ list_add_tail(&pch->chan.device_node, &pd->channels);
+ }
+
+ pd->dev = &adev->dev;
+
+ pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
+ pd->device_free_chan_resources = pl330_free_chan_resources;
+ pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+ pd->device_tx_status = pl330_tx_status;
+ pd->device_prep_slave_sg = pl330_prep_slave_sg;
+ pd->device_control = pl330_control;
+ pd->device_issue_pending = pl330_issue_pending;
+
+ ret = dma_async_device_register(pd);
+ if (ret) {
+ dev_err(&adev->dev, "unable to register DMAC\n");
+ goto probe_err4;
+ }
+
+ amba_set_drvdata(adev, pdmac);
+
+ dev_info(&adev->dev,
+ "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+ dev_info(&adev->dev,
+ "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
+ pi->pcfg.data_buf_dep,
+ pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
+ pi->pcfg.num_peri, pi->pcfg.num_events);
+
+ return 0;
+
+probe_err4:
+ pl330_del(pi);
+probe_err3:
+ free_irq(irq, pi);
+probe_err2:
+ iounmap(pi->base);
+probe_err1:
+ release_mem_region(res->start, resource_size(res));
+ kfree(pdmac);
+
+ return ret;
+}
+
+static int __devexit pl330_remove(struct amba_device *adev)
+{
+ struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+ struct dma_pl330_chan *pch, *_p;
+ struct pl330_info *pi;
+ struct resource *res;
+ int irq;
+
+ if (!pdmac)
+ return 0;
+
+ amba_set_drvdata(adev, NULL);
+
+ /* Idle the DMAC */
+ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ chan.device_node) {
+
+ /* Remove the channel */
+ list_del(&pch->chan.device_node);
+
+ /* Flush the channel */
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
+
+ pi = &pdmac->pif;
+
+ pl330_del(pi);
+
+ irq = adev->irq[0];
+ free_irq(irq, pi);
+
+ iounmap(pi->base);
+
+ res = &adev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pdmac);
+
+ return 0;
+}
+
+static struct amba_id pl330_ids[] = {
+ {
+ .id = 0x00041330,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl330_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "dma-pl330",
+ },
+ .id_table = pl330_ids,
+ .probe = pl330_probe,
+ .remove = pl330_remove,
+};
+
+static int __init pl330_init(void)
+{
+ return amba_driver_register(&pl330_driver);
+}
+module_init(pl330_init);
+
+static void __exit pl330_exit(void)
+{
+ amba_driver_unregister(&pl330_driver);
+ return;
+}
+module_exit(pl330_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("API Driver for PL330 DMAC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index d44626fa35a..fa98abe4686 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel
*/
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct ppc440spe_adma_chan *ppc440spe_chan;
dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_alloc_chan_resources;
adev->common.device_free_chan_resources =
ppc440spe_adma_free_chan_resources;
- adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_tx_status = ppc440spe_adma_tx_status;
adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
/* Set prep routines based on capability */
@@ -4949,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct of_platform_driver ppc440spe_adma_driver = {
- .match_table = ppc440spe_adma_of_match,
.probe = ppc440spe_adma_probe,
.remove = __devexit_p(ppc440spe_adma_remove),
.driver = {
.name = "PPC440SP(E)-ADMA",
.owner = THIS_MODULE,
+ .of_match_table = ppc440spe_adma_of_match,
},
};
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99..a2a519fd2a2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#include "shdma.h"
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
#define LOG2_DEFAULT_XFER_SIZE 2
/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = shdev->pdata;
- struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
int shift = chan_pdata->dmars_bit;
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
- if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
return NULL;
for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == slave_id)
+ if (pdata->slave[i].slave_id == param->slave_id)
return pdata->slave + i;
return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
- struct sh_dmae_slave_config *cfg;
+ const struct sh_dmae_slave_config *cfg;
- cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+ cfg = sh_dmae_find_slave(sh_chan, param);
if (!cfg) {
ret = -EINVAL;
goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
+ dma_addr_t slave_addr;
if (!chan)
return NULL;
sh_chan = to_sh_chan(chan);
param = chan->private;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -592,16 +593,21 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ slave_addr = param->config->addr;
+
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
direction, flags);
}
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
@@ -617,6 +623,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -714,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
while (__ld_cleanup(sh_chan, all))
;
+
+ if (all)
+ /* Terminating - forgive uncompleted cookies */
+ sh_chan->completed_cookie = sh_chan->common.cookie;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -748,10 +760,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
sh_chan_xfer_ld_queue(sh_chan);
}
-static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
@@ -763,12 +774,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
BUG_ON(last_complete < 0);
-
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
spin_lock_bh(&sh_chan->desc_lock);
@@ -873,7 +879,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
int irq, unsigned long flags)
{
int err;
- struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
@@ -1040,12 +1046,12 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_tx_status = sh_dmae_tx_status;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
@@ -1186,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = {
.remove = __exit_p(sh_dmae_remove),
.shutdown = sh_dmae_shutdown,
.driver = {
+ .owner = THIS_MODULE,
.name = "sh-dma-engine",
},
};
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96..4021275a0a4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
#include <linux/interrupt.h>
#include <linux/list.h>
-#include <asm/dmaengine.h>
-
+#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
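
Both shdma above and ste_dma40 below route channel commands through the generic device_control callback, so a client issues DMA_TERMINATE_ALL (and, where the driver supports it, DMA_PAUSE/DMA_RESUME) through a single entry point. A hedged client-side sketch under that assumption; the foo_* wrappers are illustrative only:

#include <linux/dmaengine.h>

static int foo_stop_dma(struct dma_chan *chan)
{
	/* DMA_TERMINATE_ALL replaces the old device_terminate_all() hook */
	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}

static int foo_pause_dma(struct dma_chan *chan, bool pause)
{
	/* ste_dma40 maps these commands onto its d40_pause()/d40_resume() */
	return chan->device->device_control(chan,
			pause ? DMA_PAUSE : DMA_RESUME, 0);
}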
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 00000000000..c426829f6ab
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2657 @@
+/*
+ * drivers/dma/ste_dma40.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+#define D40_NAME "dma40"
+
+#define D40_PHY_CHAN -1
+
+/* For masking out/in 2 bit channel positions */
+#define D40_CHAN_POS(chan) (2 * (chan / 2))
+#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
+
+/* Maximum iterations taken before giving up suspending a channel */
+#define D40_SUSPEND_MAX_IT 500
+
+#define D40_ALLOC_FREE (1 << 31)
+#define D40_ALLOC_PHY (1 << 30)
+#define D40_ALLOC_LOG_FREE 0
+
+/* The number of free d40_desc to keep in memory before starting
+ * to kfree() them */
+#define D40_DESC_CACHE_SIZE 50
+
+/* Hardware designer of the block */
+#define D40_PERIPHID2_DESIGNER 0x8
+
+/**
+ * enum d40_command - The different commands and/or statuses.
+ *
+ * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
+ * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
+ * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
+ * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
+ */
+enum d40_command {
+ D40_DMA_STOP = 0,
+ D40_DMA_RUN = 1,
+ D40_DMA_SUSPEND_REQ = 2,
+ D40_DMA_SUSPENDED = 3
+};
+
+/**
+ * struct d40_lli_pool - Structure for keeping LLIs in memory
+ *
+ * @base: Pointer to a memory area used when the pre_alloc_lli entries are
+ * not large enough, i.e. bigger than the most common case of 1 dst and
+ * 1 src. NULL if pre_alloc_lli is used.
+ * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
+ * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
+ * one buffer to one buffer.
+ */
+struct d40_lli_pool {
+ void *base;
+ int size;
+ /* Space for dst and src, plus an extra for padding */
+ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
+};
+
+/**
+ * struct d40_desc - A descriptor is one DMA job.
+ *
+ * @lli_phy: LLI settings for physical channel. Both src and dst
+ * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
+ * lli_len equals one.
+ * @lli_log: Same as above but for logical channels.
+ * @lli_pool: The pool with two entries pre-allocated.
+ * @lli_len: Number of LLIs in lli_pool.
+ * @lli_tcount: Number of LLIs processed in the transfer. When it equals
+ * lli_len, this transfer job is done.
+ * @txd: DMA engine struct. Used, among other things, for communication
+ * during a transfer.
+ * @node: List entry.
+ * @dir: The transfer direction of this job.
+ * @is_in_client_list: true if the client owns this descriptor.
+ *
+ * This descriptor is used for both logical and physical transfers.
+ */
+
+struct d40_desc {
+ /* LLI physical */
+ struct d40_phy_lli_bidir lli_phy;
+ /* LLI logical */
+ struct d40_log_lli_bidir lli_log;
+
+ struct d40_lli_pool lli_pool;
+ u32 lli_len;
+ u32 lli_tcount;
+
+ struct dma_async_tx_descriptor txd;
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ bool is_in_client_list;
+};
+
+/**
+ * struct d40_lcla_pool - LCLA pool settings and data.
+ *
+ * @base: The virtual address of LCLA.
+ * @phy: Physical base address of LCLA.
+ * @base_size: Size of the LCLA area.
+ * @lock: Lock to protect the content in this struct.
+ * @alloc_map: Mapping between physical channel and LCLA entries.
+ * @num_blocks: The number of entries in alloc_map. Equal to the
+ * number of physical channels.
+ */
+struct d40_lcla_pool {
+ void *base;
+ dma_addr_t phy;
+ resource_size_t base_size;
+ spinlock_t lock;
+ u32 *alloc_map;
+ int num_blocks;
+};
+
+/**
+ * struct d40_phy_res - struct for handling eventlines mapped to physical
+ * channels.
+ *
+ * @lock: A lock protecting this entity.
+ * @num: The physical channel number of this entity.
+ * @allocated_src: Bit map showing which src event lines are mapped to
+ * this physical channel. Can also be free or physically allocated.
+ * @allocated_dst: Same as for src but for dst.
+ * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
+ * the event line number. allocated_src and allocated_dst cannot both be
+ * allocated to a physical channel, since the interrupt handler would then
+ * have no way of figuring out which one the interrupt belongs to.
+ */
+struct d40_phy_res {
+ spinlock_t lock;
+ int num;
+ u32 allocated_src;
+ u32 allocated_dst;
+};
+
+struct d40_base;
+
+/**
+ * struct d40_chan - Struct that describes a channel.
+ *
+ * @lock: A spinlock to protect this struct.
+ * @log_num: The logical channel number, if any, of this channel.
+ * @completed: Starts at 1; after the first interrupt it is set to the
+ * dma engine's current cookie.
+ * @pending_tx: The number of pending transfers. Used between interrupt handler
+ * and tasklet.
+ * @busy: Set to true when transfer is ongoing on this channel.
+ * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @chan: DMA engine handle.
+ * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
+ * transfer and call client callback.
+ * @client: Client-owned descriptor list.
+ * @active: Active descriptor.
+ * @queue: Queued jobs.
+ * @free: List of free descriptors, ready to be reused.
+ * @free_len: Number of descriptors in the free list.
+ * @dma_cfg: The client configuration of this dma channel.
+ * @base: Pointer to the device instance struct.
+ * @src_def_cfg: Default cfg register setting for src.
+ * @dst_def_cfg: Default cfg register setting for dst.
+ * @log_def: Default logical channel settings.
+ * @lcla: Space for one dst/src pair for logical channel transfers.
+ * @lcpa: Pointer to dst and src lcpa settings.
+ *
+ * This struct can either "be" a logical or a physical channel.
+ */
+struct d40_chan {
+ spinlock_t lock;
+ int log_num;
+ /* ID of the most recent completed transfer */
+ int completed;
+ int pending_tx;
+ bool busy;
+ struct d40_phy_res *phy_chan;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+ struct list_head client;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+ int free_len;
+ struct stedma40_chan_cfg dma_cfg;
+ struct d40_base *base;
+ /* Default register configurations */
+ u32 src_def_cfg;
+ u32 dst_def_cfg;
+ struct d40_def_lcsp log_def;
+ struct d40_lcla_elem lcla;
+ struct d40_log_lli_full *lcpa;
+};
+
+/**
+ * struct d40_base - The big global struct, one for each probed instance.
+ *
+ * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
+ * @execmd_lock: Lock for execute command usage since several channels share
+ * the same physical register.
+ * @dev: The device structure.
+ * @virtbase: The virtual base address of the DMA's register.
+ * @clk: Pointer to the DMA clock structure.
+ * @phy_start: Physical memory start of the DMA registers.
+ * @phy_size: Size of the DMA register map.
+ * @irq: The IRQ number.
+ * @num_phy_chans: The number of physical channels. Read from HW. This
+ * is the number of available channels for this driver, not counting "Secure
+ * mode" allocated physical channels.
+ * @num_log_chans: The number of logical channels. Calculated from
+ * num_phy_chans.
+ * @dma_both: dma_device channels that can do both memcpy and slave transfers.
+ * @dma_slave: dma_device channels that can only do slave transfers.
+ * @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
+ * @log_chans: Room for all possible logical channels in system.
+ * @lookup_log_chans: Used to map interrupt number to logical channel. Points
+ * to log_chans entries.
+ * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
+ * to phy_chans entries.
+ * @plat_data: Pointer to provided platform_data which is the driver
+ * configuration.
+ * @phy_res: Vector containing all physical channels.
+ * @lcla_pool: lcla pool settings and data.
+ * @lcpa_base: The virtual mapped address of LCPA.
+ * @phy_lcpa: The physical address of the LCPA.
+ * @lcpa_size: The size of the LCPA area.
+ */
+struct d40_base {
+ spinlock_t interrupt_lock;
+ spinlock_t execmd_lock;
+ struct device *dev;
+ void __iomem *virtbase;
+ struct clk *clk;
+ phys_addr_t phy_start;
+ resource_size_t phy_size;
+ int irq;
+ int num_phy_chans;
+ int num_log_chans;
+ struct dma_device dma_both;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct d40_chan *phy_chans;
+ struct d40_chan *log_chans;
+ struct d40_chan **lookup_log_chans;
+ struct d40_chan **lookup_phy_chans;
+ struct stedma40_platform_data *plat_data;
+ /* Physical half channels */
+ struct d40_phy_res *phy_res;
+ struct d40_lcla_pool lcla_pool;
+ void *lcpa_base;
+ dma_addr_t phy_lcpa;
+ resource_size_t lcpa_size;
+};
+
+/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: Start delta into the lookup_log_chans in d40_base. If equal to
+ * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static int d40_pool_lli_alloc(struct d40_desc *d40d,
+ int lli_len, bool is_log)
+{
+ u32 align;
+ void *base;
+
+ if (is_log)
+ align = sizeof(struct d40_log_lli);
+ else
+ align = sizeof(struct d40_phy_lli);
+
+ if (lli_len == 1) {
+ base = d40d->lli_pool.pre_alloc_lli;
+ d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
+ d40d->lli_pool.base = NULL;
+ } else {
+ d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+
+ base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
+ d40d->lli_pool.base = base;
+
+ if (d40d->lli_pool.base == NULL)
+ return -ENOMEM;
+ }
+
+ if (is_log) {
+ d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
+ align);
+ d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
+ align);
+ } else {
+ d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
+ align);
+ d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
+ align);
+
+ d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
+ d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
+ }
+
+ return 0;
+}
+
+static void d40_pool_lli_free(struct d40_desc *d40d)
+{
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.size = 0;
+ d40d->lli_log.src = NULL;
+ d40d->lli_log.dst = NULL;
+ d40d->lli_phy.src = NULL;
+ d40d->lli_phy.dst = NULL;
+ d40d->lli_phy.src_addr = 0;
+ d40d->lli_phy.dst_addr = 0;
+}
+
+static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
+ struct d40_desc *desc)
+{
+ dma_cookie_t cookie = d40c->chan.cookie;
+
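+ /* Cookies are signed; zero and negatives are reserved, so wrap to 1 */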
+ if (++cookie < 0)
+ cookie = 1;
+
+ d40c->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+static void d40_desc_reset(struct d40_desc *d40d)
+{
+ d40d->lli_tcount = 0;
+}
+
+static void d40_desc_remove(struct d40_desc *d40d)
+{
+ list_del(&d40d->node);
+}
+
+static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
+{
+ struct d40_desc *desc;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ if (!list_empty(&d40c->client)) {
+ list_for_each_entry_safe(d, _d, &d40c->client, node)
+ if (async_tx_test_ack(&d->txd)) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ desc = d;
+ goto out;
+ }
+ }
+
+ if (list_empty(&d40c->free)) {
+ /* Allocate a new desc because the free list is empty */
+ desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&d40c->free,
+ struct d40_desc,
+ node);
+ list_del(&desc->node);
+ d40c->free_len--;
+ }
+out:
+ return desc;
+}
+
+static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (d40c->free_len < D40_DESC_CACHE_SIZE) {
+ list_add_tail(&d40d->node, &d40c->free);
+ d40c->free_len++;
+ } else
+ kfree(d40d);
+}
+
+static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->active);
+}
+
+static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->active))
+ return NULL;
+
+ d = list_first_entry(&d40c->active,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->queue);
+}
+
+static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->queue,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+/* Support functions for logical channels */
+
+static int d40_lcla_id_get(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool)
+{
+ int src_id = 0;
+ int dst_id = 0;
+ struct d40_log_lli *lcla_lidx_base =
+ pool->base + d40c->phy_chan->num * 1024;
+ int i;
+ int lli_per_log = d40c->base->plat_data->llis_per_log;
+
+ if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
+ return 0;
+
+ if (pool->num_blocks > 32)
+ return -EINVAL;
+
+ spin_lock(&pool->lock);
+
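+ /*
+ * Two passes over this phy channel's bitmap: the first free bit
+ * becomes the src LCLA entry, the next free bit the dst entry.
+ */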
+ for (i = 0; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+ src_id = i;
+ if (src_id >= pool->num_blocks)
+ goto err;
+
+ for (; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+
+ dst_id = i;
+ if (dst_id == src_id)
+ goto err;
+
+ d40c->lcla.src_id = src_id;
+ d40c->lcla.dst_id = dst_id;
+ d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
+ d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
+
+
+ spin_unlock(&pool->lock);
+ return 0;
+err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
+
+static void d40_lcla_id_put(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool,
+ int id)
+{
+ if (id < 0)
+ return;
+
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock(&pool->lock);
+ pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
+ spin_unlock(&pool->lock);
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ int status, i;
+ void __iomem *active_reg;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->base->execmd_lock, flags);
+
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ goto done;
+ }
+
+ writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+
+ for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DMA_STOP ||
+ status == D40_DMA_SUSPENDED)
+ break;
+ }
+
+ if (i == D40_SUSPEND_MAX_IT) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
+ __func__, d40c->phy_chan->num, d40c->log_num,
+ status);
+ dump_stack();
+ ret = -EBUSY;
+ }
+
+ }
+done:
+ spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
+ return ret;
+}
+
+static void d40_term_all(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ /* Release active descriptors */
+ while ((d40d = d40_first_active_get(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release queued descriptors waiting for transfer */
+ while ((d40d = d40_first_queued(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d);
+ }
+
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.src_id);
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.dst_id);
+
+ d40c->pending_tx = 0;
+ d40c->busy = false;
+}
+
+static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (do_enable)
+ val = D40_ACTIVATE_EVENTLINE;
+ else
+ val = D40_DEACTIVATE_EVENTLINE;
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ /* Enable event line connected to device (or memcpy) */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ }
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+}
+
+static u32 d40_chan_has_events(struct d40_chan *d40c)
+{
+ u32 val = 0;
+
+ /* If SSLNK or SDLNK is zero all events are disabled */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ return val;
+}
+
+static void d40_config_enable_lidx(struct d40_chan *d40c)
+{
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+}
+
+static int d40_config_write(struct d40_chan *d40c)
+{
+ u32 addr_base;
+ u32 var;
+ int res;
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ return res;
+
+ /* Odd addresses are even addresses + 4 */
+ addr_base = (d40c->phy_chan->num % 2) * 4;
+ /* Setup channel mode to logical or physical */
+ var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ D40_CHAN_POS(d40c->phy_chan->num);
+ writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
+
+ /* Setup operational mode option register */
+ var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
+ 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+
+ writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /* Set default config for CFG reg */
+ writel(d40c->src_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDCFG);
+
+ d40_config_enable_lidx(d40c);
+ }
+ return res;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+
+ if (d40d->lli_phy.dst && d40d->lli_phy.src) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_tcount = d40d->lli_len;
+ } else if (d40d->lli_log.dst && d40d->lli_log.src) {
+ u32 lli_len;
+ struct d40_log_lli *src = d40d->lli_log.src;
+ struct d40_log_lli *dst = d40d->lli_log.dst;
+
+ src += d40d->lli_tcount;
+ dst += d40d->lli_tcount;
+
+ if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+ lli_len = d40d->lli_len;
+ else
+ lli_len = d40c->base->plat_data->llis_per_log;
+ d40d->lli_tcount += lli_len;
+ d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
+ d40c->lcla.dst,
+ dst, src,
+ d40c->base->plat_data->llis_per_log);
+ }
+}
+
+static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct d40_chan *d40c = container_of(tx->chan,
+ struct d40_chan,
+ chan);
+ struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ tx->cookie = d40_assign_cookie(d40c, d40d);
+
+ d40_desc_queue(d40c, d40d);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return tx->cookie;
+}
+
+static int d40_start(struct d40_chan *d40c)
+{
+ int err;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (err)
+ return err;
+ d40_config_set_event(d40c, true);
+ }
+
+ err = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+ return err;
+}
+
+static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ int err;
+
+ /* Start queued jobs, if any */
+ d40d = d40_first_queued(d40c);
+
+ if (d40d != NULL) {
+ d40c->busy = true;
+
+ /* Remove from queue */
+ d40_desc_remove(d40d);
+
+ /* Add to active queue */
+ d40_desc_submit(d40c, d40d);
+
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
+
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
+
+ return d40d;
+}
+
+/* called from interrupt context */
+static void dma_tc_handle(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+
+ if (!d40c->phy_chan)
+ return;
+
+ /* Get first active entry from list */
+ d40d = d40_first_active_get(d40c);
+
+ if (d40d == NULL)
+ return;
+
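+ /* If the descriptor has LLIs left, reload and keep the channel running */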
+ if (d40d->lli_tcount < d40d->lli_len) {
+
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+
+ d40c->pending_tx++;
+ tasklet_schedule(&d40c->tasklet);
+
+}
+
+static void dma_tasklet(unsigned long data)
+{
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Get first active entry from list */
+ d40d_fin = d40_first_active_get(d40c);
+
+ if (d40d_fin == NULL)
+ goto err;
+
+ d40c->completed = d40d_fin->txd.cookie;
+
+ /*
+ * If a channel is being terminated, pending_tx is set to zero.
+ * This prevents any finished active jobs from being returned to the client.
+ */
+ if (d40c->pending_tx == 0) {
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return;
+ }
+
+ /* Callback to client */
+ callback = d40d_fin->txd.callback;
+ callback_param = d40d_fin->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d_fin->txd)) {
+ d40_pool_lli_free(d40d_fin);
+ d40_desc_remove(d40d_fin);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d_fin);
+ } else {
+ d40_desc_reset(d40d_fin);
+ if (!d40d_fin->is_in_client_list) {
+ d40_desc_remove(d40d_fin);
+ list_add_tail(&d40d_fin->node, &d40c->client);
+ d40d_fin->is_in_client_list = true;
+ }
+ }
+
+ d40c->pending_tx--;
+
+ if (d40c->pending_tx)
+ tasklet_schedule(&d40c->tasklet);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+ /* Rescue manoeuvre if receiving double interrupts */
+ if (d40c->pending_tx > 0)
+ d40c->pending_tx--;
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static irqreturn_t d40_handle_interrupt(int irq, void *data)
+{
+ static const struct d40_interrupt_lookup il[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+ };
+
+ int i;
+ u32 regs[ARRAY_SIZE(il)];
+ u32 tmp;
+ u32 idx;
+ u32 row;
+ long chan = -1;
+ struct d40_chan *d40c;
+ unsigned long flags;
+ struct d40_base *base = data;
+
+ spin_lock_irqsave(&base->interrupt_lock, flags);
+
+ /* Read interrupt status of both logical and physical channels */
+ for (i = 0; i < ARRAY_SIZE(il); i++)
+ regs[i] = readl(base->virtbase + il[i].src);
+
+ for (;;) {
+
+ chan = find_next_bit((unsigned long *)regs,
+ BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+
+ /* No more set bits found? */
+ if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ break;
+
+ row = chan / BITS_PER_LONG;
+ idx = chan & (BITS_PER_LONG - 1);
+
+ /* ACK interrupt */
+ tmp = readl(base->virtbase + il[row].clr);
+ tmp |= 1 << idx;
+ writel(tmp, base->virtbase + il[row].clr);
+
+ if (il[row].offset == D40_PHY_CHAN)
+ d40c = base->lookup_phy_chans[idx];
+ else
+ d40c = base->lookup_log_chans[il[row].offset + idx];
+ spin_lock(&d40c->lock);
+
+ if (!il[row].is_error)
+ dma_tc_handle(d40c);
+ else
+ dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
+ __func__, chan, il[row].offset, idx);
+
+ spin_unlock(&d40c->lock);
+ }
+
+ spin_unlock_irqrestore(&base->interrupt_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int d40_validate_conf(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *conf)
+{
+ int res = 0;
+ u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
+ u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
+ bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+ src_event_group == STEDMA40_DEV_SRC_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] No event line\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
+ (src_event_group != dst_event_group)) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid event group\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ /*
+ * The DMAC HW supports it. Support will be added to this driver
+ * if any dma client requires it.
+ */
+ dev_err(&d40c->chan.dev->device,
+ "[%s] periph to periph not supported\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+ int log_event_line, bool is_log)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!is_log) {
+ /* Physical interrupts are masked per physical full channel */
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ phy->allocated_dst = D40_ALLOC_PHY;
+ phy->allocated_src = D40_ALLOC_PHY;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ if (phy->allocated_src == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_src == D40_ALLOC_FREE)
+ phy->allocated_src = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_src & (1 << log_event_line))) {
+ phy->allocated_src |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ } else {
+ if (phy->allocated_dst == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_dst == D40_ALLOC_FREE)
+ phy->allocated_dst = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_dst & (1 << log_event_line))) {
+ phy->allocated_dst |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return false;
+found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return true;
+}
+
+static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
+ int log_event_line)
+{
+ unsigned long flags;
+ bool is_free = false;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!log_event_line) {
+ /* Physical interrupts are masked per physical full channel */
+ phy->allocated_dst = D40_ALLOC_FREE;
+ phy->allocated_src = D40_ALLOC_FREE;
+ is_free = true;
+ goto out;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ phy->allocated_src &= ~(1 << log_event_line);
+ if (phy->allocated_src == D40_ALLOC_LOG_FREE)
+ phy->allocated_src = D40_ALLOC_FREE;
+ } else {
+ phy->allocated_dst &= ~(1 << log_event_line);
+ if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
+ phy->allocated_dst = D40_ALLOC_FREE;
+ }
+
+ is_free = ((phy->allocated_src | phy->allocated_dst) ==
+ D40_ALLOC_FREE);
+
+out:
+ spin_unlock_irqrestore(&phy->lock, flags);
+
+ return is_free;
+}
+
+static int d40_allocate_channel(struct d40_chan *d40c)
+{
+ int dev_type;
+ int event_group;
+ int event_line;
+ struct d40_phy_res *phys;
+ int i;
+ int j;
+ int log_num;
+ bool is_src;
+ bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+
+ phys = d40c->base->phy_res;
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ dev_type = d40c->dma_cfg.src_dev_type;
+ log_num = 2 * dev_type;
+ is_src = true;
+ } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* dst event lines are used for logical memcpy */
+ dev_type = d40c->dma_cfg.dst_dev_type;
+ log_num = 2 * dev_type + 1;
+ is_src = false;
+ } else
+ return -EINVAL;
+
+ event_group = D40_TYPE_TO_GROUP(dev_type);
+ event_line = D40_TYPE_TO_EVENT(dev_type);
+
+ if (!is_log) {
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* Find physical half channel */
+ for (i = 0; i < d40c->base->num_phy_chans; i++) {
+
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ } else
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ }
+ return -EINVAL;
+found_phy:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = D40_PHY_CHAN;
+ goto out;
+ }
+ if (dev_type == -1)
+ return -EINVAL;
+
+ /* Find logical channel */
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ /*
+ * Spread logical channels across all available physical channels
+ * rather than packing every logical channel onto the first
+ * available physical channel.
+ */
+ if (is_src) {
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ } else {
+ for (i = phy_num + 1; i >= phy_num; i--) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ }
+ }
+ return -EINVAL;
+
+found_log:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = log_num;
+out:
+
+ if (is_log)
+ d40c->base->lookup_log_chans[d40c->log_num] = d40c;
+ else
+ d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
+
+ return 0;
+
+}
+
+static int d40_config_chan(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *info)
+{
+
+ /* Fill in basic CFG register values */
+ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.src_dev_type * 32;
+ else
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.dst_dev_type * 32 + 16;
+ }
+
+ /* Write channel configuration to the DMA */
+ return d40_config_write(d40c);
+}
+
+static int d40_config_memcpy(struct d40_chan *d40c)
+{
+ dma_cap_mask_t cap = d40c->chan.device->cap_mask;
+
+ if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
+ d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
+ d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
+ memcpy[d40c->chan.chan_id];
+
+ } else if (dma_has_cap(DMA_MEMCPY, cap) &&
+ dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+ } else {
+ dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int d40_free_dma(struct d40_chan *d40c)
+{
+
+ int res = 0;
+ u32 event, dir;
+ struct d40_phy_res *phy = d40c->phy_chan;
+ bool is_src;
+
+ /* Terminate all queued and active transfers */
+ d40_term_all(d40c);
+
+ if (phy == NULL) {
+ dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res) {
+ dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
+ __func__);
+ return res;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ dir = D40_CHAN_REG_SDLNK;
+ is_src = false;
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ dir = D40_CHAN_REG_SSLNK;
+ is_src = true;
+ } else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /*
+ * Release the logical channel; deactivate the event line while
+ * the physical resource is suspended.
+ */
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
+ D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ phy->num * D40_DREG_PCDELTA + dir);
+
+ d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+
+ /*
+ * Check if there are more logical allocations
+ * on this phy channel.
+ */
+ if (!d40_alloc_mask_free(phy, is_src, event)) {
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c)) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Executing RUN command\n",
+ __func__);
+ return res;
+ }
+ }
+ return 0;
+ }
+ } else
+ d40_alloc_mask_free(phy, is_src, 0);
+
+ /* Release physical channel */
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to stop channel\n", __func__);
+ return res;
+ }
+ d40c->phy_chan = NULL;
+ /* Invalidate channel type */
+ d40c->dma_cfg.channel_type = 0;
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
+
+ return 0;
+
+
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static bool d40_is_paused(struct d40_chan *d40c)
+{
+ bool is_paused = false;
+ unsigned long flags;
+ void __iomem *active_reg;
+ u32 status;
+ u32 event;
+ int res;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num == D40_PHY_CHAN) {
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ is_paused = true;
+
+ goto _exit;
+ }
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res != 0)
+ goto _exit;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ goto _exit;
+ }
+ status = d40_chan_has_events(d40c);
+ status = (status & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ if (status != D40_DMA_RUN)
+ is_paused = true;
+
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+
+_exit:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return is_paused;
+
+}
+
+
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
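+ /* data_width is a log2 element size, so this converts elements to bytes */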
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static int d40_resume(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ goto out;
+
+ /* If there are bytes left to transfer or a linked tx, resume the job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ d40_config_set_event(d40c, true);
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+ } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static u32 stedma40_residue(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ u32 bytes_left;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+ bytes_left = d40_residue(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return bytes_left;
+}
+
+/* Public DMA functions in addition to the DMA engine framework */
+
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ goto out;
+ }
+
+ if (src_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ if (dst_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(stedma40_set_psize);
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags)
+{
+ int res;
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL)
+ goto err;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+ d40d->lli_len = sgl_len;
+
+ d40d->txd.flags = flags;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (sgl_len > 1)
+ /*
+ * Check if there is space available in lcla. If not,
+ * split the list into one-element chunks and run only
+ * in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c,
+ &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ (void) d40_log_sg_to_lli(d40c->lcla.src_id,
+ sgl_src,
+ sgl_len,
+ d40d->lli_log.src,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+ (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
+ sgl_dst,
+ sgl_len,
+ d40d->lli_log.dst,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+
+ } else {
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ res = d40_phy_sg_to_lli(sgl_src,
+ sgl_len,
+ 0,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ res = d40_phy_sg_to_lli(sgl_dst,
+ sgl_len,
+ 0,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ return &d40d->txd;
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+EXPORT_SYMBOL(stedma40_memcpy_sg);
+
+bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ struct stedma40_chan_cfg *info = data;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+
+ if (data) {
+ err = d40_validate_conf(d40c, info);
+ if (!err)
+ d40c->dma_cfg = *info;
+ } else
+ err = d40_config_memcpy(d40c);
+
+ return err == 0;
+}
+EXPORT_SYMBOL(stedma40_filter);
+
+/* DMA ENGINE functions */
+static int d40_alloc_chan_resources(struct dma_chan *chan)
+{
+ int err;
+ unsigned long flags;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ d40c->completed = chan->cookie = 1;
+
+ /*
+ * If no dma configuration is set (channel_type == 0)
+ * use default configuration
+ */
+ if (d40c->dma_cfg.channel_type == 0) {
+ err = d40_config_memcpy(d40c);
+ if (err)
+ goto err_alloc;
+ }
+
+ err = d40_allocate_channel(d40c);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to allocate channel\n", __func__);
+ goto err_alloc;
+ }
+
+ err = d40_config_chan(d40c, &d40c->dma_cfg);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure channel\n",
+ __func__);
+ goto err_config;
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+
+ err_config:
+ (void) d40_free_dma(d40c);
+ err_alloc:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel allocation failed\n", __func__);
+ return -EINVAL;
+}
+
+static void d40_free_chan_resources(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ err = d40_free_dma(d40c);
+
+ if (err)
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to free channel\n", __func__);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t size,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err = 0;
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Descriptor is NULL\n", __func__);
+ goto err;
+ }
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+
+ if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+ d40d->lli_len = 1;
+
+ d40_log_fill_lli(d40d->lli_log.src,
+ src,
+ size,
+ 0,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ true, true);
+
+ d40_log_fill_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ 0,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ true, true);
+
+ } else {
+
+ if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ err = d40_phy_fill_lli(d40d->lli_phy.src,
+ src,
+ size,
+ d40c->dma_cfg.src_info.psize,
+ 0,
+ d40c->src_def_cfg,
+ true,
+ d40c->dma_cfg.src_info.data_width,
+ false);
+ if (err)
+ goto err_fill_lli;
+
+ err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ dst,
+ size,
+ d40c->dma_cfg.dst_info.psize,
+ 0,
+ d40c->dst_def_cfg,
+ true,
+ d40c->dma_cfg.dst_info.data_width,
+ false);
+
+ if (err)
+ goto err_fill_lli;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return &d40d->txd;
+
+err_fill_lli:
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed filling in PHY LLI\n", __func__);
+ d40_pool_lli_free(d40d);
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+
+static int d40_prep_slave_sg_log(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t dev_addr = 0;
+ int total_size;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+ if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sg_len;
+ d40d->lli_tcount = 0;
+
+ if (sg_len > 1)
+ /*
+ * Check if there is space available in lcla.
+ * If not, split the list into one-element chunks and run
+ * only in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else if (direction == DMA_TO_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else
+ return -EINVAL;
+ if (total_size < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sgl_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t src_dev_addr;
+ dma_addr_t dst_dev_addr;
+ int res;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sgl_len;
+ d40d->lli_tcount = 0;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dst_dev_addr = 0;
+ src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ } else if (direction == DMA_TO_DEVICE) {
+ dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ src_dev_addr = 0;
+ } else
+ return -EINVAL;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ src_dev_addr,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ dst_dev_addr,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err;
+
+ if (d40c->dma_cfg.pre_transfer)
+ d40c->dma_cfg.pre_transfer(chan,
+ d40c->dma_cfg.pre_transfer_data,
+ sg_dma_len(sgl));
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ if (d40d == NULL)
+ return NULL;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ else
+ err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to prepare %s slave sg job: %d\n",
+ __func__,
+ d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
+ return NULL;
+ }
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ return &d40d->txd;
+}
+
+static enum dma_status d40_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = d40c->completed;
+ last_used = chan->cookie;
+
+ if (d40_is_paused(d40c))
+ ret = DMA_PAUSED;
+ else
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used,
+ stedma40_residue(chan));
+
+ return ret;
+}
+
+static void d40_issue_pending(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!d40c->busy)
+ (void) d40_queue_start(d40c);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&d40c->lock, flags);
+ d40_term_all(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+ case DMA_PAUSE:
+ return d40_pause(chan);
+ case DMA_RESUME:
+ return d40_resume(chan);
+ }
+
+ /* Other commands are unimplemented */
+ return -ENXIO;
+}
+
+/* Initialization functions */
+
+static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
+ struct d40_chan *chans, int offset,
+ int num_chans)
+{
+ int i = 0;
+ struct d40_chan *d40c;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (i = offset; i < offset + num_chans; i++) {
+ d40c = &chans[i];
+ d40c->base = base;
+ d40c->chan.device = dma;
+
+ /* Invalidate lcla element */
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock_init(&d40c->lock);
+
+ d40c->log_num = D40_PHY_CHAN;
+
+ INIT_LIST_HEAD(&d40c->free);
+ INIT_LIST_HEAD(&d40c->active);
+ INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->client);
+
+ d40c->free_len = 0;
+
+ tasklet_init(&d40c->tasklet, dma_tasklet,
+ (unsigned long) d40c);
+
+ list_add_tail(&d40c->chan.device_node,
+ &dma->channels);
+ }
+}
+
+static int __init d40_dmaengine_init(struct d40_base *base,
+ int num_reserved_chans)
+{
+	int err;
+
+ d40_chan_init(base, &base->dma_slave, base->log_chans,
+ 0, base->num_log_chans);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_slave.device_tx_status = d40_tx_status;
+ base->dma_slave.device_issue_pending = d40_issue_pending;
+ base->dma_slave.device_control = d40_control;
+ base->dma_slave.dev = base->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register slave channels\n",
+ __func__);
+ goto failure1;
+ }
+
+ d40_chan_init(base, &base->dma_memcpy, base->log_chans,
+ base->num_log_chans, base->plat_data->memcpy_len);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_memcpy.device_tx_status = d40_tx_status;
+ base->dma_memcpy.device_issue_pending = d40_issue_pending;
+ base->dma_memcpy.device_control = d40_control;
+ base->dma_memcpy.dev = base->dev;
+ /*
+	 * This controller can only access addresses at even
+	 * 32-bit boundaries, i.e. an alignment of 2^2 bytes.
+ */
+ base->dma_memcpy.copy_align = 2;
+
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to regsiter memcpy only channels\n",
+ __func__);
+ goto failure2;
+ }
+
+ d40_chan_init(base, &base->dma_both, base->phy_chans,
+ 0, num_reserved_chans);
+
+ dma_cap_zero(base->dma_both.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+
+ base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_both.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_both.device_tx_status = d40_tx_status;
+ base->dma_both.device_issue_pending = d40_issue_pending;
+ base->dma_both.device_control = d40_control;
+ base->dma_both.dev = base->dev;
+ base->dma_both.copy_align = 2;
+ err = dma_async_device_register(&base->dma_both);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register logical and physical capable channels\n",
+ __func__);
+ goto failure3;
+ }
+ return 0;
+failure3:
+ dma_async_device_unregister(&base->dma_memcpy);
+failure2:
+ dma_async_device_unregister(&base->dma_slave);
+failure1:
+ return err;
+}
+
+/* Initialization functions. */
+
+static int __init d40_phy_res_init(struct d40_base *base)
+{
+ int i;
+ int num_phy_chans_avail = 0;
+ u32 val[2];
+ int odd_even_bit = -2;
+
+ val[0] = readl(base->virtbase + D40_DREG_PRSME);
+ val[1] = readl(base->virtbase + D40_DREG_PRSMO);
+
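+	/*
+	 * Two bits per physical channel: even-numbered channels are read
+	 * from PRSME and odd-numbered ones from PRSMO. A field value of 1
+	 * is treated below as a channel reserved for secure use.
+	 */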
+ for (i = 0; i < base->num_phy_chans; i++) {
+ base->phy_res[i].num = i;
+ odd_even_bit += 2 * ((i % 2) == 0);
+ if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
+ /* Mark security only channels as occupied */
+ base->phy_res[i].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ } else {
+ base->phy_res[i].allocated_src = D40_ALLOC_FREE;
+ base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ num_phy_chans_avail++;
+ }
+ spin_lock_init(&base->phy_res[i].lock);
+ }
+ dev_info(base->dev, "%d of %d physical DMA channels available\n",
+ num_phy_chans_avail, base->num_phy_chans);
+
+ /* Verify settings extended vs standard */
+ val[0] = readl(base->virtbase + D40_DREG_PRTYP);
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
+ (val[0] & 0x3) != 1)
+ dev_info(base->dev,
+ "[%s] INFO: channel %d is misconfigured (%d)\n",
+ __func__, i, val[0] & 0x3);
+
+ val[0] = val[0] >> 2;
+ }
+
+ return num_phy_chans_avail;
+}
+
+static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
+{
+ static const struct d40_reg_val dma_id_regs[] = {
+ /* Peripheral Id */
+ { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
+ { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
+ /*
+ * D40_DREG_PERIPHID2 Depends on HW revision:
+ * MOP500/HREF ED has 0x0008,
+ * ? has 0x0018,
+ * HREF V1 has 0x0028
+ */
+ { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
+
+ /* PCell Id */
+ { .reg = D40_DREG_CELLID0, .val = 0x000d},
+ { .reg = D40_DREG_CELLID1, .val = 0x00f0},
+ { .reg = D40_DREG_CELLID2, .val = 0x0005},
+ { .reg = D40_DREG_CELLID3, .val = 0x00b1}
+ };
+ struct stedma40_platform_data *plat_data;
+ struct clk *clk = NULL;
+ void __iomem *virtbase = NULL;
+ struct resource *res = NULL;
+ struct d40_base *base = NULL;
+ int num_log_chans = 0;
+ int num_phy_chans;
+ int i;
+
+ clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "[%s] No matching clock found\n",
+ __func__);
+ goto failure;
+ }
+
+ clk_enable(clk);
+
+ /* Get IO for DMAC base address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res)
+ goto failure;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O base") == NULL)
+ goto failure;
+
+ virtbase = ioremap(res->start, resource_size(res));
+ if (!virtbase)
+ goto failure;
+
+ /* HW version check */
+ for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
+ if (dma_id_regs[i].val !=
+ readl(virtbase + dma_id_regs[i].reg)) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
+ __func__,
+ dma_id_regs[i].val,
+ dma_id_regs[i].reg,
+ readl(virtbase + dma_id_regs[i].reg));
+ goto failure;
+ }
+ }
+
+ i = readl(virtbase + D40_DREG_PERIPHID2);
+
+ if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown designer! Got %x wanted %x\n",
+ __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+ goto failure;
+ }
+
+ /* The number of physical channels on this HW */
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
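+	/* ICFG[2:0] encodes the channel count in steps of four, i.e. 4..32 channels. */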
+
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
+ (i >> 4) & 0xf, res->start);
+
+ plat_data = pdev->dev.platform_data;
+
+ /* Count the number of logical channels in use */
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_rx[i] != 0)
+ num_log_chans++;
+
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_tx[i] != 0)
+ num_log_chans++;
+
+ base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
+ (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+ sizeof(struct d40_chan), GFP_KERNEL);
+
+ if (base == NULL) {
+ dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ goto failure;
+ }
+
+ base->clk = clk;
+ base->num_phy_chans = num_phy_chans;
+ base->num_log_chans = num_log_chans;
+ base->phy_start = res->start;
+ base->phy_size = resource_size(res);
+ base->virtbase = virtbase;
+ base->plat_data = plat_data;
+ base->dev = &pdev->dev;
+ base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
+ base->log_chans = &base->phy_chans[num_phy_chans];
+
+ base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+ GFP_KERNEL);
+ if (!base->phy_res)
+ goto failure;
+
+ base->lookup_phy_chans = kzalloc(num_phy_chans *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_phy_chans)
+ goto failure;
+
+ if (num_log_chans + plat_data->memcpy_len) {
+ /*
+ * The max number of logical channels are event lines for all
+ * src devices and dst devices
+ */
+ base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_log_chans)
+ goto failure;
+ }
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+ GFP_KERNEL);
+ if (!base->lcla_pool.alloc_map)
+ goto failure;
+
+ return base;
+
+failure:
+	if (!IS_ERR_OR_NULL(clk)) {
+ clk_disable(clk);
+ clk_put(clk);
+ }
+ if (virtbase)
+ iounmap(virtbase);
+ if (res)
+ release_mem_region(res->start,
+ resource_size(res));
+
+ if (base) {
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ return NULL;
+}
+
+static void __init d40_hw_init(struct d40_base *base)
+{
+
+ static const struct d40_reg_val dma_init_reg[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+ };
+ int i;
+ u32 prmseo[2] = {0, 0};
+ u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
+ u32 pcmis = 0;
+ u32 pcicr = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ writel(dma_init_reg[i].val,
+ base->virtbase + dma_init_reg[i].reg);
+
+ /* Configure all our dma channels to default settings */
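+	/*
+	 * PRMSE/PRMSO and ACTIVE/ACTIVO hold two bits per channel, split
+	 * across even and odd channel numbers. The values are built up by
+	 * walking the channels from the highest number downwards, shifting
+	 * two bits per pass. Channels reserved for secure use keep the value
+	 * 3 in the active register and are skipped for the interrupt and
+	 * physical-mode setup.
+	 */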
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ activeo[i % 2] = activeo[i % 2] << 2;
+
+ if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
+ == D40_ALLOC_PHY) {
+ activeo[i % 2] |= 3;
+ continue;
+ }
+
+ /* Enable interrupt # */
+ pcmis = (pcmis << 1) | 1;
+
+ /* Clear interrupt # */
+ pcicr = (pcicr << 1) | 1;
+
+ /* Set channel to physical mode */
+ prmseo[i % 2] = prmseo[i % 2] << 2;
+ prmseo[i % 2] |= 1;
+
+ }
+
+ writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
+ writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
+ writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
+ writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
+
+ /* Write which interrupt to enable */
+ writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+
+ /* Write which interrupt to clear */
+ writel(pcicr, base->virtbase + D40_DREG_PCICR);
+
+}
+
+static int __init d40_probe(struct platform_device *pdev)
+{
+ int err;
+ int ret = -ENOENT;
+ struct d40_base *base;
+ struct resource *res = NULL;
+ int num_reserved_chans;
+ u32 val;
+
+ base = d40_hw_detect_init(pdev);
+
+ if (!base)
+ goto failure;
+
+ num_reserved_chans = d40_phy_res_init(base);
+
+ platform_set_drvdata(pdev, base);
+
+ spin_lock_init(&base->interrupt_lock);
+ spin_lock_init(&base->execmd_lock);
+
+ /* Get IO for logical channel parameter address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcpa\" memory resource\n",
+ __func__);
+ goto failure;
+ }
+ base->lcpa_size = resource_size(res);
+ base->phy_lcpa = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcpa") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCPA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+	/*
+	 * The LCPA is placed in ESRAM; if a different non-zero address is
+	 * already programmed, warn instead of overwriting it.
+	 */
+ val = readl(base->virtbase + D40_DREG_LCPA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCPA);
+
+ base->lcpa_base = ioremap(res->start, resource_size(res));
+ if (!base->lcpa_base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCPA region\n",
+ __func__);
+ goto failure;
+ }
+ /* Get IO for logical channel link address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcla\" resource defined\n",
+ __func__);
+ goto failure;
+ }
+
+ base->lcla_pool.base_size = resource_size(res);
+ base->lcla_pool.phy = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcla") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCLA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+ val = readl(base->virtbase + D40_DREG_LCLA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
+
+ base->lcla_pool.base = ioremap(res->start, resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ spin_lock_init(&base->lcla_pool.lock);
+
+ base->lcla_pool.num_blocks = base->num_phy_chans;
+
+ base->irq = platform_get_irq(pdev, 0);
+
+ ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+
+ if (ret) {
+ dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ goto failure;
+ }
+
+ err = d40_dmaengine_init(base, num_reserved_chans);
+ if (err)
+ goto failure;
+
+ d40_hw_init(base);
+
+ dev_info(base->dev, "initialized\n");
+ return 0;
+
+failure:
+ if (base) {
+		if (base->lcla_pool.base)
+			iounmap(base->lcla_pool.base);
+		if (base->lcpa_base)
+			iounmap(base->lcpa_base);
+		if (base->virtbase)
+			iounmap(base->virtbase);
+ if (base->lcla_pool.phy)
+ release_mem_region(base->lcla_pool.phy,
+ base->lcla_pool.base_size);
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable(base->clk);
+ clk_put(base->clk);
+ }
+
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ return ret;
+}
+
+static struct platform_driver d40_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = D40_NAME,
+ },
+};
+
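+/*
+ * Registered via platform_driver_probe() rather than a .probe member, so
+ * d40_probe() and the rest of the __init setup code can be discarded once
+ * the driver is bound.
+ */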
+int __init stedma40_init(void)
+{
+ return platform_driver_probe(&d40_driver, d40_probe);
+}
+arch_initcall(stedma40_init);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 00000000000..561fdd8a80c
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
+/*
+ * drivers/dma/ste_dma40_ll.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3)
+{
+ u32 l3 = 0; /* dst */
+ u32 l1 = 0; /* src */
+
+ /* src is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
+
+ /* dst is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
+
+ /* src is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
+
+ /* dst is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
+
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
+ l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
+ l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
+
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
+ l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
+ l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
+
+ *lcsp1 = l1;
+ *lcsp3 = l3;
+
+}
+
+/* Sets up SRC and DST CFG register for both logical and physical channels */
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log)
+{
+ u32 src = 0;
+ u32 dst = 0;
+
+ if (!is_log) {
+ /* Physical channel */
+ if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ src |= 1 << D40_SREG_CFG_MST_POS;
+ src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
+
+ if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ src |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ dst |= 1 << D40_SREG_CFG_MST_POS;
+ dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
+
+ if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ /* Interrupt on end of transfer for destination */
+ dst |= 1 << D40_SREG_CFG_TIM_POS;
+
+ /* Generate interrupt on error */
+ src |= 1 << D40_SREG_CFG_EIM_POS;
+ dst |= 1 << D40_SREG_CFG_EIM_POS;
+
+ /* PSIZE */
+ if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+ src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+ if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+ dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ /* Element size */
+ src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+
+ } else {
+ /* Logical channel */
+ dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ }
+
+ if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
+ src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
+ dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+
+ *src_cfg = src;
+ *dst_cfg = dst;
+}
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
+{
+ int num_elems;
+
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ num_elems = 1;
+ else
+ num_elems = 2 << psize;
+
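+	/*
+	 * psize presumably selects the physical burst size:
+	 * STEDMA40_PSIZE_PHY_1 means single-element transfers, otherwise
+	 * 2 << psize elements per burst. num_elems is only used for the
+	 * minimum-size check below.
+	 */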
+ /*
+	 * The element count field is 16 bit and data_width is 8, 16, 32 or
+	 * 64 bit, so transfers of more than 0xffff elements must be split.
+ */
+ if (data_size > (0xffff << data_width))
+ return -EINVAL;
+
+ /* Must be aligned */
+ if (!IS_ALIGNED(data, 0x1 << data_width))
+ return -EINVAL;
+
+ /* Transfer size can't be smaller than (num_elms * elem_size) */
+ if (data_size < num_elems * (0x1 << data_width))
+ return -EINVAL;
+
+	/* The number of elements, i.e. how many chunks */
+ lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
+
+ /*
+ * Distance to next element sized entry.
+ * Usually the size of the element unless you want gaps.
+ */
+ if (!is_device)
+ lli->reg_elt |= (0x1 << data_width) <<
+ D40_SREG_ELEM_PHY_EIDX_POS;
+
+ /* Where the data is */
+ lli->reg_ptr = data;
+ lli->reg_cfg = reg_cfg;
+
+ /* If this scatter list entry is the last one, no next link */
+ if (next_lli == 0)
+ lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
+ else
+ lli->reg_lnk = next_lli;
+
+ /* Set/clear interrupt generation on this link item.*/
+ if (term_int)
+ lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
+ else
+ lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
+
+ /* Post link */
+ lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
+
+ return 0;
+}
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int)
+{
+ int total_size = 0;
+ int i;
+ struct scatterlist *current_sg = sg;
+ dma_addr_t next_lli_phys;
+ dma_addr_t dst;
+ int err = 0;
+
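+	/*
+	 * Build one LLI per scatterlist entry. Each LLI links to the
+	 * physical address of the next one, aligned to D40_LLI_ALIGN; the
+	 * last entry gets a zero link, which d40_phy_fill_lli() turns into a
+	 * terminating, interrupt-generating descriptor.
+	 */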
+ for_each_sg(sg, current_sg, sg_len, i) {
+
+ total_size += sg_dma_len(current_sg);
+
+ /* If this scatter list entry is the last one, no next link */
+ if (sg_len - 1 == i)
+ next_lli_phys = 0;
+ else
+ next_lli_phys = ALIGN(lli_phys + (i + 1) *
+ sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ if (target)
+ dst = target;
+ else
+ dst = sg_phys(current_sg);
+
+ err = d40_phy_fill_lli(&lli[i],
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ next_lli_phys,
+ reg_cfg,
+ !next_lli_phys,
+ data_width,
+ target == dst);
+ if (err)
+ goto err;
+ }
+
+ return total_size;
+ err:
+ return err;
+}
+
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src)
+{
+
+ writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
+
+}
+
+/* DMA logical lli operations */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc)
+{
+ lli->lcsp13 = reg_cfg;
+
+ /* The number of elements to transfer */
+ lli->lcsp02 = ((data_size >> data_width) <<
+ D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+ /* 16 LSBs address of the current element */
+ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
+ /* 16 MSBs address of the current element */
+ lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
+
+ if (addr_inc)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
+
+ lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ /* If this scatter list entry is the last one, no next link */
+ lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
+ D40_MEM_LCSP1_SLOS_MASK;
+
+ if (term_int)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ else
+ lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
+}
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off_dst;
+ u32 next_lli_off_src;
+
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+		 * If this scatter list entry is the last one, or the max
+		 * length has been reached, terminate the link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+ } else {
+ if (next_lli_off_dst == 0 &&
+ next_lli_off_src == 0) {
+ /* The first lli will be at next_lli_off */
+ next_lli_off_dst = (lcla->dst_id *
+ llis_per_log + 1);
+ next_lli_off_src = (lcla->src_id *
+ llis_per_log + 1);
+ } else {
+ next_lli_off_dst++;
+ next_lli_off_src++;
+ }
+ }
+
+ if (direction == DMA_TO_DEVICE) {
+ d40_log_fill_lli(&lli->src[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ true);
+ d40_log_fill_lli(&lli->dst[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ false);
+ } else {
+ d40_log_fill_lli(&lli->dst[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ true);
+ d40_log_fill_lli(&lli->src[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ false);
+ }
+ }
+ return total_size;
+}
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+		 * If this scatter list entry is the last one, or the max
+		 * length has been reached, terminate the link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0))
+ next_lli_off = 0;
+ else {
+ if (next_lli_off == 0)
+ /* The first lli will be at next_lli_off */
+ next_lli_off = lcla_id * llis_per_log + 1;
+ else
+ next_lli_off++;
+ }
+
+ d40_log_fill_lli(&lli_sg[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off,
+ lcsp13, data_width,
+ term_int && !next_lli_off,
+ true);
+ }
+ return total_size;
+}
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+ int i;
+
+ lcpa->lcsp0 = lli_src->lcsp02;
+ lcpa->lcsp1 = lli_src->lcsp13;
+ lcpa->lcsp2 = lli_dst->lcsp02;
+ lcpa->lcsp3 = lli_dst->lcsp13;
+
+ slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+
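+	/*
+	 * The first src/dst LLI pair was written to the LCPA above; any
+	 * further LLIs are copied into the LCLA area, continuing for as long
+	 * as both the source and destination link offsets (SLOS/DLOS) point
+	 * to another link.
+	 */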
+ for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
+ writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
+ writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
+ writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
+ writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+
+ slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+ }
+}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 00000000000..2029280cb33
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
+/*
+ * drivers/dma/ste_dma40_ll.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#ifndef STE_DMA40_LL_H
+#define STE_DMA40_LL_H
+
+#define D40_DREG_PCBASE 0x400
+#define D40_DREG_PCDELTA (8 * 4)
+#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
+
+#define D40_TYPE_TO_GROUP(type) (type / 16)
+#define D40_TYPE_TO_EVENT(type) (type % 16)
+
+/* Most bits of the CFG register are the same in log as in phy mode */
+#define D40_SREG_CFG_MST_POS 15
+#define D40_SREG_CFG_TIM_POS 14
+#define D40_SREG_CFG_EIM_POS 13
+#define D40_SREG_CFG_LOG_INCR_POS 12
+#define D40_SREG_CFG_PHY_PEN_POS 12
+#define D40_SREG_CFG_PSIZE_POS 10
+#define D40_SREG_CFG_ESIZE_POS 8
+#define D40_SREG_CFG_PRI_POS 7
+#define D40_SREG_CFG_LBE_POS 6
+#define D40_SREG_CFG_LOG_GIM_POS 5
+#define D40_SREG_CFG_LOG_MFU_POS 4
+#define D40_SREG_CFG_PHY_TM_POS 4
+#define D40_SREG_CFG_PHY_EVTL_POS 0
+
+
+/* Standard channel parameters - basic mode (element register) */
+#define D40_SREG_ELEM_PHY_ECNT_POS 16
+#define D40_SREG_ELEM_PHY_EIDX_POS 0
+
+#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
+
+/* Standard channel parameters - basic mode (Link register) */
+#define D40_SREG_LNK_PHY_TCP_POS 0
+#define D40_SREG_LNK_PHY_LMP_POS 1
+#define D40_SREG_LNK_PHY_PRE_POS 2
+/*
+ * Source/destination link address. Contains the 29-bit,
+ * 8-byte-aligned address of the reload area.
+ */
+#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
+
+/* Standard basic channel logical mode */
+
+/* Element register */
+#define D40_SREG_ELEM_LOG_ECNT_POS 16
+#define D40_SREG_ELEM_LOG_LIDX_POS 8
+#define D40_SREG_ELEM_LOG_LOS_POS 1
+#define D40_SREG_ELEM_LOG_TCP_POS 0
+
+#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
+
+/* Link register */
+#define D40_DEACTIVATE_EVENTLINE 0x0
+#define D40_ACTIVATE_EVENTLINE 0x1
+#define D40_EVENTLINE_POS(i) (2 * i)
+#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
+
+/* Standard basic channel logical params in memory */
+
+/* LCSP0 */
+#define D40_MEM_LCSP0_ECNT_POS 16
+#define D40_MEM_LCSP0_SPTR_POS 0
+
+#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
+#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
+
+/* LCSP1 */
+#define D40_MEM_LCSP1_SPTR_POS 16
+#define D40_MEM_LCSP1_SCFG_MST_POS 15
+#define D40_MEM_LCSP1_SCFG_TIM_POS 14
+#define D40_MEM_LCSP1_SCFG_EIM_POS 13
+#define D40_MEM_LCSP1_SCFG_INCR_POS 12
+#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
+#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
+#define D40_MEM_LCSP1_SLOS_POS 1
+#define D40_MEM_LCSP1_STCP_POS 0
+
+#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
+#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
+#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
+#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
+#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
+#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
+
+/* LCSP2 */
+#define D40_MEM_LCSP2_ECNT_POS 16
+
+#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+
+/* LCSP3 */
+#define D40_MEM_LCSP3_DCFG_MST_POS 15
+#define D40_MEM_LCSP3_DCFG_TIM_POS 14
+#define D40_MEM_LCSP3_DCFG_EIM_POS 13
+#define D40_MEM_LCSP3_DCFG_INCR_POS 12
+#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
+#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
+#define D40_MEM_LCSP3_DLOS_POS 1
+#define D40_MEM_LCSP3_DTCP_POS 0
+
+#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
+#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
+
+
+/* Standard channel parameter register offsets */
+#define D40_CHAN_REG_SSCFG 0x00
+#define D40_CHAN_REG_SSELT 0x04
+#define D40_CHAN_REG_SSPTR 0x08
+#define D40_CHAN_REG_SSLNK 0x0C
+#define D40_CHAN_REG_SDCFG 0x10
+#define D40_CHAN_REG_SDELT 0x14
+#define D40_CHAN_REG_SDPTR 0x18
+#define D40_CHAN_REG_SDLNK 0x1C
+
+/* DMA Register Offsets */
+#define D40_DREG_GCC 0x000
+#define D40_DREG_PRTYP 0x004
+#define D40_DREG_PRSME 0x008
+#define D40_DREG_PRSMO 0x00C
+#define D40_DREG_PRMSE 0x010
+#define D40_DREG_PRMSO 0x014
+#define D40_DREG_PRMOE 0x018
+#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+#define D40_DREG_ACTIVE 0x050
+#define D40_DREG_ACTIVO 0x054
+#define D40_DREG_FSEB1 0x058
+#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_PCMIS 0x060
+#define D40_DREG_PCICR 0x064
+#define D40_DREG_PCTIS 0x068
+#define D40_DREG_PCEIS 0x06C
+#define D40_DREG_LCMIS0 0x080
+#define D40_DREG_LCMIS1 0x084
+#define D40_DREG_LCMIS2 0x088
+#define D40_DREG_LCMIS3 0x08C
+#define D40_DREG_LCICR0 0x090
+#define D40_DREG_LCICR1 0x094
+#define D40_DREG_LCICR2 0x098
+#define D40_DREG_LCICR3 0x09C
+#define D40_DREG_LCTIS0 0x0A0
+#define D40_DREG_LCTIS1 0x0A4
+#define D40_DREG_LCTIS2 0x0A8
+#define D40_DREG_LCTIS3 0x0AC
+#define D40_DREG_LCEIS0 0x0B0
+#define D40_DREG_LCEIS1 0x0B4
+#define D40_DREG_LCEIS2 0x0B8
+#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_STFU 0xFC8
+#define D40_DREG_ICFG 0xFCC
+#define D40_DREG_PERIPHID0 0xFE0
+#define D40_DREG_PERIPHID1 0xFE4
+#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID3 0xFEC
+#define D40_DREG_CELLID0 0xFF0
+#define D40_DREG_CELLID1 0xFF4
+#define D40_DREG_CELLID2 0xFF8
+#define D40_DREG_CELLID3 0xFFC
+
+/* LLI related structures */
+
+/**
+ * struct d40_phy_lli - The basic configuration registers for each physical
+ * channel.
+ *
+ * @reg_cfg: The configuration register.
+ * @reg_elt: The element register.
+ * @reg_ptr: The pointer register.
+ * @reg_lnk: The link register.
+ *
+ * These registers are set up for both physical and logical transfers.
+ * Note that the bits in each register mean different things in logical
+ * and physical (standard) mode.
+ *
+ * This struct must be 16-byte aligned, and may only contain physical registers
+ * since it will be directly accessed by the DMA.
+ */
+struct d40_phy_lli {
+ u32 reg_cfg;
+ u32 reg_elt;
+ u32 reg_ptr;
+ u32 reg_lnk;
+};
+
+/**
+ * struct d40_phy_lli_bidir - struct for a transfer.
+ *
+ * @src: Register settings for src channel.
+ * @dst: Register settings for dst channel.
+ * @dst_addr: Physical destination address.
+ * @src_addr: Physical source address.
+ *
+ * All DMA transfers have a source and a destination.
+ */
+
+struct d40_phy_lli_bidir {
+ struct d40_phy_lli *src;
+ struct d40_phy_lli *dst;
+ dma_addr_t dst_addr;
+ dma_addr_t src_addr;
+};
+
+
+/**
+ * struct d40_log_lli - logical lli configuration
+ *
+ * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
+ * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
+ *
+ * This struct must be 8-byte aligned since it will be accessed directly by
+ * the DMA. Never add any non-hw-mapped registers to this struct.
+ */
+
+struct d40_log_lli {
+ u32 lcsp02;
+ u32 lcsp13;
+};
+
+/**
+ * struct d40_log_lli_bidir - For both src and dst
+ *
+ * @src: pointer to src lli configuration.
+ * @dst: pointer to dst lli configuration.
+ *
+ * You always have a src and a dst when doing DMA transfers.
+ */
+
+struct d40_log_lli_bidir {
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/**
+ * struct d40_log_lli_full - LCPA layout
+ *
+ * @lcsp0: Logical Channel Standard Param 0 - Src.
+ * @lcsp1: Logical Channel Standard Param 1 - Src.
+ * @lcsp2: Logical Channel Standard Param 2 - Dst.
+ * @lcsp3: Logical Channel Standard Param 3 - Dst.
+ *
+ * This struct maps to LCPA physical memory layout. Must map to
+ * the hw.
+ */
+struct d40_log_lli_full {
+ u32 lcsp0;
+ u32 lcsp1;
+ u32 lcsp2;
+ u32 lcsp3;
+};
+
+/**
+ * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
+ *
+ * @lcsp3: The default configuration for dst.
+ * @lcsp1: The default configuration for src.
+ */
+struct d40_def_lcsp {
+ u32 lcsp3;
+ u32 lcsp1;
+};
+
+/**
+ * struct d40_lcla_elem - Info for one LCA element.
+ *
+ * @src_id: logical channel src id
+ * @dst_id: logical channel dst id
+ * @src: LCPA formatted src parameters
+ * @dst: LCPA formatted dst parameters
+ *
+ */
+struct d40_lcla_elem {
+ int src_id;
+ int dst_id;
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/* Physical channels */
+
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log);
+
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+		 u32 *lcsp1, u32 *lcsp3);
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int);
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device);
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src);
+
+/* Logical channels */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc);
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log);
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log);
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log);
+
+#endif /* STE_DMA40_LL_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 00000000000..a1bf77c1993
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,860 @@
+/*
+ * timb_dma.c timberdale FPGA DMA driver
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/timb_dma.h>
+
+#define DRIVER_NAME "timb-dma"
+
+/* Global DMA registers */
+#define TIMBDMA_ACR 0x34
+#define TIMBDMA_32BIT_ADDR 0x01
+
+#define TIMBDMA_ISR 0x080000
+#define TIMBDMA_IPR 0x080004
+#define TIMBDMA_IER 0x080008
+
+/* Channel specific registers */
+/* RX instances base addresses are 0x00, 0x40, 0x80 ...
+ * TX instances base addresses are 0x18, 0x58, 0x98 ...
+ */
+#define TIMBDMA_INSTANCE_OFFSET 0x40
+#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
+
+/* RX registers, relative the instance base */
+#define TIMBDMA_OFFS_RX_DHAR 0x00
+#define TIMBDMA_OFFS_RX_DLAR 0x04
+#define TIMBDMA_OFFS_RX_LR 0x0C
+#define TIMBDMA_OFFS_RX_BLR 0x10
+#define TIMBDMA_OFFS_RX_ER 0x14
+#define TIMBDMA_RX_EN 0x01
+/* Bytes per row: a video-specific register
+ * placed after the TX registers.
+ */
+#define TIMBDMA_OFFS_RX_BPRR 0x30
+
+/* TX registers, relative the instance base */
+#define TIMBDMA_OFFS_TX_DHAR 0x00
+#define TIMBDMA_OFFS_TX_DLAR 0x04
+#define TIMBDMA_OFFS_TX_BLR 0x0C
+#define TIMBDMA_OFFS_TX_LR 0x14
+
+
+#define TIMB_DMA_DESC_SIZE 8
+
+struct timb_dma_desc {
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ u8 *desc_list;
+ unsigned int desc_list_len;
+ bool interrupt;
+};
+
+struct timb_dma_chan {
+ struct dma_chan chan;
+ void __iomem *membase;
+ spinlock_t lock; /* Used to protect data structures,
+ especially the lists and descriptors,
+ from races between the tasklet and calls
+ from above */
+ dma_cookie_t last_completed_cookie;
+ bool ongoing;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ unsigned int bytes_per_line;
+ enum dma_data_direction direction;
+ unsigned int descs; /* Descriptors to allocate */
+ unsigned int desc_elems; /* number of elems per descriptor */
+};
+
+struct timb_dma {
+ struct dma_device dma;
+ void __iomem *membase;
+ struct tasklet_struct tasklet;
+ struct timb_dma_chan channels[0];
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2dmadev(struct dma_chan *chan)
+{
+ return chan2dev(chan)->parent->parent;
+}
+
+static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
+{
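+	/*
+	 * channels[] is a flexible array member at the end of struct
+	 * timb_dma, so the containing device is found by stepping back over
+	 * the preceding channel entries and the timb_dma header itself.
+	 */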
+ int id = td_chan->chan.chan_id;
+ return (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+}
+
+/* Must be called with the spinlock held */
+static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = tdchantotd(td_chan);
+ u32 ier;
+
+ /* enable interrupt for this channel */
+ ier = ioread32(td->membase + TIMBDMA_IER);
+ ier |= 1 << id;
+ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
+ ier);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+/* Should be called with the spinlock held */
+static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+ u32 isr;
+ bool done = false;
+
+ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
+
+ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
+ if (isr) {
+ iowrite32(isr, td->membase + TIMBDMA_ISR);
+ done = true;
+ }
+
+ return done;
+}
+
+static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
+ bool single)
+{
+ dma_addr_t addr;
+ int len;
+
+ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
+ dma_desc[4];
+
+ len = (dma_desc[3] << 8) | dma_desc[2];
+
+ if (single)
+ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+ else
+ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+}
+
+static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
+{
+ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
+ struct timb_dma_chan, chan);
+ u8 *descs;
+
+ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
+ __td_unmap_desc(td_chan, descs, single);
+ if (descs[0] & 0x02)
+ break;
+ }
+}
+
+static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
+ struct scatterlist *sg, bool last)
+{
+ if (sg_dma_len(sg) > USHRT_MAX) {
+ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
+ return -EINVAL;
+ }
+
+ /* length must be word aligned */
+ if (sg_dma_len(sg) % sizeof(u32)) {
+ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
+ sg_dma_len(sg));
+ return -EINVAL;
+ }
+
+ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
+ dma_desc, (void *)sg_dma_address(sg));
+
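+	/*
+	 * Each hardware descriptor is TIMB_DMA_DESC_SIZE (8) bytes: bytes
+	 * 4..7 hold the bus address, bytes 2..3 the length, byte 1 is
+	 * written as zero and byte 0 carries the control flags (0x21 =
+	 * transfer + valid, 0x02 marks the last descriptor).
+	 * __td_unmap_desc() relies on the same layout.
+	 */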
+ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
+ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
+ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
+ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
+
+ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
+ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
+
+ dma_desc[1] = 0x00;
+ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
+
+ return 0;
+}
+
+/* Must be called with the spinlock held */
+static void __td_start_dma(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ if (td_chan->ongoing) {
+ dev_err(chan2dev(&td_chan->chan),
+ "Transfer already ongoing\n");
+ return;
+ }
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan),
+ "td_chan: %p, chan: %d, membase: %p\n",
+ td_chan, td_chan->chan.chan_id, td_chan->membase);
+
+ if (td_chan->direction == DMA_FROM_DEVICE) {
+
+ /* descriptor address */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_RX_DLAR);
+ /* Bytes per line */
+ iowrite32(td_chan->bytes_per_line, td_chan->membase +
+ TIMBDMA_OFFS_RX_BPRR);
+ /* enable RX */
+ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+ } else {
+ /* address high */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_TX_DLAR);
+ }
+
+ td_chan->ongoing = true;
+
+ if (td_desc->interrupt)
+ __td_enable_chan_irq(td_chan);
+}
+
+static void __td_finish(struct timb_dma_chan *td_chan)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd;
+ struct timb_dma_desc *td_desc;
+
+ /* can happen if the descriptor is canceled */
+ if (list_empty(&td_chan->active_list))
+ return;
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+ txd = &td_desc->txd;
+
+ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
+ txd->cookie);
+
+ /* make sure to stop the transfer */
+ if (td_chan->direction == DMA_FROM_DEVICE)
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+/* Currently no support for stopping DMA transfers
+ else
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
+*/
+ td_chan->last_completed_cookie = txd->cookie;
+ td_chan->ongoing = false;
+
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ __td_unmap_descs(td_desc,
+ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+}
+
+static u32 __td_ier_mask(struct timb_dma *td)
+{
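+	/*
+	 * Build the interrupt enable mask: a channel's bit is set only if it
+	 * has an ongoing transfer whose current descriptor requested an
+	 * interrupt.
+	 */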
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < td->dma.chancnt; i++) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ if (td_chan->ongoing) {
+ struct timb_dma_desc *td_desc =
+ list_entry(td_chan->active_list.next,
+ struct timb_dma_desc, desc_node);
+ if (td_desc->interrupt)
+ ret |= 1 << i;
+ }
+ }
+
+ return ret;
+}
+
+static void __td_start_next(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ BUG_ON(list_empty(&td_chan->queue));
+ BUG_ON(td_chan->ongoing);
+
+ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
+ __func__, td_desc->txd.cookie);
+
+ list_move(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+}
+
+static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
+ txd);
+ struct timb_dma_chan *td_chan = container_of(txd->chan,
+ struct timb_dma_chan, chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&td_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&td_chan->active_list)) {
+ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
+ txd->cookie);
+ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+ } else {
+ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
+ txd->cookie);
+
+ list_add_tail(&td_desc->desc_node, &td_chan->queue);
+ }
+
+ spin_unlock_bh(&td_chan->lock);
+
+ return cookie;
+}
+
+static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
+{
+ struct dma_chan *chan = &td_chan->chan;
+ struct timb_dma_desc *td_desc;
+ int err;
+
+ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+		return NULL; /* td_desc is NULL here; the err path below would dereference it */
+ }
+
+ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
+
+ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
+ if (!td_desc->desc_list) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ dma_async_tx_descriptor_init(&td_desc->txd, chan);
+ td_desc->txd.tx_submit = td_tx_submit;
+ td_desc->txd.flags = DMA_CTRL_ACK;
+
+ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
+ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
+ if (err) {
+ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
+ goto err;
+ }
+
+ return td_desc;
+err:
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+
+ return NULL;
+
+}
+
+static void td_free_desc(struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
+ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+}
+
+static void td_desc_put(struct timb_dma_chan *td_chan,
+ struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
+
+ spin_lock_bh(&td_chan->lock);
+ list_add(&td_desc->desc_node, &td_chan->free_list);
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc, *_td_desc;
+ struct timb_dma_desc *ret = NULL;
+
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
+ desc_node) {
+ if (async_tx_test_ack(&td_desc->txd)) {
+ list_del(&td_desc->desc_node);
+ ret = td_desc;
+ break;
+ }
+ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
+ td_desc);
+ }
+ spin_unlock_bh(&td_chan->lock);
+
+ return ret;
+}
+
+static int td_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ int i;
+
+ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
+
+ BUG_ON(!list_empty(&td_chan->free_list));
+ for (i = 0; i < td_chan->descs; i++) {
+ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
+ if (!td_desc) {
+ if (i)
+ break;
+ else {
+ dev_err(chan2dev(chan),
+ "Couldnt allocate any descriptors\n");
+ return -ENOMEM;
+ }
+ }
+
+ td_desc_put(td_chan, td_desc);
+ }
+
+ spin_lock_bh(&td_chan->lock);
+ td_chan->last_completed_cookie = 1;
+ chan->cookie = 1;
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_free_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ /* check that all descriptors are free */
+ BUG_ON(!list_empty(&td_chan->active_list));
+ BUG_ON(!list_empty(&td_chan->queue));
+
+ spin_lock_bh(&td_chan->lock);
+ list_splice_init(&td_chan->free_list, &list);
+ spin_unlock_bh(&td_chan->lock);
+
+ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
+ td_desc);
+ td_free_desc(td_desc);
+ }
+}
+
+static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ last_complete = td_chan->last_completed_cookie;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ dev_dbg(chan2dev(chan),
+ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
+ __func__, ret, last_complete, last_used);
+
+ return ret;
+}
+
+static void td_issue_pending(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+ spin_lock_bh(&td_chan->lock);
+
+ if (!list_empty(&td_chan->active_list))
+ /* transfer ongoing */
+ if (__td_dma_done_ack(td_chan))
+ __td_finish(td_chan);
+
+ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int desc_usage = 0;
+
+ if (!sgl || !sg_len) {
+ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ /* even channels are for RX, odd for TX */
+ if (td_chan->direction != direction) {
+ dev_err(chan2dev(chan),
+ "Requesting channel in wrong direction\n");
+ return NULL;
+ }
+
+ td_desc = td_desc_get(td_chan);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Not enough descriptors available\n");
+ return NULL;
+ }
+
+ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+ if (desc_usage > td_desc->desc_list_len) {
+ dev_err(chan2dev(chan), "No descriptor space\n");
+ return NULL;
+ }
+
+ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
+ i == (sg_len - 1));
+ if (err) {
+ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
+ err);
+ td_desc_put(td_chan, td_desc);
+ return NULL;
+ }
+ desc_usage += TIMB_DMA_DESC_SIZE;
+ }
+
+ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ return &td_desc->txd;
+}
+
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* first the easy part, put the queue into the free list */
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
+ desc_node)
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+	/* now tear down the running transfer */
+ __td_finish(td_chan);
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_tasklet(unsigned long data)
+{
+ struct timb_dma *td = (struct timb_dma *)data;
+ u32 isr;
+ u32 ipr;
+ u32 ier;
+ int i;
+
+ isr = ioread32(td->membase + TIMBDMA_ISR);
+ ipr = isr & __td_ier_mask(td);
+
+ /* ack the interrupts */
+ iowrite32(ipr, td->membase + TIMBDMA_ISR);
+
+ for (i = 0; i < td->dma.chancnt; i++)
+ if (ipr & (1 << i)) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ spin_lock(&td_chan->lock);
+ __td_finish(td_chan);
+ if (!list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+ spin_unlock(&td_chan->lock);
+ }
+
+ ier = __td_ier_mask(td);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+
+static irqreturn_t td_irq(int irq, void *devid)
+{
+ struct timb_dma *td = devid;
+ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
+
+ if (ipr) {
+ /* disable interrupts, will be re-enabled in tasklet */
+ iowrite32(0, td->membase + TIMBDMA_IER);
+
+ tasklet_schedule(&td->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+
+static int __devinit td_probe(struct platform_device *pdev)
+{
+ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma *td;
+ struct resource *iomem;
+ int irq;
+ int err;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME))
+ return -EBUSY;
+
+ td = kzalloc(sizeof(struct timb_dma) +
+ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ if (!td) {
+ err = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
+
+ td->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!td->membase) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ /* 32bit addressing */
+ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
+
+ /* disable and clear any interrupts */
+ iowrite32(0x0, td->membase + TIMBDMA_IER);
+ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
+
+ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+
+ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ goto err_tasklet_kill;
+ }
+
+ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
+ td->dma.device_free_chan_resources = td_free_chan_resources;
+ td->dma.device_tx_status = td_tx_status;
+ td->dma.device_issue_pending = td_issue_pending;
+
+ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
+ td->dma.device_prep_slave_sg = td_prep_slave_sg;
+ td->dma.device_control = td_control;
+
+ td->dma.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&td->dma.channels);
+
+ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ struct timb_dma_chan *td_chan = &td->channels[i];
+ struct timb_dma_platform_data_channel *pchan =
+ pdata->channels + i;
+
+ /* even channels are RX, odd are TX */
+ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ dev_err(&pdev->dev, "Wrong channel configuration\n");
+ err = -EINVAL;
+ goto err_free_irq;
+ }
+
+ td_chan->chan.device = &td->dma;
+ td_chan->chan.cookie = 1;
+ td_chan->chan.chan_id = i;
+ spin_lock_init(&td_chan->lock);
+ INIT_LIST_HEAD(&td_chan->active_list);
+ INIT_LIST_HEAD(&td_chan->queue);
+ INIT_LIST_HEAD(&td_chan->free_list);
+
+ td_chan->descs = pchan->descriptors;
+ td_chan->desc_elems = pchan->descriptor_elements;
+ td_chan->bytes_per_line = pchan->bytes_per_line;
+ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ td_chan->membase = td->membase +
+ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
+ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
+
+ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
+ i, td_chan->membase);
+
+ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
+ }
+
+ err = dma_async_device_register(&td->dma);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register async device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, td);
+
+ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
+ return err;
+
+err_free_irq:
+ free_irq(irq, td);
+err_tasklet_kill:
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+err_free_mem:
+ kfree(td);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return err;
+}
+
+static int __devexit td_remove(struct platform_device *pdev)
+{
+ struct timb_dma *td = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(&td->dma);
+ free_irq(irq, td);
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+ kfree(td);
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&pdev->dev, "Removed...\n");
+ return 0;
+}
+
+static struct platform_driver td_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = td_probe,
+ .remove = __devexit_p(td_remove),
+};
+
+static int __init td_init(void)
+{
+ return platform_driver_register(&td_driver);
+}
+module_init(td_init);
+
+static void __exit td_exit(void)
+{
+ platform_driver_unregister(&td_driver);
+}
+module_exit(td_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Timberdale DMA controller driver");
+MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 75fcf1ac8bb..cbd83e362b5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,17 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return &first->txd;
}
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -EINVAL;
+
dev_vdbg(chan2dev(chan), "terminate_all\n");
spin_lock_bh(&dc->lock);
@@ -958,12 +963,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
txx9dmac_descriptor_complete(dc, desc);
+
+ return 0;
}
static enum dma_status
-txx9dmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
dma_cookie_t last_used;
@@ -985,10 +991,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1153,8 +1156,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->dma.dev = &pdev->dev;
dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
- dc->dma.device_terminate_all = txx9dmac_terminate_all;
- dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+ dc->dma.device_control = txx9dmac_control;
+ dc->dma.device_tx_status = txx9dmac_tx_status;
dc->dma.device_issue_pending = txx9dmac_issue_pending;
if (pdata && pdata->memcpy_chan == ch) {
dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
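Both the new timb_dma driver and the converted txx9dmac now funnel channel commands through the generic device_control callback and report completion through device_tx_status with a struct dma_tx_state. A rough sketch of how a slave client of this kernel generation would drive those two entry points, assuming it already holds a struct dma_chan (for example from dma_request_channel()); the helper name is made up:

        #include <linux/dmaengine.h>

        /* Illustrative helper, not part of either driver: cancel everything on
         * 'chan' and report whether 'cookie' had already completed. */
        static enum dma_status example_cancel_and_check(struct dma_chan *chan,
                                                        dma_cookie_t cookie)
        {
                struct dma_tx_state state;

                /* Maps to td_control()/txx9dmac_control() above; both reject
                 * anything other than DMA_TERMINATE_ALL. */
                chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);

                /* Maps to the device_tx_status callbacks above; 'state'
                 * receives the last/used cookies (and, optionally, a residue
                 * via dma_set_tx_state()). */
                return chan->device->device_tx_status(chan, cookie, &state);
        }

The point of the conversion is exactly this uniformity: clients no longer need per-driver terminate/status hooks, only the two generic callbacks.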
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f2330f81cb5..cace0a7b707 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -294,7 +294,7 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f..996c1bdb5a3 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
int num_dimms_per_channel;
int num_csrows;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
return -ENOMEM;
kobject_get(&mci->edac_mci_kobj);
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
*/
static void __exit i5000_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5000_driver);
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed..010c1d6526f 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5400_driver);
}
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 7f3884fcbd4..a2fa1feed72 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __func__, index, drbar);
+ debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x \n",
- mci->mc_idx, __func__, index, row_high_limit,
+ debugf1("MC%d: %s: %s() Row=%d, "
+ "Boundry Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, row_high_limit,
row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
return 0;
fail:
@@ -352,9 +352,9 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4471647b480..6c1886b497f 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -338,15 +338,13 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_pci_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_pci_err",
- .match_table = mpc85xx_pci_err_of_match,
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
- .name = "mpc85xx_pci_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_pci_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_pci_err_of_match,
+ },
};
#endif /* CONFIG_PCI */
@@ -654,15 +652,13 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_l2_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_l2_err",
- .match_table = mpc85xx_l2_err_of_match,
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
- .name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_l2_err_of_match,
+ },
};
/**************************** MC Err device ***************************/
@@ -1131,15 +1127,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_mc_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_mc_err",
- .match_table = mpc85xx_mc_err_of_match,
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
- .name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_mc_err_of_match,
+ },
};
#ifdef CONFIG_MPC85xx
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 11f2172aa1e..9d6f6783328 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -202,13 +202,13 @@ static struct of_device_id ppc4xx_edac_match[] = {
};
static struct of_platform_driver ppc4xx_edac_driver = {
- .match_table = ppc4xx_edac_match,
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME
- }
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
};
/*
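The EDAC conversions above (mpc85xx and the ppc4xx hunk just before this point) all follow the same pattern: the legacy .name/.owner/.match_table members of struct of_platform_driver go away, and the match table moves into the embedded struct device_driver as .of_match_table. A minimal sketch of the resulting driver shape, using hypothetical names and the of_platform probe prototype of this kernel generation (an assumption worth checking against the real headers):

        #include <linux/module.h>
        #include <linux/of_platform.h>

        static int __devinit example_edac_probe(struct of_device *op,
                                                const struct of_device_id *match)
        {
                return 0;       /* real probe work would go here */
        }

        static int __devexit example_edac_remove(struct of_device *op)
        {
                return 0;
        }

        static struct of_device_id example_edac_match[] = {
                { .compatible = "vendor,example-edac" },        /* hypothetical */
                {},
        };

        static struct of_platform_driver example_edac_driver = {
                .probe  = example_edac_probe,
                .remove = __devexit_p(example_edac_remove),
                .driver = {
                        .name           = "example-edac",
                        .owner          = THIS_MODULE,
                        .of_match_table = example_edac_match,
                },
        };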
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index d55f8e9de78..6a822c631ef 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -354,7 +354,7 @@ static int __devinit r82600_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c531..9dcb30466ec 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
-
+#define BIB_BUS_NAME 0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v) ((v) << 0)
#define BIB_GENERATION(v) ((v) << 4)
#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_BMC ((1) << 28)
#define BIB_ISC ((1) << 29)
#define BIB_CMC ((1) << 30)
-#define BIB_IMC ((1) << 31)
+#define BIB_IRMC ((1) << 31)
+#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
config_rom[0] = cpu_to_be32(
BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
- config_rom[1] = cpu_to_be32(0x31333934);
+ config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
config_rom[2] = cpu_to_be32(
BIB_LINK_SPEED(card->link_speed) |
BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
BIB_MAX_ROM(2) |
BIB_MAX_RECEIVE(card->max_receive) |
- BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC);
+ BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
config_rom[3] = cpu_to_be32(card->guid >> 32);
config_rom[4] = cpu_to_be32(card->guid);
/* Generate root directory. */
- config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */
+ config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
i = 7;
j = 7 + descriptor_count;
@@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work)
fw_card_put(card);
}
-static void flush_timer_callback(unsigned long data)
-{
- struct fw_card *card = (struct fw_card *)data;
-
- fw_flush_transactions(card);
-}
-
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver,
struct device *device)
@@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card,
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
spin_lock_init(&card->lock);
- setup_timer(&card->flush_timer,
- flush_timer_callback, (unsigned long)card);
card->local_node = NULL;
@@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
WARN_ON(!list_empty(&card->transaction_list));
- del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea..5bf106b9d79 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
list_add_tail(&client->link, &device->client_list);
mutex_unlock(&device->client_list_mutex);
- return 0;
+ return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
+ .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
- .poll = fw_device_op_poll,
- .release = fw_device_op_release,
.mmap = fw_device_op_mmap,
-
+ .release = fw_device_op_release,
+ .poll = fw_device_op_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = fw_device_op_compat_ioctl,
#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4e..fdc33ff06dc 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- list_del(&t->link);
+ list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
+ del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
}
EXPORT_SYMBOL(fw_cancel_transaction);
+static void split_transaction_timeout_callback(unsigned long data)
+{
+ struct fw_transaction *t = (struct fw_transaction *)data;
+ struct fw_card *card = t->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (list_empty(&t->link)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ card->driver->cancel_packet(card, &t->packet);
+
+ /*
+ * At this point cancel_packet will never call the transaction
+ * callback, since we just took the transaction out of the list.
+ * So do it here.
+ */
+ t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+}
+
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_mapped = false;
}
+static int allocate_tlabel(struct fw_card *card)
+{
+ int tlabel;
+
+ tlabel = card->current_tlabel;
+ while (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = (tlabel + 1) & 0x3f;
+ if (tlabel == card->current_tlabel)
+ return -EBUSY;
+ }
+
+ card->current_tlabel = (tlabel + 1) & 0x3f;
+ card->tlabel_mask |= 1ULL << tlabel;
+
+ return tlabel;
+}
+
/**
* This function provides low-level access to the IEEE1394 transaction
* logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int tlabel;
/*
- * Bump the flush timer up 100ms first of all so we
- * don't race with a flush timer callback.
- */
-
- mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
-
- /*
* Allocate tlabel from the bitmap and put the transaction on
* the list while holding the card spinlock.
*/
spin_lock_irqsave(&card->lock, flags);
- tlabel = card->current_tlabel;
- if (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = allocate_tlabel(card);
+ if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
return;
}
- card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
- card->tlabel_mask |= (1ULL << tlabel);
-
t->node_id = destination_id;
t->tlabel = tlabel;
+ t->card = card;
+ setup_timer(&t->split_timeout_timer,
+ split_transaction_timeout_callback, (unsigned long)t);
+ /* FIXME: start this timer later, relative to t->timestamp */
+ mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
t->callback = callback;
t->callback_data = callback_data;
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
struct transaction_callback_data d;
struct fw_transaction t;
+ init_timer_on_stack(&t.split_timeout_timer);
init_completion(&d.done);
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
+ destroy_timer_on_stack(&t.split_timeout_timer);
return d.rcode;
}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
mutex_unlock(&phy_config_mutex);
}
-void fw_flush_transactions(struct fw_card *card)
-{
- struct fw_transaction *t, *next;
- struct list_head list;
- unsigned long flags;
-
- INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&card->lock, flags);
- list_splice_init(&card->transaction_list, &list);
- card->tlabel_mask = 0;
- spin_unlock_irqrestore(&card->lock, flags);
-
- list_for_each_entry_safe(t, next, &list, link) {
- card->driver->cancel_packet(card, &t->packet);
-
- /*
- * At this point cancel_packet will never call the
- * transaction callback, since we just took all the
- * transactions out of the list. So do it here.
- */
- t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
- }
-}
-
static struct fw_address_handler *lookup_overlapping_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- list_del(&t->link);
- card->tlabel_mask &= ~(1 << t->tlabel);
+ list_del_init(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
}
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ del_timer_sync(&t->split_timeout_timer);
+
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
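allocate_tlabel() above is a small circular search over a 64-bit busy mask: it starts at card->current_tlabel, skips labels whose bit is set, fails with -EBUSY after a full wrap, and otherwise marks the label busy and advances the starting point. The same algorithm, reduced to two variables and compiled as a user-space check, shows the wrap-around behaviour:

        #include <stdint.h>
        #include <stdio.h>

        static int current_tlabel;
        static uint64_t tlabel_mask;

        /* Same search as allocate_tlabel() above, outside the kernel. */
        static int allocate_tlabel(void)
        {
                int tlabel = current_tlabel;

                while (tlabel_mask & (1ULL << tlabel)) {
                        tlabel = (tlabel + 1) & 0x3f;
                        if (tlabel == current_tlabel)
                                return -1;      /* all 64 labels in flight */
                }

                current_tlabel = (tlabel + 1) & 0x3f;
                tlabel_mask |= 1ULL << tlabel;
                return tlabel;
        }

        int main(void)
        {
                tlabel_mask = ~0ULL & ~(1ULL << 63);    /* only label 63 free */
                current_tlabel = 10;
                printf("got tlabel %d\n", allocate_tlabel());   /* prints 63 */
                printf("got tlabel %d\n", allocate_tlabel());   /* prints -1 */
                return 0;
        }

Compared with the code it replaces, the search no longer gives up just because the next label in sequence is busy, which is why fw_send_request() can now keep up to 64 transactions outstanding per node.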
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cc..0ecfcd95f4c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
#define PHY_LINK_ACTIVE 0x80
#define PHY_CONTENDER 0x40
#define PHY_BUS_RESET 0x40
+#define PHY_EXTENDED_REGISTERS 0xe0
#define PHY_BUS_SHORT_RESET 0x40
+#define PHY_INT_STATUS_BITS 0x3c
+#define PHY_ENABLE_ACCEL 0x02
+#define PHY_ENABLE_MULTI 0x01
+#define PHY_PAGE_SELECT 0xe0
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
-void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 94b16e0340a..9f627e758cf 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
+#define QUIRK_NO_1394A 8
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
- QUIRK_RESET_PACKET},
+ QUIRK_RESET_PACKET |
+ QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
")");
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
#else
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
+#define param_debug 0
+static inline void log_irqs(u32 evt) {}
+static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
+static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 val, old;
+ u32 val;
+ int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- flush_writes(ohci);
- msleep(2);
- val = reg_read(ohci, OHCI1394_PhyControl);
- if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
- fw_error("failed to set phy reg bits.\n");
- return -EBUSY;
+ for (i = 0; i < 10; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (val & OHCI1394_PhyControl_ReadDone)
+ return OHCI1394_PhyControl_ReadData(val);
+
+ msleep(1);
}
+ fw_error("failed to read phy reg\n");
+
+ return -EBUSY;
+}
+
+static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
+{
+ int i;
- old = OHCI1394_PhyControl_ReadData(val);
- old = (old & ~clear_bits) | set_bits;
reg_write(ohci, OHCI1394_PhyControl,
- OHCI1394_PhyControl_Write(addr, old));
+ OHCI1394_PhyControl_Write(addr, val));
+ for (i = 0; i < 100; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!(val & OHCI1394_PhyControl_WritePending))
+ return 0;
- return 0;
+ msleep(1);
+ }
+ fw_error("failed to write phy reg\n");
+
+ return -EBUSY;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ ret = read_phy_reg(ohci, addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The interrupt status bits are cleared by writing a one bit.
+ * Avoid clearing them unless explicitly requested in set_bits.
+ */
+ if (addr == 5)
+ clear_bits |= PHY_INT_STATUS_BITS;
+
+ return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
+}
+
+static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
+{
+ int ret;
+
+ ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ if (ret < 0)
+ return ret;
+
+ return read_phy_reg(ohci, addr);
}
static int ar_context_add_page(struct ar_context *ctx)
@@ -1351,7 +1399,7 @@ static void bus_reset_tasklet(unsigned long data)
* was set up before this reset, the old one is now no longer
* in use and we can free it. Update the config rom pointers
* to point to the current config rom and clear the
- * next_config_rom pointer so a new udpate can take place.
+ * next_config_rom pointer so a new update can take place.
*/
if (ohci->next_config_rom != NULL) {
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
+static int configure_1394a_enhancements(struct fw_ohci *ohci)
+{
+ bool enable_1394a;
+ int ret, clear, set, offset;
+
+ /* Check if the driver should configure link and PHY. */
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_programPhyEnable))
+ return 0;
+
+ /* Paranoia: check whether the PHY supports 1394a, too. */
+ enable_1394a = false;
+ ret = read_phy_reg(ohci, 2);
+ if (ret < 0)
+ return ret;
+ if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
+ ret = read_paged_phy_reg(ohci, 1, 8);
+ if (ret < 0)
+ return ret;
+ if (ret >= 1)
+ enable_1394a = true;
+ }
+
+ if (ohci->quirks & QUIRK_NO_1394A)
+ enable_1394a = false;
+
+ /* Configure PHY and link consistently. */
+ if (enable_1394a) {
+ clear = 0;
+ set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ } else {
+ clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ set = 0;
+ }
+ ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ if (ret < 0)
+ return ret;
+
+ if (enable_1394a)
+ offset = OHCI1394_HCControlSet;
+ else
+ offset = OHCI1394_HCControlClear;
+ reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
+
+ /* Clean up: configuration has been taken care of. */
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_programPhyEnable);
+
+ return 0;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
u32 lps;
- int i;
+ int i, ret;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+ ret = configure_1394a_enhancements(ohci);
+ if (ret < 0)
+ return ret;
+
/* Activate link_on bit and contender bit in our self ID packets.*/
- if (ohci_update_phy_reg(card, 4, 0,
- PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
- return -EIO;
+ ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
+ if (ret < 0)
+ return ret;
/*
* When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
};
#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
+static void pmac_ohci_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
}
}
-static void ohci_pmac_off(struct pci_dev *dev)
+static void pmac_ohci_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
}
}
#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
+static inline void pmac_ohci_on(struct pci_dev *dev) {}
+static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed, version;
+ u32 bus_options, max_receive, link_speed, version, link_enh;
u64 guid;
int i, err, n_ir, n_it;
size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
err = pci_enable_device(dev);
if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
+ if (dev->vendor == PCI_VENDOR_ID_TI) {
+ pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
+
+ /* adjust latency of ATx FIFO: use 1.7 KB threshold */
+ link_enh &= ~TI_LinkEnh_atx_thresh_mask;
+ link_enh |= TI_LinkEnh_atx_thresh_1_7K;
+
+ /* use priority arbitration for asynchronous responses */
+ link_enh |= TI_LinkEnh_enab_unfair;
+
+ /* required for aPhyEnhanceEnable to work */
+ link_enh |= TI_LinkEnh_enab_accel;
+
+ pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
+ }
+
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_disable_device(dev);
fail_free:
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
pci_disable_device(dev);
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
return 0;
}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
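One subtle point in the ohci.c changes above is the comment in ohci_update_phy_reg(): the PHY register 5 status bits are write-one-to-clear, so a plain read-modify-write would acknowledge pending interrupts by accident, which is why PHY_INT_STATUS_BITS is always folded into clear_bits. A user-space sketch of the two update strategies (register value and "config" bit chosen arbitrarily for illustration) makes the difference visible:

        #include <stdint.h>
        #include <stdio.h>

        #define PHY_INT_STATUS_BITS 0x3c        /* write-one-to-clear, as in ohci.c */

        /* Plain read-modify-write: any status bit that reads back as 1 is
         * written back as 1 and therefore cleared in hardware. */
        static uint8_t naive_update(uint8_t reg, uint8_t clear, uint8_t set)
        {
                return (reg & ~clear) | set;
        }

        /* What ohci_update_phy_reg() does for PHY register 5: force the
         * status bits to zero unless the caller asked to set them. */
        static uint8_t safe_update(uint8_t reg, uint8_t clear, uint8_t set)
        {
                clear |= PHY_INT_STATUS_BITS;
                return (reg & ~clear) | set;
        }

        int main(void)
        {
                uint8_t reg = 0x44;     /* bit 2: pending status, bit 6: some config bit */

                /* 0x45: status bit written back as 1, interrupt ack'd by accident */
                printf("naive: %#04x\n", naive_update(reg, 0, 0x01));
                /* 0x41: status bit written as 0, left pending */
                printf("safe:  %#04x\n", safe_update(reg, 0, 0x01));
                return 0;
        }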
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c51..3bc9a5d744e 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
#define OHCI1394_PhyControl_ReadDone 0x80000000
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
-#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_PhyControl_WritePending 0x00004000
#define OHCI1394_IsochronousCycleTimer 0x0F0
#define OHCI1394_AsReqFilterHiSet 0x100
#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
#define OHCI1394_phy_tcode 0xe
+/* TI extensions */
+
+#define PCI_CFG_TI_LinkEnh 0xf4
+#define TI_LinkEnh_enab_accel 0x00000002
+#define TI_LinkEnh_enab_unfair 0x00000080
+#define TI_LinkEnh_atx_thresh_mask 0x00003000
+#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
+
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index fb09bb3c0ad..aa9bc9e980e 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -149,7 +149,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
return count;
}
-static ssize_t smi_data_read(struct kobject *kobj,
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -162,7 +162,7 @@ static ssize_t smi_data_read(struct kobject *kobj,
return ret;
}
-static ssize_t smi_data_write(struct kobject *kobj,
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3a4460265b1..2f452f1f7c8 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -522,7 +522,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
rbu_data.image_update_buffer, rbu_data.bios_image_size);
}
-static ssize_t read_rbu_data(struct kobject *kobj,
+static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -576,7 +576,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static ssize_t read_rbu_image_type(struct kobject *kobj,
+static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -586,7 +586,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_image_type(struct kobject *kobj,
+static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -647,7 +647,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
return rc;
}
-static ssize_t read_rbu_packet_size(struct kobject *kobj,
+static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -660,7 +660,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_packet_size(struct kobject *kobj,
+static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 81b70bd0758..2a62ec6390e 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -402,7 +402,7 @@ efivar_unregister(struct efivar_entry *var)
}
-static ssize_t efivar_create(struct kobject *kobj,
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -461,7 +461,7 @@ static ssize_t efivar_create(struct kobject *kobj,
return count;
}
-static ssize_t efivar_delete(struct kobject *kobj,
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
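The dcdbas, dell_rbu and efivars hunks all track the same tree-wide sysfs change: binary attribute read/write handlers now receive the opening struct file * as their first argument. A minimal sketch of a read-only binary attribute using the new prototype; the names and the backing buffer are hypothetical, not taken from these drivers:

        #include <linux/kobject.h>
        #include <linux/sysfs.h>
        #include <linux/string.h>

        static char example_blob[16] = "hello, sysfs\n";

        /* New-style handler: note the leading struct file * parameter. */
        static ssize_t example_read(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *bin_attr,
                                    char *buf, loff_t pos, size_t count)
        {
                if (pos >= sizeof(example_blob))
                        return 0;
                if (count > sizeof(example_blob) - pos)
                        count = sizeof(example_blob) - pos;
                memcpy(buf, example_blob + pos, count);
                return count;
        }

        static struct bin_attribute example_attr = {
                .attr   = { .name = "example_blob", .mode = 0444 },
                .size   = sizeof(example_blob),
                .read   = example_read,
        };

        /* Registered from some init path with:
         *      sysfs_create_bin_file(kobj, &example_attr);
         */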
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a1..724038dab4c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
Board setup code must specify the model to use, and the start
number for these GPIOs.
+config GPIO_MAX732X_IRQ
+ bool "Interrupt controller support for MAX732x"
+ depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+ help
+ Say yes here to enable the max732x to be used as an interrupt
+ controller. This requires the driver to be built into the kernel.
+
config GPIO_PCA953X
tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
@@ -188,6 +195,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_TC35892
+ bool "TC35892 GPIOs"
+ depends on MFD_TC35892
+ help
+ This enables support for the GPIOs found on the TC35892
+ I/O Expander.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
@@ -264,10 +278,10 @@ config GPIO_BT8XX
If unsure, say N.
config GPIO_LANGWELL
- bool "Intel Moorestown Platform Langwell GPIO support"
+ bool "Intel Langwell/Penwell GPIO support"
depends on PCI
help
- Say Y here to support Intel Moorestown platform GPIO.
+ Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
@@ -275,6 +289,15 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_RDC321X
+ tristate "RDC R-321x GPIO support"
+ depends on PCI && GPIOLIB
+ select MFD_CORE
+ select MFD_RDC321X
+ help
+ Support for the RDC R321x SoC GPIOs over southbridge
+ PCI configuration space.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
@@ -310,4 +333,14 @@ config GPIO_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_gpio.
+comment "MODULbus GPIO expanders:"
+
+config GPIO_JANZ_TTL
+ tristate "Janz VMOD-TTL Digital IO Module"
+ depends on MFD_JANZ_CMODIO
+ help
+ This enables support for the Janz VMOD-TTL Digital IO module.
+ This driver provides support for driving the pins in output
+ mode only. Input mode is not supported.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 10f3f8d958b..51c3cdd41b5 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
@@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o \ No newline at end of file
+obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f226..f73a1555e49 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
return 0;
}
-static char *cs5535_gpio_names[] = {
+static const char * const cs5535_gpio_names[] = {
"GPIO0", "GPIO1", "GPIO2", "GPIO3",
"GPIO4", "GPIO5", "GPIO6", "GPIO7",
"GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb0c3fe44b2..3ca36542e33 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -399,7 +399,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
goto free_id;
}
- pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+ pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
if (!pdesc->value_sd) {
ret = -ENODEV;
goto free_id;
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
unsigned long flags;
struct gpio_desc *desc;
int status = -EINVAL;
- char *ioname = NULL;
+ const char *ioname = NULL;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
struct device *dev;
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%d", gpio);
+ desc, ioname ? ioname : "gpio%u", gpio);
if (!IS_ERR(dev)) {
status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
fail:
/* failures here can mean systems won't boot... */
if (status)
- pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
return status;
@@ -1447,6 +1447,49 @@ fail:
}
EXPORT_SYMBOL_GPL(gpio_direction_output);
+/**
+ * gpio_set_debounce - sets @debounce time for a @gpio
+ * @gpio: the gpio to set debounce time
+ * @debounce: debounce time in microseconds
+ */
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (!gpio_is_valid(gpio))
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->set_debounce)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ status = gpio_ensure_requested(desc, gpio);
+ if (status < 0)
+ goto fail;
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ return chip->set_debounce(chip, gpio, debounce);
+
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fd..48fc43c4bdd 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
static void __exit it8761e_gpio_exit(void)
{
if (gpio_ba) {
- gpiochip_remove(&it8761e_gpio_chip);
+ int ret = gpiochip_remove(&it8761e_gpio_chip);
+
+ WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
+ __func__, ret);
release_region(gpio_ba, GPIO_IOSIZE);
gpio_ba = 0;
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
new file mode 100644
index 00000000000..813ac077e5d
--- /dev/null
+++ b/drivers/gpio/janz-ttl.c
@@ -0,0 +1,258 @@
+/*
+ * Janz MODULbus VMOD-TTL GPIO Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-ttl"
+
+#define PORTA_DIRECTION 0x23
+#define PORTB_DIRECTION 0x2B
+#define PORTC_DIRECTION 0x06
+#define PORTA_IOCTL 0x24
+#define PORTB_IOCTL 0x2C
+#define PORTC_IOCTL 0x07
+
+#define MASTER_INT_CTL 0x00
+#define MASTER_CONF_CTL 0x01
+
+#define CONF_PAE (1 << 2)
+#define CONF_PBE (1 << 7)
+#define CONF_PCE (1 << 4)
+
+struct ttl_control_regs {
+ __be16 portc;
+ __be16 portb;
+ __be16 porta;
+ __be16 control;
+};
+
+struct ttl_module {
+ struct gpio_chip gpio;
+
+ /* base address of registers */
+ struct ttl_control_regs __iomem *regs;
+
+ u8 portc_shadow;
+ u8 portb_shadow;
+ u8 porta_shadow;
+
+ spinlock_t lock;
+};
+
+static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ u8 *shadow;
+ int ret;
+
+ if (offset < 8) {
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ ret = *shadow & (1 << offset);
+ spin_unlock(&mod->lock);
+ return ret;
+}
+
+static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ void __iomem *port;
+ u8 *shadow;
+
+ if (offset < 8) {
+ port = &mod->regs->porta;
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ port = &mod->regs->portb;
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ port = &mod->regs->portc;
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ if (value)
+ *shadow |= (1 << offset);
+ else
+ *shadow &= ~(1 << offset);
+
+ iowrite16be(*shadow, port);
+ spin_unlock(&mod->lock);
+}
+
+static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
+{
+ iowrite16be(reg, &mod->regs->control);
+ iowrite16be(val, &mod->regs->control);
+}
+
+static void __devinit ttl_setup_device(struct ttl_module *mod)
+{
+ /* reset the device to a known state */
+ iowrite16be(0x0000, &mod->regs->control);
+ iowrite16be(0x0001, &mod->regs->control);
+ iowrite16be(0x0000, &mod->regs->control);
+
+ /* put all ports in open-drain mode */
+ ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTC_IOCTL, 0x000f);
+
+ /* set all ports as outputs */
+ ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);
+
+ /* set all ports to drive zeroes */
+ iowrite16be(0x0000, &mod->regs->porta);
+ iowrite16be(0x0000, &mod->regs->portb);
+ iowrite16be(0x0000, &mod->regs->portc);
+
+ /* enable all ports */
+ ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
+}
+
+static int __devinit ttl_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct ttl_module *mod;
+ struct gpio_chip *gpio;
+ struct resource *res;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data\n");
+ ret = -ENXIO;
+ goto out_return;
+ }
+
+ mod = kzalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod) {
+ dev_err(dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, mod);
+ spin_lock_init(&mod->lock);
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_mod;
+ }
+
+ mod->regs = ioremap(res->start, resource_size(res));
+ if (!mod->regs) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_mod;
+ }
+
+ ttl_setup_device(mod);
+
+ /* Initialize the GPIO data structures */
+ gpio = &mod->gpio;
+ gpio->dev = &pdev->dev;
+ gpio->label = pdev->name;
+ gpio->get = ttl_get_value;
+ gpio->set = ttl_set_value;
+ gpio->owner = THIS_MODULE;
+
+ /* request dynamic allocation */
+ gpio->base = -1;
+ gpio->ngpio = 20;
+
+ ret = gpiochip_add(gpio);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ goto out_iounmap_regs;
+ }
+
+ dev_info(&pdev->dev, "module %d: registered GPIO device\n",
+ pdata->modno);
+ return 0;
+
+out_iounmap_regs:
+ iounmap(mod->regs);
+out_free_mod:
+ kfree(mod);
+out_return:
+ return ret;
+}
+
+static int __devexit ttl_remove(struct platform_device *pdev)
+{
+ struct ttl_module *mod = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = gpiochip_remove(&mod->gpio);
+ if (ret) {
+ dev_err(dev, "unable to remove GPIO chip\n");
+ return ret;
+ }
+
+ iounmap(mod->regs);
+ kfree(mod);
+ return 0;
+}
+
+static struct platform_driver ttl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ttl_probe,
+ .remove = __devexit_p(ttl_remove),
+};
+
+static int __init ttl_init(void)
+{
+ return platform_driver_register(&ttl_driver);
+}
+
+static void __exit ttl_exit(void)
+{
+ platform_driver_unregister(&ttl_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ttl");
+
+module_init(ttl_init);
+module_exit(ttl_exit);
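The module above binds purely by platform-device name ("janz-ttl") and expects its parent MFD driver to provide a memory resource for the MODULbus registers plus a struct janz_platform_data carrying the module number printed at probe time. A rough sketch of what that registration could look like on the parent side; the base address, region size and module number are placeholders, and the real CMOD-IO MFD driver does this internally rather than from board code:

        #include <linux/platform_device.h>
        #include <linux/ioport.h>
        #include <linux/mfd/janz.h>

        /* Illustrative only: addresses and module number are made up. */
        static struct resource example_ttl_res = {
                .start  = 0xd0000000,
                .end    = 0xd0000000 + 0x80 - 1,
                .flags  = IORESOURCE_MEM,
        };

        static int example_register_ttl(void)
        {
                struct janz_platform_data pdata = { .modno = 0 };
                struct platform_device *pdev;
                int ret;

                pdev = platform_device_alloc("janz-ttl", 0);
                if (!pdev)
                        return -ENOMEM;

                ret = platform_device_add_resources(pdev, &example_ttl_res, 1);
                if (!ret)
                        ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
                if (!ret)
                        ret = platform_device_add(pdev);
                if (ret)
                        platform_device_put(pdev);
                return ret;
        }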
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127a..8383a8d7f99 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
/* Supports:
* Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
*/
#include <linux/module.h>
@@ -31,44 +32,65 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-struct lnw_gpio_register {
- u32 GPLR[2];
- u32 GPDR[2];
- u32 GPSR[2];
- u32 GPCR[2];
- u32 GRER[2];
- u32 GFER[2];
- u32 GEDR[2];
+/*
+ * The Langwell chip has 64 pins, so each feature is controlled by two 32-bit
+ * registers, while the Penwell chip has 96 pins per block and needs three
+ * 32-bit registers per feature. Instead of a register structure we therefore
+ * only define the register order here and compute the address and bit offset
+ * for a pin (using GPDR as an example):
+ *
+ * nreg = ngpio / 32;
+ * reg = offset / 32;
+ * bit = offset % 32;
+ * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
+ *
+ * so that bit of reg_addr controls the GPDR feature of pin 'offset'.
+ */
+
+enum GPIO_REG {
+ GPLR = 0, /* pin level read-only */
+ GPDR, /* pin direction */
+ GPSR, /* pin set */
+ GPCR, /* pin clear */
+ GRER, /* rising edge detect */
+ GFER, /* falling edge detect */
+ GEDR, /* edge detect result */
};
struct lnw_gpio {
struct gpio_chip chip;
- struct lnw_gpio_register *reg_base;
+ void *reg_base;
spinlock_t lock;
unsigned irq_base;
};
-static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
- void __iomem *gplr;
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+ return ptr;
+}
+
+static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gplr = gpio_reg(chip, offset, GPLR);
- gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
return readl(gplr) & BIT(offset % 32);
}
static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
void __iomem *gpsr, *gpcr;
if (value) {
- gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]);
+ gpsr = gpio_reg(chip, offset, GPSR);
writel(BIT(offset % 32), gpsr);
} else {
- gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]);
+ gpcr = gpio_reg(chip, offset, GPCR);
writel(BIT(offset % 32), gpcr);
}
}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
u32 value;
unsigned long flags;
- void __iomem *gpdr;
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
unsigned long flags;
- void __iomem *gpdr;
lnw_gpio_set(chip, offset, value);
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);;
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
{
struct lnw_gpio *lnw = get_irq_chip_data(irq);
u32 gpio = irq - lnw->irq_base;
- u8 reg = gpio / 32;
unsigned long flags;
u32 value;
- void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
- void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
+ void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
.set_type = lnw_irq_type,
};
-static struct pci_device_id lnw_gpio_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
+static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
- u32 reg, gpio;
+ u32 base, gpio;
void __iomem *gedr;
u32 gedr_v;
/* check GPIO controller to check which pin triggered the interrupt */
- for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) {
- gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+ for (base = 0; base < lnw->chip.ngpio; base += 32) {
+ gedr = gpio_reg(&lnw->chip, base, GEDR);
gedr_v = readl(gedr);
if (!gedr_v)
continue;
- for (gpio = reg*32; gpio < reg*32+32; gpio++)
+ for (gpio = base; gpio < base + 32; gpio++)
if (gedr_v & BIT(gpio % 32)) {
pr_debug("pin %d triggered\n", gpio);
generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = 64;
+ lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
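
The register-layout comment at the top of this file reduces every per-feature register bank to simple arithmetic. The standalone sketch below (plain user-space C, assuming nothing from the driver beyond the register order) works through that arithmetic for one pin of a 96-pin, Penwell-style bank.

#include <stdio.h>

/* Same register order as the driver's enum: GPLR = 0, GPDR = 1, ... */
enum { GPLR, GPDR, GPSR, GPCR, GRER, GFER, GEDR };

/* Byte offset from reg_base and bit position for one pin's feature register. */
static void locate(unsigned ngpio, unsigned pin, unsigned feature,
		   unsigned *byte_off, unsigned *bit)
{
	unsigned nreg = ngpio / 32;	/* 32-bit registers per feature */
	unsigned reg = pin / 32;	/* which register within the feature */

	*byte_off = feature * nreg * 4 + reg * 4;
	*bit = pin % 32;
}

int main(void)
{
	unsigned off, bit;

	/* GPDR of pin 70 on a 96-pin (Penwell-like) bank */
	locate(96, 70, GPDR, &off, &bit);
	printf("GPDR for pin 70: reg_base + 0x%x, bit %u\n", off, bit);
	/* prints: reg_base + 0x14, bit 6 (three GPDR registers, third one) */
	return 0;
}
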
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af8..9cad60f9e96 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/gpio.h>
-
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>
@@ -31,7 +32,8 @@
* - Open Drain I/O
*
* designated by 'O', 'I' and 'P' individually according to MAXIM's
- * datasheets.
+ * datasheets. 'I' and 'P' ports are interrupt capable, some with
+ * a dedicated interrupt mask.
*
* There are two groups of I/O ports, each group usually includes
* up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
*
* Within each group of ports, there are five known combinations of
* I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
- * the detailed organization of these ports.
+ * the detailed organization of these ports. Only Group A is interrupt
+ * capable.
*
* GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
* and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
+#define INT_NONE 0x0 /* No interrupt capability */
+#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
+#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
+#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
+
+#define INT_CAPS(x) (((uint64_t)(x)) << 32)
+
+enum {
+ MAX7319,
+ MAX7320,
+ MAX7321,
+ MAX7322,
+ MAX7323,
+ MAX7324,
+ MAX7325,
+ MAX7326,
+ MAX7327,
+};
+
+static uint64_t max732x_features[] = {
+ [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7320] = GROUP_B(IO_8O),
+ [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
+ [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
+ [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+ [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+};
+
static const struct i2c_device_id max732x_id[] = {
- { "max7319", GROUP_A(IO_8I) },
- { "max7320", GROUP_B(IO_8O) },
- { "max7321", GROUP_A(IO_8P) },
- { "max7322", GROUP_A(IO_4I4O) },
- { "max7323", GROUP_A(IO_4P4O) },
- { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) },
- { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) },
- { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) },
- { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) },
+ { "max7319", MAX7319 },
+ { "max7320", MAX7320 },
+ { "max7321", MAX7321 },
+ { "max7322", MAX7322 },
+ { "max7323", MAX7323 },
+ { "max7324", MAX7324 },
+ { "max7325", MAX7325 },
+ { "max7326", MAX7326 },
+ { "max7327", MAX7327 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
struct mutex lock;
uint8_t reg_out[2];
+
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+ struct mutex irq_lock;
+ int irq_base;
+ uint8_t irq_mask;
+ uint8_t irq_mask_cur;
+ uint8_t irq_trig_raise;
+ uint8_t irq_trig_fall;
+ uint8_t irq_features;
+#endif
};
-static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
+static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
{
struct i2c_client *client;
int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
return 0;
}
-static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val)
+static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
{
struct i2c_client *client;
int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct max732x_chip, gpio_chip);
- ret = max732x_read(chip, is_group_a(chip, off), &reg_val);
+ ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
if (ret < 0)
return 0;
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
reg_out = (val) ? reg_out | mask : reg_out & ~mask;
- ret = max732x_write(chip, is_group_a(chip, off), reg_out);
+ ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
if (ret < 0)
goto out;
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
return -EACCES;
}
+ /*
+ * Open-drain pins must be set to high impedance (which is
+ * equivalent to output-high) to be turned into an input.
+ */
+ if ((mask & chip->dir_output))
+ max732x_gpio_set_value(gc, off, 1);
+
return 0;
}
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
return 0;
}
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+static int max732x_writew(struct max732x_chip *chip, uint16_t val)
+{
+ int ret;
+
+ val = cpu_to_le16(val);
+
+ ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed writing\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
+{
+ int ret;
+
+ ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed reading\n");
+ return ret;
+ }
+
+ *val = le16_to_cpu(*val);
+ return 0;
+}
+
+static void max732x_irq_update_mask(struct max732x_chip *chip)
+{
+ uint16_t msg;
+
+ if (chip->irq_mask == chip->irq_mask_cur)
+ return;
+
+ chip->irq_mask = chip->irq_mask_cur;
+
+ if (chip->irq_features == INT_NO_MASK)
+ return;
+
+ mutex_lock(&chip->lock);
+
+ switch (chip->irq_features) {
+ case INT_INDEP_MASK:
+ msg = (chip->irq_mask << 8) | chip->reg_out[0];
+ max732x_writew(chip, msg);
+ break;
+
+ case INT_MERGED_MASK:
+ msg = chip->irq_mask | chip->reg_out[0];
+ max732x_writeb(chip, 1, (uint8_t)msg);
+ break;
+ }
+
+ mutex_unlock(&chip->lock);
+}
+
+static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+ struct max732x_chip *chip;
+
+ chip = container_of(gc, struct max732x_chip, gpio_chip);
+ return chip->irq_base + off;
+}
+
+static void max732x_irq_mask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+}
+
+static void max732x_irq_unmask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+}
+
+static void max732x_irq_bus_lock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq_mask_cur = chip->irq_mask;
+}
+
+static void max732x_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ max732x_irq_update_mask(chip);
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+ uint16_t off = irq - chip->irq_base;
+ uint16_t mask = 1 << off;
+
+ if (!(mask & chip->dir_input)) {
+ dev_dbg(&chip->client->dev, "%s port %d is output only\n",
+ chip->client->name, off);
+ return -EACCES;
+ }
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ chip->irq_trig_fall |= mask;
+ else
+ chip->irq_trig_fall &= ~mask;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ chip->irq_trig_raise |= mask;
+ else
+ chip->irq_trig_raise &= ~mask;
+
+ return max732x_gpio_direction_input(&chip->gpio_chip, off);
+}
+
+static struct irq_chip max732x_irq_chip = {
+ .name = "max732x",
+ .mask = max732x_irq_mask,
+ .unmask = max732x_irq_unmask,
+ .bus_lock = max732x_irq_bus_lock,
+ .bus_sync_unlock = max732x_irq_bus_sync_unlock,
+ .set_type = max732x_irq_set_type,
+};
+
+static uint8_t max732x_irq_pending(struct max732x_chip *chip)
+{
+ uint8_t cur_stat;
+ uint8_t old_stat;
+ uint8_t trigger;
+ uint8_t pending;
+ uint16_t status;
+ int ret;
+
+ ret = max732x_readw(chip, &status);
+ if (ret)
+ return 0;
+
+ trigger = status >> 8;
+ trigger &= chip->irq_mask;
+
+ if (!trigger)
+ return 0;
+
+ cur_stat = status & 0xFF;
+ cur_stat &= chip->irq_mask;
+
+ old_stat = cur_stat ^ trigger;
+
+ pending = (old_stat & chip->irq_trig_fall) |
+ (cur_stat & chip->irq_trig_raise);
+ pending &= trigger;
+
+ return pending;
+}
+
+static irqreturn_t max732x_irq_handler(int irq, void *devid)
+{
+ struct max732x_chip *chip = devid;
+ uint8_t pending;
+ uint8_t level;
+
+ pending = max732x_irq_pending(chip);
+
+ if (!pending)
+ return IRQ_HANDLED;
+
+ do {
+ level = __ffs(pending);
+ handle_nested_irq(level + chip->irq_base);
+
+ pending &= ~(1 << level);
+ } while (pending);
+
+ return IRQ_HANDLED;
+}
+
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+ int ret;
+
+ if (pdata->irq_base && has_irq != INT_NONE) {
+ int lvl;
+
+ chip->irq_base = pdata->irq_base;
+ chip->irq_features = has_irq;
+ mutex_init(&chip->irq_lock);
+
+ for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
+ int irq = lvl + chip->irq_base;
+
+ if (!(chip->dir_input & (1 << lvl)))
+ continue;
+
+ set_irq_chip_data(irq, chip);
+ set_irq_chip_and_handler(irq, &max732x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ max732x_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out_failed;
+ }
+
+ chip->gpio_chip.to_irq = max732x_gpio_to_irq;
+ }
+
+ return 0;
+
+out_failed:
+ chip->irq_base = 0;
+ return ret;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+ if (chip->irq_base)
+ free_irq(chip->client->irq, chip);
+}
+#else /* CONFIG_GPIO_MAX732X_IRQ */
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+
+ if (pdata->irq_base && has_irq != INT_NONE)
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+}
+#endif
+
static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
const struct i2c_device_id *id,
unsigned gpio_start)
{
struct gpio_chip *gc = &chip->gpio_chip;
- uint32_t id_data = id->driver_data;
+ uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
int i, port = 0;
for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
switch (client->addr & 0x70) {
case 0x60:
chip->client_group_a = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
chip->client_group_b = chip->client_dummy = c;
}
break;
case 0x50:
chip->client_group_b = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
chip->client_group_a = chip->client_dummy = c;
}
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
mutex_init(&chip->lock);
- max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
- if (nr_port > 7)
- max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+ max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+ if (nr_port > 8)
+ max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+
+ ret = max732x_irq_setup(chip, id);
+ if (ret)
+ goto out_failed;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
return 0;
out_failed:
+ max732x_irq_teardown(chip);
kfree(chip);
return ret;
}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
return ret;
}
+ max732x_irq_teardown(chip);
+
/* unregister any dummy i2c_client */
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
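
The new max732x_features[] table packs two independent pieces of information into one 64-bit word: the lower 32 bits keep the original 2-bit-per-port GROUP_A/GROUP_B layout that max732x_setup_gpio() consumes, and INT_CAPS() shifts the interrupt capability into the upper 32 bits, which max732x_irq_setup() reads back with a >> 32. The user-space sketch below only illustrates that split; the port-layout constants are stand-ins, not the driver's real IO_8I/IO_8O values.

#include <stdint.h>
#include <stdio.h>

#define INT_NONE	0x0
#define INT_NO_MASK	0x1
#define INT_INDEP_MASK	0x2
#define INT_MERGED_MASK	0x3
#define INT_CAPS(x)	(((uint64_t)(x)) << 32)

#define GROUP_A(x)	((x) & 0xffff)	/* lower 16 bits: group A ports */
#define GROUP_B(x)	((x) << 16)	/* next 16 bits: group B ports */

int main(void)
{
	/* MAX7324-style word: group A layout, group B layout, merged mask.
	 * 0x5555/0xAAAA are placeholder port encodings, not the real macros. */
	uint64_t feat = GROUP_A(0x5555u) | GROUP_B(0xAAAAu) |
			INT_CAPS(INT_MERGED_MASK);

	uint32_t ports = (uint32_t)feat;	/* what setup_gpio looks at */
	unsigned irq_caps = feat >> 32;		/* what irq_setup looks at */

	printf("group A 0x%04x, group B 0x%04x, irq caps %u\n",
	       ports & 0xffffu, ports >> 16, irq_caps);
	return 0;
}
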
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b827c976dc6..a2b12aa1f2b 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
struct i2c_client *client;
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
- char **names;
+ const char *const *names;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -449,7 +449,7 @@ pca953x_get_alt_pdata(struct i2c_client *client)
struct device_node *node;
const uint16_t *val;
- node = dev_archdata_get_node(&client->dev.archdata);
+ node = client->dev.of_node;
if (node == NULL)
return NULL;
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05..ee568c8fcbd 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
- if (offset < 0 || offset > PL061_GPIO_NR)
+ if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
new file mode 100644
index 00000000000..2762698e020
--- /dev/null
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -0,0 +1,246 @@
+/*
+ * RDC321x GPIO driver
+ *
+ * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rdc321x.h>
+#include <linux/slab.h>
+
+struct rdc321x_gpio {
+ spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ u32 data_reg[2];
+ int reg1_ctrl_base;
+ int reg1_data_base;
+ int reg2_ctrl_base;
+ int reg2_data_base;
+ struct gpio_chip chip;
+};
+
+/* read GPIO pin */
+static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct rdc321x_gpio *gpch;
+ u32 value = 0;
+ int reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
+
+ spin_lock(&gpch->lock);
+ pci_write_config_dword(gpch->sb_pdev, reg,
+ gpch->data_reg[gpio < 32 ? 0 : 1]);
+ pci_read_config_dword(gpch->sb_pdev, reg, &value);
+ spin_unlock(&gpch->lock);
+
+ return (1 << (gpio & 0x1f)) & value ? 1 : 0;
+}
+
+static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int reg = (gpio < 32) ? 0 : 1;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ if (value)
+ gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
+ else
+ gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
+
+ pci_write_config_dword(gpch->sb_pdev,
+ reg ? gpch->reg2_data_base : gpch->reg1_data_base,
+ gpch->data_reg[reg]);
+}
+
+/* set GPIO pin to value */
+static void rdc_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ spin_lock(&gpch->lock);
+ rdc_gpio_set_value_impl(chip, gpio, value);
+ spin_unlock(&gpch->lock);
+}
+
+static int rdc_gpio_config(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int err;
+ u32 reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ spin_lock(&gpch->lock);
+ err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
+ if (err)
+ goto unlock;
+
+ reg |= 1 << (gpio & 0x1f);
+
+ err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
+ if (err)
+ goto unlock;
+
+ rdc_gpio_set_value_impl(chip, gpio, value);
+
+unlock:
+ spin_unlock(&gpch->lock);
+
+ return err;
+}
+
+/* configure GPIO pin as input */
+static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return rdc_gpio_config(chip, gpio, 1);
+}
+
+/*
+ * Cache the initial value of both GPIO data registers
+ */
+static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *r;
+ struct rdc321x_gpio *rdc321x_gpio_dev;
+ struct rdc321x_gpio_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
+ if (!rdc321x_gpio_dev) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ spin_lock_init(&rdc321x_gpio_dev->lock);
+ rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
+ rdc321x_gpio_dev->reg1_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ rdc321x_gpio_dev->reg2_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
+
+ rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
+ rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
+ rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.base = 0;
+ rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
+
+ platform_set_drvdata(pdev, rdc321x_gpio_dev);
+
+ /* This might not be what others (BIOS, bootloader, etc.)
+ wrote to these registers before, but it's a good guess. Still
+ better than just using 0xffffffff. */
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg1_data_base,
+ &rdc321x_gpio_dev->data_reg[0]);
+ if (err)
+ goto out_drvdata;
+
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg2_data_base,
+ &rdc321x_gpio_dev->data_reg[1]);
+ if (err)
+ goto out_drvdata;
+
+ dev_info(&pdev->dev, "registering %d GPIOs\n",
+ rdc321x_gpio_dev->chip.ngpio);
+ return gpiochip_add(&rdc321x_gpio_dev->chip);
+
+out_drvdata:
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(rdc321x_gpio_dev);
+ return err;
+}
+
+static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
+
+ ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
+ if (ret)
+ dev_err(&pdev->dev, "failed to unregister chip\n");
+
+ kfree(rdc321x_gpio_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static struct platform_driver rdc321x_gpio_driver = {
+ .driver.name = "rdc321x-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = rdc321x_gpio_probe,
+ .remove = __devexit_p(rdc321x_gpio_remove),
+};
+
+static int __init rdc321x_gpio_init(void)
+{
+ return platform_driver_register(&rdc321x_gpio_driver);
+}
+
+static void __exit rdc321x_gpio_exit(void)
+{
+ platform_driver_unregister(&rdc321x_gpio_driver);
+}
+
+module_init(rdc321x_gpio_init);
+module_exit(rdc321x_gpio_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("RDC321x GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rdc321x-gpio");
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
new file mode 100644
index 00000000000..1be6288780d
--- /dev/null
+++ b/drivers/gpio/tc35892-gpio.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/tc35892.h>
+
+/*
+ * These registers are modified under the irq bus lock and cached to avoid
+ * unnecessary writes in bus_sync_unlock.
+ */
+enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
+
+#define CACHE_NR_REGS 4
+#define CACHE_NR_BANKS 3
+
+struct tc35892_gpio {
+ struct gpio_chip chip;
+ struct tc35892 *tc35892;
+ struct device *dev;
+ struct mutex irq_lock;
+
+ int irq_base;
+
+ /* Caches of interrupt control registers for bus_lock */
+ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
+ u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
+};
+
+static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tc35892_gpio, chip);
+}
+
+static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & mask;
+}
+
+static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ unsigned pos = offset % 8;
+ u8 data[] = {!!val << pos, 1 << pos};
+
+ tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data);
+}
+
+static int tc35892_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ tc35892_gpio_set(chip, offset, val);
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos);
+}
+
+static int tc35892_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 0);
+}
+
+static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+
+ return tc35892_gpio->irq_base + offset;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tc35892",
+ .owner = THIS_MODULE,
+ .direction_input = tc35892_gpio_direction_input,
+ .get = tc35892_gpio_get,
+ .direction_output = tc35892_gpio_direction_output,
+ .set = tc35892_gpio_set,
+ .to_irq = tc35892_gpio_to_irq,
+ .can_sleep = 1,
+};
+
+static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ tc35892_gpio->regs[REG_IBE][regoffset] |= mask;
+ return 0;
+ }
+
+ tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IS][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IS][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IEV][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask;
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_lock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+
+ mutex_lock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ static const u8 regmap[] = {
+ [REG_IBE] = TC35892_GPIOIBE0,
+ [REG_IEV] = TC35892_GPIOIEV0,
+ [REG_IS] = TC35892_GPIOIS0,
+ [REG_IE] = TC35892_GPIOIE0,
+ };
+ int i, j;
+
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ for (j = 0; j < CACHE_NR_BANKS; j++) {
+ u8 old = tc35892_gpio->oldregs[i][j];
+ u8 new = tc35892_gpio->regs[i][j];
+
+ if (new == old)
+ continue;
+
+ tc35892_gpio->oldregs[i][j] = new;
+ tc35892_reg_write(tc35892, regmap[i] + j * 8, new);
+ }
+ }
+
+ mutex_unlock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_mask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] &= ~mask;
+}
+
+static void tc35892_gpio_irq_unmask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] |= mask;
+}
+
+static struct irq_chip tc35892_gpio_irq_chip = {
+ .name = "tc35892-gpio",
+ .bus_lock = tc35892_gpio_irq_lock,
+ .bus_sync_unlock = tc35892_gpio_irq_sync_unlock,
+ .mask = tc35892_gpio_irq_mask,
+ .unmask = tc35892_gpio_irq_unmask,
+ .set_type = tc35892_gpio_irq_set_type,
+};
+
+static irqreturn_t tc35892_gpio_irq(int irq, void *dev)
+{
+ struct tc35892_gpio *tc35892_gpio = dev;
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 status[CACHE_NR_BANKS];
+ int ret;
+ int i;
+
+ ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0,
+ ARRAY_SIZE(status), status);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ unsigned int stat = status[i];
+ if (!stat)
+ continue;
+
+ while (stat) {
+ int bit = __ffs(stat);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(tc35892_gpio->irq_base + line);
+ stat &= ~(1 << bit);
+ }
+
+ tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+ set_irq_chip_data(irq, tc35892_gpio);
+ set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
+ struct tc35892_gpio_platform_data *pdata;
+ struct tc35892_gpio *tc35892_gpio;
+ int ret;
+ int irq;
+
+ pdata = tc35892->pdata->gpio;
+ if (!pdata)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL);
+ if (!tc35892_gpio)
+ return -ENOMEM;
+
+ mutex_init(&tc35892_gpio->irq_lock);
+
+ tc35892_gpio->dev = &pdev->dev;
+ tc35892_gpio->tc35892 = tc35892;
+
+ tc35892_gpio->chip = template_chip;
+ tc35892_gpio->chip.ngpio = tc35892->num_gpio;
+ tc35892_gpio->chip.dev = &pdev->dev;
+ tc35892_gpio->chip.base = pdata->gpio_base;
+
+ tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0);
+
+ /* Bring the GPIO module out of reset */
+ ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_GPIRST, 0);
+ if (ret < 0)
+ goto out_free;
+
+ ret = tc35892_gpio_irq_init(tc35892_gpio);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT,
+ "tc35892-gpio", tc35892_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = gpiochip_add(&tc35892_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ goto out_freeirq;
+ }
+
+ platform_set_drvdata(pdev, tc35892_gpio);
+
+ return 0;
+
+out_freeirq:
+ free_irq(irq, tc35892_gpio);
+out_removeirq:
+ tc35892_gpio_irq_remove(tc35892_gpio);
+out_free:
+ kfree(tc35892_gpio);
+ return ret;
+}
+
+static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
+{
+ struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ int ret;
+
+ ret = gpiochip_remove(&tc35892_gpio->chip);
+ if (ret < 0) {
+ dev_err(tc35892_gpio->dev,
+ "unable to remove gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ free_irq(irq, tc35892_gpio);
+ tc35892_gpio_irq_remove(tc35892_gpio);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(tc35892_gpio);
+
+ return 0;
+}
+
+static struct platform_driver tc35892_gpio_driver = {
+ .driver.name = "tc35892-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_gpio_probe,
+ .remove = __devexit_p(tc35892_gpio_remove),
+};
+
+static int __init tc35892_gpio_init(void)
+{
+ return platform_driver_register(&tc35892_gpio_driver);
+}
+subsys_initcall(tc35892_gpio_init);
+
+static void __exit tc35892_gpio_exit(void)
+{
+ platform_driver_unregister(&tc35892_gpio_driver);
+}
+module_exit(tc35892_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 GPIO driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 7607cc61e1d..2ac9a16d3da 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -81,6 +81,18 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
+static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+ if (!wm8994->irq_base)
+ return -EINVAL;
+
+ return wm8994->irq_base + offset;
+}
+
+
#ifdef CONFIG_DEBUG_FS
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c5900396..88910e5a2c7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
+ select POWER_SUPPLY
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a6..3f46772f0cb 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f7ba82ebf65..2092e7bb788 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -961,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf04..994d23beeb1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f..76440195104 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
- }
- i++;
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- printk(KERN_INFO "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
@@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct slow_work *work)
+{
+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status, status;
+ bool repoll = false, changed = false;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* if this is HPD or polled don't check it -
+ TV out for instance */
+ if (!connector->polled)
+ continue;
+
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ status = connector->funcs->detect(connector);
+ if (old_status != status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
+
+ if (repoll) {
+ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ bool poll = false;
+ int ret;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+
+ if (poll) {
+ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ /* schedule a slow work asap */
+ delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+ .execute = output_poll_execute,
+};
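
With connector polling moved into the helper, a KMS driver of this era marks which connectors need polling, starts and stops the poller around load/unload, and forwards hotplug interrupts so a re-detect happens immediately instead of on the next 10-second tick. The sketch below shows that wiring; the foo_* functions are hypothetical placeholders, and only the drm_* calls and connector->polled flags come from this patch and the existing DRM core.

#include "drmP.h"
#include "drm_crtc_helper.h"

/* Hypothetical KMS driver glue around the new polling helpers. */
static int foo_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_connector *connector;

	/* ... mode_config, CRTC and connector setup would go here ... */

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		/* outputs without a working HPD line: poll for (dis)connect */
		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				    DRM_CONNECTOR_POLL_DISCONNECT;
	}

	/* starts the delayed slow work if any connector asked to be polled */
	drm_kms_helper_poll_init(dev);
	return 0;
}

static int foo_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
	/* ... rest of teardown ... */
	return 0;
}

/* Called from the driver's hotplug interrupt handler */
static void foo_hotplug_irq(struct drm_device *dev)
{
	/* cancel the pending poll and re-detect right away */
	drm_helper_hpd_irq_event(dev);
}
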
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413f..252cbd74df0 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
{
int i;
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
- memset(dev->dma, 0, sizeof(*dev->dma));
-
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f..c1981861bbb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -33,10 +34,9 @@
#include "drmP.h"
#include "drm_edid.h"
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
@@ -109,51 +110,64 @@ static struct edid_quirk {
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+/*** DDC fetch and block validation ***/
-/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
{
- int i, score = 0;
+ int i;
u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (raw_edid[0] == 0x00) {
+ int score = 0;
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
}
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
return 1;
@@ -165,8 +179,158 @@ bad:
}
return 0;
}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
EXPORT_SYMBOL(drm_edid_is_valid);
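A quick aside on the checksum test in drm_edid_block_valid(): every 128-byte EDID block is defined so that its bytes sum to zero modulo 256, which is why the code only needs to test the running total. A minimal standalone sketch of that rule (the helper name is illustrative, not part of this patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EDID_BLOCK_LENGTH 128

/* A block is well-formed only if all 128 bytes, including the final
 * checksum byte, sum to zero modulo 256. */
static bool edid_block_checksum_ok(const uint8_t *block)
{
	uint8_t csum = 0;
	size_t i;

	for (i = 0; i < EDID_BLOCK_LENGTH; i++)
		csum += block[i];

	return csum == 0;
}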
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adapter
+ * \param buf : EDID data buffer to be filled
+ * \param block : EDID block number to fetch
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf + start,
+ }
+ };
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+
+ return -1;
+}
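For readers who want to poke at a monitor from userspace, the same two-message transaction (write the byte offset, then read back from address 0x50) can be issued through the i2c-dev interface. This is a rough sketch under stated assumptions: the device path is hypothetical, and like the kernel helper above it only reaches blocks 0 and 1; higher blocks need E-DDC segment addressing via 0x30.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#define DDC_ADDR	0x50
#define EDID_LENGTH	128

static int read_edid_block(const char *dev_path, int block, uint8_t *buf)
{
	uint8_t start = block * EDID_LENGTH;
	struct i2c_msg msgs[] = {
		{ .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &start },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = EDID_LENGTH, .buf = buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd, ret;

	fd = open(dev_path, O_RDWR);	/* e.g. "/dev/i2c-2"; adapter number is an assumption */
	if (fd < 0)
		return -1;

	ret = ioctl(fd, I2C_RDWR, &xfer);	/* returns the number of messages transferred */
	close(fd);

	return (ret == 2) ? 0 : -1;
}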
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, *new;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, j,
+ EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+ }
+
+ return block;
+
+carp:
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ kfree(block);
+ return NULL;
+}
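The resize above keys off byte 0x7e of the base block, which holds the number of extension blocks, so a complete EDID occupies (extensions + 1) * 128 bytes. A trivial helper stating the same sizing rule (illustrative only):

#include <stddef.h>
#include <stdint.h>

#define EDID_LENGTH 128

/* Base block plus one 128-byte block per extension advertised at 0x7e. */
static size_t edid_total_size(const uint8_t *base_block)
{
	return ((size_t)base_block[0x7e] + 1) * EDID_LENGTH;
}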
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adapter
+ * \return True if the DDC bus responds, false otherwise.
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
@@ -335,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
@@ -497,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh)
{
int i;
struct drm_display_mode *ptr, *mode;
@@ -516,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
}
return mode;
}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
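The five accessors above all read the same monitor range descriptor; gathering them into one decoder makes the byte layout plainer: byte 12 is the hsync break point in 2 kHz units, byte 13 is 2C, bytes 14-15 are M (low byte first), byte 16 is K and byte 17 is 2J. A hedged sketch mirroring those offsets (the struct is illustrative, not from the patch):

#include <stdint.h>

struct gtf2_params {
	int hbreak_khz;	/* hsync rate where the secondary curve takes over */
	int twice_c;
	int m;
	int k;
	int twice_j;
};

/* 'r' points at an 18-byte monitor range descriptor whose byte 10 is 0x02,
 * i.e. one that carries secondary GTF coefficients. */
static void decode_gtf2(const uint8_t *r, struct gtf2_params *out)
{
	out->hbreak_khz = r[12] * 2;
	out->twice_c = r[13];
	out->m = (r[15] << 8) | r[14];
	out->k = r[16];
	out->twice_j = r[17];
}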
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
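Each standard timing entry is only two bytes, so the decode drm_mode_std() performs below boils down to simple arithmetic: horizontal size is (byte0 + 31) * 8, the refresh rate is the low six bits of byte1 plus 60, and the top two bits of byte1 pick the aspect ratio. A standalone sketch of that arithmetic (names and the struct are assumptions for illustration; 0x01 0x01 is the fill pattern for unused slots):

#include <stdint.h>

struct std_timing_decoded {
	int hsize;
	int vsize;
	int vrefresh;
};

/* Decode one 2-byte EDID standard timing. Returns 0 on success, -1 if the
 * entry is unused fill. */
static int decode_std_timing(uint8_t hsize_byte, uint8_t vfreq_aspect,
			     struct std_timing_decoded *out)
{
	unsigned aspect = vfreq_aspect >> 6;		/* top two bits */

	if (hsize_byte == 0x01 && vfreq_aspect == 0x01)
		return -1;

	out->hsize = (hsize_byte + 31) * 8;
	out->vrefresh = (vfreq_aspect & 0x3f) + 60;	/* low six bits + 60 Hz */

	switch (aspect) {
	case 0:		/* 16:10 (1:1 on very old EDIDs) */
		out->vsize = (out->hsize * 10) / 16;
		break;
	case 1:		/* 4:3 */
		out->vsize = (out->hsize * 3) / 4;
		break;
	case 2:		/* 5:4 */
		out->vsize = (out->hsize * 4) / 5;
		break;
	default:	/* 16:9 */
		out->vsize = (out->hsize * 9) / 16;
		break;
	}
	return 0;
}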
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
@@ -536,22 +805,20 @@ bad_std_timing(u8 a, u8 b)
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -572,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
@@ -593,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ kfree(mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -716,10 +1020,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
drm_mode_do_interlace_quirk(mode, pt);
+ drm_mode_set_name(mode);
+
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -802,10 +1106,6 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
@@ -833,19 +1133,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
/**
* add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1143,14 @@ static int standard_timing_level(struct edid *edid)
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -881,36 +1160,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
return false;
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ if (!mode_in_vsync_range(mode, edid, t))
return false;
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
+ if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
- }
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
return true;
}
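Spelling out the offsets used by the three helpers above: within the 18-byte range descriptor, bytes 5 and 6 are the vertical rate limits (Hz), bytes 7 and 8 the horizontal rate limits (kHz), byte 9 the maximum pixel clock in 10 MHz units, and on EDID 1.4 the low four bits of byte 4 each add 255 to one of the four rate limits. A small illustrative decoder (struct and names are not from the patch):

#include <stdbool.h>
#include <stdint.h>

struct edid_range_limits {
	int hmin_khz, hmax_khz;		/* horizontal rate, kHz */
	int vmin_hz, vmax_hz;		/* vertical rate, Hz */
	uint32_t max_clock_khz;		/* 0 means unspecified */
};

/* Pull the limits out of an 18-byte monitor range descriptor, mirroring the
 * byte offsets used above. 'is_edid_1_4' selects whether the +255 offset
 * bits in byte 4 are honoured. The 1.4 CVT refinement done by
 * range_pixel_clock() is omitted here for brevity. */
static void decode_range_limits(const uint8_t *t, bool is_edid_1_4,
				struct edid_range_limits *out)
{
	out->vmin_hz = t[5] + ((is_edid_1_4 && (t[4] & 0x01)) ? 255 : 0);
	out->vmax_hz = t[6] + ((is_edid_1_4 && (t[4] & 0x02)) ? 255 : 0);
	out->hmin_khz = t[7] + ((is_edid_1_4 && (t[4] & 0x04)) ? 255 : 0);
	out->hmax_khz = t[8] + ((is_edid_1_4 && (t[4] & 0x08)) ? 255 : 0);

	/* byte 9: max pixel clock in 10 MHz units, 0/255 mean "not given" */
	if (t[9] == 0 || t[9] == 255)
		out->max_clock_khz = 0;
	else
		out->max_clock_khz = t[9] * 10000;
}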
@@ -919,15 +1248,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -988,13 +1318,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= num_est3_modes)
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
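Establishment timings III pack one mode per bit, highest bit first within each byte, in the same order the table above lists them. A toy decoder for the first group (byte 6 of the descriptor) shows the mapping; the helper below is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* A trimmed copy of the byte-6 group from the table above. */
static const struct { short w, h, r; } est3_byte6[8] = {
	{ 640, 350, 85 }, { 640, 400, 85 }, { 720, 400, 85 }, { 640, 480, 85 },
	{ 848, 480, 60 }, { 800, 600, 85 }, { 1024, 768, 85 }, { 1152, 864, 75 },
};

/* Bit 7 selects the first entry of the group, bit 6 the second, and so on. */
static void print_est3_byte6(uint8_t bits)
{
	int bit;

	for (bit = 7; bit >= 0; bit--)
		if (bits & (1 << bit))
			printf("%dx%d@%d\n", est3_byte6[7 - bit].w,
			       est3_byte6[7 - bit].h, est3_byte6[7 - bit].r);
}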
+
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
{
int i, modes = 0;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
@@ -1015,7 +1432,8 @@ static int add_detailed_modes(struct drm_connector *connector,
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
+ modes += drm_gtf_modes_for_range(connector, edid,
+ timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
@@ -1024,8 +1442,8 @@ static int add_detailed_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -1035,6 +1453,9 @@ static int add_detailed_modes(struct drm_connector *connector,
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
+ case EDID_DETAIL_EST_TIMINGS:
+ modes += drm_est3_modes(connector, timing);
+ break;
default:
break;
}
@@ -1058,7 +1479,10 @@ static int add_detailed_info(struct drm_connector *connector,
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ int preferred = (i == 0);
+
+ if (preferred && edid->version == 1 && edid->revision < 4)
+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1512,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- int edid_ext_num;
int start_offset, end_offset;
- int timing_level;
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+ if (edid->version == 1 && edid->revision < 3)
return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ if (!edid->extensions)
return 0;
- }
-
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
+ if (i == edid->extensions)
return 0;
- }
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
@@ -1132,7 +1539,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return 0;
}
- timing_level = standard_timing_level(edid);
end_offset = EDID_LENGTH;
end_offset -= sizeof(struct detailed_timing);
for (i = start_offset; i < end_offset;
@@ -1144,123 +1550,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (drm_edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, DRM_MAX_EDID_EXT_NUM,
- DRM_MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = DRM_MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
@@ -1273,7 +1562,7 @@ EXPORT_SYMBOL(drm_get_edid);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
@@ -1281,19 +1570,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
if (edid == NULL || edid->extensions == 0)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num)
+ if (i == edid->extensions)
goto end;
/* Data block offset in CEA extension block */
@@ -1348,10 +1633,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We don't quite implement this yet, but we're close.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f3277..b3779d243ae 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
*
* enable/enable Digital/disable bit at the end
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
const char *mode_option)
{
const char *name;
@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector = fb_helper_conn->connector;
- if (!fb_help_conn)
+ if (!fb_helper_conn)
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
@@ -204,18 +222,21 @@ done:
return true;
}
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- struct drm_connector *connector;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ for (i = 0; i < fb_helper->connector_count; i++) {
char *option = NULL;
+ fb_helper_conn = fb_helper->connector_info[i];
+
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
continue;
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
}
return 0;
}
@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info)
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
}
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
}
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
}
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ mutex_unlock(&dev->mode_config.mutex);
}
int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
+ if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+ fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ return ret;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
- }
-
- info = fb->fbdev;
- fb_helper = info->par;
-
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
-
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
- modeset->crtc = crtc;
- crtc_count++;
+ info = fb_helper->fbdev;
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
} else {
drm_fb_helper_set_par(info);
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "registered panic notifier\n");
+ printk(KERN_INFO "drm: registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far, if not add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+ cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
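Stripped of the DRM plumbing, drm_pick_crtcs() is a backtracking search: for connector n it either leaves it unlit or tries each CRTC its encoder can reach and that no earlier connector has claimed, recurses over the remaining connectors, and keeps the highest-scoring assignment. A toy version over plain arrays (the names, the simple one-point-per-lit-connector score, and the array bound are all simplifying assumptions):

#define MAX_CRTC 8

/* possible[c][k] is nonzero when connector c can be driven by CRTC k;
 * used[k] marks CRTCs claimed by connectors earlier in the recursion. */
static int best_assignment_score(int n, int num_conn, int num_crtc,
				 const int possible[][MAX_CRTC], int *used)
{
	int k, score, best;

	if (n == num_conn)
		return 0;

	/* option 1: leave connector n without a CRTC */
	best = best_assignment_score(n + 1, num_conn, num_crtc, possible, used);

	/* option 2: try every free CRTC that can drive it */
	for (k = 0; k < num_crtc; k++) {
		if (!possible[n][k] || used[k])
			continue;
		used[k] = 1;
		score = 1 + best_assignment_score(n + 1, num_conn, num_crtc,
						  possible, used);
		used[k] = 0;
		if (score > best)
			best = score;
	}
	return best;
}

The real helper additionally weights the score by connection status, command line and preferred modes, and allows two connectors to share the single CRTC when their chosen modes are equal.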
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * The result of the single framebuffer probe.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return false;
+
+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (!bound && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ return false;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
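/*
 * Minimal usage sketch for the two helper entry points added above; the
 * foo_* wrappers and the 32 bpp default are illustrative assumptions, not
 * part of this patch.
 */
static bool foo_fbdev_setup(struct drm_fb_helper *helper)
{
	/* probe connectors, pick CRTCs and register the initial fbdev */
	return drm_fb_helper_initial_config(helper, 32);
}

static bool foo_fbdev_hotplug(struct drm_fb_helper *helper)
{
	/* re-probe modes within the existing framebuffer's size limits */
	return drm_fb_helper_hotplug_event(helper);
}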
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9d532d7fdf5..e7aace20981 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c..33dad3fa604 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(obj->filp))
+ return -ENOMEM;
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
+ if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
+ /* Object_init mangles the global counters - readjust them. */
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
+EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
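/*
 * Sketch of the embed-and-init pattern that drm_gem_object_init() and
 * drm_gem_object_release() enable (the foo_* names are illustrative
 * assumptions): a driver embeds struct drm_gem_object in its own object,
 * sets up the shmem backing itself, and frees the wrapper from its own
 * gem_free_object() path.
 */
struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private state ... */
};

static struct drm_gem_object *foo_gem_alloc(struct drm_device *dev, size_t size)
{
	struct foo_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj == NULL)
		return NULL;
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
	return &obj->base;
}

static void foo_gem_free(struct drm_gem_object *gem)
{
	struct foo_gem_object *obj = container_of(gem, struct foo_gem_object, base);

	drm_gem_object_release(gem);	/* drops the shmem file and counters */
	kfree(obj);
}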
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c77..f1f473ea97d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
+ * @margins :desired margin size
+ * @GTF_[MCKJ] :extended GTF formula parameters
*
* LOCKING.
* none.
*
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * Returns the modeline computed with the full GTF algorithm.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev :drm device
+ * @hdisplay :hdisplay size
+ * @vdisplay :vdisplay size
+ * @vrefresh :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins :whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * Returns the modeline computed with the GTF algorithm.
+ *
+ * This function creates a modeline based on the Generalized Timing
+ * Formula, which is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * The code is adapted from xserver/hw/xfree86/modes/xf86gtf.c, rewritten
+ * to use integer arithmetic, and also draws on fb_get_mode() in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
EXPORT_SYMBOL(drm_gtf_mode);
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
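/*
 * Illustration of the drm_gtf_mode()/drm_gtf_mode_complex() split above
 * (a struct drm_device *dev is assumed in scope and the 1024x768@60
 * request is an arbitrary example): the plain wrapper and an explicit call
 * with the standard parameters produce the same modeline, while an EDID
 * secondary GTF curve would pass its own M, 2*C, K and 2*J. With the
 * defaults, C' = (((80 - 40) * 128 / 256) + 40) / 2 = 30, matching the old
 * ((40 - 20) * 128 / 256) + 20 formulation.
 */
struct drm_display_mode *std, *same;

std  = drm_gtf_mode(dev, 1024, 768, 60, false, 0);
same = drm_gtf_mode_complex(dev, 1024, 768, 60, false, 0,
			     600, 40 * 2, 128, 20 * 2);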
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30ed7a..101d381e9d8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -193,8 +193,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -333,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e..95639017bdb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e..0d6ff640e1c 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
- */
- void (*restore)(struct intel_dvo_device *dvo);
-
- /*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87..14d59804acd 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b..6f1944b2444 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
-};
-
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
}
}
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0..a2ec3f48720 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
-}
-
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a8..9b8e6765cf2 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116..66c697bc9b2 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e..322070c0c63 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
- struct drm_gem_object *obj = obj_priv->obj;
-
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
+ &obj_priv->base,
get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
+ obj_priv->base.size,
+ obj_priv->base.read_domains,
+ obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->base.name)
+ seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_crtc *crtc;
drm_i915_private_t *dev_priv = dev->dev_private;
- bool fbc_enabled = false;
- if (!dev_priv->display.fbc_enabled) {
+ if (!I915_HAS_FBC(dev)) {
seq_printf(m, "FBC unsupported on this chipset\n");
return 0;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->enabled)
- continue;
- if (dev_priv->display.fbc_enabled(crtc))
- fbc_enabled = true;
- }
-
- if (fbc_enabled) {
+ if (intel_fbc_enabled(dev)) {
seq_printf(m, "FBC enabled\n");
} else {
seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafcbfe7..2a6b5de5ae5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
@@ -1504,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- drm_helper_initial_config(dev);
-
+ intel_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
destroy_ringbuffer:
@@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
+ intel_detect_pch(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_modeset_cleanup(dev);
+
/*
* free the memory space allocated for the child device
* config parsed from VBT
@@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537bb88..5c51e45ab68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+
+void intel_detect_pch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch;
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough work easier for VMMs, which only
+ * need to expose the ISA bridge to let the driver know the real
+ * hardware underneath. This is a requirement from the virtualization team.
+ */
+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (pch) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ int id;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ }
+ }
+ pci_dev_put(pch);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
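/*
 * Sketch of how the cached PCH type is meant to be consumed (this helper
 * and its temp handling are assumed, not part of this hunk): display code
 * branches on the new HAS_PCH_CPT() macro to pick the CougarPoint register
 * layout, e.g. the CPT link-training bits added to i915_reg.h further below.
 */
static u32 foo_train_pattern_1(struct drm_device *dev, u32 temp)
{
	if (HAS_PCH_CPT(dev)) {
		temp &= ~DP_LINK_TRAIN_MASK_CPT;	/* new encoding, bits 10:8 */
		temp |= DP_LINK_TRAIN_PAT_1_CPT;
	} else {
		temp &= ~DP_LINK_TRAIN_MASK;		/* legacy encoding, bits 29:28 */
		temp |= DP_LINK_TRAIN_PAT_1;
	}
	return temp;
}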
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e4790065d9..7f797ef1ab3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
+ struct list_head lru_list;
};
struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 initialized;
+ u8 ddc_pin;
};
struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
- bool (*fbc_enabled)(struct drm_crtc *crtc);
+ bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +224,13 @@ enum no_fbc_reason {
FBC_NOT_TILED, /* buffer not tiled */
};
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -335,6 +344,9 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@@ -637,11 +649,14 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -651,9 +666,6 @@ struct drm_i915_gem_object {
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on the fenced object LRU */
- struct list_head fence_list;
-
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
@@ -740,7 +752,7 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -902,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+ !IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1165,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
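/*
 * What the base-embedding change above means in practice (obj is an assumed
 * struct drm_gem_object pointer): conversions between the GEM object and the
 * i915 wrapper are now plain container_of() arithmetic in both directions,
 * with no separate driver_private allocation.
 */
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_gem_object *gem = &obj_priv->base;	/* and back again */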
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71..112699f71fa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
+ obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* about to occur.
*/
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list,
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
@@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
i915_gem_object_move_to_active(obj, seqno);
/* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- list_move_tail(&obj_priv->fence_list,
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
+ }
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -1745,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
obj_priv = list_first_entry(&dev_priv->mm.active_list,
struct drm_i915_gem_object,
list);
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@@ -2119,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@@ -2485,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
/* None available, try to steal one or wait for a user to finish */
i = I915_FENCE_REG_NONE;
- list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- obj = obj_priv->obj;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list,
+ lru_list) {
+ obj = reg->obj;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count)
continue;
@@ -2536,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0;
}
@@ -2566,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
obj_priv->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj;
@@ -2598,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, 0);
}
- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+ reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&obj_priv->fence_list);
+ list_del_init(&reg->lru_list);
}
/**
@@ -4471,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->agp_type = AGP_USER_MEMORY;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
- trace_i915_gem_object_create(obj);
+ trace_i915_gem_object_create(&obj->base);
+
+ return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
return 0;
}
@@ -4521,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@@ -4536,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@@ -4608,7 +4623,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv;
int ret;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
@@ -4653,7 +4668,7 @@ i915_gem_init_hws(struct drm_device *dev)
if (!I915_NEED_GFX_HWS(dev))
return 0;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
ret = -ENOMEM;
@@ -4764,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
if (ret != 0)
return ret;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
+ obj = i915_gem_alloc_object(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
i915_gem_cleanup_hws(dev);
@@ -4957,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < 16; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
@@ -5185,6 +5202,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
}
static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ return !lists_empty;
+}
+
+static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
drm_i915_private_t *dev_priv, *next_dev;
@@ -5213,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&shrink_list_lock);
+rescan:
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
@@ -5229,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@@ -5258,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
@@ -5270,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
would_deadlock = 0;
}
+ if (nr_to_scan) {
+ int active = 0;
+
+ /*
+ * We are desperate for pages, so as a last resort, wait
+ * for the GPU to finish and discard whatever we can.
+ * This dramatically reduces the number of OOM-killer
+ * events whilst running the GPU aggressively.
+ */
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ if (i915_gpu_is_active(dev)) {
+ i915_gpu_idle(dev);
+ active++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (active)
+ goto rescan;
+ }
+
spin_unlock(&shrink_list_lock);
if (would_deadlock)
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa..80f380b1d95 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4bdccefcf2c..4b7c49d4257 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EBUSY;
+ }
+
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df6a9cd82c4..8c3f0802686 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
- else
+ else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
+ if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, 0,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ }
}
/**
@@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ if (mode_config->num_encoder) {
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->hot_plug)
(*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
@@ -612,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[1] = NULL;
count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
@@ -648,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
error->active_bo[i].name = obj->name;
@@ -950,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc5210fd3..f3e39cc46f0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1924,7 +1932,10 @@
/* Display & cursor control */
/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_DITHER_TYPE_MASK (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1988,15 +1999,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -2023,6 +2043,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2361,15 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DSPCLK_GATE 0x42020
+#define ILK_DPARB_CLK_GATE (1<<5)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@@ -2316,6 +2380,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2407,6 +2476,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@@ -2493,6 +2573,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2525,6 +2618,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@@ -2596,6 +2696,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+/* PCH SDVOB multiplex with HDMIB */
+#define PCH_SDVOB HDMIB
+
#define HDMIC 0xe1150
#define HDMID 0xe1160
@@ -2653,4 +2756,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac2..60a5800fba6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
}
/* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ /* Only restore FBC info on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
}
-
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38..9e4c45f68d6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
- TP_ARGS(obj),
+ TP_ARGS(obj)
+);
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_fast_assign(
- __entry->obj = obj;
- ),
+ TP_PROTO(struct drm_gem_object *obj),
- TP_printk("obj=%p", __entry->obj)
+ TP_ARGS(obj)
);
/* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, seqno)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
+ TP_ARGS(dev)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
+ TP_PROTO(struct drm_device *dev),
- TP_printk("dev=%u", __entry->dev)
+ TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
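The i915_trace.h conversion replaces several identical TRACE_EVENT() definitions with one DECLARE_EVENT_CLASS() plus a DEFINE_EVENT() per tracepoint, so the assign/print boilerplate is emitted once per class instead of once per event. The same idiom for a hypothetical one-argument event, following the i915_ring class above:

DECLARE_EVENT_CLASS(example_class,		/* hypothetical class name */

	    TP_PROTO(struct drm_device *dev),

	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%u", __entry->dev)
);

DEFINE_EVENT(example_class, example_event,	/* hypothetical event name */

	    TP_PROTO(struct drm_device *dev),

	    TP_ARGS(dev)
);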
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452f0cb..4c748d8f73d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->initialized = 1;
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72ef..e16ac5a28c3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
- adpa |= ADPA_PIPE_B_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa;
+ u32 adpa, temp;
bool ret;
- adpa = I915_READ(PCH_ADPA);
+ temp = adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
+ if (HAS_PCH_CPT(dev)) {
+ I915_WRITE(PCH_ADPA, temp);
+ (void)I915_READ(PCH_ADPA);
+ }
+
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return false;
}
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(connector))
+ if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder,
+ connector, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct i2c_adapter *ddcbus;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_encoder->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_encoder->ddc_bus) {
- intel_encoder->ddc_bus = ddcbus;
+ if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_encoder);
- intel_i2c_destroy(ddcbus);
+ ret = intel_ddc_get_modes(connector, ddc_bus);
+ intel_i2c_destroy(ddc_bus);
end:
return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
if (!intel_encoder)
return;
- connector = &intel_encoder->base;
- drm_connector_init(dev, &intel_encoder->base,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
/* Set up the DDC bus. */
@@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector);
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
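On CPT the hotplug probe above saves the whole ADPA value, turns the DAC off before forcing a detection cycle, and writes the saved value back once the force-trigger bit self-clears. The same read/modify/poll/restore shape, reduced to a sketch using the register and bit names from this hunk:

/* Sketch of the CPT force-detect sequence implemented above. */
u32 saved = I915_READ(PCH_ADPA);			/* remember the full register */

I915_WRITE(PCH_ADPA, saved & ~ADPA_DAC_ENABLE);		/* DAC off before force detect */
(void)I915_READ(PCH_ADPA);				/* posting read */

/* ... program the hotplug fields and set ADPA_CRT_HOTPLUG_FORCE_TRIGGER ... */

while (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
	;						/* trigger self-clears when done */

I915_WRITE(PCH_ADPA, saved);				/* restore the original state */
(void)I915_READ(PCH_ADPA);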
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c7502b6b160..f469a84cacf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
+ struct drm_encoder *l_entry;
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+ if (l_entry && l_entry->crtc == crtc) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
if (intel_encoder->type == type)
return true;
}
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
return false;
}
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
-}
-
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -905,9 +887,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+}
+
/**
* intel_update_fbc - enable/disable FBC as needed
* @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!i915_powersave)
return;
- if (!dev_priv->display.fbc_enabled ||
- !dev_priv->display.enable_fbc ||
- !dev_priv->display.disable_fbc)
+ if (!I915_HAS_FBC(dev))
return;
if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
- if (dev_priv->display.fbc_enabled(crtc)) {
+ if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
- if (fb->pitch > dev_priv->cfb_pitch)
- dev_priv->display.disable_fbc(dev);
- if (obj_priv->fence_reg != dev_priv->cfb_fence)
- dev_priv->display.disable_fbc(dev);
- if (plane != dev_priv->cfb_plane)
- dev_priv->display.disable_fbc(dev);
+ if ((fb->pitch > dev_priv->cfb_pitch) ||
+ (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+ (plane != dev_priv->cfb_plane))
+ intel_disable_fbc(dev);
}
- if (!dev_priv->display.fbc_enabled(crtc)) {
- /* Now try to turn it back on if possible */
- dev_priv->display.enable_fbc(crtc, 500);
- }
+ /* Now try to turn it back on if possible */
+ if (!intel_fbc_enabled(dev))
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (dev_priv->display.fbc_enabled(crtc))
- dev_priv->display.disable_fbc(dev);
+ if (intel_fbc_enabled(dev))
+ intel_disable_fbc(dev);
}
static int
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, tries = 0;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ break;
+ }
+ }
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ tries = 0;
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, i;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
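Both trainers follow the same sequence: pattern 1 on the CPU TX and PCH RX sides, poll FDI_RX_IIR for bit lock, then pattern 2 and poll for symbol lock, acknowledging each lock bit by writing it back. The SNB variant additionally sweeps snb_b_fdi_train_param[] between polls. One training step, condensed into a sketch from the code above:

/* Sketch of one SNB training step: try each drive setting, then poll for lock
 * (FDI_RX_SYMBOL_LOCK is checked instead during pattern 2).
 */
for (i = 0; i < 4; i++) {
	temp = I915_READ(fdi_tx_reg);
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= snb_b_fdi_train_param[i];	/* next voltage/pre-emphasis code */
	I915_WRITE(fdi_tx_reg, temp);
	udelay(500);

	temp = I915_READ(fdi_rx_iir_reg);
	if (temp & FDI_RX_BIT_LOCK) {
		I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_BIT_LOCK);	/* ack the status */
		break;
	}
}
if (i == 4)
	DRM_DEBUG_KMS("FDI training step failed\n");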
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int tries = 5, j, n;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
I915_READ(fdi_rx_reg);
udelay(200);
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
+ /* For PCH output, train the FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(200);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0 &&
+ (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (trans_dpll_sel == 1 &&
+ (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
}
- DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+ /* enable normal train */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ reg |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ reg |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+ }
+
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
- /* enable normal */
-
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
-
- /* wait one idle pattern time */
- udelay(100);
-
}
intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pf_ctl_reg);
}
I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(fdi_tx_reg, temp);
+ POSTING_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(fdi_rx_reg);
udelay(100);
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
+
temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(transconf_reg);
udelay(100);
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+
+ }
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
+ /* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
+ /* Disable CPU FDI TX PLL */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
-
/* Wait for the clocks to turn off. */
udelay(100);
break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
DRM_INFO("Big FIFO is disabled\n");
}
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
-
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+ int sr_clock;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
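Besides folding the CxSR decision into the per-platform update_wm hook, pineview_update_wm() also makes every watermark call use the same argument shape: the removed HPLL-off call passed latency->display_hpll_disable and I915_FIFO_LINE_SIZE where the other calls pass the pixel size and the latency. A placeholder prototype matching the calls above (parameter names are illustrative, not taken from this hunk):

/* Canonical argument order used for every watermark computed above: */
wm = intel_calculate_wm(clock_in_khz, &wm_params, pixel_size, latency);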
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size)
{
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
I915_WRITE(FW_BLC, fwater_lo);
}
+#define ILK_LP0_PLANE_LATENCY 700
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int sr_wm, cursor_wm;
+ unsigned long line_time_us;
+ int sr_clock, entries_required;
+ u32 reg_value;
+
+ /* Calculate and update the watermark for plane A */
+ if (planea_clock) {
+ entries_required = ((planea_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planea_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+ planea_wm = ironlake_display_wm_info.max_wm;
+
+ cursora_wm = 16;
+ reg_value = I915_READ(WM0_PIPEA_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursora_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+ "cursor: %d\n", planea_wm, cursora_wm);
+ }
+ /* Calculate and update the watermark for plane B */
+ if (planeb_clock) {
+ entries_required = ((planeb_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planeb_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+ planeb_wm = ironlake_display_wm_info.max_wm;
+
+ cursorb_wm = 16;
+ reg_value = I915_READ(WM0_PIPEB_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+ "cursor: %d\n", planeb_wm, cursorb_wm);
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ if (!planea_clock || !planeb_clock) {
+ int line_count;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+ / 1000;
+
+ /* calculate the self-refresh watermark for display plane */
+ entries_required = line_count * sr_hdisplay * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_srwm_info.cacheline_size);
+ sr_wm = entries_required +
+ ironlake_display_srwm_info.guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries_required = line_count * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_srwm_info.cacheline_size);
+ cursor_wm = entries_required +
+ ironlake_cursor_srwm_info.guard_size;
+
+ /* configure watermark and enable self-refresh */
+ reg_value = I915_READ(WM1_LP_ILK);
+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+ WM1_LP_CURSOR_MASK);
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+ I915_WRITE(WM1_LP_ILK, reg_value);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_wm, cursor_wm);
+
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ }
+}
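A worked example of the LP0 plane watermark computed above. The input numbers are assumptions, ILK_FIFO_LINE_SIZE is assumed to be 64 bytes (its value is defined in i915_reg.h, not in this hunk), and the guard size of 2 comes from ironlake_display_wm_info:

/* Worked example of the plane-A path above (inputs are assumptions):
 *   planea_clock = 108000 kHz, pixel_size = 4
 *   entries   = (108000 / 1000) * 4 * 700 / 1000 = 302
 *   lines     = DIV_ROUND_UP(302, 64)            = 5
 *   planea_wm = 5 + 2 (guard)                    = 7
 * which is then shifted into WM0_PIPEA_ILK with WM0_PIPE_PLANE_SHIFT.
 */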
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
if (enabled <= 0)
return;
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
-
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
}
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
int lvds_reg = LVDS;
u32 temp;
int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
- int lane, link_bw, bpp;
+ int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
- struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_connector(crtc);
- intel_edp_link_config(to_intel_encoder(edp),
+ intel_edp_link_config(intel_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- lane = 4;
link_bw = 270000;
}
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
+ }
+
+ intel_crtc->fdi_lanes = lane;
+
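The FDI lane count is now derived from the required bandwidth with a 5% spread-spectrum margin instead of being hard-coded to four lanes. A worked example; the mode clock is an assumption, the 24 bpp and 270000 link_bw come from the surrounding code:

/* Worked example of the lane calculation above:
 *   target_clock = 148500 kHz, bpp = 24, link_bw = 270000
 *   bps  = 148500 * 24 * 21 / 20      = 3742200
 *   lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 FDI lanes
 */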
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
-
-
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+ udelay(150);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ lvds |= PORT_TRANS_B_SEL_CPT;
+ else
+ lvds |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ lvds &= ~PORT_TRANS_SEL_MASK;
+ else
+ lvds &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- else
+ pipeconf |= PIPE_DITHER_TYPE_ST01;
+ } else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf &= ~PIPE_ENABLE_DITHER;
- else
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ } else
lvds &= ~LVDS_ENABLE_DITHER;
}
}
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting. */
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+ I915_WRITE(TRANSA_DATA_N1, 0);
+ I915_WRITE(TRANSA_DP_LINK_M1, 0);
+ I915_WRITE(TRANSA_DP_LINK_N1, 0);
+ } else {
+ I915_WRITE(TRANSB_DATA_M1, 0);
+ I915_WRITE(TRANSB_DATA_N1, 0);
+ I915_WRITE(TRANSB_DP_LINK_M1, 0);
+ I915_WRITE(TRANSB_DP_LINK_N1, 0);
+ }
+ }
if (!is_edp) {
I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* enable FDI TX PLL too */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ /* enable FDI RX PCDCLK */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+ I915_READ(fdi_rx_reg);
udelay(200);
}
}
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
};
struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode)
{
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
}
encoder->crtc = crtc;
- intel_encoder->base.encoder = encoder;
+ connector->encoder = encoder;
intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_encoder->base.encoder = NULL;
+ connector->encoder = NULL;
intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
return crtc;
}
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
intel_crt_init(dev);
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, DP_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ /* PCH SDVOB is multiplexed with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
+ encoder->possible_clones = intel_encoder_clones(dev,
intel_encoder->clone_mask);
}
}
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@ int intel_framebuffer_create(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
intel_fb->obj = obj;
-
- *fb = &intel_fb->base;
-
return 0;
}
-
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
return NULL;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ ret = intel_framebuffer_init(dev, intel_fb,
+ mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
+ kfree(intel_fb);
return NULL;
}
- return fb;
+ return &intel_fb->base;
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
+ .output_poll_changed = intel_fb_output_poll_changed,
};
static struct drm_gem_object *
@@ -4594,7 +4993,7 @@ intel_alloc_power_context(struct drm_device *dev)
struct drm_gem_object *pwrctx;
int ret;
- pwrctx = drm_gem_object_alloc(dev, 4096);
+ pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
@@ -4732,6 +5131,25 @@ void intel_init_clock_gating(struct drm_device *dev)
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec, the following bits should be set in
+ * order to enable memory self-refresh:
+ * bits 22/21 of 0x42004
+ * bit 5 of 0x42020
+ * bit 15 of 0x45000
+ */
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4809,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.dpms = i9xx_crtc_dpms;
- /* Only mobile has FBC, leave pointers NULL for other chips */
- if (IS_MOBILE(dev)) {
+ if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5264,31 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5362,6 @@ void intel_modeset_init(struct drm_device *dev)
(unsigned long)dev);
intel_setup_overlay(dev);
-
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
+ drm_kms_helper_poll_fini(dev);
+ intel_fbdev_fini(dev);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4974,14 +5409,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return the encoder that is currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
- return &intel_encoder->enc;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf21..6b1c9a27c27 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int try, precharge;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_encoder))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (HAS_PCH_SPLIT(dev))
+ if (IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev))
+			aux_clock_divider = 200; /* SNB eDP input clock at 400 MHz */
+		else
+			aux_clock_divider = 225; /* eDP input clock at 450 MHz */
+ } else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
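Note: the divider written into the DP_AUX_CH_CTL_BIT_CLOCK_2X field is simply the AUX input clock in MHz halved, which is where 200, 225 and 62 come from. A stand-alone arithmetic check (user-space C; the hrawclk sample value is an assumption, in the driver it comes from intel_hrawclk(dev)):

#include <stdio.h>

int main(void)
{
	int snb_edp_mhz = 400, edp_mhz = 450, pch_mhz = 125, hrawclk_mhz = 200;

	printf("SNB eDP: %d\n", snb_edp_mhz / 2);	/* 200 */
	printf("eDP:     %d\n", edp_mhz / 2);		/* 225 */
	printf("PCH:     %d\n", pch_mhz / 2);		/* 62, integer division */
	printf("hrawclk: %d\n", hrawclk_mhz / 2);
	return 0;
}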
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+ struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
@@ -626,16 +637,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
- if (intel_crtc->pipe == 1)
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
-static void
-intel_dp_save(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
-}
-
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+}
+
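Note: intel_gen6_edp_signal_levels() maps the sink's requested voltage-swing/pre-emphasis combination carried in train_set onto the dedicated SNB eDP register values, falling back to 400mV/0dB for anything unsupported. A tiny stand-alone illustration of the bit-combination decode (user-space C; the mask/bit values are stand-ins, not the real DP defines):

#include <stdio.h>

#define VSWING_MASK	0x03	/* stand-in for DP_TRAIN_VOLTAGE_SWING_MASK */
#define PREEMPH_MASK	0x18	/* stand-in for DP_TRAIN_PRE_EMPHASIS_MASK */
#define VSWING_600	0x01
#define PREEMPH_3_5	0x08

int main(void)
{
	unsigned char train_set = VSWING_600 | PREEMPH_3_5;

	switch (train_set & (VSWING_MASK | PREEMPH_MASK)) {
	case VSWING_600 | PREEMPH_3_5:
		printf("EDP_LINK_TRAIN_600MV_3_5DB_SNB_B\n");
		break;
	default:
		printf("unsupported, fall back to 400mV/0dB\n");
		break;
	}
	return 0;
}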
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
@@ -974,7 +999,7 @@ static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
+ u32 reg;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, 0x100,
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
udelay(100);
}
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
+ }
udelay(17000);
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
POSTING_READ(dp_priv->output_reg);
}
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_encoder, dp_priv->save_DP);
-}
-
/*
* According to DP spec
* 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_encoder);
}
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ if (!encoder || encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
+ }
+ return -1;
+}
+
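Note: intel_trans_dp_port_sel() lets the CPT transcoder code discover which DP output register (DP_B/C/D) a CRTC is driving, since on CPT the pipe-to-port routing is programmed through TRANS_DP_CTL rather than the port register; it returns -1 when the CRTC has no DisplayPort encoder. A hypothetical caller sketch (not part of the patch, assumes the i915 definitions above):

static void example_program_trans_dp(struct drm_crtc *crtc)
{
	int output_reg = intel_trans_dp_port_sel(crtc);

	if (output_reg < 0)
		return;			/* no DisplayPort encoder on this CRTC */

	/* output_reg is one of DP_B, DP_C or DP_D; the transcoder's
	 * port-select field would be derived from it here. */
}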
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
@@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
if (output_reg == DP_A)
intel_encoder->type = INTEL_OUTPUT_EDP;
else
@@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f1..df931f78766 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
struct intel_encoder {
- struct drm_connector base;
-
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
int clone_mask;
};
+struct intel_connector {
+ struct drm_connector base;
+ void *dev_priv;
+};
+
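Note: with struct intel_connector introduced and the embedded drm_connector dropped from struct intel_encoder, connectors and encoders become separate allocations with independent teardown paths (connector destroy frees the connector, encoder destroy frees the encoder plus its i2c buses). A sketch of the init-path shape this patch applies to DP/HDMI/LVDS/DVO (hypothetical function name):

static int example_output_init(struct drm_device *dev)
{
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;

	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
	if (!intel_encoder)
		return -ENOMEM;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_encoder);	/* the two objects are freed independently */
		return -ENOMEM;
	}

	/* drm_encoder_init(), drm_connector_init() and
	 * drm_mode_connector_attach_encoder() would follow here. */
	return 0;
}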
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9..227feca7cf8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_dvo_save(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
+ kfree(connector);
}
-#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_encoder->base;
+ struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b..6f53cf7fbc5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intelfb_par {
+struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
- struct intelfb_par *par;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
+ fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
/* Flush everything out, we'll be doing GTT only from now on */
i915_gem_object_set_to_gtt_domain(fbo, 1);
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
- if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
- goto out_unpin;
- }
-
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
- par = info->par;
+ info->par = ifbdev;
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
-
info->fbops = &intelfb_ops;
-
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
- info->aperture_size = pci_resource_len(dev->pdev, 2);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
- info->aperture_size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
@@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
+ fb->width, fb->height,
+ obj_priv->gtt_offset, fbo);
+
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@ out:
return ret;
}
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
int ret;
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
}
-EXPORT_SYMBOL(intelfb_probe);
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
-
- if (info) {
- struct intelfb_par *par = info->par;
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj)
+ drm_gem_object_unreference_unlocked(ifb->obj);
+
+ return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &ifbdev->helper, 2,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
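Note: the exported intelfb_probe/remove/resize entry points are replaced by an intel_fbdev object hanging off dev_priv, created by intel_fbdev_init() and torn down by intel_fbdev_fini() (now called from intel_modeset_cleanup()), with hotplug events routed through intel_fb_output_poll_changed(). A lifecycle sketch under those assumptions (hypothetical wrapper names, not in the patch):

static int example_fbdev_bringup(struct drm_device *dev)
{
	return intel_fbdev_init(dev);	/* allocates dev_priv->fbdev, registers the fb helper */
}

static void example_fbdev_teardown(struct drm_device *dev)
{
	intel_fbdev_fini(dev);		/* unregisters the fb_info and frees dev_priv->fbdev */
}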
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b..65727f0a79a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
- u32 save_SDVOX;
bool has_hdmi_sink;
};
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_encoder->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_encoder);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_encoder->type = INTEL_OUTPUT_HDMI;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
+ kfree(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d3..6a1accd83ae 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
-static void intel_lvds_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder =
- to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
encoder = &intel_encoder->enc;
- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_encoder->base,
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
dev_priv->lvds_edid_good = true;
- if (!intel_ddc_get_modes(intel_encoder))
+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d12..4b1fd3d9c73 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, true);
ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+ intel_i2c_quirk_set(connector->dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_encoder->base,
- edid);
- ret = drm_add_edid_modes(&intel_encoder->base, edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
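Note: intel_ddc_get_modes() now takes the connector and an explicit i2c adapter instead of the intel_encoder, which is what lets callers probe an alternate DDC bus. A hedged sketch of the converted call pattern (hypothetical get_modes hook, matching the call sites updated elsewhere in this patch):

static int example_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	/* EDID is fetched over the encoder's DDC bus, but the modes and the
	 * EDID property end up on the connector. */
	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}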
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc27..b0e17b06eb6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
+ obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb..aba72c489a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) ((c)->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c) ((c)->output_flag & SDVO_LVDS_MASK)
+
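Note: the new output-mask defines group the SDVO_OUTPUT_* bits so a connector's output_flag can be classified with IS_TV()/IS_LVDS(). A small illustrative helper under the assumption that output_flag holds a single SDVO_OUTPUT_* bit (hypothetical, not in the patch):

static const char *example_sdvo_kind(struct intel_sdvo_connector *c)
{
	if (IS_TV(c))
		return "TV";
	if (IS_LVDS(c))
		return "LVDS";
	if (c->output_flag & SDVO_TMDS_MASK)
		return "TMDS";
	return "RGB";
}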
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
/* This is for current tv format name */
char *tv_format_name;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
/* Mac mini hack -- use the same DDC as the analog connector */
struct i2c_adapter *analog_ddc_bus;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ /* This contains all currently supported TV formats */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
/* add the property for the SDVO-TV */
struct drm_property *left_property;
struct drm_property *right_property;
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
*/
static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
+ return;
+ }
+
if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
- u16 *outputs)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
- struct intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
return false;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
sizeof(format));
status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
dev_priv->sdvo_lvds_fixed_mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ sdvo_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if (0)
intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
-static void intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_2);
- }
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_get_output_timing(intel_encoder,
- &sdvo_priv->save_output_dtd[o]);
- }
- }
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
- }
-
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_encoder, 0);
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
- }
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
- }
-
- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
- }
-
- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
return true;
}
+/* Currently unused. */
+#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_encoder, &response, 2);
}
+#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_encoder = to_intel_encoder(connector);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
- return connector;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector && encoder == intel_attached_encoder(connector))
+ return connector;
+ }
+ }
}
return NULL;
}
@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
}
enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while(temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
- if (edid == NULL &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev))
- edid = drm_get_edid(&intel_encoder->base,
- sdvo_priv->analog_ddc_bus);
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(connector->dev))
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
- */
- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- sdvo_priv->is_hdmi =
- drm_detect_hdmi_monitor(edid);
- else
- status = connector_status_disconnected;
- }
+ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
- kfree(edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ /* DDC bus is shared, match EDID to connector type */
+ if (is_digital && need_digital)
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ else if (is_digital != need_digital)
+ status = connector_status_disconnected;
- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ connector->display_info.raw_edid = NULL;
+ } else
status = connector_status_disconnected;
+
+ kfree(edid);
return status;
}
@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ enum drm_connector_status ret;
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_encoder) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_encoder, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ sdvo_priv->attached_output = response;
+
+ if ((sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (response & SDVO_TMDS_MASK)
+ ret = intel_sdvo_hdmi_sink_detect(connector);
+ else
+ ret = connector_status_connected;
+
+ /* Update encoder flags as needed, e.g. the TV clock requirement for SDVO TV. */
+ if (ret == connector_status_connected) {
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ sdvo_priv->is_lvds = true;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+ return ret;
}
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_encoder);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
-
+ !intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_encoder->ddc_bus;
- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_encoder);
-
- intel_encoder->ddc_bus = digital_ddc_bus;
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
sizeof(format_map) ? sizeof(format_map) :
sizeof(struct intel_sdvo_sdtv_resolution_request));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1902,12 +1795,12 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->is_tv)
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
if (sdvo_priv->left_property)
drm_property_destroy(dev, sdvo_priv->left_property);
if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
drm_property_destroy(dev, sdvo_priv->hpos_property);
if (sdvo_priv->vpos_property)
drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
if (sdvo_priv->saturation_property)
drm_property_destroy(dev,
sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
if (sdvo_priv->hue_property)
drm_property_destroy(dev, sdvo_priv->hue_property);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_priv->brightness_property)
drm_property_destroy(dev,
sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->tv_format_property)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ sdvo_connector->tv_format_property);
+ intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
-
- kfree(intel_encoder);
+ kfree(connector);
}
static int
@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret < 0)
goto out;
- if (property == sdvo_priv->tv_format_property) {
+ if (property == sdvo_connector->tv_format_property) {
if (val >= TV_FORMAT_NUM) {
ret = -EINVAL;
goto out;
}
if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
+ sdvo_connector->tv_format_supported[val])
goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
changed = true;
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
cmd = 0;
temp_value = val;
- if (sdvo_priv->left_property == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
goto out;
cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
goto out;
cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
goto out;
cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
goto out;
cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
+ sdvo_connector->cur_brightness = temp_value;
}
if (cmd) {
intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
* outputs, then LVDS outputs.
*/
static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
- uint16_t mask = 0;
- unsigned int num_bits;
-
- /* Make a mask of outputs less than or equal to our own priority in the
- * list.
- */
- switch (dev_priv->controlled_output) {
- case SDVO_OUTPUT_LVDS1:
- mask |= SDVO_OUTPUT_LVDS1;
- case SDVO_OUTPUT_LVDS0:
- mask |= SDVO_OUTPUT_LVDS0;
- case SDVO_OUTPUT_TMDS1:
- mask |= SDVO_OUTPUT_TMDS1;
- case SDVO_OUTPUT_TMDS0:
- mask |= SDVO_OUTPUT_TMDS0;
- case SDVO_OUTPUT_RGB1:
- mask |= SDVO_OUTPUT_RGB1;
- case SDVO_OUTPUT_RGB0:
- mask |= SDVO_OUTPUT_RGB0;
- break;
- }
+ struct sdvo_device_mapping *mapping;
- /* Count bits to find what number we are in the priority list. */
- mask &= dev_priv->caps.output_flags;
- num_bits = hweight16(mask);
- if (num_bits > 3) {
- /* if more than 3 outputs, default to DDC bus 3 for now */
- num_bits = 3;
- }
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
- /* Corresponds to SDVO_CONTROL_BUS_DDCx */
- dev_priv->ddc_bus = 1 << num_bits;
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@ static struct intel_encoder *
intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
- intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
break;
- }
}
return intel_encoder;
}
@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (sdvo_reg == SDVOB) {
+ if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (sdvo_reg == SDVOB)
+ if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
{
- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
- return 1;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
}
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
- {
- .callback = intel_sdvo_bad_tv_callback,
- .ident = "IntelG45/ICH10R/DME1737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
- },
- },
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+ connector->connector_type);
- { } /* terminating entry */
-};
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_sysfs_connector_add(connector);
+}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_connector *connector = &intel_encoder->base;
struct drm_encoder *encoder = &intel_encoder->enc;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- bool ret = true, registered = false;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
+
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ intel_sdvo_tv_create_property(connector, type);
+
+ intel_sdvo_create_enhance_property(connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
sdvo_priv->is_tv = false;
intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
- }
+ /* SDVO requires that an XXX1 output can only exist if the corresponding XXX0 output also exists. */
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_encoder,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_encoder->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if ((flags & SDVO_OUTPUT_SVID0) &&
- !dmi_check_system(intel_sdvo_bad_tv)) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_CVBS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else {
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ return false;
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
- return ret;
-
+ return true;
}
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, type);
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
if (format_map == 0)
return;
- sdvo_priv->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
tv_format_names[i];
}
- sdvo_priv->tv_format_property =
+ sdvo_connector->tv_format_property =
drm_property_create(
connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ connector, sdvo_connector->tv_format_property, 0);
}
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
DRM_DEBUG_KMS("No enhancement is supported\n");
return;
}
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
/* when horizontal overscan is supported, Add the left/right
* property
*/
@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
- }
- if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_data.brightness) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
-
u8 ch[0x40];
int i;
+ u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_encoder) {
@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ if (HAS_PCH_SPLIT(dev)) {
+ i2c_reg = PCH_GPIOE;
+ ddc_reg = PCH_GPIOE;
+ analog_ddc_reg = PCH_GPIOA;
+ } else {
+ i2c_reg = GPIOE;
+ ddc_reg = GPIOE;
+ analog_ddc_reg = GPIOA;
+ }
+
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB)
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (IS_SDVOB(sdvo_reg))
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
else
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ if (IS_SDVOB(sdvo_reg)) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* Wrap with our custom algo which switches to DDC mode */
intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
/* In default case sdvo lvds is false */
intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
-
- connector = &intel_encoder->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
-
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
-
- drm_sysfs_connector_add(connector);
-
- intel_sdvo_select_ddc_bus(sdvo_priv);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
intel_sdvo_set_target_input(intel_encoder, true, false);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327d..6d553c29d10 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-intel_tv_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int i;
-
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
-
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ &mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder, connector,
+ dpms_mode);
} else
type = -1;
}
@@ -1525,7 +1392,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
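The i915 hunks above move the TV output from one combined connector/encoder object to separately allocated intel_connector and intel_encoder structures: the connector resolves its encoder through intel_attached_encoder(), and each object is freed by its own destroy callback. A minimal userspace sketch of that ownership pattern, with all type and function names invented for illustration:

/* Stand-in types only; this is not the DRM code. */
#include <stdlib.h>
#include <stdio.h>

struct fake_encoder   { int type; };
struct fake_connector { struct fake_encoder *attached; };

static struct fake_encoder *attached_encoder(struct fake_connector *c)
{
	return c->attached;
}

int main(void)
{
	struct fake_encoder *enc = calloc(1, sizeof(*enc));
	struct fake_connector *con = calloc(1, sizeof(*con));

	if (!enc || !con) {              /* mirror the paired-failure cleanup */
		free(enc);
		free(con);
		return 1;
	}
	con->attached = enc;             /* like drm_mode_connector_attach_encoder */
	printf("attached encoder type %d\n", attached_encoder(con)->type);

	free(con);                       /* connector destroy frees the connector */
	free(enc);                       /* encoder destroy frees the encoder */
	return 0;
}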
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f6053..acd31ed861e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o nv50_gpio.o
+ nv17_gpio.o nv50_gpio.o \
+ nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a9918..e7e69ccce5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
#define NV_DEBUG_NOTRACE
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
+ /* Return:
+ * > 0: success, length of opcode
+ * 0: success, but abort further parsing of table (INIT_DONE etc)
+ * < 0: failure, table parsing will be aborted
+ */
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
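The new comment documents the return convention every init-table handler now follows: a positive return is the opcode length to advance by, zero stops parsing (INIT_DONE and friends), and a negative value aborts with an error. A small self-contained sketch of a dispatch loop honouring that convention; the handler table and opcodes here are made up:

/* Hypothetical dispatcher, not the nouveau parser itself. */
#include <stdint.h>
#include <stdio.h>

struct ctx { int unused; };

static int op_nop(struct ctx *c, uint16_t offset)
{
	(void)c; (void)offset;
	return 1;                        /* 1-byte opcode, keep parsing */
}

static int op_done(struct ctx *c, uint16_t offset)
{
	(void)c; (void)offset;
	return 0;                        /* stop parsing, not an error */
}

struct op_entry {
	const char *name;
	uint8_t id;
	/* > 0: opcode length, 0: stop parsing, < 0: error */
	int (*handler)(struct ctx *, uint16_t);
};

static const struct op_entry ops[] = {
	{ "NOP",  0x00, op_nop  },
	{ "DONE", 0x71, op_done },
	{ NULL, 0, NULL },
};

static int parse(struct ctx *c, const uint8_t *tbl, uint16_t offset, uint16_t len)
{
	while (offset < len) {
		uint8_t id = tbl[offset];
		int i, ret;

		for (i = 0; ops[i].name && ops[i].id != id; i++)
			;
		if (!ops[i].name)
			return -1;       /* unknown opcode */

		ret = ops[i].handler(c, offset);
		if (ret < 0)
			return ret;      /* handler failed */
		if (ret == 0)
			break;           /* e.g. INIT_DONE */
		offset += ret;           /* skip opcode plus its data */
	}
	return 0;
}

int main(void)
{
	struct ctx c = { 0 };
	const uint8_t table[] = { 0x00, 0x00, 0x71 };

	printf("parse returned %d\n", parse(&c, table, 0, sizeof(table)));
	return 0;
}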
@@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40) {
+ if (port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+ }
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
+
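read_dcb_i2c_entry() locates an entry at headerlen + recordoffset + entry_len * index and then picks the read/write address bytes via rdofs/wrofs, which collapse to a single offset on port types 4 and 5+. A standalone sketch of that offset arithmetic, using an invented DCB 3.0-style table:

/* Illustrative arithmetic only; the table bytes are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t i2ctable[] = {
		0x30, 0x05, 0x02, 0x04, 0x00,    /* version, headerlen, count, entry_len */
		0x3e, 0x3f, 0x00, 0x00,          /* entry 0: wr, rd, pad, port type */
		0x36, 0x37, 0x00, 0x05,          /* entry 1: G80-style port */
	};
	int index = 1;
	int headerlen = i2ctable[1];
	int entry_len = i2ctable[3];
	int recordoffset = 0, rdofs = 1, wrofs = 0;
	int base = headerlen + recordoffset + entry_len * index;
	uint8_t port_type = i2ctable[base + 3];

	if (port_type == 4)                      /* shared offset, C51-style */
		rdofs = wrofs = 1;
	if (port_type >= 5)                      /* G80+ */
		rdofs = wrofs = 0;

	printf("read 0x%02x write 0x%02x type %d\n",
	       i2ctable[base + rdofs], i2ctable[base + wrofs], port_type);
	return 0;
}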
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
@@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
}
if (i2c_index == 0x80) /* g80+ */
i2c_index = dcb->i2c_default_indices & 0xf;
+ else
+ if (i2c_index == 0x81)
+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+ if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+ return NULL;
+ }
+
+	/* Make sure the i2c table entry has been parsed; it may not have
+	 * been if this is a bus not referenced by a DCB encoder
+ */
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
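The added 0x81 case mirrors the existing 0x80 one: the two default bus indices are packed as the low and high nibbles of dcb->i2c_default_indices. A tiny sketch of that decoding, with a made-up byte value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t i2c_default_indices = 0x42;                  /* invented DCB byte */
	int primary   = i2c_default_indices & 0xf;           /* index 0x80 */
	int secondary = (i2c_default_indices & 0xf0) >> 4;   /* index 0x81 */

	printf("0x80 -> bus %d, 0x81 -> bus %d\n", primary, secondary);
	return 0;
}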
@@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1067,6 +1164,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
}
static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DP_CONDITION opcode: 0x3A ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): "sub" opcode
+ * offset + 2 (8 bit): unknown
+ *
+ */
+
+ struct bit_displayport_encoder_table *dpe = NULL;
+ struct dcb_entry *dcb = bios->display.output;
+ struct drm_device *dev = bios->dev;
+ uint8_t cond = bios->data[offset + 1];
+ int dummy;
+
+ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+ if (!iexec->execute)
+ return 3;
+
+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+ if (!dpe) {
+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+ return -EINVAL;
+ }
+
+ switch (cond) {
+ case 0:
+ {
+ struct dcb_connector_table_entry *ent =
+ &bios->dcb.connector.entry[dcb->connector];
+
+ if (ent->type != DCB_CONNECTOR_eDP)
+ iexec->execute = false;
+ }
+ break;
+ case 1:
+ case 2:
+ if (!(dpe->unknown & cond))
+ iexec->execute = false;
+ break;
+ case 5:
+ {
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+ if (ret)
+ return ret;
+
+ if (cond & 1)
+ iexec->execute = false;
+ }
+ break;
+ default:
+ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+ break;
+ }
+
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+ return 3;
+}
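Like the other condition opcodes, INIT_DP_CONDITION does not stop table parsing when its test fails; it clears iexec->execute so later handlers skip their side effects while still returning their opcode lengths. A hypothetical miniature of that gating behaviour:

/* Sketch only; struct and opcodes are invented. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct exec_state { bool execute; };

static int op_condition(struct exec_state *x, bool cond_met)
{
	if (!cond_met)
		x->execute = false;      /* skip following commands */
	return 3;                        /* opcode still consumes 3 bytes */
}

static int op_write(struct exec_state *x, uint8_t data)
{
	if (x->execute)
		printf("write 0x%02x\n", data);
	else
		printf("skip  0x%02x\n", data);
	return 2;
}

int main(void)
{
	struct exec_state x = { .execute = true };

	op_write(&x, 0x11);              /* executed */
	op_condition(&x, false);         /* condition fails */
	op_write(&x, 0x22);              /* skipped, but its length still counts */
	return 0;
}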
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3B opcode: 0x3B ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+ return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3C opcode: 0x3C ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+ return 2;
+}
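INIT_3B and INIT_3C are mirror images: both index a CRTC register through port 0x3d4 and clear or set the bit selected by the output resource number. A sketch of just the bit manipulation, with the register I/O replaced by a plain variable:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t reg = 0x0f;              /* stand-in for the indexed CRTC reg */
	int or_bit = 2;                  /* ffs(output resource) - 1 */

	reg &= (uint8_t)~(1u << or_bit); /* INIT_3B: clear the OR's bit */
	printf("after 3B: 0x%02x\n", reg);

	reg |= (uint8_t)(1u << or_bit);  /* INIT_3C: set it again */
	printf("after 3C: 0x%02x\n", reg);
	return 0;
}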
+
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 3;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t reg = bios->data[offset + 4 + i * 3];
uint8_t mask = bios->data[offset + 5 + i * 3];
uint8_t data = bios->data[offset + 6 + i * 3];
- uint8_t value;
+ union i2c_smbus_data val;
- msg.addr = i2c_address;
- msg.flags = I2C_M_RD;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, value, mask, data);
+ offset, reg, val.byte, mask, data);
- value = (value & mask) | data;
+ if (!bios->execute)
+ continue;
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ val.byte &= mask;
+ val.byte |= data;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
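Two conversions happen in this hunk: the VBIOS stores I2C addresses in 8-bit form, so shifting right by one yields the 7-bit address the SMBus helpers expect, and each register update becomes a read-modify-write of (value & mask) | data. A sketch of both steps with invented values and no real bus access:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t vbios_addr = 0xa0;               /* 8-bit form from the table */
	uint8_t linux_addr = vbios_addr >> 1;    /* 7-bit form: 0x50 */

	uint8_t current = 0x3c;                  /* pretend SMBus read result */
	uint8_t mask = 0xf0, data = 0x05;        /* from the init-table entry */
	uint8_t out = (current & mask) | data;   /* value written back */

	printf("addr 0x%02x, write 0x%02x\n", linux_addr, out);
	return 0;
}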
@@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 2;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
- uint8_t data = bios->data[offset + 5 + i * 2];
+ uint8_t reg = bios->data[offset + 4 + i * 2];
+ union i2c_smbus_data val;
+
+ val.byte = bios->data[offset + 5 + i * 2];
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, data);
-
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ offset, reg, val.byte);
+
+ if (!bios->execute)
+ continue;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
@@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count;
struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ return -EIO;
}
return len;
@@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return 0;
+ return -EINVAL;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return 0;
+ return ret;
}
data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_COPY" , 0x37, init_copy },
{ "INIT_NOT" , 0x38, init_not },
{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
+ { "INIT_OP_3B" , 0x3B, init_op_3b },
+ { "INIT_OP_3C" , 0x3C, init_op_3c },
{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
{ "INIT_PLL2" , 0x4B, init_pll2 },
@@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i, res;
+ int count = 0, i, ret;
uint8_t id;
/*
@@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
;
- if (itbl_entry[i].name) {
- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
- offset, itbl_entry[i].id, itbl_entry[i].name);
-
- /* execute eventual command handler */
- res = (*itbl_entry[i].handler)(bios, offset, iexec);
- if (!res)
- break;
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += res;
- } else {
+ if (!itbl_entry[i].name) {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
+
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+ itbl_entry[i].id, itbl_entry[i].name);
+
+ /* execute eventual command handler */
+ ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (ret < 0) {
+ NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+ "table opcode: %s %d\n", offset,
+ itbl_entry[i].name, ret);
+ }
+
+ if (ret <= 0)
+ break;
+
+ /*
+ * Add the offset of the current command including all data
+	 * of that command. The offset will then point at the
+	 * next op code.
+ */
+ offset += ret;
}
if (offset >= bios->length)
@@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
break;
}
-#if 0 /* for easy debugging */
- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
- ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ if (pll_lim->vco2.maxfreq) {
+ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+ }
+ if (!pll_lim->max_p) {
+ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+ } else {
+ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+ }
+ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
return 0;
}
@@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a3ece..adf4ec2d06c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -35,6 +35,7 @@
#define DCB_LOC_ON_CHIP 0
struct dcb_i2c_entry {
+ uint32_t entry;
uint8_t port_type;
uint8_t read, write;
struct nouveau_i2c_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d1762984..6f3c1952237 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
- nvbo->channel = NULL;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ nvbo->channel = NULL;
spin_lock(&dev_priv->ttm.bo_list_lock);
list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
-
- man->io_addr = NULL;
- man->io_offset = drm_get_resource_start(dev, 1);
- man->io_size = drm_get_resource_len(dev, 1);
- if (man->io_size > dev_priv->vram_size)
- man->io_size = dev_priv->vram_size;
-
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
-
- man->io_offset = dev_priv->gart_info.aper_base;
- man->io_size = dev_priv->gart_info.aper_size;
- man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+ .io_mem_free = &nouveau_ttm_io_mem_free,
};
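Instead of preconfiguring io_offset/io_size on the memory-type manager, the new io_mem_reserve hook describes each buffer's aperture as a (base, offset, size, is_iomem) tuple that TTM resolves on demand. A sketch of how such a tuple yields the physical address a mapping would target, using invented numbers:

#include <stdio.h>
#include <stdint.h>

struct bus_placement {
	uint64_t base;                   /* e.g. start of the VRAM BAR */
	uint64_t offset;                 /* mm_node->start << PAGE_SHIFT */
	uint64_t size;                   /* num_pages << PAGE_SHIFT */
	int      is_iomem;
};

int main(void)
{
	struct bus_placement p = {
		.base = 0xd0000000ull,   /* hypothetical BAR start */
		.offset = 0x200000ull,
		.size = 16ull << 12,
		.is_iomem = 1,
	};
	uint64_t phys = p.base + p.offset;       /* what a mapping would target */

	printf("map %llu bytes at 0x%llx (iomem=%d)\n",
	       (unsigned long long)p.size,
	       (unsigned long long)phys, p.is_iomem);
	return 0;
}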
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e47e5..266b0ff441a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
if (nv_encoder && nv_connector->native_mode) {
unsigned status = connector_status_connected;
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (!nouveau_ignorelid && !acpi_lid_open())
status = connector_status_unknown;
#endif
@@ -843,6 +844,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@@ -854,6 +856,17 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
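The connector hunk picks a hotplug strategy per DCB connector type: HPD-capable polling on NV50+ digital outputs, periodic connect polling otherwise and for VGA. A sketch of that decision, with placeholder enum values rather than the DRM constants:

#include <stdio.h>

enum poll_mode { POLL_NONE, POLL_CONNECT, POLL_HPD };

static enum poll_mode pick_poll(int is_digital, int has_hpd)
{
	if (is_digital && has_hpd)
		return POLL_HPD;         /* hotplug interrupt available */
	return POLL_CONNECT;             /* fall back to periodic probing */
}

int main(void)
{
	printf("nv50 DVI: %d, nv04 DVI: %d, VGA: %d\n",
	       pick_poll(1, 1), pick_poll(1, 0), pick_poll(0, 0));
	return 0;
}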
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce..7933de4aff2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0ab..74e6b4ed12c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct drm_device *dev = drm_fb->dev;
-
- if (drm_fb->fbdev)
- nouveau_fbcon_remove(dev, drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
- struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
- struct nouveau_framebuffer *fb;
int ret;
- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
- kfree(fb);
- return NULL;
+ return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
- fb->nvbo = nvbo;
- return &fb->base;
+ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+ nouveau_fb->nvbo = nvbo;
+ return 0;
}
static struct drm_framebuffer *
@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
- if (!fb) {
+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!nouveau_fb)
+ return NULL;
+
+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+ if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
- return fb;
+ return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .fb_changed = nouveau_fbcon_probe,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
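The framebuffer path changes from a create() helper that allocates and returns a new object to an init() helper that fills in a caller-embedded struct and returns an error code, which is what lets nouveau_fbdev embed its framebuffer directly. A sketch of that init-into-embedded-struct pattern with placeholder types:

#include <stdio.h>
#include <stdlib.h>

struct fb  { int pitch; };
struct nfb { struct fb base; int extra; };       /* fb embedded, not pointed to */

static int fb_init(struct fb *fb, int pitch)
{
	if (pitch <= 0)
		return -1;                       /* error code, no allocation here */
	fb->pitch = pitch;
	return 0;
}

int main(void)
{
	struct nfb *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	if (fb_init(&n->base, 1024)) {           /* caller owns the storage */
		free(n);
		return 1;
	}
	printf("pitch %d\n", n->base.pitch);
	free(n);
	return 0;
}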
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc6..c6079e36669 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 1);
+ nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
- dev_priv->fbdev_info->flags = fbdev_flags;
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 0);
+ nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
- dev_priv->fbdev_info->flags = fbdev_flags;
+
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e..5b134438eff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
struct fb_info *fbdev_info;
+ int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct nouveau_engine engine;
@@ -621,6 +622,9 @@ struct drm_nouveau_private {
struct {
struct dentry *channel_root;
} debugfs;
+
+ struct nouveau_fbdev *nfbdev;
+ struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv50_calc. */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+ int clk, int *N, int *fN, int *M, int *P);
+
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 9f28b94e479..e1df8209cd0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -48,6 +48,8 @@ struct nouveau_encoder {
union {
struct {
int mc_unknown;
+ uint32_t unk0;
+ uint32_t unk1;
int dpcd_version;
int link_nr;
int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa194..d432134b71e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
- struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912..fd4a2df715e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
-{
- struct pci_dev *pdev = dev->pdev;
- int ramin;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return false;
-
- if (screen_info.lfb_base < pci_resource_start(pdev, 1))
- goto not_fb;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
- goto not_fb;
-
- return true;
-not_fb:
- ramin = 2;
- if (pci_resource_len(pdev, ramin) == 0) {
- ramin = 3;
- if (pci_resource_len(pdev, ramin) == 0)
- return false;
- }
-
- if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
- return false;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
- return false;
-
- return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height, uint32_t surface_depth,
- uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
- struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev->pdev;
+ struct device *device = &pdev->dev;
int size, ret;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
- if (!fb) {
+ info = framebuffer_alloc(0, device);
+ if (!info) {
ret = -ENOMEM;
- NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- nouveau_fb = nouveau_framebuffer(fb);
- *pfb = fb;
-
- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
- if (!info) {
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
ret = -ENOMEM;
goto out_unref;
}
- par = info->par;
- par->helper.funcs = &nouveau_fbcon_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
- if (ret)
- goto out_unref;
- dev_priv->fbdev_info = info;
+ info->par = nfbdev;
+
+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+ nouveau_fb = &nfbdev->nouveau_fb;
+ fb = &nouveau_fb->base;
+
+ /* setup helper */
+ nfbdev->helper.fb = fb;
+ nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+ info->fix.mmio_start = pci_resource_start(pdev, 1);
+ info->fix.mmio_len = pci_resource_len(pdev, 1);
/* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
- /* Some NVIDIA VBIOS' are stupid and decide to put the
- * framebuffer in the middle of the PRAMIN BAR for
- * whatever reason. We need to know the exact lfb_base
- * to get vesafb kicked off, and the only reliable way
- * we have left is to find out lfb_base the same way
- * vesafb did.
- */
- info->aperture_base = screen_info.lfb_base;
- info->aperture_size = screen_info.lfb_size;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
- info->aperture_size *= 65536;
- } else
-#endif
- {
- info->aperture_base = info->fix.mmio_start;
- info->aperture_size = info->fix.mmio_len;
+ info->apertures = dev_priv->apertures;
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
}
info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->nouveau_fb = nouveau_fb;
- par->dev = dev;
-
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill(dev, nfbdev);
	/* To allow resizing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@ out:
return ret;
}
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = nouveau_fbcon_create(nfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
- if (info) {
- struct nouveau_fbcon_par *par = info->par;
-
+ if (nfbdev->helper.fbdev) {
+ info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- if (par)
- drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
}
-
+ drm_fb_helper_fini(&nfbdev->helper);
+ drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get,
+ .fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *nfbdev;
+
+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!nfbdev)
+ return -ENOMEM;
+
+ nfbdev->dev = dev;
+ dev_priv->nfbdev = nfbdev;
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
+ return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->nfbdev)
+ return;
+
+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+ kfree(dev_priv->nfbdev);
+ dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c1..e7e12684c37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
struct drm_fb_helper helper;
+ struct nouveau_framebuffer nouveau_fb;
+ struct list_head fbdev_list;
struct drm_device *dev;
- struct nouveau_framebuffer *nouveau_fb;
+ unsigned int saved_flags;
};
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a516..69c76cf9340 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
+
+ drm_gem_object_release(gem);
+ kfree(gem);
}
int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 32f0e495464..f731c5f6053 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxprog) {
NV_ERROR(dev, "OOM copying ctxprog\n");
release_firmware(fw);
return -ENOMEM;
}
- memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxvals) {
NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
}
- memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
if (le32_to_cpu(cv->signature) != 0x5643564e ||
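
kmemdup() used above is simply the allocate-then-copy idiom rolled into one call, which is why the separate memcpy() lines can go away. Roughly, it boils down to:

/* what kmemdup(src, len, gfp) boils down to */
void *kmemdup_equiv(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}
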
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 88583e7bf65..316a3c7e6eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
- return NULL;
+ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ uint32_t reg = 0xe500, val;
+
+ if (i2c->port_type == 6) {
+ reg += i2c->read * 0x50;
+ val = 0x2002;
+ } else {
+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ val = 0xe001;
+ }
+
+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
- return bios->dcb.i2c[index].chan;
+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+ return NULL;
+ return i2c->chan;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c4..53360f15606 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, fbdev_flags = 0;
+ uint32_t status;
unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- if (dev_priv->fbdev_info) {
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
- if (dev_priv->fbdev_info)
- dev_priv->fbdev_info->flags = fbdev_flags;
-
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;
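
With this change the IRQ handler no longer disables fbcon acceleration around every interrupt; the same flag save/restore is now available once as nouveau_fbcon_save_disable_accel()/nouveau_fbcon_restore_accel() (added in nouveau_fbcon.c above), so it can wrap only the code paths that genuinely need fbcon kept off the hardware. A sketch of that usage, with an illustrative caller name:

static void nouveau_channel_teardown_sketch(struct drm_device *dev)
{
	nouveau_fbcon_save_disable_accel(dev);

	/* ... tear down / reinitialise the channel here ... */

	nouveau_fbcon_restore_accel(dev);
}
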
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310e41b..6ca80a3fe70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a27..e632339c323 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_helper_initial_config(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
+ }
return 0;
@@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct apertures_struct *aper = alloc_apertures(3);
+ if (!aper)
+ return NULL;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+ return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool primary = false;
+ dev_priv->apertures = nouveau_get_apertures(dev);
+ if (!dev_priv->apertures)
+ return -ENOMEM;
+
+#ifdef CONFIG_X86
+ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+ return 0;
+}
+
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ return ret;
+ }
+
/* map larger RAMIN aperture on NV40 cards */
dev_priv->ramin = NULL;
if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else
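
nouveau_get_apertures() and nouveau_remove_conflicting_drivers() replace the old vesafb takeover heuristics in the fbcon code: every BAR that could back a firmware framebuffer is listed, and remove_conflicting_framebuffers() then evicts whatever generic driver (vesafb, efifb) is sitting on those ranges. The same pattern, stripped down to a hypothetical single-BAR device:

static int kick_out_firmware_fb_sketch(struct pci_dev *pdev)
{
	struct apertures_struct *ap = alloc_apertures(1);

	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);

	/* "primary" only matters when a shadowed VGA ROM marks the boot device */
	remove_conflicting_framebuffers(ap, "examplefb", false);
	kfree(ap);
	return 0;
}

Unlike this sketch, the nouveau code keeps the apertures_struct around in dev_priv so the fbdev's fb_info can point at it later.
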
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec72..1eeac4fae73 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
0x009f : 0x005f, NvImageBlit);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986ea65..618355e9cdd 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
return 0;
}
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bits 20-22: dither mode
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ * - bit 26: surface_src/surface_zeta valid
+ * - bit 27: pattern valid
+ * - bit 28: rop valid
+ * - bit 29: beta1 valid
+ * - bit 30: beta4 valid
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on selected operation: for
+ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04 and
+ * the last word isn't actually used for anything, we abuse it for this
+ * purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
uint32_t tmp;
tmp = nv_ri32(dev, instance);
- tmp &= ~0x00038000;
- tmp |= ((data & 7) << 15);
+ tmp &= ~mask;
+ tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ uint32_t tmp, ctx1;
+ int class, op, valid = 1;
+
+ ctx1 = nv_ri32(dev, instance);
+ class = ctx1 & 0xff;
+ op = (ctx1 >> 15) & 7;
+ tmp = nv_ri32(dev, instance + 0xc);
+ tmp &= ~mask;
+ tmp |= value;
+ nv_wi32(dev, instance + 0xc, tmp);
+
+ /* check for valid surf2d/surf_dst/surf_color */
+ if (!(tmp & 0x02000000))
+ valid = 0;
+ /* check for valid surf_src/surf_zeta */
+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+ valid = 0;
+
+ switch (op) {
+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+ case 0:
+ case 3:
+ break;
+ /* ROP_AND: requires pattern and rop */
+ case 1:
+ if (!(tmp & 0x18000000))
+ valid = 0;
+ break;
+ /* BLEND_AND: requires beta1 */
+ case 2:
+ if (!(tmp & 0x20000000))
+ valid = 0;
+ break;
+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+ case 4:
+ case 5:
+ if (!(tmp & 0x40000000))
+ valid = 0;
+ break;
+ }
+
+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (data > 5)
+ return 1;
+	/* Old versions of the objects only accept the first three operations. */
+ if (data > 2 && grclass < 0x40)
+ return 1;
+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ /* changing operation changes set of objects needed for validation */
+ nv04_graph_set_ctx_val(chan, 0, 0);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x40053c, min);
+ nv_wr32(chan->dev, 0x400544, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x400540, min);
+ nv_wr32(chan->dev, 0x400548, max);
return 0;
}
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ case 0x52:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x18:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x44:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ return 0;
+ case 0x43:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ return 0;
+ case 0x12:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ return 0;
+ case 0x72:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x58:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x59:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x5a:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x5b:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x2000, 0);
+ return 0;
+ case 0x19:
+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x1000, 0);
+ return 0;
+ /* Yes, for some reason even the old versions of objects
+ * accept 0x57 and not 0x17. Consistency be damned.
+ */
+ case 0x57:
+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ return 0;
+ }
+ return 1;
+}
+
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+ { 0x0184, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+ { 0x0188, nv04_graph_mthd_bind_chroma },
+ { 0x018c, nv04_graph_mthd_bind_clip },
+ { 0x0190, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ {},
+};
+
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, NULL },
- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0038, false, NULL }, /* dvd subpicture */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+ { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+ { 0x0064, false, NULL }, /* nv05 iifc */
+ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+ { 0x0065, false, NULL }, /* nv05 ifc */
+ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+ { 0x0066, false, NULL }, /* nv05 sifc */
+ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
+ { 0x0018, false, NULL }, /* nv01 pattern */
+ { 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, NULL }, /* surf3d */
+ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x0017, false, NULL }, /* nv01 chroma */
+ { 0x0057, false, NULL }, /* nv04 chroma */
+ { 0x0058, false, NULL }, /* surf_dst */
+ { 0x0059, false, NULL }, /* surf_src */
+ { 0x005a, false, NULL }, /* surf_color */
+ { 0x005b, false, NULL }, /* surf_zeta */
+ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
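
The bind methods above all funnel into nv04_graph_set_ctx_val(), which keeps the NV05-style "object bound" bits in the otherwise unused fourth grobj word and recomputes the patch-valid bit from them. Restated as plain C (bit meanings follow the comment block earlier in this file; note the ROP_AND case tests the combined pattern|rop mask, exactly as the kernel code does):

#include <stdbool.h>
#include <stdint.h>

/* Should "patch valid" (ctx1 bit 24) be set for this grobj? */
static bool nv04_patch_valid_sketch(uint32_t ctx_val, int class, int op)
{
	/* surf2d / surf_dst / surf_color is always required */
	if (!(ctx_val & 0x02000000))
		return false;
	/* nv01 imageblit (0x1f) and nv03 tex_tri (0x48) also need surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(ctx_val & 0x04000000))
		return false;

	switch (op) {
	case 0: /* SRCCOPY_AND */
	case 3: /* SRCCOPY */
		return true;
	case 1: /* ROP_AND: pattern/rop */
		return ctx_val & 0x18000000;
	case 2: /* BLEND_AND: beta1 */
		return ctx_val & 0x20000000;
	case 4: /* SRCCOPY_PREMULT */
	case 5: /* BLEND_PREMULT: beta4 */
		return ctx_val & 0x40000000;
	default: /* set_operation already rejects anything above 5 */
		return true;
	}
}
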
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b6..704a25d04ac 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
if (!dev_priv->engine.graph.ctxprog) {
struct nouveau_grctx ctx = {};
- uint32_t cp[256];
+ uint32_t *cp;
+
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+ kfree(cp);
}
/* No context present currently */
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c31f54..9b5c9746958 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
/* TODO:
* - get vs count from 0x1540
- * - document unimplemented bits compared to nvidia
- * - nsource handling
- * - R0 & 0x0200 handling
- * - single-vs handling
- * - 400314 bit 0
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 00000000000..2cdc2bfe717
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P)
+{
+ struct nouveau_pll_vals pll_vals;
+ int ret;
+
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+ if (ret <= 0)
+ return ret;
+
+ *N1 = pll_vals.N1;
+ *M1 = pll_vals.M1;
+ *N2 = pll_vals.N2;
+ *M2 = pll_vals.M2;
+ *P = pll_vals.log2P;
+ return ret;
+}
+
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N, int *fN, int *M, int *P)
+{
+ fixed20_12 fb_div, a, b;
+
+ *P = pll->vco1.maxfreq / clk;
+ if (*P > pll->max_p)
+ *P = pll->max_p;
+ if (*P < pll->min_p)
+ *P = pll->min_p;
+
+	/* *M = ceil(refclk / pll->vco1.max_inputfreq); */
+ a.full = dfixed_const(pll->refclk);
+ b.full = dfixed_const(pll->vco1.max_inputfreq);
+ a.full = dfixed_div(a, b);
+ a.full = dfixed_ceil(a);
+ *M = dfixed_trunc(a);
+
+ /* fb_div = (vco * *M) / refclk; */
+ fb_div.full = dfixed_const(clk * *P);
+ fb_div.full = dfixed_mul(fb_div, a);
+ a.full = dfixed_const(pll->refclk);
+ fb_div.full = dfixed_div(fb_div, a);
+
+ /* *N = floor(fb_div); */
+ a.full = dfixed_floor(fb_div);
+ *N = dfixed_trunc(fb_div);
+
+ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+ b.full = dfixed_const(8192);
+ a.full = dfixed_mul(a, b);
+ fb_div.full = dfixed_mul(fb_div, b);
+ fb_div.full = fb_div.full - a.full;
+ *fN = dfixed_trunc(fb_div) - 4096;
+ *fN &= 0xffff;
+
+ return clk;
+}
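
nv50_calc_pll2() does the fractional-N arithmetic in 20.12 fixed point via drm_fixed.h. The same computation can be sanity-checked with plain 64-bit integers; a standalone sketch follows (the clock numbers are examples only, and because the dfixed_* helpers truncate at 12 fractional bits the last unit of fN may differ from this version):

#include <stdint.h>
#include <stdio.h>

/* P from the VCO ceiling, M = ceil(refclk / max_inputfreq),
 * N = floor(fb_div), fN = frac(fb_div) * 8192 - 4096. */
static void pll2_sketch(int64_t refclk, int64_t clk, int64_t vco_maxfreq,
			int64_t max_inputfreq, int min_p, int max_p)
{
	int P = vco_maxfreq / clk;
	int64_t fb;
	int M, N, fN;

	if (P > max_p)
		P = max_p;
	if (P < min_p)
		P = min_p;

	M = (refclk + max_inputfreq - 1) / max_inputfreq;	/* ceil */

	/* fb_div = clk * P * M / refclk, carried with 13 extra bits (8192 = 2^13) */
	fb = (clk * P * M * 8192) / refclk;
	N = fb >> 13;
	fN = (int)(fb & 0x1fff) - 4096;
	fN &= 0xffff;

	printf("P=%d M=%d N=%d fN=0x%04x\n", P, M, N, fN);
}

int main(void)
{
	/* example values only, all in kHz */
	pll2_sketch(27000, 270000, 2700000, 27000, 1, 10);
	return 0;
}
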
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cfabeb974a5..b4e4a3b05ea 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- struct nouveau_pll_vals pll;
- struct pll_lims limits;
+ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct pll_lims pll;
uint32_t reg1, reg2;
- int ret;
+ int ret, N1, M1, N2, M2, P;
- ret = get_pll_limits(dev, pll_reg, &limits);
+ ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
- if (ret <= 0)
- return ret;
+ if (pll.vco2.maxfreq) {
+ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+ pclk, ret, N1, M1, N2, M2, P);
- if (limits.vco2.maxfreq) {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
- nv_wr32(dev, pll_reg, 0x10000611);
- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
- nv_wr32(dev, pll_reg + 8,
- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+ nv_wr32(dev, reg, 0x10000611);
+ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
- nv_wr32(dev, pll_reg, 0x50000610);
- nv_wr32(dev, pll_reg + 4, reg1 |
- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+ nv_wr32(dev, reg, 0x50000610);
+ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, reg + 8, N2);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b69..580a5d10be9 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@@ -783,6 +784,37 @@ ack:
}
static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ uint32_t tmp, unk0 = 0, unk1 = 0;
+
+ if (dcb->type != OUTPUT_DP)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb == dcb) {
+ unk0 = nv_encoder->dp.unk0;
+ unk1 = nv_encoder->dp.unk1;
+ break;
+ }
+ }
+
+ if (unk0 || unk1) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= 0xfffffe03;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ tmp &= 0xfef080c0;
+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+ }
+}
+
+static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct dcb_entry *dcbent;
@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ nv50_display_unk20_dp_hack(dev, dcbent);
+
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+ drm_helper_hpd_irq_event(dev);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e918..6bf025c6fc6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 0c68698f23d..b11eaf9c5c7 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_clones = 0;
if (nv_encoder->dcb->type == OUTPUT_DP) {
- uint32_t mc, or = nv_encoder->or;
+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+ uint32_t tmp;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
else
- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
- switch ((mc & 0x00000f00) >> 8) {
+ switch ((tmp & 0x00000f00) >> 8) {
case 8:
case 9:
- nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ nv_encoder->dp.unk0 = tmp & 0x000001fc;
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 27e2c715be1..1bc72c3190a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;
-//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000              // Does the driver support the BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
+
+//M3 Arb //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
@@ -5895,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
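
The revision-2/3 PowerPlay wrappers grow the base table by appending offsets rather than widening the existing structures, so older parsers keep working. A sketch of how a parser might reach the new fan table through them -- the assumption (not stated in this header, but consistent with how the other usXxxOffset fields are consumed) is that the offsets are relative to the start of the PowerPlay data block, and that the caller has already established the table revision:

/* Hypothetical lookup; 'base' points at the start of the PowerPlay block. */
static ATOM_PPLIB_FANTABLE *get_fan_table_sketch(UCHAR *base, int table_rev)
{
	ATOM_PPLIB_POWERPLAYTABLE3 *t3 = (ATOM_PPLIB_POWERPLAYTABLE3 *)base;

	if (table_rev < 3 || !t3->usFanTableOffset)
		return NULL;

	return (ATOM_PPLIB_FANTABLE *)(base + le16_to_cpu(t3->usFanTableOffset));
}
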
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae8..f3f2827017e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -705,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
+ default:
pll = &rdev->clock.dcpll;
break;
}
@@ -1160,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 28b31c64f48..abffb1499e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -351,7 +351,7 @@ retry:
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
- args.v2.ucHPD_ID = chan->rec.hpd_id;
+ args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e2050..8c8e4d3cbaa 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,235 @@
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
- /* XXX */
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
- /* XXX */
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
}
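
The sense/polarity pairing above follows the usual hot-plug detect idiom: the interrupt polarity is always programmed to the opposite of the current sense level, so the next plug or unplug transition raises an interrupt. A minimal standalone sketch of that logic, with illustrative bit positions (the real layouts are the DC_HPDx_* definitions in evergreend.h) and pure-value helpers standing in for the RREG32/WREG32 accesses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real DC_HPDx layouts live in evergreend.h. */
#define HPD_SENSE        (1u << 1)
#define HPD_INT_POLARITY (1u << 8)

/*
 * Given the current INT_STATUS value, compute the new INT_CONTROL value so
 * the next level change (plug or unplug) raises an interrupt.
 */
static uint32_t hpd_rearm(uint32_t int_status, uint32_t int_control)
{
	bool connected = int_status & HPD_SENSE;

	if (connected)
		int_control &= ~HPD_INT_POLARITY;	/* fire when the line drops (unplug) */
	else
		int_control |= HPD_INT_POLARITY;	/* fire when the line rises (plug) */
	return int_control;
}

int main(void)
{
	printf("connected    -> ctl 0x%08x\n", hpd_rearm(HPD_SENSE, 0));
	printf("disconnected -> ctl 0x%08x\n", hpd_rearm(0, 0));
	return 0;
}
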
void evergreen_hpd_init(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (rdev->irq.installed)
+ evergreen_irq_set(rdev);
}
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
}
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
}
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
/*
* GART
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
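+
The TLB flush above is a bounded request/poll loop: kick VM_CONTEXT0_REQUEST_RESPONSE, then poll the response field until it reports a fault (value 2) or completion (any other non-zero value), giving up after rdev->usec_timeout iterations. A self-contained sketch of the same pattern, with a stub standing in for the register read:

#include <stdint.h>
#include <stdio.h>

#define RESPONSE_PENDING 0
#define RESPONSE_DONE    1
#define RESPONSE_FAULT   2

/* Stand-in for RREG32(VM_CONTEXT0_REQUEST_RESPONSE): pretend the flush
 * completes after a few polls.  Purely illustrative. */
static uint32_t read_response(void)
{
	static int polls;
	return (++polls >= 3) ? RESPONSE_DONE : RESPONSE_PENDING;
}

/* Request/poll pattern used by evergreen_pcie_gart_tlb_flush(): bounded poll,
 * response value 2 is treated as a fault, any other non-zero value as done. */
static int wait_for_flush(unsigned usec_timeout)
{
	unsigned i;

	for (i = 0; i < usec_timeout; i++) {
		uint32_t resp = read_response();

		if (resp == RESPONSE_FAULT)
			return -1;	/* hardware reported a failure */
		if (resp)
			return 0;	/* flush acknowledged */
		/* the driver udelay(1)s here; omitted in this sketch */
	}
	return -2;			/* timed out */
}

int main(void)
{
	printf("flush result: %d\n", wait_for_flush(100000));
	return 0;
}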
+
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
- int r, i;
+ int r;
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- for (i = 1; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
- r600_pcie_gart_tlb_flush(rdev);
+ evergreen_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int r;
/* Disable all tables */
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
}
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-#if 0
/*
* CP.
*/
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
- /* XXX */
-}
-
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
- /* XXX */
+ const __be32 *fw_data;
+ int i;
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
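
The microcode loader treats the firmware blobs as arrays of big-endian 32-bit words and converts each one with be32_to_cpup() before writing it to the PFP/ME ucode data ports. A tiny userspace equivalent of that conversion (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of be32_to_cpup(): interpret 4 bytes as big-endian. */
static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* First dword of a hypothetical firmware blob, stored big-endian. */
	const uint8_t fw[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("0x%08x\n", be32_load(fw)); /* prints 0x12345678 on any host */
	return 0;
}
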
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
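+
In evergreen_cp_resume() the ring size is encoded into CP_RB_CNTL as a log2 field in units of 8 bytes: drm_order(rdev->cp.ring_size / 8) is, roughly, the smallest n with 2^n >= size/8. A standalone sketch of that encoding (the 1 MiB ring size is hypothetical):

#include <stdio.h>

/* Rough userspace stand-in for drm_order(): smallest n with (1 << n) >= size. */
static unsigned order_base_2_up(unsigned long size)
{
	unsigned n = 0;

	while ((1ul << n) < size)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;		/* hypothetical 1 MiB CP ring */
	unsigned rb_bufsz = order_base_2_up(ring_size / 8);

	/* CP_RB_CNTL carries the ring size as a log2 field, in units of 8 bytes. */
	printf("ring_size=%lu -> RB_BUFSZ field=%u\n", ring_size, rb_bufsz);
	return 0;
}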
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 cur_pipe;
+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+ num_tile_pipes = EVERGREEN_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > EVERGREEN_MAX_BACKENDS)
+ num_backends = EVERGREEN_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ force_no_swizzle = false;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+ }
return backend_map;
}
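
Both the efuse tables and the fallback loop above pack one 4-bit render-backend index per tile pipe into the 32-bit backend map: pipe p's backend occupies bits [4p+3:4p], so 0x76543210 maps every pipe straight to the backend of the same number. A small decoder illustrating that layout:

#include <stdint.h>
#include <stdio.h>

/* The backend map packs one 4-bit render-backend index per tile pipe:
 * pipe p's backend lives in bits [4p+3:4p]. */
static unsigned backend_for_pipe(uint32_t backend_map, unsigned pipe)
{
	return (backend_map >> (pipe * 4)) & 0xf;
}

int main(void)
{
	uint32_t map = 0x77553311;	/* one of the efuse-derived maps above */
	unsigned pipe;

	for (pipe = 0; pipe < 8; pipe++)
		printf("pipe %u -> backend %u\n", pipe, backend_for_pipe(map, pipe));
	return 0;
}
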
-#endif
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- /* XXX */
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 grbm_gfx_index;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+ & EVERGREEN_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+ & EVERGREEN_MAX_SIMDS_MASK);
+
+ cc_rb_backend_disable =
+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+ & EVERGREEN_MAX_BACKENDS_MASK);
+
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+ gb_addr_config |= ROW_SIZE(2);
+ else
+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+ if (rdev->ddev->pdev->device == 0x689e) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_131_124;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+ switch(efuse_box_bit_131_124) {
+ case 0x00:
+ gb_backend_map = 0x76543210;
+ break;
+ case 0x55:
+ gb_backend_map = 0x77553311;
+ break;
+ case 0x56:
+ gb_backend_map = 0x77553300;
+ break;
+ case 0x59:
+ gb_backend_map = 0x77552211;
+ break;
+ case 0x66:
+ gb_backend_map = 0x77443300;
+ break;
+ case 0x99:
+ gb_backend_map = 0x66552211;
+ break;
+ case 0x5a:
+ gb_backend_map = 0x77552200;
+ break;
+ case 0xaa:
+ gb_backend_map = 0x66442200;
+ break;
+ case 0x95:
+ gb_backend_map = 0x66553311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else if (rdev->ddev->pdev->device == 0x68b9) {
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_127_124;
+
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
+
+ switch(efuse_box_bit_127_124) {
+ case 0x0:
+ gb_backend_map = 0x00003210;
+ break;
+ case 0x5:
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ gb_backend_map = 0x00003311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+ u32 sp = cc_gc_shader_pipe_config;
+ u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+ if (i == num_shader_engines) {
+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ }
+
+ WREG32(GRBM_GFX_INDEX, gfx);
+ WREG32(RLC_GFX_INDEX, gfx);
+
+ WREG32(CC_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+ }
+
+ grbm_gfx_index |= SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if (rdev->family == CHIP_CEDAR)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ if (rdev->family == CHIP_CEDAR)
+ ps_thread_count = 96;
+ else
+ ps_thread_count = 128;
+
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_CEDAR)
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ else
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+
}
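
gb_addr_config is assembled purely from the shifted fields defined later in evergreend.h (NUM_PIPES, PIPE_INTERLEAVE_SIZE, and so on) and then mirrored to GB_ADDR_CONFIG, DMIF_ADDR_CONFIG and HDP_ADDR_CONFIG. A compilable sketch of that packing; the field values below describe a hypothetical Juniper-like part (4 tile pipes, one shader engine) and are not taken from real hardware straps:

#include <stdint.h>
#include <stdio.h>

/* Field encoders modeled on the GB_ADDR_CONFIG definitions in evergreend.h. */
#define NUM_PIPES(x)               ((uint32_t)(x) << 0)
#define PIPE_INTERLEAVE_SIZE(x)    ((uint32_t)(x) << 4)
#define BANK_INTERLEAVE_SIZE(x)    ((uint32_t)(x) << 8)
#define NUM_SHADER_ENGINES(x)      ((uint32_t)(x) << 12)
#define SHADER_ENGINE_TILE_SIZE(x) ((uint32_t)(x) << 16)
#define NUM_GPUS(x)                ((uint32_t)(x) << 20)
#define MULTI_GPU_TILE_SIZE(x)     ((uint32_t)(x) << 24)
#define ROW_SIZE(x)                ((uint32_t)(x) << 28)

int main(void)
{
	/* Hypothetical values: 4 tile pipes (log2 = 2), 1 shader engine (n - 1 = 0). */
	uint32_t gb_addr_config = NUM_PIPES(2) |
				  PIPE_INTERLEAVE_SIZE(1) |
				  BANK_INTERLEAVE_SIZE(0) |
				  NUM_SHADER_ENGINES(0) |
				  SHADER_ENGINE_TILE_SIZE(1) |
				  NUM_GPUS(0) |
				  MULTI_GPU_TILE_SIZE(2) |
				  ROW_SIZE(2);

	printf("GB_ADDR_CONFIG = 0x%08x\n", gb_addr_config);
	return 0;
}
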
int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
+ return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 srbm_reset = 0;
+ u32 grbm_reset = 0;
+
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ /* reset all the system blocks */
+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ /* After reset we need to reinit the asic as the GPU often ends up in an
+ * incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ case 1:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ case 2:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ case 3:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ case 4:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ case 5:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ default:
+ return 0;
+ }
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
return 0;
}
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2,
+ u32 *disp_int_cont3,
+ u32 *disp_int_cont4,
+ u32 *disp_int_cont5)
+{
+ u32 tmp;
+
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16). Hopefully
+ * this should allow us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
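+
The IH ring bookkeeping relies on two facts visible above and in evergreen_irq_process(): each interrupt vector occupies 16 bytes (4 dwords), and the ring size is a power of two, so both the normal read-pointer advance and the overflow recovery reduce to a mask. A self-contained sketch (the 64 KiB ring size is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Advance past one 16-byte IH vector, wrapping on the power-of-two ring. */
static uint32_t ih_next_rptr(uint32_t rptr, uint32_t ptr_mask)
{
	return (rptr + 16) & ptr_mask;
}

/* On overflow, resume 16 bytes past the write pointer, i.e. at the oldest
 * entry that was not overwritten. */
static uint32_t ih_overflow_rptr(uint32_t wptr, uint32_t ptr_mask)
{
	return (wptr + 16) & ptr_mask;
}

int main(void)
{
	uint32_t ptr_mask = (64 * 1024) - 1;	/* hypothetical 64 KiB IH ring */

	printf("rptr after one vector: 0x%x\n", ih_next_rptr(0xfff0, ptr_mask));
	printf("rptr after overflow  : 0x%x\n", ih_overflow_rptr(0x0120, ptr_mask));
	return 0;
}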
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = evergreen_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 2);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 3);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 4);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 5);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int_cont & DC_HPD2_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
-#if 0
int r;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
-#endif
+
evergreen_mc_program(rdev);
-#if 0
if (rdev->flags & RADEON_IS_AGP) {
- evergreem_agp_enable(rdev);
+ evergreen_agp_enable(rdev);
} else {
r = evergreen_pcie_gart_enable(rdev);
if (r)
return r;
}
-#endif
evergreen_gpu_init(rdev);
#if 0
if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev)
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+#endif
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
return r;
}
- r600_irq_set(rdev);
+ evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
- r = r600_cp_resume(rdev);
+ r = evergreen_cp_resume(rdev);
if (r)
return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
-#endif
+
return 0;
}
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev)
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
-#if 0
+
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
-#endif
+
return r;
}
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
-
+#endif
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_bo_init(rdev);
if (r)
return r;
-#if 0
+
r = radeon_irq_kms_init(rdev);
if (r)
return r;
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
-#endif
+
rdev->accel_working = false;
r = evergreen_startup(rdev);
if (r) {
- evergreen_suspend(rdev);
- /*r600_wb_fini(rdev);*/
- /*radeon_ring_fini(rdev);*/
- /*evergreen_pcie_gart_fini(rdev);*/
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- evergreen_suspend(rdev);
-#if 0
- r600_blit_fini(rdev);
+ /*r600_blit_fini(rdev);*/
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
evergreen_pcie_gart_fini(rdev);
-#endif
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c964343..af86af836f1 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -164,8 +164,12 @@
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS 0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 00000000000..93e9e17ad54
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef1..cc004b05d63 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
+#include "atom.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == 0) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
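+ /* with multiple active crtcs, pick the lowest power state
+ * that is not single-display-only; if no such state lies
+ * below the current one, stay where we are
+ */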
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
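+ /* with multiple active crtcs, pick the highest power state
+ * that is not single-display-only; if no such state lies
+ * above the current one, stay where we are
+ */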
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ /* only one clock mode per power state */
+ rdev->pm.requested_clock_mode_index = 0;
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ sclk_cntl = RREG32_PLL(SCLK_CNTL);
+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+ else
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+ } else
+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ sclk_more_cntl |= VOLTAGE_DROP_SYNC;
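+ /* voltage->delay is the same value handed to udelay() above,
+ * i.e. microseconds; map the known values onto the four
+ * VOLTAGE_DELAY_SEL settings
+ */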
+ switch (voltage->delay) {
+ case 33:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+ break;
+ case 66:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+ break;
+ case 99:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+ break;
+ case 132:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+ break;
+ }
+ } else
+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ sclk_cntl &= ~FORCE_HDP;
+ else
+ sclk_cntl |= FORCE_HDP;
+
+ WREG32_PLL(SCLK_CNTL, sclk_cntl);
+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+ return false;
+ else
+ return true;
+}
+
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
+ if (rdev->irq.gui_idle) {
+ tmp |= RADEON_GUI_IDLE_MASK;
+ }
if (rdev->irq.crtc_vblank_int[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= RADEON_GUI_IDLE_STAT;
+ }
+
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
}
+ /* gui idle interrupt */
+ if (status & RADEON_GUI_IDLE_STAT) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
- /* Reset CP */
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
- }
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-int r100_cp_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 16))) {
- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
-}
-
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- /* TODO: anythings to do here ? pipes ? */
- r100_hdp_reset(rdev);
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * The lockup tracking information does not need explicit initialization: on
+ * the first call either the CP rptr will differ from the recorded value or
+ * jiffies will appear to have wrapped around, and both cases force the
+ * tracking information to be re-initialized.
+ *
+ * A false positive is possible if we are called after a long while and
+ * last_cp_rptr happens to equal the current CP rptr. Although unlikely, this
+ * is handled by treating an elapsed time of 3 seconds or more since the last
+ * call as stale data: we return false and update the tracking information.
+ * A lockup is therefore only reported when the rptr has not moved and between
+ * 1 and 3 seconds have elapsed since the last call, so the caller must poll
+ * r100_gpu_cp_is_lockup at intervals shorter than 3 seconds for a lockup to
+ * ever be reported; the fencing code should be cautious about this.
+ *
+ * The caller should write to the ring to force CP activity, so that we do not
+ * get a false positive simply because the CP has been given nothing to do.
+ *
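+ * A caller is expected to poll this through the asic hook, roughly as in the
+ * illustrative sketch below (not code from this patch; radeon_gpu_is_lockup()
+ * is assumed to be the asic-level wrapper around this helper):
+ *
+ *    if (radeon_gpu_is_lockup(rdev) && !radeon_asic_reset(rdev))
+ *            goto retry_wait;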
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+ unsigned long cjiffies, elapsed;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, lockup->last_jiffies)) {
+ /* likely a wrap around */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (cp->rptr != lockup->last_cp_rptr) {
+ /* CP is still working no lockup */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+ if (elapsed >= 3000) {
+ /* most likely the improbable case where the current rptr
+ * happens to equal the rptr recorded a while ago; treat it
+ * as a false positive and update the tracking information
+ * so that we get called again later
+ */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (elapsed >= 1000) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give the GPU a chance ... */
+ return false;
}
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 rbbm_status;
+ int r;
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activity */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
+ u32 tmp;
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
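+ /* 0x44/0x42/0x40 keep BUS_MASTER_DIS set while first pulsing
+ * BUS_FLUSH_BUF and then BUS_MSTR_RESET; the final write leaves
+ * only BUS_MASTER_DIS set
+ */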
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
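+ /* also clear the bus-master enable bit (bit 2) of the PCI command
+ * register at config offset 0x4
+ */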
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 26))) {
- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
}
-int r100_gpu_reset(struct radeon_device *rdev)
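+/* Full engine/CP soft reset. Summarized from the code below: stop the MC,
+ * halt the CP and zero its ring pointers, save PCI state and disable bus
+ * mastering, soft-reset SE/RE/PP/RB and then the CP, restore PCI state and
+ * bus mastering, and resume the MC only if the GPU reports idle afterwards.
+ */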
+int r100_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
+ struct r100_mc_save save;
+ u32 status, tmp;
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
- /* TODO: reset 3D engine */
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init_half(3),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
- fixed_init(1),
- fixed_init_half(1),
- fixed_init(2),
- fixed_init_half(2),
- fixed_init(3),
- fixed_init_half(3),
- fixed_init(4),
- fixed_init_half(4)
+ dfixed_init(1),
+ dfixed_init_half(1),
+ dfixed_init(2),
+ dfixed_init_half(2),
+ dfixed_init(3),
+ dfixed_init_half(3),
+ dfixed_init(4),
+ dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
- fixed_init(8),
- fixed_init(9),
- fixed_init(10),
- fixed_init(11)
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ dfixed_init(8),
+ dfixed_init(9),
+ dfixed_init(10),
+ dfixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
- min_mem_eff.full = rfixed_const_8(0);
+ min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
- temp_ff.full = rfixed_const(temp);
- mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
- temp_ff.full = rfixed_const(1000);
- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
- pix_clk.full = rfixed_div(pix_clk, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes1);
- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = dfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes1);
+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
- temp_ff.full = rfixed_const(1000);
- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
- pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes2);
- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes2);
+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
- trcd_ff.full = rfixed_const(mem_trcd);
- trp_ff.full = rfixed_const(mem_trp);
- tras_ff.full = rfixed_const(mem_tras);
+ trcd_ff.full = dfixed_const(mem_trcd);
+ trp_ff.full = dfixed_const(mem_trp);
+ tras_ff.full = dfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
- tcas_ff.full += rfixed_const(data);
+ tcas_ff.full += dfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
- agpmode_ff.full = rfixed_const(radeon_agpmode);
- temp_ff.full = rfixed_const_666(16);
- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+ agpmode_ff.full = dfixed_const(radeon_agpmode);
+ temp_ff.full = dfixed_const_666(16);
+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
- sclk_delay_ff.full = rfixed_const(250);
+ sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
else
- sclk_delay_ff.full = rfixed_const(33);
+ sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
- sclk_delay_ff.full = rfixed_const(57);
+ sclk_delay_ff.full = dfixed_const(57);
else
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
}
}
- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
} else {
- k1.full = rfixed_const(20);
+ k1.full = dfixed_const(20);
c = 1;
}
} else {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
}
- temp_ff.full = rfixed_const(2);
- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
- temp_ff.full = rfixed_const(c);
- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
- temp_ff.full = rfixed_const(4);
- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+ temp_ff.full = dfixed_const(2);
+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = dfixed_const(c);
+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = dfixed_const(4);
+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
- temp_ff.full = rfixed_const(cur_size);
- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+ temp_ff.full = dfixed_const(cur_size);
+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(8);
- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+ disp_latency_overhead.full = dfixed_const(8);
+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes1));
- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point = rfixed_trunc(crit_point_ff);
+ critical_point = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes2));
- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
- temp_ff.full = rfixed_const(temp);
- temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point2 = rfixed_trunc(crit_point_ff);
+ critical_point2 = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
- r100_gpu_init(rdev);
+// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c46..d016b16fa11 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
+/* PLL regs */
+#define SCLK_CNTL 0xd
+#define FORCE_HDP (1 << 17)
+#define CLK_PWRMGT_CNTL 0x14
+#define GLOBAL_PMAN_EN (1 << 10)
+#define DISP_PM (1 << 20)
+#define PLL_PWRMGT_CNTL 0x15
+#define MPLL_TURNOFF (1 << 0)
+#define SPLL_TURNOFF (1 << 1)
+#define PPLL_TURNOFF (1 << 2)
+#define P2PLL_TURNOFF (1 << 3)
+#define TVPLL_TURNOFF (1 << 4)
+#define MOBILE_SU (1 << 16)
+#define SU_SCLK_USE_BCLK (1 << 17)
+#define SCLK_CNTL2 0x1e
+#define REDUCED_SPEED_SCLK_MODE (1 << 16)
+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
+#define MCLK_MISC 0x1f
+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
+#define SCLK_MORE_CNTL 0x35
+#define REDUCED_SPEED_SCLK_EN (1 << 16)
+#define IO_CG_VOLTAGE_DROP (1 << 17)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
+#define VOLTAGE_DROP_SYNC (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN 0xd08
+#define DISP_D3_GRPH_RST (1 << 18)
+#define DISP_D3_SUBPIC_RST (1 << 19)
+#define DISP_D3_OV0_RST (1 << 20)
+#define DISP_D1D2_GRPH_RST (1 << 21)
+#define DISP_D1D2_SUBPIC_RST (1 << 22)
+#define DISP_D1D2_OV0_RST (1 << 23)
+#define DISP_DVO_ENABLE_RST (1 << 24)
+#define TV_ENABLE_RST (1 << 25)
+#define AUTO_PWRUP_EN (1 << 26)
#endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff8076b42..b2f9efe2897 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
- r100_hdp_reset(rdev);
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
- bool reinit_cp;
- int i;
+ u32 rbbm_status;
+ int r;
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(R300_RE_SCISSORS_TL, 0);
- WREG32(R300_RE_SCISSORS_BR, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
+ /* force CP activity */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
-int r300_gpu_reset(struct radeon_device *rdev)
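+/* Same flow as r100_asic_reset(), except that the engine soft reset targets
+ * the R3xx VAP and GA blocks before the CP is reset.
+ */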
+int r300_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- r300_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
+ struct r100_mc_save save;
+ u32 status, tmp;
+
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* resetting the CP seems to be problematic: sometimes it ends up
+ * hard-locking the machine, but it is necessary for a successful
+ * reset. More testing is needed on R3XX/R4XX to find a reliable
+ * solution (if any)
+ */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
-
/*
* r300,r350,rv350,rv380 VRAM info
*/
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de..968a33317fb 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
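
The S_/G_/C_ macros above are the usual generated field helpers: S_ builds a field value, G_ extracts it, and C_ is the AND-mask for clearing the field in a read-modify-write. A small illustrative use, assuming the defines above; rreg32()/wreg32() are hypothetical accessors standing in for RREG32/WREG32.

#include <stdint.h>

uint32_t request_ga_vap_reset(uint32_t (*rreg32)(uint32_t),
			      void (*wreg32)(uint32_t, uint32_t))
{
	uint32_t v = rreg32(R_0000F0_RBBM_SOFT_RESET);

	v &= C_0000F0_SOFT_RESET_CP;		/* leave the CP bit clear */
	v |= S_0000F0_SOFT_RESET_GA(1) |	/* request GA reset */
	     S_0000F0_SOFT_RESET_VAP(1);	/* request VAP reset */
	wreg32(R_0000F0_RBBM_SOFT_RESET, v);
	return G_0000F0_SOFT_RESET_GA(v);	/* extracts the GA bit: 1 */
}
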
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4ad62e..4415a5ee587 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -36,6 +36,35 @@
#include "r420d.h"
#include "r420_reg_safe.h"
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
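
Each profile entry pairs a power state index with a clock mode index for the dpms-on and dpms-off cases. A stand-alone sketch of how such a table is consumed; the struct mirrors radeon_pm_profile but is an illustrative stand-in, not the driver's type.

#include <stdbool.h>

struct pm_profile {
	int dpms_off_ps_idx, dpms_on_ps_idx;	/* illustrative mirror of radeon_pm_profile */
	int dpms_off_cm_idx, dpms_on_cm_idx;
};

void pick_profile_state(const struct pm_profile *profiles, int profile_idx,
			bool displays_on, int *ps_idx, int *cm_idx)
{
	const struct pm_profile *p = &profiles[profile_idx];

	*ps_idx = displays_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
	*cm_idx = displays_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
}
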
+
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@@ -241,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -322,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 0cf2ad2a558..93c9a2bbccf 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@@ -488,6 +490,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d3931..34330df2848 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056..44e96a2ae25 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ /* power state array is low to high, default is first */
+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+ int min_power_state_index = 0;
+
+ if (rdev->pm.num_power_states > 2)
+ min_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = min_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == min_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ } else {
+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+ /* for now just select the first power state and switch between clock modes */
+ /* power state array is low to high, default is first (0) */
+ if (rdev->pm.active_crtc_count > 1) {
+ rdev->pm.requested_power_state_index = -1;
+ /* start at 1 as we don't want the default mode */
+ for (i = 1; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ /* if nothing selected, grab the default state. */
+ if (rdev->pm.requested_power_state_index == -1)
+ rdev->pm.requested_power_state_index = 0;
+ } else
+ rdev->pm.requested_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index == 0) {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index - 1;
+ } else {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_clock_mode_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index ==
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index + 1;
+ } else {
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+ rdev->pm.dynpm_can_upclock = false;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ }
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
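
A stand-alone model of the lookup above: return the index of the instance-th power state of the requested type, falling back when no such state exists. The enum values are illustrative, not the driver's radeon_pm_state_type.

enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE };	/* illustrative */

int get_type_index(const enum ps_type *states, int num_states,
		   enum ps_type type, int instance, int fallback)
{
	int i, found = -1;

	for (i = 0; i < num_states; i++) {
		if (states[i] == type && ++found == instance)
			return i;
	}
	return fallback;	/* no match: fall back to the default state */
}
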
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states == 2) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else if (rdev->pm.num_power_states == 3) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->family == CHIP_R600) {
+ /* XXX */
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ if (rdev->pm.num_power_states < 4) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ }
+ }
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+ return false;
+ else
+ return true;
+}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- /* Reset others GPU block if necessary */
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_IH(1);
- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
- udelay(50);
+ mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+ int r;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
@@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770) {
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
- r600_disable_interrupt_state(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
- r600_disable_interrupts(rdev);
+ r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
+ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
+ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
+ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.hdmi[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 1\n");
+ hdmi1 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.hdmi[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 2\n");
+ hdmi2 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ } else {
+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ }
}
void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@@ -2852,6 +3305,10 @@ restart_ih:
break;
}
break;
+ case 21: /* HDMI */
+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+ r600_audio_schedule_polling(rdev);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@ restart_ih:
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c63..2b26553c352 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
/*
* current number of channels
*/
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
/*
* current bits per sample
*/
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
/*
* current sampling rate in HZ
*/
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
/*
* iec 60958 status bits
*/
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
/*
* iec 60958 category code
*/
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
- int changes = 0;
+ int changes = 0, still_going = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
+ r600_hdmi_update_audio_settings(encoder);
}
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+	if (still_going)
+		r600_audio_schedule_polling(rdev);
}
/*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_update_hdmi,
(unsigned long)rdev);
+ return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ if (radeon_encoder->audio_polling_active)
+ return;
+
+ radeon_encoder->audio_polling_active = 1;
mod_timer(&rdev->audio_timer, jiffies + 1);
+}
- return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ radeon_encoder->audio_polling_active = 0;
}
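
The polling path above is a self-rearming timer: the update callback reschedules itself only while at least one encoder still has audio_polling_active set. A simplified stand-alone sketch of that shape; poll_once() and schedule_poll() are illustrative callbacks, not driver functions.

#include <stdbool.h>

struct audio_poller { bool polling_active; };

void poll_tick(struct audio_poller *pollers, int n,
	       void (*poll_once)(struct audio_poller *),
	       void (*schedule_poll)(void))
{
	bool still_going = false;
	int i;

	for (i = 0; i < n; i++) {
		still_going |= pollers[i].polling_active;
		poll_once(&pollers[i]);
	}
	if (still_going)
		schedule_poll();	/* re-arm only while someone still polls */
}
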
/*
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e..d13622ae74e 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ /* don't reinitialize blit */
+ if (rdev->r600_blit.shader_obj)
+ return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba6..26b4bc9d89a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
if (!offset)
return;
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
- /* disable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+ if (!radeon_encoder->hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder)) {
- } else if (radeon_encoder->hdmi_audio_workaround) {
- /* enable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+ /* disable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else {
- /* disable audio workaround and stop delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ /* enable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
/*
* update settings with current parameters from audio engine
*/
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
uint32_t iec;
if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ if (rdev->irq.installed
+ && rdev->family != CHIP_RS600
+ && rdev->family != CHIP_RS690
+ && rdev->family != CHIP_RS740) {
+
+		/* if an irq is available, use it */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ radeon_irq_set(rdev);
+
+ r600_audio_disable_polling(encoder);
+ } else {
+		/* otherwise, fall back to polling */
+ r600_audio_enable_polling(encoder);
+ }
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
+ offset = radeon_encoder->hdmi_offset;
+ if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
}
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ radeon_irq_set(rdev);
+
+ /* disable polling */
+ r600_audio_disable_polling(encoder);
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6..d84612ae47e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+# define R600_HDMI_INT_PENDING (1 << 29)
+#define R600_HDMI_CNTL 0x08
+# define R600_HDMI_INT_EN (1 << 28)
+# define R600_HDMI_INT_ACK (1 << 29)
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
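
The INT_PENDING/INT_ACK bits added above imply a simple handshake: read the block's STATUS register, then acknowledge by writing INT_ACK into CNTL. A hedged sketch, assuming the defines above; rreg32()/wreg32_p() are hypothetical accessors standing in for RREG32/WREG32_P.

#include <stdint.h>

void hdmi_ack_if_pending(uint32_t block_base,
			 uint32_t (*rreg32)(uint32_t),
			 void (*wreg32_p)(uint32_t, uint32_t, uint32_t))
{
	if (rreg32(block_base + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING)
		wreg32_p(block_base + R600_HDMI_CNTL,
			 R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
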
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbb..669feb689bf 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
-extern int radeon_dynpm;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@ extern int radeon_hw_i2c;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -172,6 +172,8 @@ struct radeon_clock {
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -182,7 +184,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
- unsigned long count_timeout;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@@ -197,7 +200,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- unsigned long timeout;
bool emited;
bool signaled;
};
@@ -259,6 +261,7 @@ struct radeon_bo_list {
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
+ bool reserved;
};
/*
@@ -371,10 +374,15 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[2];
+ bool crtc_vblank_int[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
};
@@ -462,7 +470,9 @@ int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
@@ -566,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
@@ -597,17 +608,24 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
-enum radeon_pm_state {
- PM_STATE_DISABLED,
- PM_STATE_MINIMUM,
- PM_STATE_PAUSED,
- PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+ PM_METHOD_PROFILE,
+ PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+ DYNPM_STATE_DISABLED,
+ DYNPM_STATE_MINIMUM,
+ DYNPM_STATE_PAUSED,
+ DYNPM_STATE_ACTIVE
};
-enum radeon_pm_action {
- PM_ACTION_NONE,
- PM_ACTION_MINIMUM,
- PM_ACTION_DOWNCLOCK,
- PM_ACTION_UPCLOCK
+enum radeon_dynpm_action {
+ DYNPM_ACTION_NONE,
+ DYNPM_ACTION_MINIMUM,
+ DYNPM_ACTION_DOWNCLOCK,
+ DYNPM_ACTION_UPCLOCK,
+ DYNPM_ACTION_DEFAULT
};
enum radeon_voltage_type {
@@ -625,11 +643,25 @@ enum radeon_pm_state_type {
POWER_STATE_TYPE_PERFORMANCE,
};
-enum radeon_pm_clock_mode_type {
- POWER_MODE_TYPE_DEFAULT,
- POWER_MODE_TYPE_LOW,
- POWER_MODE_TYPE_MID,
- POWER_MODE_TYPE_HIGH,
+enum radeon_pm_profile_type {
+ PM_PROFILE_DEFAULT,
+ PM_PROFILE_AUTO,
+ PM_PROFILE_LOW,
+ PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX 1
+#define PM_PROFILE_HIGH_SH_IDX 2
+#define PM_PROFILE_LOW_MH_IDX 3
+#define PM_PROFILE_HIGH_MH_IDX 4
+#define PM_PROFILE_MAX 5
+
+struct radeon_pm_profile {
+ int dpms_off_ps_idx;
+ int dpms_on_ps_idx;
+ int dpms_off_cm_idx;
+ int dpms_on_cm_idx;
};
struct radeon_voltage {
@@ -646,12 +678,8 @@ struct radeon_voltage {
u32 voltage;
};
-struct radeon_pm_non_clock_info {
- /* pcie lanes */
- int pcie_lanes;
- /* standardized non-clock flags */
- u32 flags;
-};
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
struct radeon_pm_clock_info {
/* memory clock */
@@ -660,10 +688,13 @@ struct radeon_pm_clock_info {
u32 sclk;
/* voltage info */
struct radeon_voltage voltage;
- /* standardized clock flags - not sure we'll need these */
+ /* standardized clock flags */
u32 flags;
};
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
struct radeon_power_state {
enum radeon_pm_state_type type;
/* XXX: use a define for num clock modes */
@@ -671,9 +702,11 @@ struct radeon_power_state {
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
- /* non clock info about this state */
- struct radeon_pm_non_clock_info non_clock_info;
- bool voltage_drop_active;
+ /* standardized state flags */
+ u32 flags;
+ u32 misc; /* vbios specific flags */
+ u32 misc2; /* vbios specific flags */
+ int pcie_lanes; /* pcie lanes */
};
/*
@@ -683,14 +716,11 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
- struct delayed_work idle_work;
- enum radeon_pm_state state;
- enum radeon_pm_action planned_action;
- unsigned long action_timeout;
- bool downclocked;
- int active_crtcs;
+ u32 active_crtcs;
+ int active_crtc_count;
int req_vblank;
bool vblank_sync;
+ bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -707,12 +737,27 @@ struct radeon_pm {
struct radeon_power_state power_state[8];
/* number of valid power states */
int num_power_states;
- struct radeon_power_state *current_power_state;
- struct radeon_pm_clock_info *current_clock_mode;
- struct radeon_power_state *requested_power_state;
- struct radeon_pm_clock_info *requested_clock_mode;
- struct radeon_power_state *default_power_state;
+ int current_power_state_index;
+ int current_clock_mode_index;
+ int requested_power_state_index;
+ int requested_clock_mode_index;
+ int default_power_state_index;
+ u32 current_sclk;
+ u32 current_mclk;
struct radeon_i2c_chan *i2c_bus;
+ /* selected pm method */
+ enum radeon_pm_method pm_method;
+ /* dynpm power management */
+ struct delayed_work dynpm_idle_work;
+ enum radeon_dynpm_state dynpm_state;
+ enum radeon_dynpm_action dynpm_planned_action;
+ unsigned long dynpm_action_timeout;
+ bool dynpm_can_upclock;
+ bool dynpm_can_downclock;
+ /* profile-based power management */
+ enum radeon_pm_profile_type profile;
+ int profile_index;
+ struct radeon_pm_profile profiles[PM_PROFILE_MAX];
};
@@ -746,7 +791,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*gpu_reset)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +845,84 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ bool (*gui_idle)(struct radeon_device *rdev);
+ /* power management */
+ void (*pm_misc)(struct radeon_device *rdev);
+ void (*pm_prepare)(struct radeon_device *rdev);
+ void (*pm_finish)(struct radeon_device *rdev);
+ void (*pm_init_profile)(struct radeon_device *rdev);
+ void (*pm_get_dynpm_state)(struct radeon_device *rdev);
};
/*
* Asic structures
*/
+struct r100_gpu_lockup {
+ unsigned long last_jiffies;
+ u32 last_cp_rptr;
+};
+
struct r100_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r300_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 resync_scratch;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r600_asic {
- unsigned max_pipes;
- unsigned max_tile_pipes;
- unsigned max_simds;
- unsigned max_backends;
- unsigned max_gprs;
- unsigned max_threads;
- unsigned max_stack_entries;
- unsigned max_hw_contexts;
- unsigned max_gs_threads;
- unsigned sx_max_export_size;
- unsigned sx_max_export_pos_size;
- unsigned sx_max_export_smx_size;
- unsigned sq_num_cf_insts;
- unsigned tiling_nbanks;
- unsigned tiling_npipes;
- unsigned tiling_group_size;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
};
struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
@@ -853,7 +939,7 @@ struct rv770_asic {
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_fize;
+ unsigned sc_earlyz_tile_fifo_size;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
@@ -864,6 +950,7 @@ union radeon_asic_config {
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
};
/*
@@ -927,9 +1014,6 @@ struct radeon_device {
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
- struct fb_info *fbdev_info;
- struct radeon_bo *fbdev_rbo;
- struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -974,6 +1058,7 @@ struct radeon_device {
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+ struct mutex vram_mutex;
/* audio stuff */
struct timer_list audio_timer;
@@ -984,6 +1069,7 @@ struct radeon_device {
uint8_t audio_category_code;
bool powered_down;
+ struct notifier_block acpi_nb;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1231,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1260,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
/* Common functions */
/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1294,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1356,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1373,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
/* evergreen */
struct evergreen_mc_save {
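The new profile-based path in radeon.h replaces the single pm state enum with a small table of per-profile power-state indices. A minimal sketch of how a driver might map the user-selected profile onto one of the PM_PROFILE_*_IDX slots; the selection rules and the on_ac flag are assumptions for illustration, not taken from this patch (the AC/DC decision is passed in because its detection is outside the hunks shown):

/* Illustrative only: pick a PM_PROFILE_*_IDX slot from the selected
 * profile, the AC/DC state and the number of active heads. */
static int radeon_pick_profile_index(struct radeon_device *rdev, bool on_ac)
{
        bool multi_head = (rdev->pm.active_crtc_count > 1);

        switch (rdev->pm.profile) {
        case PM_PROFILE_LOW:
                return multi_head ? PM_PROFILE_LOW_MH_IDX : PM_PROFILE_LOW_SH_IDX;
        case PM_PROFILE_HIGH:
                return multi_head ? PM_PROFILE_HIGH_MH_IDX : PM_PROFILE_HIGH_SH_IDX;
        case PM_PROFILE_AUTO:
                /* e.g. high clocks on AC, low clocks on battery */
                if (on_ac)
                        return multi_head ? PM_PROFILE_HIGH_MH_IDX : PM_PROFILE_HIGH_SH_IDX;
                return multi_head ? PM_PROFILE_LOW_MH_IDX : PM_PROFILE_LOW_SH_IDX;
        case PM_PROFILE_DEFAULT:
        default:
                return PM_PROFILE_DEFAULT_IDX;
        }
}

The chosen slot's dpms_on_ps_idx/dpms_off_ps_idx and the matching clock-mode indices then become the requested power state, depending on whether any display is lit.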
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56..f40dfb77f9b 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
}
#endif
}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa32..e57df08d4ae 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
+ .asic_reset = &r600_asic_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &rv770_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = NULL,
- .gpu_reset = &evergreen_gpu_reset,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
- .irq_set = NULL,
- .irq_process = NULL,
- .get_vblank_counter = NULL,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = {
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
int radeon_asic_init(struct radeon_device *rdev)
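Every entry in these tables is reached through the thin wrapper macros declared in radeon.h (radeon_gpu_is_lockup(), radeon_asic_reset(), radeon_pm_misc(), ...), so common code never names a chip family. A minimal sketch of the calling pattern with a hypothetical caller; the real recovery path is the shared radeon_gpu_reset() declared above:

/* Hypothetical caller: ask the per-ASIC hook whether the GPU is stuck
 * and, only then, ask the same table to reset it. */
static int radeon_check_and_reset(struct radeon_device *rdev)
{
        if (!radeon_gpu_is_lockup(rdev))        /* rdev->asic->gpu_is_lockup() */
                return 0;
        DRM_INFO("GPU lockup detected, attempting reset\n");
        return radeon_asic_reset(rdev);         /* rdev->asic->asic_reset() */
}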
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d..5c40a3dfaca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
- struct radeon_fence *fence);
+ struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/*
* rs600.
*/
+extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
/*
* rs690,rs740
@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
/*
* evergreen
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
#endif
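The r100_gpu_lockup helpers split the old gpu_reset entry point into detection and reset: progress of the CP read pointer is recorded, and a lockup is only declared when the pointer has stopped advancing for some time. Their bodies are not part of the hunks shown here; the sketch below is an illustration of the idea, with an arbitrary 10-second threshold:

/* Illustration of the lockup heuristic: progress on the CP ring refreshes
 * the timestamp, lack of progress past a deadline means "locked up". */
static bool gpu_lockup_check(struct r100_gpu_lockup *lockup, u32 cp_rptr)
{
        unsigned long elapsed;

        if (cp_rptr != lockup->last_cp_rptr) {
                lockup->last_cp_rptr = cp_rptr; /* CP is still consuming */
                lockup->last_jiffies = jiffies;
                return false;
        }
        elapsed = jiffies_to_msecs(jiffies - lockup->last_jiffies);
        return elapsed >= 10 * 1000;            /* no progress for ~10s */
}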
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825401..24ea683f7cf 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
/* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
- hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else {
- hpd.hpd = RADEON_HPD_NONE;
- ddc_bus.valid = false;
}
/* needed for aux chan transactions */
- ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+ ddc_bus.hpd = hpd.hpd;
conn_id = le16_to_cpu(path->usConnObjectId);
@@ -682,11 +680,19 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint8_t dac;
union atom_supported_devices *supported_devices;
int i, j, max_device;
- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+ struct bios_connector *bios_connectors;
+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
- if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+ if (!bios_connectors)
return false;
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset)) {
+ kfree(bios_connectors);
+ return false;
+ }
+
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -853,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_link_encoder_connector(dev);
+ kfree(bios_connectors);
return true;
}
@@ -1174,7 +1181,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
@@ -1442,26 +1449,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
static const char *thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
- "ASC7512",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "asc7xxx",
};
static const char *pp_lib_thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
"RV6xx",
"RV770",
- "ADT7473",
+ "adt7473",
+ "External GPIO",
+ "Evergreen",
+ "adt7473 with internal",
+
};
union power_info {
@@ -1485,7 +1496,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
int state_index = 0, mode_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1498,10 +1509,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -1515,13 +1535,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1556,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1555,15 +1571,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1577,13 +1601,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1623,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1618,18 +1639,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
+ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1643,13 +1675,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1703,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
}
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1690,42 +1719,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
}
}
- } else if (frev == 4) {
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ } else {
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
- if (power_info->info_4.sThermalController.ucType > 0) {
- if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+ if (controller->ucType > 0) {
+ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
DRM_INFO("Internal thermal controller %s fan control\n",
- (power_info->info_4.sThermalController.ucFanParameters &
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
- power_info->info_4.sThermalController.ucI2cAddress >> 1,
- (power_info->info_4.sThermalController.ucFanParameters &
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+
}
}
+ /* first mode is usually default, followed by low to high */
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
mode_index = 0;
power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1817,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
- /* skip overclock modes for now */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
+ /* XXX usVDDCI */
mode_index++;
} else {
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1864,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1875,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (mode_index) {
misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1894,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
POWER_STATE_TYPE_PERFORMANCE;
break;
}
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
}
state_index++;
}
}
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
}
} else {
- /* XXX figure out some good default low power mode for cards w/out power tables */
- }
-
- if (rdev->pm.default_power_state == NULL) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1933,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
state_index++;
}
+
rdev->pm.num_power_states = state_index;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
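With the switch from cached pointers to indices, consumers resolve the entry from rdev->pm.power_state[] at the point of use via the two indices kept in struct radeon_pm. A minimal sketch of the lookup; the helper name is made up for illustration:

/* Hypothetical helper: resolve the currently selected clock mode from
 * current_power_state_index and current_clock_mode_index. */
static struct radeon_pm_clock_info *
radeon_current_clock_mode(struct radeon_device *rdev)
{
        struct radeon_power_state *ps =
                &rdev->pm.power_state[rdev->pm.current_power_state_index];

        return &ps->clock_info[rdev->pm.current_clock_mode_index];
}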
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f70131..fbba938f804 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev)
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmalloc(size, GFP_KERNEL);
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- memcpy(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
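kmemdup() is the stock kernel helper for the allocate-then-copy pattern the old code spelled out by hand; it fails the same way kmalloc() does, so the error handling stays unchanged. A short fragment comparing the two forms:

/* Before: two steps, copy only on success. */
buf = kmalloc(size, GFP_KERNEL);
if (buf)
        memcpy(buf, src, size);

/* After: one call with identical semantics. */
buf = kmemdup(src, size, GFP_KERNEL);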
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb274..7b5e10d3e9c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
struct edid *edid;
+ unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
+ raw = rdev->bios + edid_info;
+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid,
- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
if (!drm_edid_is_valid(edid)) {
kfree(edid);
@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
- i2c.hpd_id = 0;
+ i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
i2c.valid = true;
@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
break;
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- (RBIOS16(tmp + 2) ==
- lvds->native_mode.vdisplay)) {
- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
- RBIOS16(tmp + 21)) * 8;
-
- lvds->native_mode.vtotal = RBIOS16(tmp + 24);
- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
- lvds->native_mode.vsync_end =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
- (RBIOS16(tmp + 28) & 0x7ff);
+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ (RBIOS8(tmp + 23) * 8);
+
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- hpd.hpd = RADEON_HPD_NONE;
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u8 rev, blocks, tmp;
int state_index = 0;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
goto default_mode;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- goto default_mode;
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
misc = RBIOS16(offset + 0x5 + 0x0);
if (rev > 4)
misc2 = RBIOS16(offset + 0x5 + 0xe);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
if (misc & 0x4) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
if (misc & 0x8)
@@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
} else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rev > 6)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
RBIOS8(offset + 0x5 + 0x10);
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
state_index++;
} else {
/* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.power_state[state_index].flags = 0;
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = state_index + 1;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
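The corrected LVDS native-mode math near the top of this combios diff rebuilds the DRM timing fields from the BIOS panel table: the horizontal words are scaled by 8 and measured against a reference word at tmp + 19, while the word at tmp + 28 packs the vertical sync start in its low 11 bits and the sync width in the top 5 bits. Below is a minimal standalone C check of that arithmetic; the table words and their names (blank start, sync width, and so on) are hypothetical values and descriptive guesses chosen so the result matches the common 1024x768 at 60 Hz timing, not data read from a real BIOS.

/* Worked example of the LVDS timing math above; the *_w values stand in
 * for the RBIOS16/RBIOS8 reads at offsets tmp+17..tmp+28 and are invented
 * for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned hdisplay = 1024, vdisplay = 768;
	unsigned h_total_w = 168, h_blank_start_w = 128;  /* RBIOS16(tmp + 17), RBIOS16(tmp + 19) */
	unsigned h_sync_start_w = 132, h_sync_width = 17; /* RBIOS16(tmp + 21), RBIOS8(tmp + 23) */
	unsigned v_total_w = 806, v_blank_start_w = 768;  /* RBIOS16(tmp + 24), RBIOS16(tmp + 26) */
	unsigned v_sync_word = (6 << 11) | 771;           /* RBIOS16(tmp + 28): width<<11 | start */

	unsigned htotal      = hdisplay + (h_total_w - h_blank_start_w) * 8;
	unsigned hsync_start = hdisplay + (h_sync_start_w - h_blank_start_w - 1) * 8;
	unsigned hsync_end   = hsync_start + h_sync_width * 8;

	unsigned vtotal      = vdisplay + (v_total_w - v_blank_start_w);
	unsigned vsync_start = vdisplay + ((v_sync_word & 0x7ff) - v_blank_start_w);
	unsigned vsync_end   = vsync_start + ((v_sync_word & 0xf800) >> 11);

	/* prints 1024 1048 1184 1344 and 768 771 777 806 */
	printf("h: %u %u %u %u\n", hdisplay, hsync_start, hsync_end, htotal);
	printf("v: %u %u %u %u\n", vdisplay, vsync_start, vsync_end, vtotal);
	return 0;
}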
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53d5e5..0c7ccc6961a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
* of false positive on load detect, we haven't yet
@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0..ae0fb7356e6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
- if (rdev->gpu_lockup) {
- mutex_unlock(&rdev->cs_mutex);
- return -EINVAL;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e30556..fdc3fdf78ac 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
sclk = radeon_get_engine_clock(rdev);
mclk = rdev->clock.default_mclk;
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
- a.full = rfixed_const(16);
+ a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
} else {
sclk = radeon_get_engine_clock(rdev);
mclk = radeon_get_memory_clock(rdev);
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
@@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev,
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
+ init_waitqueue_head(&rdev->irq.idle_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@@ -691,6 +693,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_rbo) {
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
- if (unlikely(r == 0)) {
+ if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
@@ -743,11 +748,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_save_bios_scratch_regs(rdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
radeon_bo_evict_vram(rdev);
+ radeon_agp_suspend(rdev);
+
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@@ -755,7 +763,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
acquire_console_sem();
- fb_set_suspend(rdev->fbdev_info, 1);
+ radeon_fbdev_set_suspend(rdev, 1);
release_console_sem();
return 0;
}
@@ -778,8 +786,9 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+ radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- fb_set_suspend(rdev->fbdev_info, 0);
+ radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
/* reset hpd state */
@@ -789,6 +798,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_save_bios_scratch_regs(rdev);
+ radeon_suspend(rdev);
+
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeeded\n");
+ radeon_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+ return 0;
+ }
+ /* bad news, how do we tell userspace? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ return r;
+}
+
/*
* Debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122cad2..1006549d157 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
vco_freq = freq * post_div;
/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
- a.full = rfixed_const(pll->reference_freq);
- feedback_divider.full = rfixed_const(vco_freq);
- feedback_divider.full = rfixed_div(feedback_divider, a);
- a.full = rfixed_const(ref_div);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(pll->reference_freq);
+ feedback_divider.full = dfixed_const(vco_freq);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
+ a.full = dfixed_const(ref_div);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
- a.full = rfixed_const(10);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_div(feedback_divider, a);
+ a.full = dfixed_const(10);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
/* *fb_div = floor(feedback_divider); */
- a.full = rfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(a);
+ a.full = dfixed_floor(feedback_divider);
+ *fb_div = dfixed_trunc(a);
/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
- a.full = rfixed_const(10);
- b.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(10);
+ b.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
feedback_divider.full = b.full - feedback_divider.full;
- *fb_div_frac = rfixed_trunc(feedback_divider);
+ *fb_div_frac = dfixed_trunc(feedback_divider);
} else {
/* *fb_div = floor(feedback_divider + 0.5); */
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(feedback_divider);
+ *fb_div = dfixed_trunc(feedback_divider);
*fb_div_frac = 0;
}
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
pll_out_max = pll->pll_out_max;
}
- ffreq.full = rfixed_const(freq);
+ ffreq.full = dfixed_const(freq);
/* max_error = ffreq * 0.0025; */
- a.full = rfixed_const(400);
- max_error.full = rfixed_div(ffreq, a);
+ a.full = dfixed_const(400);
+ max_error.full = dfixed_div(ffreq, a);
for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
continue;
/* pll_out = vco / post_div; */
- a.full = rfixed_const(post_div);
- pll_out.full = rfixed_const(vco);
- pll_out.full = rfixed_div(pll_out, a);
+ a.full = dfixed_const(post_div);
+ pll_out.full = dfixed_const(vco);
+ pll_out.full = dfixed_div(pll_out, a);
if (pll_out.full >= ffreq.full) {
error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- radeonfb_remove(dev, fb);
if (radeon_fb->obj)
drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct radeon_framebuffer *radeon_fb;
-
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
- radeon_fb->obj = obj;
- return &radeon_fb->base;
+ rfb->obj = obj;
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
@@ -886,12 +877,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
"can't create framebuffer\n", mode_cmd->handle);
return NULL;
}
- return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+ return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_fb_output_poll_changed(rdev);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .fb_changed = radeonfb_probe,
+ .output_poll_changed = radeon_output_poll_changed
};
struct drm_prop_enum_list {
@@ -978,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
/* set display priority to high for r3xx, rv515 chips
* this avoids flickering due to underflow to the
* display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
*/
- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
rdev->disp_priority = 2;
else
rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
}
/* initialize hpd */
radeon_hpd_init(rdev);
- drm_helper_initial_config(rdev->ddev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+
return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
- radeon_crtc->vsc.full = rfixed_div(a, b);
- a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
- radeon_crtc->hsc.full = rfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.vdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ radeon_crtc->vsc.full = dfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.hdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
- radeon_crtc->vsc.full = rfixed_const(1);
- radeon_crtc->hsc.full = rfixed_const(1);
+ radeon_crtc->vsc.full = dfixed_const(1);
+ radeon_crtc->hsc.full = dfixed_const(1);
}
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b3749d47be7..902d1731a65 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -44,9 +44,10 @@
* - 2.1.0 - add square tiling interface
* - 2.2.0 - add r6xx/r7xx const buffer support
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 3
+#define KMS_DRIVER_MINOR 4
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
-int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
-module_param_named(dynpm, radeon_dynpm, int, 0444);
-
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c5ddaf58563..1ebb100015b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
union crtc_source_param {
@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784..e192acfbf0c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
* Authors:
* David Airlie
*/
- /*
- * Modularization
- */
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
@@ -42,17 +38,21 @@
#include <linux/vga_switcheroo.h>
-struct radeon_fb_device {
+/* object hierarchy -
+ this contains a helper + a radeon fb;
+ the helper contains a pointer to the radeon framebuffer baseclass.
+*/
+struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
- struct radeon_device *rdev;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (fb == NULL) {
- return 1;
- }
- info = fb->fbdev;
- if (info == NULL) {
- return 1;
- }
- if (mode == NULL) {
- return 1;
- }
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
return aligned;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gobj->driver_private;
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
-int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object **gobj_p)
{
- struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
- struct radeon_framebuffer *rfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
- int size, aligned_size, ret;
- u64 fb_gpuaddr;
- void *fbptr = NULL;
- unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
+ int ret;
+ int aligned_size, size;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- /* avivo can't scanout real 24bpp */
- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
- surface_bpp = 32;
-
- mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
- mode_cmd.depth = surface_depth;
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
-
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
- surface_width, surface_height);
- ret = -ENOMEM;
- goto out;
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
}
rbo = gobj->driver_private;
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd.bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
- tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd.pitch);
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
- mutex_lock(&rdev->ddev->struct_mutex);
- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
- if (fb == NULL) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_unref;
- }
+
+
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
- ret = radeon_bo_kmap(rbo, &fbptr);
+ ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
- *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_rbo = rbo;
+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ rbo = gobj->driver_private;
- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ /* okay, we have an object, now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
- rdev->fbdev_info = info;
- rfbdev = info->par;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- rfbdev->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ info->par = rfbdev;
+
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
- memset_io(fbptr, 0x0, aligned_size);
+ fb = &rfbdev->rfb.base;
+
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_start;
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = rdev->ddev->mode_config.fb_base;
- info->aperture_size = rdev->mc.real_vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = rdev->mc.real_vram_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev,
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
- fb->fbdev = info;
- rfbdev->rfb = rfb;
- rfbdev->rdev = rdev;
-
- mutex_unlock(&rdev->ddev->struct_mutex);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
if (rbo) {
- ret = radeon_bo_reserve(rbo, false);
- if (likely(ret == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unreserve(rbo);
- }
+
}
if (fb && ret) {
- list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
-out:
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
static char *mode_option;
int radeon_parse_options(char *options)
{
@@ -328,46 +316,102 @@ int radeon_parse_options(char *options)
return 0;
}
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- struct radeon_device *rdev = dev->dev_private;
- int bpp_sel = 32;
-
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
struct radeon_bo *rbo;
int r;
- if (!fb) {
- return -EINVAL;
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
- info = fb->fbdev;
- if (info) {
- struct radeon_fb_device *rfbdev = info->par;
+
+ if (rfb->obj) {
rbo = rfb->obj->driver_private;
- unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
+ drm_gem_object_unreference_unlocked(rfb->obj);
}
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_cleanup(&rfb->base);
- printk(KERN_INFO "unregistered panic notifier\n");
+ return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+ drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ return true;
+ return false;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c..b1f9a81b5d1 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
+ unsigned long cjiffies;
- if (rdev == NULL) {
- return true;
- }
- if (rdev->shutdown) {
- return true;
- }
seq = RREG32(rdev->fence_drv.scratch_reg);
- rdev->fence_drv.last_seq = seq;
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ } else {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ /* update the timeout */
+ rdev->fence_drv.last_timeout -= cjiffies;
+ } else {
+ /* the 500ms timeout has elapsed, we should test
+ * for GPU lockup
+ */
+ rdev->fence_drv.last_timeout = 1;
+ }
+ } else {
+ /* wrap around, update last jiffies, we will just wait
+ * a little longer
+ */
+ rdev->fence_drv.last_jiffies = cjiffies;
+ }
+ return false;
+ }
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
- unsigned long cur_jiffies;
- unsigned long timeout;
- bool expired = false;
+ unsigned long irq_flags, timeout;
+ u32 seq;
int r;
if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
-
+ timeout = rdev->fence_drv.last_timeout;
retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-
+ /* save current sequence used to check for GPU lockup */
+ seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
- if (unlikely(r < 0))
+ if (unlikely(r < 0)) {
return r;
+ }
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
- if (unlikely(r == 0)) {
- expired = true;
+ /* we were interrupted for some reason and fence isn't
+ * signaled yet, resume wait
+ */
+ if (r) {
+ timeout = r;
+ goto retry;
}
- if (unlikely(expired)) {
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- if (timeout > 500) {
- DRM_ERROR("fence(%p:0x%08X) %lums timeout "
- "going to reset GPU\n",
- fence, fence->seq, timeout);
- radeon_gpu_reset(rdev);
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- }
+ /* don't protect read access to rdev->fence_drv.last_seq
+ * if we are experiencing a lockup the value doesn't change
+ */
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ /* good news we believe it's a lockup */
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ /* FIXME: what should we do? marking everyone
+ * as signaled for now
+ */
+ rdev->gpu_lockup = true;
+ r = radeon_gpu_reset(rdev);
+ if (r)
+ return r;
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ rdev->gpu_lockup = false;
}
+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
- if (unlikely(expired)) {
- rdev->fence_drv.count_timeout++;
- cur_jiffies = jiffies;
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
- fence, fence->seq, timeout);
- DRM_ERROR("last signaled fence(0x%08X)\n",
- rdev->fence_drv.last_seq);
- }
return 0;
}
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
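The fence rework above drops the fixed 2 second per-fence timeout and instead tracks last_seq, last_jiffies and last_timeout in the fence driver: the window only shrinks while the sequence number read back from the scratch register stops advancing, and once it runs out the wait path checks radeon_gpu_is_lockup() and, if that confirms a hang, calls the new radeon_gpu_reset(). The standalone C model below is only a sketch of that bookkeeping; fence_drv_model and FENCE_TIMEOUT_TICKS are made-up stand-ins, not the kernel structures.

/* Simplified model of the lockup-detection window kept per fence driver. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_TIMEOUT_TICKS 500 /* stands in for RADEON_FENCE_JIFFIES_TIMEOUT */

struct fence_drv_model {
	uint32_t last_seq;          /* last sequence number seen in the scratch reg */
	unsigned long last_tick;    /* time at which last_seq last changed */
	unsigned long last_timeout; /* ticks left before a lockup is suspected */
};

/* Returns true when the GPU made progress; false means keep waiting and,
 * once last_timeout reaches 1, test for a lockup. */
static bool fence_poll_model(struct fence_drv_model *drv,
			     uint32_t hw_seq, unsigned long now)
{
	if (hw_seq != drv->last_seq) {
		drv->last_seq = hw_seq; /* progress: restart the window */
		drv->last_tick = now;
		drv->last_timeout = FENCE_TIMEOUT_TICKS;
		return true;
	}
	if (now > drv->last_tick) {
		unsigned long elapsed = now - drv->last_tick;
		if (drv->last_timeout > elapsed)
			drv->last_timeout -= elapsed; /* shrink the window */
		else
			drv->last_timeout = 1;        /* expired: suspect a lockup */
	} else {
		drv->last_tick = now; /* counter wrapped, rebase and wait a bit longer */
	}
	return false;
}

int main(void)
{
	struct fence_drv_model drv = { 0, 0, FENCE_TIMEOUT_TICKS };

	fence_poll_model(&drv, 1, 10);  /* seq advances once... */
	fence_poll_model(&drv, 1, 300); /* ...then stalls */
	fence_poll_model(&drv, 1, 900);
	printf("last_timeout = %lu (1 means: test for a GPU lockup)\n",
	       drv.last_timeout);
	return 0;
}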
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
deleted file mode 100644
index 3d4d84e078a..00000000000
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- */
-#ifndef RADEON_FIXED_H
-#define RADEON_FIXED_H
-
-typedef union rfixed {
- u32 full;
-} fixed20_12;
-
-
-#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
-
-static inline u32 rfixed_floor(fixed20_12 A)
-{
- u32 non_frac = rfixed_trunc(A);
-
- return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_ceil(fixed20_12 A)
-{
- u32 non_frac = rfixed_trunc(A);
-
- if (A.full > rfixed_const(non_frac))
- return rfixed_const(non_frac + 1);
- else
- return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
-{
- u64 tmp = ((u64)A.full << 13);
-
- do_div(tmp, B.full);
- tmp += 1;
- tmp /= 2;
- return lower_32_bits(tmp);
-}
-#endif
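Every rfixed_* call in this patch becomes a dfixed_* call: the driver-private 20.12 fixed-point header deleted above is replaced by the shared <drm/drm_fixed.h> helpers with the same semantics (the low 12 bits carry the fraction). The small standalone C program below re-derives that arithmetic from the deleted macros to show what conversions such as the sclk/100 step in radeon_update_bandwidth_info() compute; the driver keeps clocks in 10 kHz units, so dividing by 100 gives MHz. FX_CONST, FX_TRUNC and fx_div are illustrative stand-ins, not the drm_fixed.h API.

/* 20.12 fixed point, reconstructed from the deleted radeon_fixed.h above. */
#include <stdint.h>
#include <stdio.h>

typedef union { uint32_t full; } fixed20_12;

#define FX_CONST(A) ((uint32_t)((A) << 12)) /* integer -> 20.12 */
#define FX_TRUNC(A) ((A).full >> 12)        /* 20.12 -> integer part */

/* divide with rounding to nearest, mirroring rfixed_div(): <<13, divide, +1, /2 */
static uint32_t fx_div(fixed20_12 a, fixed20_12 b)
{
	uint64_t tmp = ((uint64_t)a.full << 13) / b.full;
	return (uint32_t)((tmp + 1) / 2);
}

int main(void)
{
	/* clocks are stored in 10 kHz units, so clock/100 yields MHz */
	fixed20_12 sclk    = { .full = FX_CONST(40000) }; /* 400 MHz engine clock */
	fixed20_12 hundred = { .full = FX_CONST(100) };
	fixed20_12 mhz     = { .full = fx_div(sclk, hundred) };

	printf("sclk = %u.%03u MHz\n", (unsigned)FX_TRUNC(mhz),
	       (unsigned)((mhz.full & 0xfff) * 1000 / 4096));
	return 0;
}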
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd..e65b90317fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- DRM_ERROR("trying to bind memory to unitialized GART !\n");
+ WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f..a72a3ee5d69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_bo_unref(&robj);
}
+
+ drm_gem_object_release(gobj);
+ kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- if (rdev->fbdev_rbo)
- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0..059bfa4098d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c633319f98e..04068352ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info;
+ struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value_ptr;
uint32_t value;
+ struct drm_crtc *crtc;
+ int i, found;
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
+ value = *value_ptr;
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_ACCEL_WORKING:
value = rdev->accel_working;
break;
+ case RADEON_INFO_CRTC_FROM_ID:
+ for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+ crtc = (struct drm_crtc *)minfo->crtcs[i];
+ if (crtc && crtc->base.id == value) {
+ value = i;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ DRM_DEBUG("unknown crtc id %d\n", value);
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
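The KMS driver minor bump to 2.4.0 above advertises the new RADEON_INFO_CRTC_FROM_ID request added here in radeon_kms.c: userspace passes a DRM crtc object id through the value pointer and gets back the radeon crtc index. The sketch below shows one plausible way to call it from userspace; it assumes the updated drm/radeon_drm.h header is installed (the include path differs between kernel and libdrm header packages) and keeps error handling minimal.

/* Query which radeon crtc index backs a DRM crtc object (hypothetical id 5). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/radeon_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint32_t id = 5;   /* a crtc object id, e.g. taken from drmModeGetResources() */
	uint32_t val = id; /* the ioctl reads the id and overwrites it with the index */
	struct drm_radeon_info info = {
		.request = RADEON_INFO_CRTC_FROM_ID,
		.value = (uintptr_t)&val,
	};

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0)
		printf("crtc object %u -> radeon crtc index %u\n", id, val);
	else
		perror("DRM_IOCTL_RADEON_INFO");

	close(fd);
	return 0;
}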
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 88865e38fe3..e1e5255396a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
}
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0274abe17ad..5a13b3eeef1 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd6308..67358baf28b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
+#include <drm_fixed.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
+struct radeon_bo;
struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_hpd_id {
+ RADEON_HPD_1 = 0,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+ RADEON_HPD_NONE = 0xff,
+};
+
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec {
/* id used by atom */
uint8_t i2c_id;
/* id used by atom */
- uint8_t hpd_id;
+ enum radeon_hpd_id hpd;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -202,6 +213,8 @@ enum radeon_dvo_chip {
DVO_SIL1178,
};
+struct radeon_fbdev;
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -218,6 +231,9 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
@@ -363,16 +380,6 @@ struct radeon_gpio_rec {
u32 mask;
};
-enum radeon_hpd_id {
- RADEON_HPD_NONE = 0,
- RADEON_HPD_1,
- RADEON_HPD_2,
- RADEON_HPD_3,
- RADEON_HPD_4,
- RADEON_HPD_5,
- RADEON_HPD_6,
-};
-
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
#endif
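The radeon_mode.h hunk above replaces the old radeonfb_probe()/radeon_framebuffer_create() entry points with a small fbdev layer. A minimal sketch of the intended call order, based only on the prototypes declared above — the wrapper functions and call sites below are illustrative assumptions, not part of this patch:

	/* hypothetical call sites, shown only to illustrate ordering */
	int example_modeset_init(struct radeon_device *rdev)
	{
		/* bring up the fbdev emulation once KMS is initialized */
		return radeon_fbdev_init(rdev);
	}

	void example_suspend(struct radeon_device *rdev)
	{
		/* 1 = suspend the fbdev console, 0 = resume it */
		radeon_fbdev_set_suspend(rdev, 1);
	}

	void example_modeset_fini(struct radeon_device *rdev)
	{
		/* tear the fbdev layer down before the device goes away */
		radeon_fbdev_fini(rdev);
	}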
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd..d5b9373ce06 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocations are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
+ mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(rdev->dev,
@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
+ struct radeon_device *rdev;
if ((*bo) == NULL)
return;
+ rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
+ mutex_lock(&rdev->vram_mutex);
ttm_bo_unref(&tbo);
+ mutex_unlock(&rdev->vram_mutex);
if (tbo == NULL)
*bo = NULL;
}
@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
r = radeon_bo_reserve(lobj->bo, false);
if (unlikely(r != 0))
return r;
+ lobj->reserved = true;
}
return 0;
}
@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
list_for_each_entry(lobj, head, list) {
/* only unreserve object we successfully reserved */
- if (radeon_bo_is_reserved(lobj->bo))
+ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
radeon_bo_unreserve(lobj->bo);
}
}
@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
struct radeon_bo *bo;
int r;
+ list_for_each_entry(lobj, head, list) {
+ lobj->reserved = false;
+ }
r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
return r;
@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}
@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_bo_check_tiling(rbo, 0, 1);
}
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct radeon_device *rdev;
struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
+ return 0;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* hurrah, the memory is not visible! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
}
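radeon_bo_fault_reserve_notify() now returns an error code and, when a faulting BO sits past the CPU-visible part of VRAM, re-validates it with placement.lpfn capped to the visible window. A standalone sketch of the visibility test used above, with illustrative numbers — the helper itself is hypothetical:

	/* e.g. a 16 MiB BO starting at 248 MiB fails against a 256 MiB visible aperture */
	static bool example_bo_is_cpu_visible(unsigned long start_page,
					      unsigned long num_pages,
					      unsigned long visible_vram_size)
	{
		unsigned long offset = start_page << PAGE_SHIFT;	/* mem->mm_node->start */
		unsigned long size = num_pages << PAGE_SHIFT;		/* mem->num_pages */

		/* same check as in the fault handler: the BO must end inside the window */
		return (offset + size) <= visible_vram_size;
	}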
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e24..353998dc2c0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a4b57493aa7..a8d162c6f82 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,164 +23,122 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_idle_work_handler(struct work_struct *work);
-static int radeon_debugfs_pm_init(struct radeon_device *rdev);
-
-static const char *pm_state_names[4] = {
- "PM_STATE_DISABLED",
- "PM_STATE_MINIMUM",
- "PM_STATE_PAUSED",
- "PM_STATE_ACTIVE"
-};
-static const char *pm_state_types[5] = {
- "Default",
- "Powersave",
- "Battery",
- "Balanced",
- "Performance",
-};
+#define ACPI_AC_CLASS "ac_adapter"
-static void radeon_print_power_mode_info(struct radeon_device *rdev)
+#ifdef CONFIG_ACPI
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
{
- int i, j;
- bool is_default;
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
- DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
- is_default = true;
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG("pm: AC\n");
else
- is_default = false;
- DRM_INFO("State %d %s %s\n", i,
- pm_state_types[rdev->pm.power_state[i].type],
- is_default ? "(default)" : "");
- if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
- DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
- DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
- for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
- if (rdev->flags & RADEON_IS_IGP)
- DRM_INFO("\t\t%d engine: %d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10);
- else
- DRM_INFO("\t\t%d engine/memory: %d/%d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10,
- rdev->pm.power_state[i].clock_info[j].mclk * 10);
+ DRM_DEBUG("pm: DC\n");
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
}
}
+
+ return NOTIFY_OK;
}
+#endif
-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type type)
+static void radeon_pm_update_profile(struct radeon_device *rdev)
{
- int i, j;
- enum radeon_pm_state_type wanted_types[2];
- int wanted_count;
-
- switch (type) {
- case POWER_STATE_TYPE_DEFAULT:
- default:
- return rdev->pm.default_power_state;
- case POWER_STATE_TYPE_POWERSAVE:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
- wanted_types[1] = POWER_STATE_TYPE_BATTERY;
- wanted_count = 2;
- } else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
- }
+ switch (rdev->pm.profile) {
+ case PM_PROFILE_DEFAULT:
+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
break;
- case POWER_STATE_TYPE_BATTERY:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_BATTERY;
- wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
- wanted_count = 2;
+ case PM_PROFILE_AUTO:
+ if (power_supply_is_system_supplied() > 0) {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
}
break;
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_PERFORMANCE:
- wanted_types[0] = type;
- wanted_count = 1;
+ case PM_PROFILE_LOW:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
- }
-
- for (i = 0; i < wanted_count; i++) {
- for (j = 0; j < rdev->pm.num_power_states; j++) {
- if (rdev->pm.power_state[j].type == wanted_types[i])
- return &rdev->pm.power_state[j];
- }
- }
-
- return rdev->pm.default_power_state;
-}
-
-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
- struct radeon_power_state *power_state,
- enum radeon_pm_clock_mode_type type)
-{
- switch (type) {
- case POWER_MODE_TYPE_DEFAULT:
- default:
- return power_state->default_clock_mode;
- case POWER_MODE_TYPE_LOW:
- return &power_state->clock_info[0];
- case POWER_MODE_TYPE_MID:
- if (power_state->num_clock_modes > 2)
- return &power_state->clock_info[1];
+ case PM_PROFILE_HIGH:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
- return &power_state->clock_info[0];
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
break;
- case POWER_MODE_TYPE_HIGH:
- return &power_state->clock_info[power_state->num_clock_modes - 1];
}
+ if (rdev->pm.active_crtc_count == 0) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+ } else {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+ }
}
-static void radeon_get_power_state(struct radeon_device *rdev,
- enum radeon_pm_action action)
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
- switch (action) {
- case PM_ACTION_MINIMUM:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
- break;
- case PM_ACTION_UPCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
- break;
- case PM_ACTION_NONE:
- default:
- DRM_ERROR("Requested mode for not defined action\n");
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects))
return;
+
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ ttm_bo_unmap_virtual(&bo->tbo);
}
- DRM_INFO("Requested: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
+ if (rdev->gart.table.vram.robj)
+ ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+ if (rdev->stollen_vga_memory)
+ ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+ if (rdev->r600_blit.shader_obj)
+ ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
-static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
@@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
- /* if *_clock_mode are the same, *_power_state are as well */
- if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+ u32 sclk, mclk;
+
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
- DRM_INFO("Setting: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
-
- /* set pcie lanes */
- /* TODO */
-
- /* set voltage */
- /* TODO */
-
- /* set engine clock */
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
-
-#if 0
- /* set memory clock */
- if (rdev->asic->set_memory_clock) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
+ if (radeon_gui_idle(rdev)) {
+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk;
+ if (sclk > rdev->clock.default_sclk)
+ sclk = rdev->clock.default_sclk;
+
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+ if (mclk > rdev->clock.default_mclk)
+ mclk = rdev->clock.default_mclk;
+
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ radeon_sync_with_vblank(rdev);
+
+ if (!radeon_pm_in_vbl(rdev))
+ return;
+
+ radeon_pm_prepare(rdev);
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ radeon_pm_finish(rdev);
+ } else {
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ }
+
+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+ } else
+ DRM_DEBUG("pm: GUI not idle!!!\n");
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ int i;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ mutex_lock(&rdev->vram_mutex);
+ mutex_lock(&rdev->cp.mutex);
+
+ /* the gui idle interrupt seems unreliable on older (pre-R600) chips */
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->irq.installed) {
+ /* wait for GPU idle */
+ rdev->pm.gui_idle = false;
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ wait_event_interruptible_timeout(
+ rdev->irq.idle_queue, rdev->pm.gui_idle,
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ }
+ } else {
+ if (rdev->cp.ready) {
+ struct radeon_fence *fence;
+ radeon_ring_alloc(rdev, 64);
+ radeon_fence_create(rdev, &fence);
+ radeon_fence_emit(rdev, fence);
+ radeon_ring_commit(rdev);
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ }
}
-#endif
+ radeon_unmap_vram_bos(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.active_crtcs & (1 << i)) {
+ rdev->pm.req_vblank |= (1 << i);
+ drm_vblank_get(rdev->ddev, i);
+ }
+ }
+ }
+
+ radeon_set_power_state(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.req_vblank & (1 << i)) {
+ rdev->pm.req_vblank &= ~(1 << i);
+ drm_vblank_put(rdev->ddev, i);
+ }
+ }
+ }
+
+ /* update display watermarks based on new power state */
+ radeon_update_bandwidth_info(rdev);
+ if (rdev->pm.active_crtc_count)
+ radeon_bandwidth_update(rdev);
+
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+ mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&rdev->vram_mutex);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int cp = rdev->pm.profile;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (cp == PM_PROFILE_AUTO) ? "auto" :
+ (cp == PM_PROFILE_LOW) ? "low" :
+ (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (strncmp("default", buf, strlen("default")) == 0)
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ else if (strncmp("auto", buf, strlen("auto")) == 0)
+ rdev->pm.profile = PM_PROFILE_AUTO;
+ else if (strncmp("low", buf, strlen("low")) == 0)
+ rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("high", buf, strlen("high")) == 0)
+ rdev->pm.profile = PM_PROFILE_HIGH;
+ else {
+ DRM_ERROR("invalid power profile!\n");
+ goto fail;
+ }
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+fail:
+ mutex_unlock(&rdev->pm.mutex);
+
+ return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int pm = rdev->pm.pm_method;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+
+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_DYNPM;
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ /* disable dynpm */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ DRM_ERROR("invalid power method!\n");
+ goto fail;
+ }
+ radeon_pm_compute_clocks(rdev);
+fail:
+ return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
- rdev->pm.current_power_state = rdev->pm.requested_power_state;
- rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->pm.mutex);
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+ radeon_pm_compute_clocks(rdev);
}
int radeon_pm_init(struct radeon_device *rdev)
{
- rdev->pm.state = PM_STATE_DISABLED;
- rdev->pm.planned_action = PM_ACTION_NONE;
- rdev->pm.downclocked = false;
+ int ret;
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
- radeon_print_power_mode_info(rdev);
+ radeon_pm_init_profile(rdev);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
}
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for PM!\n");
- }
+ if (rdev->pm.num_power_states > 1) {
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+#endif
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
- INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
- if (radeon_dynpm != -1 && radeon_dynpm) {
- rdev->pm.state = PM_STATE_PAUSED;
- DRM_INFO("radeon: dynamic power management enabled\n");
+ DRM_INFO("radeon: power management initialized\n");
}
- DRM_INFO("radeon: power management initialized\n");
-
return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
+ if (rdev->pm.num_power_states > 1) {
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ /* cancel work */
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+ /* reset default clocks */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_set_clocks(rdev);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ device_remove_file(rdev->dev, &dev_attr_power_profile);
+ device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+ }
+
if (rdev->pm.i2c_bus)
radeon_i2c_destroy(rdev->pm.i2c_bus);
}
@@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev)
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
- struct drm_connector *connector;
+ struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- int count = 0;
- if (rdev->pm.state == PM_STATE_DISABLED)
+ if (rdev->pm.num_power_states < 2)
return;
mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
- list_for_each_entry(connector,
- &ddev->mode_config.connector_list, head) {
- if (connector->encoder &&
- connector->encoder->crtc &&
- connector->dpms != DRM_MODE_DPMS_OFF) {
- radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+ rdev->pm.active_crtc_count = 0;
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- ++count;
+ rdev->pm.active_crtc_count++;
}
}
- if (count > 1) {
- if (rdev->pm.state == PM_STATE_ACTIVE) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_PAUSED;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- if (rdev->pm.downclocked)
- radeon_pm_set_clocks(rdev);
-
- DRM_DEBUG("radeon: dynamic power management deactivated\n");
- }
- } else if (count == 1) {
- /* TODO: Increase clocks if needed for current mode */
-
- if (rdev->pm.state == PM_STATE_MINIMUM) {
- rdev->pm.state = PM_STATE_ACTIVE;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- radeon_pm_set_clocks(rdev);
-
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- }
- else if (rdev->pm.state == PM_STATE_PAUSED) {
- rdev->pm.state = PM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- DRM_DEBUG("radeon: dynamic power management activated\n");
- }
- }
- else { /* count == 0 */
- if (rdev->pm.state != PM_STATE_MINIMUM) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_MINIMUM;
- rdev->pm.planned_action = PM_ACTION_MINIMUM;
- radeon_pm_set_clocks(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+ if (rdev->pm.active_crtc_count > 1) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (rdev->pm.active_crtc_count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+ } else { /* count == 0 */
+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+ }
}
}
mutex_unlock(&rdev->pm.mutex);
}
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
- u32 stat_crtc1 = 0, stat_crtc2 = 0;
+ u32 stat_crtc = 0, vbl = 0, position = 0;
bool in_vbl = true;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 2)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 3)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 4)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 5)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (position < vbl && position > 1)
+ in_vbl = false;
+ } else {
if (rdev->pm.active_crtcs & (1 << 0)) {
- stat_crtc1 = RREG32(D1CRTC_STATUS);
- if (!(stat_crtc1 & 1))
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
if (rdev->pm.active_crtcs & (1 << 1)) {
- stat_crtc2 = RREG32(D2CRTC_STATUS);
- if (!(stat_crtc2 & 1))
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
}
- if (in_vbl == false)
- DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
- stat_crtc2, finish ? "exit" : "entry");
- return in_vbl;
-}
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
-{
- /*radeon_fence_wait_last(rdev);*/
- switch (rdev->pm.planned_action) {
- case PM_ACTION_UPCLOCK:
- rdev->pm.downclocked = false;
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.downclocked = true;
- break;
- case PM_ACTION_MINIMUM:
- break;
- case PM_ACTION_NONE:
- DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
- break;
- }
- radeon_set_power_state(rdev);
- rdev->pm.planned_action = PM_ACTION_NONE;
+ if (position < vbl && position > 1)
+ in_vbl = false;
+
+ return in_vbl;
}
-static void radeon_pm_set_clocks(struct radeon_device *rdev)
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
- radeon_get_power_state(rdev, rdev->pm.planned_action);
- mutex_lock(&rdev->cp.mutex);
+ u32 stat_crtc = 0;
+ bool in_vbl = radeon_pm_in_vbl(rdev);
- if (rdev->pm.active_crtcs & (1 << 0)) {
- rdev->pm.req_vblank |= (1 << 0);
- drm_vblank_get(rdev->ddev, 0);
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- rdev->pm.req_vblank |= (1 << 1);
- drm_vblank_get(rdev->ddev, 1);
- }
- radeon_pm_set_clocks_locked(rdev);
- if (rdev->pm.req_vblank & (1 << 0)) {
- rdev->pm.req_vblank &= ~(1 << 0);
- drm_vblank_put(rdev->ddev, 0);
- }
- if (rdev->pm.req_vblank & (1 << 1)) {
- rdev->pm.req_vblank &= ~(1 << 1);
- drm_vblank_put(rdev->ddev, 1);
- }
-
- mutex_unlock(&rdev->cp.mutex);
+ if (in_vbl == false)
+ DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+ finish ? "exit" : "entry");
+ return in_vbl;
}
-static void radeon_pm_idle_work_handler(struct work_struct *work)
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
+ int resched;
rdev = container_of(work, struct radeon_device,
- pm.idle_work.work);
+ pm.dynpm_idle_work.work);
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.state == PM_STATE_ACTIVE) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
unsigned long irq_flags;
int not_processed = 0;
@@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
- if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_UPCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_upclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_UPCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
} else if (not_processed == 0) { /* should downclock */
- if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- !rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_DOWNCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_downclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_DOWNCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
}
- if (rdev->pm.planned_action != PM_ACTION_NONE &&
- jiffies > rdev->pm.action_timeout) {
+ /* Note, radeon_pm_set_clocks is called with static_switch set
+ * to false since we want to wait for vbl to avoid flicker.
+ */
+ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+ jiffies > rdev->pm.dynpm_action_timeout) {
+ radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
}
mutex_unlock(&rdev->pm.mutex);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
@@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index eabbc9cf30a..c332f46340d 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -553,7 +553,6 @@
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
-#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
@@ -995,6 +994,7 @@
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
+# define RADEON_GUI_IDLE_MASK (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
@@ -1006,6 +1006,8 @@
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
+# define RADEON_GUI_IDLE_STAT (1 << 19)
+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d98..261e98a276d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *robj;
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
radeon_ib_bogus_cleanup(rdev);
+ robj = rdev->ib_pool.robj;
+ rdev->ib_pool.robj = NULL;
+ mutex_unlock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.robj) {
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (robj) {
+ r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->ib_pool.robj);
- radeon_bo_unpin(rdev->ib_pool.robj);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+ radeon_bo_kunmap(robj);
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
}
- radeon_bo_unref(&rdev->ib_pool.robj);
- rdev->ib_pool.robj = NULL;
+ radeon_bo_unref(&robj);
}
- mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
}
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- mutex_lock(&rdev->cp.mutex);
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev);
- if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ if (r)
return r;
- }
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ mutex_lock(&rdev->cp.mutex);
+ r = radeon_ring_alloc(rdev, ndw);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *ring_obj;
mutex_lock(&rdev->cp.mutex);
- if (rdev->cp.ring_obj) {
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ ring_obj = rdev->cp.ring_obj;
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ mutex_unlock(&rdev->cp.mutex);
+
+ if (ring_obj) {
+ r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->cp.ring_obj);
- radeon_bo_unpin(rdev->cp.ring_obj);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_kunmap(ring_obj);
+ radeon_bo_unpin(ring_obj);
+ radeon_bo_unreserve(ring_obj);
}
- radeon_bo_unref(&rdev->cp.ring_obj);
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
+ radeon_bo_unref(&ring_obj);
}
- mutex_unlock(&rdev->cp.mutex);
}
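radeon_ring_lock()/radeon_ring_unlock_commit() are split so that the cp.mutex handling and the actual ring work can be taken separately. Callers that already hold rdev->cp.mutex — such as the reclock path in radeon_pm_set_clocks(), which emits and waits on a fence to drain the CP on pre-R600 parts — use the new unlocked pair, while everyone else keeps the locked wrappers. A sketch of the two patterns (error handling trimmed):

	/* caller that does not hold rdev->cp.mutex */
	radeon_ring_lock(rdev, 64);
	/* ... write packets ... */
	radeon_ring_unlock_commit(rdev);

	/* caller that already holds rdev->cp.mutex (e.g. the PM reclock path above) */
	radeon_ring_alloc(rdev, 64);
	/* ... write packets ... */
	radeon_ring_commit(rdev);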
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index cc5316dcf58..b3ba44c0a81 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
flags |= RADEON_FRONT;
}
if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
- if (!dev_priv->have_z_offset)
+ if (!dev_priv->have_z_offset) {
printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
- flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
}
if (flags & (RADEON_FRONT | RADEON_BACK)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b686308..e9918d88f5b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
(unsigned)type);
return -EINVAL;
}
- man->io_offset = rdev->mc.agp_base;
- man->io_size = rdev->mc.gtt_size;
- man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- } else
-#endif
- {
- man->io_offset = 0;
- man->io_size = 0;
- man->io_addr = NULL;
}
+#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->io_addr = NULL;
- man->io_offset = rdev->mc.aper_base;
- man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -394,8 +384,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
-
return r;
}
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
+ struct radeon_device *rdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
if (bo == NULL) {
return VM_FAULT_NOPAGE;
}
+ rdev = radeon_get_rdev(bo->bdev);
+ mutex_lock(&rdev->vram_mutex);
r = ttm_vm_ops->fault(vma, vmf);
+ mutex_unlock(&rdev->vram_mutex);
return r;
}
@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
}
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
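Instead of the removed io_offset/io_size/io_addr fields, the driver now reports apertures on demand through the new io_mem_reserve()/io_mem_free() hooks: the callback fills mem->bus (base, offset, size, is_iomem) and rejects VRAM that falls outside the CPU-visible window. A hedged sketch of how a consumer could map the result — the helper is hypothetical and assumes <linux/io.h>; the real mapping code lives in the TTM core:

	static void __iomem *example_map_io_mem(struct ttm_mem_reg *mem)
	{
		if (!mem->bus.is_iomem)
			return NULL;	/* system memory or cacheable AGP: nothing to ioremap */
		/* write-combined mapping of the aperture window backing this BO */
		return ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
	}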
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b7..9e4240b3bf0 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs400 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e1..79887cac5b5 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+ }
+ } else {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+ }
+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+ } else
+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ hdp_dyn_cntl &= ~HDP_FORCEON;
+ else
+ hdp_dyn_cntl |= HDP_FORCEON;
+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+ /* mc_host_dyn seems to cause hangs from time to time */
+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+ else
+ mc_host_dyn_cntl |= MC_HOST_FORCEON;
+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+ else
+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
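rs600_pm_prepare() and rs600_pm_finish() are meant to bracket a reclock: display read requests are gated off, clocks are changed while the CRTCs are quiet, then requests are re-enabled. A rough sketch of that intended call order (the reclocking step in the middle and new_sclk are illustrative; the real sequencing lives in the shared radeon power-management code, not in this file):

    rs600_pm_prepare(rdev);                   /* CRTCs stop issuing memory reads */
    radeon_set_engine_clock(rdev, new_sclk);  /* reclock while displays are idle (illustrative) */
    rs600_pm_finish(rdev);                    /* CRTCs may fetch again */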
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+	u16 tmp;
+
+	/* disable bus mastering */
+	pci_read_config_word(rdev->pdev, 0x4, &tmp);
+	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+	mdelay(1);
+}
+
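Config offset 0x4 is the standard PCI command register, and the bit masked off by 0xFFFB (bit 2) is Bus Master Enable, so this stops the chip from issuing DMA before the soft reset below. A sketch of the same thing using the generic PCI helper, assuming pci_clear_master() is available in the target kernel:

    static void rs600_bm_disable_alt(struct radeon_device *rdev)
    {
        pci_clear_master(rdev->pdev);   /* clears PCI_COMMAND_MASTER (bit 2) */
        mdelay(1);                      /* give in-flight DMA a moment to drain */
    }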
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+	u32 status, tmp;
+	struct rv515_mc_save save;
+
+	/* a reset only makes sense if the GUI engine is actually busy/hung */
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ rs600_bm_disable(rdev);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
+ return -1;
+ }
+ rv515_mc_resume(rdev, &save);
+	dev_info(rdev->dev, "GPU reset succeeded\n");
+ return 0;
+}
+
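rs600_asic_reset() repeats the same pulse three times: write the block bits into RBBM_SOFT_RESET, read the register back to post the write, hold the reset, then clear it and let RBBM_STATUS settle. A hypothetical helper capturing that pattern, using only the register and macros this patch introduces:

    static void rs600_soft_reset_pulse(struct radeon_device *rdev, u32 reset_bits)
    {
        WREG32(R_0000F0_RBBM_SOFT_RESET, reset_bits);
        RREG32(R_0000F0_RBBM_SOFT_RESET);   /* read back to post the write */
        mdelay(500);                        /* hold reset, as above */
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);                          /* let RBBM_STATUS settle */
    }

    /* the GA+VAP step above would then read: */
    rs600_soft_reset_pulse(rdev, S_0000F0_SOFT_RESET_VAP(1) | S_0000F0_SOFT_RESET_GA(1));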
/*
* GART.
*/
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
+ if (rdev->irq.gui_idle) {
+ tmp |= S_000040_GUI_IDLE(1);
+ }
if (rdev->irq.crtc_vblank_int[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
- uint32_t irq_mask = ~C_000044_SW_INT;
+ uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= S_000044_GUI_IDLE_STAT(1);
+ }
+
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
uint32_t r500_disp_int;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
/* SW interrupt */
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
+ /* GUI idle */
+ if (G_000040_GUI_IDLE(status)) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
}
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
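The gui_idle plumbing added above lets a sleeper unmask the GUI_IDLE interrupt and block until the engine has drained: the handler sets rdev->pm.gui_idle and wakes rdev->irq.idle_queue. A sketch of the consumer side, with field names taken from these hunks (the actual call site is in the shared radeon power-management code, and the 500 ms timeout is illustrative):

    rdev->pm.gui_idle = false;
    rdev->irq.gui_idle = true;               /* unmask GUI_IDLE */
    rs600_irq_set(rdev);
    wait_event_interruptible_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
                                     msecs_to_jiffies(500));
    rdev->irq.gui_idle = false;              /* mask it again */
    rs600_irq_set(rdev);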
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs600_mc_init(rdev);
rs600_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510..a27c13ac47c 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
@@ -588,4 +634,38 @@
#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+/* PLL regs */
+#define GENERAL_PWRMGT 0x8
+#define GLOBAL_PWRMGT_EN (1 << 0)
+#define MOBILE_SU (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH 0xc
+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
+#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
+#define STATIC_SCREEN_HILEN(x) ((x) << 24)
+#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
+#define DYN_SCLK_VOL_CNTL 0xe
+#define IO_CG_VOLTAGE_DROP (1 << 0)
+#define VOLTAGE_DROP_SYNC (1 << 2)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
+#define HDP_DYN_CNTL 0x10
+#define HDP_FORCEON (1 << 0)
+#define MC_HOST_DYN_CNTL 0x1e
+#define MC_HOST_FORCEON (1 << 0)
+#define DYN_BACKBIAS_CNTL 0x29
+#define IO_CG_BACKBIAS_EN (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
+#define PWRDN_WAIT_BUSY_OFF (1 << 0)
+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
+#define PWRDN_WAIT_PPLL_OFF (1 << 8)
+#define PWRUP_WAIT_PPLL_ON (1 << 12)
+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
+#define PM_ASSERT_RESET (1 << 20)
+#define PM_PWRDN_PPLL (1 << 24)
+
#endif
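The new register definitions follow the usual radeon header convention: for each field, S_xxx(v) shifts a value into position, G_xxx(r) extracts it from a register word, and C_xxx is the AND mask that clears the field. A small read-modify-write sketch using the soft-reset CP field defined above:

    u32 tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);

    tmp &= C_0000F0_SOFT_RESET_CP;          /* clear the CP field */
    tmp |= S_0000F0_SOFT_RESET_CP(1);       /* then set it to 1 */
    WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
    if (G_0000F0_SOFT_RESET_CP(tmp))
        DRM_INFO("CP soft-reset bit is set\n");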
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd..bcc33195ebc 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs690 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
/* Get various system informations from bios */
switch (crev) {
case 1:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
- rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
break;
case 2:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
default:
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
break;
}
} else {
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
}
/* Compute various bandwidth */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
- tmp.full = rfixed_const(4);
- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+ tmp.full = dfixed_const(4);
+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
* = ht_clk * ht_width / 5
*/
- tmp.full = rfixed_const(5);
- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+ tmp.full = dfixed_const(5);
+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
rdev->pm.igp_ht_link_width);
- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
if (tmp.full < rdev->pm.max_bandwidth.full) {
/* HT link is a limiting factor */
rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
* = (sideport_clk * 14) / 10
*/
- tmp.full = rfixed_const(14);
- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
- tmp.full = rfixed_const(10);
- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+ tmp.full = dfixed_const(14);
+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+ tmp.full = dfixed_const(10);
+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
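The rfixed_* to dfixed_* rename tracks the move of radeon's 20.12 fixed-point helpers into the shared drm_fixed.h; the arithmetic itself is unchanged. Assuming the fixed20_12 type and the semantics used above (dfixed_const() loads an integer, dfixed_mul()/dfixed_div() combine values, dfixed_trunc() drops the fractional bits), the k8_bandwidth line works out like this sketch:

    fixed20_12 mclk, four, bw;

    mclk.full = dfixed_const(400);          /* e.g. a 400 MHz system memory clock */
    four.full = dfixed_const(4);
    /* k8_bandwidth = memory_clk * 4, per the comment in the hunk above */
    bw.full = dfixed_mul(mclk, four);
    /* dfixed_trunc(bw) == 1600 */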
void rs690_mc_init(struct radeon_device *rdev)
@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ration, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Maximun bandwidth is the minimun bandwidth of all component */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
- read_delay_latency.full = rfixed_div(read_delay_latency,
+ read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
- read_delay_latency.full = rfixed_const(5000);
+ read_delay_latency.full = dfixed_const(5000);
}
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
- a.full = rfixed_const(16);
- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
- a.full = rfixed_const(1000);
- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+ a.full = dfixed_const(16);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+ a.full = dfixed_const(1000);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
- a.full = rfixed_const(256 * 13);
- chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
- a.full = rfixed_const(10);
- chunk_time.full = rfixed_div(chunk_time, a);
+ a.full = dfixed_const(256 * 13);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+ a.full = dfixed_const(10);
+ chunk_time.full = dfixed_div(chunk_time, a);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
- a.full = rfixed_const(2);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ a.full = dfixed_const(2);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
}
@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32bits (4 bytes) */
- wm->dbpp.full = rfixed_const(4 * 8);
+ wm->dbpp.full = dfixed_const(4 * 8);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = dfixed_const(10);
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
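As a rough worked example of the formulas above: for a 1920x1080 mode with mode->clock = 148500 (kHz), crtc_htotal = 2200 and no scaling (RMX_OFF), pclk = 1000 / (148500 / 1000) ~= 6.73 ns, so consumption_rate ~= 1 / 6.73 ~= 0.149 pixels/ns; line_time = 2200 * 6.73 ~= 14.8 us, and active_time ~= 14.8 us * 1920 / 2200 ~= 12.9 us. The numbers are illustrative only; the code keeps everything in 20.12 fixed point rather than floating point.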
@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b5..7d9a7b0a180 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
- r100_rb2d_reset(rdev);
-
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
-
rv515_vga_render_disable(rdev);
-
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
-int rv515_ga_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(CP_CSQ_MODE, 0);
- WREG32(CP_CSQ_CNTL, 0);
- WREG32(RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- rv515_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
- /* Check if GPU is idle */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
- return -1;
- }
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
- return 0;
-}
-
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ration, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(Mhz)
*/
- a.full = rfixed_const(600 * 1000);
- chunk_time.full = rfixed_div(a, rdev->pm.sclk);
- read_delay_latency.full = rfixed_const(1000);
+ a.full = dfixed_const(600 * 1000);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+ read_delay_latency.full = dfixed_const(1000);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32bits (4 bytes) */
- wm->dbpp.full = rfixed_const(2 * 16);
+ wm->dbpp.full = dfixed_const(2 * 16);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = wm->priority_mark_max.full;
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384..590309a710b 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1..253f24aec03 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,10 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+	/* no rv770-specific PM tweaks yet; placeholder for the per-ASIC pm_misc hook */
+}
/*
* GART
@@ -237,7 +241,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -272,6 +275,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
@@ -906,23 +914,12 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
- /* FIXME: implement any rv770 specific bits */
- return r600_gpu_reset(rdev);
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
int r;
@@ -1094,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1132,7 +1127,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2524c..2d0c9ca484c 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
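The kzalloc() conversion is purely mechanical; kzalloc(size, flags) behaves exactly like the removed pair of calls:

    p = kmalloc(size, flags);
    if (p)
        memset(p, 0, size);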
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae0..4256e200647 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a30..555ebb12ace 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched)
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved = false;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved = false;
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved = false;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset, unsigned long *bus_size)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- *bus_size = 0;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- if (ttm_mem_reg_is_pci(bdev, mem)) {
- *bus_offset = mem->mm_node->start << PAGE_SHIFT;
- *bus_size = mem->num_pages << PAGE_SHIFT;
- *bus_base = man->io_offset;
- }
-
- return 0;
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}
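Note (illustrative only, not part of the patch): the hunks above replace the single no_wait flag in the ttm_bo interfaces with separate no_wait_reserve and no_wait_gpu flags. A minimal sketch of a driver-side validation call under the new signature, where bo and placement are hypothetical objects owned by the caller:

	int ret;

	ret = ttm_bo_validate(bo, &placement,
			      true,    /* interruptible */
			      false,   /* no_wait_reserve = false: may block on the reservation */
			      false);  /* no_wait_gpu = false: may block until the GPU is idle */
	if (ret)
		return ret;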
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799..13012a1f148 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ int ret;
+
+ if (!mem->bus.io_reserved) {
+ mem->bus.io_reserved = true;
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ if (bdev->driver->io_mem_reserve) {
+ if (mem->bus.io_reserved) {
+ mem->bus.io_reserved = false;
+ bdev->driver->io_mem_free(bdev, mem);
+ }
+ }
+}
+
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret || !mem->bus.is_iomem)
return ret;
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
- else {
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ if (!addr) {
+ ttm_mem_io_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
man = &bdev->man[mem->mem_type];
- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
+ unsigned long offset,
+ unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ unsigned long offset, size;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
if (ret)
return ret;
- if (bus_size == 0) {
+ if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd..fe6cb77899f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify)
- bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
/*
* Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
+ ret = ttm_mem_io_reserve(bdev, &bo->mem);
+ if (ret) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
}
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (bo->mem.bus.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (bo->mem.bus.is_iomem)
+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e..e70ddd82dc0 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
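For reference, the max_pages limit handed to ttm_page_alloc_init() above is half of the kernel zone expressed in pages: max_mem is in bytes, so max_mem / (2 * PAGE_SIZE) = (max_mem / PAGE_SIZE) / 2. With 4 KiB pages and, say, a 2 GiB kernel zone, that cap would be 2 GiB / 8 KiB = 262144 pages, i.e. at most 1 GiB of pooled pages (the 2 GiB figure is only an example, not taken from the patch).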
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 00000000000..0d9a42c2394
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+ spinlock_t lock;
+ bool fill_lock;
+ struct list_head list;
+ int gfp_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there are
+ * some pages to free.
+ * @small_allocation: Limit, in number of pages, below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ struct kobject kobj;
+ struct shrinker mm_shrink;
+ atomic_t page_alloc_inited;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+ } ;
+ };
+};
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+ "is not allowed. Recomended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING "[ttm] Setting allocation size to "
+ "larger than %lu is not recomended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+ .page_alloc_inited = ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ if (set_pages_array_wb(pages, npages))
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: pool to free the pages from
+ * @nr_free: number of pages to free; pass FREE_ALL_PAGES to free all pages
+ * in the pool.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_to_free) {
+ printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ return 0;
+ }
+
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ list_for_each_entry_reverse(p, &pool->list, lru) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ __list_del(p->lru.prev, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because the
+ * following code runs inside the spinlock while we are
+ * outside it here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager.pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for mm to request the pool to reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+ cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+ cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are
+ * any pages that have already changed their caching state, put them back
+ * to the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ struct page **failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ list_del(&failed_pages[i]->lru);
+ __free_page(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages array.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ struct page **caching_array;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ return -ENOMEM;
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = alloc_page(gfp_flags);
+
+ if (!p) {
+ printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of highmem page should never be dma32 so we
+ * should be fine in such a case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ list_add(&p->lru, pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+
+ return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ struct page *p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+ * If the pool doesn't have enough pages for the allocation, new pages are
+ * allocated from outside the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, we fill the pool first */
+ if (count < _manager.options.small
+ && count > pool->npages) {
+ struct list_head new_pages;
+ unsigned alloc_size = _manager.options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ INIT_LIST_HEAD(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+ cstate, alloc_size);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ if (!r) {
+ list_splice(&new_pages, &pool->list);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ /* If we have any pages left put them to the pool. */
+ list_for_each_entry(p, &pool->list, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ unsigned long irq_flags;
+ struct list_head *p;
+ unsigned i;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ list_splice_init(&pool->list, pages);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ /* Find the last page to include for the requested number of pages. Split
+ * the pool into halves and search from whichever end is closer to reduce
+ * the search space. */
+ if (count <= pool->npages/2) {
+ i = 0;
+ list_for_each(p, &pool->list) {
+ if (++i == count)
+ break;
+ }
+ } else {
+ i = pool->npages + 1;
+ list_for_each_prev(p, &pool->list) {
+ if (--i == count)
+ break;
+ }
+ }
+ /* Cut count number of pages from pool */
+ list_cut_position(pages, &pool->list, p);
+ pool->npages -= count;
+ count = 0;
+out:
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return count;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p = NULL;
+ int gfp_flags = 0;
+ int r;
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= __GFP_HIGHMEM;
+
+ for (r = 0; r < count; ++r) {
+ p = alloc_page(gfp_flags);
+ if (!p) {
+
+ printk(KERN_ERR "[ttm] unable to allocate page.");
+ return -ENOMEM;
+ }
+
+ list_add(&p->lru, pages);
+ }
+ return 0;
+ }
+
+
+ /* combine zero flag to pool flags */
+ gfp_flags |= pool->gfp_flags;
+
+ /* First we take pages from the pool */
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+ clear_page(page_address(p));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count > 0) {
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ if (r) {
+ /* If there are any pages in the list put them back to
+ * the pool. */
+ printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate);
+ return r;
+ }
+ }
+
+
+ return 0;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager.options.max_size) {
+ page_count = pool->npages - _manager.options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ spin_lock_init(&pool->lock);
+ pool->fill_lock = false;
+ INIT_LIST_HEAD(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret;
+ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+ return 0;
+
+ printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+ "wc dma");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+ "uc dma");
+
+ _manager.options.max_size = max_pages;
+ _manager.options.small = SMALL_ALLOCATION;
+ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager.kobj);
+ return ret;
+ }
+
+ ttm_pool_mm_shrink_init(&_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini()
+{
+ int i;
+
+ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+ return;
+
+ printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ ttm_pool_mm_shrink_fini(&_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (atomic_read(&_manager.page_alloc_inited) == 0) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager.pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
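Note (illustrative only, not kernel code and not part of the patch): the new allocator keeps NUM_POOLS = 4 pools selected by caching state and the DMA32 flag, while cached pages bypass the pools entirely. The following standalone sketch mirrors the index computation in ttm_get_pool() above:

	#include <stdio.h>

	enum caching { WC, UC, CACHED };

	/* bit 0: uncached vs write-combined, bit 1: DMA32 variant */
	static int pool_index(enum caching cstate, int dma32)
	{
		int index;

		if (cstate == CACHED)
			return -1;	/* no pool: cached pages use plain alloc_page() */
		index = (cstate == UC) ? 0x1 : 0x0;
		if (dma32)
			index |= 0x2;
		return index;		/* 0: wc, 1: uc, 2: wc dma32, 3: uc dma32 */
	}

	int main(void)
	{
		printf("uc + dma32 -> pool %d\n", pool_index(UC, 1)); /* prints 3 */
		return 0;
	}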
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb..a7bab87a548 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d..c4f5114aee7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
- man->io_offset = dev_priv->vram_start;
- man->io_size = dev_priv->vram_size;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
vmw_dmabuf_gmr_unbind(bo);
}
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.is_iomem = false;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->vram_start;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify
+ .swap_notify = vmw_swap_notify,
+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+ .io_mem_free = &vmw_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4..dbd36b8910c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cd..7421aaad8d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
- info->aperture_base = vmw_priv->vram_start;
- info->aperture_size = vmw_priv->vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_aper;
+ }
+ info->apertures->ranges[0].base = vmw_priv->vram_start;
+ info->apertures->ranges[0].size = vmw_priv->vram_size;
/*
* Dirty & Deferred IO
@@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
err_defio:
fb_deferred_io_cleanup(info);
+err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +658,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a6..bbc7c4c30bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
return NULL;
}
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
- return 0;
-}
-
static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
- .fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f5..ad566c85b07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 61ab4daf0bb..8d0e31a2202 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS
multiple GPUS. The overhead for each GPU is very small.
config VGA_SWITCHEROO
- bool "Laptop Hybrid Grapics - GPU switching support"
+ bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
help
- Many laptops released in 2008/9/10 have two gpus with a multiplxer
+ Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
X isn't running and delayed switching until the next logoff. This
- features is called hybrid graphics, ATI PowerXpress, and Nvidia
+ feature is called hybrid graphics, ATI PowerXpress, and Nvidia
HybridPower.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71d4c070362..132278fa624 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -86,6 +86,12 @@ config HID_BELKIN
---help---
Support for Belkin Flip KVM and Wireless keyboard.
+config HID_CANDO
+ tristate "Cando dual touch panel"
+ depends on USB_HID
+ ---help---
+ Support for Cando dual touch panel.
+
config HID_CHERRY
tristate "Cherry" if EMBEDDED
depends on USB_HID
@@ -100,6 +106,21 @@ config HID_CHICONY
---help---
Support for Chicony Tactical pad.
+config HID_PRODIKEYS
+ tristate "Prodikeys PC-MIDI Keyboard support"
+ depends on USB_HID && SND
+ select SND_RAWMIDI
+ ---help---
+ Support for the Prodikeys PC-MIDI Keyboard device.
+ Say Y here to enable support for this device.
+ - Prodikeys PC-MIDI keyboard.
+ The Prodikeys PC-MIDI acts as a USB Audio device, with one MIDI
+ input and one MIDI output. These MIDI jacks appear as
+ a sound "card" in the ALSA sound system.
+ Note: if you say N here, this device will still function as a basic
+ multimedia keyboard, but will lack support for the musical keyboard
+ and some additional multimedia keys.
+
config HID_CYPRESS
tristate "Cypress" if EMBEDDED
depends on USB_HID
@@ -108,9 +129,8 @@ config HID_CYPRESS
Support for cypress mouse and barcode readers.
config HID_DRAGONRISE
- tristate "DragonRise Inc. support" if EMBEDDED
+ tristate "DragonRise Inc. support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have DragonRise Inc.game controllers.
@@ -122,6 +142,12 @@ config DRAGONRISE_FF
Say Y here if you want to enable force feedback support for DragonRise Inc.
game controllers.
+config HID_EGALAX
+ tristate "eGalax multi-touch panel"
+ depends on USB_HID
+ ---help---
+ Support for the eGalax dual-touch panel.
+
config HID_EZKEY
tristate "Ezkey" if EMBEDDED
depends on USB_HID
@@ -137,16 +163,14 @@ config HID_KYE
Support for Kye/Genius Ergo Mouse.
config HID_GYRATION
- tristate "Gyration" if EMBEDDED
+ tristate "Gyration"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Gyration remote control.
config HID_TWINHAN
- tristate "Twinhan" if EMBEDDED
+ tristate "Twinhan"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Twinhan IR remote control.
@@ -233,16 +257,14 @@ config HID_NTRIG
Support for N-Trig touch screen.
config HID_ORTEK
- tristate "Ortek" if EMBEDDED
+ tristate "Ortek"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
config HID_PANTHERLORD
- tristate "Pantherlord support" if EMBEDDED
+ tristate "Pantherlord support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter.
@@ -256,29 +278,98 @@ config PANTHERLORD_FF
or adapter and want to enable force feedback support for it.
config HID_PETALYNX
- tristate "Petalynx" if EMBEDDED
+ tristate "Petalynx"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Petalynx Maxter remote control.
+config HID_PICOLCD
+ tristate "PicoLCD (graphic version)"
+ depends on USB_HID
+ ---help---
+ This provides support for Minibox PicoLCD devices; currently
+ only the graphical ones are supported.
+
+ This includes support for the following device features:
+ - Keypad
+ - Switching between Firmware and Flash mode
+ - EEProm / Flash access (via debugfs)
+ Features selectively enabled:
+ - Framebuffer for monochrome 256x64 display
+ - Backlight control
+ - Contrast control
+ - General purpose outputs
+ Features that are not (yet) supported:
+ - IR
+
+config HID_PICOLCD_FB
+ bool "Framebuffer support" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=FB || FB=y
+ select FB_DEFERRED_IO
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ ---help---
+ Provide access to PicoLCD's 256x64 monochrome display via a
+ framebuffer device.
+
+config HID_PICOLCD_BACKLIGHT
+ bool "Backlight control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's backlight control via backlight
+ class.
+
+config HID_PICOLCD_LCD
+ bool "Contrast control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's LCD contrast via lcd class.
+
+config HID_PICOLCD_LEDS
+ bool "GPO via leds class" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
+ ---help---
+ Provide access to PicoLCD's GPO pins via leds class.
+
config HID_QUANTA
tristate "Quanta Optical Touch"
depends on USB_HID
---help---
Support for Quanta Optical Touch dual-touch panels.
+config HID_ROCCAT
+ tristate "Roccat special event support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat special events.
+ Say Y here if you have a Roccat mouse or keyboard and want OSD or
+ macro execution support.
+
+config HID_ROCCAT_KONE
+ tristate "Roccat Kone Mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat Kone mouse.
+
config HID_SAMSUNG
- tristate "Samsung" if EMBEDDED
+ tristate "Samsung"
depends on USB_HID
- default !EMBEDDED
---help---
- Support for Samsung InfraRed remote control.
+ Support for Samsung InfraRed remote control or keyboards.
config HID_SONY
- tristate "Sony" if EMBEDDED
+ tristate "Sony"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sony PS3 controller.
@@ -289,16 +380,14 @@ config HID_STANTUM
Support for Stantum multitouch panel.
config HID_SUNPLUS
- tristate "Sunplus" if EMBEDDED
+ tristate "Sunplus"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sunplus wireless desktop.
config HID_GREENASIA
- tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
+ tristate "GreenAsia (Product ID 0x12) support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a GreenAsia (Product ID 0x12) based game
controller or adapter.
@@ -313,9 +402,8 @@ config GREENASIA_FF
and want to enable force feedback support for it.
config HID_SMARTJOYPLUS
- tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
+ tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
- default !EMBEDDED
---help---
Support for SmartJoy PLUS PS2/USB adapter.
@@ -328,16 +416,14 @@ config SMARTJOYPLUS_FF
enable force feedback support for it.
config HID_TOPSEED
- tristate "TopSeed Cyberlink remote control support" if EMBEDDED
+ tristate "TopSeed Cyberlink remote control support"
depends on USB_HID
- default !EMBEDDED
---help---
- Say Y if you have a TopSeed Cyberlink remote control.
+ Say Y if you have a TopSeed Cyberlink or BTC Emprex remote control.
config HID_THRUSTMASTER
- tristate "ThrustMaster devices support" if EMBEDDED
+ tristate "ThrustMaster devices support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
a THRUSTMASTER Ferrari GT Rumble Wheel.
@@ -357,10 +443,17 @@ config HID_WACOM
---help---
Support for Wacom Graphire Bluetooth tablet.
+config HID_WACOM_POWER_SUPPLY
+ bool "Wacom Bluetooth devices power supply status support"
+ depends on HID_WACOM
+ select POWER_SUPPLY
+ ---help---
+ Say Y here if you want to enable power supply status monitoring for
+ Wacom Bluetooth devices.
+
config HID_ZEROPLUS
- tristate "Zeroplus based game controller support" if EMBEDDED
+ tristate "Zeroplus based game controller support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a Zeroplus based game controller.
@@ -372,6 +465,12 @@ config ZEROPLUS_FF
Say Y here if you have a Zeroplus based game controller and want
to have force feedback support for it.
+config HID_ZYDACRON
+ tristate "Zydacron remote control support"
+ depends on USB_HID
+ ---help---
+ Support for Zydacron remote control.
+
endmenu
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0b2618f092c..987fa062736 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -26,10 +26,12 @@ obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
+obj-$(CONFIG_HID_CANDO) += hid-cando.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -41,9 +43,13 @@ obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MOSART) += hid-mosart.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
+obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
+obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
@@ -54,6 +60,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
+obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_USB_HID) += usbhid/
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index c31e0be8cce..2a0d56b7a02 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -1,7 +1,7 @@
/*
* HID driver for 3M PCT multitouch panels
*
- * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
*
*/
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
struct mmm_finger {
- __s32 x, y;
+ __s32 x, y, w, h;
__u8 rank;
bool touch, valid;
};
@@ -82,7 +82,18 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
return 1;
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 1, 1, 0, 0);
+ return 1;
case HID_DG_CONTACTID:
+ field->logical_maximum = 59;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TRACKING_ID);
return 1;
@@ -128,9 +139,15 @@ static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
/* this finger is just placeholder data, ignore */
} else if (f->touch) {
/* this finger is on the screen */
+ int wide = (f->w > f->h);
input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR,
+ wide ? f->w : f->h);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR,
+ wide ? f->h : f->w);
input_mt_sync(input);
/*
* touchscreen emulation: maintain the age rank
@@ -197,6 +214,14 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_CONFIDENCE:
md->valid = value;
break;
+ case HID_DG_WIDTH:
+ if (md->valid)
+ md->f[md->curid].w = value;
+ break;
+ case HID_DG_HEIGHT:
+ if (md->valid)
+ md->f[md->curid].h = value;
+ break;
case HID_DG_CONTACTID:
if (md->valid) {
md->curid = value;
@@ -255,6 +280,7 @@ static void mmm_remove(struct hid_device *hdev)
static const struct hid_device_id mmm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ }
};
MODULE_DEVICE_TABLE(hid, mmm_devices);
@@ -287,5 +313,4 @@ static void __exit mmm_exit(void)
module_init(mmm_init);
module_exit(mmm_exit);
-MODULE_LICENSE("GPL");
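The hunk above extends the 3M driver from plain (x, y) reporting to contact geometry: the larger of the reported width/height becomes ABS_MT_TOUCH_MAJOR, the smaller ABS_MT_TOUCH_MINOR, and ABS_MT_ORIENTATION records which axis the major one lies along. A self-contained sketch of that translation (struct and function names are illustrative, not the driver's):

struct mt_ellipse {
        int orientation;        /* 1 if the major axis lies along X, 0 along Y */
        int touch_major;
        int touch_minor;
};

static struct mt_ellipse wh_to_mt_ellipse(int w, int h)
{
        struct mt_ellipse e;

        e.orientation = (w > h);
        e.touch_major = e.orientation ? w : h;
        e.touch_minor = e.orientation ? h : w;
        return e;
}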
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
new file mode 100644
index 00000000000..4267a6fdc27
--- /dev/null
+++ b/drivers/hid/hid-cando.c
@@ -0,0 +1,272 @@
+/*
+ * HID driver for Cando dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Cando dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct cando_data {
+ __u16 x, y;
+ __u8 id;
+ __s8 oldest; /* id of the oldest finger in previous frame */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool first; /* is this the first finger in this frame? */
+ __s8 firstid; /* id of the first finger in the frame */
+ __u16 firstx, firsty; /* (x, y) of the first finger in the frame */
+};
+
+static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_INRANGE:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void cando_filter_event(struct cando_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if this is the second finger and
+ * the first was valid, the first was the oldest; if the
+ * first was not valid and there was a valid finger in the
+ * previous frame, this is a release.
+ */
+ if (td->first) {
+ td->firstid = -1;
+ } else if (td->firstid >= 0) {
+ input_event(input, EV_ABS, ABS_X, td->firstx);
+ input_event(input, EV_ABS, ABS_Y, td->firsty);
+ td->oldest = td->firstid;
+ } else if (td->oldest >= 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->oldest = -1;
+ }
+
+ return;
+ }
+
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: if there was no touching finger previously,
+ * emit touch event
+ */
+ if (td->oldest < 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->oldest = td->id;
+ }
+
+ /*
+ * touchscreen emulation: if this is the first finger, wait for the
+ * second; the oldest is then the second if it was the oldest already
+ * or if there was no first, the first otherwise.
+ */
+ if (td->first) {
+ td->firstx = td->x;
+ td->firsty = td->y;
+ td->firstid = td->id;
+ } else {
+ int x, y, oldest;
+ if (td->id == td->oldest || td->firstid < 0) {
+ x = td->x;
+ y = td->y;
+ oldest = td->id;
+ } else {
+ x = td->firstx;
+ y = td->firsty;
+ oldest = td->firstid;
+ }
+ input_event(input, EV_ABS, ABS_X, x);
+ input_event(input, EV_ABS, ABS_Y, y);
+ td->oldest = oldest;
+ }
+}
+
+
+static int cando_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct cando_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ td->valid = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ cando_filter_event(td, input);
+ break;
+ case HID_DG_TIPSWITCH:
+ /* avoid interference from generic hidinput handling */
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct cando_data *td;
+
+ td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+ td->first = false;
+ td->oldest = -1;
+ td->valid = false;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void cando_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id cando_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cando_devices);
+
+static const struct hid_usage_id cando_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver cando_driver = {
+ .name = "cando-touch",
+ .id_table = cando_devices,
+ .probe = cando_probe,
+ .remove = cando_remove,
+ .input_mapping = cando_input_mapping,
+ .input_mapped = cando_input_mapped,
+ .usage_table = cando_grabbed_usages,
+ .event = cando_event,
+};
+
+static int __init cando_init(void)
+{
+ return hid_register_driver(&cando_driver);
+}
+
+static void __exit cando_exit(void)
+{
+ hid_unregister_driver(&cando_driver);
+}
+
+module_init(cando_init);
+module_exit(cando_exit);
+
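hid-cando.c, like the other dual-touch drivers in this series, reports contacts with the original multitouch protocol: every valid finger is emitted as a packet of ABS_MT_* values closed by input_mt_sync(), and single-touch emulation rides on top by tracking the oldest finger. A minimal sketch of the per-contact packet (the helper name is assumed; the calls are the same ones used above):

#include <linux/input.h>

static void report_mt_contact(struct input_dev *input, int id, int x, int y)
{
        input_event(input, EV_ABS, ABS_MT_TRACKING_ID, id);
        input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
        input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
        input_mt_sync(input);           /* closes this contact's packet */
}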
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 143e788b729..aa0f7dcabcd 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -653,10 +653,9 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (device->driver->report_fixup)
device->driver->report_fixup(device, start, size);
- device->rdesc = kmalloc(size, GFP_KERNEL);
+ device->rdesc = kmemdup(start, size, GFP_KERNEL);
if (device->rdesc == NULL)
return -ENOMEM;
- memcpy(device->rdesc, start, size);
device->rsize = size;
parser = vmalloc(sizeof(struct hid_parser));
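The change above is purely a simplification: kmemdup() allocates and copies in one call, behaving roughly like the open-coded pair it replaces. Sketch for illustration only:

#include <linux/slab.h>
#include <linux/string.h>

static void *kmemdup_like(const void *src, size_t len, gfp_t gfp)
{
        void *p = kmalloc(len, gfp);

        if (p)
                memcpy(p, src, len);
        return p;
}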
@@ -940,13 +939,8 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
- unsigned bitsused = offset + count * size;
unsigned n;
- /* make sure the unused bits in the last byte are zeros */
- if (count > 0 && size > 0 && (bitsused % 8) != 0)
- data[(bitsused-1)/8] &= (1 << (bitsused % 8)) - 1;
-
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
implement(data, offset + n * size, size, s32ton(field->value[n], size));
@@ -966,6 +960,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
+ memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->field[n], data);
}
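Instead of masking the trailing bits of the last byte field by field, hid_output_report() now zeroes the whole payload up front; the memset length is the report size in bits rounded up to whole bytes. A worked one-liner, with an assumed helper name:

static unsigned int report_payload_bytes(unsigned int size_in_bits)
{
        /* e.g. 12 bits -> 2 bytes, 16 -> 2, 17 -> 3 (size_in_bits > 0) */
        return ((size_in_bits - 1) >> 3) + 1;
}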
@@ -1086,35 +1081,28 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
- if (!buf) {
- report = hid_get_report(report_enum, data);
+ if (!buf)
goto nomem;
- }
-
- snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
- hid_debug_event(hid, buf);
-
- report = hid_get_report(report_enum, data);
- if (!report) {
- kfree(buf);
- return -1;
- }
/* dump the report */
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "report %d (size %u) = ", report->id, size);
+ "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
hid_debug_event(hid, buf);
+
for (i = 0; i < size; i++) {
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
" %02x", data[i]);
hid_debug_event(hid, buf);
}
hid_debug_event(hid, "\n");
-
kfree(buf);
nomem:
+ report = hid_get_report(report_enum, data);
+
+ if (!report)
+ return -1;
+
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
@@ -1167,6 +1155,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
unsigned int i;
int len;
+ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
+ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->bus != BUS_USB)
connect_mask &= ~HID_CONNECT_HIDDEV;
if (hid_hiddev(hdev))
@@ -1246,6 +1236,7 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1290,14 +1281,19 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
@@ -1305,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1331,6 +1328,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
@@ -1342,7 +1341,9 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
@@ -1359,8 +1360,10 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ }
@@ -1757,7 +1760,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (hid_ignore(hdev))
+ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev))
return -ENODEV;
/* XXX hack, any other cleaner solution after the driver core
@@ -1765,11 +1768,12 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
hdev->status |= HID_STAT_ADDED;
-
- hid_debug_register(hdev, dev_name(&hdev->dev));
+ else
+ hid_debug_unregister(hdev);
return ret;
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f..c9402676857 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
[REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
};
-static const char *absolutes[ABS_MAX + 1] = {
+static const char *absolutes[ABS_CNT] = {
[ABS_X] = "X", [ABS_Y] = "Y",
[ABS_Z] = "Z", [ABS_RX] = "Rx",
[ABS_RY] = "Ry", [ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
new file mode 100644
index 00000000000..f44bdc084cb
--- /dev/null
+++ b/drivers/hid/hid-egalax.c
@@ -0,0 +1,281 @@
+/*
+ * HID driver for eGalax dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("eGalax dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct egalax_data {
+ __u16 x, y, z;
+ __u8 id;
+ bool first; /* is this the first finger in the frame? */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool activity; /* at least one active finger previously? */
+ __u16 lastx, lasty; /* latest valid (x, y) in the frame */
+};
+
+static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ case HID_DG_TIPPRESSURE:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_PRESSURE);
+ return 1;
+ }
+ return 0;
+ }
+
+ /* ignore others (from other reports we won't get anyway) */
+ return -1;
+}
+
+static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (td->valid) {
+ /* emit multitouch events */
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: store (x, y) as
+ * the last valid values in this frame
+ */
+ td->lastx = td->x;
+ td->lasty = td->y;
+ }
+
+ /*
+ * touchscreen emulation: if this is the second finger and at least
+ * one in this frame is valid, the latest valid in the frame is
+ * the oldest on the panel, the one we want for single touch
+ */
+ if (!td->first && td->activity) {
+ input_event(input, EV_ABS, ABS_X, td->lastx);
+ input_event(input, EV_ABS, ABS_Y, td->lasty);
+ }
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if the first finger is invalid
+ * and there previously was finger activity, this is a release
+ */
+ if (td->first && td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->activity = false;
+ }
+ return;
+ }
+
+
+ /* touchscreen emulation: if no previous activity, emit touch event */
+ if (!td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->activity = true;
+ }
+}
+
+
+static int egalax_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct egalax_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ /* avoid interference from generic hidinput handling */
+ break;
+ case HID_DG_TIPSWITCH:
+ td->valid = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ td->z = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ /* this is the last field in a finger */
+ egalax_filter_event(td, input);
+ break;
+ case HID_DG_CONTACTCOUNT:
+ /* touch emulation: this is the last field in a frame */
+ td->first = false;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct egalax_data *td;
+ struct hid_report *report;
+
+ td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto end;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ goto end;
+
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5];
+ if (report) {
+ report->field[0]->value[0] = 2;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ }
+
+end:
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void egalax_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id egalax_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, egalax_devices);
+
+static const struct hid_usage_id egalax_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver egalax_driver = {
+ .name = "egalax-touch",
+ .id_table = egalax_devices,
+ .probe = egalax_probe,
+ .remove = egalax_remove,
+ .input_mapping = egalax_input_mapping,
+ .input_mapped = egalax_input_mapped,
+ .usage_table = egalax_grabbed_usages,
+ .event = egalax_event,
+};
+
+static int __init egalax_init(void)
+{
+ return hid_register_driver(&egalax_driver);
+}
+
+static void __exit egalax_exit(void)
+{
+ hid_unregister_driver(&egalax_driver);
+}
+
+module_init(egalax_init);
+module_exit(egalax_exit);
+
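egalax_probe() puts the panel into its dual-touch reporting mode by rewriting one field of feature report 5 and resubmitting it. A hedged, slightly more defensive sketch of that step (the helper name and the extra field checks are assumptions; report id 5 and value 2 come from the probe above, and the code relies on the same linux/hid.h, linux/usb.h and usbhid/usbhid.h includes as the driver):

static void egalax_set_mode(struct hid_device *hdev, unsigned int id, s32 mode)
{
        struct hid_report *report =
                hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[id];

        if (report && report->maxfield > 0 && report->field[0]->report_count > 0) {
                report->field[0]->value[0] = mode;
                usbhid_submit_report(hdev, report, USB_DIR_OUT);
        }
}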
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baec..3975e039c3d 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 09d27649a0f..6af77ed0b55 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -20,6 +20,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
+#define USB_DEVICE_ID_3M2256 0x0502
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -123,6 +124,13 @@
#define USB_VENDOR_ID_BERKSHIRE 0x0c98
#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+#define USB_VENDOR_ID_BTC 0x046e
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+
+#define USB_VENDOR_ID_CANDO 0x2087
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+
#define USB_VENDOR_ID_CH 0x068e
#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
@@ -148,6 +156,9 @@
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
+#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
+
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
@@ -171,6 +182,10 @@
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_VENDOR_ID_DWAV 0x0eef
+#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -267,6 +282,7 @@
#define USB_VENDOR_ID_GYRATION 0x0c16
#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
@@ -342,6 +358,8 @@
#define USB_VENDOR_ID_MICROCHIP 0x04d8
#define USB_DEVICE_ID_PICKIT1 0x0032
#define USB_DEVICE_ID_PICKIT2 0x0033
+#define USB_DEVICE_ID_PICOLCD 0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -400,6 +418,9 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -409,6 +430,7 @@
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
@@ -457,6 +479,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0xbd
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
@@ -475,6 +498,9 @@
#define USB_VENDOR_ID_ZEROPLUS 0x0c12
+#define USB_VENDOR_ID_ZYDACRON 0x13EC
+#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 3677c9037a1..f6433d8050a 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -126,6 +126,9 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1004: lg_map_key_clear(KEY_VIDEO); break;
case 0x1005: lg_map_key_clear(KEY_AUDIO); break;
case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break;
+ /* The following two entries are Playlist 1 and 2 on the MX3200 */
+ case 0x100f: lg_map_key_clear(KEY_FN_1); break;
+ case 0x1010: lg_map_key_clear(KEY_FN_2); break;
case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break;
case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break;
case 0x1013: lg_map_key_clear(KEY_CAMERA); break;
@@ -137,6 +140,7 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1019: lg_map_key_clear(KEY_PROG1); break;
case 0x101a: lg_map_key_clear(KEY_PROG2); break;
case 0x101b: lg_map_key_clear(KEY_PROG3); break;
+ case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break;
case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break;
case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break;
@@ -147,6 +151,11 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break;
case 0x102a: lg_map_key_clear(KEY_BACK); break;
case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
+ case 0x102d: lg_map_key_clear(KEY_WWW); break;
+ /* The following two are 'Start/answer call' and 'End/reject call'
+ on the MX3200 */
+ case 0x1031: lg_map_key_clear(KEY_OK); break;
+ case 0x1032: lg_map_key_clear(KEY_CANCEL); break;
case 0x1041: lg_map_key_clear(KEY_BATTERY); break;
case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break;
case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0d471fc2ab8..f10d56a15f2 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -354,12 +354,15 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
dev_err(&hdev->dev, "magicmouse hw start failed\n");
goto err_free;
}
+ /* we are handling the input ourselves */
+ hidinput_disconnect(hdev);
+
report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
if (!report) {
dev_err(&hdev->dev, "unable to register touch report\n");
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 4777bbfa1cc..b6b0caeeac5 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -24,6 +24,34 @@
#define NTRIG_DUPLICATE_USAGES 0x001
+static unsigned int min_width;
+module_param(min_width, uint, 0644);
+MODULE_PARM_DESC(min_width, "Minimum touch contact width to accept.");
+
+static unsigned int min_height;
+module_param(min_height, uint, 0644);
+MODULE_PARM_DESC(min_height, "Minimum touch contact height to accept.");
+
+static unsigned int activate_slack = 1;
+module_param(activate_slack, uint, 0644);
+MODULE_PARM_DESC(activate_slack, "Number of touch frames to ignore at "
+ "the start of touch input.");
+
+static unsigned int deactivate_slack = 4;
+module_param(deactivate_slack, uint, 0644);
+MODULE_PARM_DESC(deactivate_slack, "Number of empty frames to ignore before "
+ "deactivating touch.");
+
+static unsigned int activation_width = 64;
+module_param(activation_width, uint, 0644);
+MODULE_PARM_DESC(activation_width, "Width threshold to immediately start "
+ "processing touch events.");
+
+static unsigned int activation_height = 32;
+module_param(activation_height, uint, 0644);
+MODULE_PARM_DESC(activation_height, "Height threshold to immediately start "
+ "processing touch events.");
+
struct ntrig_data {
/* Incoming raw values for a single contact */
__u16 x, y, w, h;
@@ -37,6 +65,309 @@ struct ntrig_data {
__u8 mt_footer[4];
__u8 mt_foot_count;
+
+ /* The current activation state. */
+ __s8 act_state;
+
+ /* Empty frames to ignore before recognizing the end of activity */
+ __s8 deactivate_slack;
+
+ /* Frames to ignore before acknowledging the start of activity */
+ __s8 activate_slack;
+
+ /* Minimum size contact to accept */
+ __u16 min_width;
+ __u16 min_height;
+
+ /* Threshold to override activation slack */
+ __u16 activation_width;
+ __u16 activation_height;
+
+ __u16 sensor_logical_width;
+ __u16 sensor_logical_height;
+ __u16 sensor_physical_width;
+ __u16 sensor_physical_height;
+};
+
+
+static ssize_t show_phys_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_width);
+}
+
+static DEVICE_ATTR(sensor_physical_width, S_IRUGO, show_phys_width, NULL);
+
+static ssize_t show_phys_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_height);
+}
+
+static DEVICE_ATTR(sensor_physical_height, S_IRUGO, show_phys_height, NULL);
+
+static ssize_t show_log_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_width);
+}
+
+static DEVICE_ATTR(sensor_logical_width, S_IRUGO, show_log_width, NULL);
+
+static ssize_t show_log_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_height);
+}
+
+static DEVICE_ATTR(sensor_logical_height, S_IRUGO, show_log_height, NULL);
+
+static ssize_t show_min_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_min_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->min_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_width, S_IWUSR | S_IRUGO, show_min_width, set_min_width);
+
+static ssize_t show_min_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_min_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->min_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_height, S_IWUSR | S_IRUGO, show_min_height,
+ set_min_height);
+
+static ssize_t show_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activate_slack);
+}
+
+static ssize_t set_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0x7f)
+ return -EINVAL;
+
+ nd->activate_slack = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(activate_slack, S_IWUSR | S_IRUGO, show_activate_slack,
+ set_activate_slack);
+
+static ssize_t show_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->activation_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_width, S_IWUSR | S_IRUGO, show_activation_width,
+ set_activation_width);
+
+static ssize_t show_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->activation_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_height, S_IWUSR | S_IRUGO,
+ show_activation_height, set_activation_height);
+
+static ssize_t show_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", -nd->deactivate_slack);
+}
+
+static ssize_t set_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /*
+ * No more than 8 terminal frames have been observed so far
+ * and higher slack is highly likely to leave the single
+ * touch emulation stuck down.
+ */
+ if (val > 7)
+ return -EINVAL;
+
+ nd->deactivate_slack = -val;
+
+ return count;
+}
+
+static DEVICE_ATTR(deactivate_slack, S_IWUSR | S_IRUGO, show_deactivate_slack,
+ set_deactivate_slack);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_sensor_physical_width.attr,
+ &dev_attr_sensor_physical_height.attr,
+ &dev_attr_sensor_logical_width.attr,
+ &dev_attr_sensor_logical_height.attr,
+ &dev_attr_min_height.attr,
+ &dev_attr_min_width.attr,
+ &dev_attr_activate_slack.attr,
+ &dev_attr_activation_width.attr,
+ &dev_attr_activation_height.attr,
+ &dev_attr_deactivate_slack.attr,
+ NULL
+};
+
+static struct attribute_group ntrig_attribute_group = {
+ .attrs = sysfs_attrs
};
/*
@@ -49,6 +380,8 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
/* No special mappings needed for the pen and single touch */
if (field->physical)
return 0;
@@ -62,6 +395,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_X,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_width) {
+ nd->sensor_logical_width =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_width =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_width = activation_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ nd->min_width = min_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ }
return 1;
case HID_GD_Y:
hid_map_usage(hi, usage, bit, max,
@@ -69,6 +417,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_Y,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_height) {
+ nd->sensor_logical_height =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_height =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_height = activation_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ nd->min_height = min_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ }
return 1;
}
return 0;
@@ -201,20 +564,68 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
if (nd->mt_foot_count != 4)
break;
- /* Pen activity signal, trigger end of touch. */
+ /* Pen activity signal. */
if (nd->mt_footer[2]) {
+ /*
+ * When the pen deactivates touch, we see a
+ * bogus frame with ContactCount > 0.
+ * We can save a bit of work by ensuring
+ * act_state < 0 even if deactivation slack
+ * is turned off.
+ */
+ nd->act_state = deactivate_slack - 1;
nd->confidence = 0;
break;
}
- /* If the contact was invalid */
- if (!(nd->confidence && nd->mt_footer[0])
- || nd->w <= 250
- || nd->h <= 190) {
- nd->confidence = 0;
+ /*
+ * The first footer value indicates the presence of a
+ * finger.
+ */
+ if (nd->mt_footer[0]) {
+ /*
+ * We do not want to process contacts under
+ * the size threshold, but do not want to
+ * ignore them for activation state
+ */
+ if (nd->w < nd->min_width ||
+ nd->h < nd->min_height)
+ nd->confidence = 0;
+ } else
break;
+
+ if (nd->act_state > 0) {
+ /*
+ * Contact meets the activation size threshold
+ */
+ if (nd->w >= nd->activation_width &&
+ nd->h >= nd->activation_height) {
+ if (nd->id)
+ /*
+ * first contact, activate now
+ */
+ nd->act_state = 0;
+ else {
+ /*
+ * avoid corrupting this frame
+ * but ensure next frame will
+ * be active
+ */
+ nd->act_state = 1;
+ break;
+ }
+ } else
+ /*
+ * Defer adjusting the activation state
+ * until the end of the frame.
+ */
+ break;
}
+ /* Discarding this contact */
+ if (!nd->confidence)
+ break;
+
/* emit a normal (X, Y) for the first point only */
if (nd->id == 0) {
/*
@@ -227,8 +638,15 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
+
+ /* Emit MT events */
input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+ /*
+ * Translate from height and width to size
+ * and orientation.
+ */
if (nd->w > nd->h) {
input_event(input, EV_ABS,
ABS_MT_ORIENTATION, 1);
@@ -248,12 +666,88 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
- if (!nd->reading_mt)
+ if (!nd->reading_mt) /* Just to be sure */
break;
nd->reading_mt = 0;
- if (nd->first_contact_touch) {
+
+ /*
+ * Activation state machine logic:
+ *
+ * Fundamental states:
+ * state > 0: Inactive
+ * state <= 0: Active
+ * state < -deactivate_slack:
+ * Pen termination of touch
+ *
+ * Specific values of interest
+ * state == activate_slack
+ * no valid input since the last reset
+ *
+ * state == 0
+ * general operational state
+ *
+ * state == -deactivate_slack
+ * read sufficient empty frames to accept
+ * the end of input and reset
+ */
+
+ if (nd->act_state > 0) { /* Currently inactive */
+ if (value)
+ /*
+ * Consider each live contact as
+ * evidence of intentional activity.
+ */
+ nd->act_state = (nd->act_state > value)
+ ? nd->act_state - value
+ : 0;
+ else
+ /*
+ * Empty frame before we hit the
+ * activity threshold, reset.
+ */
+ nd->act_state = nd->activate_slack;
+
+ /*
+ * Entered this block inactive and no
+ * coordinates sent this frame, so hold off
+ * on button state.
+ */
+ break;
+ } else { /* Currently active */
+ if (value && nd->act_state >=
+ nd->deactivate_slack)
+ /*
+ * Live point: clear accumulated
+ * deactivation count.
+ */
+ nd->act_state = 0;
+ else if (nd->act_state <= nd->deactivate_slack)
+ /*
+ * We've consumed the deactivation
+ * slack, time to deactivate and reset.
+ */
+ nd->act_state =
+ nd->activate_slack;
+ else { /* Move towards deactivation */
+ nd->act_state--;
+ break;
+ }
+ }
+
+ if (nd->first_contact_touch && nd->act_state <= 0) {
+ /*
+ * Check to see if we're ready to start
+ * emitting touch events.
+ *
+ * Note: activation slack will decrease over
+ * the course of the frame, and it will be
+ * inconsistent from the start to the end of
+ * the frame. However if the frame starts
+ * with slack, first_contact_touch will still
+ * be 0 and we will not get to this point.
+ */
input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
input_report_key(input, BTN_TOUCH, 1);
} else {
@@ -263,7 +757,7 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
default:
- /* fallback to the generic hidinput handling */
+ /* fall-back to the generic hidinput handling */
return 0;
}
}
@@ -293,6 +787,16 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
nd->reading_mt = 0;
+ nd->min_width = 0;
+ nd->min_height = 0;
+ nd->activate_slack = activate_slack;
+ nd->act_state = activate_slack;
+ nd->deactivate_slack = -deactivate_slack;
+ nd->sensor_logical_width = 0;
+ nd->sensor_logical_height = 0;
+ nd->sensor_physical_width = 0;
+ nd->sensor_physical_height = 0;
+
hid_set_drvdata(hdev, nd);
ret = hid_parse(hdev);
@@ -344,6 +848,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (report)
usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ ret = sysfs_create_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
return 0;
err_free:
@@ -353,6 +859,8 @@ err_free:
static void ntrig_remove(struct hid_device *hdev)
{
+ sysfs_remove_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
hid_hw_stop(hdev);
kfree(hid_get_drvdata(hdev));
}
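For reference, the activation logic that ntrig_event() spreads across the ContactCount handler can be condensed into a single transition function. This is an illustrative reduction only (names assumed; deactivate_slack is passed as a positive count here, whereas the driver stores it negated):

static int ntrig_next_act_state(int act_state, int contacts,
                                int activate_slack, int deactivate_slack)
{
        if (act_state > 0) {                    /* inactive: count down activation slack */
                if (!contacts)
                        return activate_slack;  /* empty frame: reset */
                return act_state > contacts ? act_state - contacts : 0;
        }
        /* active */
        if (contacts && act_state >= -deactivate_slack)
                return 0;                       /* live contact: stay active */
        if (act_state <= -deactivate_slack)
                return activate_slack;          /* slack consumed: deactivate and reset */
        return act_state - 1;                   /* empty frame: drift towards deactivation */
}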
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
new file mode 100644
index 00000000000..7aabf65c48e
--- /dev/null
+++ b/drivers/hid/hid-picolcd.c
@@ -0,0 +1,2631 @@
+/***************************************************************************
+ * Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org> *
+ * *
+ * Based on Logitech G13 driver (v0.4) *
+ * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, version 2 of the License. *
+ * *
+ * This driver is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this software. If not see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/hid-debug.h>
+#include <linux/input.h>
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/leds.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+
+#define PICOLCD_NAME "PicoLCD (graphic)"
+
+/* Report numbers */
+#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */
+#define ERR_SUCCESS 0x00
+#define ERR_PARAMETER_MISSING 0x01
+#define ERR_DATA_MISSING 0x02
+#define ERR_BLOCK_READ_ONLY 0x03
+#define ERR_BLOCK_NOT_ERASABLE 0x04
+#define ERR_BLOCK_TOO_BIG 0x05
+#define ERR_SECTION_OVERFLOW 0x06
+#define ERR_INVALID_CMD_LEN 0x07
+#define ERR_INVALID_DATA_LEN 0x08
+#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */
+#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */
+#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */
+#define REPORT_MEMORY 0x41 /* LCD: IN[63] */
+#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */
+#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */
+#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */
+#define REPORT_RESET 0x93 /* LCD: OUT[2] */
+#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */
+#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */
+#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */
+#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */
+#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */
+#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */
+#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */
+#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */
+#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */
+#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */
+#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */
+#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */
+#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */
+#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */
+#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */
+#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, with each data byte
+ * representing a 1-bit wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ * Chip 1 Chip 2 Chip 3 Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
+ * +----------------+----------------+----------------+----------------+
+ * ...
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
+ * +----------------+----------------+----------------+----------------+
+ */
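From the layout described above, a pixel coordinate maps onto one controller chip, one tile within that chip, one byte within the tile and one bit within the byte. The following address computation is derived from that description for illustration only (the struct and helper names are assumptions; the real driver's update path works at tile granularity):

struct picolcd_pixel_addr {
        unsigned int chip;      /* 0..3, left to right           */
        unsigned int tile;      /* 0..7, top to bottom           */
        unsigned int byte;      /* 0..63, column within the tile */
        unsigned int bit;       /* 0..7, row within the byte     */
};

static struct picolcd_pixel_addr picolcd_pixel_to_addr(unsigned int x, unsigned int y)
{
        struct picolcd_pixel_addr a;

        a.chip = x / 64;        /* each chip drives 64 columns   */
        a.tile = y / 8;         /* each tile covers 8 rows       */
        a.byte = x % 64;        /* one byte per column of a tile */
        a.bit  = y % 8;         /* one bit per row of the strip  */
        return a;
}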
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT 10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+ .id = PICOLCDFB_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO01,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = PICOLCDFB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+ .xres = PICOLCDFB_WIDTH,
+ .yres = PICOLCDFB_HEIGHT,
+ .xres_virtual = PICOLCDFB_WIDTH,
+ .yres_virtual = PICOLCDFB_HEIGHT,
+ .width = 103,
+ .height = 26,
+ .bits_per_pixel = 1,
+ .grayscale = 1,
+};
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+/* Input device
+ *
+ * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
+ * and header for 4x4 key matrix. The built-in keys are part of the matrix.
+ */
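+/*
+ * The scancodes delivered in REPORT_KEY_STATE index directly into this
+ * table (see picolcd_raw_keypad() below); entry 0 means "no key pressed".
+ */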
+static const unsigned short def_keymap[] = {
+ KEY_RESERVED, /* none */
+ KEY_BACK, /* col 4 + row 1 */
+ KEY_HOMEPAGE, /* col 3 + row 1 */
+ KEY_RESERVED, /* col 2 + row 1 */
+ KEY_RESERVED, /* col 1 + row 1 */
+ KEY_SCROLLUP, /* col 4 + row 2 */
+ KEY_OK, /* col 3 + row 2 */
+ KEY_SCROLLDOWN, /* col 2 + row 2 */
+ KEY_RESERVED, /* col 1 + row 2 */
+ KEY_RESERVED, /* col 4 + row 3 */
+ KEY_RESERVED, /* col 3 + row 3 */
+ KEY_RESERVED, /* col 2 + row 3 */
+ KEY_RESERVED, /* col 1 + row 3 */
+ KEY_RESERVED, /* col 4 + row 4 */
+ KEY_RESERVED, /* col 3 + row 4 */
+ KEY_RESERVED, /* col 2 + row 4 */
+ KEY_RESERVED, /* col 1 + row 4 */
+};
+#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
+
+/* Description of an in-progress IO operation, used for operations
+ * that trigger a response from the device */
+struct picolcd_pending {
+ struct hid_report *out_report;
+ struct hid_report *in_report;
+ struct completion ready;
+ int raw_size;
+ u8 raw_data[64];
+};
+
+/* Per device data structure */
+struct picolcd_data {
+ struct hid_device *hdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_reset;
+ struct dentry *debug_eeprom;
+ struct dentry *debug_flash;
+ struct mutex mutex_flash;
+ int addr_sz;
+#endif
+ u8 version[2];
+ unsigned short opmode_delay;
+ /* input stuff */
+ u8 pressed_keys[2];
+ struct input_dev *input_keys;
+ struct input_dev *input_cir;
+ unsigned short keycode[PICOLCD_KEYS];
+
+#ifdef CONFIG_HID_PICOLCD_FB
+ /* Framebuffer stuff */
+ u8 fb_update_rate;
+ u8 fb_bpp;
+ u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */
+ u8 *fb_bitmap; /* framebuffer */
+ struct fb_info *fb_info;
+ struct fb_deferred_io fb_defio;
+#endif /* CONFIG_HID_PICOLCD_FB */
+#ifdef CONFIG_HID_PICOLCD_LCD
+ struct lcd_device *lcd;
+ u8 lcd_contrast;
+#endif /* CONFIG_HID_PICOLCD_LCD */
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+ struct backlight_device *backlight;
+ u8 lcd_brightness;
+ u8 lcd_power;
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+#ifdef CONFIG_HID_PICOLCD_LEDS
+ /* LED stuff */
+ u8 led_state;
+ struct led_classdev *led[8];
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+ /* Housekeeping stuff */
+ spinlock_t lock;
+ struct mutex mutex;
+ struct picolcd_pending *pending;
+ int status;
+#define PICOLCD_BOOTLOADER 1
+#define PICOLCD_FAILED 2
+#define PICOLCD_READY_FB 4
+};
+
+
+/* Find a given report */
+#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
+#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
+
+static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
+{
+ struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
+ struct hid_report *report = NULL;
+
+ list_for_each_entry(report, feature_report_list, list) {
+ if (report->id == id)
+ return report;
+ }
+ dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report);
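+/*
+ * With debugfs enabled, wrap usbhid_submit_report() so that every outgoing
+ * report is also dumped via picolcd_debug_out_report(). The call inside the
+ * macro body is not re-expanded (the preprocessor does not recurse into a
+ * macro of the same name), so the real usbhid_submit_report() still runs.
+ */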
+#define usbhid_submit_report(a, b, c) \
+ do { \
+ picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
+ usbhid_submit_report(a, b, c); \
+ } while (0)
+#endif
+
+/* Submit a report and wait for a reply from the device - if the device
+ * goes away or does not respond in time, NULL is returned */
+static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
+ int report_id, const u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *work;
+ struct hid_report *report = picolcd_out_report(report_id, hdev);
+ unsigned long flags;
+ int i, j, k;
+
+ if (!report || !data)
+ return NULL;
+ if (data->status & PICOLCD_FAILED)
+ return NULL;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
+
+ init_completion(&work->ready);
+ work->out_report = report;
+ work->in_report = NULL;
+ work->raw_size = 0;
+
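+ /*
+ * data->mutex serializes request/reply cycles so that at most one
+ * pending request exists at a time; data->lock guards data->pending,
+ * which is presumably also updated by the raw event handler that
+ * stores the reply and completes work->ready.
+ */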
+ mutex_lock(&data->mutex);
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = k = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
+ k++;
+ }
+ data->pending = work;
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
+ spin_lock_irqsave(&data->lock, flags);
+ data->pending = NULL;
+ spin_unlock_irqrestore(&data->lock, flags);
+ mutex_unlock(&data->mutex);
+ return work;
+}
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
+ struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
+ unsigned long flags;
+ u8 *tdata;
+ int i;
+
+ if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
+ return -ENODEV;
+
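+ /*
+ * REPORT_LCD_CMD_DATA addresses the tile (byte 0 selects the chip;
+ * 0xb8|tile and 0x40 look like the usual set-page/set-column commands
+ * of KS0108-compatible controllers) and carries the first 32 data
+ * bytes; REPORT_LCD_DATA carries the remaining 32 bytes of the tile.
+ * The command byte interpretation is an educated guess from the fixed
+ * sequence below, not taken from a datasheet.
+ */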
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report1->field[0], 0, chip << 2);
+ hid_set_field(report1->field[0], 1, 0x02);
+ hid_set_field(report1->field[0], 2, 0x00);
+ hid_set_field(report1->field[0], 3, 0x00);
+ hid_set_field(report1->field[0], 4, 0xb8 | tile);
+ hid_set_field(report1->field[0], 5, 0x00);
+ hid_set_field(report1->field[0], 6, 0x00);
+ hid_set_field(report1->field[0], 7, 0x40);
+ hid_set_field(report1->field[0], 8, 0x00);
+ hid_set_field(report1->field[0], 9, 0x00);
+ hid_set_field(report1->field[0], 10, 32);
+
+ hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
+ hid_set_field(report2->field[0], 1, 0x00);
+ hid_set_field(report2->field[0], 2, 0x00);
+ hid_set_field(report2->field[0], 3, 32);
+
+ tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
+ for (i = 0; i < 64; i++)
+ if (i < 32)
+ hid_set_field(report1->field[0], 11 + i, tdata[i]);
+ else
+ hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+ usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+ usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+/* Translate a single tile of the framebuffer into the device's vertical-byte format */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+ int chip, int tile)
+{
+ int i, b, changed = 0;
+ u8 tdata[64];
+ u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
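+ /*
+ * Build the 64 vertical bytes of this tile: bit b of tdata[i] ends up
+ * holding the pixel at column i, row b of the tile. For 8bpp input a
+ * pixel counts as set when its most significant bit is set.
+ */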
+ if (bpp == 1) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i/8] >> (7 - i % 8)) & 0x01;
+ }
+ }
+ } else if (bpp == 8) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+ }
+ }
+ } else {
+ /* Oops, we should never get here! */
+ WARN_ON(1);
+ return 0;
+ }
+
+ for (i = 0; i < 64; i++)
+ if (tdata[i] != vdata[i]) {
+ changed = 1;
+ vdata[i] = tdata[i];
+ }
+ return changed;
+}
+
+/* Reconfigure LCD display */
+static int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+ int i, j;
+ unsigned long flags;
+ static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
+
+ if (!report || report->maxfield != 1)
+ return -ENODEV;
+
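+ /*
+ * Send the fixed setup sequence to each of the four controller chips;
+ * byte 0 selects the chip, the remaining mapcmd bytes presumably form
+ * the controller's initialization commands.
+ */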
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < report->field[0]->maxusage; j++)
+ if (j == 0)
+ hid_set_field(report->field[0], j, i << 2);
+ else if (j < sizeof(mapcmd))
+ hid_set_field(report->field[0], j, mapcmd[j]);
+ else
+ hid_set_field(report->field[0], j, 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ }
+
+ data->status |= PICOLCD_READY_FB;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (data->fb_bitmap) {
+ if (clear) {
+ memset(data->fb_vbitmap, 0xff, PICOLCDFB_SIZE);
+ memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
+ } else {
+ /* invert 1 byte in each tile to force resend */
+ for (i = 0; i < PICOLCDFB_SIZE; i += 64)
+ data->fb_vbitmap[i] = ~data->fb_vbitmap[i];
+ }
+ }
+
+ /* schedule first output of framebuffer */
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+
+ return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct picolcd_data *data)
+{
+ int chip, tile, n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (!(data->status & PICOLCD_READY_FB)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ picolcd_fb_reset(data, 0);
+ } else {
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /*
+ * Translate the framebuffer into the format needed by the PicoLCD.
+ * See display layout above.
+ * Do this one tile after the other and push those tiles that changed.
+ *
+ * Wait for our IO to complete as otherwise we might flood the queue!
+ */
+ n = 0;
+ for (chip = 0; chip < 4; chip++)
+ for (tile = 0; tile < 8; tile++)
+ if (picolcd_fb_update_tile(data->fb_vbitmap,
+ data->fb_bitmap, data->fb_bpp, chip, tile)) {
+ n += 2;
+ if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+ usbhid_wait_io(data->hdev);
+ n = 0;
+ }
+ picolcd_fb_send_tile(data->hdev, chip, tile);
+ }
+ if (n)
+ usbhid_wait_io(data->hdev);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (!info->par)
+ return;
+ sys_fillrect(info, rect);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (!info->par)
+ return;
+ sys_copyarea(info, area);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if (!info->par)
+ return;
+ sys_imageblit(info, image);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * This is the slow path from userspace: callers can seek and write to
+ * the framebuffer, and it is inefficient to do anything less than a full
+ * screen draw, so a deferred update of the whole display is scheduled.
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ if (!info->par)
+ return -ENODEV;
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret >= 0)
+ schedule_delayed_work(&info->deferred_work, 0);
+ return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+ if (!info->par)
+ return -ENODEV;
+ /* We let fb notification do this for us via lcd/backlight device */
+ return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ info->par = NULL;
+ if (data)
+ data->fb_info = NULL;
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ __u32 bpp = var->bits_per_pixel;
+ __u32 activate = var->activate;
+
+ /* only allow 1/8 bit depth (8-bit is grayscale) */
+ *var = picolcdfb_var;
+ var->activate = activate;
+ if (bpp >= 8)
+ var->bits_per_pixel = 8;
+ else
+ var->bits_per_pixel = 1;
+ return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ u8 *o_fb, *n_fb;
+ if (info->var.bits_per_pixel == data->fb_bpp)
+ return 0;
+ /* switch between 1/8 bit depths */
+ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+ return -EINVAL;
+
+ o_fb = data->fb_bitmap;
+ n_fb = vmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel);
+ if (!n_fb)
+ return -ENOMEM;
+
+ fb_deferred_io_cleanup(info);
+ /* translate FB content to new bits-per-pixel */
+ if (info->var.bits_per_pixel == 1) {
+ int i, b;
+ for (i = 0; i < PICOLCDFB_SIZE; i++) {
+ u8 p = 0;
+ for (b = 0; b < 8; b++) {
+ p <<= 1;
+ p |= o_fb[i*8+b] ? 0x01 : 0x00;
+ }
+ /* store the packed byte into the new 1bpp framebuffer */
+ n_fb[i] = p;
+ }
+ info->fix.visual = FB_VISUAL_MONO01;
+ info->fix.line_length = PICOLCDFB_WIDTH / 8;
+ } else {
+ int i;
+ for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+ n_fb[i] = o_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = PICOLCDFB_WIDTH;
+ }
+
+ data->fb_bitmap = n_fb;
+ data->fb_bpp = info->var.bits_per_pixel;
+ info->screen_base = (char __force __iomem *)n_fb;
+ info->fix.smem_start = (unsigned long)n_fb;
+ info->fix.smem_len = PICOLCDFB_SIZE*data->fb_bpp;
+ fb_deferred_io_init(info);
+ vfree(o_fb);
+ return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = picolcd_fb_destroy,
+ .fb_read = fb_sys_read,
+ .fb_write = picolcd_fb_write,
+ .fb_blank = picolcd_fb_blank,
+ .fb_fillrect = picolcd_fb_fillrect,
+ .fb_copyarea = picolcd_fb_copyarea,
+ .fb_imageblit = picolcd_fb_imageblit,
+ .fb_check_var = picolcd_fb_check_var,
+ .fb_set_par = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ picolcd_fb_update(info->par);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+ .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+ .deferred_io = picolcd_fb_deferred_io,
+};
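+/*
+ * The deferred IO core coalesces framebuffer writes and calls
+ * picolcd_fb_deferred_io() once per "delay" interval; the delay is derived
+ * from the fb_update_rate sysfs attribute below (HZ / rate).
+ */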
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned i, fb_update_rate = data->fb_update_rate;
+ size_t ret = 0;
+
+ for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+ if (ret >= PAGE_SIZE)
+ break;
+ else if (i == fb_update_rate)
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ else
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ if (ret > 0)
+ buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+ return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ int i;
+ unsigned u;
+
+ if (count < 1 || count > 10)
+ return -EINVAL;
+
+ i = sscanf(buf, "%u", &u);
+ if (i != 1)
+ return -EINVAL;
+
+ if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+ return -ERANGE;
+ else if (u == 0)
+ u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+ data->fb_update_rate = u;
+ data->fb_defio.delay = HZ / data->fb_update_rate;
+ return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+ picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+static int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ struct device *dev = &data->hdev->dev;
+ struct fb_info *info = NULL;
+ int error = -ENOMEM;
+ u8 *fb_vbitmap = NULL;
+ u8 *fb_bitmap = NULL;
+
+ fb_bitmap = vmalloc(PICOLCDFB_SIZE*picolcdfb_var.bits_per_pixel);
+ if (fb_bitmap == NULL) {
+ dev_err(dev, "can't get a free page for framebuffer\n");
+ goto err_nomem;
+ }
+
+ fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
+ if (fb_vbitmap == NULL) {
+ dev_err(dev, "can't alloc vbitmap image buffer\n");
+ goto err_nomem;
+ }
+
+ data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+ data->fb_defio = picolcd_fb_defio;
+ info = framebuffer_alloc(0, dev);
+ if (info == NULL) {
+ dev_err(dev, "failed to allocate a framebuffer\n");
+ goto err_nomem;
+ }
+
+ info->fbdefio = &data->fb_defio;
+ info->screen_base = (char __force __iomem *)fb_bitmap;
+ info->fbops = &picolcdfb_ops;
+ info->var = picolcdfb_var;
+ info->fix = picolcdfb_fix;
+ info->fix.smem_len = PICOLCDFB_SIZE;
+ info->fix.smem_start = (unsigned long)fb_bitmap;
+ info->par = data;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ data->fb_vbitmap = fb_vbitmap;
+ data->fb_bitmap = fb_bitmap;
+ data->fb_bpp = picolcdfb_var.bits_per_pixel;
+ error = picolcd_fb_reset(data, 1);
+ if (error) {
+ dev_err(dev, "failed to configure display\n");
+ goto err_cleanup;
+ }
+ error = device_create_file(dev, &dev_attr_fb_update_rate);
+ if (error) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_cleanup;
+ }
+ data->fb_info = info;
+ error = register_framebuffer(info);
+ if (error) {
+ dev_err(dev, "failed to register framebuffer\n");
+ goto err_sysfs;
+ }
+ fb_deferred_io_init(info);
+ /* schedule first output of framebuffer */
+ schedule_delayed_work(&info->deferred_work, 0);
+ return 0;
+
+err_sysfs:
+ device_remove_file(dev, &dev_attr_fb_update_rate);
+err_cleanup:
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+
+err_nomem:
+ framebuffer_release(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+ return error;
+}
+
+static void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+ struct fb_info *info = data->fb_info;
+ u8 *fb_vbitmap = data->fb_vbitmap;
+ u8 *fb_bitmap = data->fb_bitmap;
+
+ if (!info)
+ return;
+
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+ device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+}
+
+#define picolcd_fbinfo(d) ((d)->fb_info)
+#else
+static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ return 0;
+}
+static inline int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+}
+#define picolcd_fbinfo(d) NULL
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+/*
+ * backlight class device
+ */
+static int picolcd_get_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ return data->lcd_brightness;
+}
+
+static int picolcd_set_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_brightness = bdev->props.brightness & 0x0ff;
+ data->lcd_power = bdev->props.power;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
+}
+
+static const struct backlight_ops picolcd_blops = {
+ .update_status = picolcd_set_brightness,
+ .get_brightness = picolcd_get_brightness,
+ .check_fb = picolcd_check_bl_fb,
+};
+
+static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct backlight_device *bdev;
+ struct backlight_properties props;
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported BRIGHTNESS report\n");
+ return -EINVAL;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = 0xff;
+ bdev = backlight_device_register(dev_name(dev), dev, data,
+ &picolcd_blops, &props);
+ if (IS_ERR(bdev)) {
+ dev_err(dev, "failed to register backlight\n");
+ return PTR_ERR(bdev);
+ }
+ bdev->props.brightness = 0xff;
+ data->lcd_brightness = 0xff;
+ data->backlight = bdev;
+ picolcd_set_brightness(bdev);
+ return 0;
+}
+
+static void picolcd_exit_backlight(struct picolcd_data *data)
+{
+ struct backlight_device *bdev = data->backlight;
+
+ data->backlight = NULL;
+ if (bdev)
+ backlight_device_unregister(bdev);
+}
+
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ if (!data->backlight)
+ return 0;
+ return picolcd_set_brightness(data->backlight);
+}
+
+#ifdef CONFIG_PM
+static void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+ int bl_power = data->lcd_power;
+ if (!data->backlight)
+ return;
+
+ data->backlight->props.power = FB_BLANK_POWERDOWN;
+ picolcd_set_brightness(data->backlight);
+ data->lcd_power = data->backlight->props.power = bl_power;
+}
+#endif /* CONFIG_PM */
+#else
+static inline int picolcd_init_backlight(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_backlight(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+
+#ifdef CONFIG_HID_PICOLCD_LCD
+/*
+ * lcd class device
+ */
+static int picolcd_get_contrast(struct lcd_device *ldev)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ return data->lcd_contrast;
+}
+
+static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_contrast = contrast & 0x0ff;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_contrast);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+}
+
+static struct lcd_ops picolcd_lcdops = {
+ .get_contrast = picolcd_get_contrast,
+ .set_contrast = picolcd_set_contrast,
+ .check_fb = picolcd_check_lcd_fb,
+};
+
+static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct lcd_device *ldev;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported CONTRAST report\n");
+ return -EINVAL;
+ }
+
+ ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
+ if (IS_ERR(ldev)) {
+ dev_err(dev, "failed to register LCD\n");
+ return PTR_ERR(ldev);
+ }
+ ldev->props.max_contrast = 0x0ff;
+ data->lcd_contrast = 0xe5;
+ data->lcd = ldev;
+ picolcd_set_contrast(ldev, 0xe5);
+ return 0;
+}
+
+static void picolcd_exit_lcd(struct picolcd_data *data)
+{
+ struct lcd_device *ldev = data->lcd;
+
+ data->lcd = NULL;
+ if (ldev)
+ lcd_device_unregister(ldev);
+}
+
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ if (!data->lcd)
+ return 0;
+ return picolcd_set_contrast(data->lcd, data->lcd_contrast);
+}
+#else
+static inline int picolcd_init_lcd(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_lcd(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LCD */
+
+#ifdef CONFIG_HID_PICOLCD_LEDS
+/*
+ * LED class device
+ */
+static void picolcd_leds_set(struct picolcd_data *data)
+{
+ struct hid_report *report;
+ unsigned long flags;
+
+ if (!data->led[0])
+ return;
+ report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->led_state);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, state = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++) {
+ if (led_cdev != data->led[i])
+ continue;
+ state = (data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ data->led_state &= ~(1 << i);
+ picolcd_leds_set(data);
+ } else if (value != LED_OFF && !state) {
+ data->led_state |= 1 << i;
+ picolcd_leds_set(data);
+ }
+ break;
+ }
+}
+
+static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, value = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++)
+ if (led_cdev == data->led[i]) {
+ value = (data->led_state >> i) & 1;
+ break;
+ }
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct led_classdev *led;
+ size_t name_sz = strlen(dev_name(dev)) + 8;
+ char *name;
+ int i, ret = 0;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported LED_STATE report\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "can't allocate memory for LED %d\n", i);
+ ret = -ENOMEM;
+ goto err;
+ }
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = picolcd_led_get_brightness;
+ led->brightness_set = picolcd_led_set_brightness;
+
+ data->led[i] = led;
+ ret = led_classdev_register(dev, data->led[i]);
+ if (ret) {
+ data->led[i] = NULL;
+ kfree(led);
+ dev_err(dev, "can't register LED %d\n", i);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ for (i = 0; i < 8; i++)
+ if (data->led[i]) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ return ret;
+}
+
+static void picolcd_exit_leds(struct picolcd_data *data)
+{
+ struct led_classdev *led;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+}
+
+#else
+static inline int picolcd_init_leds(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_leds(struct picolcd_data *data)
+{
+}
+static inline void picolcd_leds_set(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+/*
+ * input class device
+ */
+static int picolcd_raw_keypad(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /*
+ * Keypad event
+ * The first and second data bytes list the currently pressed keys;
+ * 0x00 means no key, and at most 2 keys may be pressed at the same time.
+ */
+ int i, j;
+
+ /* determine newly pressed keys */
+ for (i = 0; i < size; i++) {
+ unsigned int key_code;
+ if (raw_data[i] == 0)
+ continue;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_already_down;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == 0) {
+ data->pressed_keys[j] = raw_data[i];
+ break;
+ }
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
+ if (raw_data[i] < PICOLCD_KEYS)
+ key_code = data->keycode[raw_data[i]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key press for %u:%d",
+ raw_data[i], key_code);
+ input_report_key(data->input_keys, key_code, 1);
+ }
+ input_sync(data->input_keys);
+key_already_down:
+ continue;
+ }
+
+ /* determine newly released keys */
+ for (j = 0; j < sizeof(data->pressed_keys); j++) {
+ unsigned int key_code;
+ if (data->pressed_keys[j] == 0)
+ continue;
+ for (i = 0; i < size; i++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_still_down;
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
+ if (data->pressed_keys[j] < PICOLCD_KEYS)
+ key_code = data->keycode[data->pressed_keys[j]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key release for %u:%d",
+ data->pressed_keys[j], key_code);
+ input_report_key(data->input_keys, key_code, 0);
+ }
+ input_sync(data->input_keys);
+ data->pressed_keys[j] = 0;
+key_still_down:
+ continue;
+ }
+ return 1;
+}
+
+static int picolcd_raw_cir(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /* Need understanding of CIR data format to implement ... */
+ return 1;
+}
+
+static int picolcd_check_version(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *verinfo;
+ int ret = 0;
+
+ if (!data)
+ return -ENODEV;
+
+ verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
+ if (!verinfo) {
+ dev_err(&hdev->dev, "no version response from PicoLCD\n");
+ return -ENODEV;
+ }
+
+ if (verinfo->raw_size == 2) {
+ data->version[0] = verinfo->raw_data[1];
+ data->version[1] = verinfo->raw_data[0];
+ if (data->status & PICOLCD_BOOTLOADER) {
+ dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ } else {
+ dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ }
+ } else {
+ dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+ ret = -EINVAL;
+ }
+ kfree(verinfo);
+ return ret;
+}
+
+/*
+ * Reset our device and wait for answer to VERSION request
+ */
+static int picolcd_reset(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
+ unsigned long flags;
+ int error;
+
+ if (!data || !report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+
+ /* perform the reset */
+ hid_set_field(report->field[0], 0, 1);
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ picolcd_resume_lcd(data);
+ picolcd_resume_backlight(data);
+#ifdef CONFIG_HID_PICOLCD_FB
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+ picolcd_leds_set(data);
+ return 0;
+}
+
+/*
+ * The "operation_mode" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+}
+
+static ssize_t picolcd_operation_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct hid_report *report = NULL;
+ size_t cnt = count;
+ int timeout = data->opmode_delay;
+ unsigned long flags;
+
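+ /*
+ * Accepted values are "lcd" and "bootloader", optionally followed by
+ * trailing newlines; a mode switch is only requested when the device
+ * is currently in the other mode. The configured opmode_delay is sent
+ * along as the restart delay (in milliseconds).
+ */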
+ if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (data->status & PICOLCD_BOOTLOADER)
+ report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
+ buf += 3;
+ cnt -= 3;
+ } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ if (!(data->status & PICOLCD_BOOTLOADER))
+ report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
+ buf += 10;
+ cnt -= 10;
+ }
+ if (!report)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+ cnt--;
+ if (cnt != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, timeout & 0xff);
+ hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
+ picolcd_operation_mode_store);
+
+/*
+ * The "operation_mode_delay" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+}
+
+static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned u;
+ if (sscanf(buf, "%u", &u) != 1)
+ return -EINVAL;
+ if (u > 30000)
+ return -EINVAL;
+ else
+ data->opmode_delay = u;
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
+ picolcd_operation_mode_delay_store);
+
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * The "reset" file
+ */
+static int picolcd_debug_reset_show(struct seq_file *f, void *p)
+{
+ if (picolcd_fbinfo((struct picolcd_data *)f->private))
+ seq_printf(f, "all fb\n");
+ else
+ seq_printf(f, "all\n");
+ return 0;
+}
+
+static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, picolcd_debug_reset_show, inode->i_private);
+}
+
+static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
+ char buf[32];
+ size_t cnt = min(count, sizeof(buf)-1);
+ if (copy_from_user(buf, user_buf, cnt))
+ return -EFAULT;
+
+ while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n'))
+ cnt--;
+ buf[cnt] = '\0';
+ if (strcmp(buf, "all") == 0) {
+ picolcd_reset(data->hdev);
+ picolcd_fb_reset(data, 1);
+ } else if (strcmp(buf, "fb") == 0) {
+ picolcd_fb_reset(data, 1);
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static const struct file_operations picolcd_debug_reset_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_reset_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = picolcd_debug_reset_write,
+ .release = single_release,
+};
+
+/*
+ * The "eeprom" file
+ */
+static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ ssize_t ret = -EIO;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return 0;
+
+ /* prepare buffer with info about what we want to read (addr & len) */
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
+ sizeof(raw_data));
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* successful read :) */
+ ret = resp->raw_data[2];
+ if (ret > s)
+ ret = s;
+ if (copy_to_user(u, resp->raw_data+3, ret))
+ ret = -EFAULT;
+ else
+ *off += ret;
+ } /* anything else is some kind of IO error */
+
+ kfree(resp);
+ return ret;
+}
+
+static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ ssize_t ret = -EIO;
+ u8 raw_data[23];
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return -ENOSPC;
+
+ memset(raw_data, 0, sizeof(raw_data));
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+
+ if (copy_from_user(raw_data+3, u, raw_data[2]))
+ return -EFAULT;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
+ sizeof(raw_data));
+
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* check if written data matches */
+ if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
+ *off += raw_data[2];
+ ret = raw_data[2];
+ }
+ }
+ kfree(resp);
+ return ret;
+}
+
+/*
+ * Notes:
+ * - reads and writes happen in chunks of at most 20 bytes; it is up to
+ * userspace to loop in order to transfer more data.
+ * - if a write error occurs for an otherwise correct write request, the
+ * bytes that should have been written are left in an undefined state.
+ */
+static const struct file_operations picolcd_debug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_eeprom_open,
+ .read = picolcd_debug_eeprom_read,
+ .write = picolcd_debug_eeprom_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * The "flash" file
+ */
+static int picolcd_debug_flash_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+/* record a flash address to buf (bounds check to be done by caller) */
+static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
+{
+ buf[0] = off & 0xff;
+ buf[1] = (off >> 8) & 0xff;
+ if (data->addr_sz == 3)
+ buf[2] = (off >> 16) & 0xff;
+ return data->addr_sz == 2 ? 2 : 3;
+}
+
+/* read a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
+ char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[4];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
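+ /*
+ * Transfer in chunks of at most 32 bytes; the device is expected to
+ * echo the address/length prefix in its reply, which is verified
+ * before the payload is copied to userspace.
+ */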
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_READ_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
+ goto skip;
+ if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
+ err = -EFAULT;
+ goto skip;
+ }
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ return ret > 0 ? ret : err;
+ }
+ return ret;
+}
+
+static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x05fff)
+ return 0;
+ if (*off + s > 0x05fff)
+ s = 0x06000 - *off;
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off);
+ else
+ return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off);
+}
+
+/* erase a flash block aligned to a 64-byte boundary */
+static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
+ loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ int len_off;
+ ssize_t ret = -EIO;
+
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off) != 0)
+ goto skip;
+ ret = 0;
+ }
+skip:
+ kfree(resp);
+ return ret;
+}
+
+/* write a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
+ const char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[36];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) {
+ err = -EFAULT;
+ break;
+ }
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
+ len_off+1+raw_data[len_off]);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
+ goto skip;
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ break;
+ }
+ return ret > 0 ? ret : err;
+}
+
+static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ ssize_t err, ret = 0;
+ int report_erase, report_write;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x5fff)
+ return -ENOSPC;
+ if (s & 0x3f)
+ return -EINVAL;
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ if (data->status & PICOLCD_BOOTLOADER) {
+ report_erase = REPORT_BL_ERASE_MEMORY;
+ report_write = REPORT_BL_WRITE_MEMORY;
+ } else {
+ report_erase = REPORT_ERASE_MEMORY;
+ report_write = REPORT_WRITE_MEMORY;
+ }
+ mutex_lock(&data->mutex_flash);
+ while (s > 0) {
+ err = _picolcd_flash_erase64(data, report_erase, off);
+ if (err)
+ break;
+ err = _picolcd_flash_write(data, report_write, u, 64, off);
+ if (err < 0)
+ break;
+ ret += err;
+ *off += err;
+ s -= err;
+ if (err != 64)
+ break;
+ }
+ mutex_unlock(&data->mutex_flash);
+ return ret > 0 ? ret : err;
+}
+
+/*
+ * Notes:
+ * - concurrent writing is prevented by a mutex; all writes must be
+ * n*64 bytes long and 64-byte aligned, each write being preceded by an
+ * ERASE of the corresponding 64-byte block.
+ * If less than the requested amount was written, or an error is returned
+ * for an otherwise correct write request, the next 64-byte block that
+ * should have been written is left in an undefined state (typically:
+ * original, erased, or (half-)written with a write error).
+ * - reading can happen without special restrictions.
+ */
+static const struct file_operations picolcd_debug_flash_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_flash_open,
+ .read = picolcd_debug_flash_read,
+ .write = picolcd_debug_flash_write,
+ .llseek = generic_file_llseek,
+};
+
+
+/*
+ * Helper code for HID report level dumping/debugging
+ */
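+/* Human-readable names for the ERR_* codes, indexed by error code value */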
+static const char *error_codes[] = {
+ "success", "parameter missing", "data_missing", "block readonly",
+ "block not erasable", "block too big", "section overflow",
+ "invalid command length", "invalid data length",
+};
+
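+/*
+ * Render data as space-separated hex bytes into dst, terminated by a newline
+ * and NUL as far as dst_sz allows.
+ */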
+static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
+ const size_t data_len)
+{
+ int i, j;
+ for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) {
+ dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
+ dst[j++] = hex_asc[data[i] & 0x0f];
+ dst[j++] = ' ';
+ }
+ if (j < dst_sz) {
+ dst[j--] = '\0';
+ dst[j] = '\n';
+ } else
+ dst[j] = '\0';
+}
+
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report)
+{
+ u8 raw_data[70];
+ int raw_size = (report->size >> 3) + 1;
+ char *buff;
+#define BUFF_SZ 256
+
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
+ report->id, raw_size);
+ hid_debug_event(hdev, buff);
+ if (raw_size + 5 > sizeof(raw_data)) {
+ hid_debug_event(hdev, " TOO BIG\n");
+ kfree(buff); /* don't leak the debug buffer on early return */
+ return;
+ } else {
+ raw_data[0] = report->id;
+ hid_output_report(report, raw_data);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+ hid_debug_event(hdev, buff);
+ }
+
+ switch (report->id) {
+ case REPORT_LED_STATE:
+ /* 1 data byte with GPO state */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LED_STATE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BRIGHTNESS:
+ /* 1 data byte with brightness */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_BRIGHTNESS", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_CONTRAST:
+ /* 1 data byte with contrast */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_CONTRAST", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_RESET:
+ /* 2 data bytes with reset duration in ms */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_RESET", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
+ raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD:
+ /* 63 data bytes with LCD commands */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO: format decoding */
+ break;
+ case REPORT_LCD_DATA:
+ /* 63 data bytes with LCD data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD_DATA:
+ /* 63 data bytes with LCD commands and data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_READ:
+ /* 3 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_READ", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_WRITE:
+ /* 3+1..20 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_WRITE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_ERASE_MEMORY:
+ case REPORT_BL_ERASE_MEMORY:
+ /* 3 data bytes with pointer inside erase block */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_ERASE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_READ_MEMORY:
+ case REPORT_BL_READ_MEMORY:
+ /* 4 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_READ_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_WRITE_MEMORY:
+ case REPORT_BL_WRITE_MEMORY:
+ /* 4+1..32 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_WRITE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_RESTART:
+ /* TODO */
+ break;
+ case REPORT_EXIT_KEYBOARD:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EXIT_FLASHER:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_FLASHER", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "<unknown>", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ char *buff;
+
+#define BUFF_SZ 256
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ switch (report->id) {
+ case REPORT_ERROR_CODE:
+ /* 2 data bytes with affected report and error code */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_ERROR_CODE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[2] < ARRAY_SIZE(error_codes))
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
+ raw_data[2], error_codes[raw_data[2]], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_KEY_STATE:
+ /* 2 data bytes with key state */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_KEY_STATE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0)
+ snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
+ else if (raw_data[2] == 0)
+ snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
+ raw_data[1], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
+ raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_IR_DATA:
+ /* Up to 20 bytes of IR scancode data */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_IR_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0) {
+ snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[1] + 1 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_EE_DATA:
+ /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_EE_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_MEMORY:
+ /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRITE_MEMORY */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BL_ERASE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_ERASE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_READ_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_READ_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_WRITE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_WRITE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
+ raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
+ raw_data[5]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
+ (raw_data[2] << 8) | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
+ (raw_data[4] << 8) | raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[1], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "<unknown>", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+ struct hid_device *hdev = data->hdev;
+
+ mutex_init(&data->mutex_flash);
+
+ /* reset */
+ if (reset)
+ data->debug_reset = debugfs_create_file("reset", 0600,
+ hdev->debug_dir, data, &picolcd_debug_reset_fops);
+
+ /* eeprom */
+ if (eeprom_r || eeprom_w)
+ data->debug_eeprom = debugfs_create_file("eeprom",
+ (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_eeprom_fops);
+
+ /* flash */
+ if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8)
+ data->addr_sz = flash_r->field[0]->report_count - 1;
+ else
+ data->addr_sz = -1;
+ if (data->addr_sz == 2 || data->addr_sz == 3) {
+ data->debug_flash = debugfs_create_file("flash",
+ (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_flash_fops);
+ } else if (flash_r || flash_w)
+ dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
+ "please submit rdesc for review\n");
+}
+
+static void picolcd_exit_devfs(struct picolcd_data *data)
+{
+ struct dentry *dent;
+
+ dent = data->debug_reset;
+ data->debug_reset = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_eeprom;
+ data->debug_eeprom = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_flash;
+ data->debug_flash = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ mutex_destroy(&data->mutex_flash);
+}
+#else
+static inline void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+}
+static inline void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+}
+static inline void picolcd_exit_devfs(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Handle raw report as sent by device
+ */
+static int picolcd_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data)
+ return 1;
+
+ if (report->id == REPORT_KEY_STATE) {
+ if (data->input_keys)
+ ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ } else if (report->id == REPORT_IR_DATA) {
+ if (data->input_cir)
+ ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ } else {
+ spin_lock_irqsave(&data->lock, flags);
+ /*
+ * We let the caller of picolcd_send_and_wait() check whether the
+ * report we got is one of the expected ones.
+ */
+ if (data->pending) {
+ memcpy(data->pending->raw_data, raw_data+1, size-1);
+ data->pending->raw_size = size-1;
+ data->pending->in_report = report;
+ complete(&data->pending->ready);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ picolcd_debug_raw_event(data, hdev, report, raw_data, size);
+ return 1;
+}
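The pending-reply handoff above only completes the picture together with the sending side, which installs data->pending and then sleeps on the completion; a minimal sketch of that side, assuming picolcd_send_and_wait() (defined earlier in this file) has roughly this shape:

/*
 * Illustrative sketch of the sending side; the real picolcd_send_and_wait()
 * lives earlier in this file, and the timeout and submission details below
 * are assumptions.
 */
static struct picolcd_pending *picolcd_send_and_wait_sketch(
                struct hid_device *hdev, struct hid_report *report)
{
        struct picolcd_data *data = hid_get_drvdata(hdev);
        struct picolcd_pending *work;
        unsigned long flags;

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
                return NULL;
        init_completion(&work->ready);

        spin_lock_irqsave(&data->lock, flags);
        data->pending = work;           /* picolcd_raw_event() fills this in */
        spin_unlock_irqrestore(&data->lock, flags);

        usbhid_submit_report(hdev, report, USB_DIR_OUT);
        wait_for_completion_interruptible_timeout(&work->ready, HZ * 2);

        spin_lock_irqsave(&data->lock, flags);
        data->pending = NULL;           /* reply, if any, is now in *work */
        spin_unlock_irqrestore(&data->lock, flags);
        return work;
}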
+
+#ifdef CONFIG_PM
+static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ if (message.event & PM_EVENT_AUTO)
+ return 0;
+
+ picolcd_suspend_backlight(hid_get_drvdata(hdev));
+ dbg_hid(PICOLCD_NAME " device ready for suspend\n");
+ return 0;
+}
+
+static int picolcd_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ return 0;
+}
+
+static int picolcd_reset_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_reset(hdev);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
+ ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
+ ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ picolcd_leds_set(hid_get_drvdata(hdev));
+ return 0;
+}
+#endif
+
+/* initialize keypad input device */
+static int picolcd_init_keys(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ struct hid_device *hdev = data->hdev;
+ struct input_dev *idev;
+ int error, i;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
+ report->field[0]->report_size != 8) {
+ dev_err(&hdev->dev, "unsupported KEY_STATE report");
+ return -EINVAL;
+ }
+
+ idev = input_allocate_device();
+ if (idev == NULL) {
+ dev_err(&hdev->dev, "failed to allocate input device");
+ return -ENOMEM;
+ }
+ input_set_drvdata(idev, hdev);
+ memcpy(data->keycode, def_keymap, sizeof(def_keymap));
+ idev->name = hdev->name;
+ idev->phys = hdev->phys;
+ idev->uniq = hdev->uniq;
+ idev->id.bustype = hdev->bus;
+ idev->id.vendor = hdev->vendor;
+ idev->id.product = hdev->product;
+ idev->id.version = hdev->version;
+ idev->dev.parent = hdev->dev.parent;
+ idev->keycode = &data->keycode;
+ idev->keycodemax = PICOLCD_KEYS;
+ idev->keycodesize = sizeof(data->keycode[0]);
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < PICOLCD_KEYS; i++)
+ input_set_capability(idev, EV_KEY, data->keycode[i]);
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&hdev->dev, "error registering the input device");
+ input_free_device(idev);
+ return error;
+ }
+ data->input_keys = idev;
+ return 0;
+}
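Because idev->keycode, keycodemax and keycodesize are wired up above, the keymap can be remapped from userspace through the standard evdev ioctls; a hedged userspace sketch (the event node path and the scancode value are assumptions for illustration):

/* Userspace sketch: remap scancode 1 of the PicoLCD keypad to KEY_ENTER via
 * the legacy EVIOCSKEYCODE interface.  The /dev/input path is an assumption. */
#include <fcntl.h>
#include <linux/input.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int picolcd_remap_key(const char *evdev_path)
{
        unsigned int codes[2] = { 1, KEY_ENTER };   /* scancode, new keycode */
        int fd = open(evdev_path, O_RDWR);
        int ret;

        if (fd < 0)
                return -1;
        ret = ioctl(fd, EVIOCSKEYCODE, codes);
        close(fd);
        return ret;
}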
+
+static void picolcd_exit_keys(struct picolcd_data *data)
+{
+ struct input_dev *idev = data->input_keys;
+
+ data->input_keys = NULL;
+ if (idev)
+ input_unregister_device(idev);
+}
+
+/* initialize CIR input device */
+static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
+{
+ /* support not implemented yet */
+ return 0;
+}
+
+static inline void picolcd_exit_cir(struct picolcd_data *data)
+{
+}
+
+static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 0 || data->version[1] != 3)
+ dev_info(&hdev->dev, "Device with untested firmware revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ /* Setup keypad input device */
+ error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
+ if (error)
+ goto err;
+
+ /* Setup CIR input device */
+ error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
+ if (error)
+ goto err;
+
+ /* Set up the framebuffer device */
+ error = picolcd_init_framebuffer(data);
+ if (error)
+ goto err;
+
+ /* Setup lcd class device */
+ error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
+ if (error)
+ goto err;
+
+ /* Setup backlight class device */
+ error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
+ if (error)
+ goto err;
+
+ /* Setup the LED class devices */
+ error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
+ if (error)
+ goto err;
+
+ picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
+ picolcd_out_report(REPORT_EE_WRITE, hdev),
+ picolcd_out_report(REPORT_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
+ picolcd_out_report(REPORT_RESET, hdev));
+ return 0;
+err:
+ picolcd_exit_leds(data);
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+ return error;
+}
+
+static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 1 || data->version[1] != 0)
+ dev_info(&hdev->dev, "Device with untested bootloader revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ picolcd_init_devfs(data, NULL, NULL,
+ picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
+ return 0;
+}
+
+static int picolcd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct picolcd_data *data;
+ int error = -ENOMEM;
+
+ dbg_hid(PICOLCD_NAME " hardware probe...\n");
+
+ /*
+ * Let's allocate the picolcd data structure, set some reasonable
+ * defaults, and associate it with the device
+ */
+ data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+ error = -ENOMEM;
+ goto err_no_cleanup;
+ }
+
+ spin_lock_init(&data->lock);
+ mutex_init(&data->mutex);
+ data->hdev = hdev;
+ data->opmode_delay = 5000;
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+ hid_set_drvdata(hdev, data);
+
+ /* Parse the device reports and start it up */
+ error = hid_parse(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "device report parse failed\n");
+ goto err_cleanup_data;
+ }
+
+ /* We don't use hidinput but hid_hw_start() fails if nothing is
+ * claimed. So spoof claimed input. */
+ hdev->claimed = HID_CLAIMED_INPUT;
+ error = hid_hw_start(hdev, 0);
+ hdev->claimed = 0;
+ if (error) {
+ dev_err(&hdev->dev, "hardware start failed\n");
+ goto err_cleanup_data;
+ }
+
+ error = hdev->ll_driver->open(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+ goto err_cleanup_hid_hw;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_hid_ll;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_sysfs1;
+ }
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ error = picolcd_probe_bootloader(hdev, data);
+ else
+ error = picolcd_probe_lcd(hdev, data);
+ if (error)
+ goto err_cleanup_sysfs2;
+
+ dbg_hid(PICOLCD_NAME " activated and initialized\n");
+ return 0;
+
+err_cleanup_sysfs2:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+err_cleanup_sysfs1:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+err_cleanup_hid_ll:
+ hdev->ll_driver->close(hdev);
+err_cleanup_hid_hw:
+ hid_hw_stop(hdev);
+err_cleanup_data:
+ kfree(data);
+err_no_cleanup:
+ hid_set_drvdata(hdev, NULL);
+
+ return error;
+}
+
+static void picolcd_remove(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ dbg_hid(PICOLCD_NAME " hardware remove...\n");
+ spin_lock_irqsave(&data->lock, flags);
+ data->status |= PICOLCD_FAILED;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ picolcd_exit_devfs(data);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+ hid_set_drvdata(hdev, NULL);
+
+ /* Shortcut potential pending reply that will never arrive */
+ spin_lock_irqsave(&data->lock, flags);
+ if (data->pending)
+ complete(&data->pending->ready);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Cleanup LED */
+ picolcd_exit_leds(data);
+ /* Clean up the framebuffer */
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ /* Cleanup input */
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+
+ mutex_destroy(&data->mutex);
+ /* Finally, clean up the picolcd data itself */
+ kfree(data);
+}
+
+static const struct hid_device_id picolcd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, picolcd_devices);
+
+static struct hid_driver picolcd_driver = {
+ .name = "hid-picolcd",
+ .id_table = picolcd_devices,
+ .probe = picolcd_probe,
+ .remove = picolcd_remove,
+ .raw_event = picolcd_raw_event,
+#ifdef CONFIG_PM
+ .suspend = picolcd_suspend,
+ .resume = picolcd_resume,
+ .reset_resume = picolcd_reset_resume,
+#endif
+};
+
+static int __init picolcd_init(void)
+{
+ return hid_register_driver(&picolcd_driver);
+}
+
+static void __exit picolcd_exit(void)
+{
+ hid_unregister_driver(&picolcd_driver);
+}
+
+module_init(picolcd_init);
+module_exit(picolcd_exit);
+MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
new file mode 100644
index 00000000000..845f428b809
--- /dev/null
+++ b/drivers/hid/hid-prodikeys.c
@@ -0,0 +1,910 @@
+/*
+ * HID driver for the Prodikeys PC-MIDI Keyboard
+ * providing midi & extra multimedia keys functionality
+ *
+ * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ * Controls for Octave Shift Up/Down, Channel, and
+ * Sustain Duration available via sysfs.
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/hid.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+
+#define pk_debug(format, arg...) \
+ pr_debug("hid-prodikeys: " format "\n" , ## arg)
+#define pk_error(format, arg...) \
+ pr_err("hid-prodikeys: " format "\n" , ## arg)
+
+struct pcmidi_snd;
+
+struct pk_device {
+ unsigned long quirks;
+
+ struct hid_device *hdev;
+ struct pcmidi_snd *pm; /* pcmidi device context */
+};
+
+struct pcmidi_snd;
+
+struct pcmidi_sustain {
+ unsigned long in_use;
+ struct pcmidi_snd *pm;
+ struct timer_list timer;
+ unsigned char status;
+ unsigned char note;
+ unsigned char velocity;
+};
+
+#define PCMIDI_SUSTAINED_MAX 32
+struct pcmidi_snd {
+ struct pk_device *pk;
+ unsigned short ifnum;
+ struct hid_report *pcmidi_report6;
+ struct input_dev *input_ep82;
+ unsigned short midi_mode;
+ unsigned short midi_sustain_mode;
+ unsigned short midi_sustain;
+ unsigned short midi_channel;
+ short midi_octave;
+ struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX];
+ unsigned short fn_state;
+ unsigned short last_key[24];
+ spinlock_t rawmidi_in_lock;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ struct snd_rawmidi_substream *in_substream;
+ struct snd_rawmidi_substream *out_substream;
+ unsigned long in_triggered;
+ unsigned long out_active;
+};
+
+#define PK_QUIRK_NOGET 0x00010000
+#define PCMIDI_MIDDLE_C 60
+#define PCMIDI_CHANNEL_MIN 0
+#define PCMIDI_CHANNEL_MAX 15
+#define PCMIDI_OCTAVE_MIN (-2)
+#define PCMIDI_OCTAVE_MAX 2
+#define PCMIDI_SUSTAIN_MIN 0
+#define PCMIDI_SUSTAIN_MAX 5000
+
+static const char shortname[] = "PC-MIDI";
+static const char longname[] = "Prodikeys PC-MIDI Keyboard";
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+module_param_array(index, int, NULL, 0444);
+module_param_array(id, charp, NULL, 0444);
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
+
+
+/* Output routine for the sysfs channel file */
+static ssize_t show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
+
+ return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
+ PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
+}
+
+/* Input routine for the sysfs channel file */
+static ssize_t store_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned channel = 0;
+
+ if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
+ dbg_hid("pcmidi sysfs write channel=%u\n", channel);
+ pk->pm->midi_channel = channel;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUGO, show_channel,
+ store_channel);
+
+static struct device_attribute *sysfs_device_attr_channel =
+ &dev_attr_channel;
+
+/* Output routine for the sysfs sustain file */
+static ssize_t show_sustain(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
+
+ return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
+ PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
+}
+
+/* Input routine for the sysfs sustain file */
+static ssize_t store_sustain(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned sustain = 0;
+
+ if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
+ dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
+ pk->pm->midi_sustain = sustain;
+ pk->pm->midi_sustain_mode =
+ (0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUGO, show_sustain,
+ store_sustain);
+
+static struct device_attribute *sysfs_device_attr_sustain =
+ &dev_attr_sustain;
+
+/* Output routine for the sysfs octave file */
+static ssize_t show_octave(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
+
+ return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
+ PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
+}
+
+/* Input routine for the sysfs octave file */
+static ssize_t store_octave(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ int octave = 0;
+
+ if (sscanf(buf, "%d", &octave) > 0 &&
+ octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
+ dbg_hid("pcmidi sysfs write octave=%d\n", octave);
+ pk->pm->midi_octave = octave;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUGO, show_octave,
+ store_octave);
+
+static struct device_attribute *sysfs_device_attr_octave =
+ &dev_attr_octave;
+
+
+static void pcmidi_send_note(struct pcmidi_snd *pm,
+ unsigned char status, unsigned char note, unsigned char velocity)
+{
+ unsigned long flags;
+ unsigned char buffer[3];
+
+ buffer[0] = status;
+ buffer[1] = note;
+ buffer[2] = velocity;
+
+ spin_lock_irqsave(&pm->rawmidi_in_lock, flags);
+
+ if (!pm->in_substream)
+ goto drop_note;
+ if (!test_bit(pm->in_substream->number, &pm->in_triggered))
+ goto drop_note;
+
+ snd_rawmidi_receive(pm->in_substream, buffer, 3);
+
+drop_note:
+ spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);
+
+ return;
+}
+
+void pcmidi_sustained_note_release(unsigned long data)
+{
+ struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+
+ pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
+ pms->in_use = 0;
+}
+
+void init_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ pms->in_use = 0;
+ pms->pm = pm;
+ setup_timer(&pms->timer, pcmidi_sustained_note_release,
+ (unsigned long)pms);
+ }
+}
+
+void stop_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ pms->in_use = 1;
+ del_timer_sync(&pms->timer);
+ }
+}
+
+static int pcmidi_get_output_report(struct pcmidi_snd *pm)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report;
+
+ list_for_each_entry(report,
+ &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
+ if (report->id != 6)
+ continue;
+
+ if (report->maxfield < 1) {
+ dev_err(&hdev->dev, "output report is empty\n");
+ break;
+ }
+ if (report->field[0]->report_count != 2) {
+ dev_err(&hdev->dev, "field count too low\n");
+ break;
+ }
+ pm->pcmidi_report6 = report;
+ return 0;
+ }
+ /* should never get here */
+ return -ENODEV;
+}
+
+static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report = pm->pcmidi_report6;
+ report->field[0]->value[0] = 0x01;
+ report->field[0]->value[1] = state;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
+{
+ u32 bit_mask;
+
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+
+ /*KEY_MAIL or octave down*/
+ if (pm->midi_mode && bit_mask == 0x004000) {
+ /* octave down */
+ pm->midi_octave--;
+ if (pm->midi_octave < -2)
+ pm->midi_octave = -2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ return 1;
+ }
+ /*KEY_WWW or sustain*/
+ else if (pm->midi_mode && bit_mask == 0x000004) {
+ /* sustain on/off*/
+ pm->midi_sustain_mode ^= 0x1;
+ return 1;
+ }
+
+ return 0; /* continue key processing */
+}
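For reference, the 24-bit mask assembly above can be checked in isolation; the sample bytes below are illustrative, not captured from hardware:

#include <assert.h>
#include <stdint.h>

/* Worked example of the mask assembly in pcmidi_handle_report1(). */
static void pcmidi_mask_example(void)
{
        uint8_t data[4] = { 0x01, 0x00, 0x40, 0x00 };   /* report id + 3 bytes */
        uint32_t bit_mask = (data[1] << 16) | (data[2] << 8) | data[3];

        assert(bit_mask == 0x004000);   /* the "KEY_MAIL or octave down" bit */
}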
+
+static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i, j;
+ unsigned char status, note, velocity;
+
+ unsigned num_notes = (size-1)/2;
+ for (j = 0; j < num_notes; j++) {
+ note = data[j*2+1];
+ velocity = data[j*2+2];
+
+ if (note < 0x81) { /* note on */
+ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
+ note = note - 0x54 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave * 12);
+ if (0 == velocity)
+ velocity = 1; /* force note on */
+ } else { /* note off */
+ status = 128 + pm->midi_channel; /* 1000nnnn */
+ note = note - 0x94 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave*12);
+
+ if (pm->midi_sustain_mode) {
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ if (!pms->in_use) {
+ pms->status = status;
+ pms->note = note;
+ pms->velocity = velocity;
+ pms->in_use = 1;
+
+ mod_timer(&pms->timer,
+ jiffies +
+ msecs_to_jiffies(pm->midi_sustain));
+ return 1;
+ }
+ }
+ }
+ }
+ pcmidi_send_note(pm, status, note, velocity);
+ }
+
+ return 1;
+}
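A worked example of the note-on arithmetic above: 128 + 16 + channel is the MIDI 1001nnnn (note-on) status byte, and a raw key byte of 0x54 maps to middle C at octave 0 (the raw value here is illustrative):

#include <assert.h>
#include <stdint.h>

static void pcmidi_note_example(void)
{
        uint8_t raw_note = 0x54;        /* as received in data[j*2+1] */
        unsigned channel = 0;
        int octave = 0;

        uint8_t status = 128 + 16 + channel;                 /* 0x90 */
        uint8_t note = raw_note - 0x54 + 60 + octave * 12;   /* PCMIDI_MIDDLE_C */

        assert(status == 0x90 && note == 60);
}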
+
+static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
+{
+ unsigned key;
+ u32 bit_mask;
+ u32 bit_index;
+
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ /* break keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = pm->last_key[bit_index];
+ if (!((0x01 << bit_index) & bit_mask)) {
+ input_event(pm->input_ep82, EV_KEY,
+ pm->last_key[bit_index], 0);
+ pm->last_key[bit_index] = 0;
+ }
+ }
+
+ /* make keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = 0;
+ switch ((0x01 << bit_index) & bit_mask) {
+ case 0x000010: /* Fn lock*/
+ pm->fn_state ^= 0x000010;
+ if (pm->fn_state)
+ pcmidi_submit_output_report(pm, 0xc5);
+ else
+ pcmidi_submit_output_report(pm, 0xc6);
+ continue;
+ case 0x020000: /* midi launcher..send a key (qwerty) or not? */
+ pcmidi_submit_output_report(pm, 0xc1);
+ pm->midi_mode ^= 0x01;
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ continue;
+ case 0x100000: /* KEY_MESSENGER or octave up */
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ if (pm->midi_mode) {
+ pm->midi_octave++;
+ if (pm->midi_octave > 2)
+ pm->midi_octave = 2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ continue;
+ } else
+ key = KEY_MESSENGER;
+ break;
+ case 0x400000:
+ key = KEY_CALENDAR;
+ break;
+ case 0x080000:
+ key = KEY_ADDRESSBOOK;
+ break;
+ case 0x040000:
+ key = KEY_DOCUMENTS;
+ break;
+ case 0x800000:
+ key = KEY_WORDPROCESSOR;
+ break;
+ case 0x200000:
+ key = KEY_SPREADSHEET;
+ break;
+ case 0x010000:
+ key = KEY_COFFEE;
+ break;
+ case 0x000100:
+ key = KEY_HELP;
+ break;
+ case 0x000200:
+ key = KEY_SEND;
+ break;
+ case 0x000400:
+ key = KEY_REPLY;
+ break;
+ case 0x000800:
+ key = KEY_FORWARDMAIL;
+ break;
+ case 0x001000:
+ key = KEY_NEW;
+ break;
+ case 0x002000:
+ key = KEY_OPEN;
+ break;
+ case 0x004000:
+ key = KEY_CLOSE;
+ break;
+ case 0x008000:
+ key = KEY_SAVE;
+ break;
+ case 0x000001:
+ key = KEY_UNDO;
+ break;
+ case 0x000002:
+ key = KEY_REDO;
+ break;
+ case 0x000004:
+ key = KEY_SPELLCHECK;
+ break;
+ case 0x000008:
+ key = KEY_PRINT;
+ break;
+ }
+ if (key) {
+ input_event(pm->input_ep82, EV_KEY, key, 1);
+ pm->last_key[bit_index] = key;
+ }
+ }
+
+ return 1;
+}
+
+int pcmidi_handle_report(
+ struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
+{
+ int ret = 0;
+
+ switch (report_id) {
+ case 0x01: /* midi keys (qwerty)*/
+ ret = pcmidi_handle_report1(pm, data);
+ break;
+ case 0x03: /* midi keyboard (musical)*/
+ ret = pcmidi_handle_report3(pm, data, size);
+ break;
+ case 0x04: /* multimedia/midi keys (qwerty)*/
+ ret = pcmidi_handle_report4(pm, data);
+ break;
+ }
+ return ret;
+}
+
+void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+{
+ /*
+ * Reassigned functionality for N/A keys:
+ * MY PICTURES => KEY_WORDPROCESSOR
+ * MY MUSIC => KEY_SPREADSHEET
+ */
+ unsigned int keys[] = {
+ KEY_FN,
+ KEY_MESSENGER, KEY_CALENDAR,
+ KEY_ADDRESSBOOK, KEY_DOCUMENTS,
+ KEY_WORDPROCESSOR,
+ KEY_SPREADSHEET,
+ KEY_COFFEE,
+ KEY_HELP, KEY_SEND,
+ KEY_REPLY, KEY_FORWARDMAIL,
+ KEY_NEW, KEY_OPEN,
+ KEY_CLOSE, KEY_SAVE,
+ KEY_UNDO, KEY_REDO,
+ KEY_SPELLCHECK, KEY_PRINT,
+ 0
+ };
+
+ unsigned int *pkeys = &keys[0];
+ unsigned short i;
+
+ if (pm->ifnum != 1) /* only set up ONCE for interface 1 */
+ return;
+
+ pm->input_ep82 = input;
+
+ for (i = 0; i < 24; i++)
+ pm->last_key[i] = 0;
+
+ while (*pkeys != 0) {
+ set_bit(*pkeys, pm->input_ep82->keybit);
+ ++pkeys;
+ }
+}
+
+static int pcmidi_set_operational(struct pcmidi_snd *pm)
+{
+ if (pm->ifnum != 1)
+ return 0; /* only set up ONCE for interface 1 */
+
+ pcmidi_get_output_report(pm);
+ pcmidi_submit_output_report(pm, 0xc1);
+ return 0;
+}
+
+static int pcmidi_snd_free(struct snd_device *dev)
+{
+ return 0;
+}
+
+static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in open\n");
+ pm->in_substream = substream;
+ return 0;
+}
+
+static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
+{
+ dbg_hid("pcmidi in close\n");
+ return 0;
+}
+
+static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in trigger %d\n", up);
+
+ pm->in_triggered = up;
+}
+
+static struct snd_rawmidi_ops pcmidi_in_ops = {
+ .open = pcmidi_in_open,
+ .close = pcmidi_in_close,
+ .trigger = pcmidi_in_trigger
+};
+
+int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+{
+ static int dev;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ int err;
+
+ static struct snd_device_ops ops = {
+ .dev_free = pcmidi_snd_free,
+ };
+
+ if (pm->ifnum != 1)
+ return 0; /* only set up midi device ONCE for interface 1 */
+
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ /* Setup sound card */
+
+ err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound card\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+ pm->card = card;
+
+ /* Setup sound device */
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound device: error %d\n",
+ err);
+ goto fail;
+ }
+
+ strncpy(card->driver, shortname, sizeof(card->driver));
+ strncpy(card->shortname, shortname, sizeof(card->shortname));
+ strncpy(card->longname, longname, sizeof(card->longname));
+
+ /* Set up rawmidi */
+ err = snd_rawmidi_new(card, card->shortname, 0,
+ 0, 1, &rwmidi);
+ if (err < 0) {
+ pk_error("failed to create pc-midi rawmidi device: error %d\n",
+ err);
+ goto fail;
+ }
+ pm->rwmidi = rwmidi;
+ strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
+ rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
+ rwmidi->private_data = pm;
+
+ snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+ &pcmidi_in_ops);
+
+ snd_card_set_dev(card, &pm->pk->hdev->dev);
+
+ /* create sysfs variables */
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute channel: error %d\n",
+ err);
+ goto fail;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute sustain: error %d\n",
+ err);
+ goto fail_attr_sustain;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute octave: error %d\n",
+ err);
+ goto fail_attr_octave;
+ }
+
+ spin_lock_init(&pm->rawmidi_in_lock);
+
+ init_sustain_timers(pm);
+ pcmidi_set_operational(pm);
+
+ /* register it */
+ err = snd_card_register(card);
+ if (err < 0) {
+ pk_error("failed to register pc-midi sound card: error %d\n",
+ err);
+ goto fail_register;
+ }
+
+ dbg_hid("pcmidi_snd_initialise finished ok\n");
+ return 0;
+
+fail_register:
+ stop_sustain_timers(pm);
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
+fail_attr_octave:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
+fail_attr_sustain:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
+fail:
+ if (pm->card) {
+ snd_card_free(pm->card);
+ pm->card = NULL;
+ }
+ return err;
+}
+
+int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+{
+ if (pm->card) {
+ stop_sustain_timers(pm);
+
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+
+ snd_card_disconnect(pm->card);
+ snd_card_free_when_closed(pm->card);
+ }
+
+ return 0;
+}
+
+/*
+ * The PC-MIDI report descriptor for report id 4 is wrong: fix its report count.
+ */
+static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize == 178 &&
+ rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
+ rdesc[113] == 0xff) {
+ dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
+ "descriptor\n");
+
+ rdesc[144] = 0x18; /* report 4: was 0x10 report count */
+ }
+}
+
+static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+
+ if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
+ 1 == pm->ifnum) {
+ pcmidi_setup_extra_keys(pm, hi->input);
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (1 == pk->pm->ifnum) {
+ if (report->id == data[0])
+ switch (report->id) {
+ case 0x01: /* midi keys (qwerty)*/
+ case 0x03: /* midi keyboard (musical)*/
+ case 0x04: /* extra/midi keys (qwerty)*/
+ ret = pcmidi_handle_report(pk->pm,
+ report->id, data, size);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ unsigned long quirks = id->driver_data;
+ struct pk_device *pk;
+ struct pcmidi_snd *pm = NULL;
+
+ pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+ if (pk == NULL) {
+ dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ pk->hdev = hdev;
+
+ pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+ if (pm == NULL) {
+ dev_err(&hdev->dev,
+ "prodikeys: can't alloc descriptor\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ pm->pk = pk;
+ pk->pm = pm;
+ pm->ifnum = ifnum;
+
+ hid_set_drvdata(hdev, pk);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+ goto err_free;
+ }
+
+ if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
+ hdev->quirks |= HID_QUIRK_NOGET;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+ goto err_free;
+ }
+
+ ret = pcmidi_snd_initialise(pm);
+ if (ret < 0)
+ goto err_stop;
+
+ return 0;
+err_stop:
+ hid_hw_stop(hdev);
+err_free:
+ kfree(pm);
+
+ kfree(pk);
+ return ret;
+}
+
+static void pk_remove(struct hid_device *hdev)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+ if (pm) {
+ pcmidi_snd_terminate(pm);
+ kfree(pm);
+ }
+
+ hid_hw_stop(hdev);
+
+ kfree(pk);
+}
+
+static const struct hid_device_id pk_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
+ USB_DEVICE_ID_PRODIKEYS_PCMIDI),
+ .driver_data = PK_QUIRK_NOGET},
+ { }
+};
+MODULE_DEVICE_TABLE(hid, pk_devices);
+
+static struct hid_driver pk_driver = {
+ .name = "prodikeys",
+ .id_table = pk_devices,
+ .report_fixup = pk_report_fixup,
+ .input_mapping = pk_input_mapping,
+ .raw_event = pk_raw_event,
+ .probe = pk_probe,
+ .remove = pk_remove,
+};
+
+static int pk_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&pk_driver);
+ if (ret)
+ printk(KERN_ERR "can't register prodikeys driver\n");
+
+ return ret;
+}
+
+static void pk_exit(void)
+{
+ hid_unregister_driver(&pk_driver);
+}
+
+module_init(pk_init);
+module_exit(pk_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
new file mode 100644
index 00000000000..17f2dc04f88
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.c
@@ -0,0 +1,1043 @@
+/*
+ * Roccat Kone driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone is a gaming mouse which consists of a mouse part and a keyboard
+ * part. The keyboard part enables the mouse to execute stored macros with
+ * mixed key and button events.
+ *
+ * TODO implement on-the-fly polling-rate change
+ * The windows driver has the ability to change the polling rate of the
+ * device on the press of a mousebutton.
+ * Is it possible to remove and reinstall the urb in raw-event- or any
+ * other handler, or to defer this action to be executed somewhere else?
+ *
+ * TODO implement notification mechanism for overlong macro execution
+ * If user wants to execute an overlong macro only the names of macroset
+ * and macro are given. Should userland tap hidraw or is there an
+ * additional streaming mechanism?
+ *
+ * TODO is it possible to overwrite group for sysfs attributes via udev?
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat.h"
+#include "hid-roccat-kone.h"
+
+static void kone_set_settings_checksum(struct kone_settings *settings)
+{
+ uint16_t checksum = 0;
+ unsigned char *address = (unsigned char *)settings;
+ int i;
+
+ for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
+ checksum += *address;
+ settings->checksum = cpu_to_le16(checksum);
+}
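On the read path the same additive sum can be used to validate a settings block; a hypothetical verification helper mirroring the loop above might look like this:

/* Hypothetical helper: recompute the checksum over everything but the
 * trailing 2-byte checksum field and compare it, mirroring
 * kone_set_settings_checksum(). */
static int kone_verify_settings_checksum(struct kone_settings const *settings)
{
        uint16_t checksum = 0;
        unsigned char const *address = (unsigned char const *)settings;
        int i;

        for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
                checksum += *address;

        return le16_to_cpu(settings->checksum) == checksum ? 0 : -EINVAL;
}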
+
+/*
+ * Checks success after writing data to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_check_write(struct usb_device *usb_dev)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ do {
+ /*
+ * Mouse needs 50 msecs until it says ok, but there are
+ * 30 more msecs needed for next write to work.
+ */
+ msleep(80);
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE |
+ USB_DIR_IN,
+ kone_command_confirm_write, 0, data, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+
+ /*
+ * A value of 3 seems to mean something like
+ * "not finished yet, but it looks good",
+ * so check again after a moment.
+ */
+ } while (*data == 3);
+
+ if (*data == 1) { /* everything alright */
+ kfree(data);
+ return 0;
+ } else { /* unknown answer */
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n",
+ *data);
+ kfree(data);
+ return -EIO;
+ }
+}
+
+/*
+ * Reads settings from mouse and stores it in @buf
+ * @buf has to be alloced with GFP_KERNEL
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_settings(struct usb_device *usb_dev,
+ struct kone_settings *buf)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_settings, 0, buf,
+ sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes settings from @buf to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_settings(struct usb_device *usb_dev,
+ struct kone_settings const *settings)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_settings, 0, (char *)settings,
+ sizeof(struct kone_settings),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads profile data from mouse and stores it in @buf
+ * @number: profile number to read
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_profile(struct usb_device *usb_dev,
+ struct kone_profile *buf, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_profile, number, buf,
+ sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes profile data to mouse.
+ * @number: profile number to write
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_profile(struct usb_device *usb_dev,
+ struct kone_profile const *profile, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_profile, number, (char *)profile,
+ sizeof(struct kone_profile),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return len;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads value of "fast-clip-weight" and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_weight(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ uint8_t *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = (int)*data;
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Reads firmware_version of mouse and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_firmware_version, 0, data, 2,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 2) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = le16_to_cpu(*(__le16 *)data);
+ kfree(data);
+ return 0;
+}
+
+static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kone_settings))
+ count = sizeof(struct kone_settings) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, &kone->settings + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+/*
+ * Writing settings automatically activates startup_profile.
+ * This function keeps values in kone_device up to date and assumes that in
+ * case of error the old data is still valid
+ */
+static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_settings))
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ if (difference) {
+ retval = kone_set_settings(usb_dev,
+ (struct kone_settings const *)buf);
+ if (!retval)
+ memcpy(&kone->settings, buf,
+ sizeof(struct kone_settings));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /*
+ * If we get here, treat settings as okay and update actual values
+ * according to startup_profile
+ */
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return sizeof(struct kone_settings);
+}
+
+static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_profile))
+ return 0;
+
+ if (off + count > sizeof(struct kone_profile))
+ count = sizeof(struct kone_profile) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, &kone->profiles[number - 1], sizeof(struct kone_profile));
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
+}
+
+/* Writes data only if different to stored data */
+static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_profile *profile;
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_profile))
+ return -EINVAL;
+
+ profile = &kone->profiles[number - 1];
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, profile, sizeof(struct kone_profile));
+ if (difference) {
+ retval = kone_set_profile(usb_dev,
+ (struct kone_profile const *)buf, number);
+ if (!retval)
+ memcpy(profile, buf, sizeof(struct kone_profile));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kone_profile);
+}
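Since the write handlers above reject anything but off == 0 and a full-sized count, userspace has to transfer a profile in one piece; a hedged userspace sketch (the sysfs path handling and the read-modify-write flow are assumptions for illustration):

/* Userspace sketch: rewrite profile1 in a single transfer, as the handlers
 * above require.  The attribute size reported by fstat() equals
 * sizeof(struct kone_profile); the sysfs path is whatever interface
 * directory the driver bound to. */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

static int kone_rewrite_profile1(const char *path)
{
        struct stat st;
        unsigned char *buf;
        int ret = -1;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;
        if (fstat(fd, &st) < 0)
                goto out;
        buf = malloc(st.st_size);
        if (!buf)
                goto out;
        if (pread(fd, buf, st.st_size, 0) != st.st_size)
                goto out_free;
        /* ... adjust fields and recompute the profile checksum here ... */
        if (pwrite(fd, buf, st.st_size, 0) == st.st_size)
                ret = 0;
out_free:
        free(buf);
out:
        close(fd);
        return ret;
}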
+
+static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
+}
+
+static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+}
+
+static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+}
+
+/* weight is read each time, since we don't get informed when it's changed */
+static ssize_t kone_sysfs_show_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int weight = 0;
+ int retval;
+
+ mutex_lock(&kone->kone_lock);
+ retval = kone_get_weight(usb_dev, &weight);
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+ return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+}
+
+static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+}
+
+static ssize_t kone_sysfs_show_tcu(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+}
+
+static int kone_tcu_command(struct usb_device *usb_dev, int number)
+{
+ int len;
+ char *value;
+
+ value = kmalloc(1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ *value = number;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_calibrate, 0, value, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(value);
+ return ((len != 1) ? -EIO : 0);
+}
+
+/*
+ * Calibrating the tcu is the only action that changes settings data inside the
+ * mouse, so this data needs to be reread
+ */
+static ssize_t kone_sysfs_set_tcu(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long state;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ if (state == 1) { /* state activate */
+ retval = kone_tcu_command(usb_dev, 1);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 2);
+ if (retval)
+ goto exit_unlock;
+ ssleep(5); /* tcu needs this time for calibration */
+ retval = kone_tcu_command(usb_dev, 3);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 0);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 4);
+ if (retval)
+ goto exit_unlock;
+ /*
+ * Kone needs this time to settle things.
+ * Reading settings too early will result in invalid data.
+ * Roccat's driver waits 1 sec, maybe this time could be
+ * shortened.
+ */
+ ssleep(1);
+ }
+
+ /* calibration changes values in settings, so reread */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+
+ /* only write settings back if activation state is different */
+ if (kone->settings.tcu != state) {
+ kone->settings.tcu = state;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+ if (retval) {
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+ /*
+ * try to reread valid settings into buffer overwriting
+ * first error code
+ */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+ goto exit_unlock;
+ }
+ }
+
+ retval = size;
+ goto exit_unlock;
+exit_no_settings:
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
+exit_unlock:
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+}
+
+static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+}
+
+static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long new_startup_profile;
+
+ retval = strict_strtoul(buf, 10, &new_startup_profile);
+ if (retval)
+ return retval;
+
+ if (new_startup_profile < 1 || new_startup_profile > 5)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ kone->settings.startup_profile = new_startup_profile;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /* changing the startup profile immediately activates this profile */
+ kone->actual_profile = new_startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return size;
+}
+
+/*
+ * This file is used by userland software to find devices that are handled by
+ * this driver. It provides a consistent way across current and older kernels,
+ * where this driver replaced usbhid instead of generic-usb.
+ * Driver capabilities are determined by the version number.
+ */
+static ssize_t kone_sysfs_show_driver_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, ROCCAT_KONE_DRIVER_VERSION "\n");
+}
+
+/*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum kone_polling_rates to
+ * get real value.
+ */
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+
+/*
+ * The mouse can be equipped with one of four supplied weights (5 to 20
+ * grams), which it recognizes; the fitted weight's value can be read out.
+ * This returns the raw value reported by the mouse for easy evaluation by
+ * software. Refer to enum kone_weights to get corresponding real weight.
+ */
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+
+/*
+ * Prints firmware version stored in mouse as integer.
+ * The raw value reported by the mouse is returned for easy evaluation, to get
+ * the real version number the decimal point has to be shifted 2 positions to
+ * the left. E.g. a value of 138 means 1.38.
+ */
+static DEVICE_ATTR(firmware_version, 0440,
+ kone_sysfs_show_firmware_version, NULL);
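For example, a caller that wants the dotted form can post-process the raw value like this (illustrative helper, not part of the driver's interface):

#include <stdio.h>

/* Example only: turn the raw value (e.g. 138) into the dotted form (1.38). */
static void kone_format_firmware_version(int raw, char *out, size_t len)
{
        snprintf(out, len, "%d.%02d", raw / 100, raw % 100);
}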
+
+/*
+ * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
+ * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
+ */
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
+
+/* Prints and takes the number of the profile the mouse starts with */
+static DEVICE_ATTR(startup_profile, 0660,
+ kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile);
+
+static DEVICE_ATTR(kone_driver_version, 0440,
+ kone_sysfs_show_driver_version, NULL);
+
+static struct attribute *kone_attributes[] = {
+ &dev_attr_actual_dpi.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_weight.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_tcu.attr,
+ &dev_attr_startup_profile.attr,
+ &dev_attr_kone_driver_version.attr,
+ NULL
+};
+
+static struct attribute_group kone_attribute_group = {
+ .attrs = kone_attributes
+};
+
+static struct bin_attribute kone_settings_attr = {
+ .attr = { .name = "settings", .mode = 0660 },
+ .size = sizeof(struct kone_settings),
+ .read = kone_sysfs_read_settings,
+ .write = kone_sysfs_write_settings
+};
+
+static struct bin_attribute kone_profile1_attr = {
+ .attr = { .name = "profile1", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile1,
+ .write = kone_sysfs_write_profile1
+};
+
+static struct bin_attribute kone_profile2_attr = {
+ .attr = { .name = "profile2", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile2,
+ .write = kone_sysfs_write_profile2
+};
+
+static struct bin_attribute kone_profile3_attr = {
+ .attr = { .name = "profile3", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile3,
+ .write = kone_sysfs_write_profile3
+};
+
+static struct bin_attribute kone_profile4_attr = {
+ .attr = { .name = "profile4", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile4,
+ .write = kone_sysfs_write_profile4
+};
+
+static struct bin_attribute kone_profile5_attr = {
+ .attr = { .name = "profile5", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile5,
+ .write = kone_sysfs_write_profile5
+};
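
The binary attributes above expose struct kone_settings and struct kone_profile as raw blobs, so userland can read and write them whole. The sketch below is illustrative only: it mirrors the struct kone_settings layout from hid-roccat-kone.h in a local copy and assumes a hypothetical sysfs path.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

#pragma pack(push, 1)
struct kone_settings_blob {
	uint16_t size;			/* always 36 */
	uint8_t startup_profile;	/* 1-5 */
	uint8_t unknown1;
	uint8_t tcu;			/* 0 = off, 1 = on */
	uint8_t unknown2[23];
	uint8_t calibration_data[4];
	uint8_t unknown3[2];
	uint16_t checksum;
};
#pragma pack(pop)

/* read the 36-byte "settings" attribute into a local copy */
static int kone_read_settings(const char *attr_path,
		struct kone_settings_blob *s)
{
	int fd = open(attr_path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, s, sizeof(*s));
	close(fd);
	if (n != (ssize_t)sizeof(*s) || s->size != 36)
		return -1;
	return 0;
}
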
+
+static int kone_create_sysfs_attributes(struct usb_interface *intf)
+{
+ int retval;
+
+ retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
+ if (retval)
+ goto exit_1;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ if (retval)
+ goto exit_2;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ if (retval)
+ goto exit_3;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ if (retval)
+ goto exit_4;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ if (retval)
+ goto exit_5;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ if (retval)
+ goto exit_6;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ if (retval)
+ goto exit_7;
+
+ return 0;
+
+exit_7:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+exit_6:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+exit_5:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+exit_4:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+exit_3:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+exit_2:
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+exit_1:
+ return retval;
+}
+
+static void kone_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+}
+
+static int kone_init_kone_device_struct(struct usb_device *usb_dev,
+ struct kone_device *kone)
+{
+ uint i;
+ int retval;
+
+ mutex_init(&kone->kone_lock);
+
+ for (i = 0; i < 5; ++i) {
+ retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
+ if (retval)
+ return retval;
+ }
+
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ return retval;
+
+ retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
+ if (retval)
+ return retval;
+
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return 0;
+}
+
+/*
+ * Since the IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind
+ * only to the mouse part if usbhid is compiled into the kernel and kone is
+ * compiled as a module.
+ * Special behaviour is bound only to the mouse part, since only mouse events
+ * contain the additional notifications.
+ */
+static int kone_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kone_device *kone;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kone = kzalloc(sizeof(*kone), GFP_KERNEL);
+ if (!kone) {
+ dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kone);
+
+ retval = kone_init_kone_device_struct(usb_dev, kone);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "couldn't init struct kone_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "couldn't init char dev\n");
+ /* be tolerant about not getting chrdev */
+ } else {
+ kone->roccat_claimed = 1;
+ kone->chrdev_minor = retval;
+ }
+
+ retval = kone_create_sysfs_attributes(intf);
+ if (retval) {
+ dev_err(&hdev->dev, "cannot create sysfs files\n");
+ if (kone->roccat_claimed)
+ roccat_disconnect(kone->chrdev_minor);
+ goto exit_free;
+ }
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kone);
+ return retval;
+}
+
+
+static void kone_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kone_device *kone;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kone_remove_sysfs_attributes(intf);
+ kone = hid_get_drvdata(hdev);
+ if (kone->roccat_claimed)
+ roccat_disconnect(kone->chrdev_minor);
+ kfree(kone);
+ }
+}
+
+static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kone_init_specials(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kone_remove(struct hid_device *hdev)
+{
+ kone_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+/* handle special events and keep actual profile and dpi values up to date */
+static void kone_keep_values_up_to_date(struct kone_device *kone,
+ struct kone_mouse_event const *event)
+{
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_osd_profile:
+ kone->actual_profile = event->value;
+ kone->actual_dpi =
+ kone->profiles[kone->actual_profile - 1].startup_dpi;
+ break;
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_dpi:
+ kone->actual_dpi = event->value;
+ break;
+ }
+}
+
+static void kone_report_to_chrdev(struct kone_device const *kone,
+ struct kone_mouse_event const *event)
+{
+ struct kone_roccat_report roccat_report;
+
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_profile:
+ case kone_mouse_event_osd_dpi:
+ roccat_report.event = event->event;
+ roccat_report.value = event->value;
+ roccat_report.key = 0;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ break;
+ case kone_mouse_event_call_overlong_macro:
+ if (event->value == kone_keystroke_action_press) {
+ roccat_report.event = kone_mouse_event_call_overlong_macro;
+ roccat_report.value = kone->actual_profile;
+ roccat_report.key = event->macro_key;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ }
+ break;
+ }
+}
+
+/*
+ * Called for both the keyboard part and the mouse part.
+ * Only the mouse part gets information about special events in its extended
+ * event structure.
+ */
+static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct kone_device *kone = hid_get_drvdata(hdev);
+ struct kone_mouse_event *event = (struct kone_mouse_event *)data;
+
+ /* keyboard events are always processed by default handler */
+ if (size != sizeof(struct kone_mouse_event))
+ return 0;
+
+ /*
+ * Firmware 1.38 introduced new behaviour for tilt and special buttons.
+ * The pressed button is reported in every movement event.
+ * This workaround forwards only one event per press.
+ */
+ if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
+ memcpy(&kone->last_mouse_event, event,
+ sizeof(struct kone_mouse_event));
+ else
+ memset(&event->tilt, 0, 5);
+
+ kone_keep_values_up_to_date(kone, event);
+
+ if (kone->roccat_claimed)
+ kone_report_to_chrdev(kone, event);
+
+ return 0; /* always do further processing */
+}
+
+static const struct hid_device_id kone_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kone_devices);
+
+static struct hid_driver kone_driver = {
+ .name = "kone",
+ .id_table = kone_devices,
+ .probe = kone_probe,
+ .remove = kone_remove,
+ .raw_event = kone_raw_event
+};
+
+static int __init kone_init(void)
+{
+ return hid_register_driver(&kone_driver);
+}
+
+static void __exit kone_exit(void)
+{
+ hid_unregister_driver(&kone_driver);
+}
+
+module_init(kone_init);
+module_exit(kone_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
new file mode 100644
index 00000000000..003e6f81c19
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.h
@@ -0,0 +1,233 @@
+#ifndef __HID_ROCCAT_KONE_H
+#define __HID_ROCCAT_KONE_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+#define ROCCAT_KONE_DRIVER_VERSION "v0.3.1"
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct kone_keystroke {
+ uint8_t key;
+ uint8_t action;
+ uint16_t period; /* in milliseconds */
+};
+
+enum kone_keystroke_buttons {
+ kone_keystroke_button_1 = 0xf0, /* left mouse button */
+ kone_keystroke_button_2 = 0xf1, /* right mouse button */
+ kone_keystroke_button_3 = 0xf2, /* wheel */
+ kone_keystroke_button_9 = 0xf3, /* side button up */
+ kone_keystroke_button_8 = 0xf4 /* side button down */
+};
+
+enum kone_keystroke_actions {
+ kone_keystroke_action_press = 0,
+ kone_keystroke_action_release = 1
+};
+
+struct kone_button_info {
+ uint8_t number; /* range 1-8 */
+ uint8_t type;
+ uint8_t macro_type; /* 0 = short, 1 = overlong */
+ uint8_t macro_set_name[16]; /* can be max 15 chars long */
+ uint8_t macro_name[16]; /* can be max 15 chars long */
+ uint8_t count;
+ struct kone_keystroke keystrokes[20];
+};
+
+enum kone_button_info_types {
+ /* valid button types until firmware 1.32 */
+ kone_button_info_type_button_1 = 0x1, /* click (left mouse button) */
+ kone_button_info_type_button_2 = 0x2, /* menu (right mouse button) */
+ kone_button_info_type_button_3 = 0x3, /* scroll (wheel) */
+ kone_button_info_type_double_click = 0x4,
+ kone_button_info_type_key = 0x5,
+ kone_button_info_type_macro = 0x6,
+ kone_button_info_type_off = 0x7,
+ /* TODO clarify function and rename */
+ kone_button_info_type_osd_xy_prescaling = 0x8,
+ kone_button_info_type_osd_dpi = 0x9,
+ kone_button_info_type_osd_profile = 0xa,
+ kone_button_info_type_button_9 = 0xb, /* i.e. forward */
+ kone_button_info_type_button_8 = 0xc, /* i.e. backward */
+ kone_button_info_type_dpi_up = 0xd, /* internal */
+ kone_button_info_type_dpi_down = 0xe, /* internal */
+ kone_button_info_type_button_7 = 0xf, /* tilt left */
+ kone_button_info_type_button_6 = 0x10, /* tilt right */
+ kone_button_info_type_profile_up = 0x11, /* internal */
+ kone_button_info_type_profile_down = 0x12, /* internal */
+ /* additional valid button types since firmware 1.38 */
+ kone_button_info_type_multimedia_open_player = 0x20,
+ kone_button_info_type_multimedia_next_track = 0x21,
+ kone_button_info_type_multimedia_prev_track = 0x22,
+ kone_button_info_type_multimedia_play_pause = 0x23,
+ kone_button_info_type_multimedia_stop = 0x24,
+ kone_button_info_type_multimedia_mute = 0x25,
+ kone_button_info_type_multimedia_volume_up = 0x26,
+ kone_button_info_type_multimedia_volume_down = 0x27
+};
+
+enum kone_button_info_numbers {
+ kone_button_top = 1,
+ kone_button_wheel_tilt_left = 2,
+ kone_button_wheel_tilt_right = 3,
+ kone_button_forward = 4,
+ kone_button_backward = 5,
+ kone_button_middle = 6,
+ kone_button_plus = 7,
+ kone_button_minus = 8,
+};
+
+struct kone_light_info {
+ uint8_t number; /* number of light 1-5 */
+ uint8_t mod; /* 1 = on, 2 = off */
+ uint8_t red; /* range 0x00-0xff */
+ uint8_t green; /* range 0x00-0xff */
+ uint8_t blue; /* range 0x00-0xff */
+};
+
+struct kone_profile {
+ uint16_t size; /* always 975 */
+ uint16_t unused; /* always 0 */
+
+ /*
+ * Range 1-5.
+ * This number does not need to correspond with the location where the
+ * profile is saved.
+ */
+ uint8_t profile;
+
+ uint16_t main_sensitivity; /* range 100-1000 */
+ uint8_t xy_sensitivity_enabled; /* 1 = on, 2 = off */
+ uint16_t x_sensitivity; /* range 100-1000 */
+ uint16_t y_sensitivity; /* range 100-1000 */
+ uint8_t dpi_rate; /* bit 1 = 800, ... */
+ uint8_t startup_dpi; /* range 1-6 */
+ uint8_t polling_rate; /* 1 = 125Hz, 2 = 500Hz, 3 = 1000Hz */
+ /*
+ * The Kone has no DCU; the value is always 2 in firmware <= 1.32 and
+ * 1 in firmware > 1.32.
+ */
+ uint8_t dcu_flag;
+ uint8_t light_effect_1; /* range 1-3 */
+ uint8_t light_effect_2; /* range 1-5 */
+ uint8_t light_effect_3; /* range 1-4 */
+ uint8_t light_effect_speed; /* range 0-255 */
+
+ struct kone_light_info light_infos[5];
+ /* offset is kone_button_info_numbers - 1 */
+ struct kone_button_info button_infos[8];
+
+ uint16_t checksum; /* holds checksum of struct */
+};
+
+enum kone_polling_rates {
+ kone_polling_rate_125 = 1,
+ kone_polling_rate_500 = 2,
+ kone_polling_rate_1000 = 3
+};
+
+struct kone_settings {
+ uint16_t size; /* always 36 */
+ uint8_t startup_profile; /* 1-5 */
+ uint8_t unknown1;
+ uint8_t tcu; /* 0 = off, 1 = on */
+ uint8_t unknown2[23];
+ uint8_t calibration_data[4];
+ uint8_t unknown3[2];
+ uint16_t checksum;
+};
+
+/*
+ * 12 byte mouse event read by interrupt_read
+ */
+struct kone_mouse_event {
+ uint8_t report_number; /* always 1 */
+ uint8_t button;
+ uint16_t x;
+ uint16_t y;
+ uint8_t wheel; /* up = 1, down = -1 */
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+};
+
+enum kone_mouse_events {
+ /* osd events are meant to be displayed on screen */
+ kone_mouse_event_osd_dpi = 0xa0,
+ kone_mouse_event_osd_profile = 0xb0,
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
+ kone_mouse_event_calibration = 0xc0,
+ kone_mouse_event_call_overlong_macro = 0xe0,
+ /* switch events notify when the user changes values via mouse buttons */
+ kone_mouse_event_switch_dpi = 0xf0,
+ kone_mouse_event_switch_profile = 0xf1
+};
+
+enum kone_commands {
+ kone_command_profile = 0x5a,
+ kone_command_settings = 0x15a,
+ kone_command_firmware_version = 0x25a,
+ kone_command_weight = 0x45a,
+ kone_command_calibrate = 0x55a,
+ kone_command_confirm_write = 0x65a,
+ kone_command_firmware = 0xe5a
+};
+
+struct kone_roccat_report {
+ uint8_t event;
+ uint8_t value; /* holds dpi or profile value */
+ uint8_t key; /* macro key on overlong macro execution */
+};
+
+#pragma pack(pop)
+
+struct kone_device {
+ /*
+ * Stores the current values when we get informed about changes, since
+ * there is no way to query this information from the device on demand
+ */
+ int actual_profile, actual_dpi;
+ /* Used for neutralizing abnormal button behaviour */
+ struct kone_mouse_event last_mouse_event;
+
+ /*
+ * It's unlikely that multiple sysfs attributes are accessed at the same
+ * time, so a single mutex is used to protect hardware access as well as
+ * the profiles and settings in this struct.
+ */
+ struct mutex kone_lock;
+
+ /*
+ * Storing the data here reduces IO and ensures that the data is
+ * available when it's needed (e.g. in the interrupt handler).
+ */
+ struct kone_profile profiles[5];
+ struct kone_settings settings;
+
+ /*
+ * The firmware doesn't change unless a firmware update is implemented,
+ * so it's read only once.
+ */
+ int firmware_version;
+
+ int roccat_claimed;
+ int chrdev_minor;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 00000000000..e05d48edb66
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
+/*
+ * Roccat driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Module roccat is a char device used to report special events of roccat
+ * hardware to userland. These events include requests for on-screen display
+ * of profile or dpi settings and requests for execution of macro sequences
+ * that are not stored in the device. The information in these events depends
+ * on the hid device implementation and contains data that is not available in
+ * a single hid event; otherwise hidraw could have been used.
+ * It is inspired by hidraw, but uses only one circular buffer for all readers.
+ */
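
As a concrete picture of how userland consumes this char device, a minimal reader is sketched below. It is illustrative only: the packed report layout is copied from struct kone_roccat_report in hid-roccat-kone.h, and the /dev node name is an assumption derived from the "%s%s%d" format used in roccat_connect() further down (e.g. /dev/roccatkone0, depending on udev rules).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* mirrors struct kone_roccat_report from hid-roccat-kone.h */
struct kone_roccat_report {
	uint8_t event;
	uint8_t value;	/* dpi or profile value */
	uint8_t key;	/* macro key on overlong macro execution */
} __attribute__((packed));

int main(void)
{
	struct kone_roccat_report report;
	int fd = open("/dev/roccatkone0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* read() blocks until roccat_report_event() queues a new report */
	while (read(fd, &report, sizeof(report)) == sizeof(report))
		printf("event 0x%02x value %u key %u\n",
		       report.event, report.value, report.key);

	close(fd);
	return 0;
}
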
+
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include "hid-roccat.h"
+
+#define ROCCAT_FIRST_MINOR 0
+#define ROCCAT_MAX_DEVICES 8
+
+/* should be a power of 2 for performance reasons */
+#define ROCCAT_CBUF_SIZE 16
+
+struct roccat_report {
+ uint8_t *value;
+ int len;
+};
+
+struct roccat_device {
+ unsigned int minor;
+ int open;
+ int exist;
+ wait_queue_head_t wait;
+ struct device *dev;
+ struct hid_device *hid;
+ struct list_head readers;
+ /* protects modifications of readers list */
+ struct mutex readers_lock;
+
+ /*
+ * circular_buffer has one writer and multiple readers with their own
+ * read pointers
+ */
+ struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
+ int cbuf_end;
+ struct mutex cbuf_lock;
+};
+
+struct roccat_reader {
+ struct list_head node;
+ struct roccat_device *device;
+ int cbuf_start;
+};
+
+static int roccat_major;
+static struct class *roccat_class;
+static struct cdev roccat_cdev;
+
+static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
+/* protects modifications of devices array */
+static DEFINE_MUTEX(devices_lock);
+
+static ssize_t roccat_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device = reader->device;
+ struct roccat_report *report;
+ ssize_t retval = 0, len;
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&device->cbuf_lock);
+
+ /* no data? */
+ if (reader->cbuf_start == device->cbuf_end) {
+ add_wait_queue(&device->wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* wait for data */
+ while (reader->cbuf_start == device->cbuf_end) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (!device->exist) {
+ retval = -EIO;
+ break;
+ }
+
+ mutex_unlock(&device->cbuf_lock);
+ schedule();
+ mutex_lock(&device->cbuf_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&device->wait, &wait);
+ }
+
+ /* at this point we either have data, or retval tells us why to return */
+ if (retval)
+ goto exit_unlock;
+
+ report = &device->cbuf[reader->cbuf_start];
+ /*
+ * If the report is larger than the requested amount of data, the rest
+ * of the report is lost!
+ */
+ len = report->len > count ? count : report->len;
+
+ if (copy_to_user(buffer, report->value, len)) {
+ retval = -EFAULT;
+ goto exit_unlock;
+ }
+ retval += len;
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+
+exit_unlock:
+ mutex_unlock(&device->cbuf_lock);
+ return retval;
+}
+
+static unsigned int roccat_poll(struct file *file, poll_table *wait)
+{
+ struct roccat_reader *reader = file->private_data;
+ poll_wait(file, &reader->device->wait, wait);
+ if (reader->cbuf_start != reader->device->cbuf_end)
+ return POLLIN | POLLRDNORM;
+ if (!reader->device->exist)
+ return POLLERR | POLLHUP;
+ return 0;
+}
+
+static int roccat_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader;
+ struct roccat_device *device;
+ int error = 0;
+
+ reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+
+ if (!device) {
+ mutex_unlock(&devices_lock);
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ kfree(reader);
+ return -ENODEV;
+ }
+
+ mutex_lock(&device->readers_lock);
+
+ if (!device->open++) {
+ /* power on device on adding first reader */
+ if (device->hid->ll_driver->power) {
+ error = device->hid->ll_driver->power(device->hid,
+ PM_HINT_FULLON);
+ if (error < 0) {
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+ error = device->hid->ll_driver->open(device->hid);
+ if (error < 0) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+
+ reader->device = device;
+ /* new reader doesn't get old events */
+ reader->cbuf_start = device->cbuf_end;
+
+ list_add_tail(&reader->node, &device->readers);
+ file->private_data = reader;
+
+exit_unlock:
+ mutex_unlock(&device->readers_lock);
+ mutex_unlock(&devices_lock);
+ if (error)
+ kfree(reader);
+ return error;
+}
+
+static int roccat_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+ if (!device) {
+ mutex_unlock(&devices_lock);
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ return -ENODEV;
+ }
+
+ mutex_lock(&device->readers_lock);
+ list_del(&reader->node);
+ mutex_unlock(&device->readers_lock);
+ kfree(reader);
+
+ if (!--device->open) {
+ /* removing last reader */
+ if (device->exist) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ device->hid->ll_driver->close(device->hid);
+ } else {
+ kfree(device);
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+
+/*
+ * roccat_report_event() - output data to readers
+ * @minor: minor device number returned by roccat_connect()
+ * @data: pointer to data
+ * @len: size of data
+ *
+ * Return value is zero on success, a negative error code on failure.
+ *
+ * This is called from interrupt handler.
+ */
+int roccat_report_event(int minor, u8 const *data, int len)
+{
+ struct roccat_device *device;
+ struct roccat_reader *reader;
+ struct roccat_report *report;
+ uint8_t *new_value;
+
+ new_value = kmemdup(data, len, GFP_ATOMIC);
+ if (!new_value)
+ return -ENOMEM;
+
+ device = devices[minor];
+
+ report = &device->cbuf[device->cbuf_end];
+
+ /* passing NULL is safe */
+ kfree(report->value);
+
+ report->value = new_value;
+ report->len = len;
+ device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
+
+ list_for_each_entry(reader, &device->readers, node) {
+ /*
+ * As we already inserted one element, the buffer can't be
+ * empty. If start and end are equal, the buffer is full and we
+ * increase start, so that a slow reader misses one event but
+ * gets the newer ones in the right order.
+ */
+ if (reader->cbuf_start == device->cbuf_end)
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+ }
+
+ wake_up_interruptible(&device->wait);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_report_event);
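
The overwrite policy commented inside roccat_report_event() can be checked with a small standalone model, sketched below for illustration only; it simulates just the index arithmetic, not the locking or the report storage.

#include <assert.h>

#define CBUF_SIZE 16	/* stands in for ROCCAT_CBUF_SIZE */

/* the writer stores one report, then pushes any reader it would collide with */
static void writer_advance(int *end, int *reader_start)
{
	*end = (*end + 1) % CBUF_SIZE;
	if (*reader_start == *end)
		*reader_start = (*reader_start + 1) % CBUF_SIZE;
}

int main(void)
{
	int end = 0, reader = 0, i;

	/* after CBUF_SIZE writes an idle reader has lost exactly one event */
	for (i = 0; i < CBUF_SIZE; i++)
		writer_advance(&end, &reader);

	assert(reader == (end + 1) % CBUF_SIZE);
	return 0;
}
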
+
+/*
+ * roccat_connect() - create a char device for special event output
+ * @hid: the hid device the char device should be connected to.
+ *
+ * Return value is the minor device number in the range
+ * [0, ROCCAT_MAX_DEVICES - 1] on success, a negative error code on failure.
+ */
+int roccat_connect(struct hid_device *hid)
+{
+ unsigned int minor;
+ struct roccat_device *device;
+ int temp;
+
+ device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
+ if (devices[minor])
+ continue;
+ break;
+ }
+
+ if (minor < ROCCAT_MAX_DEVICES) {
+ devices[minor] = device;
+ } else {
+ mutex_unlock(&devices_lock);
+ kfree(device);
+ return -EINVAL;
+ }
+
+ device->dev = device_create(roccat_class, &hid->dev,
+ MKDEV(roccat_major, minor), NULL,
+ "%s%s%d", "roccat", hid->driver->name, minor);
+
+ if (IS_ERR(device->dev)) {
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+ temp = PTR_ERR(device->dev);
+ kfree(device);
+ return temp;
+ }
+
+ mutex_unlock(&devices_lock);
+
+ init_waitqueue_head(&device->wait);
+ INIT_LIST_HEAD(&device->readers);
+ mutex_init(&device->readers_lock);
+ mutex_init(&device->cbuf_lock);
+ device->minor = minor;
+ device->hid = hid;
+ device->exist = 1;
+ device->cbuf_end = 0;
+
+ return minor;
+}
+EXPORT_SYMBOL_GPL(roccat_connect);
+
+/* roccat_disconnect() - remove char device from hid device
+ * @minor: the minor device number returned by roccat_connect()
+ */
+void roccat_disconnect(int minor)
+{
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+ device = devices[minor];
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+
+ device->exist = 0; /* TODO exist maybe not needed */
+
+ device_destroy(roccat_class, MKDEV(roccat_major, minor));
+
+ if (device->open) {
+ device->hid->ll_driver->close(device->hid);
+ wake_up_interruptible(&device->wait);
+ } else {
+ kfree(device);
+ }
+}
+EXPORT_SYMBOL_GPL(roccat_disconnect);
+
+static const struct file_operations roccat_ops = {
+ .owner = THIS_MODULE,
+ .read = roccat_read,
+ .poll = roccat_poll,
+ .open = roccat_open,
+ .release = roccat_release,
+};
+
+static int __init roccat_init(void)
+{
+ int retval;
+ dev_t dev_id;
+
+ retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
+ ROCCAT_MAX_DEVICES, "roccat");
+
+ roccat_major = MAJOR(dev_id);
+
+ if (retval < 0) {
+ printk(KERN_WARNING "roccat: can't get major number\n");
+ return retval;
+ }
+
+ roccat_class = class_create(THIS_MODULE, "roccat");
+ if (IS_ERR(roccat_class)) {
+ retval = PTR_ERR(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+ return retval;
+ }
+
+ cdev_init(&roccat_cdev, &roccat_ops);
+ retval = cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
+ if (retval < 0) {
+ class_destroy(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void __exit roccat_exit(void)
+{
+ dev_t dev_id = MKDEV(roccat_major, 0);
+
+ cdev_del(&roccat_cdev);
+ class_destroy(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+}
+
+module_init(roccat_init);
+module_exit(roccat_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat char device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 00000000000..d8aae0c1fa7
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
+#ifndef __HID_ROCCAT_H
+#define __HID_ROCCAT_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
+int roccat_connect(struct hid_device *hid);
+void roccat_disconnect(int minor);
+int roccat_report_event(int minor, u8 const *data, int len);
+#else
+static inline int roccat_connect(struct hid_device *hid) { return -1; }
+static inline void roccat_disconnect(int minor) {}
+static inline int roccat_report_event(int minor, u8 const *data, int len)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 510dd134059..bda0fd60c98 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -7,6 +7,18 @@
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
+ * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ *
+ * This driver supports several HID devices:
+ *
+ * [0419:0001] Samsung IrDA remote controller (reports as Cypress USB Mouse).
+ * various hid report fixups for different variants.
+ *
+ * [0419:0600] Creative Desktop Wireless 6000 keyboard/mouse combo
+ * several key mappings used from the consumer usage page
+ * deviate from the USB HUT 1.12 standard.
+ *
*/
/*
@@ -17,14 +29,13 @@
*/
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
/*
- * Samsung IrDA remote controller (reports as Cypress USB Mouse).
- *
* There are several variants for 0419:0001:
*
* 1. 184 byte report descriptor
@@ -43,21 +54,21 @@
* 4. 171 byte report descriptor
* Report #3 has an array field with logical range 0..1 instead of 1..3.
*/
-static inline void samsung_dev_trace(struct hid_device *hdev,
+static inline void samsung_irda_dev_trace(struct hid_device *hdev,
unsigned int rsize)
{
dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
"descriptor\n", rsize);
}
-static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- samsung_dev_trace(hdev, 184);
+ samsung_irda_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
@@ -65,24 +76,80 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
} else
if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
rdesc[194] == 0x25 && rdesc[195] == 0x12) {
- samsung_dev_trace(hdev, 203);
+ samsung_irda_dev_trace(hdev, 203);
rdesc[193] = 0x1;
rdesc[195] = 0xf;
} else
if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
rdesc[126] == 0x25 && rdesc[127] == 0x11) {
- samsung_dev_trace(hdev, 135);
+ samsung_irda_dev_trace(hdev, 135);
rdesc[125] = 0x1;
rdesc[127] = 0xe;
} else
if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
rdesc[162] == 0x25 && rdesc[163] == 0x01) {
- samsung_dev_trace(hdev, 171);
+ samsung_irda_dev_trace(hdev, 171);
rdesc[161] = 0x1;
rdesc[163] = 0x3;
}
}
+#define samsung_kbd_mouse_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
+ return 0;
+
+ dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break;
+ case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL); break;
+ case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break;
+ case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break;
+ case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
+ case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
+ case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+ case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break;
+ case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break;
+ case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break;
+ case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
+ samsung_irda_report_fixup(hdev, rdesc, rsize);
+}
+
+static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int ret = 0;
+
+ if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product)
+ ret = samsung_kbd_mouse_input_mapping(hdev,
+ hi, field, usage, bit, max);
+
+ return ret;
+}
+
static int samsung_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -95,10 +162,12 @@ static int samsung_probe(struct hid_device *hdev,
goto err_free;
}
- if (hdev->rsize == 184) {
- /* disable hidinput, force hiddev */
- cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
- HID_CONNECT_HIDDEV_FORCE;
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) {
+ if (hdev->rsize == 184) {
+ /* disable hidinput, force hiddev */
+ cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
+ HID_CONNECT_HIDDEV_FORCE;
+ }
}
ret = hid_hw_start(hdev, cmask);
@@ -114,6 +183,7 @@ err_free:
static const struct hid_device_id samsung_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ }
};
MODULE_DEVICE_TABLE(hid, samsung_devices);
@@ -122,6 +192,7 @@ static struct hid_driver samsung_driver = {
.name = "samsung",
.id_table = samsung_devices,
.report_fixup = samsung_report_fixup,
+ .input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 6925eda1081..2eebdcc57bc 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2008 Lev Babiev
* based on hid-cherry driver
+ *
+ * Modified to also support BTC "Emprex 3009URF III Vista MCE Remote" by
+ * Wayne Thomas 2010.
*/
/*
@@ -24,23 +27,29 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
switch (usage->hid & HID_USAGE) {
- case 0x00d: ts_map_key_clear(KEY_HOME); break;
- case 0x024: ts_map_key_clear(KEY_MENU); break;
- case 0x025: ts_map_key_clear(KEY_TV); break;
- case 0x048: ts_map_key_clear(KEY_RED); break;
- case 0x047: ts_map_key_clear(KEY_GREEN); break;
- case 0x049: ts_map_key_clear(KEY_YELLOW); break;
- case 0x04a: ts_map_key_clear(KEY_BLUE); break;
- case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
- case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
- case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
- case 0x031: ts_map_key_clear(KEY_AUDIO); break;
- case 0x032: ts_map_key_clear(KEY_TEXT); break;
- case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x00d: ts_map_key_clear(KEY_MEDIA); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x031: ts_map_key_clear(KEY_AUDIO); break;
+ case 0x032: ts_map_key_clear(KEY_TEXT); break;
+ case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x047: ts_map_key_clear(KEY_MP3); break;
+ case 0x048: ts_map_key_clear(KEY_TV2); break;
+ case 0x049: ts_map_key_clear(KEY_CAMERA); break;
+ case 0x04a: ts_map_key_clear(KEY_VIDEO); break;
+ case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
+ case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
+ case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
+ case 0x050: ts_map_key_clear(KEY_RADIO); break;
+ case 0x05a: ts_map_key_clear(KEY_TEXT); break;
+ case 0x05b: ts_map_key_clear(KEY_RED); break;
+ case 0x05c: ts_map_key_clear(KEY_GREEN); break;
+ case 0x05d: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: ts_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
@@ -50,6 +59,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f947d8337e2..1e051f1171e 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -22,14 +22,159 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+#include <linux/power_supply.h>
+#endif
#include "hid-ids.h"
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ unsigned char high_speed;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ int battery_capacity;
+ struct power_supply battery;
+ struct power_supply ac;
+#endif
};
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+/* percent of battery capacity, 0 means AC online */
+static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
+
+static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+static enum power_supply_property wacom_ac_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy,
+ struct wacom_data, battery);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* show 100% battery capacity when charging */
+ if (power_state == 0)
+ val->intval = 100;
+ else
+ val->intval = power_state;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int wacom_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* fall through */
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power_state == 0)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+static void wacom_poke(struct hid_device *hdev, u8 speed)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ int limit, ret;
+ char rep_data[2];
+
+ rep_data[0] = 0x03; rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ if (speed == 0)
+ rep_data[0] = 0x05;
+ else
+ rep_data[0] = 0x06;
+
+ rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ wdata->high_speed = speed;
+ return;
+ }
+ }
+
+ /*
+ * Note that if the raw queries fail, it's not a hard failure and it
+ * is safe to continue
+ */
+ dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
+ return;
+}
+
+static ssize_t wacom_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wacom_data *wdata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ int new_speed;
+
+ if (sscanf(buf, "%1d", &new_speed) != 1)
+ return -EINVAL;
+
+ if (new_speed == 0 || new_speed == 1) {
+ wacom_poke(hdev, new_speed);
+ return strnlen(buf, PAGE_SIZE);
+ } else
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+ wacom_show_speed, wacom_store_speed);
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -148,6 +293,12 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
input_sync(input);
}
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ /* Store current battery capacity */
+ rw = (data[7] >> 2 & 0x07);
+ if (rw != wdata->battery_capacity)
+ wdata->battery_capacity = rw;
+#endif
return 1;
}
@@ -157,9 +308,7 @@ static int wacom_probe(struct hid_device *hdev,
struct hid_input *hidinput;
struct input_dev *input;
struct wacom_data *wdata;
- char rep_data[2];
int ret;
- int limit;
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (wdata == NULL) {
@@ -182,31 +331,53 @@ static int wacom_probe(struct hid_device *hdev,
goto err_free;
}
- /*
- * Note that if the raw queries fail, it's not a hard failure and it
- * is safe to continue
- */
+ ret = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (ret)
+ dev_warn(&hdev->dev,
+ "can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode2 */
- rep_data[0] = 0x03; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
- /* 0x06 - high reporting speed, 0x05 - low speed */
- rep_data[0] = 0x06; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ wdata->battery.properties = wacom_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+ wdata->battery.get_property = wacom_battery_get_property;
+ wdata->battery.name = "wacom_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+ ret = power_supply_register(&hdev->dev, &wdata->battery);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create sysfs battery attribute, err: %d\n", ret);
+ /*
+ * battery attribute is not critical for the tablet, but if it
+ * failed then there is no need to create the ac attribute
+ */
+ goto move_on;
+ }
+
+ wdata->ac.properties = wacom_ac_props;
+ wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+ wdata->ac.get_property = wacom_ac_get_property;
+ wdata->ac.name = "wacom_ac";
+ wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ wdata->ac.use_for_apm = 0;
+
+ ret = power_supply_register(&hdev->dev, &wdata->ac);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create ac battery attribute, err: %d\n", ret);
+ /*
+ * ac attribute is not critical for the tablet, but if it
+ * failed then we don't want the battery attribute to exist
+ */
+ power_supply_unregister(&wdata->battery);
+ }
+
+move_on:
+#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
@@ -251,13 +422,21 @@ err_free:
static void wacom_remove(struct hid_device *hdev)
{
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+#endif
hid_hw_stop(hdev);
+
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ power_supply_unregister(&wdata->battery);
+ power_supply_unregister(&wdata->ac);
+#endif
kfree(hid_get_drvdata(hdev));
}
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
-
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_devices);
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
new file mode 100644
index 00000000000..9e8d35a203e
--- /dev/null
+++ b/drivers/hid/hid-zydacron.c
@@ -0,0 +1,237 @@
+/*
+* HID driver for zydacron remote control
+*
+* Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+*/
+
+/*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation; either version 2 of the License, or (at your option)
+* any later version.
+*/
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct zc_device {
+ struct input_dev *input_ep81;
+ unsigned short last_key[4];
+};
+
+
+/*
+* The Zydacron remote control has an invalid HID report descriptor
+* that needs fixing before we can parse it.
+*/
+static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize >= 253 &&
+ rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
+ rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
+ rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
+ dev_info(&hdev->dev,
+ "fixing up zydacron remote control report "
+ "descriptor\n");
+ rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
+ rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
+ }
+}
+
+#define zc_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int i;
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ zc->input_ep81 = hi->input;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+ return 0;
+
+ dbg_hid("zynacron input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x10:
+ zc_map_key_clear(KEY_MODE);
+ break;
+ case 0x30:
+ zc_map_key_clear(KEY_SCREEN);
+ break;
+ case 0x70:
+ zc_map_key_clear(KEY_INFO);
+ break;
+ /* report 3 */
+ case 0x04:
+ zc_map_key_clear(KEY_RADIO);
+ break;
+ /* report 4 */
+ case 0x0d:
+ zc_map_key_clear(KEY_PVR);
+ break;
+ case 0x25:
+ zc_map_key_clear(KEY_TV);
+ break;
+ case 0x47:
+ zc_map_key_clear(KEY_AUDIO);
+ break;
+ case 0x49:
+ zc_map_key_clear(KEY_AUX);
+ break;
+ case 0x4a:
+ zc_map_key_clear(KEY_VIDEO);
+ break;
+ case 0x48:
+ zc_map_key_clear(KEY_DVD);
+ break;
+ case 0x24:
+ zc_map_key_clear(KEY_MENU);
+ break;
+ case 0x32:
+ zc_map_key_clear(KEY_TEXT);
+ break;
+ default:
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++)
+ zc->last_key[i] = 0;
+
+ return 1;
+}
+
+static int zc_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ int ret = 0;
+ unsigned key;
+ unsigned short index;
+
+ if (report->id == data[0]) {
+
+ /* break keys */
+ for (index = 0; index < 4; index++) {
+ key = zc->last_key[index];
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 0);
+ zc->last_key[index] = 0;
+ }
+ }
+
+ key = 0;
+ switch (report->id) {
+ case 0x02:
+ case 0x03:
+ switch (data[1]) {
+ case 0x10:
+ key = KEY_MODE;
+ index = 0;
+ break;
+ case 0x30:
+ key = KEY_SCREEN;
+ index = 1;
+ break;
+ case 0x70:
+ key = KEY_INFO;
+ index = 2;
+ break;
+ case 0x04:
+ key = KEY_RADIO;
+ index = 3;
+ break;
+ }
+
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 1);
+ zc->last_key[index] = key;
+ }
+
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct zc_device *zc;
+
+ zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+ if (zc == NULL) {
+ dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, zc);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: hw start failed\n");
+ goto err_free;
+ }
+
+ return 0;
+err_free:
+ kfree(zc);
+
+ return ret;
+}
+
+static void zc_remove(struct hid_device *hdev)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+
+ hid_hw_stop(hdev);
+
+ kfree(zc);
+}
+
+static const struct hid_device_id zc_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, zc_devices);
+
+static struct hid_driver zc_driver = {
+ .name = "zydacron",
+ .id_table = zc_devices,
+ .report_fixup = zc_report_fixup,
+ .input_mapping = zc_input_mapping,
+ .raw_event = zc_raw_event,
+ .probe = zc_probe,
+ .remove = zc_remove,
+};
+
+static int __init zc_init(void)
+{
+ return hid_register_driver(&zc_driver);
+}
+
+static void __exit zc_exit(void)
+{
+ hid_unregister_driver(&zc_driver);
+}
+
+module_init(zc_init);
+module_exit(zc_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6eadf1a9b3c..3ccd4785067 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -106,38 +106,48 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
- /* FIXME: What stops hidraw_table going NULL */
- struct hid_device *dev = hidraw_table[minor]->hid;
+ struct hid_device *dev;
__u8 *buf;
int ret = 0;
- if (!dev->hid_output_raw_report)
- return -ENODEV;
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor]->hid;
+
+ if (!dev->hid_output_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
if (count > HID_MAX_BUFFER_SIZE) {
printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (count < 2) {
printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
-out:
+out_free:
kfree(buf);
+out:
+ mutex_unlock(&minors_lock);
return ret;
}
@@ -165,11 +175,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out;
}
- lock_kernel();
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
kfree(list);
err = -ENODEV;
goto out_unlock;
@@ -197,7 +204,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
- unlock_kernel();
out:
return err;
@@ -209,11 +215,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
+ if (!hidraw_table[minor])
return -ENODEV;
- }
list_del(&list->node);
dev = hidraw_table[minor];
@@ -238,11 +241,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
long ret = 0;
- /* FIXME: What stops hidraw_table going NULL */
- struct hidraw *dev = hidraw_table[minor];
+ struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
- lock_kernel();
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor];
+
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -311,11 +315,11 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
- unlock_kernel();
+ mutex_unlock(&minors_lock);
return ret;
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7b85b696fda..1ebd3244eb8 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -623,6 +623,7 @@ int usbhid_wait_io(struct hid_device *hid)
return 0;
}
+EXPORT_SYMBOL_GPL(usbhid_wait_io);
static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
{
@@ -783,13 +784,12 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usbhid->inbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->inbuf_dma);
- usbhid->outbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->outbuf_dma);
- usbhid->cr = usb_buffer_alloc(dev, sizeof(*usbhid->cr), GFP_KERNEL,
- &usbhid->cr_dma);
- usbhid->ctrlbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL);
+ usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->ctrlbuf_dma);
if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr ||
!usbhid->ctrlbuf)
@@ -807,16 +807,36 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
struct usb_host_interface *interface = intf->cur_altsetting;
int ret;
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- HID_REQ_SET_REPORT,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
- interface->desc.bInterfaceNumber, buf + 1, count - 1,
- USB_CTRL_SET_TIMEOUT);
-
- /* count also the report id */
- if (ret > 0)
- ret++;
+ if (usbhid->urbout) {
+ int actual_length;
+ int skipped_report_id = 0;
+ if (buf[0] == 0x0) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+ ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+ buf, count, &actual_length,
+ USB_CTRL_SET_TIMEOUT);
+ /* return the number of bytes transferred */
+ if (ret == 0) {
+ ret = actual_length;
+ /* count also the report id */
+ if (skipped_report_id)
+ ret++;
+ }
+ } else {
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ HID_REQ_SET_REPORT,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ((report_type + 1) << 8) | *buf,
+ interface->desc.bInterfaceNumber, buf + 1, count - 1,
+ USB_CTRL_SET_TIMEOUT);
+ /* count also the report id */
+ if (ret > 0)
+ ret++;
+ }
return ret;
}
@@ -844,10 +864,10 @@ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usb_buffer_free(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
- usb_buffer_free(dev, sizeof(*(usbhid->cr)), usbhid->cr, usbhid->cr_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
+ kfree(usbhid->cr);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
}
static int usbhid_parse(struct hid_device *hid)
@@ -1007,9 +1027,8 @@ static int usbhid_start(struct hid_device *hid)
usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr,
usbhid->ctrlbuf, 1, hid_ctrl, hid);
- usbhid->urbctrl->setup_dma = usbhid->cr_dma;
usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
- usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ usbhid->urbctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
usbhid_init_reports(hid);
@@ -1019,12 +1038,15 @@ static int usbhid_start(struct hid_device *hid)
/* Some keyboards don't work until their LEDs have been set.
* Since BIOSes do set the LEDs, it must be safe for any device
* that supports the keyboard boot protocol.
+ * In addition, enable remote wakeup by default for all keyboard
+ * devices supporting the boot protocol.
*/
if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
interface->desc.bInterfaceProtocol ==
- USB_INTERFACE_PROTOCOL_KEYBOARD)
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
usbhid_set_leds(hid);
-
+ device_set_wakeup_enable(&dev->dev, 1);
+ }
return 0;
fail:
@@ -1133,6 +1155,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
hid->name[0] = 0;
+ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1289,6 +1312,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1296,6 +1324,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
}
} else {
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
spin_lock_irq(&usbhid->lock);
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
@@ -1350,6 +1383,11 @@ static int hid_resume(struct usb_interface *intf)
hid_io_error(hid);
usbhid_restart_queues(usbhid);
+ if (status >= 0 && hid->driver && hid->driver->resume) {
+ int ret = hid->driver->resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1358,9 +1396,16 @@ static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
+ int status;
clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- return hid_post_reset(intf);
+ status = hid_post_reset(intf);
+ if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ int ret = hid->driver->reset_resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
+ return status;
}
#endif /* CONFIG_PM */
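
The suspend/resume hunks above delegate to per-driver hooks when a HID sub-driver provides them; a negative return from the driver's suspend hook aborts the transport-level suspend. A hypothetical sub-driver wiring up those hooks might look as follows (the driver name and hook bodies are illustrative only):

	#include <linux/hid.h>

	#ifdef CONFIG_PM
	static int example_hid_suspend(struct hid_device *hdev, pm_message_t message)
	{
		/* quiesce driver-private state; returning <0 aborts the suspend */
		return 0;
	}

	static int example_hid_resume(struct hid_device *hdev)
	{
		/* restore driver-private state after resume or reset-resume */
		return 0;
	}
	#endif

	static struct hid_driver example_hid_driver = {
		.name = "example-hid",
		/* .id_table, .probe, etc. omitted from this sketch */
	#ifdef CONFIG_PM
		.suspend = example_hid_suspend,
		.resume = example_hid_resume,
		.reset_resume = example_hid_resume,
	#endif
	};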
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1152f9b5fd4..5ff8d327f33 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 433602aed46..c24d2fa3e3b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -267,6 +267,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
struct hiddev_list *list;
int res, i;
+ /* See comment in hiddev_connect() for BKL explanation */
lock_kernel();
i = iminor(inode) - HIDDEV_MINOR_BASE;
@@ -894,8 +895,22 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hiddev->hid = hid;
hiddev->exist = 1;
- /* when lock_kernel() usage is fixed in usb_open(),
- * we could also fix it here */
+ /*
+ * BKL here is used to avoid race after usb_register_dev().
+ * Once the device node has been created, open() could happen on it.
+ * The code below will then fail, as hiddev_table hasn't been
+ * updated.
+ *
+ * The obvious fix -- introducing mutex to guard hiddev_table[]
+ * doesn't work, as usb_open() and usb_register_dev() both take
+ * minor_rwsem, thus we'll have ABBA deadlock.
+ *
+ * Before BKL pushdown, usb_open() had been acquiring it in the right
+ * order, so _open() could safely use it to protect against this race.
+ * Now the order is different, but AB-BA deadlock still doesn't occur
+ * as BKL is dropped on schedule() (i.e. while sleeping on
+ * minor_rwsem). Fugly.
+ */
lock_kernel();
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index ec20400c7f2..693fd3e720d 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -75,7 +75,6 @@ struct usbhid_device {
struct urb *urbctrl; /* Control URB */
struct usb_ctrlrequest *cr; /* Control request struct */
- dma_addr_t cr_dma; /* Control request struct dma */
struct hid_control_fifo ctrl[HID_CONTROL_FIFO_SIZE]; /* Control fifo */
unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */
char *ctrlbuf; /* Control buffer */
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index f843443ba5c..a948605564f 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -74,7 +74,6 @@ struct usb_kbd {
unsigned char *new;
struct usb_ctrlrequest *cr;
unsigned char *leds;
- dma_addr_t cr_dma;
dma_addr_t new_dma;
dma_addr_t leds_dma;
};
@@ -197,11 +196,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
return -1;
if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
- if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
+ if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
return -1;
- if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
+ if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -1;
- if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
+ if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
return -1;
return 0;
@@ -211,9 +210,9 @@ static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd)
{
usb_free_urb(kbd->irq);
usb_free_urb(kbd->led);
- usb_buffer_free(dev, 8, kbd->new, kbd->new_dma);
- usb_buffer_free(dev, sizeof(struct usb_ctrlrequest), kbd->cr, kbd->cr_dma);
- usb_buffer_free(dev, 1, kbd->leds, kbd->leds_dma);
+ usb_free_coherent(dev, 8, kbd->new, kbd->new_dma);
+ kfree(kbd->cr);
+ usb_free_coherent(dev, 1, kbd->leds, kbd->leds_dma);
}
static int usb_kbd_probe(struct usb_interface *iface,
@@ -304,15 +303,15 @@ static int usb_kbd_probe(struct usb_interface *iface,
usb_fill_control_urb(kbd->led, dev, usb_sndctrlpipe(dev, 0),
(void *) kbd->cr, kbd->leds, 1,
usb_kbd_led, kbd);
- kbd->led->setup_dma = kbd->cr_dma;
kbd->led->transfer_dma = kbd->leds_dma;
- kbd->led->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ kbd->led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
error = input_register_device(kbd->dev);
if (error)
goto fail2;
usb_set_intfdata(iface, kbd);
+ device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail2:
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 72ab4b26809..79b2bf81a05 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -142,7 +142,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
if (!mouse || !input_dev)
goto fail1;
- mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
+ mouse->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &mouse->data_dma);
if (!mouse->data)
goto fail1;
@@ -205,7 +205,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
fail3:
usb_free_urb(mouse->irq);
fail2:
- usb_buffer_free(dev, 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(dev, 8, mouse->data, mouse->data_dma);
fail1:
input_free_device(input_dev);
kfree(mouse);
@@ -221,7 +221,7 @@ static void usb_mouse_disconnect(struct usb_interface *intf)
usb_kill_urb(mouse->irq);
input_unregister_device(mouse->dev);
usb_free_urb(mouse->irq);
- usb_buffer_free(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
kfree(mouse);
}
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e1754a0..e19cf8eb6cc 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -447,13 +447,14 @@ config SENSORS_IT87
will be called it87.
config SENSORS_LM63
- tristate "National Semiconductor LM63"
+ tristate "National Semiconductor LM63 and LM64"
depends on I2C
help
- If you say yes here you get support for the National Semiconductor
- LM63 remote diode digital temperature sensor with integrated fan
- control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro)
- motherboard, among others.
+ If you say yes here you get support for the National
+ Semiconductor LM63 and LM64 remote diode digital temperature
+ sensors with integrated fan control. Such chips are found
+ on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
+ others.
This driver can also be built as a module. If so, the module
will be called lm63.
@@ -492,7 +493,8 @@ config SENSORS_LM75
- NXP's LM75A
- ST Microelectronics STDS75
- TelCom (now Microchip) TCN75
- - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275
+ - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175,
+ TMP275
This driver supports driver model based binding through board
specific I2C device tables.
@@ -749,6 +751,16 @@ config SENSORS_DME1737
This driver can also be built as a module. If so, the module
will be called dme1737.
+config SENSORS_EMC1403
+ tristate "SMSC EMC1403 thermal sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the SMSC EMC1403
+ temperature monitoring chip.
+
+ Threshold values can be configured via sysfs, and readings from
+ the different diodes are exposed there as well.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
@@ -802,6 +814,15 @@ config SENSORS_ADS7828
This driver can also be built as a module. If so, the module
will be called ads7828.
+config SENSORS_ADS7871
+ tristate "Texas Instruments ADS7871 A/D converter"
+ depends on SPI
+ help
+ If you say yes here you get support for the TI ADS7871 and ADS7870.
+
+ This driver can also be built as a module. If so, the module
+ will be called ads7871.
+
config SENSORS_AMC6821
tristate "Texas Instruments AMC6821"
depends on I2C && EXPERIMENTAL
@@ -822,6 +843,16 @@ config SENSORS_THMC50
This driver can also be built as a module. If so, the module
will be called thmc50.
+config SENSORS_TMP102
+ tristate "Texas Instruments TMP102"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Texas Instruments TMP102
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp102.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d112a..2138ceb1a71 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
+obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
+obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
+obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 1644b92e7cc..15c1a9616af 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -36,6 +36,7 @@
#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr))
#define ADM1031_REG_PWM (0x22)
#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
+#define ADM1031_REG_FAN_FILTER (0x23)
#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
@@ -61,6 +62,9 @@
#define ADM1031_CONF2_TACH2_ENABLE 0x08
#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
+#define ADM1031_UPDATE_RATE_MASK 0x1c
+#define ADM1031_UPDATE_RATE_SHIFT 2
+
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -75,6 +79,7 @@ struct adm1031_data {
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
+ unsigned int update_rate; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+/* Update Rate */
+static const unsigned int update_rates[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
+};
+
+static ssize_t show_update_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%u\n", data->update_rate);
+}
+
+static ssize_t set_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int i, err;
+ u8 reg;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* find the nearest update rate from the table */
+ for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
+ if (val >= update_rates[i])
+ break;
+ }
+ /* if not found, we point to the last entry (lowest update rate) */
+
+ /* set the new update rate while preserving other settings */
+ reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ reg &= ~ADM1031_UPDATE_RATE_MASK;
+ reg |= i << ADM1031_UPDATE_RATE_SHIFT;
+ adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
+
+ mutex_lock(&data->update_lock);
+ data->update_rate = update_rates[i];
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
+ set_update_rate);
+
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
+ &dev_attr_update_rate.attr,
&dev_attr_alarms.attr,
NULL
@@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client)
{
unsigned int read_val;
unsigned int mask;
+ int i;
struct adm1031_data *data = i2c_get_clientdata(client);
mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE);
@@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client)
ADM1031_CONF1_MONITOR_ENABLE);
}
+ /* Read the chip's update rate */
+ mask = ADM1031_UPDATE_RATE_MASK;
+ read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
+ data->update_rate = update_rates[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
int chan;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
+ next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
for (chan = 0;
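
set_update_rate() above picks the largest table interval that does not exceed the requested value; a request smaller than every entry falls through to the last one (125 ms). A standalone sketch of the same selection, with worked examples in main():

	#include <stdio.h>

	static const unsigned int rates[] = {
		16000, 8000, 4000, 2000, 1000, 500, 250, 125,
	};

	/* Mirrors the driver's loop: 3000 selects 2000 ms, 50 falls through
	 * to the final entry, 125 ms. */
	static unsigned int pick_rate(unsigned long val)
	{
		unsigned int i;

		for (i = 0; i < sizeof(rates) / sizeof(rates[0]) - 1; i++)
			if (val >= rates[i])
				break;
		return rates[i];
	}

	int main(void)
	{
		printf("%u %u\n", pick_rate(3000), pick_rate(50));	/* 2000 125 */
		return 0;
	}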
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
new file mode 100644
index 00000000000..b300a2048af
--- /dev/null
+++ b/drivers/hwmon/ads7871.c
@@ -0,0 +1,253 @@
+/*
+ * ads7871 - driver for TI ADS7871 A/D converter
+ *
+ * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * later as published by the Free Software Foundation.
+ *
+ * You need to have something like this in struct spi_board_info
+ * {
+ * .modalias = "ads7871",
+ * .max_speed_hz = 2*1000*1000,
+ * .chip_select = 0,
+ * .bus_num = 1,
+ * },
+ */
+
+/*From figure 18 in the datasheet*/
+/*Register addresses*/
+#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/
+#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/
+#define REG_PGA_VALID 2 /*PGA Valid Register*/
+#define REG_AD_CONTROL 3 /*A/D Control Register*/
+#define REG_GAIN_MUX 4 /*Gain/Mux Register*/
+#define REG_IO_STATE 5 /*Digital I/O State Register*/
+#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/
+#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/
+#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
+#define REG_ID 31 /*ID Register*/
+
+/*From figure 17 in the datasheet
+* These bits get ORed with the address to form
+* the instruction byte */
+/*Instruction Bit masks*/
+#define INST_MODE_bm (1<<7)
+#define INST_READ_bm (1<<6)
+#define INST_16BIT_bm (1<<5)
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define MUX_CNV_bv 7
+#define MUX_CNV_bm (1<<MUX_CNV_bv)
+#define MUX_M3_bm (1<<3) /*M3 selects single ended*/
+#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define OSC_OSCR_bm (1<<5)
+#define OSC_OSCE_bm (1<<4)
+#define OSC_REFE_bm (1<<3)
+#define OSC_BUFE_bm (1<<2)
+#define OSC_R2V_bm (1<<1)
+#define OSC_RBG_bm (1<<0)
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define DEVICE_NAME "ads7871"
+
+struct ads7871_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+};
+
+static int ads7871_read_reg8(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm;
+ ret = spi_w8r8(spi, reg);
+ return ret;
+}
+
+static int ads7871_read_reg16(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm | INST_16BIT_bm;
+ ret = spi_w8r16(spi, reg);
+ return ret;
+}
+
+static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
+{
+ u8 tmp[2] = {reg, val};
+ return spi_write(spi, tmp, sizeof(tmp));
+}
+
+static ssize_t show_voltage(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret, val, i = 0;
+ uint8_t channel, mux_cnv;
+
+ channel = attr->index;
+ /*TODO: add support for conversions
+ *other than single ended with a gain of 1*/
+ /*MUX_M3_bm forces single ended*/
+ /*This is also where the gain of the PGA would be set*/
+ ads7871_write_reg8(spi, REG_GAIN_MUX,
+ (MUX_CNV_bm | MUX_M3_bm | channel));
+
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ /*on 400MHz arm9 platform the conversion
+ *is already done when we do this test*/
+ while ((i < 2) && mux_cnv) {
+ i++;
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ msleep_interruptible(1);
+ }
+
+ if (mux_cnv == 0) {
+ val = ads7871_read_reg16(spi, REG_LS_BYTE);
+ /*result in volts*10000 = (val/8192)*2.5*10000*/
+ val = ((val>>2) * 25000) / 8192;
+ return sprintf(buf, "%d\n", val);
+ } else {
+ return -EIO;
+ }
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
+static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ads7871_group = {
+ .attrs = ads7871_attributes,
+};
+
+static int __devinit ads7871_probe(struct spi_device *spi)
+{
+ int status, ret, err = 0;
+ uint8_t val;
+ struct ads7871_data *pdata;
+
+ dev_dbg(&spi->dev, "probe\n");
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+ if (status < 0)
+ goto error_free;
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
+ goto error_remove;
+ }
+
+ spi_set_drvdata(spi, pdata);
+
+ /* Configure the SPI bus */
+ spi->mode = (SPI_MODE_0);
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
+ ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
+
+ val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
+ ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
+ ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
+
+ dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+ /*because there is no other error checking on an SPI bus
+ we need to make sure we really have a chip*/
+ if (val != ret) {
+ err = -ENODEV;
+ goto error_remove;
+ }
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+error_free:
+ kfree(pdata);
+exit:
+ return err;
+}
+
+static int __devexit ads7871_remove(struct spi_device *spi)
+{
+ struct ads7871_data *pdata = spi_get_drvdata(spi);
+
+ hwmon_device_unregister(pdata->hwmon_dev);
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+ kfree(pdata);
+ return 0;
+}
+
+static struct spi_driver ads7871_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ads7871_probe,
+ .remove = __devexit_p(ads7871_remove),
+};
+
+static int __init ads7871_init(void)
+{
+ return spi_register_driver(&ads7871_driver);
+}
+
+static void __exit ads7871_exit(void)
+{
+ spi_unregister_driver(&ads7871_driver);
+}
+
+module_init(ads7871_init);
+module_exit(ads7871_exit);
+
+MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
+MODULE_DESCRIPTION("TI ADS7871 A/D driver");
+MODULE_LICENSE("GPL");
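
show_voltage() in the new driver shifts the raw 16-bit register value right by two and scales it against the 2.5 V reference, reporting volts * 10000 as the in-code comment notes. A standalone sketch of just that arithmetic:

	#include <stdio.h>

	/* Mirrors the driver's conversion: result = ((raw >> 2) * 25000) / 8192,
	 * i.e. volts * 10000 relative to the 2.5 V reference. */
	static int ads7871_scale(int raw)
	{
		return ((raw >> 2) * 25000) / 8192;
	}

	int main(void)
	{
		/* 8192 (after the shift) maps to 25000, i.e. 2.5000 V;
		 * 4096 maps to 12500, i.e. 1.2500 V. */
		printf("%d %d\n", ads7871_scale(8192 << 2), ads7871_scale(4096 << 2));
		return 0;
	}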
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f085c18d290..b6598aa557a 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = {
/* Set 18: MacBook Pro 2,2 */
{ "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
"Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
+/* Set 19: MacBook Pro 5,3 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
+ "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
+ "Tm0P", "Ts0P", "Ts0S", NULL },
+/* Set 20: MacBook Pro 5,4 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
+ "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
+/* Set 21: MacBook Pro 6,2 */
+ { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
+ "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
+ "Ts0P", "Ts0S", NULL },
+/* Set 22: MacBook Pro 7,1 */
+ { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
+ "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -646,6 +660,17 @@ out:
return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right);
}
+/* Displays sensor key as label */
+static ssize_t applesmc_show_sensor_label(struct device *dev,
+ struct device_attribute *devattr, char *sysfsbuf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ const char *key =
+ temperature_sensors_sets[applesmc_temperature_set][attr->index];
+
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+}
+
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
@@ -1113,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = {
/*
* Temperature sensors sysfs entries.
*/
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 15);
+static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 23);
+static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 24);
+static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 25);
+static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 26);
+static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 27);
+static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 28);
+static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 29);
+static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 30);
+static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 31);
+static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 32);
+static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 33);
+static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 34);
+static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 35);
+static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 36);
+static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 37);
+static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 38);
+static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 39);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
applesmc_show_temperature, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
@@ -1194,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
applesmc_show_temperature, NULL, 39);
+static struct attribute *label_attributes[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp6_label.dev_attr.attr,
+ &sensor_dev_attr_temp7_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ &sensor_dev_attr_temp9_label.dev_attr.attr,
+ &sensor_dev_attr_temp10_label.dev_attr.attr,
+ &sensor_dev_attr_temp11_label.dev_attr.attr,
+ &sensor_dev_attr_temp12_label.dev_attr.attr,
+ &sensor_dev_attr_temp13_label.dev_attr.attr,
+ &sensor_dev_attr_temp14_label.dev_attr.attr,
+ &sensor_dev_attr_temp15_label.dev_attr.attr,
+ &sensor_dev_attr_temp16_label.dev_attr.attr,
+ &sensor_dev_attr_temp17_label.dev_attr.attr,
+ &sensor_dev_attr_temp18_label.dev_attr.attr,
+ &sensor_dev_attr_temp19_label.dev_attr.attr,
+ &sensor_dev_attr_temp20_label.dev_attr.attr,
+ &sensor_dev_attr_temp21_label.dev_attr.attr,
+ &sensor_dev_attr_temp22_label.dev_attr.attr,
+ &sensor_dev_attr_temp23_label.dev_attr.attr,
+ &sensor_dev_attr_temp24_label.dev_attr.attr,
+ &sensor_dev_attr_temp25_label.dev_attr.attr,
+ &sensor_dev_attr_temp26_label.dev_attr.attr,
+ &sensor_dev_attr_temp27_label.dev_attr.attr,
+ &sensor_dev_attr_temp28_label.dev_attr.attr,
+ &sensor_dev_attr_temp29_label.dev_attr.attr,
+ &sensor_dev_attr_temp30_label.dev_attr.attr,
+ &sensor_dev_attr_temp31_label.dev_attr.attr,
+ &sensor_dev_attr_temp32_label.dev_attr.attr,
+ &sensor_dev_attr_temp33_label.dev_attr.attr,
+ &sensor_dev_attr_temp34_label.dev_attr.attr,
+ &sensor_dev_attr_temp35_label.dev_attr.attr,
+ &sensor_dev_attr_temp36_label.dev_attr.attr,
+ &sensor_dev_attr_temp37_label.dev_attr.attr,
+ &sensor_dev_attr_temp38_label.dev_attr.attr,
+ &sensor_dev_attr_temp39_label.dev_attr.attr,
+ &sensor_dev_attr_temp40_label.dev_attr.attr,
+ NULL
+};
+
static struct attribute *temperature_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -1241,6 +1390,10 @@ static struct attribute *temperature_attributes[] = {
static const struct attribute_group temperature_attributes_group =
{ .attrs = temperature_attributes };
+static const struct attribute_group label_attributes_group = {
+ .attrs = label_attributes
+};
+
/* Module stuff */
/*
@@ -1363,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 17 },
/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
{ .accelerometer = 1, .light = 1, .temperature_set = 18 },
+/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 19 },
+/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 20 },
+/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 21 },
+/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 22 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1376,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
&applesmc_dmi_data[7]},
+ { applesmc_dmi_match, "Apple MacBook Pro 7", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
+ &applesmc_dmi_data[22]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
+ &applesmc_dmi_data[20]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
+ &applesmc_dmi_data[19]},
+ { applesmc_dmi_match, "Apple MacBook Pro 6", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
+ &applesmc_dmi_data[21]},
{ applesmc_dmi_match, "Apple MacBook Pro 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
@@ -1518,7 +1695,8 @@ static int __init applesmc_init(void)
for (i = 0;
temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
i++) {
- if (temperature_attributes[i] == NULL) {
+ if (temperature_attributes[i] == NULL ||
+ label_attributes[i] == NULL) {
printk(KERN_ERR "applesmc: More temperature sensors "
"in temperature_sensors_sets (at least %i)"
"than available sysfs files in "
@@ -1530,6 +1708,10 @@ static int __init applesmc_init(void)
temperature_attributes[i]);
if (ret)
goto out_temperature;
+ ret = sysfs_create_file(&pdev->dev.kobj,
+ label_attributes[i]);
+ if (ret)
+ goto out_temperature;
}
if (applesmc_accelerometer) {
@@ -1580,6 +1762,7 @@ out_accelerometer:
if (applesmc_accelerometer)
applesmc_release_accelerometer();
out_temperature:
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
out_fans:
while (fans_handled)
@@ -1609,6 +1792,7 @@ static void __exit applesmc_exit(void)
}
if (applesmc_accelerometer)
applesmc_release_accelerometer();
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
while (fans_handled)
sysfs_remove_group(&pdev->dev.kobj,
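
The new tempN_label attributes pair each tempN_input reading with the SMC key it comes from, following the usual hwmon label convention. A user-space sketch of reading such a pair (the sysfs path is illustrative; the applesmc platform-device suffix can differ per machine):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative path only; locate the applesmc device first. */
		FILE *l = fopen("/sys/devices/platform/applesmc.768/temp1_label", "r");
		FILE *v = fopen("/sys/devices/platform/applesmc.768/temp1_input", "r");
		char label[32];
		int milli_c;

		if (!l || !v)
			return 1;
		if (fscanf(l, "%31s", label) == 1 && fscanf(v, "%d", &milli_c) == 1)
			printf("%s: %d.%03d C\n", label, milli_c / 1000, milli_c % 1000);
		fclose(l);
		fclose(v);
		return 0;
	}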
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 16c42024072..653db1bda93 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1411,6 +1411,13 @@ static int __init atk0110_init(void)
{
int ret;
+ /* Make sure it's safe to access the device through ACPI */
+ if (!acpi_resources_are_enforced()) {
+ pr_err("atk: Resources not safely usable due to "
+ "acpi_enforce_resources kernel parameter\n");
+ return -EBUSY;
+ }
+
ret = acpi_bus_register_driver(&atk_driver);
if (ret)
pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index e9b7fbc5a44..2988da150ed 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -241,6 +241,55 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
return tjmax;
}
+static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
+{
+ /* 100 C is the default for both mobile and non-mobile CPUs */
+ int err;
+ u32 eax, edx;
+ u32 val;
+
+ /* A new feature of current Intel(R) processors, the
+ IA32_TEMPERATURE_TARGET contains the TjMax value */
+ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err) {
+ dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ } else {
+ val = (eax >> 16) & 0xff;
+ /*
+ * If the TjMax is not plausible, an assumption
+ * will be used
+ */
+ if ((val > 80) && (val < 120)) {
+ dev_info(dev, "TjMax is %d C.\n", val);
+ return val * 1000;
+ }
+ }
+
+ /*
+ * An assumption is made for early CPUs and unreadable MSR.
+ * NOTE: the given value may not be correct.
+ */
+
+ switch (c->x86_model) {
+ case 0xe:
+ case 0xf:
+ case 0x16:
+ case 0x1a:
+ dev_warn(dev, "TjMax is assumed as 100 C!\n");
+ return 100000;
+ break;
+ case 0x17:
+ case 0x1c: /* Atom CPUs */
+ return adjust_tjmax(c, id, dev);
+ break;
+ default:
+ dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
+ " using default TjMax of 100C.\n", c->x86_model);
+ return 100000;
+ }
+}
+
static int __devinit coretemp_probe(struct platform_device *pdev)
{
struct coretemp_data *data;
@@ -283,14 +332,18 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
}
}
- data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
+ data->tjmax = get_tjmax(c, data->id, &pdev->dev);
platform_set_drvdata(pdev, data);
- /* read the still undocumented IA32_TEMPERATURE_TARGET it exists
- on older CPUs but not in this register, Atoms don't have it either */
+ /*
+ * read the still undocumented IA32_TEMPERATURE_TARGET. It exists
+ * on older CPUs but not in this register,
+ * Atoms don't have it either.
+ */
if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
- err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
+ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
if (err) {
dev_warn(&pdev->dev, "Unable to read"
" IA32_TEMPERATURE_TARGET MSR\n");
@@ -451,28 +504,20 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only; all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+ if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
+ err = coretemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
- /* check if family 6, models 0xe (Pentium M DC),
- 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
- 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
- 0x1e (Lynnfield) */
- if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
- !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
- (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
- (c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
- (c->x86_model == 0x1e))) {
-
- /* supported CPU not found, but report the unknown
- family 6 CPU */
- if ((c->x86 == 0x6) && (c->x86_model > 0xf))
- printk(KERN_WARNING DRVNAME ": Unknown CPU "
- "model 0x%x\n", c->x86_model);
- continue;
+ } else {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
}
-
- err = coretemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
}
if (list_empty(&pdev_list)) {
err = -ENODEV;
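
get_tjmax() above reads TjMax from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET and converts it to millidegrees, treating values outside 80..120 C as implausible. A minimal sketch of just that extraction, assuming the MSR read already succeeded:

	#include <linux/types.h>

	/* 'eax' is the low half of IA32_TEMPERATURE_TARGET as returned by
	 * rdmsr_safe_on_cpu(); returns TjMax in millidegrees Celsius, or 0 if
	 * the value looks implausible and the caller should fall back as above. */
	static int tjmax_from_msr_sketch(u32 eax)
	{
		u32 val = (eax >> 16) & 0xff;

		if (val > 80 && val < 120)
			return (int)val * 1000;
		return 0;
	}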
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 823dd28a902..980c17d5eea 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1,12 +1,14 @@
/*
- * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and
- * SCH5027 Super-I/O chips integrated hardware monitoring features.
- * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com>
+ * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027,
+ * and SCH5127 Super-I/O chips integrated hardware monitoring
+ * features.
+ * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger <juergh@gmail.com>
*
* This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access
* the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus
- * if a SCH311x chip is found. Both types of chips have very similar hardware
- * monitoring capabilities but differ in the way they can be accessed.
+ * if a SCH311x or SCH5127 chip is found. Both types of chips have very
+ * similar hardware monitoring capabilities but differ in the way they can be
+ * accessed.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
/* Addresses to scan */
static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
-enum chips { dme1737, sch5027, sch311x };
+enum chips { dme1737, sch5027, sch311x, sch5127 };
/* ---------------------------------------------------------------------
* Registers
@@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
#define DME1737_VERSTEP_MASK 0xf8
#define SCH311X_DEVICE 0x8c
#define SCH5027_VERSTEP 0x69
+#define SCH5127_DEVICE 0x8e
+
+/* Device ID values (global configuration register index 0x20) */
+#define DME1737_ID_1 0x77
+#define DME1737_ID_2 0x78
+#define SCH3112_ID 0x7c
+#define SCH3114_ID 0x7d
+#define SCH3116_ID 0x7f
+#define SCH5027_ID 0x89
+#define SCH5127_ID 0x86
/* Length of ISA address segment */
#define DME1737_EXTENT 2
+/* chip-dependent features */
+#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */
+#define HAS_VID (1 << 1) /* bit 1 */
+#define HAS_ZONE3 (1 << 2) /* bit 2 */
+#define HAS_ZONE_HYST (1 << 3) /* bit 3 */
+#define HAS_PWM_MIN (1 << 4) /* bit 4 */
+#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */
+#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */
+
/* ---------------------------------------------------------------------
* Data structures and manipulation thereof
* --------------------------------------------------------------------- */
@@ -187,8 +208,7 @@ struct dme1737_data {
u8 vid;
u8 pwm_rr_en;
- u8 has_pwm;
- u8 has_fan;
+ u32 has_features;
/* Register values */
u16 in[7];
@@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
3300};
static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
3300};
+static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
+ 3300};
#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
(type) == sch5027 ? IN_NOMINAL_SCH5027 : \
+ (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
IN_NOMINAL_DME1737)
/* Voltage input
@@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Sample register contents every 1 sec */
if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vid = dme1737_read(data, DME1737_REG_VID) &
0x3f;
}
@@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
DME1737_REG_TEMP_MIN(ix));
data->temp_max[ix] = dme1737_read(data,
DME1737_REG_TEMP_MAX(ix));
- if (data->type != sch5027) {
+ if (data->has_features & HAS_TEMP_OFFSET) {
data->temp_offset[ix] = dme1737_read(data,
DME1737_REG_TEMP_OFFSET(ix));
}
@@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
/* Skip reading registers if optional fans are not
* present */
- if (!(data->has_fan & (1 << ix))) {
+ if (!(data->has_features & HAS_FAN(ix))) {
continue;
}
data->fan[ix] = dme1737_read(data,
@@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
/* Skip reading registers if optional PWMs are not
* present */
- if (!(data->has_pwm & (1 << ix))) {
+ if (!(data->has_features & HAS_PWM(ix))) {
continue;
}
data->pwm[ix] = dme1737_read(data,
@@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Thermal zone registers */
for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
- data->zone_low[ix] = dme1737_read(data,
- DME1737_REG_ZONE_LOW(ix));
- data->zone_abs[ix] = dme1737_read(data,
- DME1737_REG_ZONE_ABS(ix));
+ /* Skip reading registers if zone3 is not present */
+ if ((ix == 2) && !(data->has_features & HAS_ZONE3)) {
+ continue;
+ }
+ /* sch5127 zone2 registers are special */
+ if ((ix == 1) && (data->type == sch5127)) {
+ data->zone_low[1] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(2));
+ data->zone_abs[1] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(2));
+ } else {
+ data->zone_low[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(ix));
+ data->zone_abs[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(ix));
+ }
}
- if (data->type != sch5027) {
+ if (data->has_features & HAS_ZONE_HYST) {
for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
data->zone_hyst[ix] = dme1737_read(data,
DME1737_REG_ZONE_HYST(ix));
@@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
NULL
};
@@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = {
.attrs = dme1737_attr,
};
-/* The following struct holds misc attributes, which are not available in all
- * chips. Their creation depends on the chip type which is determined during
- * module load. */
-static struct attribute *dme1737_misc_attr[] = {
- /* Temperatures */
+/* The following struct holds temp offset attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_temp_offset_attr[] = {
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
- /* Zones */
- &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_misc_group = {
- .attrs = dme1737_misc_attr,
+static const struct attribute_group dme1737_temp_offset_group = {
+ .attrs = dme1737_temp_offset_attr,
};
-/* The following struct holds VID-related attributes. Their creation
- depends on the chip type which is determined during module load. */
+/* The following struct holds VID related attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737 */
static struct attribute *dme1737_vid_attr[] = {
&dev_attr_vrm.attr,
&dev_attr_cpu0_vid.attr,
@@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = {
.attrs = dme1737_vid_attr,
};
+/* The following struct holds temp zone 3 related attributes, which are not
+ * available in all chips. The following chips support them:
+ * DME1737, SCH311x, SCH5027 */
+static struct attribute *dme1737_zone3_attr[] = {
+ &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone3_group = {
+ .attrs = dme1737_zone3_attr,
+};
+
+
+/* The following struct holds temp zone hysteresis related attributes, which
+ * are not available in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_zone_hyst_attr[] = {
+ &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_hyst_group = {
+ .attrs = dme1737_zone_hyst_attr,
+};
+
/* The following structs hold the PWM attributes, some of which are optional.
* Their creation depends on the chip configuration which is determined during
* module load. */
@@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = {
{ .attrs = dme1737_pwm6_attr },
};
-/* The following struct holds misc PWM attributes, which are not available in
- * all chips. Their creation depends on the chip type which is determined
+/* The following struct holds auto PWM min attributes, which are not available
+ * in all chips. Their creation depends on the chip type which is determined
* during module load. */
-static struct attribute *dme1737_pwm_misc_attr[] = {
+static struct attribute *dme1737_auto_pwm_min_attr[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
@@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = {
&sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_chmod_group = {
+ .attrs = dme1737_zone_chmod_attr,
+};
+
+
+/* The permissions of the following zone 3 attributes are changed to read-
+ * writeable if the chip is *not* locked. Otherwise they stay read-only. */
+static struct attribute *dme1737_zone3_chmod_attr[] = {
&sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_zone_chmod_group = {
- .attrs = dme1737_zone_chmod_attr,
+static const struct attribute_group dme1737_zone3_chmod_group = {
+ .attrs = dme1737_zone3_chmod_attr,
};
/* The permissions of the following PWM attributes are changed to read-
@@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev)
int ix;
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_fan_group[ix]);
}
}
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_pwm_group[ix]);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3) {
sysfs_remove_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]);
+ dme1737_auto_pwm_min_attr[ix]);
}
}
}
- if (data->type != sch5027) {
- sysfs_remove_group(&dev->kobj, &dme1737_misc_group);
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group);
}
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
sysfs_remove_group(&dev->kobj, &dme1737_vid_group);
}
-
+ if (data->has_features & HAS_ZONE3) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone3_group);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
+ }
sysfs_remove_group(&dev->kobj, &dme1737_group);
if (!data->client) {
@@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev)
goto exit_remove;
}
- /* Create misc sysfs attributes */
- if ((data->type != sch5027) &&
+ /* Create chip-dependent sysfs attributes */
+ if ((data->has_features & HAS_TEMP_OFFSET) &&
(err = sysfs_create_group(&dev->kobj,
- &dme1737_misc_group))) {
+ &dme1737_temp_offset_group))) {
goto exit_remove;
}
-
- /* Create VID-related sysfs attributes */
- if ((data->type == dme1737) &&
+ if ((data->has_features & HAS_VID) &&
(err = sysfs_create_group(&dev->kobj,
&dme1737_vid_group))) {
goto exit_remove;
}
+ if ((data->has_features & HAS_ZONE3) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone3_group))) {
+ goto exit_remove;
+ }
+ if ((data->has_features & HAS_ZONE_HYST) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone_hyst_group))) {
+ goto exit_remove;
+ }
/* Create fan sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_fan_group[ix]))) {
goto exit_remove;
@@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev)
/* Create PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_pwm_group[ix]))) {
goto exit_remove;
}
- if (data->type != sch5027 && ix < 3 &&
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
(err = sysfs_create_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]))) {
+ dme1737_auto_pwm_min_attr[ix]))) {
goto exit_remove;
}
}
@@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev)
dme1737_chmod_group(dev, &dme1737_zone_chmod_group,
S_IRUGO | S_IWUSR);
- /* Change permissions of misc sysfs attributes */
- if (data->type != sch5027) {
- dme1737_chmod_group(dev, &dme1737_misc_group,
+ /* Change permissions of chip-dependent sysfs attributes */
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ dme1737_chmod_group(dev, &dme1737_temp_offset_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE3) {
+ dme1737_chmod_group(dev, &dme1737_zone3_chmod_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ dme1737_chmod_group(dev, &dme1737_zone_hyst_group,
S_IRUGO | S_IWUSR);
}
/* Change permissions of PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
dme1737_chmod_group(dev,
&dme1737_pwm_chmod_group[ix],
S_IRUGO | S_IWUSR);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) &&
+ ix < 3) {
dme1737_chmod_file(dev,
- dme1737_pwm_misc_attr[ix],
+ dme1737_auto_pwm_min_attr[ix],
S_IRUGO | S_IWUSR);
}
}
@@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev)
/* Change permissions of pwm[1-3] if in manual mode */
for (ix = 0; ix < 3; ix++) {
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
dme1737_chmod_file(dev,
dme1737_pwm_chmod_attr[ix],
@@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev)
return -EFAULT;
}
- /* Determine which optional fan and pwm features are enabled/present */
+ /* Determine which optional fan and pwm features are enabled (only
+ * valid for I2C devices) */
if (client) { /* I2C chip */
data->config2 = dme1737_read(data, DME1737_REG_CONFIG2);
/* Check if optional fan3 input is enabled */
if (data->config2 & 0x04) {
- data->has_fan |= (1 << 2);
+ data->has_features |= HAS_FAN(2);
}
/* Fan4 and pwm3 are only available if the client's I2C address
* is the default 0x2e. Otherwise the I/Os associated with
* these functions are used for addr enable/select. */
if (client->addr == 0x2e) {
- data->has_fan |= (1 << 3);
- data->has_pwm |= (1 << 2);
+ data->has_features |= HAS_FAN(3) | HAS_PWM(2);
}
/* Determine which of the optional fan[5-6] and pwm[5-6]
@@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev)
dev_warn(dev, "Failed to query Super-IO for optional "
"features.\n");
}
- } else { /* ISA chip */
- /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6]
- * don't exist in the ISA chip. */
- data->has_fan |= (1 << 2);
- data->has_pwm |= (1 << 2);
}
- /* Fan1, fan2, pwm1, and pwm2 are always present */
- data->has_fan |= 0x03;
- data->has_pwm |= 0x03;
+ /* Fan[1-2] and pwm[1-2] are present in all chips */
+ data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1);
+
+ /* Chip-dependent features */
+ switch (data->type) {
+ case dme1737:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN;
+ break;
+ case sch311x:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2);
+ break;
+ case sch5027:
+ data->has_features |= HAS_ZONE3;
+ break;
+ case sch5127:
+ data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+ break;
+ default:
+ break;
+ }
dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
"fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
- (data->has_pwm & (1 << 2)) ? "yes" : "no",
- (data->has_pwm & (1 << 4)) ? "yes" : "no",
- (data->has_pwm & (1 << 5)) ? "yes" : "no",
- (data->has_fan & (1 << 2)) ? "yes" : "no",
- (data->has_fan & (1 << 3)) ? "yes" : "no",
- (data->has_fan & (1 << 4)) ? "yes" : "no",
- (data->has_fan & (1 << 5)) ? "yes" : "no");
+ (data->has_features & HAS_PWM(2)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(4)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(5)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(2)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(3)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(4)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(5)) ? "yes" : "no");
reg = dme1737_read(data, DME1737_REG_TACH_PWM);
/* Inform if fan-to-pwm mapping differs from the default */
@@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev)
for (ix = 0; ix < 3; ix++) {
data->pwm_config[ix] = dme1737_read(data,
DME1737_REG_PWM_CONFIG(ix));
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
dev_info(dev, "Switching pwm%d to "
"manual mode.\n", ix + 1);
@@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev)
data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
/* Set VRM */
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vrm = vid_which_vrm();
}
@@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * The DME1737 can return either 0x78 or 0x77 as its device ID.
- * The SCH5027 returns 0x89 as its device ID. */
+	 * We currently know about two kinds of DME1737 plus the SCH5027. */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) {
+ if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 ||
+ reg == SCH5027_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
* are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
* to '10' if the respective feature is enabled. */
if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
- data->has_fan |= (1 << 5);
+ data->has_features |= HAS_FAN(5);
}
if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
- data->has_pwm |= (1 << 5);
+ data->has_features |= HAS_PWM(5);
}
if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
- data->has_fan |= (1 << 4);
+ data->has_features |= HAS_FAN(4);
}
if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
- data->has_pwm |= (1 << 4);
+ data->has_features |= HAS_PWM(4);
}
exit:
@@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client,
if (company == DME1737_COMPANY_SMSC &&
verstep == SCH5027_VERSTEP) {
name = "sch5027";
-
} else if (company == DME1737_COMPANY_SMSC &&
(verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) {
name = "dme1737";
@@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and
- * SCH3116 (0x7f). */
+ * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
+ if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID ||
+ reg == SCH5127_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
/* Skip chip detection if module is loaded with force_id parameter */
- if (!force_id) {
+ switch (force_id) {
+ case SCH3112_ID:
+ case SCH3114_ID:
+ case SCH3116_ID:
+ data->type = sch311x;
+ break;
+ case SCH5127_ID:
+ data->type = sch5127;
+ break;
+ default:
company = dme1737_read(data, DME1737_REG_COMPANY);
device = dme1737_read(data, DME1737_REG_DEVICE);
- if (!((company == DME1737_COMPANY_SMSC) &&
- (device == SCH311X_DEVICE))) {
+ if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH311X_DEVICE)) {
+ data->type = sch311x;
+ } else if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH5127_DEVICE)) {
+ data->type = sch5127;
+ } else {
err = -ENODEV;
goto exit_kfree;
}
}
- data->type = sch311x;
- /* Fill in the remaining client fields and initialize the mutex */
- data->name = "sch311x";
+ if (data->type == sch5127) {
+ data->name = "sch5127";
+ } else {
+ data->name = "sch311x";
+ }
+
+ /* Initialize the mutex */
mutex_init(&data->update_lock);
- dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr);
+ dev_info(dev, "Found a %s chip at 0x%04x\n",
+ data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
/* Initialize the chip */
if ((err = dme1737_init_device(dev))) {
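
The dme1737 hunks above replace the separate has_fan/has_pwm bitmaps with a single has_features word that is tested through HAS_FAN(ix), HAS_PWM(ix) and flags such as HAS_VID or HAS_ZONE3. The macro definitions live in an earlier hunk of this patch and are not shown in this excerpt; the sketch below only illustrates the kind of single-word bit encoding they imply (the bit positions here are assumptions for illustration, not the patch's actual values):

/* Illustrative encoding only -- the real definitions appear earlier in the patch. */
#define HAS_TEMP_OFFSET		(1 << 0)
#define HAS_VID			(1 << 1)
#define HAS_ZONE3		(1 << 2)
#define HAS_ZONE_HYST		(1 << 3)
#define HAS_PWM_MIN		(1 << 4)
#define HAS_FAN(ix)		(1 << ((ix) + 5))	/* fan1..fan6 -> bits 5..10 */
#define HAS_PWM(ix)		(1 << ((ix) + 11))	/* pwm1..pwm6 -> bits 11..16 */

With such an encoding, "does this chip have fan3?" becomes a single test of data->has_features & HAS_FAN(2), which is exactly how the code above reads.
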
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
new file mode 100644
index 00000000000..0e4b5642638
--- /dev/null
+++ b/drivers/hwmon/emc1403.c
@@ -0,0 +1,344 @@
+/*
+ * emc1403.c - SMSC Thermal Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO
+ * - cache alarm and critical limit registers
+ * - add emc1404 support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+
+#define THERMAL_PID_REG 0xfd
+#define THERMAL_SMSC_ID_REG 0xfe
+#define THERMAL_REVISION_REG 0xff
+
+struct thermal_data {
+ struct device *hwmon_dev;
+ struct mutex mutex;
+ /* Cache the hyst value so we don't keep re-reading it. In theory
+ we could cache it forever as nobody else should be writing it. */
+ u8 cached_hyst;
+ unsigned long hyst_valid;
+};
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->index);
+
+ if (retval < 0)
+ return retval;
+ return sprintf(buf, "%d000\n", retval);
+}
+
+static ssize_t show_bit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->nr);
+
+ if (retval < 0)
+ return retval;
+ retval &= sda->index;
+ return sprintf(buf, "%d\n", retval ? 1 : 0);
+}
+
+static ssize_t store_temp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int retval;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ retval = i2c_smbus_write_byte_data(client, sda->index,
+ DIV_ROUND_CLOSEST(val, 1000));
+ if (retval < 0)
+ return retval;
+ return count;
+}
+
+static ssize_t show_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ return retval;
+
+ if (time_after(jiffies, data->hyst_valid)) {
+ hyst = i2c_smbus_read_byte_data(client, 0x21);
+ if (hyst < 0)
+			return hyst;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+ return sprintf(buf, "%d000\n", retval - data->cached_hyst);
+}
+
+static ssize_t store_hyst(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ goto fail;
+
+	hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+ goto fail;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
+ if (retval == 0) {
+ retval = count;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ return retval;
+}
+
+/*
+ * Sensors. We pass the actual i2c register to the methods.
+ */
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x06);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x05);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x20);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00);
+static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x01);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x20);
+
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x08);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x07);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x19);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x02);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x19);
+
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x16);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x15);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x1A);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23);
+static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x04);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x1A);
+
+static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group m_thermal_gr = {
+ .attrs = mid_att_thermal
+};
+
+static int emc1403_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int id;
+ /* Check if thermal chip is SMSC and EMC1403 */
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
+ if (id != 0x5d)
+ return -ENODEV;
+
+ /* Note: 0x25 is the 1404 which is very similar and this
+ driver could be extended */
+ id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+ if (id != 0x21)
+ return -ENODEV;
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+ if (id != 0x01)
+ return -ENODEV;
+
+ strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int emc1403_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct thermal_data *data;
+
+ data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+ if (data == NULL) {
+		dev_warn(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->mutex);
+ data->hyst_valid = jiffies - 1; /* Expired */
+
+ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
+ if (res) {
+ dev_warn(&client->dev, "create group failed\n");
+ goto thermal_error1;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ res = PTR_ERR(data->hwmon_dev);
+ dev_warn(&client->dev, "register hwmon dev failed\n");
+ goto thermal_error2;
+ }
+ dev_info(&client->dev, "EMC1403 Thermal chip found\n");
+ return res;
+
+thermal_error2:
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+thermal_error1:
+ kfree(data);
+ return res;
+}
+
+static int emc1403_remove(struct i2c_client *client)
+{
+ struct thermal_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ kfree(data);
+ return 0;
+}
+
+static const unsigned short emc1403_address_list[] = {
+ 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+};
+
+static const struct i2c_device_id emc1403_idtable[] = {
+ { "emc1403", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
+
+static struct i2c_driver sensor_emc1403 = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc1403",
+ },
+ .detect = emc1403_detect,
+ .probe = emc1403_probe,
+ .remove = emc1403_remove,
+ .id_table = emc1403_idtable,
+ .address_list = emc1403_address_list,
+};
+
+static int __init sensor_emc1403_init(void)
+{
+ return i2c_add_driver(&sensor_emc1403);
+}
+
+static void __exit sensor_emc1403_exit(void)
+{
+ i2c_del_driver(&sensor_emc1403);
+}
+
+module_init(sensor_emc1403_init);
+module_exit(sensor_emc1403_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("emc1403 Thermal Driver");
+MODULE_LICENSE("GPL v2");
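
The new emc1403 driver above reports temperatures by printing the chip's whole-degree register value with a "000" suffix, and derives temp*_crit_hyst as the critical limit minus the shared hysteresis register (0x21), cached for one second. A minimal sketch of that arithmetic, with hypothetical helper names that are not part of the driver:

#include <linux/kernel.h>

/* Hypothetical helpers illustrating the value handling used above. */
static inline int emc_reg_to_mC(int reg)
{
	return reg * 1000;			/* same result as the "%d000" format */
}

static inline int emc_crit_hyst_mC(int crit_reg, int hyst_reg)
{
	return (crit_reg - hyst_reg) * 1000;	/* e.g. crit 85, hyst 10 -> 75000 */
}

Writing temp*_crit_hyst does the inverse: store_hyst() converts (critical limit * 1000 - new value) back to whole degrees and rejects anything outside 0..255 with -ERANGE.
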
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a95fa4256ca..537841ef44b 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg)
static int superio_inw(int base, int reg)
{
int val;
- outb(reg++, base);
- val = inb(base + 1) << 8;
- outb(reg, base);
- val |= inb(base + 1);
+ val = superio_inb(base, reg) << 8;
+ val |= superio_inb(base, reg + 1);
return val;
}
static inline void superio_enter(int base)
{
	/* according to the datasheet the key must be sent twice! */
- outb( SIO_UNLOCK_KEY, base);
- outb( SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
}
-static inline void superio_select( int base, int ld)
+static inline void superio_select(int base, int ld)
{
outb(SIO_REG_LDSEL, base);
outb(ld, base + 1);
@@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
{
u16 val;
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET) << 8;
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val |= inb(data->addr + DATA_REG_OFFSET);
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
return val;
}
@@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
{
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- outb(val >> 8, data->addr + DATA_REG_OFFSET);
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val & 255, data->addr + DATA_REG_OFFSET);
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
}
static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
@@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
- if ( time_after(jiffies, data->last_limits + 60 * HZ ) ||
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
if (data->type == f71882fg || data->type == f71889fg) {
data->in1_max =
@@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
val = SENSORS_LIMIT(val, 23, 1500000);
val = fan_to_reg(val);
@@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
@@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- long val = simple_strtol(buf, NULL, 10) / 8;
+ int err;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
@@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
ssize_t ret = count;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
@@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
@@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev,
size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
/* Special case for F8000 pwm channel 3 which only does auto mode */
if (data->type == f8000 && nr == 2 && val != 2)
@@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
@@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->pwm_auto_point_mapping[nr] =
@@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
switch (val) {
case 1:
@@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
if (data->type == f71889fg)
val = SENSORS_LIMIT(val, -128, 127);
@@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
int err = -ENODEV;
u16 devid;
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_region(sioaddr, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ (int)sioaddr);
+ return -EBUSY;
+ }
+
superio_enter(sioaddr);
devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
- if (*address == 0)
- {
+ if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
goto exit;
}
@@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
+ release_region(sioaddr, 2);
return err;
}
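
Every f71882fg store callback above moves from simple_strtol()/simple_strtoul() to strict_strtol()/strict_strtoul(), which reject trailing garbage and return an error instead of silently parsing a numeric prefix. Condensed to its essentials, the pattern looks like this (store_example and the final register write are placeholders, not code from the driver):

#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>

static ssize_t store_example(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	long val;
	int err = strict_strtol(buf, 10, &val);	/* 0 on success, -errno otherwise */

	if (err)
		return err;

	val = SENSORS_LIMIT(val, 0, 255);	/* clamp before touching the chip */
	/* ... write val to the device under the update lock ... */
	return count;
}
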
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 0627f7a5b9b..b7ca2a9676c 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -38,6 +38,7 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/smp_lock.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
@@ -847,8 +848,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
@@ -858,6 +858,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int i, ret = 0;
struct fschmd_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
ident.firmware_version = data->revision;
@@ -914,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -924,7 +925,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_release,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index b2f2277cad3..6138f036b15 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -41,6 +41,8 @@
/* joystick device poll interval in milliseconds */
#define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN 0
+#define MDPS_POLL_MAX 2000
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
* because they are generated even if the data do not change. So it's better
@@ -121,11 +123,9 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
int position[3];
int i;
- mutex_lock(&lis3->mutex);
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
- mutex_unlock(&lis3->mutex);
for (i = 0; i < 3; i++)
position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -249,8 +249,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+ int x, y, z;
+
+ mutex_lock(&lis3_dev.mutex);
+ lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
+ mutex_unlock(&lis3_dev.mutex);
+}
+
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
+ if (!test_bit(0, &lis3_dev.misc_opened))
+ goto out;
+
/*
* Be careful: on some HP laptops the bios force DD when on battery and
* the lid is closed. This leads to interrupts as soon as a little move
@@ -260,44 +276,93 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+out:
+ if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ lis3_dev.idev->input->users)
+ return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
-static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- int ret;
+ struct input_dev *dev = lis3->idev->input;
+ u8 click_src;
- if (test_and_set_bit(0, &lis3_dev.misc_opened))
- return -EBUSY; /* already open */
+ mutex_lock(&lis3->mutex);
+ lis3->read(lis3, CLICK_SRC, &click_src);
- atomic_set(&lis3_dev.count, 0);
+ if (click_src & CLICK_SINGLE_X) {
+ input_report_key(dev, lis3->mapped_btns[0], 1);
+ input_report_key(dev, lis3->mapped_btns[0], 0);
+ }
- /*
- * The sensor can generate interrupts for free-fall and direction
- * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
- * the things simple and _fast_ we activate it only for free-fall, so
- * no need to read register (very slow with ACPI). For the same reason,
- * we forbid shared interrupts.
- *
- * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
- * io-apic is not configurable (and generates a warning) but I keep it
- * in case of support for other hardware.
- */
- ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING,
- DRIVER_NAME, &lis3_dev);
+ if (click_src & CLICK_SINGLE_Y) {
+ input_report_key(dev, lis3->mapped_btns[1], 1);
+ input_report_key(dev, lis3->mapped_btns[1], 0);
+ }
- if (ret) {
- clear_bit(0, &lis3_dev.misc_opened);
- printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
- return -EBUSY;
+ if (click_src & CLICK_SINGLE_Z) {
+ input_report_key(dev, lis3->mapped_btns[2], 1);
+ input_report_key(dev, lis3->mapped_btns[2], 0);
}
+ input_sync(dev);
+ mutex_unlock(&lis3->mutex);
+}
+
+static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+{
+ u8 wu1_src;
+ u8 wu2_src;
+
+ lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
+ lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+
+ wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
+ wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
+
+ /* joystick poll is internally protected by the lis3->mutex. */
+ if (wu1_src || wu2_src)
+ lis3lv02d_joystick_poll(lis3_dev.idev);
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &lis3_dev.misc_opened))
+ return -EBUSY; /* already open */
+
+ atomic_set(&lis3_dev.count, 0);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
fasync_helper(-1, file, 0, &lis3_dev.async_queue);
- free_irq(lis3_dev.irq, &lis3_dev);
clear_bit(0, &lis3_dev.misc_opened); /* release the device */
return 0;
}
@@ -380,22 +445,12 @@ static struct miscdevice lis3lv02d_misc_device = {
.fops = &lis3lv02d_misc_fops,
};
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
-{
- int x, y, z;
-
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
-}
-
int lis3lv02d_joystick_enable(void)
{
struct input_dev *input_dev;
int err;
int max_val, fuzz, flat;
+ int btns[] = {BTN_X, BTN_Y, BTN_Z};
if (lis3_dev.idev)
return -EINVAL;
@@ -406,6 +461,8 @@ int lis3lv02d_joystick_enable(void)
lis3_dev.idev->poll = lis3lv02d_joystick_poll;
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
input_dev = lis3_dev.idev->input;
input_dev->name = "ST LIS3LV02DL Accelerometer";
@@ -422,6 +479,10 @@ int lis3lv02d_joystick_enable(void)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
+ lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
+ lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+
err = input_register_polled_device(lis3_dev.idev);
if (err) {
input_free_polled_device(lis3_dev.idev);
@@ -434,6 +495,11 @@ EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
void lis3lv02d_joystick_disable(void)
{
+ if (lis3_dev.irq)
+ free_irq(lis3_dev.irq, &lis3_dev);
+ if (lis3_dev.pdata && lis3_dev.pdata->irq2)
+ free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+
if (!lis3_dev.idev)
return;
@@ -462,7 +528,9 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
{
int x, y, z;
+ mutex_lock(&lis3_dev.mutex);
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ mutex_unlock(&lis3_dev.mutex);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
@@ -521,12 +589,70 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
+static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+ struct lis3lv02d_platform_data *p)
+{
+ int err;
+ int ctrl2 = p->hipass_ctrl;
+
+ if (p->click_flags) {
+ dev->write(dev, CLICK_CFG, p->click_flags);
+ dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+ dev->write(dev, CLICK_LATENCY, p->click_latency);
+ dev->write(dev, CLICK_WINDOW, p->click_window);
+ dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+ dev->write(dev, CLICK_THSY_X,
+ (p->click_thresh_x & 0xf) |
+ (p->click_thresh_y << 4));
+
+ if (dev->idev) {
+ struct input_dev *input_dev = lis3_dev.idev->input;
+ input_set_capability(input_dev, EV_KEY, BTN_X);
+ input_set_capability(input_dev, EV_KEY, BTN_Y);
+ input_set_capability(input_dev, EV_KEY, BTN_Z);
+ }
+ }
+
+ if (p->wakeup_flags) {
+ dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+ dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_1, 1);
+		ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata */
+ }
+
+ if (p->wakeup_flags2) {
+ dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
+ dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_2, 1);
+		ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata */
+ }
+ /* Configure hipass filters */
+ dev->write(dev, CTRL_REG2, ctrl2);
+
+ if (p->irq2) {
+ err = request_threaded_irq(p->irq2,
+ NULL,
+ lis302dl_interrupt_thread2_8b,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+ if (err < 0)
+ printk(KERN_ERR DRIVER_NAME
+			       ": No second IRQ. Limited functionality\n");
+ }
+}
+
/*
* Initialise the accelerometer and the various subsystems.
* Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *dev)
{
+ int err;
+ irq_handler_t thread_fn;
+
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
switch (dev->whoami) {
@@ -567,25 +693,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->pdata) {
struct lis3lv02d_platform_data *p = dev->pdata;
- if (p->click_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, CLICK_CFG, p->click_flags);
- dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
- dev->write(dev, CLICK_LATENCY, p->click_latency);
- dev->write(dev, CLICK_WINDOW, p->click_window);
- dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
- dev->write(dev, CLICK_THSY_X,
- (p->click_thresh_x & 0xf) |
- (p->click_thresh_y << 4));
- }
-
- if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
- dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
- /* default to 2.5ms for now */
- dev->write(dev, FF_WU_DURATION_1, 1);
- /* enable high pass filter for both free-fall units */
- dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
- }
+ if (dev->whoami == WAI_8B)
+ lis3lv02d_8b_configure(dev, p);
if (p->irq_cfg)
dev->write(dev, CTRL_REG3, p->irq_cfg);
@@ -598,6 +707,32 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
goto out;
}
+ /*
+ * The sensor can generate interrupts for free-fall and direction
+ * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+ * the things simple and _fast_ we activate it only for free-fall, so
+ * no need to read register (very slow with ACPI). For the same reason,
+ * we forbid shared interrupts.
+ *
+ * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+ * io-apic is not configurable (and generates a warning) but I keep it
+ * in case of support for other hardware.
+ */
+ if (dev->whoami == WAI_8B)
+ thread_fn = lis302dl_interrupt_thread1_8b;
+ else
+ thread_fn = NULL;
+
+ err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+ thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+
+ if (err < 0) {
+		printk(KERN_ERR DRIVER_NAME ": Cannot get IRQ\n");
+ goto out;
+ }
+
if (misc_register(&lis3lv02d_misc_device))
printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
out:
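
The lis3lv02d rework above takes the slow register reads out of the hard interrupt handler: the top half only wakes the misc-device readers and returns IRQ_WAKE_THREAD when the 8-bit parts need click or wake-up processing, and request_threaded_irq() with IRQF_ONESHOT supplies the thread that is allowed to sleep. The split, reduced to a skeleton (the names here are placeholders, not driver functions):

#include <linux/interrupt.h>

static irqreturn_t example_quick_check(int irq, void *data)
{
	/* hard IRQ context: no bus traffic, just decide whether work is needed */
	return IRQ_WAKE_THREAD;		/* or IRQ_HANDLED if there is nothing to do */
}

static irqreturn_t example_thread_fn(int irq, void *data)
{
	/* process context: safe to read the (slow) source registers here */
	return IRQ_HANDLED;
}

/* request_threaded_irq(irq, example_quick_check, example_thread_fn,
 *			IRQF_TRIGGER_RISING | IRQF_ONESHOT, "example", data); */
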
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e6a01f44709..854091380e3 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -196,6 +196,16 @@ enum lis3lv02d_dd_src {
DD_SRC_IA = 0x40,
};
+enum lis3lv02d_click_src_8b {
+ CLICK_SINGLE_X = 0x01,
+ CLICK_DOUBLE_X = 0x02,
+ CLICK_SINGLE_Y = 0x04,
+ CLICK_DOUBLE_Y = 0x08,
+ CLICK_SINGLE_Z = 0x10,
+ CLICK_DOUBLE_Z = 0x20,
+ CLICK_IA = 0x40,
+};
+
struct axis_conversion {
s8 x;
s8 y;
@@ -223,6 +233,7 @@ struct lis3lv02d {
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
struct axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
u32 irq; /* IRQ number */
struct fasync_struct *async_queue; /* queue for the misc device */
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index bf81aff7051..776aeb3019d 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
* Address is fully defined internally and cannot be changed.
*/
-static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
/*
* The LM63 registers
@@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
+enum chips { lm63, lm64 };
+
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", 0 },
+ { "lm63", lm63 },
+ { "lm64", lm64 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
u8 man_id, chip_id, reg_config1, reg_config2;
u8 reg_alert_status, reg_alert_mask;
+ int address = new_client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client,
LM63_REG_ALERT_MASK);
if (man_id != 0x01 /* National Semiconductor */
- || chip_id != 0x41 /* LM63 */
|| (reg_config1 & 0x18) != 0x00
|| (reg_config2 & 0xF8) != 0x00
|| (reg_alert_status & 0x20) != 0x00
@@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ if (chip_id == 0x41 && address == 0x4c)
+ strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
+ strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else
+ return -ENODEV;
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 8ae2cfe2d82..56463428a41 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tcn75,
tmp100,
tmp101,
+ tmp105,
tmp175,
tmp275,
tmp75,
@@ -220,6 +221,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tcn75", tcn75, },
{ "tmp100", tmp100, },
{ "tmp101", tmp101, },
+ { "tmp105", tmp105, },
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7cc2708871a..760ef72eea5 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
+ || !data->valid) {
u8 h, l;
dev_dbg(&client->dev, "Updating lm90 data.\n");
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 65c232a9d0c..21d201befc2 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
LTC4245_VEEIN = 0x19,
LTC4245_VEESENSE = 0x1a,
LTC4245_VEEOUT = 0x1b,
- LTC4245_GPIOADC1 = 0x1c,
- LTC4245_GPIOADC2 = 0x1d,
- LTC4245_GPIOADC3 = 0x1e,
+ LTC4245_GPIOADC = 0x1c,
};
struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
u8 cregs[0x08];
/* Voltage registers */
- u8 vregs[0x0f];
+ u8 vregs[0x0d];
};
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->cregs[i] = val;
}
- /* Read voltage registers -- 0x10 to 0x1f */
+ /* Read voltage registers -- 0x10 to 0x1c */
for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
val = i2c_smbus_read_byte_data(client, i+0x10);
if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
case LTC4245_VEEOUT:
voltage = regval * -55;
break;
- case LTC4245_GPIOADC1:
- case LTC4245_GPIOADC2:
- case LTC4245_GPIOADC3:
+ case LTC4245_GPIOADC:
voltage = regval * 10;
break;
default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
+LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
&sensor_dev_attr_in8_min_alarm.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power2_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
new file mode 100644
index 00000000000..8013895a1fa
--- /dev/null
+++ b/drivers/hwmon/tmp102.c
@@ -0,0 +1,321 @@
+/* Texas Instruments TMP102 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+#define DRIVER_NAME "tmp102"
+
+#define TMP102_TEMP_REG 0x00
+#define TMP102_CONF_REG 0x01
+/* note: these bit definitions are byte swapped */
+#define TMP102_CONF_SD 0x0100
+#define TMP102_CONF_TM 0x0200
+#define TMP102_CONF_POL 0x0400
+#define TMP102_CONF_F0 0x0800
+#define TMP102_CONF_F1 0x1000
+#define TMP102_CONF_R0 0x2000
+#define TMP102_CONF_R1 0x4000
+#define TMP102_CONF_OS 0x8000
+#define TMP102_CONF_EM 0x0010
+#define TMP102_CONF_AL 0x0020
+#define TMP102_CONF_CR0 0x0040
+#define TMP102_CONF_CR1 0x0080
+#define TMP102_TLOW_REG 0x02
+#define TMP102_THIGH_REG 0x03
+
+struct tmp102 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u16 config_orig;
+ unsigned long last_update;
+ int temp[3];
+};
+
+/* SMBus specifies low byte first, but the TMP102 returns high byte first,
+ * so we have to swab16 the values */
+static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
+{
+ int result = i2c_smbus_read_word_data(client, reg);
+ return result < 0 ? result : swab16(result);
+}
+
+static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
+
+/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
+static inline int tmp102_reg_to_mC(s16 val)
+{
+ return ((val & ~0x01) * 1000) / 128;
+}
+
+/* convert milliCelsius to left adjusted 13-bit TMP102 register value */
+static inline u16 tmp102_mC_to_reg(int val)
+{
+ return (val * 128) / 1000;
+}
+
+static const u8 tmp102_reg[] = {
+ TMP102_TEMP_REG,
+ TMP102_TLOW_REG,
+ TMP102_THIGH_REG,
+};
+
+static struct tmp102 *tmp102_update_device(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ mutex_lock(&tmp102->lock);
+ if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
+ int status = tmp102_read_reg(client, tmp102_reg[i]);
+ if (status > -1)
+ tmp102->temp[i] = tmp102_reg_to_mC(status);
+ }
+ tmp102->last_update = jiffies;
+ }
+ mutex_unlock(&tmp102->lock);
+ return tmp102;
+}
+
+static ssize_t tmp102_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
+
+ return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+}
+
+static ssize_t tmp102_set_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ long val;
+ int status;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ val = SENSORS_LIMIT(val, -256000, 255000);
+
+ mutex_lock(&tmp102->lock);
+ tmp102->temp[sda->index] = val;
+ status = tmp102_write_reg(client, tmp102_reg[sda->index],
+ tmp102_mC_to_reg(val));
+ mutex_unlock(&tmp102->lock);
+ return status ? : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 1);
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 2);
+
+static struct attribute *tmp102_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp102_attr_group = {
+ .attrs = tmp102_attributes,
+};
+
+#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
+#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+
+static int __devinit tmp102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tmp102 *tmp102;
+ int status;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(&client->dev, "adapter doesn't support SMBus word "
+ "transactions\n");
+ return -ENODEV;
+ }
+
+ tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
+ if (!tmp102) {
+ dev_dbg(&client->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, tmp102);
+
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_free;
+ }
+ tmp102->config_orig = status;
+ status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
+ if (status < 0) {
+ dev_err(&client->dev, "error writing config register\n");
+ goto fail_restore_config;
+ }
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_restore_config;
+ }
+ status &= ~TMP102_CONFIG_RD_ONLY;
+ if (status != TMP102_CONFIG) {
+ dev_err(&client->dev, "config settings did not stick\n");
+ status = -ENODEV;
+ goto fail_restore_config;
+ }
+ tmp102->last_update = jiffies - HZ;
+ mutex_init(&tmp102->lock);
+
+ status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group);
+ if (status) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ goto fail_restore_config;
+ }
+ tmp102->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(tmp102->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ status = PTR_ERR(tmp102->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ dev_info(&client->dev, "initialized\n");
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+fail_restore_config:
+ tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
+fail_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(tmp102);
+
+ return status;
+}
+
+static int __devexit tmp102_remove(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(tmp102->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+
+ /* Stop monitoring if device was stopped originally */
+ if (tmp102->config_orig & TMP102_CONF_SD) {
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config >= 0)
+ tmp102_write_reg(client, TMP102_CONF_REG,
+ config | TMP102_CONF_SD);
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(tmp102);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmp102_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config |= TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static int tmp102_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config &= ~TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static const struct dev_pm_ops tmp102_dev_pm_ops = {
+ .suspend = tmp102_suspend,
+ .resume = tmp102_resume,
+};
+
+#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
+#else
+#define TMP102_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id tmp102_id[] = {
+ { "tmp102", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp102_id);
+
+static struct i2c_driver tmp102_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.pm = TMP102_DEV_PM_OPS,
+ .probe = tmp102_probe,
+ .remove = __devexit_p(tmp102_remove),
+ .id_table = tmp102_id,
+};
+
+static int __init tmp102_init(void)
+{
+ return i2c_add_driver(&tmp102_driver);
+}
+module_init(tmp102_init);
+
+static void __exit tmp102_exit(void)
+{
+ i2c_del_driver(&tmp102_driver);
+}
+module_exit(tmp102_exit);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver");
+MODULE_LICENSE("GPL");
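
tmp102.c above puts the chip into 13-bit extended mode (TMP102_CONF_EM) and byte-swaps every SMBus word because the sensor transmits the high byte first while SMBus word transfers are defined low byte first. A worked example of the two conversion helpers, using an assumed ambient reading of 25 degrees C:

/* Worked example (the raw value is chosen purely for illustration):
 * i2c_smbus_read_word_data() returns 0x800C (SMBus order, low byte first);
 * swab16(0x800C) = 0x0C80 (device order, high byte first);
 * tmp102_reg_to_mC(0x0C80) = (0x0C80 & ~0x01) * 1000 / 128 = 25000 mC = 25 C;
 * tmp102_mC_to_reg(25000)  = 25000 * 128 / 1000 = 0x0C80 again.
 */
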
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index d14a1af9f55..ad8d535235c 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
#define TMP411_DEVICE_ID 0x12
/*
- * Functions declarations
- */
-
-static int tmp401_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int tmp401_remove(struct i2c_client *client);
-static struct tmp401_data *tmp401_update_device(struct device *dev);
-
-/*
* Driver data (common to all clients)
*/
@@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
-static struct i2c_driver tmp401_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "tmp401",
- },
- .probe = tmp401_probe,
- .remove = tmp401_remove,
- .id_table = tmp401_id,
- .detect = tmp401_detect,
- .address_list = normal_i2c,
-};
-
/*
* Client data (each client gets its own)
*/
@@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config)
return (temp + 500) / 1000;
}
+static struct tmp401_data *tmp401_update_device_reg16(
+ struct i2c_client *client, struct tmp401_data *data)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /*
+ * High byte must be read first immediately followed
+ * by the low byte
+ */
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LSB[i]);
+ data->temp_low[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+ data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_LSB[i]);
+ data->temp_high[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+ data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+ data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_LIMIT[i]);
+
+ if (data->kind == tmp411) {
+ data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+ TMP411_TEMP_LOWEST_MSB[i]) << 8;
+ data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_LOWEST_LSB[i]);
+
+ data->temp_highest[i] = i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+ data->temp_highest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_LSB[i]);
+ }
+ }
+ return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp401_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP401_CONFIG_READ);
+ tmp401_update_device_reg16(client, data);
+
+ data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_HYST);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t show_temp_value(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev,
}
static struct sensor_device_attribute tmp401_attr[] = {
- SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
- SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
- SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
- SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
- SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 0),
+ SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 0),
+ SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 0),
+ SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst,
store_temp_crit_hyst, 0),
- SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_LOW),
- SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_HIGH),
- SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_CRIT),
- SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
- SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
- SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
- SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
- SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
- SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 1),
+ SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 1),
+ SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 1),
+ SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
+ SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_OPEN),
- SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_LOW),
- SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_HIGH),
- SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_CRIT),
};
@@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = {
* and remote channels.
*/
static struct sensor_device_attribute tmp411_attr[] = {
- SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
- SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
- SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
- SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
- SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+ SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0),
+ SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0),
+ SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1),
+ SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1),
+ SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0),
};
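For reference, the symbolic mode macros substituted into the attribute tables above expand to exactly the octal values the driver used before (definitions from <linux/stat.h>):

	S_IRUGO            == 0444	/* read for user, group and other */
	S_IWUSR | S_IRUGO  == 0644	/* owner write, world read */
	S_IWUSR            == 0200	/* owner write only */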
/*
@@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
+static int tmp401_remove(struct i2c_client *client)
+{
+ struct tmp401_data *data = i2c_get_clientdata(client);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+ device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+ device_remove_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ }
+
+ kfree(data);
+ return 0;
+}
+
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -581,91 +650,17 @@ exit_remove:
return err;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct tmp401_data *data = i2c_get_clientdata(client);
- int i;
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
- device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
-
- if (data->kind == tmp411) {
- for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
- device_remove_file(&client->dev,
- &tmp411_attr[i].dev_attr);
- }
-
- kfree(data);
- return 0;
-}
-
-static struct tmp401_data *tmp401_update_device_reg16(
- struct i2c_client *client, struct tmp401_data *data)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- /*
- * High byte must be read first immediately followed
- * by the low byte
- */
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LSB[i]);
- data->temp_low[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
- data->temp_low[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_LSB[i]);
- data->temp_high[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
- data->temp_high[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_LSB[i]);
- data->temp_crit[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_LIMIT[i]);
-
- if (data->kind == tmp411) {
- data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
- TMP411_TEMP_LOWEST_MSB[i]) << 8;
- data->temp_lowest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_LOWEST_LSB[i]);
-
- data->temp_highest[i] = i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
- data->temp_highest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_LSB[i]);
- }
- }
- return data;
-}
-
-static struct tmp401_data *tmp401_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
- data->config = i2c_smbus_read_byte_data(client,
- TMP401_CONFIG_READ);
- tmp401_update_device_reg16(client, data);
-
- data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_HYST);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
+ },
+ .probe = tmp401_probe,
+ .remove = tmp401_remove,
+ .id_table = tmp401_id,
+ .detect = tmp401_detect,
+ .address_list = normal_i2c,
+};
static int __init tmp401_init(void)
{
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 68e90abeba9..5da5942cf97 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -300,8 +300,11 @@ static const struct of_device_id env_match[] = {
MODULE_DEVICE_TABLE(of, env_match);
static struct of_platform_driver env_driver = {
- .name = "ultra45_env",
- .match_table = env_match,
+ .driver = {
+ .name = "ultra45_env",
+ .owner = THIS_MODULE,
+ .of_match_table = env_match,
+ },
.probe = env_probe,
.remove = __devexit_p(env_remove),
};
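The same of_platform_driver conversion recurs in several i2c bus drivers further down (i2c-cpm, i2c-ibm_iic, i2c-mpc): the match table moves into the embedded struct device_driver as .of_match_table, and probe code reaches the device node through ofdev->dev.of_node rather than the old ofdev->node field. A minimal sketch of the resulting shape, using hypothetical example_* names:

#include <linux/of_platform.h>
#include <linux/module.h>

static const struct of_device_id example_match[] = {
	{ .compatible = "acme,example" },	/* hypothetical compatible string */
	{}
};

static int __devinit example_probe(struct of_device *ofdev,
				   const struct of_device_id *match)
{
	/* the device node now lives in the embedded struct device */
	struct device_node *np = ofdev->dev.of_node;

	return np ? 0 : -ENODEV;
}

static struct of_platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
	.probe	= example_probe,
};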
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 612807d9715..697202e2789 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
+#include <linux/smp_lock.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
@@ -1319,8 +1320,8 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
@@ -1332,6 +1333,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int val, ret = 0;
struct w83793_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
@@ -1385,7 +1387,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -1395,7 +1397,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
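The watchdog change above is the usual big-kernel-lock pushdown: .ioctl becomes .unlocked_ioctl, the inode argument goes away, and the handler takes and drops the BKL itself around its command dispatch. A minimal sketch with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/smp_lock.h>

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	long ret;

	lock_kernel();
	switch (cmd) {
	default:
		/* a real handler dispatches on cmd here */
		ret = -ENOTTY;
	}
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.unlocked_ioctl	= example_ioctl,
};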
/*
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dcdaf8e675b..2b9a8f54bb2 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -109,13 +109,13 @@ static void pca_stop(struct i2c_algo_pca_data *adap)
* returns after the address has been sent
*/
static int pca_address(struct i2c_algo_pca_data *adap,
- struct i2c_msg *msg)
+ struct i2c_msg *msg)
{
int sta = pca_get_con(adap);
int addr;
- addr = ( (0x7f & msg->addr) << 1 );
- if (msg->flags & I2C_M_RD )
+ addr = ((0x7f & msg->addr) << 1);
+ if (msg->flags & I2C_M_RD)
addr |= 1;
DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
@@ -134,7 +134,7 @@ static int pca_address(struct i2c_algo_pca_data *adap,
* Returns after the byte has been transmitted
*/
static int pca_tx_byte(struct i2c_algo_pca_data *adap,
- __u8 b)
+ __u8 b)
{
int sta = pca_get_con(adap);
DEB2("=== WRITE %#04x\n", b);
@@ -164,13 +164,13 @@ static void pca_rx_byte(struct i2c_algo_pca_data *adap,
* Returns after next byte has arrived.
*/
static int pca_rx_ack(struct i2c_algo_pca_data *adap,
- int ack)
+ int ack)
{
int sta = pca_get_con(adap);
sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA);
- if ( ack )
+ if (ack)
sta |= I2C_PCA_CON_AA;
pca_set_con(adap, sta);
@@ -178,12 +178,12 @@ static int pca_rx_ack(struct i2c_algo_pca_data *adap,
}
static int pca_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs,
- int num)
+ struct i2c_msg *msgs,
+ int num)
{
- struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
- struct i2c_msg *msg = NULL;
- int curmsg;
+ struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
+ struct i2c_msg *msg = NULL;
+ int curmsg;
int numbytes = 0;
int state;
int ret;
@@ -202,21 +202,21 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB1("{{{ XFER %d messages\n", num);
- if (i2c_debug>=2) {
+ if (i2c_debug >= 2) {
for (curmsg = 0; curmsg < num; curmsg++) {
int addr, i;
msg = &msgs[curmsg];
addr = (0x7f & msg->addr) ;
- if (msg->flags & I2C_M_RD )
+ if (msg->flags & I2C_M_RD)
printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
- curmsg, msg->len, addr, (addr<<1) | 1);
+ curmsg, msg->len, addr, (addr << 1) | 1);
else {
printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s",
- curmsg, msg->len, addr, addr<<1,
+ curmsg, msg->len, addr, addr << 1,
msg->len == 0 ? "" : ", ");
- for(i=0; i < msg->len; i++)
+ for (i = 0; i < msg->len; i++)
printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", ");
printk("]\n");
}
@@ -305,7 +305,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
goto out;
case 0x58: /* Data byte has been received; NOT ACK has been returned */
- if ( numbytes == msg->len - 1 ) {
+ if (numbytes == msg->len - 1) {
pca_rx_byte(adap, &msg->buf[numbytes], 0);
curmsg++; numbytes = 0;
if (curmsg == num)
@@ -352,7 +352,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
static u32 pca_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm pca_algo = {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9c6170cd9aa..87ab0568bb0 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,7 +564,7 @@ config I2C_STU300
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW
+ depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index bd8f1e4d9e6..906a3ca50db 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -60,7 +60,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI1535 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 659f63f5e4a..b14f6d68221 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -67,7 +67,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI15X3 SMBus address offsets */
#define SMBHSTSTS (0 + ali15x3_smba)
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index c5a9fa488e7..03bcd07c469 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -43,7 +43,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* AMD756 SMBus address offsets */
#define SMB_ADDR_OFFSET 0xE0
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2fbef27b6cd..af1e5e254b7 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Vojtech Pavlik <vojtech@suse.cz>");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 06e1ecb4919..305c07504f7 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -23,8 +23,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <mach/at91_twi.h>
#include <mach/board.h>
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index f1e14dd590c..fb26e5c6751 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -25,8 +25,6 @@
#include <asm/portmux.h>
#include <asm/irq.h>
-#define POLL_TIMEOUT (2 * HZ)
-
/* SMBus mode*/
#define TWI_I2C_MODE_STANDARD 1
#define TWI_I2C_MODE_STANDARDSUB 2
@@ -44,8 +42,6 @@ struct bfin_twi_iface {
int cur_mode;
int manual_stop;
int result;
- int timeout_count;
- struct timer_list timeout_timer;
struct i2c_adapter adap;
struct completion complete;
struct i2c_msg *pmsg;
@@ -85,14 +81,15 @@ static const u16 pin_req[2][3] = {
{P_TWI1_SCL, P_TWI1_SDA, 0},
};
-static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
+static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
+ unsigned short twi_int_status)
{
- unsigned short twi_int_status = read_INT_STAT(iface);
unsigned short mast_stat = read_MASTER_STAT(iface);
if (twi_int_status & XMTSERV) {
/* Transmit next data */
if (iface->writeNum > 0) {
+ SSYNC();
write_XMT_DATA8(iface, *(iface->transPtr++));
iface->writeNum--;
}
@@ -114,10 +111,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
}
- SSYNC();
- /* Clear status */
- write_INT_STAT(iface, XMTSERV);
- SSYNC();
}
if (twi_int_status & RCVSERV) {
if (iface->readNum > 0) {
@@ -139,7 +132,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
} else if (iface->manual_stop) {
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg + 1 < iface->msg_num) {
if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
@@ -148,44 +140,37 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
else
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
- SSYNC();
}
- /* Clear interrupt source */
- write_INT_STAT(iface, RCVSERV);
- SSYNC();
}
if (twi_int_status & MERR) {
- write_INT_STAT(iface, MERR);
write_INT_MASK(iface, 0);
write_MASTER_STAT(iface, 0x3e);
write_MASTER_CTL(iface, 0);
- SSYNC();
iface->result = -EIO;
- /* if both err and complete int stats are set, return proper
- * results.
+
+ if (mast_stat & LOSTARB)
+ dev_dbg(&iface->adap.dev, "Lost Arbitration\n");
+ if (mast_stat & ANAK)
+ dev_dbg(&iface->adap.dev, "Address Not Acknowledged\n");
+ if (mast_stat & DNAK)
+ dev_dbg(&iface->adap.dev, "Data Not Acknowledged\n");
+ if (mast_stat & BUFRDERR)
+ dev_dbg(&iface->adap.dev, "Buffer Read Error\n");
+ if (mast_stat & BUFWRERR)
+ dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
+
+ /* If it is a quick transfer, with address only and no data,
+ * it is not an error; return 1.
*/
- if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- write_INT_MASK(iface, 0);
- write_MASTER_CTL(iface, 0);
- SSYNC();
- /* If it is a quick transfer, only address bug no data,
- * not an err, return 1.
- */
- if (iface->writeNum == 0 && (mast_stat & BUFRDERR))
- iface->result = 1;
- /* If address not acknowledged return -1,
- * else return 0.
- */
- else if (!(mast_stat & ANAK))
- iface->result = 0;
- }
+ if (iface->cur_mode == TWI_I2C_MODE_STANDARD &&
+ iface->transPtr == NULL &&
+ (twi_int_status & MCOMP) && (mast_stat & DNAK))
+ iface->result = 1;
+
complete(&iface->complete);
return;
}
if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- SSYNC();
if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
@@ -207,7 +192,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg+1 < iface->msg_num) {
iface->cur_msg++;
@@ -226,7 +210,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_XMT_DATA8(iface,
*(iface->transPtr++));
iface->writeNum--;
- SSYNC();
}
}
@@ -244,15 +227,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else {
iface->result = 1;
write_INT_MASK(iface, 0);
write_MASTER_CTL(iface, 0);
- SSYNC();
- complete(&iface->complete);
}
}
+ complete(&iface->complete);
}
/* Interrupt handler */
@@ -260,38 +241,26 @@ static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id)
{
struct bfin_twi_iface *iface = dev_id;
unsigned long flags;
+ unsigned short twi_int_status;
spin_lock_irqsave(&iface->lock, flags);
- del_timer(&iface->timeout_timer);
- bfin_twi_handle_interrupt(iface);
- spin_unlock_irqrestore(&iface->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void bfin_twi_timeout(unsigned long data)
-{
- struct bfin_twi_iface *iface = (struct bfin_twi_iface *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&iface->lock, flags);
- bfin_twi_handle_interrupt(iface);
- if (iface->result == 0) {
- iface->timeout_count--;
- if (iface->timeout_count > 0) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- } else {
- iface->result = -1;
- complete(&iface->complete);
- }
+ while (1) {
+ twi_int_status = read_INT_STAT(iface);
+ if (!twi_int_status)
+ break;
+ /* Clear interrupt status */
+ write_INT_STAT(iface, twi_int_status);
+ bfin_twi_handle_interrupt(iface, twi_int_status);
+ SSYNC();
}
spin_unlock_irqrestore(&iface->lock, flags);
+ return IRQ_HANDLED;
}
/*
- * Generic i2c master transfer entrypoint
+ * One i2c master transfer
*/
-static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct bfin_twi_iface *iface = adap->algo_data;
@@ -319,7 +288,6 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->transPtr = pmsg->buf;
iface->writeNum = iface->readNum = pmsg->len;
iface->result = 0;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* Set Transmit device address */
write_MASTER_ADDR(iface, pmsg->addr);
@@ -358,30 +326,41 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->manual_stop = 1;
}
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
/* Master enable */
write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
SSYNC();
- wait_for_completion(&iface->complete);
-
- rc = iface->result;
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "master transfer timeout\n");
+ }
+ }
- if (rc == 1)
- return num;
+ if (iface->result == 1)
+ rc = iface->cur_msg + 1;
else
- return rc;
+ rc = iface->result;
+
+ return rc;
}
/*
- * SMBus type transfer entrypoint
+ * Generic i2c master transfer entrypoint
*/
+static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return bfin_twi_do_master_xfer(adap, msgs, num);
+}
-int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+/*
+ * One I2C SMBus transfer
+ */
+int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
@@ -469,7 +448,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
iface->manual_stop = 0;
iface->read_write = read_write;
iface->command = command;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* FIFO Initiation. Data in FIFO should be discarded before
@@ -486,9 +464,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
write_MASTER_ADDR(iface, addr);
SSYNC();
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
switch (iface->cur_mode) {
case TWI_I2C_MODE_STANDARDSUB:
write_XMT_DATA8(iface, iface->command);
@@ -550,10 +525,8 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
else if (iface->readNum > 255) {
write_MASTER_CTL(iface, 0xff << 6);
iface->manual_stop = 1;
- } else {
- del_timer(&iface->timeout_timer);
+ } else
break;
- }
}
}
write_INT_MASK(iface, MCOMP | MERR |
@@ -569,7 +542,13 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
SSYNC();
- wait_for_completion(&iface->complete);
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "smbus transfer timeout\n");
+ }
+ }
rc = (iface->result >= 0) ? 0 : -1;
@@ -577,6 +556,17 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
/*
+ * Generic I2C SMBus transfer entrypoint
+ */
+int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ return bfin_twi_do_smbus_xfer(adap, addr, flags,
+ read_write, command, size, data);
+}
+
+/*
* Return what the adapter supports
*/
static u32 bfin_twi_functionality(struct i2c_adapter *adap)
@@ -667,10 +657,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
goto out_error_no_irq;
}
- init_timer(&(iface->timeout_timer));
- iface->timeout_timer.function = bfin_twi_timeout;
- iface->timeout_timer.data = (unsigned long)iface;
-
p_adap = &iface->adap;
p_adap->nr = pdev->id;
strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
@@ -678,6 +664,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->algo_data = iface;
p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
p_adap->dev.parent = &pdev->dev;
+ p_adap->timeout = 5 * HZ;
+ p_adap->retries = 3;
rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
if (rc) {
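The bfin-twi rework above drops the driver's private timeout timer: the interrupt handler now drains INT_STAT in a loop and signals a completion, while the transfer paths bound their wait with the adapter timeout via wait_for_completion_timeout(), which returns 0 when the timeout expires. A stripped-down sketch of that wait loop, with a hypothetical example_iface standing in for bfin_twi_iface:

#include <linux/i2c.h>
#include <linux/completion.h>

struct example_iface {			/* hypothetical, mirrors bfin_twi_iface */
	int result;			/* 0 = in progress, 1 = done, <0 = error */
	struct completion complete;
};

static int example_wait_for_xfer(struct i2c_adapter *adap,
				 struct example_iface *iface)
{
	while (!iface->result) {
		if (!wait_for_completion_timeout(&iface->complete,
						 adap->timeout)) {
			iface->result = -1;
			dev_err(&adap->dev, "transfer timeout\n");
		}
	}
	return iface->result;
}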
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 9c2e10082b7..b02b4533651 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -440,8 +440,8 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
- cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL);
- if (cpm->irq == NO_IRQ)
+ cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ if (!cpm->irq)
return -EINVAL;
/* Install interrupt handler. */
@@ -451,13 +451,13 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
return ret;
/* I2C parameter RAM */
- i2c_base = of_iomap(ofdev->node, 1);
+ i2c_base = of_iomap(ofdev->dev.of_node, 1);
if (i2c_base == NULL) {
ret = -EINVAL;
goto out_irq;
}
- if (of_device_is_compatible(ofdev->node, "fsl,cpm1-i2c")) {
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) {
/* Check for and use a microcode relocation patch. */
cpm->i2c_ram = i2c_base;
@@ -474,7 +474,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
cpm->version = 1;
- } else if (of_device_is_compatible(ofdev->node, "fsl,cpm2-i2c")) {
+ } else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) {
cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64);
cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr);
out_be16(i2c_base, cpm->i2c_addr);
@@ -489,24 +489,24 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
}
/* I2C control/status registers */
- cpm->i2c_reg = of_iomap(ofdev->node, 0);
+ cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0);
if (cpm->i2c_reg == NULL) {
ret = -EINVAL;
goto out_ram;
}
- data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4) {
ret = -EINVAL;
goto out_reg;
}
cpm->cp_command = *data;
- data = of_get_property(ofdev->node, "linux,i2c-class", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len);
if (data && len == 4)
cpm->adap.class = *data;
- data = of_get_property(ofdev->node, "clock-frequency", &len);
+ data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len);
if (data && len == 4)
cpm->freq = *data;
else
@@ -661,7 +661,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/* register new adapter to i2c module... */
- data = of_get_property(ofdev->node, "linux,i2c-index", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
if (data && len == 4) {
cpm->adap.nr = *data;
result = i2c_add_numbered_adapter(&cpm->adap);
@@ -679,7 +679,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/*
* register OF I2C devices
*/
- of_register_i2c_devices(&cpm->adap, ofdev->node);
+ of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node);
return 0;
out_shut:
@@ -718,13 +718,13 @@ static const struct of_device_id cpm_i2c_match[] = {
MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct of_platform_driver cpm_i2c_driver = {
- .match_table = cpm_i2c_match,
.probe = cpm_i2c_probe,
.remove = __devexit_p(cpm_i2c_remove),
- .driver = {
- .name = "fsl-i2c-cpm",
- .owner = THIS_MODULE,
- }
+ .driver = {
+ .name = "fsl-i2c-cpm",
+ .owner = THIS_MODULE,
+ .of_match_table = cpm_i2c_match,
+ },
};
static int __init cpm_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 612255614a6..e5b1a3bf5b8 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -37,8 +37,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include "../algos/i2c-algo-pcf.h"
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c21077d248a..d9aa9a649e3 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -211,7 +211,7 @@ static int __init i2c_gpio_init(void)
return ret;
}
-module_init(i2c_gpio_init);
+subsys_initcall(i2c_gpio_init);
static void __exit i2c_gpio_exit(void)
{
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ce87a902c94..3df1bc80f37 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -282,7 +282,6 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
union i2c_smbus_data *data)
{
struct highlander_i2c_dev *dev = i2c_get_adapdata(adap);
- int read = read_write & I2C_SMBUS_READ;
u16 tmp;
init_completion(&dev->cmd_complete);
@@ -337,11 +336,11 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
highlander_i2c_done(dev);
/* Set slave address */
- iowrite16((addr << 1) | read, dev->base + SMSMADR);
+ iowrite16((addr << 1) | read_write, dev->base + SMSMADR);
highlander_i2c_command(dev, command, dev->buf_len);
- if (read)
+ if (read_write == I2C_SMBUS_READ)
return highlander_i2c_read(dev);
else
return highlander_i2c_write(dev);
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c767295ad1f..9ff1695d845 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299b918455a..f4b21f2bb8e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,17 @@ static struct pci_dev *I801_dev;
#define FEATURE_I2C_BLOCK_READ (1 << 3)
static unsigned int i801_features;
+static const char *i801_feature_names[] = {
+ "SMBus PEC",
+ "Block buffer",
+ "Block process call",
+ "I2C block read",
+};
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+
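Because the parameter is declared S_IRUGO | S_IWUSR, the mask can be set at load time or changed later through /sys/module/i2c_i801/parameters/disable_features. The bit positions follow the FEATURE_* flags (and the i801_feature_names[] order above), so, for example, loading the module with disable_features=0x08 would turn off I2C block read (FEATURE_I2C_BLOCK_READ is bit 3).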
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
static int i801_check_pre(void)
@@ -341,9 +352,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
do {
msleep(1);
status = inb_p(SMBHSTSTS);
- }
- while ((!(status & SMBHSTSTS_BYTE_DONE))
- && (timeout++ < MAX_TIMEOUT));
+ } while ((!(status & SMBHSTSTS_BYTE_DONE))
+ && (timeout++ < MAX_TIMEOUT));
result = i801_check_post(status, timeout > MAX_TIMEOUT);
if (result < 0)
@@ -440,9 +450,9 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data * data)
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
@@ -511,7 +521,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
else
outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
- if(block)
+ if (block)
ret = i801_block_transaction(data, read_write, size, hwpec);
else
ret = i801_transaction(xact | ENABLE_INT9);
@@ -523,9 +533,9 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
SMBAUXCTL);
- if(block)
+ if (block)
return ret;
- if(ret)
+ if (ret)
return ret;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
return 0;
@@ -585,7 +595,7 @@ static const struct pci_device_id i801_ids[] = {
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, i801_ids);
+MODULE_DEVICE_TABLE(pci, i801_ids);
#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
static unsigned char apanel_addr;
@@ -689,10 +699,11 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit i801_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
- int err;
+ int err, i;
#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
const char *vendor;
#endif
@@ -700,26 +711,28 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
I801_dev = dev;
i801_features = 0;
switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_82801EB_3:
- case PCI_DEVICE_ID_INTEL_ESB_4:
- case PCI_DEVICE_ID_INTEL_ICH6_16:
- case PCI_DEVICE_ID_INTEL_ICH7_17:
- case PCI_DEVICE_ID_INTEL_ESB2_17:
- case PCI_DEVICE_ID_INTEL_ICH8_5:
- case PCI_DEVICE_ID_INTEL_ICH9_6:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
- case PCI_DEVICE_ID_INTEL_ICH10_4:
- case PCI_DEVICE_ID_INTEL_ICH10_5:
- case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
- case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+ default:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
i801_features |= FEATURE_SMBUS_PEC;
i801_features |= FEATURE_BLOCK_BUFFER;
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_82801CA_3:
+ case PCI_DEVICE_ID_INTEL_82801BA_2:
+ case PCI_DEVICE_ID_INTEL_82801AB_3:
+ case PCI_DEVICE_ID_INTEL_82801AA_3:
break;
}
+ /* Disable features on user request */
+ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
+ if (i801_features & disable_features & (1 << i))
+ dev_notice(&dev->dev, "%s disabled by user\n",
+ i801_feature_names[i]);
+ }
+ i801_features &= ~disable_features;
+
err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index b1bc6e277d2..bf344135647 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/of_platform.h>
@@ -664,16 +664,16 @@ static inline u8 iic_clckdiv(unsigned int opb)
static int __devinit iic_request_irq(struct of_device *ofdev,
struct ibm_iic_private *dev)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
int irq;
if (iic_force_poll)
- return NO_IRQ;
+ return 0;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
- return NO_IRQ;
+ return 0;
}
/* Disable interrupts until we finish initialization, assumes
@@ -683,7 +683,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) {
dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
/* Fallback to the polling mode */
- return NO_IRQ;
+ return 0;
}
return irq;
@@ -695,7 +695,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
static int __devinit iic_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
struct i2c_adapter *adap;
const u32 *freq;
@@ -719,7 +719,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
init_waitqueue_head(&dev->wq);
dev->irq = iic_request_irq(ofdev, dev);
- if (dev->irq == NO_IRQ)
+ if (!dev->irq)
dev_warn(&ofdev->dev, "using polling mode\n");
/* Board specific settings */
@@ -766,7 +766,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
return 0;
error_cleanup:
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
@@ -790,7 +790,7 @@ static int __devexit iic_remove(struct of_device *ofdev)
i2c_del_adapter(&dev->adap);
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
@@ -807,8 +807,11 @@ static const struct of_device_id ibm_iic_match[] = {
};
static struct of_platform_driver ibm_iic_driver = {
- .name = "ibm-iic",
- .match_table = ibm_iic_match,
+ .driver = {
+ .name = "ibm-iic",
+ .owner = THIS_MODULE,
+ .of_match_table = ibm_iic_match,
+ },
.probe = iic_probe,
.remove = __devexit_p(iic_remove),
};
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 5901707fc66..112c61f7b8c 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -38,8 +38,7 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-iop3xx.h"
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index f1321f76378..df00eb1f11f 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -118,7 +118,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
u32 x;
int result = 0;
- if (i2c->irq == NO_IRQ) {
+ if (!i2c->irq) {
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
if (time_after(jiffies, orig_jiffies + timeout)) {
@@ -560,15 +560,15 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
init_waitqueue_head(&i2c->queue);
- i2c->base = of_iomap(op->node, 0);
+ i2c->base = of_iomap(op->dev.of_node, 0);
if (!i2c->base) {
dev_err(i2c->dev, "failed to map controller\n");
result = -ENOMEM;
goto fail_map;
}
- i2c->irq = irq_of_parse_and_map(op->node, 0);
- if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */
+ i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ if (i2c->irq) { /* no i2c->irq implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
if (result < 0) {
@@ -577,21 +577,22 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
}
}
- if (of_get_property(op->node, "fsl,preserve-clocking", NULL)) {
+ if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
- prop = of_get_property(op->node, "clock-frequency", &plen);
+ prop = of_get_property(op->dev.of_node, "clock-frequency",
+ &plen);
if (prop && plen == sizeof(u32))
clock = *prop;
}
if (match->data) {
struct mpc_i2c_data *data = match->data;
- data->setup(op->node, i2c, clock, data->prescaler);
+ data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
- if (of_get_property(op->node, "dfsrr", NULL))
- mpc_i2c_setup_8xxx(op->node, i2c, clock, 0);
+ if (of_get_property(op->dev.of_node, "dfsrr", NULL))
+ mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
}
dev_set_drvdata(&op->dev, i2c);
@@ -605,7 +606,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
- of_register_i2c_devices(&i2c->adap, op->node);
+ of_register_i2c_devices(&i2c->adap, op->dev.of_node);
return result;
@@ -627,7 +628,7 @@ static int __devexit fsl_i2c_remove(struct of_device *op)
i2c_del_adapter(&i2c->adap);
dev_set_drvdata(&op->dev, NULL);
- if (i2c->irq != NO_IRQ)
+ if (i2c->irq)
free_irq(i2c->irq, i2c);
irq_dispose_mapping(i2c->irq);
@@ -674,12 +675,12 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct of_platform_driver mpc_i2c_driver = {
- .match_table = mpc_i2c_of_match,
.probe = fsl_i2c_probe,
.remove = __devexit_p(fsl_i2c_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = mpc_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3623a449908..16242063144 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,8 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
/* Register defines */
#define MV64XXX_I2C_REG_SLAVE_ADDR 0x00
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4a48dd4ef78..a605a5029cf 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -57,7 +57,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
@@ -404,10 +404,9 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
/* SMBus adapter 1 */
res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1");
- if (res1 < 0) {
- dev_err(&dev->dev, "Error probing SMB1.\n");
+ if (res1 < 0)
smbuses[0].base = 0; /* to have a check value */
- }
+
/* SMBus adapter 2 */
if (dmi_check_system(nforce2_dmi_blacklist2)) {
dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n");
@@ -416,11 +415,10 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
} else {
res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1],
"SMB2");
- if (res2 < 0) {
- dev_err(&dev->dev, "Error probing SMB2.\n");
+ if (res2 < 0)
smbuses[1].base = 0; /* to have a check value */
- }
}
+
if ((res1 < 0) && (res2 < 0)) {
/* we did not find even one of the SMBuses, so we give up */
kfree(smbuses);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a4f8d33fa38..73de8ade10b 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -704,7 +704,8 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_MTD:
case I2C_IT_MTDWS:
if (dev->cli.operation == I2C_READ) {
- while (!readl(dev->virtbase + I2C_RISR) & I2C_IT_RXFE) {
+ while (!(readl(dev->virtbase + I2C_RISR)
+ & I2C_IT_RXFE)) {
if (dev->cli.count == 0)
break;
*dev->cli.buffer =
@@ -914,6 +915,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
static int __devexit nmk_i2c_remove(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct nmk_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adap);
@@ -924,6 +926,8 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
free_irq(dev->irq, dev);
iounmap(dev->virtbase);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
clk_disable(dev->clk);
clk_put(dev->clk);
platform_set_drvdata(pdev, NULL);
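The extra parentheses added in i2c_irq_handler above are a real precedence fix: '!' binds tighter than '&', so the old condition computed (!readl(...)) & I2C_IT_RXFE, masking a 0-or-1 value with the flag instead of testing whether the receive-FIFO-empty bit is clear. A tiny self-contained illustration (EXAMPLE_RXFE is a hypothetical stand-in for I2C_IT_RXFE):

#define EXAMPLE_RXFE	0x02		/* hypothetical "RX FIFO empty" bit */

static int fifo_has_data_old(unsigned int risr)
{
	/* (!risr) & 0x02: a 0-or-1 value masked with bit 1, always 0 */
	return !risr & EXAMPLE_RXFE;
}

static int fifo_has_data_new(unsigned int risr)
{
	/* true exactly when the RX-FIFO-empty bit is clear */
	return !(risr & EXAMPLE_RXFE);
}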
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index b4ed4ca802e..0070371b29f 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -19,7 +19,7 @@
#include <linux/wait.h>
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
struct ocores_i2c {
void __iomem *base;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 389ac6032a7..7674efb5537 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/i2c-omap.h>
/* I2C controller revisions */
#define OMAP_I2C_REV_2 0x20
@@ -45,29 +46,37 @@
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
#define OMAP_I2C_REV_ON_3430 0x3C
+#define OMAP_I2C_REV_ON_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
-#define OMAP_I2C_REV_REG 0x00
-#define OMAP_I2C_IE_REG 0x01
-#define OMAP_I2C_STAT_REG 0x02
-#define OMAP_I2C_IV_REG 0x03
/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */
-#define OMAP_I2C_WE_REG 0x03
-#define OMAP_I2C_SYSS_REG 0x04
-#define OMAP_I2C_BUF_REG 0x05
-#define OMAP_I2C_CNT_REG 0x06
-#define OMAP_I2C_DATA_REG 0x07
-#define OMAP_I2C_SYSC_REG 0x08
-#define OMAP_I2C_CON_REG 0x09
-#define OMAP_I2C_OA_REG 0x0a
-#define OMAP_I2C_SA_REG 0x0b
-#define OMAP_I2C_PSC_REG 0x0c
-#define OMAP_I2C_SCLL_REG 0x0d
-#define OMAP_I2C_SCLH_REG 0x0e
-#define OMAP_I2C_SYSTEST_REG 0x0f
-#define OMAP_I2C_BUFSTAT_REG 0x10
+enum {
+ OMAP_I2C_REV_REG = 0,
+ OMAP_I2C_IE_REG,
+ OMAP_I2C_STAT_REG,
+ OMAP_I2C_IV_REG,
+ OMAP_I2C_WE_REG,
+ OMAP_I2C_SYSS_REG,
+ OMAP_I2C_BUF_REG,
+ OMAP_I2C_CNT_REG,
+ OMAP_I2C_DATA_REG,
+ OMAP_I2C_SYSC_REG,
+ OMAP_I2C_CON_REG,
+ OMAP_I2C_OA_REG,
+ OMAP_I2C_SA_REG,
+ OMAP_I2C_PSC_REG,
+ OMAP_I2C_SCLL_REG,
+ OMAP_I2C_SCLH_REG,
+ OMAP_I2C_SYSTEST_REG,
+ OMAP_I2C_BUFSTAT_REG,
+ OMAP_I2C_REVNB_LO,
+ OMAP_I2C_REVNB_HI,
+ OMAP_I2C_IRQSTATUS_RAW,
+ OMAP_I2C_IRQENABLE_SET,
+ OMAP_I2C_IRQENABLE_CLR,
+};
/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
#define OMAP_I2C_IE_XDR (1 << 14) /* TX Buffer drain int enable */
@@ -157,6 +166,9 @@
#define SYSC_IDLEMODE_SMART 0x2
#define SYSC_CLOCKACTIVITY_FCLK 0x2
+/* Errata definitions */
+#define I2C_OMAP_ERRATA_I207 (1 << 0)
+#define I2C_OMAP3_1P153 (1 << 1)
struct omap_i2c_dev {
struct device *dev;
@@ -167,9 +179,13 @@ struct omap_i2c_dev {
struct clk *fclk; /* Functional clock */
struct completion cmd_complete;
struct resource *ioarea;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in Khz */
u16 cmd_err;
u8 *buf;
+ u8 *regs;
size_t buf_len;
struct i2c_adapter adapter;
u8 fifo_size; /* use as flag and value
@@ -186,17 +202,67 @@ struct omap_i2c_dev {
u16 bufstate;
u16 syscstate;
u16 westate;
+ u16 errata;
+};
+
+static const u8 reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x00,
+ [OMAP_I2C_IE_REG] = 0x01,
+ [OMAP_I2C_STAT_REG] = 0x02,
+ [OMAP_I2C_IV_REG] = 0x03,
+ [OMAP_I2C_WE_REG] = 0x03,
+ [OMAP_I2C_SYSS_REG] = 0x04,
+ [OMAP_I2C_BUF_REG] = 0x05,
+ [OMAP_I2C_CNT_REG] = 0x06,
+ [OMAP_I2C_DATA_REG] = 0x07,
+ [OMAP_I2C_SYSC_REG] = 0x08,
+ [OMAP_I2C_CON_REG] = 0x09,
+ [OMAP_I2C_OA_REG] = 0x0a,
+ [OMAP_I2C_SA_REG] = 0x0b,
+ [OMAP_I2C_PSC_REG] = 0x0c,
+ [OMAP_I2C_SCLL_REG] = 0x0d,
+ [OMAP_I2C_SCLH_REG] = 0x0e,
+ [OMAP_I2C_SYSTEST_REG] = 0x0f,
+ [OMAP_I2C_BUFSTAT_REG] = 0x10,
+};
+
+static const u8 omap4_reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x04,
+ [OMAP_I2C_IE_REG] = 0x2c,
+ [OMAP_I2C_STAT_REG] = 0x28,
+ [OMAP_I2C_IV_REG] = 0x34,
+ [OMAP_I2C_WE_REG] = 0x34,
+ [OMAP_I2C_SYSS_REG] = 0x90,
+ [OMAP_I2C_BUF_REG] = 0x94,
+ [OMAP_I2C_CNT_REG] = 0x98,
+ [OMAP_I2C_DATA_REG] = 0x9c,
+ [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_CON_REG] = 0xa4,
+ [OMAP_I2C_OA_REG] = 0xa8,
+ [OMAP_I2C_SA_REG] = 0xac,
+ [OMAP_I2C_PSC_REG] = 0xb0,
+ [OMAP_I2C_SCLL_REG] = 0xb4,
+ [OMAP_I2C_SCLH_REG] = 0xb8,
+ [OMAP_I2C_SYSTEST_REG] = 0xbC,
+ [OMAP_I2C_BUFSTAT_REG] = 0xc0,
+ [OMAP_I2C_REVNB_LO] = 0x00,
+ [OMAP_I2C_REVNB_HI] = 0x04,
+ [OMAP_I2C_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IRQENABLE_SET] = 0x2c,
+ [OMAP_I2C_IRQENABLE_CLR] = 0x30,
};
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
int reg, u16 val)
{
- __raw_writew(val, i2c_dev->base + (reg << i2c_dev->reg_shift));
+ __raw_writew(val, i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
{
- return __raw_readw(i2c_dev->base + (reg << i2c_dev->reg_shift));
+ return __raw_readw(i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static int __init omap_i2c_get_clocks(struct omap_i2c_dev *dev)
@@ -265,7 +331,11 @@ static void omap_i2c_idle(struct omap_i2c_dev *dev)
WARN_ON(dev->idle);
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+ if (dev->rev >= OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ else
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+
if (dev->rev < OMAP_I2C_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
@@ -330,7 +400,9 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ if (dev->rev < OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -357,7 +429,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -430,6 +502,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* Take the I2C module out of reset: */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ dev->errata = 0;
+
+ if (cpu_is_omap2430() || cpu_is_omap34xx())
+ dev->errata |= I2C_OMAP_ERRATA_I207;
+
/* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
@@ -539,8 +616,12 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
r = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
dev->buf_len = 0;
if (r < 0)
return r;
@@ -623,6 +704,34 @@ omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
}
+static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
+{
+ /*
+ * I2C Errata (Errata Nos. OMAP2: 1.67, OMAP3: 1.8);
+ * not applicable to OMAP4.
+ * Under certain rare conditions RDR can be set again
+ * while the bus is busy; when that happens the spurious
+ * interrupt is simply acknowledged and ignored.
+ */
+ if (stat & OMAP_I2C_STAT_RDR) {
+ /* Step 1: If RDR is set, clear it */
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+
+ /* Step 2: */
+ if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_BB)) {
+
+ /* Step 3: */
+ if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_RDR) {
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ dev_dbg(dev->dev, "RDR when bus is busy.\n");
+ }
+
+ }
+ }
+}
+
/* rev1 devices are apparently only on some 15xx */
#ifdef CONFIG_ARCH_OMAP15XX
@@ -684,6 +793,35 @@ omap_i2c_rev1_isr(int this_irq, void *dev_id)
#define omap_i2c_rev1_isr NULL
#endif
+/*
+ * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing
+ * data to DATA_REG. Otherwise some data bytes can be lost while transferring
+ * them from the memory to the I2C interface.
+ */
+static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
+{
+ unsigned long timeout = 10000;
+
+ while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) {
+ if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
+ omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY |
+ OMAP_I2C_STAT_XDR));
+ *err |= OMAP_I2C_STAT_XUDF;
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ *stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ }
+
+ if (!timeout) {
+ dev_err(dev->dev, "timeout waiting on XUDF bit\n");
+ return 0;
+ }
+
+ return 0;
+}
+
static irqreturn_t
omap_i2c_isr(int this_irq, void *dev_id)
{
@@ -733,6 +871,10 @@ complete:
}
if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
u8 num_bytes = 1;
+
+ if (dev->errata & I2C_OMAP_ERRATA_I207)
+ i2c_omap_errata_i207(dev, stat);
+
if (dev->fifo_size) {
if (stat & OMAP_I2C_STAT_RRDY)
num_bytes = dev->fifo_size;
@@ -747,9 +889,12 @@ complete:
if (dev->buf_len) {
*dev->buf++ = w;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
dev->buf_len--;
@@ -787,9 +932,12 @@ complete:
if (dev->buf_len) {
w = *dev->buf++;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
dev->buf_len--;
@@ -807,25 +955,9 @@ complete:
break;
}
- /*
- * OMAP3430 Errata 1.153: When an XRDY/XDR
- * is hit, wait for XUDF before writing data
- * to DATA_REG. Otherwise some data bytes can
- * be lost while transferring them from the
- * memory to the I2C interface.
- */
-
- if (dev->rev <= OMAP_I2C_REV_ON_3430) {
- while (!(stat & OMAP_I2C_STAT_XUDF)) {
- if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
- omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- err |= OMAP_I2C_STAT_XUDF;
- goto complete;
- }
- cpu_relax();
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
- }
- }
+ if ((dev->errata & I2C_OMAP3_1P153) &&
+ errata_omap3_1p153(dev, &stat, &err))
+ goto complete;
omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
}
@@ -857,6 +989,7 @@ omap_i2c_probe(struct platform_device *pdev)
struct omap_i2c_dev *dev;
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
+ struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
irq_handler_t isr;
int r;
u32 speed = 0;
@@ -886,10 +1019,13 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdev->dev.platform_data != NULL)
- speed = *(u32 *)pdev->dev.platform_data;
- else
- speed = 100; /* Defualt speed */
+ if (pdata != NULL) {
+ speed = pdata->clkrate;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
+ } else {
+ speed = 100; /* Default speed */
+ dev->set_mpu_wkup_lat = NULL;
+ }
dev->speed = speed;
dev->idle = 1;
@@ -905,17 +1041,27 @@ omap_i2c_probe(struct platform_device *pdev)
if (cpu_is_omap7xx())
dev->reg_shift = 1;
+ else if (cpu_is_omap44xx())
+ dev->reg_shift = 0;
else
dev->reg_shift = 2;
if ((r = omap_i2c_get_clocks(dev)) != 0)
goto err_iounmap;
+ if (cpu_is_omap44xx())
+ dev->regs = (u8 *) omap4_reg_map;
+ else
+ dev->regs = (u8 *) reg_map;
+
omap_i2c_unidle(dev);
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (dev->rev <= OMAP_I2C_REV_ON_3430)
+ dev->errata |= I2C_OMAP3_1P153;
+
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -927,8 +1073,17 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- dev->fifo_size = (dev->fifo_size / 2);
- dev->b_hw = 1; /* Enable hardware fixes */
+ if (dev->rev >= OMAP_I2C_REV_ON_4430) {
+ dev->fifo_size = 0;
+ dev->b_hw = 0; /* Disable hardware fixes */
+ } else {
+ dev->fifo_size = (dev->fifo_size / 2);
+ dev->b_hw = 1; /* Enable hardware fixes */
+ }
+ /* calculate wakeup latency constraint for MPU */
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * speed / 8);
}
/* reset ASAP, clearing any IRQs */
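The i2c-omap change above decouples logical register IDs from hardware offsets: the new enum indexes a per-SoC offset table (reg_map or omap4_reg_map, selected in probe), and the accessors shift the looked-up offset exactly as before. A stripped-down sketch of the same indirection with toy ex_* names and offsets taken from the two tables above:

#include <linux/io.h>

enum { EX_REV_REG, EX_STAT_REG, EX_NR_REGS };

static const u8 ex_legacy_map[EX_NR_REGS] = {
	[EX_REV_REG]	= 0x00,
	[EX_STAT_REG]	= 0x02,
};

static const u8 ex_omap4_map[EX_NR_REGS] = {
	[EX_REV_REG]	= 0x04,
	[EX_STAT_REG]	= 0x28,
};

static inline u16 ex_read_reg(void __iomem *base, const u8 *map,
			      int reg, int shift)
{
	/* same access pattern as omap_i2c_read_reg(): table lookup, then shift */
	return __raw_readw(base + (map[reg] << shift));
}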
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5f41ec0f72d..fc5fbd1012c 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -33,7 +33,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-smbus.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-parport.h"
#define DEFAULT_BASE 0x378
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 846583ed476..0eb1515541e 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -137,7 +137,7 @@ static int parport_getsda(void *data)
copied. The attaching code will set getscl to NULL for adapters that
cannot read SCL back, and will also make the data field point to
the parallel port structure. */
-static struct i2c_algo_bit_data parport_algo_data = {
+static const struct i2c_algo_bit_data parport_algo_data = {
.setsda = parport_setsda,
.setscl = parport_setscl,
.getsda = parport_getsda,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index d3d4a4b43a1..4174101660c 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -25,7 +25,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_driver pasemi_smb_driver;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f7346a9bd95..bbd77603a41 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -30,8 +30,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#define DRIVER "i2c-pca-isa"
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5b2213df5ed..ef5c78487eb 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -23,9 +23,9 @@
#include <linux/i2c-algo-pca.h>
#include <linux/i2c-pca-platform.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
struct i2c_pca_pf_data {
void __iomem *reg_base;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ee9da6fcf69..6d14ac2e3c4 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* PIIX4 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 7b57d5f267e..dfa7ae9c1b8 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -33,7 +33,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define DRV_NAME "pmcmsptwi"
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 14d249f5ed3..020ff23d762 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,9 +34,9 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/i2c.h>
/*
@@ -209,18 +209,6 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
}
#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__)
-#else
-#define i2c_debug 0
-
-#define show_state(i2c) do { } while (0)
-#define decode_ISR(val) do { } while (0)
-#define decode_ICR(val) do { } while (0)
-#endif
-
-#define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0)
-
-static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
-static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
{
@@ -236,6 +224,20 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
printk("\n");
}
+#else /* ifdef DEBUG */
+
+#define i2c_debug 0
+
+#define show_state(i2c) do { } while (0)
+#define decode_ISR(val) do { } while (0)
+#define decode_ICR(val) do { } while (0)
+#define i2c_pxa_scream_blue_murder(i2c, why) do { } while (0)
+
+#endif /* ifdef DEBUG / else */
+
+static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
+static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
+
static inline int i2c_pxa_is_slavemode(struct pxa_i2c *i2c)
{
return !(readl(_ICR(i2c)) & ICR_SCLE);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d27072b2249..72902e0bbfa 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,9 +35,9 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/regs-iic.h>
#include <plat/iic.h>
@@ -482,7 +482,8 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout;
+ unsigned long iicstat, timeout;
+ int spins = 20;
int ret;
if (i2c->suspended)
@@ -521,7 +522,21 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* ensure the stop has been through the bus */
- msleep(1);
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ /* first, try busy waiting briefly */
+ do {
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
+
+ /* if that timed out sleep */
+ if (!spins) {
+ msleep(1);
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
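
The hunk above replaces the unconditional msleep(1) after a STOP with a bounded busy-wait that only falls back to sleeping if the controller is still busy. A minimal sketch of that poll-then-sleep pattern in isolation, assuming a memory-mapped status register and a caller-supplied busy bit (wait_for_bus_idle, stat_reg and busy_bit are placeholders, not S3C2410 definitions):

    #include <linux/types.h>
    #include <linux/io.h>
    #include <linux/delay.h>

    /* Poll the status register a few times before giving up the CPU. */
    static bool wait_for_bus_idle(void __iomem *stat_reg, u32 busy_bit)
    {
            int spins = 20;                 /* bounded busy-wait, as in the patch */
            u32 stat;

            do {
                    stat = readl(stat_reg);
            } while ((stat & busy_bit) && --spins);

            if (!spins) {                   /* still busy: sleep once and recheck */
                    msleep(1);
                    stat = readl(stat_reg);
            }

            return !(stat & busy_bit);      /* true when the bus went idle */
    }
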
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c91359f4965..cadc0216e02 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -36,8 +36,8 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include "i2c-s6000.h"
#define DRV_NAME "i2c-s6000"
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index b9680f50f54..4f93da31d3a 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -16,10 +16,10 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
-#include <asm/io.h>
/* register offsets */
#define I2CSCR 0x0 /* slave ctrl */
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 98b1ec48915..3d76a188e42 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_smbus.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 78b06107342..2fc08fbf67a 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -24,12 +24,11 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
-
struct simtec_i2c_data {
struct resource *ioarea;
void __iomem *reg;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 55a71370c79..437586611d4 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -61,7 +61,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 2309c7f1bde..e6f539e26f6 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -53,7 +53,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* SIS630 SMBus registers */
#define SMB_STS 0x80 /* status */
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index d43d8f8943d..86837f0c4cb 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -38,7 +38,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* base address register in PCI config space */
#define SIS96x_BAR 0x04
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 0c770eabe85..b1b3447942c 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -29,13 +29,16 @@
#include <linux/i2c.h>
#define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
static unsigned short chip_addr[MAX_CHIPS];
module_param_array(chip_addr, ushort, NULL, S_IRUGO);
MODULE_PARM_DESC(chip_addr,
"Chip addresses (up to 10, between 0x03 and 0x77)");
-static unsigned long functionality = ~0UL;
+static unsigned long functionality = STUB_FUNC;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");
@@ -156,9 +159,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
static u32 stub_func(struct i2c_adapter *adapter)
{
- return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK) & functionality;
+ return STUB_FUNC & functionality;
}
static const struct i2c_algorithm smbus_algorithm = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 5c473833d94..60556012312 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -15,8 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define I2C_CONTROL 0x00
#define I2C_CONTROLS 0x00
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index de78283bddb..7799fe5bda8 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* Power management registers */
#define PM_CFG_REVID 0x08 /* silicon revision code */
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index d57292e5dae..4c6fff5f330 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -51,7 +51,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_dev *vt596_pdev;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 684395b6f3e..4cb4bb00995 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -32,7 +32,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200.h>
@@ -552,7 +552,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base,
* the name and the BAR where the I/O address resource is located. ISA
* devices are flagged with a bar value of -1 */
-static struct pci_device_id scx200_pci[] = {
+static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 42df0eca43d..7ee0d502cea 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -27,7 +27,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c2258a51fe0..e0f833cca3f 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -159,107 +159,131 @@ static void i2c_device_shutdown(struct device *dev)
driver->shutdown(client);
}
-#ifdef CONFIG_SUSPEND
-static int i2c_device_pm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->suspend)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->suspend)
return 0;
- return pm->suspend(dev);
+ return driver->suspend(client, mesg);
}
-static int i2c_device_pm_resume(struct device *dev)
+static int i2c_legacy_resume(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->resume)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->resume)
return 0;
- return pm->resume(dev);
+ return driver->resume(client);
}
-#else
-#define i2c_device_pm_suspend NULL
-#define i2c_device_pm_resume NULL
-#endif
-#ifdef CONFIG_PM_RUNTIME
-static int i2c_device_runtime_suspend(struct device *dev)
+static int i2c_device_pm_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_suspend)
+ if (pm_runtime_suspended(dev))
return 0;
- return pm->runtime_suspend(dev);
-}
-static int i2c_device_runtime_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm;
+ if (pm)
+ return pm->suspend ? pm->suspend(dev) : 0;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_resume)
- return 0;
- return pm->runtime_resume(dev);
+ return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
-static int i2c_device_runtime_idle(struct device *dev)
+static int i2c_device_pm_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = NULL;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
- if (dev->driver)
- pm = dev->driver->pm;
- if (pm && pm->runtime_idle) {
- ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
+ if (pm)
+ ret = pm->resume ? pm->resume(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
- return pm_runtime_suspend(dev);
+ return ret;
}
-#else
-#define i2c_device_runtime_suspend NULL
-#define i2c_device_runtime_resume NULL
-#define i2c_device_runtime_idle NULL
-#endif
-static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
+static int i2c_device_pm_freeze(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->suspend)
- return 0;
- return driver->suspend(client, mesg);
+
+ if (pm)
+ return pm->freeze ? pm->freeze(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
-static int i2c_device_resume(struct device *dev)
+static int i2c_device_pm_thaw(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->resume)
+
+ if (pm)
+ return pm->thaw ? pm->thaw(dev) : 0;
+
+ return i2c_legacy_resume(dev);
+}
+
+static int i2c_device_pm_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm_runtime_suspended(dev))
return 0;
- return driver->resume(client);
+
+ if (pm)
+ return pm->poweroff ? pm->poweroff(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
+static int i2c_device_pm_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ if (pm)
+ ret = pm->restore ? pm->restore(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+#else /* !CONFIG_PM_SLEEP */
+#define i2c_device_pm_suspend NULL
+#define i2c_device_pm_resume NULL
+#define i2c_device_pm_freeze NULL
+#define i2c_device_pm_thaw NULL
+#define i2c_device_pm_poweroff NULL
+#define i2c_device_pm_restore NULL
+#endif /* !CONFIG_PM_SLEEP */
+
static void i2c_client_dev_release(struct device *dev)
{
kfree(to_i2c_client(dev));
@@ -301,9 +325,15 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
- .runtime_suspend = i2c_device_runtime_suspend,
- .runtime_resume = i2c_device_runtime_resume,
- .runtime_idle = i2c_device_runtime_idle,
+ .freeze = i2c_device_pm_freeze,
+ .thaw = i2c_device_pm_thaw,
+ .poweroff = i2c_device_pm_poweroff,
+ .restore = i2c_device_pm_restore,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
};
struct bus_type i2c_bus_type = {
@@ -312,8 +342,6 @@ struct bus_type i2c_bus_type = {
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
- .suspend = i2c_device_suspend,
- .resume = i2c_device_resume,
.pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
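
With the bus-level .suspend/.resume removed above, an I2C chip driver either keeps the legacy i2c_driver .suspend/.resume pair (now reached through i2c_legacy_suspend()/i2c_legacy_resume()) or supplies dev_pm_ops through its struct device_driver. A minimal sketch of the dev_pm_ops form; the my_chip_* names are placeholders, not part of the patch:

    #include <linux/i2c.h>
    #include <linux/pm.h>

    static int my_chip_suspend(struct device *dev)
    {
            /* quiesce the device here */
            return 0;
    }

    static int my_chip_resume(struct device *dev)
    {
            /* reprogram the device here */
            return 0;
    }

    static const struct dev_pm_ops my_chip_pm_ops = {
            .suspend = my_chip_suspend,     /* reached via i2c_device_pm_suspend() */
            .resume  = my_chip_resume,      /* reached via i2c_device_pm_resume()  */
    };

    static struct i2c_driver my_chip_driver = {
            .driver = {
                    .name = "my-chip",      /* placeholder */
                    .pm   = &my_chip_pm_ops,
            },
            /* .probe, .remove and .id_table omitted from this sketch */
    };
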
@@ -390,6 +418,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
+#ifdef CONFIG_OF
+ client->dev.of_node = info->of_node;
+#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -1193,10 +1224,10 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
{
int ret;
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
msg.addr = client->addr;
@@ -1220,9 +1251,9 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
{
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
int ret;
@@ -1424,7 +1455,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter* i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int id)
{
struct i2c_adapter *adapter;
@@ -1451,7 +1482,7 @@ static u8 crc8(u16 data)
{
int i;
- for(i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
if (data & 0x8000)
data = data ^ POLY;
data = data << 1;
@@ -1464,7 +1495,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
{
int i;
- for(i = 0; i < count; i++)
+ for (i = 0; i < count; i++)
crc = crc8((crc ^ p[i]) << 8);
return crc;
}
@@ -1534,7 +1565,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
*/
s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
{
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
}
EXPORT_SYMBOL(i2c_smbus_write_byte);
@@ -1572,9 +1603,9 @@ s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
{
union i2c_smbus_data data;
data.byte = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BYTE_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_byte_data);
@@ -1611,9 +1642,9 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
{
union i2c_smbus_data data;
data.word = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_WORD_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_WORD_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_word_data);
@@ -1690,9 +1721,9 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
length = I2C_SMBUS_BLOCK_MAX;
data.block[0] = length;
memcpy(&data.block[1], values, length);
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BLOCK_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BLOCK_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_block_data);
@@ -1734,10 +1765,10 @@ EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data);
/* Simulate a SMBus command using the i2c protocol
No checking of parameters is done! */
-static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
- unsigned short flags,
- char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data *data)
{
/* So we need to generate a series of msgs. In the case of writing, we
need to use only one message; when reading, we need two. We initialize
@@ -1745,7 +1776,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
simpler. */
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ?2:1;
+ int num = read_write == I2C_SMBUS_READ ? 2 : 1;
struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
{ addr, flags | I2C_M_RD, 0, msgbuf1 }
};
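
As the comment above says, an emulated SMBus read is built from two i2c_msgs: a write of the command byte followed by a read of the reply. A minimal standalone sketch for the byte-data case, assuming a client at addr (the helper name is a placeholder):

    #include <linux/i2c.h>
    #include <linux/errno.h>

    static int my_read_byte_data(struct i2c_adapter *adap, u16 addr,
                                 u8 command, u8 *value)
    {
            u8 out = command;
            struct i2c_msg msgs[2] = {
                    { .addr = addr, .flags = 0,        .len = 1, .buf = &out },
                    { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = value },
            };
            int ret = i2c_transfer(adap, msgs, 2);

            /* i2c_transfer() returns the number of messages transferred */
            if (ret < 0)
                    return ret;
            return ret == 2 ? 0 : -EIO;
    }
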
@@ -1754,7 +1785,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
int status;
msgbuf0[0] = command;
- switch(size) {
+ switch (size) {
case I2C_SMBUS_QUICK:
msg[0].len = 0;
/* Special case: The read/write field is used as data */
@@ -1781,7 +1812,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
if (read_write == I2C_SMBUS_READ)
msg[1].len = 2;
else {
- msg[0].len=3;
+ msg[0].len = 3;
msgbuf0[1] = data->word & 0xff;
msgbuf0[2] = data->word >> 8;
}
@@ -1874,26 +1905,26 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
}
if (read_write == I2C_SMBUS_READ)
- switch(size) {
- case I2C_SMBUS_BYTE:
- data->byte = msgbuf0[0];
- break;
- case I2C_SMBUS_BYTE_DATA:
- data->byte = msgbuf1[0];
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- data->word = msgbuf1[0] | (msgbuf1[1] << 8);
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i+1] = msgbuf1[i];
- break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- for (i = 0; i < msgbuf1[0] + 1; i++)
- data->block[i] = msgbuf1[i];
- break;
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ data->byte = msgbuf0[0];
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = msgbuf1[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < data->block[0]; i++)
+ data->block[i+1] = msgbuf1[i];
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ for (i = 0; i < msgbuf1[0] + 1; i++)
+ data->block[i] = msgbuf1[i];
+ break;
}
return 0;
}
@@ -1938,7 +1969,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
}
rt_mutex_unlock(&adapter->bus_lock);
} else
- res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
+ res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
command, protocol, data);
return res;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4110aa4960..e0694e4d86c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,7 +35,7 @@
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static struct i2c_driver i2cdev_driver;
@@ -132,45 +132,45 @@ static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
* needed by those system calls and by this SMBus interface.
*/
-static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
{
char *tmp;
int ret;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_recv(client,tmp,count);
+ ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf,tmp,count)?-EFAULT:ret;
+ ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
kfree(tmp);
return ret;
}
-static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
{
int ret;
char *tmp;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
- if (copy_from_user(tmp,buf,count)) {
+ if (copy_from_user(tmp, buf, count)) {
kfree(tmp);
return -EFAULT;
}
@@ -178,7 +178,7 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_send(client,tmp,count);
+ ret = i2c_master_send(client, tmp, count);
kfree(tmp);
return ret;
}
@@ -369,13 +369,13 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
unsigned long funcs;
dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
- switch ( cmd ) {
+ switch (cmd) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
/* NOTE: devices set up to work with "new style" drivers
@@ -601,7 +601,7 @@ static void __exit i2c_dev_exit(void)
{
i2c_del_driver(&i2cdev_driver);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR,"i2c");
+ unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index d2b8b272bc2..cb10201a15e 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
static int cmd640_test_irq(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
+ u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
CFR_IDE01INTR;
-
- pci_read_config_byte(dev, irq_reg, &irq_stat);
+ u8 irq_stat = get_cmd640_reg(irq_reg);
return (irq_stat & irq_mask) ? 1 : 0;
}
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a8..3feaa26410b 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -24,15 +25,6 @@
/*
- * Bases of the IDE interfaces
- */
-
-#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
-#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
-
-#define GAYLE_IDEREG_SIZE 0x2000
-
- /*
* Offsets from one of the above bases
*/
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
static int gayle_test_irq(ide_hwif_t *hwif)
{
- unsigned char ch;
+ unsigned char ch;
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & GAYLE_IRQ_IDE))
- return 0;
- return 1;
+ ch = z_readb(hwif->io_ports.irq_addr);
+ if (!(ch & GAYLE_IRQ_IDE))
+ return 0;
+ return 1;
}
static void gayle_a1200_clear_irq(ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ ide_hwif_t *hwif = drive->hwif;
- (void)z_readb(hwif->io_ports.status_addr);
- z_writeb(0x7c, hwif->io_ports.irq_addr);
+ (void)z_readb(hwif->io_ports.status_addr);
+ z_writeb(0x7c, hwif->io_ports.irq_addr);
}
static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
* Probe for a Gayle IDE interface (and optionally for an IDE doubler)
*/
-static int __init gayle_init(void)
+static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
{
- unsigned long phys_base, res_start, res_n;
- unsigned long base, ctrlport, irqport;
- int a4000, i, rc;
- struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
- struct ide_port_info d = gayle_port_info;
-
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
- goto found;
-
-#ifdef CONFIG_ZORRO
- if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE,
- NULL))
- goto found;
-#endif
- return -ENODEV;
-
-found:
- printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
- a4000 ? 4000 : 1200,
- ide_doubler ? ", IDE doubler" : "");
-
- if (a4000) {
- phys_base = GAYLE_BASE_4000;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
- d.port_ops = &gayle_a4000_port_ops;
- } else {
- phys_base = GAYLE_BASE_1200;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200);
- d.port_ops = &gayle_a1200_port_ops;
+ struct resource *res;
+ struct gayle_ide_platform_data *pdata;
+ unsigned long base, ctrlport, irqport;
+ unsigned int i;
+ int error;
+ struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
+ struct ide_port_info d = gayle_port_info;
+ struct ide_host *host;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), "IDE"))
+ return -EBUSY;
+
+ pdata = pdev->dev.platform_data;
+ pr_info("ide: Gayle IDE controller (A%u style%s)\n",
+ pdata->explicit_ack ? 1200 : 4000,
+ ide_doubler ? ", IDE doubler" : "");
+
+ base = (unsigned long)ZTWO_VADDR(pdata->base);
+ ctrlport = 0;
+ irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
+ if (pdata->explicit_ack)
+ d.port_ops = &gayle_a1200_port_ops;
+ else
+ d.port_ops = &gayle_a4000_port_ops;
+
+ for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
+ if (GAYLE_HAS_CONTROL_REG)
+ ctrlport = base + GAYLE_CONTROL;
+
+ gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+ hws[i] = &hw[i];
}
- res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
- res_n = GAYLE_IDEREG_SIZE;
+ error = ide_host_add(&d, hws, i, &host);
+ if (error)
+ goto out;
- if (!request_mem_region(res_start, res_n, "IDE"))
- return -EBUSY;
+ platform_set_drvdata(pdev, host);
+ return 0;
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
- base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
- ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
+out:
+ release_mem_region(res->start, resource_size(res));
+ return error;
+}
+
+static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ide_host_remove(host);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
- gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+static struct platform_driver amiga_gayle_ide_driver = {
+ .remove = __exit_p(amiga_gayle_ide_remove),
+ .driver = {
+ .name = "amiga-gayle-ide",
+ .owner = THIS_MODULE,
+ },
+};
- hws[i] = &hw[i];
- }
+static int __init amiga_gayle_ide_init(void)
+{
+ return platform_driver_probe(&amiga_gayle_ide_driver,
+ amiga_gayle_ide_probe);
+}
- rc = ide_host_add(&d, hws, i, NULL);
- if (rc)
- release_mem_region(res_start, res_n);
+module_init(amiga_gayle_ide_init);
- return rc;
+static void __exit amiga_gayle_ide_exit(void)
+{
+ platform_driver_unregister(&amiga_gayle_ide_driver);
}
-module_init(gayle_init);
+module_exit(amiga_gayle_ide_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-gayle-ide");
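
The converted driver above binds to an "amiga-gayle-ide" platform device and reads base, irqport and explicit_ack from its platform data. A hedged sketch of how platform setup code could provide such a device; the struct layout and the IRQ-port address are assumptions of this sketch (the 0xda0000 base and 0x2000 register size come from the defines removed above):

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    struct gayle_ide_platform_data {        /* layout assumed from the pdata uses above */
            unsigned long base;
            unsigned long irqport;
            int explicit_ack;
    };

    static struct gayle_ide_platform_data gayle_pdata = {
            .base         = 0xda0000,       /* A1200-style base (GAYLE_BASE_1200) */
            .irqport      = 0xda9000,       /* placeholder IRQ status port */
            .explicit_ack = 1,              /* A1200 needs an explicit IRQ ack */
    };

    static struct resource gayle_res = {
            .start = 0xda0000,
            .end   = 0xda0000 + 0x2000 - 1, /* GAYLE_IDEREG_SIZE */
            .flags = IORESOURCE_MEM,
    };

    static int __init gayle_ide_device_register(void)
    {
            struct platform_device *pdev;
            int error;

            pdev = platform_device_alloc("amiga-gayle-ide", -1);
            if (!pdev)
                    return -ENOMEM;

            error = platform_device_add_resources(pdev, &gayle_res, 1);
            if (!error)
                    error = platform_device_add_data(pdev, &gayle_pdata,
                                                     sizeof(gayle_pdata));
            if (!error)
                    error = platform_device_add(pdev);
            if (error)
                    platform_device_put(pdev);
            return error;
    }
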
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index b85450865ff..0b7815d2581 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -65,8 +65,7 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct ide_info_t {
struct pcmcia_device *p_dev;
struct ide_host *host;
- int ndev;
- dev_node_t node;
+ int ndev;
} ide_info_t;
static void ide_release(struct pcmcia_device *);
@@ -102,7 +101,6 @@ static int ide_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.IOAddrLines = 3;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -285,8 +283,7 @@ static int ide_config(struct pcmcia_device *link)
io_base = link->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,24 +296,21 @@ static int ide_config(struct pcmcia_device *link)
if (is_kme)
outb(0x81, ctl_base+1);
- host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ host = idecs_register(io_base, ctl_base, link->irq, link);
if (host == NULL && link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
host = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ, link);
+ link->irq, link);
}
if (host == NULL)
goto failed;
info->ndev = 1;
- sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
- info->node.major = host->ports[0]->major;
- info->node.minor = 0;
info->host = host;
- link->dev_node = &info->node;
- printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
- info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
+ dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
+ 'a' + host->ports[0]->index * 2,
+ link->conf.Vpp / 10, link->conf.Vpp % 10);
kfree(stk);
return 0;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3b128dce9c3..33d65039cce 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -407,32 +407,24 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
return 0;
}
-static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
- u64 set = min(capacity, drive->probed_capacity);
u16 *id = drive->id;
int lba48 = ata_id_lba48_enabled(id);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
ata_id_hpa_enabled(id) == 0)
- goto out;
+ return;
/*
* according to the spec the SET MAX ADDRESS command shall be
* immediately preceded by a READ NATIVE MAX ADDRESS command
*/
- capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
- if (capacity == 0)
- goto out;
-
- set = ide_disk_hpa_set_capacity(drive, set, lba48);
- if (set) {
- /* needed for ->resume to disable HPA */
- drive->dev_flags |= IDE_DFLAG_NOHPA;
- return set;
- }
-out:
- return drive->capacity64;
+ if (!ide_disk_hpa_get_native_capacity(drive, lba48))
+ return;
+
+ if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
+ drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
@@ -783,13 +775,13 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
}
const struct ide_disk_ops ide_ata_disk_ops = {
- .check = ide_disk_check,
- .set_capacity = ide_disk_set_capacity,
- .get_capacity = ide_disk_get_capacity,
- .setup = ide_disk_setup,
- .flush = ide_disk_flush,
- .init_media = ide_disk_init_media,
- .set_doorlock = ide_disk_set_doorlock,
- .do_request = ide_do_rw_disk,
- .ioctl = ide_disk_ioctl,
+ .check = ide_disk_check,
+ .unlock_native_capacity = ide_disk_unlock_native_capacity,
+ .get_capacity = ide_disk_get_capacity,
+ .setup = ide_disk_setup,
+ .flush = ide_disk_flush,
+ .init_media = ide_disk_init_media,
+ .set_doorlock = ide_disk_set_doorlock,
+ .do_request = ide_do_rw_disk,
+ .ioctl = ide_disk_ioctl,
};
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c32d83996ae..c102d23d9b3 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -288,17 +288,14 @@ static int ide_gd_media_changed(struct gendisk *disk)
return ret;
}
-static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
- unsigned long long capacity)
+static void ide_gd_unlock_native_capacity(struct gendisk *disk)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
const struct ide_disk_ops *disk_ops = drive->disk_ops;
- if (disk_ops->set_capacity)
- return disk_ops->set_capacity(drive, capacity);
-
- return drive->capacity64;
+ if (disk_ops->unlock_native_capacity)
+ disk_ops->unlock_native_capacity(drive);
}
static int ide_gd_revalidate_disk(struct gendisk *disk)
@@ -329,7 +326,7 @@ static const struct block_device_operations ide_gd_ops = {
.locked_ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
- .set_capacity = ide_gd_set_capacity,
+ .unlock_native_capacity = ide_gd_unlock_native_capacity,
.revalidate_disk = ide_gd_revalidate_disk
};
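
The ide-gd hunk above switches from the old .set_capacity hook, which returned a new capacity, to the simpler .unlock_native_capacity, which returns nothing and merely asks the drive to expose its full size. A minimal sketch of how a block driver wires the new hook; the my_* names are placeholders:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/module.h>

    static void my_unlock_native_capacity(struct gendisk *disk)
    {
            /* Ask the hardware to drop any host-protected-area clipping;
             * the block layer re-reads the capacity afterwards, so nothing
             * needs to be returned from here. */
    }

    static const struct block_device_operations my_bdops = {
            .owner                  = THIS_MODULE,
            .unlock_native_capacity = my_unlock_native_capacity,
    };
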
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 42965b3e30b..542603b394e 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
+ d.irq_flags = res_irq->flags;
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index c5f3841af36..3a35ec6193d 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x50) ? 1 : 0;
+ return (sc1d & 0x40) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x05) ? 1 : 0;
+ return (sc1d & 0x04) ? 1 : 0;
}
}
@@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = {
static const struct ide_port_ops pdc2026x_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
+ .test_irq = pdc202xx_test_irq,
.cable_detect = pdc2026x_cable_detect,
};
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 159955d16c4..183fa38760d 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1153,7 +1153,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_resource_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no address for %s\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto out_free_pmif;
}
@@ -1161,7 +1161,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
/* Request memory resource for IO ports */
if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%s!\n", mdev->ofdev.node->full_name);
+ "%s!\n", mdev->ofdev.dev.of_node->full_name);
rc = -EBUSY;
goto out_free_pmif;
}
@@ -1173,7 +1173,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
*/
if (macio_irq_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
- "13\n", mdev->ofdev.node->full_name);
+ "13\n", mdev->ofdev.dev.of_node->full_name);
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
@@ -1182,7 +1182,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
regbase = (unsigned long) base;
pmif->mdev = mdev;
- pmif->node = mdev->ofdev.node;
+ pmif->node = mdev->ofdev.dev.of_node;
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
@@ -1191,7 +1191,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
"resource for %s!\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
else
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index f15e90a453d..fb5c5186d4a 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -1,3 +1,14 @@
+config INTEL_IDLE
+ tristate "Cpuidle Driver for Intel Processors"
+ depends on CPU_IDLE
+ depends on X86
+ depends on CPU_SUP_INTEL
+ depends on EXPERIMENTAL
+ help
+ Enable intel_idle, a cpuidle driver that includes knowledge of
+ native Intel hardware idle features. The acpi_idle driver
+ can be configured at the same time, in order to handle
+ processors intel_idle does not support.
menu "Memory power savings"
depends on X86_64
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 5f68fc377e2..23d295cf10f 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
new file mode 100755
index 00000000000..54f0fb4cd5d
--- /dev/null
+++ b/drivers/idle/intel_idle.c
@@ -0,0 +1,461 @@
+/*
+ * intel_idle.c - native hardware idle loop for modern Intel processors
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * intel_idle is a cpuidle driver that loads on specific Intel processors
+ * in lieu of the legacy ACPI processor_idle driver. The intent is to
+ * make Linux more efficient on these processors, as intel_idle knows
+ * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
+ */
+
+/*
+ * Design Assumptions
+ *
+ * All CPUs have same idle states as boot CPU
+ *
+ * Chipset BM_STS (bus master status) bit is a NOP
+ * for preventing entry into deep C-states
+ */
+
+/*
+ * Known limitations
+ *
+ * The driver currently initializes for_each_online_cpu() upon modprobe.
+ * It is unaware of subsequent processors hot-added to the system.
+ * This means that if you boot with maxcpus=n and later online
+ * processors above n, those processors will use C1 only.
+ *
+ * ACPI has a .suspend hack to turn off deep c-states during suspend
+ * to avoid complications with the lapic timer workaround.
+ * Have not seen issues with suspend, but may need same workaround here.
+ *
+ * There is currently no kernel-based automatic probing/loading mechanism
+ * if the driver is built as a module.
+ */
+
+/* un-comment DEBUG to enable pr_debug() statements */
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h> /* ktime_get_real() */
+#include <trace/events/power.h>
+#include <linux/sched.h>
+
+#define INTEL_IDLE_VERSION "0.4"
+#define PREFIX "intel_idle: "
+
+#define MWAIT_SUBSTATE_MASK (0xf)
+#define MWAIT_CSTATE_MASK (0xf)
+#define MWAIT_SUBSTATE_SIZE (4)
+#define MWAIT_MAX_NUM_CSTATES 8
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
+
+static struct cpuidle_driver intel_idle_driver = {
+ .name = "intel_idle",
+ .owner = THIS_MODULE,
+};
+/* intel_idle.max_cstate=0 disables driver */
+static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
+static int power_policy = 7; /* 0 = max perf; 15 = max powersave */
+
+static unsigned int substates;
+static int (*choose_substate)(int);
+
+/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
+static unsigned int lapic_timer_reliable_states;
+
+static struct cpuidle_device *intel_idle_cpuidle_devices;
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+
+static struct cpuidle_state *cpuidle_state_table;
+
+/*
+ * States are indexed by the cstate number,
+ * which is also the index into the MWAIT hint array.
+ * Thus C0 is a dummy.
+ */
+static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "NHM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 3,
+ .power_usage = 1000,
+ .target_residency = 6,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "NHM-C3",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "NHM-C6",
+ .desc = "MWAIT 0x20",
+ .driver_data = (void *) 0x20,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 350,
+ .target_residency = 800,
+ .enter = &intel_idle },
+};
+
+static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "ATM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .power_usage = 1000,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "ATM-C2",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */ },
+ { /* MWAIT C4 */
+ .name = "ATM-C4",
+ .desc = "MWAIT 0x30",
+ .driver_data = (void *) 0x30,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 100,
+ .power_usage = 250,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C5 */ },
+ { /* MWAIT C6 */
+ .name = "ATM-C6",
+ .desc = "MWAIT 0x40",
+ .driver_data = (void *) 0x40,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 150,
+ .target_residency = 800,
+ .enter = NULL }, /* disabled */
+};
+
+/*
+ * choose_tunable_substate()
+ *
+ * Run-time decision on which C-state substate to invoke
+ * If power_policy = 0, choose shallowest substate (0)
+ * If power_policy = 15, choose deepest substate
+ * If power_policy = middle, choose middle substate etc.
+ */
+static int choose_tunable_substate(int cstate)
+{
+ unsigned int num_substates;
+ unsigned int substate_choice;
+
+ power_policy &= 0xF; /* valid range: 0-15 */
+ cstate &= 7; /* valid range: 0-7 */
+
+ num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK;
+
+ if (num_substates <= 1)
+ return 0;
+
+ substate_choice = ((power_policy + (power_policy + 1) *
+ (num_substates - 1)) / 16);
+
+ return substate_choice;
+}
+
+/*
+ * choose_zero_substate()
+ */
+static int choose_zero_substate(int cstate)
+{
+ return 0;
+}
+
+/**
+ * intel_idle
+ * @dev: cpuidle_device
+ * @state: cpuidle state
+ *
+ */
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+{
+ unsigned long ecx = 1; /* break on interrupt flag */
+ unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+ unsigned int cstate;
+ ktime_t kt_before, kt_after;
+ s64 usec_delta;
+ int cpu = smp_processor_id();
+
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+
+ eax = eax + (choose_substate)(cstate);
+
+ local_irq_disable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ kt_before = ktime_get_real();
+
+ stop_critical_timings();
+#ifndef MODULE
+ trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+#endif
+ if (!need_resched()) {
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+
+ start_critical_timings();
+
+ kt_after = ktime_get_real();
+ usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
+
+ local_irq_enable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+ return usec_delta;
+}
+
+/*
+ * intel_idle_probe()
+ */
+static int intel_idle_probe(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (max_cstate == 0) {
+ pr_debug(PREFIX "disabled\n");
+ return -EPERM;
+ }
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ if (!boot_cpu_has(X86_FEATURE_MWAIT))
+ return -ENODEV;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return -ENODEV;
+#ifdef DEBUG
+ if (substates == 0) /* can override via modparam */
+#endif
+ substates = edx;
+
+ pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates);
+
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = 0xFFFFFFFF;
+
+ if (boot_cpu_data.x86 != 6) /* family 6 */
+ return -ENODEV;
+
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x1A: /* Core i7, Xeon 5500 series */
+ case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
+ case 0x1F: /* Core i7 and i5 Processor - Nehalem */
+ case 0x2E: /* Nehalem-EX Xeon */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
+
+ case 0x25: /* Westmere */
+ case 0x2C: /* Westmere */
+ cpuidle_state_table = nehalem_cstates;
+ choose_substate = choose_tunable_substate;
+ break;
+
+ case 0x1C: /* 28 - Atom Processor */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ cpuidle_state_table = atom_cstates;
+ choose_substate = choose_zero_substate;
+ break;
+#ifdef FUTURE_USE
+ case 0x17: /* 23 - Core 2 Duo */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+#endif
+
+ default:
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ pr_debug(PREFIX "v" INTEL_IDLE_VERSION
+ " model 0x%X\n", boot_cpu_data.x86_model);
+
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+ return 0;
+}
+
+/*
+ * intel_idle_cpuidle_devices_uninit()
+ * unregister, free cpuidle_devices
+ */
+static void intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(intel_idle_cpuidle_devices);
+ return;
+}
+/*
+ * intel_idle_cpuidle_devices_init()
+ * allocate, initialize, register cpuidle_devices
+ */
+static int intel_idle_cpuidle_devices_init(void)
+{
+ int i, cstate;
+ struct cpuidle_device *dev;
+
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+
+ dev->state_count = 1;
+
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
+
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
+
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL) {
+ /* does the driver not know about the state? */
+ if (*cpuidle_state_table[cstate].name == '\0')
+ pr_debug(PREFIX "unaware of model 0x%x"
+ " MWAIT %d please"
+ " contact lenb@kernel.org",
+ boot_cpu_data.x86_model, cstate);
+ continue;
+ }
+
+ if ((cstate > 2) &&
+ !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle"
+ " states deeper than C2");
+
+ dev->states[dev->state_count] = /* structure copy */
+ cpuidle_state_table[cstate];
+
+ dev->state_count += 1;
+ }
+
+ dev->cpu = i;
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
+ i);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+
+static int __init intel_idle_init(void)
+{
+ int retval;
+
+ retval = intel_idle_probe();
+ if (retval)
+ return retval;
+
+ retval = cpuidle_register_driver(&intel_idle_driver);
+ if (retval) {
+ printk(KERN_DEBUG PREFIX "intel_idle yielding to %s\n",
+ cpuidle_get_driver()->name);
+ return retval;
+ }
+
+ retval = intel_idle_cpuidle_devices_init();
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void __exit intel_idle_exit(void)
+{
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+
+ return;
+}
+
+module_init(intel_idle_init);
+module_exit(intel_idle_exit);
+
+module_param(power_policy, int, 0644);
+module_param(max_cstate, int, 0444);
+#ifdef DEBUG
+module_param(substates, int, 0444);
+#endif
+
+MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
+MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
+MODULE_LICENSE("GPL");
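
The choose_tunable_substate() helper in the new driver maps power_policy (0 = max performance, 15 = max power saving) onto the substates CPUID reports for a C-state. A standalone worked example of that formula, using illustrative values (4 substates):

    #include <stdio.h>

    /* Same arithmetic as choose_tunable_substate() above. */
    static int choose_substate(int power_policy, unsigned int num_substates)
    {
            if (num_substates <= 1)
                    return 0;
            return (power_policy + (power_policy + 1) * (num_substates - 1)) / 16;
    }

    int main(void)
    {
            /* With 4 substates: policy 0 -> substate 0 (shallowest),
             * policy 7 -> substate 1, policy 15 -> substate 3 (deepest). */
            printf("%d %d %d\n",
                   choose_substate(0, 4),
                   choose_substate(7, 4),
                   choose_substate(15, 4));
            return 0;
    }
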
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206..adaefabc40e 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
"and will not be available in the new firewire driver stack. "
"Try libraw1394 based programs instead.\n", current->comm);
- return 0;
+ return nonseekable_open(inode, file);
}
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
static const struct file_operations dv1394_fops=
{
.owner = THIS_MODULE,
- .poll = dv1394_poll,
+ .poll = dv1394_poll,
.unlocked_ioctl = dv1394_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = dv1394_compat_ioctl,
#endif
.mmap = dv1394_mmap,
.open = dv1394_open,
- .write = dv1394_write,
- .read = dv1394_read,
+ .write = dv1394_write,
+ .read = dv1394_read,
.release = dv1394_release,
- .fasync = dv1394_fasync,
+ .fasync = dv1394_fasync,
+ .llseek = no_llseek,
};
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e2..b563d5e9fa2 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
file->private_data = fi;
- return 0;
+ return nonseekable_open(inode, file);
}
static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
+ .llseek = no_llseek,
};
static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a0567..a42bd6893bc 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
ctx->current_ctx = NULL;
file->private_data = ctx;
- return 0;
+ return nonseekable_open(inode, file);
}
static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
.poll = video1394_poll,
.mmap = video1394_mmap,
.open = video1394_open,
- .release = video1394_release
+ .release = video1394_release,
+ .llseek = no_llseek,
};
/*** HOTPLUG STUFF **********************************************************/
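
The three ieee1394 hunks above apply one pattern: .open returns nonseekable_open() and .llseek is set to no_llseek, so lseek() on these character devices fails instead of silently succeeding. A minimal sketch of the pattern for a generic char device; the my_dev_* names are placeholders:

    #include <linux/fs.h>
    #include <linux/module.h>

    static int my_dev_open(struct inode *inode, struct file *file)
    {
            /* per-open setup would go here */
            return nonseekable_open(inode, file);   /* mark the file non-seekable */
    }

    static const struct file_operations my_dev_fops = {
            .owner  = THIS_MODULE,
            .open   = my_dev_open,
            .llseek = no_llseek,                    /* lseek() returns -ESPIPE */
    };
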
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40..89d70de5e23 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,9 +43,11 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
+source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e449624..9cc7a47d3e6 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,9 +1,11 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86..b930b8110a6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
-static int next_port;
struct cma_device {
struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
+ id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
id->route.num_paths = num_paths;
return 0;
err:
@@ -1970,47 +1969,33 @@ err1:
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
- struct rdma_bind_list *bind_list;
- int port, ret, low, high;
-
- bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
- if (!bind_list)
- return -ENOMEM;
-
-retry:
- /* FIXME: add proper port randomization per like inet_csk_get_port */
- do {
- ret = idr_get_new_above(ps, bind_list, next_port, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
+ static unsigned int last_used_port;
+ int low, high, remaining;
+ unsigned int rover;
inet_get_local_port_range(&low, &high);
- if (port > high) {
- if (next_port != low) {
- idr_remove(ps, port);
- next_port = low;
- goto retry;
- }
- ret = -EADDRNOTAVAIL;
- goto err2;
+ remaining = (high - low) + 1;
+ rover = net_random() % remaining + low;
+retry:
+ if (last_used_port != rover &&
+ !idr_find(ps, (unsigned short) rover)) {
+ int ret = cma_alloc_port(ps, id_priv, rover);
+ /*
+ * Remember previously used port number in order to avoid
+ * re-using same port immediately after it is closed.
+ */
+ if (!ret)
+ last_used_port = rover;
+ if (ret != -EADDRNOTAVAIL)
+ return ret;
}
-
- if (port == high)
- next_port = low;
- else
- next_port = port + 1;
-
- bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
- cma_bind_port(bind_list, id_priv);
- return 0;
-err2:
- idr_remove(ps, port);
-err1:
- kfree(bind_list);
- return ret;
+ if (--remaining) {
+ rover++;
+ if ((rover < low) || (rover > high))
+ rover = low;
+ goto retry;
+ }
+ return -EADDRNOTAVAIL;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
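
The rewritten cma_alloc_any_port() above drops the shared next_port counter in favour of a random starting point and a wrap-around linear probe over the local port range, remembering only the last successfully used port. A simplified sketch of that search loop, with try_port() standing in for cma_alloc_port():

    #include <linux/errno.h>

    static int alloc_any_port(int low, int high, unsigned int random_seed,
                              int (*try_port)(unsigned short port))
    {
            int remaining = (high - low) + 1;
            unsigned int rover = random_seed % remaining + low;

            do {
                    int ret = try_port((unsigned short)rover);

                    if (ret != -EADDRNOTAVAIL)      /* bound it, or hit a hard error */
                            return ret;

                    if (++rover > (unsigned int)high)  /* wrap back to the bottom */
                            rover = low;
            } while (--remaining);

            return -EADDRNOTAVAIL;                  /* the whole range was in use */
    }
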
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
static int __init cma_init(void)
{
- int ret, low, high, remaining;
-
- get_random_bytes(&next_port, sizeof next_port);
- inet_get_local_port_range(&low, &high);
- remaining = (high - low) + 1;
- next_port = ((unsigned int) next_port % remaining) + low;
+ int ret;
cma_wq = create_singlethread_workqueue("rdma_cm");
if (!cma_wq)
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acd..a565af5c2d2 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
#include <rdma/ib_verbs.h>
-int ib_device_register_sysfs(struct ib_device *device);
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba415333..a19effad081 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
*/
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
int ret;
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
goto out;
}
- ret = ib_device_register_sysfs(device);
+ ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba..ef1304f151d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
}
if (mad_reg_req) {
- reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
+ reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
if (!reg_req) {
ret = ERR_PTR(-ENOMEM);
goto error3;
}
- /* Make a copy of the MAD registration request */
- memcpy(reg_req, mad_reg_req, sizeof *reg_req);
}
/* Now, fill in the various structures */
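
Both the rdma_set_ib_paths() and ib_register_mad_agent() hunks above collapse a kmalloc()-plus-memcpy() pair into a single kmemdup() call. A minimal sketch of the resulting shape, assuming a made-up struct foo and dup_foo() purely for illustration:

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: struct foo and dup_foo() are not part of the patch. */
struct foo {
	int id;
	char name[16];
};

static struct foo *dup_foo(const struct foo *src)
{
	/* One call replaces the open-coded allocate-then-copy pattern;
	 * returns NULL on allocation failure, same as kmalloc(). */
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}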
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8..3627300e2a1 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
return NULL;
}
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct ib_port *p;
struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
goto err_free_pkey;
+ if (port_callback) {
+ ret = port_callback(device, port_num, &p->kobj);
+ if (ret)
+ goto err_remove_pkey;
+ }
+
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
+err_remove_pkey:
+ sysfs_remove_group(&p->kobj, &p->pkey_group);
+
err_free_pkey:
for (i = 0; i < attr.pkey_tbl_len; ++i)
kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
.attrs = iw_proto_stats_attrs,
};
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct device *class_dev = &device->dev;
int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
}
if (device->node_type == RDMA_NODE_IB_SWITCH) {
- ret = add_port(device, 0);
+ ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
} else {
for (i = 1; i <= device->phys_port_cnt; ++i) {
- ret = add_port(device, i);
+ ret = add_port(device, i, port_callback);
if (ret)
goto err_put;
}
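
The sysfs changes above thread an optional per-port callback from ib_register_device() down to add_port(), so a low-level driver can hang extra attributes off each port kobject as it is created. A hypothetical caller might look like the sketch below; my_port_callback and hw_port_attr_group are invented names (the attribute_group is assumed to be defined elsewhere in the driver), and only the ib_register_device() signature comes from this patch.

#include <rdma/ib_verbs.h>
#include <linux/sysfs.h>

/* Hypothetical hook: called once per port with that port's kobject. */
static int my_port_callback(struct ib_device *ibdev, u8 port_num,
			    struct kobject *port_kobj)
{
	return sysfs_create_group(port_kobj, &hw_port_attr_group);
}

	/* At registration time. Drivers with nothing to add simply pass
	 * NULL, as the amso1100 and cxgb3 hunks further down do. */
	ret = ib_register_device(ibdev, my_port_callback);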
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460..08f948df8fa 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
if (!len)
return 0;
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
- kfree(data);
- return -EFAULT;
- }
+ data = memdup_user((void __user *)(unsigned long)src, len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
*dest = data;
return 0;
@@ -1181,7 +1176,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
file->filp = filp;
file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
- return 0;
+ return nonseekable_open(inode, filp);
}
static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1224,7 @@ static const struct file_operations ucm_fops = {
.release = ib_ucm_close,
.write = ib_ucm_write,
.poll = ib_ucm_poll,
+ .llseek = no_llseek,
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
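
The ib_ucm_alloc_data() hunk above swaps the open-coded kmalloc()/copy_from_user()/kfree() sequence for memdup_user(), which returns an ERR_PTR on failure rather than NULL. A minimal sketch of the resulting shape, with the function name invented for illustration:

#include <linux/string.h>
#include <linux/err.h>

static int copy_blob_from_user(const void **dest, u64 src, u32 len)
{
	void *data;

	if (!len)
		return 0;

	data = memdup_user((void __user *)(unsigned long)src, len);
	if (IS_ERR(data))
		return PTR_ERR(data);	/* -EFAULT or -ENOMEM */

	*dest = data;
	return 0;
}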
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121..ac7edc24165 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- return 0;
+
+ return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
+ .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c..6babb72b39f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret = 0;
+ int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
+ ret = nonseekable_open(inode, filp);
+
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
.compat_ioctl = ib_umad_compat_ioctl,
#endif
.open = ib_umad_open,
- .release = ib_umad_close
+ .release = ib_umad_close,
+ .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
filp->private_data = port;
- return 0;
+ return nonseekable_open(inode, filp);
fail:
kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
- .release = ib_umad_sm_close
+ .release = ib_umad_sm_close,
+ .llseek = no_llseek,
};
static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb352625442..ec83e9fe387 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
.read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
- .fasync = ib_uverbs_event_fasync
+ .fasync = ib_uverbs_event_fasync,
+ .llseek = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
filp->private_data = file;
- return 0;
+ return nonseekable_open(inode, filp);
err_module:
module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
.owner = THIS_MODULE,
.write = ib_uverbs_write,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
.write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static struct ib_client uverbs_client = {
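
All of the character-device conversions above (ucm, ucma, umad, uverbs) follow one pattern: return nonseekable_open() from the .open method and set .llseek = no_llseek in the file_operations, so lseek() on these nodes fails with -ESPIPE instead of silently succeeding. A stripped-down sketch of that pattern, with my_open/my_fops as placeholder names:

#include <linux/fs.h>
#include <linux/module.h>

static int my_open(struct inode *inode, struct file *filp)
{
	/* ... driver-specific setup ... */
	return nonseekable_open(inode, filp);	/* mark the file non-seekable */
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= my_open,
	.llseek	= no_llseek,	/* any lseek() now returns -ESPIPE */
};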
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f9836..6ae698e6877 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
struct sp_chunk {
struct sp_chunk *next;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 head;
u16 shared_ptr[0];
};
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e9..78d247ec696 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
return -ENOMEM;
new_head->dma_addr = dma_addr;
- pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+ dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- pci_unmap_addr(root, mapping));
+ dma_unmap_addr(root, mapping));
root = next;
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f41..49e0e8533f7 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+ mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
- pci_unmap_addr_set(mq, mapping, mq->host_dma);
+ dma_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94..fc1b9a7cec4 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
u8 __iomem *adapter;
} msg_pool;
dma_addr_t host_dma;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e..aeebc4d37e3 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.iwcm->create_listen = c2_service_create;
dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto out_free_iwcm;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96..bf189987711 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
struct c2_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef6..85cfae4cad7 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail1;
}
- pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+ dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail2;
}
- pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+ dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
(unsigned long long) c2dev->aeq.host_dma);
c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
bail3:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+ q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
bail2:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+ q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
c2dev->aeq.msg_pool.host,
- pci_unmap_addr(&c2dev->aeq, mapping));
+ dma_unmap_addr(&c2dev->aeq, mapping));
/* Free the verbs reply queue */
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
c2dev->rep_vq.msg_pool.host,
- pci_unmap_addr(&c2dev->rep_vq, mapping));
+ dma_unmap_addr(&c2dev->rep_vq, mapping));
/* Free the MQ shared pointer pool */
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
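
The amso1100 hunks above (and the cxgb3 ones that follow) move from the PCI-specific DECLARE_PCI_UNMAP_ADDR helpers to the generic dma_unmap_addr API. The sketch below shows the canonical streaming-DMA use of these helpers in isolation; struct rx_buf and the function names are placeholders, and the drivers in this patch apply the same accessors to coherent allocations instead.

#include <linux/dma-mapping.h>

struct rx_buf {
	void *data;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles away if the arch needs no unmap state */
};

static int rx_buf_map(struct device *dev, struct rx_buf *b, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, b->data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EIO;
	dma_unmap_addr_set(b, mapping, dma);	/* remember it for unmap time */
	return 0;
}

static void rx_buf_unmap(struct device *dev, struct rx_buf *b, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(b, mapping), len, DMA_FROM_DEVICE);
}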
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1..005b7b52bc1 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
kfree(cq->sw_queue);
return -ENOMEM;
}
- pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
- pci_unmap_addr(cq, mapping));
+ dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
err = -ENOMEM;
goto err;
}
- pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+ dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+ dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c56..8f0caf7d448 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
union t3_wr *workq; /* the work request queue */
dma_addr_t dma_addr; /* pci bus address of the workq */
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
void __iomem *doorbell;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c..e5ddb63e7d2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
struct t3_wq {
union t3_wr *queue; /* DMA accessible memory */
dma_addr_t dma_addr; /* DMA address for HW */
- DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */
+ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
u32 error; /* 1 once we go to ERROR */
u32 qpid;
u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
u32 wptr;
u32 size_log2;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t3_cqe *queue;
struct t3_cqe *sw_queue;
u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30..8e77dc543dd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef0329627..ebfb117ba68 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
return -EIO;
}
error = l2t_send(tdev, skb, l2e);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
put_ep(&ep->com);
}
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
static int status2errno(int status)
{
switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
/*
* All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
*/
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_TX_DMA_ACK] = tx_ack,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ void *ep;
+ struct t3cdev *tdev;
+ int ret;
+
+ while ((skb = skb_dequeue(&rxq))) {
+ ep = *((void **) (skb->cb));
+ tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+ ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+
+ /*
+ * ep was referenced in sched(), and is freed here.
+ */
+ put_ep((struct iwch_ep_common *)ep);
+ }
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_TX_DMA_ACK] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_RDMA_EC_STATUS] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
if (!workq)
return -ENOMEM;
- /*
- * All upcalls from the T3 Core go to sched() to
- * schedule the processing on a work queue.
- */
- t3c_handlers[CPL_ACT_ESTABLISH] = sched;
- t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
- t3c_handlers[CPL_RX_DATA] = sched;
- t3c_handlers[CPL_TX_DMA_ACK] = sched;
- t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
- t3c_handlers[CPL_ABORT_RPL] = sched;
- t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
- t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
- t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
- t3c_handlers[CPL_PASS_ESTABLISH] = sched;
- t3c_handlers[CPL_PEER_CLOSE] = sched;
- t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
- t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
- t3c_handlers[CPL_RDMA_TERMINATE] = sched;
- t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
- t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
- /*
- * These are the real handlers that are called from a
- * work queue.
- */
- work_handlers[CPL_ACT_ESTABLISH] = act_establish;
- work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
- work_handlers[CPL_RX_DATA] = rx_data;
- work_handlers[CPL_TX_DMA_ACK] = tx_ack;
- work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
- work_handlers[CPL_ABORT_RPL] = abort_rpl;
- work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
- work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
- work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
- work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
- work_handlers[CPL_PEER_CLOSE] = peer_close;
- work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
- work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
- work_handlers[CPL_RDMA_TERMINATE] = terminate;
- work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
return 0;
}
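
The iwch_cm.c rework above turns the two CPL handler tables from run-time assignments in iwch_cm_init() into designated array initializers, which lets work_handlers become static const and keeps each dispatch table next to the handlers it names. The sketch below shows the construct by itself; the opcode enum, handler type and handler bodies are invented for illustration.

#include <stdio.h>

enum opcode { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPS };

typedef int (*handler_fn)(int arg);

static int handle_open(int arg)  { return printf("open %d\n", arg); }
static int handle_close(int arg) { return printf("close %d\n", arg); }

/* Designated initializers: entries may appear in any order and
 * unlisted opcodes are implicitly NULL, just like the CPL tables above. */
static const handler_fn handlers[NUM_OPS] = {
	[OP_OPEN]  = handle_open,
	[OP_CLOSE] = handle_close,
};

int main(void)
{
	enum opcode op = OP_CLOSE;

	if (handlers[op])
		handlers[op](42);
	return 0;
}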
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a2..fca0b4b747e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 00000000000..ccb85eaaad7
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
+config INFINIBAND_CXGB4
+ tristate "Chelsio T4 RDMA Driver"
+ depends on CHELSIO_T4 && INET
+ select GENERIC_ALLOCATOR
+ ---help---
+ This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
+ 10GbE adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 00000000000..e31a499f017
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Idrivers/net/cxgb4
+
+obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
+
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 00000000000..30ce0a8eca0
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+
+#include "iw_cxgb4.h"
+
+static char *states[] = {
+ "idle",
+ "listen",
+ "connecting",
+ "mpa_wait_req",
+ "mpa_req_sent",
+ "mpa_req_rcvd",
+ "mpa_rep_sent",
+ "fpdu_mode",
+ "aborting",
+ "closing",
+ "moribund",
+ "dead",
+ NULL,
+};
+
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
+static int enable_tcp_timestamps;
+module_param(enable_tcp_timestamps, int, 0644);
+MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
+
+static int enable_tcp_sack;
+module_param(enable_tcp_sack, int, 0644);
+MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
+
+static int enable_tcp_window_scaling = 1;
+module_param(enable_tcp_window_scaling, int, 0644);
+MODULE_PARM_DESC(enable_tcp_window_scaling,
+ "Enable tcp window scaling (default=1)");
+
+int c4iw_debug;
+module_param(c4iw_debug, int, 0644);
+MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+
+static int peer2peer;
+module_param(peer2peer, int, 0644);
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+
+static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+module_param(p2p_type, int, 0644);
+MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
+ "1=RDMA_READ 0=RDMA_WRITE (default 1)");
+
+static int ep_timeout_secs = 60;
+module_param(ep_timeout_secs, int, 0644);
+MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
+ "in seconds (default=60)");
+
+static int mpa_rev = 1;
+module_param(mpa_rev, int, 0644);
+MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
+ "1 is spec compliant. (default=1)");
+
+static int markers_enabled;
+module_param(markers_enabled, int, 0644);
+MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
+
+static int crc_enabled = 1;
+module_param(crc_enabled, int, 0644);
+MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
+
+static int rcv_win = 256 * 1024;
+module_param(rcv_win, int, 0644);
+MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int snd_win = 32 * 1024;
+module_param(snd_win, int, 0644);
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+
+static struct workqueue_struct *workq;
+
+static struct sk_buff_head rxq;
+
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
+static void ep_timeout(unsigned long arg);
+static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
+static void start_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (timer_pending(&ep->timer)) {
+ PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
+ del_timer_sync(&ep->timer);
+ } else
+ c4iw_get_ep(&ep->com);
+ ep->timer.expires = jiffies + ep_timeout_secs * HZ;
+ ep->timer.data = (unsigned long)ep;
+ ep->timer.function = ep_timeout;
+ add_timer(&ep->timer);
+}
+
+static void stop_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (!timer_pending(&ep->timer)) {
+ printk(KERN_ERR "%s timer stopped when its not running! "
+ "ep %p state %u\n", __func__, ep, ep->com.state);
+ WARN_ON(1);
+ return;
+ }
+ del_timer_sync(&ep->timer);
+ c4iw_put_ep(&ep->com);
+}
+
+static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
+{
+ struct cpl_tid_release *req;
+
+ skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ if (!skb)
+ return;
+ req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ c4iw_ofld_send(rdev, skb);
+ return;
+}
+
+static void set_emss(struct c4iw_ep *ep, u16 opt)
+{
+ ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+ ep->mss = ep->emss;
+ if (GET_TCPOPT_TSTAMP(opt))
+ ep->emss -= 12;
+ if (ep->emss < 128)
+ ep->emss = 128;
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ ep->mss, ep->emss);
+}
+
+static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
+{
+ unsigned long flags;
+ enum c4iw_ep_state state;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ state = epc->state;
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return state;
+}
+
+static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ epc->state = new;
+}
+
+static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ __state_set(epc, new);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return;
+}
+
+static void *alloc_ep(int size, gfp_t gfp)
+{
+ struct c4iw_ep_common *epc;
+
+ epc = kzalloc(size, gfp);
+ if (epc) {
+ kref_init(&epc->kref);
+ spin_lock_init(&epc->lock);
+ init_waitqueue_head(&epc->waitq);
+ }
+ PDBG("%s alloc ep %p\n", __func__, epc);
+ return epc;
+}
+
+void _c4iw_free_ep(struct kref *kref)
+{
+ struct c4iw_ep *ep;
+
+ ep = container_of(kref, struct c4iw_ep, com.kref);
+ PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ }
+ kfree(ep);
+}
+
+static void release_ep_resources(struct c4iw_ep *ep)
+{
+ set_bit(RELEASE_RESOURCES, &ep->com.flags);
+ c4iw_put_ep(&ep->com);
+}
+
+static int status2errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_NONE:
+ return 0;
+ case CPL_ERR_CONN_RESET:
+ return -ECONNRESET;
+ case CPL_ERR_ARP_MISS:
+ return -EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return -ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * Try and reuse skbs already allocated...
+ */
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
+{
+ if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
+ skb_trim(skb, 0);
+ skb_get(skb);
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ }
+ return skb;
+}
+
+static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
+ __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = peer_ip,
+ .saddr = local_ip,
+ .tos = tos}
+ },
+ .proto = IPPROTO_TCP,
+ .uli_u = {
+ .ports = {
+ .sport = local_port,
+ .dport = peer_port}
+ }
+ };
+
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ return NULL;
+ return rt;
+}
+
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, handle);
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for an active open.
+ */
+static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+ printk(KERN_ERR MOD "ARP failure duing connect\n");
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
+ * and send it along.
+ */
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct c4iw_rdev *rdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ PDBG("%s rdev %p\n", __func__, rdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(rdev, skb);
+}
+
+static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ unsigned int flowclen = 80;
+ struct fw_flowc_wr *flowc;
+ int i;
+
+ skb = get_skb(skb, flowclen, GFP_KERNEL);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID(ep->hwtid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(0);
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(ep->emss);
+ /* Pad WR to 16 byte boundary */
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ for (i = 0; i < 9; i++) {
+ flowc->mnemval[i].r4[0] = 0;
+ flowc->mnemval[i].r4[1] = 0;
+ flowc->mnemval[i].r4[2] = 0;
+ }
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+{
+ struct cpl_close_con_req *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(NULL, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+ ep->hwtid));
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ struct cpl_abort_req *req;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(skb, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+ req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_connect(struct c4iw_ep *ep)
+{
+ struct cpl_act_open_req *req;
+ struct sk_buff *skb;
+ u64 opt0;
+ u32 opt2;
+ unsigned int mtu_idx;
+ int wscale;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+ t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = 0;
+ req->opt2 = cpu_to_be32(opt2);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ BUG_ON(skb_cloned(skb));
+
+ mpalen = sizeof(*mpa) + ep->plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+ skb = get_skb(skb, wrlen, GFP_KERNEL);
+ if (!skb) {
+ connect_reply_upcall(ep, -ENOMEM);
+ return;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
+ mpa->flags = (crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->private_data_size = htons(ep->plen);
+ mpa->revision = mpa_rev;
+
+ if (ep->plen)
+ memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ start_ep_timer(ep);
+ state_set(&ep->com, MPA_REQ_SENT);
+ ep->mpa_attr.initiator = 1;
+ return;
+}
+
+static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = MPA_REJECT;
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb again. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ ep->mpa_skb = skb;
+ state_set(&ep->com, MPA_REP_SENT);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+
+ dst_confirm(ep->dst);
+
+ /* setup the hwtid for this connection */
+ ep->hwtid = tid;
+ cxgb4_insert_tid(t, ep, tid);
+
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ /* dealloc the atid */
+ cxgb4_free_atid(t, atid);
+
+ /* start MPA negotiation */
+ send_flowc(ep, NULL);
+ send_mpa_req(ep, skb);
+
+ return 0;
+}
+
+static void close_complete_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ if (ep->com.cm_id) {
+ PDBG("close complete delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ close_complete_upcall(ep);
+ state_set(&ep->com, ABORTING);
+ return send_abort(ep, skb, gfp);
+}
+
+static void peer_close_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_DISCONNECT;
+ if (ep->com.cm_id) {
+ PDBG("peer close delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
+static void peer_abort_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ event.status = -ECONNRESET;
+ if (ep->com.cm_id) {
+ PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
+ ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_reply_upcall(struct c4iw_ep *ep, int status)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REPLY;
+ event.status = status;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+
+ if ((status == 0) || (status == -ECONNREFUSED)) {
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ }
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+ if (status < 0) {
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_request_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ event.provider_data = ep;
+ if (state_read(&ep->parent_ep->com) != DEAD) {
+ c4iw_get_ep(&ep->com);
+ ep->parent_ep->com.cm_id->event_handler(
+ ep->parent_ep->com.cm_id,
+ &event);
+ }
+ c4iw_put_ep(&ep->parent_ep->com);
+ ep->parent_ep = NULL;
+}
+
+static void established_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_ESTABLISHED;
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
+static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
+{
+ struct cpl_rx_data_ack *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+ return 0;
+ }
+
+ req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ ep->hwtid));
+ req->credit_dack = cpu_to_be32(credits);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+ return credits;
+}
+
+static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ int err;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ /*
+ * Stop mpa timer. If it expired, then the state has
+ * changed and we bail since ep_timeout already aborted
+ * the connection.
+ */
+ stop_ep_timer(ep);
+ if (state_read(&ep->com) != MPA_REQ_SENT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * if we don't even have the mpa message, then bail.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /* Validate MPA header. */
+ if (mpa->revision != mpa_rev) {
+ err = -EPROTO;
+ goto err;
+ }
+ if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ /*
+ * Fail if plen does not account for the packet size.
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ if (mpa->flags & MPA_REJECT) {
+ err = -ECONNREFUSED;
+ goto err;
+ }
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start reply message including private data. And
+ * the MPA header is valid.
+ */
+ state_set(&ep->com, FPDU_MODE);
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
+
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
+
+ /* bind QP and TID with INIT_WR */
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err;
+ goto out;
+err:
+ abort_connection(ep, skb, GFP_KERNEL);
+out:
+ connect_reply_upcall(ep, err);
+ return;
+}
+
+static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) != MPA_REQ_WAIT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ stop_ep_timer(ep);
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+
+ /*
+ * Copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * If we don't even have the mpa message, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ stop_ep_timer(ep);
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /*
+ * Validate MPA Header.
+ */
+ if (mpa->revision != mpa_rev) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ /*
+ * Fail if plen does not account for the packet size.
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start request message including private data.
+ */
+ ep->mpa_attr.initiator = 0;
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type);
+
+ state_set(&ep->com, MPA_REQ_RCVD);
+
+ /* drive upcall */
+ connect_request_upcall(ep);
+ return;
+}
+
+static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rx_data *hdr = cplhdr(skb);
+ unsigned int dlen = ntohs(hdr->len);
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ skb_pull(skb, sizeof(*hdr));
+ skb_trim(skb, dlen);
+
+ ep->rcv_seq += dlen;
+ BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
+
+ /* update RX credits */
+ update_rx_credits(ep, dlen);
+
+ switch (state_read(&ep->com)) {
+ case MPA_REQ_SENT:
+ process_mpa_reply(ep, skb);
+ break;
+ case MPA_REQ_WAIT:
+ process_mpa_request(ep, skb);
+ break;
+ case MPA_REP_SENT:
+ break;
+ default:
+ printk(KERN_ERR MOD "%s Unexpected streaming data."
+ " ep %p state %d tid %u\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid);
+
+ /*
+ * The ep will timeout and inform the ULP of the failure.
+ * See ep_timeout().
+ */
+ break;
+ }
+ return 0;
+}
+
+static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ unsigned int tid = GET_TID(rpl);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(!ep);
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case ABORTING:
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ default:
+		printk(KERN_ERR MOD "%s ep %p state %d\n",
+ __func__, ep, ep->com.state);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+ return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+ status != CPL_ERR_ARP_MISS;
+}
+
+static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status)));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ status, status2errno(status));
+
+ if (status == CPL_ERR_RTX_NEG_ADVICE) {
+ printk(KERN_WARNING MOD "Connection problems for atid %u\n",
+ atid);
+ return 0;
+ }
+
+ connect_reply_upcall(ep, status2errno(status));
+ state_set(&ep->com, DEAD);
+
+ if (status && act_open_has_tid(status))
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+
+ return 0;
+}
+
+static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ if (!ep) {
+ printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
+ return 0;
+ }
+ PDBG("%s ep %p status %d error %d\n", __func__, ep,
+ rpl->status, status2errno(rpl->status));
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+
+ return 0;
+}
+
+static int listen_stop(struct c4iw_listen_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_close_listsvr_req *req;
+
+ PDBG("%s ep %p\n", __func__, ep);
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
+ ep->stid));
+ req->reply_ctrl = cpu_to_be16(
+ QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ return c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ PDBG("%s ep %p\n", __func__, ep);
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+ return 0;
+}
+
+static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
+ struct cpl_pass_accept_req *req)
+{
+ struct cpl_pass_accept_rpl *rpl;
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2;
+ int wscale;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(*rpl));
+ skb_get(skb);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
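+	/*
+	 * Build the TCB option words for the accept reply: opt0 carries
+	 * keepalive, window scale, MSS index, L2T index, TX channel, SMAC
+	 * selection, DSCP and receive buffer size; opt2 selects the RSS
+	 * queue and the negotiated TCP options.
+	 */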
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+
+ if (enable_tcp_timestamps && req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack && req->tcpopt.sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+
+ rpl = cplhdr(skb);
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+ rpl->opt0 = cpu_to_be64(opt0);
+ rpl->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+
+ return;
+}
+
+static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
+ struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
+ peer_ip);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(struct cpl_tid_release));
+ skb_get(skb);
+ release_tid(&dev->rdev, hwtid, skb);
+ return;
+}
+
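+/*
+ * The ingress packet follows the CPL message in the buffer: skip the
+ * Ethernet and IP headers to reach the TCP header and extract the
+ * connection 4-tuple (addresses and ports in network byte order).
+ */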
+static void get_4tuple(struct cpl_pass_accept_req *req,
+ __be32 *local_ip, __be32 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
+ int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
+ ntohs(tcp->dest));
+
+ *peer_ip = ip->saddr;
+ *local_ip = ip->daddr;
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+
+ return;
+}
+
+static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *child_ep, *parent_ep;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int hwtid = GET_TID(req);
+ struct dst_entry *dst;
+ struct l2t_entry *l2t;
+ struct rtable *rt;
+ __be32 local_ip, peer_ip;
+ __be16 local_port, peer_port;
+ struct net_device *pdev;
+ u32 tx_chan, smac_idx;
+ u16 rss_qid;
+ u32 mtu;
+ int step;
+ int txq_idx;
+
+ parent_ep = lookup_stid(t, stid);
+ PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
+
+ get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+
+ if (state_read(&parent_ep->com) != LISTEN) {
+		printk(KERN_ERR MOD "%s - listening ep not in LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ /* Find output route */
+ rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
+ GET_POPEN_TOS(ntohl(req->tos_stid)));
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->u.dst;
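+	/*
+	 * Derive the L2T entry, MTU, TX channel and queue indices from the
+	 * egress device: for loopback destinations look up the local port
+	 * device via ip_dev_find(), otherwise use the route's neighbour
+	 * device directly.
+	 */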
+ if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, peer_ip);
+ BUG_ON(!pdev);
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ pdev, 0);
+ mtu = pdev->mtu;
+ tx_chan = cxgb4_port_chan(pdev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(pdev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ dst->neighbour->dev, 0);
+ mtu = dst_mtu(dst);
+ tx_chan = cxgb4_port_chan(dst->neighbour->dev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(dst->neighbour->dev) * step];
+ }
+ if (!l2t) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ goto reject;
+ }
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ __func__);
+ cxgb4_l2t_release(l2t);
+ dst_release(dst);
+ goto reject;
+ }
+ state_set(&child_ep->com, CONNECTING);
+ child_ep->com.dev = dev;
+ child_ep->com.cm_id = NULL;
+ child_ep->com.local_addr.sin_family = PF_INET;
+ child_ep->com.local_addr.sin_port = local_port;
+ child_ep->com.local_addr.sin_addr.s_addr = local_ip;
+ child_ep->com.remote_addr.sin_family = PF_INET;
+ child_ep->com.remote_addr.sin_port = peer_port;
+ child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
+ c4iw_get_ep(&parent_ep->com);
+ child_ep->parent_ep = parent_ep;
+ child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->l2t = l2t;
+ child_ep->dst = dst;
+ child_ep->hwtid = hwtid;
+ child_ep->tx_chan = tx_chan;
+ child_ep->smac_idx = smac_idx;
+ child_ep->rss_qid = rss_qid;
+ child_ep->mtu = mtu;
+ child_ep->txq_idx = txq_idx;
+
+ PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ tx_chan, smac_idx, rss_qid);
+
+ init_timer(&child_ep->timer);
+ cxgb4_insert_tid(t, child_ep, hwtid);
+ accept_cr(child_ep, peer_ip, skb, req);
+ goto out;
+reject:
+ reject_cr(dev, hwtid, peer_ip, skb);
+out:
+ return 0;
+}
+
+static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ dst_confirm(ep->dst);
+ state_set(&ep->com, MPA_REQ_WAIT);
+ start_ep_timer(ep);
+ send_flowc(ep, skb);
+
+ return 0;
+}
+
+static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_peer_close *hdr = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ unsigned long flags;
+ int disconnect = 1;
+ int release = 0;
+ int closing = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(hdr);
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ dst_confirm(ep->dst);
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, CLOSING);
+ break;
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, CLOSING);
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+		 * We're gonna mark this puppy CLOSING, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REP_SENT:
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case FPDU_MODE:
+ start_timer = 1;
+ __state_set(&ep->com, CLOSING);
+ closing = 1;
+ peer_close_upcall(ep);
+ break;
+ case ABORTING:
+ disconnect = 0;
+ break;
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ disconnect = 0;
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ disconnect = 0;
+ break;
+ case DEAD:
+ disconnect = 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (closing) {
+ attrs.next_state = C4IW_QP_STATE_CLOSING;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is negative advice.
+ */
+static int is_neg_adv_abort(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *rpl_skb;
+ struct c4iw_qp_attributes attrs;
+ int ret;
+ int release = 0;
+ unsigned long flags;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ return 0;
+ }
+ spin_lock_irqsave(&ep->com.lock, flags);
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case CONNECTING:
+ break;
+ case MPA_REQ_WAIT:
+ stop_timer = 1;
+ break;
+ case MPA_REQ_SENT:
+ stop_timer = 1;
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REP_SENT:
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+ * We're gonna mark this puppy DEAD, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MORIBUND:
+ case CLOSING:
+ stop_timer = 1;
+ /*FALLTHROUGH*/
+ case FPDU_MODE:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ ret = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s - qp <- error failed!\n",
+ __func__);
+ }
+ peer_abort_upcall(ep);
+ break;
+ case ABORTING:
+ break;
+ case DEAD:
+ PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ return 0;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ dst_confirm(ep->dst);
+ if (ep->com.state != ABORTING) {
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
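+	/*
+	 * Acknowledge the ABORT_REQ with an ABORT_RPL that tells the
+	 * hardware not to send a RST segment.
+	 */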
+ rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ if (!rpl_skb) {
+ printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
+ __func__);
+ release = 1;
+ goto out;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
+out:
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ struct cpl_close_con_rpl *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(rpl);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+
+	BUG_ON(!ep);
+	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ /* The cm_id may be null if we failed to connect */
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if ((ep->com.cm_id) && (ep->com.qp)) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ case ABORTING:
+ case DEAD:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(term);
+
+ ep = lookup_tid(t, tid);
+
+ if (state_read(&ep->com) != FPDU_MODE)
+ return 0;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb_pull(skb, sizeof *term);
+ PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
+ skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
+ skb->len);
+ ep->com.qp->attr.terminate_msg_len = skb->len;
+ ep->com.qp->attr.is_terminate_local = 0;
+ return 0;
+}
+
+/*
+ * Upcall from the adapter indicating data has been transmitted.
+ * For us it's just the single MPA request or reply. We can now free
+ * the skb holding the mpa message.
+ */
+static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_fw4_ack *hdr = cplhdr(skb);
+ u8 credits = hdr->credits;
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ if (credits == 0) {
+		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
+		     __func__, ep, ep->hwtid, state_read(&ep->com));
+ return 0;
+ }
+
+ dst_confirm(ep->dst);
+ if (ep->mpa_skb) {
+ PDBG("%s last streaming msg ack ep %p tid %u state %u "
+ "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
+ state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ kfree_skb(ep->mpa_skb);
+ ep->mpa_skb = NULL;
+ }
+ return 0;
+}
+
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ int err;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) == DEAD) {
+ c4iw_put_ep(&ep->com);
+ return -ECONNRESET;
+ }
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ if (mpa_rev == 0)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ else {
+ err = send_mpa_reject(ep, pdata, pdata_len);
+ err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ }
+ c4iw_put_ep(&ep->com);
+ return 0;
+}
+
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
+ struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ if (state_read(&ep->com) == DEAD) {
+ err = -ECONNRESET;
+ goto err;
+ }
+
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ BUG_ON(!qp);
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -EINVAL;
+ goto err;
+ }
+
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.qp = qp;
+
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ird == 0)
+ ep->ird = 1;
+
+ PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+
+ /* bind QP to EP and move to RTS */
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ /* bind QP and TID with INIT_WR */
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_MAX_ORD;
+
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err1;
+ err = send_mpa_reply(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ if (err)
+ goto err1;
+
+ state_set(&ep->com, FPDU_MODE);
+ established_upcall(ep);
+ c4iw_put_ep(&ep->com);
+ return 0;
+err1:
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ cm_id->rem_ref(cm_id);
+err:
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_ep *ep;
+ struct rtable *rt;
+ struct net_device *pdev;
+ int step;
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ err = -EINVAL;
+ goto out;
+ }
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ init_timer(&ep->timer);
+ ep->plen = conn_param->private_data_len;
+ if (ep->plen)
+ memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
+ conn_param->private_data, ep->plen);
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ord == 0)
+ ep->ord = 1;
+
+ cm_id->add_ref(cm_id);
+ ep->com.dev = dev;
+ ep->com.cm_id = cm_id;
+ ep->com.qp = get_qhp(dev, conn_param->qpn);
+ BUG_ON(!ep->com.qp);
+ PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ ep->com.qp, cm_id);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port));
+
+ /* find a route */
+ rt = find_route(dev,
+ cm_id->local_addr.sin_addr.s_addr,
+ cm_id->remote_addr.sin_addr.s_addr,
+ cm_id->local_addr.sin_port,
+ cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->u.dst;
+
+ /* get a l2t entry */
+ if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ pdev, 0);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ ep->dst->neighbour->dev, 0);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(ep->dst->neighbour->dev) * step];
+ }
+ if (!ep->l2t) {
+ printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+ ep->com.local_addr = cm_id->local_addr;
+ ep->com.remote_addr = cm_id->remote_addr;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_listen_ep *ep;
+
+ might_sleep();
+
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto fail1;
+ }
+ PDBG("%s ep %p\n", __func__, ep);
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.dev = dev;
+ ep->backlog = backlog;
+ ep->com.local_addr = cm_id->local_addr;
+
+ /*
+ * Allocate a server TID.
+ */
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (ep->stid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ state_set(&ep->com, LISTEN);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (err)
+ goto fail3;
+
+ /* wait for pass_open_rpl */
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ err = ep->com.rpl_err;
+ if (!err) {
+ cm_id->provider_data = ep;
+ goto out;
+ }
+fail3:
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+fail1:
+out:
+ return err;
+}
+
+int c4iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ int err;
+ struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
+
+ PDBG("%s ep %p\n", __func__, ep);
+
+ might_sleep();
+ state_set(&ep->com, DEAD);
+ ep->com.rpl_done = 0;
+ ep->com.rpl_err = 0;
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+done:
+ err = ep->com.rpl_err;
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+{
+ int ret = 0;
+ unsigned long flags;
+ int close = 0;
+ int fatal = 0;
+ struct c4iw_rdev *rdev;
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+
+ PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ states[ep->com.state], abrupt);
+
+ rdev = &ep->com.dev->rdev;
+ if (c4iw_fatal_error(rdev)) {
+ fatal = 1;
+ close_complete_upcall(ep);
+ ep->com.state = DEAD;
+ }
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ case MPA_REQ_SENT:
+ case MPA_REQ_RCVD:
+ case MPA_REP_SENT:
+ case FPDU_MODE:
+ close = 1;
+ if (abrupt)
+ ep->com.state = ABORTING;
+ else {
+ ep->com.state = CLOSING;
+ start_timer = 1;
+ }
+ set_bit(CLOSE_SENT, &ep->com.flags);
+ break;
+ case CLOSING:
+ if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+ close = 1;
+ if (abrupt) {
+ stop_timer = 1;
+ ep->com.state = ABORTING;
+ } else
+ ep->com.state = MORIBUND;
+ }
+ break;
+ case MORIBUND:
+ case ABORTING:
+ case DEAD:
+ PDBG("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (close) {
+ if (abrupt)
+ ret = abort_connection(ep, NULL, gfp);
+ else
+ ret = send_halfclose(ep, gfp);
+ if (ret)
+ fatal = 1;
+ }
+ if (fatal)
+ release_ep_resources(ep);
+ return ret;
+}
+
+/*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+ struct c4iw_qp_attributes attrs;
+ int abort = 1;
+
+ spin_lock_irq(&ep->com.lock);
+ PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, ABORTING);
+ connect_reply_upcall(ep, -ETIMEDOUT);
+ break;
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, ABORTING);
+ break;
+ case CLOSING:
+ case MORIBUND:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ __state_set(&ep->com, ABORTING);
+ break;
+ default:
+		printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, ep->com.state);
+ WARN_ON(1);
+ abort = 0;
+ }
+ spin_unlock_irq(&ep->com.lock);
+ if (abort)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+ struct c4iw_ep *ep;
+
+ spin_lock_irq(&timeout_lock);
+ while (!list_empty(&timeout_list)) {
+ struct list_head *tmp;
+
+ tmp = timeout_list.next;
+ list_del(tmp);
+ spin_unlock_irq(&timeout_lock);
+ ep = list_entry(tmp, struct c4iw_ep, entry);
+ process_timeout(ep);
+ spin_lock_irq(&timeout_lock);
+ }
+ spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct c4iw_dev *dev;
+	struct cpl_act_establish *rpl;
+ unsigned int opcode;
+ int ret;
+
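+	/*
+	 * Drain the receive queue: each skb carries its c4iw_dev pointer
+	 * in skb->cb and is dispatched to the handler indexed by its CPL
+	 * opcode.  The skb is freed here unless the handler returns
+	 * nonzero to indicate it kept the buffer.
+	 */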
+ while ((skb = skb_dequeue(&rxq))) {
+ rpl = cplhdr(skb);
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+ opcode = rpl->ot.opcode;
+
+ BUG_ON(!work_handlers[opcode]);
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
+ process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+ struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+ spin_lock(&timeout_lock);
+ list_add_tail(&ep->entry, &timeout_list);
+ spin_unlock(&timeout_lock);
+ queue_work(workq, &skb_work);
+}
+
+/*
+ * All the CM events are handled on a work queue so that they run in a
+ * safe, sleepable context.
+ */
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+
+ /*
+ * Save dev in the skb->cb area.
+ */
+ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
+
+ /*
+ * Queue the skb and schedule the worker thread.
+ */
+ skb_queue_tail(&rxq, skb);
+ queue_work(workq, &skb_work);
+ return 0;
+}
+
+static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE) {
+ printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
+ "for tid %u\n", rpl->status, GET_TID(rpl));
+ }
+ return 0;
+}
+
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ struct c4iw_wr_wait *wr_waitp;
+ int ret;
+
+ PDBG("%s type %u\n", __func__, rpl->type);
+
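+	/*
+	 * Type 1 messages carry the completion status of a firmware work
+	 * request together with a pointer to its c4iw_wr_wait; wake the
+	 * waiter.  Type 2 messages carry an asynchronous CQE to dispatch
+	 * to the event handler.
+	 */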
+ switch (rpl->type) {
+ case 1:
+ ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+ wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ if (wr_waitp) {
+ wr_waitp->ret = ret;
+ wr_waitp->done = 1;
+ wake_up(&wr_waitp->wait);
+ }
+ break;
+ case 2:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ default:
+ printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+ rpl->type);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_FW4_ACK] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_FW6_MSG] = fw6_msg
+};
+
+int __init c4iw_cm_init(void)
+{
+ spin_lock_init(&timeout_lock);
+ skb_queue_head_init(&rxq);
+
+ workq = create_singlethread_workqueue("iw_cxgb4");
+ if (!workq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit c4iw_cm_term(void)
+{
+ WARN_ON(!list_empty(&timeout_list));
+ flush_workqueue(workq);
+ destroy_workqueue(workq);
+}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 00000000000..2447f529548
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iw_cxgb4.h"
+
+static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+
+ wr_len = sizeof *res_wr + sizeof *res;
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
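+	/*
+	 * Build a firmware resource work request that resets the ingress
+	 * queue backing this CQ, then wait for the firmware to complete
+	 * it before freeing the host memory.
+	 */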
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_RESET;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (!ret) {
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ }
+
+ kfree(cq->sw_queue);
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+ return ret;
+}
+
+static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ int user = (uctx != &rdev->uctx);
+ struct c4iw_wr_wait wr_wait;
+ int ret;
+ struct sk_buff *skb;
+
+ cq->cqid = c4iw_get_cqid(rdev, uctx);
+ if (!cq->cqid) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ if (!user) {
+ cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
+ if (!cq->sw_queue) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+ }
+ cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
+ &cq->dma_addr, GFP_KERNEL);
+ if (!cq->queue) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ memset(cq->queue, 0, cq->memsize);
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_WRITE;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+ res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
+ V_FW_RI_RES_WR_IQANUS(0) |
+ V_FW_RI_RES_WR_IQANUD(1) |
+ F_FW_RI_RES_WR_IQANDST |
+ V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+ res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
+ F_FW_RI_RES_WR_IQDROPRSS |
+ V_FW_RI_RES_WR_IQPCIECH(2) |
+ V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
+ F_FW_RI_RES_WR_IQO |
+ V_FW_RI_RES_WR_IQESIZE(1));
+ res->u.cq.iqsize = cpu_to_be16(cq->size);
+ res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err4;
+ PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err4;
+
+ cq->gen = 1;
+ cq->gts = rdev->lldi.gts_reg;
+ cq->rdev = rdev;
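+	/*
+	 * For a user-mode CQ, record the page-aligned bus address of the
+	 * GTS doorbell for this CQ (PCI resource 2 plus the CQ id shifted
+	 * by cqshift) so it can be mmapped into the process later.
+	 */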
+ if (user) {
+ cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (cq->cqid << rdev->cqshift);
+ cq->ugts &= PAGE_MASK;
+ }
+ return 0;
+err4:
+ dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+err3:
+ kfree(cq->sw_queue);
+err2:
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+err1:
+ return ret;
+}
+
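+/*
+ * Insert a software-generated flush CQE (status T4_ERR_SWFLUSH) into the
+ * SW CQ for an RQ entry that will never complete in hardware.
+ */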
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(FW_RI_SEND) |
+ V_CQE_TYPE(0) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->rq.qid));
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ int in_use = wq->rq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ wq, cq, wq->rq.in_use, count);
+ while (in_use--) {
+ insert_recv_cqe(wq, cq);
+ flushed++;
+ }
+ return flushed;
+}
+
+static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
+ struct t4_swsqe *swcqe)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(swcqe->opcode) |
+ V_CQE_TYPE(1) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->sq.qid));
+ CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
+ int in_use = wq->sq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ while (in_use--) {
+ swsqe->signaled = 0;
+ insert_sq_cqe(wq, cq, swsqe);
+ swsqe++;
+ if (swsqe == (wq->sq.sw_sq + wq->sq.size))
+ swsqe = wq->sq.sw_sq;
+ flushed++;
+ }
+ return flushed;
+}
+
+/*
+ * Move all CQEs from the HWCQ into the SWCQ.
+ */
+void c4iw_flush_hw_cq(struct t4_cq *cq)
+{
+ struct t4_cqe *cqe = NULL, *swcqe;
+ int ret;
+
+ PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ while (!ret) {
+ PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
+ __func__, cq->cidx, cq->sw_pidx);
+ swcqe = &cq->sw_queue[cq->sw_pidx];
+ *swcqe = *cqe;
+ swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ t4_swcq_produce(cq);
+ t4_hwcq_consume(cq);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ }
+}
+
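+/*
+ * Return 1 if this CQE completes a posted work request.  Terminate CQEs,
+ * RDMA write completions seen on the RQ, read responses seen on the SQ,
+ * and sends that arrive while the RQ is empty do not.
+ */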
+static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
+{
+ if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
+ return 0;
+
+ if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
+ return 0;
+ return 1;
+}
+
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
+ wq->sq.oldest_read)) &&
+ (CQE_QPID(cqe) == wq->sq.qid))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ PDBG("%s count zero %d\n", __func__, *count);
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
+ (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
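+/*
+ * Scan the software SQ from the current cidx, skipping unsignaled WRs.
+ * If the first signaled WR found has already completed out of order,
+ * move its CQE into the software CQ and reclaim the unsignaled WRs
+ * that preceded it.
+ */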
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_swsqe *swsqe;
+ u16 ptr = wq->sq.cidx;
+ int count = wq->sq.in_use;
+ int unsignaled = 0;
+
+ swsqe = &wq->sq.sw_sq[ptr];
+ while (count--)
+ if (!swsqe->signaled) {
+ if (++ptr == wq->sq.size)
+ ptr = 0;
+ swsqe = &wq->sq.sw_sq[ptr];
+ unsignaled++;
+ } else if (swsqe->complete) {
+
+ /*
+ * Insert this completed cqe into the swcq.
+ */
+ PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+ __func__, ptr, cq->sw_pidx);
+ swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+ t4_swcq_produce(cq);
+ swsqe->signaled = 0;
+ wq->sq.in_use -= unsignaled;
+ break;
+ } else
+ break;
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+ struct t4_cqe *read_cqe)
+{
+ read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+ read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
+ read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+ V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+ V_CQE_OPCODE(FW_RI_READ_REQ) |
+ V_CQE_TYPE(1));
+ read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
+}
+
+/*
+ * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it
+ * to NULL if there are no more outstanding read requests.
+ */
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+ u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+ if (rptr == wq->sq.size)
+ rptr = 0;
+ while (rptr != wq->sq.pidx) {
+ wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+ if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+ return;
+ if (++rptr == wq->sq.size)
+ rptr = 0;
+ }
+ wq->sq.oldest_read = NULL;
+}
+
+/*
+ * poll_cq
+ *
+ * Caller must:
+ * check the validity of the first CQE,
+ *	supply the wq associated with the qpid.
+ *
+ * credit: cq credit to return to sge.
+ * cqe_flushed: 1 iff the CQE is flushed.
+ * cqe: copy of the polled CQE.
+ *
+ * return value:
+ * 0 CQE returned ok.
+ * -EAGAIN CQE skipped, try again.
+ * -EOVERFLOW CQ overflow detected.
+ */
+static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+ u8 *cqe_flushed, u64 *cookie, u32 *credit)
+{
+ int ret = 0;
+ struct t4_cqe *hw_cqe, read_cqe;
+
+ *cqe_flushed = 0;
+ *credit = 0;
+ ret = t4_next_cqe(cq, &hw_cqe);
+ if (ret)
+ return ret;
+
+ PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
+ " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+ CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+ CQE_WRID_LOW(hw_cqe));
+
+ /*
+	 * skip CQEs not affiliated with a QP.
+ */
+ if (wq == NULL) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Gotta tweak READ completions:
+ * 1) the cqe doesn't contain the sq_wptr from the wr.
+ * 2) opcode not reflected from the wr.
+ * 3) read_len not reflected from the wr.
+ * 4) cq_type is RQ_TYPE not SQ_TYPE.
+ */
+ if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+
+ /*
+ * If this is an unsolicited read response, then the read
+ * was generated by the kernel driver as part of peer-2-peer
+ * connection setup. So ignore the completion.
+ */
+ if (!wq->sq.oldest_read) {
+ if (CQE_STATUS(hw_cqe))
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Don't write to the HWCQ, so create a new read req CQE
+ * in local memory.
+ */
+ create_read_req_cqe(wq, hw_cqe, &read_cqe);
+ hw_cqe = &read_cqe;
+ advance_oldest_read(wq);
+ }
+
+ if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
+ *cqe_flushed = t4_wq_in_error(wq);
+ t4_set_wq_in_error(wq);
+ goto proc_cqe;
+ }
+
+ /*
+ * RECV completion.
+ */
+ if (RQ_TYPE(hw_cqe)) {
+
+ /*
+ * HW only validates 4 bits of MSN. So we must validate that
+		 * the MSN in the SEND is the next expected MSN. If it's not,
+ * then we complete this with T4_ERR_MSN and mark the wq in
+ * error.
+ */
+
+ if (t4_rq_empty(wq)) {
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+ if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ t4_set_wq_in_error(wq);
+ hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+ goto proc_cqe;
+ }
+ goto proc_cqe;
+ }
+
+ /*
+ * If we get here its a send completion.
+ *
+ * Handle out of order completion. These get stuffed
+ * in the SW SQ. Then the SW SQ is walked to move any
+ * now in-order completions into the SW CQ. This handles
+ * 2 cases:
+ * 1) reaping unsignaled WRs when the first subsequent
+ * signaled WR is completed.
+ * 2) out of order read completions.
+ */
+ if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
+ struct t4_swsqe *swsqe;
+
+ PDBG("%s out of order completion going in sw_sq at idx %u\n",
+ __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+ swsqe->cqe = *hw_cqe;
+ swsqe->complete = 1;
+ ret = -EAGAIN;
+ goto flush_wq;
+ }
+
+proc_cqe:
+ *cqe = *hw_cqe;
+
+ /*
+ * Reap the associated WR(s) that are freed up with this
+ * completion.
+ */
+ if (SQ_TYPE(hw_cqe)) {
+ wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+ PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+ t4_sq_consume(wq);
+ } else {
+ PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ BUG_ON(t4_rq_empty(wq));
+ t4_rq_consume(wq);
+ }
+
+flush_wq:
+ /*
+ * Flush any completed cqes that are now in-order.
+ */
+ flush_completed_wrs(wq, cq);
+
+skip_cqe:
+ if (SW_CQE(hw_cqe)) {
+ PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->sw_cidx);
+ t4_swcq_consume(cq);
+ } else {
+ PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->cidx);
+ t4_hwcq_consume(cq);
+ }
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA EMPTY;
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe cqe = {0, 0}, *rd_cqe;
+ struct t4_wq *wq;
+ u32 credit = 0;
+ u8 cqe_flushed;
+ u64 cookie = 0;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (!qhp)
+ wq = NULL;
+ else {
+ spin_lock(&qhp->lock);
+ wq = &(qhp->wq);
+ }
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ if (ret)
+ goto out;
+
+ wc->wr_id = cookie;
+ wc->qp = &qhp->ibqp;
+ wc->vendor_err = CQE_STATUS(&cqe);
+ wc->wc_flags = 0;
+
+ PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
+ "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
+ CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
+ CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
+
+ if (CQE_TYPE(&cqe) == 0) {
+ if (!CQE_STATUS(&cqe))
+ wc->byte_len = CQE_LEN(&cqe);
+ else
+ wc->byte_len = 0;
+ wc->opcode = IB_WC_RECV;
+ if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
+ CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+ wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ } else {
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case FW_RI_READ_REQ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = CQE_LEN(&cqe);
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case FW_RI_SEND:
+ case FW_RI_SEND_WITH_SE:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case FW_RI_BIND_MW:
+ wc->opcode = IB_WC_BIND_MW;
+ break;
+
+ case FW_RI_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case FW_RI_FAST_REGISTER:
+ wc->opcode = IB_WC_FAST_REG_MR;
+ break;
+ default:
+ printk(KERN_ERR MOD "Unexpected opcode %d "
+ "in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (cqe_flushed)
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ else {
+
+ switch (CQE_STATUS(&cqe)) {
+ case T4_ERR_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case T4_ERR_STAG:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_PDID:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_WRAP:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ case T4_ERR_BOUND:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ case T4_ERR_OPCODE:
+ wc->status = IB_WC_FATAL_ERR;
+ break;
+ case T4_ERR_SWFLUSH:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ default:
+ printk(KERN_ERR MOD
+ "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+ CQE_STATUS(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ }
+ }
+out:
+ if (wq)
+ spin_unlock(&qhp->lock);
+ return ret;
+}
+
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct c4iw_cq *chp;
+ unsigned long flags;
+ int npolled;
+ int err = 0;
+
+ chp = to_c4iw_cq(ibcq);
+
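+	/*
+	 * Poll up to num_entries CQEs, retrying any entry poll_cq skipped
+	 * (-EAGAIN) and stopping early on -ENODATA (CQ empty) or a fatal
+	 * error.
+	 */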
+ spin_lock_irqsave(&chp->lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ do {
+ err = c4iw_poll_cq_one(chp, wc + npolled);
+ } while (err == -EAGAIN);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&chp->lock, flags);
+ return !err || err == -ENODATA ? npolled : err;
+}
+
+int c4iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_ucontext *ucontext;
+
+ PDBG("%s ib_cq %p\n", __func__, ib_cq);
+ chp = to_c4iw_cq(ib_cq);
+
+ remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+ atomic_dec(&chp->refcnt);
+ wait_event(chp->wait, !atomic_read(&chp->refcnt));
+
+ ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
+ : NULL;
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+ kfree(chp);
+ return 0;
+}
+
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *ib_context,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_cq *chp;
+ struct c4iw_create_cq_resp uresp;
+ struct c4iw_ucontext *ucontext = NULL;
+ int ret;
+ size_t memsize;
+ struct c4iw_mm_entry *mm, *mm2;
+
+ PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+
+ rhp = to_c4iw_dev(ibdev);
+
+ chp = kzalloc(sizeof(*chp), GFP_KERNEL);
+ if (!chp)
+ return ERR_PTR(-ENOMEM);
+
+ if (ib_context)
+ ucontext = to_c4iw_ucontext(ib_context);
+
+ /* account for the status page. */
+ entries++;
+
+ /* IQ needs one extra entry to differentiate full vs empty. */
+ entries++;
+
+ /*
+ * entries must be multiple of 16 for HW.
+ */
+ entries = roundup(entries, 16);
+ memsize = entries * sizeof *chp->cq.queue;
+
+ /*
+	 * memsize must be a multiple of the page size if it's a user cq.
+ */
+ if (ucontext)
+ memsize = roundup(memsize, PAGE_SIZE);
+ chp->cq.size = entries;
+ chp->cq.memsize = memsize;
+
+ ret = create_cq(&rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ chp->rhp = rhp;
+ chp->cq.size--; /* status page */
+ chp->ibcq.cqe = chp->cq.size - 1;
+ spin_lock_init(&chp->lock);
+ atomic_set(&chp->refcnt, 1);
+ init_waitqueue_head(&chp->wait);
+ ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ if (ret)
+ goto err2;
+
+ if (ucontext) {
+		ret = -ENOMEM;
+		mm = kmalloc(sizeof *mm, GFP_KERNEL);
+		if (!mm)
+			goto err3;
+		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+		if (!mm2)
+			goto err4;
+
+ uresp.qid_mask = rhp->rdev.cqmask;
+ uresp.cqid = chp->cq.cqid;
+ uresp.size = chp->cq.size;
+ uresp.memsize = chp->cq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err5;
+
+ mm->key = uresp.key;
+ mm->addr = virt_to_phys(chp->cq.queue);
+ mm->len = chp->cq.memsize;
+ insert_mmap(ucontext, mm);
+
+ mm2->key = uresp.gts_key;
+ mm2->addr = chp->cq.ugts;
+ mm2->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm2);
+ }
+ PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ __func__, chp->cq.cqid, chp, chp->cq.size,
+ chp->cq.memsize,
+ (unsigned long long) chp->cq.dma_addr);
+ return &chp->ibcq;
+err5:
+ kfree(mm2);
+err4:
+ kfree(mm);
+err3:
+ remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+err2:
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(chp);
+ return ERR_PTR(ret);
+}
+
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+ return -ENOSYS;
+}
+
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct c4iw_cq *chp;
+ int ret;
+ unsigned long flag;
+
+ chp = to_c4iw_cq(ibcq);
+ spin_lock_irqsave(&chp->lock, flag);
+ ret = t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ spin_unlock_irqrestore(&chp->lock, flag);
+ if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 00000000000..d870f9c17c1
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "iw_cxgb4.h"
+
+#define DRV_VERSION "0.1"
+
+MODULE_AUTHOR("Steve Wise");
+MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static struct dentry *c4iw_debugfs_root;
+
+struct debugfs_qp_data {
+ struct c4iw_dev *devp;
+ char *buf;
+ int bufsize;
+ int pos;
+};
+
+static int count_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ int *countp = data;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ *countp = *countp + 1;
+ return 0;
+}
+
+static int dump_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ struct debugfs_qp_data *qpd = data;
+ int space;
+ int cc;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ space = qpd->bufsize - qpd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ if (qp->ep)
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+ "ep tid %u state %u %pI4:%u->%pI4:%u\n",
+ qp->wq.sq.qid, (int)qp->attr.state,
+ qp->ep->hwtid, (int)qp->ep->com.state,
+ &qp->ep->com.local_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.local_addr.sin_port),
+ &qp->ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.remote_addr.sin_port));
+ else
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
+ qp->wq.sq.qid, (int)qp->attr.state);
+ if (cc < space)
+ qpd->pos += cc;
+ return 0;
+}
+
+static int qp_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ if (!qpd) {
+ printk(KERN_INFO "%s null qpd?\n", __func__);
+ return 0;
+ }
+ kfree(qpd->buf);
+ kfree(qpd);
+ return 0;
+}
+
+static int qp_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd;
+ int ret = 0;
+ int count = 1;
+
+ qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+ if (!qpd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ qpd->devp = inode->i_private;
+ qpd->pos = 0;
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+ spin_unlock_irq(&qpd->devp->lock);
+
+ qpd->bufsize = count * 128;
+ qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+ if (!qpd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+ spin_unlock_irq(&qpd->devp->lock);
+
+ qpd->buf[qpd->pos++] = 0;
+ file->private_data = qpd;
+ goto out;
+err1:
+ kfree(qpd);
+out:
+ return ret;
+}
+
+static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ loff_t pos = *ppos;
+ loff_t avail = qpd->pos;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len = 0;
+
+ len = min((int)count, (int)qpd->pos - (int)pos);
+ if (copy_to_user(buf, qpd->buf + pos, len))
+ return -EFAULT;
+ if (len == 0)
+ return -EINVAL;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations qp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qp_open,
+ .release = qp_release,
+ .read = qp_read,
+};
+
+static int setup_debugfs(struct c4iw_dev *devp)
+{
+ struct dentry *de;
+
+ if (!devp->debugfs_root)
+ return -1;
+
+	de = debugfs_create_file("qps", S_IRUSR, devp->debugfs_root,
+ (void *)devp, &qp_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+ return 0;
+}
+
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_qid_list *entry;
+
+ mutex_lock(&uctx->lock);
+ list_for_each_safe(pos, nxt, &uctx->qpids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ if (!(entry->qid & rdev->qpmask))
+ c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
+ &rdev->resource.qid_fifo_lock);
+ kfree(entry);
+ }
+
+	list_for_each_safe(pos, nxt, &uctx->cqids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ kfree(entry);
+ }
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ INIT_LIST_HEAD(&uctx->qpids);
+ INIT_LIST_HEAD(&uctx->cqids);
+ mutex_init(&uctx->lock);
+}
+
+/* Caller takes care of locking if needed */
+static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+{
+ int err;
+
+ c4iw_init_dev_ucontext(rdev, &rdev->uctx);
+
+ /*
+ * qpshift is the number of bits to shift the qpid left in order
+ * to get the correct address of the doorbell for that qp.
+ */
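+	/*
+	 * Illustrative example (hypothetical values): with PAGE_SHIFT of 12
+	 * and a udb_density of 16 doorbells per page, qpshift is 12 - 4 = 8
+	 * and qpmask is 0xf; qids differing only in those low bits map onto
+	 * the same user doorbell page.
+	 */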
+ rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
+ rdev->qpmask = rdev->lldi.udb_density - 1;
+ rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
+ rdev->cqmask = rdev->lldi.ucq_density - 1;
+ PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
+ "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
+ __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
+ rdev->lldi.vr->pbl.start,
+ rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
+ rdev->lldi.vr->rq.size);
+ PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
+ "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
+ (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
+ (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ rdev->lldi.db_reg,
+ rdev->lldi.gts_reg,
+ rdev->qpshift, rdev->qpmask,
+ rdev->cqshift, rdev->cqmask);
+
+ if (c4iw_num_stags(rdev) == 0) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing resources\n", err);
+ goto err1;
+ }
+ err = c4iw_pblpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+ goto err2;
+ }
+ err = c4iw_rqtpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+ goto err3;
+ }
+ return 0;
+err3:
+ c4iw_pblpool_destroy(rdev);
+err2:
+ c4iw_destroy_resource(&rdev->resource);
+err1:
+ return err;
+}
+
+static void c4iw_rdev_close(struct c4iw_rdev *rdev)
+{
+ c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
+ c4iw_destroy_resource(&rdev->resource);
+}
+
+static void c4iw_remove(struct c4iw_dev *dev)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ cancel_delayed_work_sync(&dev->db_drop_task);
+ list_del(&dev->entry);
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ c4iw_rdev_close(&dev->rdev);
+ idr_destroy(&dev->cqidr);
+ idr_destroy(&dev->qpidr);
+ idr_destroy(&dev->mmidr);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *devp;
+ int ret;
+
+ devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+ if (!devp) {
+ printk(KERN_ERR MOD "Cannot allocate ib device\n");
+ return NULL;
+ }
+ devp->rdev.lldi = *infop;
+
+ mutex_lock(&dev_mutex);
+
+ ret = c4iw_rdev_open(&devp->rdev);
+ if (ret) {
+ mutex_unlock(&dev_mutex);
+ printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+ ib_dealloc_device(&devp->ibdev);
+ return NULL;
+ }
+
+ idr_init(&devp->cqidr);
+ idr_init(&devp->qpidr);
+ idr_init(&devp->mmidr);
+ spin_lock_init(&devp->lock);
+ list_add_tail(&devp->entry, &dev_list);
+ mutex_unlock(&dev_mutex);
+
+ if (c4iw_debugfs_root) {
+ devp->debugfs_root = debugfs_create_dir(
+ pci_name(devp->rdev.lldi.pdev),
+ c4iw_debugfs_root);
+ setup_debugfs(devp);
+ }
+ return devp;
+}
+
+static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *dev;
+ static int vers_printed;
+ int i;
+
+ if (!vers_printed++)
+ printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
+ DRV_VERSION);
+
+ dev = c4iw_alloc(infop);
+ if (!dev)
+ goto out;
+
+ PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ __func__, pci_name(dev->rdev.lldi.pdev),
+ dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
+ dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+
+ for (i = 0; i < dev->rdev.lldi.nrxq; i++)
+ PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+out:
+ return dev;
+}
+
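+/*
+ * Build an skb from an ingress packet gather list.  Small packets
+ * (<= 512 bytes) are copied entirely into the linear area; larger
+ * packets get pull_len header bytes copied and the remainder attached
+ * as page fragments, taking a reference on the last page since this
+ * driver does not own it.
+ */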
+static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ if (gl->tot_len <= 512) {
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags - 1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page; we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+out:
+ return skb;
+}
+
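+/*
+ * LLD receive hook.  Three cases: a NULL gather list means the CPL
+ * message is embedded in the response descriptor itself, the
+ * CXGB4_MSG_AN cookie indicates a CQ asynchronous notification, and
+ * anything else is a real packet gather list.  CPL messages are then
+ * dispatched via the c4iw_handlers[] opcode table.
+ */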
+static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct c4iw_dev *dev = handle;
+ struct sk_buff *skb;
+ const struct cpl_act_establish *rpl;
+ unsigned int opcode;
+
+ if (gl == NULL) {
+ /* omit RSS and rsp_ctrl at end of descriptor */
+ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+ skb = alloc_skb(256, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else if (gl == CXGB4_MSG_AN) {
+ const struct rsp_ctrl *rc = (void *)rsp;
+
+ u32 qid = be32_to_cpu(rc->pldbuflen_qid);
+ c4iw_ev_handler(dev, qid);
+ return 0;
+ } else {
+ skb = t4_pktgl_to_skb(gl, 128, 128);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = cplhdr(skb);
+ opcode = rpl->ot.opcode;
+
+ if (c4iw_handlers[opcode])
+ c4iw_handlers[opcode](dev, skb);
+ else
+ printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+
+ return 0;
+nomem:
+ return -1;
+}
+
+static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct c4iw_dev *dev = handle;
+
+ PDBG("%s new_state %u\n", __func__, new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+ if (!dev->registered) {
+ int ret;
+ ret = c4iw_register_device(dev);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s: RDMA registration failed: %d\n",
+ pci_name(dev->rdev.lldi.pdev), ret);
+ }
+ break;
+ case CXGB4_STATE_DOWN:
+ printk(KERN_INFO MOD "%s: Down\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ printk(KERN_INFO MOD "%s: Fatal Error\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_DETACH:
+ printk(KERN_INFO MOD "%s: Detach\n",
+ pci_name(dev->rdev.lldi.pdev));
+ mutex_lock(&dev_mutex);
+ c4iw_remove(dev);
+ mutex_unlock(&dev_mutex);
+ break;
+ }
+ return 0;
+}
+
+static struct cxgb4_uld_info c4iw_uld_info = {
+ .name = DRV_NAME,
+ .add = c4iw_uld_add,
+ .rx_handler = c4iw_uld_rx_handler,
+ .state_change = c4iw_uld_state_change,
+};
+
+static int __init c4iw_init_module(void)
+{
+ int err;
+
+ err = c4iw_cm_init();
+ if (err)
+ return err;
+
+ c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (!c4iw_debugfs_root)
+ printk(KERN_WARNING MOD
+ "could not create debugfs entry, continuing\n");
+
+ cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
+
+ return 0;
+}
+
+static void __exit c4iw_exit_module(void)
+{
+ struct c4iw_dev *dev, *tmp;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
+ c4iw_remove(dev);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ c4iw_cm_term();
+ debugfs_remove_recursive(c4iw_debugfs_root);
+}
+
+module_init(c4iw_init_module);
+module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 00000000000..491e76a0327
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <net/sock.h>
+
+#include "iw_cxgb4.h"
+
+static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
+ struct c4iw_qp *qhp,
+ struct t4_cqe *err_cqe,
+ enum ib_event_type ib_event)
+{
+ struct ib_event event;
+ struct c4iw_qp_attributes attrs;
+
+ if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
+ (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
+ PDBG("%s AE received after RTS - "
+ "qp state %d qpid 0x%x status 0x%x\n", __func__,
+ qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+ return;
+ }
+
+ printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+ "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+ if (qhp->attr.state == C4IW_QP_STATE_RTS) {
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+
+ event.event = ib_event;
+ event.device = chp->ibcq.device;
+ if (ib_event == IB_EVENT_CQ_ERR)
+ event.element.cq = &chp->ibcq;
+ else
+ event.element.qp = &qhp->ibqp;
+ if (qhp->ibqp.event_handler)
+ (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+}
+
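+/*
+ * Dispatch an async error CQE: look up the QP, pick the SQ or RQ CQ the
+ * error applies to, take references so neither can be freed underneath
+ * us, and map the T4 status code onto the appropriate ib_event type.
+ */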
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_qp *qhp;
+ u32 cqid;
+
+ spin_lock(&dev->lock);
+ qhp = get_qhp(dev, CQE_QPID(err_cqe));
+ if (!qhp) {
+ printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ if (SQ_TYPE(err_cqe))
+ cqid = qhp->attr.scq;
+ else
+ cqid = qhp->attr.rcq;
+ chp = get_chp(dev, cqid);
+ if (!chp) {
+ printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ cqid, CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ c4iw_qp_add_ref(&qhp->ibqp);
+ atomic_inc(&chp->refcnt);
+ spin_unlock(&dev->lock);
+
+ /* Bad incoming write */
+ if (RQ_TYPE(err_cqe) &&
+ (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
+ goto done;
+ }
+
+ switch (CQE_STATUS(err_cqe)) {
+
+ /* Completion Events */
+ case T4_ERR_SUCCESS:
+ printk(KERN_ERR MOD "AE with status 0!\n");
+ break;
+
+ case T4_ERR_STAG:
+ case T4_ERR_PDID:
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ case T4_ERR_WRAP:
+ case T4_ERR_BOUND:
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
+ break;
+
+ /* Device Fatal Errors */
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
+ break;
+
+ /* QP Fatal Errors */
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_PBL_ADDR_BOUND:
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_OPCODE:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_RQE_ADDR_BOUND:
+ case T4_ERR_IRD_OVERFLOW:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+
+ default:
+ printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+ CQE_STATUS(err_cqe), qhp->wq.sq.qid);
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+ }
+done:
+ if (atomic_dec_and_test(&chp->refcnt))
+ wake_up(&chp->wait);
+ c4iw_qp_rem_ref(&qhp->ibqp);
+out:
+ return;
+}
+
+int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
+{
+ struct c4iw_cq *chp;
+
+ chp = get_chp(dev, qid);
+ if (chp)
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ else
+ PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 00000000000..277ab589b44
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __IW_CXGB4_H__
+#define __IW_CXGB4_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "user.h"
+
+#define DRV_NAME "iw_cxgb4"
+#define MOD DRV_NAME ":"
+
+extern int c4iw_debug;
+#define PDBG(fmt, args...) \
+do { \
+ if (c4iw_debug) \
+ printk(MOD fmt, ## args); \
+} while (0)
+
+#include "t4.h"
+
+#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
+#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+#define C4IW_WR_TO (10*HZ)
+
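+/*
+ * Completion tracker for firmware work requests: the issuer embeds a
+ * pointer to one of these in the WR and sleeps on the wait queue; the
+ * reply handler fills in ret, sets done and wakes the issuer up.
+ */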
+struct c4iw_wr_wait {
+ wait_queue_head_t wait;
+ int done;
+ int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ wr_waitp->done = 0;
+ init_waitqueue_head(&wr_waitp->wait);
+}
+
+struct c4iw_resource {
+ struct kfifo tpt_fifo;
+ spinlock_t tpt_fifo_lock;
+ struct kfifo qid_fifo;
+ spinlock_t qid_fifo_lock;
+ struct kfifo pdid_fifo;
+ spinlock_t pdid_fifo_lock;
+};
+
+struct c4iw_qid_list {
+ struct list_head entry;
+ u32 qid;
+};
+
+struct c4iw_dev_ucontext {
+ struct list_head qpids;
+ struct list_head cqids;
+ struct mutex lock;
+};
+
+enum c4iw_rdev_flags {
+ T4_FATAL_ERROR = (1<<0),
+};
+
+struct c4iw_rdev {
+ struct c4iw_resource resource;
+ unsigned long qpshift;
+ u32 qpmask;
+ unsigned long cqshift;
+ u32 cqmask;
+ struct c4iw_dev_ucontext uctx;
+ struct gen_pool *pbl_pool;
+ struct gen_pool *rqt_pool;
+ u32 flags;
+ struct cxgb4_lld_info lldi;
+};
+
+static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+{
+ return rdev->flags & T4_FATAL_ERROR;
+}
+
+static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
+{
+ return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+}
+
+struct c4iw_dev {
+ struct ib_device ibdev;
+ struct c4iw_rdev rdev;
+ u32 device_cap_flags;
+ struct idr cqidr;
+ struct idr qpidr;
+ struct idr mmidr;
+ spinlock_t lock;
+ struct list_head entry;
+ struct delayed_work db_drop_task;
+ struct dentry *debugfs_root;
+ u8 registered;
+};
+
+static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct c4iw_dev, ibdev);
+}
+
+static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
+{
+ return container_of(rdev, struct c4iw_dev, rdev);
+}
+
+static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
+{
+ return idr_find(&rhp->cqidr, cqid);
+}
+
+static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
+{
+ return idr_find(&rhp->qpidr, qpid);
+}
+
+static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
+{
+ return idr_find(&rhp->mmidr, mmid);
+}
+
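+/*
+ * Insert a handle at a caller-chosen id.  idr_pre_get() preallocates
+ * outside the spinlock; idr_get_new_above() must then hand back exactly
+ * the requested id (enforced by the BUG_ON), since qp/cq/stag ids are
+ * fixed by the hardware resource allocators.
+ */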
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+ void *handle, u32 id)
+{
+ int ret;
+ int newid;
+
+ do {
+ if (!idr_pre_get(idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock_irq(&rhp->lock);
+ ret = idr_get_new_above(idr, handle, id, &newid);
+ BUG_ON(newid != id);
+ spin_unlock_irq(&rhp->lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+{
+ spin_lock_irq(&rhp->lock);
+ idr_remove(idr, id);
+ spin_unlock_irq(&rhp->lock);
+}
+
+struct c4iw_pd {
+ struct ib_pd ibpd;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+};
+
+static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct c4iw_pd, ibpd);
+}
+
+struct tpt_attributes {
+ u64 len;
+ u64 va_fbo;
+ enum fw_ri_mem_perms perms;
+ u32 stag;
+ u32 pdid;
+ u32 qpid;
+ u32 pbl_addr;
+ u32 pbl_size;
+ u32 state:1;
+ u32 type:2;
+ u32 rsvd:1;
+ u32 remote_invaliate_disable:1;
+ u32 zbva:1;
+ u32 mw_bind_enable:1;
+ u32 page_size:5;
+};
+
+struct c4iw_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct c4iw_mr, ibmr);
+}
+
+struct c4iw_mw {
+ struct ib_mw ibmw;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct c4iw_mw, ibmw);
+}
+
+struct c4iw_fr_page_list {
+ struct ib_fast_reg_page_list ibpl;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ dma_addr_t dma_addr;
+ struct c4iw_dev *dev;
+ int size;
+};
+
+static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
+ struct ib_fast_reg_page_list *ibpl)
+{
+ return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
+}
+
+struct c4iw_cq {
+ struct ib_cq ibcq;
+ struct c4iw_dev *rhp;
+ struct t4_cq cq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct c4iw_cq, ibcq);
+}
+
+struct c4iw_mpa_attributes {
+ u8 initiator;
+ u8 recv_marker_enabled;
+ u8 xmit_marker_enabled;
+ u8 crc_enabled;
+ u8 version;
+ u8 p2p_type;
+};
+
+struct c4iw_qp_attributes {
+ u32 scq;
+ u32 rcq;
+ u32 sq_num_entries;
+ u32 rq_num_entries;
+ u32 sq_max_sges;
+ u32 sq_max_sges_rdma_write;
+ u32 rq_max_sges;
+ u32 state;
+ u8 enable_rdma_read;
+ u8 enable_rdma_write;
+ u8 enable_bind;
+ u8 enable_mmid0_fastreg;
+ u32 max_ord;
+ u32 max_ird;
+ u32 pd;
+ u32 next_state;
+ char terminate_buffer[52];
+ u32 terminate_msg_len;
+ u8 is_terminate_local;
+ struct c4iw_mpa_attributes mpa_attr;
+ struct c4iw_ep *llp_stream_handle;
+};
+
+struct c4iw_qp {
+ struct ib_qp ibqp;
+ struct c4iw_dev *rhp;
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attr;
+ struct t4_wq wq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+ struct timer_list timer;
+};
+
+static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct c4iw_qp, ibqp);
+}
+
+struct c4iw_ucontext {
+ struct ib_ucontext ibucontext;
+ struct c4iw_dev_ucontext uctx;
+ u32 key;
+ spinlock_t mmap_lock;
+ struct list_head mmaps;
+};
+
+static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
+{
+ return container_of(c, struct c4iw_ucontext, ibucontext);
+}
+
+struct c4iw_mm_entry {
+ struct list_head entry;
+ u64 addr;
+ u32 key;
+ unsigned len;
+};
+
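+/*
+ * Pending mmaps are tracked per user context as (key, length) pairs:
+ * the verbs calls queue an entry here and the subsequent mmap() from
+ * userspace is matched against, and removed by, key and length.
+ */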
+static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
+ u32 key, unsigned len)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_mm_entry *mm;
+
+ spin_lock(&ucontext->mmap_lock);
+ list_for_each_safe(pos, nxt, &ucontext->mmaps) {
+
+ mm = list_entry(pos, struct c4iw_mm_entry, entry);
+ if (mm->key == key && mm->len == len) {
+ list_del_init(&mm->entry);
+ spin_unlock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ key, (unsigned long long) mm->addr, mm->len);
+ return mm;
+ }
+ }
+ spin_unlock(&ucontext->mmap_lock);
+ return NULL;
+}
+
+static inline void insert_mmap(struct c4iw_ucontext *ucontext,
+ struct c4iw_mm_entry *mm)
+{
+ spin_lock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ mm->key, (unsigned long long) mm->addr, mm->len);
+ list_add_tail(&mm->entry, &ucontext->mmaps);
+ spin_unlock(&ucontext->mmap_lock);
+}
+
+enum c4iw_qp_attr_mask {
+ C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+ C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
+ C4IW_QP_ATTR_MAX_ORD = 1 << 11,
+ C4IW_QP_ATTR_MAX_IRD = 1 << 12,
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
+ C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
+ C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_MAX_ORD |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
+};
+
+int c4iw_modify_qp(struct c4iw_dev *rhp,
+ struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal);
+
+enum c4iw_qp_state {
+ C4IW_QP_STATE_IDLE,
+ C4IW_QP_STATE_RTS,
+ C4IW_QP_STATE_ERROR,
+ C4IW_QP_STATE_TERMINATE,
+ C4IW_QP_STATE_CLOSING,
+ C4IW_QP_STATE_TOT
+};
+
+static inline int c4iw_convert_state(enum ib_qp_state ib_state)
+{
+ switch (ib_state) {
+ case IB_QPS_RESET:
+ case IB_QPS_INIT:
+ return C4IW_QP_STATE_IDLE;
+ case IB_QPS_RTS:
+ return C4IW_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return C4IW_QP_STATE_CLOSING;
+ case IB_QPS_SQE:
+ return C4IW_QP_STATE_TERMINATE;
+ case IB_QPS_ERR:
+ return C4IW_QP_STATE_ERROR;
+ default:
+ return -1;
+ }
+}
+
+static inline u32 c4iw_ib_to_tpt_access(int a)
+{
+ return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
+ (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
+ FW_RI_MEM_ACCESS_LOCAL_READ;
+}
+
+static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
+}
+
+enum c4iw_mmid_state {
+ C4IW_STAG_STATE_VALID,
+ C4IW_STAG_STATE_INVALID
+};
+
+#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
+
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+
+#define MPA_MAX_PRIVATE_DATA 256
+#define MPA_REJECT 0x20
+#define MPA_CRC 0x40
+#define MPA_MARKERS 0x80
+#define MPA_FLAGS_MASK 0xE0
+
+#define c4iw_put_ep(ep) { \
+ PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+ kref_put(&((ep)->kref), _c4iw_free_ep); \
+}
+
+#define c4iw_get_ep(ep) { \
+ PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ kref_get(&((ep)->kref)); \
+}
+void _c4iw_free_ep(struct kref *kref);
+
+struct mpa_message {
+ u8 key[16];
+ u8 flags;
+ u8 revision;
+ __be16 private_data_size;
+ u8 private_data[0];
+};
+
+struct terminate_message {
+ u8 layer_etype;
+ u8 ecode;
+ __be16 hdrct_rsvd;
+ u8 len_hdrs[0];
+};
+
+#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
+
+enum c4iw_layers_types {
+ LAYER_RDMAP = 0x00,
+ LAYER_DDP = 0x10,
+ LAYER_MPA = 0x20,
+ RDMAP_LOCAL_CATA = 0x00,
+ RDMAP_REMOTE_PROT = 0x01,
+ RDMAP_REMOTE_OP = 0x02,
+ DDP_LOCAL_CATA = 0x00,
+ DDP_TAGGED_ERR = 0x01,
+ DDP_UNTAGGED_ERR = 0x02,
+ DDP_LLP = 0x03
+};
+
+enum c4iw_rdma_ecodes {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_BASE_BOUNDS = 0x01,
+ RDMAP_ACC_VIOL = 0x02,
+ RDMAP_STAG_NOT_ASSOC = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_VERS = 0x05,
+ RDMAP_INV_OPCODE = 0x06,
+ RDMAP_STREAM_CATA = 0x07,
+ RDMAP_GLOBAL_CATA = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum c4iw_ddp_ecodes {
+ DDPT_INV_STAG = 0x00,
+ DDPT_BASE_BOUNDS = 0x01,
+ DDPT_STAG_NOT_ASSOC = 0x02,
+ DDPT_TO_WRAP = 0x03,
+ DDPT_INV_VERS = 0x04,
+ DDPU_INV_QN = 0x01,
+ DDPU_INV_MSN_NOBUF = 0x02,
+ DDPU_INV_MSN_RANGE = 0x03,
+ DDPU_INV_MO = 0x04,
+ DDPU_MSG_TOOBIG = 0x05,
+ DDPU_INV_VERS = 0x06
+};
+
+enum c4iw_mpa_ecodes {
+ MPA_CRC_ERR = 0x02,
+ MPA_MARKER_ERR = 0x03
+};
+
+enum c4iw_ep_state {
+ IDLE = 0,
+ LISTEN,
+ CONNECTING,
+ MPA_REQ_WAIT,
+ MPA_REQ_SENT,
+ MPA_REQ_RCVD,
+ MPA_REP_SENT,
+ FPDU_MODE,
+ ABORTING,
+ CLOSING,
+ MORIBUND,
+ DEAD,
+};
+
+enum c4iw_ep_flags {
+ PEER_ABORT_IN_PROGRESS = 0,
+ ABORT_REQ_IN_PROGRESS = 1,
+ RELEASE_RESOURCES = 2,
+ CLOSE_SENT = 3,
+};
+
+struct c4iw_ep_common {
+ struct iw_cm_id *cm_id;
+ struct c4iw_qp *qp;
+ struct c4iw_dev *dev;
+ enum c4iw_ep_state state;
+ struct kref kref;
+ spinlock_t lock;
+ struct sockaddr_in local_addr;
+ struct sockaddr_in remote_addr;
+ wait_queue_head_t waitq;
+ int rpl_done;
+ int rpl_err;
+ unsigned long flags;
+};
+
+struct c4iw_listen_ep {
+ struct c4iw_ep_common com;
+ unsigned int stid;
+ int backlog;
+};
+
+struct c4iw_ep {
+ struct c4iw_ep_common com;
+ struct c4iw_ep *parent_ep;
+ struct timer_list timer;
+ struct list_head entry;
+ unsigned int atid;
+ u32 hwtid;
+ u32 snd_seq;
+ u32 rcv_seq;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct sk_buff *mpa_skb;
+ struct c4iw_mpa_attributes mpa_attr;
+ u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
+ unsigned int mpa_pkt_len;
+ u32 ird;
+ u32 ord;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u8 tos;
+};
+
+static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
+static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
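+/*
+ * Smallest TCP window-scale shift (RFC 1323, capped at 14) such that
+ * 65535 << wscale covers the requested receive window.
+ */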
+static inline int compute_wscale(int win)
+{
+ int wscale = 0;
+
+ while (wscale < 14 && (65535<<wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
+
+int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
+ struct l2t_entry *l2t);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_pblpool_create(struct c4iw_rdev *rdev);
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_destroy_resource(struct c4iw_resource *rscp);
+int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_unregister_device(struct c4iw_dev *dev);
+int __init c4iw_cm_init(void);
+void __exit c4iw_cm_term(void);
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+int c4iw_destroy_listen(struct iw_cm_id *cm_id);
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+void c4iw_qp_add_ref(struct ib_qp *qp);
+void c4iw_qp_rem_ref(struct ib_qp *qp);
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
+ struct ib_device *device,
+ int page_list_len);
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
+int c4iw_dealloc_mw(struct ib_mw *mw);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt, int acc,
+ struct ib_udata *udata);
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc,
+ u64 *iova_start);
+int c4iw_reregister_phys_mem(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc, u64 *iova_start);
+int c4iw_dereg_mr(struct ib_mr *ib_mr);
+int c4iw_destroy_cq(struct ib_cq *ib_cq);
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector,
+ struct ib_ucontext *ib_context,
+ struct ib_udata *udata);
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_destroy_qp(struct ib_qp *ib_qp);
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
+u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
+int c4iw_post_zb_read(struct c4iw_qp *qhp);
+int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
+
+extern struct cxgb4_client t4c_client;
+extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
+
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 00000000000..7f94da1a243
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <asm/atomic.h>
+
+#include "iw_cxgb4.h"
+
+#define T4_ULPTX_MIN_IO 32
+#define C4IW_MAX_INLINE_SIZE 96
+
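+/*
+ * Write a block of adapter memory (TPT/PBL space) using inline ULP_TX
+ * MEM_WRITE work requests, at most C4IW_MAX_INLINE_SIZE bytes per WR.
+ * Only the final WR requests a completion; the caller then sleeps on a
+ * c4iw_wr_wait until the firmware responds or the timeout fires.
+ */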
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *sc;
+ u8 wr_len, *to_dp, *from_dp;
+ int copy_len, num_wqe, i, ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+ PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
+ num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
+ c4iw_init_wr_wait(&wr_wait);
+ for (i = 0; i < num_wqe; i++) {
+
+ copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
+ len;
+ wr_len = roundup(sizeof *req + sizeof *sc +
+ roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+
+ if (i == (num_wqe-1)) {
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ FW_WR_COMPL(1));
+ req->wr.wr_lo = (__force __be64)&wr_wait;
+ } else
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+ req->wr.wr_mid = cpu_to_be32(
+ FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
+ 16));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
+
+ to_dp = (u8 *)(sc + 1);
+ from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
+ if (data)
+ memcpy(to_dp, from_dp, copy_len);
+ else
+ memset(to_dp, 0, copy_len);
+ if (copy_len % T4_ULPTX_MIN_IO)
+ memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
+ (copy_len % T4_ULPTX_MIN_IO));
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ len -= C4IW_MAX_INLINE_SIZE;
+ }
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ return ret;
+}
+
+/*
+ * Build and write a TPT entry.
+ * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
+ * pbl_size and pbl_addr
+ * OUT: stag index
+ */
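+/*
+ * A stag is a 24-bit TPT index in the upper bits plus an 8-bit key in
+ * the low byte; the key is bumped on every allocation so a recycled
+ * index yields a different stag value.
+ */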
+static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ u32 *stag, u8 stag_state, u32 pdid,
+ enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
+ int bind_enabled, u32 zbva, u64 to,
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+{
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+ static atomic_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+
+ stag_state = stag_state > 0;
+ stag_idx = (*stag) >> 8;
+
+ if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
+ stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
+ *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+
+ /* write TPT entry */
+ if (reset_tpt_entry)
+ memset(&tpt, 0, sizeof(tpt));
+ else {
+ tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
+ V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
+ V_FW_RI_TPTE_STAGSTATE(stag_state) |
+ V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
+ tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
+ (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
+ V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+ FW_RI_VA_BASED_TO))|
+ V_FW_RI_TPTE_PS(page_size));
+ tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+ tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ }
+ err = write_adapter_mem(rdev, stag_idx +
+ (rdev->lldi.vr->stag.start >> 5),
+ sizeof(tpt), &tpt);
+
+ if (reset_tpt_entry)
+ c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
+ &rdev->resource.tpt_fifo_lock);
+ return err;
+}
+
+static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
+ u32 pbl_addr, u32 pbl_size)
+{
+ int err;
+
+	PDBG("%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pbl_size);
+
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ return err;
+}
+
+static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
+ u32 pbl_addr)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
+ pbl_size, pbl_addr);
+}
+
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
+ 0UL, 0, 0, 0, 0);
+}
+
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
+ 0);
+}
+
+static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+ u32 pbl_size, u32 pbl_addr)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
+ 0UL, 0, 0, pbl_size, pbl_addr);
+}
+
+static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
+{
+ u32 mmid;
+
+ mhp->attr.state = 1;
+ mhp->attr.stag = stag;
+ mmid = stag >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+}
+
+static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift)
+{
+ u32 stag = T4_STAG_UNSET;
+ int ret;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ return ret;
+}
+
+static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift, int npages)
+{
+ u32 stag;
+ int ret;
+
+ if (npages > mhp->attr.pbl_size)
+ return -ENOMEM;
+
+ stag = mhp->attr.stag;
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+
+ return ret;
+}
+
+static int alloc_pbl(struct c4iw_mr *mhp, int npages)
+{
+ mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
+ npages << 3);
+
+ if (!mhp->attr.pbl_addr)
+ return -ENOMEM;
+
+ mhp->attr.pbl_size = npages;
+
+ return 0;
+}
+
+static int build_phys_page_list(struct ib_phys_buf *buffer_list,
+ int num_phys_buf, u64 *iova_start,
+ u64 *total_size, int *npages,
+ int *shift, __be64 **page_list)
+{
+ u64 mask;
+ int i, j, n;
+
+ mask = 0;
+ *total_size = 0;
+ for (i = 0; i < num_phys_buf; ++i) {
+ if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (i != 0 && i != num_phys_buf - 1 &&
+ (buffer_list[i].size & ~PAGE_MASK))
+ return -EINVAL;
+ *total_size += buffer_list[i].size;
+ if (i > 0)
+ mask |= buffer_list[i].addr;
+ else
+ mask |= buffer_list[i].addr & PAGE_MASK;
+ if (i != num_phys_buf - 1)
+ mask |= buffer_list[i].addr + buffer_list[i].size;
+ else
+ mask |= (buffer_list[i].addr + buffer_list[i].size +
+ PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ if (*total_size > 0xFFFFFFFFULL)
+ return -ENOMEM;
+
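+	/*
+	 * mask is the OR of every intermediate buffer boundary, so the
+	 * lowest bit set in it at or above PAGE_SHIFT bounds the page
+	 * size that keeps all the buffers naturally aligned.
+	 */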
+ /* Find largest page shift we can use to cover buffers */
+ for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
+ if ((1ULL << *shift) & mask)
+ break;
+
+ buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
+ buffer_list[0].addr &= ~0ull << *shift;
+
+ *npages = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ *npages += (buffer_list[i].size +
+ (1ULL << *shift) - 1) >> *shift;
+
+ if (!*npages)
+ return -EINVAL;
+
+ *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
+ if (!*page_list)
+ return -ENOMEM;
+
+ n = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ for (j = 0;
+ j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
+ ++j)
+ (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
+ ((u64) j << *shift));
+
+ PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
+ __func__, (unsigned long long)*iova_start,
+ (unsigned long long)mask, *shift, (unsigned long long)*total_size,
+ *npages);
+
+ return 0;
+
+}
+
+int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
+ struct ib_pd *pd, struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+
+ struct c4iw_mr mh, *mhp;
+ struct c4iw_pd *php;
+ struct c4iw_dev *rhp;
+ __be64 *page_list = NULL;
+ int shift = 0;
+ u64 total_size;
+ int npages;
+ int ret;
+
+ PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
+
+ /* There can be no memory windows */
+ if (atomic_read(&mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(mr);
+ rhp = mhp->rhp;
+ php = to_c4iw_pd(mr->pd);
+
+ /* make sure we are on the same adapter */
+ if (rhp != php->rhp)
+ return -EINVAL;
+
+ memcpy(&mh, mhp, sizeof *mhp);
+
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ php = to_c4iw_pd(pd);
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
+ mh.attr.perms = c4iw_ib_to_tpt_access(acc);
+ mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
+ IB_ACCESS_MW_BIND;
+ }
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ ret = build_phys_page_list(buffer_list, num_phys_buf,
+ iova_start,
+ &total_size, &npages,
+ &shift, &page_list);
+ if (ret)
+ return ret;
+ }
+
+ ret = reregister_mem(rhp, php, &mh, shift, npages);
+ kfree(page_list);
+ if (ret)
+ return ret;
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ mhp->attr.pdid = php->pdid;
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ }
+
+ return 0;
+}
+
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ __be64 *page_list;
+ int shift;
+ u64 total_size;
+ int npages;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ /* First check that we have enough alignment */
+ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (num_phys_buf > 1 &&
+ ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
+ &total_size, &npages, &shift,
+ &page_list);
+ if (ret)
+ goto err;
+
+ ret = alloc_pbl(mhp, npages);
+ if (ret) {
+ kfree(page_list);
+ goto err_pbl;
+ }
+
+ ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
+ npages);
+ kfree(page_list);
+ if (ret)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ ret = register_mem(rhp, php, mhp, shift);
+ if (ret)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ kfree(mhp);
+ return ERR_PTR(ret);
+
+}
+
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+ u32 stag = T4_STAG_UNSET;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = 0;
+ mhp->attr.page_size = 0;
+ mhp->attr.len = ~0UL;
+ mhp->attr.pbl_size = 0;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+ if (ret)
+ goto err1;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ goto err2;
+ return &mhp->ibmr;
+err2:
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err1:
+ kfree(mhp);
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
+{
+ __be64 *pages;
+ int shift, n, len;
+ int i, j, k;
+ int err = 0;
+ struct ib_umem_chunk *chunk;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (length == ~0ULL)
+ return ERR_PTR(-EINVAL);
+
+ if ((length + start) < start)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ if (IS_ERR(mhp->umem)) {
+ err = PTR_ERR(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+ }
+
+ shift = ffs(mhp->umem->page_size) - 1;
+
+ n = 0;
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ n += chunk->nents;
+
+ err = alloc_pbl(mhp, n);
+ if (err)
+ goto err;
+
+ pages = (__be64 *) __get_free_page(GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ goto err_pbl;
+ }
+
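+	/*
+	 * Stage PBL entries in a single page and flush each full page to
+	 * adapter memory, so arbitrarily large regions never require a
+	 * large contiguous kernel allocation.
+	 */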
+ i = n = 0;
+
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ for (j = 0; j < chunk->nmap; ++j) {
+ len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = cpu_to_be64(sg_dma_address(
+ &chunk->page_list[j]) +
+ mhp->umem->page_size * k);
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = write_pbl(&mhp->rhp->rdev,
+ pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ err = write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+
+pbl_done:
+ free_page((unsigned long) pages);
+ if (err)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = virt;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) length;
+
+ err = register_mem(rhp, php, mhp, shift);
+ if (err)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ ib_umem_release(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+}
+
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ if (ret) {
+ kfree(mhp);
+ return ERR_PTR(ret);
+ }
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_MW;
+ mhp->attr.stag = stag;
+ mmid = (stag) >> 8;
+ mhp->ibmw.rkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmw);
+}
+
+int c4iw_dealloc_mw(struct ib_mw *mw)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+
+ mhp = to_c4iw_mw(mw);
+ rhp = mhp->rhp;
+ mmid = (mw->rkey) >> 8;
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ kfree(mhp);
+ PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ return 0;
+}
+
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret = 0;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mhp->rhp = rhp;
+ ret = alloc_pbl(mhp, pbl_depth);
+ if (ret)
+ goto err1;
+ mhp->attr.pbl_size = pbl_depth;
+ ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ goto err2;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_NSMR;
+ mhp->attr.stag = stag;
+ mhp->attr.state = 1;
+ mmid = (stag) >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmr);
+err3:
+ dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err2:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+err1:
+ kfree(mhp);
+err:
+ return ERR_PTR(ret);
+}
+
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
+ int page_list_len)
+{
+ struct c4iw_fr_page_list *c4pl;
+ struct c4iw_dev *dev = to_c4iw_dev(device);
+ dma_addr_t dma_addr;
+ int size = sizeof *c4pl + page_list_len * sizeof(u64);
+
+ c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
+ &dma_addr, GFP_KERNEL);
+ if (!c4pl)
+ return ERR_PTR(-ENOMEM);
+
+ pci_unmap_addr_set(c4pl, mapping, dma_addr);
+ c4pl->dma_addr = dma_addr;
+ c4pl->dev = dev;
+ c4pl->size = size;
+ c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
+ c4pl->ibpl.max_page_list_len = page_list_len;
+
+ return &c4pl->ibpl;
+}
+
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+{
+ struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
+ c4pl, pci_unmap_addr(c4pl, mapping));
+}
+
+int c4iw_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+
+ PDBG("%s ib_mr %p\n", __func__, ib_mr);
+ /* There can be no memory windows */
+ if (atomic_read(&ib_mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(ib_mr);
+ rhp = mhp->rhp;
+ mmid = mhp->attr.stag >> 8;
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ if (mhp->attr.pbl_size)
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ if (mhp->kva)
+ kfree((void *) (unsigned long) mhp->kva);
+ if (mhp->umem)
+ ib_umem_release(mhp->umem);
+ PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ kfree(mhp);
+ return 0;
+}
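The mmid handling above follows the usual iWARP convention that a 32-bit STag is a 24-bit index plus an 8-bit key: every registration path in mem.c derives its idr handle (mmid) by shifting off the key byte and exposes the full STag as both lkey and rkey. A minimal sketch of that relationship; stag_to_mmid() is illustrative only and is not a helper defined in this patch:

/* Mirrors the inline "mmid = stag >> 8" used throughout mem.c. */
static inline u32 stag_to_mmid(u32 stag)
{
	return stag >> 8;	/* drop the 8-bit key, keep the 24-bit index */
}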
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 00000000000..8f645c83a12
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "iw_cxgb4.h"
+
+static int fastreg_support;
+module_param(fastreg_support, int, 0644);
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+
+static int c4iw_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ return -ENOSYS;
+}
+
+static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static int c4iw_ah_destroy(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc,
+ struct ib_grh *in_grh, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_dev *rhp = to_c4iw_dev(context->device);
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_mm_entry *mm, *tmp;
+
+ PDBG("%s context %p\n", __func__, context);
+ list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+ kfree(mm);
+ c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+ kfree(ucontext);
+ return 0;
+}
+
+static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct c4iw_ucontext *context;
+ struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+ c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+ INIT_LIST_HEAD(&context->mmaps);
+ spin_lock_init(&context->mmap_lock);
+ return &context->ibucontext;
+}
+
+static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ int len = vma->vm_end - vma->vm_start;
+ u32 key = vma->vm_pgoff << PAGE_SHIFT;
+ struct c4iw_rdev *rdev;
+ int ret = 0;
+ struct c4iw_mm_entry *mm;
+ struct c4iw_ucontext *ucontext;
+ u64 addr;
+
+ PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ key, len);
+
+ if (vma->vm_start & (PAGE_SIZE-1))
+ return -EINVAL;
+
+ rdev = &(to_c4iw_dev(context->device)->rdev);
+ ucontext = to_c4iw_ucontext(context);
+
+ mm = remove_mmap(ucontext, key, len);
+ if (!mm)
+ return -EINVAL;
+ addr = mm->addr;
+ kfree(mm);
+
+ if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+ /*
+ * Map T4 DB register.
+ */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_flags &= ~VM_MAYREAD;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ } else {
+
+ /*
+ * Map WQ or CQ contig dma memory...
+ */
+ ret = remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ }
+
+ return ret;
+}
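The key looked up here is handed out to userspace later in this patch (the uresp key fields filled in by c4iw_create_qp() in qp.c), and userspace passes it back as the mmap offset. A minimal userspace sketch, assuming fd is the open uverbs device file and db_key is the sq_db_gts_key value returned in the create-QP response; map_sq_doorbell() is a hypothetical helper, not part of the patch:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical helper: maps one page of doorbell space for a QP. */
static void *map_sq_doorbell(int fd, uint64_t db_key)
{
	/* PROT_WRITE only: c4iw_mmap() rejects a doorbell mapping that
	 * requests VM_READ with -EPERM. */
	return mmap(NULL, sysconf(_SC_PAGESIZE), PROT_WRITE, MAP_SHARED,
		    fd, (off_t)db_key);
}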
+
+static int c4iw_deallocate_pd(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ kfree(php);
+ return 0;
+}
+
+static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct c4iw_pd *php;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ rhp = (struct c4iw_dev *) ibdev;
+ pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ if (!pdid)
+ return ERR_PTR(-EINVAL);
+ php = kzalloc(sizeof(*php), GFP_KERNEL);
+ if (!php) {
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ php->pdid = pdid;
+ php->rhp = rhp;
+ if (context) {
+ if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
+ c4iw_deallocate_pd(&php->ibpd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+ PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ return &php->ibpd;
+}
+
+static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ *pkey = 0;
+ return 0;
+}
+
+static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct c4iw_dev *dev;
+
+ PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
+ __func__, ibdev, port, index, gid);
+ dev = to_c4iw_dev(ibdev);
+ BUG_ON(port == 0);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
+ return 0;
+}
+
+static int c4iw_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+
+ struct c4iw_dev *dev;
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ memset(props, 0, sizeof *props);
+ memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->fw_ver = dev->rdev.lldi.fw_vers;
+ props->device_cap_flags = dev->device_cap_flags;
+ props->page_size_cap = T4_PAGESIZE_MASK;
+ props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
+ props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
+ props->max_mr_size = T4_MAX_MR_SIZE;
+ props->max_qp = T4_MAX_NUM_QP;
+ props->max_qp_wr = T4_MAX_QP_DEPTH;
+ props->max_sge = T4_MAX_RECV_SGE;
+ props->max_sge_rd = 1;
+ props->max_qp_rd_atom = c4iw_max_read_depth;
+ props->max_qp_init_rd_atom = c4iw_max_read_depth;
+ props->max_cq = T4_MAX_NUM_CQ;
+ props->max_cqe = T4_MAX_CQ_DEPTH;
+ props->max_mr = c4iw_num_stags(&dev->rdev);
+ props->max_pd = T4_MAX_NUM_PD;
+ props->local_ca_ack_delay = 0;
+ props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
+
+ return 0;
+}
+
+static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct c4iw_dev *dev;
+ struct net_device *netdev;
+ struct in_device *inetdev;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ netdev = dev->rdev.lldi.ports[port-1];
+
+ memset(props, 0, sizeof(struct ib_port_attr));
+ props->max_mtu = IB_MTU_4096;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ if (!netif_carrier_ok(netdev))
+ props->state = IB_PORT_DOWN;
+ else {
+ inetdev = in_dev_get(netdev);
+ if (inetdev) {
+ if (inetdev->ifa_list)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_INIT;
+ in_dev_put(inetdev);
+ } else
+ props->state = IB_PORT_INIT;
+ }
+
+ props->port_cap_flags =
+ IB_PORT_CM_SUP |
+ IB_PORT_SNMP_TUNNEL_SUP |
+ IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_width = 2;
+ props->active_speed = 2;
+ props->max_msg_sz = -1;
+
+ return 0;
+}
+
+static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+}
+
+static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+}
+
+static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ struct ethtool_drvinfo info;
+ struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
+
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ return sprintf(buf, "%s\n", info.driver);
+}
+
+static ssize_t show_board(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
+ c4iw_dev->rdev.lldi.pdev->device);
+}
+
+static int c4iw_get_mib(struct ib_device *ibdev,
+ union rdma_protocol_stats *stats)
+{
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct device_attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+};
+
+int c4iw_register_device(struct c4iw_dev *dev)
+{
+ int ret;
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ BUG_ON(!dev->rdev.lldi.ports[0]);
+ strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
+ memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
+ memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ dev->ibdev.owner = THIS_MODULE;
+ dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+ if (fastreg_support)
+ dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ dev->ibdev.local_dma_lkey = 0;
+ dev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV);
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+ memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
+ dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
+ dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
+ dev->ibdev.query_device = c4iw_query_device;
+ dev->ibdev.query_port = c4iw_query_port;
+ dev->ibdev.modify_port = c4iw_modify_port;
+ dev->ibdev.query_pkey = c4iw_query_pkey;
+ dev->ibdev.query_gid = c4iw_query_gid;
+ dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
+ dev->ibdev.mmap = c4iw_mmap;
+ dev->ibdev.alloc_pd = c4iw_allocate_pd;
+ dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
+ dev->ibdev.create_ah = c4iw_ah_create;
+ dev->ibdev.destroy_ah = c4iw_ah_destroy;
+ dev->ibdev.create_qp = c4iw_create_qp;
+ dev->ibdev.modify_qp = c4iw_ib_modify_qp;
+ dev->ibdev.destroy_qp = c4iw_destroy_qp;
+ dev->ibdev.create_cq = c4iw_create_cq;
+ dev->ibdev.destroy_cq = c4iw_destroy_cq;
+ dev->ibdev.resize_cq = c4iw_resize_cq;
+ dev->ibdev.poll_cq = c4iw_poll_cq;
+ dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
+ dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
+ dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
+ dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
+ dev->ibdev.dereg_mr = c4iw_dereg_mr;
+ dev->ibdev.alloc_mw = c4iw_alloc_mw;
+ dev->ibdev.bind_mw = c4iw_bind_mw;
+ dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
+ dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
+ dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
+ dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
+ dev->ibdev.attach_mcast = c4iw_multicast_attach;
+ dev->ibdev.detach_mcast = c4iw_multicast_detach;
+ dev->ibdev.process_mad = c4iw_process_mad;
+ dev->ibdev.req_notify_cq = c4iw_arm_cq;
+ dev->ibdev.post_send = c4iw_post_send;
+ dev->ibdev.post_recv = c4iw_post_receive;
+ dev->ibdev.get_protocol_stats = c4iw_get_mib;
+
+ dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
+ dev->ibdev.iwcm->connect = c4iw_connect;
+ dev->ibdev.iwcm->accept = c4iw_accept_cr;
+ dev->ibdev.iwcm->reject = c4iw_reject_cr;
+ dev->ibdev.iwcm->create_listen = c4iw_create_listen;
+ dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
+ dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
+ dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
+ dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+
+ ret = ib_register_device(&dev->ibdev, NULL);
+ if (ret)
+ goto bail1;
+
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
+ ret = device_create_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ if (ret)
+ goto bail2;
+ }
+ dev->registered = 1;
+ return 0;
+bail2:
+ ib_unregister_device(&dev->ibdev);
+bail1:
+ kfree(dev->ibdev.iwcm);
+ return ret;
+}
+
+void c4iw_unregister_device(struct c4iw_dev *dev)
+{
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
+ device_remove_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ ib_unregister_device(&dev->ibdev);
+ kfree(dev->ibdev.iwcm);
+ dev->registered = 0;
+ return;
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 00000000000..0c28ed1eafa
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1576 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "iw_cxgb4.h"
+
+static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ /*
+ * uP clears EQ contexts when the connection exits rdma mode,
+ * so no need to post a RESET WR for these EQs.
+ */
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ kfree(wq->sq.sw_sq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return 0;
+}
+
+static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct t4_cq *rcq, struct t4_cq *scq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ int user = (uctx != &rdev->uctx);
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+ int eqsize;
+
+ wq->sq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->sq.qid)
+ return -ENOMEM;
+
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid)
+ goto err1;
+
+ if (!user) {
+ wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
+ GFP_KERNEL);
+ if (!wq->sq.sw_sq)
+ goto err2;
+
+ wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq)
+ goto err3;
+ }
+
+ /*
+ * RQT must be a power of 2.
+ */
+ wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr)
+ goto err4;
+
+ wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, &(wq->sq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->sq.queue)
+ goto err5;
+ memset(wq->sq.queue, 0, wq->sq.memsize);
+ pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+
+ wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, &(wq->rq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->rq.queue)
+ goto err6;
+ PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ __func__, wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ memset(wq->rq.queue, 0, wq->rq.memsize);
+ pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+
+ wq->db = rdev->lldi.db_reg;
+ wq->gts = rdev->lldi.gts_reg;
+ if (user) {
+ wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->sq.qid << rdev->qpshift);
+ wq->sq.udb &= PAGE_MASK;
+ wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->rq.qid << rdev->qpshift);
+ wq->rq.udb &= PAGE_MASK;
+ }
+ wq->rdev = rdev;
+ wq->rq.msn = 1;
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + 2 * sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(2) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(scq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err7;
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err7;
+
+ PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
+ __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+
+ return 0;
+err7:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+err6:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+err5:
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+err4:
+ kfree(wq->rq.sw_rq);
+err3:
+ kfree(wq->sq.sw_sq);
+err2:
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+err1:
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return -ENOMEM;
+}
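create_qp() above, and rdma_init()/rdma_fini() later in this file, share a fire-and-wait idiom: an on-stack c4iw_wr_wait is stashed in the work request cookie, the skb is handed to c4iw_ofld_send(), and the caller sleeps until the firmware reply completes the wait or the timeout marks the adapter fatal. A condensed sketch of that pattern, with the WR-specific fields elided:

struct c4iw_wr_wait wr_wait;

c4iw_init_wr_wait(&wr_wait);
wqe->cookie = (u64)&wr_wait;	/* reply handler completes this wait object */
ret = c4iw_ofld_send(rdev, skb);
if (ret)
	goto out;
wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
if (!wr_wait.done) {
	/* no reply within C4IW_WR_TO: declare the adapter dead */
	rdev->flags = T4_FATAL_ERROR;
	ret = -EIO;
} else
	ret = wr_wait.ret;	/* status taken from the firmware reply */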
+
+static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_SEND_SGE)
+ return -EINVAL;
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+ wqe->send.stag_inv = 0;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+ wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->send.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_SEND_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->send.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->send.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->send.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->send.u.isgl_src[0].r1 = 0;
+ wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
+ wqe->send.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->send.plen = cpu_to_be32(plen);
+ return 0;
+}
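build_rdma_send() ends by expressing the WR length in the 16-byte units the firmware uses wherever a len16 field appears (build_rdma_write() below follows the same shape). A compressed view of that size bookkeeping, with inline_payload standing in for the IB_SEND_INLINE test; this is not new code, only a restatement of the branches above:

if (inline_payload)	/* payload copied into the WR as immediate data */
	size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) + plen;
else			/* payload described by an fw_ri_isgl plus SGEs */
	size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
	       wr->num_sge * sizeof(struct fw_ri_sge);
*len16 = DIV_ROUND_UP(size, 16);	/* WR length in 16-byte units */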
+
+static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_WRITE_SGE)
+ return -EINVAL;
+ wqe->write.r2 = 0;
+ wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->write.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_WRITE_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->write.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->write.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->write.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->write.u.isgl_src[0].r1 = 0;
+ wqe->write.u.isgl_src[0].nsge =
+ cpu_to_be16(wr->num_sge);
+ wqe->write.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->write.plen = cpu_to_be32(plen);
+ return 0;
+}
+
+static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ if (wr->num_sge > 1)
+ return -EINVAL;
+ if (wr->num_sge) {
+ wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+ >> 32));
+ wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+ wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
+ wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
+ wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
+ >> 32));
+ wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
+ } else {
+ wqe->read.stag_src = cpu_to_be32(2);
+ wqe->read.to_src_hi = 0;
+ wqe->read.to_src_lo = 0;
+ wqe->read.stag_sink = cpu_to_be32(2);
+ wqe->read.plen = 0;
+ wqe->read.to_sink_hi = 0;
+ wqe->read.to_sink_lo = 0;
+ }
+ wqe->read.r2 = 0;
+ wqe->read.r5 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ return 0;
+}
+
+static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
+ struct ib_recv_wr *wr, u8 *len16)
+{
+ int i;
+ int plen = 0;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->recv.isgl.sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->recv.isgl.sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->recv.isgl.sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ for (; i < T4_MAX_RECV_SGE; i++) {
+ wqe->recv.isgl.sge[i].stag = 0;
+ wqe->recv.isgl.sge[i].len = 0;
+ wqe->recv.isgl.sge[i].to = 0;
+ }
+ wqe->recv.isgl.op = FW_RI_DATA_ISGL;
+ wqe->recv.isgl.r1 = 0;
+ wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
+ wqe->recv.isgl.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->recv +
+ wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
+static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+
+ struct fw_ri_immd *imdp;
+ __be64 *p;
+ int i;
+ int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+
+ if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+ return -EINVAL;
+
+ wqe->fr.qpbinde_to_dcacpu = 0;
+ wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+ wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+ wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+ wqe->fr.len_hi = 0;
+ wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
+ wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+ wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+ wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
+ 0xffffffff);
+ if (pbllen > T4_MAX_FR_IMMD) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
+ 16);
+ }
+ return 0;
+}
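build_fastreg() rounds the page-list bytes up to a 32-byte boundary and then chooses an encoding: a list that fits within T4_MAX_FR_IMMD bytes is copied into the WR as immediate data, anything larger is referenced by a single DSGL entry pointing at the DMA-coherent buffer that c4iw_alloc_fastreg_pbl() allocated. A worked example, where the 4-entry page list is an assumption for illustration:

/* 4 page addresses: 4 * sizeof(u64) = 32 bytes, already a multiple of 32,
 * so pbllen == 32; if 32 <= T4_MAX_FR_IMMD the addresses go inline after
 * the fr WR, otherwise a DSGL points at c4pl->dma_addr. */
int pbllen = roundup(4 * sizeof(u64), 32);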
+
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+ u8 *len16)
+{
+ wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ wqe->inv.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+ return 0;
+}
+
+void c4iw_qp_add_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+}
+
+void c4iw_qp_rem_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
+ wake_up(&(to_c4iw_qp(qp)->wait));
+}
+
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ u8 len16 = 0;
+ enum fw_wr_opcodes fw_opcode = 0;
+ enum fw_ri_wr_flags fw_flags;
+ struct c4iw_qp *qhp;
+ union t4_wr *wqe;
+ u32 num_wrs;
+ struct t4_swsqe *swsqe;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_sq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (num_wrs == 0) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+ fw_flags = 0;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ fw_flags |= FW_RI_COMPLETION_FLAG;
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_READ_FENCE_FLAG;
+ fw_opcode = FW_RI_SEND_WR;
+ if (wr->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ err = build_rdma_send(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_WRITE:
+ fw_opcode = FW_RI_RDMA_WRITE_WR;
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ err = build_rdma_write(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ fw_opcode = FW_RI_RDMA_READ_WR;
+ swsqe->opcode = FW_RI_READ_REQ;
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+ else
+ fw_flags = 0;
+ err = build_rdma_read(wqe, wr, &len16);
+ if (err)
+ break;
+ swsqe->read_len = wr->sg_list[0].length;
+ if (!qhp->wq.sq.oldest_read)
+ qhp->wq.sq.oldest_read = swsqe;
+ break;
+ case IB_WR_FAST_REG_MR:
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ swsqe->opcode = FW_RI_FAST_REGISTER;
+ err = build_fastreg(wqe, wr, &len16);
+ break;
+ case IB_WR_LOCAL_INV:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
+ fw_opcode = FW_RI_INV_LSTAG_WR;
+ swsqe->opcode = FW_RI_LOCAL_INV;
+ err = build_inv_stag(wqe, wr, &len16);
+ break;
+ default:
+ PDBG("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
+ err = -EINVAL;
+ }
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->wr_id = wr->wr_id;
+
+ init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
+
+ PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
+ __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+ swsqe->opcode, swsqe->read_len);
+ wr = wr->next;
+ num_wrs--;
+ t4_sq_produce(&qhp->wq);
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_sq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
+
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct c4iw_qp *qhp;
+ union t4_recv_wr *wqe;
+ u32 num_wrs;
+ u8 len16 = 0;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_rq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+ if (num_wrs)
+ err = build_rdma_recv(qhp, wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = qhp->wq.rq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+ if (len16 < 5)
+ wqe->flits[8] = 0;
+
+ PDBG("%s cookie 0x%llx pidx %u\n", __func__,
+ (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+ t4_rq_produce(&qhp->wq);
+ wr = wr->next;
+ num_wrs--;
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_rq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
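Both post paths batch their doorbell update: each accepted WR only advances the software producer index and bumps a local count, and the doorbell is written once after the loop, and only if doorbells are currently enabled. In outline (receive side shown; the send side uses t4_sq_produce() and t4_ring_sq_db() in the same way):

u16 idx = 0;

while (wr) {
	/* ...validate wr and build the WQE at qhp->wq.rq.pidx... */
	t4_rq_produce(&qhp->wq);	/* advance the producer index */
	wr = wr->next;
	idx++;
}
if (t4_wq_db_enabled(&qhp->wq))
	t4_ring_rq_db(&qhp->wq, idx);	/* one doorbell write for the batch */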
+
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
+{
+ return -ENOSYS;
+}
+
+static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
+ u8 *ecode)
+{
+ int status;
+ int tagged;
+ int opcode;
+ int rqtype;
+ int send_inv;
+
+ if (!err_cqe) {
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ return;
+ }
+
+ status = CQE_STATUS(err_cqe);
+ opcode = CQE_OPCODE(err_cqe);
+ rqtype = RQ_TYPE(err_cqe);
+ send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV);
+ tagged = (opcode == FW_RI_RDMA_WRITE) ||
+ (rqtype && (opcode == FW_RI_READ_RESP));
+
+ switch (status) {
+ case T4_ERR_STAG:
+ if (send_inv) {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_INV_STAG;
+ }
+ break;
+ case T4_ERR_PDID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ if ((opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV))
+ *ecode = RDMAP_CANT_INV_STAG;
+ else
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_QPID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_ACCESS:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_ACC_VIOL;
+ break;
+ case T4_ERR_WRAP:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_TO_WRAP;
+ break;
+ case T4_ERR_BOUND:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_BASE_BOUNDS;
+ }
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ break;
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_OUT_OF_RQE:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_NOBUF;
+ break;
+ case T4_ERR_PBL_ADDR_BOUND:
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ break;
+ case T4_ERR_CRC:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_CRC_ERR;
+ break;
+ case T4_ERR_MARKER:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_MARKER_ERR;
+ break;
+ case T4_ERR_PDU_LEN_ERR:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_MSG_TOOBIG;
+ break;
+ case T4_ERR_DDP_VERSION:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_INV_VERS;
+ } else {
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_VERS;
+ }
+ break;
+ case T4_ERR_RDMA_VERSION:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_VERS;
+ break;
+ case T4_ERR_OPCODE:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_OPCODE;
+ break;
+ case T4_ERR_DDP_QUEUE_NUM:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_QN;
+ break;
+ case T4_ERR_MSN:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_RANGE;
+ break;
+ case T4_ERR_TBIT:
+ *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_MO:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MO;
+ break;
+ default:
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ }
+}
+
+int c4iw_post_zb_read(struct c4iw_qp *qhp)
+{
+ union t4_wr *wqe;
+ struct sk_buff *skb;
+ u8 len16;
+
+ PDBG("%s enter\n", __func__);
+ skb = alloc_skb(40, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
+ memset(wqe, 0, sizeof wqe->read);
+ wqe->read.r2 = cpu_to_be64(0);
+ wqe->read.stag_sink = cpu_to_be32(1);
+ wqe->read.to_sink_hi = cpu_to_be32(0);
+ wqe->read.to_sink_lo = cpu_to_be32(1);
+ wqe->read.stag_src = cpu_to_be32(1);
+ wqe->read.plen = cpu_to_be32(0);
+ wqe->read.to_src_hi = cpu_to_be32(0);
+ wqe->read.to_src_lo = cpu_to_be32(1);
+ len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
+
+ return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+ gfp_t gfp)
+{
+ struct fw_ri_wr *wqe;
+ struct sk_buff *skb;
+ struct terminate_message *term;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, gfp);
+ if (!skb)
+ return;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
+ wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+ term = (struct terminate_message *)wqe->u.terminate.termmsg;
+ build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
+ c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
+/*
+ * Assumes qhp lock is held.
+ */
+static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ struct c4iw_cq *schp, unsigned long *flag)
+{
+ int count;
+ int flushed;
+
+ PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ /* take a ref on the qhp since we must release the lock */
+ atomic_inc(&qhp->refcnt);
+ spin_unlock_irqrestore(&qhp->lock, *flag);
+
+	/* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&rchp->cq);
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, *flag);
+ if (flushed)
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+
+	/* locking hierarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&schp->cq);
+ c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&schp->lock, *flag);
+ if (flushed)
+ (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+
+ /* deref */
+ if (atomic_dec_and_test(&qhp->refcnt))
+ wake_up(&qhp->wait);
+
+ spin_lock_irqsave(&qhp->lock, *flag);
+}
+
+static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+{
+ struct c4iw_cq *rchp, *schp;
+
+ rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+ schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+ if (qhp->ibqp.uobject) {
+ t4_set_wq_in_error(&qhp->wq);
+ t4_set_cq_in_error(&rchp->cq);
+ if (schp != rchp)
+ t4_set_cq_in_error(&schp->cq);
+ return;
+ }
+ __flush_qp(qhp, rchp, schp, flag);
+}
+
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.fini.type = FW_RI_TYPE_FINI;
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else {
+ ret = wr_wait.ret;
+ if (ret)
+ printk(KERN_WARNING MOD
+ "%s: Abnormal close qpid %d ret %u\n",
+ pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
+ ret);
+ }
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
+{
+ memset(&init->u, 0, sizeof init->u);
+ switch (p2p_type) {
+ case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
+ init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
+ init->u.write.stag_sink = cpu_to_be32(1);
+ init->u.write.to_sink = cpu_to_be64(1);
+ init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
+ sizeof(struct fw_ri_immd),
+ 16);
+ break;
+ case FW_RI_INIT_P2PTYPE_READ_REQ:
+ init->u.write.opcode = FW_RI_RDMA_READ_WR;
+ init->u.read.stag_src = cpu_to_be32(1);
+ init->u.read.to_src_lo = cpu_to_be32(1);
+ init->u.read.stag_sink = cpu_to_be32(1);
+ init->u.read.to_sink_lo = cpu_to_be32(1);
+ init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+ break;
+ }
+}
+
+static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.init.type = FW_RI_TYPE_INIT;
+ wqe->u.init.mpareqbit_p2ptype =
+ V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
+ V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+ wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
+ if (qhp->attr.mpa_attr.recv_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.xmit_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.crc_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
+
+ wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
+ FW_RI_QP_RDMA_WRITE_ENABLE |
+ FW_RI_QP_BIND_ENABLE;
+ if (!qhp->ibqp.uobject)
+ wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
+ FW_RI_QP_STAG0_ENABLE;
+ wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
+ wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
+ wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
+ wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
+ wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
+ wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
+ wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
+ wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ if (qhp->attr.mpa_attr.initiator)
+ build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal)
+{
+ int ret = 0;
+ struct c4iw_qp_attributes newattr = qhp->attr;
+ unsigned long flag;
+ int disconnect = 0;
+ int terminate = 0;
+ int abort = 0;
+ int free = 0;
+ struct c4iw_ep *ep = NULL;
+
+ PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
+ qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
+ (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+
+ spin_lock_irqsave(&qhp->lock, flag);
+
+ /* Process attr changes if in IDLE */
+ if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
+ if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
+ ret = -EIO;
+ goto out;
+ }
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
+ newattr.enable_rdma_read = attrs->enable_rdma_read;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
+ newattr.enable_rdma_write = attrs->enable_rdma_write;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
+ newattr.enable_bind = attrs->enable_bind;
+ if (mask & C4IW_QP_ATTR_MAX_ORD) {
+ if (attrs->max_ord > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ord = attrs->max_ord;
+ }
+ if (mask & C4IW_QP_ATTR_MAX_IRD) {
+ if (attrs->max_ird > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ird = attrs->max_ird;
+ }
+ qhp->attr = newattr;
+ }
+
+ if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
+ goto out;
+ if (qhp->attr.state == attrs->next_state)
+ goto out;
+
+ switch (qhp->attr.state) {
+ case C4IW_QP_STATE_IDLE:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_RTS:
+ if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.mpa_attr = attrs->mpa_attr;
+ qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
+ qhp->ep = qhp->attr.llp_stream_handle;
+ qhp->attr.state = C4IW_QP_STATE_RTS;
+
+ /*
+ * Ref the endpoint here and deref when we
+ * disassociate the endpoint from the QP. This
+ * happens in CLOSING->IDLE transition or *->ERROR
+ * transition.
+ */
+ c4iw_get_ep(&qhp->ep->com);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_init(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret)
+ goto err;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ flush_qp(qhp, &flag);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_RTS:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_CLOSING:
+ BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ if (!internal) {
+ abort = 0;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_fini(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret) {
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ disconnect = abort = 1;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+ if (qhp->ibqp.uobject)
+ t4_set_wq_in_error(&qhp->wq);
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ terminate = 1;
+ disconnect = 1;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ if (!internal) {
+ abort = 1;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_CLOSING:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_IDLE:
+ flush_qp(qhp, &flag);
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.llp_stream_handle = NULL;
+ c4iw_put_ep(&qhp->ep->com);
+ qhp->ep = NULL;
+ wake_up(&qhp->wait);
+ break;
+ case C4IW_QP_STATE_ERROR:
+ goto err;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_ERROR:
+ if (attrs->next_state != C4IW_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ goto err;
+ break;
+ default:
+ printk(KERN_ERR "%s in a bad state %d\n",
+ __func__, qhp->attr.state);
+ ret = -EINVAL;
+ goto err;
+ break;
+ }
+ goto out;
+err:
+ PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ qhp->wq.sq.qid);
+
+ /* disassociate the LLP connection */
+ qhp->attr.llp_stream_handle = NULL;
+ ep = qhp->ep;
+ qhp->ep = NULL;
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ free = 1;
+ wake_up(&qhp->wait);
+ BUG_ON(!ep);
+ flush_qp(qhp, &flag);
+out:
+ spin_unlock_irqrestore(&qhp->lock, flag);
+
+ if (terminate)
+ post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
+
+ /*
+ * If disconnect is 1, then we need to initiate a disconnect
+ * on the EP. This can be a normal close (RTS->CLOSING) or
+ * an abnormal close (RTS/CLOSING->ERROR).
+ */
+ if (disconnect) {
+ c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+ GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+ }
+
+ /*
+ * If free is 1, then we've disassociated the EP from the QP
+ * and we need to dereference the EP.
+ */
+ if (free)
+ c4iw_put_ep(&ep->com);
+
+ PDBG("%s exit state %d\n", __func__, qhp->attr.state);
+ return ret;
+}
+
+int c4iw_destroy_qp(struct ib_qp *ib_qp)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_qp_attributes attrs;
+ struct c4iw_ucontext *ucontext;
+
+ qhp = to_c4iw_qp(ib_qp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ wait_event(qhp->wait, !qhp->ep);
+
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+ atomic_dec(&qhp->refcnt);
+ wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+
+ ucontext = ib_qp->uobject ?
+ to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ kfree(qhp);
+ return 0;
+}
+
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_pd *php;
+ struct c4iw_cq *schp;
+ struct c4iw_cq *rchp;
+ struct c4iw_create_qp_resp uresp;
+ int sqsize, rqsize;
+ struct c4iw_ucontext *ucontext;
+ int ret;
+ struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (attrs->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
+ rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
+ if (!schp || !rchp)
+ return ERR_PTR(-EINVAL);
+
+ if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
+ return ERR_PTR(-EINVAL);
+
+ rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
+ if (rqsize > T4_MAX_RQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
+ if (sqsize > T4_MAX_SQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+
+ qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
+ if (!qhp)
+ return ERR_PTR(-ENOMEM);
+ qhp->wq.sq.size = sqsize;
+ qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+
+ if (ucontext) {
+ qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
+ qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ }
+
+ PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
+ __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
+
+ ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ attrs->cap.max_recv_wr = rqsize - 1;
+ attrs->cap.max_send_wr = sqsize - 1;
+ attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
+
+ qhp->rhp = rhp;
+ qhp->attr.pd = php->pdid;
+ qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
+ qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
+ qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
+ qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.next_state = C4IW_QP_STATE_IDLE;
+ qhp->attr.enable_rdma_read = 1;
+ qhp->attr.enable_rdma_write = 1;
+ qhp->attr.enable_bind = 1;
+ qhp->attr.max_ord = 1;
+ qhp->attr.max_ird = 1;
+ spin_lock_init(&qhp->lock);
+ init_waitqueue_head(&qhp->wait);
+ atomic_set(&qhp->refcnt, 1);
+
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+ if (ret)
+ goto err2;
+
+ if (udata) {
+ mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
+ if (!mm1) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ if (!mm2) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
+ if (!mm3) {
+ ret = -ENOMEM;
+ goto err5;
+ }
+ mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
+ if (!mm4) {
+ ret = -ENOMEM;
+ goto err6;
+ }
+
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.sqid = qhp->wq.sq.qid;
+ uresp.sq_size = qhp->wq.sq.size;
+ uresp.sq_memsize = qhp->wq.sq.memsize;
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.sq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.sq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err7;
+ mm1->key = uresp.sq_key;
+ mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+ mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_mmap(ucontext, mm1);
+ mm2->key = uresp.rq_key;
+ mm2->addr = virt_to_phys(qhp->wq.rq.queue);
+ mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, mm2);
+ mm3->key = uresp.sq_db_gts_key;
+ mm3->addr = qhp->wq.sq.udb;
+ mm3->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm3);
+ mm4->key = uresp.rq_db_gts_key;
+ mm4->addr = qhp->wq.rq.udb;
+ mm4->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm4);
+ }
+ qhp->ibqp.qp_num = qhp->wq.sq.qid;
+ init_timer(&(qhp->timer));
+ PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
+ __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+ qhp->wq.sq.qid);
+ return &qhp->ibqp;
+err7:
+ kfree(mm4);
+err6:
+ kfree(mm3);
+err5:
+ kfree(mm2);
+err4:
+ kfree(mm1);
+err3:
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+err2:
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(qhp);
+ return ERR_PTR(ret);
+}
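For the user-mode path above, four mmap cookies are handed back: the SQ and RQ queue memory (physical addresses of the kernel allocations, page-aligned lengths) and the two per-queue doorbell/GTS pages (wq.sq.udb and wq.rq.udb). Each cookie is the per-ucontext key, advanced by PAGE_SIZE under mmap_lock, and the key/address/length triples are registered with insert_mmap() so that the driver's mmap path can presumably match a userspace offset to the right region; the userspace provider library is then expected to mmap each key it finds in c4iw_create_qp_resp.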
+
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ enum c4iw_qp_attr_mask mask = 0;
+ struct c4iw_qp_attributes attrs;
+
+ PDBG("%s ib_qp %p\n", __func__, ibqp);
+
+ /* iwarp does not support the RTR state */
+ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
+ attr_mask &= ~IB_QP_STATE;
+
+ /* Make sure we still have something left to do */
+ if (!attr_mask)
+ return 0;
+
+ memset(&attrs, 0, sizeof attrs);
+ qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = c4iw_convert_state(attr->qp_state);
+ attrs.enable_rdma_read = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ attrs.enable_rdma_write = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+
+ mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
+ mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
+ (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
+
+ return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
+}
+
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
+{
+ PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
+}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 00000000000..fb195d1d901
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* Crude resource management */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SIZE 16
+
+static int __c4iw_init_resource_fifo(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low,
+ u32 skip_high,
+ int random)
+{
+ u32 i, j, entry = 0, idx;
+ u32 random_bytes;
+ u32 rarray[16];
+ spin_lock_init(fifo_lock);
+
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
+ if (random) {
+ j = 0;
+ random_bytes = random32();
+ for (i = 0; i < RANDOM_SIZE; i++)
+ rarray[i] = i + skip_low;
+ for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
+ if (j >= RANDOM_SIZE) {
+ j = 0;
+ random_bytes = random32();
+ }
+ idx = (random_bytes >> (j * 2)) & 0xF;
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[idx],
+ sizeof(u32));
+ rarray[idx] = i;
+ j++;
+ }
+ for (i = 0; i < RANDOM_SIZE; i++)
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[i],
+ sizeof(u32));
+ } else
+ for (i = skip_low; i < nr - skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock))
+ break;
+ return 0;
+}
+
+static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 0);
+}
+
+static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 1);
+}
+
+static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
+{
+ u32 i;
+
+ spin_lock_init(&rdev->resource.qid_fifo_lock);
+
+ if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
+ GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
+ if (!(i & rdev->qpmask))
+ kfifo_in(&rdev->resource.qid_fifo,
+ (unsigned char *) &i, sizeof(u32));
+ return 0;
+}
+
+/* nr_* must be power of 2 */
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+{
+ int err = 0;
+ err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock,
+ nr_tpt, 1, 0);
+ if (err)
+ goto tpt_err;
+ err = c4iw_init_qid_fifo(rdev);
+ if (err)
+ goto qid_err;
+ err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
+ &rdev->resource.pdid_fifo_lock,
+ nr_pdid, 1, 0);
+ if (err)
+ goto pdid_err;
+ return 0;
+pdid_err:
+ kfifo_free(&rdev->resource.qid_fifo);
+qid_err:
+ kfifo_free(&rdev->resource.tpt_fifo);
+tpt_err:
+ return -ENOMEM;
+}
+
+/*
+ * returns 0 if no resource available
+ */
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+{
+ u32 entry;
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
+ return entry;
+ else
+ return 0;
+}
+
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+{
+ PDBG("%s entry 0x%x\n", __func__, entry);
+ kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+}
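As a usage sketch only (not part of the patch): callers elsewhere in the driver draw an id from one of the kfifo-backed pools with c4iw_get_resource(), treat a return of 0 as pool exhaustion, and hand the id back with c4iw_put_resource(). The helper below is hypothetical; the fifo and lock fields are the ones set up in c4iw_init_resource() above, and the iw_cxgb4.h definitions are assumed.

/* Hypothetical illustration: reserve a PD id and release it again. */
static int example_pdid_cycle(struct c4iw_rdev *rdev)
{
	u32 pdid = c4iw_get_resource(&rdev->resource.pdid_fifo,
				     &rdev->resource.pdid_fifo_lock);

	if (!pdid)
		return -ENOMEM;		/* 0 means the pool is empty */

	/* ... the id would normally be stored in a new c4iw_pd ... */

	c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
			  &rdev->resource.pdid_fifo_lock);
	return 0;
}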
+
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->cqids)) {
+ entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+
+ /*
+ * now put the same ids on the qp list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
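To make the grouping above concrete (illustrative numbers only): qpmask has the low bits set for however many qids share one doorbell/GTS page, and the global fifo holds only aligned qids (see c4iw_init_qid_fifo()). If qpmask were 3, pulling qid 1024 for a CQ would also stock 1025-1027 onto this ucontext's cqid list and 1024-1027 onto its qpid list, so later allocations in the same context reuse the same page without touching the global fifo.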
+
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->cqids);
+ mutex_unlock(&uctx->lock);
+}
+
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->qpids)) {
+ entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+
+ /*
+ * now put the same ids on the cq list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->cqids);
+		for (i = qid + 1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
+
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->qpids);
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_destroy_resource(struct c4iw_resource *rscp)
+{
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->qid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
+}
+
+/*
+ * PBL Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
+
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ return (u32)addr;
+}
+
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+}
+
+int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned pbl_start, pbl_chunk, pbl_top;
+
+ rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
+ if (!rdev->pbl_pool)
+ return -ENOMEM;
+
+ pbl_start = rdev->lldi.vr->pbl.start;
+ pbl_chunk = rdev->lldi.vr->pbl.size;
+ pbl_top = pbl_start + pbl_chunk;
+
+ while (pbl_start < pbl_top) {
+ pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
+ if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
+ PDBG("%s failed to add PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all PBL chunks (%x/%x)\n",
+ pbl_start,
+ pbl_top - pbl_start);
+ return 0;
+ }
+ pbl_chunk >>= 1;
+ } else {
+ PDBG("%s added PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ pbl_start += pbl_chunk;
+ }
+ }
+
+ return 0;
+}
+
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->pbl_pool);
+}
+
+/*
+ * RQT Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
+
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ return (u32)addr;
+}
+
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+}
+
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned rqt_start, rqt_chunk, rqt_top;
+
+ rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
+ if (!rdev->rqt_pool)
+ return -ENOMEM;
+
+ rqt_start = rdev->lldi.vr->rq.start;
+ rqt_chunk = rdev->lldi.vr->rq.size;
+ rqt_top = rqt_start + rqt_chunk;
+
+ while (rqt_start < rqt_top) {
+ rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
+ if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
+ PDBG("%s failed to add RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all RQT chunks (%x/%x)\n",
+ rqt_start, rqt_top - rqt_start);
+ return 0;
+ }
+ rqt_chunk >>= 1;
+ } else {
+ PDBG("%s added RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ rqt_start += rqt_chunk;
+ }
+ }
+ return 0;
+}
+
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->rqt_pool);
+}
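A usage sketch of the RQT pool (not part of the patch): c4iw_rqtpool_alloc() takes its size in RQT entries and converts to bytes with size << 6, i.e. 64 bytes per entry, consistent with the MIN_RQT_SHIFT comment (1KB == 16 entries). The helper below is hypothetical and assumes the surrounding iw_cxgb4 definitions.

/* Hypothetical illustration: reserve RQT space for an RQ of rqsize
 * entries, then release it.  The return value is an adapter address
 * (0 on failure), not a kernel pointer. */
static int example_rqt_cycle(struct c4iw_rdev *rdev, int rqsize)
{
	u32 rqt_addr = c4iw_rqtpool_alloc(rdev, rqsize);

	if (!rqt_addr)
		return -ENOMEM;

	/* rqt_addr is what would be programmed as the RQ's hwrqaddr */

	c4iw_rqtpool_free(rdev, rqt_addr, rqsize);
	return 0;
}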
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 00000000000..1057cb96302
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_H__
+#define __T4_H__
+
+#include "t4_hw.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_ri_api.h"
+
+#define T4_QID_BASE 1024
+#define T4_MAX_QIDS 256
+#define T4_MAX_NUM_QP (1<<16)
+#define T4_MAX_NUM_CQ (1<<15)
+#define T4_MAX_NUM_PD (1<<15)
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
+#define T4_MAX_NUM_STAG (1<<15)
+#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_FW_MAJ 0
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+
+struct t4_status_page {
+ __be32 rsvd1; /* flit 0 - hw owns */
+ __be16 rsvd2;
+ __be16 qid;
+ __be16 cidx;
+ __be16 pidx;
+ u8 qp_err; /* flit 1 - sw owns */
+ u8 db_off;
+};
+
+#define T4_EQ_SIZE 64
+
+#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+
+#define T4_RQ_NUM_SLOTS 2
+#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_MAX_RECV_SGE 4
+
+union t4_wr {
+ struct fw_ri_res_wr res;
+ struct fw_ri_wr ri;
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_send_wr send;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_bind_mw_wr bind;
+ struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_inv_lstag_wr inv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+};
+
+union t4_recv_wr {
+ struct fw_ri_recv_wr recv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+};
+
+static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
+ enum fw_wr_opcodes opcode, u8 flags, u8 len16)
+{
+ int slots_used;
+
+ wqe->send.opcode = (u8)opcode;
+ wqe->send.flags = flags;
+ wqe->send.wrid = wrid;
+ wqe->send.r1[0] = 0;
+ wqe->send.r1[1] = 0;
+ wqe->send.r1[2] = 0;
+ wqe->send.len16 = len16;
+
+ slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
+ while (slots_used < T4_SQ_NUM_SLOTS) {
+ wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
+ slots_used++;
+ }
+}
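Worked example of the slot arithmetic above: len16 counts 16-byte units and T4_EQ_SIZE is 64, so a WR with len16 = 6 spans 96 bytes and DIV_ROUND_UP(96, 64) = 2 of the four SQ slots; the loop then zeroes the first flit of the remaining two slots, presumably so stale contents there are not misinterpreted.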
+
+/* CQE/AE status codes */
+#define T4_ERR_SUCCESS 0x0
+#define T4_ERR_STAG			   0x1	/* STAG invalid: either the   */
+						/* STAG is off limit, is 0,   */
+						/* or the STAG_key mismatches */
+#define T4_ERR_PDID 0x2 /* PDID mismatch */
+#define T4_ERR_QPID 0x3 /* QPID mismatch */
+#define T4_ERR_ACCESS 0x4 /* Invalid access right */
+#define T4_ERR_WRAP 0x5 /* Wrap error */
+#define T4_ERR_BOUND			   0x6	/* base and bounds violation  */
+#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
+ /* shared memory region */
+#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a   */
+						/* MR with a bound MW	      */
+#define T4_ERR_ECC 0x9 /* ECC error detected */
+#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
+ /* reading PSTAG for a MW */
+ /* Invalidate */
+#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
+ /* software error */
+#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
+#define T4_ERR_CRC 0x10 /* CRC error */
+#define T4_ERR_MARKER 0x11 /* Marker error */
+#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
+#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
+#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
+#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
+#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
+#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
+#define T4_ERR_MSN 0x18 /* MSN error */
+#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
+#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
+ /* or READ_REQ */
+#define T4_ERR_MSN_GAP 0x1B
+#define T4_ERR_MSN_RANGE 0x1C
+#define T4_ERR_IRD_OVERFLOW 0x1D
+#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
+ /* software error */
+#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
+ /* mismatch) */
+/*
+ * CQE defs
+ */
+struct t4_cqe {
+ __be32 header;
+ __be32 len;
+ union {
+ struct {
+ __be32 stag;
+ __be32 msn;
+ } rcqe;
+ struct {
+ u32 nada1;
+ u16 nada2;
+ u16 cidx;
+ } scqe;
+ struct {
+ __be32 wrid_hi;
+ __be32 wrid_low;
+ } gen;
+ } u;
+ __be64 reserved;
+ __be64 bits_type_ts;
+};
+
+/* macros for flit 0 of the cqe */
+
+#define S_CQE_QPID 12
+#define M_CQE_QPID 0xFFFFF
+#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
+#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
+
+#define S_CQE_SWCQE 11
+#define M_CQE_SWCQE 0x1
+#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
+#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
+
+#define S_CQE_STATUS 5
+#define M_CQE_STATUS 0x1F
+#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
+#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
+
+#define S_CQE_TYPE 4
+#define M_CQE_TYPE 0x1
+#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
+#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
+
+#define S_CQE_OPCODE 0
+#define M_CQE_OPCODE 0xF
+#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
+#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
+
+#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SQ_TYPE(x) (CQE_TYPE((x)))
+#define RQ_TYPE(x) (!CQE_TYPE((x)))
+#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+
+#define CQE_SEND_OPCODE(x)( \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+
+#define CQE_LEN(x) (be32_to_cpu((x)->len))
+
+/* used for RQ completion processing */
+#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
+#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+
+/* used for SQ completion processing */
+#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+
+/* generic accessor macros */
+#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
+#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+
+/* macros for flit 3 of the cqe */
+#define S_CQE_GENBIT 63
+#define M_CQE_GENBIT 0x1
+#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
+#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+
+#define S_CQE_OVFBIT 62
+#define M_CQE_OVFBIT 0x1
+#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+
+#define S_CQE_IQTYPE 60
+#define M_CQE_IQTYPE 0x3
+#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+
+#define M_CQE_TS 0x0fffffffffffffffULL
+#define G_CQE_TS(x) ((x) & M_CQE_TS)
+
+#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
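A small decoding sketch (illustrative only, assuming this header's context) showing how the accessors above combine on a received CQE:

/* Hypothetical helper: pull the commonly used fields out of a CQE. */
static inline void example_decode_cqe(struct t4_cqe *cqe)
{
	u32 qpid   = CQE_QPID(cqe);	/* flit 0, bits 31:12 */
	u32 status = CQE_STATUS(cqe);	/* one of the T4_ERR_* codes */
	u32 opcode = CQE_OPCODE(cqe);
	int sq     = SQ_TYPE(cqe);	/* 1 = SQ completion, 0 = RQ */

	printk(KERN_DEBUG "qpid %u status %u opcode %u (%s)\n",
	       qpid, status, opcode, sq ? "SQ" : "RQ");
}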
+
+struct t4_swsqe {
+ u64 wr_id;
+ struct t4_cqe cqe;
+ int read_len;
+ int opcode;
+ int complete;
+ int signaled;
+ u16 idx;
+};
+
+struct t4_sq {
+ union t4_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swsqe *sw_sq;
+ struct t4_swsqe *oldest_read;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_swrqe {
+ u64 wr_id;
+};
+
+struct t4_rq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u16 rqt_size;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_wq {
+ struct t4_sq sq;
+ struct t4_rq rq;
+ void __iomem *db;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+};
+
+static inline int t4_rqes_posted(struct t4_wq *wq)
+{
+ return wq->rq.in_use;
+}
+
+static inline int t4_rq_empty(struct t4_wq *wq)
+{
+ return wq->rq.in_use == 0;
+}
+
+static inline int t4_rq_full(struct t4_wq *wq)
+{
+ return wq->rq.in_use == (wq->rq.size - 1);
+}
+
+static inline u32 t4_rq_avail(struct t4_wq *wq)
+{
+ return wq->rq.size - 1 - wq->rq.in_use;
+}
+
+static inline void t4_rq_produce(struct t4_wq *wq)
+{
+ wq->rq.in_use++;
+ if (++wq->rq.pidx == wq->rq.size)
+ wq->rq.pidx = 0;
+}
+
+static inline void t4_rq_consume(struct t4_wq *wq)
+{
+ wq->rq.in_use--;
+ wq->rq.msn++;
+ if (++wq->rq.cidx == wq->rq.size)
+ wq->rq.cidx = 0;
+}
+
+static inline int t4_sq_empty(struct t4_wq *wq)
+{
+ return wq->sq.in_use == 0;
+}
+
+static inline int t4_sq_full(struct t4_wq *wq)
+{
+ return wq->sq.in_use == (wq->sq.size - 1);
+}
+
+static inline u32 t4_sq_avail(struct t4_wq *wq)
+{
+ return wq->sq.size - 1 - wq->sq.in_use;
+}
+
+static inline void t4_sq_produce(struct t4_wq *wq)
+{
+ wq->sq.in_use++;
+ if (++wq->sq.pidx == wq->sq.size)
+ wq->sq.pidx = 0;
+}
+
+static inline void t4_sq_consume(struct t4_wq *wq)
+{
+ wq->sq.in_use--;
+ if (++wq->sq.cidx == wq->sq.size)
+ wq->sq.cidx = 0;
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_SQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+}
+
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_RQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+}
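Note on the doorbell helpers above: inc is the number of WRs just posted and is scaled by T4_SQ_NUM_SLOTS (4) or T4_RQ_NUM_SLOTS (2) because, as the queue sizing elsewhere in this header suggests, the producer index the hardware tracks is in 64-byte EQ slots rather than WRs; posting two sends therefore writes PIDX(8). The wmb() orders the WR stores ahead of the doorbell write.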
+
+static inline int t4_wq_in_error(struct t4_wq *wq)
+{
+ return wq->sq.queue[wq->sq.size].status.qp_err;
+}
+
+static inline void t4_set_wq_in_error(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.qp_err = 1;
+ wq->rq.queue[wq->rq.size].status.qp_err = 1;
+}
+
+static inline void t4_disable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 1;
+ wq->rq.queue[wq->rq.size].status.db_off = 1;
+}
+
+static inline void t4_enable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 0;
+ wq->rq.queue[wq->rq.size].status.db_off = 0;
+}
+
+static inline int t4_wq_db_enabled(struct t4_wq *wq)
+{
+ return !wq->sq.queue[wq->sq.size].status.db_off;
+}
+
+struct t4_cq {
+ struct t4_cqe *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_cqe *sw_queue;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+ u64 ugts;
+ size_t memsize;
+ __be64 bits_type_ts;
+ u32 cqid;
+ u16 size; /* including status page */
+ u16 cidx;
+ u16 sw_pidx;
+ u16 sw_cidx;
+ u16 sw_in_use;
+ u16 cidx_inc;
+ u8 gen;
+ u8 error;
+};
+
+static inline int t4_arm_cq(struct t4_cq *cq, int se)
+{
+ u32 val;
+
+ while (cq->cidx_inc > CIDXINC_MASK) {
+ val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
+ cq->cidx_inc -= CIDXINC_MASK;
+ }
+ val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
+ cq->cidx_inc = 0;
+ return 0;
+}
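The loop in t4_arm_cq() appears to exist because cidx_inc can accumulate more consumed entries than the CIDXINC field can carry in a single GTS write; the excess is returned to hardware in CIDXINC_MASK-sized chunks with SEINTARM(0) before the final write that actually arms the CQ, with what looks like a different TIMERREG value distinguishing the flush writes from the arming write.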
+
+static inline void t4_swcq_produce(struct t4_cq *cq)
+{
+ cq->sw_in_use++;
+ if (++cq->sw_pidx == cq->size)
+ cq->sw_pidx = 0;
+}
+
+static inline void t4_swcq_consume(struct t4_cq *cq)
+{
+ cq->sw_in_use--;
+ if (++cq->sw_cidx == cq->size)
+ cq->sw_cidx = 0;
+}
+
+static inline void t4_hwcq_consume(struct t4_cq *cq)
+{
+ cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+ if (++cq->cidx_inc == cq->size)
+ cq->cidx_inc = 0;
+ if (++cq->cidx == cq->size) {
+ cq->cidx = 0;
+ cq->gen ^= 1;
+ }
+}
+
+static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
+{
+ return (CQE_GENBIT(cqe) == cq->gen);
+}
+
+static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret;
+ u16 prev_cidx;
+
+ if (cq->cidx == 0)
+ prev_cidx = cq->size - 1;
+ else
+ prev_cidx = cq->cidx - 1;
+
+ if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+ ret = -EOVERFLOW;
+ cq->error = 1;
+ printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+ *cqe = &cq->queue[cq->cidx];
+ ret = 0;
+ } else
+ ret = -ENODATA;
+ return ret;
+}
+
+static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
+{
+ if (cq->sw_in_use)
+ return &cq->sw_queue[cq->sw_cidx];
+ return NULL;
+}
+
+static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret = 0;
+
+ if (cq->error)
+ ret = -ENODATA;
+ else if (cq->sw_in_use)
+ *cqe = &cq->sw_queue[cq->sw_cidx];
+ else
+ ret = t4_next_hw_cqe(cq, cqe);
+ return ret;
+}
+
+static inline int t4_cq_in_error(struct t4_cq *cq)
+{
+ return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+}
+
+static inline void t4_set_cq_in_error(struct t4_cq *cq)
+{
+ ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+}
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 00000000000..fc706bd07fa
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T4FW_RI_API_H_
+#define _T4FW_RI_API_H_
+
+#include "t4fw_api.h"
+
+enum fw_ri_wr_opcode {
+ FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
+ FW_RI_READ_REQ = 0x1,
+ FW_RI_READ_RESP = 0x2,
+ FW_RI_SEND = 0x3,
+ FW_RI_SEND_WITH_INV = 0x4,
+ FW_RI_SEND_WITH_SE = 0x5,
+ FW_RI_SEND_WITH_SE_INV = 0x6,
+ FW_RI_TERMINATE = 0x7,
+ FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
+ FW_RI_BIND_MW = 0x9,
+ FW_RI_FAST_REGISTER = 0xa,
+ FW_RI_LOCAL_INV = 0xb,
+ FW_RI_QP_MODIFY = 0xc,
+ FW_RI_BYPASS = 0xd,
+ FW_RI_RECEIVE = 0xe,
+
+ FW_RI_SGE_EC_CR_RETURN = 0xf
+};
+
+enum fw_ri_wr_flags {
+ FW_RI_COMPLETION_FLAG = 0x01,
+ FW_RI_NOTIFICATION_FLAG = 0x02,
+ FW_RI_SOLICITED_EVENT_FLAG = 0x04,
+ FW_RI_READ_FENCE_FLAG = 0x08,
+ FW_RI_LOCAL_FENCE_FLAG = 0x10,
+ FW_RI_RDMA_READ_INVALIDATE = 0x20
+};
+
+enum fw_ri_mpa_attrs {
+ FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
+ FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
+ FW_RI_MPA_CRC_ENABLE = 0x04,
+ FW_RI_MPA_IETF_ENABLE = 0x08
+};
+
+enum fw_ri_qp_caps {
+ FW_RI_QP_RDMA_READ_ENABLE = 0x01,
+ FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
+ FW_RI_QP_BIND_ENABLE = 0x04,
+ FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
+ FW_RI_QP_STAG0_ENABLE = 0x10
+};
+
+enum fw_ri_addr_type {
+ FW_RI_ZERO_BASED_TO = 0x00,
+ FW_RI_VA_BASED_TO = 0x01
+};
+
+enum fw_ri_mem_perms {
+ FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
+ FW_RI_MEM_ACCESS_REM_READ = 0x02,
+ FW_RI_MEM_ACCESS_REM = 0x03,
+ FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
+ FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
+ FW_RI_MEM_ACCESS_LOCAL = 0x0C
+};
+
+enum fw_ri_stag_type {
+ FW_RI_STAG_NSMR = 0x00,
+ FW_RI_STAG_SMR = 0x01,
+ FW_RI_STAG_MW = 0x02,
+ FW_RI_STAG_MW_RELAXED = 0x03
+};
+
+enum fw_ri_data_op {
+ FW_RI_DATA_IMMD = 0x81,
+ FW_RI_DATA_DSGL = 0x82,
+ FW_RI_DATA_ISGL = 0x83
+};
+
+enum fw_ri_sgl_depth {
+ FW_RI_SGL_DEPTH_MAX_SQ = 16,
+ FW_RI_SGL_DEPTH_MAX_RQ = 4
+};
+
+struct fw_ri_dsge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct fw_ri_dsgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 len0;
+ __be64 addr0;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_dsge_pair sge[0];
+#endif
+};
+
+struct fw_ri_sge {
+ __be32 stag;
+ __be32 len;
+ __be64 to;
+};
+
+struct fw_ri_isgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 r2;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_sge sge[0];
+#endif
+};
+
+struct fw_ri_immd {
+ __u8 op;
+ __u8 r1;
+ __be16 r2;
+ __be32 immdlen;
+#ifndef C99_NOT_SUPPORTED
+ __u8 data[0];
+#endif
+};
+
+struct fw_ri_tpte {
+ __be32 valid_to_pdid;
+ __be32 locread_to_qpid;
+ __be32 nosnoop_pbladdr;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+ __be32 dca_mwbcnt_pstag;
+ __be32 len_hi;
+};
+
+#define S_FW_RI_TPTE_VALID 31
+#define M_FW_RI_TPTE_VALID 0x1
+#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
+#define G_FW_RI_TPTE_VALID(x) \
+ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
+#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
+
+#define S_FW_RI_TPTE_STAGKEY 23
+#define M_FW_RI_TPTE_STAGKEY 0xff
+#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
+#define G_FW_RI_TPTE_STAGKEY(x) \
+ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
+
+#define S_FW_RI_TPTE_STAGSTATE 22
+#define M_FW_RI_TPTE_STAGSTATE 0x1
+#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
+#define G_FW_RI_TPTE_STAGSTATE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
+#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
+
+#define S_FW_RI_TPTE_STAGTYPE 20
+#define M_FW_RI_TPTE_STAGTYPE 0x3
+#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
+#define G_FW_RI_TPTE_STAGTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
+
+#define S_FW_RI_TPTE_PDID 0
+#define M_FW_RI_TPTE_PDID 0xfffff
+#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
+#define G_FW_RI_TPTE_PDID(x) \
+ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
+
+#define S_FW_RI_TPTE_PERM 28
+#define M_FW_RI_TPTE_PERM 0xf
+#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
+#define G_FW_RI_TPTE_PERM(x) \
+ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
+
+#define S_FW_RI_TPTE_REMINVDIS 27
+#define M_FW_RI_TPTE_REMINVDIS 0x1
+#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
+#define G_FW_RI_TPTE_REMINVDIS(x) \
+ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
+#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
+
+#define S_FW_RI_TPTE_ADDRTYPE 26
+#define M_FW_RI_TPTE_ADDRTYPE 1
+#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
+#define G_FW_RI_TPTE_ADDRTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
+#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
+
+#define S_FW_RI_TPTE_MWBINDEN 25
+#define M_FW_RI_TPTE_MWBINDEN 0x1
+#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
+#define G_FW_RI_TPTE_MWBINDEN(x) \
+ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
+#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
+
+#define S_FW_RI_TPTE_PS 20
+#define M_FW_RI_TPTE_PS 0x1f
+#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
+#define G_FW_RI_TPTE_PS(x) \
+ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
+
+#define S_FW_RI_TPTE_QPID 0
+#define M_FW_RI_TPTE_QPID 0xfffff
+#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
+#define G_FW_RI_TPTE_QPID(x) \
+ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
+
+#define S_FW_RI_TPTE_NOSNOOP 30
+#define M_FW_RI_TPTE_NOSNOOP 0x1
+#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
+#define G_FW_RI_TPTE_NOSNOOP(x) \
+ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
+#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
+
+#define S_FW_RI_TPTE_PBLADDR 0
+#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
+#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
+#define G_FW_RI_TPTE_PBLADDR(x) \
+ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
+
+#define S_FW_RI_TPTE_DCA 24
+#define M_FW_RI_TPTE_DCA 0x1f
+#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
+#define G_FW_RI_TPTE_DCA(x) \
+ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
+
+#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
+#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
+#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
+ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
+#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
+ (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+
+enum fw_ri_res_type {
+ FW_RI_RES_TYPE_SQ,
+ FW_RI_RES_TYPE_RQ,
+ FW_RI_RES_TYPE_CQ,
+};
+
+enum fw_ri_res_op {
+ FW_RI_RES_OP_WRITE,
+ FW_RI_RES_OP_RESET,
+};
+
+struct fw_ri_res {
+ union fw_ri_restype {
+ struct fw_ri_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_ri_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_ri_res_wr {
+ __be32 op_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_res res[0];
+#endif
+};
+
+#define S_FW_RI_RES_WR_NRES 0
+#define M_FW_RI_RES_WR_NRES 0xff
+#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
+#define G_FW_RI_RES_WR_NRES(x) \
+ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
+
+#define S_FW_RI_RES_WR_FETCHSZM 26
+#define M_FW_RI_RES_WR_FETCHSZM 0x1
+#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
+#define G_FW_RI_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
+#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGNS 25
+#define M_FW_RI_RES_WR_STATUSPGNS 0x1
+#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
+#define G_FW_RI_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
+#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGRO 24
+#define M_FW_RI_RES_WR_STATUSPGRO 0x1
+#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
+#define G_FW_RI_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
+#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_RI_RES_WR_FETCHNS 23
+#define M_FW_RI_RES_WR_FETCHNS 0x1
+#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
+#define G_FW_RI_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
+#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
+
+#define S_FW_RI_RES_WR_FETCHRO 22
+#define M_FW_RI_RES_WR_FETCHRO 0x1
+#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
+#define G_FW_RI_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
+#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
+
+#define S_FW_RI_RES_WR_HOSTFCMODE 20
+#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
+#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
+#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
+
+#define S_FW_RI_RES_WR_CPRIO 19
+#define M_FW_RI_RES_WR_CPRIO 0x1
+#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
+#define G_FW_RI_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
+#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
+
+#define S_FW_RI_RES_WR_ONCHIP 18
+#define M_FW_RI_RES_WR_ONCHIP 0x1
+#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
+#define G_FW_RI_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
+#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
+
+#define S_FW_RI_RES_WR_PCIECHN 16
+#define M_FW_RI_RES_WR_PCIECHN 0x3
+#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
+#define G_FW_RI_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
+
+#define S_FW_RI_RES_WR_IQID 0
+#define M_FW_RI_RES_WR_IQID 0xffff
+#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
+#define G_FW_RI_RES_WR_IQID(x) \
+ (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
+
+#define S_FW_RI_RES_WR_DCAEN 31
+#define M_FW_RI_RES_WR_DCAEN 0x1
+#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
+#define G_FW_RI_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
+#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
+
+#define S_FW_RI_RES_WR_DCACPU 26
+#define M_FW_RI_RES_WR_DCACPU 0x1f
+#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
+#define G_FW_RI_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
+
+#define S_FW_RI_RES_WR_FBMIN 23
+#define M_FW_RI_RES_WR_FBMIN 0x7
+#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
+#define G_FW_RI_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
+
+#define S_FW_RI_RES_WR_FBMAX 20
+#define M_FW_RI_RES_WR_FBMAX 0x7
+#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
+#define G_FW_RI_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
+#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
+#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
+#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESH 16
+#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
+#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
+
+#define S_FW_RI_RES_WR_EQSIZE 0
+#define M_FW_RI_RES_WR_EQSIZE 0xffff
+#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
+#define G_FW_RI_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
+
+#define S_FW_RI_RES_WR_IQANDST 15
+#define M_FW_RI_RES_WR_IQANDST 0x1
+#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
+#define G_FW_RI_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
+#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
+
+#define S_FW_RI_RES_WR_IQANUS 14
+#define M_FW_RI_RES_WR_IQANUS 0x1
+#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
+#define G_FW_RI_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
+#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
+
+#define S_FW_RI_RES_WR_IQANUD 12
+#define M_FW_RI_RES_WR_IQANUD 0x3
+#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
+#define G_FW_RI_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
+
+#define S_FW_RI_RES_WR_IQANDSTINDEX 0
+#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
+#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
+
+#define S_FW_RI_RES_WR_IQDROPRSS 15
+#define M_FW_RI_RES_WR_IQDROPRSS 0x1
+#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
+#define G_FW_RI_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
+#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_RI_RES_WR_IQGTSMODE 14
+#define M_FW_RI_RES_WR_IQGTSMODE 0x1
+#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
+#define G_FW_RI_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
+#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_RI_RES_WR_IQPCIECH 12
+#define M_FW_RI_RES_WR_IQPCIECH 0x3
+#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
+#define G_FW_RI_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
+
+#define S_FW_RI_RES_WR_IQDCAEN 11
+#define M_FW_RI_RES_WR_IQDCAEN 0x1
+#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
+#define G_FW_RI_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
+#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
+
+#define S_FW_RI_RES_WR_IQDCACPU 6
+#define M_FW_RI_RES_WR_IQDCACPU 0x1f
+#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
+#define G_FW_RI_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
+
+#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
+#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_RI_RES_WR_IQO 3
+#define M_FW_RI_RES_WR_IQO 0x1
+#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
+#define G_FW_RI_RES_WR_IQO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
+#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
+
+#define S_FW_RI_RES_WR_IQCPRIO 2
+#define M_FW_RI_RES_WR_IQCPRIO 0x1
+#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
+#define G_FW_RI_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
+#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
+
+#define S_FW_RI_RES_WR_IQESIZE 0
+#define M_FW_RI_RES_WR_IQESIZE 0x3
+#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
+#define G_FW_RI_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
+
+#define S_FW_RI_RES_WR_IQNS 31
+#define M_FW_RI_RES_WR_IQNS 0x1
+#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
+#define G_FW_RI_RES_WR_IQNS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
+#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
+
+#define S_FW_RI_RES_WR_IQRO 30
+#define M_FW_RI_RES_WR_IQRO 0x1
+#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
+#define G_FW_RI_RES_WR_IQRO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
+#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+
+struct fw_ri_rdma_write_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+struct fw_ri_send_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 sendop_pkd;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 r3;
+ __be64 r4;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_SEND_WR_SENDOP 0
+#define M_FW_RI_SEND_WR_SENDOP 0xf
+#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
+#define G_FW_RI_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+
+struct fw_ri_rdma_read_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 stag_sink;
+ __be32 to_sink_hi;
+ __be32 to_sink_lo;
+ __be32 plen;
+ __be32 stag_src;
+ __be32 to_src_hi;
+ __be32 to_src_lo;
+ __be32 r5;
+};
+
+struct fw_ri_recv_wr {
+ __u8 opcode;
+ __u8 r1;
+ __u16 wrid;
+ __u8 r2[3];
+ __u8 len16;
+ struct fw_ri_isgl isgl;
+};
+
+struct fw_ri_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+#define S_FW_RI_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_BIND_MW_WR_NS 5
+#define M_FW_RI_BIND_MW_WR_NS 0x1
+#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
+#define G_FW_RI_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
+#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
+#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+
+struct fw_ri_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_FR_NSMR_WR_NS 5
+#define M_FW_RI_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
+#define G_FW_RI_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
+#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+
+struct fw_ri_inv_lstag_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 stag_inv;
+};
+
+enum fw_ri_type {
+ FW_RI_TYPE_INIT,
+ FW_RI_TYPE_FINI,
+ FW_RI_TYPE_TERMINATE
+};
+
+enum fw_ri_init_p2ptype {
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
+ FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
+ FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
+ FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
+};
+
+struct fw_ri_wr {
+ __be32 op_compl;
+ __be32 flowid_len16;
+ __u64 cookie;
+ union fw_ri {
+ struct fw_ri_init {
+ __u8 type;
+ __u8 mpareqbit_p2ptype;
+ __u8 r4[2];
+ __u8 mpa_attrs;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 iss;
+ __be32 irs;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be64 r5;
+ union fw_ri_init_p2p {
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_send_wr send;
+ } u;
+ } init;
+ struct fw_ri_fini {
+ __u8 type;
+ __u8 r3[7];
+ __be64 r4;
+ } fini;
+ struct fw_ri_terminate {
+ __u8 type;
+ __u8 r3[3];
+ __be32 immdlen;
+ __u8 termmsg[40];
+ } terminate;
+ } u;
+};
+
+#define S_FW_RI_WR_MPAREQBIT 7
+#define M_FW_RI_WR_MPAREQBIT 0x1
+#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
+#define G_FW_RI_WR_MPAREQBIT(x) \
+ (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
+#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+
+#define S_FW_RI_WR_P2PTYPE 0
+#define M_FW_RI_WR_P2PTYPE 0xf
+#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
+#define G_FW_RI_WR_P2PTYPE(x) \
+ (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 unknown:1;
+ __u8:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8:1;
+ __u8 unknown:1;
+ __u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define S_SYN_RX_CHAN 0
+#define M_SYN_RX_CHAN 0xF
+#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
+#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
+
+#define S_TCP_HDR_LEN 10
+#define M_TCP_HDR_LEN 0x3F
+#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
+#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
+#define S_IP_HDR_LEN 16
+#define M_IP_HDR_LEN 0x3FF
+#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
+#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
+
+#define S_ETH_HDR_LEN 26
+#define M_ETH_HDR_LEN 0x1F
+#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
+#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+
+/* cpl_pass_accept_req.l2info fields */
+#define S_SYN_MAC_IDX 0
+#define M_SYN_MAC_IDX 0x1FF
+#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
+#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+
+#define S_SYN_XACT_MATCH 9
+#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
+#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+
+#define S_SYN_INTF 12
+#define M_SYN_INTF 0xF
+#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
+#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 00000000000..ed6414abde0
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __C4IW_USER_H__
+#define __C4IW_USER_H__
+
+#define C4IW_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct c4iw_create_cq_resp {
+ __u64 key;
+ __u64 gts_key;
+ __u64 memsize;
+ __u32 cqid;
+ __u32 size;
+ __u32 qid_mask;
+};
+
+struct c4iw_create_qp_resp {
+ __u64 sq_key;
+ __u64 rq_key;
+ __u64 sq_db_gts_key;
+ __u64 rq_db_gts_key;
+ __u64 sq_memsize;
+ __u64 rq_memsize;
+ __u32 sqid;
+ __u32 rqid;
+ __u32 sq_size;
+ __u32 rq_size;
+ __u32 qid_mask;
+};
+#endif
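
The layout rule in the comment above can be checked mechanically. The sketch below is a userspace mirror of c4iw_create_cq_resp (the *_mirror struct name and the hand-computed offsets are assumptions of this example, not part of the header); because every __u64 member lands on a naturally aligned offset and no pointer types are used, a 32-bit and a 64-bit compiler both produce the offsets asserted here:

/* Userspace sketch, C11. Compiles to a no-op program; the checks run
 * at compile time. */
#include <stddef.h>
#include <stdint.h>

struct c4iw_create_cq_resp_mirror {	/* hypothetical mirror of the ABI struct */
	uint64_t key;
	uint64_t gts_key;
	uint64_t memsize;
	uint32_t cqid;
	uint32_t size;
	uint32_t qid_mask;
};

_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, key)      == 0,  "key");
_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, gts_key)  == 8,  "gts_key");
_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, memsize)  == 16, "memsize");
_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, cqid)     == 24, "cqid");
_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, size)     == 28, "size");
_Static_assert(offsetof(struct c4iw_create_cq_resp_mirror, qid_mask) == 32, "qid_mask");

int main(void) { return 0; }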
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552caf..e571e60ecb8 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_UP_CANCELED:
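
Returning notifier_from_errno(-ENOMEM) instead of NOTIFY_BAD lets the notifier chain hand a real errno back to the caller rather than a bare failure flag. The standalone model below imitates that encode/decode round trip; the model_* helpers and constants are simplified stand-ins for the <linux/notifier.h> definitions, not the kernel's exact code:

#include <stdio.h>
#include <errno.h>

/* Simplified stand-ins for the notifier return-value constants. */
#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000

/* Model of notifier_from_errno(): fold a negative errno into a
 * stop-style notifier return value so the caller can recover it. */
static int model_notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

/* Model of notifier_to_errno(): the inverse transform on the caller side. */
static int model_notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	int ret = model_notifier_from_errno(-ENOMEM);

	printf("callback returned 0x%x -> errno %d\n",
	       ret, model_notifier_to_errno(ret));
	return 0;
}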
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e..ecb51b396c4 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -291,8 +291,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
};
ehca_gen_dbg("Probing adapter %s...",
- shca->ofdev->node->full_name);
- loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+ shca->ofdev->dev.of_node->full_name);
+ loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
+ NULL);
if (loc_code)
ehca_gen_dbg(" ... location lode=%s", loc_code);
@@ -720,16 +721,16 @@ static int __devinit ehca_probe(struct of_device *dev,
int ret, i, eq_size;
unsigned long flags;
- handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
+ handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
if (!handle) {
ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
if (!(*handle)) {
ehca_gen_err("Wrong eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
@@ -798,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev,
goto probe5;
}
- ret = ib_register_device(&shca->ib_device);
+ ret = ib_register_device(&shca->ib_device, NULL);
if (ret) {
ehca_err(&shca->ib_device,
"ib_register_device() failed ret=%i", ret);
@@ -936,12 +937,13 @@ static struct of_device_id ehca_device_table[] =
MODULE_DEVICE_TABLE(of, ehca_device_table);
static struct of_platform_driver ehca_driver = {
- .name = "ehca",
- .match_table = ehca_device_table,
.probe = ehca_probe,
.remove = ehca_remove,
- .driver = {
+ .driver = {
+ .name = "ehca",
+ .owner = THIS_MODULE,
.groups = ehca_drv_attr_groups,
+ .of_match_table = ehca_device_table,
},
};
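
The hunks above are part of the of_platform rework: the device node now hangs off dev->dev.of_node, and matching moves into driver.of_match_table. A schematic probe routine in the new style (not buildable outside a kernel tree; the example_probe name is made up, and the property name is borrowed from the hunk above) would read:

/* Schematic only: a probe routine in the converted style, reading a
 * device-tree property via dev->dev.of_node. */
static int example_probe(struct of_device *dev,
			 const struct of_device_id *id)
{
	struct device_node *dn = dev->dev.of_node;	/* formerly dev->node */
	const u64 *handle;

	handle = of_get_property(dn, "ibm,hca-handle", NULL);
	if (!handle)
		return -ENODEV;

	/* ... bring up the adapter using *handle ... */
	return 0;
}

The match table moves with it: instead of a top-level .match_table member, it is now referenced as .driver.of_match_table, as the ehca_driver initializer above shows.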
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec..1d9bb115cbf 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
config INFINIBAND_IPATH
- tristate "QLogic InfiniPath Driver"
- depends on 64BIT && NET
+ tristate "QLogic HTX HCA support"
+ depends on 64BIT && NET && HT_IRQ
---help---
- This is a driver for QLogic InfiniPath host channel adapters,
+ This is a driver for the obsolete QLogic Hyper-Transport
+ IB host channel adapter (model QHT7140),
including InfiniBand verbs support. This driver allows these
devices to be used with both kernel upper level protocols such
as IP-over-InfiniBand as well as with userspace applications
(in conjunction with InfiniBand userspace access).
+ For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf945006198..fa3df82681d 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
- ipath_verbs.o \
- ipath_iba7220.o \
- ipath_sd7220.o \
- ipath_sd7220_img.o
+ ipath_verbs.o
ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f..21337468c65 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60
static const struct pci_device_id ipath_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
{ 0, }
};
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
ipath_init_iba6110_funcs(dd);
break;
-#else
- ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
- "CONFIG_HT_IRQ is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
- ipath_init_iba6120_funcs(dd);
- break;
-#else
- ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
- "CONFIG_PCI_MSI is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
- ipath_dbg("CONFIG_PCI_MSI is not enabled, "
- "using INTx for unit %u\n", dd->ipath_unit);
-#endif
- ipath_init_iba7220_funcs(dd);
- break;
+
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
"failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5efa4..1d7aea132a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1474,7 +1474,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
/**
* ipath_pe_put_tid - write a TID in chip
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index fbf8c5379ea..00000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PCI-Express chip.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long Reserved0;
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long Reserved1;
- unsigned long long Reserved2;
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long Reserved3;
- unsigned long long RcvPktLEDCnt;
- unsigned long long Reserved4[8];
- unsigned long long SendCtrl;
- unsigned long long SendPIOBufBase;
- unsigned long long SendPIOSize;
- unsigned long long SendPIOBufCnt;
- unsigned long long SendPIOAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long Reserved5;
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- unsigned long long Reserved51[6];
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long Reserved6SBE[6];
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddr1;
- unsigned long long RcvHdrAddr2;
- unsigned long long RcvHdrAddr3;
- unsigned long long RcvHdrAddr4;
- unsigned long long Reserved7RHA[11];
- unsigned long long RcvHdrTailAddr0;
- unsigned long long RcvHdrTailAddr1;
- unsigned long long RcvHdrTailAddr2;
- unsigned long long RcvHdrTailAddr3;
- unsigned long long RcvHdrTailAddr4;
- unsigned long long Reserved8RHTA[11];
- unsigned long long Reserved9SW[8];
- unsigned long long SerdesConfig0;
- unsigned long long SerdesConfig1;
- unsigned long long SerdesStatus;
- unsigned long long XGXSConfig;
- unsigned long long IBPLLCfg;
- unsigned long long Reserved10SW2[3];
- unsigned long long PCIEQ0SerdesConfig0;
- unsigned long long PCIEQ0SerdesConfig1;
- unsigned long long PCIEQ0SerdesStatus;
- unsigned long long Reserved11;
- unsigned long long PCIEQ1SerdesConfig0;
- unsigned long long PCIEQ1SerdesConfig1;
- unsigned long long PCIEQ1SerdesStatus;
- unsigned long long Reserved12;
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 Reserved1;
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 Reserved6;
- __u64 Reserved7;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_pe_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
- .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
- .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
- .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
- .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
- /*
- * These should not be used directly via ipath_write_kreg64(),
- * use them with ipath_write_kreg64_port(),
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /* The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it. */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
- .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
- .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
- .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
- .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
- .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
- .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
-#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
-#define INFINIPATH_RT_IS_VALID(tid) \
- (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
-#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define INFINIPATH_RT_ADDR_SHIFT 10
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* 6120 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, or bus
- * parity or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
- << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip bug can result in (many) false parity error
- * reports. So it's a debug print on those, and an info print on systems
- * where the speculative reads don't occur.
- */
-static void ipath_pe_txe_recover(struct ipath_devdata *dd)
-{
- if (ipath_unordered_wc())
- ipath_dbg("Recovering from TXE PIO parity error\n");
- else {
- ++ipath_stats.sps_txeparity;
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- }
-}
-
-
-/**
- * ipath_pe_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
- * better than printing confusing messages
- * This seems to be related to clearing the crc error, or
- * the pll error during init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- return;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- return;
- }
- ipath_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
-
- /*
- * make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
- RXE_EAGER_PARITY)) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * parity errors in send memory are recoverable,
- * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- ipath_pe_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
- if (!hwerrs) {
- static u32 freeze_cnt;
-
- freeze_cnt++;
- ipath_dbg("Clearing freezemode on ignored or recovered "
- "hardware error (%u)\n", freeze_cnt);
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_6120_hwerror_msgs,
- sizeof(ipath_6120_hwerror_msgs)/
- sizeof(ipath_6120_hwerror_msgs[0]),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP )
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs) {
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- ipath_dev_err(dd, "%s hardware error\n", msg);
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_pe_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error (freeze "
- "mode), no longer usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- /* mark as having had error */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- /*
- * mark as not usable, at a minimum until driver
- * is reloaded, probably until reboot, since no
- * other reset is possible.
- */
- dd->ipath_flags &= ~IPATH_INITTED;
- } else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
- /*
- * for /sys status file; if no trailing brace is copied,
- * we'll know it was truncated.
- */
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
- }
-}
-
-/**
- * ipath_pe_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- switch (boardrev) {
- case 0:
- n = "InfiniPath_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7140-Bringup";
- break;
- case 2:
- n = "InfiniPath_QLE7140";
- break;
- case 3:
- n = "InfiniPath_QMI7140";
- break;
- case 4:
- n = "InfiniPath_QEM7140";
- break;
- case 5:
- n = "InfiniPath_QMH7140";
- break;
- case 6:
- n = "InfiniPath_QLE7142";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else {
- ret = 0;
- if (dd->ipath_minrev >= 2)
- dd->ipath_f_put_tid = ipath_pe_put_tid_2;
- }
-
- /*
- * set here, not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
- return ret;
-}
-
-/**
- * ipath_pe_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
- ipath_dbg("MemBIST corrected\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) // no PLL for Emulator
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev < 2) {
- /* workaround bug 9460 in internal interface bus parity
- * checking. Fixed (HW bug 9490) in Rev2.
- */
- val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
- }
- dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_pe_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
-{
- u64 val, config1, prev_val;
- int ret = 0;
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
- config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
- ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
- "xgxsconfig %llx\n", (unsigned long long) val,
- (unsigned long long) config1, (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- /*
- * Force reset on, also set rxdetect enable. Must do before reading
- * serdesstatus at least for simulation, or some of the bits in
- * serdes status will come back as undefined and cause simulation
- * failures
- */
- val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
- | INFINIPATH_SERDC0_L1PWR_DN;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
- * after PLL is reset, set the per-lane Resets and TxIdle and
- * clear the PLL reset and rxdetect (to get falling edge).
- * Leave L1PWR bits set (permanently)
- */
- val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
- | INFINIPATH_SERDC0_L1PWR_DN);
- val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
- ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
- "and txidle (%llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- /* need PLL reset clear for at least 11 usec before lane
- * resets cleared; give it a few more to be sure */
- udelay(15);
- val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
-
- ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
- "(writing %llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
- INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
- /* need to compensate for Tx inversion in partner */
- val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
- INFINIPATH_XGXS_RX_POL_SHIFT);
- val |= dd->ipath_rx_pol_inv <<
- INFINIPATH_XGXS_RX_POL_SHIFT;
- }
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
- ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
- "config1=%llx, sstatus=%llx xgxs=%llx\n",
- (unsigned long long) val, (unsigned long long) config1,
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- return ret;
-}
-
-/**
- * ipath_pe_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
- val |= INFINIPATH_SERDC0_TXIDLE;
- ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
- (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-static int ipath_pe_intconfig(struct ipath_devdata *dd)
-{
- u32 chiprev;
-
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
- if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
- dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
- return 0;
-}
-
-/**
- * ipath_setup_pe_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
-
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display, e.g. for locating the system in a rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
-
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-/**
- * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * This is called during driver unload.
- * We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT chips. If we do end up needing pci_enable_msi
- * at some point in the future for HT, we'll move the call back
- * into the main init_one code.
- */
-static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0; /* just in case unload fails */
- pci_disable_msi(dd->pcidev);
-}
-
-static void ipath_6120_pcie_params(struct ipath_devdata *dd)
-{
- u16 linkstat, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is in bits 3:0, link width in bits 8:4;
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < 8)
- ipath_dev_err(dd,
- "PCIe width %u (x8 HCA), performance reduced\n",
- linkstat);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
- dd->ipath_lbus_speed, linkstat);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
-
- return;
-}
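
ipath_6120_pcie_params() above splits the PCIe link status word into a speed code in the low four bits and a lane-width count in the five bits above it. A small standalone example applying the same shifts to an invented sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented sample LNKSTA value: speed field 1 (2.5 GT/s), width x8. */
	uint16_t linkstat = (8u << 4) | 1u;

	unsigned speed = linkstat & 0xf;         /* bits 3:0, as in the code above */
	unsigned width = (linkstat >> 4) & 0x1f; /* bits 8:4 */

	printf("PCIe,%uMHz,x%u\n", speed == 2 ? 5000 : 2500, width);
	return 0;
}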
-
-/**
- * ipath_setup_pe_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning.
- * So we call enable_msi() only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_pe_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
- ret = pci_enable_msi(dd->pcidev);
- if (ret)
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "interrupts may not work\n", ret);
- /* continue even if it fails, we may still be OK... */
- dd->ipath_irq = pdev->irq;
-
- if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- u16 control;
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(dd->pcidev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? 12 : 8),
- &dd->ipath_msi_data);
- ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
- "0x%x, control=0x%x\n", dd->ipath_msi_data,
- pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- control);
- /* we save the cachelinesize also, although it doesn't
- * really matter */
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- ipath_6120_pcie_params(dd);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR;
- dd->ipath_link_width_enabled = IB_WIDTH_4X;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /* these can't change for this chip, so set once */
- dd->ipath_link_width_active = dd->ipath_link_width_enabled;
- dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
- return 0;
-}
-
-static void ipath_init_pe_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_pe_kregs;
- dd->ipath_cregs = &ipath_pe_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in newer chips.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in newer chips.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
- dd->ipath_i_bitsextant =
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
- INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
- INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
- INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
- INFINIPATH_E_HARDWARE;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (ipath_unordered_wc())
- dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
- dd->delay_mult = 2; /* SDR, 4X, can't change */
-}
-
-/* setup the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps somehow we can tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_pe_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int pos;
- u16 control;
- int ret;
-
- if (!dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
- "initial setup failed?\n");
- ret = 0;
- goto bail;
- }
-
- if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- ret = 0;
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- /* we restore the cachelinesize also, although it doesn't really
- * matter */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/* This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
-*/
-static int ipath_setup_pe_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use ERROR so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
- /* allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0)))
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
- r);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1)))
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
- r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- if ((r = pci_enable_device(dd->pcidev)))
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * whether it fully enabled or not, mark as present,
- * again (but not INITTED)
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_6120_pcie_params(dd);
- return ret;
-}
-
-/**
- * ipath_pe_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags = 0; /* keep gcc quiet */
- int tidx;
- spinlock_t *tidlockp;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Workaround chip bug 9437 by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip bug
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the port 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > ipath_egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->ipath_egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
- ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
- mmiowb();
- spin_unlock_irqrestore(tidlockp, flags);
-}
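
The TID word built here packs the 2KB-aligned physical address, shifted right by 11, into the low 29 bits and a buffer-size code into bits 31-29. A standalone worked example of that encoding, reusing the masks defined earlier in this file (the sample address is invented):

#include <stdio.h>
#include <stdint.h>

#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
#define INFINIPATH_RT_ADDR_MASK    0x1FFFFFFFULL	/* 29 bits valid */

int main(void)
{
	unsigned long pa = 0x12345800UL;	/* invented, 2KB aligned */
	uint32_t tid;

	if (pa & ((1UL << 11) - 1)) {		/* must be 2KB aligned */
		printf("unaligned\n");
		return 1;
	}
	pa >>= 11;
	if (pa & ~INFINIPATH_RT_ADDR_MASK)	/* must fit in 29 bits */
		printf("address too large\n");

	tid = (uint32_t)pa | (2u << 29);	/* 2 << 29: 4KB buffer code */
	printf("tid word = 0x%08x (bufsize bits 0x%08llx)\n",
	       tid, (unsigned long long)(tid & INFINIPATH_RT_BUFSIZE_MASK));
	return 0;
}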
-
-/**
- * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- tidx = tidptr - dd->ipath_egrtidbase;
- writel(pa, tidp32);
- mmiowb();
-}
-
-
-/**
- * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase +
- port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * ipath_pe_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use frequently, to avoid recalculating them each time
- */
-static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
-{
- u32 egrsize = dd->ipath_rcvegrbufsize;
-
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
-	 * specify 2KB or 4KB and/or make it per-port instead of per-device
-	 * for those who want to reduce memory footprint. Note that the
-	 * ipath_rcvhdrentsize must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->ipath_tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->ipath_tidtemplate = 2U << 29;
- else {
- egrsize = 4096;
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- egrsize);
- dd->ipath_tidtemplate = 2U << 29;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_pe_early_init(struct ipath_devdata *dd)
-{
- dd->ipath_flags |= IPATH_4BYTE_TID;
- if (ipath_unordered_wc())
- dd->ipath_flags |= IPATH_PIO_FLUSH_WC;
-
- /*
-	 * For OpenFabrics, we need to be able to handle an IB header of
-	 * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
-	 * made them the same size as the PIO buffers. This chip does not
-	 * handle arbitrarily sized buffers, so we need the header large enough
-	 * to handle the largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset = 0;
- dd->ipath_egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->ipath_rhdrhead_intr_off = 1ULL<<32;
-
- ipath_get_eeprom_info(dd);
-
- return 0;
-}
-
-int __attribute__((weak)) ipath_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * ipath_pe_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
- struct ipath_devdata *dd;
-
- if (ipath_unordered_wc()) {
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
- ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
- }
- else
- ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
-
- if (pd == NULL)
- goto done;
-
- dd = pd->port_dd;
-
-done:
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
- IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-static void ipath_pe_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-
-static struct ipath_message_header *
-ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- return (struct ipath_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- dd->ipath_portcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- dd->ipath_p0_rcvegrcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_pe_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- cntrs->LBIntCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
- cntrs->LBFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
- cntrs->TxSDmaDescCnt = 0;
- cntrs->TxUnsupVLErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
- cntrs->TxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
- cntrs->TxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
- cntrs->TxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
- cntrs->TxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
- cntrs->TxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
- cntrs->TxUnderrunCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
- cntrs->TxFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
- cntrs->TxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
- cntrs->RxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
- cntrs->RxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
- cntrs->RxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
- cntrs->RxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
- cntrs->RxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
- cntrs->RxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
- cntrs->RxICRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
- cntrs->RxVCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
- cntrs->RxFlowCtrlErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
- cntrs->RxBadFormatCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
- cntrs->RxLinkProblemCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
- cntrs->RxEBPCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
- cntrs->RxLPCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
- cntrs->RxBufOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
- cntrs->RxTIDFullErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
- cntrs->RxTIDValidErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
- cntrs->RxPKeyMismatchCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
- cntrs->RxP0HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
- cntrs->RxP1HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
- cntrs->RxP2HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
- cntrs->RxP3HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
- cntrs->RxP4HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
- cntrs->RxP5HdrEgrOvflCnt = 0;
- cntrs->RxP6HdrEgrOvflCnt = 0;
- cntrs->RxP7HdrEgrOvflCnt = 0;
- cntrs->RxP8HdrEgrOvflCnt = 0;
- cntrs->RxP9HdrEgrOvflCnt = 0;
- cntrs->RxP10HdrEgrOvflCnt = 0;
- cntrs->RxP11HdrEgrOvflCnt = 0;
- cntrs->RxP12HdrEgrOvflCnt = 0;
- cntrs->RxP13HdrEgrOvflCnt = 0;
- cntrs->RxP14HdrEgrOvflCnt = 0;
- cntrs->RxP15HdrEgrOvflCnt = 0;
- cntrs->RxP16HdrEgrOvflCnt = 0;
- cntrs->IBStatusChangeCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
- cntrs->IBLinkErrRecoveryCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
- cntrs->IBLinkDownedCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
- cntrs->IBSymbolErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
- cntrs->RxVL15DroppedPktCnt = 0;
- cntrs->RxOtherLocalPhyErrCnt = 0;
- cntrs->PcieRetryBufDiagQwordCnt = 0;
- cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
- cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
- cntrs->RxVlErrCnt = 0;
- cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
-{
- return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int ret;
-
- switch (which) {
- case IPATH_IB_CFG_LWID:
- ret = dd->ipath_link_width_active;
- break;
- case IPATH_IB_CFG_SPD:
- ret = dd->ipath_link_speed_active;
- break;
- case IPATH_IB_CFG_LWID_ENB:
- ret = dd->ipath_link_width_enabled;
- break;
- case IPATH_IB_CFG_SPD_ENB:
- ret = dd->ipath_link_speed_enabled;
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
- return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int ret = 0;
-
- if (which == IPATH_IB_CFG_LWID_ENB)
- dd->ipath_link_width_enabled = val;
- else if (which == IPATH_IB_CFG_SPD_ENB)
- dd->ipath_link_speed_enabled = val;
- else
- ret = -ENOTSUPP;
- return ret;
-}
-
-static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- if (ibup) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else {
- dd->ipath_lli_counter = 0;
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- }
-
- ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ipath_ib_linktrstate(dd, ibcs));
- return 0;
-}
-
-
-/**
- * ipath_init_iba6120_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_pe_intconfig;
- dd->ipath_f_bus = ipath_setup_pe_config;
- dd->ipath_f_reset = ipath_setup_pe_reset;
- dd->ipath_f_get_boardname = ipath_pe_boardname;
- dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
- dd->ipath_f_early_init = ipath_pe_early_init;
- dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_pe_clear_tids;
- /*
- * _f_put_tid may get changed after we read the chip revision,
- * but we start with the safe version for all revs
- */
- dd->ipath_f_put_tid = ipath_pe_put_tid;
- dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
- dd->ipath_f_setextled = ipath_setup_pe_setextled;
- dd->ipath_f_get_base_info = ipath_pe_get_base_info;
- dd->ipath_f_free_irq = ipath_pe_free_irq;
- dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
- dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
- dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
- dd->ipath_f_config_ports = ipath_pe_config_ports;
- dd->ipath_f_read_counters = ipath_pe_read_counters;
- dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_pe_config_jint;
- dd->ipath_f_ib_updown = ipath_pe_ib_updown;
-
-
- /* initialize chip-specific variables */
- ipath_init_pe_variables(dd);
-}
-
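A side note on the TID encoding that ipath_pe_put_tid()/ipath_pe_put_tid_2() above rely on: the buffer physical address must be 2KB aligned, it is stored shifted right by 11 bits, and a buffer-size selector sits in bits 29-30 (the per-device tidtemplate for eager TIDs, always 2 << 29, i.e. 4KB, for expected TIDs). The sketch below is purely illustrative and not part of the driver; the helper name and types are hypothetical.

/*
 * Illustrative only: compose a 32-bit TID word from a 2KB-aligned
 * physical address plus a buffer-size selector, per the IBA6120
 * comments above. Not driver code; names are hypothetical.
 */
#include <stdint.h>

#define EX_TID_PA_SHIFT		11		/* low 11 bits dropped: 2KB alignment */
#define EX_TID_SZ_4K		(2U << 29)	/* 4KB size selector, used for expected TIDs */

static uint32_t example_make_tid(unsigned long pa, int eager,
				 uint32_t tidtemplate)
{
	uint32_t tid;

	/* the real code logs a BUG and bails if pa is not 2KB aligned */
	if (pa & ((1UL << EX_TID_PA_SHIFT) - 1))
		return 0;

	tid = (uint32_t)(pa >> EX_TID_PA_SHIFT);
	/* eager TIDs use the per-device template; expected are always 4KB */
	tid |= eager ? tidtemplate : EX_TID_SZ_4K;
	return tid;
}

For example, with a 2KB eager template (1U << 29), example_make_tid(pa, 1, 1U << 29) reproduces the eager encoding that ipath_pe_tidtemplate() sets up.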
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
deleted file mode 100644
index a805402dd4a..00000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ /dev/null
@@ -1,2631 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
-
-static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
-
-static unsigned ipath_compat_ddr_negotiate = 1;
-
-module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-static unsigned ipath_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in ipath_sd7220.c.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long DebugSigsIntSel; /* was Reserved0;*/
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long EEPROMAddrCmd; /* was Reserved1; */
- unsigned long long EEPROMData; /* was Reserved2; */
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long RcvQPMulticastPort;
- unsigned long long RcvPktLEDCnt;
- unsigned long long IBCDDRCtrl;
- unsigned long long HRTBT_GUID;
- unsigned long long IB_SDTEST_IF_TX;
- unsigned long long IB_SDTEST_IF_RX;
- unsigned long long IBCDDRCtrl2;
- unsigned long long IBCDDRStatus;
- unsigned long long JIntReload;
- unsigned long long IBNCModeCtrl;
- unsigned long long SendCtrl;
- unsigned long long SendBufBase;
- unsigned long long SendBufSize;
- unsigned long long SendBufCnt;
- unsigned long long SendAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long SendDmaBase;
- unsigned long long SendDmaLenGen;
- unsigned long long SendDmaTail;
- unsigned long long SendDmaHead;
- unsigned long long SendDmaHeadAddr;
- unsigned long long SendDmaBufMask0;
- unsigned long long SendDmaBufMask1;
- unsigned long long SendDmaBufMask2;
- unsigned long long SendDmaStatus;
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
- unsigned long long Reserved6L[2];
- unsigned long long AvailUpdCount;
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
- unsigned long long Reserved7hdtl; /* Align next to 300 */
- unsigned long long RcvHdrTailAddr0; /* 300, like others */
- unsigned long long RcvHdrTailAddrs[16];
- unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
- unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
- unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
- unsigned long long Reserved10sds; /* was SerdesStatus on */
- unsigned long long XGXSConfig;
- unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
- unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
- unsigned long long EEPAddrCmd;
- unsigned long long EEPData;
- unsigned long long PcieEpbAccCtl;
- unsigned long long PcieEpbTransCtl;
- unsigned long long EfuseCtl; /* E-Fuse control */
- unsigned long long EfuseData[4];
- unsigned long long ProcMon;
- /* this chip moves following two from previous 200, 208 */
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- /* added for this chip */
- unsigned long long PCIeRBufTestReg2;
- unsigned long long PCIeRBufTestReg3;
- /* added for this chip, debug only */
- unsigned long long SPC_JTAG_ACCESS_REG;
- unsigned long long LAControlReg;
- unsigned long long GPIODebugSelReg;
- unsigned long long DebugPortValueReg;
- /* added for this chip, DMA */
- unsigned long long SendDmaBufUsed[3];
- unsigned long long SendDmaReqTagUsed;
- /*
- * added for this chip, EFUSE: note that these program 64-bit
- * words 2 and 3 */
- unsigned long long efuse_pgm_data[2];
- unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
- /* we have 30 regs for DDS and RXEQ in IB SERDES */
- unsigned long long SerDesDDSRXEQ[30];
- unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
- /* added for LA debug support */
- unsigned long long LAMemory[32];
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
- __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
- __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- /* The following are new for IBA7220 */
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
- __u64 Reserved8[7];
- __u64 PSStat;
- __u64 PSStart;
- __u64 PSInterval;
- __u64 PSRcvDataCount;
- __u64 PSRcvPktsCount;
- __u64 PSXmitDataCount;
- __u64 PSXmitPktsCount;
- __u64 PSXmitWaitCount;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_7220_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-
- /* send dma related regs */
- .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
- .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
- .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
- .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
- .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
- .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
- .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
- .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
- .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
-
- /* SerDes related regs */
- .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
- .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
- .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
- .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
- .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
- .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
-
- /*
- * These should not be used directly via ipath_read_kreg64(),
- * use them with ipath_read_kreg64_port()
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /*
- * The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it.
- */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-
- .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
- .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
- .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
- .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
-};
-
-static const struct ipath_cregs ipath_7220_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
- .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
- .cr_rxotherlocalphyerrcnt =
- IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
- .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
- .cr_locallinkintegrityerrcnt =
- IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
- .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
- .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
- .cr_psstat = IPATH_CREG_OFFSET(PSStat),
- .cr_psstart = IPATH_CREG_OFFSET(PSStart),
- .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
- .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
- .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
- .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
- .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
- .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET (1U<<7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 32
-#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 0
-#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
-#define IBA7220_IBCS_LINKSTATE_SHIFT 5
-#define IBA7220_IBCS_LINKSPEED_SHIFT 8
-#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
-
-#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-#define IBA7220_IBC_HRTBT_MASK 3
-#define IBA7220_IBC_HRTBT_SHIFT 16
-#define IBA7220_IBC_HRTBT_ENB 0x10000UL
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
-#define IBA7220_IBC_SPEED_SDR (1<<2)
-#define IBA7220_IBC_SPEED_DDR (1<<3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_PORTCFG_SHIFT 36
-
-#define INFINIPATH_JINT_PACKETSHIFT 16
-#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
-#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * the size bits give us 2^N, in KB units. 0 marks the entry as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
-
-#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-static char int_type[16] = "auto";
-module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
-MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx");
-
-/* packet rate matching delay; chip has support */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-/* 7220 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
-	 * In practice, it's unlikely that we'll see PCIe PLL, bus
-	 * parity, or memory parity failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
- INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl header queue"),
- INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl data queue"),
- INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
- INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
- INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-static void autoneg_work(struct work_struct *);
-
-/*
- * the offset is different for different configured port numbers, since
- * port0 is fixed in size, but others can vary. Make it a function to
- * make the issue more obvious.
- */
-static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
-{
- return port ? dd->ipath_p0_rcvegrcnt +
- (port-1) * dd->ipath_rcvegrcnt : 0;
-}
-
-static void ipath_7220_txe_recover(struct ipath_devdata *dd)
-{
- ++ipath_stats.sps_txeparity;
-
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- ipath_disarm_senderrbufs(dd);
-}
-
-
-/**
- * ipath_7220_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
-		 * Better than printing confusing messages.
-		 * This seems to be related to clearing the CRC error, or
-		 * the PLL error during init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- goto bail;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- goto bail;
- }
- ipath_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror |
- ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
- ipath_sd7220_clr_ibpar(dd);
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
-		 * Parity errors in send memory are recoverable by h/w;
-		 * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
- ipath_7220_txe_recover(dd);
- hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
- }
- if (hwerrs) {
- /*
-			 * If any bits that we aren't ignoring are set, only
-			 * make the complaint once, in case it's stuck or
-			 * recurring, and we get here multiple times.
-			 * Force the link down, so the switch knows, and the
-			 * LEDs are turned off.
- */
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_7220_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error "
- "(freeze mode), no longer"
- " usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_flags &= ~IPATH_INITTED;
- } else {
- ipath_dbg("Clearing freezemode on ignored or "
- "recovered hardware error\n");
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, "
- "InfiniPath hardware unusable]", msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_7220_hwerror_msgs,
- ARRAY_SIZE(ipath_7220_hwerror_msgs),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
-		 * If it occurs, it is left masked since the external
-		 * interface is unused.
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_dev_err(dd, "%s hardware error\n", msg);
- /*
-	 * For the /sys status file. If no trailing } is copied, we'll
- * know it was truncated.
- */
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
-bail:;
-}
-
-/**
- * ipath_7220_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- if (boardrev == 15) {
- /*
- * Emulator sometimes comes up all-ones, rather than zero.
- */
- boardrev = 0;
- dd->ipath_boardrev = boardrev;
- }
- switch (boardrev) {
- case 0:
- n = "InfiniPath_7220_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7240";
- break;
- case 2:
- n = "InfiniPath_QLE7280";
- break;
- case 3:
- n = "InfiniPath_QLE7242";
- break;
- case 4:
- n = "InfiniPath_QEM7240";
- break;
- case 5:
- n = "InfiniPath_QMI7240";
- break;
- case 6:
- n = "InfiniPath_QMI7264";
- break;
- case 7:
- n = "InfiniPath_QMH7240";
- break;
- case 8:
- n = "InfiniPath_QME7240";
- break;
- case 9:
- n = "InfiniPath_QLE7250";
- break;
- case 10:
- n = "InfiniPath_QLE7290";
- break;
- case 11:
- n = "InfiniPath_QEM7250";
- break;
- case 12:
- n = "InfiniPath_QLE-Bringup";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
- dd->ipath_minrev > 2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware "
- "revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else if (dd->ipath_minrev == 1 &&
- !(dd->ipath_flags & IPATH_INITTED)) {
- /* Rev1 chips are prototype. Complain at init, but allow use */
- ipath_dev_err(dd, "Unsupported hardware "
- "revision %u.%u, Contact support@qlogic.com\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 0;
- } else
- ret = 0;
-
- /*
- * Set here not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
-
- return ret;
-}
-
-/**
- * ipath_7220_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occurred,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
- INFINIPATH_EXTS_MEMBIST_DISABLED)))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
- dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) /* no PLL for Emulator */
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev == 1)
- val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
-
- val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
- dd->ipath_hwerrmask = val;
-
- /*
- * special trigger "error" is for debugging purposes. It
- * works around a processor/chipset problem. The error
- * interrupt allows us to count occurrences, but we don't
- * want to pay the overhead for normal use. Emulation only
- */
- if (!dd->ipath_boardrev)
- dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
-}
-
-/*
- * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is ipath_sd7220_init().
- */
-
-/**
- * ipath_7220_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
-{
- int ret = 0;
- u64 val, prev_val, guid;
- int was_reset; /* Note whether uC was reset */
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- if (!dd->ipath_ibcddrctrl) {
- /* not on re-init after reset */
- dd->ipath_ibcddrctrl =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
-
- if (dd->ipath_link_speed_enabled ==
- (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_speed_enabled == IPATH_IB_DDR
- ? IBA7220_IBC_SPEED_DDR :
- IBA7220_IBC_SPEED_SDR;
- if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_width_enabled == IB_WIDTH_4X
- ? IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- /*
- * automatic lane reversal detection for receive
- * doesn't work correctly in rev 1, so disable it
- * on that rev, otherwise enable (disabling not
- * sticky across reload for >rev1)
- */
- if (dd->ipath_minrev == 1)
- dd->ipath_ibcddrctrl &=
- ~IBA7220_IBC_LANE_REV_SUPPORTED;
- else
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_LANE_REV_SUPPORTED;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
-
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0Ull);
-
- /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
- /* remember if uC was in Reset or not, for dactrim */
- was_reset = (val & 1);
- ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
- was_reset ? "Asserted" : "Negated", (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- if (dd->ipath_boardrev) {
- /*
- * Hardware is not emulator, and may have been reset. Init it.
- * Below will release reset, but needs to know if chip was
- * originally in reset, to only trim DACs on first time
- * after chip reset or powercycle (not driver reload)
- */
- ret = ipath_sd7220_init(dd, was_reset);
- }
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- val |= INFINIPATH_XGXS_FC_SAFE;
- if (val != prev_val) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- }
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
- (unsigned long long) prev_val);
-
- guid = be64_to_cpu(dd->ipath_guid);
-
- if (!guid) {
- /* have to have something, so use likely unique tsc */
- guid = get_cycles();
- ipath_dbg("No GUID for heartbeat, faking %llx\n",
- (unsigned long long)guid);
- } else
- ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
- (unsigned long long) guid);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
- return ret;
-}
-
-static void ipath_7220_config_jint(struct ipath_devdata *dd,
- u16 idle_ticks, u16 max_packets)
-{
-
- /*
- * We can request a receive interrupt for 1 or more packets
- * from current offset.
- */
- if (idle_ticks == 0 || max_packets == 0)
- /* interrupt after one packet if no mitigation */
- dd->ipath_rhdrhead_intr_off =
- 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
- else
- /* Turn off RcvHdrHead interrupts if using mitigation */
- dd->ipath_rhdrhead_intr_off = 0ULL;
-
- /* refresh kernel RcvHdrHead registers... */
- ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off |
- dd->ipath_pd[0]->port_head, 0);
-
- dd->ipath_jint_max_packets = max_packets;
- dd->ipath_jint_idle_ticks = idle_ticks;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
- ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
- idle_ticks);
-}
-
-/**
- * ipath_7220_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val;
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
-
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- wake_up(&dd->ipath_autoneg_wait);
- cancel_delayed_work(&dd->ipath_autoneg_work);
- flush_scheduled_work();
- ipath_shutdown_relock_poll(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val |= INFINIPATH_XGXS_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-}
-
-static int ipath_7220_intconfig(struct ipath_devdata *dd)
-{
- ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
- dd->ipath_jint_max_packets);
- return 0;
-}
-
-/**
- * ipath_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl, ledblink = 0;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- /*
- * counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd
- */
- ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
- ledblink);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi is off...
- */
-static void ipath_enable_intx(struct pci_dev *pdev)
-{
- u16 cw, new;
- int pos;
-
- /* first, turn on INTx */
- pci_read_config_word(pdev, PCI_COMMAND, &cw);
- new = cw & ~PCI_COMMAND_INTX_DISABLE;
- if (new != cw)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-
- /* then turn off MSI */
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
-}
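/*
 * Hedged sketch (editorial, not part of the original driver): on kernels
 * that export pci_intx() and track MSI state in struct pci_dev, the same
 * "INTx on, MSI off" switch can be expressed with core helpers instead of
 * raw config-space writes. As the driver's own ipath_7220_nomsi() notes,
 * newer kernels require the irq to be freed before pci_disable_msi().
 */
static void example_force_intx(struct pci_dev *pdev)
{
	pci_intx(pdev, 1);		/* clear PCI_COMMAND_INTX_DISABLE */
	if (pdev->msi_enabled)		/* only touch MSI if it is enabled */
		pci_disable_msi(pdev);	/* core clears PCI_MSI_FLAGS_ENABLE */
}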
-
-static int ipath_msi_enabled(struct pci_dev *pdev)
-{
- int pos, ret = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- u16 cw;
-
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
- }
- return ret;
-}
-
-/*
- * disable msi interrupt if enabled, and clear the flag.
- * flag is used primarily for the fallback to INTx, but
- * is also used in reinit after reset as a flag.
- */
-static void ipath_7220_nomsi(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0;
-
- if (ipath_msi_enabled(dd->pcidev)) {
- /*
- * free the irq, but don't zero it; later kernels require
- * that it be freed before pci_disable_msi(), so the INTx
- * setup has to request it again.
- */
- if (dd->ipath_irq)
- free_irq(dd->ipath_irq, dd);
- pci_disable_msi(dd->pcidev);
- }
-}
-
-/*
- * ipath_setup_7220_cleanup - clean up any chip-specific stuff
- * @dd: the infinipath device
- *
- * Nothing but msi interrupt cleanup for now.
- *
- * This is called during driver unload.
- */
-static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
-{
- ipath_7220_nomsi(dd);
-}
-
-
-static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
-{
- u16 linkstat, minwidth, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is in bits 0-3, link width in bits 4-9;
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
- switch (boardrev) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < minwidth)
- ipath_dev_err(dd,
- "PCIe width %u (x%u HCA), performance "
- "reduced\n", linkstat, minwidth);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
- dd->ipath_lbus_speed, linkstat, minwidth);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
- return;
-}
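/*
 * Hedged sketch (editorial): later kernels do provide defines for the
 * Link Status fields decoded by hand above, so the same read could be
 * written with PCI_EXP_LNKSTA_CLS/NLW from <linux/pci_regs.h>.
 */
static void example_read_pcie_link_status(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	u16 lnksta, speed, width;

	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &lnksta);
	speed = lnksta & PCI_EXP_LNKSTA_CLS;	/* 1 = 2.5 GT/s, 2 = 5 GT/s */
	width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
	(void)speed;				/* example only */
	(void)width;
}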
-
-
-/**
- * ipath_setup_7220_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels at least;
- * fixed late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call pci_enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning,
- * so we call pci_enable_msi() only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_7220_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret = -1;
- u32 boardrev;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
- ret = pci_enable_msi(pdev);
- if (ret) {
- if (!strcmp(int_type, "force_msi")) {
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "force_msi is on, so not continuing.\n",
- ret);
- return ret;
- }
-
- ipath_enable_intx(pdev);
- if (!strcmp(int_type, "auto"))
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "falling back to INTx\n", ret);
- } else if (pos) {
- u16 control;
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(pdev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? PCI_MSI_DATA_64 :
- PCI_MSI_DATA_32),
- &dd->ipath_msi_data);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- dd->ipath_irq = pdev->irq;
-
- /*
- * We save the cachelinesize also, although it doesn't
- * really matter.
- */
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
-
- /*
- * this function is called early, before ipath_boardrev is set. Can't
- * use ipath_read_kreg64() yet, too early in init, so use readq()
- */
- boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
- >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
-
- ipath_7220_pcie_params(dd, boardrev);
-
- dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
- IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
- dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
- return 0;
-}
-
-static void ipath_init_7220_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_7220_kregs;
- dd->ipath_cregs = &ipath_7220_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
- INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
- INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
- INFINIPATH_HWE_SDMAMEMREADERR |
- INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
- INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
- INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
- dd->ipath_i_bitsextant =
- INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
- INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SENDSPECIALTRIGGER |
- INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
- INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
- INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
- INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
- INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
- INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
- INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
- INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
- INFINIPATH_E_SDMAUNEXPDATA |
- INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
- INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
- INFINIPATH_E_SDMADESCADDRMISALIGN |
- INFINIPATH_E_INVALIDEEPCMD;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
- dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
- | IPATH_HAS_LINK_LATENCY;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
- ipath_linkrecovery = 0;
-
- init_waitqueue_head(&dd->ipath_autoneg_wait);
- INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
-
- dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /*
- * set the initial values to reasonable default, will be set
- * for real when link is up.
- */
- dd->ipath_link_width_active = IB_WIDTH_4X;
- dd->ipath_link_speed_active = IPATH_IB_SDR;
- dd->delay_mult = rate_to_delay[0][1];
-}
-
-
-/*
- * Set up the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_7220_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int ret = 0;
-
- int pos;
- u16 control;
- if (!dd->ipath_msi_lo) /* Using intX, or init problem */
- goto bail;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
- if (!pos) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- ret = 1;
-
-bail:
- if (!ret) {
- ipath_dbg("Using INTx, MSI disabled or not configured\n");
- ipath_enable_intx(dd->pcidev);
- ret = 1;
- }
- /*
- * We restore the cachelinesize also, although it doesn't really
- * matter.
- */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_7220_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use dev_err so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
-
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
-
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- r = pci_enable_device(dd->pcidev);
- if (r)
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * whether it fully enabled or not, mark it as present
- * again (but not INITTED)
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_7220_pcie_params(dd, dd->ipath_boardrev);
-
- return ret;
-}
-
-/**
- * ipath_7220_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->ipath_tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "larger than supported\n", pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- writeq(chippa, tidptr);
- } else
- writeq(pa, tidptr);
- mmiowb();
-}
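/*
 * Editorial note: the "paranoia check" above is equivalent to testing the
 * low address bits directly; with the shift used here,
 * (pa & ((1UL << IBA7220_TID_PA_SHIFT) - 1)) != 0 means the buffer is not
 * 2KB aligned and cannot be programmed as a TID.
 */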
-
-/**
- * ipath_7220_clear_tid - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits.
- */
-static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
- * sizeof(*tidbase));
-
- for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
- ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * ipath_7220_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time.
- */
-static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
-{
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * ipath_rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->ipath_rcvegrbufsize == 2048)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->ipath_rcvegrbufsize == 4096)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- else {
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- 4096);
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_7220_early_init(struct ipath_devdata *dd)
-{
- u32 i, s;
-
- if (strcmp(int_type, "auto") &&
- strcmp(int_type, "force_msi") &&
- strcmp(int_type, "force_intx")) {
- ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
- "auto, force_msi or force_intx\n", int_type);
- return -EINVAL;
- }
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between data fetches and descriptor
- * fetches. ipath_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
- dd->ipath_control |= 1<<4;
-
- dd->ipath_flags |= IPATH_4BYTE_TID;
-
- /*
- * For openfabrics, we need to be able to handle an IB header of
- * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
- * made them the same size as the PIO buffers. This chip does not
- * handle arbitrary size buffers, so we need the header large enough
- * to handle largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset =
- dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
- INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
-
- if (dd->ipath_boardrev) /* no eeprom on emulator */
- ipath_get_eeprom_info(dd);
-
- /* start of code to check and print procmon */
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- s &= ~(1U<<31); /* clear done bit */
- s |= 1U<<14; /* clear counter (write 1 to clear) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- /* make sure clear_counter stays low long enough before starting */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-
- s &= ~(1U<<14); /* allow counter to count (before starting) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-
- s |= 1U<<15; /* start the counter */
- s &= ~(1U<<31); /* clear done bit */
- s &= ~0x7ffU; /* clear frequency bits */
- s |= 0xe29; /* set frequency bits, in case cleared */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-
- s = 0;
- for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- }
- if (!(s&(1U<<31)))
- ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
- else
- ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
-
- return 0;
-}
-
-/**
- * ipath_init_7220_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
-
- kinfo->spi_runtime_flags |=
- IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
- IPATH_RUNTIME_SDMA;
-
- return 0;
-}
-
-static void ipath_7220_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-static struct ipath_message_header *
-ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = ipath_hdrget_offset(rhf_addr);
-
- return (struct ipath_message_header *)
- (rhf_addr - dd->ipath_rhf_offset + offset);
-}
-
-static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- u32 nchipports;
-
- nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- if (!cfgports) {
- int ncpus = num_online_cpus();
-
- if (ncpus <= 4)
- dd->ipath_portcnt = 5;
- else if (ncpus <= 8)
- dd->ipath_portcnt = 9;
- if (dd->ipath_portcnt)
- ipath_dbg("Auto-configured for %u ports, %d cpus "
- "online\n", dd->ipath_portcnt, ncpus);
- } else if (cfgports <= nchipports)
- dd->ipath_portcnt = cfgports;
- if (!dd->ipath_portcnt) /* none of the above, set to max */
- dd->ipath_portcnt = nchipports;
- /*
- * The chip can be configured for 5, 9, or 17 ports, and the choice
- * affects the number of eager TIDs per port (1K, 2K, or 4K).
- */
- if (dd->ipath_portcnt > 9)
- dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
- else if (dd->ipath_portcnt > 5)
- dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
- /* else configure for default 5 receive ports */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- dd->ipath_p0_rcvegrcnt = 2048; /* always */
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
-}
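/*
 * Editorial sketch of the PORTCFG encoding implied by the code above
 * (derived from the shifts used there, not from chip documentation):
 *   portcnt <= 5  -> PORTCFG field 0 (chip default)
 *   portcnt <= 9  -> PORTCFG field 1
 *   portcnt <= 17 -> PORTCFG field 2
 * The field is written at IBA7220_R_PORTCFG_SHIFT within kr_rcvctrl.
 */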
-
-
-static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = dd->ipath_link_width_enabled;
- goto done;
-
- case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
- ret = dd->ipath_link_width_active;
- goto done;
-
- case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = dd->ipath_link_speed_enabled;
- goto done;
-
- case IPATH_IB_CFG_SPD: /* Get current Link spd */
- ret = dd->ipath_link_speed_active;
- goto done;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case IPATH_IB_CFG_LINKLATENCY:
- ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- default:
- ret = -ENOTSUPP;
- goto done;
- }
- ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
-
-static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int lsb, ret = 0, setforce = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid a possible hazard; the
- * caller puts LMC in the 16 MSbits and DLID in the 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val & IPATH_IB_HRTBT_ON &&
- (dd->ipath_flags & IPATH_NO_HRTBT))
- goto bail;
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down, otherwise takes effect
- * on next link change.
- */
- dd->ipath_link_width_enabled = val;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
- break;
-
- case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, we need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down, otherwise
- * it takes effect on the next link change. Since the setting is being
- * explicitly requested (via MAD or sysfs), clear the autoneg
- * failure status if speed autoneg is enabled.
- */
- dd->ipath_link_speed_enabled = val;
- if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
- !(val & (val - 1)))
- dd->ipath_presets_needed = 1;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition. When setting
- * speed autoneg, clear AUTONEG_FAILED.
- */
- if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- } else
- val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
- : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- lsb = 0; /* speed bits are low bits */
- setforce = 1;
- break;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- default:
- ret = -ENOTSUPP;
- goto bail;
- }
- dd->ipath_ibcddrctrl &= ~(maskr << lsb);
- dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- if (setforce)
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
-bail:
- return ret;
-}
-
-static void ipath_7220_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- u64 *counters = (u64 *) cntrs;
- int i;
-
- for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
- counters[i] = ipath_snap_cntr(dd, i);
-}
-
-/* if we are using MSI, try to fall back to INTx */
-static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
-{
- if (dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
- " trying INTx interrupts\n");
- ipath_7220_nomsi(dd);
- ipath_enable_intx(dd->pcidev);
- /*
- * some newer kernels require free_irq before disable_msi,
- * and the irq can change during MSI disable and INTx enable,
- * so we need to use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->ipath_irq = dd->pcidev->irq;
- if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
- IPATH_DRV_NAME, dd))
- ipath_dev_err(dd,
- "Could not re-request_irq for INTx\n");
- return 1;
- }
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-/* Still needs cleanup, too much hardwired stuff */
-static void autoneg_send(struct ipath_devdata *dd,
- u32 *hdr, u32 dcnt, u32 *data)
-{
- int i;
- u64 cnt;
- u32 __iomem *piobuf;
- u32 pnum;
-
- i = 0;
- cnt = 7 + dcnt + 1; /* 7 dword header, dcnt dwords of data, 1 dword icrc */
- while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
- if (i++ > 15) {
- ipath_dbg("Couldn't get pio buffer for send\n");
- return;
- }
- udelay(2);
- }
- if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
- cnt |= 0x80000000UL<<32; /* mark as VL15 */
- writeq(cnt, piobuf);
- ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, hdr, 7);
- __iowrite32_copy(piobuf + 9, data, dcnt);
- ipath_flush_wc();
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
-{
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
- ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start");
-
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
-}
-
-
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if the link is in the Polling training state, it
- * will happen immediately, otherwise when the link next goes down).
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and then
- * we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
-{
- dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK |
- (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
-
- if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- /*
- * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
- * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
- */
- dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
- IBA7220_IBC_WIDTH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
-}
-
-
-/*
- * this routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT.
- */
-static void try_auto_neg(struct ipath_devdata *dd)
-{
- /*
- * required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far
- * aren't bothered by it either. The value written below is a "magic constant".
- */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
- 0x3b9dc07);
- dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
- ipath_autoneg_send(dd, 0);
- set_speed_fast(dd, IPATH_IB_DDR);
- ipath_toggle_rclkrls(dd);
- /* 2 msec is minimum length of a poll cycle */
- schedule_delayed_work(&dd->ipath_autoneg_work,
- msecs_to_jiffies(2));
-}
-
-
-static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- u32 ltstate = ipath_ib_linkstate(dd, ibcs);
-
- dd->ipath_link_width_active =
- ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- dd->ipath_link_speed_active =
- ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
- IPATH_IB_DDR : IPATH_IB_SDR;
-
- if (!ibup) {
- /*
- * when the link goes down we don't want aeq running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values.
- */
- if (dd->ipath_x1_fix_tries &&
- ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
- ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
- dd->ipath_x1_fix_tries = 0;
- if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)))
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
- ipath_sd7220_presets(dd);
- }
- /* this might be better done in ipath_sd7220_presets() */
- ipath_set_relock_poll(dd, ibup);
- } else {
- if (ipath_compat_ddr_negotiate &&
- !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)) &&
- dd->ipath_link_speed_active == IPATH_IB_SDR &&
- (dd->ipath_link_speed_enabled &
- (IPATH_IB_DDR | IPATH_IB_SDR)) ==
- (IPATH_IB_DDR | IPATH_IB_SDR) &&
- dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->ipath_autoneg_tries;
- ipath_dbg("DDR negotiation try, %u/%u\n",
- dd->ipath_autoneg_tries,
- IPATH_AUTONEG_TRIES);
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- try_auto_neg(dd);
- ret = 1; /* no other IB status change processing */
- } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- && dd->ipath_link_speed_active == IPATH_IB_SDR) {
- ipath_autoneg_send(dd, 1);
- set_speed_fast(dd, IPATH_IB_DDR);
- udelay(2);
- ipath_toggle_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
- (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
- ipath_dbg("Got to INIT with DDR autoneg\n");
- dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
- | IPATH_IB_AUTONEG_FAILED);
- dd->ipath_autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_speed_fast(dd,
- dd->ipath_link_speed_enabled);
- wake_up(&dd->ipath_autoneg_wait);
- symadj = 1;
- } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
- /*
- * clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to different
- * device).
- */
- ipath_dbg("INIT %sDR after autoneg failure\n",
- (dd->ipath_link_speed_active &
- IPATH_IB_DDR) ? "D" : "S");
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- ipath_write_kreg(dd,
- IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
- symadj = 1;
- }
- }
- /*
- * On rev1 only: if we are in 1X and auto-width is enabled,
- * it could be due to an xgxs problem, so if we haven't
- * already tried, try twice to get to 4X; if we
- * tried and couldn't, report it, since it will
- * probably not be what is desired.
- */
- if (dd->ipath_minrev == 1 &&
- (dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
- && dd->ipath_link_width_active == IB_WIDTH_1X
- && dd->ipath_x1_fix_tries < 3) {
- if (++dd->ipath_x1_fix_tries == 3) {
- dev_info(&dd->pcidev->dev,
- "IB link is in 1X mode\n");
- if (!(dd->ipath_flags &
- IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
- }
- else {
- ipath_cdbg(VERBOSE, "IB 1X in "
- "auto-width, try %u to be "
- "sure it's really 1X; "
- "ltstate %u\n",
- dd->ipath_x1_fix_tries,
- ltstate);
- dd->ipath_f_xgxs_reset(dd);
- ret = 1; /* skip other processing */
- }
- } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- dd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
-
- ipath_set_relock_poll(dd, ibup);
- }
- }
-
- if (symadj) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else if (!ibup && !dd->ibdeltainprog
- && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
-
- if (!ret)
- ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ltstate);
- return ret;
-}
-
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_work(struct work_struct *work)
-{
- struct ipath_devdata *dd;
- u64 startms;
- u32 lastlts, i;
-
- dd = container_of(work, struct ipath_devdata,
- ipath_autoneg_work.work);
-
- startms = jiffies_to_msecs(jiffies);
-
- /*
- * Busy-wait for this first part; it should take at most a
- * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
- if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- goto done; /* we got there early or were told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- ipath_toggle_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_speed_fast(dd, IPATH_IB_SDR);
- ipath_toggle_rclkrls(dd);
-
- /*
- * wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early
- */
- wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
- ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
- ipath_ib_state(dd, dd->ipath_lastibcstat),
- (unsigned long long) jiffies_to_msecs(jiffies)-startms);
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
- dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
- ipath_dbg("Giving up on DDR until next IB "
- "link Down\n");
- dd->ipath_autoneg_tries = 0;
- }
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- }
-}
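/*
 * Hedged sketch (editorial) of the "expect the timeout" idiom used above;
 * wq and autoneg_done() are illustrative names, not part of the driver.
 * wait_event_timeout() returns 0 on timeout and non-zero when the
 * condition became true, so an early return means autoneg already
 * finished and the remaining stages can be skipped.
 */
static int example_wait_stage(wait_queue_head_t *wq, int (*autoneg_done)(void))
{
	if (wait_event_timeout(*wq, autoneg_done(), msecs_to_jiffies(90)))
		return 1;	/* woke early: autoneg finished */
	return 0;		/* timed out: run the next stage */
}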
-
-
-/**
- * ipath_init_iba7220_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_7220_intconfig;
- dd->ipath_f_bus = ipath_setup_7220_config;
- dd->ipath_f_reset = ipath_setup_7220_reset;
- dd->ipath_f_get_boardname = ipath_7220_boardname;
- dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
- dd->ipath_f_early_init = ipath_7220_early_init;
- dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_7220_clear_tids;
- dd->ipath_f_put_tid = ipath_7220_put_tid;
- dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
- dd->ipath_f_setextled = ipath_setup_7220_setextled;
- dd->ipath_f_get_base_info = ipath_7220_get_base_info;
- dd->ipath_f_free_irq = ipath_7220_free_irq;
- dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
- dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
- dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_7220_config_jint;
- dd->ipath_f_config_ports = ipath_7220_config_ports;
- dd->ipath_f_read_counters = ipath_7220_read_counters;
- dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
- dd->ipath_f_ib_updown = ipath_7220_ib_updown;
-
- /* initialize chip-specific variables */
- ipath_init_7220_variables(dd);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index b3d7efcdf02..6559af60bff 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail);
-void ipath_init_iba7220_funcs(struct ipath_devdata *);
-void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 559f39be0dc..dd7f26d04d4 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
- ret = ib_register_device(dev);
+ ret = ib_register_device(dev, NULL);
if (ret)
goto err_reg;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac5..5a219a2fdf1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
wc->opcode = IB_WC_FETCH_ADD;
wc->byte_len = 8;
break;
+ case MLX4_OPCODE_MASKED_ATOMIC_CS:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX4_OPCODE_MASKED_ATOMIC_FA:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f9335..4e94e360e43 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_HCA;
props->max_pkeys = dev->dev->caps.pkey_table_len[1];
props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
@@ -661,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ib_register_device(&ibdev->ib_dev))
+ if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_map;
if (mlx4_ib_mad_init(ibdev))
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffe..6a60827b230 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
};
static const __be32 mlx4_ib_opcode[] = {
- [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
- [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
- [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
- [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
- [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
- [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
- [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
- [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
- [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
- [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
- [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
+ [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+ [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+ [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+ [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+ [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+ [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+ [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+ [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
}
+static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
+ struct ib_send_wr *wr)
+{
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+ aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+}
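/*
 * Editorial note (hedged, based on the IB masked-atomic verbs rather than
 * mlx4 documentation): IB_WR_MASKED_ATOMIC_CMP_AND_SWP compares and swaps
 * only the bits selected by compare_add_mask/swap_mask, while
 * IB_WR_MASKED_ATOMIC_FETCH_AND_ADD adds compare_add with carries blocked
 * at the boundaries given by compare_add_mask, so several independent
 * fields can be updated within one 64-bit atomic.
 */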
+
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
+ wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+ set_masked_atomic_seg(wqe, wr);
+ wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
+
+ size += (sizeof (struct mlx4_wqe_raddr_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
+
+ break;
+
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab6..b4e0cf4e95c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
if (!buf->direct.buf)
return -ENOMEM;
- pci_unmap_addr_set(&buf->direct, mapping, t);
+ dma_unmap_addr_set(&buf->direct, mapping, t);
memset(buf->direct.buf, 0, size);
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
goto err_free;
dma_list[i] = t;
- pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+ dma_unmap_addr_set(&buf->page_list[i], mapping, t);
clear_page(buf->page_list[i].buf);
}
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
if (is_direct)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
- pci_unmap_addr(&buf->direct, mapping));
+ dma_unmap_addr(&buf->direct, mapping));
else {
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
- pci_unmap_addr(&buf->page_list[i],
+ dma_unmap_addr(&buf->page_list[i],
mapping));
kfree(buf->page_list);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b605..8e8c728aff8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
goto err_out_free_pages;
dma_list[i] = t;
- pci_unmap_addr_set(&eq->page_list[i], mapping, t);
+ dma_unmap_addr_set(&eq->page_list[i], mapping, t);
clear_page(eq->page_list[i].buf);
}
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
if (eq->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i],
+ dma_unmap_addr(&eq->page_list[i],
mapping));
mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
for (i = 0; i < npages; ++i)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i], mapping));
+ dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index f080a784bc7..1e0b4b6074a 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
- ret = ib_register_device(&dev->ib_dev);
+ ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e98..596acc45569 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
struct mthca_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union mthca_buf {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f51492..57874a16508 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
/**
* nes_init_1g_phy
*/
-int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 counter = 0;
u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
/**
* nes_init_2025_phy
*/
-int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 temp_phy_data = 0;
u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
return;
}
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
/* ack the MAC interrupt */
mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
nesdev->link_status_interrupts++;
- if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
nes_reset_link(nesdev, mac_index);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- }
+
/* read the PHY interrupt status register */
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2641,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
}
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
}
@@ -3424,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 aeq_info;
u32 next_iwarp_state = 0;
+ u32 aeqe_cq_id;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
@@ -3451,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+ aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+ if (aeq_info & NES_AEQE_QP) {
+ if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
+ aeqe_cq_id)) ||
+ (atomic_read(&nesqp->close_timer_started)))
+ return;
+ }
+
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
if (nesqp->term_flags)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be4..5cc0a9ae5bb 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (!mc_all_on) {
char *addrs;
int i;
- struct dev_mc_list *mcaddr;
+ struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
goto unlock;
}
i = 0;
- netdev_for_each_mc_addr(mcaddr, netdev)
- memcpy(get_addr(addrs, i++),
- mcaddr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
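
For context (not part of this patch): struct dev_mc_list and its dmi_addr field are gone from the network core; multicast addresses are now walked as struct netdev_hw_addr entries, as the hunk above does. A minimal stand-alone sketch of the pattern, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Illustrative only: visit each multicast address registered on netdev. */
static void example_walk_mc_list(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, netdev) {
		/* ha->addr holds the ETH_ALEN-byte multicast MAC address */
		pr_debug("mc addr %pM\n", ha->addr);
	}
}
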
@@ -1002,6 +1001,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
@@ -1016,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
+ "Rx Length Errors",
+ "Rx CRC Errors",
+ "Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
+ "Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
@@ -1049,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
+ "Free 4Kpbls",
+ "Free 256pbls",
"Timer Inits",
- "CQ Depth 1",
- "CQ Depth 4",
- "CQ Depth 16",
- "CQ Depth 24",
- "CQ Depth 32",
- "CQ Depth 128",
- "CQ Depth 256",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
};
-
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
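
Note that nes_ethtool_stringset[] above and the values filled in by nes_netdev_get_ethtool_stats() below are parallel arrays: every string added or removed (as this patch does for the new Rx/Tx error and pbl counters) needs a matching target_stat_values[++index] assignment, and NES_ETHTOOL_STAT_COUNT is what the string-set count reports. A minimal sketch of that contract, with hypothetical names, not part of this patch:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

static const char example_strings[][ETH_GSTRING_LEN] = {
	"Link Change Interrupts",
	"Tx Errors",
};
#define EXAMPLE_STAT_COUNT ARRAY_SIZE(example_strings)

/* what the string-set count reports ... */
static int example_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return EXAMPLE_STAT_COUNT;
}

/* ... must match how many values the stats callback writes */
static void example_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	int index = -1;

	data[++index] = 0;	/* "Link Change Interrupts" */
	data[++index] = 0;	/* "Tx Errors" */
	/* index + 1 == EXAMPLE_STAT_COUNT must hold here */
}
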
/**
@@ -1121,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
/**
* nes_netdev_get_ethtool_stats
*/
+
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
@@ -1155,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_length_errors += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+ nesvnic->netstats.rx_crc_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_tx_errors += u32temp;
+ nesvnic->netstats.tx_errors += u32temp;
+
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
@@ -1219,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+ target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
+ target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
+ target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+ target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@@ -1252,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = int_mod_cq_depth_1;
- target_stat_values[++index] = int_mod_cq_depth_4;
- target_stat_values[++index] = int_mod_cq_depth_16;
- target_stat_values[++index] = int_mod_cq_depth_24;
- target_stat_values[++index] = int_mod_cq_depth_32;
- target_stat_values[++index] = int_mod_cq_depth_128;
- target_stat_values[++index] = int_mod_cq_depth_256;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-
}
-
/**
* nes_netdev_get_drvinfo
*/
@@ -1461,11 +1498,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
+ unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
else
@@ -1503,12 +1543,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- u16 phy_data;
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- &phy_data);
+ unsigned long flags;
+ u16 phy_data;
+ u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
@@ -1516,8 +1559,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
/* Turn off autoneg */
phy_data &= ~0x1000;
}
- nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d8695..a9f5dd272f1 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
*/
void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
-
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
*/
void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
/* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
phy_addr, nesdev->mac_index); */
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
} else {
*data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
}
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
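
With the two hunks above, nes_read_1G_phy_reg() and nes_write_1G_phy_reg() no longer take nesadapter->phy_lock themselves; callers are now responsible for holding it across the MDIO access, as the ethtool get/set_settings changes earlier in this patch do. A minimal sketch of the resulting calling pattern (illustrative only, hypothetical helper, not part of this patch):

/* Illustrative only: read-modify-write a 1G PHY register with the lock
 * now held by the caller rather than by the register accessors. */
static void example_phy_rmw(struct nes_device *nesdev, u8 phy_index)
{
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	unsigned long flags;
	u16 phy_data;

	spin_lock_irqsave(&nesadapter->phy_lock, flags);
	nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
	phy_data |= 0x1300;	/* e.g. full duplex + autoneg + restart */
	nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
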
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bd..9bc2d744b2e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
/*
* nes_alloc_fast_reg_mr
*/
-struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_adapter *nesadapter = nesdev->nesadapter;
int i, ret;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
if (ret) {
return ret;
}
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 00000000000..7c03a70c55a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_QIB
+ tristate "QLogic PCIe HCA support"
+ depends on 64BIT && NET
+ ---help---
+ This is a low-level driver for QLogic PCIe QLE InfiniBand host
+ channel adapters. This driver does not support the QLogic
+ HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 00000000000..c6515a1b9a6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
+
+ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
+ qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
+ qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+ qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+ qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
+ qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+ qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
+
+# The 6120 has no fallback if MSI interrupts are unavailable; the others can use INTx
+ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
+
+ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
+ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 00000000000..32d9208efcf
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
+#ifndef _QIB_KERNEL_H
+#define _QIB_KERNEL_H
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file is the base header file for qlogic_ib kernel code
+ * qib_user.h serves a similar purpose for user code.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+
+#include "qib_common.h"
+#include "qib_verbs.h"
+
+/* only s/w major version of QLogic_IB we can handle */
+#define QIB_CHIP_VERS_MAJ 2U
+
+/* don't care about this except printing */
+#define QIB_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define QIB_OUI 0x001175
+#define QIB_OUI_LSB 40
+
+/*
+ * per driver stats, either not device nor port-specific, or
+ * summed over all of the devices and ports.
+ * They are described by name via ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted qib_statnames[] in qib_fs.c must
+ * change to match.
+ */
+struct qlogic_ib_stats {
+ __u64 sps_ints; /* number of interrupts handled */
+ __u64 sps_errints; /* number of error interrupts */
+ __u64 sps_txerrs; /* tx-related packet errors */
+ __u64 sps_rcverrs; /* non-crc rcv packet errors */
+ __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+ __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+ __u64 sps_ctxts; /* number of contexts currently open */
+ __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+ __u64 sps_buffull;
+ __u64 sps_hdrfull;
+};
+
+extern struct qlogic_ib_stats qib_stats;
+extern struct pci_error_handlers qib_pci_err_handler;
+extern struct pci_driver qib_driver;
+
+#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
+/*
+ * First-cut critierion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
+
+/*
+ * Struct used to indicate which errors are logged in each of the
+ * error-counters that are logged to EEPROM. A counter is incremented
+ * _once_ (saturating at 255) for each event with any bits set in
+ * the error or hwerror register masks below.
+ */
+#define QIB_EEP_LOG_CNT (4)
+struct qib_eep_log_mask {
+ u64 errs_to_log;
+ u64 hwerrs_to_log;
+};
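
The masks above feed saturating 8-bit counters (eep_st_errs/eep_st_new_errs further down in this header, updated via qib_inc_eeprom_err()/qib_update_eeprom_log()). A short sketch of the once-per-event, saturate-at-255 behaviour the comment describes; illustrative only, not the driver's actual implementation:

#include <linux/types.h>

/* Illustrative only: log one error/hwerror event into saturating counters. */
static void example_log_event(u8 cnt[QIB_EEP_LOG_CNT],
			      const struct qib_eep_log_mask mask[QIB_EEP_LOG_CNT],
			      u64 errs, u64 hwerrs)
{
	int i;

	for (i = 0; i < QIB_EEP_LOG_CNT; i++) {
		/* any overlapping bit counts once per event, capped at 255 */
		if (((errs & mask[i].errs_to_log) ||
		     (hwerrs & mask[i].hwerrs_to_log)) && cnt[i] != 255)
			cnt[i]++;
	}
}
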
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+struct qib_ctxtdata {
+ void **rcvegrbuf;
+ dma_addr_t *rcvegrbuf_phys;
+ /* rcvhdrq base, needs mmap before useful */
+ void *rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ void *rcvhdrtail_kvaddr;
+ /*
+ * temp buffer for expected send setup, allocated at open, instead
+ * of each setup call
+ */
+ void *tid_pg_list;
+ /*
+ * Shared page for kernel to signal user processes that send buffers
+ * need disarming. The process should call QIB_CMD_DISARM_BUFS
+ * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
+ */
+ unsigned long *user_event_mask;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /*
+ * rcvegr bufs base, physical; must fit
+ * in 44 bits so a 32 bit program's mmap64 of the 44 bit address works
+ */
+ dma_addr_t rcvegr_phys;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t rcvhdrq_phys;
+ dma_addr_t rcvhdrqtailaddr_phys;
+
+ /*
+ * number of opens (including slave sub-contexts) on this instance
+ * (ignoring forks, dup, etc. for now)
+ */
+ int cnt;
+ /*
+ * how much space to leave at start of eager TID entries for
+ * protocol use, on each TID
+ */
+ /* instead of calculating it */
+ unsigned ctxt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_cnt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* number of eager TID entries. */
+ u16 rcvegrcnt;
+ /* index of first eager TID entry. */
+ u16 rcvegr_tid_base;
+ /* number of pio bufs for this ctxt (all procs, if shared) */
+ u32 piocnt;
+ /* first pio buffer for this ctxt */
+ u32 pio_base;
+ /* chip offset of PIO buffers for this ctxt */
+ u32 piobufs;
+ /* how many alloc_pages() chunks in rcvegrbuf_pages */
+ u32 rcvegrbuf_chunks;
+ /* how many egrbufs per chunk */
+ u32 rcvegrbufs_perchunk;
+ /* order for rcvegrbuf_pages */
+ size_t rcvegrbuf_size;
+ /* rcvhdrq size (for freeing) */
+ size_t rcvhdrq_size;
+ /* per-context flags for fileops/intr communication */
+ unsigned long flag;
+ /* next expected TID to check when looking for free */
+ u32 tidcursor;
+ /* WAIT_RCV that timed out, no interrupt */
+ u32 rcvwait_to;
+ /* WAIT_PIO that timed out, no interrupt */
+ u32 piowait_to;
+ /* WAIT_RCV already happened, no wait */
+ u32 rcvnowait;
+ /* WAIT_PIO already happened, no wait */
+ u32 pionowait;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
+ /* pid of process using this ctxt */
+ pid_t pid;
+ pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
+ /* same size as task_struct .comm[], command that opened context */
+ char comm[16];
+ /* pkeys set by this use of this ctxt */
+ u16 pkeys[4];
+ /* so file ops can get at unit */
+ struct qib_devdata *dd;
+ /* so funcs that need physical port can get it easily */
+ struct qib_pportdata *ppd;
+ /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+ void *subctxt_uregbase;
+ /* An array of pages for the eager receive buffers * N */
+ void *subctxt_rcvegrbuf;
+ /* An array of pages for the eager header queue entries * N */
+ void *subctxt_rcvhdr_base;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
+ /* Bitmask of active slaves */
+ u32 active_slaves;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
+ /* receive packet sequence counter */
+ u8 seq_cnt;
+ u8 redirect_seq_cnt;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
+ u32 pkt_count;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+};
+
+struct qib_sge_state;
+
+struct qib_sdma_txreq {
+ int flags;
+ int sg_count;
+ dma_addr_t addr;
+ void (*callback)(struct qib_sdma_txreq *, int);
+ u16 start_idx; /* sdma private */
+ u16 next_descq_idx; /* sdma private */
+ struct list_head list; /* sdma private */
+};
+
+struct qib_sdma_desc {
+ __le64 qw[2];
+};
+
+struct qib_verbs_txreq {
+ struct qib_sdma_txreq txreq;
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ u32 dwords;
+ u16 hdr_dwords;
+ u16 hdr_inx;
+ struct qib_pio_header *align_buf;
+ struct qib_mregion *mr;
+ struct qib_sge_state *ss;
+};
+
+#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
+#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
+#define QIB_SDMA_TXREQ_F_INTREQ 0x4
+#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
+#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
+
+#define QIB_SDMA_TXREQ_S_OK 0
+#define QIB_SDMA_TXREQ_S_SENDERROR 1
+#define QIB_SDMA_TXREQ_S_ABORTED 2
+#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
+#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define QIB_IB_CFG_SPD 5 /* current Link spd */
+#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
+#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
+#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
+#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
+#define QIB_IB_CFG_VL_HIGH_LIMIT 19
+#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
+ * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
+ * QIB_IB_CFG_LINKDEFAULT cmd
+ */
+#define IB_LINKCMD_DOWN (0 << 16)
+#define IB_LINKCMD_ARMED (1 << 16)
+#define IB_LINKCMD_ACTIVE (2 << 16)
+#define IB_LINKINITCMD_NOP 0
+#define IB_LINKINITCMD_POLL 1
+#define IB_LINKINITCMD_SLEEP 2
+#define IB_LINKINITCMD_DISABLE 3
+
+/*
+ * valid states passed to qib_set_linkstate() user call
+ */
+#define QIB_IB_LINKDOWN 0
+#define QIB_IB_LINKARM 1
+#define QIB_IB_LINKACTIVE 2
+#define QIB_IB_LINKDOWN_ONLY 3
+#define QIB_IB_LINKDOWN_SLEEP 4
+#define QIB_IB_LINKDOWN_DISABLE 5
+
+/*
+ * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
+ * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
+ * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
+ * are also the possible values for qib_link_speed_enabled and active
+ * The values were chosen to match values used within the IB spec.
+ */
+#define QIB_IB_SDR 1
+#define QIB_IB_DDR 2
+#define QIB_IB_QDR 4
+
+#define QIB_DEFAULT_MTU 4096
+
+/*
+ * Possible IB config parameters for f_get/set_ib_table()
+ */
+#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
+#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
+ */
+#define QIB_RCVCTRL_TAILUPD_ENB 0x01
+#define QIB_RCVCTRL_TAILUPD_DIS 0x02
+#define QIB_RCVCTRL_CTXT_ENB 0x04
+#define QIB_RCVCTRL_CTXT_DIS 0x08
+#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
+#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
+#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
+#define QIB_RCVCTRL_PKEY_DIS 0x80
+#define QIB_RCVCTRL_BP_ENB 0x0100
+#define QIB_RCVCTRL_BP_DIS 0x0200
+#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
+#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
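
Since each operation is a distinct bit, several can be OR'ed into a single f_rcvctrl() chip access, as the comment above notes. A minimal usage sketch (illustrative only, not part of this patch):

/* Illustrative only: enable a receive context together with its
 * interrupt-available notification in one f_rcvctrl() call. */
static void example_enable_ctxt(struct qib_pportdata *ppd, int ctxt)
{
	ppd->dd->f_rcvctrl(ppd, QIB_RCVCTRL_CTXT_ENB |
				QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
}
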
+
+/*
+ * Possible "operations" for f_sendctrl(ppd, op, var)
+ * these are bits so they can be combined, e.g.
+ * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
+ * Some operations (e.g. DISARM, ABORT) are known to
+ * be "one-shot", so do not modify shadow.
+ */
+#define QIB_SENDCTRL_DISARM (0x1000)
+#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
+ /* available (0x2000) */
+#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
+#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
+#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
+#define QIB_SENDCTRL_SEND_DIS (0x20000)
+#define QIB_SENDCTRL_SEND_ENB (0x40000)
+#define QIB_SENDCTRL_FLUSH (0x80000)
+#define QIB_SENDCTRL_CLEAR (0x100000)
+#define QIB_SENDCTRL_DISARM_ALL (0x200000)
+
+/*
+ * These are the generic indices for requesting per-port
+ * counter values via the f_portcntr function. They
+ * are always returned as 64 bit values, although most
+ * are 32 bit counters.
+ */
+/* send-related counters */
+#define QIBPORTCNTR_PKTSEND 0U
+#define QIBPORTCNTR_WORDSEND 1U
+#define QIBPORTCNTR_PSXMITDATA 2U
+#define QIBPORTCNTR_PSXMITPKTS 3U
+#define QIBPORTCNTR_PSXMITWAIT 4U
+#define QIBPORTCNTR_SENDSTALL 5U
+/* receive-related counters */
+#define QIBPORTCNTR_PKTRCV 6U
+#define QIBPORTCNTR_PSRCVDATA 7U
+#define QIBPORTCNTR_PSRCVPKTS 8U
+#define QIBPORTCNTR_RCVEBP 9U
+#define QIBPORTCNTR_RCVOVFL 10U
+#define QIBPORTCNTR_WORDRCV 11U
+/* IB link related error counters */
+#define QIBPORTCNTR_RXLOCALPHYERR 12U
+#define QIBPORTCNTR_RXVLERR 13U
+#define QIBPORTCNTR_ERRICRC 14U
+#define QIBPORTCNTR_ERRVCRC 15U
+#define QIBPORTCNTR_ERRLPCRC 16U
+#define QIBPORTCNTR_BADFORMAT 17U
+#define QIBPORTCNTR_ERR_RLEN 18U
+#define QIBPORTCNTR_IBSYMBOLERR 19U
+#define QIBPORTCNTR_INVALIDRLEN 20U
+#define QIBPORTCNTR_UNSUPVL 21U
+#define QIBPORTCNTR_EXCESSBUFOVFL 22U
+#define QIBPORTCNTR_ERRLINK 23U
+#define QIBPORTCNTR_IBLINKDOWN 24U
+#define QIBPORTCNTR_IBLINKERRRECOV 25U
+#define QIBPORTCNTR_LLI 26U
+/* other error counters */
+#define QIBPORTCNTR_RXDROPPKT 27U
+#define QIBPORTCNTR_VL15PKTDROP 28U
+#define QIBPORTCNTR_ERRPKEY 29U
+#define QIBPORTCNTR_KHDROVFL 30U
+/* sampling counters (these are actually control registers) */
+#define QIBPORTCNTR_PSINTERVAL 31U
+#define QIBPORTCNTR_PSSTART 32U
+#define QIBPORTCNTR_PSSTAT 33U
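
These indices are handed to the per-device f_portcntr hook (see struct qib_devdata below), which always returns the value widened to 64 bits. A one-line usage sketch (illustrative only, not part of this patch):

/* Illustrative only: read one generic per-port counter. */
static u64 example_symbol_errors(struct qib_pportdata *ppd)
{
	return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
}
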
+
+/* how often we check for packet activity for "power on hours" (in seconds) */
+#define ACTIVITY_TIMER 5
+
+/* Below is an opaque struct. Each chip (device) can maintain
+ * private data needed for its operation, but not germane to the
+ * rest of the driver. For convenience, we define another that
+ * is chip-specific, per-port
+ */
+struct qib_chip_specific;
+struct qib_chipport_specific;
+
+enum qib_sdma_states {
+ qib_sdma_state_s00_hw_down,
+ qib_sdma_state_s10_hw_start_up_wait,
+ qib_sdma_state_s20_idle,
+ qib_sdma_state_s30_sw_clean_up_wait,
+ qib_sdma_state_s40_hw_clean_up_wait,
+ qib_sdma_state_s50_hw_halt_wait,
+ qib_sdma_state_s99_running,
+};
+
+enum qib_sdma_events {
+ qib_sdma_event_e00_go_hw_down,
+ qib_sdma_event_e10_go_hw_start,
+ qib_sdma_event_e20_hw_started,
+ qib_sdma_event_e30_go_running,
+ qib_sdma_event_e40_sw_cleaned,
+ qib_sdma_event_e50_hw_cleaned,
+ qib_sdma_event_e60_hw_halted,
+ qib_sdma_event_e70_go_idle,
+ qib_sdma_event_e7220_err_halted,
+ qib_sdma_event_e7322_err_halted,
+ qib_sdma_event_e90_timer_tick,
+};
+
+extern char *qib_sdma_state_names[];
+extern char *qib_sdma_event_names[];
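
The states and events above drive a small per-port send-DMA state machine; code outside it requests transitions through qib_sdma_process_event() and checks the result with qib_sdma_running(), both declared near the end of this header. A minimal sketch (illustrative only, not part of this patch):

/* Illustrative only: ask the SDMA engine to go to the running state. */
static int example_restart_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e30_go_running);
	return qib_sdma_running(ppd);	/* may still be transitioning */
}
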
+
+struct sdma_set_state_action {
+ unsigned op_enable:1;
+ unsigned op_intenable:1;
+ unsigned op_halt:1;
+ unsigned op_drain:1;
+ unsigned go_s99_running_tofalse:1;
+ unsigned go_s99_running_totrue:1;
+};
+
+struct qib_sdma_state {
+ struct kref kref;
+ struct completion comp;
+ enum qib_sdma_states current_state;
+ struct sdma_set_state_action *set_state_action;
+ unsigned current_op;
+ unsigned go_s99_running;
+ unsigned first_sendbuf;
+ unsigned last_sendbuf; /* really last +1 */
+ /* debugging/devel */
+ enum qib_sdma_states previous_state;
+ unsigned previous_op;
+ enum qib_sdma_events last_event;
+};
+
+struct xmit_wait {
+ struct timer_list timer;
+ u64 counter;
+ u8 flags;
+ struct cache {
+ u64 psxmitdata;
+ u64 psrcvdata;
+ u64 psxmitpkts;
+ u64 psrcvpkts;
+ u64 psxmitwait;
+ } counter_cache;
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct qib_pportdata {
+ struct qib_ibport ibport_data;
+
+ struct qib_devdata *dd;
+ struct qib_chippport_specific *cpspec; /* chip-specific per-port */
+ struct kobject pport_kobj;
+ struct kobject sl2vl_kobj;
+ struct kobject diagc_kobj;
+
+ /* GUID for this interface, in network order */
+ __be64 guid;
+
+ /* QIB_POLL, etc. link-state specific flags, per port */
+ u32 lflags;
+ /* qib_lflags driver is waiting for */
+ u32 state_wanted;
+ spinlock_t lflags_lock;
+ /* number of (port-specific) interrupts for this port -- saturates... */
+ u32 int_counter;
+
+ /* ref count for each pkey */
+ atomic_t pkeyrefs[4];
+
+ /*
+ * this address is mapped readonly into user processes so they can
+ * get status cheaply, whenever they want. One qword of status per port
+ */
+ u64 *statusp;
+
+ /* SendDMA related entries */
+ spinlock_t sdma_lock;
+ struct qib_sdma_state sdma_state;
+ unsigned long sdma_buf_jiffies;
+ struct qib_sdma_desc *sdma_descq;
+ u64 sdma_descq_added;
+ u64 sdma_descq_removed;
+ u16 sdma_descq_cnt;
+ u16 sdma_descq_tail;
+ u16 sdma_descq_head;
+ u16 sdma_next_intr;
+ u16 sdma_reset_wait;
+ u8 sdma_generation;
+ struct tasklet_struct sdma_sw_clean_up_task;
+ struct list_head sdma_activelist;
+
+ dma_addr_t sdma_descq_phys;
+ volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_head_phys;
+
+ wait_queue_head_t state_wait; /* for state_wanted */
+
+ /* HoL blocking for SMP replies */
+ unsigned hol_state;
+ struct timer_list hol_timer;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+ * restricted range. For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+ /* last ibcstatus. opaque outside chip-specific code */
+ u64 lastibcstat;
+
+ /* these are the "32 bit" regs */
+
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
+ unsigned long p_sendctrl; /* shadow per-port sendctrl */
+
+ u32 ibmtu; /* The MTU programmed for this unit */
+ /*
+ * Current max size IB packet (in bytes) including IB headers, that
+ * we can send. Changes when ibmtu changes.
+ */
+ u32 ibmaxlen;
+ /*
+ * ibmaxlen at init time, limited by chip and by receive buffer
+ * size. Not changed after init.
+ */
+ u32 init_ibmaxlen;
+ /* LID programmed for this instance */
+ u16 lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 pkeys[4];
+ /* LID mask control */
+ u8 lmc;
+ u8 link_width_supported;
+ u8 link_speed_supported;
+ u8 link_width_enabled;
+ u8 link_speed_enabled;
+ u8 link_width_active;
+ u8 link_speed_active;
+ u8 vls_supported;
+ u8 vls_operational;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 rx_pol_inv;
+
+ u8 hw_pidx; /* physical port index */
+ u8 port; /* IB port number and index into dd->pports - 1 */
+
+ u8 delay_mult;
+
+ /* used to override LED behavior */
+ u8 led_override; /* Substituted for normal value, if non-zero */
+ u16 led_override_timeoff; /* delta to next timer event */
+ u8 led_override_vals[2]; /* Alternates per blink-frame */
+ u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+ atomic_t led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list led_override_timer;
+ struct xmit_wait cong_stats;
+ struct timer_list symerr_clear_timer;
+};
+
+/* Observers. Not to be taken lightly, possibly not to ship. */
+/*
+ * If a diag read or write is to (bottom <= offset <= top),
+ * the "hook" is called, allowing, e.g., shadows to be
+ * updated in sync with the driver. struct diag_observer
+ * is the "visible" part.
+ */
+struct diag_observer;
+
+typedef int (*diag_hook) (struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32);
+
+struct diag_observer {
+ diag_hook hook;
+ u32 bottom;
+ u32 top;
+};
+
+extern int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op);
+
+/* Only declared here, not defined. Private to diags */
+struct diag_observer_list_elt;
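
A minimal sketch of hooking a register window with an observer; illustrative only, not part of this patch. The window offsets are made up and the hook's return value is interpreted by the diag code, which is not shown here.

/* Illustrative only: watch diag accesses to a hypothetical 0x100-0x1ff
 * register window; a real hook would update driver shadows from *data. */
static int example_diag_hook(struct qib_devdata *dd,
			     const struct diag_observer *op,
			     u32 offs, u64 *data, u64 mask, int only_32)
{
	return 0;	/* return value semantics are defined by qib_diag.c */
}

static const struct diag_observer example_observer = {
	.hook	= example_diag_hook,
	.bottom	= 0x100,
	.top	= 0x1ff,
};

/* during chip-specific init:  qib_register_observer(dd, &example_observer); */
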
+
+/* device data struct now contains only "general per-device" info.
+ * Fields related to a physical IB port are in a qib_pportdata struct
+ * (described above), while fields only used by a particular chip-type are in
+ * a qib_chipdata struct, whose contents are opaque to this file.
+ */
+struct qib_devdata {
+ struct qib_ibdev verbs_dev; /* must be first */
+ struct list_head list;
+ /* pointers to related structs for this device */
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev *user_cdev;
+ struct cdev *diag_cdev;
+ struct device *user_device;
+ struct device *diag_device;
+
+ /* mem-mapped pointer to base of chip regs */
+ u64 __iomem *kregbase;
+ /* end of mem-mapped chip space excluding sendbuf and user regs */
+ u64 __iomem *kregend;
+ /* physical address of chip for io_remap, etc. */
+ resource_size_t physaddr;
+ /* qib_cfgctxts pointers */
+ struct qib_ctxtdata **rcd; /* Receive Context Data */
+
+ /* qib_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct qib_pportdata *pport;
+ struct qib_chip_specific *cspec; /* chip-specific */
+
+ /* kvirt address of 1st 2k pio buffer */
+ void __iomem *pio2kbase;
+ /* kvirt address of 1st 4k pio buffer */
+ void __iomem *pio4kbase;
+ /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
+ void __iomem *piobase;
+ /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
+ u64 __iomem *userbase;
+ /*
+ * points to area where PIOavail registers will be DMA'ed.
+ * Has to be on a page of its own, because the page will be
+ * mapped into user program space. This copy is *ONLY* ever
+ * written by DMA, not by the driver! Need a copy per device
+ * when we get to multiple devices
+ */
+ volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
+ /* physical address where updates occur */
+ dma_addr_t pioavailregs_phys;
+
+ /* device-specific implementations of functions needed by
+ * common code. Contrary to previous consensus, we can't
+ * really just point to a device-specific table, because we
+ * may need to "bend", e.g. *_f_put_tid
+ */
+ /* fallback to alternate interrupt type if possible */
+ int (*f_intr_fallback)(struct qib_devdata *);
+ /* hard reset chip */
+ int (*f_reset)(struct qib_devdata *);
+ void (*f_quiet_serdes)(struct qib_pportdata *);
+ int (*f_bringup_serdes)(struct qib_pportdata *);
+ int (*f_early_init)(struct qib_devdata *);
+ void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
+ void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
+ u32, unsigned long);
+ void (*f_cleanup)(struct qib_devdata *);
+ void (*f_setextled)(struct qib_pportdata *, u32);
+ /* fill out chip-specific fields */
+ int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
+ /* free irq */
+ void (*f_free_irq)(struct qib_devdata *);
+ struct qib_message_header *(*f_get_msgheader)
+ (struct qib_devdata *, __le32 *);
+ void (*f_config_ctxts)(struct qib_devdata *);
+ int (*f_get_ib_cfg)(struct qib_pportdata *, int);
+ int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
+ int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
+ int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
+ int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
+ u32 (*f_iblink_state)(u64);
+ u8 (*f_ibphys_portstate)(u64);
+ void (*f_xgxs_reset)(struct qib_pportdata *);
+ /* per chip actions needed for IB Link up/down changes */
+ int (*f_ib_updown)(struct qib_pportdata *, int, u64);
+ u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
+ /* Read/modify/write of GPIO pins (potentially chip-specific) */
+ int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
+ u32 mask);
+ /* Enable writes to config EEPROM (if supported) */
+ int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
+ /*
+ * modify rcvctrl shadow[s] and write to appropriate chip-regs.
+ * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
+ * (ctxt == -1) means "all contexts", only meaningful for
+ * clearing. Could remove if chip_spec shutdown properly done.
+ */
+ void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
+ int ctxt);
+ /* Read/modify/write sendctrl appropriately for op and port. */
+ void (*f_sendctrl)(struct qib_pportdata *, u32 op);
+ void (*f_set_intr_state)(struct qib_devdata *, u32);
+ void (*f_set_armlaunch)(struct qib_devdata *, u32);
+ void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
+ int (*f_late_initreg)(struct qib_devdata *);
+ int (*f_init_sdma_regs)(struct qib_pportdata *);
+ u16 (*f_sdma_gethead)(struct qib_pportdata *);
+ int (*f_sdma_busy)(struct qib_pportdata *);
+ void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
+ void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
+ void (*f_sdma_hw_start_up)(struct qib_pportdata *);
+ void (*f_sdma_init_early)(struct qib_pportdata *);
+ void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
+ void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+ u32 (*f_hdrqempty)(struct qib_ctxtdata *);
+ u64 (*f_portcntr)(struct qib_pportdata *, u32);
+ u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
+ u64 **);
+ u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
+ char **, u64 **);
+ u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
+ void (*f_initvl15_bufs)(struct qib_devdata *);
+ void (*f_init_ctxt)(struct qib_ctxtdata *);
+ void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *);
+ void (*f_writescratch)(struct qib_devdata *, u32);
+ int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+
+ char *boardname; /* human readable board info */
+
+ /* template for writing TIDs */
+ u64 tidtemplate;
+ /* value to write to free TIDs */
+ u64 tidinvalid;
+
+ /* number of registers used for pioavail */
+ u32 pioavregs;
+ /* device (not port) flags, basically device capabilities */
+ u32 flags;
+ /* last buffer for user use */
+ u32 lastctxt_piobuf;
+
+ /* saturating counter of (non-port-specific) device interrupts */
+ u32 int_counter;
+
+ /* pio bufs allocated per ctxt */
+ u32 pbufsctxt;
+ /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
+ u32 ctxts_extrabuf;
+ /*
+ * number of ctxts configured as max; zero is set to number chip
+ * supports, less gives more pio bufs/ctxt, etc.
+ */
+ u32 cfgctxts;
+
+ /*
+ * hint that we should update pioavailshadow before
+ * looking for a PIO buffer
+ */
+ u32 upd_pio_shadow;
+
+ /* internal debugging stats */
+ u32 maxpkts_call;
+ u32 avgpkts_call;
+ u64 nopiobufs;
+
+ /* PCI Vendor ID (here for NodeInfo) */
+ u16 vendorid;
+ /* PCI Device ID (here for NodeInfo) */
+ u16 deviceid;
+ /* for write combining settings */
+ unsigned long wc_cookie;
+ unsigned long wc_base;
+ unsigned long wc_len;
+
+ /* shadow copy of struct page *'s for exp tid pages */
+ struct page **pageshadow;
+ /* shadow copy of dma handles for exp tid pages */
+ dma_addr_t *physshadow;
+ u64 __iomem *egrtidbase;
+ spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
+ /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
+ spinlock_t uctxt_lock; /* rcd and user context changes */
+ /*
+ * per unit status, see also portdata statusp
+ * mapped readonly into user processes so they can get unit and
+ * IB link status cheaply
+ */
+ u64 *devstatusp;
+ char *freezemsg; /* freeze msg if hw error put chip in freeze */
+ u32 freezelen; /* max length of freezemsg */
+ /* timer used to prevent stats overflow, error throttling, etc. */
+ struct timer_list stats_timer;
+
+ /* timer to verify interrupts work, and fallback if possible */
+ struct timer_list intrchk_timer;
+ unsigned long ureg_align; /* user register alignment */
+
+ /*
+ * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
+ * pio_writing.
+ */
+ spinlock_t pioavail_lock;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+ * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+ * restricted range. For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+
+ unsigned long pioavailshadow[6];
+ /* bitmap of send buffers available for the kernel to use with PIO. */
+ unsigned long pioavailkernel[6];
+ /* bitmap of send buffers which need to be disarmed. */
+ unsigned long pio_need_disarm[3];
+ /* bitmap of send buffers which are being written to. */
+ unsigned long pio_writing[3];
+ /* kr_revision shadow */
+ u64 revision;
+ /* Base GUID for device (from eeprom, network order) */
+ __be64 base_guid;
+
+ /*
+ * kr_sendpiobufbase value (chip offset of pio buffers), and the
+ * base of the 2KB buffers (user processes only use 2K)
+ */
+ u64 piobufbase;
+ u32 pio2k_bufbase;
+
+ /* these are the "32 bit" regs */
+
+ /* number of GUIDs in the flash for this interface */
+ u32 nguid;
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long rcvctrl; /* shadow per device rcvctrl */
+ unsigned long sendctrl; /* shadow per device sendctrl */
+
+ /* value we put in kr_rcvhdrcnt */
+ u32 rcvhdrcnt;
+ /* value we put in kr_rcvhdrsize */
+ u32 rcvhdrsize;
+ /* value we put in kr_rcvhdrentsize */
+ u32 rcvhdrentsize;
+ /* kr_ctxtcnt value */
+ u32 ctxtcnt;
+ /* kr_pagealign value */
+ u32 palign;
+ /* number of "2KB" PIO buffers */
+ u32 piobcnt2k;
+ /* size in bytes of "2KB" PIO buffers */
+ u32 piosize2k;
+ /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
+ u32 piosize2kmax_dwords;
+ /* number of "4KB" PIO buffers */
+ u32 piobcnt4k;
+ /* size in bytes of "4KB" PIO buffers */
+ u32 piosize4k;
+ /* kr_rcvegrbase value */
+ u32 rcvegrbase;
+ /* kr_rcvtidbase value */
+ u32 rcvtidbase;
+ /* kr_rcvtidcnt value */
+ u32 rcvtidcnt;
+ /* kr_userregbase */
+ u32 uregbase;
+ /* shadow the control register contents */
+ u32 control;
+
+ /* chip address space used by 4k pio buffers */
+ u32 align4k;
+ /* size of each rcvegrbuffer */
+ u32 rcvegrbufsize;
+ /* localbus width (1, 2, 4, 8, 16, 32) from config space */
+ u32 lbus_width;
+ /* localbus speed in MHz */
+ u32 lbus_speed;
+ int unit; /* unit # of this chip */
+
+ /* start of CHIP_SPEC move to chipspec, but need code changes */
+ /* low and high portions of MSI capability/vector */
+ u32 msi_lo;
+ /* saved after PCIe init for restore after reset */
+ u32 msi_hi;
+ /* MSI data (vector) saved for restore */
+ u16 msi_data;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar0;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar1;
+ u64 rhdrhead_intr_off;
+
+ /*
+ * ASCII serial number, from flash, large enough for original
+ * all digit strings, and longer QLogic serial number format
+ */
+ u8 serial[16];
+ /* human readable board version */
+ u8 boardversion[96];
+ u8 lbus_info[32]; /* human readable localbus info */
+ /* chip major rev, from qib_revision */
+ u8 majrev;
+ /* chip minor rev, from qib_revision */
+ u8 minrev;
+
+ /* Misc small ints */
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ u8 n_krcv_queues;
+ u8 qpn_mask;
+ u8 skip_kctxt_mask;
+
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+
+ /*
+ * GPIO pins for twsi-connected devices, and device code for eeprom
+ */
+ u8 gpio_sda_num;
+ u8 gpio_scl_num;
+ u8 twsi_eeprom_dev;
+ u8 board_atten;
+
+ /* Support (including locks) for EEPROM logging of errors and time */
+ /* control access to actual counters, timer */
+ spinlock_t eep_st_lock;
+ /* control high-level access to EEPROM */
+ struct mutex eep_lock;
+ uint64_t traffic_wds;
+ /* active time is kept in seconds, but logged in hours */
+ atomic_t active_time;
+ /* Below are nominal shadow of EEPROM, new since last EEPROM update */
+ uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+ uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+ uint16_t eep_hrs;
+ /*
+ * masks for which bits of errs, hwerrs that cause
+ * each of the counters to increment.
+ */
+ struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
+ struct qib_diag_client *diag_client;
+ spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
+ struct diag_observer_list_elt *diag_observer_list;
+
+ u8 psxmitwait_supported;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+};
+
+/* hol_state values */
+#define QIB_HOL_UP 0
+#define QIB_HOL_INIT 1
+
+#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
+#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
+#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
+#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
+#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
+
+/* operation types for f_txchk_change() */
+#define TXCHK_CHG_TYPE_DIS1 3
+#define TXCHK_CHG_TYPE_ENAB1 2
+#define TXCHK_CHG_TYPE_KERN 1
+#define TXCHK_CHG_TYPE_USER 0
+
+#define QIB_CHASE_TIME msecs_to_jiffies(145)
+#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
+
+/* Private data for file operations */
+struct qib_filedata {
+ struct qib_ctxtdata *rcd;
+ unsigned subctxt;
+ unsigned tidcursor;
+ struct qib_user_sdma_queue *pq;
+ int rec_cpu_num; /* for cpu affinity; -1 if none */
+};
+
+extern struct list_head qib_dev_list;
+extern spinlock_t qib_devs_lock;
+extern struct qib_devdata *qib_lookup(int unit);
+extern u32 qib_cpulist_count;
+extern unsigned long *qib_cpulist;
+
+extern unsigned qib_wc_pat;
+int qib_init(struct qib_devdata *, int);
+int init_chip_wc_pat(struct qib_devdata *dd, u32);
+int qib_enable_wc(struct qib_devdata *dd);
+void qib_disable_wc(struct qib_devdata *dd);
+int qib_count_units(int *npresentp, int *nupp);
+int qib_count_active_units(void);
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp);
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
+int qib_dev_init(void);
+void qib_dev_cleanup(void);
+
+int qib_diag_add(struct qib_devdata *);
+void qib_diag_remove(struct qib_devdata *);
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
+void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
+
+int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
+void qib_bad_intrstatus(struct qib_devdata *);
+void qib_handle_urcv(struct qib_devdata *, u64);
+
+/* clean up any per-chip chip-specific stuff */
+void qib_chip_cleanup(struct qib_devdata *);
+/* clean up any chip type-specific stuff */
+void qib_chip_done(void);
+
+/* check to see if we have to force ordering for write combining */
+int qib_unordered_wc(void);
+void qib_pio_copy(void __iomem *to, const void *from, size_t count);
+
+void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
+void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
+void qib_cancel_sends(struct qib_pportdata *);
+
+int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
+int qib_setup_eagerbufs(struct qib_ctxtdata *);
+void qib_set_ctxtcnt(struct qib_devdata *);
+int qib_create_ctxts(struct qib_devdata *dd);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
+void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
+
+u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
+int qib_reset_device(int);
+int qib_wait_linkstate(struct qib_pportdata *, u32, int);
+int qib_set_linkstate(struct qib_pportdata *, u8);
+int qib_set_mtu(struct qib_pportdata *, u16);
+int qib_set_lid(struct qib_pportdata *, u32, u8);
+void qib_hol_down(struct qib_pportdata *);
+void qib_hol_init(struct qib_pportdata *);
+void qib_hol_up(struct qib_pportdata *);
+void qib_hol_event(unsigned long);
+void qib_disable_after_error(struct qib_devdata *);
+int qib_set_uevent_bits(struct qib_pportdata *, const int);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define ctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->rcd)
+#define subctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->subctxt)
+#define tidcursor_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->tidcursor)
+#define user_sdma_queue_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->pq)
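
A small sketch of how these accessors are used from the character-device file operations, which stash a struct qib_filedata pointer in file->private_data at open time; illustrative only, not part of this patch:

#include <linux/errno.h>
#include <linux/fs.h>

/* Illustrative only: recover the per-open context from a struct file. */
static int example_check_ctxt(struct file *fp)
{
	struct qib_ctxtdata *rcd = ctxt_fp(fp);

	if (!rcd)
		return -EINVAL;	/* no context attached to this open */
	/* subctxt_fp(fp), tidcursor_fp(fp) and user_sdma_queue_fp(fp)
	 * follow the same pattern for the other per-open fields */
	return 0;
}
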
+
+static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
+{
+ return ppd->dd;
+}
+
+static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
+{
+ return container_of(dev, struct qib_devdata, verbs_dev);
+}
+
+static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+ return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
+{
+ return container_of(ibp, struct qib_pportdata, ibport_data);
+}
+
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ WARN_ON(pidx >= dd->num_pports);
+ return &dd->pport[pidx].ibport_data;
+}
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
+#define QIB_INITTED 0x2 /* chip and driver up and initted */
+#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
+#define QIB_PRESENT 0x8 /* chip accesses can be done */
+#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
+#define QIB_HAS_THRESH_UPDATE 0x40
+#define QIB_HAS_SDMA_TIMEOUT 0x80
+#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
+#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA enabled */
+#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
+#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
+#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
+#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
+#define QIB_BADINTR 0x8000 /* severe interrupt problems */
+#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
+#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
+
+/*
+ * values for ppd->lflags (_ib_port_ related flags)
+ */
+#define QIBL_LINKV 0x1 /* IB link state valid */
+#define QIBL_LINKDOWN 0x8 /* IB link is down */
+#define QIBL_LINKINIT 0x10 /* IB link level is up */
+#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
+#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
+/* leave a gap for more IB-link state */
+#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
+#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
+#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
+ * Do not try to bring up */
+#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
+
+
+/* ctxt_flag bit offsets */
+ /* waiting for a packet to arrive */
+#define QIB_CTXT_WAITING_RCV 2
+ /* master has not finished initializing */
+#define QIB_CTXT_MASTER_UNINIT 4
+ /* waiting for an urgent packet to arrive */
+#define QIB_CTXT_WAITING_URG 5
+
+/* free up any allocated data at closes */
+void qib_free_data(struct qib_ctxtdata *dd);
+void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
+ u32, struct qib_ctxtdata *);
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+void qib_free_devdata(struct qib_devdata *);
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
+
+#define QIB_TWSI_NO_DEV 0xFF
+/* Below qib_twsi_ functions must be called with eep_lock held */
+int qib_twsi_reset(struct qib_devdata *dd);
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int len);
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len);
+void qib_get_eeprom_info(struct qib_devdata *);
+int qib_update_eeprom_log(struct qib_devdata *dd);
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+void qib_dump_lookup_output_queue(struct qib_devdata *);
+void qib_force_pio_avail_update(struct qib_devdata *);
+void qib_clear_symerror_on_linkup(unsigned long opaque);
+
+/*
+ * Set LED override, only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
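
A one-line usage sketch of the override interface, per the bit meanings above (illustrative only, not part of this patch):

/* Illustrative only: force both the link-training and link LEDs on. */
static void example_leds_on(struct qib_pportdata *ppd)
{
	qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);
}
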
+
+/* send dma routines */
+int qib_setup_sdma(struct qib_pportdata *);
+void qib_teardown_sdma(struct qib_pportdata *);
+void __qib_sdma_intr(struct qib_pportdata *);
+void qib_sdma_intr(struct qib_pportdata *);
+int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+ u32, struct qib_verbs_txreq *);
+/* ppd->sdma_lock should be locked before calling this. */
+int qib_sdma_make_progress(struct qib_pportdata *dd);
+
+/* must be called under qib_sdma_lock */
+static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
+{
+ return ppd->sdma_descq_cnt -
+ (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
+}
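
As the comment above says, the free count is derived from the added/removed indices and must be read under ppd->sdma_lock; a minimal sketch of a correctly locked caller (illustrative only, not part of this patch):

#include <linux/spinlock.h>

/* Illustrative only: sample the descriptor free count under sdma_lock. */
static u16 example_sdma_freecnt(struct qib_pportdata *ppd)
{
	unsigned long flags;
	u16 free;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	free = qib_sdma_descq_freecnt(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return free;
}
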
+
+static inline int __qib_sdma_running(struct qib_pportdata *ppd)
+{
+ return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
+}
+int qib_sdma_running(struct qib_pportdata *);
+
+void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+
+/*
+ * number of words used for protocol header if not set by qib_userinit();
+ */
+#define QIB_DFLT_RCVHDRSIZE 9
+
+/*
+ * We need to be able to handle an IB header of at least 24 dwords.
+ * We need the rcvhdrq large enough to handle largest IB header, but
+ * still have room for a 2KB MTU standard IB packet.
+ * Additionally, some processor/memory controller combinations
+ * benefit quite strongly from having the DMA'ed data be cacheline
+ * aligned and a cacheline multiple, so we set the size to 32 dwords
+ * (2 64-byte primary cachelines for pretty much all processors of
+ * interest). The alignment hurts nothing, other than using somewhat
+ * more memory.
+ */
+#define QIB_RCVHDR_ENTSIZE 32
+
+int qib_get_user_pages(unsigned long, size_t, struct page **);
+void qib_release_user_pages(struct page **, size_t);
+int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
+int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
+void qib_sendbuf_done(struct qib_devdata *, unsigned);
+
+static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ /*
+	 * volatile because it's a DMA target from the chip; the routine is
+	 * inlined, and we don't want register caching or reordering.
+ */
+ return (u32) le64_to_cpu(
+ *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
+}
+
+static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
+{
+ const struct qib_devdata *dd = rcd->dd;
+ u32 hdrqtail;
+
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ __le32 *rhf_addr;
+ u32 seq;
+
+ rhf_addr = (__le32 *) rcd->rcvhdrq +
+ rcd->head + dd->rhf_offset;
+ seq = qib_hdrget_seq(rhf_addr);
+ hdrqtail = rcd->head;
+ if (seq == rcd->seq_cnt)
+ hdrqtail++;
+ } else
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+
+ return hdrqtail;
+}
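A minimal sketch (not from this patch) of how a receive loop might consume headers with the helper above; next_hdrq_index() is a hypothetical helper that advances rcd->head by one entry.

	u32 tail = qib_get_hdrqtail(rcd);

	while (rcd->head != tail) {
		/* ... process the packet header at rcd->head ... */
		rcd->head = next_hdrq_index(rcd, rcd->head); /* hypothetical helper */
		tail = qib_get_hdrqtail(rcd); /* re-read; the chip may have advanced it */
	}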
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_qib_version[];
+
+int qib_device_create(struct qib_devdata *);
+void qib_device_remove(struct qib_devdata *);
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj);
+int qib_verbs_register_sysfs(struct qib_devdata *);
+void qib_verbs_unregister_sysfs(struct qib_devdata *);
+/* Hook for sysfs read of QSFP */
+extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
+
+int __init qib_init_qibfs(void);
+int __exit qib_exit_qibfs(void);
+
+int qibfs_add(struct qib_devdata *);
+int qibfs_remove(struct qib_devdata *);
+
+int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
+int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
+ const struct pci_device_id *);
+void qib_pcie_ddcleanup(struct qib_devdata *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_reinit_intr(struct qib_devdata *);
+void qib_enable_intx(struct pci_dev *);
+void qib_nomsi(struct qib_devdata *);
+void qib_nomsix(struct qib_devdata *);
+void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
+void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+
+/*
+ * dma_addr wrappers - all 0's invalid for hw
+ */
+dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
+ size_t, int);
+const char *qib_get_unit_name(int unit);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+#if defined(CONFIG_X86_64)
+#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+#else
+#define qib_flush_wc() wmb() /* no reorder around wc flush */
+#endif
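An illustrative sketch only, assuming piobuf is a write-combining PIO send buffer (for example one obtained via qib_getsendbuf_range()) and pbc is the per-buffer control word; both names are assumptions:

	writeq(pbc, (u64 __iomem *)piobuf);	/* store the PBC word */
	/* ... copy the payload dwords into piobuf ... */
	qib_flush_wc();		/* drain WC buffers before the chip launches the packet */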
+
+/* global module parameter variables */
+extern unsigned qib_ibmtu;
+extern ushort qib_cfgctxts;
+extern ushort qib_num_cfg_vls;
+extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
+extern unsigned qib_n_krcv_queues;
+extern unsigned qib_sdma_fetch_arb;
+extern unsigned qib_compat_ddr_negotiate;
+extern int qib_special_trigger;
+
+extern struct mutex qib_mutex;
+
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
+#define QIB_DRV_NAME "ib_qib"
+#define QIB_USER_MINOR_BASE 0
+#define QIB_TRACE_MINOR 127
+#define QIB_DIAGPKT_MINOR 128
+#define QIB_DIAG_MINOR_BASE 129
+#define QIB_NMINORS 255
+
+#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
+#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
+#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
+
+/*
+ * qib_early_err is used (only!) to print early errors before devdata is
+ * allocated, or when dd->pcidev may not be valid, and at the tail end of
+ * cleanup when devdata may have been freed, etc. qib_dev_porterr is
+ * the same as qib_dev_err, but is used when the message really needs
+ * the IB port# to be definitive as to what's happening.
+ * All of these go to the trace log, and the trace log entry is done
+ * first to avoid possible serial port delays from printk.
+ */
+#define qib_early_err(dev, fmt, ...) \
+ do { \
+ dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_err(dd, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_porterr(dd, port, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+ qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_devinfo(pcidev, fmt, ...) \
+ do { \
+ dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
+ } while (0)
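Illustrative uses of the macros above (not part of this patch; the names pdev, ret and ppd->port are assumptions):

	qib_early_err(&pdev->dev, "Unable to map PCI BAR\n");
	qib_dev_err(dd, "EEPROM read failed, err %d\n", ret);
	qib_dev_porterr(dd, ppd->port, "link went down unexpectedly\n");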
+
+/*
+ * This is used for formatting hw error messages.
+ */
+struct qib_hwerror_msgs {
+ u64 mask;
+ const char *msg;
+};
+
+#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
+
+/* in qib_intr.c... */
+void qib_format_hwerrors(u64 hwerrs,
+ const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t lmsg);
+#endif /* _QIB_KERNEL_H */
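As an illustrative sketch (not part of this patch), a driver-side table built with QLOGIC_IB_HWE_MSG and decoded with qib_format_hwerrors() might look like the following; the bit positions 54 and 30 simply mirror the PowerOnBISTFailed (0x36) and PcieCplTimeout (0x1E) LSBs from the 6120 register definitions below, and the variables are assumptions.

	static const struct qib_hwerror_msgs sample_hwe_msgs[] = {
		QLOGIC_IB_HWE_MSG(1ULL << 54, "Power-on BIST failed"),
		QLOGIC_IB_HWE_MSG(1ULL << 30, "PCIe completion timeout"),
	};
	char msg[128];
	u64 hwerrs;	/* assume the HwErrStatus register value was read into hwerrs */

	qib_format_hwerrors(hwerrs, sample_hwe_msgs, ARRAY_SIZE(sample_hwe_msgs),
			    msg, sizeof(msg));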
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 00000000000..e16cb6f7de2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_6120_Revision_OFFS 0x0
+#define QIB_6120_Revision_R_Simulator_LSB 0x3F
+#define QIB_6120_Revision_R_Simulator_RMASK 0x1
+#define QIB_6120_Revision_Reserved_LSB 0x28
+#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_Revision_BoardID_LSB 0x20
+#define QIB_6120_Revision_BoardID_RMASK 0xFF
+#define QIB_6120_Revision_R_SW_LSB 0x18
+#define QIB_6120_Revision_R_SW_RMASK 0xFF
+#define QIB_6120_Revision_R_Arch_LSB 0x10
+#define QIB_6120_Revision_R_Arch_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_6120_Control_OFFS 0x8
+#define QIB_6120_Control_TxLatency_LSB 0x4
+#define QIB_6120_Control_TxLatency_RMASK 0x1
+#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_6120_Control_LinkEn_LSB 0x2
+#define QIB_6120_Control_LinkEn_RMASK 0x1
+#define QIB_6120_Control_FreezeMode_LSB 0x1
+#define QIB_6120_Control_FreezeMode_RMASK 0x1
+#define QIB_6120_Control_SyncReset_LSB 0x0
+#define QIB_6120_Control_SyncReset_RMASK 0x1
+
+#define QIB_6120_PageAlign_OFFS 0x10
+
+#define QIB_6120_PortCnt_OFFS 0x18
+
+#define QIB_6120_SendRegBase_OFFS 0x30
+
+#define QIB_6120_UserRegBase_OFFS 0x38
+
+#define QIB_6120_CntrRegBase_OFFS 0x40
+
+#define QIB_6120_Scratch_OFFS 0x48
+#define QIB_6120_Scratch_TopHalf_LSB 0x20
+#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
+#define QIB_6120_Scratch_BottomHalf_LSB 0x0
+#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
+
+#define QIB_6120_IntBlocked_OFFS 0x60
+#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
+#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
+#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved_LSB 0xF
+#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
+#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
+
+#define QIB_6120_IntMask_OFFS 0x68
+#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved_LSB 0x11
+#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
+#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
+#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
+#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
+#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
+#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
+#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved1_LSB 0x5
+#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
+#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
+#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
+#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
+#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
+#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
+
+#define QIB_6120_IntStatus_OFFS 0x70
+#define QIB_6120_IntStatus_Error_LSB 0x1F
+#define QIB_6120_IntStatus_Error_RMASK 0x1
+#define QIB_6120_IntStatus_PioSent_LSB 0x1E
+#define QIB_6120_IntStatus_PioSent_RMASK 0x1
+#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved_LSB 0xF
+#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
+#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
+#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
+#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
+#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
+#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved1_LSB 0x5
+#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
+#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
+#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
+#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
+#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
+#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
+#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
+
+#define QIB_6120_IntClear_OFFS 0x78
+#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved_LSB 0xF
+#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
+#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
+#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
+#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
+#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
+#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved1_LSB 0x5
+#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
+#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
+#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
+#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
+#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
+#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
+
+#define QIB_6120_ErrMask_OFFS 0x80
+#define QIB_6120_ErrMask_Reserved_LSB 0x34
+#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved1_LSB 0x26
+#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved2_LSB 0x12
+#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_6120_ErrStatus_OFFS 0x88
+#define QIB_6120_ErrStatus_Reserved_LSB 0x34
+#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
+#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
+#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_6120_ErrClear_OFFS 0x90
+#define QIB_6120_ErrClear_Reserved_LSB 0x34
+#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved1_LSB 0x26
+#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved2_LSB 0x12
+#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_6120_HwErrMask_OFFS 0x98
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
+#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
+#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
+#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
+#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
+#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
+
+#define QIB_6120_HwErrStatus_OFFS 0xA0
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
+#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
+#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
+#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
+#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
+#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_HwErrClear_OFFS 0xA8
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
+#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
+#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
+#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
+#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
+#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
+
+#define QIB_6120_HwDiagCtrl_OFFS 0xB0
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
+#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_IBCStatus_OFFS 0xC0
+#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
+#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
+#define QIB_6120_IBCStatus_Reserved_LSB 0x7
+#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_IBCStatus_LinkState_LSB 0x4
+#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
+#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
+
+#define QIB_6120_IBCCtrl_OFFS 0xC8
+#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
+#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
+#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
+#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
+#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_6120_EXTStatus_OFFS 0xD0
+#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved_LSB 0x20
+#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
+#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
+#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
+#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_6120_EXTCtrl_OFFS 0xD8
+#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
+#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_6120_GPIOOut_OFFS 0xE0
+
+#define QIB_6120_GPIOMask_OFFS 0xE8
+
+#define QIB_6120_GPIOStatus_OFFS 0xF0
+
+#define QIB_6120_GPIOClear_OFFS 0xF8
+
+#define QIB_6120_RcvCtrl_OFFS 0x100
+#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
+#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
+#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
+#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
+#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
+#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
+#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
+#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
+#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
+
+#define QIB_6120_RcvBTHQP_OFFS 0x108
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
+#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
+#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_6120_RcvHdrSize_OFFS 0x110
+
+#define QIB_6120_RcvHdrCnt_OFFS 0x118
+
+#define QIB_6120_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_6120_RcvTIDBase_OFFS 0x128
+
+#define QIB_6120_RcvTIDCnt_OFFS 0x130
+
+#define QIB_6120_RcvEgrBase_OFFS 0x138
+
+#define QIB_6120_RcvEgrCnt_OFFS 0x140
+
+#define QIB_6120_RcvBufBase_OFFS 0x148
+
+#define QIB_6120_RcvBufSize_OFFS 0x150
+
+#define QIB_6120_RxIntMemBase_OFFS 0x158
+
+#define QIB_6120_RxIntMemSize_OFFS 0x160
+
+#define QIB_6120_RcvPartitionKey_OFFS 0x168
+
+#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
+#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_6120_SendCtrl_OFFS 0x1C0
+#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
+#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
+#define QIB_6120_SendCtrl_Reserved_LSB 0x17
+#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
+#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
+#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
+#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
+#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
+#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
+#define QIB_6120_SendCtrl_Abort_LSB 0x0
+#define QIB_6120_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
+#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
+#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
+#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_6120_SendPIOSize_OFFS 0x1D0
+#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
+#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
+#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
+#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
+#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
+#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
+#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
+
+#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
+#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_6120_SendBufErr0_OFFS 0x240
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
+
+#define QIB_6120_RcvHdrAddr0_OFFS 0x280
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_SerdesCfg0_OFFS 0x3C0
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
+#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
+#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
+#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
+#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
+#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
+#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
+#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
+#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
+#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
+#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
+#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
+#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
+#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
+#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
+#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
+#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
+#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
+#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
+#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
+#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
+#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
+#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
+#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
+#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
+#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
+#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
+#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
+#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
+#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
+#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
+#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
+#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
+#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
+#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
+#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
+
+#define QIB_6120_SerdesStat_OFFS 0x3D0
+#define QIB_6120_SerdesStat_Reserved_LSB 0xC
+#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
+#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
+#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
+#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
+#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
+#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
+#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
+#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
+#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
+#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
+#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
+#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
+#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
+
+#define QIB_6120_XGXSCfg_OFFS 0x3D8
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
+#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
+#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
+#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
+#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
+#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
+#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
+#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
+#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
+#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
+#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
+#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
+#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
+
+#define QIB_6120_LBIntCnt_OFFS 0x12000
+
+#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
+
+#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
+
+#define QIB_6120_TxDataPktCnt_OFFS 0x12020
+
+#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
+
+#define QIB_6120_TxDwordCnt_OFFS 0x12030
+
+#define QIB_6120_TxLenErrCnt_OFFS 0x12038
+
+#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
+
+#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
+
+#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
+
+#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
+
+#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
+
+#define QIB_6120_RxDataPktCnt_OFFS 0x12068
+
+#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
+
+#define QIB_6120_RxDwordCnt_OFFS 0x12078
+
+#define QIB_6120_RxLenErrCnt_OFFS 0x12080
+
+#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
+
+#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
+
+#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
+
+#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
+
+#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
+
+#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
+
+#define QIB_6120_RxEBPCnt_OFFS 0x120B8
+
+#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
+
+#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
+
+#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
+
+#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
+
+#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
+
+#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
+
+#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
+
+#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
+
+#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
+
+#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
+
+#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
+
+#define QIB_6120_RcvEgrArray0_OFFS 0x14000
+
+#define QIB_6120_RcvTIDArray0_OFFS 0x54000
+
+#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_6120_RcvBuf1_OFFS 0x72000
+
+#define QIB_6120_RcvBuf2_OFFS 0x75000
+
+#define QIB_6120_RcvFlags_OFFS 0x77000
+
+#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_6120_RcvDMABuf_OFFS 0x7B000
+
+#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_6120_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_6120_PCIERetryBuf_OFFS 0x82000
+
+#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
+
+#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
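Each field in this generated file is described by an _LSB/_RMASK pair (bit position and right-justified mask). As an illustrative sketch, extracting the chip revision from a previously read Revision register value would look like:

	/* rev holds the 64-bit Revision register value (assumed read earlier) */
	u32 major = (rev >> QIB_6120_Revision_R_ChipRevMajor_LSB) &
		    QIB_6120_Revision_R_ChipRevMajor_RMASK;
	u32 minor = (rev >> QIB_6120_Revision_R_ChipRevMinor_LSB) &
		    QIB_6120_Revision_R_ChipRevMinor_RMASK;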
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 00000000000..ea0bfd896f9
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
+#ifndef _QIB_7220_H
+#define _QIB_7220_H
+/*
+ * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Register definitions auto-generated from the chip RTL */
+#include "qib_7220_regs.h"
+
+/* The number of eager receive TIDs for context zero. */
+#define IBA7220_KRCVEGRCNT 2048U
+
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_TXREVLANES 0x0d
+#define IB_7220_LT_STATE_CFGENH 0x10
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ spinlock_t sdepb_lock; /* serdes EPB bus */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 autoneg_tries;
+ u32 serdes_first_init_done;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ int irq;
+ u8 presets_needed;
+ u8 relock_timer_active;
+ char emsgbuf[128];
+ char sdmamsgbuf[192];
+ char bitsmsgbuf[64];
+ struct timer_list relock_timer;
+ unsigned int relock_interval; /* in jiffies */
+};
+
+struct qib_chippport_specific {
+ struct qib_pportdata pportdata;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct timer_list chase_timer;
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* kr_ibcctrl shadow */
+ u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
+ u64 chase_end;
+ u32 last_delay_mult;
+};
+
+/*
+ * This header file provides the declarations and common definitions,
+ * mostly for manipulation of the SerDes blocks within the IBA7220.
+ * The functions declared here should only be called from within other
+ * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
+ */
+int qib_sd7220_presets(struct qib_devdata *dd);
+int qib_sd7220_init(struct qib_devdata *dd);
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
+ int len, int offset);
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
+ int len, int offset);
+void qib_sd7220_clr_ibpar(struct qib_devdata *);
+/*
+ * Used below for the sdnum parameter, selecting one of the two sections
+ * used for PCIe, or the single SerDes used for IB, which is the
+ * only one currently used.
+ */
+#define IB_7220_SERDES 2
+
+int qib_sd7220_ib_load(struct qib_devdata *dd);
+int qib_sd7220_ib_vfy(struct qib_devdata *dd);
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase)
+ writeq(value, &dd->kregbase[regno]);
+}
+
+void set_7220_relock_poll(struct qib_devdata *, int);
+void shutdown_7220_relock_poll(struct qib_devdata *);
+void toggle_7220_rclkrls(struct qib_devdata *);
+
+
+#endif /* _QIB_7220_H */
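An illustrative read-modify-write using the accessors above; kr_control is a hypothetical kreg index for the Control register, and the LinkEn bit position comes from the generated definitions that follow.

	u64 val = qib_read_kreg64(dd, kr_control);	/* kr_control: hypothetical index */

	val |= 1ULL << QIB_7220_Control_LinkEn_LSB;	/* set the LinkEn bit */
	qib_write_kreg(dd, kr_control, val);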
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 00000000000..0da5bb750e5
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
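+
+/*
+ * Reader's note (illustrative, not part of the generated file): each
+ * register is described by a <Reg>_OFFS byte offset from the kregbase
+ * mapping, and each field by a <Reg>_<Field>_LSB bit position plus a
+ * right-justified <Reg>_<Field>_RMASK. For example, LinkState below has
+ * LSB 0x5 and RMASK 0x7, so it occupies bits 7:5 of IBCStatus and is
+ * extracted as (val >> 0x5) & 0x7. The chip-specific code is assumed to
+ * convert OFFS values to u64 register indices (OFFS / sizeof(u64))
+ * before using the kreg accessors.
+ */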
+
+#define QIB_7220_Revision_OFFS 0x0
+#define QIB_7220_Revision_R_Simulator_LSB 0x3F
+#define QIB_7220_Revision_R_Simulator_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_LSB 0x3E
+#define QIB_7220_Revision_R_Emulation_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7220_Revision_BoardID_LSB 0x20
+#define QIB_7220_Revision_BoardID_RMASK 0xFF
+#define QIB_7220_Revision_R_SW_LSB 0x18
+#define QIB_7220_Revision_R_SW_RMASK 0xFF
+#define QIB_7220_Revision_R_Arch_LSB 0x10
+#define QIB_7220_Revision_R_Arch_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7220_Control_OFFS 0x8
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
+#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7220_Control_Reserved_LSB 0x5
+#define QIB_7220_Control_Reserved_RMASK 0x1
+#define QIB_7220_Control_TxLatency_LSB 0x4
+#define QIB_7220_Control_TxLatency_RMASK 0x1
+#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7220_Control_LinkEn_LSB 0x2
+#define QIB_7220_Control_LinkEn_RMASK 0x1
+#define QIB_7220_Control_FreezeMode_LSB 0x1
+#define QIB_7220_Control_FreezeMode_RMASK 0x1
+#define QIB_7220_Control_SyncReset_LSB 0x0
+#define QIB_7220_Control_SyncReset_RMASK 0x1
+
+#define QIB_7220_PageAlign_OFFS 0x10
+
+#define QIB_7220_PortCnt_OFFS 0x18
+
+#define QIB_7220_SendRegBase_OFFS 0x30
+
+#define QIB_7220_UserRegBase_OFFS 0x38
+
+#define QIB_7220_CntrRegBase_OFFS 0x40
+
+#define QIB_7220_Scratch_OFFS 0x48
+
+#define QIB_7220_IntMask_OFFS 0x68
+#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
+#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
+#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
+#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
+#define QIB_7220_IntMask_Reserved_LSB 0x31
+#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
+#define QIB_7220_IntMask_JIntMask_LSB 0x1A
+#define QIB_7220_IntMask_JIntMask_RMASK 0x1
+#define QIB_7220_IntMask_Reserved1_LSB 0x11
+#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7220_IntStatus_OFFS 0x70
+#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
+#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
+#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
+#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved_LSB 0x31
+#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7220_IntStatus_Error_LSB 0x1F
+#define QIB_7220_IntStatus_Error_RMASK 0x1
+#define QIB_7220_IntStatus_PioSent_LSB 0x1E
+#define QIB_7220_IntStatus_PioSent_RMASK 0x1
+#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
+#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
+#define QIB_7220_IntStatus_JInt_LSB 0x1A
+#define QIB_7220_IntStatus_JInt_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved1_LSB 0x11
+#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7220_IntClear_OFFS 0x78
+#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
+#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
+#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
+#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved_LSB 0x31
+#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
+#define QIB_7220_IntClear_JIntClear_LSB 0x1A
+#define QIB_7220_IntClear_JIntClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved1_LSB 0x11
+#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
+
+#define QIB_7220_ErrMask_OFFS 0x80
+#define QIB_7220_ErrMask_Reserved_LSB 0x36
+#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_Reserved1_LSB 0x12
+#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7220_ErrStatus_OFFS 0x88
+#define QIB_7220_ErrStatus_Reserved_LSB 0x36
+#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
+#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
+#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
+#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
+#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
+#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
+#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_7220_ErrClear_OFFS 0x90
+#define QIB_7220_ErrClear_Reserved_LSB 0x36
+#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_Reserved1_LSB 0x12
+#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7220_HwErrMask_OFFS 0x98
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved_LSB 0x37
+#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
+#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
+#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
+#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
+
+#define QIB_7220_HwErrStatus_OFFS 0xA0
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
+#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
+#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
+#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
+#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_HwErrClear_OFFS 0xA8
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved_LSB 0x37
+#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
+#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
+#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
+#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
+
+#define QIB_7220_HwDiagCtrl_OFFS 0xB0
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
+#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_REG_0000B8_OFFS 0xB8
+
+#define QIB_7220_IBCStatus_OFFS 0xC0
+#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
+#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
+#define QIB_7220_IBCStatus_Reserved_LSB 0xE
+#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
+#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
+#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
+#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
+#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkState_LSB 0x5
+#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
+#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7220_IBCCtrl_OFFS 0xC8
+#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
+#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
+#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7220_EXTStatus_OFFS 0xD0
+#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved_LSB 0x20
+#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
+#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
+#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_7220_EXTCtrl_OFFS 0xD8
+#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
+#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_7220_GPIOOut_OFFS 0xE0
+
+#define QIB_7220_GPIOMask_OFFS 0xE8
+
+#define QIB_7220_GPIOStatus_OFFS 0xF0
+
+#define QIB_7220_GPIOClear_OFFS 0xF8
+
+#define QIB_7220_RcvCtrl_OFFS 0x100
+#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
+#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
+#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
+#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
+#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
+#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
+#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
+#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
+
+#define QIB_7220_RcvBTHQP_OFFS 0x108
+#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
+#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7220_RcvHdrSize_OFFS 0x110
+
+#define QIB_7220_RcvHdrCnt_OFFS 0x118
+
+#define QIB_7220_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_7220_RcvTIDBase_OFFS 0x128
+
+#define QIB_7220_RcvTIDCnt_OFFS 0x130
+
+#define QIB_7220_RcvEgrBase_OFFS 0x138
+
+#define QIB_7220_RcvEgrCnt_OFFS 0x140
+
+#define QIB_7220_RcvBufBase_OFFS 0x148
+
+#define QIB_7220_RcvBufSize_OFFS 0x150
+
+#define QIB_7220_RxIntMemBase_OFFS 0x158
+
+#define QIB_7220_RxIntMemSize_OFFS 0x160
+
+#define QIB_7220_RcvPartitionKey_OFFS 0x168
+
+#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
+#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
+#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
+
+#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
+#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7220_IBCDDRCtrl_OFFS 0x180
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
+#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
+#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
+#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7220_HRTBT_GUID_OFFS 0x188
+
+#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7220_JIntReload_OFFS 0x1B0
+#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
+#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
+#define QIB_7220_JIntReload_J_reload_LSB 0x0
+#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
+
+#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
+#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
+#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
+#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7220_SendCtrl_OFFS 0x1C0
+#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
+#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
+#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
+#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
+#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
+#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
+#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
+#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
+#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
+#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
+#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
+#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
+#define QIB_7220_SendCtrl_Abort_LSB 0x0
+#define QIB_7220_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_7220_SendBufBase_OFFS 0x1C8
+#define QIB_7220_SendBufBase_Reserved_LSB 0x35
+#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
+#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7220_SendBufSize_OFFS 0x1D0
+#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
+#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
+#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7220_SendBufCnt_OFFS 0x1D8
+#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
+#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
+#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
+#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
+#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_7220_TxIntMemBase_OFFS 0x1E8
+
+#define QIB_7220_TxIntMemSize_OFFS 0x1F0
+
+#define QIB_7220_SendDmaBase_OFFS 0x1F8
+#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
+#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
+#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaLenGen_OFFS 0x200
+#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
+#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
+#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
+#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
+#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
+#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
+#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaTail_OFFS 0x208
+#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
+#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
+#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
+#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHead_OFFS 0x210
+#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
+#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
+#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
+#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaBufMask0_OFFS 0x220
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7220_SendDmaStatus_OFFS 0x238
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
+#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
+#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
+#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
+#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
+#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7220_SendBufErr0_OFFS 0x240
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7220_RcvHdrAddr0_OFFS 0x270
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
+
+#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
+
+#define QIB_7220_XGXSCfg_OFFS 0x3D8
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
+#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
+#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
+#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
+#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
+#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
+#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
+#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
+#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
+
+#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
+#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
+#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
+#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
+#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
+#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
+#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
+#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
+#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
+#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
+#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
+#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
+#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
+
+#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
+
+#define QIB_7220_LBIntCnt_OFFS 0x13000
+
+#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
+
+#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
+
+#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
+
+#define QIB_7220_TxDataPktCnt_OFFS 0x13020
+
+#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
+
+#define QIB_7220_TxDwordCnt_OFFS 0x13030
+
+#define QIB_7220_TxLenErrCnt_OFFS 0x13038
+
+#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
+
+#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
+
+#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
+
+#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
+
+#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
+
+#define QIB_7220_RxDataPktCnt_OFFS 0x13068
+
+#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
+
+#define QIB_7220_RxDwordCnt_OFFS 0x13078
+
+#define QIB_7220_RxLenErrCnt_OFFS 0x13080
+
+#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
+
+#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
+
+#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
+
+#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
+
+#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
+
+#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
+
+#define QIB_7220_RxEBPCnt_OFFS 0x130B8
+
+#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
+
+#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
+
+#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
+
+#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
+
+#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
+
+#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
+
+#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
+
+#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
+
+#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
+
+#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
+
+#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
+
+#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
+
+#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
+
+#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
+
+#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
+
+#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
+
+#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
+
+#define QIB_7220_CNT_0131C8_OFFS 0x131C8
+
+#define QIB_7220_PSStat_OFFS 0x13200
+
+#define QIB_7220_PSStart_OFFS 0x13208
+
+#define QIB_7220_PSInterval_OFFS 0x13210
+
+#define QIB_7220_PSRcvDataCount_OFFS 0x13218
+
+#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
+
+#define QIB_7220_PSXmitDataCount_OFFS 0x13228
+
+#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
+
+#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
+
+#define QIB_7220_CNT_013240_OFFS 0x13240
+
+#define QIB_7220_RcvEgrArray_OFFS 0x14000
+
+#define QIB_7220_MEM_038000_OFFS 0x38000
+
+#define QIB_7220_RcvTIDArray0_OFFS 0x53000
+
+#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_7220_MEM_064480_OFFS 0x64480
+
+#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_7220_MEM_064C80_OFFS 0x64C80
+
+#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
+
+#define QIB_7220_MEM_065080_OFFS 0x65080
+
+#define QIB_7220_ScoreBoard_OFFS 0x65400
+
+#define QIB_7220_MEM_065440_OFFS 0x65440
+
+#define QIB_7220_DescriptorFIFO_OFFS 0x65800
+
+#define QIB_7220_MEM_065880_OFFS 0x65880
+
+#define QIB_7220_RcvBuf1_OFFS 0x72000
+
+#define QIB_7220_MEM_074800_OFFS 0x74800
+
+#define QIB_7220_RcvBuf2_OFFS 0x75000
+
+#define QIB_7220_MEM_076400_OFFS 0x76400
+
+#define QIB_7220_RcvFlags_OFFS 0x77000
+
+#define QIB_7220_MEM_078400_OFFS 0x78400
+
+#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_7220_MEM_07A400_OFFS 0x7A400
+
+#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
+
+#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
+
+#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_7220_MEM_07D400_OFFS 0x7D400
+
+#define QIB_7220_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_7220_PCIERetryBuf_OFFS 0x84000
+
+#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
+
+#define QIB_7220_PCIECplBuf_OFFS 0x90000
+
+#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
+
+#define QIB_7220_MEM_095000_OFFS 0x95000
+
+#define QIB_7220_SendBuf0_MA_OFFS 0x100000
+
+#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 00000000000..a97440ba924
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
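+/*
+ * Illustrative note only -- not part of the generated register map.
+ * Each register below is described by an _OFFS byte offset plus, per
+ * field, _LSB/_MSB bit positions and an _RMASK right-justified mask.
+ * A minimal sketch of how such a field is conventionally read, assuming
+ * a hypothetical helper and an already ioremap()ed u64 __iomem *kregbase
+ * (names here are for illustration, not the driver's actual API):
+ *
+ *	static inline u64 qib_read_field(const u64 __iomem *kregbase,
+ *					 u32 offs, u32 lsb, u64 rmask)
+ *	{
+ *		/@ shift the 64-bit register down to the field's LSB,
+ *		 * then keep only the field's bits via its RMASK @/
+ *		return (readq(kregbase + (offs / sizeof(u64))) >> lsb) & rmask;
+ *	}
+ *
+ *	boardid = qib_read_field(kregbase, QIB_7322_Revision_OFFS,
+ *				 QIB_7322_Revision_BoardID_LSB,
+ *				 QIB_7322_Revision_BoardID_RMASK);
+ *
+ * (The inner comment delimiters are written with '@' above only to keep
+ * this enclosing comment well-formed.)
+ */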
+#define QIB_7322_Revision_OFFS 0x0
+#define QIB_7322_Revision_DEF 0x0000000002010601
+#define QIB_7322_Revision_R_Simulator_LSB 0x3F
+#define QIB_7322_Revision_R_Simulator_MSB 0x3F
+#define QIB_7322_Revision_R_Simulator_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_LSB 0x3E
+#define QIB_7322_Revision_R_Emulation_MSB 0x3E
+#define QIB_7322_Revision_R_Emulation_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
+#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7322_Revision_BoardID_LSB 0x20
+#define QIB_7322_Revision_BoardID_MSB 0x27
+#define QIB_7322_Revision_BoardID_RMASK 0xFF
+#define QIB_7322_Revision_R_SW_LSB 0x18
+#define QIB_7322_Revision_R_SW_MSB 0x1F
+#define QIB_7322_Revision_R_SW_RMASK 0xFF
+#define QIB_7322_Revision_R_Arch_LSB 0x10
+#define QIB_7322_Revision_R_Arch_MSB 0x17
+#define QIB_7322_Revision_R_Arch_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
+#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
+#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7322_Control_OFFS 0x8
+#define QIB_7322_Control_DEF 0x0000000000000000
+#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
+#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7322_Control_FreezeMode_LSB 0x1
+#define QIB_7322_Control_FreezeMode_MSB 0x1
+#define QIB_7322_Control_FreezeMode_RMASK 0x1
+#define QIB_7322_Control_SyncReset_LSB 0x0
+#define QIB_7322_Control_SyncReset_MSB 0x0
+#define QIB_7322_Control_SyncReset_RMASK 0x1
+
+#define QIB_7322_PageAlign_OFFS 0x10
+#define QIB_7322_PageAlign_DEF 0x0000000000001000
+
+#define QIB_7322_ContextCnt_OFFS 0x18
+#define QIB_7322_ContextCnt_DEF 0x0000000000000012
+
+#define QIB_7322_Scratch_OFFS 0x20
+#define QIB_7322_Scratch_DEF 0x0000000000000000
+
+#define QIB_7322_CntrRegBase_OFFS 0x28
+#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
+
+#define QIB_7322_SendRegBase_OFFS 0x30
+#define QIB_7322_SendRegBase_DEF 0x0000000000003000
+
+#define QIB_7322_UserRegBase_OFFS 0x38
+#define QIB_7322_UserRegBase_DEF 0x0000000000200000
+
+#define QIB_7322_IntMask_OFFS 0x68
+#define QIB_7322_IntMask_DEF 0x0000000000000000
+#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
+#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7322_IntStatus_OFFS 0x70
+#define QIB_7322_IntStatus_DEF 0x0000000000000000
+#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_1_LSB 0x1F
+#define QIB_7322_IntStatus_Err_1_MSB 0x1F
+#define QIB_7322_IntStatus_Err_1_RMASK 0x1
+#define QIB_7322_IntStatus_Err_0_LSB 0x1E
+#define QIB_7322_IntStatus_Err_0_MSB 0x1E
+#define QIB_7322_IntStatus_Err_0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_LSB 0x1D
+#define QIB_7322_IntStatus_Err_MSB 0x1D
+#define QIB_7322_IntStatus_Err_RMASK 0x1
+#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7322_IntClear_OFFS 0x78
+#define QIB_7322_IntClear_DEF 0x0000000000000000
+#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
+#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
+
+#define QIB_7322_ErrMask_OFFS 0x80
+#define QIB_7322_ErrMask_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_OFFS 0x88
+#define QIB_7322_ErrStatus_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_OFFS 0x90
+#define QIB_7322_ErrClear_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+
+#define QIB_7322_HwErrMask_OFFS 0x98
+#define QIB_7322_HwErrMask_DEF 0x0000000000000000
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
+
+#define QIB_7322_HwErrStatus_OFFS 0xA0
+#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
+#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
+#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
+
+#define QIB_7322_HwErrClear_OFFS 0xA8
+#define QIB_7322_HwErrClear_DEF 0x0000000000000000
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
+#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
+#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
+
+#define QIB_7322_HwDiagCtrl_OFFS 0xB0
+#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
+#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+
+#define QIB_7322_EXTStatus_OFFS 0xC0
+#define QIB_7322_EXTStatus_DEF 0x000000000000X000
+#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
+#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
+
+#define QIB_7322_EXTCtrl_OFFS 0xC8
+#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
+#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
+#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
+#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
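+
+/*
+ * Usage sketch (illustrative only; assumes a hypothetical ioremapped
+ * register base "kregbase" -- the driver proper wraps register access
+ * in its own helpers): each field is described by a *_LSB bit position
+ * and a *_RMASK width mask, so a value is placed with
+ * (val & RMASK) << LSB.  For example, turning on both port-0 LEDs in
+ * EXTCtrl:
+ *
+ *	u64 extctrl = readq(kregbase + QIB_7322_EXTCtrl_OFFS);
+ *	extctrl |= (1ULL & QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK)
+ *			<< QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB;
+ *	extctrl |= (1ULL & QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK)
+ *			<< QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB;
+ *	writeq(extctrl, kregbase + QIB_7322_EXTCtrl_OFFS);
+ */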
+
+#define QIB_7322_GPIOOut_OFFS 0xE0
+#define QIB_7322_GPIOOut_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOMask_OFFS 0xE8
+#define QIB_7322_GPIOMask_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOStatus_OFFS 0xF0
+#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOClear_OFFS 0xF8
+#define QIB_7322_GPIOClear_DEF 0x0000000000000000
+
+#define QIB_7322_RcvCtrl_OFFS 0x100
+#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
+#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
+#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
+#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
+#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
+#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
+#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
+#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
+#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
+#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
+#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
+#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
+#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
+#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
+
+#define QIB_7322_RcvHdrSize_OFFS 0x110
+#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrCnt_OFFS 0x118
+#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrEntSize_OFFS 0x120
+#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDBase_OFFS 0x128
+#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
+
+#define QIB_7322_RcvTIDCnt_OFFS 0x130
+#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
+
+#define QIB_7322_RcvEgrBase_OFFS 0x138
+#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
+
+#define QIB_7322_RcvEgrCnt_OFFS 0x140
+#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
+
+#define QIB_7322_RcvBufBase_OFFS 0x148
+#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
+
+#define QIB_7322_RcvBufSize_OFFS 0x150
+#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
+
+#define QIB_7322_RxIntMemBase_OFFS 0x158
+#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
+
+#define QIB_7322_RxIntMemSize_OFFS 0x160
+#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
+
+#define QIB_7322_feature_mask_OFFS 0x190
+#define QIB_7322_feature_mask_DEF 0x00000000000000XX
+
+#define QIB_7322_active_feature_mask_OFFS 0x198
+#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
+
+#define QIB_7322_SendCtrl_OFFS 0x1C0
+#define QIB_7322_SendCtrl_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
+#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
+#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
+#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
+#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
+#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
+
+#define QIB_7322_SendBufBase_OFFS 0x1C8
+#define QIB_7322_SendBufBase_DEF 0x0018000000100000
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7322_SendBufSize_OFFS 0x1D0
+#define QIB_7322_SendBufSize_DEF 0x0000108000000880
+#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
+#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
+#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7322_SendBufCnt_OFFS 0x1D8
+#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+
+#define QIB_7322_SendBufErr0_OFFS 0x240
+#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7322_AvailUpdCount_OFFS 0x268
+#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
+#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
+#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
+#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
+
+#define QIB_7322_RcvHdrAddr0_OFFS 0x280
+#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
+#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_ahb_access_ctrl_OFFS 0x460
+#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
+
+#define QIB_7322_ahb_transaction_reg_OFFS 0x468
+#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
+#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
+#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
+#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
+#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
+#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
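+
+/*
+ * Usage sketch (illustrative only; "kregbase" is a hypothetical
+ * ioremapped base, and the handshake below is inferred from the field
+ * names, not taken from the patch): ahb_transaction_reg looks like an
+ * indirect access window -- place the target address (plus the
+ * write_not_read bit for writes), poll ahb_rdy, then take the result
+ * from ahb_data.  A read might look like:
+ *
+ *	u64 t = (u64)(addr & QIB_7322_ahb_transaction_reg_ahb_address_RMASK)
+ *			<< QIB_7322_ahb_transaction_reg_ahb_address_LSB;
+ *	writeq(t, kregbase + QIB_7322_ahb_transaction_reg_OFFS);
+ *	do {
+ *		t = readq(kregbase + QIB_7322_ahb_transaction_reg_OFFS);
+ *	} while (!(t & (1ULL << QIB_7322_ahb_transaction_reg_ahb_rdy_LSB)));
+ *	data = (u32)(t >> QIB_7322_ahb_transaction_reg_ahb_data_LSB);
+ */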
+
+#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
+#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
+
+#define QIB_7322_SendCheckMask0_OFFS 0x4C0
+#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
+#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendIBPacketMask0_OFFS 0x500
+#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
+
+#define QIB_7322_IntRedirect0_OFFS 0x540
+#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
+#define QIB_7322_IntRedirect0_vec11_LSB 0x37
+#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
+#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec10_LSB 0x32
+#define QIB_7322_IntRedirect0_vec10_MSB 0x36
+#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
+#define QIB_7322_IntRedirect0_vec9_MSB 0x31
+#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec8_LSB 0x28
+#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
+#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec7_LSB 0x23
+#define QIB_7322_IntRedirect0_vec7_MSB 0x27
+#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
+#define QIB_7322_IntRedirect0_vec6_MSB 0x22
+#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec5_LSB 0x19
+#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
+#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec4_LSB 0x14
+#define QIB_7322_IntRedirect0_vec4_MSB 0x18
+#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec3_LSB 0xF
+#define QIB_7322_IntRedirect0_vec3_MSB 0x13
+#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec2_LSB 0xA
+#define QIB_7322_IntRedirect0_vec2_MSB 0xE
+#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec1_LSB 0x5
+#define QIB_7322_IntRedirect0_vec1_MSB 0x9
+#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec0_LSB 0x0
+#define QIB_7322_IntRedirect0_vec0_MSB 0x4
+#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
+
+#define QIB_7322_Int_Granted_OFFS 0x570
+#define QIB_7322_Int_Granted_DEF 0x0000000000000000
+
+#define QIB_7322_vec_clr_without_int_OFFS 0x578
+#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
+
+#define QIB_7322_DCACtrlA_OFFS 0x580
+#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
+
+#define QIB_7322_DCACtrlB_OFFS 0x588
+#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlC_OFFS 0x590
+#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlD_OFFS 0x598
+#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlE_OFFS 0x5A0
+#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlF_OFFS 0x5A8
+#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
+
+#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
+#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
+
+#define QIB_7322_CntrRegBase_0_OFFS 0x1028
+#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
+
+#define QIB_7322_ErrMask_0_OFFS 0x1080
+#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_0_OFFS 0x1088
+#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_0_OFFS 0x1090
+#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
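+
+/*
+ * Usage sketch (illustrative only; hypothetical "kregbase" as above):
+ * the ErrMask_0 / ErrStatus_0 / ErrClear_0 names follow the usual
+ * mask/status/clear pattern, where a latched status bit is acknowledged
+ * by writing the same bit position to the clear register, e.g. for a
+ * receive ICRC error:
+ *
+ *	u64 errs = readq(kregbase + QIB_7322_ErrStatus_0_OFFS);
+ *	if (errs & (1ULL << QIB_7322_ErrStatus_0_RcvICRCErr_LSB))
+ *		writeq(1ULL << QIB_7322_ErrClear_0_RcvICRCErrClear_LSB,
+ *		       kregbase + QIB_7322_ErrClear_0_OFFS);
+ */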
+
+#define QIB_7322_TXEStatus_0_OFFS 0x10B8
+#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
+
+#define QIB_7322_RcvCtrl_0_OFFS 0x1100
+#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
+
+#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
+#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
+#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
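+
+/*
+ * Usage sketch (illustrative only; hypothetical "kregbase" and context
+ * numbers ctx0/ctx1): each RcvQPMapContextN field is a 5-bit receive
+ * context number (RMASK 0x1F) packed at a 5-bit stride, so a table word
+ * is built field by field:
+ *
+ *	u64 map = 0;
+ *	map |= (u64)(ctx0 & QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK)
+ *			<< QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB;
+ *	map |= (u64)(ctx1 & QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK)
+ *			<< QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB;
+ *	writeq(map, kregbase + QIB_7322_RcvQPMapTableA_0_OFFS);
+ */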
+
+#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
+#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
+#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
+#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
+#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
+#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
+
+#define QIB_7322_PSStat_0_OFFS 0x1140
+#define QIB_7322_PSStat_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSStart_0_OFFS 0x1148
+#define QIB_7322_PSStart_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSInterval_0_OFFS 0x1150
+#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvStatus_0_OFFS 0x1160
+#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
+#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
+
+#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
+#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
+#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
+
+#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
+#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
+#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
+#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
+#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendCtrl_0_OFFS 0x11C0
+#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
+
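The SendCtrl_0 block above is representative of the whole generated register map: each register gets a byte offset (_OFFS), a power-up default (_DEF) and, for every field, the low and high bit positions (_LSB/_MSB) plus a right-justified mask (_RMASK). As a minimal illustration only -- the helper names, the kregbase pointer and the direct use of readq()/writeq() are assumptions for this sketch, not something taken from the patch -- a field is read by shifting down to its LSB and masking with RMASK, and written by clearing (RMASK << LSB) before OR-ing the new value in:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helpers only: extract or update the field described by a
 * generated <name>_LSB / <name>_RMASK pair.  Not part of this patch. */
static inline u64 qib_field_get(u64 regval, unsigned int lsb, u64 rmask)
{
	return (regval >> lsb) & rmask;
}

static inline u64 qib_field_put(u64 regval, unsigned int lsb, u64 rmask,
				u64 val)
{
	return (regval & ~(rmask << lsb)) | ((val & rmask) << lsb);
}

/* Example: set SendCtrl_0.SDmaEnable, assuming kregbase points at the
 * ioremap()ed start of the chip register space (an assumption here). */
static void example_enable_sdma(void __iomem *kregbase)
{
	void __iomem *reg = kregbase + QIB_7322_SendCtrl_0_OFFS;
	u64 v = readq(reg);

	v = qib_field_put(v, QIB_7322_SendCtrl_0_SDmaEnable_LSB,
			  QIB_7322_SendCtrl_0_SDmaEnable_RMASK, 1);
	writeq(v, reg);
}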
+#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
+#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
+#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
+#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
+#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
+#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
+#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
+#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
+#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
+#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaTail_0_OFFS 0x1208
+#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
+#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
+#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHead_0_OFFS 0x1210
+#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
+#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
+#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
+#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
+#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
+#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
+
+#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
+#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
+
+#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
+#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
+
+#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
+#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
+
+#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
+#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
+
+#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
+#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
+
+#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
+#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
+
+#define QIB_7322_IBCStatusA_0_OFFS 0x1540
+#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
+#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
+#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7322_IBCStatusB_0_OFFS 0x1548
+#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
+#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
+#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
+#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
+#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
+#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
+#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
+#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
+#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
+#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
+#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
+#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
+
+#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
+#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
+
+#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
+#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
+
+#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
+#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
+#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
+#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
+
+#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
+#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
+#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
+#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
+
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
+
+#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
+#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
+#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
+#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
+
+#define QIB_7322_LowPriority0_0_OFFS 0x1C00
+#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
+#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
+#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_HighPriority0_0_OFFS 0x1E00
+#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
+#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
+#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_CntrRegBase_1_OFFS 0x2028
+#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
+
+#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
+
+#define QIB_7322_SendCtrl_1_OFFS 0x21C0
+
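Judging purely from the offsets listed in this header, the port 1 ("_1") copies of the per-port registers sit 0x1000 bytes above their port 0 counterparts (SendCtrl_0 at 0x11C0 vs SendCtrl_1 at 0x21C0, and LBIntCnt_0 at 0x12000 vs LBIntCnt_1 at 0x13000 below), which is presumably why only a handful of _1 constants are spelled out. A hedged sketch of that derivation -- the 0x1000 stride is inferred from this listing, not stated by it:

/* Inferred from the offsets above: port N's copy of a per-port register
 * appears to live N * 0x1000 bytes past the port-0 offset.  Treat the
 * stride as an assumption drawn from this listing, not a documented fact. */
#define QIB_7322_PORT_STRIDE	0x1000

static inline unsigned long qib_port_reg(unsigned long port0_offs,
					 unsigned int port)
{
	return port0_offs + port * QIB_7322_PORT_STRIDE;
}

/* e.g. qib_port_reg(QIB_7322_SendCtrl_0_OFFS, 1) == QIB_7322_SendCtrl_1_OFFS */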
+#define QIB_7322_SendBufAvail0_OFFS 0x3000
+#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
+
+#define QIB_7322_MsixTable_OFFS 0x8000
+#define QIB_7322_MsixTable_DEF 0x0000000000000000
+
+#define QIB_7322_MsixPba_OFFS 0x9000
+#define QIB_7322_MsixPba_DEF 0x0000000000000000
+
+#define QIB_7322_LAMemory_OFFS 0xA000
+#define QIB_7322_LAMemory_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_OFFS 0x11000
+#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
+#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
+#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
+#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
+#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
+
+#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
+#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
+#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_0_OFFS 0x12000
+#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
+#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
+#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
+#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
+#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
+#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
+#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
+#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
+#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
+#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
+#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
+#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
+#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
+#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
+#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
+#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
+#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
+#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
+#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
+#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
+#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
+#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
+#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
+#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
+#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
+#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
+#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
+#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
+#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
+#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
+#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
+#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
+#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
+#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
+#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
+#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
+#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
+#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
+#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
+#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
+#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
+#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
+#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_1_OFFS 0x13000
+#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
+#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
+#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
+#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
+#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
+#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
+#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
+#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
+#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
+#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
+#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
+#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
+#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
+#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
+#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
+#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
+#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
+#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
+#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
+#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
+#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
+#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
+#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
+#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
+#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
+#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
+#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
+#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
+#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
+#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
+#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
+#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
+#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
+#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
+#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
+#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
+#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
+#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
+#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
+#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
+#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
+#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
+#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrArray_OFFS 0x14000
+#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
+#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
+#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
+#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_RcvTIDArray0_OFFS 0x50000
+#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
+#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
+#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrTail0_OFFS 0x200000
+#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrHead0_OFFS 0x200008
+#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
+#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
+#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
+
+#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
+#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
+#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
+#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
+#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
+#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 00000000000..b3955ed8f79
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QIB_COMMON_H
+#define _QIB_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
+#define QIB_SRC_OUI_1 0x00
+#define QIB_SRC_OUI_2 0x11
+#define QIB_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
+ * fastpath code
+ * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
+ * traced in fastpath code
+ * _QIB_TRACING define as 0 if you want to remove all tracing in a
+ * compilation unit
+ */
+
+/*
+ * The value in the BTH QP field that QLogic_IB uses to differentiate
+ * a qlogic_ib protocol IB packet vs. standard IB transport.
+ * It needs to be even (0x656b78), because the LSB is sometimes
+ * used for the MSB of the context. The change may cause a problem
+ * interoperating with older software.
+ */
+#define QIB_KD_QP 0x656b78
+
+/*
+ * These are the status bits readable (in ascii form, 64bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initted */
+#define QIB_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define QIB_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define QIB_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define QIB_STATUS_HWERROR 0x200
+
+/*
+ * The list of usermode accessible registers. Also see Reg_* later in file.
+ */
+enum qib_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* For internal use only; max register number. */
+ _QIB_UregMax
+};
+
+/* bit values for spi_runtime_flags */
+#define QIB_RUNTIME_PCIE 0x0002
+#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
+#define QIB_RUNTIME_RCVHDR_COPY 0x0008
+#define QIB_RUNTIME_MASTER 0x0010
+#define QIB_RUNTIME_RCHK 0x0020
+#define QIB_RUNTIME_NODMA_RTAIL 0x0080
+#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
+#define QIB_RUNTIME_SDMA 0x0200
+#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
+#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
+#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
+#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
+#define QIB_RUNTIME_HDRSUPP 0x4000
+
+/*
+ * This structure is returned by qib_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets.
+ */
+struct qib_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 spi_hw_version;
+ /* version of software, for feature checking. */
+ __u32 spi_sw_version;
+ /* QLogic_IB context assigned, goes into sent packets */
+ __u16 spi_ctxt;
+ __u16 spi_subctxt;
+ /*
+ * IB MTU, packets IB data must be less than this.
+ * The MTU is in bytes, and will be a multiple of 4 bytes.
+ */
+ __u32 spi_mtu;
+ /*
+ * Size of a PIO buffer. Any given packet's total size must be less
+ * than this (in words). Included is the starting control word, so
+ * if 513 is returned, then total pkt size is 512 words or less.
+ */
+ __u32 spi_piosize;
+ /* size of the TID cache in qlogic_ib, in entries */
+ __u32 spi_tidcnt;
+ /* size of the TID Eager list in qlogic_ib, in entries */
+ __u32 spi_tidegrcnt;
+ /* size of a single receive header queue entry in words. */
+ __u32 spi_rcvhdrent_size;
+ /*
+ * Count of receive header queue entries allocated.
+	 * This may be less than the spu_rcvhdrcnt passed in!
+ */
+ __u32 spi_rcvhdr_cnt;
+
+ /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
+ __u32 spi_runtime_flags;
+
+ /* address where hardware receive header queue is mapped */
+ __u64 spi_rcvhdr_base;
+
+ /* user program. */
+
+ /* base address of eager TID receive buffers used by hardware. */
+ __u64 spi_rcv_egrbufs;
+
+ /* Allocated by initialization code, not by protocol. */
+
+ /*
+ * Size of each TID buffer in host memory, starting at
+ * spi_rcv_egrbufs. The buffers are virtually contiguous.
+ */
+ __u32 spi_rcv_egrbufsize;
+ /*
+	 * The special QP (queue pair) value that identifies a qlogic_ib
+ * protocol packet from standard IB packets. More, probably much
+ * more, to be added.
+ */
+ __u32 spi_qpair;
+
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications. Always points to chip registers,
+ * for normal or shared context.
+ */
+ __u64 spi_uregbase;
+ /*
+ * Maximum buffer size in bytes that can be used in a single TID
+ * entry (assuming the buffer is aligned to this boundary). This is
+	 * the minimum of what the hardware and software support. Guaranteed
+ * to be a power of 2.
+ */
+ __u32 spi_tid_maxsize;
+ /*
+ * alignment of each pio send buffer (byte count
+ * to add to spi_piobufbase to get to second buffer)
+ */
+ __u32 spi_pioalign;
+ /*
+ * The index of the first pio buffer available to this process;
+ * needed to do lookup in spi_pioavailaddr; not added to
+ * spi_piobufbase.
+ */
+ __u32 spi_pioindex;
+ /* number of buffers mapped for this process */
+ __u32 spi_piocnt;
+
+ /*
+ * Base address of writeonly pio buffers for this process.
+ * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+ * boundaries. spi_piocnt buffers are mapped from this address
+ */
+ __u64 spi_piobufbase;
+
+ /*
+ * Base address of readonly memory copy of the pioavail registers.
+ * There are 2 bits for each buffer.
+ */
+ __u64 spi_pioavailaddr;
+
+ /*
+ * Address where driver updates a copy of the interface and driver
+ * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
+ * link status qword (formerly combined with driver status), then a
+ * string indicating hardware error, if there was one.
+ */
+ __u64 spi_status;
+
+ /* number of chip ctxts available to user processes */
+ __u32 spi_nctxts;
+ __u16 spi_unit; /* unit number of chip we are using */
+ __u16 spi_port; /* IB port number we are using */
+ /* num bufs in each contiguous set */
+ __u32 spi_rcv_egrperchunk;
+ /* size in bytes of each contiguous set */
+ __u32 spi_rcv_egrchunksize;
+ /* total size of mmap to cover full rcvegrbuffers */
+ __u32 spi_rcv_egrbuftotlen;
+ __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
+ /* address of readonly memory copy of the rcvhdrq tail register. */
+ __u64 spi_rcvhdr_tailaddr;
+
+ /*
+ * shared memory pages for subctxts if ctxt is shared; these cover
+ * all the processes in the group sharing a single context.
+	 * All have enough space for the num_subcontexts value on this job.
+ */
+ __u64 spi_subctxt_uregbase;
+ __u64 spi_subctxt_rcvegrbuf;
+ __u64 spi_subctxt_rcvhdr_base;
+
+ /* shared memory page for send buffer disarm status */
+ __u64 spi_sendbuf_status;
+} __attribute__ ((aligned(8)));
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of qib_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way. The driver must be the same or higher
+ * for initialization to succeed. In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define QIB_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software version is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define QIB_USER_SWMINOR 10
+
+#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
+
+#ifndef QIB_KERN_TYPE
+#define QIB_KERN_TYPE 0
+#define QIB_IDSTR "QLogic kernel.org driver"
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of qib_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+*/
+#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
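+
+/*
+ * Illustrative sketch only (not part of the original header): one plausible
+ * way for user code to compare the spi_sw_version returned in qib_base_info
+ * against its own compiled-in QIB_USER_SWVERSION.  The helper name and the
+ * exact policy are assumptions, not the driver's actual check.
+ */
+static inline int qib_user_swversion_ok_example(__u32 spi_sw_version)
+{
+	__u32 ver = spi_sw_version & ~(1U << 31); /* strip the QIB_KERN_TYPE bit */
+
+	/* major must match; a driver minor >= the user minor has all features */
+	return ((ver >> 16) == QIB_USER_SWMAJOR) &&
+		((ver & 0xFFFF) >= QIB_USER_SWMINOR);
+}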
+
+/*
+ * This structure is passed to qib_userinit() to tell the driver where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended, provided userversion is changed so user code can tell, if needed.
+ */
+struct qib_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to QIB_USER_SWVERSION.
+ */
+ __u32 spu_userversion;
+
+ __u32 _spu_unused2;
+
+ /* size of struct base_info to write to */
+ __u32 spu_base_info_size;
+
+ __u32 _spu_unused3;
+
+ /*
+ * If two or more processes wish to share a context, each process
+ * must set the spu_subctxt_cnt and spu_subctxt_id to the same
+ * values. The only restriction on the spu_subctxt_id is that
+ * it be unique for a given node.
+ */
+ __u16 spu_subctxt_cnt;
+ __u16 spu_subctxt_id;
+
+ __u32 spu_port; /* IB port requested by user if > 0 */
+
+ /*
+ * address of struct base_info to write to
+ */
+ __u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+/* 16 available, was: old set up userspace (for old user code) */
+#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
+#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
+#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
+#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
+#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
+/* 22 available, was: return info on slave processes (for old user code) */
+#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
+#define QIB_CMD_USER_INIT 24 /* set up userspace */
+#define QIB_CMD_UNUSED_1 25
+#define QIB_CMD_UNUSED_2 26
+#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
+#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
+#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
+/* 30 is unused */
+#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
+#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
+/* 33 available, was a testing feature */
+#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
+#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
+#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
+ * processes: qib_cpus_list */
+
+/*
+ * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
+ * compatibility with libraries from previous release. The ACK_EVENT
+ * will take appropriate driver action (if any, just DISARM for now),
+ * then clear the bits passed in as part of the mask. These bits are
+ * in the first 64bit word at spi_sendbuf_status, and are passed to
+ * the driver in the event_mask union as well.
+ */
+#define _QIB_EVENT_DISARM_BUFS_BIT 0
+#define _QIB_EVENT_LINKDOWN_BIT 1
+#define _QIB_EVENT_LID_CHANGE_BIT 2
+#define _QIB_EVENT_LMC_CHANGE_BIT 3
+#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
+#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
+
+#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
+#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
+#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
+#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
+#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
+
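+/*
+ * Illustrative sketch only (not part of the original header): masking the
+ * event bits a process found set in the first 64-bit word at
+ * spi_sendbuf_status before handing them back via QIB_CMD_ACK_EVENT.  How
+ * the command is actually delivered to the driver is not shown here and is
+ * an assumption of the surrounding interface.
+ */
+static inline __u64 qib_pending_events_example(__u64 sendbuf_status)
+{
+	/* keep only the bits this interface defines (disarm .. sl2vl change) */
+	return sendbuf_status & ((1ULL << (_QIB_MAX_EVENT_BIT + 1)) - 1);
+}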
+
+/*
+ * Poll types
+ */
+#define QIB_POLL_TYPE_ANYRCV 0x0
+#define QIB_POLL_TYPE_URGENT 0x1
+
+struct qib_ctxt_info {
+ __u16 num_active; /* number of active units */
+ __u16 unit; /* unit (chip) assigned to caller */
+ __u16 port; /* IB port assigned to caller (1-based) */
+ __u16 ctxt; /* ctxt on unit assigned to caller */
+ __u16 subctxt; /* subctxt on unit assigned to caller */
+ __u16 num_ctxts; /* number of ctxts available on unit */
+ __u16 num_subctxts; /* number of subctxts opened on ctxt */
+ __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
+};
+
+struct qib_tid_info {
+ __u32 tidcnt;
+ /* make structure same size in 32 and 64 bit */
+ __u32 tid__unused;
+ /* virtual address of first page in transfer */
+ __u64 tidvaddr;
+ /* pointer (same size 32/64 bit) to __u16 tid array */
+ __u64 tidlist;
+
+ /*
+ * pointer (same size 32/64 bit) to bitmap of TIDs used
+ * for this call; checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct qib_cmd {
+ __u32 type; /* command type */
+ union {
+ struct qib_tid_info tid_info;
+ struct qib_user_info user_info;
+
+ /*
+ * address in userspace where we should put the sdma
+ * inflight counter
+ */
+ __u64 sdma_inflight;
+ /*
+ * address in userspace where we should put the sdma
+ * completion counter
+ */
+ __u64 sdma_complete;
+ /* address in userspace of struct qib_ctxt_info to
+ write result to */
+ __u64 ctxt_info;
+ /* enable/disable receipt of packets */
+ __u32 recv_ctrl;
+ /* enable/disable armlaunch errors (non-zero to enable) */
+ __u32 armlaunch_ctrl;
+ /* partition key to set */
+ __u16 part_key;
+ /* user address of __u32 bitmask of active slaves */
+ __u64 slave_mask_addr;
+ /* type of polling we want */
+ __u16 poll_type;
+ /* back pressure enable bit for one particular context */
+ __u8 ctxt_bp;
+ /* qib_user_event_ack(), IPATH_EVENT_* bits */
+ __u64 event_mask;
+ } cmd;
+};
+
+struct qib_iovec {
+ /* Pointer to data, but same size 32 and 64 bit */
+ __u64 iov_base;
+
+ /*
+ * Length of data; don't need 64 bits, but want
+ * qib_sendpkt to remain same size as before 32 bit changes, so...
+ */
+ __u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send. Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, entire packet length,
+ * including IB headers, must be less than the qib_piosize value (words).
+ * Use of this necessitates including sys/uio.h
+ */
+struct __qib_sendpkt {
+ __u32 sps_flags; /* flags for packet (TBD) */
+ __u32 sps_cnt; /* number of entries to use in sps_iov */
+ /* array of iov's describing packet. TEMPORARY */
+ struct qib_iovec sps_iov[4];
+};
+
+/*
+ * Diagnostics can send a packet by "writing" the following
+ * structs to the diag data special file. This allows a custom
+ * pbc (+ static rate) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+#define _DIAG_XPKT_VERS 3
+struct qib_diag_xpkt {
+ __u16 version;
+ __u16 unit;
+ __u16 port;
+ __u16 len;
+ __u64 data;
+ __u64 pbc_wd;
+};
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define QIB_FLASH_VERSION 2
+struct qib_flash {
+ /* flash layout version (QIB_FLASH_VERSION) */
+ __u8 if_fversion;
+ /* checksum protecting if_length bytes */
+ __u8 if_csum;
+ /*
+ * valid length (in use, protected by if_csum), including
+	 * if_fversion and if_csum themselves
+ */
+ __u8 if_length;
+ /* the GUID, in network order */
+ __u8 if_guid[8];
+ /* number of GUIDs to use, starting from if_guid */
+ __u8 if_numguid;
+ /* the (last 10 characters of) board serial number, in ASCII */
+ char if_serial[12];
+ /* board mfg date (YYYYMMDD ASCII) */
+ char if_mfgdate[8];
+ /* last board rework/test date (YYYYMMDD ASCII) */
+ char if_testdate[8];
+ /* logging of error counts, TBD */
+ __u8 if_errcntp[4];
+ /* powered on hours, updated at driver unload */
+ __u8 if_powerhour[2];
+ /* ASCII free-form comment field */
+ char if_comment[32];
+ /* Backwards compatible prefix for longer QLogic Serial Numbers */
+ char if_sprefix[4];
+ /* 82 bytes used, min flash size is 128 bytes */
+ __u8 if_future[46];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct qlogic_ib_counters {
+ __u64 LBIntCnt;
+ __u64 LBFlowStallCnt;
+ __u64 TxSDmaDescCnt; /* was Reserved1 */
+ __u64 TxUnsupVLErrCnt;
+ __u64 TxDataPktCnt;
+ __u64 TxFlowPktCnt;
+ __u64 TxDwordCnt;
+ __u64 TxLenErrCnt;
+ __u64 TxMaxMinLenErrCnt;
+ __u64 TxUnderrunCnt;
+ __u64 TxFlowStallCnt;
+ __u64 TxDroppedPktCnt;
+ __u64 RxDroppedPktCnt;
+ __u64 RxDataPktCnt;
+ __u64 RxFlowPktCnt;
+ __u64 RxDwordCnt;
+ __u64 RxLenErrCnt;
+ __u64 RxMaxMinLenErrCnt;
+ __u64 RxICRCErrCnt;
+ __u64 RxVCRCErrCnt;
+ __u64 RxFlowCtrlErrCnt;
+ __u64 RxBadFormatCnt;
+ __u64 RxLinkProblemCnt;
+ __u64 RxEBPCnt;
+ __u64 RxLPCRCErrCnt;
+ __u64 RxBufOvflCnt;
+ __u64 RxTIDFullErrCnt;
+ __u64 RxTIDValidErrCnt;
+ __u64 RxPKeyMismatchCnt;
+ __u64 RxP0HdrEgrOvflCnt;
+ __u64 RxP1HdrEgrOvflCnt;
+ __u64 RxP2HdrEgrOvflCnt;
+ __u64 RxP3HdrEgrOvflCnt;
+ __u64 RxP4HdrEgrOvflCnt;
+ __u64 RxP5HdrEgrOvflCnt;
+ __u64 RxP6HdrEgrOvflCnt;
+ __u64 RxP7HdrEgrOvflCnt;
+ __u64 RxP8HdrEgrOvflCnt;
+ __u64 RxP9HdrEgrOvflCnt;
+ __u64 RxP10HdrEgrOvflCnt;
+ __u64 RxP11HdrEgrOvflCnt;
+ __u64 RxP12HdrEgrOvflCnt;
+ __u64 RxP13HdrEgrOvflCnt;
+ __u64 RxP14HdrEgrOvflCnt;
+ __u64 RxP15HdrEgrOvflCnt;
+ __u64 RxP16HdrEgrOvflCnt;
+ __u64 IBStatusChangeCnt;
+ __u64 IBLinkErrRecoveryCnt;
+ __u64 IBLinkDownedCnt;
+ __u64 IBSymbolErrCnt;
+ __u64 RxVL15DroppedPktCnt;
+ __u64 RxOtherLocalPhyErrCnt;
+ __u64 PcieRetryBufDiagQwordCnt;
+ __u64 ExcessBufferOvflCnt;
+ __u64 LocalLinkIntegrityErrCnt;
+ __u64 RxVlErrCnt;
+ __u64 RxDlidFltrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/* RcvHdrFlags bits */
+#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
+#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
+#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
+#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
+#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
+#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
+#define QLOGIC_IB_RHF_SEQ_MASK 0xF
+#define QLOGIC_IB_RHF_SEQ_SHIFT 0
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
+#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
+#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
+#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
+#define QLOGIC_IB_RHF_H_LENERR 0x10000000
+#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
+#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
+#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
+#define QLOGIC_IB_RHF_H_MKERR 0x01000000
+#define QLOGIC_IB_RHF_H_IBERR 0x00800000
+#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
+#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
+#define QLOGIC_IB_RHF_L_SWA 0x00008000
+#define QLOGIC_IB_RHF_L_SWB 0x00004000
+
+/* qlogic_ib header fields */
+#define QLOGIC_IB_I_VERS_MASK 0xF
+#define QLOGIC_IB_I_VERS_SHIFT 28
+#define QLOGIC_IB_I_CTXT_MASK 0xF
+#define QLOGIC_IB_I_CTXT_SHIFT 24
+#define QLOGIC_IB_I_TID_MASK 0x7FF
+#define QLOGIC_IB_I_TID_SHIFT 13
+#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
+#define QLOGIC_IB_I_OFFSET_SHIFT 0
+
+/* K_PktFlags bits */
+#define QLOGIC_IB_KPF_INTR 0x1
+#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
+#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
+
+#define QLOGIC_IB_MAX_SUBCTXT 4
+
+/* SendPIO per-buffer control */
+#define QLOGIC_IB_SP_TEST 0x40
+#define QLOGIC_IB_SP_TESTEBP 0x20
+#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
+
+/* SendPIOAvail bits */
+#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
+#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
+
+/* qlogic_ib header format */
+struct qib_header {
+ /*
+ * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
+ * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
+ * Context 4, TID 11, offset 13.
+ */
+ __le32 ver_ctxt_tid_offset;
+ __le16 chksum;
+ __le16 pkt_flags;
+};
+
+/*
+ * qlogic_ib user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ qlogic_ib.
+ */
+struct qib_message_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ /* fields below this point are in host byte order */
+ struct qib_header iph;
+ __u8 sub_opcode;
+};
+
+/* IB - LRH header consts */
+#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
+#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define QIB_DEFAULT_P_KEY 0xFFFF
+#define QIB_PERMISSIVE_LID 0xFFFF
+#define QIB_AETH_CREDIT_SHIFT 24
+#define QIB_AETH_CREDIT_MASK 0x1F
+#define QIB_AETH_CREDIT_INVAL 0x1F
+#define QIB_PSN_MASK 0xFFFFFF
+#define QIB_MSN_MASK 0xFFFFFF
+#define QIB_QPN_MASK 0xFFFFFF
+#define QIB_MULTICAST_LID_BASE 0xC000
+#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
+#define QIB_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from qlogic_ib) */
+#define RCVHQ_RCV_TYPE_EXPECTED 0
+#define RCVHQ_RCV_TYPE_EAGER 1
+#define RCVHQ_RCV_TYPE_NON_KD 2
+#define RCVHQ_RCV_TYPE_ERROR 3
+
+#define QIB_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
+}
+
+static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
+ QLOGIC_IB_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
+{
+ return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
+ QLOGIC_IB_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 qib_hdrget_index(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
+ QLOGIC_IB_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
+ QLOGIC_IB_RHF_SEQ_MASK;
+}
+
+static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
+ QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
+}
+
+static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
+{
+ return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
+ QLOGIC_IB_I_VERS_MASK;
+}
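+
+/*
+ * Illustrative sketch only (not part of the original header): one way a
+ * receive loop might combine the helpers above on the two rcvhdr-flag
+ * words of an entry.  That rhf points at those two __le32 words is an
+ * assumption about how the caller locates them.
+ */
+static inline int qib_rhf_looks_usable_example(const __le32 *rhf)
+{
+	if (qib_hdrget_err_flags(rhf))
+		return 0;	/* chip flagged ICRC/VCRC/TID/etc. errors */
+	if (qib_hdrget_rcv_type(rhf) == RCVHQ_RCV_TYPE_ERROR)
+		return 0;
+	/* callers would then use the length and eager index as needed */
+	return qib_hdrget_length_in_bytes(rhf) != 0;
+}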
+
+#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 00000000000..a86cbf880f9
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
+{
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ u32 head;
+ u32 next;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ /*
+ * Note that the head pointer might be writable by user processes.
+ * Take care to verify it is a sane value.
+ */
+ wc = cq->queue;
+ head = wc->head;
+ if (head >= (unsigned) cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
+ next = 0;
+ } else
+ next = head + 1;
+ if (unlikely(next == wc->tail)) {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return;
+ }
+ if (cq->ip) {
+ wc->uqueue[head].wr_id = entry->wr_id;
+ wc->uqueue[head].status = entry->status;
+ wc->uqueue[head].opcode = entry->opcode;
+ wc->uqueue[head].vendor_err = entry->vendor_err;
+ wc->uqueue[head].byte_len = entry->byte_len;
+ wc->uqueue[head].ex.imm_data =
+ (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].qp_num = entry->qp->qp_num;
+ wc->uqueue[head].src_qp = entry->src_qp;
+ wc->uqueue[head].wc_flags = entry->wc_flags;
+ wc->uqueue[head].pkey_index = entry->pkey_index;
+ wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].sl = entry->sl;
+ wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ wc->uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ smp_wmb();
+ } else
+ wc->kqueue[head] = *entry;
+ wc->head = next;
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = IB_CQ_NONE;
+ cq->triggered++;
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ queue_work(qib_cq_wq, &cq->comptask);
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+/**
+ * qib_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip) {
+ npolled = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+bail:
+ return npolled;
+}
+
+static void send_complete(struct work_struct *work)
+{
+ struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, queue_work()
+ * won't call us again until we return so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ /*
+ * IPoIB connected mode assumes the callback is from a
+ * soft IRQ. We simulate this by blocking "bottom halves".
+ * See the implementation for ipoib_cm_handle_tx_wc(),
+ * netif_tx_lock_bh() and netif_tx_lock().
+ */
+ local_bh_disable();
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ local_bh_enable();
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * qib_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @entries: the minimum size of the completion queue
+ * @context: unused by the QLogic_IB driver
+ * @udata: user data for libibverbs.so
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_cq *cq;
+ struct qib_cq_wc *wc;
+ struct ib_cq *ret;
+ u32 sz;
+
+ if (entries < 1 || entries > ib_qib_max_cqes) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ /* Allocate the completion queue structure. */
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ else
+ sz += sizeof(struct ib_wc) * (entries + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+
+ cq->ip = qib_create_mmap_info(dev, sz, context, wc);
+ if (!cq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wc;
+ }
+
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ cq->ip = NULL;
+
+ spin_lock(&dev->n_cqs_lock);
+ if (dev->n_cqs_allocated == ib_qib_max_cqs) {
+ spin_unlock(&dev->n_cqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_cqs_allocated++;
+ spin_unlock(&dev->n_cqs_lock);
+
+ if (cq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+	 * The number of entries must be >= the number requested, or an
+	 * error is returned.
+ */
+ cq->ibcq.cqe = entries;
+ cq->notify = IB_CQ_NONE;
+ cq->triggered = 0;
+ spin_lock_init(&cq->lock);
+ INIT_WORK(&cq->comptask, send_complete);
+ wc->head = 0;
+ wc->tail = 0;
+ cq->queue = wc;
+
+ ret = &cq->ibcq;
+
+ goto done;
+
+bail_ip:
+ kfree(cq->ip);
+bail_wc:
+ vfree(wc);
+bail_cq:
+ kfree(cq);
+done:
+ return ret;
+}
+
+/**
+ * qib_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int qib_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_cq *cq = to_icq(ibcq);
+
+ flush_work(&cq->comptask);
+ spin_lock(&dev->n_cqs_lock);
+ dev->n_cqs_allocated--;
+ spin_unlock(&dev->n_cqs_lock);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(cq->queue);
+ kfree(cq);
+
+ return 0;
+}
+
+/**
+ * qib_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->queue->head != cq->queue->tail)
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * qib_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ *
+ * Returns 0 for success.
+ */
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *old_wc;
+ struct qib_cq_wc *wc;
+ u32 head, tail, n;
+ int ret;
+ u32 sz;
+
+ if (cqe < 1 || cqe > ib_qib_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ else
+ sz += sizeof(struct ib_wc) * (cqe + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&cq->lock);
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ old_wc = cq->queue;
+ head = old_wc->head;
+ if (head > (u32) cq->ibcq.cqe)
+ head = (u32) cq->ibcq.cqe;
+ tail = old_wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
+ else
+ n = head - tail;
+ if (unlikely((u32)cqe < n)) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ for (n = 0; tail != head; n++) {
+ if (cq->ip)
+ wc->uqueue[n] = old_wc->uqueue[tail];
+ else
+ wc->kqueue[n] = old_wc->kqueue[tail];
+ if (tail == (u32) cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ wc->head = n;
+ wc->tail = 0;
+ cq->queue = wc;
+ spin_unlock_irq(&cq->lock);
+
+ vfree(old_wc);
+
+ if (cq->ip) {
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_mmap_info *ip = cq->ip;
+
+ qib_update_mmap_info(dev, ip, sz, wc);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = 0;
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 00000000000..ca98dd52375
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains support for diagnostic functions. It is accessed by
+ * opening the qib_diag device, normally minor number 129. Diagnostic use
+ * of the QLogic_IB chip may render the chip or board unusable until the
+ * driver is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * Each client that opens the diag device must read then write
+ * offset 0, to prevent lossage from random cat or od. diag_state
+ * sequences this "handshake".
+ */
+enum diag_state { UNUSED = 0, OPENED, INIT, READY };
+
+/* State for an individual client. PID so children cannot abuse handshake */
+static struct qib_diag_client {
+ struct qib_diag_client *next;
+ struct qib_devdata *dd;
+ pid_t pid;
+ enum diag_state state;
+} *client_pool;
+
+/*
+ * Get a client struct. Recycled if possible, else kmalloc.
+ * Must be called with qib_mutex held
+ */
+static struct qib_diag_client *get_client(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ dc = client_pool;
+ if (dc)
+		/* got one from the pool; remove it and use it */
+ client_pool = dc->next;
+ else
+ /* None in pool, alloc and init */
+ dc = kmalloc(sizeof *dc, GFP_KERNEL);
+
+ if (dc) {
+ dc->next = NULL;
+ dc->dd = dd;
+ dc->pid = current->pid;
+ dc->state = OPENED;
+ }
+ return dc;
+}
+
+/*
+ * Return to pool. Must be called with qib_mutex held
+ */
+static void return_client(struct qib_diag_client *dc)
+{
+ struct qib_devdata *dd = dc->dd;
+ struct qib_diag_client *tdc, *rdc;
+
+ rdc = NULL;
+ if (dc == dd->diag_client) {
+ dd->diag_client = dc->next;
+ rdc = dc;
+ } else {
+ tdc = dc->dd->diag_client;
+ while (tdc) {
+ if (dc == tdc->next) {
+ tdc->next = dc->next;
+ rdc = dc;
+ break;
+ }
+ tdc = tdc->next;
+ }
+ }
+ if (rdc) {
+ rdc->state = UNUSED;
+ rdc->dd = NULL;
+ rdc->pid = 0;
+ rdc->next = client_pool;
+ client_pool = rdc;
+ }
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp);
+static int qib_diag_release(struct inode *in, struct file *fp);
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off);
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diag_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diag_write,
+ .read = qib_diag_read,
+ .open = qib_diag_open,
+ .release = qib_diag_release
+};
+
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+static struct cdev *diagpkt_cdev;
+static struct device *diagpkt_device;
+
+static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diagpkt_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diagpkt_write,
+};
+
+int qib_diag_add(struct qib_devdata *dd)
+{
+ char name[16];
+ int ret = 0;
+
+ if (atomic_inc_return(&diagpkt_count) == 1) {
+ ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
+ &diagpkt_file_ops, &diagpkt_cdev,
+ &diagpkt_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
+ ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
+ &diag_file_ops, &dd->diag_cdev,
+ &dd->diag_device);
+done:
+ return ret;
+}
+
+static void qib_unregister_observers(struct qib_devdata *dd);
+
+void qib_diag_remove(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ if (atomic_dec_and_test(&diagpkt_count))
+ qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
+
+ qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
+
+ /*
+ * Return all diag_clients of this device. There should be none,
+ * as we are "guaranteed" that no clients are still open
+ */
+ while (dd->diag_client)
+ return_client(dd->diag_client);
+
+ /* Now clean up all unused client structs */
+ while (client_pool) {
+ dc = client_pool;
+ client_pool = dc->next;
+ kfree(dc);
+ }
+ /* Clean up observer list */
+ qib_unregister_observers(dd);
+}
+
+/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
+ *
+ * @dd: the qlogic_ib device
+ * @offs: the offset in chip-space
+ * @cntp: Pointer to max (byte) count for transfer starting at offset
+ * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
+ * mapping. It is needed because with the use of PAT for control of
+ * write-combining, the logically contiguous address-space of the chip
+ * may be split into virtually non-contiguous spaces, with different
+ * attributes, which are then mapped to contiguous physical space
+ * starting from the first BAR.
+ *
+ * The code below makes the same assumptions as were made in
+ * init_chip_wc_pat() (qib_init.c), copied here:
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ *
+ * If cntp is non-NULL, returns how many bytes from offset can be accessed
+ * Returns 0 if the offset is not mapped.
+ */
+static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
+ u32 *cntp)
+{
+ u32 kreglen;
+ u32 snd_bottom, snd_lim = 0;
+ u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
+ u32 __iomem *map = NULL;
+ u32 cnt = 0;
+
+ /* First, simplest case, offset is within the first map. */
+ kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
+ if (offset < kreglen) {
+ map = krb32 + (offset / sizeof(u32));
+ cnt = kreglen - offset;
+ goto mapped;
+ }
+
+ /*
+ * Next check for user regs, the next most common case,
+ * and a cheap check because if they are not in the first map
+ * they are last in chip.
+ */
+ if (dd->userbase) {
+ /* If user regs mapped, they are after send, so set limit. */
+ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+ snd_lim = dd->uregbase;
+ krb32 = (u32 __iomem *)dd->userbase;
+ if (offset >= dd->uregbase && offset < ulim) {
+ map = krb32 + (offset - dd->uregbase) / sizeof(u32);
+ cnt = ulim - offset;
+ goto mapped;
+ }
+ }
+
+ /*
+ * Lastly, check for offset within Send Buffers.
+ * This is gnarly because struct devdata is deliberately vague
+ * about things like 7322 VL15 buffers, and we are not in
+ * chip-specific code here, so should not make many assumptions.
+ * The one we _do_ make is that the only chip that has more sndbufs
+ * than we admit is the 7322, and it has userregs above that, so
+ * we know the snd_lim.
+ */
+ /* Assume 2K buffers are first. */
+ snd_bottom = dd->pio2k_bufbase;
+ if (snd_lim == 0) {
+ u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+ snd_lim = snd_bottom + tot2k;
+ }
+ /* If 4k buffers exist, account for them by bumping
+ * appropriate limit.
+ */
+ if (dd->piobcnt4k) {
+ u32 tot4k = dd->piobcnt4k * dd->align4k;
+ u32 offs4k = dd->piobufbase >> 32;
+ if (snd_bottom > offs4k)
+ snd_bottom = offs4k;
+ else {
+ /* 4k above 2k. Bump snd_lim, if needed*/
+ if (!dd->userbase)
+ snd_lim = offs4k + tot4k;
+ }
+ }
+ /*
+ * Judgement call: can we ignore the space between SendBuffs and
+ * UserRegs, where we would like to see vl15 buffs, but not more?
+ */
+ if (offset >= snd_bottom && offset < snd_lim) {
+ offset -= snd_bottom;
+ map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
+ cnt = snd_lim - offset;
+ }
+
+mapped:
+ if (cntp)
+ *cntp = cnt;
+ return map;
+}
+
+/*
+ * qib_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip. This is usually used for a single qword
+ *
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data = readq(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(u64))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem64 - write a 64-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: the number of bytes to copy (multiple of 32 bits)
+ *
+ * This is usually used for a single qword
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+
+static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data;
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writeq(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_read_umem32 - read a 32-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy
+ *
+ * read 32 bit values, not 64 bit; for memories that only
+ * support 32 bit reads; usually a single dword.
+ */
+static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u32 data = readl(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem32 - write a 32-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: number of bytes to copy
+ *
+ * write 32 bit values, not 64 bit; for memories that only
+ * support 32 bit write; usually a single dword.
+ */
+
+static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ while (reg_addr < reg_end) {
+ u32 data;
+
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writel(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp)
+{
+ int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
+ struct qib_devdata *dd;
+ struct qib_diag_client *dc;
+ int ret;
+
+ mutex_lock(&qib_mutex);
+
+ dd = qib_lookup(unit);
+
+ if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
+ !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dc = get_client(dd);
+ if (!dc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ dc->next = dd->diag_client;
+ dd->diag_client = dc;
+ fp->private_data = dc;
+ ret = 0;
+bail:
+ mutex_unlock(&qib_mutex);
+
+ return ret;
+}
+
+/**
+ * qib_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: qib_diag_xpkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t qib_diagpkt_write(struct file *fp,
+ const char __user *data,
+ size_t count, loff_t *off)
+{
+ u32 __iomem *piobuf;
+ u32 plen, clen, pbufn;
+ struct qib_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ ssize_t ret = 0;
+
+ if (count != sizeof(dp)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ dd = qib_lookup(dp.unit);
+ if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+ if (!(dd->flags & QIB_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (dp.version != _DIAG_XPKT_VERS) {
+ qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
+ dp.version);
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (!dp.port || dp.port > dd->num_pports) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd = &dd->pport[dp.port - 1];
+
+ /* need total length before first word written */
+ /* +1 word is for the qword padding */
+ plen = sizeof(u32) + dp.len;
+ clen = dp.len >> 2;
+
+ if ((plen + 4) > ppd->ibmaxlen) {
+ ret = -EINVAL;
+ goto bail; /* before writing pbc */
+ }
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmpbuf,
+ (const void __user *) (unsigned long) dp.data,
+ dp.len)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ plen >>= 2; /* in dwords */
+
+ if (dp.pbc_wd == 0)
+ dp.pbc_wd = plen;
+
+ piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
+ if (!piobuf) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ /* disarm it just to be extra sure */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
+
+ /* disable header check on pbufn for this packet */
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+
+ writeq(dp.pbc_wd, piobuf);
+ /*
+ * Copy all but the trigger word, then flush, so it's written
+ * to chip before trigger word, then write trigger word, then
+ * flush again, so packet is sent.
+ */
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
+ qib_flush_wc();
+ __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ } else
+ qib_pio_copy(piobuf + 2, tmpbuf, clen);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ /*
+ * Ensure buffer is written to the chip, then re-enable
+ * header checks (if supported by chip). The txchk
+ * code will ensure seen by chip before returning.
+ */
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ ret = sizeof(dp);
+
+bail:
+ vfree(tmpbuf);
+ return ret;
+}
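+
+/*
+ * Worked example (hypothetical values): for a diag packet with
+ * dp.len = 256 bytes, clen = 256 >> 2 = 64 dwords are copied from user
+ * space, plen = (sizeof(u32) + 256) >> 2 = 65 dwords including the PBC
+ * word, and if dp.pbc_wd was passed as 0 it defaults to that plen.
+ */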
+
+static int qib_diag_release(struct inode *in, struct file *fp)
+{
+ mutex_lock(&qib_mutex);
+ return_client(fp->private_data);
+ fp->private_data = NULL;
+ mutex_unlock(&qib_mutex);
+ return 0;
+}
+
+/*
+ * Chip-specific code calls this to register its interest in
+ * a specific range.
+ */
+struct diag_observer_list_elt {
+ struct diag_observer_list_elt *next;
+ const struct diag_observer *op;
+};
+
+int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op)
+{
+	struct diag_observer_list_elt *olp;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!dd || !op)
+		goto bail;
+	ret = -ENOMEM;
+	olp = vmalloc(sizeof(*olp));
+	if (!olp) {
+		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+		goto bail;
+	}
+
+	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+	olp->op = op;
+	olp->next = dd->diag_observer_list;
+	dd->diag_observer_list = olp;
+	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+	ret = 0;
+bail:
+ return ret;
+}
+
+/* Remove all registered observers when device is closed */
+static void qib_unregister_observers(struct qib_devdata *dd)
+{
+ struct diag_observer_list_elt *olp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ while (olp) {
+ /* Pop one observer, let go of lock */
+ dd->diag_observer_list = olp->next;
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ vfree(olp);
+ /* try again. */
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ }
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+}
+
+/*
+ * Find the observer, if any, for the specified address.  The initial
+ * implementation is a simple stack of observers.  This must be called
+ * with the diag transaction lock held.
+ */
+static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
+ u32 addr)
+{
+ struct diag_observer_list_elt *olp;
+ const struct diag_observer *op = NULL;
+
+ olp = dd->diag_observer_list;
+ while (olp) {
+ op = olp->op;
+ if (addr >= op->bottom && addr <= op->top)
+ break;
+ olp = olp->next;
+ }
+ if (!olp)
+ op = NULL;
+
+ return op;
+}
+
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY && (*off || count != 8))
+ ret = -EINVAL; /* prevent cat /dev/qib_diag* */
+ else {
+ unsigned long flags;
+ u64 data64 = 0;
+ int use_32;
+ const struct diag_observer *op;
+
+ use_32 = (count % 8) || (*off % 8);
+ ret = -1;
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ /*
+ * Check for observer on this address range.
+ * we only support a single 32 or 64-bit read
+ * via observer, currently.
+ */
+ op = diag_get_observer(dd, *off);
+ if (op) {
+ u32 offset = *off;
+ ret = op->hook(dd, op, offset, &data64, 0, use_32);
+ }
+ /*
+ * We need to release lock before any copy_to_user(),
+ * whether implicit in qib_read_umem* or explicit below.
+ */
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit rd
+ */
+ ret = qib_read_umem32(dd, data, (u32) *off,
+ count);
+ else
+ ret = qib_read_umem64(dd, data, (u32) *off,
+ count);
+ } else if (ret == count) {
+ /* Below finishes case where observer existed */
+ ret = copy_to_user(data, &data64, use_32 ?
+ sizeof(u32) : sizeof(u64));
+ if (ret)
+ ret = -EFAULT;
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == OPENED)
+ dc->state = INIT;
+ }
+bail:
+ return ret;
+}
+
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY &&
+ ((*off || count != 8) || dc->state != INIT))
+ /* No writes except second-step of init seq */
+ ret = -EINVAL; /* before any other write allowed */
+ else {
+ unsigned long flags;
+ const struct diag_observer *op = NULL;
+ int use_32 = (count % 8) || (*off % 8);
+
+ /*
+ * Check for observer on this address range.
+ * We only support a single 32 or 64-bit write
+ * via observer, currently. This helps, because
+ * we would otherwise have to jump through hoops
+ * to make "diag transaction" meaningful when we
+ * cannot do a copy_from_user while holding the lock.
+ */
+ if (count == 4 || count == 8) {
+ u64 data64;
+ u32 offset = *off;
+ ret = copy_from_user(&data64, data, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ op = diag_get_observer(dd, *off);
+ if (op)
+				ret = op->hook(dd, op, offset, &data64, ~0ULL,
+ use_32);
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ }
+
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit write
+ */
+ ret = qib_write_umem32(dd, (u32) *off, data,
+ count);
+ else
+ ret = qib_write_umem64(dd, (u32) *off, data,
+ count);
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == INIT)
+ dc->state = READY; /* all read/write OK now */
+ }
+bail:
+ return ret;
+}
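+
+/*
+ * Illustrative user-space view of the state machine enforced above (device
+ * name follows the "cat /dev/qib_diag*" comment in qib_diag_read()):
+ *
+ *	fd = open("/dev/qib_diag0", O_RDWR);
+ *	pread(fd, buf, 8, 0);	moves the client from OPENED to INIT
+ *	pwrite(fd, buf, 8, 0);	moves the client from INIT to READY
+ *
+ * Both transfers go through the normal chip-space/observer path; only
+ * after they succeed are arbitrary 32/64-bit aligned reads and writes of
+ * chip space accepted.
+ */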
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 00000000000..2920bb39a65
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "qib_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return (u64) cpu_addr;
+}
+
+static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = BAD_DMA_ADDRESS;
+ goto done;
+ }
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+ /* TODO: handle highmem pages */
+
+done:
+ return addr;
+}
+
+static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64) page_address(sg_page(sg));
+ /* TODO: handle highmem pages */
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void qib_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ u64 addr = (u64) page_address(sg_page(sg));
+
+ if (addr)
+ addr += sg->offset;
+ return addr;
+}
+
+static unsigned int qib_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops qib_dma_mapping_ops = {
+ .mapping_error = qib_mapping_error,
+ .map_single = qib_dma_map_single,
+ .unmap_single = qib_dma_unmap_single,
+ .map_page = qib_dma_map_page,
+ .unmap_page = qib_dma_unmap_page,
+ .map_sg = qib_map_sg,
+ .unmap_sg = qib_unmap_sg,
+ .dma_address = qib_sg_dma_address,
+ .dma_len = qib_sg_dma_len,
+ .sync_single_for_cpu = qib_sync_single_for_cpu,
+ .sync_single_for_device = qib_sync_single_for_device,
+ .alloc_coherent = qib_dma_alloc_coherent,
+ .free_coherent = qib_dma_free_coherent
+};
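+
+/*
+ * Because these ops return kernel virtual addresses, the "mapping" is
+ * effectively an identity transform: for example, map_single() on a
+ * buffer at kernel virtual address X simply reports dma_addr == (u64) X,
+ * and the dma_address op is just page_address(sg_page(sg)) + sg->offset.
+ */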
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 00000000000..f15ce076ac4
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ib_qib_version[] = QIB_IDSTR "\n";
+
+DEFINE_SPINLOCK(qib_devs_lock);
+LIST_HEAD(qib_dev_list);
+DEFINE_MUTEX(qib_mutex); /* general driver use */
+
+unsigned qib_ibmtu;
+module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
+MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
+
+unsigned qib_compat_ddr_negotiate = 1;
+module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+ "Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
+MODULE_DESCRIPTION("QLogic IB driver");
+
+/*
+ * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers. This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define QIB_PIO_MAXIBHDR 128
+
+struct qlogic_ib_stats qib_stats;
+
+const char *qib_get_unit_name(int unit)
+{
+ static char iname[16];
+
+ snprintf(iname, sizeof iname, "infinipath%u", unit);
+ return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int qib_count_active_units(void)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx, nunits_active = 0;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE))) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int qib_count_units(int *npresentp, int *nupp)
+{
+ int nunits = 0, npresent = 0, nup = 0;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int pidx;
+ struct qib_pportdata *ppd;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ nunits++;
+ if ((dd->flags & QIB_PRESENT) && dd->kregbase)
+ npresent++;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE)))
+ nup++;
+ }
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+
+ return nunits;
+}
+
+/**
+ * qib_wait_linkstate - wait for an IB link state change to occur
+ * @ppd: the qlogic_ib per-port data
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route.  Currently used only by
+ * qib_set_linkstate.  Returns 0 if the state is reached, otherwise
+ * -ETIMEDOUT.  The state parameter can have multiple bits set, to match
+ * any of several transitions.
+ */
+int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (ppd->state_wanted) {
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ ppd->state_wanted = state;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wait_event_interruptible_timeout(ppd->state_wait,
+ (ppd->lflags & state),
+ msecs_to_jiffies(msecs));
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->state_wanted = 0;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!(ppd->lflags & state))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+bail:
+ return ret;
+}
+
+int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ switch (newstate) {
+ case QIB_IB_LINKDOWN_ONLY:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_SLEEP:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_DISABLE:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKARM:
+ if (ppd->lflags & QIBL_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /*
+ * Since the port can be ACTIVE when we ask for ARMED,
+ * clear QIBL_LINKV so we can wait for a transition.
+ * If the link isn't ARMED, then something else happened
+ * and there is no point waiting for ARMED.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKV;
+ break;
+
+ case QIB_IB_LINKACTIVE:
+ if (ppd->lflags & QIBL_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & QIBL_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = qib_wait_linkstate(ppd, lstate, 10);
+
+bail:
+ return ret;
+}
+
+/*
+ * Get address of eager buffer from it's index (allocated in chunks, not
+ * contiguous).
+ */
+static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
+{
+ const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+ const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+
+ return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+}
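+
+/*
+ * For example (illustrative numbers): with rcvegrbufs_perchunk = 16,
+ * etail = 37 lands in chunk 2 at index 5, i.e. the buffer starts at
+ * rcd->rcvegrbuf[2] + 5 * dd->rcvegrbufsize.
+ */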
+
+/*
+ * Returns 1 if error was a CRC, else 0.
+ * Needed for some chip's synthesized error counters.
+ */
+static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
+ u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
+ struct qib_message_header *hdr)
+{
+ u32 ret = 0;
+
+ if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
+ ret = 1;
+ return ret;
+}
+
+/*
+ * qib_kreceive - receive a packet
+ * @rcd: the qlogic_ib context
+ * @llic: gets count of good packets needed to clear lli
+ * (used with chips that need to track crcs for lli)
+ *
+ * Called from the interrupt handler for errors or receive interrupt.
+ * Returns the number of CRC error packets, needed by some chips for
+ * local link integrity tracking.  CRCs are adjusted down by following
+ * good packets, if any, and the count of good packets is also tracked.
+ */
+u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ __le32 *rhf_addr;
+ void *ebuf;
+ const u32 rsize = dd->rcvhdrentsize; /* words */
+ const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
+ u32 etail = -1, l, hdrqtail;
+ struct qib_message_header *hdr;
+ u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
+ int last;
+ u64 lval;
+ struct qib_qp *qp, *nqp;
+
+ l = rcd->head;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+ if (seq != rcd->seq_cnt)
+ goto bail;
+ hdrqtail = 0;
+ } else {
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+ if (l == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+ }
+
+ for (last = 0, i = 1; !last; i += !last) {
+ hdr = dd->f_get_msgheader(dd, rhf_addr);
+ eflags = qib_hdrget_err_flags(rhf_addr);
+ etype = qib_hdrget_rcv_type(rhf_addr);
+ /* total length */
+ tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ ebuf = NULL;
+ if ((dd->flags & QIB_NODMA_RTAIL) ?
+ qib_hdrget_use_egr_buf(rhf_addr) :
+ (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
+ etail = qib_hdrget_index(rhf_addr);
+ updegr = 1;
+ if (tlen > sizeof(*hdr) ||
+ etype >= RCVHQ_RCV_TYPE_NON_KD)
+ ebuf = qib_get_egrbuf(rcd, etail);
+ }
+ if (!eflags) {
+ u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
+
+ if (lrh_len != tlen) {
+ qib_stats.sps_lenerrs++;
+ goto move_along;
+ }
+ }
+ if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
+ ebuf == NULL &&
+ tlen > (dd->rcvhdrentsize - 2 + 1 -
+ qib_hdrget_offset(rhf_addr)) << 2) {
+ goto move_along;
+ }
+
+ /*
+ * Both tiderr and qibhdrerr are set for all plain IB
+ * packets; only qibhdrerr should be set.
+ */
+ if (unlikely(eflags))
+ crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ etail, rhf_addr, hdr);
+ else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ qib_ib_rcv(rcd, hdr, ebuf, tlen);
+ if (crcs)
+ crcs--;
+ else if (llic && *llic)
+ --*llic;
+ }
+move_along:
+ l += rsize;
+ if (l >= maxcnt)
+ l = 0;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ } else if (l == hdrqtail)
+ last = 1;
+ /*
+ * Update head regs etc., every 16 packets, if not last pkt,
+ * to help prevent rcvhdrq overflows, when many packets
+ * are processed and queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ lval = l;
+ if (!last && !(i & 0xf)) {
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ updegr = 0;
+ }
+ }
+
+ rcd->head = l;
+ rcd->pkt_count += i;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & QIB_R_RSP_NAK) {
+ qp->r_flags &= ~QIB_R_RSP_NAK;
+ qib_send_rc_ack(qp);
+ }
+ if (qp->r_flags & QIB_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~QIB_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_OR_FLUSH_SEND)
+ qib_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+
+bail:
+ /* Report number of packets consumed */
+ if (npkts)
+ *npkts = i;
+
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ lval = (u64)rcd->head | dd->rhdrhead_intr_off;
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ return crcs;
+}
+
+/**
+ * qib_set_mtu - set the MTU
+ * @ppd: the perport data
+ * @arg: the new MTU
+ *
+ * We can handle "any" incoming size; the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link INIT state...
+ */
+int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
+{
+ u32 piosize;
+ int ret, chk;
+
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ chk = ib_mtu_enum_to_int(qib_ibmtu);
+ if (chk > 0 && arg > chk) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ piosize = ppd->ibmaxlen;
+ ppd->ibmtu = arg;
+
+ if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != ppd->init_ibmaxlen) {
+ if (arg > piosize && arg <= ppd->init_ibmaxlen)
+ piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+ } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
+ piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
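+
+/*
+ * Worked example with hypothetical sizes: if ppd->ibmaxlen is currently
+ * 4224 bytes and the new MTU arg is 2048, the first branch is skipped
+ * (2048 < 4224 - QIB_PIO_MAXIBHDR) and, since 2048 + 128 != 4224,
+ * ibmaxlen is reduced to 2048 + 128 - 2 * sizeof(u32) = 2168 bytes.
+ */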
+
+int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct qib_devdata *dd = ppd->dd;
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
+ lid | (~((1U << lmc) - 1)) << 16);
+
+ qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
+ dd->unit, ppd->port, lid);
+
+ return 0;
+}
+
+/*
+ * The following routines deal with the "obviously simple" task of
+ * overriding the state of the LEDs, which normally indicate link
+ * physical and logical status.  The complications arise in dealing
+ * with different hardware mappings and the board-dependent routine
+ * being called from interrupts.  And then there's the requirement to
+ * _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
+
+static void qib_run_led_override(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff;
+ int ph_idx;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ ph_idx = ppd->led_override_phase++ & 1;
+ ppd->led_override = ppd->led_override_vals[ph_idx];
+ timeoff = ppd->led_override_timeoff;
+
+ dd->f_setextled(ppd, 1);
+ /*
+ * don't re-fire the timer if user asked for it to be off; we let
+ * it fire one more time after they turn it off to simplify
+ */
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+}
+
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff, freq;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ /* First check if we are blinking. If not, use 1HZ polling */
+ timeoff = HZ;
+ freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+ if (freq) {
+ /* For blink, set each phase from one nybble of val */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = (val >> 4) & 0xF;
+ timeoff = (HZ << 4)/freq;
+ } else {
+ /* Non-blink set both phases the same. */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = val & 0xF;
+ }
+ ppd->led_override_timeoff = timeoff;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the function will be called soon, to look at our request.
+ */
+ if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
+ /* Need to start timer */
+ init_timer(&ppd->led_override_timer);
+ ppd->led_override_timer.function = qib_run_led_override;
+ ppd->led_override_timer.data = (unsigned long) ppd;
+ ppd->led_override_timer.expires = jiffies + 1;
+ add_timer(&ppd->led_override_timer);
+ } else {
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + 1);
+ atomic_dec(&ppd->led_override_timer_active);
+ }
+}
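+
+/*
+ * Example encoding (hypothetical value): val = 0x0514 requests blinking
+ * with a frequency field of 5, LED phase 0 = 0x4 and phase 1 = 0x1, so
+ * the override timer re-fires every (HZ << 4) / 5 jiffies; val = 0
+ * cancels the override after one final timer pass.
+ */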
+
+/**
+ * qib_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user contexts are open that use chip resources
+ */
+int qib_reset_device(int unit)
+{
+ int ret, i;
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
+ qib_devinfo(dd->pcidev, "Invalid unit number %u or "
+ "not initialized or not present\n", unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd)
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+ continue;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ /* Need to stop LED timer, _then_ shut off LEDs */
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+
+ /* Shut off LEDs after we are sure timer is not running */
+ ppd->led_override = LED_OVER_BOTH_OFF;
+ dd->f_setextled(ppd, 0);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+ }
+
+ ret = dd->f_reset(dd);
+ if (ret == 1)
+ ret = qib_init(dd, 1);
+ else
+ ret = -EAGAIN;
+ if (ret)
+ qib_dev_err(dd, "Reinitialize unit %u after "
+ "reset failed with %d\n", unit, ret);
+ else
+ qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
+ "resetting\n", unit);
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 00000000000..92d9cfe98a6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * Functions specific to the serial EEPROM on cards handled by ib_qib.
+ * The actual serial interface code is in qib_twsi.c; this file is a client of it.
+ */
+
+/**
+ * qib_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: address to read from
+ * @buff: where to store result
+ * @len: number of bytes to receive
+ */
+int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
+ void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for read failed\n");
+ else
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
+ eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Actually update the eeprom, first doing write enable if
+ * needed, then restoring write enable state.
+ * Must be called with eep_lock held
+ */
+static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
+ const void *buf, int len)
+{
+ int ret, pwen;
+
+ pwen = dd->f_eeprom_wen(dd, 1);
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for write failed\n");
+ else
+ ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
+ offset, buf, len);
+ dd->f_eeprom_wen(dd, pwen);
+ return ret;
+}
+
+/**
+ * qib_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: where to place data
+ * @buff: data to write
+ * @len: number of bytes to write
+ */
+int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
+ const void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+static u8 flash_csum(struct qib_flash *ifp, int adjust)
+{
+ u8 *ip = (u8 *) ifp;
+ u8 csum = 0, len;
+
+ /*
+ * Limit length checksummed to max length of actual data.
+ * Checksum of erased eeprom will still be bad, but we avoid
+ * reading past the end of the buffer we were passed.
+ */
+ len = ifp->if_length;
+ if (len > sizeof(struct qib_flash))
+ len = sizeof(struct qib_flash);
+ while (len--)
+ csum += *ip++;
+ csum -= ifp->if_csum;
+ csum = ~csum;
+ if (adjust)
+ ifp->if_csum = csum;
+
+ return csum;
+}
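+
+/*
+ * Worked example (arbitrary numbers): if the checksummed bytes of the
+ * flash image, excluding if_csum itself, sum to 0x34 modulo 256, then
+ * flash_csum(ifp, 1) stores ~0x34 = 0xCB in if_csum, and a subsequent
+ * flash_csum(ifp, 0) returns 0xCB, matching the stored value.
+ */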
+
+/**
+ * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
+ * @dd: the qlogic_ib device
+ *
+ * We have the capability to use the nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
+ */
+void qib_get_eeprom_info(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ __be64 guid;
+ int len, eep_stat;
+ u8 csum, *bguid;
+ int t = dd->unit;
+ struct qib_devdata *dd0 = qib_lookup(0);
+
+ if (t && dd0->nguid > 1 && t <= dd0->nguid) {
+ u8 oguid;
+ dd->base_guid = dd0->base_guid;
+ bguid = (u8 *) &dd->base_guid;
+
+ oguid = bguid[7];
+ bguid[7] += t;
+ if (oguid > bguid[7]) {
+ if (bguid[6] == 0xff) {
+ if (bguid[5] == 0xff) {
+ qib_dev_err(dd, "Can't set %s GUID"
+ " from base, wraps to"
+ " OUI!\n",
+ qib_get_unit_name(t));
+ dd->base_guid = 0;
+ goto bail;
+ }
+ bguid[5]++;
+ }
+ bguid[6]++;
+ }
+ dd->nguid = 1;
+ goto bail;
+ }
+
+ /*
+ * Read full flash, not just currently used part, since it may have
+ * been written with a newer definition.
+	 */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for GUID\n", len);
+ goto bail;
+ }
+
+ /*
+ * Use "public" eeprom read function, which does locking and
+ * figures out device. This will migrate to chip-specific.
+ */
+ eep_stat = qib_eeprom_read(dd, 0, buf, len);
+
+ if (eep_stat) {
+ qib_dev_err(dd, "Failed reading GUID from eeprom\n");
+ goto done;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
+ "0x%x, not 0x%x\n", csum, ifp->if_csum);
+ goto done;
+ }
+ if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+ *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
+ qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
+ *(unsigned long long *) ifp->if_guid);
+ /* don't allow GUID if all 0 or all 1's */
+ goto done;
+ }
+
+ /* complain, but allow it */
+ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
+ qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
+ "default, probably not correct!\n",
+ *(unsigned long long *) ifp->if_guid);
+
+ bguid = ifp->if_guid;
+ if (!bguid[0] && !bguid[1] && !bguid[2]) {
+ /*
+ * Original incorrect GUID format in flash; fix in
+ * core copy, by shifting up 2 octets; don't need to
+ * change top octet, since both it and shifted are 0.
+ */
+ bguid[1] = bguid[3];
+ bguid[2] = bguid[4];
+ bguid[3] = 0;
+ bguid[4] = 0;
+ guid = *(__be64 *) ifp->if_guid;
+ } else
+ guid = *(__be64 *) ifp->if_guid;
+ dd->base_guid = guid;
+ dd->nguid = ifp->if_numguid;
+ /*
+ * Things are slightly complicated by the desire to transparently
+ * support both the Pathscale 10-digit serial number and the QLogic
+ * 13-character version.
+ */
+ if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
+ ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
+ char *snp = dd->serial;
+
+ /*
+ * This board has a Serial-prefix, which is stored
+ * elsewhere for backward-compatibility.
+ */
+ memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+ snp[sizeof ifp->if_sprefix] = '\0';
+ len = strlen(snp);
+ snp += len;
+ len = (sizeof dd->serial) - len;
+ if (len > sizeof ifp->if_serial)
+ len = sizeof ifp->if_serial;
+ memcpy(snp, ifp->if_serial, len);
+ } else
+ memcpy(dd->serial, ifp->if_serial,
+ sizeof ifp->if_serial);
+ if (!strstr(ifp->if_comment, "Tested successfully"))
+ qib_dev_err(dd, "Board SN %s did not pass functional "
+ "test: %s\n", dd->serial, ifp->if_comment);
+
+ memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+ /*
+ * Power-on (actually "active") hours are kept as little-endian value
+ * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+ * atomic_t while running.
+ */
+ atomic_set(&dd->active_time, 0);
+ dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+
+done:
+ vfree(buf);
+
+bail:;
+}
+
+/**
+ * qib_update_eeprom_log - copy active-time and error counters to eeprom
+ * @dd: the qlogic_ib device
+ *
+ * Although the time is kept as seconds in the qib_devdata struct, it is
+ * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+ * First-cut code reads whole (expected) struct qib_flash, modifies,
+ * re-writes. Future direction: read/write only what we need, assuming
+ * that the EEPROM had to have been "good enough" for driver init, and
+ * if not, we aren't making it worse.
+ *
+ */
+int qib_update_eeprom_log(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ int len, hi_water;
+ uint32_t new_time, new_hrs;
+ u8 csum;
+ int ret, idx;
+ unsigned long flags;
+
+ /* first, check if we actually need to do anything. */
+ ret = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ if (dd->eep_st_new_errs[idx]) {
+ ret = 1;
+ break;
+ }
+ }
+ new_time = atomic_read(&dd->active_time);
+
+ if (ret == 0 && new_time < 3600)
+ goto bail;
+
+ /*
+ * The quick-check above determined that there is something worthy
+	 * of logging, so get the current contents and take a more detailed look.
+	 * Read the full flash, not just the currently used part, since it may
+	 * have been written with a newer definition.
+ */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ ret = 1;
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for logging\n", len);
+ goto bail;
+ }
+
+ /* Grab semaphore and read current EEPROM. If we get an
+ * error, let go, but if not, keep it until we finish write.
+ */
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret) {
+ qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+ goto free_bail;
+ }
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+ if (ret) {
+ mutex_unlock(&dd->eep_lock);
+		qib_dev_err(dd, "Unable to read EEPROM for logging\n");
+ goto free_bail;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ mutex_unlock(&dd->eep_lock);
+ qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+ csum, ifp->if_csum);
+ ret = 1;
+ goto free_bail;
+ }
+ hi_water = 0;
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ int new_val = dd->eep_st_new_errs[idx];
+ if (new_val) {
+ /*
+			 * If we have seen any errors, add to the EEPROM values.
+			 * We need to saturate at 0xFF (255), and we would also
+			 * need to adjust the checksum if we were trying to
+			 * minimize EEPROM traffic.
+ * Note that we add to actual current count in EEPROM,
+ * in case it was altered while we were running.
+ */
+ new_val += ifp->if_errcntp[idx];
+ if (new_val > 0xFF)
+ new_val = 0xFF;
+ if (ifp->if_errcntp[idx] != new_val) {
+ ifp->if_errcntp[idx] = new_val;
+ hi_water = offsetof(struct qib_flash,
+ if_errcntp) + idx;
+ }
+ /*
+ * update our shadow (used to minimize EEPROM
+ * traffic), to match what we are about to write.
+ */
+ dd->eep_st_errs[idx] = new_val;
+ dd->eep_st_new_errs[idx] = 0;
+ }
+ }
+ /*
+ * Now update active-time. We would like to round to the nearest hour
+ * but unless atomic_t are sure to be proper signed ints we cannot,
+ * because we need to account for what we "transfer" to EEPROM and
+ * if we log an hour at 31 minutes, then we would need to set
+ * active_time to -29 to accurately count the _next_ hour.
+ */
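+	/*
+	 * For example, with new_time = 7322 seconds: new_hrs = 2, so 7200
+	 * seconds are subtracted from active_time and the remaining 122
+	 * seconds carry over toward the next logged hour.
+	 */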
+ if (new_time >= 3600) {
+ new_hrs = new_time / 3600;
+ atomic_sub((new_hrs * 3600), &dd->active_time);
+ new_hrs += dd->eep_hrs;
+ if (new_hrs > 0xFFFF)
+ new_hrs = 0xFFFF;
+ dd->eep_hrs = new_hrs;
+ if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+ ifp->if_powerhour[0] = new_hrs & 0xFF;
+ hi_water = offsetof(struct qib_flash, if_powerhour);
+ }
+ if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+ ifp->if_powerhour[1] = new_hrs >> 8;
+ hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+ }
+ }
+ /*
+ * There is a tiny possibility that we could somehow fail to write
+ * the EEPROM after updating our shadows, but problems from holding
+ * the spinlock too long are a much bigger issue.
+ */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ if (hi_water) {
+		/* we made some change to the data, update cksum and write */
+ csum = flash_csum(ifp, 1);
+ ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+ }
+ mutex_unlock(&dd->eep_lock);
+ if (ret)
+ qib_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+ vfree(buf);
+bail:
+ return ret;
+}
+
+/**
+ * qib_inc_eeprom_err - increment one of the four error counters
+ * that are logged to EEPROM.
+ * @dd: the qlogic_ib device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8-bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+{
+ uint new_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ new_val = dd->eep_st_new_errs[eidx] + incr;
+ if (new_val > 255)
+ new_val = 255;
+ dd->eep_st_new_errs[eidx] = new_val;
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 00000000000..a142a9eb522
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/jiffies.h>
+#include <asm/pgtable.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+#include "qib_user_sdma.h"
+
+static int qib_open(struct inode *, struct file *);
+static int qib_close(struct inode *, struct file *);
+static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t);
+static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static int qib_mmapf(struct file *, struct vm_area_struct *);
+
+static const struct file_operations qib_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_write,
+ .aio_write = qib_aio_write,
+ .open = qib_open,
+ .release = qib_close,
+ .poll = qib_poll,
+ .mmap = qib_mmapf
+};
+
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+ struct page *page;
+ u64 paddr = 0;
+
+ page = vmalloc_to_page(p);
+ if (page)
+ paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+static int qib_get_base_info(struct file *fp, void __user *ubase,
+ size_t ubase_size)
+{
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ int ret = 0;
+ struct qib_base_info *kinfo = NULL;
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ unsigned subctxt_cnt;
+ int shared, master;
+ size_t sz;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ if (!subctxt_cnt) {
+ shared = 0;
+ master = 0;
+ subctxt_cnt = 1;
+ } else {
+ shared = 1;
+ master = !subctxt_fp(fp);
+ }
+
+ sz = sizeof(*kinfo);
+ /* If context sharing is not requested, allow the old size structure */
+ if (!shared)
+ sz -= 7 * sizeof(u64);
+ if (ubase_size < sz) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
+ if (kinfo == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = dd->f_get_base_info(rcd, kinfo);
+ if (ret < 0)
+ goto bail;
+
+ kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
+ kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
+ kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
+ kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
+ /*
+ * have to mmap whole thing
+ */
+ kinfo->spi_rcv_egrbuftotlen =
+ rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+ kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
+ kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
+ rcd->rcvegrbuf_chunks;
+ kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
+ if (master)
+ kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
+ /*
+ * for this use, may be cfgctxts summed over all chips that
+	 * For this use, this may be cfgctxts summed over all chips that
+	 * are configured and present.
+ kinfo->spi_nctxts = dd->cfgctxts;
+ /* unit (chip/board) our context is on */
+ kinfo->spi_unit = dd->unit;
+ kinfo->spi_port = ppd->port;
+ /* for now, only a single page */
+ kinfo->spi_tid_maxsize = PAGE_SIZE;
+
+ /*
+ * Doing this per context, and based on the skip value, etc. This has
+ * to be the actual buffer size, since the protocol code treats it
+ * as an array.
+ *
+ * These have to be set to user addresses in the user code via mmap.
+ * These values are used on return to user code for the mmap target
+ * addresses only. For 32 bit, same 44 bit address problem, so use
+ * the physical address, not virtual. Before 2.6.11, using the
+ * page_address() macro worked, but in 2.6.11, even that returns the
+ * full 64 bit address (upper bits all 1's). So far, using the
+ * physical addresses (or chip offsets, for chip mapping) works, but
+ * no doubt some future kernel release will change that, and we'll be
+ * on to yet another method of dealing with this.
+ * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
+ * since the chips with non-zero rhf_offset don't normally
+ * enable tail register updates to host memory, but for testing,
+ * both can be enabled and used.
+ */
+ kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
+ kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
+ kinfo->spi_rhf_offset = dd->rhf_offset;
+ kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
+ kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
+ /* setup per-unit (not port) status area for user programs */
+ kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
+ (char *) ppd->statusp -
+ (char *) dd->pioavailregs_dma;
+ kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!shared) {
+ kinfo->spi_piocnt = rcd->piocnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs;
+ kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
+ } else if (master) {
+ kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
+ (rcd->piocnt % subctxt_cnt);
+ /* Master's PIO buffers are after all the slave's */
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign *
+ (rcd->piocnt - kinfo->spi_piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign * kinfo->spi_piocnt * slave;
+ }
+
+ if (shared) {
+ kinfo->spi_sendbuf_status =
+ cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
+ /* only spi_subctxt_* fields should be set in this block! */
+ kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
+
+ kinfo->spi_subctxt_rcvegrbuf =
+ cvt_kvaddr(rcd->subctxt_rcvegrbuf);
+ kinfo->spi_subctxt_rcvhdr_base =
+ cvt_kvaddr(rcd->subctxt_rcvhdr_base);
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values.
+ */
+ kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
+ dd->palign;
+ kinfo->spi_pioalign = dd->palign;
+ kinfo->spi_qpair = QIB_KD_QP;
+ /*
+ * user mode PIO buffers are always 2KB, even when 4KB can
+ * be received, and sent via the kernel; this is ibmaxlen
+ * for 2K MTU.
+ */
+ kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
+ kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
+ kinfo->spi_ctxt = rcd->ctxt;
+ kinfo->spi_subctxt = subctxt_fp(fp);
+ kinfo->spi_sw_version = QIB_KERN_SWVERSION;
+ kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
+ kinfo->spi_hw_version = dd->revision;
+
+ if (master)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
+
+ sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+ if (copy_to_user(ubase, kinfo, sz))
+ ret = -EFAULT;
+bail:
+ kfree(kinfo);
+ return ret;
+}
+
+/**
+ * qib_tid_update - update a context TID
+ * @rcd: the context
+ * @fp: the qib device file
+ * @ti: the TID information
+ *
+ * The new implementation as of Oct 2004 is that the driver assigns
+ * the tid and returns it to the caller. To reduce search time, we
+ * keep a cursor for each context, walking the shadow tid array to find
+ * one that's not in use.
+ *
+ * For now, if we can't allocate the full list, we fail, although
+ * in the long run, we'll allocate as many as we can, and the
+ * caller will deal with that by trying the remaining pages later.
+ * That means that when we fail, we have to mark the tids as not in
+ * use again, in our shadow copy.
+ *
+ * It's up to the caller to free the tids when they are done.
+ * We'll unlock the pages as they free them.
+ *
+ * Also, right now we are locking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.
+ */
+static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0, ntids;
+ u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
+ u16 *tidlist;
+ struct qib_devdata *dd = rcd->dd;
+ u64 physaddr;
+ unsigned long vaddr;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+ struct page **pagep = NULL;
+ unsigned subctxt = subctxt_fp(fp);
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cnt = ti->tidcnt;
+ if (!cnt) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
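+ /*
+ * Determine this subcontext's slice of the context's TID array:
+ * slaves get an even share at increasing offsets, and the master
+ * gets its share plus the remainder at the end.
+ */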
+ if (!rcd->subctxt_cnt) {
+ tidcnt = dd->rcvtidcnt;
+ tid = rcd->tidcursor;
+ tidoff = 0;
+ } else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ tidoff = dd->rcvtidcnt - tidcnt;
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ tidoff = tidcnt * (subctxt - 1);
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ }
+ if (cnt > tidcnt) {
+ /* make sure it all fits in tid_pg_list */
+ qib_devinfo(dd->pcidev, "Process tried to allocate %u "
+ "TIDs, only trying max (%u)\n", cnt, tidcnt);
+ cnt = tidcnt;
+ }
+ pagep = (struct page **) rcd->tid_pg_list;
+ tidlist = (u16 *) &pagep[dd->rcvtidcnt];
+ pagep += tidoff;
+ tidlist += tidoff;
+
+ memset(tidmap, 0, sizeof(tidmap));
+ /* before decrement; chip actual # */
+ ntids = tidcnt;
+ tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ /* virtual address of first page in transfer */
+ vaddr = ti->tidvaddr;
+ if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
+ cnt * PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = qib_get_user_pages(vaddr, cnt, pagep);
+ if (ret) {
+ /*
+ * if (ret == -EBUSY)
+ * We can't continue because the pagep array won't be
+ * initialized. This should never happen,
+ * unless perhaps the user has mpin'ed the pages
+ * themselves.
+ */
+ qib_devinfo(dd->pcidev,
+ "Failed to lock addr %p, %u pages: "
+ "errno %d\n", (void *) vaddr, cnt, -ret);
+ goto done;
+ }
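+ /*
+ * For each newly pinned page, find a free TID slot via the shadow
+ * array, DMA-map the page, and program the chip's TID entry.
+ */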
+ for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ for (; ntids--; tid++) {
+ if (tid == tidcnt)
+ tid = 0;
+ if (!dd->pageshadow[ctxttid + tid])
+ break;
+ }
+ if (ntids < 0) {
+ /*
+ * Oops, wrapped all the way through their TIDs,
+ * and didn't have enough free; see comments at
+ * start of routine
+ */
+ i--; /* last tidlist[i] not filled in */
+ ret = -ENOMEM;
+ break;
+ }
+ tidlist[i] = tid + tidoff;
+ /* we "know" system pages and TID pages are same size */
+ dd->pageshadow[ctxttid + tid] = pagep[i];
+ dd->physshadow[ctxttid + tid] =
+ qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * don't need an atomic op or its overhead
+ */
+ __set_bit(tid, tidmap);
+ physaddr = dd->physshadow[ctxttid + tid];
+ /* PERFORMANCE: below should almost certainly be cached */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, physaddr);
+ /*
+ * don't check this tid in qib_ctxtshadow, since we
+ * just filled it in; start with the next one.
+ */
+ tid++;
+ }
+
+ if (ret) {
+ u32 limit;
+cleanup:
+ /* jump here if copy out of updated info failed... */
+ /* same code that's in qib_free_tid() */
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit((const unsigned long *)tidmap, limit);
+ for (; tid < limit; tid++) {
+ if (!test_bit(tid, tidmap))
+ continue;
+ if (dd->pageshadow[ctxttid + tid]) {
+ dma_addr_t phys;
+
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly
+ * be cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED,
+ dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dd->pageshadow[ctxttid + tid] = NULL;
+ }
+ }
+ qib_release_user_pages(pagep, cnt);
+ } else {
+ /*
+ * Copy the updated array, with qib_tid's filled in, back
+ * to user. Since we did the copy in already, this "should
+ * never fail". If it does, we have to clean up...
+ */
+ if (copy_to_user((void __user *)
+ (unsigned long) ti->tidlist,
+ tidlist, cnt * sizeof(*tidlist))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
+ tidmap, sizeof tidmap)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (tid == tidcnt)
+ tid = 0;
+ if (!rcd->subctxt_cnt)
+ rcd->tidcursor = tid;
+ else
+ tidcursor_fp(fp) = tid;
+ }
+
+done:
+ return ret;
+}
+
+/**
+ * qib_tid_free - free a context TID
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @ti: the TID info
+ *
+ * right now we are unlocking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance. We check that the TID is in range for this context
+ * but otherwise don't check validity; if the user has an error and
+ * frees the wrong tid, it's only their own data that can thereby
+ * be corrupted. We do check that the TID was in use, for sanity.
+ * We always use our idea of the saved address, not the address that
+ * they pass in to us.
+ */
+static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0;
+ u32 tid, ctxttid, cnt, limit, tidcnt;
+ struct qib_devdata *dd = rcd->dd;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
+ sizeof tidmap)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
+ if (!rcd->subctxt_cnt)
+ tidcnt = dd->rcvtidcnt;
+ else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ ctxttid += dd->rcvtidcnt - tidcnt;
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ ctxttid += tidcnt * (subctxt - 1);
+ }
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit(tidmap, limit);
+ for (cnt = 0; tid < limit; tid++) {
+ /*
+ * small optimization; if we detect a run of 3 or so without
+ * any set, use find_first_bit again. That's mainly to
+ * accelerate the case where we wrapped, so we have some at
+ * the beginning, and some at the end, and a big gap
+ * in the middle.
+ */
+ if (!test_bit(tid, tidmap))
+ continue;
+ cnt++;
+ if (dd->pageshadow[ctxttid + tid]) {
+ struct page *p;
+ dma_addr_t phys;
+
+ p = dd->pageshadow[ctxttid + tid];
+ dd->pageshadow[ctxttid + tid] = NULL;
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly be
+ * cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_set_part_key - set a partition key
+ * @rcd: the context
+ * @key: the key
+ *
+ * We can have up to 4 active at a time (other than the default, which is
+ * always allowed). This is somewhat tricky, since multiple contexts may set
+ * the same key, so we reference count them, and clean up at exit. All 4
+ * partition keys are packed into a single qlogic_ib register. It's an
+ * error for a process to set the same pkey multiple times. We provide no
+ * mechanism to de-allocate a pkey at this time; we may eventually need to
+ * do that. I've used the atomic operations, and no locking, and only make
+ * a single pass through what's available. This should be more than
+ * adequate for some time. I'll think about spinlocks or the like if and as
+ * it's necessary.
+ */
+static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ int i, any = 0, pidx = -1;
+ u16 lkey = key & 0x7FFF;
+ int ret;
+
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ /* nothing to do; this key always valid */
+ ret = 0;
+ goto bail;
+ }
+
+ if (!lkey) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Set the full membership bit, because it has to be
+ * set in the register or the packet, and it seems
+ * cleaner to set in the register than to force all
+ * callers to set it.
+ */
+ key |= 0x8000;
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i] && pidx == -1)
+ pidx = i;
+ if (rcd->pkeys[i] == key) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (pidx == -1) {
+ ret = -EBUSY;
+ goto bail;
+ }
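+ /*
+ * First pass over the hardware pkey table: take a reference if the
+ * key is already present, and note whether any free slots exist for
+ * the allocation pass below.
+ */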
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ if (ppd->pkeys[i] == key) {
+ atomic_t *pkrefs = &ppd->pkeyrefs[i];
+
+ if (atomic_inc_return(pkrefs) > 1) {
+ rcd->pkeys[pidx] = key;
+ ret = 0;
+ goto bail;
+ } else {
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any++;
+ }
+ }
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ /*
+ * It makes no sense to have both the limited and
+ * full membership PKEY set at the same time since
+ * the unlimited one will disable the limited one.
+ */
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ rcd->pkeys[pidx] = key;
+ ppd->pkeys[i] = key;
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+ ret = 0;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_manage_rcvq - manage a context's receive queue
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions. start_stop == 1 re-enables, to be used to
+ * re-init the software copy of the head register.
+ */
+static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
+ int start_stop)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned int rcvctrl_op;
+
+ if (subctxt)
+ goto bail;
+ /* atomically clear receive enable ctxt. */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled.
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+ rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
+ } else
+ rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
+ dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
+ /* always; new head should be equal to new tail; see above */
+bail:
+ return 0;
+}
+
+static void qib_clean_part_key(struct qib_ctxtdata *rcd,
+ struct qib_devdata *dd)
+{
+ int i, j, pchanged = 0;
+ u64 oldpkey;
+ struct qib_pportdata *ppd = rcd->ppd;
+
+ /* for debugging only */
+ oldpkey = (u64) ppd->pkeys[0] |
+ ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i])
+ continue;
+ for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
+ /* check for match independent of the global bit */
+ if ((ppd->pkeys[j] & 0x7fff) !=
+ (rcd->pkeys[i] & 0x7fff))
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
+ ppd->pkeys[j] = 0;
+ pchanged++;
+ }
+ break;
+ }
+ rcd->pkeys[i] = 0;
+ }
+ if (pchanged)
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+}
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
+ unsigned len, void *kvaddr, u32 write_ok, char *what)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long pfn;
+ int ret;
+
+ if ((vma->vm_end - vma->vm_start) > len) {
+ qib_devinfo(dd->pcidev,
+ "FAIL on %s: len %lx > %x\n", what,
+ vma->vm_end - vma->vm_start, len);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ /*
+ * shared context user code requires rcvhdrq mapped r/w; others
+ * are allowed only a read-only mapping.
+ */
+ if (!write_ok) {
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "%s must be mapped readonly\n", what);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
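+ /*
+ * kvaddr comes from dma_alloc_coherent(), so it is physically
+ * contiguous and a single remap_pfn_range() covers the whole area.
+ */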
+ pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ len, vma->vm_page_prot);
+ if (ret)
+ qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
+ "bytes failed: %d\n", what, rcd->ctxt,
+ pfn, len, ret);
+bail:
+ return ret;
+}
+
+static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
+ u64 ureg)
+{
+ unsigned long phys;
+ unsigned long sz;
+ int ret;
+
+ /*
+ * This is real hardware, so use io_remap. This is the mechanism
+ * for the user process to update the head registers for their ctxt
+ * in the chip.
+ */
+ sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
+ if ((vma->vm_end - vma->vm_start) > sz) {
+ qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
+ "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+ ret = -EFAULT;
+ } else {
+ phys = dd->physaddr + ureg;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return ret;
+}
+
+static int mmap_piobufs(struct vm_area_struct *vma,
+ struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ unsigned piobufs, unsigned piocnt)
+{
+ unsigned long phys;
+ int ret;
+
+ /*
+ * When we map the PIO buffers in the chip, we want to map them as
+ * writeonly, no read possible; unfortunately, x86 doesn't allow
+ * for this in hardware, but we still prevent users from asking
+ * for it.
+ */
+ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
+ qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
+ "reqlen %lx > PAGE\n",
+ vma->vm_end - vma->vm_start);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ phys = dd->physaddr + piobufs;
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
+
+ /*
+ * don't allow them to later change to readable with mprotect (for when
+ * not initially mapped readable, as is normally the case)
+ */
+ vma->vm_flags &= ~VM_MAYREAD;
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
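+ /* when qib_wc_pat (PAT-based write-combining) is enabled, map the PIO buffers write-combined */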
+ if (qib_wc_pat)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+static int mmap_rcvegrbufs(struct vm_area_struct *vma,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long start, size;
+ size_t total_size, i;
+ unsigned long pfn;
+ int ret;
+
+ size = rcd->rcvegrbuf_size;
+ total_size = rcd->rcvegrbuf_chunks * size;
+ if ((vma->vm_end - vma->vm_start) > total_size) {
+ qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
+ "reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) total_size);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev, "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /* don't allow them to later change to writeable with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ start = vma->vm_start;
+
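+ /* the eager buffers are allocated in chunks; remap each chunk in turn */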
+ for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
+ pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, start, pfn, size,
+ vma->vm_page_prot);
+ if (ret < 0)
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * qib_file_vma_fault - handle a VMA page fault.
+ */
+static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+
+ page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static struct vm_operations_struct qib_file_vm_ops = {
+ .fault = qib_file_vma_fault,
+};
+
+static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+ struct qib_ctxtdata *rcd, unsigned subctxt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned subctxt_cnt;
+ unsigned long len;
+ void *addr;
+ size_t size;
+ int ret = 0;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+
+ /*
+ * Each process has all the subctxt uregbase, rcvhdrq, and
+ * rcvegrbufs mmapped - as an array for all the processes,
+ * and also separately for this process.
+ */
+ if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
+ addr = rcd->subctxt_uregbase;
+ size = PAGE_SIZE * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
+ addr = rcd->subctxt_rcvhdr_base;
+ size = rcd->rcvhdrq_size * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
+ addr = rcd->subctxt_rcvegrbuf;
+ size *= subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
+ PAGE_SIZE * subctxt)) {
+ addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt)) {
+ addr = rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt;
+ size = rcd->rcvhdrq_size;
+ } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
+ addr = rcd->user_event_mask;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
+ size * subctxt)) {
+ addr = rcd->subctxt_rcvegrbuf + size * subctxt;
+ /* rcvegrbufs are read-only on the slave */
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /*
+ * Don't allow permission to later change to writeable
+ * with mprotect.
+ */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ } else
+ goto bail;
+ len = vma->vm_end - vma->vm_start;
+ if (len > size) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
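+ /*
+ * These are vmalloc()ed shared pages, so don't remap here; record
+ * the kernel address in vm_pgoff and let qib_file_vma_fault()
+ * provide the pages on demand. Returning 1 tells the caller the
+ * address was handled.
+ */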
+ vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
+ vma->vm_ops = &qib_file_vm_ops;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_mmapf - mmap various structures into user space
+ * @fp: the file pointer
+ * @vma: the VM area
+ *
+ * We use this to have a shared buffer between the kernel and the user code
+ * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
+ * buffers in the chip. We have the open and close entries so we can bump
+ * the ref count and keep the driver from being unloaded while still mapped.
+ */
+static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
+{
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ u64 pgaddr, ureg;
+ unsigned piobufs, piocnt;
+ int ret, match = 1;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd || !(vma->vm_flags & VM_SHARED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd = rcd->dd;
+
+ /*
+ * This is the qib_do_user_init() code, mapping the shared buffers
+ * and per-context user registers into the user process. The address
+ * referred to by vm_pgoff is the file offset passed via mmap().
+ * For shared contexts, this is the kernel vmalloc() address of the
+ * pages to share with the master.
+ * For non-shared or master ctxts, this is a physical address.
+ * We only do one mmap for each space mapped.
+ */
+ pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+
+ /*
+ * Check for 0 in case one of the allocations failed, but user
+ * called mmap anyway.
+ */
+ if (!pgaddr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Physical addresses must fit in 40 bits for our hardware.
+ * Check for kernel virtual addresses first; anything else must
+ * match a HW or memory address.
+ */
+ ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto bail;
+ }
+
+ ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!rcd->subctxt_cnt) {
+ /* ctxt is not shared */
+ piocnt = rcd->piocnt;
+ piobufs = rcd->piobufs;
+ } else if (!subctxt_fp(fp)) {
+ /* caller is the master */
+ piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
+ (rcd->piocnt % rcd->subctxt_cnt);
+ piobufs = rcd->piobufs +
+ dd->palign * (rcd->piocnt - piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ /* caller is a slave */
+ piocnt = rcd->piocnt / rcd->subctxt_cnt;
+ piobufs = rcd->piobufs + dd->palign * piocnt * slave;
+ }
+
+ if (pgaddr == ureg)
+ ret = mmap_ureg(vma, dd, ureg);
+ else if (pgaddr == piobufs)
+ ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
+ else if (pgaddr == dd->pioavailregs_phys)
+ /* in-memory copy of pioavail registers */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma, 0,
+ "pioavail registers");
+ else if (pgaddr == rcd->rcvegr_phys)
+ ret = mmap_rcvegrbufs(vma, rcd);
+ else if (pgaddr == (u64) rcd->rcvhdrq_phys)
+ /*
+ * The rcvhdrq itself; multiple pages, contiguous
+ * from an i/o perspective. Shared contexts need
+ * to map r/w, so we allow writing.
+ */
+ ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, 1, "rcvhdrq");
+ else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
+ /* in-memory copy of rcvhdrq tail register */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr, 0,
+ "rcvhdrq tail");
+ else
+ match = 0;
+ if (!match)
+ ret = -EINVAL;
+
+ vma->vm_private_data = NULL;
+
+ if (ret < 0)
+ qib_devinfo(dd->pcidev,
+ "mmap Failure %d: off %llx len %lx\n",
+ -ret, (unsigned long long)pgaddr,
+ vma->vm_end - vma->vm_start);
+bail:
+ return ret;
+}
+
+static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (rcd->urgent != rcd->urgent_poll) {
+ pollflag = POLLIN | POLLRDNORM;
+ rcd->urgent_poll = rcd->urgent;
+ } else {
+ pollflag = 0;
+ set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (dd->f_hdrqempty(rcd)) {
+ set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
+ pollflag = 0;
+ } else
+ pollflag = POLLIN | POLLRDNORM;
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned pollflag;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd)
+ pollflag = POLLERR;
+ else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
+ pollflag = qib_poll_urgent(rcd, fp, pt);
+ else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
+ pollflag = qib_poll_next(rcd, fp, pt);
+ else /* invalid */
+ pollflag = POLLERR;
+
+ return pollflag;
+}
+
+/*
+ * Check that userland and driver are compatible for subcontexts.
+ */
+static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
+{
+ /* this code is written long-hand for clarity */
+ if (QIB_USER_SWMAJOR != user_swmajor) {
+ /* no promise of compatibility if major mismatch */
+ return 0;
+ }
+ if (QIB_USER_SWMAJOR == 1) {
+ switch (QIB_USER_SWMINOR) {
+ case 0:
+ case 1:
+ case 2:
+ /* no subctxt implementation so cannot be compatible */
+ return 0;
+ case 3:
+ /* 3 is only compatible with itself */
+ return user_swminor == 3;
+ default:
+ /* >= 4 are compatible (or are expected to be) */
+ return user_swminor >= 4;
+ }
+ }
+ /* make no promises yet for future major versions */
+ return 0;
+}
+
+static int init_subctxts(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ const struct qib_user_info *uinfo)
+{
+ int ret = 0;
+ unsigned num_subctxts;
+ size_t size;
+
+ /*
+ * If the user is requesting zero subctxts,
+ * skip the subctxt allocation.
+ */
+ if (uinfo->spu_subctxt_cnt <= 0)
+ goto bail;
+ num_subctxts = uinfo->spu_subctxt_cnt;
+
+ /* Check for subctxt compatibility */
+ if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
+ uinfo->spu_userversion & 0xffff)) {
+ qib_devinfo(dd->pcidev,
+ "Mismatched user version (%d.%d) and driver "
+ "version (%d.%d) while context sharing. Ensure "
+ "that driver and library are from the same "
+ "release.\n",
+ (int) (uinfo->spu_userversion >> 16),
+ (int) (uinfo->spu_userversion & 0xffff),
+ QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
+ goto bail;
+ }
+ if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
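+ /*
+ * Allocate the per-subcontext shared regions with vmalloc_user()
+ * so they can later be handed to each subcontext process via
+ * mmap_kvaddr().
+ */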
+ rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
+ if (!rcd->subctxt_uregbase) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ /* Note: rcd->rcvhdrq_size isn't initialized yet. */
+ size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE) * num_subctxts;
+ rcd->subctxt_rcvhdr_base = vmalloc_user(size);
+ if (!rcd->subctxt_rcvhdr_base) {
+ ret = -ENOMEM;
+ goto bail_ureg;
+ }
+
+ rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
+ rcd->rcvegrbuf_size *
+ num_subctxts);
+ if (!rcd->subctxt_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail_rhdr;
+ }
+
+ rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
+ rcd->subctxt_id = uinfo->spu_subctxt_id;
+ rcd->active_slaves = 1;
+ rcd->redirect_seq_cnt = 1;
+ set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ goto bail;
+
+bail_rhdr:
+ vfree(rcd->subctxt_rcvhdr_base);
+bail_ureg:
+ vfree(rcd->subctxt_uregbase);
+ rcd->subctxt_uregbase = NULL;
+bail:
+ return ret;
+}
+
+static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
+ struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ void *ptmp = NULL;
+ int ret;
+
+ rcd = qib_create_ctxtdata(ppd, ctxt);
+
+ /*
+ * Allocate memory for use in qib_tid_update() at open to
+ * reduce cost of expected send setup per message segment
+ */
+ if (rcd)
+ ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
+ dd->rcvtidcnt * sizeof(struct page **),
+ GFP_KERNEL);
+
+ if (!rcd || !ptmp) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata "
+ "memory, failing open\n");
+ ret = -ENOMEM;
+ goto bailerr;
+ }
+ rcd->userversion = uinfo->spu_userversion;
+ ret = init_subctxts(dd, rcd, uinfo);
+ if (ret)
+ goto bailerr;
+ rcd->tid_pg_list = ptmp;
+ rcd->pid = current->pid;
+ init_waitqueue_head(&dd->rcd[ctxt]->wait);
+ strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+ ctxt_fp(fp) = rcd;
+ qib_stats.sps_ctxts++;
+ ret = 0;
+ goto bail;
+
+bailerr:
+ dd->rcd[ctxt] = NULL;
+ kfree(rcd);
+ kfree(ptmp);
+bail:
+ return ret;
+}
+
+static inline int usable(struct qib_pportdata *ppd, int active_only)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 linkok = active_only ? QIBL_LINKACTIVE :
+ (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
+
+ return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
+ (ppd->lflags & linkok);
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd = NULL;
+ int ret;
+ u32 ctxt;
+
+ if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ /*
+ * If the user requests a specific port, only try that one port; else
+ * select "best" port below, based on context.
+ */
+ if (uinfo->spu_port) {
+ ppd = dd->pport + uinfo->spu_port - 1;
+ if (!usable(ppd, 0)) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ /*
+ * The setting and clearing of user context rcd[x] is protected
+ * by the qib_mutex.
+ */
+ if (!ppd) {
+ /* choose port based on ctxt, if up, else 1st up */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int i;
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = dd->pport + i;
+ if (usable(ppd, 0))
+ break;
+ }
+ if (i == dd->num_pports) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto bail;
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_pportdata *ppd;
+ int ret = 0, devmax;
+ int npresent, nup;
+ int ndev;
+ u32 port = uinfo->spu_port, ctxt;
+
+ devmax = qib_count_units(&npresent, &nup);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ if (port) {
+ if (port > dd->num_pports)
+ continue;
+ ppd = dd->pport + port - 1;
+ if (!usable(ppd, 0))
+ continue;
+ } else {
+ /*
+ * choose port based on ctxt, if up, else
+ * first port that's up for multi-port HCA
+ */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int j;
+
+ ppd = NULL;
+ for (j = 0; j < dd->num_pports &&
+ !ppd; j++)
+ if (usable(dd->pport + j, 0))
+ ppd = dd->pport + j;
+ if (!ppd)
+ continue; /* to next unit */
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto done;
+ }
+ }
+
+ if (npresent) {
+ if (nup == 0)
+ ret = -ENETDOWN;
+ else
+ ret = -EBUSY;
+ } else
+ ret = -ENXIO;
+
+done:
+ return ret;
+}
+
+static int find_shared_ctxt(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int devmax, ndev, i;
+ int ret = 0;
+
+ devmax = qib_count_units(NULL, NULL);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ /* Skip ctxts which are not yet open */
+ if (!rcd || !rcd->cnt)
+ continue;
+ /* Skip ctxt if it doesn't match the requested one */
+ if (rcd->subctxt_id != uinfo->spu_subctxt_id)
+ continue;
+ /* Verify the sharing process matches the master */
+ if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
+ rcd->userversion != uinfo->spu_userversion ||
+ rcd->cnt >= rcd->subctxt_cnt) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ctxt_fp(fp) = rcd;
+ subctxt_fp(fp) = rcd->cnt++;
+ rcd->subpid[subctxt_fp(fp)] = current->pid;
+ tidcursor_fp(fp) = 0;
+ rcd->active_slaves |= 1 << subctxt_fp(fp);
+ ret = 1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int qib_open(struct inode *in, struct file *fp)
+{
+ /* The real work is performed later in qib_assign_ctxt() */
+ fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
+ if (fp->private_data) /* no cpu affinity by default */
+ ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
+ return fp->private_data ? 0 : -ENOMEM;
+}
+
+/*
+ * Get ctxt early, so we can set affinity prior to memory allocation.
+ */
+static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ int ret;
+ int i_minor;
+ unsigned swmajor, swminor;
+
+ /* Check to be sure we haven't already initialized this file */
+ if (ctxt_fp(fp)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* for now, if major version is different, bail */
+ swmajor = uinfo->spu_userversion >> 16;
+ if (swmajor != QIB_USER_SWMAJOR) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ swminor = uinfo->spu_userversion & 0xffff;
+
+ mutex_lock(&qib_mutex);
+
+ if (qib_compatible_subctxts(swmajor, swminor) &&
+ uinfo->spu_subctxt_cnt) {
+ ret = find_shared_ctxt(fp, uinfo);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto done_chk_sdma;
+ }
+ }
+
+ i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+ if (i_minor)
+ ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+ else
+ ret = get_a_ctxt(fp, uinfo);
+
+done_chk_sdma:
+ if (!ret) {
+ struct qib_filedata *fd = fp->private_data;
+ const struct qib_ctxtdata *rcd = fd->rcd;
+ const struct qib_devdata *dd = rcd->dd;
+
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+ dd->unit,
+ rcd->ctxt,
+ fd->subctxt);
+ if (!fd->pq)
+ ret = -ENOMEM;
+ }
+
+ /*
+ * If the process has NOT already set its affinity, select and
+ * reserve a processor for it, as a rendezvous for all
+ * users of the driver. If they don't actually later
+ * set affinity to this cpu, or set it to some other cpu,
+ * it just means that sooner or later we don't recommend
+ * a cpu, and let the scheduler do its best.
+ */
+ if (!ret && cpus_weight(current->cpus_allowed) >=
+ qib_cpulist_count) {
+ int cpu;
+ cpu = find_first_zero_bit(qib_cpulist,
+ qib_cpulist_count);
+ if (cpu != qib_cpulist_count) {
+ __set_bit(cpu, qib_cpulist);
+ fd->rec_cpu_num = cpu;
+ }
+ } else if (cpus_weight(current->cpus_allowed) == 1 &&
+ test_bit(first_cpu(current->cpus_allowed),
+ qib_cpulist))
+ qib_devinfo(dd->pcidev, "%s PID %u affinity "
+ "set to cpu %d; already allocated\n",
+ current->comm, current->pid,
+ first_cpu(current->cpus_allowed));
+ }
+
+ mutex_unlock(&qib_mutex);
+
+done:
+ return ret;
+}
+
+static int qib_do_user_init(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int ret;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_devdata *dd;
+ unsigned uctxt;
+
+ /* Subctxts don't need to initialize anything since master did it. */
+ if (subctxt_fp(fp)) {
+ ret = wait_event_interruptible(rcd->wait,
+ !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* some ctxts may get extra buffers, calculate that here */
+ uctxt = rcd->ctxt - dd->first_user_ctxt;
+ if (uctxt < dd->ctxts_extrabuf) {
+ rcd->piocnt = dd->pbufsctxt + 1;
+ rcd->pio_base = rcd->piocnt * uctxt;
+ } else {
+ rcd->piocnt = dd->pbufsctxt;
+ rcd->pio_base = rcd->piocnt * uctxt +
+ dd->ctxts_extrabuf;
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values. So check and handle.
+ */
+ if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
+ if (rcd->pio_base >= dd->piobcnt2k) {
+ qib_dev_err(dd,
+ "%u:ctxt%u: no 2KB buffers available\n",
+ dd->unit, rcd->ctxt);
+ ret = -ENOBUFS;
+ goto bail;
+ }
+ rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
+ qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
+ rcd->ctxt, rcd->piocnt);
+ }
+
+ rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_USER, rcd);
+ /*
+ * try to ensure that processes start up with consistent avail update
+ * for their own range, at least. If the system is very quiet, it might
+ * have the in-memory copy out of date at startup for this range of
+ * buffers, when a context gets re-used. Do this after the chg_pioavail
+ * and before the rest of setup, so it's "almost certain" the dma
+ * will have occurred (can't 100% guarantee, but should be many
+ * decimals of 9s, with this ordering), given how much else happens
+ * after this.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+
+ /*
+ * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+ * array for the time being. If rcd->ctxt > chip-supported,
+ * we will someday need extra handling here, e.g. by handling
+ * overflow through ctxt 0.
+ */
+ ret = qib_create_rcvhdrq(dd, rcd);
+ if (!ret)
+ ret = qib_setup_eagerbufs(rcd);
+ if (ret)
+ goto bail_pio;
+
+ rcd->tidcursor = 0; /* start at beginning after open */
+
+ /* initialize poll variables... */
+ rcd->urgent = 0;
+ rcd->urgent_poll = 0;
+
+ /*
+ * Now enable the ctxt for receive.
+ * For chips that are set to DMA the tail register to memory
+ * when they change (and when the update bit transitions from
+ * 0 to 1), we turn it off and then back on.
+ * This will (very briefly) affect any other open ctxts, but the
+ * duration is very short, and therefore isn't an issue. We
+ * explicitly set the in-memory tail copy to 0 beforehand, so we
+ * don't have to wait to be sure the DMA update has happened
+ * (chip resets head/tail to 0 on transition to enable).
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
+ rcd->ctxt);
+
+ /* Notify any waiting slaves */
+ if (rcd->subctxt_cnt) {
+ clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ wake_up(&rcd->wait);
+ }
+ return 0;
+
+bail_pio:
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_KERN, rcd);
+bail:
+ return ret;
+}
+
+/**
+ * unlock_expected_tids - unlock any expected TID entries the context still had in use
+ * @rcd: ctxt
+ *
+ * We don't actually update the chip here; the caller does a bulk
+ * update using f_clear_tids.
+ */
+static void unlock_expected_tids(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
+ int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ struct page *p = dd->pageshadow[i];
+ dma_addr_t phys;
+
+ if (!p)
+ continue;
+
+ phys = dd->physshadow[i];
+ dd->physshadow[i] = dd->tidinvalid;
+ dd->pageshadow[i] = NULL;
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ cnt++;
+ }
+}
+
+static int qib_close(struct inode *in, struct file *fp)
+{
+ int ret = 0;
+ struct qib_filedata *fd;
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ unsigned ctxt;
+ pid_t pid;
+
+ mutex_lock(&qib_mutex);
+
+ fd = (struct qib_filedata *) fp->private_data;
+ fp->private_data = NULL;
+ rcd = fd->rcd;
+ if (!rcd) {
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* ensure all pio buffer writes in progress are flushed */
+ qib_flush_wc();
+
+ /* drain user sdma queue */
+ if (fd->pq) {
+ qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
+ qib_user_sdma_queue_destroy(fd->pq);
+ }
+
+ if (fd->rec_cpu_num != -1)
+ __clear_bit(fd->rec_cpu_num, qib_cpulist);
+
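+ /*
+ * rcd->cnt counts processes sharing the context; if others remain
+ * after this close, just detach our subcontext and return.
+ */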
+ if (--rcd->cnt) {
+ /*
+ * XXX If the master closes the context before the slave(s),
+ * revoke the mmap for the eager receive queue so
+ * the slave(s) don't wait for receive data forever.
+ */
+ rcd->active_slaves &= ~(1 << fd->subctxt);
+ rcd->subpid[fd->subctxt] = 0;
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ /* early; no interrupt users after this */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ ctxt = rcd->ctxt;
+ dd->rcd[ctxt] = NULL;
+ pid = rcd->pid;
+ rcd->pid = 0;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ if (rcd->rcvwait_to || rcd->piowait_to ||
+ rcd->rcvnowait || rcd->pionowait) {
+ rcd->rcvwait_to = 0;
+ rcd->piowait_to = 0;
+ rcd->rcvnowait = 0;
+ rcd->pionowait = 0;
+ }
+ if (rcd->flag)
+ rcd->flag = 0;
+
+ if (dd->kregbase) {
+ /* atomically clear receive enable ctxt and intr avail. */
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
+
+ /* clean up the pkeys for this ctxt user */
+ qib_clean_part_key(rcd, dd);
+ qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
+ qib_chg_pioavailkernel(dd, rcd->pio_base,
+ rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
+
+ dd->f_clear_tids(dd, rcd);
+
+ if (dd->pageshadow)
+ unlock_expected_tids(rcd);
+ qib_stats.sps_ctxts--;
+ }
+
+ mutex_unlock(&qib_mutex);
+ qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
+
+bail:
+ kfree(fd);
+ return ret;
+}
+
+static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
+{
+ struct qib_ctxt_info info;
+ int ret;
+ size_t sz;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_filedata *fd;
+
+ fd = (struct qib_filedata *) fp->private_data;
+
+ info.num_active = qib_count_active_units();
+ info.unit = rcd->dd->unit;
+ info.port = rcd->ppd->port;
+ info.ctxt = rcd->ctxt;
+ info.subctxt = subctxt_fp(fp);
+ /* Number of user ctxts available for this device. */
+ info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
+ info.num_subctxts = rcd->subctxt_cnt;
+ info.rec_cpu = fd->rec_cpu_num;
+ sz = sizeof(info);
+
+ if (copy_to_user(uinfo, &info, sz)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
+ u32 __user *inflightp)
+{
+ const u32 val = qib_user_sdma_inflight_counter(pq);
+
+ if (put_user(val, inflightp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qib_sdma_get_complete(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ u32 __user *completep)
+{
+ u32 val;
+ int err;
+
+ if (!pq)
+ return -EINVAL;
+
+ err = qib_user_sdma_make_progress(ppd, pq);
+ if (err < 0)
+ return err;
+
+ val = qib_user_sdma_complete_counter(pq);
+ if (put_user(val, completep))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int disarm_req_delay(struct qib_ctxtdata *rcd)
+{
+ int ret = 0;
+
+ if (!usable(rcd->ppd, 1)) {
+ int i;
+ /*
+ * if link is down, or otherwise not usable, delay
+ * the caller up to 30 seconds, so we don't thrash
+ * in trying to get the chip back to ACTIVE, and
+ * set flag so they make the call again.
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+ msleep(100);
+ ret = -ENETDOWN;
+ }
+ return ret;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use that is specific to send buffers.
+ */
+int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&ppd->dd->uctxt_lock);
+ for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
+ ctxt++) {
+ rcd = ppd->dd->rcd[ctxt];
+ if (!rcd)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(evtbit, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(evtbit, &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&ppd->dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Clear the event notifier events for this context.
+ * For the DISARM_BUFS case, we also take action (this obsoletes
+ * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
+ * compatibility).
+ * Other bits don't currently require actions; we just atomically clear them.
+ * The user process then performs actions appropriate to the bit having
+ * been set, if desired, and checks again in the future.
+ */
+static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
+ unsigned long events)
+{
+ int ret = 0, i;
+
+ for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
+ if (!test_bit(i, &events))
+ continue;
+ if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ } else
+ clear_bit(i, &rcd->user_event_mask[subctxt]);
+ }
+ return ret;
+}
+
+static ssize_t qib_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ const struct qib_cmd __user *ucmd;
+ struct qib_ctxtdata *rcd;
+ const void __user *src;
+ size_t consumed, copy = 0;
+ struct qib_cmd cmd;
+ ssize_t ret = 0;
+ void *dest;
+
+ if (count < sizeof(cmd.type)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ucmd = (const struct qib_cmd __user *) data;
+
+ if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed = sizeof(cmd.type);
+
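+ /*
+ * First switch: work out how much command-specific payload to copy
+ * in from user space; the command is actually dispatched in the
+ * second switch below.
+ */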
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ case QIB_CMD_USER_INIT:
+ copy = sizeof(cmd.cmd.user_info);
+ dest = &cmd.cmd.user_info;
+ src = &ucmd->cmd.user_info;
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ copy = sizeof(cmd.cmd.recv_ctrl);
+ dest = &cmd.cmd.recv_ctrl;
+ src = &ucmd->cmd.recv_ctrl;
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ copy = sizeof(cmd.cmd.ctxt_info);
+ dest = &cmd.cmd.ctxt_info;
+ src = &ucmd->cmd.ctxt_info;
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ case QIB_CMD_TID_FREE:
+ copy = sizeof(cmd.cmd.tid_info);
+ dest = &cmd.cmd.tid_info;
+ src = &ucmd->cmd.tid_info;
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ copy = sizeof(cmd.cmd.part_key);
+ dest = &cmd.cmd.part_key;
+ src = &ucmd->cmd.part_key;
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
+ copy = 0;
+ src = NULL;
+ dest = NULL;
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ copy = sizeof(cmd.cmd.poll_type);
+ dest = &cmd.cmd.poll_type;
+ src = &ucmd->cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ copy = sizeof(cmd.cmd.armlaunch_ctrl);
+ dest = &cmd.cmd.armlaunch_ctrl;
+ src = &ucmd->cmd.armlaunch_ctrl;
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ copy = sizeof(cmd.cmd.sdma_inflight);
+ dest = &cmd.cmd.sdma_inflight;
+ src = &ucmd->cmd.sdma_inflight;
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ copy = sizeof(cmd.cmd.sdma_complete);
+ dest = &cmd.cmd.sdma_complete;
+ src = &ucmd->cmd.sdma_complete;
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ copy = sizeof(cmd.cmd.event_mask);
+ dest = &cmd.cmd.event_mask;
+ src = &ucmd->cmd.event_mask;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (copy) {
+ if ((count - consumed) < copy) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(dest, src, copy)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ consumed += copy;
+ }
+
+ rcd = ctxt_fp(fp);
+ if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ break;
+
+ case QIB_CMD_USER_INIT:
+ ret = qib_do_user_init(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ ret = qib_get_base_info(fp, (void __user *) (unsigned long)
+ cmd.cmd.user_info.spu_base_info,
+ cmd.cmd.user_info.spu_base_info_size);
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
+ (unsigned long) cmd.cmd.ctxt_info);
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_TID_FREE:
+ ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ ret = qib_set_part_key(rcd, cmd.cmd.part_key);
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ break;
+
+ case QIB_CMD_PIOAVAILUPD:
+ qib_force_pio_avail_update(rcd->dd);
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ rcd->poll_type = cmd.cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_inflight);
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ ret = qib_sdma_get_complete(rcd->ppd,
+ user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_complete);
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ ret = qib_user_event_ack(rcd, subctxt_fp(fp),
+ cmd.cmd.event_mask);
+ break;
+ }
+
+ if (ret >= 0)
+ ret = consumed;
+
+bail:
+ return ret;
+}
+
+static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long dim, loff_t off)
+{
+ struct qib_filedata *fp = iocb->ki_filp->private_data;
+ struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
+ struct qib_user_sdma_queue *pq = fp->pq;
+
+ if (!dim || !pq)
+ return -EINVAL;
+
+ return qib_user_sdma_writev(rcd, pq, iov, dim);
+}
+
+static struct class *qib_class;
+static dev_t qib_dev;
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp)
+{
+ const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
+ struct cdev *cdev;
+ struct device *device = NULL;
+ int ret;
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not allocate cdev for minor %d, %s\n",
+ minor, name);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cdev->owner = THIS_MODULE;
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto err_cdev;
+ }
+
+ device = device_create(qib_class, NULL, dev, NULL, name);
+ if (!IS_ERR(device))
+ goto done;
+ ret = PTR_ERR(device);
+ device = NULL;
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+err_cdev:
+ cdev_del(cdev);
+ cdev = NULL;
+done:
+ *cdevp = cdev;
+ *devp = device;
+ return ret;
+}
+
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
+{
+ struct device *device = *devp;
+
+ if (device) {
+ device_unregister(device);
+ *devp = NULL;
+ }
+
+ if (*cdevp) {
+ cdev_del(*cdevp);
+ *cdevp = NULL;
+ }
+}
+
+static struct cdev *wildcard_cdev;
+static struct device *wildcard_device;
+
+int __init qib_dev_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
+ "chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ qib_class = class_create(THIS_MODULE, "ipath");
+ if (IS_ERR(qib_class)) {
+ ret = PTR_ERR(qib_class);
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device class (err %d)\n", -ret);
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+ }
+
+done:
+ return ret;
+}
+
+void qib_dev_cleanup(void)
+{
+ if (qib_class) {
+ class_destroy(qib_class);
+ qib_class = NULL;
+ }
+
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+}
+
+static atomic_t user_count = ATOMIC_INIT(0);
+
+static void qib_user_remove(struct qib_devdata *dd)
+{
+ if (atomic_dec_return(&user_count) == 0)
+ qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
+
+ qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+}
+
+static int qib_user_add(struct qib_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ if (atomic_inc_return(&user_count) == 1) {
+ ret = qib_cdev_init(0, "ipath", &qib_file_ops,
+ &wildcard_cdev, &wildcard_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath%d", dd->unit);
+ ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
+ &dd->user_cdev, &dd->user_device);
+ if (ret)
+ qib_user_remove(dd);
+done:
+ return ret;
+}
+
+/*
+ * Create per-unit files in /dev
+ */
+int qib_device_create(struct qib_devdata *dd)
+{
+ int r, ret;
+
+ r = qib_user_add(dd);
+ ret = qib_diag_add(dd);
+ if (r && !ret)
+ ret = r;
+ return ret;
+}
+
+/*
+ * Remove per-unit files in /dev
+ * Returns void; the core kernel returns no errors for this stuff.
+ */
+void qib_device_remove(struct qib_devdata *dd)
+{
+ qib_user_remove(dd);
+ qib_diag_remove(dd);
+}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 00000000000..edef8527eb3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+
+#include "qib.h"
+
+#define QIBFS_MAGIC 0x726a77
+
+static struct super_block *qib_super;
+
+#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+
+static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, const struct file_operations *fops,
+ void *data)
+{
+ int error;
+ struct inode *inode = new_inode(dir->i_sb);
+
+ if (!inode) {
+ error = -EPERM;
+ goto bail;
+ }
+
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blocks = 0;
+ inode->i_atime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime;
+ inode->i_ctime = inode->i_atime;
+ inode->i_private = data;
+ if ((mode & S_IFMT) == S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ }
+
+ inode->i_fop = fops;
+
+ d_instantiate(dentry, inode);
+ error = 0;
+
+bail:
+ return error;
+}
+
+static int create_file(const char *name, mode_t mode,
+ struct dentry *parent, struct dentry **dentry,
+ const struct file_operations *fops, void *data)
+{
+ int error;
+
+ *dentry = NULL;
+ mutex_lock(&parent->d_inode->i_mutex);
+ *dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(*dentry))
+ error = qibfs_mknod(parent->d_inode, *dentry,
+ mode, fops, data);
+ else
+ error = PTR_ERR(*dentry);
+ mutex_unlock(&parent->d_inode->i_mutex);
+
+ return error;
+}
+
+static ssize_t driver_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, &qib_stats,
+ sizeof qib_stats);
+}
+
+/*
+ * Driver stats field names, one line per stat, as a single string. Used by
+ * programs such as ipathstats to print the stats in a way that works across
+ * different driver versions without changing the program source.
+ * If qlogic_ib_stats changes, this needs to change too. Names must be
+ * 12 chars or less (not counting the newline) for proper display by the
+ * ipathstats utility.
+ */
+static const char qib_statnames[] =
+ "KernIntr\n"
+ "ErrorIntr\n"
+ "Tx_Errs\n"
+ "Rcv_Errs\n"
+ "H/W_Errs\n"
+ "NoPIOBufs\n"
+ "CtxtsOpen\n"
+ "RcvLen_Errs\n"
+ "EgrBufFull\n"
+ "EgrHdrFull\n"
+ ;
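+/*
+ * For illustration only: the "driver_stats" file below returns the raw
+ * qib_stats structure, so a userspace reader presumably pairs line i of
+ * this string with the i-th 64-bit counter of that structure.
+ */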
+
+static ssize_t driver_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, qib_statnames,
+ sizeof qib_statnames - 1); /* no null */
+}
+
+static const struct file_operations driver_ops[] = {
+ { .read = driver_stats_read, },
+ { .read = driver_names_read, },
+};
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-device counter names */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+static const struct file_operations cntr_ops[] = {
+ { .read = dev_counters_read, },
+ { .read = dev_names_read, },
+};
+
+/*
+ * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * instead of a separate routine for each, but for now, this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+/* read the per-port counters for port 1 (pidx 0) */
+static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-port counters for port 2 (pidx 1) */
+static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+static const struct file_operations portcntr_ops[] = {
+ { .read = portnames_read, },
+ { .read = portcntrs_1_read, },
+ { .read = portcntrs_2_read, },
+};
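+/*
+ * Note: portcntr_ops[0] is the shared names file; add_cntr_files() below
+ * wires portcntr_ops[i] to the "port<i>counters" file for port i (pidx i-1).
+ */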
+
+/*
+ * read the per-port QSFP data for port 1 (pidx 0)
+ */
+static ssize_t qsfp_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+/*
+ * read the per-port QSFP data for port 2 (pidx 1)
+ */
+static ssize_t qsfp_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ if (dd->num_pports < 2)
+ return -ENODEV;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations qsfp_ops[] = {
+ { .read = qsfp_1_read, },
+ { .read = qsfp_2_read, },
+};
+
+static ssize_t flash_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos < 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (pos >= sizeof(struct qib_flash)) {
+ ret = 0;
+ goto bail;
+ }
+
+ if (count > sizeof(struct qib_flash) - pos)
+ count = sizeof(struct qib_flash) - pos;
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_read(dd, pos, tmp, count)) {
+ qib_dev_err(dd, "failed to read from flash\n");
+ ret = -ENXIO;
+ goto bail_tmp;
+ }
+
+ if (copy_to_user(buf, tmp, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos != 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (count != sizeof(struct qib_flash)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_write(dd, pos, tmp, count)) {
+ ret = -ENXIO;
+ qib_dev_err(dd, "failed to write to flash\n");
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static const struct file_operations flash_ops = {
+ .read = flash_read,
+ .write = flash_write,
+};
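+/*
+ * Usage sketch (assuming the filesystem is mounted at /ipathfs): each unit's
+ * "flash" file created below exposes the EEPROM contents; reads return at
+ * most sizeof(struct qib_flash) bytes and writes must supply exactly that
+ * many, e.g. "cat /ipathfs/0/flash > flash.bak" to save an image.
+ */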
+
+static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
+{
+ struct dentry *dir, *tmp;
+ char unit[10];
+ int ret, i;
+
+ /* create the per-unit directory */
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+ &simple_dir_operations, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ /* create the files in the new directory */
+ ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, "portcounter_names", ret);
+ goto bail;
+ }
+ for (i = 1; i <= dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i);
+ /* create the files in the new directory */
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[i], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ if (!(dd->flags & QIB_HAS_QSFP))
+ continue;
+ sprintf(fname, "qsfp%d", i);
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &qsfp_ops[i - 1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ }
+
+ ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
+ &flash_ops, dd);
+ if (ret)
+ printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+ unit, ret);
+bail:
+ return ret;
+}
+
+static int remove_file(struct dentry *parent, char *name)
+{
+ struct dentry *tmp;
+ int ret;
+
+ tmp = lookup_one_len(name, parent, strlen(name));
+
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto bail;
+ }
+
+ spin_lock(&dcache_lock);
+ spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_locked(tmp);
+ __d_drop(tmp);
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else {
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+
+ ret = 0;
+bail:
+ /*
+ * We don't expect clients to care about the return value, but
+ * it's there if they need it.
+ */
+ return ret;
+}
+
+static int remove_device_files(struct super_block *sb,
+ struct qib_devdata *dd)
+{
+ struct dentry *dir, *root;
+ char unit[10];
+ int ret, i;
+
+ root = dget(sb->s_root);
+ mutex_lock(&root->d_inode->i_mutex);
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ dir = lookup_one_len(unit, root, strlen(unit));
+
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ printk(KERN_ERR "Lookup of %s failed\n", unit);
+ goto bail;
+ }
+
+ remove_file(dir, "counters");
+ remove_file(dir, "counter_names");
+ remove_file(dir, "portcounter_names");
+ for (i = 0; i < dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i + 1);
+ remove_file(dir, fname);
+ if (dd->flags & QIB_HAS_QSFP) {
+ sprintf(fname, "qsfp%d", i + 1);
+ remove_file(dir, fname);
+ }
+ }
+ remove_file(dir, "flash");
+ d_delete(dir);
+ ret = simple_rmdir(root->d_inode, dir);
+
+bail:
+ mutex_unlock(&root->d_inode->i_mutex);
+ dput(root);
+ return ret;
+}
+
+/*
+ * This fills everything in when the fs is mounted, to handle umount/mount
+ * after device init. The direct add_cntr_files() call handles adding
+ * them from the init code, when the fs is already mounted.
+ */
+static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct qib_devdata *dd, *tmp;
+ unsigned long flags;
+ int ret;
+
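+	/*
+	 * Entries start at index 2 because simple_fill_super() uses the
+	 * array index as the inode number and reserves inode 1 for the root.
+	 */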
+ static struct tree_descr files[] = {
+ [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
+ [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
+ {""},
+ };
+
+ ret = simple_fill_super(sb, QIBFS_MAGIC, files);
+ if (ret) {
+ printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ ret = add_cntr_files(sb, dd);
+ if (ret) {
+ deactivate_super(sb);
+ goto bail;
+ }
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+bail:
+ return ret;
+}
+
+static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ int ret = get_sb_single(fs_type, flags, data,
+ qibfs_fill_super, mnt);
+ if (ret >= 0)
+ qib_super = mnt->mnt_sb;
+ return ret;
+}
+
+static void qibfs_kill_super(struct super_block *s)
+{
+ kill_litter_super(s);
+ qib_super = NULL;
+}
+
+int qibfs_add(struct qib_devdata *dd)
+{
+ int ret;
+
+ /*
+ * On first unit initialized, qib_super will not yet exist
+ * because nobody has yet tried to mount the filesystem, so
+ * we can't consider that to be an error; if an error occurs
+ * during the mount, that will get a complaint, so this is OK.
+ * add_cntr_files() for all units is done at mount from
+ * qibfs_fill_super(), so one way or another, everything works.
+ */
+ if (qib_super == NULL)
+ ret = 0;
+ else
+ ret = add_cntr_files(qib_super, dd);
+ return ret;
+}
+
+int qibfs_remove(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (qib_super)
+ ret = remove_device_files(qib_super, dd);
+
+ return ret;
+}
+
+static struct file_system_type qibfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ipathfs",
+ .get_sb = qibfs_get_sb,
+ .kill_sb = qibfs_kill_super,
+};
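+/*
+ * For reference: the filesystem keeps the legacy "ipathfs" type name, so it
+ * would typically be mounted with something like
+ * "mount -t ipathfs none /ipathfs" (the mount point is an assumption here).
+ */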
+
+int __init qib_init_qibfs(void)
+{
+ return register_filesystem(&qibfs_fs_type);
+}
+
+int __exit qib_exit_qibfs(void)
+{
+ return unregister_filesystem(&qibfs_fs_type);
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 00000000000..1eadadc13da
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3576 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 6120 PCIe chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_6120_regs.h"
+
+static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
+static u8 qib_6120_phys_portstate(u64);
+static u32 qib_6120_iblink_state(u64);
+
+/*
+ * This file contains all the chip-specific register information and
+ * access functions for the QLogic_IB 6120 PCI-Express chip.
+ */
+
+/* KREG_IDX uses machine-generated #defines */
+#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
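+/*
+ * For example, KREG_IDX(Control) expands to
+ * (QIB_6120_Control_OFFS / sizeof(u64)), i.e. the 64-bit word index of the
+ * Control register relative to kregbase.
+ */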
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
+#define kr_sendpiosize KREG_IDX(SendPIOSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_control KREG_IDX(Control)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_revision KREG_IDX(Revision)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
+#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
+#define kr_serdes_stat KREG_IDX(SerdesStat)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
+ QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
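+/*
+ * Counter register indices are relative to LBIntCnt, the first counter
+ * register, so CREG_IDX(LBIntCnt) is 0; read_6120_creg32() below indexes
+ * cregbase directly with these values.
+ */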
+
+#define cr_badformat CREG_IDX(RxBadFormatCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkProblemCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK << \
+ QIB_6120_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
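+/*
+ * Example expansion: ERR_MASK(HardwareErr) becomes
+ * SYM_MASK(ErrMask, HardwareErrMask), the HardwareErrMask field's RMASK
+ * shifted left by its LSB position (the in-register bit mask for that field).
+ */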
+
+/* link training states, from IBC */
+#define IB_6120_LT_STATE_DISABLED 0x00
+#define IB_6120_LT_STATE_LINKUP 0x01
+#define IB_6120_LT_STATE_POLLACTIVE 0x02
+#define IB_6120_LT_STATE_POLLQUIET 0x03
+#define IB_6120_LT_STATE_SLEEPDELAY 0x04
+#define IB_6120_LT_STATE_SLEEPQUIET 0x05
+#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
+#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
+#define IB_6120_LT_STATE_CFGIDLE 0x0b
+#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_6120_L_STATE_DOWN 0x0
+#define IB_6120_L_STATE_INIT 0x1
+#define IB_6120_L_STATE_ARM 0x2
+#define IB_6120_L_STATE_ACTIVE 0x3
+#define IB_6120_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_6120_physportstate[0x20] = {
+ [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ void *dummy_hdrq; /* used after ctxt close */
+ dma_addr_t dummy_hdrq_phys;
+ spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
+ spinlock_t user_tid_lock; /* no back to back user TID writes */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* shadow for kr_ibcctrl */
+ u32 lastlinkrecov; /* link recovery issue */
+ int irq;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 ncntrs;
+ u32 nportcntrs;
+ /* used with gpio interrupts to implement IB counters */
+ u32 rxfc_unsupvl_errs;
+ u32 overrun_thresh_errs;
+ /*
+ * these count only cases where _successive_ LocalLinkIntegrity
+ * errors were seen in the receive headers of IB standard packets
+ */
+ u32 lli_errs;
+ u32 lli_counter;
+ u64 lli_thresh;
+ u64 sword; /* total dwords sent (sample result) */
+ u64 rword; /* total dwords received (sample result) */
+ u64 spkts; /* total packets sent (sample result) */
+ u64 rpkts; /* total packets received (sample result) */
+ u64 xmit_wait; /* # of ticks no data sent (sample result) */
+ struct timer_list pma_timer;
+ char emsgbuf[128];
+ char bitsmsgbuf[64];
+ u8 pma_sample_status;
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
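+/*
+ * Worked example: qib_set_ib_6120_lstate() below ORs
+ * (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
+ * (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT) into ibcctrl, so moving the
+ * link to ARMED with a POLL init command writes (2 << 18) | (2 << 16).
+ */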
+
+/*
+ * We could have a single register get/put routine that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64-bit register reads should always work, but are inefficient
+ * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32-bit reads, so there is only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is not present (not distinguishable from a valid
+ * register value of 0; we may add a separate error indication at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_6120_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET 1U
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 0
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+#define QLOGIC_IB_I_BITSEXTANT \
+ ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+
+/* Bits in GPIO for the added IB link interrupts */
+#define GPIO_RXUVL_BIT 3
+#define GPIO_OVRUN_BIT 4
+#define GPIO_LLI_BIT 5
+#define GPIO_ERRINTR_MASK 0x38
+
+
+#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
+#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
+#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
+#define QLOGIC_IB_RT_IS_VALID(tid) \
+ (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
+#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
+#define QLOGIC_IB_RT_ADDR_SHIFT 10
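+/*
+ * Worked example: if the buffer-size field in bits 31:29 of a TID entry
+ * holds n, QLOGIC_IB_RT_BUFSIZE() yields 1 << (n + 10), e.g. n == 1 gives a
+ * 2 KB buffer; n == 0 and n == 7 are rejected by QLOGIC_IB_RT_IS_VALID().
+ */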
+
+#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
+#define QLOGIC_IB_R_TAILUPD_SHIFT 31
+#define IBA6120_R_PKEY_DIS_SHIFT 30
+
+#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 6120 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
+ * or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+};
+
+#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ /* variables for sanity checking interrupt and errors */
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr))
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
+ ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
+ ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
+ ERR_MASK(HardwareErr))
+
+#define QLOGIC_IB_E_PKTERRS ( \
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but only covers errors related to freeze
+ * and cancelling buffers. Armlaunch errors are deliberately not ignored,
+ * because more could arrive while we are still cleaning up, and those need
+ * to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received. This doesn't cover things
+ * like EBP or VCRC that can result from the sender seeing the link
+ * change state, so that we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
+ u32, unsigned long);
+
+/*
+ * On platforms using this chip, and not having ordered WC stores, we
+ * can get TXE parity errors due to speculative reads to the PIO buffers,
+ * and this, due to a chip issue, can result in (many) false parity error
+ * reports. So it's a debug print on those, and an info print on systems
+ * where the speculative reads don't occur.
+ */
+static void qib_6120_txe_recover(struct qib_devdata *dd)
+{
+ if (!qib_unordered_wc())
+ qib_devinfo(dd->pcidev,
+ "Recovering from TXE PIO parity error\n");
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips
+ */
+static void qib_6120_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+ qib_6120_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_6120_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_handle_6120_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue. Reuse the same message buffer as
+ * handle_6120_errors() to avoid excessive stack usage.
+ */
+static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ return;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ return;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ /*
+ * Make sure we get this much out, unless told to be quiet,
+ * or it's occurred within the last 5 seconds.
+ */
+ if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable,
+		 * just cancel the send (if indicated in sendbuffererror),
+ * count the occurrence, unfreeze (if no other handled
+ * hardware error bits are set), and continue. They can
+ * occur if a processor speculative read is done to the PIO
+ * buffer while we are sending a packet, for example.
+ */
+ if (hwerrs & TXE_PIO_PARITY) {
+ qib_6120_txe_recover(dd);
+ hwerrs &= ~TXE_PIO_PARITY;
+ }
+
+ if (!hwerrs) {
+ static u32 freeze_cnt;
+
+ freeze_cnt++;
+ qib_6120_clear_freeze(dd);
+ } else
+ isfatal = 1;
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
+ " unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
+ ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs)
+ /*
+		 * If any bits are set that we aren't ignoring, only make
+		 * the complaint once, in case the error is stuck or
+		 * recurring and we get here multiple times.
+ */
+ qib_dev_err(dd, "%s hardware error\n", msg);
+ else
+ *msg = 0; /* recovered from all of them */
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else. Return 1 if there are "real" errors, otherwise 0 if only packet
+ * errors, so the caller can decide what to print with the string.
+ */
+static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+done:
+ return iserr;
+}
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used.
+ */
+static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[2];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+
+ if (sbuf[0] || sbuf[1])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
+{
+ int ret = 1;
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ if (linkrecov != dd->cspec->lastlinkrecov) {
+ /* and no more until active again */
+ dd->cspec->lastlinkrecov = 0;
+ qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
+ ret = 0;
+ }
+ if (ibstate == IB_PORT_ACTIVE)
+ dd->cspec->lastlinkrecov =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ return ret;
+}
+
+static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n",
+ (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_6120_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above.
+ */
+ mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
+ ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
+ qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ int handle = 1;
+
+ if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
+ handle = chk_6120_linkrecovery(dd, ibcs);
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (handle && qib_6120_phys_portstate(ibcs) ==
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ handle = 0;
+ if (handle)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/**
+ * qib_6120_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_6120_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+	/* init so all hwerrors interrupt, and enter freeze; adjust below */
+ val = ~0ULL;
+ if (dd->minrev < 2) {
+ /*
+ * Avoid problem with internal interface bus parity
+ * checking. Fixed in Rev2.
+ */
+ val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
+ }
+	/* avoid a speculative-read freeze mode issue on some Intel CPUs */
+ val &= ~TXEMEMPARITYERR_PIOBUF;
+
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ qib_write_kreg(dd, kr_rcvbthqp,
+ dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
+ QIB_KD_QP);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear,
+ ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assumed that zero meant NOP.
+ */
+static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of control reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/**
+ * qib_6120_bringup_serdes - bring up the serdes
+ * @dd: the qlogic_ib device
+ */
+static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, config1, prev_val, hwstat, ibc;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+	 * How often flow control is sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ dd->cspec->lli_thresh = 0xf;
+ ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
+	/* use "real" buffer space for IB credit flow control */
+	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+	/* overrun threshold */
+	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
+
+ /*
+	 * Force reset on, also set rxdetect enable. Must do this before
+	 * reading serdesstatus, at least for simulation, or some of the bits
+	 * in serdes status will come back as undefined and cause simulation
+	 * failures.
+ */
+ val |= SYM_MASK(SerdesCfg0, ResetPLL) |
+ SYM_MASK(SerdesCfg0, RxDetEnX) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD));
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(5); /* need pll reset set at least for a bit */
+ /*
+ * after PLL is reset, set the per-lane Resets and TxIdle and
+ * clear the PLL reset and rxdetect (to get falling edge).
+ * Leave L1PWR bits set (permanently)
+ */
+ val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
+ SYM_MASK(SerdesCfg0, ResetPLL) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD)));
+ val |= (SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+ /* need PLL reset clear for at least 11 usec before lane
+ * resets cleared; give it a few more to be sure */
+ udelay(15);
+ val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX));
+
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
+ /* need to compensate for Tx inversion in partner */
+ val &= ~SYM_MASK(XGXSCfg, polarity_inv);
+ val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
+ }
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+
+ /* clear current and de-emphasis bits */
+ config1 &= ~0x0ffffffff00ULL;
+ /* set current to 20ma */
+ config1 |= 0x00000000000ULL;
+ /* set de-emphasis to -5.68dB */
+ config1 |= 0x0cccc000000ULL;
+ qib_write_kreg(dd, kr_serdes_cfg1, config1);
+
+ /* base and port guid same for single port */
+ ppd->guid = dd->base_guid;
+
+ /*
+ * the process of setting and un-resetting the serdes normally
+ * causes a serdes PLL error, so check for that and clear it
+ * here. Also clear hwerr bit in errstatus, but not others.
+ */
+ hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (hwstat) {
+ /* should just have PLL, clear all set, in any case */
+ if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear, hwstat);
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
+ }
+
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return 0;
+}
+
+/**
+ * qib_6120_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
+ dd->cspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_ibsymbolerr);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->ibsymsnap;
+ val -= dd->cspec->ibsymdelta;
+ write_6120_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_iblinkerrrecov);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->iblnkerrsnap;
+ val -= dd->cspec->iblnkerrdelta;
+ write_6120_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+}
+
+/**
+ * qib_6120_setup_setextled - set the state of the two external LEDs
+ * @ppd: physical port of the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combination of LEDs lit when "on" is true is determined
+ * by looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ u64 extctl, val, lst, ltst;
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ /* Allow override of LED display for, e.g. Locating system in rack */
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+ IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_6120_phys_portstate(val);
+ lst = qib_6120_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+
+ if (ltst == IB_PHYSPORTSTATE_LINKUP)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+static void qib_6120_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/**
+ * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+*/
+static void qib_6120_setup_cleanup(struct qib_devdata *dd)
+{
+ qib_6120_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+ if (dd->cspec->dummy_hdrq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ALIGN(dd->rcvhdrcnt *
+ dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE),
+ dd->cspec->dummy_hdrq,
+ dd->cspec->dummy_hdrq_phys);
+ dd->cspec->dummy_hdrq = NULL;
+ }
+}
+
+static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling
+ */
+static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat = 0;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ handle_6120_errors(dd, estat);
+ }
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+ u32 to_clear = 0;
+
+ /*
+ * GPIO_3..5 on IBA6120 Rev2 chips indicate
+ * errors that we need to count.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /* First the error-counter case. */
+ if (gpiostatus & GPIO_ERRINTR_MASK) {
+ /* want to clear the bits we see asserted. */
+ to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
+
+ /*
+ * Count appropriately, clear bits out of our copy,
+ * as they have been "handled".
+ */
+ if (gpiostatus & (1 << GPIO_RXUVL_BIT))
+ dd->cspec->rxfc_unsupvl_errs++;
+ if (gpiostatus & (1 << GPIO_OVRUN_BIT))
+ dd->cspec->overrun_thresh_errs++;
+ if (gpiostatus & (1 << GPIO_LLI_BIT))
+ dd->cspec->lli_errs++;
+ gpiostatus &= ~GPIO_ERRINTR_MASK;
+ }
+ if (gpiostatus) {
+ /*
+ * Some unexpected bits remain. If they could have
+ * caused the interrupt, complain and clear.
+ * To avoid repetition of this condition, also clear
+ * the mask. It is almost certainly due to error.
+ */
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+
+ /*
+ * Also check that the chip reflects our shadow,
+ * and report issues. If they caused the interrupt,
+ * we will suppress by refreshing from the shadow.
+ */
+ if (mask & gpiostatus) {
+ to_clear |= (gpiostatus & mask);
+ dd->cspec->gpio_mask &= ~(gpiostatus & mask);
+ qib_write_kreg(dd, kr_gpio_mask,
+ dd->cspec->gpio_mask);
+ }
+ }
+ if (to_clear)
+ qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
+ }
+}
+
+static irqreturn_t qib_6120intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u32 istat, ctxtrbits, rmask, crcs = 0;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg32(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_6120_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+ if (ctxtrbits) {
+ rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1U << QLOGIC_IB_I_RCVURG_SHIFT);
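+ /*
+ * rmask holds the RcvAvail and RcvUrg bits for context 0; it is
+ * shifted left once per context as we scan the kernel contexts.
+ * Whatever is left in ctxtrbits afterwards belongs to user
+ * contexts and is handed to qib_handle_urcv() below.
+ */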
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ crcs += qib_kreceive(dd->rcd[i],
+ &dd->cspec->lli_counter,
+ NULL);
+ }
+ rmask <<= 1;
+ }
+ if (crcs) {
+ u32 cntr = dd->cspec->lli_counter;
+ cntr += crcs;
+ if (cntr) {
+ if (cntr > dd->cspec->lli_thresh) {
+ dd->cspec->lli_counter = 0;
+ dd->cspec->lli_errs++;
+ } else
+ dd->cspec->lli_counter += cntr;
+ }
+ }
+
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ */
+static void qib_setup_6120_interrupt(struct qib_devdata *dd)
+{
+ /*
+ * If the chip supports added error indication via GPIO pins,
+ * enable interrupts on those bits so the interrupt routine
+ * can count the events. Also set flag so interrupt routine
+ * can know they are expected.
+ */
+ if (SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor) > 1) {
+ /* Rev2+ reports extra errors via internal GPIO pins */
+ dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret;
+ ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
+ QIB_DRV_NAME, dd);
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup interrupt "
+ "(irq=%d): %d\n", dd->cspec->irq,
+ ret);
+ }
+}
+
+/**
+ * pe_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void pe_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 2:
+ n = "InfiniPath_QLE7140";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_6120";
+ break;
+ }
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
+ "%u.%u!\n", dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context. If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int qib_6120_setup_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use ERROR so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_6120_set_intr_state(dd, 0);
+
+ dd->cspec->ibdeltainprog = 0;
+ dd->cspec->ibsymdelta = 0;
+ dd->cspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler re-ordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+ /* clear the reset error, init error/hwerror mask */
+ qib_6120_init_hwerrors(dd);
+ /* for Rev2 error interrupts; nop for rev 1 */
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ /* clear the reset error, init error/hwerror mask */
+ qib_6120_init_hwerrors(dd);
+ }
+ return ret;
+}
+
+/**
+ * qib_6120_put_tid - write a TID in chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used for both the full cleanup on exit, as well as the normal
+ * setup and teardown.
+ */
+static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ unsigned long flags;
+ int tidx;
+ spinlock_t *tidlockp; /* select appropriate spinlock */
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+
+ /*
+ * Avoid chip issue by writing the scratch register
+ * before and after the TID, and with an io write barrier.
+ * We use a spinlock around the writes, so they can't intermix
+ * with other TID (eager or expected) writes (the chip problem
+ * is triggered by back to back TID writes). Unfortunately, this
+ * call can be done from interrupt level for the ctxt 0 eager TIDs,
+ * so we have to use irqsave locks.
+ */
+ /*
+ * Assumes tidptr always > egrtidbase
+ * if type == RCVHQ_RCV_TYPE_EAGER.
+ */
+ tidx = tidptr - dd->egrtidbase;
+
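+ /*
+ * Kernel (ctxt 0) eager TIDs use a separate lock from all other
+ * TID writes, so interrupt-level eager updates don't contend
+ * with user expected-TID updates; either lock serializes the
+ * scratch/TID/scratch sequence below.
+ */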
+ tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
+ ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
+ spin_lock_irqsave(tidlockp, flags);
+ qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
+ writel(pa, tidp32);
+ qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
+ mmiowb();
+ spin_unlock_irqrestore(tidlockp, flags);
+}
+
+/**
+ * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ u32 tidx;
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+ tidx = tidptr - dd->egrtidbase;
+ writel(pa, tidp32);
+ mmiowb();
+}
+
+
+/**
+ * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context
+ *
+ * clear all TID entries for a context, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_6120_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_6120_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_6120_tidtemplate(struct qib_devdata *dd)
+{
+ u32 egrsize = dd->rcvegrbufsize;
+
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per ctxt instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (egrsize == 2048)
+ dd->tidtemplate = 1U << 29;
+ else if (egrsize == 4096)
+ dd->tidtemplate = 2U << 29;
+ dd->tidinvalid = 0;
+}
+
+int __attribute__((weak)) qib_unordered_wc(void)
+{
+ return 0;
+}
+
+/**
+ * qib_6120_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ if (qib_unordered_wc())
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
+
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
+ return 0;
+}
+
+
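+/*
+ * The message header starts right after the two dwords of RHF at the
+ * front of each rcvhdrq entry on this chip (rhf_offset is 0).
+ */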
+static struct qib_message_header *
+qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ return (struct qib_message_header *)
+ &rhf_addr[sizeof(u64) / sizeof(u32)];
+}
+
+static void qib_6120_config_ctxts(struct qib_devdata *dd)
+{
+ dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
+ if (qib_n_krcv_queues > 1) {
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > dd->ctxtcnt)
+ dd->first_user_ctxt = dd->ctxtcnt;
+ dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+}
+
+static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Used when we close any ctxt, for DMA already in flight
+ * at close. Can't be done until we know hdrq size, so not
+ * early in chip init.
+ */
+static void alloc_dummy_hdrq(struct qib_devdata *dd)
+{
+ dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->rcd[0]->rcvhdrq_size,
+ &dd->cspec->dummy_hdrq_phys,
+ GFP_KERNEL | __GFP_COMP);
+ if (!dd->cspec->dummy_hdrq) {
+ qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
+ /* fallback to just 0'ing */
+ dd->cspec->dummy_hdrq_phys = 0UL;
+ }
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
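+ /* ctxt < 0 means apply the operation to all contexts at once */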
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+
+ if (ctxt == 0 && !dd->cspec->dummy_hdrq)
+ alloc_dummy_hdrq(dd);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+ * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ /*
+ * Be paranoid, and never write 0's to these, just use an
+ * unused page. Of course, rcvhdraddr points to a large
+ * chunk of memory, so this could still trash things, but
+ * at least it won't trash page 0, and by disabling the
+ * ctxt, it should stop "soon", even if a packet or two is
+ * already in flight after we disabled the ctxt. Only 6120
+ * has this issue.
+ */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers
+ * with slightly different layouts. Only the operations
+ * actually used are implemented so far.
+ * The chip requires no back-to-back sendctrl writes, so we
+ * write the scratch register after writing sendctrl.
+ */
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, PIOEnable) |
+ SYM_MASK(SendCtrl, PIOBufAvailUpd));
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if (op & QIB_SENDCTRL_AVAIL_BLIP)
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_6120 - read a per-port counter
+ * @dd: the qlogic_ib device
+ * @creg: the counter to snapshot
+ */
+static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = 0xffff,
+ [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
+ [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = 0xffff,
+ [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
+ [QIBPORTCNTR_RXVLERR] = 0xffff,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = 0xffff,
+ [QIBPORTCNTR_PSINTERVAL] = 0xffff,
+ [QIBPORTCNTR_PSSTART] = 0xffff,
+ [QIBPORTCNTR_PSSTAT] = 0xffff,
+ [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+ /* handle counters requests not implemented as chip counters */
+ if (reg == QIBPORTCNTR_LLI)
+ ret = dd->cspec->lli_errs;
+ else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
+ ret = dd->cspec->overrun_thresh_errs;
+ else if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_6120_creg32(dd, cr_portovfl + i);
+ } else if (reg == QIBPORTCNTR_PSSTAT)
+ ret = dd->cspec->pma_sample_status;
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+ * only fast incrementing counters are 64bit; use 32 bit reads to
+ * avoid two independent reads when on opteron
+ */
+ if (creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv)
+ ret = read_6120_creg(dd, creg);
+ else
+ ret = read_6120_creg32(dd, creg);
+ if (creg == cr_ibsymbolerr) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->ibsymsnap;
+ ret -= dd->cspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->iblnkerrsnap;
+ ret -= dd->cspec->iblnkerrdelta;
+ }
+ if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
+ ret += dd->cspec->rxfc_unsupvl_errs;
+
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr6120indices contains the corresponding register indices.
+ */
+static const char cntr6120names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n";
+
+static const size_t cntr6120indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+};
+
+/*
+ * same as cntr6120names and cntr6120indices, but for port-specific counters.
+ * portcntr6120indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr6120names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "E IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr6120indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_6120_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
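+ /*
+ * i now counts the device counter names actually used (EgrOvfl
+ * entries are truncated at the configured context count); s is
+ * NULL if the entire list fits, else it points at the newline
+ * ending the last name used.
+ */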
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr6120names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr6120names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)cntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ if (pos >= ret) {
+ ret = 0; /* final read after getting everything */
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_6120(ppd,
+ portcntr6120indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_6120_creg32(dd,
+ portcntr6120indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
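+/*
+ * Called from the faststats timer: verify that the chip's errormask
+ * register still matches our shadow copy.  If it doesn't, restore it,
+ * and if hardware errors or freeze mode are also pending, clear the
+ * error and interrupt status to force a re-interrupt of anything
+ * outstanding.
+ */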
+static void qib_chk_6120_errormask(struct qib_devdata *dd)
+{
+ static u32 fixed;
+ u32 ctrl;
+ unsigned long errormask;
+ unsigned long hwerrs;
+
+ if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
+ return;
+
+ errormask = qib_read_kreg64(dd, kr_errmask);
+
+ if (errormask == dd->cspec->errormask)
+ return;
+ fixed++;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ ctrl = qib_read_kreg32(dd, kr_control);
+
+ qib_write_kreg(dd, kr_errmask,
+ dd->cspec->errormask);
+
+ if ((hwerrs & dd->cspec->hwerrmask) ||
+ (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, 0ULL);
+ /* force re-interrupt of pending events, just in case */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ qib_devinfo(dd->pcidev,
+ "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
+ fixed, errormask, (unsigned long)dd->cspec->errormask,
+ ctrl, hwerrs);
+ }
+}
+
+/**
+ * qib_get_6120_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds done the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_6120_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for diags case; won't hurt other */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
+ qib_portcntr_6120(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+
+ qib_chk_6120_errormask(dd);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/* no interrupt fallback for these chips */
+static int qib_6120_nointr_fallback(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/*
+ * reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int ret;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID:
+ ret = ppd->link_width_active;
+ break;
+
+ case QIB_IB_CFG_SPD:
+ ret = ppd->link_speed_active;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB:
+ ret = ppd->link_width_enabled;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ret = ppd->link_speed_enabled;
+ break;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ break;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->dd->cspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ break;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ ret = 0; /* no heartbeat on this chip */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ ret = 250; /* 1 usec. */
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * We assume range checking is already done, if needed.
+ */
+static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ u64 val64;
+ u16 lcmd, licmd;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB:
+ ppd->link_width_enabled = val;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ppd->link_speed_enabled = val;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, val64);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ dd->cspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl |= (u64)val <<
+ SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!dd->cspec->ibdeltainprog) {
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap =
+ read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_6120_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT:
+ ret = -EINVAL;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+bail:
+ return ret;
+}
+
+static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
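+/*
+ * PMA sample timer: on the first expiry (STARTED) snapshot the
+ * baseline counters, mark the sample RUNNING and re-arm for the
+ * sample interval; on the second expiry (RUNNING) compute the deltas
+ * and mark the sample DONE.
+ */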
+static void pma_6120_timer(unsigned long data)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)data;
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer,
+ jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+ } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
+ u64 ta, tb, tc, td, te;
+
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
+
+ cs->sword = ta - cs->sword;
+ cs->rword = tb - cs->rword;
+ cs->spkts = tc - cs->spkts;
+ cs->rpkts = td - cs->rpkts;
+ cs->xmit_wait = te - cs->xmit_wait;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+}
+
+/*
+ * Note that the caller has the ibp->lock held.
+ */
+static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+
+ if (start && intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
+ } else if (intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
+ } else {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ cs->sword = 0;
+ cs->rword = 0;
+ cs->spkts = 0;
+ cs->rpkts = 0;
+ cs->xmit_wait = 0;
+ }
+}
+
+static u32 qib_6120_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_6120_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_6120_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_6120_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_6120_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_6120_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_6120_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_6120_physportstate[state];
+}
+
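+/*
+ * Called on IB link up/down transitions.  While the link is down we
+ * hold a snapshot of the symbol-error and link-error-recovery
+ * counters (ibdeltainprog/ibsymsnap/iblnkerrsnap); when it comes back
+ * up, the difference accumulated while down is added to
+ * ibsymdelta/iblnkerrdelta, which qib_portcntr_6120() subtracts from
+ * the values it reports.
+ */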
+static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (ibup) {
+ if (ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 0;
+ ppd->dd->cspec->ibsymdelta +=
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
+ ppd->dd->cspec->ibsymsnap;
+ ppd->dd->cspec->iblnkerrdelta +=
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
+ ppd->dd->cspec->iblnkerrsnap;
+ }
+ qib_hol_init(ppd);
+ } else {
+ ppd->dd->cspec->lli_counter = 0;
+ if (!ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 1;
+ ppd->dd->cspec->ibsymsnap =
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr);
+ ppd->dd->cspec->iblnkerrsnap =
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
+ }
+ qib_hol_down(ppd);
+ }
+
+ qib_6120_setup_setextled(ppd, ibup);
+
+ return 0;
+}
+
+/* Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_6120_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ (((char __iomem *) dd->kregbase) +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buf allocate
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
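+ /*
+ * Each 64-bit pioavail register carries 2 bits per buffer, i.e.
+ * covers 32 buffers, so round up to the number of registers
+ * needed for all PIO buffers.
+ */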
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_6120_chip_params(), so split out as separate function
+ */
+static void set_6120_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_6120_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int init_6120_variables(struct qib_devdata *dd)
+{
+ int ret = 0;
+ struct qib_pportdata *ppd;
+ u32 sbufs;
+
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+ ppd->cpspec = NULL; /* not used in this chip */
+
+ spin_lock_init(&dd->cspec->kernel_tid_lock);
+ spin_lock_init(&dd->cspec->user_tid_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_6120_chip_params(dd);
+ pe_boardname(dd); /* fill in boardname */
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
+
+ if (qib_unordered_wc())
+ dd->flags |= QIB_PIO_FLUSH_WC;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ /* Ignore errors in PIO/PBC on systems with unordered write-combining */
+ if (qib_unordered_wc())
+ dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /* these can't change for this chip, so set once */
+ ppd->link_width_active = ppd->link_width_enabled;
+ ppd->link_speed_active = ppd->link_speed_enabled;
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset = 0;
+
+ /* we always allocate at least 2048 bytes for eager buffers */
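+	/*
+	 * For example, a qib_ibmtu selecting a 1024-byte MTU still gives
+	 * 2048-byte eager buffers (max(1024, 2048)), while a 4096-byte
+	 * MTU gives 4096-byte buffers.
+	 */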
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_6120_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_6120_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ init_timer(&dd->cspec->pma_timer);
+ dd->cspec->pma_timer.function = pma_6120_timer;
+ dd->cspec->pma_timer.data = (unsigned long) ppd;
+
+ dd->ureg_align = qib_read_kreg32(dd, kr_palign);
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+ qib_6120_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_6120_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+	qib_num_cfg_vls = 1; /* if any 6120s are present, use only one VL */
+
+ ret = qib_create_ctxts(dd);
+ init_6120_cntrnames(dd);
+
+	/* use all of the 4KB buffers for the kernel; otherwise use 16 */
+ sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
+
+ dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ if (ret)
+ goto bail;
+bail:
+ return ret;
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+done:
+ return buf;
+}
+
+static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_6120_link_buf(ppd, pbufnum);
+ else {
+
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->piobcnt2k + dd->piobcnt4k - 1;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+static int init_sdma_6120_regs(struct qib_pportdata *ppd)
+{
+ return -ENODEV;
+}
+
+static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
+{
+}
+
+static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+}
+
+static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+/*
+ * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
+ * The chip ignores the bit if set.
+ */
+static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
+}
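+
+/*
+ * Illustrative note: the control bit returned above is presumably placed
+ * in the pbc by the common send path; qib_6120_getsendbuf() then tests
+ * (pbc >> 32) against PBC_6120_VL15_SEND_CTRL to route link (VL15)
+ * packets to the dedicated link buffer.
+ */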
+
+static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
+ rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
+}
+
+static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 avail, struct qib_ctxtdata *rcd)
+{
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ (void) qib_write_kreg(dd, kr_scratch, val);
+}
+
+static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/* Dummy function, as 6120 boards never disable EEPROM Write */
+static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba6120_funcs - set up the chip-specific function pointers
+ * @pdev: pci_dev of the qlogic_ib device
+ * @ent: pci_device_id matching this chip
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ *
+ * It also allocates/partially-inits the qib_devdata struct for
+ * this device.
+ */
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_6120_bringup_serdes;
+ dd->f_cleanup = qib_6120_setup_cleanup;
+ dd->f_clear_tids = qib_6120_clear_tids;
+ dd->f_free_irq = qib_6120_free_irq;
+ dd->f_get_base_info = qib_6120_get_base_info;
+ dd->f_get_msgheader = qib_6120_get_msgheader;
+ dd->f_getsendbuf = qib_6120_getsendbuf;
+ dd->f_gpio_mod = gpio_6120_mod;
+ dd->f_eeprom_wen = qib_6120_eeprom_wen;
+ dd->f_hdrqempty = qib_6120_hdrqempty;
+ dd->f_ib_updown = qib_6120_ib_updown;
+ dd->f_init_ctxt = qib_6120_init_ctxt;
+ dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
+ dd->f_intr_fallback = qib_6120_nointr_fallback;
+ dd->f_late_initreg = qib_late_6120_initreg;
+ dd->f_setpbc_control = qib_6120_setpbc_control;
+ dd->f_portcntr = qib_portcntr_6120;
+ dd->f_put_tid = (dd->minrev >= 2) ?
+ qib_6120_put_tid_2 :
+ qib_6120_put_tid;
+ dd->f_quiet_serdes = qib_6120_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_6120_mod;
+ dd->f_read_cntrs = qib_read_6120cntrs;
+ dd->f_read_portcntrs = qib_read_6120portcntrs;
+ dd->f_reset = qib_6120_setup_reset;
+ dd->f_init_sdma_regs = init_sdma_6120_regs;
+ dd->f_sdma_busy = qib_sdma_6120_busy;
+ dd->f_sdma_gethead = qib_sdma_6120_gethead;
+ dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
+ dd->f_sendctrl = sendctrl_6120_mod;
+ dd->f_set_armlaunch = qib_set_6120_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
+ dd->f_iblink_state = qib_6120_iblink_state;
+ dd->f_ibphys_portstate = qib_6120_phys_portstate;
+ dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_6120_set_loopback;
+ dd->f_set_intr_state = qib_6120_set_intr_state;
+ dd->f_setextled = qib_6120_setup_setextled;
+ dd->f_txchk_change = qib_6120_txchk_change;
+ dd->f_update_usrhead = qib_update_6120_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
+ dd->f_xgxs_reset = qib_6120_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_6120_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped and accessible,
+ * but chip registers are not set up until start of
+ * init_6120_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = init_6120_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ if (qib_pcie_params(dd, 8, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ dd->cspec->irq = pdev->irq; /* save IRQ */
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_6120_interrupt(dd);
+ /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
+ qib_6120_init_hwerrors(dd);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 00000000000..6fd8d74e739
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 7220 chip (except that specific to the SerDes)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_7220.h"
+
+static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
+static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
+static u32 qib_7220_iblink_state(u64);
+static u8 qib_7220_phys_portstate(u64);
+static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
+static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
+ * exception of SerDes support, which is in qib_sd7220.c.
+ */
+
+/* Below uses machine-generated qib_chipnum_regs.h file */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
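+
+/*
+ * For example, KREG_IDX(Scratch) converts the byte offset
+ * QIB_7220_Scratch_OFFS from the generated header into an index of
+ * 64-bit registers, so kr_scratch below can be passed directly to
+ * qib_read_kreg64() and qib_write_kreg().
+ */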
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
+#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_senddmabase KREG_IDX(SendDmaBase)
+#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
+#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
+#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
+#define kr_senddmahead KREG_IDX(SendDmaHead)
+#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
+#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
+#define kr_senddmastatus KREG_IDX(SendDmaStatus)
+#define kr_senddmatail KREG_IDX(SendDmaTail)
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+
+#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
+ QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
+
+#define cr_badformat CREG_IDX(RxVersionErrCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkMalformCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
+#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define cr_psstat CREG_IDX(PSStat)
+#define cr_psstart CREG_IDX(PSStart)
+#define cr_psinterval CREG_IDX(PSInterval)
+#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK << \
+ QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
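+
+/*
+ * Usage sketch: SYM_FIELD(ibcs, IBCStatus, LinkTrainingState) extracts
+ * the link training state field from an IBCStatus value, and
+ * ERR_MASK(HardwareErr) builds the HardwareErr bit as laid out in the
+ * ErrMask/ErrStatus registers; both patterns are used throughout this
+ * file.
+ */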
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7220_IBCHG 0x81
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns -1 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
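+
+/*
+ * Example (illustrative): the per-context receive header address
+ * registers are laid out consecutively, so
+ * qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, addr) writes
+ * RcvHdrAddr0 + ctxt for the given context.
+ */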
+
+static inline void write_7220_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_revision bits */
+#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
+#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET (1U << 7)
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 32
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
+#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+
+#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
+#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+
+/* variables for sanity checking interrupts and errors */
+#define QLOGIC_IB_I_BITSEXTANT \
+ (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
+ QLOGIC_IB_I_SERDESTRIMDONE)
+
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr) | \
+ QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
+ QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
+ QLOGIC_IB_HWE_SDMAMEMREADERR | \
+ QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
+ QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendSpecialTriggerErr) | \
+ ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
+ ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
+ ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(InvalidEEPCmd))
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+/* specific to this chip */
+#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
+#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
+#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
+
+#define IBA7220_IBCC_LINKCMD_SHIFT 19
+
+/* kr_ibcddrctrl bits */
+#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
+#define IBA7220_IBC_DLIDLMC_SHIFT 32
+
+#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
+ SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
+#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
+
+#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
+#define IBA7220_IBC_LREV_MASK 1
+#define IBA7220_IBC_LREV_SHIFT 8
+#define IBA7220_IBC_RXPOL_MASK 1
+#define IBA7220_IBC_RXPOL_SHIFT 7
+#define IBA7220_IBC_WIDTH_SHIFT 5
+#define IBA7220_IBC_WIDTH_MASK 0x3
+#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
+#define IBA7220_IBC_SPEED_SDR (1 << 2)
+#define IBA7220_IBC_SPEED_DDR (1 << 3)
+#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
+#define IBA7220_IBC_IBTA_1_2_MASK (1)
+
+/* kr_ibcddrstatus */
+/* link latency shift is 0, don't bother defining */
+#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
+
+/* kr_rcvpktledcnt */
+#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
+#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
+#define QIB_TWSI_TEMP_DEV 0x98
+
+/* HW counter clock is at 4nsec */
+#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
+
+#define IBA7220_R_INTRAVAIL_SHIFT 17
+#define IBA7220_R_PKEY_DIS_SHIFT 34
+#define IBA7220_R_TAILUPD_SHIFT 35
+#define IBA7220_R_CTXTCFG_SHIFT 36
+
+#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * The size bits give us 2^N, in KB units.  0 marks the entry invalid,
+ * and 7 is reserved.  We currently use only 2KB and 4KB
+ */
+#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
+#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
+#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
+#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
+#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
+#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
+
+/* packet rate matching delay multiplier */
+static u8 rate_to_delay[2][2] = {
+ /* 1x, 4x */
+ { 8, 2 }, /* SDR */
+ { 4, 1 } /* DDR */
+};
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 8,
+ [IB_RATE_5_GBPS] = 4,
+ [IB_RATE_10_GBPS] = 2,
+ [IB_RATE_20_GBPS] = 1
+};
+
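+/*
+ * Illustrative reading of the tables above: a 1X SDR link
+ * (IB_RATE_2_5_GBPS) gets a delay multiplier of 8, while a 4X DDR link
+ * (IB_RATE_20_GBPS) gets 1, i.e. the delay scales inversely with link
+ * bandwidth.
+ */
+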
+#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
+#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7220_LT_STATE_DISABLED 0x00
+#define IB_7220_LT_STATE_LINKUP 0x01
+#define IB_7220_LT_STATE_POLLACTIVE 0x02
+#define IB_7220_LT_STATE_POLLQUIET 0x03
+#define IB_7220_LT_STATE_SLEEPDELAY 0x04
+#define IB_7220_LT_STATE_SLEEPQUIET 0x05
+#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_CFGIDLE 0x0b
+#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_7220_L_STATE_DOWN 0x0
+#define IB_7220_L_STATE_INIT 0x1
+#define IB_7220_L_STATE_ARM 0x2
+#define IB_7220_L_STATE_ACTIVE 0x3
+#define IB_7220_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7220_physportstate[0x20] = {
+ [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+int qib_special_trigger;
+module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
+MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ (1ULL << (SYM_LSB(regname, fldname) + (bit))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 7220 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+	 * In practice, it's unlikely that we'll see PCIe PLL, bus
+	 * parity, or memory parity failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
+ "PCIe cpl header queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
+ "PCIe cpl data queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
+ "Send DMA memory read"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
+ "uC PLL clock not locked"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
+ "PCIe serdes Q0 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
+ "PCIe serdes Q1 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
+ "PCIe serdes Q2 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
+ "PCIe serdes Q3 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
+ "DDS RXEQ memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
+ "IB uC memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
+ "PCIe uC oct0 memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
+ "PCIe uC oct1 memory parity"),
+};
+
+#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
+
+#define QLOGIC_IB_E_PKTERRS (\
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
+
+/* Convenience for decoding Send DMA errors */
+#define QLOGIC_IB_E_SDMAERRS ( \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SendBufMisuseErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors
+ * and we don't ignore errors unrelated to freeze and cancelling buffers.
+ * Armlaunch can't be ignored because more may occur while we are still
+ * cleaning up, and those need to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can result from the sender's link changing
+ * state, so we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void autoneg_7220_work(struct work_struct *);
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used;
+ * we don't need to force an update of pioavail here.
+ */
+static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[3];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ /* read these before writing errorclear */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ if (sbuf[0] || sbuf[1] || sbuf[2])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static void qib_7220_txe_recover(struct qib_devdata *dd)
+{
+ qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
+ qib_disarm_7220_senderrbufs(dd->pport);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ dd->sendctrl |= set_sendctrl;
+ dd->sendctrl &= ~clr_sendctrl;
+
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ spin_unlock(&dd->sendctrl_lock);
+}
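+
+/*
+ * Usage note (illustrative): op is a bitmask of QIB_SDMA_SENDCTRL_OP_*
+ * flags; e.g. passing OP_ENABLE | OP_INTENABLE sets SDmaEnable and
+ * SDmaIntEnable and clears SDmaHalt in the shadow copy of SendCtrl
+ * before it is written back to the chip.
+ */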
+
+static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
+ u64 err, char *buf, size_t blen)
+{
+ static const struct {
+ u64 err;
+ const char *msg;
+ } errs[] = {
+ { ERR_MASK(SDmaGenMismatchErr),
+ "SDmaGenMismatch" },
+ { ERR_MASK(SDmaOutOfBoundErr),
+ "SDmaOutOfBound" },
+ { ERR_MASK(SDmaTailOutOfBoundErr),
+ "SDmaTailOutOfBound" },
+ { ERR_MASK(SDmaBaseErr),
+ "SDmaBase" },
+ { ERR_MASK(SDma1stDescErr),
+ "SDma1stDesc" },
+ { ERR_MASK(SDmaRpyTagErr),
+ "SDmaRpyTag" },
+ { ERR_MASK(SDmaDwEnErr),
+ "SDmaDwEn" },
+ { ERR_MASK(SDmaMissingDwErr),
+ "SDmaMissingDw" },
+ { ERR_MASK(SDmaUnexpDataErr),
+ "SDmaUnexpData" },
+ { ERR_MASK(SDmaDescAddrMisalignErr),
+ "SDmaDescAddrMisalign" },
+ { ERR_MASK(SendBufMisuseErr),
+ "SendBufMisuse" },
+ { ERR_MASK(SDmaDisabledErr),
+ "SDmaDisabled" },
+ };
+ int i;
+ size_t bidx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(errs); i++) {
+ if (err & errs[i].err)
+ bidx += scnprintf(buf + bidx, blen - bidx,
+ "%s ", errs[i].msg);
+ }
+}
+
+/*
+ * This is called as part of link-down cleanup; disarm and flush
+ * all send buffers so that SMP packets can be sent.
+ */
+static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ /* This will trigger the Abort interrupt */
+ sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+}
+
+static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+	 * Set SendDmaLenGen twice: first with the generation MSB clear,
+	 * then with it set, to enable generation checking and load the
+	 * internal generation counter.
+ */
+ qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg(ppd->dd, kr_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
+}
+
+static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+}
+
+#define DISABLES_SDMA ( \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDma1stDescErr) | \
+ ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaDwEnErr))
+
+static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+ char *msg;
+
+ errs &= QLOGIC_IB_E_SDMAERRS;
+
+ msg = dd->cspec->sdmamsgbuf;
+ qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ if (errs & ERR_MASK(SendBufMisuseErr)) {
+ unsigned long sbuf[3];
+
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ qib_dev_err(ppd->dd,
+ "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
+ ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
+ sbuf[0]);
+ }
+
+ if (errs & ERR_MASK(SDmaUnexpDataErr))
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
+ ppd->port);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s20_idle:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & ERR_MASK(SDmaDisabledErr))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s99_running:
+ if (errs & DISABLES_SDMA)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e7220_err_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else. Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so caller can decide what to print with the string.
+ */
+static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(SendSpecialTriggerErr))
+ strlcat(buf, "sendspecialtrigger ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+ if (err & QLOGIC_IB_E_SDMAERRS)
+ qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
+ if (err & ERR_MASK(InvalidEEPCmd))
+ strlcat(buf, "invalideepromcmd ", blen);
+done:
+ return iserr;
+}
+
+static void reenable_7220_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7220_LT_STATE_CFGRCVFCFG:
+ case IB_7220_LT_STATE_CFGWAITRMT:
+ case IB_7220_LT_STATE_TXREVLANES:
+ case IB_7220_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end)) {
+ ppd->cpspec->chase_end = 0;
+ qib_set_ib_7220_lstate(ppd,
+ QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies +
+ QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+ } else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+}
+
+static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QLOGIC_IB_E_SDMAERRS)
+ sdma_7220_errors(ppd, errs);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_7220_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = ERR_MASK(IBStatusChanged) |
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
+ ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
+
+ qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
+ ERR_MASK(SDmaDisabledErr));
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs;
+
+ ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_7220_chase(ppd, ibcs);
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active =
+ ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (qib_7220_phys_portstate(ibcs) !=
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7220_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+ qib_7220_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_7220_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_7220_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue.  We reuse the same message buffer as
+ * handle_7220_errors() to avoid excessive stack usage.
+ */
+static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /*
+ * Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support.
+ */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+ if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
+ RXE_PARITY))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
+ qib_sd7220_clr_ibpar(dd);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable by h/w;
+ * just do housekeeping, exit freeze mode and continue.
+ */
+ if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC)) {
+ qib_7220_txe_recover(dd);
+ hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC);
+ }
+ if (hwerrs)
+ isfatal = 1;
+ else
+ qib_7220_clear_freeze(dd);
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
+ ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused.
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * For /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7220_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask.
+ */
+static void qib_7220_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
+ QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+ if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
+ qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
+
+ val = ~0ULL; /* default to all hwerrors becoming interrupts */
+
+ val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/*
+ * All detailed interaction with the SerDes has been moved to qib_sd7220.c
+ *
+ * The portion of IBA7220-specific bringup_serdes() that actually deals with
+ * registers and memory within the SerDes itself is qib_sd7220_init().
+ */
+
+/**
+ * qib_7220_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, prev_val, guid, ibc;
+ int ret = 0;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
+ /* use "real" buffer space for */
+ ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
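+ /*
+ * Worked example (hypothetical value, for illustration only): with
+ * an ibmaxlen of 4096 bytes, ibmaxlen >> 2 gives 1024 dwords, so the
+ * expression above programs the MaxPktLen field with 1025.
+ */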
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ if (!ppd->cpspec->ibcddrctrl) {
+ /* not on re-init after reset */
+ ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
+
+ if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_speed_enabled == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7220_IBC_WIDTH_4X_ONLY :
+ IBA7220_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+
+ /* enable automatic lane reversal detection for receive */
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ ret = qib_sd7220_init(dd);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ val |= QLOGIC_IB_XGXS_FC_SAFE;
+ if (val != prev_val) {
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ /* first time through, set port guid */
+ if (!ppd->guid)
+ ppd->guid = dd->base_guid;
+ guid = be64_to_cpu(ppd->guid);
+
+ qib_write_kreg(dd, kr_hrtbt_guid, guid);
+ if (!ret) {
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, dd->control);
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+ return ret;
+}
+
+/**
+ * qib_7220_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7220_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7220_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+ qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ flush_scheduled_work();
+
+ shutdown_7220_relock_poll(ppd->dd);
+ val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
+ val |= QLOGIC_IB_XGXS_RESET;
+ qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
+}
+
+/**
+ * qib_setup_7220_setextled - set the state of the two external LEDs
+ * @dd: the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val, lst, ltst;
+ unsigned long flags;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+ IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_7220_phys_portstate(val);
+ lst = qib_7220_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+ if (ltst == IB_PHYSPORTSTATE_LINKUP) {
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ /*
+ * counts are in chip clock (4ns) periods.
+ * This is 1/16 sec (66.6ms) on,
+ * 3/16 sec (187.5 ms) off, with packets rcvd
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
+ | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
+ }
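+ /*
+ * For reference: 66600 * 1000 / 4 = 16,650,000 chip clocks of 4 ns
+ * each (~66.6 ms on), and 187500 * 1000 / 4 = 46,875,000 clocks
+ * (~187.5 ms off), matching the on/off times described above.
+ */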
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
+}
+
+static void qib_7220_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/*
+ * qib_setup_7220_cleanup - clean up any chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ *
+ */
+static void qib_setup_7220_cleanup(struct qib_devdata *dd)
+{
+ qib_7220_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+}
+
+/*
+ * This is only called for SDmaInt.
+ * SDmaDisabled is handled on the error path.
+ */
+static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ /* too chatty to print here */
+ __qib_sdma_intr(ppd);
+ break;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint) {
+ if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ goto done;
+ /*
+ * blip the availupd off, next write will be on, so
+ * we ensure an avail update, regardless of threshold or
+ * buffers becoming free, whenever we want an interrupt
+ */
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
+ ~SYM_MASK(SendCtrl, SendBufAvailUpd));
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ } else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+done:
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd,
+ "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+ * to alert developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+
+ if (gpiostatus) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * A bit set in status and (chip) Mask register
+ * would cause an interrupt. Since we are not
+ * expecting any, report it. Also check that the
+ * chip reflects our shadow, report issues,
+ * and refresh from the shadow.
+ */
+ /*
+ * Clear any troublemakers, and update chip
+ * from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+ }
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ else
+ handle_7220_errors(dd, estat);
+ }
+}
+
+static irqreturn_t qib_7220intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_7220_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
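+ /*
+ * Each kernel context owns one RcvAvail bit and one RcvUrg bit in
+ * istat; rmask starts at the pair of bits for context 0 and is
+ * shifted left one position per context below, so each kernel
+ * context with a bit set gets a qib_kreceive() call. Whatever
+ * remains is folded down and handed to qib_handle_urcv().
+ */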
+ if (ctxtrbits) {
+ rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ qib_kreceive(dd->rcd[i], NULL, NULL);
+ }
+ rmask <<= 1;
+ }
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ /* only call for SDmaInt */
+ if (istat & QLOGIC_IB_I_SDMAINT)
+ sdma_7220_intr(dd->pport, istat);
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSI interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7220_interrupt(struct qib_devdata *dd)
+{
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret = request_irq(dd->cspec->irq, qib_7220intr,
+ dd->msi_lo ? 0 : IRQF_SHARED,
+ QIB_DRV_NAME, dd);
+
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup %s interrupt "
+ "(irq=%d): %d\n", dd->msi_lo ?
+ "MSI" : "INTx", dd->cspec->irq, ret);
+ }
+}
+
+/**
+ * qib_7220_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void qib_7220_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 1:
+ n = "InfiniPath_QLE7240";
+ break;
+ case 2:
+ n = "InfiniPath_QLE7280";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_7220";
+ break;
+ }
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware "
+ "revision %u.%u!\n",
+ dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_setup_7220_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_7220_set_intr_state(dd, 0);
+
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler reordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+
+ /* hold IBC in reset, no sends, etc till later */
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7220_init_hwerrors(dd);
+
+ /* do setup similar to speed or link-width changes */
+ if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
+ dd->cspec->presets_needed = 1;
+ spin_lock_irqsave(&dd->pport->lflags_lock, flags);
+ dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
+ dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
+ }
+
+ return ret;
+}
+
+/**
+ * qib_7220_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: 0 for eager, 1 for expected
+ * @pa: physical address of in-memory buffer; tidinvalid if freeing
+ */
+static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+ /* paranoia checks */
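+ /*
+ * Illustration (hypothetical address; the shift width is implied by
+ * the 2KB message below): with pa = 0x12345800, shifting down and
+ * back up by IBA7220_TID_PA_SHIFT reproduces pa, so the check
+ * passes; an address such as 0x12345900 would not round-trip and
+ * is rejected as unaligned.
+ */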
+ if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7220_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
+
+/**
+ * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_7220_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7220_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_7220_tidtemplate(struct qib_devdata *dd)
+{
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7220_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7220_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7220_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
+
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
+static struct qib_message_header *
+qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+static void qib_7220_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_portcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1) {
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 5)
+ dd->ctxtcnt = 5;
+ else if (nctxts <= 9)
+ dd->ctxtcnt = 9;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
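+ /*
+ * Example (hypothetical counts): with one port, one kernel receive
+ * queue and six online CPUs, first_user_ctxt is 1 and nctxts is 7,
+ * so ctxtcnt is rounded up to the next supported size, 9.
+ */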
+
+ /*
+ * Chip can be configured for 5, 9, or 17 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 9)
+ dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
+ else if (dd->ctxtcnt > 5)
+ dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
+ /* else configure for default 5 receive ctxts */
+ if (dd->qpn_mask)
+ dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
+}
+
+static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
+ & IBA7220_DDRSTAT_LINKLAT_MASK;
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+ * 0x00 = 10x link transfer rate or 4 nsec for 2.5Gbs.
+ * Since the clock is always 250MHz, the value is 1 or 0.
+ */
+ ret = (ppd->link_speed_active == QIB_IB_DDR);
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
+done:
+ return ret;
+}
+
+static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0, setforce = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+ /*
+ * Set LID and LMC. Combined to avoid a possible hazard; the
+ * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
+ */
+ lsb = IBA7220_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7220_IBC_DLIDLMC_MASK;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ /*
+ * As with speed, only write the actual register if
+ * the link is currently down, otherwise takes effect
+ * on next link change.
+ */
+ ppd->link_width_enabled = val;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+ * will get called because we want to update
+ * link_width_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+ * if it's not a down->up transition.
+ */
+ val--; /* convert from IB to chip */
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ setforce = 1;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * If we turn off IB1.2, need to preset SerDes defaults,
+ * but not right now. Set a flag for the next time
+ * we command the link down. As with width, only write the
+ * actual register if the link is currently down, otherwise
+ * takes effect on next link change. Since setting is being
+ * explicitly requested (via MAD or sysfs), clear autoneg
+ * failure status if speed autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
+ !(val & (val - 1)))
+ dd->cspec->presets_needed = 1;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+ * will get called because we want to update
+ * link_speed_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+ * if it's not a down->up transition.
+ */
+ if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
+ val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else
+ val = val == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + speed bits are contiguous */
+ lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
+ setforce = 1;
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+ * Set even if it's unchanged; print a debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * stop state chase counter and timer, if running.
+ * wait for pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > IBA7220_IBC_HRTBT_MASK) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ if (setforce) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+bail:
+ return ret;
+}
+
+static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ddr;
+
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ /* enable heart beat again */
+ val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
+ << IBA7220_IBC_HRTBT_SHIFT);
+ ppd->cpspec->ibcddrctrl = ddr | val;
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
+static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
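+ /*
+ * For example, with ctxtcnt == 9, ctxt < 0 yields mask 0x1ff
+ * (all contexts), while ctxt == 3 yields mask 0x8 (that one only).
+ */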
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+ dd->rcd[ctxt]->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+ * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers
+ * with slightly different layouts. To start, we assume the
+ * "canonical" register layout of the first chips.
+ * Chip requires no back-to-back sendctrl writes, so write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl,
+ SSpecialTriggerEn);
+ }
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, SPioEnable) |
+ SYM_MASK(SendCtrl, SendBufAvailUpd));
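+ /*
+ * Each write below sets the Disarm bit and ORs in the buffer
+ * number i, so buffers 0..last-1 are disarmed one write at a
+ * time, with a scratch write after each (no back-to-back
+ * sendctrl writes, per the comment above this function).
+ */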
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_7220 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
+ [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
+ [QIBPORTCNTR_PSSTART] = cr_psstart,
+ [QIBPORTCNTR_PSSTAT] = cr_psstat,
+ [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_7220_creg32(dd, cr_portovfl + i);
+ }
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+ * Only fast-incrementing counters are 64-bit; use 32-bit reads to
+ * avoid two independent reads when on Opteron.
+ */
+ if ((creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv))
+ ret = read_7220_creg(dd, creg);
+ else
+ ret = read_7220_creg32(dd, creg);
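+ /*
+ * The adjustments below keep the symbol-error and link-error-recovery
+ * counts continuous across IBC bring-up: while a delta measurement is
+ * in progress, the snapshot taken when that measurement started is
+ * reported instead of the live count, and the accumulated delta is
+ * subtracted from whatever is returned.
+ */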
+ if (creg == cr_ibsymbolerr) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= dd->pport->cpspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= dd->pport->cpspec->iblnkerrdelta;
+ }
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7220indices contains the corresponding register indices.
+ */
+static const char cntr7220names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n";
+
+static const size_t cntr7220indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+ cr_portovfl + 5,
+ cr_portovfl + 6,
+ cr_portovfl + 7,
+ cr_portovfl + 8,
+ cr_portovfl + 9,
+ cr_portovfl + 10,
+ cr_portovfl + 11,
+ cr_portovfl + 12,
+ cr_portovfl + 13,
+ cr_portovfl + 14,
+ cr_portovfl + 15,
+ cr_portovfl + 16,
+};
+
+/*
+ * Same as cntr7220names and cntr7220indices, but for port-specific counters.
+ * portcntr7220indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG.
+ */
+static const char portcntr7220names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only */
+ "RxVL15Drop\n" /* 7220 and 7322-only */
+ "RxVlErr\n" /* 7220 and 7322-only */
+ "XcessBufOvfl\n" /* 7220 and 7322-only */
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr7220indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_txsdmadesc,
+ cr_rxdlidfltr,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7220_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
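+ /*
+ * The scan below counts one name per newline; once the first
+ * CtxtNEgrOvfl entry is seen, only entries up to the configured
+ * context count are taken, so per-context overflow counters beyond
+ * cfgctxts are not exported. ncntrs and cntrnamelen then describe
+ * just this (possibly truncated) list.
+ */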
+ for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7220names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7220names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->cntrs) {
+ ret = 0;
+ goto done;
+ }
+
+ if (namep) {
+ *namep = (char *)cntr7220names;
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->portcntrs) {
+ ret = 0;
+ goto done;
+ }
+ if (namep) {
+ *namep = (char *)portcntr7220names;
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7220(ppd,
+ portcntr7220indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_7220_creg32(dd,
+ portcntr7220indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7220_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds maintained the way it is.
+ *
+ * Called from add_timer.
+ */
+static void qib_get_7220_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for the diags case; won't hurt otherwise */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
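+ /*
+ * traffic_wds first holds the cumulative word count; after the
+ * subtraction below it is the delta since the last poll, which is
+ * what gets compared against the activity threshold.
+ */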
+ traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
+ qib_portcntr_7220(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* should be a #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we are using MSI, try to fallback to INTx.
+ */
+static int qib_7220_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->msi_lo)
+ return 0;
+
+ qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7220_free_irq(dd);
+ qib_enable_intx(dd->pcidev);
+ /*
+ * Some newer kernels require free_irq before disable_msi,
+ * and irq can be changed during disable and INTx enable
+ * and we need to therefore use the pcidev->irq value,
+ * not our saved MSI value.
+ */
+ dd->cspec->irq = dd->pcidev->irq;
+ qib_setup_7220_interrupt(dd);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
+ int do_cleanup;
+ unsigned long flags;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
+ ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
+ __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+ do_cleanup = 0;
+ } else {
+ do_cleanup = 1;
+ qib_7220_sdma_hw_clean_up(ppd);
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ if (do_cleanup) {
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ }
+done:
+ return buf;
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ pbc |= PBC_7220_VL15_SEND;
+ while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
+ if (i++ > 5)
+ return;
+ udelay(2);
+ }
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down)
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA 1.2 negotiation enabled as well as "use max
+ * enabled speed".
+ */
+static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK);
+
+ if (speed == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7220_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ /*
+ * Required for older non-IB1.2 DDR switches. Newer
+ * non-IB-compliant switches don't need it, but so far,
+ * aren't bothered by it either. "Magic constant"
+ */
+ qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ autoneg_7220_send(ppd, 0);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+
+ toggle_7220_rclkrls(ppd->dd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7220_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = &container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->pportdata;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+ * Busy wait for this first part, it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
+ == IB_7220_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+
+ toggle_7220_rclkrls(dd);
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+
+ set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
+ toggle_7220_rclkrls(dd);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ dd->cspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
+static u32 qib_7220_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_7220_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7220_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7220_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7220_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7220_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7220_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_7220_physportstate[state];
+}
+
+static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!ibup) {
+ /*
+ * When the link goes down we don't want AEQ running, so it
+ * won't interfere with IBC training, etc., and we need
+ * to go back to the static SerDes preset values.
+ */
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ qib_sd7220_presets(dd);
+ qib_cancel_sends(ppd); /* initial disarm, etc. */
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ /* this might be better in qib_sd7220_presets() */
+ set_7220_relock_poll(dd, ibup);
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
+ (QIB_IB_DDR | QIB_IB_SDR) &&
+ dd->cspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and DDR auto-negotiation enabled */
+ ++dd->cspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
+ cr_iblinkerrrecov);
+ }
+ try_7220_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ autoneg_7220_send(ppd, 1);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+ udelay(2);
+ toggle_7220_rclkrls(dd);
+ ret = 1; /* no other IB status change processing */
+ } else {
+ if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ dd->cspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7220_ibspeed_fast(ppd,
+ ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_IBTA_1_2_MASK;
+ qib_write_kreg(dd, kr_ncmodectrl, 0);
+ symadj = 1;
+ }
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ symadj = 1;
+
+ if (!ret) {
+ ppd->delay_mult = rate_to_delay
+ [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
+ [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
+
+ set_7220_relock_poll(dd, ibup);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /*
+ * Unlike 7322, the 7220 needs this, due to lack of
+ * interrupt in some cases when we have sdma active
+ * when the link goes down.
+ */
+ if (ppd->sdma_state.current_state !=
+ qib_sdma_state_s20_idle)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e00_go_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+
+ if (symadj) {
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7220_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does a read/modify/write of the appropriate registers to
+ * set the output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. the lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns the contents of the GP Inputs.
+ */
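+/*
+ * For example, gpio_7220_mod(dd, 0, 0, 0) performs no write (mask is
+ * zero) and simply samples and returns the current GPIO input pins.
+ */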
+static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7220_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+ * every buffer allocation.
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
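+ /*
+ * Each 64-bit pioavail register covers 32 buffers (two bits per
+ * buffer), so round the buffer count up to a whole number of
+ * registers.
+ */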
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7220_chip_params(), so they are split out into a separate function.
+ */
+static void set_7220_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ /* init after possible re-map in init_chip_wc_pat() */
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+
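+/*
+ * The SendCtrl bits that the driver keeps shadowed in dd->sendctrl; the
+ * diag observer hook below uses this mask to keep the shadow consistent
+ * when diags read or write the register directly.
+ */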
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
+ SYM_MASK(SendCtrl, SPioEnable) | \
+ SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
+ SYM_MASK(SendCtrl, SendBufAvailUpd) | \
+ SYM_MASK(SendCtrl, AvailUpdThld) | \
+ SYM_MASK(SendCtrl, SDmaEnable) | \
+ SYM_MASK(SendCtrl, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl, SDmaHalt) | \
+ SYM_MASK(SendCtrl, SDmaSingleDescriptor))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx = offs / sizeof(u64);
+ u64 local_data, all_bits;
+
+ if (idx != kr_sendctrl) {
+ qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
+ offs, only_32 ? "32" : "64");
+ return 0;
+ }
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if ((mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
+ (u32)local_data, (u32)dd->sendctrl);
+ if ((local_data & SENDCTRL_SHADOWED) !=
+ (dd->sendctrl & SENDCTRL_SHADOWED))
+ qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
+ (u32)local_data, (u32) dd->sendctrl);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ sval = (dd->sendctrl & ~mask);
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ dd->sendctrl = sval;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
+ (u32)tval, (u32)sval);
+ qib_write_kreg(dd, kr_sendctrl, tval);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_observer = {
+ sendctrl_hook, kr_sendctrl * sizeof(u64),
+ kr_sendctrl * sizeof(u64)
+};
+
+/*
+ * write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7220_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ qib_register_observer(dd, &sendctrl_observer);
+ return ret;
+}
+
+static int qib_init_7220_variables(struct qib_devdata *dd)
+{
+ struct qib_chippport_specific *cpspec;
+ struct qib_pportdata *ppd;
+ int ret = 0;
+ u32 sbufs, updthresh;
+
+ cpspec = (struct qib_chippport_specific *)(dd + 1);
+ ppd = &cpspec->pportdata;
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+ ppd->cpspec = cpspec;
+
+ spin_lock_init(&dd->cspec->sdepb_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_7220_chip_params(dd);
+ qib_7220_boardname(dd);
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ init_waitqueue_head(&cpspec->autoneg_wait);
+ INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
+
+ ppd->link_width_enabled = ppd->link_width_supported;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+ * Set the initial values to reasonable default, will be set
+ * for real when link is up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = rate_to_delay[0][1];
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ if (!qib_mini_init)
+ qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
+
+ init_timer(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.function = reenable_7220_chase;
+ ppd->cpspec->chase_timer.data = (unsigned long)ppd;
+
+ qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset =
+ dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_7220_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
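+ /*
+ * The count is carried in the upper 32 bits of the rcvhdrhead update
+ * written via qib_update_7220_usrhead(), so 1ULL << 32 requests an
+ * interrupt after one packet.
+ */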
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7220_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+ dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
+
+ /*
+ * Control[4] has been added to change the arbitration within
+ * the SDMA engine between favoring data fetches over descriptor
+ * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
+ */
+ if (qib_sdma_fetch_arb)
+ dd->control |= 1 << 4;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
+ qib_7220_config_ctxts(dd);
+ qib_set_ctxtcnt(dd); /* needed for PAT setup */
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_7220_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+ ret = qib_create_ctxts(dd);
+ init_7220_cntrnames(dd);
+
+ /*
+ * Use all the 4KB buffers for kernel SDMA (zero if we aren't enabling
+ * SDMA). Reserve the larger of the update threshold or 3 buffers for
+ * other kernel use, such as sending SMI, MAD, and ACKs; if SDMA is not
+ * enabled, the kernel keeps all the 4K buffers instead.
+ * If the reserve were less than the update threshold, we could wait
+ * a long time for an update. Coded this way because we sometimes
+ * change the update threshold for various reasons, and we want this
+ * to remain robust.
+ */
+ updthresh = 8U; /* update threshold */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ /*
+ * If we are at 16 user contexts, we will have only 7 sbufs
+ * per context, so drop the update threshold to match. We
+ * want to update before we actually run out; at low pbufs/ctxt,
+ * give ourselves some margin.
+ */
+ if ((dd->pbufsctxt - 2) < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
+bail:
+ return ret;
+}
+
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_7220_link_buf(ppd, pbufnum);
+ else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->cspec->lastbuf_for_pio;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+/* these 2 "counters" are really control registers, and are always RW */
+static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ write_7220_creg(ppd->dd, cr_psinterval, intv);
+ write_7220_creg(ppd->dd, cr_psstart, start);
+}
+
+/*
+ * NOTE: no real attempt is made to generalize the SDMA stuff.
+ * At some point "soon" we will have a new, more generalized
+ * sdma interface, and then we'll clean this up.
+ */
+
+/* Must be called with sdma_lock held, or before init finished */
+static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg(ppd->dd, kr_senddmatail, tail);
+}
+
+static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
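+/*
+ * Per-state operating values applied by the generic SDMA state machine:
+ * whether the engine and its interrupt are enabled, whether it is halted,
+ * and whether the "go s99 running" flag should be set true or false on
+ * entry to the state.
+ */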
+static struct sdma_set_state_action sdma_7220_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .go_s99_running_tofalse = 1,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7220_action_table;
+}
+
+static int init_sdma_7220_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned i, n;
+ u64 senddmabufmask[3] = { 0 };
+
+ /* Set SendDmaBase */
+ qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ /* Set SendDmaHeadAddr */
+ qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
+
+ /*
+ * Reserve all the former "kernel" piobufs, using high number range
+ * so we get as many 4K buffers as possible
+ */
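+ /*
+ * For example, with n total PIO buffers and sdmabufcnt reserved,
+ * buffers [n - sdmabufcnt, n) get their bits set below: buffer i
+ * maps to bit (i % 64) of senddmabufmask[i / 64].
+ */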
+ n = dd->piobcnt2k + dd->piobcnt4k;
+ i = n - dd->cspec->sdmabufcnt;
+
+ for (; i < n; ++i) {
+ unsigned word = i / 64;
+ unsigned bit = i & 63;
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
+
+ ppd->sdma_state.first_sendbuf = i;
+ ppd->sdma_state.last_sendbuf = n;
+
+ return 0;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16)le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16)qib_read_kreg32(dd, kr_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
+ if (swhead < swtail) {
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ } else if (swhead > swtail) {
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ } else {
+ /* empty */
+ sane = (hwhead == swhead);
+ }
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* assume no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
+static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * Since the delay affects this packet but the amount of the delay is
+ * based on the length of the previous packet, use the last delay computed
+ * and save the delay count for this packet to be used next time
+ * we get here.
+ */
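+/*
+ * Illustration: the PBC returned for this packet carries the delay computed
+ * from the *previous* packet's length, while the delay computed from this
+ * packet, ((plen * (rcv_mult - snd_mult) + 1) >> 1), is saved for the next
+ * call.
+ */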
+static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret = ppd->cpspec->last_delay_mult;
+
+ ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
+ (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
+
+ /* Indicate VL15, if necessary */
+ if (vl == 15)
+ ret |= PBC_7220_VL15_SEND_CTRL;
+ return ret;
+}
+
+static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ if (!rcd->ctxt) {
+ rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
+ (rcd->ctxt - 1) * rcd->rcvegrcnt;
+ }
+}
+
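+/*
+ * Adjust the PIO-avail update threshold when send buffers change hands:
+ * for user contexts with few buffers per subcontext, lower the threshold
+ * so updates arrive before the buffers run out; when no such context
+ * remains, restore the default threshold.
+ */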
+static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ unsigned long flags;
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_KERN:
+ /* see if we need to raise avail update threshold */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+ case TXCHK_CHG_TYPE_USER:
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+ }
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
+/**
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
+ * @dd: the qlogic_ib device
+ * @regnum: register to read from
+ *
+ * returns reg contents (0..255) or < 0 for error
+ */
+static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ int ret;
+ u8 rdata;
+
+ if (regnum > 7) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* return a bogus value for (the one) register we do not have */
+ if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto bail;
+
+ ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
+ if (!ret)
+ ret = rdata;
+
+ mutex_unlock(&dd->eep_lock);
+
+ /*
+ * There are three possibilities here:
+ * ret is actual value (0..255)
+ * ret is -ENXIO or -EINVAL from twsi code or this file
+ * ret is -EINTR from mutex_lock_interruptible.
+ */
+bail:
+ return ret;
+}
+
+/* Dummy function, as 7220 boards never disable EEPROM Write */
+static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba7220_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+ u32 boardid, minwidth;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
+ sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7220_bringup_serdes;
+ dd->f_cleanup = qib_setup_7220_cleanup;
+ dd->f_clear_tids = qib_7220_clear_tids;
+ dd->f_free_irq = qib_7220_free_irq;
+ dd->f_get_base_info = qib_7220_get_base_info;
+ dd->f_get_msgheader = qib_7220_get_msgheader;
+ dd->f_getsendbuf = qib_7220_getsendbuf;
+ dd->f_gpio_mod = gpio_7220_mod;
+ dd->f_eeprom_wen = qib_7220_eeprom_wen;
+ dd->f_hdrqempty = qib_7220_hdrqempty;
+ dd->f_ib_updown = qib_7220_ib_updown;
+ dd->f_init_ctxt = qib_7220_init_ctxt;
+ dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
+ dd->f_intr_fallback = qib_7220_intr_fallback;
+ dd->f_late_initreg = qib_late_7220_initreg;
+ dd->f_setpbc_control = qib_7220_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7220;
+ dd->f_put_tid = qib_7220_put_tid;
+ dd->f_quiet_serdes = qib_7220_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7220_mod;
+ dd->f_read_cntrs = qib_read_7220cntrs;
+ dd->f_read_portcntrs = qib_read_7220portcntrs;
+ dd->f_reset = qib_setup_7220_reset;
+ dd->f_init_sdma_regs = init_sdma_7220_regs;
+ dd->f_sdma_busy = qib_sdma_7220_busy;
+ dd->f_sdma_gethead = qib_sdma_7220_gethead;
+ dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
+ dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7220_sdma_init_early;
+ dd->f_sendctrl = sendctrl_7220_mod;
+ dd->f_set_armlaunch = qib_set_7220_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
+ dd->f_iblink_state = qib_7220_iblink_state;
+ dd->f_ibphys_portstate = qib_7220_phys_portstate;
+ dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7220_set_loopback;
+ dd->f_set_intr_state = qib_7220_set_intr_state;
+ dd->f_setextled = qib_setup_7220_setextled;
+ dd->f_txchk_change = qib_7220_txchk_change;
+ dd->f_update_usrhead = qib_update_7220_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
+ dd->f_xgxs_reset = qib_7220_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7220_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7220_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7220_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+ switch (boardid) {
+ case 0:
+ case 2:
+ case 10:
+ case 12:
+ minwidth = 16; /* x16 capable boards */
+ break;
+ default:
+ minwidth = 8; /* x8 capable boards */
+ break;
+ }
+ if (qib_pcie_params(dd, minwidth, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ /* save IRQ for possible later use */
+ dd->cspec->irq = pdev->irq;
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_7220_interrupt(dd);
+ qib_7220_init_hwerrors(dd);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 00000000000..503992d9c5c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,7645 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath 7322 chip
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_7322_regs.h"
+#include "qib_qsfp.h"
+
+#include "qib_mad.h"
+
+static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
+static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
+static irqreturn_t qib_7322intr(int irq, void *data);
+static irqreturn_t qib_7322bufavail(int irq, void *data);
+static irqreturn_t sdma_intr(int irq, void *data);
+static irqreturn_t sdma_idle_intr(int irq, void *data);
+static irqreturn_t sdma_progress_intr(int irq, void *data);
+static irqreturn_t sdma_cleanup_intr(int irq, void *data);
+static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *rcd);
+static u8 qib_7322_phys_portstate(u64);
+static u32 qib_7322_iblink_state(u64);
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd);
+static void force_h1(struct qib_pportdata *);
+static void adj_tx_serdes(struct qib_pportdata *);
+static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
+
+static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
+static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+
+#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
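+/* e.g. BMASK(7, 4) == 0xf0: a mask covering bits 7..4 inclusive */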
+
+/* LE2 serdes values for different cases */
+#define LE2_DEFAULT 5
+#define LE2_5m 4
+#define LE2_QME 0
+
+/* Below is special-purpose, so only really works for the IB SerDes blocks. */
+#define IBSD(hw_pidx) (hw_pidx + 2)
+
+/* these are variables for documentation and experimentation purposes */
+static const unsigned rcv_int_timeout = 375;
+static const unsigned rcv_int_count = 16;
+static const unsigned sdma_idle_cnt = 64;
+
+/* Time to stop altering Rx Equalization parameters, after link up. */
+#define RXEQ_DISABLE_MSECS 2500
+
+/*
+ * Number of VLs we are configured to use (to allow for more
+ * credits per vl, etc.)
+ */
+ushort qib_num_cfg_vls = 2;
+module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
+
+static ushort qib_chase = 1;
+module_param_named(chase, qib_chase, ushort, S_IRUGO);
+MODULE_PARM_DESC(chase, "Enable state chase handling");
+
+static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
+module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
+MODULE_PARM_DESC(long_attenuation, \
+ "attenuation cutoff (dB) for long copper cable setup");
+
+static ushort qib_singleport;
+module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
+MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+
+#define MAX_ATTEN_LEN 64 /* plenty for any real system */
+/* for read back, default index is ~5m copper cable */
+static char txselect_list[MAX_ATTEN_LEN] = "10";
+static struct kparam_string kp_txselect = {
+ .string = txselect_list,
+ .maxlen = MAX_ATTEN_LEN
+};
+static int setup_txselect(const char *, struct kernel_param *);
+module_param_call(txselect, setup_txselect, param_get_string,
+ &kp_txselect, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(txselect, \
+ "Tx serdes indices (for no QSFP or invalid QSFP data)");
+
+#define BOARD_QME7342 5
+#define BOARD_QMH7342 6
+#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QMH7342)
+#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QME7342)
+
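+/*
+ * KREG_IDX converts a register's byte offset from the generated header into
+ * a u64 index; KREG_IBPORT_IDX does the same for the port-0 instance of a
+ * per-port register (qib_read_kreg_port()/qib_write_kreg_port() then select
+ * the right port).
+ */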
+#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
+
+#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
+
+#define MASK_ACROSS(lsb, msb) \
+ (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK)
+
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK << \
+ QIB_7322_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+
+/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
+#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
+ (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
+
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
+#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
+#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
+/* Below because most, but not all, fields of IntMask have that full suffix */
+#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
+
+
+#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
+
+/*
+ * the size bits give us 2^N, in KB units. 0 marks as invalid,
+ * and 7 is reserved. We currently use only 2KB and 4KB
+ */
+#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
+#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
+#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
+#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
+
+#define SendIBSLIDAssignMask \
+ QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
+#define SendIBSLMCMask \
+ QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
+
+#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
+#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
+#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
+#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
+#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
+#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_EEPROM_WEN_NUM 14
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
+
+/* HW counter clock is at 4nsec */
+#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
+
+/* full speed IB port 1 only */
+#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
+#define PORT_SPD_CAP_SHIFT 3
+
+/* full speed featuremask, both ports */
+#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
+ */
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_contextcnt KREG_IDX(ContextCnt)
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_debugportval KREG_IDX(DebugPortValueReg)
+#define kr_fmask KREG_IDX(feature_mask)
+#define kr_act_fmask KREG_IDX(active_feature_mask)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intredirect KREG_IDX(IntRedirect0)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_pagealign KREG_IDX(PageAlign)
+#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
+#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
+#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
+#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_intgranted KREG_IDX(Int_Granted)
+#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
+#define kr_intblocked KREG_IDX(IntBlocked)
+#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
+
+/*
+ * per-port kernel registers. Access only with qib_read_kreg_port()
+ * or qib_write_kreg_port()
+ */
+#define krp_errclear KREG_IBPORT_IDX(ErrClear)
+#define krp_errmask KREG_IBPORT_IDX(ErrMask)
+#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
+#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
+#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
+#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
+#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
+#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
+#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
+#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
+#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
+#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
+#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
+#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
+#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
+#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
+#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
+#define krp_psstart KREG_IBPORT_IDX(PSStart)
+#define krp_psstat KREG_IBPORT_IDX(PSStat)
+#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
+#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
+#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
+#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
+#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
+#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
+#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
+#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
+#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
+#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
+#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
+#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
+#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
+#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
+#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
+#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
+#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
+#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
+#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
+#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
+#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
+#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
+#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
+#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
+#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
+#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
+#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
+#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
+#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
+#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
+#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
+
+/*
+ * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
+ * or qib_write_kreg_ctxt()
+ */
+#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+/*
+ * TID Flow table, per context. Reduces
+ * number of hdrq updates to one per flow (or on errors).
+ * context 0 and 1 share same memory, but have distinct
+ * addresses. Since for now, we never use expected sends
+ * on kernel contexts, we don't worry about that (we initialize
+ * those entries for ctxt 0/1 on driver load twice, for example).
+ */
+#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
+#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
+
+/* these are the error bits in the tid flows, and are W1C */
+#define TIDFLOW_ERRBITS ( \
+ (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
+ (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
+
+/* Most (not all) counters are per-IBport.
+ * Requires that LBIntCnt is at offset 0 in the group.
+ */
+#define CREG_IDX(regname) \
+((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+
+#define crp_badformat CREG_IDX(RxVersionErrCnt)
+#define crp_err_rlen CREG_IDX(RxLenErrCnt)
+#define crp_erricrc CREG_IDX(RxICRCErrCnt)
+#define crp_errlink CREG_IDX(RxLinkMalformCnt)
+#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define crp_pktrcv CREG_IDX(RxDataPktCnt)
+#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define crp_pktsend CREG_IDX(TxDataPktCnt)
+#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define crp_rcvebp CREG_IDX(RxEBPCnt)
+#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
+#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
+#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
+#define crp_sendstall CREG_IDX(TxFlowStallCnt)
+#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
+#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
+#define crp_txlenerr CREG_IDX(TxLenErrCnt)
+#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
+#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
+#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define crp_wordrcv CREG_IDX(RxDwordCnt)
+#define crp_wordsend CREG_IDX(TxDwordCnt)
+#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
+
+/* these are the (few) counters that are not port-specific */
+#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
+ QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
+#define cr_lbint CREG_DEVIDX(LBIntCnt)
+#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
+#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
+#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
+#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
+#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
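+
+/*
+ * Illustrative sketch (not part of the driver logic): the CREG_IDX() /
+ * CREG_DEVIDX() values above are u64 word offsets from LBIntCnt, so a
+ * counter is read through the accessors defined further below, e.g.
+ *
+ *	u64 pkts  = read_7322_creg_port(ppd, crp_pktrcv);
+ *	u32 lbint = read_7322_creg32(dd, cr_lbint);
+ */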
+
+/* no chip register for # of IB ports supported, so define */
+#define NUM_IB_PORTS 2
+
+/* 1 VL15 buffer per hardware IB port, no register for this, so define */
+#define NUM_VL15_BUFS NUM_IB_PORTS
+
+/*
+ * context 0 and 1 are special, and there is no chip register that
+ * defines this value, so we have to define it here.
+ * These are all allocated to either 0 or 1 for a single-port
+ * hardware configuration; otherwise each gets half.
+ */
+#define KCTXT0_EGRCNT 2048
+
+/* values for vl and port fields in PBC, 7322-specific */
+#define PBC_PORT_SEL_LSB 26
+#define PBC_PORT_SEL_RMASK 1
+#define PBC_VL_NUM_LSB 27
+#define PBC_VL_NUM_RMASK 7
+#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
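+
+/*
+ * Illustrative sketch (placeholder names, not part of the driver logic):
+ * a PBC qword for a VL15 send is assembled from the fields above roughly as
+ *
+ *	pbc = PBC_7322_VL15_SEND |
+ *	      (((u64)hw_port_index) << (PBC_PORT_SEL_LSB + 32)) |
+ *	      len_in_dwords;
+ *
+ * where hw_port_index and len_in_dwords are placeholders, and the + 32
+ * reflects that the port/VL fields live in the upper control word.
+ * flush_fifo() below builds its dummy VL15 packet this way.
+ */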
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 16,
+ [IB_RATE_5_GBPS] = 8,
+ [IB_RATE_10_GBPS] = 4,
+ [IB_RATE_20_GBPS] = 2,
+ [IB_RATE_30_GBPS] = 2,
+ [IB_RATE_40_GBPS] = 1
+};
+
+#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
+#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7322_LT_STATE_DISABLED 0x00
+#define IB_7322_LT_STATE_LINKUP 0x01
+#define IB_7322_LT_STATE_POLLACTIVE 0x02
+#define IB_7322_LT_STATE_POLLQUIET 0x03
+#define IB_7322_LT_STATE_SLEEPDELAY 0x04
+#define IB_7322_LT_STATE_SLEEPQUIET 0x05
+#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7322_LT_STATE_CFGIDLE 0x0b
+#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7322_LT_STATE_TXREVLANES 0x0d
+#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
+#define IB_7322_LT_STATE_CFGENH 0x10
+#define IB_7322_LT_STATE_CFGTEST 0x11
+
+/* link state machine states from IBC */
+#define IB_7322_L_STATE_DOWN 0x0
+#define IB_7322_L_STATE_INIT 0x1
+#define IB_7322_L_STATE_ARM 0x2
+#define IB_7322_L_STATE_ACTIVE 0x3
+#define IB_7322_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7322_physportstate[0x20] = {
+ [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
+ [IB_7322_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
+ [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 main_int_mask; /* clear bits which have dedicated handlers */
+ u64 int_enable_mask; /* for per port interrupts in single port mode */
+ u64 errormask;
+ u64 hwerrmask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ u32 r1;
+ int irq;
+ u32 num_msix_entries;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 stay_in_freeze;
+ u32 recovery_ports_initted;
+ struct msix_entry *msix_entries;
+ void **msix_arg;
+ unsigned long *sendchkenable;
+ unsigned long *sendgrhchk;
+ unsigned long *sendibchk;
+ u32 rcvavail_timeout[18];
+ char emsgbuf[128]; /* for device error interrupt msg buffer */
+};
+
+/* Table of Tx emphasis entries, in "human readable" form. */
+struct txdds_ent {
+ u8 amp;
+ u8 pre;
+ u8 main;
+ u8 post;
+};
+
+struct vendor_txdds_ent {
+ u8 oui[QSFP_VOUI_LEN];
+ u8 *partnum;
+ struct txdds_ent sdr;
+ struct txdds_ent ddr;
+ struct txdds_ent qdr;
+};
+
+static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
+
+#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
+#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
+
+#define H1_FORCE_VAL 8
+#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
+#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
+
+/* The static and dynamic registers are paired, and the pairs indexed by spd */
+#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
+ + ((spd) * 2))
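+
+/*
+ * For example, krp_static_adapt_dis(2) selects the QDR pair (assuming
+ * spd counts 0=SDR, 1=DDR, 2=QDR); handle_serdes_issues() below writes
+ * QDR_STATIC_ADAPT_DOWN (or the _R1 variant) through that index on
+ * link down.
+ */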
+
+#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
+#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
+#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
+
+struct qib_chippport_specific {
+ u64 __iomem *kpregbase;
+ u64 __iomem *cpregbase;
+ u64 *portcntrs;
+ struct qib_pportdata *ppd;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct delayed_work ipg_work;
+ struct timer_list chase_timer;
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 iblnkdownsnap;
+ u64 iblnkdowndelta;
+ u64 ibmalfdelta;
+ u64 ibmalfsnap;
+ u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
+ u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
+ u64 qdr_dfe_time;
+ u64 chase_end;
+ u32 autoneg_tries;
+ u32 recovery_init;
+ u32 qdr_dfe_on;
+ u32 qdr_reforce;
+ /*
+ * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
+ * entry zero is unused, to simplify indexing
+ */
+ u8 h1_val;
+ u8 no_eep; /* txselect table index to use if no qsfp info */
+ u8 ipg_tries;
+ u8 ibmalfusesnap;
+ struct qib_qsfp_data qsfp_data;
+ char epmsgbuf[192]; /* for port error interrupt msg buffer */
+};
+
+static struct {
+ const char *name;
+ irq_handler_t handler;
+ int lsb;
+ int port; /* 0 if not port-specific, else port # */
+} irq_table[] = {
+ { QIB_DRV_NAME, qib_7322intr, -1, 0 },
+ { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+ SYM_LSB(IntStatus, SendBufAvail), 0 },
+ { QIB_DRV_NAME " (sdma 0)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_0), 1 },
+ { QIB_DRV_NAME " (sdma 1)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
+ { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7322_IBCHG 0x101
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value);
+static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
+static void write_7322_initregs(struct qib_devdata *);
+static void write_7322_init_portregs(struct qib_pportdata *);
+static void setup_7322_link_recovery(struct qib_pportdata *, u32);
+static void check_7322_rxe_status(struct qib_pportdata *);
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_read_ureg - read virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u64 qib_read_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_write_ureg - write virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
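+
+/*
+ * Address computation sketch for the ureg accessors above (illustrative
+ * only): the per-context user register block starts at dd->userbase when
+ * it is mapped separately, else at dd->kregbase + dd->uregbase, and
+ * contexts are spaced dd->ureg_align bytes apart, i.e. roughly
+ *
+ *	base = (userbase ? userbase : kregbase + uregbase)
+ *	       + ureg_align * ctxt;
+ *	reg  = &((u64 __iomem *)base)[regno];
+ */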
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *) &dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/*
+ * Not many sanity checks for the port-specific kernel register routines,
+ * since they are only used when it's known to be safe.
+ */
+static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno)
+{
+ if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
+ return 0ULL;
+ return readq(&ppd->cpspec->kpregbase[regno]);
+}
+
+static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->kpregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
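+
+/*
+ * Illustrative use (hypothetical values, not driver logic): the per-context
+ * registers defined earlier (krc_rcvhdraddr, krc_rcvhdrtailaddr) are laid
+ * out consecutively per context, so setting a context's receive header
+ * queue address looks like
+ *
+ *	qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, hdrq_dma_addr);
+ *
+ * which simply writes kernel register (krc_rcvhdraddr + ctxt).
+ */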
+
+static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->cpspec->cpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&ppd->cpspec->cpregbase[regno]);
+}
+
+/* bits in Control register */
+#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
+#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
+
+/* bits in general interrupt regs */
+#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
+#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
+#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
+#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
+#define QIB_I_C_ERROR INT_MASK(Err)
+
+#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
+#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
+#define QIB_I_GPIO INT_MASK(AssertGPIO)
+#define QIB_I_P_SDMAINT(pidx) \
+ (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are "per port" */
+#define QIB_I_P_BITSEXTANT(pidx) \
+ (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
+ INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are common to a device */
+/* currently unused: QIB_I_SPIOSENT */
+#define QIB_I_C_BITSEXTANT \
+ (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
+ QIB_I_SPIOSENT | \
+ QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
+
+#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
+ QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
+
+/*
+ * Error bits that are "per port".
+ */
+#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
+#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
+#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
+#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
+#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
+#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
+#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
+#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
+#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
+#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
+#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
+#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
+#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
+#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
+#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
+#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
+#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
+#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
+#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
+#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
+#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
+#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
+#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
+#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
+#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
+#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
+#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
+#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
+
+#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
+#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
+#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
+#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
+#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
+#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
+#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
+#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
+#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
+#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
+#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
+
+/* Error bits that are common to a device */
+#define QIB_E_RESET ERR_MASK(ResetNegated)
+#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
+#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
+
+
+/*
+ * Per-chip (rather than per-port) errors.  Most do nothing but
+ * trigger a print, either because they self-recover or always
+ * occur in tandem with other errors that handle the issue, or
+ * because they indicate errors with no recovery; either way we
+ * want to know that they happened.
+ */
+#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
+#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
+#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
+#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
+#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
+#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
+#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
+#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
+
+/* SDMA chip errors (not per port)
+ * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
+ * the SDMAHALT error immediately, so we just print the dup error via the
+ * E_AUTO mechanism. This is true of most of the per-port fatal errors
+ * as well, but since this is port-independent, by definition, it's
+ * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
+ * packet send errors, and so are handled in the same manner as other
+ * per-packet errors.
+ */
+#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
+#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
+#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
+
+/*
+ * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
+ * it is used to print "common" packet errors.
+ */
+#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
+ QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_REBP)
+
+/* Error bits that are packet-related (Receive, per-port) */
+#define QIB_E_P_RPKTERRS (\
+ QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
+ QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
+ QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
+ QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
+ QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
+
+/*
+ * Error bits that are Send-related (per port)
+ * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
+ * All of these potentially need to have a buffer disarmed
+ */
+#define QIB_E_P_SPKTERRS (\
+ QIB_E_P_SUNEXP_PKTNUM |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMAXPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
+
+#define QIB_E_SPKTERRS ( \
+ QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
+ ERR_MASK_N(SendUnsupportedVLErr) | \
+ QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
+
+#define QIB_E_P_SDMAERRS ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAUNEXPDATA | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories, and the repeat definition
+ * is not a problem.
+ */
+#define QIB_E_P_BITSEXTANT ( \
+ QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
+ QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
+ QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
+ )
+
+/*
+ * These are errors that can occur when the link
+ * changes state while a packet is being sent or received. This doesn't
+ * cover things like EBP or VCRC that can result from the link changing
+ * state during a send, so that we receive a "known bad" packet.
+ * All of these are "per port", so renamed:
+ */
+#define QIB_E_P_LINK_PKTERRS (\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
+ QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
+ QIB_E_P_RUNEXPCHAR)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories (such as QIB_E_SPKTERRS),
+ * and the repeat definition is not a problem.
+ */
+#define QIB_E_C_BITSEXTANT (\
+ QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
+ QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
+ QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
+
+/* Likewise, neuter E_SPKT_ERRS_IGNORE (no bits ignored for now) */
+#define E_SPKT_ERRS_IGNORE 0
+
+#define QIB_EXTS_MEMBIST_DISABLED \
+ SYM_MASK(EXTStatus, MemBISTDisabled)
+#define QIB_EXTS_MEMBIST_ENDTEST \
+ SYM_MASK(EXTStatus, MemBISTEndTest)
+
+#define QIB_E_SPIOARMLAUNCH \
+ ERR_MASK(SendArmLaunchErr)
+
+#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
+#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
+
+/*
+ * IBTA_1_2 is set when multiple speeds are enabled (normal),
+ * and also if forced QDR (only QDR enabled). It's enabled for the
+ * forced QDR case so that scrambling will be enabled by the TS3
+ * exchange, when supported by both sides of the link.
+ */
+#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
+#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
+#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
+#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
+#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
+#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
+ SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
+#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
+
+#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
+#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
+
+#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
+#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+
+#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
+ SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
+ SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
+
+#define IBA7322_REDIRECT_VEC_PER_REG 12
+
+#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
+#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
+#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
+#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
+#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
+
+#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
+
+#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
+ fldname##Mask##_##port), .msg = #fldname }
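+/*
+ * For example, HWE_AUTO_P(SDmaMemReadErr, 1) expands (roughly) to
+ *	{ .mask = SYM_MASK(HwErrMask, SDmaMemReadErrMask_1),
+ *	  .msg = "SDmaMemReadErr" }
+ * which keeps the table below in sync with the register field names.
+ */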
+static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
+ HWE_AUTO(PCIESerdesPClkNotDetect),
+ HWE_AUTO(PowerOnBISTFailed),
+ HWE_AUTO(TempsenseTholdReached),
+ HWE_AUTO(MemoryErr),
+ HWE_AUTO(PCIeBusParityErr),
+ HWE_AUTO(PcieCplTimeout),
+ HWE_AUTO(PciePoisonedTLP),
+ HWE_AUTO_P(SDmaMemReadErr, 1),
+ HWE_AUTO_P(SDmaMemReadErr, 0),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
+ HWE_AUTO_P(statusValidNoEop, 1),
+ HWE_AUTO_P(statusValidNoEop, 0),
+ HWE_AUTO(LATriggered),
+ { .mask = 0 }
+};
+
+#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
+ .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(ResetNegated),
+ E_AUTO(HardwareErr),
+ E_AUTO(InvalidAddrErr),
+ E_AUTO(SDmaVL15Err),
+ E_AUTO(SBufVL15MisUseErr),
+ E_AUTO(InvalidEEPCmd),
+ E_AUTO(RcvContextShareErr),
+ E_AUTO(SendVLMismatchErr),
+ E_AUTO(SendArmLaunchErr),
+ E_AUTO(SendSpecialTriggerErr),
+ E_AUTO(SDmaWrongPortErr),
+ E_AUTO(SDmaBufMaskDuplicateErr),
+ E_AUTO(RcvHdrFullErr),
+ E_AUTO(RcvEgrFullErr),
+ { .mask = 0 }
+};
+
+static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
+ E_P_AUTO(IBStatusChanged),
+ E_P_AUTO(SHeadersErr),
+ E_P_AUTO(VL15BufMisuseErr),
+ /*
+	 * SDmaHaltErr is not really an error, so make that clearer:
+ */
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ E_P_AUTO(SDmaDescAddrMisalignErr),
+ E_P_AUTO(SDmaUnexpDataErr),
+ E_P_AUTO(SDmaMissingDwErr),
+ E_P_AUTO(SDmaDwEnErr),
+ E_P_AUTO(SDmaRpyTagErr),
+ E_P_AUTO(SDma1stDescErr),
+ E_P_AUTO(SDmaBaseErr),
+ E_P_AUTO(SDmaTailOutOfBoundErr),
+ E_P_AUTO(SDmaOutOfBoundErr),
+ E_P_AUTO(SDmaGenMismatchErr),
+ E_P_AUTO(SendBufMisuseErr),
+ E_P_AUTO(SendUnsupportedVLErr),
+ E_P_AUTO(SendUnexpectedPktNumErr),
+ E_P_AUTO(SendDroppedDataPktErr),
+ E_P_AUTO(SendDroppedSmpPktErr),
+ E_P_AUTO(SendPktLenErr),
+ E_P_AUTO(SendUnderRunErr),
+ E_P_AUTO(SendMaxPktLenErr),
+ E_P_AUTO(SendMinPktLenErr),
+ E_P_AUTO(RcvIBLostLinkErr),
+ E_P_AUTO(RcvHdrErr),
+ E_P_AUTO(RcvHdrLenErr),
+ E_P_AUTO(RcvBadTidErr),
+ E_P_AUTO(RcvBadVersionErr),
+ E_P_AUTO(RcvIBFlowErr),
+ E_P_AUTO(RcvEBPErr),
+ E_P_AUTO(RcvUnsupportedVLErr),
+ E_P_AUTO(RcvUnexpectedCharErr),
+ E_P_AUTO(RcvShortPktLenErr),
+ E_P_AUTO(RcvLongPktLenErr),
+ E_P_AUTO(RcvMaxPktLenErr),
+ E_P_AUTO(RcvMinPktLenErr),
+ E_P_AUTO(RcvICRCErr),
+ E_P_AUTO(RcvVCRCErr),
+ E_P_AUTO(RcvFormatErr),
+ { .mask = 0 }
+};
+
+/*
+ * Below generates "auto-message" for interrupts not specific to any port or
+ * context
+ */
+#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
+ .msg = #fldname }
+/* Below generates "auto-message" for interrupts specific to a port */
+#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_0), \
+ SYM_LSB(IntMask, fldname##Mask##_1)), \
+ .msg = #fldname "_P" }
+/* For some reason, the SerDesTrimDone bits are reversed */
+#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_1), \
+ SYM_LSB(IntMask, fldname##Mask##_0)), \
+ .msg = #fldname "_P" }
+/*
+ * Below generates "auto-message" for interrupts specific to a context,
+ * with ctxt-number appended
+ */
+#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##0IntMask), \
+ SYM_LSB(IntMask, fldname##17IntMask)), \
+ .msg = #fldname "_C"}
+
+static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
+ INTR_AUTO_P(SDmaInt),
+ INTR_AUTO_P(SDmaProgressInt),
+ INTR_AUTO_P(SDmaIdleInt),
+ INTR_AUTO_P(SDmaCleanupDone),
+ INTR_AUTO_C(RcvUrg),
+ INTR_AUTO_P(ErrInt),
+ INTR_AUTO(ErrInt), /* non-port-specific errs */
+ INTR_AUTO(AssertGPIOInt),
+ INTR_AUTO_P(SendDoneInt),
+ INTR_AUTO(SendBufAvailInt),
+ INTR_AUTO_C(RcvAvail),
+ { .mask = 0 }
+};
+
+#define TXSYMPTOM_AUTO_P(fldname) \
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+static const struct qib_hwerror_msgs hdrchk_msgs[] = {
+ TXSYMPTOM_AUTO_P(NonKeyPacket),
+ TXSYMPTOM_AUTO_P(GRHFail),
+ TXSYMPTOM_AUTO_P(PkeyFail),
+ TXSYMPTOM_AUTO_P(QPFail),
+ TXSYMPTOM_AUTO_P(SLIDFail),
+ TXSYMPTOM_AUTO_P(RawIPV6),
+ TXSYMPTOM_AUTO_P(PacketTooSmall),
+ { .mask = 0 }
+};
+
+#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used;
+ * we don't need to force the update of pioavail in that case.
+ */
+static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 i;
+ int any;
+ u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ unsigned long sbuf[4];
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ any = 0;
+ for (i = 0; i < regcnt; ++i) {
+ sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
+ if (sbuf[i]) {
+ any = 1;
+ qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
+ }
+ }
+
+ if (any)
+ qib_disarm_piobufs_set(dd, sbuf, piobcnt);
+}
+
+/* No txe_recover yet, if ever */
+
+/* No decode__errors yet */
+static void err_decode(char *msg, size_t len, u64 errs,
+ const struct qib_hwerror_msgs *msp)
+{
+ u64 these, lmask;
+ int took, multi, n = 0;
+
+ while (msp && msp->mask) {
+ multi = (msp->mask & (msp->mask - 1));
+ while (errs & msp->mask) {
+ these = (errs & msp->mask);
+ lmask = (these & (these - 1)) ^ these;
+ if (len) {
+ if (n++) {
+ /* separate the strings */
+ *msg++ = ',';
+ len--;
+ }
+ took = scnprintf(msg, len, "%s", msp->msg);
+ len -= took;
+ msg += took;
+ }
+ errs &= ~lmask;
+ if (len && multi) {
+ /* More than one bit this mask */
+ int idx = -1;
+
+ while (lmask & msp->mask) {
+ ++idx;
+ lmask >>= 1;
+ }
+ took = scnprintf(msg, len, "_%d", idx);
+ len -= took;
+ msg += took;
+ }
+ }
+ ++msp;
+ }
+ /* If some bits are left, show in hex. */
+ if (len && errs)
+ snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
+ (unsigned long long) errs);
+}
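+
+/*
+ * Sketch of err_decode() output (illustrative): names are appended
+ * comma-separated for each table entry whose mask bits are set; for a
+ * multi-bit mask the bit's offset within the mask is appended, so e.g.
+ * context 3's RcvAvail interrupt decodes as "RcvAvail_C_3", and any bits
+ * not covered by the table show up as ",MORE:<hex>".  handle_7322_errors()
+ * below calls it as
+ *
+ *	err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+ *		   qib_7322error_msgs);
+ */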
+
+/* only called if r1 set */
+static void flush_fifo(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *piobuf;
+ u32 bufn;
+ u32 *hdr;
+ u64 pbc;
+ const unsigned hdrwords = 7;
+ static struct qib_ib_header ibhdr = {
+ .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
+ .lrh[1] = IB_LID_PERMISSIVE,
+ .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
+ .lrh[3] = IB_LID_PERMISSIVE,
+ .u.oth.bth[0] = cpu_to_be32(
+ (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
+ .u.oth.bth[1] = cpu_to_be32(0),
+ .u.oth.bth[2] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[0] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[1] = cpu_to_be32(0),
+ };
+
+ /*
+ * Send a dummy VL15 packet to flush the launch FIFO.
+ * This will not actually be sent since the TxeBypassIbc bit is set.
+ */
+ pbc = PBC_7322_VL15_SEND |
+ (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
+ (hdrwords + SIZE_OF_CRC);
+ piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
+ if (!piobuf)
+ return;
+ writeq(pbc, piobuf);
+ hdr = (u32 *) &ibhdr;
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf + 2, hdr, hdrwords);
+ qib_sendbuf_done(dd, bufn);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ /* If we are draining everything, block sends first */
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ ppd->p_sendctrl |= set_sendctrl;
+ ppd->p_sendctrl &= ~clr_sendctrl;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
+ qib_write_kreg_port(ppd, krp_sendctrl,
+ ppd->p_sendctrl |
+ SYM_MASK(SendCtrl_0, SDmaCleanup));
+ else
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock(&dd->sendctrl_lock);
+
+ if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
+ flush_fifo(ppd);
+}
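+
+/*
+ * Illustrative call (not driver logic): halting the engine and draining
+ * the fifos would combine the op flags tested above, e.g.
+ *
+ *	qib_7322_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_HALT |
+ *				    QIB_SDMA_SENDCTRL_OP_DRAIN);
+ *
+ * with interrupts disabled and sdma_lock held, per the comment above.
+ */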
+
+static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
+}
+
+static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+ * Set SendDmaLenGen and clear and set
+ * the MSB of the generation count to enable generation checking
+ * and load the internal generation counter.
+ */
+ qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg_port(ppd, krp_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg_port(ppd, krp_senddmatail, tail);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ /*
+ * Drain all FIFOs.
+ * The hardware doesn't require this but we do it so that verbs
+ * and user applications don't wait for link active to send stale
+ * data.
+ */
+ sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
+
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+ qib_7322_sdma_sendctrl(ppd,
+ ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
+}
+
+#define DISABLES_SDMA ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ errs &= QIB_E_P_SDMAERRS;
+
+ if (errs & QIB_E_P_SDMAUNEXPDATA)
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+ ppd->port);
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * handle per-device errors (not per-port errors)
+ */
+static noinline void handle_7322_errors(struct qib_devdata *dd)
+{
+ char *msg;
+ u64 iserr = 0;
+ u64 errs;
+ u64 mask;
+ int log_idx;
+
+ qib_stats.sps_errints++;
+ errs = qib_read_kreg64(dd, kr_errstatus);
+ if (!errs) {
+ qib_devinfo(dd->pcidev, "device error interrupt, "
+ "but no error bits set!\n");
+ goto done;
+ }
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & QIB_E_HARDWARE) {
+ *msg = '\0';
+ qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ } else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QIB_E_SPKTERRS) {
+ qib_disarm_7322_senderrbufs(dd->pport);
+ qib_stats.sps_txerrs++;
+ } else if (errs & QIB_E_INVALIDADDR)
+ qib_stats.sps_txerrs++;
+ else if (errs & QIB_E_ARMLAUNCH) {
+ qib_stats.sps_txerrs++;
+ qib_disarm_7322_senderrbufs(dd->pport);
+ }
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = QIB_E_HARDWARE;
+ *msg = '\0';
+
+ err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+ qib_7322error_msgs);
+
+ /*
+ * Getting reset is a tragedy for all ports. Mark the device
+	 * _and_ the ports as "offline" in a way meaningful to each.
+ */
+ if (errs & QIB_E_RESET) {
+ int pidx;
+
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_err(dd, "%s error\n", msg);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+
+done:
+ return;
+}
+
+static void reenable_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+{
+ ppd->cpspec->chase_end = 0;
+
+ if (!qib_chase)
+ return;
+
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+}
+
+static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7322_LT_STATE_CFGRCVFCFG:
+ case IB_7322_LT_STATE_CFGWAITRMT:
+ case IB_7322_LT_STATE_TXREVLANES:
+ case IB_7322_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end))
+ disable_chase(ppd, tnow, ibclt);
+ else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+
+ if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
+ force_h1(ppd);
+ ppd->cpspec->qdr_reforce = 1;
+ } else if (ppd->cpspec->qdr_reforce &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
+ (ibclt == IB_7322_LT_STATE_CFGENH ||
+ ibclt == IB_7322_LT_STATE_CFGIDLE ||
+ ibclt == IB_7322_LT_STATE_LINKUP))
+ force_h1(ppd);
+
+ if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
+ ppd->link_speed_enabled == QIB_IB_QDR &&
+ (ibclt == IB_7322_LT_STATE_CFGTEST ||
+ ibclt == IB_7322_LT_STATE_CFGENH ||
+ (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
+ adj_tx_serdes(ppd);
+
+ if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+ ppd->cpspec->qdr_dfe_on = 1;
+ ppd->cpspec->qdr_dfe_time = 0;
+ /* On link down, reenable QDR adaptation */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 :
+ QDR_STATIC_ADAPT_DOWN);
+ }
+}
+
+/*
+ * This is per-port error handling; it will likely get its own
+ * MSIx interrupt (one for each port, although just a single handler).
+ */
+static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
+{
+ char *msg;
+ u64 ignore_this_time = 0, iserr = 0, errs, fmask;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* do this as soon as possible */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask)
+ check_7322_rxe_status(ppd);
+
+ errs = qib_read_kreg_port(ppd, krp_errstatus);
+ if (!errs)
+ qib_devinfo(dd->pcidev,
+ "Port%d error interrupt, but no error bits set!\n",
+ ppd->port);
+ if (!fmask)
+ errs &= ~QIB_E_P_IBSTATUSCHANGED;
+ if (!errs)
+ goto done;
+
+ msg = ppd->cpspec->epmsgbuf;
+ *msg = '\0';
+
+ if (errs & ~QIB_E_P_BITSEXTANT) {
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
+ if (!*msg)
+ snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+ "no others");
+ qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
+ " errors 0x%016Lx set (and %s)\n",
+ (errs & ~QIB_E_P_BITSEXTANT), msg);
+ *msg = '\0';
+ }
+
+ if (errs & QIB_E_P_SHDR) {
+ u64 symptom;
+
+ /* determine cause, then write to clear */
+ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
+ qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+ hdrchk_msgs);
+ *msg = '\0';
+ /* senderrbuf cleared in SPKTERRS below */
+ }
+
+ if (errs & QIB_E_P_SPKTERRS) {
+ if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ (errs & QIB_E_P_LINK_PKTERRS),
+ qib_7322p_error_msgs);
+ *msg = '\0';
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ }
+ qib_disarm_7322_senderrbufs(ppd);
+ } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+ qib_7322p_error_msgs);
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ *msg = '\0';
+ }
+
+ qib_write_kreg_port(ppd, krp_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ if (errs & QIB_E_P_RPKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & QIB_E_P_SPKTERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
+
+ if (errs & QIB_E_P_SDMAERRS)
+ sdma_7322_p_errors(ppd, errs);
+
+ if (errs & QIB_E_P_IBSTATUSCHANGED) {
+ u64 ibcs;
+ u8 ltstate;
+
+ ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ ltstate = qib_7322_phys_portstate(ibcs);
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_serdes_issues(ppd, ibcs);
+ if (!(ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
+ /*
+ * We got our interrupt, so init code should be
+ * happy and not try alternatives. Now squelch
+ * other "chatter" from link-negotiation (pre Init)
+ */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ }
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
+ LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
+ SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
+ IB_PHYSPORTSTATE_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ else
+ /*
+ * Since going into a recovery state causes the link
+ * state to go down and since recovery is transitory,
+ * it is better if we "miss" ever seeing the link
+ * training state go into recovery (i.e., ignore this
+ * transition for link state special handling purposes)
+ * without updating lastibcstat.
+ */
+ if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
+ /* cause any pending enabled interrupts to be re-delivered */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ if (dd->cspec->num_msix_entries) {
+ /* and same for MSIx */
+ u64 val = qib_read_kreg64(dd, kr_intgranted);
+ if (val)
+ qib_write_kreg(dd, kr_intgranted, val);
+ }
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7322_clear_freeze(struct qib_devdata *dd)
+{
+ int pidx;
+
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ 0ULL);
+
+	/* also disable interrupts; errormask is sometimes overwritten */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /*
+ * Force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ /* We need to purge per-port errs and reset mask, too */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
+ }
+ qib_7322_set_intr_state(dd, 1);
+}
+
+/* no error handling to speak of */
+/**
+ * qib_7322_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * We reuse the same message buffer as qib_handle_errors() to avoid
+ * excessive stack usage.  Most hardware errors are catastrophic, but
+ * for right now, we'll print them and continue.
+ */
+static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 ctrl;
+ int isfatal = 0;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except BIST fail */
+ qib_write_kreg(dd, kr_hwerrclear, hwerrs &
+ ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* no EEPROM logging, yet */
+
+ if (hwerrs)
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
+ /*
+ * No recovery yet...
+ */
+ if ((hwerrs & ~HWE_MASK(LATriggered)) ||
+ dd->cspec->stay_in_freeze) {
+ /*
+			 * If any bits that we aren't ignoring are set, only
+			 * make the complaint once, in case it's stuck or
+			 * recurring, and we get here multiple times.
+			 * Force the link down, so the switch knows, and the
+			 * LEDs are turned off.
+ */
+ if (dd->flags & QIB_INITTED)
+ isfatal = 1;
+ } else
+ qib_7322_clear_freeze(dd);
+ }
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcpy(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
+
+ /* Ignore esoteric PLL failures et al. */
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7322_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7322_init_hwerrors(struct qib_devdata *dd)
+{
+ int pidx;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+ if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
+ QIB_EXTS_MEMBIST_ENDTEST)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+ /* never clear BIST failure, so reported on each driver load */
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
+ dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
+ } else
+ dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ * Also reset everything that we can, so we start
+ * completely clean when re-enabled (before we
+ * actually issue the disable to the IBC)
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /*
+ * Clear status change interrupt reduction so the
+ * new state is seen.
+ */
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ }
+
+ mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
+ mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+}
+
+/*
+ * The total RCV buffer memory is 64KB, used for both ports, and is
+ * in units of 64 bytes (same as IB flow control credit unit).
+ * The consumedVL unit in the same registers are in 32 byte units!
+ * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
+ * and we can therefore allocate just 9 IB credits for 2 VL15 packets
+ * in krp_rxcreditvl15, rather than 10.
+ */
+#define RCV_BUF_UNITSZ 64
+#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
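+
+/*
+ * Worked example of the credit math above (a dual-port board, so
+ * NUM_RCV_BUF_UNITS(dd) = 65536 / (64 * 2) = 512 units per port):
+ * two 288-byte VL15 packets need (2 * 288 + 63) / 64 = 9 credits,
+ * leaving 503 units. With four operational VLs that is 125 credits
+ * per VL, and the 3 left over from rounding go to VL0, so VL0 ends
+ * up with 128 and VL1-VL3 with 125 each. These numbers are only
+ * illustrative; set_vls() below computes them from the actual port
+ * count and vls_operational.
+ */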
+
+static void set_vls(struct qib_pportdata *ppd)
+{
+ int i, numvls, totcred, cred_vl, vl0extra;
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ numvls = qib_num_vls(ppd->vls_operational);
+
+ /*
+ * Set up per-VL credits. Below is kluge based on these assumptions:
+ * 1) port is disabled at the time early_init is called.
+ * 2) give VL15 17 credits, for two max-plausible packets.
+ * 3) Give VL0-N the rest, with any rounding excess used for VL0
+ */
+ /* 2 VL15 packets @ 288 bytes each (including IB headers) */
+ totcred = NUM_RCV_BUF_UNITS(dd);
+ cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
+ totcred -= cred_vl;
+ qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
+ cred_vl = totcred / numvls;
+ vl0extra = totcred - cred_vl * numvls;
+ qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
+ for (i = 1; i < numvls; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
+ for (; i < 8; i++) /* no buffer space for other VLs */
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+
+ /* Notify IBC that credits need to be recalculated */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+ for (i = 0; i < numvls; i++)
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
+
+ /* Change the number of operational VLs */
+ ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
+ ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * The code that deals with actual SerDes is in serdes_7322_init().
+ * Compared to the code for iba7220, it is minimal.
+ */
+static int serdes_7322_init(struct qib_pportdata *ppd);
+
+/**
+ * qib_7322_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, guid, ibc;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * SerDes model not in Pd, but still need to
+ * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
+ * eventually.
+ */
+ /* Put IBC in reset, sends disabled (should be in reset already) */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
+ /*
+ * Flow control is sent this often, even if no changes in
+ * buffer space occur. Units are 128ns for this chip.
+ * Set to 3usec.
+ */
+ ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ /*
+ * Reset the PCS interface to the serdes (and also ibc, which is still
+ * in reset from above). Writes new value of ibcctrl_a as last step.
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (!ppd->cpspec->ibcctrl_b) {
+ unsigned lse = ppd->link_speed_enabled;
+
+ /*
+ * Not on re-init after reset, establish shadow
+ * and force initial config.
+ */
+ ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
+ krp_ibcctrl_b);
+ ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_SPEED_DDR |
+ IBA7322_IBC_SPEED_SDR |
+ IBA7322_IBC_WIDTH_AUTONEG |
+ SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
+ if (lse & (lse - 1)) /* Multiple speeds enabled */
+ ppd->cpspec->ibcctrl_b |=
+ (lse << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
+ IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_IBTA_1_2_MASK :
+ (lse == QIB_IB_DDR) ?
+ IBA7322_IBC_SPEED_DDR :
+ IBA7322_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcctrl_b |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7322_IBC_WIDTH_4X_ONLY :
+ IBA7322_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
+ IBA7322_IBC_HRTBT_MASK);
+ }
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+
+ /* setup so we have more time at CFGTEST to change H1 */
+ val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
+ val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
+ val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
+ qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
+
+ serdes_7322_init(ppd);
+
+ guid = be64_to_cpu(ppd->guid);
+ if (!guid) {
+ if (dd->base_guid)
+ guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
+ ppd->guid = cpu_to_be64(guid);
+ }
+
+ qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ /* Enable port */
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ set_vls(ppd);
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Also enable IBSTATUSCHG interrupt. */
+ val = qib_read_kreg_port(ppd, krp_errmask);
+ qib_write_kreg_port(ppd, krp_errmask,
+ val | ERR_MASK_N(IBStatusChanged));
+
+ /* Always zero until we start messing with SerDes for real */
+ return ret;
+}
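+
+/*
+ * A quick check of the IBCCtrlA values assembled above:
+ * FlowCtrlWaterMark = 5 is in KBytes, FlowCtrlPeriod = 24 is in
+ * 128 ns units (24 * 128 ns = 3.072 usec, the "3usec" in the
+ * comment), and MaxPktLen = (ibmaxlen >> 2) + 1 is the allowed
+ * packet size in dwords with the +1 covering the ICRC; see also
+ * the QIB_IB_CFG_MTU case in qib_7322_set_ib_cfg().
+ */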
+
+/**
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
+ * @ppd: physical port on the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ unsigned long flags;
+
+ qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ if (ppd->dd->cspec->r1)
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ flush_scheduled_work();
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ /*
+ * Despite the name, actually disables IBC as well. Do it when
+ * we are as sure as possible that no more packets can be
+ * received, following the down and the PCS reset.
+ * The actual disabling happens in qib_7322_mini_pcs_reset(),
+ * along with the PCS being reset.
+ */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_7322_mini_pcs_reset(ppd);
+
+ /*
+ * Update the adjusted counters so the adjustment persists
+ * across driver reload.
+ */
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
+ struct qib_devdata *dd = ppd->dd;
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7322_creg_port(ppd, crp_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
+ }
+ if (ppd->cpspec->iblnkdowndelta) {
+ val = read_7322_creg32_port(ppd, crp_iblinkdown);
+ val += ppd->cpspec->iblnkdowndelta;
+ write_7322_creg_port(ppd, crp_iblinkdown, val);
+ }
+ /*
+ * No need to save ibmalfdelta since IB perfcounters
+ * are cleared on driver reload.
+ */
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+}
+
+/**
+ * qib_setup_7322_setextled - set the state of the two external LEDs
+ * @ppd: physical port on the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * If @on is true, the exact combination of LEDs lit is
+ * determined by looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of the IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val;
+ unsigned long flags;
+ int yel, grn;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ /* Allow override of LED display for, e.g. Locating system in rack */
+ if (ppd->led_override) {
+ grn = (ppd->led_override & QIB_LED_PHYS);
+ yel = (ppd->led_override & QIB_LED_LOG);
+ } else if (on) {
+ val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ grn = qib_7322_phys_portstate(val) ==
+ IB_PHYSPORTSTATE_LINKUP;
+ yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
+ } else {
+ grn = 0;
+ yel = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & (ppd->port == 1 ?
+ ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
+ if (grn) {
+ extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
+ /*
+ * Counts are in chip clock (4ns) periods.
+ * This is roughly 1/15 sec (66.6 ms) on,
+ * 3/16 sec (187.5 ms) off, with packets rcvd.
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
+ ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
+ }
+ if (yel)
+ extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
+}
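+
+/*
+ * The blink-on-receive values programmed above work out as follows:
+ * the counts are in 4 ns chip clocks, so the "on" field is
+ * 66600 * 1000 / 4 = 16,650,000 clocks (66.6 ms) and the "off" field
+ * is 187500 * 1000 / 4 = 46,875,000 clocks (187.5 ms), i.e. roughly
+ * a 1:3 on/off duty cycle while packets are being received.
+ */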
+
+/*
+ * Disable MSIx interrupt if enabled, call generic MSIx code
+ * to cleanup, and clear pending MSIx interrupts.
+ * Used for fallback to INTx, after reset, and when MSIx setup fails.
+ */
+static void qib_7322_nomsix(struct qib_devdata *dd)
+{
+ u64 intgranted;
+ int n;
+
+ dd->cspec->main_int_mask = ~0ULL;
+ n = dd->cspec->num_msix_entries;
+ if (n) {
+ int i;
+
+ dd->cspec->num_msix_entries = 0;
+ for (i = 0; i < n; i++)
+ free_irq(dd->cspec->msix_entries[i].vector,
+ dd->cspec->msix_arg[i]);
+ qib_nomsix(dd);
+ }
+ /* make sure no MSIx interrupts are left pending */
+ intgranted = qib_read_kreg64(dd, kr_intgranted);
+ if (intgranted)
+ qib_write_kreg(dd, kr_intgranted, intgranted);
+}
+
+static void qib_7322_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_7322_nomsix(dd);
+}
+
+static void qib_setup_7322_cleanup(struct qib_devdata *dd)
+{
+ int i;
+
+ qib_7322_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->sendchkenable);
+ kfree(dd->cspec->sendgrhchk);
+ kfree(dd->cspec->sendibchk);
+ kfree(dd->cspec->msix_entries);
+ kfree(dd->cspec->msix_arg);
+ for (i = 0; i < dd->num_pports; i++) {
+ unsigned long flags;
+ u32 mask = QSFP_GPIO_MOD_PRS_N |
+ (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
+
+ kfree(dd->pport[i].cpspec->portcntrs);
+ if (dd->flags & QIB_HAS_QSFP) {
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->gpio_mask &= ~mask;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
+ }
+ if (dd->pport[i].ibport_data.smi_ah)
+ ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
+ }
+}
+
+/* handle SDMA interrupts */
+static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ struct qib_pportdata *ppd0 = &dd->pport[0];
+ struct qib_pportdata *ppd1 = &dd->pport[1];
+ u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
+ INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
+ u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
+ INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
+
+ if (intr0)
+ qib_sdma_intr(ppd0);
+ if (intr1)
+ qib_sdma_intr(ppd1);
+
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
+ qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
+ qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
+}
+
+/*
+ * Set or clear the Send buffer available interrupt enable bit.
+ */
+static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Somehow got an interrupt with reserved bits set in interrupt status.
+ * Print a message so we know it happened, then clear them.
+ * Keep the mainline interrupt handler cache-friendly.
+ */
+static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
+{
+ u64 kills;
+
+ kills = istat & ~QIB_I_BITSEXTANT;
+ qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
+ (unsigned long long) kills);
+ qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
+}
+
+/* keep mainline interrupt handler cache-friendly */
+static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
+{
+ u32 gpiostatus;
+ int handled = 0;
+ int pidx;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+ * to developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+ /*
+ * Check for QSFP MOD_PRS changes
+ * only works for single port if IB1 != pidx1
+ */
+ for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
+ ++pidx) {
+ struct qib_pportdata *ppd;
+ struct qib_qsfp_data *qd;
+ u32 mask;
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ mask = QSFP_GPIO_MOD_PRS_N;
+ ppd = dd->pport + pidx;
+ mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ if (gpiostatus & dd->cspec->gpio_mask & mask) {
+ u64 pins;
+ qd = &ppd->cpspec->qsfp_data;
+ gpiostatus &= ~mask;
+ pins = qib_read_kreg64(dd, kr_extstatus);
+ pins >>= SYM_LSB(EXTStatus, GPIOIn);
+ if (!(pins & mask)) {
+ ++handled;
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+ }
+ }
+ }
+
+ if (gpiostatus && !handled) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * Clear any troublemakers, and update chip from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+}
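+
+/*
+ * Note on the QSFP check above: each port's module-present pin is
+ * MOD_PRS_N shifted by QSFP_GPIO_PORT2_SHIFT * hw_pidx, and the _N
+ * suffix suggests it is active low, which is why a module insertion
+ * is detected by the pin reading 0 (!(pins & mask)) before the QSFP
+ * worker is scheduled.
+ */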
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (istat & ~QIB_I_BITSEXTANT)
+ unknown_7322_ibits(dd, istat);
+ if (istat & QIB_I_GPIO)
+ unknown_7322_gpio_intr(dd);
+ if (istat & QIB_I_C_ERROR)
+ handle_7322_errors(dd);
+ if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
+ handle_7322_p_errors(dd->rcd[0]->ppd);
+ if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
+ handle_7322_p_errors(dd->rcd[1]->ppd);
+}
+
+/*
+ * Dynamically adjust the rcv int timeout for a context based on incoming
+ * packet rate.
+ */
+static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
+
+ /*
+ * Dynamically adjust idle timeout on chip
+ * based on number of packets processed.
+ */
+ if (npkts < rcv_int_count && timeout > 2)
+ timeout >>= 1;
+ else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
+ timeout = min(timeout << 1, rcv_int_timeout);
+ else
+ return;
+
+ dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
+}
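+
+/*
+ * Example of the adjustment above, with illustrative numbers: if the
+ * current timeout is 16 and interrupts keep arriving with fewer than
+ * rcv_int_count packets, the timeout steps 16 -> 8 -> 4 -> 2 and then
+ * stops halving; if the context is busy it steps back up 2 -> 4 -> 8
+ * ... until clamped at rcv_int_timeout. Both thresholds are tunables
+ * defined earlier in this file (not shown in this hunk).
+ */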
+
+/*
+ * This is the main interrupt handler.
+ * It will normally only be used for low frequency interrupts but may
+ * have to handle all interrupts if INTx is enabled or fewer than normal
+ * MSIx interrupts were allocated.
+ * This routine should ignore the interrupt bits for any of the
+ * dedicated MSIx handlers.
+ */
+static irqreturn_t qib_7322intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(istat == ~0ULL)) {
+ qib_bad_intrstatus(dd);
+ qib_dev_err(dd, "Interrupt status all f's, skipping\n");
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ istat &= dd->cspec->main_int_mask;
+ if (unlikely(!istat)) {
+ /* already handled, or shared and not us */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* handle "errors" of various kinds first, device ahead of port */
+ if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
+ QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
+ INT_MASK_P(Err, 1))))
+ unlikely_7322_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
+ if (ctxtrbits) {
+ rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ if (dd->rcd[i]) {
+ qib_kreceive(dd->rcd[i], NULL, &npkts);
+ adjust_rcv_timeout(dd->rcd[i], npkts);
+ }
+ }
+ rmask <<= 1;
+ }
+ if (ctxtrbits) {
+ ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
+ (ctxtrbits >> QIB_I_RCVURG_LSB);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
+ sdma_7322_intr(dd, istat);
+
+ if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
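+
+/*
+ * A note on the receive scan above: rmask starts as the pair of bits
+ * (RcvAvail | RcvUrg) for context 0 and is shifted left by one per
+ * context, so each kernel context's two status bits are tested and
+ * cleared together. Whatever remains after the kernel contexts is
+ * folded down by shifting out both LSB offsets and OR'ing, giving a
+ * per-context bitmap that is handed to qib_handle_urcv() for the
+ * user contexts.
+ */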
+
+/*
+ * Dedicated receive packet available interrupt handler.
+ */
+static irqreturn_t qib_7322pintr(int irq, void *data)
+{
+ struct qib_ctxtdata *rcd = data;
+ struct qib_devdata *dd = rcd->dd;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
+
+ qib_kreceive(rcd, NULL, &npkts);
+ adjust_rcv_timeout(rcd, npkts);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send buffer available interrupt handler.
+ */
+static irqreturn_t qib_7322bufavail(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
+
+ /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
+ if (dd->flags & QIB_INITTED)
+ qib_ib_piobufavail(dd);
+ else
+ qib_wantpiobuf_7322_intr(dd, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA interrupt handler.
+ */
+static irqreturn_t sdma_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA idle interrupt handler.
+ */
+static irqreturn_t sdma_idle_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA progress interrupt handler.
+ */
+static irqreturn_t sdma_progress_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaProgress, 1) :
+ INT_MASK_P(SDmaProgress, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA cleanup interrupt handler.
+ */
+static irqreturn_t sdma_cleanup_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_PM(SDmaCleanupDone, 1) :
+ INT_MASK_PM(SDmaCleanupDone, 0));
+ qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been set up, so
+ * we just need to do the registration and error checking.
+ * If we are using MSIx interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
+{
+ int ret, i, msixnum;
+ u64 redirect[6];
+ u64 mask;
+
+ if (!dd->num_pports)
+ return;
+
+ if (clearpend) {
+ /*
+ * if not switching interrupt types, be sure interrupts are
+ * disabled, and then clear anything pending at this point,
+ * because we are starting clean.
+ */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7322_init_hwerrors(dd);
+
+ /* clear any interrupt bits that might be set */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ /* make sure no pending MSIx intr, and clear diag reg */
+ qib_write_kreg(dd, kr_intgranted, ~0ULL);
+ qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
+ }
+
+ if (!dd->cspec->num_msix_entries) {
+ /* Try to get INTx interrupt */
+try_intx:
+ if (!dd->pcidev->irq) {
+ qib_dev_err(dd, "irq is 0, BIOS error? "
+ "Interrupts won't work\n");
+ goto bail;
+ }
+ ret = request_irq(dd->pcidev->irq, qib_7322intr,
+ IRQF_SHARED, QIB_DRV_NAME, dd);
+ if (ret) {
+ qib_dev_err(dd, "Couldn't setup INTx "
+ "interrupt (irq=%d): %d\n",
+ dd->pcidev->irq, ret);
+ goto bail;
+ }
+ dd->cspec->irq = dd->pcidev->irq;
+ dd->cspec->main_int_mask = ~0ULL;
+ goto bail;
+ }
+
+ /* Try to get MSIx interrupts */
+ memset(redirect, 0, sizeof redirect);
+ mask = ~0ULL;
+ msixnum = 0;
+ for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
+ irq_handler_t handler;
+ const char *name;
+ void *arg;
+ u64 val;
+ int lsb, reg, sh;
+
+ if (i < ARRAY_SIZE(irq_table)) {
+ if (irq_table[i].port) {
+ /* skip if for a non-configured port */
+ if (irq_table[i].port > dd->num_pports)
+ continue;
+ arg = dd->pport + irq_table[i].port - 1;
+ } else
+ arg = dd;
+ lsb = irq_table[i].lsb;
+ handler = irq_table[i].handler;
+ name = irq_table[i].name;
+ } else {
+ unsigned ctxt;
+
+ ctxt = i - ARRAY_SIZE(irq_table);
+ /* per krcvq context receive interrupt */
+ arg = dd->rcd[ctxt];
+ if (!arg)
+ continue;
+ lsb = QIB_I_RCVAVAIL_LSB + ctxt;
+ handler = qib_7322pintr;
+ name = QIB_DRV_NAME " (kctx)";
+ }
+ ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
+ handler, 0, name, arg);
+ if (ret) {
+ /*
+ * Shouldn't happen since the enable said we could
+ * have as many as we are trying to set up here.
+ */
+ qib_dev_err(dd, "Couldn't setup MSIx "
+ "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+ dd->cspec->msix_entries[msixnum].vector,
+ ret);
+ qib_7322_nomsix(dd);
+ goto try_intx;
+ }
+ dd->cspec->msix_arg[msixnum] = arg;
+ if (lsb >= 0) {
+ reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
+ sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
+ SYM_LSB(IntRedirect0, vec1);
+ mask &= ~(1ULL << lsb);
+ redirect[reg] |= ((u64) msixnum) << sh;
+ }
+ val = qib_read_kreg64(dd, 2 * msixnum + 1 +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ msixnum++;
+ }
+ /* Initialize the vector mapping */
+ for (i = 0; i < ARRAY_SIZE(redirect); i++)
+ qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
+ dd->cspec->main_int_mask = mask;
+bail:;
+}
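+
+/*
+ * The redirect[] packing above: for an interrupt whose status bit is
+ * at position lsb, the MSIx vector number is written into register
+ * lsb / IBA7322_REDIRECT_VEC_PER_REG, at field position
+ * lsb % IBA7322_REDIRECT_VEC_PER_REG, with a per-field stride of
+ * SYM_LSB(IntRedirect0, vec1) bits. For instance, if there were 12
+ * fields per register (purely illustrative), status bit 30 would land
+ * in redirect[2], field 6. Bits left in "mask" keep going to the
+ * general handler via main_int_mask.
+ */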
+
+/**
+ * qib_7322_boardname - fill in the board name and note features
+ * @dd: the qlogic_ib device
+ *
+ * info will be based on the board revision register
+ */
+static unsigned qib_7322_boardname(struct qib_devdata *dd)
+{
+ /* Will need enumeration of board-types here */
+ char *n;
+ u32 boardid, namelen;
+ unsigned features = DUAL_PORT_CAP;
+
+ boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+ switch (boardid) {
+ case 0:
+ n = "InfiniPath_QLE7342_Emulation";
+ break;
+ case 1:
+ n = "InfiniPath_QLE7340";
+ dd->flags |= QIB_HAS_QSFP;
+ features = PORT_SPD_CAP;
+ break;
+ case 2:
+ n = "InfiniPath_QLE7342";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ case 3:
+ n = "InfiniPath_QMI7342";
+ break;
+ case 4:
+ n = "InfiniPath_Unsupported7342";
+ qib_dev_err(dd, "Unsupported version of QMH7342\n");
+ features = 0;
+ break;
+ case BOARD_QMH7342:
+ n = "InfiniPath_QMH7342";
+ features = 0x24;
+ break;
+ case BOARD_QME7342:
+ n = "InfiniPath_QME7342";
+ break;
+ case 15:
+ n = "InfiniPath_QLE7342_TEST";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ default:
+ n = "InfiniPath_QLE73xy_UNKNOWN";
+ qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
+ break;
+ }
+ dd->board_atten = 1; /* index into txdds_Xdr */
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+ if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
+ qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
+ " by module parameter\n", dd->unit);
+ features &= PORT_SPD_CAP;
+ }
+
+ return features;
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_do_7322_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 *msix_vecsave;
+ int i, msix_entries, ret = 1;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ msix_entries = dd->cspec->num_msix_entries;
+
+ /* no interrupts till re-initted */
+ qib_7322_set_intr_state(dd, 0);
+
+ if (msix_entries) {
+ qib_7322_nomsix(dd);
+ /* can be up to 512 bytes, too big for stack */
+ msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
+ sizeof(u64), GFP_KERNEL);
+ if (!msix_vecsave)
+ qib_dev_err(dd, "No mem to save MSIx data\n");
+ } else
+ msix_vecsave = NULL;
+
+ /*
+ * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
+ * info that is set up by the BIOS, so we have to save and restore
+ * it ourselves. There is some risk something could change it,
+ * after we save it, but since we have disabled the MSIx, it
+ * shouldn't be touched...
+ */
+ for (i = 0; i < msix_entries; i++) {
+ u64 vecaddr, vecdata;
+ vecaddr = qib_read_kreg64(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ vecdata = qib_read_kreg64(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ if (msix_vecsave) {
+ msix_vecsave[2 * i] = vecaddr;
+ /* save it without the masked bit set */
+ msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
+ }
+ }
+
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+ dd->pport->cpspec->ibmalfdelta = 0;
+ dd->int_counter = 0; /* so we check interrupts work again */
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+ * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
+ dd->flags |= QIB_DOING_RESET;
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 3000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision)
+ break;
+ if (i == 5) {
+ qib_dev_err(dd, "Failed to initialize after reset, "
+ "unusable\n");
+ ret = 0;
+ goto bail;
+ }
+ }
+
+ dd->flags |= QIB_PRESENT; /* it's back */
+
+ if (msix_entries) {
+ /* restore the MSIx vector address and data if saved above */
+ for (i = 0; i < msix_entries; i++) {
+ dd->cspec->msix_entries[i].entry = i;
+ if (!msix_vecsave || !msix_vecsave[2 * i])
+ continue;
+ qib_write_kreg(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[2 * i]);
+ qib_write_kreg(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[1 + 2 * i]);
+ }
+ }
+
+ /* initialize the remaining registers. */
+ for (i = 0; i < dd->num_pports; ++i)
+ write_7322_init_portregs(&dd->pport[i]);
+ write_7322_initregs(dd);
+
+ if (qib_pcie_params(dd, dd->lbus_width,
+ &dd->cspec->num_msix_entries,
+ dd->cspec->msix_entries))
+ qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ qib_setup_7322_interrupt(dd, 1);
+
+ for (i = 0; i < dd->num_pports; ++i) {
+ struct qib_pportdata *ppd = &dd->pport[i];
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+bail:
+ dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
+ kfree(msix_vecsave);
+ return ret;
+}
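+
+/*
+ * Layout note for the save/restore above: each MSIx table entry is
+ * accessed as two consecutive 64-bit reads/writes starting at
+ * QIB_7322_MsixTable_OFFS, address at index 2*i and data at 2*i+1;
+ * the data is saved with bit 32 (the "masked" bit, per the comment in
+ * the save loop) cleared so that restored vectors come back unmasked,
+ * and entries with a zero saved address are skipped on restore.
+ */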
+
+/**
+ * qib_7322_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (0) or RCVHQ_RCV_TYPE_EXPECTED (1)
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (!(dd->flags & QIB_PRESENT))
+ return;
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
+
+ /* paranoia checks */
+ if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7322_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
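+
+/*
+ * Worked example for the TID packing above: the paranoia check says
+ * the address must be 2KB aligned, so IBA7322_TID_PA_SHIFT presumably
+ * strips 11 address bits; e.g. pa = 0x12348000 gives
+ * chippa = 0x12348000 >> 11 = 0x24690, which then gets the size bits
+ * (tidtemplate for eager, IBA7322_TID_SZ_4K for expected) OR'd in
+ * before being written to the chip.
+ */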
+
+/**
+ * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().
+ */
+static void qib_7322_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7322_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_7322_tidtemplate(struct qib_devdata *dd)
+{
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per port instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7322_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7322_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_init_7322_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kbase: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+
+static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
+ QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
+ QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
+ if (rcd->dd->cspec->r1)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
+static struct qib_message_header *
+qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+/*
+ * Configure number of contexts.
+ */
+static void qib_7322_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1 && dd->num_pports) {
+ /*
+ * Set the mask for which bits from the QPN are used
+ * to select a context number.
+ */
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = NUM_IB_PORTS +
+ (qib_n_krcv_queues - 1) * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
+ } else {
+ dd->first_user_ctxt = NUM_IB_PORTS;
+ dd->n_krcv_queues = 1;
+ }
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 6)
+ dd->ctxtcnt = 6;
+ else if (nctxts <= 10)
+ dd->ctxtcnt = 10;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts < dd->num_pports)
+ dd->ctxtcnt = dd->num_pports;
+ else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
+
+ /*
+ * Chip can be configured for 6, 10, or 18 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 10)
+ dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ else if (dd->ctxtcnt > 6)
+ dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ /* else configure for default 6 receive ctxts */
+
+ /* The XRC opcode is 5. */
+ dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
+
+ /*
+ * RcvCtrl *must* be written here so that the
+ * chip understands how to change rcvegrcnt below.
+ */
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->num_pports > 1 ? 1024U : 2048U);
+}
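+
+/*
+ * Example of the context math above (illustrative, and assuming
+ * NUM_IB_PORTS is 2): on a dual-port board with qib_n_krcv_queues = 2,
+ * first_user_ctxt = 2 + (2 - 1) * 2 = 4 and n_krcv_queues = 4 / 2 = 2.
+ * With qib_cfgctxts unset and, say, 8 online CPUs, nctxts = 4 + 8 = 12,
+ * which is more than 10, so ctxtcnt becomes the full chip count (18)
+ * and ContextCfg is set to 2.
+ */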
+
+static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+ * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
+ * Since the clock is always 250MHz, the value is 3, 1 or 0.
+ */
+ if (ppd->link_speed_active == QIB_IB_QDR)
+ ret = 3;
+ else if (ppd->link_speed_active == QIB_IB_DDR)
+ ret = 1;
+ else
+ ret = 0;
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
+done:
+ return ret;
+}
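+
+/*
+ * For the cases above that break out of the switch rather than jump
+ * to done (RXPOL_ENB, LREV_ENB, HRTBT), the answer is simply the
+ * right-justified field extracted from the ibcctrl_b shadow:
+ * (ibcctrl_b >> lsb) & maskr. The set side in qib_7322_set_ib_cfg()
+ * below uses the same lsb/maskr pair to write the field back.
+ */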
+
+/*
+ * Below again cribbed liberally from older version. Do not lean
+ * heavily on it.
+ */
+#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
+#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
+ | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
+
+static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+ /*
+ * Set LID and LMC. Combined to avoid possible hazard
+ * caller puts LMC in 16MSbits, DLID in 16LSbits of val
+ */
+ lsb = IBA7322_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7322_IBC_DLIDLMC_MASK;
+ /*
+ * For header-checking, the SLID in the packet will
+ * be masked with SendIBSLMCMask, and compared
+ * with SendIBSLIDAssignMask. Make sure we do not
+ * set any bits not covered by the mask, or we get
+ * false-positives.
+ */
+ qib_write_kreg_port(ppd, krp_sendslid,
+ val & (val >> 16) & SendIBSLIDAssignMask);
+ qib_write_kreg_port(ppd, krp_sendslidmask,
+ (val >> 16) & SendIBSLMCMask);
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ ppd->link_width_enabled = val;
+ /* convert IB value to chip register value */
+ if (val == IB_WIDTH_1X)
+ val = 0;
+ else if (val == IB_WIDTH_4X)
+ val = 1;
+ else
+ val = 3;
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
+ lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * As with width, only write the actual register if the
+ * link is currently down, otherwise takes effect on next
+ * link change. Since the setting is being explicitly requested
+ * (via MAD or sysfs), clear autoneg failure status if speed
+ * autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ val <<= IBA7322_IBC_SPEED_LSB;
+ maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ if (val & (val - 1)) {
+ /* Multiple speeds enabled */
+ val |= IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (val & IBA7322_IBC_SPEED_QDR)
+ val |= IBA7322_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + min/max + speed bits are contiguous */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg_port(ppd, krp_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a |= (u64)val <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ ppd->cpspec->ibmalfusesnap = 1;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ if (ppd->cpspec->ibmalfusesnap) {
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfdelta +=
+ read_7322_creg32_port(ppd,
+ crp_errlink) -
+ ppd->cpspec->ibmalfsnap;
+ }
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * stop state chase counter and timer, if running.
+ * wait for a pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7322_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_OP_VLS:
+ if (ppd->vls_operational != val) {
+ ppd->vls_operational = val;
+ set_vls(ppd);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_VL_HIGH_LIMIT:
+ qib_write_kreg_port(ppd, krp_highprio_limit, val);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PORT:
+ /* val is the port number of the switch we are connected to. */
+ if (ppd->dd->cspec->r1) {
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ ppd->cpspec->ipg_tries = 0;
+ }
+ goto bail;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
+ ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(dd, kr_scratch, 0);
+bail:
+ return ret;
+}
+
+static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ctrlb;
+
+ /* only IBC loopback, may add serdes and xgxs loopbacks later */
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ /* enable heart beat again */
+ val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
+ << IBA7322_IBC_HRTBT_LSB);
+ ppd->cpspec->ibcctrl_b = ctrlb | val;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b,
+ ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
+static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u32 val = qib_read_kreg_port(ppd, regno);
+
+ vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
+ SYM_RMASK(LowPriority0_0, VirtualLane);
+ vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
+ SYM_RMASK(LowPriority0_0, Weight);
+ }
+}
+
+static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u64 val;
+
+ val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
+ SYM_LSB(LowPriority0_0, VirtualLane)) |
+ ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
+ SYM_LSB(LowPriority0_0, Weight));
+ qib_write_kreg_port(ppd, regno, val);
+ }
+ if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ }
+}
+
+static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ get_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ get_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ set_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ set_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_TIDFLOW_ENB | \
+ QIB_RCVCTRL_TIDFLOW_DIS | \
+ QIB_RCVCTRL_TAILUPD_ENB | \
+ QIB_RCVCTRL_TAILUPD_DIS | \
+ QIB_RCVCTRL_INTRAVAIL_ENB | \
+ QIB_RCVCTRL_INTRAVAIL_DIS | \
+ QIB_RCVCTRL_BP_ENB | \
+ QIB_RCVCTRL_BP_DIS)
+
+#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_PKEY_DIS | \
+ QIB_RCVCTRL_PKEY_ENB)
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * locations are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
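+/*
+ * Illustrative use (hypothetical call site, values arbitrary):
+ *
+ *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
+ *			 QIB_RCVCTRL_INTRAVAIL_ENB, ctxt);
+ *
+ * enables one receive context and its "interrupt on packet available"
+ * in a single pass; op == 0 is the "flush" case, which simply rewrites
+ * both the common and the per-port register from the shadows.
+ */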
+static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TIDFLOW_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TIDFLOW_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+ if (ctxt < 0) {
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ rcd = NULL;
+ } else {
+ mask = (1ULL << ctxt);
+ rcd = dd->rcd[ctxt];
+ }
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
+ ppd->p_rcvctrl |=
+ (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ }
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
+ rcd->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
+ rcd->rcvhdrq_phys);
+ rcd->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ ppd->p_rcvctrl &=
+ ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (op & QIB_RCVCTRL_BP_ENB)
+ dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
+ if (op & QIB_RCVCTRL_BP_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
+ /*
+ * Decide which registers to write depending on the ops enabled.
+ * Special case is "flush" (no bits set at all)
+ * which needs to write both.
+ */
+ if (op == 0 || (op & RCVCTRL_COMMON_MODS))
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if (op == 0 || (op & RCVCTRL_PORT_MODS))
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ /* be sure enabling write seen; hd/tl should be 0 */
+ (void) qib_read_kreg32(dd, kr_scratch);
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
+ dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ unsigned f;
+
+ /* Now that the context is disabled, clear these registers. */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, ctxt);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, i);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there are multiple such registers with
+ * slightly different layouts.
+ * The chip doesn't allow back-to-back sendctrl writes, so write
+ * the scratch register after writing sendctrl.
+ *
+ * Which register is written depends on the operation.
+ * Most operations affect the common register, while
+ * SEND_ENB and SEND_DIS operate on the per-port ones.
+ * SEND_ENB is included in common because it can change SPCL_TRIG.
+ */
+#define SENDCTRL_COMMON_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_AVAIL_DIS | \
+ QIB_SENDCTRL_AVAIL_ENB | \
+ QIB_SENDCTRL_AVAIL_BLIP | \
+ QIB_SENDCTRL_DISARM | \
+ QIB_SENDCTRL_DISARM_ALL | \
+ QIB_SENDCTRL_SEND_ENB)
+
+#define SENDCTRL_PORT_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_SEND_ENB | \
+ QIB_SENDCTRL_SEND_DIS | \
+ QIB_SENDCTRL_FLUSH)
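+/*
+ * Illustrative use (hypothetical call site): a caller that wants to
+ * re-enable sends on a port and turn buffer-available updates back on
+ * could pass
+ *
+ *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_ENB |
+ *			  QIB_SENDCTRL_AVAIL_ENB);
+ *
+ * which updates both the common and the per-port register. For
+ * QIB_SENDCTRL_DISARM, the low bits of op carry the buffer number
+ * that gets ORed into DisarmSendBuf below.
+ */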
+
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the dd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
+ }
+
+ /* Then the ppd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ /*
+ * Disarm any buffers that are not yet launched,
+ * disabling updates until done.
+ */
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
+
+ /*
+ * Now drain all the fifos. The Abort bit should never be
+ * needed, so for now, at least, we don't use it.
+ */
+ tmp_ppd_sendctrl |=
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
+ SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
+ SYM_MASK(SendCtrl_0, TxeBypassIbc);
+ qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmSendBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
+#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
+#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
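+/*
+ * How the xlator[] table below uses these flags (informational):
+ * an entry such as "crp_pktsend | _PORT_64BIT_FLAG" encodes both the
+ * register index and the fact that it is a 64-bit counter; the raw
+ * index is recovered with "& _PORT_CNTR_IDXMASK" before the read.
+ */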
+
+/**
+ * qib_portcntr_7322 - read a per-port chip counter
+ * @ppd: the qlogic_ib pport
+ * @reg: the counter to read (not a chip offset)
+ */
+static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 ret = 0ULL;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u32 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
+ [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
+ [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = crp_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = crp_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = crp_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
+ [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
+ /*
+ * the next 3 aren't really counters, but were implemented
+ * as counters in older chips, so still get accessed as
+ * though they were counters from this code.
+ */
+ [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
+ [QIBPORTCNTR_PSSTART] = krp_psstart,
+ [QIBPORTCNTR_PSSTAT] = krp_psstat,
+ /* pseudo-counter, summed for all ports */
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg] & _PORT_CNTR_IDXMASK;
+
+ /* handle non-counters and special cases first */
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts (skip if mini_init) */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (!rcd || rcd->ppd != ppd)
+ continue;
+ ret += read_7322_creg32(dd, cr_base_egrovfl + i);
+ }
+ goto done;
+ } else if (reg == QIBPORTCNTR_RXDROPPKT) {
+ /*
+ * Used as part of the synthesis of port_rcv_errors
+ * in the verbs code for IBTA counters. Not needed for 7322,
+ * because all the errors are already counted by other cntrs.
+ */
+ goto done;
+ } else if (reg == QIBPORTCNTR_PSINTERVAL ||
+ reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
+ /* were counters in older chips, now per-port kernel regs */
+ ret = qib_read_kreg_port(ppd, creg);
+ goto done;
+ }
+
+ /*
+ * Only fast increment counters are 64 bits; use 32 bit reads to
+ * avoid two independent reads when on Opteron.
+ */
+ if (xlator[reg] & _PORT_64BIT_FLAG)
+ ret = read_7322_creg_port(ppd, creg);
+ else
+ ret = read_7322_creg32_port(ppd, creg);
+ if (creg == crp_ibsymbolerr) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= ppd->cpspec->ibsymdelta;
+ } else if (creg == crp_iblinkerrrecov) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= ppd->cpspec->iblnkerrdelta;
+ } else if (creg == crp_errlink)
+ ret -= ppd->cpspec->ibmalfdelta;
+ else if (creg == crp_iblinkdown)
+ ret += ppd->cpspec->iblnkdowndelta;
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way that works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by the utility.
+ * Non-error counters are first.
+ * Start of the "error" counters is indicated by a leading "E " on the first
+ * "error" counter, which doesn't count in the label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7322indices contains the corresponding register indices.
+ */
+static const char cntr7322names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "RxTIDFloDrop\n" /* 7322 only */
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n"
+ "Ctx17EgrOvfl\n"
+ ;
+
+static const u32 cntr7322indices[] = {
+ cr_lbint | _PORT_64BIT_FLAG,
+ cr_lbstall | _PORT_64BIT_FLAG,
+ cr_tidfull,
+ cr_tidinvalid,
+ cr_rxtidflowdrop,
+ cr_base_egrovfl + 0,
+ cr_base_egrovfl + 1,
+ cr_base_egrovfl + 2,
+ cr_base_egrovfl + 3,
+ cr_base_egrovfl + 4,
+ cr_base_egrovfl + 5,
+ cr_base_egrovfl + 6,
+ cr_base_egrovfl + 7,
+ cr_base_egrovfl + 8,
+ cr_base_egrovfl + 9,
+ cr_base_egrovfl + 10,
+ cr_base_egrovfl + 11,
+ cr_base_egrovfl + 12,
+ cr_base_egrovfl + 13,
+ cr_base_egrovfl + 14,
+ cr_base_egrovfl + 15,
+ cr_base_egrovfl + 16,
+ cr_base_egrovfl + 17,
+};
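+/*
+ * Note (informational): the newline-separated names above and the
+ * entries in cntr7322indices[] correspond one to one; init_7322_cntrnames()
+ * below limits how many of the trailing Ctxt*EgrOvfl entries are used,
+ * based on the number of configured contexts for the device.
+ */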
+
+/*
+ * Same as cntr7322names and cntr7322indices, but for port-specific counters.
+ * portcntr7322indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG.
+ */
+static const char portcntr7322names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
+ "RxVL15Drop\n"
+ "RxVlErr\n"
+ "XcessBufOvfl\n"
+ "RxQPBadCtxt\n" /* 7322-only from here down */
+ "TXBadHeader\n"
+ ;
+
+static const u32 portcntr7322indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ crp_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ crp_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ crp_txsdmadesc | _PORT_64BIT_FLAG,
+ crp_rxdlidfltr,
+ crp_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ crp_rcvflowctrlviol,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ crp_txminmaxlenerr,
+ crp_txdroppedpkt,
+ crp_txlenerr,
+ crp_txunderrun,
+ crp_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+ crp_rxqpinvalidctxt,
+ crp_txhdrerr,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7322_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7322names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7322names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
+ for (i = 0; i < dd->num_pports; ++i) {
+ dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->pport[i].cpspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for"
+ " portcounters\n");
+ }
+}
+
+static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *) cntr7322names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ if (cntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg(dd,
+ cntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32(dd,
+ cntr7322indices[i]);
+ }
+done:
+ return ret;
+}
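+/*
+ * Reading convention (informational): a caller passes a non-NULL namep
+ * to fetch the counter name string, or a NULL namep with cntrp to fetch
+ * the values; a return of 0 once pos reaches the previously reported
+ * length marks the end of the data.
+ */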
+
+static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr7322names;
+ } else {
+ struct qib_pportdata *ppd = &dd->pport[port];
+ u64 *cntr = ppd->cpspec->portcntrs;
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7322(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg_port(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32_port(ppd,
+ portcntr7322indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7322_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
+ * real purpose of this function is to maintain the notion of
+ * "active time", which in turn is only logged into the eeprom,
+ * which we don't have, yet, for 7322-based boards.
+ *
+ * Called from add_timer
+ */
+static void qib_get_7322_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ u64 traffic_wds;
+ int pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+		/*
+		 * If the port isn't enabled or isn't operational, or if
+		 * diags is running (which can cause memory diags to fail),
+		 * skip this port this time.
+		 */
+ if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
+ || dd->diag_client)
+ continue;
+
+ /*
+ * Maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
+ qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
+ spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ traffic_wds -= ppd->dd->traffic_wds;
+ ppd->dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ QIB_IB_QDR) &&
+ (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) &&
+ ppd->cpspec->qdr_dfe_time &&
+ time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ ppd->cpspec->qdr_dfe_on = 0;
+
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_INIT_R1 :
+ QDR_STATIC_ADAPT_INIT);
+ force_h1(ppd);
+ }
+ }
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we were using MSIx, try to fall back to INTx.
+ */
+static int qib_7322_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->cspec->num_msix_entries)
+ return 0; /* already using INTx */
+
+ qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7322_nomsix(dd);
+ qib_enable_intx(dd->pcidev);
+ qib_setup_7322_interrupt(dd, 0);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well, then return to the previous state (which may still be in reset).
+ * NOTE: some callers of this "know" this writes the current value
+ * of cpspec->ibcctrl_a as part of its operation, so if that changes,
+ * check all callers.
+ */
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
+ SYM_MASK(IBPCSConfig_0, xcv_treset) |
+ SYM_MASK(IBPCSConfig_0, tx_rx_reset);
+
+ val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
+
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum, control, len;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ control = qib_7322_setpbc_control(ppd, len, 0, 15);
+ pbc = ((u64) control << 32) | len;
+ while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
+ if (i++ > 15)
+ return;
+ udelay(2);
+ }
+ /* disable header check on this packet, since it can't be valid */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+ /* and re-enable hdr check */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down).
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down with autoneg enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ u64 newctrlb;
+ newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK);
+
+ if (speed & (speed - 1)) /* multiple speeds */
+ newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ newctrlb |= speed == QIB_IB_QDR ?
+ IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
+ ((speed == QIB_IB_DDR ?
+ IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
+
+ if (newctrlb == ppd->cpspec->ibcctrl_b)
+ return;
+
+ ppd->cpspec->ibcctrl_b = newctrlb;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7322_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_autoneg_7322_send(ppd, 0);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->ppd;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+	/*
+	 * Busy wait for this first part; it should be at most a
+	 * few hundred usec, since we scheduled ourselves for 2 msec.
+	 */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
+ == IB_7322_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+ set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ ppd->cpspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
+/*
+ * This routine is used to request IPG set in the QLogic switch.
+ * Only called if r1.
+ */
+static void try_7322_ipg(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ unsigned delay;
+ int ret;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ goto retry;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ goto retry;
+
+ if (!ibp->smi_ah) {
+ struct ib_ah_attr attr;
+ struct ib_ah *ah;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
+ attr.port_num = ppd->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->smi_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else {
+ send_buf->ah = &ibp->smi_ah->ibah;
+ ret = 0;
+ }
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_SEND;
+ smp->hop_cnt = 1;
+ smp->attr_id = QIB_VENDOR_IPG;
+ smp->attr_mod = 0;
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (ret)
+ ib_free_send_mad(send_buf);
+retry:
+ delay = 2 << ppd->cpspec->ipg_tries;
+ schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+}
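+/*
+ * Retry pacing (informational): each attempt doubles the delay,
+ * 2 << ipg_tries msec, and ipg_7322_work() below gives up after 10
+ * tries or once the link is no longer in INIT/ARMED/ACTIVE.
+ */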
+
+/*
+ * Timeout handler for setting IPG.
+ * Only called if r1.
+ */
+static void ipg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ ipg_work.work)->ppd;
+ if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
+ && ++ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+}
+
+static u32 qib_7322_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
+
+ switch (state) {
+ case IB_7322_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7322_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7322_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7322_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7322_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7322_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
+ return qib_7322_physportstate[state];
+}
+
+static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ unsigned long flags;
+ int mult;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ /* Update our picture of width and speed from chip */
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
+ ppd->link_speed_active = QIB_IB_QDR;
+ mult = 4;
+ } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
+ ppd->link_speed_active = QIB_IB_DDR;
+ mult = 2;
+ } else {
+ ppd->link_speed_active = QIB_IB_SDR;
+ mult = 1;
+ }
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
+ ppd->link_width_active = IB_WIDTH_4X;
+ mult *= 4;
+ } else
+ ppd->link_width_active = IB_WIDTH_1X;
+ ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
+
+ if (!ibup) {
+ u64 clr;
+
+ /* Link went down. */
+ /* do IPG MAD again after linkdown, even if last time failed */
+ ppd->cpspec->ipg_tries = 0;
+ clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
+ SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
+ if (clr)
+ qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ /* unlock the Tx settings, speed may change */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+ qib_cancel_sends(ppd);
+ /* on link down, ensure sane pcs state */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ clr = read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (clr == ppd->cpspec->iblnkdownsnap)
+ ppd->cpspec->iblnkdowndelta++;
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & QIB_IB_DDR)
+ && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and auto-negotiation enabled */
+ ++ppd->cpspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymdelta +=
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) -
+ ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta +=
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) -
+ ppd->cpspec->iblnkerrsnap;
+ }
+ try_7322_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ qib_autoneg_7322_send(ppd, 1);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ udelay(2);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
+ symadj = 1;
+ }
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ symadj = 1;
+ if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+ if (!ppd->cpspec->recovery_init)
+ setup_7322_link_recovery(ppd, 0);
+ ppd->cpspec->qdr_dfe_time = jiffies +
+ msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
+ }
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ }
+ if (symadj) {
+ ppd->cpspec->iblnkdownsnap =
+ read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7322_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
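+/*
+ * Illustrative use: gpio_7322_mod(dd, 0, 0, 0) writes nothing (the mask
+ * is zero) and simply returns the current GPIO input pins, which is how
+ * qib_7322_eeprom_wen() below samples the write-enable state.
+ */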
+
+/* Enable writes to config EEPROM, if possible. Returns previous state */
+static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ int prev_wen;
+ u32 mask;
+
+ mask = 1 << QIB_EEPROM_WEN_NUM;
+ prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
+ gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
+
+ return prev_wen & 1;
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7322_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->palign = qib_read_kreg32(dd, kr_pagealign);
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport[0].ibmtu = (u32)mtu;
+ dd->pport[1].ibmtu = (u32)mtu;
+
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+	 * every buffer allocation.
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7322_chip_params(), so split out as separate function
+ */
+static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+
+ dd->cspec->cregbase = (u64 __iomem *)(cregbase +
+ (char __iomem *)dd->kregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+
+ /* port registers are defined as relative to base of chip */
+ dd->pport[0].cpspec->kpregbase =
+ (u64 __iomem *)((char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->kpregbase =
+ (u64 __iomem *)(dd->palign +
+ (char __iomem *)dd->kregbase);
+ dd->pport[0].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+}
+
+/*
+ * This is a fairly special-purpose observer, so we only support
+ * the port-specific parts of SendCtrl
+ */
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
+ SYM_MASK(SendCtrl_0, SDmaHalt) | \
+ SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
+ SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
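+/*
+ * Informational: when the diag interface writes through this observer,
+ * only bits in SENDCTRL_SHADOWED are folded into ppd->p_sendctrl; any
+ * other bits in the write reach the chip transiently but are not
+ * shadowed (see sendctrl_hook() below).
+ */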
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op, u32 offs,
+ u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx;
+ unsigned pidx;
+ struct qib_pportdata *ppd = NULL;
+ u64 local_data, all_bits;
+
+ /*
+ * The fixed correspondence between Physical ports and pports is
+ * severed. We need to hunt for the ppd that corresponds
+ * to the offset we got. And we have to do that without admitting
+ * we know the stride, apparently.
+ */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ u64 __iomem *psptr;
+ u32 psoffs;
+
+ ppd = dd->pport + pidx;
+ if (!ppd->cpspec->kpregbase)
+ continue;
+
+ psptr = ppd->cpspec->kpregbase + krp_sendctrl;
+ psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
+ if (psoffs == offs)
+ break;
+ }
+
+ /* If pport is not being managed by driver, just avoid shadows. */
+ if (pidx >= dd->num_pports)
+ ppd = NULL;
+
+ /* In any case, "idx" is flat index in kreg space */
+ idx = offs / sizeof(u64);
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (!ppd || (mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ if (ppd) {
+ sval = ppd->p_sendctrl & ~mask;
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ ppd->p_sendctrl = sval;
+ } else
+ sval = *data & SENDCTRL_SHADOWED & mask;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_write_kreg(dd, idx, tval);
+ qib_write_kreg(dd, kr_scratch, 0Ull);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_0_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
+ KREG_IDX(SendCtrl_0) * sizeof(u64)
+};
+
+static const struct diag_observer sendctrl_1_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
+ KREG_IDX(SendCtrl_1) * sizeof(u64)
+};
+
+static ushort sdma_fetch_prio = 8;
+module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
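+/*
+ * Illustrative (made-up value): loading the driver with
+ * sdma_fetch_prio=4 causes qib_late_7322_initreg() below to write 4
+ * into krp_senddmaprioritythld for each port (only the low 4 bits of
+ * the parameter are used).
+ */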
+
+/* Besides logging QSFP events, we set appropriate TxDDS values */
+static void init_txdds_table(struct qib_pportdata *ppd, int override);
+
+static void qsfp_7322_event(struct work_struct *work)
+{
+ struct qib_qsfp_data *qd;
+ struct qib_pportdata *ppd;
+ u64 pwrup;
+ int ret;
+ u32 le2;
+
+ qd = container_of(work, struct qib_qsfp_data, work);
+ ppd = qd->ppd;
+ pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+
+ /*
+	 * Some QSFPs not only do not respond until the full power-up
+ * time, but may behave badly if we try. So hold off responding
+ * to insertion.
+ */
+ while (1) {
+ u64 now = get_jiffies_64();
+ if (time_after64(now, pwrup))
+ break;
+ msleep(1);
+ }
+ ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+ /*
+ * Need to change LE2 back to defaults if we couldn't
+ * read the cable type (to handle cable swaps), so do this
+ * even on failure to read cable information. We don't
+ * get here for QME, so IS_QME check not needed here.
+ */
+ le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
+ !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
+ LE2_5m : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+ init_txdds_table(ppd, 0);
+}
+
+/*
+ * There is little we can do but complain to the user if QSFP
+ * initialization fails.
+ */
+static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
+ struct qib_devdata *dd = ppd->dd;
+ u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
+
+ mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ qd->ppd = ppd;
+ qib_qsfp_init(qd, qsfp_7322_event);
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
+ dd->cspec->gpio_mask |= mod_prs_bit;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+/*
+ * Called at device initialization time, and also if the txselect
+ * module parameter is changed. This is used for cables that don't
+ * have valid QSFP EEPROMs (not present, or attenuation is zero).
+ * We initialize to the default, then if there is a specific
+ * unit,port match, we use that (and set it immediately, for the
+ * current speed, if the link is at INIT or better).
+ * String format is "default# unit#,port#=# ... u,p=#"; separators must
+ * be SPACE characters. A newline terminates. The u,p=# tuples may
+ * optionally have "u,p=#,#", where the final # is the H1 value.
+ * The last specific match is used (actually, all are used, but the last
+ * one is the one that winds up set); if none match at all, fall back on
+ * the default.
+ */
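+/*
+ * Example with made-up values: a txselect string of "2 1,1=5 1,2=6,8"
+ * would set the default index to 2, use entry 5 for unit 1 port 1, and
+ * use entry 6 with an H1 override of 8 for unit 1 port 2.
+ */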
+static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
+{
+ char *nxt, *str;
+ u32 pidx, unit, port, deflt, h1;
+ unsigned long val;
+ int any = 0, seth1;
+
+ str = txselect_list;
+
+ /* default number is validated in setup_txselect() */
+ deflt = simple_strtoul(str, &nxt, 0);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->pport[pidx].cpspec->no_eep = deflt;
+
+ while (*nxt && nxt[1]) {
+ str = ++nxt;
+ unit = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || !*nxt || *nxt != ',') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ port = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || *nxt != '=') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ val = simple_strtoul(str, &nxt, 0);
+ if (nxt == str) {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+ continue;
+ seth1 = 0;
+ h1 = 0; /* gcc thinks it might be used uninitted */
+ if (*nxt == ',' && nxt[1]) {
+ str = ++nxt;
+ h1 = (u32)simple_strtoul(str, &nxt, 0);
+ if (nxt == str)
+ while (*nxt && *nxt++ != ' ') /* skip */
+ ;
+ else
+ seth1 = 1;
+ }
+ for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
+ ++pidx) {
+ struct qib_pportdata *ppd = &dd->pport[pidx];
+
+ if (ppd->port != port || !ppd->link_speed_supported)
+ continue;
+ ppd->cpspec->no_eep = val;
+ /* now change the IBC and serdes, overriding generic */
+ init_txdds_table(ppd, 1);
+ any++;
+ }
+ if (*nxt == '\n')
+ break; /* done */
+ }
+ if (change && !any) {
+		/*
+		 * No specific setting, use the default.
+		 * Change the IBC and serdes, but since it's
+		 * general, don't override specific settings.
+		 */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ init_txdds_table(&dd->pport[pidx], 0);
+ }
+}
+
+/* handle the txselect parameter changing */
+static int setup_txselect(const char *str, struct kernel_param *kp)
+{
+ struct qib_devdata *dd;
+ unsigned long val;
+ char *n;
+ if (strlen(str) >= MAX_ATTEN_LEN) {
+ printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
+ "too long\n");
+ return -ENOSPC;
+ }
+ val = simple_strtoul(str, &n, 0);
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ printk(KERN_INFO QIB_DRV_NAME
+			" txselect_values must start with a number < %d\n",
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ return -EINVAL;
+ }
+ strcpy(txselect_list, str);
+
+ list_for_each_entry(dd, &qib_dev_list, list)
+ if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
+ set_no_qsfp_atten(dd, 1);
+ return 0;
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7322_initreg(struct qib_devdata *dd)
+{
+ int ret = 0, n;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+
+ n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
+ /* driver sends get pkey, lid, etc. checking also, to catch bugs */
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ qib_register_observer(dd, &sendctrl_0_observer);
+ qib_register_observer(dd, &sendctrl_1_observer);
+
+ dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+ /*
+ * Set SendDmaFetchPriority and init Tx params, including
+ * QSFP handler on boards that have QSFP.
+ * First set our default attenuation entry for cables that
+ * don't have valid attenuation.
+ */
+ set_no_qsfp_atten(dd, 0);
+ for (n = 0; n < dd->num_pports; ++n) {
+ struct qib_pportdata *ppd = dd->pport + n;
+
+ qib_write_kreg_port(ppd, krp_senddmaprioritythld,
+ sdma_fetch_prio & 0xf);
+ /* Initialize qsfp if present on board. */
+ if (dd->flags & QIB_HAS_QSFP)
+ qib_init_7322_qsfp(ppd);
+ }
+ dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return ret;
+}
+
+/* per IB port errors. */
+#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
+ MASK_ACROSS(8, 15))
+#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
+#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
+ MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
+ MASK_ACROSS(0, 11))
+
+/*
+ * Write the initialization per-port registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c).
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_init_portregs(struct qib_pportdata *ppd)
+{
+ u64 val;
+ int i;
+
+ if (!ppd->link_speed_supported) {
+ /* no buffer credits for this port */
+ for (i = 1; i < 8; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ return;
+ }
+
+ /*
+ * Set the number of supported virtual lanes in IBC,
+ * for flow control packet handling on unsupported VLs
+ */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
+ val |= (u64)(ppd->vls_supported - 1) <<
+ SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+ qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
+
+ /* enable tx header checking */
+ qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
+ IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
+ IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
+
+ qib_write_kreg_port(ppd, krp_ncmodectrl,
+ SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
+
+ /*
+ * Unconditionally clear the bufmask bits. If SDMA is
+ * enabled, we'll set them appropriately later.
+ */
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
+ if (ppd->dd->cspec->r1)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
+}
+
+/*
+ * Write the initialization per-device registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c). Also write per-port
+ * registers that are affected by overall device config, such as QP mapping.
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_initregs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i, pidx;
+ u64 val;
+
+ /* Set Multicast QPs received by port 2 to map to context one. */
+ qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ unsigned n, regno;
+ unsigned long flags;
+
+ if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+ continue;
+
+ ppd = &dd->pport[pidx];
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Initialize QP to context mapping */
+ regno = krp_rcvqpmaptable;
+ val = 0;
+ if (dd->num_pports > 1)
+ n = dd->first_user_ctxt / dd->num_pports;
+ else
+ n = dd->first_user_ctxt - 1;
+ for (i = 0; i < 32; ) {
+ unsigned ctxt;
+
+ if (dd->num_pports > 1)
+ ctxt = (i % n) * dd->num_pports + pidx;
+ else if (i % n)
+ ctxt = (i % n) + 1;
+ else
+ ctxt = ppd->hw_pidx;
+ val |= ctxt << (5 * (i % 6));
+ i++;
+ if (i % 6 == 0) {
+ qib_write_kreg_port(ppd, regno, val);
+ val = 0;
+ regno++;
+ }
+ }
+ qib_write_kreg_port(ppd, regno, val);
+ }
+
+ /*
+	 * Set up interrupt mitigation for kernel contexts, but
+ * not user contexts (user contexts use interrupts when
+ * stalled waiting for any packet, so want those interrupts
+ * right away).
+ */
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
+ }
+
+ /*
+ * Initialize as (disabled) rcvflow tables. Application code
+	 * will set up each flow as it uses the flow.
+ * Doesn't clear any of the error bits that might be set.
+ */
+ val = TIDFLOW_ERRBITS; /* these are W1C */
+ for (i = 0; i < dd->ctxtcnt; i++) {
+ int flow;
+ for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
+ qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
+ }
+
+ /*
+ * Dual-port cards init to dual-port recovery, single-port cards to
+ * the one port. Dual-port cards may later adjust to one port,
+ * and then back to dual port if both ports are connected.
+ */
+ if (dd->num_pports)
+ setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
+}
+
+static int qib_init_7322_variables(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned features, pidx, sbufcnt;
+ int ret, mtu;
+ u32 sbufs, updthresh;
+
+ /* pport structs are contiguous, allocated after devdata */
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ ppd[0].dd = dd;
+ ppd[1].dd = dd;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + 2);
+
+ ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
+ ppd[1].cpspec = &ppd[0].cpspec[1];
+ ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
+ ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
+
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
+ dd->cspec->r1 = dd->minrev == 1;
+
+ get_7322_chip_params(dd);
+ features = qib_7322_boardname(dd);
+
+ /* now that piobcnt2k and 4k set, we can allocate these */
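+ /* one check bit per 2k, 4k, and VL15 send buffer, rounded up to whole longs */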
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
+ NUM_VL15_BUFS + BITS_PER_LONG - 1;
+ sbufcnt /= BITS_PER_LONG;
+ dd->cspec->sendchkenable = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
+ dd->cspec->sendgrhchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
+ dd->cspec->sendibchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
+ if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
+ !dd->cspec->sendibchk) {
+ qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ppd = dd->pport;
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
+ QIB_HAS_THRESH_UPDATE |
+ (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * Set up initial values. These may change when PAT is enabled, but
+ * we need these to do initial chip register accesses.
+ */
+ qib_7322_set_baseaddrs(dd);
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+
+ dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
+ /* all hwerrors become interrupts, unless special purposed */
+ dd->cspec->hwerrmask = ~0ULL;
+ /* link_recovery setup causes these errors, so ignore them,
+ * other than clearing them when they occur */
+ dd->cspec->hwerrmask &=
+ ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
+ SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
+ HWE_MASK(LATriggered));
+
+ for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
+ struct qib_chippport_specific *cp = ppd->cpspec;
+ ppd->link_speed_supported = features & PORT_SPD_CAP;
+ features >>= PORT_SPD_CAP_SHIFT;
+ if (!ppd->link_speed_supported) {
+ /* single port mode (7340, or configured) */
+ dd->skip_kctxt_mask |= 1 << pidx;
+ if (pidx == 0) {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ ppd[0] = ppd[1];
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_0)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_0));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_0) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_0) |
+ SYM_MASK(IntMask, SDmaIntMask_0) |
+ SYM_MASK(IntMask, ErrIntMask_0) |
+ SYM_MASK(IntMask, SendDoneIntMask_0));
+ } else {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_1)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_1));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_1) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_1) |
+ SYM_MASK(IntMask, SDmaIntMask_1) |
+ SYM_MASK(IntMask, ErrIntMask_1) |
+ SYM_MASK(IntMask, SendDoneIntMask_1));
+ }
+ continue;
+ }
+
+ dd->num_pports++;
+ qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+ * Set the initial values to reasonable defaults; they will be
+ * set for real when the link comes up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
+ switch (qib_num_cfg_vls) {
+ case 1:
+ ppd->vls_supported = IB_VL_VL0;
+ break;
+ case 2:
+ ppd->vls_supported = IB_VL_VL0_1;
+ break;
+ default:
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u, using 4 VLs\n",
+ qib_num_cfg_vls);
+ qib_num_cfg_vls = 4;
+ /* fall through */
+ case 4:
+ ppd->vls_supported = IB_VL_VL0_3;
+ break;
+ case 8:
+ if (mtu <= 2048)
+ ppd->vls_supported = IB_VL_VL0_7;
+ else {
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u for MTU %d "
+ ", using 4 VLs\n",
+ qib_num_cfg_vls, mtu);
+ ppd->vls_supported = IB_VL_VL0_3;
+ qib_num_cfg_vls = 4;
+ }
+ break;
+ }
+ ppd->vls_operational = ppd->vls_supported;
+
+ init_waitqueue_head(&cp->autoneg_wait);
+ INIT_DELAYED_WORK(&cp->autoneg_work,
+ autoneg_7322_work);
+ if (ppd->dd->cspec->r1)
+ INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
+
+ /*
+ * For Mez and similar cards, no qsfp info, so do
+ * the "cable info" setup here. Can be overridden
+ * in adapter-specific routines.
+ */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
+ if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
+ qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+ "Unknown mezzanine card type\n",
+ dd->unit, ppd->port);
+ cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
+ /*
+ * Choose center value as default tx serdes setting
+ * until changed through module parameter.
+ */
+ ppd->cpspec->no_eep = IS_QMH(dd) ?
+ TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
+ } else
+ cp->h1_val = H1_FORCE_VAL;
+
+ /* Avoid writes to chip for mini_init */
+ if (!qib_mini_init)
+ write_7322_init_portregs(ppd);
+
+ init_timer(&cp->chase_timer);
+ cp->chase_timer.function = reenable_chase;
+ cp->chase_timer.data = (unsigned long)ppd;
+
+ ppd++;
+ }
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ dd->rcvegrbufsize = max(mtu, 2048);
+
+ qib_7322_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset.
+ */
+ dd->rhdrhead_intr_off =
+ (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7322_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+
+ qib_7322_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+ if (ret)
+ goto bail;
+ }
+ qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+ if (!dd->num_pports) {
+ qib_dev_err(dd, "No ports enabled, giving up initialization\n");
+ goto bail; /* not an error, so state can still be examined to see why */
+ }
+
+ write_7322_initregs(dd);
+ ret = qib_create_ctxts(dd);
+ init_7322_cntrnames(dd);
+
+ updthresh = 8U; /* update threshold */
+
+ /*
+ * Use all of the 4KB buffers for kernel SDMA, or none if SDMA is
+ * disabled. Reserve the update threshold amount (or 3, whichever is
+ * greater) for other kernel use, such as sending SMI, MAD, and ACK
+ * packets; if the reserve were less than the update threshold, we
+ * could wait a long time for an update. When SDMA is disabled, all
+ * the 4k buffers go to the kernel. Coded this way because we
+ * sometimes change the update threshold for various reasons, and we
+ * want this to remain robust.
+ */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
+ dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
+
+ /*
+ * If we have 16 user contexts, we will have 7 sbufs
+ * per context, so reduce the update threshold to match. We
+ * want to update before we actually run out, so at low pbufs/ctxt
+ * give ourselves some margin.
+ */
+ if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld)) |
+ SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
+bail:
+ if (!dd->ctxtcnt)
+ dd->ctxtcnt = 1; /* for other initialization code */
+
+ return ret;
+}
+
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* last is same for 2k and 4k, because we use 4k if all 2k busy */
+ if (pbc & PBC_7322_VL15_SEND) {
+ first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
+ last = first;
+ } else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ last = dd->cspec->lastbuf_for_pio;
+ }
+ return qib_getsendbuf_range(dd, pbufnum, first, last);
+}
+
+static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ qib_write_kreg_port(ppd, krp_psinterval, intv);
+ qib_write_kreg_port(ppd, krp_psstart, start);
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
+}
+
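+/*
+ * Per-state enable/interrupt/halt/drain settings applied when the
+ * SDMA state is set.
+ */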
+static struct sdma_set_state_action sdma_7322_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_drain = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7322_action_table;
+}
+
+static int init_sdma_7322_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned lastbuf, erstbuf;
+ u64 senddmabufmask[3] = { 0 };
+ int n, ret = 0;
+
+ qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
+ qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
+
+ if (dd->num_pports)
+ n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
+ else
+ n = dd->cspec->sdmabufcnt; /* failsafe for init */
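+ /*
+ * SDMA uses the last sdmabufcnt send buffers; with two ports, port 1
+ * takes the lower half of that range and port 2 the upper half.
+ */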
+ erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
+ ((dd->num_pports == 1 || ppd->port == 2) ? n :
+ dd->cspec->sdmabufcnt);
+ lastbuf = erstbuf + n;
+
+ ppd->sdma_state.first_sendbuf = erstbuf;
+ ppd->sdma_state.last_sendbuf = lastbuf;
+ for (; erstbuf < lastbuf; ++erstbuf) {
+ unsigned word = erstbuf / BITS_PER_LONG;
+ unsigned bit = erstbuf & (BITS_PER_LONG - 1);
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
+ return ret;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16) le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16) qib_read_kreg_port(ppd, krp_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
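+ /*
+ * The hardware head is trusted only if it lies between the software
+ * head and tail, allowing for wrap.
+ */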
+ if (swhead < swtail)
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
+static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * The delay affects the next packet, and the amount of the delay is
+ * based on the length of this packet.
+ */
+static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret;
+
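+ /* Delay only when the QP's static rate is slower than the port's rate */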
+ ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
+
+ /* Indicate VL15, else set the VL in the control word */
+ if (vl == 15)
+ ret |= PBC_7322_VL15_SEND_CTRL;
+ else
+ ret |= vl << PBC_VL_NUM_LSB;
+ ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
+
+ return ret;
+}
+
+/*
+ * Enable the per-port VL15 send buffers for use.
+ * They follow the rest of the buffers, without a config parameter.
+ * This was in initregs, but that runs before the shadow is
+ * set up, whereas this must be done after it is.
+ */
+static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
+{
+ unsigned vl15bufs;
+
+ vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
+ qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
+ TXCHK_CHG_TYPE_KERN, NULL);
+}
+
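+/*
+ * The first NUM_IB_PORTS contexts are the kernel contexts; they split
+ * KCTXT0_EGRCNT eager entries when both ports are in use. Later
+ * contexts use the chip-specific count, laid out after the kernel ones.
+ */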
+static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ if (rcd->ctxt < NUM_IB_PORTS) {
+ if (rcd->dd->num_pports > 1) {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
+ rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
+ } else {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ }
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
+ (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
+ }
+}
+
+#define QTXSLEEPS 5000
+static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ const int last = start + len - 1;
+ const int lastr = last / BITS_PER_LONG;
+ u32 sleeps = 0;
+ int wait = rcd != NULL;
+ unsigned long flags;
+
+ while (wait) {
+ unsigned long shadow;
+ int cstart, previ = -1;
+
+ /*
+ * When flipping from kernel to user, we can't change
+ * the checking type if the buffer is allocated to the
+ * driver. The other direction is OK, because it happens
+ * at close, and we have just disarmed all the
+ * buffers. All the kernel-to-kernel changes are also
+ * OK.
+ */
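+ /*
+ * Scan the DMA'ed copy of the pioavail registers for any
+ * buffer in the range that is still busy.
+ */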
+ for (cstart = start; cstart <= last; cstart++) {
+ i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ / BITS_PER_LONG;
+ if (i != previ) {
+ shadow = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ previ = i;
+ }
+ if (test_bit(((2 * cstart) +
+ QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ % BITS_PER_LONG, &shadow))
+ break;
+ }
+
+ if (cstart > last)
+ break;
+
+ if (sleeps == QTXSLEEPS)
+ break;
+ /* make sure we see an updated copy next time around */
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ sleeps++;
+ msleep(1);
+ }
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_DIS1:
+ /*
+ * disable checking on a range; used by diags; just
+ * one buffer, but still written generically
+ */
+ for (i = start; i <= last; i++)
+ clear_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_ENAB1:
+ /*
+ * (re)enable checking on a range; used by diags; just
+ * one buffer, but still written generically; read
+ * scratch to be sure buffer actually triggered, not
+ * just flushed from processor.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+ for (i = start; i <= last; i++)
+ set_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_KERN:
+ /* usable by kernel */
+ for (i = start; i <= last; i++) {
+ set_bit(i, dd->cspec->sendibchk);
+ clear_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /* see if we need to raise avail update threshold */
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+
+ case TXCHK_CHG_TYPE_USER:
+ /* for user process */
+ for (i = start; i <= last; i++) {
+ clear_bit(i, dd->cspec->sendibchk);
+ set_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
+ qib_write_kreg(dd, kr_sendcheckmask + i,
+ dd->cspec->sendchkenable[i]);
+
+ for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
+ qib_write_kreg(dd, kr_sendgrhcheckmask + i,
+ dd->cspec->sendgrhchk[i]);
+ qib_write_kreg(dd, kr_sendibpktmask + i,
+ dd->cspec->sendibchk[i]);
+ }
+
+ /*
+ * Be sure whatever we did was seen by the chip and acted upon,
+ * before we return. Mostly important for which >= 2.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+}
+
+
+/* useful for trigger analyzers, etc. */
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+/* Dummy for now, use chip regs soon */
+static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/**
+ * qib_init_iba7322_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for this qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, inits, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret, i;
+ u32 tabsize, actual_cnt = 0;
+
+ dd = qib_alloc_devdata(pdev,
+ NUM_IB_PORTS * sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific) +
+ NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7322_bringup_serdes;
+ dd->f_cleanup = qib_setup_7322_cleanup;
+ dd->f_clear_tids = qib_7322_clear_tids;
+ dd->f_free_irq = qib_7322_free_irq;
+ dd->f_get_base_info = qib_7322_get_base_info;
+ dd->f_get_msgheader = qib_7322_get_msgheader;
+ dd->f_getsendbuf = qib_7322_getsendbuf;
+ dd->f_gpio_mod = gpio_7322_mod;
+ dd->f_eeprom_wen = qib_7322_eeprom_wen;
+ dd->f_hdrqempty = qib_7322_hdrqempty;
+ dd->f_ib_updown = qib_7322_ib_updown;
+ dd->f_init_ctxt = qib_7322_init_ctxt;
+ dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
+ dd->f_intr_fallback = qib_7322_intr_fallback;
+ dd->f_late_initreg = qib_late_7322_initreg;
+ dd->f_setpbc_control = qib_7322_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7322;
+ dd->f_put_tid = qib_7322_put_tid;
+ dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7322_mod;
+ dd->f_read_cntrs = qib_read_7322cntrs;
+ dd->f_read_portcntrs = qib_read_7322portcntrs;
+ dd->f_reset = qib_do_7322_reset;
+ dd->f_init_sdma_regs = init_sdma_7322_regs;
+ dd->f_sdma_busy = qib_sdma_7322_busy;
+ dd->f_sdma_gethead = qib_sdma_7322_gethead;
+ dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
+ dd->f_sendctrl = sendctrl_7322_mod;
+ dd->f_set_armlaunch = qib_set_7322_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
+ dd->f_iblink_state = qib_7322_iblink_state;
+ dd->f_ibphys_portstate = qib_7322_phys_portstate;
+ dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7322_set_loopback;
+ dd->f_get_ib_table = qib_7322_get_ib_table;
+ dd->f_set_ib_table = qib_7322_set_ib_table;
+ dd->f_set_intr_state = qib_7322_set_intr_state;
+ dd->f_setextled = qib_setup_7322_setextled;
+ dd->f_txchk_change = qib_7322_txchk_change;
+ dd->f_update_usrhead = qib_update_7322_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
+ dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
+ dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7322_sdma_init_early;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7322_tempsense_rd;
+ /*
+ * Do remaining PCIe setup and save PCIe values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7322_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7322_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init || !dd->num_pports)
+ goto bail;
+
+ /*
+ * Determine number of vectors we want; depends on port count
+ * and number of configured kernel receive queues actually used.
+ * Should also depend on whether sdma is enabled or not, but
+ * that's such a rare testing case it's not worth worrying about.
+ */
+ tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
+ for (i = 0; i < tabsize; i++)
+ if ((i < ARRAY_SIZE(irq_table) &&
+ irq_table[i].port <= dd->num_pports) ||
+ (i >= ARRAY_SIZE(irq_table) &&
+ dd->rcd[i - ARRAY_SIZE(irq_table)]))
+ actual_cnt++;
+ tabsize = actual_cnt;
+ dd->cspec->msix_entries = kmalloc(tabsize *
+ sizeof(struct msix_entry), GFP_KERNEL);
+ dd->cspec->msix_arg = kmalloc(tabsize *
+ sizeof(void *), GFP_KERNEL);
+ if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+ qib_dev_err(dd, "No memory for MSIx table\n");
+ tabsize = 0;
+ }
+ for (i = 0; i < tabsize; i++)
+ dd->cspec->msix_entries[i].entry = i;
+
+ if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ /* may be less than we wanted, if not enough available */
+ dd->cspec->num_msix_entries = tabsize;
+
+ /* setup interrupt handler */
+ qib_setup_7322_interrupt(dd, 1);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
+
+/*
+ * Set the table entry at the specified index from the specified table.
+ * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
+ * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
+ * 'idx' below addresses the correct entry, while its 4 LSBs select the
+ * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
+ */
+#define DDS_ENT_AMP_LSB 14
+#define DDS_ENT_MAIN_LSB 9
+#define DDS_ENT_POST_LSB 5
+#define DDS_ENT_PRE_XTRA_LSB 3
+#define DDS_ENT_PRE_LSB 0
+
+/*
+ * Set one entry in the TxDDS table for the specified port.
+ * ridx picks one of the entries, while tp points
+ * to the appropriate table entry.
+ */
+static void set_txdds(struct qib_pportdata *ppd, int ridx,
+ const struct txdds_ent *tp)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 pack_ent;
+ int regidx;
+
+ /* Get correct offset in chip-space, and in source table */
+ regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
+ /*
+ * We do not use qib_write_kreg_port() because it was intended
+ * only for registers in the lower "port specific" pages.
+ * So do index calculation by hand.
+ */
+ if (ppd->hw_pidx)
+ regidx += (dd->palign / sizeof(u64));
+
+ pack_ent = tp->amp << DDS_ENT_AMP_LSB;
+ pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
+ pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
+ pack_ent |= tp->post << DDS_ENT_POST_LSB;
+ qib_write_kreg(dd, regidx, pack_ent);
+ /* Prevent back-to-back writes by hitting scratch */
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+static const struct vendor_txdds_ent vendor_txdds[] = {
+ { /* Amphenol 1m 30awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470002 ",
+ { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
+ },
+ { /* Amphenol 3m 28awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470004 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
+ },
+ { /* Finisar 3m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
+ { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
+ },
+ { /* Finisar 30m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
+ { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
+ },
+ { /* Finisar Default OM2 Optical */
+ { 0x00, 0x90, 0x65 }, NULL,
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
+ },
+ { /* Gore 1m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 2m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 1m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 3m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 5m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
+ { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
+ },
+ { /* Gore 7m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
+ },
+ { /* Gore 5m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
+ },
+ { /* Gore 7m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
+ },
+ { /* Intersil 12m 24awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
+ },
+ { /* Intersil 10m 28awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
+ },
+ { /* Intersil 7m 30awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
+ },
+ { /* Intersil 5m 32awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
+ },
+ { /* Intersil Default Active */
+ { 0x00, 0x30, 0xB4 }, NULL,
+ { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
+ },
+ { /* Luxtera 20m Active Optical */
+ { 0x00, 0x25, 0x63 }, NULL,
+ { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
+ },
+ { /* Molex 1M Cu loopback */
+ { 0x00, 0x09, 0x3A }, "74763-0025 ",
+ { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
+ },
+ { /* Molex 2m 28awg NoEq */
+ { 0x00, 0x09, 0x3A }, "74757-2201 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
+ },
+};
+
+static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 1 }, /* 2 dB */
+ { 0, 0, 0, 2 }, /* 3 dB */
+ { 0, 0, 0, 3 }, /* 4 dB */
+ { 0, 0, 0, 4 }, /* 5 dB */
+ { 0, 0, 0, 5 }, /* 6 dB */
+ { 0, 0, 0, 6 }, /* 7 dB */
+ { 0, 0, 0, 7 }, /* 8 dB */
+ { 0, 0, 0, 8 }, /* 9 dB */
+ { 0, 0, 0, 9 }, /* 10 dB */
+ { 0, 0, 0, 10 }, /* 11 dB */
+ { 0, 0, 0, 11 }, /* 12 dB */
+ { 0, 0, 0, 12 }, /* 13 dB */
+ { 0, 0, 0, 13 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 8 }, /* 2 dB */
+ { 0, 0, 0, 8 }, /* 3 dB */
+ { 0, 0, 0, 9 }, /* 4 dB */
+ { 0, 0, 0, 9 }, /* 5 dB */
+ { 0, 0, 0, 10 }, /* 6 dB */
+ { 0, 0, 0, 10 }, /* 7 dB */
+ { 0, 0, 0, 11 }, /* 8 dB */
+ { 0, 0, 0, 11 }, /* 9 dB */
+ { 0, 0, 0, 12 }, /* 10 dB */
+ { 0, 0, 0, 12 }, /* 11 dB */
+ { 0, 0, 0, 13 }, /* 12 dB */
+ { 0, 0, 0, 13 }, /* 13 dB */
+ { 0, 0, 0, 14 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
+ { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
+ { 0, 1, 0, 11 }, /* 4 dB */
+ { 0, 1, 0, 13 }, /* 5 dB */
+ { 0, 1, 0, 15 }, /* 6 dB */
+ { 0, 1, 3, 15 }, /* 7 dB */
+ { 0, 1, 7, 15 }, /* 8 dB */
+ { 0, 1, 7, 15 }, /* 9 dB */
+ { 0, 1, 8, 15 }, /* 10 dB */
+ { 0, 1, 9, 15 }, /* 11 dB */
+ { 0, 1, 10, 15 }, /* 12 dB */
+ { 0, 2, 6, 15 }, /* 13 dB */
+ { 0, 2, 7, 15 }, /* 14 dB */
+ { 0, 2, 8, 15 }, /* 15 dB */
+ { 0, 2, 9, 15 }, /* 16 dB */
+};
+
+/*
+ * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
+ * These are mostly used for mez cards going through connectors
+ * and backplane traces, but can be used to add other "unusual"
+ * table values as well.
+ */
+static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 1, 12, 10 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 11 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 12 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 14 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 6 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 7 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 8 }, /* QME7342 backplane setting */
+};
+
+static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+ unsigned atten)
+{
+ /*
+ * The attenuation table starts at 2dB for entry 1,
+ * with entry 0 being the loopback entry.
+ */
+ if (atten <= 2)
+ atten = 1;
+ else if (atten > TXDDS_TABLE_SZ)
+ atten = TXDDS_TABLE_SZ - 1;
+ else
+ atten--;
+ return txdds + atten;
+}
+
+/*
+ * if override is set, the module parameter txselect has a value
+ * for this specific port, so use it, rather than our normal mechanism.
+ */
+static void find_best_ent(struct qib_pportdata *ppd,
+ const struct txdds_ent **sdr_dds,
+ const struct txdds_ent **ddr_dds,
+ const struct txdds_ent **qdr_dds, int override)
+{
+ struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
+ int idx;
+
+ /* Search table of known cables */
+ for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
+ const struct vendor_txdds_ent *v = vendor_txdds + idx;
+
+ if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
+ (!v->partnum ||
+ !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
+ *sdr_dds = &v->sdr;
+ *ddr_dds = &v->ddr;
+ *qdr_dds = &v->qdr;
+ return;
+ }
+ }
+
+ /* Lookup serdes setting by cable type and attenuation */
+ if (!override && QSFP_IS_ACTIVE(qd->tech)) {
+ *sdr_dds = txdds_sdr + ppd->dd->board_atten;
+ *ddr_dds = txdds_ddr + ppd->dd->board_atten;
+ *qdr_dds = txdds_qdr + ppd->dd->board_atten;
+ return;
+ }
+
+ if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
+ qd->atten[1])) {
+ *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
+ *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
+ *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
+ return;
+ } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
+ /*
+ * If we have no (or incomplete) data from the cable
+ * EEPROM, or no QSFP, or override is set, use the
+ * module parameter value to index into the attenuation
+ * table.
+ */
+ idx = ppd->cpspec->no_eep;
+ *sdr_dds = &txdds_sdr[idx];
+ *ddr_dds = &txdds_ddr[idx];
+ *qdr_dds = &txdds_qdr[idx];
+ } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ /* similar to above, but index into the "extra" table. */
+ idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
+ *sdr_dds = &txdds_extra_sdr[idx];
+ *ddr_dds = &txdds_extra_ddr[idx];
+ *qdr_dds = &txdds_extra_qdr[idx];
+ } else {
+ /* this shouldn't happen, it's range checked */
+ *sdr_dds = txdds_sdr + qib_long_atten;
+ *ddr_dds = txdds_ddr + qib_long_atten;
+ *qdr_dds = txdds_qdr + qib_long_atten;
+ }
+}
+
+static void init_txdds_table(struct qib_pportdata *ppd, int override)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+ int idx;
+ int single_ent = 0;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
+
+ /* for mez cards or override, use the selected value for all entries */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
+ single_ent = 1;
+
+ /* Fill in the first entry with the best entry found. */
+ set_txdds(ppd, 0, sdr_dds);
+ set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
+ set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
+ if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) {
+ dds = (struct txdds_ent *)(ppd->link_speed_active ==
+ QIB_IB_QDR ? qdr_dds :
+ (ppd->link_speed_active ==
+ QIB_IB_DDR ? ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+ }
+
+ /* Fill in the remaining entries with the default table values. */
+ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
+ set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
+ set_txdds(ppd, idx + TXDDS_TABLE_SZ,
+ single_ent ? ddr_dds : txdds_ddr + idx);
+ set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
+ single_ent ? qdr_dds : txdds_qdr + idx);
+ }
+}
+
+#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
+#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
+#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
+#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
+#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
+#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
+#define AHB_TRANS_TRIES 10
+
+/*
+ * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
+ * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
+ * for the channel argument.
+ */
+static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
+ u32 data, u32 mask)
+{
+ u32 rd_data, wr_data, sz_mask;
+ u64 trans, acc, prev_acc;
+ u32 ret = 0xBAD0BAD;
+ int tries;
+
+ prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
+ /* From this point on, make sure we return access */
+ acc = (quad << 1) | 1;
+ qib_write_kreg(dd, KR_AHB_ACC, acc);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
+ goto bail;
+ }
+
+ /* If mask is not all 1s, we need to read, but different SerDes
+ * entities have different sizes
+ */
+ sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
+ wr_data = data & mask & sz_mask;
+ if ((~mask & sz_mask) != 0) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ /* Re-read in case host split reads and read data first */
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
+ wr_data |= (rd_data & ~mask & sz_mask);
+ }
+
+ /* If mask is not zero, we need to write. */
+ if (mask & sz_mask) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
+ trans |= AHB_WR;
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ }
+ ret = wr_data;
+bail:
+ qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
+ return ret;
+}
+
+static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
+ unsigned mask)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int chan;
+ u32 rbc;
+
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+ data, mask);
+ rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ addr, 0, 0);
+ }
+}
+
+static int serdes_7322_init(struct qib_pportdata *ppd)
+{
+ u64 data;
+ u32 le_val;
+
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
+ /* ensure no tx overrides from earlier driver loads */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
+ /* Patch some SerDes defaults to "Better for IB" */
+ /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+
+ /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+ /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
+ ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
+
+ /* May be overridden in qsfp_7322_event */
+ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+
+ /* enable LE1 adaptation for all but QME, which is disabled */
+ le_val = IS_QME(ppd->dd) ? 0 : 1;
+ ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
+
+ /* Clear cmode-override, may be set from older driver */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+ /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
+ ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
+
+ /* setup LoS params; these are subsystem, so chan == 5 */
+ /* LoS filter threshold_count on, ch 0-3, set to 8 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+ /* LoS filter threshold_count off, ch 0-3, set to 4 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+ /* LoS filter select enabled */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+ /* LoS target data: SDR=4, DDR=2, QDR=1 */
+ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+ data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ qib_write_kreg_port(ppd, krp_serdesctrl, data |
+ SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+
+ /* rxbistena; set to 0 to avoid effects of it switching later */
+ ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
+
+ /* Configure 4 DFE taps, and only they adapt */
+ ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
+
+ /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+ ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+
+ /*
+ * Set receive adaptation mode. SDR and DDR adaptation are
+ * always on, and QDR is initially enabled; later disabled.
+ */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+ ppd->cpspec->qdr_dfe_on = 1;
+
+ /* FLoop LOS gate: PPM filter enabled */
+ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+ /* rx offset center enabled */
+ ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
+
+ if (!ppd->dd->cspec->r1) {
+ ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
+ ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
+ }
+
+ /* Set the frequency loop bandwidth to 15 */
+ ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
+
+ return 0;
+}
+
+/* start of QMH serdes parameter adjustment code */
+
+static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 9, code << 9, 0x3f << 9);
+}
+
+static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
+ int enable, u32 tapenable)
+{
+ if (enable)
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 3 << 10, 0x1f << 10);
+ else
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 0, 0x1f << 10);
+}
+
+/* Set clock to 1, 0, 1, 0 */
+static void clock_man(struct qib_pportdata *ppd, int chan)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+}
+
+/*
+ * Write the current Tx serdes pre, post, main, and amp settings into the serdes.
+ * The caller must pass the settings appropriate for the current speed,
+ * or not care if they are correct for the current speed.
+ */
+static void write_tx_serdes_param(struct qib_pportdata *ppd,
+ struct txdds_ent *txdds)
+{
+ u64 deemph;
+
+ deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
+ /* field names for amp, main, post, pre, respectively */
+ deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
+
+ deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ tx_override_deemphasis_select);
+ deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a);
+ deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena);
+ deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena);
+ deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena);
+ qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
+}
+
+/*
+ * Set the parameters for mez cards on link bounce, so they are
+ * always exactly what was requested. Similar logic to init_txdds_table(),
+ * but writes just the serdes registers.
+ */
+static void adj_tx_serdes(struct qib_pportdata *ppd)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
+ dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
+ qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
+ ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+}
+
+/* set QDR forced value for H1, if needed */
+static void force_h1(struct qib_pportdata *ppd)
+{
+ int chan;
+
+ ppd->cpspec->qdr_reforce = 0;
+ if (!ppd->dd->cspec->r1)
+ return;
+
+ for (chan = 0; chan < SERDES_CHANS; chan++) {
+ set_man_mode_h1(ppd, chan, 1, 0);
+ set_man_code(ppd, chan, ppd->cpspec->h1_val);
+ clock_man(ppd, chan);
+ set_man_mode_h1(ppd, chan, 0, 0);
+ }
+}
+
+#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
+#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
+
+#define R_OPCODE_LSB 3
+#define R_OP_NOP 0
+#define R_OP_SHIFT 2
+#define R_OP_UPDATE 3
+#define R_TDI_LSB 2
+#define R_TDO_LSB 1
+#define R_RDY 1
+
+static int qib_r_grab(struct qib_devdata *dd)
+{
+ u64 val;
+ val = SJA_EN;
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ return 0;
+}
+
+/* qib_r_wait_for_rdy() not only waits for the ready bit, it
+ * returns the current state of R_TDO
+ */
+static int qib_r_wait_for_rdy(struct qib_devdata *dd)
+{
+ u64 val;
+ int timeout;
+ for (timeout = 0; timeout < 100 ; ++timeout) {
+ val = qib_read_kreg32(dd, kr_r_access);
+ if (val & R_RDY)
+ return (val >> R_TDO_LSB) & 1;
+ }
+ return -1;
+}
+
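+/*
+ * Shift 'len' bits through the selected BIST chain: drive TDI from inp
+ * (if non-NULL), capture TDO into outp (if non-NULL), then return the
+ * chain to NOP.
+ */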
+static int qib_r_shift(struct qib_devdata *dd, int bisten,
+ int len, u8 *inp, u8 *outp)
+{
+ u64 valbase, val;
+ int ret, pos;
+
+ valbase = SJA_EN | (bisten << BISTEN_LSB) |
+ (R_OP_SHIFT << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ goto bail;
+ for (pos = 0; pos < len; ++pos) {
+ val = valbase;
+ if (outp) {
+ outp[pos >> 3] &= ~(1 << (pos & 7));
+ outp[pos >> 3] |= (ret << (pos & 7));
+ }
+ if (inp) {
+ int tdi = inp[pos >> 3] >> (pos & 7);
+ val |= ((tdi & 1) << R_TDI_LSB);
+ }
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ break;
+ }
+ /* Restore to NOP between operations. */
+ val = SJA_EN | (bisten << BISTEN_LSB);
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+
+ if (ret >= 0)
+ ret = pos;
+bail:
+ return ret;
+}
+
+static int qib_r_update(struct qib_devdata *dd, int bisten)
+{
+ u64 val;
+ int ret;
+
+ val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret >= 0) {
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ return ret;
+}
+
+#define BISTEN_PORT_SEL 15
+#define LEN_PORT_SEL 625
+#define BISTEN_AT 17
+#define LEN_AT 156
+#define BISTEN_ETM 16
+#define LEN_ETM 632
+
+#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+
+/* these are common for all IB port use cases. */
+static u8 reset_at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
+ 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
+ 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
+ 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
+ 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+};
+static u8 at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+
+/* used for IB1 or IB2, only one in use */
+static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
+ 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
+ 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
+ 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
+ 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB1 is in use */
+static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB2 is in use */
+static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
+ 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
+ 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/*
+ * Do setup to properly handle IB link recovery; if 'both' is set, we
+ * are initializing to cover both ports; otherwise we are initializing
+ * to cover a single port card, or the port has reached INIT and we may
+ * need to switch coverage types.
+ */
+static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
+{
+ u8 *portsel, *etm;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!ppd->dd->cspec->r1)
+ return;
+ if (!both) {
+ dd->cspec->recovery_ports_initted++;
+ ppd->cpspec->recovery_init = 1;
+ }
+ if (!both && dd->cspec->recovery_ports_initted == 1) {
+ portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
+ etm = atetm_1port;
+ } else {
+ portsel = portsel_2port;
+ etm = atetm_2port;
+ }
+
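+ /*
+ * Reset the ETM and AT chains, select the port(s), then load the
+ * AT and ETM patterns.
+ */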
+ if (qib_r_grab(dd) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
+ portsel, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0)
+ qib_dev_err(dd, "Failed IB link recovery setup\n");
+}
+
+static void check_7322_rxe_status(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 fmask;
+
+ if (dd->cspec->recovery_ports_initted != 1)
+ return; /* rest doesn't apply to dualport */
+ qib_write_kreg(dd, kr_control, dd->control |
+ SYM_MASK(Control, FreezeMode));
+ (void)qib_read_kreg64(dd, kr_scratch);
+ udelay(3); /* ibcreset asserted 400ns, be sure that's over */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask) {
+ /*
+ * require a powercycle before we'll work again, and make
+ * sure we get no more interrupts, and don't turn off
+ * freeze.
+ */
+ ppd->dd->cspec->stay_in_freeze = 1;
+ qib_7322_set_intr_state(ppd->dd, 0);
+ qib_write_kreg(dd, kr_fmask, 0ULL);
+ qib_dev_err(dd, "HCA unusable until powercycled\n");
+ return; /* eventually reset */
+ }
+
+ qib_write_kreg(ppd->dd, kr_hwerrclear,
+ SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
+
+ /* don't do the full clear_freeze(), not needed for this */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+ /* take IBC out of reset */
+ if (ppd->link_speed_supported) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_read_kreg32(dd, kr_scratch);
+ if (ppd->lflags & QIBL_IB_LINK_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 00000000000..9b40f345ac3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1586 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * min buffers we want to have per context, after those the driver itself uses
+ */
+#define QIB_MIN_USER_CTXT_BUFCNT 7
+
+#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
+#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
+#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
+
+/*
+ * Number of ctxts we are configured to use (to allow for more pio
+ * buffers per ctxt, etc.) Zero means use chip value.
+ */
+ushort qib_cfgctxts;
+module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
+MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
+
+/*
+ * If set, do not write to any regs if avoidable; a hack to allow
+ * checking for deranged default register values.
+ */
+ushort qib_mini_init;
+module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
+MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
+
+unsigned qib_n_krcv_queues;
+module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
+
+/*
+ * qib_wc_pat parameter:
+ * 0 is WC via MTRR
+ * 1 is WC via PAT
+ * If PAT initialization fails, code reverts back to MTRR
+ */
+unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
+module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
+MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
+
+struct workqueue_struct *qib_wq;
+struct workqueue_struct *qib_cq_wq;
+
+static void verify_interrupt(unsigned long);
+
+static struct idr qib_unit_table;
+u32 qib_cpulist_count;
+unsigned long *qib_cpulist;
+
+/* set number of contexts we'll actually use */
+void qib_set_ctxtcnt(struct qib_devdata *dd)
+{
+ if (!qib_cfgctxts)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts < dd->num_pports)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts <= dd->ctxtcnt)
+ dd->cfgctxts = qib_cfgctxts;
+ else
+ dd->cfgctxts = dd->ctxtcnt;
+}
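+
+/*
+ * Illustrative example of the clamping above (hypothetical values, not
+ * from any particular chip): with dd->ctxtcnt == 18 and num_pports == 2,
+ * cfgctxts=0 or an out-of-range cfgctxts=40 both yield dd->cfgctxts == 18,
+ * cfgctxts=1 (below the port count) also falls back to 18, and
+ * cfgctxts=10 is honored as dd->cfgctxts == 10.
+ */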
+
+/*
+ * Common code for creating the receive context array.
+ */
+int qib_create_ctxts(struct qib_devdata *dd)
+{
+ unsigned i;
+ int ret;
+
+ /*
+ * Allocate full ctxtcnt array, rather than just cfgctxts, because
+ * cleanup iterates across all possible ctxts.
+ */
+ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+ if (!dd->rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata array, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* create (one or more) kctxt */
+ for (i = 0; i < dd->first_user_ctxt; ++i) {
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+
+ if (dd->skip_kctxt_mask & (1 << i))
+ continue;
+
+ ppd = dd->pport + (i % dd->num_pports);
+ rcd = qib_create_ctxtdata(ppd, i);
+ if (!rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata"
+ " for Kernel ctxt, failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
+ rcd->seq_cnt = 1;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+/*
+ * Common code for user and kernel context setup.
+ */
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+
+ rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ if (rcd) {
+ INIT_LIST_HEAD(&rcd->qp_wait_list);
+ rcd->ppd = ppd;
+ rcd->dd = dd;
+ rcd->cnt = 1;
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+
+ dd->f_init_ctxt(rcd);
+
+ /*
+ * To avoid wasting a lot of memory, we allocate 32KB chunks
+ * of physically contiguous memory, advance through it until
+ * used up and then allocate more. Of course, we need
+ * memory to store those extra pointers, now. 32KB seems to
+ * be the most that is "safe" under memory pressure
+ * (creating large files and then copying them over
+ * NFS while doing lots of MPI jobs). The OOM killer can
+ * get invoked even though we say we can sleep, and this can
+ * cause significant system problems.
+ */
+ rcd->rcvegrbuf_size = 0x8000;
+ rcd->rcvegrbufs_perchunk =
+ rcd->rcvegrbuf_size / dd->rcvegrbufsize;
+ rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
+ rcd->rcvegrbufs_perchunk - 1) /
+ rcd->rcvegrbufs_perchunk;
+ }
+ return rcd;
+}
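+
+/*
+ * Worked example of the chunk math above (hypothetical sizes): with
+ * dd->rcvegrbufsize == 4096, each 32KB (0x8000) chunk holds
+ * rcvegrbufs_perchunk == 8 eager buffers, so a context with
+ * rcvegrcnt == 100 needs rcvegrbuf_chunks == (100 + 7) / 8 == 13 chunks.
+ */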
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
+ u8 hw_pidx, u8 port)
+{
+ ppd->dd = dd;
+ ppd->hw_pidx = hw_pidx;
+ ppd->port = port; /* IB port number, not index */
+
+ spin_lock_init(&ppd->sdma_lock);
+ spin_lock_init(&ppd->lflags_lock);
+ init_waitqueue_head(&ppd->state_wait);
+
+ init_timer(&ppd->symerr_clear_timer);
+ ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
+ ppd->symerr_clear_timer.data = (unsigned long)ppd;
+}
+
+static int init_pioavailregs(struct qib_devdata *dd)
+{
+ int ret, pidx;
+ u64 *status_page;
+
+ dd->pioavailregs_dma = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
+ GFP_KERNEL);
+ if (!dd->pioavailregs_dma) {
+ qib_dev_err(dd, "failed to allocate PIOavail reg area "
+ "in memory\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * We really want L2 cache aligned, but for current CPUs of
+ * interest, they are the same.
+ */
+ status_page = (u64 *)
+ ((char *) dd->pioavailregs_dma +
+ ((2 * L1_CACHE_BYTES +
+ dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
+ /* device status comes first, for backwards compatibility */
+ dd->devstatusp = status_page;
+ *status_page++ = 0;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ dd->pport[pidx].statusp = status_page;
+ *status_page++ = 0;
+ }
+
+ /*
+ * Setup buffer to hold freeze and other messages, accessible to
+ * apps, following statusp. This is per-unit, not per port.
+ */
+ dd->freezemsg = (char *) status_page;
+ *dd->freezemsg = 0;
+ /* length of msg buffer is "whatever is left" */
+ ret = (char *) status_page - (char *) dd->pioavailregs_dma;
+ dd->freezelen = PAGE_SIZE - ret;
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the qlogic_ib device
+ *
+ * allocate the shadow TID array, so we can qib_munlock previous
+ * entries. It may make more sense to move the pageshadow to the
+ * ctxt data structure, so we only allocate memory for ctxts actually
+ * in use, since we are at about 8KB per ctxt now.
+ * We don't want failures here to prevent use of the driver/chip,
+ * so no return value.
+ */
+static void init_shadow_tids(struct qib_devdata *dd)
+{
+ struct page **pages;
+ dma_addr_t *addrs;
+
+ pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ if (!pages) {
+ qib_dev_err(dd, "failed to allocate shadow page * "
+ "array, no expected sends!\n");
+ goto bail;
+ }
+
+ addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+ if (!addrs) {
+ qib_dev_err(dd, "failed to allocate shadow dma handle "
+ "array, no expected sends!\n");
+ goto bail_free;
+ }
+
+ memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
+ dd->pageshadow = pages;
+ dd->physshadow = addrs;
+ return;
+
+bail_free:
+ vfree(pages);
+bail:
+ dd->pageshadow = NULL;
+}
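+
+/*
+ * Sizing sketch for the arrays above (hypothetical rcvtidcnt of 512,
+ * 64-bit kernel): each context needs 512 * 8 bytes of page pointers
+ * plus 512 * 8 bytes of dma_addr_t, i.e. about 8KB per context, which
+ * is why the comment above suggests a per-context allocation instead.
+ */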
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
+ qib_dev_err(dd, "Driver only handles version %d, "
+ "chip swversion is %d (%llx), failng\n",
+ QIB_CHIP_SWVERSION,
+ (int)(dd->revision >>
+ QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK,
+ (unsigned long long) dd->revision);
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
+ qib_devinfo(dd->pcidev, "%s", dd->boardversion);
+
+ spin_lock_init(&dd->pioavail_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->qib_diag_trans_lock);
+ spin_lock_init(&dd->eep_st_lock);
+ mutex_init(&dd->eep_lock);
+
+ if (qib_mini_init)
+ goto done;
+
+ ret = init_pioavailregs(dd);
+ init_shadow_tids(dd);
+
+ qib_get_eeprom_info(dd);
+
+ /* setup time (don't start yet) to verify we got interrupt */
+ init_timer(&dd->intrchk_timer);
+ dd->intrchk_timer.function = verify_interrupt;
+ dd->intrchk_timer.data = (unsigned long) dd;
+
+done:
+ return ret;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the qlogic_ib device
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit activity (explicitly, in case the
+ * reset failed).
+ */
+static int init_after_reset(struct qib_devdata *dd)
+{
+ int i;
+
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_pports; ++i) {
+ /*
+ * ctxt == -1 means "all contexts". Only really safe for
+ * _dis_abling things, as here.
+ */
+ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_TAILUPD_DIS, -1);
+ /* Redundant across ports for some, but no big deal. */
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
+ QIB_SENDCTRL_AVAIL_DIS);
+ }
+
+ return 0;
+}
+
+static void enable_chip(struct qib_devdata *dd)
+{
+ u64 rcvmask;
+ int i;
+
+ /*
+ * Enable PIO send, and update of PIOavail regs to memory.
+ */
+ for (i = 0; i < dd->num_pports; ++i)
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
+ QIB_SENDCTRL_AVAIL_ENB);
+ /*
+ * Enable kernel ctxts' receive and receive interrupt.
+ * Other ctxts done as user opens and inits them.
+ */
+ rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
+ rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
+ QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (rcd)
+ dd->f_rcvctrl(rcd->ppd, rcvmask, i);
+ }
+}
+
+static void verify_interrupt(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+
+ if (!dd)
+ return; /* being torn down */
+
+ /*
+ * If we don't have a lid or any interrupts, let the user know and
+ * don't bother checking again.
+ */
+ if (dd->int_counter == 0) {
+ if (!dd->f_intr_fallback(dd))
+ dev_err(&dd->pcidev->dev, "No interrupts detected, "
+ "not usable.\n");
+ else /* re-arm the timer to see if fallback works */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ }
+}
+
+static void init_piobuf_state(struct qib_devdata *dd)
+{
+ int i, pidx;
+ u32 uctxts;
+
+ /*
+ * Ensure all buffers are free, and fifos empty. Buffers
+ * are common, so only do once for port 0.
+ *
+ * After enable and qib_chg_pioavailkernel so we can safely
+ * enable pioavail updates and PIOENABLE. After this, packets
+ * are ready and able to go out.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
+
+ /*
+ * If the sendbufs don't divide evenly among contexts, give one extra
+ * buffer to each of the lower-numbered contexts. pbufsctxt and
+ * lastctxt_piobuf are
+ * calculated in chip-specific code because it may cause some
+ * chip-specific adjustments to be made.
+ */
+ uctxts = dd->cfgctxts - dd->first_user_ctxt;
+ dd->ctxts_extrabuf = dd->pbufsctxt ?
+ dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
+
+ /*
+ * Set up the shadow copies of the piobufavail registers,
+ * which we compare against the chip registers for now, and
+ * the in memory DMA'ed copies of the registers.
+ * By now pioavail updates to memory should have occurred, so
+ * copy them into our working/shadow registers; this is in
+ * case something went wrong with abort, but mostly to get the
+ * initial values of the generation bit correct.
+ */
+ for (i = 0; i < dd->pioavregs; i++) {
+ __le64 tmp;
+
+ tmp = dd->pioavailregs_dma[i];
+ /*
+ * Don't need to worry about pioavailkernel here
+ * because we will call qib_chg_pioavailkernel() later
+ * in initialization, to busy out buffers as needed.
+ */
+ dd->pioavailshadow[i] = le64_to_cpu(tmp);
+ }
+ while (i < ARRAY_SIZE(dd->pioavailshadow))
+ dd->pioavailshadow[i++] = 0; /* for debugging sanity */
+
+ /* after pioavailshadow is setup */
+ qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
+ TXCHK_CHG_TYPE_KERN, NULL);
+ dd->f_initvl15_bufs(dd);
+}
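+
+/*
+ * Example of the extra-buffer distribution above (hypothetical counts):
+ * with 16 user contexts (uctxts), pbufsctxt == 7 and
+ * lastctxt_piobuf == 118, ctxts_extrabuf == 118 - 7 * 16 == 6, so the
+ * six lowest-numbered user contexts each get one additional PIO buffer.
+ */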
+
+/**
+ * qib_init - do the actual initialization sequence on the chip
+ * @dd: the qlogic_ib device
+ * @reinit: reinitializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int qib_init(struct qib_devdata *dd, int reinit)
+{
+ int ret = 0, pidx, lastfail = 0;
+ u32 portok = 0;
+ unsigned i;
+ struct qib_ctxtdata *rcd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+
+ /* Set linkstate to unknown, so we can watch for a transition. */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKV);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ if (reinit)
+ ret = init_after_reset(dd);
+ else
+ ret = loadtime_init(dd);
+ if (ret)
+ goto done;
+
+ /* Bypass most chip-init, to get to device creation */
+ if (qib_mini_init)
+ return 0;
+
+ ret = dd->f_late_initreg(dd);
+ if (ret)
+ goto done;
+
+ /* dd->rcd can be NULL if early init failed */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ /*
+ * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ * Need to re-create rest of ctxt 0 ctxtdata as well.
+ */
+ rcd = dd->rcd[i];
+ if (!rcd)
+ continue;
+
+ lastfail = qib_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = qib_setup_eagerbufs(rcd);
+ if (lastfail) {
+ qib_dev_err(dd, "failed to allocate kernel ctxt's "
+ "rcvhdrq and/or egr bufs\n");
+ continue;
+ }
+ }
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ int mtu;
+ if (lastfail)
+ ret = lastfail;
+ ppd = dd->pport + pidx;
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1) {
+ mtu = QIB_DEFAULT_MTU;
+ qib_ibmtu = 0; /* don't leave invalid value */
+ }
+ /* set max we can ever have for this driver load */
+ ppd->init_ibmaxlen = min(mtu > 2048 ?
+ dd->piosize4k : dd->piosize2k,
+ dd->rcvegrbufsize +
+ (dd->rcvhdrentsize << 2));
+ /*
+ * Have to initialize ibmaxlen, but this will normally
+ * change immediately in qib_set_mtu().
+ */
+ ppd->ibmaxlen = ppd->init_ibmaxlen;
+ qib_set_mtu(ppd, mtu);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ lastfail = dd->f_bringup_serdes(ppd);
+ if (lastfail) {
+ qib_devinfo(dd->pcidev,
+ "Failed to bringup IB port %u\n", ppd->port);
+ lastfail = -ENETDOWN;
+ continue;
+ }
+
+ /* let link come up, and enable IBC */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ portok++;
+ }
+
+ if (!portok) {
+ /* none of the ports initialized */
+ if (!ret && lastfail)
+ ret = lastfail;
+ else if (!ret)
+ ret = -ENETDOWN;
+ /* but continue on, so we can debug cause */
+ }
+
+ enable_chip(dd);
+
+ init_piobuf_state(dd);
+
+done:
+ if (!ret) {
+ /* chip is OK for user apps; mark it as initialized */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
+ QIB_STATUS_INITTED;
+ if (!ppd->link_speed_enabled)
+ continue;
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ ret = qib_setup_sdma(ppd);
+ init_timer(&ppd->hol_timer);
+ ppd->hol_timer.function = qib_hol_event;
+ ppd->hol_timer.data = (unsigned long)ppd;
+ ppd->hol_state = QIB_HOL_UP;
+ }
+
+ /* now we can enable all interrupts from the chip */
+ dd->f_set_intr_state(dd, 1);
+
+ /*
+ * Setup to verify we get an interrupt, and fallback
+ * to an alternate if necessary and possible.
+ */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ /* start stats retrieval timer */
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+ }
+
+ /* if ret is non-zero, we probably should do some cleanup here... */
+ return ret;
+}
+
+/*
+ * These next two routines are placeholders in case we don't have per-arch
+ * code for controlling write combining. If explicit control of write
+ * combining is not available, performance will probably be awful.
+ */
+
+int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
+{
+ return -EOPNOTSUPP;
+}
+
+void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
+{
+}
+
+static inline struct qib_devdata *__qib_lookup(int unit)
+{
+ return idr_find(&qib_unit_table, unit);
+}
+
+struct qib_devdata *qib_lookup(int unit)
+{
+ struct qib_devdata *dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ dd = __qib_lookup(unit);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ return dd;
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void qib_stop_timers(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int pidx;
+
+ if (dd->stats_timer.data) {
+ del_timer_sync(&dd->stats_timer);
+ dd->stats_timer.data = 0;
+ }
+ if (dd->intrchk_timer.data) {
+ del_timer_sync(&dd->intrchk_timer);
+ dd->intrchk_timer.data = 0;
+ }
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->hol_timer.data)
+ del_timer_sync(&ppd->hol_timer);
+ if (ppd->led_override_timer.data) {
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+ if (ppd->symerr_clear_timer.data)
+ del_timer_sync(&ppd->symerr_clear_timer);
+ }
+}
+
+/**
+ * qib_shutdown_device - shut down a device
+ * @dd: the qlogic_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be setup again by qib_init(dd, 1)
+ */
+static void qib_shutdown_device(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ spin_lock_irq(&ppd->lflags_lock);
+ ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE |
+ QIBL_LINKV);
+ spin_unlock_irq(&ppd->lflags_lock);
+ *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
+ }
+ dd->flags &= ~QIB_INITTED;
+
+ /* mask interrupts, but not errors */
+ dd->f_set_intr_state(dd, 0);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
+ QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_PKEY_ENB, -1);
+ /*
+ * Gracefully stop all sends allowing any in progress to
+ * trickle out first.
+ */
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
+ }
+
+ /*
+ * Enough for anything that's going to trickle out to have actually
+ * done so.
+ */
+ udelay(20);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_setextled(ppd, 0); /* make sure LEDs are off */
+
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
+ QIB_SENDCTRL_SEND_DIS);
+ /*
+ * Clear SerdesEnable.
+ * We can't count on interrupts since we are stopping.
+ */
+ dd->f_quiet_serdes(ppd);
+ }
+
+ qib_update_eeprom_log(dd);
+}
+
+/**
+ * qib_free_ctxtdata - free a context's allocated data
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of context data, because it is called after qib_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ */
+void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ if (!rcd)
+ return;
+
+ if (rcd->rcvhdrq) {
+ dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+ if (rcd->rcvhdrtail_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr,
+ rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrtail_kvaddr = NULL;
+ }
+ }
+ if (rcd->rcvegrbuf) {
+ unsigned e;
+
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ void *base = rcd->rcvegrbuf[e];
+ size_t size = rcd->rcvegrbuf_size;
+
+ dma_free_coherent(&dd->pcidev->dev, size,
+ base, rcd->rcvegrbuf_phys[e]);
+ }
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+ rcd->rcvegrbuf_chunks = 0;
+ }
+
+ kfree(rcd->tid_pg_list);
+ vfree(rcd->user_event_mask);
+ vfree(rcd->subctxt_uregbase);
+ vfree(rcd->subctxt_rcvegrbuf);
+ vfree(rcd->subctxt_rcvhdr_base);
+ kfree(rcd);
+}
+
+/*
+ * Perform a PIO buffer bandwidth write test, to verify proper system
+ * configuration. Even when all the setup calls work, occasionally
+ * BIOS or other issues can prevent write combining from working, or
+ * can cause other bandwidth problems to the chip.
+ *
+ * This test simply writes the same buffer over and over again, and
+ * measures close to the peak bandwidth to the chip (not testing
+ * data bandwidth to the wire). On chips that use an address-based
+ * trigger to send packets to the wire, this is easy. On chips that
+ * use a count to trigger, we want to make sure that the packet doesn't
+ * go out on the wire, or trigger flow control checks.
+ */
+static void qib_verify_pioperf(struct qib_devdata *dd)
+{
+ u32 pbnum, cnt, lcnt;
+ u32 __iomem *piobuf;
+ u32 *addr;
+ u64 msecs, emsecs;
+
+ piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
+ if (!piobuf) {
+ qib_devinfo(dd->pcidev,
+ "No PIObufs for checking perf, skipping\n");
+ return;
+ }
+
+ /*
+ * Enough to give us a reasonable test, less than piobuf size, and
+ * likely multiple of store buffer length.
+ */
+ cnt = 1024;
+
+ addr = vmalloc(cnt);
+ if (!addr) {
+ qib_devinfo(dd->pcidev,
+ "Couldn't get memory for checking PIO perf,"
+ " skipping\n");
+ goto done;
+ }
+
+ preempt_disable(); /* we want reasonably accurate elapsed time */
+ msecs = 1 + jiffies_to_msecs(jiffies);
+ for (lcnt = 0; lcnt < 10000U; lcnt++) {
+ /* wait until we cross msec boundary */
+ if (jiffies_to_msecs(jiffies) >= msecs)
+ break;
+ udelay(1);
+ }
+
+ dd->f_set_armlaunch(dd, 0);
+
+ /*
+ * length 0, no dwords actually sent
+ */
+ writeq(0, piobuf);
+ qib_flush_wc();
+
+ /*
+ * This is only roughly accurate, since even with preempt we
+ * still take interrupts that could take a while. Running for
+ * >= 5 msec seems to get us "close enough" to accurate values.
+ */
+ msecs = jiffies_to_msecs(jiffies);
+ for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
+ qib_pio_copy(piobuf + 64, addr, cnt >> 2);
+ emsecs = jiffies_to_msecs(jiffies) - msecs;
+ }
+
+ /* 1 GiB/sec, slightly over IB SDR line rate */
+ if (lcnt < (emsecs * 1024U))
+ qib_dev_err(dd,
+ "Performance problem: bandwidth to PIO buffers is "
+ "only %u MiB/sec\n",
+ lcnt / (u32) emsecs);
+
+ preempt_enable();
+
+ vfree(addr);
+
+done:
+ /* disarm piobuf, so it's available again */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
+ qib_sendbuf_done(dd, pbnum);
+ dd->f_set_armlaunch(dd, 1);
+}
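+
+/*
+ * How the threshold above works out: each loop iteration copies
+ * cnt == 1024 bytes, so lcnt / emsecs is roughly MiB/sec (1 KiB per
+ * millisecond is about 1 MiB per second). If, hypothetically,
+ * emsecs == 5 and lcnt == 3000, the reported rate is ~600 MiB/sec,
+ * below the lcnt < emsecs * 1024 (~1 GiB/sec) cutoff, and the
+ * performance warning fires.
+ */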
+
+
+void qib_free_devdata(struct qib_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ idr_remove(&qib_unit_table, dd->unit);
+ list_del(&dd->list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+}
+
+/*
+ * Allocate our primary per-unit data structure. Must be done via verbs
+ * allocator, because the verbs cleanup process both does cleanup and
+ * free of the data structure.
+ * "extra" is for chip-specific data.
+ *
+ * Use the idr mechanism to get a unit number for this unit.
+ */
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+{
+ unsigned long flags;
+ struct qib_devdata *dd;
+ int ret;
+
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+ if (!dd) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
+ if (ret >= 0)
+ list_add(&dd->list, &qib_dev_list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (ret < 0) {
+ qib_early_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+ dd = ERR_PTR(ret);
+ goto bail;
+ }
+
+ if (!qib_cpulist_count) {
+ u32 count = num_online_cpus();
+ qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
+ sizeof(long), GFP_KERNEL);
+ if (qib_cpulist)
+ qib_cpulist_count = count;
+ else
+ qib_early_err(&pdev->dev, "Could not alloc cpulist "
+ "info, cpu affinity might be wrong\n");
+ }
+
+bail:
+ return dd;
+}
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code. Should be paranoid about state of
+ * system and data structures.
+ */
+void qib_disable_after_error(struct qib_devdata *dd)
+{
+ if (dd->flags & QIB_INITTED) {
+ u32 pidx;
+
+ dd->flags &= ~QIB_INITTED;
+ if (dd->pport)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct qib_pportdata *ppd;
+
+ ppd = dd->pport + pidx;
+ if (dd->flags & QIB_PRESENT) {
+ qib_set_linkstate(ppd,
+ QIB_IB_LINKDOWN_DISABLE);
+ dd->f_setextled(ppd, 0);
+ }
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+ }
+
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset.
+ */
+ if (dd->devstatusp)
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *);
+static int __devinit qib_init_one(struct pci_dev *,
+ const struct pci_device_id *);
+
+#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define PFX QIB_DRV_NAME ": "
+
+static const struct pci_device_id qib_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
+
+struct pci_driver qib_driver = {
+ .name = QIB_DRV_NAME,
+ .probe = qib_init_one,
+ .remove = __devexit_p(qib_remove_one),
+ .id_table = qib_pci_tbl,
+ .err_handler = &qib_pci_err_handler,
+};
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init qlogic_ib_init(void)
+{
+ int ret;
+
+ ret = qib_dev_init();
+ if (ret)
+ goto bail;
+
+ /*
+ * We create our own workqueue mainly because we want to be
+ * able to flush it when devices are being removed. We can't
+ * use schedule_work()/flush_scheduled_work() because both
+ * unregister_netdev() and linkwatch_event take the rtnl lock,
+ * so flush_scheduled_work() can deadlock during device
+ * removal.
+ */
+ qib_wq = create_workqueue("qib");
+ if (!qib_wq) {
+ ret = -ENOMEM;
+ goto bail_dev;
+ }
+
+ qib_cq_wq = create_workqueue("qib_cq");
+ if (!qib_cq_wq) {
+ ret = -ENOMEM;
+ goto bail_wq;
+ }
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ idr_init(&qib_unit_table);
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+ ret = -ENOMEM;
+ goto bail_cq_wq;
+ }
+
+ ret = pci_register_driver(&qib_driver);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Unable to register driver: error %d\n", -ret);
+ goto bail_unit;
+ }
+
+ /* not fatal if it doesn't work */
+ if (qib_init_qibfs())
+ printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
+ goto bail; /* all OK */
+
+bail_unit:
+ idr_destroy(&qib_unit_table);
+bail_cq_wq:
+ destroy_workqueue(qib_cq_wq);
+bail_wq:
+ destroy_workqueue(qib_wq);
+bail_dev:
+ qib_dev_cleanup();
+bail:
+ return ret;
+}
+
+module_init(qlogic_ib_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit qlogic_ib_cleanup(void)
+{
+ int ret;
+
+ ret = qib_exit_qibfs();
+ if (ret)
+ printk(KERN_ERR QIB_DRV_NAME ": "
+ "Unable to cleanup counter filesystem: "
+ "error %d\n", -ret);
+
+ pci_unregister_driver(&qib_driver);
+
+ destroy_workqueue(qib_wq);
+ destroy_workqueue(qib_cq_wq);
+
+ qib_cpulist_count = 0;
+ kfree(qib_cpulist);
+
+ idr_destroy(&qib_unit_table);
+ qib_dev_cleanup();
+}
+
+module_exit(qlogic_ib_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct qib_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+ struct qib_ctxtdata **tmp;
+ unsigned long flags;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].statusp)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+
+ if (!qib_wc_pat)
+ qib_disable_wc(dd);
+
+ if (dd->pioavailregs_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma,
+ dd->pioavailregs_phys);
+ dd->pioavailregs_dma = NULL;
+ }
+
+ if (dd->pageshadow) {
+ struct page **tmpp = dd->pageshadow;
+ dma_addr_t *tmpd = dd->physshadow;
+ int i, cnt = 0;
+
+ for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
+ int ctxt_tidbase = ctxt * dd->rcvtidcnt;
+ int maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ if (!tmpp[i])
+ continue;
+ pci_unmap_page(dd->pcidev, tmpd[i],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&tmpp[i], 1);
+ tmpp[i] = NULL;
+ cnt++;
+ }
+ }
+
+ tmpp = dd->pageshadow;
+ dd->pageshadow = NULL;
+ vfree(tmpp);
+ }
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we iterate over ctxtcnt, because that's what we allocate.
+ * We acquire lock to be really paranoid that rcd isn't being
+ * accessed from some interrupt-related code (that should not happen,
+ * but best to be sure).
+ */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ tmp = dd->rcd;
+ dd->rcd = NULL;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
+ struct qib_ctxtdata *rcd = tmp[ctxt];
+
+ tmp[ctxt] = NULL; /* debugging paranoia */
+ qib_free_ctxtdata(dd, rcd);
+ }
+ kfree(tmp);
+ kfree(dd->boardname);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void qib_postinit_cleanup(struct qib_devdata *dd)
+{
+ /*
+ * Clean up chip-specific stuff.
+ * We check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+ if (dd->f_cleanup)
+ dd->f_cleanup(dd);
+
+ qib_pcie_ddcleanup(dd);
+
+ cleanup_device_data(dd);
+
+ qib_free_devdata(dd);
+}
+
+static int __devinit qib_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret, j, pidx, initfail;
+ struct qib_devdata *dd = NULL;
+
+ ret = qib_pcie_init(pdev, ent);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_IB_6120:
+#ifdef CONFIG_PCI_MSI
+ dd = qib_init_iba6120_funcs(pdev, ent);
+#else
+ qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
+ "work if CONFIG_PCI_MSI is not enabled\n",
+ ent->device);
+#endif
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7220:
+ dd = qib_init_iba7220_funcs(pdev, ent);
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7322:
+ dd = qib_init_iba7322_funcs(pdev, ent);
+ break;
+
+ default:
+ qib_early_err(&pdev->dev, "Failing on unknown QLogic "
+ "deviceid 0x%x\n", ent->device);
+ ret = -ENODEV;
+ }
+
+ if (IS_ERR(dd))
+ ret = PTR_ERR(dd);
+ if (ret)
+ goto bail; /* error already printed */
+
+ /* do the generic initialization */
+ initfail = qib_init(dd, 0);
+
+ ret = qib_register_ib_device(dd);
+
+ /*
+ * Now ready for use. This should be cleared whenever we
+ * detect a reset, or initiate one. If an earlier failure
+ * occurred, we still create devices, so diags, etc. can be
+ * used to determine the cause of the problem.
+ */
+ if (!qib_mini_init && !initfail && !ret)
+ dd->flags |= QIB_INITTED;
+
+ j = qib_device_create(dd);
+ if (j)
+ qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+ j = qibfs_add(dd);
+ if (j)
+ qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
+ -j);
+
+ if (qib_mini_init || initfail || ret) {
+ qib_stop_timers(dd);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_quiet_serdes(dd->pport + pidx);
+ if (initfail)
+ ret = initfail;
+ goto bail;
+ }
+
+ if (!qib_wc_pat) {
+ ret = qib_enable_wc(dd);
+ if (ret) {
+ qib_dev_err(dd, "Write combining not enabled "
+ "(err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
+ }
+ }
+
+ qib_verify_pioperf(dd);
+bail:
+ return ret;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ int ret;
+
+ /* unregister from IB core */
+ qib_unregister_ib_device(dd);
+
+ /*
+ * Disable the IB link, disable interrupts on the device,
+ * clear dma engines, etc.
+ */
+ if (!qib_mini_init)
+ qib_shutdown_device(dd);
+
+ qib_stop_timers(dd);
+
+ /* wait until all of our (qsfp) schedule_work() calls complete */
+ flush_scheduled_work();
+
+ ret = qibfs_remove(dd);
+ if (ret)
+ qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
+ -ret);
+
+ qib_device_remove(dd);
+
+ qib_postinit_cleanup(dd);
+}
+
+/**
+ * qib_create_rcvhdrq - create a receive header queue
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ unsigned amt;
+
+ if (!rcd->rcvhdrq) {
+ dma_addr_t phys_hdrqtail;
+ gfp_t gfp_flags;
+
+ amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
+ gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
+ GFP_USER : GFP_KERNEL;
+ rcd->rcvhdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ gfp_flags | __GFP_COMP);
+
+ if (!rcd->rcvhdrq) {
+ qib_dev_err(dd, "attempt to allocate %d bytes "
+ "for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ if (rcd->ctxt >= dd->first_user_ctxt) {
+ rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+ if (!rcd->user_event_mask)
+ goto bail_free_hdrq;
+ }
+
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+ }
+
+ /* clear for security and sanity on each use */
+ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ if (rcd->rcvhdrtail_kvaddr)
+ memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+ return 0;
+
+bail_free:
+ qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
+ "rcvhdrqtailaddr failed\n", rcd->ctxt);
+ vfree(rcd->user_event_mask);
+ rcd->user_event_mask = NULL;
+bail_free_hdrq:
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
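+
+/*
+ * Sizing example for the rcvhdrq allocation above (hypothetical chip
+ * parameters): with rcvhdrcnt == 4096 entries of rcvhdrentsize == 16
+ * dwords, amt == ALIGN(4096 * 16 * 4, PAGE_SIZE) == 256KB, i.e. 64
+ * pages on a 4KB-page system, allocated as one coherent DMA region.
+ */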
+
+/**
+ * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into the chip.
+ * They are no longer completely contiguous; we do multiple
+ * allocation calls, since asking for too much per call gets the
+ * OOM code involved, with disastrous results on some kernels.
+ */
+int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
+ size_t size;
+ gfp_t gfp_flags;
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail, and we can
+ * use compound pages.
+ */
+ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+ egrcnt = rcd->rcvegrcnt;
+ egroff = rcd->rcvegr_tid_base;
+ egrsize = dd->rcvegrbufsize;
+
+ chunk = rcd->rcvegrbuf_chunks;
+ egrperchunk = rcd->rcvegrbufs_perchunk;
+ size = rcd->rcvegrbuf_size;
+ if (!rcd->rcvegrbuf) {
+ rcd->rcvegrbuf =
+ kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf)
+ goto bail;
+ }
+ if (!rcd->rcvegrbuf_phys) {
+ rcd->rcvegrbuf_phys =
+ kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf_phys)
+ goto bail_rcvegrbuf;
+ }
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ if (rcd->rcvegrbuf[e])
+ continue;
+ rcd->rcvegrbuf[e] =
+ dma_alloc_coherent(&dd->pcidev->dev, size,
+ &rcd->rcvegrbuf_phys[e],
+ gfp_flags);
+ if (!rcd->rcvegrbuf[e])
+ goto bail_rcvegrbuf_phys;
+ }
+
+ rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
+
+ for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
+ dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
+ unsigned i;
+
+ for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
+ dd->f_put_tid(dd, e + egroff +
+ (u64 __iomem *)
+ ((char __iomem *)
+ dd->kregbase +
+ dd->rcvegrbase),
+ RCVHQ_RCV_TYPE_EAGER, pa);
+ pa += egrsize;
+ }
+ cond_resched(); /* don't hog the cpu */
+ }
+
+ return 0;
+
+bail_rcvegrbuf_phys:
+ for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
+ dma_free_coherent(&dd->pcidev->dev, size,
+ rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+bail_rcvegrbuf:
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+bail:
+ return -ENOMEM;
+}
+
+int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
+{
+ u64 __iomem *qib_kregbase = NULL;
+ void __iomem *qib_piobase = NULL;
+ u64 __iomem *qib_userbase = NULL;
+ u64 qib_kreglen;
+ u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
+ u64 qib_pio4koffset = dd->piobufbase >> 32;
+ u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
+ u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
+ u64 qib_physaddr = dd->physaddr;
+ u64 qib_piolen;
+ u64 qib_userlen = 0;
+
+ /*
+ * Free the old mapping because the kernel will try to reuse the
+ * old mapping and not create a new mapping with the
+ * write combining attribute.
+ */
+ iounmap(dd->kregbase);
+ dd->kregbase = NULL;
+
+ /*
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ */
+ if (dd->piobcnt4k == 0) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio2klen;
+ } else if (qib_pio2koffset < qib_pio4koffset) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
+ } else {
+ qib_kreglen = qib_pio4koffset;
+ qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
+ }
+ qib_piolen += vl15buflen;
+ /* Map just the configured ports (not all hw ports) */
+ if (dd->uregbase > qib_kreglen)
+ qib_userlen = dd->ureg_align * dd->cfgctxts;
+
+ /* Sanity checks passed, now create the new mappings */
+ qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ if (!qib_kregbase)
+ goto bail;
+
+ qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
+ if (!qib_piobase)
+ goto bail_kregbase;
+
+ if (qib_userlen) {
+ qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userlen);
+ if (!qib_userbase)
+ goto bail_piobase;
+ }
+
+ dd->kregbase = qib_kregbase;
+ dd->kregend = (u64 __iomem *)
+ ((char __iomem *) qib_kregbase + qib_kreglen);
+ dd->piobase = qib_piobase;
+ dd->pio2kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio2koffset - qib_kreglen);
+ if (dd->piobcnt4k)
+ dd->pio4kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio4koffset - qib_kreglen);
+ if (qib_userlen)
+ /* ureg will now be accessed relative to dd->userbase */
+ dd->userbase = qib_userbase;
+ return 0;
+
+bail_piobase:
+ iounmap(qib_piobase);
+bail_kregbase:
+ iounmap(qib_kregbase);
+bail:
+ return -ENOMEM;
+}
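+
+/*
+ * Mapping sketch for init_chip_wc_pat() (hypothetical BAR layout): if
+ * the 2K PIO buffers start at offset 0x100000 and the 4K buffers at
+ * 0x200000 with 0x100000 of 4K buffer space, then qib_kreglen ==
+ * 0x100000 (registers, mapped uncached) and qib_piolen == 0x200000
+ * plus vl15buflen (PIO buffers, remapped write-combining), with uregs
+ * mapped separately only when they sit above the register region.
+ */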
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 00000000000..54a40828a10
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/**
+ * qib_format_hwmsg - format a single hwerror message
+ * @msg: message buffer
+ * @msgl: length of message buffer
+ * @hwmsg: message to add to message buffer
+ */
+static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
+{
+ strlcat(msg, "[", msgl);
+ strlcat(msg, hwmsg, msgl);
+ strlcat(msg, "]", msgl);
+}
+
+/**
+ * qib_format_hwerrors - format hardware error messages for display
+ * @hwerrs: hardware errors bit vector
+ * @hwerrmsgs: hardware error descriptions
+ * @nhwerrmsgs: number of hwerrmsgs
+ * @msg: message buffer
+ * @msgl: message buffer length
+ */
+void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t msgl)
+{
+ int i;
+
+ for (i = 0; i < nhwerrmsgs; i++)
+ if (hwerrs & hwerrmsgs[i].mask)
+ qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
+}
+
+static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
+{
+ struct ib_event event;
+ struct qib_devdata *dd = ppd->dd;
+
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = ev;
+ ib_dispatch_event(&event);
+}
+
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ u32 lstate;
+ u8 ltstate;
+ enum ib_event_type ev = 0;
+
+ lstate = dd->f_iblink_state(ibcs); /* linkstate */
+ ltstate = dd->f_ibphys_portstate(ibcs);
+
+ /*
+ * If linkstate transitions into INIT from any of the various down
+ * states, or if it transitions from any of the up (INIT or better)
+ * states into any of the down states (except link recovery), then
+ * call the chip-specific code to take appropriate actions.
+ */
+ if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* transitioned to UP */
+ if (dd->f_ib_updown(ppd, 1, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
+ if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
+ ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
+ dd->f_ib_updown(ppd, 0, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
+ }
+
+ if (lstate != IB_PORT_DOWN) {
+ /* lstate is INIT, ARMED, or ACTIVE */
+ if (lstate != IB_PORT_ACTIVE) {
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (lstate == IB_PORT_ARMED) {
+ ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ } else {
+ ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /* start a 75msec timer to clear symbol errors */
+ mod_timer(&ppd->symerr_clear_timer,
+ msecs_to_jiffies(75));
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* active, but not active deferred */
+ qib_hol_up(ppd); /* useful only for 6120 now */
+ *ppd->statusp |=
+ QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
+ qib_clear_symerror_on_linkup((unsigned long)ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_sdma_process_event(ppd,
+ qib_sdma_event_e30_go_running);
+ ev = IB_EVENT_PORT_ACTIVE;
+ dd->f_setextled(ppd, 1);
+ }
+ } else { /* down */
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKACTIVE | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+
+skip_ibchange:
+ ppd->lastibcstat = ibcs;
+ if (ev)
+ signal_ib_event(ppd, ev);
+ return;
+}
+
+void qib_clear_symerror_on_linkup(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ return;
+
+ ppd->ibport_data.z_symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+}
+
+/*
+ * Handle receive interrupts for user ctxts; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
+ if (!(ctxtr & (1ULL << i)))
+ continue;
+ rcd = dd->rcd[i];
+ if (!rcd || !rcd->cnt)
+ continue;
+
+ if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
+ wake_up_interruptible(&rcd->wait);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
+ rcd->ctxt);
+ } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
+ &rcd->flag)) {
+ rcd->urgent++;
+ wake_up_interruptible(&rcd->wait);
+ }
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
+
+void qib_bad_intrstatus(struct qib_devdata *dd)
+{
+ static int allbits;
+
+ /* separate routine, for better optimization of qib_intr() */
+
+ /*
+ * We print the message and disable interrupts, in hope of
+ * having a better chance of debugging the problem.
+ */
+ qib_dev_err(dd, "Read of chip interrupt status failed"
+ " disabling interrupts\n");
+ if (allbits++) {
+ /* disable interrupt delivery, something is very wrong */
+ if (allbits == 2)
+ dd->f_set_intr_state(dd, 0);
+ if (allbits == 3) {
+ qib_dev_err(dd, "2nd bad interrupt status, "
+ "unregistering interrupts\n");
+ dd->flags |= QIB_BADINTR;
+ dd->flags &= ~QIB_INITTED;
+ dd->f_free_irq(dd);
+ }
+ }
+}
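+
+/*
+ * Escalation implied by the static counter above: the first bad status
+ * read only logs the error, the second disables interrupt delivery,
+ * and the third frees the IRQ and marks the device QIB_BADINTR and
+ * no longer QIB_INITTED, so it is not trusted further.
+ */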
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 00000000000..4b80eb153d5
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_alloc_lkey - allocate an lkey
+ * @rkt: lkey table in which to allocate the lkey
+ * @mr: memory region that this lkey protects
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ */
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (rkt->table[r] == NULL)
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n) {
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+ goto bail;
+ }
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero, which is reserved to indicate an
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
+ mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rkt->table[r] = mr;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
+ ret = 1;
+
+bail:
+ return ret;
+}
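+
+/*
+ * Bit-layout example for the lkey constructed above (assuming a
+ * hypothetical ib_qib_lkey_table_size of 16): the table index r
+ * occupies bits 31..16 and an 8-bit generation count occupies bits
+ * 15..8, so r == 5 with gen == 3 yields lkey == 0x00050300; the
+ * generation changes each time a slot is reused, and an all-zero
+ * result is nudged so lkey 0 stays reserved for the unrestricted case.
+ */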
+
+/**
+ * qib_free_lkey - free an lkey
+ * @dev: the qib device whose lkey table holds the region
+ * @mr: the memory region whose lkey is being freed
+ */
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ int ret;
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (lkey == 0) {
+ if (dev->dma_mr && dev->dma_mr == mr) {
+ ret = atomic_read(&dev->dma_mr->refcount);
+ if (!ret)
+ dev->dma_mr = NULL;
+ } else
+ ret = 0;
+ } else {
+ r = lkey >> (32 - ib_qib_lkey_table_size);
+ ret = atomic_read(&dev->lk_table.table[r]->refcount);
+ if (!ret)
+ dev->lk_table.table[r] = NULL;
+ }
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ if (ret)
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qib_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain the SGE must belong to
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Return 1 if valid and successful, otherwise returns 0.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (sge->lkey == 0) {
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ isge->mr = dev->dma_mr;
+ isge->vaddr = (void *) sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+ mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
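+
+/*
+ * Example of the segment walk above (hypothetical region with 4KB
+ * segments): for an offset of 10000 bytes into the region, the loop
+ * subtracts 4096 twice, leaving off == 1808 within segment n == 2, so
+ * isge->vaddr points 1808 bytes into that segment and isge->length is
+ * the remaining 4096 - 1808 == 2288 bytes of it.
+ */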
+
+/**
+ * qib_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: the queue pair performing the access
+ * @sge: SGE to fill in from the checked memory region
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ */
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (rkey == 0) {
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ sge->mr = dev->dma_mr;
+ sge->vaddr = (void *) vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/*
+ * Initialize the memory region specified by the work request.
+ */
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_mregion *mr;
+ u32 rkey = wr->wr.fast_reg.rkey;
+ unsigned i, n, m;
+ int ret = -EINVAL;
+ unsigned long flags;
+ u64 *page_list;
+ size_t ps;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (pd->user || rkey == 0)
+ goto bail;
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ if (wr->wr.fast_reg.page_list_len > mr->max_segs)
+ goto bail;
+
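+	/*
+	 * ps is the page size implied by the WR's page_shift; the region
+	 * length must fit within page_list_len pages of that size.
+	 */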
+ ps = 1UL << wr->wr.fast_reg.page_shift;
+ if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
+ goto bail;
+
+ mr->user_base = wr->wr.fast_reg.iova_start;
+ mr->iova = wr->wr.fast_reg.iova_start;
+ mr->lkey = rkey;
+ mr->length = wr->wr.fast_reg.length;
+ mr->access_flags = wr->wr.fast_reg.access_flags;
+ page_list = wr->wr.fast_reg.page_list->page_list;
+ m = 0;
+ n = 0;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ mr->map[m]->segs[n].vaddr = (void *) page_list[i];
+ mr->map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = 0;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 00000000000..94b0d1f3a8f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+static int reply(struct ib_smp *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
+{
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ int ret;
+ unsigned long flags;
+ unsigned long timeout;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ return;
+
+ /* o14-3.2.1 */
+ if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
+ return;
+
+ /* o14-2 */
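+	/*
+	 * trap_timeout is armed below when a trap is posted; until it
+	 * expires, further traps are suppressed.
+	 */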
+ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ return;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_TRAP;
+ ibp->tid++;
+ smp->tid = cpu_to_be64(ibp->tid);
+ smp->attr_id = IB_SMP_ATTR_NOTICE;
+ /* o14-1: smp->mkey = 0; */
+ memcpy(smp->data, data, len);
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (!ibp->sm_ah) {
+ if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ struct ib_ah *ah;
+ struct ib_ah_attr attr;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = ibp->sm_lid;
+ attr.port_num = ppd_from_ibp(ibp)->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->sm_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else
+ ret = -EINVAL;
+ } else {
+ send_buf->ah = &ibp->sm_ah->ibah;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (!ret) {
+ /* 4.096 usec. */
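+		/*
+		 * The repeat interval is 4.096 usec * 2^subnet_timeout,
+		 * computed here in microseconds (4096 ns * 2^n / 1000);
+		 * e.g. a subnet_timeout of 18 gives roughly 1.07 seconds.
+		 */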
+ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
+ ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ } else {
+ ib_free_send_mad(send_buf);
+ ibp->trap_timeout = 0;
+ }
+}
+
+/*
+ * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ */
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+{
+ struct ib_mad_notice_attr data;
+
+ if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+ ibp->pkey_violations++;
+ else
+ ibp->qkey_violations++;
+ ibp->n_pkt_drops++;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = trap_num;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_257_258.lid1 = lid1;
+ data.details.ntc_257_258.lid2 = lid2;
+ data.details.ntc_257_258.key = cpu_to_be32(key);
+ data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
+ data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
+{
+ struct ib_mad_notice_attr data;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_256.lid = data.issuer_lid;
+ data.details.ntc_256.method = smp->method;
+ data.details.ntc_256.attr_id = smp->attr_id;
+ data.details.ntc_256.attr_mod = smp->attr_mod;
+ data.details.ntc_256.mkey = smp->mkey;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ u8 hop_cnt;
+
+ data.details.ntc_256.dr_slid = smp->dr_slid;
+ data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ hop_cnt = smp->hop_cnt;
+ if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
+ data.details.ntc_256.dr_trunc_hop |=
+ IB_NOTICE_TRAP_DR_TRUNC;
+ hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+ }
+ data.details.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
+ hop_cnt);
+ }
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void qib_cap_mask_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void qib_sys_guid_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_145.lid = data.issuer_lid;
+ data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void qib_node_desc_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.local_changes = 1;
+ data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+static int subn_get_nodedescription(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ if (smp->attr_mod)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+
+ return reply(smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 vendor, majrev, minrev;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ /* GUID 0 is illegal */
+ if (smp->attr_mod || pidx >= dd->num_pports ||
+ dd->pport[pidx].guid == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ nip->port_guid = dd->pport[pidx].guid;
+
+ nip->base_version = 1;
+ nip->class_version = 1;
+ nip->node_type = 1; /* channel adapter */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = ib_qib_sys_image_guid;
+ nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
+ nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->deviceid);
+ majrev = dd->majrev;
+ minrev = dd->minrev;
+ nip->revision = cpu_to_be32((majrev << 16) | minrev);
+ nip->local_port_num = port;
+ vendor = dd->vendorid;
+ nip->vendor_id[0] = QIB_SRC_OUI_1;
+ nip->vendor_id[1] = QIB_SRC_OUI_2;
+ nip->vendor_id[2] = QIB_SRC_OUI_3;
+
+ return reply(smp);
+}
+
+static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ __be64 g = ppd->guid;
+ unsigned i;
+
+ /* GUID 0 is illegal */
+ if (g == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else {
+ /* The first is a copy of the read-only HW GUID. */
+ p[0] = g;
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ p[i] = ibp->guids[i - 1];
+ }
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
+}
+
+static int get_overrunthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
+ (u32)n);
+ return 0;
+}
+
+static int get_phyerrthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
+ (u32)n);
+ return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @ppd: the physical port data
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
+ IB_LINKINITCMD_SLEEP;
+}
+
+static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
+{
+ int ret = 0;
+
+ /* Is the mkey in the process of expiring? */
+ if (ibp->mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ /* Clear timeout and mkey protection field. */
+ ibp->mkey_lease_timeout = 0;
+ ibp->mkeyprot = 0;
+ }
+
+ /* M_Key checking depends on Portinfo:M_Key_protect_bits */
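+	/*
+	 * Set and TrapRepress fail on any mismatch once a non-zero M_Key
+	 * is configured; Get fails only at protection levels 2 and 3,
+	 * hence the mkeyprot >= 2 test below.
+	 */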
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
+ ibp->mkey != smp->mkey &&
+ (smp->method == IB_MGMT_METHOD_SET ||
+ smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
+ (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
+ if (ibp->mkey_violations != 0xFFFF)
+ ++ibp->mkey_violations;
+ if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+ ibp->mkey_lease_timeout = jiffies +
+ ibp->mkey_lease_period * HZ;
+ /* Generate a trap notice. */
+ qib_bad_mkey(ibp, smp);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else if (ibp->mkey_lease_timeout)
+ ibp->mkey_lease_timeout = 0;
+
+ return ret;
+}
+
+static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ u16 lid;
+ u8 mtu;
+ int ret;
+ u32 state;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply(smp);
+ goto bail;
+ }
+ if (port_num != port) {
+ ibp = to_iport(ibdev, port_num);
+ ret = check_mkey(ibp, smp, 0);
+ if (ret)
+ goto bail;
+ }
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+
+ /* Clear all fields. Only set the non-zero fields. */
+ memset(smp->data, 0, sizeof(smp->data));
+
+ /* Only return the mkey if the protection field allows it. */
+ if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
+ ibp->mkeyprot == 0)
+ pip->mkey = ibp->mkey;
+ pip->gid_prefix = ibp->gid_prefix;
+ lid = ppd->lid;
+ pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+ pip->sm_lid = cpu_to_be16(ibp->sm_lid);
+ pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ /* pip->diag_code; */
+ pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pip->local_port_num = port;
+ pip->link_width_enabled = ppd->link_width_enabled;
+ pip->link_width_supported = ppd->link_width_supported;
+ pip->link_width_active = ppd->link_width_active;
+ state = dd->f_iblink_state(ppd->lastibcstat);
+ pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
+
+ pip->portphysstate_linkdown =
+ (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
+ (get_linkdowndefaultstate(ppd) ? 1 : 2);
+ pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
+ ppd->link_speed_enabled;
+ switch (ppd->ibmtu) {
+ default: /* something is wrong; fall through */
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ }
+ pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+ pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
+ pip->vl_high_limit = ibp->vl_high_limit;
+ pip->vl_arb_high_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
+ pip->vl_arb_low_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
+ /* InitTypeReply = 0 */
+ pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ /* HCAs ignore VLStallCount and HOQLife */
+ /* pip->vlstallcnt_hoqlife; */
+ pip->operationalvl_pei_peo_fpi_fpo =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
+ pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
+ pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ /* Only the hardware GUID is supported for now */
+ pip->guid_cap = QIB_GUIDS_PER_PORT;
+ pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+ /* 32.768 usec. response time (guessing) */
+ pip->resv_resptimevalue = 3;
+ pip->localphyerrors_overrunerrors =
+ (get_phyerrthreshold(ppd) << 4) |
+ get_overrunthreshold(ppd);
+ /* pip->max_credit_hint; */
+ if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+ u32 v;
+
+ v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
+ pip->link_roundtrip_latency[0] = v >> 16;
+ pip->link_roundtrip_latency[1] = v >> 8;
+ pip->link_roundtrip_latency[2] = v;
+ }
+
+ ret = reply(smp);
+
+bail:
+ return ret;
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd = dd->pport + port - 1;
+ /*
+	 * Always a kernel context, no locking needed.
+	 * If we get here with ppd setup, no need to check
+	 * that rcd is valid.
+ */
+ struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
+
+ memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
+
+ return 0;
+}
+
+static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ u16 *p = (u16 *) smp->data;
+ __be16 *q = (__be16 *) smp->data;
+
+ /* 64 blocks of 32 16-bit P_Key entries */
+
+ memset(smp->data, 0, sizeof(smp->data));
+ if (startpx == 0) {
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ get_pkeys(dd, port, p);
+
+ for (i = 0; i < n; i++)
+ q[i] = cpu_to_be16(p[i]);
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned i;
+
+ /* The first entry is read-only. */
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ ibp->guids[i - 1] = p[i];
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ /* The only GUID we support is the first read-only entry. */
+ return subn_get_guidinfo(smp, ibdev, port);
+}
+
+/**
+ * subn_set_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ *
+ * Set Portinfo (see ch. 14.2.5.6).
+ */
+static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ struct ib_event event;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ char clientrereg = 0;
+ unsigned long flags;
+ u16 lid, smlid;
+ u8 lwe;
+ u8 lse;
+ u8 state;
+ u8 vls;
+ u8 msl;
+ u16 lstate;
+ int ret, ore, mtu;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt)
+ goto err;
+ /* Port attributes can only be set on the receiving port */
+ if (port_num != port)
+ goto get_only;
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ ibp->mkey = pip->mkey;
+ ibp->gid_prefix = pip->gid_prefix;
+ ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+
+ lid = be16_to_cpu(pip->lid);
+ /* Must be a valid unicast LID address. */
+ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+ if (ppd->lid != lid)
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
+ if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
+ qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ smlid = be16_to_cpu(pip->sm_lid);
+ msl = pip->neighbormtu_mastersmsl & 0xF;
+ /* Must be a valid unicast LID address. */
+ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (ibp->sm_ah) {
+ if (smlid != ibp->sm_lid)
+ ibp->sm_ah->attr.dlid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_ah->attr.sl = msl;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ if (smlid != ibp->sm_lid)
+ ibp->sm_lid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_sl = msl;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ /* Allow 1x or 4x to be set (see 14.2.6.6). */
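+	/*
+	 * A value of 0xFF requests all supported widths; any other
+	 * non-zero value must be a subset of link_width_supported or
+	 * the Set is rejected.
+	 */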
+ lwe = pip->link_width_enabled;
+ if (lwe) {
+ if (lwe == 0xFF)
+ lwe = ppd->link_width_supported;
+ else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
+ goto err;
+ set_link_width_enabled(ppd, lwe);
+ }
+
+ lse = pip->linkspeedactive_enabled & 0xF;
+ if (lse) {
+ /*
+ * The IB 1.2 spec. only allows link speed values
+ * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific
+ * speeds.
+ */
+ if (lse == 15)
+ lse = ppd->link_speed_supported;
+ else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
+ goto err;
+ set_link_speed_enabled(ppd, lse);
+ }
+
+ /* Set link down default state. */
+ switch (pip->portphysstate_linkdown & 0xF) {
+ case 0: /* NOP */
+ break;
+ case 1: /* SLEEP */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_SLEEP);
+ break;
+ case 2: /* POLL */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_POLL);
+ break;
+ default:
+ goto err;
+ }
+
+ ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+ ibp->vl_high_limit = pip->vl_high_limit;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
+ ibp->vl_high_limit);
+
+ mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
+ if (mtu == -1)
+ goto err;
+ qib_set_mtu(ppd, mtu);
+
+ /* Set operational VLs */
+ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
+ if (vls) {
+ if (vls > ppd->vls_supported)
+ goto err;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+ }
+
+ if (pip->mkey_violations == 0)
+ ibp->mkey_violations = 0;
+
+ if (pip->pkey_violations == 0)
+ ibp->pkey_violations = 0;
+
+ if (pip->qkey_violations == 0)
+ ibp->qkey_violations = 0;
+
+ ore = pip->localphyerrors_overrunerrors;
+ if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
+ goto err;
+
+ if (set_overrunthreshold(ppd, (ore & 0xF)))
+ goto err;
+
+ ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+
+ if (pip->clientrereg_resv_subnetto & 0x80) {
+ clientrereg = 1;
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+ state = pip->linkspeed_portstate & 0xF;
+ lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
+ if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
+ goto err;
+
+ /*
+ * Only state changes of DOWN, ARM, and ACTIVE are valid
+ * and must be in the correct state to take effect (see 7.2.6).
+ */
+ switch (state) {
+ case IB_PORT_NOP:
+ if (lstate == 0)
+ break;
+ /* FALLTHROUGH */
+ case IB_PORT_DOWN:
+ if (lstate == 0)
+ lstate = QIB_IB_LINKDOWN_ONLY;
+ else if (lstate == 1)
+ lstate = QIB_IB_LINKDOWN_SLEEP;
+ else if (lstate == 2)
+ lstate = QIB_IB_LINKDOWN;
+ else if (lstate == 3)
+ lstate = QIB_IB_LINKDOWN_DISABLE;
+ else
+ goto err;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_set_linkstate(ppd, lstate);
+ /*
+ * Don't send a reply if the response would be sent
+ * through the disabled port.
+ */
+ if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ goto done;
+ }
+ qib_wait_linkstate(ppd, QIBL_LINKV, 10);
+ break;
+ case IB_PORT_ARMED:
+ qib_set_linkstate(ppd, QIB_IB_LINKARM);
+ break;
+ case IB_PORT_ACTIVE:
+ qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
+ break;
+ default:
+ /* XXX We have already partially updated our state! */
+ goto err;
+ }
+
+ ret = subn_get_portinfo(smp, ibdev, port);
+
+ if (clientrereg)
+ pip->clientrereg_resv_subnetto |= 0x80;
+
+ goto done;
+
+err:
+ smp->status |= IB_SMP_INVALID_FIELD;
+get_only:
+ ret = subn_get_portinfo(smp, ibdev, port);
+done:
+ return ret;
+}
+
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @ppd: the physical port data
+ * @key: the PKEY
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (ppd->pkeys[i] != key)
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
+ ppd->pkeys[i] = 0;
+ ret = 1;
+ goto bail;
+ }
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @ppd: the physical port data
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ u16 lkey = key & 0x7FFF;
+ int any = 0;
+ int ret;
+
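+	/*
+	 * Bit 15 of a PKEY is the membership bit (full vs. limited);
+	 * masking with 0x7FFF gives the base key. The default partition
+	 * key (base 0x7FFF) is treated as always present, so no table
+	 * change is needed for it.
+	 */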
+ if (lkey == 0x7FFF) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* Look for an empty slot or a matching PKEY. */
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ /* If it matches exactly, try to increment the ref count */
+ if (ppd->pkeys[i] == key) {
+ if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
+ ret = 0;
+ goto bail;
+ }
+ /* Lost the race. Look for an empty slot below. */
+ atomic_dec(&ppd->pkeyrefs[i]);
+ any++;
+ }
+ /*
+ * It makes no sense to have both the limited and unlimited
+ * PKEY set at the same time since the unlimited one will
+ * disable the limited one.
+ */
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ /* for qibstats, etc. */
+ ppd->pkeys[i] = key;
+ ret = 1;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * set_pkeys - set the PKEY table for ctxt 0
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+ int i;
+ int changed = 0;
+
+ /*
+ * IB port one/two always maps to context zero/one,
+ * always a kernel context, no locking needed
+ * If we get here with ppd setup, no need to check
+ * that rcd is valid.
+ */
+ ppd = dd->pport + (port - 1);
+ rcd = dd->rcd[ppd->hw_pidx];
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = rcd->pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The value of this PKEY table entry is changing.
+ * Remove the old entry in the hardware's array of PKEYs.
+ */
+ if (okey & 0x7FFF)
+ changed |= rm_pkey(ppd, okey);
+ if (key & 0x7FFF) {
+ int ret = add_pkey(ppd, key);
+
+ if (ret < 0)
+ key = 0;
+ else
+ changed |= ret;
+ }
+ rcd->pkeys[i] = key;
+ }
+ if (changed) {
+ struct ib_event event;
+
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = 1;
+ ib_dispatch_event(&event);
+ }
+ return 0;
+}
+
+static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ __be16 *p = (__be16 *) smp->data;
+ u16 *q = (u16 *) smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ for (i = 0; i < n; i++)
+ q[i] = be16_to_cpu(p[i]);
+
+ if (startpx != 0 || set_pkeys(dd, port, q) != 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_pkeytable(smp, ibdev, port);
+}
+
+static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
+ *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
+
+ return reply(smp);
+}
+
+static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ return reply(smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
+ ibp->sl_to_vl[i] = *p >> 4;
+ ibp->sl_to_vl[i + 1] = *p & 0xF;
+ }
+ qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
+ _QIB_EVENT_SL2VL_CHANGE_BIT);
+
+ return subn_get_sl_to_vl(smp, ibdev, port);
+}
+
+static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_vl_arb(smp, ibdev, port);
+}
+
+static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ /*
+ * For now, we only send the trap once so no need to process this.
+ * o13-6, o13-7,
+ * o14-3.a4 The SMA shall not send any message in response to a valid
+ * SubnTrapRepress() message.
+ */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+}
+
+static int pma_get_classportinfo(struct ib_perf *pmp,
+ struct ib_device *ibdev)
+{
+ struct ib_pma_classportinfo *p =
+ (struct ib_pma_classportinfo *)pmp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->attr_mod != 0)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ /* Note that AllPortSelect is not valid */
+ p->base_version = 1;
+ p->class_version = 1;
+ p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ /*
+ * Set the most significant bit of CM2 to indicate support for
+ * congestion statistics
+ */
+ p->reserved[0] = dd->psxmitwait_supported << 7;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->resp_time_value = 18;
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
+ p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->counter_width = 4; /* 32 bit counters */
+ p->counter_mask0_9 = COUNTER_MASK0_9;
+ p->sample_start = cpu_to_be32(ibp->pma_sample_start);
+ p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ p->counter_select[0] = ibp->pma_counter_select[0];
+ p->counter_select[1] = ibp->pma_counter_select[1];
+ p->counter_select[2] = ibp->pma_counter_select[2];
+ p->counter_select[3] = ibp->pma_counter_select[3];
+ p->counter_select[4] = ibp->pma_counter_select[4];
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status, xmit_flags;
+ int ret;
+
+ if (pmp->attr_mod != 0 || p->port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&ibp->lock, flags);
+
+ /* Port Sampling code owns the PS* HW counters */
+ xmit_flags = ppd->cong_stats.flags;
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE ||
+ (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
+ xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
+ ibp->pma_sample_start = be32_to_cpu(p->sample_start);
+ ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
+ ibp->pma_tag = be16_to_cpu(p->tag);
+ ibp->pma_counter_select[0] = p->counter_select[0];
+ ibp->pma_counter_select[1] = p->counter_select[1];
+ ibp->pma_counter_select[2] = p->counter_select[2];
+ ibp->pma_counter_select[3] = p->counter_select[3];
+ ibp->pma_counter_select[4] = p->counter_select[4];
+ dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
+ ibp->pma_sample_start);
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+
+bail:
+ return ret;
+}
+
+static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* This function assumes that the xmit_wait lock is already held */
+static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
+{
+ u32 delta;
+
+ delta = get_counter(&ppd->ibport_data, ppd,
+ IB_PMA_PORT_XMIT_WAIT);
+ return ppd->cong_stats.counter + delta;
+}
+
+static void cache_hw_sample_counters(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ ppd->cong_stats.counter_cache.psxmitdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
+ ppd->cong_stats.counter_cache.psrcvdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
+ ppd->cong_stats.counter_cache.psxmitpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
+ ppd->cong_stats.counter_cache.psrcvpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
+ ppd->cong_stats.counter_cache.psxmitwait =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
+}
+
+static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->cong_stats.counter_cache.psxmitdata;
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->cong_stats.counter_cache.psrcvdata;
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->cong_stats.counter_cache.psxmitpkts;
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->cong_stats.counter_cache.psrcvpkts;
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->cong_stats.counter_cache.psxmitwait;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pma_get_portsamplesresult(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult *p =
+ (struct ib_pma_portsamplesresult *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be32(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult_ext *p =
+ (struct ib_pma_portsamplesresult_ext *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ /* Port Sampling code owns the PS* HW counters */
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ /* 64 bits */
+ p->extended_width = cpu_to_be32(0x80000000);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be64(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+ u8 port_select = p->port_select;
+
+ qib_get_counters(ppd, &cntrs);
+
+ /* Adjust counters for any resets done. */
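+	/*
+	 * The z_* fields hold the values captured by the PortCounters Set
+	 * handlers; subtracting them gives counts since the last logical
+	 * clear, since the hardware counters cannot themselves be reset.
+	 */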
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16((u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+ if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+ p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
+ if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
+ p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
+ if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
+ p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_packets =
+ cpu_to_be32((u32)cntrs.port_xmit_packets);
+ if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
+ p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_packets =
+ cpu_to_be32((u32) cntrs.port_rcv_packets);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ /* Congestion PMA packets start at offset 24 not 64 */
+ struct ib_pma_portcounters_cong *p =
+ (struct ib_pma_portcounters_cong *)pmp->reserved;
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u64 xmit_wait_counter;
+ unsigned long flags;
+
+ /*
+ * This check is performed only in the GET method because the
+ * SET method ends up calling this anyway.
+ */
+ if (!dd->psxmitwait_supported)
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ if (port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ qib_get_counters(ppd, &cntrs);
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ xmit_wait_counter = xmit_wait_get_value_delta(ppd);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -=
+ ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+
+ memset(pmp->reserved, 0, sizeof(pmp->reserved) +
+ sizeof(pmp->data));
+
+ /*
+ * Set top 3 bits to indicate interval in picoseconds in
+ * remaining bits.
+ */
+ p->port_check_rate =
+ cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
+ (dd->psxmitwait_check_rate &
+ ~(QIB_XMIT_RATE_PICO << 13)));
+ p->port_adr_events = cpu_to_be64(0);
+ p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
+ p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
+ p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
+ p->port_xmit_packets =
+ cpu_to_be64(cntrs.port_xmit_packets);
+ p->port_rcv_packets =
+ cpu_to_be64(cntrs.port_rcv_packets);
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16(
+ (u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter =
+ (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16(
+ (u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+
+ return reply((struct ib_smp *)pmp);
+}
+
+static int pma_get_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters_ext *p =
+ (struct ib_pma_portcounters_ext *)pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ /* Adjust counters for any resets done. */
+ swords -= ibp->z_port_xmit_data;
+ rwords -= ibp->z_port_rcv_data;
+ spkts -= ibp->z_port_xmit_packets;
+ rpkts -= ibp->z_port_rcv_packets;
+
+ p->port_xmit_data = cpu_to_be64(swords);
+ p->port_rcv_data = cpu_to_be64(rwords);
+ p->port_xmit_packets = cpu_to_be64(spkts);
+ p->port_rcv_packets = cpu_to_be64(rpkts);
+ p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
+ p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
+ p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
+ p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+
+ /*
+ * Since the HW doesn't support clearing counters, we save the
+ * current count and subtract it from future responses.
+ */
+ qib_get_counters(ppd, &cntrs);
+
+ if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+ if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+
+ if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+
+ return pma_get_portcounters(pmp, ibdev, port);
+}
+
+static int pma_set_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ struct qib_verbs_counters cntrs;
+ u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ int ret = 0;
+ unsigned long flags;
+
+ qib_get_counters(ppd, &cntrs);
+ /* Get counter values before we save them */
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+
+ if (counter_select & IB_PMA_SEL_CONG_XMIT) {
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ ppd->cong_stats.counter = 0;
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
+ 0x0);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ }
+ if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ }
+ if (counter_select & IB_PMA_SEL_CONG_ALL) {
+ ibp->z_symbol_error_counter =
+ cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter =
+ cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards =
+ cntrs.port_xmit_discards;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ return ret;
+}
+
+static int pma_set_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = swords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = rwords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = spkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = rpkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
+ ibp->n_unicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
+ ibp->n_unicast_rcv = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
+ ibp->n_multicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
+ ibp->n_multicast_rcv = 0;
+
+ return pma_get_portcounters_ext(pmp, ibdev, port);
+}
+
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u8 port, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ ret = check_mkey(ibp, smp, mad_flags);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * If this is a get/set portinfo, we already check the
+ * M_Key if the MAD is for another port and the M_Key
+ * is OK on the receiving port. This check is needed
+ * to increment the error counters when the M_Key
+ * fails to match on *both* ports.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+ goto bail;
+ }
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = subn_get_nodedescription(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = subn_get_nodeinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_get_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_get_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_get_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_get_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_get_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_set_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_set_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_set_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_set_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_set_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (smp->attr_id == IB_SMP_ATTR_NOTICE)
+ ret = subn_trap_repress(smp, ibdev, port);
+ else {
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ }
+ goto bail;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ case IB_MGMT_METHOD_SEND:
+ if (ib_get_smp_direction(smp) &&
+ smp->attr_id == QIB_VENDOR_IPG) {
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
+ smp->data[0]);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply(smp);
+ }
+
+bail:
+ return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u8 port,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+ if (pmp->class_version != 1) {
+ pmp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ switch (pmp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = pma_get_classportinfo(pmp, ibdev);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT:
+ ret = pma_get_portsamplesresult(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT_EXT:
+ ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_get_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_get_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->attr_id) {
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_set_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_set_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_set_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_set_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ pmp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_smp *) pmp);
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ int ret;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ goto bail;
+
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port, in_mad, out_mad);
+ goto bail;
+
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ return ret;
+}
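For context, the core MAD layer reaches this entry point through the device's
process_mad hook; the actual hookup lives in the verbs registration code
outside this hunk, so the sketch below is illustrative only.

/* Sketch only: point the standard ib_device MAD hook at the handler above. */
static void example_wire_up_mad(struct ib_device *ibdev)
{
	ibdev->process_mad = qib_process_mad;
}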
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+static void xmit_wait_timer_func(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ /* save counter cache */
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ } else
+ goto done;
+ }
+ ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
+done:
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
+}
+
+int qib_create_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+ int ret;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ /* Initialize xmit_wait structure */
+ dd->pport[p].cong_stats.counter = 0;
+ init_timer(&dd->pport[p].cong_stats.timer);
+ dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
+ dd->pport[p].cong_stats.timer.data =
+ (unsigned long)(&dd->pport[p]);
+ dd->pport[p].cong_stats.timer.expires = 0;
+ add_timer(&dd->pport[p].cong_stats.timer);
+
+ ibp->send_agent = agent;
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ }
+
+ return ret;
+}
+
+void qib_free_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (ibp->sm_ah) {
+ ib_destroy_ah(&ibp->sm_ah->ibah);
+ ibp->sm_ah = NULL;
+ }
+ if (dd->pport[p].cong_stats.timer.data)
+ del_timer_sync(&dd->pport[p].cong_stats.timer);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 00000000000..147aff9117d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+struct ib_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __attribute__ ((packed));
+
+struct ib_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 issuer_lid;
+ __be16 toggle_count;
+
+ union {
+ struct {
+ u8 details[54];
+ } raw_data;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __attribute__ ((packed)) ntc_129_131;
+
+ struct {
+ __be16 reserved;
+			__be16	lid;		/* LID where change occurred */
+ u8 reserved2;
+ u8 local_changes; /* low bit - local changes */
+ __be32 new_cap_mask; /* new capability mask */
+ u8 reserved3;
+ u8 change_flags; /* low 3 bits only */
+ } __attribute__ ((packed)) ntc_144;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* lid where sys guid changed */
+ __be16 reserved2;
+ __be64 new_sys_guid;
+ } __attribute__ ((packed)) ntc_145;
+
+ struct {
+ __be16 reserved;
+ __be16 lid;
+ __be16 dr_slid;
+ u8 method;
+ u8 reserved2;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 reserved3;
+ u8 dr_trunc_hop;
+ u8 dr_rtn_path[30];
+ } __attribute__ ((packed)) ntc_256;
+
+ struct {
+ __be16 reserved;
+ __be16 lid1;
+ __be16 lid2;
+ __be32 key;
+ __be32 sl_qp1; /* SL: high 4 bits */
+ __be32 qp2; /* high 8 bits reserved */
+ union ib_gid gid1;
+ union ib_gid gid2;
+ } __attribute__ ((packed)) ntc_257_258;
+
+ } details;
+};
+
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
+/*
+ * Generic trap/notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Repress trap/notice flags
+ */
+#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
+#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
+#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
+#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
+#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
+#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
+#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
+#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE 0x80
+#define IB_NOTICE_TRAP_DR_TRUNC 0x40
+
+struct ib_vl_weight_elem {
+ u8 vl; /* Only low 4 bits, upper 4 bits reserved */
+ u8 weight;
+};
+
+#define IB_VLARB_LOWPRI_0_31 1
+#define IB_VLARB_LOWPRI_32_63 2
+#define IB_VLARB_HIGHPRI_0_31 3
+#define IB_VLARB_HIGHPRI_32_63 4
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
+#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
+
+struct ib_perf {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 unused;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+struct ib_pma_classportinfo {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ u8 reserved[3];
+ u8 resp_time_value; /* only lower 5 bits */
+ union ib_gid redirect_gid;
+ __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 redirect_lid;
+ __be16 redirect_pkey;
+ __be32 redirect_qp; /* only lower 24 bits */
+ __be32 redirect_qkey;
+ union ib_gid trap_gid;
+ __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 trap_lid;
+ __be16 trap_pkey;
+ __be32 trap_hl_qp; /* 8, 24 bits respectively */
+ __be32 trap_qkey;
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplescontrol {
+ u8 opcode;
+ u8 port_select;
+ u8 tick;
+ u8 counter_width; /* only lower 3 bits */
+ __be32 counter_mask0_9; /* 2, 10 * 3, bits */
+ __be16 counter_mask10_14; /* 1, 5 * 3, bits */
+ u8 sample_mechanisms;
+ u8 sample_status; /* only lower 2 bits */
+ __be64 option_mask;
+ __be64 vendor_mask;
+ __be32 sample_start;
+ __be32 sample_interval;
+ __be16 tag;
+ __be16 counter_select[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult_ext {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 extended_width; /* only upper 2 bits */
+ __be64 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved1;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved2;
+ __be16 vl15_dropped;
+ __be32 port_xmit_data;
+ __be32 port_rcv_data;
+ __be32 port_xmit_packets;
+ __be32 port_rcv_packets;
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters_cong {
+ u8 reserved;
+ u8 reserved1;
+ __be16 port_check_rate;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved2;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved3;
+ __be16 vl15_dropped;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_xmit_wait;
+ __be64 port_adr_events;
+} __attribute__ ((packed));
+
+#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
+#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
+
+#define QIB_XMIT_RATE_UNSUPPORTED 0x0
+#define QIB_XMIT_RATE_PICO 0x7
+/* number of 4nsec cycles equaling 2secs */
+#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
+
+#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
+
+#define IB_PMA_SEL_CONG_ALL 0x01
+#define IB_PMA_SEL_CONG_PORT_DATA 0x02
+#define IB_PMA_SEL_CONG_XMIT 0x04
+#define IB_PMA_SEL_CONG_ROUTING 0x08
+
+struct ib_pma_portcounters_ext {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be32 reserved1;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_unicast_xmit_packets;
+ __be64 port_unicast_rcv_packets;
+ __be64 port_multicast_xmit_packets;
+ __be64 port_multicast_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
+
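The IB_PMA_SELX_* tests in pma_set_portcounters_ext() earlier in this patch
AND a big-endian counter_select field against big-endian constants, so no
byte swapping is needed before the comparison. A small host-side sketch of
the same idea (illustrative only, using htons() in place of cpu_to_be16()):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htons(), standing in for cpu_to_be16() */

int main(void)
{
	uint16_t counter_select = htons(0x0003);	/* XMIT_DATA | RCV_DATA */
	uint16_t sel_xmit_data  = htons(0x0001);	/* IB_PMA_SELX_PORT_XMIT_DATA */

	if (counter_select & sel_xmit_data)
		printf("PortXmitData selected\n");
	return 0;
}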
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
+#define COUNTER_MASK0_9 \
+ cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
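A quick standalone check of the CounterMasks packing above: with capability
code 1 in the first five 3-bit slots (counter 0 in the most significant slot,
bits 29:27), the host-order value before cpu_to_be32() comes out as 0x09248000.
This is a throwaway sketch; the EX_ prefix just avoids clashing with the real
macro above.

#include <stdio.h>
#include <stdint.h>

#define EX_COUNTER_MASK(q, n) ((uint32_t)(q) << ((9 - (n)) * 3))

int main(void)
{
	uint32_t m = EX_COUNTER_MASK(1, 0) | EX_COUNTER_MASK(1, 1) |
		     EX_COUNTER_MASK(1, 2) | EX_COUNTER_MASK(1, 3) |
		     EX_COUNTER_MASK(1, 4);

	printf("counter_mask0_9 (cpu order) = 0x%08x\n", m); /* 0x09248000 */
	return 0;
}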
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 00000000000..8b73a11d571
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct qib_mmap_info
+ */
+void qib_release_mmap_info(struct kref *ref)
+{
+ struct qib_mmap_info *ip =
+ container_of(ref, struct qib_mmap_info, ref);
+ struct qib_ibdev *dev = to_idev(ip->context->device);
+
+ spin_lock_irq(&dev->pending_lock);
+ list_del(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+/*
+ * qib_vma_open() and qib_vma_close() track how many times the object
+ * (a CQ, QP, or SRQ buffer) is mapped, so it isn't released while in use.
+ */
+static void qib_vma_open(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void qib_vma_close(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, qib_release_mmap_info);
+}
+
+static struct vm_operations_struct qib_vm_ops = {
+ .open = qib_vma_open,
+ .close = qib_vma_close,
+};
+
+/**
+ * qib_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qib_ibdev *dev = to_idev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct qib_mmap_info *ip, *pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&dev->pending_lock);
+ list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ pending_mmaps) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (__u64) offset != ip->offset)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &qib_vm_ops;
+ vma->vm_private_data = ip;
+ qib_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&dev->pending_lock);
+done:
+ return ret;
+}
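Seen from user space, the pending-mmap search above pairs with an ordinary
mmap() against the verbs device file, using the offset handed back when the
CQ/QP/SRQ was created. A hypothetical sketch; the fd and offset plumbing are
assumptions, not part of this patch.

/* Hypothetical user-space counterpart: map a CQ/QP/SRQ buffer using the
 * offset returned at object creation. Names here are illustrative. */
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

static void *map_verbs_object(int uverbs_fd, uint64_t offset, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       uverbs_fd, (off_t)offset);

	return p == MAP_FAILED ? NULL : p;
}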
+
+/*
+ * Allocate information for qib_mmap
+ */
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj) {
+ struct qib_mmap_info *ip;
+
+ ip = kmalloc(sizeof *ip, GFP_KERNEL);
+ if (!ip)
+ goto bail;
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+bail:
+ return ip;
+}
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj)
+{
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ ip->size = size;
+ ip->obj = obj;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 00000000000..5f95f0f6385
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+
+/* Fast memory region */
+struct qib_fmr {
+ struct ib_fmr ibfmr;
+ u8 page_shift;
+ struct qib_mregion mr; /* must be last */
+};
+
+static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct qib_fmr, ibfmr);
+}
+
+/**
+ * qib_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see qib_dma.c).
+ */
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct qib_ibdev *dev = to_idev(pd->device);
+ struct qib_mr *mr;
+ struct ib_mr *ret;
+ unsigned long flags;
+
+ if (to_ipd(pd)->user) {
+ ret = ERR_PTR(-EPERM);
+ goto bail;
+ }
+
+ mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.access_flags = acc;
+ atomic_set(&mr->mr.refcount, 0);
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (!dev->dma_mr)
+ dev->dma_mr = &mr->mr;
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
+{
+ struct qib_mr *mr;
+ int m, i = 0;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+ if (!mr)
+ goto done;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+ if (!mr->mr.map[i])
+ goto bail;
+ }
+ mr->mr.mapsz = m;
+ mr->mr.max_segs = count;
+
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ if (!qib_alloc_lkey(lk_table, &mr->mr))
+ goto bail;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+
+ atomic_set(&mr->mr.refcount, 0);
+ goto done;
+
+bail:
+ while (i)
+ kfree(mr->mr.map[--i]);
+ kfree(mr);
+ mr = NULL;
+
+done:
+ return mr;
+}
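The first-level map sizing in alloc_mr() is a plain ceiling division of the
segment count. A tiny sketch with an illustrative segment size; the real
QIB_SEGSZ value comes from the driver headers, not from here.

/* Sketch of the alloc_mr() map sizing; EX_SEGSZ is illustrative only. */
#include <stdio.h>

#define EX_SEGSZ 16

static int maps_needed(int count)
{
	return (count + EX_SEGSZ - 1) / EX_SEGSZ;
}

int main(void)
{
	/* 1 -> 1 map, 16 -> 1 map, 17 -> 2 maps */
	printf("%d %d %d\n", maps_needed(1), maps_needed(16), maps_needed(17));
	return 0;
}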
+
+/**
+ * qib_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ struct qib_mr *mr;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+ if (mr == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = *iova_start;
+ mr->mr.iova = *iova_start;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = acc;
+ mr->umem = NULL;
+
+ m = 0;
+ n = 0;
+ for (i = 0; i < num_phys_buf; i++) {
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
+ mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+ mr->mr.length += buffer_list[i].size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the QLogic_IB driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct qib_mr *mr;
+ struct ib_umem *umem;
+ struct ib_umem_chunk *chunk;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ if (length == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *) umem;
+
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list)
+ n += chunk->nents;
+
+ mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ ib_umem_release(umem);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = umem->offset;
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ m = 0;
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
+ for (i = 0; i < chunk->nents; i++) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(&chunk->page_list[i]));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by qib_get_dma_mr()
+ * or qib_reg_user_mr().
+ */
+int qib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct qib_mr *mr = to_imr(ibmr);
+ struct qib_ibdev *dev = to_idev(ibmr->device);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(dev, &mr->mr);
+ if (ret)
+ return ret;
+
+ i = mr->mr.mapsz;
+ while (i)
+ kfree(mr->mr.map[--i]);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+ return 0;
+}
+
+/*
+ * Allocate a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ *
+ * Return the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+{
+ struct qib_mr *mr;
+
+ mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
+ if (mr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = 0;
+ mr->mr.iova = 0;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = 0;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+}
+
+struct ib_fast_reg_page_list *
+qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+{
+ unsigned size = page_list_len * sizeof(u64);
+ struct ib_fast_reg_page_list *pl;
+
+ if (size > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ pl = kmalloc(sizeof *pl, GFP_KERNEL);
+ if (!pl)
+ return ERR_PTR(-ENOMEM);
+
+ pl->page_list = kmalloc(size, GFP_KERNEL);
+ if (!pl->page_list)
+ goto err_free;
+
+ return pl;
+
+err_free:
+ kfree(pl);
+ return ERR_PTR(-ENOMEM);
+}
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+{
+ kfree(pl->page_list);
+ kfree(pl);
+}
+
+/**
+ * qib_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct qib_fmr *fmr;
+ int m, i = 0;
+ struct ib_fmr *ret;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+ GFP_KERNEL);
+ if (!fmr->mr.map[i])
+ goto bail;
+ }
+ fmr->mr.mapsz = m;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+ goto bail;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.pd = pd;
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ fmr->mr.offset = 0;
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->page_shift = fmr_attr->page_shift;
+
+ atomic_set(&fmr->mr.refcount, 0);
+ ret = &fmr->ibfmr;
+ goto done;
+
+bail:
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ ret = ERR_PTR(-ENOMEM);
+
+done:
+ return ret;
+}
+
+/**
+ * qib_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ int ret;
+
+ if (atomic_read(&fmr->mr.refcount))
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ rkt = &to_idev(ibfmr->device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int qib_unmap_fmr(struct list_head *fmr_list)
+{
+ struct qib_fmr *fmr;
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int qib_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
+ if (ret)
+ return ret;
+
+ i = fmr->mr.mapsz;
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 00000000000..c926bf4541d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+
+#include "qib.h"
+
+/*
+ * This file contains PCIe utility routines that are common to the
+ * various QLogic InfiniPath adapters
+ */
+
+/*
+ * Code to adjust PCIe capabilities.
+ * To minimize the change footprint, we call it
+ * from qib_pcie_params, which every chip-specific
+ * file calls, even though this violates some
+ * expectations of harmlessness.
+ */
+static int qib_tune_pcie_caps(struct qib_devdata *);
+static int qib_tune_pcie_coalesce(struct qib_devdata *);
+
+/*
+ * Do all the common PCIe setup and initialization.
+ * devdata is not yet allocated, and is not allocated until after this
+ * routine returns success. Therefore qib_dev_err() can't be used for error
+ * printing.
+ */
+int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /*
+ * This can happen (in theory) iff:
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
+ -ret);
+ goto done;
+ }
+
+ ret = pci_request_regions(pdev, QIB_DRV_NAME);
+ if (ret) {
+ qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
+ goto bail;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ /*
+		 * If the 64 bit setup fails, try 32 bit.  Some systems
+		 * do not set up 64 bit maps when 2GB or less memory is
+		 * installed.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
+ goto bail;
+ }
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to set DMA consistent mask: %d\n", ret);
+
+ pci_set_master(pdev);
+ ret = pci_enable_pcie_error_reporting(pdev);
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to enable pcie error reporting: %d\n",
+ ret);
+ goto done;
+
+bail:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+done:
+ return ret;
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned long len;
+ resource_size_t addr;
+
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
+#else
+ dd->kregbase = ioremap_nocache(addr, len);
+#endif
+
+ if (!dd->kregbase)
+ return -ENOMEM;
+
+ dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
+ dd->physaddr = addr; /* used for io_remap, etc. */
+
+ /*
+ * Save BARs to rewrite after device reset. Save all 64 bits of
+ * BAR, just in case.
+ */
+ dd->pcibar0 = addr;
+ dd->pcibar1 = addr >> 32;
+ dd->deviceid = ent->device; /* save for later use */
+ dd->vendorid = ent->vendor;
+
+ return 0;
+}
+
+/*
+ * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * void because none of the core PCIe cleanup routines return anything useful
+ */
+void qib_pcie_ddcleanup(struct qib_devdata *dd)
+{
+ u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+ dd->kregbase = NULL;
+ iounmap(base);
+ if (dd->piobase)
+ iounmap(dd->piobase);
+ if (dd->userbase)
+ iounmap(dd->userbase);
+
+ pci_disable_device(dd->pcidev);
+ pci_release_regions(dd->pcidev);
+
+ pci_set_drvdata(dd->pcidev, NULL);
+}
+
+static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
+ struct msix_entry *msix_entry)
+{
+ int ret;
+ u32 tabsize = 0;
+ u16 msix_flags;
+
+ pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
+ tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
+ if (tabsize > *msixcnt)
+ tabsize = *msixcnt;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ if (ret > 0) {
+ tabsize = ret;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ }
+ if (ret) {
+ qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
+ "falling back to INTx\n", tabsize, ret);
+ tabsize = 0;
+ }
+ *msixcnt = tabsize;
+
+ if (ret)
+ qib_enable_intx(dd->pcidev);
+
+}
+
+/*
+ * We save the msi lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int qib_msi_setup(struct qib_devdata *dd, int pos)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ u16 control;
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ qib_dev_err(dd, "pci_enable_msi failed: %d, "
+ "interrupts may not work\n", ret);
+ /* continue even if it fails, we may still be OK... */
+
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+ &dd->msi_lo);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+ &dd->msi_hi);
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ /* now save the data (vector) info */
+ pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
+ ? 12 : 8),
+ &dd->msi_data);
+ return ret;
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
+ struct msix_entry *entry)
+{
+ u16 linkstat, speed;
+ int pos = 0, pose, ret = 1;
+
+ pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (!pose) {
+ qib_dev_err(dd, "Can't find PCI Express capability!\n");
+ /* set up something... */
+ dd->lbus_width = 1;
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ goto bail;
+ }
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+ if (nent && *nent && pos) {
+ qib_msix_setup(dd, pos, nent, entry);
+ ret = 0; /* did it, either MSIx or INTx */
+ } else {
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (pos)
+ ret = qib_msi_setup(dd, pos);
+ else
+ qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+ }
+ if (!pos)
+ qib_enable_intx(dd->pcidev);
+
+ pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+ /*
+ * speed is bits 0-3, linkwidth is bits 4-8
+ * no defines for them in headers
+ */
+ speed = linkstat & 0xf;
+ linkstat >>= 4;
+ linkstat &= 0x1f;
+ dd->lbus_width = linkstat;
+
+ switch (speed) {
+ case 1:
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ break;
+ case 2:
+		dd->lbus_speed = 5000; /* Gen2, 5GHz */
+ break;
+ default: /* not defined, assume gen1 */
+ dd->lbus_speed = 2500;
+ break;
+ }
+
+ /*
+ * Check against expected pcie width and complain if "wrong"
+ * on first initialization, not afterwards (i.e., reset).
+ */
+ if (minw && linkstat < minw)
+ qib_dev_err(dd,
+ "PCIe width %u (x%u HCA), performance reduced\n",
+ linkstat, minw);
+
+ qib_tune_pcie_caps(dd);
+
+ qib_tune_pcie_coalesce(dd);
+
+bail:
+ /* fill in string, even on errors */
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
+ return ret;
+}
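The link-status decode above keeps the speed code in bits 3:0 and the
negotiated width in bits 8:4 of PCI_EXP_LNKSTA. A standalone sketch of the
same bit slicing; the sample register value is made up.

#include <stdio.h>
#include <stdint.h>

static void decode_lnksta(uint16_t linkstat)
{
	unsigned speed = linkstat & 0xf;	 /* 1 = 2.5GT/s, 2 = 5GT/s */
	unsigned width = (linkstat >> 4) & 0x1f; /* negotiated lane count */

	printf("speed code %u, x%u link\n", speed, width);
}

int main(void)
{
	decode_lnksta(0x0081);	/* reports speed code 1, x8 link */
	return 0;
}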
+
+/*
+ * Setup pcie interrupt stuff again after a reset. I'd like to just call
+ * pci_enable_msi() again for msi, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline. Perhaps somehow can tie this
+ * into the PCIe hotplug support at some point
+ */
+int qib_reinit_intr(struct qib_devdata *dd)
+{
+ int pos;
+ u16 control;
+ int ret = 0;
+
+ /* If we aren't using MSI, don't restore it */
+ if (!dd->msi_lo)
+ goto bail;
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (!pos) {
+ qib_dev_err(dd, "Can't find MSI capability, "
+ "can't restore MSI settings\n");
+ ret = 0;
+ /* nothing special for MSIx, just MSI */
+ goto bail;
+ }
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+ dd->msi_lo);
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+ dd->msi_hi);
+ pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+ if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+ control);
+ }
+ /* now rewrite the data (vector) info */
+ pci_write_config_word(dd->pcidev, pos +
+ ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+ dd->msi_data);
+ ret = 1;
+bail:
+ if (!ret && (dd->flags & QIB_HAS_INTX)) {
+ qib_enable_intx(dd->pcidev);
+ ret = 1;
+ }
+
+ /* and now set the pci master bit again */
+ pci_set_master(dd->pcidev);
+
+ return ret;
+}
+
+/*
+ * Disable msi interrupt if enabled, and clear msi_lo.
+ * This is used primarily for the fallback to INTx, but
+ * is also used in reinit after reset, and during cleanup.
+ */
+void qib_nomsi(struct qib_devdata *dd)
+{
+ dd->msi_lo = 0;
+ pci_disable_msi(dd->pcidev);
+}
+
+/*
+ * Same as qib_nomsi, but for MSIx.
+ */
+void qib_nomsix(struct qib_devdata *dd)
+{
+ pci_disable_msix(dd->pcidev);
+}
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we make sure
+ * msi(x) is off.
+ */
+void qib_enable_intx(struct pci_dev *pdev)
+{
+ u16 cw, new;
+ int pos;
+
+ /* first, turn on INTx */
+ pci_read_config_word(pdev, PCI_COMMAND, &cw);
+ new = cw & ~PCI_COMMAND_INTX_DISABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ /* then turn off MSI */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+ new = cw & ~PCI_MSI_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+ }
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ /* then turn off MSIx */
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
+ new = cw & ~PCI_MSIX_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
+ }
+}
+
+/*
+ * These two helper routines let the device reset code keep all the
+ * PCIe handling out of the chip-specific driver code.
+ */
+void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
+{
+ pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+}
+
+void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
+{
+ int r;
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->pcibar0);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->pcibar1);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+ /* now re-enable memory access, and restore cosmetic settings */
+ pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+ r = pci_enable_device(dd->pcidev);
+ if (r)
+ qib_dev_err(dd, "pci_enable_device failed after "
+ "reset: %d\n", r);
+}
+
+/* code to adjust PCIe capabilities. */
+
+static int fld2val(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ wd &= mask;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd /= lsbmask;
+ return wd;
+}
+
+static int val2fld(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd *= lsbmask;
+ return wd;
+}
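The two helpers above shift a field value to and from its position using only
the field's mask. For example, with the standard PCI_EXP_DEVCTL_PAYLOAD mask
(bits 7:5), a Device Control value of 0x2840 decodes to payload code 2, i.e.
128 << 2 = 512 bytes. A standalone sketch; the register value is made up and
fld2val() is copied inline so the sketch compiles on its own.

#include <stdio.h>

#define EX_PCI_EXP_DEVCTL_PAYLOAD 0x00e0	/* bits 7:5 */

static int ex_fld2val(int wd, int mask)		/* copy of fld2val() above */
{
	int lsbmask = mask ^ (mask & (mask - 1));

	return mask ? (wd & mask) / lsbmask : 0;
}

int main(void)
{
	int devctl = 0x2840;			/* payload field holds code 2 */
	int code = ex_fld2val(devctl, EX_PCI_EXP_DEVCTL_PAYLOAD);

	printf("payload code %d -> %d bytes\n", code, 128 << code);
	return 0;
}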
+
+static int qib_pcie_coalesce;
+module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
+
+/*
+ * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
+ * chipsets. This is known to be unsafe for some revisions of some
+ * of these chipsets, with some BIOS settings, and enabling it on those
+ * systems may result in the system crashing, and/or data corruption.
+ */
+static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+{
+ int r;
+ struct pci_dev *parent;
+ int ppos;
+ u16 devid;
+ u32 mask, bits, val;
+
+ if (!qib_pcie_coalesce)
+ return 0;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ return 1;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (!ppos)
+ return 1;
+ if (parent->vendor != 0x8086)
+ return 1;
+
+ /*
+ * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
+ * - bit 11: COALESCE_FORCE: need to set to 0
+ * - bit 10: COALESCE_EN: need to set to 1
+	 * (but limitations on some chipsets)
+	 *
+	 * On the Intel 5000, 5100, and 7300 chipsets, there is also:
+	 * - bit 25:24: COALESCE_MODE, need to set to 0
+ */
+ devid = parent->device;
+ if (devid >= 0x25e2 && devid <= 0x25fa) {
+ u8 rev;
+
+ /* 5000 P/V/X/Z */
+ pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
+ if (rev <= 0xb2)
+ bits = 1U << 10;
+ else
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x65e2 && devid <= 0x65fa) {
+ /* 5100 */
+ bits = 1U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x4021 && devid <= 0x402e) {
+ /* 5400 */
+ bits = 7U << 10;
+ mask = 7U << 10;
+ } else if (devid >= 0x3604 && devid <= 0x360a) {
+ /* 7300 */
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else {
+ /* not one of the chipsets that we know about */
+ return 1;
+ }
+ pci_read_config_dword(parent, 0x48, &val);
+ val &= ~mask;
+ val |= bits;
+ r = pci_write_config_dword(parent, 0x48, val);
+ return 0;
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int qib_pcie_caps;
+module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+
+static int qib_tune_pcie_caps(struct qib_devdata *dd)
+{
+ int ret = 1; /* Assume the worst */
+ struct pci_dev *parent;
+ int ppos, epos;
+ u16 pcaps, pctl, ecaps, ectl;
+ int rc_sup, ep_sup;
+ int rc_cur, ep_cur;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ goto bail;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (ppos) {
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
+ } else
+ goto bail;
+ /* Find out supported and configured values for endpoint (us) */
+ epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (epos) {
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
+ } else
+ goto bail;
+ ret = 0;
+ /* Find max payload supported by root, endpoint */
+ rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
+ ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
+ if (rc_sup > ep_sup)
+ rc_sup = ep_sup;
+
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+
+ /* If Supported greater than limit in module param, limit it */
+ if (rc_sup > (qib_pcie_caps & 7))
+ rc_sup = qib_pcie_caps & 7;
+ /* If less than (allowed, supported), bump root payload */
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ /* If less than (allowed, supported), bump endpoint payload */
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+
+ /*
+ * Now the Read Request size.
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+ rc_sup = 5;
+ if (rc_sup > ((qib_pcie_caps >> 4) & 7))
+ rc_sup = (qib_pcie_caps >> 4) & 7;
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+bail:
+ return ret;
+}
+/* End of PCIe capability tuning */
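As a cross-check of the Read Request sizing above: the encoding is
size = 128 << code, so code 5 is indeed 4096 bytes, the PCIe maximum.
A throwaway sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	for (int code = 0; code <= 5; code++)
		printf("readrq code %d -> %d bytes\n", code, 128 << code);
	/* code 5 -> 4096 bytes */
	return 0;
}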
+
+/*
+ * Everything from here through the qib_pci_err_handler definition is
+ * invoked via the PCI error recovery infrastructure, registered with the core.
+ */
+static pci_ers_result_t
+qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ qib_devinfo(pdev, "State Normal, ignoring\n");
+ break;
+
+ case pci_channel_io_frozen:
+ qib_devinfo(pdev, "State Frozen, requesting reset\n");
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ qib_devinfo(pdev, "State Permanent Failure, disabling\n");
+ if (dd) {
+ /* no more register accesses! */
+ dd->flags &= ~QIB_PRESENT;
+ qib_disable_after_error(dd);
+ }
+ /* else early, or other problem */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default: /* shouldn't happen */
+ qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
+ state);
+ break;
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ u64 words = 0U;
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ if (dd && dd->pport) {
+ words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
+ if (words == ~0ULL)
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ }
+ qib_devinfo(pdev, "QIB mmio_enabled function called, "
+ "read wordscntr %Lx, returning %d\n", words, ret);
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_slot_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+qib_pci_link_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+qib_pci_resume(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ qib_devinfo(pdev, "QIB resume function called\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ /*
+ * Running jobs will fail, since this reset is asynchronous,
+ * unlike a sysfs-requested reset. Still better than
+ * doing nothing.
+ */
+ qib_init(dd, 1); /* same as re-init after reset */
+}
+
+struct pci_error_handlers qib_pci_err_handler = {
+ .error_detected = qib_pci_error_detected,
+ .mmio_enabled = qib_pci_mmio_enabled,
+ .link_reset = qib_pci_link_reset,
+ .slot_reset = qib_pci_slot_reset,
+ .resume = qib_pci_resume,
+};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 00000000000..10b8c444dd3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void qib_pio_copy(void __iomem *to, const void *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + (count >> 1);
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+ if (count & 1)
+ __raw_writel(*(const u32 *)src, dst);
+#else
+ u32 __iomem *dst = to;
+ const u32 *src = from;
+ const u32 *end = src + count;
+
+ while (src < end)
+ __raw_writel(*src++, dst++);
+#endif
+}
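The chunking above (64-bit stores for pairs of dwords, one 32-bit store for an odd trailing dword) can be sketched in plain user space as below; this is illustrative only, since the real routine issues __raw_writeq()/__raw_writel() against an __iomem destination and makes no ordering guarantees:

#include <stdint.h>
#include <stdio.h>

/* Copy 'count' 32-bit words: 64-bit stores for pairs, one 32-bit store
 * for an odd trailing word. Both buffers are assumed 64-bit aligned. */
static void pio_copy_sketch(void *to, const void *from, size_t count)
{
	uint64_t *dst = to;
	const uint64_t *src = from;
	const uint64_t *end = src + (count >> 1);

	while (src < end)
		*dst++ = *src++;
	if (count & 1)
		*(uint32_t *)dst = *(const uint32_t *)src;
}

int main(void)
{
	uint64_t src_store[3] = { 0 }, dst_store[3] = { 0 };
	uint32_t *src = (uint32_t *)src_store, *dst = (uint32_t *)dst_store;
	int i;

	for (i = 0; i < 5; i++)
		src[i] = i + 1;
	pio_copy_sketch(dst_store, src_store, 5);	/* copies 5 dwords */
	for (i = 0; i < 5; i++)
		printf("%u ", dst[i]);
	printf("\n");
	return 0;
}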
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 00000000000..e0f65e39076
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * BITS_PER_PAGE + off;
+}
+
+static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off,
+ unsigned r)
+{
+ if (qpt->mask) {
+ off++;
+ if ((off & qpt->mask) >> 1 != r)
+ off = ((off & qpt->mask) ?
+ (off | qpt->mask) + 1 : off) | (r << 1);
+ } else
+ off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ return off;
+}
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static u32 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
+
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/*
+ * Allocate the next available QPN or
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ enum ib_qp_type type, u8 port)
+{
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+ u32 ret;
+ int r;
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ r = smp_processor_id();
+ if (r >= dd->n_krcv_queues)
+ r %= dd->n_krcv_queues;
+ qpn = qpt->last + 1;
+ if (qpn >= QPN_MAX)
+ qpn = 2;
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
+ qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
+ (r << 1);
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset = find_next_offset(qpt, map, offset, r);
+ qpn = mk_qpn(qpt, map, offset);
+ /*
+ * This test differs from alloc_pidmap().
+ * If find_next_offset() does find a zero
+ * bit, we don't need to check for QPN
+ * wrapping around past our starting QPN.
+ * We just need to be sure we don't loop
+ * forever.
+ */
+ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ offset = qpt->mask ? (r << 1) : 0;
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ offset = qpt->mask ? (r << 1) : 0;
+ } else {
+ map = &qpt->map[0];
+ offset = qpt->mask ? (r << 1) : 2;
+ }
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
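The qpt->mask arithmetic above steers QPNs onto kernel receive contexts: the masked bits above bit 0 must equal the chosen context index r, and the round-up expression advances to the next QPN with that property. A standalone sketch of just that expression, using an illustrative mask of 0x7 (the real dd->qpn_mask is chip-dependent):

#include <stdio.h>

/* Bump 'qpn' to the next value whose mask bits (above bit 0) select
 * receive context 'r', mirroring the allocator's round-up expression. */
static unsigned steer_qpn(unsigned qpn, unsigned mask, unsigned r)
{
	if (mask && ((qpn & mask) >> 1) != r)
		qpn = ((qpn & mask) ? (qpn | mask) + 1 : qpn) | (r << 1);
	return qpn;
}

int main(void)
{
	unsigned mask = 0x7;	/* illustrative: context index lives in bits 1..2 */
	unsigned r = 1;
	unsigned qpn;

	for (qpn = 2; qpn <= 9; qpn++) {
		unsigned next = steer_qpn(qpn, mask, r);

		printf("qpn %u -> %u (context %u)\n",
		       qpn, next, (next & mask) >> 1);
	}
	return 0;
}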
+
+static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+{
+ struct qpn_map *map;
+
+ map = qpt->map + qpn / BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num == 0)
+ ibp->qp0 = qp;
+ else if (qp->ibqp.qp_num == 1)
+ ibp->qp1 = qp;
+ else {
+ qp->next = dev->qp_table[n];
+ dev->qp_table[n] = qp;
+ }
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_qp *q, **qpp;
+ unsigned long flags;
+
+ qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (ibp->qp0 == qp) {
+ ibp->qp0 = NULL;
+ atomic_dec(&qp->refcount);
+ } else if (ibp->qp1 == qp) {
+ ibp->qp1 = NULL;
+ atomic_dec(&qp->refcount);
+ } else
+ for (; (q = *qpp) != NULL; qpp = &q->next)
+ if (q == qp) {
+ *qpp = qp->next;
+ qp->next = NULL;
+ atomic_dec(&qp->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/**
+ * qib_free_all_qps - check for QPs still in use
+ * @dd: the device data structure
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs found to be still in use.
+ */
+unsigned qib_free_all_qps(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+ unsigned n, qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct qib_ibport *ibp = &dd->pport[n].ibport_data;
+
+ if (!qib_mcast_tree_empty(ibp))
+ qp_inuse++;
+ if (ibp->qp0)
+ qp_inuse++;
+ if (ibp->qp1)
+ qp_inuse++;
+ }
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+ for (n = 0; n < dev->qp_table_size; n++) {
+ qp = dev->qp_table[n];
+ dev->qp_table[n] = NULL;
+
+ for (; qp; qp = qp->next)
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+
+ return qp_inuse;
+}
+
+/**
+ * qib_lookup_qpn - return the QP with the given QPN
+ * @ibp: the IB port on which to look up the QP
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
+ */
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+{
+ struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qpn == 0)
+ qp = ibp->qp0;
+ else if (qpn == 1)
+ qp = ibp->qp1;
+ else
+ for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+ qp = qp->next)
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ return qp;
+}
+
+/**
+ * qib_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ */
+static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ atomic_set(&qp->s_dma_busy, 0);
+ qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct qib_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * qib_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+ int ret = 0;
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
+ list_del_init(&qp->iowait);
+ }
+ spin_unlock(&dev->pending_lock);
+
+ if (!(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ }
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (qp->s_last != qp->s_head)
+ qib_schedule_send(qp);
+
+ clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.wq) {
+ struct qib_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler)
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_qp *qp = to_iqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int ret;
+ u32 pmtu = 0; /* for gcc warning only */
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > QIB_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+ * Don't allow invalid path_mtu values. It is OK to set it greater
+ * than the active MTU (or even the max_cap, if we have tuned
+ * that to a small MTU). We'll set qp->path_mtu
+ * to the lesser of the requested attribute MTU and the active MTU,
+ * for packetizing messages.
+ * Note that the QP port has to be set in INIT and the MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int mtu, pidx = qp->port_num - 1;
+
+ mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ if (mtu == -1)
+ goto inval;
+ if (mtu > dd->pport[pidx].ibmtu) {
+ switch (dd->pport[pidx].ibmtu) {
+ case 4096:
+ pmtu = IB_MTU_4096;
+ break;
+ case 2048:
+ pmtu = IB_MTU_2048;
+ break;
+ case 1024:
+ pmtu = IB_MTU_1024;
+ break;
+ case 512:
+ pmtu = IB_MTU_512;
+ break;
+ case 256:
+ pmtu = IB_MTU_256;
+ break;
+ default:
+ pmtu = IB_MTU_2048;
+ }
+ } else
+ pmtu = attr->path_mtu;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ /* Stop the sending work queue and retry timer */
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ clear_mr_refs(qp, 1);
+ qib_reset_qp(qp, ibqp->qp_type);
+ }
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to retrigger if QP set to RTR more than once */
+ qp->r_flags &= ~QIB_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ qp->path_mtu = pmtu;
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ qp->timeout = attr->timeout;
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ insert_qp(dev, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ ret = 0;
+ goto bail;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
+ attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * qib_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 qib_compute_aeth(struct qib_qp *qp)
+{
+ u32 aeth = qp->r_msn & QIB_MSN_MASK;
+
+ if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+ struct qib_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check pointers before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ /*
+ * Compute the number of credits available (RWQEs).
+ * XXX Not holding the r_rq.lock here so there is a small
+ * chance that the pair of reads is not atomic.
+ */
+ credits = head - tail;
+ if ((int)credits < 0)
+ credits += qp->r_rq.size;
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits)
+ max = x;
+ else if (min == x)
+ break;
+ else
+ min = x;
+ }
+ aeth |= x << QIB_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
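To make the binary search above concrete, here is a standalone sketch using the same 31-entry table; it returns the largest code whose table entry does not exceed the available credits, with exact matches terminating early:

#include <stdio.h>

/* Same 31-entry table as credit_table[]: code -> credits */
static const unsigned tbl[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* Binary search: largest code whose credit value does not exceed 'credits' */
static unsigned credits_to_code(unsigned credits)
{
	unsigned min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (tbl[x] == credits)
			break;
		if (tbl[x] > credits)
			max = x;
		else if (min == x)
			break;
		else
			min = x;
	}
	return x;
}

int main(void)
{
	printf("100 credits -> code 0x%X (%u)\n", credits_to_code(100),
	       tbl[credits_to_code(100)]);	/* 0xD, i.e. 96 */
	printf("512 credits -> code 0x%X (%u)\n", credits_to_code(512),
	       tbl[credits_to_code(512)]);	/* 0x12, i.e. 512 */
	return 0;
}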
+
+/**
+ * qib_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_qp *qp;
+ int err;
+ struct qib_swqe *swq = NULL;
+ struct qib_ibdev *dev;
+ struct qib_devdata *dd;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
+
+ if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
+ init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
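+ /* FALLTHROUGH: SMI and GSI share the allocation path below */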
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct qib_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct qib_swqe);
+ swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct qib_srq *srq = to_isrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ if (init_attr->srq)
+ sz = 0;
+ else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct qib_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+ qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_waitqueue_head(&qp->wait_dma);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_WORK(&qp->s_work, qib_do_send);
+ INIT_LIST_HEAD(&qp->iowait);
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = QIB_S_SIGNAL_REQ_WR;
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+ init_attr->port_num);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
+ goto bail_qp;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ qp->processor_id = smp_processor_id();
+ qib_reset_qp(qp, init_attr->qp_type);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ ret = ERR_PTR(-ENOSYS);
+ goto bail;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else {
+ u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = qib_create_mmap_info(dev, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ err = ib_copy_to_udata(udata, &(qp->ip->offset),
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ }
+
+ spin_lock(&dev->n_qps_lock);
+ if (dev->n_qps_allocated == ib_qib_max_qps) {
+ spin_unlock(&dev->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_qps_allocated++;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+ goto bail;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+bail_qp:
+ kfree(qp);
+bail_swq:
+ vfree(swq);
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int qib_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+
+ /* Make sure HW and driver activity is stopped. */
+ spin_lock_irq(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock_irq(&qp->s_lock);
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ clear_mr_refs(qp, 1);
+ } else
+ spin_unlock_irq(&qp->s_lock);
+
+ /* all users cleaned up, mark it available */
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * qib_init_qpn_table - initialize the QP number table for a device
+ * @dd: the device data structure
+ * @qpt: the QPN table
+ */
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+{
+ spin_lock_init(&qpt->lock);
+ qpt->last = 1; /* start with QPN 2 */
+ qpt->nmaps = 1;
+ qpt->mask = dd->qpn_mask;
+}
+
+/**
+ * qib_free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+void qib_free_qpn_table(struct qib_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ if (qpt->map[i].page)
+ free_page((unsigned long) qpt->map[i].page);
+}
+
+/**
+ * qib_get_credit - process the credit field of an incoming AETH
+ * @qp: the QP whose send credit limit to update
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void qib_get_credit(struct qib_qp *qp, u32 aeth)
+{
+ u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
+
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == QIB_AETH_CREDIT_INVAL) {
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
+ if (qib_cmp24(credit, qp->s_lsn) > 0) {
+ qp->s_lsn = credit;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ }
+}
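The limit update above is 24-bit sequence arithmetic: the new LSN is (AETH MSN + credit_table[code]) masked to QIB_MSN_MASK, and the comparison must wrap modulo 2^24. A minimal sketch of such a wrapping compare (illustrative; qib_cmp24() is assumed to behave equivalently):

#include <stdint.h>
#include <stdio.h>

/* Compare two 24-bit sequence numbers modulo 2^24: positive result means
 * 'a' is ahead of 'b', negative means behind. */
static int cmp24(uint32_t a, uint32_t b)
{
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	uint32_t lsn = 0xFFFFF0;			/* old limit */
	uint32_t credit = (0xFFFFF0 + 96) & 0xFFFFFF;	/* MSN + 96 credits, wraps */

	if (cmp24(credit, lsn) > 0)
		printf("new limit 0x%06X replaces 0x%06X\n", credit, lsn);
	return 0;
}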
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 00000000000..35b3604b691
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+#include "qib_qsfp.h"
+
+/*
+ * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
+ * in qib_twsi.c
+ */
+#define QSFP_MAX_RETRY 4
+
+static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt, pass = 0;
+ int stuck = 0;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL, and there
+ * is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for read failed\n");
+ ret = -EIO;
+ stuck = 1;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
+ /* Some QSFPs fail the first try; retry as a workaround */
+ if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
+ continue;
+ if (ret) {
+ /* qib_twsi_blk_rd() 1 for error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+ * Module could take up to 10 usec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+ /* set QSFP MODSEL, RST, LP all high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL
+ * going away, and there is no way to tell if it is ready,
+ * so we must wait.
+ */
+ if (stuck)
+ qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
+
+ if (pass >= QSFP_MAX_RETRY && ret)
+ qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
+ else if (pass)
+ qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
+
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * qsfp_write
+ * We do not ordinarily write the QSFP, but this is needed to select
+ * the page on non-flat QSFPs, and possibly for other unusual cases later
+ */
+static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
+ int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL,
+ * and there is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for write failed\n");
+ ret = -EIO;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
+ if (ret) {
+ /* qib_twsi_blk_wr() 1 for error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+ * Module could take up to 10 usec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+ /* set QSFP MODSEL, RST, LP high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /*
+ * Module could take up to 2 msec to respond to MOD_SEL
+ * going away, and there is no way to tell if it is ready,
+ * so we must wait.
+ */
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * For validation, we want to check the checksums, even of the
+ * fields we do not otherwise use. This function reads the bytes from
+ * <first> to <next-1> and returns the 8 LSBs of the sum, or a value < 0 on error
+ */
+static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
+{
+ int ret;
+ u16 cks;
+ u8 bval;
+
+ cks = 0;
+ while (first < next) {
+ ret = qsfp_read(ppd, first, &bval, 1);
+ if (ret < 0)
+ goto bail;
+ cks += bval;
+ ++first;
+ }
+ ret = cks & 0xFF;
+bail:
+ return ret;
+
+}
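The checksum rule verified here and in qib_refresh_qsfp_cache() below is simply the low 8 bits of a byte-wise sum over the covered range (e.g. byte 191 covers bytes 128..190). A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Check byte = low 8 bits of the sum of the covered EEPROM bytes. */
static uint8_t eeprom_cks(const uint8_t *buf, unsigned first, unsigned next)
{
	unsigned sum = 0, i;

	for (i = first; i < next; i++)
		sum += buf[i];
	return (uint8_t)(sum & 0xFF);
}

int main(void)
{
	uint8_t eeprom[224] = { 0 };
	unsigned i;

	for (i = 128; i < 191; i++)		/* fake some module data */
		eeprom[i] = (uint8_t)i;
	eeprom[191] = eeprom_cks(eeprom, 128, 191);	/* store check byte */
	printf("stored %02X, computed %02X\n",
	       eeprom[191], eeprom_cks(eeprom, 128, 191));
	return 0;
}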
+
+int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
+{
+ int ret;
+ int idx;
+ u16 cks;
+ u32 mask;
+ u8 peek[4];
+
+ /* ensure sane contents on invalid reads, for cable swaps */
+ memset(cp, 0, sizeof(*cp));
+
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+ if (ret & mask) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ ret = qsfp_read(ppd, 0, peek, 3);
+ if (ret < 0)
+ goto bail;
+ if ((peek[0] & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
+
+ if ((peek[2] & 2) == 0) {
+ /*
+ * If cable is paged, rather than "flat memory", we need to
+ * set the page to zero, even if it already appears to be zero.
+ */
+ u8 poke = 0;
+ ret = qib_qsfp_write(ppd, 127, &poke, 1);
+ udelay(50);
+ if (ret != 1) {
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "Failed QSFP Page set\n");
+ goto bail;
+ }
+ }
+
+ ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
+ if (ret < 0)
+ goto bail;
+ if ((cp->id & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
+ cks = cp->id;
+
+ ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->pwr;
+
+ ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->len;
+
+ ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->tech;
+
+ ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
+ cks += cp->vendor[idx];
+
+ ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->xt_xcv;
+
+ ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
+ cks += cp->oui[idx];
+
+ ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_PN_LEN; ++idx)
+ cks += cp->partnum[idx];
+
+ ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_REV_LEN; ++idx)
+ cks += cp->rev[idx];
+
+ ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
+ cks += cp->atten[idx];
+
+ ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ cks &= 0xFF;
+ ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
+ if (ret < 0)
+ goto bail;
+ if (cks != cp->cks1)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
+ cks);
+
+ /* Second checksum covers bytes 192..222 (serial, date, lot) */
+ ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks = ret;
+
+ ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_SN_LEN; ++idx)
+ cks += cp->serial[idx];
+
+ ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
+ cks += cp->date[idx];
+
+ ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
+ cks += cp->lot[idx];
+
+ ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
+ if (ret < 0)
+ goto bail;
+ cks &= 0xFF;
+ if (cks != cp->cks2)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
+ cks);
+ return 0;
+
+bail:
+ cp->id = 0;
+ return ret;
+}
+
+const char * const qib_qsfp_devtech[16] = {
+ "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+ "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+ "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+ "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
+static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+
+/*
+ * Initialize structures that control access to QSFP. Called once per port
+ * on cards that support QSFP.
+ */
+void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *))
+{
+ u32 mask, highs;
+ int pins;
+
+ struct qib_devdata *dd = qd->ppd->dd;
+
+ /* Initialize work struct for later QSFP events */
+ INIT_WORK(&qd->work, fevent);
+
+ /*
+ * Later, we may want more validation. For now, just set up pins and
+ * blip reset. If module is present, call qib_refresh_qsfp_cache(),
+ * to do further init.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ highs = mask - QSFP_GPIO_MOD_RST_N;
+ if (qd->ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ highs <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, highs, mask, mask);
+ udelay(20); /* Generous RST dwell */
+
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /* Spec says module can take up to two seconds! */
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (qd->ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ /* Do not try to wait here; better to let the event handler deal with it */
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (pins & mask)
+ goto bail;
+ /* We see a module, but it may be unwise to look yet. Just schedule */
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+bail:
+ return;
+}
+
+void qib_qsfp_deinit(struct qib_qsfp_data *qd)
+{
+ /*
+ * There is nothing to do here for now. Our
+ * work is scheduled with schedule_work(), and
+ * flush_scheduled_work() from remove_one will
+ * block until all work set up with schedule_work()
+ * completes.
+ */
+}
+
+int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
+{
+ struct qib_qsfp_cache cd;
+ u8 bin_buff[QSFP_DUMP_CHUNK];
+ char lenstr[6];
+ int sofar, ret;
+ int bidx = 0;
+
+ sofar = 0;
+ ret = qib_refresh_qsfp_cache(ppd, &cd);
+ if (ret < 0)
+ goto bail;
+
+ lenstr[0] = ' ';
+ lenstr[1] = '\0';
+ if (QSFP_IS_CU(cd.tech))
+ sprintf(lenstr, "%dM ", cd.len);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
+ (QSFP_PWR(cd.pwr) * 4));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
+ qib_qsfp_devtech[cd.tech >> 4]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+ QSFP_VEND_LEN, cd.vendor);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+ QSFP_OUI(cd.oui));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+ QSFP_PN_LEN, cd.partnum);
+ sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+ QSFP_REV_LEN, cd.rev);
+ if (QSFP_IS_CU(cd.tech))
+ sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
+ QSFP_ATTEN_SDR(cd.atten),
+ QSFP_ATTEN_DDR(cd.atten));
+ sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+ QSFP_SN_LEN, cd.serial);
+ sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+ QSFP_DATE_LEN, cd.date);
+ sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+ QSFP_LOT_LEN, cd.lot);
+
+ while (bidx < QSFP_DEFAULT_HDR_CNT) {
+ int iidx;
+ ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
+ if (ret < 0)
+ goto bail;
+ for (iidx = 0; iidx < ret; ++iidx) {
+ sofar += scnprintf(buf + sofar, len-sofar, " %02X",
+ bin_buff[iidx]);
+ }
+ sofar += scnprintf(buf + sofar, len - sofar, "\n");
+ bidx += QSFP_DUMP_CHUNK;
+ }
+ ret = sofar;
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 00000000000..19b527bafd5
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* QSFP support common definitions, for ib_qib driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+
+/*
+ * Below are masks for various QSFP signals, for Port 1.
+ * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
+ * _N means asserted low
+ */
+#define QSFP_GPIO_MOD_SEL_N (4)
+#define QSFP_GPIO_MOD_PRS_N (8)
+#define QSFP_GPIO_INT_N (0x10)
+#define QSFP_GPIO_MOD_RST_N (0x20)
+#define QSFP_GPIO_LP_MODE (0x40)
+#define QSFP_GPIO_PORT2_SHIFT 5
+
+#define QSFP_PAGESIZE 128
+/* Defined fields that QLogic requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+/* Byte 2 is "status LSB". We only care that D2 "Flat Mem" is set. */
+/*
+ * Rest of first 128 not used, although 127 is reserved for page select
+ * if module is not "Flat memory".
+ */
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
+ * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not QLogic req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
+/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not QLogic req'd */
+/* Byte 141 is Extended Rate Select. Not QLogic req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not QLogic req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const qib_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
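The three classification macros above pack a 16-entry boolean table into a single constant indexed by the upper nibble of the technology byte. A standalone sketch showing which codes the 0xED00 constant in QSFP_IS_CU() treats as copper, using the qib_qsfp_devtech name table:

#include <stdio.h>

/* Upper nibble of byte 147 selects one of 16 technologies; a 16-bit
 * constant acts as a packed boolean table indexed by that nibble. */
#define IS_CU(tech)	((0xED00 >> ((tech) >> 4)) & 1)

static const char *const devtech[16] = {
	"850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
	"1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
	"Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
	"Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
};

int main(void)
{
	unsigned code;

	for (code = 0; code < 16; code++)
		printf("%-18s copper=%u\n", devtech[code],
		       (unsigned)IS_CU(code << 4));
	return 0;
}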
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+ oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not Qlogic req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
+/* Byte 190 is Max Case Temp. Not QLogic req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not Qlogic req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * struct qib_qsfp_data encapsulates state of QSFP device for one port.
+ * It will be part of the port-chip-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the (increasingly inaccurately named) eep_lock arbitrate
+ * access to common resources.
+ *
+ */
+
+/*
+ * Hold the parts of the onboard EEPROM that we care about, so we aren't
+ * constantly bit-banging the TWSI interface
+ */
+struct qib_qsfp_cache {
+ u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
+ u8 pwr; /* in D6,7 */
+ u8 len; /* in meters, Cu only */
+ u8 tech;
+ char vendor[QSFP_VEND_LEN];
+ u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
+ u8 oui[QSFP_VOUI_LEN];
+ u8 partnum[QSFP_PN_LEN];
+ u8 rev[QSFP_REV_LEN];
+ u8 atten[QSFP_ATTEN_LEN];
+ u8 cks1; /* Checksum of bytes 128..190 */
+ u8 serial[QSFP_SN_LEN];
+ u8 date[QSFP_DATE_LEN];
+ u8 lot[QSFP_LOT_LEN];
+ u8 cks2; /* Checksum of bytes 192..222 */
+};
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qib_qsfp_data {
+ /* Helps to find our way */
+ struct qib_pportdata *ppd;
+ struct work_struct work;
+ struct qib_qsfp_cache cache;
+ u64 t_insert;
+};
+
+extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
+ struct qib_qsfp_cache *cp);
+extern void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *));
+extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
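+/*
+ * Sketch of the intended call flow (hypothetical board code, not mandated
+ * by this header): embed a struct qib_qsfp_data in the per-port data,
+ * register a work handler with qib_qsfp_init(), and refresh the cache from
+ * that handler, e.g.:
+ *
+ *	static void example_qsfp_event(struct work_struct *work)
+ *	{
+ *		struct qib_qsfp_data *qd =
+ *			container_of(work, struct qib_qsfp_data, work);
+ *		struct qib_qsfp_cache cs;
+ *
+ *		if (qib_refresh_qsfp_cache(qd->ppd, &cs) > 0 &&
+ *		    QSFP_IS_CU(cs.tech))
+ *			use QSFP_ATTEN_SDR(cs.atten) / QSFP_ATTEN_DDR(cs.atten);
+ *	}
+ *
+ * example_qsfp_event() and the "positive return means a valid read"
+ * convention above are assumptions for illustration only.
+ */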
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 00000000000..40c0a373719
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
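+/*
+ * Reposition an SGE state for a restart at "psn": each PSN between wqe->psn
+ * and psn accounts for one pmtu-sized payload already sent, so skip that
+ * many bytes and return how many bytes of the WQE remain to be sent.
+ */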
+static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ qib_skip_sge(ss, len, 0);
+ return wqe->length - len;
+}
+
+static void start_timer(struct qib_qp *qp)
+{
+ qp->s_flags |= QIB_S_TIMER;
+ qp->s_timer.function = rc_timeout;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+ add_timer(&qp->s_timer);
+}
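+/*
+ * Worked example of the formula above (informational only): with
+ * qp->timeout == 14 the retry interval is 4.096 usec * 2^14, roughly
+ * 67 milliseconds.
+ */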
+
+/**
+ * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are in the responder's side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+ struct qib_other_headers *ohdr, u32 pmtu)
+{
+ struct qib_ack_entry *e;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ /* FALLTHROUGH */
+ case OP(ATOMIC_ACKNOWLEDGE):
+ /*
+ * We can increment the tail pointer now that the last
+ * response has been sent instead of only being
+ * constructed.
+ */
+ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
+ qp->s_tail_ack_queue = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_ONLY):
+ case OP(ACKNOWLEDGE):
+ /* Check for no next entry in the queue. */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+ if (qp->s_flags & QIB_S_ACK_PENDING)
+ goto normal;
+ goto bail;
+ }
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST)) {
+ /*
+ * If a RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then we stop sending the remaining responses until
+ * the requester resends the RDMA read request.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ qp->s_rdma_mr = e->rdma_sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ } else {
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_rdma_psn = e->psn;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ } else {
+ /* COMPARE_SWAP or FETCH_ADD */
+ qp->s_cur_sge = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ ohdr->u.at.aeth = qib_compute_aeth(qp);
+ ohdr->u.at.atomic_ack_eth[0] =
+ cpu_to_be32(e->atomic_data >> 32);
+ ohdr->u.at.atomic_ack_eth[1] =
+ cpu_to_be32(e->atomic_data);
+ hwords += sizeof(ohdr->u.at) / sizeof(u32);
+ bth2 = e->psn & QIB_PSN_MASK;
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ len = qp->s_ack_rdma_sge.sge.sge_length;
+ if (len > pmtu)
+ len = pmtu;
+ else {
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ break;
+
+ default:
+normal:
+ /*
+ * Send a regular ACK.
+ * Set the s_ack_state so we wait until after sending
+ * the ACK before setting s_ack_state to ACKNOWLEDGE
+ * (see above).
+ */
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_flags &= ~QIB_S_ACK_PENDING;
+ qp->s_cur_sge = NULL;
+ if (qp->s_nak_state)
+ ohdr->u.aeth =
+ cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->s_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ len = 0;
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ bth2 = qp->s_ack_psn & QIB_PSN_MASK;
+ }
+ qp->s_rdma_ack_cnt++;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0, bth2);
+ return 1;
+
+bail:
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+ return 0;
+}
+
+/**
+ * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_rc_req(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_other_headers *ohdr;
+ struct qib_sge_state *ss;
+ struct qib_swqe *wqe;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ char newreq;
+ unsigned long flags;
+ int ret = 0;
+ int delta;
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /*
+ * The lock is needed to synchronize between the sending tasklet,
+ * the receive interrupt handler, and timeout resends.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Sending responses has higher priority than sending requests. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+ qib_make_rc_ack(dev, qp, ohdr, pmtu))
+ goto done;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ while (qp->s_last != qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ }
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+ goto bail;
+
+ if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+ qp->s_flags |= QIB_S_WAIT_PSN;
+ goto bail;
+ }
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ }
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head)
+ goto bail;
+ /*
+ * If a fence is requested, wait for previous
+ * RDMA read and atomic operations to finish.
+ */
+ if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ qp->s_num_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_FENCE;
+ goto bail;
+ }
+ wqe->psn = qp->s_next_psn;
+ newreq = 1;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = qp->s_psn & QIB_PSN_MASK;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / sizeof(u32);
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /*
+ * Adjust s_next_psn to count the
+ * expected number of responses.
+ */
+ if (len > pmtu)
+ qp->s_next_psn += (len - 1) / pmtu;
+ wqe->lpsn = qp->s_next_psn++;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_state = OP(COMPARE_SWAP);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ ohdr->u.atomic_eth.compare_data = 0;
+ }
+ ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr >> 32);
+ ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_psn = wqe->lpsn + 1;
+ else {
+ qp->s_psn++;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ }
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate a RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate a RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_sending_hpsn = bth2;
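+ /*
+ * (delta << 8) >> 8 below sign-extends the 24-bit PSN difference so the
+ * "request an ACK every QIB_PSN_CREDIT packets" test also works when the
+ * PSN has wrapped.
+ */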
+ delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
+ if (delta && delta % QIB_PSN_CREDIT == 0)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & QIB_S_SEND_ONE) {
+ qp->s_flags &= ~QIB_S_SEND_ONE;
+ qp->s_flags |= QIB_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
+ *
+ * This is called from qib_rc_rcv() and qib_kreceive().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void qib_send_rc_ack(struct qib_qp *qp)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 pbc;
+ u16 lrh0;
+ u32 bth0;
+ u32 hwords;
+ u32 pbufn;
+ u32 __iomem *piobuf;
+ struct qib_ib_header hdr;
+ struct qib_other_headers *ohdr;
+ u32 control;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock;
+
+ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ goto queue_ack;
+
+ /* Construct the header with s_lock held so APM doesn't change it. */
+ ohdr = &hdr.u.oth;
+ lrh0 = QIB_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+ hwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ hwords += qib_make_grh(ibp, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh, hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = QIB_LRH_GRH;
+ }
+ /* read pkey_index w/o lock (it's atomic) */
+ bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->r_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (!(ppd->lflags & QIBL_LINKACTIVE))
+ goto done;
+
+ control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
+ qp->s_srate, lrh0 >> 12);
+ /* length is + 1 for the control dword */
+ pbc = ((u64) control << 32) | (hwords + 1);
+
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (!piobuf) {
+ /*
+ * We are out of PIO buffers at the moment.
+ * Pass responsibility for sending the ACK to the
+ * send tasklet so that when a PIO buffer becomes
+ * available, the ACK is sent ahead of other outgoing
+ * packets.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ goto queue_ack;
+ }
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness
+ * on some CPUs, or the WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ u32 *hdrp = (u32 *) &hdr;
+
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+ } else
+ qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+
+ ibp->n_unicast_xmit++;
+ goto done;
+
+queue_ack:
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ ibp->n_rc_qacks++;
+ qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+
+ /* Schedule the send tasklet. */
+ qib_schedule_send(qp);
+ }
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return;
+}
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from do_rc_ack() and qib_restart_rc() to set the QP
+ * send state so that it resends starting from the given PSN.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct qib_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
+
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (qib_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = qib_cmp24(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
+ }
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See qib_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since it's only
+ * one PSN per request.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ qp->s_psn = psn;
+ /*
+ * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * asynchronously before the send tasklet can get scheduled.
+ * Doing it in qib_make_rc_req() is too late.
+ */
+ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= QIB_S_WAIT_PSN;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+{
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct qib_ibport *ibp;
+
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ qib_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ return;
+ } else /* XXX need to handle delayed completion */
+ return;
+ } else
+ qp->s_retry--;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ ibp->n_rc_resends++;
+ else
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
+ QIB_S_WAIT_ACK);
+ if (wait)
+ qp->s_flags |= QIB_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ struct qib_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_TIMER) {
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ibp->n_rc_timeouts++;
+ qp->s_flags &= ~QIB_S_TIMER;
+ del_timer(&qp->s_timer);
+ qib_restart_rc(qp, qp->s_last_psn + 1, 1);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void qib_rc_rnr_retry(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_RNR) {
+ qp->s_flags &= ~QIB_S_WAIT_RNR;
+ del_timer(&qp->s_timer);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
+ */
+static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+{
+ struct qib_swqe *wqe;
+ u32 n = qp->s_last;
+
+ /* Find the work request corresponding to the given PSN. */
+ for (;;) {
+ wqe = get_swqe_ptr(qp, n);
+ if (qib_cmp24(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_sending_psn = wqe->lpsn + 1;
+ else
+ qp->s_sending_psn = psn + 1;
+ break;
+ }
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ }
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ struct ib_wc wc;
+ unsigned i;
+ u32 opcode;
+ u32 psn;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ /* Find out where the BTH is */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ WARN_ON(!qp->s_rdma_ack_cnt);
+ qp->s_rdma_ack_cnt--;
+ return;
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ reset_sending_psn(qp, psn);
+
+ /*
+ * Start timer after a packet requesting an ACK has been sent and
+ * there are still requests that haven't been acked.
+ */
+ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ start_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ /*
+ * If we were waiting for sends to complete before resending,
+ * and they are now complete, restart sending.
+ */
+ if (qp->s_flags & QIB_S_WAIT_PSN &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ qp->s_flags &= ~QIB_S_WAIT_PSN;
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ qib_schedule_send(qp);
+ }
+}
+
+static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+{
+ qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to qib_send_complete but has to check to be sure
+ * that the SGEs are not being referenced if the SWQE is being resent.
+ */
+static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
+ struct qib_swqe *wqe,
+ struct qib_ibport *ibp)
+{
+ struct ib_wc wc;
+ unsigned i;
+
+ /*
+ * Don't decrement refcount and don't generate a
+ * completion if the SWQE is being resent until the send
+ * is finished.
+ */
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ } else
+ ibp->n_rc_delayed_comp++;
+
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, wqe->lpsn);
+
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop resending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_acked == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_acked = qp->s_cur;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (qp->s_acked != qp->s_tail) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ } else {
+ if (++qp->s_acked >= qp->s_size)
+ qp->s_acked = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ qp->s_draining = 0;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ }
+ return wqe;
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp;
+ enum ib_wc_status status;
+ struct qib_swqe *wqe;
+ int ret = 0;
+ u32 ack_psn;
+ int diff;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include any ACK'ed request(s).
+ */
+ ack_psn = psn;
+ if (aeth >> 29)
+ ack_psn--;
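+ /* Top 3 bits of the AETH: 0 = ACK, 1 = RNR NAK, 3 = NAK (see switch below) */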
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * If this request is a RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only a RDMA_READ_LAST or ONLY
+ * can ACK a RDMA read and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
+ /* Retry this request. */
+ if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ u64 *vaddr = wqe->sg_list[0].vaddr;
+ *vaddr = val;
+ }
+ if (qp->s_num_rd_atomic &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+ qp->s_num_rd_atomic--;
+ /* Restart sending task if fence is complete */
+ if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ }
+ }
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ ibp->n_rc_acks++;
+ if (qp->s_acked != qp->s_tail) {
+ /*
+ * We are expecting more ACKs so
+ * reset the retransmit timer.
+ */
+ start_timer(qp);
+ /*
+ * We can stop resending the earlier packets and
+ * continue with the next packet the receiver wants.
+ */
+ if (qib_cmp24(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+ qib_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, psn);
+ ret = 1;
+ goto bail;
+
+ case 1: /* RNR NAK */
+ ibp->n_rnr_naks++;
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ if (qp->s_flags & QIB_S_WAIT_RNR)
+ goto bail;
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ reset_psn(qp, psn);
+
+ qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
+ qp->s_flags |= QIB_S_WAIT_RNR;
+ qp->s_timer.function = qib_rc_rnr_retry;
+ qp->s_timer.expires = jiffies + usecs_to_jiffies(
+ ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK]);
+ add_timer(&qp->s_timer);
+ goto bail;
+
+ case 3: /* NAK */
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+ switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ ibp->n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN.
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ qib_restart_rc(qp, psn, 0);
+ qib_schedule_send(qp);
+ break;
+
+ case 1: /* Invalid Request */
+ status = IB_WC_REM_INV_REQ_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ status = IB_WC_REM_ACCESS_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ status = IB_WC_REM_OP_ERR;
+ ibp->n_other_naks++;
+class_b:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail;
+
+ default: /* 2: reserved */
+reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
+
+/*
+ * We have seen an out of sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+
+ while (qib_cmp24(psn, wqe->lpsn) > 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ break;
+ wqe = do_rc_completion(qp, wqe, ibp);
+ }
+
+ ibp->n_rdma_seq++;
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/**
+ * qib_rc_rcv_resp - process an incoming RC response packet
+ * @ibp: the port this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_rc_rcv_resp(struct qib_ibport *ibp,
+ struct qib_other_headers *ohdr,
+ void *data, u32 tlen,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn, u32 hdrsize, u32 pmtu,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+ enum ib_wc_status status;
+ unsigned long flags;
+ int diff;
+ u32 pad;
+ u32 aeth;
+ u64 val;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
+
+ /* Ignore invalid responses. */
+ if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = qib_cmp24(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if ((aeth >> 29) == 0)
+ qib_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ /*
+ * Skip everything other than the PSN we expect, if we are waiting
+ * for a reply to a restarted RDMA read or atomic op.
+ */
+ if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+ if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ status = IB_WC_SUCCESS;
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
+ __be32 *p = ohdr->u.at.atomic_ack_eth;
+
+ val = ((u64) be32_to_cpu(p[0]) << 32) |
+ be32_to_cpu(p[1]);
+ } else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ hdrsize += 4;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_middle;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+read_middle:
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_len_err;
+ if (unlikely(pmtu >= qp->s_rdma_read_len))
+ goto ack_len_err;
+
+ /*
+ * We got a response so update the timeout.
+ * 4.096 usec. * (1 << qp->timeout)
+ */
+ qp->s_flags |= QIB_S_TIMER;
+ mod_timer(&qp->s_timer, jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL));
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+
+ if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+ qp->s_retry = qp->s_retry_cnt;
+
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ */
+ qp->s_rdma_read_len -= pmtu;
+ update_last_psn(qp, psn);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 0 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen < (hdrsize + pad + 8)))
+ goto ack_len_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_last;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen <= (hdrsize + pad + 8)))
+ goto ack_len_err;
+read_last:
+ tlen -= hdrsize + pad + 8;
+ if (unlikely(tlen != qp->s_rdma_read_len))
+ goto ack_len_err;
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ WARN_ON(qp->s_rdma_read_sge.num_sge);
+ (void) do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+ goto ack_done;
+ }
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ goto ack_err;
+
+ack_seq_err:
+ rdma_seq_err(qp, ibp, psn, rcd);
+ goto ack_done;
+
+ack_len_err:
+ status = IB_WC_LOC_LEN_ERR;
+ack_err:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
+
+/**
+ * qib_rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ *
+ * This is called from qib_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+ void *data,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn,
+ int diff,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_ack_entry *e;
+ unsigned long flags;
+ u8 i, prev;
+ int old_req;
+
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if we already sent one.
+ */
+ if (!qp->r_nak_state) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+ goto done;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ *
+ * First, find where this duplicate PSN falls within the
+ * ACKs previously sent.
+ * old_req is true if there is an older response that is scheduled
+ * to be sent before sending this one.
+ */
+ e = NULL;
+ old_req = 1;
+ ibp->n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock_done;
+
+ for (i = qp->r_head_ack_queue; ; i = prev) {
+ if (i == qp->s_tail_ack_queue)
+ old_req = 0;
+ if (i)
+ prev = i - 1;
+ else
+ prev = QIB_MAX_RDMA_ATOMIC;
+ if (prev == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[prev];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (qib_cmp24(psn, e->psn) >= 0) {
+ if (prev == qp->s_tail_ack_queue &&
+ qib_cmp24(psn, e->lpsn) <= 0)
+ old_req = 0;
+ break;
+ }
+ }
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST): {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+
+ /*
+ * If we didn't find the RDMA read request in the ack queue,
+ * we can ignore this request.
+ */
+ if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+ goto unlock_done;
+ /* RETH comes after BTH */
+ reth = &ohdr->u.rc.reth;
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ * We reuse the old ack_queue slot since the requester
+ * should not back up and request an earlier PSN for the
+ * same request.
+ */
+ offset = ((psn - e->psn) & QIB_PSN_MASK) *
+ ib_mtu_enum_to_int(qp->path_mtu);
+ len = be32_to_cpu(reth->length);
+ if (unlikely(offset + len != e->rdma_sge.sge_length))
+ goto unlock_done;
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ if (len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock_done;
+ } else {
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->psn = psn;
+ if (old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ /*
+ * If we didn't find the atomic request in the ack queue
+ * or the send tasklet is already backed up to send an
+ * earlier entry, we can ignore this request.
+ */
+ if (!e || e->opcode != (u8) opcode || old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ default:
+ /*
+ * Ignore this operation if it doesn't request an ACK
+ * or an earlier RDMA read or atomic is going to be resent.
+ */
+ if (!(psn & IB_BTH_REQ_ACK) || old_req)
+ goto unlock_done;
+ /*
+ * Resend the most recent ACK if this request is
+ * after all the previous RDMA reads and atomics.
+ */
+ if (i == qp->r_head_ack_queue) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->r_psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Try to send a simple ACK to work around a Mellanox bug
+ * which doesn't accept a RDMA read response or atomic
+ * response as an ACK for earlier SENDs or RDMA writes.
+ */
+ if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Resend the RDMA read or atomic op which
+ * ACKs this duplicate request.
+ */
+ qp->s_tail_ack_queue = i;
+ break;
+ }
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->r_nak_state = 0;
+ qib_schedule_send(qp);
+
+unlock_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+
+send_ack:
+ return 0;
+}
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+{
+ unsigned long flags;
+ int lastwqe;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ lastwqe = qib_error_qp(qp, err);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+{
+ unsigned next;
+
+ next = n + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
+/**
+ * qib_rc_rcv - process an incoming RC packet
+ * @rcd: the context pointer
+ * @hdr: the header of this packet
+ * @has_grh: true if the header has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ *
+ * This is called from qib_qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+ struct qib_other_headers *ohdr;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int diff;
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
+ hdrsize, pmtu, rcd);
+ goto runlock;
+ }
+
+ /* Compute 24 bits worth of difference. */
+ diff = qib_cmp24(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+ goto runlock;
+ goto send_ack;
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
+ break;
+ }
+
+ memset(&wc, 0, sizeof wc);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+ case OP(RDMA_WRITE_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto nack_inv;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_msn++;
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+ /* consume RWQE */
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto nack_acc;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto send_last;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(RDMA_READ_REQUEST): {
+ struct qib_ack_entry *e;
+ u32 len;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ reth = &ohdr->u.rc.reth;
+ len = be32_to_cpu(reth->length);
+ if (len) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto nack_acc_unlck;
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ if (len > pmtu)
+ qp->r_psn += (len - 1) / pmtu;
+ } else {
+ e->rdma_sge.mr = NULL;
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth;
+ struct qib_ack_entry *e;
+ u64 vaddr;
+ atomic64_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ ateth = &ohdr->u.atomic_eth;
+ vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+ be32_to_cpu(ateth->vaddr[1]);
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv_unlck;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = psn;
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ default:
+ /* NAK unknown opcodes. */
+ goto nack_inv;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+ if (psn & (1 << 31))
+ goto send_ack;
+ goto runlock;
+
+rnr_nak:
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue RNR NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+ qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ qib_send_rc_ack(qp);
+runlock:
+ spin_unlock(&qp->r_lock);
+ return;
+
+srunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
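/*
 * Illustrative sketch (hypothetical, standalone user-space C): the send_last
 * path above pulls the pad count out of BTH bits 21:20 and strips
 * header + pad + 4-byte ICRC before crediting the payload.  The driver works
 * on the big-endian bth[0] via be32_to_cpu(); here bth0 is already host order.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t payload_bytes(uint32_t bth0, uint32_t tlen, uint32_t hdrsize)
{
	uint32_t pad = (bth0 >> 20) & 3;	/* BTH PadCnt, as in the code */

	if (tlen < hdrsize + pad + 4)		/* too short: the code NAKs */
		return 0;
	return tlen - (hdrsize + pad + 4);	/* drop header, pad and ICRC */
}

int main(void)
{
	/* hypothetical packet: 28-byte header, 100-byte payload, 2 pad bytes */
	printf("payload = %u bytes\n",
	       payload_bytes(2u << 20, 28 + 100 + 2 + 4, 28));
	return 0;
}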
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 00000000000..eb78d9367f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+ */
+const u32 ib_qib_rnr_table[32] = {
+ 655360, /* 00: 655.36 */
+ 10, /* 01: .01 */
+ 20, /* 02 .02 */
+ 30, /* 03: .03 */
+ 40, /* 04: .04 */
+ 60, /* 05: .06 */
+ 80, /* 06: .08 */
+ 120, /* 07: .12 */
+ 160, /* 08: .16 */
+ 240, /* 09: .24 */
+ 320, /* 0A: .32 */
+ 480, /* 0B: .48 */
+ 640, /* 0C: .64 */
+ 960, /* 0D: .96 */
+ 1280, /* 0E: 1.28 */
+ 1920, /* 0F: 1.92 */
+ 2560, /* 10: 2.56 */
+ 3840, /* 11: 3.84 */
+ 5120, /* 12: 5.12 */
+ 7680, /* 13: 7.68 */
+ 10240, /* 14: 10.24 */
+ 15360, /* 15: 15.36 */
+ 20480, /* 16: 20.48 */
+ 30720, /* 17: 30.72 */
+ 40960, /* 18: 40.96 */
+ 61440, /* 19: 61.44 */
+ 81920, /* 1A: 81.92 */
+ 122880, /* 1B: 122.88 */
+ 163840, /* 1C: 163.84 */
+ 245760, /* 1D: 245.76 */
+ 327680, /* 1E: 327.68 */
+ 491520 /* 1F: 491.52 */
+};
+
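/*
 * Usage sketch (hypothetical helper, kernel context assumed): the rnr_nak
 * path in qib_ruc_loopback() below turns a 5-bit IBTA RNR timer code into a
 * timer expiry exactly like this, via the table above.
 */
#include <linux/jiffies.h>

static inline unsigned long qib_rnr_expiry_sketch(unsigned int rnr_code)
{
	/* mask to 5 bits; ib_qib_rnr_table[] above gives microseconds */
	return jiffies + usecs_to_jiffies(ib_qib_rnr_table[rnr_code & 31]);
}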
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+ struct qib_sge_state *ss;
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
+ while (j) {
+ struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+{
+ unsigned long flags;
+ struct qib_rq *rq;
+ struct qib_rwq *wq;
+ struct qib_srq *srq;
+ struct qib_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !qib_init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
+
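/*
 * Caller sketch (hypothetical helper, mirroring the SEND_FIRST handling in
 * qib_rc.c above): maps the tri-state return of qib_get_rwqe() onto the
 * three receive-side outcomes.  Returns the NAK state to report, 0 if a
 * receive WQE was consumed.
 */
static inline int qib_consume_rwqe_sketch(struct qib_qp *qp)
{
	int ret = qib_get_rwqe(qp, 0);

	if (ret < 0)		/* local error, e.g. a bad LKEY in the RWQE */
		return IB_NAK_REMOTE_OPERATIONAL_ERROR;
	if (!ret)		/* nothing posted by the receiver */
		return IB_RNR_NAK | qp->r_min_rnr_timer;
	return 0;		/* got one: qp->r_wr_id and qp->r_sge are set */
}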
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void qib_migrate_qp(struct qib_qp *qp)
+{
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
+static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
+{
+ if (!index) {
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ return ppd->guid;
+ } else
+ return ibp->guids[index - 1];
+}
+
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+ return (gid->global.interface_id == id &&
+ (gid->global.subnet_prefix == gid_prefix ||
+ gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ * This should be called with the QP s_lock held.
+ */
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0)
+{
+ __be64 guid;
+
+ if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
+ if (!has_grh) {
+ if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->alt_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ goto err;
+ qib_migrate_qp(qp);
+ } else {
+ if (!has_grh) {
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp,
+ qp->remote_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->remote_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->port_num)
+ goto err;
+ if (qp->s_mig_state == IB_MIG_REARM &&
+ !(bth0 & IB_BTH_MIG_REQ))
+ qp->s_mig_state = IB_MIG_ARMED;
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+/**
+ * qib_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from qib_do_send() to
+ * forward a WQE addressed to the same HCA.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+static void qib_ruc_loopback(struct qib_qp *sqp)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ struct qib_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ int release;
+ int ret;
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+ qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
+ !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= QIB_S_BUSY;
+
+again:
+ if (sqp->s_last == sqp->s_head)
+ goto clr_busy;
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ ibp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof wc);
+ send_status = IB_WC_SUCCESS;
+
+ release = 1;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = 0;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->wr.wr.atomic.remote_addr,
+ wqe->wr.wr.atomic.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ atomic_dec(&sge->mr->refcount);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ ibp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ qib_send_complete(sqp, wqe, send_status);
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ ibp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+ goto clr_busy;
+ sqp->s_flags |= QIB_S_WAIT_RNR;
+ sqp->s_timer.function = qib_rc_rnr_retry;
+ sqp->s_timer.expires = jiffies +
+ usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
+ add_timer(&sqp->s_timer);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ qib_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ qib_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ if (qp && atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+ hdr->version_tclass_flow =
+ cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+ (grh->flow_label << IB_GRH_FLOW_SHIFT));
+ hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ hdr->next_hdr = IB_GRH_NEXT_HDR;
+ hdr->hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.interface_id = grh->sgid_index ?
+ ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
+ hdr->dgid = grh->dgid;
+
+ /* GRH header size in 32-bit words. */
+ return sizeof(struct ib_grh) / sizeof(u32);
+}
+
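/*
 * Sketch of the PayLen arithmetic in qib_make_grh() above (standalone C,
 * hypothetical sizes).  Assumes SIZE_OF_CRC is one 32-bit word (the ICRC)
 * and reads the "- 2" as dropping the two 32-bit LRH words; the driver
 * additionally byte-swaps with cpu_to_be16().
 */
#include <stdio.h>

static unsigned int paylen_bytes(unsigned int hwords, unsigned int nwords)
{
	const unsigned int size_of_crc = 1;	/* assumed: 1 word of ICRC */

	return (hwords - 2 + nwords + size_of_crc) << 2;
}

int main(void)
{
	/* hypothetical: 7 header words built so far, 64 payload words */
	printf("paylen = %u bytes\n", paylen_bytes(7, 64));
	return 0;
}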
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u16 lrh0;
+ u32 nwords;
+ u32 extra_bytes;
+
+ /* Construct the header. */
+ extra_bytes = -qp->s_cur_size & 3;
+ nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ lrh0 = QIB_LRH_BTH;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ }
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ qp->remote_ah_attr.src_path_bits);
+ bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
+ bth0 |= extra_bytes << 20;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
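/*
 * Sketch of the padding math in qib_make_ruc_header() above (standalone C,
 * hypothetical sizes): "-len & 3" is the byte count needed to round the
 * payload up to a 4-byte boundary, and that same pad count is what the
 * receive side later reads back out of BTH bits 21:20.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned int sizes[] = { 0, 1, 2, 3, 4, 5, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int len = sizes[i];
		unsigned int extra = (0u - len) & 3;	  /* pad bytes */
		unsigned int nwords = (len + extra) >> 2; /* padded words */

		printf("len=%u pad=%u nwords=%u\n", len, extra, nwords);
	}
	return 0;
}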
+/**
+ * qib_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, two threads could send packets out of order.
+ */
+void qib_do_send(struct work_struct *work)
+{
+ struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int (*make_req)(struct qib_qp *qp);
+ unsigned long flags;
+
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC) &&
+ (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ qib_ruc_loopback(qp);
+ return;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ make_req = qib_make_rc_req;
+ else if (qp->ibqp.qp_type == IB_QPT_UC)
+ make_req = qib_make_uc_req;
+ else
+ make_req = qib_make_ud_req;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!qib_send_ok(qp)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ qp->s_flags |= QIB_S_BUSY;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
+ qp->s_cur_sge, qp->s_cur_size))
+ break;
+ /* Record that s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+ } while (make_req(qp));
+}
+
+/*
+ * This should be called with s_lock held.
+ */
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ unsigned i;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+
+ /* See ch. 11.2.4.1 and 10.7.3.1 */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = status;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.qp = &qp->ibqp;
+ if (status == IB_WC_SUCCESS)
+ wc.byte_len = wqe->length;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
+ }
+
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
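/*
 * Sketch of the index bookkeeping at the end of qib_send_complete() above
 * (standalone C, hypothetical 4-entry ring): the retired slot wraps modulo
 * the ring size, and any cursor still parked on that slot is dragged along.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 4, s_last = 3, s_cur = 3, s_acked = 1;
	unsigned int old_last = s_last, last = s_last;

	if (++last >= size)	/* wrap, as qp->s_last does */
		last = 0;
	s_last = last;
	if (s_acked == old_last)
		s_acked = last;
	if (s_cur == old_last)
		s_cur = last;

	printf("s_last=%u s_cur=%u s_acked=%u\n", s_last, s_cur, s_acked);
	return 0;
}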
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 2a68d9f624d..0aeed0e74cb 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -32,22 +32,40 @@
*/
/*
* This file contains all of the code that is specific to the SerDes
- * on the InfiniPath 7220 chip.
+ * on the QLogic_IB 7220 chip.
*/
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
+
+/*
+ * Same as in qib_iba7220.c, but just the registers needed here.
+ * Could move whole set to qib_7220.h, but decided better to keep
+ * local.
+ */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+/* these are used only here, not in qib_iba7220.c */
+#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
+#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
+#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
+#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
+#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
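/*
 * Sketch of the KREG_IDX() token-pasting pattern above (standalone C,
 * hypothetical DEMO_* offsets standing in for QIB_7220_*_OFFS): the macro
 * turns a register name into its index within an array of 64-bit kregs.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_Scratch_OFFS	0x0080	/* hypothetical byte offset */
#define DEMO_IDX(regname)	(DEMO_##regname##_OFFS / sizeof(uint64_t))

int main(void)
{
	printf("Scratch kreg index = %zu\n", DEMO_IDX(Scratch));
	return 0;
}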
/*
* The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC. It is not part of the normal kregs
- * map and is used in exactly one place, hence the #define below.
+ * various SerDes registers by IBC.
*/
-#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
+#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
/*
* Below used for sdnum parameter, selecting one of the two sections
@@ -71,42 +89,37 @@
#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
/* Forward declarations. */
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 data, u32 mask);
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
int mask);
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
- const char *where);
-static int ipath_sd_setvals(struct ipath_devdata *dd);
-static int ipath_sd_early(struct ipath_devdata *dd);
-static int ipath_sd_dactrim(struct ipath_devdata *dd);
-/* Set the registers that IBC may muck with to their default "preset" values */
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-static int ipath_internal_presets(struct ipath_devdata *dd);
+static int qib_sd_trimdone_poll(struct qib_devdata *dd);
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
+static int qib_sd_setvals(struct qib_devdata *dd);
+static int qib_sd_early(struct qib_devdata *dd);
+static int qib_sd_dactrim(struct qib_devdata *dd);
+static int qib_internal_presets(struct qib_devdata *dd);
/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
-
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
+static int qib_sd_trimself(struct qib_devdata *dd, int val);
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
/*
* Below keeps track of whether the "once per power-on" initialization has
* been done, because uC code Version 1.32.17 or higher allows the uC to
* be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin", as reflected in was_reset parameter to
- * ipath_sd7220_init() is no longer valid. Instead, we check for the
+ * state of the reset "pin" is no longer valid. Instead, we check for the
* actual uC code having been loaded.
*/
-static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
+static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
{
- if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
- dd->serdes_first_init_done = 1;
- return dd->serdes_first_init_done;
+ struct qib_devdata *dd = ppd->dd;
+ if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
+ dd->cspec->serdes_first_init_done = 1;
+ return dd->cspec->serdes_first_init_done;
}
-/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
#define UC_PAR_CLR_D 8
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
+void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
int ret;
/* clear, then re-enable parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
UC_PAR_CLR_D, UC_PAR_CLR_M);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
+ qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
goto bail;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
UC_PAR_CLR_M);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(4);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_read_kreg32(dd, kr_scratch);
bail:
return;
}
@@ -146,7 +159,7 @@ bail:
#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-static int ipath_resync_ibepb(struct ipath_devdata *dd)
+static int qib_resync_ibepb(struct qib_devdata *dd)
{
int ret, pat, tries, chn;
u32 loc;
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
chn = 0;
for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
loc = IB_PGUDP(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed read in resync\n");
+ qib_dev_err(dd, "Failed read in resync\n");
continue;
}
if (ret != 0xF0 && ret != 0x55 && tries == 0)
- ipath_dev_err(dd, "unexpected pattern in resync\n");
+ qib_dev_err(dd, "unexpected pattern in resync\n");
pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
if (ret < 0) {
- ipath_dev_err(dd, "Failed write in resync\n");
+ qib_dev_err(dd, "Failed write in resync\n");
continue;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed re-read in resync\n");
+ qib_dev_err(dd, "Failed re-read in resync\n");
continue;
}
if (ret != pat) {
- ipath_dev_err(dd, "Failed compare1 in resync\n");
+ qib_dev_err(dd, "Failed compare1 in resync\n");
continue;
}
loc = IB_CMUDONE(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
+ qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
continue;
}
if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
+ qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
+ ret, chn);
continue;
}
if (++chn == 4)
break; /* Success */
}
- ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
return (ret > 0) ? 0 : ret;
}
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
* Localize the stuff that should be done to change IB uC reset
* returns <0 for errors.
*/
-static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
+static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
u64 rst_val;
int ret = 0;
unsigned long flags;
- rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
if (assert_rst) {
/*
* Vendor recommends "interrupting" uC before reset, to
* minimize possible glitches.
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
epb_access(dd, IB_7220_SERDES, 1);
rst_val |= 1ULL;
/* Squelch possible parity error from _asserting_ reset */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay to ensure it took effect */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(2);
/* once it's reset, can remove interrupt */
epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
} else {
/*
* Before we de-assert reset, we need to deal with
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
*/
u64 val;
rst_val &= ~(1ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "unable to re-sync IB EPB\n");
+ qib_dev_err(dd, "unable to re-sync IB EPB\n");
/* set uC control regs to suppress parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
if (ret < 0)
goto bail;
/* IB uC code past Version 1.32.17 allow suppression of wdog */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set WDOG disable\n");
+ qib_dev_err(dd, "Failed to set WDOG disable\n");
goto bail;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay for startup */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(1);
/* clear, then re-enable parity errs */
- ipath_sd7220_clr_ibpar(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
- ipath_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->ipath_hwerrmask &=
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+ qib_sd7220_clr_ibpar(dd);
+ val = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
+ qib_dev_err(dd, "IBUC Parity still set after RST\n");
+ dd->cspec->hwerrmask &=
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask);
}
bail:
return ret;
}
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
const char *where)
{
int ret, chn, baduns;
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
/* give time for reset to settle out in EPB */
udelay(2);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
+ qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
/* Do "sacrificial read" to get EPB in sane state after reset */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
+ qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
/* Check/show "summary" Trim-done bit in IBCStatus */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- if (val & (1ULL << 11))
- ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
- else
- ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
-
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(val & (1ULL << 11)))
+ qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
+ /*
+ * Do "dummy read/mod/wr" to get EPB in sane state after reset.
+ * The default value for MPREG6 is 0.
+ */
udelay(2);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
+ qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
udelay(10);
baduns = 0;
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
- " (%s)\n", chn, where);
+ qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
+ " (%s)\n", chn, where);
if (!(ret & 0x10)) {
int probe;
+
baduns |= (1 << chn);
- ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
+ qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
" (%s)\n", chn, ret, where);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0);
- ipath_dev_err(dd, "probe is %d (%02X)\n",
+ qib_dev_err(dd, "probe is %d (%02X)\n",
probe, probe);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
- ipath_dev_err(dd, "re-read: %d (%02X)\n",
+ qib_dev_err(dd, "re-read: %d (%02X)\n",
probe, probe);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Err on TRIMDONE rewrite1\n");
}
}
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Reseting TRIMDONE on chn %d (%s)\n",
chn, where);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd, "Failed re-setting "
+ qib_dev_err(dd, "Failed re-setting "
"TRIMDONE, chn %d (%s)\n",
chn, where);
}
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
* Post IB uC code version 1.32.17, was_reset being 1 is not really
* informative, so we double-check.
*/
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
+int qib_sd7220_init(struct qib_devdata *dd)
{
int ret = 1; /* default to failure */
- int first_reset;
- int val_stat;
+ int first_reset, was_reset;
+ /* SERDES MPU reset recorded in D0 */
+ was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
if (!was_reset) {
/* entered with reset not asserted, we need to do it */
- ipath_ibsd_reset(dd, 1);
- ipath_sd_trimdone_monitor(dd, "Driver-reload");
+ qib_ibsd_reset(dd, 1);
+ qib_sd_trimdone_monitor(dd, "Driver-reload");
}
-
/* Substitute our deduced value for was_reset */
- ret = ipath_ibsd_ucode_loaded(dd);
- if (ret < 0) {
- ret = 1;
- goto done;
- }
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
+ ret = qib_ibsd_ucode_loaded(dd->pport);
+ if (ret < 0)
+ goto bail;
+ first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
/*
* Alter some regs per vendor latest doc, reset-defaults
* are not right for IB.
*/
- ret = ipath_sd_early(dd);
+ ret = qib_sd_early(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
+ goto bail;
}
-
/*
* Set DAC manual trim IB.
* We only do this once after chip has been reset (usually
* same as once per system boot).
*/
if (first_reset) {
- ret = ipath_sd_dactrim(dd);
+ ret = qib_sd_dactrim(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
+ goto bail;
}
}
-
/*
* Set various registers (DDS and RXEQ) that will be
* controlled by IBC (in 1.2 mode) to reasonable preset values
* Calling the "internal" version avoids the "check for needed"
* and "trimdone monitor" that might be counter-productive.
*/
- ret = ipath_internal_presets(dd);
+ ret = qib_internal_presets(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES presets\n");
+ goto bail;
}
- ret = ipath_sd_trimself(dd, 0x80);
+ ret = qib_sd_trimself(dd, 0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
+ goto bail;
}
/* Load image, then try to verify */
- ret = 0; /* Assume success */
+ ret = 0; /* Assume success */
if (first_reset) {
int vfy;
int trim_done;
- ipath_dbg("SerDes uC was reset, reloading PRAM\n");
- ret = ipath_sd7220_ib_load(dd);
+
+ ret = qib_sd7220_ib_load(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to load IB SERDES image\n");
- ret = 1;
- goto done;
- }
+ qib_dev_err(dd, "Failed to load IB SERDES image\n");
+ goto bail;
+ } else {
+ /* Loaded image, try to verify */
+ vfy = qib_sd7220_ib_vfy(dd);
+ if (vfy != ret) {
+ qib_dev_err(dd, "SERDES PRAM VFY failed\n");
+ goto bail;
+ } /* end if verified */
+ } /* end if loaded */
- /* Loaded image, try to verify */
- vfy = ipath_sd7220_ib_vfy(dd);
- if (vfy != ret) {
- ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
- ret = 1;
- goto done;
- }
/*
* Loaded and verified. Almost good...
* hold "success" in ret
*/
ret = 0;
-
/*
* Prev steps all worked, continue bringup
* De-assert RESET to uC, only in first reset, to allow
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
*/
ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing START_EQ1\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed clearing START_EQ1\n");
+ goto bail;
}
- ipath_ibsd_reset(dd, 0);
+ qib_ibsd_reset(dd, 0);
/*
* If this is not the first reset, trimdone should be set
- * already.
+ * already. We may need to check this.
*/
- trim_done = ipath_sd_trimdone_poll(dd);
+ trim_done = qib_sd_trimdone_poll(dd);
/*
* Whether or not trimdone succeeded, we need to put the
* uC back into reset to avoid a possible fight with the
* IBC state-machine.
*/
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
if (!trim_done) {
- ipath_dev_err(dd, "No TRIMDONE seen\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "No TRIMDONE seen\n");
+ goto bail;
}
-
- ipath_sd_trimdone_monitor(dd, "First-reset");
+ /*
+ * DEBUG: check each time we reset if trimdone bits have
+ * gotten cleared, and re-set them.
+ */
+ qib_sd_trimdone_monitor(dd, "First-reset");
/* Remember so we do not re-do the load, dactrim, etc. */
- dd->serdes_first_init_done = 1;
+ dd->cspec->serdes_first_init_done = 1;
}
/*
- * Setup for channel training and load values for
+ * setup for channel training and load values for
* RxEq and DDS in tables used by IBC in IB1.2 mode
*/
-
- val_stat = ipath_sd_setvals(dd);
- if (val_stat < 0)
- ret = 1;
+ ret = 0;
+ if (qib_sd_setvals(dd) >= 0)
+ goto done;
+bail:
+ ret = 1;
done:
/* start relock timer regardless, but start at 1 second */
- ipath_set_relock_poll(dd, -1);
+ set_7220_relock_poll(dd, -1);
return ret;
}
@@ -517,7 +523,7 @@ done:
* the "claim" parameter is >0 to claim, <0 to release, 0 to query.
* Returns <0 for errors, >0 if we had ownership, else 0.
*/
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
{
u16 acc;
u64 accval;
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
u64 oct_sel = 0;
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
/*
* The IB SERDES "ownership" is fairly simple. A single each
* request/grant.
*/
- acc = dd->ipath_kregs->kr_ib_epbacc;
+ acc = kr_ibsd_epb_access_ctrl;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has two "octants", need to select which */
- acc = dd->ipath_kregs->kr_pcie_epbacc;
+ acc = kr_pciesd_epb_access_ctrl;
oct_sel = (2 << (sdnum - PCIE_SERDES0));
break;
- default :
+
+ default:
return 0;
}
/* Make sure any outstanding transaction was seen */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(15);
- accval = ipath_read_kreg32(dd, acc);
+ accval = qib_read_kreg32(dd, acc);
owned = !!(accval & EPB_ACC_GNT);
if (claim < 0) {
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
* Both should be clear
*/
u64 newval = 0;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (pollval & EPB_ACC_GNT)
owned = -1;
} else if (claim > 0) {
/* Need to claim */
u64 pollval;
u64 newval = EPB_ACC_REQ | oct_sel;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (!(pollval & EPB_ACC_GNT))
owned = -1;
}
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
/*
* Lemma to deal with race condition of write..read to epb regs
*/
-static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
+static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
int tries;
u64 transval;
-
- ipath_write_kreg(dd, reg, i_val);
+ qib_write_kreg(dd, reg, i_val);
/* Throw away first read, as RDY bit may be stale */
- transval = ipath_read_kreg64(dd, reg);
+ transval = qib_read_kreg64(dd, reg);
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, reg);
+ transval = qib_read_kreg32(dd, reg);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
}
/**
- *
- * ipath_sd7220_reg_mod - modify SERDES register
- * @dd: the infinipath device
+ * qib_sd7220_reg_mod - modify SERDES register
+ * @dd: the qlogic_ib device
* @sdnum: which SERDES to access
* @loc: location - channel, element, register, as packed by EPB_LOC() macro.
* @wd: Write Data - value to set in register
* @mask: ones where data should be spliced into reg.
*
- * Basic register read/modify/write, with un-needed accesses elided. That is,
+ * Basic register read/modify/write, with un-needed accesses elided. That is,
* a mask of zero will prevent write, while a mask of 0xFF will prevent read.
* returns current (presumed, if a write was done) contents of selected
* register, or <0 if errors.
*/
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 wd, u32 mask)
{
u16 trans;
u64 transval;
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
unsigned long flags;
switch (sdnum) {
- case IB_7220_SERDES :
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ case IB_7220_SERDES:
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
* All access is locked in software (vs other host threads) and
* hardware (vs uC access).
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
if (tries > 0) {
- tries = 1; /* to make read-skip work */
+ tries = 1; /* to make read-skip work */
if (mask != 0xFF) {
/*
* Not a pure write, so need to read.
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
else
ret = transval & EPB_DATA_MASK;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
if (tries <= 0)
ret = -1;
return ret;
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
#define EPB_RAMDATA EPB_LOC(6, 0, 5)
/* Transfer date to/from uC Program RAM of IB or PCIe SerDes */
-static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
+static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
u8 *buf, int cnt, int rd_notwr)
{
u16 trans;
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has uC "chip select" in different bit, too */
csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
op = rd_notwr ? "Rd" : "Wr";
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
- op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
- owned, loc);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
*/
addr = loc & 0x1FFF;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
sofar = 0;
- if (tries <= 0)
- ipath_dbg("No initial RDY on EPB access request\n");
- else {
+ if (tries > 0) {
/*
* Every "memory" access is doubly-indirect.
* We set two bytes of address, then read/write
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_UC_CTL |
(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to uC %s cmd\n", op);
while (tries > 0 && sofar < cnt) {
if (!sofar) {
/* Only set address at start of chunk */
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_MADDRH | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRH\n");
+ if (tries <= 0)
break;
- }
addrbyte = (addr + sofar) & 0xFF;
transval = csbit | EPB_MADDRL | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRL\n");
+ if (tries <= 0)
break;
- }
}
if (rd_notwr)
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
else
transval = csbit | EPB_ROMDATA | buf[sofar];
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response DATA\n");
+ if (tries <= 0)
break;
- }
if (rd_notwr)
buf[sofar] = transval & EPB_DATA_MASK;
++sofar;
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Finally, clear control-bit for Read or Write */
transval = csbit | EPB_UC_CTL;
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
}
ret = sofar;
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
if (epb_access(dd, sdnum, -1) < 0)
ret = -1;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- if (tries <= 0) {
- ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
+ if (tries <= 0)
ret = -1;
- }
return ret;
}
#define PROG_CHUNK 64
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
- u8 *img, int len, int offset)
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
+ u8 *img, int len, int offset)
{
int cnt, sofar, req;
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > PROG_CHUNK)
req = PROG_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
img + sofar, req, 0);
if (cnt < req) {
sofar = -1;
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
#define VFY_CHUNK 64
#define SD_PRAM_ERROR_LIMIT 42
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
+ const u8 *img, int len, int offset)
{
int cnt, sofar, req, idx, errors;
unsigned char readback[VFY_CHUNK];
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > VFY_CHUNK)
req = VFY_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
readback, req, 1);
if (cnt < req) {
/* failed in read itself */
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
return errors ? -errors : sofar;
}
-/* IRQ not set up at this point in init, so we poll. */
+/*
+ * IRQ not set up at this point in init, so we poll.
+ */
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
+static int qib_sd_trimdone_poll(struct qib_devdata *dd)
{
int trim_tmo, ret;
uint64_t val;
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
*/
ret = 0;
for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ val = qib_read_kreg64(dd, kr_ibcstatus);
if (val & IB_SERDES_TRIM_DONE) {
- ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
ret = 1;
break;
}
msleep(10);
}
if (trim_tmo >= TRIM_TMO) {
- ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
+ qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
ret = 0;
}
return ret;
@@ -964,8 +958,7 @@ static struct dds_init {
};
/*
- * Next, values related to Receive Equalization.
- * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
+ * Now the RXEQ section of the table.
*/
/* Hardware packs an element number and register address thus: */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
@@ -981,23 +974,23 @@ static struct dds_init {
#define RXEQ_SDR_ZCNT 23
static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
+ u16 rdesc; /* in form used in SerDesDDSRXEQ */
u8 rdata[4];
} rxeq_init_vals[] = {
/* Set Rcv Eq. to Preset node */
RXEQ_VAL_ALL(7, 0x27, 0x10),
/* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
+ RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
- /* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
+ /* Set TLTHFDR/HDR threshold */
+ RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
+ RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
/* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
+ RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
+ RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
- RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
+ RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
+ RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
@@ -1007,27 +1000,27 @@ static struct rxeq_init {
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-static int ipath_sd_setvals(struct ipath_devdata *dd)
+static int qib_sd_setvals(struct qib_devdata *dd)
{
int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
+ int min_idx; /* Minimum index for this portion of table */
uint32_t dds_reg_map;
u64 __iomem *taddr, *iaddr;
uint64_t data;
uint64_t sdctl;
- taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
- iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
+ taddr = dd->kregbase + kr_serdes_maptable;
+ iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
/*
* Init the DDS section of the table.
* Each "row" of the table provokes NUM_DDS_REG writes, to the
* registers indicated in DDS_REG_MAP.
*/
- sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
+ qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
/*
* Iterate down table within loop for each register to store.
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
u64 __iomem *daddr = taddr + ((midx << 4) + idx);
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
/*
- * Init the RXEQ section of the table. As explained above the table
- * rxeq_init_vals[], this runs in a different order, as the pattern
- * of register references is more complex, but there are only
+ * Init the RXEQ section of the table.
+ * This runs in a different order, as the pattern of
+ * register references is more complex, but there are only
* four "data" values per register.
*/
min_idx = idx; /* RXEQ indices pick up where DDS left off */
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
return 0;
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
-{
- int ret = -1;
- int sloc; /* shifted loc, for messages */
-
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- sloc = loc >> EPB_ADDR_SHF;
-
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
- if (ret < 0)
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
- val & 0xFF, mask & 0xFF);
- return ret;
-}
-
/*
* Repeat a "store" across all channels of the IB SerDes.
* Although nominally it inherits the "read value" of the last
* channel it modified, the only really useful return is <0 for
* failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location for the channel-0 copy of the register to
- * be modified.
+ * be the location in some channel of the register to be modified.
+ * The caller can specify use of the "gang write" option of EPB,
+ * in which case we use the specified channel data for any fields
+ * not explicitly written.
*/
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
- int mask)
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
+ int mask)
{
int ret = -1;
int chnl;
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
if (mask != 0xFF) {
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
+ loc & ~EPB_GLOBAL_WR, 0, 0);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "pre-read failed: elt %d,"
- " addr 0x%X, chnl %d\n", (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
+
+ qib_dev_err(dd, "pre-read failed: elt %d,"
+ " addr 0x%X, chnl %d\n",
+ (sloc & 0xF),
+ (sloc >> 9) & 0x3f, chnl);
return ret;
}
val = (ret & ~mask) | (val & mask);
}
loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Global WR failed: elt %d,"
- " addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
+
+ qib_dev_err(dd, "Global WR failed: elt %d,"
+ " addr 0x%X, val %02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, val);
}
return ret;
}
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc &= ~(7 << (4+EPB_ADDR_SHF));
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
for (chnl = 0; chnl < 4; ++chnl) {
- int cloc;
- cloc = loc | (chnl << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
+ int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
+
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X,"
- " mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
+
+ qib_dev_err(dd, "Write failed: elt %d,"
+ " addr 0x%X, chnl %d, val 0x%02X,"
+ " mask 0x%02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+ val & 0xFF, mask & 0xFF);
break;
}
}
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
* Set the Tx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from first row of init table.
*/
-static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
+static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
{
int ret;
int idx, reg, data;
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
* Set the Rx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from selected column of init table.
*/
-static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
+static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
{
int ret;
int ridx;
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
for (ridx = 0; ridx < cnt; ++ridx) {
int elt, reg, val, loc;
+
elt = rxeq_init_vals[ridx].rdesc & 0xF;
reg = rxeq_init_vals[ridx].rdesc >> 4;
loc = EPB_LOC(0, elt, reg);
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
/*
* Set the default values (row 0) for DDR Driver Demphasis.
* we do this initially and whenever we turn off IB-1.2
+ *
* The "default" values for Rx equalization are also stored to
* SerDes registers. Formerly (and still default), we used set 2.
* For experimenting with cables and link-partners, we allow changing
* that via a module parameter.
*/
-static unsigned ipath_rxeq_set = 2;
-module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_rxeq_set = 2;
+module_param_named(rxeq_default_set, qib_rxeq_set, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
+ "Which set [0..3] of Rx Equalization values is default");
-static int ipath_internal_presets(struct ipath_devdata *dd)
+static int qib_internal_presets(struct qib_devdata *dd)
{
int ret = 0;
ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
+ qib_dev_err(dd, "Failed to set default DDS values\n");
+ ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default RXEQ values\n");
+ qib_dev_err(dd, "Failed to set default RXEQ values\n");
return ret;
}
-int ipath_sd7220_presets(struct ipath_devdata *dd)
+int qib_sd7220_presets(struct qib_devdata *dd)
{
int ret = 0;
- if (!dd->ipath_presets_needed)
+ if (!dd->cspec->presets_needed)
return ret;
- dd->ipath_presets_needed = 0;
+ dd->cspec->presets_needed = 0;
/* Assert uC reset, so we don't clash with it. */
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
udelay(2);
- ipath_sd_trimdone_monitor(dd, "link-down");
+ qib_sd_trimdone_monitor(dd, "link-down");
- ret = ipath_internal_presets(dd);
-return ret;
+ ret = qib_internal_presets(dd);
+ return ret;
}
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
+static int qib_sd_trimself(struct qib_devdata *dd, int val)
{
- return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
+ int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
+
+ return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
}
-static int ipath_sd_early(struct ipath_devdata *dd)
+static int qib_sd_early(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
- }
- /* more fine-tuning of what will be default */
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
- if (ret < 0)
- goto bail;
- }
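+ /* EPB_GLOBAL_WR gang-writes all four channels in one EPB transaction. */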
+ ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
bail:
return ret;
}
@@ -1302,50 +1268,53 @@ bail:
#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-static int ipath_sd_dactrim(struct ipath_devdata *dd)
+static int qib_sd_dactrim(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
+
+ ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ /* more fine-tuning of what will be default */
+ ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
/*
- * delay for max possible number of steps, with slop.
+ * Delay for max possible number of steps, with slop.
* Each step is about 4usec.
*/
udelay(415);
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
- if (ret < 0)
- goto bail;
- }
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
+
bail:
return ret;
}
#define RELOCK_FIRST_MS 3
#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void ipath_toggle_rclkrls(struct ipath_devdata *dd)
+void toggle_7220_rclkrls(struct qib_devdata *dd)
{
int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
int ret;
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd)
udelay(1);
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
}
/* Now reset xgxs and IBC to complete the recovery */
- dd->ipath_f_xgxs_reset(dd);
+ dd->f_xgxs_reset(dd->pport);
}
/*
* Shut down the timer that polls for relock occasions, if needed
- * this is "hooked" from ipath_7220_quiet_serdes(), which is called
- * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * this is "hooked" from qib_7220_quiet_serdes(), which is called
+ * just before qib_shutdown_device() in qib_driver.c shuts down all
* the other timers
*/
-void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+void shutdown_7220_relock_poll(struct qib_devdata *dd)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- if (atomic_read(&irp->ipath_relock_timer_active)) {
- del_timer_sync(&irp->ipath_relock_timer);
- atomic_set(&irp->ipath_relock_timer_active, 0);
- }
+ if (dd->cspec->relock_timer_active)
+ del_timer_sync(&dd->cspec->relock_timer);
}
-static unsigned ipath_relock_by_timer = 1;
-module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_relock_by_timer = 1;
+module_param_named(relock_by_timer, qib_relock_by_timer, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-static void ipath_run_relock(unsigned long opaque)
+static void qib_run_relock(unsigned long opaque)
{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- u64 val, ltstate;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* Not yet up, just reenable the timer for later */
- irp->ipath_relock_interval = HZ;
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- return;
- }
+ struct qib_devdata *dd = (struct qib_devdata *)opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ struct qib_chip_specific *cs = dd->cspec;
+ int timeoff;
/*
- * Check link-training state for "stuck" state.
+ * Check link-training state for "stuck" state, when down.
* if found, try relock and schedule another try at
* exponentially growing delay, maxed at one second.
* if not stuck, our work is done.
*/
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- ltstate = ipath_ib_linktrstate(dd, val);
-
- if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
- && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
- int timeoff;
- /* Not up yet. Try again, if allowed by module-param */
- if (ipath_relock_by_timer) {
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
- else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
- ipath_cdbg(VERBOSE, "RELOCK\n");
- ipath_toggle_rclkrls(dd);
- }
+ if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
+ (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE))) {
+ if (qib_relock_by_timer) {
+ if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
+ toggle_7220_rclkrls(dd);
}
/* re-set timer for next check */
- timeoff = irp->ipath_relock_interval << 1;
+ timeoff = cs->relock_interval << 1;
if (timeoff > HZ)
timeoff = HZ;
- irp->ipath_relock_interval = timeoff;
-
- mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
- } else {
- /* Up, so no more need to check so often */
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- }
+ cs->relock_interval = timeoff;
+ } else
+ timeoff = HZ;
+ mod_timer(&cs->relock_timer, jiffies + timeoff);
}
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
+ struct qib_chip_specific *cs = dd->cspec;
- if (ibup > 0) {
- /* we are now up, so relax timer to 1 second interval */
- if (atomic_read(&irp->ipath_relock_timer_active))
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+ if (ibup) {
+ /* We are now up, relax timer to 1 second interval */
+ if (cs->relock_timer_active) {
+ cs->relock_interval = HZ;
+ mod_timer(&cs->relock_timer, jiffies + HZ);
+ }
} else {
/* Transition to down, (re-)set timer to short interval. */
- int timeout;
- timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
+ unsigned int timeout;
+
+ timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
if (timeout == 0)
timeout = 1;
/* If timer has not yet been started, do so. */
- if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
- init_timer(&irp->ipath_relock_timer);
- irp->ipath_relock_timer.function = ipath_run_relock;
- irp->ipath_relock_timer.data = (unsigned long) dd;
- irp->ipath_relock_interval = timeout;
- irp->ipath_relock_timer.expires = jiffies + timeout;
- add_timer(&irp->ipath_relock_timer);
+ if (!cs->relock_timer_active) {
+ cs->relock_timer_active = 1;
+ init_timer(&cs->relock_timer);
+ cs->relock_timer.function = qib_run_relock;
+ cs->relock_timer.data = (unsigned long) dd;
+ cs->relock_interval = timeout;
+ cs->relock_timer.expires = jiffies + timeout;
+ add_timer(&cs->relock_timer);
} else {
- irp->ipath_relock_interval = timeout;
- mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
- atomic_dec(&irp->ipath_relock_timer_active);
+ cs->relock_interval = timeout;
+ mod_timer(&cs->relock_timer, jiffies + timeout);
}
}
}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
index 5ef59da9270..a1118fbd237 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -38,11 +38,10 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
-static unsigned char ipath_sd7220_ib_img[] = {
+static unsigned char qib_sd7220_ib_img[] = {
/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
};
-int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+int qib_sd7220_ib_load(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+int qib_sd7220_ib_vfy(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 00000000000..b8456881f7f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/* default pio off, sdma on */
+static ushort sdma_descq_cnt = 256;
+module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC_LAST (1ULL << 11)
+#define SDMA_DESC_FIRST (1ULL << 12)
+#define SDMA_DESC_DMA_HEAD (1ULL << 13)
+#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
+#define SDMA_DESC_INTR (1ULL << 15)
+#define SDMA_DESC_COUNT_LSB 16
+#define SDMA_DESC_GEN_LSB 30
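+/*
+ * Descriptor qword 0 (see make_sdma_desc()): SDmaBufOffset in bits [10:0],
+ * the flag bits above in [15:11], SDmaDwordCount at SDMA_DESC_COUNT_LSB,
+ * SDmaGeneration at SDMA_DESC_GEN_LSB, and SDmaPhyAddr[31:0] in the upper
+ * 32 bits; qword 1 carries SDmaPhyAddr[47:32].
+ */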
+
+char *qib_sdma_state_names[] = {
+ [qib_sdma_state_s00_hw_down] = "s00_HwDown",
+ [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
+ [qib_sdma_state_s20_idle] = "s20_Idle",
+ [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+ [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+ [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
+ [qib_sdma_state_s99_running] = "s99_Running",
+};
+
+char *qib_sdma_event_names[] = {
+ [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
+ [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
+ [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
+ [qib_sdma_event_e30_go_running] = "e30_GoRunning",
+ [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
+ [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
+ [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
+ [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
+ [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
+ [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
+ [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
+};
+
+/* declare all statics here rather than keep sorting */
+static int alloc_sdma(struct qib_pportdata *);
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct qib_sdma_state *);
+static void sdma_get(struct qib_sdma_state *);
+static void sdma_put(struct qib_sdma_state *);
+static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
+static void sdma_start_sw_clean_up(struct qib_pportdata *);
+static void sdma_sw_clean_up_task(unsigned long);
+static void unmap_desc(struct qib_pportdata *, unsigned);
+
+static void sdma_get(struct qib_sdma_state *ss)
+{
+ kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+ struct qib_sdma_state *ss =
+ container_of(kref, struct qib_sdma_state, kref);
+
+ complete(&ss->comp);
+}
+
+static void sdma_put(struct qib_sdma_state *ss)
+{
+ kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct qib_sdma_state *ss)
+{
+ sdma_put(ss);
+ wait_for_completion(&ss->comp);
+}
+
+/*
+ * Complete all the sdma requests on the active list, in the correct
+ * order, and with appropriate processing. Called when cleaning up
+ * after sdma shutdown, and when new sdma requests are submitted for
+ * a link that is down. This matches what is done for requests
+ * that complete normally; it's just the full list.
+ *
+ * Must be called with sdma_lock held
+ */
+static void clear_sdma_activelist(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_txreq *txp, *txp_next;
+
+ list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
+ list_del_init(&txp->list);
+ if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
+ unsigned idx;
+
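+ /* Unmap every descriptor this request mapped before completing it. */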
+ idx = txp->start_idx;
+ while (idx != txp->next_descq_idx) {
+ unmap_desc(ppd, idx);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+ }
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
+ }
+}
+
+static void sdma_sw_clean_up_task(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /*
+ * At this point, the following should always be true:
+ * - We are halted, so no more descriptors are getting retired.
+ * - We are not running, so no one is submitting new work.
+ * - Only we can send the e40_sw_cleaned, so we can't start
+ * running again until we say so. So, the active list and
+ * descq are ours to play with.
+ */
+
+ /* Process all retired requests. */
+ qib_sdma_make_progress(ppd);
+
+ clear_sdma_activelist(ppd);
+
+ /*
+ * Resync count of added and removed. It is VERY important that
+ * sdma_descq_removed NEVER decrement - user_sdma depends on it.
+ */
+ ppd->sdma_descq_removed = ppd->sdma_descq_added;
+
+ /*
+ * Reset our notion of head and tail.
+ * Note that the HW registers will be reset when switching states
+ * due to calling __qib_sdma_process_event() below.
+ */
+ ppd->sdma_descq_tail = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_head_dma[0] = 0;
+ ppd->sdma_generation = 0;
+
+ __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
+ * as a result of send buffer errors or send DMA descriptor errors.
+ * We want to disarm the buffers in these cases.
+ */
+static void sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ unsigned bufno;
+
+ for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
+ ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
+
+ ppd->dd->f_sdma_hw_start_up(ppd);
+}
+
+static void sdma_sw_tear_down(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
+ /* Releasing this reference means the state machine has stopped. */
+ sdma_put(ss);
+}
+
+static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
+{
+ tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
+}
+
+static void sdma_set_state(struct qib_pportdata *ppd,
+ enum qib_sdma_states next_state)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ struct sdma_set_state_action *action = ss->set_state_action;
+ unsigned op = 0;
+
+ /* debugging bookkeeping */
+ ss->previous_state = ss->current_state;
+ ss->previous_op = ss->current_op;
+
+ ss->current_state = next_state;
+
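+ /* Accumulate the sendctrl operations required on entry to next_state. */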
+ if (action[next_state].op_enable)
+ op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
+
+ if (action[next_state].op_intenable)
+ op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
+
+ if (action[next_state].op_halt)
+ op |= QIB_SDMA_SENDCTRL_OP_HALT;
+
+ if (action[next_state].op_drain)
+ op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
+
+ if (action[next_state].go_s99_running_tofalse)
+ ss->go_s99_running = 0;
+
+ if (action[next_state].go_s99_running_totrue)
+ ss->go_s99_running = 1;
+
+ ss->current_op = op;
+
+ ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
+}
+
+static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
+{
+ __le64 *descqp = &ppd->sdma_descq[head].qw[0];
+ u64 desc[2];
+ dma_addr_t addr;
+ size_t len;
+
+ desc[0] = le64_to_cpu(descqp[0]);
+ desc[1] = le64_to_cpu(descqp[1]);
+
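+ /*
+ * Recover the DMA address and byte length that make_sdma_desc()
+ * packed into the descriptor, then undo the mapping.
+ */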
+ addr = (desc[1] << 32) | (desc[0] >> 32);
+ len = (desc[0] >> 14) & (0x7ffULL << 2);
+ dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
+}
+
+static int alloc_sdma(struct qib_pportdata *ppd)
+{
+ ppd->sdma_descq_cnt = sdma_descq_cnt;
+ if (!ppd->sdma_descq_cnt)
+ ppd->sdma_descq_cnt = 256;
+
+ /* Allocate memory for SendDMA descriptor FIFO */
+ ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
+ GFP_KERNEL);
+
+ if (!ppd->sdma_descq) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
+ "FIFO memory\n");
+ goto bail;
+ }
+
+ /* Allocate memory for DMA of head register to memory */
+ ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
+ if (!ppd->sdma_head_dma) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA "
+ "head memory\n");
+ goto cleanup_descq;
+ }
+ ppd->sdma_head_dma[0] = 0;
+ return 0;
+
+cleanup_descq:
+ dma_free_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
+ ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+bail:
+ ppd->sdma_descq_cnt = 0;
+ return -ENOMEM;
+}
+
+static void free_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+
+ if (ppd->sdma_head_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)ppd->sdma_head_dma,
+ ppd->sdma_head_phys);
+ ppd->sdma_head_dma = NULL;
+ ppd->sdma_head_phys = 0;
+ }
+
+ if (ppd->sdma_descq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]),
+ ppd->sdma_descq, ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+ }
+}
+
+static inline void make_sdma_desc(struct qib_pportdata *ppd,
+ u64 *sdmadesc, u64 addr, u64 dwlen,
+ u64 dwoffset)
+{
+
+ WARN_ON(addr & 3);
+ /* SDmaPhyAddr[47:32] */
+ sdmadesc[1] = addr >> 32;
+ /* SDmaPhyAddr[31:0] */
+ sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
+ /* SDmaGeneration[1:0] */
+ sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
+ SDMA_DESC_GEN_LSB;
+ /* SDmaDwordCount[10:0] */
+ sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
+ /* SDmaBufOffset[12:2] */
+ sdmadesc[0] |= dwoffset & 0x7ffULL;
+}
+
+/* sdma_lock must be held */
+int qib_sdma_make_progress(struct qib_pportdata *ppd)
+{
+ struct list_head *lp = NULL;
+ struct qib_sdma_txreq *txp = NULL;
+ struct qib_devdata *dd = ppd->dd;
+ int progress = 0;
+ u16 hwhead;
+ u16 idx = 0;
+
+ hwhead = dd->f_sdma_gethead(ppd);
+
+ /*
+ * The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+
+ if (!list_empty(&ppd->sdma_activelist)) {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq, list);
+ idx = txp->start_idx;
+ }
+
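+ /*
+ * Retire descriptors up to the hardware head, completing each txp
+ * as its last descriptor is passed.
+ */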
+ while (ppd->sdma_descq_head != hwhead) {
+ /* if desc is part of this txp, unmap if needed */
+ if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
+ (idx == ppd->sdma_descq_head)) {
+ unmap_desc(ppd, ppd->sdma_descq_head);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+
+ /* increment dequed desc count */
+ ppd->sdma_descq_removed++;
+
+ /* advance head, wrap if needed */
+ if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
+ ppd->sdma_descq_head = 0;
+
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
+ /* remove from active list */
+ list_del_init(&txp->list);
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
+ /* see if there is another txp */
+ if (list_empty(&ppd->sdma_activelist))
+ txp = NULL;
+ else {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq,
+ list);
+ idx = txp->start_idx;
+ }
+ }
+ progress = 1;
+ }
+ if (progress)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+ return progress;
+}
+
+/*
+ * This is called from interrupt context.
+ */
+void qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_intr(ppd);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ if (__qib_sdma_running(ppd))
+ qib_sdma_make_progress(ppd);
+}
+
+int qib_setup_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = alloc_sdma(ppd);
+ if (ret)
+ goto bail;
+
+ /* set consistent sdma state */
+ ppd->dd->f_sdma_init_early(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ /* set up reference counting */
+ kref_init(&ppd->sdma_state.kref);
+ init_completion(&ppd->sdma_state.comp);
+
+ ppd->sdma_generation = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_descq_removed = 0;
+ ppd->sdma_descq_added = 0;
+
+ INIT_LIST_HEAD(&ppd->sdma_activelist);
+
+ tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
+ (unsigned long)ppd);
+
+ ret = dd->f_init_sdma_regs(ppd);
+ if (ret)
+ goto bail_alloc;
+
+ qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
+
+ return 0;
+
+bail_alloc:
+ qib_teardown_sdma(ppd);
+bail:
+ return ret;
+}
+
+void qib_teardown_sdma(struct qib_pportdata *ppd)
+{
+ qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+
+ /*
+ * This waits for the state machine to exit so it is not
+ * necessary to kill the sdma_sw_clean_up_task to make sure
+ * it is not running.
+ */
+ sdma_finalput(&ppd->sdma_state);
+
+ free_sdma(ppd);
+}
+
+int qib_sdma_running(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = __qib_sdma_running(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Complete a request when sdma is not running; it is likely the only
+ * request, but to simplify the code we always queue it, then process the
+ * full activelist. We process the entire list to ensure that this
+ * particular request does get its callback, but in the correct order.
+ * Must be called with sdma_lock held.
+ */
+static void complete_sdma_err_req(struct qib_pportdata *ppd,
+ struct qib_verbs_txreq *tx)
+{
+ atomic_inc(&tx->qp->s_dma_busy);
+ /* no sdma descriptors, so no unmap_desc */
+ tx->txreq.start_idx = 0;
+ tx->txreq.next_descq_idx = 0;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ clear_sdma_activelist(ppd);
+}
+
+/*
+ * This function queues one IB packet onto the send DMA queue per call.
+ * The caller is responsible for checking:
+ * 1) The number of send DMA descriptor entries is less than the size of
+ * the descriptor queue.
+ * 2) The IB SGE addresses and lengths are 32-bit aligned
+ * (except possibly the last SGE's length)
+ * 3) The SGE addresses are suitable for passing to dma_map_single().
+ */
+int qib_sdma_verbs_send(struct qib_pportdata *ppd,
+ struct qib_sge_state *ss, u32 dwords,
+ struct qib_verbs_txreq *tx)
+{
+ unsigned long flags;
+ struct qib_sge *sge;
+ struct qib_qp *qp;
+ int ret = 0;
+ u16 tail;
+ __le64 *descqp;
+ u64 sdmadesc[2];
+ u32 dwoffset;
+ dma_addr_t addr;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+retry:
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ complete_sdma_err_req(ppd, tx);
+ goto unlock;
+ }
+
+ if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
+ if (qib_sdma_make_progress(ppd))
+ goto retry;
+ if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ ppd->dd->f_sdma_set_desc_cnt(ppd,
+ ppd->sdma_descq_cnt / 2);
+ goto busy;
+ }
+
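+ /*
+ * The first descriptor covers the packet header, whose DMA address
+ * is already in tx->txreq.addr (tx->hdr_dwords dwords long).
+ */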
+ dwoffset = tx->hdr_dwords;
+ make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
+
+ sdmadesc[0] |= SDMA_DESC_FIRST;
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+
+ /* write to the descq */
+ tail = ppd->sdma_descq_tail;
+ descqp = &ppd->sdma_descq[tail].qw[0];
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
+
+ tx->txreq.start_idx = tail;
+
+ sge = &ss->sge;
+ while (dwords) {
+ u32 dw;
+ u32 len;
+
+ len = dwords << 2;
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ dw = (len + 3) >> 2;
+ addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
+ dw << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
+ goto unmap;
+ sdmadesc[0] = 0;
+ make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
+ /* SDmaUseLargeBuf has to be set in every descriptor */
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+ /* write to the descq */
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
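+ /*
+ * Advance the SGE state; move to the next SGE or MR segment
+ * once the current one is consumed.
+ */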
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+
+ dwoffset += dw;
+ dwords -= dw;
+ }
+
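+ /*
+ * Mark the last descriptor of the packet; request a head
+ * write-back and/or an interrupt if the txreq asks for them.
+ */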
+ if (!tail)
+ descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
+ descqp -= 2;
+ descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
+
+ atomic_inc(&tx->qp->s_dma_busy);
+ tx->txreq.next_descq_idx = tail;
+ ppd->dd->f_sdma_update_tail(ppd, tail);
+ ppd->sdma_descq_added += tx->txreq.sg_count;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ goto unlock;
+
+unmap:
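+ /*
+ * Walk tail back to where we started, unmapping the payload
+ * descriptors queued above.
+ */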
+ for (;;) {
+ if (!tail)
+ tail = ppd->sdma_descq_cnt - 1;
+ else
+ tail--;
+ if (tail == ppd->sdma_descq_tail)
+ break;
+ unmap_desc(ppd, tail);
+ }
+ qp = tx->qp;
+ qib_put_txreq(tx);
+ spin_lock(&qp->s_lock);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ /* XXX what about error sending RDMA read responses? */
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
+ qib_error_qp(qp, IB_WC_GENERAL_ERR);
+ } else if (qp->s_wqe)
+ qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ spin_unlock(&qp->s_lock);
+ /* return zero to process the next send work request */
+ goto unlock;
+
+busy:
+ qp = tx->qp;
+ spin_lock(&qp->s_lock);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ struct qib_ibdev *dev;
+
+ /*
+ * If we couldn't queue the DMA request, save the info
+ * and try again later rather than destroying the
+ * buffer and undoing the side effects of the copy.
+ */
+ tx->ss = ss;
+ tx->dwords = dwords;
+ qp->s_tx = tx;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ struct qib_ibport *ibp;
+
+ ibp = &ppd->ibport_data;
+ ibp->n_dmawait++;
+ qp->s_flags |= QIB_S_WAIT_DMA_DESC;
+ list_add_tail(&qp->iowait, &dev->dmawait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&qp->s_lock);
+ ret = -EBUSY;
+ } else {
+ spin_unlock(&qp->s_lock);
+ qib_put_txreq(tx);
+ }
+unlock:
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return ret;
+}
+
+void qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_process_event(ppd, event);
+
+ if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
+ switch (ss->current_state) {
+ case qib_sdma_state_s00_hw_down:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ break;
+ case qib_sdma_event_e30_go_running:
+ /*
+ * If down, but running requested (usually the result of a
+ * link coming up), we need to start up.
+ * This can happen when hw down is requested while
+ * bringing the link up with traffic active on
+ * the 7220, for example.
+ */
+ ss->go_s99_running = 1;
+ /* fall through and start dma engine */
+ case qib_sdma_event_e10_go_hw_start:
+ /* This reference means the state machine is started */
+ sdma_get(&ppd->sdma_state);
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ sdma_set_state(ppd, ss->go_s99_running ?
+ qib_sdma_state_s99_running :
+ qib_sdma_state_s20_idle);
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s20_idle:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ sdma_set_state(ppd, qib_sdma_state_s99_running);
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ sdma_hw_start_up(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s40_hw_clean_up_wait);
+ ppd->dd->f_sdma_hw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s99_running:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+ }
+
+ ss->last_event = event;
+}
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 00000000000..c3ec8efc2ed
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
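+ /* The ring is full if advancing head would catch up with tail. */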
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ */
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+ struct qib_srq *srq;
+ u32 sz;
+ struct ib_srq *ret;
+
+ if (srq_init_attr->attr.max_sge == 0 ||
+ srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
+ srq_init_attr->attr.max_wr == 0 ||
+ srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ sizeof(struct qib_rwqe);
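+ /*
+ * The ring is a qib_rwq header followed by rq.size WQEs, each a
+ * qib_rwqe plus max_sge SG entries.
+ */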
+ srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
+ if (!srq->rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_srq;
+ }
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+ u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
+
+ srq->ip =
+ qib_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
+ if (!srq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wq;
+ }
+
+ err = ib_copy_to_udata(udata, &srq->ip->offset,
+ sizeof(srq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ srq->ip = NULL;
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->rq.wq->head = 0;
+ srq->rq.wq->tail = 0;
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ spin_lock(&dev->n_srqs_lock);
+ if (dev->n_srqs_allocated == ib_qib_max_srqs) {
+ spin_unlock(&dev->n_srqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_srqs_allocated++;
+ spin_unlock(&dev->n_srqs_lock);
+
+ if (srq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &srq->ibsrq;
+ goto done;
+
+bail_ip:
+ kfree(srq->ip);
+bail_wq:
+ vfree(srq->rq.wq);
+bail_srq:
+ kfree(srq);
+done:
+ return ret;
+}
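+
+/*
+ * Sizing sketch for the allocation above (illustrative numbers only):
+ * with max_wr = 255 and max_sge = 4, rq.size becomes 256 and each ring
+ * entry occupies sizeof(struct qib_rwqe) plus 4 * sizeof(struct ib_sge),
+ * so vmalloc_user() is asked for sizeof(struct qib_rwq) plus 256 such
+ * entries.  get_rwqe_ptr() is assumed to index the flat buffer with
+ * the same per-entry stride.
+ */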
+
+/**
+ * qib_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ */
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ int ret = 0;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct qib_rwq *owq;
+ struct qib_rwqe *p;
+ u32 sz, size, n, head, tail;
+
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > ib_qib_max_srq_wrs) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ sz = sizeof(struct qib_rwqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+ size = attr->max_wr + 1;
+ wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
+ if (!wq) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = 0;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret)
+ goto bail_free;
+ udata->outbuf =
+ (void __user *) (unsigned long) offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&srq->rq.lock);
+ /*
+ * validate head and tail pointer values and compute
+ * the number of remaining WQEs.
+ */
+ owq = srq->rq.wq;
+ head = owq->head;
+ tail = owq->tail;
+ if (head >= srq->rq.size || tail >= srq->rq.size) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
+ else
+ n -= tail;
+ if (size <= n) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = 0;
+ p = wq->wq;
+ while (tail != head) {
+ struct qib_rwqe *wqe;
+ int i;
+
+ wqe = get_rwqe_ptr(&srq->rq, tail);
+ p->wr_id = wqe->wr_id;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct qib_rwqe *)((char *) p + sz);
+ if (++tail >= srq->rq.size)
+ tail = 0;
+ }
+ srq->rq.wq = wq;
+ srq->rq.size = size;
+ wq->head = n;
+ wq->tail = 0;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+
+ vfree(owq);
+
+ if (srq->ip) {
+ struct qib_mmap_info *ip = srq->ip;
+ struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
+ u32 s = sizeof(struct qib_rwq) + size * sz;
+
+ qib_update_mmap_info(dev, ip, s, wq);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ /*
+ * Put user mapping info onto the pending list
+ * unless it already is on the list.
+ */
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps,
+ &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+ }
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&srq->rq.lock);
+bail_free:
+ vfree(wq);
+bail:
+ return ret;
+}
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * qib_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int qib_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_ibdev *dev = to_idev(ibsrq->device);
+
+ spin_lock(&dev->n_srqs_lock);
+ dev->n_srqs_allocated--;
+ spin_unlock(&dev->n_srqs_lock);
+ if (srq->ip)
+ kref_put(&srq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(srq->rq.wq);
+ kfree(srq);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 00000000000..dab4d9f4a2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/ctype.h>
+
+#include "qib.h"
+
+/**
+ * qib_parse_ushort - parse an unsigned short value in an arbitrary base
+ * @str: the string containing the number
+ * @valp: where to put the result
+ *
+ * Returns the number of bytes consumed, or a negative value on error.
+ */
+static int qib_parse_ushort(const char *str, unsigned short *valp)
+{
+ unsigned long val;
+ char *end;
+ int ret;
+
+ if (!isdigit(str[0])) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ val = simple_strtoul(str, &end, 0);
+
+ if (val > 0xffff) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *valp = val;
+
+ ret = end + 1 - str;
+ if (ret == 0)
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
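+
+/*
+ * Typical use from a store method, sketched with a hypothetical
+ * attribute (the real callers follow below):
+ *
+ *	u16 val;
+ *	int ret = qib_parse_ushort(buf, &val);
+ *
+ *	if (ret <= 0)
+ *		return -EINVAL;
+ *	apply val, then return count
+ */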
+
+/* start of per-port functions */
+/*
+ * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
+ */
+static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+
+ ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return ret;
+}
+
+static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+
+ /*
+ * Set the "intentional" heartbeat enable per either of
+ * "Enable" and "Auto", as these are normally set together.
+ * This bit is consulted when leaving loopback mode,
+ * because entering loopback mode overrides it and automatically
+ * disables heartbeat.
+ */
+ if (ret >= 0)
+ ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
+ if (ret < 0)
+ qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = count, r;
+
+ r = dd->f_set_ib_loopback(ppd, buf);
+ if (r < 0)
+ ret = r;
+
+ return ret;
+}
+
+static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+ if (ret > 0)
+ qib_set_led_override(ppd, val);
+ else
+ qib_dev_err(dd, "attempt to set invalid LED override\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
+{
+ ssize_t ret;
+
+ if (!ppd->statusp)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long) *(ppd->statusp));
+ return ret;
+}
+
+/*
+ * For userland compatibility, these offsets must remain fixed.
+ * They are strings for QIB_STATUS_*
+ */
+static const char *qib_status_str[] = {
+ "Initted",
+ "",
+ "",
+ "",
+ "",
+ "Present",
+ "IB_link_up",
+ "IB_configured",
+ "",
+ "Fatal_Hardware_Error",
+ NULL,
+};
+
+static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
+{
+ int i, any;
+ u64 s;
+ ssize_t ret;
+
+ if (!ppd->statusp) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ s = *(ppd->statusp);
+ *buf = '\0';
+ for (any = i = 0; s && qib_status_str[i]; i++) {
+ if (s & 1) {
+ /* if overflow */
+ if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ break;
+ if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
+ PAGE_SIZE)
+ break;
+ any = 1;
+ }
+ s >>= 1;
+ }
+ if (any)
+ strlcat(buf, "\n", PAGE_SIZE);
+
+ ret = strlen(buf);
+
+bail:
+ return ret;
+}
+
+/* end of per-port functions */
+
+/*
+ * Start of per-port file structures and support code
+ * Because we are fitting into other infrastructure, we have to supply the
+ * full set of kobject/sysfs_ops structures and routines.
+ */
+#define QIB_PORT_ATTR(name, mode, show, store) \
+ static struct qib_port_attr qib_port_attr_##name = \
+ __ATTR(name, mode, show, store)
+
+struct qib_port_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct qib_pportdata *, char *);
+ ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
+};
+
+QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
+QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
+ store_hrtbt_enb);
+QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
+QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
+
+static struct attribute *port_default_attributes[] = {
+ &qib_port_attr_loopback.attr,
+ &qib_port_attr_led_override.attr,
+ &qib_port_attr_hrtbt_enable.attr,
+ &qib_port_attr_status.attr,
+ &qib_port_attr_status_str.attr,
+ NULL
+};
+
+static ssize_t qib_portattr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->show(ppd, buf);
+}
+
+static ssize_t qib_portattr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t len)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->store(ppd, buf, len);
+}
+
+static void qib_port_release(struct kobject *kobj)
+{
+ /* nothing to do since memory is freed by qib_free_devdata() */
+}
+
+static const struct sysfs_ops qib_port_ops = {
+ .show = qib_portattr_show,
+ .store = qib_portattr_store,
+};
+
+static struct kobj_type qib_port_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_port_ops,
+ .default_attrs = port_default_attributes
+};
+
+/* Start sl2vl */
+
+#define QIB_SL2VL_ATTR(N) \
+ static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .sl = N \
+ }
+
+struct qib_sl2vl_attr {
+ struct attribute attr;
+ int sl;
+};
+
+QIB_SL2VL_ATTR(0);
+QIB_SL2VL_ATTR(1);
+QIB_SL2VL_ATTR(2);
+QIB_SL2VL_ATTR(3);
+QIB_SL2VL_ATTR(4);
+QIB_SL2VL_ATTR(5);
+QIB_SL2VL_ATTR(6);
+QIB_SL2VL_ATTR(7);
+QIB_SL2VL_ATTR(8);
+QIB_SL2VL_ATTR(9);
+QIB_SL2VL_ATTR(10);
+QIB_SL2VL_ATTR(11);
+QIB_SL2VL_ATTR(12);
+QIB_SL2VL_ATTR(13);
+QIB_SL2VL_ATTR(14);
+QIB_SL2VL_ATTR(15);
+
+static struct attribute *sl2vl_default_attributes[] = {
+ &qib_sl2vl_attr_0.attr,
+ &qib_sl2vl_attr_1.attr,
+ &qib_sl2vl_attr_2.attr,
+ &qib_sl2vl_attr_3.attr,
+ &qib_sl2vl_attr_4.attr,
+ &qib_sl2vl_attr_5.attr,
+ &qib_sl2vl_attr_6.attr,
+ &qib_sl2vl_attr_7.attr,
+ &qib_sl2vl_attr_8.attr,
+ &qib_sl2vl_attr_9.attr,
+ &qib_sl2vl_attr_10.attr,
+ &qib_sl2vl_attr_11.attr,
+ &qib_sl2vl_attr_12.attr,
+ &qib_sl2vl_attr_13.attr,
+ &qib_sl2vl_attr_14.attr,
+ &qib_sl2vl_attr_15.attr,
+ NULL
+};
+
+static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_sl2vl_attr *sattr =
+ container_of(attr, struct qib_sl2vl_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, sl2vl_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+}
+
+static const struct sysfs_ops qib_sl2vl_ops = {
+ .show = sl2vl_attr_show,
+};
+
+static struct kobj_type qib_sl2vl_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_sl2vl_ops,
+ .default_attrs = sl2vl_default_attributes
+};
+
+/* End sl2vl */
+
+/* Start diag_counters */
+
+#define QIB_DIAGC_ATTR(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .counter = offsetof(struct qib_ibport, n_##N) \
+ }
+
+struct qib_diagc_attr {
+ struct attribute attr;
+ size_t counter;
+};
+
+QIB_DIAGC_ATTR(rc_resends);
+QIB_DIAGC_ATTR(rc_acks);
+QIB_DIAGC_ATTR(rc_qacks);
+QIB_DIAGC_ATTR(rc_delayed_comp);
+QIB_DIAGC_ATTR(seq_naks);
+QIB_DIAGC_ATTR(rdma_seq);
+QIB_DIAGC_ATTR(rnr_naks);
+QIB_DIAGC_ATTR(other_naks);
+QIB_DIAGC_ATTR(rc_timeouts);
+QIB_DIAGC_ATTR(loop_pkts);
+QIB_DIAGC_ATTR(pkt_drops);
+QIB_DIAGC_ATTR(dmawait);
+QIB_DIAGC_ATTR(unaligned);
+QIB_DIAGC_ATTR(rc_dupreq);
+QIB_DIAGC_ATTR(rc_seqnak);
+
+static struct attribute *diagc_default_attributes[] = {
+ &qib_diagc_attr_rc_resends.attr,
+ &qib_diagc_attr_rc_acks.attr,
+ &qib_diagc_attr_rc_qacks.attr,
+ &qib_diagc_attr_rc_delayed_comp.attr,
+ &qib_diagc_attr_seq_naks.attr,
+ &qib_diagc_attr_rdma_seq.attr,
+ &qib_diagc_attr_rnr_naks.attr,
+ &qib_diagc_attr_other_naks.attr,
+ &qib_diagc_attr_rc_timeouts.attr,
+ &qib_diagc_attr_loop_pkts.attr,
+ &qib_diagc_attr_pkt_drops.attr,
+ &qib_diagc_attr_dmawait.attr,
+ &qib_diagc_attr_unaligned.attr,
+ &qib_diagc_attr_rc_dupreq.attr,
+ &qib_diagc_attr_rc_seqnak.attr,
+ NULL
+};
+
+static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, diagc_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+}
+
+static const struct sysfs_ops qib_diagc_ops = {
+ .show = diagc_attr_show,
+};
+
+static struct kobj_type qib_diagc_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_diagc_ops,
+ .default_attrs = diagc_default_attributes
+};
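+
+/*
+ * How the offsetof() scheme above resolves at read time, for one
+ * counter (names as defined in this file): QIB_DIAGC_ATTR(rc_resends)
+ * records offsetof(struct qib_ibport, n_rc_resends), and
+ * diagc_attr_show() adds that offset to the qib_ibport base pointer
+ * and reads the result as a u32.  One show routine therefore serves
+ * every counter file, at the cost of assuming all counters are u32.
+ */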
+
+/* End diag_counters */
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+
+ return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (!dd->boardname)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
+ return ret;
+}
+
+static ssize_t show_version(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
+}
+
+static ssize_t show_boardversion(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+}
+
+
+static ssize_t show_localbus_info(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
+}
+
+
+static ssize_t show_nctxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+ dd->first_user_ctxt);
+}
+
+static ssize_t show_serial(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ buf[sizeof dd->serial] = '\0';
+ memcpy(buf, dd->serial, sizeof dd->serial);
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t store_chip_reset(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = qib_reset_device(dd->unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_logged_errs(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int idx, count;
+
+ /* force consistency with actual EEPROM */
+ if (qib_update_eeprom_log(dd) != 0)
+ return -ENXIO;
+
+ count = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+ dd->eep_st_errs[idx],
+ idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+ }
+
+ return count;
+}
+
+/*
+ * Dump tempsense registers in decimal, to ease shell scripting.
+ */
+static ssize_t show_tempsense(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+ int idx;
+ u8 regvals[8];
+
+ ret = -ENXIO;
+ for (idx = 0; idx < 8; ++idx) {
+ if (idx == 6)
+ continue;
+ ret = dd->f_tempsense_rd(dd, idx);
+ if (ret < 0)
+ break;
+ regvals[idx] = ret;
+ }
+ if (idx == 8)
+ ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
+ *(signed char *)(regvals),
+ *(signed char *)(regvals + 1),
+ regvals[2], regvals[3],
+ *(signed char *)(regvals + 5),
+ *(signed char *)(regvals + 7));
+ return ret;
+}
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+
+static struct device_attribute *qib_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+ &dev_attr_version,
+ &dev_attr_nctxts,
+ &dev_attr_serial,
+ &dev_attr_boardversion,
+ &dev_attr_logged_errors,
+ &dev_attr_tempsense,
+ &dev_attr_localbus_info,
+ &dev_attr_chip_reset,
+};
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (!port_num || port_num > dd->num_pports) {
+ qib_dev_err(dd, "Skipping infiniband class with "
+ "invalid port %u\n", port_num);
+ ret = -ENODEV;
+ goto bail;
+ }
+ ppd = &dd->pport[port_num - 1];
+
+ ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
+ "linkcontrol");
+ if (ret) {
+ qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail;
+ }
+ kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
+ "sl2vl");
+ if (ret) {
+ qib_dev_err(dd, "Skipping sl2vl sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_sl;
+ }
+ kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
+ "diag_counters");
+ if (ret) {
+ qib_dev_err(dd, "Skipping diag_counters sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_diagc;
+ }
+ kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
+
+ return 0;
+
+bail_diagc:
+ kobject_put(&ppd->sl2vl_kobj);
+bail_sl:
+ kobject_put(&ppd->pport_kobj);
+bail:
+ return ret;
+}
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int qib_verbs_register_sysfs(struct qib_devdata *dd)
+{
+ struct ib_device *dev = &dd->verbs_dev.ibdev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
+ ret = device_create_file(&dev->dev, qib_attributes[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i;
+
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+ kobject_put(&ppd->pport_kobj);
+ kobject_put(&ppd->sl2vl_kobj);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 00000000000..6f31ca5039d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * QLogic_IB "Two Wire Serial Interface" driver.
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards. Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic interface
+ * (e.g. pretending we don't know whether '1' is the higher voltage),
+ * as the restrictions of the generic i2c layer (e.g. no access from
+ * the driver itself) make it unsuitable for this use.
+ */
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the qlogic_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip. Since we are delaying anyway, the cost doesn't
+ * hurt, and it makes the bit twiddling more regular.
+ */
+static void i2c_wait_for_writes(struct qib_devdata *dd)
+{
+ /*
+ * implicit read of EXTStatus is as good as explicit
+ * read of scratch, if all we want to do is flush
+ * writes.
+ */
+ dd->f_gpio_mod(dd, 0, 0, 0);
+ rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
+ * for "almost compliant" modules
+ */
+#define SCL_WAIT_USEC 1000
+
+/* BUF_WAIT is the time the bus must be free between a STOP or ACK
+ * and the next START.  Should be 20, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
+static void scl_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ udelay(1);
+
+ mask = 1UL << dd->gpio_scl_num;
+
+ /* SCL is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ /*
+ * Allow for slow slaves with a simple delay on the
+ * falling edge, sampling on the rise.
+ */
+ if (!bit)
+ udelay(2);
+ else {
+ int rise_usec;
+ for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+ if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
+ break;
+ udelay(2);
+ }
+ if (rise_usec <= 0)
+ qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+ SCL_WAIT_USEC);
+ }
+ i2c_wait_for_writes(dd);
+}
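+
+/*
+ * The rise loop above is I2C clock stretching: SCL is released (the
+ * direction bit is cleared and the pull-up raises the line) rather
+ * than driven high, then the pin is polled until the slave stops
+ * holding it low or SCL_WAIT_USEC expires.  With hypothetical helpers
+ * it reduces to:
+ *
+ *	release_scl();
+ *	while (!scl_is_high() && (budget -= 2) > 0)
+ *		udelay(2);
+ */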
+
+static void sda_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ mask = 1UL << dd->gpio_sda_num;
+
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ i2c_wait_for_writes(dd);
+ udelay(2);
+}
+
+static u8 sda_in(struct qib_devdata *dd, int wait)
+{
+ int bnum;
+ u32 read_val, mask;
+
+ bnum = dd->gpio_sda_num;
+ mask = (1UL << bnum);
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+ read_val = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (wait)
+ i2c_wait_for_writes(dd);
+ return (read_val & mask) >> bnum;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the qlogic_ib device
+ */
+static int i2c_ackrcv(struct qib_devdata *dd)
+{
+ u8 ack_received;
+
+ /* AT ENTRY SCL = LOW */
+ /* change direction, ignore data */
+ ack_received = sda_in(dd, 1);
+ scl_out(dd, 1);
+ ack_received = sda_in(dd, 1) == 0;
+ scl_out(dd, 0);
+ return ack_received;
+}
+
+static void stop_cmd(struct qib_devdata *dd);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the qlogic_ib device
+ * @last: nonzero to send STOP after this byte, else ACK
+ *
+ * Returns byte shifted out of device
+ */
+static int rd_byte(struct qib_devdata *dd, int last)
+{
+ int bit_cntr, data;
+
+ data = 0;
+
+ for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+ data <<= 1;
+ scl_out(dd, 1);
+ data |= sda_in(dd, 0);
+ scl_out(dd, 0);
+ }
+ if (last) {
+ scl_out(dd, 1);
+ stop_cmd(dd);
+ } else {
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ sda_out(dd, 1);
+ }
+ return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the qlogic_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct qib_devdata *dd, u8 data)
+{
+ int bit_cntr;
+ u8 bit;
+
+ for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+ bit = (data >> bit_cntr) & 1;
+ sda_out(dd, bit);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ }
+ return (!i2c_ackrcv(dd)) ? 1 : 0;
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct qib_devdata *dd)
+{
+ sda_out(dd, 1);
+ scl_out(dd, 1);
+ sda_out(dd, 0);
+ udelay(1);
+ scl_out(dd, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct qib_devdata *dd)
+{
+ scl_out(dd, 0);
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ sda_out(dd, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct qib_devdata *dd)
+{
+ stop_seq(dd);
+ udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * qib_twsi_reset - reset I2C communication
+ * @dd: the qlogic_ib device
+ */
+
+int qib_twsi_reset(struct qib_devdata *dd)
+{
+ int clock_cycles_left = 9;
+ int was_high = 0;
+ u32 pins, mask;
+
+ /* Both SCL and SDA should be high. If not, there
+ * is something wrong.
+ */
+ mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
+
+ /*
+ * Force pins to desired innocuous state.
+ * This is the default power-on state with out=0 and dir=0,
+ * so the pins are tri-stated and should float high (barring HW problems).
+ */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+
+ /*
+ * Clock nine times to get all listeners into a sane state.
+ * If SDA does not go high at any point, we are wedged.
+ * One vendor recommends then issuing START followed by STOP.
+ * We cannot use our "normal" functions to do that, because
+ * if SCL drops between them, another vendor's part will
+ * wedge, dropping SDA and keeping it low forever, at the end of
+ * the next transaction (even if it was not the device addressed).
+ * So our START and STOP take place with SCL held high.
+ */
+ while (clock_cycles_left--) {
+ scl_out(dd, 0);
+ scl_out(dd, 1);
+ /* Note if SDA is high, but keep clocking to sync slave */
+ was_high |= sda_in(dd, 0);
+ }
+
+ if (was_high) {
+ /*
+ * We saw a high, which we hope means the slave is sync'd.
+ * Issue START, STOP, pause for T_BUF.
+ */
+
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if ((pins & mask) != mask)
+ qib_dev_err(dd, "GPIO pins not at rest: %d\n",
+ pins & mask);
+ /* Drop SDA to issue START */
+ udelay(1); /* Guarantee .6 uSec setup */
+ sda_out(dd, 0);
+ udelay(1); /* Guarantee .6 uSec hold */
+ /* At this point, SCL is high, SDA low. Raise SDA for STOP */
+ sda_out(dd, 1);
+ udelay(TWSI_BUF_WAIT_USEC);
+ }
+
+ return !was_high;
+}
+
+#define QIB_TWSI_START 0x100
+#define QIB_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * returns 0 if OK (ACK received), else != 0
+ */
+static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
+{
+ int ret = 1;
+ if (flags & QIB_TWSI_START)
+ start_seq(dd);
+
+ ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
+
+ if (flags & QIB_TWSI_STOP)
+ stop_cmd(dd);
+ return ret;
+}
+
+/* Added functionality for IBA7220-based cards */
+#define QIB_TEMP_DEV 0x98
+
+/*
+ * qib_twsi_blk_rd
+ * Formerly called qib_eeprom_internal_read, and only used for eeprom,
+ * but now the general interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as an
+ * address within the device. On all other devices found on boards
+ * handled by this driver, the device code is followed by a one-byte
+ * "address" which selects the "register" or "offset" within the
+ * device from which data should be read.
+ */
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
+ void *buffer, int len)
+{
+ int ret;
+ u8 *bp = buffer;
+
+ ret = 1;
+
+ if (dev == QIB_TWSI_NO_DEV) {
+ /* legacy not-really-I2C */
+ addr = (addr << 1) | READ_CMD;
+ ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
+ } else {
+ /* Actual I2C */
+ ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * SFF spec claims we do _not_ stop after the addr
+ * but simply issue a start with the "read" dev-addr.
+ * Since we are implicitly waiting for ACK here,
+ * we need t_buf (nominally 20uSec) before that start,
+ * and cannot rely on the delay built into the STOP.
+ */
+ ret = qib_twsi_wr(dd, addr, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+
+ if (ret) {
+ qib_dev_err(dd,
+ "Failed to write interface read addr %02X\n",
+ addr);
+ ret = 1;
+ goto bail;
+ }
+ ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+ }
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+
+ /*
+ * Block devices keep clocking data out as long as we ACK,
+ * automatically incrementing the address. Some have "pages"
+ * whose boundaries will not be crossed, but the handling
+ * of these is left to the caller, who is in a better
+ * position to know.
+ */
+ while (len-- > 0) {
+ /*
+ * Get and store data, sending ACK if length remaining,
+ * else STOP
+ */
+ *bp++ = rd_byte(dd, !len);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
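+
+/*
+ * Example call, with an illustrative device code and offset (callers
+ * elsewhere in the driver pick the real values):
+ *
+ *	u8 tmp[4];
+ *
+ *	if (qib_twsi_blk_rd(dd, QIB_TEMP_DEV, 0, tmp, sizeof(tmp)))
+ *		qib_dev_err(dd, "TWSI read failed\n");
+ */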
+
+/*
+ * qib_twsi_blk_wr
+ * Formerly called qib_eeprom_internal_write, and only used for eeprom,
+ * but now the general interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as an
+ * address within the device. On all other devices found on boards
+ * handled by this driver, the device code is followed by a one-byte
+ * "address" which selects the "register" or "offset" within the
+ * device to which data should be written.
+ */
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len)
+{
+ int sub_len;
+ const u8 *bp = buffer;
+ int max_wait_time, i;
+ int ret;
+ ret = 1;
+
+ while (len > 0) {
+ if (dev == QIB_TWSI_NO_DEV) {
+ if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
+ QIB_TWSI_START)) {
+ goto failed_write;
+ }
+ } else {
+ /* Real I2C */
+ if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+ goto failed_write;
+ ret = qib_twsi_wr(dd, addr, 0);
+ if (ret) {
+ qib_dev_err(dd, "Failed to write interface"
+ " write addr %02X\n", addr);
+ goto failed_write;
+ }
+ }
+
+ sub_len = min(len, 4);
+ addr += sub_len;
+ len -= sub_len;
+
+ for (i = 0; i < sub_len; i++)
+ if (qib_twsi_wr(dd, *bp++, 0))
+ goto failed_write;
+
+ stop_cmd(dd);
+
+ /*
+ * Wait for the write to complete by waiting for a successful
+ * read (the chip replies with a zero after the write
+ * cmd completes, and before it writes to the eeprom).
+ * The startcmd for the read will fail the ack until
+ * the writes have completed. We do this inline to avoid
+ * the debug prints that are in the real read routine
+ * if the startcmd fails.
+ * We also use the proper device address, so it doesn't matter
+ * whether we have real eeprom_dev. Legacy likes any address.
+ */
+ max_wait_time = 100;
+ while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
+ stop_cmd(dd);
+ if (!--max_wait_time)
+ goto failed_write;
+ }
+ /* now read (and ignore) the resulting byte */
+ rd_byte(dd, 1);
+ }
+
+ ret = 0;
+ goto bail;
+
+failed_write:
+ stop_cmd(dd);
+ ret = 1;
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 00000000000..f7eb1ddff5f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+static unsigned qib_hol_timeout_ms = 3000;
+module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(hol_timeout_ms,
+ "duration of user app suspension after link failure");
+
+unsigned qib_sdma_fetch_arb = 1;
+module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
+MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
+
+/**
+ * qib_disarm_piobufs - cancel a range of PIO buffers
+ * @dd: the qlogic_ib device
+ * @first: the first PIO buffer to cancel
+ * @cnt: the number of PIO buffers to cancel
+ *
+ * Cancel a range of PIO buffers. Used at user process close,
+ * in case it died while writing to a PIO buffer.
+ */
+void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
+{
+ unsigned long flags;
+ unsigned i;
+ unsigned last;
+
+ last = first + cnt;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = first; i < last; i++) {
+ __clear_bit(i, dd->pio_need_disarm);
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * This is called by a user process when it sees the DISARM_BUFS event
+ * bit is set.
+ */
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned i;
+ unsigned last;
+ unsigned n = 0;
+
+ last = rcd->pio_base + rcd->piocnt;
+ /*
+ * Don't need uctxt_lock here, since the user has called into us.
+ * Clear at start in case more interrupts set bits while we
+ * are disarming.
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ spin_lock_irq(&dd->pioavail_lock);
+ for (i = rcd->pio_base; i < last; i++) {
+ if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
+ n++;
+ dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ }
+ spin_unlock_irq(&dd->pioavail_lock);
+ return 0;
+}
+
+static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; pidx++) {
+ ppd = dd->pport + pidx;
+ if (i >= ppd->sdma_state.first_sendbuf &&
+ i < ppd->sdma_state.last_sendbuf)
+ return ppd;
+ }
+ return NULL;
+}
+
+/*
+ * Return true if send buffer is being used by a user context.
+ * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
+ */
+static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&dd->uctxt_lock);
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ rcd = dd->rcd[ctxt];
+ if (!rcd || bufn < rcd->pio_base ||
+ bufn >= rcd->pio_base + rcd->piocnt)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Disarm a set of send buffers. If the buffer might be actively being
+ * written to, mark the buffer to be disarmed later when it is not being
+ * written to.
+ *
+ * This should only be called from the IRQ error handler.
+ */
+void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
+ unsigned cnt)
+{
+ struct qib_pportdata *ppd, *pppd[dd->num_pports];
+ unsigned i;
+ unsigned long flags;
+
+ for (i = 0; i < dd->num_pports; i++)
+ pppd[i] = NULL;
+
+ for (i = 0; i < cnt; i++) {
+ int which;
+ if (!test_bit(i, mask))
+ continue;
+ /*
+ * If the buffer is owned by the DMA hardware,
+ * reset the DMA engine.
+ */
+ ppd = is_sdma_buf(dd, i);
+ if (ppd) {
+ pppd[ppd->port] = ppd;
+ continue;
+ }
+ /*
+ * If the kernel is writing the buffer or the buffer is
+ * owned by a user process, we can't clear it yet.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ if (test_bit(i, dd->pio_writing) ||
+ (!test_bit(i << 1, dd->pioavailkernel) &&
+ find_ctxt(dd, i))) {
+ __set_bit(i, dd->pio_need_disarm);
+ which = 0;
+ } else {
+ which = 1;
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ }
+
+ /* do cancel_sends once per port that had sdma piobufs in error */
+ for (i = 0; i < dd->num_pports; i++)
+ if (pppd[i])
+ qib_cancel_sends(pppd[i]);
+}
+
+/**
+ * update_send_bufs - update shadow copy of the PIO availability map
+ * @dd: the qlogic_ib device
+ *
+ * called whenever our local copy indicates we have run out of send buffers
+ */
+static void update_send_bufs(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ unsigned i;
+ const unsigned piobregs = dd->pioavregs;
+
+ /*
+ * If the generation (check) bits have changed, then we update the
+ * busy bit for the corresponding PIO buffer. This algorithm will
+ * modify positions to the value they already have in some cases
+ * (i.e., no change), but it's faster than changing only the bits
+ * that have changed.
+ *
+ * We would like to do this atomically, to avoid spinlocks in the
+ * critical send path, but that's not really possible, given the
+ * type of changes, and that this routine could be called on
+ * multiple CPUs simultaneously, so we lock in this routine only,
+ * to avoid conflicting updates; all we change is the shadow, and
+ * it's a single 64-bit memory location, so by definition the update
+ * is atomic in terms of what other CPUs can see in testing the
+ * bits. The spin_lock overhead isn't too bad, since it only
+ * happens when all buffers are in use, so only cpu overhead, not
+ * latency or bandwidth is affected.
+ */
+ if (!dd->pioavailregs_dma)
+ return;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = 0; i < piobregs; i++) {
+ u64 pchbusy, pchg, piov, pnew;
+
+ piov = le64_to_cpu(dd->pioavailregs_dma[i]);
+ pchg = dd->pioavailkernel[i] &
+ ~(dd->pioavailshadow[i] ^ piov);
+ pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
+ if (pchg && (pchbusy & dd->pioavailshadow[i])) {
+ pnew = dd->pioavailshadow[i] & ~pchbusy;
+ pnew |= piov & pchbusy;
+ dd->pioavailshadow[i] = pnew;
+ }
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
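+
+/*
+ * Reading the bit arithmetic above: pchg selects kernel-owned
+ * positions where the shadow already agrees with the chip's copy
+ * (piov), pchbusy is the same selection shifted to the BUSY bit
+ * positions, and when any of those BUSY bits are still set in the
+ * shadow, pnew clears them and re-fills them from the chip's view.
+ * Only the selected buffers change; everything else in the 64-bit
+ * word is carried over unchanged.
+ */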
+
+/*
+ * Debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_send_bufs(struct qib_devdata *dd)
+{
+ dd->upd_pio_shadow = 1;
+
+ /* not atomic, but if we lose a stat count in a while, that's OK */
+ qib_stats.sps_nopiobufs++;
+}
+
+/*
+ * Common code for normal driver send buffer allocation, and reserved
+ * allocation.
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns buffer pointer if one is found, otherwise NULL.
+ */
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
+ u32 first, u32 last)
+{
+ unsigned i, j, updated = 0;
+ unsigned nbufs;
+ unsigned long flags;
+ unsigned long *shadow = dd->pioavailshadow;
+ u32 __iomem *buf;
+
+ if (!(dd->flags & QIB_PRESENT))
+ return NULL;
+
+ nbufs = last - first + 1; /* number in range to check */
+ if (dd->upd_pio_shadow) {
+ /*
+ * Minor optimization. If we had no buffers on the last call,
+ * start out by doing the update; continue and do the scan even
+ * if no buffers were updated, to be paranoid.
+ */
+ update_send_bufs(dd);
+ updated++;
+ }
+ i = first;
+rescan:
+ /*
+ * While test_and_set_bit() is atomic, we do that and then the
+ * change_bit(), and the pair is not. See if this is the cause
+ * of the remaining armlaunch errors.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (j = 0; j < nbufs; j++, i++) {
+ if (i > last)
+ i = first;
+ if (__test_and_set_bit((2 * i) + 1, shadow))
+ continue;
+ /* flip generation bit */
+ __change_bit(2 * i, shadow);
+ /* remember that the buffer can be written to now */
+ __set_bit(i, dd->pio_writing);
+ break;
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ if (j == nbufs) {
+ if (!updated) {
+ /*
+ * First time through; shadow exhausted, but may be
+ * buffers available, try an update and then rescan.
+ */
+ update_send_bufs(dd);
+ updated++;
+ i = first;
+ goto rescan;
+ }
+ no_send_bufs(dd);
+ buf = NULL;
+ } else {
+ if (i < dd->piobcnt2k)
+ buf = (u32 __iomem *)(dd->pio2kbase +
+ i * dd->palign);
+ else
+ buf = (u32 __iomem *)(dd->pio4kbase +
+ (i - dd->piobcnt2k) * dd->align4k);
+ if (pbufnum)
+ *pbufnum = i;
+ dd->upd_pio_shadow = 0;
+ }
+
+ return buf;
+}
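+
+/*
+ * Address calculation above, as a worked example (illustrative
+ * geometry): with piobcnt2k = 128, palign = 2048 and align4k = 4096,
+ * buffer 5 maps to pio2kbase + 5 * 2048 while buffer 130 maps to
+ * pio4kbase + (130 - 128) * 4096.  The shadow keeps two bits per
+ * buffer, which is why the allocation claims bit (2 * i) + 1 and
+ * flips the generation bit at 2 * i.
+ */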
+
+/*
+ * Record that the caller is finished writing to the buffer so we don't
+ * disarm it while it is being written and disarm it now if needed.
+ */
+void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ __clear_bit(n, dd->pio_writing);
+ if (__test_and_clear_bit(n, dd->pio_need_disarm))
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/**
+ * qib_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the qlogic_ib device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ */
+void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
+ unsigned len, u32 avail, struct qib_ctxtdata *rcd)
+{
+ unsigned long flags;
+ unsigned end;
+ unsigned ostart = start;
+
+ /* There are two bits per send buffer (busy and generation) */
+ start *= 2;
+ end = start + len * 2;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ /* Set or clear the busy bit in the shadow. */
+ while (start < end) {
+ if (avail) {
+ unsigned long dma;
+ int i;
+
+ /*
+ * The BUSY bit will never be set, because we disarm
+ * the user buffers before we hand them back to the
+ * kernel. We do have to make sure the generation
+ * bit is set correctly in the shadow, since it could
+ * have changed many times while allocated to the user.
+ * We can't use the bitmap functions on the full
+ * dma array because it is always little-endian, so
+ * we have to flip to host-order first.
+ * BITS_PER_LONG is slightly wrong, since it's
+ * always 64 bits per register in the chip...
+ * We only work on 64-bit kernels, so that's OK.
+ */
+ i = start / BITS_PER_LONG;
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
+ dd->pioavailshadow);
+ dma = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start) % BITS_PER_LONG, &dma))
+ __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start, dd->pioavailshadow);
+ else
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
+ + start, dd->pioavailshadow);
+ __set_bit(start, dd->pioavailkernel);
+ } else {
+ __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
+ dd->pioavailshadow);
+ __clear_bit(start, dd->pioavailkernel);
+ }
+ start += 2;
+ }
+
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ dd->f_txchk_change(dd, ostart, len, avail, rcd);
+}
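+
+/*
+ * Index scaling example for the routine above: a call with start = 10
+ * and len = 3 walks shadow offsets 20, 22 and 24, i.e. buffers 10-12,
+ * since the shadow keeps two bits (busy and generation) per send
+ * buffer.  The BUSY and CHECK positions are reached by adding the
+ * QLOGIC_IB_SENDPIOAVAIL_*_SHIFT constants to the scaled offset, as
+ * done in the loop body.
+ */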
+
+/*
+ * Flush all sends that might be in the ready to send state, as well as any
+ * that are in the process of being sent. Used whenever we need to be
+ * sure the send side is idle. Cleans up all buffer state by canceling
+ * all pio buffers, and issuing an abort, which cleans up anything in the
+ * launch fifo. The cancel is superfluous on some chip versions, but
+ * it's safer to always do it.
+ * PIOAvail bits are updated by the chip as if a normal send had happened.
+ */
+void qib_cancel_sends(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ unsigned ctxt;
+ unsigned i;
+ unsigned last;
+
+ /*
+ * Tell PSM to disarm buffers again before trying to reuse them.
+ * We need to be sure the rcd doesn't change out from under us
+ * while we do so. We hold the two locks sequentially. We might
+ * needlessly set some need_disarm bits as a result, if the
+ * context is closed after we release the uctxt_lock, but that's
+ * fairly benign, and safer than nesting the locks.
+ */
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ rcd = dd->rcd[ctxt];
+ if (rcd && rcd->ppd == ppd) {
+ last = rcd->pio_base + rcd->piocnt;
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt,
+ * if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ i = rcd->pio_base;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (; i < last; i++)
+ __set_bit(i, dd->pio_need_disarm);
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ } else
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ }
+
+ if (!(dd->flags & QIB_HAS_SEND_DMA))
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
+ QIB_SENDCTRL_FLUSH);
+}
+
+/*
+ * Force an update of the in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.
+ * If already off, this routine is a nop, on the assumption that the
+ * caller (or set of callers) will "do the right thing".
+ * This is a per-device operation, so just the first port.
+ */
+void qib_force_pio_avail_update(struct qib_devdata *dd)
+{
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+}
+
+void qib_hol_down(struct qib_pportdata *ppd)
+{
+ /*
+ * Cancel sends when the link goes DOWN so that we aren't doing it
+ * at INIT when we might be trying to send SMI packets.
+ */
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ qib_cancel_sends(ppd);
+}
+
+/*
+ * Link is at INIT.
+ * We start the HoL timer so we can detect stuck packets blocking SMP replies.
+ * Timer may already be running, so use mod_timer, not add_timer.
+ */
+void qib_hol_init(struct qib_pportdata *ppd)
+{
+ if (ppd->hol_state != QIB_HOL_INIT) {
+ ppd->hol_state = QIB_HOL_INIT;
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
+
+/*
+ * Link is up: continue any user processes, and ensure the timer
+ * is a nop if running. Let the timer keep running, if set; it
+ * will do nothing once it sees the link is up.
+ */
+void qib_hol_up(struct qib_pportdata *ppd)
+{
+ ppd->hol_state = QIB_HOL_UP;
+}
+
+/*
+ * This is only called via the timer.
+ */
+void qib_hol_event(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ /* If hardware error, etc, skip. */
+ if (!(ppd->dd->flags & QIB_INITTED))
+ return;
+
+ if (ppd->hol_state != QIB_HOL_UP) {
+ /*
+ * Try to flush sends in case a stuck packet is blocking
+ * SMP replies.
+ */
+ qib_hol_down(ppd);
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 00000000000..6c7fe78cca6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
+
+/**
+ * qib_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_uc_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 hwords;
+ u32 bth0;
+ u32 len;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Get the next send request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ qp->s_wqe = NULL;
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /* Check if send work queue is empty. */
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+ /*
+ * Start a new request.
+ */
+ wqe->psn = qp->s_next_psn;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ len = wqe->length;
+ qp->s_len = len;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state =
+ OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ break;
+
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+ qp->s_next_psn++ & QIB_PSN_MASK);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_uc_rcv - handle an incoming UC packet
+ * @ibp: the port the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from qib_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ unsigned long flags;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ struct ib_reth *reth;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+ memset(&wc, 0, sizeof wc);
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /* Compare the PSN with the expected PSN. */
+ if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+ /*
+ * Handle a sequence error.
+ * Silently drop any current message.
+ */
+ qp->r_psn = psn;
+inv:
+ if (qp->r_state == OP(SEND_FIRST) ||
+ qp->r_state == OP(SEND_MIDDLE)) {
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+ } else
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_state = OP(SEND_LAST);
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ goto send_first;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ goto rdma_first;
+
+ default:
+ goto drop;
+ }
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ default:
+ if (opcode == OP(SEND_FIRST) ||
+ opcode == OP(SEND_ONLY) ||
+ opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_FIRST) ||
+ opcode == OP(RDMA_WRITE_ONLY) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ break;
+ goto inv;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+send_first:
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ qp->r_sge = qp->s_rdma_read_sge;
+ else {
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ /*
+ * qp->s_rdma_read_sge will be the owner
+ * of the mr references.
+ */
+ qp->s_rdma_read_sge = qp->r_sge;
+ }
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+ goto send_last_imm;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto rewind;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto rewind;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ break;
+
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto rewind;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto rewind;
+ wc.opcode = IB_WC_RECV;
+last_imm:
+ qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+rdma_first:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ goto drop;
+ }
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto drop;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_ONLY))
+ goto rdma_last;
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ goto rdma_last_imm;
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto drop;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+rdma_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->
+ refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ else {
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ }
+ wc.byte_len = qp->r_len;
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ goto last_imm;
+
+ case OP(RDMA_WRITE_LAST):
+rdma_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ break;
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ goto drop;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ spin_unlock(&qp->r_lock);
+ return;
+
+rewind:
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+drop:
+ ibp->n_pkt_drops++;
+ spin_unlock(&qp->r_lock);
+ return;
+
+op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
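Both directions above treat BTH word 0 as a packed field: the sender ORs the opcode into bits 31:24 (bth0 | (qp->s_state << 24)), while the receiver pulls the opcode back out of bits 31:24, the pad count out of bits 21:20, and tests the solicited flag. A minimal standalone sketch of that packing follows; it is not driver code, and the solicited-bit position used here is an illustrative assumption.

#include <stdint.h>
#include <stdio.h>

/* BTH word 0 as manipulated above: opcode in bits 31:24, pad count in
 * bits 21:20.  EX_BTH_SOLICITED stands in for IB_BTH_SOLICITED; the bit
 * position chosen here is an illustrative assumption, not taken from
 * the driver headers. */
#define EX_BTH_SOLICITED (1u << 23)

int main(void)
{
	uint32_t bth0 = (0x24u << 24) | (2u << 20) | EX_BTH_SOLICITED;

	printf("opcode    = 0x%02x\n", bth0 >> 24);
	printf("pad bytes = %u\n", (bth0 >> 20) & 3);
	printf("solicited = %s\n", (bth0 & EX_BTH_SOLICITED) ? "yes" : "no");
	return 0;
}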
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 00000000000..c838cda7334
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/**
+ * qib_ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from qib_make_ud_req() to forward a WQE addressed
+ * to the same HCA.
+ * Note that the receive interrupt handler may be calling qib_ud_rcv()
+ * while this is being called.
+ */
+static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_pportdata *ppd;
+ struct qib_qp *qp;
+ struct ib_ah_attr *ah_attr;
+ unsigned long flags;
+ struct qib_sge_state ssge;
+ struct qib_sge *sge;
+ struct ib_wc wc;
+ u32 length;
+
+ qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+ if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+ !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+ }
+
+ ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+ ppd = ppd_from_ibp(ibp);
+
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1;
+ u16 pkey2;
+ u16 lid;
+
+ pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (qp->ibqp.qp_num) {
+ u32 qkey;
+
+ qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+ sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+ if (unlikely(qkey != qp->qkey)) {
+ u16 lid;
+
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ length = swqe->length;
+ memset(&wc, 0, sizeof wc);
+ wc.byte_len = length + sizeof(struct ib_grh);
+
+ if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = swqe->wr.ex.imm_data;
+ }
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ qib_copy_sge(&qp->r_sge, &ah_attr->grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ ssge.sg_list = swqe->sg_list + 1;
+ ssge.sge = *swqe->sg_list;
+ ssge.num_sge = swqe->wr.num_sge;
+ sge = &ssge.sge;
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ssge.num_sge)
+ *sge = *ssge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = sqp->ibqp.qp_num;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ swqe->wr.wr.ud.pkey_index : 0;
+ wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+ wc.sl = ah_attr->sl;
+ wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->n_loop_pkts++;
+bail_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_ud_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct ib_ah_attr *ah_attr;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u16 lrh0;
+ u16 lid;
+ int ret = 0;
+ int next_cur;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
+
+ /* Construct the header. */
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
+ if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+ ibp->n_multicast_xmit++;
+ else
+ ibp->n_unicast_xmit++;
+ } else {
+ ibp->n_unicast_xmit++;
+ lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid == ppd->lid)) {
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * XXX Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ qp->s_cur = next_cur;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ goto done;
+ }
+ }
+
+ qp->s_cur = next_cur;
+ extra_bytes = -wqe->length & 3;
+ nwords = (wqe->length + extra_bytes) >> 2;
+
+ /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+ qp->s_hdrwords = 7;
+ qp->s_cur_size = wqe->length;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_srate = ah_attr->static_rate;
+ qp->s_wqe = wqe;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ /* Header size in 32-bit words. */
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &ah_attr->grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ ohdr = &qp->s_hdr.u.l.oth;
+ /*
+ * Don't worry about sending to locally attached multicast
+ * QPs; the spec leaves that behavior unspecified.
+ */
+ } else {
+ /* Header size in 32-bit words. */
+ lrh0 = QIB_LRH_BTH;
+ ohdr = &qp->s_hdr.u.oth;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_hdrwords++;
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ lrh0 |= ah_attr->sl << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ else
+ lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ lid = ppd->lid;
+ if (lid) {
+ lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+ } else
+ qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
+ qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
+ wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ /*
+ * Use the multicast QP if the destination LID is a multicast LID.
+ */
+ ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID ?
+ cpu_to_be32(QIB_MULTICAST_QPN) :
+ cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+ qp->qkey : wqe->wr.wr.ud.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
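qib_make_ud_req() above rounds the payload up to a 4-byte boundary with extra_bytes = -wqe->length & 3 and advertises that pad count in BTH bits 21:20 (bth0 |= extra_bytes << 20). A small standalone check of the -len & 3 identity, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int len;

	/* -len & 3 is the number of pad bytes needed to reach the next
	 * 4-byte boundary, exactly as computed for extra_bytes above. */
	for (len = 0; len < 8; len++)
		printf("length %u -> pad %u -> %u dwords on the wire\n",
		       len, -len & 3, (len + (-len & 3)) >> 2);
	return 0;
}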
+
+static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned i;
+
+ pkey &= 0x7fff; /* remove limited/full membership bit */
+
+ for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
+ if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
+ return i;
+
+ /*
+ * We should not get here; it means the hardware failed to validate pkeys.
+ * Punt and return index 0.
+ */
+ return 0;
+}
+
+/**
+ * qib_ud_rcv - receive an incoming UD packet
+ * @ibp: the port the packet came in on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ int opcode;
+ u32 hdrsize;
+ u32 pad;
+ struct ib_wc wc;
+ u32 qkey;
+ u32 src_qp;
+ u16 dlid;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
+ }
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ /* Drop incomplete packets. */
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ tlen -= hdrsize + pad + 4;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE)) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1, pkey2;
+
+ pkey1 = be32_to_cpu(ohdr->bth[0]);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ pkey1,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) &
+ 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ }
+ if (unlikely(qkey != qp->qkey)) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely(qp->ibqp.qp_num == 1 &&
+ (tlen != 256 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ } else {
+ struct ib_smp *smp;
+
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ smp = (struct ib_smp *) data;
+ if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ }
+
+ /*
+ * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ hdrsize += sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+ } else {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+
+ /*
+ * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * We need to serialize getting a receive work queue entry and
+ * generating a completion for it against QPs sending to this QP
+ * locally.
+ */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+ if (has_grh) {
+ qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = src_qp;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+ wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+ dlid = be16_to_cpu(hdr->lrh[1]);
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+ dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+bail_unlock:
+ spin_unlock(&qp->r_lock);
+bail:;
+}
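Both UD send paths above apply the qkey rule quoted in the comments: a work-request qkey with the high-order bit set (negative when read as a signed 32-bit value) means "use the qkey from the QP context" (see 10.2.5). A standalone sketch of that selection, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Mirrors "(int)wr.ud.remote_qkey < 0 ? qp->qkey : wr.ud.remote_qkey"
 * from the send paths above. */
static uint32_t effective_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
	return ((int32_t)wr_qkey < 0) ? qp_qkey : wr_qkey;
}

int main(void)
{
	/* high bit set -> fall back to the QP context qkey */
	printf("0x%08x\n", (unsigned int)effective_qkey(0x80000000u, 0x11111111u));
	/* high bit clear -> use the qkey from the work request */
	printf("0x%08x\n", (unsigned int)effective_qkey(0x00002222u, 0x11111111u));
	return 0;
}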
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 00000000000..d7a26c1d4f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "qib.h"
+
+static void __qib_release_user_pages(struct page **p, size_t num_pages,
+ int dirty)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
+}
+
+/*
+ * Call with current->mm->mmap_sem held.
+ */
+static int __get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
+{
+ unsigned long lock_limit;
+ size_t got;
+ int ret;
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+ num_pages - got, 1, 1,
+ p + got, vma);
+ if (ret < 0)
+ goto bail_release;
+ }
+
+ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+
+bail_release:
+ __qib_release_user_pages(p, got, 0);
+bail:
+ return ret;
+}
+
+/**
+ * qib_map_page - a safety wrapper around pci_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc iommu assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to ensure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
+ */
+dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ dma_addr_t phys;
+
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+
+ if (phys == 0) {
+ pci_unmap_page(hwdev, phys, size, direction);
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+ /*
+ * FIXME: If we get 0 again, we should keep this page,
+ * map another, then free the 0 page.
+ */
+ }
+
+ return phys;
+}
+
+/**
+ * qib_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages. For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+ if (current->mm) /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+}
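__get_user_pages() above refuses to pin more pages than RLIMIT_MEMLOCK allows unless the caller has CAP_IPC_LOCK. The same limit can be computed from userspace with standard interfaces; the sketch below is illustrative and uses nothing from the driver:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl;
	long page_size = sysconf(_SC_PAGESIZE);

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	/* Same idea as "lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT"
	 * in the kernel code above, expressed with userspace interfaces. */
	printf("lockable pages: %llu\n",
	       (unsigned long long)(rl.rlim_cur / (unsigned long)page_size));
	return 0;
}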
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 00000000000..4c19e06b5e8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_user_sdma.h"
+
+/* minimum size of header */
+#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
+/* expected size of headers (for dma_pool) */
+#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
+/* attempt to drain the queue for 5secs */
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+
+struct qib_user_sdma_pkt {
+ u8 naddr; /* dimension of addr (1..3) ... */
+ u32 counter; /* sdma pkts queued counter for this entry */
+ u64 added; /* global descq number of entries */
+
+ struct {
+ u32 offset; /* offset for kvaddr, addr */
+ u32 length; /* length in page */
+ u8 put_page; /* should we put_page? */
+ u8 dma_mapped; /* is page dma_mapped? */
+ struct page *page; /* may be NULL (coherent mem) */
+ void *kvaddr; /* FIXME: only for pio hack */
+ dma_addr_t addr;
+ } addr[4]; /* max pages, any more and we coalesce */
+ struct list_head list; /* list element */
+};
+
+struct qib_user_sdma_queue {
+ /*
+ * Packets sent to the dma engine are queued on this
+ * list head. The elements of this list are of type
+ * struct qib_user_sdma_pkt...
+ */
+ struct list_head sent;
+
+ /* headers with expected length are allocated from here... */
+ char header_cache_name[64];
+ struct dma_pool *header_cache;
+
+ /* packets are allocated from the slab cache... */
+ char pkt_slab_name[64];
+ struct kmem_cache *pkt_slab;
+
+ /* as packets go on the queued queue, they are counted... */
+ u32 counter;
+ u32 sent_counter;
+
+ /* dma page table */
+ struct rb_root dma_pages_root;
+
+ /* protect everything above... */
+ struct mutex lock;
+};
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
+{
+ struct qib_user_sdma_queue *pq =
+ kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+
+ if (!pq)
+ goto done;
+
+ pq->counter = 0;
+ pq->sent_counter = 0;
+ INIT_LIST_HEAD(&pq->sent);
+
+ mutex_init(&pq->lock);
+
+ snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
+ "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
+ sizeof(struct qib_user_sdma_pkt),
+ 0, 0, NULL);
+
+ if (!pq->pkt_slab)
+ goto err_kfree;
+
+ snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
+ "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->header_cache = dma_pool_create(pq->header_cache_name,
+ dev,
+ QIB_USER_SDMA_EXP_HEADER_LENGTH,
+ 4, 0);
+ if (!pq->header_cache)
+ goto err_slab;
+
+ pq->dma_pages_root = RB_ROOT;
+
+ goto done;
+
+err_slab:
+ kmem_cache_destroy(pq->pkt_slab);
+err_kfree:
+ kfree(pq);
+ pq = NULL;
+
+done:
+ return pq;
+}
+
+static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
+ int i, size_t offset, size_t len,
+ int put_page, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->addr[i].offset = offset;
+ pkt->addr[i].length = len;
+ pkt->addr[i].put_page = put_page;
+ pkt->addr[i].dma_mapped = dma_mapped;
+ pkt->addr[i].page = page;
+ pkt->addr[i].kvaddr = kvaddr;
+ pkt->addr[i].addr = dma_addr;
+}
+
+static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
+ u32 counter, size_t offset,
+ size_t len, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->naddr = 1;
+ pkt->counter = counter;
+ qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
+ kvaddr, dma_addr);
+}
+
+/* we have too many pages in the iovec; coalesce them into a single page */
+static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ struct page *page = alloc_page(GFP_KERNEL);
+ void *mpage_save;
+ char *mpage;
+ int i;
+ int len = 0;
+ dma_addr_t dma_addr;
+
+ if (!page) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ mpage = kmap(page);
+ mpage_save = mpage;
+ for (i = 0; i < niov; i++) {
+ int cfur;
+
+ cfur = copy_from_user(mpage,
+ iov[i].iov_base, iov[i].iov_len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_unmap;
+ }
+
+ mpage += iov[i].iov_len;
+ len += iov[i].iov_len;
+ }
+
+ dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_unmap;
+ }
+
+ qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
+ dma_addr);
+ pkt->naddr = 2;
+
+ goto done;
+
+free_unmap:
+ kunmap(page);
+ __free_page(page);
+done:
+ return ret;
+}
+
+/*
+ * How many pages in this iovec element?
+ */
+static int qib_user_sdma_num_pages(const struct iovec *iov)
+{
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+/*
+ * Truncate length to page boundary.
+ */
+static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
+{
+ const unsigned long offset = addr & ~PAGE_MASK;
+
+ return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
+}
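As a worked example of the two helpers above (assuming a 4 KiB page size): an iovec that starts 512 bytes into a page and is 6000 bytes long spans two pages, and its first fragment is truncated to 3584 bytes. The standalone sketch below reproduces that arithmetic and is for illustration only:

#include <stdio.h>

#define EX_PAGE_SIZE 4096UL		/* assumed page size for the example */
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x10000200UL;	/* 512 bytes into a page */
	unsigned long len  = 6000;
	unsigned long spage = addr & EX_PAGE_MASK;
	unsigned long epage = (addr + len - 1) & EX_PAGE_MASK;
	unsigned long offset = addr & ~EX_PAGE_MASK;

	/* Same arithmetic as qib_user_sdma_num_pages()/_page_length() above. */
	printf("pages spanned  = %lu\n", 1 + ((epage - spage) / EX_PAGE_SIZE));
	printf("first fragment = %lu bytes\n",
	       (offset + len > EX_PAGE_SIZE) ? EX_PAGE_SIZE - offset : len);
	return 0;
}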
+
+static void qib_user_sdma_free_pkt_frag(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ int frag)
+{
+ const int i = frag;
+
+ if (pkt->addr[i].page) {
+ if (pkt->addr[i].dma_mapped)
+ dma_unmap_page(dev,
+ pkt->addr[i].addr,
+ pkt->addr[i].length,
+ DMA_TO_DEVICE);
+
+ if (pkt->addr[i].kvaddr)
+ kunmap(pkt->addr[i].page);
+
+ if (pkt->addr[i].put_page)
+ put_page(pkt->addr[i].page);
+ else
+ __free_page(pkt->addr[i].page);
+ } else if (pkt->addr[i].kvaddr)
+ /* free coherent mem from cache... */
+ dma_pool_free(pq->header_cache,
+ pkt->addr[i].kvaddr, pkt->addr[i].addr);
+}
+
+/* return number of pages pinned... */
+static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ unsigned long addr, int tlen, int npages)
+{
+ struct page *pages[2];
+ int j;
+ int ret;
+
+ ret = get_user_pages(current, current->mm, addr,
+ npages, 0, 1, pages, NULL);
+
+ if (ret != npages) {
+ int i;
+
+ for (i = 0; i < ret; i++)
+ put_page(pages[i]);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (j = 0; j < npages; j++) {
+ /* map the pages... */
+ const int flen = qib_user_sdma_page_length(addr, tlen);
+ dma_addr_t dma_addr =
+ dma_map_page(&dd->pcidev->dev,
+ pages[j], 0, flen, DMA_TO_DEVICE);
+ unsigned long fofs = addr & ~PAGE_MASK;
+
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
+ pages[j], kmap(pages[j]), dma_addr);
+
+ pkt->naddr++;
+ addr += flen;
+ tlen -= flen;
+ }
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ unsigned long idx;
+
+ for (idx = 0; idx < niov; idx++) {
+ const int npages = qib_user_sdma_num_pages(iov + idx);
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+ ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+ iov[idx].iov_len, npages);
+ if (ret < 0)
+ goto free_pkt;
+ }
+
+ goto done;
+
+free_pkt:
+ for (idx = 0; idx < pkt->naddr; idx++)
+ qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov, int npages)
+{
+ int ret = 0;
+
+ if (npages >= ARRAY_SIZE(pkt->addr))
+ ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+ else
+ ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
+
+ return ret;
+}
+
+/* free a packet list */
+static void qib_user_sdma_free_pkt_list(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list)
+{
+ struct qib_user_sdma_pkt *pkt, *pkt_next;
+
+ list_for_each_entry_safe(pkt, pkt_next, list, list) {
+ int i;
+
+ for (i = 0; i < pkt->naddr; i++)
+ qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
+
+ kmem_cache_free(pq->pkt_slab, pkt);
+ }
+}
+
+/*
+ * copy headers, coalesce etc -- pq->lock must be held
+ *
+ * We queue all the packets on the list, returning the number of
+ * iovec entries consumed. The list must be empty initially; if
+ * there is an error we clean it up...
+ */
+static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list,
+ const struct iovec *iov,
+ unsigned long niov,
+ int maxpkts)
+{
+ unsigned long idx = 0;
+ int ret = 0;
+ int npkts = 0;
+ struct page *page = NULL;
+ __le32 *pbc;
+ dma_addr_t dma_addr;
+ struct qib_user_sdma_pkt *pkt = NULL;
+ size_t len;
+ size_t nw;
+ u32 counter = pq->counter;
+ int dma_mapped = 0;
+
+ while (idx < niov && npkts < maxpkts) {
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+ const unsigned long idx_save = idx;
+ unsigned pktnw;
+ unsigned pktnwc;
+ int nfrags = 0;
+ int npages = 0;
+ int cfur;
+
+ dma_mapped = 0;
+ len = iov[idx].iov_len;
+ nw = len >> 2;
+ page = NULL;
+
+ pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_list;
+ }
+
+ if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
+ len > PAGE_SIZE || len & 3 || addr & 3) {
+ ret = -EINVAL;
+ goto free_pkt;
+ }
+
+ if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+ pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+ &dma_addr);
+ else
+ pbc = NULL;
+
+ if (!pbc) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto free_pkt;
+ }
+ pbc = kmap(page);
+ }
+
+ cfur = copy_from_user(pbc, iov[idx].iov_base, len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_pbc;
+ }
+
+ /*
+ * This assignment is a bit strange; it's because the
+ * pbc counts the number of 32 bit words in the full
+ * packet _except_ the first word of the pbc itself...
+ */
+ pktnwc = nw - 1;
+
+ /*
+ * pktnw computation yields the number of 32 bit words
+ * that the caller has indicated in the PBC. Note that
+ * this is one less than the total number of words that
+ * go to the send DMA engine, as the first 32 bit word
+ * of the PBC itself is not counted. Armed with this count,
+ * we can verify that the packet is consistent with the
+ * iovec lengths.
+ */
+ pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
+ if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ idx++;
+ while (pktnwc < pktnw && idx < niov) {
+ const size_t slen = iov[idx].iov_len;
+ const unsigned long faddr =
+ (unsigned long) iov[idx].iov_base;
+
+ if (slen & 3 || faddr & 3 || !slen ||
+ slen > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ npages++;
+ if ((faddr & PAGE_MASK) !=
+ ((faddr + slen - 1) & PAGE_MASK))
+ npages++;
+
+ pktnwc += slen >> 2;
+ idx++;
+ nfrags++;
+ }
+
+ if (pktnwc != pktnw) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ if (page) {
+ dma_addr = dma_map_page(&dd->pcidev->dev,
+ page, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+
+ dma_mapped = 1;
+ }
+
+ qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
+ page, pbc, dma_addr);
+
+ if (nfrags) {
+ ret = qib_user_sdma_init_payload(dd, pq, pkt,
+ iov + idx_save + 1,
+ nfrags, npages);
+ if (ret < 0)
+ goto free_pbc_dma;
+ }
+
+ counter++;
+ npkts++;
+
+ list_add_tail(&pkt->list, list);
+ }
+
+ ret = idx;
+ goto done;
+
+free_pbc_dma:
+ if (dma_mapped)
+ dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pbc:
+ if (page) {
+ kunmap(page);
+ __free_page(page);
+ } else
+ dma_pool_free(pq->header_cache, pbc, dma_addr);
+free_pkt:
+ kmem_cache_free(pq->pkt_slab, pkt);
+free_list:
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
+done:
+ return ret;
+}
+
+static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
+ u32 c)
+{
+ pq->sent_counter = c;
+}
+
+/* try to clean out queue -- needs pq->lock */
+static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct list_head free_list;
+ struct qib_user_sdma_pkt *pkt;
+ struct qib_user_sdma_pkt *pkt_prev;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&free_list);
+
+ list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
+ s64 descd = ppd->sdma_descq_removed - pkt->added;
+
+ if (descd < 0)
+ break;
+
+ list_move_tail(&pkt->list, &free_list);
+
+ /* one more packet cleaned */
+ ret++;
+ }
+
+ if (!list_empty(&free_list)) {
+ u32 counter;
+
+ pkt = list_entry(free_list.prev,
+ struct qib_user_sdma_pkt, list);
+ counter = pkt->counter;
+
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ qib_user_sdma_set_complete_counter(pq, counter);
+ }
+
+ return ret;
+}
+
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
+{
+ if (!pq)
+ return;
+
+ kmem_cache_destroy(pq->pkt_slab);
+ dma_pool_destroy(pq->header_cache);
+ kfree(pq);
+}
+
+/* clean descriptor queue, returns > 0 if some elements cleaned */
+static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = qib_sdma_make_progress(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/* we're in close; drain packets so that we can clean up successfully... */
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int i;
+
+ if (!pq)
+ return;
+
+ for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
+ mutex_lock(&pq->lock);
+ if (list_empty(&pq->sent)) {
+ mutex_unlock(&pq->lock);
+ break;
+ }
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+ msleep(10);
+ }
+
+ if (!list_empty(&pq->sent)) {
+ struct list_head free_list;
+
+ qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
+ INIT_LIST_HEAD(&free_list);
+ mutex_lock(&pq->lock);
+ list_splice_init(&pq->sent, &free_list);
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ mutex_unlock(&pq->lock);
+ }
+}
+
+static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+ u64 addr, u64 dwlen, u64 dwoffset)
+{
+ u8 tmpgen;
+
+ tmpgen = ppd->sdma_generation;
+
+ return cpu_to_le64(/* SDmaPhyAddr[31:0] */
+ ((addr & 0xfffffffcULL) << 32) |
+ /* SDmaGeneration[1:0] */
+ ((tmpgen & 3ULL) << 30) |
+ /* SDmaDwordCount[10:0] */
+ ((dwlen & 0x7ffULL) << 16) |
+ /* SDmaBufOffset[12:2] */
+ (dwoffset & 0x7ffULL));
+}
+
+static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
+{
+ return descq | cpu_to_le64(1ULL << 12);
+}
+
+static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
+{
+ /* last */ /* dma head */
+ return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
+}
+
+static inline __le64 qib_sdma_make_desc1(u64 addr)
+{
+ /* SDmaPhyAddr[47:32] */
+ return cpu_to_le64(addr >> 32);
+}
+
+static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
+ struct qib_user_sdma_pkt *pkt, int idx,
+ unsigned ofs, u16 tail)
+{
+ const u64 addr = (u64) pkt->addr[idx].addr +
+ (u64) pkt->addr[idx].offset;
+ const u64 dwlen = (u64) pkt->addr[idx].length / 4;
+ __le64 *descqp;
+ __le64 descq0;
+
+ descqp = &ppd->sdma_descq[tail].qw[0];
+
+ descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
+ if (idx == 0)
+ descq0 = qib_sdma_make_first_desc0(descq0);
+ if (idx == pkt->naddr - 1)
+ descq0 = qib_sdma_make_last_desc0(descq0);
+
+ descqp[0] = descq0;
+ descqp[1] = qib_sdma_make_desc1(addr);
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *pktlist)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail;
+ u8 generation;
+ u64 descq_added;
+
+ if (list_empty(pktlist))
+ return 0;
+
+ if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+ return -ECOMM;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /* keep a copy for restoring purposes in case of problems */
+ generation = ppd->sdma_generation;
+ descq_added = ppd->sdma_descq_added;
+
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ ret = -ECOMM;
+ goto unlock;
+ }
+
+ tail = ppd->sdma_descq_tail;
+ while (!list_empty(pktlist)) {
+ struct qib_user_sdma_pkt *pkt =
+ list_entry(pktlist->next, struct qib_user_sdma_pkt,
+ list);
+ int i;
+ unsigned ofs = 0;
+ u16 dtail = tail;
+
+ if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
+ goto unlock_check_tail;
+
+ for (i = 0; i < pkt->naddr; i++) {
+ qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+ ofs += pkt->addr[i].length >> 2;
+
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ ++ppd->sdma_generation;
+ }
+ }
+
+ if ((ofs << 2) > ppd->ibmaxlen) {
+ ret = -EMSGSIZE;
+ goto unlock;
+ }
+
+ /*
+ * If the packet is >= 2KB mtu equivalent, we have to use
+ * the large buffers, and have to mark each descriptor as
+ * part of a large buffer packet.
+ */
+ if (ofs > dd->piosize2kmax_dwords) {
+ for (i = 0; i < pkt->naddr; i++) {
+ ppd->sdma_descq[dtail].qw[0] |=
+ cpu_to_le64(1ULL << 14);
+ if (++dtail == ppd->sdma_descq_cnt)
+ dtail = 0;
+ }
+ }
+
+ ppd->sdma_descq_added += pkt->naddr;
+ pkt->added = ppd->sdma_descq_added;
+ list_move_tail(&pkt->list, &pq->sent);
+ ret++;
+ }
+
+unlock_check_tail:
+ /* advance the tail on the chip if necessary */
+ if (ppd->sdma_descq_tail != tail)
+ dd->f_sdma_update_tail(ppd, tail);
+
+unlock:
+ if (unlikely(ret < 0)) {
+ ppd->sdma_generation = generation;
+ ppd->sdma_descq_added = descq_added;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ int ret = 0;
+ struct list_head list;
+ int npkts = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ mutex_lock(&pq->lock);
+
+ /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
+ if (!qib_sdma_running(ppd))
+ goto done_unlock;
+
+ if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ while (dim) {
+ const int mxp = 8;
+
+ down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+ up_write(&current->mm->mmap_sem);
+
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+ dim -= ret;
+ iov += ret;
+ }
+
+ /* force packets onto the sdma hw queue... */
+ if (!list_empty(&list)) {
+ /*
+ * Lazily clean the hw queue. The 4 is a guess of about
+ * how many sdma descriptors a packet will take (it
+ * doesn't have to be perfect).
+ */
+ if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+ if (ret < 0)
+ goto done_unlock;
+ else {
+ npkts += ret;
+ pq->counter += ret;
+
+ if (!list_empty(&list))
+ goto done_unlock;
+ }
+ }
+ }
+
+done_unlock:
+ if (!list_empty(&list))
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+ mutex_unlock(&pq->lock);
+
+ return (ret < 0) ? ret : npkts;
+}
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ int ret = 0;
+
+ mutex_lock(&pq->lock);
+ qib_user_sdma_hwqueue_clean(ppd);
+ ret = qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+
+ return ret;
+}
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->sent_counter : 0;
+}
+
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->counter : 0;
+}
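The qib_sdma_make_*desc0() helpers above pack the low physical address, generation, dword count and buffer offset into one 64-bit descriptor word; bit 12 marks the first descriptor of a packet, bits 11 and 13 mark the last descriptor and DMA head, bit 14 (set in qib_user_sdma_push_pkts()) marks large-buffer packets, and descriptor word 1 carries the upper address bits. The standalone sketch below mirrors those field comments; it is illustrative only, with endianness conversion omitted and made-up values:

#include <stdint.h>
#include <stdio.h>

static uint64_t make_desc0(uint64_t addr, uint64_t dwlen,
			   uint64_t dwoffset, uint8_t gen)
{
	return ((addr & 0xfffffffcULL) << 32) |	/* SDmaPhyAddr[31:0]    */
	       ((gen & 3ULL) << 30) |		/* SDmaGeneration[1:0]  */
	       ((dwlen & 0x7ffULL) << 16) |	/* SDmaDwordCount[10:0] */
	       (dwoffset & 0x7ffULL);		/* SDmaBufOffset[12:2]  */
}

int main(void)
{
	uint64_t addr = 0x0000000812345678ULL;	/* illustrative DMA address */
	uint64_t d0 = make_desc0(addr, 16, 0, 1);

	d0 |= 1ULL << 12;			/* first descriptor of the packet */
	d0 |= (1ULL << 11) | (1ULL << 13);	/* last descriptor + DMA head */
	printf("desc0 = 0x%016llx\n", (unsigned long long)d0);
	printf("desc1 = 0x%016llx\n", (unsigned long long)(addr >> 32));
	return 0;
}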
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 00000000000..ce8cbaf6a5c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+
+struct qib_user_sdma_queue;
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
+
+int qib_user_sdma_writev(struct qib_ctxtdata *pd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim);
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 00000000000..cda8f4173d2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+static unsigned int ib_qib_qp_table_size = 251;
+module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+unsigned int ib_qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int ib_qib_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+ "Maximum number of protection domains to support");
+
+static unsigned int ib_qib_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_qib_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+ "Maximum number of completion queue entries to support");
+
+unsigned int ib_qib_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_qib_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_qib_max_qps = 16384;
+module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int ib_qib_max_sges = 0x60;
+module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_qib_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+ "Maximum number of multicast groups to support");
+
+unsigned int ib_qib_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
+ uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+ "Maximum number of attached QPs to support");
+
+unsigned int ib_qib_max_srqs = 1024;
+module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_qib_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
+static unsigned int ib_qib_disable_sma;
+module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; qib_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = QIB_POST_RECV_OK,
+ [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
+ QIB_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+ [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+};
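+
+/*
+ * For example, qib_post_one_send() below refuses a work request unless
+ * (ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK) is set, and
+ * qib_qp_rcv() drops incoming packets unless QIB_PROCESS_RECV_OK is set.
+ */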
+
+struct qib_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct qib_ucontext, ibucontext);
+}
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * System image GUID.
+ */
+__be64 ib_qib_sys_image_guid;
+
+/**
+ * qib_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: if true, drop the MR reference once an SGE is fully consumed
+ */
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(sge->vaddr, data, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ * @release: if true, drop the MR reference once an SGE is fully consumed
+ */
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+}
+
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the qib_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sg_list = ss->sg_list;
+ struct qib_sge sge = ss->sge;
+ u8 num_sge = ss->num_sge;
+ u32 ndesc = 1; /* count the header */
+
+ while (length) {
+ u32 len = sge.length;
+
+ if (len > length)
+ len = length;
+ if (len > sge.sge_length)
+ len = sge.sge_length;
+ BUG_ON(len == 0);
+ if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+ (len != length && (len & (sizeof(u32) - 1)))) {
+ ndesc = 0;
+ break;
+ }
+ ndesc++;
+ sge.vaddr += len;
+ sge.length -= len;
+ sge.sge_length -= len;
+ if (sge.sge_length == 0) {
+ if (--num_sge)
+ sge = *sg_list++;
+ } else if (sge.length == 0 && sge.mr->lkey) {
+ if (++sge.n >= QIB_SEGSZ) {
+ if (++sge.m >= sge.mr->mapsz)
+ break;
+ sge.n = 0;
+ }
+ sge.vaddr =
+ sge.mr->map[sge.m]->segs[sge.n].vaddr;
+ sge.length =
+ sge.mr->map[sge.m]->segs[sge.n].length;
+ }
+ length -= len;
+ }
+ return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(data, sge->vaddr, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_post_one_send - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ int ret;
+ unsigned long flags;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Check that state is OK to post send. */
+ if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
+ goto bail_inval;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (wr->num_sge > qp->s_max_sge)
+ goto bail_inval;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC QPs, and reject
+ * undefined opcodes.
+ * For atomics, make sure the buffer is large enough (and suitably
+ * aligned) to hold the result.
+ */
+ if (wr->opcode == IB_WR_FAST_REG_MR) {
+ if (qib_fast_reg_mr(qp, wr))
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ goto bail_inval;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+ goto bail_inval;
+ } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
+ goto bail_inval;
+
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+ if (next == qp->s_last) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.pd);
+ wqe = get_swqe_ptr(qp, qp->s_head);
+ wqe->wr = *wr;
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok)
+ goto bail_inval_free;
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ if (wqe->length > 0x80000000U)
+ goto bail_inval_free;
+ } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
+ qp->port_num - 1)->ibmtu)
+ goto bail_inval_free;
+ else
+ atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
+ wqe->ssn = qp->s_ssn++;
+ qp->s_head = next;
+
+ ret = 0;
+ goto bail;
+
+bail_inval_free:
+ while (j) {
+ struct qib_sge *sge = &wqe->sg_list[--j];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+bail_inval:
+ ret = -EINVAL;
+bail:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ int err = 0;
+
+ for (; wr; wr = wr->next) {
+ err = qib_post_one_send(qp, wr);
+ if (err) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ }
+
+ /* Try to do the send work in the caller's context. */
+ qib_do_send(&qp->s_work);
+
+bail:
+ return err;
+}
+
+/**
+ * qib_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
+ int ret;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_qp_rcv - process an incoming packet on a QP
+ * @rcd: the context pointer
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_ib_rcv() to process an incoming packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+
+ /* Check for valid receive state. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (ib_qib_disable_sma)
+ break;
+ /* FALLTHROUGH */
+ case IB_QPT_UD:
+ qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_RC:
+ qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_UC:
+ qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * qib_ib_rcv - process an incoming packet
+ * @rcd: the context pointer
+ * @rhdr: the header of the packet
+ * @data: the packet payload
+ * @tlen: the packet length
+ *
+ * This is called from qib_kreceive() to process an incoming packet at
+ * interrupt level. Tlen is the length of the header + data + CRC in bytes.
+ */
+void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct qib_ib_header *hdr = rhdr;
+ struct qib_other_headers *ohdr;
+ struct qib_qp *qp;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+ u16 lid;
+
+ /* 24 == LRH (8) + BTH (12) + ICRC (4) */
+ if (unlikely(tlen < 24))
+ goto drop;
+
+ /* Check for a valid destination LID (see ch. 7.11.1). */
+ lid = be16_to_cpu(hdr->lrh[1]);
+ if (lid < QIB_MULTICAST_LID_BASE) {
+ lid &= ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid != ppd->lid))
+ goto drop;
+ }
+
+ /* Check for GRH */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == QIB_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else
+ goto drop;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ ibp->opstats[opcode & 0x7f].n_bytes += tlen;
+ ibp->opstats[opcode & 0x7f].n_packets++;
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ if (qp_num == QIB_MULTICAST_QPN) {
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *p;
+
+ if (lnh != QIB_LRH_GRH)
+ goto drop;
+ mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+ if (mcast == NULL)
+ goto drop;
+ ibp->n_multicast_rcv++;
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+ /*
+ * Notify qib_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+ ibp->n_unicast_rcv++;
+ qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(unsigned long data)
+{
+ struct qib_ibdev *dev = (struct qib_ibdev *) data;
+ struct list_head *list = &dev->memwait;
+ struct qib_qp *qp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (!list_empty(list)) {
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ if (!list_empty(list))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ if (qp) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_KMEM) {
+ qp->s_flags &= ~QIB_S_WAIT_KMEM;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+static void update_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#endif
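+
+/*
+ * The helpers above let copy_io() assemble dwords from unaligned byte
+ * streams independent of host endianness: set_upper_bits(v, 8 * off)
+ * positions v's leading bytes at byte offset off of a dword,
+ * get_upper_bits(v, 8 * off) extracts the bytes from offset off upward,
+ * and clear_upper_bytes(v, n, off) keeps only the first n bytes of v
+ * (in memory order) placed at offset off, zeroing the rest.
+ */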
+
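+/*
+ * Copy 'length' bytes from the SGE list into the PIO buffer as 32-bit
+ * writes.  Unaligned source buffers are handled by accumulating bytes
+ * in 'data' ('extra' counts how many are pending); the final dword is
+ * held back in 'last' and written after the loop so that, with the
+ * write-combining flushes, the trigger word reaches the chip last.
+ */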
+static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+ u32 length, unsigned flush_wc)
+{
+ u32 extra = 0;
+ u32 data = 0;
+ u32 last;
+
+ while (1) {
+ u32 len = ss->sge.length;
+ u32 off;
+
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ BUG_ON(len == 0);
+ /*
+ * The source address is not dword aligned: read the containing
+ * aligned dword and keep only the bytes starting at the address.
+ */
+ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+ if (off) {
+ u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+ ~(sizeof(u32) - 1));
+ u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+ u32 y;
+
+ y = sizeof(u32) - off;
+ if (len > y)
+ len = y;
+ if (len + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, extra *
+ BITS_PER_BYTE);
+ len = sizeof(u32) - extra;
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, len, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += len;
+ }
+ } else if (extra) {
+ /* Source address is aligned. */
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ int shift = extra * BITS_PER_BYTE;
+ int ushift = 32 - shift;
+ u32 l = len;
+
+ while (l >= sizeof(u32)) {
+ u32 v = *addr;
+
+ data |= set_upper_bits(v, shift);
+ __raw_writel(data, piobuf);
+ data = get_upper_bits(v, ushift);
+ piobuf++;
+ addr++;
+ l -= sizeof(u32);
+ }
+ /*
+ * 'data' still holds 'extra' pending bytes and fewer than
+ * four source bytes (l) remain in this chunk.
+ */
+ if (l) {
+ u32 v = *addr;
+
+ if (l + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, shift);
+ len -= l + extra - sizeof(u32);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, l, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += l;
+ }
+ } else if (len == length) {
+ last = data;
+ break;
+ }
+ } else if (len == length) {
+ u32 w;
+
+ /*
+ * Need to round up for the last dword in the
+ * packet.
+ */
+ w = (len + 3) >> 2;
+ qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
+ piobuf += w - 1;
+ last = ((u32 *) ss->sge.vaddr)[w - 1];
+ break;
+ } else {
+ u32 w = len >> 2;
+
+ qib_pio_copy(piobuf, ss->sge.vaddr, w);
+ piobuf += w;
+
+ extra = len & (sizeof(u32) - 1);
+ if (extra) {
+ u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+ /* Clear unused upper bytes */
+ data = clear_upper_bytes(v, extra, 0);
+ }
+ }
+ update_sge(ss, len);
+ length -= len;
+ }
+ /* Update address before sending packet. */
+ update_sge(ss, length);
+ if (flush_wc) {
+ /* must flush early everything before trigger word */
+ qib_flush_wc();
+ __raw_writel(last, piobuf);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ __raw_writel(last, piobuf);
+}
+
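+/*
+ * Take a free verbs txreq from dev->txreq_free.  If none is available,
+ * queue the QP on dev->txwait (unless it is already waiting or can no
+ * longer make progress), clear QIB_S_BUSY and return NULL with *retp
+ * set to -EBUSY; qib_put_txreq() wakes the first waiter when a txreq
+ * is freed.
+ */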
+static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp, int *retp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock(&dev->pending_lock);
+
+ if (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ *retp = 0;
+ } else {
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
+ list_empty(&qp->iowait)) {
+ dev->n_txwait++;
+ qp->s_flags |= QIB_S_WAIT_TX;
+ list_add_tail(&qp->iowait, &dev->txwait);
+ }
+ tx = NULL;
+ qp->s_flags &= ~QIB_S_BUSY;
+ *retp = -EBUSY;
+ }
+
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return tx;
+}
+
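+/*
+ * Return a verbs txreq to dev->txreq_free, dropping the QP reference,
+ * releasing any RDMA MR reference and bounce buffer it holds, and
+ * waking the first QP waiting in get_txreq() for a free txreq.
+ */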
+void qib_put_txreq(struct qib_verbs_txreq *tx)
+{
+ struct qib_ibdev *dev;
+ struct qib_qp *qp;
+ unsigned long flags;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ if (tx->mr) {
+ atomic_dec(&tx->mr->refcount);
+ tx->mr = NULL;
+ }
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
+ tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
+ dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
+ tx->txreq.addr, tx->hdr_dwords << 2,
+ DMA_TO_DEVICE);
+ kfree(tx->align_buf);
+ }
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+
+ /* Put struct back on free list */
+ list_add(&tx->txreq.list, &dev->txreq_free);
+
+ if (!list_empty(&dev->txwait)) {
+ /* Wake up first QP wanting a free struct */
+ qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_TX) {
+ qp->s_flags &= ~QIB_S_WAIT_TX;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } else
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with ppd->sdma_lock held.
+ */
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
+{
+ struct qib_qp *qp, *nqp;
+ struct qib_qp *qps[20];
+ struct qib_ibdev *dev;
+ unsigned i, n;
+
+ n = 0;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+
+ /* Search wait list for first QP wanting DMA descriptors. */
+ list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+ if (qp->port_num != ppd->port)
+ continue;
+ if (n == ARRAY_SIZE(qps))
+ break;
+ if (qp->s_tx->txreq.sg_count > avail)
+ break;
+ avail -= qp->s_tx->txreq.sg_count;
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+
+ spin_unlock(&dev->pending_lock);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+ spin_lock(&qp->s_lock);
+ if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+ qib_schedule_send(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/*
+ * This is called with ppd->sdma_lock held.
+ */
+static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
+{
+ struct qib_verbs_txreq *tx =
+ container_of(cookie, struct qib_verbs_txreq, txreq);
+ struct qib_qp *qp = tx->qp;
+
+ spin_lock(&qp->s_lock);
+ if (tx->wqe)
+ qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ struct qib_ib_header *hdr;
+
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
+ hdr = &tx->align_buf->hdr;
+ else {
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+
+ hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
+ }
+ qib_rc_send_complete(qp, hdr);
+ }
+ if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ if (qp->state == IB_QPS_RESET)
+ wake_up(&qp->wait_dma);
+ else if (qp->s_flags & QIB_S_WAIT_DMA) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA;
+ qib_schedule_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+
+ qib_put_txreq(tx);
+}
+
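+/*
+ * Called when qib_verbs_send_dma() cannot allocate or map a bounce
+ * buffer: put the QP on dev->memwait and arm mem_timer so the send is
+ * retried, clear QIB_S_BUSY and return -EBUSY, or return 0 if the QP
+ * can no longer make progress and the request should be flushed.
+ */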
+static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= QIB_S_WAIT_KMEM;
+ list_add_tail(&qp->iowait, &dev->memwait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return ret;
+}
+
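+/*
+ * Send a packet via the send DMA engine.  A previously constructed
+ * txreq left in qp->s_tx is resent first; otherwise a new txreq is
+ * built.  If every payload segment is 32-bit aligned and fits in the
+ * descriptor queue, the payload is gathered directly from the SGEs;
+ * otherwise header and payload are copied into a bounce buffer that is
+ * DMA-mapped and sent as a single descriptor.
+ */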
+static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_txreq *tx;
+ struct qib_pio_header *phdr;
+ u32 control;
+ u32 ndesc;
+ int ret;
+
+ tx = qp->s_tx;
+ if (tx) {
+ qp->s_tx = NULL;
+ /* resend previously constructed packet */
+ ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
+ goto bail;
+ }
+
+ tx = get_txreq(dev, qp, &ret);
+ if (!tx)
+ goto bail;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(hdr->lrh[0]) >> 12);
+ tx->qp = qp;
+ atomic_inc(&qp->refcount);
+ tx->wqe = qp->s_wqe;
+ tx->mr = qp->s_rdma_mr;
+ if (qp->s_rdma_mr)
+ qp->s_rdma_mr = NULL;
+ tx->txreq.callback = sdma_complete;
+ if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
+ else
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
+ if (plen + 1 > dd->piosize2kmax_dwords)
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
+
+ if (len) {
+ /*
+ * Don't try to DMA if it takes more descriptors than
+ * the queue holds.
+ */
+ ndesc = qib_count_sge(ss, len);
+ if (ndesc >= ppd->sdma_descq_cnt)
+ ndesc = 0;
+ } else
+ ndesc = 1;
+ if (ndesc) {
+ phdr = &dev->pio_hdrs[tx->hdr_inx];
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
+ tx->txreq.sg_count = ndesc;
+ tx->txreq.addr = dev->pio_hdrs_phys +
+ tx->hdr_inx * sizeof(struct qib_pio_header);
+ tx->hdr_dwords = hdrwords + 2; /* add PBC length */
+ ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
+ goto bail;
+ }
+
+ /* Allocate a buffer and copy the header and payload to it. */
+ tx->hdr_dwords = plen + 1;
+ phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
+ if (!phdr)
+ goto err_tx;
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
+
+ tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
+ tx->hdr_dwords << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
+ goto map_err;
+ tx->align_buf = phdr;
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
+ tx->txreq.sg_count = 1;
+ ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
+ goto unaligned;
+
+map_err:
+ kfree(phdr);
+err_tx:
+ qib_put_txreq(tx);
+ ret = wait_kmem(dev, qp);
+unaligned:
+ ibp->n_unaligned++;
+bail:
+ return ret;
+}
+
+/*
+ * Queue the QP to wait for a PIO buffer and return -EBUSY; if the QP
+ * is no longer in a state where sends can progress (e.g. it moved to
+ * the error state), return zero so the caller flushes the send work
+ * request instead.
+ */
+static int no_bufs_available(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * Note that as soon as dd->f_wantpiobuf_intr() is called, and
+ * possibly before it returns, qib_ib_piobufavail() could be
+ * called. Therefore, put the QP on the I/O wait list before
+ * enabling the PIO-buffer-available interrupt.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ dev->n_piowait++;
+ qp->s_flags |= QIB_S_WAIT_PIO;
+ list_add_tail(&qp->iowait, &dev->piowait);
+ dd = dd_from_dev(dev);
+ dd->f_wantpiobuf_intr(dd, 1);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
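+/*
+ * Send a packet by programmed I/O: grab a send buffer, write the PBC
+ * (packet length and control) first, then the header and payload,
+ * flushing write-combining as needed so the last word written acts as
+ * the trigger.  If no PIO buffer is free, the QP is queued via
+ * no_bufs_available() instead.
+ */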
+static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
+ u32 *hdr = (u32 *) ibhdr;
+ u32 __iomem *piobuf_orig;
+ u32 __iomem *piobuf;
+ u64 pbc;
+ unsigned long flags;
+ unsigned flush_wc;
+ u32 control;
+ u32 pbufn;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(ibhdr->lrh[0]) >> 12);
+ pbc = ((u64) control << 32) | plen;
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (unlikely(piobuf == NULL))
+ return no_bufs_available(qp);
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness on some cpus
+ * or WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+ piobuf_orig = piobuf;
+ piobuf += 2;
+
+ flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
+ if (len == 0) {
+ /*
+ * If there is just the header portion, must flush before
+ * writing last word of header for correctness, and after
+ * the last header word (trigger word).
+ */
+ if (flush_wc) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ goto done;
+ }
+
+ if (flush_wc)
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ piobuf += hdrwords;
+
+ /* The common case is aligned and contained in one segment. */
+ if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+ !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+ u32 *addr = (u32 *) ss->sge.vaddr;
+
+ /* Update address before sending packet. */
+ update_sge(ss, len);
+ if (flush_wc) {
+ qib_pio_copy(piobuf, addr, dwords - 1);
+ /* must flush early everything before trigger word */
+ qib_flush_wc();
+ __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, addr, dwords);
+ goto done;
+ }
+ copy_io(piobuf, ss, len, flush_wc);
+done:
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf_orig + spcl_off);
+ }
+ qib_sendbuf_done(dd, pbufn);
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_rc_send_complete(qp, ibhdr);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @hdr: the packet header
+ * @hdrwords: the number of 32-bit words in the header
+ * @ss: the SGE to send
+ * @len: the length of the packet in bytes
+ *
+ * Return zero if packet is sent or queued OK.
+ * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
+ */
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ u32 plen;
+ int ret;
+ u32 dwords = (len + 3) >> 2;
+
+ /*
+ * Calculate the send buffer trigger address.
+ * The +1 counts for the pbc control dword following the pbc length.
+ */
+ plen = hdrwords + dwords + 1;
+
+ /*
+ * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+ * can defer SDMA restart until link goes ACTIVE without
+ * worrying about just how we got there.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ !(dd->flags & QIB_HAS_SEND_DMA))
+ ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+ else
+ ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+
+ return ret;
+}
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait)
+{
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!(dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
+ *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
+ *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
+ *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
+ *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_counters - get various chip counters
+ * @ppd: the physical port of the qlogic_ib device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs)
+{
+ int ret;
+
+ if (!(ppd->dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ cntrs->symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+ cntrs->link_error_recovery_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
+ /*
+ * The link downed counter counts when the other side downs the
+ * connection. We add in the number of times we downed the link
+ * due to local link integrity errors to compensate.
+ */
+ cntrs->link_downed_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
+ cntrs->port_rcv_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
+ cntrs->port_rcv_remphys_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
+ cntrs->port_xmit_discards =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
+ cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDSEND);
+ cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDRCV);
+ cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTSEND);
+ cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTRCV);
+ cntrs->local_link_integrity_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
+ cntrs->excessive_buffer_overrun_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
+ cntrs->vl15_dropped =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_ib_piobufavail - callback when a PIO buffer is available
+ * @dd: the device pointer
+ *
+ * This is called from qib_intr() at interrupt level when a PIO buffer is
+ * available after qib_verbs_send() returned an error that no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+void qib_ib_piobufavail(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct qib_qp *qps[5];
+ struct qib_qp *qp;
+ unsigned long flags;
+ unsigned i, n;
+
+ list = &dev->piowait;
+ n = 0;
+
+ /*
+ * Note: checking that the piowait list is empty and clearing
+ * the buffer available interrupt needs to be atomic or we
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ while (!list_empty(list)) {
+ if (n == ARRAY_SIZE(qps))
+ goto full;
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+ dd->f_wantpiobuf_intr(dd, 0);
+full:
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_PIO) {
+ qp->s_flags &= ~QIB_S_WAIT_PIO;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+static int qib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibdev *dev = to_idev(ibdev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ props->page_size_cap = PAGE_SIZE;
+ props->vendor_id =
+ QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+ props->vendor_part_id = dd->deviceid;
+ props->hw_ver = dd->minrev;
+ props->sys_image_guid = ib_qib_sys_image_guid;
+ props->max_mr_size = ~0ULL;
+ props->max_qp = ib_qib_max_qps;
+ props->max_qp_wr = ib_qib_max_qp_wrs;
+ props->max_sge = ib_qib_max_sges;
+ props->max_cq = ib_qib_max_cqs;
+ props->max_ah = ib_qib_max_ahs;
+ props->max_cqe = ib_qib_max_cqes;
+ props->max_mr = dev->lk_table.max;
+ props->max_fmr = dev->lk_table.max;
+ props->max_map_per_fmr = 32767;
+ props->max_pd = ib_qib_max_pds;
+ props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+ props->max_qp_init_rd_atom = 255;
+ /* props->max_res_rd_atom */
+ props->max_srq = ib_qib_max_srqs;
+ props->max_srq_wr = ib_qib_max_srq_wrs;
+ props->max_srq_sge = ib_qib_max_srq_sges;
+ /* props->local_ca_ack_delay */
+ props->atomic_cap = IB_ATOMIC_GLOB;
+ props->max_pkeys = qib_get_npkeys(dd);
+ props->max_mcast_grp = ib_qib_max_mcast_grps;
+ props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+
+ return 0;
+}
+
+static int qib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ enum ib_mtu mtu;
+ u16 lid = ppd->lid;
+
+ memset(props, 0, sizeof(*props));
+ props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
+ props->lmc = ppd->lmc;
+ props->sm_lid = ibp->sm_lid;
+ props->sm_sl = ibp->sm_sl;
+ props->state = dd->f_iblink_state(ppd->lastibcstat);
+ props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
+ props->port_cap_flags = ibp->port_cap_flags;
+ props->gid_tbl_len = QIB_GUIDS_PER_PORT;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = qib_get_npkeys(dd);
+ props->bad_pkey_cntr = ibp->pkey_violations;
+ props->qkey_viol_cntr = ibp->qkey_violations;
+ props->active_width = ppd->link_width_active;
+ /* See rate_show() */
+ props->active_speed = ppd->link_speed_active;
+ props->max_vl_num = qib_num_vls(ppd->vls_supported);
+ props->init_type_reply = 0;
+
+ props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ switch (ppd->ibmtu) {
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ default:
+ mtu = IB_MTU_2048;
+ }
+ props->active_mtu = mtu;
+ props->subnet_timeout = ibp->subnet_timeout;
+
+ return 0;
+}
+
+static int qib_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct qib_devdata *dd = dd_from_ibdev(device);
+ unsigned i;
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(device->node_desc, device_modify->node_desc, 64);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_node_desc_chg(ibp);
+ }
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ ib_qib_sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_sys_guid_chg(ibp);
+ }
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ ibp->port_cap_flags |= props->set_port_cap_mask;
+ ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ qib_cap_mask_chg(ibp);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ ibp->qkey_violations = 0;
+ return 0;
+}
+
+static int qib_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret = 0;
+
+ if (!port || port > dd->num_pports)
+ ret = -EINVAL;
+ else {
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ gid->global.subnet_prefix = ibp->gid_prefix;
+ if (index == 0)
+ gid->global.interface_id = ppd->guid;
+ else if (index < QIB_GUIDS_PER_PORT)
+ gid->global.interface_id = ibp->guids[index - 1];
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_pd *pd;
+ struct ib_pd *ret;
+
+ /*
+ * This is actually totally arbitrary. Some correctness tests
+ * assume there's a maximum number of PDs that can be allocated.
+ * We don't actually have this limit, but we fail the test if
+ * we allow allocations of more than we report for this value.
+ */
+
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == ib_qib_max_pds) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata != NULL;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qib_pd *pd = to_ipd(ibpd);
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+{
+ /* A multicast address requires a GRH (see ch. 8.4.1). */
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ goto bail;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
+ goto bail;
+ if (ah_attr->dlid == 0)
+ goto bail;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ goto bail;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mult(ah_attr->static_rate) < 0)
+ goto bail;
+ if (ah_attr->sl > 15)
+ goto bail;
+ return 0;
+bail:
+ return -EINVAL;
+}
+
+/**
+ * qib_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *qib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah;
+ struct ib_ah *ret;
+ struct qib_ibdev *dev = to_idev(pd->device);
+ unsigned long flags;
+
+ if (qib_check_ah(pd->device, ah_attr)) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == ib_qib_max_ahs) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ kfree(ah);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ /* ib_create_ah() will initialize ah->ibah. */
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ ret = &ah->ibah;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_destroy_ah(struct ib_ah *ibah)
+{
+ struct qib_ibdev *dev = to_idev(ibah->device);
+ struct qib_ah *ah = to_iah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ if (qib_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
+
+/**
+ * qib_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the qlogic_ib device
+ */
+unsigned qib_get_npkeys(struct qib_devdata *dd)
+{
+ return ARRAY_SIZE(dd->rcd[0]->pkeys);
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ * No need to validate rcd[ctxt]; the port is setup if we are here.
+ */
+unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned ret;
+
+ /* dd->rcd null if mini_init or some init failures */
+ if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
+ ret = 0;
+ else
+ ret = dd->rcd[ctxt]->pkeys[index];
+
+ return ret;
+}
+
+static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (index >= qib_get_npkeys(dd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *pkey = qib_get_pkey(to_iport(ibdev, port), index);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the QLogic_IB driver
+ */
+static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct qib_ucontext *context;
+ struct ib_ucontext *ret;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ ret = &context->ibucontext;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static void init_ibport(struct qib_pportdata *ppd)
+{
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ spin_lock_init(&ibp->lock);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+ ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+ IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
+ IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
+ IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
+ IB_PORT_OTHER_LOCAL_CHANGES_SUP;
+ if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
+ ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+ ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ /* Snapshot current HW counters to "clear" them. */
+ qib_get_counters(ppd, &cntrs);
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+}
+
+/**
+ * qib_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 on success, or a negative errno on failure.
+ */
+int qib_register_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned i, lk_tab_size;
+ int ret;
+
+ dev->qp_table_size = ib_qib_qp_table_size;
+ dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+ GFP_KERNEL);
+ if (!dev->qp_table) {
+ ret = -ENOMEM;
+ goto err_qpt;
+ }
+
+ for (i = 0; i < dd->num_pports; i++)
+ init_ibport(ppd + i);
+
+ /* Only need to initialize non-zero fields. */
+ spin_lock_init(&dev->qpt_lock);
+ spin_lock_init(&dev->n_pds_lock);
+ spin_lock_init(&dev->n_ahs_lock);
+ spin_lock_init(&dev->n_cqs_lock);
+ spin_lock_init(&dev->n_qps_lock);
+ spin_lock_init(&dev->n_srqs_lock);
+ spin_lock_init(&dev->n_mcast_grps_lock);
+ init_timer(&dev->mem_timer);
+ dev->mem_timer.function = mem_timer;
+ dev->mem_timer.data = (unsigned long) dev;
+
+ qib_init_qpn_table(dd, &dev->qpn_table);
+
+ /*
+ * The top ib_qib_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
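+ /*
+ * For example, the default lkey_table_size of 16 yields a table of
+ * 1 << 16 = 65536 qib_mregion pointers.
+ */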
+ spin_lock_init(&dev->lk_table.lock);
+ dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ dev->lk_table.table = (struct qib_mregion **)
+ __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ if (dev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+ }
+ memset(dev->lk_table.table, 0, lk_tab_size);
+ INIT_LIST_HEAD(&dev->pending_mmaps);
+ spin_lock_init(&dev->pending_lock);
+ dev->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&dev->mmap_offset_lock);
+ INIT_LIST_HEAD(&dev->piowait);
+ INIT_LIST_HEAD(&dev->dmawait);
+ INIT_LIST_HEAD(&dev->txwait);
+ INIT_LIST_HEAD(&dev->memwait);
+ INIT_LIST_HEAD(&dev->txreq_free);
+
+ if (ppd->sdma_descq_cnt) {
+ dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ &dev->pio_hdrs_phys,
+ GFP_KERNEL);
+ if (!dev->pio_hdrs) {
+ ret = -ENOMEM;
+ goto err_hdrs;
+ }
+ }
+
+ for (i = 0; i < ppd->sdma_descq_cnt; i++) {
+ struct qib_verbs_txreq *tx;
+
+ tx = kzalloc(sizeof *tx, GFP_KERNEL);
+ if (!tx) {
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+ tx->hdr_inx = i;
+ list_add(&tx->txreq.list, &dev->txreq_free);
+ }
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * IB HCAs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!ib_qib_sys_image_guid)
+ ib_qib_sys_image_guid = ppd->guid;
+
+ strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
+ ibdev->owner = THIS_MODULE;
+ ibdev->node_guid = ppd->guid;
+ ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
+ ibdev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = dd->num_pports;
+ ibdev->num_comp_vectors = 1;
+ ibdev->dma_device = &dd->pcidev->dev;
+ ibdev->query_device = qib_query_device;
+ ibdev->modify_device = qib_modify_device;
+ ibdev->query_port = qib_query_port;
+ ibdev->modify_port = qib_modify_port;
+ ibdev->query_pkey = qib_query_pkey;
+ ibdev->query_gid = qib_query_gid;
+ ibdev->alloc_ucontext = qib_alloc_ucontext;
+ ibdev->dealloc_ucontext = qib_dealloc_ucontext;
+ ibdev->alloc_pd = qib_alloc_pd;
+ ibdev->dealloc_pd = qib_dealloc_pd;
+ ibdev->create_ah = qib_create_ah;
+ ibdev->destroy_ah = qib_destroy_ah;
+ ibdev->modify_ah = qib_modify_ah;
+ ibdev->query_ah = qib_query_ah;
+ ibdev->create_srq = qib_create_srq;
+ ibdev->modify_srq = qib_modify_srq;
+ ibdev->query_srq = qib_query_srq;
+ ibdev->destroy_srq = qib_destroy_srq;
+ ibdev->create_qp = qib_create_qp;
+ ibdev->modify_qp = qib_modify_qp;
+ ibdev->query_qp = qib_query_qp;
+ ibdev->destroy_qp = qib_destroy_qp;
+ ibdev->post_send = qib_post_send;
+ ibdev->post_recv = qib_post_receive;
+ ibdev->post_srq_recv = qib_post_srq_receive;
+ ibdev->create_cq = qib_create_cq;
+ ibdev->destroy_cq = qib_destroy_cq;
+ ibdev->resize_cq = qib_resize_cq;
+ ibdev->poll_cq = qib_poll_cq;
+ ibdev->req_notify_cq = qib_req_notify_cq;
+ ibdev->get_dma_mr = qib_get_dma_mr;
+ ibdev->reg_phys_mr = qib_reg_phys_mr;
+ ibdev->reg_user_mr = qib_reg_user_mr;
+ ibdev->dereg_mr = qib_dereg_mr;
+ ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
+ ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
+ ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
+ ibdev->alloc_fmr = qib_alloc_fmr;
+ ibdev->map_phys_fmr = qib_map_phys_fmr;
+ ibdev->unmap_fmr = qib_unmap_fmr;
+ ibdev->dealloc_fmr = qib_dealloc_fmr;
+ ibdev->attach_mcast = qib_multicast_attach;
+ ibdev->detach_mcast = qib_multicast_detach;
+ ibdev->process_mad = qib_process_mad;
+ ibdev->mmap = qib_mmap;
+ ibdev->dma_ops = &qib_dma_mapping_ops;
+
+ snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
+ QIB_IDSTR " %s", init_utsname()->nodename);
+
+ ret = ib_register_device(ibdev, qib_create_port_files);
+ if (ret)
+ goto err_reg;
+
+ ret = qib_create_agents(dev);
+ if (ret)
+ goto err_agents;
+
+ ret = qib_verbs_register_sysfs(dd);
+ if (ret)
+ goto err_class;
+
+ goto bail;
+
+err_class:
+ qib_free_agents(dev);
+err_agents:
+ ib_unregister_device(ibdev);
+err_reg:
+err_tx:
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (ppd->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+err_hdrs:
+ free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+err_lk:
+ kfree(dev->qp_table);
+err_qpt:
+ qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+bail:
+ return ret;
+}
+
+void qib_unregister_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ u32 qps_inuse;
+ unsigned lk_tab_size;
+
+ qib_verbs_unregister_sysfs(dd);
+
+ qib_free_agents(dev);
+
+ ib_unregister_device(ibdev);
+
+ if (!list_empty(&dev->piowait))
+ qib_dev_err(dd, "piowait list not empty!\n");
+ if (!list_empty(&dev->dmawait))
+ qib_dev_err(dd, "dmawait list not empty!\n");
+ if (!list_empty(&dev->txwait))
+ qib_dev_err(dd, "txwait list not empty!\n");
+ if (!list_empty(&dev->memwait))
+ qib_dev_err(dd, "memwait list not empty!\n");
+ if (dev->dma_mr)
+ qib_dev_err(dd, "DMA MR not NULL!\n");
+
+ qps_inuse = qib_free_all_qps(dd);
+ if (qps_inuse)
+ qib_dev_err(dd, "QP memory leak! %u still in use\n",
+ qps_inuse);
+
+ del_timer_sync(&dev->mem_timer);
+ qib_free_qpn_table(&dev->qpn_table);
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (dd->pport->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ dd->pport->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ free_pages((unsigned long) dev->lk_table.table,
+ get_order(lk_tab_size));
+ kfree(dev->qp_table);
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 00000000000..bd57c127322
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef QIB_VERBS_H
+#define QIB_VERBS_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+
+struct qib_ctxtdata;
+struct qib_pportdata;
+struct qib_devdata;
+struct qib_verbs_txreq;
+
+#define QIB_MAX_RDMA_ATOMIC 16
+#define QIB_GUIDS_PER_PORT 5
+
+#define QPN_MAX (1 << 24)
+#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
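+/*
+ * For example, assuming 4 KiB pages, each qpn_map page holds
+ * PAGE_SIZE * BITS_PER_BYTE = 32768 QPN bits, so covering the full
+ * QPN_MAX space takes 16777216 / 4096 / 8 = 512 map pages; pages are
+ * only allocated as the QPNs in their range are actually used
+ * (see struct qpn_map).
+ */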
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define QIB_UVERBS_ABI_VERSION 2
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+/* Flags for checking QP state (see ib_qib_state_ops[]) */
+#define QIB_POST_SEND_OK 0x01
+#define QIB_POST_RECV_OK 0x02
+#define QIB_PROCESS_RECV_OK 0x04
+#define QIB_PROCESS_SEND_OK 0x08
+#define QIB_PROCESS_NEXT_SEND_OK 0x10
+#define QIB_FLUSH_SEND 0x20
+#define QIB_FLUSH_RECV 0x40
+#define QIB_PROCESS_OR_FLUSH_SEND \
+ (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
+
+#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
+
+#define IB_BTH_REQ_ACK (1 << 31)
+#define IB_BTH_SOLICITED (1 << 23)
+#define IB_BTH_MIG_REQ (1 << 22)
+
+/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
+#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
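+/*
+ * These masks and shifts pack the first 32-bit word of a GRH: version
+ * in bits 31:28, traffic class in bits 27:20 and flow label in bits
+ * 19:0, e.g. (IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ * (tclass << IB_GRH_TCLASS_SHIFT) | (flow & IB_GRH_FLOW_MASK).
+ */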
+
+#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+
+/* Values for set/get portinfo VLCap OperationalVLs */
+#define IB_VL_VL0 1
+#define IB_VL_VL0_1 2
+#define IB_VL_VL0_3 3
+#define IB_VL_VL0_7 4
+#define IB_VL_VL0_14 5
+
+static inline int qib_num_vls(int vls)
+{
+ switch (vls) {
+ default:
+ case IB_VL_VL0:
+ return 1;
+ case IB_VL_VL0_1:
+ return 2;
+ case IB_VL_VL0_3:
+ return 4;
+ case IB_VL_VL0_7:
+ return 8;
+ case IB_VL_VL0_14:
+ return 15;
+ }
+}
+
+struct ib_reth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be32 length;
+} __attribute__ ((packed));
+
+struct ib_atomic_eth {
+ __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
+ __be32 rkey;
+ __be64 swap_data;
+ __be64 compare_data;
+} __attribute__ ((packed));
+
+struct qib_other_headers {
+ __be32 bth[3];
+ union {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be32 atomic_ack_eth[2];
+ } at;
+ __be32 imm_data;
+ __be32 aeth;
+ struct ib_atomic_eth atomic_eth;
+ } u;
+} __attribute__ ((packed));
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data). Only the first 56 bytes of the IB header
+ * will be in the eager header buffer. The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
+struct qib_ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct qib_other_headers oth;
+ } l;
+ struct qib_other_headers oth;
+ } u;
+} __attribute__ ((packed));
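+/*
+ * The 68/72-byte figures above break down as LRH (8) + GRH (40) +
+ * BTH (12) + DETH (8) = 68 bytes, plus 4 bytes of imm_data for 72;
+ * with only the first 56 bytes in the eager header buffer, the last
+ * 12 or 16 bytes land in the data buffer.
+ */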
+
+struct qib_pio_header {
+ __le32 pbc[2];
+ struct qib_ib_header hdr;
+} __attribute__ ((packed));
+
+/*
+ * There is one struct qib_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct qib_mcast_qp.
+ */
+struct qib_mcast_qp {
+ struct list_head list;
+ struct qib_qp *qp;
+};
+
+struct qib_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+ int n_attached;
+};
+
+/* Protection domain */
+struct qib_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct qib_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+ atomic_t refcount;
+};
+
+/*
+ * This structure is used by qib_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct qib_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ unsigned size;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct qib_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ union {
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[0];
+ struct ib_wc kqueue[0];
+ };
+};
+
+/*
+ * The completion queue structure.
+ */
+struct qib_cq {
+ struct ib_cq ibcq;
+ struct work_struct comptask;
+ spinlock_t lock; /* protect changes in this struct */
+ u8 notify;
+ u8 triggered;
+ struct qib_cq_wc *queue;
+ struct qib_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct qib_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of qib_segs that fit in a page. */
+#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
+
+struct qib_segarray {
+ struct qib_seg segs[QIB_SEGSZ];
+};
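+/*
+ * As an example, on a 64-bit build with 4 KiB pages a qib_seg is
+ * 16 bytes (an 8-byte pointer plus an 8-byte size_t), so QIB_SEGSZ is
+ * 4096 / 16 = 256 segments per qib_segarray.
+ */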
+
+struct qib_mregion {
+ struct ib_pd *pd; /* shares refcnt of ibmr.pd */
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of qib_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+ atomic_t refcount;
+ struct qib_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct qib_sge {
+ struct qib_mregion *mr;
+ void *vaddr; /* kernel virtual address of segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct qib_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct qib_mregion mr; /* must be last */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct qib_swqe {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct qib_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct qib_rwqe {
+ u64 wr_id;
+ u8 num_sge;
+ struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct qib_rwq {
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ struct qib_rwqe wq[0];
+};
+
+struct qib_rq {
+ struct qib_rwq *wq;
+ spinlock_t lock; /* protect changes in this struct */
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+};
+
+struct qib_srq {
+ struct ib_srq ibsrq;
+ struct qib_rq rq;
+ struct qib_mmap_info *ip;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+struct qib_sge_state {
+ struct qib_sge *sg_list; /* next SGE to be used if any */
+ struct qib_sge sge; /* progress state for the current SGE */
+ u32 total_len;
+ u8 num_sge;
+};
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send a RDMA read response or atomic operation.
+ */
+struct qib_ack_entry {
+ u8 opcode;
+ u8 sent;
+ u32 psn;
+ u32 lpsn;
+ union {
+ struct qib_sge rdma_sge;
+ u64 atomic_data;
+ };
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock, taken in
+ * that order, which only happens in modify_qp() or when changing the
+ * QP 'state'.
+ */
+struct qib_qp {
+ struct ib_qp ibqp;
+ struct qib_qp *next; /* link list for QPN hash table */
+ struct qib_qp *timer_next; /* link list for qib_ib_timer() */
+ struct list_head iowait; /* link for wait PIO buf */
+ struct list_head rspwait; /* link for waiting to respond */
+ struct ib_ah_attr remote_ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ struct qib_ib_header s_hdr; /* next packet header to send */
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ wait_queue_head_t wait_dma;
+ struct timer_list s_timer;
+ struct work_struct s_work;
+ struct qib_mmap_info *ip;
+ struct qib_sge_state *s_cur_sge;
+ struct qib_verbs_txreq *s_tx;
+ struct qib_mregion *s_rdma_mr;
+ struct qib_sge_state s_sge; /* current send request data */
+ struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
+ struct qib_sge_state s_ack_rdma_sge;
+ struct qib_sge_state s_rdma_read_sge;
+ struct qib_sge_state r_sge; /* current receive data */
+ spinlock_t r_lock; /* used for APM */
+ spinlock_t s_lock;
+ atomic_t s_dma_busy;
+ unsigned processor_id; /* Processor ID QP is bound to */
+ u32 s_flags;
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_sending_psn; /* lowest PSN that is being sent */
+ u32 s_sending_hpsn; /* highest PSN that is being sent */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
+ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
+ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
+ u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
+ u64 r_wr_id; /* ID for current receive WQE */
+ unsigned long r_aflags;
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_psn; /* expected rcv packet sequence number */
+ u32 r_msn; /* message sequence number */
+ u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u16 s_rdma_ack_cnt;
+ u8 state; /* QP state */
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_state; /* opcode of last packet received */
+ u8 r_nak_state; /* non-zero if NAK is pending */
+ u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
+ u8 r_flags;
+ u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
+ u8 r_head_ack_queue; /* index into s_ack_queue[] */
+ u8 qp_access_flags;
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_pkey_index; /* PKEY index to use */
+ u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
+ u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
+ u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
+ u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+ u8 s_srate;
+ u8 s_draining;
+ u8 s_mig_state;
+ u8 timeout; /* Timeout for this QP */
+ u8 alt_timeout; /* Alternate path timeout for this QP */
+ u8 port_num;
+ enum ib_mtu path_mtu;
+ u32 remote_qpn;
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_head; /* new entries added here */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_acked; /* last un-ACK'ed entry */
+ u32 s_last; /* last completed entry */
+ u32 s_ssn; /* SSN of tail entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ struct qib_swqe *s_wq; /* send work queue */
+ struct qib_swqe *s_wqe;
+ struct qib_rq r_rq; /* receive work queue */
+ struct qib_sge r_sg_list[0]; /* verified SGEs */
+};
+
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define QIB_R_WRID_VALID 0
+#define QIB_R_REWIND_SGE 1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define QIB_R_REUSE_SGE 0x01
+#define QIB_R_RDMAR_SEQ 0x02
+#define QIB_R_RSP_NAK 0x04
+#define QIB_R_RSP_SEND 0x08
+#define QIB_R_COMM_EST 0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
+ * QIB_S_BUSY - send tasklet is processing the QP
+ * QIB_S_TIMER - the RC retry timer is active
+ * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
+ * before processing the next SWQE
+ * QIB_S_WAIT_RNR - waiting for RNR timeout
+ * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * QIB_S_WAIT_PIO - waiting for a send buffer to be available
+ * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
+ * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
+ * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ */
+#define QIB_S_SIGNAL_REQ_WR 0x0001
+#define QIB_S_BUSY 0x0002
+#define QIB_S_TIMER 0x0004
+#define QIB_S_RESP_PENDING 0x0008
+#define QIB_S_ACK_PENDING 0x0010
+#define QIB_S_WAIT_FENCE 0x0020
+#define QIB_S_WAIT_RDMAR 0x0040
+#define QIB_S_WAIT_RNR 0x0080
+#define QIB_S_WAIT_SSN_CREDIT 0x0100
+#define QIB_S_WAIT_DMA 0x0200
+#define QIB_S_WAIT_PIO 0x0400
+#define QIB_S_WAIT_TX 0x0800
+#define QIB_S_WAIT_DMA_DESC 0x1000
+#define QIB_S_WAIT_KMEM 0x2000
+#define QIB_S_WAIT_PSN 0x4000
+#define QIB_S_WAIT_ACK 0x8000
+#define QIB_S_SEND_ONE 0x10000
+#define QIB_S_UNLIMITED_CREDIT 0x20000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
+ QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
+ QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
+ QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
+
+#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
+
+#define QIB_PSN_CREDIT 16
+
+/*
+ * Since struct qib_swqe is not a fixed size, we can't simply index into
+ * struct qib_qp.s_wq. This function does the array index computation.
+ */
+static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
+ unsigned n)
+{
+ return (struct qib_swqe *)((char *)qp->s_wq +
+ (sizeof(struct qib_swqe) +
+ qp->s_max_sge *
+ sizeof(struct qib_sge)) * n);
+}
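+/*
+ * For example, a QP created with s_max_sge == 3 has a stride of
+ * sizeof(struct qib_swqe) + 3 * sizeof(struct qib_sge) bytes per
+ * entry, so entry n starts n strides past s_wq; get_rwqe_ptr() below
+ * applies the same idea to the receive queue.
+ */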
+
+/*
+ * Since struct qib_rwqe is not a fixed size, we can't simply index into
+ * struct qib_rwq.wq. This function does the array index computation.
+ */
+static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
+{
+ return (struct qib_rwqe *)
+ ((char *) rq->wq->wq +
+ (sizeof(struct qib_rwqe) +
+ rq->max_sge * sizeof(struct ib_sge)) * n);
+}
+
+/*
+ * QPN-map pages start out as NULL; they get allocated upon
+ * first use and are never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+ void *page;
+};
+
+struct qib_qpn_table {
+ spinlock_t lock; /* protect changes in this struct */
+ unsigned flags; /* flags for QP0/1 allocated for each port */
+ u32 last; /* last QP number allocated */
+ u32 nmaps; /* size of the map table */
+ u16 limit;
+ u16 mask;
+ /* bit map of free QP numbers other than 0/1 */
+ struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct qib_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct qib_mregion **table;
+};
+
+struct qib_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct qib_ibport {
+ struct qib_qp *qp0;
+ struct qib_qp *qp1;
+ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
+ struct qib_ah *sm_ah;
+ struct qib_ah *smi_ah;
+ struct rb_root mcast_tree;
+ spinlock_t lock; /* protect changes in this struct */
+
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+ unsigned long trap_timeout;
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
+ u64 tid; /* TID for traps */
+ u64 n_unicast_xmit; /* total unicast packets sent */
+ u64 n_unicast_rcv; /* total unicast packets received */
+ u64 n_multicast_xmit; /* total multicast packets sent */
+ u64 n_multicast_rcv; /* total multicast packets received */
+ u64 z_symbol_error_counter; /* starting count for PMA */
+ u64 z_link_error_recovery_counter; /* starting count for PMA */
+ u64 z_link_downed_counter; /* starting count for PMA */
+ u64 z_port_rcv_errors; /* starting count for PMA */
+ u64 z_port_rcv_remphys_errors; /* starting count for PMA */
+ u64 z_port_xmit_discards; /* starting count for PMA */
+ u64 z_port_xmit_data; /* starting count for PMA */
+ u64 z_port_rcv_data; /* starting count for PMA */
+ u64 z_port_xmit_packets; /* starting count for PMA */
+ u64 z_port_rcv_packets; /* starting count for PMA */
+ u32 z_local_link_integrity_errors; /* starting count for PMA */
+ u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
+ u32 z_vl15_dropped; /* starting count for PMA */
+ u32 n_rc_resends;
+ u32 n_rc_acks;
+ u32 n_rc_qacks;
+ u32 n_rc_delayed_comp;
+ u32 n_seq_naks;
+ u32 n_rdma_seq;
+ u32 n_rnr_naks;
+ u32 n_other_naks;
+ u32 n_loop_pkts;
+ u32 n_pkt_drops;
+ u32 n_vl15_dropped;
+ u32 n_rc_timeouts;
+ u32 n_dmawait;
+ u32 n_unaligned;
+ u32 n_rc_dupreq;
+ u32 n_rc_seqnak;
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 pkey_violations;
+ u16 qkey_violations;
+ u16 mkey_violations;
+ u16 mkey_lease_period;
+ u16 sm_lid;
+ u16 repress_traps;
+ u8 sm_sl;
+ u8 mkeyprot;
+ u8 subnet_timeout;
+ u8 vl_high_limit;
+ u8 sl_to_vl[16];
+
+ struct qib_opcode_stats opstats[128];
+};
+
+struct qib_ibdev {
+ struct ib_device ibdev;
+ struct list_head pending_mmaps;
+ spinlock_t mmap_offset_lock; /* protect mmap_offset */
+ u32 mmap_offset;
+ struct qib_mregion *dma_mr;
+
+ /* QP numbers are shared by all IB ports */
+ struct qib_qpn_table qpn_table;
+ struct qib_lkey_table lk_table;
+ struct list_head piowait; /* list for wait PIO buf */
+ struct list_head dmawait; /* list for wait DMA */
+ struct list_head txwait; /* list for wait qib_verbs_txreq */
+ struct list_head memwait; /* list for wait kernel memory */
+ struct list_head txreq_free;
+ struct timer_list mem_timer;
+ struct qib_qp **qp_table;
+ struct qib_pio_header *pio_hdrs;
+ dma_addr_t pio_hdrs_phys;
+ /* list of QPs waiting for RNR timer */
+ spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
+ unsigned qp_table_size; /* size of the hash table */
+ spinlock_t qpt_lock;
+
+ u32 n_piowait;
+ u32 n_txwait;
+
+ u32 n_pds_allocated; /* number of PDs allocated for device */
+ spinlock_t n_pds_lock;
+ u32 n_ahs_allocated; /* number of AHs allocated for device */
+ spinlock_t n_ahs_lock;
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ spinlock_t n_qps_lock;
+ u32 n_srqs_allocated; /* number of SRQs allocated for device */
+ spinlock_t n_srqs_lock;
+ u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+};
+
+struct qib_verbs_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
+ u32 vl15_dropped;
+};
+
+static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qib_mr, ibmr);
+}
+
+static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qib_pd, ibpd);
+}
+
+static inline struct qib_ah *to_iah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qib_ah, ibah);
+}
+
+static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qib_cq, ibcq);
+}
+
+static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qib_srq, ibsrq);
+}
+
+static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qib_qp, ibqp);
+}
+
+static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qib_ibdev, ibdev);
+}
+
+/*
+ * Send if not busy or waiting for I/O and either
+ * a RC response is pending or we can process send work requests.
+ */
+static inline int qib_send_ok(struct qib_qp *qp)
+{
+ return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
+ !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+}
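+/*
+ * For example, a QP blocked only by QIB_S_WAIT_RNR (an ANY_WAIT_SEND
+ * flag) is still considered sendable here when an RC response is
+ * pending or a header is already built (s_hdrwords != 0), whereas
+ * QIB_S_BUSY or any ANY_WAIT_IO flag blocks it unconditionally.
+ */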
+
+extern struct workqueue_struct *qib_wq;
+extern struct workqueue_struct *qib_cq_wq;
+
+/*
+ * This must be called with s_lock held.
+ */
+static inline void qib_schedule_send(struct qib_qp *qp)
+{
+ if (qib_send_ok(qp)) {
+ if (qp->processor_id == smp_processor_id())
+ queue_work(qib_wq, &qp->s_work);
+ else
+ queue_work_on(qp->processor_id,
+ qib_wq, &qp->s_work);
+ }
+}
+
+static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+{
+ u16 p1 = pkey1 & 0x7FFF;
+ u16 p2 = pkey2 & 0x7FFF;
+
+ /*
+ * Low 15 bits must be non-zero and match, and
+ * one of the two must be a full member.
+ */
+ return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
+}
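+/*
+ * For example, pkeys 0x8001 (full member) and 0x0001 (limited member)
+ * match: the low 15 bits agree and the first has the full-membership
+ * bit (bit 15) set. Two limited members such as 0x0001 and 0x0001 do
+ * not pass this check.
+ */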
+
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_sys_guid_chg(struct qib_ibport *ibp);
+void qib_node_desc_chg(struct qib_ibport *ibp);
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+int qib_create_agents(struct qib_ibdev *dev);
+void qib_free_agents(struct qib_ibdev *dev);
+
+/*
+ * Compare the lower 24 bits of the two values.
+ * Returns an integer <, ==, or > than zero.
+ */
+static inline int qib_cmp24(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << 8;
+}
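+/*
+ * The left shift by 8 discards any difference above bit 23 and moves
+ * the 24-bit sign into the machine sign bit, so the comparison wraps
+ * correctly: e.g. qib_cmp24(0x000001, 0xffffff) is positive, i.e.
+ * PSN 1 follows PSN 0xffffff.
+ */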
+
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait);
+
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs);
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp);
+
+__be32 qib_compute_aeth(struct qib_qp *qp);
+
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
+
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int qib_destroy_qp(struct ib_qp *ibqp);
+
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+
+unsigned qib_free_all_qps(struct qib_devdata *dd);
+
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
+
+void qib_free_qpn_table(struct qib_qpn_table *qpt);
+
+void qib_get_credit(struct qib_qp *qp, u32 aeth);
+
+unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
+
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
+
+void qib_put_txreq(struct qib_verbs_txreq *tx);
+
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len);
+
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+ int release);
+
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+void qib_rc_rnr_retry(unsigned long arg);
+
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
+
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
+
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc);
+
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int qib_destroy_srq(struct ib_srq *ibsrq);
+
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
+
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int qib_destroy_cq(struct ib_cq *ibcq);
+
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+
+int qib_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
+ struct ib_device *ibdev, int page_list_len);
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
+
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
+
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+
+int qib_unmap_fmr(struct list_head *fmr_list);
+
+int qib_dealloc_fmr(struct ib_fmr *ibfmr);
+
+void qib_release_mmap_info(struct kref *ref);
+
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj);
+
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
+
+void qib_migrate_qp(struct qib_qp *qp);
+
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0);
+
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2);
+
+void qib_do_send(struct work_struct *work);
+
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status);
+
+void qib_send_rc_ack(struct qib_qp *qp);
+
+int qib_make_rc_req(struct qib_qp *qp);
+
+int qib_make_uc_req(struct qib_qp *qp);
+
+int qib_make_ud_req(struct qib_qp *qp);
+
+int qib_register_ib_device(struct qib_devdata *);
+
+void qib_unregister_ib_device(struct qib_devdata *);
+
+void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
+
+void qib_ib_piobufavail(struct qib_devdata *);
+
+unsigned qib_get_npkeys(struct qib_devdata *);
+
+unsigned qib_get_pkey(struct qib_ibport *, unsigned);
+
+extern const enum ib_wc_opcode ib_qib_wc_opcode[];
+
+/*
+ * Below are the HCA-independent IB PhysPortState values, returned
+ * by the f_ibphys_portstate() routine.
+ */
+#define IB_PHYSPORTSTATE_SLEEP 1
+#define IB_PHYSPORTSTATE_POLL 2
+#define IB_PHYSPORTSTATE_DISABLED 3
+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
+#define IB_PHYSPORTSTATE_LINKUP 5
+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
+#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
+#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
+#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
+#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
+#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
+#define IB_PHYSPORTSTATE_CFG_ENH 0x10
+#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
+
+extern const int ib_qib_state_ops[];
+
+extern __be64 ib_qib_sys_image_guid; /* in network order */
+
+extern unsigned int ib_qib_lkey_table_size;
+
+extern unsigned int ib_qib_max_cqes;
+
+extern unsigned int ib_qib_max_cqs;
+
+extern unsigned int ib_qib_max_qp_wrs;
+
+extern unsigned int ib_qib_max_qps;
+
+extern unsigned int ib_qib_max_sges;
+
+extern unsigned int ib_qib_max_mcast_grps;
+
+extern unsigned int ib_qib_max_mcast_qp_attached;
+
+extern unsigned int ib_qib_max_srqs;
+
+extern unsigned int ib_qib_max_srq_sges;
+
+extern unsigned int ib_qib_max_srq_wrs;
+
+extern const u32 ib_qib_rnr_table[];
+
+extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
+
+#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 00000000000..dabb697b1c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rculist.h>
+
+#include "qib.h"
+
+/**
+ * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
+{
+ struct qib_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ atomic_inc(&qp->refcount);
+
+bail:
+ return mqp;
+}
+
+static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
+{
+ struct qib_qp *qp = mqp->qp;
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+
+ kfree(mqp);
+}
+
+/**
+ * qib_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
+{
+ struct qib_mcast *mcast;
+
+ mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mgid = *mgid;
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+ mcast->n_attached = 0;
+
+bail:
+ return mcast;
+}
+
+static void qib_mcast_free(struct qib_mcast *mcast)
+{
+ struct qib_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ qib_mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * qib_mcast_find - search the global table for the given multicast GID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct qib_mcast *mcast;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ n = ibp->mcast_tree.rb_node;
+ while (n) {
+ int ret;
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else {
+ atomic_inc(&mcast->refcount);
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ goto bail;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ mcast = NULL;
+
+bail:
+ return mcast;
+}
+
+/**
+ * qib_mcast_add - insert mcast GID into table and attach QP struct
+ * @dev: the qib device whose mcast group count is updated
+ * @ibp: the IB port whose mcast tree the GID is inserted into
+ * @mcast: the new mcast GID structure to insert
+ * @mqp: the QP to attach
+ *
+ * Return zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added.
+ */
+static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
+ struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
+{
+ struct rb_node **n = &ibp->mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ int ret;
+
+ spin_lock_irq(&ibp->lock);
+
+ while (*n) {
+ struct qib_mcast *tmcast;
+ struct qib_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct qib_mcast, rb_node);
+
+ ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ tmcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ spin_lock(&dev->n_mcast_grps_lock);
+ if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
+ spin_unlock(&dev->n_mcast_grps_lock);
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ dev->n_mcast_grps_allocated++;
+ spin_unlock(&dev->n_mcast_grps_lock);
+
+ mcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+ ret = 0;
+
+bail:
+ spin_unlock_irq(&ibp->lock);
+
+ return ret;
+}
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp;
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *mqp;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Allocate the data structures now, since it's better to do this
+ * outside of spin locks and they will most likely be needed.
+ */
+ mcast = qib_mcast_alloc(gid);
+ if (mcast == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ mqp = qib_mcast_qp_alloc(qp);
+ if (mqp == NULL) {
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ ibp = to_iport(ibqp->device, qp->port_num);
+ switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ break;
+
+ case EEXIST: /* The mcast wasn't used */
+ qib_mcast_free(mcast);
+ break;
+
+ case ENOMEM:
+ /* Exceeded the maximum number of mcast groups. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct qib_mcast *mcast = NULL;
+ struct qib_mcast_qp *p, *tmp;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irq(&ibp->lock);
+
+ /* Find the GID in the mcast table. */
+ n = ibp->mcast_tree.rb_node;
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+ last = 1;
+ }
+ break;
+ }
+
+ spin_unlock_irq(&ibp->lock);
+
+ if (p) {
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ qib_mcast_qp_free(p);
+ }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ qib_mcast_free(mcast);
+ spin_lock_irq(&dev->n_mcast_grps_lock);
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp)
+{
+ return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131..673cf4c22eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
-#ifndef _IPATH_7220_H
-#define _IPATH_7220_H
/*
- * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,25 +31,32 @@
*/
/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
+ * This file is conditionally built on PowerPC only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
*/
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
- int len, int offset);
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
- int len, int offset);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
-int ipath_sd7220_ib_load(struct ipath_devdata *dd);
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
+#include "qib.h"
-#endif /* _IPATH_7220_H */
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * Nothing to do on PowerPC, so just return without error.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write
+ * combining mmio writes in the most efficient way, we need to
+ * know if we are on a processor that may reorder stores when
+ * write combining.
+ */
+int qib_unordered_wc(void)
+{
+ return 1;
+}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 00000000000..561b8bca406
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on x86_64 only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include <linux/pci.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+
+#include "qib.h"
+
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
+ * write combining.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 pioaddr, piolen;
+ unsigned bits;
+ const unsigned long addr = pci_resource_start(dd->pcidev, 0);
+ const size_t len = pci_resource_len(dd->pcidev, 0);
+
+ /*
+ * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
+ * chip. Linux (possibly the hardware) requires it to be on a power
+ * of 2 address matching the length (which has to be a power of 2).
+ * For rev1, that means the base address, for rev2, it will be just
+ * the PIO buffers themselves.
+ * For chips with two sets of buffers, the calculations are
+ * somewhat more complicated; we need to sum, and the piobufbase
+ * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+ * The buffers are still packed, so a single range covers both.
+ */
+ if (dd->piobcnt2k && dd->piobcnt4k) {
+ /* 2 sizes for chip */
+ unsigned long pio2kbase, pio4kbase;
+ pio2kbase = dd->piobufbase & 0xffffffffUL;
+ pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
+ if (pio2kbase < pio4kbase) {
+ /* all current chips */
+ pioaddr = addr + pio2kbase;
+ piolen = pio4kbase - pio2kbase +
+ dd->piobcnt4k * dd->align4k;
+ } else {
+ pioaddr = addr + pio4kbase;
+ piolen = pio2kbase - pio4kbase +
+ dd->piobcnt2k * dd->palign;
+ }
+ } else { /* single buffer size (2K, currently) */
+ pioaddr = addr + dd->piobufbase;
+ piolen = dd->piobcnt2k * dd->palign +
+ dd->piobcnt4k * dd->align4k;
+ }
+
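+ /*
+ * For example, a combined PIO length of 0x24000 is rounded up below
+ * to the next power of 2 (0x40000); if pioaddr is not aligned to
+ * that size it is aligned down and the length doubled, provided the
+ * resulting range still fits within the BAR.
+ */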
+ for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+ /* do nothing */ ;
+
+ if (piolen != (1ULL << bits)) {
+ piolen >>= bits;
+ while (piolen >>= 1)
+ bits++;
+ piolen = 1ULL << (bits + 1);
+ }
+ if (pioaddr & (piolen - 1)) {
+ u64 atmp;
+ atmp = pioaddr & ~(piolen - 1);
+ if (atmp < addr || (atmp + piolen) > (addr + len)) {
+ qib_dev_err(dd, "No way to align address/size "
+ "(%llx/%llx), no WC mtrr\n",
+ (unsigned long long) atmp,
+ (unsigned long long) piolen << 1);
+ ret = -ENODEV;
+ } else {
+ pioaddr = atmp;
+ piolen <<= 1;
+ }
+ }
+
+ if (!ret) {
+ int cookie;
+
+ cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+ if (cookie < 0) {
+ qib_devinfo(dd->pcidev,
+ "mtrr_add() WC for PIO bufs "
+ "failed (%d)\n", cookie);
+ ret = -EINVAL;
+ } else {
+ dd->wc_cookie = cookie;
+ dd->wc_base = (unsigned long) pioaddr;
+ dd->wc_len = (unsigned long) piolen;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * qib_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ */
+void qib_disable_wc(struct qib_devdata *dd)
+{
+ if (dd->wc_cookie) {
+ int r;
+
+ r = mtrr_del(dd->wc_cookie, dd->wc_base,
+ dd->wc_len);
+ if (r < 0)
+ qib_devinfo(dd->pcidev,
+ "mtrr_del(%lx, %lx, %lx) failed: %d\n",
+ dd->wc_cookie, dd->wc_base,
+ dd->wc_len, r);
+ dd->wc_cookie = 0; /* even on failure */
+ }
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering. Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
+ */
+int qib_unordered_wc(void)
+{
+ return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d2..40e858492f9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
+static int ipoib_set_tso(struct net_device *dev, u32 data)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (data) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ (dev->features & NETIF_F_SG) &&
+ (priv->hca_caps & IB_DEVICE_UD_TSO)) {
+ dev->features |= NETIF_F_TSO;
+ } else {
+ ipoib_warn(priv, "can't set TSO on\n");
+ return -EOPNOTSUPP;
+ }
+ } else
+ dev->features &= ~NETIF_F_TSO;
+
+ return 0;
+}
+
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_rx_csum = ipoib_get_rx_csum,
+ .set_tso = ipoib_set_tso,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb75753..3871ac66355 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
- const u8 *broadcast)
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
- if (addrlen != INFINIBAND_ALEN)
- return 0;
/* reserved QPN, prefix, scope */
if (memcmp(addr, broadcast, 6))
return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
/* Mark all of the entries that are found or don't exist */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
union ib_gid mgid;
- if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
- mclist->dmi_addrlen,
- dev->broadcast))
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6..7b2fc98e2f2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
*/
if (ib_conn) {
ib_conn->iser_conn = NULL;
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
}
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
- iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+ iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+ conn, conn->dd_data, ib_conn);
iser_conn = conn->dd_data;
ib_conn->iser_conn = iser_conn;
iser_conn->ib_conn = ib_conn;
- iser_conn_get(ib_conn);
+ iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
return 0;
}
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
* There is no unbind event so the stop callback
* must release the ref from the bind.
*/
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
iser_conn->ib_conn = NULL;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb9..f1df01567bb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
+ struct ib_event_handler event_handler;
struct list_head ig_list; /* entry in ig devices list */
int refcount;
};
@@ -246,7 +247,6 @@ struct iser_conn {
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
- int disc_evt_flag; /* disconn event delivered */
wait_queue_head_t wait; /* waitq for conn/disconn */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
void iser_conn_get(struct iser_conn *ib_conn);
-void iser_conn_put(struct iser_conn *ib_conn);
+int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
void iser_conn_terminate(struct iser_conn *ib_conn);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a1..9876865732f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
iser_err("got qp event %d\n",cause->event);
}
+static void iser_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ iser_err("async event %d on device %s port %d\n", event->event,
+ event->device->name, event->element.port_num);
+}
+
/**
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (IS_ERR(device->mr))
goto dma_mr_err;
+ INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
+ iser_event_handler);
+ if (ib_register_event_handler(&device->event_handler))
+ goto handler_err;
+
return 0;
+handler_err:
+ ib_dereg_mr(device->mr);
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
BUG_ON(device->mr == NULL);
tasklet_kill(&device->cq_tasklet);
-
+ (void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
(void)ib_destroy_cq(device->tx_cq);
(void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
device = ib_conn->device;
ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!ib_conn->login_buf) {
- goto alloc_err;
- ret = -ENOMEM;
- }
+ if (!ib_conn->login_buf)
+ goto out_err;
ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
GFP_KERNEL);
- if (!ib_conn->page_vec) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!ib_conn->page_vec)
+ goto out_err;
+
ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
if (IS_ERR(ib_conn->fmr_pool)) {
ret = PTR_ERR(ib_conn->fmr_pool);
- goto fmr_pool_err;
+ ib_conn->fmr_pool = NULL;
+ goto out_err;
}
memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
- goto qp_err;
+ goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->fmr_pool, ib_conn->cma_id->qp);
return ret;
-qp_err:
- (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
-fmr_pool_err:
- kfree(ib_conn->page_vec);
- kfree(ib_conn->login_buf);
-alloc_err:
+out_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
}
@@ -224,7 +231,7 @@ alloc_err:
* releases the FMR pool, QP and CMA ID objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
BUG_ON(ib_conn == NULL);
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
if (ib_conn->qp != NULL)
rdma_destroy_qp(ib_conn->cma_id);
- if (ib_conn->cma_id != NULL)
+ /* if called from a cma handler context, the caller makes sure the cma will destroy the id */
+ if (ib_conn->cma_id != NULL && can_destroy_id)
rdma_destroy_id(ib_conn->cma_id);
ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
/**
* Frees all conn objects and deallocs conn descriptor
*/
-static void iser_conn_release(struct iser_conn *ib_conn)
+static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
{
struct iser_device *device = ib_conn->device;
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn);
+ iser_free_ib_conn_res(ib_conn, can_destroy_id);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
- if (ib_conn->iser_conn)
- ib_conn->iser_conn->ib_conn = NULL;
iscsi_destroy_endpoint(ib_conn->ep);
}
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
atomic_inc(&ib_conn->refcount);
}
-void iser_conn_put(struct iser_conn *ib_conn)
+int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
{
- if (atomic_dec_and_test(&ib_conn->refcount))
- iser_conn_release(ib_conn);
+ if (atomic_dec_and_test(&ib_conn->refcount)) {
+ iser_conn_release(ib_conn, can_destroy_id);
+ return 1;
+ }
+ return 0;
}
/**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}
-static void iser_connect_error(struct rdma_cm_id *cma_id)
+static int iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
+ return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}
-static void iser_addr_handler(struct rdma_cm_id *cma_id)
+static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
- iser_connect_error(cma_id);
- return;
+ return iser_connect_error(cma_id);
}
ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
- iser_connect_error(cma_id);
+ return iser_connect_error(cma_id);
}
+
+ return 0;
}
-static void iser_route_handler(struct rdma_cm_id *cma_id)
+static int iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
}
- return;
+ return 0;
failure:
- iser_connect_error(cma_id);
+ return iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
wake_up_interruptible(&ib_conn->wait);
}
-static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+ int ret;
ib_conn = (struct iser_conn *)cma_id->context;
- ib_conn->disc_evt_flag = 1;
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
}
+
+ ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
+ return ret;
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
- iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id);
+ iser_err("event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
- iser_addr_handler(cma_id);
+ ret = iser_addr_handler(cma_id);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- iser_route_handler(cma_id);
+ ret = iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
- iser_err("event: %d, error: %d\n", event->event, event->status);
- iser_connect_error(cma_id);
+ ret = iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
- iser_disconnected_handler(cma_id);
+ ret = iser_disconnected_handler(cma_id);
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->refcount, 1);
+ atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
}
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
ib_conn->state = ISER_CONN_PENDING;
+ iser_conn_get(ib_conn); /* ref ib conn's cma id */
ib_conn->cma_id = rdma_create_id(iser_cma_handler,
(void *)ib_conn,
RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
addr_failure:
ib_conn->state = ISER_CONN_DOWN;
connect_failure:
- iser_conn_release(ib_conn);
+ iser_conn_release(ib_conn, 1);
return err;
}
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
- /* complete the termination process if disconnect event was delivered *
- * note there are no more non completed posts to the QP */
- if (ib_conn->disc_evt_flag) {
- ib_conn->state = ISER_CONN_DOWN;
- wake_up_interruptible(&ib_conn->wait);
- }
+ /* there are no more uncompleted posts to the QP, complete the
+ * termination process without waiting for the disconnect event */
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
}
}
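
The rework above drops the disc_evt_flag handshake in favor of plain reference counting: the connection holds one reference for its allocation and one for its CMA ID, and iser_conn_put() now reports whether the last reference was dropped so the CM callbacks can return non-zero and let the RDMA CM destroy the ID itself. A minimal, self-contained sketch of the same pattern (the names here are illustrative, not the driver's):

#include <linux/atomic.h>
#include <linux/slab.h>

struct example_conn {
	atomic_t refcount;
	/* ... connection state ... */
};

static void example_conn_get(struct example_conn *c)
{
	atomic_inc(&c->refcount);
}

/* Returns 1 when the last reference was dropped and the object released,
 * so a CM-style callback can tell its caller to destroy the id for us. */
static int example_conn_put(struct example_conn *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		kfree(c);
		return 1;
	}
	return 0;
}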
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 7e18bcf05a6..46239e47a26 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031a..34157bb97ed 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
struct mutex mutex;
struct device dev;
- struct js_corr corr[ABS_MAX + 1];
+ struct js_corr corr[ABS_CNT];
struct JS_DATA_SAVE_TYPE glue;
int nabs;
int nkey;
__u16 keymap[KEY_MAX - BTN_MISC + 1];
__u16 keypam[KEY_MAX - BTN_MISC + 1];
- __u8 absmap[ABS_MAX + 1];
- __u8 abspam[ABS_MAX + 1];
- __s16 abs[ABS_MAX + 1];
+ __u8 absmap[ABS_CNT];
+ __u8 abspam[ABS_CNT];
+ __s16 abs[ABS_CNT];
};
struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
joydev->handle.handler = handler;
joydev->handle.private = joydev;
- for (i = 0; i < ABS_MAX + 1; i++)
+ for (i = 0; i < ABS_CNT; i++)
if (test_bit(i, dev->absbit)) {
joydev->absmap[i] = joydev->nabs;
joydev->abspam[joydev->nabs] = i;
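
ABS_CNT is the kernel's name for ABS_MAX + 1, so sizing the per-axis arrays with it and looping up to ABS_CNT covers every defined axis code without the error-prone "+ 1" arithmetic. A small sketch of the idiom (array and function names are illustrative):

#include <linux/input.h>

/* One slot per possible absolute axis code, ABS_X .. ABS_MAX. */
static __s16 example_abs[ABS_CNT];	/* ABS_CNT == ABS_MAX + 1 */

static void example_clear_axes(void)
{
	int i;

	for (i = 0; i < ABS_CNT; i++)
		example_abs[i] = 0;
}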
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06a..4afe0a3b488 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 9b3353b404d..c1087ce4cef 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -533,8 +533,8 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
return 0;
- xpad->odata = usb_buffer_alloc(xpad->udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->odata_dma);
+ xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->odata_dma);
if (!xpad->odata)
goto fail1;
@@ -554,7 +554,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
return 0;
- fail2: usb_buffer_free(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
+ fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
fail1: return error;
}
@@ -568,7 +568,7 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
{
if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
usb_free_urb(xpad->irq_out);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->odata, xpad->odata_dma);
}
}
@@ -788,8 +788,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (!xpad || !input_dev)
goto fail1;
- xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->idata_dma);
+ xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->idata_dma);
if (!xpad->idata)
goto fail1;
@@ -942,7 +942,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
fail5: usb_kill_urb(xpad->irq_in);
fail4: usb_free_urb(xpad->irq_in);
fail3: xpad_deinit_output(xpad);
- fail2: usb_buffer_free(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
+ fail2: usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
fail1: input_free_device(input_dev);
kfree(xpad);
return error;
@@ -964,7 +964,7 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_kill_urb(xpad->irq_in);
}
usb_free_urb(xpad->irq_in);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->idata, xpad->idata_dma);
kfree(xpad);
}
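
usb_buffer_alloc()/usb_buffer_free() were renamed to usb_alloc_coherent()/usb_free_coherent(); the semantics are unchanged: a DMA-consistent buffer plus a dma_addr_t that is handed to the URB. A minimal sketch of the pairing, assuming a hypothetical driver that already has its struct usb_device and URB (names and size are illustrative):

#include <linux/usb.h>

#define EXAMPLE_PKT_LEN 32	/* illustrative packet size */

static int example_setup_out(struct usb_device *udev, struct urb *urb)
{
	dma_addr_t dma;
	void *buf = usb_alloc_coherent(udev, EXAMPLE_PKT_LEN, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;

	urb->transfer_buffer = buf;
	urb->transfer_dma = dma;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	/* buffer is pre-mapped */
	return 0;
}

static void example_teardown_out(struct usb_device *udev, struct urb *urb)
{
	usb_free_coherent(udev, EXAMPLE_PKT_LEN, urb->transfer_buffer,
			  urb->transfer_dma);
}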
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64c102355f5..d8fa5d724c5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -73,7 +73,7 @@ config KEYBOARD_ATKBD
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if X86 && !X86_MRST
select SERIO_GSCPS2 if GSC
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
@@ -143,19 +143,6 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
-config KEYBOARD_CORGI
- tristate "Corgi keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C7xx
- series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called corgikbd.
-
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -192,6 +179,22 @@ config KEYBOARD_GPIO
To compile this driver as a module, choose M here: the
module will be called gpio_keys.
+config KEYBOARD_TCA6416
+ tristate "TCA6416 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through a TCA6416 IO expander.
+
+ Say Y here if your device has keys connected to a
+ TCA6416 IO expander. Your board-specific setup logic
+ must also provide pin-mask details (which TCA6416 pins
+ are used for the keypad).
+
+ If enabled, the complete TCA6416 device will be managed through
+ this driver.
+
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -339,19 +342,6 @@ config KEYBOARD_PXA930_ROTARY
To compile this driver as a module, choose M here: the
module will be called pxa930_rotary.
-config KEYBOARD_SPITZ
- tristate "Spitz keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
- SL-C3000 and Sl-C3100 series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called spitzkbd.
-
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
@@ -414,28 +404,6 @@ config KEYBOARD_TWL4030
To compile this driver as a module, choose M here: the
module will be called twl4030_keypad.
-config KEYBOARD_TOSA
- tristate "Tosa keyboard (deprecated)"
- depends on MACH_TOSA
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called tosakbd.
-
-config KEYBOARD_TOSA_USE_EXT_KEYCODES
- bool "Tosa keyboard: use extended keycodes"
- depends on KEYBOARD_TOSA
- help
- Say Y here to enable the tosa keyboard driver to generate extended
- (>= 127) keycodes. Be aware, that they can't be correctly interpreted
- by either console keyboard driver or by Kdrive keybd driver.
-
- Say Y only if you know, what you are doing!
-
config KEYBOARD_XTKBD
tristate "XT keyboard"
select SERIO
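
The TCA6416 help text above says board code must supply the pin mask and key map. Based on the fields the new driver dereferences later in this series (pinmask, buttons, nbuttons, rep, use_polling), a board-side platform-data sketch might look like the following; treat the exact field layout, I2C address, and bus number as assumptions drawn from the driver, not a documented ABI:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/tca6416_keypad.h>

static struct tca6416_button example_buttons[] = {
	{ .code = KEY_VOLUMEUP,   .active_low = 1, .type = EV_KEY },
	{ .code = KEY_VOLUMEDOWN, .active_low = 1, .type = EV_KEY },
};

static struct tca6416_keys_platform_data example_keys_pdata = {
	.buttons	= example_buttons,
	.nbuttons	= ARRAY_SIZE(example_buttons),
	.pinmask	= 0x0003,	/* keys wired to expander pins P00 and P01 */
	.rep		= 1,		/* let the input core auto-repeat */
	.use_polling	= 1,		/* no INT line wired up, poll instead */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("tca6416-keys", 0x20),	/* example address */
		.platform_data = &example_keys_pdata,
	},
};

static int __init example_board_register_keys(void)
{
	/* bus number 1 is an example; use the bus the expander sits on */
	return i2c_register_board_info(1, example_i2c_devs,
				       ARRAY_SIZE(example_i2c_devs));
}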
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 706c6b5ed5f..4596d0c6f92 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
@@ -33,10 +33,8 @@ obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
-obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
-obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a..79172af164f 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/keyboard.h>
+#include <linux/platform_device.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
[7] = KERN_WARNING "amikbd: keyboard interrupt\n"
};
-static struct input_dev *amikbd_dev;
-
-static irqreturn_t amikbd_interrupt(int irq, void *dummy)
+static irqreturn_t amikbd_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned char scancode, down;
scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(amikbd_dev, scancode, 1);
- input_report_key(amikbd_dev, scancode, 0);
+ input_report_key(dev, scancode, 1);
+ input_report_key(dev, scancode, 0);
} else {
- input_report_key(amikbd_dev, scancode, down);
+ input_report_key(dev, scancode, down);
}
- input_sync(amikbd_dev);
+ input_sync(dev);
} else /* scancodes >= 0x78 are error codes */
printk(amikbd_messages[scancode - 0x78]);
return IRQ_HANDLED;
}
-static int __init amikbd_init(void)
+static int __init amikbd_probe(struct platform_device *pdev)
{
+ struct input_dev *dev;
int i, j, err;
- if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
- return -ENODEV;
-
- if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb"))
- return -EBUSY;
-
- amikbd_dev = input_allocate_device();
- if (!amikbd_dev) {
- printk(KERN_ERR "amikbd: not enough memory for input device\n");
- err = -ENOMEM;
- goto fail1;
+ dev = input_allocate_device();
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for input device\n");
+ return -ENOMEM;
}
- amikbd_dev->name = "Amiga Keyboard";
- amikbd_dev->phys = "amikbd/input0";
- amikbd_dev->id.bustype = BUS_AMIGA;
- amikbd_dev->id.vendor = 0x0001;
- amikbd_dev->id.product = 0x0001;
- amikbd_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amikbd/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0001;
+ dev->id.version = 0x0100;
+ dev->dev.parent = &pdev->dev;
- amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
for (i = 0; i < 0x78; i++)
- set_bit(i, amikbd_dev->keybit);
+ set_bit(i, dev->keybit);
for (i = 0; i < MAX_NR_KEYMAPS; i++) {
static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
- if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
- amikbd_interrupt)) {
- err = -EBUSY;
+ err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
+ dev);
+ if (err)
goto fail2;
- }
- err = input_register_device(amikbd_dev);
+ err = input_register_device(dev);
if (err)
goto fail3;
+ platform_set_drvdata(pdev, dev);
+
return 0;
- fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- fail2: input_free_device(amikbd_dev);
- fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ fail2: input_free_device(dev);
return err;
}
-static void __exit amikbd_exit(void)
+static int __exit amikbd_remove(struct platform_device *pdev)
+{
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amikbd_driver = {
+ .remove = __exit_p(amikbd_remove),
+ .driver = {
+ .name = "amiga-keyboard",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amikbd_init(void)
{
- free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- input_unregister_device(amikbd_dev);
- release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ return platform_driver_probe(&amikbd_driver, amikbd_probe);
}
module_init(amikbd_init);
+
+static void __exit amikbd_exit(void)
+{
+ platform_driver_unregister(&amikbd_driver);
+}
+
module_exit(amikbd_exit);
+
+MODULE_ALIAS("platform:amiga-keyboard");
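
amikbd is now a platform driver probed via platform_driver_probe(), so it only binds when the architecture code registers an "amiga-keyboard" platform device. A hedged sketch of such arch-side registration (the real m68k/Amiga platform setup is not part of this hunk, and the function name is illustrative):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init example_register_amiga_keyboard(void)
{
	struct platform_device *pdev;

	/* matched by amikbd_driver through the "amiga-keyboard" name */
	pdev = platform_device_register_simple("amiga-keyboard", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}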
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
deleted file mode 100644
index 634af6a8e6b..00000000000
--- a/drivers/input/keyboard/corgikbd.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Keyboard driver for Sharp Corgi models (SL-C7xx)
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * Based on xtkbd.c/locomkbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/corgi.h>
-#include <mach/pxa2xx-gpio.h>
-#include <asm/hardware/scoop.h>
-
-#define KB_ROWS 8
-#define KB_COLS 12
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
-/* zero code, 124 scancodes */
-#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (250) /* ms */
-
-#define CORGI_KEY_CALENDER KEY_F1
-#define CORGI_KEY_ADDRESS KEY_F2
-#define CORGI_KEY_FN KEY_F3
-#define CORGI_KEY_CANCEL KEY_F4
-#define CORGI_KEY_OFF KEY_SUSPEND
-#define CORGI_KEY_EXOK KEY_F5
-#define CORGI_KEY_EXCANCEL KEY_F6
-#define CORGI_KEY_EXJOGDOWN KEY_F7
-#define CORGI_KEY_EXJOGUP KEY_F8
-#define CORGI_KEY_JAP1 KEY_LEFTCTRL
-#define CORGI_KEY_JAP2 KEY_LEFTALT
-#define CORGI_KEY_MAIL KEY_F10
-#define CORGI_KEY_OK KEY_F11
-#define CORGI_KEY_MENU KEY_F12
-
-static unsigned char corgikbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- 0, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, 0, 0, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, 0, 0, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
- CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
- CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
-};
-
-
-struct corgikbd {
- unsigned char keycode[ARRAY_SIZE(corgikbd_keycode)];
- struct input_dev *input;
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void corgikbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
-}
-
-static inline void corgikbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
- GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
-}
-
-static inline void corgikbd_activate_col(int col)
-{
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = CORGI_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-static inline void corgikbd_reset_col(int col)
-{
- /* STROBE col -> Low */
- GPCR2 = CORGI_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-#define GET_ROWS_STATUS(c) (((GPLR1 & CORGI_GPIO_HIGH_SENSE_BIT) >> CORGI_GPIO_HIGH_SENSE_RSHIFT) | ((GPLR2 & CORGI_GPIO_LOW_SENSE_BIT) << CORGI_GPIO_LOW_SENSE_LSHIFT))
-
-/*
- * The corgi keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed;
-
- if (corgikbd_data->suspended)
- return;
-
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- corgikbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- corgikbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
-
- if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
- && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
- input_event(corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
- corgikbd_data->suspend_jiffies=jiffies;
- }
- }
- corgikbd_reset_col(col);
- }
-
- corgikbd_activate_all();
-
- input_sync(corgikbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
-}
-
-/*
- * corgi keyboard interrupt handler.
- */
-static irqreturn_t corgikbd_interrupt(int irq, void *dev_id)
-{
- struct corgikbd *corgikbd_data = dev_id;
-
- if (!timer_pending(&corgikbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- corgikbd_scankeyboard(corgikbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * corgi timer checking for released keys
- */
-static void corgikbd_timer_callback(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- corgikbd_scankeyboard(corgikbd_data);
-}
-
-/*
- * The hinge switches generate no interrupt so they need to be
- * monitored by a timer.
- *
- * We debounce the switches and pass them to the input system.
- *
- * gprr == 0x00 - Keyboard with Landscape Screen
- * 0x08 - No Keyboard with Portrait Screen
- * 0x0c - Keyboard and Screen Closed
- */
-
-#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void corgikbd_hinge_timer(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- unsigned long gprr;
- unsigned long flags;
-
- gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
- gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
- if (gprr != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = gprr;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
- input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
- input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
- input_sync(corgikbd_data->input);
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
- }
- }
- mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-}
-
-#ifdef CONFIG_PM
-static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- corgikbd->suspended = 1;
- /* strobe 0 is the power key so this can't be made an input for
- powersaving therefore i = 1 */
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
-
- return 0;
-}
-
-static int corgikbd_resume(struct platform_device *dev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- corgikbd->suspend_jiffies=jiffies;
- corgikbd->suspended = 0;
-
- return 0;
-}
-#else
-#define corgikbd_suspend NULL
-#define corgikbd_resume NULL
-#endif
-
-static int __devinit corgikbd_probe(struct platform_device *pdev)
-{
- struct corgikbd *corgikbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgikbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(pdev, corgikbd);
-
- corgikbd->input = input_dev;
- spin_lock_init(&corgikbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&corgikbd->timer);
- corgikbd->timer.function = corgikbd_timer_callback;
- corgikbd->timer.data = (unsigned long) corgikbd;
-
- /* Init Hinge Timer */
- init_timer(&corgikbd->htimer);
- corgikbd->htimer.function = corgikbd_hinge_timer;
- corgikbd->htimer.data = (unsigned long) corgikbd;
-
- corgikbd->suspend_jiffies=jiffies;
-
- memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));
-
- input_dev->name = "Corgi Keyboard";
- input_dev->phys = "corgikbd/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = corgikbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);
-
- for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
- set_bit(corgikbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(corgikbd->input);
- if (err)
- goto fail;
-
- mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
- if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "corgikbd", corgikbd))
- printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Setup the headphone jack as an input */
- pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(corgikbd);
- return err;
-}
-
-static int __devexit corgikbd_remove(struct platform_device *pdev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(pdev);
-
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++)
- free_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd);
-
- del_timer_sync(&corgikbd->htimer);
- del_timer_sync(&corgikbd->timer);
-
- input_unregister_device(corgikbd->input);
-
- kfree(corgikbd);
-
- return 0;
-}
-
-static struct platform_driver corgikbd_driver = {
- .probe = corgikbd_probe,
- .remove = __devexit_p(corgikbd_remove),
- .suspend = corgikbd_suspend,
- .resume = corgikbd_resume,
- .driver = {
- .name = "corgi-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgikbd_init(void)
-{
- return platform_driver_register(&corgikbd_driver);
-}
-
-static void __exit corgikbd_exit(void)
-{
- platform_driver_unregister(&corgikbd_driver);
-}
-
-module_init(corgikbd_init);
-module_exit(corgikbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:corgi-keyboard");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 60ac4684f87..bc696931fed 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -670,8 +670,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail1;
}
- i2c_set_clientdata(client, lm);
-
lm->client = client;
lm->idev = idev;
mutex_init(&lm->lock);
@@ -753,6 +751,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail4;
}
+ i2c_set_clientdata(client, lm);
+
device_init_wakeup(&client->dev, 1);
enable_irq_wake(client->irq);
@@ -778,6 +778,8 @@ static int __devexit lm8323_remove(struct i2c_client *client)
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
+ i2c_set_clientdata(client, NULL);
+
disable_irq_wake(client->irq);
free_irq(client->irq, lm);
cancel_work_sync(&lm->work);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
deleted file mode 100644
index 13967422658..00000000000
--- a/drivers/input/keyboard/spitzkbd.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
- *
- * Copyright (c) 2005 Richard Purdie
- *
- * Based on corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/spitz.h>
-#include <mach/pxa2xx-gpio.h>
-
-#define KB_ROWS 7
-#define KB_COLS 11
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES ((KB_ROWS<<4) + 1)
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (150) /* ms */
-
-#define SPITZ_KEY_CALENDER KEY_F1
-#define SPITZ_KEY_ADDRESS KEY_F2
-#define SPITZ_KEY_FN KEY_F3
-#define SPITZ_KEY_CANCEL KEY_F4
-#define SPITZ_KEY_EXOK KEY_F5
-#define SPITZ_KEY_EXCANCEL KEY_F6
-#define SPITZ_KEY_EXJOGDOWN KEY_F7
-#define SPITZ_KEY_EXJOGUP KEY_F8
-#define SPITZ_KEY_JAP1 KEY_LEFTALT
-#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
-#define SPITZ_KEY_SYNC KEY_F9
-#define SPITZ_KEY_MAIL KEY_F10
-#define SPITZ_KEY_OK KEY_F11
-#define SPITZ_KEY_MENU KEY_F12
-
-static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
- SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
-};
-
-static int spitz_strobes[] = {
- SPITZ_GPIO_KEY_STROBE0,
- SPITZ_GPIO_KEY_STROBE1,
- SPITZ_GPIO_KEY_STROBE2,
- SPITZ_GPIO_KEY_STROBE3,
- SPITZ_GPIO_KEY_STROBE4,
- SPITZ_GPIO_KEY_STROBE5,
- SPITZ_GPIO_KEY_STROBE6,
- SPITZ_GPIO_KEY_STROBE7,
- SPITZ_GPIO_KEY_STROBE8,
- SPITZ_GPIO_KEY_STROBE9,
- SPITZ_GPIO_KEY_STROBE10,
-};
-
-static int spitz_senses[] = {
- SPITZ_GPIO_KEY_SENSE0,
- SPITZ_GPIO_KEY_SENSE1,
- SPITZ_GPIO_KEY_SENSE2,
- SPITZ_GPIO_KEY_SENSE3,
- SPITZ_GPIO_KEY_SENSE4,
- SPITZ_GPIO_KEY_SENSE5,
- SPITZ_GPIO_KEY_SENSE6,
-};
-
-struct spitzkbd {
- unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
- struct input_dev *input;
- char phys[32];
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 11 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void spitzkbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
-}
-
-static inline void spitzkbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
- GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
- GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
- GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
- GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
- GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
- GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
-}
-
-static inline void spitzkbd_activate_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPSR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline void spitzkbd_reset_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPCR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline int spitzkbd_get_row_status(int col)
-{
- return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
- | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
- | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
-}
-
-/*
- * The spitz keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
-
- if (spitzkbd_data->suspended)
- return;
-
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- spitzkbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- spitzkbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = spitzkbd_get_row_status(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
- }
- spitzkbd_reset_col(col);
- }
-
- spitzkbd_activate_all();
-
- input_report_key(spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
- input_report_key(spitzkbd_data->input, KEY_SUSPEND, pwrkey);
-
- if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
- input_event(spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
- spitzkbd_data->suspend_jiffies = jiffies;
- }
-
- input_sync(spitzkbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
-}
-
-/*
- * spitz keyboard interrupt handler.
- */
-static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- spitzkbd_scankeyboard(spitzkbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * spitz timer checking for released keys
- */
-static void spitzkbd_timer_callback(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
-
- spitzkbd_scankeyboard(spitzkbd_data);
-}
-
-/*
- * The hinge switches generate an interrupt.
- * We debounce the switches and pass them to the input system.
- */
-
-static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->htimer))
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- return IRQ_HANDLED;
-}
-
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void spitzkbd_hinge_timer(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
- unsigned long state;
- unsigned long flags;
-
- state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
- state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
- if (state != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = state;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- }
-
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
- input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
- input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
- input_sync(spitzkbd_data->input);
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
- } else {
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
- }
-}
-
-#ifdef CONFIG_PM
-static int spitzkbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
- spitzkbd->suspended = 1;
-
- /* Set Strobe lines as inputs - *except* strobe line 0 leave this
- enabled so we can detect a power button press for resume */
- for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
-
- return 0;
-}
-
-static int spitzkbd_resume(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- spitzkbd->suspend_jiffies = jiffies;
- spitzkbd->suspended = 0;
-
- return 0;
-}
-#else
-#define spitzkbd_suspend NULL
-#define spitzkbd_resume NULL
-#endif
-
-static int __devinit spitzkbd_probe(struct platform_device *dev)
-{
- struct spitzkbd *spitzkbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!spitzkbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(dev, spitzkbd);
- strcpy(spitzkbd->phys, "spitzkbd/input0");
-
- spin_lock_init(&spitzkbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&spitzkbd->timer);
- spitzkbd->timer.function = spitzkbd_timer_callback;
- spitzkbd->timer.data = (unsigned long) spitzkbd;
-
- /* Init Hinge Timer */
- init_timer(&spitzkbd->htimer);
- spitzkbd->htimer.function = spitzkbd_hinge_timer;
- spitzkbd->htimer.data = (unsigned long) spitzkbd;
-
- spitzkbd->suspend_jiffies = jiffies;
-
- spitzkbd->input = input_dev;
-
- input_dev->name = "Spitz Keyboard";
- input_dev->phys = spitzkbd->phys;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = spitzkbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(spitzkbd_keycode);
-
- memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
- for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
- set_bit(spitzkbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(input_dev);
- if (err)
- goto fail;
-
- mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
- if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
- IRQF_DISABLED|IRQF_TRIGGER_RISING,
- "Spitzkbd Sense", spitzkbd))
- printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
-
- request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd Sync", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd PwrOn", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWA", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWB", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd HP", spitzkbd);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(spitzkbd);
- return err;
-}
-
-static int __devexit spitzkbd_remove(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
- free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
-
- free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
-
- del_timer_sync(&spitzkbd->htimer);
- del_timer_sync(&spitzkbd->timer);
-
- input_unregister_device(spitzkbd->input);
-
- kfree(spitzkbd);
-
- return 0;
-}
-
-static struct platform_driver spitzkbd_driver = {
- .probe = spitzkbd_probe,
- .remove = __devexit_p(spitzkbd_remove),
- .suspend = spitzkbd_suspend,
- .resume = spitzkbd_resume,
- .driver = {
- .name = "spitz-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init spitzkbd_init(void)
-{
- return platform_driver_register(&spitzkbd_driver);
-}
-
-static void __exit spitzkbd_exit(void)
-{
- platform_driver_unregister(&spitzkbd_driver);
-}
-
-module_init(spitzkbd_init);
-module_exit(spitzkbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Spitz Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:spitz-keyboard");
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
new file mode 100644
index 00000000000..493c93f25e2
--- /dev/null
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -0,0 +1,349 @@
+/*
+ * Driver for keys on TCA6416 I2C IO expander
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author : Sriramakrishnan.A.G. <srk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/tca6416_keypad.h>
+
+#define TCA6416_INPUT 0
+#define TCA6416_OUTPUT 1
+#define TCA6416_INVERT 2
+#define TCA6416_DIRECTION 3
+
+static const struct i2c_device_id tca6416_id[] = {
+ { "tca6416-keys", 16, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6416_id);
+
+struct tca6416_drv_data {
+ struct input_dev *input;
+ struct tca6416_button data[0];
+};
+
+struct tca6416_keypad_chip {
+ uint16_t reg_output;
+ uint16_t reg_direction;
+ uint16_t reg_input;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct delayed_work dwork;
+ u16 pinmask;
+ int irqnum;
+ bool use_polling;
+ struct tca6416_button buttons[0];
+};
+
+static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
+{
+ int error;
+
+ error = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ if (error < 0) {
+ dev_err(&chip->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
+{
+ int retval;
+
+ retval = i2c_smbus_read_word_data(chip->client, reg << 1);
+ if (retval < 0) {
+ dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
+ __func__, reg, retval);
+ return retval;
+ }
+
+ *val = (u16)retval;
+ return 0;
+}
+
+static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
+{
+ struct input_dev *input = chip->input;
+ u16 reg_val, val;
+ int error, i, pin_index;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &reg_val);
+ if (error)
+ return;
+
+ reg_val &= chip->pinmask;
+
+ /* Figure out which lines have changed */
+ val = reg_val ^ chip->reg_input;
+ chip->reg_input = reg_val;
+
+ for (i = 0, pin_index = 0; i < 16; i++) {
+ if (val & (1 << i)) {
+ struct tca6416_button *button = &chip->buttons[pin_index];
+ unsigned int type = button->type ?: EV_KEY;
+ int state = ((reg_val & (1 << i)) ? 1 : 0)
+ ^ button->active_low;
+
+ input_event(input, type, button->code, !!state);
+ input_sync(input);
+ }
+
+ if (chip->pinmask & (1 << i))
+ pin_index++;
+ }
+}
+
+/*
+ * This is a threaded IRQ handler and it can (and will) sleep.
+ */
+static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
+{
+ struct tca6416_keypad_chip *chip = dev_id;
+
+ tca6416_keys_scan(chip);
+
+ return IRQ_HANDLED;
+}
+
+static void tca6416_keys_work_func(struct work_struct *work)
+{
+ struct tca6416_keypad_chip *chip =
+ container_of(work, struct tca6416_keypad_chip, dwork.work);
+
+ tca6416_keys_scan(chip);
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+}
+
+static int tca6416_keys_open(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ /* Get initial device state in case it has switches */
+ tca6416_keys_scan(chip);
+
+ if (chip->use_polling)
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+ else
+ enable_irq(chip->irqnum);
+
+ return 0;
+}
+
+static void tca6416_keys_close(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ if (chip->use_polling)
+ cancel_delayed_work_sync(&chip->dwork);
+ else
+ disable_irq(chip->irqnum);
+}
+
+static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+{
+ int error;
+
+ error = tca6416_read_reg(chip, TCA6416_OUTPUT, &chip->reg_output);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ /* ensure that keypad pins are set to input */
+ error = tca6416_write_reg(chip, TCA6416_DIRECTION,
+ chip->reg_direction | chip->pinmask);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &chip->reg_input);
+ if (error)
+ return error;
+
+ chip->reg_input &= chip->pinmask;
+
+ return 0;
+}
+
+static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6416_keys_platform_data *pdata;
+ struct tca6416_keypad_chip *chip;
+ struct input_dev *input;
+ int error;
+ int i;
+
+ /* Check functionality */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(struct tca6416_keypad_chip) +
+ pdata->nbuttons * sizeof(struct tca6416_button),
+ GFP_KERNEL);
+ input = input_allocate_device();
+ if (!chip || !input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ chip->client = client;
+ chip->input = input;
+ chip->pinmask = pdata->pinmask;
+ chip->use_polling = pdata->use_polling;
+
+ INIT_DELAYED_WORK(&chip->dwork, tca6416_keys_work_func);
+
+ input->phys = "tca6416-keys/input0";
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->open = tca6416_keys_open;
+ input->close = tca6416_keys_close;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ unsigned int type;
+
+ chip->buttons[i] = pdata->buttons[i];
+ type = (pdata->buttons[i].type) ?: EV_KEY;
+ input_set_capability(input, type, pdata->buttons[i].code);
+ }
+
+ input_set_drvdata(input, chip);
+
+ /*
+ * Initialize cached registers from their original values.
+ * We can't share this chip with another I2C master.
+ */
+ error = tca6416_setup_registers(chip);
+ if (error)
+ goto fail1;
+
+ if (!chip->use_polling) {
+ if (pdata->irq_is_gpio)
+ chip->irqnum = gpio_to_irq(client->irq);
+ else
+ chip->irqnum = client->irq;
+
+ error = request_threaded_irq(chip->irqnum, NULL,
+ tca6416_keys_isr,
+ IRQF_TRIGGER_FALLING,
+ "tca6416-keypad", chip);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ chip->irqnum, error);
+ goto fail1;
+ }
+ disable_irq(chip->irqnum);
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail2;
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+
+fail2:
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+fail1:
+ input_free_device(input);
+ kfree(chip);
+ return error;
+}
+
+static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+{
+ struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+
+ input_unregister_device(chip->input);
+ kfree(chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca6416_keypad_driver = {
+ .driver = {
+ .name = "tca6416-keypad",
+ },
+ .probe = tca6416_keypad_probe,
+ .remove = __devexit_p(tca6416_keypad_remove),
+ .id_table = tca6416_id,
+};
+
+static int __init tca6416_keypad_init(void)
+{
+ return i2c_add_driver(&tca6416_keypad_driver);
+}
+
+subsys_initcall(tca6416_keypad_init);
+
+static void __exit tca6416_keypad_exit(void)
+{
+ i2c_del_driver(&tca6416_keypad_driver);
+}
+module_exit(tca6416_keypad_exit);
+
+MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
+MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
deleted file mode 100644
index 3910f269cfc..00000000000
--- a/drivers/input/keyboard/tosakbd.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Keyboard driver for Sharp Tosa models (SL-6000x)
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2007 Dmitry Baryshkov
- *
- * Based on xtkbd.c/locomkbd.c/corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include <mach/gpio.h>
-#include <mach/tosa.h>
-
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1
-
-#define SCAN_INTERVAL (HZ/10)
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-static unsigned short tosakbd_keycode[NR_SCANCODES] = {
-0,
-0, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
-KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
-KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
-0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
-KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
-0, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
-KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
-0, 0, 0,
-};
-
-struct tosakbd {
- unsigned short keycode[ARRAY_SIZE(tosakbd_keycode)];
- struct input_dev *input;
- bool suspended;
- spinlock_t lock; /* protect kbd scanning */
- struct timer_list timer;
-};
-
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
-
-static inline void tosakbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
- GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
-}
-
-static inline void tosakbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
- GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* STATE CLEAR */
- GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
-}
-
-static inline void tosakbd_activate_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> High, not col -> HiZ */
- GPSR1 = TOSA_GPIO_STROBE_BIT(col);
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = TOSA_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-
-static inline void tosakbd_reset_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> Low */
- GPCR1 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> Low */
- GPCR2 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-/*
- * The tosa keyboard only generates interrupts when a key is pressed.
- * So when a key is pressed, we enable a timer. This timer scans the
- * keyboard, and this is how we detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void tosakbd_scankeyboard(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed = 0;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
-
- if (tosakbd->suspended)
- goto out;
-
- for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
- tosakbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- tosakbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
-
- for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
- unsigned int scancode, pressed;
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- if (pressed && !tosakbd->keycode[scancode])
- dev_warn(&dev->dev,
- "unhandled scancode: 0x%02x\n",
- scancode);
-
- input_report_key(tosakbd->input,
- tosakbd->keycode[scancode],
- pressed);
- if (pressed)
- num_pressed++;
- }
-
- tosakbd_reset_col(col);
- }
-
- tosakbd_activate_all();
-
- input_sync(tosakbd->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
-
- out:
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-}
-
-/*
- * tosa keyboard interrupt handler.
- */
-static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
-{
- struct platform_device *dev = __dev;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- if (!timer_pending(&tosakbd->timer)) {
- /** wait chattering delay **/
- udelay(20);
- tosakbd_scankeyboard(dev);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * tosa timer checking for released keys
- */
-static void tosakbd_timer_callback(unsigned long __dev)
-{
- struct platform_device *dev = (struct platform_device *)__dev;
-
- tosakbd_scankeyboard(dev);
-}
-
-#ifdef CONFIG_PM
-static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
- tosakbd->suspended = true;
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-
- del_timer_sync(&tosakbd->timer);
-
- return 0;
-}
-
-static int tosakbd_resume(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- tosakbd->suspended = false;
- tosakbd_scankeyboard(dev);
-
- return 0;
-}
-#else
-#define tosakbd_suspend NULL
-#define tosakbd_resume NULL
-#endif
-
-static int __devinit tosakbd_probe(struct platform_device *pdev) {
-
- int i;
- struct tosakbd *tosakbd;
- struct input_dev *input_dev;
- int error;
-
- tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
- if (!tosakbd)
- return -ENOMEM;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- kfree(tosakbd);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, tosakbd);
-
- spin_lock_init(&tosakbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&tosakbd->timer);
- tosakbd->timer.function = tosakbd_timer_callback;
- tosakbd->timer.data = (unsigned long) pdev;
-
- tosakbd->input = input_dev;
-
- input_set_drvdata(input_dev, tosakbd);
- input_dev->name = "Tosa Keyboard";
- input_dev->phys = "tosakbd/input0";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- input_dev->keycode = tosakbd->keycode;
- input_dev->keycodesize = sizeof(tosakbd->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
-
- memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
-
- for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
- __set_bit(tosakbd->keycode[i], input_dev->keybit);
- __clear_bit(KEY_RESERVED, input_dev->keybit);
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_SENSE(i);
- int irq;
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail;
- }
-
- error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- irq = gpio_to_irq(gpio);
- if (irq < 0) {
- error = irq;
- printk(KERN_ERR "gpio-keys: Unable to get irq number"
- " for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- error = request_irq(irq, tosakbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "tosakbd", pdev);
-
- if (error) {
- printk("tosakbd: Can't get IRQ: %d: error %d!\n",
- irq, error);
- gpio_free(gpio);
- goto fail;
- }
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_STROBE(i);
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail2;
- }
-
- error = gpio_direction_output(gpio, 1);
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail2;
- }
-
- }
-
- error = input_register_device(input_dev);
- if (error) {
- printk(KERN_ERR "tosakbd: Unable to register input device, "
- "error: %d\n", error);
- goto fail2;
- }
-
- printk(KERN_INFO "input: Tosa Keyboard Registered\n");
-
- return 0;
-
-fail2:
- while (--i >= 0)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- i = TOSA_KEY_SENSE_NUM;
-fail:
- while (--i >= 0) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- platform_set_drvdata(pdev, NULL);
- input_free_device(input_dev);
- kfree(tosakbd);
-
- return error;
-}
-
-static int __devexit tosakbd_remove(struct platform_device *dev)
-{
- int i;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- del_timer_sync(&tosakbd->timer);
-
- input_unregister_device(tosakbd->input);
-
- kfree(tosakbd);
-
- return 0;
-}
-
-static struct platform_driver tosakbd_driver = {
- .probe = tosakbd_probe,
- .remove = __devexit_p(tosakbd_remove),
- .suspend = tosakbd_suspend,
- .resume = tosakbd_resume,
- .driver = {
- .name = "tosa-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __devinit tosakbd_init(void)
-{
- return platform_driver_register(&tosakbd_driver);
-}
-
-static void __exit tosakbd_exit(void)
-{
- platform_driver_unregister(&tosakbd_driver);
-}
-
-module_init(tosakbd_init);
-module_exit(tosakbd_exit);
-
-MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
-MODULE_DESCRIPTION("Tosa Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tosa-keyboard");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 40dabd8487b..4cc82826ea6 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -87,7 +87,6 @@ static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
info->idev->phys = "88pm860x_on/input0";
info->idev->id.bustype = BUS_I2C;
info->idev->dev.parent = &pdev->dev;
- info->irq = irq;
info->idev->evbit[0] = BIT_MASK(EV_KEY);
info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 23140a3bb8e..c44b9eafc55 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,6 +22,36 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AD714X
+ tristate "Analog Devices AD714x Capacitance Touch Sensor"
+ help
+ Say Y here if you want to support an AD7142/3/7/8/7A touch sensor.
+
+ You should select a bus connection too.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x.
+
+config INPUT_AD714X_I2C
+ tristate "support I2C bus connection"
+ depends on INPUT_AD714X && I2C
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-i2c.
+
+config INPUT_AD714X_SPI
+ tristate "support SPI bus connection"
+ depends on INPUT_AD714X && SPI
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to a SPI bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-spi.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -50,6 +80,16 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX8925_ONKEY
+ tristate "MAX8925 ONKEY support"
+ depends on MFD_MAX8925
+ help
+ Support the ONKEY of MAX8925 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called max8925_onkey.
+
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
@@ -277,6 +317,16 @@ config INPUT_PCF50633_PMU
Say Y to include support for delivering PMU events via input
layer on NXP PCF50633.
+config INPUT_PCF8574
+ tristate "PCF8574 Keypad input device"
+ depends on I2C && EXPERIMENTAL
+ help
+ Say Y here if you want to support a keypad connected via I2C
+ with a PCF8574.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcf8574_keypad.
+
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
depends on GPIOLIB && GENERIC_GPIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 7e95a5d474d..71fe57d8023 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,9 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AD714X) += ad714x.o
+obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
+obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
@@ -17,8 +20,10 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
+obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
new file mode 100644
index 00000000000..e9adbe49f6a
--- /dev/null
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (I2C bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h> /* BUS_I2C */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#ifdef CONFIG_PM
+static int ad714x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ return ad714x_disable(i2c_get_clientdata(client));
+}
+
+static int ad714x_i2c_resume(struct i2c_client *client)
+{
+ return ad714x_enable(i2c_get_clientdata(client));
+}
+#else
+# define ad714x_i2c_suspend NULL
+# define ad714x_i2c_resume NULL
+#endif
+
+static int ad714x_i2c_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)&data;
+
+ u8 tx[4] = {
+ _reg[1],
+ _reg[0],
+ _data[1],
+ _data[0]
+ };
+
+ ret = i2c_master_send(client, tx, 4);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int ad714x_i2c_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)data;
+
+ u8 tx[2] = {
+ _reg[1],
+ _reg[0]
+ };
+ u8 rx[2];
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret >= 0)
+ ret = i2c_master_recv(client, rx, 2);
+
+ if (unlikely(ret < 0)) {
+ dev_err(&client->dev, "I2C read error\n");
+ } else {
+ _data[0] = rx[1];
+ _data[1] = rx[0];
+ }
+
+ return ret;
+}
+
+static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&client->dev, BUS_I2C, client->irq,
+ ad714x_i2c_read, ad714x_i2c_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+{
+ struct ad714x_chip *chip = i2c_get_clientdata(client);
+
+ ad714x_remove(chip);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad714x_id[] = {
+ { "ad7142_captouch", 0 },
+ { "ad7143_captouch", 0 },
+ { "ad7147_captouch", 0 },
+ { "ad7147a_captouch", 0 },
+ { "ad7148_captouch", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad714x_id);
+
+static struct i2c_driver ad714x_i2c_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ },
+ .probe = ad714x_i2c_probe,
+ .remove = __devexit_p(ad714x_i2c_remove),
+ .suspend = ad714x_i2c_suspend,
+ .resume = ad714x_i2c_resume,
+ .id_table = ad714x_id,
+};
+
+static __init int ad714x_i2c_init(void)
+{
+ return i2c_add_driver(&ad714x_i2c_driver);
+}
+module_init(ad714x_i2c_init);
+
+static __exit void ad714x_i2c_exit(void)
+{
+ i2c_del_driver(&ad714x_i2c_driver);
+}
+module_exit(ad714x_i2c_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor I2C Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
new file mode 100644
index 00000000000..7f8dedfd1bf
--- /dev/null
+++ b/drivers/input/misc/ad714x-spi.c
@@ -0,0 +1,103 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (SPI bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h> /* BUS_SPI */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
+#define AD714x_SPI_READ BIT(10)
+
+#ifdef CONFIG_PM
+static int ad714x_spi_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return ad714x_disable(spi_get_drvdata(spi));
+}
+
+static int ad714x_spi_resume(struct spi_device *spi)
+{
+ return ad714x_enable(spi_get_drvdata(spi));
+}
+#else
+# define ad714x_spi_suspend NULL
+# define ad714x_spi_resume NULL
+#endif
+
+static int ad714x_spi_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg;
+
+ return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2);
+}
+
+static int ad714x_spi_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx[2] = {
+ AD714x_SPI_CMD_PREFIX | reg,
+ data
+ };
+
+ return spi_write(spi, (u8 *)tx, 4);
+}
+
+static int __devinit ad714x_spi_probe(struct spi_device *spi)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
+ ad714x_spi_read, ad714x_spi_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ spi_set_drvdata(spi, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_spi_remove(struct spi_device *spi)
+{
+ struct ad714x_chip *chip = spi_get_drvdata(spi);
+
+ ad714x_remove(chip);
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static struct spi_driver ad714x_spi_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad714x_spi_probe,
+ .remove = __devexit_p(ad714x_spi_remove),
+ .suspend = ad714x_spi_suspend,
+ .resume = ad714x_spi_resume,
+};
+
+static __init int ad714x_spi_init(void)
+{
+ return spi_register_driver(&ad714x_spi_driver);
+}
+module_init(ad714x_spi_init);
+
+static __exit void ad714x_spi_exit(void)
+{
+ spi_unregister_driver(&ad714x_spi_driver);
+}
+module_exit(ad714x_spi_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
new file mode 100644
index 00000000000..0fe27baf5e7
--- /dev/null
+++ b/drivers/input/misc/ad714x.c
@@ -0,0 +1,1347 @@
+/*
+ * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input/ad714x.h>
+#include "ad714x.h"
+
+#define AD714X_PWR_CTRL 0x0
+#define AD714X_STG_CAL_EN_REG 0x1
+#define AD714X_AMB_COMP_CTRL0_REG 0x2
+#define AD714X_PARTID_REG 0x17
+#define AD7142_PARTID 0xE620
+#define AD7143_PARTID 0xE630
+#define AD7147_PARTID 0x1470
+#define AD7148_PARTID 0x1480
+#define AD714X_STAGECFG_REG 0x80
+#define AD714X_SYSCFG_REG 0x0
+
+#define STG_LOW_INT_EN_REG 0x5
+#define STG_HIGH_INT_EN_REG 0x6
+#define STG_COM_INT_EN_REG 0x7
+#define STG_LOW_INT_STA_REG 0x8
+#define STG_HIGH_INT_STA_REG 0x9
+#define STG_COM_INT_STA_REG 0xA
+
+#define CDC_RESULT_S0 0xB
+#define CDC_RESULT_S1 0xC
+#define CDC_RESULT_S2 0xD
+#define CDC_RESULT_S3 0xE
+#define CDC_RESULT_S4 0xF
+#define CDC_RESULT_S5 0x10
+#define CDC_RESULT_S6 0x11
+#define CDC_RESULT_S7 0x12
+#define CDC_RESULT_S8 0x13
+#define CDC_RESULT_S9 0x14
+#define CDC_RESULT_S10 0x15
+#define CDC_RESULT_S11 0x16
+
+#define STAGE0_AMBIENT 0xF1
+#define STAGE1_AMBIENT 0x115
+#define STAGE2_AMBIENT 0x139
+#define STAGE3_AMBIENT 0x15D
+#define STAGE4_AMBIENT 0x181
+#define STAGE5_AMBIENT 0x1A5
+#define STAGE6_AMBIENT 0x1C9
+#define STAGE7_AMBIENT 0x1ED
+#define STAGE8_AMBIENT 0x211
+#define STAGE9_AMBIENT 0x234
+#define STAGE10_AMBIENT 0x259
+#define STAGE11_AMBIENT 0x27D
+
+#define PER_STAGE_REG_NUM 36
+#define STAGE_NUM 12
+#define STAGE_CFGREG_NUM 8
+#define SYS_CFGREG_NUM 8
+
+/*
+ * driver-side state used to maintain the software state machines
+ */
+enum ad714x_device_state { IDLE, JITTER, ACTIVE, SPACE };
+
+struct ad714x_slider_drv {
+ int highest_stage;
+ int abs_pos;
+ int flt_pos;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_wheel_drv {
+ int abs_pos;
+ int flt_pos;
+ int pre_mean_value;
+ int pre_highest_stage;
+ int pre_mean_value_no_offset;
+ int mean_value;
+ int mean_value_no_offset;
+ int pos_offset;
+ int pos_ratio;
+ int highest_stage;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_touchpad_drv {
+ int x_highest_stage;
+ int x_flt_pos;
+ int x_abs_pos;
+ int y_highest_stage;
+ int y_flt_pos;
+ int y_abs_pos;
+ int left_ep;
+ int left_ep_val;
+ int right_ep;
+ int right_ep_val;
+ int top_ep;
+ int top_ep_val;
+ int bottom_ep;
+ int bottom_ep_val;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_button_drv {
+ enum ad714x_device_state state;
+ /*
+ * Unlike the slider/wheel/touchpad, all buttons share the
+ * same input_dev instance
+ */
+ struct input_dev *input;
+};
+
+struct ad714x_driver_data {
+ struct ad714x_slider_drv *slider;
+ struct ad714x_wheel_drv *wheel;
+ struct ad714x_touchpad_drv *touchpad;
+ struct ad714x_button_drv *button;
+};
+
+/*
+ * chip-level state gathered in one place; used as the private data
+ * of the SPI/I2C device
+ */
+struct ad714x_chip {
+ unsigned short h_state;
+ unsigned short l_state;
+ unsigned short c_state;
+ unsigned short adc_reg[STAGE_NUM];
+ unsigned short amb_reg[STAGE_NUM];
+ unsigned short sensor_val[STAGE_NUM];
+
+ struct ad714x_platform_data *hw;
+ struct ad714x_driver_data *sw;
+
+ int irq;
+ struct device *dev;
+ ad714x_read_t read;
+ ad714x_write_t write;
+
+ struct mutex mutex;
+
+ unsigned product;
+ unsigned version;
+};
+
+static void ad714x_use_com_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data |= 1 << start_stage;
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data &= ~mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data &= ~(1 << start_stage);
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data |= mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ int max_res = 0;
+ int max_idx = 0;
+ int i;
+
+ for (i = start_stage; i <= end_stage; i++) {
+ if (ad714x->sensor_val[i] > max_res) {
+ max_res = ad714x->sensor_val[i];
+ max_idx = i;
+ }
+ }
+
+ return max_idx;
+}
+
+static int ad714x_cal_abs_pos(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage,
+ int highest_stage, int max_coord)
+{
+ int a_param, b_param;
+
+ if (highest_stage == start_stage) {
+ a_param = ad714x->sensor_val[start_stage + 1];
+ b_param = ad714x->sensor_val[start_stage] +
+ ad714x->sensor_val[start_stage + 1];
+ } else if (highest_stage == end_stage) {
+ a_param = ad714x->sensor_val[end_stage] *
+ (end_stage - start_stage) +
+ ad714x->sensor_val[end_stage - 1] *
+ (end_stage - start_stage - 1);
+ b_param = ad714x->sensor_val[end_stage] +
+ ad714x->sensor_val[end_stage - 1];
+ } else {
+ a_param = ad714x->sensor_val[highest_stage] *
+ (highest_stage - start_stage) +
+ ad714x->sensor_val[highest_stage - 1] *
+ (highest_stage - start_stage - 1) +
+ ad714x->sensor_val[highest_stage + 1] *
+ (highest_stage - start_stage + 1);
+ b_param = ad714x->sensor_val[highest_stage] +
+ ad714x->sensor_val[highest_stage - 1] +
+ ad714x->sensor_val[highest_stage + 1];
+ }
+
+ return (max_coord / (end_stage - start_stage)) * a_param / b_param;
+}
+
+/*
+ * One button can be connected to multiple positive and negative CDC inputs;
+ * multiple buttons can share the same positive/negative input of one CDC.
+ */
+static void ad714x_button_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_button_plat *hw = &ad714x->hw->button[idx];
+ struct ad714x_button_drv *sw = &ad714x->sw->button[idx];
+
+ switch (sw->state) {
+ case IDLE:
+ if (((ad714x->h_state & hw->h_mask) == hw->h_mask) &&
+ ((ad714x->l_state & hw->l_mask) == hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d touched\n", idx);
+ input_report_key(sw->input, hw->keycode, 1);
+ input_sync(sw->input);
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (((ad714x->h_state & hw->h_mask) != hw->h_mask) ||
+ ((ad714x->l_state & hw->l_mask) != hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d released\n", idx);
+ input_report_key(sw->input, hw->keycode, 0);
+ input_sync(sw->input);
+ sw->state = IDLE;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
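The state machine above treats a button as touched only when every bit of both of its masks (h_mask and l_mask) is set in the corresponding interrupt status word, and as released as soon as either match is lost. A tiny standalone sketch of that mask test, with invented masks and status values:

#include <stdio.h>

/* A button is considered touched when all of its mask bits are set. */
static int button_touched(unsigned short h_state, unsigned short h_mask,
			  unsigned short l_state, unsigned short l_mask)
{
	return ((h_state & h_mask) == h_mask) && ((l_state & l_mask) == l_mask);
}

int main(void)
{
	/* invented masks: the button uses stages 2 and 3 (high), 0 and 1 (low) */
	unsigned short h_mask = 0x000C, l_mask = 0x0003;

	printf("touched: %d\n", button_touched(0x000C, h_mask, 0x0003, l_mask));
	printf("touched: %d\n", button_touched(0x0004, h_mask, 0x0003, l_mask));
	return 0;
}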
+
+/*
+ * The response of a sensor is defined as the absolute difference, in codes,
+ * between the current CDC value and the ambient value.
+ */
+static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+
+ ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
+ ad714x->amb_reg[i]);
+ }
+}
+
+static void ad714x_slider_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "slider %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+/*
+ * The formula is straightforward. It uses the sensor with the
+ * highest response and the 2 adjacent ones.
+ * When sensor 0 has the highest response, only sensor 0 and sensor 1
+ * are used in the calculation. Similarly, when the last sensor has the
+ * highest response, only the last sensor and the second-to-last sensor
+ * are used in the calculation.
+ *
+ * For i= idx_of_peak_Sensor-1 to i= idx_of_peak_Sensor+1
+ * v += Sensor response(i)*i
+ * w += Sensor response(i)
+ * POS=(Number_of_Positions_Wanted/(Number_of_Sensors_Used-1)) *(v/w)
+ */
+static void ad714x_slider_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->abs_pos = ad714x_cal_abs_pos(ad714x, hw->start_stage, hw->end_stage,
+ sw->highest_stage, hw->max_coord);
+
+ dev_dbg(ad714x->dev, "slider %d absolute position:%d\n", idx,
+ sw->abs_pos);
+}
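A worked instance of the weighted-average position formula described in the comment above, mirroring the middle-of-slider branch of ad714x_cal_abs_pos(); the sensor responses, stage indices and max_coord below are all invented for illustration:

#include <stdio.h>

/*
 * Weighted average over the peak stage and its two neighbours,
 * scaled to max_coord (middle-of-slider case).
 */
int main(void)
{
	int val[] = { 0, 10, 80, 120, 60, 0 };	/* invented responses, stages 0..5 */
	int start = 0, end = 5, peak = 3, max_coord = 128;
	int v, w, pos;

	v = val[peak]     * (peak - start) +
	    val[peak - 1] * (peak - start - 1) +
	    val[peak + 1] * (peak - start + 1);
	w = val[peak] + val[peak - 1] + val[peak + 1];

	pos = (max_coord / (end - start)) * v / w;
	printf("absolute position: %d of %d\n", pos, max_coord);
	return 0;
}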
+
+/*
+ * To minimise the impact of noise on the algorithm, ADI developed a
+ * routine that filters the CDC results after they have been read by the
+ * host processor.
+ * The filter used is an Infinite Impulse Response (IIR) filter implemented
+ * in firmware; it attenuates the noise on the CDC results.
+ * Filtered_CDC_result = (Filtered_CDC_result * (10 - Coefficient) +
+ * Latest_CDC_result * Coefficient)/10
+ */
+static void ad714x_slider_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->flt_pos = (sw->flt_pos * (10 - 4) +
+ sw->abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "slider %d filter position:%d\n", idx,
+ sw->flt_pos);
+}
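The filtering step above blends 60% of the previous filtered position with 40% of the newest absolute position (coefficient 4 of 10). A tiny standalone sketch of that IIR recurrence on a few invented samples:

#include <stdio.h>

/* Filtered = (Filtered * (10 - coeff) + Latest * coeff) / 10, with coeff = 4 */
int main(void)
{
	int samples[] = { 100, 100, 40, 40, 40 };	/* invented positions */
	int flt = samples[0];
	int i;

	for (i = 1; i < 5; i++) {
		flt = (flt * (10 - 4) + samples[i] * 4) / 10;
		printf("sample %d -> filtered %d\n", samples[i], flt);
	}
	return 0;
}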
+
+static void ad714x_slider_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_slider_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "slider %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ ad714x_slider_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_X, sw->flt_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_slider_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "slider %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and apply weights to these sensors.
+ */
+static void ad714x_wheel_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+
+ sw->pre_highest_stage = sw->highest_stage;
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "wheel %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and apply weights to these sensors. The
+ * result of this computation gives us the mean value, which is defined by the
+ * following formula:
+ * For i= second_before_highest_stage to i= second_after_highest_stage
+ * v += Sensor response(i)*WEIGHT*(i+3)
+ * w += Sensor response(i)
+ * Mean_Value=v/w
+ * pos_on_scrollwheel = (Mean_Value - position_offset) / position_ratio
+ */
+
+#define WEIGHT_FACTOR 30
+/* This constant prevents the "PositionOffset" from reaching a big value */
+#define OFFSET_POSITION_CLAMP 120
+static void ad714x_wheel_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ int stage_num = hw->end_stage - hw->start_stage + 1;
+ int second_before, first_before, highest, first_after, second_after;
+ int a_param, b_param;
+
+ /* Calculate Mean value */
+
+ second_before = (sw->highest_stage + stage_num - 2) % stage_num;
+ first_before = (sw->highest_stage + stage_num - 1) % stage_num;
+ highest = sw->highest_stage;
+ first_after = (sw->highest_stage + stage_num + 1) % stage_num;
+ second_after = (sw->highest_stage + stage_num + 2) % stage_num;
+
+ if (((sw->highest_stage - hw->start_stage) > 1) &&
+ ((hw->end_stage - sw->highest_stage) > 1)) {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 3) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 3);
+ } else {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 1) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 2) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 4) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 5);
+ }
+ a_param *= WEIGHT_FACTOR;
+
+ b_param = ad714x->sensor_val[second_before] +
+ ad714x->sensor_val[first_before] +
+ ad714x->sensor_val[highest] +
+ ad714x->sensor_val[first_after] +
+ ad714x->sensor_val[second_after];
+
+ sw->pre_mean_value = sw->mean_value;
+ sw->mean_value = a_param / b_param;
+
+ /* Calculate the offset */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_offset = sw->mean_value;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_offset = sw->pre_mean_value;
+
+ if (sw->pos_offset > OFFSET_POSITION_CLAMP)
+ sw->pos_offset = OFFSET_POSITION_CLAMP;
+
+ /* Calculate the mean value without the offset */
+
+ sw->pre_mean_value_no_offset = sw->mean_value_no_offset;
+ sw->mean_value_no_offset = sw->mean_value - sw->pos_offset;
+ if (sw->mean_value_no_offset < 0)
+ sw->mean_value_no_offset = 0;
+
+ /* Calculate ratio to scale down to NUMBER_OF_WANTED_POSITIONS */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_ratio = (sw->pre_mean_value_no_offset * 100) /
+ hw->max_coord;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_ratio = (sw->mean_value_no_offset * 100) /
+ hw->max_coord;
+ sw->abs_pos = (sw->mean_value_no_offset * 100) / sw->pos_ratio;
+ if (sw->abs_pos > hw->max_coord)
+ sw->abs_pos = hw->max_coord;
+}
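The function above implements the weighted-mean formula from the preceding comment, then subtracts an offset and rescales the result. The sketch below isolates just the weighted-mean step as the comment states it, with invented stage numbers and responses; the driver's own branches additionally handle wrap-around near the ends of the wheel:

#include <stdio.h>

#define WEIGHT 30	/* plays the role of WEIGHT_FACTOR above */

int main(void)
{
	/* invented responses for stages 2..6, peak at stage 4 */
	int stage[] = { 2, 3, 4, 5, 6 };
	int resp[]  = { 10, 60, 100, 50, 5 };
	int v = 0, w = 0, i;

	for (i = 0; i < 5; i++) {
		v += resp[i] * WEIGHT * (stage[i] + 3);
		w += resp[i];
	}

	/* the driver then subtracts pos_offset and divides by pos_ratio */
	printf("mean value: %d\n", v / w);
	return 0;
}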
+
+static void ad714x_wheel_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ if (((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage)) ||
+ ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage)))
+ sw->flt_pos = sw->abs_pos;
+ else
+ sw->flt_pos = ((sw->flt_pos * 30) + (sw->abs_pos * 71)) / 100;
+
+ if (sw->flt_pos > hw->max_coord)
+ sw->flt_pos = hw->max_coord;
+}
+
+static void ad714x_wheel_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_wheel_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "wheel %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ ad714x_wheel_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_WHEEL,
+ sw->abs_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_wheel_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+
+ dev_dbg(ad714x->dev, "wheel %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+static void touchpad_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->x_start_stage, hw->x_end_stage);
+ sw->y_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->y_start_stage, hw->y_end_stage);
+
+ dev_dbg(ad714x->dev,
+ "touchpad %d x_highest_stage:%d, y_highest_stage:%d\n",
+ idx, sw->x_highest_stage, sw->y_highest_stage);
+}
+
+/*
+ * If 2 fingers are touching the sensor then 2 peaks can be observed in the
+ * distribution.
+ * The arithmetic does not yet support calculating absolute coordinates for
+ * multi-touch.
+ */
+static int touchpad_check_second_peak(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i < sw->x_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->x_highest_stage; i < hw->x_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ for (i = hw->y_start_stage; i < sw->y_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->y_highest_stage; i < hw->y_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * If only one finger is used to activate the touch pad then only 1 peak will be
+ * registered in the distribution. This peak and the 2 adjacent sensors will be
+ * used in the calculation of the absolute position. This prevents hand
+ * shadows from affecting the absolute position calculation.
+ */
+static void touchpad_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_abs_pos = ad714x_cal_abs_pos(ad714x, hw->x_start_stage,
+ hw->x_end_stage, sw->x_highest_stage, hw->x_max_coord);
+ sw->y_abs_pos = ad714x_cal_abs_pos(ad714x, hw->y_start_stage,
+ hw->y_end_stage, sw->y_highest_stage, hw->y_max_coord);
+
+ dev_dbg(ad714x->dev, "touchpad %d absolute position:(%d, %d)\n", idx,
+ sw->x_abs_pos, sw->y_abs_pos);
+}
+
+static void touchpad_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_flt_pos = (sw->x_flt_pos * (10 - 4) +
+ sw->x_abs_pos * 4)/10;
+ sw->y_flt_pos = (sw->y_flt_pos * (10 - 4) +
+ sw->y_abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "touchpad %d filter position:(%d, %d)\n",
+ idx, sw->x_flt_pos, sw->y_flt_pos);
+}
+
+/*
+ * To prevent distortion from showing in the absolute position, it is
+ * necessary to detect the end points. When endpoints are detected, the
+ * driver stops updating the status variables with absolute positions.
+ * End points are detected on the 4 edges of the touchpad sensor. The
+ * method to detect them is the same for all 4.
+ * To detect the end points, the firmware computes the difference in
+ * percent between the sensor on the edge and the adjacent one. The
+ * difference is calculated in percent in order to make the end point
+ * detection independent of the pressure.
+ */
+
+#define LEFT_END_POINT_DETECTION_LEVEL 550
+#define RIGHT_END_POINT_DETECTION_LEVEL 750
+#define LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL 850
+#define TOP_END_POINT_DETECTION_LEVEL 550
+#define BOTTOM_END_POINT_DETECTION_LEVEL 950
+#define TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL 700
+static int touchpad_check_endpoint(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int percent_sensor_diff;
+
+ /* left endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_start_stage] -
+ ad714x->sensor_val[hw->x_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ if (!sw->left_ep) {
+ if (percent_sensor_diff >= LEFT_END_POINT_DETECTION_LEVEL) {
+ sw->left_ep = 1;
+ sw->left_ep_val =
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < LEFT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_start_stage + 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->left_ep_val))
+ sw->left_ep = 0;
+ }
+
+ /* right endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_end_stage] -
+ ad714x->sensor_val[hw->x_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ if (!sw->right_ep) {
+ if (percent_sensor_diff >= RIGHT_END_POINT_DETECTION_LEVEL) {
+ sw->right_ep = 1;
+ sw->right_ep_val =
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < RIGHT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_end_stage - 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->right_ep_val))
+ sw->right_ep = 0;
+ }
+
+ /* top endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_start_stage] -
+ ad714x->sensor_val[hw->y_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ if (!sw->top_ep) {
+ if (percent_sensor_diff >= TOP_END_POINT_DETECTION_LEVEL) {
+ sw->top_ep = 1;
+ sw->top_ep_val =
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < TOP_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_start_stage + 1] >
+ TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->top_ep_val))
+ sw->top_ep = 0;
+ }
+
+ /* bottom endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_end_stage] -
+ ad714x->sensor_val[hw->y_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ if (!sw->bottom_ep) {
+ if (percent_sensor_diff >= BOTTOM_END_POINT_DETECTION_LEVEL) {
+ sw->bottom_ep = 1;
+ sw->bottom_ep_val =
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < BOTTOM_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_end_stage - 1] >
+ TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->bottom_ep_val))
+ sw->bottom_ep = 0;
+ }
+
+ return sw->left_ep || sw->right_ep || sw->top_ep || sw->bottom_ep;
+}
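Every edge test in the function above reduces to the same comparison: the percentage by which the edge sensor's response exceeds its neighbour's, measured relative to the neighbour. A stripped-down sketch of a single such test, with an invented threshold and values:

#include <stdio.h>

#define EDGE_DETECTION_LEVEL 550	/* hypothetical threshold, in percent */

/* Return 1 when the edge sensor dominates its neighbour by the threshold. */
static int at_edge(int edge_val, int neighbour_val)
{
	int percent_diff = (edge_val - neighbour_val) * 100 / neighbour_val;

	return percent_diff >= EDGE_DETECTION_LEVEL;
}

int main(void)
{
	printf("edge detected: %d\n", at_edge(700, 100));	/* 600 percent: yes */
	printf("edge detected: %d\n", at_edge(300, 200));	/* 50 percent: no */
	return 0;
}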
+
+static void touchpad_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_com_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+}
+
+static void touchpad_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_thr_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+ ad714x_use_thr_int(ad714x, hw->y_start_stage, hw->y_end_stage);
+}
+
+static void ad714x_touchpad_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = (((1 << (hw->x_end_stage + 1)) - 1) -
+ ((1 << hw->x_start_stage) - 1)) +
+ (((1 << (hw->y_end_stage + 1)) - 1) -
+ ((1 << hw->y_start_stage) - 1));
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ touchpad_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "touchpad %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx)) &&
+ (!touchpad_check_endpoint(ad714x, idx))) {
+ dev_dbg(ad714x->dev,
+ "touchpad %d, single finger, no endpoint\n",
+ idx);
+ touchpad_cal_abs_pos(ad714x, idx);
+ sw->x_flt_pos = sw->x_abs_pos;
+ sw->y_flt_pos = sw->y_abs_pos;
+ sw->state = ACTIVE;
+ }
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx))
+ && (!touchpad_check_endpoint(ad714x, idx))) {
+ touchpad_cal_abs_pos(ad714x, idx);
+ touchpad_cal_flt_pos(ad714x, idx);
+ input_report_abs(sw->input, ABS_X,
+ sw->x_flt_pos);
+ input_report_abs(sw->input, ABS_Y,
+ sw->y_flt_pos);
+ input_report_key(sw->input, BTN_TOUCH,
+ 1);
+ }
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ touchpad_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "touchpad %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int ad714x_hw_detect(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data);
+ switch (data & 0xFFF0) {
+ case AD7142_PARTID:
+ ad714x->product = 0x7142;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7142 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7143_PARTID:
+ ad714x->product = 0x7143;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7143 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7147_PARTID:
+ ad714x->product = 0x7147;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7147(A) captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7148_PARTID:
+ ad714x->product = 0x7148;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7148 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ default:
+ dev_err(ad714x->dev,
+ "fail to detect AD714X captouch, read ID is %04x\n",
+ data);
+ return -ENODEV;
+ }
+}
+
+static void ad714x_hw_init(struct ad714x_chip *ad714x)
+{
+ int i, j;
+ unsigned short reg_base;
+ unsigned short data;
+
+ /* configuration CDC and interrupts */
+
+ for (i = 0; i < STAGE_NUM; i++) {
+ reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
+ for (j = 0; j < STAGE_CFGREG_NUM; j++)
+ ad714x->write(ad714x->dev, reg_base + j,
+ ad714x->hw->stage_cfg_reg[i][j]);
+ }
+
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i,
+ ad714x->hw->sys_cfg_reg[i]);
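+ /* read the system registers back; the values themselves are discarded */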
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i,
+ &data);
+
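+ /* enable calibration on every conversion stage (one enable bit per stage) */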
+ ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF);
+
+ /* clear all interrupts */
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+}
+
+static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
+{
+ struct ad714x_chip *ad714x = data;
+ int i;
+
+ mutex_lock(&ad714x->mutex);
+
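+ /* latch all three interrupt status registers, then run every widget's state machine */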
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
+
+ for (i = 0; i < ad714x->hw->button_num; i++)
+ ad714x_button_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->slider_num; i++)
+ ad714x_slider_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->wheel_num; i++)
+ ad714x_wheel_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->touchpad_num; i++)
+ ad714x_touchpad_state_machine(ad714x, i);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return IRQ_HANDLED;
+}
+
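+/*
+ * Upper bound on the input devices created in ad714x_probe(): one per
+ * slider, wheel and touchpad, plus one shared device for all buttons.
+ */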
+#define MAX_DEVICE_NUM 8
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write)
+{
+ int i, alloc_idx;
+ int error;
+ struct input_dev *input[MAX_DEVICE_NUM];
+
+ struct ad714x_platform_data *plat_data = dev->platform_data;
+ struct ad714x_chip *ad714x;
+ void *drv_mem;
+
+ struct ad714x_button_drv *bt_drv;
+ struct ad714x_slider_drv *sd_drv;
+ struct ad714x_wheel_drv *wl_drv;
+ struct ad714x_touchpad_drv *tp_drv;
+
+ if (irq <= 0) {
+ dev_err(dev, "IRQ not configured!\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (dev->platform_data == NULL) {
+ dev_err(dev, "platform data for ad714x doesn't exist\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
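+ /* one allocation holds the chip state plus all per-widget driver structures */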
+ ad714x = kzalloc(sizeof(*ad714x) + sizeof(*ad714x->sw) +
+ sizeof(*sd_drv) * plat_data->slider_num +
+ sizeof(*wl_drv) * plat_data->wheel_num +
+ sizeof(*tp_drv) * plat_data->touchpad_num +
+ sizeof(*bt_drv) * plat_data->button_num, GFP_KERNEL);
+ if (!ad714x) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ ad714x->hw = plat_data;
+
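+ /* carve the individual driver structures out of the allocation above */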
+ drv_mem = ad714x + 1;
+ ad714x->sw = drv_mem;
+ drv_mem += sizeof(*ad714x->sw);
+ ad714x->sw->slider = sd_drv = drv_mem;
+ drv_mem += sizeof(*sd_drv) * ad714x->hw->slider_num;
+ ad714x->sw->wheel = wl_drv = drv_mem;
+ drv_mem += sizeof(*wl_drv) * ad714x->hw->wheel_num;
+ ad714x->sw->touchpad = tp_drv = drv_mem;
+ drv_mem += sizeof(*tp_drv) * ad714x->hw->touchpad_num;
+ ad714x->sw->button = bt_drv = drv_mem;
+ drv_mem += sizeof(*bt_drv) * ad714x->hw->button_num;
+
+ ad714x->read = read;
+ ad714x->write = write;
+ ad714x->irq = irq;
+ ad714x->dev = dev;
+
+ error = ad714x_hw_detect(ad714x);
+ if (error)
+ goto err_free_mem;
+
+ /* initialize and request sw/hw resources */
+
+ ad714x_hw_init(ad714x);
+ mutex_init(&ad714x->mutex);
+
+ /*
+ * Allocate and register AD714X input device
+ */
+ alloc_idx = 0;
+
+ /* a slider uses one input_dev instance */
+ if (ad714x->hw->slider_num > 0) {
+ struct ad714x_slider_plat *sd_plat = ad714x->hw->slider;
+
+ for (i = 0; i < ad714x->hw->slider_num; i++) {
+ sd_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, sd_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a wheel uses one input_dev instance */
+ if (ad714x->hw->wheel_num > 0) {
+ struct ad714x_wheel_plat *wl_plat = ad714x->hw->wheel;
+
+ for (i = 0; i < ad714x->hw->wheel_num; i++) {
+ wl_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(ABS_WHEEL, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_WHEEL, 0, wl_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a touchpad uses one input_dev instance */
+ if (ad714x->hw->touchpad_num > 0) {
+ struct ad714x_touchpad_plat *tp_plat = ad714x->hw->touchpad;
+
+ for (i = 0; i < ad714x->hw->touchpad_num; i++) {
+ tp_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(ABS_Y, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, tp_plat->x_max_coord, 0, 0);
+ input_set_abs_params(input[alloc_idx],
+ ABS_Y, 0, tp_plat->y_max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* all buttons use one input node */
+ if (ad714x->hw->button_num > 0) {
+ struct ad714x_button_plat *bt_plat = ad714x->hw->button;
+
+ input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ for (i = 0; i < ad714x->hw->button_num; i++) {
+ bt_drv[i].input = input[alloc_idx];
+ __set_bit(bt_plat[i].keycode, input[alloc_idx]->keybit);
+ }
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+
+ error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
+ IRQF_TRIGGER_FALLING, "ad714x_captouch", ad714x);
+ if (error) {
+ dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
+ goto err_unreg_dev;
+ }
+
+ return ad714x;
+
+ err_free_dev:
+ dev_err(dev, "failed to setup AD714x input device %i\n", alloc_idx);
+ input_free_device(input[alloc_idx]);
+ err_unreg_dev:
+ while (--alloc_idx >= 0)
+ input_unregister_device(input[alloc_idx]);
+ err_free_mem:
+ kfree(ad714x);
+ err_out:
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL(ad714x_probe);
+
+void ad714x_remove(struct ad714x_chip *ad714x)
+{
+ struct ad714x_platform_data *hw = ad714x->hw;
+ struct ad714x_driver_data *sw = ad714x->sw;
+ int i;
+
+ free_irq(ad714x->irq, ad714x);
+
+ /* unregister and free all input devices */
+
+ for (i = 0; i < hw->slider_num; i++)
+ input_unregister_device(sw->slider[i].input);
+
+ for (i = 0; i < hw->wheel_num; i++)
+ input_unregister_device(sw->wheel[i].input);
+
+ for (i = 0; i < hw->touchpad_num; i++)
+ input_unregister_device(sw->touchpad[i].input);
+
+ if (hw->button_num)
+ input_unregister_device(sw->button[0].input);
+
+ kfree(ad714x);
+}
+EXPORT_SYMBOL(ad714x_remove);
+
+#ifdef CONFIG_PM
+int ad714x_disable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
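+ /* set the power mode bits in PWR_CTRL to put the part into shutdown */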
+ data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_disable);
+
+int ad714x_enable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
+ /* resume the part from shutdown to its normal power mode */
+
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL,
+ ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
+
+ /* make sure the interrupt output line is not left low after resume,
+ * otherwise we would never see another falling-edge interrupt
+ */
+
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_enable);
+#endif
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
new file mode 100644
index 00000000000..45c54fb13f0
--- /dev/null
+++ b/drivers/input/misc/ad714x.h
@@ -0,0 +1,26 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (bus interfaces)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD714X_H_
+#define _AD714X_H_
+
+#include <linux/types.h>
+
+struct device;
+struct ad714x_chip;
+
+typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *);
+typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short);
+
+int ad714x_disable(struct ad714x_chip *ad714x);
+int ad714x_enable(struct ad714x_chip *ad714x);
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write);
+void ad714x_remove(struct ad714x_chip *ad714x);
+
+#endif
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index e8bbc619f6d..bce57129afb 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -624,13 +624,13 @@ static void ati_remote_irq_in(struct urb *urb)
static int ati_remote_alloc_buffers(struct usb_device *udev,
struct ati_remote *ati_remote)
{
- ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->inbuf_dma);
+ ati_remote->inbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->inbuf_dma);
if (!ati_remote->inbuf)
return -1;
- ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->outbuf_dma);
+ ati_remote->outbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->outbuf_dma);
if (!ati_remote->outbuf)
return -1;
@@ -653,10 +653,10 @@ static void ati_remote_free_buffers(struct ati_remote *ati_remote)
usb_free_urb(ati_remote->irq_urb);
usb_free_urb(ati_remote->out_urb);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->inbuf, ati_remote->inbuf_dma);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->outbuf, ati_remote->outbuf_dma);
}
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 2124b99378b..e148749b585 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -589,7 +589,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
int i, pipe, maxp;
for (i = 0; i < 2; i++) {
- ar2->buf[i] = usb_buffer_alloc(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
+ ar2->buf[i] = usb_alloc_coherent(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
if (!ar2->buf[i])
return -ENOMEM;
@@ -617,7 +617,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
for (i = 0; i < 2; i++) {
usb_free_urb(ar2->urb[i]);
- usb_buffer_free(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
+ usb_free_coherent(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
}
}
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 86457feccfc..2b0eba6619b 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -102,7 +102,6 @@ struct cm109_dev {
struct cm109_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
/*
* The 3 bitfields below are protected by ctl_submit_lock.
@@ -629,15 +628,13 @@ static const struct usb_device_id cm109_usb_table[] = {
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
- if (dev->ctl_req)
- usb_buffer_free(dev->udev, sizeof(*(dev->ctl_req)),
- dev->ctl_req, dev->ctl_req_dma);
+ kfree(dev->ctl_req);
if (dev->ctl_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->ctl_data, dev->ctl_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->ctl_data, dev->ctl_dma);
if (dev->irq_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->irq_data, dev->irq_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->irq_data, dev->irq_dma);
usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */
usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */
@@ -686,18 +683,17 @@ static int cm109_usb_probe(struct usb_interface *intf,
goto err_out;
/* allocate usb buffers */
- dev->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->irq_dma);
+ dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->irq_dma);
if (!dev->irq_data)
goto err_out;
- dev->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->ctl_dma);
+ dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->ctl_dma);
if (!dev->ctl_data)
goto err_out;
- dev->ctl_req = usb_buffer_alloc(udev, sizeof(*(dev->ctl_req)),
- GFP_KERNEL, &dev->ctl_req_dma);
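+ /* a plain kmalloc is enough for the setup packet; the USB core maps it for DMA */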
+ dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL);
if (!dev->ctl_req)
goto err_out;
@@ -735,10 +731,8 @@ static int cm109_usb_probe(struct usb_interface *intf,
usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN,
cm109_urb_ctl_callback, dev);
- dev->urb_ctl->setup_dma = dev->ctl_req_dma;
dev->urb_ctl->transfer_dma = dev->ctl_dma;
- dev->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ad730e15afc..e00a1cc79c0 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -43,6 +43,7 @@
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/rtc.h>
+#include <linux/smp_lock.h>
#include <linux/semaphore.h>
MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
@@ -64,8 +65,8 @@ static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
@@ -512,7 +513,7 @@ static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
return len;
}
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
+static int hp_sdc_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
#if 1
@@ -659,14 +660,27 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
#endif
}
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
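+ /* take the big kernel lock around the old ioctl handler */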
+ lock_kernel();
+ ret = hp_sdc_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations hp_sdc_rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hp_sdc_rtc_read,
- .poll = hp_sdc_rtc_poll,
- .ioctl = hp_sdc_rtc_ioctl,
- .open = hp_sdc_rtc_open,
- .fasync = hp_sdc_rtc_fasync,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hp_sdc_rtc_read,
+ .poll = hp_sdc_rtc_poll,
+ .unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
+ .open = hp_sdc_rtc_open,
+ .fasync = hp_sdc_rtc_fasync,
};
static struct miscdevice hp_sdc_rtc_dev = {
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 86afdd1fdf9..a93c525475c 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -464,7 +464,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
remote->in_endpoint = endpoint;
remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
- remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
+ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
if (!remote->in_buffer) {
error = -ENOMEM;
goto fail1;
@@ -543,7 +543,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
return 0;
fail3: usb_free_urb(remote->irq_urb);
- fail2: usb_buffer_free(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
fail1: kfree(remote);
input_free_device(input_dev);
@@ -564,7 +564,7 @@ static void keyspan_disconnect(struct usb_interface *interface)
input_unregister_device(remote->input);
usb_kill_urb(remote->irq_urb);
usb_free_urb(remote->irq_urb);
- usb_buffer_free(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
kfree(remote);
}
}
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 00000000000..80af4460801
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/**
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN (1 << 7)
+#define PWREN_EN (1 << 7)
+
+struct max8925_onkey_info {
+ struct input_dev *idev;
+ struct i2c_client *i2c;
+ int irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() operates I2C bus and may sleep. So implement
+ * it in thread IRQ handler.
+ */
+static irqreturn_t max8925_onkey_handler(int irq, void *data)
+{
+ struct max8925_onkey_info *info = data;
+
+ input_report_key(info->idev, KEY_POWER, 1);
+ input_sync(info->idev);
+
+ /* Enable hard reset so the PMIC halts the system if it isn't shut down in time */
+ max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
+ HARDRESET_EN, HARDRESET_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+{
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_onkey_info *info;
+ int error;
+
+ info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->i2c = chip->i2c;
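+ /* the PMIC raises this interrupt after ONKEY has been held for 3 seconds */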
+ info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(chip->dev, "Failed to allocate input dev\n");
+ error = -ENOMEM;
+ goto out_input;
+ }
+
+ info->idev->name = "max8925_on";
+ info->idev->phys = "max8925_on/input0";
+ info->idev->id.bustype = BUS_I2C;
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
+
+ error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
+ IRQF_ONESHOT, "onkey", info);
+ if (error < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ info->irq, error);
+ goto out_irq;
+ }
+
+ error = input_register_device(info->idev);
+ if (error) {
+ dev_err(chip->dev, "Can't register input device: %d\n", error);
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out:
+ free_irq(info->irq, info);
+out_irq:
+ input_free_device(info->idev);
+out_input:
+ kfree(info);
+ return error;
+}
+
+static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+{
+ struct max8925_onkey_info *info = platform_get_drvdata(pdev);
+
+ free_irq(info->irq, info);
+ input_unregister_device(info->idev);
+ kfree(info);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver max8925_onkey_driver = {
+ .driver = {
+ .name = "max8925-onkey",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_onkey_probe,
+ .remove = __devexit_p(max8925_onkey_remove),
+};
+
+static int __init max8925_onkey_init(void)
+{
+ return platform_driver_register(&max8925_onkey_driver);
+}
+module_init(max8925_onkey_init);
+
+static void __exit max8925_onkey_exit(void)
+{
+ platform_driver_unregister(&max8925_onkey_driver);
+}
+module_exit(max8925_onkey_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
new file mode 100644
index 00000000000..5c3ac4e0b05
--- /dev/null
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for a keypad w/16 buttons connected to a PCF8574 I2C I/O expander
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define DRV_NAME "pcf8574_keypad"
+
+static const unsigned char pcf8574_kp_btncode[] = {
+ [0] = KEY_RESERVED,
+ [1] = KEY_ENTER,
+ [2] = KEY_BACKSLASH,
+ [3] = KEY_0,
+ [4] = KEY_RIGHTBRACE,
+ [5] = KEY_C,
+ [6] = KEY_9,
+ [7] = KEY_8,
+ [8] = KEY_7,
+ [9] = KEY_B,
+ [10] = KEY_6,
+ [11] = KEY_5,
+ [12] = KEY_4,
+ [13] = KEY_A,
+ [14] = KEY_3,
+ [15] = KEY_2,
+ [16] = KEY_1
+};
+
+struct kp_data {
+ unsigned short btncode[ARRAY_SIZE(pcf8574_kp_btncode)];
+ struct input_dev *idev;
+ struct i2c_client *client;
+ char name[64];
+ char phys[32];
+ unsigned char laststate;
+};
+
+static short read_state(struct kp_data *lp)
+{
+ unsigned char x, y, a, b;
+
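+ /*
+ * Drive one half of the expander port low and read back the inverted
+ * other half to find the active X and Y lines of the 4x4 key matrix.
+ */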
+ i2c_smbus_write_byte(lp->client, 240);
+ x = 0xF & (~(i2c_smbus_read_byte(lp->client) >> 4));
+
+ i2c_smbus_write_byte(lp->client, 15);
+ y = 0xF & (~i2c_smbus_read_byte(lp->client));
+
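+ /* combine the X and Y bit positions into an index 1..16 into the keycode table */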
+ for (a = 0; x > 0; a++)
+ x = x >> 1;
+ for (b = 0; y > 0; b++)
+ y = y >> 1;
+
+ return ((a - 1) * 4) + b;
+}
+
+static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
+{
+ struct kp_data *lp = dev_id;
+ unsigned char nextstate = read_state(lp);
+
+ if (lp->laststate != nextstate) {
+ int key_down = nextstate < ARRAY_SIZE(lp->btncode);
+ unsigned short keycode = key_down ?
+ lp->btncode[nextstate] : lp->btncode[lp->laststate];
+
+ input_report_key(lp->idev, keycode, key_down);
+ input_sync(lp->idev);
+
+ lp->laststate = nextstate;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int i, ret;
+ struct input_dev *idev;
+ struct kp_data *lp;
+
+ if (i2c_smbus_write_byte(client, 240) < 0) {
+ dev_err(&client->dev, "probe: write fail\n");
+ return -ENODEV;
+ }
+
+ lp = kzalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(&client->dev, "Can't allocate input device\n");
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ lp->idev = idev;
+ lp->client = client;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY);
+ idev->keycode = lp->btncode;
+ idev->keycodesize = sizeof(lp->btncode[0]);
+ idev->keycodemax = ARRAY_SIZE(lp->btncode);
+
+ for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
+ lp->btncode[i] = pcf8574_kp_btncode[i];
+ __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit);
+ }
+
+ sprintf(lp->name, DRV_NAME);
+ sprintf(lp->phys, "kp_data/input0");
+
+ idev->name = lp->name;
+ idev->phys = lp->phys;
+ idev->id.bustype = BUS_I2C;
+ idev->id.vendor = 0x0001;
+ idev->id.product = 0x0001;
+ idev->id.version = 0x0100;
+
+ input_set_drvdata(idev, lp);
+
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(&client->dev, "input_register_device() failed\n");
+ goto fail_register;
+ }
+
+ lp->laststate = read_state(lp);
+
+ ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ DRV_NAME, lp);
+ if (ret) {
+ dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
+ goto fail_irq;
+ }
+
+ i2c_set_clientdata(client, lp);
+ return 0;
+
+ fail_irq:
+ input_unregister_device(idev);
+ fail_register:
+ input_set_drvdata(idev, NULL);
+ input_free_device(idev);
+ fail_allocate:
+ kfree(lp);
+
+ return ret;
+}
+
+static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+{
+ struct kp_data *lp = i2c_get_clientdata(client);
+
+ free_irq(client->irq, lp);
+
+ input_unregister_device(lp->idev);
+ kfree(lp);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pcf8574_kp_resume(struct i2c_client *client)
+{
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static int pcf8574_kp_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ disable_irq(client->irq);
+
+ return 0;
+}
+#else
+# define pcf8574_kp_resume NULL
+# define pcf8574_kp_suspend NULL
+#endif
+
+static const struct i2c_device_id pcf8574_kp_id[] = {
+ { DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);
+
+static struct i2c_driver pcf8574_kp_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf8574_kp_probe,
+ .remove = __devexit_p(pcf8574_kp_remove),
+ .suspend = pcf8574_kp_suspend,
+ .resume = pcf8574_kp_resume,
+ .id_table = pcf8574_kp_id,
+};
+
+static int __init pcf8574_kp_init(void)
+{
+ return i2c_add_driver(&pcf8574_kp_driver);
+}
+module_init(pcf8574_kp_init);
+
+static void __exit pcf8574_kp_exit(void)
+{
+ i2c_del_driver(&pcf8574_kp_driver);
+}
+module_exit(pcf8574_kp_exit);
+
+MODULE_AUTHOR("Michael Hennerich");
+MODULE_DESCRIPTION("Keypad input driver for 16 keys connected to PCF8574");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index ea4e1fd1265..f080dd31499 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_RAW_SPINLOCK(i8253_lock);
#endif
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
if (value > 20 && value < 32767)
count = PIT_TICK_RATE / value;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return 0;
}
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 668913d1204..bf170f6b442 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -64,7 +64,6 @@ struct powermate_device {
dma_addr_t data_dma;
struct urb *irq, *config;
struct usb_ctrlrequest *configcr;
- dma_addr_t configcr_dma;
struct usb_device *udev;
struct input_dev *input;
spinlock_t lock;
@@ -182,8 +181,6 @@ static void powermate_sync_state(struct powermate_device *pm)
usb_fill_control_urb(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0),
(void *) pm->configcr, NULL, 0,
powermate_config_complete, pm);
- pm->config->setup_dma = pm->configcr_dma;
- pm->config->transfer_flags |= URB_NO_SETUP_DMA_MAP;
if (usb_submit_urb(pm->config, GFP_ATOMIC))
printk(KERN_ERR "powermate: usb_submit_urb(config) failed");
@@ -276,13 +273,12 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- GFP_ATOMIC, &pm->data_dma);
+ pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ GFP_ATOMIC, &pm->data_dma);
if (!pm->data)
return -1;
- pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
- GFP_ATOMIC, &pm->configcr_dma);
+ pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
if (!pm->configcr)
return -1;
@@ -291,10 +287,9 @@ static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_dev
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- usb_buffer_free(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- pm->data, pm->data_dma);
- usb_buffer_free(udev, sizeof(*(pm->configcr)),
- pm->configcr, pm->configcr_dma);
+ usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ pm->data, pm->data_dma);
+ kfree(pm->configcr);
}
/* Called whenever a USB device matching one in our supported devices table is connected */
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0d45422f809..1dacae4b43f 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -259,8 +259,11 @@ static const struct of_device_id bbc_beep_match[] = {
};
static struct of_platform_driver bbc_beep_driver = {
- .name = "bbcbeep",
- .match_table = bbc_beep_match,
+ .driver = {
+ .name = "bbcbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = bbc_beep_match,
+ },
.probe = bbc_beep_probe,
.remove = __devexit_p(bbc_remove),
.shutdown = sparcspkr_shutdown,
@@ -338,8 +341,11 @@ static const struct of_device_id grover_beep_match[] = {
};
static struct of_platform_driver grover_beep_driver = {
- .name = "groverbeep",
- .match_table = grover_beep_match,
+ .driver = {
+ .name = "groverbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = grover_beep_match,
+ },
.probe = grover_beep_probe,
.remove = __devexit_p(grover_remove),
.shutdown = sparcspkr_shutdown,
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04..4f9b2afc24e 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+ twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
info->enabled = false;
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076a..b71eb55f2db 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
unsigned int cnt;
int retval = 0;
- for (cnt = 0; cnt < ABS_MAX + 1; cnt++) {
+ for (cnt = 0; cnt < ABS_CNT; cnt++) {
if (!test_bit(cnt, dev->absbit))
continue;
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- size = sizeof(int) * (ABS_MAX + 1);
+ size = sizeof(int) * ABS_CNT;
memcpy(dev->absmax, user_dev->absmax, size);
memcpy(dev->absmin, user_dev->absmin, size);
memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 04d5a4a3181..4dac8b79fcd 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -983,11 +983,11 @@ static int __init copy_keymap(void)
for (key = keymap; key->type != KE_END; key++)
length++;
- new_keymap = kmalloc(length * sizeof(struct key_entry), GFP_KERNEL);
+ new_keymap = kmemdup(keymap, length * sizeof(struct key_entry),
+ GFP_KERNEL);
if (!new_keymap)
return -ENOMEM;
- memcpy(new_keymap, keymap, length * sizeof(struct key_entry));
keymap = new_keymap;
return 0;
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 93a22ac0f88..41201c6b5e6 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -111,7 +111,6 @@ struct yealink_dev {
struct yld_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
char phys[64]; /* physical device path */
@@ -836,12 +835,9 @@ static int usb_cleanup(struct yealink_dev *yld, int err)
usb_free_urb(yld->urb_irq);
usb_free_urb(yld->urb_ctl);
- usb_buffer_free(yld->udev, sizeof(*(yld->ctl_req)),
- yld->ctl_req, yld->ctl_req_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->ctl_data, yld->ctl_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->irq_data, yld->irq_dma);
+ kfree(yld->ctl_req);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->ctl_data, yld->ctl_dma);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->irq_data, yld->irq_dma);
kfree(yld);
return err;
@@ -886,18 +882,17 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return usb_cleanup(yld, -ENOMEM);
/* allocate usb buffers */
- yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->irq_dma);
+ yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->irq_dma);
if (yld->irq_data == NULL)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->ctl_dma);
+ yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->ctl_dma);
if (!yld->ctl_data)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
- GFP_ATOMIC, &yld->ctl_req_dma);
+ yld->ctl_req = kmalloc(sizeof(*(yld->ctl_req)), GFP_KERNEL);
if (yld->ctl_req == NULL)
return usb_cleanup(yld, -ENOMEM);
@@ -936,10 +931,8 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_fill_control_urb(yld->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)yld->ctl_req, yld->ctl_data, USB_PKT_LEN,
urb_ctl_callback, yld);
- yld->urb_ctl->setup_dma = yld->ctl_req_dma;
yld->urb_ctl->transfer_dma = yld->ctl_dma;
- yld->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ yld->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
yld->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index c714ca2407f..eeb58c1cac1 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if X86 && !X86_MRST
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42..ff5f61a0fd3 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");
static int amimouse_lastx, amimouse_lasty;
-static struct input_dev *amimouse_dev;
-static irqreturn_t amimouse_interrupt(int irq, void *dummy)
+static irqreturn_t amimouse_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned short joy0dat, potgor;
int nx, ny, dx, dy;
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
potgor = amiga_custom.potgor;
- input_report_rel(amimouse_dev, REL_X, dx);
- input_report_rel(amimouse_dev, REL_Y, dy);
+ input_report_rel(dev, REL_X, dx);
+ input_report_rel(dev, REL_Y, dy);
- input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40);
- input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100);
- input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400);
+ input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
+ input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
+ input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
- input_sync(amimouse_dev);
+ input_sync(dev);
return IRQ_HANDLED;
}
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
+ int error;
joy0dat = amiga_custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
- if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
- printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return -EBUSY;
- }
+ error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
+ dev);
+ if (error)
+ dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return 0;
+ return error;
}
static void amimouse_close(struct input_dev *dev)
{
- free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
+ free_irq(IRQ_AMIGA_VERTB, dev);
}
-static int __init amimouse_init(void)
+static int __init amimouse_probe(struct platform_device *pdev)
{
int err;
+ struct input_dev *dev;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE))
- return -ENODEV;
-
- amimouse_dev = input_allocate_device();
- if (!amimouse_dev)
+ dev = input_allocate_device();
+ if (!dev)
return -ENOMEM;
- amimouse_dev->name = "Amiga mouse";
- amimouse_dev->phys = "amimouse/input0";
- amimouse_dev->id.bustype = BUS_AMIGA;
- amimouse_dev->id.vendor = 0x0001;
- amimouse_dev->id.product = 0x0002;
- amimouse_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amimouse/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0002;
+ dev->id.version = 0x0100;
- amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
- amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+ dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
- amimouse_dev->open = amimouse_open;
- amimouse_dev->close = amimouse_close;
+ dev->open = amimouse_open;
+ dev->close = amimouse_close;
+ dev->dev.parent = &pdev->dev;
- err = input_register_device(amimouse_dev);
+ err = input_register_device(dev);
if (err) {
- input_free_device(amimouse_dev);
+ input_free_device(dev);
return err;
}
+ platform_set_drvdata(pdev, dev);
+
return 0;
}
-static void __exit amimouse_exit(void)
+static int __exit amimouse_remove(struct platform_device *pdev)
{
- input_unregister_device(amimouse_dev);
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amimouse_driver = {
+ .remove = __exit_p(amimouse_remove),
+ .driver = {
+ .name = "amiga-mouse",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amimouse_init(void)
+{
+ return platform_driver_probe(&amimouse_driver, amimouse_probe);
}
module_init(amimouse_init);
+
+static void __exit amimouse_exit(void)
+{
+ platform_driver_unregister(&amimouse_driver);
+}
+
module_exit(amimouse_exit);
+
+MODULE_ALIAS("platform:amiga-mouse");
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 53ec7ddd182..05edd75abca 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -806,8 +806,8 @@ static int atp_probe(struct usb_interface *iface,
if (!dev->urb)
goto err_free_devs;
- dev->data = usb_buffer_alloc(dev->udev, dev->info->datalen, GFP_KERNEL,
- &dev->urb->transfer_dma);
+ dev->data = usb_alloc_coherent(dev->udev, dev->info->datalen, GFP_KERNEL,
+ &dev->urb->transfer_dma);
if (!dev->data)
goto err_free_urb;
@@ -862,8 +862,8 @@ static int atp_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->urb);
err_free_devs:
@@ -881,8 +881,8 @@ static void atp_disconnect(struct usb_interface *iface)
if (dev) {
usb_kill_urb(dev->urb);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
kfree(dev);
}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b89879bd860..6dedded2722 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -715,15 +715,15 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_urb)
goto err_free_bt_urb;
- dev->bt_data = usb_buffer_alloc(dev->udev,
- dev->cfg.bt_datalen, GFP_KERNEL,
- &dev->bt_urb->transfer_dma);
+ dev->bt_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.bt_datalen, GFP_KERNEL,
+ &dev->bt_urb->transfer_dma);
if (!dev->bt_data)
goto err_free_urb;
- dev->tp_data = usb_buffer_alloc(dev->udev,
- dev->cfg.tp_datalen, GFP_KERNEL,
- &dev->tp_urb->transfer_dma);
+ dev->tp_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.tp_datalen, GFP_KERNEL,
+ &dev->tp_urb->transfer_dma);
if (!dev->tp_data)
goto err_free_bt_buffer;
@@ -765,10 +765,10 @@ static int bcm5974_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
dev->tp_data, dev->tp_urb->transfer_dma);
err_free_bt_buffer:
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
dev->bt_data, dev->bt_urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->tp_urb);
@@ -788,10 +788,10 @@ static void bcm5974_disconnect(struct usb_interface *iface)
usb_set_intfdata(iface, NULL);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
- dev->tp_data, dev->tp_urb->transfer_dma);
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
- dev->bt_data, dev->bt_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
usb_free_urb(dev->tp_urb);
usb_free_urb(dev->bt_urb);
kfree(dev);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 112b4ee52ff..b18862b2a70 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -10,6 +10,8 @@
* Trademarks are the property of their respective owners.
*/
+#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -19,10 +21,10 @@
#include "psmouse.h"
#include "elantech.h"
-#define elantech_debug(format, arg...) \
- do { \
- if (etd->debug) \
- printk(KERN_DEBUG format, ##arg); \
+#define elantech_debug(fmt, ...) \
+ do { \
+ if (etd->debug) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
static bool force_elantech;
@@ -37,7 +39,7 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
{
if (psmouse_sliced_command(psmouse, c) ||
ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_err("elantech.c: synaptics_send_cmd query 0x%02x failed.\n", c);
+ pr_err("synaptics_send_cmd query 0x%02x failed.\n", c);
return -1;
}
@@ -60,13 +62,13 @@ static int elantech_ps2_command(struct psmouse *psmouse,
if (rc == 0)
break;
tries--;
- elantech_debug("elantech.c: retrying ps2 command 0x%02x (%d).\n",
- command, tries);
+ elantech_debug("retrying ps2 command 0x%02x (%d).\n",
+ command, tries);
msleep(ETP_PS2_COMMAND_DELAY);
} while (tries > 0);
if (rc)
- pr_err("elantech.c: ps2 command 0x%02x failed.\n", command);
+ pr_err("ps2 command 0x%02x failed.\n", command);
return rc;
}
@@ -108,7 +110,7 @@ static int elantech_read_reg(struct psmouse *psmouse, unsigned char reg,
}
if (rc)
- pr_err("elantech.c: failed to read register 0x%02x.\n", reg);
+ pr_err("failed to read register 0x%02x.\n", reg);
else
*val = param[0];
@@ -154,7 +156,7 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
}
if (rc)
- pr_err("elantech.c: failed to write register 0x%02x with value 0x%02x.\n",
+ pr_err("failed to write register 0x%02x with value 0x%02x.\n",
reg, val);
return rc;
@@ -167,7 +169,7 @@ static void elantech_packet_dump(unsigned char *packet, int size)
{
int i;
- printk(KERN_DEBUG "elantech.c: PS/2 packet [");
+ printk(KERN_DEBUG pr_fmt("PS/2 packet ["));
for (i = 0; i < size; i++)
printk("%s0x%02x ", (i) ? ", " : " ", packet[i]);
printk("]\n");
@@ -203,7 +205,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
if (etd->jumpy_cursor) {
/* Discard packets that are likely to have bogus coordinates */
if (fingers > old_fingers) {
- elantech_debug("elantech.c: discarding packet\n");
+ elantech_debug("discarding packet\n");
goto discard_packet_v1;
}
}
@@ -413,23 +415,21 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
if (rc == 0)
break;
tries--;
- elantech_debug("elantech.c: retrying read (%d).\n",
- tries);
+ elantech_debug("retrying read (%d).\n", tries);
msleep(ETP_READ_BACK_DELAY);
} while (tries > 0);
if (rc) {
- pr_err("elantech.c: failed to read back register 0x10.\n");
+ pr_err("failed to read back register 0x10.\n");
} else if (etd->hw_version == 1 &&
!(val & ETP_R10_ABSOLUTE_MODE)) {
- pr_err("elantech.c: touchpad refuses "
- "to switch to absolute mode.\n");
+ pr_err("touchpad refuses to switch to absolute mode.\n");
rc = -1;
}
}
if (rc)
- pr_err("elantech.c: failed to initialise registers.\n");
+ pr_err("failed to initialise registers.\n");
return rc;
}
@@ -575,6 +575,24 @@ static struct attribute_group elantech_attr_group = {
.attrs = elantech_attrs,
};
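+/*
+ * A valid Elantech version query response has a non-zero first byte.
+ * When the second byte is zero, the third byte must also not look like
+ * a standard PS/2 sample rate, which would suggest an ordinary mouse.
+ */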
+static bool elantech_is_signature_valid(const unsigned char *param)
+{
+ static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10 };
+ int i;
+
+ if (param[0] == 0)
+ return false;
+
+ if (param[1] == 0)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (param[2] == rates[i])
+ return false;
+
+ return true;
+}
+
/*
* Use magic knock to detect Elantech touchpad
*/
@@ -590,7 +608,7 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_debug("elantech.c: sending Elantech magic knock failed.\n");
+ pr_debug("sending Elantech magic knock failed.\n");
return -1;
}
@@ -599,8 +617,7 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* set of magic numbers
*/
if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
- pr_debug("elantech.c: "
- "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_debug("unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
return -1;
}
@@ -611,20 +628,20 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* to Elantech magic knock and there might be more.
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_debug("elantech.c: failed to query firmware version.\n");
+ pr_debug("failed to query firmware version.\n");
return -1;
}
- pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_debug("Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
- if (param[0] == 0 || param[1] != 0) {
+ if (!elantech_is_signature_valid(param)) {
if (!force_elantech) {
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
+ pr_debug("Probably not a real Elantech touchpad. Aborting.\n");
return -1;
}
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
+ pr_debug("Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
}
if (set_properties) {
@@ -655,7 +672,7 @@ static int elantech_reconnect(struct psmouse *psmouse)
return -1;
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("elantech.c: failed to put touchpad back into absolute mode.\n");
+ pr_err("failed to put touchpad back into absolute mode.\n");
return -1;
}
@@ -683,7 +700,7 @@ int elantech_init(struct psmouse *psmouse)
* Do the version query again so we can store the result
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_err("elantech.c: failed to query firmware version.\n");
+ pr_err("failed to query firmware version.\n");
goto init_fail;
}
@@ -704,14 +721,14 @@ int elantech_init(struct psmouse *psmouse)
etd->paritycheck = 1;
}
- pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d.%d\n",
+ pr_info("assuming hardware version %d, firmware version %d.%d.%d\n",
etd->hw_version, param[0], param[1], param[2]);
if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
- pr_err("elantech.c: failed to query capabilities.\n");
+ pr_err("failed to query capabilities.\n");
goto init_fail;
}
- pr_info("elantech.c: Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_info("Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
etd->capabilities = param[0];
@@ -721,13 +738,12 @@ int elantech_init(struct psmouse *psmouse)
* to jump. Enable a workaround.
*/
if (etd->fw_version == 0x020022) {
- pr_info("elantech.c: firmware version 2.0.34 detected, "
- "enabling jumpy cursor workaround\n");
+ pr_info("firmware version 2.0.34 detected, enabling jumpy cursor workaround\n");
etd->jumpy_cursor = 1;
}
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("elantech.c: failed to put touchpad into absolute mode.\n");
+ pr_err("failed to put touchpad into absolute mode.\n");
goto init_fail;
}
@@ -736,8 +752,7 @@ int elantech_init(struct psmouse *psmouse)
error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
&elantech_attr_group);
if (error) {
- pr_err("elantech.c: failed to create sysfs attributes, error: %d.\n",
- error);
+ pr_err("failed to create sysfs attributes, error: %d.\n", error);
goto init_fail;
}
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 08d66d820d2..1d2205b2480 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -40,8 +40,8 @@
#include "psmouse.h"
#include "hgpk.h"
-static int tpdebug;
-module_param(tpdebug, int, 0644);
+static bool tpdebug;
+module_param(tpdebug, bool, 0644);
MODULE_PARM_DESC(tpdebug, "enable debugging, dumping packets to KERN_DEBUG.");
static int recalib_delta = 100;
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index 543c240a85f..c9983aee908 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -56,36 +56,36 @@ static psmouse_ret_t ps2pp_process_byte(struct psmouse *psmouse)
/* Logitech extended packet */
switch ((packet[1] >> 4) | (packet[0] & 0x30)) {
- case 0x0d: /* Mouse extra info */
+ case 0x0d: /* Mouse extra info */
- input_report_rel(dev, packet[2] & 0x80 ? REL_HWHEEL : REL_WHEEL,
- (int) (packet[2] & 8) - (int) (packet[2] & 7));
- input_report_key(dev, BTN_SIDE, (packet[2] >> 4) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[2] >> 5) & 1);
+ input_report_rel(dev, packet[2] & 0x80 ? REL_HWHEEL : REL_WHEEL,
+ (int) (packet[2] & 8) - (int) (packet[2] & 7));
+ input_report_key(dev, BTN_SIDE, (packet[2] >> 4) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[2] >> 5) & 1);
- break;
+ break;
- case 0x0e: /* buttons 4, 5, 6, 7, 8, 9, 10 info */
+ case 0x0e: /* buttons 4, 5, 6, 7, 8, 9, 10 info */
- input_report_key(dev, BTN_SIDE, (packet[2]) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[2] >> 1) & 1);
- input_report_key(dev, BTN_BACK, (packet[2] >> 3) & 1);
- input_report_key(dev, BTN_FORWARD, (packet[2] >> 4) & 1);
- input_report_key(dev, BTN_TASK, (packet[2] >> 2) & 1);
+ input_report_key(dev, BTN_SIDE, (packet[2]) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[2] >> 1) & 1);
+ input_report_key(dev, BTN_BACK, (packet[2] >> 3) & 1);
+ input_report_key(dev, BTN_FORWARD, (packet[2] >> 4) & 1);
+ input_report_key(dev, BTN_TASK, (packet[2] >> 2) & 1);
- break;
+ break;
- case 0x0f: /* TouchPad extra info */
+ case 0x0f: /* TouchPad extra info */
- input_report_rel(dev, packet[2] & 0x08 ? REL_HWHEEL : REL_WHEEL,
- (int) ((packet[2] >> 4) & 8) - (int) ((packet[2] >> 4) & 7));
- packet[0] = packet[2] | 0x08;
- break;
+ input_report_rel(dev, packet[2] & 0x08 ? REL_HWHEEL : REL_WHEEL,
+ (int) ((packet[2] >> 4) & 8) - (int) ((packet[2] >> 4) & 7));
+ packet[0] = packet[2] | 0x08;
+ break;
#ifdef DEBUG
- default:
- printk(KERN_WARNING "psmouse.c: Received PS2++ packet #%x, but don't know how to handle.\n",
- (packet[1] >> 4) | (packet[0] & 0x30));
+ default:
+ printk(KERN_WARNING "psmouse.c: Received PS2++ packet #%x, but don't know how to handle.\n",
+ (packet[1] >> 4) | (packet[0] & 0x30));
#endif
}
} else {
@@ -250,7 +250,6 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
if (model == ps2pp_list[i].model)
return &ps2pp_list[i];
- printk(KERN_WARNING "logips2pp: Detected unknown logitech mouse model %d\n", model);
return NULL;
}
@@ -285,31 +284,32 @@ static void ps2pp_set_model_properties(struct psmouse *psmouse,
__set_bit(REL_HWHEEL, input_dev->relbit);
switch (model_info->kind) {
- case PS2PP_KIND_WHEEL:
- psmouse->name = "Wheel Mouse";
- break;
-
- case PS2PP_KIND_MX:
- psmouse->name = "MX Mouse";
- break;
- case PS2PP_KIND_TP3:
- psmouse->name = "TouchPad 3";
- break;
-
- case PS2PP_KIND_TRACKMAN:
- psmouse->name = "TrackMan";
- break;
-
- default:
- /*
- * Set name to "Mouse" only when using PS2++,
- * otherwise let other protocols define suitable
- * name
- */
- if (using_ps2pp)
- psmouse->name = "Mouse";
- break;
+ case PS2PP_KIND_WHEEL:
+ psmouse->name = "Wheel Mouse";
+ break;
+
+ case PS2PP_KIND_MX:
+ psmouse->name = "MX Mouse";
+ break;
+
+ case PS2PP_KIND_TP3:
+ psmouse->name = "TouchPad 3";
+ break;
+
+ case PS2PP_KIND_TRACKMAN:
+ psmouse->name = "TrackMan";
+ break;
+
+ default:
+ /*
+ * Set name to "Mouse" only when using PS2++,
+ * otherwise let other protocols define suitable
+ * name
+ */
+ if (using_ps2pp)
+ psmouse->name = "Mouse";
+ break;
}
}
@@ -343,7 +343,8 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
if (!model || !buttons)
return -1;
- if ((model_info = get_model_info(model)) != NULL) {
+ model_info = get_model_info(model);
+ if (model_info) {
/*
* Do Logitech PS2++ / PS2T++ magic init.
@@ -379,6 +380,9 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
use_ps2pp = true;
}
}
+
+ } else {
+ printk(KERN_WARNING "logips2pp: Detected unknown logitech mouse model %d\n", model);
}
if (set_properties) {
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index a3c97315a47..979c5021528 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -147,18 +147,18 @@ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
if (psmouse->type == PSMOUSE_IMEX) {
switch (packet[3] & 0xC0) {
- case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
- input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
- break;
- case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
- input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
- break;
- case 0x00:
- case 0xC0:
- input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
- input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
- break;
+ case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x00:
+ case 0xC0:
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
+ input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
+ break;
}
}
@@ -247,31 +247,31 @@ static int psmouse_handle_byte(struct psmouse *psmouse)
psmouse_ret_t rc = psmouse->protocol_handler(psmouse);
switch (rc) {
- case PSMOUSE_BAD_DATA:
- if (psmouse->state == PSMOUSE_ACTIVATED) {
- printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
- psmouse->name, psmouse->phys, psmouse->pktcnt);
- if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
- __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
- printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
- serio_reconnect(psmouse->ps2dev.serio);
- return -1;
- }
- }
- psmouse->pktcnt = 0;
- break;
-
- case PSMOUSE_FULL_PACKET:
- psmouse->pktcnt = 0;
- if (psmouse->out_of_sync_cnt) {
- psmouse->out_of_sync_cnt = 0;
- printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
- psmouse->name, psmouse->phys);
+ case PSMOUSE_BAD_DATA:
+ if (psmouse->state == PSMOUSE_ACTIVATED) {
+ printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
+ psmouse->name, psmouse->phys, psmouse->pktcnt);
+ if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
+ __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+ printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+ serio_reconnect(psmouse->ps2dev.serio);
+ return -1;
}
- break;
+ }
+ psmouse->pktcnt = 0;
+ break;
+
+ case PSMOUSE_FULL_PACKET:
+ psmouse->pktcnt = 0;
+ if (psmouse->out_of_sync_cnt) {
+ psmouse->out_of_sync_cnt = 0;
+ printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
+ psmouse->name, psmouse->phys);
+ }
+ break;
- case PSMOUSE_GOOD_DATA:
- break;
+ case PSMOUSE_GOOD_DATA:
+ break;
}
return 0;
}
@@ -1245,7 +1245,7 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
psmouse->pktsize = 3;
if (proto && (proto->detect || proto->init)) {
- if (proto->detect && proto->detect(psmouse, 1) < 0)
+ if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
if (proto->init && proto->init(psmouse) < 0)
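The IntelliMouse Explorer hunk above reports wheel movement with the idiom (packet[3] & 8) - (packet[3] & 7); this is simply the low nibble read as a 4-bit two's-complement number with its sign flipped, and (packet[3] & 32) - (packet[3] & 31) does the same for the 6-bit scroll fields of the Explorer 4.0 packets. A small stand-alone table of that mapping (illustrative only, not part of the patch):

#include <stdio.h>

/* (x & 8) - (x & 7) turns the low nibble, read as a 4-bit two's-complement
 * value, into its negation; that result is what gets fed to REL_WHEEL above.
 * (x & 32) - (x & 31) is the 6-bit equivalent. */
int main(void)
{
        int x;

        for (x = 0; x < 16; x++)
                printf("nibble 0x%x -> wheel %+d\n", x, (x & 8) - (x & 7));

        return 0;
}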
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ebd7a99efea..40cea334ad1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -36,6 +36,8 @@
* The x/y limits are taken from the Synaptics TouchPad interfacing Guide,
* section 2.3.2, which says that they should be valid regardless of the
* actual size of the sensor.
+ * Note that newer firmware allows querying device for maximum useable
+ * coordinates.
*/
#define XMIN_NOMINAL 1472
#define XMAX_NOMINAL 5472
@@ -194,23 +196,33 @@ static int synaptics_identify(struct psmouse *psmouse)
}
/*
- * Read touchpad resolution
+ * Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char res[3];
+ unsigned char max[3];
if (SYN_ID_MAJOR(priv->identity) < 4)
 return 0;
- if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res))
- return 0;
+ if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
+ if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
+ priv->x_res = res[0]; /* x resolution in units/mm */
+ priv->y_res = res[2]; /* y resolution in units/mm */
+ }
+ }
- if ((res[0] != 0) && (res[1] & 0x80) && (res[2] != 0)) {
- priv->x_res = res[0]; /* x resolution in units/mm */
- priv->y_res = res[2]; /* y resolution in units/mm */
+ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
+ SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
+ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_DIMENSIONS, max)) {
+ printk(KERN_ERR "Synaptics claims to have dimensions query,"
+ " but I'm not able to read it.\n");
+ } else {
+ priv->x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1);
+ priv->y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3);
+ }
}
return 0;
@@ -520,19 +532,20 @@ static int synaptics_validate_byte(unsigned char packet[], int idx, unsigned cha
return 0;
switch (pkt_type) {
- case SYN_NEWABS:
- case SYN_NEWABS_RELAXED:
- return (packet[idx] & newabs_rel_mask[idx]) == newabs_rslt[idx];
- case SYN_NEWABS_STRICT:
- return (packet[idx] & newabs_mask[idx]) == newabs_rslt[idx];
+ case SYN_NEWABS:
+ case SYN_NEWABS_RELAXED:
+ return (packet[idx] & newabs_rel_mask[idx]) == newabs_rslt[idx];
- case SYN_OLDABS:
- return (packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx];
+ case SYN_NEWABS_STRICT:
+ return (packet[idx] & newabs_mask[idx]) == newabs_rslt[idx];
- default:
- printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type);
- return 0;
+ case SYN_OLDABS:
+ return (packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx];
+
+ default:
+ printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type);
+ return 0;
}
}
@@ -578,8 +591,10 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
int i;
__set_bit(EV_ABS, dev->evbit);
- input_set_abs_params(dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0);
- input_set_abs_params(dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0);
+ input_set_abs_params(dev, ABS_X,
+ XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0);
+ input_set_abs_params(dev, ABS_Y,
+ YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
__set_bit(ABS_TOOL_WIDTH, dev->absbit);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index ae37c5d162a..7d4d5e12c0d 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -19,6 +19,7 @@
#define SYN_QUE_RESOLUTION 0x08
#define SYN_QUE_EXT_CAPAB 0x09
#define SYN_QUE_EXT_CAPAB_0C 0x0c
+#define SYN_QUE_EXT_DIMENSIONS 0x0d
/* synatics modes */
#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -51,6 +52,7 @@
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -101,8 +103,8 @@ struct synaptics_data {
unsigned long int ext_cap; /* Extended Capabilities */
unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
unsigned long int identity; /* Identification */
- int x_res; /* X resolution in units/mm */
- int y_res; /* Y resolution in units/mm */
+ unsigned int x_res, y_res; /* X/Y resolution in units/mm */
+ unsigned int x_max, y_max; /* Max dimensions (from FW) */
unsigned char pkt_type; /* packet type - old, new, etc */
unsigned char mode; /* current mode byte */
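SYN_QUE_EXT_DIMENSIONS returns three bytes that synaptics_resolution() above unpacks into the maximum coordinates reported by the firmware. The same bit packing, pulled out into a stand-alone sketch (the helper name and the sample reply bytes are invented for illustration):

#include <stdio.h>

/* Decode the 3-byte extended-dimensions reply the way the patch does:
 * x_max = max[0] << 5 | (max[1] & 0x0f) << 1
 * y_max = max[2] << 5 | (max[1] & 0xf0) >> 3
 */
static void synaptics_decode_max(const unsigned char max[3],
                                 unsigned int *x_max, unsigned int *y_max)
{
        *x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1);
        *y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3);
}

int main(void)
{
        unsigned char reply[3] = { 0xb0, 0x6f, 0x8c };  /* sample bytes only */
        unsigned int x, y;

        synaptics_decode_max(reply, &x, &y);
        printf("x_max=%u y_max=%u\n", x, y);
        return 0;
}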
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 7e319d65ec5..f34f1dbeb57 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -209,4 +209,20 @@ config SERIO_ALTERA_PS2
To compile this driver as a module, choose M here: the
module will be called altera_ps2.
+config SERIO_AMS_DELTA
+ tristate "Amstrad Delta (E3) mailboard support"
+ depends on MACH_AMS_DELTA
+ default y
+ select AMS_DELTA_FIQ
+ ---help---
+ Say Y here if you have an E3 and want to use its mailboard,
+ or any standard AT keyboard connected to the mailboard port.
+
+ When used for the E3 mailboard, a non-standard key table
+ must be loaded from userspace, possibly using the keymap
+ helper utility provided by udev extras.
+
+ To compile this driver as a module, choose M here;
+ the module will be called ams_delta_serio.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index bf945f789d0..84c80bf7185 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
+obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
new file mode 100644
index 00000000000..8f1770e1e08
--- /dev/null
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -0,0 +1,177 @@
+/*
+ * Amstrad E3 (Delta) keyboard port driver
+ *
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Thanks to Cliff Lawson for his help
+ *
+ * The Amstrad Delta keyboard (aka mailboard) uses normal PC-AT style serial
+ * transmission. The keyboard port is formed of two GPIO lines, for clock
+ * and data. Due to strict timing requirements of the interface,
+ * the serial data stream is read and processed by a FIQ handler.
+ * The resulting words are fetched by this driver from a circular buffer.
+ *
+ * The standard AT keyboard driver (atkbd) is used for handling the keyboard data.
+ * However, when used with the E3 mailboard that produces non-standard
+ * scancodes, a custom key table must be prepared and loaded from userspace.
+ */
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include <asm/mach-types.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+MODULE_AUTHOR("Matt Callow");
+MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
+MODULE_LICENSE("GPL");
+
+static struct serio *ams_delta_serio;
+
+static int check_data(int data)
+{
+ int i, parity = 0;
+
+ /* check valid stop bit */
+ if (!(data & 0x400)) {
+ dev_warn(&ams_delta_serio->dev,
+ "invalid stop bit, data=0x%X\n",
+ data);
+ return SERIO_FRAME;
+ }
+ /* calculate the parity */
+ for (i = 1; i < 10; i++) {
+ if (data & (1 << i))
+ parity++;
+ }
+ /* it should be odd */
+ if (!(parity & 0x01)) {
+ dev_warn(&ams_delta_serio->dev,
+ "parity check failed, data=0x%X parity=0x%X\n",
+ data, parity);
+ return SERIO_PARITY;
+ }
+ return 0;
+}
+
+static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
+{
+ int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ int data, dfl;
+ u8 scancode;
+
+ fiq_buffer[FIQ_IRQ_PEND] = 0;
+
+ /*
+ * Read data from the circular buffer, check it
+ * and then pass it on to the serio port
+ */
+ while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+
+ data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
+ fiq_buffer[FIQ_KEYS_CNT]--;
+ if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+
+ dfl = check_data(data);
+ scancode = (u8) (data >> 1) & 0xFF;
+ serio_interrupt(ams_delta_serio, scancode, dfl);
+ }
+ return IRQ_HANDLED;
+}
+
+static int ams_delta_serio_open(struct serio *serio)
+{
+ /* enable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
+ AMD_DELTA_LATCH2_KEYBRD_PWR);
+
+ return 0;
+}
+
+static void ams_delta_serio_close(struct serio *serio)
+{
+ /* disable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+}
+
+static int __init ams_delta_serio_init(void)
+{
+ int err;
+
+ if (!machine_is_ams_delta())
+ return -ENODEV;
+
+ ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!ams_delta_serio)
+ return -ENOMEM;
+
+ ams_delta_serio->id.type = SERIO_8042;
+ ams_delta_serio->open = ams_delta_serio_open;
+ ams_delta_serio->close = ams_delta_serio_close;
+ strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
+ sizeof(ams_delta_serio->name));
+ strlcpy(ams_delta_serio->phys, "GPIO/serio0",
+ sizeof(ams_delta_serio->phys));
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+ if (err) {
+ pr_err("ams_delta_serio: Couldn't request gpio pin for data\n");
+ goto serio;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
+ if (err) {
+ pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
+ goto gpio_data;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+
+ err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
+ "ams-delta-serio", 0);
+ if (err < 0) {
+ pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
+ goto gpio_clk;
+ }
+ /*
+ * Since GPIO register handling for keyboard clock pin is performed
+ * at FIQ level, switch back from edge to simple interrupt handler
+ * to avoid bad interaction.
+ */
+ set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ handle_simple_irq);
+
+ serio_register_port(ams_delta_serio);
+ dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+
+ return 0;
+gpio_clk:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+gpio_data:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+serio:
+ kfree(ams_delta_serio);
+ return err;
+}
+module_init(ams_delta_serio_init);
+
+static void __exit ams_delta_serio_exit(void)
+{
+ serio_unregister_port(ams_delta_serio);
+ free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+ kfree(ams_delta_serio);
+}
+module_exit(ams_delta_serio_exit);
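check_data() above relies on the word layout the FIQ handler leaves in the circular buffer: bit 0 is the AT start bit, bits 1-8 carry the scancode, bit 9 is the odd-parity bit and bit 10 the stop bit. A stand-alone sketch of the same decode (the function name and the sample word are invented for illustration):

#include <stdio.h>

/* Decode one 11-bit AT frame the way check_data()/the interrupt handler do:
 * bit 0 = start, bits 1..8 = scancode, bit 9 = odd parity, bit 10 = stop. */
static int at_frame_decode(int word, unsigned char *scancode)
{
        int i, ones = 0;

        if (!(word & 0x400))            /* stop bit must be set */
                return -1;

        for (i = 1; i < 10; i++)        /* 8 data bits plus the parity bit */
                if (word & (1 << i))
                        ones++;

        if (!(ones & 1))                /* their combined count must be odd */
                return -1;

        *scancode = (word >> 1) & 0xff;
        return 0;
}

int main(void)
{
        /* stop bit set, scancode 0x1c, parity 0 (0x1c already has an odd bit count) */
        int word = 0x400 | (0x1c << 1);
        unsigned char code;

        if (at_frame_decode(word, &code) == 0)
                printf("scancode 0x%02x\n", code);
        return 0;
}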
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 5071af2c060..04e32f2d124 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -51,7 +51,7 @@ static inline void i8042_write_command(int val)
static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
dp = dp->child;
while (dp) {
@@ -96,8 +96,11 @@ static const struct of_device_id sparc_i8042_match[] = {
MODULE_DEVICE_TABLE(of, sparc_i8042_match);
static struct of_platform_driver sparc_i8042_driver = {
- .name = "i8042",
- .match_table = sparc_i8042_match,
+ .driver = {
+ .name = "i8042",
+ .owner = THIS_MODULE,
+ .of_match_table = sparc_i8042_match,
+ },
.probe = sparc_i8042_probe,
.remove = __devexit_p(sparc_i8042_remove),
};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ead0494721d..6168469ad1a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -660,8 +660,21 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
}
static struct pnp_device_id pnp_kbd_devids[] = {
+ { .id = "PNP0300", .driver_data = 0 },
+ { .id = "PNP0301", .driver_data = 0 },
+ { .id = "PNP0302", .driver_data = 0 },
{ .id = "PNP0303", .driver_data = 0 },
+ { .id = "PNP0304", .driver_data = 0 },
+ { .id = "PNP0305", .driver_data = 0 },
+ { .id = "PNP0306", .driver_data = 0 },
+ { .id = "PNP0309", .driver_data = 0 },
+ { .id = "PNP030a", .driver_data = 0 },
{ .id = "PNP030b", .driver_data = 0 },
+ { .id = "PNP0320", .driver_data = 0 },
+ { .id = "PNP0343", .driver_data = 0 },
+ { .id = "PNP0344", .driver_data = 0 },
+ { .id = "PNP0345", .driver_data = 0 },
+ { .id = "CPQA0D7", .driver_data = 0 },
{ .id = "", },
};
@@ -672,6 +685,7 @@ static struct pnp_driver i8042_pnp_kbd_driver = {
};
static struct pnp_device_id pnp_aux_devids[] = {
+ { .id = "AUI0200", .driver_data = 0 },
{ .id = "FJC6000", .driver_data = 0 },
{ .id = "FJC6001", .driver_data = 0 },
{ .id = "PNP0f03", .driver_data = 0 },
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index f84f8e32e3f..e2c028d2638 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -244,17 +244,17 @@ static int __devinit xps2_of_probe(struct of_device *ofdev,
int error;
dev_info(dev, "Device Tree Probing \'%s\'\n",
- ofdev->node->name);
+ ofdev->dev.of_node->name);
/* Get iospace for the device */
- error = of_address_to_resource(ofdev->node, 0, &r_mem);
+ error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
if (error) {
dev_err(dev, "invalid address\n");
return error;
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->node, 0, &r_irq) == NO_IRQ) {
+ if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -342,7 +342,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev)
iounmap(drvdata->base_address);
/* Get iospace of the device */
- if (of_address_to_resource(of_dev->node, 0, &r_mem))
+ if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
dev_err(dev, "invalid address\n");
else
release_mem_region(r_mem.start, resource_size(&r_mem));
@@ -362,8 +362,11 @@ static const struct of_device_id xps2_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, xps2_of_match);
static struct of_platform_driver xps2_of_driver = {
- .name = DRIVER_NAME,
- .match_table = xps2_of_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xps2_of_match,
+ },
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 670c61c5a51..aea9a9399a3 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -66,18 +66,18 @@ static void usb_acecad_irq(struct urb *urb)
int prox, status;
switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, urb->status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, urb->status);
- goto resubmit;
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ goto resubmit;
}
prox = (data[0] & 0x04) >> 2;
@@ -135,7 +135,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
struct usb_acecad *acecad;
struct input_dev *input_dev;
int pipe, maxp;
- int err = -ENOMEM;
+ int err;
if (interface->desc.bNumEndpoints != 1)
return -ENODEV;
@@ -155,7 +155,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
goto fail1;
}
- acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
+ acecad->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &acecad->data_dma);
if (!acecad->data) {
err= -ENOMEM;
goto fail1;
@@ -193,40 +193,34 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
input_dev->close = usb_acecad_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
- BIT_MASK(ABS_PRESSURE);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
input_dev->keybit[BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_TOOL_PEN) |
BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_STYLUS2);
switch (id->driver_info) {
- case 0:
- input_dev->absmax[ABS_X] = 5000;
- input_dev->absmax[ABS_Y] = 3750;
- input_dev->absmax[ABS_PRESSURE] = 512;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad Flair Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
- case 1:
- input_dev->absmax[ABS_X] = 3000;
- input_dev->absmax[ABS_Y] = 2250;
- input_dev->absmax[ABS_PRESSURE] = 1024;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad 302 Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
+ case 0:
+ input_set_abs_params(input_dev, ABS_X, 0, 5000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 3750, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 512, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad Flair Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
+
+ case 1:
+ input_set_abs_params(input_dev, ABS_X, 0, 53000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 2250, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1024, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad 302 Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
}
- input_dev->absfuzz[ABS_X] = 4;
- input_dev->absfuzz[ABS_Y] = 4;
-
usb_fill_int_urb(acecad->irq, dev, pipe,
acecad->data, maxp > 8 ? 8 : maxp,
usb_acecad_irq, acecad, endpoint->bInterval);
@@ -241,7 +235,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
return 0;
- fail2: usb_buffer_free(dev, 8, acecad->data, acecad->data_dma);
+ fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
fail1: input_free_device(input_dev);
kfree(acecad);
return err;
@@ -252,13 +246,11 @@ static void usb_acecad_disconnect(struct usb_interface *intf)
struct usb_acecad *acecad = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (acecad) {
- usb_kill_urb(acecad->irq);
- input_unregister_device(acecad->input);
- usb_free_urb(acecad->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, acecad->data, acecad->data_dma);
- kfree(acecad);
- }
+
+ input_unregister_device(acecad->input);
+ usb_free_urb(acecad->irq);
+ usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+ kfree(acecad);
}
static struct usb_device_id usb_acecad_id_table [] = {
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 4be039d7dca..51b80b08d46 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1711,8 +1711,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail1;
}
- aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
- GFP_ATOMIC, &aiptek->data_dma);
+ aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
+ GFP_ATOMIC, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
@@ -1884,8 +1884,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
- fail2: usb_buffer_free(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
- aiptek->data_dma);
+ fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
+ aiptek->data_dma);
fail1: usb_set_intfdata(intf, NULL);
input_free_device(inputdev);
kfree(aiptek);
@@ -1909,9 +1909,9 @@ static void aiptek_disconnect(struct usb_interface *intf)
input_unregister_device(aiptek->inputdev);
sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
usb_free_urb(aiptek->urb);
- usb_buffer_free(interface_to_usbdev(intf),
- AIPTEK_PACKET_LENGTH,
- aiptek->data, aiptek->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf),
+ AIPTEK_PACKET_LENGTH,
+ aiptek->data, aiptek->data_dma);
kfree(aiptek);
}
}
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 866a9ee1af1..8ea6afe2e99 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -850,8 +850,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
/* Allocate some data for incoming reports */
- gtco->buffer = usb_buffer_alloc(gtco->usbdev, REPORT_MAX_SIZE,
- GFP_KERNEL, &gtco->buf_dma);
+ gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ GFP_KERNEL, &gtco->buf_dma);
if (!gtco->buffer) {
err("No more memory for us buffers");
error = -ENOMEM;
@@ -982,8 +982,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
err_free_urb:
usb_free_urb(gtco->urbinfo);
err_free_buf:
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
err_free_devs:
input_free_device(input_dev);
kfree(gtco);
@@ -1005,8 +1005,8 @@ static void gtco_disconnect(struct usb_interface *interface)
input_unregister_device(gtco->inputdevice);
usb_kill_urb(gtco->urbinfo);
usb_free_urb(gtco->urbinfo);
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
kfree(gtco);
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 6682b17bf84..290f4e57b58 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -34,10 +34,6 @@ struct kbtab {
struct input_dev *dev;
struct usb_device *usbdev;
struct urb *irq;
- int x, y;
- int button;
- int pressure;
- __u32 serial[2];
char phys[32];
};
@@ -46,6 +42,7 @@ static void kbtab_irq(struct urb *urb)
struct kbtab *kbtab = urb->context;
unsigned char *data = kbtab->data;
struct input_dev *dev = kbtab->dev;
+ int pressure;
int retval;
switch (urb->status) {
@@ -63,31 +60,27 @@ static void kbtab_irq(struct urb *urb)
goto exit;
}
- kbtab->x = get_unaligned_le16(&data[1]);
- kbtab->y = get_unaligned_le16(&data[3]);
-
- kbtab->pressure = (data[5]);
input_report_key(dev, BTN_TOOL_PEN, 1);
- input_report_abs(dev, ABS_X, kbtab->x);
- input_report_abs(dev, ABS_Y, kbtab->y);
+ input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3]));
/*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/
input_report_key(dev, BTN_RIGHT, data[0] & 0x02);
- if (-1 == kb_pressure_click) {
- input_report_abs(dev, ABS_PRESSURE, kbtab->pressure);
- } else {
- input_report_key(dev, BTN_LEFT, (kbtab->pressure > kb_pressure_click) ? 1 : 0);
- };
+ pressure = data[5];
+ if (kb_pressure_click == -1)
+ input_report_abs(dev, ABS_PRESSURE, pressure);
+ else
+ input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 1 : 0);
input_sync(dev);
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
- err ("%s - usb_submit_urb failed with result %d",
+ err("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
@@ -129,7 +122,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!kbtab || !input_dev)
goto fail1;
- kbtab->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &kbtab->data_dma);
+ kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma);
if (!kbtab->data)
goto fail1;
@@ -153,13 +146,11 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_dev->open = kbtab_open;
input_dev->close = kbtab_close;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_MSC);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_TOUCH);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_LEFT)] |=
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |=
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
@@ -182,7 +173,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
return 0;
fail3: usb_free_urb(kbtab->irq);
- fail2: usb_buffer_free(dev, 10, kbtab->data, kbtab->data_dma);
+ fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma);
fail1: input_free_device(input_dev);
kfree(kbtab);
return error;
@@ -193,13 +184,11 @@ static void kbtab_disconnect(struct usb_interface *intf)
struct kbtab *kbtab = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (kbtab) {
- usb_kill_urb(kbtab->irq);
- input_unregister_device(kbtab->dev);
- usb_free_urb(kbtab->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, kbtab->data, kbtab->data_dma);
- kfree(kbtab);
- }
+
+ input_unregister_device(kbtab->dev);
+ usb_free_urb(kbtab->irq);
+ usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+ kfree(kbtab);
}
static struct usb_driver kbtab_driver = {
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 8fef1b689c6..284dfaab6b8 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -106,44 +106,18 @@ MODULE_LICENSE(DRIVER_LICENSE);
struct wacom {
dma_addr_t data_dma;
- struct input_dev *dev;
struct usb_device *usbdev;
struct usb_interface *intf;
struct urb *irq;
- struct wacom_wac *wacom_wac;
+ struct wacom_wac wacom_wac;
struct mutex lock;
- unsigned int open:1;
+ bool open;
char phys[32];
};
-struct wacom_combo {
- struct wacom *wacom;
- struct urb *urb;
-};
-
extern const struct usb_device_id wacom_ids[];
-extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo);
-extern void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data);
-extern void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data);
-extern void wacom_report_key(void *wcombo, unsigned int key_type, int key_data);
-extern void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value);
-extern void wacom_input_sync(void *wcombo);
-extern void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern __u16 wacom_le16_to_cpu(unsigned char *data);
-extern __u16 wacom_be16_to_cpu(unsigned char *data);
-
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index f46502589e4..2dc0c07c046 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#include "wacom.h"
#include "wacom_wac.h"
+#include "wacom.h"
/* defines to get HID report descriptor */
#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01)
@@ -70,15 +70,9 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
buf, size, 1000);
}
-static struct input_dev * get_input_dev(struct wacom_combo *wcombo)
-{
- return wcombo->wacom->dev;
-}
-
static void wacom_sys_irq(struct urb *urb)
{
struct wacom *wacom = urb->context;
- struct wacom_combo wcombo;
int retval;
switch (urb->status) {
@@ -96,59 +90,16 @@ static void wacom_sys_irq(struct urb *urb)
goto exit;
}
- wcombo.wacom = wacom;
- wcombo.urb = urb;
-
- if (wacom_wac_irq(wacom->wacom_wac, (void *)&wcombo))
- input_sync(get_input_dev(&wcombo));
+ wacom_wac_irq(&wacom->wacom_wac, urb->actual_length);
exit:
usb_mark_last_busy(wacom->usbdev);
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
-void wacom_report_key(void *wcombo, unsigned int key_type, int key_data)
-{
- input_report_key(get_input_dev((struct wacom_combo *)wcombo), key_type, key_data);
-}
-
-void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data)
-{
- input_report_abs(get_input_dev((struct wacom_combo *)wcombo), abs_type, abs_data);
-}
-
-void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data)
-{
- input_report_rel(get_input_dev((struct wacom_combo *)wcombo), rel_type, rel_data);
-}
-
-void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value)
-{
- input_event(get_input_dev((struct wacom_combo *)wcombo), type, code, value);
-}
-
-__u16 wacom_be16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = be16_to_cpu(*(__be16 *) data);
- return value;
-}
-
-__u16 wacom_le16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = le16_to_cpu(*(__le16 *) data);
- return value;
-}
-
-void wacom_input_sync(void *wcombo)
-{
- input_sync(get_input_dev((struct wacom_combo *)wcombo));
-}
-
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
@@ -168,7 +119,7 @@ static int wacom_open(struct input_dev *dev)
return -EIO;
}
- wacom->open = 1;
+ wacom->open = true;
wacom->intf->needs_remote_wakeup = 1;
mutex_unlock(&wacom->lock);
@@ -181,128 +132,11 @@ static void wacom_close(struct input_dev *dev)
mutex_lock(&wacom->lock);
usb_kill_urb(wacom->irq);
- wacom->open = 0;
+ wacom->open = false;
wacom->intf->needs_remote_wakeup = 0;
mutex_unlock(&wacom->lock);
}
-void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_1) |
- BIT_MASK(BTN_5);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
-}
-
-void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_4);
-}
-
-void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_REL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
-}
-
-void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) |
- BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
-}
-
-void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) | BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) | BIT_MASK(BTN_5) | BIT_MASK(BTN_6);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
-}
-
-void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_8) | BIT_MASK(BTN_9);
-}
-
-void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC) | BIT_MASK(EV_REL);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
- BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
- BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
- BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
- input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
-}
-
-void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
-}
-
-void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
-}
-
-void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- struct wacom_features *features = &wacom_wac->features;
-
- if (features->device_type == BTN_TOOL_DOUBLETAP ||
- features->device_type == BTN_TOOL_TRIPLETAP) {
- input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- }
-}
-
-void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- if (wacom_wac->features.device_type == BTN_TOOL_TRIPLETAP) {
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- }
-}
-
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
@@ -362,9 +196,9 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->device_type = BTN_TOOL_TRIPLETAP;
}
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->x_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
@@ -374,7 +208,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (usage == WCM_DIGITIZER) {
@@ -396,15 +230,15 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_TPC2FG;
features->device_type = BTN_TOOL_TRIPLETAP;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->y_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
i += 7;
} else {
features->y_max =
features->x_max;
features->y_phy =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (pen) {
@@ -413,7 +247,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
}
@@ -432,7 +266,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_UNDEFINED:
if (usage == WCM_DESKTOP && finger) /* capacity */
features->pressure_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
break;
}
@@ -528,6 +362,81 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
return error;
}
+struct wacom_usbdev_data {
+ struct list_head list;
+ struct kref kref;
+ struct usb_device *dev;
+ struct wacom_shared shared;
+};
+
+static LIST_HEAD(wacom_udev_list);
+static DEFINE_MUTEX(wacom_udev_list_lock);
+
+static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+
+ list_for_each_entry(data, &wacom_udev_list, list) {
+ if (data->dev == dev) {
+ kref_get(&data->kref);
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
+static int wacom_add_shared_data(struct wacom_wac *wacom,
+ struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+ int retval = 0;
+
+ mutex_lock(&wacom_udev_list_lock);
+
+ data = wacom_get_usbdev_data(dev);
+ if (!data) {
+ data = kzalloc(sizeof(struct wacom_usbdev_data), GFP_KERNEL);
+ if (!data) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&data->kref);
+ data->dev = dev;
+ list_add_tail(&data->list, &wacom_udev_list);
+ }
+
+ wacom->shared = &data->shared;
+
+out:
+ mutex_unlock(&wacom_udev_list_lock);
+ return retval;
+}
+
+static void wacom_release_shared_data(struct kref *kref)
+{
+ struct wacom_usbdev_data *data =
+ container_of(kref, struct wacom_usbdev_data, kref);
+
+ mutex_lock(&wacom_udev_list_lock);
+ list_del(&data->list);
+ mutex_unlock(&wacom_udev_list_lock);
+
+ kfree(data);
+}
+
+static void wacom_remove_shared_data(struct wacom_wac *wacom)
+{
+ struct wacom_usbdev_data *data;
+
+ if (wacom->shared) {
+ data = container_of(wacom->shared, struct wacom_usbdev_data, shared);
+ kref_put(&data->kref, wacom_release_shared_data);
+ wacom->shared = NULL;
+ }
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -542,13 +451,13 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
return -EINVAL;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
input_dev = input_allocate_device();
- if (!wacom || !input_dev || !wacom_wac) {
+ if (!wacom || !input_dev) {
error = -ENOMEM;
goto fail1;
}
+ wacom_wac = &wacom->wacom_wac;
wacom_wac->features = *((struct wacom_features *)id->driver_info);
features = &wacom_wac->features;
if (features->pktlen > WACOM_PKGLEN_MAX) {
@@ -556,8 +465,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
goto fail1;
}
- wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX,
- GFP_KERNEL, &wacom->data_dma);
+ wacom_wac->data = usb_alloc_coherent(dev, WACOM_PKGLEN_MAX,
+ GFP_KERNEL, &wacom->data_dma);
if (!wacom_wac->data) {
error = -ENOMEM;
goto fail1;
@@ -570,20 +479,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
wacom->usbdev = dev;
- wacom->dev = input_dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
- usb_to_input_id(dev, &input_dev->id);
-
- input_dev->dev.parent = &intf->dev;
-
- input_set_drvdata(input_dev, wacom);
-
- input_dev->open = wacom_open;
- input_dev->close = wacom_close;
+ wacom_wac->input = input_dev;
endpoint = &intf->cur_altsetting->endpoint[0].desc;
@@ -600,20 +501,21 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
features->device_type == BTN_TOOL_PEN ?
" Pen" : " Finger",
sizeof(wacom_wac->name));
+
+ error = wacom_add_shared_data(wacom_wac, dev);
+ if (error)
+ goto fail3;
}
input_dev->name = wacom_wac->name;
- wacom->wacom_wac = wacom_wac;
-
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
- input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+ input_dev->name = wacom_wac->name;
+ input_dev->dev.parent = &intf->dev;
+ input_dev->open = wacom_open;
+ input_dev->close = wacom_close;
+ usb_to_input_id(dev, &input_dev->id);
+ input_set_drvdata(input_dev, wacom);
- wacom_init_input_dev(input_dev, wacom_wac);
+ wacom_setup_input_capabilities(input_dev, wacom_wac);
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
@@ -622,9 +524,9 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- error = input_register_device(wacom->dev);
+ error = input_register_device(input_dev);
if (error)
- goto fail3;
+ goto fail4;
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(intf, features);
@@ -632,11 +534,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
usb_set_intfdata(intf, wacom);
return 0;
+ fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
- fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
+ fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
- kfree(wacom_wac);
return error;
}
@@ -647,11 +549,11 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_kill_urb(wacom->irq);
- input_unregister_device(wacom->dev);
+ input_unregister_device(wacom->wacom_wac.input);
usb_free_urb(wacom->irq);
- usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
- wacom->wacom_wac->data, wacom->data_dma);
- kfree(wacom->wacom_wac);
+ usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
+ wacom->wacom_wac.data, wacom->data_dma);
+ wacom_remove_shared_data(&wacom->wacom_wac);
kfree(wacom);
}
@@ -669,7 +571,7 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
- struct wacom_features *features = &wacom->wacom_wac->features;
+ struct wacom_features *features = &wacom->wacom_wac.features;
int rv;
mutex_lock(&wacom->lock);
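wacom_add_shared_data() and wacom_remove_shared_data() above give the pen and touch interfaces of one tablet a single wacom_shared record: it is looked up by USB device in a global list and reference-counted with a kref, so it is created on first use and freed with its last user. A user-space model of the same pattern (plain counter and list instead of kref and mutex, all names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* One shared record per "device" key, created on first lookup and
 * released when the last user puts it back. */
struct shared_rec {
        struct shared_rec *next;
        const void *key;        /* stands in for the struct usb_device pointer */
        int refcount;           /* stands in for the kref */
        int touch_x, touch_y;   /* stands in for struct wacom_shared */
};

static struct shared_rec *shared_list;

static struct shared_rec *shared_get(const void *key)
{
        struct shared_rec *r;

        for (r = shared_list; r; r = r->next)
                if (r->key == key) {
                        r->refcount++;
                        return r;
                }

        r = calloc(1, sizeof(*r));
        if (!r)
                return NULL;
        r->key = key;
        r->refcount = 1;
        r->next = shared_list;
        shared_list = r;
        return r;
}

static void shared_put(struct shared_rec *rec)
{
        struct shared_rec **p;

        if (--rec->refcount)
                return;

        for (p = &shared_list; *p; p = &(*p)->next)
                if (*p == rec) {
                        *p = rec->next;
                        break;
                }
        free(rec);
}

int main(void)
{
        int dev;        /* any unique address serves as the key */
        struct shared_rec *pen = shared_get(&dev);
        struct shared_rec *touch = shared_get(&dev);

        printf("same record: %s, refcount=%d\n",
               pen == touch ? "yes" : "no", pen->refcount);

        shared_put(touch);
        shared_put(pen);
        return 0;
}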
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 4a852d815c6..847fd0135bc 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -11,52 +11,58 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include "wacom.h"
+
#include "wacom_wac.h"
+#include "wacom.h"
-static int wacom_penpartner_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
switch (data[0]) {
- case 1:
- if (data[5] & 0x80) {
- wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
- wacom_report_key(wcombo, wacom->tool[0], 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -127));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- } else {
- wacom_report_key(wcombo, wacom->tool[0], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* report tool id */
- wacom_report_abs(wcombo, ABS_PRESSURE, -1);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- }
- break;
- case 2:
- wacom_report_key(wcombo, BTN_TOOL_PEN, 1);
- wacom_report_abs(wcombo, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- break;
- default:
- printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
- return 0;
+ case 1:
+ if (data[5] & 0x80) {
+ wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
+ input_report_key(input, wacom->tool[0], 1);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -127));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ } else {
+ input_report_key(input, wacom->tool[0], 0);
+ input_report_abs(input, ABS_MISC, 0); /* report tool id */
+ input_report_abs(input, ABS_PRESSURE, -1);
+ input_report_key(input, BTN_TOUCH, 0);
+ }
+ break;
+
+ case 2:
+ input_report_key(input, BTN_TOOL_PEN, 1);
+ input_report_abs(input, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ break;
+
+ default:
+ printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
+ return 0;
}
+
return 1;
}
-static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_pl_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
@@ -90,8 +96,8 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
/* was entered with stylus2 pressed */
if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
/* report out proximity for previous tool */
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_input_sync(wcombo);
+ input_report_key(input, wacom->tool[1], 0);
+ input_sync(input);
wacom->tool[1] = BTN_TOOL_PEN;
return 0;
}
@@ -101,32 +107,33 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_PEN;
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_key(wcombo, wacom->tool[1], prox); /* report in proximity for tool */
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
-
- wacom_report_key(wcombo, BTN_TOUCH, data[4] & 0x08);
- wacom_report_key(wcombo, BTN_STYLUS, data[4] & 0x10);
+ input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
+
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
/* Only allow the stylus2 button to be reported for the pen tool. */
- wacom_report_key(wcombo, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
} else {
/* report proximity-out of a (valid) tool */
if (wacom->tool[1] != BTN_TOOL_RUBBER) {
/* Unknown tool selected default to pen tool */
wacom->tool[1] = BTN_TOOL_PEN;
}
- wacom_report_key(wcombo, wacom->tool[1], prox);
+ input_report_key(input, wacom->tool[1], prox);
}
wacom->tool[0] = prox; /* Save proximity state */
return 1;
}
-static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_ptu_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
if (data[0] != WACOM_REPORT_PENABLED) {
printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
@@ -134,40 +141,41 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
}
if (data[1] & 0x04) {
- wacom_report_key(wcombo, BTN_TOOL_RUBBER, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x08);
+ input_report_key(input, BTN_TOOL_RUBBER, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x08);
wacom->id[0] = ERASER_DEVICE_ID;
} else {
- wacom_report_key(wcombo, BTN_TOOL_PEN, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_TOOL_PEN, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
return 1;
}
-static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- int x, y, rw;
- static int penData = 0;
+ struct input_dev *input = wacom->input;
+ int prox;
+ int rw = 0;
+ int retval = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
- return 0;
+ goto exit;
}
- if (data[1] & 0x80) {
- /* in prox and not a pad data */
- penData = 1;
-
- switch ((data[1] >> 5) & 3) {
+ prox = data[1] & 0x80;
+ if (prox || wacom->id[0]) {
+ if (prox) {
+ switch ((data[1] >> 5) & 3) {
case 0: /* Pen */
wacom->tool[0] = BTN_TOOL_PEN;
@@ -180,128 +188,89 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
break;
case 2: /* Mouse with wheel */
- wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
- if (features->type == WACOM_G4 || features->type == WACOM_MO) {
- rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
- wacom_report_rel(wcombo, REL_WHEEL, -rw);
- } else
- wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
+ input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
/* fall through */
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
wacom->id[0] = CURSOR_DEVICE_ID;
- wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
- if (features->type == WACOM_G4 || features->type == WACOM_MO)
- wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
- else
- wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
break;
+ }
}
- x = wacom_le16_to_cpu(&data[2]);
- y = wacom_le16_to_cpu(&data[4]);
- wacom_report_abs(wcombo, ABS_X, x);
- wacom_report_abs(wcombo, ABS_Y, y);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- wacom_report_abs(wcombo, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
- }
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[0], 1);
- } else if (wacom->id[0]) {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- if (wacom->tool[0] == BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_report_key(input, BTN_RIGHT, data[1] & 0x02);
+ if (features->type == WACOM_G4 ||
+ features->type == WACOM_MO) {
+ input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
+ rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ } else {
+ input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
+ rw = -(signed)data[6];
+ }
+ input_report_rel(input, REL_WHEEL, rw);
}
- wacom->id[0] = 0;
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_report_key(wcombo, wacom->tool[0], 0);
+
+ if (!prox)
+ wacom->id[0] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_key(input, wacom->tool[0], prox);
+ input_sync(input); /* sync last event */
}
/* send pad data */
switch (features->type) {
- case WACOM_G4:
- if (data[7] & 0xf8) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+ case WACOM_G4:
+ prox = data[7] & 0xf8;
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ input_report_key(input, BTN_0, (data[7] & 0x40));
+ input_report_key(input, BTN_4, (data[7] & 0x80));
rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
- wacom_report_rel(wcombo, REL_WHEEL, rw);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
- wacom_report_rel(wcombo, REL_WHEEL, 0);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_rel(input, REL_WHEEL, rw);
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ retval = 1;
}
break;
- case WACOM_MO:
- if ((data[7] & 0xf8) || (data[8] & 0xff)) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+
+ case WACOM_MO:
+ prox = (data[7] & 0xf8) || data[8];
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_key(input, BTN_0, (data[7] & 0x08));
+ input_report_key(input, BTN_1, (data[7] & 0x20));
+ input_report_key(input, BTN_4, (data[7] & 0x10));
+ input_report_key(input, BTN_5, (data[7] & 0x40));
+ input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
}
+ retval = 1;
break;
}
- return 1;
+exit:
+ return retval;
}
-static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int idx = 0;
/* tool number */
@@ -316,64 +285,73 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4);
+
switch (wacom->id[idx]) {
- case 0x812: /* Inking pen */
- case 0x801: /* Intuos3 Inking pen */
- case 0x20802: /* Intuos4 Classic Pen */
- case 0x012:
- wacom->tool[idx] = BTN_TOOL_PENCIL;
- break;
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4 Grip Pen Eraser */
- case 0x804: /* Intuos4 Marker Pen */
- case 0x40802: /* Intuos4 Classic Pen */
- case 0x022:
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
- case 0x832: /* Stroke pen */
- case 0x032:
- wacom->tool[idx] = BTN_TOOL_BRUSH;
- break;
- case 0x007: /* Mouse 4D and 2D */
- case 0x09c:
- case 0x094:
- case 0x017: /* Intuos3 2D Mouse */
- case 0x806: /* Intuos4 Mouse */
- wacom->tool[idx] = BTN_TOOL_MOUSE;
- break;
- case 0x096: /* Lens cursor */
- case 0x097: /* Intuos3 Lens cursor */
- case 0x006: /* Intuos4 Lens cursor */
- wacom->tool[idx] = BTN_TOOL_LENS;
- break;
- case 0x82a: /* Eraser */
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4 Marker Pen Eraser */
- case 0x80a: /* Intuos4 Grip Pen Eraser */
- case 0x4080a: /* Intuos4 Classic Pen Eraser */
- case 0x90a: /* Intuos4 Airbrush Eraser */
- wacom->tool[idx] = BTN_TOOL_RUBBER;
- break;
- case 0xd12:
- case 0x912:
- case 0x112:
- case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4 Airbrush */
- wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
- break;
- default: /* Unknown tool */
- wacom->tool[idx] = BTN_TOOL_PEN;
+ case 0x812: /* Inking pen */
+ case 0x801: /* Intuos3 Inking pen */
+ case 0x20802: /* Intuos4 Classic Pen */
+ case 0x012:
+ wacom->tool[idx] = BTN_TOOL_PENCIL;
+ break;
+
+ case 0x822: /* Pen */
+ case 0x842:
+ case 0x852:
+ case 0x823: /* Intuos3 Grip Pen */
+ case 0x813: /* Intuos3 Classic Pen */
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x802: /* Intuos4 Grip Pen Eraser */
+ case 0x804: /* Intuos4 Marker Pen */
+ case 0x40802: /* Intuos4 Classic Pen */
+ case 0x022:
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
+
+ case 0x832: /* Stroke pen */
+ case 0x032:
+ wacom->tool[idx] = BTN_TOOL_BRUSH;
+ break;
+
+ case 0x007: /* Mouse 4D and 2D */
+ case 0x09c:
+ case 0x094:
+ case 0x017: /* Intuos3 2D Mouse */
+ case 0x806: /* Intuos4 Mouse */
+ wacom->tool[idx] = BTN_TOOL_MOUSE;
+ break;
+
+ case 0x096: /* Lens cursor */
+ case 0x097: /* Intuos3 Lens cursor */
+ case 0x006: /* Intuos4 Lens cursor */
+ wacom->tool[idx] = BTN_TOOL_LENS;
+ break;
+
+ case 0x82a: /* Eraser */
+ case 0x85a:
+ case 0x91a:
+ case 0xd1a:
+ case 0x0fa:
+ case 0x82b: /* Intuos3 Grip Pen Eraser */
+ case 0x81b: /* Intuos3 Classic Pen Eraser */
+ case 0x91b: /* Intuos3 Airbrush Eraser */
+ case 0x80c: /* Intuos4 Marker Pen Eraser */
+ case 0x80a: /* Intuos4 Grip Pen Eraser */
+ case 0x4080a: /* Intuos4 Classic Pen Eraser */
+ case 0x90a: /* Intuos4 Airbrush Eraser */
+ wacom->tool[idx] = BTN_TOOL_RUBBER;
+ break;
+
+ case 0xd12:
+ case 0x912:
+ case 0x112:
+ case 0x913: /* Intuos3 Airbrush */
+ case 0x902: /* Intuos4 Airbrush */
+ wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
+ break;
+
+ default: /* Unknown tool */
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
}
return 1;
}
@@ -384,41 +362,42 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
* Reset all states otherwise we lose the initial states
* when in-prox next time
*/
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
- wacom_report_abs(wcombo, ABS_TILT_X, 0);
- wacom_report_abs(wcombo, ABS_TILT_Y, 0);
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_TILT_X, 0);
+ input_report_abs(input, ABS_TILT_Y, 0);
if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_MIDDLE, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_key(wcombo, BTN_SIDE, 0);
- wacom_report_key(wcombo, BTN_EXTRA, 0);
- wacom_report_abs(wcombo, ABS_THROTTLE, 0);
- wacom_report_abs(wcombo, ABS_RZ, 0);
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_SIDE, 0);
+ input_report_key(input, BTN_EXTRA, 0);
+ input_report_abs(input, ABS_THROTTLE, 0);
+ input_report_abs(input, ABS_RZ, 0);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
if (features->type >= INTUOS3S)
- wacom_report_abs(wcombo, ABS_Z, 0);
+ input_report_abs(input, ABS_Z, 0);
}
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_key(input, wacom->tool[idx], 0);
+ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->id[idx] = 0;
return 2;
}
return 0;
}
-static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
+static void wacom_intuos_general(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
/* general pen packet */
@@ -426,30 +405,30 @@ static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if (features->type >= INTUOS4S && features->type <= INTUOS4L)
t = (t << 1) | (data[1] & 1);
- wacom_report_abs(wcombo, ABS_PRESSURE, t);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_PRESSURE, t);
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 2);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 4);
- wacom_report_key(wcombo, BTN_TOUCH, t > 10);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_key(input, BTN_STYLUS, data[1] & 2);
+ input_report_key(input, BTN_STYLUS2, data[1] & 4);
+ input_report_key(input, BTN_TOUCH, t > 10);
}
/* airbrush second packet */
if ((data[1] & 0xbc) == 0xb4) {
- wacom_report_abs(wcombo, ABS_WHEEL,
+ input_report_abs(input, ABS_WHEEL,
(data[6] << 2) | ((data[7] >> 6) & 3));
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
}
- return;
}
-static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
int idx = 0, result;
@@ -470,61 +449,61 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_FINGER;
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_0, (data[2] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[3] & 0x01));
- wacom_report_key(wcombo, BTN_2, (data[3] & 0x02));
- wacom_report_key(wcombo, BTN_3, (data[3] & 0x04));
- wacom_report_key(wcombo, BTN_4, (data[3] & 0x08));
- wacom_report_key(wcombo, BTN_5, (data[3] & 0x10));
- wacom_report_key(wcombo, BTN_6, (data[3] & 0x20));
+ input_report_key(input, BTN_0, (data[2] & 0x01));
+ input_report_key(input, BTN_1, (data[3] & 0x01));
+ input_report_key(input, BTN_2, (data[3] & 0x02));
+ input_report_key(input, BTN_3, (data[3] & 0x04));
+ input_report_key(input, BTN_4, (data[3] & 0x08));
+ input_report_key(input, BTN_5, (data[3] & 0x10));
+ input_report_key(input, BTN_6, (data[3] & 0x20));
if (data[1] & 0x80) {
- wacom_report_abs(wcombo, ABS_WHEEL, (data[1] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
} else {
/* Out of proximity, clear wheel value. */
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
}
if (features->type != INTUOS4S) {
- wacom_report_key(wcombo, BTN_7, (data[3] & 0x40));
- wacom_report_key(wcombo, BTN_8, (data[3] & 0x80));
+ input_report_key(input, BTN_7, (data[3] & 0x40));
+ input_report_key(input, BTN_8, (data[3] & 0x80));
}
if (data[1] | (data[2] & 0x01) | data[3]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
} else {
- wacom_report_key(wcombo, BTN_0, (data[5] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[5] & 0x02));
- wacom_report_key(wcombo, BTN_2, (data[5] & 0x04));
- wacom_report_key(wcombo, BTN_3, (data[5] & 0x08));
- wacom_report_key(wcombo, BTN_4, (data[6] & 0x01));
- wacom_report_key(wcombo, BTN_5, (data[6] & 0x02));
- wacom_report_key(wcombo, BTN_6, (data[6] & 0x04));
- wacom_report_key(wcombo, BTN_7, (data[6] & 0x08));
- wacom_report_key(wcombo, BTN_8, (data[5] & 0x10));
- wacom_report_key(wcombo, BTN_9, (data[6] & 0x10));
- wacom_report_abs(wcombo, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
- wacom_report_abs(wcombo, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
+ input_report_key(input, BTN_0, (data[5] & 0x01));
+ input_report_key(input, BTN_1, (data[5] & 0x02));
+ input_report_key(input, BTN_2, (data[5] & 0x04));
+ input_report_key(input, BTN_3, (data[5] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x01));
+ input_report_key(input, BTN_5, (data[6] & 0x02));
+ input_report_key(input, BTN_6, (data[6] & 0x04));
+ input_report_key(input, BTN_7, (data[6] & 0x08));
+ input_report_key(input, BTN_8, (data[5] & 0x10));
+ input_report_key(input, BTN_9, (data[6] & 0x10));
+ input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
+ input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
if ((data[5] & 0x1f) | (data[6] & 0x1f) | (data[1] & 0x1f) |
data[2] | (data[3] & 0x1f) | data[4]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
}
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xffffffff);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
return 1;
}
/* process in/out prox events */
- result = wacom_intuos_inout(wacom, wcombo);
+ result = wacom_intuos_inout(wacom);
if (result)
- return result-1;
+ return result - 1;
/* don't proceed if we don't know the ID */
if (!wacom->id[idx])
@@ -545,17 +524,17 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
return 0;
if (features->type >= INTUOS3S) {
- wacom_report_abs(wcombo, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
- wacom_report_abs(wcombo, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
+ input_report_abs(input, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
+ input_report_abs(input, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_be16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_be16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
+ input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[2]));
+ input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[4]));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
}
/* process general packets */
- wacom_intuos_general(wacom, wcombo);
+ wacom_intuos_general(wacom);
/* 4D mouse, 2D mouse, marker pen rotation, tilt mouse, or Lens cursor packets */
if ((data[1] & 0xbc) == 0xa8 || (data[1] & 0xbe) == 0xb0 || (data[1] & 0xbc) == 0xac) {
@@ -567,174 +546,191 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 3) | ((data[7] >> 5) & 7);
t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
((t-1) / 2 + 450)) : (450 - t / 2) ;
- wacom_report_abs(wcombo, ABS_Z, t);
+ input_report_abs(input, ABS_Z, t);
} else {
/* 4D mouse rotation packet */
t = (data[6] << 3) | ((data[7] >> 5) & 7);
- wacom_report_abs(wcombo, ABS_RZ, (data[7] & 0x20) ?
+ input_report_abs(input, ABS_RZ, (data[7] & 0x20) ?
((t - 1) / 2) : -t / 2);
}
} else if (!(data[1] & 0x10) && features->type < INTUOS3S) {
/* 4D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x20);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x10);
+ input_report_key(input, BTN_SIDE, data[8] & 0x20);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x10);
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- wacom_report_abs(wcombo, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
+ input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_LEFT, data[6] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[6] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[6] & 0x04);
- wacom_report_rel(wcombo, REL_WHEEL, ((data[7] & 0x80) >> 7)
+ input_report_key(input, BTN_LEFT, data[6] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[6] & 0x04);
+ input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7)
- ((data[7] & 0x40) >> 6));
- wacom_report_key(wcombo, BTN_SIDE, data[6] & 0x08);
- wacom_report_key(wcombo, BTN_EXTRA, data[6] & 0x10);
+ input_report_key(input, BTN_SIDE, data[6] & 0x08);
+ input_report_key(input, BTN_EXTRA, data[6] & 0x10);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
} else {
/* 2D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x08);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x10);
- wacom_report_rel(wcombo, REL_WHEEL, (data[8] & 0x01)
+ input_report_key(input, BTN_LEFT, data[8] & 0x04);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x10);
+ input_report_rel(input, REL_WHEEL, (data[8] & 0x01)
- ((data[8] & 0x02) >> 1));
/* I3 2D mouse side buttons */
if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x40);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x20);
+ input_report_key(input, BTN_SIDE, data[8] & 0x40);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x20);
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
features->type == INTUOS4L) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x10);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x08);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_SIDE, data[8] & 0x10);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x08);
}
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[idx]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_key(input, wacom->tool[idx], 1);
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
return 1;
}
-static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
+static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
{
- wacom_report_abs(wcombo, ABS_X,
- (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_Y,
- (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 1);
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+ int x = le16_to_cpup((__le16 *)&data[finger * 2]) & 0x7fff;
+ int y = le16_to_cpup((__le16 *)&data[4 + finger * 2]) & 0x7fff;
+
+ /*
+ * Work around input core suppressing "duplicate" events since
+ * we are abusing ABS_X/ABS_Y to transmit multi-finger data.
+ * This should go away once we switch to true multitouch
+ * protocol.
+ */
+ if (wacom->last_finger != finger) {
+ if (x == input->abs[ABS_X])
+ x++;
+
+ if (y == input->abs[ABS_Y])
+ y++;
+ }
+
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[finger], 1);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 1);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(wacom->input);
+
+ wacom->last_finger = finger;
}
-static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
+static void wacom_tpc_touch_out(struct wacom_wac *wacom, int idx)
{
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- return;
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[finger], 0);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 0);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(input);
}
-static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
+static void wacom_tpc_touch_in(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
- static int firstFinger = 0;
- static int secondFinger = 0;
+ struct input_dev *input = wacom->input;
- wacom->tool[0] = BTN_TOOL_DOUBLETAP;
+ wacom->tool[1] = BTN_TOOL_DOUBLETAP;
wacom->id[0] = TOUCH_DEVICE_ID;
- wacom->tool[1] = BTN_TOOL_TRIPLETAP;
+ wacom->tool[2] = BTN_TOOL_TRIPLETAP;
+
+ if (len != WACOM_PKGLEN_TPC1FG) {
- if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
- break;
- case WACOM_REPORT_TPC2FG:
- /* keep this byte to send proper out-prox event */
- wacom->id[1] = data[1] & 0x03;
-
- if (data[1] & 0x01) {
- wacom_tpc_finger_in(wacom, wcombo, data, 0);
- firstFinger = 1;
- } else if (firstFinger) {
- wacom_tpc_touch_out(wacom, wcombo, 0);
- }
- if (data[1] & 0x02) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC1FG:
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_TOUCH, le16_to_cpup((__le16 *)&data[6]));
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
+ break;
- wacom_tpc_finger_in(wacom, wcombo, data, 1);
- secondFinger = 1;
- } else if (secondFinger) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC2FG:
+ if (data[1] & 0x01)
+ wacom_tpc_finger_in(wacom, data, 0);
+ else if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
- wacom_tpc_touch_out(wacom, wcombo, 1);
- secondFinger = 0;
- }
- if (!(data[1] & 0x01))
- firstFinger = 0;
- break;
+ if (data[1] & 0x02)
+ wacom_tpc_finger_in(wacom, data, 1);
+ else if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ break;
}
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_key(wcombo, BTN_TOUCH, 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
}
- return;
}
-static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
struct wacom_features *features = &wacom->features;
char *data = wacom->data;
- int prox = 0, pressure, idx = -1;
- static int stylusInProx, touchInProx = 1, touchOut;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
+ struct input_dev *input = wacom->input;
+ int prox = 0, pressure;
+ int retval = 0;
dbg("wacom_tpc_irq: received report #%d", data[0]);
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
+ if (len == WACOM_PKGLEN_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
+
+ if (wacom->shared->stylus_in_proximity) {
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+
+ wacom->id[1] = 0;
+ return 0;
+ }
+
+ if (len == WACOM_PKGLEN_TPC1FG) { /* with touch */
prox = data[0] & 0x01;
} else { /* with capacity */
if (data[0] == WACOM_REPORT_TPC1FG)
@@ -745,168 +741,264 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
prox = data[1] & 0x03;
}
- if (!stylusInProx) { /* stylus not in prox */
- if (prox) {
- if (touchInProx) {
- wacom_tpc_touch_in(wacom, wcombo);
- touchOut = 1;
- return 1;
- }
- } else {
+ if (prox) {
+ if (!wacom->id[1])
+ wacom->last_finger = 1;
+ wacom_tpc_touch_in(wacom, len);
+ } else {
+ if (data[0] == WACOM_REPORT_TPC2FG) {
/* 2FGT out-prox */
- if (data[0] == WACOM_REPORT_TPC2FG) {
- idx = (wacom->id[1] & 0x01) - 1;
- if (idx == 0) {
- wacom_tpc_touch_out(wacom, wcombo, idx);
- /* sync first finger event */
- if (wacom->id[1] & 0x02)
- wacom_input_sync(wcombo);
- }
- idx = (wacom->id[1] & 0x02) - 1;
- if (idx == 1)
- wacom_tpc_touch_out(wacom, wcombo, idx);
- } else /* one finger touch */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
- }
- } else if (touchOut || !prox) { /* force touch out-prox */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ } else
+ /* one finger touch */
+ wacom_tpc_touch_out(wacom, 0);
+
+ wacom->id[0] = 0;
}
+ /* keep prox bit to send proper out-prox event */
+ wacom->id[1] = prox;
} else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
prox = data[1] & 0x20;
- touchInProx = 0;
+ if (!wacom->shared->stylus_in_proximity) { /* first in prox */
+ /* Going into proximity select tool */
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
- if (prox) { /* in prox */
- if (!wacom->id[0]) {
- /* Going into proximity select tool */
- wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[0] == BTN_TOOL_PEN)
- wacom->id[0] = STYLUS_DEVICE_ID;
- else
- wacom->id[0] = ERASER_DEVICE_ID;
- }
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
- } else {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom->shared->stylus_in_proximity = true;
+ }
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ pressure = ((data[7] & 0x01) << 8) | data[6];
+ if (pressure < 0)
+ pressure = features->pressure_max + pressure + 1;
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x05);
+ if (!prox) { /* out-prox */
wacom->id[0] = 0;
- /* pen is out so touch can be enabled now */
- touchInProx = 1;
+ wacom->shared->stylus_in_proximity = false;
}
- wacom_report_key(wcombo, wacom->tool[0], prox);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- stylusInProx = prox;
- return 1;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ retval = 1;
}
- return 0;
+ return retval;
}
-int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
+ bool sync;
+
switch (wacom_wac->features.type) {
- case PENPARTNER:
- return wacom_penpartner_irq(wacom_wac, wcombo);
-
- case PL:
- return wacom_pl_irq(wacom_wac, wcombo);
-
- case WACOM_G4:
- case GRAPHIRE:
- case WACOM_MO:
- return wacom_graphire_irq(wacom_wac, wcombo);
-
- case PTU:
- return wacom_ptu_irq(wacom_wac, wcombo);
-
- case INTUOS:
- case INTUOS3S:
- case INTUOS3:
- case INTUOS3L:
- case INTUOS4S:
- case INTUOS4:
- case INTUOS4L:
- case CINTIQ:
- case WACOM_BEE:
- return wacom_intuos_irq(wacom_wac, wcombo);
-
- case TABLETPC:
- case TABLETPC2FG:
- return wacom_tpc_irq(wacom_wac, wcombo);
-
- default:
- return 0;
+ case PENPARTNER:
+ sync = wacom_penpartner_irq(wacom_wac);
+ break;
+
+ case PL:
+ sync = wacom_pl_irq(wacom_wac);
+ break;
+
+ case WACOM_G4:
+ case GRAPHIRE:
+ case WACOM_MO:
+ sync = wacom_graphire_irq(wacom_wac);
+ break;
+
+ case PTU:
+ sync = wacom_ptu_irq(wacom_wac);
+ break;
+
+ case INTUOS:
+ case INTUOS3S:
+ case INTUOS3:
+ case INTUOS3L:
+ case INTUOS4S:
+ case INTUOS4:
+ case INTUOS4L:
+ case CINTIQ:
+ case WACOM_BEE:
+ sync = wacom_intuos_irq(wacom_wac);
+ break;
+
+ case TABLETPC:
+ case TABLETPC2FG:
+ sync = wacom_tpc_irq(wacom_wac, len);
+ break;
+
+ default:
+ sync = false;
+ break;
}
- return 0;
+
+ if (sync)
+ input_sync(wacom_wac->input);
+}
+
+static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input_dev = wacom_wac->input;
+
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_SIDE, input_dev->keybit);
+ __set_bit(BTN_EXTRA, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_TOOL_BRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_PENCIL, input_dev->keybit);
+ __set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_LENS, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_DISTANCE,
+ 0, wacom_wac->features.distance_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
-void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
+
+ __set_bit(ABS_MISC, input_dev->absbit);
+
switch (wacom_wac->features.type) {
- case WACOM_MO:
- input_dev_mo(input_dev, wacom_wac);
- case WACOM_G4:
- input_dev_g4(input_dev, wacom_wac);
- /* fall through */
- case GRAPHIRE:
- input_dev_g(input_dev, wacom_wac);
- break;
- case WACOM_BEE:
- input_dev_bee(input_dev, wacom_wac);
- case INTUOS3:
- case INTUOS3L:
- case CINTIQ:
- input_dev_i3(input_dev, wacom_wac);
- /* fall through */
- case INTUOS3S:
- input_dev_i3s(input_dev, wacom_wac);
- /* fall through */
- case INTUOS:
- input_dev_i(input_dev, wacom_wac);
- break;
- case INTUOS4:
- case INTUOS4L:
- input_dev_i4(input_dev, wacom_wac);
- /* fall through */
- case INTUOS4S:
- input_dev_i4s(input_dev, wacom_wac);
- input_dev_i(input_dev, wacom_wac);
- break;
- case TABLETPC2FG:
- input_dev_tpc2fg(input_dev, wacom_wac);
- /* fall through */
- case TABLETPC:
- input_dev_tpc(input_dev, wacom_wac);
- if (wacom_wac->features.device_type != BTN_TOOL_PEN)
- break; /* no need to process stylus stuff */
-
- /* fall through */
- case PL:
- case PTU:
- input_dev_pl(input_dev, wacom_wac);
- /* fall through */
- case PENPARTNER:
- input_dev_pt(input_dev, wacom_wac);
- break;
+ case WACOM_MO:
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ /* fall through */
+
+ case WACOM_G4:
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ /* fall through */
+
+ case GRAPHIRE:
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ break;
+
+ case WACOM_BEE:
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_9, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS3:
+ case INTUOS3L:
+ case CINTIQ:
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ /* fall through */
+
+ case INTUOS3S:
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ /* fall through */
+
+ case INTUOS:
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case INTUOS4:
+ case INTUOS4L:
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS4S:
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case TABLETPC2FG:
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ }
+ /* fall through */
+
+ case TABLETPC:
+ if (features->device_type == BTN_TOOL_DOUBLETAP ||
+ features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ }
+
+ if (features->device_type != BTN_TOOL_PEN)
+ break; /* no need to process stylus stuff */
+
+ /* fall through */
+
+ case PL:
+ case PTU:
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ /* fall through */
+
+ case PENPARTNER:
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ break;
}
- return;
}
static const struct wacom_features wacom_features_0x00 =
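Note on the conversion above: the wcombo-based wacom_report_*() wrappers are replaced by direct input_report_*() calls on wacom->input, and each handler now returns whether it reported anything so that wacom_wac_irq() can issue a single input_sync(). A minimal standalone sketch of that report-then-sync pattern; the demo_* names are illustrative only and are not part of the driver:

/*
 * Sketch of the report-then-sync pattern used by the converted
 * handlers: emit events on the input_dev, return whether anything
 * changed, and let the caller issue one input_sync().
 */
#include <linux/input.h>

static bool demo_report_pen(struct input_dev *input, const u8 *data)
{
	input_report_key(input, BTN_TOUCH, data[1] & 0x01);
	input_report_abs(input, ABS_PRESSURE,
			 data[6] | ((data[7] & 0x01) << 8));
	return true;	/* something was reported; request a sync */
}

static void demo_irq(struct input_dev *input, const u8 *data)
{
	if (demo_report_pen(input, data))
		input_sync(input);
}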
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index b50cf04e61a..063f1af3204 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,6 +9,8 @@
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
+#include <linux/types.h>
+
/* maximum packet length for USB devices */
#define WACOM_PKGLEN_MAX 32
@@ -71,13 +73,20 @@ struct wacom_features {
unsigned char unitExpo;
};
+struct wacom_shared {
+ bool stylus_in_proximity;
+};
+
struct wacom_wac {
char name[64];
unsigned char *data;
- int tool[2];
- int id[2];
+ int tool[3];
+ int id[3];
__u32 serial[2];
+ int last_finger;
struct wacom_features features;
+ struct wacom_shared *shared;
+ struct input_dev *input;
};
#endif
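The new wacom_shared structure above gives the pen and touch interfaces of a Tablet PC a single stylus_in_proximity flag to coordinate on. A rough sketch of how two wacom_wac instances could be pointed at one shared structure; the wiring shown is assumed for illustration and is not part of this patch:

/* Assumed wiring, for illustration only. */
#include "wacom_wac.h"

static struct wacom_shared demo_shared;

static void demo_link(struct wacom_wac *pen, struct wacom_wac *touch)
{
	/* both halves observe the same proximity state */
	pen->shared = &demo_shared;
	touch->shared = &demo_shared;
}

static bool demo_touch_allowed(struct wacom_wac *touch)
{
	/* touch events are muted while the stylus is in proximity */
	return !touch->shared->stylus_in_proximity;
}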
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8a8fa4d2d6a..6703c6b9800 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -99,22 +99,6 @@ config TOUCHSCREEN_BITSY
To compile this driver as a module, choose M here: the
module will be called h3600_ts_input.
-config TOUCHSCREEN_CORGI
- tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
- depends on PXA_SHARPSL
- select CORGI_SSP_DEPRECATED
- help
- Say Y here to enable the driver for the touchscreen on the
- Sharp SL-C7xx and SL-Cxx00 series of PDAs.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called corgi_ts.
-
- NOTE: this driver is deprecated, try enable SPI and generic
- ADS7846-based touchscreen driver.
-
config TOUCHSCREEN_DA9034
tristate "Touchscreen support for Dialog Semiconductor DA9034"
depends on PMIC_DA903X
@@ -135,6 +119,18 @@ config TOUCHSCREEN_DYNAPRO
To compile this driver as a module, choose M here: the
module will be called dynapro.
+config TOUCHSCREEN_HAMPSHIRE
+ tristate "Hampshire serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Hampshire serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hampshire.
+
config TOUCHSCREEN_EETI
tristate "EETI touchscreen panel support"
depends on I2C
@@ -158,8 +154,8 @@ config TOUCHSCREEN_FUJITSU
module will be called fujitsu-ts.
config TOUCHSCREEN_S3C2410
- tristate "Samsung S3C2410 touchscreen input driver"
- depends on ARCH_S3C2410
+ tristate "Samsung S3C2410/generic touchscreen input driver"
+ depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
select S3C24XX_ADC
help
Say Y here if you have the s3c2410 touchscreen.
@@ -594,4 +590,17 @@ config TOUCHSCREEN_PCAP
To compile this driver as a module, choose M here: the
module will be called pcap_ts.
+
+config TOUCHSCREEN_TPS6507X
+ tristate "TPS6507x based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TPS6507x based touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7fef7d5cca2..497964a7a21 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,8 +12,8 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
-obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
@@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e..634f6f6b9b1 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
ts->reg = regulator_get(&spi->dev, "vcc");
if (IS_ERR(ts->reg)) {
- dev_err(&spi->dev, "unable to get regulator: %ld\n",
- PTR_ERR(ts->reg));
+ err = PTR_ERR(ts->reg);
+ dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
goto err_free_gpio;
}
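The ads7846 change above follows the usual regulator error path: regulator_get() hands back an ERR_PTR-encoded pointer, IS_ERR() detects it, and PTR_ERR() recovers the negative errno that is then logged and propagated. A self-contained sketch of that pattern; demo_get_vcc() is a stand-in name, not a driver function:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_get_vcc(struct device *dev, struct regulator **reg)
{
	*reg = regulator_get(dev, "vcc");
	if (IS_ERR(*reg)) {
		/* capture the encoded error once: log it and return it */
		long err = PTR_ERR(*reg);

		dev_err(dev, "unable to get regulator: %ld\n", err);
		return err;
	}

	return 0;
}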
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
deleted file mode 100644
index 94a1919d439..00000000000
--- a/drivers/input/touchscreen/corgi_ts.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#include <mach/sharpsl.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-gpio.h>
-
-
-#define PWR_MODE_ACTIVE 0
-#define PWR_MODE_SUSPEND 1
-
-#define X_AXIS_MAX 3830
-#define X_AXIS_MIN 150
-#define Y_AXIS_MAX 3830
-#define Y_AXIS_MIN 190
-#define PRESSURE_MIN 0
-#define PRESSURE_MAX 15000
-
-struct ts_event {
- short pressure;
- short x;
- short y;
-};
-
-struct corgi_ts {
- struct input_dev *input;
- struct timer_list timer;
- struct ts_event tc;
- int pendown;
- int power_mode;
- int irq_gpio;
- struct corgits_machinfo *machinfo;
-};
-
-#ifdef CONFIG_PXA25x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
-#endif
-#ifdef CONFIG_PXA27x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
-#endif
-
-/* ADS7846 Touch Screen Controller bit definitions */
-#define ADSCTRL_PD0 (1u << 0) /* PD0 */
-#define ADSCTRL_PD1 (1u << 1) /* PD1 */
-#define ADSCTRL_DFR (1u << 2) /* SER/DFR */
-#define ADSCTRL_MOD (1u << 3) /* Mode */
-#define ADSCTRL_ADR_SH 4 /* Address setting */
-#define ADSCTRL_STS (1u << 7) /* Start Bit */
-
-/* External Functions */
-extern unsigned int get_clk_frequency_khz(int info);
-
-static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
-{
- unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
-
- if (hsync_invperiod)
- return get_clk_frequency_khz(0)*1000/hsync_invperiod;
- else
- return 0;
-}
-
-static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
- unsigned int address, unsigned long wait_time)
-{
- unsigned long timer1 = 0, timer2, pmnc = 0;
- int pos = 0;
-
- if (wait_time && doSend) {
- PMNC_GET(pmnc);
- if (!(pmnc & 0x01))
- PMNC_SET(0x01);
-
- /* polling HSync */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- }
-
- if (doRecive)
- pos = corgi_ssp_ads7846_get();
-
- if (doSend) {
- int cmd = ADSCTRL_PD0 | ADSCTRL_PD1 | (address << ADSCTRL_ADR_SH) | ADSCTRL_STS;
- /* dummy command */
- corgi_ssp_ads7846_put(cmd);
- corgi_ssp_ads7846_get();
-
- if (wait_time) {
- /* Wait after HSync */
- CCNT(timer2);
- if (timer2-timer1 > wait_time) {
- /* too slow - timeout, try again */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- /* Wait after HSync */
- CCNT(timer2);
- }
- while (timer2 - timer1 < wait_time)
- CCNT(timer2);
- }
- corgi_ssp_ads7846_put(cmd);
- if (wait_time && !(pmnc & 0x01))
- PMNC_SET(pmnc);
- }
- return pos;
-}
-
-static int read_xydata(struct corgi_ts *corgi_ts)
-{
- unsigned int x, y, z1, z2;
- unsigned long flags, wait_time;
-
- /* critical section */
- local_irq_save(flags);
- corgi_ssp_ads7846_lock();
- wait_time = calc_waittime(corgi_ts);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
-
- /* X-axis */
- y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
-
- /* Z1 */
- x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
-
- /* Z2 */
- z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
- z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
-
- /* Power-Down Enable */
- corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- corgi_ssp_ads7846_get();
-
- corgi_ssp_ads7846_unlock();
- local_irq_restore(flags);
-
- if (x== 0 || y == 0 || z1 == 0 || (x * (z2 - z1) / z1) >= 15000) {
- corgi_ts->tc.pressure = 0;
- return 0;
- }
-
- corgi_ts->tc.x = x;
- corgi_ts->tc.y = y;
- corgi_ts->tc.pressure = (x * (z2 - z1)) / z1;
- return 1;
-}
-
-static void new_data(struct corgi_ts *corgi_ts)
-{
- struct input_dev *dev = corgi_ts->input;
-
- if (corgi_ts->power_mode != PWR_MODE_ACTIVE)
- return;
-
- if (!corgi_ts->tc.pressure && corgi_ts->pendown == 0)
- return;
-
- input_report_abs(dev, ABS_X, corgi_ts->tc.x);
- input_report_abs(dev, ABS_Y, corgi_ts->tc.y);
- input_report_abs(dev, ABS_PRESSURE, corgi_ts->tc.pressure);
- input_report_key(dev, BTN_TOUCH, corgi_ts->pendown);
- input_sync(dev);
-}
-
-static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
-{
- if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
- /* Disable Interrupt */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
- if (read_xydata(corgi_ts)) {
- corgi_ts->pendown = 1;
- new_data(corgi_ts);
- }
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- } else {
- if (corgi_ts->pendown == 1 || corgi_ts->pendown == 2) {
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- corgi_ts->pendown++;
- return;
- }
-
- if (corgi_ts->pendown) {
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- }
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->pendown = 0;
- }
-}
-
-static void corgi_ts_timer(unsigned long data)
-{
- struct corgi_ts *corgits_data = (struct corgi_ts *) data;
-
- ts_interrupt_main(corgits_data, 1);
-}
-
-static irqreturn_t ts_interrupt(int irq, void *dev_id)
-{
- struct corgi_ts *corgits_data = dev_id;
-
- ts_interrupt_main(corgits_data, 0);
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-static int corgits_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- if (corgi_ts->pendown) {
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- corgi_ts->pendown = 0;
- }
- corgi_ts->power_mode = PWR_MODE_SUSPEND;
-
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
-
- return 0;
-}
-
-static int corgits_resume(struct platform_device *dev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- return 0;
-}
-#else
-#define corgits_suspend NULL
-#define corgits_resume NULL
-#endif
-
-static int __devinit corgits_probe(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts;
- struct input_dev *input_dev;
- int err = -ENOMEM;
-
- corgi_ts = kzalloc(sizeof(struct corgi_ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgi_ts || !input_dev)
- goto fail1;
-
- platform_set_drvdata(pdev, corgi_ts);
-
- corgi_ts->machinfo = pdev->dev.platform_data;
- corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
-
- if (corgi_ts->irq_gpio < 0) {
- err = -ENODEV;
- goto fail1;
- }
-
- corgi_ts->input = input_dev;
-
- init_timer(&corgi_ts->timer);
- corgi_ts->timer.data = (unsigned long) corgi_ts;
- corgi_ts->timer.function = corgi_ts_timer;
-
- input_dev->name = "Corgi Touchscreen";
- input_dev->phys = "corgits/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0002;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN, PRESSURE_MAX, 0, 0);
-
- pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
-
- /* Initiaize ADS7846 Difference Reference mode */
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((3u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((5u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
-
- if (request_irq(corgi_ts->irq_gpio, ts_interrupt, IRQF_DISABLED, "ts", corgi_ts)) {
- err = -EBUSY;
- goto fail1;
- }
-
- err = input_register_device(corgi_ts->input);
- if (err)
- goto fail2;
-
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
-
- return 0;
-
- fail2: free_irq(corgi_ts->irq_gpio, corgi_ts);
- fail1: input_free_device(input_dev);
- kfree(corgi_ts);
- return err;
-}
-
-static int __devexit corgits_remove(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
-
- free_irq(corgi_ts->irq_gpio, corgi_ts);
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->machinfo->put_hsync();
- input_unregister_device(corgi_ts->input);
- kfree(corgi_ts);
-
- return 0;
-}
-
-static struct platform_driver corgits_driver = {
- .probe = corgits_probe,
- .remove = __devexit_p(corgits_remove),
- .suspend = corgits_suspend,
- .resume = corgits_resume,
- .driver = {
- .name = "corgi-ts",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgits_init(void)
-{
- return platform_driver_register(&corgits_driver);
-}
-
-static void __exit corgits_exit(void)
-{
- platform_driver_unregister(&corgits_driver);
-}
-
-module_init(corgits_init);
-module_exit(corgits_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi TouchScreen Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:corgi-ts");
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
new file mode 100644
index 00000000000..2da6cc31bb2
--- /dev/null
+++ b/drivers/input/touchscreen/hampshire.c
@@ -0,0 +1,205 @@
+/*
+ * Hampshire serial touchscreen driver
+ *
+ * Copyright (c) 2010 Adam Bennett
+ * Based on the dynapro driver (c) Tias Guns
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * 2010/04/08 Adam Bennett <abennett72@gmail.com>
+ * Copied dynapro.c and edited for Hampshire 4-byte protocol
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define DRIVER_DESC "Hampshire serial touchscreen driver"
+
+MODULE_AUTHOR("Adam Bennett <abennett72@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/*
+ * Definitions & global arrays.
+ */
+
+#define HAMPSHIRE_FORMAT_TOUCH_BIT 0x40
+#define HAMPSHIRE_FORMAT_LENGTH 4
+#define HAMPSHIRE_RESPONSE_BEGIN_BYTE 0x80
+
+#define HAMPSHIRE_MIN_XC 0
+#define HAMPSHIRE_MAX_XC 0x1000
+#define HAMPSHIRE_MIN_YC 0
+#define HAMPSHIRE_MAX_YC 0x1000
+
+#define HAMPSHIRE_GET_XC(data) (((data[3] & 0x0c) >> 2) | (data[1] << 2) | ((data[0] & 0x38) << 6))
+#define HAMPSHIRE_GET_YC(data) ((data[3] & 0x03) | (data[2] << 2) | ((data[0] & 0x07) << 9))
+#define HAMPSHIRE_GET_TOUCHED(data) (HAMPSHIRE_FORMAT_TOUCH_BIT & data[0])
+
+/*
+ * Per-touchscreen data.
+ */
+
+struct hampshire {
+ struct input_dev *dev;
+ struct serio *serio;
+ int idx;
+ unsigned char data[HAMPSHIRE_FORMAT_LENGTH];
+ char phys[32];
+};
+
+static void hampshire_process_data(struct hampshire *phampshire)
+{
+ struct input_dev *dev = phampshire->dev;
+
+ if (HAMPSHIRE_FORMAT_LENGTH == ++phampshire->idx) {
+ input_report_abs(dev, ABS_X, HAMPSHIRE_GET_XC(phampshire->data));
+ input_report_abs(dev, ABS_Y, HAMPSHIRE_GET_YC(phampshire->data));
+ input_report_key(dev, BTN_TOUCH,
+ HAMPSHIRE_GET_TOUCHED(phampshire->data));
+ input_sync(dev);
+
+ phampshire->idx = 0;
+ }
+}
+
+static irqreturn_t hampshire_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ phampshire->data[phampshire->idx] = data;
+
+ if (HAMPSHIRE_RESPONSE_BEGIN_BYTE & phampshire->data[0])
+ hampshire_process_data(phampshire);
+ else
+ dev_dbg(&serio->dev, "unknown/unsynchronized data: %x\n",
+ phampshire->data[0]);
+
+ return IRQ_HANDLED;
+}
+
+static void hampshire_disconnect(struct serio *serio)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ input_get_device(phampshire->dev);
+ input_unregister_device(phampshire->dev);
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ input_put_device(phampshire->dev);
+ kfree(phampshire);
+}
+
+/*
+ * hampshire_connect() is the routine that is called when someone adds a
+ * new serio device that supports hampshire protocol and registers it as
+ * an input device. This is usually accomplished using inputattach.
+ */
+
+static int hampshire_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct hampshire *phampshire;
+ struct input_dev *input_dev;
+ int err;
+
+ phampshire = kzalloc(sizeof(struct hampshire), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!phampshire || !input_dev) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ phampshire->serio = serio;
+ phampshire->dev = input_dev;
+ snprintf(phampshire->phys, sizeof(phampshire->phys),
+ "%s/input0", serio->phys);
+
+ input_dev->name = "Hampshire Serial TouchScreen";
+ input_dev->phys = phampshire->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_HAMPSHIRE;
+ input_dev->id.product = 0;
+ input_dev->id.version = 0x0001;
+ input_dev->dev.parent = &serio->dev;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(phampshire->dev, ABS_X,
+ HAMPSHIRE_MIN_XC, HAMPSHIRE_MAX_XC, 0, 0);
+ input_set_abs_params(phampshire->dev, ABS_Y,
+ HAMPSHIRE_MIN_YC, HAMPSHIRE_MAX_YC, 0, 0);
+
+ serio_set_drvdata(serio, phampshire);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto fail2;
+
+ err = input_register_device(phampshire->dev);
+ if (err)
+ goto fail3;
+
+ return 0;
+
+ fail3: serio_close(serio);
+ fail2: serio_set_drvdata(serio, NULL);
+ fail1: input_free_device(input_dev);
+ kfree(phampshire);
+ return err;
+}
+
+/*
+ * The serio driver structure.
+ */
+
+static struct serio_device_id hampshire_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_HAMPSHIRE,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, hampshire_serio_ids);
+
+static struct serio_driver hampshire_drv = {
+ .driver = {
+ .name = "hampshire",
+ },
+ .description = DRIVER_DESC,
+ .id_table = hampshire_serio_ids,
+ .interrupt = hampshire_interrupt,
+ .connect = hampshire_connect,
+ .disconnect = hampshire_disconnect,
+};
+
+/*
+ * The functions for inserting/removing us as a module.
+ */
+
+static int __init hampshire_init(void)
+{
+ return serio_register_driver(&hampshire_drv);
+}
+
+static void __exit hampshire_exit(void)
+{
+ serio_unregister_driver(&hampshire_drv);
+}
+
+module_init(hampshire_init);
+module_exit(hampshire_exit);
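The 4-byte Hampshire packet is decoded entirely by the HAMPSHIRE_GET_* macros above: byte 0 carries the sync and touch flags plus the high coordinate bits, bytes 1 and 2 the middle bits, and byte 3 the low bits. A minimal user-space sketch (not part of the driver; the sample packet bytes are made up for illustration) that applies the same bit layout to one packet:

#include <stdio.h>

#define GET_XC(d)      ((((d)[3] & 0x0c) >> 2) | ((d)[1] << 2) | (((d)[0] & 0x38) << 6))
#define GET_YC(d)      (((d)[3] & 0x03) | ((d)[2] << 2) | (((d)[0] & 0x07) << 9))
#define GET_TOUCHED(d) ((d)[0] & 0x40)

int main(void)
{
	/* hypothetical packet: sync bit (0x80) and touch bit (0x40) set */
	unsigned char pkt[4] = { 0xc9, 0x55, 0x33, 0x06 };

	/* prints x=853 y=718 touched=1 for this sample */
	printf("x=%d y=%d touched=%d\n",
	       GET_XC(pkt), GET_YC(pkt), GET_TOUCHED(pkt) ? 1 : 0);
	return 0;
}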
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 98a7d127948..ac5d0f9b0cb 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -37,9 +37,7 @@
#include <plat/adc.h>
#include <plat/regs-adc.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/ts.h>
+#include <plat/ts.h>
#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
@@ -57,6 +55,8 @@
S3C2410_ADCTSC_AUTO_PST | \
S3C2410_ADCTSC_XY_PST(0))
+#define FEAT_PEN_IRQ (1 << 0) /* HAS ADCCLRINTPNDNUP */
+
/* Per-touchscreen data. */
/**
@@ -71,6 +71,7 @@
* @irq_tc: The interrupt number for pen up/down interrupt
* @count: The number of samples collected.
* @shift: The log2 of the maximum count to read in one go.
+ * @features: The features supported by the TSADC module.
*/
struct s3c2410ts {
struct s3c_adc_client *client;
@@ -83,26 +84,12 @@ struct s3c2410ts {
int irq_tc;
int count;
int shift;
+ int features;
};
static struct s3c2410ts ts;
/**
- * s3c2410_ts_connect - configure gpio for s3c2410 systems
- *
- * Configure the GPIO for the S3C2410 system, where we have external FETs
- * connected to the device (later systems such as the S3C2440 integrate
- * these into the device).
-*/
-static inline void s3c2410_ts_connect(void)
-{
- s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
-}
-
-/**
* get_down - return the down state of the pen
* @data0: The data read from ADCDAT0 register.
* @data1: The data read from ADCDAT1 register.
@@ -188,6 +175,11 @@ static irqreturn_t stylus_irq(int irq, void *dev_id)
else
dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count);
+ if (ts.features & FEAT_PEN_IRQ) {
+ /* Clear pen down/up interrupt */
+ writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP);
+ }
+
return IRQ_HANDLED;
}
@@ -296,9 +288,9 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
goto err_clk;
}
- /* Configure the touchscreen external FETs on the S3C2410 */
- if (!platform_get_device_id(pdev)->driver_data)
- s3c2410_ts_connect();
+ /* initialise the gpio */
+ if (info->cfg_gpio)
+ info->cfg_gpio(to_platform_device(ts.dev));
ts.client = s3c_adc_register(pdev, s3c24xx_ts_select,
s3c24xx_ts_conversion, 1);
@@ -334,6 +326,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
ts.input->id.version = 0x0102;
ts.shift = info->oversampling_shift;
+ ts.features = platform_get_device_id(pdev)->driver_data;
ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED,
"s3c2410_ts_pen", ts.input);
@@ -421,14 +414,15 @@ static struct dev_pm_ops s3c_ts_pmops = {
static struct platform_device_id s3cts_driver_ids[] = {
{ "s3c2410-ts", 0 },
- { "s3c2440-ts", 1 },
+ { "s3c2440-ts", 0 },
+ { "s3c64xx-ts", FEAT_PEN_IRQ },
{ }
};
MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
static struct platform_driver s3c_ts_driver = {
.driver = {
- .name = "s3c24xx-ts",
+ .name = "samsung-ts",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &s3c_ts_pmops,
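The s3c64xx support above hangs off the platform_device_id table: the driver_data word of each entry carries per-variant feature flags (here FEAT_PEN_IRQ) and is read back in probe through platform_get_device_id(). A stripped-down sketch of the same pattern, with hypothetical myts_* names and the module registration boilerplate omitted:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#define FEAT_FOO	(1 << 0)	/* hypothetical per-variant feature bit */

static int myts_features;

static int __devinit myts_probe(struct platform_device *pdev)
{
	/* driver_data of the matched id entry becomes the feature mask */
	myts_features = platform_get_device_id(pdev)->driver_data;
	return 0;
}

static struct platform_device_id myts_ids[] = {
	{ "myts-old", 0 },		/* base variant, no extra features */
	{ "myts-new", FEAT_FOO },	/* variant needing the extra IRQ ack */
	{ }
};
MODULE_DEVICE_TABLE(platform, myts_ids);

static struct platform_driver myts_driver = {
	.driver		= { .name = "myts", .owner = THIS_MODULE },
	.id_table	= myts_ids,
	.probe		= myts_probe,
};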
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
new file mode 100644
index 00000000000..5de80a1a730
--- /dev/null
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -0,0 +1,400 @@
+/*
+ * drivers/input/touchscreen/tps6507x_ts.c
+ *
+ * Touchscreen driver for the tps6507x chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Credits:
+ *
+ * Using code from tsc2007, MtekVision Co., Ltd.
+ *
+ * For licencing details see kernel-base/COPYING
+ *
+ * TPS65070, TPS65073, TPS650731, and TPS650732 support
+ * 10 bit touch screen interface.
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
+#include <linux/delay.h>
+
+#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
+#define TPS_DEFAULT_MIN_PRESSURE 0x30
+#define MAX_10BIT ((1 << 10) - 1)
+
+#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
+ TPS6507X_ADCONFIG_START_CONVERSION | \
+ TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+
+struct ts_event {
+ u16 x;
+ u16 y;
+ u16 pressure;
+};
+
+struct tps6507x_ts {
+ struct input_dev *input_dev;
+ struct device *dev;
+ char phys[32];
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned polling; /* polling is active */
+ struct ts_event tc;
+ struct tps6507x_dev *mfd;
+ u16 model;
+ unsigned pendown;
+ int irq;
+ void (*clear_penirq)(void);
+ unsigned long poll_period; /* ms */
+ u16 min_pressure;
+ int vref; /* non-zero to leave vref on */
+};
+
+static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
+{
+ int err;
+
+ err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
+{
+ return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
+}
+
+static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
+ u8 tsc_mode, u16 *value)
+{
+ s32 ret;
+ u8 adc_status;
+ u8 result;
+
+ /* Route input signal to A/D converter */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
+ if (ret) {
+ dev_err(tsc->dev, "TSC mode read failed\n");
+ goto err;
+ }
+
+ /* Start A/D conversion */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_CONVERT_TS);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config write failed\n");
+ return ret;
+ }
+
+ do {
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
+ &adc_status);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config read failed\n");
+ goto err;
+ }
+ } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 2 read failed\n");
+ goto err;
+ }
+
+ *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 1 read failed\n");
+ goto err;
+ }
+
+ *value |= result;
+
+ dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);
+
+err:
+ return ret;
+}
+
+/* Need to call tps6507x_adc_standby() after using A/D converter for the
+ * touch screen interrupt to work properly.
+ */
+
+static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
+{
+ s32 ret;
+ s32 loops = 0;
+ u8 val;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_INPUT_TSC);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
+ TPS6507X_TSCMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+
+ while (val & TPS6507X_REG_TSC_INT) {
+ mdelay(10);
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+ loops++;
+ }
+
+ return ret;
+}
+
+static void tps6507x_ts_handler(struct work_struct *work)
+{
+ struct tps6507x_ts *tsc = container_of(work,
+ struct tps6507x_ts, work.work);
+ struct input_dev *input_dev = tsc->input_dev;
+ int pendown;
+ int schd;
+ int poll = 0;
+ s32 ret;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
+ &tsc->tc.pressure);
+ if (ret)
+ goto done;
+
+ pendown = tsc->tc.pressure > tsc->min_pressure;
+
+ if (unlikely(!pendown && tsc->pendown)) {
+ dev_dbg(tsc->dev, "UP\n");
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ tsc->pendown = 0;
+ }
+
+ if (pendown) {
+
+ if (!tsc->pendown) {
+ dev_dbg(tsc->dev, "DOWN\n");
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ } else
+ dev_dbg(tsc->dev, "still down\n");
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
+ &tsc->tc.x);
+ if (ret)
+ goto done;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
+ &tsc->tc.y);
+ if (ret)
+ goto done;
+
+ input_report_abs(input_dev, ABS_X, tsc->tc.x);
+ input_report_abs(input_dev, ABS_Y, tsc->tc.y);
+ input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
+ input_sync(input_dev);
+ tsc->pendown = 1;
+ poll = 1;
+ }
+
+done:
+ /* always poll if not using interrupts */
+ poll = 1;
+
+ if (poll) {
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "re-schedule failed");
+ }
+ } else
+ tsc->polling = 0;
+
+ ret = tps6507x_adc_standby(tsc);
+}
+
+static int tps6507x_ts_probe(struct platform_device *pdev)
+{
+ int error;
+ struct tps6507x_ts *tsc;
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
+ struct touchscreen_init_data *init_data;
+ struct input_dev *input_dev;
+ struct tps6507x_board *tps_board;
+ int schd;
+
+ /**
+ * tps_board points to pmic related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
+
+ if (!tps_board) {
+ dev_err(tps6507x_dev->dev,
+ "Could not find tps6507x platform data\n");
+ return -EIO;
+ }
+
+ /**
+ * init_data points to array of regulator_init structures
+ * coming from the board-evm file.
+ */
+
+ init_data = tps_board->tps6507x_ts_init_data;
+
+ tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
+ if (!tsc) {
+ dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
+ error = -ENOMEM;
+ goto err0;
+ }
+
+ tps6507x_dev->ts = tsc;
+ tsc->mfd = tps6507x_dev;
+ tsc->dev = tps6507x_dev->dev;
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(tsc->dev, "Failed to allocate input device.\n");
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
+
+ input_dev->name = "TPS6507x Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = tsc->dev;
+
+ snprintf(tsc->phys, sizeof(tsc->phys),
+ "%s/input0", dev_name(tsc->dev));
+ input_dev->phys = tsc->phys;
+
+ dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);
+
+ input_set_drvdata(input_dev, tsc);
+
+ tsc->input_dev = input_dev;
+
+ INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
+ tsc->wq = create_workqueue("TPS6507x Touchscreen");
+
+ if (init_data) {
+ tsc->poll_period = init_data->poll_period;
+ tsc->vref = init_data->vref;
+ tsc->min_pressure = init_data->min_pressure;
+ input_dev->id.vendor = init_data->vendor;
+ input_dev->id.product = init_data->product;
+ input_dev->id.version = init_data->version;
+ } else {
+ tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
+ tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
+ }
+
+ error = tps6507x_adc_standby(tsc);
+ if (error)
+ goto err2;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err2;
+
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "schedule failed");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+ input_free_device(input_dev);
+err1:
+ kfree(tsc);
+ tps6507x_dev->ts = NULL;
+err0:
+ return error;
+}
+
+static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+{
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_ts *tsc = tps6507x_dev->ts;
+ struct input_dev *input_dev;
+
+ if (!tsc)
+ return 0;
+
+ input_dev = tsc->input_dev;
+
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+
+ input_free_device(input_dev);
+
+ tps6507x_dev->ts = NULL;
+ kfree(tsc);
+
+ return 0;
+}
+
+static struct platform_driver tps6507x_ts_driver = {
+ .driver = {
+ .name = "tps6507x-ts",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_ts_probe,
+ .remove = __devexit_p(tps6507x_ts_remove),
+};
+
+static int __init tps6507x_ts_init(void)
+{
+ return platform_driver_register(&tps6507x_ts_driver);
+}
+module_init(tps6507x_ts_init);
+
+static void __exit tps6507x_ts_exit(void)
+{
+ platform_driver_unregister(&tps6507x_ts_driver);
+}
+module_exit(tps6507x_ts_exit);
+
+MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
+MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-tsc");
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index be23780e8a3..769b479fcaa 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -347,6 +347,8 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
+ i2c_set_clientdata(client, NULL);
+
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 99330bbdbac..567d57215c2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -811,12 +811,11 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
priv = usbtouch->priv;
- priv->ack_buf = kmalloc(sizeof(nexio_ack_pkt), GFP_KERNEL);
+ priv->ack_buf = kmemdup(nexio_ack_pkt, sizeof(nexio_ack_pkt),
+ GFP_KERNEL);
if (!priv->ack_buf)
goto err_priv;
- memcpy(priv->ack_buf, nexio_ack_pkt, sizeof(nexio_ack_pkt));
-
priv->ack = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->ack) {
dbg("%s - usb_alloc_urb failed: usbtouch->ack", __func__);
@@ -858,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
if ((pkt[0] & 0xe0) != 0xe0)
return 0;
+ if (be16_to_cpu(packet->data_len) > 0xff)
+ packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
+ if (be16_to_cpu(packet->x_len) > 0xff)
+ packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
+
/* send ACK */
ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
@@ -1113,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
[DEVTYPE_NEXIO] = {
- .rept_size = 128,
+ .rept_size = 1024,
.irq_always = true,
.read_data = nexio_read_data,
.init = nexio_init,
@@ -1291,8 +1295,8 @@ static void usbtouch_close(struct input_dev *input)
static void usbtouch_free_buffers(struct usb_device *udev,
struct usbtouch_usb *usbtouch)
{
- usb_buffer_free(udev, usbtouch->type->rept_size,
- usbtouch->data, usbtouch->data_dma);
+ usb_free_coherent(udev, usbtouch->type->rept_size,
+ usbtouch->data, usbtouch->data_dma);
kfree(usbtouch->buffer);
}
@@ -1336,8 +1340,8 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!type->process_pkt)
type->process_pkt = usbtouch_process_pkt;
- usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
- GFP_KERNEL, &usbtouch->data_dma);
+ usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
+ GFP_KERNEL, &usbtouch->data_dma);
if (!usbtouch->data)
goto out_free;
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 5109bf3dd85..cbfef1ea7e3 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -200,7 +200,7 @@ void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio,
mutex_lock(&wm->codec_mutex);
reg = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
- if (status & WM97XX_GPIO_HIGH)
+ if (status == WM97XX_GPIO_HIGH)
reg |= gpio;
else
reg &= ~gpio;
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ee5837522f5..0cabe31f26d 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -787,8 +787,7 @@ capi_poll(struct file *file, poll_table * wait)
}
static int
-capi_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
capi_ioctl_struct data;
@@ -981,6 +980,18 @@ register_out:
}
}
+static long
+capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = capi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
@@ -1026,7 +1037,7 @@ static const struct file_operations capi_fops =
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
- .ioctl = capi_ioctl,
+ .unlocked_ioctl = capi_unlocked_ioctl,
.open = capi_open,
.release = capi_release,
};
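This capi change follows the tree-wide pattern for retiring fops->ioctl: the handler loses its inode argument and is wrapped in an unlocked_ioctl that takes the BKL explicitly, so locking behaviour is unchanged. The divert_procfs, isdn_common and mISDN timerdev hunks below perform the same conversion. As a generic template, with hypothetical foo_* names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* old ->ioctl body, unchanged apart from dropping the inode argument */
static int foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return 0;
}

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the implicit BKL the old hook had */
	ret = foo_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};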
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index bd00dceacaf..bde3c88b8b2 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1147,6 +1147,12 @@ load_unlock_out:
if (ctr->state == CAPI_CTR_DETECTED)
goto reset_unlock_out;
+ if (ctr->reset_ctr == NULL) {
+ printk(KERN_DEBUG "kcapi: reset: no reset function\n");
+ retval = -ESRCH;
+ goto reset_unlock_out;
+ }
+
ctr->reset_ctr(ctr);
retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 9f49d906579..c53e2417e7d 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/isdnif.h>
#include <net/net_namespace.h>
+#include <linux/smp_lock.h>
#include "isdn_divert.h"
@@ -177,9 +178,7 @@ isdn_divert_close(struct inode *ino, struct file *filep)
/*********/
/* IOCTL */
/*********/
-static int
-isdn_divert_ioctl(struct inode *inode, struct file *file,
- uint cmd, ulong arg)
+static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
{
divert_ioctl dioctl;
int i;
@@ -258,6 +257,17 @@ isdn_divert_ioctl(struct inode *inode, struct file *file,
return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0;
} /* isdn_divert_ioctl */
+static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = isdn_divert_ioctl_unlocked(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations isdn_fops =
{
.owner = THIS_MODULE,
@@ -265,7 +275,7 @@ static const struct file_operations isdn_fops =
.read = isdn_divert_read,
.write = isdn_divert_write,
.poll = isdn_divert_poll,
- .ioctl = isdn_divert_ioctl,
+ .unlocked_ioctl = isdn_divert_ioctl,
.open = isdn_divert_open,
.release = isdn_divert_close,
};
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 964a55fb148..8f78f15c8ef 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -170,17 +170,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
}
/*
- * convert hex to binary
- */
-static inline u8 hex2bin(char c)
-{
- int result = c & 0x0f;
- if (c & 0x40)
- result += 9;
- return result;
-}
-
-/*
* convert an IE from Gigaset hex string to ETSI binary representation
* including length byte
* return value: result length, -1 on error
@@ -191,7 +180,7 @@ static int encode_ie(char *in, u8 *out, int maxlen)
while (*in) {
if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
return -1;
- out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+ out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
in += 2;
}
out[0] = l;
@@ -933,30 +922,6 @@ void gigaset_isdn_stop(struct cardstate *cs)
*/
/*
- * load firmware
- */
-static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "load_firmware ignored\n");
-
- return 0;
-}
-
-/*
- * reset (deactivate) controller
- */
-static void gigaset_reset_ctr(struct capi_ctr *ctr)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "reset_ctr ignored\n");
-}
-
-/*
* register CAPI application
*/
static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
@@ -2213,8 +2178,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
iif->ctr.driverdata = cs;
strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
iif->ctr.driver_name = "gigaset";
- iif->ctr.load_firmware = gigaset_load_firmware;
- iif->ctr.reset_ctr = gigaset_reset_ctr;
+ iif->ctr.load_firmware = NULL;
+ iif->ctr.reset_ctr = NULL;
iif->ctr.register_appl = gigaset_register_appl;
iif->ctr.release_appl = gigaset_release_appl;
iif->ctr.send_message = gigaset_send_message;
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 94b796d8405..f410d0eb2fe 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/serial.h>
@@ -61,31 +60,6 @@ static void avmcs_release(struct pcmcia_device *link);
static void avmcs_detach(struct pcmcia_device *p_dev);
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avmcs_attach() creates an "instance" of the driver, allocating
@@ -100,32 +74,19 @@ typedef struct local_info_t {
static int avmcs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts2 = 0;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
p_dev->conf.ConfigIndex = 1;
p_dev->conf.Present = PRESENT_OPTION;
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- goto err;
- p_dev->priv = local;
-
return avmcs_config(p_dev);
-
- err:
- return -ENOMEM;
} /* avmcs_attach */
/*======================================================================
@@ -140,7 +101,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
static void avmcs_detach(struct pcmcia_device *link)
{
avmcs_release(link);
- kfree(link->priv);
} /* avmcs_detach */
/*======================================================================
@@ -171,14 +131,11 @@ static int avmcs_configcheck(struct pcmcia_device *p_dev,
static int avmcs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
int cardtype;
int (*addcard)(unsigned int port, unsigned irq);
- dev = link->priv;
-
devname[0] = 0;
if (link->prod_id[1])
strlcpy(devname, link->prod_id[1], sizeof(devname));
@@ -190,11 +147,7 @@ static int avmcs_config(struct pcmcia_device *link)
return -ENODEV;
do {
- /*
- * allocate an interrupt line
- */
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -211,15 +164,11 @@ static int avmcs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
if (devname[0]) {
char *s = strrchr(devname, ' ');
if (!s)
s = devname;
else s++;
- strcpy(dev->node.dev_name, s);
if (strcmp("M1", s) == 0) {
cardtype = AVM_CARDTYPE_M1;
} else if (strcmp("M2", s) == 0) {
@@ -227,14 +176,8 @@ static int avmcs_config(struct pcmcia_device *link)
} else {
cardtype = AVM_CARDTYPE_B1;
}
- } else {
- strcpy(dev->node.dev_name, "b1");
+ } else
cardtype = AVM_CARDTYPE_B1;
- }
-
- dev->node.major = 64;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
/* If any step failed, release any partially configured state */
if (i != 0) {
@@ -249,13 +192,12 @@ static int avmcs_config(struct pcmcia_device *link)
default:
case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
}
- if ((i = (*addcard)(link->io.BasePort1, link->irq.AssignedIRQ)) < 0) {
- printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n",
- dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ);
- avmcs_release(link);
- return -ENODEV;
+ if ((i = (*addcard)(link->io.BasePort1, link->irq)) < 0) {
+ dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
+ link->io.BasePort1, link->irq);
+ avmcs_release(link);
+ return -ENODEV;
}
- dev->node.minor = i;
return 0;
} /* avmcs_config */
@@ -270,7 +212,7 @@ static int avmcs_config(struct pcmcia_device *link)
static void avmcs_release(struct pcmcia_device *link)
{
- b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
+ b1pcmcia_delcard(link->io.BasePort1, link->irq);
pcmcia_disable_device(link);
} /* avmcs_release */
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 75e71b5d921..095ed76ebe8 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -117,7 +117,7 @@
* NOTE: only one mode value must be given for every card.
* -> See hfc_multi.h for HFC_IO_MODE_* values
* By default, the IO mode is pci memory IO (MEMIO).
- * Some cards requre specific IO mode, so it cannot be changed.
+ * Some cards require specific IO mode, so it cannot be changed.
* It may be usefull to set IO mode to register io (REGIO) to solve
* PCI bridge problems.
* If unsure, don't give this parameter.
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 8d1d63a02b3..a80a7617f16 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -62,31 +62,6 @@ static void avma1cs_release(struct pcmcia_device *link);
static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avma1cs_attach() creates an "instance" of the driver, allocating
@@ -101,17 +76,8 @@ typedef struct local_info_t {
static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
-
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- p_dev->priv = local;
-
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -119,9 +85,6 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -176,14 +139,11 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
static int __devinit avma1cs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
IsdnCard_t icard;
int busy = 0;
- dev = link->priv;
-
dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
devname[0] = 0;
@@ -197,8 +157,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
/*
* allocate an interrupt line
*/
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -215,14 +174,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
- strcpy(dev->node.dev_name, "A1");
- dev->node.major = 45;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* If any step failed, release any partially configured state */
if (i != 0) {
avma1cs_release(link);
@@ -230,9 +181,9 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
}
printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
- link->io.BasePort1, link->irq.AssignedIRQ);
+ link->io.BasePort1, link->irq);
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = isdnprot;
icard.typ = ISDN_CTYPE_A1_PCMCIA;
@@ -243,7 +194,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
avma1cs_release(link);
return -ENODEV;
}
- dev->node.minor = i;
+ link->priv = (void *) (unsigned long) i;
return 0;
} /* avma1cs_config */
@@ -258,12 +209,12 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
static void avma1cs_release(struct pcmcia_device *link)
{
- local_info_t *local = link->priv;
+ unsigned long minor = (unsigned long) link->priv;
dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
/* now unregister function with hisax */
- HiSax_closecard(local->node.minor);
+ HiSax_closecard(minor);
pcmcia_disable_device(link);
} /* avma1cs_release */
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9f2279e21f..218927e3a4e 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -87,24 +87,8 @@ static void elsa_cs_release(struct pcmcia_device *link);
static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -136,10 +120,6 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
local->cardnr = -1;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -223,28 +203,18 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "elsa");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -253,7 +223,7 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 051b44e2556..384d5118e32 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -310,7 +310,7 @@ wait_busy(hfc4s8s_hw * a)
/******************************************************/
/* function to read critical counter registers that */
-/* may be udpated by the chip during read */
+/* may be updated by the chip during read */
/******************************************************/
static u_char
Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 1925118122f..8b0a7d86b30 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -74,9 +74,10 @@ static struct pnp_device_id fcpnp_ids[] __devinitdata = {
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
},
+ { .id = "" }
};
-MODULE_DEVICE_TABLE(isapnp, fcpnp_ids);
+MODULE_DEVICE_TABLE(pnp, fcpnp_ids);
#endif
static int protocol = 2; /* EURO-ISDN Default */
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 71b3ddef03b..1f4feaab21a 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -87,32 +87,8 @@ static void sedlbauer_release(struct pcmcia_device *link);
static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'memory_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
int cardnr;
} local_info_t;
@@ -143,10 +119,6 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -227,9 +199,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -285,7 +255,6 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
- local_info_t *dev = link->priv;
win_req_t *req;
int ret;
IsdnCard_t icard;
@@ -313,17 +282,6 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
goto failed;
/*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
-
- /*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
@@ -332,21 +290,13 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "sedlbauer");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -358,7 +308,7 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
req->Base+req->Size-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index d010a0da8e1..5771955cc53 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -68,34 +68,8 @@ static void teles_cs_release(struct pcmcia_device *link);
static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the teles_cs device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -126,10 +100,6 @@ static int __devinit teles_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -213,28 +183,18 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
if (i != 0)
goto cs_failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto cs_failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto cs_failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "teles");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -243,7 +203,7 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_TELESPCMCIA;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 70044ee4b22..a44cdb492ea 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1272,9 +1272,9 @@ isdn_poll(struct file *file, poll_table * wait)
static int
-isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+isdn_ioctl(struct file *file, uint cmd, ulong arg)
{
- uint minor = iminor(inode);
+ uint minor = iminor(file->f_path.dentry->d_inode);
isdn_ctrl c;
int drvidx;
int chidx;
@@ -1722,6 +1722,18 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
#undef cfg
}
+static long
+isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = isdn_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Open the device code.
*/
@@ -1838,7 +1850,7 @@ static const struct file_operations isdn_fops =
.read = isdn_read,
.write = isdn_write,
.poll = isdn_poll,
- .ioctl = isdn_ioctl,
+ .unlocked_ioctl = isdn_unlocked_ioctl,
.open = isdn_open,
.release = isdn_close,
};
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index efcf1f9327e..fd10d7c785d 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -194,7 +194,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
if ( ( (ix25_pdata_t*) (cprot->proto_data) )
-> state == WAN_CONNECTED ){
if( skb_push(skb, 1)){
- skb -> data[0]=0x00;
+ skb->data[0] = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -224,7 +224,7 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x01;
+ *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -253,7 +253,7 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
*state_p = WAN_DISCONNECTED;
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x02;
+ *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -272,9 +272,10 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
unsigned char firstbyte = skb->data[0];
enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
int ret = 0;
- IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state );
+ IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
+ MY_DEVNAME(cprot->net_dev), firstbyte, *state);
switch ( firstbyte ){
- case 0x00: /* dl_data request */
+ case X25_IFACE_DATA:
if( *state == WAN_CONNECTED ){
skb_pull(skb, 1);
cprot -> net_dev -> trans_start = jiffies;
@@ -285,7 +286,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
}
illegal_state_warn( *state, firstbyte );
break;
- case 0x01: /* dl_connect request */
+ case X25_IFACE_CONNECT:
if( *state == WAN_DISCONNECTED ){
*state = WAN_CONNECTING;
ret = cprot -> dops -> connect_req(cprot);
@@ -298,7 +299,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x02: /* dl_disconnect request */
+ case X25_IFACE_DISCONNECT:
switch ( *state ){
case WAN_DISCONNECTED:
/* Should not happen. However, give upper layer a
@@ -318,7 +319,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x03: /* changing lapb parameters requested */
+ case X25_IFACE_PARAMS:
printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
" options not yet supported\n");
break;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 8785004e85e..81048b8ed8a 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -24,6 +24,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mISDNif.h>
+#include <linux/smp_lock.h>
#include "core.h"
static u_int *debug;
@@ -97,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
filep, buf, (int)count, off);
- if (*off != filep->f_pos)
- return -ESPIPE;
if (list_empty(&dev->expired) && (dev->work == 0)) {
if (filep->f_flags & O_NONBLOCK)
@@ -215,9 +214,8 @@ unlock:
return ret;
}
-static int
-mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+static long
+mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct mISDNtimerdev *dev = filep->private_data;
int id, tout, ret = 0;
@@ -226,6 +224,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
filep, cmd, arg);
+ lock_kernel();
switch (cmd) {
case IMADDTIMER:
if (get_user(tout, (int __user *)arg)) {
@@ -257,13 +256,14 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
default:
ret = -EINVAL;
}
+ unlock_kernel();
return ret;
}
static const struct file_operations mISDN_fops = {
.read = mISDN_read,
.poll = mISDN_poll,
- .ioctl = mISDN_ioctl,
+ .unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 505eb64c329..81bf25e67ce 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -21,7 +21,7 @@ comment "LED drivers"
config LEDS_88PM860X
tristate "LED Support for Marvell 88PM860x PMIC"
- depends on LEDS_CLASS && MFD_88PM860X
+ depends on MFD_88PM860X
help
This option enables support for on-chip LED drivers found on Marvell
Semiconductor 88PM8606 PMIC.
@@ -67,6 +67,16 @@ config LEDS_NET48XX
This option enables support for the Soekris net4801 and net4826 error
LED.
+config LEDS_NET5501
+ tristate "LED Support for Soekris net5501 series Error LED"
+ depends on LEDS_TRIGGERS
+ depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
+ select LEDS_TRIGGER_DEFAULT_ON
+ default n
+ help
+ Add support for the Soekris net5501 board (detection, error LED
+ and GPIO).
+
config LEDS_FSG
tristate "LED Support for the Freecom FSG-3"
depends on MACH_FSG
@@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
+config LEDS_MC13783
+ tristate "LED Support for MC13783 PMIC"
+ depends on MFD_MC13783
+ help
+ This option enables support for the on-chip LED drivers found
+ on the Freescale Semiconductor MC13783 PMIC.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
help
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 0cd8b995738..2493de49937 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
+obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
+obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 69e7d86a514..26066007650 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev,
static struct device_attribute led_class_attrs[] = {
__ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
- __ATTR(max_brightness, 0644, led_max_brightness_show, NULL),
+ __ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
#ifdef CONFIG_LEDS_TRIGGERS
__ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
#endif
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 16a60c06c96..b7677106cff 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
if (pdev->dev.parent->platform_data) {
pm860x_pdata = pdev->dev.parent->platform_data;
pdata = pm860x_pdata->led;
- } else
- pdata = NULL;
+ } else {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
@@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
data->port = __check_device(pdata, data->name);
- if (data->port < 0)
+ if (data->port < 0) {
+ dev_err(&pdev->dev, "check device failed\n");
+ kfree(data);
return -EINVAL;
+ }
data->current_brightness = 0;
data->cdev.name = data->name;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index c6e4b772b75..cc22eeefa10 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -26,7 +26,8 @@ struct gpio_led_data {
u8 new_level;
u8 can_sleep;
u8 active_low;
- int (*platform_gpio_blink_set)(unsigned gpio,
+ u8 blinking;
+ int (*platform_gpio_blink_set)(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off);
};
@@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work)
struct gpio_led_data *led_dat =
container_of(work, struct gpio_led_data, work);
- gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio,
+ led_dat->new_level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
}
static void gpio_led_set(struct led_classdev *led_cdev,
@@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev,
if (led_dat->can_sleep) {
led_dat->new_level = level;
schedule_work(&led_dat->work);
- } else
- gpio_set_value(led_dat->gpio, level);
+ } else {
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio, level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value(led_dat->gpio, level);
+ }
}
static int gpio_blink_set(struct led_classdev *led_cdev,
@@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
struct gpio_led_data *led_dat =
container_of(led_cdev, struct gpio_led_data, cdev);
- return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off);
+ led_dat->blinking = 1;
+ return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK,
+ delay_on, delay_off);
}
static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
- int (*blink_set)(unsigned, unsigned long *, unsigned long *))
+ int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
{
int ret, state;
@@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->gpio = template->gpio;
led_dat->can_sleep = gpio_cansleep(template->gpio);
led_dat->active_low = template->active_low;
+ led_dat->blinking = 0;
if (blink_set) {
led_dat->platform_gpio_blink_set = blink_set;
led_dat->cdev.blink_set = gpio_blink_set;
@@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
-
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
@@ -211,7 +227,7 @@ struct gpio_led_of_platform_data {
static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node, *child;
+ struct device_node *np = ofdev->dev.of_node, *child;
struct gpio_led_of_platform_data *pdata;
int count = 0, ret;
@@ -291,8 +307,8 @@ static struct of_platform_driver of_gpio_leds_driver = {
.driver = {
.name = "of_gpio_leds",
.owner = THIS_MODULE,
+ .of_match_table = of_gpio_leds_match,
},
- .match_table = of_gpio_leds_match,
.probe = of_gpio_leds_probe,
.remove = __devexit_p(of_gpio_leds_remove),
};
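
A minimal stand-alone sketch (not kernel code, all names illustrative) of the pattern the leds-gpio hunks above introduce: once hardware blinking has been requested through the platform callback, the next brightness change is routed back through that callback with an explicit state, so the platform code can stop its blink engine before the level is forced.

#include <stdio.h>

struct sketch_led {
	int gpio;
	int blinking;                           /* set when hw blink is active */
	int (*blink_set)(unsigned gpio, int state,
			 unsigned long *delay_on, unsigned long *delay_off);
};

static int fake_platform_blink_set(unsigned gpio, int state,
				   unsigned long *on, unsigned long *off)
{
	if (on && off)
		printf("gpio %u: start hw blink\n", gpio);
	else
		printf("gpio %u: stop hw blink, force level %d\n", gpio, state);
	return 0;
}

static void sketch_led_set(struct sketch_led *led, int level)
{
	if (led->blinking) {
		/* route through the callback so the blink engine is disabled */
		led->blink_set(led->gpio, level, NULL, NULL);
		led->blinking = 0;
	} else {
		printf("gpio %d: plain write %d\n", led->gpio, level);
	}
}

int main(void)
{
	struct sketch_led led = { .gpio = 5, .blink_set = fake_platform_blink_set };
	unsigned long on = 500, off = 500;

	led.blink_set(led.gpio, 1, &on, &off);  /* like gpio_blink_set() above */
	led.blinking = 1;
	sketch_led_set(&led, 0);                /* stops blinking, then sets level */
	sketch_led_set(&led, 1);                /* now a plain write again */
	return 0;
}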
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8d5ecceba18..932a58da76c 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client,
{
struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
struct lp3944_data *data;
+ int err;
if (lp3944_pdata == NULL) {
dev_err(&client->dev, "no platform data\n");
@@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client,
mutex_init(&data->lock);
- dev_info(&client->dev, "lp3944 enabled\n");
+ err = lp3944_configure(client, data, lp3944_pdata);
+ if (err < 0) {
+ kfree(data);
+ return err;
+ }
- lp3944_configure(client, data, lp3944_pdata);
+ dev_info(&client->dev, "lp3944 enabled\n");
return 0;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
new file mode 100644
index 00000000000..f05bb08d0f0
--- /dev/null
+++ b/drivers/leds/leds-mc13783.c
@@ -0,0 +1,403 @@
+/*
+ * LEDs driver for Freescale MC13783
+ *
+ * Copyright (C) 2010 Philippe Rétornaz
+ *
+ * Based on leds-da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/slab.h>
+
+struct mc13783_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct mc13783 *master;
+ enum led_brightness new_brightness;
+ int id;
+};
+
+#define MC13783_REG_LED_CONTROL_0 51
+#define MC13783_LED_C0_ENABLE_BIT (1 << 0)
+#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7)
+#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8)
+#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9)
+#define MC13783_LED_C0_BOOST_BIT (1 << 10)
+#define MC13783_LED_C0_ABMODE_MASK 0x7
+#define MC13783_LED_C0_ABMODE 11
+#define MC13783_LED_C0_ABREF_MASK 0x3
+#define MC13783_LED_C0_ABREF 14
+
+#define MC13783_REG_LED_CONTROL_1 52
+#define MC13783_LED_C1_TC1HALF_BIT (1 << 18)
+
+#define MC13783_REG_LED_CONTROL_2 53
+#define MC13783_LED_C2_BL_P_MASK 0xf
+#define MC13783_LED_C2_MD_P 9
+#define MC13783_LED_C2_AD_P 13
+#define MC13783_LED_C2_KP_P 17
+#define MC13783_LED_C2_BL_C_MASK 0x7
+#define MC13783_LED_C2_MD_C 0
+#define MC13783_LED_C2_AD_C 3
+#define MC13783_LED_C2_KP_C 6
+
+#define MC13783_REG_LED_CONTROL_3 54
+#define MC13783_LED_C3_TC_P 6
+#define MC13783_LED_C3_TC_P_MASK 0x1f
+
+#define MC13783_REG_LED_CONTROL_4 55
+#define MC13783_REG_LED_CONTROL_5 56
+
+#define MC13783_LED_Cx_PERIOD 21
+#define MC13783_LED_Cx_PERIOD_MASK 0x3
+#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23)
+#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23)
+#define MC13783_LED_Cx_TC_C_MASK 0x3
+
+static void mc13783_led_work(struct work_struct *work)
+{
+ struct mc13783_led *led = container_of(work, struct mc13783_led, work);
+ int reg = 0;
+ int mask = 0;
+ int value = 0;
+ int bank, off, shift;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P;
+ break;
+ case MC13783_LED_AD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P;
+ break;
+ case MC13783_LED_KP:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
+ off = led->id - MC13783_LED_R1;
+ bank = off/3;
+ reg = MC13783_REG_LED_CONTROL_3 + off/3;
+ shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P;
+ value = (led->new_brightness >> 3) << shift;
+ mask = MC13783_LED_C3_TC_P_MASK << shift;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ mc13783_reg_rmw(led->master, reg, mask, value);
+
+ mc13783_unlock(led->master);
+}
+
+static void mc13783_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct mc13783_led *led;
+
+ led = container_of(led_cdev, struct mc13783_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
+{
+ int shift = 0;
+ int mask = 0;
+ int value = 0;
+ int reg = 0;
+ int ret, bank;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ shift = MC13783_LED_C2_MD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_AD:
+ shift = MC13783_LED_C2_AD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_KP:
+ shift = MC13783_LED_C2_KP_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
+ bank = (led->id - MC13783_LED_R1)/3;
+ reg = MC13783_REG_LED_CONTROL_3 + bank;
+ shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
+ mask = MC13783_LED_Cx_TC_C_MASK;
+ value = max_current & MC13783_LED_Cx_TC_C_MASK;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ ret = mc13783_reg_rmw(led->master, reg, mask << shift,
+ value << shift);
+
+ mc13783_unlock(led->master);
+ return ret;
+}
+
+static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int ret = 0;
+ int reg = 0;
+
+ mc13783_lock(dev);
+
+ if (pdata->flags & MC13783_LED_TC1HALF)
+ reg |= MC13783_LED_C1_TC1HALF_BIT;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMTC)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMBL)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC1)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc2_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC2)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC3)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
+ if (ret)
+ goto out;
+
+ reg = MC13783_LED_C0_ENABLE_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_MD)
+ reg |= MC13783_LED_C0_TRIODE_MD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_AD)
+ reg |= MC13783_LED_C0_TRIODE_AD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_KP)
+ reg |= MC13783_LED_C0_TRIODE_KP_BIT;
+ if (pdata->flags & MC13783_LED_BOOST_EN)
+ reg |= MC13783_LED_C0_BOOST_BIT;
+
+ reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) <<
+ MC13783_LED_C0_ABMODE;
+ reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) <<
+ MC13783_LED_C0_ABREF;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
+
+out:
+ mc13783_unlock(dev);
+ return ret;
+}
+
+static int __devinit mc13783_led_probe(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led_platform_data *led_cur;
+ struct mc13783_led *led, *led_dat;
+ int ret, i;
+ int init_led = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
+ return -EINVAL;
+ }
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = mc13783_leds_prepare(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led driver\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_dat = &led[i];
+ led_cur = &pdata->led[i];
+
+ if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) {
+ dev_err(&pdev->dev, "invalid id %d\n", led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ if (init_led & (1 << led_cur->id)) {
+ dev_err(&pdev->dev, "led %d already initialized\n",
+ led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ init_led |= 1 << led_cur->id;
+ led_dat->cdev.name = led_cur->name;
+ led_dat->cdev.default_trigger = led_cur->default_trigger;
+ led_dat->cdev.brightness_set = mc13783_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->id = led_cur->id;
+ led_dat->master = dev_get_drvdata(pdev->dev.parent);
+
+ INIT_WORK(&led_dat->work, mc13783_led_work);
+
+ ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register led %d\n",
+ led_dat->id);
+ goto err_register;
+ }
+
+ ret = mc13783_led_setup(led_dat, led_cur->max_current);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led %d\n",
+ led_dat->id);
+ i++;
+ goto err_register;
+ }
+ }
+
+ platform_set_drvdata(pdev, led);
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+err_free:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit mc13783_led_remove(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led *led = platform_get_drvdata(pdev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ mc13783_lock(dev);
+
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
+
+ mc13783_unlock(dev);
+
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver mc13783_led_driver = {
+ .driver = {
+ .name = "mc13783-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = mc13783_led_probe,
+ .remove = __devexit_p(mc13783_led_remove),
+};
+
+static int __init mc13783_led_init(void)
+{
+ return platform_driver_register(&mc13783_led_driver);
+}
+module_init(mc13783_led_init);
+
+static void __exit mc13783_led_exit(void)
+{
+ platform_driver_unregister(&mc13783_led_driver);
+}
+module_exit(mc13783_led_exit);
+
+MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
+MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mc13783-led");
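
A quick stand-alone check (not driver code) of the register/shift arithmetic the RGB branch of mc13783_led_work() uses above: three LEDs per bank, one control register per bank starting at LED_CONTROL_3, and 5-bit duty fields spaced 5 bits apart starting at bit 6. The constants are copied from the new file above; everything else is illustrative.

#include <stdio.h>

#define LED_CONTROL_3     54
#define LED_C3_TC_P       6
#define LED_C3_TC_P_MASK  0x1f

int main(void)
{
	for (int off = 0; off < 9; off++) {     /* R1..B3 relative to R1 */
		int bank  = off / 3;
		int reg   = LED_CONTROL_3 + bank;
		int shift = (off - bank * 3) * 5 + LED_C3_TC_P;
		unsigned mask = (unsigned)LED_C3_TC_P_MASK << shift;

		printf("led %d -> reg %d, shift %2d, mask 0x%08x\n",
		       off, reg, shift, mask);
	}
	return 0;
}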
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
new file mode 100644
index 00000000000..3063f591f0d
--- /dev/null
+++ b/drivers/leds/leds-net5501.c
@@ -0,0 +1,94 @@
+/*
+ * Soekris board support code
+ *
+ * Copyright (C) 2008-2009 Tower Technologies
+ * Written by Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <asm/geode.h>
+
+static struct gpio_led net5501_leds[] = {
+ {
+ .name = "error",
+ .gpio = 6,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data net5501_leds_data = {
+ .num_leds = ARRAY_SIZE(net5501_leds),
+ .leds = net5501_leds,
+};
+
+static struct platform_device net5501_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &net5501_leds_data,
+};
+
+static void __init init_net5501(void)
+{
+ platform_device_register(&net5501_leds_dev);
+}
+
+struct soekris_board {
+ u16 offset;
+ char *sig;
+ u8 len;
+ void (*init)(void);
+};
+
+static struct soekris_board __initdata boards[] = {
+ { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */
+ { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */
+};
+
+static int __init soekris_init(void)
+{
+ int i;
+ unsigned char *rombase, *bios;
+
+ if (!is_geode())
+ return 0;
+
+ rombase = ioremap(0xffff0000, 0xffff);
+ if (!rombase) {
+ printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase");
+ return 0;
+ }
+
+ bios = rombase + 0x20; /* null terminated */
+
+ if (strncmp(bios, "comBIOS", 7))
+ goto unmap;
+
+ for (i = 0; i < ARRAY_SIZE(boards); i++) {
+ unsigned char *model = rombase + boards[i].offset;
+
+ if (strncmp(model, boards[i].sig, boards[i].len) == 0) {
+ printk(KERN_INFO "Soekris %s: %s\n", model, bios);
+
+ if (boards[i].init)
+ boards[i].init();
+ break;
+ }
+ }
+
+unmap:
+ iounmap(rombase);
+ return 0;
+}
+
+arch_initcall(soekris_init);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 51477ec7139..a688293abd0 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -534,7 +534,7 @@ static int __init nas_gpio_init(void)
set_power_light_amber_noblink();
return 0;
out_err:
- for (; i >= 0; i--)
+ for (i--; i >= 0; i--)
unregister_nasgpio_led(i);
pci_unregister_driver(&nas_gpio_pci_driver);
return ret;
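
A tiny stand-alone illustration of the off-by-one the ss4200 hunk above fixes: when registration of entry i fails, the unwind loop must start at i - 1 so it only unregisters entries that actually succeeded. Names are illustrative.

#include <stdio.h>

#define N 4

int main(void)
{
	int i;

	for (i = 0; i < N; i++) {
		if (i == 2)                     /* pretend entry 2 fails */
			goto out_err;
		printf("registered %d\n", i);
	}
	return 0;

out_err:
	for (i--; i >= 0; i--)                  /* not "for (; i >= 0; ...)" */
		printf("unregistered %d\n", i);
	return 1;
}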
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 79119f56e82..bd6da7a9c55 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -155,6 +155,7 @@ static int macio_adb_reset_bus(void)
while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
if (--timeout == 0) {
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
+ spin_unlock_irqrestore(&macio_lock, flags);
return -1;
}
}
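
The generic shape of the fix above (and of the similar smu read-path hunk further down): every early return taken inside the locked region must release the lock, which is what the added spin_unlock_irqrestore() calls do. A stand-alone sketch with a pthread mutex standing in for the spinlock; the structure mirrors the timeout loop in macio_adb_reset_bus().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int reset_bus(int busy_cycles)
{
	int timeout = 32;

	pthread_mutex_lock(&lock);

	while (busy_cycles-- > 0) {             /* pretend the hardware is busy */
		if (--timeout == 0) {
			pthread_mutex_unlock(&lock);    /* the previously missing unlock */
			return -1;
		}
	}

	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("fast device: %d\n", reset_bus(5));
	printf("stuck device: %d\n", reset_bus(1000));
	return 0;
}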
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 26a303a1d1a..97147804a49 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -39,14 +39,12 @@ static struct macio_chip *macio_on_hold;
static int macio_bus_match(struct device *dev, struct device_driver *drv)
{
- struct macio_dev * macio_dev = to_macio_device(dev);
- struct macio_driver * macio_drv = to_macio_driver(drv);
- const struct of_device_id * matches = macio_drv->match_table;
+ const struct of_device_id * matches = drv->of_match_table;
if (!matches)
return 0;
- return of_match_device(matches, &macio_dev->ofdev) != NULL;
+ return of_match_device(matches, dev) != NULL;
}
struct macio_dev *macio_dev_get(struct macio_dev *dev)
@@ -84,7 +82,7 @@ static int macio_device_probe(struct device *dev)
macio_dev_get(macio_dev);
- match = of_match_device(drv->match_table, &macio_dev->ofdev);
+ match = of_match_device(drv->driver.of_match_table, dev);
if (match)
error = drv->probe(macio_dev, match);
if (error)
@@ -248,7 +246,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
static void macio_add_missing_resources(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq_base;
/* Gatwick has some missing interrupts on child nodes */
@@ -289,7 +287,7 @@ static void macio_add_missing_resources(struct macio_dev *dev)
static void macio_setup_interrupts(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq;
int i = 0, j = 0;
@@ -317,7 +315,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
static void macio_setup_resources(struct macio_dev *dev,
struct resource *parent_res)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
struct resource r;
int index;
@@ -373,9 +371,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->bus = &chip->lbus;
dev->media_bay = in_bay;
- dev->ofdev.node = np;
- dev->ofdev.dma_mask = 0xffffffffUL;
- dev->ofdev.dev.dma_mask = &dev->ofdev.dma_mask;
+ dev->ofdev.dev.of_node = np;
+ dev->ofdev.archdata.dma_mask = 0xffffffffUL;
+ dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
@@ -494,9 +492,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
}
/* Add media bay devices if any */
+ pnode = mbdev->ofdev.dev.of_node;
if (mbdev)
- for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np))
- != NULL;) {
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -506,9 +504,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
}
/* Add serial ports if any */
+ pnode = sdev->ofdev.dev.of_node;
if (sdev) {
- for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np))
- != NULL;) {
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 9e9453b5842..6999ce59fd1 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -9,7 +9,7 @@ field##_show (struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct macio_dev *mdev = to_macio_device (dev); \
- return sprintf (buf, format_string, mdev->ofdev.node->field); \
+ return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
}
static ssize_t
@@ -21,7 +21,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
int length = 0;
of = &to_macio_device (dev)->ofdev;
- compat = of_get_property(of->node, "compatible", &cplen);
+ compat = of_get_property(of->dev.of_node, "compatible", &cplen);
if (!compat) {
*buf = '\0';
return 0;
@@ -58,7 +58,7 @@ static ssize_t devspec_show(struct device *dev,
struct of_device *ofdev;
ofdev = to_of_device(dev);
- return sprintf(buf, "%s\n", ofdev->node->full_name);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
macio_config_of_attr (name, "%s\n");
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 08002b88f34..288acce76b7 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -564,7 +564,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
unsigned long base;
int i;
- ofnode = mdev->ofdev.node;
+ ofnode = mdev->ofdev.dev.of_node;
if (macio_resource_count(mdev) < 1)
return -ENODEV;
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index c876349c32d..a271c8218d8 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -100,7 +100,7 @@ const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7c54d80c4fb..12946c5f583 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -375,7 +375,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
- while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+ while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
if (strcmp(i2s->name, "i2s-a") == 0)
break;
if (i2s == NULL) {
@@ -431,7 +431,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto bail_free;
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 888448cf7f1..2506c957712 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -671,8 +671,11 @@ static const struct of_device_id smu_platform_match[] =
static struct of_platform_driver smu_of_platform_driver =
{
- .name = "smu",
- .match_table = smu_platform_match,
+ .driver = {
+ .name = "smu",
+ .owner = THIS_MODULE,
+ .of_match_table = smu_platform_match,
+ },
.probe = smu_platform_probe,
};
@@ -1183,8 +1186,10 @@ static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
return -EOVERFLOW;
spin_lock_irqsave(&pp->lock, flags);
if (pp->cmd.status == 1) {
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&pp->lock, flags);
return -EAGAIN;
+ }
add_wait_queue(&pp->wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c42eeb43042..16d82f17ae8 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,6 +182,7 @@ remove_thermostat(struct i2c_client *client)
thermostat = NULL;
+ i2c_set_clientdata(client, NULL);
kfree(th);
return 0;
@@ -399,6 +400,7 @@ static int probe_thermostat(struct i2c_client *client,
rc = read_reg(th, CONFIG_REG);
if (rc < 0) {
dev_err(&client->dev, "Thermostat failed to read config!\n");
+ i2c_set_clientdata(client, NULL);
kfree(th);
return -ENODEV;
}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index b18fa948f3d..e60605bd0ea 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2215,7 +2215,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
state = state_detached;
/* Lookup the fans in the device tree */
- fcu_lookup_fans(dev->node);
+ fcu_lookup_fans(dev->dev.of_node);
/* Add the driver */
return i2c_add_driver(&therm_pm72_driver);
@@ -2238,8 +2238,11 @@ static const struct of_device_id fcu_match[] =
static struct of_platform_driver fcu_of_platform_driver =
{
- .name = "temperature",
- .match_table = fcu_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = fcu_match,
+ },
.probe = fcu_of_probe,
.remove = fcu_of_remove
};
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 0839770e4ec..5c9367acf0c 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -463,8 +463,11 @@ static const struct of_device_id therm_of_match[] = {{
};
static struct of_platform_driver therm_of_driver = {
- .name = "temperature",
- .match_table = therm_of_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = therm_of_match,
+ },
.probe = therm_of_probe,
.remove = therm_of_remove,
};
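
The smu, therm_pm72 and therm_windtunnel hunks above all apply the same conversion: the driver name and match table move from the of_platform_driver wrapper into the embedded generic device_driver, as .driver.name and .driver.of_match_table. A stand-alone sketch of that initializer shape using mock types, not the kernel's real structures:

#include <stdio.h>

struct mock_of_device_id { const char *compatible; };

struct mock_device_driver {
	const char *name;
	const struct mock_of_device_id *of_match_table;
};

struct mock_of_platform_driver {
	struct mock_device_driver driver;   /* new layout: settings live here */
};

static const struct mock_of_device_id smu_match[] = {
	{ .compatible = "smu" },
	{ }
};

static struct mock_of_platform_driver smu_drv = {
	.driver = {
		.name		= "smu",
		.of_match_table	= smu_match,
	},
};

int main(void)
{
	printf("%s matches \"%s\"\n", smu_drv.driver.name,
	       smu_drv.driver.of_match_table[0].compatible);
	return 0;
}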
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 42764849eb7..3d4fc0f7b00 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2273,8 +2273,7 @@ static int register_pmu_pm_ops(void)
device_initcall(register_pmu_pm_ops);
#endif
-static int
-pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct file *filp,
u_int cmd, u_long arg)
{
__u32 __user *argp = (__u32 __user *)arg;
@@ -2337,11 +2336,23 @@ pmu_ioctl(struct inode * inode, struct file *filp,
return error;
}
+static long pmu_unlocked_ioctl(struct file *filp,
+ u_int cmd, u_long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = pmu_ioctl(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
- .ioctl = pmu_ioctl,
+ .unlocked_ioctl = pmu_unlocked_ioctl,
.open = pmu_open,
.release = pmu_release,
};
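
The via-pmu hunk above keeps the old .ioctl body's locking semantics while moving to .unlocked_ioctl by calling it between lock_kernel() and unlock_kernel(). A stand-alone illustration of that wrapper, with a pthread mutex standing in for the big kernel lock and purely illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fake_bkl = PTHREAD_MUTEX_INITIALIZER;

static int old_ioctl(unsigned int cmd, unsigned long arg)
{
	printf("cmd %u arg %lu handled under the lock\n", cmd, arg);
	return 0;
}

static long new_unlocked_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&fake_bkl);      /* was lock_kernel() */
	ret = old_ioctl(cmd, arg);
	pthread_mutex_unlock(&fake_bkl);    /* was unlock_kernel() */

	return ret;
}

int main(void)
{
	return (int)new_unlocked_ioctl(1, 42);
}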
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 565d5b2adc9..749d174b0dc 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -188,7 +188,7 @@ struct wf_smu_sys_fans_state {
};
/*
- * Configs for SMU Sytem Fan control loop
+ * Configs for SMU System Fan control loop
*/
static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
/* Model ID 2 */
@@ -757,10 +757,8 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_sys_fans)
- kfree(wf_smu_sys_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_sys_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index bea99168ff3..34427323512 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -687,12 +687,9 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_slots_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_drive_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_slots_fans);
+ kfree(wf_smu_drive_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
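
The windfarm hunks above drop the "if (p) kfree(p)" guards because kfree(NULL) is a no-op, and the pm91 hunk additionally fixes a copy-and-paste error that freed wf_smu_cpu_fans three times under three different guards. The same convention holds for ISO C's free(), as this minimal sketch shows:

#include <stdlib.h>

int main(void)
{
	char *p = NULL;

	free(p);        /* well defined: a no-op for NULL, no guard needed */
	return 0;
}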
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index acb3a4e404f..4a6feac8c94 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -100,8 +100,8 @@ config MD_RAID1
If unsure, say Y.
config MD_RAID10
- tristate "RAID-10 (mirrored striping) mode (EXPERIMENTAL)"
- depends on BLK_DEV_MD && EXPERIMENTAL
+ tristate "RAID-10 (mirrored striping) mode"
+ depends on BLK_DEV_MD
---help---
RAID-10 provides a combination of striping (RAID-0) and
mirroring (RAID-1) with easier configuration and more flexible
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 26ac8aad0b1..1742435ce3a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -505,7 +505,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared) {
/* rocking back to read-only */
@@ -526,7 +526,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page)
return;
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -575,7 +575,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err;
}
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -661,7 +661,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
@@ -1292,9 +1292,14 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
if (!bitmap) return 0;
if (behind) {
+ int bw;
atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
- atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+ bw, bitmap->max_write_behind);
}
while (sectors) {
@@ -1351,7 +1356,8 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
{
if (!bitmap) return;
if (behind) {
- atomic_dec(&bitmap->behind_writes);
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
}
@@ -1675,12 +1681,13 @@ int bitmap_create(mddev_t *mddev)
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
+ init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
- bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
@@ -1692,7 +1699,7 @@ int bitmap_create(mddev_t *mddev)
* and bypass the page cache, we must sync the file
* first.
*/
- vfs_fsync(file, file->f_dentry, 1);
+ vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
if (!mddev->bitmap_info.external)
@@ -2006,6 +2013,27 @@ static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+static ssize_t
+behind_writes_used_show(mddev_t *mddev, char *page)
+{
+ if (mddev->bitmap == NULL)
+ return sprintf(page, "0\n");
+ return sprintf(page, "%lu\n",
+ mddev->bitmap->behind_writes_used);
+}
+
+static ssize_t
+behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap)
+ mddev->bitmap->behind_writes_used = 0;
+ return len;
+}
+
+static struct md_sysfs_entry max_backlog_used =
+__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
+ behind_writes_used_show, behind_writes_used_reset);
+
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
&bitmap_timeout.attr,
@@ -2013,6 +2041,7 @@ static struct attribute *md_bitmap_attrs[] = {
&bitmap_chunksize.attr,
&bitmap_metadata.attr,
&bitmap_can_clear.attr,
+ &max_backlog_used.attr,
NULL
};
struct attribute_group md_bitmap_group = {
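
A stand-alone sketch (C11 atomics instead of the kernel's atomic_t, illustrative names) of the bookkeeping bitmap_startwrite() gains above: read the counter right after incrementing it and remember the largest value seen, which is what the new max_backlog_used sysfs attribute reports.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int behind_writes;
static unsigned long behind_writes_used;    /* high-water mark */

static void start_behind_write(void)
{
	int bw = atomic_fetch_add(&behind_writes, 1) + 1;

	/* like the kernel code, this tolerates a benign race on the maximum */
	if ((unsigned long)bw > behind_writes_used)
		behind_writes_used = bw;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		start_behind_write();
	printf("max backlog used: %lu\n", behind_writes_used);
	return 0;
}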
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index cb821d76d1b..3797dea4723 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -227,6 +227,7 @@ struct bitmap {
int allclean;
atomic_t behind_writes;
+ unsigned long behind_writes_used; /* highest actual value at runtime */
/*
* the bitmap daemon - periodically wakes up and sweeps the bitmap
@@ -239,6 +240,7 @@ struct bitmap {
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
};
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8e3850b98cc..1a898788461 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,10 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(struct request_queue *q, struct bio *bio)
+static int make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int failit = 0;
if (bio_data_dir(bio) == WRITE) {
@@ -225,7 +224,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int n;
if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -328,7 +327,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev)
{
- conf_t *conf = (conf_t *)mddev->private;
+ conf_t *conf = mddev->private;
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 09437e95823..7e0e057db9a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -159,7 +159,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk("linear: disk numbering problem. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -187,7 +188,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
if (cnt != raid_disks) {
- printk("linear: not enough drives present. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -282,29 +284,21 @@ static int linear_stop (mddev_t *mddev)
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
+ mddev->private = NULL;
return 0;
}
-static int linear_make_request (struct request_queue *q, struct bio *bio)
+static int linear_make_request (mddev_t *mddev, struct bio *bio)
{
- const int rw = bio_data_dir(bio);
- mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
sector_t start_sector;
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
@@ -314,12 +308,14 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|| (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
- printk("linear_make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
rcu_read_unlock();
bio_io_error(bio);
return 0;
@@ -336,9 +332,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bp = bio_split(bio, end_sector - bio->bi_sector);
- if (linear_make_request(q, &bp->bio1))
+ if (linear_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (linear_make_request(q, &bp->bio2))
+ if (linear_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63daff3..46b3a044ead 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,8 +215,11 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
static int md_make_request(struct request_queue *q, struct bio *bio)
{
+ const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
int rv;
+ int cpu;
+
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return 0;
@@ -237,13 +240,27 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
- rv = mddev->pers->make_request(q, bio);
+
+ rv = mddev->pers->make_request(mddev, bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
+
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
return rv;
}
+/* mddev_suspend makes sure no new requests are submitted
+ * to the device, and that any requests that have been submitted
+ * are completely handled.
+ * Once ->stop is called and completes, the module will be completely
+ * unused.
+ */
static void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
@@ -251,13 +268,6 @@ static void mddev_suspend(mddev_t *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
- /* we now know that no code is executing in the personality module,
- * except possibly the tail end of a ->bi_end_io function, but that
- * is certain to complete before the module has a chance to get
- * unloaded
- */
}
static void mddev_resume(mddev_t *mddev)
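
A stand-alone sketch of the refactor visible in the faulty.c, linear.c and md.c hunks above: the common entry point now hands the device object straight to the personality's make_request() and does the per-device I/O accounting once, instead of each personality fishing the device out of queue->queuedata and accounting for itself. Types and names below are illustrative only.

#include <stdio.h>

struct sketch_bio { int sectors; int write; };

struct sketch_mddev {
	const char *name;
	unsigned long ios[2], sectors[2];               /* [read, write] */
	int (*make_request)(struct sketch_mddev *, struct sketch_bio *);
};

static int linear_like_make_request(struct sketch_mddev *mddev,
				    struct sketch_bio *bio)
{
	printf("%s: mapped %d sectors\n", mddev->name, bio->sectors);
	return 0;
}

static int common_make_request(struct sketch_mddev *mddev,
			       struct sketch_bio *bio)
{
	int rv = mddev->make_request(mddev, bio);       /* personality hook */

	mddev->ios[bio->write]++;                       /* accounting in one place */
	mddev->sectors[bio->write] += bio->sectors;
	return rv;
}

int main(void)
{
	struct sketch_mddev md = { "md0", {0, 0}, {0, 0}, linear_like_make_request };
	struct sketch_bio bio = { .sectors = 8, .write = 1 };

	common_make_request(&md, &bio);
	printf("%s: %lu write ios, %lu sectors\n", md.name, md.ios[1], md.sectors[1]);
	return 0;
}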
@@ -344,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
bio_endio(bio, 0);
else {
bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
- if (mddev->pers->make_request(mddev->queue, bio))
+ if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
submit_barriers(mddev);
@@ -406,6 +416,27 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
+static void mddev_init(mddev_t *mddev)
+{
+ mutex_init(&mddev->open_mutex);
+ mutex_init(&mddev->reconfig_mutex);
+ mutex_init(&mddev->bitmap_info.mutex);
+ INIT_LIST_HEAD(&mddev->disks);
+ INIT_LIST_HEAD(&mddev->all_mddevs);
+ init_timer(&mddev->safemode_timer);
+ atomic_set(&mddev->active, 1);
+ atomic_set(&mddev->openers, 0);
+ atomic_set(&mddev->active_io, 0);
+ spin_lock_init(&mddev->write_lock);
+ atomic_set(&mddev->flush_pending, 0);
+ init_waitqueue_head(&mddev->sb_wait);
+ init_waitqueue_head(&mddev->recovery_wait);
+ mddev->reshape_position = MaxSector;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->level = LEVEL_NONE;
+}
+
static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
@@ -472,23 +503,7 @@ static mddev_t * mddev_find(dev_t unit)
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
- mutex_init(&new->open_mutex);
- mutex_init(&new->reconfig_mutex);
- mutex_init(&new->bitmap_info.mutex);
- INIT_LIST_HEAD(&new->disks);
- INIT_LIST_HEAD(&new->all_mddevs);
- init_timer(&new->safemode_timer);
- atomic_set(&new->active, 1);
- atomic_set(&new->openers, 0);
- atomic_set(&new->active_io, 0);
- spin_lock_init(&new->write_lock);
- atomic_set(&new->flush_pending, 0);
- init_waitqueue_head(&new->sb_wait);
- init_waitqueue_head(&new->recovery_wait);
- new->reshape_position = MaxSector;
- new->resync_min = 0;
- new->resync_max = MaxSector;
- new->level = LEVEL_NONE;
+ mddev_init(new);
goto retry;
}
@@ -508,9 +523,36 @@ static inline int mddev_trylock(mddev_t * mddev)
return mutex_trylock(&mddev->reconfig_mutex);
}
-static inline void mddev_unlock(mddev_t * mddev)
+static struct attribute_group md_redundancy_group;
+
+static void mddev_unlock(mddev_t * mddev)
{
- mutex_unlock(&mddev->reconfig_mutex);
+ if (mddev->to_remove) {
+ /* These cannot be removed under reconfig_mutex as
+ * an access to the files will try to take reconfig_mutex
+ * while holding the file unremovable, which leads to
+ * a deadlock.
+ * So hold open_mutex instead - we are allowed to take
+ * it while holding reconfig_mutex, and md_run can
+ * use it to wait for the remove to complete.
+ */
+ struct attribute_group *to_remove = mddev->to_remove;
+ mddev->to_remove = NULL;
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->reconfig_mutex);
+
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
+ mutex_unlock(&mddev->open_mutex);
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);
md_wakeup_thread(mddev->thread);
}
@@ -1029,10 +1071,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
- /* Insist on good event counter while assembling */
+ /* Insist on good event counter while assembling, except
+ * for spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (sb->disks[rdev->desc_nr].state & (
+ (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
@@ -1428,10 +1473,14 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
}
} else if (mddev->pers == NULL) {
- /* Insist of good event counter while assembling */
+ /* Insist on good event counter while assembling, except for
+ * spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
@@ -1766,7 +1815,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
kobject_del(&rdev->kobj);
goto fail;
}
- rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+ rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -2047,7 +2096,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
- (rdev->sb_events&1)==0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
@@ -2100,28 +2148,14 @@ repeat:
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
- && (mddev->events & 1)
- && mddev->events != 1)
+ && mddev->can_decrease_events
+ && mddev->events != 1) {
mddev->events--;
- else {
+ mddev->can_decrease_events = 0;
+ } else {
/* otherwise we have to go forward and ... */
mddev->events ++;
- if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
- /* .. if the array isn't clean, an 'even' event must also go
- * to spares. */
- if ((mddev->events&1)==0) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- } else {
- /* otherwise an 'odd' event must go to spares */
- if ((mddev->events&1)) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- }
+ mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
@@ -2365,6 +2399,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
+ rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
@@ -2780,8 +2815,9 @@ static void analyze_sbs(mddev_t * mddev)
i = 0;
rdev_for_each(rdev, tmp, mddev) {
- if (rdev->desc_nr >= mddev->max_disks ||
- i > mddev->max_disks) {
+ if (mddev->max_disks &&
+ (rdev->desc_nr >= mddev->max_disks ||
+ i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
@@ -2897,9 +2933,10 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- char level[16];
+ char clevel[16];
ssize_t rv = len;
struct mdk_personality *pers;
+ long level;
void *priv;
mdk_rdev_t *rdev;
@@ -2932,19 +2969,22 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
/* Now find the new personality */
- if (len == 0 || len >= sizeof(level))
+ if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
- strncpy(level, buf, len);
- if (level[len-1] == '\n')
+ strncpy(clevel, buf, len);
+ if (clevel[len-1] == '\n')
len--;
- level[len] = 0;
+ clevel[len] = 0;
+ if (strict_strtol(clevel, 10, &level))
+ level = LEVEL_NONE;
- request_module("md-%s", level);
+ if (request_module("md-%s", clevel) != 0)
+ request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
- pers = find_pers(LEVEL_NONE, level);
+ pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", level);
+ printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -2957,7 +2997,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return -EINVAL;
}
@@ -2973,13 +3013,44 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
+
+ if (mddev->pers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ }
+ if (mddev->pers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
+ if (mddev->pers->sync_request == NULL &&
+ mddev->external) {
+ /* We are converting from a no-redundancy array
+ * to a redundancy array and metadata is managed
+ * externally so we need to be sure that writes
+ * won't block due to a need to transition
+ * clean->dirty
+ * until external management is started.
+ */
+ mddev->in_sync = 0;
+ mddev->safemode_delay = 0;
+ mddev->safemode = 0;
+ }
+
module_put(mddev->pers->owner);
/* Invalidate devices that are now superfluous */
list_for_each_entry(rdev, &mddev->disks, same_set)
@@ -2994,11 +3065,20 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ if (mddev->pers->sync_request == NULL) {
+ /* this is now an array without redundancy, so
+ * it must always be in_sync
+ */
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ sysfs_notify(&mddev->kobj, NULL, "level");
+ md_new_event(mddev);
return rv;
}
@@ -3237,6 +3317,7 @@ array_state_show(mddev_t *mddev, char *page)
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
+static int md_set_readonly(mddev_t * mddev, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
@@ -3267,7 +3348,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3277,7 +3358,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -4082,15 +4163,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->private != (void*)1)
- sysfs_remove_group(&mddev->kobj, mddev->private);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- mddev->private = NULL;
- }
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
@@ -4189,7 +4261,7 @@ static int md_alloc(dev_t dev, char *name)
mutex_unlock(&disks_mutex);
if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+ mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
}
mddev_put(mddev);
return error;
@@ -4234,11 +4306,10 @@ static void md_safemode_timeout(unsigned long data)
static int start_dirty_degraded;
-static int do_md_run(mddev_t * mddev)
+static int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
- struct gendisk *disk;
struct mdk_personality *pers;
if (list_empty(&mddev->disks))
@@ -4248,6 +4319,13 @@ static int do_md_run(mddev_t * mddev)
if (mddev->pers)
return -EBUSY;
+ /* These two calls synchronise us with the
+ * sysfs_remove_group calls in mddev_unlock,
+ * so they must have completed.
+ */
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->open_mutex);
+
/*
* Analyze all RAID superblock(s)
*/
@@ -4296,8 +4374,6 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- disk = mddev->gendisk;
-
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -4398,7 +4474,7 @@ static int do_md_run(mddev_t * mddev)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -4425,22 +4501,32 @@ static int do_md_run(mddev_t * mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- set_capacity(disk, mddev->array_sectors);
-
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
- revalidate_disk(mddev->gendisk);
- mddev->changed = 1;
md_new_event(mddev);
sysfs_notify_dirent(mddev->sysfs_state);
if (mddev->sysfs_action)
sysfs_notify_dirent(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
+static int do_md_run(mddev_t *mddev)
+{
+ int err;
+
+ err = md_run(mddev);
+ if (err)
+ goto out;
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+out:
+ return err;
+}
+
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -4491,9 +4577,110 @@ void restore_bitmap_write_access(struct file *file)
spin_unlock(&inode->i_lock);
}
+static void md_clean(mddev_t *mddev)
+{
+ mddev->array_sectors = 0;
+ mddev->external_size = 0;
+ mddev->dev_sectors = 0;
+ mddev->raid_disks = 0;
+ mddev->recovery_cp = 0;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
+ mddev->level = LEVEL_NONE;
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->ro = 0;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+ mddev->layout = 0;
+ mddev->max_disks = 0;
+ mddev->events = 0;
+ mddev->can_decrease_events = 0;
+ mddev->delta_disks = 0;
+ mddev->new_level = LEVEL_NONE;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ mddev->curr_resync = 0;
+ mddev->resync_mismatches = 0;
+ mddev->suspend_lo = mddev->suspend_hi = 0;
+ mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ mddev->recovery = 0;
+ mddev->in_sync = 0;
+ mddev->degraded = 0;
+ mddev->barriers_work = 0;
+ mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
+}
+
+static void md_stop_writes(mddev_t *mddev)
+{
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ }
+
+ del_timer_sync(&mddev->safemode_timer);
+
+ bitmap_flush(mddev);
+ md_super_wait(mddev);
+
+ if (!mddev->in_sync || mddev->flags) {
+ /* mark array as shutdown cleanly */
+ mddev->in_sync = 1;
+ md_update_sb(mddev, 1);
+ }
+}
+
+static void md_stop(mddev_t *mddev)
+{
+ md_stop_writes(mddev);
+
+ mddev->pers->stop(mddev);
+ if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(mddev->pers->owner);
+ mddev->pers = NULL;
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+}
+
+static int md_set_readonly(mddev_t *mddev, int is_open)
+{
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > is_open) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
+ if (mddev->pers) {
+ md_stop_writes(mddev);
+
+ err = -ENXIO;
+ if (mddev->ro==1)
+ goto out;
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ sysfs_notify_dirent(mddev->sysfs_state);
+ err = 0;
+ }
+out:
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+}
+
/* mode:
* 0 - completely stop and dis-assemble array
- * 1 - switch to readonly
* 2 - stop but do not disassemble array
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
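
A rough call-structure sketch (plain functions, no kernel types) of the split introduced above: both the read-only transition and the full stop reuse md_stop_writes(); only the full stop tears the personality down and then resets the mddev fields via md_clean().

#include <stdio.h>

static void stop_writes(void)      { printf("freeze sync thread, flush bitmap, mark clean\n"); }
static void stop_personality(void) { printf("->stop(), drop module reference\n"); }
static void clean_fields(void)     { printf("reset mddev fields to defaults\n"); }

static int set_readonly(void)
{
	stop_writes();                  /* array stays assembled, just read-only */
	printf("set_disk_ro(1)\n");
	return 0;
}

static int full_stop(void)
{
	stop_writes();
	stop_personality();
	clean_fields();                 /* only on full disassembly */
	return 0;
}

int main(void)
{
	set_readonly();
	full_stop();
	return 0;
}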
@@ -4508,64 +4695,32 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
err = -EBUSY;
} else if (mddev->pers) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- }
-
- del_timer_sync(&mddev->safemode_timer);
+ if (mddev->ro)
+ set_disk_ro(disk, 0);
- switch(mode) {
- case 1: /* readonly */
- err = -ENXIO;
- if (mddev->ro==1)
- goto out;
- mddev->ro = 1;
- break;
- case 0: /* disassemble */
- case 2: /* stop */
- bitmap_flush(mddev);
- md_super_wait(mddev);
- if (mddev->ro)
- set_disk_ro(disk, 0);
+ md_stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->backing_dev_info.congested_fn = NULL;
- mddev->pers->stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- module_put(mddev->pers->owner);
- if (mddev->pers->sync_request && mddev->private == NULL)
- mddev->private = (void*)1;
- mddev->pers = NULL;
- /* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ /* tell userspace to handle 'inactive' */
+ sysfs_notify_dirent(mddev->sysfs_state);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
- set_capacity(disk, 0);
- mddev->changed = 1;
+ set_capacity(disk, 0);
+ revalidate_disk(disk);
- if (mddev->ro)
- mddev->ro = 0;
- }
- if (!mddev->in_sync || mddev->flags) {
- /* mark array as shutdown cleanly */
- mddev->in_sync = 1;
- md_update_sb(mddev, 1);
- }
- if (mode == 1)
- set_disk_ro(disk, 1);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->ro)
+ mddev->ro = 0;
+
err = 0;
}
-out:
mutex_unlock(&mddev->open_mutex);
if (err)
return err;
@@ -4586,52 +4741,12 @@ out:
export_array(mddev);
- mddev->array_sectors = 0;
- mddev->external_size = 0;
- mddev->dev_sectors = 0;
- mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
- mddev->resync_min = 0;
- mddev->resync_max = MaxSector;
- mddev->reshape_position = MaxSector;
- mddev->external = 0;
- mddev->persistent = 0;
- mddev->level = LEVEL_NONE;
- mddev->clevel[0] = 0;
- mddev->flags = 0;
- mddev->ro = 0;
- mddev->metadata_type[0] = 0;
- mddev->chunk_sectors = 0;
- mddev->ctime = mddev->utime = 0;
- mddev->layout = 0;
- mddev->max_disks = 0;
- mddev->events = 0;
- mddev->delta_disks = 0;
- mddev->new_level = LEVEL_NONE;
- mddev->new_layout = 0;
- mddev->new_chunk_sectors = 0;
- mddev->curr_resync = 0;
- mddev->resync_mismatches = 0;
- mddev->suspend_lo = mddev->suspend_hi = 0;
- mddev->sync_speed_min = mddev->sync_speed_max = 0;
- mddev->recovery = 0;
- mddev->in_sync = 0;
- mddev->changed = 0;
- mddev->degraded = 0;
- mddev->barriers_work = 0;
- mddev->safemode = 0;
- mddev->bitmap_info.offset = 0;
- mddev->bitmap_info.default_offset = 0;
- mddev->bitmap_info.chunksize = 0;
- mddev->bitmap_info.daemon_sleep = 0;
- mddev->bitmap_info.max_write_behind = 0;
+ md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
- } else if (mddev->pers)
- printk(KERN_INFO "md: %s switched to read-only mode.\n",
- mdname(mddev));
+ }
err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
@@ -5349,7 +5464,7 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
- raid_disks >= mddev->max_disks)
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -5486,7 +5601,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
geo->heads = 2;
geo->sectors = 4;
- geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ geo->cylinders = mddev->array_sectors / 8;
return 0;
}
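
The geometry hunk above makes HDIO_GETGEO report cylinders from mddev->array_sectors instead of the gendisk capacity; with the fixed heads=2 and sectors=4, cylinders * heads * sectors gives back the array size in 512-byte sectors (as long as the value still fits the ioctl's 16-bit cylinders field). A minimal userspace sketch of reading that geometry, assuming /dev/md0 is the array; HDIO_GETGEO and struct hd_geometry come from <linux/hdreg.h>.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
    struct hd_geometry geo;
    int fd = open("/dev/md0", O_RDONLY);   /* assumed device path */

    if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0) {
        perror("HDIO_GETGEO /dev/md0");
        return 1;
    }
    /* md reports heads=2, sectors=4, cylinders=array_sectors/8, so the
     * product reconstructs the array size in 512-byte sectors (while it
     * still fits the 16-bit cylinders field). */
    printf("C/H/S = %u/%u/%u => ~%llu sectors\n",
           geo.cylinders, geo.heads, geo.sectors,
           (unsigned long long)geo.cylinders * geo.heads * geo.sectors);
    close(fd);
    return 0;
}
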
@@ -5496,6 +5611,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5628,9 +5744,37 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop(mddev, 1, 1);
+ err = md_set_readonly(mddev, 1);
goto done_unlock;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}
/*
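
The BLKROSET case added above is what lets a plain "make me writable" request (e.g. blockdev --setrw) wake a read-only md array back up: if userspace asks for read-write while mddev->ro == 1, the array is restarted and left in read-auto mode (ro == 2). A minimal userspace sketch of issuing that request, assuming /dev/md0 and CAP_SYS_ADMIN; BLKROSET comes from <linux/fs.h>.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
    /* 0 = request read-write; with this patch md answers by calling
     * restart_array() and switching a ro==1 array to read-auto (ro==2). */
    int ro = 0;
    int fd = open("/dev/md0", O_RDONLY);   /* assumed device path */

    if (fd < 0 || ioctl(fd, BLKROSET, &ro) < 0) {
        perror("BLKROSET /dev/md0");
        return 1;
    }
    close(fd);
    return 0;
}
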
@@ -5751,7 +5895,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
out:
return err;
}
@@ -5766,21 +5909,6 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-
-static int md_media_changed(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- mddev->changed = 0;
- return 0;
-}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -5791,8 +5919,6 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -5906,7 +6032,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(StateChanged, &rdev->flags);
+ sysfs_notify_dirent(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -6898,11 +7024,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (test_and_clear_bit(StateChanged, &rdev->flags))
- sysfs_notify_dirent(rdev->sysfs_state);
-
-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
@@ -7039,7 +7160,7 @@ static int md_notify_reboot(struct notifier_block *this,
* appears to still be in use. Hence
* the '100'.
*/
- do_md_stop(mddev, 1, 100);
+ md_set_readonly(mddev, 100);
mddev_unlock(mddev);
}
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8e4c75c00d4..7ab5ea15545 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -74,9 +74,6 @@ struct mdk_rdev_s
#define Blocked 8 /* An error occurred on an externally
* managed array, don't allow writes
* until it is cleared */
-#define StateChanged 9 /* Faulty or Blocked has changed during
- * interrupt, so it needs to be
- * notified by the thread */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -153,6 +150,12 @@ struct mddev_s
int external_size; /* size managed
* externally */
__u64 events;
+ /* If the last 'event' was simply a clean->dirty transition, and
+ * we didn't write it to the spares, then it is safe and simple
+ * to just decrement the event count on a dirty->clean transition.
+ * So we record that possibility here.
+ */
+ int can_decrease_events;
char uuid[16];
@@ -240,7 +243,6 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
- int changed; /* true if we might need to reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
@@ -279,9 +281,6 @@ struct mddev_s
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
- atomic_t write_behind; /* outstanding async IO */
- unsigned int max_write_behind; /* 0 = sync */
-
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
@@ -305,6 +304,7 @@ struct mddev_s
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+ struct attribute_group *to_remove;
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -336,7 +336,7 @@ struct mdk_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(struct request_queue *q, struct bio *bio);
+ int (*make_request)(mddev_t *mddev, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 789bf535d29..410fb60699a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -85,7 +85,7 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
+ struct multipath_bh *mp_bh = bio->bi_private;
multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
@@ -136,14 +136,11 @@ static void multipath_unplug(struct request_queue *q)
}
-static int multipath_make_request (struct request_queue *q, struct bio * bio)
+static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
@@ -155,12 +152,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_endio(bio, -EIO);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3bec024612..e70f004c99e 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -23,15 +23,17 @@
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
+#include "raid5.h"
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i;
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
blk_unplug(r_queue);
@@ -43,12 +45,13 @@ static int raid0_congested(void *data, int bits)
mddev_t *mddev = data;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
if (mddev_congested(mddev, bits))
return 1;
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -66,16 +69,17 @@ static void dump_zones(mddev_t *mddev)
sector_t zone_start = 0;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
printk(KERN_INFO "******* %s configuration *********\n",
mdname(mddev));
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "zone%d=[", j);
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk("%s/",
- bdevname(conf->devlist[j*mddev->raid_disks
+ printk(KERN_CONT "%s/",
+ bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
- printk("]\n");
+ printk(KERN_CONT "]\n");
zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone offset=%llukb "
@@ -88,7 +92,7 @@ static void dump_zones(mddev_t *mddev)
printk(KERN_INFO "**********************************\n\n");
}
-static int create_strip_zones(mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
{
int i, c, err;
sector_t curr_zone_end, sectors;
@@ -101,8 +105,9 @@ static int create_strip_zones(mddev_t *mddev)
if (!conf)
return -ENOMEM;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: looking at %s\n",
- bdevname(rdev1->bdev,b));
+ printk(KERN_INFO "md/raid0:%s: looking at %s\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev, b));
c = 0;
/* round size to chunk_size */
@@ -111,14 +116,16 @@ static int create_strip_zones(mddev_t *mddev)
rdev1->sectors = sectors * mddev->chunk_sectors;
list_for_each_entry(rdev2, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: comparing %s(%llu)",
+ printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
+ mdname(mddev),
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->sectors);
- printk(KERN_INFO " with %s(%llu)\n",
+ printk(KERN_CONT " with %s(%llu)\n",
bdevname(rdev2->bdev,b),
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
- printk(KERN_INFO "raid0: END\n");
+ printk(KERN_INFO "md/raid0:%s: END\n",
+ mdname(mddev));
break;
}
if (rdev2->sectors == rdev1->sectors) {
@@ -126,20 +133,24 @@ static int create_strip_zones(mddev_t *mddev)
* Not unique, don't count it as a new
* group
*/
- printk(KERN_INFO "raid0: EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: EQUAL\n",
+ mdname(mddev));
c = 1;
break;
}
- printk(KERN_INFO "raid0: NOT EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
+ mdname(mddev));
}
if (!c) {
- printk(KERN_INFO "raid0: ==> UNIQUE\n");
+ printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
+ mdname(mddev));
conf->nr_strip_zones++;
- printk(KERN_INFO "raid0: %d zones\n",
- conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
}
}
- printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
@@ -162,14 +173,18 @@ static int create_strip_zones(mddev_t *mddev)
list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk;
+ if (mddev->level == 10)
+ /* taking over a raid10-n2 array */
+ j /= 2;
+
if (j < 0 || j >= mddev->raid_disks) {
- printk(KERN_ERR "raid0: bad disk number %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "raid0: multiple devices for %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -191,8 +206,8 @@ static int create_strip_zones(mddev_t *mddev)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "raid0: too few disks (%d of %d) - "
- "aborting!\n", cnt, mddev->raid_disks);
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+ "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -208,39 +223,44 @@ static int create_strip_zones(mddev_t *mddev)
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
- printk(KERN_INFO "raid0: zone %d\n", i);
+ printk(KERN_INFO "md/raid0:%s: zone %d\n",
+ mdname(mddev), i);
zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
- printk(KERN_INFO "raid0: checking %s ...",
- bdevname(rdev->bdev, b));
+ printk(KERN_INFO "md/raid0:%s: checking %s ...",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
if (rdev->sectors <= zone->dev_start) {
- printk(KERN_INFO " nope.\n");
+ printk(KERN_CONT " nope.\n");
continue;
}
- printk(KERN_INFO " contained as device %d\n", c);
+ printk(KERN_CONT " contained as device %d\n", c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev;
- printk(KERN_INFO " (%llu) is smallest!.\n",
- (unsigned long long)rdev->sectors);
+ printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
+ mdname(mddev),
+ (unsigned long long)rdev->sectors);
}
}
zone->nb_dev = c;
sectors = (smallest->sectors - zone->dev_start) * c;
- printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
- zone->nb_dev, (unsigned long long)sectors);
+ printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+ mdname(mddev),
+ zone->nb_dev, (unsigned long long)sectors);
curr_zone_end += sectors;
zone->zone_end = curr_zone_end;
- printk(KERN_INFO "raid0: current zone start: %llu\n",
- (unsigned long long)smallest->sectors);
+ printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
+ mdname(mddev),
+ (unsigned long long)smallest->sectors);
}
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
@@ -251,7 +271,7 @@ static int create_strip_zones(mddev_t *mddev)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "%s chunk_size of %d not valid\n",
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
mdname(mddev),
mddev->chunk_sectors << 9);
goto abort;
@@ -261,14 +281,15 @@ static int create_strip_zones(mddev_t *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
- printk(KERN_INFO "raid0: done.\n");
- mddev->private = conf;
+ printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
return 0;
abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
+ *private_conf = NULL;
return err;
}
@@ -319,10 +340,12 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int raid0_run(mddev_t *mddev)
{
+ raid0_conf_t *conf;
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0: chunk size must be set.\n");
+ printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
+ mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -330,15 +353,27 @@ static int raid0_run(mddev_t *mddev)
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
- ret = create_strip_zones(mddev);
- if (ret < 0)
- return ret;
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+ if (ret < 0)
+ return ret;
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (conf->scale_raid_disks) {
+ int i;
+ for (i=0; i < conf->strip_zone[0].nb_dev; i++)
+ conf->devlist[i]->raid_disk /= conf->scale_raid_disks;
+ /* FIXME update sysfs rd links */
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
- (unsigned long long)mddev->array_sectors);
+ printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -402,6 +437,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
unsigned int sect_in_chunk;
sector_t chunk;
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) {
@@ -424,7 +460,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
* + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
- return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
}
@@ -444,27 +480,18 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
}
}
-static int raid0_make_request(struct request_queue *q, struct bio *bio)
+static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
unsigned int chunk_sects;
sector_t sector_offset;
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
chunk_sects = mddev->chunk_sectors;
if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector_t sector = bio->bi_sector;
@@ -482,9 +509,9 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
- if (raid0_make_request(q, &bp->bio1))
+ if (raid0_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (raid0_make_request(q, &bp->bio2))
+ if (raid0_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
@@ -504,9 +531,10 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
return 1;
bad_map:
- printk("raid0_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ printk("md/raid0:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n",
+ mdname(mddev), chunk_sects / 2,
+ (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
return 0;
@@ -519,6 +547,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
int j, k, h;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
sector_t zone_size;
sector_t zone_start = 0;
@@ -529,7 +558,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname(
- conf->devlist[j*mddev->raid_disks + k]
+ conf->devlist[j*raid_disks + k]
->bdev, b));
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -544,6 +573,104 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
return;
}
+static void *raid0_takeover_raid5(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ raid0_conf_t *priv_conf;
+
+ if (mddev->degraded != 1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ /* check slot number for a disk */
+ if (rdev->raid_disk == mddev->raid_disks-1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
+
+static void *raid0_takeover_raid10(mddev_t *mddev)
+{
+ raid0_conf_t *priv_conf;
+
+ /* Check layout:
+ * - far_copies must be 1
+ * - near_copies must be 2
+ * - disks number must be even
+ * - all mirrors must be already degraded
+ */
+ if (mddev->layout != ((1 << 8) + 2)) {
+ printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->raid_disks & 1) {
+ printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->degraded != (mddev->raid_disks>>1)) {
+ printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ priv_conf->scale_raid_disks = 2;
+ return priv_conf;
+}
+
+static void *raid0_takeover(mddev_t *mddev)
+{
+ /* raid0 can take over:
+ * raid5 - providing it is Raid4 layout and one disk is faulty
+ * raid10 - assuming we have all necessary active disks
+ */
+ if (mddev->level == 5) {
+ if (mddev->layout == ALGORITHM_PARITY_N)
+ return raid0_takeover_raid5(mddev);
+
+ printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
+ }
+
+ if (mddev->level == 10)
+ return raid0_takeover_raid10(mddev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void raid0_quiesce(mddev_t *mddev, int state)
+{
+}
+
static struct mdk_personality raid0_personality=
{
.name = "raid0",
@@ -554,6 +681,8 @@ static struct mdk_personality raid0_personality=
.stop = raid0_stop,
.status = raid0_status,
.size = raid0_size,
+ .takeover = raid0_takeover,
+ .quiesce = raid0_quiesce,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 91f8e876ee6..d724e664ca4 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -13,6 +13,9 @@ struct raid0_private_data
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
+ int scale_raid_disks; /* divide rdev->raid_disk by this in run()
+ * to handle conversion from raid10
+ */
};
typedef struct raid0_private_data raid0_conf_t;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e59b10e66ed..a948da8012d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -263,7 +263,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror;
conf_t *conf = r1_bio->mddev->private;
@@ -297,7 +297,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio);
}
@@ -308,7 +309,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
@@ -418,7 +419,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -434,7 +435,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -774,9 +775,8 @@ do_sync_io:
return NULL;
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r1bio_t *r1_bio;
@@ -788,7 +788,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- int cpu;
bool do_barriers;
mdk_rdev_t *blocked_rdev;
@@ -834,12 +833,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
@@ -866,6 +859,15 @@ static int make_request(struct request_queue *q, struct bio * bio)
}
mirror = conf->mirrors + rdisk;
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /* Reading from a write-mostly device must
+ * take care not to over-take any writes
+ * that are 'behind'
+ */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
@@ -912,9 +914,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
@@ -942,10 +945,14 @@ static int make_request(struct request_queue *q, struct bio * bio)
set_bit(R1BIO_Degraded, &r1_bio->state);
}
- /* do behind I/O ? */
+ /* do behind I/O ?
+ * Not if there are too many, or we cannot allocate memory,
+ * or a reader on WriteMostly is waiting for behind writes
+ * to flush */
if (bitmap &&
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1070,21 +1077,22 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
- "raid1: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
{
int i;
- printk("RAID1 conf printout:\n");
+ printk(KERN_DEBUG "RAID1 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
rcu_read_lock();
@@ -1092,7 +1100,7 @@ static void print_conf(conf_t *conf)
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b));
@@ -1223,7 +1231,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int i;
for (i=r1_bio->mddev->raid_disks; i--; )
@@ -1246,7 +1254,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
int i;
@@ -1453,9 +1461,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
char b[BDEVNAME_SIZE];
/* Cannot read from anywhere, array is toast */
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
@@ -1577,7 +1586,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
else {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
- "raid1:%s: read error corrected "
+ "md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect +
@@ -1682,8 +1691,9 @@ static void raid1d(mddev_t *mddev)
bio = r1_bio->bios[r1_bio->read_disk];
if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
@@ -1697,10 +1707,11 @@ static void raid1d(mddev_t *mddev)
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
- " another mirror\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)r1_bio->sector);
+ printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
+ " other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev,b));
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
@@ -1755,13 +1766,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int still_degraded = 0;
if (!conf->r1buf_pool)
- {
-/*
- printk("sync start - bitmap %p\n", mddev->bitmap);
-*/
if (init_resync(conf))
return 0;
- }
max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
@@ -2042,7 +2048,7 @@ static conf_t *setup_conf(mddev_t *mddev)
err = -EIO;
if (conf->last_used < 0) {
- printk(KERN_ERR "raid1: no operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
goto abort;
}
@@ -2050,7 +2056,7 @@ static conf_t *setup_conf(mddev_t *mddev)
conf->thread = md_register_thread(raid1d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
+ "md/raid1:%s: couldn't allocate thread\n",
mdname(mddev));
goto abort;
}
@@ -2076,12 +2082,12 @@ static int run(mddev_t *mddev)
mdk_rdev_t *rdev;
if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
+ printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
mdname(mddev));
return -EIO;
}
@@ -2124,11 +2130,11 @@ static int run(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid1: %s is not clean"
+ printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid1: raid set %s active with %d out of %d mirrors\n",
+ "md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2152,15 +2158,14 @@ static int stop(mddev_t *mddev)
{
conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
- int behind_wait = 0;
/* wait for behind writes to complete */
- while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- behind_wait++;
- printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* wait a second */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
}
raise_barrier(conf);
@@ -2191,7 +2196,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp == MaxSector) {
@@ -2286,9 +2290,9 @@ static int raid1_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "md/raid1: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ "md/raid1:%s: cannot register "
+ "%s\n",
+ mdname(mddev), nm);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e2766d8251a..03724992cdf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include "md.h"
#include "raid10.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -255,7 +256,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -285,7 +286,8 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
reschedule_retry(r10_bio);
}
@@ -296,7 +298,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -494,7 +496,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
@@ -601,7 +603,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -635,7 +637,7 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
+ for (i = 0; i < conf->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -788,14 +790,12 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
- int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -825,16 +825,16 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
- if (make_request(q, &bp->bio1))
+ if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (make_request(q, &bp->bio2))
+ if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
bad_map:
- printk("raid10_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects/2,
+ printk("md/raid10:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
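
raid0 and raid10 both refuse a bio that would straddle a chunk boundary and instead split it exactly at the boundary; with a power-of-two chunk the split point is a simple mask, while raid0_make_request() falls back to sector_div() for the general case. A small sketch of the arithmetic on plain numbers; the helper name is mine.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Hedged sketch of the split point used above: for a power-of-two chunk_sects
 * the sectors left before the next boundary are
 * chunk_sects - (sector & (chunk_sects - 1)); otherwise the kernel code uses
 * chunk_sects - sector_div(sector, chunk_sects), modelled here with %. */
static sector_t sectors_to_chunk_end(sector_t sector, unsigned int chunk_sects)
{
    if ((chunk_sects & (chunk_sects - 1)) == 0)   /* power of two */
        return chunk_sects - (sector & (chunk_sects - 1));
    return chunk_sects - (sector % chunk_sects);
}

int main(void)
{
    /* 128-sector (64KiB) chunks, bio starting at sector 1000: prints 24 */
    printf("%llu sectors fit before the boundary\n",
           sectors_to_chunk_end(1000, 128));
    return 0;
}
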
@@ -850,12 +850,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
@@ -1039,9 +1033,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
- "raid10: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
@@ -1049,19 +1044,19 @@ static void print_conf(conf_t *conf)
int i;
mirror_info_t *tmp;
- printk("RAID10 conf printout:\n");
+ printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &tmp->rdev->flags),
!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
@@ -1132,7 +1127,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int mirror;
mirror_info_t *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1224,7 +1219,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
int i,d;
@@ -1261,7 +1256,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev->private;
int i,d;
@@ -1510,13 +1505,14 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (cur_read_error_count > max_read_errors) {
rcu_read_unlock();
printk(KERN_NOTICE
- "raid10: %s: Raid device exceeded "
+ "md/raid10:%s: %s: Raid device exceeded "
"read_error threshold "
"[cur %d:max %d]\n",
+ mdname(mddev),
b, cur_read_error_count, max_read_errors);
printk(KERN_NOTICE
- "raid10: %s: Failing raid "
- "device\n", b);
+ "md/raid10:%s: %s: Failing raid "
+ "device\n", mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
return;
}
@@ -1586,15 +1582,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: read correction "
+ "md/raid10:%s: read correction "
"write failed"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing "
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
}
@@ -1622,20 +1619,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
READ) == 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: unable to read back "
+ "md/raid10:%s: unable to read back "
"corrected sectors"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
} else {
printk(KERN_INFO
- "raid10:%s: read error corrected"
+ "md/raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
@@ -1710,8 +1708,9 @@ static void raid10d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
- printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
@@ -1721,8 +1720,9 @@ static void raid10d(mddev_t *mddev)
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
+ printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
" another mirror\n",
+ mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
@@ -1980,7 +1980,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
r10_bio = rb2;
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
mdname(mddev));
break;
}
@@ -2140,9 +2141,9 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
conf_t *conf = mddev->private;
if (!raid_disks)
- raid_disks = mddev->raid_disks;
+ raid_disks = conf->raid_disks;
if (!sectors)
- sectors = mddev->dev_sectors;
+ sectors = conf->dev_sectors;
size = sectors >> conf->chunk_shift;
sector_div(size, conf->far_copies);
@@ -2152,62 +2153,61 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
-static int run(mddev_t *mddev)
+
+static conf_t *setup_conf(mddev_t *mddev)
{
- conf_t *conf;
- int i, disk_idx, chunk_size;
- mirror_info_t *disk;
- mdk_rdev_t *rdev;
+ conf_t *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
+ int err = -EINVAL;
if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid10: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
- return -EINVAL;
+ printk(KERN_ERR "md/raid10:%s: chunk size must be "
+ "at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
+ goto out;
}
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
fo = mddev->layout & (1<<16);
+
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
(mddev->layout >> 17)) {
- printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
+ printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->layout);
goto out;
}
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+
+ err = -ENOMEM;
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
+
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
- GFP_KERNEL);
- if (!conf->mirrors) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
+ GFP_KERNEL);
+ if (!conf->mirrors)
+ goto out;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_free_conf;
+ goto out;
+
conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
- conf->chunk_mask = mddev->chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->chunk_sectors);
+ conf->chunk_mask = mddev->new_chunk_sectors - 1;
+ conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
+
+ conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
+ r10bio_pool_free, conf);
+ if (!conf->r10bio_pool)
+ goto out;
+
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
@@ -2221,7 +2221,8 @@ static int run(mddev_t *mddev)
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
- mddev->dev_sectors = stride << conf->chunk_shift;
+
+ conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
@@ -2229,18 +2230,63 @@ static int run(mddev_t *mddev)
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
- conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
- if (!conf->r10bio_pool) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
+ INIT_LIST_HEAD(&conf->retry_list);
+
+ spin_lock_init(&conf->resync_lock);
+ init_waitqueue_head(&conf->wait_barrier);
+
+ conf->thread = md_register_thread(raid10d, mddev, NULL);
+ if (!conf->thread)
+ goto out;
+
+ conf->scale_disks = 0;
+ conf->mddev = mddev;
+ return conf;
+
+ out:
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
+ if (conf) {
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i, disk_idx, chunk_size;
+ mirror_info_t *disk;
+ mdk_rdev_t *rdev;
+ sector_t size;
+
+ /*
+ * copy the already verified devices into our private RAID10
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
+ */
+
+ if (mddev->private == NULL) {
+ conf = setup_conf(mddev);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (!conf)
+ goto out;
+
mddev->queue->queue_lock = &conf->device_lock;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->raid_disks % conf->near_copies)
@@ -2251,9 +2297,14 @@ static int run(mddev_t *mddev)
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
+ if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
continue;
+ if (conf->scale_disks) {
+ disk_idx *= conf->scale_disks;
+ rdev->raid_disk = disk_idx;
+ /* MOVE 'rd%d' link !! */
+ }
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
@@ -2271,14 +2322,9 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
- INIT_LIST_HEAD(&conf->retry_list);
-
- spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_barrier);
-
/* need to check that every block has at least one working mirror */
if (!enough(conf)) {
- printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -2297,28 +2343,21 @@ static int run(mddev_t *mddev)
}
}
-
- mddev->thread = md_register_thread(raid10d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid10: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
-
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid10: %s is not clean"
+ printk(KERN_NOTICE "md/raid10:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid10: raid set %s active with %d out of %d devices\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ "md/raid10:%s: active with %d out of %d devices\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded,
+ conf->raid_disks);
/*
* Ok, everything is just fine now
*/
- md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
- mddev->resync_max_sectors = raid10_size(mddev, 0, 0);
+ mddev->dev_sectors = conf->dev_sectors;
+ size = raid10_size(mddev, 0, 0);
+ md_set_array_sectors(mddev, size);
+ mddev->resync_max_sectors = size;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
@@ -2336,7 +2375,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
- if (conf->near_copies < mddev->raid_disks)
+ if (conf->near_copies < conf->raid_disks)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
md_integrity_register(mddev);
return 0;
@@ -2348,6 +2387,7 @@ out_free_conf:
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
+ md_unregister_thread(mddev->thread);
out:
return -EIO;
}
@@ -2384,6 +2424,61 @@ static void raid10_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid10_takeover_raid0(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ conf_t *conf;
+
+ if (mddev->degraded > 0) {
+ printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Update slot numbers to obtain
+ * degraded raid10 with missing mirrors
+ */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ rdev->raid_disk *= 2;
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 10;
+ /* new layout: far_copies = 1, near_copies = 2 */
+ mddev->new_layout = (1<<8) + 2;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = mddev->raid_disks;
+ mddev->degraded = mddev->raid_disks;
+ mddev->raid_disks *= 2;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ conf = setup_conf(mddev);
+ conf->scale_disks = 2;
+ return conf;
+}
+
+static void *raid10_takeover(mddev_t *mddev)
+{
+ struct raid0_private_data *raid0_priv;
+
+ /* raid10 can take over:
+ * raid0 - providing it has only a single zone
+ */
+ if (mddev->level == 0) {
+ /* for raid0 takeover only one zone is supported */
+ raid0_priv = mddev->private;
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
+ " with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ return raid10_takeover_raid0(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
+
static struct mdk_personality raid10_personality =
{
.name = "raid10",
@@ -2400,6 +2495,7 @@ static struct mdk_personality raid10_personality =
.sync_request = sync_request,
.quiesce = raid10_quiesce,
.size = raid10_size,
+ .takeover = raid10_takeover,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 59cd1efb8d3..3824a087e17 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,9 +33,16 @@ struct r10_private_data_s {
* 1 stripe.
*/
+ sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
+ int scale_disks; /* When starting array, multiply
+ * each ->raid_disk by this.
+ * Needed for raid0->raid10 migration
+ */
+
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
@@ -57,6 +64,11 @@ struct r10_private_data_s {
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
struct page *tmppage;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r10_private_data_s conf_t;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c393b5..d2c0f94fa37 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -1509,7 +1510,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "raid5:%s: read error corrected"
+ printk_rl(KERN_INFO "md/raid:%s: read error corrected"
" (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)(sh->sector
@@ -1529,7 +1530,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
printk_rl(KERN_WARNING
- "raid5:%s: read error not correctable "
+ "md/raid:%s: read error not correctable "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1538,7 +1539,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
printk_rl(KERN_WARNING
- "raid5:%s: read error NOT corrected!! "
+ "md/raid:%s: read error NOT corrected!! "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1547,7 +1548,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
- "raid5:%s: Too many read errors, failing device %s.\n",
+ "md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -1619,8 +1620,8 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
- pr_debug("raid5: error called\n");
+ raid5_conf_t *conf = mddev->private;
+ pr_debug("raid456: error called\n");
if (!test_bit(Faulty, &rdev->flags)) {
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1636,9 +1637,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
- "raid5: Disk failure on %s, disabling device.\n"
- "raid5: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ "md/raid:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
}
@@ -1714,8 +1719,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
pd_idx = data_disks;
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1832,10 +1835,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
qd_idx = raid_disks - 1;
break;
-
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1898,8 +1898,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
case ALGORITHM_PARITY_N:
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1958,8 +1956,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
i -= 1;
break;
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1972,7 +1968,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "compute_blocknr: map not correct\n");
+ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -3709,10 +3706,10 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi);
- mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
- conf = mddev->private;
rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL;
+ mddev = rdev->mddev;
+ conf = mddev->private;
rdev_dec_pending(rdev, conf->mddev);
@@ -3749,9 +3746,8 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
+static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
struct bio* align_bi;
@@ -3866,16 +3862,15 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
return sh;
}
-static int make_request(struct request_queue *q, struct bio * bi)
+static int make_request(mddev_t *mddev, struct bio * bi)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int cpu, remaining;
+ int remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
/* Drain all pending writes. We only really need
@@ -3890,15 +3885,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bi));
- part_stat_unlock();
-
if (rw == READ &&
mddev->reshape_position == MaxSector &&
- chunk_aligned_read(q,bi))
+ chunk_aligned_read(mddev,bi))
return 0;
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
@@ -3946,7 +3935,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
&dd_idx, NULL);
- pr_debug("raid5: make_request, sector %llu logical %llu\n",
+ pr_debug("raid456: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -4054,7 +4043,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* As the reads complete, handle_stripe will copy the data
* into the destination stripe and release that stripe.
*/
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
@@ -4263,7 +4252,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
int sync_blocks;
@@ -4656,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
kfree(percpu->scribble);
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_DEAD:
@@ -4725,7 +4714,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
+ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
@@ -4733,12 +4722,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "raid5: %s: layout %d not supported\n",
+ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
@@ -4746,8 +4735,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
- mddev->new_chunk_sectors << 9, mdname(mddev));
+ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -4789,7 +4778,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (raid5_alloc_percpu(conf) != 0)
goto abort;
- pr_debug("raid5: run(%s) called.\n", mdname(mddev));
+ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
list_for_each_entry(rdev, &mddev->disks, same_set) {
raid_disk = rdev->raid_disk;
@@ -4802,9 +4791,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "raid5: device %s operational as raid"
- " disk %d\n", bdevname(rdev->bdev,b),
- raid_disk);
+ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
+ " disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -4828,16 +4817,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR
- "raid5: couldn't allocate %dkB for buffers\n", memory);
+ "md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "raid5: allocated %dkB for %s\n",
- memory, mdname(mddev));
+ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
+ mdname(mddev), memory);
conf->thread = md_register_thread(raid5d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid5: couldn't allocate thread for %s\n",
+ "md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
goto abort;
}
@@ -4888,7 +4878,7 @@ static int run(mddev_t *mddev)
sector_t reshape_offset = 0;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid5: %s is not clean"
+ printk(KERN_NOTICE "md/raid:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
if (mddev->reshape_position != MaxSector) {
@@ -4902,7 +4892,7 @@ static int run(mddev_t *mddev)
int max_degraded = (mddev->level == 6 ? 2 : 1);
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "raid5: %s: unsupported reshape "
+ printk(KERN_ERR "md/raid:%s: unsupported reshape "
"required - aborting.\n",
mdname(mddev));
return -EINVAL;
@@ -4915,8 +4905,8 @@ static int run(mddev_t *mddev)
here_new = mddev->reshape_position;
if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) {
- printk(KERN_ERR "raid5: reshape_position not "
- "on a stripe boundary\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position not "
+ "on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * mddev->new_chunk_sectors;
@@ -4937,8 +4927,9 @@ static int run(mddev_t *mddev)
if ((here_new * mddev->new_chunk_sectors !=
here_old * mddev->chunk_sectors) ||
mddev->ro == 0) {
- printk(KERN_ERR "raid5: in-place reshape must be started"
- " in read-only mode - aborting\n");
+ printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
+ " in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->delta_disks < 0
@@ -4947,11 +4938,13 @@ static int run(mddev_t *mddev)
: (here_new * mddev->new_chunk_sectors >=
here_old * mddev->chunk_sectors)) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for "
- "auto-recovery - aborting.\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
+ "auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "raid5: reshape will continue\n");
+ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
+ mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -4993,18 +4986,6 @@ static int run(mddev_t *mddev)
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
- printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
- rdev->raid_disk, working_disks, conf->prev_algo,
- conf->previous_raid_disks, conf->max_degraded,
- conf->algorithm, conf->raid_disks,
- only_parity(rdev->raid_disk,
- conf->prev_algo,
- conf->previous_raid_disks,
- conf->max_degraded),
- only_parity(rdev->raid_disk,
- conf->algorithm,
- conf->raid_disks,
- conf->max_degraded));
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5025,7 +5006,7 @@ static int run(mddev_t *mddev)
- working_disks);
if (mddev->degraded > conf->max_degraded) {
- printk(KERN_ERR "raid5: not enough operational devices for %s"
+ printk(KERN_ERR "md/raid:%s: not enough operational devices"
" (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
@@ -5039,32 +5020,32 @@ static int run(mddev_t *mddev)
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
printk(KERN_WARNING
- "raid5: starting dirty degraded array: %s"
- "- data corruption possible.\n",
+ "md/raid:%s: starting dirty degraded array"
+ " - data corruption possible.\n",
mdname(mddev));
else {
printk(KERN_ERR
- "raid5: cannot start dirty degraded array for %s\n",
+ "md/raid:%s: cannot start dirty degraded array.\n",
mdname(mddev));
goto abort;
}
}
if (mddev->degraded == 0)
- printk("raid5: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
+ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
+ " devices, algorithm %d\n", mdname(mddev), conf->level,
mddev->raid_disks-mddev->degraded, mddev->raid_disks,
mddev->new_layout);
else
- printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
- " out of %d devices, algorithm %d\n", conf->level,
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
+ " out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks - mddev->degraded,
+ mddev->raid_disks, mddev->new_layout);
print_raid5_conf(conf);
if (conf->reshape_progress != MaxSector) {
- printk("...ok start reshape thread\n");
conf->reshape_safe = conf->reshape_progress;
atomic_set(&conf->reshape_stripes, 0);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5087,9 +5068,11 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ if (mddev->to_remove == &raid5_attrs_group)
+ mddev->to_remove = NULL;
+ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
+ "md/raid:%s: failed to create sysfs attributes.\n",
mdname(mddev));
mddev->queue->queue_lock = &conf->device_lock;
@@ -5119,22 +5102,21 @@ abort:
free_conf(conf);
}
mddev->private = NULL;
- printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
+ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
-
-
static int stop(mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
- mddev->private = &raid5_attrs_group;
+ mddev->private = NULL;
+ mddev->to_remove = &raid5_attrs_group;
return 0;
}
@@ -5175,7 +5157,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
@@ -5197,21 +5179,22 @@ static void print_raid5_conf (raid5_conf_t *conf)
int i;
struct disk_info *tmp;
- printk("RAID5 conf printout:\n");
+ printk(KERN_DEBUG "RAID conf printout:\n");
if (!conf) {
printk("(conf==NULL)\n");
return;
}
- printk(" --- rd:%d wd:%d\n", conf->raid_disks,
- conf->raid_disks - conf->mddev->degraded);
+ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ conf->raid_disks,
+ conf->raid_disks - conf->mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ i, !test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev, b));
}
}
@@ -5334,7 +5317,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
raid5_size(mddev, sectors, mddev->raid_disks))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->dev_sectors;
@@ -5360,7 +5342,8 @@ static int check_stripe_cache(mddev_t *mddev)
> conf->max_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes) {
- printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
+ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
/ STRIPE_SIZE)*4);
return 0;
@@ -5431,7 +5414,7 @@ static int raid5_start_reshape(mddev_t *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md: %s: array size must be reduced "
+ printk(KERN_ERR "md/raid:%s: array size must be reduced "
"before number of disks\n", mdname(mddev));
return -EINVAL;
}
@@ -5469,9 +5452,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "raid5: failed to create "
- " link %s for %s\n",
- nm, mdname(mddev));
+ "md/raid:%s: failed to create "
+ " link %s\n",
+ mdname(mddev), nm);
} else
break;
}
@@ -5548,7 +5531,6 @@ static void raid5_finish_reshape(mddev_t *mddev)
if (mddev->delta_disks > 0) {
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
} else {
int d;
@@ -5613,6 +5595,29 @@ static void raid5_quiesce(mddev_t *mddev, int state)
}
+static void *raid45_takeover_raid0(mddev_t *mddev, int level)
+{
+ struct raid0_private_data *raid0_priv = mddev->private;
+
+ /* for raid0 takeover only one zone is supported */
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ mddev->new_level = level;
+ mddev->new_layout = ALGORITHM_PARITY_N;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks += 1;
+ mddev->delta_disks = 1;
+	/* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ return setup_conf(mddev);
+}
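+
+/*
+ * Net effect (informal summary): a single-zone raid0 of N disks comes up
+ * as a degraded (N+1)-disk raid4/raid5 using the PARITY_N layout, so the
+ * data blocks map exactly as they did under raid0 and only the parity
+ * disk is missing. Setting recovery_cp to MaxSector keeps md from
+ * treating the freshly converted array as dirty.
+ */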
+
+
static void *raid5_takeover_raid1(mddev_t *mddev)
{
int chunksect;
@@ -5737,12 +5742,13 @@ static int raid6_check_reshape(mddev_t *mddev)
static void *raid5_takeover(mddev_t *mddev)
{
/* raid5 can take over:
- * raid0 - if all devices are the same - make it a raid4 layout
+ * raid0 - if there is only one strip zone - make it a raid4 layout
* raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout
*/
-
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 5);
if (mddev->level == 1)
return raid5_takeover_raid1(mddev);
if (mddev->level == 4) {
@@ -5756,6 +5762,22 @@ static void *raid5_takeover(mddev_t *mddev)
return ERR_PTR(-EINVAL);
}
+static void *raid4_takeover(mddev_t *mddev)
+{
+ /* raid4 can take over:
+ * raid0 - if there is only one strip zone
+ * raid5 - if layout is right
+ */
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 4);
+ if (mddev->level == 5 &&
+ mddev->layout == ALGORITHM_PARITY_N) {
+ mddev->new_layout = 0;
+ mddev->new_level = 4;
+ return setup_conf(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
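+
+/*
+ * Aside (illustrative, device name assumed): a takeover is requested from
+ * user space by changing the array's level at run time, for example
+ *
+ *   mdadm --grow /dev/md0 --level=5
+ *
+ * which writes the new level to md's sysfs "level" attribute and ends up
+ * calling the new personality's ->takeover() method.
+ */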
static struct mdk_personality raid5_personality;
@@ -5871,6 +5893,7 @@ static struct mdk_personality raid4_personality =
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
+ .takeover = raid4_takeover,
};
static int __init raid5_init(void)
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 4dde7d180a3..195c6cf359f 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -7,3 +7,62 @@ config VIDEO_IR
tristate
depends on IR_CORE
default IR_CORE
+
+source "drivers/media/IR/keymaps/Kconfig"
+
+config IR_NEC_DECODER
+ tristate "Enable IR raw decoder for the NEC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+	   Enable this option if you have an infrared remote control which
+	   uses the NEC protocol, and you need software decoding support.
+
+config IR_RC5_DECODER
+ tristate "Enable IR raw decoder for the RC-5 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+	   Enable this option if you have an infrared remote control which
+	   uses the RC-5 protocol, and you need software decoding support.
+
+config IR_RC6_DECODER
+ tristate "Enable IR raw decoder for the RC6 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC6 protocol, and you need software decoding support.
+
+config IR_JVC_DECODER
+ tristate "Enable IR raw decoder for the JVC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the JVC protocol, and you need software decoding support.
+
+config IR_SONY_DECODER
+ tristate "Enable IR raw decoder for the Sony protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the Sony protocol, and you need software decoding support.
+
+config IR_IMON
+ tristate "SoundGraph iMON Receiver and Display"
+ depends on USB_ARCH_HAS_HCD
+ depends on IR_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use a SoundGraph iMON (aka Antec Veris)
+ IR Receiver and/or LCD/VFD/VGA display.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imon.
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 171890e7a41..b998fcced2e 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -1,5 +1,15 @@
-ir-common-objs := ir-functions.o ir-keymaps.o
-ir-core-objs := ir-keytable.o ir-sysfs.o
+ir-common-objs := ir-functions.o
+ir-core-objs := ir-keytable.o ir-sysfs.o ir-raw-event.o rc-map.o
+
+obj-y += keymaps/
obj-$(CONFIG_IR_CORE) += ir-core.o
obj-$(CONFIG_VIDEO_IR) += ir-common.o
+obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
+obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
+obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
+obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
+obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+
+# stand-alone IR receivers/transmitters
+obj-$(CONFIG_IR_IMON) += imon.o
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
new file mode 100644
index 00000000000..5e204567000
--- /dev/null
+++ b/drivers/media/IR/imon.c
@@ -0,0 +1,2396 @@
+/*
+ * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
+ *
+ * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
+ * Portions based on the original lirc_imon driver,
+ * Copyright(C) 2004 Venky Raju(dev@venky.ws)
+ *
+ * Huge thanks to R. Geoff Newbury for invaluable debugging on the
+ * 0xffdc iMON devices, and for sending me one to hack on, without
+ * which the support for them wouldn't be nearly as good. Thanks
+ * also to the numerous 0xffdc device owners that tested auto-config
+ * support for me and provided debug dumps from their devices.
+ *
+ * imon is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/ir-core.h>
+
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
+#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
+#define MOD_NAME "imon"
+#define MOD_VERSION "0.9.1"
+
+#define DISPLAY_MINOR_BASE 144
+#define DEVICE_NAME "lcd%d"
+
+#define BUF_CHUNK_SIZE 8
+#define BUF_SIZE 128
+
+#define BIT_DURATION 250 /* each bit received is 250us */
+
+#define IMON_CLOCK_ENABLE_PACKETS 2
+
+/*** P R O T O T Y P E S ***/
+
+/* USB Callback prototypes */
+static int imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void imon_disconnect(struct usb_interface *interface);
+static void usb_rx_callback_intf0(struct urb *urb);
+static void usb_rx_callback_intf1(struct urb *urb);
+static void usb_tx_callback(struct urb *urb);
+
+/* suspend/resume support */
+static int imon_resume(struct usb_interface *intf);
+static int imon_suspend(struct usb_interface *intf, pm_message_t message);
+
+/* Display file_operations function prototypes */
+static int display_open(struct inode *inode, struct file *file);
+static int display_close(struct inode *inode, struct file *file);
+
+/* VFD write operation */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/* LCD file_operations override function prototypes */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/*** G L O B A L S ***/
+
+struct imon_context {
+ struct device *dev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ /* Newer devices have two interfaces */
+ struct usb_device *usbdev_intf0;
+ struct usb_device *usbdev_intf1;
+
+ bool display_supported; /* not all controllers do */
+ bool display_isopen; /* display port has been opened */
+ bool rf_isassociating; /* RF remote associating */
+ bool dev_present_intf0; /* USB device presence, interface 0 */
+ bool dev_present_intf1; /* USB device presence, interface 1 */
+
+ struct mutex lock; /* to lock this object */
+ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
+
+ struct usb_endpoint_descriptor *rx_endpoint_intf0;
+ struct usb_endpoint_descriptor *rx_endpoint_intf1;
+ struct usb_endpoint_descriptor *tx_endpoint;
+ struct urb *rx_urb_intf0;
+ struct urb *rx_urb_intf1;
+ struct urb *tx_urb;
+ bool tx_control;
+ unsigned char usb_rx_buf[8];
+ unsigned char usb_tx_buf[8];
+
+ struct tx_t {
+ unsigned char data_buf[35]; /* user data buffer */
+ struct completion finished; /* wait for write to finish */
+ bool busy; /* write in progress */
+ int status; /* status of tx completion */
+ } tx;
+
+ u16 vendor; /* usb vendor ID */
+ u16 product; /* usb product ID */
+
+ struct input_dev *idev; /* input device for remote */
+ struct input_dev *touch; /* input device for touchscreen */
+
+ u32 kc; /* current input keycode */
+ u32 last_keycode; /* last reported input keycode */
+ u64 ir_type; /* iMON or MCE (RC6) IR protocol? */
+ u8 mce_toggle_bit; /* last mce toggle bit */
+ bool release_code; /* some keys send a release code */
+
+ u8 display_type; /* store the display type */
+ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
+
+ char name_idev[128]; /* input device name */
+ char phys_idev[64]; /* input device phys path */
+	struct timer_list itimer;	/* input device timer, needed for rc6 */
+
+ char name_touch[128]; /* touch screen name */
+ char phys_touch[64]; /* touch screen phys path */
+ struct timer_list ttimer; /* touch screen timer */
+ int touch_x; /* x coordinate on touchscreen */
+ int touch_y; /* y coordinate on touchscreen */
+};
+
+#define TOUCH_TIMEOUT (HZ/30)
+
+/* vfd character device file operations */
+static const struct file_operations vfd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &vfd_write,
+ .release = &display_close
+};
+
+/* lcd character device file operations */
+static const struct file_operations lcd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &lcd_write,
+ .release = &display_close
+};
+
+enum {
+ IMON_DISPLAY_TYPE_AUTO = 0,
+ IMON_DISPLAY_TYPE_VFD = 1,
+ IMON_DISPLAY_TYPE_LCD = 2,
+ IMON_DISPLAY_TYPE_VGA = 3,
+ IMON_DISPLAY_TYPE_NONE = 4,
+};
+
+enum {
+ IMON_KEY_IMON = 0,
+ IMON_KEY_MCE = 1,
+ IMON_KEY_PANEL = 2,
+};
+
+/*
+ * USB Device ID for iMON USB Control Boards
+ *
+ * The Windows drivers contain 6 different inf files, more or less one for
+ * each new device until the 0x0034-0x0046 devices, which all use the same
+ * driver. Some of the devices in the 34-46 range haven't been definitively
+ * identified yet. Early devices have either a TriGem Computer, Inc. or a
+ * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
+ * devices use the SoundGraph vendor ID (0x15c2). This driver only supports
+ * the ffdc and later devices, which do onboard decoding.
+ */
+static struct usb_device_id imon_usb_id_table[] = {
+ /*
+ * Several devices with this same device ID, all use iMON_PAD.inf
+ * SoundGraph iMON PAD (IR & VFD)
+ * SoundGraph iMON PAD (IR & LCD)
+ * SoundGraph iMON Knob (IR only)
+ */
+ { USB_DEVICE(0x15c2, 0xffdc) },
+
+ /*
+ * Newer devices, all driven by the latest iMON Windows driver, full
+ * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2'
+ * Need user input to fill in details on unknown devices.
+ */
+ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0034) },
+ /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0035) },
+ /* SoundGraph iMON OEM VFD (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0036) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0037) },
+ /* SoundGraph iMON OEM LCD (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0038) },
+ /* SoundGraph iMON UltraBay (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0039) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003a) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003b) },
+ /* SoundGraph iMON OEM Inside (IR only) */
+ { USB_DEVICE(0x15c2, 0x003c) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003d) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003e) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003f) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0040) },
+ /* SoundGraph iMON MINI (IR only) */
+ { USB_DEVICE(0x15c2, 0x0041) },
+ /* Antec Veris Multimedia Station EZ External (IR only) */
+ { USB_DEVICE(0x15c2, 0x0042) },
+ /* Antec Veris Multimedia Station Basic Internal (IR only) */
+ { USB_DEVICE(0x15c2, 0x0043) },
+ /* Antec Veris Multimedia Station Elite (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0044) },
+ /* Antec Veris Multimedia Station Premiere (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0045) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0046) },
+ {}
+};
+
+/* USB Device data */
+static struct usb_driver imon_driver = {
+ .name = MOD_NAME,
+ .probe = imon_probe,
+ .disconnect = imon_disconnect,
+ .suspend = imon_suspend,
+ .resume = imon_resume,
+ .id_table = imon_usb_id_table,
+};
+
+static struct usb_class_driver imon_vfd_class = {
+ .name = DEVICE_NAME,
+ .fops = &vfd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+static struct usb_class_driver imon_lcd_class = {
+ .name = DEVICE_NAME,
+ .fops = &lcd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+/* imon receiver front panel/knob key table */
+static const struct {
+ u64 hw_code;
+ u32 keycode;
+} imon_panel_key_table[] = {
+ { 0x000000000f00ffeell, KEY_PROG1 }, /* Go */
+ { 0x000000001f00ffeell, KEY_AUDIO },
+ { 0x000000002000ffeell, KEY_VIDEO },
+ { 0x000000002100ffeell, KEY_CAMERA },
+ { 0x000000002700ffeell, KEY_DVD },
+ { 0x000000002300ffeell, KEY_TV },
+ { 0x000000000500ffeell, KEY_PREVIOUS },
+ { 0x000000000700ffeell, KEY_REWIND },
+ { 0x000000000400ffeell, KEY_STOP },
+ { 0x000000003c00ffeell, KEY_PLAYPAUSE },
+ { 0x000000000800ffeell, KEY_FASTFORWARD },
+ { 0x000000000600ffeell, KEY_NEXT },
+ { 0x000000010000ffeell, KEY_RIGHT },
+ { 0x000001000000ffeell, KEY_LEFT },
+ { 0x000000003d00ffeell, KEY_SELECT },
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000000100ffeell, KEY_MUTE },
+ /* iMON Knob values */
+ { 0x000100ffffffffeell, KEY_VOLUMEUP },
+ { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
+ { 0x000008ffffffffeell, KEY_MUTE },
+};
+
+/* to prevent races between open() and disconnect(), probing, etc */
+static DEFINE_MUTEX(driver_lock);
+
+/* Module bookkeeping bits */
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_VERSION(MOD_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
+
+static bool debug;
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes(default: no)");
+
+/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
+static int display_type;
+module_param(display_type, int, S_IRUGO);
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
+ "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+
+static int pad_stabilize = 1;
+module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
+ "presses in arrow key mode. 0=disable, 1=enable (default).");
+
+/*
+ * In certain use cases, mouse mode isn't really helpful, and could actually
+ * cause confusion, so allow disabling it when the IR device is open.
+ */
+static bool nomouse;
+module_param(nomouse, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
+ "open. 0=don't disable, 1=disable. (default: don't disable)");
+
+/* threshold at which a pad push registers as an arrow key in kbd mode */
+static int pad_thresh;
+module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
+ "arrow key in kbd mode (default: 28)");
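+
+/*
+ * For illustration, all of the above are ordinary module parameters, so
+ * they can be given at load time, e.g.
+ *
+ *   modprobe imon display_type=2 pad_stabilize=0 pad_thresh=20
+ *
+ * and the writable ones can also be changed later through
+ * /sys/module/imon/parameters/.
+ */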
+
+
+static void free_imon_context(struct imon_context *ictx)
+{
+ struct device *dev = ictx->dev;
+
+ usb_free_urb(ictx->tx_urb);
+ usb_free_urb(ictx->rx_urb_intf0);
+ usb_free_urb(ictx->rx_urb_intf1);
+ kfree(ictx);
+
+ dev_dbg(dev, "%s: iMON context freed\n", __func__);
+}
+
+/**
+ * Called when the Display device (e.g. /dev/lcd0)
+ * is opened by the application.
+ */
+static int display_open(struct inode *inode, struct file *file)
+{
+ struct usb_interface *interface;
+ struct imon_context *ictx = NULL;
+ int subminor;
+ int retval = 0;
+
+ /* prevent races with disconnect */
+ mutex_lock(&driver_lock);
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&imon_driver, subminor);
+ if (!interface) {
+ err("%s: could not find interface for minor %d",
+ __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+ ictx = usb_get_intfdata(interface);
+
+ if (!ictx) {
+ err("%s: no context found for minor %d", __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (ictx->display_isopen) {
+ err("%s: display port is already open", __func__);
+ retval = -EBUSY;
+ } else {
+ ictx->display_isopen = 1;
+ file->private_data = ictx;
+ dev_dbg(ictx->dev, "display port opened\n");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+exit:
+ mutex_unlock(&driver_lock);
+ return retval;
+}
+
+/**
+ * Called when the display device (e.g. /dev/lcd0)
+ * is closed by the application.
+ */
+static int display_close(struct inode *inode, struct file *file)
+{
+ struct imon_context *ictx = NULL;
+ int retval = 0;
+
+ ictx = (struct imon_context *)file->private_data;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (!ictx->display_isopen) {
+ err("%s: display is not open", __func__);
+ retval = -EIO;
+ } else {
+ ictx->display_isopen = 0;
+ dev_dbg(ictx->dev, "display port closed\n");
+ if (!ictx->dev_present_intf0) {
+ /*
+ * Device disconnected before close and IR port is not
+ * open. If IR port is open, context will be deleted by
+ * ir_close.
+ */
+ mutex_unlock(&ictx->lock);
+ free_imon_context(ictx);
+ return retval;
+ }
+ }
+
+ mutex_unlock(&ictx->lock);
+ return retval;
+}
+
+/**
+ * Sends a packet to the device -- this function must be called
+ * with ictx->lock held.
+ */
+static int send_packet(struct imon_context *ictx)
+{
+ unsigned int pipe;
+ unsigned long timeout;
+ int interval = 0;
+ int retval = 0;
+ struct usb_ctrlrequest *control_req = NULL;
+
+ /* Check if we need to use control or interrupt urb */
+ if (!ictx->tx_control) {
+ pipe = usb_sndintpipe(ictx->usbdev_intf0,
+ ictx->tx_endpoint->bEndpointAddress);
+ interval = ictx->tx_endpoint->bInterval;
+
+ usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx, interval);
+
+ ictx->tx_urb->actual_length = 0;
+ } else {
+ /* fill request into kmalloc'ed space: */
+ control_req = kmalloc(sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (control_req == NULL)
+ return -ENOMEM;
+
+ /* setup packet is '21 09 0200 0001 0008' */
+ control_req->bRequestType = 0x21;
+ control_req->bRequest = 0x09;
+ control_req->wValue = cpu_to_le16(0x0200);
+ control_req->wIndex = cpu_to_le16(0x0001);
+ control_req->wLength = cpu_to_le16(0x0008);
+
+ /* control pipe is endpoint 0x00 */
+ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0);
+
+ /* build the control urb */
+ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0,
+ pipe, (unsigned char *)control_req,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx);
+ ictx->tx_urb->actual_length = 0;
+ }
+
+ init_completion(&ictx->tx.finished);
+ ictx->tx.busy = 1;
+ smp_rmb(); /* ensure later readers know we're busy */
+
+ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL);
+ if (retval) {
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ err("%s: error submitting urb(%d)", __func__, retval);
+ } else {
+ /* Wait for transmission to complete (or abort) */
+ mutex_unlock(&ictx->lock);
+ retval = wait_for_completion_interruptible(
+ &ictx->tx.finished);
+ if (retval)
+ err("%s: task interrupted", __func__);
+ mutex_lock(&ictx->lock);
+
+ retval = ictx->tx.status;
+ if (retval)
+ err("%s: packet tx failed (%d)", __func__, retval);
+ }
+
+ kfree(control_req);
+
+ /*
+ * Induce a mandatory 5ms delay before returning, as otherwise,
+ * send_packet can get called so rapidly as to overwhelm the device,
+ * particularly on faster systems and/or those with quirky usb.
+ */
+ timeout = msecs_to_jiffies(5);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+
+ return retval;
+}
+
+/**
+ * Sends an associate packet to the iMON 2.4G.
+ *
+ * This might not be such a good idea, since it has an id collision with
+ * some versions of the "IR & VFD" combo. The only way to determine if it
+ * is an RF version is to look at the product description string (which
+ * we currently do not fetch).
+ */
+static int send_associate_24g(struct imon_context *ictx)
+{
+ int retval;
+ const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20 };
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ return -ENODEV;
+ }
+
+ memcpy(ictx->usb_tx_buf, packet, sizeof(packet));
+ retval = send_packet(ictx);
+
+ return retval;
+}
+
+/**
+ * Sends packets to setup and show clock on iMON display
+ *
+ * Arguments: year - last 2 digits of year, month - 1..12,
+ * day - 1..31, dow - day of the week (0-Sun...6-Sat),
+ * hour - 0..23, minute - 0..59, second - 0..59
+ */
+static int send_set_imon_clock(struct imon_context *ictx,
+ unsigned int year, unsigned int month,
+ unsigned int day, unsigned int dow,
+ unsigned int hour, unsigned int minute,
+ unsigned int second)
+{
+ unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8];
+ int retval = 0;
+ int i;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ switch (ictx->display_type) {
+ case IMON_DISPLAY_TYPE_LCD:
+ clock_enable_pkt[0][0] = 0x80;
+ clock_enable_pkt[0][1] = year;
+ clock_enable_pkt[0][2] = month-1;
+ clock_enable_pkt[0][3] = day;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+
+ clock_enable_pkt[1][0] = 0x80;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 0;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+
+ if (ictx->product == 0xffdc) {
+ clock_enable_pkt[0][7] = 0x50;
+ clock_enable_pkt[1][7] = 0x51;
+ } else {
+ clock_enable_pkt[0][7] = 0x88;
+ clock_enable_pkt[1][7] = 0x8a;
+ }
+
+ break;
+
+ case IMON_DISPLAY_TYPE_VFD:
+ clock_enable_pkt[0][0] = year;
+ clock_enable_pkt[0][1] = month-1;
+ clock_enable_pkt[0][2] = day;
+ clock_enable_pkt[0][3] = dow;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+ clock_enable_pkt[0][7] = 0x40;
+
+ clock_enable_pkt[1][0] = 0;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 1;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+ clock_enable_pkt[1][7] = 0x42;
+
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) {
+ memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send_packet failed for packet %d",
+ __func__, i);
+ break;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * These are the sysfs functions to handle the association on the iMON 2.4G LT.
+ */
+static ssize_t show_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ if (ictx->rf_isassociating)
+ strcpy(buf, "associating\n");
+ else
+ strcpy(buf, "closed\n");
+
+ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
+ "instructions on how to associate your iMON 2.4G DT/LT "
+ "remote\n");
+ mutex_unlock(&ictx->lock);
+ return strlen(buf);
+}
+
+static ssize_t store_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx;
+
+ ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ ictx->rf_isassociating = 1;
+ send_associate_24g(ictx);
+ mutex_unlock(&ictx->lock);
+
+ return count;
+}
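+
+/*
+ * For example (the sysfs path is illustrative and depends on where the
+ * device node lives), association can be triggered by writing anything
+ * to the attribute:
+ *
+ *   echo 1 > /sys/bus/usb/devices/.../associate_remote
+ */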
+
+/**
+ * sysfs functions to control internal imon clock
+ */
+static ssize_t show_imon_clock(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ size_t len;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ len = snprintf(buf, PAGE_SIZE, "Not supported.");
+ } else {
+ len = snprintf(buf, PAGE_SIZE,
+ "To set the clock on your iMON display:\n"
+ "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
+ "%s", ictx->display_isopen ?
+ "\nNOTE: imon device must be closed\n" : "");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+ return len;
+}
+
+static ssize_t store_imon_clock(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ ssize_t retval;
+ unsigned int year, month, day, dow, hour, minute, second;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ retval = -ENODEV;
+ goto exit;
+ } else if (ictx->display_isopen) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow,
+ &hour, &minute, &second) != 7) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if ((month < 1 || month > 12) ||
+ (day < 1 || day > 31) || (dow > 6) ||
+ (hour > 23) || (minute > 59) || (second > 59)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = send_set_imon_clock(ictx, year, month, day, dow,
+ hour, minute, second);
+ if (retval)
+ goto exit;
+
+ retval = count;
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return retval;
+}
+
+
+static DEVICE_ATTR(imon_clock, S_IWUSR | S_IRUGO, show_imon_clock,
+ store_imon_clock);
+
+static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote,
+ store_associate_remote);
+
+static struct attribute *imon_display_sysfs_entries[] = {
+ &dev_attr_imon_clock.attr,
+ NULL
+};
+
+static struct attribute_group imon_display_attribute_group = {
+ .attrs = imon_display_sysfs_entries
+};
+
+static struct attribute *imon_rf_sysfs_entries[] = {
+ &dev_attr_associate_remote.attr,
+ NULL
+};
+
+static struct attribute_group imon_rf_attribute_group = {
+ .attrs = imon_rf_sysfs_entries
+};
+
+/**
+ * Writes data to the VFD. The iMON VFD is 2x16 characters
+ * and requires data in 5 consecutive USB interrupt packets,
+ * each packet but the last carrying 7 bytes.
+ *
+ * I don't know if the VFD board supports features such as
+ * scrolling, clearing rows, blanking, etc., so the caller must
+ * provide a full screen of data. If fewer than 32 bytes are
+ * provided, spaces are appended to fill out the screen.
+ */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int i;
+ int offset;
+ int seq;
+ int retval = 0;
+ struct imon_context *ictx;
+ const unsigned char vfd_packet6[] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes <= 0 || n_bytes > 32) {
+ err("%s: invalid payload size", __func__);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Pad with spaces */
+ for (i = n_bytes; i < 32; ++i)
+ ictx->tx.data_buf[i] = ' ';
+
+ for (i = 32; i < 35; ++i)
+ ictx->tx.data_buf[i] = 0xFF;
+
+ offset = 0;
+ seq = 0;
+
+ do {
+ memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7);
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed for packet #%d",
+ __func__, seq/2);
+ goto exit;
+ } else {
+ seq += 2;
+ offset += 7;
+ }
+
+ } while (offset < 35);
+
+ /* Send packet #6 */
+ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+ retval = send_packet(ictx);
+ if (retval)
+ err("%s: send packet failed for packet #%d",
+ __func__, seq / 2);
+
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return (!retval) ? n_bytes : retval;
+}
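+
+/*
+ * Illustrative userspace usage, assuming the display registered as
+ * /dev/lcd0 (see DEVICE_NAME above): a full 32-character frame can be
+ * written with something like
+ *
+ *   printf '%-32.32s' 'Hello from imon' > /dev/lcd0
+ *
+ * Shorter writes are padded with spaces by vfd_write() itself.
+ */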
+
+/**
+ * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
+ * packets; each write must supply exactly 8 bytes of raw packet data,
+ * which are passed through to the device unchanged.
+ *
+ * The device itself is not a "traditional" text-mode display. It's
+ * actually a 16x96 pixel bitmap display. That means if you want to
+ * display text, you've got to have your own "font" and translate the
+ * text into bitmaps for display. This is really flexible (you can
+ * display whatever diacritics you need, and so on), but it's also
+ * a lot more complicated than most LCDs...
+ */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int retval = 0;
+ struct imon_context *ictx;
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: no iMON display present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes != 8) {
+ err("%s: invalid payload size: %d (expecting 8)",
+ __func__, (int) n_bytes);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->usb_tx_buf, buf, 8)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed!", __func__);
+ goto exit;
+ } else {
+ dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
+ __func__, (int) n_bytes);
+ }
+exit:
+ mutex_unlock(&ictx->lock);
+ return (!retval) ? n_bytes : retval;
+}
+
+/**
+ * Callback function for USB core API: transmit data
+ */
+static void usb_tx_callback(struct urb *urb)
+{
+ struct imon_context *ictx;
+
+ if (!urb)
+ return;
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ ictx->tx.status = urb->status;
+
+ /* notify waiters that write has finished */
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ complete(&ictx->tx.finished);
+}
+
+/**
+ * mce/rc6 keypresses have no distinct release code, so use a timer
+ */
+static void imon_mce_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ input_report_key(ictx->idev, ictx->last_keycode, 0);
+ input_sync(ictx->idev);
+}
+
+/**
+ * report touchscreen input
+ */
+static void imon_touch_display_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
+ return;
+
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x00);
+ input_sync(ictx->touch);
+}
+
+/**
+ * iMON IR receivers support two different signal sets -- those used by
+ * the iMON remotes, and those used by the Windows MCE remotes (which is
+ * really just RC-6), but only one or the other at a time, as the signals
+ * are decoded onboard the receiver.
+ */
+int imon_ir_change_protocol(void *priv, u64 ir_type)
+{
+ int retval;
+ struct imon_context *ictx = priv;
+ struct device *dev = ictx->dev;
+ bool pad_mouse;
+ unsigned char ir_proto_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
+
+ if (ir_type && !(ir_type & ictx->props->allowed_protos))
+ dev_warn(dev, "Looks like you're trying to use an IR protocol "
+ "this device does not support\n");
+
+ switch (ir_type) {
+ case IR_TYPE_RC6:
+ dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
+ ir_proto_packet[0] = 0x01;
+ pad_mouse = false;
+ init_timer(&ictx->itimer);
+ ictx->itimer.data = (unsigned long)ictx;
+ ictx->itimer.function = imon_mce_timeout;
+ break;
+ case IR_TYPE_UNKNOWN:
+ case IR_TYPE_OTHER:
+ dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ default:
+ dev_warn(dev, "Unsupported IR protocol specified, overriding "
+ "to iMON IR protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ }
+
+ memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
+
+ retval = send_packet(ictx);
+ if (retval)
+ goto out;
+
+ ictx->ir_type = ir_type;
+ ictx->pad_mouse = pad_mouse;
+
+out:
+ return retval;
+}
+
+static inline int tv2int(const struct timeval *a, const struct timeval *b)
+{
+ int usecs = 0;
+ int sec = 0;
+
+ if (b->tv_usec > a->tv_usec) {
+ usecs = 1000000;
+ sec--;
+ }
+
+ usecs += a->tv_usec - b->tv_usec;
+
+ sec += a->tv_sec - b->tv_sec;
+ sec *= 1000;
+ usecs /= 1000;
+ sec += usecs;
+
+ if (sec < 0)
+ sec = 1000;
+
+ return sec;
+}
+
+/**
+ * The directional pad behaves a bit differently, depending on whether this is
+ * one of the older ffdc devices or a newer device. Newer devices appear to
+ * have a higher resolution matrix for more precise mouse movement, but it
+ * makes things overly sensitive in keyboard mode, so we do some interesting
+ * contortions to make it less touchy. Older devices run through the same
+ * routine with a shorter timeout and a smaller threshold.
+ */
+static int stabilize(int a, int b, u16 timeout, u16 threshold)
+{
+ struct timeval ct;
+ static struct timeval prev_time = {0, 0};
+ static struct timeval hit_time = {0, 0};
+ static int x, y, prev_result, hits;
+ int result = 0;
+ int msec, msec_hit;
+
+ do_gettimeofday(&ct);
+ msec = tv2int(&ct, &prev_time);
+ msec_hit = tv2int(&ct, &hit_time);
+
+ if (msec > 100) {
+ x = 0;
+ y = 0;
+ hits = 0;
+ }
+
+ x += a;
+ y += b;
+
+ prev_time = ct;
+
+ if (abs(x) > threshold || abs(y) > threshold) {
+ if (abs(y) > abs(x))
+ result = (y > 0) ? 0x7F : 0x80;
+ else
+ result = (x > 0) ? 0x7F00 : 0x8000;
+
+ x = 0;
+ y = 0;
+
+ if (result == prev_result) {
+ hits++;
+
+ if (hits > 3) {
+ switch (result) {
+ case 0x7F:
+ y = 17 * threshold / 30;
+ break;
+ case 0x80:
+ y -= 17 * threshold / 30;
+ break;
+ case 0x7F00:
+ x = 17 * threshold / 30;
+ break;
+ case 0x8000:
+ x -= 17 * threshold / 30;
+ break;
+ }
+ }
+
+ if (hits == 2 && msec_hit < timeout) {
+ result = 0;
+ hits = 1;
+ }
+ } else {
+ prev_result = result;
+ hits = 1;
+ hit_time = ct;
+ }
+ }
+
+ return result;
+}
+
+static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+ u32 release;
+ bool is_release_code = false;
+
+ /* Look for the initial press of a button */
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ /* Look for the release of a button */
+ if (keycode == KEY_RESERVED) {
+ release = scancode & ~0x4000;
+ keycode = ir_g_keycode_from_table(ictx->idev, release);
+ if (keycode != KEY_RESERVED)
+ is_release_code = true;
+ }
+
+ ictx->release_code = is_release_code;
+
+ return keycode;
+}
+
+static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+
+#define MCE_KEY_MASK 0x7000
+#define MCE_TOGGLE_BIT 0x8000
+
+ /*
+ * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx
+ * (the toggle bit flipping between alternating key presses), while
+ * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep
+ * the table trim, we always OR in the bits to look up 0x8000ff4xx,
+ * but we can't OR them into all codes, as some keys are decoded in
+ * a different way without the same use of the toggle bit...
+ */
+ if ((scancode >> 24) & 0x80)
+ scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
+
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ return keycode;
+}
+
+static u32 imon_panel_key_lookup(u64 hw_code)
+{
+ int i;
+ u64 code = be64_to_cpu(hw_code);
+ u32 keycode = KEY_RESERVED;
+
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
+ if (imon_panel_key_table[i].hw_code == (code | 0xffee)) {
+ keycode = imon_panel_key_table[i].keycode;
+ break;
+ }
+ }
+
+ return keycode;
+}
+
+static bool imon_mouse_event(struct imon_context *ictx,
+ unsigned char *buf, int len)
+{
+ char rel_x = 0x00, rel_y = 0x00;
+ u8 right_shift = 1;
+ bool mouse_input = 1;
+ int dir = 0;
+
+ /* newer iMON device PAD or mouse button */
+ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
+ rel_x = buf[2];
+ rel_y = buf[3];
+ right_shift = 1;
+ /* 0xffdc iMON PAD or mouse button input */
+ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) &&
+ !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) {
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x0f;
+ rel_x = rel_x + rel_x / 2;
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x0f;
+ rel_y = rel_y + rel_y / 2;
+ right_shift = 2;
+ /* some ffdc devices decode mouse buttons differently... */
+ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) {
+ right_shift = 2;
+ /* ch+/- buttons, which we use for an emulated scroll wheel */
+ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) {
+ dir = 1;
+ } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) {
+ dir = -1;
+ } else
+ mouse_input = 0;
+
+ if (mouse_input) {
+ dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
+
+ if (dir) {
+ input_report_rel(ictx->idev, REL_WHEEL, dir);
+ } else if (rel_x || rel_y) {
+ input_report_rel(ictx->idev, REL_X, rel_x);
+ input_report_rel(ictx->idev, REL_Y, rel_y);
+ } else {
+ input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1);
+ input_report_key(ictx->idev, BTN_RIGHT,
+ buf[1] >> right_shift & 0x1);
+ }
+ input_sync(ictx->idev);
+ ictx->last_keycode = ictx->kc;
+ }
+
+ return mouse_input;
+}
+
+static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
+{
+ mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT);
+ ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4);
+ ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf));
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x01);
+ input_sync(ictx->touch);
+}
+
+static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
+{
+ int dir = 0;
+ char rel_x = 0x00, rel_y = 0x00;
+ u16 timeout, threshold;
+ u64 temp_key;
+ u32 remote_key;
+
+ /*
+ * The imon directional pad functions more like a touchpad. Bytes 3 & 4
+ * contain a position coordinate (x,y), with each component ranging
+ * from -14 to 14. We want to down-sample this to only 4 discrete values
+ * for up/down/left/right arrow keys. Also, when you get too close to
+ * diagonals, it has a tendency to jump back and forth, so let's try to
+ * ignore movements that land too close to a diagonal.
+ */
+ if (ictx->product != 0xffdc) {
+ /* first, pad to 8 bytes so it conforms with everything else */
+ buf[5] = buf[6] = buf[7] = 0;
+ timeout = 500; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 28;
+ rel_x = buf[2];
+ rel_y = buf[3];
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key)
+ & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx,
+ remote_key);
+ }
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+
+ /*
+ * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad
+ * device (15c2:ffdc). The remote generates various codes from
+ * 0x68nnnnB7 to 0x6AnnnnB7; the left mouse button generates
+ * 0x688301b7 and the right one 0x688481b7. All other keys generate
+ * 0x2nnnnnnn. The position coordinate is encoded in buf[1] and buf[2] with
+ * reversed endianness. Extract the direction from the buffer, correct the
+ * endianness, adjust the sign and feed the values into stabilize(). The
+ * resulting codes will be 0x01008000 or 0x01007F00, which match the newer
+ * devices.
+ */
+ } else {
+ timeout = 10; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 15;
+
+ /* buf[1] is x */
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x10+1;
+ /* buf[2] is y */
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x10+1;
+
+ buf[0] = 0x01;
+ buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx, remote_key);
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+ }
+}
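
The decode described in the two comments above can be exercised on its own. What follows is an editor's sketch only, not part of the patch: it assumes the bit layout the comments document (magnitude in bits 6..3 of buf[1]/buf[2], sign bits in buf[0]), and ffdc_axis/pad_to_arrow are illustrative names.

/* Standalone userspace illustration of the 0xffdc pad decode above. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Bits 6..3 of the raw byte hold the magnitude, packed in reverse order. */
static int ffdc_axis(uint8_t raw, int negative)
{
    int v = (raw & 0x08) | ((raw & 0x10) >> 2) |
            ((raw & 0x20) >> 4) | ((raw & 0x40) >> 6);

    if (negative)
        v |= ~0x0f;    /* sign-extend: v becomes v - 16 */
    return v;
}

/* Down-sample an (x, y) pad position to one of four arrow keys. */
static const char *pad_to_arrow(int x, int y)
{
    if (abs(y) > abs(x))
        return y > 0 ? "KEY_DOWN" : "KEY_UP";
    return x > 0 ? "KEY_RIGHT" : "KEY_LEFT";
}

int main(void)
{
    /* hypothetical packet: bit 1 of buf[0] set, so x is negative */
    uint8_t buf[3] = { 0x42, 0x58, 0x18 };
    int x = ffdc_axis(buf[1], buf[0] & 0x02);
    int y = ffdc_axis(buf[2], buf[0] & 0x01);

    printf("x=%d y=%d -> %s\n", x, y, pad_to_arrow(x, y));
    return 0;
}

Built as a normal userspace program this prints "x=-3 y=12 -> KEY_DOWN", matching the abs() comparison the driver applies when stabilize() is not in use.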
+
+static int imon_parse_press_type(struct imon_context *ictx,
+ unsigned char *buf, u8 ktype)
+{
+ int press_type = 0;
+ int rep_delay = ictx->idev->rep[REP_DELAY];
+ int rep_period = ictx->idev->rep[REP_PERIOD];
+
+ /* key release of 0x02XXXXXX key */
+ if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some other) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mce-specific button handling */
+ else if (ktype == IMON_KEY_MCE) {
+ /* initial press */
+ if (ictx->kc != ictx->last_keycode
+ || buf[2] != ictx->mce_toggle_bit) {
+ ictx->last_keycode = ictx->kc;
+ ictx->mce_toggle_bit = buf[2];
+ press_type = 1;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_delay));
+ /* repeat */
+ } else {
+ press_type = 2;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_period));
+ }
+
+ /* incoherent or irrelevant data */
+ } else if (ictx->kc == KEY_RESERVED)
+ press_type = -EINVAL;
+
+ /* key release of 0xXXXXXXb7 key */
+ else if (ictx->release_code)
+ press_type = 0;
+
+ /* this is a button press */
+ else
+ press_type = 1;
+
+ return press_type;
+}
+
+/**
+ * Process the incoming packet
+ */
+static void imon_incoming_packet(struct imon_context *ictx,
+ struct urb *urb, int intf)
+{
+ int len = urb->actual_length;
+ unsigned char *buf = urb->transfer_buffer;
+ struct device *dev = ictx->dev;
+ u32 kc;
+ bool norelease = 0;
+ int i;
+ u64 temp_key;
+ u64 panel_key = 0;
+ u32 remote_key = 0;
+ struct input_dev *idev = NULL;
+ int press_type = 0;
+ int msec;
+ struct timeval t;
+ static struct timeval prev_time = { 0, 0 };
+ u8 ktype = IMON_KEY_IMON;
+
+ idev = ictx->idev;
+
+ /* filter out junk data on the older 0xffdc imon devices */
+ if ((buf[0] == 0xff) && (buf[7] == 0xff))
+ return;
+
+ /* Figure out what key was pressed */
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ if (len == 8 && buf[7] == 0xee) {
+ ktype = IMON_KEY_PANEL;
+ panel_key = le64_to_cpu(temp_key);
+ kc = imon_panel_key_lookup(panel_key);
+ } else {
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ if (ictx->ir_type == IR_TYPE_RC6) {
+ if (buf[0] == 0x80)
+ ktype = IMON_KEY_MCE;
+ kc = imon_mce_key_lookup(ictx, remote_key);
+ } else
+ kc = imon_remote_key_lookup(ictx, remote_key);
+ }
+
+ /* keyboard/mouse mode toggle button */
+ if (kc == KEY_KEYBOARD && !ictx->release_code) {
+ ictx->last_keycode = kc;
+ if (!nomouse) {
+ ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ dev_dbg(dev, "toggling to %s mode\n",
+ ictx->pad_mouse ? "mouse" : "keyboard");
+ return;
+ } else {
+ ictx->pad_mouse = 0;
+ dev_dbg(dev, "mouse mode disabled, passing key value\n");
+ }
+ }
+
+ ictx->kc = kc;
+
+ /* send touchscreen events through input subsystem if touchpad data */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
+ buf[7] == 0x86) {
+ imon_touch_event(ictx, buf);
+
+ /* look for mouse events with pad in mouse mode */
+ } else if (ictx->pad_mouse) {
+ if (imon_mouse_event(ictx, buf, len))
+ return;
+ }
+
+ /* Now for some special handling to convert pad input to arrow keys */
+ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) ||
+ ((len == 8) && (buf[0] & 0x40) &&
+ !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) {
+ len = 8;
+ imon_pad_to_keys(ictx, buf);
+ norelease = 1;
+ }
+
+ if (debug) {
+ printk(KERN_INFO "intf%d decoded packet: ", intf);
+ for (i = 0; i < len; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ }
+
+ press_type = imon_parse_press_type(ictx, buf, ktype);
+ if (press_type < 0)
+ goto not_input_data;
+
+ if (ictx->kc == KEY_UNKNOWN)
+ goto unknown_key;
+
+ /* KEY_MUTE repeats from MCE and knob need to be suppressed */
+ if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode)
+ && (buf[7] == 0xee || ktype == IMON_KEY_MCE)) {
+ do_gettimeofday(&t);
+ msec = tv2int(&t, &prev_time);
+ prev_time = t;
+ if (msec < idev->rep[REP_DELAY])
+ return;
+ }
+
+ input_report_key(idev, ictx->kc, press_type);
+ input_sync(idev);
+
+ /* panel keys and some remote keys don't generate a release */
+ if (panel_key || norelease) {
+ input_report_key(idev, ictx->kc, 0);
+ input_sync(idev);
+ }
+
+ ictx->last_keycode = ictx->kc;
+
+ return;
+
+unknown_key:
+ dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__,
+ (panel_key ? be64_to_cpu(panel_key) :
+ be32_to_cpu(remote_key)));
+ return;
+
+not_input_data:
+ if (len != 8) {
+ dev_warn(dev, "imon %s: invalid incoming packet "
+ "size (len = %d, intf%d)\n", __func__, len, intf);
+ return;
+ }
+
+ /* iMON 2.4G associate frame */
+ if (buf[0] == 0x00 &&
+ buf[2] == 0xFF && /* REFID */
+ buf[3] == 0xFF &&
+ buf[4] == 0xFF &&
+ buf[5] == 0xFF && /* iMON 2.4G */
+ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */
+ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */
+ dev_warn(dev, "%s: remote associated refid=%02X\n",
+ __func__, buf[1]);
+ ictx->rf_isassociating = 0;
+ }
+}
+
+/**
+ * Callback function for USB core API: receive data
+ */
+static void usb_rx_callback_intf0(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 0;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+}
+
+static void usb_rx_callback_intf1(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 1;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+}
+
+static struct input_dev *imon_init_idev(struct imon_context *ictx)
+{
+ struct input_dev *idev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ int ret, i;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(ictx->dev, "remote input dev allocation failed\n");
+ goto idev_alloc_failed;
+ }
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props) {
+ dev_err(ictx->dev, "remote ir dev props allocation failed\n");
+ goto props_alloc_failed;
+ }
+
+ ir = kzalloc(sizeof(struct ir_input_dev), GFP_KERNEL);
+ if (!ir) {
+ dev_err(ictx->dev, "remote ir input dev allocation failed\n");
+ goto ir_dev_alloc_failed;
+ }
+
+ snprintf(ictx->name_idev, sizeof(ictx->name_idev),
+ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ idev->name = ictx->name_idev;
+
+ usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
+ sizeof(ictx->phys_idev));
+ strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev));
+ idev->phys = ictx->phys_idev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
+
+ idev->keybit[BIT_WORD(BTN_MOUSE)] =
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
+ BIT_MASK(REL_WHEEL);
+
+ /* panel and/or knob code support */
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
+ u32 kc = imon_panel_key_table[i].keycode;
+ __set_bit(kc, idev->keybit);
+ }
+
+ props->priv = ictx;
+ props->driver_type = RC_DRIVER_SCANCODE;
+ /* IR_TYPE_OTHER maps to iMON PAD remote, IR_TYPE_RC6 to MCE remote */
+ props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6;
+ props->change_protocol = imon_ir_change_protocol;
+ ictx->props = props;
+
+ ictx->ir = ir;
+ memcpy(&ir->dev, ictx->dev, sizeof(struct device));
+
+ usb_to_input_id(ictx->usbdev_intf0, &idev->id);
+ idev->dev.parent = ictx->dev;
+
+ input_set_drvdata(idev, ir);
+
+ ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
+ if (ret < 0) {
+ dev_err(ictx->dev, "remote input dev register failed\n");
+ goto idev_register_failed;
+ }
+
+ return idev;
+
+idev_register_failed:
+ kfree(ir);
+ir_dev_alloc_failed:
+ kfree(props);
+props_alloc_failed:
+ input_free_device(idev);
+idev_alloc_failed:
+
+ return NULL;
+}
+
+static struct input_dev *imon_init_touch(struct imon_context *ictx)
+{
+ struct input_dev *touch;
+ int ret;
+
+ touch = input_allocate_device();
+ if (!touch) {
+ dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+ goto touch_alloc_failed;
+ }
+
+ snprintf(ictx->name_touch, sizeof(ictx->name_touch),
+ "iMON USB Touchscreen (%04x:%04x)",
+ ictx->vendor, ictx->product);
+ touch->name = ictx->name_touch;
+
+ usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
+ sizeof(ictx->phys_touch));
+ strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch));
+ touch->phys = ictx->phys_touch;
+
+ touch->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ touch->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(touch, ABS_X, 0x00, 0xfff, 0, 0);
+ input_set_abs_params(touch, ABS_Y, 0x00, 0xfff, 0, 0);
+
+ input_set_drvdata(touch, ictx);
+
+ usb_to_input_id(ictx->usbdev_intf1, &touch->id);
+ touch->dev.parent = ictx->dev;
+ ret = input_register_device(touch);
+ if (ret < 0) {
+ dev_info(ictx->dev, "touchscreen input dev register failed\n");
+ goto touch_register_failed;
+ }
+
+ return touch;
+
+touch_register_failed:
+ input_free_device(touch);
+
+touch_alloc_failed:
+ return NULL;
+}
+
+static bool imon_find_endpoints(struct imon_context *ictx,
+ struct usb_host_interface *iface_desc)
+{
+ struct usb_endpoint_descriptor *ep;
+ struct usb_endpoint_descriptor *rx_endpoint = NULL;
+ struct usb_endpoint_descriptor *tx_endpoint = NULL;
+ int ifnum = iface_desc->desc.bInterfaceNumber;
+ int num_endpts = iface_desc->desc.bNumEndpoints;
+ int i, ep_dir, ep_type;
+ bool ir_ep_found = 0;
+ bool display_ep_found = 0;
+ bool tx_control = 0;
+
+ /*
+ * Scan the endpoint list and set:
+ * first input endpoint = IR endpoint
+ * first output endpoint = display endpoint
+ */
+ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ if (!ir_ep_found && ep_dir == USB_DIR_IN &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+
+ rx_endpoint = ep;
+ ir_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
+
+ } else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+ tx_endpoint = ep;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__);
+ }
+ }
+
+ if (ifnum == 0) {
+ ictx->rx_endpoint_intf0 = rx_endpoint;
+ /*
+ * tx is used to send characters to lcd/vfd, associate RF
+ * remotes, set IR protocol, and maybe more...
+ */
+ ictx->tx_endpoint = tx_endpoint;
+ } else {
+ ictx->rx_endpoint_intf1 = rx_endpoint;
+ }
+
+ /*
+ * If we didn't find a display endpoint, this is probably one of the
+ * newer iMON devices that use control urb instead of interrupt
+ */
+ if (!display_ep_found) {
+ tx_control = 1;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
+ "interface OUT endpoint\n", __func__);
+ }
+
+ /*
+ * Some iMON receivers have no display. Unfortunately, it seems
+ * that SoundGraph recycles device IDs between devices both with
+ * and without... :\
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
+ }
+
+ /*
+ * iMON Touch devices have a VGA touchscreen, but no "display", as
+ * that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
+ }
+
+ /* Input endpoint is mandatory */
+ if (!ir_ep_found)
+ err("%s: no valid input (IR) endpoint found.", __func__);
+
+ ictx->tx_control = tx_control;
+
+ if (display_ep_found)
+ ictx->display_supported = true;
+
+ return ir_ep_found;
+
+}
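
The scan above open-codes the direction and transfer-type mask checks. As a design note only (an editor's sketch, assuming <linux/usb.h> is available, as the surrounding code already requires), the same first-interrupt-IN / first-interrupt-OUT policy can be written with the generic usb_endpoint_is_int_in()/usb_endpoint_is_int_out() helpers from <linux/usb/ch9.h>; imon_scan_endpoints is an illustrative name, not the patch's function:

static bool imon_scan_endpoints(struct usb_host_interface *iface_desc,
                                struct usb_endpoint_descriptor **rx_ep,
                                struct usb_endpoint_descriptor **tx_ep)
{
    int i;

    *rx_ep = NULL;    /* first interrupt IN  = IR endpoint */
    *tx_ep = NULL;    /* first interrupt OUT = display endpoint */

    for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
        struct usb_endpoint_descriptor *ep =
            &iface_desc->endpoint[i].desc;

        if (!*rx_ep && usb_endpoint_is_int_in(ep))
            *rx_ep = ep;
        else if (!*tx_ep && usb_endpoint_is_int_out(ep))
            *tx_ep = ep;
    }

    return *rx_ep != NULL;    /* the IR endpoint is mandatory */
}

Functionally this mirrors the loop above; the helpers just fold the USB_DIR_* and USB_ENDPOINT_XFER_INT masking into a single call.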
+
+static struct imon_context *imon_init_intf0(struct usb_interface *intf)
+{
+ struct imon_context *ictx;
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+ struct device *dev = &intf->dev;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
+ if (!ictx) {
+ dev_err(dev, "%s: kzalloc failed for context", __func__);
+ goto exit;
+ }
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+ tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for display urb",
+ __func__);
+ goto tx_urb_alloc_failed;
+ }
+
+ mutex_init(&ictx->lock);
+
+ mutex_lock(&ictx->lock);
+
+ ictx->dev = dev;
+ ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf0 = 1;
+ ictx->rx_urb_intf0 = rx_urb;
+ ictx->tx_urb = tx_urb;
+
+ ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
+ ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc)) {
+ goto find_endpoint_failed;
+ }
+
+ ictx->idev = imon_init_idev(ictx);
+ if (!ictx->idev) {
+ dev_err(dev, "%s: input device setup failed\n", __func__);
+ goto idev_setup_failed;
+ }
+
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf0 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ input_unregister_device(ictx->idev);
+ input_free_device(ictx->idev);
+idev_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(tx_urb);
+tx_urb_alloc_failed:
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ kfree(ictx);
+exit:
+ dev_err(dev, "unable to initialize intf0, err %d\n", ret);
+
+ return NULL;
+}
+
+static struct imon_context *imon_init_intf1(struct usb_interface *intf,
+ struct imon_context *ictx)
+{
+ struct urb *rx_urb;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ err("%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ init_timer(&ictx->ttimer);
+ ictx->ttimer.data = (unsigned long)ictx;
+ ictx->ttimer.function = imon_touch_display_timeout;
+ }
+
+ ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf1 = 1;
+ ictx->rx_urb_intf1 = rx_urb;
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc))
+ goto find_endpoint_failed;
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ ictx->touch = imon_init_touch(ictx);
+ if (!ictx->touch)
+ goto touch_setup_failed;
+ } else
+ ictx->touch = NULL;
+
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
+
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf1 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ if (ictx->touch) {
+ input_unregister_device(ictx->touch);
+ input_free_device(ictx->touch);
+ }
+touch_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret);
+
+ return NULL;
+}
+
+/*
+ * The 0x15c2:0xffdc device ID was used for umpteen different imon
+ * devices, and all of them constantly spew interrupts, even when there
+ * is no actual data to report. However, byte 6 of this buffer looks like
+ * it's unique across device variants, so we key off that byte to figure
+ * out which display type (if any) and which IR protocol the device
+ * actually supports. These devices have their IR protocol hard-coded into
+ * their firmware; it can't be changed on the fly as it can on the newer
+ * hardware. (A table-driven sketch of this mapping follows the function
+ * below.)
+ */
+static void imon_get_ffdc_type(struct imon_context *ictx)
+{
+ u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
+ u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
+ u64 allowed_protos = IR_TYPE_OTHER;
+
+ switch (ffdc_cfg_byte) {
+ /* iMON Knob, no display, iMON IR + vol knob */
+ case 0x21:
+ dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
+ ictx->display_supported = false;
+ break;
+ /* iMON VFD, no IR (does have a vol knob, though) */
+ case 0x35:
+ dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, iMON IR */
+ case 0x24:
+ case 0x85:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ default:
+ dev_info(ictx->dev, "Unknown 0xffdc device, "
+ "defaulting to VFD and iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+
+ printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
+
+ ictx->display_type = detected_display_type;
+ ictx->props->allowed_protos = allowed_protos;
+ ictx->ir_type = allowed_protos;
+}
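
Since the whole quirk boils down to a byte-6 lookup, the same mapping can also be expressed as a table. This is an editor's sketch, not the patch's code; it only restates the values from the switch above, and imon_ffdc_quirk/imon_ffdc_lookup are illustrative names:

struct imon_ffdc_quirk {
    u8 cfg_byte;
    u8 display_type;
    u64 allowed_protos;
    const char *desc;
};

static const struct imon_ffdc_quirk imon_ffdc_quirks[] = {
    /* 0x21 has no display; the driver also clears display_supported */
    { 0x21, IMON_DISPLAY_TYPE_NONE, IR_TYPE_OTHER, "iMON Knob, iMON IR" },
    { 0x35, IMON_DISPLAY_TYPE_VFD,  IR_TYPE_OTHER, "iMON VFD + knob, no IR" },
    { 0x24, IMON_DISPLAY_TYPE_VFD,  IR_TYPE_OTHER, "iMON VFD, iMON IR" },
    { 0x85, IMON_DISPLAY_TYPE_VFD,  IR_TYPE_OTHER, "iMON VFD, iMON IR" },
    { 0x9f, IMON_DISPLAY_TYPE_LCD,  IR_TYPE_RC6,   "iMON LCD, MCE IR" },
};

/* Unknown config bytes fall back to the switch's default: VFD, iMON IR. */
static const struct imon_ffdc_quirk *imon_ffdc_lookup(u8 cfg_byte)
{
    static const struct imon_ffdc_quirk unknown = {
        0x00, IMON_DISPLAY_TYPE_VFD, IR_TYPE_OTHER, "Unknown 0xffdc device"
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(imon_ffdc_quirks); i++)
        if (imon_ffdc_quirks[i].cfg_byte == cfg_byte)
            return &imon_ffdc_quirks[i];

    return &unknown;
}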
+
+static void imon_set_display_type(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
+
+ /*
+ * Try to auto-detect the type of display if the user hasn't set
+ * it by hand via the display_type modparam. Default is VFD.
+ */
+
+ if (display_type == IMON_DISPLAY_TYPE_AUTO) {
+ switch (ictx->product) {
+ case 0xffdc:
+ /* set in imon_get_ffdc_type() */
+ configured_display_type = ictx->display_type;
+ break;
+ case 0x0034:
+ case 0x0035:
+ configured_display_type = IMON_DISPLAY_TYPE_VGA;
+ break;
+ case 0x0038:
+ case 0x0039:
+ case 0x0045:
+ configured_display_type = IMON_DISPLAY_TYPE_LCD;
+ break;
+ case 0x003c:
+ case 0x0041:
+ case 0x0042:
+ case 0x0043:
+ configured_display_type = IMON_DISPLAY_TYPE_NONE;
+ ictx->display_supported = false;
+ break;
+ case 0x0036:
+ case 0x0044:
+ default:
+ configured_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+ } else {
+ configured_display_type = display_type;
+ if (display_type == IMON_DISPLAY_TYPE_NONE)
+ ictx->display_supported = false;
+ else
+ ictx->display_supported = true;
+ dev_info(ictx->dev, "%s: overriding display type to %d via "
+ "modparam\n", __func__, display_type);
+ }
+
+ ictx->display_type = configured_display_type;
+}
+
+static void imon_init_display(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ int ret;
+
+ dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
+
+ /* set up sysfs entry for built-in clock */
+ ret = sysfs_create_group(&intf->dev.kobj,
+ &imon_display_attribute_group);
+ if (ret)
+ dev_err(ictx->dev, "Could not create display sysfs "
+ "entries(%d)", ret);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ ret = usb_register_dev(intf, &imon_lcd_class);
+ else
+ ret = usb_register_dev(intf, &imon_vfd_class);
+ if (ret)
+ /* Not a fatal error, so ignore */
+ dev_info(ictx->dev, "could not get a minor number for "
+ "display\n");
+
+}
+
+/**
+ * Callback function for USB core API: Probe
+ */
+static int __devinit imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev = NULL;
+ struct usb_host_interface *iface_desc = NULL;
+ struct usb_interface *first_if;
+ struct device *dev = &interface->dev;
+ int ifnum, code_length, sysfs_err;
+ int ret = 0;
+ struct imon_context *ictx = NULL;
+ struct imon_context *first_if_ctx = NULL;
+ u16 vendor, product;
+ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x88 };
+
+ code_length = BUF_CHUNK_SIZE * 8;
+
+ usbdev = usb_get_dev(interface_to_usbdev(interface));
+ iface_desc = interface->cur_altsetting;
+ ifnum = iface_desc->desc.bInterfaceNumber;
+ vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ product = le16_to_cpu(usbdev->descriptor.idProduct);
+
+ dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
+ __func__, vendor, product, ifnum);
+
+ /* prevent races probing devices w/multiple interfaces */
+ mutex_lock(&driver_lock);
+
+ first_if = usb_ifnum_to_if(usbdev, 0);
+ first_if_ctx = (struct imon_context *)usb_get_intfdata(first_if);
+
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface);
+ if (!ictx) {
+ err("%s: failed to initialize context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (product == 0xffdc) {
+ /* RF products *also* use 0xffdc... sigh... */
+ sysfs_err = sysfs_create_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+ if (sysfs_err)
+ err("%s: Could not create RF sysfs entries(%d)",
+ __func__, sysfs_err);
+ }
+
+ } else {
+ /* this is the secondary interface on the device */
+ ictx = imon_init_intf1(interface, first_if_ctx);
+ if (!ictx) {
+ err("%s: failed to attach to context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ }
+
+ usb_set_intfdata(interface, ictx);
+
+ if (ifnum == 0) {
+ /* Enable front-panel buttons and/or knobs */
+ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
+ ret = send_packet(ictx);
+ /* Not fatal, but warn about it */
+ if (ret)
+ dev_info(dev, "failed to enable panel buttons "
+ "and/or knobs\n");
+
+ if (product == 0xffdc)
+ imon_get_ffdc_type(ictx);
+
+ imon_set_display_type(ictx, interface);
+
+ if (ictx->display_supported)
+ imon_init_display(ictx, interface);
+ }
+
+ /* set IR protocol/remote type */
+ ret = imon_ir_change_protocol(ictx, ictx->ir_type);
+ if (ret) {
+ dev_warn(dev, "%s: failed to set IR protocol, falling back "
+ "to standard iMON protocol mode\n", __func__);
+ ictx->ir_type = IR_TYPE_OTHER;
+ }
+
+ dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
+ "usb<%d:%d> initialized\n", vendor, product, ifnum,
+ usbdev->bus->busnum, usbdev->devnum);
+
+ mutex_unlock(&ictx->lock);
+ mutex_unlock(&driver_lock);
+
+ return 0;
+
+fail:
+ mutex_unlock(&driver_lock);
+ dev_err(dev, "unable to register, err %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * Callback function for USB core API: disconnect
+ */
+static void __devexit imon_disconnect(struct usb_interface *interface)
+{
+ struct imon_context *ictx;
+ struct device *dev;
+ int ifnum;
+
+ /* prevent races with multi-interface device probing and display_open */
+ mutex_lock(&driver_lock);
+
+ ictx = usb_get_intfdata(interface);
+ dev = ictx->dev;
+ ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ mutex_lock(&ictx->lock);
+
+ /*
+ * sysfs_remove_group is safe to call even if sysfs_create_group
+ * hasn't been called
+ */
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_display_attribute_group);
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+
+ usb_set_intfdata(interface, NULL);
+
+ /* Abort ongoing write */
+ if (ictx->tx.busy) {
+ usb_kill_urb(ictx->tx_urb);
+ complete_all(&ictx->tx.finished);
+ }
+
+ if (ifnum == 0) {
+ ictx->dev_present_intf0 = 0;
+ usb_kill_urb(ictx->rx_urb_intf0);
+ input_unregister_device(ictx->idev);
+ if (ictx->display_supported) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ usb_deregister_dev(interface, &imon_lcd_class);
+ else
+ usb_deregister_dev(interface, &imon_vfd_class);
+ }
+ } else {
+ ictx->dev_present_intf1 = 0;
+ usb_kill_urb(ictx->rx_urb_intf1);
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ input_unregister_device(ictx->touch);
+ }
+
+ if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ del_timer_sync(&ictx->ttimer);
+ mutex_unlock(&ictx->lock);
+ if (!ictx->display_isopen)
+ free_imon_context(ictx);
+ } else {
+ if (ictx->ir_type == IR_TYPE_RC6)
+ del_timer_sync(&ictx->itimer);
+ mutex_unlock(&ictx->lock);
+ }
+
+ mutex_unlock(&driver_lock);
+
+ dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
+ __func__, ifnum);
+}
+
+static int imon_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0)
+ usb_kill_urb(ictx->rx_urb_intf0);
+ else
+ usb_kill_urb(ictx->rx_urb_intf1);
+
+ return 0;
+}
+
+static int imon_resume(struct usb_interface *intf)
+{
+ int rc = 0;
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0) {
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+
+ } else {
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+ }
+
+ return rc;
+}
+
+static int __init imon_init(void)
+{
+ int rc;
+
+ rc = usb_register(&imon_driver);
+ if (rc) {
+ err("%s: usb register failed(%d)", __func__, rc);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static void __exit imon_exit(void)
+{
+ usb_deregister(&imon_driver);
+}
+
+module_init(imon_init);
+module_exit(imon_exit);
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
new file mode 100644
index 00000000000..9a5e65a471a
--- /dev/null
+++ b/drivers/media/IR/ir-core-priv.h
@@ -0,0 +1,126 @@
+/*
+ * Remote Controller core raw events header
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IR_RAW_EVENT
+#define _IR_RAW_EVENT
+
+#include <linux/slab.h>
+#include <media/ir-core.h>
+
+struct ir_raw_handler {
+ struct list_head list;
+
+ int (*decode)(struct input_dev *input_dev, struct ir_raw_event event);
+ int (*raw_register)(struct input_dev *input_dev);
+ int (*raw_unregister)(struct input_dev *input_dev);
+};
+
+struct ir_raw_event_ctrl {
+ struct work_struct rx_work; /* for the rx decoding workqueue */
+ struct kfifo kfifo; /* fifo for the pulse/space durations */
+ ktime_t last_event; /* when last event occurred */
+ enum raw_event_type last_type; /* last event type */
+ struct input_dev *input_dev; /* pointer to the parent input_dev */
+};
+
+/* macros for IR decoders */
+static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return d1 > (d2 - margin);
+}
+
+static inline bool eq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return ((d1 > (d2 - margin)) && (d1 < (d2 + margin)));
+}
+
+static inline bool is_transition(struct ir_raw_event *x, struct ir_raw_event *y)
+{
+ return x->pulse != y->pulse;
+}
+
+static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
+{
+ if (duration > ev->duration)
+ ev->duration = 0;
+ else
+ ev->duration -= duration;
+}
+
+#define TO_US(duration) (((duration) + 500) / 1000)
+#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
+#define IS_RESET(ev) (ev.duration == 0)
+
+/*
+ * Routines from ir-sysfs.c - Meant to be called only internally inside
+ * ir-core
+ */
+
+int ir_register_class(struct input_dev *input_dev);
+void ir_unregister_class(struct input_dev *input_dev);
+
+/*
+ * Routines from ir-raw-event.c to be used internally and by decoders
+ */
+int ir_raw_event_register(struct input_dev *input_dev);
+void ir_raw_event_unregister(struct input_dev *input_dev);
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_init(void);
+
+
+/*
+ * Decoder initialization code
+ *
+ * These load helpers are called during ir-core init and automatically
+ * load the compiled decoders for use with IR raw events
+ */
+
+/* from ir-nec-decoder.c */
+#ifdef CONFIG_IR_NEC_DECODER_MODULE
+#define load_nec_decode() request_module("ir-nec-decoder")
+#else
+#define load_nec_decode() 0
+#endif
+
+/* from ir-rc5-decoder.c */
+#ifdef CONFIG_IR_RC5_DECODER_MODULE
+#define load_rc5_decode() request_module("ir-rc5-decoder")
+#else
+#define load_rc5_decode() 0
+#endif
+
+/* from ir-rc6-decoder.c */
+#ifdef CONFIG_IR_RC6_DECODER_MODULE
+#define load_rc6_decode() request_module("ir-rc6-decoder")
+#else
+#define load_rc6_decode() 0
+#endif
+
+/* from ir-jvc-decoder.c */
+#ifdef CONFIG_IR_JVC_DECODER_MODULE
+#define load_jvc_decode() request_module("ir-jvc-decoder")
+#else
+#define load_jvc_decode() 0
+#endif
+
+/* from ir-sony-decoder.c */
+#ifdef CONFIG_IR_SONY_DECODER_MODULE
+#define load_sony_decode() request_module("ir-sony-decoder")
+#else
+#define load_sony_decode() 0
+#endif
+
+#endif /* _IR_RAW_EVENT */
diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c
index ab06919ad5f..db591e42188 100644
--- a/drivers/media/IR/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -24,6 +24,7 @@
#include <linux/string.h>
#include <linux/jiffies.h>
#include <media/ir-common.h>
+#include "ir-core-priv.h"
/* -------------------------------------------------------------------------- */
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
new file mode 100644
index 00000000000..0b804944cbb
--- /dev/null
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -0,0 +1,320 @@
+/* ir-jvc-decoder.c - handle JVC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define JVC_NBITS 16 /* dev(8) + func(8) */
+#define JVC_UNIT 525000 /* ns */
+#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
+#define JVC_HEADER_SPACE (8 * JVC_UNIT)
+#define JVC_BIT_PULSE (1 * JVC_UNIT)
+#define JVC_BIT_0_SPACE (1 * JVC_UNIT)
+#define JVC_BIT_1_SPACE (3 * JVC_UNIT)
+#define JVC_TRAILER_PULSE (1 * JVC_UNIT)
+#define JVC_TRAILER_SPACE (35 * JVC_UNIT)
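
For reference, the nominal durations these constants encode, and how the eq_margin() helper from ir-core-priv.h gates them, can be checked with a plain userspace sketch. Editor's illustration only: the 1600 us figure is a made-up measurement, and the local eq_margin() simply restates the header's inline helper.

#include <stdio.h>
#include <stdbool.h>

#define JVC_UNIT         525000               /* ns */
#define JVC_HEADER_PULSE (16 * JVC_UNIT)      /* 8400 us */
#define JVC_BIT_PULSE    (1 * JVC_UNIT)       /*  525 us */
#define JVC_BIT_0_SPACE  (1 * JVC_UNIT)       /*  525 us */
#define JVC_BIT_1_SPACE  (3 * JVC_UNIT)       /* 1575 us */

/* same window the decoder uses: d2 - margin < d1 < d2 + margin */
static bool eq_margin(unsigned d1, unsigned d2, unsigned margin)
{
    return d1 > (d2 - margin) && d1 < (d2 + margin);
}

int main(void)
{
    unsigned space = 1600000;    /* hypothetical 1600 us captured space */

    printf("header pulse %d us, bit pulse %d us\n",
           JVC_HEADER_PULSE / 1000, JVC_BIT_PULSE / 1000);
    printf("'0' space %d us, '1' space %d us\n",
           JVC_BIT_0_SPACE / 1000, JVC_BIT_1_SPACE / 1000);
    printf("1600 us space reads as a '1' bit: %s\n",
           eq_margin(space, JVC_BIT_1_SPACE, JVC_UNIT / 2) ? "yes" : "no");
    return 0;
}

With the half-unit margin used throughout the decoder, anything between roughly 1312 us and 1837 us is accepted as a '1' space, so the sample prints "yes".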
+
+/* Used to register jvc_decoder clients */
+static LIST_HEAD(decoder_list);
+DEFINE_SPINLOCK(decoder_lock);
+
+enum jvc_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ unsigned int enabled:1;
+
+ /* State machine control */
+ enum jvc_state state;
+ u16 jvc_bits;
+ u16 jvc_old_bits;
+ unsigned count;
+ bool first;
+ bool toggle;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL, *iter;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(iter, &decoder_list, list) {
+  if (iter->ir_dev == ir_dev) {
+   data = iter;
+   break;
+  }
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "jvc_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_jvc_decode() - Decode one JVC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, JVC_UNIT, JVC_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "JVC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->first = true;
+ data->toggle = !data->toggle;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_SPACE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_BIT_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->jvc_bits <<= 1;
+ if (eq_margin(ev.duration, JVC_BIT_1_SPACE, JVC_UNIT / 2)) {
+ data->jvc_bits |= 1;
+ decrease_duration(&ev, JVC_BIT_1_SPACE);
+ } else if (eq_margin(ev.duration, JVC_BIT_0_SPACE, JVC_UNIT / 2))
+ decrease_duration(&ev, JVC_BIT_0_SPACE);
+ else
+ break;
+ data->count++;
+
+ if (data->count == JVC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_TRAILER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, JVC_TRAILER_SPACE, JVC_UNIT / 2))
+ break;
+
+ if (data->first) {
+ u32 scancode;
+ scancode = (bitrev8((data->jvc_bits >> 8) & 0xff) << 8) |
+ (bitrev8((data->jvc_bits >> 0) & 0xff) << 0);
+ IR_dprintk(1, "JVC scancode 0x%04x\n", scancode);
+ ir_keydown(input_dev, scancode, data->toggle);
+ data->first = false;
+ data->jvc_old_bits = data->jvc_bits;
+ } else if (data->jvc_bits == data->jvc_old_bits) {
+ IR_dprintk(1, "JVC repeat\n");
+ ir_repeat(input_dev);
+ } else {
+ IR_dprintk(1, "JVC invalid repeat msg\n");
+ break;
+ }
+
+ data->count = 0;
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "JVC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_jvc_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_jvc_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler jvc_handler = {
+ .decode = ir_jvc_decode,
+ .raw_register = ir_jvc_register,
+ .raw_unregister = ir_jvc_unregister,
+};
+
+static int __init ir_jvc_decode_init(void)
+{
+ ir_raw_handler_register(&jvc_handler);
+
+ printk(KERN_INFO "IR JVC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_jvc_decode_exit(void)
+{
+ ir_raw_handler_unregister(&jvc_handler);
+}
+
+module_init(ir_jvc_decode_init);
+module_exit(ir_jvc_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("JVC IR protocol decoder");
diff --git a/drivers/media/IR/ir-keymaps.c b/drivers/media/IR/ir-keymaps.c
deleted file mode 100644
index 0efdefe75f3..00000000000
--- a/drivers/media/IR/ir-keymaps.c
+++ /dev/null
@@ -1,3494 +0,0 @@
-/*
- Keytables for supported remote controls, used on drivers/media
- devices.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/*
- * NOTICE FOR DEVELOPERS:
- * The IR mappings should be as close as possible to what's
- * specified at:
- * http://linuxtv.org/wiki/index.php/Remote_Controllers
- */
-#include <linux/module.h>
-
-#include <linux/input.h>
-#include <media/ir-common.h>
-
-/* empty keytable, can be used as placeholder for not-yet created keytables */
-static struct ir_scancode ir_codes_empty[] = {
- { 0x2a, KEY_COFFEE },
-};
-
-struct ir_scancode_table ir_codes_empty_table = {
- .scan = ir_codes_empty,
- .size = ARRAY_SIZE(ir_codes_empty),
-};
-EXPORT_SYMBOL_GPL(ir_codes_empty_table);
-
-/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
-static struct ir_scancode ir_codes_proteus_2309[] = {
- /* numeric */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x5c, KEY_POWER }, /* power */
- { 0x20, KEY_ZOOM }, /* full screen */
- { 0x0f, KEY_BACKSPACE }, /* recall */
- { 0x1b, KEY_ENTER }, /* mute */
- { 0x41, KEY_RECORD }, /* record */
- { 0x43, KEY_STOP }, /* stop */
- { 0x16, KEY_S },
- { 0x1a, KEY_POWER2 }, /* off */
- { 0x2e, KEY_RED },
- { 0x1f, KEY_CHANNELDOWN }, /* channel - */
- { 0x1c, KEY_CHANNELUP }, /* channel + */
- { 0x10, KEY_VOLUMEDOWN }, /* volume - */
- { 0x1e, KEY_VOLUMEUP }, /* volume + */
- { 0x14, KEY_F1 },
-};
-
-struct ir_scancode_table ir_codes_proteus_2309_table = {
- .scan = ir_codes_proteus_2309,
- .size = ARRAY_SIZE(ir_codes_proteus_2309),
-};
-EXPORT_SYMBOL_GPL(ir_codes_proteus_2309_table);
-
-/* Matt Jesson <dvb@jesson.eclipse.co.uk */
-static struct ir_scancode ir_codes_avermedia_dvbt[] = {
- { 0x28, KEY_0 }, /* '0' / 'enter' */
- { 0x22, KEY_1 }, /* '1' */
- { 0x12, KEY_2 }, /* '2' / 'up arrow' */
- { 0x32, KEY_3 }, /* '3' */
- { 0x24, KEY_4 }, /* '4' / 'left arrow' */
- { 0x14, KEY_5 }, /* '5' */
- { 0x34, KEY_6 }, /* '6' / 'right arrow' */
- { 0x26, KEY_7 }, /* '7' */
- { 0x16, KEY_8 }, /* '8' / 'down arrow' */
- { 0x36, KEY_9 }, /* '9' */
-
- { 0x20, KEY_LIST }, /* 'source' */
- { 0x10, KEY_TEXT }, /* 'teletext' */
- { 0x00, KEY_POWER }, /* 'power' */
- { 0x04, KEY_AUDIO }, /* 'audio' */
- { 0x06, KEY_ZOOM }, /* 'full screen' */
- { 0x18, KEY_VIDEO }, /* 'display' */
- { 0x38, KEY_SEARCH }, /* 'loop' */
- { 0x08, KEY_INFO }, /* 'preview' */
- { 0x2a, KEY_REWIND }, /* 'backward <<' */
- { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
- { 0x3a, KEY_RECORD }, /* 'capture' */
- { 0x0a, KEY_MUTE }, /* 'mute' */
- { 0x2c, KEY_RECORD }, /* 'record' */
- { 0x1c, KEY_PAUSE }, /* 'pause' */
- { 0x3c, KEY_STOP }, /* 'stop' */
- { 0x0c, KEY_PLAY }, /* 'play' */
- { 0x2e, KEY_RED }, /* 'red' */
- { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
- { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
- { 0x21, KEY_GREEN }, /* 'green' */
- { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
- { 0x31, KEY_CHANNELUP }, /* 'channel +' */
- { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
- { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
-};
-
-struct ir_scancode_table ir_codes_avermedia_dvbt_table = {
- .scan = ir_codes_avermedia_dvbt,
- .size = ARRAY_SIZE(ir_codes_avermedia_dvbt),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_dvbt_table);
-
-/* Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_avermedia_m135a[] = {
- { 0x00, KEY_POWER2 },
- { 0x2e, KEY_DOT }, /* '.' */
- { 0x01, KEY_MODE }, /* TV/FM */
-
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x11, KEY_0 },
-
- { 0x13, KEY_RIGHT }, /* -> */
- { 0x12, KEY_LEFT }, /* <- */
-
- { 0x17, KEY_SLEEP }, /* Capturar Imagem */
- { 0x10, KEY_SHUFFLE }, /* Amostra */
-
- /* FIXME: The keys bellow aren't ok */
-
- { 0x43, KEY_CHANNELUP },
- { 0x42, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ENTER },
-
- { 0x14, KEY_MUTE },
- { 0x08, KEY_AUDIO },
-
- { 0x03, KEY_TEXT },
- { 0x04, KEY_EPG },
- { 0x2b, KEY_TV2 }, /* TV2 */
-
- { 0x1d, KEY_RED },
- { 0x1c, KEY_YELLOW },
- { 0x41, KEY_GREEN },
- { 0x40, KEY_BLUE },
-
- { 0x1a, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x18, KEY_PLAY },
- { 0x1b, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_avermedia_m135a_table = {
- .scan = ir_codes_avermedia_m135a,
- .size = ARRAY_SIZE(ir_codes_avermedia_m135a),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_m135a_table);
-
-/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
-static struct ir_scancode ir_codes_avermedia_cardbus[] = {
- { 0x00, KEY_POWER },
- { 0x01, KEY_TUNER }, /* TV/FM */
- { 0x03, KEY_TEXT }, /* Teletext */
- { 0x04, KEY_EPG },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_AUDIO },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_ZOOM }, /* Full screen */
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
- { 0x11, KEY_0 },
- { 0x12, KEY_INFO },
- { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_EDIT }, /* Autoscan */
- { 0x17, KEY_SAVE }, /* Screenshot */
- { 0x18, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_STOP },
- { 0x1c, KEY_FASTFORWARD },
- { 0x1d, KEY_REWIND },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x22, KEY_SLEEP }, /* Sleep */
- { 0x23, KEY_ZOOM }, /* Aspect */
- { 0x26, KEY_SCREEN }, /* Pos */
- { 0x27, KEY_ANGLE }, /* Size */
- { 0x28, KEY_SELECT }, /* Select */
- { 0x29, KEY_BLUE }, /* Blue/Picture */
- { 0x2a, KEY_BACKSPACE }, /* Back */
- { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
- { 0x2c, KEY_DOWN },
- { 0x2e, KEY_DOT },
- { 0x2f, KEY_TV }, /* Live TV */
- { 0x32, KEY_LEFT },
- { 0x33, KEY_CLEAR }, /* Clear */
- { 0x35, KEY_RED }, /* Red/TV */
- { 0x36, KEY_UP },
- { 0x37, KEY_HOME }, /* Home */
- { 0x39, KEY_GREEN }, /* Green/Video */
- { 0x3d, KEY_YELLOW }, /* Yellow/Music */
- { 0x3e, KEY_OK }, /* Ok */
- { 0x3f, KEY_RIGHT },
- { 0x40, KEY_NEXT }, /* Next */
- { 0x41, KEY_PREVIOUS }, /* Previous */
- { 0x42, KEY_CHANNELDOWN }, /* Channel down */
- { 0x43, KEY_CHANNELUP }, /* Channel up */
-};
-
-struct ir_scancode_table ir_codes_avermedia_cardbus_table = {
- .scan = ir_codes_avermedia_cardbus,
- .size = ARRAY_SIZE(ir_codes_avermedia_cardbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_cardbus_table);
-
-/* Attila Kondoros <attila.kondoros@chello.hu> */
-static struct ir_scancode ir_codes_apac_viewcomp[] = {
-
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
- { 0x17, KEY_LAST }, /* +100 */
- { 0x0a, KEY_LIST }, /* recall */
-
-
- { 0x1c, KEY_TUNER }, /* TV/FM */
- { 0x15, KEY_SEARCH }, /* scan */
- { 0x12, KEY_POWER }, /* power */
- { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
- { 0x1b, KEY_VOLUMEUP }, /* vol down */
- { 0x1e, KEY_CHANNELDOWN }, /* chn up */
- { 0x1a, KEY_CHANNELUP }, /* chn down */
-
- { 0x11, KEY_VIDEO }, /* video */
- { 0x0f, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MUTE }, /* mute/unmute */
- { 0x10, KEY_TEXT }, /* min */
-
- { 0x0d, KEY_STOP }, /* freeze */
- { 0x0e, KEY_RECORD }, /* record */
- { 0x1d, KEY_PLAYPAUSE }, /* stop */
- { 0x19, KEY_PLAY }, /* play */
-
- { 0x16, KEY_GOTO }, /* osd */
- { 0x14, KEY_REFRESH }, /* default */
- { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
- { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
-};
-
-struct ir_scancode_table ir_codes_apac_viewcomp_table = {
- .scan = ir_codes_apac_viewcomp,
- .size = ARRAY_SIZE(ir_codes_apac_viewcomp),
-};
-EXPORT_SYMBOL_GPL(ir_codes_apac_viewcomp_table);
-
-/* ---------------------------------------------------------------------- */
-
-static struct ir_scancode ir_codes_pixelview[] = {
-
- { 0x1e, KEY_POWER }, /* power */
- { 0x07, KEY_MEDIA }, /* source */
- { 0x1c, KEY_SEARCH }, /* scan */
-
-
- { 0x03, KEY_TUNER }, /* TV/FM */
-
- { 0x00, KEY_RECORD },
- { 0x08, KEY_STOP },
- { 0x11, KEY_PLAY },
-
- { 0x1a, KEY_PLAYPAUSE }, /* freeze */
- { 0x19, KEY_ZOOM }, /* zoom */
- { 0x0f, KEY_TEXT }, /* min */
-
- { 0x01, KEY_1 },
- { 0x0b, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x05, KEY_4 },
- { 0x09, KEY_5 },
- { 0x15, KEY_6 },
- { 0x06, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x12, KEY_9 },
- { 0x02, KEY_0 },
- { 0x10, KEY_LAST }, /* +100 */
- { 0x13, KEY_LIST }, /* recall */
-
- { 0x1f, KEY_CHANNELUP }, /* chn down */
- { 0x17, KEY_CHANNELDOWN }, /* chn up */
- { 0x16, KEY_VOLUMEUP }, /* vol down */
- { 0x14, KEY_VOLUMEDOWN }, /* vol up */
-
- { 0x04, KEY_KPMINUS }, /* <<< */
- { 0x0e, KEY_SETUP }, /* function */
- { 0x0c, KEY_KPPLUS }, /* >>> */
-
- { 0x0d, KEY_GOTO }, /* mts */
- { 0x1d, KEY_REFRESH }, /* reset */
- { 0x18, KEY_MUTE }, /* mute/unmute */
-};
-
-struct ir_scancode_table ir_codes_pixelview_table = {
- .scan = ir_codes_pixelview,
- .size = ARRAY_SIZE(ir_codes_pixelview),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_table);
-
-/*
- Mauro Carvalho Chehab <mchehab@infradead.org>
- present on PV MPEG 8000GT
- */
-static struct ir_scancode ir_codes_pixelview_new[] = {
- { 0x3c, KEY_TIME }, /* Timeshift */
- { 0x12, KEY_POWER },
-
- { 0x3d, KEY_1 },
- { 0x38, KEY_2 },
- { 0x18, KEY_3 },
- { 0x35, KEY_4 },
- { 0x39, KEY_5 },
- { 0x15, KEY_6 },
- { 0x36, KEY_7 },
- { 0x3a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3e, KEY_0 },
-
- { 0x1c, KEY_AGAIN }, /* LOOP */
- { 0x3f, KEY_MEDIA }, /* Source */
- { 0x1f, KEY_LAST }, /* +100 */
- { 0x1b, KEY_MUTE },
-
- { 0x17, KEY_CHANNELDOWN },
- { 0x16, KEY_CHANNELUP },
- { 0x10, KEY_VOLUMEUP },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x13, KEY_ZOOM },
-
- { 0x19, KEY_CAMERA }, /* SNAPSHOT */
- { 0x1a, KEY_SEARCH }, /* scan */
-
- { 0x37, KEY_REWIND }, /* << */
- { 0x32, KEY_RECORD }, /* o (red) */
- { 0x33, KEY_FORWARD }, /* >> */
- { 0x11, KEY_STOP }, /* square */
- { 0x3b, KEY_PLAY }, /* > */
- { 0x30, KEY_PLAYPAUSE }, /* || */
-
- { 0x31, KEY_TV },
- { 0x34, KEY_RADIO },
-};
-
-struct ir_scancode_table ir_codes_pixelview_new_table = {
- .scan = ir_codes_pixelview_new,
- .size = ARRAY_SIZE(ir_codes_pixelview_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_new_table);
-
-static struct ir_scancode ir_codes_nebula[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_TV },
- { 0x0b, KEY_AUX },
- { 0x0c, KEY_DVD },
- { 0x0d, KEY_POWER },
- { 0x0e, KEY_MHP }, /* labelled 'Picture' */
- { 0x0f, KEY_AUDIO },
- { 0x10, KEY_INFO },
- { 0x11, KEY_F13 }, /* 16:9 */
- { 0x12, KEY_F14 }, /* 14:9 */
- { 0x13, KEY_EPG },
- { 0x14, KEY_EXIT },
- { 0x15, KEY_MENU },
- { 0x16, KEY_UP },
- { 0x17, KEY_DOWN },
- { 0x18, KEY_LEFT },
- { 0x19, KEY_RIGHT },
- { 0x1a, KEY_ENTER },
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x1d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_RED },
- { 0x20, KEY_GREEN },
- { 0x21, KEY_YELLOW },
- { 0x22, KEY_BLUE },
- { 0x23, KEY_SUBTITLE },
- { 0x24, KEY_F15 }, /* AD */
- { 0x25, KEY_TEXT },
- { 0x26, KEY_MUTE },
- { 0x27, KEY_REWIND },
- { 0x28, KEY_STOP },
- { 0x29, KEY_PLAY },
- { 0x2a, KEY_FASTFORWARD },
- { 0x2b, KEY_F16 }, /* chapter */
- { 0x2c, KEY_PAUSE },
- { 0x2d, KEY_PLAY },
- { 0x2e, KEY_RECORD },
- { 0x2f, KEY_F17 }, /* picture in picture */
- { 0x30, KEY_KPPLUS }, /* zoom in */
- { 0x31, KEY_KPMINUS }, /* zoom out */
- { 0x32, KEY_F18 }, /* capture */
- { 0x33, KEY_F19 }, /* web */
- { 0x34, KEY_EMAIL },
- { 0x35, KEY_PHONE },
- { 0x36, KEY_PC },
-};
-
-struct ir_scancode_table ir_codes_nebula_table = {
- .scan = ir_codes_nebula,
- .size = ARRAY_SIZE(ir_codes_nebula),
-};
-EXPORT_SYMBOL_GPL(ir_codes_nebula_table);
-
-/* DigitalNow DNTV Live DVB-T Remote */
-static struct ir_scancode ir_codes_dntv_live_dvb_t[] = {
- { 0x00, KEY_ESC }, /* 'go up a level?' */
- /* Keys 0 to 9 */
- { 0x0a, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_TUNER }, /* tv/fm */
- { 0x0c, KEY_SEARCH }, /* scan */
- { 0x0d, KEY_STOP },
- { 0x0e, KEY_PAUSE },
- { 0x0f, KEY_LIST }, /* source */
-
- { 0x10, KEY_MUTE },
- { 0x11, KEY_REWIND }, /* backward << */
- { 0x12, KEY_POWER },
- { 0x13, KEY_CAMERA }, /* snap */
- { 0x14, KEY_AUDIO }, /* stereo */
- { 0x15, KEY_CLEAR }, /* reset */
- { 0x16, KEY_PLAY },
- { 0x17, KEY_ENTER },
- { 0x18, KEY_ZOOM }, /* full screen */
- { 0x19, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1c, KEY_INFO }, /* preview */
- { 0x1d, KEY_RECORD }, /* record */
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvb_t_table = {
- .scan = ir_codes_dntv_live_dvb_t,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvb_t),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvb_t_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* IO-DATA BCTV7E Remote */
-static struct ir_scancode ir_codes_iodata_bctv7e[] = {
- { 0x40, KEY_TV },
- { 0x20, KEY_RADIO }, /* FM */
- { 0x60, KEY_EPG },
- { 0x00, KEY_POWER },
-
- /* Keys 0 to 9 */
- { 0x44, KEY_0 }, /* 10 */
- { 0x50, KEY_1 },
- { 0x30, KEY_2 },
- { 0x70, KEY_3 },
- { 0x48, KEY_4 },
- { 0x28, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x38, KEY_8 },
- { 0x78, KEY_9 },
-
- { 0x10, KEY_L }, /* Live */
- { 0x08, KEY_TIME }, /* Time Shift */
-
- { 0x18, KEY_PLAYPAUSE }, /* Play */
-
- { 0x24, KEY_ENTER }, /* 11 */
- { 0x64, KEY_ESC }, /* 12 */
- { 0x04, KEY_M }, /* Multi */
-
- { 0x54, KEY_VIDEO },
- { 0x34, KEY_CHANNELUP },
- { 0x74, KEY_VOLUMEUP },
- { 0x14, KEY_MUTE },
-
- { 0x4c, KEY_VCR }, /* SVIDEO */
- { 0x2c, KEY_CHANNELDOWN },
- { 0x6c, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ZOOM },
-
- { 0x5c, KEY_PAUSE },
- { 0x3c, KEY_RED }, /* || (red) */
- { 0x7c, KEY_RECORD }, /* recording */
- { 0x1c, KEY_STOP },
-
- { 0x41, KEY_REWIND }, /* backward << */
- { 0x21, KEY_PLAY },
- { 0x61, KEY_FASTFORWARD }, /* forward >> */
- { 0x01, KEY_NEXT }, /* skip >| */
-};
-
-struct ir_scancode_table ir_codes_iodata_bctv7e_table = {
- .scan = ir_codes_iodata_bctv7e,
- .size = ARRAY_SIZE(ir_codes_iodata_bctv7e),
-};
-EXPORT_SYMBOL_GPL(ir_codes_iodata_bctv7e_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* ADS Tech Instant TV DVB-T PCI Remote */
-static struct ir_scancode ir_codes_adstech_dvb_t_pci[] = {
- /* Keys 0 to 9 */
- { 0x4d, KEY_0 },
- { 0x57, KEY_1 },
- { 0x4f, KEY_2 },
- { 0x53, KEY_3 },
- { 0x56, KEY_4 },
- { 0x4e, KEY_5 },
- { 0x5e, KEY_6 },
- { 0x54, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x5c, KEY_9 },
-
- { 0x5b, KEY_POWER },
- { 0x5f, KEY_MUTE },
- { 0x55, KEY_GOTO },
- { 0x5d, KEY_SEARCH },
- { 0x17, KEY_EPG }, /* Guide */
- { 0x1f, KEY_MENU },
- { 0x0f, KEY_UP },
- { 0x46, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x1e, KEY_RIGHT },
- { 0x0e, KEY_SELECT }, /* Enter */
- { 0x5a, KEY_INFO },
- { 0x52, KEY_EXIT },
- { 0x59, KEY_PREVIOUS },
- { 0x51, KEY_NEXT },
- { 0x58, KEY_REWIND },
- { 0x50, KEY_FORWARD },
- { 0x44, KEY_PLAYPAUSE },
- { 0x07, KEY_STOP },
- { 0x1b, KEY_RECORD },
- { 0x13, KEY_TUNER }, /* Live */
- { 0x0a, KEY_A },
- { 0x12, KEY_B },
- { 0x03, KEY_PROG1 }, /* 1 */
- { 0x01, KEY_PROG2 }, /* 2 */
- { 0x00, KEY_PROG3 }, /* 3 */
- { 0x06, KEY_DVD },
- { 0x48, KEY_AUX }, /* Photo */
- { 0x40, KEY_VIDEO },
- { 0x19, KEY_AUDIO }, /* Music */
- { 0x0b, KEY_CHANNELUP },
- { 0x08, KEY_CHANNELDOWN },
- { 0x15, KEY_VOLUMEUP },
- { 0x1c, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_adstech_dvb_t_pci_table = {
- .scan = ir_codes_adstech_dvb_t_pci,
- .size = ARRAY_SIZE(ir_codes_adstech_dvb_t_pci),
-};
-EXPORT_SYMBOL_GPL(ir_codes_adstech_dvb_t_pci_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* MSI TV@nywhere MASTER remote */
-
-static struct ir_scancode ir_codes_msi_tvanywhere[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0c, KEY_MUTE },
- { 0x0f, KEY_SCREEN }, /* Full Screen */
- { 0x10, KEY_FN }, /* Function */
- { 0x11, KEY_TIME }, /* Time shift */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MEDIA }, /* MTS */
- { 0x14, KEY_SLOW },
- { 0x16, KEY_REWIND }, /* backward << */
- { 0x17, KEY_ENTER }, /* Return */
- { 0x18, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_table = {
- .scan = ir_codes_msi_tvanywhere,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_table);
-
-/* ---------------------------------------------------------------------- */
-
-/*
- Keycodes for the remote of the MSI TV@nywhere Plus. The controller IC on the card
- is marked "KS003". The controller is an I2C device at address 0x30, but does not seem
- to respond to probes until a read is performed from a valid device.
- I don't know why...
-
- Note: This remote may be of similar or identical design to the
- Pixelview remote (?). The raw codes and duplicate button codes
- appear to be the same.
-
- Henry Wong <henry@stuffedcow.net>
- Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
-
-*/
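A minimal sketch of the workaround implied by the comment above, assuming the bridge driver already holds i2c_client handles for the KS003 and for some other device on the bus that is known to respond; the helper name and both client arguments are illustrative, not taken from the original driver:

#include <linux/i2c.h>

static bool ks003_responds(struct i2c_client *known_good,
			   struct i2c_client *ks003)
{
	/* Throw-away read from a device that is known to ACK... */
	i2c_smbus_read_byte(known_good);

	/* ...after which the KS003 at 0x30 is said to answer probes. */
	return i2c_smbus_read_byte(ks003) >= 0;
}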
-
-static struct ir_scancode ir_codes_msi_tvanywhere_plus[] = {
-
-/* ---- Remote Button Layout ----
-
- POWER SOURCE SCAN MUTE
- TV/FM 1 2 3
- |> 4 5 6
- <| 7 8 9
- ^^UP 0 + RECALL
- vvDN RECORD STOP PLAY
-
- MINIMIZE ZOOM
-
- CH+
- VOL- VOL+
- CH-
-
- SNAPSHOT MTS
-
- << FUNC >> RESET
-*/
-
- { 0x01, KEY_1 }, /* 1 */
- { 0x0b, KEY_2 }, /* 2 */
- { 0x1b, KEY_3 }, /* 3 */
- { 0x05, KEY_4 }, /* 4 */
- { 0x09, KEY_5 }, /* 5 */
- { 0x15, KEY_6 }, /* 6 */
- { 0x06, KEY_7 }, /* 7 */
- { 0x0a, KEY_8 }, /* 8 */
- { 0x12, KEY_9 }, /* 9 */
- { 0x02, KEY_0 }, /* 0 */
- { 0x10, KEY_KPPLUS }, /* + */
- { 0x13, KEY_AGAIN }, /* Recall */
-
- { 0x1e, KEY_POWER }, /* Power */
- { 0x07, KEY_TUNER }, /* Source */
- { 0x1c, KEY_SEARCH }, /* Scan */
- { 0x18, KEY_MUTE }, /* Mute */
-
- { 0x03, KEY_RADIO }, /* TV/FM */
- /* The next four keys are duplicates that appear to send the
- same IR code as Ch+, Ch-, >>, and << . The raw code assigned
- to them is the actual code + 0x20 - they will never be
- detected as such unless some way is discovered to distinguish
- these buttons from those that have the same code. */
- { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
- { 0x37, KEY_LEFT }, /* <| and Ch- */
- { 0x2c, KEY_UP }, /* ^^Up and >> */
- { 0x24, KEY_DOWN }, /* vvDn and << */
-
- { 0x00, KEY_RECORD }, /* Record */
- { 0x08, KEY_STOP }, /* Stop */
- { 0x11, KEY_PLAY }, /* Play */
-
- { 0x0f, KEY_CLOSE }, /* Minimize */
- { 0x19, KEY_ZOOM }, /* Zoom */
- { 0x1a, KEY_CAMERA }, /* Snapshot */
- { 0x0d, KEY_LANGUAGE }, /* MTS */
-
- { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
- { 0x16, KEY_VOLUMEUP }, /* Vol+ */
- { 0x17, KEY_CHANNELDOWN }, /* Ch- */
- { 0x1f, KEY_CHANNELUP }, /* Ch+ */
-
- { 0x04, KEY_REWIND }, /* << */
- { 0x0e, KEY_MENU }, /* Function */
- { 0x0c, KEY_FASTFORWARD }, /* >> */
- { 0x1d, KEY_RESTART }, /* Reset */
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_plus_table = {
- .scan = ir_codes_msi_tvanywhere_plus,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere_plus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_plus_table);
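To make the duplicate-button note embedded in the table above concrete: those four entries were stored as the real scancode plus 0x20, so subtracting 0x20 lands exactly on the buttons they collide with (a worked check against the entries above, for illustration only):

/*
 *	table entry		collides with (entry - 0x20)
 *	0x3f  KEY_RIGHT  "|>"	0x1f  KEY_CHANNELUP   "Ch+"
 *	0x37  KEY_LEFT   "<|"	0x17  KEY_CHANNELDOWN "Ch-"
 *	0x2c  KEY_UP     "^^Up"	0x0c  KEY_FASTFORWARD ">>"
 *	0x24  KEY_DOWN   "vvDn"	0x04  KEY_REWIND      "<<"
 */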
-
-/* ---------------------------------------------------------------------- */
-
-/* Cinergy 1400 DVB-T */
-static struct ir_scancode ir_codes_cinergy_1400[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x06, KEY_5 },
- { 0x07, KEY_6 },
- { 0x08, KEY_7 },
- { 0x09, KEY_8 },
- { 0x0a, KEY_9 },
- { 0x0c, KEY_0 },
-
- { 0x0b, KEY_VIDEO },
- { 0x0d, KEY_REFRESH },
- { 0x0e, KEY_SELECT },
- { 0x0f, KEY_EPG },
- { 0x10, KEY_UP },
- { 0x11, KEY_LEFT },
- { 0x12, KEY_OK },
- { 0x13, KEY_RIGHT },
- { 0x14, KEY_DOWN },
- { 0x15, KEY_TEXT },
- { 0x16, KEY_INFO },
-
- { 0x17, KEY_RED },
- { 0x18, KEY_GREEN },
- { 0x19, KEY_YELLOW },
- { 0x1a, KEY_BLUE },
-
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_VOLUMEUP },
- { 0x1d, KEY_MUTE },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_CHANNELDOWN },
-
- { 0x40, KEY_PAUSE },
- { 0x4c, KEY_PLAY },
- { 0x58, KEY_RECORD },
- { 0x54, KEY_PREVIOUS },
- { 0x48, KEY_STOP },
- { 0x5c, KEY_NEXT },
-};
-
-struct ir_scancode_table ir_codes_cinergy_1400_table = {
- .scan = ir_codes_cinergy_1400,
- .size = ARRAY_SIZE(ir_codes_cinergy_1400),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_1400_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* AVERTV STUDIO 303 Remote */
-static struct ir_scancode ir_codes_avertv_303[] = {
- { 0x2a, KEY_1 },
- { 0x32, KEY_2 },
- { 0x3a, KEY_3 },
- { 0x4a, KEY_4 },
- { 0x52, KEY_5 },
- { 0x5a, KEY_6 },
- { 0x6a, KEY_7 },
- { 0x72, KEY_8 },
- { 0x7a, KEY_9 },
- { 0x0e, KEY_0 },
-
- { 0x02, KEY_POWER },
- { 0x22, KEY_VIDEO },
- { 0x42, KEY_AUDIO },
- { 0x62, KEY_ZOOM },
- { 0x0a, KEY_TV },
- { 0x12, KEY_CD },
- { 0x1a, KEY_TEXT },
-
- { 0x16, KEY_SUBTITLE },
- { 0x1e, KEY_REWIND },
- { 0x06, KEY_PRINT },
-
- { 0x2e, KEY_SEARCH },
- { 0x36, KEY_SLEEP },
- { 0x3e, KEY_SHUFFLE },
- { 0x26, KEY_MUTE },
-
- { 0x4e, KEY_RECORD },
- { 0x56, KEY_PAUSE },
- { 0x5e, KEY_STOP },
- { 0x46, KEY_PLAY },
-
- { 0x6e, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x66, KEY_YELLOW },
- { 0x03, KEY_BLUE },
-
- { 0x76, KEY_LEFT },
- { 0x7e, KEY_RIGHT },
- { 0x13, KEY_DOWN },
- { 0x1b, KEY_UP },
-};
-
-struct ir_scancode_table ir_codes_avertv_303_table = {
- .scan = ir_codes_avertv_303,
- .size = ARRAY_SIZE(ir_codes_avertv_303),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avertv_303_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* DigitalNow DNTV Live! DVB-T Pro Remote */
-static struct ir_scancode ir_codes_dntv_live_dvbt_pro[] = {
- { 0x16, KEY_POWER },
- { 0x5b, KEY_HOME },
-
- { 0x55, KEY_TV }, /* live tv */
- { 0x58, KEY_TUNER }, /* digital Radio */
- { 0x5a, KEY_RADIO }, /* FM radio */
- { 0x59, KEY_DVD }, /* dvd menu */
- { 0x03, KEY_1 },
- { 0x01, KEY_2 },
- { 0x06, KEY_3 },
- { 0x09, KEY_4 },
- { 0x1d, KEY_5 },
- { 0x1f, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x19, KEY_8 },
- { 0x1b, KEY_9 },
- { 0x0c, KEY_CANCEL },
- { 0x15, KEY_0 },
- { 0x4a, KEY_CLEAR },
- { 0x13, KEY_BACK },
- { 0x00, KEY_TAB },
- { 0x4b, KEY_UP },
- { 0x4e, KEY_LEFT },
- { 0x4f, KEY_OK },
- { 0x52, KEY_RIGHT },
- { 0x51, KEY_DOWN },
- { 0x1e, KEY_VOLUMEUP },
- { 0x0a, KEY_VOLUMEDOWN },
- { 0x02, KEY_CHANNELDOWN },
- { 0x05, KEY_CHANNELUP },
- { 0x11, KEY_RECORD },
- { 0x14, KEY_PLAY },
- { 0x4c, KEY_PAUSE },
- { 0x1a, KEY_STOP },
- { 0x40, KEY_REWIND },
- { 0x12, KEY_FASTFORWARD },
- { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x42, KEY_NEXTSONG }, /* skip >| */
- { 0x54, KEY_CAMERA }, /* capture */
- { 0x50, KEY_LANGUAGE }, /* sap */
- { 0x47, KEY_TV2 }, /* pip */
- { 0x4d, KEY_SCREEN },
- { 0x43, KEY_SUBTITLE },
- { 0x10, KEY_MUTE },
- { 0x49, KEY_AUDIO }, /* l/r */
- { 0x07, KEY_SLEEP },
- { 0x08, KEY_VIDEO }, /* a/v */
- { 0x0e, KEY_PREVIOUS }, /* recall */
- { 0x45, KEY_ZOOM }, /* zoom + */
- { 0x46, KEY_ANGLE }, /* zoom - */
- { 0x56, KEY_RED },
- { 0x57, KEY_GREEN },
- { 0x5c, KEY_YELLOW },
- { 0x5d, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvbt_pro_table = {
- .scan = ir_codes_dntv_live_dvbt_pro,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvbt_pro),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvbt_pro_table);
-
-static struct ir_scancode ir_codes_em_terratec[] = {
- { 0x01, KEY_CHANNEL },
- { 0x02, KEY_SELECT },
- { 0x03, KEY_MUTE },
- { 0x04, KEY_POWER },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_CHANNELUP },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_CHANNELDOWN },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_0 },
- { 0x12, KEY_MENU },
- { 0x13, KEY_PRINT },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x16, KEY_PAUSE },
- { 0x18, KEY_RECORD },
- { 0x19, KEY_REWIND },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_FORWARD },
- { 0x1c, KEY_BACKSPACE },
- { 0x1e, KEY_STOP },
- { 0x40, KEY_ZOOM },
-};
-
-struct ir_scancode_table ir_codes_em_terratec_table = {
- .scan = ir_codes_em_terratec,
- .size = ARRAY_SIZE(ir_codes_em_terratec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_em_terratec_table);
-
-static struct ir_scancode ir_codes_pinnacle_grey[] = {
- { 0x3a, KEY_0 },
- { 0x31, KEY_1 },
- { 0x32, KEY_2 },
- { 0x33, KEY_3 },
- { 0x34, KEY_4 },
- { 0x35, KEY_5 },
- { 0x36, KEY_6 },
- { 0x37, KEY_7 },
- { 0x38, KEY_8 },
- { 0x39, KEY_9 },
-
- { 0x2f, KEY_POWER },
-
- { 0x2e, KEY_P },
- { 0x1f, KEY_L },
- { 0x2b, KEY_I },
-
- { 0x2d, KEY_SCREEN },
- { 0x1e, KEY_ZOOM },
- { 0x1b, KEY_VOLUMEUP },
- { 0x0f, KEY_VOLUMEDOWN },
- { 0x17, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x25, KEY_INFO },
-
- { 0x3c, KEY_MUTE },
-
- { 0x3d, KEY_LEFT },
- { 0x3b, KEY_RIGHT },
-
- { 0x3f, KEY_UP },
- { 0x3e, KEY_DOWN },
- { 0x1a, KEY_ENTER },
-
- { 0x1d, KEY_MENU },
- { 0x19, KEY_AGAIN },
- { 0x16, KEY_PREVIOUSSONG },
- { 0x13, KEY_NEXTSONG },
- { 0x15, KEY_PAUSE },
- { 0x0e, KEY_REWIND },
- { 0x0d, KEY_PLAY },
- { 0x0b, KEY_STOP },
- { 0x07, KEY_FORWARD },
- { 0x27, KEY_RECORD },
- { 0x26, KEY_TUNER },
- { 0x29, KEY_TEXT },
- { 0x2a, KEY_MEDIA },
- { 0x18, KEY_EPG },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_grey_table = {
- .scan = ir_codes_pinnacle_grey,
- .size = ARRAY_SIZE(ir_codes_pinnacle_grey),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey_table);
-
-static struct ir_scancode ir_codes_flyvideo[] = {
- { 0x0f, KEY_0 },
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
-
- { 0x0e, KEY_MODE }, /* Air/Cable */
- { 0x11, KEY_VIDEO }, /* Video */
- { 0x15, KEY_AUDIO }, /* Audio */
- { 0x00, KEY_POWER }, /* Power */
- { 0x18, KEY_TUNER }, /* AV Source */
- { 0x02, KEY_ZOOM }, /* Fullscreen */
- { 0x1a, KEY_LANGUAGE }, /* Stereo */
- { 0x1b, KEY_MUTE }, /* Mute */
- { 0x14, KEY_VOLUMEUP }, /* Volume + */
- { 0x17, KEY_VOLUMEDOWN },/* Volume - */
- { 0x12, KEY_CHANNELUP },/* Channel + */
- { 0x13, KEY_CHANNELDOWN },/* Channel - */
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x10, KEY_ENTER }, /* Enter */
-
- { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
- { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
- { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
-};
-
-struct ir_scancode_table ir_codes_flyvideo_table = {
- .scan = ir_codes_flyvideo,
- .size = ARRAY_SIZE(ir_codes_flyvideo),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flyvideo_table);
-
-static struct ir_scancode ir_codes_flydvb[] = {
- { 0x01, KEY_ZOOM }, /* Full Screen */
- { 0x00, KEY_POWER }, /* Power */
-
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x0f, KEY_0 },
- { 0x10, KEY_MUTE }, /* Mute */
- { 0x02, KEY_RADIO }, /* TV/Radio */
- { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
-
- { 0x14, KEY_VOLUMEUP }, /* VOL+ */
- { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
- { 0x12, KEY_CHANNELUP }, /* CH+ */
- { 0x13, KEY_CHANNELDOWN }, /* CH- */
- { 0x1d, KEY_ENTER }, /* Enter */
-
- { 0x1a, KEY_MODE }, /* PIP */
- { 0x18, KEY_TUNER }, /* Source */
-
- { 0x1e, KEY_RECORD }, /* Record/Pause */
- { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
- { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
- { 0x19, KEY_BACK }, /* Rewind << */
- { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
- { 0x1f, KEY_FORWARD }, /* Forward >> */
- { 0x16, KEY_PREVIOUS }, /* Back |<< */
- { 0x11, KEY_STOP }, /* Stop */
- { 0x0e, KEY_NEXT }, /* End >>| */
-};
-
-struct ir_scancode_table ir_codes_flydvb_table = {
- .scan = ir_codes_flydvb,
- .size = ARRAY_SIZE(ir_codes_flydvb),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flydvb_table);
-
-static struct ir_scancode ir_codes_cinergy[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_POWER },
- { 0x0b, KEY_PROG1 }, /* app */
- { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
- { 0x0d, KEY_CHANNELUP }, /* channel */
- { 0x0e, KEY_CHANNELDOWN }, /* channel- */
- { 0x0f, KEY_VOLUMEUP },
- { 0x10, KEY_VOLUMEDOWN },
- { 0x11, KEY_TUNER }, /* AV */
- { 0x12, KEY_NUMLOCK }, /* -/-- */
- { 0x13, KEY_AUDIO }, /* audio */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_UP },
- { 0x16, KEY_DOWN },
- { 0x17, KEY_LEFT },
- { 0x18, KEY_RIGHT },
- { 0x19, BTN_LEFT, },
- { 0x1a, BTN_RIGHT, },
- { 0x1b, KEY_WWW }, /* text */
- { 0x1c, KEY_REWIND },
- { 0x1d, KEY_FORWARD },
- { 0x1e, KEY_RECORD },
- { 0x1f, KEY_PLAY },
- { 0x20, KEY_PREVIOUSSONG },
- { 0x21, KEY_NEXTSONG },
- { 0x22, KEY_PAUSE },
- { 0x23, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_cinergy_table = {
- .scan = ir_codes_cinergy,
- .size = ARRAY_SIZE(ir_codes_cinergy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_table);
-
-/* Alfons Geser <a.geser@cox.net>
- * updates from Job D. R. Borges <jobdrb@ig.com.br> */
-static struct ir_scancode ir_codes_eztv[] = {
- { 0x12, KEY_POWER },
- { 0x01, KEY_TV }, /* DVR */
- { 0x15, KEY_DVD }, /* DVD */
- { 0x17, KEY_AUDIO }, /* music */
- /* DVR mode / DVD mode / music mode */
-
- { 0x1b, KEY_MUTE }, /* mute */
- { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
- { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
- { 0x16, KEY_ZOOM }, /* full screen */
- { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
- { 0x1d, KEY_RESTART }, /* playback / angle / del */
- { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
- { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
-
- { 0x31, KEY_HELP }, /* help */
- { 0x32, KEY_MODE }, /* num/memo */
- { 0x33, KEY_ESC }, /* cancel */
-
- { 0x0c, KEY_UP }, /* up */
- { 0x10, KEY_DOWN }, /* down */
- { 0x08, KEY_LEFT }, /* left */
- { 0x04, KEY_RIGHT }, /* right */
- { 0x03, KEY_SELECT }, /* select */
-
- { 0x1f, KEY_REWIND }, /* rewind */
- { 0x20, KEY_PLAYPAUSE },/* play/pause */
- { 0x29, KEY_FORWARD }, /* forward */
- { 0x14, KEY_AGAIN }, /* repeat */
- { 0x2b, KEY_RECORD }, /* recording */
- { 0x2c, KEY_STOP }, /* stop */
- { 0x2d, KEY_PLAY }, /* play */
- { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
-
- { 0x00, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x2a, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x18, KEY_CHANNELUP },/* CH.tracking up */
- { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */
-
- { 0x13, KEY_ENTER }, /* enter */
- { 0x21, KEY_DOT }, /* . (decimal dot) */
-};
-
-struct ir_scancode_table ir_codes_eztv_table = {
- .scan = ir_codes_eztv,
- .size = ARRAY_SIZE(ir_codes_eztv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_eztv_table);
-
-/* Alex Hermann <gaaf@gmx.net> */
-static struct ir_scancode ir_codes_avermedia[] = {
- { 0x28, KEY_1 },
- { 0x18, KEY_2 },
- { 0x38, KEY_3 },
- { 0x24, KEY_4 },
- { 0x14, KEY_5 },
- { 0x34, KEY_6 },
- { 0x2c, KEY_7 },
- { 0x1c, KEY_8 },
- { 0x3c, KEY_9 },
- { 0x22, KEY_0 },
-
- { 0x20, KEY_TV }, /* TV/FM */
- { 0x10, KEY_CD }, /* CD */
- { 0x30, KEY_TEXT }, /* TELETEXT */
- { 0x00, KEY_POWER }, /* POWER */
-
- { 0x08, KEY_VIDEO }, /* VIDEO */
- { 0x04, KEY_AUDIO }, /* AUDIO */
- { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
-
- { 0x12, KEY_SUBTITLE }, /* DISPLAY */
- { 0x32, KEY_REWIND }, /* LOOP */
- { 0x02, KEY_PRINT }, /* PREVIEW */
-
- { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
- { 0x1a, KEY_SLEEP }, /* FREEZE */
- { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
- { 0x0a, KEY_MUTE }, /* MUTE */
-
- { 0x26, KEY_RECORD }, /* RECORD */
- { 0x16, KEY_PAUSE }, /* PAUSE */
- { 0x36, KEY_STOP }, /* STOP */
- { 0x06, KEY_PLAY }, /* PLAY */
-
- { 0x2e, KEY_RED }, /* RED */
- { 0x21, KEY_GREEN }, /* GREEN */
- { 0x0e, KEY_YELLOW }, /* YELLOW */
- { 0x01, KEY_BLUE }, /* BLUE */
-
- { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
- { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
- { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
- { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
-};
-
-struct ir_scancode_table ir_codes_avermedia_table = {
- .scan = ir_codes_avermedia,
- .size = ARRAY_SIZE(ir_codes_avermedia),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_table);
-
-static struct ir_scancode ir_codes_videomate_tv_pvr[] = {
- { 0x14, KEY_MUTE },
- { 0x24, KEY_ZOOM },
-
- { 0x01, KEY_DVD },
- { 0x23, KEY_RADIO },
- { 0x00, KEY_TV },
-
- { 0x0a, KEY_REWIND },
- { 0x08, KEY_PLAYPAUSE },
- { 0x0f, KEY_FORWARD },
-
- { 0x02, KEY_PREVIOUS },
- { 0x07, KEY_STOP },
- { 0x06, KEY_NEXT },
-
- { 0x0c, KEY_UP },
- { 0x0e, KEY_DOWN },
- { 0x0b, KEY_LEFT },
- { 0x0d, KEY_RIGHT },
- { 0x11, KEY_OK },
-
- { 0x03, KEY_MENU },
- { 0x09, KEY_SETUP },
- { 0x05, KEY_VIDEO },
- { 0x22, KEY_CHANNEL },
-
- { 0x12, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x10, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
-
- { 0x04, KEY_RECORD },
-
- { 0x16, KEY_1 },
- { 0x17, KEY_2 },
- { 0x18, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1a, KEY_5 },
- { 0x1b, KEY_6 },
- { 0x1c, KEY_7 },
- { 0x1d, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x1f, KEY_0 },
-
- { 0x20, KEY_LANGUAGE },
- { 0x21, KEY_SLEEP },
-};
-
-struct ir_scancode_table ir_codes_videomate_tv_pvr_table = {
- .scan = ir_codes_videomate_tv_pvr,
- .size = ARRAY_SIZE(ir_codes_videomate_tv_pvr),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_tv_pvr_table);
-
-/* Michael Tokarev <mjt@tls.msk.ru>
- http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
- keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
- least, and probably other cards too.
- The "ascii-art picture" below (in comments; the first row
- is the keycode in hex, and the subsequent row(s) show
- the button labels, several variants when appropriate)
- helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_manli[] = {
-
- /* 0x1c 0x12 *
- * FUNCTION POWER *
- * FM (|) *
- * */
- { 0x1c, KEY_RADIO }, /*XXX*/
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 +100 *
- * PLUS *
- * */
- { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
- { 0x00, KEY_0 },
- { 0x17, KEY_DIGITS }, /*XXX*/
-
- /* 0x14 0x10 *
- * MENU INFO *
- * OSD */
- { 0x14, KEY_MENU },
- { 0x10, KEY_INFO },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x15 *
- * Down *
- * */
- { 0x0b, KEY_UP },
- { 0x18, KEY_LEFT },
- { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
- { 0x0c, KEY_RIGHT },
- { 0x15, KEY_DOWN },
-
- /* 0x11 0x0d *
- * TV/AV MODE *
- * SOURCE STEREO *
- * */
- { 0x11, KEY_TV }, /*XXX*/
- { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
-
- /* 0x0f 0x1b 0x1a *
- * AUDIO Vol+ Chan+ *
- * TIMESHIFT??? *
- * *
- * 0x0e 0x1f 0x1e *
- * SLEEP Vol- Chan- *
- * */
- { 0x0f, KEY_AUDIO },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x0e, KEY_TIME },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1e, KEY_CHANNELDOWN },
-
- /* 0x13 0x19 *
- * MUTE SNAPSHOT*
- * */
- { 0x13, KEY_MUTE },
- { 0x19, KEY_CAMERA },
-
- /* 0x1d unused ? */
-};
-
-struct ir_scancode_table ir_codes_manli_table = {
- .scan = ir_codes_manli,
- .size = ARRAY_SIZE(ir_codes_manli),
-};
-EXPORT_SYMBOL_GPL(ir_codes_manli_table);
-
-/* Mike Baikov <mike@baikov.com> */
-static struct ir_scancode ir_codes_gotview7135[] = {
-
- { 0x11, KEY_POWER },
- { 0x35, KEY_TV },
- { 0x1b, KEY_0 },
- { 0x29, KEY_1 },
- { 0x19, KEY_2 },
- { 0x39, KEY_3 },
- { 0x1f, KEY_4 },
- { 0x2c, KEY_5 },
- { 0x21, KEY_6 },
- { 0x24, KEY_7 },
- { 0x18, KEY_8 },
- { 0x2b, KEY_9 },
- { 0x3b, KEY_AGAIN }, /* LOOP */
- { 0x06, KEY_AUDIO },
- { 0x31, KEY_PRINT }, /* PREVIEW */
- { 0x3e, KEY_VIDEO },
- { 0x10, KEY_CHANNELUP },
- { 0x20, KEY_CHANNELDOWN },
- { 0x0c, KEY_VOLUMEDOWN },
- { 0x28, KEY_VOLUMEUP },
- { 0x08, KEY_MUTE },
- { 0x26, KEY_SEARCH }, /* SCAN */
- { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
- { 0x12, KEY_RECORD },
- { 0x32, KEY_STOP },
- { 0x3c, KEY_PLAY },
- { 0x1d, KEY_REWIND },
- { 0x2d, KEY_PAUSE },
- { 0x0d, KEY_FORWARD },
- { 0x05, KEY_ZOOM }, /*FULL*/
-
- { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
- { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
- { 0x1e, KEY_TIME }, /* TIMESHIFT */
- { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
-};
-
-struct ir_scancode_table ir_codes_gotview7135_table = {
- .scan = ir_codes_gotview7135,
- .size = ARRAY_SIZE(ir_codes_gotview7135),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gotview7135_table);
-
-static struct ir_scancode ir_codes_purpletv[] = {
- { 0x03, KEY_POWER },
- { 0x6f, KEY_MUTE },
- { 0x10, KEY_BACKSPACE }, /* Recall */
-
- { 0x11, KEY_0 },
- { 0x04, KEY_1 },
- { 0x05, KEY_2 },
- { 0x06, KEY_3 },
- { 0x08, KEY_4 },
- { 0x09, KEY_5 },
- { 0x0a, KEY_6 },
- { 0x0c, KEY_7 },
- { 0x0d, KEY_8 },
- { 0x0e, KEY_9 },
- { 0x12, KEY_DOT }, /* 100+ */
-
- { 0x07, KEY_VOLUMEUP },
- { 0x0b, KEY_VOLUMEDOWN },
- { 0x1a, KEY_KPPLUS },
- { 0x18, KEY_KPMINUS },
- { 0x15, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x0f, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
- { 0x48, KEY_ZOOM },
-
- { 0x1b, KEY_VIDEO }, /* Video source */
- { 0x1f, KEY_CAMERA }, /* Snapshot */
- { 0x49, KEY_LANGUAGE }, /* MTS Select */
- { 0x19, KEY_SEARCH }, /* Auto Scan */
-
- { 0x4b, KEY_RECORD },
- { 0x46, KEY_PLAY },
- { 0x45, KEY_PAUSE }, /* Pause */
- { 0x44, KEY_STOP },
- { 0x43, KEY_TIME }, /* Time Shift */
- { 0x17, KEY_CHANNEL }, /* SURF CH */
- { 0x40, KEY_FORWARD }, /* Forward ? */
- { 0x42, KEY_REWIND }, /* Backward ? */
-
-};
-
-struct ir_scancode_table ir_codes_purpletv_table = {
- .scan = ir_codes_purpletv,
- .size = ARRAY_SIZE(ir_codes_purpletv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_purpletv_table);
-
-/* Mapping for the 28 key remote control as seen at
- http://www.sednacomputer.com/photo/cardbus-tv.jpg
- Pavel Mihaylov <bin@bash.info>
- Also for the remote bundled with Kozumi KTV-01C card */
-static struct ir_scancode ir_codes_pctv_sedna[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_AGAIN }, /* Recall */
- { 0x0b, KEY_CHANNELUP },
- { 0x0c, KEY_VOLUMEUP },
- { 0x0d, KEY_MODE }, /* Stereo */
- { 0x0e, KEY_STOP },
- { 0x0f, KEY_PREVIOUSSONG },
- { 0x10, KEY_ZOOM },
- { 0x11, KEY_TUNER }, /* Source */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MUTE },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x19, KEY_CAMERA }, /* Snapshot */
- { 0x1a, KEY_NEXTSONG },
- { 0x1b, KEY_TIME }, /* Time Shift */
- { 0x1c, KEY_RADIO }, /* FM Radio */
- { 0x1d, KEY_RECORD },
- { 0x1e, KEY_PAUSE },
- /* additional codes for Kozumi's remote */
- { 0x14, KEY_INFO }, /* OSD */
- { 0x16, KEY_OK }, /* OK */
- { 0x17, KEY_DIGITS }, /* Plus */
- { 0x1f, KEY_PLAY }, /* Play */
-};
-
-struct ir_scancode_table ir_codes_pctv_sedna_table = {
- .scan = ir_codes_pctv_sedna,
- .size = ARRAY_SIZE(ir_codes_pctv_sedna),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pctv_sedna_table);
-
-/* Mark Phalan <phalanm@o2.ie> */
-static struct ir_scancode ir_codes_pv951[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x12, KEY_POWER },
- { 0x10, KEY_MUTE },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x0e, KEY_PAGEUP },
- { 0x1d, KEY_PAGEDOWN },
- { 0x13, KEY_SOUND },
-
- { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
- { 0x16, KEY_SUBTITLE }, /* CC */
- { 0x0d, KEY_TEXT }, /* TTX */
- { 0x0b, KEY_TV }, /* AIR/CBL */
- { 0x11, KEY_PC }, /* PC/TV */
- { 0x17, KEY_OK }, /* CH RTN */
- { 0x19, KEY_MODE }, /* FUNC */
- { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
-
- /* Not sure what to do with these ones! */
- { 0x0f, KEY_SELECT }, /* SOURCE */
- { 0x0a, KEY_KPPLUS }, /* +100 */
- { 0x14, KEY_EQUAL }, /* SYNC */
- { 0x1c, KEY_MEDIA }, /* PC/TV */
-};
-
-struct ir_scancode_table ir_codes_pv951_table = {
- .scan = ir_codes_pv951,
- .size = ARRAY_SIZE(ir_codes_pv951),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pv951_table);
-
-/* generic RC5 keytable */
-/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
-/* used by old (black) Hauppauge remotes */
-static struct ir_scancode ir_codes_rc5_tv[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_CHANNEL }, /* channel / program (japan: 11) */
- { 0x0c, KEY_POWER }, /* standby */
- { 0x0d, KEY_MUTE }, /* mute / demute */
- { 0x0f, KEY_TV }, /* display */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x1e, KEY_SEARCH }, /* search + */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* alt / channel */
- { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
- { 0x26, KEY_SLEEP }, /* sleeptimer */
- { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
- { 0x30, KEY_PAUSE },
- { 0x32, KEY_REWIND },
- { 0x33, KEY_GOTO },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
- { 0x3d, KEY_SUSPEND }, /* system standby */
-
-};
-
-struct ir_scancode_table ir_codes_rc5_tv_table = {
- .scan = ir_codes_rc5_tv,
- .size = ARRAY_SIZE(ir_codes_rc5_tv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_tv_table);
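The table above indexes on the RC5 command field only. As a hedged illustration (these helpers are not part of the original file), a standard 14-bit RC5 frame carries two start bits, a toggle bit, a 5-bit system address and a 6-bit command, so a decoder would strip everything but the low six bits before looking up a key:

/* Illustrative RC5 field extraction; function and variable names are assumptions. */
static inline u8 rc5_command(u16 frame)
{
	return frame & 0x3f;		/* low 6 bits: the table index */
}

static inline u8 rc5_system(u16 frame)
{
	return (frame >> 6) & 0x1f;	/* 5-bit system/device address */
}

static inline u8 rc5_toggle(u16 frame)
{
	return (frame >> 11) & 0x01;	/* flips on each new key press */
}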
-
-/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
-static struct ir_scancode ir_codes_winfast[] = {
- /* Keys 0 to 9 */
- { 0x12, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x00, KEY_POWER },
- { 0x1b, KEY_AUDIO }, /* Audio Source */
- { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
- { 0x1e, KEY_VIDEO }, /* Video Source */
- { 0x16, KEY_INFO }, /* Display information */
- { 0x04, KEY_VOLUMEUP },
- { 0x08, KEY_VOLUMEDOWN },
- { 0x0c, KEY_CHANNELUP },
- { 0x10, KEY_CHANNELDOWN },
- { 0x03, KEY_ZOOM }, /* fullscreen */
- { 0x1f, KEY_TEXT }, /* closed caption/teletext */
- { 0x20, KEY_SLEEP },
- { 0x29, KEY_CLEAR }, /* boss key */
- { 0x14, KEY_MUTE },
- { 0x2b, KEY_RED },
- { 0x2c, KEY_GREEN },
- { 0x2d, KEY_YELLOW },
- { 0x2e, KEY_BLUE },
- { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
- { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
- { 0x2a, KEY_MEDIA }, /* PIP (Picture in Picture) */
- { 0x21, KEY_DOT },
- { 0x13, KEY_ENTER },
- { 0x11, KEY_LAST }, /* Recall (last channel) */
- { 0x22, KEY_PREVIOUS },
- { 0x23, KEY_PLAYPAUSE },
- { 0x24, KEY_NEXT },
- { 0x25, KEY_TIME }, /* Time Shifting */
- { 0x26, KEY_STOP },
- { 0x27, KEY_RECORD },
- { 0x28, KEY_SAVE }, /* Screenshot */
- { 0x2f, KEY_MENU },
- { 0x30, KEY_CANCEL },
- { 0x31, KEY_CHANNEL }, /* Channel Surf */
- { 0x32, KEY_SUBTITLE },
- { 0x33, KEY_LANGUAGE },
- { 0x34, KEY_REWIND },
- { 0x35, KEY_FASTFORWARD },
- { 0x36, KEY_TV },
- { 0x37, KEY_RADIO }, /* FM */
- { 0x38, KEY_DVD },
-
- { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
- { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
- { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
- { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
- { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
-};
-
-struct ir_scancode_table ir_codes_winfast_table = {
- .scan = ir_codes_winfast,
- .size = ARRAY_SIZE(ir_codes_winfast),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_table);
-
-static struct ir_scancode ir_codes_pinnacle_color[] = {
- { 0x59, KEY_MUTE },
- { 0x4a, KEY_POWER },
-
- { 0x18, KEY_TEXT },
- { 0x26, KEY_TV },
- { 0x3d, KEY_PRINT },
-
- { 0x48, KEY_RED },
- { 0x04, KEY_GREEN },
- { 0x11, KEY_YELLOW },
- { 0x00, KEY_BLUE },
-
- { 0x2d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
-
- { 0x49, KEY_MENU },
-
- { 0x16, KEY_CHANNELUP },
- { 0x17, KEY_CHANNELDOWN },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x22, KEY_LEFT },
- { 0x23, KEY_RIGHT },
- { 0x0d, KEY_SELECT },
-
- { 0x08, KEY_BACK },
- { 0x07, KEY_REFRESH },
-
- { 0x2f, KEY_ZOOM },
- { 0x29, KEY_RECORD },
-
- { 0x4b, KEY_PAUSE },
- { 0x4d, KEY_REWIND },
- { 0x2e, KEY_PLAY },
- { 0x4e, KEY_FORWARD },
- { 0x53, KEY_PREVIOUS },
- { 0x4c, KEY_STOP },
- { 0x54, KEY_NEXT },
-
- { 0x69, KEY_0 },
- { 0x6a, KEY_1 },
- { 0x6b, KEY_2 },
- { 0x6c, KEY_3 },
- { 0x6d, KEY_4 },
- { 0x6e, KEY_5 },
- { 0x6f, KEY_6 },
- { 0x70, KEY_7 },
- { 0x71, KEY_8 },
- { 0x72, KEY_9 },
-
- { 0x74, KEY_CHANNEL },
- { 0x0a, KEY_BACKSPACE },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_color_table = {
- .scan = ir_codes_pinnacle_color,
- .size = ARRAY_SIZE(ir_codes_pinnacle_color),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_color_table);
-
-/* Hauppauge: the newer, gray remotes (it seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- * Almost RC5 coding, but with some non-standard keys. */
-static struct ir_scancode ir_codes_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x0b, KEY_RED }, /* red button */
- { 0x0c, KEY_RADIO },
- { 0x0d, KEY_MENU },
- { 0x0e, KEY_SUBTITLE }, /* also the # key */
- { 0x0f, KEY_MUTE },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_PREVIOUS }, /* previous channel */
- { 0x14, KEY_UP },
- { 0x15, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x17, KEY_RIGHT },
- { 0x18, KEY_VIDEO }, /* Videos */
- { 0x19, KEY_AUDIO }, /* Music */
- /* 0x1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1a, KEY_MHP },
-
- { 0x1b, KEY_EPG }, /* Guide */
- { 0x1c, KEY_TV },
- { 0x1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1f, KEY_EXIT }, /* back/exit */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x25, KEY_ENTER }, /* OK */
- { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x29, KEY_BLUE }, /* blue key */
- { 0x2e, KEY_GREEN }, /* green button */
- { 0x30, KEY_PAUSE }, /* pause */
- { 0x32, KEY_REWIND }, /* backward << */
- { 0x34, KEY_FASTFORWARD }, /* forward >> */
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x38, KEY_YELLOW }, /* yellow key */
- { 0x3b, KEY_SELECT }, /* top right button */
- { 0x3c, KEY_ZOOM }, /* full */
- { 0x3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_hauppauge_new_table = {
- .scan = ir_codes_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_hauppauge_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table);
-
-static struct ir_scancode ir_codes_npgtech[] = {
- { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x2a, KEY_FRONT },
-
- { 0x3e, KEY_1 },
- { 0x02, KEY_2 },
- { 0x06, KEY_3 },
- { 0x0a, KEY_4 },
- { 0x0e, KEY_5 },
- { 0x12, KEY_6 },
- { 0x16, KEY_7 },
- { 0x1a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3a, KEY_0 },
- { 0x22, KEY_NUMLOCK }, /* -/-- */
- { 0x20, KEY_REFRESH },
-
- { 0x03, KEY_BRIGHTNESSDOWN },
- { 0x28, KEY_AUDIO },
- { 0x3c, KEY_CHANNELUP },
- { 0x3f, KEY_VOLUMEDOWN },
- { 0x2e, KEY_MUTE },
- { 0x3b, KEY_VOLUMEUP },
- { 0x00, KEY_CHANNELDOWN },
- { 0x07, KEY_BRIGHTNESSUP },
- { 0x2c, KEY_TEXT },
-
- { 0x37, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x13, KEY_PAUSE },
- { 0x26, KEY_STOP },
- { 0x18, KEY_FASTFORWARD },
- { 0x14, KEY_REWIND },
- { 0x33, KEY_ZOOM },
- { 0x32, KEY_KEYBOARD },
- { 0x30, KEY_GOTO }, /* Pointing arrow */
- { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
- { 0x0b, KEY_RADIO },
- { 0x10, KEY_POWER },
-
-};
-
-struct ir_scancode_table ir_codes_npgtech_table = {
- .scan = ir_codes_npgtech,
- .size = ARRAY_SIZE(ir_codes_npgtech),
-};
-EXPORT_SYMBOL_GPL(ir_codes_npgtech_table);
-
-/* Norwood Micro (non-Pro) TV Tuner
- By Peter Naulls <peter@chocky.org>
- Key comments are the functions given in the manual */
-static struct ir_scancode ir_codes_norwood[] = {
- /* Keys 0 to 9 */
- { 0x20, KEY_0 },
- { 0x21, KEY_1 },
- { 0x22, KEY_2 },
- { 0x23, KEY_3 },
- { 0x24, KEY_4 },
- { 0x25, KEY_5 },
- { 0x26, KEY_6 },
- { 0x27, KEY_7 },
- { 0x28, KEY_8 },
- { 0x29, KEY_9 },
-
- { 0x78, KEY_TUNER }, /* Video Source */
- { 0x2c, KEY_EXIT }, /* Open/Close software */
- { 0x2a, KEY_SELECT }, /* 2 Digit Select */
- { 0x69, KEY_AGAIN }, /* Recall */
-
- { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
- { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
- { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
- { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
-
- { 0x2d, KEY_MUTE }, /* Mute */
- { 0x30, KEY_VOLUMEUP }, /* Volume up */
- { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
- { 0x60, KEY_CHANNELUP }, /* Channel up */
- { 0x61, KEY_CHANNELDOWN }, /* Channel down */
-
- { 0x3f, KEY_RECORD }, /* Record */
- { 0x37, KEY_PLAY }, /* Play */
- { 0x36, KEY_PAUSE }, /* Pause */
- { 0x2b, KEY_STOP }, /* Stop */
- { 0x67, KEY_FASTFORWARD }, /* Forward */
- { 0x66, KEY_REWIND }, /* Rewind */
- { 0x3e, KEY_SEARCH }, /* Auto Scan */
- { 0x2e, KEY_CAMERA }, /* Capture Video */
- { 0x6d, KEY_MENU }, /* Show/Hide Control */
- { 0x2f, KEY_ZOOM }, /* Full Screen */
- { 0x34, KEY_RADIO }, /* FM */
- { 0x65, KEY_POWER }, /* Computer power */
-};
-
-struct ir_scancode_table ir_codes_norwood_table = {
- .scan = ir_codes_norwood,
- .size = ARRAY_SIZE(ir_codes_norwood),
-};
-EXPORT_SYMBOL_GPL(ir_codes_norwood_table);
-
-/* From reading the following remotes:
- * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
- * Hauppauge (from NOVA-CI-s box product)
- * This is a "middle of the road" approach; differences are noted
- */
-static struct ir_scancode ir_codes_budget_ci_old[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_ENTER },
- { 0x0b, KEY_RED },
- { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
- { 0x0d, KEY_MUTE },
- { 0x0f, KEY_A }, /* TV on Hauppauge */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x14, KEY_B },
- { 0x1c, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
- { 0x1f, KEY_BREAK },
- { 0x20, KEY_CHANNELUP },
- { 0x21, KEY_CHANNELDOWN },
- { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
- { 0x24, KEY_RESTART },
- { 0x25, KEY_OK },
- { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
- { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
- { 0x29, KEY_PAUSE },
- { 0x2b, KEY_RIGHT },
- { 0x2c, KEY_LEFT },
- { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
- { 0x30, KEY_SLOW },
- { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
- { 0x32, KEY_REWIND },
- { 0x34, KEY_FASTFORWARD },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD },
- { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
- { 0x3a, KEY_C },
- { 0x3c, KEY_EXIT },
- { 0x3d, KEY_POWER2 },
- { 0x3e, KEY_TUNER },
-};
-
-struct ir_scancode_table ir_codes_budget_ci_old_table = {
- .scan = ir_codes_budget_ci_old,
- .size = ARRAY_SIZE(ir_codes_budget_ci_old),
-};
-EXPORT_SYMBOL_GPL(ir_codes_budget_ci_old_table);
-
-/*
- * Marc Fargas <telenieko@telenieko.com>
- * This is the remote control that comes with the Asus P7131,
- * which has a label that says "Model PC-39".
- */
-static struct ir_scancode ir_codes_asus_pc39[] = {
- /* Keys 0 to 9 */
- { 0x15, KEY_0 },
- { 0x29, KEY_1 },
- { 0x2d, KEY_2 },
- { 0x2b, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0d, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x31, KEY_7 },
- { 0x35, KEY_8 },
- { 0x33, KEY_9 },
-
- { 0x3e, KEY_RADIO }, /* radio */
- { 0x03, KEY_MENU }, /* dvd/menu */
- { 0x2a, KEY_VOLUMEUP },
- { 0x19, KEY_VOLUMEDOWN },
- { 0x37, KEY_UP },
- { 0x3b, KEY_DOWN },
- { 0x27, KEY_LEFT },
- { 0x2f, KEY_RIGHT },
- { 0x25, KEY_VIDEO }, /* video */
- { 0x39, KEY_AUDIO }, /* music */
-
- { 0x21, KEY_TV }, /* tv */
- { 0x1d, KEY_EXIT }, /* back */
- { 0x0a, KEY_CHANNELUP }, /* channel / program + */
- { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1a, KEY_ENTER }, /* enter */
-
- { 0x06, KEY_PAUSE }, /* play/pause */
- { 0x1e, KEY_PREVIOUS }, /* rew */
- { 0x26, KEY_NEXT }, /* forward */
- { 0x0e, KEY_REWIND }, /* backward << */
- { 0x3a, KEY_FASTFORWARD }, /* forward >> */
- { 0x36, KEY_STOP },
- { 0x2e, KEY_RECORD }, /* recording */
- { 0x16, KEY_POWER }, /* the button that reads "close" */
-
- { 0x11, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MACRO }, /* recall */
- { 0x23, KEY_HOME }, /* home */
- { 0x05, KEY_PVR }, /* picture */
- { 0x3d, KEY_MUTE }, /* mute */
- { 0x01, KEY_DVD }, /* dvd */
-};
-
-struct ir_scancode_table ir_codes_asus_pc39_table = {
- .scan = ir_codes_asus_pc39,
- .size = ARRAY_SIZE(ir_codes_asus_pc39),
-};
-EXPORT_SYMBOL_GPL(ir_codes_asus_pc39_table);
-
-
-/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
- Juan Pablo Sormani <sorman@gmail.com> */
-static struct ir_scancode ir_codes_encore_enltv[] = {
-
- /* The Power button does nothing, not even in the Windows app,
- although it does send data (used for BIOS wakeup?) */
- { 0x0d, KEY_MUTE },
-
- { 0x1e, KEY_TV },
- { 0x00, KEY_VIDEO },
- { 0x01, KEY_AUDIO }, /* music */
- { 0x02, KEY_MHP }, /* picture */
-
- { 0x1f, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x1c, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x1d, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_LIST }, /* -/-- */
- { 0x0b, KEY_LAST }, /* recall */
-
- { 0x14, KEY_HOME }, /* win start menu */
- { 0x15, KEY_EXIT }, /* exit */
- { 0x16, KEY_CHANNELUP }, /* UP */
- { 0x12, KEY_CHANNELDOWN }, /* DOWN */
- { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
- { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
-
- { 0x18, KEY_ENTER }, /* OK */
-
- { 0x0e, KEY_ESC },
- { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
- { 0x11, KEY_TAB },
- { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
-
- { 0x1a, KEY_MENU },
- { 0x1b, KEY_ZOOM }, /* fullscreen */
- { 0x44, KEY_TIME }, /* time shift */
- { 0x40, KEY_MODE }, /* source */
-
- { 0x5a, KEY_RECORD },
- { 0x42, KEY_PLAY }, /* play/pause */
- { 0x45, KEY_STOP },
- { 0x43, KEY_CAMERA }, /* camera icon */
-
- { 0x48, KEY_REWIND },
- { 0x4a, KEY_FASTFORWARD },
- { 0x49, KEY_PREVIOUS },
- { 0x4b, KEY_NEXT },
-
- { 0x4c, KEY_FAVORITES }, /* tv wall */
- { 0x4d, KEY_SOUND }, /* DVD sound */
- { 0x4e, KEY_LANGUAGE }, /* DVD lang */
- { 0x4f, KEY_TEXT }, /* DVD text */
-
- { 0x50, KEY_SLEEP }, /* shutdown */
- { 0x51, KEY_MODE }, /* stereo > main */
- { 0x52, KEY_SELECT }, /* stereo > sap */
- { 0x53, KEY_PROG1 }, /* teletext */
-
-
- { 0x59, KEY_RED }, /* AP1 */
- { 0x41, KEY_GREEN }, /* AP2 */
- { 0x47, KEY_YELLOW }, /* AP3 */
- { 0x57, KEY_BLUE }, /* AP4 */
-};
-
-struct ir_scancode_table ir_codes_encore_enltv_table = {
- .scan = ir_codes_encore_enltv,
- .size = ARRAY_SIZE(ir_codes_encore_enltv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_table);
-
-/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
- Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_encore_enltv2[] = {
- { 0x4c, KEY_POWER2 },
- { 0x4a, KEY_TUNER },
- { 0x40, KEY_1 },
- { 0x60, KEY_2 },
- { 0x50, KEY_3 },
- { 0x70, KEY_4 },
- { 0x48, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x78, KEY_8 },
- { 0x44, KEY_9 },
- { 0x54, KEY_0 },
-
- { 0x64, KEY_LAST }, /* +100 */
- { 0x4e, KEY_AGAIN }, /* Recall */
-
- { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
- { 0x5e, KEY_MENU },
- { 0x56, KEY_SCREEN },
- { 0x7a, KEY_SETUP },
-
- { 0x46, KEY_MUTE },
- { 0x5c, KEY_MODE }, /* Stereo */
- { 0x74, KEY_INFO },
- { 0x7c, KEY_CLEAR },
-
- { 0x55, KEY_UP },
- { 0x49, KEY_DOWN },
- { 0x7e, KEY_LEFT },
- { 0x59, KEY_RIGHT },
- { 0x6a, KEY_ENTER },
-
- { 0x42, KEY_VOLUMEUP },
- { 0x62, KEY_VOLUMEDOWN },
- { 0x52, KEY_CHANNELUP },
- { 0x72, KEY_CHANNELDOWN },
-
- { 0x41, KEY_RECORD },
- { 0x51, KEY_CAMERA }, /* Snapshot */
- { 0x75, KEY_TIME }, /* Timeshift */
- { 0x71, KEY_TV2 }, /* PIP */
-
- { 0x45, KEY_REWIND },
- { 0x6f, KEY_PAUSE },
- { 0x7d, KEY_FORWARD },
- { 0x79, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_encore_enltv2_table = {
- .scan = ir_codes_encore_enltv2,
- .size = ARRAY_SIZE(ir_codes_encore_enltv2),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv2_table);
-
-/* for the Technotrend 1500 bundled remotes (grey and black): */
-static struct ir_scancode ir_codes_tt_1500[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x06, KEY_4 },
- { 0x07, KEY_5 },
- { 0x08, KEY_6 },
- { 0x09, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x0b, KEY_9 },
- { 0x0c, KEY_0 },
- { 0x0d, KEY_UP },
- { 0x0e, KEY_LEFT },
- { 0x0f, KEY_OK },
- { 0x10, KEY_RIGHT },
- { 0x11, KEY_DOWN },
- { 0x12, KEY_INFO },
- { 0x13, KEY_EXIT },
- { 0x14, KEY_RED },
- { 0x15, KEY_GREEN },
- { 0x16, KEY_YELLOW },
- { 0x17, KEY_BLUE },
- { 0x18, KEY_MUTE },
- { 0x19, KEY_TEXT },
- { 0x1a, KEY_MODE }, /* ? TV/Radio */
- { 0x21, KEY_OPTION },
- { 0x22, KEY_EPG },
- { 0x23, KEY_CHANNELUP },
- { 0x24, KEY_CHANNELDOWN },
- { 0x25, KEY_VOLUMEUP },
- { 0x26, KEY_VOLUMEDOWN },
- { 0x27, KEY_SETUP },
- { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
- { 0x3b, KEY_PLAY },
- { 0x3c, KEY_STOP },
- { 0x3d, KEY_REWIND },
- { 0x3e, KEY_PAUSE },
- { 0x3f, KEY_FORWARD },
-};
-
-struct ir_scancode_table ir_codes_tt_1500_table = {
- .scan = ir_codes_tt_1500,
- .size = ARRAY_SIZE(ir_codes_tt_1500),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tt_1500_table);
-
-/* DViCO FUSION HDTV MCE remote */
-static struct ir_scancode ir_codes_fusionhdtv_mce[] = {
-
- { 0x0b, KEY_1 },
- { 0x17, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x07, KEY_4 },
- { 0x50, KEY_5 },
- { 0x54, KEY_6 },
- { 0x48, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x58, KEY_9 },
- { 0x03, KEY_0 },
-
- { 0x5e, KEY_OK },
- { 0x51, KEY_UP },
- { 0x53, KEY_DOWN },
- { 0x5b, KEY_LEFT },
- { 0x5f, KEY_RIGHT },
-
- { 0x02, KEY_TV }, /* Labeled DTV on remote */
- { 0x0e, KEY_MP3 },
- { 0x1a, KEY_DVD },
- { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
- { 0x16, KEY_SETUP },
- { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
- { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
-
- { 0x49, KEY_BACK },
- { 0x59, KEY_INFO }, /* Labeled MORE on remote */
- { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
- { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
-
- { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
- { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
- { 0x42, KEY_ENTER }, /* Labeled START with a green
- MS windows logo on remote */
-
- { 0x15, KEY_VOLUMEUP },
- { 0x05, KEY_VOLUMEDOWN },
- { 0x11, KEY_CHANNELUP },
- { 0x09, KEY_CHANNELDOWN },
-
- { 0x52, KEY_CAMERA },
- { 0x5a, KEY_TUNER },
- { 0x19, KEY_OPEN },
-
- { 0x13, KEY_MODE }, /* 4:3 16:9 select */
- { 0x1f, KEY_ZOOM },
-
- { 0x43, KEY_REWIND },
- { 0x47, KEY_PLAYPAUSE },
- { 0x4f, KEY_FASTFORWARD },
- { 0x57, KEY_MUTE },
- { 0x0d, KEY_STOP },
- { 0x01, KEY_RECORD },
- { 0x4e, KEY_POWER },
-};
-
-struct ir_scancode_table ir_codes_fusionhdtv_mce_table = {
- .scan = ir_codes_fusionhdtv_mce,
- .size = ARRAY_SIZE(ir_codes_fusionhdtv_mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_fusionhdtv_mce_table);
-
-/* Pinnacle PCTV HD 800i mini remote */
-static struct ir_scancode ir_codes_pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
-};
-
-struct ir_scancode_table ir_codes_pinnacle_pctv_hd_table = {
- .scan = ir_codes_pinnacle_pctv_hd,
- .size = ARRAY_SIZE(ir_codes_pinnacle_pctv_hd),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_pctv_hd_table);
-
-/*
- * Igor Kuznetsov <igk72@ya.ru>
- * Andrey J. Melnikov <temnota@kmv.ru>
- *
- * Keytable is used by BeholdTV 60x series, M6 series at
- * least, and probably other cards too.
- * The "ascii-art picture" below (in comments; the first row
- * is the keycode in hex, and the subsequent row(s) show
- * the button labels, several variants when appropriate)
- * helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold[] = {
-
- /* 0x1c 0x12 *
- * TV/FM POWER *
- * */
- { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 MODE *
- * */
- { 0x0a, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x17, KEY_MODE },
-
- /* 0x14 0x10 *
- * ASPECT FULLSCREEN *
- * */
- { 0x14, KEY_SCREEN },
- { 0x10, KEY_ZOOM },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x15 *
- * Down *
- * */
- { 0x0b, KEY_CHANNELUP },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x16, KEY_OK }, /* XXX KEY_ENTER */
- { 0x0c, KEY_VOLUMEUP },
- { 0x15, KEY_CHANNELDOWN },
-
- /* 0x11 0x0d *
- * MUTE INFO *
- * */
- { 0x11, KEY_MUTE },
- { 0x0d, KEY_INFO },
-
- /* 0x0f 0x1b 0x1a *
- * RECORD PLAY/PAUSE STOP *
- * *
- * 0x0e 0x1f 0x1e *
- *TELETEXT AUDIO SOURCE *
- * RED YELLOW *
- * */
- { 0x0f, KEY_RECORD },
- { 0x1b, KEY_PLAYPAUSE },
- { 0x1a, KEY_STOP },
- { 0x0e, KEY_TEXT },
- { 0x1f, KEY_RED }, /*XXX KEY_AUDIO */
- { 0x1e, KEY_YELLOW }, /*XXX KEY_SOURCE */
-
- /* 0x1d 0x13 0x19 *
- * SLEEP PREVIEW DVB *
- * GREEN BLUE *
- * */
- { 0x1d, KEY_SLEEP },
- { 0x13, KEY_GREEN },
- { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
-
- /* 0x58 0x5c *
- * FREEZE SNAPSHOT *
- * */
- { 0x58, KEY_SLOW },
- { 0x5c, KEY_CAMERA },
-
-};
-
-struct ir_scancode_table ir_codes_behold_table = {
- .scan = ir_codes_behold,
- .size = ARRAY_SIZE(ir_codes_behold),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_table);
-
-/* Beholder Intl. Ltd. 2008
- * Dmitry Belimov d.belimov@google.com
- * Keytable is used by BeholdTV Columbus
- * The "ascii-art picture" below (in comments; the first row
- * is the keycode in hex, and the subsequent row(s) show
- * the button labels, several variants when appropriate)
- * helps to decide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold_columbus[] = {
-
- /* 0x13 0x11 0x1C 0x12 *
- * Mute Source TV/FM Power *
- * */
-
- { 0x13, KEY_MUTE },
- { 0x11, KEY_PROPS },
- { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 0x0D *
- * 1 2 3 Stereo *
- * *
- * 0x04 0x05 0x06 0x19 *
- * 4 5 6 Snapshot *
- * *
- * 0x07 0x08 0x09 0x10 *
- * 7 8 9 Zoom *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x0D, KEY_SETUP }, /* Setup key */
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x19, KEY_CAMERA }, /* Snapshot key */
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x10, KEY_ZOOM },
-
- /* 0x0A 0x00 0x0B 0x0C *
- * RECALL 0 ChannelUp VolumeUp *
- * */
- { 0x0A, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x0B, KEY_CHANNELUP },
- { 0x0C, KEY_VOLUMEUP },
-
- /* 0x1B 0x1D 0x15 0x18 *
- * Timeshift Record ChannelDown VolumeDown *
- * */
-
- { 0x1B, KEY_TIME },
- { 0x1D, KEY_RECORD },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
-
- /* 0x0E 0x1E 0x0F 0x1A *
- * Stop Pause Previous Next *
- * */
-
- { 0x0E, KEY_STOP },
- { 0x1E, KEY_PAUSE },
- { 0x0F, KEY_PREVIOUS },
- { 0x1A, KEY_NEXT },
-
-};
-
-struct ir_scancode_table ir_codes_behold_columbus_table = {
- .scan = ir_codes_behold_columbus,
- .size = ARRAY_SIZE(ir_codes_behold_columbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_columbus_table);
-
-/*
- * Remote control for the Genius TVGO A11MCE
- * Adrian Pardini <pardo.bsso@gmail.com>
- */
-static struct ir_scancode ir_codes_genius_tvgo_a11mce[] = {
- /* Keys 0 to 9 */
- { 0x48, KEY_0 },
- { 0x09, KEY_1 },
- { 0x1d, KEY_2 },
- { 0x1f, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x11, KEY_6 },
- { 0x17, KEY_7 },
- { 0x12, KEY_8 },
- { 0x16, KEY_9 },
-
- { 0x54, KEY_RECORD }, /* recording */
- { 0x06, KEY_MUTE }, /* mute */
- { 0x10, KEY_POWER },
- { 0x40, KEY_LAST }, /* recall */
- { 0x4c, KEY_CHANNELUP }, /* channel / program + */
- { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x0d, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x4d, KEY_OK }, /* also labeled as Pause */
- { 0x1c, KEY_ZOOM }, /* full screen and Stop*/
- { 0x02, KEY_MODE }, /* AV Source or Rewind*/
- { 0x04, KEY_LIST }, /* -/-- */
- /* small arrows above numbers */
- { 0x1a, KEY_NEXT }, /* also Fast Forward */
- { 0x0e, KEY_PREVIOUS }, /* also Rewind */
- /* these are in a rather non-standard layout and have
- an alternate name written */
- { 0x1e, KEY_UP }, /* Video Setting */
- { 0x0a, KEY_DOWN }, /* Video Default */
- { 0x05, KEY_CAMERA }, /* Snapshot */
- { 0x0c, KEY_RIGHT }, /* Hide Panel */
- /* Four buttons without label */
- { 0x49, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x13, KEY_YELLOW },
- { 0x50, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_genius_tvgo_a11mce_table = {
- .scan = ir_codes_genius_tvgo_a11mce,
- .size = ARRAY_SIZE(ir_codes_genius_tvgo_a11mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_genius_tvgo_a11mce_table);
-
-/*
- * Remote control for Powercolor Real Angel 330
- * Daniel Fraga <fragabr@gmail.com>
- */
-static struct ir_scancode ir_codes_powercolor_real_angel[] = {
- { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
- { 0x29, KEY_PREVIOUS }, /* previous channel */
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x2b, KEY_MODE }, /* stereo/mono */
- { 0x2c, KEY_TEXT }, /* teletext */
- { 0x20, KEY_CHANNELUP }, /* channel up */
- { 0x21, KEY_CHANNELDOWN }, /* channel down */
- { 0x10, KEY_VOLUMEUP }, /* volume up */
- { 0x11, KEY_VOLUMEDOWN }, /* volume down */
- { 0x0d, KEY_MUTE },
- { 0x1f, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x16, KEY_PAUSE },
- { 0x0b, KEY_STOP },
- { 0x27, KEY_FASTFORWARD },
- { 0x26, KEY_REWIND },
- { 0x1e, KEY_SEARCH }, /* autoscan */
- { 0x0e, KEY_CAMERA }, /* snapshot */
- { 0x2d, KEY_SETUP },
- { 0x0f, KEY_SCREEN }, /* full screen */
- { 0x14, KEY_RADIO }, /* FM radio */
- { 0x25, KEY_POWER }, /* power */
-};
-
-struct ir_scancode_table ir_codes_powercolor_real_angel_table = {
- .scan = ir_codes_powercolor_real_angel,
- .size = ARRAY_SIZE(ir_codes_powercolor_real_angel),
-};
-EXPORT_SYMBOL_GPL(ir_codes_powercolor_real_angel_table);
-
-/* Kworld Plus TV Analog Lite PCI IR
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kworld_plus_tv_analog[] = {
- { 0x0c, KEY_PROG1 }, /* Kworld key */
- { 0x16, KEY_CLOSECD }, /* -> ) */
- { 0x1d, KEY_POWER2 },
-
- { 0x00, KEY_1 },
- { 0x01, KEY_2 },
- { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
- { 0x03, KEY_4 }, /* Two keys have the same code: 4 and right */
- { 0x04, KEY_5 },
- { 0x05, KEY_6 },
- { 0x06, KEY_7 },
- { 0x07, KEY_8 },
- { 0x08, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_AGAIN },
- { 0x14, KEY_MUTE },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x0b, KEY_ENTER },
-
- { 0x10, KEY_CHANNELUP },
- { 0x11, KEY_CHANNELDOWN },
-
- /* Couldn't map key left/key right since those
- conflict with the '3' and '4' scancodes.
- It is unclear what the original driver does here.
- */
-
- { 0x13, KEY_VOLUMEUP },
- { 0x12, KEY_VOLUMEDOWN },
-
- /* The lower part of the remote:
- There are several duplicated keycodes there.
- Most of them conflict with digits.
- Add mappings just for the unused scancodes.
- Somehow, the original driver has a way to tell them apart,
- but this doesn't seem to be based on some GPIO.
- It is also not related to the time between keyup
- and keydown.
- */
- { 0x19, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_STOP},
- { 0x1b, KEY_RECORD},
-
- { 0x22, KEY_TEXT},
-
- { 0x15, KEY_AUDIO}, /* ((*)) */
- { 0x0f, KEY_ZOOM},
- { 0x1c, KEY_CAMERA}, /* snapshot */
-
- { 0x18, KEY_RED}, /* B */
- { 0x23, KEY_GREEN}, /* C */
-};
-struct ir_scancode_table ir_codes_kworld_plus_tv_analog_table = {
- .scan = ir_codes_kworld_plus_tv_analog,
- .size = ARRAY_SIZE(ir_codes_kworld_plus_tv_analog),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_plus_tv_analog_table);
-
-/* Kaiomy TVnPC U2
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kaiomy[] = {
- { 0x43, KEY_POWER2},
- { 0x01, KEY_LIST},
- { 0x0b, KEY_ZOOM},
- { 0x03, KEY_POWER},
-
- { 0x04, KEY_1},
- { 0x08, KEY_2},
- { 0x02, KEY_3},
-
- { 0x0f, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
-
- { 0x0c, KEY_7},
- { 0x0d, KEY_8},
- { 0x0a, KEY_9},
-
- { 0x11, KEY_0},
-
- { 0x09, KEY_CHANNELUP},
- { 0x07, KEY_CHANNELDOWN},
-
- { 0x0e, KEY_VOLUMEUP},
- { 0x13, KEY_VOLUMEDOWN},
-
- { 0x10, KEY_HOME},
- { 0x12, KEY_ENTER},
-
- { 0x14, KEY_RECORD},
- { 0x15, KEY_STOP},
- { 0x16, KEY_PLAY},
- { 0x17, KEY_MUTE},
-
- { 0x18, KEY_UP},
- { 0x19, KEY_DOWN},
- { 0x1a, KEY_LEFT},
- { 0x1b, KEY_RIGHT},
-
- { 0x1c, KEY_RED},
- { 0x1d, KEY_GREEN},
- { 0x1e, KEY_YELLOW},
- { 0x1f, KEY_BLUE},
-};
-struct ir_scancode_table ir_codes_kaiomy_table = {
- .scan = ir_codes_kaiomy,
- .size = ARRAY_SIZE(ir_codes_kaiomy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kaiomy_table);
-
-static struct ir_scancode ir_codes_avermedia_a16d[] = {
- { 0x20, KEY_LIST},
- { 0x00, KEY_POWER},
- { 0x28, KEY_1},
- { 0x18, KEY_2},
- { 0x38, KEY_3},
- { 0x24, KEY_4},
- { 0x14, KEY_5},
- { 0x34, KEY_6},
- { 0x2c, KEY_7},
- { 0x1c, KEY_8},
- { 0x3c, KEY_9},
- { 0x12, KEY_SUBTITLE},
- { 0x22, KEY_0},
- { 0x32, KEY_REWIND},
- { 0x3a, KEY_SHUFFLE},
- { 0x02, KEY_PRINT},
- { 0x11, KEY_CHANNELDOWN},
- { 0x31, KEY_CHANNELUP},
- { 0x0c, KEY_ZOOM},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x3e, KEY_VOLUMEUP},
- { 0x0a, KEY_MUTE},
- { 0x04, KEY_AUDIO},
- { 0x26, KEY_RECORD},
- { 0x06, KEY_PLAY},
- { 0x36, KEY_STOP},
- { 0x16, KEY_PAUSE},
- { 0x2e, KEY_REWIND},
- { 0x0e, KEY_FASTFORWARD},
- { 0x30, KEY_TEXT},
- { 0x21, KEY_GREEN},
- { 0x01, KEY_BLUE},
- { 0x08, KEY_EPG},
- { 0x2a, KEY_MENU},
-};
-struct ir_scancode_table ir_codes_avermedia_a16d_table = {
- .scan = ir_codes_avermedia_a16d,
- .size = ARRAY_SIZE(ir_codes_avermedia_a16d),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_a16d_table);
-
-/* Encore ENLTV-FM v5.3
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_encore_enltv_fm53[] = {
- { 0x10, KEY_POWER2},
- { 0x06, KEY_MUTE},
-
- { 0x09, KEY_1},
- { 0x1d, KEY_2},
- { 0x1f, KEY_3},
- { 0x19, KEY_4},
- { 0x1b, KEY_5},
- { 0x11, KEY_6},
- { 0x17, KEY_7},
- { 0x12, KEY_8},
- { 0x16, KEY_9},
- { 0x48, KEY_0},
-
- { 0x04, KEY_LIST}, /* -/-- */
- { 0x40, KEY_LAST}, /* recall */
-
- { 0x02, KEY_MODE}, /* TV/AV */
- { 0x05, KEY_CAMERA}, /* SNAPSHOT */
-
- { 0x4c, KEY_CHANNELUP}, /* UP */
- { 0x00, KEY_CHANNELDOWN}, /* DOWN */
- { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
- { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
- { 0x49, KEY_ENTER}, /* OK */
-
- { 0x54, KEY_RECORD},
- { 0x4d, KEY_PLAY}, /* pause */
-
- { 0x1e, KEY_MENU}, /* video setting */
- { 0x0e, KEY_RIGHT}, /* <- */
- { 0x1a, KEY_LEFT}, /* -> */
-
- { 0x0a, KEY_CLEAR}, /* video default */
- { 0x0c, KEY_ZOOM}, /* hide pannel */
- { 0x47, KEY_SLEEP}, /* shutdown */
-};
-struct ir_scancode_table ir_codes_encore_enltv_fm53_table = {
- .scan = ir_codes_encore_enltv_fm53,
- .size = ARRAY_SIZE(ir_codes_encore_enltv_fm53),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_fm53_table);
-
-/* Zogis Real Audio 220 - 32 keys IR */
-static struct ir_scancode ir_codes_real_audio_220_32_keys[] = {
- { 0x1c, KEY_RADIO},
- { 0x12, KEY_POWER2},
-
- { 0x01, KEY_1},
- { 0x02, KEY_2},
- { 0x03, KEY_3},
- { 0x04, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
- { 0x07, KEY_7},
- { 0x08, KEY_8},
- { 0x09, KEY_9},
- { 0x00, KEY_0},
-
- { 0x0c, KEY_VOLUMEUP},
- { 0x18, KEY_VOLUMEDOWN},
- { 0x0b, KEY_CHANNELUP},
- { 0x15, KEY_CHANNELDOWN},
- { 0x16, KEY_ENTER},
-
- { 0x11, KEY_LIST}, /* Source */
- { 0x0d, KEY_AUDIO}, /* stereo */
-
- { 0x0f, KEY_PREVIOUS}, /* Prev */
- { 0x1b, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_NEXT}, /* Next */
-
- { 0x0e, KEY_STOP},
- { 0x1f, KEY_PLAY},
- { 0x1e, KEY_PLAYPAUSE}, /* Pause */
-
- { 0x1d, KEY_RECORD},
- { 0x13, KEY_MUTE},
- { 0x19, KEY_CAMERA}, /* Snapshot */
-
-};
-struct ir_scancode_table ir_codes_real_audio_220_32_keys_table = {
- .scan = ir_codes_real_audio_220_32_keys,
- .size = ARRAY_SIZE(ir_codes_real_audio_220_32_keys),
-};
-EXPORT_SYMBOL_GPL(ir_codes_real_audio_220_32_keys_table);
-
-/* ATI TV Wonder HD 600 USB
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_ati_tv_wonder_hd_600[] = {
- { 0x00, KEY_RECORD}, /* Row 1 */
- { 0x01, KEY_PLAYPAUSE},
- { 0x02, KEY_STOP},
- { 0x03, KEY_POWER},
- { 0x04, KEY_PREVIOUS}, /* Row 2 */
- { 0x05, KEY_REWIND},
- { 0x06, KEY_FORWARD},
- { 0x07, KEY_NEXT},
- { 0x08, KEY_EPG}, /* Row 3 */
- { 0x09, KEY_HOME},
- { 0x0a, KEY_MENU},
- { 0x0b, KEY_CHANNELUP},
- { 0x0c, KEY_BACK}, /* Row 4 */
- { 0x0d, KEY_UP},
- { 0x0e, KEY_INFO},
- { 0x0f, KEY_CHANNELDOWN},
- { 0x10, KEY_LEFT}, /* Row 5 */
- { 0x11, KEY_SELECT},
- { 0x12, KEY_RIGHT},
- { 0x13, KEY_VOLUMEUP},
- { 0x14, KEY_LAST}, /* Row 6 */
- { 0x15, KEY_DOWN},
- { 0x16, KEY_MUTE},
- { 0x17, KEY_VOLUMEDOWN},
-};
-struct ir_scancode_table ir_codes_ati_tv_wonder_hd_600_table = {
- .scan = ir_codes_ati_tv_wonder_hd_600,
- .size = ARRAY_SIZE(ir_codes_ati_tv_wonder_hd_600),
-};
-EXPORT_SYMBOL_GPL(ir_codes_ati_tv_wonder_hd_600_table);
-
-/* DVBWorld remotes
- Igor M. Liplianin <liplianin@me.by>
- */
-static struct ir_scancode ir_codes_dm1105_nec[] = {
- { 0x0a, KEY_POWER2}, /* power */
- { 0x0c, KEY_MUTE}, /* mute */
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_CHANNELUP}, /* ch+ */
- { 0x0f, KEY_CHANNELDOWN}, /* ch- */
- { 0x1a, KEY_VOLUMEUP}, /* vol+ */
- { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
- { 0x04, KEY_RECORD}, /* rec */
- { 0x09, KEY_CHANNEL}, /* fav */
- { 0x08, KEY_BACKSPACE}, /* rewind */
- { 0x07, KEY_FASTFORWARD}, /* fast */
- { 0x0b, KEY_PAUSE}, /* pause */
- { 0x02, KEY_ESC}, /* cancel */
- { 0x03, KEY_TAB}, /* tab */
- { 0x00, KEY_UP}, /* up */
- { 0x1f, KEY_ENTER}, /* ok */
- { 0x01, KEY_DOWN}, /* down */
- { 0x05, KEY_RECORD}, /* cap */
- { 0x06, KEY_STOP}, /* stop */
- { 0x40, KEY_ZOOM}, /* full */
- { 0x1e, KEY_TV}, /* tvmode */
- { 0x1b, KEY_B}, /* recall */
-};
-struct ir_scancode_table ir_codes_dm1105_nec_table = {
- .scan = ir_codes_dm1105_nec,
- .size = ARRAY_SIZE(ir_codes_dm1105_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec_table);
-
-static struct ir_scancode ir_codes_tevii_nec[] = {
- { 0x0a, KEY_POWER2},
- { 0x0c, KEY_MUTE},
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_MENU},
- { 0x0f, KEY_VOLUMEDOWN},
- { 0x1a, KEY_LAST},
- { 0x0e, KEY_OPEN},
- { 0x04, KEY_RECORD},
- { 0x09, KEY_VOLUMEUP},
- { 0x08, KEY_CHANNELUP},
- { 0x07, KEY_PVR},
- { 0x0b, KEY_TIME},
- { 0x02, KEY_RIGHT},
- { 0x03, KEY_LEFT},
- { 0x00, KEY_UP},
- { 0x1f, KEY_OK},
- { 0x01, KEY_DOWN},
- { 0x05, KEY_TUNER},
- { 0x06, KEY_CHANNELDOWN},
- { 0x40, KEY_PLAYPAUSE},
- { 0x1e, KEY_REWIND},
- { 0x1b, KEY_FAVORITES},
- { 0x1d, KEY_BACK},
- { 0x4d, KEY_FASTFORWARD},
- { 0x44, KEY_EPG},
- { 0x4c, KEY_INFO},
- { 0x41, KEY_AB},
- { 0x43, KEY_AUDIO},
- { 0x45, KEY_SUBTITLE},
- { 0x4a, KEY_LIST},
- { 0x46, KEY_F1},
- { 0x47, KEY_F2},
- { 0x5e, KEY_F3},
- { 0x5c, KEY_F4},
- { 0x52, KEY_F5},
- { 0x5a, KEY_F6},
- { 0x56, KEY_MODE},
- { 0x58, KEY_SWITCHVIDEOMODE},
-};
-struct ir_scancode_table ir_codes_tevii_nec_table = {
- .scan = ir_codes_tevii_nec,
- .size = ARRAY_SIZE(ir_codes_tevii_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tevii_nec_table);
-
-static struct ir_scancode ir_codes_tbs_nec[] = {
- { 0x04, KEY_POWER2}, /*power*/
- { 0x14, KEY_MUTE}, /*mute*/
- { 0x07, KEY_1},
- { 0x06, KEY_2},
- { 0x05, KEY_3},
- { 0x0b, KEY_4},
- { 0x0a, KEY_5},
- { 0x09, KEY_6},
- { 0x0f, KEY_7},
- { 0x0e, KEY_8},
- { 0x0d, KEY_9},
- { 0x12, KEY_0},
- { 0x16, KEY_CHANNELUP}, /*ch+*/
- { 0x11, KEY_CHANNELDOWN},/*ch-*/
- { 0x13, KEY_VOLUMEUP}, /*vol+*/
- { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
- { 0x03, KEY_RECORD}, /*rec*/
- { 0x18, KEY_PAUSE}, /*pause*/
- { 0x19, KEY_OK}, /*ok*/
- { 0x1a, KEY_CAMERA}, /* snapshot */
- { 0x01, KEY_UP},
- { 0x10, KEY_LEFT},
- { 0x02, KEY_RIGHT},
- { 0x08, KEY_DOWN},
- { 0x15, KEY_FAVORITES},
- { 0x17, KEY_SUBTITLE},
- { 0x1d, KEY_ZOOM},
- { 0x1f, KEY_EXIT},
- { 0x1e, KEY_MENU},
- { 0x1c, KEY_EPG},
- { 0x00, KEY_PREVIOUS},
- { 0x1b, KEY_MODE},
-};
-struct ir_scancode_table ir_codes_tbs_nec_table = {
- .scan = ir_codes_tbs_nec,
- .size = ARRAY_SIZE(ir_codes_tbs_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tbs_nec_table);
-
-/* Terratec Cinergy Hybrid T USB XS
- Devin Heitmueller <dheitmueller@linuxtv.org>
- */
-static struct ir_scancode ir_codes_terratec_cinergy_xs[] = {
- { 0x41, KEY_HOME},
- { 0x01, KEY_POWER},
- { 0x42, KEY_MENU},
- { 0x02, KEY_1},
- { 0x03, KEY_2},
- { 0x04, KEY_3},
- { 0x43, KEY_SUBTITLE},
- { 0x05, KEY_4},
- { 0x06, KEY_5},
- { 0x07, KEY_6},
- { 0x44, KEY_TEXT},
- { 0x08, KEY_7},
- { 0x09, KEY_8},
- { 0x0a, KEY_9},
- { 0x45, KEY_DELETE},
- { 0x0b, KEY_TUNER},
- { 0x0c, KEY_0},
- { 0x0d, KEY_MODE},
- { 0x46, KEY_TV},
- { 0x47, KEY_DVD},
- { 0x49, KEY_VIDEO},
- { 0x4b, KEY_AUX},
- { 0x10, KEY_UP},
- { 0x11, KEY_LEFT},
- { 0x12, KEY_OK},
- { 0x13, KEY_RIGHT},
- { 0x14, KEY_DOWN},
- { 0x0f, KEY_EPG},
- { 0x16, KEY_INFO},
- { 0x4d, KEY_BACKSPACE},
- { 0x1c, KEY_VOLUMEUP},
- { 0x4c, KEY_PLAY},
- { 0x1b, KEY_CHANNELUP},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x1d, KEY_MUTE},
- { 0x1f, KEY_CHANNELDOWN},
- { 0x17, KEY_RED},
- { 0x18, KEY_GREEN},
- { 0x19, KEY_YELLOW},
- { 0x1a, KEY_BLUE},
- { 0x58, KEY_RECORD},
- { 0x48, KEY_STOP},
- { 0x40, KEY_PAUSE},
- { 0x54, KEY_LAST},
- { 0x4e, KEY_REWIND},
- { 0x4f, KEY_FASTFORWARD},
- { 0x5c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_terratec_cinergy_xs_table = {
- .scan = ir_codes_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_terratec_cinergy_xs),
-};
-EXPORT_SYMBOL_GPL(ir_codes_terratec_cinergy_xs_table);
-
-/* EVGA inDtube
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_evga_indtube[] = {
- { 0x12, KEY_POWER},
- { 0x02, KEY_MODE}, /* TV */
- { 0x14, KEY_MUTE},
- { 0x1a, KEY_CHANNELUP},
- { 0x16, KEY_TV2}, /* PIP */
- { 0x1d, KEY_VOLUMEUP},
- { 0x05, KEY_CHANNELDOWN},
- { 0x0f, KEY_PLAYPAUSE},
- { 0x19, KEY_VOLUMEDOWN},
- { 0x1c, KEY_REWIND},
- { 0x0d, KEY_RECORD},
- { 0x18, KEY_FORWARD},
- { 0x1e, KEY_PREVIOUS},
- { 0x1b, KEY_STOP},
- { 0x1f, KEY_NEXT},
- { 0x13, KEY_CAMERA},
-};
-struct ir_scancode_table ir_codes_evga_indtube_table = {
- .scan = ir_codes_evga_indtube,
- .size = ARRAY_SIZE(ir_codes_evga_indtube),
-};
-EXPORT_SYMBOL_GPL(ir_codes_evga_indtube_table);
-
-static struct ir_scancode ir_codes_videomate_s350[] = {
- { 0x00, KEY_TV},
- { 0x01, KEY_DVD},
- { 0x04, KEY_RECORD},
- { 0x05, KEY_VIDEO}, /* TV/Video */
- { 0x07, KEY_STOP},
- { 0x08, KEY_PLAYPAUSE},
- { 0x0a, KEY_REWIND},
- { 0x0f, KEY_FASTFORWARD},
- { 0x10, KEY_CHANNELUP},
- { 0x12, KEY_VOLUMEUP},
- { 0x13, KEY_CHANNELDOWN},
- { 0x14, KEY_MUTE},
- { 0x15, KEY_VOLUMEDOWN},
- { 0x16, KEY_1},
- { 0x17, KEY_2},
- { 0x18, KEY_3},
- { 0x19, KEY_4},
- { 0x1a, KEY_5},
- { 0x1b, KEY_6},
- { 0x1c, KEY_7},
- { 0x1d, KEY_8},
- { 0x1e, KEY_9},
- { 0x1f, KEY_0},
- { 0x21, KEY_SLEEP},
- { 0x24, KEY_ZOOM},
- { 0x25, KEY_LAST}, /* Recall */
- { 0x26, KEY_SUBTITLE}, /* CC */
- { 0x27, KEY_LANGUAGE}, /* MTS */
- { 0x29, KEY_CHANNEL}, /* SURF */
- { 0x2b, KEY_A},
- { 0x2c, KEY_B},
- { 0x2f, KEY_CAMERA}, /* Snapshot */
- { 0x23, KEY_RADIO},
- { 0x02, KEY_PREVIOUSSONG},
- { 0x06, KEY_NEXTSONG},
- { 0x03, KEY_EPG},
- { 0x09, KEY_SETUP},
- { 0x22, KEY_BACKSPACE},
- { 0x0c, KEY_UP},
- { 0x0e, KEY_DOWN},
- { 0x0b, KEY_LEFT},
- { 0x0d, KEY_RIGHT},
- { 0x11, KEY_ENTER},
- { 0x20, KEY_TEXT},
-};
-struct ir_scancode_table ir_codes_videomate_s350_table = {
- .scan = ir_codes_videomate_s350,
- .size = ARRAY_SIZE(ir_codes_videomate_s350),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_s350_table);
-
-/* GADMEI UTV330+ RM008Z remote
- Shine Liu <shinel@foxmail.com>
- */
-static struct ir_scancode ir_codes_gadmei_rm008z[] = {
- { 0x14, KEY_POWER2}, /* POWER OFF */
- { 0x0c, KEY_MUTE}, /* MUTE */
-
- { 0x18, KEY_TV}, /* TV */
- { 0x0e, KEY_VIDEO}, /* AV */
- { 0x0b, KEY_AUDIO}, /* SV */
- { 0x0f, KEY_RADIO}, /* FM */
-
- { 0x00, KEY_1},
- { 0x01, KEY_2},
- { 0x02, KEY_3},
- { 0x03, KEY_4},
- { 0x04, KEY_5},
- { 0x05, KEY_6},
- { 0x06, KEY_7},
- { 0x07, KEY_8},
- { 0x08, KEY_9},
- { 0x09, KEY_0},
- { 0x0a, KEY_INFO}, /* OSD */
- { 0x1c, KEY_BACKSPACE}, /* LAST */
-
- { 0x0d, KEY_PLAY}, /* PLAY */
- { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
- { 0x1a, KEY_RECORD}, /* RECORD */
- { 0x17, KEY_STOP}, /* STOP */
-
- { 0x1f, KEY_UP}, /* UP */
- { 0x44, KEY_DOWN}, /* DOWN */
- { 0x46, KEY_TAB}, /* BACK */
- { 0x4a, KEY_ZOOM}, /* FULLSECREEN */
-
- { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x15, KEY_ENTER}, /* OK */
-};
-struct ir_scancode_table ir_codes_gadmei_rm008z_table = {
- .scan = ir_codes_gadmei_rm008z,
- .size = ARRAY_SIZE(ir_codes_gadmei_rm008z),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table);
-
-/*************************************************************
- * COMPLETE SCANCODE TABLES
- * Instead of just a partial scancode, the tables bellow
- * contains the complete scancode and the receiver protocol
- *************************************************************/
-
-/*
- * Hauppauge:the newer, gray remotes (seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- *
- * This table contains the complete RC5 code, instead of just the data part
- */
-static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x1e00, KEY_0 },
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
-
- { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x1e0b, KEY_RED }, /* red button */
- { 0x1e0c, KEY_RADIO },
- { 0x1e0d, KEY_MENU },
- { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
- { 0x1e0f, KEY_MUTE },
- { 0x1e10, KEY_VOLUMEUP },
- { 0x1e11, KEY_VOLUMEDOWN },
- { 0x1e12, KEY_PREVIOUS }, /* previous channel */
- { 0x1e14, KEY_UP },
- { 0x1e15, KEY_DOWN },
- { 0x1e16, KEY_LEFT },
- { 0x1e17, KEY_RIGHT },
- { 0x1e18, KEY_VIDEO }, /* Videos */
- { 0x1e19, KEY_AUDIO }, /* Music */
- /* 0x1e1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1e1a, KEY_MHP },
-
- { 0x1e1b, KEY_EPG }, /* Guide */
- { 0x1e1c, KEY_TV },
- { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1e1f, KEY_EXIT }, /* back/exit */
- { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
- { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x1e25, KEY_ENTER }, /* OK */
- { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x1e29, KEY_BLUE }, /* blue key */
- { 0x1e2e, KEY_GREEN }, /* green button */
- { 0x1e30, KEY_PAUSE }, /* pause */
- { 0x1e32, KEY_REWIND }, /* backward << */
- { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
- { 0x1e35, KEY_PLAY },
- { 0x1e36, KEY_STOP },
- { 0x1e37, KEY_RECORD }, /* recording */
- { 0x1e38, KEY_YELLOW }, /* yellow key */
- { 0x1e3b, KEY_SELECT }, /* top right button */
- { 0x1e3c, KEY_ZOOM }, /* full */
- { 0x1e3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
- .scan = ir_codes_rc5_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
- .ir_type = IR_TYPE_RC5,
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
-
-/* Terratec Cinergy Hybrid T USB XS FM
- Mauro Carvalho Chehab <mchehab@redhat.com>
- */
-static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = {
- { 0x1441, KEY_HOME},
- { 0x1401, KEY_POWER2},
-
- { 0x1442, KEY_MENU}, /* DVD menu */
- { 0x1443, KEY_SUBTITLE},
- { 0x1444, KEY_TEXT}, /* Teletext */
- { 0x1445, KEY_DELETE},
-
- { 0x1402, KEY_1},
- { 0x1403, KEY_2},
- { 0x1404, KEY_3},
- { 0x1405, KEY_4},
- { 0x1406, KEY_5},
- { 0x1407, KEY_6},
- { 0x1408, KEY_7},
- { 0x1409, KEY_8},
- { 0x140a, KEY_9},
- { 0x140c, KEY_0},
-
- { 0x140b, KEY_TUNER}, /* AV */
- { 0x140d, KEY_MODE}, /* A.B */
-
- { 0x1446, KEY_TV},
- { 0x1447, KEY_DVD},
- { 0x1449, KEY_VIDEO},
- { 0x144a, KEY_RADIO}, /* Music */
- { 0x144b, KEY_CAMERA}, /* PIC */
-
- { 0x1410, KEY_UP},
- { 0x1411, KEY_LEFT},
- { 0x1412, KEY_OK},
- { 0x1413, KEY_RIGHT},
- { 0x1414, KEY_DOWN},
-
- { 0x140f, KEY_EPG},
- { 0x1416, KEY_INFO},
- { 0x144d, KEY_BACKSPACE},
-
- { 0x141c, KEY_VOLUMEUP},
- { 0x141e, KEY_VOLUMEDOWN},
-
- { 0x144c, KEY_PLAY},
- { 0x141d, KEY_MUTE},
-
- { 0x141b, KEY_CHANNELUP},
- { 0x141f, KEY_CHANNELDOWN},
-
- { 0x1417, KEY_RED},
- { 0x1418, KEY_GREEN},
- { 0x1419, KEY_YELLOW},
- { 0x141a, KEY_BLUE},
-
- { 0x1458, KEY_RECORD},
- { 0x1448, KEY_STOP},
- { 0x1440, KEY_PAUSE},
-
- { 0x1454, KEY_LAST},
- { 0x144e, KEY_REWIND},
- { 0x144f, KEY_FASTFORWARD},
- { 0x145c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = {
- .scan = ir_codes_nec_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table);
-
-
-/* Leadtek Winfast TV USB II Deluxe remote
- Magnus Alm <magnus.alm@gmail.com>
- */
-static struct ir_scancode ir_codes_winfast_usbii_deluxe[] = {
- { 0x62, KEY_0},
- { 0x75, KEY_1},
- { 0x76, KEY_2},
- { 0x77, KEY_3},
- { 0x79, KEY_4},
- { 0x7a, KEY_5},
- { 0x7b, KEY_6},
- { 0x7d, KEY_7},
- { 0x7e, KEY_8},
- { 0x7f, KEY_9},
-
- { 0x38, KEY_CAMERA}, /* SNAPSHOT */
- { 0x37, KEY_RECORD}, /* RECORD */
- { 0x35, KEY_TIME}, /* TIMESHIFT */
-
- { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x64, KEY_MUTE}, /* MUTE */
-
- { 0x21, KEY_CHANNEL}, /* SURF */
- { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
-
- { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
-
- { 0x70, KEY_POWER2}, /* TV ON/OFF */
-
- { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
- { 0x3a, KEY_NEW}, /* PIP */
- { 0x73, KEY_ZOOM}, /* FULLSECREEN */
-
- { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
-
- { 0x31, KEY_DOT}, /* '.' */
- { 0x63, KEY_ENTER}, /* ENTER */
-
-};
-struct ir_scancode_table ir_codes_winfast_usbii_deluxe_table = {
- .scan = ir_codes_winfast_usbii_deluxe,
- .size = ARRAY_SIZE(ir_codes_winfast_usbii_deluxe),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_usbii_deluxe_table);
-
-/* Kworld 315U
- */
-static struct ir_scancode ir_codes_kworld_315u[] = {
- { 0x6143, KEY_POWER },
- { 0x6101, KEY_TUNER }, /* source */
- { 0x610b, KEY_ZOOM },
- { 0x6103, KEY_POWER2 }, /* shutdown */
-
- { 0x6104, KEY_1 },
- { 0x6108, KEY_2 },
- { 0x6102, KEY_3 },
- { 0x6109, KEY_CHANNELUP },
-
- { 0x610f, KEY_4 },
- { 0x6105, KEY_5 },
- { 0x6106, KEY_6 },
- { 0x6107, KEY_CHANNELDOWN },
-
- { 0x610c, KEY_7 },
- { 0x610d, KEY_8 },
- { 0x610a, KEY_9 },
- { 0x610e, KEY_VOLUMEUP },
-
- { 0x6110, KEY_LAST },
- { 0x6111, KEY_0 },
- { 0x6112, KEY_ENTER },
- { 0x6113, KEY_VOLUMEDOWN },
-
- { 0x6114, KEY_RECORD },
- { 0x6115, KEY_STOP },
- { 0x6116, KEY_PLAY },
- { 0x6117, KEY_MUTE },
-
- { 0x6118, KEY_UP },
- { 0x6119, KEY_DOWN },
- { 0x611a, KEY_LEFT },
- { 0x611b, KEY_RIGHT },
-
- { 0x611c, KEY_RED },
- { 0x611d, KEY_GREEN },
- { 0x611e, KEY_YELLOW },
- { 0x611f, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_kworld_315u_table = {
- .scan = ir_codes_kworld_315u,
- .size = ARRAY_SIZE(ir_codes_kworld_315u),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_315u_table);
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bfca26d5182..9374a006f43 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -1,4 +1,4 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-keytable.c - handle IR scancode->keycode tables
*
* Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
@@ -15,384 +15,408 @@
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include "ir-core-priv.h"
-#define IR_TAB_MIN_SIZE 32
-#define IR_TAB_MAX_SIZE 1024
+/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
+#define IR_TAB_MIN_SIZE 256
+#define IR_TAB_MAX_SIZE 8192
+
+/* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */
+#define IR_KEYPRESS_TIMEOUT 250
/**
- * ir_seek_table() - returns the element order on the table
- * @rc_tab: the ir_scancode_table with the keymap to be used
- * @scancode: the scancode that we're seeking
+ * ir_resize_table() - resizes a scancode table if necessary
+ * @rc_tab: the ir_scancode_table to resize
+ * @return: zero on success or a negative error code
*
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
+ * This routine will shrink the ir_scancode_table if it has lots of
+ * unused entries and grow it if it is full.
*/
-static int ir_seek_table(struct ir_scancode_table *rc_tab, u32 scancode)
+static int ir_resize_table(struct ir_scancode_table *rc_tab)
{
- int rc;
- unsigned long flags;
- struct ir_scancode *keymap = rc_tab->scan;
+ unsigned int oldalloc = rc_tab->alloc;
+ unsigned int newalloc = oldalloc;
+ struct ir_scancode *oldscan = rc_tab->scan;
+ struct ir_scancode *newscan;
+
+ if (rc_tab->size == rc_tab->len) {
+ /* All entries in use -> grow keytable */
+ if (rc_tab->alloc >= IR_TAB_MAX_SIZE)
+ return -ENOMEM;
- spin_lock_irqsave(&rc_tab->lock, flags);
+ newalloc *= 2;
+ IR_dprintk(1, "Growing table to %u bytes\n", newalloc);
+ }
- /* FIXME: replace it by a binary search */
+ if ((rc_tab->len * 3 < rc_tab->size) && (oldalloc > IR_TAB_MIN_SIZE)) {
+ /* Less than 1/3 of entries in use -> shrink keytable */
+ newalloc /= 2;
+ IR_dprintk(1, "Shrinking table to %u bytes\n", newalloc);
+ }
- for (rc = 0; rc < rc_tab->size; rc++)
- if (keymap[rc].scancode == scancode)
- goto exit;
+ if (newalloc == oldalloc)
+ return 0;
- /* Not found */
- rc = -EINVAL;
+ newscan = kmalloc(newalloc, GFP_ATOMIC);
+ if (!newscan) {
+ IR_dprintk(1, "Failed to kmalloc %u bytes\n", newalloc);
+ return -ENOMEM;
+ }
-exit:
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- return rc;
+ memcpy(newscan, rc_tab->scan, rc_tab->len * sizeof(struct ir_scancode));
+ rc_tab->scan = newscan;
+ rc_tab->alloc = newalloc;
+ rc_tab->size = rc_tab->alloc / sizeof(struct ir_scancode);
+ kfree(oldscan);
+ return 0;
}
/**
- * ir_roundup_tablesize() - gets an optimum value for the table size
- * @n_elems: minimum number of entries to store keycodes
- *
- * This routine is used to choose the keycode table size.
+ * ir_do_setkeycode() - internal function to set a keycode in the
+ * scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @rc_tab: the struct ir_scancode_table to set the keycode in
+ * @scancode: the scancode for the ir command
+ * @keycode: the keycode for the ir command
+ * @resize: whether the keytable may be grown if needed
+ * @return: a negative error code if the keycode could not be inserted, otherwise zero.
*
- * In order to have some empty space for new keycodes,
- * and knowing in advance that kmalloc allocates only power of two
- * segments, it optimizes the allocated space to have some spare space
- * for those new keycodes by using the maximum number of entries that
- * will be effectively be allocated by kmalloc.
- * In order to reduce the quantity of table resizes, it has a minimum
- * table size of IR_TAB_MIN_SIZE.
+ * This routine is used internally to manipulate the scancode->keycode table.
+ * The caller has to hold @rc_tab->lock.
*/
-static int ir_roundup_tablesize(int n_elems)
+static int ir_do_setkeycode(struct input_dev *dev,
+ struct ir_scancode_table *rc_tab,
+ unsigned scancode, unsigned keycode,
+ bool resize)
{
- size_t size;
-
- if (n_elems < IR_TAB_MIN_SIZE)
- n_elems = IR_TAB_MIN_SIZE;
+ unsigned int i;
+ int old_keycode = KEY_RESERVED;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
/*
- * As kmalloc only allocates sizes of power of two, get as
- * much entries as possible for the allocated memory segment
+ * Unfortunately, some hardware-based IR decoders don't provide
+ * all bits for the complete IR code. In general, they provide only
+ * the command part of the IR code. Yet, as it is possible to replace
+ * the provided remote with another one, it is necessary to allow loading
+ * scancode tables from other remotes. So, if the driver provides a
+ * scancode mask, strip the bits the hardware cannot deliver before the lookup.
*/
- size = roundup_pow_of_two(n_elems * sizeof(struct ir_scancode));
- n_elems = size / sizeof(struct ir_scancode);
+ if (ir_dev->props && ir_dev->props->scanmask) {
+ scancode &= ir_dev->props->scanmask;
+ }
- return n_elems;
+ /* First check if we already have a mapping for this ir command */
+ for (i = 0; i < rc_tab->len; i++) {
+ /* Keytable is sorted from lowest to highest scancode */
+ if (rc_tab->scan[i].scancode > scancode)
+ break;
+ else if (rc_tab->scan[i].scancode < scancode)
+ continue;
+
+ old_keycode = rc_tab->scan[i].keycode;
+ rc_tab->scan[i].keycode = keycode;
+
+ /* Did the user wish to remove the mapping? */
+ if (keycode == KEY_RESERVED || keycode == KEY_UNKNOWN) {
+ IR_dprintk(1, "#%d: Deleting scan 0x%04x\n",
+ i, scancode);
+ rc_tab->len--;
+ memmove(&rc_tab->scan[i], &rc_tab->scan[i + 1],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ }
+
+ /* Possibly shrink the keytable, failure is not a problem */
+ ir_resize_table(rc_tab);
+ break;
+ }
+
+ if (old_keycode == KEY_RESERVED && keycode != KEY_RESERVED) {
+ /* No previous mapping found, we might need to grow the table */
+ if (resize && ir_resize_table(rc_tab))
+ return -ENOMEM;
+
+ IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+
+ /* i is the proper index to insert our new keycode */
+ memmove(&rc_tab->scan[i + 1], &rc_tab->scan[i],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ rc_tab->scan[i].scancode = scancode;
+ rc_tab->scan[i].keycode = keycode;
+ rc_tab->len++;
+ set_bit(keycode, dev->keybit);
+ } else {
+ IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+ /* A previous mapping was updated... */
+ clear_bit(old_keycode, dev->keybit);
+ /* ...but another scancode might use the same keycode */
+ for (i = 0; i < rc_tab->len; i++) {
+ if (rc_tab->scan[i].keycode == old_keycode) {
+ set_bit(old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
+
+ return 0;
}
/**
- * ir_copy_table() - copies a keytable, discarding the unused entries
- * @destin: destin table
- * @origin: origin table
+ * ir_setkeycode() - set a keycode in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @scancode: the desired scancode
+ * @keycode: the keycode to be assigned to the scancode
+ * @return: a negative error code if the keycode could not be inserted, otherwise zero.
*
- * Copies all entries where the keycode is not KEY_UNKNOWN/KEY_RESERVED
- * Also copies table size and table protocol.
- * NOTE: It shouldn't copy the lock field
+ * This routine is used to handle evdev EVIOCSKEY ioctl.
*/
-
-static int ir_copy_table(struct ir_scancode_table *destin,
- const struct ir_scancode_table *origin)
+static int ir_setkeycode(struct input_dev *dev,
+ unsigned int scancode, unsigned int keycode)
{
- int i, j = 0;
-
- for (i = 0; i < origin->size; i++) {
- if (origin->scan[i].keycode == KEY_UNKNOWN ||
- origin->scan[i].keycode == KEY_RESERVED)
- continue;
+ int rc;
+ unsigned long flags;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- memcpy(&destin->scan[j], &origin->scan[i], sizeof(struct ir_scancode));
- j++;
- }
- destin->size = j;
- destin->ir_type = origin->ir_type;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ rc = ir_do_setkeycode(dev, rc_tab, scancode, keycode, true);
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
+}
- IR_dprintk(1, "Copied %d scancodes to the new keycode table\n", destin->size);
+/**
+ * ir_setkeytable() - sets several entries in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @to: the struct ir_scancode_table to copy entries to
+ * @from: the struct ir_scancode_table to copy entries from
+ * @return: a negative error code if any keycode could not be inserted, otherwise zero.
+ *
+ * This routine is used to handle table initialization.
+ */
+static int ir_setkeytable(struct input_dev *dev,
+ struct ir_scancode_table *to,
+ const struct ir_scancode_table *from)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ unsigned long flags;
+ unsigned int i;
+ int rc = 0;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ for (i = 0; i < from->size; i++) {
+ rc = ir_do_setkeycode(dev, to, from->scan[i].scancode,
+ from->scan[i].keycode, false);
+ if (rc)
+ break;
+ }
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
}
/**
- * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
+ * ir_getkeycode() - get a keycode from the scancode->keycode table
* @dev: the struct input_dev device descriptor
* @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * @keycode: used to return the keycode, if found, or KEY_RESERVED
+ * @return: always returns zero.
*
* This routine is used to handle evdev EVIOCGKEY ioctl.
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
*/
static int ir_getkeycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode)
{
- int elem;
+ int start, end, mid;
+ unsigned long flags;
+ int key = KEY_RESERVED;
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- *keycode = rc_tab->scan[elem].keycode;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
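+ /* Binary search over the keytable, which is kept sorted by scancode */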
+ start = 0;
+ end = rc_tab->len - 1;
+ while (start <= end) {
+ mid = (start + end) / 2;
+ if (rc_tab->scan[mid].scancode < scancode)
+ start = mid + 1;
+ else if (rc_tab->scan[mid].scancode > scancode)
+ end = mid - 1;
+ else {
+ key = rc_tab->scan[mid].keycode;
+ break;
+ }
}
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
- /*
- * Scancode not found and table can't be expanded
- */
- if (elem < 0 && rc_tab->size == IR_TAB_MAX_SIZE)
- return -EINVAL;
+ if (key == KEY_RESERVED)
+ IR_dprintk(1, "unknown key for scancode 0x%04x\n",
+ scancode);
- /*
- * If is there extra space, returns KEY_RESERVED,
- * otherwise, input core won't let ir_setkeycode to work
- */
- *keycode = KEY_RESERVED;
+ *keycode = key;
return 0;
}
/**
- * ir_is_resize_needed() - Check if the table needs rezise
- * @table: keycode table that may need to resize
- * @n_elems: minimum number of entries to store keycodes
- *
- * Considering that kmalloc uses power of two storage areas, this
- * routine detects if the real alloced size will change. If not, it
- * just returns without doing nothing. Otherwise, it will extend or
- * reduce the table size to meet the new needs.
+ * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
+ * @input_dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
*
- * It returns 0 if no resize is needed, 1 otherwise.
+ * This routine is used by the input routines when a key is pressed at the
+ * IR. The scancode is received and needs to be converted into a keycode.
+ * If the key is not found, it returns KEY_RESERVED. Otherwise, returns the
+ * corresponding keycode from the table.
*/
-static int ir_is_resize_needed(struct ir_scancode_table *table, int n_elems)
+u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
{
- int cur_size = ir_roundup_tablesize(table->size);
- int new_size = ir_roundup_tablesize(n_elems);
-
- if (cur_size == new_size)
- return 0;
+ int keycode;
- /* Resize is needed */
- return 1;
+ ir_getkeycode(dev, scancode, &keycode);
+ if (keycode != KEY_RESERVED)
+ IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
+ dev->name, scancode, keycode);
+ return keycode;
}
+EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_delete_key() - remove a keycode from the table
- * @rc_tab: keycode table
- * @elem: element to be removed
+ * ir_keyup() - generates input event to cleanup a key press
+ * @ir: the struct ir_input_dev descriptor of the device
*
+ * This routine is used to signal that a key has been released on the
+ * remote control. It reports a keyup input event via input_report_key().
*/
-static void ir_delete_key(struct ir_scancode_table *rc_tab, int elem)
+static void ir_keyup(struct ir_input_dev *ir)
{
- unsigned long flags = 0;
- int newsize = rc_tab->size - 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap = NULL;
-
- if (resize)
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
-
- /* There's no memory for resize. Keep the old table */
- if (!resize || !newkeymap) {
- newkeymap = oldkeymap;
-
- /* We'll modify the live table. Lock it */
- spin_lock_irqsave(&rc_tab->lock, flags);
- }
+ if (!ir->keypressed)
+ return;
- /*
- * Copy the elements before the one that will be deleted
- * if (!resize), both oldkeymap and newkeymap points
- * to the same place, so, there's no need to copy
- */
- if (resize && elem > 0)
- memcpy(newkeymap, oldkeymap,
- elem * sizeof(*newkeymap));
+ IR_dprintk(1, "keyup key 0x%04x\n", ir->last_keycode);
+ input_report_key(ir->input_dev, ir->last_keycode, 0);
+ input_sync(ir->input_dev);
+ ir->keypressed = false;
+}
+
+/**
+ * ir_timer_keyup() - generates a keyup event after a timeout
+ * @cookie: a pointer to struct ir_input_dev passed to setup_timer()
+ *
+ * This routine will generate a keyup event some time after a keydown event
+ * is generated when no further activity has been detected.
+ */
+static void ir_timer_keyup(unsigned long cookie)
+{
+ struct ir_input_dev *ir = (struct ir_input_dev *)cookie;
+ unsigned long flags;
/*
- * Copy the other elements overwriting the element to be removed
- * This operation applies to both resize and non-resize case
+ * ir->keyup_jiffies is used to prevent a race condition if a
+ * hardware interrupt occurs at this point and the keyup timer
+ * event is moved further into the future as a result.
+ *
+ * The timer will then be reactivated and this function called
+ * again in the future. We need to exit gracefully in that case
+ * to allow the input subsystem to do its auto-repeat magic or
+ * a keyup event might follow immediately after the keydown.
*/
- if (elem < newsize)
- memcpy(&newkeymap[elem], &oldkeymap[elem + 1],
- (newsize - elem) * sizeof(*newkeymap));
-
- if (resize) {
- /*
- * As the copy happened to a temporary table, only here
- * it needs to lock while replacing the table pointers
- * to use the new table
- */
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- rc_tab->scan = newkeymap;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
-
- /* Frees the old keytable */
- kfree(oldkeymap);
- } else {
- rc_tab->size = newsize;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- }
+ spin_lock_irqsave(&ir->keylock, flags);
+ if (time_is_after_eq_jiffies(ir->keyup_jiffies))
+ ir_keyup(ir);
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
/**
- * ir_insert_key() - insert a keycode at the table
- * @rc_tab: keycode table
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_repeat() - notifies the IR core that a key is still pressed
+ * @dev: the struct input_dev descriptor of the device
*
+ * This routine is used by IR decoders when a repeat message which does
+ * not include the necessary bits to reproduce the scancode has been
+ * received.
*/
-static int ir_insert_key(struct ir_scancode_table *rc_tab,
- int scancode, int keycode)
+void ir_repeat(struct input_dev *dev)
{
unsigned long flags;
- int elem = rc_tab->size;
- int newsize = rc_tab->size + 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap;
-
- if (resize) {
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
- if (!newkeymap)
- return -ENOMEM;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- memcpy(newkeymap, oldkeymap,
- rc_tab->size * sizeof(*newkeymap));
- } else
- newkeymap = oldkeymap;
+ spin_lock_irqsave(&ir->keylock, flags);
- /* Stores the new code at the table */
- IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
- rc_tab->size, scancode, keycode);
+ if (!ir->keypressed)
+ goto out;
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- if (resize) {
- rc_tab->scan = newkeymap;
- kfree(oldkeymap);
- }
- newkeymap[elem].scancode = scancode;
- newkeymap[elem].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
- return 0;
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_repeat);
/**
- * ir_setkeycode() - set a keycode at the evdev scancode ->keycode table
- * @dev: the struct input_dev device descriptor
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_keydown() - generates input event for a key press
+ * @dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
+ * @toggle: the toggle value (protocol dependent, if the protocol doesn't
+ * support toggle values, this should be set to zero)
*
- * This routine is used to handle evdev EVIOCSKEY ioctl.
- * There's one caveat here: how can we increase the size of the table?
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
+ * This routine is used by the input routines when a key is pressed at the
+ * IR. It gets the keycode for a scancode and reports an input event via
+ * input_report_key().
*/
-static int ir_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+void ir_keydown(struct input_dev *dev, int scancode, u8 toggle)
{
- int rc = 0;
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
unsigned long flags;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- /*
- * Handle keycode table deletions
- *
- * If userspace is adding a KEY_UNKNOWN or KEY_RESERVED,
- * deal as a trial to remove an existing scancode attribution
- * if table become too big, reduce it to save space
- */
- if (keycode == KEY_UNKNOWN || keycode == KEY_RESERVED) {
- rc = ir_seek_table(rc_tab, scancode);
- if (rc < 0)
- return 0;
-
- IR_dprintk(1, "#%d: Deleting scan 0x%04x\n", rc, scancode);
- clear_bit(keymap[rc].keycode, dev->keybit);
- ir_delete_key(rc_tab, rc);
-
- return 0;
- }
+ u32 keycode = ir_g_keycode_from_table(dev, scancode);
- /*
- * Handle keycode replacements
- *
- * If the scancode exists, just replace by the new value
- */
- rc = ir_seek_table(rc_tab, scancode);
- if (rc >= 0) {
- IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
- rc, scancode, keycode);
+ spin_lock_irqsave(&ir->keylock, flags);
- clear_bit(keymap[rc].keycode, dev->keybit);
+ /* Repeat event? */
+ if (ir->keypressed &&
+ ir->last_scancode == scancode &&
+ ir->last_toggle == toggle)
+ goto set_timer;
- spin_lock_irqsave(&rc_tab->lock, flags);
- keymap[rc].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ /* Release old keypress */
+ ir_keyup(ir);
- set_bit(keycode, dev->keybit);
+ ir->last_scancode = scancode;
+ ir->last_toggle = toggle;
+ ir->last_keycode = keycode;
- return 0;
- }
+ if (keycode == KEY_RESERVED)
+ goto out;
- /*
- * Handle new scancode inserts
- *
- * reallocate table if needed and insert a new keycode
- */
+ /* Register a keypress */
+ ir->keypressed = true;
+ IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
+ dev->name, keycode, scancode);
+ input_report_key(dev, ir->last_keycode, 1);
+ input_sync(dev);
- /* Avoid growing the table indefinitely */
- if (rc_tab->size + 1 > IR_TAB_MAX_SIZE)
- return -EINVAL;
-
- rc = ir_insert_key(rc_tab, scancode, keycode);
- if (rc < 0)
- return rc;
- set_bit(keycode, dev->keybit);
-
- return 0;
+set_timer:
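+ /* (Re)arm the keyup timer; repeats within IR_KEYPRESS_TIMEOUT keep the key held down */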
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_keydown);
-/**
- * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
- * @input_dev: the struct input_dev descriptor of the device
- * @scancode: the scancode that we're seeking
- *
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
- */
-u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
+static int ir_open(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
- int elem;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
- dev->name, scancode, keymap[elem].keycode);
-
- return rc_tab->scan[elem].keycode;
- }
+ return ir_dev->props->open(ir_dev->props->priv);
+}
- printk(KERN_INFO "%s: unknown key for scancode 0x%04x\n",
- dev->name, scancode);
+static void ir_close(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- /* Reports userspace that an unknown keycode were got */
- return KEY_RESERVED;
+ ir_dev->props->close(ir_dev->props->priv);
}
-EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_input_register() - sets the IR keycode table and add the handlers
+ * __ir_input_register() - sets the IR keycode table and add the handlers
* for keymap table get/set
* @input_dev: the struct input_dev descriptor of the device
* @rc_tab: the struct ir_scancode_table table of scancode/keymap
@@ -402,13 +426,13 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* It will register the input/evdev interface for the device and
* register the syfs code for IR class
*/
-int ir_input_register(struct input_dev *input_dev,
+int __ir_input_register(struct input_dev *input_dev,
const struct ir_scancode_table *rc_tab,
- const struct ir_dev_props *props)
+ const struct ir_dev_props *props,
+ const char *driver_name)
{
struct ir_input_dev *ir_dev;
- struct ir_scancode *keymap = rc_tab->scan;
- int i, rc;
+ int rc;
if (rc_tab->scan == NULL || !rc_tab->size)
return -EINVAL;
@@ -417,57 +441,77 @@ int ir_input_register(struct input_dev *input_dev,
if (!ir_dev)
return -ENOMEM;
- spin_lock_init(&ir_dev->rc_tab.lock);
-
- ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
- ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
- sizeof(struct ir_scancode), GFP_KERNEL);
- if (!ir_dev->rc_tab.scan) {
- kfree(ir_dev);
- return -ENOMEM;
+ ir_dev->driver_name = kasprintf(GFP_KERNEL, "%s", driver_name);
+ if (!ir_dev->driver_name) {
+ rc = -ENOMEM;
+ goto out_dev;
}
- IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
- ir_dev->rc_tab.size,
- ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
+ input_dev->getkeycode = ir_getkeycode;
+ input_dev->setkeycode = ir_setkeycode;
+ input_set_drvdata(input_dev, ir_dev);
+ ir_dev->input_dev = input_dev;
- ir_copy_table(&ir_dev->rc_tab, rc_tab);
- ir_dev->props = props;
+ spin_lock_init(&ir_dev->rc_tab.lock);
+ spin_lock_init(&ir_dev->keylock);
+ setup_timer(&ir_dev->timer_keyup, ir_timer_keyup, (unsigned long)ir_dev);
+
+ ir_dev->rc_tab.name = rc_tab->name;
+ ir_dev->rc_tab.ir_type = rc_tab->ir_type;
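+ /* Round the initial allocation up to a power of two to match kmalloc's granularity */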
+ ir_dev->rc_tab.alloc = roundup_pow_of_two(rc_tab->size *
+ sizeof(struct ir_scancode));
+ ir_dev->rc_tab.scan = kmalloc(ir_dev->rc_tab.alloc, GFP_KERNEL);
+ ir_dev->rc_tab.size = ir_dev->rc_tab.alloc / sizeof(struct ir_scancode);
+ if (props) {
+ ir_dev->props = props;
+ if (props->open)
+ input_dev->open = ir_open;
+ if (props->close)
+ input_dev->close = ir_close;
+ }
- /* set the bits for the keys */
- IR_dprintk(1, "key map size: %d\n", rc_tab->size);
- for (i = 0; i < rc_tab->size; i++) {
- IR_dprintk(1, "#%d: setting bit for keycode 0x%04x\n",
- i, keymap[i].keycode);
- set_bit(keymap[i].keycode, input_dev->keybit);
+ if (!ir_dev->rc_tab.scan) {
+ rc = -ENOMEM;
+ goto out_name;
}
- clear_bit(0, input_dev->keybit);
+
+ IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
+ ir_dev->rc_tab.size, ir_dev->rc_tab.alloc);
set_bit(EV_KEY, input_dev->evbit);
+ set_bit(EV_REP, input_dev->evbit);
- input_dev->getkeycode = ir_getkeycode;
- input_dev->setkeycode = ir_setkeycode;
- input_set_drvdata(input_dev, ir_dev);
+ if (ir_setkeytable(input_dev, &ir_dev->rc_tab, rc_tab)) {
+ rc = -ENOMEM;
+ goto out_table;
+ }
- rc = input_register_device(input_dev);
+ rc = ir_register_class(input_dev);
if (rc < 0)
- goto err;
+ goto out_table;
- rc = ir_register_class(input_dev);
- if (rc < 0) {
- input_unregister_device(input_dev);
- goto err;
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) {
+ rc = ir_raw_event_register(input_dev);
+ if (rc < 0)
+ goto out_event;
}
+ IR_dprintk(1, "Registered input device on %s for %s remote.\n",
+ driver_name, rc_tab->name);
+
return 0;
-err:
- kfree(rc_tab->scan);
+out_event:
+ ir_unregister_class(input_dev);
+out_table:
+ kfree(ir_dev->rc_tab.scan);
+out_name:
+ kfree(ir_dev->driver_name);
+out_dev:
kfree(ir_dev);
- input_set_drvdata(input_dev, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(ir_input_register);
+EXPORT_SYMBOL_GPL(__ir_input_register);
/**
* ir_input_unregister() - unregisters IR and frees resources
@@ -475,9 +519,9 @@ EXPORT_SYMBOL_GPL(ir_input_register);
* This routine is used to free memory and de-register interfaces.
*/
-void ir_input_unregister(struct input_dev *dev)
+void ir_input_unregister(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
struct ir_scancode_table *rc_tab;
if (!ir_dev)
@@ -485,15 +529,18 @@ void ir_input_unregister(struct input_dev *dev)
IR_dprintk(1, "Freed keycode table\n");
+ del_timer_sync(&ir_dev->timer_keyup);
+ if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_event_unregister(input_dev);
rc_tab = &ir_dev->rc_tab;
rc_tab->size = 0;
kfree(rc_tab->scan);
rc_tab->scan = NULL;
- ir_unregister_class(dev);
+ ir_unregister_class(input_dev);
+ kfree(ir_dev->driver_name);
kfree(ir_dev);
- input_unregister_device(dev);
}
EXPORT_SYMBOL_GPL(ir_input_unregister);
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
new file mode 100644
index 00000000000..ba79233112e
--- /dev/null
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -0,0 +1,328 @@
+/* ir-nec-decoder.c - handle NEC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define NEC_NBITS 32
+#define NEC_UNIT 562500 /* ns */
+#define NEC_HEADER_PULSE (16 * NEC_UNIT)
+#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
+#define NEC_HEADER_SPACE (8 * NEC_UNIT)
+#define NEC_REPEAT_SPACE (8 * NEC_UNIT)
+#define NEC_BIT_PULSE (1 * NEC_UNIT)
+#define NEC_BIT_0_SPACE (1 * NEC_UNIT)
+#define NEC_BIT_1_SPACE (3 * NEC_UNIT)
+#define NEC_TRAILER_PULSE (1 * NEC_UNIT)
+#define NEC_TRAILER_SPACE (10 * NEC_UNIT) /* even longer in reality */
+
+/* Used to register nec_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum nec_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum nec_state state;
+ u32 nec_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "nec_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_nec_decode() - Decode one NEC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 address, not_address, command, not_command;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(2, "NEC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT / 2) &&
+ !eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, NEC_HEADER_SPACE, NEC_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
+ ir_repeat(input_dev);
+ IR_dprintk(1, "Repeat last key\n");
+ data->state = STATE_TRAILER_PULSE;
+ return 0;
+ }
+
+ break;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_BIT_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
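+ /* Shift in the next bit: a long space encodes 1, a short space encodes 0 */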
+ data->nec_bits <<= 1;
+ if (eq_margin(ev.duration, NEC_BIT_1_SPACE, NEC_UNIT / 2))
+ data->nec_bits |= 1;
+ else if (!eq_margin(ev.duration, NEC_BIT_0_SPACE, NEC_UNIT / 2))
+ break;
+ data->count++;
+
+ if (data->count == NEC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_TRAILER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
+ break;
+
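+ /* NEC transmits LSB first; bitrev8() restores the conventional MSB-first byte order */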
+ address = bitrev8((data->nec_bits >> 24) & 0xff);
+ not_address = bitrev8((data->nec_bits >> 16) & 0xff);
+ command = bitrev8((data->nec_bits >> 8) & 0xff);
+ not_command = bitrev8((data->nec_bits >> 0) & 0xff);
+
+ if ((command ^ not_command) != 0xff) {
+ IR_dprintk(1, "NEC checksum error: received 0x%08x\n",
+ data->nec_bits);
+ break;
+ }
+
+ if ((address ^ not_address) != 0xff) {
+ /* Extended NEC */
+ scancode = address << 16 |
+ not_address << 8 |
+ command;
+ IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode);
+ } else {
+ /* Normal NEC */
+ scancode = address << 8 | command;
+ IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
+ }
+
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(1, "NEC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_nec_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_nec_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ static struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler nec_handler = {
+ .decode = ir_nec_decode,
+ .raw_register = ir_nec_register,
+ .raw_unregister = ir_nec_unregister,
+};
+
+static int __init ir_nec_decode_init(void)
+{
+ ir_raw_handler_register(&nec_handler);
+
+ printk(KERN_INFO "IR NEC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_nec_decode_exit(void)
+{
+ ir_raw_handler_unregister(&nec_handler);
+}
+
+module_init(ir_nec_decode_init);
+module_exit(ir_nec_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("NEC IR protocol decoder");
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
new file mode 100644
index 00000000000..ea68a3f2eff
--- /dev/null
+++ b/drivers/media/IR/ir-raw-event.c
@@ -0,0 +1,251 @@
+/* ir-raw-event.c - handle IR Pulse/Space event
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "ir-core-priv.h"
+
+/* Define the max number of pulse/space transitions to buffer */
+#define MAX_IR_EVENT_SIZE 512
+
+/* Used to handle IR raw handler extensions */
+static LIST_HEAD(ir_raw_handler_list);
+static DEFINE_SPINLOCK(ir_raw_handler_lock);
+
+/**
+ * RUN_DECODER() - runs an operation on all IR decoders
+ * @ops: IR raw handler operation to be called
+ * @arg: arguments to be passed to the callback
+ *
+ * Calls ir_raw_handler::ops for all registered IR handlers. It prevents
+ * new decode addition/removal while running, by locking ir_raw_handler_lock
+ * mutex. If an error occurs, it stops the ops. Otherwise, it returns a sum
+ * of the return codes.
+ */
+#define RUN_DECODER(ops, ...) ({ \
+ struct ir_raw_handler *_ir_raw_handler; \
+ int _sumrc = 0, _rc; \
+ spin_lock(&ir_raw_handler_lock); \
+ list_for_each_entry(_ir_raw_handler, &ir_raw_handler_list, list) { \
+ if (_ir_raw_handler->ops) { \
+ _rc = _ir_raw_handler->ops(__VA_ARGS__); \
+ if (_rc < 0) \
+ break; \
+ _sumrc += _rc; \
+ } \
+ } \
+ spin_unlock(&ir_raw_handler_lock); \
+ _sumrc; \
+})
+
+#ifdef MODULE
+/* Used to load the decoders */
+static struct work_struct wq_load;
+#endif
+
+static void ir_raw_event_work(struct work_struct *work)
+{
+ struct ir_raw_event ev;
+ struct ir_raw_event_ctrl *raw =
+ container_of(work, struct ir_raw_event_ctrl, rx_work);
+
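+ /* Drain the FIFO and feed every stored pulse/space to all registered decoders */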
+ while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev))
+ RUN_DECODER(decode, raw->input_dev, ev);
+}
+
+int ir_raw_event_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ int rc;
+
+ ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
+ if (!ir->raw)
+ return -ENOMEM;
+
+ ir->raw->input_dev = input_dev;
+ INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
+ rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
+ GFP_KERNEL);
+ if (rc < 0) {
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ rc = RUN_DECODER(raw_register, input_dev);
+ if (rc < 0) {
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ return rc;
+}
+
+void ir_raw_event_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ cancel_work_sync(&ir->raw->rx_work);
+ RUN_DECODER(raw_unregister, input_dev);
+
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+}
+
+/**
+ * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
+ * @input_dev: the struct input_dev device descriptor
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This routine (which may be called from an interrupt context) stores a
+ * pulse/space duration for the raw ir decoding state machines. Pulses are
+ * signalled as positive values and spaces as negative values. A zero value
+ * will reset the decoding state machines.
+ */
+int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store);
+
+/**
+ * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
+ * @input_dev: the struct input_dev device descriptor
+ * @type: the type of the event that has occurred
+ *
+ * This routine (which may be called from an interrupt context) is used to
+ * store the beginning of an ir pulse or space (or the start/end of ir
+ * reception) for the raw ir decoding state machines. This is used by
+ * hardware which does not provide durations directly but only interrupts
+ * (or similar events) on state change.
+ */
+int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ ktime_t now;
+ s64 delta; /* ns */
+ struct ir_raw_event ev;
+ int rc = 0;
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+
+ /* Check for a long duration since the last event, or whether
+ * we're being called for the first time; note that delta can't
+ * possibly be negative.
+ */
+ ev.duration = 0;
+ if (delta > IR_MAX_DURATION || !ir->raw->last_type)
+ type |= IR_START_EVENT;
+ else
+ ev.duration = delta;
+
+ if (type & IR_START_EVENT)
+ ir_raw_event_reset(input_dev);
+ else if (ir->raw->last_type & IR_SPACE) {
+ ev.pulse = false;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else if (ir->raw->last_type & IR_PULSE) {
+ ev.pulse = true;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else
+ return 0;
+
+ ir->raw->last_event = now;
+ ir->raw->last_type = type;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
+
+/**
+ * ir_raw_event_handle() - schedules the decoding of stored ir data
+ * @input_dev: the struct input_dev device descriptor
+ *
+ * This routine will signal the workqueue to start decoding stored ir data.
+ */
+void ir_raw_event_handle(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ schedule_work(&ir->raw->rx_work);
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_handle);
+
+/*
+ * Extension interface - used to register the IR decoders
+ */
+
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
+ spin_unlock(&ir_raw_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ir_raw_handler_register);
+
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_del(&ir_raw_handler->list);
+ spin_unlock(&ir_raw_handler_lock);
+}
+EXPORT_SYMBOL(ir_raw_handler_unregister);
+
+#ifdef MODULE
+static void init_decoders(struct work_struct *work)
+{
+ /* Load the decoder modules */
+
+ load_nec_decode();
+ load_rc5_decode();
+ load_rc6_decode();
+ load_jvc_decode();
+ load_sony_decode();
+
+ /* If some init code is needed here later, the CONFIG_MODULE
+ * test at ir-core.h will also need to be changed.
+ */
+}
+#endif
+
+void ir_raw_init(void)
+{
+#ifdef MODULE
+ INIT_WORK(&wq_load, init_decoders);
+ schedule_work(&wq_load);
+#endif
+}
diff --git a/drivers/media/IR/ir-rc5-decoder.c b/drivers/media/IR/ir-rc5-decoder.c
new file mode 100644
index 00000000000..23cdb1b1a3b
--- /dev/null
+++ b/drivers/media/IR/ir-rc5-decoder.c
@@ -0,0 +1,324 @@
+/* ir-rc5-decoder.c - handle RC5(x) IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This code handles the 14-bit RC5 protocol and the 20-bit RC5x protocol.
+ * There are other variants that use a different number of bits;
+ * these are currently unsupported.
+ * It assumes a carrier of 36 kHz, with a total of 14/20 bits, where
+ * the first two bits are start bits and the third one is a toggle bit.
+ */
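+
+/*
+ * For reference, the classic 14-bit RC5 frame looks roughly like this
+ * (a sketch of the commonly documented layout, not taken from this file):
+ *
+ *	S1 S2 T  A4..A0  C5..C0
+ *
+ * two start bits, one toggle bit, 5 address bits and 6 command bits, each
+ * bit Manchester-encoded over 2 * RC5_UNIT.
+ */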
+
+#include "ir-core-priv.h"
+
+#define RC5_NBITS 14
+#define RC5X_NBITS 20
+#define CHECK_RC5X_NBITS 8
+#define RC5_UNIT 888888 /* ns */
+#define RC5_BIT_START (1 * RC5_UNIT)
+#define RC5_BIT_END (1 * RC5_UNIT)
+#define RC5X_SPACE (4 * RC5_UNIT)
+
+/* Used to register rc5_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc5_state {
+ STATE_INACTIVE,
+ STATE_BIT_START,
+ STATE_BIT_END,
+ STATE_CHECK_RC5X,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ unsigned int enabled:1;
+
+ /* State machine control */
+ enum rc5_state state;
+ u32 rc5_bits;
+ struct ir_raw_event prev_ev;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc5_decoder",
+ .attrs = decoder_attributes,
+};
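+
+/*
+ * The attribute group above appears under the rc class device, so the
+ * decoder can be switched off from userspace with e.g. (the "rc0" device
+ * name is illustrative):
+ *
+ *	echo 0 > /sys/class/rc/rc0/rc5_decoder/enabled
+ */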
+
+/**
+ * ir_rc5_decode() - Decode one RC-5 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u8 toggle;
+ u32 scancode;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC5(x) decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ data->state = STATE_BIT_START;
+ data->count = 1;
+ /* We just need enough bits to get to STATE_CHECK_RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5_BIT_START);
+ goto again;
+
+ case STATE_BIT_START:
+ if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
+ break;
+
+ data->rc5_bits <<= 1;
+ if (!ev.pulse)
+ data->rc5_bits |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_BIT_END;
+ return 0;
+
+ case STATE_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else if (data->count == CHECK_RC5X_NBITS)
+ data->state = STATE_CHECK_RC5X;
+ else
+ data->state = STATE_BIT_START;
+
+ decrease_duration(&ev, RC5_BIT_END);
+ goto again;
+
+ case STATE_CHECK_RC5X:
+ if (!ev.pulse && geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) {
+ /* RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5X_SPACE);
+ } else {
+ /* RC5 */
+ data->wanted_bits = RC5_NBITS;
+ }
+ data->state = STATE_BIT_START;
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (data->wanted_bits == RC5X_NBITS) {
+ /* RC5X */
+ u8 xdata, command, system;
+ xdata = (data->rc5_bits & 0x0003F) >> 0;
+ command = (data->rc5_bits & 0x00FC0) >> 6;
+ system = (data->rc5_bits & 0x1F000) >> 12;
+ toggle = (data->rc5_bits & 0x20000) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 16 | command << 8 | xdata;
+
+ IR_dprintk(1, "RC5X scancode 0x%06x (toggle: %u)\n",
+ scancode, toggle);
+
+ } else {
+ /* RC5 */
+ u8 command, system;
+ command = (data->rc5_bits & 0x0003F) >> 0;
+ system = (data->rc5_bits & 0x007C0) >> 6;
+ toggle = (data->rc5_bits & 0x00800) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 8 | command;
+
+ IR_dprintk(1, "RC5 scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC5(x) decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_rc5_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc5_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc5_handler = {
+ .decode = ir_rc5_decode,
+ .raw_register = ir_rc5_register,
+ .raw_unregister = ir_rc5_unregister,
+};
+
+static int __init ir_rc5_decode_init(void)
+{
+ ir_raw_handler_register(&rc5_handler);
+
+ printk(KERN_INFO "IR RC5(x) protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc5_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc5_handler);
+}
+
+module_init(ir_rc5_decode_init);
+module_exit(ir_rc5_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("RC5(x) IR protocol decoder");
diff --git a/drivers/media/IR/ir-rc6-decoder.c b/drivers/media/IR/ir-rc6-decoder.c
new file mode 100644
index 00000000000..2bf479f4f1b
--- /dev/null
+++ b/drivers/media/IR/ir-rc6-decoder.c
@@ -0,0 +1,419 @@
+/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ir-core-priv.h"
+
+/*
+ * This decoder currently supports:
+ * RC6-0-16 (standard toggle bit in header)
+ * RC6-6A-24 (no toggle bit)
+ * RC6-6A-32 (MCE version with toggle bit in body)
+ */
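+
+/*
+ * Rough outline of a frame as handled by the state machine below (a sketch
+ * derived from this decoder, not a formal protocol description):
+ *
+ *	<6u pulse><2u space><4 header bits><toggle bit at 2u><16/24/32 body bits>
+ *
+ * where "u" is RC6_UNIT; the header carries the start bit and the 3-bit
+ * mode (see RC6_STARTBIT_MASK and RC6_MODE_MASK).
+ */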
+
+#define RC6_UNIT 444444 /* ns */
+#define RC6_HEADER_NBITS 4 /* not including toggle bit */
+#define RC6_0_NBITS 16
+#define RC6_6A_SMALL_NBITS 24
+#define RC6_6A_LARGE_NBITS 32
+#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
+#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
+#define RC6_BIT_START (1 * RC6_UNIT)
+#define RC6_BIT_END (1 * RC6_UNIT)
+#define RC6_TOGGLE_START (2 * RC6_UNIT)
+#define RC6_TOGGLE_END (2 * RC6_UNIT)
+#define RC6_MODE_MASK 0x07 /* for the header bits */
+#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
+#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
+
+/* Used to register rc6_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc6_mode {
+ RC6_MODE_0,
+ RC6_MODE_6A,
+ RC6_MODE_UNKNOWN,
+};
+
+enum rc6_state {
+ STATE_INACTIVE,
+ STATE_PREFIX_SPACE,
+ STATE_HEADER_BIT_START,
+ STATE_HEADER_BIT_END,
+ STATE_TOGGLE_START,
+ STATE_TOGGLE_END,
+ STATE_BODY_BIT_START,
+ STATE_BODY_BIT_END,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ unsigned int enabled:1;
+
+ /* State machine control */
+ enum rc6_state state;
+ u8 header;
+ u32 body;
+ struct ir_raw_event prev_ev;
+ bool toggle;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc6_decoder",
+ .attrs = decoder_attributes,
+};
+
+static enum rc6_mode rc6_mode(struct decoder_data *data)
+{
+ switch (data->header & RC6_MODE_MASK) {
+ case 0:
+ return RC6_MODE_0;
+ case 6:
+ if (!data->toggle)
+ return RC6_MODE_6A;
+ /* fall through */
+ default:
+ return RC6_MODE_UNKNOWN;
+ }
+}
+
+/**
+ * ir_rc6_decode() - Decode one RC6 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc6_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 toggle;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ /* Note: larger margin on first pulse since each RC6_UNIT
+ is quite short and some hardware takes some time to
+ adjust to the signal */
+ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
+ break;
+
+ data->state = STATE_PREFIX_SPACE;
+ data->count = 0;
+ return 0;
+
+ case STATE_PREFIX_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
+ break;
+
+ data->state = STATE_HEADER_BIT_START;
+ return 0;
+
+ case STATE_HEADER_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->header <<= 1;
+ if (ev.pulse)
+ data->header |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_HEADER_BIT_END;
+ return 0;
+
+ case STATE_HEADER_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == RC6_HEADER_NBITS)
+ data->state = STATE_TOGGLE_START;
+ else
+ data->state = STATE_HEADER_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_TOGGLE_START:
+ if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
+ break;
+
+ data->toggle = ev.pulse;
+ data->prev_ev = ev;
+ data->state = STATE_TOGGLE_END;
+ return 0;
+
+ case STATE_TOGGLE_END:
+ if (!is_transition(&ev, &data->prev_ev) ||
+ !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
+ break;
+
+ if (!(data->header & RC6_STARTBIT_MASK)) {
+ IR_dprintk(1, "RC6 invalid start bit\n");
+ break;
+ }
+
+ data->state = STATE_BODY_BIT_START;
+ data->prev_ev = ev;
+ decrease_duration(&ev, RC6_TOGGLE_END);
+ data->count = 0;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ data->wanted_bits = RC6_0_NBITS;
+ break;
+ case RC6_MODE_6A:
+ /* This might look weird, but we basically
+ check the value of the first body bit to
+ determine the number of bits in mode 6A */
+ if ((!ev.pulse && !geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) ||
+ geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ data->wanted_bits = RC6_6A_LARGE_NBITS;
+ else
+ data->wanted_bits = RC6_6A_SMALL_NBITS;
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+ goto again;
+
+ case STATE_BODY_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ data->count++;
+ data->prev_ev = ev;
+
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+
+ case STATE_BODY_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BODY_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ scancode = data->body & 0xffff;
+ toggle = data->toggle;
+ IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ case RC6_MODE_6A:
+ if (data->wanted_bits == RC6_6A_LARGE_NBITS) {
+ toggle = data->body & RC6_6A_MCE_TOGGLE_MASK ? 1 : 0;
+ scancode = data->body & ~RC6_6A_MCE_TOGGLE_MASK;
+ } else {
+ toggle = 0;
+ scancode = data->body & 0xffffff;
+ }
+
+ IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_rc6_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc6_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc6_handler = {
+ .decode = ir_rc6_decode,
+ .raw_register = ir_rc6_register,
+ .raw_unregister = ir_rc6_unregister,
+};
+
+static int __init ir_rc6_decode_init(void)
+{
+ ir_raw_handler_register(&rc6_handler);
+
+ printk(KERN_INFO "IR RC6 protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc6_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc6_handler);
+}
+
+module_init(ir_rc6_decode_init);
+module_exit(ir_rc6_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("RC6 IR protocol decoder");
diff --git a/drivers/media/IR/ir-sony-decoder.c b/drivers/media/IR/ir-sony-decoder.c
new file mode 100644
index 00000000000..9f440c5c060
--- /dev/null
+++ b/drivers/media/IR/ir-sony-decoder.c
@@ -0,0 +1,312 @@
+/* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define SONY_UNIT 600000 /* ns */
+#define SONY_HEADER_PULSE (4 * SONY_UNIT)
+#define SONY_HEADER_SPACE (1 * SONY_UNIT)
+#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
+#define SONY_BIT_1_PULSE (2 * SONY_UNIT)
+#define SONY_BIT_SPACE (1 * SONY_UNIT)
+#define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */
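+
+/*
+ * A Sony (SIRC) frame, as handled below, starts with a 4-unit header pulse
+ * and a 1-unit space, followed by 12, 15 or 20 bits sent least significant
+ * bit first (hence the bitrev8() calls in the decoder). This is a sketch
+ * derived from the code below, not an authoritative protocol reference.
+ */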
+
+/* Used to register sony_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum sony_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ unsigned int enabled:1;
+
+ /* State machine control */
+ enum sony_state state;
+ u32 sony_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "sony_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_sony_decode() - Decode one Sony pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_sony_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 device, subdevice, function;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "Sony decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ data->sony_bits <<= 1;
+ if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2))
+ data->sony_bits |= 1;
+ else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count++;
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2))
+ break;
+
+ decrease_duration(&ev, SONY_BIT_SPACE);
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+ data->state = STATE_FINISHED;
+ /* Fall through */
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2))
+ break;
+
+ switch (data->count) {
+ case 12:
+ device = bitrev8((data->sony_bits << 3) & 0xF8);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 4) & 0xFE);
+ break;
+ case 15:
+ device = bitrev8((data->sony_bits >> 0) & 0xFF);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 7) & 0xFD);
+ break;
+ case 20:
+ device = bitrev8((data->sony_bits >> 5) & 0xF8);
+ subdevice = bitrev8((data->sony_bits >> 0) & 0xFF);
+ function = bitrev8((data->sony_bits >> 12) & 0xFE);
+ break;
+ default:
+ IR_dprintk(1, "Sony invalid bitcount %u\n", data->count);
+ goto out;
+ }
+
+ scancode = device << 16 | subdevice << 8 | function;
+ IR_dprintk(1, "Sony(%u) scancode 0x%05x\n", data->count, scancode);
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "Sony decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_sony_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_sony_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler sony_handler = {
+ .decode = ir_sony_decode,
+ .raw_register = ir_sony_register,
+ .raw_unregister = ir_sony_unregister,
+};
+
+static int __init ir_sony_decode_init(void)
+{
+ ir_raw_handler_register(&sony_handler);
+
+ printk(KERN_INFO "IR Sony protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_sony_decode_exit(void)
+{
+ ir_raw_handler_unregister(&sony_handler);
+}
+
+module_init(ir_sony_decode_init);
+module_exit(ir_sony_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("Sony IR protocol decoder");
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index e14e6c486b5..d7da63e16c9 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -1,6 +1,6 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-sysfs.c - sysfs interface for RC devices (/sys/class/rc)
*
- * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009-2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,15 +15,23 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/device.h>
-#include <media/ir-core.h>
+#include "ir-core-priv.h"
#define IRRCV_NUM_DEVICES 256
/* bit array to represent IR sysfs device number */
static unsigned long ir_core_dev_number;
-/* class for /sys/class/irrcv */
-static struct class *ir_input_class;
+/* class for /sys/class/rc */
+static char *ir_devnode(struct device *dev, mode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
+}
+
+static struct class ir_input_class = {
+ .name = "rc",
+ .devnode = ir_devnode,
+};
/**
* show_protocol() - shows the current IR protocol
@@ -32,7 +40,7 @@ static struct class *ir_input_class;
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * It is triggered by reading /sys/class/rc/rc?/protocol.
* It returns the protocol name, as understood by the driver.
*/
static ssize_t show_protocol(struct device *d,
@@ -48,13 +56,17 @@ static ssize_t show_protocol(struct device *d,
if (ir_type == IR_TYPE_UNKNOWN)
s = "Unknown";
else if (ir_type == IR_TYPE_RC5)
- s = "RC-5";
- else if (ir_type == IR_TYPE_PD)
- s = "Pulse/distance";
+ s = "rc-5";
else if (ir_type == IR_TYPE_NEC)
- s = "NEC";
+ s = "nec";
+ else if (ir_type == IR_TYPE_RC6)
+ s = "rc6";
+ else if (ir_type == IR_TYPE_JVC)
+ s = "jvc";
+ else if (ir_type == IR_TYPE_SONY)
+ s = "sony";
else
- s = "Other";
+ s = "other";
return sprintf(buf, "%s\n", s);
}
@@ -67,7 +79,7 @@ static ssize_t show_protocol(struct device *d,
* @len: length of the input buffer
*
* This routine is a callback routine for changing the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * It is triggered by writing to /sys/class/rc/rc?/protocol.
* It changes the IR the protocol name, if the IR type is recognized
* by the driver.
* If an unknown protocol name is used, returns -EINVAL.
@@ -78,23 +90,24 @@ static ssize_t store_protocol(struct device *d,
size_t len)
{
struct ir_input_dev *ir_dev = dev_get_drvdata(d);
- u64 ir_type = IR_TYPE_UNKNOWN;
+ u64 ir_type = 0;
int rc = -EINVAL;
unsigned long flags;
char *buf;
- buf = strsep((char **) &data, "\n");
-
- if (!strcasecmp(buf, "rc-5"))
- ir_type = IR_TYPE_RC5;
- else if (!strcasecmp(buf, "pd"))
- ir_type = IR_TYPE_PD;
- else if (!strcasecmp(buf, "nec"))
- ir_type = IR_TYPE_NEC;
+ while ((buf = strsep((char **) &data, " \n")) != NULL) {
+ if (!strcasecmp(buf, "rc-5") || !strcasecmp(buf, "rc5"))
+ ir_type |= IR_TYPE_RC5;
+ if (!strcasecmp(buf, "nec"))
+ ir_type |= IR_TYPE_NEC;
+ if (!strcasecmp(buf, "jvc"))
+ ir_type |= IR_TYPE_JVC;
+ if (!strcasecmp(buf, "sony"))
+ ir_type |= IR_TYPE_SONY;
+ }
- if (ir_type == IR_TYPE_UNKNOWN) {
- IR_dprintk(1, "Error setting protocol to %lld\n",
- (long long)ir_type);
+ if (!ir_type) {
+ IR_dprintk(1, "Unknown protocol\n");
return -EINVAL;
}
@@ -112,25 +125,87 @@ static ssize_t store_protocol(struct device *d,
ir_dev->rc_tab.ir_type = ir_type;
spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
- IR_dprintk(1, "Current protocol is %lld\n",
+ IR_dprintk(1, "Current protocol(s) is(are) %lld\n",
(long long)ir_type);
return len;
}
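+
+/*
+ * Protocol selection example from userspace (the "rc0" device name is
+ * illustrative); more than one protocol may be listed in a single write:
+ *
+ *	echo "rc5 nec" > /sys/class/rc/rc0/protocol
+ */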
+static ssize_t show_supported_protocols(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ char *orgbuf = buf;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+
+ /* FIXME: doesn't support multiple protocols at the same time */
+ if (ir_dev->props->allowed_protos == IR_TYPE_UNKNOWN)
+ buf += sprintf(buf, "unknown ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_RC5)
+ buf += sprintf(buf, "rc-5 ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_NEC)
+ buf += sprintf(buf, "nec ");
+ if (buf == orgbuf)
+ buf += sprintf(buf, "other ");
+
+ buf += sprintf(buf - 1, "\n");
+
+ return buf - orgbuf;
+}
+
+#define ADD_HOTPLUG_VAR(fmt, val...) \
+ do { \
+ int err = add_uevent_var(env, fmt, val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+static int ir_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(device);
+
+ if (ir_dev->rc_tab.name)
+ ADD_HOTPLUG_VAR("NAME=%s", ir_dev->rc_tab.name);
+ if (ir_dev->driver_name)
+ ADD_HOTPLUG_VAR("DRV_NAME=%s", ir_dev->driver_name);
+
+ return 0;
+}
+
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(current_protocol, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(protocol, S_IRUGO | S_IWUSR,
show_protocol, store_protocol);
-static struct attribute *ir_dev_attrs[] = {
- &dev_attr_current_protocol.attr,
+static DEVICE_ATTR(supported_protocols, S_IRUGO | S_IWUSR,
+ show_supported_protocols, NULL);
+
+static struct attribute *ir_hw_dev_attrs[] = {
+ &dev_attr_protocol.attr,
+ &dev_attr_supported_protocols.attr,
NULL,
};
+static struct attribute_group ir_hw_dev_attr_grp = {
+ .attrs = ir_hw_dev_attrs,
+};
+
+static const struct attribute_group *ir_hw_dev_attr_groups[] = {
+ &ir_hw_dev_attr_grp,
+ NULL
+};
+
+static struct device_type rc_dev_type = {
+ .groups = ir_hw_dev_attr_groups,
+ .uevent = ir_dev_uevent,
+};
+
+static struct device_type ir_raw_dev_type = {
+ .uevent = ir_dev_uevent,
+};
+
/**
- * ir_register_class() - creates the sysfs for /sys/class/irrcv/irrcv?
+ * ir_register_class() - creates the sysfs for /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
* This routine is used to register the syfs code for IR class
@@ -138,8 +213,7 @@ static struct attribute *ir_dev_attrs[] = {
int ir_register_class(struct input_dev *input_dev)
{
int rc;
- struct kobject *kobj;
-
+ const char *path;
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
int devno = find_first_zero_bit(&ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -147,19 +221,36 @@ int ir_register_class(struct input_dev *input_dev)
if (unlikely(devno < 0))
return devno;
- ir_dev->attr.attrs = ir_dev_attrs;
- ir_dev->class_dev = device_create(ir_input_class, NULL,
- input_dev->dev.devt, ir_dev,
- "irrcv%d", devno);
- kobj = &ir_dev->class_dev->kobj;
-
- printk(KERN_WARNING "Creating IR device %s\n", kobject_name(kobj));
- rc = sysfs_create_group(kobj, &ir_dev->attr);
- if (unlikely(rc < 0)) {
- device_destroy(ir_input_class, input_dev->dev.devt);
- return -ENOMEM;
+ if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+ ir_dev->dev.type = &rc_dev_type;
+ else
+ ir_dev->dev.type = &ir_raw_dev_type;
+
+ ir_dev->dev.class = &ir_input_class;
+ ir_dev->dev.parent = input_dev->dev.parent;
+ dev_set_name(&ir_dev->dev, "rc%d", devno);
+ dev_set_drvdata(&ir_dev->dev, ir_dev);
+ rc = device_register(&ir_dev->dev);
+ if (rc)
+ return rc;
+
+ input_dev->dev.parent = &ir_dev->dev;
+ rc = input_register_device(input_dev);
+ if (rc < 0) {
+ device_del(&ir_dev->dev);
+ return rc;
}
+ __module_get(THIS_MODULE);
+
+ path = kobject_get_path(&ir_dev->dev.kobj, GFP_KERNEL);
+ printk(KERN_INFO "%s: %s as %s\n",
+ dev_name(&ir_dev->dev),
+ input_dev->name ? input_dev->name : "Unspecified device",
+ path ? path : "N/A");
+ kfree(path);
+
ir_dev->devno = devno;
set_bit(devno, &ir_core_dev_number);
@@ -168,7 +259,7 @@ int ir_register_class(struct input_dev *input_dev)
/**
* ir_unregister_class() - removes the sysfs for sysfs for
- * /sys/class/irrcv/irrcv?
+ * /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
* This routine is used to unregister the syfs code for IR class
@@ -176,36 +267,35 @@ int ir_register_class(struct input_dev *input_dev)
void ir_unregister_class(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- struct kobject *kobj;
clear_bit(ir_dev->devno, &ir_core_dev_number);
+ input_unregister_device(input_dev);
+ device_del(&ir_dev->dev);
- kobj = &ir_dev->class_dev->kobj;
-
- sysfs_remove_group(kobj, &ir_dev->attr);
- device_destroy(ir_input_class, input_dev->dev.devt);
-
- kfree(ir_dev->attr.name);
+ module_put(THIS_MODULE);
}
/*
- * Init/exit code for the module. Basically, creates/removes /sys/class/irrcv
+ * Init/exit code for the module. Basically, creates/removes /sys/class/rc
*/
static int __init ir_core_init(void)
{
- ir_input_class = class_create(THIS_MODULE, "irrcv");
- if (IS_ERR(ir_input_class)) {
- printk(KERN_ERR "ir_core: unable to register irrcv class\n");
- return PTR_ERR(ir_input_class);
+ int rc = class_register(&ir_input_class);
+ if (rc) {
+ printk(KERN_ERR "ir_core: unable to register rc class\n");
+ return rc;
}
+ /* Initialize/load the decoders/keymap code that will be used */
+ ir_raw_init();
+
return 0;
}
static void __exit ir_core_exit(void)
{
- class_destroy(ir_input_class);
+ class_unregister(&ir_input_class);
}
module_init(ir_core_init);
diff --git a/drivers/media/IR/keymaps/Kconfig b/drivers/media/IR/keymaps/Kconfig
new file mode 100644
index 00000000000..14b22f58f82
--- /dev/null
+++ b/drivers/media/IR/keymaps/Kconfig
@@ -0,0 +1,15 @@
+config RC_MAP
+ tristate "Compile Remote Controller keymap modules"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ This option enables the compilation of lots of Remote
+ Controller tables. They are short tables, but if you
+ don't use a remote controller, or prefer to load the
+ tables from userspace, you should disable it.
+
+ The ir-keytable program, available in the v4l-utils
+ package, provides the same RC maps for loading from
+ userspace. It can be found at
+ http://git.linuxtv.org/v4l-utils
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
new file mode 100644
index 00000000000..ec25258a955
--- /dev/null
+++ b/drivers/media/IR/keymaps/Makefile
@@ -0,0 +1,67 @@
+obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ rc-apac-viewcomp.o \
+ rc-asus-pc39.o \
+ rc-ati-tv-wonder-hd-600.o \
+ rc-avermedia-a16d.o \
+ rc-avermedia.o \
+ rc-avermedia-cardbus.o \
+ rc-avermedia-dvbt.o \
+ rc-avermedia-m135a-rm-jx.o \
+ rc-avertv-303.o \
+ rc-behold.o \
+ rc-behold-columbus.o \
+ rc-budget-ci-old.o \
+ rc-cinergy-1400.o \
+ rc-cinergy.o \
+ rc-dm1105-nec.o \
+ rc-dntv-live-dvb-t.o \
+ rc-dntv-live-dvbt-pro.o \
+ rc-empty.o \
+ rc-em-terratec.o \
+ rc-encore-enltv2.o \
+ rc-encore-enltv.o \
+ rc-encore-enltv-fm53.o \
+ rc-evga-indtube.o \
+ rc-eztv.o \
+ rc-flydvb.o \
+ rc-flyvideo.o \
+ rc-fusionhdtv-mce.o \
+ rc-gadmei-rm008z.o \
+ rc-genius-tvgo-a11mce.o \
+ rc-gotview7135.o \
+ rc-hauppauge-new.o \
+ rc-imon-mce.o \
+ rc-imon-pad.o \
+ rc-iodata-bctv7e.o \
+ rc-kaiomy.o \
+ rc-kworld-315u.o \
+ rc-kworld-plus-tv-analog.o \
+ rc-manli.o \
+ rc-msi-tvanywhere.o \
+ rc-msi-tvanywhere-plus.o \
+ rc-nebula.o \
+ rc-nec-terratec-cinergy-xs.o \
+ rc-norwood.o \
+ rc-npgtech.o \
+ rc-pctv-sedna.o \
+ rc-pinnacle-color.o \
+ rc-pinnacle-grey.o \
+ rc-pinnacle-pctv-hd.o \
+ rc-pixelview.o \
+ rc-pixelview-mk12.o \
+ rc-pixelview-new.o \
+ rc-powercolor-real-angel.o \
+ rc-proteus-2309.o \
+ rc-purpletv.o \
+ rc-pv951.o \
+ rc-rc5-hauppauge-new.o \
+ rc-rc5-tv.o \
+ rc-real-audio-220-32-keys.o \
+ rc-tbs-nec.o \
+ rc-terratec-cinergy-xs.o \
+ rc-tevii-nec.o \
+ rc-tt-1500.o \
+ rc-videomate-s350.o \
+ rc-videomate-tv-pvr.o \
+ rc-winfast.o \
+ rc-winfast-usbii-deluxe.o
diff --git a/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
new file mode 100644
index 00000000000..b17283176ec
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
@@ -0,0 +1,89 @@
+/* adstech-dvb-t-pci.h - Keytable for adstech_dvb_t_pci Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ADS Tech Instant TV DVB-T PCI Remote */
+
+static struct ir_scancode adstech_dvb_t_pci[] = {
+ /* Keys 0 to 9 */
+ { 0x4d, KEY_0 },
+ { 0x57, KEY_1 },
+ { 0x4f, KEY_2 },
+ { 0x53, KEY_3 },
+ { 0x56, KEY_4 },
+ { 0x4e, KEY_5 },
+ { 0x5e, KEY_6 },
+ { 0x54, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x5c, KEY_9 },
+
+ { 0x5b, KEY_POWER },
+ { 0x5f, KEY_MUTE },
+ { 0x55, KEY_GOTO },
+ { 0x5d, KEY_SEARCH },
+ { 0x17, KEY_EPG }, /* Guide */
+ { 0x1f, KEY_MENU },
+ { 0x0f, KEY_UP },
+ { 0x46, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x1e, KEY_RIGHT },
+ { 0x0e, KEY_SELECT }, /* Enter */
+ { 0x5a, KEY_INFO },
+ { 0x52, KEY_EXIT },
+ { 0x59, KEY_PREVIOUS },
+ { 0x51, KEY_NEXT },
+ { 0x58, KEY_REWIND },
+ { 0x50, KEY_FORWARD },
+ { 0x44, KEY_PLAYPAUSE },
+ { 0x07, KEY_STOP },
+ { 0x1b, KEY_RECORD },
+ { 0x13, KEY_TUNER }, /* Live */
+ { 0x0a, KEY_A },
+ { 0x12, KEY_B },
+ { 0x03, KEY_PROG1 }, /* 1 */
+ { 0x01, KEY_PROG2 }, /* 2 */
+ { 0x00, KEY_PROG3 }, /* 3 */
+ { 0x06, KEY_DVD },
+ { 0x48, KEY_AUX }, /* Photo */
+ { 0x40, KEY_VIDEO },
+ { 0x19, KEY_AUDIO }, /* Music */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x08, KEY_CHANNELDOWN },
+ { 0x15, KEY_VOLUMEUP },
+ { 0x1c, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap adstech_dvb_t_pci_map = {
+ .map = {
+ .scan = adstech_dvb_t_pci,
+ .size = ARRAY_SIZE(adstech_dvb_t_pci),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ADSTECH_DVB_T_PCI,
+ }
+};
+
+static int __init init_rc_map_adstech_dvb_t_pci(void)
+{
+ return ir_register_map(&adstech_dvb_t_pci_map);
+}
+
+static void __exit exit_rc_map_adstech_dvb_t_pci(void)
+{
+ ir_unregister_map(&adstech_dvb_t_pci_map);
+}
+
+module_init(init_rc_map_adstech_dvb_t_pci)
+module_exit(exit_rc_map_adstech_dvb_t_pci)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-apac-viewcomp.c b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
new file mode 100644
index 00000000000..0ef2b562baf
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
@@ -0,0 +1,80 @@
+/* apac-viewcomp.h - Keytable for apac_viewcomp Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Attila Kondoros <attila.kondoros@chello.hu> */
+
+static struct ir_scancode apac_viewcomp[] = {
+
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_LAST }, /* +100 */
+ { 0x0a, KEY_LIST }, /* recall */
+
+
+ { 0x1c, KEY_TUNER }, /* TV/FM */
+ { 0x15, KEY_SEARCH }, /* scan */
+ { 0x12, KEY_POWER }, /* power */
+ { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
+ { 0x1b, KEY_VOLUMEUP }, /* vol down */
+ { 0x1e, KEY_CHANNELDOWN }, /* chn up */
+ { 0x1a, KEY_CHANNELUP }, /* chn down */
+
+ { 0x11, KEY_VIDEO }, /* video */
+ { 0x0f, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MUTE }, /* mute/unmute */
+ { 0x10, KEY_TEXT }, /* min */
+
+ { 0x0d, KEY_STOP }, /* freeze */
+ { 0x0e, KEY_RECORD }, /* record */
+ { 0x1d, KEY_PLAYPAUSE }, /* stop */
+ { 0x19, KEY_PLAY }, /* play */
+
+ { 0x16, KEY_GOTO }, /* osd */
+ { 0x14, KEY_REFRESH }, /* default */
+ { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
+ { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
+};
+
+static struct rc_keymap apac_viewcomp_map = {
+ .map = {
+ .scan = apac_viewcomp,
+ .size = ARRAY_SIZE(apac_viewcomp),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_APAC_VIEWCOMP,
+ }
+};
+
+static int __init init_rc_map_apac_viewcomp(void)
+{
+ return ir_register_map(&apac_viewcomp_map);
+}
+
+static void __exit exit_rc_map_apac_viewcomp(void)
+{
+ ir_unregister_map(&apac_viewcomp_map);
+}
+
+module_init(init_rc_map_apac_viewcomp)
+module_exit(exit_rc_map_apac_viewcomp)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-asus-pc39.c b/drivers/media/IR/keymaps/rc-asus-pc39.c
new file mode 100644
index 00000000000..2aa068cd6c7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-asus-pc39.c
@@ -0,0 +1,91 @@
+/* asus-pc39.h - Keytable for asus_pc39 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Marc Fargas <telenieko@telenieko.com>
+ * This is the remote control that comes with the Asus P7131,
+ * which has a label reading "Model PC-39".
+ */
+
+static struct ir_scancode asus_pc39[] = {
+ /* Keys 0 to 9 */
+ { 0x15, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x2d, KEY_2 },
+ { 0x2b, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0d, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x31, KEY_7 },
+ { 0x35, KEY_8 },
+ { 0x33, KEY_9 },
+
+ { 0x3e, KEY_RADIO }, /* radio */
+ { 0x03, KEY_MENU }, /* dvd/menu */
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x19, KEY_VOLUMEDOWN },
+ { 0x37, KEY_UP },
+ { 0x3b, KEY_DOWN },
+ { 0x27, KEY_LEFT },
+ { 0x2f, KEY_RIGHT },
+ { 0x25, KEY_VIDEO }, /* video */
+ { 0x39, KEY_AUDIO }, /* music */
+
+ { 0x21, KEY_TV }, /* tv */
+ { 0x1d, KEY_EXIT }, /* back */
+ { 0x0a, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1a, KEY_ENTER }, /* enter */
+
+ { 0x06, KEY_PAUSE }, /* play/pause */
+ { 0x1e, KEY_PREVIOUS }, /* rew */
+ { 0x26, KEY_NEXT }, /* forward */
+ { 0x0e, KEY_REWIND }, /* backward << */
+ { 0x3a, KEY_FASTFORWARD }, /* forward >> */
+ { 0x36, KEY_STOP },
+ { 0x2e, KEY_RECORD }, /* recording */
+ { 0x16, KEY_POWER }, /* the button that reads "close" */
+
+ { 0x11, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MACRO }, /* recall */
+ { 0x23, KEY_HOME }, /* home */
+ { 0x05, KEY_PVR }, /* picture */
+ { 0x3d, KEY_MUTE }, /* mute */
+ { 0x01, KEY_DVD }, /* dvd */
+};
+
+static struct rc_keymap asus_pc39_map = {
+ .map = {
+ .scan = asus_pc39,
+ .size = ARRAY_SIZE(asus_pc39),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ASUS_PC39,
+ }
+};
+
+static int __init init_rc_map_asus_pc39(void)
+{
+ return ir_register_map(&asus_pc39_map);
+}
+
+static void __exit exit_rc_map_asus_pc39(void)
+{
+ ir_unregister_map(&asus_pc39_map);
+}
+
+module_init(init_rc_map_asus_pc39)
+module_exit(exit_rc_map_asus_pc39)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
new file mode 100644
index 00000000000..8edfd293d01
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -0,0 +1,69 @@
+/* ati-tv-wonder-hd-600.h - Keytable for ati_tv_wonder_hd_600 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ATI TV Wonder HD 600 USB
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode ati_tv_wonder_hd_600[] = {
+ { 0x00, KEY_RECORD}, /* Row 1 */
+ { 0x01, KEY_PLAYPAUSE},
+ { 0x02, KEY_STOP},
+ { 0x03, KEY_POWER},
+ { 0x04, KEY_PREVIOUS}, /* Row 2 */
+ { 0x05, KEY_REWIND},
+ { 0x06, KEY_FORWARD},
+ { 0x07, KEY_NEXT},
+ { 0x08, KEY_EPG}, /* Row 3 */
+ { 0x09, KEY_HOME},
+ { 0x0a, KEY_MENU},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x0c, KEY_BACK}, /* Row 4 */
+ { 0x0d, KEY_UP},
+ { 0x0e, KEY_INFO},
+ { 0x0f, KEY_CHANNELDOWN},
+ { 0x10, KEY_LEFT}, /* Row 5 */
+ { 0x11, KEY_SELECT},
+ { 0x12, KEY_RIGHT},
+ { 0x13, KEY_VOLUMEUP},
+ { 0x14, KEY_LAST}, /* Row 6 */
+ { 0x15, KEY_DOWN},
+ { 0x16, KEY_MUTE},
+ { 0x17, KEY_VOLUMEDOWN},
+};
+
+static struct rc_keymap ati_tv_wonder_hd_600_map = {
+ .map = {
+ .scan = ati_tv_wonder_hd_600,
+ .size = ARRAY_SIZE(ati_tv_wonder_hd_600),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ATI_TV_WONDER_HD_600,
+ }
+};
+
+static int __init init_rc_map_ati_tv_wonder_hd_600(void)
+{
+ return ir_register_map(&ati_tv_wonder_hd_600_map);
+}
+
+static void __exit exit_rc_map_ati_tv_wonder_hd_600(void)
+{
+ ir_unregister_map(&ati_tv_wonder_hd_600_map);
+}
+
+module_init(init_rc_map_ati_tv_wonder_hd_600)
+module_exit(exit_rc_map_ati_tv_wonder_hd_600)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-a16d.c b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
new file mode 100644
index 00000000000..12f043587f2
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
@@ -0,0 +1,75 @@
+/* avermedia-a16d.h - Keytable for avermedia_a16d Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode avermedia_a16d[] = {
+ { 0x20, KEY_LIST},
+ { 0x00, KEY_POWER},
+ { 0x28, KEY_1},
+ { 0x18, KEY_2},
+ { 0x38, KEY_3},
+ { 0x24, KEY_4},
+ { 0x14, KEY_5},
+ { 0x34, KEY_6},
+ { 0x2c, KEY_7},
+ { 0x1c, KEY_8},
+ { 0x3c, KEY_9},
+ { 0x12, KEY_SUBTITLE},
+ { 0x22, KEY_0},
+ { 0x32, KEY_REWIND},
+ { 0x3a, KEY_SHUFFLE},
+ { 0x02, KEY_PRINT},
+ { 0x11, KEY_CHANNELDOWN},
+ { 0x31, KEY_CHANNELUP},
+ { 0x0c, KEY_ZOOM},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x3e, KEY_VOLUMEUP},
+ { 0x0a, KEY_MUTE},
+ { 0x04, KEY_AUDIO},
+ { 0x26, KEY_RECORD},
+ { 0x06, KEY_PLAY},
+ { 0x36, KEY_STOP},
+ { 0x16, KEY_PAUSE},
+ { 0x2e, KEY_REWIND},
+ { 0x0e, KEY_FASTFORWARD},
+ { 0x30, KEY_TEXT},
+ { 0x21, KEY_GREEN},
+ { 0x01, KEY_BLUE},
+ { 0x08, KEY_EPG},
+ { 0x2a, KEY_MENU},
+};
+
+static struct rc_keymap avermedia_a16d_map = {
+ .map = {
+ .scan = avermedia_a16d,
+ .size = ARRAY_SIZE(avermedia_a16d),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_A16D,
+ }
+};
+
+static int __init init_rc_map_avermedia_a16d(void)
+{
+ return ir_register_map(&avermedia_a16d_map);
+}
+
+static void __exit exit_rc_map_avermedia_a16d(void)
+{
+ ir_unregister_map(&avermedia_a16d_map);
+}
+
+module_init(init_rc_map_avermedia_a16d)
+module_exit(exit_rc_map_avermedia_a16d)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-cardbus.c b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
new file mode 100644
index 00000000000..2a945b02e8c
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
@@ -0,0 +1,97 @@
+/* avermedia-cardbus.h - Keytable for avermedia_cardbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
+
+static struct ir_scancode avermedia_cardbus[] = {
+ { 0x00, KEY_POWER },
+ { 0x01, KEY_TUNER }, /* TV/FM */
+ { 0x03, KEY_TEXT }, /* Teletext */
+ { 0x04, KEY_EPG },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_AUDIO },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_ZOOM }, /* Full screen */
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
+ { 0x11, KEY_0 },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_EDIT }, /* Autoscan */
+ { 0x17, KEY_SAVE }, /* Screenshot */
+ { 0x18, KEY_PLAYPAUSE },
+ { 0x19, KEY_RECORD },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_STOP },
+ { 0x1c, KEY_FASTFORWARD },
+ { 0x1d, KEY_REWIND },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_VOLUMEUP },
+ { 0x22, KEY_SLEEP }, /* Sleep */
+ { 0x23, KEY_ZOOM }, /* Aspect */
+ { 0x26, KEY_SCREEN }, /* Pos */
+ { 0x27, KEY_ANGLE }, /* Size */
+ { 0x28, KEY_SELECT }, /* Select */
+ { 0x29, KEY_BLUE }, /* Blue/Picture */
+ { 0x2a, KEY_BACKSPACE }, /* Back */
+ { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
+ { 0x2c, KEY_DOWN },
+ { 0x2e, KEY_DOT },
+ { 0x2f, KEY_TV }, /* Live TV */
+ { 0x32, KEY_LEFT },
+ { 0x33, KEY_CLEAR }, /* Clear */
+ { 0x35, KEY_RED }, /* Red/TV */
+ { 0x36, KEY_UP },
+ { 0x37, KEY_HOME }, /* Home */
+ { 0x39, KEY_GREEN }, /* Green/Video */
+ { 0x3d, KEY_YELLOW }, /* Yellow/Music */
+ { 0x3e, KEY_OK }, /* Ok */
+ { 0x3f, KEY_RIGHT },
+ { 0x40, KEY_NEXT }, /* Next */
+ { 0x41, KEY_PREVIOUS }, /* Previous */
+ { 0x42, KEY_CHANNELDOWN }, /* Channel down */
+ { 0x43, KEY_CHANNELUP }, /* Channel up */
+};
+
+static struct rc_keymap avermedia_cardbus_map = {
+ .map = {
+ .scan = avermedia_cardbus,
+ .size = ARRAY_SIZE(avermedia_cardbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_CARDBUS,
+ }
+};
+
+static int __init init_rc_map_avermedia_cardbus(void)
+{
+ return ir_register_map(&avermedia_cardbus_map);
+}
+
+static void __exit exit_rc_map_avermedia_cardbus(void)
+{
+ ir_unregister_map(&avermedia_cardbus_map);
+}
+
+module_init(init_rc_map_avermedia_cardbus)
+module_exit(exit_rc_map_avermedia_cardbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-dvbt.c b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
new file mode 100644
index 00000000000..39dde622287
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
@@ -0,0 +1,78 @@
+/* avermedia-dvbt.h - Keytable for avermedia_dvbt Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Matt Jesson <dvb@jesson.eclipse.co.uk> */
+
+static struct ir_scancode avermedia_dvbt[] = {
+ { 0x28, KEY_0 }, /* '0' / 'enter' */
+ { 0x22, KEY_1 }, /* '1' */
+ { 0x12, KEY_2 }, /* '2' / 'up arrow' */
+ { 0x32, KEY_3 }, /* '3' */
+ { 0x24, KEY_4 }, /* '4' / 'left arrow' */
+ { 0x14, KEY_5 }, /* '5' */
+ { 0x34, KEY_6 }, /* '6' / 'right arrow' */
+ { 0x26, KEY_7 }, /* '7' */
+ { 0x16, KEY_8 }, /* '8' / 'down arrow' */
+ { 0x36, KEY_9 }, /* '9' */
+
+ { 0x20, KEY_LIST }, /* 'source' */
+ { 0x10, KEY_TEXT }, /* 'teletext' */
+ { 0x00, KEY_POWER }, /* 'power' */
+ { 0x04, KEY_AUDIO }, /* 'audio' */
+ { 0x06, KEY_ZOOM }, /* 'full screen' */
+ { 0x18, KEY_VIDEO }, /* 'display' */
+ { 0x38, KEY_SEARCH }, /* 'loop' */
+ { 0x08, KEY_INFO }, /* 'preview' */
+ { 0x2a, KEY_REWIND }, /* 'backward <<' */
+ { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
+ { 0x3a, KEY_RECORD }, /* 'capture' */
+ { 0x0a, KEY_MUTE }, /* 'mute' */
+ { 0x2c, KEY_RECORD }, /* 'record' */
+ { 0x1c, KEY_PAUSE }, /* 'pause' */
+ { 0x3c, KEY_STOP }, /* 'stop' */
+ { 0x0c, KEY_PLAY }, /* 'play' */
+ { 0x2e, KEY_RED }, /* 'red' */
+ { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
+ { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
+ { 0x21, KEY_GREEN }, /* 'green' */
+ { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
+ { 0x31, KEY_CHANNELUP }, /* 'channel +' */
+ { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
+ { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
+};
+
+static struct rc_keymap avermedia_dvbt_map = {
+ .map = {
+ .scan = avermedia_dvbt,
+ .size = ARRAY_SIZE(avermedia_dvbt),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_DVBT,
+ }
+};
+
+static int __init init_rc_map_avermedia_dvbt(void)
+{
+ return ir_register_map(&avermedia_dvbt_map);
+}
+
+static void __exit exit_rc_map_avermedia_dvbt(void)
+{
+ ir_unregister_map(&avermedia_dvbt_map);
+}
+
+module_init(init_rc_map_avermedia_dvbt)
+module_exit(exit_rc_map_avermedia_dvbt)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
new file mode 100644
index 00000000000..101e7ea8594
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
@@ -0,0 +1,90 @@
+/* avermedia-m135a-rm-jx.h - Keytable for avermedia_m135a_rm_jx Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Avermedia M135A with IR model RM-JX
+ * The same codes exist on both Positivo (BR) and original IR
+ * Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode avermedia_m135a_rm_jx[] = {
+ { 0x0200, KEY_POWER2 },
+ { 0x022e, KEY_DOT }, /* '.' */
+ { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
+
+ { 0x0205, KEY_1 },
+ { 0x0206, KEY_2 },
+ { 0x0207, KEY_3 },
+ { 0x0209, KEY_4 },
+ { 0x020a, KEY_5 },
+ { 0x020b, KEY_6 },
+ { 0x020d, KEY_7 },
+ { 0x020e, KEY_8 },
+ { 0x020f, KEY_9 },
+ { 0x0211, KEY_0 },
+
+ { 0x0213, KEY_RIGHT }, /* -> or L */
+ { 0x0212, KEY_LEFT }, /* <- or R */
+
+ { 0x0217, KEY_SLEEP }, /* Capturar Imagem (Capture Image) or Snapshot */
+ { 0x0210, KEY_SHUFFLE }, /* Amostra (Sample) or 16 chan prev */
+
+ { 0x0303, KEY_CHANNELUP },
+ { 0x0302, KEY_CHANNELDOWN },
+ { 0x021f, KEY_VOLUMEUP },
+ { 0x021e, KEY_VOLUMEDOWN },
+ { 0x020c, KEY_ENTER }, /* Full Screen */
+
+ { 0x0214, KEY_MUTE },
+ { 0x0208, KEY_AUDIO },
+
+ { 0x0203, KEY_TEXT }, /* Teletext */
+ { 0x0204, KEY_EPG },
+ { 0x022b, KEY_TV2 }, /* TV2 or PIP */
+
+ { 0x021d, KEY_RED },
+ { 0x021c, KEY_YELLOW },
+ { 0x0301, KEY_GREEN },
+ { 0x0300, KEY_BLUE },
+
+ { 0x021a, KEY_PLAYPAUSE },
+ { 0x0219, KEY_RECORD },
+ { 0x0218, KEY_PLAY },
+ { 0x021b, KEY_STOP },
+};
+
+static struct rc_keymap avermedia_m135a_rm_jx_map = {
+ .map = {
+ .scan = avermedia_m135a_rm_jx,
+ .size = ARRAY_SIZE(avermedia_m135a_rm_jx),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_M135A_RM_JX,
+ }
+};
+
+static int __init init_rc_map_avermedia_m135a_rm_jx(void)
+{
+ return ir_register_map(&avermedia_m135a_rm_jx_map);
+}
+
+static void __exit exit_rc_map_avermedia_m135a_rm_jx(void)
+{
+ ir_unregister_map(&avermedia_m135a_rm_jx_map);
+}
+
+module_init(init_rc_map_avermedia_m135a_rm_jx)
+module_exit(exit_rc_map_avermedia_m135a_rm_jx)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia.c b/drivers/media/IR/keymaps/rc-avermedia.c
new file mode 100644
index 00000000000..21effd5bfb0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia.c
@@ -0,0 +1,86 @@
+/* avermedia.h - Keytable for avermedia Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alex Hermann <gaaf@gmx.net> */
+
+static struct ir_scancode avermedia[] = {
+ { 0x28, KEY_1 },
+ { 0x18, KEY_2 },
+ { 0x38, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x14, KEY_5 },
+ { 0x34, KEY_6 },
+ { 0x2c, KEY_7 },
+ { 0x1c, KEY_8 },
+ { 0x3c, KEY_9 },
+ { 0x22, KEY_0 },
+
+ { 0x20, KEY_TV }, /* TV/FM */
+ { 0x10, KEY_CD }, /* CD */
+ { 0x30, KEY_TEXT }, /* TELETEXT */
+ { 0x00, KEY_POWER }, /* POWER */
+
+ { 0x08, KEY_VIDEO }, /* VIDEO */
+ { 0x04, KEY_AUDIO }, /* AUDIO */
+ { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
+
+ { 0x12, KEY_SUBTITLE }, /* DISPLAY */
+ { 0x32, KEY_REWIND }, /* LOOP */
+ { 0x02, KEY_PRINT }, /* PREVIEW */
+
+ { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
+ { 0x1a, KEY_SLEEP }, /* FREEZE */
+ { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+
+ { 0x26, KEY_RECORD }, /* RECORD */
+ { 0x16, KEY_PAUSE }, /* PAUSE */
+ { 0x36, KEY_STOP }, /* STOP */
+ { 0x06, KEY_PLAY }, /* PLAY */
+
+ { 0x2e, KEY_RED }, /* RED */
+ { 0x21, KEY_GREEN }, /* GREEN */
+ { 0x0e, KEY_YELLOW }, /* YELLOW */
+ { 0x01, KEY_BLUE }, /* BLUE */
+
+ { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
+ { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
+ { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
+ { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
+};
+
+static struct rc_keymap avermedia_map = {
+ .map = {
+ .scan = avermedia,
+ .size = ARRAY_SIZE(avermedia),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA,
+ }
+};
+
+static int __init init_rc_map_avermedia(void)
+{
+ return ir_register_map(&avermedia_map);
+}
+
+static void __exit exit_rc_map_avermedia(void)
+{
+ ir_unregister_map(&avermedia_map);
+}
+
+module_init(init_rc_map_avermedia)
+module_exit(exit_rc_map_avermedia)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avertv-303.c b/drivers/media/IR/keymaps/rc-avertv-303.c
new file mode 100644
index 00000000000..971c59d6f9d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avertv-303.c
@@ -0,0 +1,85 @@
+/* avertv-303.h - Keytable for avertv_303 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* AVERTV STUDIO 303 Remote */
+
+static struct ir_scancode avertv_303[] = {
+ { 0x2a, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x3a, KEY_3 },
+ { 0x4a, KEY_4 },
+ { 0x52, KEY_5 },
+ { 0x5a, KEY_6 },
+ { 0x6a, KEY_7 },
+ { 0x72, KEY_8 },
+ { 0x7a, KEY_9 },
+ { 0x0e, KEY_0 },
+
+ { 0x02, KEY_POWER },
+ { 0x22, KEY_VIDEO },
+ { 0x42, KEY_AUDIO },
+ { 0x62, KEY_ZOOM },
+ { 0x0a, KEY_TV },
+ { 0x12, KEY_CD },
+ { 0x1a, KEY_TEXT },
+
+ { 0x16, KEY_SUBTITLE },
+ { 0x1e, KEY_REWIND },
+ { 0x06, KEY_PRINT },
+
+ { 0x2e, KEY_SEARCH },
+ { 0x36, KEY_SLEEP },
+ { 0x3e, KEY_SHUFFLE },
+ { 0x26, KEY_MUTE },
+
+ { 0x4e, KEY_RECORD },
+ { 0x56, KEY_PAUSE },
+ { 0x5e, KEY_STOP },
+ { 0x46, KEY_PLAY },
+
+ { 0x6e, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x66, KEY_YELLOW },
+ { 0x03, KEY_BLUE },
+
+ { 0x76, KEY_LEFT },
+ { 0x7e, KEY_RIGHT },
+ { 0x13, KEY_DOWN },
+ { 0x1b, KEY_UP },
+};
+
+static struct rc_keymap avertv_303_map = {
+ .map = {
+ .scan = avertv_303,
+ .size = ARRAY_SIZE(avertv_303),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERTV_303,
+ }
+};
+
+static int __init init_rc_map_avertv_303(void)
+{
+ return ir_register_map(&avertv_303_map);
+}
+
+static void __exit exit_rc_map_avertv_303(void)
+{
+ ir_unregister_map(&avertv_303_map);
+}
+
+module_init(init_rc_map_avertv_303)
+module_exit(exit_rc_map_avertv_303)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold-columbus.c b/drivers/media/IR/keymaps/rc-behold-columbus.c
new file mode 100644
index 00000000000..9f56c98fef5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold-columbus.c
@@ -0,0 +1,108 @@
+/* behold-columbus.h - Keytable for behold_columbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Beholder Intl. Ltd. 2008
+ * Dmitry Belimov d.belimov@google.com
+ * Keytable is used by BeholdTV Columbus
+ * The "ascii-art picture" below (in comments, first row
+ * is the keycode in hex, and subsequent row(s) shows
+ * the button labels (several variants when appropriate)
+ * helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold_columbus[] = {
+
+ /* 0x13 0x11 0x1C 0x12 *
+ * Mute Source TV/FM Power *
+ * */
+
+ { 0x13, KEY_MUTE },
+ { 0x11, KEY_PROPS },
+ { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 0x0D *
+ * 1 2 3 Stereo *
+ * *
+ * 0x04 0x05 0x06 0x19 *
+ * 4 5 6 Snapshot *
+ * *
+ * 0x07 0x08 0x09 0x10 *
+ * 7 8 9 Zoom *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x0D, KEY_SETUP }, /* Setup key */
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x19, KEY_CAMERA }, /* Snapshot key */
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0A 0x00 0x0B 0x0C *
+ * RECALL 0 ChannelUp VolumeUp *
+ * */
+ { 0x0A, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x0B, KEY_CHANNELUP },
+ { 0x0C, KEY_VOLUMEUP },
+
+ /* 0x1B 0x1D 0x15 0x18 *
+ * Timeshift Record ChannelDown VolumeDown *
+ * */
+
+ { 0x1B, KEY_TIME },
+ { 0x1D, KEY_RECORD },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+
+ /* 0x0E 0x1E 0x0F 0x1A *
+ * Stop Pause Previous Next *
+ * */
+
+ { 0x0E, KEY_STOP },
+ { 0x1E, KEY_PAUSE },
+ { 0x0F, KEY_PREVIOUS },
+ { 0x1A, KEY_NEXT },
+
+};
+
+static struct rc_keymap behold_columbus_map = {
+ .map = {
+ .scan = behold_columbus,
+ .size = ARRAY_SIZE(behold_columbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD_COLUMBUS,
+ }
+};
+
+static int __init init_rc_map_behold_columbus(void)
+{
+ return ir_register_map(&behold_columbus_map);
+}
+
+static void __exit exit_rc_map_behold_columbus(void)
+{
+ ir_unregister_map(&behold_columbus_map);
+}
+
+module_init(init_rc_map_behold_columbus)
+module_exit(exit_rc_map_behold_columbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold.c b/drivers/media/IR/keymaps/rc-behold.c
new file mode 100644
index 00000000000..abc140b2098
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold.c
@@ -0,0 +1,141 @@
+/* behold.h - Keytable for behold Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Igor Kuznetsov <igk72@ya.ru>
+ * Andrey J. Melnikov <temnota@kmv.ru>
+ *
+ * Keytable is used by BeholdTV 60x series, M6 series at
+ * least, and probably other cards too.
+ * The "ascii-art picture" below (in comments, first row
+ * is the keycode in hex, and subsequent row(s) shows
+ * the button labels (several variants when appropriate)
+ * helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold[] = {
+
+ /* 0x1c 0x12 *
+ * TV/FM POWER *
+ * */
+ { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 MODE *
+ * */
+ { 0x0a, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_MODE },
+
+ /* 0x14 0x10 *
+ * ASPECT FULLSCREEN *
+ * */
+ { 0x14, KEY_SCREEN },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+ * 0x15 *
+ * Down *
+ * */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x16, KEY_OK }, /* XXX KEY_ENTER */
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x15, KEY_CHANNELDOWN },
+
+ /* 0x11 0x0d *
+ * MUTE INFO *
+ * */
+ { 0x11, KEY_MUTE },
+ { 0x0d, KEY_INFO },
+
+ /* 0x0f 0x1b 0x1a *
+ * RECORD PLAY/PAUSE STOP *
+ * *
+ * 0x0e 0x1f 0x1e *
+ *TELETEXT AUDIO SOURCE *
+ * RED YELLOW *
+ * */
+ { 0x0f, KEY_RECORD },
+ { 0x1b, KEY_PLAYPAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x0e, KEY_TEXT },
+ { 0x1f, KEY_RED }, /*XXX KEY_AUDIO */
+ { 0x1e, KEY_YELLOW }, /*XXX KEY_SOURCE */
+
+ /* 0x1d 0x13 0x19 *
+ * SLEEP PREVIEW DVB *
+ * GREEN BLUE *
+ * */
+ { 0x1d, KEY_SLEEP },
+ { 0x13, KEY_GREEN },
+ { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
+
+ /* 0x58 0x5c *
+ * FREEZE SNAPSHOT *
+ * */
+ { 0x58, KEY_SLOW },
+ { 0x5c, KEY_CAMERA },
+
+};
+
+static struct rc_keymap behold_map = {
+ .map = {
+ .scan = behold,
+ .size = ARRAY_SIZE(behold),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD,
+ }
+};
+
+static int __init init_rc_map_behold(void)
+{
+ return ir_register_map(&behold_map);
+}
+
+static void __exit exit_rc_map_behold(void)
+{
+ ir_unregister_map(&behold_map);
+}
+
+module_init(init_rc_map_behold)
+module_exit(exit_rc_map_behold)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-budget-ci-old.c b/drivers/media/IR/keymaps/rc-budget-ci-old.c
new file mode 100644
index 00000000000..64c2ac91333
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-budget-ci-old.c
@@ -0,0 +1,92 @@
+/* budget-ci-old.h - Keytable for budget_ci_old Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* From reading the following remotes:
+ * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
+ * Hauppauge (from NOVA-CI-s box product)
+ * This is a "middle of the road" approach, differences are noted
+ */
+
+static struct ir_scancode budget_ci_old[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_ENTER },
+ { 0x0b, KEY_RED },
+ { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
+ { 0x0d, KEY_MUTE },
+ { 0x0f, KEY_A }, /* TV on Hauppauge */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x14, KEY_B },
+ { 0x1c, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
+ { 0x1f, KEY_BREAK },
+ { 0x20, KEY_CHANNELUP },
+ { 0x21, KEY_CHANNELDOWN },
+ { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
+ { 0x24, KEY_RESTART },
+ { 0x25, KEY_OK },
+ { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
+ { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_RIGHT },
+ { 0x2c, KEY_LEFT },
+ { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
+ { 0x30, KEY_SLOW },
+ { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
+ { 0x32, KEY_REWIND },
+ { 0x34, KEY_FASTFORWARD },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD },
+ { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
+ { 0x3a, KEY_C },
+ { 0x3c, KEY_EXIT },
+ { 0x3d, KEY_POWER2 },
+ { 0x3e, KEY_TUNER },
+};
+
+static struct rc_keymap budget_ci_old_map = {
+ .map = {
+ .scan = budget_ci_old,
+ .size = ARRAY_SIZE(budget_ci_old),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BUDGET_CI_OLD,
+ }
+};
+
+static int __init init_rc_map_budget_ci_old(void)
+{
+ return ir_register_map(&budget_ci_old_map);
+}
+
+static void __exit exit_rc_map_budget_ci_old(void)
+{
+ ir_unregister_map(&budget_ci_old_map);
+}
+
+module_init(init_rc_map_budget_ci_old)
+module_exit(exit_rc_map_budget_ci_old)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy-1400.c b/drivers/media/IR/keymaps/rc-cinergy-1400.c
new file mode 100644
index 00000000000..074f2c2c2c6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy-1400.c
@@ -0,0 +1,84 @@
+/* cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Cinergy 1400 DVB-T */
+
+static struct ir_scancode cinergy_1400[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x06, KEY_5 },
+ { 0x07, KEY_6 },
+ { 0x08, KEY_7 },
+ { 0x09, KEY_8 },
+ { 0x0a, KEY_9 },
+ { 0x0c, KEY_0 },
+
+ { 0x0b, KEY_VIDEO },
+ { 0x0d, KEY_REFRESH },
+ { 0x0e, KEY_SELECT },
+ { 0x0f, KEY_EPG },
+ { 0x10, KEY_UP },
+ { 0x11, KEY_LEFT },
+ { 0x12, KEY_OK },
+ { 0x13, KEY_RIGHT },
+ { 0x14, KEY_DOWN },
+ { 0x15, KEY_TEXT },
+ { 0x16, KEY_INFO },
+
+ { 0x17, KEY_RED },
+ { 0x18, KEY_GREEN },
+ { 0x19, KEY_YELLOW },
+ { 0x1a, KEY_BLUE },
+
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_VOLUMEUP },
+ { 0x1d, KEY_MUTE },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_CHANNELDOWN },
+
+ { 0x40, KEY_PAUSE },
+ { 0x4c, KEY_PLAY },
+ { 0x58, KEY_RECORD },
+ { 0x54, KEY_PREVIOUS },
+ { 0x48, KEY_STOP },
+ { 0x5c, KEY_NEXT },
+};
+
+static struct rc_keymap cinergy_1400_map = {
+ .map = {
+ .scan = cinergy_1400,
+ .size = ARRAY_SIZE(cinergy_1400),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY_1400,
+ }
+};
+
+static int __init init_rc_map_cinergy_1400(void)
+{
+ return ir_register_map(&cinergy_1400_map);
+}
+
+static void __exit exit_rc_map_cinergy_1400(void)
+{
+ ir_unregister_map(&cinergy_1400_map);
+}
+
+module_init(init_rc_map_cinergy_1400)
+module_exit(exit_rc_map_cinergy_1400)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy.c b/drivers/media/IR/keymaps/rc-cinergy.c
new file mode 100644
index 00000000000..cf84c3dba74
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy.c
@@ -0,0 +1,78 @@
+/* cinergy.h - Keytable for cinergy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode cinergy[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_POWER },
+ { 0x0b, KEY_PROG1 }, /* app */
+ { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
+ { 0x0d, KEY_CHANNELUP }, /* channel */
+ { 0x0e, KEY_CHANNELDOWN }, /* channel- */
+ { 0x0f, KEY_VOLUMEUP },
+ { 0x10, KEY_VOLUMEDOWN },
+ { 0x11, KEY_TUNER }, /* AV */
+ { 0x12, KEY_NUMLOCK }, /* -/-- */
+ { 0x13, KEY_AUDIO }, /* audio */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_UP },
+ { 0x16, KEY_DOWN },
+ { 0x17, KEY_LEFT },
+ { 0x18, KEY_RIGHT },
+ { 0x19, BTN_LEFT },
+ { 0x1a, BTN_RIGHT },
+ { 0x1b, KEY_WWW }, /* text */
+ { 0x1c, KEY_REWIND },
+ { 0x1d, KEY_FORWARD },
+ { 0x1e, KEY_RECORD },
+ { 0x1f, KEY_PLAY },
+ { 0x20, KEY_PREVIOUSSONG },
+ { 0x21, KEY_NEXTSONG },
+ { 0x22, KEY_PAUSE },
+ { 0x23, KEY_STOP },
+};
+
+static struct rc_keymap cinergy_map = {
+ .map = {
+ .scan = cinergy,
+ .size = ARRAY_SIZE(cinergy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY,
+ }
+};
+
+static int __init init_rc_map_cinergy(void)
+{
+ return ir_register_map(&cinergy_map);
+}
+
+static void __exit exit_rc_map_cinergy(void)
+{
+ ir_unregister_map(&cinergy_map);
+}
+
+module_init(init_rc_map_cinergy)
+module_exit(exit_rc_map_cinergy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dm1105-nec.c b/drivers/media/IR/keymaps/rc-dm1105-nec.c
new file mode 100644
index 00000000000..90684d0efea
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dm1105-nec.c
@@ -0,0 +1,76 @@
+/* dm1105-nec.h - Keytable for dm1105_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DVBWorld remotes
+ Igor M. Liplianin <liplianin@me.by>
+ */
+
+static struct ir_scancode dm1105_nec[] = {
+ { 0x0a, KEY_POWER2}, /* power */
+ { 0x0c, KEY_MUTE}, /* mute */
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_CHANNELUP}, /* ch+ */
+ { 0x0f, KEY_CHANNELDOWN}, /* ch- */
+ { 0x1a, KEY_VOLUMEUP}, /* vol+ */
+ { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
+ { 0x04, KEY_RECORD}, /* rec */
+ { 0x09, KEY_CHANNEL}, /* fav */
+ { 0x08, KEY_BACKSPACE}, /* rewind */
+ { 0x07, KEY_FASTFORWARD}, /* fast */
+ { 0x0b, KEY_PAUSE}, /* pause */
+ { 0x02, KEY_ESC}, /* cancel */
+ { 0x03, KEY_TAB}, /* tab */
+ { 0x00, KEY_UP}, /* up */
+ { 0x1f, KEY_ENTER}, /* ok */
+ { 0x01, KEY_DOWN}, /* down */
+ { 0x05, KEY_RECORD}, /* cap */
+ { 0x06, KEY_STOP}, /* stop */
+ { 0x40, KEY_ZOOM}, /* full */
+ { 0x1e, KEY_TV}, /* tvmode */
+ { 0x1b, KEY_B}, /* recall */
+};
+
+static struct rc_keymap dm1105_nec_map = {
+ .map = {
+ .scan = dm1105_nec,
+ .size = ARRAY_SIZE(dm1105_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DM1105_NEC,
+ }
+};
+
+static int __init init_rc_map_dm1105_nec(void)
+{
+ return ir_register_map(&dm1105_nec_map);
+}
+
+static void __exit exit_rc_map_dm1105_nec(void)
+{
+ ir_unregister_map(&dm1105_nec_map);
+}
+
+module_init(init_rc_map_dm1105_nec)
+module_exit(exit_rc_map_dm1105_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
new file mode 100644
index 00000000000..8a4027af964
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
@@ -0,0 +1,78 @@
+/* dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live DVB-T Remote */
+
+static struct ir_scancode dntv_live_dvb_t[] = {
+ { 0x00, KEY_ESC }, /* 'go up a level?' */
+ /* Keys 0 to 9 */
+ { 0x0a, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_TUNER }, /* tv/fm */
+ { 0x0c, KEY_SEARCH }, /* scan */
+ { 0x0d, KEY_STOP },
+ { 0x0e, KEY_PAUSE },
+ { 0x0f, KEY_LIST }, /* source */
+
+ { 0x10, KEY_MUTE },
+ { 0x11, KEY_REWIND }, /* backward << */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_CAMERA }, /* snap */
+ { 0x14, KEY_AUDIO }, /* stereo */
+ { 0x15, KEY_CLEAR }, /* reset */
+ { 0x16, KEY_PLAY },
+ { 0x17, KEY_ENTER },
+ { 0x18, KEY_ZOOM }, /* full screen */
+ { 0x19, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1c, KEY_INFO }, /* preview */
+ { 0x1d, KEY_RECORD }, /* record */
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap dntv_live_dvb_t_map = {
+ .map = {
+ .scan = dntv_live_dvb_t,
+ .size = ARRAY_SIZE(dntv_live_dvb_t),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVB_T,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvb_t(void)
+{
+ return ir_register_map(&dntv_live_dvb_t_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvb_t(void)
+{
+ ir_unregister_map(&dntv_live_dvb_t_map);
+}
+
+module_init(init_rc_map_dntv_live_dvb_t)
+module_exit(exit_rc_map_dntv_live_dvb_t)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
new file mode 100644
index 00000000000..6f4d60764d5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
@@ -0,0 +1,97 @@
+/* dntv-live-dvbt-pro.h - Keytable for dntv_live_dvbt_pro Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live! DVB-T Pro Remote */
+
+static struct ir_scancode dntv_live_dvbt_pro[] = {
+ { 0x16, KEY_POWER },
+ { 0x5b, KEY_HOME },
+
+ { 0x55, KEY_TV }, /* live tv */
+ { 0x58, KEY_TUNER }, /* digital Radio */
+ { 0x5a, KEY_RADIO }, /* FM radio */
+ { 0x59, KEY_DVD }, /* dvd menu */
+ { 0x03, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x1d, KEY_5 },
+ { 0x1f, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x19, KEY_8 },
+ { 0x1b, KEY_9 },
+ { 0x0c, KEY_CANCEL },
+ { 0x15, KEY_0 },
+ { 0x4a, KEY_CLEAR },
+ { 0x13, KEY_BACK },
+ { 0x00, KEY_TAB },
+ { 0x4b, KEY_UP },
+ { 0x4e, KEY_LEFT },
+ { 0x4f, KEY_OK },
+ { 0x52, KEY_RIGHT },
+ { 0x51, KEY_DOWN },
+ { 0x1e, KEY_VOLUMEUP },
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x05, KEY_CHANNELUP },
+ { 0x11, KEY_RECORD },
+ { 0x14, KEY_PLAY },
+ { 0x4c, KEY_PAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x40, KEY_REWIND },
+ { 0x12, KEY_FASTFORWARD },
+ { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x42, KEY_NEXTSONG }, /* skip >| */
+ { 0x54, KEY_CAMERA }, /* capture */
+ { 0x50, KEY_LANGUAGE }, /* sap */
+ { 0x47, KEY_TV2 }, /* pip */
+ { 0x4d, KEY_SCREEN },
+ { 0x43, KEY_SUBTITLE },
+ { 0x10, KEY_MUTE },
+ { 0x49, KEY_AUDIO }, /* l/r */
+ { 0x07, KEY_SLEEP },
+ { 0x08, KEY_VIDEO }, /* a/v */
+ { 0x0e, KEY_PREVIOUS }, /* recall */
+ { 0x45, KEY_ZOOM }, /* zoom + */
+ { 0x46, KEY_ANGLE }, /* zoom - */
+ { 0x56, KEY_RED },
+ { 0x57, KEY_GREEN },
+ { 0x5c, KEY_YELLOW },
+ { 0x5d, KEY_BLUE },
+};
+
+static struct rc_keymap dntv_live_dvbt_pro_map = {
+ .map = {
+ .scan = dntv_live_dvbt_pro,
+ .size = ARRAY_SIZE(dntv_live_dvbt_pro),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVBT_PRO,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvbt_pro(void)
+{
+ return ir_register_map(&dntv_live_dvbt_pro_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvbt_pro(void)
+{
+ ir_unregister_map(&dntv_live_dvbt_pro_map);
+}
+
+module_init(init_rc_map_dntv_live_dvbt_pro)
+module_exit(exit_rc_map_dntv_live_dvbt_pro)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-em-terratec.c b/drivers/media/IR/keymaps/rc-em-terratec.c
new file mode 100644
index 00000000000..3130c9c29e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-em-terratec.c
@@ -0,0 +1,69 @@
+/* em-terratec.h - Keytable for em_terratec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode em_terratec[] = {
+ { 0x01, KEY_CHANNEL },
+ { 0x02, KEY_SELECT },
+ { 0x03, KEY_MUTE },
+ { 0x04, KEY_POWER },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_CHANNELUP },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_0 },
+ { 0x12, KEY_MENU },
+ { 0x13, KEY_PRINT },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x16, KEY_PAUSE },
+ { 0x18, KEY_RECORD },
+ { 0x19, KEY_REWIND },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_FORWARD },
+ { 0x1c, KEY_BACKSPACE },
+ { 0x1e, KEY_STOP },
+ { 0x40, KEY_ZOOM },
+};
+
+static struct rc_keymap em_terratec_map = {
+ .map = {
+ .scan = em_terratec,
+ .size = ARRAY_SIZE(em_terratec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EM_TERRATEC,
+ }
+};
+
+static int __init init_rc_map_em_terratec(void)
+{
+ return ir_register_map(&em_terratec_map);
+}
+
+static void __exit exit_rc_map_em_terratec(void)
+{
+ ir_unregister_map(&em_terratec_map);
+}
+
+module_init(init_rc_map_em_terratec)
+module_exit(exit_rc_map_em_terratec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-empty.c b/drivers/media/IR/keymaps/rc-empty.c
new file mode 100644
index 00000000000..3b338d84b47
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-empty.c
@@ -0,0 +1,44 @@
+/* empty.h - Keytable for empty Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* empty keytable, can be used as a placeholder for not-yet-created keytables */
+
+static struct ir_scancode empty[] = {
+ { 0x2a, KEY_COFFEE },
+};
+
+static struct rc_keymap empty_map = {
+ .map = {
+ .scan = empty,
+ .size = ARRAY_SIZE(empty),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EMPTY,
+ }
+};
+
+static int __init init_rc_map_empty(void)
+{
+ return ir_register_map(&empty_map);
+}
+
+static void __exit exit_rc_map_empty(void)
+{
+ ir_unregister_map(&empty_map);
+}
+
+module_init(init_rc_map_empty)
+module_exit(exit_rc_map_empty)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
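Illustrative sketch (not part of this patch): the rc-empty map above exists so that a bridge driver can register its IR input by map name before a dedicated keytable module has been written, then switch to a real map later without further driver changes. The ir_input_register() helper, the ir_dev_props structure and the example_dev type in the sketch below are assumptions about the ir-core interface of this era; check include/media/ir-core.h for the actual prototypes before copying anything.

	/* Hypothetical bridge-driver snippet: request the placeholder map by name. */
	#include <linux/input.h>
	#include <media/ir-core.h>
	#include <media/rc-map.h>

	struct example_dev {			/* hypothetical driver-private state */
		struct input_dev *input;
		struct ir_dev_props ir_props;
	};

	static int example_ir_init(struct example_dev *dev)
	{
		/* RC_MAP_EMPTY comes from rc-map.h; swap in e.g. RC_MAP_AVERMEDIA
		 * once a dedicated keytable module exists for the board. */
		return ir_input_register(dev->input, RC_MAP_EMPTY,
					 &dev->ir_props, "example-driver");
	}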
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
new file mode 100644
index 00000000000..4b816967877
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
@@ -0,0 +1,81 @@
+/* encore-enltv-fm53.h - Keytable for encore_enltv_fm53 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM v5.3
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode encore_enltv_fm53[] = {
+ { 0x10, KEY_POWER2},
+ { 0x06, KEY_MUTE},
+
+ { 0x09, KEY_1},
+ { 0x1d, KEY_2},
+ { 0x1f, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1b, KEY_5},
+ { 0x11, KEY_6},
+ { 0x17, KEY_7},
+ { 0x12, KEY_8},
+ { 0x16, KEY_9},
+ { 0x48, KEY_0},
+
+ { 0x04, KEY_LIST}, /* -/-- */
+ { 0x40, KEY_LAST}, /* recall */
+
+ { 0x02, KEY_MODE}, /* TV/AV */
+ { 0x05, KEY_CAMERA}, /* SNAPSHOT */
+
+ { 0x4c, KEY_CHANNELUP}, /* UP */
+ { 0x00, KEY_CHANNELDOWN}, /* DOWN */
+ { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
+ { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
+ { 0x49, KEY_ENTER}, /* OK */
+
+ { 0x54, KEY_RECORD},
+ { 0x4d, KEY_PLAY}, /* pause */
+
+ { 0x1e, KEY_MENU}, /* video setting */
+ { 0x0e, KEY_RIGHT}, /* <- */
+ { 0x1a, KEY_LEFT}, /* -> */
+
+ { 0x0a, KEY_CLEAR}, /* video default */
+ { 0x0c, KEY_ZOOM}, /* hide panel */
+ { 0x47, KEY_SLEEP}, /* shutdown */
+};
+
+static struct rc_keymap encore_enltv_fm53_map = {
+ .map = {
+ .scan = encore_enltv_fm53,
+ .size = ARRAY_SIZE(encore_enltv_fm53),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV_FM53,
+ }
+};
+
+static int __init init_rc_map_encore_enltv_fm53(void)
+{
+ return ir_register_map(&encore_enltv_fm53_map);
+}
+
+static void __exit exit_rc_map_encore_enltv_fm53(void)
+{
+ ir_unregister_map(&encore_enltv_fm53_map);
+}
+
+module_init(init_rc_map_encore_enltv_fm53)
+module_exit(exit_rc_map_encore_enltv_fm53)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv.c b/drivers/media/IR/keymaps/rc-encore-enltv.c
new file mode 100644
index 00000000000..9fabffd28cc
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv.c
@@ -0,0 +1,112 @@
+/* encore-enltv.h - Keytable for encore_enltv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
+ Juan Pablo Sormani <sorman@gmail.com> */
+
+static struct ir_scancode encore_enltv[] = {
+
+ /* The power button does nothing, not even in the Windows app,
+ although it does send data (perhaps used for BIOS wakeup?) */
+ { 0x0d, KEY_MUTE },
+
+ { 0x1e, KEY_TV },
+ { 0x00, KEY_VIDEO },
+ { 0x01, KEY_AUDIO }, /* music */
+ { 0x02, KEY_MHP }, /* picture */
+
+ { 0x1f, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x1c, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x1d, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_LIST }, /* -/-- */
+ { 0x0b, KEY_LAST }, /* recall */
+
+ { 0x14, KEY_HOME }, /* win start menu */
+ { 0x15, KEY_EXIT }, /* exit */
+ { 0x16, KEY_CHANNELUP }, /* UP */
+ { 0x12, KEY_CHANNELDOWN }, /* DOWN */
+ { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
+ { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
+
+ { 0x18, KEY_ENTER }, /* OK */
+
+ { 0x0e, KEY_ESC },
+ { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
+ { 0x11, KEY_TAB },
+ { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
+
+ { 0x1a, KEY_MENU },
+ { 0x1b, KEY_ZOOM }, /* fullscreen */
+ { 0x44, KEY_TIME }, /* time shift */
+ { 0x40, KEY_MODE }, /* source */
+
+ { 0x5a, KEY_RECORD },
+ { 0x42, KEY_PLAY }, /* play/pause */
+ { 0x45, KEY_STOP },
+ { 0x43, KEY_CAMERA }, /* camera icon */
+
+ { 0x48, KEY_REWIND },
+ { 0x4a, KEY_FASTFORWARD },
+ { 0x49, KEY_PREVIOUS },
+ { 0x4b, KEY_NEXT },
+
+ { 0x4c, KEY_FAVORITES }, /* tv wall */
+ { 0x4d, KEY_SOUND }, /* DVD sound */
+ { 0x4e, KEY_LANGUAGE }, /* DVD lang */
+ { 0x4f, KEY_TEXT }, /* DVD text */
+
+ { 0x50, KEY_SLEEP }, /* shutdown */
+ { 0x51, KEY_MODE }, /* stereo > main */
+ { 0x52, KEY_SELECT }, /* stereo > sap */
+ { 0x53, KEY_PROG1 }, /* teletext */
+
+ { 0x59, KEY_RED }, /* AP1 */
+ { 0x41, KEY_GREEN }, /* AP2 */
+ { 0x47, KEY_YELLOW }, /* AP3 */
+ { 0x57, KEY_BLUE }, /* AP4 */
+};
+
+static struct rc_keymap encore_enltv_map = {
+ .map = {
+ .scan = encore_enltv,
+ .size = ARRAY_SIZE(encore_enltv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV,
+ }
+};
+
+static int __init init_rc_map_encore_enltv(void)
+{
+ return ir_register_map(&encore_enltv_map);
+}
+
+static void __exit exit_rc_map_encore_enltv(void)
+{
+ ir_unregister_map(&encore_enltv_map);
+}
+
+module_init(init_rc_map_encore_enltv)
+module_exit(exit_rc_map_encore_enltv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv2.c b/drivers/media/IR/keymaps/rc-encore-enltv2.c
new file mode 100644
index 00000000000..efefd516661
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv2.c
@@ -0,0 +1,90 @@
+/* encore-enltv2.h - Keytable for encore_enltv2 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
+ Mauro Carvalho Chehab <mchehab@infradead.org> */
+
+static struct ir_scancode encore_enltv2[] = {
+ { 0x4c, KEY_POWER2 },
+ { 0x4a, KEY_TUNER },
+ { 0x40, KEY_1 },
+ { 0x60, KEY_2 },
+ { 0x50, KEY_3 },
+ { 0x70, KEY_4 },
+ { 0x48, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x78, KEY_8 },
+ { 0x44, KEY_9 },
+ { 0x54, KEY_0 },
+
+ { 0x64, KEY_LAST }, /* +100 */
+ { 0x4e, KEY_AGAIN }, /* Recall */
+
+ { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
+ { 0x5e, KEY_MENU },
+ { 0x56, KEY_SCREEN },
+ { 0x7a, KEY_SETUP },
+
+ { 0x46, KEY_MUTE },
+ { 0x5c, KEY_MODE }, /* Stereo */
+ { 0x74, KEY_INFO },
+ { 0x7c, KEY_CLEAR },
+
+ { 0x55, KEY_UP },
+ { 0x49, KEY_DOWN },
+ { 0x7e, KEY_LEFT },
+ { 0x59, KEY_RIGHT },
+ { 0x6a, KEY_ENTER },
+
+ { 0x42, KEY_VOLUMEUP },
+ { 0x62, KEY_VOLUMEDOWN },
+ { 0x52, KEY_CHANNELUP },
+ { 0x72, KEY_CHANNELDOWN },
+
+ { 0x41, KEY_RECORD },
+ { 0x51, KEY_CAMERA }, /* Snapshot */
+ { 0x75, KEY_TIME }, /* Timeshift */
+ { 0x71, KEY_TV2 }, /* PIP */
+
+ { 0x45, KEY_REWIND },
+ { 0x6f, KEY_PAUSE },
+ { 0x7d, KEY_FORWARD },
+ { 0x79, KEY_STOP },
+};
+
+static struct rc_keymap encore_enltv2_map = {
+ .map = {
+ .scan = encore_enltv2,
+ .size = ARRAY_SIZE(encore_enltv2),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV2,
+ }
+};
+
+static int __init init_rc_map_encore_enltv2(void)
+{
+ return ir_register_map(&encore_enltv2_map);
+}
+
+static void __exit exit_rc_map_encore_enltv2(void)
+{
+ ir_unregister_map(&encore_enltv2_map);
+}
+
+module_init(init_rc_map_encore_enltv2)
+module_exit(exit_rc_map_encore_enltv2)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-evga-indtube.c b/drivers/media/IR/keymaps/rc-evga-indtube.c
new file mode 100644
index 00000000000..3f3fb13813b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-evga-indtube.c
@@ -0,0 +1,61 @@
+/* evga-indtube.h - Keytable for evga_indtube Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode evga_indtube[] = {
+ { 0x12, KEY_POWER},
+ { 0x02, KEY_MODE}, /* TV */
+ { 0x14, KEY_MUTE},
+ { 0x1a, KEY_CHANNELUP},
+ { 0x16, KEY_TV2}, /* PIP */
+ { 0x1d, KEY_VOLUMEUP},
+ { 0x05, KEY_CHANNELDOWN},
+ { 0x0f, KEY_PLAYPAUSE},
+ { 0x19, KEY_VOLUMEDOWN},
+ { 0x1c, KEY_REWIND},
+ { 0x0d, KEY_RECORD},
+ { 0x18, KEY_FORWARD},
+ { 0x1e, KEY_PREVIOUS},
+ { 0x1b, KEY_STOP},
+ { 0x1f, KEY_NEXT},
+ { 0x13, KEY_CAMERA},
+};
+
+static struct rc_keymap evga_indtube_map = {
+ .map = {
+ .scan = evga_indtube,
+ .size = ARRAY_SIZE(evga_indtube),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EVGA_INDTUBE,
+ }
+};
+
+static int __init init_rc_map_evga_indtube(void)
+{
+ return ir_register_map(&evga_indtube_map);
+}
+
+static void __exit exit_rc_map_evga_indtube(void)
+{
+ ir_unregister_map(&evga_indtube_map);
+}
+
+module_init(init_rc_map_evga_indtube)
+module_exit(exit_rc_map_evga_indtube)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-eztv.c b/drivers/media/IR/keymaps/rc-eztv.c
new file mode 100644
index 00000000000..660907a78db
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-eztv.c
@@ -0,0 +1,96 @@
+/* eztv.h - Keytable for eztv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alfons Geser <a.geser@cox.net>
+ * updates from Job D. R. Borges <jobdrb@ig.com.br> */
+
+static struct ir_scancode eztv[] = {
+ { 0x12, KEY_POWER },
+ { 0x01, KEY_TV }, /* DVR */
+ { 0x15, KEY_DVD }, /* DVD */
+ { 0x17, KEY_AUDIO }, /* music */
+ /* DVR mode / DVD mode / music mode */
+
+ { 0x1b, KEY_MUTE }, /* mute */
+ { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
+ { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
+ { 0x16, KEY_ZOOM }, /* full screen */
+ { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
+ { 0x1d, KEY_RESTART }, /* playback / angle / del */
+ { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
+ { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
+
+ { 0x31, KEY_HELP }, /* help */
+ { 0x32, KEY_MODE }, /* num/memo */
+ { 0x33, KEY_ESC }, /* cancel */
+
+ { 0x0c, KEY_UP }, /* up */
+ { 0x10, KEY_DOWN }, /* down */
+ { 0x08, KEY_LEFT }, /* left */
+ { 0x04, KEY_RIGHT }, /* right */
+ { 0x03, KEY_SELECT }, /* select */
+
+ { 0x1f, KEY_REWIND }, /* rewind */
+ { 0x20, KEY_PLAYPAUSE }, /* play/pause */
+ { 0x29, KEY_FORWARD }, /* forward */
+ { 0x14, KEY_AGAIN }, /* repeat */
+ { 0x2b, KEY_RECORD }, /* recording */
+ { 0x2c, KEY_STOP }, /* stop */
+ { 0x2d, KEY_PLAY }, /* play */
+ { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
+
+ { 0x00, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x18, KEY_CHANNELUP }, /* CH.tracking up */
+ { 0x19, KEY_CHANNELDOWN }, /* CH.tracking down */
+
+ { 0x13, KEY_ENTER }, /* enter */
+ { 0x21, KEY_DOT }, /* . (decimal dot) */
+};
+
+static struct rc_keymap eztv_map = {
+ .map = {
+ .scan = eztv,
+ .size = ARRAY_SIZE(eztv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EZTV,
+ }
+};
+
+static int __init init_rc_map_eztv(void)
+{
+ return ir_register_map(&eztv_map);
+}
+
+static void __exit exit_rc_map_eztv(void)
+{
+ ir_unregister_map(&eztv_map);
+}
+
+module_init(init_rc_map_eztv)
+module_exit(exit_rc_map_eztv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flydvb.c b/drivers/media/IR/keymaps/rc-flydvb.c
new file mode 100644
index 00000000000..a173c81035f
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flydvb.c
@@ -0,0 +1,77 @@
+/* flydvb.h - Keytable for flydvb Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flydvb[] = {
+ { 0x01, KEY_ZOOM }, /* Full Screen */
+ { 0x00, KEY_POWER }, /* Power */
+
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x0f, KEY_0 },
+ { 0x10, KEY_MUTE }, /* Mute */
+ { 0x02, KEY_RADIO }, /* TV/Radio */
+ { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
+
+ { 0x14, KEY_VOLUMEUP }, /* VOL+ */
+ { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
+ { 0x12, KEY_CHANNELUP }, /* CH+ */
+ { 0x13, KEY_CHANNELDOWN }, /* CH- */
+ { 0x1d, KEY_ENTER }, /* Enter */
+
+ { 0x1a, KEY_MODE }, /* PIP */
+ { 0x18, KEY_TUNER }, /* Source */
+
+ { 0x1e, KEY_RECORD }, /* Record/Pause */
+ { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
+ { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
+ { 0x19, KEY_BACK }, /* Rewind << */
+ { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
+ { 0x1f, KEY_FORWARD }, /* Forward >> */
+ { 0x16, KEY_PREVIOUS }, /* Back |<< */
+ { 0x11, KEY_STOP }, /* Stop */
+ { 0x0e, KEY_NEXT }, /* End >>| */
+};
+
+static struct rc_keymap flydvb_map = {
+ .map = {
+ .scan = flydvb,
+ .size = ARRAY_SIZE(flydvb),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYDVB,
+ }
+};
+
+static int __init init_rc_map_flydvb(void)
+{
+ return ir_register_map(&flydvb_map);
+}
+
+static void __exit exit_rc_map_flydvb(void)
+{
+ ir_unregister_map(&flydvb_map);
+}
+
+module_init(init_rc_map_flydvb)
+module_exit(exit_rc_map_flydvb)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flyvideo.c b/drivers/media/IR/keymaps/rc-flyvideo.c
new file mode 100644
index 00000000000..9c73043cbdb
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flyvideo.c
@@ -0,0 +1,70 @@
+/* flyvideo.h - Keytable for flyvideo Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flyvideo[] = {
+ { 0x0f, KEY_0 },
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+
+ { 0x0e, KEY_MODE }, /* Air/Cable */
+ { 0x11, KEY_VIDEO }, /* Video */
+ { 0x15, KEY_AUDIO }, /* Audio */
+ { 0x00, KEY_POWER }, /* Power */
+ { 0x18, KEY_TUNER }, /* AV Source */
+ { 0x02, KEY_ZOOM }, /* Fullscreen */
+ { 0x1a, KEY_LANGUAGE }, /* Stereo */
+ { 0x1b, KEY_MUTE }, /* Mute */
+ { 0x14, KEY_VOLUMEUP }, /* Volume + */
+ { 0x17, KEY_VOLUMEDOWN }, /* Volume - */
+ { 0x12, KEY_CHANNELUP }, /* Channel + */
+ { 0x13, KEY_CHANNELDOWN }, /* Channel - */
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x10, KEY_ENTER }, /* Enter */
+
+ { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
+ { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
+ { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
+};
+
+static struct rc_keymap flyvideo_map = {
+ .map = {
+ .scan = flyvideo,
+ .size = ARRAY_SIZE(flyvideo),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYVIDEO,
+ }
+};
+
+static int __init init_rc_map_flyvideo(void)
+{
+ return ir_register_map(&flyvideo_map);
+}
+
+static void __exit exit_rc_map_flyvideo(void)
+{
+ ir_unregister_map(&flyvideo_map);
+}
+
+module_init(init_rc_map_flyvideo)
+module_exit(exit_rc_map_flyvideo)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
new file mode 100644
index 00000000000..cdb10389b10
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
@@ -0,0 +1,98 @@
+/* fusionhdtv-mce.h - Keytable for fusionhdtv_mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DViCO FUSION HDTV MCE remote */
+
+static struct ir_scancode fusionhdtv_mce[] = {
+
+ { 0x0b, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x50, KEY_5 },
+ { 0x54, KEY_6 },
+ { 0x48, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x58, KEY_9 },
+ { 0x03, KEY_0 },
+
+ { 0x5e, KEY_OK },
+ { 0x51, KEY_UP },
+ { 0x53, KEY_DOWN },
+ { 0x5b, KEY_LEFT },
+ { 0x5f, KEY_RIGHT },
+
+ { 0x02, KEY_TV }, /* Labeled DTV on remote */
+ { 0x0e, KEY_MP3 },
+ { 0x1a, KEY_DVD },
+ { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
+ { 0x16, KEY_SETUP },
+ { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
+ { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
+
+ { 0x49, KEY_BACK },
+ { 0x59, KEY_INFO }, /* Labeled MORE on remote */
+ { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
+ { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
+
+ { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
+ { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
+ { 0x42, KEY_ENTER }, /* Labeled START with a green
+ MS Windows logo on remote */
+
+ { 0x15, KEY_VOLUMEUP },
+ { 0x05, KEY_VOLUMEDOWN },
+ { 0x11, KEY_CHANNELUP },
+ { 0x09, KEY_CHANNELDOWN },
+
+ { 0x52, KEY_CAMERA },
+ { 0x5a, KEY_TUNER },
+ { 0x19, KEY_OPEN },
+
+ { 0x13, KEY_MODE }, /* 4:3 16:9 select */
+ { 0x1f, KEY_ZOOM },
+
+ { 0x43, KEY_REWIND },
+ { 0x47, KEY_PLAYPAUSE },
+ { 0x4f, KEY_FASTFORWARD },
+ { 0x57, KEY_MUTE },
+ { 0x0d, KEY_STOP },
+ { 0x01, KEY_RECORD },
+ { 0x4e, KEY_POWER },
+};
+
+static struct rc_keymap fusionhdtv_mce_map = {
+ .map = {
+ .scan = fusionhdtv_mce,
+ .size = ARRAY_SIZE(fusionhdtv_mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FUSIONHDTV_MCE,
+ }
+};
+
+static int __init init_rc_map_fusionhdtv_mce(void)
+{
+ return ir_register_map(&fusionhdtv_mce_map);
+}
+
+static void __exit exit_rc_map_fusionhdtv_mce(void)
+{
+ ir_unregister_map(&fusionhdtv_mce_map);
+}
+
+module_init(init_rc_map_fusionhdtv_mce)
+module_exit(exit_rc_map_fusionhdtv_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gadmei-rm008z.c b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
new file mode 100644
index 00000000000..c16c0d1263a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
@@ -0,0 +1,81 @@
+/* gadmei-rm008z.h - Keytable for gadmei_rm008z Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* GADMEI UTV330+ RM008Z remote
+ Shine Liu <shinel@foxmail.com>
+ */
+
+static struct ir_scancode gadmei_rm008z[] = {
+ { 0x14, KEY_POWER2}, /* POWER OFF */
+ { 0x0c, KEY_MUTE}, /* MUTE */
+
+ { 0x18, KEY_TV}, /* TV */
+ { 0x0e, KEY_VIDEO}, /* AV */
+ { 0x0b, KEY_AUDIO}, /* SV */
+ { 0x0f, KEY_RADIO}, /* FM */
+
+ { 0x00, KEY_1},
+ { 0x01, KEY_2},
+ { 0x02, KEY_3},
+ { 0x03, KEY_4},
+ { 0x04, KEY_5},
+ { 0x05, KEY_6},
+ { 0x06, KEY_7},
+ { 0x07, KEY_8},
+ { 0x08, KEY_9},
+ { 0x09, KEY_0},
+ { 0x0a, KEY_INFO}, /* OSD */
+ { 0x1c, KEY_BACKSPACE}, /* LAST */
+
+ { 0x0d, KEY_PLAY}, /* PLAY */
+ { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x1a, KEY_RECORD}, /* RECORD */
+ { 0x17, KEY_STOP}, /* STOP */
+
+ { 0x1f, KEY_UP}, /* UP */
+ { 0x44, KEY_DOWN}, /* DOWN */
+ { 0x46, KEY_TAB}, /* BACK */
+ { 0x4a, KEY_ZOOM}, /* FULLSCREEN */
+
+ { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x15, KEY_ENTER}, /* OK */
+};
+
+static struct rc_keymap gadmei_rm008z_map = {
+ .map = {
+ .scan = gadmei_rm008z,
+ .size = ARRAY_SIZE(gadmei_rm008z),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GADMEI_RM008Z,
+ }
+};
+
+static int __init init_rc_map_gadmei_rm008z(void)
+{
+ return ir_register_map(&gadmei_rm008z_map);
+}
+
+static void __exit exit_rc_map_gadmei_rm008z(void)
+{
+ ir_unregister_map(&gadmei_rm008z_map);
+}
+
+module_init(init_rc_map_gadmei_rm008z)
+module_exit(exit_rc_map_gadmei_rm008z)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
new file mode 100644
index 00000000000..89f8e384e52
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
@@ -0,0 +1,84 @@
+/* genius-tvgo-a11mce.h - Keytable for genius_tvgo_a11mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for the Genius TVGO A11MCE
+ * Adrian Pardini <pardo.bsso@gmail.com>
+ */
+
+static struct ir_scancode genius_tvgo_a11mce[] = {
+ /* Keys 0 to 9 */
+ { 0x48, KEY_0 },
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x11, KEY_6 },
+ { 0x17, KEY_7 },
+ { 0x12, KEY_8 },
+ { 0x16, KEY_9 },
+
+ { 0x54, KEY_RECORD }, /* recording */
+ { 0x06, KEY_MUTE }, /* mute */
+ { 0x10, KEY_POWER },
+ { 0x40, KEY_LAST }, /* recall */
+ { 0x4c, KEY_CHANNELUP }, /* channel / program + */
+ { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x0d, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x4d, KEY_OK }, /* also labeled as Pause */
+ { 0x1c, KEY_ZOOM }, /* full screen and Stop */
+ { 0x02, KEY_MODE }, /* AV Source or Rewind */
+ { 0x04, KEY_LIST }, /* -/-- */
+ /* small arrows above numbers */
+ { 0x1a, KEY_NEXT }, /* also Fast Forward */
+ { 0x0e, KEY_PREVIOUS }, /* also Rewind */
+ /* these are in a rather non-standard layout and have
+ an alternate name written on them */
+ { 0x1e, KEY_UP }, /* Video Setting */
+ { 0x0a, KEY_DOWN }, /* Video Default */
+ { 0x05, KEY_CAMERA }, /* Snapshot */
+ { 0x0c, KEY_RIGHT }, /* Hide Panel */
+ /* Four buttons without label */
+ { 0x49, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x13, KEY_YELLOW },
+ { 0x50, KEY_BLUE },
+};
+
+static struct rc_keymap genius_tvgo_a11mce_map = {
+ .map = {
+ .scan = genius_tvgo_a11mce,
+ .size = ARRAY_SIZE(genius_tvgo_a11mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GENIUS_TVGO_A11MCE,
+ }
+};
+
+static int __init init_rc_map_genius_tvgo_a11mce(void)
+{
+ return ir_register_map(&genius_tvgo_a11mce_map);
+}
+
+static void __exit exit_rc_map_genius_tvgo_a11mce(void)
+{
+ ir_unregister_map(&genius_tvgo_a11mce_map);
+}
+
+module_init(init_rc_map_genius_tvgo_a11mce)
+module_exit(exit_rc_map_genius_tvgo_a11mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gotview7135.c b/drivers/media/IR/keymaps/rc-gotview7135.c
new file mode 100644
index 00000000000..52f025bb35f
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gotview7135.c
@@ -0,0 +1,79 @@
+/* gotview7135.h - Keytable for gotview7135 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mike Baikov <mike@baikov.com> */
+
+static struct ir_scancode gotview7135[] = {
+
+ { 0x11, KEY_POWER },
+ { 0x35, KEY_TV },
+ { 0x1b, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x19, KEY_2 },
+ { 0x39, KEY_3 },
+ { 0x1f, KEY_4 },
+ { 0x2c, KEY_5 },
+ { 0x21, KEY_6 },
+ { 0x24, KEY_7 },
+ { 0x18, KEY_8 },
+ { 0x2b, KEY_9 },
+ { 0x3b, KEY_AGAIN }, /* LOOP */
+ { 0x06, KEY_AUDIO },
+ { 0x31, KEY_PRINT }, /* PREVIEW */
+ { 0x3e, KEY_VIDEO },
+ { 0x10, KEY_CHANNELUP },
+ { 0x20, KEY_CHANNELDOWN },
+ { 0x0c, KEY_VOLUMEDOWN },
+ { 0x28, KEY_VOLUMEUP },
+ { 0x08, KEY_MUTE },
+ { 0x26, KEY_SEARCH }, /* SCAN */
+ { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x12, KEY_RECORD },
+ { 0x32, KEY_STOP },
+ { 0x3c, KEY_PLAY },
+ { 0x1d, KEY_REWIND },
+ { 0x2d, KEY_PAUSE },
+ { 0x0d, KEY_FORWARD },
+ { 0x05, KEY_ZOOM }, /*FULL*/
+
+ { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
+ { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
+ { 0x1e, KEY_TIME }, /* TIMESHIFT */
+ { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
+};
+
+static struct rc_keymap gotview7135_map = {
+ .map = {
+ .scan = gotview7135,
+ .size = ARRAY_SIZE(gotview7135),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GOTVIEW7135,
+ }
+};
+
+static int __init init_rc_map_gotview7135(void)
+{
+ return ir_register_map(&gotview7135_map);
+}
+
+static void __exit exit_rc_map_gotview7135(void)
+{
+ ir_unregister_map(&gotview7135_map);
+}
+
+module_init(init_rc_map_gotview7135)
+module_exit(exit_rc_map_gotview7135)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-hauppauge-new.c b/drivers/media/IR/keymaps/rc-hauppauge-new.c
new file mode 100644
index 00000000000..c6f8cd7c518
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-hauppauge-new.c
@@ -0,0 +1,100 @@
+/* hauppauge-new.h - Keytable for hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Hauppauge: the newer, gray remotes (there seem to be multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ * Almost RC5 coding, but with some non-standard keys */
+
+static struct ir_scancode hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x0b, KEY_RED }, /* red button */
+ { 0x0c, KEY_RADIO },
+ { 0x0d, KEY_MENU },
+ { 0x0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x0f, KEY_MUTE },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_PREVIOUS }, /* previous channel */
+ { 0x14, KEY_UP },
+ { 0x15, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x17, KEY_RIGHT },
+ { 0x18, KEY_VIDEO }, /* Videos */
+ { 0x19, KEY_AUDIO }, /* Music */
+ /* 0x1a: the "Pictures" button - mapped to KEY_MHP
+ (presumably "Multimedia Home Platform"), as there is
+ no "PICTURES" key in input.h
+ */
+ { 0x1a, KEY_MHP },
+
+ { 0x1b, KEY_EPG }, /* Guide */
+ { 0x1c, KEY_TV },
+ { 0x1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1f, KEY_EXIT }, /* back/exit */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x25, KEY_ENTER }, /* OK */
+ { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x29, KEY_BLUE }, /* blue key */
+ { 0x2e, KEY_GREEN }, /* green button */
+ { 0x30, KEY_PAUSE }, /* pause */
+ { 0x32, KEY_REWIND }, /* backward << */
+ { 0x34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x38, KEY_YELLOW }, /* yellow key */
+ { 0x3b, KEY_SELECT }, /* top right button */
+ { 0x3c, KEY_ZOOM }, /* full */
+ { 0x3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap hauppauge_new_map = {
+ .map = {
+ .scan = hauppauge_new,
+ .size = ARRAY_SIZE(hauppauge_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_hauppauge_new(void)
+{
+ return ir_register_map(&hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_hauppauge_new(void)
+{
+ ir_unregister_map(&hauppauge_new_map);
+}
+
+module_init(init_rc_map_hauppauge_new)
+module_exit(exit_rc_map_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-mce.c b/drivers/media/IR/keymaps/rc-imon-mce.c
new file mode 100644
index 00000000000..e49f350e3a0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-mce.c
@@ -0,0 +1,142 @@
+/* rc-imon-mce.c - Keytable for Windows Media Center RC-6 remotes for use
+ * with the SoundGraph iMON/Antec Veris hardware IR decoder
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* mce-mode imon mce remote key table */
+static struct ir_scancode imon_mce[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x800ff415, KEY_REWIND },
+ { 0x800ff414, KEY_FASTFORWARD },
+ { 0x800ff41b, KEY_PREVIOUS },
+ { 0x800ff41a, KEY_NEXT },
+
+ { 0x800ff416, KEY_PLAY },
+ { 0x800ff418, KEY_PAUSE },
+ { 0x800ff419, KEY_STOP },
+ { 0x800ff417, KEY_RECORD },
+
+ { 0x02000052, KEY_UP },
+ { 0x02000051, KEY_DOWN },
+ { 0x02000050, KEY_LEFT },
+ { 0x0200004f, KEY_RIGHT },
+
+ { 0x800ff41e, KEY_UP },
+ { 0x800ff41f, KEY_DOWN },
+ { 0x800ff420, KEY_LEFT },
+ { 0x800ff421, KEY_RIGHT },
+
+ /* 0x800ff40b also KEY_NUMERIC_POUND on some receivers */
+ { 0x800ff40b, KEY_ENTER },
+ { 0x02000028, KEY_ENTER },
+/* the OK and Enter buttons decode to the same value on some remotes
+ { 0x02000028, KEY_OK }, */
+ { 0x800ff422, KEY_OK },
+ { 0x0200002a, KEY_EXIT },
+ { 0x800ff423, KEY_EXIT },
+ { 0x02000029, KEY_DELETE },
+ /* 0x800ff40a also KEY_NUMERIC_STAR on some receivers */
+ { 0x800ff40a, KEY_DELETE },
+
+ { 0x800ff40e, KEY_MUTE },
+ { 0x800ff410, KEY_VOLUMEUP },
+ { 0x800ff411, KEY_VOLUMEDOWN },
+ { 0x800ff412, KEY_CHANNELUP },
+ { 0x800ff413, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x800ff401, KEY_NUMERIC_1 },
+ { 0x800ff402, KEY_NUMERIC_2 },
+ { 0x800ff403, KEY_NUMERIC_3 },
+ { 0x800ff404, KEY_NUMERIC_4 },
+ { 0x800ff405, KEY_NUMERIC_5 },
+ { 0x800ff406, KEY_NUMERIC_6 },
+ { 0x800ff407, KEY_NUMERIC_7 },
+ { 0x800ff408, KEY_NUMERIC_8 },
+ { 0x800ff409, KEY_NUMERIC_9 },
+ { 0x800ff400, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ /* 0x800ff41d also KEY_BLUE on some receivers */
+ { 0x800ff41d, KEY_NUMERIC_STAR },
+ /* 0x800ff41c also KEY_PREVIOUS on some receivers */
+ { 0x800ff41c, KEY_NUMERIC_POUND },
+
+ { 0x800ff446, KEY_TV },
+ { 0x800ff447, KEY_AUDIO }, /* My Music */
+ { 0x800ff448, KEY_PVR }, /* RecordedTV */
+ { 0x800ff449, KEY_CAMERA },
+ { 0x800ff44a, KEY_VIDEO },
+ /* 0x800ff424 also KEY_MENU on some receivers */
+ { 0x800ff424, KEY_DVD },
+ /* 0x800ff425 also KEY_GREEN on some receivers */
+ { 0x800ff425, KEY_TUNER }, /* LiveTV */
+ { 0x800ff450, KEY_RADIO },
+
+ { 0x800ff44c, KEY_LANGUAGE },
+ { 0x800ff427, KEY_ZOOM }, /* Aspect */
+
+ { 0x800ff45b, KEY_RED },
+ { 0x800ff45c, KEY_GREEN },
+ { 0x800ff45d, KEY_YELLOW },
+ { 0x800ff45e, KEY_BLUE },
+
+ { 0x800ff466, KEY_RED },
+ /* { 0x800ff425, KEY_GREEN }, */
+ { 0x800ff468, KEY_YELLOW },
+ /* { 0x800ff41d, KEY_BLUE }, */
+
+ { 0x800ff40f, KEY_INFO },
+ { 0x800ff426, KEY_EPG }, /* Guide */
+ { 0x800ff45a, KEY_SUBTITLE }, /* Caption/Teletext */
+ { 0x800ff44d, KEY_TITLE },
+
+ { 0x800ff40c, KEY_POWER },
+ { 0x800ff40d, KEY_PROG1 }, /* Windows MCE button */
+
+};
+
+static struct rc_keymap imon_mce_map = {
+ .map = {
+ .scan = imon_mce,
+ .size = ARRAY_SIZE(imon_mce),
+ /* it's RC6, but with a hardware decoder */
+ .ir_type = IR_TYPE_RC6,
+ .name = RC_MAP_IMON_MCE,
+ }
+};
+
+static int __init init_rc_map_imon_mce(void)
+{
+ return ir_register_map(&imon_mce_map);
+}
+
+static void __exit exit_rc_map_imon_mce(void)
+{
+ ir_unregister_map(&imon_mce_map);
+}
+
+module_init(init_rc_map_imon_mce)
+module_exit(exit_rc_map_imon_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-pad.c b/drivers/media/IR/keymaps/rc-imon-pad.c
new file mode 100644
index 00000000000..bc4db72f02e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-pad.c
@@ -0,0 +1,156 @@
+/* rc-imon-pad.c - Keytable for SoundGraph iMON PAD and Antec Veris
+ * RM-200 Remote Control
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * standard imon remote key table, which isn't really entirely
+ * "standard", as different receivers decode the same key on the
+ * same remote to different hex codes, and the silkscreened names
+ * vary a bit between the SoundGraph and Antec remotes... ugh.
+ */
+static struct ir_scancode imon_pad[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x2a8195b7, KEY_REWIND },
+ { 0x298315b7, KEY_REWIND },
+ { 0x2b8115b7, KEY_FASTFORWARD },
+ { 0x2b8315b7, KEY_FASTFORWARD },
+ { 0x2b9115b7, KEY_PREVIOUS },
+ { 0x298195b7, KEY_NEXT },
+
+ { 0x2a8115b7, KEY_PLAY },
+ { 0x2a8315b7, KEY_PLAY },
+ { 0x2a9115b7, KEY_PAUSE },
+ { 0x2b9715b7, KEY_STOP },
+ { 0x298115b7, KEY_RECORD },
+
+ { 0x01008000, KEY_UP },
+ { 0x01007f00, KEY_DOWN },
+ { 0x01000080, KEY_LEFT },
+ { 0x0100007f, KEY_RIGHT },
+
+ { 0x2aa515b7, KEY_UP },
+ { 0x289515b7, KEY_DOWN },
+ { 0x29a515b7, KEY_LEFT },
+ { 0x2ba515b7, KEY_RIGHT },
+
+ { 0x0200002c, KEY_SPACE }, /* Select/Space */
+ { 0x2a9315b7, KEY_SPACE }, /* Select/Space */
+ { 0x02000028, KEY_ENTER },
+ { 0x28a195b7, KEY_ENTER },
+ { 0x288195b7, KEY_EXIT },
+ { 0x02000029, KEY_ESC },
+ { 0x2bb715b7, KEY_ESC },
+ { 0x0200002a, KEY_BACKSPACE },
+ { 0x28a115b7, KEY_BACKSPACE },
+
+ { 0x2b9595b7, KEY_MUTE },
+ { 0x28a395b7, KEY_VOLUMEUP },
+ { 0x28a595b7, KEY_VOLUMEDOWN },
+ { 0x289395b7, KEY_CHANNELUP },
+ { 0x288795b7, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x28b595b7, KEY_NUMERIC_1 },
+ { 0x2bb195b7, KEY_NUMERIC_2 },
+ { 0x28b195b7, KEY_NUMERIC_3 },
+ { 0x2a8595b7, KEY_NUMERIC_4 },
+ { 0x299595b7, KEY_NUMERIC_5 },
+ { 0x2aa595b7, KEY_NUMERIC_6 },
+ { 0x2b9395b7, KEY_NUMERIC_7 },
+ { 0x2a8515b7, KEY_NUMERIC_8 },
+ { 0x2aa115b7, KEY_NUMERIC_9 },
+ { 0x2ba595b7, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x28b515b7, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ { 0x29a115b7, KEY_NUMERIC_POUND },
+
+ { 0x2b8515b7, KEY_VIDEO },
+ { 0x299195b7, KEY_AUDIO },
+ { 0x2ba115b7, KEY_CAMERA },
+ { 0x28a515b7, KEY_TV },
+ { 0x29a395b7, KEY_DVD },
+ { 0x29a295b7, KEY_DVD },
+
+ /* the Menu key between DVD and Subtitle on the RM-200... */
+ { 0x2ba385b7, KEY_MENU },
+ { 0x2ba395b7, KEY_MENU },
+
+ { 0x288515b7, KEY_BOOKMARKS },
+ { 0x2ab715b7, KEY_MEDIA }, /* Thumbnail */
+ { 0x298595b7, KEY_SUBTITLE },
+ { 0x2b8595b7, KEY_LANGUAGE },
+
+ { 0x29a595b7, KEY_ZOOM },
+ { 0x2aa395b7, KEY_SCREEN }, /* FullScreen */
+
+ { 0x299115b7, KEY_KEYBOARD },
+ { 0x299135b7, KEY_KEYBOARD },
+
+ { 0x01010000, BTN_LEFT },
+ { 0x01020000, BTN_RIGHT },
+ { 0x01010080, BTN_LEFT },
+ { 0x01020080, BTN_RIGHT },
+ { 0x688301b7, BTN_LEFT },
+ { 0x688481b7, BTN_RIGHT },
+
+ { 0x2a9395b7, KEY_CYCLEWINDOWS }, /* TaskSwitcher */
+ { 0x2b8395b7, KEY_TIME }, /* Timer */
+
+ { 0x289115b7, KEY_POWER },
+ { 0x29b195b7, KEY_EJECTCD }, /* the one next to play */
+ { 0x299395b7, KEY_EJECTCLOSECD }, /* eject (by TaskSw) */
+
+ { 0x02800000, KEY_CONTEXT_MENU }, /* Left Menu */
+ { 0x2b8195b7, KEY_CONTEXT_MENU }, /* Left Menu*/
+ { 0x02000065, KEY_COMPOSE }, /* RightMenu */
+ { 0x28b715b7, KEY_COMPOSE }, /* RightMenu */
+ { 0x2ab195b7, KEY_PROG1 }, /* Go or MultiMon */
+ { 0x29b715b7, KEY_DASHBOARD }, /* AppLauncher */
+};
+
+static struct rc_keymap imon_pad_map = {
+ .map = {
+ .scan = imon_pad,
+ .size = ARRAY_SIZE(imon_pad),
+ /* actual protocol details unknown, hardware decoder */
+ .ir_type = IR_TYPE_OTHER,
+ .name = RC_MAP_IMON_PAD,
+ }
+};
+
+static int __init init_rc_map_imon_pad(void)
+{
+ return ir_register_map(&imon_pad_map);
+}
+
+static void __exit exit_rc_map_imon_pad(void)
+{
+ ir_unregister_map(&imon_pad_map);
+}
+
+module_init(init_rc_map_imon_pad)
+module_exit(exit_rc_map_imon_pad)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-iodata-bctv7e.c b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
new file mode 100644
index 00000000000..ef6600259fc
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
@@ -0,0 +1,88 @@
+/* iodata-bctv7e.h - Keytable for iodata_bctv7e Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* IO-DATA BCTV7E Remote */
+
+static struct ir_scancode iodata_bctv7e[] = {
+ { 0x40, KEY_TV },
+ { 0x20, KEY_RADIO }, /* FM */
+ { 0x60, KEY_EPG },
+ { 0x00, KEY_POWER },
+
+ /* Keys 0 to 9 */
+ { 0x44, KEY_0 }, /* 10 */
+ { 0x50, KEY_1 },
+ { 0x30, KEY_2 },
+ { 0x70, KEY_3 },
+ { 0x48, KEY_4 },
+ { 0x28, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x78, KEY_9 },
+
+ { 0x10, KEY_L }, /* Live */
+ { 0x08, KEY_TIME }, /* Time Shift */
+
+ { 0x18, KEY_PLAYPAUSE }, /* Play */
+
+ { 0x24, KEY_ENTER }, /* 11 */
+ { 0x64, KEY_ESC }, /* 12 */
+ { 0x04, KEY_M }, /* Multi */
+
+ { 0x54, KEY_VIDEO },
+ { 0x34, KEY_CHANNELUP },
+ { 0x74, KEY_VOLUMEUP },
+ { 0x14, KEY_MUTE },
+
+ { 0x4c, KEY_VCR }, /* SVIDEO */
+ { 0x2c, KEY_CHANNELDOWN },
+ { 0x6c, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_ZOOM },
+
+ { 0x5c, KEY_PAUSE },
+ { 0x3c, KEY_RED }, /* || (red) */
+ { 0x7c, KEY_RECORD }, /* recording */
+ { 0x1c, KEY_STOP },
+
+ { 0x41, KEY_REWIND }, /* backward << */
+ { 0x21, KEY_PLAY },
+ { 0x61, KEY_FASTFORWARD }, /* forward >> */
+ { 0x01, KEY_NEXT }, /* skip >| */
+};
+
+static struct rc_keymap iodata_bctv7e_map = {
+ .map = {
+ .scan = iodata_bctv7e,
+ .size = ARRAY_SIZE(iodata_bctv7e),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_IODATA_BCTV7E,
+ }
+};
+
+static int __init init_rc_map_iodata_bctv7e(void)
+{
+ return ir_register_map(&iodata_bctv7e_map);
+}
+
+static void __exit exit_rc_map_iodata_bctv7e(void)
+{
+ ir_unregister_map(&iodata_bctv7e_map);
+}
+
+module_init(init_rc_map_iodata_bctv7e)
+module_exit(exit_rc_map_iodata_bctv7e)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kaiomy.c b/drivers/media/IR/keymaps/rc-kaiomy.c
new file mode 100644
index 00000000000..4c7883ba0f1
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kaiomy.c
@@ -0,0 +1,87 @@
+/* kaiomy.h - Keytable for kaiomy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kaiomy TVnPC U2
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kaiomy[] = {
+ { 0x43, KEY_POWER2},
+ { 0x01, KEY_LIST},
+ { 0x0b, KEY_ZOOM},
+ { 0x03, KEY_POWER},
+
+ { 0x04, KEY_1},
+ { 0x08, KEY_2},
+ { 0x02, KEY_3},
+
+ { 0x0f, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+
+ { 0x0c, KEY_7},
+ { 0x0d, KEY_8},
+ { 0x0a, KEY_9},
+
+ { 0x11, KEY_0},
+
+ { 0x09, KEY_CHANNELUP},
+ { 0x07, KEY_CHANNELDOWN},
+
+ { 0x0e, KEY_VOLUMEUP},
+ { 0x13, KEY_VOLUMEDOWN},
+
+ { 0x10, KEY_HOME},
+ { 0x12, KEY_ENTER},
+
+ { 0x14, KEY_RECORD},
+ { 0x15, KEY_STOP},
+ { 0x16, KEY_PLAY},
+ { 0x17, KEY_MUTE},
+
+ { 0x18, KEY_UP},
+ { 0x19, KEY_DOWN},
+ { 0x1a, KEY_LEFT},
+ { 0x1b, KEY_RIGHT},
+
+ { 0x1c, KEY_RED},
+ { 0x1d, KEY_GREEN},
+ { 0x1e, KEY_YELLOW},
+ { 0x1f, KEY_BLUE},
+};
+
+static struct rc_keymap kaiomy_map = {
+ .map = {
+ .scan = kaiomy,
+ .size = ARRAY_SIZE(kaiomy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KAIOMY,
+ }
+};
+
+static int __init init_rc_map_kaiomy(void)
+{
+ return ir_register_map(&kaiomy_map);
+}
+
+static void __exit exit_rc_map_kaiomy(void)
+{
+ ir_unregister_map(&kaiomy_map);
+}
+
+module_init(init_rc_map_kaiomy)
+module_exit(exit_rc_map_kaiomy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-315u.c b/drivers/media/IR/keymaps/rc-kworld-315u.c
new file mode 100644
index 00000000000..618c817374e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-315u.c
@@ -0,0 +1,83 @@
+/* kworld-315u.h - Keytable for kworld_315u Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld 315U
+ */
+
+static struct ir_scancode kworld_315u[] = {
+ { 0x6143, KEY_POWER },
+ { 0x6101, KEY_TUNER }, /* source */
+ { 0x610b, KEY_ZOOM },
+ { 0x6103, KEY_POWER2 }, /* shutdown */
+
+ { 0x6104, KEY_1 },
+ { 0x6108, KEY_2 },
+ { 0x6102, KEY_3 },
+ { 0x6109, KEY_CHANNELUP },
+
+ { 0x610f, KEY_4 },
+ { 0x6105, KEY_5 },
+ { 0x6106, KEY_6 },
+ { 0x6107, KEY_CHANNELDOWN },
+
+ { 0x610c, KEY_7 },
+ { 0x610d, KEY_8 },
+ { 0x610a, KEY_9 },
+ { 0x610e, KEY_VOLUMEUP },
+
+ { 0x6110, KEY_LAST },
+ { 0x6111, KEY_0 },
+ { 0x6112, KEY_ENTER },
+ { 0x6113, KEY_VOLUMEDOWN },
+
+ { 0x6114, KEY_RECORD },
+ { 0x6115, KEY_STOP },
+ { 0x6116, KEY_PLAY },
+ { 0x6117, KEY_MUTE },
+
+ { 0x6118, KEY_UP },
+ { 0x6119, KEY_DOWN },
+ { 0x611a, KEY_LEFT },
+ { 0x611b, KEY_RIGHT },
+
+ { 0x611c, KEY_RED },
+ { 0x611d, KEY_GREEN },
+ { 0x611e, KEY_YELLOW },
+ { 0x611f, KEY_BLUE },
+};
+
+static struct rc_keymap kworld_315u_map = {
+ .map = {
+ .scan = kworld_315u,
+ .size = ARRAY_SIZE(kworld_315u),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_KWORLD_315U,
+ }
+};
+
+static int __init init_rc_map_kworld_315u(void)
+{
+ return ir_register_map(&kworld_315u_map);
+}
+
+static void __exit exit_rc_map_kworld_315u(void)
+{
+ ir_unregister_map(&kworld_315u_map);
+}
+
+module_init(init_rc_map_kworld_315u)
+module_exit(exit_rc_map_kworld_315u)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
new file mode 100644
index 00000000000..366732f1f7b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
@@ -0,0 +1,99 @@
+/* kworld-plus-tv-analog.h - Keytable for kworld_plus_tv_analog Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld Plus TV Analog Lite PCI IR
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kworld_plus_tv_analog[] = {
+ { 0x0c, KEY_PROG1 }, /* Kworld key */
+ { 0x16, KEY_CLOSECD }, /* -> ) */
+ { 0x1d, KEY_POWER2 },
+
+ { 0x00, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
+ { 0x03, KEY_4 }, /* Two keys have the same code: 4 and right */
+ { 0x04, KEY_5 },
+ { 0x05, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x07, KEY_8 },
+ { 0x08, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_AGAIN },
+ { 0x14, KEY_MUTE },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x0b, KEY_ENTER },
+
+ { 0x10, KEY_CHANNELUP },
+ { 0x11, KEY_CHANNELDOWN },
+
+ /* Couldn't map key left/key right since those
+ conflict with the '3' and '4' scancodes;
+ it is unclear what the original driver does
+ */
+
+ { 0x13, KEY_VOLUMEUP },
+ { 0x12, KEY_VOLUMEDOWN },
+
+ /* The lower part of the remote.
+ There are several duplicated scancodes there,
+ most of which conflict with the digits.
+ Map only the otherwise unused scancodes.
+ The original driver somehow tells these buttons apart,
+ but it does not seem to be via some GPIO,
+ nor is it related to the time between keyup
+ and keydown.
+ */
+ { 0x19, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_STOP},
+ { 0x1b, KEY_RECORD},
+
+ { 0x22, KEY_TEXT},
+
+ { 0x15, KEY_AUDIO}, /* ((*)) */
+ { 0x0f, KEY_ZOOM},
+ { 0x1c, KEY_CAMERA}, /* snapshot */
+
+ { 0x18, KEY_RED}, /* B */
+ { 0x23, KEY_GREEN}, /* C */
+};
+
+static struct rc_keymap kworld_plus_tv_analog_map = {
+ .map = {
+ .scan = kworld_plus_tv_analog,
+ .size = ARRAY_SIZE(kworld_plus_tv_analog),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KWORLD_PLUS_TV_ANALOG,
+ }
+};
+
+static int __init init_rc_map_kworld_plus_tv_analog(void)
+{
+ return ir_register_map(&kworld_plus_tv_analog_map);
+}
+
+static void __exit exit_rc_map_kworld_plus_tv_analog(void)
+{
+ ir_unregister_map(&kworld_plus_tv_analog_map);
+}
+
+module_init(init_rc_map_kworld_plus_tv_analog)
+module_exit(exit_rc_map_kworld_plus_tv_analog)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-manli.c b/drivers/media/IR/keymaps/rc-manli.c
new file mode 100644
index 00000000000..1e9fbfa90a1
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-manli.c
@@ -0,0 +1,135 @@
+/* manli.h - Keytable for manli Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michael Tokarev <mjt@tls.msk.ru>
+ http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
+ keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
+ least, and probably other cards too.
+ The "ascii-art picture" below (in comments, first row
+ is the keycode in hex, and subsequent row(s) shows
+ the button labels (several variants when appropriate)
+ helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode manli[] = {
+
+ /* 0x1c 0x12 *
+ * FUNCTION POWER *
+ * FM (|) *
+ * */
+ { 0x1c, KEY_RADIO }, /*XXX*/
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 +100 *
+ * PLUS *
+ * */
+ { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
+ { 0x00, KEY_0 },
+ { 0x17, KEY_DIGITS }, /*XXX*/
+
+ /* 0x14 0x10 *
+ * MENU INFO *
+ * OSD */
+ { 0x14, KEY_MENU },
+ { 0x10, KEY_INFO },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+ * 0x15 *
+ * Down *
+ * */
+ { 0x0b, KEY_UP },
+ { 0x18, KEY_LEFT },
+ { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
+ { 0x0c, KEY_RIGHT },
+ { 0x15, KEY_DOWN },
+
+ /* 0x11 0x0d *
+ * TV/AV MODE *
+ * SOURCE STEREO *
+ * */
+ { 0x11, KEY_TV }, /*XXX*/
+ { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
+
+ /* 0x0f 0x1b 0x1a *
+ * AUDIO Vol+ Chan+ *
+ * TIMESHIFT??? *
+ * *
+ * 0x0e 0x1f 0x1e *
+ * SLEEP Vol- Chan- *
+ * */
+ { 0x0f, KEY_AUDIO },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x0e, KEY_TIME },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1e, KEY_CHANNELDOWN },
+
+ /* 0x13 0x19 *
+ * MUTE SNAPSHOT*
+ * */
+ { 0x13, KEY_MUTE },
+ { 0x19, KEY_CAMERA },
+
+ /* 0x1d unused ? */
+};
+
+static struct rc_keymap manli_map = {
+ .map = {
+ .scan = manli,
+ .size = ARRAY_SIZE(manli),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MANLI,
+ }
+};
+
+static int __init init_rc_map_manli(void)
+{
+ return ir_register_map(&manli_map);
+}
+
+static void __exit exit_rc_map_manli(void)
+{
+ ir_unregister_map(&manli_map);
+}
+
+module_init(init_rc_map_manli)
+module_exit(exit_rc_map_manli)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
new file mode 100644
index 00000000000..eb8e42c18ff
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
@@ -0,0 +1,123 @@
+/* msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
+ is marked "KS003". The controller is I2C at address 0x30, but does not seem
+ to respond to probes until a read is performed from a valid device.
+ I don't know why...
+
+ Note: This remote may be of similar or identical design to the
+ Pixelview remote (?). The raw codes and duplicate button codes
+ appear to be the same.
+
+ Henry Wong <henry@stuffedcow.net>
+ Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
+*/
+
+static struct ir_scancode msi_tvanywhere_plus[] = {
+
+/* ---- Remote Button Layout ----
+
+ POWER SOURCE SCAN MUTE
+ TV/FM 1 2 3
+ |> 4 5 6
+ <| 7 8 9
+ ^^UP 0 + RECALL
+ vvDN RECORD STOP PLAY
+
+ MINIMIZE ZOOM
+
+ CH+
+ VOL- VOL+
+ CH-
+
+ SNAPSHOT MTS
+
+ << FUNC >> RESET
+*/
+
+ { 0x01, KEY_1 }, /* 1 */
+ { 0x0b, KEY_2 }, /* 2 */
+ { 0x1b, KEY_3 }, /* 3 */
+ { 0x05, KEY_4 }, /* 4 */
+ { 0x09, KEY_5 }, /* 5 */
+ { 0x15, KEY_6 }, /* 6 */
+ { 0x06, KEY_7 }, /* 7 */
+ { 0x0a, KEY_8 }, /* 8 */
+ { 0x12, KEY_9 }, /* 9 */
+ { 0x02, KEY_0 }, /* 0 */
+ { 0x10, KEY_KPPLUS }, /* + */
+ { 0x13, KEY_AGAIN }, /* Recall */
+
+ { 0x1e, KEY_POWER }, /* Power */
+ { 0x07, KEY_TUNER }, /* Source */
+ { 0x1c, KEY_SEARCH }, /* Scan */
+ { 0x18, KEY_MUTE }, /* Mute */
+
+ { 0x03, KEY_RADIO }, /* TV/FM */
+ /* The next four keys are duplicates that appear to send the
+ same IR code as Ch+, Ch-, >>, and << . The raw code assigned
+ to them is the actual code + 0x20 - they will never be
+ detected as such unless some way is discovered to distinguish
+ these buttons from those that have the same code. */
+ { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
+ { 0x37, KEY_LEFT }, /* <| and Ch- */
+ { 0x2c, KEY_UP }, /* ^^Up and >> */
+ { 0x24, KEY_DOWN }, /* vvDn and << */
+
+ { 0x00, KEY_RECORD }, /* Record */
+ { 0x08, KEY_STOP }, /* Stop */
+ { 0x11, KEY_PLAY }, /* Play */
+
+ { 0x0f, KEY_CLOSE }, /* Minimize */
+ { 0x19, KEY_ZOOM }, /* Zoom */
+ { 0x1a, KEY_CAMERA }, /* Snapshot */
+ { 0x0d, KEY_LANGUAGE }, /* MTS */
+
+ { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
+ { 0x16, KEY_VOLUMEUP }, /* Vol+ */
+ { 0x17, KEY_CHANNELDOWN }, /* Ch- */
+ { 0x1f, KEY_CHANNELUP }, /* Ch+ */
+
+ { 0x04, KEY_REWIND }, /* << */
+ { 0x0e, KEY_MENU }, /* Function */
+ { 0x0c, KEY_FASTFORWARD }, /* >> */
+ { 0x1d, KEY_RESTART }, /* Reset */
+};
+
+static struct rc_keymap msi_tvanywhere_plus_map = {
+ .map = {
+ .scan = msi_tvanywhere_plus,
+ .size = ARRAY_SIZE(msi_tvanywhere_plus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE_PLUS,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere_plus(void)
+{
+ return ir_register_map(&msi_tvanywhere_plus_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere_plus(void)
+{
+ ir_unregister_map(&msi_tvanywhere_plus_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere_plus)
+module_exit(exit_rc_map_msi_tvanywhere_plus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
new file mode 100644
index 00000000000..ef411854f06
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
@@ -0,0 +1,69 @@
+/* msi-tvanywhere.h - Keytable for msi_tvanywhere Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* MSI TV@nywhere MASTER remote */
+
+static struct ir_scancode msi_tvanywhere[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0c, KEY_MUTE },
+ { 0x0f, KEY_SCREEN }, /* Full Screen */
+ { 0x10, KEY_FN }, /* Function */
+ { 0x11, KEY_TIME }, /* Time shift */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MEDIA }, /* MTS */
+ { 0x14, KEY_SLOW },
+ { 0x16, KEY_REWIND }, /* backward << */
+ { 0x17, KEY_ENTER }, /* Return */
+ { 0x18, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap msi_tvanywhere_map = {
+ .map = {
+ .scan = msi_tvanywhere,
+ .size = ARRAY_SIZE(msi_tvanywhere),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere(void)
+{
+ return ir_register_map(&msi_tvanywhere_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere(void)
+{
+ ir_unregister_map(&msi_tvanywhere_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere)
+module_exit(exit_rc_map_msi_tvanywhere)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nebula.c b/drivers/media/IR/keymaps/rc-nebula.c
new file mode 100644
index 00000000000..ccc50eb402e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nebula.c
@@ -0,0 +1,96 @@
+/* nebula.h - Keytable for nebula Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode nebula[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_TV },
+ { 0x0b, KEY_AUX },
+ { 0x0c, KEY_DVD },
+ { 0x0d, KEY_POWER },
+ { 0x0e, KEY_MHP }, /* labelled 'Picture' */
+ { 0x0f, KEY_AUDIO },
+ { 0x10, KEY_INFO },
+ { 0x11, KEY_F13 }, /* 16:9 */
+ { 0x12, KEY_F14 }, /* 14:9 */
+ { 0x13, KEY_EPG },
+ { 0x14, KEY_EXIT },
+ { 0x15, KEY_MENU },
+ { 0x16, KEY_UP },
+ { 0x17, KEY_DOWN },
+ { 0x18, KEY_LEFT },
+ { 0x19, KEY_RIGHT },
+ { 0x1a, KEY_ENTER },
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x1d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_RED },
+ { 0x20, KEY_GREEN },
+ { 0x21, KEY_YELLOW },
+ { 0x22, KEY_BLUE },
+ { 0x23, KEY_SUBTITLE },
+ { 0x24, KEY_F15 }, /* AD */
+ { 0x25, KEY_TEXT },
+ { 0x26, KEY_MUTE },
+ { 0x27, KEY_REWIND },
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PLAY },
+ { 0x2a, KEY_FASTFORWARD },
+ { 0x2b, KEY_F16 }, /* chapter */
+ { 0x2c, KEY_PAUSE },
+ { 0x2d, KEY_PLAY },
+ { 0x2e, KEY_RECORD },
+ { 0x2f, KEY_F17 }, /* picture in picture */
+ { 0x30, KEY_KPPLUS }, /* zoom in */
+ { 0x31, KEY_KPMINUS }, /* zoom out */
+ { 0x32, KEY_F18 }, /* capture */
+ { 0x33, KEY_F19 }, /* web */
+ { 0x34, KEY_EMAIL },
+ { 0x35, KEY_PHONE },
+ { 0x36, KEY_PC },
+};
+
+static struct rc_keymap nebula_map = {
+ .map = {
+ .scan = nebula,
+ .size = ARRAY_SIZE(nebula),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NEBULA,
+ }
+};
+
+static int __init init_rc_map_nebula(void)
+{
+ return ir_register_map(&nebula_map);
+}
+
+static void __exit exit_rc_map_nebula(void)
+{
+ ir_unregister_map(&nebula_map);
+}
+
+module_init(init_rc_map_nebula)
+module_exit(exit_rc_map_nebula)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
new file mode 100644
index 00000000000..e1b54d20db6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -0,0 +1,105 @@
+/* nec-terratec-cinergy-xs.h - Keytable for nec_terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS FM
+ Mauro Carvalho Chehab <mchehab@redhat.com>
+ */
+
+static struct ir_scancode nec_terratec_cinergy_xs[] = {
+ { 0x1441, KEY_HOME},
+ { 0x1401, KEY_POWER2},
+
+ { 0x1442, KEY_MENU}, /* DVD menu */
+ { 0x1443, KEY_SUBTITLE},
+ { 0x1444, KEY_TEXT}, /* Teletext */
+ { 0x1445, KEY_DELETE},
+
+ { 0x1402, KEY_1},
+ { 0x1403, KEY_2},
+ { 0x1404, KEY_3},
+ { 0x1405, KEY_4},
+ { 0x1406, KEY_5},
+ { 0x1407, KEY_6},
+ { 0x1408, KEY_7},
+ { 0x1409, KEY_8},
+ { 0x140a, KEY_9},
+ { 0x140c, KEY_0},
+
+ { 0x140b, KEY_TUNER}, /* AV */
+ { 0x140d, KEY_MODE}, /* A.B */
+
+ { 0x1446, KEY_TV},
+ { 0x1447, KEY_DVD},
+ { 0x1449, KEY_VIDEO},
+ { 0x144a, KEY_RADIO}, /* Music */
+ { 0x144b, KEY_CAMERA}, /* PIC */
+
+ { 0x1410, KEY_UP},
+ { 0x1411, KEY_LEFT},
+ { 0x1412, KEY_OK},
+ { 0x1413, KEY_RIGHT},
+ { 0x1414, KEY_DOWN},
+
+ { 0x140f, KEY_EPG},
+ { 0x1416, KEY_INFO},
+ { 0x144d, KEY_BACKSPACE},
+
+ { 0x141c, KEY_VOLUMEUP},
+ { 0x141e, KEY_VOLUMEDOWN},
+
+ { 0x144c, KEY_PLAY},
+ { 0x141d, KEY_MUTE},
+
+ { 0x141b, KEY_CHANNELUP},
+ { 0x141f, KEY_CHANNELDOWN},
+
+ { 0x1417, KEY_RED},
+ { 0x1418, KEY_GREEN},
+ { 0x1419, KEY_YELLOW},
+ { 0x141a, KEY_BLUE},
+
+ { 0x1458, KEY_RECORD},
+ { 0x1448, KEY_STOP},
+ { 0x1440, KEY_PAUSE},
+
+ { 0x1454, KEY_LAST},
+ { 0x144e, KEY_REWIND},
+ { 0x144f, KEY_FASTFORWARD},
+ { 0x145c, KEY_NEXT},
+};
+
+static struct rc_keymap nec_terratec_cinergy_xs_map = {
+ .map = {
+ .scan = nec_terratec_cinergy_xs,
+ .size = ARRAY_SIZE(nec_terratec_cinergy_xs),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_nec_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&nec_terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_nec_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&nec_terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_nec_terratec_cinergy_xs)
+module_exit(exit_rc_map_nec_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-norwood.c b/drivers/media/IR/keymaps/rc-norwood.c
new file mode 100644
index 00000000000..e5849a6b3f0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-norwood.c
@@ -0,0 +1,85 @@
+/* norwood.h - Keytable for norwood Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Norwood Micro (non-Pro) TV Tuner
+ By Peter Naulls <peter@chocky.org>
+ Key comments are the functions given in the manual */
+
+static struct ir_scancode norwood[] = {
+ /* Keys 0 to 9 */
+ { 0x20, KEY_0 },
+ { 0x21, KEY_1 },
+ { 0x22, KEY_2 },
+ { 0x23, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x25, KEY_5 },
+ { 0x26, KEY_6 },
+ { 0x27, KEY_7 },
+ { 0x28, KEY_8 },
+ { 0x29, KEY_9 },
+
+ { 0x78, KEY_TUNER }, /* Video Source */
+ { 0x2c, KEY_EXIT }, /* Open/Close software */
+ { 0x2a, KEY_SELECT }, /* 2 Digit Select */
+ { 0x69, KEY_AGAIN }, /* Recall */
+
+ { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
+ { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
+ { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
+ { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
+
+ { 0x2d, KEY_MUTE }, /* Mute */
+ { 0x30, KEY_VOLUMEUP }, /* Volume up */
+ { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
+ { 0x60, KEY_CHANNELUP }, /* Channel up */
+ { 0x61, KEY_CHANNELDOWN }, /* Channel down */
+
+ { 0x3f, KEY_RECORD }, /* Record */
+ { 0x37, KEY_PLAY }, /* Play */
+ { 0x36, KEY_PAUSE }, /* Pause */
+ { 0x2b, KEY_STOP }, /* Stop */
+ { 0x67, KEY_FASTFORWARD }, /* Forward */
+ { 0x66, KEY_REWIND }, /* Rewind */
+ { 0x3e, KEY_SEARCH }, /* Auto Scan */
+ { 0x2e, KEY_CAMERA }, /* Capture Video */
+ { 0x6d, KEY_MENU }, /* Show/Hide Control */
+ { 0x2f, KEY_ZOOM }, /* Full Screen */
+ { 0x34, KEY_RADIO }, /* FM */
+ { 0x65, KEY_POWER }, /* Computer power */
+};
+
+static struct rc_keymap norwood_map = {
+ .map = {
+ .scan = norwood,
+ .size = ARRAY_SIZE(norwood),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NORWOOD,
+ }
+};
+
+static int __init init_rc_map_norwood(void)
+{
+ return ir_register_map(&norwood_map);
+}
+
+static void __exit exit_rc_map_norwood(void)
+{
+ ir_unregister_map(&norwood_map);
+}
+
+module_init(init_rc_map_norwood)
+module_exit(exit_rc_map_norwood)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-npgtech.c b/drivers/media/IR/keymaps/rc-npgtech.c
new file mode 100644
index 00000000000..b9ece1e9029
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-npgtech.c
@@ -0,0 +1,80 @@
+/* npgtech.h - Keytable for npgtech Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode npgtech[] = {
+ { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x2a, KEY_FRONT },
+
+ { 0x3e, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x0a, KEY_4 },
+ { 0x0e, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x16, KEY_7 },
+ { 0x1a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3a, KEY_0 },
+ { 0x22, KEY_NUMLOCK }, /* -/-- */
+ { 0x20, KEY_REFRESH },
+
+ { 0x03, KEY_BRIGHTNESSDOWN },
+ { 0x28, KEY_AUDIO },
+ { 0x3c, KEY_CHANNELUP },
+ { 0x3f, KEY_VOLUMEDOWN },
+ { 0x2e, KEY_MUTE },
+ { 0x3b, KEY_VOLUMEUP },
+ { 0x00, KEY_CHANNELDOWN },
+ { 0x07, KEY_BRIGHTNESSUP },
+ { 0x2c, KEY_TEXT },
+
+ { 0x37, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x13, KEY_PAUSE },
+ { 0x26, KEY_STOP },
+ { 0x18, KEY_FASTFORWARD },
+ { 0x14, KEY_REWIND },
+ { 0x33, KEY_ZOOM },
+ { 0x32, KEY_KEYBOARD },
+ { 0x30, KEY_GOTO }, /* Pointing arrow */
+ { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
+ { 0x0b, KEY_RADIO },
+ { 0x10, KEY_POWER },
+
+};
+
+static struct rc_keymap npgtech_map = {
+ .map = {
+ .scan = npgtech,
+ .size = ARRAY_SIZE(npgtech),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NPGTECH,
+ }
+};
+
+static int __init init_rc_map_npgtech(void)
+{
+ return ir_register_map(&npgtech_map);
+}
+
+static void __exit exit_rc_map_npgtech(void)
+{
+ ir_unregister_map(&npgtech_map);
+}
+
+module_init(init_rc_map_npgtech)
+module_exit(exit_rc_map_npgtech)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pctv-sedna.c b/drivers/media/IR/keymaps/rc-pctv-sedna.c
new file mode 100644
index 00000000000..4129bb44a25
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pctv-sedna.c
@@ -0,0 +1,80 @@
+/* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mapping for the 28 key remote control as seen at
+ http://www.sednacomputer.com/photo/cardbus-tv.jpg
+ Pavel Mihaylov <bin@bash.info>
+ Also for the remote bundled with Kozumi KTV-01C card */
+
+static struct ir_scancode pctv_sedna[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_AGAIN }, /* Recall */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x0d, KEY_MODE }, /* Stereo */
+ { 0x0e, KEY_STOP },
+ { 0x0f, KEY_PREVIOUSSONG },
+ { 0x10, KEY_ZOOM },
+ { 0x11, KEY_TUNER }, /* Source */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MUTE },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x19, KEY_CAMERA }, /* Snapshot */
+ { 0x1a, KEY_NEXTSONG },
+ { 0x1b, KEY_TIME }, /* Time Shift */
+ { 0x1c, KEY_RADIO }, /* FM Radio */
+ { 0x1d, KEY_RECORD },
+ { 0x1e, KEY_PAUSE },
+ /* additional codes for Kozumi's remote */
+ { 0x14, KEY_INFO }, /* OSD */
+ { 0x16, KEY_OK }, /* OK */
+ { 0x17, KEY_DIGITS }, /* Plus */
+ { 0x1f, KEY_PLAY }, /* Play */
+};
+
+static struct rc_keymap pctv_sedna_map = {
+ .map = {
+ .scan = pctv_sedna,
+ .size = ARRAY_SIZE(pctv_sedna),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PCTV_SEDNA,
+ }
+};
+
+static int __init init_rc_map_pctv_sedna(void)
+{
+ return ir_register_map(&pctv_sedna_map);
+}
+
+static void __exit exit_rc_map_pctv_sedna(void)
+{
+ ir_unregister_map(&pctv_sedna_map);
+}
+
+module_init(init_rc_map_pctv_sedna)
+module_exit(exit_rc_map_pctv_sedna)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-color.c b/drivers/media/IR/keymaps/rc-pinnacle-color.c
new file mode 100644
index 00000000000..326e023ce12
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-color.c
@@ -0,0 +1,94 @@
+/* pinnacle-color.h - Keytable for pinnacle_color Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_color[] = {
+ { 0x59, KEY_MUTE },
+ { 0x4a, KEY_POWER },
+
+ { 0x18, KEY_TEXT },
+ { 0x26, KEY_TV },
+ { 0x3d, KEY_PRINT },
+
+ { 0x48, KEY_RED },
+ { 0x04, KEY_GREEN },
+ { 0x11, KEY_YELLOW },
+ { 0x00, KEY_BLUE },
+
+ { 0x2d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+
+ { 0x49, KEY_MENU },
+
+ { 0x16, KEY_CHANNELUP },
+ { 0x17, KEY_CHANNELDOWN },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x22, KEY_LEFT },
+ { 0x23, KEY_RIGHT },
+ { 0x0d, KEY_SELECT },
+
+ { 0x08, KEY_BACK },
+ { 0x07, KEY_REFRESH },
+
+ { 0x2f, KEY_ZOOM },
+ { 0x29, KEY_RECORD },
+
+ { 0x4b, KEY_PAUSE },
+ { 0x4d, KEY_REWIND },
+ { 0x2e, KEY_PLAY },
+ { 0x4e, KEY_FORWARD },
+ { 0x53, KEY_PREVIOUS },
+ { 0x4c, KEY_STOP },
+ { 0x54, KEY_NEXT },
+
+ { 0x69, KEY_0 },
+ { 0x6a, KEY_1 },
+ { 0x6b, KEY_2 },
+ { 0x6c, KEY_3 },
+ { 0x6d, KEY_4 },
+ { 0x6e, KEY_5 },
+ { 0x6f, KEY_6 },
+ { 0x70, KEY_7 },
+ { 0x71, KEY_8 },
+ { 0x72, KEY_9 },
+
+ { 0x74, KEY_CHANNEL },
+ { 0x0a, KEY_BACKSPACE },
+};
+
+static struct rc_keymap pinnacle_color_map = {
+ .map = {
+ .scan = pinnacle_color,
+ .size = ARRAY_SIZE(pinnacle_color),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_COLOR,
+ }
+};
+
+static int __init init_rc_map_pinnacle_color(void)
+{
+ return ir_register_map(&pinnacle_color_map);
+}
+
+static void __exit exit_rc_map_pinnacle_color(void)
+{
+ ir_unregister_map(&pinnacle_color_map);
+}
+
+module_init(init_rc_map_pinnacle_color)
+module_exit(exit_rc_map_pinnacle_color)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-grey.c b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
new file mode 100644
index 00000000000..14cb772515c
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
@@ -0,0 +1,89 @@
+/* pinnacle-grey.h - Keytable for pinnacle_grey Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_grey[] = {
+ { 0x3a, KEY_0 },
+ { 0x31, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x33, KEY_3 },
+ { 0x34, KEY_4 },
+ { 0x35, KEY_5 },
+ { 0x36, KEY_6 },
+ { 0x37, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x39, KEY_9 },
+
+ { 0x2f, KEY_POWER },
+
+ { 0x2e, KEY_P },
+ { 0x1f, KEY_L },
+ { 0x2b, KEY_I },
+
+ { 0x2d, KEY_SCREEN },
+ { 0x1e, KEY_ZOOM },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x0f, KEY_VOLUMEDOWN },
+ { 0x17, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x25, KEY_INFO },
+
+ { 0x3c, KEY_MUTE },
+
+ { 0x3d, KEY_LEFT },
+ { 0x3b, KEY_RIGHT },
+
+ { 0x3f, KEY_UP },
+ { 0x3e, KEY_DOWN },
+ { 0x1a, KEY_ENTER },
+
+ { 0x1d, KEY_MENU },
+ { 0x19, KEY_AGAIN },
+ { 0x16, KEY_PREVIOUSSONG },
+ { 0x13, KEY_NEXTSONG },
+ { 0x15, KEY_PAUSE },
+ { 0x0e, KEY_REWIND },
+ { 0x0d, KEY_PLAY },
+ { 0x0b, KEY_STOP },
+ { 0x07, KEY_FORWARD },
+ { 0x27, KEY_RECORD },
+ { 0x26, KEY_TUNER },
+ { 0x29, KEY_TEXT },
+ { 0x2a, KEY_MEDIA },
+ { 0x18, KEY_EPG },
+};
+
+static struct rc_keymap pinnacle_grey_map = {
+ .map = {
+ .scan = pinnacle_grey,
+ .size = ARRAY_SIZE(pinnacle_grey),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_GREY,
+ }
+};
+
+static int __init init_rc_map_pinnacle_grey(void)
+{
+ return ir_register_map(&pinnacle_grey_map);
+}
+
+static void __exit exit_rc_map_pinnacle_grey(void)
+{
+ ir_unregister_map(&pinnacle_grey_map);
+}
+
+module_init(init_rc_map_pinnacle_grey)
+module_exit(exit_rc_map_pinnacle_grey)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
new file mode 100644
index 00000000000..835bf4ef8de
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
@@ -0,0 +1,73 @@
+/* pinnacle-pctv-hd.h - Keytable for pinnacle_pctv_hd Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Pinnacle PCTV HD 800i mini remote */
+
+static struct ir_scancode pinnacle_pctv_hd[] = {
+
+ { 0x0f, KEY_1 },
+ { 0x15, KEY_2 },
+ { 0x10, KEY_3 },
+ { 0x18, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x1e, KEY_6 },
+ { 0x11, KEY_7 },
+ { 0x21, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x27, KEY_0 },
+
+ { 0x24, KEY_ZOOM },
+ { 0x2a, KEY_SUBTITLE },
+
+ { 0x00, KEY_MUTE },
+ { 0x01, KEY_ENTER }, /* Pinnacle Logo */
+ { 0x39, KEY_POWER },
+
+ { 0x03, KEY_VOLUMEUP },
+ { 0x09, KEY_VOLUMEDOWN },
+ { 0x06, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+
+ { 0x2d, KEY_REWIND },
+ { 0x30, KEY_PLAYPAUSE },
+ { 0x33, KEY_FASTFORWARD },
+ { 0x3c, KEY_STOP },
+ { 0x36, KEY_RECORD },
+ { 0x3f, KEY_EPG }, /* Labeled "?" */
+};
+
+static struct rc_keymap pinnacle_pctv_hd_map = {
+ .map = {
+ .scan = pinnacle_pctv_hd,
+ .size = ARRAY_SIZE(pinnacle_pctv_hd),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_PCTV_HD,
+ }
+};
+
+static int __init init_rc_map_pinnacle_pctv_hd(void)
+{
+ return ir_register_map(&pinnacle_pctv_hd_map);
+}
+
+static void __exit exit_rc_map_pinnacle_pctv_hd(void)
+{
+ ir_unregister_map(&pinnacle_pctv_hd_map);
+}
+
+module_init(init_rc_map_pinnacle_pctv_hd)
+module_exit(exit_rc_map_pinnacle_pctv_hd)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview-mk12.c b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
new file mode 100644
index 00000000000..5a735d569a8
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
@@ -0,0 +1,83 @@
+/* rc-pixelview-mk12.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Keytable for MK-F12 IR remote provided together with Pixelview
+ * Ultra Pro Remote Controller. Uses NEC extended format.
+ */
+static struct ir_scancode pixelview_mk12[] = {
+ { 0x866b03, KEY_TUNER }, /* Timeshift */
+ { 0x866b1e, KEY_POWER2 }, /* power */
+
+ { 0x866b01, KEY_1 },
+ { 0x866b0b, KEY_2 },
+ { 0x866b1b, KEY_3 },
+ { 0x866b05, KEY_4 },
+ { 0x866b09, KEY_5 },
+ { 0x866b15, KEY_6 },
+ { 0x866b06, KEY_7 },
+ { 0x866b0a, KEY_8 },
+ { 0x866b12, KEY_9 },
+ { 0x866b02, KEY_0 },
+
+ { 0x866b13, KEY_AGAIN }, /* loop */
+ { 0x866b10, KEY_DIGITS }, /* +100 */
+
+ { 0x866b00, KEY_MEDIA }, /* source */
+ { 0x866b18, KEY_MUTE }, /* mute */
+ { 0x866b19, KEY_CAMERA }, /* snapshot */
+ { 0x866b1a, KEY_SEARCH }, /* scan */
+
+ { 0x866b16, KEY_CHANNELUP }, /* chn + */
+ { 0x866b14, KEY_CHANNELDOWN }, /* chn - */
+ { 0x866b1f, KEY_VOLUMEUP }, /* vol + */
+ { 0x866b17, KEY_VOLUMEDOWN }, /* vol - */
+ { 0x866b1c, KEY_ZOOM }, /* zoom */
+
+ { 0x866b04, KEY_REWIND },
+ { 0x866b0e, KEY_RECORD },
+ { 0x866b0c, KEY_FORWARD },
+
+ { 0x866b1d, KEY_STOP },
+ { 0x866b08, KEY_PLAY },
+ { 0x866b0f, KEY_PAUSE },
+
+ { 0x866b0d, KEY_TV },
+ { 0x866b07, KEY_RADIO }, /* FM */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview_mk12,
+ .size = ARRAY_SIZE(pixelview_mk12),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_PIXELVIEW_MK12,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview-new.c b/drivers/media/IR/keymaps/rc-pixelview-new.c
new file mode 100644
index 00000000000..7bbbbf5735e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-new.c
@@ -0,0 +1,83 @@
+/* pixelview-new.h - Keytable for pixelview_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ Remote found on the PV MPEG 8000GT
+ */
+
+static struct ir_scancode pixelview_new[] = {
+ { 0x3c, KEY_TIME }, /* Timeshift */
+ { 0x12, KEY_POWER },
+
+ { 0x3d, KEY_1 },
+ { 0x38, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x35, KEY_4 },
+ { 0x39, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x36, KEY_7 },
+ { 0x3a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3e, KEY_0 },
+
+ { 0x1c, KEY_AGAIN }, /* LOOP */
+ { 0x3f, KEY_MEDIA }, /* Source */
+ { 0x1f, KEY_LAST }, /* +100 */
+ { 0x1b, KEY_MUTE },
+
+ { 0x17, KEY_CHANNELDOWN },
+ { 0x16, KEY_CHANNELUP },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x13, KEY_ZOOM },
+
+ { 0x19, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x1a, KEY_SEARCH }, /* scan */
+
+ { 0x37, KEY_REWIND }, /* << */
+ { 0x32, KEY_RECORD }, /* o (red) */
+ { 0x33, KEY_FORWARD }, /* >> */
+ { 0x11, KEY_STOP }, /* square */
+ { 0x3b, KEY_PLAY }, /* > */
+ { 0x30, KEY_PLAYPAUSE }, /* || */
+
+ { 0x31, KEY_TV },
+ { 0x34, KEY_RADIO },
+};
+
+static struct rc_keymap pixelview_new_map = {
+ .map = {
+ .scan = pixelview_new,
+ .size = ARRAY_SIZE(pixelview_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW_NEW,
+ }
+};
+
+static int __init init_rc_map_pixelview_new(void)
+{
+ return ir_register_map(&pixelview_new_map);
+}
+
+static void __exit exit_rc_map_pixelview_new(void)
+{
+ ir_unregister_map(&pixelview_new_map);
+}
+
+module_init(init_rc_map_pixelview_new)
+module_exit(exit_rc_map_pixelview_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview.c b/drivers/media/IR/keymaps/rc-pixelview.c
new file mode 100644
index 00000000000..82ff12e182a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview.c
@@ -0,0 +1,82 @@
+/* pixelview.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pixelview[] = {
+
+ { 0x1e, KEY_POWER }, /* power */
+ { 0x07, KEY_MEDIA }, /* source */
+ { 0x1c, KEY_SEARCH }, /* scan */
+
+
+ { 0x03, KEY_TUNER }, /* TV/FM */
+
+ { 0x00, KEY_RECORD },
+ { 0x08, KEY_STOP },
+ { 0x11, KEY_PLAY },
+
+ { 0x1a, KEY_PLAYPAUSE }, /* freeze */
+ { 0x19, KEY_ZOOM }, /* zoom */
+ { 0x0f, KEY_TEXT }, /* min */
+
+ { 0x01, KEY_1 },
+ { 0x0b, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x02, KEY_0 },
+ { 0x10, KEY_LAST }, /* +100 */
+ { 0x13, KEY_LIST }, /* recall */
+
+ { 0x1f, KEY_CHANNELUP }, /* chn down */
+ { 0x17, KEY_CHANNELDOWN }, /* chn up */
+ { 0x16, KEY_VOLUMEUP }, /* vol down */
+ { 0x14, KEY_VOLUMEDOWN }, /* vol up */
+
+ { 0x04, KEY_KPMINUS }, /* <<< */
+ { 0x0e, KEY_SETUP }, /* function */
+ { 0x0c, KEY_KPPLUS }, /* >>> */
+
+ { 0x0d, KEY_GOTO }, /* mts */
+ { 0x1d, KEY_REFRESH }, /* reset */
+ { 0x18, KEY_MUTE }, /* mute/unmute */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview,
+ .size = ARRAY_SIZE(pixelview),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-powercolor-real-angel.c b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
new file mode 100644
index 00000000000..7cef8190a22
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
@@ -0,0 +1,81 @@
+/* powercolor-real-angel.h - Keytable for powercolor_real_angel Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for Powercolor Real Angel 330
+ * Daniel Fraga <fragabr@gmail.com>
+ */
+
+static struct ir_scancode powercolor_real_angel[] = {
+ { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
+ { 0x29, KEY_PREVIOUS }, /* previous channel */
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x2b, KEY_MODE }, /* stereo/mono */
+ { 0x2c, KEY_TEXT }, /* teletext */
+ { 0x20, KEY_CHANNELUP }, /* channel up */
+ { 0x21, KEY_CHANNELDOWN }, /* channel down */
+ { 0x10, KEY_VOLUMEUP }, /* volume up */
+ { 0x11, KEY_VOLUMEDOWN }, /* volume down */
+ { 0x0d, KEY_MUTE },
+ { 0x1f, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x16, KEY_PAUSE },
+ { 0x0b, KEY_STOP },
+ { 0x27, KEY_FASTFORWARD },
+ { 0x26, KEY_REWIND },
+ { 0x1e, KEY_SEARCH }, /* autoscan */
+ { 0x0e, KEY_CAMERA }, /* snapshot */
+ { 0x2d, KEY_SETUP },
+ { 0x0f, KEY_SCREEN }, /* full screen */
+ { 0x14, KEY_RADIO }, /* FM radio */
+ { 0x25, KEY_POWER }, /* power */
+};
+
+static struct rc_keymap powercolor_real_angel_map = {
+ .map = {
+ .scan = powercolor_real_angel,
+ .size = ARRAY_SIZE(powercolor_real_angel),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_POWERCOLOR_REAL_ANGEL,
+ }
+};
+
+static int __init init_rc_map_powercolor_real_angel(void)
+{
+ return ir_register_map(&powercolor_real_angel_map);
+}
+
+static void __exit exit_rc_map_powercolor_real_angel(void)
+{
+ ir_unregister_map(&powercolor_real_angel_map);
+}
+
+module_init(init_rc_map_powercolor_real_angel)
+module_exit(exit_rc_map_powercolor_real_angel)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-proteus-2309.c b/drivers/media/IR/keymaps/rc-proteus-2309.c
new file mode 100644
index 00000000000..22e92d39dee
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-proteus-2309.c
@@ -0,0 +1,69 @@
+/* proteus-2309.h - Keytable for proteus_2309 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
+
+static struct ir_scancode proteus_2309[] = {
+ /* numeric */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x5c, KEY_POWER }, /* power */
+ { 0x20, KEY_ZOOM }, /* full screen */
+ { 0x0f, KEY_BACKSPACE }, /* recall */
+ { 0x1b, KEY_ENTER }, /* mute */
+ { 0x41, KEY_RECORD }, /* record */
+ { 0x43, KEY_STOP }, /* stop */
+ { 0x16, KEY_S },
+ { 0x1a, KEY_POWER2 }, /* off */
+ { 0x2e, KEY_RED },
+ { 0x1f, KEY_CHANNELDOWN }, /* channel - */
+ { 0x1c, KEY_CHANNELUP }, /* channel + */
+ { 0x10, KEY_VOLUMEDOWN }, /* volume - */
+ { 0x1e, KEY_VOLUMEUP }, /* volume + */
+ { 0x14, KEY_F1 },
+};
+
+static struct rc_keymap proteus_2309_map = {
+ .map = {
+ .scan = proteus_2309,
+ .size = ARRAY_SIZE(proteus_2309),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PROTEUS_2309,
+ }
+};
+
+static int __init init_rc_map_proteus_2309(void)
+{
+ return ir_register_map(&proteus_2309_map);
+}
+
+static void __exit exit_rc_map_proteus_2309(void)
+{
+ ir_unregister_map(&proteus_2309_map);
+}
+
+module_init(init_rc_map_proteus_2309)
+module_exit(exit_rc_map_proteus_2309)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-purpletv.c b/drivers/media/IR/keymaps/rc-purpletv.c
new file mode 100644
index 00000000000..4e20fc2269f
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-purpletv.c
@@ -0,0 +1,81 @@
+/* purpletv.h - Keytable for purpletv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode purpletv[] = {
+ { 0x03, KEY_POWER },
+ { 0x6f, KEY_MUTE },
+ { 0x10, KEY_BACKSPACE }, /* Recall */
+
+ { 0x11, KEY_0 },
+ { 0x04, KEY_1 },
+ { 0x05, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x08, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x0a, KEY_6 },
+ { 0x0c, KEY_7 },
+ { 0x0d, KEY_8 },
+ { 0x0e, KEY_9 },
+ { 0x12, KEY_DOT }, /* 100+ */
+
+ { 0x07, KEY_VOLUMEUP },
+ { 0x0b, KEY_VOLUMEDOWN },
+ { 0x1a, KEY_KPPLUS },
+ { 0x18, KEY_KPMINUS },
+ { 0x15, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x0f, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+ { 0x48, KEY_ZOOM },
+
+ { 0x1b, KEY_VIDEO }, /* Video source */
+ { 0x1f, KEY_CAMERA }, /* Snapshot */
+ { 0x49, KEY_LANGUAGE }, /* MTS Select */
+ { 0x19, KEY_SEARCH }, /* Auto Scan */
+
+ { 0x4b, KEY_RECORD },
+ { 0x46, KEY_PLAY },
+ { 0x45, KEY_PAUSE }, /* Pause */
+ { 0x44, KEY_STOP },
+ { 0x43, KEY_TIME }, /* Time Shift */
+ { 0x17, KEY_CHANNEL }, /* SURF CH */
+ { 0x40, KEY_FORWARD }, /* Forward ? */
+ { 0x42, KEY_REWIND }, /* Backward ? */
+
+};
+
+static struct rc_keymap purpletv_map = {
+ .map = {
+ .scan = purpletv,
+ .size = ARRAY_SIZE(purpletv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PURPLETV,
+ }
+};
+
+static int __init init_rc_map_purpletv(void)
+{
+ return ir_register_map(&purpletv_map);
+}
+
+static void __exit exit_rc_map_purpletv(void)
+{
+ ir_unregister_map(&purpletv_map);
+}
+
+module_init(init_rc_map_purpletv)
+module_exit(exit_rc_map_purpletv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pv951.c b/drivers/media/IR/keymaps/rc-pv951.c
new file mode 100644
index 00000000000..36679e706cf
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pv951.c
@@ -0,0 +1,78 @@
+/* pv951.h - Keytable for pv951 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mark Phalan <phalanm@o2.ie> */
+
+static struct ir_scancode pv951[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x12, KEY_POWER },
+ { 0x10, KEY_MUTE },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x0e, KEY_PAGEUP },
+ { 0x1d, KEY_PAGEDOWN },
+ { 0x13, KEY_SOUND },
+
+ { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
+ { 0x16, KEY_SUBTITLE }, /* CC */
+ { 0x0d, KEY_TEXT }, /* TTX */
+ { 0x0b, KEY_TV }, /* AIR/CBL */
+ { 0x11, KEY_PC }, /* PC/TV */
+ { 0x17, KEY_OK }, /* CH RTN */
+ { 0x19, KEY_MODE }, /* FUNC */
+ { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
+
+ /* Not sure what to do with these ones! */
+ { 0x0f, KEY_SELECT }, /* SOURCE */
+ { 0x0a, KEY_KPPLUS }, /* +100 */
+ { 0x14, KEY_EQUAL }, /* SYNC */
+ { 0x1c, KEY_MEDIA }, /* PC/TV */
+};
+
+static struct rc_keymap pv951_map = {
+ .map = {
+ .scan = pv951,
+ .size = ARRAY_SIZE(pv951),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PV951,
+ }
+};
+
+static int __init init_rc_map_pv951(void)
+{
+ return ir_register_map(&pv951_map);
+}
+
+static void __exit exit_rc_map_pv951(void)
+{
+ ir_unregister_map(&pv951_map);
+}
+
+module_init(init_rc_map_pv951)
+module_exit(exit_rc_map_pv951)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
new file mode 100644
index 00000000000..cc6b8f54874
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
@@ -0,0 +1,103 @@
+/* rc5-hauppauge-new.h - Keytable for rc5_hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Hauppauge: the newer, gray remotes (there seem to be multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ *
+ * This table contains the complete RC5 code, instead of just the data part
+ */
+
+static struct ir_scancode rc5_hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x1e00, KEY_0 },
+ { 0x1e01, KEY_1 },
+ { 0x1e02, KEY_2 },
+ { 0x1e03, KEY_3 },
+ { 0x1e04, KEY_4 },
+ { 0x1e05, KEY_5 },
+ { 0x1e06, KEY_6 },
+ { 0x1e07, KEY_7 },
+ { 0x1e08, KEY_8 },
+ { 0x1e09, KEY_9 },
+
+ { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x1e0b, KEY_RED }, /* red button */
+ { 0x1e0c, KEY_RADIO },
+ { 0x1e0d, KEY_MENU },
+ { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x1e0f, KEY_MUTE },
+ { 0x1e10, KEY_VOLUMEUP },
+ { 0x1e11, KEY_VOLUMEDOWN },
+ { 0x1e12, KEY_PREVIOUS }, /* previous channel */
+ { 0x1e14, KEY_UP },
+ { 0x1e15, KEY_DOWN },
+ { 0x1e16, KEY_LEFT },
+ { 0x1e17, KEY_RIGHT },
+ { 0x1e18, KEY_VIDEO }, /* Videos */
+ { 0x1e19, KEY_AUDIO }, /* Music */
+ /* 0x1e1a: Pictures - presume this means
+ "Multimedia Home Platform" -
+ no "PICTURES" key in input.h
+ */
+ { 0x1e1a, KEY_MHP },
+
+ { 0x1e1b, KEY_EPG }, /* Guide */
+ { 0x1e1c, KEY_TV },
+ { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1e1f, KEY_EXIT }, /* back/exit */
+ { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x1e25, KEY_ENTER }, /* OK */
+ { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x1e29, KEY_BLUE }, /* blue key */
+ { 0x1e2e, KEY_GREEN }, /* green button */
+ { 0x1e30, KEY_PAUSE }, /* pause */
+ { 0x1e32, KEY_REWIND }, /* backward << */
+ { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1e35, KEY_PLAY },
+ { 0x1e36, KEY_STOP },
+ { 0x1e37, KEY_RECORD }, /* recording */
+ { 0x1e38, KEY_YELLOW }, /* yellow key */
+ { 0x1e3b, KEY_SELECT }, /* top right button */
+ { 0x1e3c, KEY_ZOOM }, /* full */
+ { 0x1e3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap rc5_hauppauge_new_map = {
+ .map = {
+ .scan = rc5_hauppauge_new,
+ .size = ARRAY_SIZE(rc5_hauppauge_new),
+ .ir_type = IR_TYPE_RC5,
+ .name = RC_MAP_RC5_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_rc5_hauppauge_new(void)
+{
+ return ir_register_map(&rc5_hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_rc5_hauppauge_new(void)
+{
+ ir_unregister_map(&rc5_hauppauge_new_map);
+}
+
+module_init(init_rc_map_rc5_hauppauge_new)
+module_exit(exit_rc_map_rc5_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
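Note: as the comment above says, this map keeps the complete RC5 code rather than just the data part, so every scancode carries the Hauppauge system address 0x1e in its high byte. A hedged sketch of that decomposition, assuming the usual RC5 field widths (5-bit system, 6-bit command); the helper is illustrative only and not part of this patch.

	/* Illustrative only: split a full RC5 scancode as stored in
	 * rc5_hauppauge_new above into its system address and command. */
	static inline void rc5_split(u16 scancode, u8 *system, u8 *command)
	{
		*system  = (scancode >> 8) & 0x1f;	/* 0x1e on these remotes */
		*command = scancode & 0x3f;		/* e.g. 0x10 -> KEY_VOLUMEUP */
	}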
diff --git a/drivers/media/IR/keymaps/rc-rc5-tv.c b/drivers/media/IR/keymaps/rc-rc5-tv.c
new file mode 100644
index 00000000000..73cce2f8ddf
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-tv.c
@@ -0,0 +1,81 @@
+/* rc5-tv.h - Keytable for rc5_tv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* generic RC5 keytable */
+/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
+/* used by old (black) Hauppauge remotes */
+
+static struct ir_scancode rc5_tv[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_CHANNEL }, /* channel / program (Japan: 11) */
+ { 0x0c, KEY_POWER }, /* standby */
+ { 0x0d, KEY_MUTE }, /* mute / demute */
+ { 0x0f, KEY_TV }, /* display */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x1e, KEY_SEARCH }, /* search + */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* alt / channel */
+ { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
+ { 0x26, KEY_SLEEP }, /* sleeptimer */
+ { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
+ { 0x30, KEY_PAUSE },
+ { 0x32, KEY_REWIND },
+ { 0x33, KEY_GOTO },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
+ { 0x3d, KEY_SUSPEND }, /* system standby */
+
+};
+
+static struct rc_keymap rc5_tv_map = {
+ .map = {
+ .scan = rc5_tv,
+ .size = ARRAY_SIZE(rc5_tv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_RC5_TV,
+ }
+};
+
+static int __init init_rc_map_rc5_tv(void)
+{
+ return ir_register_map(&rc5_tv_map);
+}
+
+static void __exit exit_rc_map_rc5_tv(void)
+{
+ ir_unregister_map(&rc5_tv_map);
+}
+
+module_init(init_rc_map_rc5_tv)
+module_exit(exit_rc_map_rc5_tv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
new file mode 100644
index 00000000000..ab1a6d2baf7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
@@ -0,0 +1,78 @@
+/* real-audio-220-32-keys.h - Keytable for real_audio_220_32_keys Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Zogis Real Audio 220 - 32 keys IR */
+
+static struct ir_scancode real_audio_220_32_keys[] = {
+ { 0x1c, KEY_RADIO},
+ { 0x12, KEY_POWER2},
+
+ { 0x01, KEY_1},
+ { 0x02, KEY_2},
+ { 0x03, KEY_3},
+ { 0x04, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+ { 0x07, KEY_7},
+ { 0x08, KEY_8},
+ { 0x09, KEY_9},
+ { 0x00, KEY_0},
+
+ { 0x0c, KEY_VOLUMEUP},
+ { 0x18, KEY_VOLUMEDOWN},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x15, KEY_CHANNELDOWN},
+ { 0x16, KEY_ENTER},
+
+ { 0x11, KEY_LIST}, /* Source */
+ { 0x0d, KEY_AUDIO}, /* stereo */
+
+ { 0x0f, KEY_PREVIOUS}, /* Prev */
+ { 0x1b, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_NEXT}, /* Next */
+
+ { 0x0e, KEY_STOP},
+ { 0x1f, KEY_PLAY},
+ { 0x1e, KEY_PLAYPAUSE}, /* Pause */
+
+ { 0x1d, KEY_RECORD},
+ { 0x13, KEY_MUTE},
+ { 0x19, KEY_CAMERA}, /* Snapshot */
+
+};
+
+static struct rc_keymap real_audio_220_32_keys_map = {
+ .map = {
+ .scan = real_audio_220_32_keys,
+ .size = ARRAY_SIZE(real_audio_220_32_keys),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_REAL_AUDIO_220_32_KEYS,
+ }
+};
+
+static int __init init_rc_map_real_audio_220_32_keys(void)
+{
+ return ir_register_map(&real_audio_220_32_keys_map);
+}
+
+static void __exit exit_rc_map_real_audio_220_32_keys(void)
+{
+ ir_unregister_map(&real_audio_220_32_keys_map);
+}
+
+module_init(init_rc_map_real_audio_220_32_keys)
+module_exit(exit_rc_map_real_audio_220_32_keys)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tbs-nec.c b/drivers/media/IR/keymaps/rc-tbs-nec.c
new file mode 100644
index 00000000000..3309631e6f8
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tbs-nec.c
@@ -0,0 +1,73 @@
+/* tbs-nec.h - Keytable for tbs_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tbs_nec[] = {
+ { 0x04, KEY_POWER2}, /*power*/
+ { 0x14, KEY_MUTE}, /*mute*/
+ { 0x07, KEY_1},
+ { 0x06, KEY_2},
+ { 0x05, KEY_3},
+ { 0x0b, KEY_4},
+ { 0x0a, KEY_5},
+ { 0x09, KEY_6},
+ { 0x0f, KEY_7},
+ { 0x0e, KEY_8},
+ { 0x0d, KEY_9},
+ { 0x12, KEY_0},
+ { 0x16, KEY_CHANNELUP}, /*ch+*/
+ { 0x11, KEY_CHANNELDOWN},/*ch-*/
+ { 0x13, KEY_VOLUMEUP}, /*vol+*/
+ { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
+ { 0x03, KEY_RECORD}, /*rec*/
+ { 0x18, KEY_PAUSE}, /*pause*/
+ { 0x19, KEY_OK}, /*ok*/
+ { 0x1a, KEY_CAMERA}, /* snapshot */
+ { 0x01, KEY_UP},
+ { 0x10, KEY_LEFT},
+ { 0x02, KEY_RIGHT},
+ { 0x08, KEY_DOWN},
+ { 0x15, KEY_FAVORITES},
+ { 0x17, KEY_SUBTITLE},
+ { 0x1d, KEY_ZOOM},
+ { 0x1f, KEY_EXIT},
+ { 0x1e, KEY_MENU},
+ { 0x1c, KEY_EPG},
+ { 0x00, KEY_PREVIOUS},
+ { 0x1b, KEY_MODE},
+};
+
+static struct rc_keymap tbs_nec_map = {
+ .map = {
+ .scan = tbs_nec,
+ .size = ARRAY_SIZE(tbs_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TBS_NEC,
+ }
+};
+
+static int __init init_rc_map_tbs_nec(void)
+{
+ return ir_register_map(&tbs_nec_map);
+}
+
+static void __exit exit_rc_map_tbs_nec(void)
+{
+ ir_unregister_map(&tbs_nec_map);
+}
+
+module_init(init_rc_map_tbs_nec)
+module_exit(exit_rc_map_tbs_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
new file mode 100644
index 00000000000..5326a0b444c
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
@@ -0,0 +1,92 @@
+/* terratec-cinergy-xs.h - Keytable for terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS
+ Devin Heitmueller <dheitmueller@linuxtv.org>
+ */
+
+static struct ir_scancode terratec_cinergy_xs[] = {
+ { 0x41, KEY_HOME},
+ { 0x01, KEY_POWER},
+ { 0x42, KEY_MENU},
+ { 0x02, KEY_1},
+ { 0x03, KEY_2},
+ { 0x04, KEY_3},
+ { 0x43, KEY_SUBTITLE},
+ { 0x05, KEY_4},
+ { 0x06, KEY_5},
+ { 0x07, KEY_6},
+ { 0x44, KEY_TEXT},
+ { 0x08, KEY_7},
+ { 0x09, KEY_8},
+ { 0x0a, KEY_9},
+ { 0x45, KEY_DELETE},
+ { 0x0b, KEY_TUNER},
+ { 0x0c, KEY_0},
+ { 0x0d, KEY_MODE},
+ { 0x46, KEY_TV},
+ { 0x47, KEY_DVD},
+ { 0x49, KEY_VIDEO},
+ { 0x4b, KEY_AUX},
+ { 0x10, KEY_UP},
+ { 0x11, KEY_LEFT},
+ { 0x12, KEY_OK},
+ { 0x13, KEY_RIGHT},
+ { 0x14, KEY_DOWN},
+ { 0x0f, KEY_EPG},
+ { 0x16, KEY_INFO},
+ { 0x4d, KEY_BACKSPACE},
+ { 0x1c, KEY_VOLUMEUP},
+ { 0x4c, KEY_PLAY},
+ { 0x1b, KEY_CHANNELUP},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x1d, KEY_MUTE},
+ { 0x1f, KEY_CHANNELDOWN},
+ { 0x17, KEY_RED},
+ { 0x18, KEY_GREEN},
+ { 0x19, KEY_YELLOW},
+ { 0x1a, KEY_BLUE},
+ { 0x58, KEY_RECORD},
+ { 0x48, KEY_STOP},
+ { 0x40, KEY_PAUSE},
+ { 0x54, KEY_LAST},
+ { 0x4e, KEY_REWIND},
+ { 0x4f, KEY_FASTFORWARD},
+ { 0x5c, KEY_NEXT},
+};
+
+static struct rc_keymap terratec_cinergy_xs_map = {
+ .map = {
+ .scan = terratec_cinergy_xs,
+ .size = ARRAY_SIZE(terratec_cinergy_xs),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_terratec_cinergy_xs)
+module_exit(exit_rc_map_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tevii-nec.c b/drivers/media/IR/keymaps/rc-tevii-nec.c
new file mode 100644
index 00000000000..e30d411c07b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tevii-nec.c
@@ -0,0 +1,88 @@
+/* tevii-nec.h - Keytable for tevii_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tevii_nec[] = {
+ { 0x0a, KEY_POWER2},
+ { 0x0c, KEY_MUTE},
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_MENU},
+ { 0x0f, KEY_VOLUMEDOWN},
+ { 0x1a, KEY_LAST},
+ { 0x0e, KEY_OPEN},
+ { 0x04, KEY_RECORD},
+ { 0x09, KEY_VOLUMEUP},
+ { 0x08, KEY_CHANNELUP},
+ { 0x07, KEY_PVR},
+ { 0x0b, KEY_TIME},
+ { 0x02, KEY_RIGHT},
+ { 0x03, KEY_LEFT},
+ { 0x00, KEY_UP},
+ { 0x1f, KEY_OK},
+ { 0x01, KEY_DOWN},
+ { 0x05, KEY_TUNER},
+ { 0x06, KEY_CHANNELDOWN},
+ { 0x40, KEY_PLAYPAUSE},
+ { 0x1e, KEY_REWIND},
+ { 0x1b, KEY_FAVORITES},
+ { 0x1d, KEY_BACK},
+ { 0x4d, KEY_FASTFORWARD},
+ { 0x44, KEY_EPG},
+ { 0x4c, KEY_INFO},
+ { 0x41, KEY_AB},
+ { 0x43, KEY_AUDIO},
+ { 0x45, KEY_SUBTITLE},
+ { 0x4a, KEY_LIST},
+ { 0x46, KEY_F1},
+ { 0x47, KEY_F2},
+ { 0x5e, KEY_F3},
+ { 0x5c, KEY_F4},
+ { 0x52, KEY_F5},
+ { 0x5a, KEY_F6},
+ { 0x56, KEY_MODE},
+ { 0x58, KEY_SWITCHVIDEOMODE},
+};
+
+static struct rc_keymap tevii_nec_map = {
+ .map = {
+ .scan = tevii_nec,
+ .size = ARRAY_SIZE(tevii_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TEVII_NEC,
+ }
+};
+
+static int __init init_rc_map_tevii_nec(void)
+{
+ return ir_register_map(&tevii_nec_map);
+}
+
+static void __exit exit_rc_map_tevii_nec(void)
+{
+ ir_unregister_map(&tevii_nec_map);
+}
+
+module_init(init_rc_map_tevii_nec)
+module_exit(exit_rc_map_tevii_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tt-1500.c b/drivers/media/IR/keymaps/rc-tt-1500.c
new file mode 100644
index 00000000000..bc88de011d5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tt-1500.c
@@ -0,0 +1,82 @@
+/* tt-1500.h - Keytable for tt_1500 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* for the Technotrend 1500 bundled remotes (grey and black): */
+
+static struct ir_scancode tt_1500[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x06, KEY_4 },
+ { 0x07, KEY_5 },
+ { 0x08, KEY_6 },
+ { 0x09, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x0b, KEY_9 },
+ { 0x0c, KEY_0 },
+ { 0x0d, KEY_UP },
+ { 0x0e, KEY_LEFT },
+ { 0x0f, KEY_OK },
+ { 0x10, KEY_RIGHT },
+ { 0x11, KEY_DOWN },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_EXIT },
+ { 0x14, KEY_RED },
+ { 0x15, KEY_GREEN },
+ { 0x16, KEY_YELLOW },
+ { 0x17, KEY_BLUE },
+ { 0x18, KEY_MUTE },
+ { 0x19, KEY_TEXT },
+ { 0x1a, KEY_MODE }, /* ? TV/Radio */
+ { 0x21, KEY_OPTION },
+ { 0x22, KEY_EPG },
+ { 0x23, KEY_CHANNELUP },
+ { 0x24, KEY_CHANNELDOWN },
+ { 0x25, KEY_VOLUMEUP },
+ { 0x26, KEY_VOLUMEDOWN },
+ { 0x27, KEY_SETUP },
+ { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
+ { 0x3b, KEY_PLAY },
+ { 0x3c, KEY_STOP },
+ { 0x3d, KEY_REWIND },
+ { 0x3e, KEY_PAUSE },
+ { 0x3f, KEY_FORWARD },
+};
+
+static struct rc_keymap tt_1500_map = {
+ .map = {
+ .scan = tt_1500,
+ .size = ARRAY_SIZE(tt_1500),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TT_1500,
+ }
+};
+
+static int __init init_rc_map_tt_1500(void)
+{
+ return ir_register_map(&tt_1500_map);
+}
+
+static void __exit exit_rc_map_tt_1500(void)
+{
+ ir_unregister_map(&tt_1500_map);
+}
+
+module_init(init_rc_map_tt_1500)
+module_exit(exit_rc_map_tt_1500)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-s350.c b/drivers/media/IR/keymaps/rc-videomate-s350.c
new file mode 100644
index 00000000000..4df7fcd1d2f
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-s350.c
@@ -0,0 +1,85 @@
+/* videomate-s350.h - Keytable for videomate_s350 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_s350[] = {
+ { 0x00, KEY_TV},
+ { 0x01, KEY_DVD},
+ { 0x04, KEY_RECORD},
+ { 0x05, KEY_VIDEO}, /* TV/Video */
+ { 0x07, KEY_STOP},
+ { 0x08, KEY_PLAYPAUSE},
+ { 0x0a, KEY_REWIND},
+ { 0x0f, KEY_FASTFORWARD},
+ { 0x10, KEY_CHANNELUP},
+ { 0x12, KEY_VOLUMEUP},
+ { 0x13, KEY_CHANNELDOWN},
+ { 0x14, KEY_MUTE},
+ { 0x15, KEY_VOLUMEDOWN},
+ { 0x16, KEY_1},
+ { 0x17, KEY_2},
+ { 0x18, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1a, KEY_5},
+ { 0x1b, KEY_6},
+ { 0x1c, KEY_7},
+ { 0x1d, KEY_8},
+ { 0x1e, KEY_9},
+ { 0x1f, KEY_0},
+ { 0x21, KEY_SLEEP},
+ { 0x24, KEY_ZOOM},
+ { 0x25, KEY_LAST}, /* Recall */
+ { 0x26, KEY_SUBTITLE}, /* CC */
+ { 0x27, KEY_LANGUAGE}, /* MTS */
+ { 0x29, KEY_CHANNEL}, /* SURF */
+ { 0x2b, KEY_A},
+ { 0x2c, KEY_B},
+ { 0x2f, KEY_CAMERA}, /* Snapshot */
+ { 0x23, KEY_RADIO},
+ { 0x02, KEY_PREVIOUSSONG},
+ { 0x06, KEY_NEXTSONG},
+ { 0x03, KEY_EPG},
+ { 0x09, KEY_SETUP},
+ { 0x22, KEY_BACKSPACE},
+ { 0x0c, KEY_UP},
+ { 0x0e, KEY_DOWN},
+ { 0x0b, KEY_LEFT},
+ { 0x0d, KEY_RIGHT},
+ { 0x11, KEY_ENTER},
+ { 0x20, KEY_TEXT},
+};
+
+static struct rc_keymap videomate_s350_map = {
+ .map = {
+ .scan = videomate_s350,
+ .size = ARRAY_SIZE(videomate_s350),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_S350,
+ }
+};
+
+static int __init init_rc_map_videomate_s350(void)
+{
+ return ir_register_map(&videomate_s350_map);
+}
+
+static void __exit exit_rc_map_videomate_s350(void)
+{
+ ir_unregister_map(&videomate_s350_map);
+}
+
+module_init(init_rc_map_videomate_s350)
+module_exit(exit_rc_map_videomate_s350)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
new file mode 100644
index 00000000000..776b0a638d8
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
@@ -0,0 +1,87 @@
+/* videomate-tv-pvr.h - Keytable for videomate_tv_pvr Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_tv_pvr[] = {
+ { 0x14, KEY_MUTE },
+ { 0x24, KEY_ZOOM },
+
+ { 0x01, KEY_DVD },
+ { 0x23, KEY_RADIO },
+ { 0x00, KEY_TV },
+
+ { 0x0a, KEY_REWIND },
+ { 0x08, KEY_PLAYPAUSE },
+ { 0x0f, KEY_FORWARD },
+
+ { 0x02, KEY_PREVIOUS },
+ { 0x07, KEY_STOP },
+ { 0x06, KEY_NEXT },
+
+ { 0x0c, KEY_UP },
+ { 0x0e, KEY_DOWN },
+ { 0x0b, KEY_LEFT },
+ { 0x0d, KEY_RIGHT },
+ { 0x11, KEY_OK },
+
+ { 0x03, KEY_MENU },
+ { 0x09, KEY_SETUP },
+ { 0x05, KEY_VIDEO },
+ { 0x22, KEY_CHANNEL },
+
+ { 0x12, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x10, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+
+ { 0x04, KEY_RECORD },
+
+ { 0x16, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1a, KEY_5 },
+ { 0x1b, KEY_6 },
+ { 0x1c, KEY_7 },
+ { 0x1d, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x1f, KEY_0 },
+
+ { 0x20, KEY_LANGUAGE },
+ { 0x21, KEY_SLEEP },
+};
+
+static struct rc_keymap videomate_tv_pvr_map = {
+ .map = {
+ .scan = videomate_tv_pvr,
+ .size = ARRAY_SIZE(videomate_tv_pvr),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_TV_PVR,
+ }
+};
+
+static int __init init_rc_map_videomate_tv_pvr(void)
+{
+ return ir_register_map(&videomate_tv_pvr_map);
+}
+
+static void __exit exit_rc_map_videomate_tv_pvr(void)
+{
+ ir_unregister_map(&videomate_tv_pvr_map);
+}
+
+module_init(init_rc_map_videomate_tv_pvr)
+module_exit(exit_rc_map_videomate_tv_pvr)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
new file mode 100644
index 00000000000..9d2d550aaa9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
@@ -0,0 +1,82 @@
+/* winfast-usbii-deluxe.h - Keytable for winfast_usbii_deluxe Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Leadtek Winfast TV USB II Deluxe remote
+ Magnus Alm <magnus.alm@gmail.com>
+ */
+
+static struct ir_scancode winfast_usbii_deluxe[] = {
+ { 0x62, KEY_0},
+ { 0x75, KEY_1},
+ { 0x76, KEY_2},
+ { 0x77, KEY_3},
+ { 0x79, KEY_4},
+ { 0x7a, KEY_5},
+ { 0x7b, KEY_6},
+ { 0x7d, KEY_7},
+ { 0x7e, KEY_8},
+ { 0x7f, KEY_9},
+
+ { 0x38, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x37, KEY_RECORD}, /* RECORD */
+ { 0x35, KEY_TIME}, /* TIMESHIFT */
+
+ { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x64, KEY_MUTE}, /* MUTE */
+
+ { 0x21, KEY_CHANNEL}, /* SURF */
+ { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
+
+ { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
+
+ { 0x70, KEY_POWER2}, /* TV ON/OFF */
+
+ { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
+ { 0x3a, KEY_NEW}, /* PIP */
+ { 0x73, KEY_ZOOM}, /* FULLSCREEN */
+
+ { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
+
+ { 0x31, KEY_DOT}, /* '.' */
+ { 0x63, KEY_ENTER}, /* ENTER */
+
+};
+
+static struct rc_keymap winfast_usbii_deluxe_map = {
+ .map = {
+ .scan = winfast_usbii_deluxe,
+ .size = ARRAY_SIZE(winfast_usbii_deluxe),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST_USBII_DELUXE,
+ }
+};
+
+static int __init init_rc_map_winfast_usbii_deluxe(void)
+{
+ return ir_register_map(&winfast_usbii_deluxe_map);
+}
+
+static void __exit exit_rc_map_winfast_usbii_deluxe(void)
+{
+ ir_unregister_map(&winfast_usbii_deluxe_map);
+}
+
+module_init(init_rc_map_winfast_usbii_deluxe)
+module_exit(exit_rc_map_winfast_usbii_deluxe)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast.c b/drivers/media/IR/keymaps/rc-winfast.c
new file mode 100644
index 00000000000..0e90a3bd949
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast.c
@@ -0,0 +1,102 @@
+/* winfast.h - Keytable for winfast Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
+
+static struct ir_scancode winfast[] = {
+ /* Keys 0 to 9 */
+ { 0x12, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x00, KEY_POWER },
+ { 0x1b, KEY_AUDIO }, /* Audio Source */
+ { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
+ { 0x1e, KEY_VIDEO }, /* Video Source */
+ { 0x16, KEY_INFO }, /* Display information */
+ { 0x04, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_CHANNELUP },
+ { 0x10, KEY_CHANNELDOWN },
+ { 0x03, KEY_ZOOM }, /* fullscreen */
+ { 0x1f, KEY_TEXT }, /* closed caption/teletext */
+ { 0x20, KEY_SLEEP },
+ { 0x29, KEY_CLEAR }, /* boss key */
+ { 0x14, KEY_MUTE },
+ { 0x2b, KEY_RED },
+ { 0x2c, KEY_GREEN },
+ { 0x2d, KEY_YELLOW },
+ { 0x2e, KEY_BLUE },
+ { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
+ { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
+ { 0x2a, KEY_MEDIA }, /* PIP (Picture in Picture) */
+ { 0x21, KEY_DOT },
+ { 0x13, KEY_ENTER },
+ { 0x11, KEY_LAST }, /* Recall (last channel) */
+ { 0x22, KEY_PREVIOUS },
+ { 0x23, KEY_PLAYPAUSE },
+ { 0x24, KEY_NEXT },
+ { 0x25, KEY_TIME }, /* Time Shifting */
+ { 0x26, KEY_STOP },
+ { 0x27, KEY_RECORD },
+ { 0x28, KEY_SAVE }, /* Screenshot */
+ { 0x2f, KEY_MENU },
+ { 0x30, KEY_CANCEL },
+ { 0x31, KEY_CHANNEL }, /* Channel Surf */
+ { 0x32, KEY_SUBTITLE },
+ { 0x33, KEY_LANGUAGE },
+ { 0x34, KEY_REWIND },
+ { 0x35, KEY_FASTFORWARD },
+ { 0x36, KEY_TV },
+ { 0x37, KEY_RADIO }, /* FM */
+ { 0x38, KEY_DVD },
+
+ { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
+ { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
+ { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
+ { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
+ { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
+};
+
+static struct rc_keymap winfast_map = {
+ .map = {
+ .scan = winfast,
+ .size = ARRAY_SIZE(winfast),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST,
+ }
+};
+
+static int __init init_rc_map_winfast(void)
+{
+ return ir_register_map(&winfast_map);
+}
+
+static void __exit exit_rc_map_winfast(void)
+{
+ ir_unregister_map(&winfast_map);
+}
+
+module_init(init_rc_map_winfast)
+module_exit(exit_rc_map_winfast)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/rc-map.c b/drivers/media/IR/rc-map.c
new file mode 100644
index 00000000000..46a8f1524b5
--- /dev/null
+++ b/drivers/media/IR/rc-map.c
@@ -0,0 +1,84 @@
+/* rc-map.c - handle IR scancode -> keycode tables
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/ir-core.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+/* Used to keep track of the registered IR keymaps */
+static LIST_HEAD(rc_map_list);
+static DEFINE_SPINLOCK(rc_map_lock);
+
+static struct rc_keymap *seek_rc_map(const char *name)
+{
+ struct rc_keymap *map = NULL;
+
+ spin_lock(&rc_map_lock);
+ list_for_each_entry(map, &rc_map_list, list) {
+ if (!strcmp(name, map->map.name)) {
+ spin_unlock(&rc_map_lock);
+ return map;
+ }
+ }
+ spin_unlock(&rc_map_lock);
+
+ return NULL;
+}
+
+struct ir_scancode_table *get_rc_map(const char *name)
+{
+
+ struct rc_keymap *map;
+
+ map = seek_rc_map(name);
+#ifdef MODULE
+ if (!map) {
+ int rc = request_module(name);
+ if (rc < 0) {
+ printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+ return NULL;
+ }
+ msleep(20); /* Give the keymap module some time to register */
+
+ map = seek_rc_map(name);
+ }
+#endif
+ if (!map) {
+ printk(KERN_ERR "IR keymap %s not found\n", name);
+ return NULL;
+ }
+
+ printk(KERN_INFO "Registered IR keymap %s\n", map->map.name);
+
+ return &map->map;
+}
+EXPORT_SYMBOL_GPL(get_rc_map);
+
+int ir_register_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_add_tail(&map->list, &rc_map_list);
+ spin_unlock(&rc_map_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_register_map);
+
+void ir_unregister_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_del(&map->list);
+ spin_unlock(&rc_map_lock);
+}
+EXPORT_SYMBOL_GPL(ir_unregister_map);
+
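Note: rc-map.c above is what ties the per-keymap modules earlier in this patch to the bridge drivers: each keymap module registers its table with ir_register_map() at load time, and a driver asks for it by its RC_MAP_* name through get_rc_map(), which falls back to request_module() when built modular. A minimal usage sketch under those assumptions; only the name and size fields visible in the initializers above are relied on, and the function is illustrative rather than part of the patch.

	/* Illustrative only: how a bridge driver could look up one of the
	 * keymaps registered above by its RC_MAP_* name. */
	static int example_load_keymap(void)
	{
		struct ir_scancode_table *tbl = get_rc_map(RC_MAP_TBS_NEC);

		if (!tbl)
			return -ENODEV;	/* keymap module missing or failed to load */

		pr_info("got keymap %s with %lu entries\n",
			tbl->name, (unsigned long)tbl->size);
		return 0;
	}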
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 96d61707f50..b6ce528e188 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -100,6 +100,8 @@ struct xc2028_data {
if (size != _rc) \
tuner_info("i2c output error: rc = %d (should be %d)\n",\
_rc, (int)size); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -119,6 +121,8 @@ struct xc2028_data {
if (isize != _rc) \
tuner_err("i2c input error: rc = %d (should be %d)\n", \
_rc, (int)isize); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -129,8 +133,8 @@ struct xc2028_data {
(_rc = tuner_i2c_xfer_send(&priv->i2c_props, \
_val, sizeof(_val)))) { \
tuner_err("Error on line %d: %d\n", __LINE__, _rc); \
- } else \
- msleep(10); \
+ } else if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -809,10 +813,20 @@ check_device:
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
+
+ if (priv->ctrl.read_not_reliable)
+ goto read_not_reliable;
+
/* Check firmware version against what we downloaded. */
if (priv->firm_version != ((version & 0xf0) << 4 | (version & 0x0f))) {
- tuner_err("Incorrect readback of firmware version.\n");
- goto fail;
+ if (!priv->ctrl.read_not_reliable) {
+ tuner_err("Incorrect readback of firmware version.\n");
+ goto fail;
+ } else {
+ tuner_err("Returned an incorrect version. However, "
+ "read is not reliable enough. Ignoring it.\n");
+ hwmodel = 3028;
+ }
}
/* Check that the tuner hardware model remains consistent over time. */
@@ -826,6 +840,7 @@ check_device:
goto fail;
}
+read_not_reliable:
memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
/*
@@ -996,6 +1011,8 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
The reset CLK is needed only with tm6000.
Driver should work fine even if this fails.
*/
+ if (priv->ctrl.msleep)
+ msleep(priv->ctrl.msleep);
do_tuner_callback(fe, XC2028_RESET_CLK, 1);
msleep(10);
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index a90c35d50ad..9778c96a500 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -33,12 +33,14 @@ enum firmware_type {
struct xc2028_ctrl {
char *fname;
int max_len;
+ int msleep;
unsigned int scode_table;
unsigned int mts :1;
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
unsigned int disable_power_mgmt:1;
+ unsigned int read_not_reliable:1;
unsigned int demod;
enum firmware_type type:2;
};
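Note: the two fields added to xc2028_ctrl here are consumed by the tuner-xc2028.c hunks above: a non-zero msleep replaces the fixed 10 ms settle delay after each i2c transfer, and read_not_reliable lets boards with a flaky firmware-version readback skip that check instead of failing. A hedged sketch of how a board configuration might set them; the firmware file name and max_len value are placeholders, not taken from this patch.

	/* Illustrative only: per-board xc2028/xc3028 configuration using the
	 * new msleep and read_not_reliable fields (other values are placeholders). */
	static struct xc2028_ctrl example_xc3028_ctrl = {
		.fname             = "xc3028-example-firmware.fw",	/* placeholder */
		.max_len           = 64,				/* placeholder */
		.msleep            = 10,	/* delay after every i2c transfer, in ms */
		.read_not_reliable = 1,		/* tolerate a bad firmware version readback */
	};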
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 8b0cde38984..248a2a9d841 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -930,7 +930,6 @@ static int dst_fw_ver(struct dst_state *state)
dprintk(verbose, DST_INFO, 1, "Unsupported Command");
return -1;
}
- memset(&state->fw_version, '\0', 8);
memcpy(&state->fw_version, &state->rxbuffer, 8);
dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
@@ -1053,7 +1052,6 @@ static int dst_get_tuner_info(struct dst_state *state)
goto force;
}
}
- memset(&state->board_info, '\0', 8);
memcpy(&state->board_info, &state->rxbuffer, 8);
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b6d46961a99..b762e561a6d 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -28,7 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "demux.h"
#include "dmxdev.h"
@@ -46,6 +46,8 @@
#include "z0194a.h"
#include "ds3000.h"
+#define MODULE_NAME "dm1105"
+
#define UNSET (-1U)
#define DM1105_BOARD_NOAUTO UNSET
@@ -265,7 +267,6 @@ static void dm1105_card_list(struct pci_dev *pci)
/* infrared remote control */
struct infrared {
struct input_dev *input_dev;
- struct ir_input_state ir;
char input_phys[32];
struct work_struct work;
u32 ir_command;
@@ -531,8 +532,7 @@ static void dm1105_emit_key(struct work_struct *work)
data = (ircom >> 8) & 0x7f;
- ir_input_keydown(ir->input_dev, &ir->ir, data);
- ir_input_nokey(ir->input_dev, &ir->ir);
+ ir_keydown(ir->input_dev, data, 0);
}
/* work handler */
@@ -594,8 +594,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = &ir_codes_dm1105_nec_table;
- u64 ir_type = IR_TYPE_OTHER;
+ char *ir_codes = NULL;
int err = -ENOMEM;
input_dev = input_allocate_device();
@@ -606,12 +605,6 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys),
"pci-%s/ir0", pci_name(dm1105->pdev));
- err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type);
- if (err < 0) {
- input_free_device(input_dev);
- return err;
- }
-
input_dev->name = "DVB on-card IR receiver";
input_dev->phys = dm1105->ir.input_phys;
input_dev->id.bustype = BUS_PCI;
@@ -628,9 +621,13 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
- err = ir_input_register(input_dev, ir_codes, NULL);
+ err = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
+ if (err < 0) {
+ input_free_device(input_dev);
+ return err;
+ }
- return err;
+ return 0;
}
void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 9ddc57909d4..425862ffb28 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
@@ -963,7 +964,7 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
return ret;
}
-static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_demux_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
@@ -1084,10 +1085,16 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_demux_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
@@ -1139,7 +1146,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_demux_fops = {
.owner = THIS_MODULE,
.read = dvb_demux_read,
- .ioctl = dvb_demux_ioctl,
+ .unlocked_ioctl = dvb_demux_ioctl,
.open = dvb_demux_open,
.release = dvb_demux_release,
.poll = dvb_demux_poll,
@@ -1152,7 +1159,7 @@ static struct dvb_device dvbdev_demux = {
.fops = &dvb_demux_fops
};
-static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_dvr_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1176,10 +1183,16 @@ static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
+static long dvb_dvr_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
@@ -1208,7 +1221,7 @@ static const struct file_operations dvb_dvr_fops = {
.owner = THIS_MODULE,
.read = dvb_dvr_read,
.write = dvb_dvr_write,
- .ioctl = dvb_dvr_ioctl,
+ .unlocked_ioctl = dvb_dvr_ioctl,
.open = dvb_dvr_open,
.release = dvb_dvr_release,
.poll = dvb_dvr_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index cb22da53bfb..ef259a0718a 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include "dvb_ca_en50221.h"
@@ -1181,7 +1182,7 @@ static int dvb_ca_en50221_thread(void *data)
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_ca_en50221_io_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1255,10 +1256,16 @@ static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_ca_en50221_io_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
@@ -1611,7 +1618,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_en50221_io_read,
.write = dvb_ca_en50221_io_write,
- .ioctl = dvb_ca_en50221_io_ioctl,
+ .unlocked_ioctl = dvb_ca_en50221_io_ioctl,
.open = dvb_ca_en50221_io_open,
.release = dvb_ca_en50221_io_release,
.poll = dvb_ca_en50221_io_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 67f189b7aa1..977ddba3e23 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,7 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
};
- if (demux->cnt_storage) {
+ if (demux->cnt_storage && dvb_demux_tscheck) {
/* check pkt counter */
if (pid < MAX_PID) {
if (buf[1] & 0x80)
@@ -1248,12 +1248,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed[i].index = i;
}
- if (dvb_demux_tscheck) {
- dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
-
- if (!dvbdemux->cnt_storage)
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
- }
+ dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+ if (!dvbdemux->cnt_storage)
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
INIT_LIST_HEAD(&dvbdemux->frontend_list);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 55ea260572b..44ae89ecef9 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/processor.h>
@@ -95,6 +96,10 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
* FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again.
*/
+#define DVB_FE_NO_EXIT 0
+#define DVB_FE_NORMAL_EXIT 1
+#define DVB_FE_DEVICE_REMOVED 2
+
static DEFINE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
@@ -497,7 +502,7 @@ static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- if (fepriv->exit)
+ if (fepriv->exit != DVB_FE_NO_EXIT)
return 1;
if (fepriv->dvbdev->writers == 1)
@@ -559,7 +564,7 @@ restart:
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
- fepriv->exit = 1;
+ fepriv->exit = DVB_FE_NORMAL_EXIT;
break;
}
@@ -673,7 +678,10 @@ restart:
}
fepriv->thread = NULL;
- fepriv->exit = 0;
+ if (kthread_should_stop())
+ fepriv->exit = DVB_FE_DEVICE_REMOVED;
+ else
+ fepriv->exit = DVB_FE_NO_EXIT;
mb();
dvb_frontend_wakeup(fe);
@@ -686,7 +694,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
dprintk ("%s\n", __func__);
- fepriv->exit = 1;
+ fepriv->exit = DVB_FE_NORMAL_EXIT;
mb();
if (!fepriv->thread)
@@ -755,7 +763,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
dprintk ("%s\n", __func__);
if (fepriv->thread) {
- if (!fepriv->exit)
+ if (fepriv->exit == DVB_FE_NO_EXIT)
return 0;
else
dvb_frontend_stop (fe);
@@ -767,7 +775,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
return -EINTR;
fepriv->state = FESTATE_IDLE;
- fepriv->exit = 0;
+ fepriv->exit = DVB_FE_NO_EXIT;
fepriv->thread = NULL;
mb();
@@ -1188,14 +1196,14 @@ static void dtv_property_cache_submit(struct dvb_frontend *fe)
}
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode, struct file *file)
+ struct file *file)
{
int r = 0;
@@ -1328,7 +1336,6 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
static int dtv_property_process_set(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode,
struct file *file)
{
int r = 0;
@@ -1359,7 +1366,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dprintk("%s() Finalised property cache\n", __func__);
dtv_property_cache_submit(fe);
- r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
+ r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
&fepriv->parameters);
break;
case DTV_FREQUENCY:
@@ -1391,12 +1398,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
break;
case DTV_VOLTAGE:
fe->dtv_property_cache.voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_VOLTAGE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
(void *)fe->dtv_property_cache.voltage);
break;
case DTV_TONE:
fe->dtv_property_cache.sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_TONE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
(void *)fe->dtv_property_cache.sectone);
break;
case DTV_CODE_RATE_HP:
@@ -1480,7 +1487,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1490,7 +1497,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
dprintk("%s (%d)\n", __func__, _IOC_NR(cmd));
- if (fepriv->exit)
+ if (fepriv->exit != DVB_FE_NO_EXIT)
return -ENODEV;
if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
@@ -1502,17 +1509,17 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
return -ERESTARTSYS;
if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_properties(file, cmd, parg);
else {
fe->dtv_property_cache.state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_legacy(file, cmd, parg);
}
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1548,7 +1555,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_set(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1580,7 +1587,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_get(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1597,7 +1604,7 @@ out:
return err;
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1916,6 +1923,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
int ret;
dprintk ("%s\n", __func__);
+ if (fepriv->exit == DVB_FE_DEVICE_REMOVED)
+ return -ENODEV;
if (adapter->mfe_shared) {
mutex_lock (&adapter->mfe_lock);
@@ -2008,7 +2017,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
ret = dvb_generic_release (inode, file);
if (dvbdev->users == -1) {
- if (fepriv->exit == 1) {
+ if (fepriv->exit != DVB_FE_NO_EXIT) {
fops_put(file->f_op);
file->f_op = NULL;
wake_up(&dvbdev->wait_queue);
@@ -2022,7 +2031,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_frontend_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.poll = dvb_frontend_poll,
.open = dvb_frontend_open,
.release = dvb_frontend_release
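For orientation, a minimal sketch of how the tri-state exit flag introduced above is meant to gate reuse of the frontend; the helper name example_frontend_can_open() is illustrative and not part of the patch, only fepriv->exit and the DVB_FE_* constants come from the diff.

/* Illustrative sketch only: how the tri-state exit flag gates reuse.
 * DVB_FE_NO_EXIT        - frontend usable, thread may be (re)started
 * DVB_FE_NORMAL_EXIT    - thread asked to stop (release/timeout); a later
 *                         open() may restart it via dvb_frontend_start()
 * DVB_FE_DEVICE_REMOVED - set when kthread_should_stop() caused the exit,
 *                         i.e. the device is gone; open() must fail with
 *                         -ENODEV from now on
 */
static int example_frontend_can_open(struct dvb_frontend_private *fepriv)
{
	if (fepriv->exit == DVB_FE_DEVICE_REMOVED)
		return -ENODEV;	/* hot-unplugged, never comes back */
	return 0;		/* NO_EXIT or NORMAL_EXIT: OK to (re)open */
}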
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c0642b30..f6dac2bb0ac 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -59,6 +59,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
+#include <linux/smp_lock.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/crc32.h>
@@ -1109,14 +1110,14 @@ static int dvb_net_feed_stop(struct net_device *dev)
}
-static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
+static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
struct dvb_net_priv *priv = netdev_priv(dev);
if (priv->multi_num == DVB_NET_MULTICAST_MAX)
return -ENOMEM;
- memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6);
+ memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
priv->multi_num++;
return 0;
@@ -1140,8 +1141,7 @@ static void wq_set_multicast_list (struct work_struct *work)
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
} else if (!netdev_mc_empty(dev)) {
- int mci;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
dprintk("%s: set_mc_list, %d entries\n",
dev->name, netdev_mc_count(dev));
@@ -1149,11 +1149,8 @@ static void wq_set_multicast_list (struct work_struct *work)
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
- for (mci = 0, mc=dev->mc_list;
- mci < netdev_mc_count(dev);
- mc = mc->next, mci++) {
- dvb_set_mc_filter(dev, mc);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ dvb_set_mc_filter(dev, ha->addr);
}
netif_addr_unlock_bh(dev);
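A hedged sketch of the new multicast-walk idiom used in the hunk above; only the iteration changes, the surrounding driver structure (including the dvb_set_mc_filter() helper) is assumed from the patch, and example_collect_mc() is an illustrative name.

/* Sketch: dev_mc_list walking is replaced by netdev_for_each_mc_addr(),
 * which yields struct netdev_hw_addr entries; the MAC bytes now live in
 * ha->addr and are ETH_ALEN long.
 */
static void example_collect_mc(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev)
		dvb_set_mc_filter(dev, ha->addr);	/* copies ETH_ALEN bytes */
	netif_addr_unlock_bh(dev);
}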
@@ -1333,7 +1330,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
return 0;
}
-static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_net_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1435,10 +1432,16 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
return 0;
}
-static int dvb_net_ioctl(struct inode *inode, struct file *file,
+static long dvb_net_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static int dvb_net_close(struct inode *inode, struct file *file)
@@ -1459,7 +1462,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
static const struct file_operations dvb_net_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_net_ioctl,
+ .unlocked_ioctl = dvb_net_ioctl,
.open = dvb_generic_open,
.release = dvb_net_close,
};
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 94159b90f73..b915c39d782 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -154,10 +154,11 @@ int dvb_generic_release(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dvb_generic_release);
-int dvb_generic_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct dvb_device *dvbdev = file->private_data;
+ int ret;
if (!dvbdev)
return -ENODEV;
@@ -165,7 +166,11 @@ int dvb_generic_ioctl(struct inode *inode, struct file *file,
if (!dvbdev->kernel_ioctl)
return -EINVAL;
- return dvb_usercopy (inode, file, cmd, arg, dvbdev->kernel_ioctl);
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
+ unlock_kernel();
+
+ return ret;
}
EXPORT_SYMBOL(dvb_generic_ioctl);
@@ -377,9 +382,9 @@ EXPORT_SYMBOL(dvb_unregister_adapter);
define this as video_usercopy(). this will introduce a dependency
to the v4l "videodev.o" module, which is unnecessary for some
cards (ie. the budget dvb-cards don't need the v4l module...) */
-int dvb_usercopy(struct inode *inode, struct file *file,
+int dvb_usercopy(struct file *file,
unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
+ int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
char sbuf[128];
@@ -416,7 +421,7 @@ int dvb_usercopy(struct inode *inode, struct file *file,
}
/* call driver */
- if ((err = func(inode, file, cmd, parg)) == -ENOIOCTLCMD)
+ if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -EINVAL;
if (err < 0)
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index f7b499d4a3c..fcc6ae98745 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -116,8 +116,7 @@ struct dvb_device {
wait_queue_head_t wait_queue;
/* don't really need those !? -- FIXME: use video_usercopy */
- int (*kernel_ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg);
+ int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
void *priv;
};
@@ -138,17 +137,15 @@ extern void dvb_unregister_device (struct dvb_device *dvbdev);
extern int dvb_generic_open (struct inode *inode, struct file *file);
extern int dvb_generic_release (struct inode *inode, struct file *file);
-extern int dvb_generic_ioctl (struct inode *inode, struct file *file,
+extern long dvb_generic_ioctl (struct file *file,
unsigned int cmd, unsigned long arg);
/* we don't mess with video_usercopy() any more,
we simply define our own dvb_usercopy(), which will hopefully become
generic_usercopy() someday... */
-extern int dvb_usercopy(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg));
+extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file, unsigned int cmd, void *arg));
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
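The .ioctl to .unlocked_ioctl conversion in this series always follows the same pattern; a condensed sketch, assuming the dvb_usercopy() prototype declared just above. The names example_unlocked_ioctl and example_do_ioctl are illustrative only.

/* Sketch: .unlocked_ioctl no longer receives the inode and is called
 * without the BKL, so the old locking is reinstated explicitly around
 * dvb_usercopy() while the per-driver handlers keep their void *parg
 * calling convention.
 */
static int example_do_ioctl(struct file *file, unsigned int cmd, void *parg);

static long example_unlocked_ioctl(struct file *file,
				   unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();			/* preserve old BKL semantics */
	ret = dvb_usercopy(file, cmd, arg, example_do_ioctl);
	unlock_kernel();

	return ret;
}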
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 6247239982e..b6cbb1dfc5f 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -37,7 +37,7 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct dvb_usb_rc_key a800_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_a800_table[] = {
{ 0x0201, KEY_PROG1 }, /* SOURCE */
{ 0x0200, KEY_POWER }, /* POWER */
{ 0x0205, KEY_1 }, /* 1 */
@@ -147,8 +147,8 @@ static struct dvb_usb_device_properties a800_properties = {
.identify_state = a800_identify_state,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = a800_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(a800_rc_keys),
+ .rc_key_map = ir_codes_a800_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_a800_table),
.rc_query = a800_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
index f4379c650a1..b41fa873b04 100644
--- a/drivers/media/dvb/dvb-usb/af9005-remote.c
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(debug,
#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)
-struct dvb_usb_rc_key af9005_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_af9005_table[] = {
{0x01b7, KEY_POWER},
{0x01a7, KEY_VOLUMEUP},
@@ -74,7 +74,7 @@ struct dvb_usb_rc_key af9005_rc_keys[] = {
{0x00d5, KEY_GOTO}, /* marked jump on the remote */
};
-int af9005_rc_keys_size = ARRAY_SIZE(af9005_rc_keys);
+int ir_codes_af9005_table_size = ARRAY_SIZE(ir_codes_af9005_table);
static int repeatable_keys[] = {
KEY_VOLUMEUP,
@@ -130,10 +130,10 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
deb_decode("code != inverted code\n");
return 0;
}
- for (i = 0; i < af9005_rc_keys_size; i++) {
- if (rc5_custom(&af9005_rc_keys[i]) == cust
- && rc5_data(&af9005_rc_keys[i]) == dat) {
- *event = af9005_rc_keys[i].event;
+ for (i = 0; i < ir_codes_af9005_table_size; i++) {
+ if (rc5_custom(&ir_codes_af9005_table[i]) == cust
+ && rc5_data(&ir_codes_af9005_table[i]) == dat) {
+ *event = ir_codes_af9005_table[i].event;
*state = REMOTE_KEY_PRESSED;
deb_decode
("key pressed, event %x\n", *event);
@@ -146,8 +146,8 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
return 0;
}
-EXPORT_SYMBOL(af9005_rc_keys);
-EXPORT_SYMBOL(af9005_rc_keys_size);
+EXPORT_SYMBOL(ir_codes_af9005_table);
+EXPORT_SYMBOL(ir_codes_af9005_table_size);
EXPORT_SYMBOL(af9005_rc_decode);
MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index ca5a0a4d2a4..cfd6107d534 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -1109,8 +1109,8 @@ static int __init af9005_usb_module_init(void)
return result;
}
rc_decode = symbol_request(af9005_rc_decode);
- rc_keys = symbol_request(af9005_rc_keys);
- rc_keys_size = symbol_request(af9005_rc_keys_size);
+ rc_keys = symbol_request(ir_codes_af9005_table);
+ rc_keys_size = symbol_request(ir_codes_af9005_table_size);
if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
err("af9005_rc_decode function not found, disabling remote");
af9005_properties.rc_query = NULL;
@@ -1128,9 +1128,9 @@ static void __exit af9005_usb_module_exit(void)
if (rc_decode != NULL)
symbol_put(af9005_rc_decode);
if (rc_keys != NULL)
- symbol_put(af9005_rc_keys);
+ symbol_put(ir_codes_af9005_table);
if (rc_keys_size != NULL)
- symbol_put(af9005_rc_keys_size);
+ symbol_put(ir_codes_af9005_table_size);
/* deregister this driver from the USB subsystem */
usb_deregister(&af9005_usb_driver);
}
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
index 0bc48a01218..088e7083a39 100644
--- a/drivers/media/dvb/dvb-usb/af9005.h
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -3490,7 +3490,7 @@ extern u8 regmask[8];
/* remote control decoder */
extern int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len,
u32 * event, int *state);
-extern struct dvb_usb_rc_key af9005_rc_keys[];
-extern int af9005_rc_keys_size;
+extern struct dvb_usb_rc_key ir_codes_af9005_table[];
+extern int ir_codes_af9005_table_size;
#endif
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 74d94e45324..66c7c3ea799 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -752,19 +752,19 @@ static const struct af9015_setup *af9015_setup_match(unsigned int id,
static const struct af9015_setup af9015_setup_modparam[] = {
{ AF9015_REMOTE_A_LINK_DTU_M,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ AF9015_REMOTE_MYGICTV_U718,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ AF9015_REMOTE_DIGITTRADE_DVB_T,
- af9015_rc_keys_digittrade, ARRAY_SIZE(af9015_rc_keys_digittrade),
+ ir_codes_af9015_table_digittrade, ARRAY_SIZE(ir_codes_af9015_table_digittrade),
af9015_ir_table_digittrade, ARRAY_SIZE(af9015_ir_table_digittrade) },
{ AF9015_REMOTE_AVERMEDIA_KS,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia_ks, ARRAY_SIZE(af9015_ir_table_avermedia_ks) },
{ }
};
@@ -772,32 +772,32 @@ static const struct af9015_setup af9015_setup_modparam[] = {
/* don't add new entries here anymore, use hashes instead */
static const struct af9015_setup af9015_setup_usbids[] = {
{ USB_VID_LEADTEK,
- af9015_rc_keys_leadtek, ARRAY_SIZE(af9015_rc_keys_leadtek),
+ ir_codes_af9015_table_leadtek, ARRAY_SIZE(ir_codes_af9015_table_leadtek),
af9015_ir_table_leadtek, ARRAY_SIZE(af9015_ir_table_leadtek) },
{ USB_VID_VISIONPLUS,
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_twinhan, ARRAY_SIZE(af9015_ir_table_twinhan) },
{ USB_VID_KWORLD_2, /* TODO: use correct rc keys */
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_kworld, ARRAY_SIZE(af9015_ir_table_kworld) },
{ USB_VID_AVERMEDIA,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia, ARRAY_SIZE(af9015_ir_table_avermedia) },
{ USB_VID_MSI_2,
- af9015_rc_keys_msi_digivox_iii, ARRAY_SIZE(af9015_rc_keys_msi_digivox_iii),
+ ir_codes_af9015_table_msi_digivox_iii, ARRAY_SIZE(ir_codes_af9015_table_msi_digivox_iii),
af9015_ir_table_msi_digivox_iii, ARRAY_SIZE(af9015_ir_table_msi_digivox_iii) },
{ }
};
static const struct af9015_setup af9015_setup_hashes[] = {
{ 0xb8feb708,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ 0xa3703d00,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ 0x9b7dc64e,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ }
};
@@ -836,8 +836,8 @@ static void af9015_set_remote_config(struct usb_device *udev,
} else if (udev->descriptor.idProduct ==
cpu_to_le16(USB_PID_TREKSTOR_DVBT)) {
table = &(const struct af9015_setup){ 0,
- af9015_rc_keys_trekstor,
- ARRAY_SIZE(af9015_rc_keys_trekstor),
+ ir_codes_af9015_table_trekstor,
+ ARRAY_SIZE(ir_codes_af9015_table_trekstor),
af9015_ir_table_trekstor,
ARRAY_SIZE(af9015_ir_table_trekstor)
};
@@ -1297,6 +1297,8 @@ static struct usb_device_id af9015_usb_table[] = {
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
{USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
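The device descriptions in af9015_properties refer back to af9015_usb_table by numeric index, which is why the new UB383-T entry above carries a "/* 30 */" marker and the 395U entry is referenced as &af9015_usb_table[31] below. A short illustrative fragment of that convention (the table name and indices here are examples, not the driver's actual layout):

/* Sketch: cold_ids point at usb_device_id entries by position, so every
 * insertion must keep the index comments and &table[n] references in sync.
 */
static struct usb_device_id example_usb_table[] = {
	/* 0 */ { USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T) },
	/* 1 */ { USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4) },
	{ 0 }
};
/* ...and a matching description entry would use:
 *	.cold_ids = { &example_usb_table[0], NULL },
 */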
@@ -1500,7 +1502,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
"(VS-DVB-T 395U)",
.cold_ids = {&af9015_usb_table[16],
&af9015_usb_table[17],
- &af9015_usb_table[18], NULL},
+ &af9015_usb_table[18],
+ &af9015_usb_table[31], NULL},
.warm_ids = {NULL},
},
{
@@ -1569,7 +1572,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 7, /* max 9 */
+ .num_device_descs = 8, /* max 9 */
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
@@ -1608,6 +1611,12 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[29], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "KWorld USB DVB-T Stick Mobile " \
+ "(UB383-T)",
+ .cold_ids = {&af9015_usb_table[30], NULL},
+ .warm_ids = {NULL},
+ },
}
},
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index ef36b183149..63b2a4907b7 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -123,7 +123,7 @@ enum af9015_remote {
/* LeadTek - Y04G0051 */
/* Leadtek WinFast DTV Dongle Gold */
-static struct dvb_usb_rc_key af9015_rc_keys_leadtek[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_leadtek[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -227,7 +227,7 @@ static u8 af9015_ir_table_leadtek[] = {
};
/* TwinHan AzureWave AD-TU700(704J) */
-static struct dvb_usb_rc_key af9015_rc_keys_twinhan[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_twinhan[] = {
{ 0x053f, KEY_POWER },
{ 0x0019, KEY_FAVORITES }, /* Favorite List */
{ 0x0004, KEY_TEXT }, /* Teletext */
@@ -338,7 +338,7 @@ static u8 af9015_ir_table_twinhan[] = {
};
/* A-Link DTU(m) */
-static struct dvb_usb_rc_key af9015_rc_keys_a_link[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_a_link[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -381,7 +381,7 @@ static u8 af9015_ir_table_a_link[] = {
};
/* MSI DIGIVOX mini II V3.0 */
-static struct dvb_usb_rc_key af9015_rc_keys_msi[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -424,7 +424,7 @@ static u8 af9015_ir_table_msi[] = {
};
/* MYGICTV U718 */
-static struct dvb_usb_rc_key af9015_rc_keys_mygictv[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_mygictv[] = {
{ 0x003d, KEY_SWITCHVIDEOMODE },
/* TV / AV */
{ 0x0545, KEY_POWER },
@@ -550,7 +550,7 @@ static u8 af9015_ir_table_kworld[] = {
};
/* AverMedia Volar X */
-static struct dvb_usb_rc_key af9015_rc_keys_avermedia[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_avermedia[] = {
{ 0x053d, KEY_PROG1 }, /* SOURCE */
{ 0x0512, KEY_POWER }, /* POWER */
{ 0x051e, KEY_1 }, /* 1 */
@@ -656,7 +656,7 @@ static u8 af9015_ir_table_avermedia_ks[] = {
};
/* Digittrade DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_digittrade[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_digittrade[] = {
{ 0x010f, KEY_LAST }, /* RETURN */
{ 0x0517, KEY_TEXT }, /* TELETEXT */
{ 0x0108, KEY_EPG }, /* EPG */
@@ -719,7 +719,7 @@ static u8 af9015_ir_table_digittrade[] = {
};
/* TREKSTOR DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_trekstor[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_trekstor[] = {
{ 0x0704, KEY_AGAIN }, /* Home */
{ 0x0705, KEY_MUTE }, /* Mute */
{ 0x0706, KEY_UP }, /* Up */
@@ -782,7 +782,7 @@ static u8 af9015_ir_table_trekstor[] = {
};
/* MSI DIGIVOX mini III */
-static struct dvb_usb_rc_key af9015_rc_keys_msi_digivox_iii[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi_digivox_iii[] = {
{ 0x0713, KEY_POWER }, /* [red power button] */
{ 0x073b, KEY_VIDEO }, /* Source */
{ 0x073e, KEY_ZOOM }, /* Zoom */
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index bb69f3719f9..faca1ad88a6 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -399,7 +399,7 @@ static int anysee_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key anysee_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_anysee_table[] = {
{ 0x0100, KEY_0 },
{ 0x0101, KEY_1 },
{ 0x0102, KEY_2 },
@@ -518,8 +518,8 @@ static struct dvb_usb_device_properties anysee_properties = {
}
},
- .rc_key_map = anysee_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(anysee_rc_keys),
+ .rc_key_map = ir_codes_anysee_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_anysee_table),
.rc_query = anysee_rc_query,
.rc_interval = 200, /* windows driver uses 500ms */
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index d7290b2c091..6681ac1c56e 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -125,12 +125,12 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -183,10 +183,10 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -195,7 +195,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
@@ -386,7 +386,7 @@ static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key az6027_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_az6027_table[] = {
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
};
@@ -417,11 +417,15 @@ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
if (slot != 0)
return -EINVAL;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
mutex_lock(&state->ca_mutex);
req = 0xC1;
@@ -438,6 +442,7 @@ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -485,11 +490,15 @@ static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
if (slot != 0)
return -EINVAL;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
mutex_lock(&state->ca_mutex);
req = 0xC3;
@@ -510,6 +519,7 @@ static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -556,7 +566,11 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
+
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
req = 0xC8;
value = 0;
@@ -570,6 +584,7 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
} else{
ret = b[0];
}
+ kfree(b);
return ret;
}
@@ -667,8 +682,11 @@ static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int o
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
mutex_lock(&state->ca_mutex);
req = 0xC5;
@@ -683,15 +701,13 @@ static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int o
} else
ret = 0;
- if (b[0] == 0) {
- ret = 0;
-
- } else if (b[0] == 1) {
+ if (!ret && b[0] == 1) {
ret = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -943,10 +959,16 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
u16 value;
int length;
u8 req;
- u8 data[256];
+ u8 *data;
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ data = kmalloc(256, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
+ kfree(data);
return -EAGAIN;
+ }
if (num > 2)
warn("more than 2 i2c messages at a time is not handled yet. TODO.");
@@ -976,17 +998,14 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
i++;
} else {
- if (msg[i].addr == 0xd0) {
- /* demod 16bit addr */
- req = 0xBD;
- index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
- value = msg[i].addr + (2 << 8);
- length = msg[i].len - 2;
- len = msg[i].len - 2;
- for (j = 0; j < len; j++)
- data[j] = msg[i].buf[j + 2];
-
- }
+ /* demod 16bit addr */
+ req = 0xBD;
+ index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
+ value = msg[i].addr + (2 << 8);
+ length = msg[i].len - 2;
+ len = msg[i].len - 2;
+ for (j = 0; j < len; j++)
+ data[j] = msg[i].buf[j + 2];
az6027_usb_out_op(d, req, value, index, data, length);
}
}
@@ -1019,6 +1038,7 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
}
}
mutex_unlock(&d->i2c_mutex);
+ kfree(data);
return i;
}
@@ -1039,8 +1059,14 @@ int az6027_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc,
int *cold)
{
- u8 b[16];
- s16 ret = usb_control_msg(udev,
+ u8 *b;
+ s16 ret;
+
+ b = kmalloc(16, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
0xb7,
USB_TYPE_VENDOR | USB_DIR_IN,
@@ -1051,7 +1077,7 @@ int az6027_identify_state(struct usb_device *udev,
USB_CTRL_GET_TIMEOUT);
*cold = ret <= 0;
-
+ kfree(b);
deb_info("cold: %d\n", *cold);
return 0;
}
@@ -1059,8 +1085,10 @@ int az6027_identify_state(struct usb_device *udev,
static struct usb_device_id az6027_usb_table[] = {
{ USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) },
- { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI) },
- { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
{ },
};
@@ -1097,18 +1125,34 @@ static struct dvb_usb_device_properties az6027_properties = {
.power_ctrl = az6027_power_ctrl,
.read_mac_address = az6027_read_mac_addr,
*/
- .rc_key_map = az6027_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(az6027_rc_keys),
+ .rc_key_map = ir_codes_az6027_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_az6027_table),
.rc_interval = 400,
.rc_query = az6027_rc_query,
.i2c_algo = &az6027_i2c_algo,
- .num_device_descs = 1,
+ .num_device_descs = 5,
.devices = {
{
.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
.cold_ids = { &az6027_usb_table[0], NULL },
.warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7",
+ .cold_ids = { &az6027_usb_table[1], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7 MKII",
+ .cold_ids = { &az6027_usb_table[2], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[3], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[4], NULL },
+ .warm_ids = { NULL },
},
{ NULL },
}
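Several az6027 paths above switch from on-stack buffers to kmalloc() before calling usb_control_msg(). A minimal sketch of the pattern, assuming only that the buffer is handed to the USB core; example_vendor_read() and the fixed 12-byte size are placeholders, not driver API.

/* Sketch: buffers passed to USB transfer functions must come from the
 * heap (kmalloc), not the stack, since the USB core may DMA into them;
 * every early return therefore needs a matching kfree().
 */
static int example_vendor_read(struct usb_device *udev, u8 req)
{
	u8 *b;
	int ret;

	b = kmalloc(12, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
			      b, 12, USB_CTRL_GET_TIMEOUT);
	if (ret >= 1)
		ret = b[0];	/* first status byte */

	kfree(b);
	return ret;
}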
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index e37ac4d4860..5a9c14bdc98 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -84,7 +84,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key cinergyt2_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_cinergyt2_table[] = {
{ 0x0401, KEY_POWER },
{ 0x0402, KEY_1 },
{ 0x0403, KEY_2 },
@@ -218,8 +218,8 @@ static struct dvb_usb_device_properties cinergyt2_properties = {
.power_ctrl = cinergyt2_power_ctrl,
.rc_interval = 50,
- .rc_key_map = cinergyt2_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(cinergyt2_rc_keys),
+ .rc_key_map = ir_codes_cinergyt2_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_cinergyt2_table),
.rc_query = cinergyt2_rc_query,
.generic_bulk_ctrl_endpoint = 1,
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 960376da7d5..0eb49088916 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -461,7 +461,7 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
return 0;
}
-static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_mce_table[] = {
{ 0xfe02, KEY_TV },
{ 0xfe0e, KEY_MP3 },
{ 0xfe1a, KEY_DVD },
@@ -509,7 +509,7 @@ static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
{ 0xfe4e, KEY_POWER },
};
-static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_portable_table[] = {
{ 0xfc02, KEY_SETUP }, /* Profile */
{ 0xfc43, KEY_POWER2 },
{ 0xfc06, KEY_EPG },
@@ -548,7 +548,7 @@ static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
{ 0xfc00, KEY_UNKNOWN }, /* HD */
};
-static struct dvb_usb_rc_key d680_dmb_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_d680_dmb_table[] = {
{ 0x0038, KEY_UNKNOWN }, /* TV/AV */
{ 0x080c, KEY_ZOOM },
{ 0x0800, KEY_0 },
@@ -1025,8 +1025,9 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
- dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- &cxusb_dualdig4_rev2_config);
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+ &cxusb_dualdig4_rev2_config) < 0)
+ return -ENODEV;
adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&cxusb_dualdig4_rev2_config);
@@ -1449,8 +1450,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1500,8 +1501,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 150,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1559,8 +1560,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1609,8 +1610,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1658,8 +1659,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1706,8 +1707,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1756,8 +1757,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1847,8 +1848,8 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1895,8 +1896,8 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
@@ -1944,8 +1945,8 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 34eab05afc6..800800a9649 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -562,7 +562,7 @@ static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key dib0700_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dib0700_table[] = {
/* Key codes for the tiny Pinnacle remote*/
{ 0x0700, KEY_MUTE },
{ 0x0701, KEY_MENU }, /* Pinnacle logo */
@@ -794,6 +794,43 @@ static struct dvb_usb_rc_key dib0700_rc_keys[] = {
{ 0x7a13, KEY_VOLUMEDOWN },
{ 0x7a40, KEY_POWER },
{ 0x7a41, KEY_MUTE },
+
+ /* Key codes for the Elgato EyeTV Diversity silver remote,
+ set dvb_usb_dib0700_ir_proto=0 */
+ { 0x4501, KEY_POWER },
+ { 0x4502, KEY_MUTE },
+ { 0x4503, KEY_1 },
+ { 0x4504, KEY_2 },
+ { 0x4505, KEY_3 },
+ { 0x4506, KEY_4 },
+ { 0x4507, KEY_5 },
+ { 0x4508, KEY_6 },
+ { 0x4509, KEY_7 },
+ { 0x450a, KEY_8 },
+ { 0x450b, KEY_9 },
+ { 0x450c, KEY_LAST },
+ { 0x450d, KEY_0 },
+ { 0x450e, KEY_ENTER },
+ { 0x450f, KEY_RED },
+ { 0x4510, KEY_CHANNELUP },
+ { 0x4511, KEY_GREEN },
+ { 0x4512, KEY_VOLUMEDOWN },
+ { 0x4513, KEY_OK },
+ { 0x4514, KEY_VOLUMEUP },
+ { 0x4515, KEY_YELLOW },
+ { 0x4516, KEY_CHANNELDOWN },
+ { 0x4517, KEY_BLUE },
+ { 0x4518, KEY_LEFT }, /* Skip backwards */
+ { 0x4519, KEY_PLAYPAUSE },
+ { 0x451a, KEY_RIGHT }, /* Skip forward */
+ { 0x451b, KEY_REWIND },
+ { 0x451c, KEY_L }, /* Live */
+ { 0x451d, KEY_FASTFORWARD },
+ { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */
+ { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */
+ { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */
+ { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */
+ { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */
};
/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
@@ -2049,6 +2086,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DIVERSITY) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2131,8 +2169,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2160,8 +2198,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2214,8 +2252,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2251,8 +2289,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2321,8 +2359,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2360,8 +2398,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2393,7 +2431,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 7,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -2419,11 +2457,15 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ "Sony PlayTV",
{ &dib0700_usb_id_table[44], NULL },
{ NULL },
- }
+ },
+ { "Elgato EyeTV Diversity",
+ { &dib0700_usb_id_table[68], NULL },
+ { NULL },
+ },
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2484,8 +2526,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2513,8 +2555,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2574,8 +2616,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2612,8 +2654,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2656,8 +2698,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2687,8 +2729,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
},
};
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 9143b5631e8..bc08bc0b723 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
/*
* common remote control stuff
*/
-struct dvb_usb_rc_key dibusb_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_dibusb_table[] = {
/* Key codes for the little Artec T1/Twinhan/HAMA/ remote. */
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
@@ -456,7 +456,7 @@ struct dvb_usb_rc_key dibusb_rc_keys[] = {
{ 0x804e, KEY_ENTER },
{ 0x804f, KEY_VOLUMEDOWN },
};
-EXPORT_SYMBOL(dibusb_rc_keys);
+EXPORT_SYMBOL(ir_codes_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 5c0126dc1ff..eb2e6f050fb 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -212,7 +212,7 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -296,7 +296,7 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -360,7 +360,7 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -417,7 +417,7 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index a05b9f87566..588308eb663 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -82,7 +82,7 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* FIXME */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb.h b/drivers/media/dvb/dvb-usb/dibusb.h
index 8e847aa73ba..3d50ac59088 100644
--- a/drivers/media/dvb/dvb-usb/dibusb.h
+++ b/drivers/media/dvb/dvb-usb/dibusb.h
@@ -124,7 +124,7 @@ extern int dibusb2_0_power_ctrl(struct dvb_usb_device *, int);
#define DEFAULT_RC_INTERVAL 150
//#define DEFAULT_RC_INTERVAL 100000
-extern struct dvb_usb_rc_key dibusb_rc_keys[];
+extern struct dvb_usb_rc_key ir_codes_dibusb_table[];
extern int dibusb_rc_query(struct dvb_usb_device *, u32 *, int *);
extern int dibusb_read_eeprom_byte(struct dvb_usb_device *, u8, u8 *);
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 955147d0075..e826077094f 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -161,7 +161,7 @@ static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key digitv_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_digitv_table[] = {
{ 0x5f55, KEY_0 },
{ 0x6f55, KEY_1 },
{ 0x9f55, KEY_2 },
@@ -311,8 +311,8 @@ static struct dvb_usb_device_properties digitv_properties = {
.identify_state = digitv_identify_state,
.rc_interval = 1000,
- .rc_key_map = digitv_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(digitv_rc_keys),
+ .rc_key_map = ir_codes_digitv_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_digitv_table),
.rc_query = digitv_rc_query,
.i2c_algo = &digitv_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index a1b12b01cbe..f57e59044d4 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -57,7 +57,7 @@ static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
/* remote control */
/* key list for the tiny remote control (Yakumo, don't know about the others) */
-static struct dvb_usb_rc_key dtt200u_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dtt200u_table[] = {
{ 0x8001, KEY_MUTE },
{ 0x8002, KEY_CHANNELDOWN },
{ 0x8003, KEY_VOLUMEDOWN },
@@ -162,8 +162,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -207,8 +207,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -252,8 +252,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -297,8 +297,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index ae8b57acfe0..085c4e457e0 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -124,9 +124,11 @@
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
+#define USB_PID_KWORLD_395U_4 0xe39a
#define USB_PID_KWORLD_MC810 0xc810
#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
+#define USB_PID_KWORLD_UB383_T 0xe383
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
@@ -288,6 +290,7 @@
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
@@ -296,6 +299,8 @@
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
#define USB_PID_AZUREWAVE_AZ6027 0x3275
-#define USB_PID_TERRATEC_DVBS2CI 0x3275
-#define USB_PID_TECHNISAT_USB2_HDCI 0x0002
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 6fe71c6745e..bb46ba6a357 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -42,6 +42,8 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
msleep(delay_ms);
ret = usb_bulk_msg(d->udev,usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint_response ?
+ d->props.generic_bulk_ctrl_endpoint_response :
d->props.generic_bulk_ctrl_endpoint),rbuf,rlen,&actlen,
2000);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 0143aef19ec..4a9f676087b 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -198,6 +198,12 @@ struct dvb_usb_adapter_properties {
* is non-zero, one can use dvb_usb_generic_rw and dvb_usb_generic_write-
* helper functions.
*
+ * @generic_bulk_ctrl_endpoint_response: some DVB USB devices use a separate
+ * endpoint for responses to control messages sent with bulk transfers via
+ * the generic_bulk_ctrl_endpoint. When this is non-zero, this will be used
+ * instead of the generic_bulk_ctrl_endpoint when reading usb responses in
+ * the dvb_usb_generic_rw helper function.
+ *
* @num_device_descs: number of struct dvb_usb_device_description in @devices
* @devices: array of struct dvb_usb_device_description compatibles with these
* properties.
@@ -239,6 +245,7 @@ struct dvb_usb_device_properties {
struct i2c_algorithm *i2c_algo;
int generic_bulk_ctrl_endpoint;
+ int generic_bulk_ctrl_endpoint_response;
int num_device_descs;
struct dvb_usb_device_description devices[12];
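A hedged example of how a driver might fill in the new field documented above; the endpoint numbers and the example_properties name are hypothetical, only the two field names come from the patch.

/* Sketch: when a device answers control transfers on a different bulk
 * endpoint than the one used for sending, set the _response field and
 * dvb_usb_generic_rw() will read from it instead of the send endpoint.
 */
static struct dvb_usb_device_properties example_properties = {
	/* ... */
	.generic_bulk_ctrl_endpoint          = 0x01,	/* host -> device */
	.generic_bulk_ctrl_endpoint_response = 0x81,	/* device -> host */
	/* ... */
};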
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index accc65509b0..e8fb8538067 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -73,7 +73,7 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct dvb_usb_rc_keys_table {
+struct ir_codes_dvb_usb_table_table {
struct dvb_usb_rc_key *rc_keys;
int rc_keys_size;
};
@@ -948,7 +948,7 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key dw210x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dw210x_table[] = {
{ 0xf80a, KEY_Q }, /*power*/
{ 0xf80c, KEY_M }, /*mute*/
{ 0xf811, KEY_1 },
@@ -982,7 +982,7 @@ static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf81b, KEY_B }, /*recall*/
};
-static struct dvb_usb_rc_key tevii_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tevii_table[] = {
{ 0xf80a, KEY_POWER },
{ 0xf80c, KEY_MUTE },
{ 0xf811, KEY_1 },
@@ -1032,7 +1032,7 @@ static struct dvb_usb_rc_key tevii_rc_keys[] = {
{ 0xf858, KEY_SWITCHVIDEOMODE },
};
-static struct dvb_usb_rc_key tbs_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tbs_table[] = {
{ 0xf884, KEY_POWER },
{ 0xf894, KEY_MUTE },
{ 0xf887, KEY_1 },
@@ -1067,10 +1067,10 @@ static struct dvb_usb_rc_key tbs_rc_keys[] = {
{ 0xf89b, KEY_MODE }
};
-static struct dvb_usb_rc_keys_table keys_tables[] = {
- { dw210x_rc_keys, ARRAY_SIZE(dw210x_rc_keys) },
- { tevii_rc_keys, ARRAY_SIZE(tevii_rc_keys) },
- { tbs_rc_keys, ARRAY_SIZE(tbs_rc_keys) },
+static struct ir_codes_dvb_usb_table_table keys_tables[] = {
+ { ir_codes_dw210x_table, ARRAY_SIZE(ir_codes_dw210x_table) },
+ { ir_codes_tevii_table, ARRAY_SIZE(ir_codes_tevii_table) },
+ { ir_codes_tbs_table, ARRAY_SIZE(ir_codes_tbs_table) },
};
static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
@@ -1185,14 +1185,14 @@ static int dw2102_load_firmware(struct usb_device *dev,
/* init registers */
switch (dev->descriptor.idProduct) {
case USB_PID_PROF_1100:
- s6x0_properties.rc_key_map = tbs_rc_keys;
+ s6x0_properties.rc_key_map = ir_codes_tbs_table;
s6x0_properties.rc_key_map_size =
- ARRAY_SIZE(tbs_rc_keys);
+ ARRAY_SIZE(ir_codes_tbs_table);
break;
case USB_PID_TEVII_S650:
- dw2104_properties.rc_key_map = tevii_rc_keys;
+ dw2104_properties.rc_key_map = ir_codes_tevii_table;
dw2104_properties.rc_key_map_size =
- ARRAY_SIZE(tevii_rc_keys);
+ ARRAY_SIZE(ir_codes_tevii_table);
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1255,8 +1255,8 @@ static struct dvb_usb_device_properties dw2102_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1306,8 +1306,8 @@ static struct dvb_usb_device_properties dw2104_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1353,8 +1353,8 @@ static struct dvb_usb_device_properties dw3101_properties = {
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1396,8 +1396,8 @@ static struct dvb_usb_device_properties s6x0_properties = {
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
- .rc_key_map = tevii_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tevii_rc_keys),
+ .rc_key_map = ir_codes_tevii_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1459,8 +1459,8 @@ static int dw2102_probe(struct usb_interface *intf,
/* fill only different fields */
p7500->firmware = "dvb-usb-p7500.fw";
p7500->devices[0] = d7500;
- p7500->rc_key_map = tbs_rc_keys;
- p7500->rc_key_map_size = ARRAY_SIZE(tbs_rc_keys);
+ p7500->rc_key_map = ir_codes_tbs_table;
+ p7500->rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table);
p7500->adapter->frontend_attach = prof_7500_frontend_attach;
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index d14bd227b50..93c21ddd0b7 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -300,7 +300,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *p)
{
/**
- * NOTE: ignore all the paramters except frequency.
+ * NOTE: ignore all the parameters except frequency.
* others should be fixed to the proper value for ISDB-T,
* but don't check here.
*/
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index afb444db43a..45106ac4967 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -105,6 +105,10 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
ptr = fw->data;
buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_rel_fw;
+ }
while (ptr[0] != 0xff) {
u16 buflen = ptr[0] + 4;
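The hunk above adds the allocation-failure check that was missing before the firmware upload loop. A minimal sketch of the same shape, with illustrative function and label names (the real driver unwinds through its existing out_rel_fw path):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/firmware.h>

/* Sketch only: fail the upload cleanly when the DMA-capable bounce buffer
 * cannot be allocated; names are illustrative, not the driver's. */
static int upload_blob(const struct firmware *fw)
{
	u8 *buf;
	int ret = 0;

	buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		ret = -ENOMEM;
		goto out_rel_fw;
	}

	/* ... stream fw->data to the device in small chunks via buf ... */

	kfree(buf);
out_rel_fw:
	release_firmware(fw);
	return ret;
}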
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index 737ffa36ac9..c211fef45fc 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -589,7 +589,7 @@ static struct m920x_inits pinnacle310e_init[] = {
};
/* ir keymaps */
-static struct dvb_usb_rc_key megasky_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_megasky_table [] = {
{ 0x0012, KEY_POWER },
{ 0x001e, KEY_CYCLEWINDOWS }, /* min/max */
{ 0x0002, KEY_CHANNELUP },
@@ -608,7 +608,7 @@ static struct dvb_usb_rc_key megasky_rc_keys [] = {
{ 0x000e, KEY_COFFEE }, /* "MTS" */
};
-static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_tvwalkertwin_table [] = {
{ 0x0001, KEY_ZOOM }, /* Full Screen */
{ 0x0002, KEY_CAMERA }, /* snapshot */
{ 0x0003, KEY_MUTE },
@@ -628,7 +628,7 @@ static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
{ 0x001e, KEY_VOLUMEUP },
};
-static struct dvb_usb_rc_key pinnacle310e_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_pinnacle310e_table[] = {
{ 0x16, KEY_POWER },
{ 0x17, KEY_FAVORITES },
{ 0x0f, KEY_TEXT },
@@ -785,8 +785,8 @@ static struct dvb_usb_device_properties megasky_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = megasky_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(megasky_rc_keys),
+ .rc_key_map = ir_codes_megasky_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_megasky_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -886,8 +886,8 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = tvwalkertwin_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tvwalkertwin_rc_keys),
+ .rc_key_map = ir_codes_tvwalkertwin_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tvwalkertwin_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -993,8 +993,8 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
.download_firmware = NULL,
.rc_interval = 100,
- .rc_key_map = pinnacle310e_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(pinnacle310e_rc_keys),
+ .rc_key_map = ir_codes_pinnacle310e_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_pinnacle310e_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index b41d66ef832..d195a587cc6 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -21,7 +21,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define deb_ee(args...) dprintk(debug,0x02,args)
/* Hauppauge NOVA-T USB2 keys */
-static struct dvb_usb_rc_key haupp_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_haupp_table [] = {
{ 0x1e00, KEY_0 },
{ 0x1e01, KEY_1 },
{ 0x1e02, KEY_2 },
@@ -91,14 +91,14 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
- for (i = 0; i < ARRAY_SIZE(haupp_rc_keys); i++) {
- if (rc5_data(&haupp_rc_keys[i]) == data &&
- rc5_custom(&haupp_rc_keys[i]) == custom) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_haupp_table); i++) {
+ if (rc5_data(&ir_codes_haupp_table[i]) == data &&
+ rc5_custom(&ir_codes_haupp_table[i]) == custom) {
- deb_rc("c: %x, d: %x\n", rc5_data(&haupp_rc_keys[i]),
- rc5_custom(&haupp_rc_keys[i]));
+ deb_rc("c: %x, d: %x\n", rc5_data(&ir_codes_haupp_table[i]),
+ rc5_custom(&ir_codes_haupp_table[i]));
- *event = haupp_rc_keys[i].event;
+ *event = ir_codes_haupp_table[i].event;
*state = REMOTE_KEY_PRESSED;
if (st->old_toggle == toggle) {
if (st->last_repeat_count++ < 2)
@@ -196,8 +196,8 @@ static struct dvb_usb_device_properties nova_t_properties = {
.read_mac_address = nova_t_read_mac_address,
.rc_interval = 100,
- .rc_key_map = haupp_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(haupp_rc_keys),
+ .rc_key_map = ir_codes_haupp_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_haupp_table),
.rc_query = nova_t_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 830557696ae..dfb81ff1d9a 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -35,7 +35,7 @@
struct opera1_state {
u32 last_key_pressed;
};
-struct opera_rc_keys {
+struct ir_codes_opera_table {
u32 keycode;
u32 event;
};
@@ -331,7 +331,7 @@ static int opera1_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
-static struct dvb_usb_rc_key opera1_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_opera1_table[] = {
{0x5fa0, KEY_1},
{0x51af, KEY_2},
{0x5da2, KEY_3},
@@ -404,12 +404,12 @@ static int opera1_rc_query(struct dvb_usb_device *dev, u32 * event, int *state)
send_key = (send_key & 0xffff) | 0x0100;
- for (i = 0; i < ARRAY_SIZE(opera1_rc_keys); i++) {
- if (rc5_scan(&opera1_rc_keys[i]) == (send_key & 0xffff)) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_opera1_table); i++) {
+ if (rc5_scan(&ir_codes_opera1_table[i]) == (send_key & 0xffff)) {
*state = REMOTE_KEY_PRESSED;
- *event = opera1_rc_keys[i].event;
+ *event = ir_codes_opera1_table[i].event;
opst->last_key_pressed =
- opera1_rc_keys[i].event;
+ ir_codes_opera1_table[i].event;
break;
}
opst->last_key_pressed = 0;
@@ -498,8 +498,8 @@ static struct dvb_usb_device_properties opera1_properties = {
.power_ctrl = opera1_power_ctrl,
.i2c_algo = &opera1_i2c_algo,
- .rc_key_map = opera1_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(opera1_rc_keys),
+ .rc_key_map = ir_codes_opera1_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_opera1_table),
.rc_interval = 200,
.rc_query = opera1_rc_query,
.read_mac_address = opera1_read_mac_address,
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index f9702e3756b..86d68933b6b 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -96,8 +96,9 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
while (stream->buf_num) {
stream->buf_num--;
deb_mem("freeing buffer %d\n",stream->buf_num);
- usb_buffer_free(stream->udev, stream->buf_size,
- stream->buf_list[stream->buf_num], stream->dma_addr[stream->buf_num]);
+ usb_free_coherent(stream->udev, stream->buf_size,
+ stream->buf_list[stream->buf_num],
+ stream->dma_addr[stream->buf_num]);
}
}
@@ -116,7 +117,7 @@ static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num,
for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
deb_mem("allocating buffer %d\n",stream->buf_num);
if (( stream->buf_list[stream->buf_num] =
- usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
+ usb_alloc_coherent(stream->udev, size, GFP_ATOMIC,
&stream->dma_addr[stream->buf_num]) ) == NULL) {
deb_mem("not enough memory for urb-buffer allocation.\n");
usb_free_stream_buffers(stream);
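These hunks switch the stream-buffer helpers from the old usb_buffer_alloc()/usb_buffer_free() names to usb_alloc_coherent()/usb_free_coherent(). A minimal sketch of the paired calls, with udev and buf_size assumed to come from the caller:

#include <linux/usb.h>
#include <linux/slab.h>

/* Sketch: allocate one coherent URB buffer and free it again. */
static void *alloc_stream_buf(struct usb_device *udev, size_t buf_size,
			      dma_addr_t *dma)
{
	/* usb_alloc_coherent() is the renamed usb_buffer_alloc() */
	return usb_alloc_coherent(udev, buf_size, GFP_ATOMIC, dma);
}

static void free_stream_buf(struct usb_device *udev, size_t buf_size,
			    void *buf, dma_addr_t dma)
{
	usb_free_coherent(udev, buf_size, buf, dma);
}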
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index ef4e37d9c5f..4d332451653 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -174,7 +174,7 @@ static int vp702x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key vp702x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp702x_table[] = {
{ 0x0001, KEY_1 },
{ 0x0002, KEY_2 },
};
@@ -197,10 +197,10 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp702x_rc_keys); i++)
- if (rc5_custom(&vp702x_rc_keys[i]) == key[1]) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp702x_table); i++)
+ if (rc5_custom(&ir_codes_vp702x_table[i]) == key[1]) {
*state = REMOTE_KEY_PRESSED;
- *event = vp702x_rc_keys[i].event;
+ *event = ir_codes_vp702x_table[i].event;
break;
}
return 0;
@@ -283,8 +283,8 @@ static struct dvb_usb_device_properties vp702x_properties = {
},
.read_mac_address = vp702x_read_mac_addr,
- .rc_key_map = vp702x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp702x_rc_keys),
+ .rc_key_map = ir_codes_vp702x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp702x_table),
.rc_interval = 400,
.rc_query = vp702x_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index a59faa27912..036893fa448 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -99,7 +99,7 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
/* The keymapping struct. Somehow this should be loaded to the driver, but
* currently it is hardcoded. */
-static struct dvb_usb_rc_key vp7045_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp7045_table[] = {
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
{ 0x0003, KEY_1 },
@@ -165,10 +165,10 @@ static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp7045_rc_keys); i++)
- if (rc5_data(&vp7045_rc_keys[i]) == key) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp7045_table); i++)
+ if (rc5_data(&ir_codes_vp7045_table[i]) == key) {
*state = REMOTE_KEY_PRESSED;
- *event = vp7045_rc_keys[i].event;
+ *event = ir_codes_vp7045_table[i].event;
break;
}
return 0;
@@ -260,8 +260,8 @@ static struct dvb_usb_device_properties vp7045_properties = {
.read_mac_address = vp7045_read_mac_addr,
.rc_interval = 400,
- .rc_key_map = vp7045_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp7045_rc_keys),
+ .rc_key_map = ir_codes_vp7045_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp7045_table),
.rc_query = vp7045_rc_query,
.num_device_descs = 2,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 1b31bebc27d..28294af752d 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1096,7 +1096,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[15] = msg[1]; /* Program number */
c->operand[16] = msg[2];
- c->operand[17] = 0x01; /* Version number=0 + current/next=1 */
+ c->operand[17] = msg[3]; /* Version number and current/next */
c->operand[18] = 0x00; /* Section number=0 */
c->operand[19] = 0x00; /* Last section number=0 */
c->operand[20] = 0x1f; /* PCR_PID=1FFF */
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 853e04b7cb3..d3c2cf60de7 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -175,8 +175,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
return err;
}
-static int fdtv_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct dvb_device *dvbdev = file->private_data;
struct firedtv *fdtv = dvbdev->priv;
@@ -217,7 +216,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
static const struct file_operations fdtv_ca_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = fdtv_ca_io_poll,
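The file_operations hook moves from .ioctl (called under the BKL, with an inode argument) to .unlocked_ioctl (no inode, no implicit lock). A minimal sketch of the new hook's shape, with illustrative names; the mutex is only an example of the serialization a driver may need once the BKL no longer provides it:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

/* unlocked_ioctl: struct file, cmd and arg only, driver does its own locking */
static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&my_lock);
	ret = 0;		/* ... dispatch on cmd ... */
	mutex_unlock(&my_lock);
	return ret;
}

static const struct file_operations my_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= my_ioctl,
};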
diff --git a/drivers/media/dvb/frontends/atbm8830_priv.h b/drivers/media/dvb/frontends/atbm8830_priv.h
index ce960f76092..d460058d497 100644
--- a/drivers/media/dvb/frontends/atbm8830_priv.h
+++ b/drivers/media/dvb/frontends/atbm8830_priv.h
@@ -18,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
+
#ifndef __ATBM8830_PRIV_H
#define __ATBM8830_PRIV_H
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 24268ef2753..68dba3a4b4d 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -664,6 +664,13 @@ static int au8522_reset(struct v4l2_subdev *sd, u32 val)
{
struct au8522_state *state = to_state(sd);
+ state->operational_mode = AU8522_ANALOG_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
return 0;
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index a1fed0fa8ed..65f6a36dfb2 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -84,6 +84,14 @@ static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
dprintk("%s(%d)\n", __func__, enable);
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're being asked to manage the gate even though we're
+ not in digital mode. This can occur if we get switched
+ over to analog mode before the dvb_frontend kernel thread
+		   has completely shut down */
+ return 0;
+ }
+
if (enable)
return au8522_writereg(state, 0x106, 1);
else
@@ -608,6 +616,13 @@ int au8522_init(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
au8522_i2c_gate_ctrl(fe, 1);
@@ -704,6 +719,15 @@ int au8522_sleep(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ /* Only power down if the digital side is currently using the chip */
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're not in one of the expected power modes, which means
+ that the DVB thread is probably telling us to go to sleep
+ even though the analog frontend has already started using
+ the chip. So ignore the request */
+ return 0;
+ }
+
/* turn off led */
au8522_led_ctrl(state, 0);
@@ -932,6 +956,8 @@ struct dvb_frontend *au8522_attach(const struct au8522_config *config,
/* setup the state */
state->config = config;
state->i2c = i2c;
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
/* create dvb_frontend */
memcpy(&state->frontend.ops, &au8522_ops,
sizeof(struct dvb_frontend_ops));
diff --git a/drivers/media/dvb/frontends/au8522_priv.h b/drivers/media/dvb/frontends/au8522_priv.h
index c74c4e72fe9..609cf04bc31 100644
--- a/drivers/media/dvb/frontends/au8522_priv.h
+++ b/drivers/media/dvb/frontends/au8522_priv.h
@@ -34,10 +34,15 @@
#include "au8522.h"
#include "tuner-i2c.h"
+#define AU8522_ANALOG_MODE 0
+#define AU8522_DIGITAL_MODE 1
+
struct au8522_state {
struct i2c_client *c;
struct i2c_adapter *i2c;
+ u8 operational_mode;
+
/* Used for sharing of the state between analog and digital mode */
struct tuner_i2c_props i2c_props;
struct list_head hybrid_tuner_instance_list;
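The new operational_mode field lets the analog and digital halves of this hybrid driver decide whose power transitions should really touch the chip, as the au8522_dig.c hunks above do. A minimal sketch of that guard, reusing the constants from this header (the function body is illustrative only):

#include "au8522_priv.h"

/* Sketch: ignore a DVB-side power-down while the analog side owns the chip. */
static int my_sleep(struct au8522_state *state)
{
	if (state->operational_mode == AU8522_ANALOG_MODE)
		return 0;	/* analog side is active: do nothing */

	/* digital side really owns the part: power it down here */
	return 0;
}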
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 40a09981027..afad252abf4 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -814,42 +814,51 @@ EXPORT_SYMBOL(dib3000mc_set_config);
int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib3000mc_config cfg[])
{
- struct dib3000mc_state st = { .i2c_adap = i2c };
+ struct dib3000mc_state *dmcst;
int k;
u8 new_addr;
static u8 DIB3000MC_I2C_ADDRESS[] = {20,22,24,26};
+ dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL);
+ if (dmcst == NULL)
+ return -ENODEV;
+
+ dmcst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = &cfg[k];
+ dmcst->cfg = &cfg[k];
/* designated i2c address */
new_addr = DIB3000MC_I2C_ADDRESS[k];
- st.i2c_addr = new_addr;
- if (dib3000mc_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- if (dib3000mc_identify(&st) != 0) {
+ dmcst->i2c_addr = new_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
+ dmcst->i2c_addr = default_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
dprintk("-E- DiB3000P/MC #%d: not identified\n", k);
+ kfree(dmcst);
return -ENODEV;
}
}
- dib3000mc_set_output_mode(&st, OUTMODE_MPEG2_PAR_CONT_CLK);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_MPEG2_PAR_CONT_CLK);
// set new i2c address and force divstr (Bit 1) to value 0 (Bit 0)
- dib3000mc_write_word(&st, 1024, (new_addr << 3) | 0x1);
- st.i2c_addr = new_addr;
+ dib3000mc_write_word(dmcst, 1024, (new_addr << 3) | 0x1);
+ dmcst->i2c_addr = new_addr;
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = &cfg[k];
- st.i2c_addr = DIB3000MC_I2C_ADDRESS[k];
+ dmcst->cfg = &cfg[k];
+ dmcst->i2c_addr = DIB3000MC_I2C_ADDRESS[k];
- dib3000mc_write_word(&st, 1024, st.i2c_addr << 3);
+ dib3000mc_write_word(dmcst, 1024, dmcst->i2c_addr << 3);
/* turn off data output */
- dib3000mc_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_HIGH_Z);
}
+
+ kfree(dmcst);
return 0;
}
EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
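The enumeration helper used to keep a full dib3000mc_state on the kernel stack; the hunk above moves it to kzalloc() and frees it on every exit path. A minimal sketch of that shape, using a hypothetical oversized state type:

#include <linux/slab.h>

struct big_state { char regs[1024]; };	/* stand-in for a large demod state */

static int enumerate(void)
{
	struct big_state *st;
	int ret = 0;

	st = kzalloc(sizeof(*st), GFP_KERNEL);	/* keep it off the stack */
	if (!st)
		return -ENOMEM;

	/* ... probe each demod through *st, set ret on failure ... */

	kfree(st);				/* freed on every path */
	return ret;
}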
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 85468a45c34..2e28b973dfd 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -1324,46 +1324,54 @@ EXPORT_SYMBOL(dib7000p_pid_filter);
int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
- struct dib7000p_state st = { .i2c_adap = i2c };
+ struct dib7000p_state *dpst;
int k = 0;
u8 new_addr = 0;
+ dpst = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
+ if (!dpst)
+ return -ENOMEM;
+
+ dpst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = cfg[k];
+ dpst->cfg = cfg[k];
/* designated i2c address */
new_addr = (0x40 + k) << 1;
- st.i2c_addr = new_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
+ dpst->i2c_addr = new_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
+ dpst->i2c_addr = default_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
dprintk("DiB7000P #%d: not identified\n", k);
+ kfree(dpst);
return -EIO;
}
}
/* start diversity to pull_down div_str - just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_DIVERSITY);
+ dib7000p_set_output_mode(dpst, OUTMODE_DIVERSITY);
/* set new i2c address and force divstart */
- dib7000p_write_word(&st, 1285, (new_addr << 2) | 0x2);
+ dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = cfg[k];
- st.i2c_addr = (0x40 + k) << 1;
+ dpst->cfg = cfg[k];
+ dpst->i2c_addr = (0x40 + k) << 1;
// unforce divstr
- dib7000p_write_word(&st, 1285, st.i2c_addr << 2);
+ dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);
/* deactivate div - it was just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib7000p_set_output_mode(dpst, OUTMODE_HIGH_Z);
}
+ kfree(dpst);
return 0;
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index b1ee2079963..e0a9ded11df 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -109,6 +109,7 @@ static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
}
#endif
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index cff3535566f..78001e8bcdb 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -719,7 +719,7 @@ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
(ds3000_readreg(state, 0x8d) << 4);
dvbs2_signal_reading = ds3000_readreg(state, 0x8e);
tmp = dvbs2_signal_reading * dvbs2_signal_reading >> 1;
- if (dvbs2_signal_reading == 0) {
+ if (tmp == 0) {
*snr = 0x0000;
return 0;
}
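The SNR path squares the raw reading into tmp and later divides by it; testing the reading alone missed the case where the shifted square itself becomes zero (a reading of 1), so the guard now checks the actual divisor. A minimal sketch of the arithmetic, with an illustrative conversion stub:

#include <linux/types.h>

/* Sketch only: tmp = r * r >> 1 collapses to zero for r == 0 and r == 1,
 * so the zero check must be applied to the value actually divided by. */
static u16 snr_from_reading(u32 r)
{
	u32 tmp = (r * r) >> 1;

	if (tmp == 0)
		return 0;

	/* placeholder for the real table/log-based conversion */
	return (u16)(tmp & 0xffff);
}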
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 01f8f1f802f..4f5e7d3a0e6 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -1583,7 +1583,7 @@ static enum dvbfe_search stv0900_search(struct dvb_frontend *fe,
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct stv0900_search_params p_search;
- struct stv0900_signal_info p_result;
+ struct stv0900_signal_info p_result = intp->result[demod];
enum fe_stv0900_error error = STV0900_NO_ERROR;
@@ -1842,6 +1842,19 @@ static void stv0900_release(struct dvb_frontend *fe)
kfree(state);
}
+static int stv0900_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *intp = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ struct stv0900_signal_info p_result = intp->result[demod];
+
+ p->frequency = p_result.locked ? p_result.frequency : 0;
+ p->u.qpsk.symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
+ return 0;
+}
+
static struct dvb_frontend_ops stv0900_ops = {
.info = {
@@ -1862,6 +1875,7 @@ static struct dvb_frontend_ops stv0900_ops = {
},
.release = stv0900_release,
.init = stv0900_init,
+ .get_frontend = stv0900_get_frontend,
.get_frontend_algo = stv0900_frontend_algo,
.i2c_gate_ctrl = stv0900_i2c_gate_ctrl,
.diseqc_send_master_cmd = stv0900_send_master_cmd,
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96972804f4a..425e7a43ae1 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -754,11 +754,19 @@ static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 d
return stv090x_write_regs(state, reg, &data, 1);
}
-static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
{
- struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+	/*
+	 * NOTE: this mutex acts as a simple state machine that serializes
+	 * access between the two tuners sharing the same demodulator; it is
+	 * not an ordinary critical-section lock, although it is easily
+	 * confused with one protecting I/O access to the demodulator gate.
+	 * On any error the operations themselves drop the lock and exit
+	 * through their own error paths.
+	 */
if (enable)
mutex_lock(&state->internal->tuner_lock);
@@ -1778,7 +1786,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -1791,12 +1799,12 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -1809,7 +1817,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -1822,7 +1830,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
return srate_coarse;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2167,7 +2175,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -2180,12 +2188,12 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -2198,7 +2206,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
@@ -2222,7 +2230,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
return lock;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2591,7 +2599,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
}
state->delsys = stv090x_get_std(state);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2599,7 +2607,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
offst_freq = stv090x_get_car_freq(state, state->internal->mclk) / 1000;
@@ -2619,7 +2627,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2627,7 +2635,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
@@ -2646,7 +2654,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
return STV090x_OUTOFRANGE;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3000,7 +3008,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
if (state->algo != STV090x_WARM_SEARCH) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bandwidth) {
@@ -3008,7 +3016,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -3059,7 +3067,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3235,7 +3243,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
}
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bbgain) {
@@ -3256,17 +3264,17 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (state->config->tuner_get_status) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (reg)
@@ -3400,7 +3408,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
return signal_state;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3839,6 +3847,17 @@ static int stv090x_sleep(struct dvb_frontend *fe)
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_sleep) {
+ if (state->config->tuner_sleep(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
dprintk(FE_DEBUG, 1, "Set %s to sleep",
state->device == STV0900 ? "STV0900" : "STV0903");
@@ -3853,6 +3872,9 @@ static int stv090x_sleep(struct dvb_frontend *fe)
goto err;
return 0;
+
+err_gateoff:
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4311,6 +4333,20 @@ static int stv090x_init(struct dvb_frontend *fe)
u32 reg;
if (state->internal->mclk == 0) {
+ /* call tuner init to configure the tuner's clock output
+ divider directly before setting up the master clock of
+ the stv090x. */
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (config->tuner_init) {
+ if (config->tuner_init(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
stv090x_set_mclk(state, 135000000, config->xtal); /* 135 Mhz */
msleep(5);
if (stv090x_write_reg(state, STV090x_SYNTCTRL,
@@ -4336,7 +4372,7 @@ static int stv090x_init(struct dvb_frontend *fe)
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (config->tuner_set_mode) {
@@ -4349,7 +4385,7 @@ static int stv090x_init(struct dvb_frontend *fe)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (stv090x_set_tspath(state) < 0)
@@ -4358,7 +4394,7 @@ static int stv090x_init(struct dvb_frontend *fe)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4503,8 +4539,6 @@ static struct dvb_frontend_ops stv090x_ops = {
.sleep = stv090x_sleep,
.get_frontend_algo = stv090x_frontend_algo,
- .i2c_gate_ctrl = stv090x_i2c_gate_ctrl,
-
.diseqc_send_master_cmd = stv090x_send_diseqc_msg,
.diseqc_send_burst = stv090x_send_diseqc_burst,
.diseqc_recv_slave_reply = stv090x_recv_slave_reply,
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index 30f01a6902a..dd1b93ae4e9 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -87,6 +87,7 @@ struct stv090x_config {
bool diseqc_envelope_mode;
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
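With i2c_gate_ctrl now a private helper taking the state, every tuner access in the hunks above follows the same discipline: open the gate, run the tuner callback, close the gate, and route failures through an err_gateoff label so the gate is never left open. A minimal self-contained sketch of that shape, with stand-in names:

/* Sketch only: my_state, my_gate_ctrl and my_tuner_op are illustrative
 * stand-ins for the driver's own state and helpers. */
struct my_state { int unused; };

static int my_gate_ctrl(struct my_state *state, int enable) { return 0; }
static int my_tuner_op(struct my_state *state) { return 0; }

static int call_through_gate(struct my_state *state)
{
	if (my_gate_ctrl(state, 1) < 0)
		goto err;

	if (my_tuner_op(state) < 0)
		goto err_gateoff;

	if (my_gate_ctrl(state, 0) < 0)
		goto err;

	return 0;

err_gateoff:
	my_gate_ctrl(state, 0);	/* never leave the gate open on failure */
err:
	return -1;
}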
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
index dea4245f077..42591ce1aaa 100644
--- a/drivers/media/dvb/frontends/stv6110x.c
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -338,14 +338,12 @@ static struct dvb_tuner_ops stv6110x_ops = {
.frequency_max = 2150000,
.frequency_step = 0,
},
-
- .init = stv6110x_init,
- .sleep = stv6110x_sleep,
.release = stv6110x_release
};
static struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
+ .tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
.tuner_set_frequency = stv6110x_set_frequency,
.tuner_get_frequency = stv6110x_get_frequency,
@@ -363,11 +361,10 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
{
struct stv6110x_state *stv6110x;
u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
- int ret;
stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
- if (stv6110x == NULL)
- goto error;
+ if (!stv6110x)
+ return NULL;
stv6110x->i2c = i2c;
stv6110x->config = config;
@@ -392,34 +389,11 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
break;
}
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 1);
- if (ret < 0)
- goto error;
- }
-
- ret = stv6110x_write_regs(stv6110x, 0, stv6110x->regs,
- ARRAY_SIZE(stv6110x->regs));
- if (ret < 0) {
- dprintk(FE_ERROR, 1, "Initialization failed");
- goto error;
- }
-
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 0);
- if (ret < 0)
- goto error;
- }
-
fe->tuner_priv = stv6110x;
fe->ops.tuner_ops = stv6110x_ops;
- printk("%s: Attaching STV6110x \n", __func__);
+ printk(KERN_INFO "%s: Attaching STV6110x\n", __func__);
return stv6110x->devctl;
-
-error:
- kfree(stv6110x);
- return NULL;
}
EXPORT_SYMBOL(stv6110x_attach);
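With the register writes removed from stv6110x_attach(), the tuner is no longer programmed at attach time; the demod driver reaches it later through the devctl hooks (under its own gate control), as ngene-cards.c and the stv090x changes above show. A minimal sketch of the wiring, with the frontend, config and adapter assumed to exist in the caller and gate handling omitted:

#include <linux/errno.h>
#include "dvb_frontend.h"
#include "stv6110x.h"

/* Sketch only: attach the tuner and drive it through its devctl hooks. */
static int wire_up_tuner(struct dvb_frontend *fe,
			 struct stv6110x_config *tuner_cfg,
			 struct i2c_adapter *i2c)
{
	struct stv6110x_devctl *ctl;

	ctl = dvb_attach(stv6110x_attach, fe, tuner_cfg, i2c);
	if (!ctl)
		return -ENODEV;

	/* hardware init now happens here, not inside the attach call */
	return ctl->tuner_init(fe);
}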
diff --git a/drivers/media/dvb/frontends/stv6110x.h b/drivers/media/dvb/frontends/stv6110x.h
index 2429ae6d784..47516753929 100644
--- a/drivers/media/dvb/frontends/stv6110x.h
+++ b/drivers/media/dvb/frontends/stv6110x.h
@@ -40,6 +40,7 @@ enum tuner_status {
struct stv6110x_devctl {
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
index 4675a3b53c7..3d4e4663220 100644
--- a/drivers/media/dvb/mantis/mantis_input.c
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -32,6 +32,8 @@
#include "mantis_reg.h"
#include "mantis_uart.h"
+#define MODULE_NAME "mantis_core"
+
static struct ir_scancode mantis_ir_table[] = {
{ 0x29, KEY_POWER },
{ 0x28, KEY_FAVORITES },
@@ -126,7 +128,7 @@ int mantis_input_init(struct mantis_pci *mantis)
rc->id.version = 1;
rc->dev = mantis->pdev->dev;
- err = ir_input_register(rc, &ir_mantis, NULL);
+ err = __ir_input_register(rc, &ir_mantis, NULL, MODULE_NAME);
if (err) {
dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
input_free_device(rc);
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
index 515346dd31d..d1aa2bc0c15 100644
--- a/drivers/media/dvb/mantis/mantis_vp1041.c
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -136,12 +136,12 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -194,10 +194,10 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -206,7 +206,7 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
diff --git a/drivers/media/dvb/ngene/Kconfig b/drivers/media/dvb/ngene/Kconfig
index 3ec8e6fcbb1..cec242b7c00 100644
--- a/drivers/media/dvb/ngene/Kconfig
+++ b/drivers/media/dvb/ngene/Kconfig
@@ -4,6 +4,8 @@ config DVB_NGENE
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_STV6110x if !DVB_FE_CUSTOMISE
select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_LGDT330X if !DVB_FE_CUSTOMISE
+ select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
---help---
Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/dvb/ngene/Makefile b/drivers/media/dvb/ngene/Makefile
index 40435cad481..0608aabb14e 100644
--- a/drivers/media/dvb/ngene/Makefile
+++ b/drivers/media/dvb/ngene/Makefile
@@ -2,10 +2,10 @@
# Makefile for the nGene device driver
#
-ngene-objs := ngene-core.o
+ngene-objs := ngene-core.o ngene-i2c.o ngene-cards.o ngene-dvb.o
obj-$(CONFIG_DVB_NGENE) += ngene.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
-
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
new file mode 100644
index 00000000000..692c3e226e8
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -0,0 +1,328 @@
+/*
+ * ngene-cards.c: nGene PCIe bridge driver - card specific info
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include "ngene.h"
+
+/* demods/tuners */
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "lnbh24.h"
+#include "lgdt330x.h"
+#include "mt2131.h"
+
+
+/****************************************************************************/
+/* Demod/tuner attachment ***************************************************/
+/****************************************************************************/
+
+static int tuner_attach_stv6110(struct ngene_channel *chan)
+{
+ struct stv090x_config *feconf = (struct stv090x_config *)
+ chan->dev->card_info->fe_config[chan->number];
+ struct stv6110x_config *tunerconf = (struct stv6110x_config *)
+ chan->dev->card_info->tuner_config[chan->number];
+ struct stv6110x_devctl *ctl;
+
+ ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf,
+ &chan->i2c_adapter);
+ if (ctl == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
+ return -ENODEV;
+ }
+
+ feconf->tuner_init = ctl->tuner_init;
+ feconf->tuner_set_mode = ctl->tuner_set_mode;
+ feconf->tuner_set_frequency = ctl->tuner_set_frequency;
+ feconf->tuner_get_frequency = ctl->tuner_get_frequency;
+ feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
+ feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
+ feconf->tuner_set_refclk = ctl->tuner_set_refclk;
+ feconf->tuner_get_status = ctl->tuner_get_status;
+
+ return 0;
+}
+
+
+static int demod_attach_stv0900(struct ngene_channel *chan)
+{
+ struct stv090x_config *feconf = (struct stv090x_config *)
+ chan->dev->card_info->fe_config[chan->number];
+
+ chan->fe = dvb_attach(stv090x_attach,
+ feconf,
+ &chan->i2c_adapter,
+ chan->number == 0 ? STV090x_DEMODULATOR_0 :
+ STV090x_DEMODULATOR_1);
+ if (chan->fe == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
+ return -ENODEV;
+ }
+
+ if (!dvb_attach(lnbh24_attach, chan->fe, &chan->i2c_adapter, 0,
+ 0, chan->dev->card_info->lnb[chan->number])) {
+ printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
+ dvb_frontend_detach(chan->fe);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct lgdt330x_config aver_m780 = {
+ .demod_address = 0xb2 >> 1,
+ .demod_chip = LGDT3303,
+ .serial_mpeg = 0x00, /* PARALLEL */
+ .clock_polarity_flip = 1,
+};
+
+static struct mt2131_config m780_tunerconfig = {
+ 0xc0 >> 1
+};
+
+/* A single function to attach both the demod and the tuner, rather than
+ * using two separate callbacks as the current design mandates.
+ */
+static int demod_attach_lg330x(struct ngene_channel *chan)
+{
+ chan->fe = dvb_attach(lgdt330x_attach, &aver_m780, &chan->i2c_adapter);
+ if (chan->fe == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No LGDT330x found!\n");
+ return -ENODEV;
+ }
+
+ dvb_attach(mt2131_attach, chan->fe, &chan->i2c_adapter,
+ &m780_tunerconfig, 0);
+
+ return (chan->fe) ? 0 : -ENODEV;
+}
+
+/****************************************************************************/
+/* Switch control (I2C gates, etc.) *****************************************/
+/****************************************************************************/
+
+
+static struct stv090x_config fe_cineS2 = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x68,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv6110x_config tuner_cineS2_0 = {
+ .addr = 0x60,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct stv6110x_config tuner_cineS2_1 = {
+ .addr = 0x63,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct ngene_info ngene_info_cineS2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0b, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_satixS2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Mystique SaTiX-S2 Dual",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0b, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_satixS2v2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Mystique SaTiX-S2 Dual (v2)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_cineS2v5 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_m780 = {
+ .type = NGENE_APP,
+ .name = "Aver M780 ATSC/QAM-B",
+
+ /* Channel 0 is analog, which is currently unsupported */
+ .io_type = { NGENE_IO_NONE, NGENE_IO_TSIN },
+ .demod_attach = { NULL, demod_attach_lg330x },
+
+	/* Ensure these are NULL, else the framework will call them as functions */
+ .tuner_attach = { 0, 0, 0, 0 },
+ .fe_config = { NULL, &aver_m780 },
+ .avf = { 0 },
+
+	/* A custom electrical interface config for the demod-to-bridge link */
+ .tsf = { 4, 4 },
+ .fw_version = 15,
+};
+
+/****************************************************************************/
+
+
+
+/****************************************************************************/
+/* PCI Subsystem ID *********************************************************/
+/****************************************************************************/
+
+#define NGENE_ID(_subvend, _subdev, _driverdata) { \
+ .vendor = NGENE_VID, .device = NGENE_PID, \
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long) &_driverdata }
+
+/****************************************************************************/
+
+static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
+ NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+ NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
+ NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
+ NGENE_ID(0x1461, 0x062e, ngene_info_m780),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
+
+/****************************************************************************/
+/* Init/Exit ****************************************************************/
+/****************************************************************************/
+
+static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
+ enum pci_channel_state state)
+{
+ printk(KERN_ERR DEVICE_NAME ": PCI error\n");
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (state == pci_channel_io_frozen)
+ return PCI_ERS_RESULT_NEED_RESET;
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": link reset\n");
+ return 0;
+}
+
+static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": slot reset\n");
+ return 0;
+}
+
+static void ngene_resume(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": resume\n");
+}
+
+static struct pci_error_handlers ngene_errors = {
+ .error_detected = ngene_error_detected,
+ .link_reset = ngene_link_reset,
+ .slot_reset = ngene_slot_reset,
+ .resume = ngene_resume,
+};
+
+static struct pci_driver ngene_pci_driver = {
+ .name = "ngene",
+ .id_table = ngene_id_tbl,
+ .probe = ngene_probe,
+ .remove = __devexit_p(ngene_remove),
+ .err_handler = &ngene_errors,
+};
+
+static __init int module_init_ngene(void)
+{
+ printk(KERN_INFO
+ "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
+ return pci_register_driver(&ngene_pci_driver);
+}
+
+static __exit void module_exit_ngene(void)
+{
+ pci_unregister_driver(&ngene_pci_driver);
+}
+
+module_init(module_init_ngene);
+module_exit(module_exit_ngene);
+
+MODULE_DESCRIPTION("nGene");
+MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 645e8b8a713..c8b4dfa0ab5 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -34,20 +34,14 @@
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
-#include "stv6110x.h"
-#include "stv090x.h"
-#include "lnbh24.h"
-
static int one_adapter = 1;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
@@ -63,8 +57,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define dprintk if (debug) printk
-#define DEVICE_NAME "ngene"
-
#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
#define ngwritel(dat, adr) writel((dat), (char *)(dev->iomem + (adr)))
#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
@@ -352,7 +344,7 @@ static int ngene_command_mutex(struct ngene *dev, struct ngene_command *com)
return 0;
}
-static int ngene_command(struct ngene *dev, struct ngene_command *com)
+int ngene_command(struct ngene *dev, struct ngene_command *com)
{
int result;
@@ -363,55 +355,6 @@ static int ngene_command(struct ngene *dev, struct ngene_command *com)
}
-static int ngene_command_i2c_read(struct ngene *dev, u8 adr,
- u8 *out, u8 outlen, u8 *in, u8 inlen, int flag)
-{
- struct ngene_command com;
-
- com.cmd.hdr.Opcode = CMD_I2C_READ;
- com.cmd.hdr.Length = outlen + 3;
- com.cmd.I2CRead.Device = adr << 1;
- memcpy(com.cmd.I2CRead.Data, out, outlen);
- com.cmd.I2CRead.Data[outlen] = inlen;
- com.cmd.I2CRead.Data[outlen + 1] = 0;
- com.in_len = outlen + 3;
- com.out_len = inlen + 1;
-
- if (ngene_command(dev, &com) < 0)
- return -EIO;
-
- if ((com.cmd.raw8[0] >> 1) != adr)
- return -EIO;
-
- if (flag)
- memcpy(in, com.cmd.raw8, inlen + 1);
- else
- memcpy(in, com.cmd.raw8 + 1, inlen);
- return 0;
-}
-
-static int ngene_command_i2c_write(struct ngene *dev, u8 adr,
- u8 *out, u8 outlen)
-{
- struct ngene_command com;
-
-
- com.cmd.hdr.Opcode = CMD_I2C_WRITE;
- com.cmd.hdr.Length = outlen + 1;
- com.cmd.I2CRead.Device = adr << 1;
- memcpy(com.cmd.I2CRead.Data, out, outlen);
- com.in_len = outlen + 1;
- com.out_len = 1;
-
- if (ngene_command(dev, &com) < 0)
- return -EIO;
-
- if (com.cmd.raw8[0] == 1)
- return -EIO;
-
- return 0;
-}
-
static int ngene_command_load_firmware(struct ngene *dev,
u8 *ngene_fw, u32 size)
{
@@ -477,7 +420,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
return 0;
}
-static int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
+int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
{
struct ngene_command com;
@@ -514,11 +457,12 @@ static int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
/****************************************************************************/
-static u8 TSFeatureDecoderSetup[8 * 4] = {
+static u8 TSFeatureDecoderSetup[8 * 5] = {
0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
0x72, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
+ 0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
};
/* Set NGENE I2S Config to 16 bit packed */
@@ -559,7 +503,7 @@ static u8 ITUFeatureDecoderSetup[8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x04, 0x00
};
-static void FillTSBuffer(void *Buffer, int Length, u32 Flags)
+void FillTSBuffer(void *Buffer, int Length, u32 Flags)
{
u32 *ptr = Buffer;
@@ -772,144 +716,7 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
return 0;
}
-
-/****************************************************************************/
-/* I2C **********************************************************************/
-/****************************************************************************/
-
-static void ngene_i2c_set_bus(struct ngene *dev, int bus)
-{
- if (!(dev->card_info->i2c_access & 2))
- return;
- if (dev->i2c_current_bus == bus)
- return;
-
- switch (bus) {
- case 0:
- ngene_command_gpio_set(dev, 3, 0);
- ngene_command_gpio_set(dev, 2, 1);
- break;
-
- case 1:
- ngene_command_gpio_set(dev, 2, 0);
- ngene_command_gpio_set(dev, 3, 1);
- break;
- }
- dev->i2c_current_bus = bus;
-}
-
-static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
- struct i2c_msg msg[], int num)
-{
- struct ngene_channel *chan =
- (struct ngene_channel *)i2c_get_adapdata(adapter);
- struct ngene *dev = chan->dev;
-
- down(&dev->i2c_switch_mutex);
- ngene_i2c_set_bus(dev, chan->number);
-
- if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_read(dev, msg[0].addr,
- msg[0].buf, msg[0].len,
- msg[1].buf, msg[1].len, 0))
- goto done;
-
- if (num == 1 && !(msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_write(dev, msg[0].addr,
- msg[0].buf, msg[0].len))
- goto done;
- if (num == 1 && (msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_read(dev, msg[0].addr, 0, 0,
- msg[0].buf, msg[0].len, 0))
- goto done;
-
- up(&dev->i2c_switch_mutex);
- return -EIO;
-
-done:
- up(&dev->i2c_switch_mutex);
- return num;
-}
-
-
-static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_SMBUS_EMUL;
-}
-
-static struct i2c_algorithm ngene_i2c_algo = {
- .master_xfer = ngene_i2c_master_xfer,
- .functionality = ngene_i2c_functionality,
-};
-
-static int ngene_i2c_init(struct ngene *dev, int dev_nr)
-{
- struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
-
- i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
- adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
-
- strcpy(adap->name, "nGene");
-
- adap->algo = &ngene_i2c_algo;
- adap->algo_data = (void *)&(dev->channel[dev_nr]);
- adap->dev.parent = &dev->pci_dev->dev;
-
- return i2c_add_adapter(adap);
-}
-
-
-/****************************************************************************/
-/* DVB functions and API interface ******************************************/
-/****************************************************************************/
-
-static void swap_buffer(u32 *p, u32 len)
-{
- while (len) {
- *p = swab32(*p);
- p++;
- len -= 4;
- }
-}
-
-
-static void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
-{
- struct ngene_channel *chan = priv;
-
-
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (chan->users > 0)
-#endif
- dvb_dmx_swfilter(&chan->demux, buf, len);
- return 0;
-}
-
-u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
-
-static void *tsout_exchange(void *priv, void *buf, u32 len,
- u32 clock, u32 flags)
-{
- struct ngene_channel *chan = priv;
- struct ngene *dev = chan->dev;
- u32 alen;
-
- alen = dvb_ringbuffer_avail(&dev->tsout_rbuf);
- alen -= alen % 188;
-
- if (alen < len)
- FillTSBuffer(buf + alen, len - alen, flags);
- else
- alen = len;
- dvb_ringbuffer_read(&dev->tsout_rbuf, buf, alen);
- if (flags & DF_SWAP32)
- swap_buffer((u32 *)buf, alen);
- wake_up_interruptible(&dev->tsout_rbuf.queue);
- return buf;
-}
-
-
-static void set_transfer(struct ngene_channel *chan, int state)
+void set_transfer(struct ngene_channel *chan, int state)
{
u8 control = 0, mode = 0, flags = 0;
struct ngene *dev = chan->dev;
@@ -970,85 +777,12 @@ static void set_transfer(struct ngene_channel *chan, int state)
state);
if (!state) {
spin_lock_irq(&chan->state_lock);
- chan->pBufferExchange = 0;
+ chan->pBufferExchange = NULL;
dvb_ringbuffer_flush(&dev->tsout_rbuf);
spin_unlock_irq(&chan->state_lock);
}
}
-static int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
- struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
- struct ngene_channel *chan = dvbdmx->priv;
-
- if (chan->users == 0) {
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (!chan->running)
-#endif
- set_transfer(chan, 1);
- /* msleep(10); */
- }
-
- return ++chan->users;
-}
-
-static int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
- struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
- struct ngene_channel *chan = dvbdmx->priv;
-
- if (--chan->users)
- return chan->users;
-
-#ifndef COMMAND_TIMEOUT_WORKAROUND
- set_transfer(chan, 0);
-#endif
-
- return 0;
-}
-
-
-
-static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
- int (*start_feed)(struct dvb_demux_feed *),
- int (*stop_feed)(struct dvb_demux_feed *),
- void *priv)
-{
- dvbdemux->priv = priv;
-
- dvbdemux->filternum = 256;
- dvbdemux->feednum = 256;
- dvbdemux->start_feed = start_feed;
- dvbdemux->stop_feed = stop_feed;
- dvbdemux->write_to_decoder = 0;
- dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
- DMX_SECTION_FILTERING |
- DMX_MEMORY_BASED_FILTERING);
- return dvb_dmx_init(dvbdemux);
-}
-
-static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
- struct dvb_demux *dvbdemux,
- struct dmx_frontend *hw_frontend,
- struct dmx_frontend *mem_frontend,
- struct dvb_adapter *dvb_adapter)
-{
- int ret;
-
- dmxdev->filternum = 256;
- dmxdev->demux = &dvbdemux->dmx;
- dmxdev->capabilities = 0;
- ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
- if (ret < 0)
- return ret;
-
- hw_frontend->source = DMX_FRONTEND_0;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
- mem_frontend->source = DMX_MEMORY_FE;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
- return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
-}
-
/****************************************************************************/
/* nGene hardware init and release functions ********************************/
@@ -1094,8 +828,8 @@ static void free_idlebuffer(struct ngene *dev,
return;
free_ringbuffer(dev, rb);
for (j = 0; j < tb->NumBuffers; j++, Cur = Cur->Next) {
- Cur->Buffer2 = 0;
- Cur->scList2 = 0;
+ Cur->Buffer2 = NULL;
+ Cur->scList2 = NULL;
Cur->ngeneBuffer.Address_of_first_entry_2 = 0;
Cur->ngeneBuffer.Number_of_entries_2 = 0;
}
@@ -1141,7 +875,7 @@ static int create_ring_buffer(struct pci_dev *pci_dev,
u64 PARingBufferNext;
struct SBufferHeader *Cur, *Next;
- descr->Head = 0;
+ descr->Head = NULL;
descr->MemSize = 0;
descr->PAHead = 0;
descr->NumBuffers = 0;
@@ -1633,69 +1367,6 @@ fail:
-/****************************************************************************/
-/* Switch control (I2C gates, etc.) *****************************************/
-/****************************************************************************/
-
-
-/****************************************************************************/
-/* Demod/tuner attachment ***************************************************/
-/****************************************************************************/
-
-static int tuner_attach_stv6110(struct ngene_channel *chan)
-{
- struct stv090x_config *feconf = (struct stv090x_config *)
- chan->dev->card_info->fe_config[chan->number];
- struct stv6110x_config *tunerconf = (struct stv6110x_config *)
- chan->dev->card_info->tuner_config[chan->number];
- struct stv6110x_devctl *ctl;
-
- ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf,
- &chan->i2c_adapter);
- if (ctl == NULL) {
- printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
- return -ENODEV;
- }
-
- feconf->tuner_init = ctl->tuner_init;
- feconf->tuner_set_mode = ctl->tuner_set_mode;
- feconf->tuner_set_frequency = ctl->tuner_set_frequency;
- feconf->tuner_get_frequency = ctl->tuner_get_frequency;
- feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
- feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
- feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
- feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
- feconf->tuner_set_refclk = ctl->tuner_set_refclk;
- feconf->tuner_get_status = ctl->tuner_get_status;
-
- return 0;
-}
-
-
-static int demod_attach_stv0900(struct ngene_channel *chan)
-{
- struct stv090x_config *feconf = (struct stv090x_config *)
- chan->dev->card_info->fe_config[chan->number];
-
- chan->fe = dvb_attach(stv090x_attach,
- feconf,
- &chan->i2c_adapter,
- chan->number == 0 ? STV090x_DEMODULATOR_0 :
- STV090x_DEMODULATOR_1);
- if (chan->fe == NULL) {
- printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
- return -ENODEV;
- }
-
- if (!dvb_attach(lnbh24_attach, chan->fe, &chan->i2c_adapter, 0,
- 0, chan->dev->card_info->lnb[chan->number])) {
- printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
- dvb_frontend_detach(chan->fe);
- return -ENODEV;
- }
-
- return 0;
-}
/****************************************************************************/
/****************************************************************************/
@@ -1719,7 +1390,7 @@ static void release_channel(struct ngene_channel *chan)
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
dvb_frontend_detach(chan->fe);
- chan->fe = 0;
+ chan->fe = NULL;
}
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
@@ -1751,7 +1422,7 @@ static int init_channel(struct ngene_channel *chan)
if (io & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
if (nr >= STREAM_AUDIOIN1)
chan->DataFormatFlags = DF_SWAP32;
- if (nr == 0 || !one_adapter) {
+ if (nr == 0 || !one_adapter || dev->first_adapter == NULL) {
adapter = &dev->adapter[nr];
ret = dvb_register_adapter(adapter, "nGene",
THIS_MODULE,
@@ -1759,8 +1430,10 @@ static int init_channel(struct ngene_channel *chan)
adapter_nr);
if (ret < 0)
return ret;
+ if (dev->first_adapter == NULL)
+ dev->first_adapter = adapter;
} else {
- adapter = &dev->adapter[0];
+ adapter = dev->first_adapter;
}
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
@@ -1797,6 +1470,7 @@ static int init_channels(struct ngene *dev)
int i, j;
for (i = 0; i < MAX_STREAM; i++) {
+ dev->channel[i].number = i;
if (init_channel(&dev->channel[i]) < 0) {
for (j = i - 1; j >= 0; j--)
release_channel(&dev->channel[j]);
@@ -1810,7 +1484,7 @@ static int init_channels(struct ngene *dev)
/* device probe/remove calls ************************************************/
/****************************************************************************/
-static void __devexit ngene_remove(struct pci_dev *pdev)
+void __devexit ngene_remove(struct pci_dev *pdev)
{
struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev);
int i;
@@ -1820,12 +1494,12 @@ static void __devexit ngene_remove(struct pci_dev *pdev)
release_channel(&dev->channel[i]);
ngene_stop(dev);
ngene_release_buffers(dev);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
-static int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+int __devinit ngene_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
struct ngene *dev;
int stat = 0;
@@ -1868,156 +1542,6 @@ fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, 0);
+ pci_set_drvdata(pci_dev, NULL);
return stat;
}
-
-/****************************************************************************/
-/* Card configs *************************************************************/
-/****************************************************************************/
-
-static struct stv090x_config fe_cineS2 = {
- .device = STV0900,
- .demod_mode = STV090x_DUAL,
- .clk_mode = STV090x_CLK_EXT,
-
- .xtal = 27000000,
- .address = 0x68,
-
- .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
- .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
-
- .repeater_level = STV090x_RPTLEVEL_16,
-
- .adc1_range = STV090x_ADC_1Vpp,
- .adc2_range = STV090x_ADC_1Vpp,
-
- .diseqc_envelope_mode = true,
-};
-
-static struct stv6110x_config tuner_cineS2_0 = {
- .addr = 0x60,
- .refclk = 27000000,
- .clk_div = 1,
-};
-
-static struct stv6110x_config tuner_cineS2_1 = {
- .addr = 0x63,
- .refclk = 27000000,
- .clk_div = 1,
-};
-
-static struct ngene_info ngene_info_cineS2 = {
- .type = NGENE_SIDEWINDER,
- .name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0b, 0x08},
- .tsf = {3, 3},
- .fw_version = 15,
-};
-
-static struct ngene_info ngene_info_satixs2 = {
- .type = NGENE_SIDEWINDER,
- .name = "Mystique SaTiX-S2 Dual",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0b, 0x08},
- .tsf = {3, 3},
- .fw_version = 15,
-};
-
-/****************************************************************************/
-
-
-
-/****************************************************************************/
-/* PCI Subsystem ID *********************************************************/
-/****************************************************************************/
-
-#define NGENE_ID(_subvend, _subdev, _driverdata) { \
- .vendor = NGENE_VID, .device = NGENE_PID, \
- .subvendor = _subvend, .subdevice = _subdev, \
- .driver_data = (unsigned long) &_driverdata }
-
-/****************************************************************************/
-
-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
- NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xdb01, ngene_info_satixs2),
- {0}
-};
-MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
-
-/****************************************************************************/
-/* Init/Exit ****************************************************************/
-/****************************************************************************/
-
-static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
- enum pci_channel_state state)
-{
- printk(KERN_ERR DEVICE_NAME ": PCI error\n");
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
- if (state == pci_channel_io_frozen)
- return PCI_ERS_RESULT_NEED_RESET;
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": link reset\n");
- return 0;
-}
-
-static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": slot reset\n");
- return 0;
-}
-
-static void ngene_resume(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": resume\n");
-}
-
-static struct pci_error_handlers ngene_errors = {
- .error_detected = ngene_error_detected,
- .link_reset = ngene_link_reset,
- .slot_reset = ngene_slot_reset,
- .resume = ngene_resume,
-};
-
-static struct pci_driver ngene_pci_driver = {
- .name = "ngene",
- .id_table = ngene_id_tbl,
- .probe = ngene_probe,
- .remove = __devexit_p(ngene_remove),
- .err_handler = &ngene_errors,
-};
-
-static __init int module_init_ngene(void)
-{
- printk(KERN_INFO
- "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
- return pci_register_driver(&ngene_pci_driver);
-}
-
-static __exit void module_exit_ngene(void)
-{
- pci_unregister_driver(&ngene_pci_driver);
-}
-
-module_init(module_init_ngene);
-module_exit(module_exit_ngene);
-
-MODULE_DESCRIPTION("nGene");
-MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
new file mode 100644
index 00000000000..96013eb353c
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -0,0 +1,172 @@
+/*
+ * ngene-dvb.c: nGene PCIe bridge driver - DVB functions
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+#include <linux/pci.h>
+#include <linux/smp_lock.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/byteorder/generic.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+
+#include "ngene.h"
+
+#define COMMAND_TIMEOUT_WORKAROUND
+
+
+/****************************************************************************/
+/* COMMAND API interface ****************************************************/
+/****************************************************************************/
+
+/****************************************************************************/
+/* DVB functions and API interface ******************************************/
+/****************************************************************************/
+
+static void swap_buffer(u32 *p, u32 len)
+{
+ while (len) {
+ *p = swab32(*p);
+ p++;
+ len -= 4;
+ }
+}
+
+void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
+{
+ struct ngene_channel *chan = priv;
+
+
+#ifdef COMMAND_TIMEOUT_WORKAROUND
+ if (chan->users > 0)
+#endif
+ dvb_dmx_swfilter(&chan->demux, buf, len);
+ return NULL;
+}
+
+u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
+
+void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
+{
+ struct ngene_channel *chan = priv;
+ struct ngene *dev = chan->dev;
+ u32 alen;
+
+ alen = dvb_ringbuffer_avail(&dev->tsout_rbuf);
+ alen -= alen % 188;
+
+ if (alen < len)
+ FillTSBuffer(buf + alen, len - alen, flags);
+ else
+ alen = len;
+ dvb_ringbuffer_read(&dev->tsout_rbuf, buf, alen);
+ if (flags & DF_SWAP32)
+ swap_buffer((u32 *)buf, alen);
+ wake_up_interruptible(&dev->tsout_rbuf.queue);
+ return buf;
+}
+
+
+
+int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ngene_channel *chan = dvbdmx->priv;
+
+ if (chan->users == 0) {
+#ifdef COMMAND_TIMEOUT_WORKAROUND
+ if (!chan->running)
+#endif
+ set_transfer(chan, 1);
+ /* msleep(10); */
+ }
+
+ return ++chan->users;
+}
+
+int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ngene_channel *chan = dvbdmx->priv;
+
+ if (--chan->users)
+ return chan->users;
+
+#ifndef COMMAND_TIMEOUT_WORKAROUND
+ set_transfer(chan, 0);
+#endif
+
+ return 0;
+}
+
+int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv)
+{
+ dvbdemux->priv = priv;
+
+ dvbdemux->filternum = 256;
+ dvbdemux->feednum = 256;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->write_to_decoder = NULL;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+ return dvb_dmx_init(dvbdemux);
+}
+
+int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter)
+{
+ int ret;
+
+ dmxdev->filternum = 256;
+ dmxdev->demux = &dvbdemux->dmx;
+ dmxdev->capabilities = 0;
+ ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+ if (ret < 0)
+ return ret;
+
+ hw_frontend->source = DMX_FRONTEND_0;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+ mem_frontend->source = DMX_MEMORY_FE;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+ return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
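The two my_dvb_* helpers above register a 256-filter software demux and hook up its hardware and memory frontends. A minimal sketch of how a bridge channel would wire them together, modelled on init_channel() in ngene-core.c; the chan->dmxdev, chan->hw_frontend and chan->mem_frontend field names are assumptions about struct ngene_channel, not taken from this hunk:

#include "ngene.h"

/* Sketch only: struct ngene_channel field names other than ->demux are assumed. */
static int example_wire_demux(struct ngene_channel *chan,
			      struct dvb_adapter *adapter)
{
	struct dvb_demux *dvbdemux = &chan->demux;
	int ret;

	/* software demux, fed by the ngene_start_feed/ngene_stop_feed callbacks */
	ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
				      ngene_start_feed, ngene_stop_feed,
				      chan);
	if (ret < 0)
		return ret;

	/* demux device node plus hardware and memory frontends */
	return my_dvb_dmxdev_ts_card_init(&chan->dmxdev, dvbdemux,
					  &chan->hw_frontend,
					  &chan->mem_frontend, adapter);
}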
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c
new file mode 100644
index 00000000000..2ef54ca6bad
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-i2c.c
@@ -0,0 +1,179 @@
+/*
+ * ngene-i2c.c: nGene PCIe bridge driver i2c functions
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* FIXME - some of these can probably be removed */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/smp_lock.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/byteorder/generic.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+
+#include "ngene.h"
+
+/* Firmware command for i2c operations */
+static int ngene_command_i2c_read(struct ngene *dev, u8 adr,
+ u8 *out, u8 outlen, u8 *in, u8 inlen, int flag)
+{
+ struct ngene_command com;
+
+ com.cmd.hdr.Opcode = CMD_I2C_READ;
+ com.cmd.hdr.Length = outlen + 3;
+ com.cmd.I2CRead.Device = adr << 1;
+ memcpy(com.cmd.I2CRead.Data, out, outlen);
+ com.cmd.I2CRead.Data[outlen] = inlen;
+ com.cmd.I2CRead.Data[outlen + 1] = 0;
+ com.in_len = outlen + 3;
+ com.out_len = inlen + 1;
+
+ if (ngene_command(dev, &com) < 0)
+ return -EIO;
+
+ if ((com.cmd.raw8[0] >> 1) != adr)
+ return -EIO;
+
+ if (flag)
+ memcpy(in, com.cmd.raw8, inlen + 1);
+ else
+ memcpy(in, com.cmd.raw8 + 1, inlen);
+ return 0;
+}
+
+static int ngene_command_i2c_write(struct ngene *dev, u8 adr,
+ u8 *out, u8 outlen)
+{
+ struct ngene_command com;
+
+
+ com.cmd.hdr.Opcode = CMD_I2C_WRITE;
+ com.cmd.hdr.Length = outlen + 1;
+ com.cmd.I2CRead.Device = adr << 1;
+ memcpy(com.cmd.I2CRead.Data, out, outlen);
+ com.in_len = outlen + 1;
+ com.out_len = 1;
+
+ if (ngene_command(dev, &com) < 0)
+ return -EIO;
+
+ if (com.cmd.raw8[0] == 1)
+ return -EIO;
+
+ return 0;
+}
+
+static void ngene_i2c_set_bus(struct ngene *dev, int bus)
+{
+ if (!(dev->card_info->i2c_access & 2))
+ return;
+ if (dev->i2c_current_bus == bus)
+ return;
+
+ switch (bus) {
+ case 0:
+ ngene_command_gpio_set(dev, 3, 0);
+ ngene_command_gpio_set(dev, 2, 1);
+ break;
+
+ case 1:
+ ngene_command_gpio_set(dev, 2, 0);
+ ngene_command_gpio_set(dev, 3, 1);
+ break;
+ }
+ dev->i2c_current_bus = bus;
+}
+
+static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct ngene_channel *chan =
+ (struct ngene_channel *)i2c_get_adapdata(adapter);
+ struct ngene *dev = chan->dev;
+
+ down(&dev->i2c_switch_mutex);
+ ngene_i2c_set_bus(dev, chan->number);
+
+ if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_read(dev, msg[0].addr,
+ msg[0].buf, msg[0].len,
+ msg[1].buf, msg[1].len, 0))
+ goto done;
+
+ if (num == 1 && !(msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_write(dev, msg[0].addr,
+ msg[0].buf, msg[0].len))
+ goto done;
+ if (num == 1 && (msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0,
+ msg[0].buf, msg[0].len, 0))
+ goto done;
+
+ up(&dev->i2c_switch_mutex);
+ return -EIO;
+
+done:
+ up(&dev->i2c_switch_mutex);
+ return num;
+}
+
+
+static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm ngene_i2c_algo = {
+ .master_xfer = ngene_i2c_master_xfer,
+ .functionality = ngene_i2c_functionality,
+};
+
+int ngene_i2c_init(struct ngene *dev, int dev_nr)
+{
+ struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
+
+ i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
+ adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
+
+ strcpy(adap->name, "nGene");
+
+ adap->algo = &ngene_i2c_algo;
+ adap->algo_data = (void *)&(dev->channel[dev_nr]);
+ adap->dev.parent = &dev->pci_dev->dev;
+
+ return i2c_add_adapter(adap);
+}
+
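For reference, this is how a tuner or demod driver typically drives the adapter registered by ngene_i2c_init(); a combined write/read pair like this is the first case ngene_i2c_master_xfer() recognises and forwards to ngene_command_i2c_read(). Illustrative sketch only, the helper name is made up:

#include <linux/i2c.h>

static int example_read_reg(struct i2c_adapter *adap, u8 addr,
			    u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};

	/* two messages: register write followed by a one-byte read */
	return i2c_transfer(adap, msg, 2) == 2 ? 0 : -EREMOTEIO;
}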
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index a7eb2984631..676fcbb7902 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -39,6 +39,8 @@
#include "dvb_frontend.h"
#include "dvb_ringbuffer.h"
+#define DEVICE_NAME "ngene"
+
#define NGENE_VID 0x18c3
#define NGENE_PID 0x0720
@@ -752,6 +754,7 @@ struct ngene {
spinlock_t cmd_lock;
struct dvb_adapter adapter[MAX_STREAM];
+ struct dvb_adapter *first_adapter; /* "one_adapter" modprobe opt */
struct ngene_channel channel[MAX_STREAM];
struct ngene_info *card_info;
@@ -853,6 +856,33 @@ struct ngene_buffer {
#endif
+/* Provided by ngene-core.c */
+int __devinit ngene_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id);
+void __devexit ngene_remove(struct pci_dev *pdev);
+int ngene_command(struct ngene *dev, struct ngene_command *com);
+int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level);
+void set_transfer(struct ngene_channel *chan, int state);
+void FillTSBuffer(void *Buffer, int Length, u32 Flags);
+
+/* Provided by ngene-i2c.c */
+int ngene_i2c_init(struct ngene *dev, int dev_nr);
+
+/* Provided by ngene-dvb.c */
+void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
+void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
+int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed);
+int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed);
+int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv);
+int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter);
+
#endif
/* LocalWords: Endif
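With ngene_probe()/ngene_remove() exported here, the PCI glue dropped from ngene-core.c above is expected to move into a card-specific source file. A sketch of that glue, reusing the NGENE_ID() shape and one subsystem ID from the removed table; ngene_info_cineS2 is assumed to be defined alongside it:

#define NGENE_ID(_subvend, _subdev, _driverdata) { \
	.vendor = NGENE_VID, .device = NGENE_PID, \
	.subvendor = _subvend, .subdevice = _subdev, \
	.driver_data = (unsigned long)&_driverdata }

static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
	NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ngene_id_tbl);

static struct pci_driver ngene_pci_driver = {
	.name     = "ngene",
	.id_table = ngene_id_tbl,
	.probe    = ngene_probe,
	.remove   = __devexit_p(ngene_remove),
};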
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 6aded234aa6..69ad94934ec 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -1,5 +1,5 @@
/*
- * driver for Earthsoft PT1
+ * driver for Earthsoft PT1/PT2
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -77,6 +77,10 @@ struct pt1 {
struct pt1_adapter *adaps[PT1_NR_ADAPS];
struct pt1_table *tables;
struct task_struct *kthread;
+
+ struct mutex lock;
+ int power;
+ int reset;
};
struct pt1_adapter {
@@ -95,6 +99,11 @@ struct pt1_adapter {
struct dvb_frontend *fe;
int (*orig_set_voltage)(struct dvb_frontend *fe,
fe_sec_voltage_t voltage);
+ int (*orig_sleep)(struct dvb_frontend *fe);
+ int (*orig_init)(struct dvb_frontend *fe);
+
+ fe_sec_voltage_t voltage;
+ int sleep;
};
#define pt1_printk(level, pt1, format, arg...) \
@@ -219,8 +228,10 @@ static int pt1_do_enable_ram(struct pt1 *pt1)
static int pt1_enable_ram(struct pt1 *pt1)
{
int i, ret;
+ int phase;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- for (i = 0; i < 10; i++) {
+ phase = pt1->pdev->device == 0x211a ? 128 : 166;
+ for (i = 0; i < phase; i++) {
ret = pt1_do_enable_ram(pt1);
if (ret < 0)
return ret;
@@ -485,33 +496,47 @@ static int pt1_stop_feed(struct dvb_demux_feed *feed)
}
static void
-pt1_set_power(struct pt1 *pt1, int power, int lnb, int reset)
+pt1_update_power(struct pt1 *pt1)
{
- pt1_write_reg(pt1, 1, power | lnb << 1 | !reset << 3);
+ int bits;
+ int i;
+ struct pt1_adapter *adap;
+ static const int sleep_bits[] = {
+ 1 << 4,
+ 1 << 6 | 1 << 7,
+ 1 << 5,
+ 1 << 6 | 1 << 8,
+ };
+
+ bits = pt1->power | !pt1->reset << 3;
+ mutex_lock(&pt1->lock);
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1->adaps[i];
+ switch (adap->voltage) {
+ case SEC_VOLTAGE_13: /* actually 11V */
+ bits |= 1 << 1;
+ break;
+ case SEC_VOLTAGE_18: /* actually 15V */
+ bits |= 1 << 1 | 1 << 2;
+ break;
+ default:
+ break;
+ }
+
+ /* XXX: The bits should be changed depending on adap->sleep. */
+ bits |= sleep_bits[i];
+ }
+ pt1_write_reg(pt1, 1, bits);
+ mutex_unlock(&pt1->lock);
}
static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct pt1_adapter *adap;
- int lnb;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
-
- switch (voltage) {
- case SEC_VOLTAGE_13: /* actually 11V */
- lnb = 2;
- break;
- case SEC_VOLTAGE_18: /* actually 15V */
- lnb = 3;
- break;
- case SEC_VOLTAGE_OFF:
- lnb = 0;
- break;
- default:
- return -EINVAL;
- }
-
- pt1_set_power(adap->pt1, 1, lnb, 0);
+ adap->voltage = voltage;
+ pt1_update_power(adap->pt1);
if (adap->orig_set_voltage)
return adap->orig_set_voltage(fe, voltage);
@@ -519,9 +544,37 @@ static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
return 0;
}
+static int pt1_sleep(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 1;
+ pt1_update_power(adap->pt1);
+
+ if (adap->orig_sleep)
+ return adap->orig_sleep(fe);
+ else
+ return 0;
+}
+
+static int pt1_wakeup(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 0;
+ pt1_update_power(adap->pt1);
+ schedule_timeout_uninterruptible((HZ + 999) / 1000);
+
+ if (adap->orig_init)
+ return adap->orig_init(fe);
+ else
+ return 0;
+}
+
static void pt1_free_adapter(struct pt1_adapter *adap)
{
- dvb_unregister_frontend(adap->fe);
dvb_net_release(&adap->net);
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
@@ -534,7 +587,7 @@ static void pt1_free_adapter(struct pt1_adapter *adap)
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct pt1_adapter *
-pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
+pt1_alloc_adapter(struct pt1 *pt1)
{
struct pt1_adapter *adap;
void *buf;
@@ -551,8 +604,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
adap->pt1 = pt1;
- adap->orig_set_voltage = fe->ops.set_voltage;
- fe->ops.set_voltage = pt1_set_voltage;
+ adap->voltage = SEC_VOLTAGE_OFF;
+ adap->sleep = 1;
buf = (u8 *)__get_free_page(GFP_KERNEL);
if (!buf) {
@@ -593,17 +646,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
dvb_net_init(dvb_adap, &adap->net, &demux->dmx);
- ret = dvb_register_frontend(dvb_adap, fe);
- if (ret < 0)
- goto err_net_release;
- adap->fe = fe;
-
return adap;
-err_net_release:
- dvb_net_release(&adap->net);
- adap->demux.dmx.close(&adap->demux.dmx);
- dvb_dmxdev_release(&adap->dmxdev);
err_dmx_release:
dvb_dmx_release(demux);
err_unregister_adapter:
@@ -623,6 +667,62 @@ static void pt1_cleanup_adapters(struct pt1 *pt1)
pt1_free_adapter(pt1->adaps[i]);
}
+static int pt1_init_adapters(struct pt1 *pt1)
+{
+ int i;
+ struct pt1_adapter *adap;
+ int ret;
+
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1_alloc_adapter(pt1);
+ if (IS_ERR(adap)) {
+ ret = PTR_ERR(adap);
+ goto err;
+ }
+
+ adap->index = i;
+ pt1->adaps[i] = adap;
+ }
+ return 0;
+
+err:
+ while (i--)
+ pt1_free_adapter(pt1->adaps[i]);
+
+ return ret;
+}
+
+static void pt1_cleanup_frontend(struct pt1_adapter *adap)
+{
+ dvb_unregister_frontend(adap->fe);
+}
+
+static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
+{
+ int ret;
+
+ adap->orig_set_voltage = fe->ops.set_voltage;
+ adap->orig_sleep = fe->ops.sleep;
+ adap->orig_init = fe->ops.init;
+ fe->ops.set_voltage = pt1_set_voltage;
+ fe->ops.sleep = pt1_sleep;
+ fe->ops.init = pt1_wakeup;
+
+ ret = dvb_register_frontend(&adap->adap, fe);
+ if (ret < 0)
+ return ret;
+
+ adap->fe = fe;
+ return 0;
+}
+
+static void pt1_cleanup_frontends(struct pt1 *pt1)
+{
+ int i;
+ for (i = 0; i < PT1_NR_ADAPS; i++)
+ pt1_cleanup_frontend(pt1->adaps[i]);
+}
+
struct pt1_config {
struct va1j5jf8007s_config va1j5jf8007s_config;
struct va1j5jf8007t_config va1j5jf8007t_config;
@@ -630,29 +730,63 @@ struct pt1_config {
static const struct pt1_config pt1_configs[2] = {
{
- { .demod_address = 0x1b },
- { .demod_address = 0x1a },
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
}, {
- { .demod_address = 0x19 },
- { .demod_address = 0x18 },
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
},
};
-static int pt1_init_adapters(struct pt1 *pt1)
+static const struct pt1_config pt2_configs[2] = {
+ {
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ }, {
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ },
+};
+
+static int pt1_init_frontends(struct pt1 *pt1)
{
int i, j;
struct i2c_adapter *i2c_adap;
- const struct pt1_config *config;
+ const struct pt1_config *configs, *config;
struct dvb_frontend *fe[4];
- struct pt1_adapter *adap;
int ret;
i = 0;
j = 0;
i2c_adap = &pt1->i2c_adap;
+ configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
do {
- config = &pt1_configs[i / 2];
+ config = &configs[i / 2];
fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
i2c_adap);
@@ -681,11 +815,9 @@ static int pt1_init_adapters(struct pt1 *pt1)
} while (i < 4);
do {
- adap = pt1_alloc_adapter(pt1, fe[j]);
- if (IS_ERR(adap))
+ ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
+ if (ret < 0)
goto err;
- adap->index = j;
- pt1->adaps[j] = adap;
} while (++j < 4);
return 0;
@@ -695,7 +827,7 @@ err:
fe[i]->ops.release(fe[i]);
while (j--)
- pt1_free_adapter(pt1->adaps[j]);
+ dvb_unregister_frontend(fe[j]);
return ret;
}
@@ -890,9 +1022,12 @@ static void __devexit pt1_remove(struct pci_dev *pdev)
kthread_stop(pt1->kthread);
pt1_cleanup_tables(pt1);
- pt1_cleanup_adapters(pt1);
+ pt1_cleanup_frontends(pt1);
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+ pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
pci_set_drvdata(pdev, NULL);
kfree(pt1);
@@ -936,10 +1071,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_iounmap;
}
+ mutex_init(&pt1->lock);
pt1->pdev = pdev;
pt1->regs = regs;
pci_set_drvdata(pdev, pt1);
+ ret = pt1_init_adapters(pt1);
+ if (ret < 0)
+ goto err_kfree;
+
+ mutex_init(&pt1->lock);
+
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+
i2c_adap = &pt1->i2c_adap;
i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = &pt1_i2c_algo;
@@ -948,9 +1094,7 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i2c_set_adapdata(i2c_adap, pt1);
ret = i2c_add_adapter(i2c_adap);
if (ret < 0)
- goto err_kfree;
-
- pt1_set_power(pt1, 0, 0, 1);
+ goto err_pt1_cleanup_adapters;
pt1_i2c_init(pt1);
pt1_i2c_wait(pt1);
@@ -979,19 +1123,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt1_init_streams(pt1);
- pt1_set_power(pt1, 1, 0, 1);
+ pt1->power = 1;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 49) / 50);
- pt1_set_power(pt1, 1, 0, 0);
+ pt1->reset = 0;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- ret = pt1_init_adapters(pt1);
+ ret = pt1_init_frontends(pt1);
if (ret < 0)
goto err_pt1_disable_ram;
ret = pt1_init_tables(pt1);
if (ret < 0)
- goto err_pt1_cleanup_adapters;
+ goto err_pt1_cleanup_frontends;
kthread = kthread_run(pt1_thread, pt1, "pt1");
if (IS_ERR(kthread)) {
@@ -1004,11 +1150,15 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_pt1_cleanup_tables:
pt1_cleanup_tables(pt1);
-err_pt1_cleanup_adapters:
- pt1_cleanup_adapters(pt1);
+err_pt1_cleanup_frontends:
+ pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+err_pt1_cleanup_adapters:
+ pt1_cleanup_adapters(pt1);
err_i2c_del_adapter:
i2c_del_adapter(i2c_adap);
err_kfree:
@@ -1027,6 +1177,7 @@ err:
static struct pci_device_id pt1_id_table[] = {
{ PCI_DEVICE(0x10ee, 0x211a) },
+ { PCI_DEVICE(0x10ee, 0x222a) },
{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);
@@ -1054,5 +1205,5 @@ module_init(pt1_init);
module_exit(pt1_cleanup);
MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
-MODULE_DESCRIPTION("Earthsoft PT1 Driver");
+MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");
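Referring back to pt1_update_power() above, a worked value may help. With power = 1, reset = 0 and all four adapters left at SEC_VOLTAGE_OFF, the loop unconditionally ORs in every sleep_bits entry (as the XXX comment notes), so the value written to register 1 is composed as follows (illustration only, derived from the constants in the code):

	power | !reset << 3                                  -> 0x009
	sleep_bits[0..3]: bit 4, bits 6|7, bit 5, bits 6|8   -> 0x1f0
	value passed to pt1_write_reg(pt1, 1, bits)          -> 0x1f9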
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.c b/drivers/media/dvb/pt1/va1j5jf8007s.c
index fc6594996e7..451641c0c1d 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.c
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -580,7 +580,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007s_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-S",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
@@ -628,28 +628,50 @@ static int va1j5jf8007s_prepare_1(struct va1j5jf8007s_state *state)
return 0;
}
-static const u8 va1j5jf8007s_prepare_bufs[][2] = {
+static const u8 va1j5jf8007s_20mhz_prepare_bufs[][2] = {
{0x04, 0x02}, {0x0d, 0x55}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01},
{0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0},
{0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69},
{0x87, 0x04}, {0x8e, 0x02}, {0xa3, 0xf7}, {0xa5, 0xc0},
};
+static const u8 va1j5jf8007s_25mhz_prepare_bufs[][2] = {
+ {0x04, 0x02}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a},
+ {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89},
+ {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04},
+ {0x8e, 0x26}, {0xa3, 0xf7}, {0xa5, 0xc0},
+};
+
static int va1j5jf8007s_prepare_2(struct va1j5jf8007s_state *state)
{
+ const u8 (*bufs)[2];
+ int size;
u8 addr;
u8 buf[2];
struct i2c_msg msg;
int i;
+ switch (state->config->frequency) {
+ case VA1J5JF8007S_20MHZ:
+ bufs = va1j5jf8007s_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007S_25MHZ:
+ bufs = va1j5jf8007s_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
addr = state->config->demod_address;
msg.addr = addr;
msg.flags = 0;
msg.len = 2;
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007s_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007s_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.h b/drivers/media/dvb/pt1/va1j5jf8007s.h
index aa228a81635..b7d6f05a0e0 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.h
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007S_H
#define VA1J5JF8007S_H
+enum va1j5jf8007s_frequency {
+ VA1J5JF8007S_20MHZ,
+ VA1J5JF8007S_25MHZ,
+};
+
struct va1j5jf8007s_config {
u8 demod_address;
+ enum va1j5jf8007s_frequency frequency;
};
struct i2c_adapter;
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.c b/drivers/media/dvb/pt1/va1j5jf8007t.c
index 3db4f3e34e8..0f085c3e571 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.c
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -429,7 +429,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007t_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-T",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
.type = FE_OFDM,
.frequency_min = 90000000,
.frequency_max = 770000000,
@@ -448,29 +448,50 @@ static struct dvb_frontend_ops va1j5jf8007t_ops = {
.release = va1j5jf8007t_release,
};
-static const u8 va1j5jf8007t_prepare_bufs[][2] = {
+static const u8 va1j5jf8007t_20mhz_prepare_bufs[][2] = {
{0x03, 0x90}, {0x14, 0x8f}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2},
{0x22, 0x83}, {0x31, 0x0d}, {0x32, 0xe0}, {0x39, 0xd3}, {0x3a, 0x00},
{0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x02}, {0x76, 0x4e}, {0x77, 0x03},
{0xef, 0x01}
};
+static const u8 va1j5jf8007t_25mhz_prepare_bufs[][2] = {
+ {0x03, 0x90}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2}, {0x22, 0x83},
+ {0x3a, 0x00}, {0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x0a}, {0x76, 0x4c},
+ {0x77, 0x03}, {0xef, 0x01}
+};
+
int va1j5jf8007t_prepare(struct dvb_frontend *fe)
{
struct va1j5jf8007t_state *state;
+ const u8 (*bufs)[2];
+ int size;
u8 buf[2];
struct i2c_msg msg;
int i;
state = fe->demodulator_priv;
+ switch (state->config->frequency) {
+ case VA1J5JF8007T_20MHZ:
+ bufs = va1j5jf8007t_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007T_25MHZ:
+ bufs = va1j5jf8007t_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
msg.addr = state->config->demod_address;
msg.flags = 0;
msg.len = sizeof(buf);
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007t_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007t_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.h b/drivers/media/dvb/pt1/va1j5jf8007t.h
index ed49906f776..2903be519ef 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.h
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007T_H
#define VA1J5JF8007T_H
+enum va1j5jf8007t_frequency {
+ VA1J5JF8007T_20MHZ,
+ VA1J5JF8007T_25MHZ,
+};
+
struct va1j5jf8007t_config {
u8 demod_address;
+ enum va1j5jf8007t_frequency frequency;
};
struct i2c_adapter;
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 38915591c6e..a6be529eec5 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -708,7 +708,7 @@ static void gpioirq(unsigned long cookie)
#ifdef CONFIG_DVB_AV7110_OSD
-static int dvb_osd_ioctl(struct inode *inode, struct file *file,
+static int dvb_osd_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -727,7 +727,7 @@ static int dvb_osd_ioctl(struct inode *inode, struct file *file,
static const struct file_operations dvb_osd_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
};
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 53884814161..13efba942da 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1089,7 +1089,7 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
}
-static int dvb_video_ioctl(struct inode *inode, struct file *file,
+static int dvb_video_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1297,7 +1297,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_audio_ioctl(struct inode *inode, struct file *file,
+static int dvb_audio_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1517,7 +1517,7 @@ static int dvb_audio_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_video_fops = {
.owner = THIS_MODULE,
.write = dvb_video_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_video_open,
.release = dvb_video_release,
.poll = dvb_video_poll,
@@ -1535,7 +1535,7 @@ static struct dvb_device dvbdev_video = {
static const struct file_operations dvb_audio_fops = {
.owner = THIS_MODULE,
.write = dvb_audio_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_audio_open,
.release = dvb_audio_release,
.poll = dvb_audio_poll,
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index ac7779c45c5..4eba35a018e 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -248,8 +248,7 @@ static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
return mask;
}
-static int dvb_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
@@ -350,7 +349,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_read,
.write = dvb_ca_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_ca_open,
.release = dvb_generic_release,
.poll = dvb_ca_poll,
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 49c2a817a06..46171439633 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -35,7 +35,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "budget.h"
@@ -54,6 +54,8 @@
#include "tda1002x.h"
#include "tda827x.h"
+#define MODULE_NAME "budget_ci"
+
/*
* Regarding DEBIADDR_IR:
* Some CI modules hang if random addresses are read.
@@ -80,12 +82,6 @@
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
-/*
- * Milliseconds during which a key is regarded as pressed.
- * If an identical command arrives within this time, the timer will start over.
- */
-#define IR_KEYPRESS_TIMEOUT 250
-
/* RC5 device wildcard */
#define IR_DEVICE_ANY 255
@@ -102,12 +98,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_ci_ir {
struct input_dev *dev;
struct tasklet_struct msp430_irq_tasklet;
- struct timer_list timer_keyup;
char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
char phys[32];
- struct ir_input_state state;
int rc5_device;
- u32 last_raw;
u32 ir_key;
bool have_command;
};
@@ -122,18 +115,11 @@ struct budget_ci {
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
-static void msp430_ir_keyup(unsigned long data)
-{
- struct budget_ci_ir *ir = (struct budget_ci_ir *) data;
- ir_input_nokey(ir->dev, &ir->state);
-}
-
static void msp430_ir_interrupt(unsigned long data)
{
struct budget_ci *budget_ci = (struct budget_ci *) data;
struct input_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
- u32 raw;
/*
* The msp430 chip can generate two different bytes, command and device
@@ -169,20 +155,12 @@ static void msp430_ir_interrupt(unsigned long data)
return;
budget_ci->ir.have_command = false;
+ /* FIXME: We should generate complete scancodes with device info */
if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
budget_ci->ir.rc5_device != (command & 0x1f))
return;
- /* Is this a repeated key sequence? (same device, command, toggle) */
- raw = budget_ci->ir.ir_key | (command << 8);
- if (budget_ci->ir.last_raw != raw || !timer_pending(&budget_ci->ir.timer_keyup)) {
- ir_input_nokey(dev, &budget_ci->ir.state);
- ir_input_keydown(dev, &budget_ci->ir.state,
- budget_ci->ir.ir_key);
- budget_ci->ir.last_raw = raw;
- }
-
- mod_timer(&budget_ci->ir.timer_keyup, jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT));
+ ir_keydown(dev, budget_ci->ir.ir_key, (command & 0x20) ? 1 : 0);
}
static int msp430_ir_init(struct budget_ci *budget_ci)
@@ -190,7 +168,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
struct saa7146_dev *saa = budget_ci->budget.dev;
struct input_dev *input_dev = budget_ci->ir.dev;
int error;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes = NULL;
budget_ci->ir.dev = input_dev = input_allocate_device();
@@ -230,7 +208,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
@@ -239,22 +217,15 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1017:
case 0x101a:
/* for the Technotrend 1500 bundled remote */
- ir_codes = &ir_codes_tt_1500_table;
+ ir_codes = RC_MAP_TT_1500;
break;
default:
/* unknown remote */
- ir_codes = &ir_codes_budget_ci_old_table;
+ ir_codes = RC_MAP_BUDGET_CI_OLD;
break;
}
- ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5);
-
- /* initialise the key-up timeout handler */
- init_timer(&budget_ci->ir.timer_keyup);
- budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
- budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
- budget_ci->ir.last_raw = 0xffff; /* An impossible value */
- error = ir_input_register(input_dev, ir_codes, NULL);
+ error = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
return error;
@@ -282,9 +253,6 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
- del_timer_sync(&dev->timer);
- ir_input_nokey(dev, &budget_ci->ir.state);
-
ir_input_unregister(dev);
}
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 1500210c06c..874a10a9d49 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -442,6 +442,7 @@ static struct stv090x_config tt1600_stv090x_config = {
.repeater_level = STV090x_RPTLEVEL_16,
.tuner_init = NULL,
+ .tuner_sleep = NULL,
.tuner_set_mode = NULL,
.tuner_set_frequency = NULL,
.tuner_get_frequency = NULL,
@@ -627,22 +628,36 @@ static void frontend_init(struct budget *budget)
&tt1600_stv6110x_config,
&budget->i2c_adap);
- tt1600_stv090x_config.tuner_init = ctl->tuner_init;
- tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
- tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
- tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
- tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
- tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
- tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
- tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
- tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
- tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
-
- dvb_attach(isl6423_attach,
- budget->dvb_frontend,
- &budget->i2c_adap,
- &tt1600_isl6423_config);
-
+ if (ctl) {
+ tt1600_stv090x_config.tuner_init = ctl->tuner_init;
+ tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep;
+ tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
+ tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
+ tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
+ tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
+ tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
+ tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
+ tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
+
+ /* call the init function once to initialize
+ tuner's clock output divider and demod's
+ master clock */
+ if (budget->dvb_frontend->ops.init)
+ budget->dvb_frontend->ops.init(budget->dvb_frontend);
+
+ if (dvb_attach(isl6423_attach,
+ budget->dvb_frontend,
+ &budget->i2c_adap,
+ &tt1600_isl6423_config) == NULL) {
+ printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__);
+ goto error_out;
+ }
+ } else {
+ printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
+ goto error_out;
+ }
}
}
break;
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 53baccbab17..fe1b8037b24 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1257,7 +1257,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
if(!dec->irq_urb) {
return -ENOMEM;
}
- dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE,
GFP_ATOMIC, &dec->irq_dma_handle);
if(!dec->irq_buffer) {
usb_free_urb(dec->irq_urb);
@@ -1550,8 +1550,8 @@ static void ttusb_dec_exit_rc(struct ttusb_dec *dec)
usb_free_urb(dec->irq_urb);
- usb_buffer_free(dec->udev,IRQ_PACKET_SIZE,
- dec->irq_buffer, dec->irq_dma_handle);
+ usb_free_coherent(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
if (dec->rc_input_dev) {
input_unregister_device(dec->rc_input_dev);
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 02a9cefc9a0..353b8285594 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -144,7 +144,10 @@ struct amradio_device {
int initialized;
};
-#define vdev_to_amradio(r) container_of(r, struct amradio_device, videodev)
+static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct amradio_device, v4l2_dev);
+}
/* USB Device ID List */
static struct usb_device_id usb_amradio_device_table[] = {
@@ -284,13 +287,12 @@ static int amradio_set_stereo(struct amradio_device *radio, char argument)
*/
static void usb_amradio_disconnect(struct usb_interface *intf)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
radio->usbdev = NULL;
mutex_unlock(&radio->lock);
- usb_set_intfdata(intf, NULL);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
}
@@ -500,7 +502,7 @@ out:
/* open device - amradio_start() and amradio_setfreq() */
static int usb_amradio_open(struct file *file)
{
- struct amradio_device *radio = vdev_to_amradio(video_devdata(file));
+ struct amradio_device *radio = video_drvdata(file);
int retval = 0;
mutex_lock(&radio->lock);
@@ -566,7 +568,7 @@ unlock:
/* Suspend device - stop device. Need to be checked and fixed */
static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
@@ -584,7 +586,7 @@ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
/* Resume device - start device. Need to be checked and fixed */
static int usb_amradio_resume(struct usb_interface *intf)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
@@ -633,9 +635,7 @@ static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
static void usb_amradio_video_device_release(struct video_device *videodev)
{
- struct amradio_device *radio = vdev_to_amradio(videodev);
-
- v4l2_device_unregister(&radio->v4l2_dev);
+ struct amradio_device *radio = video_get_drvdata(videodev);
/* free rest memory */
kfree(radio->buffer);
@@ -693,7 +693,6 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_vdev;
}
- usb_set_intfdata(intf, radio);
return 0;
err_vdev:
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9644cf760aa..ad9e6f9c22e 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -45,6 +45,10 @@ config VIDEO_TUNER
tristate
depends on MEDIA_TUNER
+config V4L2_MEM2MEM_DEV
+ tristate
+ depends on VIDEOBUF_GEN
+
#
# Multimedia Video device configuration
#
@@ -480,6 +484,12 @@ config VIDEO_ADV7343
To compile this driver as a module, choose M here: the
module will be called adv7343.
+config VIDEO_AK881X
+ tristate "AK8813/AK8814 video encoders"
+ depends on I2C
+ help
+ Video output driver for AKM AK8813 and AK8814 TV encoders
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -520,6 +530,13 @@ config DISPLAY_DAVINCI_DM646X_EVM
To compile this driver as a module, choose M here: the
module will be called vpif_display.
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on VIDEO_DEV && ARCH_SHMOBILE
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
config CAPTURE_DAVINCI_DM646X_EVM
tristate "DM646x EVM Video Capture"
depends on VIDEO_DEV && MACH_DAVINCI_DM6467_EVM
@@ -542,7 +559,8 @@ config VIDEO_DAVINCI_VPIF
config VIDEO_VIVI
tristate "Virtual Video Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FONTS
+ select FONT_8x16
select VIDEOBUF_VMALLOC
default n
---help---
@@ -613,6 +631,8 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
+source "drivers/media/video/omap/Kconfig"
+
source "drivers/media/video/bt8xx/Kconfig"
config VIDEO_PMS
@@ -647,7 +667,7 @@ config VIDEO_CQCAM
config VIDEO_W9966
tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
- depends on PARPORT_1284 && PARPORT && VIDEO_V4L1
+ depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
help
Video4linux driver for Winbond's w9966 based Webcams.
Currently tested with the LifeView FlyCam Supra.
@@ -740,7 +760,7 @@ source "drivers/media/video/zoran/Kconfig"
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
- depends on PCI && SONY_LAPTOP && VIDEO_V4L1
+ depends on PCI && SONY_LAPTOP && VIDEO_V4L2
---help---
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
@@ -807,7 +827,7 @@ source "drivers/media/video/saa7164/Kconfig"
config VIDEO_M32R_AR
tristate "AR devices"
- depends on M32R && VIDEO_V4L1
+ depends on M32R && VIDEO_V4L2
---help---
This is a video4linux driver for the Renesas AR (Artificial Retina)
camera module.
@@ -1107,3 +1127,27 @@ config USB_S2255
endif # V4L_USB_DRIVERS
endif # VIDEO_CAPTURE_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+ bool "Memory-to-memory multimedia devices"
+ depends on VIDEO_V4L2
+ default n
+ ---help---
+ Say Y here to enable selecting drivers for V4L devices that
+ use system memory for both source and destination buffers, as opposed
+ to capture and output drivers, which use memory buffers for just
+ one of those.
+
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_MEM2MEM_TESTDEV
+ tristate "Virtual test device for mem2mem framework"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+
+endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index c51c386559f..cc93859d316 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -10,7 +10,8 @@ stkwebcam-objs := stk-webcam.o stk-sensor.o
omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o
+videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+ v4l2-event.o
# V4L2 core modules
@@ -117,6 +118,8 @@ obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
@@ -149,8 +152,11 @@ obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_CX18) += cx18/
obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
+obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
+
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
@@ -160,6 +166,10 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
+
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
@@ -169,6 +179,8 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+obj-$(CONFIG_ARCH_OMAP) += omap/
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c
new file mode 100644
index 00000000000..35390d4717b
--- /dev/null
+++ b/drivers/media/video/ak881x.c
@@ -0,0 +1,368 @@
+/*
+ * Driver for AK8813 / AK8814 TV-encoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/ak881x.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#define AK881X_INTERFACE_MODE 0
+#define AK881X_VIDEO_PROCESS1 1
+#define AK881X_VIDEO_PROCESS2 2
+#define AK881X_VIDEO_PROCESS3 3
+#define AK881X_DAC_MODE 5
+#define AK881X_STATUS 0x24
+#define AK881X_DEVICE_ID 0x25
+#define AK881X_DEVICE_REVISION 0x26
+
+struct ak881x {
+ struct v4l2_subdev subdev;
+ struct ak881x_pdata *pdata;
+ unsigned int lines;
+ int id; /* V4L2_IDENT_AK881X code from v4l2-chip-ident.h, selected by DEVICE_ID */
+ char revision; /* DEVICE_REVISION content */
+};
+
+static int reg_read(struct i2c_client *client, const u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int reg_write(struct i2c_client *client, const u8 reg,
+ const u8 data)
+{
+ return i2c_smbus_write_byte_data(client, reg, data);
+}
+
+static int reg_set(struct i2c_client *client, const u8 reg,
+ const u8 data, u8 mask)
+{
+ int ret = reg_read(client, reg);
+ if (ret < 0)
+ return ret;
+ return reg_write(client, reg, (ret & ~mask) | (data & mask));
+}
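reg_set() above is a plain SMBus read-modify-write: it reads the register, replaces only the masked bits and writes the result back, returning any negative SMBus error code. A hedged usage sketch follows; the helper and the values are illustrative, not from the driver, which mostly ignores the return value.

static int ak881x_update_dac_sketch(struct i2c_client *client, u8 bits)
{
	/* update DAC bits 2:0 only, leaving bits 7:3 untouched */
	int ret = reg_set(client, AK881X_DAC_MODE, bits, 0x07);

	if (ret < 0)
		dev_warn(&client->dev, "DAC mode update failed: %d\n", ret);
	return ret;
}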
+
+static struct ak881x *to_ak881x(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ak881x, subdev);
+}
+
+static int ak881x_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = ak881x->id;
+ id->revision = ak881x->revision;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ak881x_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ reg->val = reg_read(client, reg->reg);
+
+ if (reg->val > 0xffff)
+ return -EIO;
+
+ return 0;
+}
+
+static int ak881x_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ if (reg_write(client, reg->reg, reg->val) < 0)
+ return -EIO;
+
+ return 0;
+}
+#endif
+
+static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l_bound_align_image(&mf->width, 0, 720, 2,
+ &mf->height, 0, ak881x->lines, 1, 0);
+ mf->field = V4L2_FIELD_INTERLACED;
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (mf->field != V4L2_FIELD_INTERLACED ||
+ mf->code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EINVAL;
+
+ return ak881x_try_g_mbus_fmt(sd, mf);
+}
+
+static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ return 0;
+}
+
+static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = 720;
+ a->bounds.height = ak881x->lines;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+ u8 vp1;
+
+ if (std == V4L2_STD_NTSC_443) {
+ vp1 = 3;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_M) {
+ vp1 = 5;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_60) {
+ vp1 = 7;
+ ak881x->lines = 480;
+ } else if (std && !(std & ~V4L2_STD_PAL)) {
+ vp1 = 0xf;
+ ak881x->lines = 576;
+ } else if (std && !(std & ~V4L2_STD_NTSC)) {
+ vp1 = 0;
+ ak881x->lines = 480;
+ } else {
+ /* No SECAM or PAL_N/Nc supported */
+ return -EINVAL;
+ }
+
+ reg_set(client, AK881X_VIDEO_PROCESS1, vp1, 0xf);
+
+ return 0;
+}
+
+static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (enable) {
+ u8 dac;
+ /* For colour-bar testing set bit 6 of AK881X_VIDEO_PROCESS1 */
+ /* Default: composite output */
+ if (ak881x->pdata->flags & AK881X_COMPONENT)
+ dac = 3;
+ else
+ dac = 4;
+ /* Turn on the DAC(s) */
+ reg_write(client, AK881X_DAC_MODE, dac);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ } else {
+ /* ...and clear bit 6 of AK881X_VIDEO_PROCESS1 here */
+ reg_write(client, AK881X_DAC_MODE, 0);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
+ .g_chip_ident = ak881x_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ak881x_g_register,
+ .s_register = ak881x_s_register,
+#endif
+};
+
+static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
+ .s_mbus_fmt = ak881x_s_mbus_fmt,
+ .g_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .try_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .cropcap = ak881x_cropcap,
+ .enum_mbus_fmt = ak881x_enum_mbus_fmt,
+ .s_std_output = ak881x_s_std_output,
+ .s_stream = ak881x_s_stream,
+};
+
+static struct v4l2_subdev_ops ak881x_subdev_ops = {
+ .core = &ak881x_subdev_core_ops,
+ .video = &ak881x_subdev_video_ops,
+};
+
+static int ak881x_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct ak881x *ak881x;
+ u8 ifmode, data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ ak881x = kzalloc(sizeof(struct ak881x), GFP_KERNEL);
+ if (!ak881x)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak881x->subdev, client, &ak881x_subdev_ops);
+
+ data = reg_read(client, AK881X_DEVICE_ID);
+
+ switch (data) {
+ case 0x13:
+ ak881x->id = V4L2_IDENT_AK8813;
+ break;
+ case 0x14:
+ ak881x->id = V4L2_IDENT_AK8814;
+ break;
+ default:
+ dev_err(&client->dev,
+ "No ak881x chip detected, register read %x\n", data);
+ kfree(ak881x);
+ return -ENODEV;
+ }
+
+ ak881x->revision = reg_read(client, AK881X_DEVICE_REVISION);
+ ak881x->pdata = client->dev.platform_data;
+
+ if (ak881x->pdata) {
+ if (ak881x->pdata->flags & AK881X_FIELD)
+ ifmode = 4;
+ else
+ ifmode = 0;
+
+ switch (ak881x->pdata->flags & AK881X_IF_MODE_MASK) {
+ case AK881X_IF_MODE_BT656:
+ ifmode |= 1;
+ break;
+ case AK881X_IF_MODE_MASTER:
+ ifmode |= 2;
+ break;
+ case AK881X_IF_MODE_SLAVE:
+ default:
+ break;
+ }
+
+ dev_dbg(&client->dev, "IF mode %x\n", ifmode);
+
+ /*
+ * "Line Blanking No." seems to be the same as the number of
+ * "black" lines on, e.g., SuperH VOU, whose default value of 20
+ * "incidentally" matches ak881x' default
+ */
+ reg_write(client, AK881X_INTERFACE_MODE, ifmode | (20 << 3));
+ }
+
+ /* Hardware default: NTSC-M */
+ ak881x->lines = 480;
+
+ dev_info(&client->dev, "Detected an ak881x chip ID %x, revision %x\n",
+ data, ak881x->revision);
+
+ return 0;
+}
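As an aside on the AK881X_INTERFACE_MODE write in the probe path above: the value packs the interface selection, the field flag and the line-blanking count into one byte. The bit positions below are inferred from the code alone, not taken from a datasheet, so treat them as an assumption.

/* Inferred layout: bits 1:0 IF mode (1 = BT.656, 2 = master, 0 = slave),
 * bit 2 field flag, bits 7:3 line-blanking count. */
static inline u8 ak881x_ifmode_sketch(unsigned int if_bits, bool field,
				      unsigned int blank_lines)
{
	return (blank_lines << 3) | (field ? 0x4 : 0) | (if_bits & 0x3);
}

/* ak881x_ifmode_sketch(1, false, 20) == 0xa1: BT.656, 20 blanking lines */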
+
+static int ak881x_remove(struct i2c_client *client)
+{
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l2_device_unregister_subdev(&ak881x->subdev);
+ kfree(ak881x);
+
+ return 0;
+}
+
+static const struct i2c_device_id ak881x_id[] = {
+ { "ak8813", 0 },
+ { "ak8814", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ak881x_id);
+
+static struct i2c_driver ak881x_i2c_driver = {
+ .driver = {
+ .name = "ak881x",
+ },
+ .probe = ak881x_probe,
+ .remove = ak881x_remove,
+ .id_table = ak881x_id,
+};
+
+static int __init ak881x_module_init(void)
+{
+ return i2c_add_driver(&ak881x_i2c_driver);
+}
+
+static void __exit ak881x_module_exit(void)
+{
+ i2c_del_driver(&ak881x_i2c_driver);
+}
+
+module_init(ak881x_module_init);
+module_exit(ak881x_module_exit);
+
+MODULE_DESCRIPTION("TV-output driver for ak8813/ak8814");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index a356d6bd313..31e7a123d19 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/mutex.h>
@@ -39,7 +41,7 @@
#include <asm/byteorder.h>
#if 0
-#define DEBUG(n, args...) printk(args)
+#define DEBUG(n, args...) printk(KERN_INFO args)
#define CHECK_LOST 1
#else
#define DEBUG(n, args...)
@@ -52,10 +54,10 @@
*/
#define USE_INT 0 /* Don't modify */
-#define VERSION "0.03"
+#define VERSION "0.04"
#define ar_inl(addr) inl((unsigned long)(addr))
-#define ar_outl(val, addr) outl((unsigned long)(val),(unsigned long)(addr))
+#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
extern struct cpuinfo_m32r boot_cpu_data;
@@ -79,7 +81,7 @@ extern struct cpuinfo_m32r boot_cpu_data;
/* bits & bytes per pixel */
#define AR_BITS_PER_PIXEL 16
-#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL/8)
+#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL / 8)
/* line buffer size */
#define AR_LINE_BYTES_VGA (AR_WIDTH_VGA * AR_BYTES_PER_PIXEL)
@@ -104,8 +106,9 @@ extern struct cpuinfo_m32r boot_cpu_data;
#define AR_MODE_INTERLACE 0
#define AR_MODE_NORMAL 1
-struct ar_device {
- struct video_device *vdev;
+struct ar {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
unsigned int start_capture; /* during capture in INT. mode. */
#if USE_INT
unsigned char *line_buff; /* DMA line buffer */
@@ -116,12 +119,13 @@ struct ar_device {
int width, height;
int frame_bytes, line_bytes;
wait_queue_head_t wait;
- unsigned long in_use;
struct mutex lock;
};
+static struct ar ardev;
+
static int video_nr = -1; /* video device number (first free) */
-static unsigned char yuv[MAX_AR_FRAME_BYTES];
+static unsigned char yuv[MAX_AR_FRAME_BYTES];
/* module parameters */
/* default frequency */
@@ -133,9 +137,7 @@ module_param(freq, int, 0);
module_param(vga, int, 0);
module_param(vga_interlace, int, 0);
-static int ar_initialize(struct video_device *dev);
-
-static inline void wait_for_vsync(void)
+static void wait_for_vsync(void)
{
while (ar_inl(ARVCR0) & ARVCR0_VDS) /* wait for VSYNC */
cpu_relax();
@@ -143,7 +145,7 @@ static inline void wait_for_vsync(void)
cpu_relax();
}
-static inline void wait_acknowledge(void)
+static void wait_acknowledge(void)
{
int i;
@@ -156,7 +158,7 @@ static inline void wait_acknowledge(void)
/*******************************************************************
* I2C functions
*******************************************************************/
-void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
+static void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
unsigned long data3)
{
int i;
@@ -200,7 +202,7 @@ void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
}
-void init_iic(void)
+static void init_iic(void)
{
DEBUG(1, "init_iic:\n");
@@ -214,13 +216,12 @@ void init_iic(void)
/* I2C CLK */
/* 50MH-100k */
- if (freq == 75) {
+ if (freq == 75)
ar_outl(369, PLDI2CFREQ); /* BCLK = 75MHz */
- } else if (freq == 50) {
+ else if (freq == 50)
ar_outl(244, PLDI2CFREQ); /* BCLK = 50MHz */
- } else {
+ else
ar_outl(244, PLDI2CFREQ); /* default: BCLK = 50MHz */
- }
ar_outl(0x1, PLDI2CCR); /* I2CCR Enable */
}
@@ -245,7 +246,7 @@ static inline void clear_dma_status(void)
ar_outl(0x8000, M32R_DMAEDET_PORTL); /* clear status */
}
-static inline void wait_for_vertical_sync(int exp_line)
+static void wait_for_vertical_sync(struct ar *ar, int exp_line)
{
#if CHECK_LOST
int tmout = 10000; /* FIXME */
@@ -260,7 +261,7 @@ static inline void wait_for_vertical_sync(int exp_line)
break;
}
if (tmout < 0)
- printk("arv: lost %d -> %d\n", exp_line, l);
+ v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l);
#else
while (ar_inl(ARVHCOUNT) != exp_line)
cpu_relax();
@@ -269,15 +270,14 @@ static inline void wait_for_vertical_sync(int exp_line)
static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
- struct video_device *v = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(v);
+ struct ar *ar = video_drvdata(file);
long ret = ar->frame_bytes; /* return read bytes */
unsigned long arvcr1 = 0;
unsigned long flags;
unsigned char *p;
int h, w;
unsigned char *py, *pu, *pv;
-#if ! USE_INT
+#if !USE_INT
int l;
#endif
@@ -305,7 +305,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); /* reload count (bytes) */
/*
- * Okey , kicks AR LSI to invoke an interrupt
+ * Okay, kick AR LSI to invoke an interrupt
*/
ar->start_capture = 0;
ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
@@ -313,7 +313,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
/* .... AR interrupts .... */
interruptible_sleep_on(&ar->wait);
if (signal_pending(current)) {
- printk("arv: interrupted while get frame data.\n");
+ printk(KERN_ERR "arv: interrupted while get frame data.\n");
ret = -EINTR;
goto out_up;
}
@@ -334,7 +334,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
cpu_relax();
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
if (h < (AR_HEIGHT_VGA/2))
l = h << 1;
else
@@ -349,7 +349,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
} else {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL);
enable_dma();
while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
@@ -386,7 +386,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
}
if (copy_to_user(buf, yuv, ar->frame_bytes)) {
- printk("arv: failed while copy_to_user yuv.\n");
+ v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n");
ret = -EFAULT;
goto out_up;
}
@@ -396,153 +396,127 @@ out_up:
return ret;
}
-static long ar_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int ar_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- struct video_device *dev = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(dev);
-
- DEBUG(1, "ar_ioctl()\n");
- switch(cmd) {
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- DEBUG(1, "VIDIOCGCAP:\n");
- strcpy(b->name, ar->vdev->name);
- b->type = VID_TYPE_CAPTURE;
- b->channels = 0;
- b->audios = 0;
- b->maxwidth = MAX_AR_WIDTH;
- b->maxheight = MAX_AR_HEIGHT;
- b->minwidth = MIN_AR_WIDTH;
- b->minheight = MIN_AR_HEIGHT;
- return 0;
- }
- case VIDIOCGCHAN:
- DEBUG(1, "VIDIOCGCHAN:\n");
- return 0;
- case VIDIOCSCHAN:
- DEBUG(1, "VIDIOCSCHAN:\n");
- return 0;
- case VIDIOCGTUNER:
- DEBUG(1, "VIDIOCGTUNER:\n");
- return 0;
- case VIDIOCSTUNER:
- DEBUG(1, "VIDIOCSTUNER:\n");
- return 0;
- case VIDIOCGPICT:
- DEBUG(1, "VIDIOCGPICT:\n");
- return 0;
- case VIDIOCSPICT:
- DEBUG(1, "VIDIOCSPICT:\n");
- return 0;
- case VIDIOCCAPTURE:
- DEBUG(1, "VIDIOCCAPTURE:\n");
+ struct ar *ar = video_drvdata(file);
+
+ strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 0, 4);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int ar_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
return -EINVAL;
- case VIDIOCGWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCGWIN:\n");
- memset(w, 0, sizeof(*w));
- w->width = ar->width;
- w->height = ar->height;
- return 0;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = V4L2_STD_ALL;
+ vin->status = 0;
+ return 0;
+}
+
+static int ar_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
+
+static int ar_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return inp ? -EINVAL : 0;
+}
+
+static int ar_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = ar->width;
+ pix->height = ar->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height <= AR_HEIGHT_QVGA || pix->width <= AR_WIDTH_QVGA) {
+ pix->height = AR_HEIGHT_QVGA;
+ pix->width = AR_WIDTH_QVGA;
+ pix->field = V4L2_FIELD_INTERLACED;
+ } else {
+ pix->height = AR_HEIGHT_VGA;
+ pix->width = AR_WIDTH_VGA;
+ pix->field = vga_interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
}
- case VIDIOCSWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCSWIN:\n");
- if ((w->width != AR_WIDTH_VGA || w->height != AR_HEIGHT_VGA) &&
- (w->width != AR_WIDTH_QVGA || w->height != AR_HEIGHT_QVGA))
- return -EINVAL;
-
- mutex_lock(&ar->lock);
- ar->width = w->width;
- ar->height = w->height;
- if (ar->width == AR_WIDTH_VGA) {
- ar->size = AR_SIZE_VGA;
- ar->frame_bytes = AR_FRAME_BYTES_VGA;
- ar->line_bytes = AR_LINE_BYTES_VGA;
- if (vga_interlace)
- ar->mode = AR_MODE_INTERLACE;
- else
- ar->mode = AR_MODE_NORMAL;
- } else {
- ar->size = AR_SIZE_QVGA;
- ar->frame_bytes = AR_FRAME_BYTES_QVGA;
- ar->line_bytes = AR_LINE_BYTES_QVGA;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = ar_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ mutex_lock(&ar->lock);
+ ar->width = pix->width;
+ ar->height = pix->height;
+ if (ar->width == AR_WIDTH_VGA) {
+ ar->size = AR_SIZE_VGA;
+ ar->frame_bytes = AR_FRAME_BYTES_VGA;
+ ar->line_bytes = AR_LINE_BYTES_VGA;
+ if (vga_interlace)
ar->mode = AR_MODE_INTERLACE;
- }
- mutex_unlock(&ar->lock);
- return 0;
- }
- case VIDIOCGFBUF:
- DEBUG(1, "VIDIOCGFBUF:\n");
- return -EINVAL;
- case VIDIOCSFBUF:
- DEBUG(1, "VIDIOCSFBUF:\n");
- return -EINVAL;
- case VIDIOCKEY:
- DEBUG(1, "VIDIOCKEY:\n");
- return 0;
- case VIDIOCGFREQ:
- DEBUG(1, "VIDIOCGFREQ:\n");
- return -EINVAL;
- case VIDIOCSFREQ:
- DEBUG(1, "VIDIOCSFREQ:\n");
- return -EINVAL;
- case VIDIOCGAUDIO:
- DEBUG(1, "VIDIOCGAUDIO:\n");
- return -EINVAL;
- case VIDIOCSAUDIO:
- DEBUG(1, "VIDIOCSAUDIO:\n");
- return -EINVAL;
- case VIDIOCSYNC:
- DEBUG(1, "VIDIOCSYNC:\n");
- return -EINVAL;
- case VIDIOCMCAPTURE:
- DEBUG(1, "VIDIOCMCAPTURE:\n");
- return -EINVAL;
- case VIDIOCGMBUF:
- DEBUG(1, "VIDIOCGMBUF:\n");
- return -EINVAL;
- case VIDIOCGUNIT:
- DEBUG(1, "VIDIOCGUNIT:\n");
- return -EINVAL;
- case VIDIOCGCAPTURE:
- DEBUG(1, "VIDIOCGCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSCAPTURE:
- DEBUG(1, "VIDIOCSCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSPLAYMODE:
- DEBUG(1, "VIDIOCSPLAYMODE:\n");
- return -EINVAL;
- case VIDIOCSWRITEMODE:
- DEBUG(1, "VIDIOCSWRITEMODE:\n");
- return -EINVAL;
- case VIDIOCGPLAYINFO:
- DEBUG(1, "VIDIOCGPLAYINFO:\n");
- return -EINVAL;
- case VIDIOCSMICROCODE:
- DEBUG(1, "VIDIOCSMICROCODE:\n");
- return -EINVAL;
- case VIDIOCGVBIFMT:
- DEBUG(1, "VIDIOCGVBIFMT:\n");
- return -EINVAL;
- case VIDIOCSVBIFMT:
- DEBUG(1, "VIDIOCSVBIFMT:\n");
- return -EINVAL;
- default:
- DEBUG(1, "Unknown ioctl(0x%08x)\n", cmd);
- return -ENOIOCTLCMD;
+ else
+ ar->mode = AR_MODE_NORMAL;
+ } else {
+ ar->size = AR_SIZE_QVGA;
+ ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+ ar->line_bytes = AR_LINE_BYTES_QVGA;
+ ar->mode = AR_MODE_INTERLACE;
}
+ /* Ok we figured out what to use from our wide choice */
+ mutex_unlock(&ar->lock);
return 0;
}
-static long ar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int ar_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
- return video_usercopy(file, cmd, arg, ar_do_ioctl);
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2 Planar", V4L2_PIX_FMT_YUV422P,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
#if USE_INT
@@ -551,7 +525,7 @@ static long ar_ioctl(struct file *file, unsigned int cmd,
*/
static void ar_interrupt(int irq, void *dev)
{
- struct ar_device *ar = dev;
+ struct ar *ar = dev;
unsigned int line_count;
unsigned int line_number;
unsigned int arvcr1;
@@ -559,11 +533,11 @@ static void ar_interrupt(int irq, void *dev)
line_count = ar_inl(ARVHCOUNT); /* line number */
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
/* operations for interlace mode */
- if ( line_count < (AR_HEIGHT_VGA/2) ) /* even line */
+ if (line_count < (AR_HEIGHT_VGA / 2)) /* even line */
line_number = (line_count << 1);
else /* odd line */
line_number =
- (((line_count - (AR_HEIGHT_VGA/2)) << 1) + 1);
+ (((line_count - (AR_HEIGHT_VGA / 2)) << 1) + 1);
} else {
line_number = line_count;
}
@@ -623,11 +597,10 @@ static void ar_interrupt(int irq, void *dev)
* 0 is returned in success.
*
*/
-static int ar_initialize(struct video_device *dev)
+static int ar_initialize(struct ar *ar)
{
- struct ar_device *ar = video_get_drvdata(dev);
unsigned long cr = 0;
- int i,found=0;
+ int i, found = 0;
DEBUG(1, "ar_initialize:\n");
@@ -666,129 +639,119 @@ static int ar_initialize(struct video_device *dev)
if (found == 0)
return -ENODEV;
- printk("arv: Initializing ");
-
- iic(2,0x78,0x11,0x01,0x00); /* start */
- iic(3,0x78,0x12,0x00,0x06);
- iic(3,0x78,0x12,0x12,0x30);
- iic(3,0x78,0x12,0x15,0x58);
- iic(3,0x78,0x12,0x17,0x30);
- printk(".");
- iic(3,0x78,0x12,0x1a,0x97);
- iic(3,0x78,0x12,0x1b,0xff);
- iic(3,0x78,0x12,0x1c,0xff);
- iic(3,0x78,0x12,0x26,0x10);
- iic(3,0x78,0x12,0x27,0x00);
- printk(".");
- iic(2,0x78,0x34,0x02,0x00);
- iic(2,0x78,0x7a,0x10,0x00);
- iic(2,0x78,0x80,0x39,0x00);
- iic(2,0x78,0x81,0xe6,0x00);
- iic(2,0x78,0x8d,0x00,0x00);
- printk(".");
- iic(2,0x78,0x8e,0x0c,0x00);
- iic(2,0x78,0x8f,0x00,0x00);
+ v4l2_info(&ar->v4l2_dev, "Initializing ");
+
+ iic(2, 0x78, 0x11, 0x01, 0x00); /* start */
+ iic(3, 0x78, 0x12, 0x00, 0x06);
+ iic(3, 0x78, 0x12, 0x12, 0x30);
+ iic(3, 0x78, 0x12, 0x15, 0x58);
+ iic(3, 0x78, 0x12, 0x17, 0x30);
+ printk(KERN_CONT ".");
+ iic(3, 0x78, 0x12, 0x1a, 0x97);
+ iic(3, 0x78, 0x12, 0x1b, 0xff);
+ iic(3, 0x78, 0x12, 0x1c, 0xff);
+ iic(3, 0x78, 0x12, 0x26, 0x10);
+ iic(3, 0x78, 0x12, 0x27, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x34, 0x02, 0x00);
+ iic(2, 0x78, 0x7a, 0x10, 0x00);
+ iic(2, 0x78, 0x80, 0x39, 0x00);
+ iic(2, 0x78, 0x81, 0xe6, 0x00);
+ iic(2, 0x78, 0x8d, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x8e, 0x0c, 0x00);
+ iic(2, 0x78, 0x8f, 0x00, 0x00);
#if 0
- iic(2,0x78,0x90,0x00,0x00); /* AWB on=1 off=0 */
+ iic(2, 0x78, 0x90, 0x00, 0x00); /* AWB on=1 off=0 */
#endif
- iic(2,0x78,0x93,0x01,0x00);
- iic(2,0x78,0x94,0xcd,0x00);
- iic(2,0x78,0x95,0x00,0x00);
- printk(".");
- iic(2,0x78,0x96,0xa0,0x00);
- iic(2,0x78,0x97,0x00,0x00);
- iic(2,0x78,0x98,0x60,0x00);
- iic(2,0x78,0x99,0x01,0x00);
- iic(2,0x78,0x9a,0x19,0x00);
- printk(".");
- iic(2,0x78,0x9b,0x02,0x00);
- iic(2,0x78,0x9c,0xe8,0x00);
- iic(2,0x78,0x9d,0x02,0x00);
- iic(2,0x78,0x9e,0x2e,0x00);
- iic(2,0x78,0xb8,0x78,0x00);
- iic(2,0x78,0xba,0x05,0x00);
+ iic(2, 0x78, 0x93, 0x01, 0x00);
+ iic(2, 0x78, 0x94, 0xcd, 0x00);
+ iic(2, 0x78, 0x95, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x96, 0xa0, 0x00);
+ iic(2, 0x78, 0x97, 0x00, 0x00);
+ iic(2, 0x78, 0x98, 0x60, 0x00);
+ iic(2, 0x78, 0x99, 0x01, 0x00);
+ iic(2, 0x78, 0x9a, 0x19, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x9b, 0x02, 0x00);
+ iic(2, 0x78, 0x9c, 0xe8, 0x00);
+ iic(2, 0x78, 0x9d, 0x02, 0x00);
+ iic(2, 0x78, 0x9e, 0x2e, 0x00);
+ iic(2, 0x78, 0xb8, 0x78, 0x00);
+ iic(2, 0x78, 0xba, 0x05, 0x00);
#if 0
- iic(2,0x78,0x83,0x8c,0x00); /* brightness */
+ iic(2, 0x78, 0x83, 0x8c, 0x00); /* brightness */
#endif
- printk(".");
+ printk(KERN_CONT ".");
/* color correction */
- iic(3,0x78,0x49,0x00,0x95); /* a */
- iic(3,0x78,0x49,0x01,0x96); /* b */
- iic(3,0x78,0x49,0x03,0x85); /* c */
- iic(3,0x78,0x49,0x04,0x97); /* d */
- iic(3,0x78,0x49,0x02,0x7e); /* e(Lo) */
- iic(3,0x78,0x49,0x05,0xa4); /* f(Lo) */
- iic(3,0x78,0x49,0x06,0x04); /* e(Hi) */
- iic(3,0x78,0x49,0x07,0x04); /* e(Hi) */
- iic(2,0x78,0x48,0x01,0x00); /* on=1 off=0 */
-
- printk(".");
- iic(2,0x78,0x11,0x00,0x00); /* end */
- printk(" done\n");
+ iic(3, 0x78, 0x49, 0x00, 0x95); /* a */
+ iic(3, 0x78, 0x49, 0x01, 0x96); /* b */
+ iic(3, 0x78, 0x49, 0x03, 0x85); /* c */
+ iic(3, 0x78, 0x49, 0x04, 0x97); /* d */
+ iic(3, 0x78, 0x49, 0x02, 0x7e); /* e(Lo) */
+ iic(3, 0x78, 0x49, 0x05, 0xa4); /* f(Lo) */
+ iic(3, 0x78, 0x49, 0x06, 0x04); /* e(Hi) */
+ iic(3, 0x78, 0x49, 0x07, 0x04); /* e(Hi) */
+ iic(2, 0x78, 0x48, 0x01, 0x00); /* on=1 off=0 */
+
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x11, 0x00, 0x00); /* end */
+ printk(KERN_CONT " done\n");
return 0;
}
-void ar_release(struct video_device *vfd)
-{
- struct ar_device *ar = video_get_drvdata(vfd);
- mutex_lock(&ar->lock);
- video_device_release(vfd);
-}
-
/****************************************************************************
*
* Video4Linux Module functions
*
****************************************************************************/
-static struct ar_device ardev;
-
-static int ar_exclusive_open(struct file *file)
-{
- return test_and_set_bit(0, &ardev.in_use) ? -EBUSY : 0;
-}
-
-static int ar_exclusive_release(struct file *file)
-{
- clear_bit(0, &ardev.in_use);
- return 0;
-}
static const struct v4l2_file_operations ar_fops = {
.owner = THIS_MODULE,
- .open = ar_exclusive_open,
- .release = ar_exclusive_release,
.read = ar_read,
- .ioctl = ar_ioctl,
+ .ioctl = video_ioctl2,
};
-static struct video_device ar_template = {
- .name = "Colour AR VGA",
- .fops = &ar_fops,
- .release = ar_release,
+static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+ .vidioc_querycap = ar_querycap,
+ .vidioc_g_input = ar_g_input,
+ .vidioc_s_input = ar_s_input,
+ .vidioc_enum_input = ar_enum_input,
+ .vidioc_enum_fmt_vid_cap = ar_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ar_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ar_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ar_try_fmt_vid_cap,
};
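The ioctl table above is what replaces the old V4L1 switch statement: video_ioctl2() now dispatches the standard V4L2 ioctls to these handlers. Not part of the patch, but as a hedged sketch of how userspace exercises the converted driver (the device node name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	int fd = open("/dev/video0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s card: %s\n", cap.driver, cap.card);
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("%ux%u YUV422P, %u bytes per frame\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height,
		       fmt.fmt.pix.sizeimage);
	/* frames are then fetched with plain read(), as ar_read() implements */
	close(fd);
	return 0;
}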
#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
static int __init ar_init(void)
{
- struct ar_device *ar;
+ struct ar *ar;
+ struct v4l2_device *v4l2_dev;
int ret;
int i;
- DEBUG(1, "ar_init:\n");
- ret = -EIO;
- printk(KERN_INFO "arv: Colour AR VGA driver %s\n", VERSION);
-
ar = &ardev;
- memset(ar, 0, sizeof(struct ar_device));
+ v4l2_dev = &ar->v4l2_dev;
+ strlcpy(v4l2_dev->name, "arv", sizeof(v4l2_dev->name));
+ v4l2_info(v4l2_dev, "Colour AR VGA driver %s\n", VERSION);
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
+ ret = -EIO;
#if USE_INT
/* allocate a DMA buffer for 1 line. */
ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
- if (ar->line_buff == NULL || ! ALIGN4(ar->line_buff)) {
- printk("arv: buffer allocation failed for DMA.\n");
+ if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for DMA.\n");
ret = -ENOMEM;
goto out_end;
}
@@ -796,20 +759,19 @@ static int __init ar_init(void)
/* allocate buffers for a frame */
for (i = 0; i < MAX_AR_HEIGHT; i++) {
ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
- if (ar->frame[i] == NULL || ! ALIGN4(ar->frame[i])) {
- printk("arv: buffer allocation failed for frame.\n");
+ if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for frame.\n");
ret = -ENOMEM;
goto out_line_buff;
}
}
- ar->vdev = video_device_alloc();
- if (!ar->vdev) {
- printk(KERN_ERR "arv: video_device_alloc() failed\n");
- return -ENOMEM;
- }
- memcpy(ar->vdev, &ar_template, sizeof(ar_template));
- video_set_drvdata(ar->vdev, ar);
+ strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name));
+ ar->vdev.v4l2_dev = v4l2_dev;
+ ar->vdev.fops = &ar_fops;
+ ar->vdev.ioctl_ops = &ar_ioctl_ops;
+ ar->vdev.release = video_device_release_empty;
+ video_set_drvdata(&ar->vdev, ar);
if (vga) {
ar->width = AR_WIDTH_VGA;
@@ -834,14 +796,14 @@ static int __init ar_init(void)
#if USE_INT
if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
- printk("arv: request_irq(%d) failed.\n", M32R_IRQ_INT3);
+ v4l2_err("request_irq(%d) failed.\n", M32R_IRQ_INT3);
ret = -EIO;
goto out_irq;
}
#endif
- if (ar_initialize(ar->vdev) != 0) {
- printk("arv: M64278 not found.\n");
+ if (ar_initialize(ar) != 0) {
+ v4l2_err(v4l2_dev, "M64278 not found.\n");
ret = -ENODEV;
goto out_dev;
}
@@ -852,15 +814,15 @@ static int __init ar_init(void)
* device is named "video[0-64]".
* video_register_device() initializes h/w using ar_initialize().
*/
- if (video_register_device(ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
+ if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
/* return -1, -ENFILE(full) or others */
- printk("arv: register video (Colour AR) failed.\n");
+ v4l2_err(v4l2_dev, "register video (Colour AR) failed.\n");
ret = -ENODEV;
goto out_dev;
}
- printk("%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
- video_device_node_name(ar->vdev), M32R_IRQ_INT3, freq);
+ v4l2_info(v4l2_dev, "%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+ video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq);
return 0;
@@ -879,6 +841,7 @@ out_line_buff:
out_end:
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
return ret;
}
@@ -886,7 +849,7 @@ out_end:
static int __init ar_init_module(void)
{
freq = (boot_cpu_data.bus_clock / 1000000);
- printk("arv: Bus clock %d\n", freq);
+ printk(KERN_INFO "arv: Bus clock %d\n", freq);
if (freq != 50 && freq != 75)
freq = DEFAULT_FREQ;
return ar_init();
@@ -894,11 +857,11 @@ static int __init ar_init_module(void)
static void __exit ar_cleanup_module(void)
{
- struct ar_device *ar;
+ struct ar *ar;
int i;
ar = &ardev;
- video_unregister_device(ar->vdev);
+ video_unregister_device(&ar->vdev);
#if USE_INT
free_irq(M32R_IRQ_INT3, ar);
#endif
@@ -907,6 +870,7 @@ static void __exit ar_cleanup_module(void)
#if USE_INT
kfree(ar->line_buff);
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
}
module_init(ar_init_module);
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 8c140c01c5e..52f25aabb6d 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -177,7 +177,7 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->usbdev,
+ usb_free_coherent(dev->usbdev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -247,7 +247,7 @@ int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->usbdev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
printk("unable to allocate %i bytes for transfer"
@@ -1105,7 +1105,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
tmp = input->index;
- if (tmp > AU0828_MAX_INPUT)
+ if (tmp >= AU0828_MAX_INPUT)
return -EINVAL;
if (AUVI_INPUT(tmp).type == 0)
return -EINVAL;
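The one-character change above fixes an off-by-one: the input table has AU0828_MAX_INPUT entries, so valid indices run from 0 to AU0828_MAX_INPUT - 1 and the old '>' test let the one-past-the-end index through. A toy illustration with made-up names:

#define N_INPUTS 4			/* stand-in for AU0828_MAX_INPUT */
static const char *inputs[N_INPUTS] = { "tuner", "composite", "svideo", "aux" };

static const char *input_name(unsigned int idx)
{
	if (idx >= N_INPUTS)		/* 'idx > N_INPUTS' would accept idx == 4 */
		return NULL;
	return inputs[idx];
}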
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 716870ae85d..7af56cde0c7 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -241,6 +241,10 @@ static struct CARD {
{ 0xa1550101, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550102, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550103, BTTV_BOARD_IVC200, "IVC-200G" },
+ { 0xa1550800, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550801, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550802, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550803, BTTV_BOARD_IVC200, "IVC-200" },
{ 0xa182ff00, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff01, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff02, BTTV_BOARD_IVC120, "IVC-120G" },
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index f4860f03dfc..38c7f78ad9c 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1525,7 +1525,7 @@ static int bttv_s_ctrl(struct file *file, void *f,
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1806,8 +1806,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = gbuffers;
- while (*size * *count > gbuffers * gbufsize)
- (*count)--;
+ if (*size * *count > gbuffers * gbufsize)
+ *count = (gbuffers * gbufsize) / *size;
return 0;
}
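The buffer_setup() change above replaces a decrement loop with a single division when the request exceeds the global buffer budget. A worked example with assumed defaults (gbuffers = 8, gbufsize = 0x208000; both are module parameters, so the numbers are only illustrative):

/* 720x576 at 16 bpp needs 720 * 576 * 2 = 829440 bytes per buffer.
 * The pool holds 8 * 0x208000 = 17039360 bytes, so a request for
 * 32 buffers is clamped to 17039360 / 829440 = 20 in one step,
 * where the old loop decremented twelve times. */
static unsigned int clamp_count(unsigned int size, unsigned int count,
				unsigned int gbuffers, unsigned int gbufsize)
{
	if (size * count > gbuffers * gbufsize)
		count = (gbuffers * gbufsize) / size;
	return count;
}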
@@ -1859,7 +1859,7 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id)
unsigned int i;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1941,7 +1941,7 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i)
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1961,7 +1961,7 @@ static int bttv_s_tuner(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1987,11 +1987,6 @@ static int bttv_g_frequency(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- int err;
-
- err = v4l2_prio_check(&btv->prio, &fh->prio);
- if (0 != err)
- return err;
f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = btv->freq;
@@ -2006,7 +2001,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -3029,7 +3024,7 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- retval = v4l2_prio_check(&btv->prio, &fh->prio);
+ retval = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != retval)
return retval;
@@ -3241,7 +3236,7 @@ static int bttv_open(struct file *file)
*fh = btv->init;
fh->type = type;
fh->ov.setup_ok = 0;
- v4l2_prio_open(&btv->prio,&fh->prio);
+ v4l2_prio_open(&btv->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
&btv->c.pci->dev, &btv->s_lock,
@@ -3312,7 +3307,7 @@ static int bttv_release(struct file *file)
/* free stuff */
videobuf_mmap_free(&fh->cap);
videobuf_mmap_free(&fh->vbi);
- v4l2_prio_close(&btv->prio,&fh->prio);
+ v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3449,7 +3444,7 @@ static int radio_release(struct file *file)
struct bttv *btv = fh->btv;
struct rds_command cmd;
- v4l2_prio_close(&btv->prio,&fh->prio);
+ v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index aa153a986ad..f68717a4bde 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -49,6 +49,8 @@ module_param(ir_rc5_key_timeout, int, 0644);
#define DEVNAME "bttv-input"
+#define MODULE_NAME "bttv"
+
/* ---------------------------------------------------------------------- */
static void ir_handle_key(struct bttv *btv)
@@ -246,7 +248,7 @@ static void bttv_ir_stop(struct bttv *btv)
int bttv_input_init(struct bttv *btv)
{
struct card_ir *ir;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
struct input_dev *input_dev;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
@@ -264,7 +266,7 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVERMEDIA:
case BTTV_BOARD_AVPHONE98:
case BTTV_BOARD_AVERMEDIA98:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
ir->mask_keycode = 0xf88000;
ir->mask_keydown = 0x010000;
ir->polling = 50; // ms
@@ -272,14 +274,14 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVDVBT_761:
case BTTV_BOARD_AVDVBT_771:
- ir_codes = &ir_codes_avermedia_dvbt_table;
+ ir_codes = RC_MAP_AVERMEDIA_DVBT;
ir->mask_keycode = 0x0f00c0;
ir->mask_keydown = 0x000020;
ir->polling = 50; // ms
break;
case BTTV_BOARD_PXELVWPLTVPAK:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x003e00;
ir->mask_keyup = 0x010000;
ir->polling = 50; // ms
@@ -287,24 +289,24 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_PV_M4900:
case BTTV_BOARD_PV_BT878P_9B:
case BTTV_BOARD_PV_BT878P_PLUS:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_WINFAST2000:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x1f8;
break;
case BTTV_BOARD_MAGICTVIEW061:
case BTTV_BOARD_MAGICTVIEW063:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x0008e000;
ir->mask_keydown = 0x00200000;
break;
case BTTV_BOARD_APAC_VIEWCOMP:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
@@ -312,30 +314,30 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_ASKEY_CPH03X:
case BTTV_BOARD_CONCEPTRONIC_CTVFMI2:
case BTTV_BOARD_CONTVFMI:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x006000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_NEBULA_DIGITV:
- ir_codes = &ir_codes_nebula_table;
+ ir_codes = RC_MAP_NEBULA;
btv->custom_irq = bttv_rc5_irq;
ir->rc5_gpio = 1;
break;
case BTTV_BOARD_MACHTV_MAGICTV:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x004000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_KOZUMI_KTV_01C:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x006000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_ENLTV_FM_2:
- ir_codes = &ir_codes_encore_enltv2_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV2;
ir->mask_keycode = 0x00fd00;
ir->mask_keyup = 0x000080;
ir->polling = 1; /* ms */
@@ -390,7 +392,7 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_start(btv, ir);
/* all done */
- err = ir_input_register(btv->remote->dev, ir_codes, NULL);
+ err = ir_input_register(btv->remote->dev, ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 9e39bc5f7b0..3c9e754d73a 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -80,8 +80,8 @@ OTHER DEALINGS IN THE SOFTWARE.
#include "bw-qcam.h"
-static unsigned int maxpoll=250; /* Maximum busy-loop count for qcam I/O */
-static unsigned int yieldlines=4; /* Yield after this many during capture */
+static unsigned int maxpoll = 250; /* Maximum busy-loop count for qcam I/O */
+static unsigned int yieldlines = 4; /* Yield after this many during capture */
static int video_nr = -1;
static unsigned int force_init; /* Whether to probe aggressively */
@@ -156,7 +156,7 @@ static int qc_calibrate(struct qcam_device *q)
mdelay(1);
schedule();
count++;
- } while (value == 0xff && count<2048);
+ } while (value == 0xff && count < 2048);
q->whitebal = value;
return value;
@@ -170,16 +170,15 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
- NULL, 0, NULL);
- if (q->pdev == NULL)
- {
+ NULL, 0, NULL);
+ if (q->pdev == NULL) {
printk(KERN_ERR "bw-qcam: couldn't register for %s.\n",
- port->name);
+ port->name);
kfree(q);
return NULL;
}
@@ -247,12 +246,10 @@ static int qc_readparam(struct qcam_device *q)
static int qc_waithand(struct qcam_device *q, int val)
{
int status;
- int runs=0;
+ int runs = 0;
- if (val)
- {
- while (!((status = read_lpstatus(q)) & 8))
- {
+ if (val) {
+ while (!((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -260,18 +257,13 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs>(maxpoll+1000)) /* 5 seconds */
+ if (runs > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
- }
- else
- {
- while (((status = read_lpstatus(q)) & 8))
- {
+ } else {
+ while (((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -279,11 +271,9 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
}
@@ -299,10 +289,9 @@ static int qc_waithand(struct qcam_device *q, int val)
static unsigned int qc_waithand2(struct qcam_device *q, int val)
{
unsigned int status;
- int runs=0;
+ int runs = 0;
- do
- {
+ do {
status = read_lpdata(q);
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
@@ -311,14 +300,11 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return 0;
- }
- while ((status & 1) != val);
+ } while ((status & 1) != val);
return status;
}
@@ -342,8 +328,7 @@ static int qc_detect(struct qcam_device *q)
lastreg = reg = read_lpstatus(q) & 0xf0;
- for (i = 0; i < 500; i++)
- {
+ for (i = 0; i < 500; i++) {
reg = read_lpstatus(q) & 0xf0;
if (reg != lastreg)
count++;
@@ -357,7 +342,7 @@ static int qc_detect(struct qcam_device *q)
won't be flashing these bits. Possibly unloading the module
in the middle of a grab? Or some timeout condition?
I've seen this parameter as low as 19 on my 450Mhz box - mpc */
- printk("Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
+ printk(KERN_DEBUG "Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
return 1;
#endif
@@ -367,7 +352,7 @@ static int qc_detect(struct qcam_device *q)
return 1; /* found */
} else {
printk(KERN_ERR "No Quickcam found on port %s\n",
- q->pport->name);
+ q->pport->name);
printk(KERN_DEBUG "Quickcam detection counter: %u\n", count);
return 0; /* not found */
}
@@ -381,26 +366,24 @@ static int qc_detect(struct qcam_device *q)
static void qc_reset(struct qcam_device *q)
{
- switch (q->port_mode & QC_FORCE_MASK)
- {
- case QC_FORCE_UNIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
+ switch (q->port_mode & QC_FORCE_MASK) {
+ case QC_FORCE_UNIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
- case QC_FORCE_BIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- break;
+ case QC_FORCE_BIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ break;
- case QC_ANY:
- write_lpcontrol(q, 0x20);
- write_lpdata(q, 0x75);
+ case QC_ANY:
+ write_lpcontrol(q, 0x20);
+ write_lpdata(q, 0x75);
- if (read_lpdata(q) != 0x75) {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- } else {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- }
- break;
+ if (read_lpdata(q) != 0x75)
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ else
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
}
write_lpcontrol(q, 0xb);
@@ -423,36 +406,33 @@ static int qc_setscanmode(struct qcam_device *q)
{
int old_mode = q->mode;
- switch (q->transfer_scale)
- {
- case 1:
- q->mode = 0;
- break;
- case 2:
- q->mode = 4;
- break;
- case 4:
- q->mode = 8;
- break;
+ switch (q->transfer_scale) {
+ case 1:
+ q->mode = 0;
+ break;
+ case 2:
+ q->mode = 4;
+ break;
+ case 4:
+ q->mode = 8;
+ break;
}
- switch (q->bpp)
- {
- case 4:
- break;
- case 6:
- q->mode += 2;
- break;
+ switch (q->bpp) {
+ case 4:
+ break;
+ case 6:
+ q->mode += 2;
+ break;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR:
- q->mode += 1;
- break;
- case QC_NOTSET:
- case QC_UNIDIR:
- break;
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR:
+ q->mode += 1;
+ break;
+ case QC_NOTSET:
+ case QC_UNIDIR:
+ break;
}
if (q->mode != old_mode)
@@ -493,7 +473,7 @@ static void qc_set(struct qcam_device *q)
} else {
val = q->width * q->bpp;
val2 = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
}
val = DIV_ROUND_UP(val, val2);
qc_command(q, 0x13);
@@ -521,85 +501,80 @@ static void qc_set(struct qcam_device *q)
static inline int qc_readbytes(struct qcam_device *q, char buffer[])
{
- int ret=1;
+ int ret = 1;
unsigned int hi, lo;
unsigned int hi2, lo2;
static int state;
- if (buffer == NULL)
- {
+ if (buffer == NULL) {
state = 0;
return 0;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR: /* Bi-directional Port */
- write_lpcontrol(q, 0x26);
- lo = (qc_waithand2(q, 1) >> 1);
- hi = (read_lpstatus(q) >> 3) & 0x1f;
- write_lpcontrol(q, 0x2e);
- lo2 = (qc_waithand2(q, 0) >> 1);
- hi2 = (read_lpstatus(q) >> 3) & 0x1f;
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo & 0xf;
- buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
- buffer[2] = (hi & 0x1e) >> 1;
- buffer[3] = lo2 & 0xf;
- buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
- buffer[5] = (hi2 & 0x1e) >> 1;
- ret = 6;
- break;
- case 6:
- buffer[0] = lo & 0x3f;
- buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
- buffer[2] = lo2 & 0x3f;
- buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
- ret = 4;
- break;
- }
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR: /* Bi-directional Port */
+ write_lpcontrol(q, 0x26);
+ lo = (qc_waithand2(q, 1) >> 1);
+ hi = (read_lpstatus(q) >> 3) & 0x1f;
+ write_lpcontrol(q, 0x2e);
+ lo2 = (qc_waithand2(q, 0) >> 1);
+ hi2 = (read_lpstatus(q) >> 3) & 0x1f;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo & 0xf;
+ buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
+ buffer[2] = (hi & 0x1e) >> 1;
+ buffer[3] = lo2 & 0xf;
+ buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
+ buffer[5] = (hi2 & 0x1e) >> 1;
+ ret = 6;
+ break;
+ case 6:
+ buffer[0] = lo & 0x3f;
+ buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
+ buffer[2] = lo2 & 0x3f;
+ buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
+ ret = 4;
break;
+ }
+ break;
+
+ case QC_UNIDIR: /* Unidirectional Port */
+ write_lpcontrol(q, 6);
+ lo = (qc_waithand(q, 1) & 0xf0) >> 4;
+ write_lpcontrol(q, 0xe);
+ hi = (qc_waithand(q, 0) & 0xf0) >> 4;
- case QC_UNIDIR: /* Unidirectional Port */
- write_lpcontrol(q, 6);
- lo = (qc_waithand(q, 1) & 0xf0) >> 4;
- write_lpcontrol(q, 0xe);
- hi = (qc_waithand(q, 0) & 0xf0) >> 4;
-
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo;
- buffer[1] = hi;
- ret = 2;
- break;
- case 6:
- switch (state)
- {
- case 0:
- buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
- q->saved_bits = (hi & 3) << 4;
- state = 1;
- ret = 1;
- break;
- case 1:
- buffer[0] = lo | q->saved_bits;
- q->saved_bits = hi << 2;
- state = 2;
- ret = 1;
- break;
- case 2:
- buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
- buffer[1] = ((lo & 3) << 4) | hi;
- state = 0;
- ret = 2;
- break;
- }
- break;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo;
+ buffer[1] = hi;
+ ret = 2;
+ break;
+ case 6:
+ switch (state) {
+ case 0:
+ buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
+ q->saved_bits = (hi & 3) << 4;
+ state = 1;
+ ret = 1;
+ break;
+ case 1:
+ buffer[0] = lo | q->saved_bits;
+ q->saved_bits = hi << 2;
+ state = 2;
+ ret = 1;
+ break;
+ case 2:
+ buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
+ buffer[1] = ((lo & 3) << 4) | hi;
+ state = 0;
+ ret = 2;
+ break;
}
break;
+ }
+ break;
}
return ret;
}
@@ -615,7 +590,7 @@ static inline int qc_readbytes(struct qcam_device *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -623,9 +598,9 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
int divisor;
int pixels_per_line;
int pixels_read = 0;
- int got=0;
+ int got = 0;
char buffer[6];
- int shift=8-q->bpp;
+ int shift = 8 - q->bpp;
char invert;
if (q->mode == -1)
@@ -634,13 +609,12 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
qc_command(q, 0x7);
qc_command(q, q->mode);
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 0x2e); /* turn port around */
write_lpcontrol(q, 0x26);
- (void) qc_waithand(q, 1);
+ qc_waithand(q, 1);
write_lpcontrol(q, 0x2e);
- (void) qc_waithand(q, 0);
+ qc_waithand(q, 0);
}
/* strange -- should be 15:63 below, but 4bpp is odd */
@@ -650,33 +624,28 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
pixels_per_line = q->width / q->transfer_scale;
transperline = q->width * q->bpp;
divisor = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
transperline = DIV_ROUND_UP(transperline, divisor);
- for (i = 0, yield = yieldlines; i < linestotrans; i++)
- {
- for (pixels_read = j = 0; j < transperline; j++)
- {
+ for (i = 0, yield = yieldlines; i < linestotrans; i++) {
+ for (pixels_read = j = 0; j < transperline; j++) {
bytes = qc_readbytes(q, buffer);
- for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++)
- {
+ for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++) {
int o;
- if (buffer[k] == 0 && invert == 16)
- {
+ if (buffer[k] == 0 && invert == 16) {
/* 4bpp is odd (again) -- inverter is 16, not 15, but output
must be 0-15 -- bls */
buffer[k] = 16;
}
- o=i*pixels_per_line + pixels_read + k;
- if(o<len)
- {
+ o = i * pixels_per_line + pixels_read + k;
+ if (o < len) {
got++;
- put_user((invert - buffer[k])<<shift, buf+o);
+ put_user((invert - buffer[k]) << shift, buf + o);
}
}
pixels_read += bytes;
}
- (void) qc_readbytes(q, NULL); /* reset state machine */
+ qc_readbytes(q, NULL); /* reset state machine */
/* Grabbing an entire frame from the quickcam is a lengthy
process. We don't (usually) want to busy-block the
@@ -690,14 +659,13 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
}
}
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 2);
write_lpcontrol(q, 6);
udelay(3);
write_lpcontrol(q, 0xe);
}
- if(got<len)
+ if (got < len)
return got;
return len;
}
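On the pixel math in qc_capture() above: the camera delivers inverted samples, and the driver widens them to 8 bits with (invert - sample) << (8 - bpp). A small sketch with concrete numbers, keeping the 4 bpp inverter quirk already noted in the code:

/* At 6 bpp: invert = 63, shift = 2, so sample 0 maps to 252 and 63 to 0.
 * At 4 bpp the inverter is 16 rather than 15, which is why a raw 0 is
 * rewritten to 16 first, so (invert - sample) still fits in 4 bits. */
static unsigned char qcam_expand_sketch(unsigned char sample, int bpp, int invert)
{
	return (invert - sample) << (8 - bpp);
}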
@@ -709,11 +677,10 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
- {
- case VIDIOCGCAP:
+ switch (cmd) {
+ case VIDIOCGCAP:
{
struct video_capability *b = arg;
strcpy(b->name, "Quickcam");
@@ -726,73 +693,73 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
b->minheight = 60;
return 0;
}
- case VIDIOCGCHAN:
+ case VIDIOCGCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
- v->flags=0;
- v->tuners=0;
+ v->flags = 0;
+ v->tuners = 0;
/* Good question.. its composite or SVHS so.. */
v->type = VIDEO_TYPE_CAMERA;
strcpy(v->name, "Camera");
return 0;
}
- case VIDIOCSCHAN:
+ case VIDIOCSCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
return 0;
}
- case VIDIOCGTUNER:
+ case VIDIOCGTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
strcpy(v->name, "Format");
- v->rangelow=0;
- v->rangehigh=0;
- v->flags= 0;
+ v->rangelow = 0;
+ v->rangehigh = 0;
+ v->flags = 0;
v->mode = VIDEO_MODE_AUTO;
return 0;
}
- case VIDIOCSTUNER:
+ case VIDIOCSTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
+ if (v->mode != VIDEO_MODE_AUTO)
return -EINVAL;
return 0;
}
- case VIDIOCGPICT:
+ case VIDIOCGPICT:
{
struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=qcam->bpp;
- p->palette=VIDEO_PALETTE_GREY;
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = qcam->bpp;
+ p->palette = VIDEO_PALETTE_GREY;
return 0;
}
- case VIDIOCSPICT:
+ case VIDIOCSPICT:
{
struct video_picture *p = arg;
- if(p->palette!=VIDEO_PALETTE_GREY)
+ if (p->palette != VIDEO_PALETTE_GREY)
return -EINVAL;
- if(p->depth!=4 && p->depth!=6)
+ if (p->depth != 4 && p->depth != 6)
return -EINVAL;
/*
* Now load the camera.
*/
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
qcam->bpp = p->depth;
mutex_lock(&qcam->lock);
@@ -802,28 +769,25 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return 0;
}
- case VIDIOCSWIN:
+ case VIDIOCSWIN:
{
struct video_window *vw = arg;
- if(vw->flags)
+ if (vw->flags)
return -EINVAL;
- if(vw->clipcount)
+ if (vw->clipcount)
return -EINVAL;
- if(vw->height<60||vw->height>240)
+ if (vw->height < 60 || vw->height > 240)
return -EINVAL;
- if(vw->width<80||vw->width>320)
+ if (vw->width < 80 || vw->width > 320)
return -EINVAL;
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 4;
- if(vw->width>=160 && vw->height>=120)
- {
+ if (vw->width >= 160 && vw->height >= 120)
qcam->transfer_scale = 2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
+ if (vw->width >= 320 && vw->height >= 240) {
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 1;
@@ -839,41 +803,42 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Ok we figured out what to use from our wide choice */
return 0;
}
- case VIDIOCGWIN:
+ case VIDIOCGWIN:
{
struct video_window *vw = arg;
+
memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width/qcam->transfer_scale;
- vw->height=qcam->height/qcam->transfer_scale;
+ vw->width = qcam->width / qcam->transfer_scale;
+ vw->height = qcam->height / qcam->transfer_scale;
return 0;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
static long qcam_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, qcam_do_ioctl);
}
static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
parport_claim_or_block(qcam->pdev);
@@ -885,7 +850,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
if (qcam->status & QC_PARAM_CHANGE)
qc_set(qcam);
- len=qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
mutex_unlock(&qcam->lock);
@@ -917,8 +882,7 @@ static const struct v4l2_file_operations qcam_fops = {
.ioctl = qcam_ioctl,
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Connectix Quickcam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -932,22 +896,20 @@ static int init_bwqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (num_cams == MAX_CAMS)
- {
+ if (num_cams == MAX_CAMS) {
printk(KERN_ERR "Too many Quickcams (max %d)\n", MAX_CAMS);
return -ENOSPC;
}
- qcam=qcam_init(port);
- if(qcam==NULL)
+ qcam = qcam_init(port);
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if(qc_detect(qcam)==0)
- {
+ if (qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -1045,12 +1007,12 @@ static int __init init_bw_qcams(void)
#ifdef MODULE
/* Do some sanity checks on the module parameters. */
if (maxpoll > 5000) {
- printk("Connectix Quickcam max-poll was above 5000. Using 5000.\n");
+ printk(KERN_INFO "Connectix Quickcam max-poll was above 5000. Using 5000.\n");
maxpoll = 5000;
}
if (yieldlines < 1) {
- printk("Connectix Quickcam yieldlines was less than 1. Using 1.\n");
+ printk(KERN_INFO "Connectix Quickcam yieldlines was less than 1. Using 1.\n");
yieldlines = 1;
}
#endif
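
For reference alongside the VIDIOCGPICT/VIDIOCSPICT hunks above: a minimal userspace sketch of the 8-bit/16-bit control scaling those cases perform. The helper names are invented for illustration; the driver simply does the shifts inline.

#include <stdio.h>

/* Illustration only: V4L1 picture controls are 16-bit, the camera
 * settings are 8-bit, so the driver shifts by 8 in each direction. */
static unsigned int to_v4l1_range(unsigned char camval)
{
        return camval << 8;             /* 0..255  -> 0..65280 */
}

static unsigned char from_v4l1_range(unsigned int v4lval)
{
        return v4lval >> 8;             /* 0..65535 -> 0..255  */
}

int main(void)
{
        unsigned int v = to_v4l1_range(180);

        printf("camera 180 -> v4l1 %u -> camera %u\n", v, from_v4l1_range(v));
        return 0;
}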
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index e2cbebab959..8f1dd88b32a 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -79,17 +79,17 @@ static inline void qcam_set_ack(struct qcam_device *qcam, unsigned int i)
{
/* note: the QC specs refer to the PCAck pin by voltage, not
software level. PC ports have builtin inverters. */
- parport_frob_control(qcam->pport, 8, i?8:0);
+ parport_frob_control(qcam->pport, 8, i ? 8 : 0);
}
static inline unsigned int qcam_ready1(struct qcam_device *qcam)
{
- return (parport_read_status(qcam->pport) & 0x8)?1:0;
+ return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0;
}
static inline unsigned int qcam_ready2(struct qcam_device *qcam)
{
- return (parport_read_data(qcam->pport) & 0x1)?1:0;
+ return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0;
}
static unsigned int qcam_await_ready1(struct qcam_device *qcam,
@@ -99,14 +99,13 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready1(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready1(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -125,14 +124,13 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready2(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready2(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -149,22 +147,25 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
static int qcam_read_data(struct qcam_device *qcam)
{
unsigned int idata;
+
qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1)) return -1;
+ if (qcam_await_ready1(qcam, 1))
+ return -1;
idata = parport_read_status(qcam->pport) & 0xf0;
qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0)) return -1;
- idata |= (parport_read_status(qcam->pport) >> 4);
+ if (qcam_await_ready1(qcam, 0))
+ return -1;
+ idata |= parport_read_status(qcam->pport) >> 4;
return idata;
}
static int qcam_write_data(struct qcam_device *qcam, unsigned int data)
{
unsigned int idata;
+
parport_write_data(qcam->pport, data);
idata = qcam_read_data(qcam);
- if (data != idata)
- {
+ if (data != idata) {
printk(KERN_WARNING "cqcam: sent %x but received %x\n", data,
idata);
return 1;
@@ -212,13 +213,12 @@ static int qc_detect(struct qcam_device *qcam)
/* look for a heartbeat */
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -232,13 +232,12 @@ static int qc_detect(struct qcam_device *qcam)
count = 0;
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -263,7 +262,7 @@ static void qc_setup(struct qcam_device *q)
{
qc_reset(q);
- /* Set the brightness. */
+ /* Set the brightness. */
qcam_set(q, 11, q->brightness);
/* Set the height and width. These refer to the actual
@@ -292,25 +291,25 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
unsigned int bytes = 0;
qcam_set_ack(q, 0);
- if (q->bidirectional)
- {
+ if (q->bidirectional) {
/* It's a bidirectional port */
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int lo1, hi1, lo2, hi2;
unsigned char r, g, b;
- if (qcam_await_ready2(q, 1)) return bytes;
+ if (qcam_await_ready2(q, 1))
+ return bytes;
lo1 = parport_read_data(q->pport) >> 1;
hi1 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 1);
- if (qcam_await_ready2(q, 0)) return bytes;
+ if (qcam_await_ready2(q, 0))
+ return bytes;
lo2 = parport_read_data(q->pport) >> 1;
hi2 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 0);
- r = (lo1 | ((hi1 & 1)<<7));
- g = ((hi1 & 0x1e)<<3) | ((hi2 & 0x1e)>>1);
- b = (lo2 | ((hi2 & 1)<<7));
+ r = lo1 | ((hi1 & 1) << 7);
+ g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1);
+ b = lo2 | ((hi2 & 1) << 7);
if (force_rgb) {
buf[bytes++] = r;
buf[bytes++] = g;
@@ -321,21 +320,20 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
buf[bytes++] = r;
}
}
- }
- else
- {
+ } else {
/* It's a unidirectional port */
int i = 0, n = bytes;
unsigned char rgb[3];
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int hi, lo;
- if (qcam_await_ready1(q, 1)) return bytes;
+ if (qcam_await_ready1(q, 1))
+ return bytes;
hi = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0)) return bytes;
+ if (qcam_await_ready1(q, 0))
+ return bytes;
lo = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 0);
/* flip some bits */
@@ -374,28 +372,26 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
return -EFAULT;
/* Wait for camera to become ready */
- for (;;)
- {
+ for (;;) {
int i = qcam_get(q, 41);
+
if (i == -1) {
qc_setup(q);
return -EIO;
}
if ((i & 0x80) == 0)
break;
- else
- schedule();
+ schedule();
}
- if (qcam_set(q, 7, (q->mode | (is_bi_dir?1:0)) + 1))
+ if (qcam_set(q, 7, (q->mode | (is_bi_dir ? 1 : 0)) + 1))
return -EIO;
lines = q->height;
pixelsperline = q->width;
bitsperxfer = (is_bi_dir) ? 24 : 8;
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
/* Turn the port around */
parport_data_reverse(q->pport);
mdelay(3);
@@ -413,16 +409,17 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
wantlen = lines * pixelsperline * 24 / 8;
- while (wantlen)
- {
+ while (wantlen) {
size_t t, s;
- s = (wantlen > BUFSZ)?BUFSZ:wantlen;
+
+ s = (wantlen > BUFSZ) ? BUFSZ : wantlen;
t = qcam_read_bytes(q, tmpbuf, s);
- if (outptr < len)
- {
+ if (outptr < len) {
size_t sz = len - outptr;
- if (sz > t) sz = t;
- if (__copy_to_user(buf+outptr, tmpbuf, sz))
+
+ if (sz > t)
+ sz = t;
+ if (__copy_to_user(buf + outptr, tmpbuf, sz))
break;
outptr += sz;
}
@@ -434,33 +431,31 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
len = outptr;
- if (wantlen)
- {
- printk("qcam: short read.\n");
+ if (wantlen) {
+ printk(KERN_ERR "qcam: short read.\n");
if (is_bi_dir)
parport_data_forward(q->pport);
qc_setup(q);
return len;
}
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 3);
cond_resched();
} while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e));
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
qcam_set_ack(q, 0);
- if (qcam_await_ready1(q, 1))
- {
- printk("qcam: no ack after EOF\n");
+ if (qcam_await_ready1(q, 1)) {
+ printk(KERN_ERR "qcam: no ack after EOF\n");
parport_data_forward(q->pport);
qc_setup(q);
return len;
@@ -468,27 +463,25 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
parport_data_forward(q->pport);
mdelay(3);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0))
- {
- printk("qcam: no ack to port turnaround\n");
+ if (qcam_await_ready1(q, 0)) {
+ printk(KERN_ERR "qcam: no ack to port turnaround\n");
qc_setup(q);
return len;
}
- }
- else
- {
+ } else {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 1);
cond_resched();
} while (l && tmpbuf[0] == 0x7e);
- l = qcam_read_bytes(q, tmpbuf+1, 2);
+ l = qcam_read_bytes(q, tmpbuf + 1, 2);
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
}
@@ -503,164 +496,166 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
+ switch (cmd) {
+ case VIDIOCGCAP:
{
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- strcpy(b->name, "Quickcam");
- b->type = VID_TYPE_CAPTURE|VID_TYPE_SCALES;
- b->channels = 1;
- b->audios = 0;
- b->maxwidth = 320;
- b->maxheight = 240;
- b->minwidth = 80;
- b->minheight = 60;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- v->flags=0;
- v->tuners=0;
- /* Good question.. its composite or SVHS so.. */
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Camera");
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- memset(v,0,sizeof(*v));
- strcpy(v->name, "Format");
- v->mode = VIDEO_MODE_AUTO;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=24;
- p->palette=VIDEO_PALETTE_RGB24;
- return 0;
+ struct video_capability *b = arg;
+
+ strcpy(b->name, "Quickcam");
+ b->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
+ b->channels = 1;
+ b->audios = 0;
+ b->maxwidth = 320;
+ b->maxheight = 240;
+ b->minwidth = 80;
+ b->minheight = 60;
+ return 0;
+ }
+ case VIDIOCGCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ v->flags = 0;
+ v->tuners = 0;
+ /* Good question.. its composite or SVHS so.. */
+ v->type = VIDEO_TYPE_CAMERA;
+ strcpy(v->name, "Camera");
+ return 0;
+ }
+ case VIDIOCSCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ memset(v, 0, sizeof(*v));
+ strcpy(v->name, "Format");
+ v->mode = VIDEO_MODE_AUTO;
+ return 0;
+ }
+ case VIDIOCSTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ if (v->mode != VIDEO_MODE_AUTO)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGPICT:
+ {
+ struct video_picture *p = arg;
+
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = 24;
+ p->palette = VIDEO_PALETTE_RGB24;
+ return 0;
+ }
+ case VIDIOCSPICT:
+ {
+ struct video_picture *p = arg;
+
+ /*
+ * Sanity check args
+ */
+ if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
+ return -EINVAL;
+
+ /*
+ * Now load the camera.
+ */
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
+
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCSWIN:
+ {
+ struct video_window *vw = arg;
+
+ if (vw->flags)
+ return -EINVAL;
+ if (vw->clipcount)
+ return -EINVAL;
+ if (vw->height < 60 || vw->height > 240)
+ return -EINVAL;
+ if (vw->width < 80 || vw->width > 320)
+ return -EINVAL;
+
+ qcam->width = 80;
+ qcam->height = 60;
+ qcam->mode = QC_DECIMATION_4;
+
+ if (vw->width >= 160 && vw->height >= 120) {
+ qcam->width = 160;
+ qcam->height = 120;
+ qcam->mode = QC_DECIMATION_2;
}
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
-
- /*
- * Sanity check args
- */
- if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
- return -EINVAL;
-
- /*
- * Now load the camera.
- */
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
-
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
+ if (vw->width >= 320 && vw->height >= 240) {
+ qcam->width = 320;
+ qcam->height = 240;
+ qcam->mode = QC_DECIMATION_1;
}
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
-
- if(vw->flags)
- return -EINVAL;
- if(vw->clipcount)
- return -EINVAL;
- if(vw->height<60||vw->height>240)
- return -EINVAL;
- if(vw->width<80||vw->width>320)
- return -EINVAL;
-
- qcam->width = 80;
- qcam->height = 60;
- qcam->mode = QC_DECIMATION_4;
-
- if(vw->width>=160 && vw->height>=120)
- {
- qcam->width = 160;
- qcam->height = 120;
- qcam->mode = QC_DECIMATION_2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
- qcam->width = 320;
- qcam->height = 240;
- qcam->mode = QC_DECIMATION_1;
- }
- qcam->mode |= QC_MILLIONS;
+ qcam->mode |= QC_MILLIONS;
#if 0
- if(vw->width>=640 && vw->height>=480)
- {
- qcam->width = 640;
- qcam->height = 480;
- qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
- }
-#endif
- /* Ok we figured out what to use from our
- wide choice */
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
- memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width;
- vw->height=qcam->height;
- return 0;
+ if (vw->width >= 640 && vw->height >= 480) {
+ qcam->width = 640;
+ qcam->height = 480;
+ qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+#endif
+ /* Ok we figured out what to use from our
+ wide choice */
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCGWIN:
+ {
+ struct video_window *vw = arg;
+ memset(vw, 0, sizeof(*vw));
+ vw->width = qcam->width;
+ vw->height = qcam->height;
+ return 0;
+ }
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
@@ -675,13 +670,13 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
mutex_lock(&qcam->lock);
parport_claim_or_block(qcam->pdev);
/* Probably should have a semaphore against multiple users */
- len = qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
parport_release(qcam->pdev);
mutex_unlock(&qcam->lock);
return len;
@@ -713,8 +708,7 @@ static const struct v4l2_file_operations qcam_fops = {
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Colour QuickCam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -727,17 +721,16 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
- q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE)?1:0;
+ q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
- if (q->pdev == NULL)
- {
+ if (q->pdev == NULL) {
printk(KERN_ERR "c-qcam: couldn't register for %s.\n",
port->name);
kfree(q);
@@ -765,12 +758,11 @@ static int init_cqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (parport[0] != -1)
- {
+ if (parport[0] != -1) {
/* The user gave specific instructions */
int i, found = 0;
- for (i = 0; i < MAX_CAMS && parport[i] != -1; i++)
- {
+
+ for (i = 0; i < MAX_CAMS && parport[i] != -1; i++) {
if (parport[0] == port->number)
found = 1;
}
@@ -782,15 +774,14 @@ static int init_cqcam(struct parport *port)
return -ENOSPC;
qcam = qcam_init(port);
- if (qcam==NULL)
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if (probe && qc_detect(qcam)==0)
- {
+ if (probe && qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -840,14 +831,14 @@ static struct parport_driver cqcam_driver = {
.detach = cq_detach,
};
-static int __init cqcam_init (void)
+static int __init cqcam_init(void)
{
printk(BANNER "\n");
return parport_register_driver(&cqcam_driver);
}
-static void __exit cqcam_cleanup (void)
+static void __exit cqcam_cleanup(void)
{
unsigned int i;
@@ -862,9 +853,9 @@ MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
/* FIXME: parport=auto would never have worked, surely? --RR */
-MODULE_PARM_DESC(parport ,"parport=<auto|n[,n]...> for port detection method\n\
-probe=<0|1|2> for camera detection method\n\
-force_rgb=<0|1> for RGB data format (default BGR)");
+MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
+ "probe=<0|1|2> for camera detection method\n"
+ "force_rgb=<0|1> for RGB data format (default BGR)");
module_param_array(parport, int, NULL, 0);
module_param(probe, int, 0);
module_param(force_rgb, bool, 0);
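
As a companion to the reindented VIDIOCSWIN case above, a small standalone sketch of its window-to-decimation selection. The QC_* values and struct fields here are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdio.h>

#define QC_DECIMATION_1 0x00    /* stand-in values, not the real header */
#define QC_DECIMATION_2 0x10
#define QC_DECIMATION_4 0x20
#define QC_MILLIONS     0x04

struct demo_qcam {
        int width, height;
        int mode;
};

/* Pick the largest supported size not exceeding the request, exactly
 * as the VIDIOCSWIN case above does, then OR in the colour-depth bit. */
static void demo_set_window(struct demo_qcam *q, int req_w, int req_h)
{
        q->width = 80;
        q->height = 60;
        q->mode = QC_DECIMATION_4;
        if (req_w >= 160 && req_h >= 120) {
                q->width = 160;
                q->height = 120;
                q->mode = QC_DECIMATION_2;
        }
        if (req_w >= 320 && req_h >= 240) {
                q->width = 320;
                q->height = 240;
                q->mode = QC_DECIMATION_1;
        }
        q->mode |= QC_MILLIONS;
}

int main(void)
{
        struct demo_qcam q;

        demo_set_window(&q, 200, 150); /* falls back to 160x120 */
        printf("%dx%d mode=0x%02x\n", q.width, q.height, q.mode);
        return 0;
}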
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 6f91415eb7b..5520789854d 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -324,7 +324,7 @@ static int cpia2_close(struct file *file)
{
if(fh->mmapped)
cam->mmapped = 0;
- v4l2_prio_close(&cam->prio,&fh->prio);
+ v4l2_prio_close(&cam->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
}
@@ -1592,7 +1592,7 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_FMT:
{
struct cpia2_fh *fh = file->private_data;
- retval = v4l2_prio_check(&cam->prio, &fh->prio);
+ retval = v4l2_prio_check(&cam->prio, fh->prio);
if(retval) {
mutex_unlock(&cam->busy_lock);
return retval;
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index 4392c76af5d..0e5006b1427 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -579,6 +579,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
u8 afe_mux_cfg;
u8 adc2_cfg;
+ u8 input_mode;
u32 afe_cfg;
int i;
@@ -589,6 +590,30 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
vid_input <= CX18_AV_COMPOSITE8) {
afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
ch[0] = CVBS;
+ input_mode = 0x0;
+ } else if (vid_input >= CX18_AV_COMPONENT_LUMA1) {
+ int luma = vid_input & 0xf000;
+ int r_chroma = vid_input & 0xf0000;
+ int b_chroma = vid_input & 0xf00000;
+
+ if ((vid_input & ~0xfff000) ||
+ luma < CX18_AV_COMPONENT_LUMA1 ||
+ luma > CX18_AV_COMPONENT_LUMA8 ||
+ r_chroma < CX18_AV_COMPONENT_R_CHROMA4 ||
+ r_chroma > CX18_AV_COMPONENT_R_CHROMA6 ||
+ b_chroma < CX18_AV_COMPONENT_B_CHROMA7 ||
+ b_chroma > CX18_AV_COMPONENT_B_CHROMA8) {
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
+ vid_input);
+ return -EINVAL;
+ }
+ afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12;
+ ch[0] = Y;
+ afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12;
+ ch[1] = Pr;
+ afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14;
+ ch[2] = Pb;
+ input_mode = 0x6;
} else {
int luma = vid_input & 0xf0;
int chroma = vid_input & 0xf00;
@@ -598,7 +623,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
luma > CX18_AV_SVIDEO_LUMA8 ||
chroma < CX18_AV_SVIDEO_CHROMA4 ||
chroma > CX18_AV_SVIDEO_CHROMA8) {
- CX18_ERR_DEV(sd, "0x%04x is not a valid video input!\n",
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
vid_input);
return -EINVAL;
}
@@ -613,8 +638,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
ch[1] = C;
}
+ input_mode = 0x2;
}
- /* TODO: LeadTek WinFast DVR3100 H & WinFast PVR2100 can do Y/Pb/Pr */
switch (aud_input) {
case CX18_AV_AUDIO_SERIAL1:
@@ -650,8 +675,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
/* Set up analog front end multiplexers */
cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
- /* Set INPUT_MODE to Composite (0) or S-Video (1) */
- cx18_av_and_or(cx, 0x401, ~0x6, ch[0] == CVBS ? 0 : 0x02);
+ /* Set INPUT_MODE to Composite, S-Video, or Component */
+ cx18_av_and_or(cx, 0x401, ~0x6, input_mode);
/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
adc2_cfg = cx18_av_read(cx, 0x102);
@@ -998,9 +1023,9 @@ static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
static int cx18_av_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- return cx18_av_vbi_g_fmt(cx, fmt);
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return cx18_av_g_sliced_fmt(sd, &fmt->fmt.sliced);
}
static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
@@ -1073,12 +1098,6 @@ static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx18_av_write(cx, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
default:
return -EINVAL;
}
@@ -1378,17 +1397,24 @@ static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = {
static const struct v4l2_subdev_video_ops cx18_av_video_ops = {
.s_routing = cx18_av_s_video_routing,
- .decode_vbi_line = cx18_av_decode_vbi_line,
.s_stream = cx18_av_s_stream,
.g_fmt = cx18_av_g_fmt,
.s_fmt = cx18_av_s_fmt,
};
+static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = {
+ .decode_vbi_line = cx18_av_decode_vbi_line,
+ .g_sliced_fmt = cx18_av_g_sliced_fmt,
+ .s_sliced_fmt = cx18_av_s_sliced_fmt,
+ .s_raw_fmt = cx18_av_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops cx18_av_ops = {
.core = &cx18_av_general_ops,
.tuner = &cx18_av_tuner_ops,
.audio = &cx18_av_audio_ops,
.video = &cx18_av_video_ops,
+ .vbi = &cx18_av_vbi_ops,
};
int cx18_av_probe(struct cx18 *cx)
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
index cafb7e99b9a..c106967bdcc 100644
--- a/drivers/media/video/cx18/cx18-av-core.h
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -61,6 +61,25 @@ enum cx18_av_video_input {
CX18_AV_SVIDEO2 = 0x620,
CX18_AV_SVIDEO3 = 0x730,
CX18_AV_SVIDEO4 = 0x840,
+
+ /* Component Video inputs consist of one luma input (In1-In8) ORed
+ with a red chroma (In4-In6) and blue chroma input (In7-In8) */
+ CX18_AV_COMPONENT_LUMA1 = 0x1000,
+ CX18_AV_COMPONENT_LUMA2 = 0x2000,
+ CX18_AV_COMPONENT_LUMA3 = 0x3000,
+ CX18_AV_COMPONENT_LUMA4 = 0x4000,
+ CX18_AV_COMPONENT_LUMA5 = 0x5000,
+ CX18_AV_COMPONENT_LUMA6 = 0x6000,
+ CX18_AV_COMPONENT_LUMA7 = 0x7000,
+ CX18_AV_COMPONENT_LUMA8 = 0x8000,
+ CX18_AV_COMPONENT_R_CHROMA4 = 0x40000,
+ CX18_AV_COMPONENT_R_CHROMA5 = 0x50000,
+ CX18_AV_COMPONENT_R_CHROMA6 = 0x60000,
+ CX18_AV_COMPONENT_B_CHROMA7 = 0x700000,
+ CX18_AV_COMPONENT_B_CHROMA8 = 0x800000,
+
+ /* Component Video aliases for common combinations */
+ CX18_AV_COMPONENT1 = 0x861000,
};
enum cx18_av_audio_input {
@@ -359,7 +378,8 @@ void cx18_av_audio_set_path(struct cx18 *cx);
/* cx18_av-vbi.c */
int cx18_av_decode_vbi_line(struct v4l2_subdev *sd,
struct v4l2_decode_vbi_line *vbi);
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt);
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt);
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
#endif
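
To make the bit layout of the new component-video enum above concrete, a small userspace example that decomposes CX18_AV_COMPONENT1 with the same masks and shifts set_input() uses in the cx18-av-core.c hunk. The printed mux value is illustrative only, not a register write.

#include <stdio.h>

/* Values copied from the enum added above. */
#define CX18_AV_COMPONENT_LUMA1         0x1000
#define CX18_AV_COMPONENT_R_CHROMA4     0x40000
#define CX18_AV_COMPONENT_B_CHROMA7     0x700000
#define CX18_AV_COMPONENT1              0x861000 /* LUMA1 | R_CHROMA6 | B_CHROMA8 */

int main(void)
{
        unsigned int vid_input = CX18_AV_COMPONENT1;
        unsigned int luma     = vid_input & 0xf000;    /* In1-In8 carries Y  */
        unsigned int r_chroma = vid_input & 0xf0000;   /* In4-In6 carries Pr */
        unsigned int b_chroma = vid_input & 0xf00000;  /* In7-In8 carries Pb */

        /* Same folding as set_input() in the hunk above. */
        unsigned int afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12;

        afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12;
        afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14;

        printf("luma=0x%x r_chroma=0x%x b_chroma=0x%x -> afe_mux_cfg=0x%02x\n",
               luma, r_chroma, b_chroma, afe_mux_cfg);
        return 0;
}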
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index a51732bcca4..baa36fbcd4d 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -129,10 +129,10 @@ static int decode_vps(u8 *dst, u8 *p)
return err & 0xf0;
}
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -143,9 +143,6 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
@@ -173,30 +170,27 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
return 0;
}
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
- int is_pal = !(state->std & V4L2_STD_525_60);
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx18_av_std_setup(cx);
- /* Setup standard */
- cx18_av_std_setup(cx);
+ /* VBI Offset */
+ cx18_av_write(cx, 0x47f, state->slicer_line_delay);
+ cx18_av_write(cx, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx18_av_write(cx, 0x47f, state->slicer_line_delay);
- cx18_av_write(cx, 0x404, 0x2e);
- return 0;
- }
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
+ struct cx18_av_state *state = &cx->av_state;
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index f808fb6fc1c..74e122b5fc4 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -370,6 +370,7 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -422,6 +423,7 @@ static const struct cx18_card cx18_card_leadtek_dvr3100h = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -480,7 +482,7 @@ int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
"S-Video 2",
"Composite 1",
"Composite 2",
- "Composite 3"
+ "Component 1"
};
memset(input, 0, sizeof(*input));
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
index af3d71607dc..796e517300a 100644
--- a/drivers/media/video/cx18/cx18-cards.h
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -43,7 +43,7 @@
#define CX18_CARD_INPUT_SVIDEO2 3
#define CX18_CARD_INPUT_COMPOSITE1 4
#define CX18_CARD_INPUT_COMPOSITE2 5
-#define CX18_CARD_INPUT_COMPOSITE3 6
+#define CX18_CARD_INPUT_COMPONENT1 6
/* audio inputs */
#define CX18_CARD_INPUT_AUD_TUNER 1
@@ -62,7 +62,7 @@
struct cx18_card_video_input {
u8 video_type; /* video input type */
u8 audio_index; /* index in cx18_card_audio_input array */
- u16 video_input; /* hardware video input */
+ u32 video_input; /* hardware video input */
};
struct cx18_card_audio_input {
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 7fa589240ff..4b4b46544d5 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -263,7 +263,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
int ret;
struct v4l2_control ctrl;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 863ce775823..e12a15020cd 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -700,7 +700,7 @@ int cx18_v4l2_close(struct file *filp)
CX18_DEBUG_IOCTL("close() of %s\n", s->name);
- v4l2_prio_close(&cx->prio, &id->prio);
+ v4l2_prio_close(&cx->prio, id->prio);
/* Easy case first: this stream was never claimed by us */
if (s->id != id->open_id) {
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index eecf29af916..cfa1f289b0f 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -109,7 +109,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case CX18_HW_Z8F0811_IR_RX_HAUP:
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = cx->card_name;
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index b81dd0ea8eb..2530fc54daa 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -208,7 +208,7 @@ static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
* digitizer/slicer. Note, cx18_av_vbi() wipes the passed in
* fmt->fmt.sliced under valid calling conditions
*/
- if (v4l2_subdev_call(cx->sd_av, video, g_fmt, fmt))
+ if (v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, &fmt->fmt.sliced))
return -EINVAL;
/* Ensure V4L2 spec compliant output */
@@ -277,7 +277,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
int ret;
int w, h;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -306,7 +306,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -322,7 +322,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
 * Note cx18_av_vbi() wipes out a lot of the passed in fmt under valid
* calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &fmt->fmt.vbi);
if (ret)
return ret;
@@ -341,7 +341,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
int ret;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -359,7 +359,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
* Note, cx18_av_vbi() wipes some "impossible" service lines in the
* passed in fmt->fmt.sliced under valid calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &fmt->fmt.sliced);
if (ret)
return ret;
/* Store our current v4l2 sliced VBI settings */
@@ -549,7 +549,7 @@ static int cx18_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -601,7 +601,7 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -647,7 +647,7 @@ int cx18_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -675,7 +675,7 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id *std)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -715,7 +715,7 @@ static int cx18_s_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 054450f65a6..f5c91261b2d 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -374,7 +374,10 @@ static void cx18_vbi_setup(struct cx18_stream *s)
}
/* setup VBI registers */
- v4l2_subdev_call(cx->sd_av, video, s_fmt, &cx->vbi.in);
+ if (raw)
+ v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &cx->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &cx->vbi.in.fmt.sliced);
/*
* Send the CX18_CPU_SET_RAW_VBI_PARAM API command to setup Encoder Raw
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
index 574c1c6974f..582227522cf 100644
--- a/drivers/media/video/cx18/cx18-vbi.c
+++ b/drivers/media/video/cx18/cx18-vbi.c
@@ -174,7 +174,7 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
p[3] != sliced_vbi_eav_rp[1]))
continue;
vbi.p = p + 4;
- v4l2_subdev_call(cx->sd_av, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(cx->sd_av, vbi, decode_vbi_line, &vbi);
if (vbi.type) {
cx->vbi.sliced_data[line].id = vbi.type;
cx->vbi.sliced_data[line].field = vbi.is_second_field;
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 7793d60966d..7cae95a2245 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -495,7 +495,7 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
- strcpy(card->driver, "Conexant cx231xx Audio");
+ strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index b24eee115e7..912a4d74020 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -96,10 +96,9 @@ int cx231xx_register_extension(struct cx231xx_ops *ops)
mutex_lock(&cx231xx_devlist_mutex);
mutex_lock(&cx231xx_extension_devlist_lock);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->init(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->init(dev);
+
printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
@@ -112,10 +111,8 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->fini(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->fini(dev);
mutex_lock(&cx231xx_extension_devlist_lock);
printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
@@ -679,11 +676,11 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
usb_unlink_urb(urb);
if (dev->video_mode.isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
- urb->transfer_buffer_length,
- dev->video_mode.isoc_ctl.
- transfer_buffer[i],
- urb->transfer_dma);
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->video_mode.isoc_ctl.
+ transfer_buffer[i],
+ urb->transfer_dma);
}
usb_free_urb(urb);
dev->video_mode.isoc_ctl.urb[i] = NULL;
@@ -770,8 +767,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev->video_mode.isoc_ctl.urb[i] = urb;
dev->video_mode.isoc_ctl.transfer_buffer[i] =
- usb_buffer_alloc(dev->udev, sb_size, GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) {
cx231xx_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index b473cd8367f..fd099153b74 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -35,6 +35,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "cx231xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -59,7 +61,6 @@ struct cx231xx_ir_poll_result {
struct cx231xx_IR {
struct cx231xx *dev;
struct input_dev *input;
- struct ir_input_state ir;
char name[32];
char phys[32];
@@ -67,9 +68,7 @@ struct cx231xx_IR {
int polling;
struct work_struct work;
struct timer_list timer;
- unsigned int last_toggle:1;
unsigned int last_readcount;
- unsigned int repeat_interval;
int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *);
};
@@ -81,7 +80,6 @@ struct cx231xx_IR {
static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
{
int result;
- int do_sendkey = 0;
struct cx231xx_ir_poll_result poll_result;
/* read the registers containing the IR status */
@@ -95,44 +93,23 @@ static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
poll_result.toggle_bit, poll_result.read_count,
ir->last_readcount, poll_result.rc_data[0]);
- if (ir->dev->chip_id == CHIP_ID_EM2874) {
+ if (poll_result.read_count > 0 &&
+ poll_result.read_count != ir->last_readcount)
+ ir_keydown(ir->input,
+ poll_result.rc_data[0],
+ poll_result.toggle_bit);
+
+ if (ir->dev->chip_id == CHIP_ID_EM2874)
/* The em2874 clears the readcount field every time the
register is read. The em2860/2880 datasheet says that it
is supposed to clear the readcount, but it doesn't. So with
the em2874, we are looking for a non-zero read count as
opposed to a readcount that is incrementing */
ir->last_readcount = 0;
- }
-
- if (poll_result.read_count == 0) {
- /* The button has not been pressed since the last read */
- } else if (ir->last_toggle != poll_result.toggle_bit) {
- /* A button has been pressed */
- dprintk("button has been pressed\n");
- ir->last_toggle = poll_result.toggle_bit;
- ir->repeat_interval = 0;
- do_sendkey = 1;
- } else if (poll_result.toggle_bit == ir->last_toggle &&
- poll_result.read_count > 0 &&
- poll_result.read_count != ir->last_readcount) {
- /* The button is still being held down */
- dprintk("button being held down\n");
-
- /* Debouncer for first keypress */
- if (ir->repeat_interval++ > 9) {
- /* Start repeating after 1 second */
- do_sendkey = 1;
- }
- }
+ else
+ ir->last_readcount = poll_result.read_count;
- if (do_sendkey) {
- dprintk("sending keypress\n");
- ir_input_keydown(ir->input, &ir->ir, poll_result.rc_data[0]);
- ir_input_nokey(ir->input, &ir->ir);
}
-
- ir->last_readcount = poll_result.read_count;
- return;
}
static void ir_timer(unsigned long data)
@@ -198,10 +175,6 @@ int cx231xx_ir_init(struct cx231xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
- if (err < 0)
- goto err_out_free;
-
input_dev->name = ir->name;
input_dev->phys = ir->phys;
input_dev->id.bustype = BUS_USB;
@@ -217,7 +190,8 @@ int cx231xx_ir_init(struct cx231xx *dev)
cx231xx_ir_start(ir);
/* all done */
- err = ir_input_register(ir->input, dev->board.ir_codes, NULL);
+ err = __ir_input_register(ir->input, dev->board.ir_codes,
+ NULL, MODULE_NAME);
if (err)
goto err_out_stop;
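
The cx231xx IR hunk above replaces the toggle-bit/repeat-interval state machine with a simple readcount comparison plus ir_keydown(); the following standalone model (made-up struct names, not kernel code) shows that decision, including the em2874 quirk where the hardware clears its readcount on every register read.

#include <stdio.h>

struct demo_poll_result {
        unsigned int read_count;
        unsigned int toggle_bit;
        unsigned char rc_data0;
};

struct demo_ir {
        unsigned int last_readcount;
        int is_em2874;  /* chip clears its readcount on every read */
};

/* Returns 1 when a keypress would be reported via ir_keydown(). */
static int demo_handle_poll(struct demo_ir *ir,
                            const struct demo_poll_result *p)
{
        int send = (p->read_count > 0 &&
                    p->read_count != ir->last_readcount);

        if (ir->is_em2874)
                ir->last_readcount = 0;  /* any non-zero count looks new */
        else
                ir->last_readcount = p->read_count;
        return send;
}

int main(void)
{
        struct demo_ir ir = { 0, 0 };
        struct demo_poll_result p = { 3, 1, 0x1e };

        printf("send=%d\n", demo_handle_poll(&ir, &p)); /* 1: new count  */
        printf("send=%d\n", demo_handle_poll(&ir, &p)); /* 0: unchanged  */
        return 0;
}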
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 16a73eab672..2782709b263 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -1669,7 +1669,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
f->fmt.sliced.service_set = 0;
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1690,7 +1690,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
@@ -1902,9 +1902,12 @@ static int radio_queryctrl(struct file *file, void *priv,
if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX231XX_CTLS; i++)
+ for (i = 0; i < CX231XX_CTLS; i++) {
if (cx231xx_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX231XX_CTLS)
+ return -EINVAL;
*c = cx231xx_ctls[i].v;
} else
*c = no_ctl;
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 17d4d1a800c..38d417191a6 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -32,7 +32,7 @@
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
-#include <media/ir-kbd-i2c.h>
+#include <media/ir-core.h>
#if defined(CONFIG_VIDEO_CX231XX_DVB) || \
defined(CONFIG_VIDEO_CX231XX_DVB_MODULE)
#include <media/videobuf-dvb.h>
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 4c8e95853fa..022b480918c 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -1000,20 +1000,6 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
h, w);
if (err) return err;
}
-
- if (new->width != 720 || new->height != (new->is_50hz ? 576 : 480)) {
- /* Adjust temporal filter if necessary. The problem with the
- temporal filter is that it works well with full resolution
- capturing, but not when the capture window is scaled (the
- filter introduces a ghosting effect). So if the capture
- window is scaled, then force the filter to 0.
-
- For full resolution the filter really improves the video
- quality, especially if the original video quality is
- suboptimal. */
- temporal = 0;
- }
-
if (force || NEQ(stream_type)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1,
mpeg_stream_type[new->stream_type]);
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index d4a9d2c5947..c95e7bc1474 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -60,12 +60,18 @@ static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
+static unsigned int ci_irq_enable;
+module_param(ci_irq_enable, int, 0644);
+MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
+
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
+#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
+
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
@@ -392,7 +398,7 @@ int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open
if (0 != slot)
return -EINVAL;
- netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | NETUP_IRQ_IRQAM)
+ netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
@@ -429,7 +435,7 @@ int netup_ci_init(struct cx23885_tsport *port)
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
- NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
+ ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
@@ -470,7 +476,7 @@ int netup_ci_init(struct cx23885_tsport *port)
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
- state->current_irq_mode = NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM;
+ state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
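
A short sketch of the ci_irq_flags() pattern the cimax2.c hunks introduce: a module parameter decides whether the CAM IRQ bit joins the detach-interrupt mask. The mask values below are stand-ins, not the NetUP register definitions.

#include <stdio.h>

#define DEMO_IRQ_DETAM  0x01    /* stand-in for NETUP_IRQ_DETAM */
#define DEMO_IRQ_IRQAM  0x02    /* stand-in for NETUP_IRQ_IRQAM */

static unsigned int ci_irq_enable;      /* the driver exposes this as a module_param */
#define demo_irq_flags() (ci_irq_enable ? DEMO_IRQ_IRQAM : 0)

static unsigned int demo_slot_mask(int open)
{
        return open ? (DEMO_IRQ_DETAM | demo_irq_flags()) : DEMO_IRQ_DETAM;
}

int main(void)
{
        printf("default open mask=0x%02x\n", demo_slot_mask(1));               /* 0x01 */
        ci_irq_enable = 1;
        printf("with CAM IRQ enabled, open mask=0x%02x\n", demo_slot_mask(1)); /* 0x03 */
        return 0;
}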
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index a8ddc227cf8..abd64e89f60 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1356,7 +1356,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct cx23885_dev *dev = fh->dev;
struct cx23885_tsport *tsport = &dev->ts1;
- strcpy(cap->driver, dev->name);
+ strlcpy(cap->driver, dev->name, sizeof(cap->driver));
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 939079d7bbb..9e1460828b2 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1006,6 +1006,22 @@ static int dvb_register(struct cx23885_tsport *port)
netup_ci_init(port);
break;
}
+ case CX23885_BOARD_TEVII_S470: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr != 1)
+ break;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
+ printk(KERN_INFO "TeVii S470 MAC= "
+ "%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eeprom[0xa0], eeprom[0xa1], eeprom[0xa2],
+ eeprom[0xa3], eeprom[0xa4], eeprom[0xa5]);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
+ break;
+ }
}
return ret;
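
The TeVii S470 hunk above reads the whole 24C02 EEPROM and takes the MAC address from offset 0xa0; here is a hedged userspace illustration of just that extraction step. The EEPROM contents below are fake placeholder bytes.

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char eeprom[256] = { 0 };      /* pretend 24C02 dump */
        unsigned char mac[6];

        /* hypothetical contents at the offset used in the hunk above */
        memcpy(eeprom + 0xa0, "\xaa\xbb\xcc\x01\x02\x03", 6);

        memcpy(mac, eeprom + 0xa0, 6);          /* same copy dvb_register() does */
        printf("MAC= %02X:%02X:%02X:%02X:%02X:%02X\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}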
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 8e9d990dbe9..8d306d8bb61 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -51,6 +51,8 @@
#define RC5_EXTENDED_COMMAND_OFFSET 64
+#define MODULE_NAME "cx23885"
+
static inline unsigned int rc5_command(u32 rc5_baseband)
{
return RC5_INSTR(rc5_baseband) +
@@ -338,7 +340,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
int ir_type, ir_addr, ir_start;
int ret;
@@ -353,7 +355,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
/* Parameters for the grey Hauppauge remote for the HVR-1850 */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir_addr = 0x1e; /* RC-5 system bits emitted by the remote */
ir_start = RC5_START_BITS_NORMAL; /* A basic RC-5 remote */
@@ -398,7 +400,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
dev->ir_input = ir;
cx23885_input_ir_start(dev);
- ret = ir_input_register(ir->dev, ir_codes, NULL);
+ ret = ir_input_register(ir->dev, ir_codes, NULL, MODULE_NAME);
if (ret)
goto err_out_stop;
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 2d3ac8b83dc..543b854f6a6 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -514,8 +514,8 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count,
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
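
The buffer_setup() change above swaps a decrement-until-it-fits loop for a single division that caps the buffer count at the vid_limit memory budget; a minimal standalone version of that calculation (names are stand-ins) follows.

#include <stdio.h>

/* Cap count so size * count stays within vid_limit megabytes. */
static unsigned int demo_cap_count(unsigned int size, unsigned int count,
                                   unsigned int vid_limit_mb)
{
        unsigned long limit = (unsigned long)vid_limit_mb * 1024 * 1024;

        if (count == 0)
                count = 32;
        if ((unsigned long)size * count > limit)
                count = limit / size;   /* one division instead of a loop */
        return count;
}

int main(void)
{
        /* 32 buffers of 640*480*2 bytes against a 16 MB limit -> 27 */
        printf("%u\n", demo_cap_count(640 * 480 * 2, 32, 16));
        return 0;
}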
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f2461cd3de5..8b6fb354437 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1018,7 +1018,7 @@ static int cx25840_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
switch (fmt->type) {
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_g_fmt(sd, fmt);
+ return cx25840_g_sliced_fmt(sd, &fmt->fmt.sliced);
default:
return -EINVAL;
}
@@ -1079,12 +1079,6 @@ static int cx25840_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx25840_write(client, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
default:
return -EINVAL;
}
@@ -1635,15 +1629,22 @@ static const struct v4l2_subdev_video_ops cx25840_video_ops = {
.s_routing = cx25840_s_video_routing,
.g_fmt = cx25840_g_fmt,
.s_fmt = cx25840_s_fmt,
- .decode_vbi_line = cx25840_decode_vbi_line,
.s_stream = cx25840_s_stream,
};
+static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
+ .decode_vbi_line = cx25840_decode_vbi_line,
+ .s_raw_fmt = cx25840_s_raw_fmt,
+ .s_sliced_fmt = cx25840_s_sliced_fmt,
+ .g_sliced_fmt = cx25840_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops cx25840_ops = {
.core = &cx25840_core_ops,
.tuner = &cx25840_tuner_ops,
.audio = &cx25840_audio_ops,
.video = &cx25840_video_ops,
+ .vbi = &cx25840_vbi_ops,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index 55345444417..04393b97156 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -99,8 +99,9 @@ int cx25840_audio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
/* ----------------------------------------------------------------------- */
/* cx25850-vbi.c */
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi);
#endif
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 35f6592f6c4..64a4004f8a9 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -82,11 +82,10 @@ static int decode_vps(u8 * dst, u8 * p)
return err & 0xf0;
}
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -97,9 +96,6 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx25840_read(client, 0x404) & 0x10) == 0)
@@ -127,32 +123,30 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
int is_pal = !(state->std & V4L2_STD_525_60);
int vbi_offset = is_pal ? 1 : 0;
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx25840_std_setup(client);
- /* Setup standard */
- cx25840_std_setup(client);
+ /* VBI Offset */
+ cx25840_write(client, 0x47f, vbi_offset);
+ cx25840_write(client, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx25840_write(client, 0x47f, vbi_offset);
- cx25840_write(client, 0x404, 0x2e);
- return 0;
- }
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct cx25840_state *state = to_state(sd);
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int vbi_offset = is_pal ? 1 : 0;
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
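
With the format handling split per VBI type, a bridge driver no longer funnels everything through a struct v4l2_format; it calls the matching vbi op directly. A minimal caller-side sketch, for illustration only, assuming a valid struct v4l2_subdev *sd wired to the ops table added above:

	struct v4l2_sliced_vbi_format svbi;
	int err;

	memset(&svbi, 0, sizeof(svbi));
	err = v4l2_subdev_call(sd, vbi, g_sliced_fmt, &svbi);
	if (!err && svbi.service_set == 0) {
		/* no sliced services reported, so program raw VBI instead */
		struct v4l2_vbi_format raw;

		memset(&raw, 0, sizeof(raw));	/* filled in by the bridge driver */
		err = v4l2_subdev_call(sd, vbi, s_raw_fmt, &raw);
	}
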
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index b35411160f0..8b21457111b 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -847,7 +847,8 @@ static int set_tvaudio(struct cx88_core *core)
{
v4l2_std_id norm = core->tvnorm;
- if (CX88_VMUX_TELEVISION != INPUT(core->input).type)
+ if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
+ CX88_VMUX_CABLE != INPUT(core->input).type)
return 0;
if (V4L2_STD_PAL_BG & norm) {
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 94ab862f021..faa8e8163a4 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1231,7 +1231,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
- case CX88_BOARD_PINNACLE_HYBRID_PCTV:
+ case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 6b6abf062c2..e185289e446 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -32,12 +32,18 @@
#include "cx88.h"
#include <media/ir-common.h>
+#define MODULE_NAME "cx88xx"
+
/* ---------------------------------------------------------------------- */
struct cx88_IR {
struct cx88_core *core;
struct input_dev *input;
struct ir_input_state ir;
+ struct ir_dev_props props;
+
+ int users;
+
char name[32];
char phys[32];
@@ -159,8 +165,16 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
+static int __cx88_ir_start(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return -EINVAL;
+
+ ir = core->ir;
+
if (ir->polling) {
hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir->timer.function = cx88_ir_work;
@@ -173,10 +187,18 @@ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
cx_write(MO_DDS_IO, 0xa80a80); /* 4 kHz sample rate */
cx_write(MO_DDSCFG_IO, 0x5); /* enable */
}
+ return 0;
}
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
+static void __cx88_ir_stop(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return;
+
+ ir = core->ir;
if (ir->sampling) {
cx_write(MO_DDSCFG_IO, 0x0);
core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
@@ -186,15 +208,49 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
hrtimer_cancel(&ir->timer);
}
+int cx88_ir_start(struct cx88_core *core)
+{
+ if (core->ir->users)
+ return __cx88_ir_start(core);
+
+ return 0;
+}
+
+void cx88_ir_stop(struct cx88_core *core)
+{
+ if (core->ir->users)
+ __cx88_ir_stop(core);
+}
+
+static int cx88_ir_open(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users++;
+ return __cx88_ir_start(core);
+}
+
+static void cx88_ir_close(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users--;
+ if (!core->ir->users)
+ __cx88_ir_stop(core);
+}
+
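
The user count added above is what lets cx88_ir_start()/cx88_ir_stop() stay safe in paths such as suspend/resume: they only touch the hardware when some input listener has already opened the device. A usage sketch from such a caller's point of view:

	/* e.g. in a PCI resume handler (sketch): */
	if (core->ir)
		cx88_ir_start(core);	/* no-op while core->ir->users == 0 */
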
/* ---------------------------------------------------------------------- */
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
{
struct cx88_IR *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
+ u32 hardware_mask = 0; /* For devices with a hardware mask, when
+ * used with a full-code IR table
+ */
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
input_dev = input_allocate_device();
@@ -208,15 +264,15 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_DNTV_LIVE_DVB_T:
case CX88_BOARD_KWORLD_DVB_T:
case CX88_BOARD_KWORLD_DVB_T_CX22702:
- ir_codes = &ir_codes_dntv_live_dvb_t_table;
+ ir_codes = RC_MAP_DNTV_LIVE_DVB_T;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
- ir_codes = &ir_codes_cinergy_1400_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_CINERGY_1400;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xeb04; /* address */
break;
case CX88_BOARD_HAUPPAUGE:
@@ -230,14 +286,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PCHDTV_HD3000:
case CX88_BOARD_PCHDTV_HD5500:
case CX88_BOARD_HAUPPAUGE_IRONLY:
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_WINFAST_DTV1800H:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
@@ -246,14 +302,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST2000XP_EXPERT:
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_IODATA_GVBCTV7E:
- ir_codes = &ir_codes_iodata_bctv7e_table;
+ ir_codes = RC_MAP_IODATA_BCTV7E;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfd;
ir->mask_keydown = 0x02;
@@ -261,36 +317,43 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_PROLINK_PLAYTVPVR:
case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO:
- ir_codes = &ir_codes_pixelview_table;
+ /*
+	 * This hardware appears to be paired with NEC extended
+	 * address 0x866b, so unfortunately it won't work with other
+	 * remotes that use a different address. Still, other remotes
+	 * from the same manufacturer do work, like the 002-T mini RC
+	 * provided with newer PV hardware.
+ */
+ ir_codes = RC_MAP_PIXELVIEW_MK12;
ir->gpio_addr = MO_GP1_IO;
- ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x80;
- ir->polling = 1; /* ms */
+ ir->polling = 10; /* ms */
+ hardware_mask = 0x3f; /* Hardware returns only 6 bits from command part */
break;
case CX88_BOARD_PROLINK_PV_8000GT:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
- ir_codes = &ir_codes_pixelview_new_table;
+ ir_codes = RC_MAP_PIXELVIEW_NEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x3f;
ir->mask_keyup = 0x80;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_KWORLD_LTV883:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_ADSTECH_DVB_T_PCI:
- ir_codes = &ir_codes_adstech_dvb_t_pci_table;
+ ir_codes = RC_MAP_ADSTECH_DVB_T_PCI;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0xbf;
ir->mask_keyup = 0x40;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
- ir_codes = &ir_codes_msi_tvanywhere_table;
+ ir_codes = RC_MAP_MSI_TVANYWHERE;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x40;
@@ -298,7 +361,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_AVERTV_303:
case CX88_BOARD_AVERTV_STUDIO_303:
- ir_codes = &ir_codes_avertv_303_table;
+ ir_codes = RC_MAP_AVERTV_303;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0xfb;
ir->mask_keydown = 0x02;
@@ -311,41 +374,41 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_PROF_6200:
- ir_codes = &ir_codes_tbs_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TBS_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_TEVII_S460:
case CX88_BOARD_TEVII_S420:
- ir_codes = &ir_codes_tevii_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TEVII_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
- ir_codes = &ir_codes_dntv_live_dvbt_pro_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_DNTV_LIVE_DVBT_PRO;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_NORWOOD_MICRO:
- ir_codes = &ir_codes_norwood_table;
+ ir_codes = RC_MAP_NORWOOD;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x0e;
ir->mask_keyup = 0x80;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
- ir_codes = &ir_codes_npgtech_table;
+ ir_codes = RC_MAP_NPGTECH;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfa;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
- ir_codes = &ir_codes_pinnacle_pctv_hd_table;
+ ir_codes = RC_MAP_PINNACLE_PCTV_HD;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
- ir_codes = &ir_codes_powercolor_real_angel_table;
+ ir_codes = RC_MAP_POWERCOLOR_REAL_ANGEL;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0x7e;
ir->polling = 100; /* ms */
@@ -357,6 +420,21 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
goto err_out_free;
}
+ /*
+	 * mask_keycode used to be very convenient, for several reasons.
+	 * Among others, the scancode tables used the scancode as the index,
+	 * so the fewer bits used, the smaller the stored table. After the
+	 * input changes, it is better to use the full scancode, since that
+	 * allows replacing the IR remote with another one. Unfortunately,
+	 * some hardware, like the Pixelview Ultra Pro, only sends part of
+	 * the scancode via GPIO, so there is no way to get the full
+	 * scancode. hardware_mask was introduced here to describe hardware
+	 * with that limitation.
+ */
+ if (hardware_mask && !ir->mask_keycode)
+ ir->mask_keycode = hardware_mask;
+
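
A sketch of what that means for the Ultra Pro case above: the remote uses NEC extended address 0x866b, but only six command bits reach the GPIO port, so both the raw read-out and the full-code keymap lookup have to be reduced to those bits. gpio_value and the masking of keymap entries by ir-core are assumptions for illustration:

	u32 gpio_value = cx_read(ir->gpio_addr);	/* MO_GP1_IO here */
	u32 scancode   = gpio_value & ir->mask_keycode;	/* 0x3f: 6 command bits */

	/* ir-core is then expected to compare keymap entries against the
	 * same 6 bits, using the scanmask handed over at registration time */
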
/* init input device */
snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
@@ -381,19 +459,20 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->core = core;
core->ir = ir;
- cx88_ir_start(core, ir);
+ ir->props.priv = core;
+ ir->props.open = cx88_ir_open;
+ ir->props.close = cx88_ir_close;
+ ir->props.scanmask = hardware_mask;
/* all done */
- err = ir_input_register(ir->input, ir_codes, NULL);
+ err = ir_input_register(ir->input, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
return 0;
- err_out_stop:
- cx88_ir_stop(core, ir);
- core->ir = NULL;
err_out_free:
+ core->ir = NULL;
kfree(ir);
return err;
}
@@ -406,7 +485,7 @@ int cx88_ir_fini(struct cx88_core *core)
if (NULL == ir)
return 0;
- cx88_ir_stop(core, ir);
+ cx88_ir_stop(core);
ir_input_unregister(ir->input);
kfree(ir);
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6aba7af9160..499f8d512ad 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -599,13 +599,22 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
static int cx8802_request_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
+ unsigned int i;
/* Fail a request for hardware if the device is busy. */
if (core->active_type_id != CX88_BOARD_NONE &&
core->active_type_id != drv->type_id)
return -EBUSY;
- core->input = CX88_VMUX_DVB;
+ core->input = 0;
+ for (i = 0;
+ i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i++) {
+ if (core->board.input[i].type == CX88_VMUX_DVB) {
+ core->input = i;
+ break;
+ }
+ }
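
The element count is spelled out by hand above; the same loop written with the ARRAY_SIZE() helper from <linux/kernel.h> would read as follows (a sketch of the equivalent form, not what the patch applies):

	for (i = 0; i < ARRAY_SIZE(core->board.input); i++) {
		if (core->board.input[i].type == CX88_VMUX_DVB) {
			core->input = i;
			break;
		}
	}
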
if (drv->advise_acquire)
{
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 48c450f4a85..0fab65c3ab3 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -426,12 +426,13 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
if (core->board.audio_chip &&
core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
- INPUT(input).audioroute, 0, 0);
+ INPUT(input).audioroute, 0, 0);
}
/* cx2388's C-ADC is connected to the tuner only.
When used with S-Video, that ADC is busy dealing with
	   chroma, so an external ADC must be used for baseband audio */
- if (INPUT(input).type != CX88_VMUX_TELEVISION ) {
+ if (INPUT(input).type != CX88_VMUX_TELEVISION &&
+ INPUT(input).type != CX88_VMUX_CABLE) {
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
cx88_set_tvaudio(core);
@@ -561,8 +562,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
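
The old loop decremented *count one buffer at a time until the total fit under vid_limit megabytes; the new form computes the cap directly. A worked example with hypothetical numbers:

	/* 16 MB limit, 720x576 YUYV frames: *size = 720 * 576 * 2 = 829440.
	 * 16 * 1024 * 1024 / 829440 = 20, so a request for 32 buffers is
	 * trimmed to 20 in one division instead of 12 loop iterations. */
	unsigned int limit = vid_limit * 1024 * 1024;

	if (*size * *count > limit)
		*count = limit / *size;
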
@@ -1537,9 +1538,12 @@ static int radio_queryctrl (struct file *file, void *priv,
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX8800_CTLS; i++)
+ for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX8800_CTLS)
+ return -EINVAL;
*c = cx8800_ctls[i].v;
} else
*c = no_ctl;
@@ -1977,7 +1981,7 @@ static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
}
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
cx88_shutdown(core); /* FIXME */
pci_disable_device(pci_dev);
@@ -2015,7 +2019,7 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
spin_unlock(&dev->slock);
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
/* FIXME -- shutdown device */
cx88_shutdown(core);
@@ -2056,7 +2060,7 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* FIXME: re-initialize hardware */
cx88_reset(core);
if (core->ir)
- cx88_ir_start(core, core->ir);
+ cx88_ir_start(core);
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 48b6c04fb49..bdb03d33653 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -41,7 +41,7 @@
#include <linux/version.h>
#include <linux/mutex.h>
-#define CX88_VERSION_CODE KERNEL_VERSION(0,0,7)
+#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8)
#define UNSET (-1U)
@@ -290,7 +290,7 @@ struct cx88_subid {
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000)
/* buffer for one video frame */
struct cx88_buffer {
@@ -683,8 +683,8 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core);
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci);
int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir);
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir);
+int cx88_ir_start(struct cx88_core *core);
+void cx88_ir_stop(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c
index b4cc96dc99e..490aafb34e2 100644
--- a/drivers/media/video/davinci/dm644x_ccdc.c
+++ b/drivers/media/video/davinci/dm644x_ccdc.c
@@ -101,6 +101,9 @@ static u32 ccdc_raw_bayer_pix_formats[] =
static u32 ccdc_raw_yuv_pix_formats[] =
{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+/* CCDC Save/Restore context */
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
/* register access routines */
static inline u32 regr(u32 offset)
{
@@ -400,7 +403,11 @@ void ccdc_config_ycbcr(void)
* configure the FID, VD, HD pin polarity,
* fld,hd pol positive, vd negative, 8-bit data
*/
- syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE | CCDC_SYN_MODE_8BITS;
+ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= CCDC_SYN_MODE_10BITS;
+ else
+ syn_mode |= CCDC_SYN_MODE_8BITS;
} else {
/* y/c external sync mode */
syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
@@ -419,8 +426,13 @@ void ccdc_config_ycbcr(void)
* configure the order of y cb cr in SDRAM, and disable latch
* internal register on vsync
*/
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+ CCDC_CCDCFG);
+ else
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
/*
* configure the horizontal line offset. This should be a
@@ -435,7 +447,6 @@ void ccdc_config_ycbcr(void)
ccdc_sbl_reset();
dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
- ccdc_readregs();
}
static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
@@ -827,6 +838,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
case VPFE_BT656:
case VPFE_YCBCR_SYNC_16:
case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
ccdc_cfg.ycbcr.vd_pol = params->vdpol;
ccdc_cfg.ycbcr.hd_pol = params->hdpol;
break;
@@ -837,6 +849,87 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
+static void ccdc_save_context(void)
+{
+ ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+ ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+ ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+ ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+ ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+ ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+ ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+ ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+ ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+ ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+ ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+ ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+ ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+ ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+ ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+ ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+ ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+ ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+ ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+ ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+ ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+ ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+ ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+ ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+ ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+ ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+ ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+ ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+ ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+ ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+ ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+ ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+ ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+ ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+ ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+ ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+ ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
+ regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+ regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+ regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+ regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+ regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+ regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+ regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+ regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+ regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+ regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+ regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+ regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+ regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+ regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+ regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+ regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+ regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+ regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+ regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+ regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+ regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+ regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+ regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+ regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+ regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+ regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+ regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+ regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+ regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+ regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+ regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+ regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+ regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+ regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+ regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+ regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+ regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
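
The context array is indexed by register byte offset divided by four (the registers are 32 bits wide), which is why CCDC_REG_END / sizeof(u32) sizes it. A compressed, table-driven sketch of the same save pattern follows (hypothetical helper, not part of the patch); note that the restore above deliberately writes CCDC_PCR last, so the peripheral is only re-enabled once the rest of its state is back:

static const u16 ccdc_ctx_regs[] = {
	CCDC_PCR, CCDC_SYN_MODE, CCDC_HD_VD_WID, /* ... */ CCDC_VP_OUT,
};

static void ccdc_save_context_sketch(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ccdc_ctx_regs); i++)
		ccdc_ctx[ccdc_ctx_regs[i] >> 2] = regr(ccdc_ctx_regs[i]);
}
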
static struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
@@ -945,10 +1038,40 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
return 0;
}
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+ /* Save CCDC context */
+ ccdc_save_context();
+ /* Disable CCDC */
+ ccdc_enable(0);
+ /* Disable both master and slave clock */
+ clk_disable(ccdc_cfg.mclk);
+ clk_disable(ccdc_cfg.sclk);
+
+ return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+ /* Enable both master and slave clock */
+ clk_enable(ccdc_cfg.mclk);
+ clk_enable(ccdc_cfg.sclk);
+ /* Restore CCDC context */
+ ccdc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+ .suspend = dm644x_ccdc_suspend,
+ .resume = dm644x_ccdc_resume,
+};
+
static struct platform_driver dm644x_ccdc_driver = {
.driver = {
.name = "dm644x_ccdc",
.owner = THIS_MODULE,
+ .pm = &dm644x_ccdc_pm_ops,
},
.remove = __devexit_p(dm644x_ccdc_remove),
.probe = dm644x_ccdc_probe,
diff --git a/drivers/media/video/davinci/dm644x_ccdc_regs.h b/drivers/media/video/davinci/dm644x_ccdc_regs.h
index 6e5d0532446..90370e414e2 100644
--- a/drivers/media/video/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/video/davinci/dm644x_ccdc_regs.h
@@ -59,7 +59,7 @@
#define CCDC_PRGODD_0 0x8c
#define CCDC_PRGODD_1 0x90
#define CCDC_VP_OUT 0x94
-
+#define CCDC_REG_END 0x98
/***************************************************************
* Define for various register bit mask and shifts for CCDC
@@ -135,11 +135,19 @@
#define CCDC_SYN_MODE_INPMOD_SHIFT 12
#define CCDC_SYN_MODE_INPMOD_MASK 3
#define CCDC_SYN_MODE_8BITS (7 << 8)
+#define CCDC_SYN_MODE_10BITS (6 << 8)
+#define CCDC_SYN_MODE_11BITS (5 << 8)
+#define CCDC_SYN_MODE_12BITS (4 << 8)
+#define CCDC_SYN_MODE_13BITS (3 << 8)
+#define CCDC_SYN_MODE_14BITS (2 << 8)
+#define CCDC_SYN_MODE_15BITS (1 << 8)
+#define CCDC_SYN_MODE_16BITS (0 << 8)
#define CCDC_SYN_FLDMODE_MASK 1
#define CCDC_SYN_FLDMODE_SHIFT 7
#define CCDC_REC656IF_BT656_EN 3
#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
#define CCDC_CCDCFG_Y8POS_SHIFT 11
+#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
#define CCDC_NO_CULLING 0xffff00ff
#endif
diff --git a/drivers/media/video/davinci/isif_regs.h b/drivers/media/video/davinci/isif_regs.h
index f7b8893a295..aa69a463c12 100644
--- a/drivers/media/video/davinci/isif_regs.h
+++ b/drivers/media/video/davinci/isif_regs.h
@@ -158,7 +158,7 @@
/* gain - offset masks */
#define GAIN_INTEGER_SHIFT 9
-#define OFFSET_MASK 0xFFF
+#define OFFSET_MASK 0xFFF
#define GAIN_SDRAM_EN_SHIFT 12
#define GAIN_IPIPE_EN_SHIFT 13
#define GAIN_H3A_EN_SHIFT 14
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 398dbe71cb8..1c258824728 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -475,6 +475,11 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
if (!ret)
vpfe_dev->initialized = 1;
+
+ /* Clear all VPFE/CCDC interrupts */
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(-1);
+
unlock:
mutex_unlock(&ccdc_lock);
return ret;
@@ -534,6 +539,16 @@ static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
list_del(&vpfe_dev->next_frm->queue);
vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
ccdc_dev->hw_ops.setfbaddr(addr);
}
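
The helper encodes the SEQ_TB memory layout: the top field occupies the first field_off bytes of the buffer and the bottom field starts right after it, so rescheduling the CCDC for the second half of the frame is a single address addition. field_off itself is computed when the capture format is set; the layout spelled out below is an assumption for illustration:

	/*
	 *   buffer + 0          -> top field lines
	 *   buffer + field_off  -> bottom field lines
	 */
	unsigned long addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);

	ccdc_dev->hw_ops.setfbaddr(addr + vpfe_dev->field_off);
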
@@ -554,7 +569,6 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
enum v4l2_field field;
- unsigned long addr;
int fid;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
@@ -562,7 +576,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
/* if streaming not started, don't do anything */
if (!vpfe_dev->started)
- return IRQ_HANDLED;
+ goto clear_intr;
/* only for 6446 this will be applicable */
if (NULL != ccdc_dev->hw_ops.reset)
@@ -574,7 +588,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
"frame format is progressive...\n");
if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
vpfe_process_buffer_complete(vpfe_dev);
- return IRQ_HANDLED;
+ goto clear_intr;
}
/* interlaced or TB capture check which field we are in hardware */
@@ -599,12 +613,9 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
* the CCDC memory address
*/
if (field == V4L2_FIELD_SEQ_TB) {
- addr =
- videobuf_to_dma_contig(vpfe_dev->cur_frm);
- addr += vpfe_dev->field_off;
- ccdc_dev->hw_ops.setfbaddr(addr);
+ vpfe_schedule_bottom_field(vpfe_dev);
}
- return IRQ_HANDLED;
+ goto clear_intr;
}
/*
* if one field is just being captured configure
@@ -624,6 +635,10 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
*/
vpfe_dev->field_id = fid;
}
+clear_intr:
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
@@ -635,8 +650,11 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
/* if streaming not started, don't do anything */
- if (!vpfe_dev->started)
+ if (!vpfe_dev->started) {
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
return IRQ_HANDLED;
+ }
spin_lock(&vpfe_dev->dma_queue_lock);
if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
@@ -644,6 +662,10 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
vpfe_dev->cur_frm == vpfe_dev->next_frm)
vpfe_schedule_next_buffer(vpfe_dev);
spin_unlock(&vpfe_dev->dma_queue_lock);
+
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
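
clr_intr is an optional hook supplied through the platform data: passing the irq number acknowledges that one line, while passing -1 (as done at device-open time above) clears everything. A hypothetical board-side implementation, with the register name and bit layout made up for illustration:

static void mach_vpfe_clr_intr(int irq)
{
	u32 mask = (irq < 0) ? 0xffffffff : BIT(irq - VPFE_FIRST_IRQ);

	__raw_writel(mask, VPFE_INTSTAT_REG);	/* write-1-to-clear status */
}

/* wired up in the board file, e.g.: vpfe_cfg.clr_intr = mach_vpfe_clr_intr; */
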
@@ -714,7 +736,7 @@ static int vpfe_release(struct file *file)
/* Decrement device usrs counter */
vpfe_dev->usrs--;
/* Close the priority */
- v4l2_prio_close(&vpfe_dev->prio, &fh->prio);
+ v4l2_prio_close(&vpfe_dev->prio, fh->prio);
/* If this is the last file handle */
if (!vpfe_dev->usrs) {
vpfe_dev->initialized = 0;
@@ -1218,7 +1240,10 @@ static int vpfe_videobuf_setup(struct videobuf_queue *vq,
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
- *size = config_params.device_bufsize;
+ *size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+ vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+ *size = config_params.device_bufsize;
if (*count < config_params.min_numbuffers)
*count = config_params.min_numbuffers;
@@ -1233,6 +1258,8 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
{
struct vpfe_fh *fh = vq->priv_data;
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long addr;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
@@ -1242,8 +1269,18 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
vb->height = vpfe_dev->fmt.fmt.pix.height;
vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
vb->field = field;
+
+	ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0)
+ return ret;
+
+ addr = videobuf_to_dma_contig(vb);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!ALIGN(addr, 32))
+ return -EINVAL;
+
+ vb->state = VIDEOBUF_PREPARED;
}
- vb->state = VIDEOBUF_PREPARED;
return 0;
}
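
The prepare step now pins the buffer with videobuf_iolock() and then checks the resulting DMA address. For reference, a conventional way to test 32-byte alignment is IS_ALIGNED() from <linux/kernel.h> (sketch only; ALIGN() rounds an address up rather than testing it):

	addr = videobuf_to_dma_contig(vb);
	if (!IS_ALIGNED(addr, 32))
		return -EINVAL;	/* CCDC requires 32-byte aligned buffers */
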
@@ -1311,13 +1348,6 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (V4L2_MEMORY_USERPTR == req_buf->memory) {
- /* we don't support user ptr IO */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs:"
- " USERPTR IO not supported\n");
- return -EINVAL;
- }
-
ret = mutex_lock_interruptible(&vpfe_dev->lock);
if (ret)
return ret;
@@ -1831,7 +1861,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_dev_mem;
}
- mutex_lock(&ccdc_lock);
/* Allocate memory for ccdc configuration */
ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
if (NULL == ccdc_cfg) {
@@ -1840,6 +1869,8 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_lock;
}
+ mutex_lock(&ccdc_lock);
+
strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
/* Get VINT0 irq resource */
res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2015,18 +2046,14 @@ static int __devexit vpfe_remove(struct platform_device *pdev)
return 0;
}
-static int
-vpfe_suspend(struct device *dev)
+static int vpfe_suspend(struct device *dev)
{
- /* add suspend code here later */
- return -1;
+ return 0;
}
-static int
-vpfe_resume(struct device *dev)
+static int vpfe_resume(struct device *dev)
{
- /* add resume code here later */
- return -1;
+ return 0;
}
static const struct dev_pm_ops vpfe_dev_pm_ops = {
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 2e5a7fb2d0c..a7f48b53d3f 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -869,7 +869,7 @@ static int vpif_release(struct file *filep)
mutex_unlock(&common->lock);
/* Close the priority */
- v4l2_prio_close(&ch->prio, &fh->prio);
+ v4l2_prio_close(&ch->prio, fh->prio);
if (fh->initialized)
ch->initialized = 0;
@@ -1444,7 +1444,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
@@ -1554,7 +1554,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
@@ -1710,7 +1710,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 13c3a1b9776..da07607cbc5 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -384,7 +384,7 @@ static int vpif_get_std_info(struct channel_obj *ch)
int index;
std_info->stdid = vid_ch->stdid;
- if (!std_info)
+ if (!std_info->stdid)
return -1;
for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
@@ -671,7 +671,7 @@ static int vpif_release(struct file *filep)
ch->initialized = 0;
/* Close the priority */
- v4l2_prio_close(&ch->prio, &fh->prio);
+ v4l2_prio_close(&ch->prio, fh->prio);
filep->private_data = NULL;
fh->initialized = 0;
kfree(fh);
@@ -753,7 +753,7 @@ static int vpif_s_fmt_vid_out(struct file *file, void *priv,
}
/* Check for the priority */
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
fh->initialized = 1;
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index bd783387b37..e182abf476c 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -491,7 +491,7 @@ static int em28xx_audio_init(struct em28xx *dev)
strcpy(pcm->name, "Empia 28xx Capture");
snd_card_set_dev(card, &dev->udev->dev);
- strcpy(card->driver, "Empia Em28xx Audio");
+ strcpy(card->driver, "Em28xx-Audio");
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index b0fb0833771..3a4fd851451 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -602,7 +602,7 @@ struct em28xx_board em28xx_boards[] = {
.name = "Gadmei UTV330+",
.tuner_type = TUNER_TNF_5335MF,
.tda9887_conf = TDA9887_PRESENT,
- .ir_codes = &ir_codes_gadmei_rm008z_table,
+ .ir_codes = RC_MAP_GADMEI_RM008Z,
.decoder = EM28XX_SAA711X,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
@@ -681,6 +681,20 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
+ [EM2860_BOARD_TVP5150_REFERENCE_DESIGN] = {
+ .name = "EM2860/TVP5150 Reference Design",
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .decoder = EM28XX_TVP5150,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
[EM2861_BOARD_PLEXTOR_PX_TV100U] = {
.name = "Plextor ConvertX PX-TV100U",
.tuner_type = TUNER_TNF_5335MF,
@@ -777,7 +791,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -802,7 +816,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.mts_firmware = 1,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -828,7 +842,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -854,7 +868,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_rc5_hauppauge_new_table,
+ .ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -880,7 +894,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_pinnacle_pctv_hd_table,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -906,7 +920,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_ati_tv_wonder_hd_600_table,
+ .ir_codes = RC_MAP_ATI_TV_WONDER_HD_600,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -932,7 +946,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = default_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1282,7 +1296,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.has_dvb = 1,
.dvb_gpio = em2882_kworld_315u_digital,
- .ir_codes = &ir_codes_kworld_315u_table,
+ .ir_codes = RC_MAP_KWORLD_315U,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
/* Analog mode - still not ready */
@@ -1404,10 +1418,14 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2882_BOARD_KWORLD_VS_DVBT] = {
.name = "Kworld VS-DVB-T 323UR",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = kworld_330u_digital,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .ir_codes = RC_MAP_KWORLD_315U,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1430,7 +1448,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1523,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.decoder = EM28XX_TVP5150,
.tuner_gpio = default_tuner_gpio,
- .ir_codes = &ir_codes_kaiomy_table,
+ .ir_codes = RC_MAP_KAIOMY,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1623,7 +1641,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = evga_indtube_digital,
- .ir_codes = &ir_codes_evga_indtube_table,
+ .ir_codes = RC_MAP_EVGA_INDTUBE,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1672,6 +1690,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2862),
.driver_info = EM2820_BOARD_UNKNOWN },
+ { USB_DEVICE(0xeb1a, 0x2863),
+ .driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2870),
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2881),
@@ -1792,6 +1812,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC},
{0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC},
{0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT},
+ {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
};
@@ -2138,6 +2159,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
@@ -2313,21 +2335,21 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
switch (dev->model) {
case EM2800_BOARD_TERRATEC_CINERGY_200:
case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = &ir_codes_em_terratec_table;
+ dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
dev->init_data.get_key = em28xx_get_key_terratec;
dev->init_data.name = "i2c IR (EM28XX Terratec)";
break;
case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
break;
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = &ir_codes_rc5_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = &ir_codes_winfast_usbii_deluxe_table;;
+		dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
break;
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index a41cc556677..331e1cac427 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -782,11 +782,15 @@ int em28xx_resolution_set(struct em28xx *dev)
em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
- /* If we don't set the start position to 4 in VBI mode, we end up
- with line 21 being YUYV encoded instead of being in 8-bit
- greyscale */
+ /* If we don't set the start position to 2 in VBI mode, we end up
+ with line 20/21 being YUYV encoded instead of being in 8-bit
+ greyscale. The core of the issue is that line 21 (and line 23 for
+	   PAL WSS) are inside the active video region, and as a result they
+	   get the pixel formatting associated with that area.  So by cropping
+ it out, we end up with the same format as the rest of the VBI
+ region */
if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 4, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 2, width >> 2, height >> 2);
else
em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
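
Restating the comment as the decision the code now makes: with VBI enabled, the active video window starts two lines later so that line 21 (NTSC closed caption) and line 23 (PAL WSS) stay inside the VBI area and keep their 8-bit greyscale formatting. A sketch of that selection:

	int vstart = (em28xx_vbi_supported(dev) == 1) ? 2 : 0;

	em28xx_capture_area_set(dev, 0, vstart, width >> 2, height >> 2);
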
@@ -966,7 +970,7 @@ void em28xx_uninit_isoc(struct em28xx *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
+ usb_free_coherent(dev->udev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -1041,7 +1045,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->udev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
em28xx_err("unable to allocate %i bytes for transfer"
@@ -1174,21 +1178,18 @@ void em28xx_add_into_devlist(struct em28xx *dev)
*/
static LIST_HEAD(em28xx_extension_devlist);
-static DEFINE_MUTEX(em28xx_extension_devlist_lock);
int em28xx_register_extension(struct em28xx_ops *ops)
{
struct em28xx *dev = NULL;
mutex_lock(&em28xx_devlist_mutex);
- mutex_lock(&em28xx_extension_devlist_lock);
list_add_tail(&ops->next, &em28xx_extension_devlist);
list_for_each_entry(dev, &em28xx_devlist, devlist) {
if (dev)
ops->init(dev);
}
printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
return 0;
}
@@ -1204,10 +1205,8 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
ops->fini(dev);
}
- mutex_lock(&em28xx_extension_devlist_lock);
printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
@@ -1216,26 +1215,26 @@ void em28xx_init_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
void em28xx_close_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index bcd3c371009..cf1d8c3655f 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -467,6 +467,7 @@ static int dvb_init(struct em28xx *dev)
}
dev->dvb = dvb;
+ mutex_lock(&dev->lock);
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
switch (dev->model) {
@@ -506,6 +507,7 @@ static int dvb_init(struct em28xx *dev)
case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_xc3028_no_i2c_gate,
&dev->i2c_adap);
@@ -589,15 +591,16 @@ static int dvb_init(struct em28xx *dev)
if (result < 0)
goto out_free;
- em28xx_set_mode(dev, EM28XX_SUSPEND);
em28xx_info("Successfully loaded em28xx-dvb\n");
- return 0;
+ret:
+ em28xx_set_mode(dev, EM28XX_SUSPEND);
+ mutex_unlock(&dev->lock);
+ return result;
out_free:
- em28xx_set_mode(dev, EM28XX_SUSPEND);
kfree(dvb);
dev->dvb = NULL;
- return result;
+ goto ret;
}
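
dvb_init() now holds dev->lock for the whole frontend setup and funnels success and failure through one exit that restores EM28XX_SUSPEND and drops the lock. The shape of the pattern, reduced to a sketch (do_frontend_setup() is a hypothetical stand-in for the switch over dev->model):

static int init_locked_sketch(struct em28xx *dev)
{
	int result;

	mutex_lock(&dev->lock);
	em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);

	result = do_frontend_setup(dev);	/* hypothetical helper */

	/* success or failure, always leave the chip suspended and unlocked */
	em28xx_set_mode(dev, EM28XX_SUSPEND);
	mutex_unlock(&dev->lock);
	return result;
}
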
static int dvb_fini(struct em28xx *dev)
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 20a0001e888..5c3fd9411b1 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -39,6 +39,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "em28xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -360,14 +362,20 @@ static void em28xx_ir_work(struct work_struct *work)
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
-static void em28xx_ir_start(struct em28xx_IR *ir)
+static int em28xx_ir_start(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
INIT_DELAYED_WORK(&ir->work, em28xx_ir_work);
schedule_delayed_work(&ir->work, 0);
+
+ return 0;
}
-static void em28xx_ir_stop(struct em28xx_IR *ir)
+static void em28xx_ir_stop(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
cancel_delayed_work_sync(&ir->work);
}
@@ -380,7 +388,6 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
	/* Adjust xclk based on IR table for RC5/NEC tables */
- dev->board.ir_codes->ir_type = IR_TYPE_OTHER;
if (ir_type == IR_TYPE_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
@@ -388,11 +395,9 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else
+ } else if (ir_type != IR_TYPE_UNKNOWN)
rc = -EINVAL;
- dev->board.ir_codes->ir_type = ir_type;
-
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
@@ -443,6 +448,13 @@ int em28xx_ir_init(struct em28xx *dev)
ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
ir->props.priv = ir;
ir->props.change_protocol = em28xx_ir_change_protocol;
+ ir->props.open = em28xx_ir_start;
+ ir->props.close = em28xx_ir_stop;
+
+ /* By default, keep protocol field untouched */
+ err = em28xx_ir_change_protocol(ir, IR_TYPE_UNKNOWN);
+ if (err)
+ goto err_out_free;
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
@@ -455,7 +467,6 @@ int em28xx_ir_init(struct em28xx *dev)
strlcat(ir->phys, "/input0", sizeof(ir->phys));
/* Set IR protocol */
- em28xx_ir_change_protocol(ir, dev->board.ir_codes->ir_type);
err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -470,17 +481,15 @@ int em28xx_ir_init(struct em28xx *dev)
input_dev->dev.parent = &dev->udev->dev;
- em28xx_ir_start(ir);
/* all done */
err = ir_input_register(ir->input, dev->board.ir_codes,
- &ir->props);
+ &ir->props, MODULE_NAME);
if (err)
goto err_out_stop;
return 0;
err_out_stop:
- em28xx_ir_stop(ir);
dev->ir = NULL;
err_out_free:
kfree(ir);
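
With polling moved behind the ir-core open()/close() hooks, the delayed work only runs while some process holds the input device open. The resulting call flow, sketched as a comment:

	/*
	 *   open(/dev/input/eventX)
	 *     -> ir->props.open()  == em28xx_ir_start()
	 *        -> schedule_delayed_work(&ir->work, 0), re-armed every 100 ms
	 *   close()
	 *     -> ir->props.close() == em28xx_ir_stop()
	 *        -> cancel_delayed_work_sync(&ir->work)
	 */
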
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 0fe20110bfd..20090e34173 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -203,12 +203,6 @@ static void em28xx_copy_video(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if (p[0] != 0x88 && p[0] != 0x22) {
- em28xx_isocdbg("frame is not complete\n");
- len += 4;
- } else
- p += 4;
-
startread = p;
remain = len;
@@ -309,14 +303,6 @@ static void em28xx_copy_vbi(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if ((p[0] == 0x33 && p[1] == 0x95) ||
- (p[0] == 0x88 && p[1] == 0x88)) {
- /* Header field, advance past it */
- p += 4;
- } else {
- len += 4;
- }
-
startread = p;
startwrite = outp + dma_q->pos;
@@ -507,8 +493,15 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL)
+ if (buf != NULL) {
+ if (p[0] != 0x88 && p[0] != 0x22) {
+ em28xx_isocdbg("frame is not complete\n");
+ len += 4;
+ } else {
+ p += 4;
+ }
em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+ }
}
return rc;
}
@@ -555,8 +548,7 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
continue;
}
- len = urb->iso_frame_desc[i].actual_length - 4;
-
+ len = urb->iso_frame_desc[i].actual_length;
if (urb->iso_frame_desc[i].actual_length <= 0) {
/* em28xx_isocdbg("packet %d is empty",i); - spammy */
continue;
@@ -577,6 +569,17 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dev->vbi_read = 0;
em28xx_isocdbg("VBI START HEADER!!!\n");
dev->cur_field = p[2];
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ /* continuation */
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x22 && p[1] == 0x5a) {
+ /* start video */
+ p += 4;
+ len -= 4;
}
vbi_size = dev->vbi_width * dev->vbi_height;
@@ -631,9 +634,6 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
if (dev->capture_type == 1) {
dev->capture_type = 2;
- em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
- len, (p[2] & 1) ? "odd" : "even");
-
if (dev->progressive || !(dev->cur_field & 1)) {
if (buf != NULL)
buffer_filled(dev, dma_q, buf);
@@ -652,8 +652,25 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL && dev->capture_type == 2)
- em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+
+ if (buf != NULL && dev->capture_type == 2) {
+ if (len > 4 && p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ p += 4;
+ len -= 4;
+ }
+ if (len > 4 && p[0] == 0x22 && p[1] == 0x5a) {
+ em28xx_isocdbg("Video frame %d, len=%i, %s\n",
+ p[2], len, (p[2] & 1) ?
+ "odd" : "even");
+ p += 4;
+ len -= 4;
+ }
+
+ if (len > 0)
+ em28xx_copy_video(dev, dma_q, buf, p, outp,
+ len);
+ }
}
return rc;
}
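
The isochronous stream interleaves VBI and video data, distinguished by 4-byte markers; the hunks above advance past each marker before copying. A classification-only sketch of the markers being recognised:

enum em28xx_pkt { PKT_VBI_START, PKT_CONTINUATION, PKT_VIDEO_START, PKT_PAYLOAD };

static enum em28xx_pkt classify_header(const u8 *p)
{
	if (p[0] == 0x33 && p[1] == 0x95)
		return PKT_VBI_START;		/* new VBI field, p[2] = field id */
	if (p[0] == 0x88 && p[1] == 0x88 && p[2] == 0x88 && p[3] == 0x88)
		return PKT_CONTINUATION;	/* more data for the current area */
	if (p[0] == 0x22 && p[1] == 0x5a)
		return PKT_VIDEO_START;		/* switch from VBI to video lines */
	return PKT_PAYLOAD;			/* no marker, plain payload bytes */
}
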
@@ -1483,6 +1500,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
@@ -1814,7 +1832,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
mutex_lock(&dev->lock);
f->fmt.sliced.service_set = 0;
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1836,7 +1854,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index ba6fe5daff8..b252d1b1b2a 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -68,6 +68,7 @@
#define EM2820_BOARD_HERCULES_SMART_TV_USB2 26
#define EM2820_BOARD_PINNACLE_USB_2_FM1216ME 27
#define EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE 28
+#define EM2860_BOARD_TVP5150_REFERENCE_DESIGN 29
#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
#define EM2821_BOARD_USBGEAR_VD204 31
#define EM2821_BOARD_SUPERCOMP_USB_2 32
@@ -140,10 +141,10 @@
#define EM28XX_NUM_BUFS 5
/* number of packets for each buffer
- windows requests only 40 packets .. so we better do the same
+ windows requests only 64 packets .. so we better do the same
this is what I found out for all alternate numbers there!
*/
-#define EM28XX_NUM_PACKETS 40
+#define EM28XX_NUM_PACKETS 64
#define EM28XX_INTERLACED_DEFAULT 1
@@ -411,7 +412,7 @@ struct em28xx_board {
struct em28xx_input input[MAX_EM28XX_INPUT];
struct em28xx_input radio;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes;
};
struct em28xx_eeprom {
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index e6c23d50986..a5cfc76b40b 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1713,7 +1713,7 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1723,7 +1723,9 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
diff --git a/drivers/media/video/font.h b/drivers/media/video/font.h
deleted file mode 100644
index 8b1fecc3759..00000000000
--- a/drivers/media/video/font.h
+++ /dev/null
@@ -1,407 +0,0 @@
-static unsigned char rom8x16_bits[] = {
-/* Character 0 (0x30):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** *** |
- |** **** |
- |**** ** |
- |*** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xce,
-0xde,
-0xf6,
-0xe6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 1 (0x31):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ****** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x18,
-0x78,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x7e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 2 (0x32):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- |** ** |
- |******* |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x60,
-0xc6,
-0xfe,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 3 (0x33):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- | ** |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0x06,
-0x06,
-0x3c,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 4 (0x34):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | *** |
- | **** |
- | ** ** |
- |** ** |
- |** ** |
- |******* |
- | ** |
- | ** |
- | **** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x0c,
-0x1c,
-0x3c,
-0x6c,
-0xcc,
-0xcc,
-0xfe,
-0x0c,
-0x0c,
-0x1e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 5 (0x35):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** |
- |** |
- |** |
- |****** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc0,
-0xc0,
-0xc0,
-0xfc,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 6 (0x36):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** |
- |** |
- |****** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc0,
-0xc0,
-0xfc,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 7 (0x37):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x30,
-0x30,
-0x30,
-0x30,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 8 (0x38):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 9 (0x39):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ****** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7e,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-/* Character : (0x3a):
- ht=16, width=8
- +--------+
- | |
- | |
- | |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-};
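The header deleted above held nothing but an 8x16 bitmap font for the digits and ':' — sixteen bytes per glyph, one byte per row, bit 7 being the leftmost pixel, so 0x7c draws the "*****" row of the comment art. As a rough stand-alone illustration of that encoding (not part of the patch), the sketch below copies the first two glyphs of the deleted table and reprints one of them:

#include <stdio.h>

static const unsigned char rom8x16_bits[] = {
	/* '0' and '1', copied from the deleted font table (16 bytes/glyph) */
	0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6,
	0xe6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x18, 0x78, 0x18, 0x18, 0x18, 0x18,
	0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
};

/* Print one 8x16 glyph; bit 7 of each row byte is the leftmost pixel. */
static void dump_glyph(int index)
{
	const unsigned char *row = &rom8x16_bits[index * 16];
	int x, y;

	for (y = 0; y < 16; y++) {
		for (x = 0; x < 8; x++)
			putchar(row[y] & (0x80 >> x) ? '*' : ' ');
		putchar('\n');
	}
}

int main(void)
{
	dump_glyph(0);		/* reproduces the '0' comment art above */
	return 0;
}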
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index e0060c1f054..5d920e584de 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -172,12 +172,6 @@ config USB_GSPCA_SN9C20X
To compile this driver as a module, choose M here: the
module will be called gspca_sn9c20x.
-config USB_GSPCA_SN9C20X_EVDEV
- bool "Enable evdev support"
- depends on USB_GSPCA_SN9C20X && INPUT
- ---help---
- Say Y here in order to enable evdev support for sn9c20x webcam button.
-
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 43ac4af8d3e..fce8d949264 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -117,13 +117,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
SD_PKT_SZ * SD_NPKT,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
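This hunk, like the gspca.c hunks further down, only renames the DMA-coherent buffer helpers: usb_buffer_alloc()/usb_buffer_free() became usb_alloc_coherent()/usb_free_coherent() in the USB core, with the same arguments and semantics. A minimal sketch of the allocate/free pairing these drivers rely on — the example_* names are placeholders, not gspca functions:

#include <linux/usb.h>
#include <linux/slab.h>

/* Allocate a DMA-coherent transfer buffer for an URB (placeholder helper). */
static int example_alloc_urb_buffer(struct usb_device *udev, struct urb *urb,
				    size_t len)
{
	urb->transfer_buffer = usb_alloc_coherent(udev, len, GFP_KERNEL,
						  &urb->transfer_dma);
	if (urb->transfer_buffer == NULL)
		return -ENOMEM;
	urb->transfer_buffer_length = len;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	return 0;
}

/* Release it with the matching length, address and DMA handle. */
static void example_free_urb_buffer(struct usb_device *udev, struct urb *urb)
{
	usb_free_coherent(udev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
}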
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 82945ed5cbe..58b696f455b 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -374,7 +374,7 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -861,7 +861,7 @@ static int save_camera_state(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0);
}
-int command_setformat(struct gspca_dev *gspca_dev)
+static int command_setformat(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -878,7 +878,7 @@ int command_setformat(struct gspca_dev *gspca_dev)
sd->params.roi.rowStart, sd->params.roi.rowEnd);
}
-int command_setcolourparams(struct gspca_dev *gspca_dev)
+static int command_setcolourparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetColourParams,
@@ -887,7 +887,7 @@ int command_setcolourparams(struct gspca_dev *gspca_dev)
sd->params.colourParams.saturation, 0);
}
-int command_setapcor(struct gspca_dev *gspca_dev)
+static int command_setapcor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetApcor,
@@ -897,7 +897,7 @@ int command_setapcor(struct gspca_dev *gspca_dev)
sd->params.apcor.gain8);
}
-int command_setvloffset(struct gspca_dev *gspca_dev)
+static int command_setvloffset(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset,
@@ -907,7 +907,7 @@ int command_setvloffset(struct gspca_dev *gspca_dev)
sd->params.vlOffset.gain8);
}
-int command_setexposure(struct gspca_dev *gspca_dev)
+static int command_setexposure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -943,7 +943,7 @@ int command_setexposure(struct gspca_dev *gspca_dev)
return ret;
}
-int command_setcolourbalance(struct gspca_dev *gspca_dev)
+static int command_setcolourbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -973,7 +973,7 @@ int command_setcolourbalance(struct gspca_dev *gspca_dev)
return -EINVAL;
}
-int command_setcompressiontarget(struct gspca_dev *gspca_dev)
+static int command_setcompressiontarget(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -983,7 +983,7 @@ int command_setcompressiontarget(struct gspca_dev *gspca_dev)
sd->params.compressionTarget.targetQ, 0);
}
-int command_setyuvtresh(struct gspca_dev *gspca_dev)
+static int command_setyuvtresh(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -992,7 +992,7 @@ int command_setyuvtresh(struct gspca_dev *gspca_dev)
sd->params.yuvThreshold.uvThreshold, 0, 0);
}
-int command_setcompressionparams(struct gspca_dev *gspca_dev)
+static int command_setcompressionparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1009,7 +1009,7 @@ int command_setcompressionparams(struct gspca_dev *gspca_dev)
sd->params.compressionParams.decimationThreshMod);
}
-int command_setcompression(struct gspca_dev *gspca_dev)
+static int command_setcompression(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1018,7 +1018,7 @@ int command_setcompression(struct gspca_dev *gspca_dev)
sd->params.compression.decimation, 0, 0);
}
-int command_setsensorfps(struct gspca_dev *gspca_dev)
+static int command_setsensorfps(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1027,7 +1027,7 @@ int command_setsensorfps(struct gspca_dev *gspca_dev)
sd->params.sensorFps.baserate, 0, 0);
}
-int command_setflickerctrl(struct gspca_dev *gspca_dev)
+static int command_setflickerctrl(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1038,7 +1038,7 @@ int command_setflickerctrl(struct gspca_dev *gspca_dev)
0);
}
-int command_setecptiming(struct gspca_dev *gspca_dev)
+static int command_setecptiming(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1046,12 +1046,12 @@ int command_setecptiming(struct gspca_dev *gspca_dev)
sd->params.ecpTiming, 0, 0, 0);
}
-int command_pause(struct gspca_dev *gspca_dev)
+static int command_pause(struct gspca_dev *gspca_dev)
{
return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0);
}
-int command_resume(struct gspca_dev *gspca_dev)
+static int command_resume(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1059,7 +1059,8 @@ int command_resume(struct gspca_dev *gspca_dev)
0, sd->params.streamStartLine, 0, 0);
}
-int command_setlights(struct gspca_dev *gspca_dev)
+#if 0 /* Currently unused */
+static int command_setlights(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret, p1, p2;
@@ -1078,6 +1079,7 @@ int command_setlights(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0,
p1 | p2 | 0xE0, 0);
}
+#endif
static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
{
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 222af479150..678675bb365 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1,7 +1,7 @@
/*
* Main USB camera driver
*
- * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2008-2010 Jean-François Moine <http://moinejf.free.fr>
*
* Camera button input handling by Márton Németh
* Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu>
@@ -35,12 +35,12 @@
#include <linux/io.h>
#include <asm/page.h>
#include <linux/uaccess.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
#include "gspca.h"
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif
@@ -51,7 +51,7 @@
#error "DEF_NURBS too big"
#endif
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -115,7 +115,7 @@ static const struct vm_operations_struct gspca_vm_ops = {
/*
* Input and interrupt endpoint handling functions
*/
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static void int_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
@@ -199,7 +199,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
void *buffer = NULL;
int ret = -EINVAL;
- buffer_len = ep->wMaxPacketSize;
+ buffer_len = le16_to_cpu(ep->wMaxPacketSize);
interval = ep->bInterval;
PDEBUG(D_PROBE, "found int in endpoint: 0x%x, "
"buffer_len=%u, interval=%u",
@@ -213,7 +213,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
goto error;
}
- buffer = usb_buffer_alloc(dev, ep->wMaxPacketSize,
+ buffer = usb_alloc_coherent(dev, buffer_len,
GFP_KERNEL, &urb->transfer_dma);
if (!buffer) {
ret = -ENOMEM;
@@ -232,10 +232,10 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
return ret;
error_submit:
- usb_buffer_free(dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
error_buffer:
usb_free_urb(urb);
error:
@@ -272,17 +272,26 @@ static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
if (urb) {
gspca_dev->int_urb = NULL;
usb_kill_urb(urb);
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
#else
-#define gspca_input_connect(gspca_dev) 0
-#define gspca_input_create_urb(gspca_dev)
-#define gspca_input_destroy_urb(gspca_dev)
+static inline void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline void gspca_input_create_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline int gspca_input_connect(struct gspca_dev *dev)
+{
+ return 0;
+}
#endif
/* get the current input frame buffer */
@@ -442,8 +451,7 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* is not queued, discard the whole frame */
if (packet_type == FIRST_PACKET) {
frame->data_end = frame->data;
- jiffies_to_timeval(get_jiffies_64(),
- &frame->v4l2_buf.timestamp);
+ frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
frame->v4l2_buf.sequence = ++gspca_dev->sequence;
} else if (gspca_dev->last_packet_type == DISCARD_PACKET) {
if (packet_type == LAST_PACKET)
@@ -597,14 +605,45 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
gspca_dev->urb[i] = NULL;
usb_kill_urb(urb);
if (urb->transfer_buffer != NULL)
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
+static int gspca_set_alt0(struct gspca_dev *gspca_dev)
+{
+ int ret;
+
+ if (gspca_dev->alt == 0)
+ return 0;
+ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
+ if (ret < 0)
+ PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
+ return ret;
+}
+
+/* Note: both the queue and the usb locks should be held when calling this */
+static void gspca_stream_off(struct gspca_dev *gspca_dev)
+{
+ gspca_dev->streaming = 0;
+ if (gspca_dev->present) {
+ if (gspca_dev->sd_desc->stopN)
+ gspca_dev->sd_desc->stopN(gspca_dev);
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
+ gspca_set_alt0(gspca_dev);
+ gspca_input_create_urb(gspca_dev);
+ }
+
+ /* always call stop0 to free the subdriver's resources */
+ if (gspca_dev->sd_desc->stop0)
+ gspca_dev->sd_desc->stop0(gspca_dev);
+ PDEBUG(D_STREAM, "stream off OK");
+}
+
/*
* look for an input transfer endpoint in an alternate setting
*/
@@ -721,13 +760,13 @@ static int create_urbs(struct gspca_dev *gspca_dev,
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
bsize,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
@@ -830,8 +869,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
}
if (ret >= 0)
break;
- gspca_dev->streaming = 0;
- destroy_urbs(gspca_dev);
+ gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
PDEBUG(D_ERR|D_STREAM,
"usb_submit_urb alt %d err %d",
@@ -861,37 +899,6 @@ out:
return ret;
}
-static int gspca_set_alt0(struct gspca_dev *gspca_dev)
-{
- int ret;
-
- if (gspca_dev->alt == 0)
- return 0;
- ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
- if (ret < 0)
- PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
- return ret;
-}
-
-/* Note: both the queue and the usb locks should be held when calling this */
-static void gspca_stream_off(struct gspca_dev *gspca_dev)
-{
- gspca_dev->streaming = 0;
- if (gspca_dev->present) {
- if (gspca_dev->sd_desc->stopN)
- gspca_dev->sd_desc->stopN(gspca_dev);
- destroy_urbs(gspca_dev);
- gspca_input_destroy_urb(gspca_dev);
- gspca_set_alt0(gspca_dev);
- gspca_input_create_urb(gspca_dev);
- }
-
- /* always call stop0 to free the subdriver's resources */
- if (gspca_dev->sd_desc->stop0)
- gspca_dev->sd_desc->stop0(gspca_dev);
- PDEBUG(D_STREAM, "stream off OK");
-}
-
static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
{
int i;
@@ -1495,7 +1502,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct gspca_dev *gspca_dev = priv;
- int i, ret = 0;
+ int i, ret = 0, streaming;
switch (rb->memory) {
case GSPCA_MEMORY_READ: /* (internal call) */
@@ -1530,7 +1537,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
}
/* stop streaming */
- if (gspca_dev->streaming) {
+ streaming = gspca_dev->streaming;
+ if (streaming) {
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
@@ -1549,6 +1557,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
if (ret == 0) {
rb->count = gspca_dev->nframes;
gspca_dev->capt_file = file;
+ if (streaming)
+ ret = gspca_init_transfer(gspca_dev);
}
out:
mutex_unlock(&gspca_dev->queue_lock);
@@ -1582,6 +1592,12 @@ static int vidioc_streamon(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (gspca_dev->nframes == 0
|| !(gspca_dev->frame[0].v4l2_buf.flags & V4L2_BUF_FLAG_QUEUED)) {
ret = -EINVAL;
@@ -1619,6 +1635,12 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* stop streaming */
if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
ret = -ERESTARTSYS;
@@ -2124,7 +2146,7 @@ static ssize_t dev_read(struct file *file, char __user *data,
}
/* get a frame */
- jiffies_to_timeval(get_jiffies_64(), &timestamp);
+ timestamp = ktime_to_timeval(ktime_get());
timestamp.tv_sec--;
n = 2;
for (;;) {
@@ -2315,7 +2337,7 @@ int gspca_dev_probe(struct usb_interface *intf,
return 0;
out:
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
@@ -2334,7 +2356,7 @@ EXPORT_SYMBOL(gspca_dev_probe);
void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
#endif
@@ -2348,7 +2370,7 @@ void gspca_disconnect(struct usb_interface *intf)
wake_up_interruptible(&gspca_dev->wq);
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
gspca_input_destroy_urb(gspca_dev);
input_dev = gspca_dev->input_dev;
if (input_dev) {
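Two smaller changes in gspca.c are worth noting: the empty CONFIG_INPUT fallbacks are now static inline functions rather than macros, so the compiler still type-checks their arguments, and frame timestamps come from the monotonic ktime clock instead of jiffies. A minimal sketch of the new timestamp idiom, outside any particular driver:

#include <linux/ktime.h>
#include <linux/videodev2.h>

/* Stamp a v4l2 buffer from the monotonic clock, as gspca_frame_add()
 * and dev_read() now do instead of jiffies_to_timeval(). */
static void example_stamp(struct v4l2_buffer *buf)
{
	buf->timestamp = ktime_to_timeval(ktime_get());
}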
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 8bb242fb79d..8b963dfae86 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -130,7 +130,7 @@ struct sd_desc {
cam_reg_op get_register;
#endif
cam_ident_op get_chip_ident;
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
cam_int_pkt_op int_pkt_scan;
/* other_input makes the gspca core create gspca_dev->input even when
int_pkt_scan is NULL, for cams with non interrupt driven buttons */
@@ -158,7 +158,7 @@ struct gspca_dev {
struct module *module; /* subdriver handling the device */
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
#endif
@@ -171,7 +171,7 @@ struct gspca_dev {
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
struct urb *urb[MAX_NURBS];
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct urb *int_urb;
#endif
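The repeated guard change in gspca.c and gspca.h rests on a kconfig convention: a tristate symbol built in (=y) defines CONFIG_INPUT, while a modular build (=m) defines CONFIG_INPUT_MODULE, so testing both keeps the button code whenever the input core exists in any form (newer kernels wrap the same test in IS_ENABLED()). A tiny illustration, with a placeholder macro name:

/* INPUT=y defines CONFIG_INPUT; INPUT=m defines CONFIG_INPUT_MODULE. */
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
#define EXAMPLE_HAVE_INPUT 1	/* button/interrupt handling is compiled in */
#else
#define EXAMPLE_HAVE_INPUT 0	/* static inline no-op stubs are used */
#endif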
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 957e05e2d08..dc1e4efe30f 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -10,8 +10,8 @@
* https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/
*
* PS3 Eye camera enhanced by Richard Kaswy http://kaswy.free.fr
- * PS3 Eye camera, brightness, contrast, hue, AWB control added
- * by Max Thrun <bear24rw@gmail.com>
+ * PS3 Eye camera - brightness, contrast, awb, agc, aec controls
+ * added by Max Thrun <bear24rw@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,15 +60,13 @@ struct sd {
u8 contrast;
u8 gain;
u8 exposure;
- u8 redblc;
- u8 blueblc;
- u8 hue;
- u8 autogain;
+ u8 agc;
u8 awb;
+ u8 aec;
s8 sharpness;
u8 hflip;
u8 vflip;
-
+ u8 freqfltr;
};
/* V4L2 controls supported by the driver */
@@ -76,197 +74,183 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu);
static const struct ctrl sd_ctrls[] = {
- { /* 0 */
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 20
- .default_value = BRIGHTNESS_DEF,
+ { /* 0 */
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define BRIGHTNESS_DEF 0
+ .default_value = BRIGHTNESS_DEF,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- { /* 1 */
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define CONTRAST_DEF 37
- .default_value = CONTRAST_DEF,
+ { /* 1 */
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define CONTRAST_DEF 32
+ .default_value = CONTRAST_DEF,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- { /* 2 */
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Main Gain",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 2 */
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Main Gain",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define GAIN_DEF 20
- .default_value = GAIN_DEF,
+ .default_value = GAIN_DEF,
+ },
+ .set = sd_setgain,
+ .get = sd_getgain,
},
- .set = sd_setgain,
- .get = sd_getgain,
- },
- { /* 3 */
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
+ { /* 3 */
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
#define EXPO_DEF 120
- .default_value = EXPO_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- { /* 4 */
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define RED_BALANCE_DEF 128
- .default_value = RED_BALANCE_DEF,
+ .default_value = EXPO_DEF,
+ },
+ .set = sd_setexposure,
+ .get = sd_getexposure,
},
- .set = sd_setredblc,
- .get = sd_getredblc,
- },
- { /* 5 */
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BLUE_BALANCE_DEF 128
- .default_value = BLUE_BALANCE_DEF,
+ { /* 4 */
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Gain",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AGC_DEF 1
+ .default_value = AGC_DEF,
+ },
+ .set = sd_setagc,
+ .get = sd_getagc,
},
- .set = sd_setblueblc,
- .get = sd_getblueblc,
- },
- { /* 6 */
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define HUE_DEF 143
- .default_value = HUE_DEF,
+#define AWB_IDX 5
+ { /* 5 */
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AWB_DEF 1
+ .default_value = AWB_DEF,
+ },
+ .set = sd_setawb,
+ .get = sd_getawb,
},
- .set = sd_sethue,
- .get = sd_gethue,
- },
- { /* 7 */
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Autogain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 0
- .default_value = AUTOGAIN_DEF,
+ { /* 6 */
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AEC_DEF 1
+ .default_value = AEC_DEF,
+ },
+ .set = sd_setaec,
+ .get = sd_getaec,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define AWB_IDX 8
- { /* 8 */
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AWB_DEF 0
- .default_value = AWB_DEF,
- },
- .set = sd_setawb,
- .get = sd_getawb,
- },
- { /* 9 */
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 7 */
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define SHARPNESS_DEF 0
- .default_value = SHARPNESS_DEF,
+ .default_value = SHARPNESS_DEF,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
- { /* 10 */
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "HFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 8 */
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "HFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
+ .default_value = HFLIP_DEF,
+ },
+ .set = sd_sethflip,
+ .get = sd_gethflip,
},
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
- { /* 11 */
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "VFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 9 */
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "VFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = VFLIP_DEF,
+ },
+ .set = sd_setvflip,
+ .get = sd_getvflip,
+ },
+ { /* 10 */
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light Frequency Filter",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define FREQFLTR_DEF 0
+ .default_value = FREQFLTR_DEF,
+ },
+ .set = sd_setfreqfltr,
+ .get = sd_getfreqfltr,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
};
static const struct v4l2_pix_format ov772x_mode[] = {
@@ -675,14 +659,14 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9B, sd->brightness);
+ sccb_reg_write(gspca_dev, 0x9b, sd->brightness);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9C, sd->contrast);
+ sccb_reg_write(gspca_dev, 0x9c, sd->contrast);
}
static void setgain(struct gspca_dev *gspca_dev)
@@ -690,6 +674,9 @@ static void setgain(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->agc)
+ return;
+
val = sd->gain;
switch (val & 0x30) {
case 0x00:
@@ -717,55 +704,68 @@ static void setexposure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->aec)
+ return;
+
+ /* 'val' is one byte and represents half of the exposure value we are
+ * going to set into the registers, a two-byte value:
+ *
+ * MSB: ((u16) val << 1) >> 8 == val >> 7
+ * LSB: ((u16) val << 1) & 0xff == val << 1
+ */
val = sd->exposure;
sccb_reg_write(gspca_dev, 0x08, val >> 7);
sccb_reg_write(gspca_dev, 0x10, val << 1);
}
-static void setredblc(struct gspca_dev *gspca_dev)
+static void setagc(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x43, sd->redblc);
-}
-
-static void setblueblc(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sccb_reg_write(gspca_dev, 0x42, sd->blueblc);
-}
-
-static void sethue(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (sd->agc) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ } else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) & ~0x03);
- sccb_reg_write(gspca_dev, 0x01, sd->hue);
+ setgain(gspca_dev);
+ }
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setawb(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->autogain) {
- sccb_reg_write(gspca_dev, 0x13, 0xf7); /* AGC,AEC,AWB ON */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ if (sd->awb) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) | 0xc0);
} else {
- sccb_reg_write(gspca_dev, 0x13, 0xf0); /* AGC,AEC,AWB OFF */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) & 0xfc);
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) & ~0xc0);
}
}
-static void setawb(struct gspca_dev *gspca_dev)
+static void setaec(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->awb)
- sccb_reg_write(gspca_dev, 0x63, 0xe0); /* AWB on */
- else
- sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */
+ if (sd->aec)
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x01);
+ else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x01);
+ setexposure(gspca_dev);
+ }
}
static void setsharpness(struct gspca_dev *gspca_dev)
@@ -774,8 +774,8 @@ static void setsharpness(struct gspca_dev *gspca_dev)
u8 val;
val = sd->sharpness;
- sccb_reg_write(gspca_dev, 0x91, val); /* vga noise */
- sccb_reg_write(gspca_dev, 0x8e, val); /* qvga noise */
+ sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */
+ sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */
}
static void sethflip(struct gspca_dev *gspca_dev)
@@ -787,7 +787,7 @@ static void sethflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x40);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0xbf);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x40);
}
static void setvflip(struct gspca_dev *gspca_dev)
@@ -799,9 +799,20 @@ static void setvflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x80);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0x7f);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x80);
}
+static void setfreqfltr(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->freqfltr == 0)
+ sccb_reg_write(gspca_dev, 0x2b, 0x00);
+ else
+ sccb_reg_write(gspca_dev, 0x2b, 0x9e);
+}
+
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -825,26 +836,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->contrast = CONTRAST_DEF;
sd->gain = GAIN_DEF;
sd->exposure = EXPO_DEF;
- sd->redblc = RED_BALANCE_DEF;
- sd->blueblc = BLUE_BALANCE_DEF;
- sd->hue = HUE_DEF;
-#if AUTOGAIN_DEF != 0
- sd->autogain = AUTOGAIN_DEF;
+#if AGC_DEF != 0
+ sd->agc = AGC_DEF;
#else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
#endif
-#if AWB_DEF != 0
- sd->awb = AWB_DEF
-#endif
-#if SHARPNESS_DEF != 0
+ sd->awb = AWB_DEF;
+ sd->aec = AEC_DEF;
sd->sharpness = SHARPNESS_DEF;
-#endif
-#if HFLIP_DEF != 0
sd->hflip = HFLIP_DEF;
-#endif
-#if VFLIP_DEF != 0
sd->vflip = VFLIP_DEF;
-#endif
+ sd->freqfltr = FREQFLTR_DEF;
return 0;
}
@@ -904,18 +906,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
set_frame_rate(gspca_dev);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
setawb(gspca_dev);
+ setaec(gspca_dev);
setgain(gspca_dev);
- setredblc(gspca_dev);
- setblueblc(gspca_dev);
- sethue(gspca_dev);
setexposure(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setsharpness(gspca_dev);
setvflip(gspca_dev);
sethflip(gspca_dev);
+ setfreqfltr(gspca_dev);
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
@@ -1092,65 +1093,11 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->redblc = val;
- if (gspca_dev->streaming)
- setredblc(gspca_dev);
- return 0;
-}
-
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->redblc;
- return 0;
-}
-
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blueblc = val;
- if (gspca_dev->streaming)
- setblueblc(gspca_dev);
- return 0;
-}
-
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->blueblc;
- return 0;
-}
-
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming)
- sethue(gspca_dev);
- return 0;
-}
-
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
+ sd->agc = val;
if (gspca_dev->streaming) {
@@ -1160,16 +1107,16 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
gspca_dev->ctrl_inac &= ~(1 << AWB_IDX);
else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
}
return 0;
}
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->autogain;
+ *val = sd->agc;
return 0;
}
@@ -1191,6 +1138,24 @@ static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->aec = val;
+ if (gspca_dev->streaming)
+ setaec(gspca_dev);
+ return 0;
+}
+
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->aec;
+ return 0;
+}
+
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1245,6 +1210,43 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freqfltr = val;
+ if (gspca_dev->streaming)
+ setfreqfltr(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freqfltr;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "Disabled");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
/* get stream parameters (framerate) */
static int sd_get_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_streamparm *parm)
@@ -1296,6 +1298,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
.get_streamparm = sd_get_streamparm,
.set_streamparm = sd_set_streamparm,
};
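Two patterns recur in the ov534.c rework above: the automatic controls now toggle single bits of register 0x13 with read-modify-write accesses (OR the bit in to set it, AND with the complement to clear it), and manual exposure is a 9-bit quantity split across registers 0x08 and 0x10 as the new comment in setexposure() derives. A stand-alone sketch of that split, using plain integers instead of the SCCB helpers:

#include <stdint.h>
#include <stdio.h>

/* Split an 8-bit exposure control into the two register values:
 * the 16-bit exposure is (val << 1), so MSB = val >> 7, LSB = val << 1. */
static void split_exposure(uint8_t val, uint8_t *msb, uint8_t *lsb)
{
	*msb = val >> 7;
	*lsb = (uint8_t)(val << 1);
}

int main(void)
{
	uint8_t msb, lsb;

	split_exposure(120, &msb, &lsb);	/* EXPO_DEF in the driver */
	printf("reg 0x08 = 0x%02x, reg 0x10 = 0x%02x\n", msb, lsb);
	return 0;
}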
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 0c87c3490b1..a40f8893310 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -99,7 +99,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
+ .name = "Exposure",
.minimum = PAC207_EXPOSURE_MIN,
.maximum = PAC207_EXPOSURE_MAX,
.step = 1,
@@ -130,7 +130,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
+ .name = "Gain",
.minimum = PAC207_GAIN_MIN,
.maximum = PAC207_GAIN_MAX,
.step = 1,
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index dda5fd4aa69..71d9447a798 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -39,7 +39,7 @@ struct init_command {
};
/* V4L2 controls supported by the driver */
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
};
/* How to change the resolution of any of the VGA cams is unknown */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 3dee3e5844b..644a7fd4701 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,10 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/usb/input.h>
+#ifdef CONFIG_INPUT
#include <linux/input.h>
#include <linux/slab.h>
#endif
@@ -30,6 +27,7 @@
#include "jpeg.h"
#include <media/v4l2-chip-ident.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
"microdia project <microdia@googlegroups.com>");
@@ -52,9 +50,15 @@ MODULE_LICENSE("GPL");
#define SENSOR_MT9V112 7
#define SENSOR_MT9M001 8
#define SENSOR_MT9M111 9
-#define SENSOR_HV7131R 10
+#define SENSOR_MT9M112 10
+#define SENSOR_HV7131R 11
#define SENSOR_MT9VPRB 20
+/* camera flags */
+#define HAS_NO_BUTTON 0x1
+#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
+#define FLIP_DETECT 0x4
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev;
@@ -88,11 +92,7 @@ struct sd {
u8 *jpeg_hdr;
u8 quality;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct input_dev *input_dev;
- u8 input_gpio;
- struct task_struct *input_task;
-#endif
+ u8 flags;
};
struct i2c_reg_u8 {
@@ -130,6 +130,39 @@ static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val);
static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val);
static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val);
+static const struct dmi_system_id flip_dmi_table[] = {
+ {
+ .ident = "MSI MS-1034",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1034"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
+ }
+ },
+ {
+ .ident = "MSI MS-1632",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1632")
+ }
+ },
+ {
+ .ident = "MSI MS-1635X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1635X")
+ }
+ },
+ {
+ .ident = "ASUSTeK W7J",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "W7J ")
+ }
+ },
+ {}
+};
+
static const struct ctrl sd_ctrls[] = {
{
#define BRIGHTNESS_IDX 0
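The flip_dmi_table added above lets set_hvflip() (later in this diff) invert the requested hflip/vflip on a few laptops whose built-in sn9c20x camera is mounted upside down; a camera opts in through the new FLIP_DETECT flag, and dmi_check_system() does the board matching. A minimal sketch of the same mechanism against an imaginary board name:

#include <linux/dmi.h>

/* Hypothetical example: one entry matching an imaginary "EXAMPLE-1" board. */
static const struct dmi_system_id example_flip_table[] = {
	{
		.ident = "Example laptop with inverted camera",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE-1"),
		},
	},
	{ }	/* terminator */
};

static bool example_camera_is_upside_down(void)
{
	return dmi_check_system(example_flip_table) != 0;
}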
@@ -713,6 +746,7 @@ static u16 i2c_ident[] = {
V4L2_IDENT_MT9V112,
V4L2_IDENT_MT9M001C12ST,
V4L2_IDENT_MT9M111,
+ V4L2_IDENT_MT9M112,
V4L2_IDENT_HV7131R,
};
@@ -735,7 +769,8 @@ static u16 bridge_init[][2] = {
{0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f},
{0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f},
{0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01},
- {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}
+ {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80},
+ {0x1007, 0x00}
};
/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
@@ -914,40 +949,30 @@ static struct i2c_reg_u8 ov9650_init[] = {
};
static struct i2c_reg_u8 ov9655_init[] = {
- {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61},
- {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24},
- {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08},
- {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf},
- {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12},
- {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c},
- {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40},
- {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a},
- {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc},
- {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04},
- {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02},
- {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c},
- {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60},
- {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04},
- {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80},
- {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf},
- {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b},
- {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01},
+ {0x12, 0x80}, {0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba},
+ {0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08},
+ {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d},
+ {0x35, 0x00}, {0x38, 0x12}, {0x0f, 0x42}, {0x39, 0x57},
+ {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, {0x3d, 0x19},
+ {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, {0x42, 0x80},
+ {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c},
+ {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, {0x4d, 0xdc},
+ {0x4e, 0xdc}, {0x6c, 0x04}, {0x6f, 0x9e}, {0x70, 0x05},
+ {0x71, 0x78}, {0x77, 0x02}, {0x8a, 0x23}, {0x90, 0x7e},
+ {0x91, 0x7c}, {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68},
+ {0xa6, 0x60}, {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92},
+ {0xab, 0x04}, {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80},
+ {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00},
+ {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44},
+ {0xbe, 0x3b}, {0xbf, 0x3a}, {0xc1, 0xc8}, {0xc2, 0x01},
{0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0},
- {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61},
+ {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x12, 0x61},
{0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a},
- {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01},
- {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10},
- {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04},
- {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00},
- {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80},
- {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d},
- {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14},
- {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf},
- {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00},
- {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00},
- {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03},
- {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03},
- {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13},
+ {0x03, 0x09}, {0x17, 0x16}, {0x18, 0x6e}, {0x19, 0x01},
+ {0x1a, 0x3e}, {0x32, 0x09}, {0x2a, 0x10}, {0x2b, 0x0a},
+ {0x92, 0x00}, {0x93, 0x00}, {0xa1, 0x00}, {0x10, 0x7c},
+ {0x04, 0x03}, {0x00, 0x13},
};
static struct i2c_reg_u16 mt9v112_init[] = {
@@ -971,29 +996,12 @@ static struct i2c_reg_u16 mt9v112_init[] = {
static struct i2c_reg_u16 mt9v111_init[] = {
{0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000},
- {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1},
- {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002},
- {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03},
- {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c},
- {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064},
- {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480},
- {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6},
- {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000},
- {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000},
- {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000},
- {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0},
- {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000},
- {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000},
- {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000},
- {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000},
- {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016},
- {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004},
- {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d},
- {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0},
- {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0},
- {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281},
- {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002},
- {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004},
+ {0x01, 0x0001}, {0x05, 0x0004}, {0x2d, 0xe0a0},
+ {0x2e, 0x0c64}, {0x2f, 0x0064}, {0x06, 0x600e},
+ {0x08, 0x0480}, {0x01, 0x0004}, {0x02, 0x0016},
+ {0x03, 0x01e7}, {0x04, 0x0287}, {0x05, 0x0004},
+ {0x06, 0x002d}, {0x07, 0x3002}, {0x08, 0x0008},
+ {0x0e, 0x0008}, {0x20, 0x0000}
};
static struct i2c_reg_u16 mt9v011_init[] = {
@@ -1043,6 +1051,13 @@ static struct i2c_reg_u16 mt9m111_init[] = {
{0xf0, 0x0000},
};
+static struct i2c_reg_u16 mt9m112_init[] = {
+ {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
+ {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
+ {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
+ {0xf0, 0x0000},
+};
+
static struct i2c_reg_u8 hv7131r_init[] = {
{0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
{0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
@@ -1240,8 +1255,8 @@ static int ov9655_init_sensor(struct gspca_dev *gspca_dev)
}
/* disable hflip and vflip */
gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
- sd->hstart = 0;
- sd->vstart = 7;
+ sd->hstart = 1;
+ sd->vstart = 2;
return 0;
}
@@ -1337,6 +1352,7 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1369,6 +1385,23 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
+static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mt9m112_init); i++) {
+ if (i2c_w2(gspca_dev, mt9m112_init[i].reg,
+ mt9m112_init[i].val) < 0) {
+ err("MT9M112 sensor initialization failed");
+ return -ENODEV;
+ }
+ }
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ sd->hstart = 0;
+ sd->vstart = 2;
+ return 0;
+}
+
static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1421,87 +1454,6 @@ static int hv7131r_init_sensor(struct gspca_dev *gspca_dev)
return 0;
}
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
-static int input_kthread(void *data)
-{
- struct gspca_dev *gspca_dev = (struct gspca_dev *)data;
- struct sd *sd = (struct sd *) gspca_dev;
-
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
- set_freezable();
- for (;;) {
- if (kthread_should_stop())
- break;
-
- if (reg_r(gspca_dev, 0x1005, 1) < 0)
- continue;
-
- input_report_key(sd->input_dev,
- KEY_CAMERA,
- gspca_dev->usb_buf[0] & sd->input_gpio);
- input_sync(sd->input_dev);
-
- wait_event_freezable_timeout(wait,
- kthread_should_stop(),
- msecs_to_jiffies(100));
- }
- return 0;
-}
-
-
-static int sn9c20x_input_init(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- if (sd->input_gpio == 0)
- return 0;
-
- sd->input_dev = input_allocate_device();
- if (!sd->input_dev)
- return -ENOMEM;
-
- sd->input_dev->name = "SN9C20X Webcam";
-
- sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s",
- gspca_dev->dev->bus->bus_name,
- gspca_dev->dev->devpath);
-
- if (!sd->input_dev->phys)
- return -ENOMEM;
-
- usb_to_input_id(gspca_dev->dev, &sd->input_dev->id);
- sd->input_dev->dev.parent = &gspca_dev->dev->dev;
-
- set_bit(EV_KEY, sd->input_dev->evbit);
- set_bit(KEY_CAMERA, sd->input_dev->keybit);
-
- if (input_register_device(sd->input_dev))
- return -EINVAL;
-
- sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s",
- gspca_dev->dev->bus->bus_name,
- gspca_dev->dev->devpath);
-
- if (IS_ERR(sd->input_task))
- return -EINVAL;
-
- return 0;
-}
-
-static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- if (sd->input_task != NULL && !IS_ERR(sd->input_task))
- kthread_stop(sd->input_task);
-
- if (sd->input_dev != NULL) {
- input_unregister_device(sd->input_dev);
- kfree(sd->input_dev->phys);
- input_free_device(sd->input_dev);
- sd->input_dev = NULL;
- }
-}
-#endif
-
static int set_cmatrix(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1579,17 +1531,26 @@ static int set_redblue(struct gspca_dev *gspca_dev)
static int set_hvflip(struct gspca_dev *gspca_dev)
{
- u8 value, tslb;
+ u8 value, tslb, hflip, vflip;
u16 value2;
struct sd *sd = (struct sd *) gspca_dev;
+
+ if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) {
+ hflip = !sd->hflip;
+ vflip = !sd->vflip;
+ } else {
+ hflip = sd->hflip;
+ vflip = sd->vflip;
+ }
+
switch (sd->sensor) {
case SENSOR_OV9650:
i2c_r1(gspca_dev, 0x1e, &value);
value &= ~0x30;
tslb = 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x20;
- if (sd->vflip) {
+ if (vflip) {
value |= 0x10;
tslb = 0x49;
}
@@ -1600,28 +1561,29 @@ static int set_hvflip(struct gspca_dev *gspca_dev)
case SENSOR_MT9V011:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0xc0a0;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x8080;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x4020;
i2c_w2(gspca_dev, 0x20, value2);
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_MT9V112:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0x0003;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x0002;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x0001;
i2c_w2(gspca_dev, 0x20, value2);
break;
case SENSOR_HV7131R:
i2c_r1(gspca_dev, 0x01, &value);
value &= ~0x03;
- if (sd->vflip)
+ if (vflip)
value |= 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x02;
i2c_w1(gspca_dev, 0x01, value);
break;
@@ -1645,7 +1607,6 @@ static int set_exposure(struct gspca_dev *gspca_dev)
break;
case SENSOR_MT9M001:
case SENSOR_MT9V112:
- case SENSOR_MT9V111:
case SENSOR_MT9V011:
exp[0] |= (3 << 4);
exp[2] = 0x09;
@@ -1655,9 +1616,9 @@ static int set_exposure(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
exp[0] |= (4 << 4);
exp[2] = 0x25;
- exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16;
- exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8;
- exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff;
+ exp[3] = (sd->exposure >> 5) & 0xff;
+ exp[4] = (sd->exposure << 3) & 0xff;
+ exp[5] = 0;
break;
default:
return 0;
@@ -1680,7 +1641,6 @@ static int set_gain(struct gspca_dev *gspca_dev)
gain[3] = ov_gain[sd->gain];
break;
case SENSOR_MT9V011:
- case SENSOR_MT9V111:
gain[0] |= (3 << 4);
gain[2] = 0x35;
gain[3] = micron1_gain[sd->gain] >> 8;
@@ -1931,7 +1891,7 @@ static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0)
return -EINVAL;
} else {
@@ -1960,7 +1920,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0)
return -EINVAL;
} else {
@@ -2005,8 +1965,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = (id->driver_info >> 8) & 0xff;
sd->i2c_addr = id->driver_info & 0xff;
+ sd->flags = (id->driver_info >> 16) & 0xff;
switch (sd->sensor) {
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_OV9650:
case SENSOR_SOI968:
@@ -2039,11 +2001,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = 95;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- sd->input_gpio = (id->driver_info >> 16) & 0xff;
- if (sn9c20x_input_init(gspca_dev) < 0)
- return -ENODEV;
-#endif
return 0;
}
@@ -2063,6 +2020,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
}
}
+ if (sd->flags & LED_REVERSE)
+ reg_w1(gspca_dev, 0x1006, 0x00);
+ else
+ reg_w1(gspca_dev, 0x1006, 0x20);
+
if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) {
err("Device initialization failed");
return -ENODEV;
@@ -2103,6 +2065,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -ENODEV;
info("MT9M111 sensor detected");
break;
+ case SENSOR_MT9M112:
+ if (mt9m112_init_sensor(gspca_dev) < 0)
+ return -ENODEV;
+ info("MT9M112 sensor detected");
+ break;
case SENSOR_MT9M001:
if (mt9m001_init_sensor(gspca_dev) < 0)
return -ENODEV;
@@ -2162,6 +2129,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40);
}
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
if (mode & MODE_SXGA) {
i2c_w2(gspca_dev, 0xf0, 0x0002);
@@ -2243,6 +2211,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_exposure(gspca_dev);
set_hvflip(gspca_dev);
+ reg_w1(gspca_dev, 0x1007, 0x20);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02);
return 0;
@@ -2250,6 +2220,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ reg_w1(gspca_dev, 0x1007, 0x00);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02);
}
@@ -2343,6 +2315,24 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
do_autoexposure(gspca_dev, avg_lum);
}
+#ifdef CONFIG_INPUT
+static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, /* interrupt packet */
+ int len) /* interrupt packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret = -EINVAL;
+ if (!(sd->flags & HAS_NO_BUTTON) && len == 1) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ ret = 0;
+ }
+ return ret;
+}
+#endif
+
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
@@ -2409,6 +2399,9 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .int_pkt_scan = sd_int_pkt_scan,
+#endif
.dq_callback = sd_dqcallback,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.set_register = sd_dbg_s_register,
@@ -2417,8 +2410,8 @@ static const struct sd_desc sd_desc = {
.get_chip_ident = sd_chip_ident,
};
-#define SN9C20X(sensor, i2c_addr, button_mask) \
- .driver_info = (button_mask << 16) \
+#define SN9C20X(sensor, i2c_addr, flags) \
+ .driver_info = ((flags & 0xff) << 16) \
| (SENSOR_ ## sensor << 8) \
| (i2c_addr)
@@ -2426,8 +2419,10 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
- {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)},
- {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)},
+ {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30,
+ (FLIP_DETECT | HAS_NO_BUTTON))},
{USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)},
@@ -2438,6 +2433,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x628c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)},
@@ -2448,6 +2444,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
{USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)},
@@ -2467,22 +2465,11 @@ static int sd_probe(struct usb_interface *intf,
THIS_MODULE);
}
-static void sd_disconnect(struct usb_interface *intf)
-{
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-
- sn9c20x_input_cleanup(gspca_dev);
-#endif
-
- gspca_disconnect(intf);
-}
-
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
- .disconnect = sd_disconnect,
+ .disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
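With the evdev kthread gone, per-camera quirks travel in the top byte of driver_info: the SN9C20X() macro above packs flags << 16 | sensor << 8 | i2c_addr, and sd_config() unpacks the three fields with the matching shifts and masks. A small stand-alone sketch of that packing scheme; the sensor id here is a placeholder, while the flag values are the ones defined in this diff:

#include <stdio.h>

#define EXAMPLE_SENSOR	2	/* placeholder sensor id */
#define HAS_NO_BUTTON	0x1	/* flag values from this diff */
#define FLIP_DETECT	0x4

/* Mirror of the SN9C20X() macro: flags << 16 | sensor << 8 | i2c_addr. */
#define PACK_INFO(sensor, i2c_addr, flags) \
	((unsigned long)(((flags) & 0xff) << 16 | (sensor) << 8 | (i2c_addr)))

int main(void)
{
	unsigned long info = PACK_INFO(EXAMPLE_SENSOR, 0x30,
				       FLIP_DETECT | HAS_NO_BUTTON);

	printf("sensor=%lu i2c=0x%02lx flags=0x%02lx\n",
	       (info >> 8) & 0xff, info & 0xff, (info >> 16) & 0xff);
	return 0;
}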
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 1d61b92f6bf..bb923efb75b 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1,7 +1,7 @@
/*
* Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver
*
- * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
* Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr
*
* This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,7 @@
#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -67,20 +67,25 @@ struct sd {
#define BRIDGE_SN9C110 2
#define BRIDGE_SN9C120 3
u8 sensor; /* Type of image sensor chip */
-#define SENSOR_ADCM1700 0
-#define SENSOR_HV7131R 1
-#define SENSOR_MI0360 2
-#define SENSOR_MO4000 3
-#define SENSOR_MT9V111 4
-#define SENSOR_OM6802 5
-#define SENSOR_OV7630 6
-#define SENSOR_OV7648 7
-#define SENSOR_OV7660 8
-#define SENSOR_PO1030 9
-#define SENSOR_SP80708 10
+enum {
+ SENSOR_ADCM1700,
+ SENSOR_GC0307,
+ SENSOR_HV7131R,
+ SENSOR_MI0360,
+ SENSOR_MO4000,
+ SENSOR_MT9V111,
+ SENSOR_OM6802,
+ SENSOR_OV7630,
+ SENSOR_OV7648,
+ SENSOR_OV7660,
+ SENSOR_PO1030,
+ SENSOR_PO2030N,
+ SENSOR_SOI768,
+ SENSOR_SP80708,
+} sensors;
u8 i2c_addr;
- u8 *jpeg_hdr;
+ u8 jpeg_hdr[JPEG_HDR_SZ];
};
/* V4L2 controls supported by the driver */
@@ -281,29 +286,60 @@ static const struct ctrl sd_ctrls[] = {
};
/* table of the disabled controls */
-static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) |
- (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */
- (1 << INFRARED_IDX) | (1 << FREQ_IDX),
- /* SENSOR_HV7131R 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MI0360 2 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MO4000 3 */
- (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MT9V111 4 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_OM6802 5 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7630 6 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7648 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_OV7660 8 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_PO1030 9 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_SP80708 10 */
+static const __u32 ctrl_dis[] = {
+[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_GC0307] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_HV7131R] = (1 << INFRARED_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MI0360] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MO4000] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MT9V111] = (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OM6802] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OV7630] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7648] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX),
+
+[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_SOI768] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
};
static const struct v4l2_pix_format cif_mode[] = {
@@ -343,7 +379,17 @@ static const u8 sn_adcm1700[0x1c] = {
0x06, 0x00, 0x00, 0x00
};
-/*Data from sn9c102p+hv7131r */
+static const u8 sn_gc0307[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02,
+/* reg18 reg19 reg1a reg1b */
+ 0x06, 0x00, 0x00, 0x00
+};
+
static const u8 sn_hv7131[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x03, 0x64, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -401,7 +447,7 @@ static const u8 sn_om6802[0x1c] = {
static const u8 sn_ov7630[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
- 0x00, 0x21, 0x40, 0x00, 0x1a, 0x20, 0x1f, 0x20,
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
/* reg8 reg9 rega regb regc regd rege regf */
0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
@@ -443,6 +489,28 @@ static const u8 sn_po1030[0x1c] = {
0x07, 0x00, 0x00, 0x00
};
+static const u8 sn_po2030n[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
+static const u8 sn_soi768[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x08, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
static const u8 sn_sp80708[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -456,17 +524,20 @@ static const u8 sn_sp80708[0x1c] = {
/* sequence specific to the sensors - !! index = SENSOR_xxx */
static const u8 *sn_tb[] = {
- sn_adcm1700,
- sn_hv7131,
- sn_mi0360,
- sn_mo4000,
- sn_mt9v111,
- sn_om6802,
- sn_ov7630,
- sn_ov7648,
- sn_ov7660,
- sn_po1030,
- sn_sp80708
+[SENSOR_ADCM1700] = sn_adcm1700,
+[SENSOR_GC0307] = sn_gc0307,
+[SENSOR_HV7131R] = sn_hv7131,
+[SENSOR_MI0360] = sn_mi0360,
+[SENSOR_MO4000] = sn_mo4000,
+[SENSOR_MT9V111] = sn_mt9v111,
+[SENSOR_OM6802] = sn_om6802,
+[SENSOR_OV7630] = sn_ov7630,
+[SENSOR_OV7648] = sn_ov7648,
+[SENSOR_OV7660] = sn_ov7660,
+[SENSOR_PO1030] = sn_po1030,
+[SENSOR_PO2030N] = sn_po2030n,
+[SENSOR_SOI768] = sn_soi768,
+[SENSOR_SP80708] = sn_sp80708,
};
/* default gamma table */
@@ -484,8 +555,13 @@ static const u8 gamma_spec_1[17] = {
0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d,
0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed, 0xf5
};
-/* gamma for sensor SP80708 */
+/* gamma for sensor GC0307 */
static const u8 gamma_spec_2[17] = {
+ 0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab,
+ 0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1, 0xeb
+};
+/* gamma for sensor SP80708 */
+static const u8 gamma_spec_3[17] = {
0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab,
0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5, 0xe6
};
@@ -533,6 +609,58 @@ static const u8 adcm1700_sensor_param1[][8] = {
{0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10},
{}
};
+static const u8 gc0307_sensor_init[][8] = {
+ {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
+ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x45, 0x27, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 gc0307_sensor_param1[][8] = {
+ {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10},
+/*param3*/
+ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
static const u8 hv7131r_sensor_init[][8] = {
{0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10},
{0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10},
@@ -767,7 +895,9 @@ static const u8 ov7630_sensor_init[][8] = {
{0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
-/* */
+ {}
+};
+static const u8 ov7630_sensor_param1[][8] = {
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
/*fixme: + 0x12, 0x04*/
@@ -984,6 +1114,113 @@ static const u8 po1030_sensor_param1[][8] = {
{}
};
+static const u8 po2030n_sensor_init[][8] = {
+ {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10},
+ {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10},
+ {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 0x10},
+ {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10},
+ {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10},
+ {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10},
+ {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10},
+ {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 po2030n_sensor_param1[][8] = {
+ {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
+ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
+/*param2*/
+ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10},
+/*after start*/
+ {0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
+static const u8 soi768_sensor_init[][8] = {
+ {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
+ {0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
+ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x19, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 soi768_sensor_param1[][8] = {
+ {0xa1, 0x21, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x01, 0x7f, 0x7f, 0x00, 0x00, 0x10},
+/* */
+/* {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+/* {0xa1, 0x21, 0x2d, 0x25, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x02, 0x8d, 0x00, 0x00, 0x00, 0x10},
+/* the next sequence should be used for auto gain */
+ {0xa1, 0x21, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10},
+ /* global gain ? : 07 - change with 0x15 at the end */
+ {0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */
+ {0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x2d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ /* exposure ? : 0200 - change with 0x1e at the end */
+ {}
+};
+
static const u8 sp80708_sensor_init[][8] = {
{0xa1, 0x18, 0x06, 0xf9, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10},
@@ -1069,18 +1306,21 @@ static const u8 sp80708_sensor_param1[][8] = {
{}
};
-static const u8 (*sensor_init[11])[8] = {
- adcm1700_sensor_init, /* ADCM1700 0 */
- hv7131r_sensor_init, /* HV7131R 1 */
- mi0360_sensor_init, /* MI0360 2 */
- mo4000_sensor_init, /* MO4000 3 */
- mt9v111_sensor_init, /* MT9V111 4 */
- om6802_sensor_init, /* OM6802 5 */
- ov7630_sensor_init, /* OV7630 6 */
- ov7648_sensor_init, /* OV7648 7 */
- ov7660_sensor_init, /* OV7660 8 */
- po1030_sensor_init, /* PO1030 9 */
- sp80708_sensor_init, /* SP80708 10 */
+static const u8 (*sensor_init[])[8] = {
+[SENSOR_ADCM1700] = adcm1700_sensor_init,
+[SENSOR_GC0307] = gc0307_sensor_init,
+[SENSOR_HV7131R] = hv7131r_sensor_init,
+[SENSOR_MI0360] = mi0360_sensor_init,
+[SENSOR_MO4000] = mo4000_sensor_init,
+[SENSOR_MT9V111] = mt9v111_sensor_init,
+[SENSOR_OM6802] = om6802_sensor_init,
+[SENSOR_OV7630] = ov7630_sensor_init,
+[SENSOR_OV7648] = ov7648_sensor_init,
+[SENSOR_OV7660] = ov7660_sensor_init,
+[SENSOR_PO1030] = po1030_sensor_init,
+[SENSOR_PO2030N] = po2030n_sensor_init,
+[SENSOR_SOI768] = soi768_sensor_init,
+[SENSOR_SP80708] = sp80708_sensor_init,
};
/* read <len> bytes to gspca_dev->usb_buf */
@@ -1146,10 +1386,11 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- PDEBUG(D_USBO, "i2c_w2 [%02x] = %02x", reg, val);
+ PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = a0 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = a0 (100 kHz) */
gspca_dev->usb_buf[0] = 0x80 | (2 << 4);
break;
default: /* i2c command = a1 (400 kHz) */
@@ -1177,6 +1418,8 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
static void i2c_w8(struct gspca_dev *gspca_dev,
const u8 *buffer)
{
+ PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
+ buffer[2], buffer[3]);
memcpy(gspca_dev->usb_buf, buffer, 8);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -1196,7 +1439,8 @@ static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len)
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = 90 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = 90 (100 kHz) */
mode[0] = 0x80 | 0x10;
break;
default: /* i2c command = 91 (400 kHz) */
@@ -1300,39 +1544,100 @@ static void mi0360_probe(struct gspca_dev *gspca_dev)
}
}
-static void ov7648_probe(struct gspca_dev *gspca_dev)
+static void ov7630_probe(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
/* check ov76xx */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x21;
i2c_r(gspca_dev, 0x0a, 2);
- if (gspca_dev->usb_buf[3] == 0x76) { /* ov76xx */
- PDEBUG(D_PROBE, "Sensor ov%02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x7628) { /* soi768 */
+ sd->sensor = SENSOR_SOI768;
+/*fixme: only valid for 0c45:613e?*/
+ gspca_dev->cam.input_flags =
+ V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP;
+ PDEBUG(D_PROBE, "Sensor soi768");
return;
}
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+}
- /* reset */
+static void ov7648_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check ov76xx */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x0a, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if ((val & 0xff00) == 0x7600) { /* ov76xx */
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+ return;
+ }
/* check po1030 */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x6e;
i2c_r(gspca_dev, 0x00, 2);
- if (gspca_dev->usb_buf[3] == 0x10 /* po1030 */
- && gspca_dev->usb_buf[4] == 0x30) {
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x1030) { /* po1030 */
PDEBUG(D_PROBE, "Sensor po1030");
sd->sensor = SENSOR_PO1030;
return;
}
- PDEBUG(D_PROBE, "Unknown sensor %02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ PDEBUG(D_PROBE, "Unknown sensor %04x", val);
+}
+
+/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
+static void po2030n_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check gc0307 */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ reg_w1(gspca_dev, 0x02, 0x22);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x00, 1);
+ val = gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29); /* reset */
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x99) { /* gc0307 (?) */
+ PDEBUG(D_PROBE, "Sensor gc0307");
+ sd->sensor = SENSOR_GC0307;
+ return;
+ }
+
+ /* check po2030n */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x0a);
+ sd->i2c_addr = 0x6e;
+ i2c_r(gspca_dev, 0x00, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x2030) {
+ PDEBUG(D_PROBE, "Sensor po2030n");
+/* sd->sensor = SENSOR_PO2030N; */
+ } else {
+ PDEBUG(D_PROBE, "Unknown sensor ID %04x", val);
+ }
}
static void bridge_init(struct gspca_dev *gspca_dev,
@@ -1355,8 +1660,11 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5);
switch (sd->sensor) {
+ case SENSOR_GC0307:
case SENSOR_OV7660:
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
case SENSOR_SP80708:
reg9a = reg9a_spec;
break;
@@ -1377,6 +1685,14 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
reg_w1(gspca_dev, 0x01, 0x42);
break;
+ case SENSOR_GC0307:
+ msleep(50);
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x22);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ msleep(50);
+ break;
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1414,11 +1730,18 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
break;
case SENSOR_PO1030:
+ case SENSOR_SOI768:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x20);
reg_w1(gspca_dev, 0x01, 0x60);
reg_w1(gspca_dev, 0x01, 0x40);
break;
+ case SENSOR_PO2030N:
+ reg_w1(gspca_dev, 0x01, 0x63);
+ reg_w1(gspca_dev, 0x17, 0x20);
+ reg_w1(gspca_dev, 0x01, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x42);
+ break;
case SENSOR_OV7660:
/* fall thru */
case SENSOR_SP80708:
@@ -1523,9 +1846,15 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_MI0360:
mi0360_probe(gspca_dev);
break;
+ case SENSOR_OV7630:
+ ov7630_probe(gspca_dev);
+ break;
case SENSOR_OV7648:
ov7648_probe(gspca_dev);
break;
+ case SENSOR_PO2030N:
+ po2030n_probe(gspca_dev);
+ break;
}
regGpio[1] = 0x70;
reg_w(gspca_dev, 0x01, regGpio, 2);
@@ -1558,6 +1887,18 @@ static u32 setexposure(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
switch (sd->sensor) {
+ case SENSOR_GC0307: {
+ int a, b;
+
+ /* expo = 0..255 -> a = 19..43 */
+ a = 19 + expo * 25 / 256;
+ i2c_w1(gspca_dev, 0x68, a);
+ a -= 12;
+ b = a * a * 4; /* heuristic */
+ i2c_w1(gspca_dev, 0x03, b >> 8);
+ i2c_w1(gspca_dev, 0x04, b);
+ break;
+ }
case SENSOR_HV7131R: {
u8 Expodoit[] =
{ 0xc1, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x16 };
@@ -1668,6 +2009,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
expo = sd->brightness >> 4;
sd->exposure = setexposure(gspca_dev, expo);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
expo = sd->brightness >> 8;
sd->exposure = setexposure(gspca_dev, expo);
@@ -1703,7 +2045,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int i, v;
u8 reg8a[12]; /* U & V gains */
- static s16 uv[6] = { /* same as reg84 in signed decimal */
+ static const s16 uv[6] = { /* same as reg84 in signed decimal */
-24, -38, 64, /* UR UG UB */
62, -51, -9 /* VR VG VB */
};
@@ -1744,9 +2086,12 @@ static void setgamma(struct gspca_dev *gspca_dev)
case SENSOR_MT9V111:
gamma_base = gamma_spec_1;
break;
- case SENSOR_SP80708:
+ case SENSOR_GC0307:
gamma_base = gamma_spec_2;
break;
+ case SENSOR_SP80708:
+ gamma_base = gamma_spec_3;
+ break;
default:
gamma_base = gamma_def;
break;
@@ -1937,14 +2282,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
static const u8 CA_adcm1700[] =
{ 0x14, 0xec, 0x0a, 0xf6 };
+ static const u8 CA_po2030n[] =
+ { 0x1e, 0xe2, 0x14, 0xec };
static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
+ static const u8 CE_gc0307[] =
+ { 0x32, 0xce, 0x2d, 0xd3 };
static const u8 CE_ov76xx[] =
{ 0x32, 0xdd, 0x32, 0xdd };
+ static const u8 CE_po2030n[] =
+ { 0x14, 0xe7, 0x1e, 0xdd };
/* create the JPEG header */
- sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
- if (!sd->jpeg_hdr)
- return -ENOMEM;
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
@@ -1996,6 +2344,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ reg17 = 0xa2;
+ break;
case SENSOR_MT9V111:
reg17 = 0xe0;
break;
@@ -2007,9 +2358,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x20;
break;
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg17 = 0xa0;
break;
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
reg17 = 0xa0;
break;
default:
@@ -2034,12 +2387,18 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_SP80708:
reg_w1(gspca_dev, 0x9a, 0x05);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x9a, 0x07);
break;
+ case SENSOR_OV7630:
case SENSOR_OV7648:
reg_w1(gspca_dev, 0x9a, 0x0a);
break;
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
+ reg_w1(gspca_dev, 0x9a, 0x06);
+ break;
default:
reg_w1(gspca_dev, 0x9a, 0x08);
break;
@@ -2064,6 +2423,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg1 = 0x46;
reg17 = 0xe2;
break;
+ case SENSOR_GC0307:
+ init = gc0307_sensor_param1;
+ reg17 = 0xa2;
+ reg1 = 0x44;
+ break;
case SENSOR_MO4000:
if (mode) {
/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */
@@ -2087,6 +2451,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x64; /* 640 MCKSIZE */
break;
case SENSOR_OV7630:
+ init = ov7630_sensor_param1;
reg17 = 0xe2;
reg1 = 0x44;
break;
@@ -2113,6 +2478,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
reg1 = 0x44;
break;
+ case SENSOR_PO2030N:
+ init = po2030n_sensor_param1;
+ reg1 = 0x46;
+ reg17 = 0xa2;
+ break;
+ case SENSOR_SOI768:
+ init = soi768_sensor_param1;
+ reg1 = 0x44;
+ reg17 = 0xa2;
+ break;
default:
/* case SENSOR_SP80708: */
init = sp80708_sensor_param1;
@@ -2132,17 +2507,33 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w(gspca_dev, 0xc0, C0, 6);
- if (sd->sensor == SENSOR_ADCM1700)
+ switch (sd->sensor) {
+ case SENSOR_ADCM1700:
+ case SENSOR_GC0307:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xca, CA_adcm1700, 4);
- else
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xca, CA_po2030n, 4);
+ break;
+ default:
reg_w(gspca_dev, 0xca, CA, 4);
+ break;
+ }
switch (sd->sensor) {
case SENSOR_ADCM1700:
case SENSOR_OV7630:
case SENSOR_OV7648:
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
break;
+ case SENSOR_GC0307:
+ reg_w(gspca_dev, 0xce, CE_gc0307, 4);
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xce, CE_po2030n, 4);
+ break;
default:
reg_w(gspca_dev, 0xce, CE, 4);
/* ?? {0x1e, 0xdd, 0x2d, 0xe7} */
@@ -2161,6 +2552,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
setvflip(sd);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
+ setcolors(gspca_dev);
setautogain(gspca_dev);
setfreq(gspca_dev);
return 0;
@@ -2175,11 +2567,16 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{ 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 };
static const u8 stopov7648[] =
{ 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 };
+ static const u8 stopsoi768[] =
+ { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 };
u8 data;
const u8 *sn9c1xx;
data = 0x0b;
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ data = 0x29;
+ break;
case SENSOR_HV7131R:
i2c_w8(gspca_dev, stophv7131);
data = 0x2b;
@@ -2196,6 +2593,10 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
case SENSOR_PO1030:
data = 0x29;
break;
+ case SENSOR_SOI768:
+ i2c_w8(gspca_dev, stopsoi768);
+ data = 0x29;
+ break;
}
sn9c1xx = sn_tb[sd->sensor];
reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
@@ -2206,13 +2607,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* reg_w1(gspca_dev, 0xf1, 0x01); */
}
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- kfree(sd->jpeg_hdr);
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2233,6 +2627,14 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (delta < luma_mean - luma_delta ||
delta > luma_mean + luma_delta) {
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ expotimes = sd->exposure;
+ expotimes += (luma_mean - delta) >> 6;
+ if (expotimes < 0)
+ expotimes = 0;
+ sd->exposure = setexposure(gspca_dev,
+ (unsigned int) expotimes);
+ break;
case SENSOR_HV7131R:
expotimes = sd->exposure >> 8;
expotimes += (luma_mean - delta) >> 4;
@@ -2584,7 +2986,6 @@ static const struct sd_desc sd_desc = {
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
@@ -2656,7 +3057,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
#endif
{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
-/* {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, *sn9c120b*/
+ {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/
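A minimal sketch of the GC0307 exposure mapping used by the new setexposure() branch in sonixj.c above: register 0x68 gets a = 19 + expo * 25 / 256 (so 0..255 maps to 19..43), and the heuristic value b = (a - 12)^2 * 4 is split over registers 0x03/0x04. The sketch only prints the values that would be written instead of calling i2c_w1().

/* Illustrative sketch only - prints the register values the driver
 * would send for a few sample exposure settings. */
#include <stdio.h>

int main(void)
{
	unsigned int expo;

	for (expo = 0; expo <= 255; expo += 85) {
		int a = 19 + expo * 25 / 256;	  /* written to reg 0x68 */
		int b = (a - 12) * (a - 12) * 4;  /* split over regs 0x03/0x04 */

		printf("expo=%3u -> reg68=%2d reg03=0x%02x reg04=0x%02x\n",
		       expo, a, (b >> 8) & 0xff, b & 0xff);
	}
	return 0;
}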
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index b9c80e2103b..7bb2355005d 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -22,6 +22,7 @@
#define MODULE_NAME "spca561"
+#include <linux/input.h>
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
@@ -249,9 +250,9 @@ static const __u16 Pb100_2map8300[][2] = {
};
static const __u16 spca561_161rev12A_data1[][2] = {
- {0x29, 0x8118}, /* white balance - was 21 */
- {0x08, 0x8114}, /* white balance - was 01 */
- {0x0e, 0x8112}, /* white balance - was 00 */
+ {0x29, 0x8118}, /* Control register (various enable bits) */
+ {0x08, 0x8114}, /* GPIO: Led off */
+ {0x0e, 0x8112}, /* 0x0e stream off 0x3e stream on */
{0x00, 0x8102}, /* white balance - new */
{0x92, 0x8804},
{0x04, 0x8802}, /* windows uses 08 */
@@ -263,15 +264,11 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0x07, 0x8601},
{0x07, 0x8602},
{0x04, 0x8501},
- {0x21, 0x8118},
{0x07, 0x8201}, /* windows uses 02 */
{0x08, 0x8200},
{0x01, 0x8200},
- {0x00, 0x8114},
- {0x01, 0x8114}, /* windows uses 00 */
-
{0x90, 0x8604},
{0x00, 0x8605},
{0xb0, 0x8603},
@@ -302,6 +299,9 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0xf0, 0x8505},
{0x32, 0x850a},
/* {0x99, 0x8700}, * - white balance - new (removed) */
+ /* HDG: we used to do this in stop0, which made the init state and the
+ state after a start / stop different, so do it here instead. */
+ {0x29, 0x8118},
{}
};
@@ -645,6 +645,9 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
setwhite(gspca_dev);
setgain(gspca_dev);
setexposure(gspca_dev);
+
+ /* Led ON (bit 3 -> 0) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x00);
return 0;
}
static int sd_start_72a(struct gspca_dev *gspca_dev)
@@ -691,26 +694,14 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
if (sd->chip_revision == Rev012A) {
reg_w_val(gspca_dev->dev, 0x8112, 0x0e);
+ /* Led Off (bit 3 -> 1) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x08);
} else {
reg_w_val(gspca_dev->dev, 0x8112, 0x20);
/* reg_w_val(gspca_dev->dev, 0x8102, 0x00); ?? */
}
}
-/* called on streamoff with alt 0 and on disconnect */
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (!gspca_dev->present)
- return;
- if (sd->chip_revision == Rev012A) {
- reg_w_val(gspca_dev->dev, 0x8118, 0x29);
- reg_w_val(gspca_dev->dev, 0x8114, 0x08);
- }
-/* reg_w_val(gspca_dev->dev, 0x8114, 0); */
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -788,6 +779,23 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
switch (*data++) { /* sequence number */
case 0: /* start of frame */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+
+ /* This should never happen */
+ if (len < 2) {
+ PDEBUG(D_ERR, "Short SOF packet, ignoring");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+
+#ifdef CONFIG_INPUT
+ if (data[0] & 0x20) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ }
+#endif
+
if (data[1] & 0x10) {
/* compressed bayer */
gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
@@ -1028,8 +1036,10 @@ static const struct sd_desc sd_desc_12a = {
.init = sd_init_12a,
.start = sd_start_12a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
static const struct sd_desc sd_desc_72a = {
.name = MODULE_NAME,
@@ -1039,9 +1049,11 @@ static const struct sd_desc sd_desc_72a = {
.init = sd_init_72a,
.start = sd_start_72a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
static const struct sd_desc *sd_desc[2] = {
&sd_desc_12a,
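Since the spca561 subdriver now reports its snapshot button through the input layer (KEY_CAMERA on the start-of-frame flag bit, as added above), user space can pick the press up with a plain evdev read. A minimal sketch, assuming the camera's event node is /dev/input/event5 (a placeholder path):

/* Illustrative sketch only - the device node below is a placeholder */
#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event5", O_RDONLY);	/* placeholder node */

	if (fd < 0)
		return 1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		if (ev.type == EV_KEY && ev.code == KEY_CAMERA && ev.value == 1)
			printf("snapshot button pressed\n");
	close(fd);
	return 0;
}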
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 668a7536af9..63014372adb 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -44,7 +44,10 @@ struct sd {
u8 gamma;
u8 sharpness;
u8 freq;
- u8 whitebalance;
+ u8 red_balance; /* split balance */
+ u8 blue_balance;
+ u8 global_gain; /* aka gain */
+ u8 whitebalance; /* set default r/g/b and activate */
u8 mirror;
u8 effect;
@@ -70,8 +73,17 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
+
static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val);
+
static int sd_setflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val);
@@ -79,6 +91,7 @@ static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu);
+
static const struct ctrl sd_ctrls[] = {
{
{
@@ -139,7 +152,7 @@ static const struct ctrl sd_ctrls[] = {
},
{
{
- .id = V4L2_CID_GAIN, /* here, i activate only the lowlight,
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activate lowlight;
 * some apps don't bring up the
 * backlight_compensation control */
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -183,7 +196,7 @@ static const struct ctrl sd_ctrls[] = {
{
{
- .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "White Balance",
.minimum = 0,
@@ -223,6 +236,48 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_seteffect,
.get = sd_geteffect
},
+ {
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define BLUE_BALANCE_DEF 0x20
+ .default_value = BLUE_BALANCE_DEF,
+ },
+ .set = sd_setblue_balance,
+ .get = sd_getblue_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define RED_BALANCE_DEF 0x20
+ .default_value = RED_BALANCE_DEF,
+ },
+ .set = sd_setred_balance,
+ .get = sd_getred_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define GLOBAL_GAIN_DEF 0x20
+ .default_value = GLOBAL_GAIN_DEF,
+ },
+ .set = sd_setglobal_gain,
+ .get = sd_getglobal_gain,
+ },
};
static char *effects_control[] = {
@@ -523,6 +578,10 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
u8 *tmpbuf;
tmpbuf = kmalloc(len, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
memcpy(tmpbuf, buffer, len);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -542,10 +601,15 @@ static void reg_w_ixbuf(struct gspca_dev *gspca_dev,
int i;
u8 *p, *tmpbuf;
- if (len * 2 <= USB_BUF_SZ)
+ if (len * 2 <= USB_BUF_SZ) {
p = tmpbuf = gspca_dev->usb_buf;
- else
+ } else {
p = tmpbuf = kmalloc(len * 2, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
+ }
i = len;
while (--i >= 0) {
*p++ = reg++;
@@ -642,6 +706,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->whitebalance = WHITE_BALANCE_DEF;
sd->sharpness = SHARPNESS_DEF;
sd->effect = EFFECTS_DEF;
+ sd->red_balance = RED_BALANCE_DEF;
+ sd->blue_balance = BLUE_BALANCE_DEF;
+ sd->global_gain = GLOBAL_GAIN_DEF;
+
return 0;
}
@@ -693,18 +761,40 @@ static void setgamma(struct gspca_dev *gspca_dev)
reg_w_ixbuf(gspca_dev, 0x90,
gamma_table[sd->gamma], sizeof gamma_table[0]);
}
+static void setglobalgain(struct gspca_dev *gspca_dev)
+{
-static void setwhitebalance(struct gspca_dev *gspca_dev)
+ struct sd *sd = (struct sd *) gspca_dev;
+ reg_w(gspca_dev, (sd->red_balance << 8) + 0x87);
+ reg_w(gspca_dev, (sd->blue_balance << 8) + 0x88);
+ reg_w(gspca_dev, (sd->global_gain << 8) + 0x89);
+}
+
+/* Generic function for r/b balance, global gain and white balance */
+static void setbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 white_balance[8] =
- {0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38};
+ /* when white balance is enabled, leave the default values */
+ if (sd->whitebalance) {
+ reg_w(gspca_dev, 0x3c80);
+ } else {
+ reg_w(gspca_dev, 0x3880);
+ /* should we wait here? */
+ /* update and reset 'global gain' with webcam parameters */
+ sd->red_balance = reg_r(gspca_dev, 0x0087);
+ sd->blue_balance = reg_r(gspca_dev, 0x0088);
+ sd->global_gain = reg_r(gspca_dev, 0x0089);
+ setglobalgain(gspca_dev);
+ }
+}
- if (sd->whitebalance)
- white_balance[7] = 0x3c;
- reg_w_buf(gspca_dev, white_balance, sizeof white_balance);
+static void setwhitebalance(struct gspca_dev *gspca_dev)
+{
+ setbalance(gspca_dev);
}
static void setsharpness(struct gspca_dev *gspca_dev)
@@ -1018,6 +1108,66 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
+
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->blue_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x88);
+ return 0;
+}
+
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->blue_balance;
+ return 0;
+}
+
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->red_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x87);
+
+ return 0;
+}
+
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->red_balance;
+ return 0;
+}
+
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->global_gain = val;
+ if (gspca_dev->streaming)
+ setglobalgain(gspca_dev);
+
+ return 0;
+}
+
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->global_gain;
+ return 0;
+}
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
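A minimal sketch of the register format used by the new t613 balance controls above: each write is a single 16-bit word with the value in the high byte and the register index in the low byte (red 0x87, blue 0x88, global gain 0x89), which is what the (val << 8) + reg expressions build. The values below are just the control defaults from the table; the sketch prints the words instead of calling reg_w().

/* Illustrative sketch only - prints the words instead of calling reg_w() */
#include <stdio.h>

int main(void)
{
	static const struct { const char *name; int reg; int val; } ctl[] = {
		{ "red balance",  0x87, 0x20 },
		{ "blue balance", 0x88, 0x20 },
		{ "global gain",  0x89, 0x20 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(ctl) / sizeof(ctl[0]); i++)
		printf("%-12s -> reg_w(gspca_dev, 0x%04x)\n", ctl[i].name,
		       (ctl[i].val << 8) + ctl[i].reg);
	return 0;
}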
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 4989f9afb46..732c3dfe46f 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -1,9 +1,9 @@
/*
- * Z-star vc0321 library
- * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
- * Copyright (C) 2006 Michel Xhaard
+ * Z-star vc0321 library
*
- * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
+ * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
+ * Copyright (C) 2006 Michel Xhaard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/VC032X USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -1971,268 +1971,489 @@ static const u8 ov7660_NoFliker[][4] = {
{}
};
-static const u8 ov7670_initVGA_JPG[][4] = {
+static const u8 ov7670_InitVGA[][4] = {
{0xb3, 0x01, 0x05, 0xcc},
- {0x00, 0x00, 0x30, 0xdd}, {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
{0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
{0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
{0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0x41, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0x41, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x02, 0xcc}, {0xb6, 0x02, 0x80, 0xcc},
- {0xb6, 0x05, 0x01, 0xcc}, {0xb6, 0x04, 0xe0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x13, 0xcc},
- {0xb6, 0x18, 0x02, 0xcc}, {0xb6, 0x17, 0x58, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
{0x00, 0x77, 0x05, 0xaa},
{},
};
-static const u8 ov7670_initQVGA_JPG[][4] = {
- {0xb3, 0x01, 0x05, 0xcc}, {0x00, 0x00, 0x30, 0xdd},
- {0xb0, 0x03, 0x19, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0xd1, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+static const u8 ov7670_InitQVGA[][4] = {
+ {0xb3, 0x01, 0x05, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
+ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
+ {0xb3, 0x34, 0x01, 0xcc},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0xd1, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x01, 0xcc}, {0xb6, 0x02, 0x40, 0xcc},
- {0xb6, 0x05, 0x00, 0xcc}, {0xb6, 0x04, 0xf0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x21, 0xcc},
- {0xb6, 0x18, 0x00, 0xcc}, {0xb6, 0x17, 0x96, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xbc, 0x02, 0x18, 0xcc}, {0xbc, 0x03, 0x50, 0xcc},
- {0xbc, 0x04, 0x18, 0xcc}, {0xbc, 0x05, 0x00, 0xcc},
- {0xbc, 0x06, 0x00, 0xcc}, {0xbc, 0x08, 0x30, 0xcc},
- {0xbc, 0x09, 0x40, 0xcc}, {0xbc, 0x0a, 0x10, 0xcc},
- {0xbc, 0x0b, 0x00, 0xcc}, {0xbc, 0x0c, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
- {0x00, 0x77, 0x05, 0xaa },
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbc, 0x02, 0x18, 0xcc},
+ {0xbc, 0x03, 0x50, 0xcc},
+ {0xbc, 0x04, 0x18, 0xcc},
+ {0xbc, 0x05, 0x00, 0xcc},
+ {0xbc, 0x06, 0x00, 0xcc},
+ {0xbc, 0x08, 0x30, 0xcc},
+ {0xbc, 0x09, 0x40, 0xcc},
+ {0xbc, 0x0a, 0x10, 0xcc},
+ {0xbc, 0x0b, 0x00, 0xcc},
+ {0xbc, 0x0c, 0x00, 0xcc},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x77, 0x05, 0xaa},
{},
};
@@ -3117,6 +3338,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = bi_mode;
cam->nmodes = ARRAY_SIZE(bi_mode);
break;
+ case SENSOR_OV7670:
+ cam->cam_mode = bi_mode;
+ cam->nmodes = ARRAY_SIZE(bi_mode) - 1;
+ break;
default:
cam->cam_mode = vc0323_mode;
cam->nmodes = ARRAY_SIZE(vc0323_mode) - 1;
@@ -3329,14 +3554,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
else
init = ov7660_initVGA_data; /* 640x480 */
break;
- case SENSOR_OV7670:
- /*GammaT = ov7660_gamma; */
- /*MatrixT = ov7660_matrix; */
- if (mode)
- init = ov7670_initQVGA_JPG; /* 320x240 */
- else
- init = ov7670_initVGA_JPG; /* 640x480 */
- break;
case SENSOR_MI0360:
GammaT = mi1320_gamma;
MatrixT = mi0360_matrix;
@@ -3373,6 +3590,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
MatrixT = mi1320_matrix;
init = mi1320_soc_init[mode];
break;
+ case SENSOR_OV7670:
+ init = mode == 1 ? ov7670_InitVGA : ov7670_InitQVGA;
+ break;
case SENSOR_PO3130NC:
GammaT = po3130_gamma;
MatrixT = po3130_matrix;
@@ -3426,7 +3646,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
sethvflip(gspca_dev);
setlightfreq(gspca_dev);
}
- if (sd->sensor == SENSOR_POxxxx) {
+ switch (sd->sensor) {
+ case SENSOR_OV7670:
+ reg_w(gspca_dev->dev, 0x87, 0xffff, 0xffff);
+ reg_w(gspca_dev->dev, 0x88, 0xff00, 0xf0f1);
+ reg_w(gspca_dev->dev, 0xa0, 0x0000, 0xbfff);
+ break;
+ case SENSOR_POxxxx:
setcolors(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
@@ -3435,6 +3661,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
msleep(80);
reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff);
usb_exchange(gspca_dev, poxxxx_init_end_2);
+ break;
}
return 0;
}
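
The OV7670 tables added above are plain {byte0, byte1, value, opcode} quadruplets; the trailing opcode tells the driver how to play each entry back. Below is one plausible walker for such a table, assuming (as the 0xcc/0xaa/0xdd markers suggest) that 0xcc means a bridge register write, 0xaa a sensor write over i2c and 0xdd a delay; the real interpreter is usb_exchange() in vc032x.c and its exact register plumbing is the authority. bridge_write() and sensor_i2c_write() are hypothetical stand-ins for the driver's own helpers.

static void walk_init_table(struct gspca_dev *gspca_dev, const u8 tbl[][4])
{
	int i;

	for (i = 0; tbl[i][3] != 0; i++) {	/* the empty {} entry terminates */
		switch (tbl[i][3]) {
		case 0xcc:	/* bridge write: address tbl[i][0] << 8 | tbl[i][1] */
			bridge_write(gspca_dev, tbl[i][0] << 8 | tbl[i][1],
				     tbl[i][2]);
			break;
		case 0xaa:	/* sensor i2c write: register tbl[i][1], value tbl[i][2] */
			sensor_i2c_write(gspca_dev, tbl[i][1], tbl[i][2]);
			break;
		case 0xdd:	/* delay of roughly tbl[i][2] milliseconds */
			msleep(tbl[i][2]);
			break;
		}
	}
}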
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 7d7814c43f9..d02aa5c8472 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -40,7 +40,6 @@ static int force_sensor = -1;
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
u8 contrast;
u8 gamma;
u8 autogain;
@@ -80,8 +79,6 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
@@ -94,21 +91,6 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
{
{
.id = V4L2_CID_CONTRAST,
@@ -150,7 +132,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setautogain,
.get = sd_getautogain,
},
-#define LIGHTFREQ_IDX 4
+#define LIGHTFREQ_IDX 3
{
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -6004,33 +5986,6 @@ static void setmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev->dev, matrix[i], 0x010a + i);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 brightness;
-
- switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- return;
- }
-/*fixme: is it really write to 011d and 018d for all other sensors? */
- brightness = sd->brightness;
- reg_w(gspca_dev->dev, brightness, 0x011d);
- switch (sd->sensor) {
- case SENSOR_ADCM2700:
- case SENSOR_HV7131B:
- return;
- }
- if (brightness < 0x70)
- brightness += 0x10;
- else
- brightness = 0x80;
- reg_w(gspca_dev->dev, brightness, 0x018d);
-}
-
static void setsharpness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -6059,8 +6014,8 @@ static void setcontrast(struct gspca_dev *gspca_dev)
int g, i, k, adj, gp;
u8 gr[16];
static const u8 delta_tb[16] = /* delta for contrast */
- {0x15, 0x0d, 0x0a, 0x09, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08};
+ {0x2c, 0x1a, 0x12, 0x0c, 0x0a, 0x06, 0x06, 0x06,
+ 0x04, 0x06, 0x04, 0x04, 0x03, 0x03, 0x02, 0x02};
static const u8 gamma_tb[6][16] = {
{0x00, 0x00, 0x03, 0x0d, 0x1b, 0x2e, 0x45, 0x5f,
0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff},
@@ -6082,11 +6037,11 @@ static void setcontrast(struct gspca_dev *gspca_dev)
adj = 0;
gp = 0;
for (i = 0; i < 16; i++) {
- g = Tgamma[i] - delta_tb[i] * k / 128 - adj / 2;
+ g = Tgamma[i] - delta_tb[i] * k / 256 - adj / 2;
if (g > 0xff)
g = 0xff;
- else if (g <= 0)
- g = 1;
+ else if (g < 0)
+ g = 0;
reg_w(dev, g, 0x0120 + i); /* gamma */
if (k > 0)
adj--;
@@ -6203,13 +6158,13 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
pas106b_50HZ, pas106b_50HZ,
pas106b_60HZ, pas106b_60HZ},
/* SENSOR_PAS202B 13 */
- {pas202b_NoFlikerScale, pas202b_NoFliker,
- pas202b_50HZScale, pas202b_50HZ,
- pas202b_60HZScale, pas202b_60HZ},
+ {pas202b_NoFliker, pas202b_NoFlikerScale,
+ pas202b_50HZ, pas202b_50HZScale,
+ pas202b_60HZ, pas202b_60HZScale},
/* SENSOR_PB0330 14 */
- {pb0330_NoFlikerScale, pb0330_NoFliker,
- pb0330_50HZScale, pb0330_50HZ,
- pb0330_60HZScale, pb0330_60HZ},
+ {pb0330_NoFliker, pb0330_NoFlikerScale,
+ pb0330_50HZ, pb0330_50HZScale,
+ pb0330_60HZ, pb0330_60HZScale},
/* SENSOR_PO2030 15 */
{po2030_NoFliker, po2030_NoFliker,
po2030_50HZ, po2030_50HZ,
@@ -6789,7 +6744,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(broken_vga_mode);
break;
}
- sd->brightness = BRIGHTNESS_DEF;
sd->contrast = CONTRAST_DEF;
sd->gamma = gamma[sd->sensor];
sd->autogain = AUTOGAIN_DEF;
@@ -6797,12 +6751,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = QUALITY_DEF;
switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX);
- break;
case SENSOR_HV7131B:
case SENSOR_HV7131C:
case SENSOR_OV7630C:
@@ -6893,7 +6841,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
setmatrix(gspca_dev);
- setbrightness(gspca_dev);
switch (sd->sensor) {
case SENSOR_ADCM2700:
case SENSOR_OV7620:
@@ -7015,24 +6962,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
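
To see what the retuned delta_tb in the setcontrast() hunk above changes, here is the arithmetic at the extremes, assuming k is the contrast control's signed offset from its 128 midpoint (the part of setcontrast() that computes k is not shown in this hunk):

/*
 * Worked example at |k| = 128:
 *
 *   first point : old 0x15 * 128 / 128 = 21    new 0x2c * 128 / 256 = 22
 *   ninth point : old 0x08 * 128 / 128 =  8    new 0x04 * 128 / 256 =  2
 *   last point  : old 0x08 * 128 / 128 =  8    new 0x02 * 128 / 256 =  1
 *
 * The swing at the dark end of the gamma curve is essentially unchanged,
 * but it now tapers off quickly toward the bright end, and the relaxed
 * clamp lets a point reach 0 instead of being pinned at 1.
 */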
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 2fc9865fd48..830d47b05e1 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -373,9 +373,6 @@ static int hdpvr_probe(struct usb_interface *interface,
}
#endif /* CONFIG_I2C */
- /* save our data pointer in this interface device */
- usb_set_intfdata(interface, dev);
-
/* let the user know what node this device is now attached to */
v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
video_device_node_name(dev->video_dev));
@@ -391,44 +388,24 @@ error:
static void hdpvr_disconnect(struct usb_interface *interface)
{
- struct hdpvr_device *dev;
-
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
+ struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface));
+ v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
+ video_device_node_name(dev->video_dev));
/* prevent more I/O from starting and stop any ongoing */
mutex_lock(&dev->io_mutex);
dev->status = STATUS_DISCONNECTED;
- v4l2_device_disconnect(&dev->v4l2_dev);
- video_unregister_device(dev->video_dev);
wake_up_interruptible(&dev->wait_data);
wake_up_interruptible(&dev->wait_buffer);
mutex_unlock(&dev->io_mutex);
+ v4l2_device_disconnect(&dev->v4l2_dev);
msleep(100);
flush_workqueue(dev->workqueue);
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
- destroy_workqueue(dev->workqueue);
mutex_unlock(&dev->io_mutex);
-
- /* deregister I2C adapter */
-#ifdef CONFIG_I2C
- mutex_lock(&dev->i2c_mutex);
- if (dev->i2c_adapter)
- i2c_del_adapter(dev->i2c_adapter);
- kfree(dev->i2c_adapter);
- dev->i2c_adapter = NULL;
- mutex_unlock(&dev->i2c_mutex);
-#endif /* CONFIG_I2C */
-
+ video_unregister_device(dev->video_dev);
atomic_dec(&dev_nr);
-
- v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
- video_device_node_name(dev->video_dev));
-
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev->usbc_buf);
- kfree(dev);
}
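
The hdpvr change above (completed in hdpvr-video.c below) stops tearing everything down in disconnect: the heavy cleanup moves into the video_device release callback, which only runs once the last file handle is gone, so readers still holding the node open across a hot-unplug no longer touch freed memory. A minimal sketch of the pattern, using hypothetical foo_* names rather than the driver's:

static void foo_video_release(struct video_device *vdev)
{
	struct foo_device *dev = video_get_drvdata(vdev);

	/* runs after the last open file handle is closed */
	v4l2_device_unregister(&dev->v4l2_dev);
	kfree(dev);
}

static void foo_disconnect(struct usb_interface *intf)
{
	struct foo_device *dev = to_foo_dev(usb_get_intfdata(intf));

	dev->disconnected = true;		 /* block new I/O */
	v4l2_device_disconnect(&dev->v4l2_dev);
	video_unregister_device(dev->video_dev); /* release fires later */
}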
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 196f82de48f..7cfccfd1b87 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -92,8 +92,8 @@ static int hdpvr_free_queue(struct list_head *q)
buf = list_entry(p, struct hdpvr_buffer, buff_list);
urb = buf->urb;
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
tmp = p->next;
list_del(p);
@@ -143,8 +143,8 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
}
buf->urb = urb;
- mem = usb_buffer_alloc(dev->udev, dev->bulk_in_size, GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!mem) {
v4l2_err(&dev->v4l2_dev,
"cannot allocate usb transfer buffer\n");
@@ -1214,6 +1214,24 @@ static void hdpvr_device_release(struct video_device *vdev)
struct hdpvr_device *dev = video_get_drvdata(vdev);
hdpvr_delete(dev);
+ mutex_lock(&dev->io_mutex);
+ destroy_workqueue(dev->workqueue);
+ mutex_unlock(&dev->io_mutex);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ /* deregister I2C adapter */
+#ifdef CONFIG_I2C
+ mutex_lock(&dev->i2c_mutex);
+ if (dev->i2c_adapter)
+ i2c_del_adapter(dev->i2c_adapter);
+ kfree(dev->i2c_adapter);
+ dev->i2c_adapter = NULL;
+ mutex_unlock(&dev->i2c_mutex);
+#endif /* CONFIG_I2C */
+
+ kfree(dev->usbc_buf);
+ kfree(dev);
}
static const struct video_device hdpvr_video_template = {
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 49ae25d83d1..b0f046df3cd 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -111,6 +111,11 @@ struct hdpvr_device {
u8 *usbc_buf;
};
+static inline struct hdpvr_device *to_hdpvr_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct hdpvr_device, v4l2_dev);
+}
+
/* buffer one bulk urb of data */
struct hdpvr_buffer {
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index da18d698e7f..29d43974265 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -61,9 +61,9 @@ module_param(hauppauge, int, 0644); /* Choose Hauppauge remote */
MODULE_PARM_DESC(hauppauge, "Specify Hauppauge remote: 0=black, 1=grey (defaults to 0)");
-#define DEVNAME "ir-kbd-i2c"
+#define MODULE_NAME "ir-kbd-i2c"
#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG DEVNAME ": " fmt , ## arg)
+ printk(KERN_DEBUG MODULE_NAME ": " fmt , ## arg)
/* ----------------------------------------------------------------------- */
@@ -297,7 +297,7 @@ static void ir_work(struct work_struct *work)
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
const char *name = NULL;
u64 ir_type = 0;
struct IR_i2c *ir;
@@ -322,13 +322,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "Pixelview";
ir->get_key = get_key_pixelview;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x4b:
name = "PV951";
ir->get_key = get_key_pv951;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_pv951_table;
+ ir_codes = RC_MAP_PV951;
break;
case 0x18:
case 0x1f:
@@ -337,22 +337,22 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->get_key = get_key_haup;
ir_type = IR_TYPE_RC5;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
ir_type = IR_TYPE_RC5;
- ir_codes = &ir_codes_fusionhdtv_mce_table;
+ ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x0b:
case 0x47:
@@ -365,9 +365,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir->get_key = get_key_haup_xvr;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
} else {
/* Handled by saa7134-input */
@@ -379,7 +379,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_avermedia_cardbus_table;
+ ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
}
@@ -447,11 +447,11 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
input_dev->name = ir->name;
input_dev->phys = ir->phys;
- err = ir_input_register(ir->input, ir->ir_codes, NULL);
+ err = ir_input_register(ir->input, ir->ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_free;
- printk(DEVNAME ": %s detected at %s [%s]\n",
+ printk(MODULE_NAME ": %s detected at %s [%s]\n",
ir->input->name, ir->input->phys, adap->name);
/* start polling via eventd */
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 9a250548be4..1b79475ca13 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1293,7 +1293,6 @@ int ivtv_init_on_first_open(struct ivtv *itv)
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
ivtv_init_mpeg_decoder(itv);
}
- ivtv_s_std(NULL, &fh, &itv->tuner_std);
/* On a cx23416 this seems to be able to enable DMA to the chip? */
if (!itv->has_cx23415)
@@ -1310,6 +1309,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
}
else
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
+
+ /* For cards with video out, this call needs interrupts enabled */
+ ivtv_s_std(NULL, &fh, &itv->tuner_std);
+
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 5028e31c564..5b45fd2b264 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -63,6 +63,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
#include <media/tuner.h>
#include <media/cx2341x.h>
#include <media/ir-kbd-i2c.h>
@@ -116,6 +117,9 @@
#define IVTV_REG_VPU (0x9058)
#define IVTV_REG_APU (0xA064)
+/* Other registers */
+#define IVTV_REG_DEC_LINE_FIELD (0x28C0)
+
/* debugging */
extern int ivtv_debug;
@@ -372,6 +376,7 @@ struct ivtv_stream {
};
struct ivtv_open_id {
+ struct v4l2_fh fh;
u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
@@ -379,6 +384,11 @@ struct ivtv_open_id {
struct ivtv *itv;
};
+static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct ivtv_open_id, fh);
+}
+
struct yuv_frame_info
{
u32 update;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index babcabd73c0..abf410943cc 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -32,6 +32,7 @@
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
+#include <media/v4l2-event.h>
#include <media/saa7115.h>
/* This function tries to claim the stream for a specific file descriptor.
@@ -506,7 +507,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int rc;
@@ -541,7 +542,7 @@ int ivtv_start_decoding(struct ivtv_open_id *id, int speed)
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -711,19 +712,31 @@ retry:
unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
- poll_wait(filp, &s->waitq, wait);
- set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
- if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
- test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
- res = POLLPRI;
+ /* If there are subscribed events, then only use the new event
+ API instead of the old video.h based API. */
+ if (!list_empty(&id->fh.events->subscribed)) {
+ poll_wait(filp, &id->fh.events->wait, wait);
+ /* Turn off the old-style vsync events */
+ clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (v4l2_event_pending(&id->fh))
+ res = POLLPRI;
+ } else {
+ /* This is the old-style API which is here only for backwards
+ compatibility. */
+ poll_wait(filp, &s->waitq, wait);
+ set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
+ test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
+ res = POLLPRI;
+ }
/* Allow write if buffers are available for writing */
if (s->q_free.buffers)
@@ -733,7 +746,7 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -833,13 +846,16 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct v4l2_fh *fh = filp->private_data;
+ struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close %s\n", s->name);
- v4l2_prio_close(&itv->prio, &id->prio);
+ v4l2_prio_close(&itv->prio, id->prio);
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
if (s->id != id->open_id) {
@@ -895,6 +911,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
{
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
+ int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
@@ -915,17 +932,27 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
}
/* Allocate memory */
- item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
+ item = kzalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
if (NULL == item) {
IVTV_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
+ v4l2_fh_init(&item->fh, s->vdev);
+ if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
+ s->type == IVTV_DEC_STREAM_TYPE_MPG) {
+ res = v4l2_event_alloc(&item->fh, 60);
+ }
+ if (res < 0) {
+ v4l2_fh_exit(&item->fh);
+ kfree(item);
+ return res;
+ }
item->itv = itv;
item->type = s->type;
v4l2_prio_open(&itv->prio, &item->prio);
item->open_id = itv->open_id++;
- filp->private_data = item;
+ filp->private_data = &item->fh;
if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
/* Try to claim this stream */
@@ -940,6 +967,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
/* switching to radio while capture is
in progress is not polite */
ivtv_release_stream(s);
+ v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
}
@@ -970,6 +998,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
+ v4l2_fh_add(&item->fh);
return 0;
}
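
Condensing the ivtv-fileops.c hunks above: the open path now allocates a struct v4l2_fh per file handle, pre-allocates event slots for the decoder streams, and hands the fh (rather than the ivtv_open_id itself) to filp->private_data so fh2id() can recover the container later; the close path undoes it with v4l2_fh_del()/v4l2_fh_exit(). Roughly, with error handling and the radio/YUV special cases trimmed:

	/* open */
	item = kzalloc(sizeof(*item), GFP_KERNEL);
	v4l2_fh_init(&item->fh, s->vdev);
	v4l2_event_alloc(&item->fh, 60);	/* decoder streams: 60 pending events */
	filp->private_data = &item->fh;		/* fh2id() recovers item */
	v4l2_fh_add(&item->fh);

	/* close */
	v4l2_fh_del(fh);
	v4l2_fh_exit(fh);
	kfree(id);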
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 2ee03c2a1b5..a5b92d109c6 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -193,7 +193,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case IVTV_HW_I2C_IR_RX_AVER:
- init_data->ir_codes = &ir_codes_avermedia_cardbus_table;
+ init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
init_data->type = IR_TYPE_OTHER;
@@ -202,14 +202,14 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
/* Default to old black remote */
- init_data->ir_codes = &ir_codes_rc5_tv_table;
+ init_data->ir_codes = RC_MAP_RC5_TV;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 99f3c39a118..fa9f0d958f9 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -35,6 +35,7 @@
#include <media/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>
#include <linux/i2c-id.h>
@@ -391,7 +392,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
return 0;
}
- v4l2_subdev_call(itv->sd_video, video, g_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
@@ -597,7 +598,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
return -EBUSY;
itv->vbi.sliced_in->service_set = 0;
itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}
@@ -615,7 +616,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt);
memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
return 0;
}
@@ -1087,8 +1088,10 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
+ DEFINE_WAIT(wait);
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
+ int f;
if ((*std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1128,6 +1131,25 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
itv->is_out_60hz = itv->is_60hz;
itv->is_out_50hz = itv->is_50hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);
+
+ /*
+ * The next firmware call is time sensitive. Time it to
+ * avoid risk of a hard lock, by trying to ensure the call
+ * happens within the first 100 lines of the top field.
+ * Make 4 attempts to sync to the decoder before giving up.
+ */
+ for (f = 0; f < 4; f++) {
+ prepare_to_wait(&itv->vsync_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+ schedule_timeout(msecs_to_jiffies(25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+
+ if (f == 4)
+ IVTV_WARN("Mode change failed to sync to decoder\n");
+
ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = itv->main_rect.top = 0;
itv->main_rect.width = 720;
@@ -1431,6 +1453,18 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
return 0;
}
+static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_VSYNC:
+ case V4L2_EVENT_EOS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return v4l2_event_subscribe(fh, sub);
+}
+
static int ivtv_log_status(struct file *file, void *fh)
{
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
@@ -1539,10 +1573,11 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
int nonblocking = filp->f_flags & O_NONBLOCK;
struct ivtv_stream *s = &itv->streams[id->type];
+ unsigned long iarg = (unsigned long)arg;
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
@@ -1724,6 +1759,33 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
break;
}
+ case VIDEO_SELECT_SOURCE:
+ IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
+ if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
+ return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX);
+
+ case AUDIO_SET_MUTE:
+ IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
+ itv->speed_mute_audio = iarg;
+ return 0;
+
+ case AUDIO_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_stereo_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_bilingual_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
default:
return -EINVAL;
}
@@ -1755,6 +1817,10 @@ static long ivtv_default(struct file *file, void *fh, int cmd, void *arg)
case VIDEO_CONTINUE:
case VIDEO_COMMAND:
case VIDEO_TRY_COMMAND:
+ case VIDEO_SELECT_SOURCE:
+ case AUDIO_SET_MUTE:
+ case AUDIO_CHANNEL_SELECT:
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
@@ -1767,42 +1833,9 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
long ret;
- /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
- switch (cmd) {
- case VIDEO_SELECT_SOURCE:
- IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_passthrough_mode(itv, arg == VIDEO_SOURCE_DEMUX);
-
- case AUDIO_SET_MUTE:
- IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
- itv->speed_mute_audio = arg;
- return 0;
-
- case AUDIO_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_stereo_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_bilingual_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- default:
- break;
- }
-
/* check priority */
switch (cmd) {
case VIDIOC_S_CTRL:
@@ -1817,8 +1850,9 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
case VIDIOC_S_AUDOUT:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_S_FBUF:
+ case VIDIOC_S_PRIORITY:
case VIDIOC_OVERLAY:
- ret = v4l2_prio_check(&itv->prio, &id->prio);
+ ret = v4l2_prio_check(&itv->prio, id->prio);
if (ret)
return ret;
}
@@ -1832,10 +1866,13 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
long res;
+ /* DQEVENT can block, so this should not run with the serialize lock */
+ if (cmd == VIDIOC_DQEVENT)
+ return ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_lock(&itv->serialize_lock);
res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_unlock(&itv->serialize_lock);
@@ -1906,6 +1943,8 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_g_ext_ctrls = ivtv_g_ext_ctrls,
.vidioc_s_ext_ctrls = ivtv_s_ext_ctrls,
.vidioc_try_ext_ctrls = ivtv_try_ext_ctrls,
+ .vidioc_subscribe_event = ivtv_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
void ivtv_set_funcs(struct video_device *vdev)
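
ivtv_subscribe_event() above whitelists V4L2_EVENT_VSYNC and V4L2_EVENT_EOS, and the reworked poll code reports pending events via POLLPRI. A minimal userspace sketch of consuming them (standard V4L2 event ioctls, nothing ivtv-specific):

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int wait_for_vsync(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_VSYNC;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;
	if (poll(&pfd, 1, 1000) <= 0)		/* driver signals POLLPRI */
		return -1;
	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;
	return ev.u.vsync.field;		/* V4L2_FIELD_TOP or _BOTTOM */
}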
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 12d36ca91d5..fea1ec33b0d 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -25,6 +25,7 @@
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
+#include <media/v4l2-event.h>
#define DMA_MAGIC_COOKIE 0x000001fe
@@ -752,7 +753,7 @@ static void ivtv_irq_vsync(struct ivtv *itv)
* to determine the line being displayed and ensure we handle
* one vsync per frame.
*/
- unsigned int frame = read_reg(0x28c0) & 1;
+ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
struct yuv_playback_info *yi = &itv->yuv_info;
int last_dma_frame = atomic_read(&yi->next_dma_frame);
struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];
@@ -778,6 +779,14 @@ static void ivtv_irq_vsync(struct ivtv *itv)
}
}
if (frame != (itv->last_vsync_field & 1)) {
+ static const struct v4l2_event evtop = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_TOP,
+ };
+ static const struct v4l2_event evbottom = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_BOTTOM,
+ };
struct ivtv_stream *s = ivtv_get_output_stream(itv);
itv->last_vsync_field += 1;
@@ -791,10 +800,12 @@ static void ivtv_irq_vsync(struct ivtv *itv)
if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
wake_up(&itv->event_waitq);
+ if (s)
+ wake_up(&s->waitq);
}
+ if (s && s->vdev)
+ v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
wake_up(&itv->vsync_waitq);
- if (s)
- wake_up(&s->waitq);
/* Send VBI to saa7127 */
if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
@@ -852,9 +863,11 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
*/
if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
/* vsync is enabled, see if we're in a new field */
- if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
+ if ((itv->last_vsync_field & 1) !=
+ (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
/* New field, looks like we missed it */
- IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
+ IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
+ read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
vsync_force = 1;
}
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 1f9387f6ca2..de4288cc188 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -42,6 +42,7 @@
#include "ivtv-yuv.h"
#include "ivtv-cards.h"
#include "ivtv-streams.h"
+#include <media/v4l2-event.h>
static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
.owner = THIS_MODULE,
@@ -343,7 +344,10 @@ static void ivtv_vbi_setup(struct ivtv *itv)
ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);
/* setup VBI registers */
- v4l2_subdev_call(itv->sd_video, video, s_fmt, &itv->vbi.in);
+ if (raw)
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &itv->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, &itv->vbi.in.fmt.sliced);
/* determine number of lines and total number of VBI bytes.
A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
@@ -581,10 +585,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
- ivtv_msleep_timeout(150, 1);
+ ivtv_msleep_timeout(300, 1);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
- ivtv_msleep_timeout(150, 1);
}
/* begin_capture */
@@ -830,6 +833,10 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
+ /* Raw-passthrough is implied on start. Make sure it's stopped so
+ the encoder will re-initialize when next started */
+ ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);
+
wake_up(&s->waitq);
return 0;
@@ -837,6 +844,9 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_EOS,
+ };
struct ivtv *itv = s->itv;
if (s->vdev == NULL)
@@ -888,6 +898,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
+ v4l2_event_queue(s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index f420d31b937..e1c347e5ebd 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -38,7 +38,7 @@ static void ivtv_set_vps(struct ivtv *itv, int enabled)
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
@@ -52,12 +52,12 @@ static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
@@ -80,7 +80,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
data.line = enabled ? 23 : 0;
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static int odd_parity(u8 c)
@@ -134,7 +134,7 @@ void ivtv_write_vbi(struct ivtv *itv, const struct v4l2_sliced_vbi_data *sliced,
}
}
}
- if (found_cc && vi->cc_payload_idx < sizeof(vi->cc_payload)) {
+ if (found_cc && vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
vi->cc_payload[vi->cc_payload_idx++] = cc;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
@@ -316,7 +316,7 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
continue;
}
vbi.p = p + 4;
- v4l2_subdev_call(itv->sd_video, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
@@ -440,7 +440,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
@@ -454,13 +454,13 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index de2ff1c6ac3..49e1a283ed3 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -460,7 +460,7 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
- trace = read_reg(0x028c0) >> 16;
+ trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
if (itv->is_50hz && trace > 312)
trace -= 312;
else if (itv->is_60hz && trace > 262)
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
new file mode 100644
index 00000000000..554eaf14012
--- /dev/null
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -0,0 +1,1052 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing mem-to-mem videobuf framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a timer).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-vmalloc.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-testdev"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME 1000
+/* Default number of buffers per transaction */
+#define MEM2MEM_DEF_TRANSLEN 1
+#define MEM2MEM_COLOR_STEP (0xff >> 4)
+#define MEM2MEM_NUM_TILES 8
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+void m2mtest_dev_release(struct device *dev)
+{}
+
+static struct platform_device m2mtest_pdev = {
+ .name = MEM2MEM_NAME,
+ .dev.release = m2mtest_dev_release,
+};
+
+struct m2mtest_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct m2mtest_fmt formats[] = {
+ {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ /* Both capture and output format */
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ /* Output-only format */
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+/* Per-queue, driver-specific private data */
+struct m2mtest_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct m2mtest_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+/* Source and destination queue data */
+static struct m2mtest_q_data q_data[2];
+
+static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
+
+static struct v4l2_queryctrl m2mtest_ctrls[] = {
+ {
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Transaction time (msec)",
+ .minimum = 1,
+ .maximum = 10000,
+ .step = 100,
+ .default_value = 1000,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Buffers per transaction",
+ .minimum = 1,
+ .maximum = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct m2mtest_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ atomic_t num_inst;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct timer_list timer;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct m2mtest_ctx {
+ struct m2mtest_dev *dev;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+ /* Transaction time (i.e. simulated processing time) in milliseconds */
+ u32 transtime;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+struct m2mtest_buffer {
+ /* vb must be first! */
+ struct videobuf_buffer vb;
+};
+
+static struct v4l2_queryctrl *get_ctrl(int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) {
+ if (id == m2mtest_ctrls[i].id)
+ return &m2mtest_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int device_process(struct m2mtest_ctx *ctx,
+ struct m2mtest_buffer *in_buf,
+ struct m2mtest_buffer *out_buf)
+{
+ struct m2mtest_dev *dev = ctx->dev;
+ u8 *p_in, *p_out;
+ int x, y, t, w;
+ int tile_w, bytes_left;
+ struct videobuf_queue *src_q;
+ struct videobuf_queue *dst_q;
+
+ src_q = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ p_in = videobuf_queue_to_vaddr(src_q, &in_buf->vb);
+ p_out = videobuf_queue_to_vaddr(dst_q, &out_buf->vb);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (in_buf->vb.size < out_buf->vb.size) {
+ v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+ return -EINVAL;
+ }
+
+ tile_w = (in_buf->vb.width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ / MEM2MEM_NUM_TILES;
+ bytes_left = in_buf->vb.bytesperline - tile_w * MEM2MEM_NUM_TILES;
+ w = 0;
+
+ for (y = 0; y < in_buf->vb.height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
+ }
+
+ return 0;
+}
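+
+/*
+ * A gloss on device_process() above, derived from the loop: each row is
+ * split into MEM2MEM_NUM_TILES (8) tiles and every byte is shifted by
+ * MEM2MEM_COLOR_STEP (0x0f), added for odd tiles and subtracted for even
+ * ones. Because 8 tiles per row leave the parity unchanged from line to
+ * line, the output shows alternating vertical bands across the image, an
+ * easy pattern for a test application to verify that buffers really went
+ * through the simulated device.
+ */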
+
+static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+{
+ dprintk(dev, "Scheduling a simulated irq\n");
+ mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+ dprintk(ctx->dev, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ device_process(ctx, src_buf, dst_buf);
+
+ /* Run a timer, which simulates a hardware irq */
+ schedule_irq(dev, ctx->transtime);
+}
+
+
+static void device_isr(unsigned long priv)
+{
+ struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
+ struct m2mtest_ctx *curr_ctx;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+
+ if (NULL == curr_ctx) {
+ printk(KERN_ERR
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ curr_ctx->num_processed++;
+
+ if (curr_ctx->num_processed == curr_ctx->translen
+ || curr_ctx->aborting) {
+ dprintk(curr_ctx->dev, "Finishing transaction\n");
+ curr_ctx->num_processed = 0;
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+ } else {
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ device_run(curr_ctx);
+ }
+}
+
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct m2mtest_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+			/* index-th format of type 'type' found? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
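
enum_fmt() above walks the format table and counts only entries matching the requested queue type, so the index space restarts for capture and output. A user-space application enumerates the formats by raising the index until the ioctl fails; a minimal sketch, assuming a device node such as /dev/video0 (the node number is system dependent):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* node is an assumption */
        struct v4l2_fmtdesc fmt;

        if (fd < 0)
            return 1;

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        /* Keep going until the driver's enum_fmt() returns -EINVAL */
        for (fmt.index = 0; ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0; fmt.index++)
            printf("format %u: %s\n", fmt.index, fmt.description);

        close(fd);
        return 0;
    }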
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct videobuf_queue *vq;
+ struct m2mtest_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = vq->field;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+{
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ if (f->fmt.pix.height < MIN_H)
+ f->fmt.pix.height = MIN_H;
+ else if (f->fmt.pix.height > MAX_H)
+ f->fmt.pix.height = MAX_H;
+
+ if (f->fmt.pix.width < MIN_W)
+ f->fmt.pix.width = MIN_W;
+ else if (f->fmt.pix.width > MAX_W)
+ f->fmt.pix.width = MAX_W;
+
+ f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct m2mtest_q_data *q_data;
+ struct videobuf_queue *vq;
+ int ret = 0;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ mutex_lock(&vq->vb_lock);
+
+ if (videobuf_queue_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = q_data->width * q_data->height
+ * q_data->fmt->depth >> 3;
+ vq->field = f->fmt.pix.field;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+out:
+ mutex_unlock(&vq->vb_lock);
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(qc->id);
+ if (!c)
+ return -EINVAL;
+
+ *qc = *c;
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctrl->value = ctx->transtime;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctrl->value = ctx->translen;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(ctrl->id);
+ if (!c)
+ return -EINVAL;
+
+ if (ctrl->value < c->minimum || ctrl->value > c->maximum) {
+ v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret != 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctx->transtime = ctrl->value;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctx->translen = ctrl->value;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
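
The two private controls handled above let an application tune the simulated processing time and the transaction length. A hedged user-space sketch of setting them with VIDIOC_S_CTRL; the control IDs are private to this test driver, so their exact values below are an assumption (a real application would share the driver's definitions):

    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Assumed IDs for the driver's private controls */
    #define V4L2_CID_TRANS_TIME_MSEC    (V4L2_CID_PRIVATE_BASE + 0)
    #define V4L2_CID_TRANS_NUM_BUFS     (V4L2_CID_PRIVATE_BASE + 1)

    static int set_ctrl(int fd, unsigned int id, int value)
    {
        struct v4l2_control ctrl;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = id;
        ctrl.value = value;
        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
    }

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* node is an assumption */

        if (fd < 0)
            return 1;
        set_ctrl(fd, V4L2_CID_TRANS_TIME_MSEC, 500); /* 500 ms per buffer */
        set_ctrl(fd, V4L2_CID_TRANS_NUM_BUFS, 2);    /* 2 buffers per transaction */
        close(fd);
        return 0;
    }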
+
+
+static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static void m2mtest_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ videobuf_vmalloc_free(vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int m2mtest_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+
+ q_data = get_q_data(vq->type);
+
+ *size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+ dprintk(ctx->dev, "size:%d, w/h %d/%d, depth: %d\n",
+ *size, q_data->width, q_data->height, q_data->fmt->depth);
+
+ if (0 == *count)
+ *count = MEM2MEM_DEF_NUM_BUFS;
+
+ while (*size * *count > MEM2MEM_VID_MEM_LIMIT)
+ (*count)--;
+
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "%d buffers of size %d set up.\n", *count, *size);
+
+ return 0;
+}
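
The while loop above trims *count until the total allocation fits under MEM2MEM_VID_MEM_LIMIT; the same clamp can be written as a single division, which is the form the mx1_camera hunk later in this patch switches to. A sketch (names hypothetical, not part of this driver):

    /* Cap the buffer count so that count * size stays within mem_limit */
    static void clamp_buffer_count(unsigned int *count, unsigned int size,
                                   unsigned int mem_limit)
    {
        if (size && size * *count > mem_limit)
            *count = mem_limit / size;
    }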
+
+static int m2mtest_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+ int ret;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ q_data = get_q_data(vq->type);
+
+ if (vb->baddr) {
+ /* User-provided buffer */
+ if (vb->bsize < q_data->sizeimage) {
+ /* Buffer too small to fit a frame */
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "User-provided buffer too small\n");
+ return -EINVAL;
+ }
+ } else if (vb->state != VIDEOBUF_NEEDS_INIT
+ && vb->bsize < q_data->sizeimage) {
+ /* We provide the buffer, but it's already been initialized
+ * and is too small */
+ return -EINVAL;
+ }
+
+ vb->width = q_data->width;
+ vb->height = q_data->height;
+ vb->bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ vb->size = q_data->sizeimage;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Iolock failed\n");
+ goto fail;
+ }
+ }
+
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+fail:
+ m2mtest_buf_release(vq, vb);
+ return ret;
+}
+
+static void m2mtest_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+}
+
+static struct videobuf_queue_ops m2mtest_qops = {
+ .buf_setup = m2mtest_buf_setup,
+ .buf_prepare = m2mtest_buf_prepare,
+ .buf_queue = m2mtest_buf_queue,
+ .buf_release = m2mtest_buf_release,
+};
+
+static void queue_init(void *priv, struct videobuf_queue *vq,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ videobuf_queue_vmalloc_init(vq, &m2mtest_qops, ctx->dev->v4l2_dev.dev,
+ &ctx->dev->irqlock, type, V4L2_FIELD_NONE,
+ sizeof(struct m2mtest_buffer), priv);
+}
+
+
+/*
+ * File operations
+ */
+static int m2mtest_open(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = dev;
+ ctx->translen = MEM2MEM_DEF_TRANSLEN;
+ ctx->transtime = MEM2MEM_DEF_TRANSTIME;
+ ctx->num_processed = 0;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ kfree(ctx);
+ return ret;
+ }
+
+ atomic_inc(&dev->num_inst);
+
+ dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int m2mtest_release(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = file->private_data;
+
+ dprintk(dev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx);
+
+ atomic_dec(&dev->num_inst);
+
+ return 0;
+}
+
+static unsigned int m2mtest_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations m2mtest_fops = {
+ .owner = THIS_MODULE,
+ .open = m2mtest_open,
+ .release = m2mtest_release,
+ .poll = m2mtest_poll,
+ .ioctl = video_ioctl2,
+ .mmap = m2mtest_mmap,
+};
+
+static struct video_device m2mtest_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &m2mtest_fops,
+ .ioctl_ops = &m2mtest_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int m2mtest_probe(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = m2mtest_videodev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+			" Device registered as /dev/video%d\n", vfd->num);
+
+ setup_timer(&dev->timer, device_isr, (long)dev);
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+err_m2m:
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static int m2mtest_remove(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev =
+ (struct m2mtest_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ del_timer_sync(&dev->timer);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver m2mtest_pdrv = {
+ .probe = m2mtest_probe,
+ .remove = m2mtest_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __exit m2mtest_exit(void)
+{
+ platform_driver_unregister(&m2mtest_pdrv);
+ platform_device_unregister(&m2mtest_pdev);
+}
+
+static int __init m2mtest_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&m2mtest_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&m2mtest_pdrv);
+ if (ret)
+ platform_device_unregister(&m2mtest_pdev);
+
+	return ret;
+}
+
+module_init(m2mtest_init);
+module_exit(m2mtest_exit);
+
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 4404e5ef818..2be23bccd3c 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -30,9 +30,10 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/videodev.h>
#include <linux/gfp.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -1168,22 +1169,22 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_BRIGHTNESS:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, c->value);
- meye.picture.brightness = c->value << 10;
+ meye.brightness = c->value << 10;
break;
case V4L2_CID_HUE:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERAHUE, c->value);
- meye.picture.hue = c->value << 10;
+ meye.hue = c->value << 10;
break;
case V4L2_CID_CONTRAST:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACONTRAST, c->value);
- meye.picture.contrast = c->value << 10;
+ meye.contrast = c->value << 10;
break;
case V4L2_CID_SATURATION:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACOLOR, c->value);
- meye.picture.colour = c->value << 10;
+ meye.colour = c->value << 10;
break;
case V4L2_CID_AGC:
sony_pic_camera_command(
@@ -1221,16 +1222,16 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
mutex_lock(&meye.lock);
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
- c->value = meye.picture.brightness >> 10;
+ c->value = meye.brightness >> 10;
break;
case V4L2_CID_HUE:
- c->value = meye.picture.hue >> 10;
+ c->value = meye.hue >> 10;
break;
case V4L2_CID_CONTRAST:
- c->value = meye.picture.contrast >> 10;
+ c->value = meye.contrast >> 10;
break;
case V4L2_CID_SATURATION:
- c->value = meye.picture.colour >> 10;
+ c->value = meye.colour >> 10;
break;
case V4L2_CID_AGC:
c->value = meye.params.agc;
@@ -1729,6 +1730,7 @@ static int meye_resume(struct pci_dev *pdev)
static int __devinit meye_probe(struct pci_dev *pcidev,
const struct pci_device_id *ent)
{
+ struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
unsigned long mchip_adr;
@@ -1737,70 +1739,75 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
goto outnotdev;
}
+ ret = v4l2_device_register(&pcidev->dev, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
ret = -ENOMEM;
meye.mchip_dev = pcidev;
- meye.video_dev = video_device_alloc();
- if (!meye.video_dev) {
- printk(KERN_ERR "meye: video_device_alloc() failed!\n");
+ meye.vdev = video_device_alloc();
+ if (!meye.vdev) {
+ v4l2_err(v4l2_dev, "video_device_alloc() failed!\n");
goto outnotdev;
}
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
if (!meye.grab_temp) {
- printk(KERN_ERR "meye: grab buffer allocation failed\n");
+ v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
goto outvmalloc;
}
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc2;
}
- memcpy(meye.video_dev, &meye_template, sizeof(meye_template));
- meye.video_dev->parent = &meye.mchip_dev->dev;
+ memcpy(meye.vdev, &meye_template, sizeof(meye_template));
+ meye.vdev->v4l2_dev = &meye.v4l2_dev;
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
- printk(KERN_ERR "meye: unable to power on the camera\n");
- printk(KERN_ERR "meye: did you enable the camera in "
+ v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
+ v4l2_err(v4l2_dev, "meye: did you enable the camera in "
"sonypi using the module options ?\n");
goto outsonypienable;
}
if ((ret = pci_enable_device(meye.mchip_dev))) {
- printk(KERN_ERR "meye: pci_enable_device failed\n");
+ v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
mchip_adr = pci_resource_start(meye.mchip_dev,0);
if (!mchip_adr) {
- printk(KERN_ERR "meye: mchip has no device base address\n");
+ v4l2_err(v4l2_dev, "meye: mchip has no device base address\n");
goto outregions;
}
if (!request_mem_region(pci_resource_start(meye.mchip_dev, 0),
pci_resource_len(meye.mchip_dev, 0),
"meye")) {
- printk(KERN_ERR "meye: request_mem_region failed\n");
+ v4l2_err(v4l2_dev, "meye: request_mem_region failed\n");
goto outregions;
}
meye.mchip_mmregs = ioremap(mchip_adr, MCHIP_MM_REGS);
if (!meye.mchip_mmregs) {
- printk(KERN_ERR "meye: ioremap failed\n");
+ v4l2_err(v4l2_dev, "meye: ioremap failed\n");
goto outremap;
}
meye.mchip_irq = pcidev->irq;
if (request_irq(meye.mchip_irq, meye_irq,
IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) {
- printk(KERN_ERR "meye: request_irq failed\n");
+ v4l2_err(v4l2_dev, "request_irq failed\n");
goto outreqirq;
}
@@ -1824,21 +1831,18 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
msleep(1);
mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK);
- if (video_register_device(meye.video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
- printk(KERN_ERR "meye: video_register_device failed\n");
+ v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
}
mutex_init(&meye.lock);
init_waitqueue_head(&meye.proc_list);
- meye.picture.depth = 16;
- meye.picture.palette = VIDEO_PALETTE_YUV422;
- meye.picture.brightness = 32 << 10;
- meye.picture.hue = 32 << 10;
- meye.picture.colour = 32 << 10;
- meye.picture.contrast = 32 << 10;
- meye.picture.whiteness = 0;
+ meye.brightness = 32 << 10;
+ meye.hue = 32 << 10;
+ meye.colour = 32 << 10;
+ meye.contrast = 32 << 10;
meye.params.subsample = 0;
meye.params.quality = 8;
meye.params.sharpness = 32;
@@ -1854,9 +1858,9 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0);
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48);
- printk(KERN_INFO "meye: Motion Eye Camera Driver v%s.\n",
+ v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n",
MEYE_DRIVER_VERSION);
- printk(KERN_INFO "meye: mchip KL5A72002 rev. %d, base %lx, irq %d\n",
+ v4l2_info(v4l2_dev, "mchip KL5A72002 rev. %d, base %lx, irq %d\n",
meye.mchip_dev->revision, mchip_adr, meye.mchip_irq);
return 0;
@@ -1879,14 +1883,14 @@ outkfifoalloc2:
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
- video_device_release(meye.video_dev);
+ video_device_release(meye.vdev);
outnotdev:
return ret;
}
static void __devexit meye_remove(struct pci_dev *pcidev)
{
- video_unregister_device(meye.video_dev);
+ video_unregister_device(meye.vdev);
mchip_hic_stop();
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 1321ad5d659..4bdeb03f164 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -31,7 +31,7 @@
#define _MEYE_PRIV_H_
#define MEYE_DRIVER_MAJORVERSION 1
-#define MEYE_DRIVER_MINORVERSION 13
+#define MEYE_DRIVER_MINORVERSION 14
#define MEYE_DRIVER_VERSION __stringify(MEYE_DRIVER_MAJORVERSION) "." \
__stringify(MEYE_DRIVER_MINORVERSION)
@@ -289,6 +289,7 @@ struct meye_grab_buffer {
/* Motion Eye device structure */
struct meye {
+ struct v4l2_device v4l2_dev; /* Main v4l2_device struct */
struct pci_dev *mchip_dev; /* pci device */
u8 mchip_irq; /* irq */
u8 mchip_mode; /* actual mchip mode: HIC_MODE... */
@@ -308,8 +309,11 @@ struct meye {
struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
- struct video_device *video_dev; /* video device parameters */
- struct video_picture picture; /* video picture parameters */
+ struct video_device *vdev; /* video device parameters */
+ u16 brightness;
+ u16 hue;
+ u16 contrast;
+ u16 colour;
struct meye_params params; /* additional parameters */
unsigned long in_use; /* set to 1 if the device is in use */
#ifdef CONFIG_PM
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index a9061bff79b..78b4e091d2d 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -8,14 +8,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
/*
* mt9t031 i2c address 0x5d
@@ -681,12 +683,66 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/*
+ * Power Management:
+ * This function does nothing for now but must be present for pm to work
+ */
+static int mt9t031_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/*
+ * Power Management:
+ * COLUMN_ADDRESS_MODE and ROW_ADDRESS_MODE are not rewritten if unchanged;
+ * they are, however, changed at reset if the platform hook is present,
+ * so we rewrite them with the values stored by the driver
+ */
+static int mt9t031_runtime_resume(struct device *dev)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct soc_camera_device *icd = container_of(vdev->parent,
+ struct soc_camera_device, dev);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct i2c_client *client = sd->priv;
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ int ret;
+ u16 xbin, ybin;
+
+ xbin = min(mt9t031->xskip, (u16)3);
+ ybin = min(mt9t031->yskip, (u16)3);
+
+ ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE,
+ ((xbin - 1) << 4) | (mt9t031->xskip - 1));
+ if (ret < 0)
+ return ret;
+
+ ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE,
+ ((ybin - 1) << 4) | (mt9t031->yskip - 1));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct dev_pm_ops mt9t031_dev_pm_ops = {
+ .runtime_suspend = mt9t031_runtime_suspend,
+ .runtime_resume = mt9t031_runtime_resume,
+};
+
+static struct device_type mt9t031_dev_type = {
+ .name = "MT9T031",
+ .pm = &mt9t031_dev_pm_ops,
+};
+
+/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
static int mt9t031_video_probe(struct i2c_client *client)
{
struct mt9t031 *mt9t031 = to_mt9t031(client);
+ struct video_device *vdev = soc_camera_i2c_to_vdev(client);
s32 data;
int ret;
@@ -712,6 +768,8 @@ static int mt9t031_video_probe(struct i2c_client *client)
ret = mt9t031_idle(client);
if (ret < 0)
dev_err(&client->dev, "Failed to initialise the camera\n");
+ else
+ vdev->dev.type = &mt9t031_dev_type;
/* mt9t031_idle() has reset the chip to default. */
mt9t031->exposure = 255;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 1a34d2993e9..e5bae4c9393 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -325,7 +325,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (ret < 0)
return ret;
- dev_dbg(&client->dev, "Frame %ux%u pixel\n", rect.width, rect.height);
+ dev_dbg(&client->dev, "Frame %dx%d pixel\n", rect.width, rect.height);
mt9v022->rect = rect;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 34a66019190..5c17f9ec3d7 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -139,8 +139,8 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (!*count)
*count = 32;
- while (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
- (*count)--;
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index bd297f567dc..d477e305800 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -796,7 +796,7 @@ static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
* FIXME: learn to use stride != width, then we can keep stride properly aligned
* and support arbitrary (even) widths.
*/
-static inline void stride_align(__s32 *width)
+static inline void stride_align(__u32 *width)
{
if (((*width + 7) & ~7) < 4096)
*width = (*width + 7) & ~7;
@@ -844,7 +844,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
* So far only direct camera-to-memory is supported
*/
if (channel_change_requested(icd, rect)) {
- int ret = acquire_dma_channel(mx3_cam);
+ ret = acquire_dma_channel(mx3_cam);
if (ret < 0)
return ret;
}
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
new file mode 100644
index 00000000000..97c53949ca8
--- /dev/null
+++ b/drivers/media/video/omap/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_OMAP2_VOUT
+ tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on ARCH_OMAP24XX || ARCH_OMAP34XX
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_SG
+ select OMAP2_DSS
+ select OMAP2_VRAM
+ select OMAP2_VRFB
+ default n
+ ---help---
+ V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
new file mode 100644
index 00000000000..b8bab00ad01
--- /dev/null
+++ b/drivers/media/video/omap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-mod-objs := omap_vout.o omap_voutlib.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout-mod.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
new file mode 100644
index 00000000000..4c0ab499228
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout.c
@@ -0,0 +1,2643 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based Rotation,
+ * The image data is always read from 0 degree
+ * view and written
+ * to the virtual space of desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-sg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <plat/dma.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+#include <plat/display.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+
+/* Driver Configuration macros */
+#define VOUT_NAME "omap_vout"
+
+enum omap_vout_channels {
+ OMAP_VIDEO1,
+ OMAP_VIDEO2,
+};
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params */
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 vid1_static_vrfb_alloc;
+static u32 vid2_static_vrfb_alloc;
+static int debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+/* list of image formats supported by OMAP2 video pipelines */
+const static struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+		/* Note: V4L2 defines RGB32 as RGB-8-8-8-8; we use this
+		 * for RGB24 unpack mode, where the last 8 bits are ignored
+		 */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+		/* Note: V4L2 defines RGB24 as RGB-8-8-8; we use this
+		 * for RGB24 packed mode
+		 */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
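
The byte-layout note at the top of omap_formats can be made concrete: in the interpretation this driver uses, the 16-bit little-endian RGB565 word carries red in bits 15-11, green in bits 10-5 and blue in bits 4-0. An illustrative packer, not part of the driver:

    #include <stdint.h>

    /* Pack 5/6/5-bit components into the RGB565 layout described in the
     * comment above (little-endian 16-bit word: RRRRRGGG GGGBBBBB). */
    static inline uint16_t rgb565_pack(uint8_t r5, uint8_t g6, uint8_t b5)
    {
        return (uint16_t)(((r5 & 0x1f) << 11) | ((g6 & 0x3f) << 5) | (b5 & 0x1f));
    }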
+/*
+ * Allocate buffers
+ */
+static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ }
+ pix->bytesperline = pix->width * bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return bpp;
+}
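
As a quick check of the arithmetic above: a 640x480 request in a packed 4:2:2 format at 2 bytes per pixel (the conventional value of YUYV_BPP, which is defined elsewhere in this driver) gives bytesperline = 1280 and sizeimage = 614400. A minimal sketch of the same computation:

    /* Worked example of the bytesperline/sizeimage computation, assuming
     * 2 bytes per pixel for packed YUYV/UYVY. */
    static unsigned int packed_422_sizeimage(unsigned int width, unsigned int height)
    {
        unsigned int bytesperline = width * 2;  /* 640 -> 1280 */

        return bytesperline * height;           /* 640 x 480 -> 614400 */
    }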
+
+/*
+ * omap_vout_uservirt_to_phys: This function is used to convert a user
+ * space virtual address to a physical address.
+ */
+static u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+ unsigned long physp = 0;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+ vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET) {
+ physp = virt_to_phys((void *) virtp);
+ } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+		/* this will catch kernel-allocated, mmapped-to-usermode
+		   addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+ down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (res == nr_pages) {
+ physp = __pa(page_address(&pages[0]) +
+ (virtp & ~PAGE_MASK));
+ } else {
+ printk(KERN_WARNING VOUT_NAME
+ "get_user_pages failed\n");
+ return 0;
+ }
+ }
+
+ return physp;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+static void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int rotate_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+/*
+ * Free the V4L2 buffers
+ */
+static void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+	/* Determine how many buffers were allocated at init time, and their size */
+ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+/*
+ * Free VRFB buffers
+ */
+static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+	/* Allocate the VRFB buffers only if they were not
+	 * allocated at init time.
+ */
+ if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ * V4L2 understands 0, 90, 180, 270.
+ * Convert to 0, 1, 2 and 3 respectively for DSS
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+ enum dss_rotation *rotation, bool mirror)
+{
+ int ret = 0;
+
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ break;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ break;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ break;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32-pixel alignment used with rotation.
+ */
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ enum dss_rotation rotation;
+ struct omapvideo_info *ovid;
+ bool mirroring = vout->mirror;
+ struct omap_dss_device *cur_display;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return -1;
+
+ cur_display = ovl->manager->device;
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+			 * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+			ps = 2; /* otherwise the pixel size is 2 bytes */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+ __func__, *cropped_offset);
+ return 0;
+}
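
For the common case of no rotation and no mirroring, the switch above reduces to cropped_offset = line_length * ps * crop->top + (crop->left / vr_ps) * ps. A numeric sketch with illustrative values (not taken from real hardware):

    /* 0-degree, non-mirrored offset for a 640-pixel-wide RGB565 frame
     * (ps = 2, vr_ps = 1) cropped at left = 16, top = 8. */
    static int cropped_offset_example(void)
    {
        int line_length = 640;  /* pix.width when rotation is disabled */
        int ps = 2, vr_ps = 1;
        int top = 8, left = 16;

        return (line_length * ps) * top + (left / vr_ps) * ps;  /* 10272 */
    }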
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_pix_format *pix = &vout->pix;
+ enum omap_color_mode mode;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ switch (pix->pixelformat) {
+ case 0:
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ }
+ return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr)
+{
+ int ret = 0;
+ struct omap_overlay_info info;
+ int cropheight, cropwidth, pixheight, pixwidth;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ vout->dss_mode = video_mode_to_dss_mode(vout);
+ if (vout->dss_mode == -EINVAL) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ /* Setup the input plane parameters according to
+ * rotation value selected.
+ */
+ if (rotate_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixheight = vout->pix.width;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixheight = vout->pix.height;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ info.paddr = addr;
+ info.vaddr = NULL;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = vout->mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha = vout->win.global_alpha;
+ if (!rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
+ "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
+ "out_height=%d rotation_type=%d screen_width=%d\n",
+ __func__, info.enabled, info.paddr, info.width, info.height,
+ info.color_mode, info.rotation, info.mirror, info.pos_x,
+ info.pos_y, info.out_width, info.out_height, info.rotation_type,
+ info.screen_width);
+
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ goto setup_ovl_err;
+
+ return 0;
+
+setup_ovl_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+ return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+ int ret = 0, i;
+ struct v4l2_window *win;
+ struct omap_overlay *ovl;
+ int posx, posy, outw, outh, temp;
+ struct omap_video_timings *timing;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ win = &vout->win;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+
+ timing = &ovl->manager->device->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ /* Invert the height and width for 90
+ * and 270 degree rotation
+ */
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = (timing->y_res - win->w.width) - win->w.left;
+ posx = win->w.top;
+ break;
+
+ case dss_rotation_180_degree:
+ posx = (timing->x_res - win->w.width) - win->w.left;
+ posy = (timing->y_res - win->w.height) - win->w.top;
+ break;
+
+ case dss_rotation_270_degree:
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height) - win->w.top;
+ break;
+
+ default:
+ posx = win->w.left;
+ posy = win->w.top;
+ break;
+ }
+
+ ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+ outw, outh, addr);
+ if (ret)
+ goto omapvid_init_err;
+ }
+ return 0;
+
+omapvid_init_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+ return ret;
+}
+
+/*
+ * Apply the changes and set the GO bit of the DSS
+ */
+int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ int i;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+
+ return 0;
+}
+
+void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int ret;
+ u32 addr, fid;
+ struct omap_overlay *ovl;
+ struct timeval timevalue;
+ struct omapvideo_info *ovid;
+ struct omap_dss_device *cur_display;
+ struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+ if (!vout->streaming)
+ return;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return;
+
+ cur_display = ovl->manager->device;
+
+ spin_lock(&vout->vbq_lock);
+ do_gettimeofday(&timevalue);
+ if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
+ if (!(irqstatus & DISPC_IRQ_VSYNC))
+ goto vout_isr_err;
+
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
+
+		/* First save the configuration in overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+ } else {
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto vout_isr_err;
+ }
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto vout_isr_err;
+
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (0 == fid)
+ vout->field_id = fid;
+
+ goto vout_isr_err;
+ }
+ if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto vout_isr_err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else if (1 == fid) {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = (unsigned long)
+ vout->queued_buf_addr[vout->next_frm->i] +
+ vout->cropped_offset;
+			/* First save the configuration in overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to change mode\n");
+ }
+
+ }
+
+vout_isr_err:
+ spin_unlock(&vout->vbq_lock);
+}
+
+
+/* Video buffer call backs */
+
+/*
+ * The buffer setup function is called by the videobuf layer when the REQBUFS
+ * ioctl is called. It is used to set up buffers and return the size and count
+ * of the buffers allocated. After this call, the videobuf layer will set up
+ * the buffer queue depending on the size and count of buffers
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ int startindex = 0, i, j;
+ u32 phy_addr = 0, virt_addr = 0;
+ struct omap_vout_device *vout = q->priv_data;
+
+ if (!vout)
+ return -EINVAL;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+ if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
+ *count = VRFB_NUM_BUFS;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ if (rotation_enabled(vout))
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+	/* Now allocate the V4L2 buffers */
+ *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (!rotation_enabled(vout))
+ break;
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+ return 0;
+}
+
+/*
+ * Free the V4L2 buffers additionally allocated than default
+ * number of buffers and free all the VRFB buffers
+ */
+static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i])
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ /* Free the VRFB buffers only if they were allocated
+ * during reqbufs. Don't free buffers allocated at init time.
+ */
+ if (!vout->vrfb_static_allocation) {
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (vout->smsshado_virt_addr[i]) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[i],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[i] = 0;
+ vout->smsshado_phy_addr[i] = 0;
+ }
+ }
+ }
+ vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function will be called when the VIDIOC_QBUF ioctl is called.
+ * It prepares buffers before they are handed out for display. This function
+ * converts the user space virtual address into a physical address if the
+ * userptr memory exchange mechanism is used. If rotation is enabled, it
+ * copies the entire buffer into VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = q->priv_data;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Virtual address */
+ /* priv points to struct videobuf_pci_sg_memory. But we want a
+ * pointer to videobuf_dmabuf, which is a member of
+ * videobuf_pci_sg_memory */
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ dmabuf->vmalloc = (void *) vb->baddr;
+
+ /* Physical address */
+ dmabuf->bus_addr =
+ (dma_addr_t) omap_vout_uservirt_to_phys(vb->baddr);
+ }
+
+ if (!rotation_enabled(vout)) {
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ vout->queued_buf_addr[vb->i] = (u8 *) dmabuf->bus_addr;
+ return 0;
+ }
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ /* If rotation is enabled, copy input buffer into VRFB
+ * memory space using DMA. We are copying input buffer
+ * into VRFB memory space of desired angle and DSS will
+ * read image VRFB memory for 0 degree angle
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf->bus_addr, src_element_index, src_frame_index);
+ /* Set DMA source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+ /* Set DMA destination burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+ /* Store the buffer's physical address into an array. Addresses
+ * from this array will be used to configure the DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
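A worked sketch of the destination frame index used by the double-index DMA
setup in omap_vout_buffer_prepare() above. The values are assumptions for
illustration only: MAX_PIXELS_PER_LINE is taken as 2048 and the image as
640-pixel-wide RGB565; neither value is visible in this hunk.

static unsigned long example_vrfb_dest_frame_index(void)
{
        unsigned long max_pixels_per_line = 2048;  /* VRFB line stride in pixels (assumed) */
        unsigned long bpp = 2, vrfb_bpp = 1;       /* RGB565 */
        unsigned long width = 640;                 /* image width (assumed) */
        unsigned long pixsize = bpp * vrfb_bpp;

        /*
         * After each transferred line the DMA destination jumps from the end
         * of the written pixels to the start of the next VRFB line:
         * (2048 * 2 - 640 * 2) + 1 = 2817 for these numbers.
         */
        return ((max_pixels_per_line * pixsize) - (width * bpp)) + 1;
}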
+
+/*
+ * Buffer queue function will be called from the videobuf layer when the
+ * VIDIOC_QBUF ioctl is called. It is used to enqueue a buffer that is
+ * ready to be displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ /* The driver also maintains a queue, so enqueue the buffer in the
+ * driver queue */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * Buffer release function is called from the videobuf layer to release
+ * buffers which are already allocated
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ void *pos;
+ unsigned long start = vma->vm_start;
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ pos = dmabuf->vmalloc;
+ vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
+ while (size > 0) {
+ unsigned long pfn;
+ pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+ unsigned int ret, i;
+ struct videobuf_queue *q;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = file->private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+ ovid = &vout->vid_info;
+
+ if (!vout)
+ return 0;
+
+ q = &vout->vbq;
+ /* Disable all the overlays connected with this interface */
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_overlay *ovl = ovid->overlays[i];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ovl->set_overlay_info(ovl, &info);
+ }
+ }
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "Unable to apply changes\n");
+
+ /* Free all buffers */
+ omap_vout_free_allbuffers(vout);
+ videobuf_mmap_free(q);
+
+ /* Even if apply changes fails we should continue
+ freeing allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD;
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = 0;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return ret;
+}
+
+static int omap_vout_open(struct file *file)
+{
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_sg_init(q, &video_vbq_ops, NULL, &vout->vbq_lock,
+ vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret, bpp;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+ timing = &ovl->manager->device->panel.timings;
+
+ /* We don't support RGB24-packed mode if VRFB rotation
+ * is enabled */
+ if ((rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+
+ /* get the framebuffer parameters */
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ /* Changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+ /* If YUYV then vrfb bpp is 2, for others its 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Save the changes in the overlay structure */
+ ret = omapvid_init(vout, 0);
+ if (ret) {
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+ goto s_fmt_vid_out_exit;
+ }
+
+ ret = 0;
+
+s_fmt_vid_out_exit:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ret = omap_vout_try_window(&vout->fbuf, win);
+
+ if (!ret) {
+ if (vout->vid == OMAP_VIDEO1)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+ }
+
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (!ret) {
+ /* Video1 plane does not support global alpha */
+ if (ovl->id == OMAP_DSS_VIDEO1)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ }
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ u32 key_value = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct omap_vout_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ int ret = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_crop_err;
+ }
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &crop->c);
+
+s_crop_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+ break;
+ case V4L2_CID_BG_COLOR:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+ break;
+ default:
+ ctrl->name[0] = '\0';
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ctrl->value = vout->control[0].value;
+ break;
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+
+ ovl = vout->vid_info.overlays[0];
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ ctrl->value = info.default_color;
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ ctrl->value = vout->control[2].value;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (a->id) {
+ case V4L2_CID_ROTATE:
+ {
+ int rotation = a->value;
+
+ mutex_lock(&vout->lock);
+
+ if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[0].value = rotation;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay *ovl;
+ unsigned int color = a->value;
+ struct omap_overlay_manager_info info;
+
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[1].value = color;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ unsigned int mirror = a->value;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mutex_lock(&vout->lock);
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ vout->mirror = mirror;
+ vout->control[2].value = mirror;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret = 0;
+ unsigned int i, num_buffers = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ return -EINVAL;
+ /* If memory is not mmap or userptr,
+ return error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ omap_vout_free_buffer((u32)dmabuf->vmalloc,
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+ videobuf_mmap_free(q);
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+
+ /* Store the memory type in the data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0)
+ goto reqbuf_err;
+
+ vout->buffer_allocated = req->count;
+ for (i = 0; i < req->count; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ dmabuf->vmalloc = (void *) vout->buf_virt_addr[i];
+ dmabuf->bus_addr = (dma_addr_t) vout->buf_phy_addr[i];
+ dmabuf->sglen = 1;
+ }
+reqbuf_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ if ((rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+
+ return videobuf_qbuf(q, buffer);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ int ret = 0, j;
+ u32 addr = 0, mask = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto streamon_err;
+ }
+
+ ret = videobuf_streamon(q);
+ if (ret)
+ goto streamon_err;
+
+ if (list_empty(&vout->dma_queue)) {
+ ret = -EIO;
+ goto streamon_err1;
+ }
+
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = 1;
+
+ vout->first_int = 1;
+
+ if (omap_vout_calculate_offset(vout)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 1;
+ info.paddr = addr;
+ if (ovl->set_overlay_info(ovl, &info)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ }
+ }
+
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+ ret = 0;
+
+streamon_err1:
+ if (ret)
+ ret = videobuf_streamoff(q);
+streamon_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ u32 mask = 0;
+ int ret = 0, j;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = 0;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to update overlay info in streamoff\n");
+ }
+ }
+
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
+ " streamoff\n");
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ ret = videobuf_streamoff(&vout->vbq);
+
+ return ret;
+}
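The STREAMON/STREAMOFF handlers above expect the usual V4L2 output sequence
from userspace. A minimal sketch of that sequence; the /dev/video1 node,
QVGA RGB565 frames and the two-buffer count are assumptions, and error
handling is omitted:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void push_frames(void)
{
        int fd = open("/dev/video1", O_RDWR);      /* assumed device node */
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
        struct v4l2_requestbuffers req = { .count = 2,
                .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
                .memory = V4L2_MEMORY_MMAP };
        struct v4l2_buffer buf;
        void *mem[2];
        int i;

        fmt.fmt.pix.width = 320;
        fmt.fmt.pix.height = 240;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB565;
        ioctl(fd, VIDIOC_S_FMT, &fmt);
        ioctl(fd, VIDIOC_REQBUFS, &req);

        for (i = 0; i < 2; i++) {
                memset(&buf, 0, sizeof(buf));
                buf.type = type;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.index = i;
                ioctl(fd, VIDIOC_QUERYBUF, &buf);
                mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, buf.m.offset);
                /* ... fill mem[i] with pixel data ... */
                ioctl(fd, VIDIOC_QBUF, &buf);   /* queue before STREAMON */
        }

        ioctl(fd, VIDIOC_STREAMON, &type);      /* enables the DSS pipeline */
        ioctl(fd, VIDIOC_DQBUF, &buf);          /* blocks until a frame is done */
        ioctl(fd, VIDIOC_STREAMOFF, &type);
        close(fd);
}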
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ int enable = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS doesn't support the destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ a->flags = 0x0;
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct v4l2_pix_format *pix;
+ struct v4l2_control *control;
+ struct omap_dss_device *display =
+ vout->vid_info.overlays[0]->manager->device;
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+ /* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+ /* Set the data structures for the overlay parameters*/
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+ /* Initialize the control variables for
+ rotation, flipping and background color. */
+ control = vout->control;
+ control[0].id = V4L2_CID_ROTATE;
+ control[0].value = 0;
+ vout->rotation = 0;
+ vout->mirror = 0;
+ vout->control[2].id = V4L2_CID_VFLIP;
+ vout->control[2].value = 0;
+ vout->vrfb_bpp = 2;
+
+ control[1].id = V4L2_CID_BG_COLOR;
+ control[1].value = 0;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME ": could not allocate"
+ " video device struct\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+ /* need to register for a VID_HARDWARE_* ID in videodev.h */
+ vfd->fops = &omap_vout_fops;
+ vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ u32 numbuffers;
+ int ret = 0, i, j;
+ int image_width, image_height;
+ struct video_device *vfd;
+ struct omap_vout_device *vout;
+ int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+ dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+ vout->cropped_offset = 0;
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and initialize DMA for DMA-based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+ /* Allocate VRFB buffers if selected through bootargs */
+ static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+
+ /* Static allocation of the VRFB buffers is done through
+ command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return ret;
+
+}
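The smsshado_size computation above rounds the maximum supported image
dimensions up to whole VRFB tiles and reserves four bytes per pixel for the
worst case. A worked sketch with assumed limits; VID_MAX_WIDTH and
VID_MAX_HEIGHT are not visible in this hunk, so 1280x720 is an assumption,
while TILE_SIZE is 32 as in omap_voutdef.h added later in this patch:

static unsigned long example_vrfb_worst_case_size(void)
{
        unsigned long tile = 32;                /* TILE_SIZE */
        unsigned long w = 1280, h = 720;        /* assumed VID_MAX_WIDTH/HEIGHT */

        /* Round both dimensions up to a multiple of the tile size */
        w = ((w + tile - 1) / tile) * tile;     /* 1280 -> 1280 */
        h = ((h + tile - 1) / tile) * tile;     /* 720  -> 736  */

        /*
         * 2 bytes per pixel times a worst-case VRFB factor of 2 (matching
         * vrfb_bpp == 2 for YUYV/UYVY): 1280 * 736 * 2 * 2 = 3768320 bytes,
         * which the driver then page aligns.
         */
        return w * h * 2 * 2;
}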
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int ret = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev,
+ struct omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kmalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ dev_err(&pdev->dev, ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(vout, 0, sizeof(struct omap_vout_device));
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+
+ /* Setup the default configuration for the video devices
+ */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ /* Register the Video device with V4L2
+ */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ dev_err(&pdev->dev, ": Could not register "
+ "Video for Linux device\n");
+ vfd->minor = -1;
+ ret = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ /* Configure the overlay structure */
+ ret = omapvid_init(vid_dev->vouts[k], 0);
+ if (!ret)
+ goto success;
+
+error2:
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return ret;
+
+success:
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+
+ if (!vout)
+ return;
+
+ vfd = vout->vfd;
+ if (vfd) {
+ if (!video_is_registered(vfd)) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+ /* Free the VRFB buffers if they were allocated
+ * at init time
+ */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+
+ kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ int k;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *def_display;
+ struct omap2video_device *vid_dev = NULL;
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ return -ENODEV;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL)
+ return -ENOMEM;
+
+ vid_dev->num_displays = 0;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ ret = -EINVAL;
+ goto probe_err0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+ /* Get the video1 and video2 overlays.
+ * Set up the display attached to those overlays
+ */
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ ret = def_display->enable(def_display);
+ if (ret) {
+ /* Here we are not considering an error,
+ * as the display may already be enabled
+ * by the framebuffer driver
+ */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ /* set the update mode */
+ if (def_display->caps &
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 1);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 0);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_MANUAL);
+#endif
+ } else {
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ dev_err(&pdev->dev, "v4l2_device_register failed\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ ret = omap_vout_create_video_devices(pdev);
+ if (ret)
+ goto probe_err2;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->update)
+ display->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+probe_err2:
+ v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ def_display = NULL;
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device)
+ def_display = ovl->manager->device;
+
+ if (def_display)
+ def_display->disable(def_display);
+ }
+probe_err0:
+ kfree(vid_dev);
+ return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .probe = omap_vout_probe,
+ .remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+ if (platform_driver_register(&omap_vout_driver) != 0) {
+ printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
new file mode 100644
index 00000000000..ea3a047f8bc
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -0,0 +1,147 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <plat/display.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#define MAX_DISPLAYS 3
+#define MAX_MANAGERS 3
+
+/* Enum for rotation:
+ * DSS understands rotation as 0, 1, 2, 3,
+ * while the V4L2 driver understands it as 0, 90, 180, 270 degrees
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
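The driver translates the V4L2 rotation value in degrees into this enum
through a v4l2_rot_to_dss_rot() helper defined elsewhere in the patch (the
call site in omap_vout.c earlier in this patch also passes the mirror flag).
One plausible mapping, shown only as an illustration and not the patch's
actual helper:

static int example_rot_to_dss_rot(int degrees, enum dss_rotation *rot)
{
        switch (degrees) {
        case 0:
                *rot = dss_rotation_0_degree;
                return 0;
        case 90:
                *rot = dss_rotation_90_degree;
                return 0;
        case 180:
                *rot = dss_rotation_180_degree;
                return 0;
        case 270:
                *rot = dss_rotation_270_degree;
                return 0;
        default:
                return -EINVAL;
        }
}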
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ int dev_id;
+ int dma_ch;
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ int vid;
+ int opened;
+
+ /* we don't allow changing the image fmt/size once buffers have
+ * been allocated
+ */
+ int buffer_allocated;
+ /* allow reuse of a previously allocated buffer which is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ enum omap_color_mode dss_mode;
+
+ /* we don't allow requesting new buffers while old buffers are
+ * still mmapped
+ */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+ /* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ /* V4L2 control structure for different control id */
+ struct v4l2_control control[MAX_CID];
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset;
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
new file mode 100644
index 00000000000..b941c761eef
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -0,0 +1,293 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <plat/cpu.h>
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
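A brief usage sketch of the helper above, assuming the includes already
present in omap_voutlib.c; the 720x576 source and 640x480 display sizes are
arbitrary assumptions:

static void example_default_crop(void)
{
        struct v4l2_pix_format pix = { .width = 720, .height = 576 };
        struct v4l2_framebuffer fbuf = { .fmt = { .width = 640, .height = 480 } };
        struct v4l2_rect crop;

        omap_vout_default_crop(&pix, &fbuf, &crop);
        /* crop == { .left = 40, .top = 48, .width = 640, .height = 480 } */
}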
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
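A worked example of the clipping above, with assumed sizes: a 700x500 window
requested at (-16, 10) on a 640x480 display comes back clipped to 640x470 at
(0, 10).

static void example_try_window(void)
{
        struct v4l2_framebuffer fbuf = { .fmt = { .width = 640, .height = 480 } };
        struct v4l2_window win = {
                .w = { .left = -16, .top = 10, .width = 700, .height = 500 },
        };

        omap_vout_try_window(&fbuf, &win);
        /* win.w == { .left = 0, .top = 10, .width = 640, .height = 470 } */
}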
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping
+ * the window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* Adjust the cropping window to allow for resizing limitation */
+ if (cpu_is_omap24xx()) {
+ /* For 24xx limit is 8x to 1/2x scaling. */
+ if ((crop->height/win->w.height) >= 2)
+ crop->height = win->w.height * 2;
+
+ if ((crop->width/win->w.width) >= 2)
+ crop->width = win->w.width * 2;
+
+ if (crop->width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is 768
+ * pixels wide. If the cropped image is wider than
+ * 768 pixels then it cannot be vertically resized.
+ */
+ if (crop->height != win->w.height)
+ crop->width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ /* For 34xx limit is 8x to 1/4x scaling. */
+ if ((crop->height/win->w.height) >= 4)
+ crop->height = win->w.height * 4;
+
+ if ((crop->width/win->w.width) >= 4)
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (cpu_is_omap24xx()) {
+ if (crop->height != win->w.height) {
+ /* If we're resizing vertically, we can't support a
+ * crop width wider than 768 pixels.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ }
+ /* vertical resizing */
+ vresize = (1024 * crop->height) / win->w.height;
+ if (cpu_is_omap24xx() && (vresize > 2048))
+ vresize = 2048;
+ else if (cpu_is_omap34xx() && (vresize > 4096))
+ vresize = 4096;
+
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * crop->width) / win->w.width;
+ if (cpu_is_omap24xx() && (hresize > 2048))
+ hresize = 2048;
+ else if (cpu_is_omap34xx() && (hresize > 4096))
+ hresize = 4096;
+
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+ if (cpu_is_omap24xx()) {
+ if ((try_crop.height/win->w.height) >= 2)
+ try_crop.height = win->w.height * 2;
+
+ if ((try_crop.width/win->w.width) >= 2)
+ try_crop.width = win->w.width * 2;
+
+ if (try_crop.width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is
+ * 768 pixels wide. If the cropped image is wider
+ * than 768 pixels then it cannot be vertically resized.
+ */
+ if (try_crop.height != win->w.height)
+ try_crop.width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ if ((try_crop.height/win->w.height) >= 4)
+ try_crop.height = win->w.height * 4;
+
+ if ((try_crop.width/win->w.width) >= 4)
+ try_crop.width = win->w.width * 4;
+ }
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
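The 1024-based fixed-point ratios above bound the DSS scaler. A worked
example with assumed sizes:

/*
 * Cropping an 800x480 region onto a 400x240 window is a 2x downscale in
 * both directions:
 *
 *   vresize = (1024 * 480) / 240 = 2048   (the OMAP24xx clamp, i.e. 1/2x)
 *   hresize = (1024 * 800) / 400 = 2048
 *
 * On OMAP34xx the ratios may reach 4096 (1/4x) before being clamped.
 */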
+
+/* Given a new format in pix and fbuf, crop and win structures are
+ * initialized to default values. crop is initialized to the largest
+ * window size that will fit on the display. The crop window is
+ * centered in the image. win is initialized to the same size as crop
+ * and is centered on the display. All sizes and offsets are
+ * constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
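A small sketch (not from the driver) of the even-aligned centering used above for win->w.left and win->w.top, assuming the window already fits on the display:

	#include <stdio.h>

	/* Centre a window on the display, keeping the offset even, as
	 * omap_vout_new_format() does for win->w.left and win->w.top. */
	static unsigned int center_even(unsigned int display, unsigned int window)
	{
		return ((display - window) >> 1) & ~1U;
	}

	int main(void)
	{
		/* a 480-line crop on a 574-line display: (574 - 480) / 2 = 47 -> 46 */
		printf("top=%u\n", center_even(574, 480));
		return 0;
	}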
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
new file mode 100644
index 00000000000..a60b16e8bfc
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -0,0 +1,34 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index ce76d952e16..f85b2ed8a2d 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -452,8 +452,8 @@ static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
*size = fh->pix.sizeimage;
/* accessing fh->cam->capture_mem is ok, it's constant */
- while (*size * *cnt > fh->cam->capture_mem)
- (*cnt)--;
+ if (*size * *cnt > fh->cam->capture_mem)
+ *cnt = fh->cam->capture_mem / *size;
return 0;
}
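Several drivers in this series (omap24xxcam above, pxa_camera and s2255 below) replace the old count-decrementing loop with a single division. A standalone sketch (not part of the patch) showing the two forms agree:

	#include <assert.h>
	#include <stdio.h>

	/* old form: walk the count down until the buffers fit in memory */
	static unsigned int count_by_loop(unsigned int size, unsigned int cnt,
					  unsigned int mem)
	{
		while (size * cnt > mem)
			cnt--;
		return cnt;
	}

	/* new form: one division gives the same answer */
	static unsigned int count_by_div(unsigned int size, unsigned int cnt,
					 unsigned int mem)
	{
		if (size * cnt > mem)
			cnt = mem / size;
		return cnt;
	}

	int main(void)
	{
		unsigned int size = 614400;		/* 640x480 YUYV frame */
		unsigned int mem = 16 * 1024 * 1024;	/* 16 MB of capture memory */
		unsigned int cnt;

		for (cnt = 1; cnt <= 64; cnt++)
			assert(count_by_loop(size, cnt, mem) ==
			       count_by_div(size, cnt, mem));
		printf("at most %u buffers of %u bytes fit in %u bytes\n",
		       mem / size, size, mem);
		return 0;
	}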
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index e0bce8dc74b..a10912097b7 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -57,8 +57,8 @@
#define DRIVER_VERSION "v1.64 for Linux 2.5"
#define EMAIL "mark@alpha.dyndns.org"
#define DRIVER_AUTHOR "Mark McClelland <mark@alpha.dyndns.org> & Bret Wallach \
- & Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
- <cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
+& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
+<cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
#define DRIVER_DESC "ov511 USB Camera Driver"
#define OV511_I2C_RETRIES 3
@@ -5916,11 +5916,6 @@ ov51x_disconnect(struct usb_interface *intf)
mutex_lock(&ov->lock);
usb_set_intfdata (intf, NULL);
- if (!ov) {
- mutex_unlock(&ov->lock);
- return;
- }
-
/* Free device number */
ov511_devused &= ~(1 << ov->nr);
@@ -5945,7 +5940,7 @@ ov51x_disconnect(struct usb_interface *intf)
ov->dev = NULL;
/* Free the memory */
- if (ov && !ov->user) {
+ if (!ov->user) {
mutex_lock(&ov->cbuf_lock);
kfree(ov->cbuf);
ov->cbuf = NULL;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index aaa50f9b8e7..91c886ab15c 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -198,6 +198,7 @@ struct ov7670_info {
struct ov7670_format_struct *fmt; /* Current format */
unsigned char sat; /* Saturation value */
int hue; /* Hue value */
+ u8 clkrc; /* Clock divider value */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -351,7 +352,7 @@ static struct regval_list ov7670_default_regs[] = {
static struct regval_list ov7670_fmt_yuv422[] = {
{ REG_COM7, 0x0 }, /* Selects YUV mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0 },
+ { REG_COM1, 0 }, /* CCIR601 */
{ REG_COM15, COM15_R00FF },
{ REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
@@ -367,7 +368,7 @@ static struct regval_list ov7670_fmt_yuv422[] = {
static struct regval_list ov7670_fmt_rgb565[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0x0 },
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_RGB565 },
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -383,7 +384,7 @@ static struct regval_list ov7670_fmt_rgb565[] = {
static struct regval_list ov7670_fmt_rgb444[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, R444_ENABLE }, /* Enable xxxxrrrr ggggbbbb */
- { REG_COM1, 0x40 }, /* Magic reserved bit */
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_R01FE|COM15_RGB565 }, /* Data range needed? */
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -408,8 +409,13 @@ static struct regval_list ov7670_fmt_raw[] = {
/*
* Low-level register I/O.
+ *
+ * Note that there are two versions of these. On the XO 1, the
+ * i2c controller only does SMBUS, so that's what we use. The
+ * ov7670 is not really an SMBUS device, though, so the communication
+ * is not always entirely reliable.
*/
-
+#ifdef CONFIG_OLPC_XO_1
static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
@@ -432,9 +438,67 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
int ret = i2c_smbus_write_byte_data(client, reg, value);
if (reg == REG_COM7 && (value & COM7_RESET))
- msleep(2); /* Wait for reset to run */
+ msleep(5); /* Wait for reset to run */
+ return ret;
+}
+
+#else /* ! CONFIG_OLPC_XO_1 */
+/*
+ * On most platforms, we'd rather do straight i2c I/O.
+ */
+static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 data = reg;
+ struct i2c_msg msg;
+ int ret;
+
+ /*
+ * Send out the register address...
+ */
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d on register write\n", ret);
+ return ret;
+ }
+ /*
+ * ...then read back the result.
+ */
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0) {
+ *value = data;
+ ret = 0;
+ }
+ return ret;
+}
+
+
+static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_msg msg;
+ unsigned char data[2] = { reg, value };
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret > 0)
+ ret = 0;
+ if (reg == REG_COM7 && (value & COM7_RESET))
+ msleep(5); /* Wait for reset to run */
return ret;
}
+#endif /* CONFIG_OLPC_XO_1 */
/*
@@ -744,22 +808,12 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
- unsigned char com7, clkrc = 0;
+ unsigned char com7;
ret = ov7670_try_fmt_internal(sd, fmt, &ovfmt, &wsize);
if (ret)
return ret;
/*
- * HACK: if we're running rgb565 we need to grab then rewrite
- * CLKRC. If we're *not*, however, then rewriting clkrc hoses
- * the colors.
- */
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret)
- return ret;
- }
- /*
* COM7 is a pain in the ass, it doesn't like to be read then
* quickly written afterward. But we have everything we need
* to set it absolutely here, as long as the format-specific
@@ -779,8 +833,18 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
ret = ov7670_write_array(sd, wsize->regs);
info->fmt = ovfmt;
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 && ret == 0)
- ret = ov7670_write(sd, REG_CLKRC, clkrc);
+ /*
+ * If we're running RGB565, we must rewrite clkrc after setting
+ * the other parameters or the image looks poor. If we're *not*
+ * doing RGB565, we must not rewrite clkrc or the image looks
+ * *really* poor.
+ *
+ * (Update) Now that we retain clkrc state, we should be able
+ * to write it unconditionally, and that will make the frame
+ * rate persistent too.
+ */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
return ret;
}
@@ -791,20 +855,17 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
- unsigned char clkrc;
- int ret;
+ struct ov7670_info *info = to_state(sd);
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
cp->timeperframe.denominator = OV7670_FRAME_RATE;
- if ((clkrc & CLK_EXT) == 0 && (clkrc & CLK_SCALE) > 1)
- cp->timeperframe.denominator /= (clkrc & CLK_SCALE);
+ if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
+ cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
return 0;
}
@@ -812,19 +873,14 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
- unsigned char clkrc;
- int ret, div;
+ struct ov7670_info *info = to_state(sd);
+ int div;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
- /*
- * CLKRC has a reserved bit, so let's preserve it.
- */
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
@@ -833,10 +889,10 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
div = 1;
else if (div > CLK_SCALE)
div = CLK_SCALE;
- clkrc = (clkrc & 0x80) | div;
+ info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
tpf->denominator = OV7670_FRAME_RATE/div;
- return ov7670_write(sd, REG_CLKRC, clkrc);
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
}
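A standalone sketch (not part of the patch) of the arithmetic behind the cached-clkrc handling above; the constants mirror the driver's CLK_EXT, CLK_SCALE and OV7670_FRAME_RATE defines:

	#include <stdio.h>

	#define CLK_EXT			0x40	/* use the external clock directly */
	#define CLK_SCALE		0x3f	/* divider bits */
	#define OV7670_FRAME_RATE	30

	/* g_parm: frame rate implied by the cached clkrc value */
	static int clkrc_to_fps(unsigned char clkrc)
	{
		int fps = OV7670_FRAME_RATE;

		if ((clkrc & CLK_EXT) == 0 && (clkrc & CLK_SCALE) > 1)
			fps /= clkrc & CLK_SCALE;
		return fps;
	}

	/* s_parm: fold a requested 1/fps interval back into clkrc[5:0] */
	static unsigned char fps_to_clkrc(unsigned char clkrc, int num, int den)
	{
		int div = (num == 0 || den == 0) ? 1 : num * OV7670_FRAME_RATE / den;

		if (div < 1)
			div = 1;
		else if (div > CLK_SCALE)
			div = CLK_SCALE;
		return (clkrc & 0x80) | div;	/* bit 7 is reserved, keep it */
	}

	int main(void)
	{
		unsigned char clkrc = fps_to_clkrc(0x80, 1, 10);	/* ask for 10 fps */

		/* prints "clkrc=0x83 -> 10 fps" */
		printf("clkrc=0x%02x -> %d fps\n", clkrc, clkrc_to_fps(clkrc));
		return 0;
	}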
@@ -1115,6 +1171,140 @@ static int ov7670_s_vflip(struct v4l2_subdev *sd, int value)
return ret;
}
+/*
+ * GAIN is split between REG_GAIN and REG_VREF[7:6]. If one believes
+ * the data sheet, the VREF parts should be the most significant, but
+ * experience shows otherwise. There seems to be little value in
+ * messing with the VREF bits, so we leave them alone.
+ */
+static int ov7670_g_gain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char gain;
+
+ ret = ov7670_read(sd, REG_GAIN, &gain);
+ *value = gain;
+ return ret;
+}
+
+static int ov7670_s_gain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_write(sd, REG_GAIN, value & 0xff);
+ /* Have to turn off AGC as well */
+ if (ret == 0) {
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AGC);
+ }
+ return ret;
+}
+
+/*
+ * Tweak autogain.
+ */
+static int ov7670_g_autogain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ *value = (com8 & COM8_AGC) != 0;
+ return ret;
+}
+
+static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value)
+ com8 |= COM8_AGC;
+ else
+ com8 &= ~COM8_AGC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+/*
+ * Exposure is spread all over the place: top 6 bits in AECHH, middle
+ * 8 in AECH, and two stashed in COM1 just for the hell of it.
+ */
+static int ov7670_g_exp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com1, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_AECH, &aech) +
+ ov7670_read(sd, REG_AECHH, &aechh);
+ *value = ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
+ return ret;
+}
+
+static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com1, com8, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_COM8, &com8);
+ ov7670_read(sd, REG_AECHH, &aechh);
+ if (ret)
+ return ret;
+
+ com1 = (com1 & 0xfc) | (value & 0x03);
+ aech = (value >> 2) & 0xff;
+ aechh = (aechh & 0xc0) | ((value >> 10) & 0x3f);
+ ret = ov7670_write(sd, REG_COM1, com1) +
+ ov7670_write(sd, REG_AECH, aech) +
+ ov7670_write(sd, REG_AECHH, aechh);
+ /* Have to turn off AEC as well */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AEC);
+ return ret;
+}
+
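A standalone sketch (not part of the patch) of the exposure bit layout that ov7670_g_exp() and ov7670_s_exp() above shuffle between registers: AECHH[5:0] holds bits 15:10, AECH holds bits 9:2 and COM1[1:0] holds bits 1:0, while the unrelated bits of COM1 and AECHH are preserved.

	#include <assert.h>
	#include <stdio.h>

	struct exp_regs {
		unsigned char com1;	/* only bits 1:0 carry exposure */
		unsigned char aech;
		unsigned char aechh;	/* only bits 5:0 carry exposure */
	};

	static void exp_pack(int value, struct exp_regs *r)
	{
		r->com1 = (r->com1 & 0xfc) | (value & 0x03);
		r->aech = (value >> 2) & 0xff;
		r->aechh = (r->aechh & 0xc0) | ((value >> 10) & 0x3f);
	}

	static int exp_unpack(const struct exp_regs *r)
	{
		return ((r->aechh & 0x3f) << 10) | (r->aech << 2) | (r->com1 & 0x03);
	}

	int main(void)
	{
		struct exp_regs r = { .com1 = 0x40, .aechh = 0xc0 };	/* non-exposure bits set */
		int value;

		for (value = 0; value < 0x10000; value++) {
			exp_pack(value, &r);
			assert(exp_unpack(&r) == value);
		}
		/* the reserved/unrelated bits in COM1 and AECHH are untouched */
		assert((r.com1 & 0xfc) == 0x40 && (r.aechh & 0xc0) == 0xc0);
		printf("exposure pack/unpack round-trips for all 16-bit values\n");
		return 0;
	}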
+/*
+ * Tweak autoexposure.
+ */
+static int ov7670_g_autoexp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+ enum v4l2_exposure_auto_type *atype = (enum v4l2_exposure_auto_type *) value;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (com8 & COM8_AEC)
+ *atype = V4L2_EXPOSURE_AUTO;
+ else
+ *atype = V4L2_EXPOSURE_MANUAL;
+ return ret;
+}
+
+static int ov7670_s_autoexp(struct v4l2_subdev *sd,
+ enum v4l2_exposure_auto_type value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value == V4L2_EXPOSURE_AUTO)
+ com8 |= COM8_AEC;
+ else
+ com8 &= ~COM8_AEC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+
+
static int ov7670_queryctrl(struct v4l2_subdev *sd,
struct v4l2_queryctrl *qc)
{
@@ -1131,6 +1321,14 @@ static int ov7670_queryctrl(struct v4l2_subdev *sd,
return v4l2_ctrl_query_fill(qc, 0, 256, 1, 128);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -180, 180, 5, 0);
+ case V4L2_CID_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_AUTOGAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_EXPOSURE:
+ return v4l2_ctrl_query_fill(qc, 0, 65535, 1, 500);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
}
return -EINVAL;
}
@@ -1150,6 +1348,14 @@ static int ov7670_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_g_vflip(sd, &ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_g_hflip(sd, &ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_g_gain(sd, &ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_g_autogain(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_g_exp(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_g_autoexp(sd, &ctrl->value);
}
return -EINVAL;
}
@@ -1169,6 +1375,15 @@ static int ov7670_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_s_vflip(sd, ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_s_hflip(sd, ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_s_gain(sd, ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_s_autogain(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_s_exp(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_s_autoexp(sd,
+ (enum v4l2_exposure_auto_type) ctrl->value);
}
return -EINVAL;
}
@@ -1268,6 +1483,7 @@ static int ov7670_probe(struct i2c_client *client,
info->fmt = &ov7670_formats[0];
info->sat = 128; /* Review this */
+ info->clkrc = 1; /* 30fps */
return 0;
}
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 47bf60ceb7a..36599a65f54 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -59,9 +59,9 @@ static const struct ov9640_reg ov9640_regs_dflt[] = {
* COM12 |= OV9640_COM12_YUV_AVG
*
* for RGB, alter the following registers:
- * COM7 |= OV9640_COM7_RGB
- * COM13 |= OV9640_COM13_RGB_AVG
- * COM15 |= proper RGB color encoding mode
+ * COM7 |= OV9640_COM7_RGB
+ * COM13 |= OV9640_COM13_RGB_AVG
+ * COM15 |= proper RGB color encoding mode
*/
static const struct ov9640_reg ov9640_regs_qqcif[] = {
{ OV9640_CLKRC, OV9640_CLKRC_DPLL_EN | OV9640_CLKRC_DIV(0x0f) },
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 0598bbd3f36..7129b50757d 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -61,7 +61,6 @@ struct pms {
int depth;
int input;
s32 brightness, saturation, hue, contrast;
- unsigned long in_use;
struct mutex lock;
int i2c_count;
struct i2c_info i2cinfo[64];
@@ -931,25 +930,8 @@ static ssize_t pms_read(struct file *file, char __user *buf,
return len;
}
-static int pms_exclusive_open(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- return test_and_set_bit(0, &dev->in_use) ? -EBUSY : 0;
-}
-
-static int pms_exclusive_release(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- clear_bit(0, &dev->in_use);
- return 0;
-}
-
static const struct v4l2_file_operations pms_fops = {
.owner = THIS_MODULE,
- .open = pms_exclusive_open,
- .release = pms_exclusive_release,
.ioctl = video_ioctl2,
.read = pms_read,
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 712b300f723..301ef197d03 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2028,7 +2028,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
- video, s_fmt, &fmt);
+ vbi, s_sliced_fmt, &fmt.fmt.sliced);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index bf1e0fe9f4d..5ffa0d2b0b0 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -49,7 +49,7 @@ struct pvr2_v4l2_dev {
struct pvr2_v4l2_fh {
struct pvr2_channel channel;
- struct pvr2_v4l2_dev *dev_info;
+ struct pvr2_v4l2_dev *pdi;
enum v4l2_priority prio;
struct pvr2_ioread *rhp;
struct file *file;
@@ -162,7 +162,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_v4l2 *vp = fh->vhead;
- struct pvr2_v4l2_dev *dev_info = fh->dev_info;
+ struct pvr2_v4l2_dev *pdi = fh->pdi;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
long ret = -EINVAL;
@@ -183,7 +183,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_INPUT:
case VIDIOC_S_TUNER:
case VIDIOC_S_FREQUENCY:
- ret = v4l2_prio_check(&vp->prio, &fh->prio);
+ ret = v4l2_prio_check(&vp->prio, fh->prio);
if (ret)
return ret;
}
@@ -564,14 +564,14 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMON:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
ret = -EPERM;
break;
}
- ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
+ ret = pvr2_hdw_set_stream_type(hdw,pdi->config);
if (ret < 0) return ret;
ret = pvr2_hdw_set_streaming(hdw,!0);
break;
@@ -579,7 +579,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMOFF:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
@@ -972,7 +972,7 @@ static int pvr2_v4l2_release(struct file *file)
fhp->rhp = NULL;
}
- v4l2_prio_close(&vp->prio, &fhp->prio);
+ v4l2_prio_close(&vp->prio, fhp->prio);
file->private_data = NULL;
if (fhp->vnext) {
@@ -1032,7 +1032,7 @@ static int pvr2_v4l2_open(struct file *file)
}
init_waitqueue_head(&fhp->wait_data);
- fhp->dev_info = dip;
+ fhp->pdi = dip;
pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
@@ -1093,7 +1093,7 @@ static int pvr2_v4l2_open(struct file *file)
fhp->file = file;
file->private_data = fhp;
- v4l2_prio_open(&vp->prio,&fhp->prio);
+ v4l2_prio_open(&vp->prio, &fhp->prio);
fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
@@ -1113,7 +1113,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
struct pvr2_hdw *hdw;
if (fh->rhp) return 0;
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means that we're
not currently allowed to stream from this node. */
return -EPERM;
@@ -1122,21 +1122,21 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
/* First read() attempt. Try to claim the stream and start
it... */
if ((ret = pvr2_channel_claim_stream(&fh->channel,
- fh->dev_info->stream)) != 0) {
+ fh->pdi->stream)) != 0) {
/* Someone else must already have it */
return ret;
}
- fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
+ fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream);
if (!fh->rhp) {
pvr2_channel_claim_stream(&fh->channel,NULL);
return -ENOMEM;
}
hdw = fh->channel.mc_head->hdw;
- sp = fh->dev_info->stream->stream;
+ sp = fh->pdi->stream->stream;
pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
- pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
+ pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 340f954aba3..11980db22d3 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -39,7 +39,7 @@ config USB_PWC_DEBUG
config USB_PWC_INPUT_EVDEV
bool "USB Philips Cameras input events device support"
default y
- depends on USB_PWC=INPUT || INPUT=y
+ depends on USB_PWC && (USB_PWC=INPUT || INPUT=y)
---help---
This option makes USB Philips cameras register the snapshot button as
an input device to report button events.
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 04bf5c11308..7fe70e71865 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -253,8 +253,8 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 9277194cd82..bbd9c11e2c5 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -555,15 +555,15 @@ static int rj54n1_commit(struct i2c_client *client)
return ret;
}
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h);
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h);
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
- unsigned int dummy = 0, output_w, output_h,
+ int dummy = 0, output_w, output_h,
input_w = rect->width, input_h = rect->height;
int ret;
@@ -577,7 +577,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
- dev_dbg(&client->dev, "Scaling for %ux%u : %u = %ux%u\n",
+ dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
input_w, input_h, rj54n1->resize, output_w, output_h);
ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
@@ -638,8 +638,8 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
* the output one, updates the window sizes and returns an error or the resize
* coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
*/
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h)
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
@@ -749,7 +749,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
* improve the image quality or stability for larger frames (see comment
* above), but I didn't check the framerate.
*/
- skip = min(resize / 1024, (unsigned)15);
+ skip = min(resize / 1024, 15U);
inc_sel = 1 << skip;
@@ -819,7 +819,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
*out_w = output_w;
*out_h = output_h;
- dev_dbg(&client->dev, "Scaled for %ux%u : %u = %ux%u, skip %u\n",
+ dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
*in_w, *in_h, resize, output_w, output_h, skip);
return resize;
@@ -1017,7 +1017,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
- unsigned int output_w, output_h, max_w, max_h,
+ int output_w, output_h, max_w, max_h,
input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
int ret;
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 3de914deb8e..3c7a79f3812 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -1,7 +1,7 @@
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
- * Copyright (C) 2007-2008 by Sensoray Company Inc.
+ * Copyright (C) 2007-2010 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
@@ -52,14 +52,19 @@
#include <linux/smp_lock.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
+#define S2255_MAJOR_VERSION 1
+#define S2255_MINOR_VERSION 20
+#define S2255_RELEASE 0
+#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
+ S2255_MINOR_VERSION, \
+ S2255_RELEASE)
#define FIRMWARE_FILE_NAME "f2255usb.bin"
-
-
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
@@ -76,14 +81,14 @@
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_DEF_BUFS 16
#define S2255_SETMODE_TIMEOUT 500
-#define MAX_CHANNELS 4
-#define S2255_MARKER_FRAME 0x2255DA4AL
-#define S2255_MARKER_RESPONSE 0x2255ACACL
-#define S2255_RESPONSE_SETMODE 0x01
-#define S2255_RESPONSE_FW 0x10
+#define S2255_VIDSTATUS_TIMEOUT 350
+#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
+#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
+#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
+#define S2255_RESPONSE_FW cpu_to_le32(0x10)
+#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
-#define MAX_PIPE_BUFFERS 1
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
@@ -118,9 +123,10 @@
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
-#define MASK_COLOR 0xff
-#define MASK_JPG_QUALITY 0xff00
+#define MASK_COLOR 0x000000ff
+#define MASK_JPG_QUALITY 0x0000ff00
+#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. Not implemented by V4L yet(experimental in V4L) */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
@@ -139,12 +145,12 @@
#define DEF_HUE 0
/* usb config commands */
-#define IN_DATA_TOKEN 0x2255c0de
-#define CMD_2255 0xc2255000
-#define CMD_SET_MODE (CMD_2255 | 0x10)
-#define CMD_START (CMD_2255 | 0x20)
-#define CMD_STOP (CMD_2255 | 0x30)
-#define CMD_STATUS (CMD_2255 | 0x40)
+#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
+#define CMD_2255 cpu_to_le32(0xc2255000)
+#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
+#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
+#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
+#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
@@ -194,7 +200,6 @@ struct s2255_dmaqueue {
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
-
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
/* 2255 read states */
#define S2255_READ_IDLE 0
@@ -223,8 +228,10 @@ struct s2255_pipeinfo {
struct s2255_fmt; /*forward declaration */
struct s2255_dev {
+ struct video_device vdev[MAX_CHANNELS];
+ struct v4l2_device v4l2_dev;
+ atomic_t channels; /* number of channels registered */
int frames;
- int users[MAX_CHANNELS];
struct mutex lock;
struct mutex open_lock;
int resources[MAX_CHANNELS];
@@ -233,11 +240,10 @@ struct s2255_dev {
u8 read_endpoint;
struct s2255_dmaqueue vidq[MAX_CHANNELS];
- struct video_device *vdev[MAX_CHANNELS];
struct timer_list timer;
struct s2255_fw *fw_data;
- struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
- struct s2255_bufferi buffer[MAX_CHANNELS];
+ struct s2255_pipeinfo pipe;
+ struct s2255_bufferi buffer[MAX_CHANNELS];
struct s2255_mode mode[MAX_CHANNELS];
/* jpeg compression */
struct v4l2_jpegcompression jc[MAX_CHANNELS];
@@ -261,11 +267,21 @@ struct s2255_dev {
int chn_configured[MAX_CHANNELS];
wait_queue_head_t wait_setmode[MAX_CHANNELS];
int setmode_ready[MAX_CHANNELS];
+ /* video status items */
+ int vidstatus[MAX_CHANNELS];
+ wait_queue_head_t wait_vidstatus[MAX_CHANNELS];
+ int vidstatus_ready[MAX_CHANNELS];
int chn_ready;
- struct kref kref;
spinlock_t slock;
+ /* dsp firmware version (f2255usb.bin) */
+ int dsp_fw_ver;
+ u16 pid; /* product id */
};
-#define to_s2255_dev(d) container_of(d, struct s2255_dev, kref)
+
+static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
+}
struct s2255_fmt {
char *name;
@@ -296,17 +312,43 @@ struct s2255_fh {
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
-#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 14
-#define S2255_RELEASE 0
-#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
- S2255_MINOR_VERSION, \
- S2255_RELEASE)
-
-/* vendor ids */
-#define USB_S2255_VENDOR_ID 0x1943
-#define USB_S2255_PRODUCT_ID 0x2255
+/* current DSP FW version */
+#define S2255_CUR_DSP_FWVER 8
+/* Need DSP version 5+ for video status feature */
+#define S2255_MIN_DSP_STATUS 5
+#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC)
+
+/* private V4L2 controls */
+
+/*
+ * The following chart displays how COLORFILTER should be set
+ *  =========================================================
+ *  =     fourcc               =         COLORFILTER        =
+ *  =                          ==============================
+ *  =                          =      0         =    1      =
+ *  =========================================================
+ *  =  V4L2_PIX_FMT_GREY(Y8)   = monochrome from= monochrome=
+ *  =                          = s-video or     = composite =
+ *  =                          = B/W camera     = input     =
+ *  =========================================================
+ *  =  other                   = color, svideo  = color,    =
+ *  =                          =                = composite =
+ *  =========================================================
+ *
+ * Notes:
+ * channels 0-3 on 2255 are composite
+ * channels 0-1 on 2257 are composite, 2-3 are s-video
+ * If COLORFILTER is 0 with a composite color camera connected,
+ * the output will appear monochrome but hatching
+ * will occur.
+ * COLORFILTER is different from "color killer" and "color effects"
+ * for reasons above.
+ */
+#define S2255_V4L2_YC_ON 1
+#define S2255_V4L2_YC_OFF 0
+#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)
+
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
@@ -325,9 +367,8 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf,
static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
-static void s2255_exit_v4l(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
-static void s2255_destroy(struct kref *kref);
+static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
@@ -347,7 +388,6 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
static struct usb_driver s2255_driver;
-
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
@@ -362,58 +402,16 @@ module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
/* USB device table */
+#define USB_SENSORAY_VID 0x1943
static struct usb_device_id s2255_table[] = {
- {USB_DEVICE(USB_S2255_VENDOR_ID, USB_S2255_PRODUCT_ID)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
-
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
-/* supported controls */
-static struct v4l2_queryctrl s2255_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = -127,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_CONTRAST,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_SATURATION,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_HUE,
- .flags = 0,
- }
-};
-
-static int qctl_regs[ARRAY_SIZE(s2255_qctrl)];
-
/* image formats. */
static const struct s2255_fmt formats[] = {
{
@@ -505,7 +503,7 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
- dprintk(100, "s2255 timer\n");
+ dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
printk(KERN_ERR "s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -527,7 +525,7 @@ static void s2255_fwchunk_complete(struct urb *urb)
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
- dprintk(100, "udev %p urb %p", udev, urb);
+ dprintk(100, "%s: udev %p urb %p", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -573,8 +571,8 @@ static void s2255_fwchunk_complete(struct urb *urb)
data->fw_loaded += len;
} else {
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
+ dprintk(100, "%s: firmware upload complete\n", __func__);
}
- dprintk(100, "2255 complete done\n");
return;
}
@@ -585,9 +583,7 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
struct s2255_buffer *buf;
unsigned long flags = 0;
int rc = 0;
- dprintk(2, "wakeup: %p channel: %d\n", &dma_q, chn);
spin_lock_irqsave(&dev->slock, flags);
-
if (list_empty(&dma_q->active)) {
dprintk(1, "No active queue to serve\n");
rc = -1;
@@ -595,23 +591,19 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
}
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
-
list_del(&buf->vb.queue);
do_gettimeofday(&buf->vb.ts);
- dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i);
s2255_fillbuff(dev, buf, dma_q->channel, jpgsize);
wake_up(&buf->vb.done);
- dprintk(2, "wakeup [buf/i] [%p/%d]\n", buf, buf->vb.i);
+ dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
-
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
@@ -621,9 +613,6 @@ static const struct s2255_fmt *format_by_fourcc(int fourcc)
return NULL;
}
-
-
-
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
@@ -703,8 +692,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = S2255_DEF_BUFS;
- while (*size * (*count) > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
@@ -727,10 +716,10 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
if (fh->fmt == NULL)
return -EINVAL;
- if ((fh->width < norm_minw(fh->dev->vdev[fh->channel])) ||
- (fh->width > norm_maxw(fh->dev->vdev[fh->channel])) ||
- (fh->height < norm_minh(fh->dev->vdev[fh->channel])) ||
- (fh->height > norm_maxh(fh->dev->vdev[fh->channel]))) {
+ if ((fh->width < norm_minw(&fh->dev->vdev[fh->channel])) ||
+ (fh->width > norm_maxw(&fh->dev->vdev[fh->channel])) ||
+ (fh->height < norm_minh(&fh->dev->vdev[fh->channel])) ||
+ (fh->height > norm_maxh(&fh->dev->vdev[fh->channel]))) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
@@ -747,7 +736,6 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.height = fh->height;
buf->vb.field = field;
-
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
@@ -767,9 +755,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
struct s2255_fh *fh = vq->priv_data;
struct s2255_dev *dev = fh->dev;
struct s2255_dmaqueue *vidq = &dev->vidq[fh->channel];
-
dprintk(1, "%s\n", __func__);
-
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
@@ -828,6 +814,27 @@ static void res_free(struct s2255_dev *dev, struct s2255_fh *fh)
dprintk(1, "res: put\n");
}
+static int vidioc_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *qmenu)
+{
+ static const char *colorfilter[] = {
+ "Off",
+ "On",
+ NULL
+ };
+ if (qmenu->id == V4L2_CID_PRIVATE_COLORFILTER) {
+ int i;
+ const char **menu_items = colorfilter;
+ for (i = 0; i < qmenu->index && menu_items[i]; i++)
+ ; /* do nothing (from v4l2-common.c) */
+ if (menu_items[i] == NULL || menu_items[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qmenu->name, menu_items[qmenu->index],
+ sizeof(qmenu->name));
+ return 0;
+ }
+ return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
+}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
@@ -883,7 +890,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
int is_ntsc;
is_ntsc =
- (dev->vdev[fh->channel]->current_norm & V4L2_STD_NTSC) ? 1 : 0;
+ (dev->vdev[fh->channel].current_norm & V4L2_STD_NTSC) ? 1 : 0;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -894,10 +901,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
if (field == V4L2_FIELD_ANY)
b_any_field = 1;
- dprintk(4, "try format %d \n", is_ntsc);
- /* supports 3 sizes. see s2255drv.h */
- dprintk(50, "width test %d, height %d\n",
- f->fmt.pix.width, f->fmt.pix.height);
+ dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
+ __func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
@@ -952,29 +957,24 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) {
- dprintk(50, "pal 704\n");
f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
field = V4L2_FIELD_SEQ_TB;
} else if (f->fmt.pix.width >= LINE_SZ_2CIFS_PAL) {
- dprintk(50, "pal 352A\n");
f->fmt.pix.width = LINE_SZ_2CIFS_PAL;
field = V4L2_FIELD_TOP;
} else if (f->fmt.pix.width >= LINE_SZ_1CIFS_PAL) {
- dprintk(50, "pal 352B\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
} else {
- dprintk(50, "pal 352C\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
}
}
-
- dprintk(50, "width %d height %d field %d \n", f->fmt.pix.width,
- f->fmt.pix.height, f->fmt.pix.field);
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ dprintk(50, "%s: set width %d height %d field %d\n", __func__,
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
@@ -1006,7 +1006,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
if (res_locked(fh->dev, fh)) {
- dprintk(1, "can't change format after started\n");
+ dprintk(1, "%s: channel busy\n", __func__);
ret = -EBUSY;
goto out_s_fmt;
}
@@ -1016,17 +1016,14 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- norm = norm_minw(fh->dev->vdev[fh->channel]);
- if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) {
- if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) {
+ norm = norm_minw(&fh->dev->vdev[fh->channel]);
+ if (fh->width > norm_minw(&fh->dev->vdev[fh->channel])) {
+ if (fh->height > norm_minh(&fh->dev->vdev[fh->channel])) {
if (fh->dev->cap_parm[fh->channel].capturemode &
- V4L2_MODE_HIGHQUALITY) {
+ V4L2_MODE_HIGHQUALITY)
fh->mode.scale = SCALE_4CIFSI;
- dprintk(2, "scale 4CIFSI\n");
- } else {
+ else
fh->mode.scale = SCALE_4CIFS;
- dprintk(2, "scale 4CIFS\n");
- }
} else
fh->mode.scale = SCALE_2CIFS;
@@ -1037,19 +1034,23 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
/* color mode */
switch (fh->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
- fh->mode.color = COLOR_Y8;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
- fh->mode.color = COLOR_JPG |
- (fh->dev->jc[fh->channel].quality << 8);
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_JPG;
+ fh->mode.color |= (fh->dev->jc[fh->channel].quality << 8);
break;
case V4L2_PIX_FMT_YUV422P:
- fh->mode.color = COLOR_YUVPL;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
- fh->mode.color = COLOR_YUVPK;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPK;
break;
}
ret = 0;
@@ -1178,19 +1179,13 @@ static u32 get_transfer_size(struct s2255_mode *mode)
return usbInSize;
}
-static void dump_verify_mode(struct s2255_dev *sdev, struct s2255_mode *mode)
+static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
- dev_info(dev, "verify mode\n");
- dev_info(dev, "format: %d\n", mode->format);
- dev_info(dev, "scale: %d\n", mode->scale);
- dev_info(dev, "fdec: %d\n", mode->fdec);
- dev_info(dev, "color: %d\n", mode->color);
+ dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
+ dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
- dev_info(dev, "restart: 0x%x\n", mode->restart);
- dev_info(dev, "usb_block: 0x%x\n", mode->usb_block);
- dev_info(dev, "single: 0x%x\n", mode->single);
dev_info(dev, "------------------------------------------------\n");
}
@@ -1206,44 +1201,38 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode)
{
int res;
- u32 *buffer;
+ __le32 *buffer;
unsigned long chn_rev;
-
mutex_lock(&dev->lock);
chn_rev = G_chnmap[chn];
- dprintk(3, "mode scale [%ld] %p %d\n", chn, mode, mode->scale);
- dprintk(3, "mode scale [%ld] %p %d\n", chn, &dev->mode[chn],
- dev->mode[chn].scale);
- dprintk(2, "mode contrast %x\n", mode->contrast);
-
+ dprintk(3, "%s channel %lu\n", __func__, chn);
/* if JPEG, set the quality */
- if ((mode->color & MASK_COLOR) == COLOR_JPG)
- mode->color = (dev->jc[chn].quality << 8) | COLOR_JPG;
-
+ if ((mode->color & MASK_COLOR) == COLOR_JPG) {
+ mode->color &= ~MASK_COLOR;
+ mode->color |= COLOR_JPG;
+ mode->color &= ~MASK_JPG_QUALITY;
+ mode->color |= (dev->jc[chn].quality << 8);
+ }
/* save the mode */
dev->mode[chn] = *mode;
dev->req_image_size[chn] = get_transfer_size(mode);
- dprintk(1, "transfer size %ld\n", dev->req_image_size[chn]);
-
+ dprintk(1, "%s: reqsize %ld\n", __func__, dev->req_image_size[chn]);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
mutex_unlock(&dev->lock);
return -ENOMEM;
}
-
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
- buffer[1] = (u32) chn_rev;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
memcpy(&buffer[3], &dev->mode[chn], sizeof(struct s2255_mode));
dev->setmode_ready[chn] = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
- dump_verify_mode(dev, mode);
+ s2255_print_cfg(dev, mode);
kfree(buffer);
- dprintk(1, "set mode done chn %lu, %d\n", chn, res);
-
/* wait at least 3 frames before continuing */
if (mode->restart) {
wait_event_timeout(dev->wait_setmode[chn],
@@ -1254,10 +1243,46 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
res = -EFAULT;
}
}
-
/* clear the restart flag */
dev->mode[chn].restart = 0;
mutex_unlock(&dev->lock);
+ dprintk(1, "%s chn %lu, result: %d\n", __func__, chn, res);
+ return res;
+}
+
+static int s2255_cmd_status(struct s2255_dev *dev, unsigned long chn,
+ u32 *pstatus)
+{
+ int res;
+ __le32 *buffer;
+ u32 chn_rev;
+ mutex_lock(&dev->lock);
+ chn_rev = G_chnmap[chn];
+ dprintk(4, "%s chan %lu\n", __func__, chn);
+ buffer = kzalloc(512, GFP_KERNEL);
+ if (buffer == NULL) {
+ dev_err(&dev->udev->dev, "out of mem\n");
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+ /* form the get vid status command */
+ buffer[0] = IN_DATA_TOKEN;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
+ buffer[2] = CMD_STATUS;
+ *pstatus = 0;
+ dev->vidstatus_ready[chn] = 0;
+ res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
+ kfree(buffer);
+ wait_event_timeout(dev->wait_vidstatus[chn],
+ (dev->vidstatus_ready[chn] != 0),
+ msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
+ if (dev->vidstatus_ready[chn] != 1) {
+ printk(KERN_DEBUG "s2255: no vidstatus response\n");
+ res = -EFAULT;
+ }
+ *pstatus = dev->vidstatus[chn];
+ dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
+ mutex_unlock(&dev->lock);
return res;
}
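For reference, a standalone sketch (not part of the patch) of the 512-byte config block that s2255_set_mode() and s2255_cmd_status() hand to s2255_write_config(): 32-bit little-endian words of { IN_DATA_TOKEN, channel, command, payload... }. Spelling the byte order out by hand shows why the command defines above were wrapped in cpu_to_le32(); the helper names here are illustrative only.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define S2255_CONFIG_SIZE	512
	#define IN_DATA_TOKEN		0x2255c0deUL
	#define CMD_2255		0xc2255000UL
	#define CMD_STATUS		(CMD_2255 | 0x40)

	static void put_le32(uint8_t *p, uint32_t v)
	{
		p[0] = v & 0xff;
		p[1] = (v >> 8) & 0xff;
		p[2] = (v >> 16) & 0xff;
		p[3] = (v >> 24) & 0xff;
	}

	static void build_status_cmd(uint8_t *buf, uint32_t channel)
	{
		memset(buf, 0, S2255_CONFIG_SIZE);
		put_le32(buf + 0, IN_DATA_TOKEN);	/* marks a config packet */
		put_le32(buf + 4, channel);		/* firmware channel index */
		put_le32(buf + 8, CMD_STATUS);		/* request video status */
	}

	int main(void)
	{
		uint8_t buf[S2255_CONFIG_SIZE];

		build_status_cmd(buf, 2);
		/* prints "de c0 55 22" on any host, regardless of endianness */
		printf("first bytes: %02x %02x %02x %02x\n",
		       buf[0], buf[1], buf[2], buf[3]);
		return 0;
	}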
@@ -1291,7 +1316,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode = &fh->mode;
old_mode = &fh->dev->mode[chn];
- if (new_mode->color != old_mode->color)
+ if ((new_mode->color & MASK_COLOR) != (old_mode->color & MASK_COLOR))
new_mode->restart = 1;
else if (new_mode->scale != old_mode->scale)
new_mode->restart = 1;
@@ -1302,7 +1327,6 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode->restart = 0;
*old_mode = *new_mode;
dev->cur_fmt[chn] = fh->fmt;
- dprintk(1, "%s[%d]\n", __func__, chn);
dev->last_frame[chn] = -1;
dev->bad_payload[chn] = 0;
dev->cur_frame[chn] = 0;
@@ -1325,7 +1349,6 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
dprintk(4, "%s\n, channel: %d", __func__, fh->channel);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
printk(KERN_ERR "invalid fh type0\n");
@@ -1347,27 +1370,32 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
struct s2255_mode *mode;
struct videobuf_queue *q = &fh->vb_vidq;
int ret = 0;
-
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(q)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_std;
}
-
if (res_locked(fh->dev, fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
goto out_s_std;
}
mode = &fh->mode;
-
if (*i & V4L2_STD_NTSC) {
- dprintk(4, "vidioc_s_std NTSC\n");
- mode->format = FORMAT_NTSC;
+ dprintk(4, "%s NTSC\n", __func__);
+ /* if changing format, reset frame decimation/intervals */
+ if (mode->format != FORMAT_NTSC) {
+ mode->format = FORMAT_NTSC;
+ mode->fdec = FDEC_1;
+ }
} else if (*i & V4L2_STD_PAL) {
- dprintk(4, "vidioc_s_std PAL\n");
+ dprintk(4, "%s PAL\n", __func__);
- mode->format = FORMAT_PAL;
+ if (mode->format != FORMAT_PAL) {
+ mode->format = FORMAT_PAL;
+ mode->fdec = FDEC_1;
+ }
} else {
ret = -EINVAL;
}
@@ -1386,12 +1414,32 @@ out_s_std:
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ u32 status = 0;
if (inp->index != 0)
return -EINVAL;
-
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
- strlcpy(inp->name, "Camera", sizeof(inp->name));
+ inp->status = 0;
+ if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
+ int rc;
+ rc = s2255_cmd_status(dev, fh->channel, &status);
+ dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
+ if (rc == 0)
+ inp->status = (status & 0x01) ? 0
+ : V4L2_IN_ST_NO_SIGNAL;
+ }
+ switch (dev->pid) {
+ case 0x2255:
+ default:
+ strlcpy(inp->name, "Composite", sizeof(inp->name));
+ break;
+ case 0x2257:
+ strlcpy(inp->name, (fh->channel < 2) ? "Composite" : "S-Video",
+ sizeof(inp->name));
+ break;
+ }
return 0;
}
@@ -1411,74 +1459,113 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (qc->id && qc->id == s2255_qctrl[i].id) {
- memcpy(qc, &(s2255_qctrl[i]), sizeof(*qc));
- return 0;
- }
-
- dprintk(4, "query_ctrl -EINVAL %d\n", qc->id);
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ v4l2_ctrl_query_fill(qc, -127, 127, 1, DEF_BRIGHT);
+ break;
+ case V4L2_CID_CONTRAST:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_CONTRAST);
+ break;
+ case V4L2_CID_SATURATION:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_SATURATION);
+ break;
+ case V4L2_CID_HUE:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_HUE);
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ strlcpy(qc->name, "Color Filter", sizeof(qc->name));
+ qc->type = V4L2_CTRL_TYPE_MENU;
+ qc->minimum = 0;
+ qc->maximum = 1;
+ qc->step = 1;
+ qc->default_value = 1;
+ qc->flags = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d\n", __func__, qc->id);
+ return 0;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (ctrl->id == s2255_qctrl[i].id) {
- ctrl->value = qctl_regs[i];
- return 0;
- }
- dprintk(4, "g_ctrl -EINVAL\n");
-
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = fh->mode.bright;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = fh->mode.contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = fh->mode.saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = fh->mode.hue;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ ctrl->value = !((fh->mode.color & MASK_INPUT_TYPE) >> 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d val %d\n", __func__, ctrl->id, ctrl->value);
+ return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_mode *mode;
mode = &fh->mode;
- dprintk(4, "vidioc_s_ctrl\n");
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++) {
- if (ctrl->id == s2255_qctrl[i].id) {
- if (ctrl->value < s2255_qctrl[i].minimum ||
- ctrl->value > s2255_qctrl[i].maximum)
- return -ERANGE;
-
- qctl_regs[i] = ctrl->value;
- /* update the mode to the corresponding value */
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- mode->bright = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- mode->contrast = ctrl->value;
- break;
- case V4L2_CID_HUE:
- mode->hue = ctrl->value;
- break;
- case V4L2_CID_SATURATION:
- mode->saturation = ctrl->value;
- break;
- }
- mode->restart = 0;
- /* set mode here. Note: stream does not need restarted.
- some V4L programs restart stream unnecessarily
- after a s_crtl.
- */
- s2255_set_mode(dev, fh->channel, mode);
- return 0;
- }
+ dprintk(4, "%s\n", __func__);
+ /* update the mode to the corresponding value */
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ mode->bright = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ mode->contrast = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ mode->hue = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ mode->saturation = ctrl->value;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ mode->color &= ~MASK_INPUT_TYPE;
+ mode->color |= ((ctrl->value ? 0 : 1) << 16);
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ mode->restart = 0;
+ /* set mode here. Note: the stream does not need to be restarted;
+ some V4L programs restart the stream unnecessarily
+ after an s_ctrl.
+ */
+ s2255_set_mode(dev, fh->channel, mode);
+ return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
@@ -1487,7 +1574,7 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
*jc = dev->jc[fh->channel];
- dprintk(2, "getting jpegcompression, quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1499,7 +1586,7 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
dev->jc[fh->channel].quality = jc->quality;
- dprintk(2, "setting jpeg quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1508,10 +1595,34 @@ static int vidioc_g_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ memset(sp, 0, sizeof(struct v4l2_streamparm));
+ sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = dev->cap_parm[fh->channel].capturemode;
- dprintk(2, "getting parm %d\n", sp->parm.capture.capturemode);
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ switch (fh->mode.fdec) {
+ default:
+ case FDEC_1:
+ sp->parm.capture.timeperframe.numerator = def_num;
+ break;
+ case FDEC_2:
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ break;
+ case FDEC_3:
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ break;
+ case FDEC_5:
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ break;
+ }
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator);
return 0;
}
@@ -1520,15 +1631,79 @@ static int vidioc_s_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
+ int fdec = FDEC_1;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ /* high quality capture mode requires a stream restart */
+ if (dev->cap_parm[fh->channel].capturemode
+ != sp->parm.capture.capturemode && res_locked(fh->dev, fh))
+ return -EBUSY;
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ if (def_dem != sp->parm.capture.timeperframe.denominator)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= def_num)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ fdec = FDEC_2;
+ } else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ fdec = FDEC_3;
+ } else {
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ fdec = FDEC_5;
+ }
+ fh->mode.fdec = fdec;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ s2255_set_mode(dev, fh->channel, &fh->mode);
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
+ __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator, fdec);
+ return 0;
+}
- dev->cap_parm[fh->channel].capturemode = sp->parm.capture.capturemode;
- dprintk(2, "setting param capture mode %d\n",
- sp->parm.capture.capturemode);
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fe)
+{
+ int is_ntsc = 0;
+#define NUM_FRAME_ENUMS 4
+ int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
+ if (fe->index < 0 || fe->index >= NUM_FRAME_ENUMS)
+ return -EINVAL;
+ switch (fe->width) {
+ case 640:
+ if (fe->height != 240 && fe->height != 480)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 320:
+ if (fe->height != 240)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 704:
+ if (fe->height != 288 && fe->height != 576)
+ return -EINVAL;
+ break;
+ case 352:
+ if (fe->height != 288)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fe->discrete.denominator = is_ntsc ? 30000 : 25000;
+ fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
+ dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
+ fe->discrete.denominator);
return 0;
}
+
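+
(Illustrative only, not part of this patch.) The s_parm and enum_frameintervals handlers above map the hardware decimation steps FDEC_1/2/3/5 onto V4L2 timeperframe fractions (1001/30000-based for NTSC, 1000/25000-based for PAL). A minimal user-space sketch of exercising the new ioctls; the device node and pixel format are assumptions made only for the example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */
            struct v4l2_frmivalenum fe;
            struct v4l2_streamparm sp;

            memset(&fe, 0, sizeof(fe));
            fe.pixel_format = V4L2_PIX_FMT_YUYV;
            fe.width = 640;
            fe.height = 480;
            /* walk the discrete intervals reported by the driver */
            for (fe.index = 0; ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fe) == 0;
                 fe.index++)
                    printf("interval %u/%u\n", fe.discrete.numerator,
                           fe.discrete.denominator);

            memset(&sp, 0, sizeof(sp));
            sp.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            sp.parm.capture.timeperframe.numerator = 1001 * 2; /* NTSC, FDEC_2 */
            sp.parm.capture.timeperframe.denominator = 30000;
            ioctl(fd, VIDIOC_S_PARM, &sp);  /* driver snaps to a supported FDEC step */
            return 0;
    }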
static int s2255_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
@@ -1538,31 +1713,29 @@ static int s2255_open(struct file *file)
int i = 0;
int cur_channel = -1;
int state;
-
dprintk(1, "s2255: open called (dev=%s)\n",
video_device_node_name(vdev));
- lock_kernel();
-
for (i = 0; i < MAX_CHANNELS; i++) {
- if (dev->vdev[i] == vdev) {
+ if (&dev->vdev[i] == vdev) {
cur_channel = i;
break;
}
}
-
- if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) {
- unlock_kernel();
- printk(KERN_INFO "disconnecting\n");
+ if (i == MAX_CHANNELS)
return -ENODEV;
- }
- kref_get(&dev->kref);
- mutex_lock(&dev->open_lock);
-
- dev->users[cur_channel]++;
- dprintk(4, "s2255: open_handles %d\n", dev->users[cur_channel]);
- switch (atomic_read(&dev->fw_data->fw_state)) {
+ /*
+ * the open lock is necessary to prevent multiple instances
+ * of v4l-conf (or other programs) from simultaneously
+ * reloading firmware.
+ */
+ mutex_lock(&dev->open_lock);
+ state = atomic_read(&dev->fw_data->fw_state);
+ switch (state) {
+ case S2255_FW_DISCONNECTING:
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
@@ -1573,6 +1746,8 @@ static int s2255_open(struct file *file)
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
@@ -1584,53 +1759,50 @@ static int s2255_open(struct file *file)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
- msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
- state = atomic_read(&dev->fw_data->fw_state);
- if (state != S2255_FW_SUCCESS) {
- int rc;
- switch (state) {
- case S2255_FW_FAILED:
- printk(KERN_INFO "2255 FW load failed. %d\n", state);
- rc = -ENODEV;
- break;
- case S2255_FW_DISCONNECTING:
- printk(KERN_INFO "%s: disconnecting\n", __func__);
- rc = -ENODEV;
- break;
- case S2255_FW_LOADED_DSPWAIT:
- case S2255_FW_NOTLOADED:
- printk(KERN_INFO "%s: firmware not loaded yet"
- "please try again later\n",
- __func__);
- rc = -EAGAIN;
- break;
- default:
- printk(KERN_INFO "%s: unknown state\n", __func__);
- rc = -EFAULT;
- break;
- }
- dev->users[cur_channel]--;
+ /* state may have changed in above switch statement */
+ switch (state) {
+ case S2255_FW_SUCCESS:
+ break;
+ case S2255_FW_FAILED:
+ printk(KERN_INFO "2255 firmware load failed.\n");
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
+ case S2255_FW_DISCONNECTING:
+ printk(KERN_INFO "%s: disconnecting\n", __func__);
mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
- return rc;
+ return -ENODEV;
+ case S2255_FW_LOADED_DSPWAIT:
+ case S2255_FW_NOTLOADED:
+ printk(KERN_INFO "%s: firmware not loaded yet, "
+ "please try again later\n",
+ __func__);
+ /*
+ * A timeout on firmware load means the device is unusable.
+ * Set firmware failure state.
+ * On next s2255_open the firmware will be reloaded.
+ */
+ atomic_set(&dev->fw_data->fw_state,
+ S2255_FW_FAILED);
+ mutex_unlock(&dev->open_lock);
+ return -EAGAIN;
+ default:
+ printk(KERN_INFO "%s: unknown state\n", __func__);
+ mutex_unlock(&dev->open_lock);
+ return -EFAULT;
}
-
+ mutex_unlock(&dev->open_lock);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users[cur_channel]--;
- mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
-
file->private_data = fh;
fh->dev = dev;
fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1640,35 +1812,23 @@ static int s2255_open(struct file *file)
fh->width = LINE_SZ_4CIFS_NTSC;
fh->height = NUM_LINES_4CIFS_NTSC * 2;
fh->channel = cur_channel;
-
/* configure channel to default state */
if (!dev->chn_configured[cur_channel]) {
s2255_set_mode(dev, cur_channel, &fh->mode);
dev->chn_configured[cur_channel] = 1;
}
-
-
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- qctl_regs[i] = s2255_qctrl[i].default_value;
-
- dprintk(1, "s2255drv: open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[type],
- dev->users[cur_channel]);
- dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n",
+ dprintk(1, "%s: dev=%s type=%s\n", __func__,
+ video_device_node_name(vdev), v4l2_type_names[type]);
+ dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq[cur_channel]);
- dprintk(4, "s2255drv: open: list_empty active=%d\n",
+ dprintk(4, "%s: list_empty active=%d\n", __func__,
list_empty(&dev->vidq[cur_channel].active));
-
videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct s2255_buffer), fh);
-
- mutex_unlock(&dev->open_lock);
- unlock_kernel();
return 0;
}
@@ -1679,39 +1839,19 @@ static unsigned int s2255_poll(struct file *file,
struct s2255_fh *fh = file->private_data;
int rc;
dprintk(100, "%s\n", __func__);
-
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
-
rc = videobuf_poll_stream(file, &fh->vb_vidq, wait);
return rc;
}
-static void s2255_destroy(struct kref *kref)
+static void s2255_destroy(struct s2255_dev *dev)
{
- struct s2255_dev *dev = to_s2255_dev(kref);
- int i;
- if (!dev) {
- printk(KERN_ERR "s2255drv: kref problem\n");
- return;
- }
- atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
- wake_up(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++) {
- dev->setmode_ready[i] = 1;
- wake_up(&dev->wait_setmode[i]);
- }
- mutex_lock(&dev->open_lock);
- /* reset the DSP so firmware can be reload next time */
- s2255_reset_dsppower(dev);
- s2255_exit_v4l(dev);
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
del_timer(&dev->timer); /* only started in .probe and .open */
-
if (dev->fw_data->fw_urb) {
- dprintk(2, "kill fw_urb\n");
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
@@ -1720,24 +1860,22 @@ static void s2255_destroy(struct kref *kref)
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
+ /* reset the DSP so firmware can be reloaded next time */
+ s2255_reset_dsppower(dev);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
dprintk(1, "%s", __func__);
-
- mutex_unlock(&dev->open_lock);
kfree(dev);
}
-static int s2255_close(struct file *file)
+static int s2255_release(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
struct video_device *vdev = video_devdata(file);
-
if (!dev)
return -ENODEV;
-
- mutex_lock(&dev->open_lock);
-
/* turn off stream */
if (res_check(fh)) {
if (dev->b_acquire[fh->channel])
@@ -1745,15 +1883,8 @@ static int s2255_close(struct file *file)
videobuf_streamoff(&fh->vb_vidq);
res_free(dev, fh);
}
-
videobuf_mmap_free(&fh->vb_vidq);
- dev->users[fh->channel]--;
-
- mutex_unlock(&dev->open_lock);
-
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255: close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users[fh->channel]);
+ dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
kfree(fh);
return 0;
}
@@ -1765,27 +1896,25 @@ static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
if (!fh)
return -ENODEV;
- dprintk(4, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
-
+ dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
-
- dprintk(4, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
-
return ret;
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
- .release = s2255_close,
+ .release = s2255_release,
.poll = s2255_poll,
.ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = s2255_mmap_v4l,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
+ .vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1811,13 +1940,23 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
};
+static void s2255_video_device_release(struct video_device *vdev)
+{
+ struct s2255_dev *dev = video_get_drvdata(vdev);
+ dprintk(4, "%s, chnls: %d \n", __func__, atomic_read(&dev->channels));
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ return;
+}
+
static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
- .release = video_device_release,
+ .release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1827,7 +1966,9 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
int ret;
int i;
int cur_nr = video_nr;
-
+ ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
@@ -1835,45 +1976,39 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
dev->vidq[i].dev = dev;
dev->vidq[i].channel = i;
/* register 4 video devices */
- dev->vdev[i] = video_device_alloc();
- memcpy(dev->vdev[i], &template, sizeof(struct video_device));
- dev->vdev[i]->parent = &dev->interface->dev;
- video_set_drvdata(dev->vdev[i], dev);
+ memcpy(&dev->vdev[i], &template, sizeof(struct video_device));
+ dev->vdev[i].v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(&dev->vdev[i], dev);
if (video_nr == -1)
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
video_nr);
else
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
cur_nr + i);
- video_set_drvdata(dev->vdev[i], dev);
-
- if (ret != 0) {
+ if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
- return ret;
+ break;
}
+ atomic_inc(&dev->channels);
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&dev->vdev[i]));
+
}
+
printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
S2255_MAJOR_VERSION,
S2255_MINOR_VERSION);
- return ret;
-}
-
-static void s2255_exit_v4l(struct s2255_dev *dev)
-{
-
- int i;
- for (i = 0; i < MAX_CHANNELS; i++) {
- if (video_is_registered(dev->vdev[i])) {
- video_unregister_device(dev->vdev[i]);
- printk(KERN_INFO "s2255 unregistered\n");
- } else {
- video_device_release(dev->vdev[i]);
- printk(KERN_INFO "s2255 released\n");
- }
+ /* if no channels registered, return error and probe will fail */
+ if (atomic_read(&dev->channels) == 0) {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
}
+ if (atomic_read(&dev->channels) != MAX_CHANNELS)
+ printk(KERN_WARNING "s2255: Not all channels available.\n");
+ return 0;
}
/* this function moves the usb stream read pipe data
@@ -1907,14 +2042,14 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
- s32 *pdword;
+ __le32 *pdword; /* data from the DSP is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
+ pdword = (__le32 *)pdata;
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
- switch (*(s32 *) pdata) {
+ switch (*pdword) {
case S2255_MARKER_FRAME:
- pdword = (s32 *)pdata;
dprintk(4, "found frame marker at offset:"
" %d [%x %x]\n", jj, pdata[0],
pdata[1]);
@@ -1938,7 +2073,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dev->jpg_size[dev->cc] = pdword[4];
break;
case S2255_MARKER_RESPONSE:
- pdword = (s32 *)pdata;
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (pdword[1] >= MAX_CHANNELS)
@@ -1955,7 +2089,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dprintk(5, "setmode ready %d\n", cc);
break;
case S2255_RESPONSE_FW:
-
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
@@ -1965,6 +2098,13 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
+ case S2255_RESPONSE_STATUS:
+ dev->vidstatus[cc] = pdword[3];
+ dev->vidstatus_ready[cc] = 1;
+ wake_up(&dev->wait_vidstatus[cc]);
+ dprintk(5, "got vidstatus %x chan %d\n",
+ pdword[3], cc);
+ break;
default:
printk(KERN_INFO "s2255 unknown resp\n");
}
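(Illustrative only, not part of this patch.) The save_frame() changes above treat the DSP words as __le32 rather than raw s32. A minimal sketch of reading such a marker in an endian-safe way, assuming the driver's usual kernel headers; the helper name is hypothetical:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* DSP words are little endian; convert before comparing on any CPU */
    static bool buf_has_marker(const u8 *buf, u32 marker_cpu)
    {
            return get_unaligned_le32(buf) == marker_cpu;
    }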
@@ -2165,28 +2305,22 @@ static int s2255_release_sys_buffers(struct s2255_dev *dev,
static int s2255_board_init(struct s2255_dev *dev)
{
- int j;
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
+ int j;
+ struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(4, "board init: %p", dev);
-
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[j];
-
- memset(pipe, 0, sizeof(*pipe));
- pipe->dev = dev;
- pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
- pipe->max_transfer_size = S2255_USB_XFER_SIZE;
-
- pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
- GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
- dprintk(1, "out of memory!\n");
- return -ENOMEM;
- }
-
+ memset(pipe, 0, sizeof(*pipe));
+ pipe->dev = dev;
+ pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
+ pipe->max_transfer_size = S2255_USB_XFER_SIZE;
+
+ pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
+ GFP_KERNEL);
+ if (pipe->transfer_buffer == NULL) {
+ dprintk(1, "out of memory!\n");
+ return -ENOMEM;
}
-
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
@@ -2203,6 +2337,8 @@ static int s2255_board_init(struct s2255_dev *dev)
for (j = 0; j < MAX_CHANNELS; j++) {
dev->b_acquire[j] = 0;
dev->mode[j] = mode_def;
+ if (dev->pid == 0x2257 && j > 1)
+ dev->mode[j].color |= (1 << 16);
dev->jc[j].quality = S2255_DEF_JPEG_QUAL;
dev->cur_fmt[j] = &formats[0];
dev->mode[j].restart = 1;
@@ -2213,16 +2349,14 @@ static int s2255_board_init(struct s2255_dev *dev)
}
/* start read pipe */
s2255_start_readpipe(dev);
-
- dprintk(1, "S2255: board initialized\n");
+ dprintk(1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
-
- dprintk(1, "S2255: board shutdown: %p", dev);
+ dprintk(1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (dev->b_acquire[i])
@@ -2233,12 +2367,8 @@ static int s2255_board_shutdown(struct s2255_dev *dev)
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(dev, i);
-
- /* release transfer buffers */
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[i];
- kfree(pipe->transfer_buffer);
- }
+ /* release transfer buffer */
+ kfree(dev->pipe.transfer_buffer);
return 0;
}
@@ -2248,9 +2378,8 @@ static void read_pipe_completion(struct urb *purb)
struct s2255_dev *dev;
int status;
int pipe;
-
pipe_info = purb->context;
- dprintk(100, "read pipe completion %p, status %d\n", purb,
+ dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
@@ -2265,13 +2394,13 @@ static void read_pipe_completion(struct urb *purb)
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
- dprintk(2, "read_pipe_completion: err shutdown\n");
+ dprintk(2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
- dprintk(2, "exiting USB pipe");
+ dprintk(2, "%s: exiting USB pipe", __func__);
return;
}
@@ -2279,7 +2408,7 @@ static void read_pipe_completion(struct urb *purb)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
- dprintk(1, "s2255drv: failed URB %d\n", status);
+ dprintk(1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
@@ -2295,7 +2424,7 @@ static void read_pipe_completion(struct urb *purb)
dev_err(&dev->udev->dev, "error submitting urb\n");
}
} else {
- dprintk(2, "read pipe complete state 0\n");
+ dprintk(2, "%s: complete state 0\n", __func__);
}
return;
}
@@ -2304,35 +2433,28 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
- int i;
- struct s2255_pipeinfo *pipe_info = dev->pipes;
+ struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
- dprintk(2, "start pipe IN %d\n", dev->read_endpoint);
-
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- pipe_info->state = 1;
- pipe_info->err_count = 0;
- pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pipe_info->stream_urb) {
- dev_err(&dev->udev->dev,
- "ReadStream: Unable to alloc URB\n");
- return -ENOMEM;
- }
- /* transfer buffer allocated in board_init */
- usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
- pipe,
- pipe_info->transfer_buffer,
- pipe_info->cur_transfer_size,
- read_pipe_completion, pipe_info);
-
- dprintk(4, "submitting URB %p\n", pipe_info->stream_urb);
- retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
- if (retval) {
- printk(KERN_ERR "s2255: start read pipe failed\n");
- return retval;
- }
+ dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
+ pipe_info->state = 1;
+ pipe_info->err_count = 0;
+ pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pipe_info->stream_urb) {
+ dev_err(&dev->udev->dev,
+ "ReadStream: Unable to alloc URB\n");
+ return -ENOMEM;
+ }
+ /* transfer buffer allocated in board_init */
+ usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
+ pipe,
+ pipe_info->transfer_buffer,
+ pipe_info->cur_transfer_size,
+ read_pipe_completion, pipe_info);
+ retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
+ if (retval) {
+ printk(KERN_ERR "s2255: start read pipe failed\n");
+ return retval;
}
-
return 0;
}
@@ -2347,10 +2469,7 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
dprintk(2, "start acquire failed, bad channel %lu\n", chn);
return -1;
}
-
chn_rev = G_chnmap[chn];
- dprintk(1, "S2255: start acquire %lu \n", chn);
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
@@ -2366,9 +2485,9 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
}
/* send the start command */
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = (u32) CMD_START;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
@@ -2383,65 +2502,41 @@ static int s2255_stop_acquire(struct s2255_dev *dev, unsigned long chn)
unsigned char *buffer;
int res;
unsigned long chn_rev;
-
if (chn >= MAX_CHANNELS) {
dprintk(2, "stop acquire failed, bad channel %lu\n", chn);
return -1;
}
chn_rev = G_chnmap[chn];
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
-
/* send the stop command */
- dprintk(4, "stop acquire %lu\n", chn);
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = CMD_STOP;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
-
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
-
- dprintk(4, "stop acquire: releasing states \n");
-
kfree(buffer);
dev->b_acquire[chn] = 0;
-
+ dprintk(4, "%s: chn %lu, res %d\n", __func__, chn, res);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
- int j;
-
- if (dev == NULL) {
- s2255_dev_err(&dev->udev->dev, "invalid device\n");
- return;
- }
- dprintk(4, "stop read pipe\n");
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info) {
- if (pipe_info->state == 0)
- continue;
- pipe_info->state = 0;
- }
- }
+ struct s2255_pipeinfo *pipe = &dev->pipe;
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info->stream_urb) {
- /* cancel urb */
- usb_kill_urb(pipe_info->stream_urb);
- usb_free_urb(pipe_info->stream_urb);
- pipe_info->stream_urb = NULL;
- }
+ pipe->state = 0;
+ if (pipe->stream_urb) {
+ /* cancel urb */
+ usb_kill_urb(pipe->stream_urb);
+ usb_free_urb(pipe->stream_urb);
+ pipe->stream_urb = NULL;
}
- dprintk(2, "s2255 stop read pipe: %d\n", j);
+ dprintk(4, "%s", __func__);
return;
}
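(Illustrative only, not part of this patch.) Both start and stop acquire now store their command words as __le32 via cpu_to_le32(). A small sketch of the shared pattern, reusing the driver's own IN_DATA_TOKEN and s2255_write_config(); the helper name is hypothetical and it assumes the driver's headers:

    static int s2255_send_cmd_sketch(struct s2255_dev *dev, u32 chn_rev, u32 cmd)
    {
            __le32 *buf = kzalloc(512, GFP_KERNEL);
            int res;

            if (!buf)
                    return -ENOMEM;
            buf[0] = IN_DATA_TOKEN;          /* stored as-is, as the driver does */
            buf[1] = cpu_to_le32(chn_rev);   /* channel word, little endian */
            buf[2] = cpu_to_le32(cmd);       /* CMD_START or CMD_STOP */
            res = s2255_write_config(dev->udev, (unsigned char *)buf, 512);
            kfree(buf);
            return res;
    }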
@@ -2473,32 +2568,28 @@ static int s2255_probe(struct usb_interface *interface,
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
-
- dprintk(2, "s2255: probe\n");
-
+ dprintk(2, "%s\n", __func__);
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
- goto error;
+ return -ENOMEM;
}
-
+ atomic_set(&dev->channels, 0);
+ dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
- goto error;
-
+ goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->open_lock);
-
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
- goto error;
+ goto errorUDEV;
}
- kref_init(&dev->kref);
- dprintk(1, "dev: %p, kref: %p udev %p interface %p\n", dev, &dev->kref,
+ dprintk(1, "dev: %p, udev %p interface %p\n", dev,
dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
@@ -2514,39 +2605,33 @@ static int s2255_probe(struct usb_interface *interface,
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
- goto error;
+ goto errorEP;
}
-
- /* set intfdata */
- usb_set_intfdata(interface, dev);
-
- dprintk(100, "after intfdata %p\n", dev);
-
init_timer(&dev->timer);
dev->timer.function = s2255_timer;
dev->timer.data = (unsigned long)dev->fw_data;
-
init_waitqueue_head(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++)
+ for (i = 0; i < MAX_CHANNELS; i++) {
init_waitqueue_head(&dev->wait_setmode[i]);
-
+ init_waitqueue_head(&dev->wait_vidstatus[i]);
+ }
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
-
if (!dev->fw_data->fw_urb) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWURB;
}
+
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
- goto error;
+ goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
@@ -2555,59 +2640,80 @@ static int s2255_probe(struct usb_interface *interface,
if (*pdata != S2255_FW_MARKER) {
printk(KERN_INFO "Firmware invalid.\n");
retval = -ENODEV;
- goto error;
+ goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
printk(KERN_INFO "s2255 dsp fw version %x\n", *pRel);
+ dev->dsp_fw_ver = *pRel;
+ if (*pRel < S2255_CUR_DSP_FWVER)
+ printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
+ if (dev->pid == 0x2257 && *pRel < S2255_MIN_DSP_COLORFILTER)
+ printk(KERN_WARNING "s2255: 2257 requires firmware %d"
+ " or above.\n", S2255_MIN_DSP_COLORFILTER);
}
- /* loads v4l specific */
- s2255_probe_v4l(dev);
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
- goto error;
-
- dprintk(4, "before probe done %p\n", dev);
+ goto errorBOARDINIT;
spin_lock_init(&dev->slock);
-
s2255_fwload_start(dev, 0);
+ /* loads v4l specific */
+ retval = s2255_probe_v4l(dev);
+ if (retval)
+ goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
-error:
+errorBOARDINIT:
+ s2255_board_shutdown(dev);
+errorFWMARKER:
+ release_firmware(dev->fw_data->fw);
+errorREQFW:
+ kfree(dev->fw_data->pfw_data);
+errorFWDATA2:
+ usb_free_urb(dev->fw_data->fw_urb);
+errorFWURB:
+ del_timer(&dev->timer);
+errorEP:
+ usb_put_dev(dev->udev);
+errorUDEV:
+ kfree(dev->fw_data);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
+errorFWDATA1:
+ kfree(dev);
+ printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
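(Illustrative only, not part of this patch.) The reworked s2255_probe() error path above uses one label per acquired resource, so a failure unwinds exactly what has already been set up. A generic sketch of that idiom; the names are placeholders:

    #include <linux/slab.h>

    static int probe_sketch(void)
    {
            void *a, *b;
            int ret = -ENOMEM;

            a = kzalloc(16, GFP_KERNEL);
            if (!a)
                    goto err_a;
            b = kzalloc(16, GFP_KERNEL);
            if (!b)
                    goto err_b;
            /* success: both resources stay allocated */
            return 0;
    err_b:
            kfree(a);
    err_a:
            return ret;
    }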
/* disconnect routine. when board is removed physically or with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
- struct s2255_dev *dev = NULL;
+ struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
- dprintk(1, "s2255: disconnect interface %p\n", interface);
- dev = usb_get_intfdata(interface);
-
- /*
- * wake up any of the timers to allow open_lock to be
- * acquired sooner
- */
+ int channels = atomic_read(&dev->channels);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ /* see comments in the uvc_driver.c usb disconnect function */
+ atomic_inc(&dev->channels);
+ /* unregister each video device. */
+ for (i = 0; i < channels; i++) {
+ if (video_is_registered(&dev->vdev[i]))
+ video_unregister_device(&dev->vdev[i]);
+ }
+ /* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->setmode_ready[i] = 1;
wake_up(&dev->wait_setmode[i]);
+ dev->vidstatus_ready[i] = 1;
+ wake_up(&dev->wait_vidstatus[i]);
}
-
- mutex_lock(&dev->open_lock);
- usb_set_intfdata(interface, NULL);
- mutex_unlock(&dev->open_lock);
-
- if (dev) {
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255drv: disconnect\n");
- dev_info(&interface->dev, "s2255usb now disconnected\n");
- }
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ dev_info(&interface->dev, "%s\n", __func__);
}
static struct usb_driver s2255_driver = {
@@ -2620,15 +2726,12 @@ static struct usb_driver s2255_driver = {
static int __init usb_s2255_init(void)
{
int result;
-
/* register this driver with the USB subsystem */
result = usb_register(&s2255_driver);
-
if (result)
pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", result);
-
- dprintk(2, "s2255_init: done\n");
+ ": usb_register failed. Error number %d\n", result);
+ dprintk(2, "%s\n", __func__);
return result;
}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index c0a7f8a369f..53b6fcde380 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -74,6 +74,7 @@ struct saa711x_state {
int contrast;
int hue;
int sat;
+ int chroma_agc;
int width;
int height;
u32 ident;
@@ -592,7 +593,7 @@ static const unsigned char saa7115_init_misc[] = {
R_5D_DID, 0xbd,
R_5E_SDID, 0x35,
- R_02_INPUT_CNTL_1, 0x84, /* input tuner -> input 4, amplifier active */
+ R_02_INPUT_CNTL_1, 0xc4, /* input tuner -> input 4, amplifier active */
R_80_GLOBAL_CNTL_1, 0x20, /* enable task B */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,
@@ -743,6 +744,7 @@ static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct saa711x_state *state = to_state(sd);
+ u8 val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -784,7 +786,21 @@ static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
state->hue = ctrl->value;
saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, state->hue);
break;
-
+ case V4L2_CID_CHROMA_AGC:
+ val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL);
+ state->chroma_agc = ctrl->value;
+ if (ctrl->value)
+ val &= 0x7f;
+ else
+ val |= 0x80;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, val);
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ /* Chroma gain cannot be set when AGC is enabled */
+ if (state->chroma_agc == 1)
+ return -EINVAL;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, ctrl->value | 0x80);
+ break;
default:
return -EINVAL;
}
@@ -809,6 +825,12 @@ static int saa711x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
ctrl->value = state->hue;
break;
+ case V4L2_CID_CHROMA_AGC:
+ ctrl->value = state->chroma_agc;
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ ctrl->value = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
+ break;
default:
return -EINVAL;
}
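(Illustrative only, not part of this patch.) The new saa7115 controls make chroma gain adjustable only while chroma AGC is off; s_ctrl returns -EINVAL otherwise. A user-space sketch, with the device node as an assumption:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_manual_chroma_gain(int gain)
    {
            int fd = open("/dev/video0", O_RDWR);   /* hypothetical node */
            struct v4l2_control c;

            c.id = V4L2_CID_CHROMA_AGC;
            c.value = 0;                            /* AGC off first */
            ioctl(fd, VIDIOC_S_CTRL, &c);
            c.id = V4L2_CID_CHROMA_GAIN;
            c.value = gain;                         /* 0..127, default 48 */
            return ioctl(fd, VIDIOC_S_CTRL, &c);    /* fails if AGC still on */
    }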
@@ -1069,7 +1091,7 @@ static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_forma
saa7115_cfg_vbi_off);
}
-static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
static u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
@@ -1078,11 +1100,8 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0
};
- struct v4l2_sliced_vbi_format *sliced = &fmt->fmt.sliced;
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
memset(sliced, 0, sizeof(*sliced));
/* done if using raw VBI */
if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
@@ -1098,16 +1117,27 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa711x_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
+static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, NULL);
+ return 0;
+}
+
+static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, fmt);
+ return 0;
+}
+
static int saa711x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
- saa711x_set_lcr(sd, &fmt->fmt.sliced);
- return 0;
- }
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- saa711x_set_lcr(sd, NULL);
- return 0;
- }
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1209,6 +1239,10 @@ static int saa711x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ case V4L2_CID_CHROMA_AGC:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_CHROMA_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 127, 1, 48);
default:
return -EINVAL;
}
@@ -1524,18 +1558,25 @@ static const struct v4l2_subdev_video_ops saa711x_video_ops = {
.s_crystal_freq = saa711x_s_crystal_freq,
.g_fmt = saa711x_g_fmt,
.s_fmt = saa711x_s_fmt,
- .g_vbi_data = saa711x_g_vbi_data,
- .decode_vbi_line = saa711x_decode_vbi_line,
.s_stream = saa711x_s_stream,
.querystd = saa711x_querystd,
.g_input_status = saa711x_g_input_status,
};
+static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = {
+ .g_vbi_data = saa711x_g_vbi_data,
+ .decode_vbi_line = saa711x_decode_vbi_line,
+ .g_sliced_fmt = saa711x_g_sliced_fmt,
+ .s_sliced_fmt = saa711x_s_sliced_fmt,
+ .s_raw_fmt = saa711x_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops saa711x_ops = {
.core = &saa711x_core_ops,
.tuner = &saa711x_tuner_ops,
.audio = &saa711x_audio_ops,
.video = &saa711x_video_ops,
+ .vbi = &saa711x_vbi_ops,
};
/* ----------------------------------------------------------------------- */
@@ -1593,6 +1634,7 @@ static int saa711x_probe(struct i2c_client *client,
state->contrast = 64;
state->hue = 0;
state->sat = 64;
+ state->chroma_agc = 1;
switch (chip_id) {
case '1':
state->ident = V4L2_IDENT_SAA7111;
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 250ef84cf5c..87986ad62f8 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -625,29 +625,33 @@ static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
return saa7127_set_video_enable(sd, enable);
}
-static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa7127_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
struct saa7127_state *state = to_state(sd);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
-
- memset(&fmt->fmt.sliced, 0, sizeof(fmt->fmt.sliced));
+ memset(fmt, 0, sizeof(*fmt));
if (state->vps_enable)
- fmt->fmt.sliced.service_lines[0][16] = V4L2_SLICED_VPS;
+ fmt->service_lines[0][16] = V4L2_SLICED_VPS;
if (state->wss_enable)
- fmt->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ fmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
if (state->cc_enable) {
- fmt->fmt.sliced.service_lines[0][21] = V4L2_SLICED_CAPTION_525;
- fmt->fmt.sliced.service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
}
- fmt->fmt.sliced.service_set =
+ fmt->service_set =
(state->vps_enable ? V4L2_SLICED_VPS : 0) |
(state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
(state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
return 0;
}
+static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa7127_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
static int saa7127_s_vbi_data(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
switch (data->id) {
@@ -727,16 +731,21 @@ static const struct v4l2_subdev_core_ops saa7127_core_ops = {
};
static const struct v4l2_subdev_video_ops saa7127_video_ops = {
- .s_vbi_data = saa7127_s_vbi_data,
.g_fmt = saa7127_g_fmt,
.s_std_output = saa7127_s_std_output,
.s_routing = saa7127_s_routing,
.s_stream = saa7127_s_stream,
};
+static const struct v4l2_subdev_vbi_ops saa7127_vbi_ops = {
+ .s_vbi_data = saa7127_s_vbi_data,
+ .g_sliced_fmt = saa7127_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops saa7127_ops = {
.core = &saa7127_core_ops,
.video = &saa7127_video_ops,
+ .vbi = &saa7127_vbi_ops,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index d48c450ed77..d3bd82ad010 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -1011,8 +1011,6 @@ static int snd_card_saa7134_new_mixer(snd_card_saa7134_t * chip)
unsigned int idx;
int err, addr;
- if (snd_BUG_ON(!chip))
- return -EINVAL;
strcpy(card->mixername, "SAA7134 Mixer");
for (idx = 0; idx < ARRAY_SIZE(snd_saa7134_volume_controls); idx++) {
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 297833fb3b4..72700d4e394 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5355,6 +5355,79 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE2,
},
},
+ [SAA7134_BOARD_HAWELL_HW_404M7] = {
+ /* Hawell HW-404M7 & Hawell HW-808M7 */
+ /* Bogoslovskiy Viktor <bogovic@bk.ru> */
+ .name = "Hawell HW-404M7",
+ .audio_clock = 0x00200000,
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x389c00,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x01fc00,
+ } },
+ },
+ [SAA7134_BOARD_BEHOLD_H7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV H7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_A7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV A7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
};
@@ -6549,6 +6622,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SAA7134_BOARD_UNKNOWN,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7190,
+ .driver_data = SAA7134_BOARD_BEHOLD_H7,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7090,
+ .driver_data = SAA7134_BOARD_BEHOLD_A7,
},{
/* --- end of list --- */
}
@@ -6602,6 +6687,8 @@ static int saa7134_xc5000_callback(struct saa7134_dev *dev,
{
switch (dev->board) {
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
if (command == XC5000_TUNER_RESET) {
/* Pull the peripheral RESET pin down and up to reset all chips */
saa_writeb(SAA7134_SPECIAL_MODE, 0x00);
@@ -6973,6 +7060,8 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->has_remote = SAA7134_REMOTE_I2C;
break;
case SAA7134_BOARD_AVERMEDIA_A169_B:
@@ -7215,6 +7304,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
printk(KERN_INFO "%s: P7131 analog only, using "
"entry of %s\n",
dev->name, saa7134_boards[dev->board].name);
+
+ /* IR init has already happened for other cards, so
+ * we have to catch up. */
+ dev->has_remote = SAA7134_REMOTE_GPIO;
+ saa7134_input_init1(dev);
}
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1150:
@@ -7344,6 +7438,23 @@ int saa7134_board_init2(struct saa7134_dev *dev)
}
break;
}
+ case SAA7134_BOARD_BEHOLD_H6:
+ {
+ u8 data[] = { 0x09, 0x9f, 0x86, 0x11};
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = data,
+ .len = sizeof(data)};
+
+ /* After hardware start the TUNER_PHILIPS_FMD1216MEX_MK3 tuner */
+ /* has its IF disabled and DVB-T enabled. When saa7134 scans */
+ /* the I2C bus it does not detect the IF tda9887 and TV cannot */
+ /* be watched without a software reboot. To solve this, switch */
+ /* the tuner to analog TV mode manually. */
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
+ printk(KERN_WARNING
+ "%s: Unable to enable IF of the tuner.\n",
+ dev->name);
+ break;
+ }
} /* switch() */
/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index a7ad7810fdd..90f23188129 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -471,7 +471,7 @@ static char *irqbits[] = {
"DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3",
"AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC",
"TRIG_ERR", "CONF_ERR", "LOAD_ERR",
- "GPIO16?", "GPIO18", "GPIO22", "GPIO23"
+ "GPIO16", "GPIO18", "GPIO22", "GPIO23"
};
#define IRQBITS ARRAY_SIZE(irqbits)
@@ -601,12 +601,14 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
/* disable gpio16 IRQ */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO16 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO18 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
printk(KERN_WARNING "%s/irq: looping -- "
@@ -698,11 +700,13 @@ static int saa7134_hw_enable2(struct saa7134_dev *dev)
if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) {
if (dev->remote->mask_keydown & 0x10000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO16;
- else if (dev->remote->mask_keydown & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18;
- else if (dev->remote->mask_keyup & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18A;
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N;
+ else { /* Allow enabling both IRQ edge triggers */
+ if (dev->remote->mask_keydown & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P;
+ if (dev->remote->mask_keyup & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N;
+ }
}
if (dev->has_remote == SAA7134_REMOTE_I2C) {
@@ -1227,7 +1231,7 @@ static int saa7134_resume(struct pci_dev *pci_dev)
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
if (dev->remote)
- saa7134_ir_start(dev, dev->remote);
+ saa7134_ir_start(dev);
saa7134_hw_enable1(dev);
msleep(100);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 4ab4a987c9b..31e82be1b7e 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1532,6 +1532,15 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, &behold_x7_tunerconfig);
}
break;
+ case SAA7134_BOARD_BEHOLD_H7:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &behold_x7_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, &behold_x7_tunerconfig);
+ }
+ break;
case SAA7134_BOARD_AVERMEDIA_A700_PRO:
case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
/* Zarlink ZL10313 */
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 58a0cdc8414..e5565e2fd42 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -28,6 +28,8 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#define MODULE_NAME "saa7134"
+
static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir,"disable infrared remote support");
@@ -66,6 +68,7 @@ MODULE_PARM_DESC(disable_other_ir, "disable full codes of "
/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */
static int saa7134_rc5_irq(struct saa7134_dev *dev);
static int saa7134_nec_irq(struct saa7134_dev *dev);
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev);
static void nec_task(unsigned long data);
static void saa7134_nec_timer(unsigned long data);
@@ -397,14 +400,23 @@ static int get_key_pinnacle_color(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
void saa7134_input_irq(struct saa7134_dev *dev)
{
- struct card_ir *ir = dev->remote;
+ struct card_ir *ir;
+
+ if (!dev || !dev->remote)
+ return;
+
+ ir = dev->remote;
+ if (!ir->running)
+ return;
if (ir->nec_gpio) {
saa7134_nec_irq(dev);
- } else if (!ir->polling && !ir->rc5_gpio) {
+ } else if (!ir->polling && !ir->rc5_gpio && !ir->raw_decode) {
build_key(dev);
} else if (ir->rc5_gpio) {
saa7134_rc5_irq(dev);
+ } else if (ir->raw_decode) {
+ saa7134_raw_decode_irq(dev);
}
}
@@ -417,8 +429,32 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
+void ir_raw_decode_timer_end(unsigned long data)
+{
+ struct saa7134_dev *dev = (struct saa7134_dev *)data;
+ struct card_ir *ir = dev->remote;
+
+ ir_raw_event_handle(dev->remote->dev);
+
+ ir->active = 0;
+}
+
+static int __saa7134_ir_start(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return -EINVAL;
+
+ ir = dev->remote;
+ if (!ir)
+ return -EINVAL;
+
+ if (ir->running)
+ return 0;
+
+ ir->running = 1;
if (ir->polling) {
setup_timer(&ir->timer, saa7134_input_timer,
(unsigned long)dev);
@@ -441,26 +477,125 @@ void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
setup_timer(&ir->timer_keyup, saa7134_nec_timer,
(unsigned long)dev);
tasklet_init(&ir->tlet, nec_task, (unsigned long)dev);
+ } else if (ir->raw_decode) {
+ /* set timer_end for code completion */
+ init_timer(&ir->timer_end);
+ ir->timer_end.function = ir_raw_decode_timer_end;
+ ir->timer_end.data = (unsigned long)dev;
+ ir->active = 0;
}
+
+ return 0;
}
-void saa7134_ir_stop(struct saa7134_dev *dev)
+static void __saa7134_ir_stop(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return;
+
+ ir = dev->remote;
+ if (!ir)
+ return;
+
+ if (!ir->running)
+ return;
if (dev->remote->polling)
del_timer_sync(&dev->remote->timer);
+ else if (ir->rc5_gpio)
+ del_timer_sync(&ir->timer_end);
+ else if (ir->nec_gpio)
+ tasklet_kill(&ir->tlet);
+ else if (ir->raw_decode) {
+ del_timer_sync(&ir->timer_end);
+ ir->active = 0;
+ }
+
+ ir->running = 0;
+
+ return;
+}
+
+int saa7134_ir_start(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ return __saa7134_ir_start(dev);
+
+ return 0;
+}
+
+void saa7134_ir_stop(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+static int saa7134_ir_open(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users++;
+ return __saa7134_ir_start(dev);
+}
+
+static void saa7134_ir_close(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users--;
+ if (!dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+
+int saa7134_ir_change_protocol(void *priv, u64 ir_type)
+{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir = dev->remote;
+ u32 nec_gpio, rc5_gpio;
+
+ if (ir_type == IR_TYPE_RC5) {
+ dprintk("Changing protocol to RC5\n");
+ nec_gpio = 0;
+ rc5_gpio = 1;
+ } else if (ir_type == IR_TYPE_NEC) {
+ dprintk("Changing protocol to NEC\n");
+ nec_gpio = 1;
+ rc5_gpio = 0;
+ } else {
+ dprintk("IR protocol type %u is not supported\n",
+ (unsigned)ir_type);
+ return -EINVAL;
+ }
+
+ if (ir->running) {
+ saa7134_ir_stop(dev);
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ saa7134_ir_start(dev);
+ } else {
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ }
+
+ return 0;
}
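(Illustrative only, not part of this patch.) saa7134_ir_change_protocol() only takes effect once it is wired into the props structure handed to ir_input_register(), which the patch does later in saa7134_input_init1(). A condensed sketch of that wiring, mirroring the fields the patch uses:

    ir->props.priv = dev;
    ir->props.open = saa7134_ir_open;
    ir->props.close = saa7134_ir_close;
    ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
    ir->props.change_protocol = saa7134_ir_change_protocol;
    err = ir_input_register(ir->dev, RC_MAP_ASUS_PC39, &ir->props, MODULE_NAME);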
int saa7134_input_init1(struct saa7134_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u32 mask_keycode = 0;
u32 mask_keydown = 0;
u32 mask_keyup = 0;
int polling = 0;
int rc5_gpio = 0;
int nec_gpio = 0;
+ int raw_decode = 0;
+ int allow_protocol_change = 0;
u64 ir_type = IR_TYPE_OTHER;
int err;
@@ -476,27 +611,27 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYTVPLATINUM_FM:
case SAA7134_BOARD_FLYTVPLATINUM_MINI2:
case SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM:
- ir_codes = &ir_codes_flyvideo_table;
+ ir_codes = RC_MAP_FLYVIDEO;
mask_keycode = 0xEC00000;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_CINERGY400:
case SAA7134_BOARD_CINERGY600:
case SAA7134_BOARD_CINERGY600_MK3:
- ir_codes = &ir_codes_cinergy_table;
+ ir_codes = RC_MAP_CINERGY;
mask_keycode = 0x00003f;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_ECS_TVP3XP:
case SAA7134_BOARD_ECS_TVP3XP_4CB5:
- ir_codes = &ir_codes_eztv_table;
+ ir_codes = RC_MAP_EZTV;
mask_keycode = 0x00017c;
mask_keyup = 0x000002;
polling = 50; // ms
break;
case SAA7134_BOARD_KWORLD_XPERT:
case SAA7134_BOARD_AVACSSMARTTV:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001F;
mask_keyup = 0x000020;
polling = 50; // ms
@@ -513,7 +648,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
case SAA7134_BOARD_AVERMEDIA_M102:
case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x0007C8;
mask_keydown = 0x000010;
polling = 50; // ms
@@ -522,14 +657,15 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4);
break;
case SAA7134_BOARD_AVERMEDIA_M135A:
- ir_codes = &ir_codes_avermedia_m135a_table;
- mask_keydown = 0x0040000;
- mask_keycode = 0x00013f;
- nec_gpio = 1;
+ ir_codes = RC_MAP_AVERMEDIA_M135A_RM_JX;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = 1;
break;
case SAA7134_BOARD_AVERMEDIA_777:
case SAA7134_BOARD_AVERMEDIA_A16AR:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; // ms
@@ -538,7 +674,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_AVERMEDIA_A16D:
- ir_codes = &ir_codes_avermedia_a16d_table;
+ ir_codes = RC_MAP_AVERMEDIA_A16D;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; /* ms */
@@ -547,14 +683,14 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_KWORLD_TERMINATOR:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001f;
mask_keyup = 0x000060;
polling = 50; // ms
break;
case SAA7134_BOARD_MANLI_MTV001:
case SAA7134_BOARD_MANLI_MTV002:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
@@ -574,25 +710,25 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_507_9FM:
case SAA7134_BOARD_BEHOLD_507RDS_MK3:
case SAA7134_BOARD_BEHOLD_507RDS_MK5:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
- ir_codes = &ir_codes_behold_columbus_table;
+ ir_codes = RC_MAP_BEHOLD_COLUMBUS;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_GOTVIEW_7135:
- ir_codes = &ir_codes_gotview7135_table;
+ ir_codes = RC_MAP_GOTVIEW7135;
mask_keycode = 0x0003CC;
mask_keydown = 0x000010;
polling = 5; /* ms */
@@ -601,80 +737,80 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x00003F;
mask_keyup = 0x400000;
polling = 50; // ms
break;
case SAA7134_BOARD_PROTEUS_2309:
- ir_codes = &ir_codes_proteus_2309_table;
+ ir_codes = RC_MAP_PROTEUS_2309;
mask_keycode = 0x00007F;
mask_keyup = 0x000080;
polling = 50; // ms
break;
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_VIDEOMATE_DVBT_200:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x003F00;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_FLYDVBS_LR300:
case SAA7134_BOARD_FLYDVBT_LR301:
case SAA7134_BOARD_FLYDVBTDUO:
- ir_codes = &ir_codes_flydvb_table;
+ ir_codes = RC_MAP_FLYDVB;
mask_keycode = 0x0001F00;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
- ir_codes = &ir_codes_asus_pc39_table;
+ ir_codes = RC_MAP_ASUS_PC39;
mask_keydown = 0x0040000;
rc5_gpio = 1;
break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x00007f;
mask_keyup = 0x040000;
polling = 50; // ms
break;
case SAA7134_BOARD_ENCORE_ENLTV_FM53:
- ir_codes = &ir_codes_encore_enltv_fm53_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV_FM53;
mask_keydown = 0x0040000;
mask_keycode = 0x00007f;
nec_gpio = 1;
break;
case SAA7134_BOARD_10MOONSTVMASTER3:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x5f80000;
mask_keyup = 0x8000000;
polling = 50; //ms
break;
case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
- ir_codes = &ir_codes_genius_tvgo_a11mce_table;
+ ir_codes = RC_MAP_GENIUS_TVGO_A11MCE;
mask_keycode = 0xff;
mask_keydown = 0xf00000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_REAL_ANGEL_220:
- ir_codes = &ir_codes_real_audio_220_32_keys_table;
+ ir_codes = RC_MAP_REAL_AUDIO_220_32_KEYS;
mask_keycode = 0x3f00;
mask_keyup = 0x4000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
- ir_codes = &ir_codes_kworld_plus_tv_analog_table;
+ ir_codes = RC_MAP_KWORLD_PLUS_TV_ANALOG;
mask_keycode = 0x7f;
polling = 40; /* ms */
break;
case SAA7134_BOARD_VIDEOMATE_S350:
- ir_codes = &ir_codes_videomate_s350_table;
+ ir_codes = RC_MAP_VIDEOMATE_S350;
mask_keycode = 0x003f00;
mask_keydown = 0x040000;
break;
case SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
mask_keycode = 0x5f00;
mask_keyup = 0x020000;
polling = 50; /* ms */
@@ -695,6 +831,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
ir->dev = input_dev;
+ dev->remote = ir;
+
+ ir->running = 0;
/* init hardware-specific stuff */
ir->mask_keycode = mask_keycode;
@@ -703,6 +842,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
ir->polling = polling;
ir->rc5_gpio = rc5_gpio;
ir->nec_gpio = nec_gpio;
+ ir->raw_decode = raw_decode;
/* init input device */
snprintf(ir->name, sizeof(ir->name), "saa7134 IR (%s)",
@@ -710,6 +850,19 @@ int saa7134_input_init1(struct saa7134_dev *dev)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(dev->pci));
+
+ ir->props.priv = dev;
+ ir->props.open = saa7134_ir_open;
+ ir->props.close = saa7134_ir_close;
+
+ if (raw_decode)
+ ir->props.driver_type = RC_DRIVER_IR_RAW;
+
+ if (!raw_decode && allow_protocol_change) {
+ ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
+ ir->props.change_protocol = saa7134_ir_change_protocol;
+ }
+
err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -727,12 +880,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
input_dev->dev.parent = &dev->pci->dev;
- dev->remote = ir;
- saa7134_ir_start(dev, ir);
-
- err = ir_input_register(ir->dev, ir_codes, NULL);
+ err = ir_input_register(ir->dev, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
/* the remote isn't as bouncy as a keyboard */
ir->dev->rep[REP_DELAY] = repeat_delay;
@@ -740,10 +890,8 @@ int saa7134_input_init1(struct saa7134_dev *dev)
return 0;
- err_out_stop:
- saa7134_ir_stop(dev);
+err_out_free:
dev->remote = NULL;
- err_out_free:
kfree(ir);
return err;
}
@@ -787,24 +935,24 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "Pinnacle PCTV";
if (pinnacle_remote == 0) {
dev->init_data.get_key = get_key_pinnacle_color;
- dev->init_data.ir_codes = &ir_codes_pinnacle_color_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_COLOR;
info.addr = 0x47;
} else {
dev->init_data.get_key = get_key_pinnacle_grey;
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
info.addr = 0x47;
}
break;
case SAA7134_BOARD_UPMOST_PURPLE_TV:
dev->init_data.name = "Purple TV";
dev->init_data.get_key = get_key_purpletv;
- dev->init_data.ir_codes = &ir_codes_purpletv_table;
+ dev->init_data.ir_codes = RC_MAP_PURPLETV;
info.addr = 0x7a;
break;
case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
dev->init_data.name = "MSI TV@nywhere Plus";
dev->init_data.get_key = get_key_msi_tvanywhere_plus;
- dev->init_data.ir_codes = &ir_codes_msi_tvanywhere_plus_table;
+ dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
info.addr = 0x30;
/* MSI TV@nywhere Plus controller doesn't seem to
respond to probes unless we read something from
@@ -818,7 +966,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
dev->init_data.name = "HVR 1110";
dev->init_data.get_key = get_key_hvr1110;
- dev->init_data.ir_codes = &ir_codes_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_HAUPPAUGE_NEW;
info.addr = 0x71;
break;
case SAA7134_BOARD_BEHOLD_607FM_MK3:
@@ -834,9 +982,12 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
- dev->init_data.ir_codes = &ir_codes_behold_table;
+ dev->init_data.ir_codes = RC_MAP_BEHOLD;
+ dev->init_data.type = IR_TYPE_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
@@ -846,7 +997,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYDVB_TRIO:
dev->init_data.name = "FlyDVB Trio";
dev->init_data.get_key = get_key_flydvb_trio;
- dev->init_data.ir_codes = &ir_codes_flydvb_table;
+ dev->init_data.ir_codes = RC_MAP_FLYDVB;
info.addr = 0x0b;
break;
default:
@@ -859,6 +1010,33 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
i2c_new_device(&dev->i2c_adap, &info);
}
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
+{
+ struct card_ir *ir = dev->remote;
+ unsigned long timeout;
+ int space;
+
+ /* Generate initial event */
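+ /* rising SAA7134_GPIO_GPRESCAN reads the current status of the GPIO lines */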
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ space = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & ir->mask_keydown;
+ ir_raw_event_store_edge(dev->remote->dev, space ? IR_SPACE : IR_PULSE);
+
+ /*
+ * Wait 15 ms from the start of the first IR event before processing
+ * the event. This time is enough for NEC protocol. May need adjustments
+ * to work with other protocols.
+ */
+ if (!ir->active) {
+ timeout = jiffies + msecs_to_jiffies(15);
+ mod_timer(&ir->timer_end, timeout);
+ ir->active = 1;
+ }
+
+ return 1;
+}
+
static int saa7134_rc5_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
@@ -901,7 +1079,6 @@ static int saa7134_rc5_irq(struct saa7134_dev *dev)
return 1;
}
-
/* In the NEC protocol, a one lasts 2.25 ms and a zero 1.125 ms.
   The first (start) pulse is 9 ms, followed by a 4.5 ms space.
*/
@@ -1011,14 +1188,14 @@ static void nec_task(unsigned long data)
/* Keep repeating the last key */
mod_timer(&ir->timer_keyup, jiffies + msecs_to_jiffies(150));
- saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
}
static int saa7134_nec_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
tasklet_schedule(&ir->tlet);
return 1;
diff --git a/drivers/media/video/saa7134/saa7134-reg.h b/drivers/media/video/saa7134/saa7134-reg.h
index cf89d96d729..e7e0af101fa 100644
--- a/drivers/media/video/saa7134/saa7134-reg.h
+++ b/drivers/media/video/saa7134/saa7134-reg.h
@@ -112,17 +112,17 @@
#define SAA7134_IRQ1_INTE_RA0_0 (1 << 0)
#define SAA7134_IRQ2 (0x2c8 >> 2)
-#define SAA7134_IRQ2_INTE_GPIO23A (1 << 17)
-#define SAA7134_IRQ2_INTE_GPIO23 (1 << 16)
-#define SAA7134_IRQ2_INTE_GPIO22A (1 << 15)
-#define SAA7134_IRQ2_INTE_GPIO22 (1 << 14)
-#define SAA7134_IRQ2_INTE_GPIO18A (1 << 13)
-#define SAA7134_IRQ2_INTE_GPIO18 (1 << 12)
-#define SAA7134_IRQ2_INTE_GPIO16 (1 << 11) /* not certain */
-#define SAA7134_IRQ2_INTE_SC2 (1 << 10)
-#define SAA7134_IRQ2_INTE_SC1 (1 << 9)
-#define SAA7134_IRQ2_INTE_SC0 (1 << 8)
-#define SAA7134_IRQ2_INTE_DEC5 (1 << 7)
+#define SAA7134_IRQ2_INTE_GPIO23_N (1 << 17) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO23_P (1 << 16) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO22_N (1 << 15) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO22_P (1 << 14) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO18_N (1 << 13) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO18_P (1 << 12) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO16_N (1 << 11) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO16_P (1 << 10) /* positive edge */
+#define SAA7134_IRQ2_INTE_SC2 (1 << 9)
+#define SAA7134_IRQ2_INTE_SC1 (1 << 8)
+#define SAA7134_IRQ2_INTE_SC0 (1 << 7)
#define SAA7134_IRQ2_INTE_DEC4 (1 << 6)
#define SAA7134_IRQ2_INTE_DEC3 (1 << 5)
#define SAA7134_IRQ2_INTE_DEC2 (1 << 4)
@@ -135,7 +135,7 @@
#define SAA7134_IRQ_REPORT_GPIO23 (1 << 17)
#define SAA7134_IRQ_REPORT_GPIO22 (1 << 16)
#define SAA7134_IRQ_REPORT_GPIO18 (1 << 15)
-#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14) /* not certain */
+#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14)
#define SAA7134_IRQ_REPORT_LOAD_ERR (1 << 13)
#define SAA7134_IRQ_REPORT_CONF_ERR (1 << 12)
#define SAA7134_IRQ_REPORT_TRIG_ERR (1 << 11)
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 31138d3e51b..45f0ac8f3c0 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1180,7 +1180,7 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
That needs to be fixed somehow, but for now this is
good enough. */
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -1359,7 +1359,7 @@ static int video_open(struct file *file)
fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
fh->width = 720;
fh->height = 576;
- v4l2_prio_open(&dev->prio,&fh->prio);
+ v4l2_prio_open(&dev->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &video_qops,
&dev->pci->dev, &dev->slock,
@@ -1502,7 +1502,7 @@ static int video_release(struct file *file)
saa7134_pgtable_free(dev->pci,&fh->pt_cap);
saa7134_pgtable_free(dev->pci,&fh->pt_vbi);
- v4l2_prio_close(&dev->prio,&fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
return 0;
@@ -1632,8 +1632,15 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.width > maxw)
+ f->fmt.pix.width = maxw;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
+ f->fmt.pix.width &= ~0x03;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
@@ -1778,7 +1785,7 @@ static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
struct saa7134_dev *dev = fh->dev;
int err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
@@ -1832,7 +1839,7 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
That needs to be fixed somehow, but for now this is
good enough. */
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
} else if (res_locked(dev, RESOURCE_OVERLAY)) {
@@ -2016,7 +2023,7 @@ static int saa7134_s_tuner(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int rx, mode, err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
@@ -2050,7 +2057,7 @@ static int saa7134_s_frequency(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index bf130967ed1..3962534267b 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -20,7 +20,7 @@
*/
#include <linux/version.h>
-#define SAA7134_VERSION_CODE KERNEL_VERSION(0,2,15)
+#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
#include <linux/pci.h>
#include <linux/i2c.h>
@@ -300,6 +300,9 @@ struct saa7134_format {
#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174
#define SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S 175
#define SAA7134_BOARD_BEHOLD_505RDS_MK3 176
+#define SAA7134_BOARD_HAWELL_HW_404M7 177
+#define SAA7134_BOARD_BEHOLD_H7 178
+#define SAA7134_BOARD_BEHOLD_A7 179
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -809,7 +812,7 @@ int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
+int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 1ad980f8e77..4ac3b482fbb 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -115,9 +115,20 @@ struct sh_mobile_ceu_dev {
};
struct sh_mobile_ceu_cam {
- struct v4l2_rect ceu_rect;
- unsigned int cam_width;
- unsigned int cam_height;
+ /* CEU crop offsets within the (scaled) camera output */
+ unsigned int ceu_left;
+ unsigned int ceu_top;
+ /* Client output, as seen by the CEU */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_CROP / G_CROP, produced by client cropping and
+ * scaling, CEU scaling and CEU cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
const struct soc_mbus_pixelfmt *extra_fmt;
enum v4l2_mbus_pixelcode code;
};
@@ -213,8 +224,8 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
*count = 2;
if (pcdev->video_limit) {
- while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
- (*count)--;
+ if (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
+ *count = pcdev->video_limit / PAGE_ALIGN(*size);
}
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
@@ -565,38 +576,36 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
}
/* rect is guaranteed to not exceed the scaled camera rectangle */
-static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
- unsigned int out_width,
- unsigned int out_height)
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *rect = &cam->ceu_rect;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
- dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n",
- rect->width, rect->height, rect->left, rect->top);
+ dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
- left_offset = rect->left;
- top_offset = rect->top;
+ left_offset = cam->ceu_left;
+ top_offset = cam->ceu_top;
+ /* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */
if (pcdev->image_mode) {
- in_width = rect->width;
+ in_width = cam->width;
if (!pcdev->is_16bit) {
in_width *= 2;
left_offset *= 2;
}
- width = out_width;
- cdwdr_width = out_width;
+ width = icd->user_width;
+ cdwdr_width = icd->user_width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(out_width,
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
unsigned int w_factor;
- width = out_width;
+ width = icd->user_width;
switch (icd->current_fmt->host_fmt->packing) {
case SOC_MBUS_PACKING_2X8_PADHI:
@@ -606,17 +615,17 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
w_factor = 1;
}
- in_width = rect->width * w_factor;
+ in_width = cam->width * w_factor;
left_offset = left_offset * w_factor;
if (bytes_per_line < 0)
- cdwdr_width = out_width;
+ cdwdr_width = icd->user_width;
else
cdwdr_width = bytes_per_line;
}
- height = out_height;
- in_height = rect->height;
+ height = icd->user_height;
+ in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
height /= 2;
in_height /= 2;
@@ -775,9 +784,10 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
}
ceu_write(pcdev, CAIFR, value);
- sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
+ sh_mobile_ceu_set_rect(icd);
mdelay(1);
+ dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
@@ -866,6 +876,8 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
@@ -894,10 +906,55 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
return 0;
if (!icd->host_priv) {
+ struct v4l2_mbus_framefmt mf;
+ struct v4l2_rect rect;
+ struct device *dev = icd->dev.parent;
+ int shift = 0;
+
+ /* FIXME: subwindow is lost between close / open */
+
+ /* Cache current client geometry */
+ ret = client_g_rect(sd, &rect);
+ if (ret < 0)
+ return ret;
+
+ /* First time */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ while ((mf.width > 2560 || mf.height > 1920) && shift < 4) {
+ /* Try 2560x1920, 1280x960, 640x480, 320x240 */
+ mf.width = 2560 >> shift;
+ mf.height = 1920 >> shift;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+ shift++;
+ }
+
+ if (shift == 4) {
+ dev_err(dev, "Failed to configure the client below %ux%u\n",
+ mf.width, mf.height);
+ return -EIO;
+ }
+
+ dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
cam = kzalloc(sizeof(*cam), GFP_KERNEL);
if (!cam)
return -ENOMEM;
+ /* We are called with current camera crop, initialise subrect with it */
+ cam->rect = rect;
+ cam->subrect = rect;
+
+ cam->width = mf.width;
+ cam->height = mf.height;
+
icd->host_priv = cam;
} else {
cam = icd->host_priv;
@@ -979,16 +1036,12 @@ static unsigned int scale_down(unsigned int size, unsigned int scale)
return (size * 4096 + scale / 2) / scale;
}
-static unsigned int scale_up(unsigned int size, unsigned int scale)
-{
- return (size * scale + 2048) / 4096;
-}
-
static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
{
return (input * 4096 + output / 2) / output;
}
+/* Get and store current client crop */
static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
{
struct v4l2_crop crop;
@@ -1007,25 +1060,51 @@ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
- if (ret < 0)
- return ret;
-
- *rect = cap.defrect;
+ if (!ret)
+ *rect = cap.defrect;
return ret;
}
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void update_subrect(struct sh_mobile_ceu_cam *cam)
+{
+ struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect;
+
+ if (rect->width < subrect->width)
+ subrect->width = rect->width;
+
+ if (rect->height < subrect->height)
+ subrect->height = rect->height;
+
+ if (rect->left > subrect->left)
+ subrect->left = rect->left;
+ else if (rect->left + rect->width >
+ subrect->left + subrect->width)
+ subrect->left = rect->left + rect->width -
+ subrect->width;
+
+ if (rect->top > subrect->top)
+ subrect->top = rect->top;
+ else if (rect->top + rect->height >
+ subrect->top + subrect->height)
+ subrect->top = rect->top + rect->height -
+ subrect->height;
+}
+
/*
* The iterative approach, common to both scaling and cropping, is:
* 1. try if the client can produce exactly what the user requested
* 2. if (1) failed, try to double the client image until we get one big enough
* 3. if (2) failed, try to request the maximum image
*/
-static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
+static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
struct v4l2_crop *cam_crop)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
struct device *dev = sd->v4l2_dev->dev;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_cropcap cap;
int ret;
unsigned int width, height;
@@ -1041,13 +1120,14 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*/
if (!memcmp(rect, cam_rect, sizeof(*rect))) {
/* Even if camera S_CROP failed, but camera rectangle matches */
- dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n",
+ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
rect->width, rect->height, rect->left, rect->top);
+ cam->rect = *cam_rect;
return 0;
}
/* Try to fix cropping, that camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n",
+ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top,
rect->width, rect->height, rect->left, rect->top);
@@ -1057,6 +1137,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
if (ret < 0)
return ret;
+ /* Put user requested rectangle within sensor bounds */
soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
cap.bounds.width);
soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
@@ -1069,6 +1150,10 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
width = max(cam_rect->width, 2);
height = max(cam_rect->height, 2);
+ /*
+ * Loop as long as sensor is not covering the requested rectangle and
+ * is still within its bounds
+ */
while (!ret && (is_smaller(cam_rect, rect) ||
is_inside(cam_rect, rect)) &&
(cap.bounds.width > width || cap.bounds.height > height)) {
@@ -1086,6 +1171,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
* target left, set it to the middle point between the current
* left and minimum left. But that would add too much
* complexity: we would have to iterate each border separately.
+ * Instead we just drop to the left and top bounds.
*/
if (cam_rect->left > rect->left)
cam_rect->left = cap.bounds.left;
@@ -1103,7 +1189,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
@@ -1117,82 +1203,24 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*cam_rect = cap.bounds;
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
- return ret;
-}
-
-static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct v4l2_mbus_framefmt mf;
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- *scale_h = calc_generic_scale(rect->width, mf.width);
- *scale_v = calc_generic_scale(rect->height, mf.height);
-
- return 0;
-}
-
-static int get_camera_subwin(struct soc_camera_device *icd,
- struct v4l2_rect *cam_subrect,
- unsigned int cam_hscale, unsigned int cam_vscale)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *ceu_rect = &cam->ceu_rect;
-
- if (!ceu_rect->width) {
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
- struct v4l2_mbus_framefmt mf;
- int ret;
- /* First time */
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
-
- if (mf.width > 2560) {
- ceu_rect->width = 2560;
- ceu_rect->left = (mf.width - 2560) / 2;
- } else {
- ceu_rect->width = mf.width;
- ceu_rect->left = 0;
- }
-
- if (mf.height > 1920) {
- ceu_rect->height = 1920;
- ceu_rect->top = (mf.height - 1920) / 2;
- } else {
- ceu_rect->height = mf.height;
- ceu_rect->top = 0;
- }
-
- dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (!ret) {
+ cam->rect = *cam_rect;
+ update_subrect(cam);
}
- cam_subrect->width = scale_up(ceu_rect->width, cam_hscale);
- cam_subrect->left = scale_up(ceu_rect->left, cam_hscale);
- cam_subrect->height = scale_up(ceu_rect->height, cam_vscale);
- cam_subrect->top = scale_up(ceu_rect->top, cam_vscale);
-
- return 0;
+ return ret;
}
+/* Iterative s_mbus_fmt, also updates cached client crop on success */
static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
@@ -1200,6 +1228,15 @@ static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_cropcap cap;
int ret;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+ if ((width == mf->width && height == mf->height) || !ceu_can_scale)
+ goto update_cache;
+
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
@@ -1209,15 +1246,6 @@ static int client_s_fmt(struct soc_camera_device *icd,
max_width = min(cap.bounds.width, 2560);
max_height = min(cap.bounds.height, 1920);
- ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
-
- if ((width == mf->width && height == mf->height) || !ceu_can_scale)
- return 0;
-
/* Camera set a format, but geometry is not precise, try to improve */
tmp_w = mf->width;
tmp_h = mf->height;
@@ -1239,26 +1267,37 @@ static int client_s_fmt(struct soc_camera_device *icd,
}
}
+update_cache:
+ /* Update cache */
+ ret = client_g_rect(sd, &cam->rect);
+ if (ret < 0)
+ return ret;
+
+ update_subrect(cam);
+
return 0;
}
/**
- * @rect - camera cropped rectangle
- * @sub_rect - CEU cropped rectangle, mapped back to camera input area
- * @ceu_rect - on output calculated CEU crop rectangle
+ * @width - on output: user width, mapped back to input
+ * @height - on output: user height, mapped back to input
+ * @mf - in- / output camera output window
*/
-static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
- struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
- struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
+static int client_scale(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool ceu_can_scale)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
- /* 5. Apply iterative camera S_FMT for camera user window. */
+ /*
+ * 5. Apply iterative camera S_FMT for camera user window (also updates
+ * client crop cache and the imaginary sub-rectangle).
+ */
ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
if (ret < 0)
return ret;
@@ -1270,60 +1309,22 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
/* unneeded - it is already in "mf_tmp" */
- /* 7. Calculate new camera scales. */
- ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
+ /* 7. Calculate new client scales. */
+ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width);
+ scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height);
- cam->cam_width = mf_tmp.width;
- cam->cam_height = mf_tmp.height;
mf->width = mf_tmp.width;
mf->height = mf_tmp.height;
mf->colorspace = mf_tmp.colorspace;
/*
* 8. Calculate new CEU crop - apply camera scales to previously
- * calculated "effective" crop.
+ * updated "effective" crop.
*/
- ceu_rect->left = scale_down(sub_rect->left, scale_h);
- ceu_rect->width = scale_down(sub_rect->width, scale_h);
- ceu_rect->top = scale_down(sub_rect->top, scale_v);
- ceu_rect->height = scale_down(sub_rect->height, scale_v);
+ *width = scale_down(cam->subrect.width, scale_h);
+ *height = scale_down(cam->subrect.height, scale_v);
- dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
-
- return 0;
-}
-
-/* Get combined scales */
-static int get_scales(struct soc_camera_device *icd,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_crop cam_crop;
- unsigned int width_in, height_in;
- int ret;
-
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, &cam_crop.c);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, &cam_crop.c, scale_h, scale_v);
- if (ret < 0)
- return ret;
-
- width_in = scale_up(cam->ceu_rect.width, *scale_h);
- height_in = scale_up(cam->ceu_rect.height, *scale_v);
-
- *scale_h = calc_generic_scale(width_in, icd->user_width);
- *scale_v = calc_generic_scale(height_in, icd->user_height);
+ dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
return 0;
}
@@ -1342,115 +1343,165 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
+ struct v4l2_rect *cam_rect = &cam_crop.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
- unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
- out_width, out_height;
+ unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+ out_width, out_height, scale_h, scale_v;
+ int interm_width, interm_height;
u32 capsr, cflcr;
int ret;
- /* 1. Calculate current combined scales. */
- ret = get_scales(icd, &scale_comb_h, &scale_comb_v);
- if (ret < 0)
- return ret;
+ dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
- dev_geo(dev, "1: combined scales %u:%u\n", scale_comb_h, scale_comb_v);
+ /* During camera cropping its output window can change too, stop CEU */
+ capsr = capture_save_reset(pcdev);
+ dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
- /* 2. Apply iterative camera S_CROP for new input window. */
- ret = client_s_crop(sd, a, &cam_crop);
+ /* 1. - 2. Apply iterative camera S_CROP for new input window. */
+ ret = client_s_crop(icd, a, &cam_crop);
if (ret < 0)
return ret;
- dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n",
+ dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
/* On success cam_crop contains current camera crop */
- /*
- * 3. If old combined scales applied to new crop produce an impossible
- * user window, adjust scales to produce nearest possible window.
- */
- out_width = scale_down(rect->width, scale_comb_h);
- out_height = scale_down(rect->height, scale_comb_v);
-
- if (out_width > 2560)
- out_width = 2560;
- else if (out_width < 2)
- out_width = 2;
-
- if (out_height > 1920)
- out_height = 1920;
- else if (out_height < 4)
- out_height = 4;
-
- dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height);
-
- /* 4. Use G_CROP to retrieve actual input window: already in cam_crop */
-
- /*
- * 5. Using actual input window and calculated combined scales calculate
- * camera target output window.
- */
- mf.width = scale_down(cam_rect->width, scale_comb_h);
- mf.height = scale_down(cam_rect->height, scale_comb_v);
-
- dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
-
- /* 6. - 9. */
- mf.code = cam->code;
- mf.field = pcdev->field;
-
- capsr = capture_save_reset(pcdev);
- dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+ /* 3. Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
- /* Make relative to camera rectangle */
- rect->left -= cam_rect->left;
- rect->top -= cam_rect->top;
+ if (mf.width > 2560 || mf.height > 1920)
+ return -EINVAL;
- ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
- pcdev->image_mode &&
- V4L2_FIELD_NONE == pcdev->field);
+ /* Cache camera output window */
+ cam->width = mf.width;
+ cam->height = mf.height;
- dev_geo(dev, "6-9: %d\n", ret);
+ /* 4. Calculate camera scales */
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);
- /* 10. Use CEU cropping to crop to the new window. */
- sh_mobile_ceu_set_rect(icd, out_width, out_height);
+ /* Calculate intermediate window */
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
- dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (pcdev->image_mode) {
+ out_width = min(interm_width, icd->user_width);
+ out_height = min(interm_height, icd->user_height);
+ } else {
+ out_width = interm_width;
+ out_height = interm_height;
+ }
/*
- * 11. Calculate CEU scales from camera scales from results of (10) and
- * user window from (3)
+ * 5. Calculate CEU scales from camera scales from results of (5) and
+ * the user window
*/
- scale_ceu_h = calc_scale(ceu_rect->width, &out_width);
- scale_ceu_v = calc_scale(ceu_rect->height, &out_height);
+ scale_ceu_h = calc_scale(interm_width, &out_width);
+ scale_ceu_v = calc_scale(interm_height, &out_height);
- dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+ /* Calculate camera scales */
+ scale_h = calc_generic_scale(cam_rect->width, out_width);
+ scale_v = calc_generic_scale(cam_rect->height, out_height);
- /* 12. Apply CEU scales. */
+ dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+ /* Apply CEU scales. */
cflcr = scale_ceu_h | (scale_ceu_v << 16);
if (cflcr != pcdev->cflcr) {
pcdev->cflcr = cflcr;
ceu_write(pcdev, CFLCR, cflcr);
}
+ icd->user_width = out_width;
+ icd->user_height = out_height;
+ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_h) & ~1;
+ cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_v) & ~1;
+
+ /* 6. Use CEU cropping to crop to the new window. */
+ sh_mobile_ceu_set_rect(icd);
+
+ cam->subrect = *rect;
+
+ dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->ceu_left, cam->ceu_top);
+
/* Restore capture */
if (pcdev->active)
capsr |= 1;
capture_restore(pcdev, capsr);
- icd->user_width = out_width;
- icd->user_height = out_height;
-
/* Even if only camera cropping succeeded */
return ret;
}
+static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = cam->subrect;
+
+ return 0;
+}
+
+/*
+ * Calculate real client output window by applying new scales to the current
+ * client crop. New scales are calculated from the requested output format and
+ * CEU crop, mapped back onto the client input (subrect).
+ */
+static void calculate_client_output(struct soc_camera_device *icd,
+ struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct device *dev = icd->dev.parent;
+ struct v4l2_rect *cam_subrect = &cam->subrect;
+ unsigned int scale_v, scale_h;
+
+ if (cam_subrect->width == cam->rect.width &&
+ cam_subrect->height == cam->rect.height) {
+ /* No sub-cropping */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ return;
+ }
+
+ /* 1.-2. Current camera scales and subwin - cached. */
+
+ dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+ cam_subrect->width, cam_subrect->height,
+ cam_subrect->left, cam_subrect->top);
+
+ /*
+ * 3. Calculate new combined scales from input sub-window to requested
+ * user window.
+ */
+
+ /*
+ * TODO: the CEU cannot scale images larger than VGA to sizes smaller than
+ * SubQCIF (128x96) or larger than VGA
+ */
+ scale_h = calc_generic_scale(cam_subrect->width, pix->width);
+ scale_v = calc_generic_scale(cam_subrect->height, pix->height);
+
+ dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+ /*
+ * 4. Calculate client output window by applying combined scales to real
+ * input window.
+ */
+ mf->width = scale_down(cam->rect.width, scale_h);
+ mf->height = scale_down(cam->rect.height, scale_v);
+}
+
/* Similar to set_crop multistage iterative algorithm */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
@@ -1460,18 +1511,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
- struct v4l2_crop cam_crop;
- struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect;
- unsigned int scale_cam_h, scale_cam_v;
+ unsigned int ceu_sub_width, ceu_sub_height;
u16 scale_v, scale_h;
int ret;
bool image_mode;
enum v4l2_field field;
+ dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height);
+
switch (pix->field) {
default:
pix->field = V4L2_FIELD_NONE;
@@ -1492,46 +1542,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* 1. Calculate current camera scales. */
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, cam_rect);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v);
-
- /*
- * 2. Calculate "effective" input crop (sensor subwindow) - CEU crop
- * scaled back at current camera scales onto input window.
- */
- ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
- cam_subrect.width, cam_subrect.height,
- cam_subrect.left, cam_subrect.top);
-
- /*
- * 3. Calculate new combined scales from "effective" input window to
- * requested user window.
- */
- scale_h = calc_generic_scale(cam_subrect.width, pix->width);
- scale_v = calc_generic_scale(cam_subrect.height, pix->height);
-
- dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
-
- /*
- * 4. Calculate camera output window by applying combined scales to real
- * input window.
- */
- mf.width = scale_down(cam_rect->width, scale_h);
- mf.height = scale_down(cam_rect->height, scale_v);
+ /* 1.-4. Calculate client output geometry */
+ calculate_client_output(icd, &f->fmt.pix, &mf);
mf.field = pix->field;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1547,17 +1559,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
image_mode = false;
}
- dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
+ dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
- ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
+ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height,
image_mode && V4L2_FIELD_NONE == field);
- dev_geo(dev, "5-9: client scale %d\n", ret);
+ dev_geo(dev, "5-9: client scale return %d\n", ret);
/* Done with the camera. Now see if we can improve the result */
- dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
+ dev_geo(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
ret, mf.width, mf.height, pix->width, pix->height);
if (ret < 0)
return ret;
@@ -1565,40 +1577,44 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
if (mf.code != xlate->code)
return -EINVAL;
+ /* 9. Prepare CEU crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
/* 10. Use CEU scaling to scale to the requested user window. */
/* We cannot scale up */
- if (pix->width > mf.width)
- pix->width = mf.width;
- if (pix->width > ceu_rect.width)
- pix->width = ceu_rect.width;
+ if (pix->width > ceu_sub_width)
+ ceu_sub_width = pix->width;
- if (pix->height > mf.height)
- pix->height = mf.height;
- if (pix->height > ceu_rect.height)
- pix->height = ceu_rect.height;
+ if (pix->height > ceu_sub_height)
+ ceu_sub_height = pix->height;
pix->colorspace = mf.colorspace;
if (image_mode) {
/* Scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_rect.width, &pix->width);
- scale_v = calc_scale(ceu_rect.height, &pix->height);
-
- pcdev->cflcr = scale_h | (scale_v << 16);
+ scale_h = calc_scale(ceu_sub_width, &pix->width);
+ scale_v = calc_scale(ceu_sub_height, &pix->height);
} else {
- pix->width = ceu_rect.width;
- pix->height = ceu_rect.height;
- scale_h = scale_v = 0;
- pcdev->cflcr = 0;
+ pix->width = ceu_sub_width;
+ pix->height = ceu_sub_height;
+ scale_h = 0;
+ scale_v = 0;
}
+ pcdev->cflcr = scale_h | (scale_v << 16);
+
+ /*
+ * We have calculated CFLCR, the actual configuration will be performed
+ * in sh_mobile_ceu_set_bus_param()
+ */
+
dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
- ceu_rect.width, scale_h, pix->width,
- ceu_rect.height, scale_v, pix->height);
+ ceu_sub_width, scale_h, pix->width,
+ ceu_sub_height, scale_v, pix->height);
cam->code = xlate->code;
- cam->ceu_rect = ceu_rect;
icd->current_fmt = xlate;
pcdev->field = field;
@@ -1820,6 +1836,7 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.remove = sh_mobile_ceu_remove_device,
.get_formats = sh_mobile_ceu_get_formats,
.put_formats = sh_mobile_ceu_put_formats,
+ .get_crop = sh_mobile_ceu_get_crop,
.set_crop = sh_mobile_ceu_set_crop,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
new file mode 100644
index 00000000000..f5b892a2a8e
--- /dev/null
+++ b/drivers/media/video/sh_vou.c
@@ -0,0 +1,1476 @@
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+
+#include <media/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER 0
+#define VOUCR 4
+#define VOUSTR 8
+#define VOUVCR 0xc
+#define VOUISR 0x10
+#define VOUBCR 0x14
+#define VOUDPR 0x18
+#define VOUDSR 0x1c
+#define VOUVPR 0x20
+#define VOUIR 0x24
+#define VOUSRR 0x28
+#define VOUMSR 0x2c
+#define VOUHIR 0x30
+#define VOUDFR 0x34
+#define VOUAD1R 0x38
+#define VOUAD2R 0x3c
+#define VOUAIR 0x40
+#define VOUSWR 0x44
+#define VOURCR 0x48
+#define VOURPR 0x50
+
+enum sh_vou_status {
+ SH_VOU_IDLE,
+ SH_VOU_INITIALISING,
+ SH_VOU_RUNNING,
+};
+
+#define VOU_MAX_IMAGE_WIDTH 720
+#define VOU_MAX_IMAGE_HEIGHT 480
+
+struct sh_vou_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ atomic_t use_count;
+ struct sh_vou_pdata *pdata;
+ spinlock_t lock;
+ void __iomem *base;
+ /* State information */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct list_head queue;
+ v4l2_std_id std;
+ int pix_idx;
+ struct videobuf_buffer *active;
+ enum sh_vou_status status;
+};
+
+struct sh_vou_file {
+ struct videobuf_queue vbq;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+ __raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+ return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ u32 old = __raw_readl(vou_dev->base + reg);
+
+ value = (value & mask) | (old & ~mask);
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg, value, mask);
+ sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
+struct sh_vou_fmt {
+ u32 pfmt;
+ char *desc;
+ unsigned char bpp;
+ unsigned char rgb;
+ unsigned char yf;
+ unsigned char pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+ {
+ .pfmt = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .desc = "YVU420 planar",
+ .yf = 0,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .desc = "YVYU planar",
+ .yf = 1,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ .desc = "RGB24",
+ .pkf = 2,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ .desc = "RGB565",
+ .pkf = 3,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565X,
+ .bpp = 16,
+ .desc = "RGB565 byteswapped",
+ .pkf = 3,
+ .rgb = 1,
+ },
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t addr1, addr2;
+
+ addr1 = videobuf_to_dma_contig(vb);
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
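+ /* semi-planar formats: the chroma plane follows the luma plane */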
+ addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+ break;
+ default:
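+ /* packed (RGB) formats use a single plane, no second address needed */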
+ addr2 = 0;
+ }
+
+ sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+ sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
+static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+ u32 dataswap = 7;
+#else
+ u32 dataswap = 0;
+#endif
+
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ row_coeff = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dataswap ^= 1;
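+ /* fall through */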
+ case V4L2_PIX_FMT_RGB565X:
+ row_coeff = 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ row_coeff = 3;
+ break;
+ }
+
+ sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+ sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+ sh_vou_schedule_next(vou_dev, vb);
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ BUG_ON(in_interrupt());
+
+ /* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
+ videobuf_waiton(vb, 0, 0);
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ *size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
+ vou_dev->pix.height / 8;
+
+ if (*count < 2)
+ *count = 2;
+
+ /* Taking into account maximum frame size, *count will stay >= 2 */
+ if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
+ *count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
+
+ dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ int ret;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ if (vb->width != pix->width ||
+ vb->height != pix->height ||
+ vb->field != pix->field) {
+ vb->width = pix->width;
+ vb->height = pix->height;
+ vb->field = field;
+ if (vb->state != VIDEOBUF_NEEDS_INIT)
+ free_buffer(vq, vb);
+ }
+
+ vb->size = vb->height * bytes_per_line;
+ if (vb->baddr && vb->bsize < vb->size) {
+ /* User buffer too small */
+ dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+ vb->bsize, vb->baddr);
+ return -EINVAL;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0) {
+ dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
+ vb->memory, ret);
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ }
+
+ dev_dbg(vq->dev,
+ "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+ __func__, vou_dev->pix_idx, bytes_per_line,
+ videobuf_to_dma_contig(vb), vb->memory, vb->state);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &vou_dev->queue);
+
+ if (vou_dev->status == SH_VOU_RUNNING) {
+ return;
+ } else if (!vou_dev->active) {
+ vou_dev->active = vb;
+ /* Start from side A: we use mirror addresses, so set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, vb);
+ /* Only activate VOU after the second buffer */
+ } else if (vou_dev->active->queue.next == &vb->queue) {
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_stream_start(vou_dev, vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+ dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+ /* Two buffers on the queue - activate the hardware */
+
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ }
+}
+
+static void sh_vou_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ unsigned long flags;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&vou_dev->lock, flags);
+
+ if (vou_dev->active == vb) {
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ vou_dev->active = NULL;
+ }
+
+ if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
+ vb->state = VIDEOBUF_ERROR;
+ list_del(&vb->queue);
+ }
+
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+
+ free_buffer(vq, vb);
+}
+
+static struct videobuf_queue_ops sh_vou_video_qops = {
+ .buf_setup = sh_vou_buf_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .buf_release = sh_vou_buf_release,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/* Enumerate the formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ if (fmt->index >= ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strlcpy(fmt->description, vou_fmt[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ fmt->fmt.pix = vou_dev->pix;
+
+ return 0;
+}
+
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+ int pix_idx, int w_idx, int h_idx)
+{
+ struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+ unsigned int black_left, black_top, width_max, height_max,
+ frame_in_height, frame_out_height, frame_out_top;
+ struct v4l2_rect *rect = &vou_dev->rect;
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ u32 vouvcr = 0, dsr_h, dsr_v;
+
+ if (vou_dev->std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ frame_in_height = pix->height / 2;
+ frame_out_height = rect->height / 2;
+ frame_out_top = rect->top / 2;
+
+ /*
+ * Cropping scheme: max useful image is 720x480, and the total video
+ * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+ * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+ * of which the first 33 / 25 clocks HSYNC must be held active. This
+ * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+ * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+ * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+ * beyond DSR, specified on the left and top by the VPR register "black
+ * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+ * at 138 / 144 : 20, because that's the HSYNC timing that our first
+ * client requires, and that's exactly what leaves us 720 pixels for the
+ * image; we leave VPR[VVP] at default 20 for now, because the client
+ * doesn't seem to have any special requirements for it. Otherwise we
+ * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+ * the selected standard, and DPR and DSR are selected according to
+ * cropping. Q: how does the client detect the first valid line? Does
+ * HSYNC stay inactive during invalid (black) lines?
+ */
+ black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+ black_top = 20;
+
+ dsr_h = rect->width + rect->left;
+ dsr_v = frame_out_height + frame_out_top;
+
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+ pix->width, frame_in_height, black_left, black_top,
+ rect->left, frame_out_top, dsr_h, dsr_v);
+
+ /* VOUISR height - half of a frame height in frame mode */
+ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+ sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+ /*
+ * if necessary, we could set VOUHIR to
+ * max(black_left + dsr_h, width_max) here
+ */
+
+ if (w_idx)
+ vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+ if (h_idx)
+ vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+
+ /* To produce a colour bar for testing set bit 23 of VOUVCR */
+ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+ sh_vou_reg_ab_write(vou_dev, VOUDFR,
+ fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
+
+struct sh_vou_geometry {
+ struct v4l2_rect output;
+ unsigned int in_width;
+ unsigned int in_height;
+ int scale_idx_h;
+ int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that we can use, with VOU scaling, to produce
+ * output closest to the requested rectangle
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ /* The compiler cannot know that best and idx will indeed be set */
+ unsigned int best_err = UINT_MAX, best = 0, width_max, height_max;
+ int i, idx = 0;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.width * vou_scale_h_den[i] /
+ vou_scale_h_num[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_width = best;
+ geo->scale_idx_h = idx;
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.height * vou_scale_v_den[i] /
+ vou_scale_v_num[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_height = best;
+ geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry that we can produce, using VOU scaling, closest
+ * to the requested rectangle
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
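+ /*
+ * As in vou_adjust_input(): the first (1:1) scale entry never exceeds the
+ * limits, so best and idx are always assigned before use
+ */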
+ unsigned int best_err = UINT_MAX, best, width_max, height_max;
+ int i, idx;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262 * 2;
+ } else {
+ width_max = 864;
+ height_max = 312 * 2;
+ }
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_width * vou_scale_h_num[i] /
+ vou_scale_h_den[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.width = best;
+ geo->scale_idx_h = idx;
+ if (geo->output.left + best > width_max)
+ geo->output.left = width_max - best;
+
+ pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+ vou_scale_h_num[idx], vou_scale_h_den[idx], best);
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_height * vou_scale_v_num[i] /
+ vou_scale_v_den[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.height = best;
+ geo->scale_idx_v = idx;
+ if (geo->output.top + best > height_max)
+ geo->output.top = height_max - best;
+
+ pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+ vou_scale_v_num[idx], vou_scale_v_den[idx], best);
+}
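One reading of the "This loop can be replaced with one division" comments in vou_adjust_input() and vou_adjust_output() above is that only the ratio of the two variable sizes needs a division at run time; the per-entry num/den ratios are constants and could be precomputed. A standalone sketch of that idea, with made-up example tables (the driver's real vou_scale_*_num/_den tables are defined earlier in the file and are sorted so that the scale factors increase):

	/*
	 * Illustration only: the example tables are invented, not the VOU's.
	 * The variable-dependent division (out/in) is done once; the table
	 * ratios are compile-time constants.
	 */
	#include <stdio.h>

	static const unsigned int ex_scale_num[] = {1, 3, 2, 4};	/* 1.0, 1.5, 2.0, 4.0 */
	static const unsigned int ex_scale_den[] = {1, 2, 1, 1};

	static int closest_scale_idx(unsigned int in, unsigned int out)
	{
		/* desired out/in ratio in 16.16 fixed point, computed once */
		unsigned long long want = ((unsigned long long)out << 16) / in;
		unsigned long long best_err = ~0ULL;
		int i, idx = 0;

		for (i = 0; i < 4; i++) {
			unsigned long long have =
				((unsigned long long)ex_scale_num[i] << 16) / ex_scale_den[i];
			unsigned long long err = have > want ? have - want : want - have;

			if (err < best_err) {
				best_err = err;
				idx = i;
			}
		}
		return idx;
	}

	int main(void)
	{
		/* 360 input pixels scaled to 720 output pixels: closest factor is 2.0 */
		printf("best index: %d\n", closest_scale_idx(360, 720));
		return 0;
	}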
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int pix_idx;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ vou_dev->rect.width, vou_dev->rect.height,
+ pix->width, pix->height);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ if (pix_idx == ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+ geo.output = vou_dev->rect;
+
+ vou_adjust_output(&geo, vou_dev->std);
+
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ geo.output.width, geo.output.height, mbfmt.width, mbfmt.height);
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ if (mbfmt.width != geo.output.width ||
+ mbfmt.height != geo.output.height) {
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ vou_adjust_input(&geo, vou_dev->std);
+ }
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+ pix->width, pix->height);
+
+ vou_dev->pix_idx = pix_idx;
+
+ vou_dev->pix = *pix;
+
+ sh_vou_configure_geometry(vou_dev, pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int i;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ pix->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
+ if (vou_fmt[i].pfmt == pix->pixelformat)
+ return 0;
+
+ pix->pixelformat = vou_fmt[0].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ return videobuf_reqbufs(&vou_file->vbq, req);
+}
+
+static int sh_vou_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_querybuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_qbuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int sh_vou_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+ int ret;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* This calls our .buf_queue() (== sh_vou_buf_queue) */
+ return videobuf_streamon(&vou_file->vbq);
+}
+
+static int sh_vou_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ /*
+ * This calls buf_release from host driver's videobuf_queue_ops for all
+ * remaining buffers. When the last buffer is freed, stop streaming
+ */
+ videobuf_streamoff(&vou_file->vbq);
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
+
+ return 0;
+}
+
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+ switch (bus_fmt) {
+ default:
+ pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
+ case SH_VOU_BUS_8BIT:
+ return 1;
+ case SH_VOU_BUS_16BIT:
+ return 0;
+ case SH_VOU_BUS_BT656:
+ return 3;
+ }
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
+
+ if (*std_id & ~vdev->tvnorms)
+ return -EINVAL;
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_std_output, *std_id);
+ /* Shall we continue, if the subdev doesn't support .s_std_output()? */
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (*std_id & V4L2_STD_525_60)
+ sh_vou_reg_ab_set(vou_dev, VOUCR,
+ sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+ else
+ sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+
+ vou_dev->std = *std_id;
+
+ return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ *std = vou_dev->std;
+
+ return 0;
+}
+
+static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->c = vou_dev->rect;
+
+ return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+ rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+ if (rect->height + rect->top > VOU_MAX_IMAGE_HEIGHT)
+ rect->top = VOU_MAX_IMAGE_HEIGHT - rect->height;
+
+ geo.output = *rect;
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+
+ /* Configure the encoder one-to-one, position at 0, ignore errors */
+ sd_crop.c.width = geo.output.width;
+ sd_crop.c.height = geo.output.height;
+ /*
+ * We first issue a S_CROP, so that the subsequent S_FMT delivers the
+ * final encoder configuration.
+ */
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_crop, &sd_crop);
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ /*
+ * No down-scaling. According to the API, current call has precedence:
+ * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+ */
+ vou_adjust_input(&geo, vou_dev->std);
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+/*
+ * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313. The default
+ * rectangle matches the initial register values, with the height taking the
+ * interlaced format into account. The actual image can only go up to
+ * 720 x 2 * 240, so VOUVPR can only meaningfully contain values <= 720 and
+ * <= 240 respectively, not <= 864 and <= 312.
+ */
+static int sh_vou_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *a)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = VOU_MAX_IMAGE_WIDTH;
+ a->bounds.height = VOU_MAX_IMAGE_HEIGHT;
+ /* Default = max, set VOUDPR = 0, which is not hardware default */
+ a->defrect.left = 0;
+ a->defrect.top = 0;
+ a->defrect.width = VOU_MAX_IMAGE_WIDTH;
+ a->defrect.height = VOU_MAX_IMAGE_HEIGHT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
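For reference, the numbers in the comment above sh_vou_cropcap() work out as follows (an illustrative summary with invented constant names, not driver code):

	/* Field geometry behind the VOUVPR limits discussed above */
	enum {
		NTSC_TOTAL_CLOCKS_PER_LINE = 858,	/* including blanking */
		NTSC_FIELD_LINES_A = 262,		/* 262 + 263 = 525 lines/frame */
		NTSC_FIELD_LINES_B = 263,
		PAL_TOTAL_CLOCKS_PER_LINE = 864,
		PAL_FIELD_LINES_A = 312,		/* 312 + 313 = 625 lines/frame */
		PAL_FIELD_LINES_B = 313,
		VOU_ACTIVE_WIDTH_MAX = 720,		/* hence VOUVPR x <= 720 */
		VOU_ACTIVE_FIELD_LINES_MAX = 240,	/* 2 * 240 = 480 active lines */
	};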
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+ struct sh_vou_device *vou_dev = dev_id;
+ static unsigned long j;
+ struct videobuf_buffer *vb;
+ static int cnt;
+ static int side;
+ u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+ u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+ if (!(irq_status & 0x300)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+ irq_status);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&vou_dev->lock);
+ if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev,
+ "IRQ without active buffer: %x!\n", irq_status);
+ /* Just ack: buf_release will disable further interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+ irq_status, masked, vou_status, cnt);
+
+ cnt++;
+ side = vou_status & 0x10000;
+
+ /* Clear only set interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+ vb = vou_dev->active;
+ list_del(&vb->queue);
+
+ vb->state = VIDEOBUF_DONE;
+ do_gettimeofday(&vb->ts);
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ if (list_empty(&vou_dev->queue)) {
+ /* Stop VOU */
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
+ __func__, cnt);
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ vou_dev->active = NULL;
+ vou_dev->status = SH_VOU_INITIALISING;
+ /* Disable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ vou_dev->active = list_entry(vou_dev->queue.next,
+ struct videobuf_buffer, queue);
+
+ if (vou_dev->active->queue.next != &vou_dev->queue) {
+ struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
+ struct videobuf_buffer, queue);
+ sh_vou_schedule_next(vou_dev, new);
+ }
+
+ spin_unlock(&vou_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+ struct sh_vou_pdata *pdata = vou_dev->pdata;
+ u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+ int i = 100;
+
+ /* Disable all IRQs */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+ /* Reset VOU interfaces - registers unaffected */
+ sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+ while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+ udelay(1);
+
+ if (!i)
+ return -ETIMEDOUT;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+ if (pdata->flags & SH_VOU_PCLK_FALLING)
+ voucr |= 1 << 28;
+ if (pdata->flags & SH_VOU_HSYNC_LOW)
+ voucr |= 1 << 27;
+ if (pdata->flags & SH_VOU_VSYNC_LOW)
+ voucr |= 1 << 26;
+ sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+ /* Manual register side switching at first */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+ /* Default - fixed HSYNC length, can be made configurable if required */
+ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+ return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
+ GFP_KERNEL);
+
+ if (!vou_file)
+ return -ENOMEM;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ file->private_data = vou_file;
+
+ if (atomic_inc_return(&vou_dev->use_count) == 1) {
+ int ret;
+ /* First open */
+ vou_dev->status = SH_VOU_INITIALISING;
+ pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0) {
+ atomic_dec(&vou_dev->use_count);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ vou_dev->status = SH_VOU_IDLE;
+ return ret;
+ }
+ }
+
+ videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
+ vou_dev->v4l2_dev.dev, &vou_dev->lock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vdev);
+
+ return 0;
+}
+
+static int sh_vou_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (!atomic_dec_return(&vou_dev->use_count)) {
+ /* Last close */
+ vou_dev->status = SH_VOU_IDLE;
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ }
+
+ file->private_data = NULL;
+ kfree(vou_file);
+
+ return 0;
+}
+
+static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_mmap_mapper(&vou_file->vbq, vma);
+}
+
+static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_poll_stream(file, &vou_file->vbq, wait);
+}
+
+static int sh_vou_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int sh_vou_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
+}
+
+static int sh_vou_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
+}
+#endif
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+ .vidioc_querycap = sh_vou_querycap,
+ .vidioc_enum_fmt_vid_out = sh_vou_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
+ .vidioc_reqbufs = sh_vou_reqbufs,
+ .vidioc_querybuf = sh_vou_querybuf,
+ .vidioc_qbuf = sh_vou_qbuf,
+ .vidioc_dqbuf = sh_vou_dqbuf,
+ .vidioc_streamon = sh_vou_streamon,
+ .vidioc_streamoff = sh_vou_streamoff,
+ .vidioc_s_std = sh_vou_s_std,
+ .vidioc_g_std = sh_vou_g_std,
+ .vidioc_cropcap = sh_vou_cropcap,
+ .vidioc_g_crop = sh_vou_g_crop,
+ .vidioc_s_crop = sh_vou_s_crop,
+ .vidioc_g_chip_ident = sh_vou_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = sh_vou_g_register,
+ .vidioc_s_register = sh_vou_s_register,
+#endif
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_vou_open,
+ .release = sh_vou_release,
+ .ioctl = video_ioctl2,
+ .mmap = sh_vou_mmap,
+ .poll = sh_vou_poll,
+};
+
+static const struct video_device sh_vou_video_template = {
+ .name = "sh_vou",
+ .fops = &sh_vou_fops,
+ .ioctl_ops = &sh_vou_ioctl_ops,
+ .tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static int __devinit sh_vou_probe(struct platform_device *pdev)
+{
+ struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+ struct v4l2_rect *rect;
+ struct v4l2_pix_format *pix;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vdev;
+ struct sh_vou_device *vou_dev;
+ struct resource *reg_res, *region;
+ struct v4l2_subdev *subdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!vou_pdata || !reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+ return -ENODEV;
+ }
+
+ vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+ if (!vou_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vou_dev->queue);
+ spin_lock_init(&vou_dev->lock);
+ atomic_set(&vou_dev->use_count, 0);
+ vou_dev->pdata = vou_pdata;
+ vou_dev->status = SH_VOU_IDLE;
+
+ rect = &vou_dev->rect;
+ pix = &vou_dev->pix;
+
+ /* Fill in defaults */
+ vou_dev->std = sh_vou_video_template.current_norm;
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = VOU_MAX_IMAGE_WIDTH;
+ rect->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->width = VOU_MAX_IMAGE_WIDTH;
+ pix->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->pixelformat = V4L2_PIX_FMT_YVYU;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH * 2;
+ pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * VOU_MAX_IMAGE_HEIGHT;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ region = request_mem_region(reg_res->start, resource_size(reg_res),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "VOU region already claimed\n");
+ ret = -EBUSY;
+ goto ereqmemreg;
+ }
+
+ vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
+ if (!vou_dev->base) {
+ ret = -ENOMEM;
+ goto emap;
+ }
+
+ ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+ if (ret < 0)
+ goto ereqirq;
+
+ ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ goto ev4l2devreg;
+ }
+
+ /* Allocate memory for video device */
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENOMEM;
+ goto evdevalloc;
+ }
+
+ *vdev = sh_vou_video_template;
+ if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+ vdev->tvnorms |= V4L2_STD_PAL;
+ vdev->v4l2_dev = &vou_dev->v4l2_dev;
+ vdev->release = video_device_release;
+
+ vou_dev->vdev = vdev;
+ video_set_drvdata(vdev, vou_dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+ if (!i2c_adap) {
+ ret = -ENODEV;
+ goto ei2cgadap;
+ }
+
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0)
+ goto ereset;
+
+ subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+ vou_pdata->module_name, vou_pdata->board_info, NULL);
+ if (!subdev) {
+ ret = -ENOMEM;
+ goto ei2cnd;
+ }
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ goto evregdev;
+
+ return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+ i2c_put_adapter(i2c_adap);
+ei2cgadap:
+ video_device_release(vdev);
+ pm_runtime_disable(&pdev->dev);
+evdevalloc:
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ev4l2devreg:
+ free_irq(irq, vou_dev);
+ereqirq:
+ iounmap(vou_dev->base);
+emap:
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ereqmemreg:
+ kfree(vou_dev);
+ return ret;
+}
+
+static int __devexit sh_vou_remove(struct platform_device *pdev)
+{
+ int irq = platform_get_irq(pdev, 0);
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+ struct sh_vou_device, v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct resource *reg_res;
+
+ if (irq > 0)
+ free_irq(irq, vou_dev);
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(vou_dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ iounmap(vou_dev->base);
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (reg_res)
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ kfree(vou_dev);
+ return 0;
+}
+
+static struct platform_driver __refdata sh_vou = {
+ .remove = __devexit_p(sh_vou_remove),
+ .driver = {
+ .name = "sh-vou",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_vou_init(void)
+{
+ return platform_driver_probe(&sh_vou, sh_vou_probe);
+}
+
+static void __exit sh_vou_exit(void)
+{
+ platform_driver_unregister(&sh_vou);
+}
+
+module_init(sh_vou_init);
+module_exit(sh_vou_exit);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh-vou");
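A minimal user-space sketch of how the ioctl handlers registered above would be exercised for output streaming. This is illustrative only: the device node, resolution and buffer count are assumptions, and error handling is abbreviated.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);		/* assumed node */
		v4l2_std_id std = V4L2_STD_NTSC_M;
		struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
		struct v4l2_requestbuffers req = {
			.count = 4, .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
			.memory = V4L2_MEMORY_MMAP,
		};
		enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		unsigned int i;

		ioctl(fd, VIDIOC_S_STD, &std);			/* sh_vou_s_std() */

		fmt.fmt.pix.width = 720;			/* driver may adjust */
		fmt.fmt.pix.height = 480;
		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YVYU;
		fmt.fmt.pix.field = V4L2_FIELD_NONE;
		ioctl(fd, VIDIOC_S_FMT, &fmt);			/* sh_vou_s_fmt_vid_out() */

		ioctl(fd, VIDIOC_REQBUFS, &req);		/* sh_vou_reqbufs() */

		for (i = 0; i < req.count; i++) {
			struct v4l2_buffer buf = {
				.index = i, .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
				.memory = V4L2_MEMORY_MMAP,
			};
			void *p;

			ioctl(fd, VIDIOC_QUERYBUF, &buf);	/* sh_vou_querybuf() */
			p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, buf.m.offset);	/* sh_vou_mmap() */
			memset(p, 0, buf.length);		/* fill with video data here */
			ioctl(fd, VIDIOC_QBUF, &buf);		/* sh_vou_qbuf() */
		}

		ioctl(fd, VIDIOC_STREAMON, &type);		/* sh_vou_streamon() */
		sleep(1);					/* ... stream ... */
		close(fd);
		return 0;
	}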
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index cbf8087b286..28e19daadec 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2295,7 +2295,7 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2305,7 +2305,9 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
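The hunk above is the usual fix for a linear search whose "not found" case was silently ignored: brace the loop body and, after the loop, test whether the index ran off the end. A generic sketch of the idiom (names are illustrative, not from the driver):

	#include <stddef.h>

	static int find_id(const unsigned int *ids, size_t n, unsigned int wanted)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (ids[i] == wanted)
				break;		/* found: i is the matching index */
		}
		if (i == n)
			return -1;		/* ran off the end: not found */
		return (int)i;
	}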
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index cc40d6ba9f2..522ba3f4c28 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -40,12 +40,12 @@ struct sn9c102_device;
static const struct usb_device_id sn9c102_id_table[] = {
/* SN9C101 and SN9C102 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6001, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6005, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6007, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
@@ -53,13 +53,13 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
#endif
@@ -74,7 +74,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
@@ -86,7 +86,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
@@ -97,7 +97,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
/* SN9C105 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
@@ -121,11 +121,11 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
#endif
diff --git a/drivers/media/video/sn9c102/sn9c102_hv7131d.c b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
index db243494893..2dce5c908c8 100644
--- a/drivers/media/video/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
@@ -255,7 +255,7 @@ int sn9c102_probe_hv7131d(struct sn9c102_device* cam)
if (err || r0 < 0 || r1 < 0)
return -EIO;
- if (r0 != 0x00 || r1 != 0x04)
+ if ((r0 != 0x00 && r0 != 0x01) || r1 != 0x04)
return -ENODEV;
sn9c102_attach_sensor(cam, &hv7131d);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index a24174ddec4..db1ca0e90d7 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/vmalloc.h>
#include <media/soc_camera.h>
@@ -388,6 +389,11 @@ static int soc_camera_open(struct file *file)
goto eiciadd;
}
+ pm_runtime_enable(&icd->vdev->dev);
+ ret = pm_runtime_resume(&icd->vdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
/*
* Try to configure with default parameters. Notice: this is the
* very first open, so, we cannot race against other calls,
@@ -409,10 +415,12 @@ static int soc_camera_open(struct file *file)
return 0;
/*
- * First five errors are entered with the .video_lock held
+ * First four errors are entered with the .video_lock held
* and use_count == 1
*/
esfmt:
+ pm_runtime_disable(&icd->vdev->dev);
+eresume:
ici->ops->remove(icd);
eiciadd:
if (icl->power)
@@ -437,7 +445,11 @@ static int soc_camera_close(struct file *file)
if (!icd->use_count) {
struct soc_camera_link *icl = to_soc_camera_link(icd);
+ pm_runtime_suspend(&icd->vdev->dev);
+ pm_runtime_disable(&icd->vdev->dev);
+
ici->ops->remove(icd);
+
if (icl->power)
icl->power(icd->pdev, 0);
}
@@ -741,8 +753,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
/*
* According to the V4L2 API, drivers shall not update the struct v4l2_crop
* argument with the actual geometry, instead, the user shall use G_CROP to
- * retrieve it. However, we expect camera host and client drivers to update
- * the argument, which we then use internally, but do not return to the user.
+ * retrieve it.
*/
static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
@@ -1319,6 +1330,7 @@ static int video_dev_create(struct soc_camera_device *icd)
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
+ struct device_type *type = icd->vdev->dev.type;
int ret;
if (!icd->dev.parent)
@@ -1335,6 +1347,9 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
return ret;
}
+ /* Restore device type, possibly set by the subdevice driver */
+ icd->vdev->dev.type = type;
+
return 0;
}
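The soc_camera changes above pair pm_runtime_enable()/pm_runtime_resume() on first open with pm_runtime_suspend()/pm_runtime_disable() on last close, tolerating -ENOSYS from resume when runtime PM is not implemented. A condensed sketch of that pairing, using a hypothetical open/close pair rather than the soc_camera code itself:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/pm_runtime.h>

	static int example_open(struct device *dev)
	{
		int ret;

		pm_runtime_enable(dev);
		ret = pm_runtime_resume(dev);
		if (ret < 0 && ret != -ENOSYS) {	/* -ENOSYS: no runtime PM, carry on */
			pm_runtime_disable(dev);
			return ret;
		}
		return 0;
	}

	static void example_close(struct device *dev)
	{
		pm_runtime_suspend(dev);
		pm_runtime_disable(dev);
	}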
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index ebd9cb5bec7..edd78f8b1ba 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -97,15 +97,17 @@ open_out:
return ret;
}
+#ifdef CONFIG_PM
static void poseidon_fe_release(struct dvb_frontend *fe)
{
struct poseidon *pd = fe->demodulator_priv;
-#ifdef CONFIG_PM
pd->pm_suspend = NULL;
pd->pm_resume = NULL;
-#endif
}
+#else
+#define poseidon_fe_release NULL
+#endif
static s32 poseidon_fe_sleep(struct dvb_frontend *fe)
{
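The pd-dvb change above uses a common kernel idiom: compile the callback only under CONFIG_PM and otherwise define its name to NULL, so the ops structure can reference it unconditionally. A minimal sketch of the pattern with hypothetical names:

	struct example_dev {
		void (*pm_suspend)(struct example_dev *dev);
		void (*pm_resume)(struct example_dev *dev);
	};

	#ifdef CONFIG_PM
	static void example_release(struct example_dev *dev)
	{
		dev->pm_suspend = NULL;		/* undo what the PM code installed */
		dev->pm_resume = NULL;
	}
	#else
	#define example_release NULL		/* the ops slot can simply stay NULL */
	#endif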
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 2cf0ebf9f28..256cc558ba1 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -24,7 +24,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -55,8 +54,8 @@ int debug_mode;
module_param(debug_mode, int, 0644);
MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose");
-const char *firmware_name = "tlg2300_firmware.bin";
-struct usb_driver poseidon_driver;
+static const char *firmware_name = "tlg2300_firmware.bin";
+static struct usb_driver poseidon_driver;
static LIST_HEAD(pd_device_list);
/*
@@ -455,8 +454,8 @@ static int poseidon_probe(struct usb_interface *interface,
device_init_wakeup(&udev->dev, 1);
#ifdef CONFIG_PM
- pd->udev->autosuspend_disabled = 0;
pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY;
+ usb_enable_autosuspend(pd->udev);
if (in_hibernation(pd)) {
INIT_WORK(&pd->pm_work, hibernation_resume);
@@ -501,7 +500,7 @@ static void poseidon_disconnect(struct usb_interface *interface)
kref_put(&pd->kref, poseidon_delete);
}
-struct usb_driver poseidon_driver = {
+static struct usb_driver poseidon_driver = {
.name = "poseidon",
.probe = poseidon_probe,
.disconnect = poseidon_disconnect,
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index 755766b1515..fae84c2a0c3 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -161,7 +161,8 @@ static const struct v4l2_file_operations poseidon_fm_fops = {
.ioctl = video_ioctl2,
};
-int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+static int tlg_fm_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
{
struct tuner_fm_sig_stat_s fm_stat = {};
int ret, status, count = 5;
@@ -203,7 +204,8 @@ int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
return 0;
}
-int fm_get_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_get_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -246,7 +248,8 @@ error:
return ret;
}
-int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_set_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -258,13 +261,13 @@ int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
return set_frequency(p, argp->frequency);
}
-int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *arg)
{
return 0;
}
-int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
struct poseidon *p = file->private_data;
@@ -285,7 +288,7 @@ int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
int i;
@@ -312,13 +315,13 @@ int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
return 0;
}
-int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *ctrl)
{
if (!(ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL))
@@ -337,7 +340,7 @@ int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
return -EINVAL;
}
-int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
+static int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
struct v4l2_querymenu *qmenu)
{
return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index cf8f18c007e..d0cc012f7ae 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -12,11 +12,13 @@
#include "pd-common.h"
#include "vendorcmds.h"
+#ifdef CONFIG_PM
static int pm_video_suspend(struct poseidon *pd);
static int pm_video_resume(struct poseidon *pd);
+#endif
static void iso_bubble_handler(struct work_struct *w);
-int usb_transfer_mode;
+static int usb_transfer_mode;
module_param(usb_transfer_mode, int, 0644);
MODULE_PARM_DESC(usb_transfer_mode, "0 = Bulk, 1 = Isochronous");
@@ -476,10 +478,10 @@ static int prepare_iso_urb(struct video_data *video)
goto out;
video->urb_array[i] = urb;
- mem = usb_buffer_alloc(udev,
- ISO_PKT_SIZE * PK_PER_URB,
- GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev,
+ ISO_PKT_SIZE * PK_PER_URB,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->complete = urb_complete_iso; /* handler */
urb->dev = udev;
@@ -519,8 +521,8 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
if (urb == NULL)
return i;
- mem = usb_buffer_alloc(udev, buf_size, gfp_flags,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
+ &urb->transfer_dma);
if (mem == NULL)
return i;
@@ -540,7 +542,7 @@ void free_all_urb_generic(struct urb **urb_array, int num)
for (i = 0; i < num; i++) {
urb = urb_array[i];
if (urb) {
- usb_buffer_free(urb->dev,
+ usb_free_coherent(urb->dev,
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
@@ -617,7 +619,7 @@ static int pd_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return 0;
}
-int fire_all_urb(struct video_data *video)
+static int fire_all_urb(struct video_data *video)
{
int i, ret;
@@ -877,7 +879,7 @@ out:
return ret;
}
-int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
struct front_face *front = fh;
logs(front);
@@ -1020,7 +1022,7 @@ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
a->index = 0;
a->capability = V4L2_AUDCAP_STEREO;
@@ -1029,7 +1031,7 @@ int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
return (0 == a->index) ? 0 : -EINVAL;
}
@@ -1189,7 +1191,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
}
/* Just stop the URBs, do not free the URBs */
-int usb_transfer_stop(struct video_data *video)
+static int usb_transfer_stop(struct video_data *video)
{
if (video->is_streaming) {
int i;
@@ -1518,13 +1520,13 @@ static int pd_video_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&front->q, vma);
}
-unsigned int pd_video_poll(struct file *file, poll_table *table)
+static unsigned int pd_video_poll(struct file *file, poll_table *table)
{
struct front_face *front = file->private_data;
return videobuf_poll_stream(file, &front->q, table);
}
-ssize_t pd_video_read(struct file *file, char __user *buffer,
+static ssize_t pd_video_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct front_face *front = file->private_data;
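Several hunks above mechanically rename usb_buffer_alloc()/usb_buffer_free() to usb_alloc_coherent()/usb_free_coherent(); the arguments are unchanged. A stripped-down sketch of the allocate/free pattern around an URB (hypothetical helper names, error handling abbreviated):

	#include <linux/errno.h>
	#include <linux/usb.h>

	static int example_alloc(struct usb_device *udev, struct urb *urb,
				 size_t size, gfp_t gfp)
	{
		void *buf = usb_alloc_coherent(udev, size, gfp, &urb->transfer_dma);

		if (!buf)
			return -ENOMEM;
		urb->transfer_buffer = buf;
		urb->transfer_buffer_length = size;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	/* DMA address supplied */
		return 0;
	}

	static void example_free(struct usb_device *udev, struct urb *urb)
	{
		usb_free_coherent(udev, urb->transfer_buffer_length,
				  urb->transfer_buffer, urb->transfer_dma);
	}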
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index e4815a1806e..e826114b7fb 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -79,6 +79,8 @@ struct tvp514x_std_info {
};
static struct tvp514x_reg tvp514x_reg_list_default[0x40];
+
+static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
/**
* struct tvp514x_decoder - TVP5146/47 decoder object
* @sd: Subdevice Slave handle
@@ -644,6 +646,17 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
/* Index out of bound */
return -EINVAL;
+ /*
+ * For the sequence streamon -> streamoff and again s_input
+ * it fails to lock the signal, since streamoff puts the TVP514x
+ * into power-off state, which leads to failure in the subsequent s_input.
+ *
+ * So power up the TVP514x device here, since it is important to lock
+ * the signal at this stage.
+ */
+ if (!decoder->streaming)
+ tvp514x_s_stream(sd, 1);
+
input_sel = input;
output_sel = output;
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 908ffb68e92..47f0582d50a 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -891,29 +891,26 @@ static int tvp5150_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ /* this is for capturing 36 raw vbi lines
+ if there's a way to cut off the beginning 2 vbi lines
+ with the tvp5150 then the vbi line count could be lowered
+ to 17 lines/field again, although I couldn't find a register
+ which could do that cropping */
+ if (fmt->sample_format == V4L2_PIX_FMT_GREY)
+ tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
+ if (fmt->count[0] == 18 && fmt->count[1] == 18) {
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
+ }
+ return 0;
+}
+
+static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
- struct v4l2_sliced_vbi_format *svbi;
int i;
- /* raw vbi */
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* this is for capturing 36 raw vbi lines
- if there's a way to cut off the beginning 2 vbi lines
- with the tvp5150 then the vbi line count could be lowered
- to 17 lines/field again, although I couldn't find a register
- which could do that cropping */
- if (fmt->fmt.vbi.sample_format == V4L2_PIX_FMT_GREY)
- tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
- if (fmt->fmt.vbi.count[0] == 18 && fmt->fmt.vbi.count[1] == 18) {
- tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
- tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
- }
- return 0;
- }
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
if (svbi->service_set != 0) {
for (i = 0; i <= 23; i++) {
svbi->service_lines[1][i] = 0;
@@ -937,14 +934,21 @@ static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct v4l2_sliced_vbi_format *svbi;
- int i, mask = 0;
+ switch (fmt->type) {
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ return tvp5150_s_sliced_fmt(sd, &fmt->fmt.sliced);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ default:
return -EINVAL;
- svbi = &fmt->fmt.sliced;
+ }
+}
+
+static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ int i, mask = 0;
+
memset(svbi, 0, sizeof(*svbi));
for (i = 0; i <= 23; i++) {
@@ -956,6 +960,12 @@ static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return tvp5150_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
static int tvp5150_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
@@ -1046,13 +1056,20 @@ static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_routing = tvp5150_s_routing,
.g_fmt = tvp5150_g_fmt,
.s_fmt = tvp5150_s_fmt,
+};
+
+static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
.g_sliced_vbi_cap = tvp5150_g_sliced_vbi_cap,
+ .g_sliced_fmt = tvp5150_g_sliced_fmt,
+ .s_sliced_fmt = tvp5150_s_sliced_fmt,
+ .s_raw_fmt = tvp5150_s_raw_fmt,
};
static const struct v4l2_subdev_ops tvp5150_ops = {
.core = &tvp5150_core_ops,
.tuner = &tvp5150_tuner_ops,
.video = &tvp5150_video_ops,
+ .vbi = &tvp5150_vbi_ops,
};
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 4a69bcc738f..8085ac39244 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -458,7 +458,7 @@ static inline struct tvp7002 *to_tvp7002(struct v4l2_subdev *sd)
/*
* tvp7002_read - Read a value from a register in an TVP7002
* @sd: ptr to v4l2_subdev struct
- * @reg: TVP7002 register address
+ * @addr: TVP7002 register address
* @dst: pointer to 8-bit destination
*
* Returns value read if successful, or non-zero (-1) otherwise.
@@ -488,7 +488,7 @@ static int tvp7002_read(struct v4l2_subdev *sd, u8 addr, u8 *dst)
* @sd: pointer to standard V4L2 sub-device structure
* @reg: destination register
* @val: value to be read
- * @error: pointer to error value
+ * @err: pointer to error value
*
* Read a value in a register and save error value in pointer.
* Also update the register table if successful
@@ -535,7 +535,7 @@ static int tvp7002_write(struct v4l2_subdev *sd, u8 addr, u8 value)
* @sd: pointer to standard V4L2 sub-device structure
* @reg: destination register
* @val: value to be written
- * @error: pointer to error value
+ * @err: pointer to error value
*
* Write a value in a register and save error value in pointer.
* Also update the register table if successful
@@ -596,7 +596,7 @@ static int tvp7002_write_inittab(struct v4l2_subdev *sd,
/*
* tvp7002_s_dv_preset() - Set digital video preset
* @sd: ptr to v4l2_subdev struct
- * @std: ptr to v4l2_dv_preset struct
+ * @dv_preset: ptr to v4l2_dv_preset struct
*
* Set the digital video preset for a TVP7002 decoder device.
* Returns zero when successful or -EINVAL if register access fails.
@@ -676,7 +676,7 @@ static int tvp7002_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
/*
* tvp7002_queryctrl() - Query a control
* @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_queryctrl struct
+ * @qc: ptr to v4l2_queryctrl struct
*
* Query a control of a TVP7002 decoder device.
* Returns zero when successful or -EINVAL if register read fails.
@@ -776,7 +776,7 @@ static int tvp7002_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
/*
* tvp7002_query_dv_preset() - query DV preset
* @sd: pointer to standard V4L2 sub-device structure
- * @std_id: standard V4L2 v4l2_dv_preset
+ * @qpreset: standard V4L2 v4l2_dv_preset structure
*
* Returns the current DV preset by TVP7002. If no active input is
* detected, returns -EINVAL
@@ -785,7 +785,6 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
struct v4l2_dv_preset *qpreset)
{
const struct tvp7002_preset_definition *presets = tvp7002_presets;
- struct v4l2_dv_enum_preset e_preset;
struct tvp7002 *device;
u8 progressive;
u32 lpfr;
@@ -828,20 +827,18 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
}
if (index == NUM_PRESETS) {
- v4l2_err(sd, "querystd error, lpf = %x, cpl = %x\n",
+ v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
lpfr, cpln);
- return -EINVAL;
+ /* Could not detect a signal, so return the 'invalid' preset */
+ qpreset->preset = V4L2_DV_INVALID;
+ return 0;
}
- if (v4l_fill_dv_preset_info(presets->preset, &e_preset))
- return -EINVAL;
-
/* Set values in found preset */
qpreset->preset = presets->preset;
/* Update lines per frame and clocks per line info */
- v4l2_dbg(1, debug, sd, "Current preset: %d %d",
- e_preset.width, e_preset.height);
+ v4l2_dbg(1, debug, sd, "detected preset: %d\n", presets->preset);
return 0;
}
@@ -849,7 +846,7 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
/*
* tvp7002_g_register() - Get the value of a register
* @sd: ptr to v4l2_subdev struct
- * @vreg: ptr to v4l2_dbg_register struct
+ * @reg: ptr to v4l2_dbg_register struct
*
* Get the value of a TVP7002 decoder device register.
* Returns zero when successful, -EINVAL if register read fails or
@@ -876,7 +873,7 @@ static int tvp7002_g_register(struct v4l2_subdev *sd,
/*
* tvp7002_s_register() - set a control
* @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_control struct
+ * @reg: ptr to v4l2_dbg_register struct
*
* Get the value of a TVP7002 decoder device register.
* Returns zero when successful, -EINVAL if register read fails or
@@ -899,7 +896,7 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
/*
* tvp7002_enum_fmt() - Enum supported formats
* @sd: pointer to standard V4L2 sub-device structure
- * @enable: pointer to format struct
+ * @fmtdesc: pointer to format struct
*
* Enumerate supported formats.
*/
@@ -994,6 +991,23 @@ static int tvp7002_log_status(struct v4l2_subdev *sd)
return 0;
}
+/*
+ * tvp7002_enum_dv_presets() - Enum supported digital video formats
+ * @sd: pointer to standard V4L2 sub-device structure
+ * @preset: pointer to format struct
+ *
+ * Enumerate supported digital video formats.
+ */
+static int tvp7002_enum_dv_presets(struct v4l2_subdev *sd,
+ struct v4l2_dv_enum_preset *preset)
+{
+ /* Check requested format index is within range */
+ if (preset->index >= NUM_PRESETS)
+ return -EINVAL;
+
+ return v4l_fill_dv_preset_info(tvp7002_presets[preset->index].preset, preset);
+}
+
/* V4L2 core operation handlers */
static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
.g_chip_ident = tvp7002_g_chip_ident,
@@ -1009,6 +1023,7 @@ static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
/* Specific video subsystem operation handlers */
static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
+ .enum_dv_presets = tvp7002_enum_dv_presets,
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
.s_stream = tvp7002_s_stream,
@@ -1042,8 +1057,8 @@ static struct tvp7002 tvp7002_dev = {
/*
* tvp7002_probe - Probe a TVP7002 device
- * @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to i2c_device_id struct
+ * @c: ptr to i2c_client struct
+ * @id: ptr to i2c_device_id struct
*
* Initialize the TVP7002 device
* Returns zero when successful, -EINVAL if register read fails or
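The new tvp7002_enum_dv_presets() handler above backs the then-current VIDIOC_ENUM_DV_PRESETS ioctl. A small user-space sketch of enumerating the presets it reports (the device node is an assumption, error handling abbreviated):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* assumed node */
		struct v4l2_dv_enum_preset p;
		unsigned int i;

		for (i = 0; ; i++) {
			memset(&p, 0, sizeof(p));
			p.index = i;
			if (ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &p) < 0)
				break;			/* past the last preset */
			printf("%u: %s (%ux%u)\n", p.index,
			       (char *)p.name, p.width, p.height);
		}
		close(fd);
		return 0;
	}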
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index fab48ec6c0e..fbd665fa197 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -693,12 +693,13 @@ static int qcm_start_data(struct uvd *uvd)
static void qcm_stop_data(struct uvd *uvd)
{
- struct qcm *cam = (struct qcm *) uvd->user_data;
+ struct qcm *cam;
int i, j;
int ret;
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
+ cam = (struct qcm *) uvd->user_data;
ret = qcm_camera_off(uvd);
if (ret)
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f7aae229375..b9dd74fde21 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -2493,10 +2493,10 @@ int usbvision_init_isoc(struct usb_usbvision *usbvision)
}
usbvision->sbuf[bufIdx].urb = urb;
usbvision->sbuf[bufIdx].data =
- usb_buffer_alloc(usbvision->dev,
- sb_size,
- GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(usbvision->dev,
+ sb_size,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->dev = dev;
urb->context = usbvision;
urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
@@ -2552,10 +2552,10 @@ void usbvision_stop_isoc(struct usb_usbvision *usbvision)
for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
usb_kill_urb(usbvision->sbuf[bufIdx].urb);
if (usbvision->sbuf[bufIdx].data){
- usb_buffer_free(usbvision->dev,
- sb_size,
- usbvision->sbuf[bufIdx].data,
- usbvision->sbuf[bufIdx].urb->transfer_dma);
+ usb_free_coherent(usbvision->dev,
+ sb_size,
+ usbvision->sbuf[bufIdx].data,
+ usbvision->sbuf[bufIdx].urb->transfer_dma);
}
usb_free_urb(usbvision->sbuf[bufIdx].urb);
usbvision->sbuf[bufIdx].urb = NULL;
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 083765238a6..42ba2878575 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -244,6 +244,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
switch (usbvision_device_data[usbvision->DevModel].Codec) {
case CODEC_SAA7113:
case CODEC_SAA7111:
+ /* Without this delay the detection of the saa711x is
+ hit-and-miss. */
+ mdelay(10);
v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
&usbvision->i2c_adap, "saa7115",
"saa7115_auto", 0, saa711x_addrs);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 7c17ec63c5d..6248a639ba2 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -137,8 +137,6 @@ static int PowerOnAtOpen = 1;
static int video_nr = -1;
/* Sequential Number of Radio Device */
static int radio_nr = -1;
-/* Sequential Number of VBI Device */
-static int vbi_nr = -1;
/* Grab parameters for the device driver */
@@ -148,14 +146,12 @@ module_param(video_debug, int, 0444);
module_param(PowerOnAtOpen, int, 0444);
module_param(video_nr, int, 0444);
module_param(radio_nr, int, 0444);
-module_param(vbi_nr, int, 0444);
MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
MODULE_PARM_DESC(PowerOnAtOpen, " Set the default device to power on when device is opened. Default: 1 (On)");
MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)");
// Misc stuff
@@ -1244,36 +1240,6 @@ static int usbvision_radio_close(struct file *file)
return errCode;
}
-/*
- * Here comes the stuff for vbi on usbvision based devices
- *
- */
-static int usbvision_vbi_open(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static int usbvision_vbi_close(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static long usbvision_do_vbi_ioctl(struct file *file,
- unsigned int cmd, void *arg)
-{
- /* TODO */
- return -ENOIOCTLCMD;
-}
-
-static long usbvision_vbi_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, usbvision_do_vbi_ioctl);
-}
-
-
//
// Video registration stuff
//
@@ -1367,21 +1333,6 @@ static struct video_device usbvision_radio_template = {
.current_norm = V4L2_STD_PAL
};
-// vbi template
-static const struct v4l2_file_operations usbvision_vbi_fops = {
- .owner = THIS_MODULE,
- .open = usbvision_vbi_open,
- .release = usbvision_vbi_close,
- .ioctl = usbvision_vbi_ioctl,
-};
-
-static struct video_device usbvision_vbi_template=
-{
- .fops = &usbvision_vbi_fops,
- .release = video_device_release,
- .name = "usbvision-vbi",
-};
-
static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
struct video_device *vdev_template,
@@ -1410,18 +1361,6 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
// unregister video4linux devices
static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
- // vbi Device:
- if (usbvision->vbi) {
- PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(usbvision->vbi));
- if (video_is_registered(usbvision->vbi)) {
- video_unregister_device(usbvision->vbi);
- } else {
- video_device_release(usbvision->vbi);
- }
- usbvision->vbi = NULL;
- }
-
// Radio Device:
if (usbvision->rdev) {
PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
@@ -1482,22 +1421,6 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
usbvision->nr, video_device_node_name(usbvision->rdev));
}
- // vbi Device:
- if (usbvision_device_data[usbvision->DevModel].vbi) {
- usbvision->vbi = usbvision_vdev_init(usbvision,
- &usbvision_vbi_template,
- "USBVision VBI");
- if (usbvision->vbi == NULL) {
- goto err_exit;
- }
- if (video_register_device(usbvision->vbi,
- VFL_TYPE_VBI,
- vbi_nr)<0) {
- goto err_exit;
- }
- printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device %s [v4l2] (Not Working Yet!)\n",
- usbvision->nr, video_device_node_name(usbvision->vbi));
- }
// all done
return 0;
@@ -1726,8 +1649,6 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
usbvision_configure_video(usbvision);
mutex_unlock(&usbvision->lock);
-
- usb_set_intfdata (intf, usbvision);
usbvision_create_sysfs(usbvision->vdev);
PDEBUG(DBG_PROBE, "success");
@@ -1745,7 +1666,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
*/
static void __devexit usbvision_disconnect(struct usb_interface *intf)
{
- struct usb_usbvision *usbvision = usb_get_intfdata(intf);
+ struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
PDEBUG(DBG_PROBE, "");
@@ -1754,7 +1675,6 @@ static void __devexit usbvision_disconnect(struct usb_interface *intf)
"%s: usb_get_intfdata() failed\n", __func__);
return;
}
- usb_set_intfdata (intf, NULL);
mutex_lock(&usbvision->lock);
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index f8d7458daf3..d1b3cc0cd87 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -360,7 +360,6 @@ struct usb_usbvision {
struct v4l2_device v4l2_dev;
struct video_device *vdev; /* Video Device */
struct video_device *rdev; /* Radio Device */
- struct video_device *vbi; /* VBI Device */
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
@@ -463,6 +462,11 @@ struct usb_usbvision {
int ComprBlockTypes[4];
};
+static inline struct usb_usbvision *to_usbvision(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct usb_usbvision, v4l2_dev);
+}
+
#define call_all(usbvision, o, f, args...) \
v4l2_device_call_all(&usbvision->v4l2_dev, 0, o, f, ##args)
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6d3850b3716..aa0720af07a 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -217,8 +217,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL,
.index = 4,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_RESTORE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -233,8 +232,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_FOCUS_RELATIVE_CONTROL,
.index = 6,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -249,8 +249,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.index = 8,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -265,8 +264,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.index = 10,
.size = 3,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -281,8 +281,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -297,8 +298,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ROLL_RELATIVE_CONTROL,
.index = 14,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -562,6 +564,26 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
{
+ .id = V4L2_CID_IRIS_ABSOLUTE,
+ .name = "Iris, Absolute",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
+ .size = 16,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ },
+ {
+ .id = V4L2_CID_IRIS_RELATIVE,
+ .name = "Iris, Relative",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_RELATIVE_CONTROL,
+ .size = 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ },
+ {
.id = V4L2_CID_ZOOM_ABSOLUTE,
.name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
@@ -822,6 +844,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
+ if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -1047,6 +1071,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ if (step == 0)
+ step = 1;
xctrl->value = min + (xctrl->value - min + step/2) / step * step;
xctrl->value = clamp(xctrl->value, min, max);
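The guard added above matters because the rounding that follows divides by the control's GET_RES value, which buggy devices may report as zero. A standalone sketch of the same round-to-step-then-clamp arithmetic (plain C, not driver code; the helper name is made up):

/* Sketch of the rounding done in uvc_ctrl_set(): snap 'value' to the nearest
 * multiple of 'step' counted from 'min', then clamp into [min, max].
 * A zero step falls back to 1, mirroring the fix above. */
static int round_to_step(int value, int min, int max, int step)
{
        if (step == 0)
                step = 1;
        value = min + (value - min + step / 2) / step * step;
        if (value < min)
                value = min;
        if (value > max)
                value = max;
        return value;
}

/* Example: round_to_step(37, 0, 255, 16) == 32; round_to_step(40, 0, 255, 16) == 48. */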
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 86ff8c12ea5..838b56f097c 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -91,11 +91,16 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_UYVY,
},
{
- .name = "Greyscale",
+ .name = "Greyscale (8-bit)",
.guid = UVC_GUID_FORMAT_Y800,
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "Greyscale (16-bit)",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
.name = "RGB Bayer",
.guid = UVC_GUID_FORMAT_BY8,
.fcc = V4L2_PIX_FMT_SBGGR8,
@@ -2105,6 +2110,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Syntek (Packard Bell EasyNote MX52) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x174f,
+ .idProduct = 0x8a12,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Syntek (Asus F9SG) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2169,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX },
+ /* Arkmicro unbranded */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x18ec,
+ .idProduct = 0x3290,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 4a925a31b0e..133c78d113a 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -388,8 +388,12 @@ unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_wait(file, &buf->wait, wait);
if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
- mask |= POLLIN | POLLRDNORM;
+ buf->state == UVC_BUF_STATE_ERROR) {
+ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ mask |= POLLIN | POLLRDNORM;
+ else
+ mask |= POLLOUT | POLLWRNORM;
+ }
done:
mutex_unlock(&queue->mutex);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 821a9969b7b..53f3ef4635e 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -739,7 +739,7 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
for (i = 0; i < UVC_URBS; ++i) {
if (stream->urb_buffer[i]) {
- usb_buffer_free(stream->dev->udev, stream->urb_size,
+ usb_free_coherent(stream->dev->udev, stream->urb_size,
stream->urb_buffer[i], stream->urb_dma[i]);
stream->urb_buffer[i] = NULL;
}
@@ -780,7 +780,7 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
for (; npackets > 1; npackets /= 2) {
for (i = 0; i < UVC_URBS; ++i) {
stream->urb_size = psize * npackets;
- stream->urb_buffer[i] = usb_buffer_alloc(
+ stream->urb_buffer[i] = usb_alloc_coherent(
stream->dev->udev, stream->urb_size,
gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
if (!stream->urb_buffer[i]) {
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 2bba059259e..d1f88406a5e 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -131,11 +131,13 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_Y800 \
{ 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_BY8 \
{ 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 36b5cb86fb5..4e53b0b3339 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -51,6 +51,9 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -85,10 +88,9 @@ MODULE_LICENSE("GPL");
val == V4L2_PRIORITY_INTERACTIVE || \
val == V4L2_PRIORITY_RECORD)
-int v4l2_prio_init(struct v4l2_prio_state *global)
+void v4l2_prio_init(struct v4l2_prio_state *global)
{
- memset(global,0,sizeof(*global));
- return 0;
+ memset(global, 0, sizeof(*global));
}
EXPORT_SYMBOL(v4l2_prio_init);
@@ -108,17 +110,16 @@ int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
}
EXPORT_SYMBOL(v4l2_prio_change);
-int v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
+void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
{
- return v4l2_prio_change(global,local,V4L2_PRIORITY_DEFAULT);
+ v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
}
EXPORT_SYMBOL(v4l2_prio_open);
-int v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority *local)
+void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
{
- if (V4L2_PRIO_VALID(*local))
- atomic_dec(&global->prios[*local]);
- return 0;
+ if (V4L2_PRIO_VALID(local))
+ atomic_dec(&global->prios[local]);
}
EXPORT_SYMBOL(v4l2_prio_close);
@@ -134,11 +135,9 @@ enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
}
EXPORT_SYMBOL(v4l2_prio_max);
-int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local)
+int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
{
- if (*local < v4l2_prio_max(global))
- return -EBUSY;
- return 0;
+ return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
}
EXPORT_SYMBOL(v4l2_prio_check);
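The prototype changes above turn the priority helpers that cannot fail into void functions and pass the priority by value where it is only read. A hedged sketch of how a driver's file-handle code would use the reworked API (struct my_dev and struct my_prio_fh are illustrative, not taken from any real driver):

#include <media/v4l2-common.h>

/* Hypothetical per-device and per-file-handle state (sketch only). */
struct my_dev {
        struct v4l2_prio_state prio_state;
};

struct my_prio_fh {
        enum v4l2_priority prio;
};

static void my_open_fh(struct my_dev *dev, struct my_prio_fh *fh)
{
        /* No return value to check any more after this patch. */
        v4l2_prio_open(&dev->prio_state, &fh->prio);
}

static int my_try_set_input(struct my_dev *dev, struct my_prio_fh *fh)
{
        /* Priority is now passed by value; -EBUSY means a higher-priority
         * file handle currently owns the device. */
        int ret = v4l2_prio_check(&dev->prio_state, fh->prio);

        if (ret)
                return ret;
        /* ... program the new input here ... */
        return 0;
}

static void my_close_fh(struct my_dev *dev, struct my_prio_fh *fh)
{
        v4l2_prio_close(&dev->prio_state, fh->prio);
}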
@@ -340,6 +339,13 @@ const char **v4l2_ctrl_get_menu(u32 id)
"None",
"Black & White",
"Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky blue",
+ "Grass green",
+ "Skin whiten",
+ "Vivid",
NULL
};
static const char *tune_preemphasis[] = {
@@ -429,10 +435,13 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_SHARPNESS: return "Sharpness";
case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_COLOR_KILLER: return "Color Killer";
case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background color";
+ case V4L2_CID_BG_COLOR: return "Background Color";
/* MPEG controls */
case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
@@ -483,6 +492,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
@@ -620,6 +631,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_BLUE_BALANCE:
case V4L2_CID_GAMMA:
case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
case V4L2_CID_RDS_TX_DEVIATION:
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
@@ -636,6 +648,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_PAN_RELATIVE:
case V4L2_CID_TILT_RELATIVE:
case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
case V4L2_CID_ZOOM_RELATIVE:
qctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
@@ -951,6 +964,66 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
#endif /* defined(CONFIG_I2C) */
+#if defined(CONFIG_SPI)
+
+/* Load a spi sub-device. */
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master, struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ BUG_ON(!v4l2_dev);
+
+ if (info->modalias)
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (spi == NULL || spi->dev.driver == NULL)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (spi && sd == NULL)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
+
+#endif /* defined(CONFIG_SPI) */
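v4l2_spi_new_subdev() follows the same pattern as the existing I2C helper: load the module named in the board info, create the spi_device and register the resulting sub-device, cleaning up the spi_device on failure. A hedged sketch of a bridge driver calling it; the modalias, clock rate and chip-select below are placeholders, not values from any real board:

#include <linux/spi/spi.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

/* Sketch: attach a hypothetical "imgsensor" SPI sub-device to a bridge. */
static struct v4l2_subdev *my_attach_spi_sensor(struct v4l2_device *v4l2_dev,
                                                struct spi_master *master)
{
        struct spi_board_info info = {
                .modalias     = "imgsensor",
                .max_speed_hz = 5000000,
                .chip_select  = 0,
                .mode         = SPI_MODE_0,
        };

        /* Returns NULL if the driver cannot be loaded or registration fails;
         * the helper already unregisters the spi_device in that case. */
        return v4l2_spi_new_subdev(v4l2_dev, master, &info);
}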
+
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index f77f84bfe71..9004a5fe764 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -1086,6 +1086,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_QUERY_DV_PRESET:
case VIDIOC_S_DV_TIMINGS:
case VIDIOC_G_DV_TIMINGS:
+ case VIDIOC_DQEVENT:
+ case VIDIOC_SUBSCRIBE_EVENT:
+ case VIDIOC_UNSUBSCRIBE_EVENT:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 70906991606..0ca7ec9ca90 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -421,6 +421,10 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
if (!vdev->release)
return -EINVAL;
+ /* v4l2_fh support */
+ spin_lock_init(&vdev->fh_lock);
+ INIT_LIST_HEAD(&vdev->fh_list);
+
/* Part 1: check device type */
switch (type) {
case VFL_TYPE_GRABBER:
@@ -596,9 +600,7 @@ void video_unregister_device(struct video_device *vdev)
if (!vdev || !video_is_registered(vdev))
return;
- mutex_lock(&videodev_lock);
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
- mutex_unlock(&videodev_lock);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 0d06e7cbd5b..5a7dc4afe92 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -21,6 +21,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -97,6 +100,14 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
i2c_unregister_device(client);
}
#endif
+#if defined(CONFIG_SPI)
+ if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi)
+ spi_unregister_device(spi);
+ }
+#endif
}
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
new file mode 100644
index 00000000000..de74ce07b5e
--- /dev/null
+++ b/drivers/media/video/v4l2-event.c
@@ -0,0 +1,292 @@
+/*
+ * v4l2-event.c
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+int v4l2_event_init(struct v4l2_fh *fh)
+{
+ fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
+ if (fh->events == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&fh->events->wait);
+
+ INIT_LIST_HEAD(&fh->events->free);
+ INIT_LIST_HEAD(&fh->events->available);
+ INIT_LIST_HEAD(&fh->events->subscribed);
+
+ fh->events->sequence = -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_init);
+
+int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
+{
+ struct v4l2_events *events = fh->events;
+ unsigned long flags;
+
+ if (!events) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ while (events->nallocated < n) {
+ struct v4l2_kevent *kev;
+
+ kev = kzalloc(sizeof(*kev), GFP_KERNEL);
+ if (kev == NULL)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add_tail(&kev->list, &events->free);
+ events->nallocated++;
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_alloc);
+
+#define list_kfree(list, type, member) \
+ while (!list_empty(list)) { \
+ type *hi; \
+ hi = list_first_entry(list, type, member); \
+ list_del(&hi->member); \
+ kfree(hi); \
+ }
+
+void v4l2_event_free(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+
+ if (!events)
+ return;
+
+ list_kfree(&events->free, struct v4l2_kevent, list);
+ list_kfree(&events->available, struct v4l2_kevent, list);
+ list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
+
+ kfree(events);
+ fh->events = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_free);
+
+static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (list_empty(&events->available)) {
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return -ENOENT;
+ }
+
+ WARN_ON(events->navailable == 0);
+
+ kev = list_first_entry(&events->available, struct v4l2_kevent, list);
+ list_move(&kev->list, &events->free);
+ events->navailable--;
+
+ kev->event.pending = events->navailable;
+ *event = kev->event;
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ return 0;
+}
+
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking)
+{
+ struct v4l2_events *events = fh->events;
+ int ret;
+
+ if (nonblocking)
+ return __v4l2_event_dequeue(fh, event);
+
+ do {
+ ret = wait_event_interruptible(events->wait,
+ events->navailable != 0);
+ if (ret < 0)
+ return ret;
+
+ ret = __v4l2_event_dequeue(fh, event);
+ } while (ret == -ENOENT);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
+
+/* Caller must hold fh->vdev->fh_lock! */
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
+ struct v4l2_fh *fh, u32 type)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ list_for_each_entry(sev, &events->subscribed, list) {
+ if (sev->type == type)
+ return sev;
+ }
+
+ return NULL;
+}
+
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+ struct timespec timestamp;
+
+ ktime_get_ts(&timestamp);
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list) {
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+
+ /* Are we subscribed? */
+ if (!v4l2_event_subscribed(fh, ev->type))
+ continue;
+
+ /* Increase event sequence number on fh. */
+ events->sequence++;
+
+ /* Do we have any free events? */
+ if (list_empty(&events->free))
+ continue;
+
+ /* Take one and fill it. */
+ kev = list_first_entry(&events->free, struct v4l2_kevent, list);
+ kev->event.type = ev->type;
+ kev->event.u = ev->u;
+ kev->event.timestamp = timestamp;
+ kev->event.sequence = events->sequence;
+ list_move_tail(&kev->list, &events->available);
+
+ events->navailable++;
+
+ wake_up_all(&events->wait);
+ }
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
+
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->events->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (fh->events == NULL) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ sev = kmalloc(sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (v4l2_event_subscribed(fh, sub->type) == NULL) {
+ INIT_LIST_HEAD(&sev->list);
+ sev->type = sub->type;
+
+ list_add(&sev->list, &events->subscribed);
+ sev = NULL;
+ }
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
+
+static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ do {
+ sev = NULL;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ if (!list_empty(&events->subscribed)) {
+ sev = list_first_entry(&events->subscribed,
+ struct v4l2_subscribed_event, list);
+ list_del(&sev->list);
+ }
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kfree(sev);
+ } while (sev);
+}
+
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+ return 0;
+ }
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type);
+ if (sev != NULL)
+ list_del(&sev->list);
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
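A driver opts into this event machinery by implementing the subscribe/unsubscribe ioctl ops (usually thin wrappers around the helpers above) and calling v4l2_event_queue() when the condition occurs. A hedged sketch, assuming the ops receive the struct v4l2_fh the driver stored in file->private_data and using a made-up private event type:

#include <linux/string.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

/* Hypothetical private event type; drivers define their own numbering. */
#define MY_EVENT_EOS    0x08000001

static int my_subscribe_event(struct v4l2_fh *fh,
                              struct v4l2_event_subscription *sub)
{
        /* Reserve room for a handful of events, then subscribe. */
        int ret = v4l2_event_alloc(fh, 8);

        if (ret)
                return ret;
        return v4l2_event_subscribe(fh, sub);
}

static int my_unsubscribe_event(struct v4l2_fh *fh,
                                struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}

/* Called from wherever the driver detects the condition (IRQ, workqueue...). */
static void my_signal_end_of_stream(struct video_device *vdev)
{
        struct v4l2_event ev;

        memset(&ev, 0, sizeof(ev));
        ev.type = MY_EVENT_EOS;
        v4l2_event_queue(vdev, &ev);    /* wakes every subscribed file handle */
}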
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
new file mode 100644
index 00000000000..d78f184f40c
--- /dev/null
+++ b/drivers/media/video/v4l2-fh.c
@@ -0,0 +1,79 @@
+/*
+ * v4l2-fh.c
+ *
+ * V4L2 file handles.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/bitops.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+{
+ fh->vdev = vdev;
+ INIT_LIST_HEAD(&fh->list);
+ set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+
+ /*
+ * fh->events only needs to be initialized if the driver
+ * supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
+ */
+ if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
+ return v4l2_event_init(fh);
+
+ fh->events = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_init);
+
+void v4l2_fh_add(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&fh->list, &fh->vdev->fh_list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_add);
+
+void v4l2_fh_del(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_del_init(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_del);
+
+void v4l2_fh_exit(struct v4l2_fh *fh)
+{
+ if (fh->vdev == NULL)
+ return;
+
+ fh->vdev = NULL;
+
+ v4l2_event_free(fh);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_exit);
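A file handle participates in the video_device's fh_list only between v4l2_fh_add() and v4l2_fh_del(); v4l2_fh_init()/v4l2_fh_exit() manage the event state allocated for it. A minimal sketch of the intended pairing in a driver's open/release (the allocation strategy and naming are assumptions):

#include <linux/fs.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

static int my_open(struct file *filp)
{
        struct video_device *vdev = video_devdata(filp);
        struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
        int ret;

        if (!fh)
                return -ENOMEM;

        ret = v4l2_fh_init(fh, vdev);   /* allocates event state if needed */
        if (ret) {
                kfree(fh);
                return ret;
        }
        v4l2_fh_add(fh);                /* now visible to v4l2_event_queue() */
        filp->private_data = fh;
        return 0;
}

static int my_release(struct file *filp)
{
        struct v4l2_fh *fh = filp->private_data;

        v4l2_fh_del(fh);                /* unlink before tearing down */
        v4l2_fh_exit(fh);               /* frees the event state */
        kfree(fh);
        return 0;
}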
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7d59c107f13..0eeceae5032 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -26,6 +26,8 @@
#endif
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
#define dbgarg(cmd, fmt, arg...) \
@@ -291,6 +293,9 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
[_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
[_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
+ [_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
+ [_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
+ [_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -610,17 +615,33 @@ static long __video_do_ioctl(struct file *file,
void *fh = file->private_data;
long ret = -EINVAL;
+ if (ops == NULL) {
+ printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
+ vfd->name);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ /********************************************************
+ All other V4L1 calls are handled by v4l1_compat module.
+ Those calls will be translated into V4L2 calls, and
+ __video_do_ioctl will be called again, with one or more
+ V4L2 ioctls.
+ ********************************************************/
+ if (_IOC_TYPE(cmd) == 'v' && cmd != VIDIOCGMBUF &&
+ _IOC_NR(cmd) < BASE_VIDIOCPRIVATE) {
+ return v4l_compat_translate_ioctl(file, cmd, arg,
+ __video_do_ioctl);
+ }
+#endif
+
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
!(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
v4l_print_ioctl(vfd->name, cmd);
printk(KERN_CONT "\n");
}
- if (ops == NULL) {
- printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
- vfd->name);
- return -EINVAL;
- }
+ switch (cmd) {
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/***********************************************************
@@ -630,31 +651,21 @@ static long __video_do_ioctl(struct file *file,
***********************************************************/
/* --- streaming capture ------------------------------------- */
- if (cmd == VIDIOCGMBUF) {
+ case VIDIOCGMBUF:
+ {
struct video_mbuf *p = arg;
if (!ops->vidiocgmbuf)
- return ret;
+ break;
ret = ops->vidiocgmbuf(file, fh, p);
if (!ret)
dbgarg(cmd, "size=%d, frames=%d, offsets=0x%08lx\n",
p->size, p->frames,
(unsigned long)p->offsets);
- return ret;
+ break;
}
-
- /********************************************************
- All other V4L1 calls are handled by v4l1_compat module.
- Those calls will be translated into V4L2 calls, and
- __video_do_ioctl will be called again, with one or more
- V4L2 ioctls.
- ********************************************************/
- if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
- return v4l_compat_translate_ioctl(file, cmd, arg,
- __video_do_ioctl);
#endif
- switch (cmd) {
/* --- capabilities ------------------------------------------ */
case VIDIOC_QUERYCAP:
{
@@ -1072,7 +1083,7 @@ static long __video_do_ioctl(struct file *file,
id &= ~curr_id;
}
if (i <= index)
- return -EINVAL;
+ break;
v4l2_video_std_construct(p, curr_id, descr);
@@ -1597,7 +1608,7 @@ static long __video_do_ioctl(struct file *file,
v4l2_std_id std = vfd->current_norm;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
+ break;
ret = 0;
if (ops->vidioc_g_std)
@@ -1942,7 +1953,55 @@ static long __video_do_ioctl(struct file *file,
}
break;
}
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *ev = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+ ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
+ if (ret < 0) {
+ dbgarg(cmd, "no pending events?");
+ break;
+ }
+ dbgarg(cmd,
+ "pending=%d, type=0x%8.8x, sequence=%d, "
+ "timestamp=%lu.%9.9lu ",
+ ev->pending, ev->type, ev->sequence,
+ ev->timestamp.tv_sec, ev->timestamp.tv_nsec);
+ break;
+ }
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+
+ ret = ops->vidioc_subscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_unsubscribe_event)
+ break;
+
+ ret = ops->vidioc_unsubscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
default:
{
if (!ops->vidioc_default)
@@ -2006,7 +2065,7 @@ long video_ioctl2(struct file *file,
{
char sbuf[128];
void *mbuf = NULL;
- void *parg = NULL;
+ void *parg = (void *)arg;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
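With the three ioctls wired up above (and their compat32 entries), an application can subscribe and then block in VIDIOC_DQEVENT until the driver queues an event. A user-space sketch; the device node and event type are placeholders, and the driver must of course implement the subscribe op for these calls to succeed:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int wait_for_event(const char *devnode, unsigned int type)
{
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;
        int fd = open(devnode, O_RDWR);

        if (fd < 0)
                return -1;

        memset(&sub, 0, sizeof(sub));
        sub.type = type;
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0 ||
            ioctl(fd, VIDIOC_DQEVENT, &ev) < 0) {   /* blocks: no O_NONBLOCK */
                close(fd);
                return -1;
        }

        close(fd);
        return (int)ev.sequence;        /* per-handle sequence counter */
}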
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
new file mode 100644
index 00000000000..f45f9405ea3
--- /dev/null
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -0,0 +1,633 @@
+/*
+ * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ *
+ * Helper functions for devices that use videobuf buffers for both their
+ * source and destination.
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-core.h>
+#include <media/v4l2-mem2mem.h>
+
+MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
+ } while (0)
+
+
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED (1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING (1 << 1)
+
+
+/* Offset base for buffers on the destination queue - used to distinguish
+ * between source and destination buffers when mmapping - they receive the same
+ * offsets but for different queues */
+#define DST_QUEUE_OFF_BASE (1 << 30)
+
+
+/**
+ * struct v4l2_m2m_dev - per-device context
+ * @curr_ctx: currently running instance
+ * @job_queue: instances queued to run
+ * @job_spinlock: protects job_queue
+ * @m2m_ops: driver callbacks
+ */
+struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ struct list_head job_queue;
+ spinlock_t job_spinlock;
+
+ struct v4l2_m2m_ops *m2m_ops;
+};
+
+static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &m2m_ctx->cap_q_ctx;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &m2m_ctx->out_q_ctx;
+ default:
+ printk(KERN_ERR "Invalid buffer type\n");
+ return NULL;
+ }
+}
+
+/**
+ * v4l2_m2m_get_vq() - return videobuf_queue for the given type
+ */
+struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ return &q_ctx->q;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_vq);
+
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ */
+void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue))
+ goto end;
+
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
+ vb->state = VIDEOBUF_ACTIVE;
+
+end:
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ */
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+ if (!list_empty(&q_ctx->rdy_queue)) {
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
+ queue);
+ list_del(&vb->queue);
+ q_ctx->num_rdy--;
+ }
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+
+/*
+ * Scheduling handlers
+ */
+
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ */
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->curr_ctx)
+ ret = m2m_dev->curr_ctx->priv;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
+
+/**
+ * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ *
+ * Get next transaction (if present) from the waiting jobs list and run it.
+ */
+static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (NULL != m2m_dev->curr_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Another instance is running, won't run now\n");
+ return;
+ }
+
+ if (list_empty(&m2m_dev->job_queue)) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("No job pending\n");
+ return;
+ }
+
+ m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
+ struct v4l2_m2m_ctx, queue);
+ m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ * @m2m_ctx: m2m context assigned to the instance to be checked
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * There may also be additional, custom requirements. In such a case the driver
+ * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
+ * return 1 if the instance is ready.
+ * An example of the above could be an instance that requires more than one
+ * src/dst buffer per transaction.
+ */
+static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags_job, flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+
+ if (!m2m_ctx->out_q_ctx.q.streaming
+ || !m2m_ctx->cap_q_ctx.q.streaming) {
+ dprintk("Streaming needs to be on for both queues\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("On job queue already\n");
+ return;
+ }
+
+ spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No input buffers available\n");
+ return;
+ }
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No output buffers available\n");
+ return;
+ }
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+
+ if (m2m_dev->m2m_ops->job_ready
+ && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Driver not ready\n");
+ return;
+ }
+
+ list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
+ m2m_ctx->job_flags |= TRANS_QUEUED;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
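Beyond the three generic conditions listed in the comment, a driver can veto scheduling through the optional job_ready() callback. A hedged sketch of such a callback for a hypothetical device that consumes two source buffers per run; struct my_ctx is illustrative and the num_rdy counters are the ready-list counters maintained by this file:

#include <media/v4l2-mem2mem.h>

/* Hypothetical driver context; only the m2m context matters here. */
struct my_ctx {
        struct v4l2_m2m_ctx *m2m_ctx;
};

/* Plugged into struct v4l2_m2m_ops .job_ready; return 1 when a transaction
 * can actually be started. This device needs two source buffers per run. */
static int my_job_ready(void *priv)
{
        struct my_ctx *ctx = priv;

        return ctx->m2m_ctx->out_q_ctx.num_rdy >= 2 &&
               ctx->m2m_ctx->cap_q_ctx.num_rdy >= 1;
}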
+
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after device_run() callback has been
+ * called on the driver. To prevent recursion, it should not be called directly
+ * from the device_run() callback though.
+ */
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Called by an instance not currently running\n");
+ return;
+ }
+
+ list_del(&m2m_dev->curr_ctx->queue);
+ m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ m2m_dev->curr_ctx = NULL;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ /* This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes. */
+ v4l2_m2m_try_schedule(m2m_ctx);
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_job_finish);
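The natural place to call v4l2_m2m_job_finish() is the completion path that runs after device_run(), typically the driver's interrupt handler: pull both buffers off the ready lists, mark them done, then hand the device back. A hedged sketch with made-up my_dev/my_ctx wrappers and no error handling for missing buffers:

#include <linux/interrupt.h>
#include <media/videobuf-core.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical wrappers; real drivers carry more state. */
struct my_ctx {
        struct v4l2_m2m_ctx *m2m_ctx;
};

struct my_dev {
        struct v4l2_m2m_dev *m2m_dev;
};

static irqreturn_t my_done_isr(int irq, void *data)
{
        struct my_dev *dev = data;
        struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
        struct videobuf_buffer *src, *dst;

        if (!ctx)
                return IRQ_NONE;        /* nothing was running */

        /* Take the processed buffers off the ready lists and complete them. */
        src = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
        dst = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        src->state = VIDEOBUF_DONE;
        dst->state = VIDEOBUF_DONE;
        wake_up(&src->done);
        wake_up(&dst->done);

        /* Yield the device; the framework schedules the next queued job. */
        v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
        return IRQ_HANDLED;
}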
+
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ */
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
+ return videobuf_reqbufs(vq, reqbufs);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_querybuf(vq, buf);
+
+ if (buf->memory == V4L2_MEMORY_MMAP
+ && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_qbuf(vq, buf);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ */
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ ret = videobuf_streamon(vq);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ */
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ return videobuf_streamoff(vq);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
+
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * can be dequeued (with dqbuf) from the source queue, the file is reported
+ * as writable (POLLOUT | POLLWRNORM); if one can be dequeued from the
+ * destination queue, it is reported as readable (POLLIN | POLLRDNORM).
+ */
+unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct videobuf_queue *src_q, *dst_q;
+ struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ mutex_lock(&src_q->vb_lock);
+ mutex_lock(&dst_q->vb_lock);
+
+ if (src_q->streaming && !list_empty(&src_q->stream))
+ src_vb = list_first_entry(&src_q->stream,
+ struct videobuf_buffer, stream);
+ if (dst_q->streaming && !list_empty(&dst_q->stream))
+ dst_vb = list_first_entry(&dst_q->stream,
+ struct videobuf_buffer, stream);
+
+ if (!src_vb && !dst_vb) {
+ rc = POLLERR;
+ goto end;
+ }
+
+ if (src_vb) {
+ poll_wait(file, &src_vb->done, wait);
+ if (src_vb->state == VIDEOBUF_DONE
+ || src_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLOUT | POLLWRNORM;
+ }
+ if (dst_vb) {
+ poll_wait(file, &dst_vb->done, wait);
+ if (dst_vb->state == VIDEOBUF_DONE
+ || dst_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLIN | POLLRDNORM;
+ }
+
+end:
+ mutex_unlock(&dst_q->vb_lock);
+ mutex_unlock(&src_q->vb_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
+
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * Call from driver's mmap() function. Will handle mmap() for both queues
+ * seamlessly for videobuf, which will receive normal per-queue offsets and
+ * proper videobuf queue pointers. The differentiation is made outside videobuf
+ * by adding a predefined offset to buffers from one of the queues and
+ * subtracting it before passing it back to videobuf. Only drivers (and
+ * thus applications) receive modified offsets.
+ */
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct videobuf_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return videobuf_mmap_mapper(vq, vma);
+}
+EXPORT_SYMBOL(v4l2_m2m_mmap);
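Because the helper undoes the DST_QUEUE_OFF_BASE offset itself, the driver's mmap file operation can be a one-line forward. Sketch (struct my_ctx is again just an assumed wrapper around the m2m context):

#include <linux/fs.h>
#include <linux/mm.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx {                         /* assumed wrapper, as in earlier sketches */
        struct v4l2_m2m_ctx *m2m_ctx;
};

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct my_ctx *ctx = filp->private_data;

        /* The helper picks the right queue from vma->vm_pgoff and fixes
         * the offset back up before videobuf sees it. */
        return v4l2_m2m_mmap(filp, ctx->m2m_ctx, vma);
}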
+
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * Usually called from driver's probe() function.
+ */
+struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+
+ if (!m2m_ops)
+ return ERR_PTR(-EINVAL);
+
+ BUG_ON(!m2m_ops->device_run);
+ BUG_ON(!m2m_ops->job_abort);
+
+ m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
+ if (!m2m_dev)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_dev->curr_ctx = NULL;
+ m2m_dev->m2m_ops = m2m_ops;
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+
+ return m2m_dev;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_init);
+
+/**
+ * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
+ *
+ * Usually called from driver's remove() function.
+ */
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+{
+ kfree(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ * @priv - driver's instance private data
+ * @m2m_dev - a previously initialized m2m_dev struct
+ * @vq_init - a callback for queue type-specific initialization function to be
+ * used for initializing videobuf_queues
+ *
+ * Usually called from driver's open() function.
+ */
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
+ void (*vq_init)(void *priv, struct videobuf_queue *,
+ enum v4l2_buf_type))
+{
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
+
+ if (!vq_init)
+ return ERR_PTR(-EINVAL);
+
+ m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
+ if (!m2m_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_ctx->priv = priv;
+ m2m_ctx->m2m_dev = m2m_dev;
+
+ out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
+ INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+
+ INIT_LIST_HEAD(&m2m_ctx->queue);
+
+ vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;
+
+ return m2m_ctx;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
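The vq_init callback is where the driver plugs in its own videobuf_queue_ops and buffer size; v4l2_m2m_ctx_init() invokes it once for the OUTPUT (source) queue and once for the CAPTURE (destination) queue. A hedged sketch using the vmalloc videobuf backend of this era purely for illustration; my_dev, my_ctx and my_queue_ops are placeholders:

#include <linux/err.h>
#include <linux/spinlock.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-mem2mem.h>

/* Illustrative driver structures; real ones carry format state etc. */
struct my_dev {
        struct v4l2_m2m_dev *m2m_dev;
        spinlock_t irqlock;
};

struct my_ctx {
        struct my_dev *dev;
        struct v4l2_m2m_ctx *m2m_ctx;
};

/* buf_setup/buf_prepare/buf_queue/buf_release would be filled in elsewhere. */
static struct videobuf_queue_ops my_queue_ops;

/* Called once per direction by v4l2_m2m_ctx_init(). */
static void my_queue_init(void *priv, struct videobuf_queue *vq,
                          enum v4l2_buf_type type)
{
        struct my_ctx *ctx = priv;

        videobuf_queue_vmalloc_init(vq, &my_queue_ops, NULL, &ctx->dev->irqlock,
                                    type, V4L2_FIELD_NONE,
                                    sizeof(struct videobuf_buffer), priv);
}

/* Typically called from the driver's open(). */
static int my_instance_init(struct my_dev *dev, struct my_ctx *ctx)
{
        ctx->dev = dev;
        ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, my_queue_init);
        return IS_ERR(ctx->m2m_ctx) ? PTR_ERR(ctx->m2m_ctx) : 0;
}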
+
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * Usually called from driver's release() function.
+ */
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ struct videobuf_buffer *vb;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ vb = v4l2_m2m_next_dst_buf(m2m_ctx);
+ BUG_ON(NULL == vb);
+ wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
+ && vb->state != VIDEOBUF_QUEUED);
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+
+ videobuf_stop(&m2m_ctx->cap_q_ctx.q);
+ videobuf_stop(&m2m_ctx->out_q_ctx.q);
+
+ videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
+ videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);
+
+ kfree(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
+
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * Call from buf_queue(), videobuf_queue_ops callback.
+ *
+ * Locking: Caller holds q->irqlock (taken by videobuf before calling buf_queue
+ * callback in the driver).
+ */
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, vq->type);
+ if (!q_ctx)
+ return;
+
+ list_add_tail(&vb->queue, &q_ctx->rdy_queue);
+ q_ctx->num_rdy++;
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
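v4l2_m2m_buf_queue() is intended to be the whole body of the driver's videobuf buf_queue op, which videobuf calls with q->irqlock held. Sketch (priv_data was set to the driver context by v4l2_m2m_ctx_init() above):

#include <media/videobuf-core.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx {                         /* assumed wrapper, as in earlier sketches */
        struct v4l2_m2m_ctx *m2m_ctx;
};

/* videobuf_queue_ops .buf_queue callback: just hand the buffer to the
 * per-direction ready list; q->irqlock is already held by videobuf. */
static void my_buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
        struct my_ctx *ctx = vq->priv_data;

        v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
}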
+
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index bb0a1c8de41..7d3378437de 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -24,10 +24,15 @@
#include <media/videobuf-core.h>
#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should) do { \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", is, should); \
- BUG(); } } while (0)
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR \
+ "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
static int debug;
module_param(debug, int, 0644);
@@ -36,16 +41,18 @@ MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf: " fmt , ## arg); } while (0)
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
+ } while (0)
/* --------------------------------------------------------------------- */
#define CALL(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-void *videobuf_alloc(struct videobuf_queue *q)
+struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q)
{
struct videobuf_buffer *vb;
@@ -57,14 +64,14 @@ void *videobuf_alloc(struct videobuf_queue *q)
}
vb = q->int_ops->alloc(q->msize);
-
if (NULL != vb) {
init_waitqueue_head(&vb->done);
- vb->magic = MAGIC_BUFFER;
+ vb->magic = MAGIC_BUFFER;
}
return vb;
}
+EXPORT_SYMBOL_GPL(videobuf_alloc);
#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
vb->state != VIDEOBUF_QUEUED)
@@ -86,6 +93,7 @@ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
@@ -95,16 +103,16 @@ int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
return CALL(q, iolock, q, vb, fbuf);
}
+EXPORT_SYMBOL_GPL(videobuf_iolock);
-void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
- struct videobuf_buffer *buf)
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
{
- if (q->int_ops->vmalloc)
- return q->int_ops->vmalloc(buf);
- else
- return NULL;
+ if (q->int_ops->vaddr)
+ return q->int_ops->vaddr(buf);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
+EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
@@ -146,6 +154,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
init_waitqueue_head(&q->wait);
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
/* Locking: Only used in bttv; unsafe, find a way to remove */
int videobuf_queue_is_busy(struct videobuf_queue *q)
@@ -184,6 +193,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
}
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
@@ -216,6 +226,7 @@ void videobuf_queue_cancel(struct videobuf_queue *q)
}
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */
@@ -237,6 +248,7 @@ enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
}
return field;
}
+EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
@@ -273,8 +285,10 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case VIDEOBUF_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
- case VIDEOBUF_DONE:
case VIDEOBUF_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
case VIDEOBUF_NEEDS_INIT:
@@ -298,20 +312,15 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
int i;
- int rc;
if (!q)
return 0;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- rc = CALL(q, mmap_free, q);
-
- q->is_mmapped = 0;
-
- if (rc < 0)
- return rc;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ if (q->bufs[i] && q->bufs[i]->map)
+ return -EBUSY;
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
@@ -321,7 +330,7 @@ static int __videobuf_mmap_free(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
- return rc;
+ return 0;
}
int videobuf_mmap_free(struct videobuf_queue *q)
@@ -332,6 +341,7 @@ int videobuf_mmap_free(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
@@ -351,7 +361,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
for (i = 0; i < bcount; i++) {
q->bufs[i] = videobuf_alloc(q);
- if (q->bufs[i] == NULL)
+ if (NULL == q->bufs[i])
break;
q->bufs[i]->i = i;
@@ -372,11 +382,11 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
if (!i)
return -ENOMEM;
- dprintk(1, "mmap setup: %d buffers, %d bytes each\n",
- i, bsize);
+ dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
return i;
}
+EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
unsigned int bcount, unsigned int bsize,
@@ -388,6 +398,7 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
struct v4l2_requestbuffers *req)
@@ -432,7 +443,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
q->ops->buf_setup(q, &count, &size);
dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
count, size,
- (unsigned int)((count*PAGE_ALIGN(size))>>PAGE_SHIFT) );
+ (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
retval = __videobuf_mmap_setup(q, count, size, req->memory);
if (retval < 0) {
@@ -447,6 +458,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
@@ -473,9 +485,9 @@ done:
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_querybuf);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b)
+int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
struct videobuf_buffer *buf;
enum v4l2_field field;
@@ -534,6 +546,13 @@ int videobuf_qbuf(struct videobuf_queue *q,
"but buffer addr is zero!\n");
goto done;
}
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ buf->size = b->bytesused;
+ buf->field = b->field;
+ buf->ts = b->timestamp;
+ }
break;
case V4L2_MEMORY_USERPTR:
if (b->length < buf->bsize) {
@@ -567,11 +586,11 @@ int videobuf_qbuf(struct videobuf_queue *q,
q->ops->buf_queue(q, buf);
spin_unlock_irqrestore(q->irqlock, flags);
}
- dprintk(1, "qbuf: succeded\n");
+ dprintk(1, "qbuf: succeeded\n");
retval = 0;
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
if (b->memory == V4L2_MEMORY_MMAP)
@@ -579,7 +598,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
return retval;
}
-
+EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
@@ -624,7 +643,6 @@ done:
return retval;
}
-
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
struct videobuf_buffer **vb, int nonblocking)
@@ -647,13 +665,14 @@ done:
}
int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking)
+ struct v4l2_buffer *b, int nonblocking)
{
struct videobuf_buffer *buf = NULL;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ memset(b, 0, sizeof(*b));
mutex_lock(&q->vb_lock);
retval = stream_next_buffer(q, &buf, nonblocking);
@@ -665,28 +684,25 @@ int videobuf_dqbuf(struct videobuf_queue *q,
switch (buf->state) {
case VIDEOBUF_ERROR:
dprintk(1, "dqbuf: state is error\n");
- retval = -EIO;
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
case VIDEOBUF_DONE:
dprintk(1, "dqbuf: state is done\n");
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
default:
dprintk(1, "dqbuf: state invalid\n");
retval = -EINVAL;
goto done;
}
- list_del(&buf->stream);
- memset(b, 0, sizeof(*b));
+ CALL(q, sync, q, buf);
videobuf_status(q, b, buf, q->type);
-
- done:
+ list_del(&buf->stream);
+ buf->state = VIDEOBUF_IDLE;
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_dqbuf);
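/*
 * videobuf_dqbuf() now clears the v4l2_buffer up front, syncs the buffer
 * before filling in its status and drops V4L2_BUF_FLAG_DONE.  A matching
 * user-space sketch of a non-blocking dequeue; the fd is assumed to be an
 * already streaming capture device opened with O_NONBLOCK (a real
 * application would poll() instead of spinning on EAGAIN).
 */
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int dequeue_buffer(int fd, struct v4l2_buffer *b)
{
	memset(b, 0, sizeof(*b));
	b->type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	b->memory = V4L2_MEMORY_MMAP;

	while (ioctl(fd, VIDIOC_DQBUF, b) < 0) {
		if (errno == EAGAIN)
			continue;	/* nothing ready yet, try again */
		return -errno;		/* real error */
	}
	return 0;			/* b->index, b->bytesused etc. are now valid */
}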
int videobuf_streamon(struct videobuf_queue *q)
{
@@ -709,10 +725,11 @@ int videobuf_streamon(struct videobuf_queue *q)
spin_unlock_irqrestore(q->irqlock, flags);
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
@@ -735,6 +752,7 @@ int videobuf_streamoff(struct videobuf_queue *q)
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
@@ -774,7 +792,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
retval = q->read_buf->size;
}
- done:
+done:
/* cleanup */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
@@ -782,6 +800,49 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
return retval;
}
+static int __videobuf_copy_to_user(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count,
+ int nonblocking)
+{
+ void *vaddr = CALL(q, vaddr, buf);
+
+ /* copy to userspace */
+ if (count > buf->size - q->read_off)
+ count = buf->size - q->read_off;
+
+ if (copy_to_user(data, vaddr + q->read_off, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int __videobuf_copy_stream(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count, size_t pos,
+ int vbihack, int nonblocking)
+{
+ unsigned int *fc = CALL(q, vaddr, buf);
+
+ if (vbihack) {
+ /* dirty, undocumented hack -- pass the frame counter
+ * within the last four bytes of each vbi data block.
+ * We need that one to maintain backward compatibility
+ * with all vbi decoding software out there ... */
+ fc += (buf->size >> 2) - 1;
+ *fc = buf->field_count >> 1;
+ dprintk(1, "vbihack: %d\n", *fc);
+ }
+
+ /* copy stuff using the common method */
+ count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
+
+ if ((count == -EFAULT) && (pos == 0))
+ return -EFAULT;
+
+ return count;
+}
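/*
 * The vbihack above stores buf->field_count >> 1 in the last 32-bit word
 * of each VBI buffer.  Stand-alone sketch of writing and then recovering
 * that word; the buffer size and counter value are made up for
 * illustration.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t size = 65536;			/* VBI buffer size in bytes */
	unsigned int *buf = calloc(1, size);
	unsigned int field_count = 1234;

	/* producer side: frame counter into the last four bytes */
	buf[(size >> 2) - 1] = field_count >> 1;

	/* consumer side: recover the frame counter */
	printf("frame counter: %u\n", buf[(size >> 2) - 1]);

	free(buf);
	return 0;
}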
+
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking)
@@ -850,7 +911,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
/* Copy to userspace */
- retval = CALL(q, video_copy_to_user, q, data, count, nonblocking);
+ retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
if (retval < 0)
goto done;
@@ -862,10 +923,11 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
q->read_buf = NULL;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
@@ -917,7 +979,6 @@ static void __videobuf_read_stop(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
q->read_buf = NULL;
-
}
int videobuf_read_start(struct videobuf_queue *q)
@@ -930,6 +991,7 @@ int videobuf_read_start(struct videobuf_queue *q)
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
@@ -937,6 +999,7 @@ void videobuf_read_stop(struct videobuf_queue *q)
__videobuf_read_stop(q);
mutex_unlock(&q->vb_lock);
}
+EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
@@ -950,7 +1013,7 @@ void videobuf_stop(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
}
-
+EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
@@ -990,7 +1053,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
}
if (q->read_buf->state == VIDEOBUF_DONE) {
- rc = CALL(q, copy_stream, q, data + retval, count,
+ rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
retval, vbihack, nonblocking);
if (rc < 0) {
retval = rc;
@@ -1019,10 +1082,11 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
break;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_stream);
unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
@@ -1056,27 +1120,51 @@ unsigned int videobuf_poll_stream(struct file *file,
if (0 == rc) {
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
+ buf->state == VIDEOBUF_ERROR) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ rc = POLLOUT | POLLWRNORM;
+ break;
+ default:
+ rc = POLLIN | POLLRDNORM;
+ break;
+ }
+ }
}
mutex_unlock(&q->vb_lock);
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_poll_stream);
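/*
 * videobuf_poll_stream() now signals POLLOUT|POLLWRNORM for output queues
 * and POLLIN|POLLRDNORM for capture.  A user-space sketch waiting for the
 * event that matches the stream direction; the fd and the is_output flag
 * are assumptions of the example.
 */
#include <poll.h>

static int wait_for_buffer(int fd, int is_output)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = is_output ? POLLOUT : POLLIN,
	};

	if (poll(&pfd, 1, 2000) <= 0)	/* 2 s timeout or error */
		return -1;

	/* a buffer can now be dequeued with VIDIOC_DQBUF */
	return 0;
}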
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
- int retval;
+ int rc = -EINVAL;
+ int i;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
mutex_lock(&q->vb_lock);
- retval = CALL(q, mmap_mapper, q, vma);
- q->is_mmapped = 1;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
+ rc = CALL(q, mmap_mapper, q, buf, vma);
+ break;
+ }
+ }
mutex_unlock(&q->vb_lock);
- return retval;
+ return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
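/*
 * The generic mapper above insists on PROT_WRITE and MAP_SHARED and
 * locates the buffer by matching the mmap offset against buf->boff.  A
 * user-space sketch mapping buffer 'index' with the offset returned by
 * VIDIOC_QUERYBUF; the fd is an assumption of the example.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static void *map_buffer(int fd, unsigned int index, size_t *len)
{
	struct v4l2_buffer b;

	memset(&b, 0, sizeof(b));
	b.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	b.memory = V4L2_MEMORY_MMAP;
	b.index  = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &b) < 0)
		return MAP_FAILED;

	*len = b.length;
	return mmap(NULL, b.length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, b.m.offset);
}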
#ifdef CONFIG_VIDEO_V4L1_COMPAT
int videobuf_cgmbuf(struct videobuf_queue *q,
@@ -1107,33 +1195,3 @@ int videobuf_cgmbuf(struct videobuf_queue *q,
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-EXPORT_SYMBOL_GPL(videobuf_alloc);
-
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-EXPORT_SYMBOL_GPL(videobuf_stop);
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index dce4f3aa4af..74730c624cf 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -55,14 +55,14 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
- dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
/* We need first to cancel streams, before unmapping */
@@ -89,7 +89,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
/* vfree is not atomic - can't be
called with IRQ's disabled
*/
- dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
+ dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
dma_free_coherent(q->dev, mem->size,
@@ -190,7 +190,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
return ret;
}
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
@@ -204,7 +204,7 @@ static void *__videobuf_alloc(size_t size)
return vb;
}
-static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -263,65 +263,34 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dev_dbg(q->dev, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i] && q->bufs[i]->map)
- return -EBUSY;
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval;
- unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size;
dev_dbg(q->dev, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (!q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
- offset);
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (!map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
+ mem->size = PAGE_ALIGN(buf->bsize);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
if (!mem->vaddr) {
@@ -354,8 +323,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -366,69 +335,13 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking)
-{
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
- void *vaddr;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- BUG_ON(!mem->vaddr);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- vaddr = mem->vaddr;
-
- if (copy_to_user(data, vaddr + q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking)
-{
- unsigned int *fc;
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int *)mem->vaddr;
- fc += (q->read_buf->size >> 2) - 1;
- *fc = q->read_buf->field_count >> 1;
- dev_dbg(q->dev, "vbihack: %d\n", *fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user(q, data, count, nonblocking);
-
- if ((count == -EFAULT) && (pos == 0))
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index fcd045e7a1c..8359e6badd3 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -37,8 +37,12 @@
#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -47,13 +51,13 @@ MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
/* --------------------------------------------------------------------- */
-struct scatterlist*
-videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
+struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
struct scatterlist *sglist;
struct page *pg;
@@ -73,13 +77,14 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
}
return sglist;
- err:
+err:
vfree(sglist);
return NULL;
}
+EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-struct scatterlist*
-videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
+struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages,
+ int offset)
{
struct scatterlist *sglist;
int i;
@@ -104,20 +109,20 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
}
return sglist;
- nopage:
- dprintk(2,"sgl: oops - no page\n");
+nopage:
+ dprintk(2, "sgl: oops - no page\n");
vfree(sglist);
return NULL;
- highmem:
- dprintk(2,"sgl: oops - highmem page\n");
+highmem:
+ dprintk(2, "sgl: oops - highmem page\n");
vfree(sglist);
return NULL;
}
/* --------------------------------------------------------------------- */
-struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -126,17 +131,19 @@ struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
return &mem->dma;
}
+EXPORT_SYMBOL_GPL(videobuf_to_dma);
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
- memset(dma,0,sizeof(*dma));
+ memset(dma, 0, sizeof(*dma));
dma->magic = MAGIC_DMABUF;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init);
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
int direction, unsigned long data, unsigned long size)
{
- unsigned long first,last;
+ unsigned long first, last;
int err, rw = 0;
dma->direction = direction;
@@ -155,21 +162,21 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
dma->offset = data & ~PAGE_MASK;
dma->nr_pages = last-first+1;
- dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
- GFP_KERNEL);
+ dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
if (NULL == dma->pages)
return -ENOMEM;
- dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
- data,size,dma->nr_pages);
- err = get_user_pages(current,current->mm,
+ dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ data, size, dma->nr_pages);
+
+ err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
rw == READ, 1, /* force */
dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
@@ -179,48 +186,58 @@ int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
unsigned long data, unsigned long size)
{
int ret;
+
down_read(&current->mm->mmap_sem);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
up_read(&current->mm->mmap_sem);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
int nr_pages)
{
- dprintk(1,"init kernel [%d pages]\n",nr_pages);
+ dprintk(1, "init kernel [%d pages]\n", nr_pages);
+
dma->direction = direction;
dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == dma->vmalloc) {
- dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
(unsigned long)dma->vmalloc,
nr_pages << PAGE_SHIFT);
- memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
+
+ memset(dma->vmalloc, 0, nr_pages << PAGE_SHIFT);
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, int nr_pages)
{
- dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
- nr_pages,(unsigned long)addr);
+ dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
+ nr_pages, (unsigned long)addr);
dma->direction = direction;
+
if (0 == addr)
return -EINVAL;
dma->bus_addr = addr;
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
+int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(0 == dma->nr_pages);
if (dma->pages) {
@@ -228,20 +245,21 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->offset);
}
if (dma->vmalloc) {
- dma->sglist = videobuf_vmalloc_to_sg
- (dma->vmalloc,dma->nr_pages);
+ dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc,
+ dma->nr_pages);
}
if (dma->bus_addr) {
dma->sglist = vmalloc(sizeof(*dma->sglist));
if (NULL != dma->sglist) {
- dma->sglen = 1;
- sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
- dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
- sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
+ dma->sglen = 1;
+ sg_dma_address(&dma->sglist[0]) = dma->bus_addr
+ & PAGE_MASK;
+ dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
+ sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
}
}
if (NULL == dma->sglist) {
- dprintk(1,"scatterlist is NULL\n");
+ dprintk(1, "scatterlist is NULL\n");
return -ENOMEM;
}
if (!dma->bus_addr) {
@@ -249,47 +267,43 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->nr_pages, dma->direction);
if (0 == dma->sglen) {
printk(KERN_WARNING
- "%s: videobuf_map_sg failed\n",__func__);
+ "%s: videobuf_map_sg failed\n", __func__);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return -ENOMEM;
}
}
- return 0;
-}
-
-int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(!dma->sglen);
- dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_map);
-int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
+int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+
if (!dma->sglen)
return 0;
- dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
+ dma_unmap_sg(q->dev, dma->sglist, dma->sglen, dma->direction);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ int i;
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(dma->sglen);
if (dma->pages) {
- int i;
- for (i=0; i < dma->nr_pages; i++)
+ for (i = 0; i < dma->nr_pages; i++)
page_cache_release(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
@@ -298,12 +312,13 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
vfree(dma->vmalloc);
dma->vmalloc = NULL;
- if (dma->bus_addr) {
+ if (dma->bus_addr)
dma->bus_addr = 0;
- }
dma->direction = DMA_NONE;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_free);
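/*
 * The helpers exported above pair up as init -> map -> unmap -> free.
 * Kernel-side sketch of that lifecycle for a user pointer, assuming a
 * videobuf_queue 'q' whose q->dev is a DMA-capable device; error handling
 * is reduced to the minimum and the function name is hypothetical.
 */
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-sg.h>

static int sketch_dma_cycle(struct videobuf_queue *q,
			    unsigned long uaddr, unsigned long size)
{
	struct videobuf_dmabuf dma;
	int rc;

	videobuf_dma_init(&dma);
	rc = videobuf_dma_init_user(&dma, DMA_FROM_DEVICE, uaddr, size);
	if (rc)
		return rc;

	rc = videobuf_dma_map(q, &dma);		/* builds and maps the sg list */
	if (rc == 0) {
		/* ... program the hardware with dma.sglist / dma.sglen ... */
		videobuf_dma_unmap(q, &dma);
	}

	videobuf_dma_free(&dma);		/* releases the pinned pages */
	return rc;
}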
/* --------------------------------------------------------------------- */
@@ -315,6 +330,7 @@ int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_map(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
@@ -324,49 +340,48 @@ int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_unmap(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
/* --------------------------------------------------------------------- */
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
map->count++;
}
-static void
-videobuf_vm_close(struct vm_area_struct *vma)
+static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
struct videobuf_dma_sg_memory *mem;
int i;
- dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
- dprintk(1,"munmap %p q=%p\n",map,q);
+ dprintk(1, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
- mem=q->bufs[i]->priv;
-
+ mem = q->bufs[i]->priv;
if (!mem)
continue;
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
if (q->bufs[i]->map != map)
continue;
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
- q->ops->buf_release(q,q->bufs[i]);
+ q->ops->buf_release(q, q->bufs[i]);
}
mutex_unlock(&q->vb_lock);
kfree(map);
@@ -380,26 +395,27 @@ videobuf_vm_close(struct vm_area_struct *vma)
* now ...). Bounce buffers don't work very well for the data rates
* video capture has.
*/
-static int
-videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
- dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
+ dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
+ (unsigned long)vmf->virtual_address,
+ vma->vm_start, vma->vm_end);
+
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
clear_user_highpage(page, (unsigned long)vmf->virtual_address);
vmf->page = page;
+
return 0;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
- .fault = videobuf_vm_fault,
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ .fault = videobuf_vm_fault,
};
/* ---------------------------------------------------------------------
@@ -412,28 +428,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_sg_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_SG_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_SG_MEM;
videobuf_dma_init(&mem->dma);
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -443,11 +459,11 @@ static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
return mem->dma.vmalloc;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
- int err,pages;
+ int err, pages;
dma_addr_t bus;
struct videobuf_dma_sg_memory *mem = vb->priv;
BUG_ON(!mem);
@@ -460,16 +476,16 @@ static int __videobuf_iolock (struct videobuf_queue* q,
if (0 == vb->baddr) {
/* no userspace addr -- kernel bounce buffer */
pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_kernel( &mem->dma,
- DMA_FROM_DEVICE,
- pages );
+ err = videobuf_dma_init_kernel(&mem->dma,
+ DMA_FROM_DEVICE,
+ pages);
if (0 != err)
return err;
} else if (vb->memory == V4L2_MEMORY_USERPTR) {
/* dma directly to userspace */
- err = videobuf_dma_init_user( &mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr,vb->bsize );
+ err = videobuf_dma_init_user(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
if (0 != err)
return err;
} else {
@@ -515,43 +531,27 @@ static int __videobuf_sync(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ BUG_ON(!mem || !mem->dma.sglen);
- return videobuf_dma_sync(q,&mem->dma);
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- int i;
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
+ dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
+ mem->dma.sglen, mem->dma.direction);
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
- struct videobuf_dma_sg_memory *mem;
+ struct videobuf_dma_sg_memory *mem = buf->priv;
struct videobuf_mapping *map;
- unsigned int first,last,size,i;
+ unsigned int first, last, size = 0, i;
int retval;
retval = -EINVAL;
- if (!(vma->vm_flags & VM_WRITE)) {
- dprintk(1,"mmap app bug: PROT_WRITE please\n");
- goto done;
- }
- if (!(vma->vm_flags & VM_SHARED)) {
- dprintk(1,"mmap app bug: MAP_SHARED please\n");
- goto done;
- }
/* This function maintains backwards compatibility with V4L1 and will
* map more than one buffer if the vma length is equal to the combined
@@ -561,48 +561,52 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
* TODO: Allow drivers to specify if they support this mode
*/
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
/* look for first buffer to map */
for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
- mem=q->bufs[first]->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ if (buf == q->bufs[first]) {
+ size = PAGE_ALIGN(q->bufs[first]->bsize);
break;
+ }
}
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
+
+ /* paranoia, should never happen since buf is always valid. */
+ if (!size) {
+ dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
goto done;
}
- /* look for last buffer to map */
- for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
- if (NULL == q->bufs[last])
- continue;
- if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
- continue;
- if (q->bufs[last]->map) {
- retval = -EBUSY;
+ last = first;
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ if (size != (vma->vm_end - vma->vm_start)) {
+ /* look for last buffer to map */
+ for (last = first + 1; last < VIDEO_MAX_FRAME; last++) {
+ if (NULL == q->bufs[last])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
+ continue;
+ if (q->bufs[last]->map) {
+ retval = -EBUSY;
+ goto done;
+ }
+ size += PAGE_ALIGN(q->bufs[last]->bsize);
+ if (size == (vma->vm_end - vma->vm_start))
+ break;
+ }
+ if (VIDEO_MAX_FRAME == last) {
+ dprintk(1, "mmap app bug: size invalid [size=0x%lx]\n",
+ (vma->vm_end - vma->vm_start));
goto done;
}
- size += PAGE_ALIGN(q->bufs[last]->bsize);
- if (size == (vma->vm_end - vma->vm_start))
- break;
- }
- if (VIDEO_MAX_FRAME == last) {
- dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
- (vma->vm_end - vma->vm_start));
- goto done;
}
+#endif
/* create mapping + update buffer list */
retval = -ENOMEM;
- map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
+ map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
goto done;
@@ -623,72 +627,22 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
- map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+ map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
retval = 0;
- done:
+done:
return retval;
}
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->dma.vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops sg_ops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
.sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void *videobuf_sg_alloc(size_t size)
@@ -702,8 +656,9 @@ void *videobuf_sg_alloc(size_t size)
return videobuf_alloc(&q);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-void videobuf_queue_sg_init(struct videobuf_queue* q,
+void videobuf_queue_sg_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -715,29 +670,5 @@ void videobuf_queue_sg_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &sg_ops);
}
-
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-EXPORT_SYMBOL_GPL(videobuf_dma_init);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-EXPORT_SYMBOL_GPL(videobuf_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_dma_sync);
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 0afb62e63d9..3f76398968b 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -67,7 +67,7 @@ static int videobuf_dvb_thread(void *data)
try_to_freeze();
/* feed buffer data to demux */
- outp = videobuf_queue_to_vmalloc (&dvb->dvbq, buf);
+ outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
if (buf->state == VIDEOBUF_DONE)
dvb_dmx_swfilter(&dvb->demux, outp,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 136e09383c0..583728f4c22 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -30,8 +30,12 @@
#define MAGIC_DMABUF 0x17760309
#define MAGIC_VMAL_MEM 0x18221223
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -40,19 +44,19 @@ MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
/***************************************************************************/
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%u,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count++;
}
@@ -63,7 +67,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dprintk(2,"vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count--;
@@ -116,8 +120,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
return;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
+static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
@@ -132,28 +135,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_VMAL_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_VMAL_MEM;
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
struct videobuf_vmalloc_memory *mem = vb->priv;
int pages;
@@ -177,15 +180,13 @@ static int __videobuf_iolock (struct videobuf_queue* q,
dprintk(1, "%s memory method USERPTR\n", __func__);
-#if 1
if (vb->baddr) {
printk(KERN_ERR "USERPTR is currently not supported\n");
return -EINVAL;
}
-#endif
/* The only USERPTR currently supported is the one needed for
- read() method.
+ * read() method.
*/
mem->vmalloc = vmalloc_user(pages);
@@ -210,7 +211,7 @@ static int __videobuf_iolock (struct videobuf_queue* q,
/* Try to remap memory */
rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
if (rc < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d. ", rc);
+ printk(KERN_ERR "mmap: remap failed with error %d", rc);
return -ENOMEM;
}
#endif
@@ -228,69 +229,29 @@ static int __videobuf_iolock (struct videobuf_queue* q,
return 0;
}
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- return 0;
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dprintk(1, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval, pages;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
dprintk(1, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
@@ -300,8 +261,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
goto error;
}
- dprintk(1, "vmalloc is at addr %p (%d pages)\n",
- mem->vmalloc, pages);
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages);
/* Try to remap memory */
retval = remap_vmalloc_range(vma, mem->vmalloc, 0);
@@ -315,10 +275,10 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -330,69 +290,16 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- BUG_ON (!mem->vmalloc);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = videobuf_to_vmalloc,
+ .vaddr = videobuf_to_vmalloc,
};
-void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -404,20 +311,19 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops);
}
-
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
-void *videobuf_to_vmalloc (struct videobuf_buffer *buf)
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
- struct videobuf_vmalloc_memory *mem=buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
return mem->vmalloc;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-void videobuf_vmalloc_free (struct videobuf_buffer *buf)
+void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
@@ -442,8 +348,3 @@ void videobuf_vmalloc_free (struct videobuf_buffer *buf)
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index cdbe70385c1..e17b6fee046 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -13,29 +13,23 @@
* License, or (at your option) any later version
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/font.h>
#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
#include <linux/kthread.h>
-#include <linux/highmem.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
+#endif
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include "font.h"
+#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -44,8 +38,11 @@
#define WAKE_DENOMINATOR 1001
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 6
+#define VIVI_MINOR_VERSION 7
#define VIVI_RELEASE 0
#define VIVI_VERSION \
KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
@@ -70,56 +67,8 @@ static unsigned int vid_limit = 16;
module_param(vid_limit, uint, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
-
-/* supported controls */
-static struct v4l2_queryctrl vivi_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535/100,
- .default_value = 65535,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 0x10,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }
-};
+/* Global font descriptor */
+static const u8 *font8x16;
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -214,41 +163,38 @@ struct vivi_dev {
struct list_head vivi_devlist;
struct v4l2_device v4l2_dev;
+ /* controls */
+ int brightness;
+ int contrast;
+ int saturation;
+ int hue;
+ int volume;
+
spinlock_t slock;
struct mutex mutex;
- int users;
-
/* various device info */
struct video_device *vfd;
struct vivi_dmaqueue vidq;
/* Several counters */
- int h, m, s, ms;
+ unsigned ms;
unsigned long jiffies;
- char timestr[13];
int mv_count; /* Controls bars movement */
/* Input Number */
int input;
- /* Control 'registers' */
- int qctl_regs[ARRAY_SIZE(vivi_qctrl)];
-};
-
-struct vivi_fh {
- struct vivi_dev *dev;
-
/* video capture */
struct vivi_fmt *fmt;
unsigned int width, height;
struct videobuf_queue vb_vidq;
- enum v4l2_buf_type type;
- unsigned char bars[8][3];
- int input; /* Input Number on bars */
+ unsigned long generating;
+ u8 bars[9][3];
+ u8 line[MAX_WIDTH * 4];
};
/* ------------------------------------------------------------------
@@ -259,19 +205,20 @@ struct vivi_fh {
enum colors {
WHITE,
- AMBAR,
+ AMBER,
CYAN,
GREEN,
MAGENTA,
RED,
BLUE,
BLACK,
+ TEXT_BLACK,
};
- /* R G B */
+/* R G B */
#define COLOR_WHITE {204, 204, 204}
-#define COLOR_AMBAR {208, 208, 0}
-#define COLOR_CIAN { 0, 206, 206}
+#define COLOR_AMBER {208, 208, 0}
+#define COLOR_CYAN { 0, 206, 206}
#define COLOR_GREEN { 0, 239, 0}
#define COLOR_MAGENTA {239, 0, 239}
#define COLOR_RED {205, 0, 0}
@@ -279,56 +226,24 @@ enum colors {
#define COLOR_BLACK { 0, 0, 0}
struct bar_std {
- u8 bar[8][3];
+ u8 bar[9][3];
};
/* Maximum number of bars are 10 - otherwise, the input print code
should be modified */
static struct bar_std bars[] = {
{ /* Standard ITU-R color bar sequence */
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_CIAN,
- COLOR_GREEN,
- COLOR_MAGENTA,
- COLOR_RED,
- COLOR_BLUE,
- COLOR_BLACK,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
+ COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_BLACK, COLOR_WHITE,
+ COLOR_AMBER, COLOR_BLACK, COLOR_WHITE, COLOR_AMBER, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- }
+ { COLOR_WHITE, COLOR_CYAN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_CYAN, COLOR_BLACK, COLOR_WHITE, COLOR_CYAN, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- }
+ { COLOR_WHITE, COLOR_GREEN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_GREEN, COLOR_BLACK, COLOR_WHITE, COLOR_GREEN, COLOR_BLACK }
},
};
@@ -344,21 +259,18 @@ static struct bar_std bars[] = {
(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_fh *fh)
+static void precalculate_bars(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
- unsigned char r, g, b;
+ u8 r, g, b;
int k, is_yuv;
- fh->input = dev->input;
-
- for (k = 0; k < 8; k++) {
- r = bars[fh->input].bar[k][0];
- g = bars[fh->input].bar[k][1];
- b = bars[fh->input].bar[k][2];
+ for (k = 0; k < 9; k++) {
+ r = bars[dev->input].bar[k][0];
+ g = bars[dev->input].bar[k][1];
+ b = bars[dev->input].bar[k][2];
is_yuv = 0;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
is_yuv = 1;
@@ -378,16 +290,15 @@ static void precalculate_bars(struct vivi_fh *fh)
}
if (is_yuv) {
- fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
- fh->bars[k][1] = TO_U(r, g, b); /* Cb */
- fh->bars[k][2] = TO_V(r, g, b); /* Cr */
+ dev->bars[k][0] = TO_Y(r, g, b); /* Luma */
+ dev->bars[k][1] = TO_U(r, g, b); /* Cb */
+ dev->bars[k][2] = TO_V(r, g, b); /* Cr */
} else {
- fh->bars[k][0] = r;
- fh->bars[k][1] = g;
- fh->bars[k][2] = b;
+ dev->bars[k][0] = r;
+ dev->bars[k][1] = g;
+ dev->bars[k][2] = b;
}
}
-
}
#define TSTAMP_MIN_Y 24
@@ -395,20 +306,20 @@ static void precalculate_bars(struct vivi_fh *fh)
#define TSTAMP_INPUT_X 10
#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
{
- unsigned char r_y, g_u, b_v;
- unsigned char *p;
+ u8 r_y, g_u, b_v;
int color;
+ u8 *p;
- r_y = fh->bars[colorpos][0]; /* R or precalculated Y */
- g_u = fh->bars[colorpos][1]; /* G or precalculated U */
- b_v = fh->bars[colorpos][2]; /* B or precalculated V */
+ r_y = dev->bars[colorpos][0]; /* R or precalculated Y */
+ g_u = dev->bars[colorpos][1]; /* G or precalculated U */
+ b_v = dev->bars[colorpos][2]; /* B or precalculated V */
for (color = 0; color < 4; color++) {
p = buf + color;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
switch (color) {
case 0:
@@ -489,123 +400,88 @@ static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
}
}
-static void gen_line(struct vivi_fh *fh, char *basep, int inipos, int wmax,
- int hmax, int line, int count, char *timestr)
+static void precalculate_line(struct vivi_dev *dev)
{
- int w, i, j;
- int pos = inipos;
- char *s;
- u8 chr;
-
- /* We will just duplicate the second pixel at the packet */
- wmax /= 2;
+ int w;
- /* Generate a standard color bar pattern */
- for (w = 0; w < wmax; w++) {
- int colorpos = ((w + count) * 8/(wmax + 1)) % 8;
+ for (w = 0; w < dev->width * 2; w += 2) {
+ int colorpos = (w / (dev->width / 8) % 8);
- gen_twopix(fh, basep + pos, colorpos);
- pos += 4; /* only 16 bpp supported for now */
+ gen_twopix(dev, dev->line + w * 2, colorpos);
}
+}
- /* Prints input entry number */
-
- /* Checks if it is possible to input number */
- if (TSTAMP_MAX_Y >= hmax)
- goto end;
-
- if (TSTAMP_INPUT_X + strlen(timestr) >= wmax)
- goto end;
-
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- chr = rom8x16_bits[fh->input * 16 + line - TSTAMP_MIN_Y];
- pos = TSTAMP_INPUT_X;
- for (i = 0; i < 7; i++) {
- /* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
- else
- gen_twopix(fh, basep + pos, BLACK);
- pos += 2;
- }
- }
+static void gen_text(struct vivi_dev *dev, char *basep,
+ int y, int x, char *text)
+{
+ int line;
- /* Checks if it is possible to show timestamp */
- if (TSTAMP_MIN_X + strlen(timestr) >= wmax)
- goto end;
+ /* Checks if it is possible to show string */
+ if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+ return;
/* Print stream time */
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- j = TSTAMP_MIN_X;
- for (s = timestr; *s; s++) {
- chr = rom8x16_bits[(*s-0x30)*16+line-TSTAMP_MIN_Y];
- for (i = 0; i < 7; i++) {
- pos = inipos + j * 2;
+ for (line = y; line < y + 16; line++) {
+ int j = 0;
+ char *pos = basep + line * dev->width * 2 + x * 2;
+ char *s;
+
+ for (s = text; *s; s++) {
+ u8 chr = font8x16[*s * 16 + line - y];
+ int i;
+
+ for (i = 0; i < 7; i++, j++) {
/* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
+ if (chr & (1 << (7 - i)))
+ gen_twopix(dev, pos + j * 2, WHITE);
else
- gen_twopix(fh, basep + pos, BLACK);
- j++;
+ gen_twopix(dev, pos + j * 2, TEXT_BLACK);
}
}
}
-
-end:
- return;
}
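/*
 * gen_text() above indexes the 8x16 font as font8x16[ch * 16 + row] and
 * tests bit (7 - i) of each row byte, drawing seven columns per character
 * cell.  Stand-alone sketch of that bit walk with one made-up glyph; the
 * driver itself takes its glyph data from the global font8x16 pointer
 * declared earlier.
 */
#include <stdio.h>

static const unsigned char glyph[16] = {	/* crude 'T', made up */
	0x00, 0x7e, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
	0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00,
};

int main(void)
{
	int row, i;

	for (row = 0; row < 16; row++) {
		for (i = 0; i < 7; i++)
			putchar(glyph[row] & (1 << (7 - i)) ? '#' : ' ');
		putchar('\n');
	}
	return 0;
}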
-static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
+static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- struct vivi_dev *dev = fh->dev;
- int h , pos = 0;
- int hmax = buf->vb.height;
- int wmax = buf->vb.width;
+ int hmax = buf->vb.height;
+ int wmax = buf->vb.width;
struct timeval ts;
- char *tmpbuf;
void *vbuf = videobuf_to_vmalloc(&buf->vb);
+ unsigned ms;
+ char str[100];
+ int h, line = 1;
if (!vbuf)
return;
- tmpbuf = kmalloc(wmax * 2, GFP_ATOMIC);
- if (!tmpbuf)
- return;
-
- for (h = 0; h < hmax; h++) {
- gen_line(fh, tmpbuf, 0, wmax, hmax, h, dev->mv_count,
- dev->timestr);
- memcpy(vbuf + pos, tmpbuf, wmax * 2);
- pos += wmax*2;
- }
-
- dev->mv_count++;
-
- kfree(tmpbuf);
+ for (h = 0; h < hmax; h++)
+ memcpy(vbuf + h * wmax * 2, dev->line + (dev->mv_count % wmax) * 2, wmax * 2);
/* Updates stream time */
- dev->ms += jiffies_to_msecs(jiffies-dev->jiffies);
+ dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
dev->jiffies = jiffies;
- if (dev->ms >= 1000) {
- dev->ms -= 1000;
- dev->s++;
- if (dev->s >= 60) {
- dev->s -= 60;
- dev->m++;
- if (dev->m > 60) {
- dev->m -= 60;
- dev->h++;
- if (dev->h > 24)
- dev->h -= 24;
- }
- }
- }
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- dprintk(dev, 2, "vivifill at %s: Buffer 0x%08lx size= %d\n",
- dev->timestr, (unsigned long)tmpbuf, pos);
+ ms = dev->ms;
+ snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
+ (ms / (60 * 60 * 1000)) % 24,
+ (ms / (60 * 1000)) % 60,
+ (ms / 1000) % 60,
+ ms % 1000);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " %dx%d, input %d ",
+ dev->width, dev->height, dev->input);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+ dev->brightness,
+ dev->contrast,
+ dev->saturation,
+ dev->hue);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " volume %3d ", dev->volume);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ dev->mv_count += 2;
/* Advice that buffer was filled */
buf->vb.field_count++;
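/*
 * vivi_fillbuff() above derives the on-screen HH:MM:SS:mmm string directly
 * from the running millisecond counter instead of carrying separate
 * h/m/s/ms fields.  Stand-alone sketch of the same modular arithmetic with
 * a made-up counter value.
 */
#include <stdio.h>

int main(void)
{
	unsigned ms = 3723456;	/* 1 h 2 min 3.456 s */
	char str[32];

	snprintf(str, sizeof(str), "%02u:%02u:%02u:%03u",
		 (ms / (60 * 60 * 1000)) % 24,
		 (ms / (60 * 1000)) % 60,
		 (ms / 1000) % 60,
		 ms % 1000);
	printf("%s\n", str);	/* prints 01:02:03:456 */
	return 0;
}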
@@ -614,12 +490,10 @@ static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
buf->vb.state = VIDEOBUF_DONE;
}
-static void vivi_thread_tick(struct vivi_fh *fh)
+static void vivi_thread_tick(struct vivi_dev *dev)
{
- struct vivi_buffer *buf;
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
-
+ struct vivi_buffer *buf;
unsigned long flags = 0;
dprintk(dev, 1, "Thread tick\n");
@@ -642,22 +516,20 @@ static void vivi_thread_tick(struct vivi_fh *fh)
do_gettimeofday(&buf->vb.ts);
/* Fill buffer */
- vivi_fillbuff(fh, buf);
+ vivi_fillbuff(dev, buf);
dprintk(dev, 1, "filled buffer %p\n", buf);
wake_up(&buf->vb.done);
dprintk(dev, 2, "[%p/%d] wakeup\n", buf, buf->vb. i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
- return;
}
#define frames_to_ms(frames) \
((frames * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
-static void vivi_sleep(struct vivi_fh *fh)
+static void vivi_sleep(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
int timeout;
DECLARE_WAITQUEUE(wait, current);
@@ -672,7 +544,7 @@ static void vivi_sleep(struct vivi_fh *fh)
/* Calculate time to wake up */
timeout = msecs_to_jiffies(frames_to_ms(1));
- vivi_thread_tick(fh);
+ vivi_thread_tick(dev);
schedule_timeout_interruptible(timeout);
@@ -683,15 +555,14 @@ stop_task:
static int vivi_thread(void *data)
{
- struct vivi_fh *fh = data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = data;
dprintk(dev, 1, "thread started\n");
set_freezable();
for (;;) {
- vivi_sleep(fh);
+ vivi_sleep(dev);
if (kthread_should_stop())
break;
@@ -700,39 +571,61 @@ static int vivi_thread(void *data)
return 0;
}
-static int vivi_start_thread(struct vivi_fh *fh)
+static void vivi_start_generating(struct file *file)
{
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_dmaqueue *dma_q = &dev->vidq;
- dma_q->frame = 0;
- dma_q->ini_jiffies = jiffies;
-
dprintk(dev, 1, "%s\n", __func__);
- dma_q->kthread = kthread_run(vivi_thread, fh, "vivi");
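+ /* only one generator kthread per device; bail out if it is already running */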
+ if (test_and_set_bit(0, &dev->generating))
+ return;
+ file->private_data = dev;
+
+ /* Resets frame counters */
+ dev->ms = 0;
+ dev->mv_count = 0;
+ dev->jiffies = jiffies;
+
+ dma_q->frame = 0;
+ dma_q->ini_jiffies = jiffies;
+ dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name);
if (IS_ERR(dma_q->kthread)) {
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dma_q->kthread);
+ clear_bit(0, &dev->generating);
+ return;
}
/* Wakes thread */
wake_up_interruptible(&dma_q->wq);
dprintk(dev, 1, "returning from %s\n", __func__);
- return 0;
}
-static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
+static void vivi_stop_generating(struct file *file)
{
- struct vivi_dev *dev = container_of(dma_q, struct vivi_dev, vidq);
+ struct vivi_dev *dev = video_drvdata(file);
+ struct vivi_dmaqueue *dma_q = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
+
+ if (!file->private_data)
+ return;
+ if (!test_and_clear_bit(0, &dev->generating))
+ return;
+
/* shutdown control thread */
if (dma_q->kthread) {
kthread_stop(dma_q->kthread);
dma_q->kthread = NULL;
}
+ videobuf_stop(&dev->vb_vidq);
+ videobuf_mmap_free(&dev->vb_vidq);
+}
+
+static int vivi_is_generating(struct vivi_dev *dev)
+{
+ return test_bit(0, &dev->generating);
}
/* ------------------------------------------------------------------
@@ -741,10 +634,9 @@ static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
static int
buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
- *size = fh->width*fh->height*2;
+ *size = dev->width * dev->height * 2;
if (0 == *count)
*count = 32;
@@ -760,49 +652,43 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
static void free_buffer(struct videobuf_queue *vq, struct vivi_buffer *buf)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
dprintk(dev, 1, "%s, state: %i\n", __func__, buf->vb.state);
- if (in_interrupt())
- BUG();
-
videobuf_vmalloc_free(&buf->vb);
dprintk(dev, 1, "free_buffer: freed\n");
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-#define norm_maxw() 1024
-#define norm_maxh() 768
static int
buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
int rc;
dprintk(dev, 1, "%s, field=%d\n", __func__, field);
- BUG_ON(NULL == fh->fmt);
+ BUG_ON(NULL == dev->fmt);
- if (fh->width < 48 || fh->width > norm_maxw() ||
- fh->height < 32 || fh->height > norm_maxh())
+ if (dev->width < 48 || dev->width > MAX_WIDTH ||
+ dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- buf->vb.size = fh->width*fh->height*2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ buf->vb.size = dev->width * dev->height * 2;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
/* These properties only change when queue is idle, see s_fmt */
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
+ buf->fmt = dev->fmt;
+ buf->vb.width = dev->width;
+ buf->vb.height = dev->height;
buf->vb.field = field;
- precalculate_bars(fh);
+ precalculate_bars(dev);
+ precalculate_line(dev);
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
@@ -811,7 +697,6 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
}
buf->vb.state = VIDEOBUF_PREPARED;
-
return 0;
fail:
@@ -822,9 +707,8 @@ fail:
static void
buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
struct vivi_dmaqueue *vidq = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
@@ -836,9 +720,8 @@ buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = (struct vivi_dev *)fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -858,16 +741,14 @@ static struct videobuf_queue_ops vivi_video_qops = {
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
strcpy(cap->driver, "vivi");
strcpy(cap->card, "vivi");
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
cap->version = VIVI_VERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
return 0;
}
@@ -889,28 +770,25 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
- f->fmt.pix.field = fh->vb_vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+ f->fmt.pix.field = dev->vb_vidq.field;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ (f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
- return (0);
+ return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_fmt *fmt;
enum v4l2_field field;
- unsigned int maxw, maxh;
fmt = get_format(f);
if (!fmt) {
@@ -928,113 +806,109 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- maxw = norm_maxw();
- maxh = norm_maxh();
-
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
return 0;
}
-/*FIXME: This seems to be generic enough to be at videodev2 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
- int ret = vidioc_try_fmt_vid_cap(file, fh, f);
+ int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
mutex_lock(&q->vb_lock);
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- dprintk(fh->dev, 1, "%s queue busy\n", __func__);
+ if (vivi_is_generating(dev)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
ret = -EBUSY;
goto out;
}
- fh->fmt = get_format(f);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- fh->vb_vidq.field = f->fmt.pix.field;
- fh->type = f->type;
-
+ dev->fmt = get_format(f);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ dev->vb_vidq.field = f->fmt.pix.field;
ret = 0;
out:
mutex_unlock(&q->vb_lock);
-
return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_reqbufs(&fh->vb_vidq, p));
+ return videobuf_reqbufs(&dev->vb_vidq, p);
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_querybuf(&fh->vb_vidq, p));
+ return videobuf_querybuf(&dev->vb_vidq, p);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_qbuf(&fh->vb_vidq, p));
+ return videobuf_qbuf(&dev->vb_vidq, p);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_dqbuf(&fh->vb_vidq, p,
- file->f_flags & O_NONBLOCK));
+ return videobuf_dqbuf(&dev->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8);
+ return videobuf_cgmbuf(&dev->vb_vidq, mbuf, 8);
}
#endif
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ ret = videobuf_streamon(&dev->vb_vidq);
+ if (ret)
+ return ret;
- return videobuf_streamon(&fh->vb_vidq);
+ vivi_start_generating(file);
+ return 0;
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
-
- return videobuf_streamoff(&fh->vb_vidq);
+ ret = videobuf_streamoff(&dev->vb_vidq);
+ if (!ret)
+ vivi_stop_generating(file);
+ return ret;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
@@ -1052,80 +926,104 @@ static int vidioc_enum_input(struct file *file, void *priv,
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = V4L2_STD_525_60;
sprintf(inp->name, "Camera %u", inp->index);
-
- return (0);
+ return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
*i = dev->input;
-
- return (0);
+ return 0;
}
+
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
if (i >= NUM_INPUTS)
return -EINVAL;
dev->input = i;
- precalculate_bars(fh);
-
- return (0);
+ precalculate_bars(dev);
+ precalculate_line(dev);
+ return 0;
}
- /* --- controls ---------------------------------------------- */
+/* --- controls ---------------------------------------------- */
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (qc->id && qc->id == vivi_qctrl[i].id) {
- memcpy(qc, &(vivi_qctrl[i]),
- sizeof(*qc));
- return (0);
- }
-
+ switch (qc->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 200);
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 16);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ }
return -EINVAL;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- ctrl->value = dev->qctl_regs[i];
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ ctrl->value = dev->volume;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = dev->brightness;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = dev->contrast;
+ return 0;
+ case V4L2_CID_SATURATION:
+ ctrl->value = dev->saturation;
+ return 0;
+ case V4L2_CID_HUE:
+ ctrl->value = dev->hue;
+ return 0;
+ }
return -EINVAL;
}
+
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- if (ctrl->value < vivi_qctrl[i].minimum ||
- ctrl->value > vivi_qctrl[i].maximum) {
- return -ERANGE;
- }
- dev->qctl_regs[i] = ctrl->value;
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_queryctrl qc;
+ int err;
+
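+ /* reuse vidioc_queryctrl() to obtain the valid range for this control */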
+ qc.id = ctrl->id;
+ err = vidioc_queryctrl(file, priv, &qc);
+ if (err < 0)
+ return err;
+ if (ctrl->value < qc.minimum || ctrl->value > qc.maximum)
+ return -ERANGE;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ dev->volume = ctrl->value;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ dev->brightness = ctrl->value;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ dev->contrast = ctrl->value;
+ return 0;
+ case V4L2_CID_SATURATION:
+ dev->saturation = ctrl->value;
+ return 0;
+ case V4L2_CID_HUE:
+ dev->hue = ctrl->value;
+ return 0;
+ }
return -EINVAL;
}
@@ -1133,134 +1031,58 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
File operations for the device
------------------------------------------------------------------*/
-static int vivi_open(struct file *file)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct vivi_fh *fh = NULL;
- int retval = 0;
-
- mutex_lock(&dev->mutex);
- dev->users++;
-
- if (dev->users > 1) {
- dev->users--;
- mutex_unlock(&dev->mutex);
- return -EBUSY;
- }
-
- dprintk(dev, 1, "open %s type=%s users=%d\n",
- video_device_node_name(dev->vfd),
- v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users--;
- retval = -ENOMEM;
- }
- mutex_unlock(&dev->mutex);
-
- if (retval)
- return retval;
-
- file->private_data = fh;
- fh->dev = dev;
-
- fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fh->fmt = &formats[0];
- fh->width = 640;
- fh->height = 480;
-
- /* Resets frame counters */
- dev->h = 0;
- dev->m = 0;
- dev->s = 0;
- dev->ms = 0;
- dev->mv_count = 0;
- dev->jiffies = jiffies;
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &vivi_video_qops,
- NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), fh);
-
- vivi_start_thread(fh);
-
- return 0;
-}
-
static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
- struct vivi_fh *fh = file->private_data;
+ struct vivi_dev *dev = video_drvdata(file);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
+ vivi_start_generating(file);
+ return videobuf_read_stream(&dev->vb_vidq, data, count, ppos, 0,
file->f_flags & O_NONBLOCK);
- }
- return 0;
}
static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
dprintk(dev, 1, "%s\n", __func__);
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
-
+ vivi_start_generating(file);
return videobuf_poll_stream(file, q, wait);
}
static int vivi_close(struct file *file)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct vivi_dmaqueue *vidq = &dev->vidq;
struct video_device *vdev = video_devdata(file);
+ struct vivi_dev *dev = video_drvdata(file);
- vivi_stop_thread(vidq);
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- kfree(fh);
-
- mutex_lock(&dev->mutex);
- dev->users--;
- mutex_unlock(&dev->mutex);
-
- dprintk(dev, 1, "close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users);
+ vivi_stop_generating(file);
+ dprintk(dev, 1, "close called (dev=%s)\n",
+ video_device_node_name(vdev));
return 0;
}
static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
int ret;
dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ ret = videobuf_mmap_mapper(&dev->vb_vidq, vma);
dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
ret);
-
return ret;
}
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
- .open = vivi_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1282,11 +1104,11 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
@@ -1330,7 +1152,7 @@ static int __init vivi_create_instance(int inst)
{
struct vivi_dev *dev;
struct video_device *vfd;
- int ret, i;
+ int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1342,6 +1164,20 @@ static int __init vivi_create_instance(int inst)
if (ret)
goto free_dev;
+ dev->fmt = &formats[0];
+ dev->width = 640;
+ dev->height = 480;
+ dev->volume = 200;
+ dev->brightness = 127;
+ dev->contrast = 16;
+ dev->saturation = 127;
+ dev->hue = 0;
+
+ videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
+ NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct vivi_buffer), dev);
+
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
@@ -1357,6 +1193,7 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
@@ -1364,10 +1201,6 @@ static int __init vivi_create_instance(int inst)
video_set_drvdata(vfd, dev);
- /* Set all controls to their default value. */
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- dev->qctl_regs[i] = vivi_qctrl[i].default_value;
-
/* Now that everything is fine, let's add it to device list */
list_add_tail(&dev->vivi_devlist, &vivi_devlist);
@@ -1396,8 +1229,15 @@ free_dev:
*/
static int __init vivi_init(void)
{
+ const struct font_desc *font = find_font("VGA8x16");
int ret = 0, i;
+ if (font == NULL) {
+ printk(KERN_ERR "vivi: could not find font\n");
+ return -ENODEV;
+ }
+ font8x16 = font->data;
+
if (n_devs <= 0)
n_devs = 1;
@@ -1412,7 +1252,7 @@ static int __init vivi_init(void)
}
if (ret < 0) {
- printk(KERN_INFO "Error %d while loading vivi driver\n", ret);
+ printk(KERN_ERR "vivi: error %d while loading driver\n", ret);
return ret;
}
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index bf9bf650a31..635420d8d84 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -1,7 +1,7 @@
/*
Winbond w9966cf Webcam parport driver.
- Version 0.32
+ Version 0.33
Copyright (C) 2001 Jakob Kemi <jakob.kemi@post.utfors.se>
@@ -57,10 +57,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
#include <linux/parport.h>
/*#define DEBUG*/ /* Undef me for production */
@@ -76,12 +78,12 @@
*/
#define W9966_DRIVERNAME "W9966CF Webcam"
-#define W9966_MAXCAMS 4 // Maximum number of cameras
-#define W9966_RBUFFER 2048 // Read buffer (must be an even number)
-#define W9966_SRAMSIZE 131072 // 128kb
-#define W9966_SRAMID 0x02 // check w9966cf.pdf
+#define W9966_MAXCAMS 4 /* Maximum number of cameras */
+#define W9966_RBUFFER 2048 /* Read buffer (must be an even number) */
+#define W9966_SRAMSIZE 131072 /* 128kb */
+#define W9966_SRAMID 0x02 /* check w9966cf.pdf */
-// Empirically determined window limits
+/* Empirically determined window limits */
#define W9966_WND_MIN_X 16
#define W9966_WND_MIN_Y 14
#define W9966_WND_MAX_X 705
@@ -89,7 +91,7 @@
#define W9966_WND_MAX_W (W9966_WND_MAX_X - W9966_WND_MIN_X)
#define W9966_WND_MAX_H (W9966_WND_MAX_Y - W9966_WND_MIN_Y)
-// Keep track of our current state
+/* Keep track of our current state */
#define W9966_STATE_PDEV 0x01
#define W9966_STATE_CLAIMED 0x02
#define W9966_STATE_VDEV 0x04
@@ -101,12 +103,13 @@
#define W9966_I2C_W_DATA 0x02
#define W9966_I2C_W_CLOCK 0x01
-struct w9966_dev {
+struct w9966 {
+ struct v4l2_device v4l2_dev;
unsigned char dev_state;
unsigned char i2c_state;
unsigned short ppmode;
- struct parport* pport;
- struct pardevice* pdev;
+ struct parport *pport;
+ struct pardevice *pdev;
struct video_device vdev;
unsigned short width;
unsigned short height;
@@ -114,7 +117,7 @@ struct w9966_dev {
signed char contrast;
signed char color;
signed char hue;
- unsigned long in_use;
+ struct mutex lock;
};
/*
@@ -127,15 +130,15 @@ MODULE_LICENSE("GPL");
#ifdef MODULE
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = ""};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
#else
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
#endif
module_param_array(pardev, charp, NULL, 0);
-MODULE_PARM_DESC(pardev, "pardev: where to search for\n\
-\teach camera. 'aggressive' means brute-force search.\n\
-\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n\
-\tcam 1 to parport3 and search every parport for cam 2 etc...");
+MODULE_PARM_DESC(pardev, "pardev: where to search for\n"
+ "\teach camera. 'aggressive' means brute-force search.\n"
+ "\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n"
+ "\tcam 1 to parport3 and search every parport for cam 2 etc...");
static int parmode;
module_param(parmode, int, 0);
@@ -144,117 +147,49 @@ MODULE_PARM_DESC(parmode, "parmode: transfer mode (0=auto, 1=ecp, 2=epp");
static int video_nr = -1;
module_param(video_nr, int, 0);
-/*
- * Private data
- */
-
-static struct w9966_dev w9966_cams[W9966_MAXCAMS];
-
-/*
- * Private function declares
- */
-
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val);
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val);
-static inline void w9966_pdev_claim(struct w9966_dev *vdev);
-static inline void w9966_pdev_release(struct w9966_dev *vdev);
-
-static int w9966_rReg(struct w9966_dev* cam, int reg);
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data);
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg);
-#endif
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data);
-static int w9966_findlen(int near, int size, int maxlen);
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor);
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h);
-
-static int w9966_init(struct w9966_dev* cam, struct parport* port);
-static void w9966_term(struct w9966_dev* cam);
-
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_getsda(struct w9966_dev* cam);
-static inline int w9966_i2c_getscl(struct w9966_dev* cam);
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data);
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam);
-#endif
-
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg);
-static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static int w9966_exclusive_open(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- return test_and_set_bit(0, &cam->in_use) ? -EBUSY : 0;
-}
-
-static int w9966_exclusive_release(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- clear_bit(0, &cam->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations w9966_fops = {
- .owner = THIS_MODULE,
- .open = w9966_exclusive_open,
- .release = w9966_exclusive_release,
- .ioctl = w9966_v4l_ioctl,
- .read = w9966_v4l_read,
-};
-static struct video_device w9966_template = {
- .name = W9966_DRIVERNAME,
- .fops = &w9966_fops,
- .release = video_device_release_empty,
-};
+static struct w9966 w9966_cams[W9966_MAXCAMS];
/*
* Private function defines
*/
-// Set camera phase flags, so we know what to uninit when terminating
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val)
+/* Set camera phase flags, so we know what to uninit when terminating */
+static inline void w9966_set_state(struct w9966 *cam, int mask, int val)
{
cam->dev_state = (cam->dev_state & ~mask) ^ val;
}
-// Get camera phase flags
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val)
+/* Get camera phase flags */
+static inline int w9966_get_state(struct w9966 *cam, int mask, int val)
{
return ((cam->dev_state & mask) == val);
}
-// Claim parport for ourself
-static inline void w9966_pdev_claim(struct w9966_dev* cam)
+/* Claim parport for ourself */
+static void w9966_pdev_claim(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
return;
parport_claim_or_block(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
}
-// Release parport for others to use
-static inline void w9966_pdev_release(struct w9966_dev* cam)
+/* Release parport for others to use */
+static void w9966_pdev_release(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, 0))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, 0))
return;
parport_release(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, 0);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, 0);
}
-// Read register from W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error, else register data (byte)
-static int w9966_rReg(struct w9966_dev* cam, int reg)
+/* Read register from W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error, else register data (byte) */
+static int w9966_read_reg(struct w9966 *cam, int reg)
{
- // ECP, read, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, read, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0x80 | (reg & 0x1f);
unsigned char val;
@@ -270,12 +205,12 @@ static int w9966_rReg(struct w9966_dev* cam, int reg)
return val;
}
-// Write register to W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
+/* Write register to W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error */
+static int w9966_write_reg(struct w9966 *cam, int reg, int data)
{
- // ECP, write, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, write, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0xc0 | (reg & 0x1f);
const unsigned char val = data;
@@ -291,119 +226,186 @@ static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
return 0;
}
-// Initialize camera device. Setup all internal flags, set a
-// default video mode, setup ccd-chip, register v4l device etc..
-// Also used for 'probing' of hardware.
-// -1 on error
-static int w9966_init(struct w9966_dev* cam, struct parport* port)
+/*
+ * Ugly and primitive i2c protocol functions
+ */
+
+/* Sets the data line on the i2c bus.
+ Expects a claimed pdev. */
+static void w9966_i2c_setsda(struct w9966 *cam, int state)
{
- if (cam->dev_state != 0)
- return -1;
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_DATA;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_DATA;
- cam->pport = port;
- cam->brightness = 128;
- cam->contrast = 64;
- cam->color = 64;
- cam->hue = 0;
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+}
-// Select requested transfer mode
- switch(parmode)
- {
- default: // Auto-detect (priority: hw-ecp, hw-epp, sw-ecp)
- case 0:
- if (port->modes & PARPORT_MODE_ECP)
- cam->ppmode = IEEE1284_MODE_ECP;
- else if (port->modes & PARPORT_MODE_EPP)
- cam->ppmode = IEEE1284_MODE_EPP;
- else
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 1: // hw- or sw-ecp
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 2: // hw- or sw-epp
- cam->ppmode = IEEE1284_MODE_EPP;
- break;
+/* Get peripheral clock line
+ Expects a claimed pdev. */
+static int w9966_i2c_getscl(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_CLOCK) > 0);
+}
+
+/* Sets the clock line on the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_setscl(struct w9966 *cam, int state)
+{
+ unsigned long timeout;
+
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_CLOCK;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_CLOCK;
+
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+
+ /* when we drive the clock high, we also expect the peripheral to ack. */
+ if (state) {
+ timeout = jiffies + 100;
+ while (!w9966_i2c_getscl(cam)) {
+ if (time_after(jiffies, timeout))
+ return -1;
+ }
}
+ return 0;
+}
-// Tell the parport driver that we exists
- cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
- if (cam->pdev == NULL) {
- DPRINTF("parport_register_device() failed\n");
- return -1;
+#if 0
+/* Get peripheral data line
+ Expects a claimed pdev. */
+static int w9966_i2c_getsda(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_DATA) > 0);
+}
+#endif
+
+/* Write a byte with ack to the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_wbyte(struct w9966 *cam, int data)
+{
+ int i;
+
+ for (i = 7; i >= 0; i--) {
+ w9966_i2c_setsda(cam, (data >> i) & 0x01);
+
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setscl(cam, 0);
}
- w9966_setState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
- w9966_pdev_claim(cam);
+ w9966_i2c_setsda(cam, 1);
-// Setup a default capture mode
- if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
- DPRINTF("w9966_setup() failed.\n");
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setscl(cam, 0);
+
+ return 0;
+}
+
+/* Read a data byte with ack from the i2c-bus
+ Expects a claimed pdev. -1 on error */
+#if 0
+static int w9966_i2c_rbyte(struct w9966 *cam)
+{
+ unsigned char data = 0x00;
+ int i;
+
+ w9966_i2c_setsda(cam, 1);
+
+ for (i = 0; i < 8; i++) {
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ data = data << 1;
+ if (w9966_i2c_getsda(cam))
+ data |= 0x01;
+
+ w9966_i2c_setscl(cam, 0);
}
+ return data;
+}
+#endif
- w9966_pdev_release(cam);
+/* Read a register from the i2c device.
+ Expects claimed pdev. -1 on error */
+#if 0
+static int w9966_read_reg_i2c(struct w9966 *cam, int reg)
+{
+ int data;
-// Fill in the video_device struct and register us to v4l
- memcpy(&cam->vdev, &w9966_template, sizeof(struct video_device));
- video_set_drvdata(&cam->vdev, cam);
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- w9966_setState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+ if (w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1)
+ return -1;
+ data = w9966_i2c_rbyte(cam);
+ if (data == -1)
+ return -1;
- // All ok
- printk(
- "w9966cf: Found and initialized a webcam on %s.\n",
- cam->pport->name
- );
- return 0;
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setsda(cam, 1);
-// Terminate everything gracefully
-static void w9966_term(struct w9966_dev* cam)
+ return data;
+}
+#endif
+
+/* Write a register to the i2c device.
+ Expects claimed pdev. -1 on error */
+static int w9966_write_reg_i2c(struct w9966 *cam, int reg, int data)
{
-// Unregister from v4l
- if (w9966_getState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
- video_unregister_device(&cam->vdev);
- w9966_setState(cam, W9966_STATE_VDEV, 0);
- }
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
-// Terminate from IEEE1284 mode and release pdev block
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- w9966_pdev_claim(cam);
- parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
- w9966_pdev_release(cam);
- }
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1 ||
+ w9966_i2c_wbyte(cam, data) == -1)
+ return -1;
-// Unregister from parport
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- parport_unregister_device(cam->pdev);
- w9966_setState(cam, W9966_STATE_PDEV, 0);
- }
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ return 0;
+}
-// Find a good length for capture window (used both for W and H)
-// A bit ugly but pretty functional. The capture length
-// have to match the downscale
+/* Find a good length for capture window (used both for W and H)
+ A bit ugly but pretty functional. The capture length
+ has to match the downscale */
static int w9966_findlen(int near, int size, int maxlen)
{
int bestlen = size;
int besterr = abs(near - bestlen);
int len;
- for(len = size+1;len < maxlen;len++)
- {
+ for (len = size + 1; len < maxlen; len++) {
int err;
- if ( ((64*size) %len) != 0)
+ if (((64 * size) % len) != 0)
continue;
err = abs(near - len);
- // Only continue as long as we keep getting better values
+ /* Only continue as long as we keep getting better values */
if (err > besterr)
break;
@@ -414,32 +416,32 @@ static int w9966_findlen(int near, int size, int maxlen)
return bestlen;
}
-// Modify capture window (if necessary)
-// and calculate downscaling
-// Return -1 on error
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor)
+/* Modify capture window (if necessary)
+ and calculate downscaling
+ Return -1 on error */
+static int w9966_calcscale(int size, int min, int max, int *beg, int *end, unsigned char *factor)
{
int maxlen = max - min;
int len = *end - *beg + 1;
int newlen = w9966_findlen(len, size, maxlen);
int err = newlen - len;
- // Check for bad format
+ /* Check for bad format */
if (newlen > maxlen || newlen < size)
return -1;
- // Set factor (6 bit fixed)
- *factor = (64*size) / newlen;
+ /* Set factor (6 bit fixed) */
+ *factor = (64 * size) / newlen;
if (*factor == 64)
- *factor = 0x00; // downscale is disabled
+ *factor = 0x00; /* downscale is disabled */
else
- *factor |= 0x80; // set downscale-enable bit
+ *factor |= 0x80; /* set downscale-enable bit */
- // Modify old beginning and end
+ /* Modify old beginning and end */
*beg -= err / 2;
*end += err - (err / 2);
- // Move window if outside borders
+ /* Move window if outside borders */
if (*beg < min) {
*end += min - *beg;
*beg += min - *beg;
@@ -452,10 +454,10 @@ static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsig
return 0;
}
-// Setup the cameras capture window etc.
-// Expects a claimed pdev
-// return -1 on error
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h)
+/* Setup the cameras capture window etc.
+ Expects a claimed pdev
+ return -1 on error */
+static int w9966_setup(struct w9966 *cam, int x1, int y1, int x2, int y2, int w, int h)
{
unsigned int i;
unsigned int enh_s, enh_e;
@@ -469,443 +471,314 @@ static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, in
};
- if (w*h*2 > W9966_SRAMSIZE)
- {
+ if (w * h * 2 > W9966_SRAMSIZE) {
DPRINTF("capture window exceeds SRAM size!.\n");
- w = 200; h = 160; // Pick default values
+ w = 200; h = 160; /* Pick default values */
}
w &= ~0x1;
- if (w < 2) w = 2;
- if (h < 1) h = 1;
- if (w > W9966_WND_MAX_W) w = W9966_WND_MAX_W;
- if (h > W9966_WND_MAX_H) h = W9966_WND_MAX_H;
+ if (w < 2)
+ w = 2;
+ if (h < 1)
+ h = 1;
+ if (w > W9966_WND_MAX_W)
+ w = W9966_WND_MAX_W;
+ if (h > W9966_WND_MAX_H)
+ h = W9966_WND_MAX_H;
cam->width = w;
cam->height = h;
enh_s = 0;
- enh_e = w*h*2;
-
-// Modify capture window if necessary and calculate downscaling
- if (
- w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
- w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0
- ) return -1;
-
- DPRINTF(
- "%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
- w, h, x1, x2, y1, y2, scale_x&~0x80, scale_y&~0x80
- );
-
-// Setup registers
- regs[0x00] = 0x00; // Set normal operation
- regs[0x01] = 0x18; // Capture mode
- regs[0x02] = scale_y; // V-scaling
- regs[0x03] = scale_x; // H-scaling
-
- // Capture window
- regs[0x04] = (x1 & 0x0ff); // X-start (8 low bits)
- regs[0x05] = (x1 & 0x300)>>8; // X-start (2 high bits)
- regs[0x06] = (y1 & 0x0ff); // Y-start (8 low bits)
- regs[0x07] = (y1 & 0x300)>>8; // Y-start (2 high bits)
- regs[0x08] = (x2 & 0x0ff); // X-end (8 low bits)
- regs[0x09] = (x2 & 0x300)>>8; // X-end (2 high bits)
- regs[0x0a] = (y2 & 0x0ff); // Y-end (8 low bits)
-
- regs[0x0c] = W9966_SRAMID; // SRAM-banks (1x 128kb)
-
- // Enhancement layer
- regs[0x0d] = (enh_s& 0x000ff); // Enh. start (0-7)
- regs[0x0e] = (enh_s& 0x0ff00)>>8; // Enh. start (8-15)
- regs[0x0f] = (enh_s& 0x70000)>>16; // Enh. start (16-17/18??)
- regs[0x10] = (enh_e& 0x000ff); // Enh. end (0-7)
- regs[0x11] = (enh_e& 0x0ff00)>>8; // Enh. end (8-15)
- regs[0x12] = (enh_e& 0x70000)>>16; // Enh. end (16-17/18??)
-
- // Misc
- regs[0x13] = 0x40; // VEE control (raw 4:2:2)
- regs[0x17] = 0x00; // ???
- regs[0x18] = cam->i2c_state = 0x00; // Serial bus
- regs[0x19] = 0xff; // I/O port direction control
- regs[0x1a] = 0xff; // I/O port data register
- regs[0x1b] = 0x10; // ???
-
- // SAA7111 chip settings
+ enh_e = w * h * 2;
+
+ /* Modify capture window if necessary and calculate downscaling */
+ if (w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
+ w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0)
+ return -1;
+
+ DPRINTF("%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
+ w, h, x1, x2, y1, y2, scale_x & ~0x80, scale_y & ~0x80);
+
+ /* Setup registers */
+ regs[0x00] = 0x00; /* Set normal operation */
+ regs[0x01] = 0x18; /* Capture mode */
+ regs[0x02] = scale_y; /* V-scaling */
+ regs[0x03] = scale_x; /* H-scaling */
+
+ /* Capture window */
+ regs[0x04] = (x1 & 0x0ff); /* X-start (8 low bits) */
+ regs[0x05] = (x1 & 0x300)>>8; /* X-start (2 high bits) */
+ regs[0x06] = (y1 & 0x0ff); /* Y-start (8 low bits) */
+ regs[0x07] = (y1 & 0x300)>>8; /* Y-start (2 high bits) */
+ regs[0x08] = (x2 & 0x0ff); /* X-end (8 low bits) */
+ regs[0x09] = (x2 & 0x300)>>8; /* X-end (2 high bits) */
+ regs[0x0a] = (y2 & 0x0ff); /* Y-end (8 low bits) */
+
+ regs[0x0c] = W9966_SRAMID; /* SRAM-banks (1x 128kb) */
+
+ /* Enhancement layer */
+ regs[0x0d] = (enh_s & 0x000ff); /* Enh. start (0-7) */
+ regs[0x0e] = (enh_s & 0x0ff00) >> 8; /* Enh. start (8-15) */
+ regs[0x0f] = (enh_s & 0x70000) >> 16; /* Enh. start (16-17/18??) */
+ regs[0x10] = (enh_e & 0x000ff); /* Enh. end (0-7) */
+ regs[0x11] = (enh_e & 0x0ff00) >> 8; /* Enh. end (8-15) */
+ regs[0x12] = (enh_e & 0x70000) >> 16; /* Enh. end (16-17/18??) */
+
+ /* Misc */
+ regs[0x13] = 0x40; /* VEE control (raw 4:2:2) */
+ regs[0x17] = 0x00; /* ??? */
+ regs[0x18] = cam->i2c_state = 0x00; /* Serial bus */
+ regs[0x19] = 0xff; /* I/O port direction control */
+ regs[0x1a] = 0xff; /* I/O port data register */
+ regs[0x1b] = 0x10; /* ??? */
+
+ /* SAA7111 chip settings */
saa7111_regs[0x0a] = cam->brightness;
saa7111_regs[0x0b] = cam->contrast;
saa7111_regs[0x0c] = cam->color;
saa7111_regs[0x0d] = cam->hue;
-// Reset (ECP-fifo & serial-bus)
- if (w9966_wReg(cam, 0x00, 0x03) == -1)
+ /* Reset (ECP-fifo & serial-bus) */
+ if (w9966_write_reg(cam, 0x00, 0x03) == -1)
return -1;
-// Write regs to w9966cf chip
+ /* Write regs to w9966cf chip */
for (i = 0; i < 0x1c; i++)
- if (w9966_wReg(cam, i, regs[i]) == -1)
+ if (w9966_write_reg(cam, i, regs[i]) == -1)
return -1;
-// Write regs to saa7111 chip
+ /* Write regs to saa7111 chip */
for (i = 0; i < 0x20; i++)
- if (w9966_wReg_i2c(cam, i, saa7111_regs[i]) == -1)
+ if (w9966_write_reg_i2c(cam, i, saa7111_regs[i]) == -1)
return -1;
return 0;
}
/*
- * Ugly and primitive i2c protocol functions
+ * Video4linux interfacing
*/
-// Sets the data line on the i2c bus.
-// Expects a claimed pdev.
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state)
+static int cam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- if (state)
- cam->i2c_state |= W9966_I2C_W_DATA;
- else
- cam->i2c_state &= ~W9966_I2C_W_DATA;
+ struct w9966 *cam = video_drvdata(file);
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
+ strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 33, 0);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
}
-// Get peripheral clock line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getscl(struct w9966_dev* cam)
+static int cam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_CLOCK) > 0);
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = 0;
+ vin->status = 0;
+ return 0;
}
-// Sets the clock line on the i2c bus.
-// Expects a claimed pdev. -1 on error
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state)
+static int cam_g_input(struct file *file, void *fh, unsigned int *inp)
{
- unsigned long timeout;
-
- if (state)
- cam->i2c_state |= W9966_I2C_W_CLOCK;
- else
- cam->i2c_state &= ~W9966_I2C_W_CLOCK;
-
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
-
- // we go to high, we also expect the peripheral to ack.
- if (state) {
- timeout = jiffies + 100;
- while (!w9966_i2c_getscl(cam)) {
- if (time_after(jiffies, timeout))
- return -1;
- }
- }
+ *inp = 0;
return 0;
}
-// Get peripheral data line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getsda(struct w9966_dev* cam)
+static int cam_s_input(struct file *file, void *fh, unsigned int inp)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_DATA) > 0);
+ return (inp > 0) ? -EINVAL : 0;
}
-// Write a byte with ack to the i2c bus.
-// Expects a claimed pdev. -1 on error
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data)
+static int cam_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
{
- int i;
- for (i = 7; i >= 0; i--)
- {
- w9966_i2c_setsda(cam, (data >> i) & 0x01);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
}
-
- w9966_i2c_setsda(cam, 1);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
-
- return 0;
+ return -EINVAL;
}
-// Read a data byte with ack from the i2c-bus
-// Expects a claimed pdev. -1 on error
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam)
+static int cam_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- unsigned char data = 0x00;
- int i;
-
- w9966_i2c_setsda(cam, 1);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- for (i = 0; i < 8; i++)
- {
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- data = data << 1;
- if (w9966_i2c_getsda(cam))
- data |= 0x01;
-
- w9966_i2c_setscl(cam, 0);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = cam->brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = cam->contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = cam->color;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = cam->hue;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return data;
+ return ret;
}
-#endif
-// Read a register from the i2c device.
-// Expects claimed pdev. -1 on error
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg)
+static int cam_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- int data;
-
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 1);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1 ||
- (data = w9966_i2c_rbyte(cam)) == -1
- )
- return -1;
+ mutex_lock(&cam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ cam->brightness = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ cam->contrast = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ cam->color = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ cam->hue = ctrl->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- w9966_i2c_setsda(cam, 0);
+ if (ret == 0) {
+ w9966_pdev_claim(cam);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 1);
+ if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1) {
+ ret = -EIO;
+ }
- return data;
+ w9966_pdev_release(cam);
+ }
+ mutex_unlock(&cam->lock);
+ return ret;
}
-#endif
-// Write a register to the i2c device.
-// Expects claimed pdev. -1 on error
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data)
+static int cam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1 ||
- w9966_i2c_wbyte(cam, data) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 0);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 1);
-
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = cam->width;
+ pix->height = cam->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * cam->width;
+ pix->sizeimage = 2 * cam->width * cam->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
-/*
- * Video4linux interfacing
- */
-
-static long w9966_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- switch(cmd)
- {
- case VIDIOCGCAP:
- {
- static struct video_capability vcap = {
- .name = W9966_DRIVERNAME,
- .type = VID_TYPE_CAPTURE | VID_TYPE_SCALES,
- .channels = 1,
- .maxwidth = W9966_WND_MAX_W,
- .maxheight = W9966_WND_MAX_H,
- .minwidth = 2,
- .minheight = 1,
- };
- struct video_capability *cap = arg;
- *cap = vcap;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0) // We only support one channel (#0)
- return -EINVAL;
- memset(vch,0,sizeof(*vch));
- strcpy(vch->name, "CCD-input");
- vch->type = VIDEO_TYPE_CAMERA;
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *vtune = arg;
- if(vtune->tuner != 0)
- return -EINVAL;
- strcpy(vtune->name, "no tuner");
- vtune->rangelow = 0;
- vtune->rangehigh = 0;
- vtune->flags = VIDEO_TUNER_NORM;
- vtune->mode = VIDEO_MODE_AUTO;
- vtune->signal = 0xffff;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *vtune = arg;
- if (vtune->tuner != 0)
- return -EINVAL;
- if (vtune->mode != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture vpic = {
- cam->brightness << 8, // brightness
- (cam->hue + 128) << 8, // hue
- cam->color << 9, // color
- cam->contrast << 9, // contrast
- 0x8000, // whiteness
- 16, VIDEO_PALETTE_YUV422// bpp, palette format
- };
- struct video_picture *pic = arg;
- *pic = vpic;
- return 0;
- }
- case VIDIOCSPICT:
- {
- struct video_picture *vpic = arg;
- if (vpic->depth != 16 || (vpic->palette != VIDEO_PALETTE_YUV422 && vpic->palette != VIDEO_PALETTE_YUYV))
- return -EINVAL;
-
- cam->brightness = vpic->brightness >> 8;
- cam->hue = (vpic->hue >> 8) - 128;
- cam->color = vpic->colour >> 9;
- cam->contrast = vpic->contrast >> 9;
+static int cam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->width < 2)
+ pix->width = 2;
+ if (pix->height < 1)
+ pix->height = 1;
+ if (pix->width > W9966_WND_MAX_W)
+ pix->width = W9966_WND_MAX_W;
+ if (pix->height > W9966_WND_MAX_H)
+ pix->height = W9966_WND_MAX_H;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * pix->width;
+ pix->sizeimage = 2 * pix->width * pix->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
- w9966_pdev_claim(cam);
+static int cam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = cam_try_fmt_vid_cap(file, fh, fmt);
- if (
- w9966_wReg_i2c(cam, 0x0a, cam->brightness) == -1 ||
- w9966_wReg_i2c(cam, 0x0b, cam->contrast) == -1 ||
- w9966_wReg_i2c(cam, 0x0c, cam->color) == -1 ||
- w9966_wReg_i2c(cam, 0x0d, cam->hue) == -1
- ) {
- w9966_pdev_release(cam);
- return -EIO;
- }
+ if (ret)
+ return ret;
- w9966_pdev_release(cam);
- return 0;
- }
- case VIDIOCSWIN:
- {
- int ret;
- struct video_window *vwin = arg;
-
- if (vwin->flags != 0)
- return -EINVAL;
- if (vwin->clipcount != 0)
- return -EINVAL;
- if (vwin->width < 2 || vwin->width > W9966_WND_MAX_W)
- return -EINVAL;
- if (vwin->height < 1 || vwin->height > W9966_WND_MAX_H)
- return -EINVAL;
-
- // Update camera regs
- w9966_pdev_claim(cam);
- ret = w9966_setup(cam, 0, 0, 1023, 1023, vwin->width, vwin->height);
- w9966_pdev_release(cam);
+ mutex_lock(&cam->lock);
+ /* Update camera regs */
+ w9966_pdev_claim(cam);
+ ret = w9966_setup(cam, 0, 0, 1023, 1023, pix->width, pix->height);
+ w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
- if (ret != 0) {
- DPRINTF("VIDIOCSWIN: w9966_setup() failed.\n");
- return -EIO;
- }
+static int cam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
+{
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2", V4L2_PIX_FMT_YUYV,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vwin = arg;
- memset(vwin, 0, sizeof(*vwin));
- vwin->width = cam->width;
- vwin->height = cam->height;
- return 0;
- }
- // Unimplemented
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCKEY:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
+ if (fmt->index > 0)
return -EINVAL;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, w9966_v4l_do_ioctl);
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
-// Capture data
+/* Capture data */
static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
- struct w9966_dev *cam = video_drvdata(file);
- unsigned char addr = 0xa0; // ECP, read, CCD-transfer, 00000
+ struct w9966 *cam = video_drvdata(file);
+ unsigned char addr = 0xa0; /* ECP, read, CCD-transfer, 00000 */
unsigned char __user *dest = (unsigned char __user *)buf;
unsigned long dleft = count;
unsigned char *tbuf;
- // Why would anyone want more than this??
+ /* Why would anyone want more than this?? */
if (count > cam->width * cam->height * 2)
return -EINVAL;
+ mutex_lock(&cam->lock);
w9966_pdev_claim(cam);
- w9966_wReg(cam, 0x00, 0x02); // Reset ECP-FIFO buffer
- w9966_wReg(cam, 0x00, 0x00); // Return to normal operation
- w9966_wReg(cam, 0x01, 0x98); // Enable capture
-
- // write special capture-addr and negotiate into data transfer
- if (
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0 )||
- (parport_write(cam->pport, &addr, 1) != 1 )||
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0 )
- ) {
+ w9966_write_reg(cam, 0x00, 0x02); /* Reset ECP-FIFO buffer */
+ w9966_write_reg(cam, 0x00, 0x00); /* Return to normal operation */
+ w9966_write_reg(cam, 0x01, 0x98); /* Enable capture */
+
+ /* write special capture-addr and negotiate into data transfer */
+ if ((parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0) ||
+ (parport_write(cam->pport, &addr, 1) != 1) ||
+ (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0)) {
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return -EFAULT;
}
@@ -915,8 +788,7 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
goto out;
}
- while(dleft > 0)
- {
+ while (dleft > 0) {
unsigned long tsize = (dleft > W9966_RBUFFER) ? W9966_RBUFFER : dleft;
if (parport_read(cam->pport, tbuf, tsize) < tsize) {
@@ -931,43 +803,167 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
dleft -= tsize;
}
- w9966_wReg(cam, 0x01, 0x18); // Disable capture
+ w9966_write_reg(cam, 0x01, 0x18); /* Disable capture */
out:
kfree(tbuf);
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return count;
}
+static const struct v4l2_file_operations w9966_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .read = w9966_v4l_read,
+};
+
+static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
+ .vidioc_querycap = cam_querycap,
+ .vidioc_g_input = cam_g_input,
+ .vidioc_s_input = cam_s_input,
+ .vidioc_enum_input = cam_enum_input,
+ .vidioc_queryctrl = cam_queryctrl,
+ .vidioc_g_ctrl = cam_g_ctrl,
+ .vidioc_s_ctrl = cam_s_ctrl,
+ .vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
+};
+
-// Called once for every parport on init
+/* Initialize the camera device. Set up all internal flags, set a
+ default video mode, set up the ccd-chip, register the v4l device, etc.
+ Also used for 'probing' of hardware.
+ -1 on error */
+static int w9966_init(struct w9966 *cam, struct parport *port)
+{
+ struct v4l2_device *v4l2_dev = &cam->v4l2_dev;
+
+ if (cam->dev_state != 0)
+ return -1;
+
+ strlcpy(v4l2_dev->name, "w9966", sizeof(v4l2_dev->name));
+
+ if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return -1;
+ }
+ cam->pport = port;
+ cam->brightness = 128;
+ cam->contrast = 64;
+ cam->color = 64;
+ cam->hue = 0;
+
+ /* Select requested transfer mode */
+ switch (parmode) {
+ default: /* Auto-detect (priority: hw-ecp, hw-epp, sw-ecp) */
+ case 0:
+ if (port->modes & PARPORT_MODE_ECP)
+ cam->ppmode = IEEE1284_MODE_ECP;
+ else if (port->modes & PARPORT_MODE_EPP)
+ cam->ppmode = IEEE1284_MODE_EPP;
+ else
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 1: /* hw- or sw-ecp */
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 2: /* hw- or sw-epp */
+ cam->ppmode = IEEE1284_MODE_EPP;
+ break;
+ }
+
+ /* Tell the parport driver that we exist */
+ cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
+ if (cam->pdev == NULL) {
+ DPRINTF("parport_register_device() failed\n");
+ return -1;
+ }
+ w9966_set_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
+
+ w9966_pdev_claim(cam);
+
+ /* Set up a default capture mode */
+ if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
+ DPRINTF("w9966_setup() failed.\n");
+ return -1;
+ }
+
+ w9966_pdev_release(cam);
+
+ /* Fill in the video_device struct and register us to v4l */
+ strlcpy(cam->vdev.name, W9966_DRIVERNAME, sizeof(cam->vdev.name));
+ cam->vdev.v4l2_dev = v4l2_dev;
+ cam->vdev.fops = &w9966_fops;
+ cam->vdev.ioctl_ops = &w9966_ioctl_ops;
+ cam->vdev.release = video_device_release_empty;
+ video_set_drvdata(&cam->vdev, cam);
+
+ mutex_init(&cam->lock);
+
+ if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ return -1;
+
+ w9966_set_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+
+ /* All ok */
+ v4l2_info(v4l2_dev, "Found and initialized a webcam on %s.\n",
+ cam->pport->name);
+ return 0;
+}
+
+
+/* Terminate everything gracefully */
+static void w9966_term(struct w9966 *cam)
+{
+ /* Unregister from v4l */
+ if (w9966_get_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
+ video_unregister_device(&cam->vdev);
+ w9966_set_state(cam, W9966_STATE_VDEV, 0);
+ }
+
+ /* Terminate from IEEE1284 mode and release pdev block */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ w9966_pdev_claim(cam);
+ parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
+ w9966_pdev_release(cam);
+ }
+
+ /* Unregister from parport */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ parport_unregister_device(cam->pdev);
+ w9966_set_state(cam, W9966_STATE_PDEV, 0);
+ }
+}
+
+
+/* Called once for every parport on init */
static void w9966_attach(struct parport *port)
{
int i;
- for (i = 0; i < W9966_MAXCAMS; i++)
- {
- if (w9966_cams[i].dev_state != 0) // Cam is already assigned
+ for (i = 0; i < W9966_MAXCAMS; i++) {
+ if (w9966_cams[i].dev_state != 0) /* Cam is already assigned */
continue;
- if (
- strcmp(pardev[i], "aggressive") == 0 ||
- strcmp(pardev[i], port->name) == 0
- ) {
+ if (strcmp(pardev[i], "aggressive") == 0 || strcmp(pardev[i], port->name) == 0) {
if (w9966_init(&w9966_cams[i], port) != 0)
- w9966_term(&w9966_cams[i]);
- break; // return
+ w9966_term(&w9966_cams[i]);
+ break; /* return */
}
}
}
-// Called once for every parport on termination
+/* Called once for every parport on termination */
static void w9966_detach(struct parport *port)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
- if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
- w9966_term(&w9966_cams[i]);
+ if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
+ w9966_term(&w9966_cams[i]);
}
@@ -977,17 +973,18 @@ static struct parport_driver w9966_ppd = {
.detach = w9966_detach,
};
-// Module entry point
+/* Module entry point */
static int __init w9966_mod_init(void)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
w9966_cams[i].dev_state = 0;
return parport_register_driver(&w9966_ppd);
}
-// Module cleanup
+/* Module cleanup */
static void __exit w9966_mod_term(void)
{
parport_unregister_driver(&w9966_ppd);
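A standalone sketch (user-space C, not driver code) of the enum_fmt bounds-check pattern the w9966 conversion above adopts: reject any index beyond the static format table, otherwise copy the entry out. The struct and table here stand in for struct v4l2_fmtdesc and the driver's real format list.

#include <stdio.h>
#include <errno.h>

struct fmtdesc {
	unsigned int index;
	unsigned int pixelformat;
	const char *description;
};

static const struct fmtdesc formats[] = {
	{ 0, 0x56595559 /* 'YUYV' */, "4:2:2, packed, YUYV" },
};

static int enum_fmt(struct fmtdesc *fmt)
{
	if (fmt->index >= sizeof(formats) / sizeof(formats[0]))
		return -EINVAL;		/* only one format is supported */
	*fmt = formats[fmt->index];	/* copy the table entry to the caller */
	return 0;
}

int main(void)
{
	struct fmtdesc f = { .index = 0 };

	if (enum_fmt(&f) == 0)
		printf("format 0: %s\n", f.description);
	return 0;
}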
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index e44e4b5f3e5..bb51cfb0c64 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1153,7 +1153,7 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1163,7 +1163,9 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
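A standalone sketch (user-space C) of the control handling the zc0301 fix above restores: walk the control table, reject unknown or disabled ids, and snap in-range values onto the control's step grid; the braces added by the patch make sure the "no matching control" case returns -EINVAL. The table, ids, and range check here are illustrative, not the driver's definitions.

#include <stdio.h>
#include <errno.h>

struct qctrl {
	unsigned int id;
	int minimum, maximum, step;
	int disabled;
};

static const struct qctrl qctrls[] = {
	{ .id = 1, .minimum = 0, .maximum = 255, .step = 16 },
};

static int set_ctrl(unsigned int id, int *value)
{
	unsigned int i;

	for (i = 0; i < sizeof(qctrls) / sizeof(qctrls[0]); i++) {
		if (id != qctrls[i].id)
			continue;
		if (qctrls[i].disabled)
			return -EINVAL;
		if (*value < qctrls[i].minimum || *value > qctrls[i].maximum)
			return -ERANGE;
		*value -= *value % qctrls[i].step;	/* round down to step */
		return 0;
	}
	return -EINVAL;		/* no matching control */
}

int main(void)
{
	int v = 100;

	printf("ret=%d value=%d\n", set_ctrl(1, &v), v);	/* 100 snaps to 96 */
	return 0;
}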
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 3a408de91b9..0be783c203f 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -58,7 +58,7 @@ zc0301_attach_sensor(struct zc0301_device* cam, struct zc0301_sensor* sensor);
.idProduct = (prod), \
.bInterfaceClass = (intclass)
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_ZC3XX && !defined CONFIG_USB_GSPCA_ZC3XX_MODULE
#define ZC0301_ID_TABLE \
static const struct usb_device_id zc0301_id_table[] = { \
{ ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index cb1de7ea197..8997add1248 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -33,6 +33,10 @@
#include <media/v4l2-device.h>
+#define ZORAN_VIDMODE_PAL 0
+#define ZORAN_VIDMODE_NTSC 1
+#define ZORAN_VIDMODE_SECAM 2
+
struct zoran_requestbuffers {
unsigned long count; /* Number of buffers for MJPEG grabbing */
unsigned long size; /* Size PER BUFFER in bytes */
@@ -48,7 +52,7 @@ struct zoran_sync {
struct zoran_status {
int input; /* Input channel, has to be set prior to BUZIOC_G_STATUS */
int signal; /* Returned: 1 if valid video signal detected */
- int norm; /* Returned: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Returned: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int color; /* Returned: 1 if color signal detected */
};
@@ -62,7 +66,7 @@ struct zoran_params {
/* Main control parameters */
int input; /* Input channel: 0 = Composite, 1 = S-VHS */
- int norm; /* Norm: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Norm: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int decimation; /* decimation of captured video,
* enlargement of video played back.
* Valid values are 1, 2, 4 or 0.
@@ -131,13 +135,13 @@ struct zoran_params {
/*
Private IOCTL to set up for displaying MJPEG
*/
-#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOCPRIVATE+0, struct zoran_params)
-#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOCPRIVATE+1, struct zoran_params)
-#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOCPRIVATE+2, struct zoran_requestbuffers)
-#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOCPRIVATE+3, int)
-#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOCPRIVATE+4, int)
-#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOCPRIVATE+5, struct zoran_sync)
-#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOCPRIVATE+6, struct zoran_status)
+#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct zoran_params)
+#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOC_PRIVATE+1, struct zoran_params)
+#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOC_PRIVATE+2, struct zoran_requestbuffers)
+#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+3, int)
+#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOC_PRIVATE+4, int)
+#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOC_PRIVATE+5, struct zoran_sync)
+#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOC_PRIVATE+6, struct zoran_status)
#ifdef __KERNEL__
@@ -401,7 +405,7 @@ struct zoran {
spinlock_t spinlock; /* Spinlock */
/* Video for Linux parameters */
- int input; /* card's norm and input - norm=VIDEO_MODE_* */
+ int input; /* card's norm and input */
v4l2_std_id norm;
/* Current buffer params */
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index ec41303544e..6f89d0a096e 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -60,7 +60,7 @@
#include <linux/spinlock.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include "videocodec.h"
@@ -1549,11 +1549,11 @@ static long zoran_default(struct file *file, void *__fh, int cmd, void *arg)
mutex_lock(&zr->resource_lock);
if (zr->norm & V4L2_STD_NTSC)
- bparams->norm = VIDEO_MODE_NTSC;
- else if (zr->norm & V4L2_STD_PAL)
- bparams->norm = VIDEO_MODE_PAL;
+ bparams->norm = ZORAN_VIDMODE_NTSC;
+ else if (zr->norm & V4L2_STD_SECAM)
+ bparams->norm = ZORAN_VIDMODE_SECAM;
else
- bparams->norm = VIDEO_MODE_SECAM;
+ bparams->norm = ZORAN_VIDMODE_PAL;
bparams->input = zr->input;
@@ -1789,11 +1789,11 @@ gstat_unlock_and_return:
bstat->signal =
(status & V4L2_IN_ST_NO_SIGNAL) ? 0 : 1;
if (norm & V4L2_STD_NTSC)
- bstat->norm = VIDEO_MODE_NTSC;
+ bstat->norm = ZORAN_VIDMODE_NTSC;
else if (norm & V4L2_STD_SECAM)
- bstat->norm = VIDEO_MODE_SECAM;
+ bstat->norm = ZORAN_VIDMODE_SECAM;
else
- bstat->norm = VIDEO_MODE_PAL;
+ bstat->norm = ZORAN_VIDMODE_PAL;
bstat->color =
(status & V4L2_IN_ST_NO_COLOR) ? 0 : 1;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 3d4bac25290..a82b5bd18d2 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -376,8 +376,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (*count == 0)
*count = ZR364XX_DEF_BUFS;
- while (*size * (*count) > ZR364XX_DEF_BUFS * 1024 * 1024)
- (*count)--;
+ if (*size * *count > ZR364XX_DEF_BUFS * 1024 * 1024)
+ *count = (ZR364XX_DEF_BUFS * 1024 * 1024) / *size;
return 0;
}
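A quick check of the buffer-count arithmetic in the zr364xx hunk above: instead of decrementing the count one buffer at a time, the patch computes the largest count whose total size fits the budget in a single division. The budget and per-buffer size below are hypothetical values, not the driver's constants.

#include <stdio.h>

int main(void)
{
	const unsigned long budget = 16UL * 1024 * 1024;	/* assumed memory budget */
	unsigned long size = 460800;	/* hypothetical per-buffer size */
	unsigned long count = 64;	/* requested number of buffers */

	if (size * count > budget)
		count = budget / size;	/* one division replaces the old loop */

	printf("count clamped to %lu (total %lu bytes)\n", count, count * size);
	return 0;
}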
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5382b5a44af..a6a57011ba6 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5064,7 +5064,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
if (!timeleft) {
printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
@@ -6456,10 +6456,15 @@ out:
issue_hard_reset = 0;
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ if (retry_count == 0) {
+ if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+ retry_count++;
+ } else
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+
mpt_free_msg_frame(ioc, mf);
/* attempt one retry for a timed out command */
- if (!retry_count) {
+ if (retry_count < 2) {
printk(MYIOC_s_INFO_FMT
"Attempting Retry Config request"
" type 0x%x, page 0x%x,"
@@ -6904,6 +6909,172 @@ mpt_halt_firmware(MPT_ADAPTER *ioc)
}
EXPORT_SYMBOL(mpt_halt_firmware);
+/**
+ * mpt_SoftResetHandler - Issues a less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * Message Unit Reset - instructs the IOC to reset the Reply Post and
+ * Free FIFO's. All the Message Frames on Reply Free FIFO are discarded.
+ * All posted buffers are freed, and event notification is turned off.
+ * The IOC doesn't reply to any outstanding request. This will transfer
+ * the IOC to the READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ int ii;
+ u8 cb_idx;
+ unsigned long flags;
+ u32 ioc_state;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+ ioc->name));
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ if (ioc_state == MPI_IOC_STATE_FAULT ||
+ ioc_state == MPI_IOC_STATE_RESET) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, either in FAULT or RESET state!\n", ioc->name));
+ return -1;
+ }
+
+ if (ioc->bus_type == FC) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, because the bus type is FC!\n", ioc->name));
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ rc = -1;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->taskmgmt_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ time_count = jiffies;
+
+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+ }
+
+ if (rc)
+ goto out;
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+ if (ioc_state != MPI_IOC_STATE_READY)
+ goto out;
+
+ for (ii = 0; ii < 5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ rc = GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_RECOVER);
+ if (rc == 0)
+ break;
+ if (sleepFlag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+ }
+ if (ii == 5)
+ goto out;
+
+ rc = PrimeIocFifos(ioc);
+ if (rc != 0)
+ goto out;
+
+ rc = SendIocInit(ioc, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ rc = SendEventNotification(ioc, 1, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+
+ /*
+ * At this point, we know soft reset succeeded.
+ */
+
+ ioc->active = 1;
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (ioc->active) { /* otherwise, hard reset coming */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc,
+ MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "SoftResetHandler: completed (%d seconds): %s\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+ ((rc == 0) ? "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+/**
+ * mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ * Try a soft reset first; only if that fails, fall back to the more
+ * expensive HardReset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int ret = -1;
+
+ ret = mpt_SoftResetHandler(ioc, sleepFlag);
+ if (ret == 0)
+ return ret;
+ ret = mpt_HardResetHandler(ioc, sleepFlag);
+ return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
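A minimal user-space sketch (not the driver's symbols) of the fallback pattern mpt_Soft_Hard_ResetHandler() introduces above: attempt the cheap recovery path first and escalate to the expensive one only when it fails.

#include <stdio.h>

static int soft_reset(void) { return -1; /* pretend the cheap path failed */ }
static int hard_reset(void) { return 0;  /* expensive path succeeds */ }

static int soft_then_hard_reset(void)
{
	int ret = soft_reset();

	if (ret == 0)
		return 0;		/* cheap reset was enough */
	return hard_reset();		/* escalate only on failure */
}

int main(void)
{
	printf("reset result: %d\n", soft_then_hard_reset());
	return 0;
}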
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 9718c8f2e95..b613eb3d470 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.14"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
+#define MPT_LINUX_VERSION_COMMON "3.04.15"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -940,6 +940,7 @@ extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index caa8f568a41..f06b29193b4 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -128,7 +128,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
/*
* Reset Handler cleanup function
@@ -275,45 +274,6 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
return 1;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptctl_timeout_expired
- *
- * Expecting an interrupt, however timed out.
- *
- */
-static void
-mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
-{
- unsigned long flags;
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
- ioc->name, __func__));
-
- if (mpt_fwfault_debug)
- mpt_halt_firmware(ioc);
-
- spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
- if (ioc->ioc_reset_in_progress) {
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_free_msg_frame(ioc, mf);
- return;
- }
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-
- if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
- return;
-
- /* Issue a reset for this device.
- * The IOC is not responding.
- */
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioc->name));
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- mpt_free_msg_frame(ioc, mf);
-}
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
@@ -343,12 +303,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 0;
}
-/* mptctl_bus_reset
- *
- * Bus reset code.
- *
- */
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
@@ -359,13 +315,6 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
unsigned long time_count;
u16 iocstatus;
- /* bus reset is only good for SCSI IO, RAID PASSTHRU */
- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
- function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, not SCSI_IO!!\n", ioc->name));
- return -EPERM;
- }
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
@@ -375,15 +324,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
- /* Send request
- */
mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
if (mf == NULL) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, no msg frames!!\n", ioc->name));
+ dtmprintk(ioc,
+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+ ioc->name));
mpt_clear_taskmgmt_in_progress_flag(ioc);
retval = -ENOMEM;
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
@@ -392,10 +340,13 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
- pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
- pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
- pScsiTm->TargetID = 0;
- pScsiTm->Bus = 0;
+ pScsiTm->TaskType = tm_type;
+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+ (ioc->bus_type == FC))
+ pScsiTm->MsgFlags =
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+ pScsiTm->TargetID = target_id;
+ pScsiTm->Bus = bus_id;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
@@ -413,17 +364,16 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
timeout = 30;
break;
case SPI:
- default:
- timeout = 2;
+ default:
+ timeout = 10;
break;
}
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "TaskMgmt type=%d timeout=%ld\n",
- ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
- CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
@@ -432,17 +382,20 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ dfailprintk(ioc,
+ printk(MYIOC_s_ERR_FMT
"TaskMgmt send_handshake FAILED!"
" (ioc %p, mf %p, rc=%d) \n", ioc->name,
ioc, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
}
/* Now wait for the command to complete */
ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
@@ -452,14 +405,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
else
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
@@ -467,7 +420,7 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
"TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
"iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
"term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
- pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ pScsiTmReply->TargetID, tm_type,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
@@ -485,13 +438,71 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = -1; /* return failure */
}
-
- mptctl_bus_reset_done:
+ tm_done:
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Expecting an interrupt, however timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+ int ret_val = -1;
+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+ u8 function = mf->u.hdr.Function;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+ if (ioc->bus_type == SAS) {
+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ scsi_req->Bus, scsi_req->TargetID);
+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ } else {
+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+ ioc->name));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
@@ -1318,6 +1329,8 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
if (ioc->sh) {
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1439,6 +1452,8 @@ mptctl_gettargetinfo (unsigned long arg)
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1967,6 +1982,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
+ if (vtarget == NULL)
+ continue;
+
if ((pScsiReq->TargetID == vtarget->id) &&
(pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
@@ -2991,6 +3009,14 @@ static int __init mptctl_init(void)
}
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
+ if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ mpt_deregister(mptctl_id);
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
@@ -3010,12 +3036,15 @@ static void mptctl_exit(void)
printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+ /* De-register event handler from base module */
+ mpt_event_deregister(mptctl_id);
+
/* De-register reset handler from base module */
mpt_reset_deregister(mptctl_id);
/* De-register callback handler from base module */
+ mpt_deregister(mptctl_taskmgmt_id);
mpt_deregister(mptctl_id);
- mpt_reset_deregister(mptctl_taskmgmt_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
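A standalone sketch (user-space C, names illustrative) of the dispatch logic the reworked mptctl_timeout_expired() above follows: on SAS adapters a plain SCSI I/O timeout gets a target reset while RAID passthrough gets a bus reset, every other bus type gets a bus reset, and a full adapter reset is issued only if task management itself fails.

#include <stdio.h>

enum bus_type { BUS_SAS, BUS_SPI, BUS_FC };
enum tm_type  { TM_TARGET_RESET, TM_BUS_RESET };

/* 'raid_passthrough' mirrors the MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH check. */
static enum tm_type pick_tm(enum bus_type bus, int raid_passthrough)
{
	if (bus == BUS_SAS && !raid_passthrough)
		return TM_TARGET_RESET;	/* narrowest reset that can help */
	return TM_BUS_RESET;		/* RAID passthrough or non-SAS buses */
}

int main(void)
{
	printf("sas/io=%d spi/io=%d\n", pick_tm(BUS_SAS, 0), pick_tm(BUS_SPI, 0));
	return 0;
}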
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 33f7256055b..b5f03ad8156 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -482,6 +482,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
if (vtarget) {
vtarget->id = pg0->CurrentTargetID;
vtarget->channel = pg0->CurrentBus;
+ vtarget->deleted = 0;
}
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
@@ -1092,6 +1093,8 @@ mptfc_setup_reset(struct work_struct *work)
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* reset about to happen, delete (block) all rports */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1099,6 +1102,12 @@ mptfc_setup_reset(struct work_struct *work)
ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1119,6 +1128,8 @@ mptfc_rescan_devices(struct work_struct *work)
int ii;
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1146,6 +1157,12 @@ mptfc_rescan_devices(struct work_struct *work)
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1358,6 +1375,9 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
unsigned long flags;
int rc=1;
+ if (ioc->bus_type != FC)
+ return 0;
+
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
@@ -1396,7 +1416,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
unsigned long flags;
rc = mptscsih_ioc_reset(ioc,reset_phase);
- if (rc == 0)
+ if ((ioc->bus_type != FC) || (!rc))
return rc;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76687126b57..ac000e83db0 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1894,7 +1894,7 @@ static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
- .name = "MPT SPI Host",
+ .name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
@@ -2038,11 +2038,13 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
- if (!timeleft) {
- /* On timeout reset the board */
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ error = -ETIME;
mpt_free_msg_frame(ioc, mf);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- error = -ETIMEDOUT;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_unlock;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
@@ -2223,11 +2225,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
- if (!timeleft) {
- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
- /* On timeout reset the board */
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- ret = -ETIMEDOUT;
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto unmap;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap;
}
mf = NULL;
@@ -2518,6 +2523,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
if (error)
goto out_free_consistent;
@@ -2594,14 +2605,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
- if (error)
- goto out_free_consistent;
-
- if (!buffer->NumPhys) {
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
+ if (error)
+ goto out_free_consistent;
+
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
@@ -2677,7 +2688,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
- goto out;
+ goto out_free_consistent;
}
if (error)
@@ -2833,7 +2844,7 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
@@ -4098,6 +4109,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
@@ -4717,7 +4729,7 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
if (issue_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
@@ -4779,6 +4791,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
struct fw_event_work *fw_event;
unsigned long delay;
+ if (ioc->bus_type != SAS)
+ return 0;
+
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
@@ -5073,6 +5088,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
struct mptsas_portinfo *p, *n;
int i;
+ if (!ioc->sh) {
+ printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+ mpt_detach(pdev);
+ return;
+ }
+
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 953c2bfcf6a..7b249edbda7 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- u8 event_data[1];
+ u8 __attribute__((aligned(4))) event_data[1];
};
struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6796597dcee..5c53624e0e8 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1149,11 +1149,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if(!host) {
- mpt_detach(pdev);
- return;
- }
-
scsi_remove_host(host);
if((hd = shost_priv(host)) == NULL)
@@ -1711,7 +1706,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
@@ -1728,6 +1723,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
case FC:
return 40;
case SAS:
+ return 30;
case SPI:
default:
return 10;
@@ -1777,7 +1773,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
- retval = 0;
+ retval = SUCCESS;
goto out;
}
@@ -1792,6 +1788,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
goto out;
}
+ /* Task aborts are not supported for volumes.
+ */
+ if (vdevice->vtarget->raidVolume) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: raid volume (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
@@ -1991,7 +1998,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
@@ -2344,6 +2351,8 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
+ if (!vdevice)
+ return;
mptscsih_search_running_cmds(hd, vdevice);
vtarget->num_luns--;
@@ -2561,9 +2570,7 @@ mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
}
/**
- * mptscsih_set_scsi_lookup
- *
- * writes a scmd entry into the ScsiLookup[] array list
+ * mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
*
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
@@ -2726,7 +2733,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
/**
- * mptscsih_get_completion_code -
+ * mptscsih_get_completion_code - get completion code from MPT request
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
@@ -3040,7 +3047,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e44365193fd..1abaa5d01ae 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -210,6 +210,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
target->maxOffset = offset;
target->maxWidth = width;
+ spi_min_period(scsi_target(sdev)) = factor;
+ spi_max_offset(scsi_target(sdev)) = offset;
+ spi_max_width(scsi_target(sdev)) = width;
+
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
@@ -558,6 +562,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
+ cfg.timeout = 60;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
@@ -1152,6 +1157,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ if (ioc->bus_type != SPI)
+ return 0;
+
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason
= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
@@ -1283,6 +1291,8 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SPI) || (!rc))
+ return rc;
/* only try to do a renegotiation if we're properly set up
* if we get an ioc fault on bringup, ioc->sh will be NULL */
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 11073fa3d9f..c4b117f5fb7 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
if (!dev)
return -ENXIO;
- ops = kmalloc(kcmd.oplen, GFP_KERNEL);
- if (!ops)
- return -ENOMEM;
-
- if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
- kfree(ops);
- return -EFAULT;
- }
+ ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
/*
* It's possible to have a _very_ large table
@@ -314,22 +309,22 @@ static int i2o_cfg_swul(unsigned long arg)
int ret = 0;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
- goto return_fault;
+ return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(maxfrag, kxfer.maxfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(curfrag, kxfer.curfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (curfrag == maxfrag)
fragsize = swlen - (maxfrag - 1) * 8192;
if (!kxfer.buf)
- goto return_fault;
+ return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
@@ -373,12 +368,8 @@ static int i2o_cfg_swul(unsigned long arg)
i2o_dma_free(&c->pdev->dev, &buffer);
- return_ret:
return ret;
- return_fault:
- ret = -EFAULT;
- goto return_ret;
-};
+}
static int i2o_cfg_swdel(unsigned long arg)
{
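The i2o_config change above replaces an open-coded kmalloc()+copy_from_user() pair with memdup_user(), which allocates and copies in one step and returns an ERR_PTR on failure. A minimal kernel-style sketch of the call pattern; the surrounding helper is hypothetical and the caller is still responsible for kfree()ing the buffer.

#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>

static int copy_ops_from_user(const void __user *ubuf, size_t len, void **out)
{
	void *buf = memdup_user(ubuf, len);	/* allocate and copy in one step */

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	*out = buf;			/* caller must kfree(*out) when done */
	return 0;
}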
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 6a14d2b1ccf..2c65a2c5729 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -173,33 +173,35 @@ static struct resource regulator_resources[] = {
PM8607_REG_RESOURCE(LDO9, LDO9),
PM8607_REG_RESOURCE(LDO10, LDO10),
PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
PM8607_REG_RESOURCE(LDO14, LDO14),
};
-#define PM8607_REG_DEVS(_name, _id) \
+#define PM8607_REG_DEVS(_id) \
{ \
- .name = "88pm8607-" #_name, \
+ .name = "88pm860x-regulator", \
.num_resources = 1, \
.resources = &regulator_resources[PM8607_ID_##_id], \
.id = PM8607_ID_##_id, \
}
static struct mfd_cell regulator_devs[] = {
- PM8607_REG_DEVS(buck1, BUCK1),
- PM8607_REG_DEVS(buck2, BUCK2),
- PM8607_REG_DEVS(buck3, BUCK3),
- PM8607_REG_DEVS(ldo1, LDO1),
- PM8607_REG_DEVS(ldo2, LDO2),
- PM8607_REG_DEVS(ldo3, LDO3),
- PM8607_REG_DEVS(ldo4, LDO4),
- PM8607_REG_DEVS(ldo5, LDO5),
- PM8607_REG_DEVS(ldo6, LDO6),
- PM8607_REG_DEVS(ldo7, LDO7),
- PM8607_REG_DEVS(ldo8, LDO8),
- PM8607_REG_DEVS(ldo9, LDO9),
- PM8607_REG_DEVS(ldo10, LDO10),
- PM8607_REG_DEVS(ldo12, LDO12),
- PM8607_REG_DEVS(ldo14, LDO14),
+ PM8607_REG_DEVS(BUCK1),
+ PM8607_REG_DEVS(BUCK2),
+ PM8607_REG_DEVS(BUCK3),
+ PM8607_REG_DEVS(LDO1),
+ PM8607_REG_DEVS(LDO2),
+ PM8607_REG_DEVS(LDO3),
+ PM8607_REG_DEVS(LDO4),
+ PM8607_REG_DEVS(LDO5),
+ PM8607_REG_DEVS(LDO6),
+ PM8607_REG_DEVS(LDO7),
+ PM8607_REG_DEVS(LDO8),
+ PM8607_REG_DEVS(LDO9),
+ PM8607_REG_DEVS(LDO10),
+ PM8607_REG_DEVS(LDO12),
+ PM8607_REG_DEVS(LDO13),
+ PM8607_REG_DEVS(LDO14),
};
struct pm860x_irq_data {
@@ -564,7 +566,7 @@ out:
return ret;
}
-static void __devexit device_irq_exit(struct pm860x_chip *chip)
+static void device_irq_exit(struct pm860x_chip *chip)
{
if (chip->core_irq)
free_irq(chip->core_irq, chip);
@@ -701,7 +703,7 @@ out:
return;
}
-int pm860x_device_init(struct pm860x_chip *chip,
+int __devinit pm860x_device_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
chip->core_irq = 0;
@@ -729,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip,
return 0;
}
-void pm860x_device_exit(struct pm860x_chip *chip)
+void __devexit pm860x_device_exit(struct pm860x_chip *chip)
{
device_irq_exit(chip);
mfd_remove_devices(chip->dev);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 4a6e7186334..c933b64d128 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -200,8 +200,8 @@ static int __devexit pm860x_remove(struct i2c_client *client)
pm860x_device_exit(chip);
i2c_unregister_device(chip->companion);
- i2c_set_clientdata(chip->companion, NULL);
i2c_set_clientdata(chip->client, NULL);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2a5a0b78f84..9da0e504bbe 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2,8 +2,14 @@
# Multifunction miscellaneous devices
#
-menu "Multifunction device drivers"
+menuconfig MFD_SUPPORT
+ bool "Multifunction device drivers"
depends on HAS_IOMEM
+ default y
+ help
+ Configure MFD device drivers.
+
+if MFD_SUPPORT
config MFD_CORE
tristate
@@ -49,10 +55,15 @@ config MFD_SH_MOBILE_SDHI
bool "Support for SuperH Mobile SDHI"
depends on SUPERH || ARCH_SHMOBILE
select MFD_CORE
+ select TMIO_MMC_DMA
---help---
This driver supports the SDHI hardware block found in many
SuperH Mobile SoCs.
+config MFD_DAVINCI_VOICECODEC
+ tristate
+ select MFD_CORE
+
config MFD_DM355EVM_MSP
bool "DaVinci DM355 EVM microcontroller"
depends on I2C && MACH_DAVINCI_DM355_EVM
@@ -111,6 +122,18 @@ config TPS65010
This driver can also be built as a module. If so, the module
will be called tps65010.
+config TPS6507X
+ tristate "TPS6507x Power Management / Touch Screen chips"
+ select MFD_CORE
+ depends on I2C
+ help
+ If you say yes here you get support for the TPS6507x series of
+ Power Management / Touch Screen chips. These include voltage
+ regulators, lithium ion/polymer battery charging, touch screen
+ and other features that are often used in portable devices.
+ This driver can also be built as a module. If so, the module
+ will be called tps6507x.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -154,10 +177,26 @@ config TWL4030_CODEC
select MFD_CORE
default n
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
+config TMIO_MMC_DMA
+ bool
+ select DMA_ENGINE
+ select DMADEVICES
+
config MFD_T7L66XB
bool "Support Toshiba T7L66XB"
depends on ARM && HAVE_CLK
@@ -297,9 +336,9 @@ config MFD_WM8350_I2C
selected to enable support for the functionality of the chip.
config MFD_WM8994
- tristate "Support Wolfson Microelectronics WM8994"
+ bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
- depends on I2C
+ depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
smartphone applicatiosn. As well as audio functionality it
@@ -341,9 +380,19 @@ config PCF50633_GPIO
Say yes here if you want to include support GPIO for pins on
the PCF50633 chip.
+config ABX500_CORE
+ bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
+ default y if ARCH_U300
+ help
+ Say yes here if you have the ABX500 Mixed Signal IC family
+ chips. This core driver exposes register access functions.
+ Functionality-specific drivers using these functions can
+ remain unchanged when the IC changes. Binding of the functions to
+ actual register access is done by the IC core driver.
+
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y
+ depends on I2C=y && ABX500_CORE
default y if ARCH_U300
help
Select this to enable the AB3100 Mixed Signal IC core
@@ -371,15 +420,30 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
-config AB4500_CORE
- tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip"
- depends on SPI
+config AB8500_CORE
+ bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
+ depends on SPI=y && GENERIC_HARDIRQS
+ select MFD_CORE
help
- Select this option to enable access to AB4500 power management
+ Select this option to enable access to AB8500 power management
chip. This connects to U8500 on the SSP/SPI bus and exports
read/write functions for the devices to get access to this chip.
This chip embeds various other multimedia funtionalities as well.
+config AB3550_CORE
+ bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB3550 Mixed Signal IC core
+ functionality. This connects to an AB3550 on the I2C bus
+ and exposes a number of symbols needed for dependent devices
+ to read and write registers and subscribe to events from
+ this multi-functional IC. This is needed to use other features
+ of the AB3550 such as battery-backed RTC, charging control,
+ LEDs, vibrator, system power and temperature, power management
+ and ALSA sound.
+
config MFD_TIMBERDALE
tristate "Support for the Timberdale FPGA"
select MFD_CORE
@@ -399,7 +463,26 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
-endmenu
+config MFD_RDC321X
+ tristate "Support for RDC-R321x southbridge"
+ select MFD_CORE
+ depends on PCI
+ help
+ Say yes here if you want to have support for the RDC R-321x SoC
+ southbridge which provides access to GPIOs and Watchdog using the
+ southbridge PCI device configuration space.
+
+config MFD_JANZ_CMODIO
+ tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
+ select MFD_CORE
+ depends on PCI
+ help
+ This is the core driver for the Janz CMOD-IO PCI MODULbus
+ carrier board. This device is a PCI to MODULbus bridge which may
+ host many different types of MODULbus daughterboards, including
+ CAN and GPIO controllers.
+
+endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22715add99a..fb503e77dc6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
+obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -25,9 +27,10 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_MFD_WM8994) += wm8994-core.o
+obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS65010) += tps65010.o
+obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -54,12 +57,17 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
-obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
+pcf50633-objs := pcf50633-core.o pcf50633-irq.o
+obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
+obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
-obj-$(CONFIG_LPC_SCH) += lpc_sch.o
\ No newline at end of file
+obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
+obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e4ca5909e42..53ebfee548f 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,7 +19,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* These are the only registers inside AB3100 used in this main file */
@@ -59,24 +59,15 @@
* The AB3100 is usually assigned address 0x48 (7-bit)
* The chip is defined in the platform i2c_board_data section.
*/
-
-u8 ab3100_get_chip_type(struct ab3100 *ab3100)
+static int ab3100_get_chip_id(struct device *dev)
{
- u8 chip = ABUNKNOWN;
-
- switch (ab3100->chip_id & 0xf0) {
- case 0xa0:
- chip = AB3000;
- break;
- case 0xc0:
- chip = AB3100;
- break;
- }
- return chip;
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return (int)ab3100->chip_id;
}
-EXPORT_SYMBOL(ab3100_get_chip_type);
-int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
+static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 regval)
{
u8 regandval[2] = {reg, regval};
int err;
@@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_set_register_interruptible);
+static int set_register_interruptible(struct device *dev,
+ u8 bank, u8 reg, u8 value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_set_register_interruptible(ab3100, reg, value);
+}
/*
* The test registers exist at an I2C bus address up one
@@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
return err;
}
-
-int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
+static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 *regval)
{
int err;
@@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_interruptible);
+static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_interruptible(ab3100, reg, value);
+}
-int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
+static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
u8 first_reg, u8 *regvals, u8 numregs)
{
int err;
@@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_page_interruptible);
+static int get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_page_interruptible(ab3100,
+ first_reg, regvals, numregs);
+}
-int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
+static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
u8 reg, u8 andmask, u8 ormask)
{
u8 regandval[2] = {reg, 0};
@@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible);
+static int mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_mask_and_set_register_interruptible(ab3100,
+ reg, bitmask, (bitmask & bitvalues));
+}
/*
* Register a simple callback for handling any AB3100 events.
@@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100,
EXPORT_SYMBOL(ab3100_event_unregister);
-int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
- u32 *fatevent)
+static int ab3100_event_registers_startup_state_get(struct device *dev,
+ u8 *event)
{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
if (!ab3100->startup_events_read)
return -EAGAIN; /* Try again later */
- *fatevent = ab3100->startup_events;
+ memcpy(event, ab3100->startup_events, 3);
return 0;
}
-EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
+
+static struct abx500_ops ab3100_ops = {
+ .get_chip_id = ab3100_get_chip_id,
+ .set_register = set_register_interruptible,
+ .get_register = get_register_interruptible,
+ .get_register_page = get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3100_event_registers_startup_state_get,
+ .startup_irq_enabled = NULL,
+};
/*
* This is a threaded interrupt handler so we can make some
@@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
event_regs[2];
if (!ab3100->startup_events_read) {
- ab3100->startup_events = fatevent;
+ ab3100->startup_events[0] = event_regs[0];
+ ab3100->startup_events[1] = event_regs[1];
+ ab3100->startup_events[2] = event_regs[2];
ab3100->startup_events_read = true;
}
/*
@@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100)
dev_warn(ab3100->dev,
"AB3100 P1E variant detected, "
"forcing chip to 32KHz\n");
- err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08);
+ err = ab3100_set_test_register_interruptible(ab3100,
+ 0x02, 0x08);
}
exit_no_setup:
@@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_irq;
+ err = abx500_register_ops(&client->dev, &ab3100_ops);
+ if (err)
+ goto exit_no_ops;
+
/* Set parent and a pointer back to the container in device data */
for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
ab3100_platform_devs[i]->dev.parent =
@@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client,
return 0;
+ exit_no_ops:
exit_no_irq:
exit_no_setup:
i2c_unregister_device(ab3100->testreg_client);
exit_no_testreg_client:
exit_no_detect:
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return err;
}
@@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
* their notifiers so deactivate IRQ
*/
free_irq(client->irq, ab3100);
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return 0;
}
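A standalone sketch (user-space C, illustrative names) of the ops-table indirection the ab3100 conversion above adopts: sub-drivers call through a struct of function pointers registered by the core driver instead of chip-specific exported symbols, so the same client code works for any abx500 variant.

#include <stdio.h>

struct regmap_ops {
	int (*get_register)(void *chip, unsigned char reg, unsigned char *val);
	int (*set_register)(void *chip, unsigned char reg, unsigned char val);
};

static int demo_get(void *chip, unsigned char reg, unsigned char *val)
{
	(void)chip;
	*val = reg ^ 0xff;	/* fake register contents for the demo */
	return 0;
}

static const struct regmap_ops demo_ops = {
	.get_register = demo_get,	/* unimplemented ops stay NULL */
};

int main(void)
{
	unsigned char v;

	if (demo_ops.get_register(NULL, 0x20, &v) == 0)
		printf("reg 0x20 = 0x%02x\n", v);
	return 0;
}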
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 2d14655fdeb..63d2b727ddb 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -30,7 +30,6 @@
/**
* struct ab3100_otp
* @dev containing device
- * @ab3100 a pointer to the parent ab3100 device struct
* @locked whether the OTP is locked, after locking, no more bits
* can be changed but before locking it is still possible
* to change bits from 1->0.
@@ -49,7 +48,6 @@
*/
struct ab3100_otp {
struct device *dev;
- struct ab3100 *ab3100;
bool locked;
u32 freq;
bool paf;
@@ -63,19 +61,19 @@ struct ab3100_otp {
static int __init ab3100_otp_read(struct ab3100_otp *otp)
{
- struct ab3100 *ab = otp->ab3100;
u8 otpval[8];
u8 otpp;
int err;
- err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp);
+ err = abx500_get_register_interruptible(otp->dev, 0,
+ AB3100_OTPP, &otpp);
if (err) {
dev_err(otp->dev, "unable to read OTPP register\n");
return err;
}
- err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0,
- otpval, 8);
+ err = abx500_get_register_page_interruptible(otp->dev, 0,
+ AB3100_OTP0, otpval, 8);
if (err) {
dev_err(otp->dev, "unable to read OTP register page\n");
return err;
@@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
otp->dev = &pdev->dev;
/* Replace platform data coming in with a local struct */
- otp->ab3100 = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, otp);
err = ab3100_otp_read(otp);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
new file mode 100644
index 00000000000..1060f8e1c40
--- /dev/null
+++ b/drivers/mfd/ab3550-core.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Low-level core for exclusive access to the AB3550 IC on the I2C bus
+ * and some basic chip-configuration.
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/abx500.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/core.h>
+
+#define AB3550_NAME_STRING "ab3550"
+#define AB3550_ID_FORMAT_STRING "AB3550 %s"
+#define AB3550_NUM_BANKS 2
+#define AB3550_NUM_EVENT_REG 5
+
+/* These are the only registers inside AB3550 used in this main file */
+
+/* Chip ID register */
+#define AB3550_CID_REG 0x20
+
+/* Interrupt event registers */
+#define AB3550_EVENT_BANK 0
+#define AB3550_EVENT_REG 0x22
+
+/* Individual read/write permission bits. */
+#define AB3550_PERM_RD (0x01)
+#define AB3550_PERM_WR (0x02)
+
+/* Read/write permissions. */
+#define AB3550_PERM_RO (AB3550_PERM_RD)
+#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR)
+
+/**
+ * struct ab3550
+ * @access_mutex: lock out concurrent accesses to the AB registers
+ * @i2c_client: the I2C clients for this chip, one per register bank
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @mask_work: a worker for writing to mask registers
+ * @event_lock: a lock to protect the event_mask
+ * @event_mask: a local copy of the mask event registers
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ */
+struct ab3550 {
+ struct mutex access_mutex;
+ struct i2c_client *i2c_client[AB3550_NUM_BANKS];
+ char chip_name[32];
+ u8 chip_id;
+ struct work_struct mask_work;
+ spinlock_t event_lock;
+ u8 event_mask[AB3550_NUM_EVENT_REG];
+ u8 startup_events[AB3550_NUM_EVENT_REG];
+ bool startup_events_read;
+#ifdef CONFIG_DEBUG_FS
+ unsigned int debug_bank;
+ unsigned int debug_address;
+#endif
+};
+
+/**
+ * struct ab3550_reg_range
+ * @first: the first address of the range
+ * @last: the last address of the range
+ * @perm: access permissions for the range
+ */
+struct ab3550_reg_range {
+ u8 first;
+ u8 last;
+ u8 perm;
+};
+
+/**
+ * struct ab3550_reg_ranges
+ * @count: the number of ranges in the list
+ * @range: the list of register ranges
+ */
+struct ab3550_reg_ranges {
+ u8 count;
+ const struct ab3550_reg_range *range;
+};
+
+/*
+ * Permissible register ranges for reading and writing per device and bank.
+ *
+ * The ranges must be listed in increasing address order, and no overlaps are
+ * allowed. It is assumed that write permission implies read permission
+ * (i.e. only RO and RW permissions should be used). Ranges with write
+ * permission must not be split up.
+ */
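+/*
+ * For example, with the AB3550_DEVID_CHARGER bank 0 ranges below
+ * (0x10..0x1a RW and 0x21..0x21 RO), reg_write_allowed() accepts 0x15 but
+ * rejects 0x21, and page_read_allowed() accepts 0x10..0x1a but rejects
+ * 0x1a..0x21 because the two ranges are not contiguous.
+ */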
+
+#define NO_RANGE {.count = 0, .range = NULL,}
+
+static struct
+ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = {
+ [AB3550_DEVID_DAC] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ .perm = AB3550_PERM_RW,
+ },
+ },
+ },
+ },
+ [AB3550_DEVID_LEDS] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_POWER] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x69,
+ .last = 0xa3,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x16,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_SIM] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x17,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+
+ },
+ },
+ [AB3550_DEVID_UART] = {
+ NO_RANGE,
+ NO_RANGE,
+ },
+ [AB3550_DEVID_RTC] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0c,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_ADC] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x20,
+ .last = 0x56,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x13,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_CODEC] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x31,
+ .last = 0x63,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x65,
+ .last = 0x68,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+};
+
+static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = {
+ [AB3550_DEVID_DAC] = {
+ .name = "ab3550-dac",
+ .id = AB3550_DEVID_DAC,
+ .num_resources = 0,
+ },
+ [AB3550_DEVID_LEDS] = {
+ .name = "ab3550-leds",
+ .id = AB3550_DEVID_LEDS,
+ },
+ [AB3550_DEVID_POWER] = {
+ .name = "ab3550-power",
+ .id = AB3550_DEVID_POWER,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ .name = "ab3550-regulators",
+ .id = AB3550_DEVID_REGULATORS,
+ },
+ [AB3550_DEVID_SIM] = {
+ .name = "ab3550-sim",
+ .id = AB3550_DEVID_SIM,
+ },
+ [AB3550_DEVID_UART] = {
+ .name = "ab3550-uart",
+ .id = AB3550_DEVID_UART,
+ },
+ [AB3550_DEVID_RTC] = {
+ .name = "ab3550-rtc",
+ .id = AB3550_DEVID_RTC,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ .name = "ab3550-charger",
+ .id = AB3550_DEVID_CHARGER,
+ },
+ [AB3550_DEVID_ADC] = {
+ .name = "ab3550-adc",
+ .id = AB3550_DEVID_ADC,
+ .num_resources = 10,
+ .resources = (struct resource[]) {
+ {
+ .name = "TRIGGER-0",
+ .flags = IORESOURCE_IRQ,
+ .start = 16,
+ .end = 16,
+ },
+ {
+ .name = "TRIGGER-1",
+ .flags = IORESOURCE_IRQ,
+ .start = 17,
+ .end = 17,
+ },
+ {
+ .name = "TRIGGER-2",
+ .flags = IORESOURCE_IRQ,
+ .start = 18,
+ .end = 18,
+ },
+ {
+ .name = "TRIGGER-3",
+ .flags = IORESOURCE_IRQ,
+ .start = 19,
+ .end = 19,
+ },
+ {
+ .name = "TRIGGER-4",
+ .flags = IORESOURCE_IRQ,
+ .start = 20,
+ .end = 20,
+ },
+ {
+ .name = "TRIGGER-5",
+ .flags = IORESOURCE_IRQ,
+ .start = 21,
+ .end = 21,
+ },
+ {
+ .name = "TRIGGER-6",
+ .flags = IORESOURCE_IRQ,
+ .start = 22,
+ .end = 22,
+ },
+ {
+ .name = "TRIGGER-7",
+ .flags = IORESOURCE_IRQ,
+ .start = 23,
+ .end = 23,
+ },
+ {
+ .name = "TRIGGER-VBAT-TXON",
+ .flags = IORESOURCE_IRQ,
+ .start = 13,
+ .end = 13,
+ },
+ {
+ .name = "TRIGGER-VBAT",
+ .flags = IORESOURCE_IRQ,
+ .start = 12,
+ .end = 12,
+ },
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ .name = "ab3550-fuelgauge",
+ .id = AB3550_DEVID_FUELGAUGE,
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ .name = "ab3550-vibrator",
+ .id = AB3550_DEVID_VIBRATOR,
+ },
+ [AB3550_DEVID_CODEC] = {
+ .name = "ab3550-codec",
+ .id = AB3550_DEVID_CODEC,
+ },
+};
+
+/*
+ * I2C transactions with error messages.
+ */
+static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_send(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_recv(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Functionality for getting/setting register values.
+ */
+static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg,
+ u8 *value)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, value, 1);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int get_register_page_interruptible(struct ab3550 *ab, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &first_reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, regvals, numregs);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ int err = 0;
+
+ if (likely(bitmask)) {
+ u8 reg_bits[2] = {reg, 0};
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ if (bitmask == 0xFF) /* No need to read in this case. */
+ reg_bits[1] = bitvalues;
+ else { /* Read and modify the register value. */
+ u8 bits;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (err)
+ goto unlock_and_return;
+ err = ab3550_i2c_master_recv(ab, bank, &bits, 1);
+ if (err)
+ goto unlock_and_return;
+ reg_bits[1] = ((~bitmask & bits) |
+ (bitmask & bitvalues));
+ }
+ /* Write the new value. */
+ err = ab3550_i2c_master_send(ab, bank, reg_bits, 2);
+unlock_and_return:
+ mutex_unlock(&ab->access_mutex);
+ }
+ return err;
+}
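+/*
+ * Read-modify-write example (illustrative values): with bitmask 0x0f,
+ * bitvalues 0x03 and a current register value of 0xf0, the byte written
+ * back is (~0x0f & 0xf0) | (0x0f & 0x03) = 0xf3; a bitmask of 0xff skips
+ * the initial read entirely.
+ */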
+
+/*
+ * Read/write permission checking functions.
+ */
+static bool page_write_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ break;
+ if ((last_reg <= ranges->range[i].last) &&
+ (ranges->range[i].perm & AB3550_PERM_WR))
+ return true;
+ }
+ return false;
+}
+
+static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_write_allowed(ranges, reg, reg);
+}
+
+static bool page_read_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+ /* Find the range (if it exists in the list) that includes first_reg. */
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ return false;
+ if (first_reg <= ranges->range[i].last)
+ break;
+ }
+ /* Make sure that the entire range up to and including last_reg is
+ * readable. This may span several of the ranges in the list.
+ */
+ while ((i < ranges->count) &&
+ (ranges->range[i].perm & AB3550_PERM_RD)) {
+ if (last_reg <= ranges->range[i].last)
+ return true;
+ if ((++i >= ranges->count) ||
+ (ranges->range[i].first !=
+ (ranges->range[i - 1].last + 1))) {
+ break;
+ }
+ }
+ return false;
+}
+
+static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_read_allowed(ranges, reg, reg);
+}
+
+/*
+ * The exported register access functionality.
+ */
+int ab3550_get_chip_id(struct device *dev)
+{
+ struct ab3550 *ab = dev_get_drvdata(dev->parent);
+ return (int)ab->chip_id;
+}
+
+int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return mask_and_set_register_interruptible(ab, bank, reg,
+ bitmask, bitvalues);
+}
+
+int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
+ value);
+}
+
+int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_interruptible(ab, bank, reg, value);
+}
+
+int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank],
+ first_reg, (first_reg + numregs - 1)))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_page_interruptible(ab, bank, first_reg, regvals,
+ numregs);
+}
+
+int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct ab3550 *ab;
+
+ ab = dev_get_drvdata(dev->parent);
+ if (!ab->startup_events_read)
+ return -EAGAIN; /* Try again later */
+
+ memcpy(event, ab->startup_events, AB3550_NUM_EVENT_REG);
+ return 0;
+}
+
+int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+ bool val;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+ val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
+
+ return val;
+}
+
+static struct abx500_ops ab3550_ops = {
+ .get_chip_id = ab3550_get_chip_id,
+ .get_register = ab3550_get_register_interruptible,
+ .set_register = ab3550_set_register_interruptible,
+ .get_register_page = ab3550_get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = ab3550_mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3550_event_registers_startup_state_get,
+ .startup_irq_enabled = ab3550_startup_irq_enabled,
+};
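+/*
+ * Subdevices spawned from ab3550_devs reach these ops through the generic
+ * abx500 accessors, e.g. (hypothetical snippet from the ab3550-rtc cell,
+ * whose bank 0 range 0x00..0x0c is readable):
+ *
+ *	u8 val;
+ *	int err = abx500_get_register_interruptible(&pdev->dev, 0, 0x00, &val);
+ *
+ * abx500-core dispatches on pdev->dev.parent, the I2C client device that
+ * the ops above are registered against in ab3550_probe().
+ */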
+
+static irqreturn_t ab3550_irq_handler(int irq, void *data)
+{
+ struct ab3550 *ab = data;
+ int err;
+ unsigned int i;
+ u8 e[AB3550_NUM_EVENT_REG];
+ u8 *events;
+ unsigned long flags;
+
+ events = (ab->startup_events_read ? e : ab->startup_events);
+
+ err = get_register_page_interruptible(ab, AB3550_EVENT_BANK,
+ AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG);
+ if (err)
+ goto err_event_rd;
+
+ if (!ab->startup_events_read) {
+ dev_info(&ab->i2c_client[0]->dev,
+ "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n",
+ ab->startup_events[0], ab->startup_events[1],
+ ab->startup_events[2], ab->startup_events[3],
+ ab->startup_events[4]);
+ ab->startup_events_read = true;
+ goto out;
+ }
+
+ /* The two highest bits in event[4] are not used. */
+ events[4] &= 0x3f;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ events[i] &= ~ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ u8 bit;
+ u8 event_reg;
+
+		dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%02x\n",
+			i, events[i]);
+
+ event_reg = events[i];
+ for (bit = 0; event_reg; bit++, event_reg /= 2) {
+ if (event_reg % 2) {
+ unsigned int irq;
+ struct ab3550_platform_data *plf_data;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq = plf_data->irq.base + (i * 8) + bit;
+ handle_nested_irq(irq);
+ }
+ }
+ }
+out:
+ return IRQ_HANDLED;
+
+err_event_rd:
+ dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n");
+ return IRQ_HANDLED;
+}
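+/*
+ * Decoding example (illustrative values): events[1] = 0x05 has bits 0 and 2
+ * set, so handle_nested_irq() is called for plf_data->irq.base + 8 and
+ * plf_data->irq.base + 10.
+ */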
+
+#ifdef CONFIG_DEBUG_FS
+static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = {
+ {
+ .count = 6,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ },
+ {
+ .first = 0x1e,
+ .last = 0x4f,
+ },
+ {
+ .first = 0x51,
+ .last = 0x63,
+ },
+ {
+ .first = 0x65,
+ .last = 0xa3,
+ },
+ {
+ .first = 0xa5,
+ .last = 0xa8,
+ },
+ }
+ },
+ {
+ .count = 8,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x17,
+ },
+ {
+ .first = 0x1a,
+ .last = 0x1c,
+ },
+ {
+ .first = 0x20,
+ .last = 0x56,
+ },
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ },
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ },
+ }
+ },
+};
+
+static int ab3550_registers_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int bank;
+
+ seq_printf(s, AB3550_NAME_STRING " register values:\n");
+
+ for (bank = 0; bank < AB3550_NUM_BANKS; bank++) {
+ unsigned int i;
+
+ seq_printf(s, " bank %d:\n", bank);
+ for (i = 0; i < debug_ranges[bank].count; i++) {
+ u8 reg;
+
+ for (reg = debug_ranges[bank].range[i].first;
+ reg <= debug_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+
+ get_register_interruptible(ab, bank, reg,
+ &value);
+ seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank,
+ reg, value);
+ }
+ }
+ }
+ return 0;
+}
+
+static int ab3550_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab3550_registers_fops = {
+ .open = ab3550_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab3550_bank_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "%d\n", ab->debug_bank);
+ return 0;
+}
+
+static int ab3550_bank_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_bank_print, inode->i_private);
+}
+
+static ssize_t ab3550_bank_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB3550_NUM_BANKS) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ ab->debug_bank = user_bank;
+
+ return buf_size;
+}
+
+static int ab3550_address_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "0x%02X\n", ab->debug_address);
+ return 0;
+}
+
+static int ab3550_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_address_print, inode->i_private);
+}
+
+static ssize_t ab3550_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ ab->debug_address = user_address;
+ return buf_size;
+}
+
+static int ab3550_val_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int err;
+ u8 regvalue;
+
+ err = get_register_interruptible(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
+}
+
+static int ab3550_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_val_print, inode->i_private);
+}
+
+static ssize_t ab3550_val_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ u8 regvalue;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = mask_and_set_register_interruptible(
+ ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, 0xFF, (u8)user_val);
+ if (err)
+ return -EINVAL;
+
+	err = get_register_interruptible(ab, (u8)ab->debug_bank,
+		(u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+
+ return buf_size;
+}
+
+static const struct file_operations ab3550_bank_fops = {
+ .open = ab3550_bank_open,
+ .write = ab3550_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_address_fops = {
+ .open = ab3550_address_open,
+ .write = ab3550_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_val_fops = {
+ .open = ab3550_val_open,
+ .write = ab3550_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab3550_dir;
+static struct dentry *ab3550_reg_file;
+static struct dentry *ab3550_bank_file;
+static struct dentry *ab3550_address_file;
+static struct dentry *ab3550_val_file;
+
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+ ab->debug_bank = 0;
+ ab->debug_address = 0x00;
+
+ ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL);
+ if (!ab3550_dir)
+ goto exit_no_debugfs;
+
+ ab3550_reg_file = debugfs_create_file("all-registers",
+ S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops);
+ if (!ab3550_reg_file)
+ goto exit_destroy_dir;
+
+ ab3550_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
+ if (!ab3550_bank_file)
+ goto exit_destroy_reg;
+
+ ab3550_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
+ if (!ab3550_address_file)
+ goto exit_destroy_bank;
+
+ ab3550_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
+ if (!ab3550_val_file)
+ goto exit_destroy_address;
+
+ return;
+
+exit_destroy_address:
+ debugfs_remove(ab3550_address_file);
+exit_destroy_bank:
+ debugfs_remove(ab3550_bank_file);
+exit_destroy_reg:
+ debugfs_remove(ab3550_reg_file);
+exit_destroy_dir:
+ debugfs_remove(ab3550_dir);
+exit_no_debugfs:
+ dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n");
+ return;
+}
+
+static inline void ab3550_remove_debugfs(void)
+{
+ debugfs_remove(ab3550_val_file);
+ debugfs_remove(ab3550_address_file);
+ debugfs_remove(ab3550_bank_file);
+ debugfs_remove(ab3550_reg_file);
+ debugfs_remove(ab3550_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+}
+static inline void ab3550_remove_debugfs(void)
+{
+}
+#endif
+
+/*
+ * Basic set-up, data structure creation/destruction and I2C interface.
+ * This sets up a default config in the AB3550 chip so that it
+ * will work as expected.
+ */
+static int __init ab3550_setup(struct ab3550 *ab)
+{
+ int err = 0;
+ int i;
+ struct ab3550_platform_data *plf_data;
+ struct abx500_init_settings *settings;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ settings = plf_data->init_settings;
+
+ for (i = 0; i < plf_data->init_settings_sz; i++) {
+ err = mask_and_set_register_interruptible(ab,
+ settings[i].bank,
+ settings[i].reg,
+ 0xFF, settings[i].setting);
+ if (err)
+ goto exit_no_setup;
+
+		/* If this is an event mask register, update the cached event mask */
+ if ((settings[i].bank == 0) &&
+ (AB3550_IMR1 <= settings[i].reg) &&
+ (settings[i].reg <= AB3550_IMR5)) {
+ ab->event_mask[settings[i].reg - AB3550_IMR1] =
+ settings[i].setting;
+ }
+ }
+exit_no_setup:
+ return err;
+}
+
+static void ab3550_mask_work(struct work_struct *work)
+{
+ struct ab3550 *ab = container_of(work, struct ab3550, mask_work);
+ int i;
+ unsigned long flags;
+ u8 mask[AB3550_NUM_EVENT_REG];
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ mask[i] = ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ int err;
+
+ err = mask_and_set_register_interruptible(ab, 0,
+ (AB3550_IMR1 + i), ~0, mask[i]);
+ if (err)
+ dev_err(&ab->i2c_client[0]->dev,
+ "ab3550_mask_work failed 0x%x,0x%x\n",
+ (AB3550_IMR1 + i), mask[i]);
+ }
+}
+
+static void ab3550_mask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] |= BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ schedule_work(&ab->mask_work);
+}
+
+static void ab3550_unmask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] &= ~BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ schedule_work(&ab->mask_work);
+}
+
+static void noop(unsigned int irq)
+{
+}
+
+static struct irq_chip ab3550_irq_chip = {
+ .name = "ab3550-core", /* Keep the same name as the request */
+ .startup = NULL, /* defaults to enable */
+ .shutdown = NULL, /* defaults to disable */
+ .enable = NULL, /* defaults to unmask */
+ .disable = ab3550_mask, /* No default to mask in chip.c */
+ .ack = noop,
+ .mask = ab3550_mask,
+ .unmask = ab3550_unmask,
+ .end = NULL,
+};
+
+struct ab_family_id {
+ u8 id;
+ char *name;
+};
+
+static const struct ab_family_id ids[] __initdata = {
+ /* AB3550 */
+ {
+ .id = AB3550_P1A,
+ .name = "P1A"
+ },
+ /* Terminator */
+ {
+ .id = 0x00,
+ }
+};
+
+static int __init ab3550_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *ab3550_plf_data =
+ client->dev.platform_data;
+ int err;
+ int i;
+ int num_i2c_clients = 0;
+
+ ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL);
+ if (!ab) {
+ dev_err(&client->dev,
+ "could not allocate " AB3550_NAME_STRING " device\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize data structure */
+ mutex_init(&ab->access_mutex);
+ spin_lock_init(&ab->event_lock);
+ ab->i2c_client[0] = client;
+
+ i2c_set_clientdata(client, ab);
+
+ /* Read chip ID register */
+ err = get_register_interruptible(ab, 0, AB3550_CID_REG, &ab->chip_id);
+ if (err) {
+ dev_err(&client->dev, "could not communicate with the analog "
+ "baseband chip\n");
+ goto exit_no_detect;
+ }
+
+ for (i = 0; ids[i].id != 0x0; i++) {
+ if (ids[i].id == ab->chip_id) {
+ snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
+ AB3550_ID_FORMAT_STRING, ids[i].name);
+ break;
+ }
+ }
+
+ if (ids[i].id == 0x0) {
+ dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
+ ab->chip_id);
+ dev_err(&client->dev, "driver not started!\n");
+ goto exit_no_detect;
+ }
+
+ dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
+
+ /* Attach other dummy I2C clients. */
+ while (++num_i2c_clients < AB3550_NUM_BANKS) {
+ ab->i2c_client[num_i2c_clients] =
+ i2c_new_dummy(client->adapter,
+ (client->addr + num_i2c_clients));
+ if (!ab->i2c_client[num_i2c_clients]) {
+ err = -ENOMEM;
+ goto exit_no_dummy_client;
+ }
+ strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name,
+ sizeof(ab->i2c_client[num_i2c_clients]->name));
+ }
+
+ err = ab3550_setup(ab);
+ if (err)
+ goto exit_no_setup;
+
+ INIT_WORK(&ab->mask_work, ab3550_mask_work);
+
+ for (i = 0; i < ab3550_plf_data->irq.count; i++) {
+ unsigned int irq;
+
+ irq = ab3550_plf_data->irq.base + i;
+ set_irq_chip_data(irq, ab);
+ set_irq_chip_and_handler(irq, &ab3550_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
+ IRQF_ONESHOT, "ab3550-core", ab);
+	/* This genuinely unpredictable IRQ is of course sampled for entropy */
+ rand_initialize_irq(client->irq);
+
+ if (err)
+ goto exit_no_irq;
+
+ err = abx500_register_ops(&client->dev, &ab3550_ops);
+ if (err)
+ goto exit_no_ops;
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < AB3550_NUM_DEVICES; i++) {
+ ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
+ ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
+ }
+
+ err = mfd_add_devices(&client->dev, 0, ab3550_devs,
+ ARRAY_SIZE(ab3550_devs), NULL,
+ ab3550_plf_data->irq.base);
+
+ ab3550_setup_debugfs(ab);
+
+ return 0;
+
+exit_no_ops:
+exit_no_irq:
+exit_no_setup:
+exit_no_dummy_client:
+ /* Unregister the dummy i2c clients. */
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+exit_no_detect:
+ kfree(ab);
+ return err;
+}
+
+static int __exit ab3550_remove(struct i2c_client *client)
+{
+ struct ab3550 *ab = i2c_get_clientdata(client);
+ int num_i2c_clients = AB3550_NUM_BANKS;
+
+ mfd_remove_devices(&client->dev);
+ ab3550_remove_debugfs();
+
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+
+ /*
+ * At this point, all subscribers should have unregistered
+ * their notifiers so deactivate IRQ
+ */
+ free_irq(client->irq, ab);
+ i2c_set_clientdata(client, NULL);
+ kfree(ab);
+ return 0;
+}
+
+static const struct i2c_device_id ab3550_id[] = {
+ {AB3550_NAME_STRING, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ab3550_id);
+
+static struct i2c_driver ab3550_driver = {
+ .driver = {
+ .name = AB3550_NAME_STRING,
+ .owner = THIS_MODULE,
+ },
+ .id_table = ab3550_id,
+ .probe = ab3550_probe,
+ .remove = __exit_p(ab3550_remove),
+};
+
+static int __init ab3550_i2c_init(void)
+{
+ return i2c_add_driver(&ab3550_driver);
+}
+
+static void __exit ab3550_i2c_exit(void)
+{
+ i2c_del_driver(&ab3550_driver);
+}
+
+subsys_initcall(ab3550_i2c_init);
+module_exit(ab3550_i2c_exit);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("AB3550 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c
deleted file mode 100644
index c275daa3ab1..00000000000
--- a/drivers/mfd/ab4500-core.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2009 ST-Ericsson
- *
- * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
- *
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation.
- *
- * AB4500 is a companion power management chip used with U8500.
- * On this platform, this is interfaced with SSP0 controller
- * which is a ARM primecell pl022.
- *
- * At the moment the module just exports read/write features.
- * Interrupt management to be added - TODO.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ab4500.h>
-
-/* just required if probe fails, we need to
- * unregister the device
- */
-static struct spi_driver ab4500_driver;
-
-/*
- * This funtion writes to any AB4500 registers using
- * SPI protocol & before it writes it packs the data
- * in the below 24 bit frame format
- *
- * *|------------------------------------|
- * *| 23|22...18|17.......10|9|8|7......0|
- * *| r/w bank adr data |
- * * ------------------------------------
- *
- * This function shouldn't be called from interrupt
- * context
- */
-int ab4500_write(struct ab4500 *ab4500, unsigned char block,
- unsigned long addr, unsigned char data)
-{
- struct spi_transfer xfer;
- struct spi_message msg;
- int err;
- unsigned long spi_data =
- block << 18 | addr << 10 | data;
-
- mutex_lock(&ab4500->lock);
- ab4500->tx_buf[0] = spi_data;
- ab4500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab4500->tx_buf;
- xfer.rx_buf = NULL;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- err = spi_sync(ab4500->spi, &msg);
- mutex_unlock(&ab4500->lock);
-
- return err;
-}
-EXPORT_SYMBOL(ab4500_write);
-
-int ab4500_read(struct ab4500 *ab4500, unsigned char block,
- unsigned long addr)
-{
- struct spi_transfer xfer;
- struct spi_message msg;
- unsigned long spi_data =
- 1 << 23 | block << 18 | addr << 10;
-
- mutex_lock(&ab4500->lock);
- ab4500->tx_buf[0] = spi_data;
- ab4500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab4500->tx_buf;
- xfer.rx_buf = ab4500->rx_buf;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- spi_sync(ab4500->spi, &msg);
- mutex_unlock(&ab4500->lock);
-
- return ab4500->rx_buf[0];
-}
-EXPORT_SYMBOL(ab4500_read);
-
-/* ref: ab3100 core */
-#define AB4500_DEVICE(devname, devid) \
-static struct platform_device ab4500_##devname##_device = { \
- .name = devid, \
- .id = -1, \
-}
-
-/* list of childern devices of ab4500 - all are
- * not populated here - TODO
- */
-AB4500_DEVICE(charger, "ab4500-charger");
-AB4500_DEVICE(audio, "ab4500-audio");
-AB4500_DEVICE(usb, "ab4500-usb");
-AB4500_DEVICE(tvout, "ab4500-tvout");
-AB4500_DEVICE(sim, "ab4500-sim");
-AB4500_DEVICE(gpadc, "ab4500-gpadc");
-AB4500_DEVICE(clkmgt, "ab4500-clkmgt");
-AB4500_DEVICE(misc, "ab4500-misc");
-
-static struct platform_device *ab4500_platform_devs[] = {
- &ab4500_charger_device,
- &ab4500_audio_device,
- &ab4500_usb_device,
- &ab4500_tvout_device,
- &ab4500_sim_device,
- &ab4500_gpadc_device,
- &ab4500_clkmgt_device,
- &ab4500_misc_device,
-};
-
-static int __init ab4500_probe(struct spi_device *spi)
-{
- struct ab4500 *ab4500;
- unsigned char revision;
- int err = 0;
- int i;
-
- ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL);
- if (!ab4500) {
- dev_err(&spi->dev, "could not allocate AB4500\n");
- err = -ENOMEM;
- goto not_detect;
- }
-
- ab4500->spi = spi;
- spi_set_drvdata(spi, ab4500);
-
- mutex_init(&ab4500->lock);
-
- /* read the revision register */
- revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG);
-
- /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */
- if (revision == 0x0 || revision == 0x10)
- dev_info(&spi->dev, "Detected chip: %s, revision = %x\n",
- ab4500_driver.driver.name, revision);
- else {
- dev_err(&spi->dev, "unknown chip: 0x%x\n", revision);
- goto not_detect;
- }
-
- for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) {
- ab4500_platform_devs[i]->dev.parent =
- &spi->dev;
- platform_set_drvdata(ab4500_platform_devs[i], ab4500);
- }
-
- /* register the ab4500 platform devices */
- platform_add_devices(ab4500_platform_devs,
- ARRAY_SIZE(ab4500_platform_devs));
-
- return err;
-
- not_detect:
- spi_unregister_driver(&ab4500_driver);
- kfree(ab4500);
- return err;
-}
-
-static int __devexit ab4500_remove(struct spi_device *spi)
-{
- struct ab4500 *ab4500 =
- spi_get_drvdata(spi);
-
- kfree(ab4500);
-
- return 0;
-}
-
-static struct spi_driver ab4500_driver = {
- .driver = {
- .name = "ab4500",
- .owner = THIS_MODULE,
- },
- .probe = ab4500_probe,
- .remove = __devexit_p(ab4500_remove)
-};
-
-static int __devinit ab4500_init(void)
-{
- return spi_register_driver(&ab4500_driver);
-}
-
-static void __exit ab4500_exit(void)
-{
- spi_unregister_driver(&ab4500_driver);
-}
-
-subsys_initcall(ab4500_init);
-module_exit(ab4500_exit);
-
-MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
-MODULE_DESCRIPTION("AB4500 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
new file mode 100644
index 00000000000..f3d26fa9c34
--- /dev/null
+++ b/drivers/mfd/ab8500-core.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ab8500.h>
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE1_REG 0x0E00
+#define AB8500_IT_SOURCE2_REG 0x0E01
+#define AB8500_IT_SOURCE3_REG 0x0E02
+#define AB8500_IT_SOURCE4_REG 0x0E03
+#define AB8500_IT_SOURCE5_REG 0x0E04
+#define AB8500_IT_SOURCE6_REG 0x0E05
+#define AB8500_IT_SOURCE7_REG 0x0E06
+#define AB8500_IT_SOURCE8_REG 0x0E07
+#define AB8500_IT_SOURCE19_REG 0x0E12
+#define AB8500_IT_SOURCE20_REG 0x0E13
+#define AB8500_IT_SOURCE21_REG 0x0E14
+#define AB8500_IT_SOURCE22_REG 0x0E15
+#define AB8500_IT_SOURCE23_REG 0x0E16
+#define AB8500_IT_SOURCE24_REG 0x0E17
+
+/*
+ * latch registers
+ */
+#define AB8500_IT_LATCH1_REG 0x0E20
+#define AB8500_IT_LATCH2_REG 0x0E21
+#define AB8500_IT_LATCH3_REG 0x0E22
+#define AB8500_IT_LATCH4_REG 0x0E23
+#define AB8500_IT_LATCH5_REG 0x0E24
+#define AB8500_IT_LATCH6_REG 0x0E25
+#define AB8500_IT_LATCH7_REG 0x0E26
+#define AB8500_IT_LATCH8_REG 0x0E27
+#define AB8500_IT_LATCH9_REG 0x0E28
+#define AB8500_IT_LATCH10_REG 0x0E29
+#define AB8500_IT_LATCH19_REG 0x0E32
+#define AB8500_IT_LATCH20_REG 0x0E33
+#define AB8500_IT_LATCH21_REG 0x0E34
+#define AB8500_IT_LATCH22_REG 0x0E35
+#define AB8500_IT_LATCH23_REG 0x0E36
+#define AB8500_IT_LATCH24_REG 0x0E37
+
+/*
+ * mask registers
+ */
+
+#define AB8500_IT_MASK1_REG 0x0E40
+#define AB8500_IT_MASK2_REG 0x0E41
+#define AB8500_IT_MASK3_REG 0x0E42
+#define AB8500_IT_MASK4_REG 0x0E43
+#define AB8500_IT_MASK5_REG 0x0E44
+#define AB8500_IT_MASK6_REG 0x0E45
+#define AB8500_IT_MASK7_REG 0x0E46
+#define AB8500_IT_MASK8_REG 0x0E47
+#define AB8500_IT_MASK9_REG 0x0E48
+#define AB8500_IT_MASK10_REG 0x0E49
+#define AB8500_IT_MASK11_REG 0x0E4A
+#define AB8500_IT_MASK12_REG 0x0E4B
+#define AB8500_IT_MASK13_REG 0x0E4C
+#define AB8500_IT_MASK14_REG 0x0E4D
+#define AB8500_IT_MASK15_REG 0x0E4E
+#define AB8500_IT_MASK16_REG 0x0E4F
+#define AB8500_IT_MASK17_REG 0x0E50
+#define AB8500_IT_MASK18_REG 0x0E51
+#define AB8500_IT_MASK19_REG 0x0E52
+#define AB8500_IT_MASK20_REG 0x0E53
+#define AB8500_IT_MASK21_REG 0x0E54
+#define AB8500_IT_MASK22_REG 0x0E55
+#define AB8500_IT_MASK23_REG 0x0E56
+#define AB8500_IT_MASK24_REG 0x0E57
+
+#define AB8500_REV_REG 0x1080
+
+/*
+ * Map interrupt numbers to the LATCH and MASK register offsets. Interrupt
+ * numbers are indexed into this array with (num / 8).
+ *
+ * This is one off from the register names, i.e. AB8500_IT_MASK1_REG is at
+ * offset 0.
+ */
+static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
+ 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
+};
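+/*
+ * Worked example: interrupt line 40 uses index 40 / 8 = 5, so
+ * ab8500_irq_regoffset[5] = 6 and the registers involved are
+ * AB8500_IT_LATCH1_REG + 6 = 0x0E26 (LATCH7) and
+ * AB8500_IT_MASK1_REG + 6 = 0x0E46 (MASK7), with bit 40 % 8 = 0.
+ */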
+
+static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data);
+
+ ret = ab8500->write(ab8500, addr, data);
+ if (ret < 0)
+ dev_err(ab8500->dev, "failed to write reg %#x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+/**
+ * ab8500_write() - write an AB8500 register
+ * @ab8500: device to write to
+ * @addr: address of the register
+ * @data: value to write
+ */
+int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+ ret = __ab8500_write(ab8500, addr, data);
+ mutex_unlock(&ab8500->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_write);
+
+static int __ab8500_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+
+ ret = ab8500->read(ab8500, addr);
+ if (ret < 0)
+ dev_err(ab8500->dev, "failed to read reg %#x: %d\n",
+ addr, ret);
+
+ dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
+
+ return ret;
+}
+
+/**
+ * ab8500_read() - read an AB8500 register
+ * @ab8500: device to read from
+ * @addr: address of the register
+ */
+int ab8500_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+ ret = __ab8500_read(ab8500, addr);
+ mutex_unlock(&ab8500->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_read);
+
+/**
+ * ab8500_set_bits() - set a bitfield in an AB8500 register
+ * @ab8500: device to read from
+ * @addr: address of the register
+ * @mask: mask of the bitfield to modify
+ * @data: value to set to the bitfield
+ */
+int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+
+ ret = __ab8500_read(ab8500, addr);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= data;
+
+ ret = __ab8500_write(ab8500, addr, ret);
+
+out:
+ mutex_unlock(&ab8500->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_set_bits);
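+/*
+ * For instance (illustrative values), ab8500_set_bits(ab8500,
+ * AB8500_IT_MASK1_REG, 0x01, 0x00) clears only bit 0 of that register:
+ * the current value is read, ANDed with ~0x01 and ORed with 0x00 before
+ * being written back under ab8500->lock.
+ */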
+
+static void ab8500_irq_lock(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+
+ mutex_lock(&ab8500->irq_lock);
+}
+
+static void ab8500_irq_sync_unlock(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+ u8 old = ab8500->oldmask[i];
+ u8 new = ab8500->mask[i];
+ int reg;
+
+ if (new == old)
+ continue;
+
+ ab8500->oldmask[i] = new;
+
+ reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
+ ab8500_write(ab8500, reg, new);
+ }
+
+ mutex_unlock(&ab8500->irq_lock);
+}
+
+static void ab8500_irq_mask(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int offset = irq - ab8500->irq_base;
+ int index = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ ab8500->mask[index] |= mask;
+}
+
+static void ab8500_irq_unmask(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int offset = irq - ab8500->irq_base;
+ int index = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ ab8500->mask[index] &= ~mask;
+}
+
+static struct irq_chip ab8500_irq_chip = {
+ .name = "ab8500",
+ .bus_lock = ab8500_irq_lock,
+ .bus_sync_unlock = ab8500_irq_sync_unlock,
+ .mask = ab8500_irq_mask,
+ .unmask = ab8500_irq_unmask,
+};
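+/*
+ * The mask/unmask callbacks above only update the cached ab8500->mask[]
+ * bytes; the slow register writes are deferred to ab8500_irq_sync_unlock(),
+ * which the irq core calls once the bus lock is released. E.g. masking
+ * line irq_base + 7 sets bit 7 of mask[0], and a single write to
+ * AB8500_IT_MASK1_REG follows on unlock.
+ */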
+
+static irqreturn_t ab8500_irq(int irq, void *dev)
+{
+ struct ab8500 *ab8500 = dev;
+ int i;
+
+ dev_vdbg(ab8500->dev, "interrupt\n");
+
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+ int regoffset = ab8500_irq_regoffset[i];
+ int status;
+
+ status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + regoffset);
+ if (status <= 0)
+ continue;
+
+ do {
+ int bit = __ffs(status);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(ab8500->irq_base + line);
+ status &= ~(1 << bit);
+ } while (status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ab8500_irq_init(struct ab8500 *ab8500)
+{
+ int base = ab8500->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
+ set_irq_chip_data(irq, ab8500);
+ set_irq_chip_and_handler(irq, &ab8500_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void ab8500_irq_remove(struct ab8500 *ab8500)
+{
+ int base = ab8500->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static struct resource ab8500_gpadc_resources[] = {
+ {
+ .name = "HW_CONV_END",
+ .start = AB8500_INT_GP_HW_ADC_CONV_END,
+ .end = AB8500_INT_GP_HW_ADC_CONV_END,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "SW_CONV_END",
+ .start = AB8500_INT_GP_SW_ADC_CONV_END,
+ .end = AB8500_INT_GP_SW_ADC_CONV_END,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource ab8500_rtc_resources[] = {
+ {
+ .name = "60S",
+ .start = AB8500_INT_RTC_60S,
+ .end = AB8500_INT_RTC_60S,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ALARM",
+ .start = AB8500_INT_RTC_ALARM,
+ .end = AB8500_INT_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell ab8500_devs[] = {
+ {
+ .name = "ab8500-gpadc",
+ .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
+ .resources = ab8500_gpadc_resources,
+ },
+ {
+ .name = "ab8500-rtc",
+ .num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+ .resources = ab8500_rtc_resources,
+ },
+ { .name = "ab8500-charger", },
+ { .name = "ab8500-audio", },
+ { .name = "ab8500-usb", },
+ { .name = "ab8500-pwm", },
+};
+
+int __devinit ab8500_init(struct ab8500 *ab8500)
+{
+ struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+ int ret;
+ int i;
+
+ if (plat)
+ ab8500->irq_base = plat->irq_base;
+
+ mutex_init(&ab8500->lock);
+ mutex_init(&ab8500->irq_lock);
+
+ ret = ab8500_read(ab8500, AB8500_REV_REG);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 0x0 - Early Drop
+ * 0x10 - Cut 1.0
+ * 0x11 - Cut 1.1
+ */
+ if (ret == 0x0 || ret == 0x10 || ret == 0x11) {
+ ab8500->revision = ret;
+ dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret);
+ } else {
+ dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret);
+ return -EINVAL;
+ }
+
+ if (plat && plat->init)
+ plat->init(ab8500);
+
+ /* Clear and mask all interrupts */
+ for (i = 0; i < 10; i++) {
+ ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
+ ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ }
+
+ for (i = 18; i < 24; i++) {
+ ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
+ ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ }
+
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++)
+ ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
+
+ if (ab8500->irq_base) {
+ ret = ab8500_irq_init(ab8500);
+ if (ret)
+ return ret;
+
+ ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
+ IRQF_ONESHOT, "ab8500", ab8500);
+ if (ret)
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
+
+ return ret;
+
+out_freeirq:
+ if (ab8500->irq_base) {
+ free_irq(ab8500->irq, ab8500);
+out_removeirq:
+ ab8500_irq_remove(ab8500);
+ }
+ return ret;
+}
+
+int __devexit ab8500_exit(struct ab8500 *ab8500)
+{
+ mfd_remove_devices(ab8500->dev);
+ if (ab8500->irq_base) {
+ free_irq(ab8500->irq, ab8500);
+ ab8500_irq_remove(ab8500);
+ }
+
+ return 0;
+}
+
+MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
+MODULE_DESCRIPTION("AB8500 MFD core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
new file mode 100644
index 00000000000..b81d4f768ef
--- /dev/null
+++ b/drivers/mfd/ab8500-spi.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ab8500.h>
+
+/*
+ * This function writes to any AB8500 register using the
+ * SPI protocol; before writing, it packs the data into
+ * the 24 bit frame format below
+ *
+ * *|------------------------------------|
+ * *| 23|22...18|17.......10|9|8|7......0|
+ * *| r/w bank adr data |
+ * * ------------------------------------
+ *
+ * This function shouldn't be called from interrupt
+ * context
+ */
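+/*
+ * Packing example (illustrative values): a write (bit 23 = 0) of data 0x2a
+ * to bank 0x03, address 0x45 is 0x03 << 18 | 0x45 << 10 | 0x2a = 0x000d142a.
+ * The 16-bit addr argument below already carries the bank in its upper byte,
+ * so addr << 10 | data yields exactly this layout.
+ */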
+static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
+ dev);
+ unsigned long spi_data = addr << 10 | data;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ ab8500->tx_buf[0] = spi_data;
+ ab8500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab8500->tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
+{
+ struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
+ dev);
+ unsigned long spi_data = 1 << 23 | addr << 10;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int ret;
+
+ ab8500->tx_buf[0] = spi_data;
+ ab8500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab8500->tx_buf;
+ xfer.rx_buf = ab8500->rx_buf;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (!ret)
+ ret = ab8500->rx_buf[0];
+
+ return ret;
+}
+
+static int __devinit ab8500_spi_probe(struct spi_device *spi)
+{
+ struct ab8500 *ab8500;
+ int ret;
+
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
+ ab8500->dev = &spi->dev;
+ ab8500->irq = spi->irq;
+
+ ab8500->read = ab8500_spi_read;
+ ab8500->write = ab8500_spi_write;
+
+ spi_set_drvdata(spi, ab8500);
+
+ ret = ab8500_init(ab8500);
+ if (ret)
+ kfree(ab8500);
+
+ return ret;
+}
+
+static int __devexit ab8500_spi_remove(struct spi_device *spi)
+{
+ struct ab8500 *ab8500 = spi_get_drvdata(spi);
+
+ ab8500_exit(ab8500);
+ kfree(ab8500);
+
+ return 0;
+}
+
+static struct spi_driver ab8500_spi_driver = {
+ .driver = {
+ .name = "ab8500",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_spi_probe,
+ .remove = __devexit_p(ab8500_spi_remove)
+};
+
+static int __init ab8500_spi_init(void)
+{
+ return spi_register_driver(&ab8500_spi_driver);
+}
+subsys_initcall(ab8500_spi_init);
+
+static void __exit ab8500_spi_exit(void)
+{
+ spi_unregister_driver(&ab8500_spi_driver);
+}
+module_exit(ab8500_spi_exit);
+
+MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
+MODULE_DESCRIPTION("AB8500 SPI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
new file mode 100644
index 00000000000..3b3b97ec32a
--- /dev/null
+++ b/drivers/mfd/abx500-core.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Register access functions for the ABX500 Mixed Signal IC family.
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500.h>
+
+static LIST_HEAD(abx500_list);
+
+struct abx500_device_entry {
+ struct list_head list;
+ struct abx500_ops ops;
+ struct device *dev;
+};
+
+static void lookup_ops(struct device *dev, struct abx500_ops **ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ *ops = NULL;
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ if (dev_entry->dev == dev) {
+ *ops = &dev_entry->ops;
+ return;
+ }
+ }
+}
+
+int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL);
+	if (!dev_entry) {
+		dev_err(dev, "register_ops kzalloc failed\n");
+ return -ENOMEM;
+ }
+ dev_entry->dev = dev;
+ memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops));
+
+ list_add_tail(&dev_entry->list, &abx500_list);
+ return 0;
+}
+EXPORT_SYMBOL(abx500_register_ops);
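+/*
+ * A chip core driver registers its ops against the device that its MFD
+ * subdevices will have as parent, e.g. ab3550-core does
+ *
+ *	abx500_register_ops(&client->dev, &ab3550_ops);
+ *
+ * so that the accessors below can dispatch on dev->parent when called by a
+ * subdevice.
+ */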
+
+void abx500_remove_ops(struct device *dev)
+{
+ struct abx500_device_entry *dev_entry, *tmp;
+
+	list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list) {
+ if (dev_entry->dev == dev) {
+ list_del(&dev_entry->list);
+ kfree(dev_entry);
+ }
+ }
+}
+EXPORT_SYMBOL(abx500_remove_ops);
+
+int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->set_register != NULL))
+ return ops->set_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_set_register_interruptible);
+
+int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register != NULL))
+ return ops->get_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_interruptible);
+
+int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register_page != NULL))
+ return ops->get_register_page(dev, bank,
+ first_reg, regvals, numregs);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_page_interruptible);
+
+int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->mask_and_set_register != NULL))
+ return ops->mask_and_set_register(dev, bank,
+ reg, bitmask, bitvalues);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible);
+
+int abx500_get_chip_id(struct device *dev)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_chip_id != NULL))
+ return ops->get_chip_id(dev);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_chip_id);
+
+int abx500_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL))
+ return ops->event_registers_startup_state_get(dev, event);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_event_registers_startup_state_get);
+
+int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->startup_irq_enabled != NULL))
+ return ops->startup_irq_enabled(dev, irq);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_startup_irq_enabled);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 67181b147ab..3ad915d0589 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
new file mode 100644
index 00000000000..3e75f02e477
--- /dev/null
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -0,0 +1,190 @@
+/*
+ * DaVinci Voice Codec Core Interface for TI platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <sound/pcm.h>
+
+#include <linux/mfd/davinci_voicecodec.h>
+
+u32 davinci_vc_read(struct davinci_vc *davinci_vc, int reg)
+{
+ return __raw_readl(davinci_vc->base + reg);
+}
+
+void davinci_vc_write(struct davinci_vc *davinci_vc,
+ int reg, u32 val)
+{
+ __raw_writel(val, davinci_vc->base + reg);
+}
+
+static int __init davinci_vc_probe(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc;
+ struct resource *res, *mem;
+ struct mfd_cell *cell = NULL;
+ int ret;
+
+ davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL);
+ if (!davinci_vc) {
+ dev_dbg(&pdev->dev,
+ "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ davinci_vc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(davinci_vc->clk)) {
+ dev_dbg(&pdev->dev,
+ "could not get the clock for voice codec\n");
+ ret = -ENODEV;
+ goto fail1;
+ }
+ clk_enable(davinci_vc->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ ret = -ENODEV;
+ goto fail2;
+ }
+
+ davinci_vc->pbase = res->start;
+ davinci_vc->base_size = resource_size(res);
+
+ mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size,
+ pdev->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "VCIF region already claimed\n");
+ ret = -EBUSY;
+ goto fail2;
+ }
+
+ davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size);
+ if (!davinci_vc->base) {
+ dev_err(&pdev->dev, "can't ioremap mem resource.\n");
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_tx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_tx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_rx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_rx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);
+
+ davinci_vc->dev = &pdev->dev;
+ davinci_vc->pdev = pdev;
+
+ /* Voice codec interface client */
+ cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
+ cell->name = "davinci_vcif";
+ cell->driver_data = davinci_vc;
+
+ /* Voice codec CQ93VC client */
+ cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
+ cell->name = "cq93vc";
+ cell->driver_data = davinci_vc;
+
+ ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
+ DAVINCI_VC_CELLS, NULL, 0);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "fail to register client devices\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ iounmap(davinci_vc->base);
+fail3:
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+fail2:
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+fail1:
+ kfree(davinci_vc);
+
+ return ret;
+}
+
+static int __devexit davinci_vc_remove(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+
+ iounmap(davinci_vc->base);
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+
+ kfree(davinci_vc);
+
+ return 0;
+}
+
+static struct platform_driver davinci_vc_driver = {
+ .driver = {
+ .name = "davinci_voicecodec",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(davinci_vc_remove),
+};
+
+static int __init davinci_vc_init(void)
+{
+ return platform_driver_probe(&davinci_vc_driver, davinci_vc_probe);
+}
+module_init(davinci_vc_init);
+
+static void __exit davinci_vc_exit(void)
+{
+ platform_driver_unregister(&davinci_vc_driver);
+}
+module_exit(davinci_vc_exit);
+
+MODULE_AUTHOR("Miguel Aguilar");
+MODULE_DESCRIPTION("Texas Instruments DaVinci Voice Codec Core Interface");
+MODULE_LICENSE("GPL");
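
The two cells only publish the shared struct davinci_vc through cell->driver_data, which mfd-core of this vintage installs as the child's platform drvdata. A sketch of the child side, assuming that behaviour; the register offset and function name are placeholders:

#include <linux/platform_device.h>
#include <linux/mfd/davinci_voicecodec.h>

#define DEMO_REG	0x00	/* placeholder register offset */

static int demo_vcif_probe(struct platform_device *pdev)
{
	/* driver_data set by the core above, installed by mfd-core */
	struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
	u32 val;

	/* read-modify-write through the helpers exported by the core */
	val = davinci_vc_read(davinci_vc, DEMO_REG);
	davinci_vc_write(davinci_vc, DEMO_REG, val);

	return 0;
}
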
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
new file mode 100644
index 00000000000..9ed630799ac
--- /dev/null
+++ b/drivers/mfd/janz-cmodio.c
@@ -0,0 +1,304 @@
+/*
+ * Janz CMOD-IO MODULbus Carrier Board PCI Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Lots of inspiration and code was copied from drivers/mfd/sm501.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-cmodio"
+
+/* Size of each MODULbus module in PCI BAR4 */
+#define CMODIO_MODULBUS_SIZE 0x200
+
+/* Maximum number of MODULbus modules on a CMOD-IO carrier board */
+#define CMODIO_MAX_MODULES 4
+
+/* Module Parameters */
+static unsigned int num_modules = CMODIO_MAX_MODULES;
+static char *modules[CMODIO_MAX_MODULES] = {
+ "empty", "empty", "empty", "empty",
+};
+
+module_param_array(modules, charp, &num_modules, S_IRUGO);
+MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board");
+
+/* Unique Device Id */
+static unsigned int cmodio_id;
+
+struct cmodio_device {
+ /* Parent PCI device */
+ struct pci_dev *pdev;
+
+ /* PLX control registers */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+
+ /* hex switch position */
+ u8 hex;
+
+ /* mfd-core API */
+ struct mfd_cell cells[CMODIO_MAX_MODULES];
+ struct resource resources[3 * CMODIO_MAX_MODULES];
+ struct janz_platform_data pdata[CMODIO_MAX_MODULES];
+};
+
+/*
+ * Subdevices using the mfd-core API
+ */
+
+static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
+ char *name, unsigned int devno,
+ unsigned int modno)
+{
+ struct janz_platform_data *pdata;
+ struct mfd_cell *cell;
+ struct resource *res;
+ struct pci_dev *pci;
+
+ pci = priv->pdev;
+ cell = &priv->cells[devno];
+ res = &priv->resources[devno * 3];
+ pdata = &priv->pdata[devno];
+
+ cell->name = name;
+ cell->resources = res;
+ cell->num_resources = 3;
+
+ /* Setup the subdevice ID -- must be unique */
+ cell->id = cmodio_id++;
+
+ /* Add platform data */
+ pdata->modno = modno;
+ cell->platform_data = pdata;
+ cell->data_size = sizeof(*pdata);
+
+ /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[3];
+ res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno);
+ res->end = res->start + CMODIO_MODULBUS_SIZE - 1;
+ res++;
+
+ /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[4];
+ res->start = pci->resource[4].start;
+ res->end = pci->resource[4].end;
+ res++;
+
+ /*
+ * IRQ
+ *
+ * The start and end fields are used as an offset to the irq_base
+ * parameter passed into the mfd_add_devices() function call. All
+ * devices share the same IRQ.
+ */
+ res->flags = IORESOURCE_IRQ;
+ res->parent = NULL;
+ res->start = 0;
+ res->end = 0;
+ res++;
+
+ return 0;
+}
+
+/* Probe each submodule using kernel parameters */
+static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ unsigned int num_probed = 0;
+ char *name;
+ int i;
+
+ for (i = 0; i < num_modules; i++) {
+ name = modules[i];
+ if (!strcmp(name, "") || !strcmp(name, "empty"))
+ continue;
+
+ dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name);
+ cmodio_setup_subdevice(priv, name, num_probed, i);
+ num_probed++;
+ }
+
+ /* print an error message if no modules were probed */
+ if (num_probed == 0) {
+ dev_err(&priv->pdev->dev, "no MODULbus modules specified, "
+ "please set the ``modules'' kernel "
+ "parameter according to your "
+ "hardware configuration\n");
+ return -ENODEV;
+ }
+
+ return mfd_add_devices(&pdev->dev, 0, priv->cells,
+ num_probed, NULL, pdev->irq);
+}
+
+/*
+ * SYSFS Attributes
+ */
+
+static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cmodio_device *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex);
+}
+
+static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);
+
+static struct attribute *cmodio_sysfs_attrs[] = {
+ &dev_attr_modulbus_number.attr,
+ NULL,
+};
+
+static const struct attribute_group cmodio_sysfs_attr_group = {
+ .attrs = cmodio_sysfs_attrs,
+};
+
+/*
+ * PCI Driver
+ */
+
+static int __devinit cmodio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct cmodio_device *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ pci_set_drvdata(dev, priv);
+ priv->pdev = dev;
+
+ /* Hardware Initialization */
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "unable to enable device\n");
+ goto out_free_priv;
+ }
+
+ pci_set_master(dev);
+ ret = pci_request_regions(dev, DRV_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request regions\n");
+ goto out_pci_disable_device;
+ }
+
+ /* Onboard configuration registers */
+ priv->ctrl = pci_ioremap_bar(dev, 4);
+ if (!priv->ctrl) {
+ dev_err(&dev->dev, "unable to remap onboard regs\n");
+ ret = -ENOMEM;
+ goto out_pci_release_regions;
+ }
+
+ /* Read the hex switch on the carrier board */
+ priv->hex = ioread8(&priv->ctrl->int_enable);
+
+ /* Add the MODULbus number (hex switch value) to the device's sysfs */
+ ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ if (ret) {
+ dev_err(&dev->dev, "unable to create sysfs attributes\n");
+ goto out_unmap_ctrl;
+ }
+
+ /*
+ * Disable all interrupt lines, each submodule will enable its
+ * own interrupt line if needed
+ */
+ iowrite8(0xf, &priv->ctrl->int_disable);
+
+ /* Register drivers for all submodules */
+ ret = cmodio_probe_submodules(priv);
+ if (ret) {
+ dev_err(&dev->dev, "unable to probe submodules\n");
+ goto out_sysfs_remove_group;
+ }
+
+ return 0;
+
+out_sysfs_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+out_unmap_ctrl:
+ iounmap(priv->ctrl);
+out_pci_release_regions:
+ pci_release_regions(dev);
+out_pci_disable_device:
+ pci_disable_device(dev);
+out_free_priv:
+ kfree(priv);
+out_return:
+ return ret;
+}
+
+static void __devexit cmodio_pci_remove(struct pci_dev *dev)
+{
+ struct cmodio_device *priv = pci_get_drvdata(dev);
+
+ mfd_remove_devices(&dev->dev);
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ iounmap(priv->ctrl);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(priv);
+}
+
+#define PCI_VENDOR_ID_JANZ 0x13c3
+
+/* The list of devices that this module will support */
+static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
+
+static struct pci_driver cmodio_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cmodio_pci_ids,
+ .probe = cmodio_pci_probe,
+ .remove = __devexit_p(cmodio_pci_remove),
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init cmodio_init(void)
+{
+ return pci_register_driver(&cmodio_pci_driver);
+}
+
+static void __exit cmodio_exit(void)
+{
+ pci_unregister_driver(&cmodio_pci_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
+MODULE_LICENSE("GPL");
+
+module_init(cmodio_init);
+module_exit(cmodio_exit);
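
Loading looks something like "modprobe janz-cmodio modules=ttl,empty,empty,empty", where each non-empty name must match a child platform driver. A sketch of such a child picking up the resources and platform data laid out in cmodio_setup_subdevice(); the driver and probe names are hypothetical:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/janz.h>

static int demo_modulbus_probe(struct platform_device *pdev)
{
	/* copied from cell->platform_data by mfd-core */
	struct janz_platform_data *pdata = pdev->dev.platform_data;
	struct resource *mem, *ctrl;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* MODULbus window */
	ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);	/* PLX registers */
	irq = platform_get_irq(pdev, 0);			/* shared carrier IRQ */
	if (!mem || !ctrl || irq < 0)
		return -ENODEV;

	dev_info(&pdev->dev, "MODULbus position %d, mem %pR, irq %d\n",
		 pdata->modno, mem, irq);
	return 0;
}
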
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 85d63c04749..f621bcea3d0 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2);
max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ);
max8925_reg_read(chip->adc, MAX8925_TSC_IRQ);
- /* mask all interrupts */
+ /* mask all interrupts except for TSC */
max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0);
max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0);
max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff);
@@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff);
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff);
max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
- max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff);
mutex_init(&chip->irq_lock);
chip->core_irq = irq;
@@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
chip->core_irq = 0;
}
+
tsc_irq:
+ /* mask TSC interrupt */
+ max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
+
if (!pdata->tsc_irq) {
dev_warn(chip->dev, "No interrupt support on TSC IRQ\n");
return 0;
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index d9fd8785da4..e73f3f5252a 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client)
max8925_device_exit(chip);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
- i2c_set_clientdata(chip->adc, NULL);
- i2c_set_clientdata(chip->rtc, NULL);
i2c_set_clientdata(chip->i2c, NULL);
kfree(chip);
return 0;
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index 1f68ecadddc..fecf38a4f02 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -679,6 +679,10 @@ err_revision:
if (pdata->flags & MC13783_USE_TOUCHSCREEN)
mc13783_add_subdevice(mc13783, "mc13783-ts");
+ if (pdata->flags & MC13783_USE_LED)
+ mc13783_add_subdevice_pdata(mc13783, "mc13783-led",
+ pdata->leds, sizeof(*pdata->leds));
+
return 0;
}
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index a94b131a18e..721948be12c 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1228,6 +1228,7 @@ fail2:
free_irq(client->irq, menelaus);
flush_scheduled_work();
fail1:
+ i2c_set_clientdata(client, NULL);
kfree(menelaus);
return err;
}
@@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client)
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
- kfree(menelaus);
i2c_set_clientdata(client, NULL);
+ kfree(menelaus);
the_menelaus = NULL;
return 0;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8ffbb7a85a7..7dd76bceaae 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id,
res[r].flags = cell->resources[r].flags;
/* Find out base to use */
- if (cell->resources[r].flags & IORESOURCE_MEM) {
+ if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
res[r].parent = mem_base;
res[r].start = mem_base->start +
cell->resources[r].start;
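
With mem_base now optional, a carrier driver whose cells already contain absolute addresses (as janz-cmodio and rdc321x-southbridge below do) can pass NULL and the memory resources are forwarded untouched. A sketch under that assumption; the addresses and names are made up:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>

static struct resource demo_res[] = {
	{
		.start	= 0xfe000000,	/* absolute bus address, no mem_base */
		.end	= 0xfe0001ff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct mfd_cell demo_cells[] = {
	{
		.name		= "demo-child",
		.resources	= demo_res,
		.num_resources	= ARRAY_SIZE(demo_res),
	},
};

static int demo_add_children(struct device *parent, int irq_base)
{
	return mfd_add_devices(parent, -1, demo_cells,
			       ARRAY_SIZE(demo_cells), NULL, irq_base);
}
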
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index fe8f922f665..aed0d2a9b03 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -30,13 +30,13 @@
struct pcf50633_adc_request {
int mux;
int avg;
- int result;
void (*callback)(struct pcf50633 *, void *, int);
void *callback_param;
+};
- /* Used in case of sync requests */
+struct pcf50633_adc_sync_request {
+ int result;
struct completion completion;
-
};
#define PCF50633_MAX_ADC_FIFO_DEPTH 8
@@ -109,10 +109,10 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
return 0;
}
-static void
-pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
+static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
+ int result)
{
- struct pcf50633_adc_request *req = param;
+ struct pcf50633_adc_sync_request *req = param;
req->result = result;
complete(&req->completion);
@@ -120,28 +120,19 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
- struct pcf50633_adc_request *req;
- int err;
+ struct pcf50633_adc_sync_request req;
+ int ret;
- /* req is freed when the result is ready, in interrupt handler */
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- req->mux = mux;
- req->avg = avg;
- req->callback = pcf50633_adc_sync_read_callback;
- req->callback_param = req;
+ init_completion(&req.completion);
- init_completion(&req->completion);
- err = adc_enqueue_request(pcf, req);
- if (err)
- return err;
+ ret = pcf50633_adc_async_read(pcf, mux, avg,
+ pcf50633_adc_sync_read_callback, &req);
+ if (ret)
+ return ret;
- wait_for_completion(&req->completion);
+ wait_for_completion(&req.completion);
- /* FIXME by this time req might be already freed */
- return req->result;
+ return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
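
After this rework the completion lives on the caller's stack and the request itself goes through the existing async path, so a consumer can simply block. A sketch of the calling side; the mux and averaging constants are assumed to be the ones provided by <linux/mfd/pcf50633/adc.h>:

#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/adc.h>

static int demo_read_battery_adc(struct pcf50633 *pcf)
{
	int adc;

	/* blocks until pcf50633_adc_sync_read_callback() completes it */
	adc = pcf50633_adc_sync_read(pcf, PCF50633_ADCC1_MUX_BATSNS_RES,
				     PCF50633_ADCC1_AVERAGE_16);
	if (adc < 0)
		return adc;

	return adc;	/* raw ADC value; scaling is board specific */
}
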
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 63a614d696c..704736e6e9b 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,16 +21,16 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
-/* Two MBCS registers used during cold start */
-#define PCF50633_REG_MBCS1 0x4b
-#define PCF50633_REG_MBCS2 0x4c
-#define PCF50633_MBCS1_USBPRES 0x01
-#define PCF50633_MBCS1_ADAPTPRES 0x01
+int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
+void pcf50633_irq_free(struct pcf50633 *pcf);
+#ifdef CONFIG_PM
+int pcf50633_irq_suspend(struct pcf50633 *pcf);
+int pcf50633_irq_resume(struct pcf50633 *pcf);
+#endif
static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
{
@@ -215,244 +215,6 @@ static struct attribute_group pcf_attr_group = {
.attrs = pcf_sysfs_entries,
};
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data)
-{
- if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (WARN_ON(pcf->irq_handler[irq].handler))
- return -EBUSY;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = handler;
- pcf->irq_handler[irq].data = data;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_register_irq);
-
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
-{
- if (irq < 0 || irq > PCF50633_NUM_IRQ)
- return -EINVAL;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = NULL;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_free_irq);
-
-static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
-{
- u8 reg, bits, tmp;
- int ret = 0, idx;
-
- idx = irq >> 3;
- reg = PCF50633_REG_INT1M + idx;
- bits = 1 << (irq & 0x07);
-
- mutex_lock(&pcf->lock);
-
- if (mask) {
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp |= bits;
-
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- pcf->mask_regs[idx] &= ~bits;
- pcf->mask_regs[idx] |= bits;
- } else {
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp &= ~bits;
-
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- pcf->mask_regs[idx] &= ~bits;
- }
-out:
- mutex_unlock(&pcf->lock);
-
- return ret;
-}
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 1);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
-
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 0);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
-
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
-{
- u8 reg, bits;
-
- reg = irq >> 3;
- bits = 1 << (irq & 0x07);
-
- return pcf->mask_regs[reg] & bits;
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
-
-static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
-{
- if (pcf->irq_handler[irq].handler)
- pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
-}
-
-/* Maximum amount of time ONKEY is held before emergency action is taken */
-#define PCF50633_ONKEY1S_TIMEOUT 8
-
-static void pcf50633_irq_worker(struct work_struct *work)
-{
- struct pcf50633 *pcf;
- int ret, i, j;
- u8 pcf_int[5], chgstat;
-
- pcf = container_of(work, struct pcf50633, irq_work);
-
- /* Read the 5 INT regs in one transaction */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
- ARRAY_SIZE(pcf_int), pcf_int);
- if (ret != ARRAY_SIZE(pcf_int)) {
- dev_err(pcf->dev, "Error reading INT registers\n");
-
- /*
- * If this doesn't ACK the interrupt to the chip, we'll be
- * called once again as we're level triggered.
- */
- goto out;
- }
-
- /* defeat 8s death from lowsys on A5 */
- pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
-
- /* We immediately read the usb and adapter status. We thus make sure
- * only of USBINS/USBREM IRQ handlers are called */
- if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM);
- else
- pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS);
- }
-
- /* Make sure only one of ADPINS or ADPREM is set */
- if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM);
- else
- pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS);
- }
-
- dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
- "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
- pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
-
- /* Some revisions of the chip don't have a 8s standby mode on
- * ONKEY1S press. We try to manually do it in such cases. */
- if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
- dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
- pcf->onkey1s_held);
- if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
- if (pcf->pdata->force_shutdown)
- pcf->pdata->force_shutdown(pcf);
- }
-
- if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
- dev_info(pcf->dev, "ONKEY1S held\n");
- pcf->onkey1s_held = 1 ;
-
- /* Unmask IRQ_SECOND */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND);
-
- /* Unmask IRQ_ONKEYR */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR);
- }
-
- if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
- pcf->onkey1s_held = 0;
-
- /* Mask SECOND and ONKEYR interrupts */
- if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND,
- PCF50633_INT1_SECOND);
-
- if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR,
- PCF50633_INT2_ONKEYR);
- }
-
- /* Have we just resumed ? */
- if (pcf->is_suspended) {
- pcf->is_suspended = 0;
-
- /* Set the resume reason filtering out non resumers */
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
- pcf->resume_reason[i] = pcf_int[i] &
- pcf->pdata->resumers[i];
-
- /* Make sure we don't pass on any ONKEY events to
- * userspace now */
- pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
- }
-
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
- /* Unset masked interrupts */
- pcf_int[i] &= ~pcf->mask_regs[i];
-
- for (j = 0; j < 8 ; j++)
- if (pcf_int[i] & (1 << j))
- pcf50633_irq_call_handler(pcf, (i * 8) + j);
- }
-
-out:
- put_device(pcf->dev);
- enable_irq(pcf->irq);
-}
-
-static irqreturn_t pcf50633_irq(int irq, void *data)
-{
- struct pcf50633 *pcf = data;
-
- dev_dbg(pcf->dev, "pcf50633_irq\n");
-
- get_device(pcf->dev);
- disable_irq_nosync(pcf->irq);
- queue_work(pcf->work_queue, &pcf->irq_work);
-
- return IRQ_HANDLED;
-}
-
static void
pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
struct platform_device **pdev)
@@ -479,70 +241,17 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
{
struct pcf50633 *pcf;
- int ret = 0, i;
- u8 res[5];
-
pcf = i2c_get_clientdata(client);
- /* Make sure our interrupt handlers are not called
- * henceforth */
- disable_irq(pcf->irq);
-
- /* Make sure that any running IRQ worker has quit */
- cancel_work_sync(&pcf->irq_work);
-
- /* Save the masks */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0) {
- dev_err(pcf->dev, "error saving irq masks\n");
- goto out;
- }
-
- /* Write wakeup irq masks */
- for (i = 0; i < ARRAY_SIZE(res); i++)
- res[i] = ~pcf->pdata->resumers[i];
-
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(res), &res[0]);
- if (ret < 0) {
- dev_err(pcf->dev, "error writing wakeup irq masks\n");
- goto out;
- }
-
- pcf->is_suspended = 1;
-
-out:
- return ret;
+ return pcf50633_irq_suspend(pcf);
}
static int pcf50633_resume(struct i2c_client *client)
{
struct pcf50633 *pcf;
- int ret;
-
pcf = i2c_get_clientdata(client);
- /* Write the saved mask registers */
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0)
- dev_err(pcf->dev, "Error restoring saved suspend masks\n");
-
- /* Restore regulators' state */
-
-
- get_device(pcf->dev);
-
- /*
- * Clear any pending interrupts and set resume reason if any.
- * This will leave with enable_irq()
- */
- pcf50633_irq_worker(&pcf->irq_work);
-
- return 0;
+ return pcf50633_irq_resume(pcf);
}
#else
#define pcf50633_suspend NULL
@@ -573,43 +282,19 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
i2c_set_clientdata(client, pcf);
pcf->dev = &client->dev;
pcf->i2c_client = client;
- pcf->irq = client->irq;
- pcf->work_queue = create_singlethread_workqueue("pcf50633");
-
- if (!pcf->work_queue) {
- dev_err(&client->dev, "Failed to alloc workqueue\n");
- ret = -ENOMEM;
- goto err_free;
- }
-
- INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_destroy_workqueue;
+ goto err_free;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
version, variant);
- /* Enable all interrupts except RTC SECOND */
- pcf->mask_regs[0] = 0x80;
- pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
- pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
-
- ret = request_irq(client->irq, pcf50633_irq,
- IRQF_TRIGGER_LOW, "pcf50633", pcf);
-
- if (ret) {
- dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
- goto err_destroy_workqueue;
- }
+ pcf50633_irq_init(pcf, client->irq);
/* Create sub devices */
pcf50633_client_dev_register(pcf, "pcf50633-input",
@@ -620,6 +305,9 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
&pcf->mbc_pdev);
pcf50633_client_dev_register(pcf, "pcf50633-adc",
&pcf->adc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-backlight",
+ &pcf->bl_pdev);
+
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
struct platform_device *pdev;
@@ -638,10 +326,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
platform_device_add(pdev);
}
- if (enable_irq_wake(client->irq) < 0)
- dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
- "in this hardware revision", client->irq);
-
ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
if (ret)
dev_err(pcf->dev, "error creating sysfs entries\n");
@@ -651,8 +335,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
-err_destroy_workqueue:
- destroy_workqueue(pcf->work_queue);
err_free:
i2c_set_clientdata(client, NULL);
kfree(pcf);
@@ -665,8 +347,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
- free_irq(pcf->irq, pcf);
- destroy_workqueue(pcf->work_queue);
+ pcf50633_irq_free(pcf);
platform_device_unregister(pcf->input_pdev);
platform_device_unregister(pcf->rtc_pdev);
@@ -676,6 +357,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
+ i2c_set_clientdata(client, NULL);
kfree(pcf);
return 0;
diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c
new file mode 100644
index 00000000000..1b0192f1eff
--- /dev/null
+++ b/drivers/mfd/pcf50633-irq.c
@@ -0,0 +1,318 @@
+/* NXP PCF50633 Power Management Unit (PMU) driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/pcf50633/core.h>
+
+/* Two MBCS registers used during cold start */
+#define PCF50633_REG_MBCS1 0x4b
+#define PCF50633_REG_MBCS2 0x4c
+#define PCF50633_MBCS1_USBPRES 0x01
+#define PCF50633_MBCS1_ADAPTPRES 0x01
+
+int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
+ void (*handler) (int, void *), void *data)
+{
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler)
+ return -EINVAL;
+
+ if (WARN_ON(pcf->irq_handler[irq].handler))
+ return -EBUSY;
+
+ mutex_lock(&pcf->lock);
+ pcf->irq_handler[irq].handler = handler;
+ pcf->irq_handler[irq].data = data;
+ mutex_unlock(&pcf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_register_irq);
+
+int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
+{
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ)
+ return -EINVAL;
+
+ mutex_lock(&pcf->lock);
+ pcf->irq_handler[irq].handler = NULL;
+ mutex_unlock(&pcf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_free_irq);
+
+static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
+{
+ u8 reg, bit;
+ int ret = 0, idx;
+
+ idx = irq >> 3;
+ reg = PCF50633_REG_INT1M + idx;
+ bit = 1 << (irq & 0x07);
+
+ pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0);
+
+ mutex_lock(&pcf->lock);
+
+ if (mask)
+ pcf->mask_regs[idx] |= bit;
+ else
+ pcf->mask_regs[idx] &= ~bit;
+
+ mutex_unlock(&pcf->lock);
+
+ return ret;
+}
+
+int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
+{
+ dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
+
+ return __pcf50633_irq_mask_set(pcf, irq, 1);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
+
+int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
+{
+ dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
+
+ return __pcf50633_irq_mask_set(pcf, irq, 0);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
+
+int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
+{
+ u8 reg, bits;
+
+ reg = irq >> 3;
+ bits = 1 << (irq & 0x07);
+
+ return pcf->mask_regs[reg] & bits;
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
+
+static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
+{
+ if (pcf->irq_handler[irq].handler)
+ pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
+}
+
+/* Maximum amount of time ONKEY is held before emergency action is taken */
+#define PCF50633_ONKEY1S_TIMEOUT 8
+
+static irqreturn_t pcf50633_irq(int irq, void *data)
+{
+ struct pcf50633 *pcf = data;
+ int ret, i, j;
+ u8 pcf_int[5], chgstat;
+
+ /* Read the 5 INT regs in one transaction */
+ ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
+ ARRAY_SIZE(pcf_int), pcf_int);
+ if (ret != ARRAY_SIZE(pcf_int)) {
+ dev_err(pcf->dev, "Error reading INT registers\n");
+
+ /*
+ * If this doesn't ACK the interrupt to the chip, we'll be
+ * called once again as we're level triggered.
+ */
+ goto out;
+ }
+
+ /* defeat 8s death from lowsys on A5 */
+ pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
+
+ /* We immediately read the usb and adapter status, so that only one
+ * of the USBINS/USBREM IRQ handlers is called */
+ if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
+ chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ if (chgstat & (0x3 << 4))
+ pcf_int[0] &= ~PCF50633_INT1_USBREM;
+ else
+ pcf_int[0] &= ~PCF50633_INT1_USBINS;
+ }
+
+ /* Make sure only one of ADPINS or ADPREM is set */
+ if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
+ chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ if (chgstat & (0x3 << 4))
+ pcf_int[0] &= ~PCF50633_INT1_ADPREM;
+ else
+ pcf_int[0] &= ~PCF50633_INT1_ADPINS;
+ }
+
+ dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
+ "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
+ pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
+
+ /* Some revisions of the chip don't have an 8s standby mode on
+ * ONKEY1S press. We try to do it manually in such cases. */
+ if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
+ dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
+ pcf->onkey1s_held);
+ if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
+ if (pcf->pdata->force_shutdown)
+ pcf->pdata->force_shutdown(pcf);
+ }
+
+ if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
+ dev_info(pcf->dev, "ONKEY1S held\n");
+ pcf->onkey1s_held = 1;
+
+ /* Unmask IRQ_SECOND */
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
+ PCF50633_INT1_SECOND);
+
+ /* Unmask IRQ_ONKEYR */
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
+ PCF50633_INT2_ONKEYR);
+ }
+
+ if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
+ pcf->onkey1s_held = 0;
+
+ /* Mask SECOND and ONKEYR interrupts */
+ if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
+ pcf50633_reg_set_bit_mask(pcf,
+ PCF50633_REG_INT1M,
+ PCF50633_INT1_SECOND,
+ PCF50633_INT1_SECOND);
+
+ if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
+ pcf50633_reg_set_bit_mask(pcf,
+ PCF50633_REG_INT2M,
+ PCF50633_INT2_ONKEYR,
+ PCF50633_INT2_ONKEYR);
+ }
+
+ /* Have we just resumed ? */
+ if (pcf->is_suspended) {
+ pcf->is_suspended = 0;
+
+ /* Set the resume reason filtering out non resumers */
+ for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
+ pcf->resume_reason[i] = pcf_int[i] &
+ pcf->pdata->resumers[i];
+
+ /* Make sure we don't pass on any ONKEY events to
+ * userspace now */
+ pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
+ /* Unset masked interrupts */
+ pcf_int[i] &= ~pcf->mask_regs[i];
+
+ for (j = 0; j < 8 ; j++)
+ if (pcf_int[i] & (1 << j))
+ pcf50633_irq_call_handler(pcf, (i * 8) + j);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+
+int pcf50633_irq_suspend(struct pcf50633 *pcf)
+{
+ int ret;
+ int i;
+ u8 res[5];
+
+ /* Make sure our interrupt handlers are not called
+ * henceforth */
+ disable_irq(pcf->irq);
+
+ /* Save the masks */
+ ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(pcf->suspend_irq_masks),
+ pcf->suspend_irq_masks);
+ if (ret < 0) {
+ dev_err(pcf->dev, "error saving irq masks\n");
+ goto out;
+ }
+
+ /* Write wakeup irq masks */
+ for (i = 0; i < ARRAY_SIZE(res); i++)
+ res[i] = ~pcf->pdata->resumers[i];
+
+ ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(res), &res[0]);
+ if (ret < 0) {
+ dev_err(pcf->dev, "error writing wakeup irq masks\n");
+ goto out;
+ }
+
+ pcf->is_suspended = 1;
+
+out:
+ return ret;
+}
+
+int pcf50633_irq_resume(struct pcf50633 *pcf)
+{
+ int ret;
+
+ /* Write the saved mask registers */
+ ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(pcf->suspend_irq_masks),
+ pcf->suspend_irq_masks);
+ if (ret < 0)
+ dev_err(pcf->dev, "Error restoring saved suspend masks\n");
+
+ enable_irq(pcf->irq);
+
+ return ret;
+}
+
+#endif
+
+int pcf50633_irq_init(struct pcf50633 *pcf, int irq)
+{
+ int ret;
+
+ pcf->irq = irq;
+
+ /* Enable all interrupts except RTC SECOND */
+ pcf->mask_regs[0] = 0x80;
+ pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
+
+ ret = request_threaded_irq(irq, NULL, pcf50633_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "pcf50633", pcf);
+
+ if (ret)
+ dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
+
+ if (enable_irq_wake(irq) < 0)
+ dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
+ "in this hardware revision", irq);
+
+ return ret;
+}
+
+void pcf50633_irq_free(struct pcf50633 *pcf)
+{
+ free_irq(pcf->irq, pcf);
+}
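
The registration API is unchanged by the move to a threaded handler; subdrivers still hook individual sources through pcf50633_register_irq(). A sketch, assuming the PCF50633_IRQ_USBINS constant from <linux/mfd/pcf50633/core.h>:

#include <linux/mfd/pcf50633/core.h>

static void demo_usbins_handler(int irq, void *data)
{
	struct pcf50633 *pcf = data;

	dev_info(pcf->dev, "USB cable inserted (irq %d)\n", irq);
}

static int demo_hook_usbins(struct pcf50633 *pcf)
{
	int ret;

	ret = pcf50633_register_irq(pcf, PCF50633_IRQ_USBINS,
				    demo_usbins_handler, pcf);
	if (ret)
		return ret;

	/* only RTC SECOND is masked after pcf50633_irq_init(), but be explicit */
	return pcf50633_irq_unmask(pcf, PCF50633_IRQ_USBINS);
}
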
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
new file mode 100644
index 00000000000..50922975bda
--- /dev/null
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -0,0 +1,123 @@
+/*
+ * RDC321x MFD southbridge driver
+ *
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rdc321x.h>
+
+static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
+
+static struct resource rdc321x_wdt_resource[] = {
+ {
+ .name = "wdt-reg",
+ .start = RDC321X_WDT_CTRL,
+ .end = RDC321X_WDT_CTRL + 0x3,
+ .flags = IORESOURCE_IO,
+ }
+};
+
+static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
+ .max_gpios = RDC321X_MAX_GPIO,
+};
+
+static struct resource rdc321x_gpio_resources[] = {
+ {
+ .name = "gpio-reg1",
+ .start = RDC321X_GPIO_CTRL_REG1,
+ .end = RDC321X_GPIO_CTRL_REG1 + 0x7,
+ .flags = IORESOURCE_IO,
+ }, {
+ .name = "gpio-reg2",
+ .start = RDC321X_GPIO_CTRL_REG2,
+ .end = RDC321X_GPIO_CTRL_REG2 + 0x7,
+ .flags = IORESOURCE_IO,
+ }
+};
+
+static struct mfd_cell rdc321x_sb_cells[] = {
+ {
+ .name = "rdc321x-wdt",
+ .resources = rdc321x_wdt_resource,
+ .num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
+ .driver_data = &rdc321x_wdt_pdata,
+ }, {
+ .name = "rdc321x-gpio",
+ .resources = rdc321x_gpio_resources,
+ .num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
+ .driver_data = &rdc321x_gpio_pdata,
+ },
+};
+
+static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return err;
+ }
+
+ rdc321x_gpio_pdata.sb_pdev = pdev;
+ rdc321x_wdt_pdata.sb_pdev = pdev;
+
+ return mfd_add_devices(&pdev->dev, -1,
+ rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0);
+}
+
+static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
+ {}
+};
+
+static struct pci_driver rdc321x_sb_driver = {
+ .name = "RDC321x Southbridge",
+ .id_table = rdc321x_sb_table,
+ .probe = rdc321x_sb_probe,
+ .remove = __devexit_p(rdc321x_sb_remove),
+};
+
+static int __init rdc321x_sb_init(void)
+{
+ return pci_register_driver(&rdc321x_sb_driver);
+}
+
+static void __exit rdc321x_sb_exit(void)
+{
+ pci_unregister_driver(&rdc321x_sb_driver);
+}
+
+module_init(rdc321x_sb_init);
+module_exit(rdc321x_sb_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 497f91b6138..cd164595f08 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -26,11 +26,15 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/sh_dma.h>
struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
struct mfd_cell cell_mmc;
+ struct sh_dmae_slave param_tx;
+ struct sh_dmae_slave param_rx;
+ struct tmio_mmc_dma dma_priv;
};
static struct resource sh_mobile_sdhi_resources[] = {
@@ -64,6 +68,8 @@ static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
+ struct tmio_mmc_data *mmc_data;
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct resource *mem;
char clk_name[8];
int ret, irq;
@@ -85,6 +91,8 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mmc_data = &priv->mmc_data;
+
snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
priv->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->clk)) {
@@ -96,12 +104,24 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
clk_enable(priv->clk);
- priv->mmc_data.hclk = clk_get_rate(priv->clk);
- priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr;
- priv->mmc_data.capabilities = MMC_CAP_MMC_HIGHSPEED;
+ mmc_data->hclk = clk_get_rate(priv->clk);
+ mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
+ if (p) {
+ mmc_data->flags = p->tmio_flags;
+ mmc_data->ocr_mask = p->tmio_ocr_mask;
+ }
+
+ if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ priv->param_tx.slave_id = p->dma_slave_tx;
+ priv->param_rx.slave_id = p->dma_slave_rx;
+ priv->dma_priv.chan_priv_tx = &priv->param_tx;
+ priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ mmc_data->dma = &priv->dma_priv;
+ }
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.driver_data = &priv->mmc_data;
+ priv->cell_mmc.driver_data = mmc_data;
priv->cell_mmc.platform_data = &priv->cell_mmc;
priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
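
The new fields come from the board's sh_mobile_sdhi_info; when both slave IDs are valid the driver wires up the tmio_mmc_dma parameters as above. A sketch of what a board file might pass, where the slave IDs, flag and OCR mask are illustrative values only; the struct is then attached as platform_data of the "sh_mobile_sdhi" platform device:

#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

static struct sh_mobile_sdhi_info demo_sdhi0_info = {
	.dma_slave_tx	= 1,	/* SoC-specific DMA slave IDs (illustrative) */
	.dma_slave_rx	= 2,
	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
	.tmio_ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
};
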
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index da6383a934a..5041d33adf0 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev)
struct resource *iomem, *rscr;
int ret;
+ if (pdata == NULL)
+ return -EINVAL;
+
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem)
return -EINVAL;
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 00000000000..715f095dd7a
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect.
+ */
+ tc35892_reg_read(tc35892, TC35892_IRQST);
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(unsigned int irq)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .mask = tc35892_irq_dummy,
+ .unmask = tc35892_irq_dummy,
+};
+
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ set_irq_chip_data(irq, tc35892);
+ set_irq_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /* Put everything except the IRQ module into reset */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST
+ | TC35892_RSTCTRL_GPIRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ i2c_set_clientdata(i2c, NULL);
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(tc35892);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 7f478ec4184..ac5995026c8 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -31,6 +31,7 @@
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>
+#include <linux/i2c-xiic.h>
#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
@@ -40,6 +41,8 @@
#include <media/timb_radio.h>
+#include <linux/timb_dma.h>
+
#include "timberdale.h"
#define DRIVER_NAME "timberdale"
@@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = {
},
};
+static __devinitdata struct xiic_i2c_platform_data
+timberdale_xiic_platform_data = {
+ .devices = timberdale_i2c_board_info,
+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
+};
+
static __devinitdata struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
.regstep = 4,
@@ -77,7 +86,20 @@ timberdale_ocores_platform_data = {
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
-const static __devinitconst struct resource timberdale_ocores_resources[] = {
+static const __devinitconst struct resource timberdale_xiic_resources[] = {
+ {
+ .start = XIICOFFSET,
+ .end = XIICEND,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_TIMBERDALE_I2C,
+ .end = IRQ_TIMBERDALE_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const __devinitconst struct resource timberdale_ocores_resources[] = {
{
.start = OCORESOFFSET,
.end = OCORESEND,
@@ -126,7 +148,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
*/
};
-const static __devinitconst struct resource timberdale_spi_resources[] = {
+static const __devinitconst struct resource timberdale_spi_resources[] = {
{
.start = SPIOFFSET,
.end = SPIEND,
@@ -139,7 +161,7 @@ const static __devinitconst struct resource timberdale_spi_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_eth_resources[] = {
+static const __devinitconst struct resource timberdale_eth_resources[] = {
{
.start = ETHOFFSET,
.end = ETHEND,
@@ -159,7 +181,7 @@ static __devinitdata struct timbgpio_platform_data
.irq_base = 200,
};
-const static __devinitconst struct resource timberdale_gpio_resources[] = {
+static const __devinitconst struct resource timberdale_gpio_resources[] = {
{
.start = GPIOOFFSET,
.end = GPIOEND,
@@ -172,7 +194,7 @@ const static __devinitconst struct resource timberdale_gpio_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
+static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
{
.start = MLCOREOFFSET,
.end = MLCOREEND,
@@ -190,7 +212,7 @@ const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_uart_resources[] = {
+static const __devinitconst struct resource timberdale_uart_resources[] = {
{
.start = UARTOFFSET,
.end = UARTEND,
@@ -203,7 +225,7 @@ const static __devinitconst struct resource timberdale_uart_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_uartlite_resources[] = {
+static const __devinitconst struct resource timberdale_uartlite_resources[] = {
{
.start = UARTLITEOFFSET,
.end = UARTLITEEND,
@@ -216,7 +238,7 @@ const static __devinitconst struct resource timberdale_uartlite_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_radio_resources[] = {
+static const __devinitconst struct resource timberdale_radio_resources[] = {
{
.start = RDSOFFSET,
.end = RDSEND,
@@ -250,7 +272,66 @@ static __devinitdata struct timb_radio_platform_data
}
};
-const static __devinitconst struct resource timberdale_dma_resources[] = {
+static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
+ .nr_channels = 10,
+ .channels = {
+ {
+ /* UART RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* UART TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* Video RX */
+ .rx = true,
+ .bytes_per_line = 1440,
+ .descriptors = 2,
+ .descriptor_elements = 16
+ },
+ {
+ /* Video framedrop */
+ },
+ {
+ /* SDHCI RX */
+ .rx = true,
+ },
+ {
+ /* SDHCI TX */
+ },
+ {
+ /* ETH RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* ETH TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ }
+};
+
+static const __devinitconst struct resource timberdale_dma_resources[] = {
{
.start = DMAOFFSET,
.end = DMAEND,
@@ -265,11 +346,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = {
static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
+ .name = "timb-dma",
+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
+ .resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
+ },
+ {
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -314,6 +411,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.resources = timberdale_uartlite_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -344,20 +448,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.platform_data = &timberdale_xspi_platform_data,
.data_size = sizeof(timberdale_xspi_platform_data),
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
- {
- .name = "timb-dma",
- .num_resources = ARRAY_SIZE(timberdale_dma_resources),
- .resources = timberdale_dma_resources,
- },
};
static const __devinitconst struct resource timberdale_sdhc_resources[] = {
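
For reference, each mfd_cell above hands its child platform device one MEM and one IRQ resource plus optional platform data; a child such as the timb-dma cell would typically pick these up as in the minimal sketch below (the probe function and its names are hypothetical, shown only to illustrate the hookup, not taken from this patch).

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>

/* Hypothetical child probe for one of the cells registered above */
static int timb_child_example_probe(struct platform_device *pdev)
{
        struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);
        void *pdata = pdev->dev.platform_data;  /* cell's platform_data/data_size */

        if (!iomem || irq < 0 || !pdata)
                return -EINVAL;

        dev_info(&pdev->dev, "registers at 0x%llx, irq %d\n",
                 (unsigned long long)iomem->start, irq);
        return 0;
}
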
diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
index 8d27ffabc25..c11bf6ebfe0 100644
--- a/drivers/mfd/timberdale.h
+++ b/drivers/mfd/timberdale.h
@@ -23,7 +23,7 @@
#ifndef MFD_TIMBERDALE_H
#define MFD_TIMBERDALE_H
-#define DRV_VERSION "0.1"
+#define DRV_VERSION "0.2"
/* This driver only supports versions >= 3.8 and < 4.0 */
#define TIMB_SUPPORTED_MAJOR 3
@@ -66,7 +66,7 @@
#define CHIPCTLOFFSET 0x800
#define CHIPCTLEND 0x8ff
-#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1)
#define INTCOFFSET 0xc00
#define INTCEND 0xfff
@@ -127,4 +127,16 @@
#define GPIO_PIN_BT_RST 15
#define GPIO_NR_PINS 16
+/* DMA Channels */
+#define DMA_UART_RX 0
+#define DMA_UART_TX 1
+#define DMA_MLB_RX 2
+#define DMA_MLB_TX 3
+#define DMA_VIDEO_RX 4
+#define DMA_VIDEO_DROP 5
+#define DMA_SDHCI_RX 6
+#define DMA_SDHCI_TX 7
+#define DMA_ETH_RX 8
+#define DMA_ETH_TX 9
+
#endif
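
The CHIPCTLSIZE change above is an off-by-one fix: .start and .end in struct resource are inclusive, so the size of a window is end - start + 1, which is also what the kernel's resource_size() helper computes. A minimal illustration (generic, not part of this patch):

#include <linux/ioport.h>

/* .start/.end in struct resource are inclusive addresses, so the
 * 0x800..0x8ff chip-control window really is 0x100 bytes long. */
static const struct resource chipctl_example = {
        .start = 0x800,                 /* CHIPCTLOFFSET */
        .end   = 0x8ff,                 /* CHIPCTLEND */
        .flags = IORESOURCE_MEM,
};

/* resource_size(&chipctl_example) == end - start + 1 == 0x100,
 * which is exactly what the corrected CHIPCTLSIZE macro now yields. */
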
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index e5955306c2f..9b22a77f70f 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client)
cancel_delayed_work(&tps->work);
flush_scheduled_work();
debugfs_remove(tps->file);
- kfree(tps);
i2c_set_clientdata(client, NULL);
+ kfree(tps);
the_tps = NULL;
return 0;
}
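
The reordering in tps65010_remove() above follows the usual teardown rule: detach the client data pointer before freeing the allocation it points at, so nothing can look up freed memory through the client. A generic sketch of that order (struct and function names are invented for illustration):

#include <linux/i2c.h>
#include <linux/slab.h>

/* Hypothetical driver state, for illustration only */
struct example_chip {
        int dummy;
};

static int example_i2c_remove(struct i2c_client *client)
{
        struct example_chip *chip = i2c_get_clientdata(client);

        i2c_set_clientdata(client, NULL);       /* detach the pointer first... */
        kfree(chip);                            /* ...then free the memory */
        return 0;
}
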
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
new file mode 100644
index 00000000000..d859dffed39
--- /dev/null
+++ b/drivers/mfd/tps6507x.c
@@ -0,0 +1,159 @@
+/*
+ * tps6507x.c -- TPS6507x chip family multi-function driver
+ *
+ * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Author: Todd Fischer
+ * todd.fischer@ridgerun.com
+ *
+ * Credits:
+ *
+ * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC.
+ *
+ * For licencing details see kernel-base/COPYING
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6507x.h>
+
+static struct mfd_cell tps6507x_devs[] = {
+ {
+ .name = "tps6507x-pmic",
+ },
+ {
+ .name = "tps6507x-ts",
+ },
+};
+
+
+static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6507X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > (TPS6507X_MAX_REGISTER + 1))
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+ return 0;
+}
+
+static int tps6507x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps6507x_dev *tps6507x;
+ int ret = 0;
+
+ tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ if (tps6507x == NULL) {
+ kfree(i2c);
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(i2c, tps6507x);
+ tps6507x->dev = &i2c->dev;
+ tps6507x->i2c_client = i2c;
+ tps6507x->read_dev = tps6507x_i2c_read_device;
+ tps6507x->write_dev = tps6507x_i2c_write_device;
+
+ ret = mfd_add_devices(tps6507x->dev, -1,
+ tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
+ NULL, 0);
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+ return ret;
+}
+
+static int tps6507x_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps6507x_i2c_id[] = {
+ { "tps6507x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
+
+
+static struct i2c_driver tps6507x_i2c_driver = {
+ .driver = {
+ .name = "tps6507x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_i2c_probe,
+ .remove = tps6507x_i2c_remove,
+ .id_table = tps6507x_i2c_id,
+};
+
+static int __init tps6507x_i2c_init(void)
+{
+ return i2c_add_driver(&tps6507x_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps6507x_i2c_init);
+
+static void __exit tps6507x_i2c_exit(void)
+{
+ i2c_del_driver(&tps6507x_i2c_driver);
+}
+module_exit(tps6507x_i2c_exit);
+
+MODULE_DESCRIPTION("TPS6507x chip family multi-function driver");
+MODULE_LICENSE("GPL");
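
Subdevices created by mfd_add_devices() above reach the shared register I/O through the callbacks stored in struct tps6507x_dev. A minimal sketch of a hypothetical child driver doing one read follows; the field names come from the file above, while the child probe, the register 0x00 and the use of dev_get_drvdata() on the parent device are illustrative assumptions.

#include <linux/platform_device.h>
#include <linux/mfd/tps6507x.h>

static int tps6507x_child_example_probe(struct platform_device *pdev)
{
        /* The parent is the I2C device that carries the tps6507x_dev */
        struct tps6507x_dev *tps6507x = dev_get_drvdata(pdev->dev.parent);
        u8 val;
        int ret;

        /* Read one byte from a hypothetical register 0x00 via the bus hook */
        ret = tps6507x->read_dev(tps6507x, 0x00, 1, &val);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "register 0x00 = 0x%02x\n", val);
        return 0;
}
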
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 562cd4935e1..720e099e506 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -109,7 +109,7 @@
#endif
#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
- defined(CONFIG_SND_SOC_TWL6030) || defined(CONFIG_SND_SOC_TWL6030_MODULE)
+ defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE)
#define twl_has_codec() true
#else
#define twl_has_codec() false
@@ -708,7 +708,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
/* Phoenix*/
if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6030_codec",
+ child = add_child(sub_chip_id, "twl6040_codec",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 202bdd59632..097f24d8bce 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -232,10 +232,11 @@ static const struct sih sih_modules_twl5031[8] = {
},
[6] = {
/*
- * ACI doesn't use the same SIH organization.
- * For example, it supports only one interrupt line
+ * ECI/DBI doesn't use the same SIH organization.
+ * For example, it supports only one interrupt output line.
+ * That is, the interrupts are seen on both INT1 and INT2 lines.
*/
- .name = "aci",
+ .name = "eci_dbi",
.module = TWL5031_MODULE_ACCESSORY,
.bits = 9,
.bytes_ixr = 2,
@@ -247,8 +248,8 @@ static const struct sih sih_modules_twl5031[8] = {
},
[7] = {
- /* Accessory */
- .name = "acc",
+ /* Audio accessory */
+ .name = "audio",
.module = TWL5031_MODULE_ACCESSORY,
.control_offset = TWL5031_ACCSIHCTRL,
.bits = 2,
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index f2ab025ad97..1a968f34d67 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits);
*/
int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
{
- int ret, src;
+ int ret, src, irq_masked, timeout;
+
+ /* Are we using the interrupt? */
+ irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
+ irq_masked &= WM831X_AUXADC_DATA_EINT;
mutex_lock(&wm831x->auxadc_lock);
@@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto out;
}
+ /* Clear any notification from a very late arriving interrupt */
+ try_wait_for_completion(&wm831x->auxadc_done);
+
ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
if (ret < 0) {
@@ -349,22 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto disable;
}
- /* If an interrupt arrived late clean up after it */
- try_wait_for_completion(&wm831x->auxadc_done);
-
- /* Ignore the result to allow us to soldier on without IRQ hookup */
- wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5));
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL);
- if (ret < 0) {
- dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret);
- goto disable;
- }
-
- if (ret & WM831X_AUX_CVT_ENA) {
- dev_err(wm831x->dev, "Timed out reading AUXADC\n");
- ret = -EBUSY;
- goto disable;
+ if (irq_masked) {
+ /* If we're not using interrupts then poll the
+ * interrupt status register */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
+ }
+ }
+ } else {
+ /* If we are using interrupts then wait for the
+ * interrupt to complete. Use an extremely long
+ * timeout to handle situations with heavy load where
+ * the notification of the interrupt may be delayed by
+ * threaded IRQ handling. */
+ if (!wait_for_completion_timeout(&wm831x->auxadc_done,
+ msecs_to_jiffies(500))) {
+ dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
+ ret = -EBUSY;
+ goto disable;
+ }
}
ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
@@ -1463,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
parent = WM8310;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1474,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8311:
parent = WM8311;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1485,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8312:
parent = WM8312;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1623,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x)
kfree(wm831x);
}
+static int wm831x_device_suspend(struct wm831x *wm831x)
+{
+ int reg, mask;
+
+ /* If the charger IRQs are a wake source then acknowledge them
+ * even when they're not actively being used (eg, no power
+ * driver or no IRQ line wired up), otherwise suspend won't
+ * last very long.
+ */
+ if (wm831x->charger_irq_wake) {
+ reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK);
+
+ mask = WM831X_CHG_BATT_HOT_EINT |
+ WM831X_CHG_BATT_COLD_EINT |
+ WM831X_CHG_BATT_FAIL_EINT |
+ WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT |
+ WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT |
+ WM831X_CHG_START_EINT;
+
+ /* If any of the interrupts are masked read the statuses */
+ if (reg & mask)
+ reg = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_2);
+
+ if (reg & mask) {
+ dev_info(wm831x->dev,
+ "Acknowledging masked charger IRQs: %x\n",
+ reg & mask);
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2,
+ reg & mask);
+ }
+ }
+
+ return 0;
+}
+
static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
int bytes, void *dest)
{
@@ -1697,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
return 0;
}
+static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ return wm831x_device_suspend(wm831x);
+}
+
static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
@@ -1714,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = {
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
+ .suspend = wm831x_i2c_suspend,
.id_table = wm831x_i2c_id,
};
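
The AUXADC rework above chooses between polling the interrupt status register and a completion-based wait, depending on whether the data-ready interrupt is masked. The completion half follows the standard pattern of draining a stale completion and then waiting with a bounded timeout, sketched generically below (all names are placeholders, not wm831x API):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_adc {
        struct completion done; /* completed from the data-ready IRQ handler */
};

static int example_adc_wait(struct example_adc *adc)
{
        /* Discard any completion left over from a late, stale interrupt */
        try_wait_for_completion(&adc->done);

        /* ... start the conversion here ... */

        /* Generous timeout: threaded IRQ delivery can lag under load */
        if (!wait_for_completion_timeout(&adc->done, msecs_to_jiffies(500)))
                return -EBUSY;

        return 0;
}
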
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 30132769711..7dabe4dbd37 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -21,6 +21,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/delay.h>
@@ -38,8 +39,6 @@ struct wm831x_irq_data {
int primary;
int reg;
int mask;
- irq_handler_t handler;
- void *handler_data;
};
static struct wm831x_irq_data wm831x_irqs[] = {
@@ -388,12 +387,41 @@ static void wm831x_irq_mask(unsigned int irq)
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
+static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ int val;
+
+ irq = irq - wm831x->irq_base;
+
+ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
+ return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = WM831X_GPN_INT_MODE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = WM831X_GPN_POL;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+}
+
static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.bus_lock = wm831x_irq_lock,
.bus_sync_unlock = wm831x_irq_sync_unlock,
.mask = wm831x_irq_mask,
.unmask = wm831x_irq_unmask,
+ .set_type = wm831x_irq_set_type,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -462,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
mutex_init(&wm831x->irq_lock);
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ wm831x->irq_masks_cur[i] = 0xffff;
+ wm831x->irq_masks_cache[i] = 0xffff;
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
if (!irq) {
dev_warn(wm831x->dev,
"No interrupt specified - functionality limited\n");
@@ -477,14 +513,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x->irq = irq;
wm831x->irq_base = pdata->irq_base;
- /* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
- wm831x->irq_masks_cur[i] = 0xffff;
- wm831x->irq_masks_cache[i] = 0xffff;
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
- 0xffff);
- }
-
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 65830f57c09..7795af4b1fe 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
int ret = 0;
wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
- if (wm8350 == NULL) {
- kfree(i2c);
+ if (wm8350 == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
@@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
return ret;
err:
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return ret;
}
@@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
wm8350_device_exit(wm8350);
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return 0;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 865ce013a82..e08aafa663d 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
{
int i, ret = 0;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
/* If there are any volatile reads then read back the entire block */
for (i = reg; i < reg + num_regs; i++)
@@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
{
int ret, i;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
for (i = 0; i < num_regs; i++) {
BUG_ON(!reg_data[reg + i].writable);
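
The two BUG_ON() changes above tighten an off-by-one: reading num_regs entries starting at reg touches indices reg through reg + num_regs - 1, so the access is in range only while reg + num_regs <= ARRAY_SIZE(reg_cache). A spelled-out instance with made-up numbers:

/* Suppose ARRAY_SIZE(reg_cache) == 0x60, so valid indices are 0..0x5f.      */
/* reg = 0x5f, num_regs = 2 touches indices 0x5f and 0x60 (out of range).    */
/* Old check: reg + num_regs - 1 > 0x60  ->  0x60 > 0x60  ->  false, overflow slips through. */
/* New check: reg + num_regs     > 0x60  ->  0x61 > 0x60  ->  true, BUG_ON() fires.          */
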
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index cc524df10aa..ec71c936890 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -173,9 +173,34 @@ static struct mfd_cell wm8994_regulator_devs[] = {
{ .name = "wm8994-ldo", .id = 2 },
};
+static struct resource wm8994_codec_resources[] = {
+ {
+ .start = WM8994_IRQ_TEMP_SHUT,
+ .end = WM8994_IRQ_TEMP_WARN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource wm8994_gpio_resources[] = {
+ {
+ .start = WM8994_IRQ_GPIO(1),
+ .end = WM8994_IRQ_GPIO(11),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell wm8994_devs[] = {
- { .name = "wm8994-codec" },
- { .name = "wm8994-gpio" },
+ {
+ .name = "wm8994-codec",
+ .num_resources = ARRAY_SIZE(wm8994_codec_resources),
+ .resources = wm8994_codec_resources,
+ },
+
+ {
+ .name = "wm8994-gpio",
+ .num_resources = ARRAY_SIZE(wm8994_gpio_resources),
+ .resources = wm8994_gpio_resources,
+ },
};
/*
@@ -236,6 +261,11 @@ static int wm8994_device_resume(struct device *dev)
return ret;
}
+ ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
+ WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
if (ret < 0)
@@ -348,6 +378,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
if (pdata) {
+ wm8994->irq_base = pdata->irq_base;
wm8994->gpio_base = pdata->gpio_base;
/* GPIO configuration is only applied if it's non-zero */
@@ -375,16 +406,20 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
WM8994_LDO1_DISCH, 0);
}
+ wm8994_irq_init(wm8994);
+
ret = mfd_add_devices(wm8994->dev, -1,
wm8994_devs, ARRAY_SIZE(wm8994_devs),
NULL, 0);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
- goto err_enable;
+ goto err_irq;
}
return 0;
+err_irq:
+ wm8994_irq_exit(wm8994);
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
@@ -401,6 +436,7 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
mfd_remove_devices(wm8994->dev);
+ wm8994_irq_exit(wm8994);
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
@@ -469,6 +505,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->control_data = i2c;
wm8994->read_dev = wm8994_i2c_read_device;
wm8994->write_dev = wm8994_i2c_write_device;
+ wm8994->irq = i2c->irq;
return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
}
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
new file mode 100644
index 00000000000..8400eb1ee5d
--- /dev/null
+++ b/drivers/mfd/wm8994-irq.c
@@ -0,0 +1,310 @@
+/*
+ * wm8994-irq.c -- Interrupt controller support for Wolfson WM8994
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+
+#include <linux/delay.h>
+
+struct wm8994_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct wm8994_irq_data wm8994_irqs[] = {
+ [WM8994_IRQ_TEMP_SHUT] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_SHUT_EINT,
+ },
+ [WM8994_IRQ_MIC1_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_DET_EINT,
+ },
+ [WM8994_IRQ_MIC1_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_SHRT_EINT,
+ },
+ [WM8994_IRQ_MIC2_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_DET_EINT,
+ },
+ [WM8994_IRQ_MIC2_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_SHRT_EINT,
+ },
+ [WM8994_IRQ_FLL1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL1_LOCK_EINT,
+ },
+ [WM8994_IRQ_FLL2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL2_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC1_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC2_LOCK_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC1_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF1DRC1_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC2_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF1DRC2_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF2DRC_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF2DRC_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_FIFOS_ERR] = {
+ .reg = 2,
+ .mask = WM8994_FIFOS_ERR_EINT,
+ },
+ [WM8994_IRQ_WSEQ_DONE] = {
+ .reg = 2,
+ .mask = WM8994_WSEQ_DONE_EINT,
+ },
+ [WM8994_IRQ_DCS_DONE] = {
+ .reg = 2,
+ .mask = WM8994_DCS_DONE_EINT,
+ },
+ [WM8994_IRQ_TEMP_WARN] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_WARN_EINT,
+ },
+ [WM8994_IRQ_GPIO(1)] = {
+ .reg = 1,
+ .mask = WM8994_GP1_EINT,
+ },
+ [WM8994_IRQ_GPIO(2)] = {
+ .reg = 1,
+ .mask = WM8994_GP2_EINT,
+ },
+ [WM8994_IRQ_GPIO(3)] = {
+ .reg = 1,
+ .mask = WM8994_GP3_EINT,
+ },
+ [WM8994_IRQ_GPIO(4)] = {
+ .reg = 1,
+ .mask = WM8994_GP4_EINT,
+ },
+ [WM8994_IRQ_GPIO(5)] = {
+ .reg = 1,
+ .mask = WM8994_GP5_EINT,
+ },
+ [WM8994_IRQ_GPIO(6)] = {
+ .reg = 1,
+ .mask = WM8994_GP6_EINT,
+ },
+ [WM8994_IRQ_GPIO(7)] = {
+ .reg = 1,
+ .mask = WM8994_GP7_EINT,
+ },
+ [WM8994_IRQ_GPIO(8)] = {
+ .reg = 1,
+ .mask = WM8994_GP8_EINT,
+ },
+ [WM8994_IRQ_GPIO(9)] = {
+ .reg = 1,
+ .mask = WM8994_GP9_EINT,
+ },
+ [WM8994_IRQ_GPIO(10)] = {
+ .reg = 1,
+ .mask = WM8994_GP10_EINT,
+ },
+ [WM8994_IRQ_GPIO(11)] = {
+ .reg = 1,
+ .mask = WM8994_GP11_EINT,
+ },
+};
+
+static inline int irq_data_to_status_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
+}
+
+static inline int irq_data_to_mask_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
+}
+
+static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
+ int irq)
+{
+ return &wm8994_irqs[irq - wm8994->irq_base];
+}
+
+static void wm8994_irq_lock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+
+ mutex_lock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_sync_unlock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ /* If there's been a change in the mask write it back
+ * to the hardware. */
+ if (wm8994->irq_masks_cur[i] != wm8994->irq_masks_cache[i]) {
+ wm8994->irq_masks_cache[i] = wm8994->irq_masks_cur[i];
+ wm8994_reg_write(wm8994,
+ WM8994_INTERRUPT_STATUS_1_MASK + i,
+ wm8994->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_unmask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void wm8994_irq_mask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip wm8994_irq_chip = {
+ .name = "wm8994",
+ .bus_lock = wm8994_irq_lock,
+ .bus_sync_unlock = wm8994_irq_sync_unlock,
+ .mask = wm8994_irq_mask,
+ .unmask = wm8994_irq_unmask,
+};
+
+/* The processing of the primary interrupt occurs in a thread so that
+ * we can interact with the device over I2C or SPI. */
+static irqreturn_t wm8994_irq_thread(int irq, void *data)
+{
+ struct wm8994 *wm8994 = data;
+ unsigned int i;
+ u16 status[WM8994_NUM_IRQ_REGS];
+ int ret;
+
+ ret = wm8994_bulk_read(wm8994, WM8994_INTERRUPT_STATUS_1,
+ WM8994_NUM_IRQ_REGS, status);
+ if (ret < 0) {
+ dev_err(wm8994->dev, "Failed to read interrupt status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /* Apply masking */
+ for (i = 0; i < WM8994_NUM_IRQ_REGS; i++)
+ status[i] &= ~wm8994->irq_masks_cur[i];
+
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
+ /* Ack any unmasked IRQs */
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ if (status[i])
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1 + i,
+ status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wm8994_irq_init(struct wm8994 *wm8994)
+{
+ int i, cur_irq, ret;
+
+ mutex_init(&wm8994->irq_lock);
+
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ wm8994->irq_masks_cur[i] = 0xffff;
+ wm8994->irq_masks_cache[i] = 0xffff;
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
+ if (!wm8994->irq) {
+ dev_warn(wm8994->dev,
+ "No interrupt specified, no interrupts\n");
+ wm8994->irq_base = 0;
+ return 0;
+ }
+
+ if (!wm8994->irq_base) {
+ dev_err(wm8994->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = wm8994->irq_base;
+ cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm8994);
+ set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(wm8994->irq, NULL, wm8994_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "wm8994", wm8994);
+ if (ret != 0) {
+ dev_err(wm8994->dev, "Failed to request IRQ %d: %d\n",
+ wm8994->irq, ret);
+ return ret;
+ }
+
+ /* Enable top level interrupt if it was masked */
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_CONTROL, 0);
+
+ return 0;
+}
+
+void wm8994_irq_exit(struct wm8994 *wm8994)
+{
+ if (wm8994->irq)
+ free_irq(wm8994->irq, wm8994);
+}
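
Once wm8994_irq_init() above has registered the virtual interrupts with genirq, a child driver claims one with an ordinary threaded request and no primary handler, since the parent dispatches through handle_nested_irq(). The consumer below is a sketch only; the handler name, the context argument and the choice of WM8994_IRQ_MIC1_DET are illustrative assumptions.

#include <linux/interrupt.h>
#include <linux/mfd/wm8994/core.h>      /* assumed home of struct wm8994 and WM8994_IRQ_* */

static irqreturn_t example_mic_det(int irq, void *data)
{
        /* Runs in thread context, nested under the wm8994 primary IRQ thread */
        return IRQ_HANDLED;
}

static int example_request_mic_irq(struct wm8994 *wm8994, void *ctx)
{
        /* No hard handler: the parent chip dispatches via handle_nested_irq() */
        return request_threaded_irq(wm8994->irq_base + WM8994_IRQ_MIC1_DET,
                                    NULL, example_mic_det, IRQF_ONESHOT,
                                    "Mic detect", ctx);
}
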
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0d0d625fece..26386a92f5a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -14,11 +14,17 @@ menuconfig MISC_DEVICES
if MISC_DEVICES
config AD525X_DPOT
- tristate "Analog Devices AD525x Digital Potentiometers"
- depends on I2C && SYSFS
+ tristate "Analog Devices Digital Potentiometers"
+ depends on (I2C || SPI) && SYSFS
help
If you say yes here, you get support for the Analog Devices
- AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+ AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+ AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+ AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+ AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+ AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+ AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+ ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
digital potentiometer chips.
See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -27,6 +33,26 @@ config AD525X_DPOT
This driver can also be built as a module. If so, the module
will be called ad525x_dpot.
+config AD525X_DPOT_I2C
+ tristate "support I2C bus connection"
+ depends on AD525X_DPOT && I2C
+ help
+ Say Y here if you have digital potentiometers hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+ tristate "support SPI bus connection"
+ depends on AD525X_DPOT && SPI_MASTER
+ help
+ Say Y here if you have digital potentiometers hooked to an SPI bus.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-spi.
+
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7b6f7eefdf8..6ed06a19474 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,8 +3,9 @@
#
obj-$(CONFIG_IBM_ASM) += ibmasm/
-obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 00000000000..374352af797
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,134 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (I2C bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* ------------------------------------------------------------------------- */
+/* I2C bus functions */
+static int write_d8(void *client, u8 val)
+{
+ return i2c_smbus_write_byte(client, val);
+}
+
+static int write_r8d8(void *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int write_r8d16(void *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int read_d8(void *client)
+{
+ return i2c_smbus_read_byte(client);
+}
+
+static int read_r8d8(void *client, u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int read_r8d16(void *client, u8 reg)
+{
+ return i2c_smbus_read_word_data(client, reg);
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read_d8,
+ .read_r8d8 = read_r8d8,
+ .read_r8d16 = read_r8d16,
+ .write_d8 = write_d8,
+ .write_r8d8 = write_r8d8,
+ .write_r8d16 = write_r8d16,
+};
+
+static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad_dpot_bus_data bdata = {
+ .client = client,
+ .bops = &bops,
+ };
+
+ struct ad_dpot_id dpot_id = {
+ .name = (char *) &id->name,
+ .devid = id->driver_data,
+ };
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+}
+
+static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
+{
+ return ad_dpot_remove(&client->dev);
+}
+
+static const struct i2c_device_id ad_dpot_id[] = {
+ {"ad5258", AD5258_ID},
+ {"ad5259", AD5259_ID},
+ {"ad5251", AD5251_ID},
+ {"ad5252", AD5252_ID},
+ {"ad5253", AD5253_ID},
+ {"ad5254", AD5254_ID},
+ {"ad5255", AD5255_ID},
+ {"ad5241", AD5241_ID},
+ {"ad5242", AD5242_ID},
+ {"ad5243", AD5243_ID},
+ {"ad5245", AD5245_ID},
+ {"ad5246", AD5246_ID},
+ {"ad5247", AD5247_ID},
+ {"ad5248", AD5248_ID},
+ {"ad5280", AD5280_ID},
+ {"ad5282", AD5282_ID},
+ {"adn2860", ADN2860_ID},
+ {"ad5273", AD5273_ID},
+ {"ad5171", AD5171_ID},
+ {"ad5170", AD5170_ID},
+ {"ad5172", AD5172_ID},
+ {"ad5173", AD5173_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
+
+static struct i2c_driver ad_dpot_i2c_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_i2c_probe,
+ .remove = __devexit_p(ad_dpot_i2c_remove),
+ .id_table = ad_dpot_id,
+};
+
+static int __init ad_dpot_i2c_init(void)
+{
+ return i2c_add_driver(&ad_dpot_i2c_driver);
+}
+module_init(ad_dpot_i2c_init);
+
+static void __exit ad_dpot_i2c_exit(void)
+{
+ i2c_del_driver(&ad_dpot_i2c_driver);
+}
+module_exit(ad_dpot_i2c_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad_dpot");
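
With the I2C glue above, a board instantiates a specific part by the name it carries in ad_dpot_id[]. A sketch of the usual board-file hookup follows; the bus number 0 and the 0x18 slave address are made-up values.

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info ad_dpot_board_info __initdata = {
        /* "ad5258" must match an entry in ad_dpot_id[] above */
        I2C_BOARD_INFO("ad5258", 0x18),
};

static int __init example_board_register_dpot(void)
{
        /* Register on I2C bus 0; the real bus number is board specific */
        return i2c_register_board_info(0, &ad_dpot_board_info, 1);
}
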
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 00000000000..b8c6df9c843
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,172 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (SPI bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
+ {.name = "ad5160", .devid = AD5160_ID},
+ {.name = "ad5161", .devid = AD5161_ID},
+ {.name = "ad5162", .devid = AD5162_ID},
+ {.name = "ad5165", .devid = AD5165_ID},
+ {.name = "ad5200", .devid = AD5200_ID},
+ {.name = "ad5201", .devid = AD5201_ID},
+ {.name = "ad5203", .devid = AD5203_ID},
+ {.name = "ad5204", .devid = AD5204_ID},
+ {.name = "ad5206", .devid = AD5206_ID},
+ {.name = "ad5207", .devid = AD5207_ID},
+ {.name = "ad5231", .devid = AD5231_ID},
+ {.name = "ad5232", .devid = AD5232_ID},
+ {.name = "ad5233", .devid = AD5233_ID},
+ {.name = "ad5235", .devid = AD5235_ID},
+ {.name = "ad5260", .devid = AD5260_ID},
+ {.name = "ad5262", .devid = AD5262_ID},
+ {.name = "ad5263", .devid = AD5263_ID},
+ {.name = "ad5290", .devid = AD5290_ID},
+ {.name = "ad5291", .devid = AD5291_ID},
+ {.name = "ad5292", .devid = AD5292_ID},
+ {.name = "ad5293", .devid = AD5293_ID},
+ {.name = "ad7376", .devid = AD7376_ID},
+ {.name = "ad8400", .devid = AD8400_ID},
+ {.name = "ad8402", .devid = AD8402_ID},
+ {.name = "ad8403", .devid = AD8403_ID},
+ {.name = "adn2850", .devid = ADN2850_ID},
+ {}
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* SPI bus functions */
+static int write8(void *client, u8 val)
+{
+ u8 data = val;
+ return spi_write(client, &data, 1);
+}
+
+static int write16(void *client, u8 reg, u8 val)
+{
+ u8 data[2] = {reg, val};
+ return spi_write(client, data, 2);
+}
+
+static int write24(void *client, u8 reg, u16 val)
+{
+ u8 data[3] = {reg, val >> 8, val};
+ return spi_write(client, data, 3);
+}
+
+static int read8(void *client)
+{
+ int ret;
+ u8 data;
+ ret = spi_read(client, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ return data;
+}
+
+static int read16(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[2];
+
+ write16(client, reg, 0);
+ ret = spi_read(client, buf_rx, 2);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[0] << 8) | buf_rx[1];
+}
+
+static int read24(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[3];
+
+ write24(client, reg, 0);
+ ret = spi_read(client, buf_rx, 3);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[1] << 8) | buf_rx[2];
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read8,
+ .read_r8d8 = read16,
+ .read_r8d16 = read24,
+ .write_d8 = write8,
+ .write_r8d8 = write16,
+ .write_r8d16 = write24,
+};
+
+static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
+ char *name)
+{
+ while (id->name && id->name[0]) {
+ if (strcmp(name, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
+{
+ char *name = spi->dev.platform_data;
+ const struct ad_dpot_id *dpot_id;
+
+ struct ad_dpot_bus_data bdata = {
+ .client = spi,
+ .bops = &bops,
+ };
+
+ dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
+
+ if (dpot_id == NULL) {
+ dev_err(&spi->dev, "%s not in supported device list", name);
+ return -ENODEV;
+ }
+
+ return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+}
+
+static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
+{
+ return ad_dpot_remove(&spi->dev);
+}
+
+static struct spi_driver ad_dpot_spi_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_spi_probe,
+ .remove = __devexit_p(ad_dpot_spi_remove),
+};
+
+static int __init ad_dpot_spi_init(void)
+{
+ return spi_register_driver(&ad_dpot_spi_driver);
+}
+module_init(ad_dpot_spi_init);
+
+static void __exit ad_dpot_spi_exit(void)
+{
+ spi_unregister_driver(&ad_dpot_spi_driver);
+}
+module_exit(ad_dpot_spi_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad_dpot");
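
The SPI probe above identifies the part from a name string passed in spi->dev.platform_data and binds by the fixed modalias "ad_dpot". A board-file sketch is below; the bus number, chip select, clock rate and the choice of "ad5204" are made-up values.

#include <linux/spi/spi.h>
#include <linux/kernel.h>
#include <linux/init.h>

static struct spi_board_info ad_dpot_spi_board_info[] __initdata = {
        {
                .modalias       = "ad_dpot",    /* matches ad_dpot_spi_driver above */
                .platform_data  = "ad5204",     /* looked up in ad_dpot_spi_devlist[] */
                .max_speed_hz   = 1000000,
                .bus_num        = 0,
                .chip_select    = 1,
        },
};

static int __init example_board_register_spi_dpot(void)
{
        return spi_register_board_info(ad_dpot_spi_board_info,
                                       ARRAY_SIZE(ad_dpot_spi_board_info));
}
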
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 30a59f2bacd..5e6fa8449e8 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,6 +1,6 @@
/*
- * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
- * Copyright (c) 2009 Analog Devices, Inc.
+ * ad525x_dpot: Driver for the Analog Devices digital potentiometers
+ * Copyright (c) 2009-2010 Analog Devices, Inc.
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
@@ -11,6 +11,47 @@
* AD5255 3 512 25, 250
* AD5253 4 64 1, 10, 50, 100
* AD5254 4 256 1, 10, 50, 100
+ * AD5160 1 256 5, 10, 50, 100
+ * AD5161 1 256 5, 10, 50, 100
+ * AD5162 2 256 2.5, 10, 50, 100
+ * AD5165 1 256 100
+ * AD5200 1 256 10, 50
+ * AD5201 1 33 10, 50
+ * AD5203 4 64 10, 100
+ * AD5204 4 256 10, 50, 100
+ * AD5206 6 256 10, 50, 100
+ * AD5207 2 256 10, 50, 100
+ * AD5231 1 1024 10, 50, 100
+ * AD5232 2 256 10, 50, 100
+ * AD5233 4 64 10, 50, 100
+ * AD5235 2 1024 25, 250
+ * AD5260 1 256 20, 50, 200
+ * AD5262 2 256 20, 50, 200
+ * AD5263 4 256 20, 50, 200
+ * AD5290 1 256 10, 50, 100
+ * AD5291 1 256 20
+ * AD5292 1 1024 20
+ * AD5293 1 1024 20
+ * AD7376 1 128 10, 50, 100, 1M
+ * AD8400 1 256 1, 10, 50, 100
+ * AD8402 2 256 1, 10, 50, 100
+ * AD8403 4 256 1, 10, 50, 100
+ * ADN2850 3 512 25, 250
+ * AD5241 1 256 10, 100, 1M
+ * AD5246 1 128 5, 10, 50, 100
+ * AD5247 1 128 5, 10, 50, 100
+ * AD5245 1 256 5, 10, 50, 100
+ * AD5243 2 256 2.5, 10, 50, 100
+ * AD5248 2 256 2.5, 10, 50, 100
+ * AD5242 2 256 20, 50, 200
+ * AD5280 1 256 20, 50, 200
+ * AD5282 2 256 20, 50, 200
+ * ADN2860 3 512 25, 250
+ * AD5273 1 64 1, 10, 50, 100 (OTP)
+ * AD5171 1 64 5, 10, 50, 100 (OTP)
+ * AD5170 1 256 2.5, 10, 50, 100 (OTP)
+ * AD5172 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5173 2 256 2.5, 10, 50, 100 (OTP)
*
* See Documentation/misc-devices/ad525x_dpot.txt for more info.
*
@@ -28,77 +69,283 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/slab.h>
-#define DRIVER_NAME "ad525x_dpot"
-#define DRIVER_VERSION "0.1"
-
-enum dpot_devid {
- AD5258_ID,
- AD5259_ID,
- AD5251_ID,
- AD5252_ID,
- AD5253_ID,
- AD5254_ID,
- AD5255_ID,
-};
+#define DRIVER_VERSION "0.2"
-#define AD5258_MAX_POSITION 64
-#define AD5259_MAX_POSITION 256
-#define AD5251_MAX_POSITION 64
-#define AD5252_MAX_POSITION 256
-#define AD5253_MAX_POSITION 64
-#define AD5254_MAX_POSITION 256
-#define AD5255_MAX_POSITION 512
-
-#define AD525X_RDAC0 0
-#define AD525X_RDAC1 1
-#define AD525X_RDAC2 2
-#define AD525X_RDAC3 3
-
-#define AD525X_REG_TOL 0x18
-#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
-#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
-#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
-#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
-
-/* RDAC-to-EEPROM Interface Commands */
-#define AD525X_I2C_RDAC (0x00 << 5)
-#define AD525X_I2C_EEPROM (0x01 << 5)
-#define AD525X_I2C_CMD (0x80)
-
-#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
-#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
-#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
-#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
-
-static s32 ad525x_read(struct i2c_client *client, u8 reg);
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+#include "ad525x_dpot.h"
/*
* Client data (each client gets its own)
*/
struct dpot_data {
+ struct ad_dpot_bus_data bdata;
struct mutex update_lock;
unsigned rdac_mask;
unsigned max_pos;
- unsigned devid;
+ unsigned long devid;
+ unsigned uid;
+ unsigned feat;
+ unsigned wipers;
+ u16 rdac_cache[MAX_RDACS];
+ DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
};
+static inline int dpot_read_d8(struct dpot_data *dpot)
+{
+ return dpot->bdata.bops->read_d8(dpot->bdata.client);
+}
+
+static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
+}
+
+static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
+}
+
+static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
+{
+ return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
+}
+
+static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
+}
+
+static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
+}
+
+static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
+{
+ unsigned ctrl = 0;
+
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+
+ if (dpot->feat & F_RDACS_WONLY)
+ return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID))
+ return dpot_read_r8d8(dpot,
+ DPOT_AD5291_READ_RDAC << 2);
+
+ ctrl = DPOT_SPI_READ_RDAC;
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ ctrl = DPOT_SPI_READ_EEPROM;
+ }
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_read_r8d8(dpot, ctrl);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_read_r8d16(dpot, ctrl);
+
+ return -EFAULT;
+}
+
+static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+{
+ unsigned ctrl = 0;
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5291_RDAC_AB;
+ return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5170_ID):
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5272_3_A0;
+ return dpot_read_r8d8(dpot, ctrl);
+ default:
+ if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
+ return dpot_read_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1));
+ else
+ return dpot_read_r8d8(dpot, reg);
+ }
+}
+
+static s32 dpot_read(struct dpot_data *dpot, u8 reg)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_read_spi(dpot, reg);
+ else
+ return dpot_read_i2c(dpot, reg);
+}
+
+static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ unsigned val = 0;
+
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+ if (dpot->feat & F_RDACS_WONLY)
+ dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
+
+ if (dpot->feat & F_AD_APPDATA) {
+ if (dpot->feat & F_SPI_8BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_d8(dpot, val);
+ } else if (dpot->feat & F_SPI_16BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_r8d8(dpot, val >> 8,
+ val & 0xFF);
+ } else
+ BUG();
+ } else {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID))
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5291_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+
+ val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
+ }
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
+ } else if (reg & DPOT_ADDR_CMD) {
+ switch (reg) {
+ case DPOT_DEC_ALL_6DB:
+ val = DPOT_SPI_DEC_ALL_6DB;
+ break;
+ case DPOT_INC_ALL_6DB:
+ val = DPOT_SPI_INC_ALL_6DB;
+ break;
+ case DPOT_DEC_ALL:
+ val = DPOT_SPI_DEC_ALL;
+ break;
+ case DPOT_INC_ALL:
+ val = DPOT_SPI_INC_ALL;
+ break;
+ }
+ } else
+ BUG();
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_write_r8d8(dpot, val, value);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_write_r8d16(dpot, val, value);
+
+ return -EFAULT;
+}
+
+static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ /* Only write the instruction byte for certain commands */
+ unsigned tmp = 0, ctrl = 0;
+
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_write_d8(dpot, value);
+ break;
+
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5291_RDAC_AB;
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_d8(dpot);
+ if (tmp >> 6) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5273_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5272_3_A0;
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, ctrl);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl |= DPOT_AD5270_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5170_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, tmp);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5270_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ default:
+ if (reg & DPOT_ADDR_CMD)
+ return dpot_write_d8(dpot, reg);
+
+ if (dpot->max_pos > 256)
+ return dpot_write_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1), value);
+ else
+ /* All other registers require instruction + data bytes */
+ return dpot_write_r8d8(dpot, reg, value);
+ }
+}
+
+
+static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_write_spi(dpot, reg, value);
+ else
+ return dpot_write_i2c(dpot, reg, value);
+}
+
/* sysfs functions */
static ssize_t sysfs_show_reg(struct device *dev,
- struct device_attribute *attr, char *buf, u32 reg)
+ struct device_attribute *attr,
+ char *buf, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
s32 value;
+ if (reg & DPOT_ADDR_OTP_EN)
+ return sprintf(buf, "%s\n",
+ test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
+ "enabled" : "disabled");
+
+
mutex_lock(&data->update_lock);
- value = ad525x_read(client, reg);
+ value = dpot_read(data, reg);
mutex_unlock(&data->update_lock);
if (value < 0)
@@ -111,7 +358,7 @@ static ssize_t sysfs_show_reg(struct device *dev,
* datasheet (Rev. A) for more details.
*/
- if (reg & AD525X_REG_TOL)
+ if (reg & DPOT_REG_TOL)
return sprintf(buf, "0x%04x\n", value & 0xFFFF);
else
return sprintf(buf, "%u\n", value & data->rdac_mask);
@@ -121,11 +368,23 @@ static ssize_t sysfs_set_reg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
unsigned long value;
int err;
+ if (reg & DPOT_ADDR_OTP_EN) {
+ if (!strncmp(buf, "enabled", sizeof("enabled")))
+ set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+ else
+ clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+
+ return count;
+ }
+
+ if ((reg & DPOT_ADDR_OTP) &&
+ !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
+ return -EPERM;
+
err = strict_strtoul(buf, 10, &value);
if (err)
return err;
@@ -134,9 +393,11 @@ static ssize_t sysfs_set_reg(struct device *dev,
value = data->rdac_mask;
mutex_lock(&data->update_lock);
- ad525x_write(client, reg, value);
- if (reg & AD525X_I2C_EEPROM)
+ dpot_write(data, reg, value);
+ if (reg & DPOT_ADDR_EEPROM)
msleep(26); /* Sleep while the EEPROM updates */
+ else if (reg & DPOT_ADDR_OTP)
+ msleep(400); /* Sleep while the OTP updates */
mutex_unlock(&data->update_lock);
return count;
@@ -146,11 +407,10 @@ static ssize_t sysfs_do_cmd(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
mutex_lock(&data->update_lock);
- ad525x_write(client, reg, 0);
+ dpot_write(data, reg, 0);
mutex_unlock(&data->update_lock);
return count;
@@ -158,244 +418,131 @@ static ssize_t sysfs_do_cmd(struct device *dev,
/* ------------------------------------------------------------------------- */
-static ssize_t show_rdac0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
-}
-
-static ssize_t set_rdac0(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC0);
-}
-
-static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
-
-static ssize_t show_eeprom0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
-
-static ssize_t set_eeprom0(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
-
-static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
-
-static ssize_t show_tolerance0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
-}
-
-static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
-}
-
-static ssize_t set_rdac1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
-
-static ssize_t show_eeprom1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static ssize_t set_eeprom1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
-
-static ssize_t show_tolerance1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
-}
-
-static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static ssize_t set_rdac2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
-
-static ssize_t show_eeprom2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static ssize_t set_eeprom2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
-
-static ssize_t show_tolerance2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
-}
-
-static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static ssize_t set_rdac3(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
-
-static ssize_t show_eeprom3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
-
-static ssize_t set_eeprom3(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
+#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
+show_##_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sysfs_show_reg(dev, attr, buf, _reg); \
+}
+
+#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_set_reg(dev, attr, buf, count, _reg); \
+}
+
+#define DPOT_DEVICE_SHOW_SET(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+DPOT_DEVICE_SET(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+
+#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
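For reference (a sketch, not part of the patch): DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0) expands to roughly the hand-written pair it replaces, i.e.

	/* approximate preprocessor expansion of DPOT_DEVICE_SHOW_SET(rdac0, ...) */
	static ssize_t show_rdac0(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		return sysfs_show_reg(dev, attr, buf, DPOT_ADDR_RDAC | DPOT_RDAC0);
	}

	static ssize_t set_rdac0(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		return sysfs_set_reg(dev, attr, buf, count, DPOT_ADDR_RDAC | DPOT_RDAC0);
	}

	static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
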
+DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
+
+DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
+
+DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
+
+DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
+
+DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
+
+DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
+
+static const struct attribute *dpot_attrib_wipers[] = {
+ &dev_attr_rdac0.attr,
+ &dev_attr_rdac1.attr,
+ &dev_attr_rdac2.attr,
+ &dev_attr_rdac3.attr,
+ &dev_attr_rdac4.attr,
+ &dev_attr_rdac5.attr,
+ NULL
+};
-static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
+static const struct attribute *dpot_attrib_eeprom[] = {
+ &dev_attr_eeprom0.attr,
+ &dev_attr_eeprom1.attr,
+ &dev_attr_eeprom2.attr,
+ &dev_attr_eeprom3.attr,
+ &dev_attr_eeprom4.attr,
+ &dev_attr_eeprom5.attr,
+ NULL
+};
-static ssize_t show_tolerance3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
-}
+static const struct attribute *dpot_attrib_otp[] = {
+ &dev_attr_otp0.attr,
+ &dev_attr_otp1.attr,
+ &dev_attr_otp2.attr,
+ &dev_attr_otp3.attr,
+ &dev_attr_otp4.attr,
+ &dev_attr_otp5.attr,
+ NULL
+};
-static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
-
-static struct attribute *ad525x_attributes_wipers[4][4] = {
- {
- &dev_attr_rdac0.attr,
- &dev_attr_eeprom0.attr,
- &dev_attr_tolerance0.attr,
- NULL
- }, {
- &dev_attr_rdac1.attr,
- &dev_attr_eeprom1.attr,
- &dev_attr_tolerance1.attr,
- NULL
- }, {
- &dev_attr_rdac2.attr,
- &dev_attr_eeprom2.attr,
- &dev_attr_tolerance2.attr,
- NULL
- }, {
- &dev_attr_rdac3.attr,
- &dev_attr_eeprom3.attr,
- &dev_attr_tolerance3.attr,
- NULL
- }
+static const struct attribute *dpot_attrib_otp_en[] = {
+ &dev_attr_otp0en.attr,
+ &dev_attr_otp1en.attr,
+ &dev_attr_otp2en.attr,
+ &dev_attr_otp3en.attr,
+ &dev_attr_otp4en.attr,
+ &dev_attr_otp5en.attr,
+ NULL
};
-static const struct attribute_group ad525x_group_wipers[] = {
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
+static const struct attribute *dpot_attrib_tolerance[] = {
+ &dev_attr_tolerance0.attr,
+ &dev_attr_tolerance1.attr,
+ &dev_attr_tolerance2.attr,
+ &dev_attr_tolerance3.attr,
+ &dev_attr_tolerance4.attr,
+ &dev_attr_tolerance5.attr,
+ NULL
};
/* ------------------------------------------------------------------------- */
-static ssize_t set_inc_all(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
-}
+#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
+} \
+static DEVICE_ATTR(_name, S_IWUSR, NULL, set_##_name);
-static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
-
-static ssize_t set_dec_all(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
-}
-
-static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
-
-static ssize_t set_inc_all_6db(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
-}
-
-static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
-
-static ssize_t set_dec_all_6db(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
-}
-
-static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
+DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
+DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
+DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
static struct attribute *ad525x_attributes_commands[] = {
&dev_attr_inc_all.attr,
@@ -409,74 +556,56 @@ static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
-/* ------------------------------------------------------------------------- */
-
-/* i2c device functions */
+__devinit int ad_dpot_add_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ int err = sysfs_create_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
-/**
- * ad525x_read - return the value contained in the specified register
- * on the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to read
- *
- * If the tolerance register is specified, 2 bytes are returned.
- * Otherwise, 1 byte is returned. A negative value indicates an error
- * occurred while reading the register.
- */
-static s32 ad525x_read(struct i2c_client *client, u8 reg)
-{
- struct dpot_data *data = i2c_get_clientdata(client);
+ if (err)
+ dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
+ rdac);
- if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
- return i2c_smbus_read_word_data(client, (reg & 0xF8) |
- ((reg & 0x7) << 1));
- else
- return i2c_smbus_read_byte_data(client, reg);
+ return err;
}
-/**
- * ad525x_write - store the given value in the specified register on
- * the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to write
- * @value: the byte to store in the register
- *
- * For certain instructions that do not require a data byte, "NULL"
- * should be specified for the "value" parameter. These instructions
- * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
- *
- * A negative return value indicates an error occurred while reading
- * the register.
- */
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
-{
- struct dpot_data *data = i2c_get_clientdata(client);
-
- /* Only write the instruction byte for certain commands */
- if (reg & AD525X_I2C_CMD)
- return i2c_smbus_write_byte(client, reg);
-
- if (data->max_pos > 256)
- return i2c_smbus_write_word_data(client, (reg & 0xF8) |
- ((reg & 0x7) << 1), value);
- else
- /* All other registers require instruction + data bytes */
- return i2c_smbus_write_byte_data(client, reg, value);
+inline void ad_dpot_remove_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
}
-static int ad525x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+__devinit int ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
{
- struct device *dev = &client->dev;
- struct dpot_data *data;
- int err = 0;
- dev_dbg(dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(dev, "missing I2C functionality for this driver\n");
- goto exit;
- }
+ struct dpot_data *data;
+ int i, err = 0;
data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
if (!data) {
@@ -484,183 +613,74 @@ static int ad525x_probe(struct i2c_client *client,
goto exit;
}
- i2c_set_clientdata(client, data);
+ dev_set_drvdata(dev, data);
mutex_init(&data->update_lock);
- switch (id->driver_data) {
- case AD5258_ID:
- data->max_pos = AD5258_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5259_ID:
- data->max_pos = AD5259_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5251_ID:
- data->max_pos = AD5251_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5252_ID:
- data->max_pos = AD5252_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5253_ID:
- data->max_pos = AD5253_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5254_ID:
- data->max_pos = AD5254_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5255_ID:
- data->max_pos = AD5255_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- default:
- err = -ENODEV;
- goto exit_free;
- }
+ data->bdata = *bdata;
+ data->devid = id->devid;
+
+ data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+ data->rdac_mask = data->max_pos - 1;
+ data->feat = DPOT_FEAT(data->devid);
+ data->uid = DPOT_UID(data->devid);
+ data->wipers = DPOT_WIPERS(data->devid);
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i)) {
+ err = ad_dpot_add_files(dev, data->feat, i);
+ if (err)
+ goto exit_remove_files;
+ /* power-up midscale */
+ if (data->feat & F_RDACS_WONLY)
+ data->rdac_cache[i] = data->max_pos / 2;
+ }
+
+ if (data->feat & F_CMD_INC)
+ err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
if (err) {
dev_err(dev, "failed to register sysfs hooks\n");
goto exit_free;
}
- data->devid = id->driver_data;
- data->rdac_mask = data->max_pos - 1;
-
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
id->name, data->max_pos);
return 0;
+exit_remove_files:
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
+
exit_free:
kfree(data);
- i2c_set_clientdata(client, NULL);
+ dev_set_drvdata(dev, NULL);
exit:
- dev_err(dev, "failed to create client\n");
+ dev_err(dev, "failed to create client for %s ID 0x%lX\n",
+ id->name, id->devid);
return err;
}
+EXPORT_SYMBOL(ad_dpot_probe);
-static int __devexit ad525x_remove(struct i2c_client *client)
+__devexit int ad_dpot_remove(struct device *dev)
{
- struct dpot_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
-
- switch (data->devid) {
- case AD5258_ID:
- case AD5259_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5251_ID:
- case AD5252_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5253_ID:
- case AD5254_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5255_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- }
+ struct dpot_data *data = dev_get_drvdata(dev);
+ int i;
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
+EXPORT_SYMBOL(ad_dpot_remove);
-static const struct i2c_device_id ad525x_idtable[] = {
- {"ad5258", AD5258_ID},
- {"ad5259", AD5259_ID},
- {"ad5251", AD5251_ID},
- {"ad5252", AD5252_ID},
- {"ad5253", AD5253_ID},
- {"ad5254", AD5254_ID},
- {"ad5255", AD5255_ID},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
-
-static struct i2c_driver ad525x_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
- .id_table = ad525x_idtable,
- .probe = ad525x_probe,
- .remove = __devexit_p(ad525x_remove),
-};
-
-static int __init ad525x_init(void)
-{
- return i2c_add_driver(&ad525x_driver);
-}
-
-module_init(ad525x_init);
-
-static void __exit ad525x_exit(void)
-{
- i2c_del_driver(&ad525x_driver);
-}
-
-module_exit(ad525x_exit);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
- "Michael Hennerich <hennerich@blackfin.uclinux.org>, ");
-MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver");
+ "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 00000000000..78b89fd2e2f
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,202 @@
+/*
+ * Driver for the Analog Devices digital potentiometers
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD_DPOT_H_
+#define _AD_DPOT_H_
+
+#include <linux/types.h>
+
+#define DPOT_CONF(features, wipers, max_pos, uid) \
+ (((features) << 18) | (((wipers) & 0xFF) << 10) | \
+ ((max_pos & 0xF) << 6) | (uid & 0x3F))
+
+#define DPOT_UID(conf) (conf & 0x3F)
+#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF)
+#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF)
+#define DPOT_FEAT(conf) (conf >> 18)
+
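A worked example of the packing above (a sketch, not part of this header; the helper name is illustrative): the AD5258 entry further down is DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), and the probe code recovers its fields as follows.

	/* Sketch: 0x1A == F_RDACS_RW_TOL, 0x01 == BRDAC0 (wiper 0 only) */
	static unsigned example_ad5258_max_pos(void)
	{
		unsigned long conf = DPOT_CONF(0x1A, 0x01, 6, 0);

		/* DPOT_UID(conf) == 0, DPOT_WIPERS(conf) == 0x01,
		 * DPOT_FEAT(conf) == 0x1A */
		return 1 << DPOT_MAX_POS(conf);	/* 1 << 6 == 64 positions */
	}
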
+#define BRDAC0 (1 << 0)
+#define BRDAC1 (1 << 1)
+#define BRDAC2 (1 << 2)
+#define BRDAC3 (1 << 3)
+#define BRDAC4 (1 << 4)
+#define BRDAC5 (1 << 5)
+#define MAX_RDACS 6
+
+#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */
+#define F_CMD_EEP (1 << 1) /* Features EEPROM */
+#define F_CMD_OTP (1 << 2) /* Features OTP */
+#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */
+#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */
+#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */
+#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */
+#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */
+#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */
+#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */
+
+#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
+#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP)
+#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
+
+enum dpot_devid {
+ AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
+ AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
+ AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC3, 6, 2),
+ AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC3, 8, 3),
+ AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
+ AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
+ AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
+ AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 7), /* SPI */
+ AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 8),
+ AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 9),
+ AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 10),
+ AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 11),
+ AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 5, 12),
+ AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
+ AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
+ AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
+ 8, 15),
+ AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 16),
+ AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0, 10, 17),
+ AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 18),
+ AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
+ AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 20),
+ AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 21),
+ AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 22),
+ AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
+ AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 24),
+ AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
+ AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+ AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
+ AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 7, 28),
+ AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 29),
+ AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 30),
+ AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
+ ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 32),
+ AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
+ AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
+ AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
+ AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
+ AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
+ AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
+ AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
+ AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
+ AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
+ ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
+ AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
+ AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
+ AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
+ AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
+ AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+};
+
+#define DPOT_RDAC0 0
+#define DPOT_RDAC1 1
+#define DPOT_RDAC2 2
+#define DPOT_RDAC3 3
+#define DPOT_RDAC4 4
+#define DPOT_RDAC5 5
+
+#define DPOT_RDAC_MASK 0x1F
+
+#define DPOT_REG_TOL 0x18
+#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0)
+#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1)
+#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2)
+#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3)
+#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4)
+#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define DPOT_ADDR_RDAC (0x0 << 5)
+#define DPOT_ADDR_EEPROM (0x1 << 5)
+#define DPOT_ADDR_OTP (0x1 << 6)
+#define DPOT_ADDR_CMD (0x1 << 7)
+#define DPOT_ADDR_OTP_EN (0x1 << 9)
+
+#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3))
+#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3))
+#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3))
+#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3))
+
+#define DPOT_SPI_RDAC 0xB0
+#define DPOT_SPI_EEPROM 0x30
+#define DPOT_SPI_READ_RDAC 0xA0
+#define DPOT_SPI_READ_EEPROM 0x90
+#define DPOT_SPI_DEC_ALL_6DB 0x50
+#define DPOT_SPI_INC_ALL_6DB 0xD0
+#define DPOT_SPI_DEC_ALL 0x70
+#define DPOT_SPI_INC_ALL 0xF0
+
+/* AD5291/2/3 use special commands */
+#define DPOT_AD5291_RDAC 0x01
+#define DPOT_AD5291_READ_RDAC 0x02
+
+/* AD524x use special commands */
+#define DPOT_AD5291_RDAC_AB 0x80
+
+#define DPOT_AD5273_FUSE 0x80
+#define DPOT_AD5270_2_3_FUSE 0x20
+#define DPOT_AD5270_2_3_OW 0x08
+#define DPOT_AD5272_3_A0 0x08
+#define DPOT_AD5270_2FUSE 0x80
+
+struct dpot_data;
+
+struct ad_dpot_bus_ops {
+ int (*read_d8) (void *client);
+ int (*read_r8d8) (void *client, u8 reg);
+ int (*read_r8d16) (void *client, u8 reg);
+ int (*write_d8) (void *client, u8 val);
+ int (*write_r8d8) (void *client, u8 reg, u8 val);
+ int (*write_r8d16) (void *client, u8 reg, u16 val);
+};
+
+struct ad_dpot_bus_data {
+ void *client;
+ const struct ad_dpot_bus_ops *bops;
+};
+
+struct ad_dpot_id {
+ char *name;
+ unsigned long devid;
+};
+
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_remove(struct device *dev);
+
+#endif
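For orientation, a minimal sketch (not part of the hunks shown here) of how an I2C glue driver could populate ad_dpot_bus_data before calling ad_dpot_probe(); all example_* names are illustrative only.

	#include <linux/i2c.h>

	static int example_read_r8d8(void *client, u8 reg)
	{
		return i2c_smbus_read_byte_data(client, reg);
	}

	static int example_write_r8d8(void *client, u8 reg, u8 val)
	{
		return i2c_smbus_write_byte_data(client, reg, val);
	}

	static const struct ad_dpot_bus_ops example_bops = {
		.read_r8d8  = example_read_r8d8,
		.write_r8d8 = example_write_r8d8,
		/* remaining callbacks omitted in this sketch */
	};

	static int example_i2c_probe(struct i2c_client *client,
				     const struct i2c_device_id *id)
	{
		struct ad_dpot_bus_data bdata = {
			.client = client,
			.bops = &example_bops,
		};
		struct ad_dpot_id dpot_id = {
			.name = (char *)id->name,
			.devid = id->driver_data,
		};

		return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
	}
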
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index ed090e77c9c..19fc7c1cb42 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -707,7 +707,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
return nread;
}
-static ssize_t c2port_read_flash_data(struct kobject *kobj,
+static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
@@ -824,7 +824,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
return nwrite;
}
-static ssize_t c2port_write_flash_data(struct kobject *kobj,
+static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 9197cfc5501..a513f0aa643 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -140,7 +140,8 @@ static const struct attribute_group ds1682_group = {
/*
* User data attribute
*/
-static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -163,7 +164,8 @@ static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *at
return count;
}
-static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db7d0f21b65..f7ca3a42b49 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -54,7 +54,7 @@
struct at24_data {
struct at24_platform_data chip;
struct memory_accessor macc;
- bool use_smbus;
+ int use_smbus;
/*
* Lock protects against activities from other Linux tasks,
@@ -184,11 +184,19 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (count > io_limit)
count = io_limit;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
/* Smaller eeproms can work given some SMBus extension calls */
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ count = 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+ break;
+ default:
/*
* When we have a better choice than SMBus calls, use a
* combined I2C message. Write address; then read up to
@@ -219,10 +227,27 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
read_time = jiffies;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
status = i2c_smbus_read_i2c_block_data(client, offset,
count, buf);
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_read_byte_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status;
+ status = count;
+ }
+ break;
+ default:
status = i2c_transfer(client->adapter, msg, 2);
if (status == 2)
status = count;
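For reference, a sketch (not part of this patch) of the word-data fallback the hunk above adds: i2c_smbus_read_word_data() returns the 16-bit result low byte first, which is why the two bytes are stored as (status & 0xff) and (status >> 8); example_read_word() is an illustrative name.

	#include <linux/i2c.h>

	/* Read two EEPROM bytes starting at 'offset' on adapters that only
	 * support I2C_FUNC_SMBUS_READ_WORD_DATA. */
	static int example_read_word(struct i2c_client *client, u8 offset, u8 *buf)
	{
		s32 status = i2c_smbus_read_word_data(client, offset);

		if (status < 0)
			return status;

		buf[0] = status & 0xff;	/* byte at 'offset'     */
		buf[1] = status >> 8;	/* byte at 'offset + 1' */
		return 2;
	}
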
@@ -274,7 +299,8 @@ static ssize_t at24_read(struct at24_data *at24,
return retval;
}
-static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -395,7 +421,8 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
return retval;
}
-static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -434,7 +461,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
bool writable;
- bool use_smbus = false;
+ int use_smbus = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
@@ -475,12 +502,19 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = -EPFNOSUPPORT;
goto err_out;
}
- if (!i2c_check_functionality(client->adapter,
+ if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ use_smbus = I2C_SMBUS_WORD_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ use_smbus = I2C_SMBUS_BYTE_DATA;
+ } else {
err = -EPFNOSUPPORT;
goto err_out;
}
- use_smbus = true;
}
if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -566,11 +600,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
at24->bin.size, client->name,
writable ? "(writable)" : "(read-only)");
+ if (use_smbus == I2C_SMBUS_WORD_DATA ||
+ use_smbus == I2C_SMBUS_BYTE_DATA) {
+ dev_notice(&client->dev, "Falling back to %s reads, "
+ "performance will suffer\n", use_smbus ==
+ I2C_SMBUS_WORD_DATA ? "word" : "byte");
+ }
dev_dbg(&client->dev,
- "page_size %d, num_addresses %d, write_max %d%s\n",
+ "page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
chip.page_size, num_addresses,
- at24->write_max,
- use_smbus ? ", use_smbus" : "");
+ at24->write_max, use_smbus);
/* export data to kernel code */
if (chip.setup)
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index d194212a41f..c627e4174cc 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -126,7 +126,8 @@ at25_ee_read(
}
static ssize_t
-at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
@@ -253,7 +254,8 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
}
static ssize_t
-at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index f939ebc2507..45060ddc4e5 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -1,24 +1,20 @@
/*
- Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
- Philip Edelbrock <phil@netroedge.com>
- Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
- Copyright (C) 2003 IBM Corp.
- Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
+ * Philip Edelbrock <phil@netroedge.com>
+ * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2003 IBM Corp.
+ * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/kernel.h>
#include <linux/init.h>
@@ -85,7 +81,8 @@ exit:
mutex_unlock(&data->update_lock);
}
-static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c..7b33de95c4b 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -1,27 +1,20 @@
/*
- Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
- <http://rt2x00.serialmonkey.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: eeprom_93cx6
- Abstract: EEPROM reader routines for 93cx6 chipsets.
- Supported chipsets: 93c46 & 93c66.
+ * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Module: eeprom_93cx6
+ * Abstract: EEPROM reader routines for 93cx6 chipsets.
+ * Supported chipsets: 93c46 & 93c66.
*/
#include <linux/kernel.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 5a6b2bce8ad..5653a3ce051 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -1,30 +1,30 @@
/*
- max6875.c - driver for MAX6874/MAX6875
-
- Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
-
- Based on eeprom.c
-
- The MAX6875 has a bank of registers and two banks of EEPROM.
- Address ranges are defined as follows:
- * 0x0000 - 0x0046 = configuration registers
- * 0x8000 - 0x8046 = configuration EEPROM
- * 0x8100 - 0x82FF = user EEPROM
-
- This driver makes the user EEPROM available for read.
-
- The registers & config EEPROM should be accessed via i2c-dev.
-
- The MAX6875 ignores the lowest address bit, so each chip responds to
- two addresses - 0x50/0x51 and 0x52/0x53.
-
- Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
- address, so this driver is destructive if loaded for the wrong EEPROM chip.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-*/
+ * max6875.c - driver for MAX6874/MAX6875
+ *
+ * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * Based on eeprom.c
+ *
+ * The MAX6875 has a bank of registers and two banks of EEPROM.
+ * Address ranges are defined as follows:
+ * * 0x0000 - 0x0046 = configuration registers
+ * * 0x8000 - 0x8046 = configuration EEPROM
+ * * 0x8100 - 0x82FF = user EEPROM
+ *
+ * This driver makes the user EEPROM available for read.
+ *
+ * The registers & config EEPROM should be accessed via i2c-dev.
+ *
+ * The MAX6875 ignores the lowest address bit, so each chip responds to
+ * two addresses - 0x50/0x51 and 0x52/0x53.
+ *
+ * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
+ * address, so this driver is destructive if loaded for the wrong EEPROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
#include <linux/kernel.h>
#include <linux/init.h>
@@ -107,7 +107,7 @@ exit_up:
mutex_unlock(&data->update_lock);
}
-static ssize_t max6875_read(struct kobject *kobj,
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae67923..00000000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e09d3..00000000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Sky CPU State Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows use of the CPU state bits
- * It exports the /dev/sky_cpustate and also
- * /proc/sky_cpustate pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/miscdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-#define SKY_CPUSTATE_VERSION "1.1"
-
-static int hdpu_cpustate_probe(struct platform_device *pdev);
-static int hdpu_cpustate_remove(struct platform_device *pdev);
-
-static unsigned char cpustate_get_state(void);
-static int cpustate_proc_open(struct inode *inode, struct file *file);
-static int cpustate_proc_read(struct seq_file *seq, void *offset);
-
-static struct cpustate_t cpustate;
-
-static const struct file_operations proc_cpustate = {
- .open = cpustate_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int cpustate_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cpustate_proc_read, NULL);
-}
-
-static int cpustate_proc_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
- return 0;
-}
-
-static int cpustate_get_ref(int excl)
-{
-
- int retval = -EBUSY;
-
- spin_lock(&cpustate.lock);
-
- if (cpustate.excl)
- goto out_busy;
-
- if (excl) {
- if (cpustate.open_count)
- goto out_busy;
- cpustate.excl = 1;
- }
-
- cpustate.open_count++;
- retval = 0;
-
- out_busy:
- spin_unlock(&cpustate.lock);
- return retval;
-}
-
-static int cpustate_free_ref(void)
-{
-
- spin_lock(&cpustate.lock);
-
- cpustate.excl = 0;
- cpustate.open_count--;
-
- spin_unlock(&cpustate.lock);
- return 0;
-}
-
-static unsigned char cpustate_get_state(void)
-{
-
- return cpustate.cached_val;
-}
-
-static void cpustate_set_state(unsigned char new_state)
-{
- unsigned int state = (new_state << 21);
-
-#ifdef DEBUG_CPUSTATE
- printk("CPUSTATE -> 0x%x\n", new_state);
-#endif
- spin_lock(&cpustate.lock);
- cpustate.cached_val = new_state;
- writel((0xff << 21), cpustate.clr_addr);
- writel(state, cpustate.set_addr);
- spin_unlock(&cpustate.lock);
-}
-
-/*
- * Now all the various file operations that we export.
- */
-
-static ssize_t cpustate_read(struct file *file, char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
- if (count == 0)
- return 0;
-
- data = cpustate_get_state();
- if (copy_to_user(buf, &data, sizeof(unsigned char)))
- return -EFAULT;
- return sizeof(unsigned char);
-}
-
-static ssize_t cpustate_write(struct file *file, const char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
-
- if (count == 0)
- return 0;
-
- if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
- return -EFAULT;
-
- cpustate_set_state(data);
- return sizeof(unsigned char);
-}
-
-static int cpustate_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- lock_kernel();
- ret = cpustate_get_ref((file->f_flags & O_EXCL));
- unlock_kernel();
-
- return ret;
-}
-
-static int cpustate_release(struct inode *inode, struct file *file)
-{
- return cpustate_free_ref();
-}
-
-static struct platform_driver hdpu_cpustate_driver = {
- .probe = hdpu_cpustate_probe,
- .remove = hdpu_cpustate_remove,
- .driver = {
- .name = HDPU_CPUSTATE_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-/*
- * The various file operations we support.
- */
-static const struct file_operations cpustate_fops = {
- .owner = THIS_MODULE,
- .open = cpustate_open,
- .release = cpustate_release,
- .read = cpustate_read,
- .write = cpustate_write,
- .llseek = no_llseek,
-};
-
-static struct miscdevice cpustate_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sky_cpustate",
- .fops = &cpustate_fops,
-};
-
-static int hdpu_cpustate_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct proc_dir_entry *proc_de;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_cpustate: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- cpustate.set_addr = (unsigned long *)res->start;
- cpustate.clr_addr = (unsigned long *)res->end - 1;
-
- ret = misc_register(&cpustate_dev);
- if (ret) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to register misc device.\n");
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
- return ret;
- }
-
- proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
- if (!proc_de) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to create proc entry\n");
- }
-
- printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
- return 0;
-}
-
-static int hdpu_cpustate_remove(struct platform_device *pdev)
-{
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
-
- remove_proc_entry("sky_cpustate", NULL);
- misc_deregister(&cpustate_dev);
-
- return 0;
-}
-
-static int __init cpustate_init(void)
-{
- return platform_driver_register(&hdpu_cpustate_driver);
-}
-
-static void __exit cpustate_exit(void)
-{
- platform_driver_unregister(&hdpu_cpustate_driver);
-}
-
-module_init(cpustate_init);
-module_exit(cpustate_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644
index ce39fa54949..00000000000
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Sky Nexus Register Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows reading the Nexus register
- * It exports the /proc/sky_chassis_id and also
- * /proc/sky_slot_id pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-static int hdpu_nexus_probe(struct platform_device *pdev);
-static int hdpu_nexus_remove(struct platform_device *pdev);
-static int hdpu_slot_id_open(struct inode *inode, struct file *file);
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
-
-static struct proc_dir_entry *hdpu_slot_id;
-static struct proc_dir_entry *hdpu_chassis_id;
-static int slot_id = -1;
-static int chassis_id = -1;
-
-static const struct file_operations proc_slot_id = {
- .open = hdpu_slot_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations proc_chassis_id = {
- .open = hdpu_chassis_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static struct platform_driver hdpu_nexus_driver = {
- .probe = hdpu_nexus_probe,
- .remove = hdpu_nexus_remove,
- .driver = {
- .name = HDPU_NEXUS_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int hdpu_slot_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_slot_id_read, NULL);
-}
-
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", slot_id);
- return 0;
-}
-
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_chassis_id_read, NULL);
-}
-
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", chassis_id);
- return 0;
-}
-
-static int hdpu_nexus_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int *nexus_id_addr;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_nexus: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- nexus_id_addr = ioremap(res->start,
- (unsigned long)(res->end - res->start));
- if (nexus_id_addr) {
- slot_id = (*nexus_id_addr >> 8) & 0x1f;
- chassis_id = *nexus_id_addr & 0xff;
- iounmap(nexus_id_addr);
- } else {
- printk(KERN_ERR "sky_nexus: Could not map slot id\n");
- }
-
- hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
- if (!hdpu_slot_id) {
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_slot_id\n");
- }
-
- hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
- &proc_chassis_id);
- if (!hdpu_chassis_id)
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_chassis_id\n");
-
- return 0;
-}
-
-static int hdpu_nexus_remove(struct platform_device *pdev)
-{
- slot_id = -1;
- chassis_id = -1;
-
- remove_proc_entry("sky_slot_id", NULL);
- remove_proc_entry("sky_chassis_id", NULL);
-
- hdpu_slot_id = 0;
- hdpu_chassis_id = 0;
-
- return 0;
-}
-
-static int __init nexus_init(void)
-{
- return platform_driver_register(&hdpu_nexus_driver);
-}
-
-static void __exit nexus_exit(void)
-{
- platform_driver_unregister(&hdpu_nexus_driver);
-}
-
-module_init(nexus_init);
-module_exit(nexus_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0..5bfb2a2041b 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
UNALIGNED_LOAD_STORE_WRITE,
OVERWRITE_ALLOCATION,
WRITE_AFTER_FREE,
+ SOFTLOCKUP,
+ HARDLOCKUP,
+ HUNG_TASK,
};
static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
"UNALIGNED_LOAD_STORE_WRITE",
"OVERWRITE_ALLOCATION",
"WRITE_AFTER_FREE",
+ "SOFTLOCKUP",
+ "HARDLOCKUP",
+ "HUNG_TASK",
};
static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
memset(data, 0x78, len);
break;
}
+ case SOFTLOCKUP:
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case HARDLOCKUP:
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case HUNG_TASK:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ break;
case NONE:
default:
break;
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index e7161c4e379..db9cd0240c6 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -41,7 +41,7 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <asm/vmware.h>
+#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
@@ -767,7 +767,7 @@ static int __init vmballoon_init(void)
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
- if (!vmware_platform())
+ if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
vmballoon_wq = create_freezeable_workqueue("vmmemctl");
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b..569e94da844 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
/**
* mmc_suspend_host - suspend a host
* @host: mmc host
- * @state: suspend mode (PM_SUSPEND_xxx)
*/
-int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
+int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b..63772e7e760 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
* we cannot use the retries field in mmc_command.
*/
for (i = 0;i <= retries;i++) {
- memset(&mrq, 0, sizeof(struct mmc_request));
-
err = mmc_app_cmd(host, card);
if (err) {
/* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2dd4cfe7ca1..b9dee28ee7d 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card->type = MMC_TYPE_SDIO;
/*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c7135..0f687cdeb06 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_writeb);
/**
+ * sdio_writeb_readb - write and read a byte from SDIO function
+ * @func: SDIO function to access
+ * @write_byte: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Performs a RAW (Read after Write) operation as defined by the SDIO spec:
+ * a single byte is written to the address space of a given SDIO function and
+ * the response is read back from the same address, both in a single request.
+ * If there is a problem with the operation, 0xff is returned and
+ * @err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+ unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+ write_byte, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ val = 0xff;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
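A short usage sketch for the new helper (not part of this patch; the register address and names are made up): a function driver would call it with the host claimed, for example

	#include <linux/mmc/sdio_func.h>

	static u8 example_raw_write(struct sdio_func *func, u8 val)
	{
		int err;
		u8 readback;

		sdio_claim_host(func);
		/* Write 'val' to a hypothetical register at 0x10 and read the
		 * post-write value back in the same transaction. */
		readback = sdio_writeb_readb(func, val, 0x10, &err);
		sdio_release_host(func);

		if (err)
			dev_warn(&func->dev, "RAW transfer failed: %d\n", err);

		return readback;
	}
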
+
+/**
* sdio_memcpy_fromio - read a chunk of memory from a SDIO function
* @func: SDIO function to access
* @dst: buffer to store the data
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769f..e171e77f612 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
If unsure, say N.
+config MMC_SDHCI_SPEAR
+ tristate "SDHCI support on ST SPEAr platform"
+ depends on MMC_SDHCI && PLAT_SPEAR
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referred to as the HSMMC block in some of the ST SPEAr range
+ of SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C_DMA
bool "DMA support on S3C SDHCI"
depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
depends on SDH_BFIN
help
If you say yes here SD-Cards may work on the EZkit.
+
+config MMC_SH_MMCIF
+ tristate "SuperH Internal MMCIF support"
+ depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+ help
+ This selects the MMC Host Interface controller (MMCIF).
+
+ This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfc..e30c2ee4889 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3..5f3a599ead0 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
enable_irq_wake(host->board->det_pin);
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fb279f4ed8b..95ef864ad8f 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @sdc_reg: Value of SDCR to be written before using this slot.
+ * @sdio_irq: SDIO irq mask for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
struct atmel_mci *host;
u32 sdc_reg;
+ u32 sdio_irq;
struct mmc_request *mrq;
struct list_head queue_node;
@@ -580,7 +582,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
mci_writel(host, SDCR, slot->sdc_reg);
iflags = mci_readl(host, IMR);
- if (iflags)
+ if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (mci_has_rwproof())
host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
- if (list_empty(&host->queue))
+ if (atmci_is_mci2()) {
+ /* setup High Speed mode in relation with card capacity */
+ if (ios->timing == MMC_TIMING_SD_HS)
+ host->cfg_reg |= MCI_CFG_HSMODE;
+ else
+ host->cfg_reg &= ~MCI_CFG_HSMODE;
+ }
+
+ if (list_empty(&host->queue)) {
mci_writel(host, MR, host->mode_reg);
- else
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
+ } else {
host->need_clock_update = true;
+ }
spin_unlock_bh(&host->lock);
} else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
return present;
}
+static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+ struct atmel_mci *host = slot->host;
+
+ if (enable)
+ mci_writel(host, IER, slot->sdio_irq);
+ else
+ mci_writel(host, IDR, slot->sdio_irq);
+}
+
static const struct mmc_host_ops atmci_ops = {
.request = atmci_request,
.set_ios = atmci_set_ios,
.get_ro = atmci_get_ro,
.get_cd = atmci_get_cd,
+ .enable_sdio_irq = atmci_enable_sdio_irq,
};
/* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
* necessary if set_ios() is called when a different slot is
* busy transferring data.
*/
- if (host->need_clock_update)
+ if (host->need_clock_update) {
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
+ }
host->cur_slot->mrq = NULL;
host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
tasklet_schedule(&host->tasklet);
}
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+ int i;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ if (slot && (status & slot->sdio_irq)) {
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+}
+
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
if (pending & MCI_CMDRDY)
atmci_cmd_interrupt(host, status);
+
+ if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
+ atmci_sdio_interrupt(host, status);
+
} while (pass_count++ < 5);
return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
static int __init atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
- u32 sdc_reg)
+ u32 sdc_reg, u32 sdio_irq)
{
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
slot->wp_pin = slot_data->wp_pin;
slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
+ slot->sdio_irq = sdio_irq;
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (sdio_irq)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ if (atmci_is_mci2())
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
if (slot_data->bus_width >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
- 0, MCI_SDCSEL_SLOT_A);
+ 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
if (!ret)
nr_slots++;
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
- 1, MCI_SDCSEL_SLOT_B);
+ 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
if (!ret)
nr_slots++;
}
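
The SDIO interrupt support added to atmel-mci is spread over several hunks; condensed, the round trip looks like the sketch below (names taken from the patch, bodies trimmed): enable_sdio_irq() unmasks or masks the slot's MCI_SDIOIRQA/B bit, and the interrupt handler forwards a pending bit to the core for the matching slot.

/* Condensed sketch of the SDIO IRQ path; see the full hunks above for context. */
static void sketch_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct atmel_mci_slot *slot = mmc_priv(mmc);

	if (enable)
		mci_writel(slot->host, IER, slot->sdio_irq);	/* unmask this slot's bit */
	else
		mci_writel(slot->host, IDR, slot->sdio_irq);	/* mask it again */
}

static void sketch_sdio_dispatch(struct atmel_mci *host, u32 status)
{
	int i;

	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
		struct atmel_mci_slot *slot = host->slot[i];

		if (slot && (status & slot->sdio_irq))
			mmc_signal_sdio_irq(slot->mmc);
	}
}
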
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400..c8da5d30a86 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
struct au1xmmc_host *host = platform_get_drvdata(pdev);
int ret;
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret)
return ret;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072..4b0e677d729 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417..ca3bdc83190 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
int err;
- err = mmc_suspend_host(mmc, state);
+ err = mmc_suspend_host(mmc);
if (err)
return err;
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9..33d9f1b0086 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
* for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
*/
#define MAX_CCNT ((1 << 16) - 1)
-#define NR_SG 16
+#define MAX_NR_SG 16
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
+ unsigned char suspended;
/* buffer is used during PIO of one scatterlist segment, and
* is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
struct edmacc_param tx_template;
struct edmacc_param rx_template;
unsigned n_link;
- u32 links[NR_SG - 1];
+ u32 links[MAX_NR_SG - 1];
/* For PIO we walk scatterlists one segment at a time. */
unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
u8 version;
/* for ns in one cycle calculation */
unsigned ns_in_one_cycle;
+ /* Number of sg segments */
+ u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
+ u32 link_size;
int r, i;
/* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
/* Allocate parameter RAM slots, which will later be bound to a
* channel as needed to handle a scatterlist.
*/
- for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+ link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+ for (i = 0; i < link_size; i++) {
r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
if (r < 0) {
dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
}
}
-static void
-davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+ int val)
{
u32 temp;
- /* reset command and data state machines */
temp = readl(host->base + DAVINCI_MMCCTL);
- writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
+ if (val) /* reset */
+ temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+ else /* enable */
+ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
- temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
- udelay(10);
writel(temp, host->base + DAVINCI_MMCCTL);
+ udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ mmc_davinci_reset_ctrl(host, 1);
+ mmc_davinci_reset_ctrl(host, 0);
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
#endif
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
- /* DAT line portion is diabled and in reset state */
- writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
-
- /* CMD line portion is diabled and in reset state */
- writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
- host->base + DAVINCI_MMCCTL);
- udelay(10);
+ mmc_davinci_reset_ctrl(host, 1);
writel(0, host->base + DAVINCI_MMCCLK);
writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
writel(0x1FFF, host->base + DAVINCI_MMCTOR);
writel(0xFFFF, host->base + DAVINCI_MMCTOD);
- writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
- writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
- host->base + DAVINCI_MMCCTL);
-
- udelay(10);
+ mmc_davinci_reset_ctrl(host, 0);
}
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
init_mmcsd_host(host);
+ if (pdata->nr_sg)
+ host->nr_sg = pdata->nr_sg - 1;
+
+ if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+ host->nr_sg = MAX_NR_SG;
+
host->use_dma = use_dma;
host->irq = irq;
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+static int davinci_mmcsd_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ int ret;
- return mmc_suspend_host(host->mmc, msg);
+ mmc_host_enable(host->mmc);
+ ret = mmc_suspend_host(host->mmc);
+ if (!ret) {
+ writel(0, host->base + DAVINCI_MMCIM);
+ mmc_davinci_reset_ctrl(host, 1);
+ mmc_host_disable(host->mmc);
+ clk_disable(host->clk);
+ host->suspended = 1;
+ } else {
+ host->suspended = 0;
+ mmc_host_disable(host->mmc);
+ }
+
+ return ret;
}
-static int davinci_mmcsd_resume(struct platform_device *pdev)
+static int davinci_mmcsd_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ if (!host->suspended)
+ return 0;
- return mmc_resume_host(host->mmc);
+ clk_enable(host->clk);
+ mmc_host_enable(host->mmc);
+
+ mmc_davinci_reset_ctrl(host, 0);
+ ret = mmc_resume_host(host->mmc);
+ if (!ret)
+ host->suspended = 0;
+
+ return ret;
}
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+ .suspend = davinci_mmcsd_suspend,
+ .resume = davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
-#define davinci_mmcsd_suspend NULL
-#define davinci_mmcsd_resume NULL
+#define davinci_mmcsd_pm_ops NULL
#endif
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
.owner = THIS_MODULE,
+ .pm = davinci_mmcsd_pm_ops,
},
.remove = __exit_p(davinci_mmcsd_remove),
- .suspend = davinci_mmcsd_suspend,
- .resume = davinci_mmcsd_resume,
};
static int __init davinci_mmcsd_init(void)
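
The davinci_mmc changes above also convert power management from the legacy platform_driver .suspend/.resume callbacks to dev_pm_ops hung off driver.pm. A minimal sketch of that wiring, with hypothetical "sketch_" names and the handler bodies elided:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int sketch_suspend(struct device *dev)
{
	/* clock gating, controller reset and mmc_suspend_host() go here, as in the patch */
	return 0;
}

static int sketch_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops sketch_pm = {
	.suspend	= sketch_suspend,
	.resume		= sketch_resume,
};
#define SKETCH_PM_OPS	(&sketch_pm)
#else
#define SKETCH_PM_OPS	NULL
#endif

static struct platform_driver sketch_driver = {
	.driver = {
		.name	= "sketch_mmc",	/* hypothetical */
		.owner	= THIS_MODULE,
		.pm	= SKETCH_PM_OPS,
	},
};
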
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928..9a68ff4353a 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 84c103a7ee1..4917af96bae 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -55,14 +55,16 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
host->cclk = host->mclk / (2 * (clk + 1));
}
if (host->hw_designer == AMBA_VENDOR_ST)
- clk |= MCI_FCEN; /* Bug fix in ST IP block */
+ clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
clk |= MCI_CLK_ENABLE;
/* This hasn't proven to be worthwhile */
/* clk |= MCI_CLK_PWRSAVE; */
}
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- clk |= MCI_WIDE_BUS;
+ clk |= MCI_4BIT_BUS;
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ clk |= MCI_ST_8BIT_BUS;
writel(clk, host->base + MMCICLOCK);
}
@@ -629,7 +631,18 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->ops = &mmci_ops;
mmc->f_min = (host->mclk + 511) / 512;
- mmc->f_max = min(host->mclk, fmax);
+ /*
+ * If the platform data supplies a maximum operating
+ * frequency, this takes precedence. Else, we fall back
+ * to using the module parameter, which has a (low)
+ * default value in case it is not specified. Either
+ * value must not exceed the clock rate into the block,
+ * of course.
+ */
+ if (plat->f_max)
+ mmc->f_max = min(host->mclk, plat->f_max);
+ else
+ mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
#ifdef CONFIG_REGULATOR
@@ -811,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
if (ret == 0)
writel(0, host->base + MMCIMASK0);
}
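
The f_max selection added to mmci_probe() reduces to a precedence rule: a platform-supplied maximum wins over the module parameter, and either value is clamped to the clock actually fed into the block. A standalone sketch of that rule (the helper name is hypothetical):

#include <linux/kernel.h>

/* Hypothetical helper expressing the clamping rule from the hunk above. */
static unsigned int sketch_pick_fmax(unsigned int mclk, unsigned int plat_f_max,
				     unsigned int param_fmax)
{
	unsigned int wanted = plat_f_max ? plat_f_max : param_fmax;

	return min(mclk, wanted);	/* never exceed the clock rate into the block */
}
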
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 1ceb9a90f59..d77062e5e3a 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -25,9 +25,11 @@
#define MCI_CLK_ENABLE (1 << 8)
#define MCI_CLK_PWRSAVE (1 << 9)
#define MCI_CLK_BYPASS (1 << 10)
-#define MCI_WIDE_BUS (1 << 11)
+#define MCI_4BIT_BUS (1 << 11)
+/* 8-bit wide buses are supported in ST Micro versions */
+#define MCI_ST_8BIT_BUS (1 << 12)
/* HW flow control on the ST Micro version */
-#define MCI_FCEN (1 << 13)
+#define MCI_ST_FCEN (1 << 13)
#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 04ae884383f..24e09454e52 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google Inc,
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ * Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
@@ -47,6 +49,8 @@
#define DRIVER_NAME "msm-sdcc"
+#define BUSCLK_PWRSAVE 1
+#define BUSCLK_TIMEOUT (HZ)
static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
@@ -57,6 +61,67 @@ static unsigned int msmsdcc_sdioirq;
#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
+
+static inline void
+msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+{
+ WARN_ON(!host->clks_on);
+
+ BUG_ON(host->curr.mrq);
+
+ if (deferr) {
+ mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
+ } else {
+ del_timer_sync(&host->busclk_timer);
+ /* Need to check clks_on again in case the busclk
+ * timer fired
+ */
+ if (host->clks_on) {
+ clk_disable(host->clk);
+ clk_disable(host->pclk);
+ host->clks_on = 0;
+ }
+ }
+}
+
+static inline int
+msmsdcc_enable_clocks(struct msmsdcc_host *host)
+{
+ int rc;
+
+ del_timer_sync(&host->busclk_timer);
+
+ if (!host->clks_on) {
+ rc = clk_enable(host->pclk);
+ if (rc)
+ return rc;
+ rc = clk_enable(host->clk);
+ if (rc) {
+ clk_disable(host->pclk);
+ return rc;
+ }
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+ host->clks_on = 1;
+ }
+ return 0;
+}
+
+static inline unsigned int
+msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
+{
+ return readl(host->base + reg);
+}
+
+static inline void
+msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
+{
+ writel(data, host->base + reg);
+ /* 3 clk delay required! */
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+}
+
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
u32 c);
@@ -64,8 +129,6 @@ msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
- writel(0, host->base + MMCICOMMAND);
-
BUG_ON(host->curr.data);
host->curr.mrq = NULL;
@@ -76,6 +139,9 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
if (mrq->cmd->error == -ETIMEDOUT)
mdelay(5);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -88,7 +154,6 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
- writel(0, host->base + MMCIDATACTRL);
host->curr.data = NULL;
host->curr.got_dataend = host->curr.got_datablkend = 0;
}
@@ -109,6 +174,31 @@ uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
return 0;
}
+static inline void
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
+ msmsdcc_writel(host, arg, MMCIARGUMENT);
+ msmsdcc_writel(host, c, MMCICOMMAND);
+}
+
+static void
+msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+
+ msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
+ msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
+ MMCIDATALENGTH);
+ msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+
+ if (host->cmd_cmd) {
+ msmsdcc_start_command_exec(host,
+ (u32) host->cmd_cmd->arg,
+ (u32) host->cmd_c);
+ }
+ host->dma.active = 1;
+}
+
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
unsigned int result,
@@ -121,8 +211,11 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
struct mmc_request *mrq;
spin_lock_irqsave(&host->lock, flags);
+ host->dma.active = 0;
+
mrq = host->curr.mrq;
BUG_ON(!mrq);
+ WARN_ON(!mrq->data);
if (!(result & DMOV_RSLT_VALID)) {
pr_err("msmsdcc: Invalid DataMover result\n");
@@ -146,7 +239,6 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
mrq->data->error = -EIO;
}
- host->dma.busy = 0;
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
@@ -159,6 +251,7 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
}
host->dma.sg = NULL;
+ host->dma.busy = 0;
if ((host->curr.got_dataend && host->curr.got_datablkend)
|| mrq->data->error) {
@@ -172,12 +265,14 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
host->curr.data_xfered = host->curr.xfer_size;
if (!mrq->data->stop || mrq->cmd->error) {
- writel(0, host->base + MMCICOMMAND);
host->curr.mrq = NULL;
host->curr.cmd = NULL;
mrq->data->bytes_xfered = host->curr.data_xfered;
spin_unlock_irqrestore(&host->lock, flags);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
mmc_request_done(host->mmc, mrq);
return;
} else
@@ -218,6 +313,8 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->dma.sg = data->sg;
host->dma.num_ents = data->sg_len;
+ BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+
nc = host->dma.nc;
switch (host->pdev_id) {
@@ -246,22 +343,15 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->curr.user_pages = 0;
- n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
- host->dma.num_ents, host->dma.dir);
-
- if (n != host->dma.num_ents) {
- pr_err("%s: Unable to map in all sg elements\n",
- mmc_hostname(host->mmc));
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOMEM;
- }
-
box = &nc->cmd[0];
for (i = 0; i < host->dma.num_ents; i++) {
box->cmd = CMD_MODE_BOX;
- if (i == (host->dma.num_ents - 1))
+ /* Initialize sg dma address */
+ sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+ + sg->offset;
+
+ if (i == (host->dma.num_ents - 1))
box->cmd |= CMD_LC;
rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -300,15 +390,70 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+ n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+ host->dma.num_ents, host->dma.dir);
+	/* dsb inside dma_map_sg will write nc out to mem as well */
+
+ if (n != host->dma.num_ents) {
+ printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+ mmc_hostname(host->mmc));
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOMEM;
+ }
+
return 0;
}
+static int
+snoop_cccr_abort(struct mmc_command *cmd)
+{
+ if ((cmd->opcode == 52) &&
+ (cmd->arg & 0x80000000) &&
+ (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
+ return 1;
+ return 0;
+}
+
+static void
+msmsdcc_start_command_deferred(struct msmsdcc_host *host,
+ struct mmc_command *cmd, u32 *c)
+{
+ *c |= (cmd->opcode | MCI_CPSM_ENABLE);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ *c |= MCI_CPSM_LONGRSP;
+ *c |= MCI_CPSM_RESPONSE;
+ }
+
+ if (/*interrupt*/0)
+ *c |= MCI_CPSM_INTERRUPT;
+
+ if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
+ ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
+ (cmd->opcode == 53))
+ *c |= MCI_CSPM_DATCMD;
+
+ if (cmd == cmd->mrq->stop)
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (snoop_cccr_abort(cmd))
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (host->curr.cmd != NULL) {
+ printk(KERN_ERR "%s: Overlapping command requests\n",
+ mmc_hostname(host->mmc));
+ }
+ host->curr.cmd = cmd;
+}
+
static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
+ struct mmc_command *cmd, u32 c)
{
unsigned int datactrl, timeout;
unsigned long long clks;
- void __iomem *base = host->base;
unsigned int pio_irqmask = 0;
host->curr.data = data;
@@ -320,13 +465,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
memset(&host->pio, 0, sizeof(host->pio));
- clks = (unsigned long long)data->timeout_ns * host->clk_rate;
- do_div(clks, NSEC_PER_SEC);
- timeout = data->timeout_clks + (unsigned int)clks;
- writel(timeout, base + MMCIDATATIMER);
-
- writel(host->curr.xfer_size, base + MMCIDATALENGTH);
-
datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
if (!msmsdcc_config_dma(host, data))
@@ -347,47 +485,51 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
- writel(pio_irqmask, base + MMCIMASK1);
- writel(datactrl, base + MMCIDATACTRL);
+ clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+ do_div(clks, NSEC_PER_SEC);
+ timeout = data->timeout_clks + (unsigned int)clks * 2;
if (datactrl & MCI_DPSM_DMAENABLE) {
+ /* Save parameters for the exec function */
+ host->cmd_timeout = timeout;
+ host->cmd_pio_irqmask = pio_irqmask;
+ host->cmd_datactrl = datactrl;
+ host->cmd_cmd = cmd;
+
+ host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
+ host->dma.hdr.data = (void *)host;
host->dma.busy = 1;
+
+ if (cmd) {
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ host->cmd_c = c;
+ }
msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+ } else {
+ msmsdcc_writel(host, timeout, MMCIDATATIMER);
+
+ msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+
+ msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+
+ if (cmd) {
+ /* Daisy-chain the command if requested */
+ msmsdcc_start_command(host, cmd, c);
+ }
}
}
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
- void __iomem *base = host->base;
-
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
- writel(0, base + MMCICOMMAND);
- udelay(2 + ((5 * 1000000) / host->clk_rate));
- }
-
- c |= cmd->opcode | MCI_CPSM_ENABLE;
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
- }
-
- if (cmd->opcode == 17 || cmd->opcode == 18 ||
- cmd->opcode == 24 || cmd->opcode == 25 ||
- cmd->opcode == 53)
- c |= MCI_CSPM_DATCMD;
-
if (cmd == cmd->mrq->stop)
c |= MCI_CSPM_MCIABORT;
- host->curr.cmd = cmd;
-
host->stats.cmds++;
- writel(cmd->arg, base + MMCIARGUMENT);
- writel(c, base + MMCICOMMAND);
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ msmsdcc_start_command_exec(host, cmd->arg, c);
}
static void
@@ -421,13 +563,11 @@ msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
- void __iomem *base = host->base;
uint32_t *ptr = (uint32_t *) buffer;
int count = 0;
- while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
-
- *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+ while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
+ *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
ptr++;
count += sizeof(uint32_t);
@@ -459,7 +599,7 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
if (remain == 0)
break;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (status & MCI_TXFIFOHALFEMPTY);
return ptr - buffer;
@@ -469,7 +609,7 @@ static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
while (maxspin) {
- if ((readl(host->base + MMCISTATUS) & mask))
+ if ((msmsdcc_readl(host, MMCISTATUS) & mask))
return 0;
udelay(1);
--maxspin;
@@ -477,14 +617,13 @@ msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
return -ETIMEDOUT;
}
-static int
+static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
- void __iomem *base = host->base;
uint32_t status;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
do {
unsigned long flags;
@@ -539,14 +678,14 @@ msmsdcc_pio_irq(int irq, void *dev_id)
host->pio.sg_off = 0;
}
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (1);
if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
- writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+ msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
if (!host->curr.xfer_remain)
- writel(0, base + MMCIMASK1);
+ msmsdcc_writel(host, 0, MMCIMASK1);
return IRQ_HANDLED;
}
@@ -554,15 +693,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
struct mmc_command *cmd = host->curr.cmd;
- void __iomem *base = host->base;
host->curr.cmd = NULL;
- cmd->resp[0] = readl(base + MMCIRESPONSE0);
- cmd->resp[1] = readl(base + MMCIRESPONSE1);
- cmd->resp[2] = readl(base + MMCIRESPONSE2);
- cmd->resp[3] = readl(base + MMCIRESPONSE3);
+ cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
+ cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
+ cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
+ cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
- del_timer(&host->command_timer);
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL &&
@@ -580,8 +717,10 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
msmsdcc_request_end(host, cmd->mrq);
} else /* host->data == NULL */
msmsdcc_request_end(host, cmd->mrq);
- } else if (!(cmd->data->flags & MMC_DATA_READ))
- msmsdcc_start_data(host, cmd->data);
+ } else if (cmd->data)
+ if (!(cmd->data->flags & MMC_DATA_READ))
+ msmsdcc_start_data(host, cmd->data,
+ NULL, 0);
}
static void
@@ -590,6 +729,11 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
{
struct mmc_data *data = host->curr.data;
+ if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+ MCI_CMDTIMEOUT) && host->curr.cmd) {
+ msmsdcc_do_cmdirq(host, status);
+ }
+
if (!data)
return;
@@ -602,7 +746,8 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
msm_dmov_stop_cmd(host->dma.channel,
&host->dma.hdr, 0);
else {
- msmsdcc_stop_data(host);
+ if (host->curr.data)
+ msmsdcc_stop_data(host);
if (!data->stop)
msmsdcc_request_end(host, data->mrq);
else
@@ -657,17 +802,18 @@ msmsdcc_irq(int irq, void *dev_id)
spin_lock(&host->lock);
do {
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
+ status &= (msmsdcc_readl(host, MMCIMASK0) |
+ MCI_DATABLOCKENDMASK);
+ msmsdcc_writel(host, status, MMCICLEAR);
- status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
- writel(status, base + MMCICLEAR);
+ if (status & MCI_SDIOINTR)
+ status &= ~MCI_SDIOINTR;
- msmsdcc_handle_irq_data(host, status, base);
+ if (!status)
+ break;
- if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
- MCI_CMDTIMEOUT) && host->curr.cmd) {
- msmsdcc_do_cmdirq(host, status);
- }
+ msmsdcc_handle_irq_data(host, status, base);
if (status & MCI_SDIOINTOPER) {
cardint = 1;
@@ -714,24 +860,27 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
+ msmsdcc_enable_clocks(host);
+
host->curr.mrq = mrq;
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- msmsdcc_start_data(host, mrq->data);
-
- msmsdcc_start_command(host, mrq->cmd, 0);
+ /* Queue/read data, daisy-chain command when data starts */
+ msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+ else
+ msmsdcc_start_command(host, mrq->cmd, 0);
if (host->cmdpoll && !msmsdcc_spin_on_status(host,
MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
CMD_SPINMAX)) {
- uint32_t status = readl(host->base + MMCISTATUS);
+ uint32_t status = msmsdcc_readl(host, MMCISTATUS);
msmsdcc_do_cmdirq(host, status);
- writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
- host->base + MMCICLEAR);
+ msmsdcc_writel(host,
+ MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+ MMCICLEAR);
host->stats.cmdpoll_hits++;
} else {
host->stats.cmdpoll_misses++;
- mod_timer(&host->command_timer, jiffies + HZ);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -742,14 +891,13 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct msmsdcc_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0;
int rc;
+ unsigned long flags;
- if (ios->clock) {
+ spin_lock_irqsave(&host->lock, flags);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
+ msmsdcc_enable_clocks(host);
+
+ if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
if (rc < 0)
@@ -787,18 +935,16 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pwr |= MCI_OD;
- writel(clk, host->base + MMCICLOCK);
+ msmsdcc_writel(host, clk, MMCICLOCK);
if (host->pwr != pwr) {
host->pwr = pwr;
- writel(pwr, host->base + MMCIPOWER);
- }
-
- if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
+ msmsdcc_writel(host, pwr, MMCIPOWER);
}
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -809,13 +955,13 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_lock_irqsave(&host->lock, flags);
if (msmsdcc_sdioirq == 1) {
- status = readl(host->base + MMCIMASK0);
+ status = msmsdcc_readl(host, MMCIMASK0);
if (enable)
status |= MCI_SDIOINTOPERMASK;
else
status &= ~MCI_SDIOINTOPERMASK;
host->saved_irq0mask = status;
- writel(status, host->base + MMCIMASK0);
+ msmsdcc_writel(host, status, MMCIMASK0);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -875,42 +1021,13 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)
msmsdcc_check_status((unsigned long) host);
}
-/*
- * called when a command expires.
- * Dump some debugging, and then error
- * out the transaction.
- */
static void
-msmsdcc_command_expired(unsigned long _data)
+msmsdcc_busclk_expired(unsigned long _data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
- struct mmc_request *mrq;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- mrq = host->curr.mrq;
-
- if (!mrq) {
- pr_info("%s: Command expiry misfire\n",
- mmc_hostname(host->mmc));
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
- pr_err("%s: Command timeout (%p %p %p %p)\n",
- mmc_hostname(host->mmc), mrq, mrq->cmd,
- mrq->data, host->dma.sg);
- mrq->cmd->error = -ETIMEDOUT;
- msmsdcc_stop_data(host);
-
- writel(0, host->base + MMCICOMMAND);
-
- host->curr.mrq = NULL;
- host->curr.cmd = NULL;
-
- spin_unlock_irqrestore(&host->lock, flags);
- mmc_request_done(host->mmc, mrq);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
static int
@@ -1012,6 +1129,7 @@ msmsdcc_probe(struct platform_device *pdev)
host->pdev_id = pdev->id;
host->plat = plat;
host->mmc = mmc;
+ host->curr.cmd = NULL;
host->cmdpoll = 1;
@@ -1027,36 +1145,35 @@ msmsdcc_probe(struct platform_device *pdev)
host->dmares = dmares;
spin_lock_init(&host->lock);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (plat->embedded_sdio)
+ mmc_set_embedded_sdio_data(mmc,
+ &plat->embedded_sdio->cis,
+ &plat->embedded_sdio->cccr,
+ plat->embedded_sdio->funcs,
+ plat->embedded_sdio->num_funcs);
+#endif
+
/*
* Setup DMA
*/
msmsdcc_init_dma(host);
- /*
- * Setup main peripheral bus clock
- */
+ /* Get our clocks */
host->pclk = clk_get(&pdev->dev, "sdc_pclk");
if (IS_ERR(host->pclk)) {
ret = PTR_ERR(host->pclk);
goto host_free;
}
- ret = clk_enable(host->pclk);
- if (ret)
- goto pclk_put;
-
- host->pclk_rate = clk_get_rate(host->pclk);
-
- /*
- * Setup SDC MMC clock
- */
host->clk = clk_get(&pdev->dev, "sdc_clk");
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto pclk_disable;
+ goto pclk_put;
}
- ret = clk_enable(host->clk);
+ /* Enable clocks */
+ ret = msmsdcc_enable_clocks(host);
if (ret)
goto clk_put;
@@ -1066,10 +1183,9 @@ msmsdcc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ host->pclk_rate = clk_get_rate(host->pclk);
host->clk_rate = clk_get_rate(host->clk);
- host->clks_on = 1;
-
/*
* Setup MMC host structure
*/
@@ -1092,10 +1208,10 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
mmc->max_seg_size = mmc->max_req_size;
- writel(0, host->base + MMCIMASK0);
- writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
host->saved_irq0mask = MCI_IRQENABLE;
/*
@@ -1137,13 +1253,9 @@ msmsdcc_probe(struct platform_device *pdev)
host->eject = !host->oldstat;
}
- /*
- * Setup a command timer. We currently need this due to
- * some 'strange' timeout / error handling situations.
- */
- init_timer(&host->command_timer);
- host->command_timer.data = (unsigned long) host;
- host->command_timer.function = msmsdcc_command_expired;
+ init_timer(&host->busclk_timer);
+ host->busclk_timer.data = (unsigned long) host;
+ host->busclk_timer.function = msmsdcc_busclk_expired;
ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
@@ -1181,6 +1293,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->timer.function)
pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
return 0;
cmd_irq_free:
free_irq(cmd_irqres->start, host);
@@ -1188,11 +1303,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->stat_irq)
free_irq(host->stat_irq, host);
clk_disable:
- clk_disable(host->clk);
+ msmsdcc_disable_clocks(host, 0);
clk_put:
clk_put(host->clk);
- pclk_disable:
- clk_disable(host->pclk);
pclk_put:
clk_put(host->pclk);
host_free:
@@ -1214,16 +1327,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
disable_irq(host->stat_irq);
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- rc = mmc_suspend_host(mmc, state);
- if (!rc) {
- writel(0, host->base + MMCIMASK0);
-
- if (host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
- }
- }
+ rc = mmc_suspend_host(mmc);
+ if (!rc)
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
return rc;
}
@@ -1232,27 +1340,21 @@ static int
msmsdcc_resume(struct platform_device *dev)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
- unsigned long flags;
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
- spin_lock_irqsave(&host->lock, flags);
+ msmsdcc_enable_clocks(host);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
-
- writel(host->saved_irq0mask, host->base + MMCIMASK0);
-
- spin_unlock_irqrestore(&host->lock, flags);
+ msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
mmc_resume_host(mmc);
if (host->stat_irq)
enable_irq(host->stat_irq);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
}
return 0;
}
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 8c844846981..da0039c9285 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -171,6 +171,7 @@ struct msmsdcc_dma_data {
int channel;
struct msmsdcc_host *host;
int busy; /* Set if DM is busy */
+ int active;
};
struct msmsdcc_pio_data {
@@ -213,7 +214,7 @@ struct msmsdcc_host {
struct clk *clk; /* main MMC bus clock */
struct clk *pclk; /* SDCC peripheral bus clock */
unsigned int clks_on; /* set if clocks are enabled */
- struct timer_list command_timer;
+ struct timer_list busclk_timer;
unsigned int eject; /* eject state */
@@ -233,6 +234,18 @@ struct msmsdcc_host {
struct msmsdcc_pio_data pio;
int cmdpoll;
struct msmsdcc_stats stats;
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+ struct work_struct resume_task;
+#endif
+
+ /* Command parameters */
+ unsigned int cmd_timeout;
+ unsigned int cmd_pio_irqmask;
+ unsigned int cmd_datactrl;
+ struct mmc_command *cmd_cmd;
+ u32 cmd_c;
+
};
#endif
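
Taken together, the msm_sdcc changes implement lazy bus-clock gating: the request and set_ios paths call msmsdcc_enable_clocks() synchronously, while completion paths call msmsdcc_disable_clocks(host, 1), which only arms busclk_timer so the clocks are cut after roughly BUSCLK_TIMEOUT of idleness; the timer callback performs the actual clk_disable(). A reduced, self-contained sketch of the same pattern with hypothetical "sk_" names:

#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct sk_host {
	struct clk *clk, *pclk;
	unsigned int clks_on;
	struct timer_list busclk_timer;
};

/* Timer callback: the clocks are only gated here, once the host has been idle. */
static void sk_busclk_expired(unsigned long data)
{
	struct sk_host *host = (struct sk_host *)data;

	if (host->clks_on) {
		clk_disable(host->clk);
		clk_disable(host->pclk);
		host->clks_on = 0;
	}
}

/* Completion paths defer the gating instead of switching the clocks off at once. */
static void sk_disable_clocks_deferred(struct sk_host *host)
{
	mod_timer(&host->busclk_timer, jiffies + HZ);
}
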
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811..366eefa77c5 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2df90412abb..d9d4a72e0ec 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -119,6 +119,7 @@ struct mxcmci_host {
int detect_irq;
int dma;
int do_dma;
+ int use_sdio;
unsigned int power_mode;
struct imxmmc_platform_data *pdata;
@@ -138,6 +139,7 @@ struct mxcmci_host {
int clock;
struct work_struct datawork;
+ spinlock_t lock;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host)
{
int i;
+ dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
+
/* reset sequence */
writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
@@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
unsigned int cmdat)
{
+ u32 int_cntr;
+ unsigned long flags;
+
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
@@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
return -EINVAL;
}
+ int_cntr = INT_END_CMD_RES_EN;
+
if (mxcmci_use_dma(host))
- writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN |
- INT_END_CMD_RES_EN,
- host->base + MMC_REG_INT_CNTR);
- else
- writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR);
+ int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
writew(cmd->opcode, host->base + MMC_REG_CMD);
writel(cmd->arg, host->base + MMC_REG_ARG);
@@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
static void mxcmci_finish_request(struct mxcmci_host *host,
struct mmc_request *req)
{
- writel(0, host->base + MMC_REG_INT_CNTR);
+ u32 int_cntr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
host->req = NULL;
host->cmd = NULL;
@@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
stat);
if (stat & STATUS_CRC_READ_ERR) {
+ dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
} else if (stat & STATUS_CRC_WRITE_ERR) {
u32 err_code = (stat >> 9) & 0x3;
- if (err_code == 2) /* No CRC response */
+ if (err_code == 2) { /* No CRC response */
+ dev_err(mmc_dev(host->mmc),
+ "%s: No CRC -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
- else
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
+ }
} else if (stat & STATUS_TIME_OUT_READ) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: read -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
} else {
+ dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
data->error = -EIO;
}
} else {
@@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
struct scatterlist *sg;
int stat, i;
- host->datasize = 0;
-
host->data = data;
host->datasize = 0;
@@ -464,6 +489,9 @@ static void mxcmci_datawork(struct work_struct *work)
struct mxcmci_host *host = container_of(work, struct mxcmci_host,
datawork);
int datastat = mxcmci_transfer_data(host);
+
+ writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ host->base + MMC_REG_STATUS);
mxcmci_finish_data(host, datastat);
if (host->req->stop) {
@@ -523,15 +551,35 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
struct mxcmci_host *host = devid;
+ unsigned long flags;
+ bool sdio_irq;
u32 stat;
stat = readl(host->base + MMC_REG_STATUS);
- writel(stat, host->base + MMC_REG_STATUS);
+ writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
+ STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+ spin_lock_irqsave(&host->lock, flags);
+ sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+#ifdef HAS_DMA
+ if (mxcmci_use_dma(host) &&
+ (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
+ writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ host->base + MMC_REG_STATUS);
+#endif
+
+ if (sdio_irq) {
+ writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
+
#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
@@ -668,11 +716,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
return -ENOSYS;
}
+static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 int_cntr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->use_sdio = enable;
+ int_cntr = readl(host->base + MMC_REG_INT_CNTR);
+
+ if (enable)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ else
+ int_cntr &= ~INT_SDIO_IRQ_EN;
+
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
+{
+ /*
+ * MX3 SoCs have a silicon bug which corrupts CRC calculation of
+ * multi-block transfers when connected SDIO peripheral doesn't
+ * drive the BUSY line as required by the specs.
+ * One way to prevent this is to only allow 1-bit transfers.
+ */
+
+ if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
+ host->caps &= ~MMC_CAP_4_BIT_DATA;
+ else
+ host->caps |= MMC_CAP_4_BIT_DATA;
+}
static const struct mmc_host_ops mxcmci_ops = {
- .request = mxcmci_request,
- .set_ios = mxcmci_set_ios,
- .get_ro = mxcmci_get_ro,
+ .request = mxcmci_request,
+ .set_ios = mxcmci_set_ios,
+ .get_ro = mxcmci_get_ro,
+ .enable_sdio_irq = mxcmci_enable_sdio_irq,
+ .init_card = mxcmci_init_card,
};
static int mxcmci_probe(struct platform_device *pdev)
@@ -700,7 +783,7 @@ static int mxcmci_probe(struct platform_device *pdev)
}
mmc->ops = &mxcmci_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
mmc->max_hw_segs = 64;
@@ -719,6 +802,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->mmc = mmc;
host->pdata = pdev->dev.platform_data;
+ spin_lock_init(&host->lock);
if (host->pdata && host->pdata->ocr_avail)
mmc->ocr_avail = host->pdata->ocr_avail;
@@ -848,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
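
In mxcmmc the SDIO interrupt enable state is tracked in host->use_sdio, and accesses to it and to MMC_REG_INT_CNTR are serialized by the new host->lock spinlock; the interrupt handler samples the flag under the lock, acks the SDIO status bit and only then signals the core. A trimmed sketch of that handler-side logic (the real handler above also deals with command and data status):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/mmc/host.h>

static irqreturn_t sketch_sdio_part_of_irq(struct mxcmci_host *host, u32 stat)
{
	unsigned long flags;
	bool sdio_irq;

	spin_lock_irqsave(&host->lock, flags);
	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
	spin_unlock_irqrestore(&host->lock, flags);

	if (sdio_irq) {
		/* Ack the SDIO bit before handing the interrupt to the core. */
		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
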
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index bb6cc54b558..1247e5de9fa 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -64,7 +64,7 @@ static int of_mmc_spi_get_ro(struct device *dev)
struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
struct of_mmc_spi *oms;
const u32 *voltage_ranges;
int num_ranges;
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(mmc_spi_get_pdata);
void mmc_spi_put_pdata(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
int i;
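
The of_mmc_spi hunks are a mechanical conversion: the OF node pointer now lives directly in struct device as dev->of_node instead of behind the archdata accessor. A one-function sketch of the new access path:

#include <linux/of.h>
#include <linux/spi/spi.h>

/* Hypothetical helper; of_mmc_spi now reads the node like this. */
static struct device_node *sketch_spi_node(struct spi_device *spi)
{
	/* previously: dev_archdata_get_node(&spi->dev.archdata) */
	return spi->dev.of_node;
}
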
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d28040634..2b281680e32 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
#include <plat/fpga.h>
#define OMAP_MMC_REG_CMD 0x00
-#define OMAP_MMC_REG_ARGL 0x04
-#define OMAP_MMC_REG_ARGH 0x08
-#define OMAP_MMC_REG_CON 0x0c
-#define OMAP_MMC_REG_STAT 0x10
-#define OMAP_MMC_REG_IE 0x14
-#define OMAP_MMC_REG_CTO 0x18
-#define OMAP_MMC_REG_DTO 0x1c
-#define OMAP_MMC_REG_DATA 0x20
-#define OMAP_MMC_REG_BLEN 0x24
-#define OMAP_MMC_REG_NBLK 0x28
-#define OMAP_MMC_REG_BUF 0x2c
-#define OMAP_MMC_REG_SDIO 0x34
-#define OMAP_MMC_REG_REV 0x3c
-#define OMAP_MMC_REG_RSP0 0x40
-#define OMAP_MMC_REG_RSP1 0x44
-#define OMAP_MMC_REG_RSP2 0x48
-#define OMAP_MMC_REG_RSP3 0x4c
-#define OMAP_MMC_REG_RSP4 0x50
-#define OMAP_MMC_REG_RSP5 0x54
-#define OMAP_MMC_REG_RSP6 0x58
-#define OMAP_MMC_REG_RSP7 0x5c
-#define OMAP_MMC_REG_IOSR 0x60
-#define OMAP_MMC_REG_SYSC 0x64
-#define OMAP_MMC_REG_SYSS 0x68
+#define OMAP_MMC_REG_ARGL 0x01
+#define OMAP_MMC_REG_ARGH 0x02
+#define OMAP_MMC_REG_CON 0x03
+#define OMAP_MMC_REG_STAT 0x04
+#define OMAP_MMC_REG_IE 0x05
+#define OMAP_MMC_REG_CTO 0x06
+#define OMAP_MMC_REG_DTO 0x07
+#define OMAP_MMC_REG_DATA 0x08
+#define OMAP_MMC_REG_BLEN 0x09
+#define OMAP_MMC_REG_NBLK 0x0a
+#define OMAP_MMC_REG_BUF 0x0b
+#define OMAP_MMC_REG_SDIO 0x0d
+#define OMAP_MMC_REG_REV 0x0f
+#define OMAP_MMC_REG_RSP0 0x10
+#define OMAP_MMC_REG_RSP1 0x11
+#define OMAP_MMC_REG_RSP2 0x12
+#define OMAP_MMC_REG_RSP3 0x13
+#define OMAP_MMC_REG_RSP4 0x14
+#define OMAP_MMC_REG_RSP5 0x15
+#define OMAP_MMC_REG_RSP6 0x16
+#define OMAP_MMC_REG_RSP7 0x17
+#define OMAP_MMC_REG_IOSR 0x18
+#define OMAP_MMC_REG_SYSC 0x19
+#define OMAP_MMC_REG_SYSS 0x1a
#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
-#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
-#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
+#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
+#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
/*
* Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
int irq;
unsigned char bus_mode;
unsigned char hw_bus_mode;
+ unsigned int reg_shift;
struct work_struct cmd_abort_work;
unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
host->data->bytes_xfered += n;
if (write) {
- __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+ __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
} else {
- __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+ __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
}
}
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
int dst_port = 0;
int sync_dev = 0;
- data_addr = host->phys_base + OMAP_MMC_REG_DATA;
+ data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
frame = data->blksz;
count = sg_dma_len(sg);
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
}
}
+ host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
return 0;
err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
struct mmc_omap_slot *slot;
slot = host->slots[i];
- ret = mmc_suspend_host(slot->mmc, mesg);
+ ret = mmc_suspend_host(slot->mmc);
if (ret < 0) {
while (--i >= 0) {
slot = host->slots[i];
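
The omap.c register rework turns the OMAP_MMC_REG_* constants into register indices and lets OMAP_MMC_REG() rebuild the byte offset with a per-host shift: reg_shift is 1 on OMAP7xx (registers spaced 2 bytes apart) and 2 elsewhere (4-byte spacing). A small sketch of an accessor under that scheme, using hypothetical SK_ names:

#include <linux/io.h>

#define SK_REG_STAT		0x04	/* register index, not a byte offset */
#define SK_REG(host, idx)	((idx) << (host)->reg_shift)

static u16 sk_read_stat(struct mmc_omap_host *host)
{
	/* virt_base + (index << reg_shift) recovers the byte address for this SoC. */
	return __raw_readw(host->virt_base + SK_REG(host, SK_REG_STAT));
}
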
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59..b032828c612 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- struct semaphore sem;
struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
- unsigned long flags;
unsigned int id;
unsigned int dma_len;
unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
int protect_card;
int reqs_blocked;
int use_reg;
+ int req_in_progress;
struct omap_mmc_platform_data *pdata;
};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
+{
+ unsigned int irq_mask;
+
+ if (host->use_dma)
+ irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
+ else
+ irq_mask = INT_EN_MASK;
+
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+ OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+}
+
+static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+}
+
#ifdef CONFIG_PM
/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
&& time_before(jiffies, timeout))
;
- OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_disable_irq(host);
/* Do not initialize card-specific things if the power is off */
if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
return;
disable_irq(host->irq);
+
+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
host->cmd = cmd;
- /*
- * Clear status bits and enable interrupts
- */
- OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-
- if (host->use_dma)
- OMAP_HSMMC_WRITE(host->base, IE,
- INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
- else
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_enable_irq(host);
host->response_busy = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
if (host->use_dma)
cmdreg |= DMA_EN;
- /*
- * In an interrupt context (i.e. STOP command), the spinlock is unlocked
- * by the interrupt handler, otherwise (i.e. for a new request) it is
- * unlocked here.
- */
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ host->req_in_progress = 1;
OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
return DMA_FROM_DEVICE;
}
+static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
+{
+ int dma_ch;
+
+ spin_lock(&host->irq_lock);
+ host->req_in_progress = 0;
+ dma_ch = host->dma_ch;
+ spin_unlock(&host->irq_lock);
+
+ omap_hsmmc_disable_irq(host);
+ /* Do not complete the request if DMA is still in progress */
+ if (mrq->data && host->use_dma && dma_ch != -1)
+ return;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
/*
* Notify the transfer complete to MMC core
*/
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
return;
}
- host->mrq = NULL;
- mmc_request_done(host->mmc, mrq);
+ omap_hsmmc_request_done(host, mrq);
return;
}
host->data = NULL;
- if (host->use_dma && host->dma_ch != -1)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
- omap_hsmmc_get_dma_dir(host, data));
-
if (!data->error)
data->bytes_xfered += data->blocks * (data->blksz);
else
data->bytes_xfered = 0;
if (!data->stop) {
- host->mrq = NULL;
- mmc_request_done(host->mmc, data->mrq);
+ omap_hsmmc_request_done(host, data->mrq);
return;
}
omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
}
}
- if ((host->data == NULL && !host->response_busy) || cmd->error) {
- host->mrq = NULL;
- mmc_request_done(host->mmc, cmd->mrq);
- }
+ if ((host->data == NULL && !host->response_busy) || cmd->error)
+ omap_hsmmc_request_done(host, cmd->mrq);
}
/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
*/
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
+ int dma_ch;
+
host->data->error = errno;
- if (host->use_dma && host->dma_ch != -1) {
+ spin_lock(&host->irq_lock);
+ dma_ch = host->dma_ch;
+ host->dma_ch = -1;
+ spin_unlock(&host->irq_lock);
+
+ if (host->use_dma && dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- up(&host->sem);
+ omap_free_dma(dma_ch);
}
host->data = NULL;
}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
__func__);
}
-/*
- * MMC controller IRQ handler
- */
-static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
- struct omap_hsmmc_host *host = dev_id;
struct mmc_data *data;
- int end_cmd = 0, end_trans = 0, status;
-
- spin_lock(&host->irq_lock);
-
- if (host->mrq == NULL) {
- OMAP_HSMMC_WRITE(host->base, STAT,
- OMAP_HSMMC_READ(host->base, STAT));
- /* Flush posted write */
- OMAP_HSMMC_READ(host->base, STAT);
- spin_unlock(&host->irq_lock);
- return IRQ_HANDLED;
+ int end_cmd = 0, end_trans = 0;
+
+ if (!host->req_in_progress) {
+ do {
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ } while (status & INT_EN_MASK);
+ return;
}
data = host->data;
- status = OMAP_HSMMC_READ(host->base, STAT);
dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
- /* Flush posted write */
- OMAP_HSMMC_READ(host->base, STAT);
if (end_cmd || ((status & CC) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC)) && host->mrq)
omap_hsmmc_xfer_done(host, data);
+}
- spin_unlock(&host->irq_lock);
+/*
+ * MMC controller IRQ handler
+ */
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+ int status;
+
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ do {
+ omap_hsmmc_do_irq(host, status);
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ } while (status & INT_EN_MASK);
return IRQ_HANDLED;
}
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
/*
* DMA call back function
*/
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
{
- struct omap_hsmmc_host *host = data;
+ struct omap_hsmmc_host *host = cb_data;
+ struct mmc_data *data = host->mrq->data;
+ int dma_ch, req_in_progress;
if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
- if (host->dma_ch < 0)
+ spin_lock(&host->irq_lock);
+ if (host->dma_ch < 0) {
+ spin_unlock(&host->irq_lock);
return;
+ }
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
- omap_hsmmc_config_dma_params(host, host->data,
- host->data->sg + host->dma_sg_idx);
+ omap_hsmmc_config_dma_params(host, data,
+ data->sg + host->dma_sg_idx);
+ spin_unlock(&host->irq_lock);
return;
}
- omap_free_dma(host->dma_ch);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ req_in_progress = host->req_in_progress;
+ dma_ch = host->dma_ch;
host->dma_ch = -1;
- /*
- * DMA Callback: run in interrupt context.
- * mutex_unlock will throw a kernel warning if used.
- */
- up(&host->sem);
+ spin_unlock(&host->irq_lock);
+
+ omap_free_dma(dma_ch);
+
+ /* If DMA has finished after TC, complete the request */
+ if (!req_in_progress) {
+ struct mmc_request *mrq = host->mrq;
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ }
}
/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- int dma_ch = 0, ret = 0, err = 1, i;
+ int dma_ch = 0, ret = 0, i;
struct mmc_data *data = req->data;
/* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
*/
return -EINVAL;
- /*
- * If for some reason the DMA transfer is still active,
- * we wait for timeout period and free the dma
- */
- if (host->dma_ch != -1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(100);
- if (down_trylock(&host->sem)) {
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- up(&host->sem);
- return err;
- }
- } else {
- if (down_trylock(&host->sem))
- return err;
- }
+ BUG_ON(host->dma_ch != -1);
ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
"MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;
- /*
- * Prevent races with the interrupt handler because of unexpected
- * interrupts, but not if we are already in interrupt context i.e.
- * retries.
- */
- if (!in_interrupt()) {
- spin_lock_irqsave(&host->irq_lock, host->flags);
- /*
- * Protect the card from I/O if there is a possibility
- * it can be removed.
- */
- if (host->protect_card) {
- if (host->reqs_blocked < 3) {
- /*
- * Ensure the controller is left in a consistent
- * state by resetting the command and data state
- * machines.
- */
- omap_hsmmc_reset_controller_fsm(host, SRD);
- omap_hsmmc_reset_controller_fsm(host, SRC);
- host->reqs_blocked += 1;
- }
- req->cmd->error = -EBADF;
- if (req->data)
- req->data->error = -EBADF;
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
- mmc_request_done(mmc, req);
- return;
- } else if (host->reqs_blocked)
- host->reqs_blocked = 0;
- }
+ BUG_ON(host->req_in_progress);
+ BUG_ON(host->dma_ch != -1);
+ if (host->protect_card) {
+ if (host->reqs_blocked < 3) {
+ /*
+ * Ensure the controller is left in a consistent
+ * state by resetting the command and data state
+ * machines.
+ */
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->reqs_blocked += 1;
+ }
+ req->cmd->error = -EBADF;
+ if (req->data)
+ req->data->error = -EBADF;
+ req->cmd->retries = 0;
+ mmc_request_done(mmc, req);
+ return;
+ } else if (host->reqs_blocked)
+ host->reqs_blocked = 0;
WARN_ON(host->mrq != NULL);
host->mrq = req;
err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
if (req->data)
req->data->error = err;
host->mrq = NULL;
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
mmc_request_done(mmc, req);
return;
}
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
mmc->f_min = 400000;
mmc->f_max = 52000000;
- sema_init(&host->sem, 1);
spin_lock_init(&host->irq_lock);
host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
}
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_disable_irq(host);
mmc_host_lazy_disable(host->mmc);
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_hsmmc_suspend(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+ pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
}
cancel_work_sync(&host->mmc_carddetect_work);
mmc_host_enable(host->mmc);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret == 0) {
- OMAP_HSMMC_WRITE(host->base, ISE, 0);
- OMAP_HSMMC_WRITE(host->base, IE, 0);
-
-
+ omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
}
/* Routine to resume the MMC device */
-static int omap_hsmmc_resume(struct platform_device *pdev)
+static int omap_hsmmc_resume(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
#define omap_hsmmc_resume NULL
#endif
-static struct platform_driver omap_hsmmc_driver = {
- .remove = omap_hsmmc_remove,
+static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
+};
+
+static struct platform_driver omap_hsmmc_driver = {
+ .remove = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &omap_hsmmc_dev_pm_ops,
},
};
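/*
 * Editorial note: the suspend/resume callbacks move from struct
 * platform_driver to a struct dev_pm_ops wired up through .driver.pm.
 * Assuming SIMPLE_DEV_PM_OPS is available in this tree, the same
 * conversion could also be written as:
 *
 *	static SIMPLE_DEV_PM_OPS(omap_hsmmc_dev_pm_ops,
 *				 omap_hsmmc_suspend, omap_hsmmc_resume);
 *
 * which additionally points the hibernation callbacks at the same
 * handlers.
 */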
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0ed48959b59..0a4e43f3714 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -544,7 +544,7 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
struct pxamci_host *host = mmc_priv(devid);
- mmc_detect_change(devid, host->pdata->detect_delay);
+ mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
return IRQ_HANDLED;
}
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6..2e16e0a90a5 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
static int s3cmci_suspend(struct device *dev)
{
struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
- struct pm_message event = { PM_EVENT_SUSPEND };
- return mmc_suspend_host(mmc, event);
+ return mmc_suspend_host(mmc);
}
static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 55e33135edb..a2e9820cd42 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
{
struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
- return mmc_suspend_host(host->mmc, state);
+ return mmc_suspend_host(host->mmc);
}
static int sdhci_of_resume(struct of_device *ofdev)
@@ -118,7 +118,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
static int __devinit sdhci_of_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct sdhci_of_data *sdhci_of_data = match->data;
struct sdhci_host *host;
struct sdhci_of_host *of_host;
@@ -205,8 +205,11 @@ static const struct of_device_id sdhci_of_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_of_match);
static struct of_platform_driver sdhci_of_driver = {
- .driver.name = "sdhci-of",
- .match_table = sdhci_of_match,
+ .driver = {
+ .name = "sdhci-of",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_of_match,
+ },
.probe = sdhci_of_probe,
.remove = __devexit_p(sdhci_of_remove),
.suspend = sdhci_of_suspend,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e64..c8623de13af 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
SDHCI_QUIRK_NO_CARD_NO_RESET,
.ops = {
- .readl = sdhci_be32bs_readl,
- .readw = esdhc_readw,
- .readb = sdhci_be32bs_readb,
- .writel = sdhci_be32bs_writel,
- .writew = esdhc_writew,
- .writeb = esdhc_writeb,
+ .read_l = sdhci_be32bs_readl,
+ .read_w = esdhc_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_be32bs_writel,
+ .write_w = esdhc_writew,
+ .write_b = esdhc_writeb,
.set_clock = esdhc_set_clock,
.enable_dma = esdhc_enable_dma,
.get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed75..68ddb7546ae 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE,
.ops = {
- .readl = sdhci_be32bs_readl,
- .readw = sdhci_be32bs_readw,
- .readb = sdhci_be32bs_readb,
- .writel = sdhci_hlwd_writel,
- .writew = sdhci_hlwd_writew,
- .writeb = sdhci_hlwd_writeb,
+ .read_l = sdhci_be32bs_readl,
+ .read_w = sdhci_be32bs_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
},
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c3..65483fdea45 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
if (IS_ERR(host)) {
dev_err(&pdev->dev, "cannot allocate host\n");
- return ERR_PTR(PTR_ERR(host));
+ return ERR_CAST(host);
}
slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad..b6ee0d71969 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
#include <linux/mmc/host.h>
#include <linux/io.h>
+#include <linux/sdhci-pltfm.h>
#include "sdhci.h"
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
+ struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host;
struct resource *iomem;
int ret;
- BUG_ON(pdev == NULL);
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
goto err;
}
- if (resource_size(iomem) != 0x100)
+ if (resource_size(iomem) < 0x100)
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
host->hw_name = "platform";
- host->ops = &sdhci_pltfm_ops;
+ if (pdata && pdata->ops)
+ host->ops = pdata->ops;
+ else
+ host->ops = &sdhci_pltfm_ops;
+ if (pdata)
+ host->quirks = pdata->quirks;
host->irq = platform_get_irq(pdev, 0);
if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
goto err_remap;
}
+ if (pdata && pdata->init) {
+ ret = pdata->init(host);
+ if (ret)
+ goto err_plat_init;
+ }
+
ret = sdhci_add_host(host);
if (ret)
goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
return 0;
err_add_host:
+ if (pdata && pdata->exit)
+ pdata->exit(host);
+err_plat_init:
iounmap(host->ioaddr);
err_remap:
release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
{
+ struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
dead = 1;
sdhci_remove_host(host, dead);
+ if (pdata && pdata->exit)
+ pdata->exit(host);
iounmap(host->ioaddr);
release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sdhci");
-
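/*
 * Editorial note: usage sketch for the platform-data hooks checked in
 * sdhci-pltfm.c above.  The board code below is hypothetical; the field
 * names (ops, quirks, init, exit) and the init/exit signatures are
 * inferred from the calls added in this file, and
 * SDHCI_QUIRK_32BIT_DMA_ADDR is only an illustrative quirk.
 */
#include <linux/sdhci-pltfm.h>

static int my_board_sdhci_init(struct sdhci_host *host)
{
	/* e.g. enable a board-level clock or level shifter */
	return 0;
}

static void my_board_sdhci_exit(struct sdhci_host *host)
{
	/* undo whatever my_board_sdhci_init() set up */
}

static struct sdhci_pltfm_data my_board_sdhci_pdata = {
	.quirks	= SDHCI_QUIRK_32BIT_DMA_ADDR,
	.init	= my_board_sdhci_init,
	.exit	= my_board_sdhci_exit,
	/* .ops left NULL: the driver falls back to sdhci_pltfm_ops */
};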
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cf..af217924a76 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
host->irq = irq;
/* Setup quirks for the controller */
-
- /* Currently with ADMA enabled we are getting some length
- * interrupts that are not being dealt with, do disable
- * ADMA until this is sorted out. */
- host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* support as well. */
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
- /* PIO currently has problems with multi-block IO */
- host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
-
#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
/* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 00000000000..d70c54c7b70
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
+/*
+ * drivers/mmc/host/sdhci-spear.c
+ *
+ * Support of SDHCI platform devices for the SPEAr SoC family
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdhci-spear.h>
+#include <linux/io.h>
+#include "sdhci.h"
+
+struct spear_sdhci {
+ struct clk *clk;
+ struct sdhci_plat_data *data;
+};
+
+/* sdhci ops */
+static struct sdhci_ops sdhci_pltfm_ops = {
+ /* Nothing to do for now. */
+};
+
+/* gpio card detection interrupt handler */
+static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+ unsigned long gpio_irq_type;
+ int val;
+
+ val = gpio_get_value(sdhci->data->card_int_gpio);
+
+ /* val == 1 -> card removed, val == 0 -> card inserted */
+ /* if card removed - set irq for low level, else vice versa */
+ gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
+ set_irq_type(irq, gpio_irq_type);
+
+ if (sdhci->data->card_power_gpio >= 0) {
+ if (!sdhci->data->power_always_enb) {
+ /* if card inserted, give power, otherwise remove it */
+ val = sdhci->data->power_active_high ? !val : val;
+ gpio_set_value(sdhci->data->card_power_gpio, val);
+ }
+ }
+
+ /* inform sdhci driver about card insertion/removal */
+ tasklet_schedule(&host->card_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct resource *iomem;
+ struct spear_sdhci *sdhci;
+ int ret;
+
+ BUG_ON(pdev == NULL);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "memory resource not defined\n");
+ goto err;
+ }
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ "spear-sdhci")) {
+ ret = -EBUSY;
+ dev_dbg(&pdev->dev, "cannot request region\n");
+ goto err;
+ }
+
+ sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+ if (!sdhci) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
+ goto err_kzalloc;
+ }
+
+ /* clk enable */
+ sdhci->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdhci->clk)) {
+ ret = PTR_ERR(sdhci->clk);
+ dev_dbg(&pdev->dev, "Error getting clock\n");
+ goto err_clk_get;
+ }
+
+ ret = clk_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Error enabling clock\n");
+ goto err_clk_enb;
+ }
+
+ /* overwrite platform_data */
+ sdhci->data = dev_get_platdata(&pdev->dev);
+ pdev->dev.platform_data = sdhci;
+
+ if (pdev->dev.parent)
+ host = sdhci_alloc_host(pdev->dev.parent, 0);
+ else
+ host = sdhci_alloc_host(&pdev->dev, 0);
+
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_dbg(&pdev->dev, "error allocating host\n");
+ goto err_alloc_host;
+ }
+
+ host->hw_name = "sdhci";
+ host->ops = &sdhci_pltfm_ops;
+ host->irq = platform_get_irq(pdev, 0);
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "failed to remap registers\n");
+ goto err_ioremap;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_dbg(&pdev->dev, "error adding host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ /*
+ * Using GPIOs for sdhci power control and card-detect interrupts
+ * is optional.  If sdhci->data is NULL, the controller's own
+ * lines are used; otherwise the GPIO lines are used.
+ * If a GPIO is selected for power control, power should be
+ * disabled after card removal and re-enabled when the
+ * card-insertion interrupt occurs.
+ */
+ if (!sdhci->data)
+ return 0;
+
+ if (sdhci->data->card_power_gpio >= 0) {
+ int val = 0;
+
+ ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+ sdhci->data->card_power_gpio);
+ goto err_pgpio_request;
+ }
+
+ if (sdhci->data->power_always_enb)
+ val = sdhci->data->power_active_high;
+ else
+ val = !sdhci->data->power_active_high;
+
+ ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+ sdhci->data->card_power_gpio);
+ goto err_pgpio_direction;
+ }
+
+ gpio_set_value(sdhci->data->card_power_gpio, 1);
+ }
+
+ if (sdhci->data->card_int_gpio >= 0) {
+ ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_request;
+ }
+
+ ret = gpio_direction_input(sdhci->data->card_int_gpio);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_direction;
+ }
+ ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+ sdhci_gpio_irq, IRQF_TRIGGER_LOW,
+ mmc_hostname(host->mmc), pdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_request_irq;
+ }
+
+ }
+
+ return 0;
+
+err_igpio_request_irq:
+err_igpio_direction:
+ if (sdhci->data->card_int_gpio >= 0)
+ gpio_free(sdhci->data->card_int_gpio);
+err_igpio_request:
+err_pgpio_direction:
+ if (sdhci->data->card_power_gpio >= 0)
+ gpio_free(sdhci->data->card_power_gpio);
+err_pgpio_request:
+ platform_set_drvdata(pdev, NULL);
+ sdhci_remove_host(host, 1);
+err_add_host:
+ iounmap(host->ioaddr);
+err_ioremap:
+ sdhci_free_host(host);
+err_alloc_host:
+ clk_disable(sdhci->clk);
+err_clk_enb:
+ clk_put(sdhci->clk);
+err_clk_get:
+ kfree(sdhci);
+err_kzalloc:
+ release_mem_region(iomem->start, resource_size(iomem));
+err:
+ dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
+ return ret;
+}
+
+static int __devexit sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+ int dead;
+ u32 scratch;
+
+ if (sdhci->data) {
+ if (sdhci->data->card_int_gpio >= 0) {
+ free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
+ gpio_free(sdhci->data->card_int_gpio);
+ }
+
+ if (sdhci->data->card_power_gpio >= 0)
+ gpio_free(sdhci->data->card_power_gpio);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ dead = 0;
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+ iounmap(host->ioaddr);
+ sdhci_free_host(host);
+ clk_disable(sdhci->clk);
+ clk_put(sdhci->clk);
+ kfree(sdhci);
+ if (iomem)
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return 0;
+}
+
+static struct platform_driver sdhci_driver = {
+ .driver = {
+ .name = "sdhci",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_probe,
+ .remove = __devexit_p(sdhci_remove),
+};
+
+static int __init sdhci_init(void)
+{
+ return platform_driver_register(&sdhci_driver);
+}
+module_init(sdhci_init);
+
+static void __exit sdhci_exit(void)
+{
+ platform_driver_unregister(&sdhci_driver);
+}
+module_exit(sdhci_exit);
+
+MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_LICENSE("GPL v2");
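/*
 * Editorial note: board-side usage sketch for the SPEAr driver above.
 * The machine code is hypothetical; only the fields this file actually
 * dereferences (card_int_gpio, card_power_gpio, power_active_high,
 * power_always_enb) are assumed to exist in struct sdhci_plat_data,
 * and the GPIO numbers are made up.
 */
#include <linux/mmc/sdhci-spear.h>

static struct sdhci_plat_data spear_sdhci_pdata = {
	.card_int_gpio		= 61,	/* card-detect GPIO (illustrative) */
	.card_power_gpio	= 62,	/* slot power GPIO (illustrative) */
	.power_active_high	= 0,	/* power enabled by driving low */
	.power_always_enb	= 0,	/* toggle power on insert/remove */
};

/* assigned to pdev->dev.platform_data before the device is registered */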
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e..c6d1bd8d4ac 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
}
- /*
- * Add a terminating entry.
- */
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+ /*
+ * Mark the last descriptor as the terminating descriptor
+ */
+ if (desc != host->adma_desc) {
+ desc -= 8;
+ desc[0] |= 0x2; /* end */
+ }
+ } else {
+ /*
+ * Add a terminating entry.
+ */
- /* nop, end, valid */
- sdhci_set_adma_desc(desc, 0, 0, 0x3);
+ /* nop, end, valid */
+ sdhci_set_adma_desc(desc, 0, 0, 0x3);
+ }
/*
* Resync align buffer as we might have changed it.
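/*
 * Editorial note on the quirk branch above: a 32-bit ADMA2 descriptor
 * is 8 bytes -- a 16-bit attribute, a 16-bit length and a 32-bit
 * address -- and bit 1 (0x2) of the attribute is the End flag.
 * Normally the table is closed with an extra NOP descriptor carrying
 * End|Valid (0x3).  With SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC the NOP is
 * skipped entirely: desc -= 8 steps back to the last real descriptor
 * and desc[0] |= 0x2 sets its End flag instead.
 */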
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
sdhci_disable_card_detection(host);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret)
return ret;
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk =
(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
host->max_clk *= 1000000;
- if (host->max_clk == 0) {
+ if (host->max_clk == 0 || host->quirks &
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
printk(KERN_ERR
"%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f9428..c8468134adc 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
- SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR)
+ SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
/* Controller uses SDCLK instead of TMCLK for data timeouts */
#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
struct sdhci_ops {
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
- u32 (*readl)(struct sdhci_host *host, int reg);
- u16 (*readw)(struct sdhci_host *host, int reg);
- u8 (*readb)(struct sdhci_host *host, int reg);
- void (*writel)(struct sdhci_host *host, u32 val, int reg);
- void (*writew)(struct sdhci_host *host, u16 val, int reg);
- void (*writeb)(struct sdhci_host *host, u8 val, int reg);
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+ u16 (*read_w)(struct sdhci_host *host, int reg);
+ u8 (*read_b)(struct sdhci_host *host, int reg);
+ void (*write_l)(struct sdhci_host *host, u32 val, int reg);
+ void (*write_w)(struct sdhci_host *host, u16 val, int reg);
+ void (*write_b)(struct sdhci_host *host, u8 val, int reg);
#endif
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
- if (unlikely(host->ops->writel))
- host->ops->writel(host, val, reg);
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
else
writel(val, host->ioaddr + reg);
}
static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
- if (unlikely(host->ops->writew))
- host->ops->writew(host, val, reg);
+ if (unlikely(host->ops->write_w))
+ host->ops->write_w(host, val, reg);
else
writew(val, host->ioaddr + reg);
}
static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
{
- if (unlikely(host->ops->writeb))
- host->ops->writeb(host, val, reg);
+ if (unlikely(host->ops->write_b))
+ host->ops->write_b(host, val, reg);
else
writeb(val, host->ioaddr + reg);
}
static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readl))
- return host->ops->readl(host, reg);
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
else
return readl(host->ioaddr + reg);
}
static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readw))
- return host->ops->readw(host, reg);
+ if (unlikely(host->ops->read_w))
+ return host->ops->read_w(host, reg);
else
return readw(host->ioaddr + reg);
}
static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readb))
- return host->ops->readb(host, reg);
+ if (unlikely(host->ops->read_b))
+ return host->ops->read_b(host, reg);
else
return readb(host->ioaddr + reg);
}
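/*
 * Editorial note: how a host driver hooks the renamed accessors.  The
 * sketch assumes a hypothetical controller that only needs big-endian
 * 32-bit fixups, mirroring the esdhc/hlwd tables earlier in this diff.
 */
static u32 my_be_readl(struct sdhci_host *host, int reg)
{
	return ioread32be(host->ioaddr + reg);
}

static void my_be_writel(struct sdhci_host *host, u32 val, int reg)
{
	iowrite32be(val, host->ioaddr + reg);
}

static struct sdhci_ops my_be_sdhci_ops = {
	.read_l		= my_be_readl,
	.write_l	= my_be_writel,
	/*
	 * .read_w/.read_b/.write_w/.write_b left NULL: the inline
	 * wrappers above then fall back to plain readw()/readb() etc.
	 */
};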
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac0..e7507af3856 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
{
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "suspend\n");
- mmc_suspend_host(mmc, PMSG_SUSPEND);
+ mmc_suspend_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 00000000000..eb97830c034
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
+/*
+ * MMCIF eMMC driver.
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Yusuke Goda <yusuke.goda.sx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *
+ * TODO
+ * 1. DMA
+ * 2. Power management
+ * 3. Handle MMC errors better
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/sh_mmcif.h>
+
+#define DRIVER_NAME "sh_mmcif"
+#define DRIVER_VERSION "2010-04-28"
+
+#define MMCIF_CE_CMD_SET 0x00000000
+#define MMCIF_CE_ARG 0x00000008
+#define MMCIF_CE_ARG_CMD12 0x0000000C
+#define MMCIF_CE_CMD_CTRL 0x00000010
+#define MMCIF_CE_BLOCK_SET 0x00000014
+#define MMCIF_CE_CLK_CTRL 0x00000018
+#define MMCIF_CE_BUF_ACC 0x0000001C
+#define MMCIF_CE_RESP3 0x00000020
+#define MMCIF_CE_RESP2 0x00000024
+#define MMCIF_CE_RESP1 0x00000028
+#define MMCIF_CE_RESP0 0x0000002C
+#define MMCIF_CE_RESP_CMD12 0x00000030
+#define MMCIF_CE_DATA 0x00000034
+#define MMCIF_CE_INT 0x00000040
+#define MMCIF_CE_INT_MASK 0x00000044
+#define MMCIF_CE_HOST_STS1 0x00000048
+#define MMCIF_CE_HOST_STS2 0x0000004C
+#define MMCIF_CE_VERSION 0x0000007C
+
+/* CE_CMD_SET */
+#define CMD_MASK 0x3f000000
+#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
+#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
+#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
+#define CMD_SET_RBSY (1 << 21) /* R1b */
+#define CMD_SET_CCSEN (1 << 20)
+#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
+#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
+#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
+#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
+#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
+#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
+#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
+#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check */
+#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check */
+#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check */
+#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check */
+#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
+#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
+#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
+#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
+#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
+#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
+
+/* CE_CMD_CTRL */
+#define CMD_CTRL_BREAK (1 << 0)
+
+/* CE_BLOCK_SET */
+#define BLOCK_SIZE_MASK 0x0000ffff
+
+/* CE_CLK_CTRL */
+#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
+#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
+#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
+ (1 << 9) | (1 << 8)) /* resp busy timeout */
+#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
+ (1 << 5) | (1 << 4)) /* read/write timeout */
+#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
+ (1 << 1) | (1 << 0)) /* ccs timeout */
+
+/* CE_BUF_ACC */
+#define BUF_ACC_DMAWEN (1 << 25)
+#define BUF_ACC_DMAREN (1 << 24)
+#define BUF_ACC_BUSW_32 (0 << 17)
+#define BUF_ACC_BUSW_16 (1 << 17)
+#define BUF_ACC_ATYP (1 << 16)
+
+/* CE_INT */
+#define INT_CCSDE (1 << 29)
+#define INT_CMD12DRE (1 << 26)
+#define INT_CMD12RBE (1 << 25)
+#define INT_CMD12CRE (1 << 24)
+#define INT_DTRANE (1 << 23)
+#define INT_BUFRE (1 << 22)
+#define INT_BUFWEN (1 << 21)
+#define INT_BUFREN (1 << 20)
+#define INT_CCSRCV (1 << 19)
+#define INT_RBSYE (1 << 17)
+#define INT_CRSPE (1 << 16)
+#define INT_CMDVIO (1 << 15)
+#define INT_BUFVIO (1 << 14)
+#define INT_WDATERR (1 << 11)
+#define INT_RDATERR (1 << 10)
+#define INT_RIDXERR (1 << 9)
+#define INT_RSPERR (1 << 8)
+#define INT_CCSTO (1 << 5)
+#define INT_CRCSTO (1 << 4)
+#define INT_WDATTO (1 << 3)
+#define INT_RDATTO (1 << 2)
+#define INT_RBSYTO (1 << 1)
+#define INT_RSPTO (1 << 0)
+#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
+ INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
+ INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
+ INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+
+/* CE_INT_MASK */
+#define MASK_ALL 0x00000000
+#define MASK_MCCSDE (1 << 29)
+#define MASK_MCMD12DRE (1 << 26)
+#define MASK_MCMD12RBE (1 << 25)
+#define MASK_MCMD12CRE (1 << 24)
+#define MASK_MDTRANE (1 << 23)
+#define MASK_MBUFRE (1 << 22)
+#define MASK_MBUFWEN (1 << 21)
+#define MASK_MBUFREN (1 << 20)
+#define MASK_MCCSRCV (1 << 19)
+#define MASK_MRBSYE (1 << 17)
+#define MASK_MCRSPE (1 << 16)
+#define MASK_MCMDVIO (1 << 15)
+#define MASK_MBUFVIO (1 << 14)
+#define MASK_MWDATERR (1 << 11)
+#define MASK_MRDATERR (1 << 10)
+#define MASK_MRIDXERR (1 << 9)
+#define MASK_MRSPERR (1 << 8)
+#define MASK_MCCSTO (1 << 5)
+#define MASK_MCRCSTO (1 << 4)
+#define MASK_MWDATTO (1 << 3)
+#define MASK_MRDATTO (1 << 2)
+#define MASK_MRBSYTO (1 << 1)
+#define MASK_MRSPTO (1 << 0)
+
+/* CE_HOST_STS1 */
+#define STS1_CMDSEQ (1 << 31)
+
+/* CE_HOST_STS2 */
+#define STS2_CRCSTE (1 << 31)
+#define STS2_CRC16E (1 << 30)
+#define STS2_AC12CRCE (1 << 29)
+#define STS2_RSPCRC7E (1 << 28)
+#define STS2_CRCSTEBE (1 << 27)
+#define STS2_RDATEBE (1 << 26)
+#define STS2_AC12REBE (1 << 25)
+#define STS2_RSPEBE (1 << 24)
+#define STS2_AC12IDXE (1 << 23)
+#define STS2_RSPIDXE (1 << 22)
+#define STS2_CCSTO (1 << 15)
+#define STS2_RDATTO (1 << 14)
+#define STS2_DATBSYTO (1 << 13)
+#define STS2_CRCSTTO (1 << 12)
+#define STS2_AC12BSYTO (1 << 11)
+#define STS2_RSPBSYTO (1 << 10)
+#define STS2_AC12RSPTO (1 << 9)
+#define STS2_RSPTO (1 << 8)
+#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
+ STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
+#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
+ STS2_DATBSYTO | STS2_CRCSTTO | \
+ STS2_AC12BSYTO | STS2_RSPBSYTO | \
+ STS2_AC12RSPTO | STS2_RSPTO)
+
+/* CE_VERSION */
+#define SOFT_RST_ON (1 << 31)
+#define SOFT_RST_OFF (0 << 31)
+
+#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
+#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
+#define CLKDEV_INIT 400000 /* 400 KHz */
+
+struct sh_mmcif_host {
+ struct mmc_host *mmc;
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+ struct platform_device *pd;
+ struct clk *hclk;
+ unsigned int clk;
+ int bus_width;
+ u16 wait_int;
+ u16 sd_error;
+ long timeout;
+ void __iomem *addr;
+ wait_queue_head_t intr_wait;
+};
+
+static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
+{
+ return readl(host->addr + reg);
+}
+
+static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(val, host->addr + reg);
+}
+
+static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+
+static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
+{
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
+
+ if (!clk)
+ return;
+ if (p->sup_pclk && clk == host->clk)
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
+ else
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
+ (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
+
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+}
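/*
 * Editorial note: worked example for the divider expression above,
 * with illustrative numbers.  For host->clk = 48 MHz and a requested
 * clk of 400 kHz:
 *
 *	host->clk / clk			= 120
 *	__rounddown_pow_of_two(120)	= 64
 *	ilog2(64)			= 6
 *	(6 << 16) & CLK_CLEAR		= 0x00060000
 *
 * so bits 19:16 of CE_CLK_CTRL are programmed to 6; the mapping from
 * that field to the actual divide ratio is defined by the MMCIF
 * hardware manual.
 */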
+
+static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
+{
+ u32 tmp;
+
+ tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
+
+ sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
+ sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+ /* byte swap on */
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+}
+
+static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
+{
+ u32 state1, state2;
+ int ret, timeout = 10000000;
+
+ host->sd_error = 0;
+ host->wait_int = 0;
+
+ state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
+ state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
+ pr_debug("%s: ERR HOST_STS1 = %08x\n", \
+ DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
+ pr_debug("%s: ERR HOST_STS2 = %08x\n", \
+ DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
+
+ if (state1 & STS1_CMDSEQ) {
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
+ while (1) {
+ timeout--;
+ if (timeout < 0) {
+ pr_err(DRIVER_NAME": Forceed end of " \
+ "command sequence timeout err\n");
+ return -EIO;
+ }
+ if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
+ & STS1_CMDSEQ))
+ break;
+ mdelay(1);
+ }
+ sh_mmcif_sync_reset(host);
+ pr_debug(DRIVER_NAME": Forced end of command sequence\n");
+ return -EIO;
+ }
+
+ if (state2 & STS2_CRC_ERR) {
+ pr_debug(DRIVER_NAME": Happened CRC error\n");
+ ret = -EIO;
+ } else if (state2 & STS2_TIMEOUT_ERR) {
+ pr_debug(DRIVER_NAME": Happened Timeout error\n");
+ ret = -ETIMEDOUT;
+ } else {
+ pr_debug(DRIVER_NAME": Happened End/Index error\n");
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int sh_mmcif_single_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, *p = sg_virt(data->sg);
+
+ host->wait_int = 0;
+
+ /* buf read enable */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ blocksize = (BLOCK_SIZE_MASK &
+ sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
+ for (i = 0; i < blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+
+ /* buffer read end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ return 0;
+}
+
+static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, j, sec, *p;
+
+ blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+ for (j = 0; j < data->sg_len; j++) {
+ p = sg_virt(data->sg);
+ host->wait_int = 0;
+ for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ /* buf read enable */
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+
+ if (host->wait_int != 1 &&
+ (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ for (i = 0; i < blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+ }
+ if (j < data->sg_len - 1)
+ data->sg++;
+ }
+ return 0;
+}
+
+static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, *p = sg_virt(data->sg);
+
+ host->wait_int = 0;
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+ /* buf write enable */
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ blocksize = (BLOCK_SIZE_MASK &
+ sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
+ for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+
+ /* buffer write end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ return 0;
+}
+
+static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 i, sec, j, blocksize, *p;
+
+ blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+
+ for (j = 0; j < data->sg_len; j++) {
+ p = sg_virt(data->sg);
+ host->wait_int = 0;
+ for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ /* buf write enable */
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+
+ if (host->wait_int != 1 &&
+ (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+ }
+ if (j < data->sg_len - 1)
+ data->sg++;
+ }
+ return 0;
+}
+
+static void sh_mmcif_get_response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
+ cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
+ cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
+ cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+ } else
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+}
+
+static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
+}
+
+static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+{
+ u32 tmp = 0;
+
+ /* Response Type check */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ tmp |= CMD_SET_RTYP_NO;
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ tmp |= CMD_SET_RTYP_6B;
+ break;
+ case MMC_RSP_R2:
+ tmp |= CMD_SET_RTYP_17B;
+ break;
+ default:
+ pr_err(DRIVER_NAME": Not support type response.\n");
+ break;
+ }
+ switch (opc) {
+ /* RBSY */
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ case MMC_GEN_CMD:
+ tmp |= CMD_SET_RBSY;
+ break;
+ }
+ /* WDAT / DATW */
+ if (host->data) {
+ tmp |= CMD_SET_WDAT;
+ switch (host->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ tmp |= CMD_SET_DATW_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ tmp |= CMD_SET_DATW_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ tmp |= CMD_SET_DATW_8;
+ break;
+ default:
+ pr_err(DRIVER_NAME": Not support bus width.\n");
+ break;
+ }
+ }
+ /* DWEN */
+ if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
+ tmp |= CMD_SET_DWEN;
+ /* CMLTE/CMD12EN */
+ if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
+ tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
+ sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
+ mrq->data->blocks << 16);
+ }
+ /* RIDXC[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_RIDXC_BITS;
+ /* RCRC7C[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND)
+ tmp |= CMD_SET_CRC7C_BITS;
+ /* RCRC7C[1:0] internal CRC7 */
+ if (opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_CRC7C_INTERNAL;
+
+ return opc = ((opc << 24) | tmp);
+}
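/*
 * Editorial note: worked example of the value returned above for a
 * single-block read -- CMD17 (MMC_READ_SINGLE_BLOCK), R1 response,
 * 4-bit bus, host->data set:
 *
 *	(17 << 24)		= 0x11000000	command index
 *	CMD_SET_RTYP_6B		= 0x00400000	48-bit response
 *	CMD_SET_WDAT		= 0x00080000	data phase present
 *	CMD_SET_DATW_4		= 0x00000001	4-bit bus width
 *				  ----------
 *	CE_CMD_SET value	= 0x11480001
 *
 * Being a read of a single block, DWEN, CMLTE and CMD12EN all stay
 * clear.
 */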
+
+static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, u32 opc)
+{
+ u32 ret;
+
+ switch (opc) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ ret = sh_mmcif_multi_read(host, mrq);
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ ret = sh_mmcif_multi_write(host, mrq);
+ break;
+ case MMC_WRITE_BLOCK:
+ ret = sh_mmcif_single_write(host, mrq);
+ break;
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_SEND_EXT_CSD:
+ ret = sh_mmcif_single_read(host, mrq);
+ break;
+ default:
+ pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ long time;
+ int ret = 0, mask = 0;
+ u32 opc = cmd->opcode;
+
+ host->cmd = cmd;
+
+ switch (opc) {
+ /* response busy check */
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ case MMC_GEN_CMD:
+ mask = MASK_MRBSYE;
+ break;
+ default:
+ mask = MASK_MCRSPE;
+ break;
+ }
+ mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
+ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
+ MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
+ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
+
+ if (host->data) {
+ sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
+ sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
+ }
+ opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+
+ sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
+ /* set arg */
+ sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
+ host->wait_int = 0;
+ /* set cmd */
+ sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 || host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && time == 0) {
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+ if (host->sd_error) {
+ switch (cmd->opcode) {
+ case MMC_ALL_SEND_CID:
+ case MMC_SELECT_CARD:
+ case MMC_APP_CMD:
+ cmd->error = -ETIMEDOUT;
+ break;
+ default:
+ pr_debug("%s: Cmd(d'%d) err\n",
+ DRIVER_NAME, cmd->opcode);
+ cmd->error = sh_mmcif_error_manage(host);
+ break;
+ }
+ host->sd_error = 0;
+ host->wait_int = 0;
+ return;
+ }
+ if (!(cmd->flags & MMC_RSP_PRESENT)) {
+ cmd->error = ret;
+ host->wait_int = 0;
+ return;
+ }
+ if (host->wait_int == 1) {
+ sh_mmcif_get_response(host, cmd);
+ host->wait_int = 0;
+ }
+ if (host->data) {
+ ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+ if (ret < 0)
+ mrq->data->bytes_xfered = 0;
+ else
+ mrq->data->bytes_xfered =
+ mrq->data->blocks * mrq->data->blksz;
+ }
+ cmd->error = ret;
+}
+
+static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ long time;
+
+ if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+ else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+ else {
+ pr_err(DRIVER_NAME": not support stop cmd\n");
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+ sh_mmcif_get_cmd12response(host, cmd);
+ host->wait_int = 0;
+ cmd->error = 0;
+}
+
+static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+
+ switch (mrq->cmd->opcode) {
+ /* MMCIF does not support SD/SDIO commands */
+ case SD_IO_SEND_OP_COND:
+ case MMC_APP_CMD:
+ mrq->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, mrq);
+ return;
+ case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+ if (!mrq->data) {
+ /* send_if_cond cmd (not supported) */
+ mrq->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ host->data = mrq->data;
+ sh_mmcif_start_cmd(host, mrq, mrq->cmd);
+ host->data = NULL;
+
+ if (mrq->cmd->error != 0) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ if (mrq->stop)
+ sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+ mmc_request_done(mmc, mrq);
+}
+
+static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ /* clock stop */
+ sh_mmcif_clock_control(host, 0);
+ if (p->down_pwr)
+ p->down_pwr(host->pd);
+ return;
+ } else if (ios->power_mode == MMC_POWER_UP) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ }
+
+ if (ios->clock)
+ sh_mmcif_clock_control(host, ios->clock);
+
+ host->bus_width = ios->bus_width;
+}
+
+static struct mmc_host_ops sh_mmcif_ops = {
+ .request = sh_mmcif_request,
+ .set_ios = sh_mmcif_set_ios,
+};
+
+static void sh_mmcif_detect(struct mmc_host *mmc)
+{
+ mmc_detect_change(mmc, 0);
+}
+
+static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ u32 state = 0;
+ int err = 0;
+
+ state = sh_mmcif_readl(host, MMCIF_CE_INT);
+
+ if (state & INT_RBSYE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
+ } else if (state & INT_CRSPE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
+ } else if (state & INT_BUFREN) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ } else if (state & INT_BUFWEN) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ } else if (state & INT_CMD12DRE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT,
+ ~(INT_CMD12DRE | INT_CMD12RBE |
+ INT_CMD12CRE | INT_BUFRE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+ } else if (state & INT_BUFRE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+ } else if (state & INT_DTRANE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+ } else if (state & INT_CMD12RBE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT,
+ ~(INT_CMD12RBE | INT_CMD12CRE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+ } else if (state & INT_ERR_STS) {
+ /* err interrupts */
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ } else {
+ pr_debug("%s: Not support int\n", DRIVER_NAME);
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ }
+ if (err) {
+ host->sd_error = 1;
+ pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
+ }
+ host->wait_int = 1;
+ wake_up(&host->intr_wait);
+
+ return IRQ_HANDLED;
+}
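/*
 * Editorial note on the ack/mask pattern used throughout the handler
 * above: CE_INT bits appear to be cleared by writing 0 to them (hence
 * the ~bit writes, which leave all other status bits untouched), while
 * clearing the matching MASK_M* bit in CE_INT_MASK stops further
 * interrupts for that event until the request/PIO paths re-enable it
 * with sh_mmcif_bitset().  The woken waiter then checks wait_int and
 * sd_error to decide whether to continue or run
 * sh_mmcif_error_manage().
 */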
+
+static int __devinit sh_mmcif_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq[2];
+ struct mmc_host *mmc;
+ struct sh_mmcif_host *host = NULL;
+ struct sh_mmcif_plat_data *pd = NULL;
+ struct resource *res;
+ void __iomem *reg;
+ char clk_name[8];
+
+ irq[0] = platform_get_irq(pdev, 0);
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[0] < 0 || irq[1] < 0) {
+ pr_err(DRIVER_NAME": Get irq error\n");
+ return -ENXIO;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource error.\n");
+ return -ENXIO;
+ }
+ reg = ioremap(res->start, resource_size(res));
+ if (!reg) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ return -ENOMEM;
+ }
+ pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
+ if (!pd) {
+ dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
+ ret = -ENXIO;
+ goto clean_up;
+ }
+ mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto clean_up;
+ }
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->addr = reg;
+ host->timeout = 1000;
+
+ snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
+ host->hclk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(host->hclk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(host->hclk);
+ goto clean_up1;
+ }
+ clk_enable(host->hclk);
+ host->clk = clk_get_rate(host->hclk);
+ host->pd = pdev;
+
+ init_waitqueue_head(&host->intr_wait);
+
+ mmc->ops = &sh_mmcif_ops;
+ mmc->f_max = host->clk;
+ /* close to 400KHz */
+ if (mmc->f_max < 51200000)
+ mmc->f_min = mmc->f_max / 128;
+ else if (mmc->f_max < 102400000)
+ mmc->f_min = mmc->f_max / 256;
+ else
+ mmc->f_min = mmc->f_max / 512;
+ if (pd->ocr)
+ mmc->ocr_avail = pd->ocr;
+ mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+ if (pd->caps)
+ mmc->caps |= pd->caps;
+ mmc->max_phys_segs = 128;
+ mmc->max_hw_segs = 128;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ sh_mmcif_sync_reset(host);
+ platform_set_drvdata(pdev, host);
+ mmc_add_host(mmc);
+
+ ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+ if (ret) {
+ pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
+ goto clean_up2;
+ }
+ ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+ if (ret) {
+ free_irq(irq[0], host);
+ pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
+ goto clean_up2;
+ }
+
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+ sh_mmcif_detect(host->mmc);
+
+ pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
+ pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
+ sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
+ return ret;
+
+clean_up2:
+ clk_disable(host->hclk);
+clean_up1:
+ mmc_free_host(mmc);
+clean_up:
+ if (reg)
+ iounmap(reg);
+ return ret;
+}
+
+static int __devexit sh_mmcif_remove(struct platform_device *pdev)
+{
+ struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ int irq[2];
+
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+
+ irq[0] = platform_get_irq(pdev, 0);
+ irq[1] = platform_get_irq(pdev, 1);
+
+ if (host->addr)
+ iounmap(host->addr);
+
+ platform_set_drvdata(pdev, NULL);
+ mmc_remove_host(host->mmc);
+
+ free_irq(irq[0], host);
+ free_irq(irq[1], host);
+
+ clk_disable(host->hclk);
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static struct platform_driver sh_mmcif_driver = {
+ .probe = sh_mmcif_probe,
+ .remove = sh_mmcif_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_mmcif_init(void)
+{
+ return platform_driver_register(&sh_mmcif_driver);
+}
+
+static void __exit sh_mmcif_exit(void)
+{
+ platform_driver_unregister(&sh_mmcif_driver);
+}
+
+module_init(sh_mmcif_init);
+module_exit(sh_mmcif_exit);
+
+
+MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b..cec99958b65 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
- return mmc_suspend_host(tifm_get_drvdata(sock), state);
+ return mmc_suspend_host(tifm_get_drvdata(sock));
}
static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b2b577f6afd..ee7d0a5a51c 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
host->cmd = cmd;
-/* FIXME - this seems to be ok comented out but the spec suggest this bit should
- * be set when issuing app commands.
+/* FIXME - this seems to be ok commented out but the spec suggests this bit
+ * should be set when issuing app commands.
* if(cmd->flags & MMC_FLAG_ACMD)
* c |= APP_CMD;
*/
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
return 0;
}
-/* This chip always returns (at least?) as much data as you ask for.
+/*
+ * This chip always returns (at least?) as much data as you ask for.
* I'm unsure what happens if you ask for less than a block. This should be
* looked into to ensure that a funny length read doesnt hose the controller.
- *
*/
-static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
count = data->blksz;
pr_debug("count: %08x offset: %08x flags %08x\n",
- count, host->sg_off, data->flags);
+ count, host->sg_off, data->flags);
/* Transfer the data */
if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
-static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
host->data = NULL;
if (!data) {
- pr_debug("Spurious data end IRQ\n");
+ dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
return;
}
stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
pr_debug("Completed data request\n");
- /*FIXME - other drivers allow an optional stop command of any given type
+ /*
+ * FIXME: other drivers allow an optional stop command of any given type
* which we dont do, as the chip can auto generate them.
* Perhaps we can be smarter about when to use auto CMD12 and
* only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
* upper layers expect. For now, we do what works.
*/
- if (data->flags & MMC_DATA_READ)
- disable_mmc_irqs(host, TMIO_MASK_READOP);
- else
- disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ if (data->flags & MMC_DATA_READ) {
+ if (!host->chan_rx)
+ disable_mmc_irqs(host, TMIO_MASK_READOP);
+ dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+ host->mrq);
+ } else {
+ if (!host->chan_tx)
+ disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+ host->mrq);
+ }
if (stop) {
if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
tmio_mmc_finish_request(host);
}
-static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (!data)
+ return;
+
+ if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+ /*
+ * Has all data been written out yet? Testing on SuperH showed
+ * that in most cases the first interrupt already comes with the
+ * BUSY status bit clear, but on some operations, like mount or
+ * at the beginning of a write / sync / umount, there is one
+ * DATAEND interrupt with the BUSY bit set; in these cases
+ * waiting for one more interrupt fixes the problem.
+ */
+ if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+ disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ }
+ } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+ disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ } else {
+ tmio_mmc_do_data_irq(host);
+ }
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
* If theres no data or we encountered an error, finish now.
*/
if (host->data && !cmd->error) {
- if (host->data->flags & MMC_DATA_READ)
- enable_mmc_irqs(host, TMIO_MASK_READOP);
- else
- enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ if (host->data->flags & MMC_DATA_READ) {
+ if (!host->chan_rx)
+ enable_mmc_irqs(host, TMIO_MASK_READOP);
+ } else {
+ struct dma_chan *chan = host->chan_tx;
+ if (!chan)
+ enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ }
} else {
tmio_mmc_finish_request(host);
}
@@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
return;
}
-
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
@@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (!ireg) {
disable_mmc_irqs(host, status & ~irq_mask);
- pr_debug("tmio_mmc: Spurious irq, disabling! "
+ pr_warning("tmio_mmc: Spurious irq, disabling! "
"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
pr_debug_status(status);
@@ -363,16 +405,265 @@ out:
return IRQ_HANDLED;
}
+#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+ /* Switch DMA mode on or off - SuperH specific? */
+ sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
+#endif
+}
+
+static void tmio_dma_complete(void *arg)
+{
+ struct tmio_mmc_host *host = arg;
+
+ dev_dbg(&host->pdev->dev, "Command completed\n");
+
+ if (!host->data)
+ dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
+ else
+ enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+}
+
+static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_rx;
+ int ret;
+
+ ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0) {
+ host->dma_sglen = ret;
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ host->desc = desc;
+ desc->callback = tmio_dma_complete;
+ desc->callback_param = host;
+ host->cookie = desc->tx_submit(desc);
+ if (host->cookie < 0) {
+ host->desc = NULL;
+ ret = host->cookie;
+ } else {
+ chan->device->device_issue_pending(chan);
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, host->cookie, host->mrq);
+
+ if (!host->desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ /* Free the Tx channel too */
+ chan = host->chan_tx;
+ if (chan) {
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ reset(host);
+ /* Fail this request, let the upper layers recover */
+ host->mrq->cmd->error = ret;
+ tmio_mmc_finish_request(host);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ desc, host->cookie, host->sg_len);
+
+ return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_tx;
+ int ret;
+
+ ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0) {
+ host->dma_sglen = ret;
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ host->desc = desc;
+ desc->callback = tmio_dma_complete;
+ desc->callback_param = host;
+ host->cookie = desc->tx_submit(desc);
+ if (host->cookie < 0) {
+ host->desc = NULL;
+ ret = host->cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, host->cookie, host->mrq);
+
+ if (!host->desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ /* Free the Rx channel too */
+ chan = host->chan_rx;
+ if (chan) {
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ reset(host);
+ /* Fail this request, let the upper layers recover */
+ host->mrq->cmd->error = ret;
+ tmio_mmc_finish_request(host);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+ desc, host->cookie);
+
+ return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ return tmio_mmc_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ return tmio_mmc_start_dma_tx(host);
+ }
+
+ return 0;
+}
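
Both start routines above follow the same dmaengine slave sequence of this kernel generation: map the scatterlist, build a slave descriptor, attach the completion callback, submit it, and kick the engine (the Tx path defers the final issue_pending to the dma_issue tasklet so it only runs once the command has been sent). A condensed sketch of just that sequence, with the PIO-fallback unwinding omitted, follows; sketch_submit_slave_dma is an illustrative name, not a function in this patch.

/* Condensed sketch of the slave-DMA submission used above; error
 * unwinding and the PIO fallback are omitted for brevity. */
static int sketch_submit_slave_dma(struct tmio_mmc_host *host,
				   enum dma_data_direction dir)
{
	struct dma_chan *chan = (dir == DMA_FROM_DEVICE) ?
				host->chan_rx : host->chan_tx;
	struct dma_async_tx_descriptor *desc;
	int nents;

	/* 1. Map the request's scatterlist for the device */
	nents = dma_map_sg(&host->pdev->dev, host->sg_ptr, host->sg_len, dir);
	if (nents <= 0)
		return -EIO;

	/* 2. Ask the channel for a slave descriptor over the mapped list */
	desc = chan->device->device_prep_slave_sg(chan, host->sg_ptr, nents,
			dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	/* 3. The completion callback re-enables the DATAEND interrupt */
	desc->callback = tmio_dma_complete;
	desc->callback_param = host;

	/* 4. Queue the descriptor and start the engine */
	host->cookie = desc->tx_submit(desc);
	if (host->cookie < 0)
		return host->cookie;
	chan->device->device_issue_pending(chan);

	return 0;
}
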
+
+static void tmio_issue_tasklet_fn(unsigned long priv)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct dma_chan *chan = host->chan_tx;
+
+ chan->device->device_issue_pending(chan);
+}
+
+static void tmio_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ if (host->data->flags & MMC_DATA_READ)
+ dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ DMA_TO_DEVICE);
+
+ tmio_mmc_do_data_irq(host);
+}
+
+/* It might be necessary to make the filter MFD-specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+ dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+ chan->private = arg;
+ return true;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->cookie = -EINVAL;
+ host->desc = NULL;
+
+ /* We can either use DMA for both Tx and Rx or not use it at all */
+ if (pdata->dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_tx);
+ dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
+ host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_rx);
+ dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx) {
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ return;
+ }
+
+ tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
+ tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
+
+ tmio_mmc_enable_dma(host, true);
+ }
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+
+ host->cookie = -EINVAL;
+ host->desc = NULL;
+}
+#else
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ return 0;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->chan_tx = NULL;
+ host->chan_rx = NULL;
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
- data->blksz, data->blocks);
+ data->blksz, data->blocks);
/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
- printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
- mmc_hostname(host->mmc), data->blksz);
+ pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ mmc_hostname(host->mmc), data->blksz);
return -EINVAL;
}
@@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
- return 0;
+ return tmio_mmc_start_dma(host, data);
}
/* Process requests from the MMC layer */
@@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
ret = tmio_mmc_start_command(host, mrq->cmd);
-
if (!ret)
return;
@@ -458,11 +748,14 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
- return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
+ return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -475,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
/* Tell MFD core it can disable us now.*/
if (!ret && cell->disable)
@@ -515,6 +808,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
struct tmio_mmc_host *host;
struct mmc_host *mmc;
int ret = -EINVAL;
+ u32 irq_mask = TMIO_MASK_CMD;
if (dev->num_resources != 2)
goto out;
@@ -553,7 +847,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
mmc->caps |= pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (pdata->ocr_mask)
+ mmc->ocr_avail = pdata->ocr_mask;
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Tell the MFD core we are ready to be enabled */
if (cell->enable) {
@@ -578,13 +875,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (ret)
goto cell_disable;
+ /* See if we also get DMA */
+ tmio_mmc_request_dma(host, pdata);
+
mmc_add_host(mmc);
- printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
- (unsigned long)host->ctl, host->irq);
+ pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+ (unsigned long)host->ctl, host->irq);
/* Unmask the IRQs we want to know about */
- enable_mmc_irqs(host, TMIO_MASK_IRQ);
+ if (!host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+ enable_mmc_irqs(host, irq_mask);
return 0;
@@ -609,6 +913,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
+ tmio_mmc_release_dma(host);
free_irq(host->irq, host);
if (cell->disable)
cell->disable(dev);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index dafecfbcd91..64f7d5dfc10 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -10,6 +10,8 @@
*/
#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -106,6 +108,17 @@ struct tmio_mmc_host {
unsigned int sg_off;
struct platform_device *pdev;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+ struct dma_async_tx_descriptor *desc;
+ unsigned int dma_sglen;
+ dma_cookie_t cookie;
+#endif
};
#include <linux/io.h>
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a9437..19f2d72dbca 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
via_save_pcictrlreg(host);
via_save_sdcreg(host);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece..0012f5d13d2 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
{
BUG_ON(host == NULL);
- return mmc_suspend_host(host->mmc, state);
+ return mmc_suspend_host(host->mmc);
}
static int wbsd_resume(struct wbsd_host *host)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ecf90f5c97c..f8210bf2d24 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,6 +304,19 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on EXPERIMENTAL && BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_ECC
+ help
+ This enables new and very EXPERIMENTAL support for the SmartMedia/xD
+ FTL (flash translation layer).
+ Write support isn't yet well tested, so this code IS likely to
+ eat your card; please don't use it with valuable data.
+ Use the read-only driver (CONFIG_SSFDC) instead.
+
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
depends on MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4521b1ecce4..760abc53339 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
obj-$(CONFIG_INFTL) += inftl.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5fbf29e1e64..62f3ea9de84 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
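
The hunks above (and the many similar ones that follow) all preserve the drivers' existing sleep-and-retry idiom, now under chip->mutex rather than a spinlock: register on the chip's waitqueue, drop the lock so the current owner can finish, schedule, then retake the lock and re-check the state. In isolation the idiom looks roughly like the sketch below; wait_for_chip_ready is an illustrative helper, not a function in these drivers.

/* Illustrative sketch of the waitqueue sleep-and-retry idiom used
 * throughout this patch, now under a mutex instead of a spinlock. */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mtd/flashchip.h>

static void wait_for_chip_ready(struct flchip *chip)
{
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&chip->mutex);
	while (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		/* Drop the lock so whoever owns the chip can make progress */
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);

		/* Retake the lock and re-check the chip state */
		mutex_lock(&chip->mutex);
	}
	/* Returns with chip->mutex held and the chip in FL_READY */
}
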
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occured while sleep: reset timeout */
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
chip->state = FL_POINT;
chip->ref_point_counter++;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return -EINVAL;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
case FL_PM_SUSPENDED:
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8d538..d81079ef91a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@
#define MAX_WORD_RETRIES 3
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
- (cfi->mfr == MANUFACTURER_MACRONIX)) {
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
DEBUG(MTD_DEBUG_LEVEL1,
"%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
@@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
mtd->flags |= MTD_POWERUP_LOCK;
}
+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * These flashes report two separate eraseblock regions based on the
+ * sector_erase-size and block_erase-size, although they both operate on the
+ * same memory. This is not allowed according to CFI, so we just pick the
+ * sector_erase-size.
+ */
+ cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x5555;
+ cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2AA;
+}
+
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
@@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
}
}
+/* Used to fix the CFI tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+ { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
+ { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
+ { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
+ { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
+ { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
+ { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
+ { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
+ { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+ { 0, 0, NULL, NULL }
+};
+
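
For readers unfamiliar with the fixup mechanism these tables feed, cfi_fixup() (in drivers/mtd/chips/cfi_util.c) walks the array until the terminating all-zero entry and calls every fixup whose manufacturer and device ID match the probed chip, with CFI_MFR_ANY / CFI_ID_ANY acting as wildcards. The sketch below paraphrases that walk for illustration; it is not the actual cfi_util.c code.

/* Paraphrased sketch of how a cfi_fixup table is consumed; see
 * cfi_fixup() in drivers/mtd/chips/cfi_util.c for the real thing. */
static void apply_cfi_fixups(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id == CFI_ID_ANY || f->id == cfi->id))
			f->fixup(mtd, f->param);
	}
}
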
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
- /*
- * It's a real CFI chip, not one for which the probe
- * routine faked a CFI structure. So we read the feature
- * table from it.
- */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_amdstd *extp;
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
- if (!extp) {
- kfree(mtd);
- return NULL;
- }
-
- cfi_fixup_major_minor(cfi, extp);
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
- }
+ if (extp) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure.
+ */
+ cfi_fixup_major_minor(cfi, extp);
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
- /* Apply cfi device specific fixups */
- cfi_fixup(mtd, cfi_fixup_table);
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
- /* Tell the user about it in lots of lovely detail */
- cfi_tell_features(extp);
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
#endif
- bootloc = extp->TopBottom;
- if ((bootloc != 2) && (bootloc != 3)) {
- printk(KERN_WARNING "%s: CFI does not contain boot "
- "bank location. Assuming top.\n", map->name);
- bootloc = 2;
- }
+ bootloc = extp->TopBottom;
+ if ((bootloc < 2) || (bootloc > 5)) {
+ printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+ "bank location (%d). Assuming bottom.\n",
+ map->name, bootloc);
+ bootloc = 2;
+ }
- if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
- printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
- for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
- int j = (cfi->cfiq->NumEraseRegions-1)-i;
- __u32 swap;
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+ __u32 swap;
- swap = cfi->cfiq->EraseRegionInfo[i];
- cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
- cfi->cfiq->EraseRegionInfo[j] = swap;
+ swap = cfi->cfiq->EraseRegionInfo[i];
+ cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
+ cfi->cfiq->EraseRegionInfo[j] = swap;
+ }
}
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ }
+ cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+ if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+ kfree(mtd);
+ return NULL;
}
- /* Set the default CFI lock/unlock addresses */
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
@@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
return cfi_amdstd_setup(mtd);
}
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
@@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
#endif
__module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
return NULL;
@@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
@@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_READY;
return 0;
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto resettime;
}
}
@@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define UDELAY(map, chip, adr, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#endif
@@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
struct cfi_private *cfi = map->fldrv_priv;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
#if 0
@@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
wake_up(&chip->wq);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry1:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_read(map, ofs + chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
@@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
adr += chip->start;
cmd_adr = adr;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
adr = cfi->addr_unlock1;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
@@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
@@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
@@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
@@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
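
The comment above cfi_amdstd_reset() explains why the reboot path is hooked; the mechanism itself is the standard kernel reboot-notifier pattern: embed a struct notifier_block, point notifier_call at a handler that recovers the containing object with container_of(), and register/unregister it around the object's lifetime. A minimal standalone sketch of that pattern follows; my_dev and its helpers are illustrative names, not part of this driver.

/* Minimal sketch of the reboot-notifier pattern used above. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct my_dev {
	struct notifier_block reboot_nb;
	/* ... device state ... */
};

/* Put the hardware back into a state the firmware/bootloader can use,
 * e.g. return a NOR flash to read-array mode. */
static void my_dev_quiesce(struct my_dev *dev)
{
}

static int my_dev_reboot_cb(struct notifier_block *nb, unsigned long action,
			    void *unused)
{
	struct my_dev *dev = container_of(nb, struct my_dev, reboot_nb);

	my_dev_quiesce(dev);
	return NOTIFY_DONE;
}

static void my_dev_hook_reboot(struct my_dev *dev)
{
	dev->reboot_nb.notifier_call = my_dev_reboot_cb;
	register_reboot_notifier(&dev->reboot_nb);
}

static void my_dev_unhook_reboot(struct my_dev *dev)
{
	unregister_reboot_notifier(&dev->reboot_nb);
}
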
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
@@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a671525..e54e8c169d7 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (!z) {
chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -781,7 +781,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -828,14 +828,14 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1086,7 +1086,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1235,7 +1235,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the unlock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
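
The cfi_cmdset_0020.c hunks above are a mechanical conversion of the per-chip lock from a bh-disabling spinlock to a sleeping mutex; every place that used to drop the spinlock around cfi_udelay() or a wait-queue sleep now does the same with mutex_unlock()/mutex_lock(). A minimal sketch of the two wait shapes involved, assuming the flchip fields the converted driver uses (chip->mutex, chip->wq, chip->state) and a hypothetical chip_is_ready() helper standing in for the real status-register check:

    #include <linux/jiffies.h>
    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <linux/mtd/map.h>
    #include <linux/mtd/cfi.h>
    #include <linux/mtd/flashchip.h>

    /* Hypothetical stand-in for the driver's status-register check. */
    static int chip_is_ready(struct map_info *map, struct flchip *chip,
                             unsigned long cmd_addr);

    static int wait_until_ready(struct map_info *map, struct flchip *chip,
                                unsigned long cmd_addr)
    {
            unsigned long timeo = jiffies + HZ;

            mutex_lock(&chip->mutex);

            /* Busy-wait shape: drop the lock, delay briefly, re-take, re-check. */
            while (!chip_is_ready(map, chip, cmd_addr)) {
                    if (time_after(jiffies, timeo)) {
                            mutex_unlock(&chip->mutex);
                            return -EIO;
                    }
                    /* Latency issues. Drop the lock, wait a while and retry. */
                    mutex_unlock(&chip->mutex);
                    cfi_udelay(1);
                    mutex_lock(&chip->mutex);
            }

            /* Sleep shape: another operation owns the chip; wait on chip->wq
             * until whoever holds it changes chip->state and wakes us. */
            if (chip->state != FL_READY && chip->state != FL_STATUS) {
                    DECLARE_WAITQUEUE(wait, current);

                    set_current_state(TASK_UNINTERRUPTIBLE);
                    add_wait_queue(&chip->wq, &wait);
                    mutex_unlock(&chip->mutex);
                    schedule();
                    remove_wait_queue(&chip->wq, &wait);
                    return -EAGAIN;         /* caller retries from the top */
            }

            mutex_unlock(&chip->mutex);
            return 0;
    }
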
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e63e6749429..b2acd32f4fb 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -158,6 +158,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
@@ -181,29 +182,6 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
- /* Note we put the device back into Read Mode BEFORE going into Auto
- * Select Mode, as some devices support nesting of modes, others
- * don't. This way should always work.
- * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
- * so should be treated as nops or illegal (and so put the device
- * back into Read Mode, which is a nop in this case).
- */
- cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query16(map, base);
- cfi->id = cfi_read_query16(map, base + ofs_factor);
-
- /* Get AMD/Spansion extended JEDEC ID */
- if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
- cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
- cfi_read_query(map, base + 0xf * ofs_factor);
-
- /* Put it back into Read Mode */
- cfi_qry_mode_off(base, map, cfi);
- xip_allowed(base, map);
-
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -228,6 +206,35 @@ static int __xipram cfi_chip_setup(struct map_info *map,
#endif
}
+ if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
+ addr_unlock1 = 0x5555;
+ addr_unlock2 = 0x2AAA;
+ }
+
+ /*
+ * Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8);
@@ -269,6 +276,9 @@ static char *vendorname(__u16 vendor)
case P_ID_SST_PAGE:
return "SST Page Write";
+ case P_ID_SST_OLD:
+ return "SST 39VF160x/39VF320x";
+
case P_ID_INTEL_PERFORMANCE:
return "Intel Performance Code";
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index ca584d0380b..d7c2c672757 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -104,10 +104,11 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
int i;
struct cfi_extquery *extp = NULL;
- printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
if (!adr)
goto out;
+ printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
+
extp = kmalloc(size, GFP_KERNEL);
if (!extp) {
printk(KERN_ERR "Failed to allocate memory\n");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 57e0e4e921f..d1806497719 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
* to flash memory - that means that we don't have to check status
* and timeout.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Done and happy. */
chip->state = chip->oldstate;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e2dc96441e0..3b9a2843c5f 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
- spin_lock_init(&pchip->_spinlock);
- pchip->mutex = &pchip->_spinlock;
+ mutex_init(&pchip->mutex);
}
}
@@ -242,17 +241,19 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
/* We need these for the !CONFIG_MODULES case,
because symbol_get() doesn't work there */
#ifdef CONFIG_MTD_CFI_INTELEXT
- case 0x0001:
- case 0x0003:
- case 0x0200:
+ case P_ID_INTEL_EXT:
+ case P_ID_INTEL_STD:
+ case P_ID_INTEL_PERFORMANCE:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
- case 0x0002:
+ case P_ID_AMD_STD:
+ case P_ID_SST_OLD:
+ case P_ID_WINBOND:
return cfi_cmdset_0002(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_STAA
- case 0x0020:
+ case P_ID_ST_ADV:
return cfi_cmdset_0020(map, primary);
#endif
default:
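
The check_cmd_set() hunk above swaps bare numbers for symbolic command-set IDs. For reference, the correspondence implied by the replaced case labels (the canonical definitions live in <linux/mtd/cfi.h>; the values are repeated here only to show which literal each name replaces):

    /* Intel/Sharp extended command set driver, cfi_cmdset_0001() */
    #define P_ID_INTEL_EXT          0x0001  /* was "case 0x0001" */
    #define P_ID_INTEL_STD          0x0003  /* was "case 0x0003" */
    #define P_ID_INTEL_PERFORMANCE  0x0200  /* was "case 0x0200" */

    /* AMD/Fujitsu standard command set driver, cfi_cmdset_0002() */
    #define P_ID_AMD_STD            0x0002  /* was "case 0x0002" */

    /* ST advanced architecture driver, cfi_cmdset_0020() */
    #define P_ID_ST_ADV             0x0020  /* was "case 0x0020" */

    /*
     * P_ID_SST_OLD and P_ID_WINBOND are new entries routed to
     * cfi_cmdset_0002(); they are pseudo-IDs whose numeric values are not
     * visible in this hunk.
     */
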
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 8db1148dfa4..d72a5fb2d04 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -22,24 +22,6 @@
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
-/* Manufacturers */
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001f
-#define MANUFACTURER_EON 0x001c
-#define MANUFACTURER_FUJITSU 0x0004
-#define MANUFACTURER_HYUNDAI 0x00AD
-#define MANUFACTURER_INTEL 0x0089
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_NEC 0x0010
-#define MANUFACTURER_PMC 0x009D
-#define MANUFACTURER_SHARP 0x00b0
-#define MANUFACTURER_SST 0x00BF
-#define MANUFACTURER_ST 0x0020
-#define MANUFACTURER_TOSHIBA 0x0098
-#define MANUFACTURER_WINBOND 0x00da
-#define CONTINUATION_CODE 0x007f
-
-
/* AMD */
#define AM29DL800BB 0x22CB
#define AM29DL800BT 0x224A
@@ -166,6 +148,8 @@
#define SST39LF160 0x2782
#define SST39VF1601 0x234b
#define SST39VF3201 0x235b
+#define SST39WF1601 0x274b
+#define SST39WF1602 0x274a
#define SST39LF512 0x00D4
#define SST39LF010 0x00D5
#define SST39LF020 0x00D6
@@ -309,7 +293,7 @@ struct amd_flash_info {
*/
static const struct amd_flash_info jedec_table[] = {
{
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
.uaddr = MTD_UADDR_0x0555_0x02AA,
@@ -321,7 +305,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -336,7 +320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -351,7 +335,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -366,7 +350,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -381,7 +365,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -397,7 +381,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
/* add DL */
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -414,7 +398,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,14)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -431,7 +415,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -446,7 +430,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -461,7 +445,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -476,7 +460,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -488,7 +472,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -500,7 +484,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -512,7 +496,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -524,7 +508,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -536,7 +520,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -551,7 +535,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DT,
.name = "AMD AM29SL800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -566,7 +550,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DB,
.name = "AMD AM29SL800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -581,7 +565,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -593,7 +577,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -606,7 +590,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x80,256)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -619,7 +603,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -632,7 +616,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -645,7 +629,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -658,7 +642,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BT,
.name = "Eon EN29SL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -673,7 +657,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BB,
.name = "Eon EN29SL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -688,7 +672,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -700,7 +684,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -715,7 +699,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
.devtypes = CFI_DEVICETYPE_X8,
@@ -727,7 +711,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,128)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -740,7 +724,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -753,7 +737,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -768,7 +752,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -783,7 +767,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -798,7 +782,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -813,7 +797,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -828,7 +812,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -843,7 +827,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_HYUNDAI,
+ .mfr_id = CFI_MFR_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -858,7 +842,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -871,7 +855,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -884,7 +868,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -897,7 +881,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -910,7 +894,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -923,7 +907,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -936,7 +920,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -948,7 +932,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -960,7 +944,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
.devtypes = CFI_DEVICETYPE_X8,
@@ -972,7 +956,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -985,7 +969,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -998,7 +982,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1011,7 +995,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1023,7 +1007,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1036,7 +1020,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1049,7 +1033,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1062,7 +1046,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1075,7 +1059,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 63),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1088,7 +1072,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1101,7 +1085,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1114,7 +1098,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640C3B,
.name = "Intel 28F640C3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1127,7 +1111,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1139,7 +1123,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1151,7 +1135,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1163,7 +1147,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1178,7 +1162,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_NEC,
+ .mfr_id = CFI_MFR_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1192,7 +1176,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x2000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1207,7 +1191,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1219,7 +1203,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1231,7 +1215,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1246,7 +1230,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1261,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1276,7 +1260,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1288,7 +1272,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 64 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1300,7 +1284,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 128 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1312,7 +1296,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 256 )
}
}, {
- .mfr_id = MANUFACTURER_SHARP,
+ .mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1324,7 +1308,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x40000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1336,7 +1320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1348,8 +1332,8 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST29EE020,
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29EE020,
.name = "SST 29EE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
@@ -1359,9 +1343,9 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST29LE020,
- .name = "SST 29LE020",
+ .name = "SST 29LE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
@@ -1370,7 +1354,7 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1382,7 +1366,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1394,7 +1378,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1406,7 +1390,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1418,7 +1402,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF040,
.name = "SST 39SF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1430,7 +1414,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1443,7 +1427,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1455,7 +1439,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1467,7 +1451,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1479,7 +1463,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,96),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1491,7 +1475,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1503,7 +1487,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1516,7 +1500,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1529,7 +1513,35 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1601,
+ .name = "SST 39WF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1602,
+ .name = "SST 39WF1602",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF3201,
.name = "SST 39VF3201",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1544,7 +1556,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST36VF3203,
.name = "SST 36VF3203",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1556,7 +1568,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1571,7 +1583,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1586,7 +1598,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1601,7 +1613,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DT,
.name = "ST M29W400DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1616,7 +1628,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DB,
.name = "ST M29W400DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1631,7 +1643,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1646,7 +1658,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1661,7 +1673,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1673,7 +1685,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1685,7 +1697,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1697,7 +1709,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1709,7 +1721,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1721,7 +1733,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
},
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080A,
.name = "ST M50FLW080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1736,7 +1748,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080B,
.name = "ST M50FLW080B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1751,7 +1763,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = 0xff00 | MANUFACTURER_ST,
+ .mfr_id = 0xff00 | CFI_MFR_ST,
.dev_id = 0xff00 | PSD4256G6V,
.name = "ST PSD4256G6V",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1763,7 +1775,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1778,7 +1790,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1793,7 +1805,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1806,7 +1818,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1819,7 +1831,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1832,7 +1844,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,127)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1845,7 +1857,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_WINBOND,
+ .mfr_id = CFI_MFR_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1878,7 +1890,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
mask = (1 << (cfi->device_type * 8)) - 1;
result = map_read(map, base + ofs);
bank++;
- } while ((result.x[0] & mask) == CONTINUATION_CODE);
+ } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
return result.x[0] & mask;
}
@@ -1969,7 +1981,7 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
- return 1; /* ok */
+ return 1; /* ok */
}
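
Apart from the two new SST 39WF160x table entries, the jedec_probe.c changes are a rename from the file-local MANUFACTURER_* constants (deleted at the top of the file) to the shared CFI_MFR_* names, which carry the same JEDEC manufacturer codes but are defined once in <linux/mtd/cfi.h>. The one functional spot is jedec_read_mfr(), whose continuation-code test now uses CFI_MFR_CONTINUATION (the old CONTINUATION_CODE, 0x007f). A sketch of that loop, assuming cfi_build_cmd_addr() as the driver uses it:

    #include <linux/mtd/map.h>
    #include <linux/mtd/cfi.h>

    /* JEDEC manufacturer IDs may begin with one or more banks holding the
     * continuation code 0x7f; keep stepping to the next bank until a real
     * code shows up, then return it. */
    static u32 read_mfr_skipping_continuation(struct map_info *map, uint32_t base,
                                              struct cfi_private *cfi)
    {
            map_word result;
            unsigned long mask = (1 << (cfi->device_type * 8)) - 1;
            int bank = 0;

            do {
                    uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);

                    result = map_read(map, base + ofs);
                    bank++;
            } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);

            return result.x[0] & mask;
    }
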
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index ab5c9b92ac8..f3226b1d38f 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,5 +1,5 @@
#
-# linux/drivers/devices/Makefile
+# linux/drivers/mtd/devices/Makefile
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ce6424008ed..93651865ddb 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
goto devinit_err;
- sprintf(name, "block2mtd: %s", devname);
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
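
block2mtd now builds the MTD name with kasprintf(), which sizes, allocates and formats in one call instead of the kmalloc()-plus-sprintf() pair (the old code also over-allocated by one byte, counting the terminating NUL twice). A minimal sketch of the idiom, with a hypothetical helper mirroring the hunk above:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* The returned string is heap-allocated; release it with kfree() when
     * the device goes away, just like a kmalloc()'d buffer. */
    static char *block2mtd_name(const char *devname)
    {
            return kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
    }
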
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d2fd550f7e0..fc8ea0a57ac 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -668,7 +668,7 @@ static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
- int count, found = 0;
+ int found = 0;
struct mtd_info *mtd;
u32 length = 0;
@@ -695,7 +695,7 @@ static int __init init_pmc551(void)
/*
* PCU-bus chipset probe.
*/
- for (count = 0; count < MAX_MTD_DEVICES; count++) {
+ for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,
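
init_pmc551() previously bounded its probe loop by MAX_MTD_DEVICES, which has nothing to do with how many V370PDC bridges can be present; the loop now simply iterates until pci_get_device() runs out of matches. pci_get_device() drops the reference on the device it is handed and returns the next match with a new reference held, so a bare loop like the following sketch neither leaks nor double-releases:

    #include <linux/pci.h>

    static int count_v370pdc_bridges(void)
    {
            struct pci_dev *pdev = NULL;
            int found = 0;

            /* Each call puts the reference on 'pdev' and returns the next
             * matching device with its reference taken, or NULL at the end. */
            while ((pdev = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
                                          PCI_DEVICE_ID_V3_SEMI_V370PDC,
                                          pdev)) != NULL)
                    found++;

            return found;   /* loop exits with pdev == NULL, nothing to release */
    }
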
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index fe17054ee2f..ab5d8cd02a1 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -73,15 +73,25 @@ static struct flash_info __initdata sst25l_flash_info[] = {
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
- unsigned char command, response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[2];
int err;
- command = SST25L_CMD_RDSR;
- err = spi_write_then_read(flash->spi, &command, 1, &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_RDSR;
+ cmd_resp[1] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(flash->spi, &m);
if (err < 0)
return err;
- *status = response;
+ *status = cmd_resp[1];
return 0;
}
@@ -328,33 +338,32 @@ out:
static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
- unsigned char command[4], response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[6];
int i, err;
uint16_t id;
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 0;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_READ_ID;
+ cmd_resp[1] = 0;
+ cmd_resp[2] = 0;
+ cmd_resp[3] = 0;
+ cmd_resp[4] = 0xff;
+ cmd_resp[5] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(spi, &m);
if (err < 0) {
- dev_err(&spi->dev, "error reading device id msb\n");
+ dev_err(&spi->dev, "error reading device id\n");
return NULL;
}
- id = response << 8;
-
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 1;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
- if (err < 0) {
- dev_err(&spi->dev, "error reading device id lsb\n");
- return NULL;
- }
-
- id |= response;
+ id = (cmd_resp[4] << 8) | cmd_resp[5];
for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
if (sst25l_flash_info[i].device_id == id)
@@ -411,17 +420,6 @@ static int __init sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
if (mtd_has_partitions()) {
struct mtd_partition *parts = NULL;
int nr_parts = 0;
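
Both sst25l_status() and sst25l_match_device() are reworked from pairs of half-duplex spi_write_then_read() calls into a single full-duplex spi_sync() transfer: one buffer serves as tx and rx, the command goes out at the start, and the reply lands in the tail positions of the same buffer as the dummy bytes are clocked out (the JEDEC-ID case is the same shape with a six-byte buffer and the ID in bytes 4-5). A sketch of the idiom for the RDSR case, assuming the driver's own SST25L_CMD_RDSR opcode macro:

    #include <linux/spi/spi.h>

    static int read_status_fullduplex(struct spi_device *spi, u8 *status)
    {
            struct spi_message m;
            struct spi_transfer t = { };
            u8 buf[2] = { SST25L_CMD_RDSR, 0xff };  /* opcode, then one dummy byte */
            int err;

            spi_message_init(&m);
            t.tx_buf = buf;
            t.rx_buf = buf;         /* received bytes overwrite the buffer in place */
            t.len = sizeof(buf);
            spi_message_add_tail(&t, &m);

            err = spi_sync(spi, &m);
            if (err < 0)
                    return err;

            *status = buf[1];       /* clocked in while the dummy byte went out */
            return 0;
    }
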
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index e56d6b42f02..62da9eb7032 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
- kfree(dev);
}
static struct mtd_blktrans_ops ftl_tr = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a33..015a7fe1b6e 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
kfree(inftl->PUtable);
kfree(inftl->VUtable);
- kfree(inftl);
}
/*
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 32e82aef3e5..8f988d7d3c5 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
}
/* To be safer with BIOS, also use erase mark as discriminant */
- if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
- SECTORSIZE + 8, 8, &retlen,
- (char *)&h1) < 0)) {
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen,(char *)&h1);
+ if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index a73ee12aad8..fece5be5871 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occured while sleep: reset timeout */
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we have suspended erase on this chip.
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
*retlen = len;
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
else
thislen = len;
/* get the chip */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (ret)
break;
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
"pointed region\n", map->name);
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
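
The lpddr conversion above replaces the per-chip spinlock with a mutex: the wait loops in wait_for_ready() and get_chip() drop the lock, call schedule(), and re-take it, which suits a sleeping lock far better than a spinlock. A stripped-down sketch of that drop-lock, sleep, relock pattern (struct chip and the state value are placeholders, assumed initialised elsewhere):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct chip {				/* placeholder for struct flchip */
	struct mutex lock;		/* was a spinlock before this change */
	wait_queue_head_t wq;		/* assumed init_waitqueue_head()ed */
	int state;
};

/* Sleep until chip->state reaches 'wanted'; called with chip->lock held. */
static void wait_for_state(struct chip *chip, int wanted)
{
	while (chip->state != wanted) {
		DECLARE_WAITQUEUE(wait, current);

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->lock);	/* never sleep with the lock held */
		schedule();			/* woken by wake_up(&chip->wq) */
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->lock);	/* re-take and re-check the state */
	}
}
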
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 79bf40f48b7..dbfe17baf04 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -134,13 +134,12 @@ out:
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
- lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo) {
printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
map->name);
return 0;
}
- memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
@@ -185,13 +184,11 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kmalloc(sizeof(struct lpddr_private) +
+ retlpddr = kzalloc(sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!retlpddr)
return NULL;
- memset(retlpddr, 0, sizeof(struct lpddr_private) +
- numvirtchips * sizeof(struct flchip));
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;
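
Both allocations in qinfo_probe.c collapse a kmalloc()+memset(0) pair into kzalloc(), which hands back already-zeroed memory. The same transformation in isolation (example_priv is a stand-in structure, not one from the driver):

#include <linux/slab.h>

struct example_priv {
	int id;
	void *table;
};

static struct example_priv *example_alloc(void)
{
	/* Before: p = kmalloc(sizeof(*p), GFP_KERNEL); memset(p, 0, sizeof(*p)); */
	return kzalloc(sizeof(struct example_priv), GFP_KERNEL);
}
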
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index aa2807d0ce7..f22bc9f05dd 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -435,7 +435,7 @@ config MTD_PCI
config MTD_PCMCIA
tristate "PCMCIA MTD driver"
- depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
+ depends on PCMCIA && MTD_COMPLEX_MAPPINGS
help
Map driver for accessing PCMCIA linear flash memory cards. These
cards are usually around 4-16MiB in size. This does not include
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index c0fd99b0c52..85dd18193cf 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
local_irq_restore(state->irq_flags);
}
-static map_word bfin_read(struct map_info *map, unsigned long ofs)
+static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t word;
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
return test;
}
-static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
switch_back(state);
}
-static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
+static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t d;
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
switch_back(state);
}
-static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENOMEM;
state->map.name = DRIVER_NAME;
- state->map.read = bfin_read;
- state->map.copy_from = bfin_copy_from;
- state->map.write = bfin_write;
- state->map.copy_to = bfin_copy_to;
+ state->map.read = bfin_flash_read;
+ state->map.copy_from = bfin_flash_copy_from;
+ state->map.write = bfin_flash_write;
+ state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = memory->end - memory->start + 1;
state->map.virt = (void __iomem *)memory->start;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index d41f34766e5..c09f4f57093 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
static int __init clps_setup_flash(void)
{
- int nr;
+ int nr = 0;
#ifdef CONFIG_ARCH_CEIVA
if (machine_is_ceiva()) {
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1bdf0ee6d0b..9639d83a9d6 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -165,12 +165,11 @@ static int ixp2000_flash_probe(struct platform_device *dev)
return -EIO;
}
- info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7b051529741..e0a5e0426ea 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
return;
if (from & 1) {
- *dest++ = BYTE1(flash_read16(src));
- src++;
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
--len;
}
@@ -196,12 +196,11 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp4xx_flash_info));
platform_set_drvdata(dev, info);
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 689d6a79ffc..e699e6ac23d 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -40,10 +40,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
static const int debug = 0;
#endif
-#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
-
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -52,7 +49,6 @@ static const int debug = 0;
struct pcmciamtd_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* device node */
caddr_t win_base; /* ioremapped address of PCMCIA window */
unsigned int win_size; /* size of window */
unsigned int offset; /* offset into card the window currently points at */
@@ -100,7 +96,9 @@ module_param(mem_type, int, 0);
MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
-/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
+/* read/write{8,16} copy_{from,to} routines with window remapping
+ * to access whole card
+ */
static caddr_t remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
@@ -137,7 +135,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
return d;
}
@@ -152,7 +150,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
return d;
}
@@ -162,7 +160,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@@ -190,7 +188,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@@ -201,7 +199,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@@ -211,7 +209,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@@ -245,7 +243,8 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -259,7 +258,8 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -271,32 +271,34 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
-static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
- writeb(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
+ adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
}
-static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
- writew(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
+ adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
}
@@ -307,7 +309,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
memcpy_toio(win_base + to, from, len);
}
@@ -376,7 +378,8 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
- DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
+ DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+ t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
@@ -432,7 +435,7 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
}
-static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
{
int i;
@@ -477,7 +480,8 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
}
DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
- dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+ dev->pcmcia_map.size,
+ dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@@ -490,7 +494,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
- cs_status_t status;
win_req_t req;
int ret;
int i;
@@ -514,9 +517,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
- /* Request a memory window for PCMCIA. Some architeures can map windows upto the maximum
- that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
- whole card - otherwise we try smaller windows until we succeed */
+ /* Request a memory window for PCMCIA. Some architectures can map windows
+ * up to the maximum that PCMCIA can support (64MiB) - this is ideal and
+ * we aim for a window the size of the whole card - otherwise we try
+ * smaller windows until we succeed
+ */
req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
@@ -544,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "dev->win_size = %d", dev->win_size);
if(!dev->win_size) {
- err("Cant allocate memory window");
+ dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -554,7 +559,8 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
dev->win_base = ioremap(req.Base, req.Size);
if(!dev->win_base) {
- err("ioremap(%lu, %u) failed", req.Base, req.Size);
+ dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n",
+ req.Base, req.Size);
pcmciamtd_release(link);
return -ENODEV;
}
@@ -565,7 +571,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
- dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp;
+ dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
link->conf.Attributes = 0;
if(setvpp == 2) {
link->conf.Vpp = dev->vpp;
@@ -601,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
if(!mtd) {
- DEBUG(1, "Cant find an MTD");
+ DEBUG(1, "Can not find an MTD");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -612,8 +618,9 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(new_name) {
int size = 0;
char unit = ' ';
- /* Since we are using a default name, make it better by adding in the
- size */
+ /* Since we are using a default name, make it better by adding
+ * in the size
+ */
if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
size = mtd->size >> 10;
unit = 'K';
@@ -643,17 +650,15 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(add_mtd_device(mtd)) {
map_destroy(mtd);
dev->mtd_info = NULL;
- err("Couldnt register MTD device");
+ dev_err(&dev->p_dev->dev,
+ "Could not register the MTD device\n");
pcmciamtd_release(link);
return -ENODEV;
}
- snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
- info("mtd%d: %s", mtd->index, mtd->name);
- link->dev_node = &dev->node;
+ dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
- failed:
- err("CS Error, exiting");
+ dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -692,8 +697,9 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
if(dev->mtd_info) {
del_mtd_device(dev->mtd_info);
+ dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
+ dev->mtd_info->index);
map_destroy(dev->mtd_info);
- info("mtd%d: Removed", dev->mtd_info->index);
}
pcmciamtd_release(link);
@@ -737,8 +743,11 @@ static struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
/* the following was commented out in pcmcia-cs-3.2.7 */
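
Two API updates dominate the pcmciamtd changes: the 8/16-bit write hooks now take a map_word (matching the struct map_info method signatures) instead of a raw u8/u16, and the home-grown err()/info() printk macros give way to dev_err()/dev_info() against the PCMCIA device. A minimal map_word-style write hook under those assumptions (example_write8 and the use of map_priv_2 as the window base mirror the diff but are placeholders):

#include <linux/mtd/map.h>
#include <linux/io.h>

/* 8-bit write hook with the map_word-based signature used by struct map_info.
 * map->map_priv_2 is assumed to hold the ioremapped window base, as above. */
static void example_write8(struct map_info *map, map_word d, unsigned long adr)
{
	void __iomem *win_base = (void __iomem *)map->map_priv_2;

	/* The payload travels in d.x[0]; only the low 8 bits are written. */
	writeb(d.x[0], win_base + adr);
}
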
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index d9603f7f965..426461a5f0d 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -264,8 +264,11 @@ static int __init physmap_init(void)
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- if (err == 0)
- platform_device_register(&physmap_flash);
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
#endif
return err;
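
physmap_init() now checks the return value of platform_device_register() and rolls back the driver registration if the compat device cannot be created, rather than reporting success with half the setup missing. The general unwind shape, with placeholder driver and device objects:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Assumed to be defined elsewhere, as in the driver. */
extern struct platform_driver example_driver;
extern struct platform_device example_device;

static int __init example_init(void)
{
	int err;

	err = platform_driver_register(&example_driver);
	if (err)
		return err;

	err = platform_device_register(&example_device);
	if (err)			/* undo the first step on failure */
		platform_driver_unregister(&example_driver);

	return err;
}
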
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 101ee6ead05..ba124baa646 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -143,7 +143,7 @@ static int of_flash_remove(struct of_device *dev)
static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
struct map_info *map)
{
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
const char *of_probe;
struct mtd_info *mtd;
static const char *rom_probe_types[]
@@ -173,14 +173,55 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
}
}
+#ifdef CONFIG_MTD_PARTITIONS
+/* When partitions are set we look for a linux,part-probe property which
+ specifies the list of partition probers to use. If none is given then the
+ default is used. These take precedence over other device tree
+ information. */
+static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
+static const char ** __devinit of_get_probes(struct device_node *dp)
+{
+ const char *cp;
+ int cplen;
+ unsigned int l;
+ unsigned int count;
+ const char **res;
+
+ cp = of_get_property(dp, "linux,part-probe", &cplen);
+ if (cp == NULL)
+ return part_probe_types_def;
+
+ count = 0;
+ for (l = 0; l != cplen; l++)
+ if (cp[l] == 0)
+ count++;
+
+ res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+ count = 0;
+ while (cplen > 0) {
+ res[count] = cp;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ count++;
+ }
+ return res;
+}
+
+static void __devinit of_free_probes(const char **probes)
+{
+ if (probes != part_probe_types_def)
+ kfree(probes);
+}
+#endif
+
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
- static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
+ const char **part_probe_types;
#endif
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
struct resource res;
struct of_flash *info;
const char *probe_type = match->data;
@@ -204,7 +245,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
p = of_get_property(dp, "reg", &count);
if (count % reg_tuple_size != 0) {
dev_err(&dev->dev, "Malformed reg property on %s\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
err = -EINVAL;
goto err_flash_remove;
}
@@ -218,7 +259,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
@@ -307,12 +348,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
- /* First look for RedBoot table or partitions on the command
- * line, these take precedence over device tree information */
+ part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
- if (err < 0)
+ if (err < 0) {
+ of_free_probes(part_probe_types);
return err;
+ }
+ of_free_probes(part_probe_types);
#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
@@ -375,8 +418,11 @@ static struct of_device_id of_flash_match[] = {
MODULE_DEVICE_TABLE(of, of_flash_match);
static struct of_platform_driver of_flash_driver = {
- .name = "of-flash",
- .match_table = of_flash_match,
+ .driver = {
+ .name = "of-flash",
+ .owner = THIS_MODULE,
+ .of_match_table = of_flash_match,
+ },
.probe = of_flash_probe,
.remove = of_flash_remove,
};
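
The new of_get_probes() turns the optional linux,part-probe device-tree property (a block of NUL-separated strings) into a NULL-terminated array of probe names, falling back to the built-in cmdlinepart/RedBoot list, and of_free_probes() frees the array only when it is not that static default. The count-then-slice idea in a runnable user-space form (split_stringlist() and the sample buffer are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split a buffer of NUL-separated strings - the layout of a device-tree
 * string-list property - into a NULL-terminated array of pointers. */
static const char **split_stringlist(const char *buf, int len)
{
	const char **res;
	int count = 0, i;

	for (i = 0; i < len; i++)	/* one entry per terminating NUL */
		if (buf[i] == '\0')
			count++;

	res = calloc(count + 1, sizeof(*res));
	if (!res)
		return NULL;

	for (i = 0; len > 0; i++) {
		int l = strlen(buf) + 1;

		res[i] = buf;
		buf += l;
		len -= l;
	}
	return res;			/* res[count] stays NULL */
}

int main(void)
{
	static const char prop[] = "cmdlinepart\0RedBoot";
	const char **list = split_stringlist(prop, sizeof(prop));
	int i;

	for (i = 0; list && list[i]; i++)
		printf("%s\n", list[i]);
	free(list);
	return 0;
}
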
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 60c068db452..eb476b7f8d1 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
/* FIXME: set_vpp needs saner arguments */
pismo_setvpp_remove_fix(pismo);
+ i2c_set_clientdata(client, NULL);
kfree(pismo);
return 0;
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
- return ret;
+ goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
pdata->cs_addrs[i]);
return 0;
+
+ exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(pismo);
+ return ret;
}
static const struct i2c_device_id pismo_id[] = {
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 91dc6331053..dd90880048c 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -63,11 +63,10 @@ static int __init pxa2xx_flash_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- memset(info, 0, sizeof(struct pxa2xx_flash_info));
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index fadc4c45b45..0391c2527bd 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -110,7 +110,7 @@ int uflash_devinit(struct of_device *op, struct device_node *dp)
static int __devinit uflash_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
/* Flashprom must have the "user" property in order to
* be used by this driver.
@@ -149,8 +149,11 @@ static const struct of_device_id uflash_match[] = {
MODULE_DEVICE_TABLE(of, uflash_match);
static struct of_platform_driver uflash_driver = {
- .name = DRIVER_NAME,
- .match_table = uflash_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = uflash_match,
+ },
.probe = uflash_probe,
.remove = __devexit_p(uflash_remove),
};
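
Both of_platform drivers touched here (physmap_of and sun_uflash) move name, owner and the match table into the embedded .driver field, using of_match_table in place of the legacy top-level .name/.match_table pair. The registration boilerplate under that convention, with placeholder names and externs:

#include <linux/module.h>
#include <linux/of_platform.h>

/* Assumed to be defined elsewhere, as in the drivers above. */
extern const struct of_device_id example_match[];
extern int example_probe(struct of_device *op, const struct of_device_id *m);
extern int example_remove(struct of_device *op);

static struct of_platform_driver example_of_driver = {
	.driver = {
		.name		= "example-flash",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,	/* was .match_table */
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
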
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c82e09bbc5f..03e19c1965c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -14,7 +14,6 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
-#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -25,12 +24,42 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ dev->disk->private_data = NULL;
+ blk_cleanup_queue(dev->rq);
+ put_disk(dev->disk);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev;
+
+ mutex_lock(&blktrans_ref_mutex);
+ dev = disk->private_data;
+
+ if (!dev)
+ goto unlock;
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&blktrans_ref_mutex);
+ return dev;
+}
+
+void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ mutex_lock(&blktrans_ref_mutex);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&blktrans_ref_mutex);
+}
-struct mtd_blkcore_priv {
- struct task_struct *thread;
- struct request_queue *rq;
- spinlock_t queue_lock;
-};
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
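
With the per-transport mtd_blkcore_priv gone, each mtd_blktrans_dev carries its own kref: blktrans_dev_get() takes a reference under blktrans_ref_mutex while the gendisk still points at the device, and blktrans_dev_put()/blktrans_dev_release() free it once the last user is gone. The same get/put/release pattern on a generic object, as a sketch (obj, obj_get and friends are placeholders):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(obj_ref_mutex);

struct obj {
	struct kref ref;		/* kref_init()ed when the object is created */
	/* ... payload ... */
};

/* Called by kref_put() once the last reference is dropped. */
static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, ref);

	kfree(o);
}

static struct obj *obj_get(struct obj *o)
{
	mutex_lock(&obj_ref_mutex);	/* serialise against the final put */
	if (o)
		kref_get(&o->ref);
	mutex_unlock(&obj_ref_mutex);
	return o;
}

static void obj_put(struct obj *o)
{
	mutex_lock(&obj_ref_mutex);
	kref_put(&o->ref, obj_release);
	mutex_unlock(&obj_ref_mutex);
}
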
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
return 0;
-
case WRITE:
if (!tr->writesect)
return -EIO;
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
-
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return -EIO;
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
static int mtd_blktrans_thread(void *arg)
{
- struct mtd_blktrans_ops *tr = arg;
- struct request_queue *rq = tr->blkcore_priv->rq;
+ struct mtd_blktrans_dev *dev = arg;
+ struct request_queue *rq = dev->rq;
struct request *req = NULL;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
- struct mtd_blktrans_dev *dev;
int res;
if (!req && !(req = blk_fetch_request(rq))) {
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
continue;
}
- dev = req->rq_disk->private_data;
- tr = dev->tr;
-
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
- res = do_blktrans_request(tr, dev, req);
+ res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
static void mtd_blktrans_request(struct request_queue *rq)
{
- struct mtd_blktrans_ops *tr = rq->queuedata;
- wake_up_process(tr->blkcore_priv->thread);
-}
+ struct mtd_blktrans_dev *dev;
+ struct request *req = NULL;
+
+ dev = rq->queuedata;
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ wake_up_process(dev->thread);
+}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = -ENODEV;
-
- if (!get_mtd_device(NULL, dev->mtd->index))
- goto out;
-
- if (!try_module_get(tr->owner))
- goto out_tr;
-
- /* FIXME: Locking. A hot pluggable device can go away
- (del_mtd_device can be called for it) without its module
- being unloaded. */
- dev->mtd->usecount++;
-
- ret = 0;
- if (tr->open && (ret = tr->open(dev))) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- out_tr:
- module_put(tr->owner);
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret;
+
+ if (!dev)
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd) {
+ ret = -ENXIO;
+ goto unlock;
}
- out:
+
+ ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+
+ /* Take another reference on the device so it won't go away till
+ last release */
+ if (!ret)
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = 0;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+ int ret = -ENXIO;
- if (tr->release)
- ret = tr->release(dev);
+ if (!dev)
+ return ret;
- if (!ret) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- module_put(tr->owner);
- }
+ mutex_lock(&dev->lock);
+
+ /* Release one reference; we are sure it's not the last one here */
+ kref_put(&dev->ref, blktrans_dev_release);
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
- if (dev->tr->getgeo)
- return dev->tr->getgeo(dev, geo);
- return -ENOTTY;
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
switch (cmd) {
case BLKFLSBUF:
- if (tr->flush)
- return tr->flush(dev);
- /* The core code did the work, we had nothing to do. */
- return 0;
+ ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
default:
- return -ENOTTY;
+ ret = -ENOTTY;
}
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static const struct block_device_operations mtd_blktrans_ops = {
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
+ int ret;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
+ mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
+ mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
last_devnum = d->devnum;
}
+
+ ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
- if ((new->devnum << tr->part_bits) > 256) {
- return -EBUSY;
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26)) {
+ mutex_unlock(&blktrans_ref_mutex);
+ goto error1;
}
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_unlock(&blktrans_ref_mutex);
+
mutex_init(&new->lock);
+ kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
+ /* Create gendisk */
+ ret = -ENOMEM;
gd = alloc_disk(1 << tr->part_bits);
- if (!gd) {
- list_del(&new->list);
- return -ENOMEM;
- }
+
+ if (!gd)
+ goto error2;
+
+ new->disk = gd;
+ gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &mtd_blktrans_ops;
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
- /* 2.5 has capacity in units of 512 bytes while still
- having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
- gd->private_data = new;
- new->blkcore_priv = gd;
- gd->queue = tr->blkcore_priv->rq;
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+
+ if (!new->rq)
+ goto error3;
+
+ new->rq->queuedata = new;
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ if (tr->discard)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ new->rq);
+
+ gd->queue = new->rq;
+
+ __get_mtd_device(new->mtd);
+ __module_get(tr->owner);
+
+ /* Create processing thread */
+ /* TODO: workqueue ? */
+ new->thread = kthread_run(mtd_blktrans_thread, new,
+ "%s%d", tr->name, new->mtd->index);
+ if (IS_ERR(new->thread)) {
+ ret = PTR_ERR(new->thread);
+ goto error4;
+ }
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
add_disk(gd);
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
return 0;
+error4:
+ module_put(tr->owner);
+ __put_mtd_device(new->mtd);
+ blk_cleanup_queue(new->rq);
+error3:
+ put_disk(new->disk);
+error2:
+ list_del(&new->list);
+error1:
+ kfree(new);
+ return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
+ unsigned long flags;
+
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
- list_del(&old->list);
+ /* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+ /* Stop the thread */
+ kthread_stop(old->thread);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ blk_start_queue(old->rq);
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+ /* Ask the trans driver to release the mtd device */
+ mutex_lock(&old->lock);
+ if (old->open && old->tr->release) {
+ old->tr->release(old);
+ old->open = 0;
+ }
+
+ __put_mtd_device(old->mtd);
+ module_put(old->tr->owner);
- del_gendisk(old->blkcore_priv);
- put_disk(old->blkcore_priv);
+ /* At that point, we don't touch the mtd anymore */
+ old->mtd = NULL;
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
return 0;
}
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
- int ret, i;
+ struct mtd_info *mtd;
+ int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
- tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
- if (!tr->blkcore_priv)
- return -ENOMEM;
mutex_lock(&mtd_table_mutex);
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
- spin_lock_init(&tr->blkcore_priv->queue_lock);
-
- tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
- if (!tr->blkcore_priv->rq) {
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return -ENOMEM;
- }
-
- tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
- if (tr->discard)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- tr->blkcore_priv->rq);
tr->blkshift = ffs(tr->blksize) - 1;
- tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
- "%sd", tr->name);
- if (IS_ERR(tr->blkcore_priv->thread)) {
- ret = PTR_ERR(tr->blkcore_priv->thread);
- blk_cleanup_queue(tr->blkcore_priv->rq);
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return ret;
- }
-
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
- for (i=0; i<MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
- tr->add_mtd(tr, mtd_table[i]);
- }
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
-
return 0;
}
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
- /* Clean up the kernel thread */
- kthread_stop(tr->blkcore_priv->thread);
-
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
-
mutex_unlock(&mtd_table_mutex);
- kfree(tr->blkcore_priv);
-
BUG_ON(!list_empty(&tr->devs));
return 0;
}
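
The net effect of the mtd_blkdevs rework is that the request queue, queue lock and worker thread move from the transport (one of each per mtd_blktrans_ops) into each mtd_blktrans_dev, with add_mtd_blktrans_dev() creating them and del_mtd_blktrans_dev() tearing them down in reverse order. A condensed sketch of that pairing against the pre-blk-mq block API used in this tree (blk_dev, example_request_fn and example_thread_fn are placeholders):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>

struct blk_dev {			/* stand-in for mtd_blktrans_dev */
	struct request_queue *rq;
	struct task_struct *thread;
	spinlock_t queue_lock;
};

extern void example_request_fn(struct request_queue *rq);
extern int example_thread_fn(void *arg);

static int blk_dev_setup(struct blk_dev *dev, const char *name, int index)
{
	spin_lock_init(&dev->queue_lock);
	dev->rq = blk_init_queue(example_request_fn, &dev->queue_lock);
	if (!dev->rq)
		return -ENOMEM;
	dev->rq->queuedata = dev;	/* lets the request fn find the device */

	dev->thread = kthread_run(example_thread_fn, dev, "%s%d", name, index);
	if (IS_ERR(dev->thread)) {
		int ret = PTR_ERR(dev->thread);

		blk_cleanup_queue(dev->rq);
		return ret;
	}
	return 0;
}

static void blk_dev_teardown(struct blk_dev *dev)
{
	kthread_stop(dev->thread);	/* stop the worker first */
	blk_cleanup_queue(dev->rq);	/* then release the queue */
}
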
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 9f41b1a853c..e6edbec609f 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,15 +19,15 @@
#include <linux/mutex.h>
-static struct mtdblk_dev {
- struct mtd_info *mtd;
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
-} *mtdblks[MAX_MTD_DEVICES];
+};
static struct mutex mtdblks_lock;
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
- struct mtdblk_dev *mtdblk;
- struct mtd_info *mtd = mbd->mtd;
- int dev = mbd->devnum;
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
mutex_lock(&mtdblks_lock);
- if (mtdblks[dev]) {
- mtdblks[dev]->count++;
+ if (mtdblk->count) {
+ mtdblk->count++;
mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
- mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk) {
- mutex_unlock(&mtdblks_lock);
- return -ENOMEM;
- }
-
mtdblk->count = 1;
- mtdblk->mtd = mtd;
-
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
- if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
- mtdblk->cache_size = mtdblk->mtd->erasesize;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
- mtdblks[dev] = mtdblk;
mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
- int dev = mbd->devnum;
- struct mtdblk_dev *mtdblk = mtdblks[dev];
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the device */
- mtdblks[dev] = NULL;
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ /* It was the last usage. Free the cache */
+ if (mbd->mtd->sync)
+ mbd->mtd->sync(mbd->mtd);
vfree(mtdblk->cache_data);
- kfree(mtdblk);
}
mutex_unlock(&mtdblks_lock);
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ if (dev->mtd->sync)
+ dev->mtd->sync(dev->mtd);
return 0;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
- dev->mtd = mtd;
- dev->devnum = mtd->index;
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
- dev->size = mtd->size >> 9;
- dev->tr = tr;
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
- dev->readonly = 1;
+ dev->mbd.readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
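
mtdblock no longer keeps a global mtdblks[MAX_MTD_DEVICES] array indexed by devnum: the cache state is embedded together with the mtd_blktrans_dev in one allocation and recovered with container_of(), so lookup needs no table and no bound on the device count. The embedding pattern in miniature (base_dev and wrapper_dev are placeholders):

#include <linux/kernel.h>	/* container_of() */

struct base_dev {		/* stands in for mtd_blktrans_dev */
	int devnum;
};

struct wrapper_dev {		/* stands in for mtdblk_dev */
	struct base_dev base;	/* embedded, not pointed to */
	int cache_state;
};

static struct wrapper_dev *to_wrapper(struct base_dev *dev)
{
	/* The outer structure is recovered purely from the address of the
	 * embedded member - no global array, no devnum lookup. */
	return container_of(dev, struct wrapper_dev, base);
}
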
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 852165f8b1c..d0d3f79f9d0 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5b081cb8435..000d65ea55a 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -15,12 +15,15 @@
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
+#include <linux/mount.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <asm/uaccess.h>
+#define MTD_INODE_FS_MAGIC 0x11307854
+static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
@@ -28,6 +31,7 @@
*/
struct mtd_file_info {
struct mtd_info *mtd;
+ struct inode *ino;
enum mtd_file_modes mode;
};
@@ -64,12 +68,10 @@ static int mtd_open(struct inode *inode, struct file *file)
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
+ struct inode *mtd_ino;
DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
- if (devnum >= MAX_MTD_DEVICES)
- return -ENODEV;
-
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
@@ -88,11 +90,23 @@ static int mtd_open(struct inode *inode, struct file *file)
goto out;
}
- if (mtd->backing_dev_info)
- file->f_mapping->backing_dev_info = mtd->backing_dev_info;
+ mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+ if (!mtd_ino) {
+ put_mtd_device(mtd);
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (mtd_ino->i_state & I_NEW) {
+ mtd_ino->i_private = mtd;
+ mtd_ino->i_mode = S_IFCHR;
+ mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
+ unlock_new_inode(mtd_ino);
+ }
+ file->f_mapping = mtd_ino->i_mapping;
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@@ -100,10 +114,12 @@ static int mtd_open(struct inode *inode, struct file *file)
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -ENOMEM;
goto out;
}
+ mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
@@ -125,6 +141,8 @@ static int mtd_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
+ iput(mfi->ino);
+
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
@@ -373,7 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -450,8 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
return ret;
}
-static int mtd_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg)
+static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -482,7 +499,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
- struct region_info_user *ur = (struct region_info_user *) argp;
+ struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
@@ -822,6 +839,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
return ret;
} /* memory_ioctl */
+static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = mtd_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
struct mtd_oob_buf32 {
@@ -836,7 +864,6 @@ struct mtd_oob_buf32 {
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
void __user *argp = compat_ptr(arg);
@@ -874,7 +901,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
break;
}
default:
- ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ ret = mtd_ioctl(file, cmd, (unsigned long)argp);
}
unlock_kernel();
@@ -942,7 +969,7 @@ static const struct file_operations mtd_fops = {
.llseek = mtd_lseek,
.read = mtd_read,
.write = mtd_write,
- .ioctl = mtd_ioctl,
+ .unlocked_ioctl = mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtd_compat_ioctl,
#endif
@@ -954,22 +981,81 @@ static const struct file_operations mtd_fops = {
#endif
};
+static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
+ mnt);
+}
+
+static struct file_system_type mtd_inodefs_type = {
+ .name = "mtd_inodefs",
+ .get_sb = mtd_inodefs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+static void mtdchar_notify_add(struct mtd_info *mtd)
+{
+}
+
+static void mtdchar_notify_remove(struct mtd_info *mtd)
+{
+ struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
+
+ if (mtd_ino) {
+ /* Destroy the inode if it exists */
+ mtd_ino->i_nlink = 0;
+ iput(mtd_ino);
+ }
+}
+
+static struct mtd_notifier mtdchar_notifier = {
+ .add = mtdchar_notify_add,
+ .remove = mtdchar_notify_remove,
+};
+
static int __init init_mtdchar(void)
{
- int status;
+ int ret;
- status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
- if (status < 0) {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_CHAR_MAJOR);
+ ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
+ if (ret < 0) {
+ pr_notice("Can't allocate major number %d for "
+ "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ return ret;
}
- return status;
+ ret = register_filesystem(&mtd_inodefs_type);
+ if (ret) {
+ pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_chdev;
+ }
+
+ mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
+ if (IS_ERR(mtd_inode_mnt)) {
+ ret = PTR_ERR(mtd_inode_mnt);
+ pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_filesystem;
+ }
+ register_mtd_user(&mtdchar_notifier);
+
+ return ret;
+
+err_unregister_filesystem:
+ unregister_filesystem(&mtd_inodefs_type);
+err_unregister_chdev:
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
+ return ret;
}
static void __exit cleanup_mtdchar(void)
{
- unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
+ unregister_mtd_user(&mtdchar_notifier);
+ mntput(mtd_inode_mnt);
+ unregister_filesystem(&mtd_inodefs_type);
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
module_init(init_mtdchar);
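
mtdchar switches the character device from the old .ioctl file operation to .unlocked_ioctl; because the handler body has not been audited for running without the big kernel lock, the new entry point simply wraps the old one in lock_kernel()/unlock_kernel(), as the compat path already did. The wrapper shape under that BKL-era assumption (example_ioctl and example_fops are placeholders):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel(), BKL era */

/* Stand-in for the existing handler that used to be wired to .ioctl. */
static int example_ioctl(struct file *file, u_int cmd, u_long arg)
{
	return -ENOTTY;
}

static long example_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	long ret;

	lock_kernel();			/* keep the old BKL serialisation */
	ret = example_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,	/* was .ioctl */
};
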
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index db6de74082a..7e075621bbf 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -183,10 +183,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
- vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
- memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);
entry_low = 0;
for (i = 0; i < concat->num_subdev; i++) {
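
concat_writev() now uses kmemdup() for its scratch copy of the kvec array, folding the kmalloc()+memcpy() pair into a single call that fails the same way (NULL on allocation failure). The same one-liner in isolation, with a stand-in buffer:

#include <linux/slab.h>
#include <linux/string.h>	/* kmemdup() */

static void *copy_buffer(const void *src, size_t bytes)
{
	/* Before: dst = kmalloc(bytes, GFP_KERNEL); memcpy(dst, src, bytes); */
	return kmemdup(src, bytes, GFP_KERNEL);
}
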
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b177e750efc..a1b8b70d2d0 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,7 +19,9 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
+#include <linux/idr.h>
#include <linux/backing-dev.h>
+#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
@@ -63,13 +65,18 @@ static struct class mtd_class = {
.resume = mtd_cls_resume,
};
+static DEFINE_IDR(mtd_idr);
+
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
-struct mtd_info *mtd_table[MAX_MTD_DEVICES];
-
EXPORT_SYMBOL_GPL(mtd_table_mutex);
-EXPORT_SYMBOL_GPL(mtd_table);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
@@ -265,13 +272,13 @@ static struct device_type mtd_devtype = {
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
- * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
- * or there's a sysfs error.
+ * if there is insufficient memory or a sysfs error.
*/
int add_mtd_device(struct mtd_info *mtd)
{
- int i;
+ struct mtd_notifier *not;
+ int i, error;
if (!mtd->backing_dev_info) {
switch (mtd->type) {
@@ -290,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- for (i=0; i < MAX_MTD_DEVICES; i++)
- if (!mtd_table[i]) {
- struct mtd_notifier *not;
-
- mtd_table[i] = mtd;
- mtd->index = i;
- mtd->usecount = 0;
-
- if (is_power_of_2(mtd->erasesize))
- mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
- else
- mtd->erasesize_shift = 0;
-
- if (is_power_of_2(mtd->writesize))
- mtd->writesize_shift = ffs(mtd->writesize) - 1;
- else
- mtd->writesize_shift = 0;
-
- mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
- mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
-
- /* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
- printk(KERN_WARNING
- "%s: unlock failed, "
- "writes may not work\n",
- mtd->name);
- }
+ do {
+ if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
+ goto fail_locked;
+ error = idr_get_new(&mtd_idr, mtd, &i);
+ } while (error == -EAGAIN);
- /* Caller should have set dev.parent to match the
- * physical device.
- */
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
- dev_set_drvdata(&mtd->dev, mtd);
- if (device_register(&mtd->dev) != 0) {
- mtd_table[i] = NULL;
- break;
- }
+ if (error)
+ goto fail_locked;
- if (MTD_DEVT(i))
- device_create(&mtd_class, mtd->dev.parent,
- MTD_DEVT(i) + 1,
- NULL, "mtd%dro", i);
-
- DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->add(mtd);
-
- mutex_unlock(&mtd_table_mutex);
- /* We _know_ we aren't being removed, because
- our caller is still holding us here. So none
- of this try_ nonsense, and no bitching about it
- either. :) */
- __module_get(THIS_MODULE);
- return 0;
- }
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE)
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
+ if (mtd->unlock(mtd, 0, mtd->size))
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ if (MTD_DEVT(i))
+ device_create(&mtd_class, mtd->dev.parent,
+ MTD_DEVT(i) + 1,
+ NULL, "mtd%dro", i);
+
+ DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+fail_added:
+ idr_remove(&mtd_idr, i);
+fail_locked:
mutex_unlock(&mtd_table_mutex);
return 1;
}
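
For reference, the ID allocation above uses the classic two-step IDR pattern of this kernel generation: idr_pre_get() preallocates memory (and may sleep with GFP_KERNEL), idr_get_new() binds the object and hands back the id, and -EAGAIN means the preallocation was consumed and the loop must retry. A minimal sketch under those assumptions (my_idr and my_alloc_id are illustrative names, not part of this patch):

#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/* Allocate an id for 'obj' and return it, or a negative errno. */
static int my_alloc_id(void *obj)
{
	int id, err;

	do {
		/* preallocate; may sleep, so only call from sleepable context */
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		/* bind the object and receive the allocated id */
		err = idr_get_new(&my_idr, obj, &id);
	} while (err == -EAGAIN);

	return err ? err : id;
}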
@@ -371,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
int ret;
+ struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
- if (mtd_table[mtd->index] != mtd) {
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
- } else if (mtd->usecount) {
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ if (mtd->usecount) {
printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
- struct mtd_notifier *not;
-
device_unregister(&mtd->dev);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->remove(mtd);
-
- mtd_table[mtd->index] = NULL;
+ idr_remove(&mtd_idr, mtd->index);
module_put(THIS_MODULE);
ret = 0;
}
+out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -411,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd)
void register_mtd_user (struct mtd_notifier *new)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
@@ -419,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new)
__module_get(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- new->add(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
@@ -438,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- old->remove(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
@@ -468,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
- struct mtd_info *ret = NULL;
- int i, err = -ENODEV;
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i] == mtd)
- ret = mtd_table[i];
- } else if (num >= 0 && num < MAX_MTD_DEVICES) {
- ret = mtd_table[num];
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
- if (!ret)
- goto out_unlock;
-
- if (!try_module_get(ret->owner))
- goto out_unlock;
-
- if (ret->get_device) {
- err = ret->get_device(ret);
- if (err)
- goto out_put;
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
}
- ret->usecount++;
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
mutex_unlock(&mtd_table_mutex);
return ret;
+}
-out_put:
- module_put(ret->owner);
-out_unlock:
- mutex_unlock(&mtd_table_mutex);
- return ERR_PTR(err);
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->get_device) {
+
+ err = mtd->get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
}
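
Note that with this change get_mtd_device() consistently returns an ERR_PTR() value on failure instead of NULL, so callers must test with IS_ERR(). A hedged caller sketch (device number 0 chosen purely for illustration):

	struct mtd_info *mtd;

	mtd = get_mtd_device(NULL, 0);		/* look up mtd0 by number */
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);		/* -ENODEV, or the chip's get_device() error */

	/* ... use the device ... */

	put_mtd_device(mtd);			/* drops usecount and the owner module reference */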
/**
@@ -517,14 +542,14 @@ out_unlock:
struct mtd_info *get_mtd_device_nm(const char *name)
{
- int i, err = -ENODEV;
- struct mtd_info *mtd = NULL;
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
- for (i = 0; i < MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
- mtd = mtd_table[i];
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
break;
}
}
@@ -554,14 +579,19 @@ out_unlock:
void put_mtd_device(struct mtd_info *mtd)
{
- int c;
-
mutex_lock(&mtd_table_mutex);
- c = --mtd->usecount;
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
if (mtd->put_device)
mtd->put_device(mtd);
- mutex_unlock(&mtd_table_mutex);
- BUG_ON(c < 0);
module_put(mtd->owner);
}
@@ -599,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
+EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
@@ -611,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info (char *buf, int i)
+static inline int mtd_proc_info(char *buf, struct mtd_info *this)
{
- struct mtd_info *this = mtd_table[i];
-
- if (!this)
- return 0;
-
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+ return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
(unsigned long long)this->size,
this->erasesize, this->name);
}
@@ -626,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i)
static int mtd_read_proc (char *page, char **start, off_t off, int count,
int *eof, void *data_unused)
{
- int len, l, i;
+ struct mtd_info *mtd;
+ int len, l;
off_t begin = 0;
mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
- for (i=0; i< MAX_MTD_DEVICES; i++) {
-
- l = mtd_proc_info(page + len, i);
+ mtd_for_each_device(mtd) {
+ l = mtd_proc_info(page + len, mtd);
len += l;
if (len+begin > off+count)
goto done;
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index a33251f4b87..6a64fdebc89 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,4 +8,9 @@
should not use them for _anything_ else */
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+extern struct mtd_info *__mtd_next_device(int i);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
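
The replacement iterator walks the IDR in ascending index order via __mtd_next_device(), and every user in this patch holds mtd_table_mutex around the walk. A minimal sketch of how an in-kernel user would iterate (count_mtd_devices is an illustrative helper, not part of the patch):

static int count_mtd_devices(void)
{
	struct mtd_info *mtd;
	int n = 0;

	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd)	/* visits devices in ascending index order */
		n++;
	mutex_unlock(&mtd_table_mutex);

	return n;
}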
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 92e12df0917..328313c3dcc 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
- if (cxt->mtd_index > MAX_MTD_DEVICES) {
- printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
- mtd_index);
- return -EINVAL;
- }
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 7c003191fca..bd9a443ccf6 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
- for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
- mtd = get_mtd_device(NULL, mtdnr);
- if (!IS_ERR(mtd)) {
- if (!strcmp(mtd->name, dev_name + 4))
- return get_sb_mtd_aux(
- fs_type, flags,
- dev_name, data, mtd,
- fill_super, mnt);
-
- put_mtd_device(mtd);
- }
- }
+ mtd = get_mtd_device_nm(dev_name + 4);
+ if (!IS_ERR(mtd))
+ return get_sb_mtd_aux(
+ fs_type, flags,
+ dev_name, data, mtd,
+ fill_super, mnt);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49e97..98a04b3c952 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
tristate "NAND Device Support"
depends on MTD
select MTD_NAND_IDS
+ select MTD_NAND_ECC
help
This enables support for accessing all type of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
if MTD_NAND
config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
-config MTD_NAND_ECC_SMC
- bool "NAND ECC Smart Media byte order"
+config MTD_SM_COMMON
+ tristate
default n
- help
- Software ECC according to the Smart Media Specification.
- The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
@@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12
This enables the driver for the autronix autcpu12 board to
access the SmartMediaCard.
+config MTD_NAND_DENALI
+ depends on PCI
+ tristate "Support Denali NAND controller on Intel Moorestown"
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+ hex "Denali NAND size scratch register address"
+ default "0xFF108018"
+ help
+ Some platforms place the NAND chip size in a scratch register
+ because (some versions of) the driver aren't able to automatically
+ determine the size of certain chips. Set the address of the
+ scratch register here to enable this feature. On Intel Moorestown
+ boards, the scratch register is at 0xFF108018.
+
config MTD_NAND_EDB7312
tristate "Support for Cirrus Logic EBD7312 evaluation board"
depends on ARCH_EDB7312
@@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
-config MTD_NAND_TS7250
- tristate "NAND Flash device on TS-7250 board"
- depends on MACH_TS72XX
- help
- Support for NAND flash on Technologic Systems TS-7250 platform.
-
config MTD_NAND_IDS
tristate
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+	  Enable support for the Ricoh R5C852 xD card reader.
+	  You also need to enable either the
+	  'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+	  experimental, read-write
+	  'SmartMedia/xD new translation layer'.
+
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE
If unsure, say N
- endchoice
-
endchoice
config MTD_NAND_PXA3xx
@@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
-config MTD_NAND_W90P910
- tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
- on w90p910.
+ on w90p910 / NUC9xx.
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd14401..e8ab884ba47 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,13 +2,16 @@
# linux/drivers/nand/Makefile
#
-obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
@@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d6773281fd..8691e0482ed 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
#define TIMEOUT HZ
-static struct usb_device_id alauda_table [] = {
+static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9e067..04d30887ca7 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e42404..3ffe05db492 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
- /* Initialize structures */
- memset(au1550_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+ p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
- iounmap((void *)p_nand);
+ iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
- iounmap((void *)p_nand);
+ iounmap(p_nand);
}
module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98eeb3..dfe262c726f 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
* layout we'll be using.
*/
- err = nand_scan_ident(board_mtd, 1);
+ err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e606f..2974995e194 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
static int hardware_ecc = 1;
#else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
* transferred to generate the correct ECC register
* values.
*/
- bfin_write_NFC_RST(0x1);
+ bfin_write_NFC_RST(ECC_RST);
SSYNC();
disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
-/* The DMAs have different size on BF52x and BF54x */
+ /* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
set_dma_x_count(CH_NFC, (page_size >> 1));
set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* Start PAGE read/write operation */
if (is_read)
- bfin_write_NFC_PGCTL(0x1);
+ bfin_write_NFC_PGCTL(PG_RD_START);
else
- bfin_write_NFC_PGCTL(0x2);
+ bfin_write_NFC_PGCTL(PG_WR_START);
wait_for_completion(&info->dma_completion);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9ccea6..db1dfc5a1b1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2)) {
+ if (nand_scan_ident(mtd, 2, NULL)) {
err = -ENXIO;
goto out_irq;
}
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
kfree(mtd);
}
-static struct pci_device_id cafe_nand_tbl[] = {
+static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8e62f..9c9d893affe 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, res1->end - res1->start);
- base = ioremap(res2->start, res2->end - res2->start);
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 00000000000..ca03428b59c
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,2134 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/* We define a module parameter that allows the user to override
+ * the hardware and decide what timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS -1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
+			" -1 indicates use of the default timings");
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience. */
+#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
+ INTR_STATUS0__ECC_TRANSACTION_DONE | \
+ INTR_STATUS0__ECC_ERR | \
+ INTR_STATUS0__PROGRAM_FAIL | \
+ INTR_STATUS0__LOAD_COMP | \
+ INTR_STATUS0__PROGRAM_COMP | \
+ INTR_STATUS0__TIME_OUT | \
+ INTR_STATUS0__ERASE_FAIL | \
+ INTR_STATUS0__RST_COMP | \
+ INTR_STATUS0__ERASE_COMP)
+
+/* indicates whether the internal value for the flash bank is valid */
+#define CHIP_SELECT_INVALID -1
+
+#define SUPPORT_8BITECC 1
+
+/* This macro divides two integers and rounds fractional values up
+ * to the nearest integer value. */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+
+/* this macro allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
+
+/* These constants are defined by the driver to enable common driver
+ configuration options. */
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define MAIN_SPARE_ACCESS 0x43
+
+#define DENALI_READ 0
+#define DENALI_WRITE 0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE 0
+#define ADDR_CYCLE 1
+#define STATUS_CYCLE 2
+
+/* this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller */
+#define BANK(x) ((x) << 24)
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+
+
+/* these are static lookup tables that give us easy access to
+ registers in the NAND controller.
+ */
+static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1,
+ INTR_STATUS2,
+ INTR_STATUS3};
+
+static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
+ DEVICE_RESET__BANK1,
+ DEVICE_RESET__BANK2,
+ DEVICE_RESET__BANK3};
+
+static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
+ INTR_STATUS1__TIME_OUT,
+ INTR_STATUS2__TIME_OUT,
+ INTR_STATUS3__TIME_OUT};
+
+static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
+ INTR_STATUS1__RST_COMP,
+ INTR_STATUS2__RST_COMP,
+ INTR_STATUS3__RST_COMP};
+
+/* specifies the debug level of the driver */
+static int nand_debug_level = 0;
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+#define DEBUG_DENALI 0
+
+/* This is a wrapper for writing to the Denali registers.
+ * It allows us to create debug information so we can
+ * observe how the driver is programming the device.
+ * It uses the standard Linux (val, addr) argument convention. */
+static void denali_write32(uint32_t value, void *addr)
+{
+ iowrite32(value, addr);
+
+#if DEBUG_DENALI
+ printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
+#endif
+}
+
+/* Certain operations for the Denali NAND controller use an indexed mode to
+ * read/write data. The operation is performed by writing the command's address
+ * value to the device memory, followed by the data. This function abstracts
+ * that common sequence.
+ */
+static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
+{
+ denali_write32(address, denali->flash_mem);
+ denali_write32(data, denali->flash_mem + 0x10);
+}
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+ uint32_t address, uint32_t *pdata)
+{
+ denali_write32(address, denali->flash_mem);
+ *pdata = ioread32(denali->flash_mem + 0x10);
+}
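
The two indexed-access helpers above implement the protocol described in that comment: the low bits of the written address select the cycle type (COMMAND_CYCLE, ADDR_CYCLE, STATUS_CYCLE) and BANK() selects the chip. A hedged sketch of reading one ID byte from bank 0, mirroring what find_valid_banks() does later in this file (MODE_11 and the cycle constants come from denali.h and the definitions above):

	uint32_t cmd = MODE_11 | BANK(0);	/* indexed (MODE_11) access to bank 0 */
	uint32_t id;

	index_addr(denali, cmd | COMMAND_CYCLE, 0x90);		/* READ ID command */
	index_addr(denali, cmd | ADDR_CYCLE, 0x00);		/* address cycle */
	index_addr_read_data(denali, cmd | STATUS_CYCLE, &id);	/* data comes back here */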
+
+/* We need to buffer some data for some of the NAND core routines.
+ * The operations manage buffering that data. */
+static void reset_buf(struct denali_nand_info *denali)
+{
+ denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+ BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
+ denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+ uint32_t cmd = 0x0;
+
+ /* initialize the data buffer to store status */
+ reset_buf(denali);
+
+ /* initiate a device status read */
+ cmd = MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
+ denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
+
+ /* update buffer with status value */
+ write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
+
+#if DEBUG_DENALI
+ printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
+#endif
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = reset_complete[denali->flash_bank] |
+ operation_timeout[denali->flash_bank];
+ int bank = 0;
+
+ clear_interrupts(denali);
+
+ bank = device_reset_banks[denali->flash_bank];
+ denali_write32(bank, denali->flash_reg + DEVICE_RESET);
+
+ irq_status = wait_for_irq(denali, irq_mask);
+
+	if (irq_status & operation_timeout[denali->flash_bank])
+		printk(KERN_ERR "reset bank failed.\n");
+
+/* Reset the flash controller */
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
+{
+ uint32_t i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
+ denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ (reset_complete[i] | operation_timeout[i])))
+ ;
+ if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ operation_timeout[i])
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ return PASS;
+}
+
+/* This routine calculates the ONFI timing values for a given mode and programs
+ * the clocking registers accordingly. The mode is determined by the
+ * get_onfi_nand_para routine.
+ */
+static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
+{
+ uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+ uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+ uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+ uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+ uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+ uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+ uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+ uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+ uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ uint16_t TclsRising = 1;
+ uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ uint16_t dv_window = 0;
+ uint16_t en_lo, en_hi;
+ uint16_t acc_clks;
+ uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid =
+ data_invalid_rhoh <
+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
+ acc_clks++;
+
+ if ((data_invalid - acc_clks * CLK_X) < 2)
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (!TclsRising)
+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
+ (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
+ acc_clks = 6;
+
+ denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
+ denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
+ denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
+ denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
+ denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
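
As a worked example of the calculation above, assume purely for illustration that CLK_X = 5 and CLK_MULTI = 4 (their real values live in denali.h). For ONFI mode 0 (Trp = 50, Treh = 30, Trc = 100 ns):

	en_lo = CEIL_DIV(50, 5) = 10 and en_hi = CEIL_DIV(30, 5) = 6;
	(en_lo + en_hi) * CLK_X = 80 < Trc = 100, so en_lo += CEIL_DIV(20, 5) = 4, giving en_lo = 14;
	en_lo + en_hi = 20 >= CLK_MULTI, so no further adjustment is needed before the data-valid-window loop.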
+
+/* configures the initial ECC settings for the controller */
+static void set_ecc_config(struct denali_nand_info *denali)
+{
+#if SUPPORT_8BITECC
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+
+ if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
+ == 1) {
+ denali->dev_info.wECCBytesPerSector = 4;
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag =
+ denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ } else {
+ denali->dev_info.wECCBytesPerSector =
+ (ioread32(denali->flash_reg + ECC_CORRECTION) &
+ ECC_CORRECTION__VALUE) * 13 / 8;
+ if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
+ denali->dev_info.wECCBytesPerSector += 2;
+ else
+ denali->dev_info.wECCBytesPerSector += 1;
+
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ }
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+ int i;
+ uint16_t blks_lun_l, blks_lun_h, n_of_luns;
+ uint32_t blockperlun, id;
+
+ denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
+
+ while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK2,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK3,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__TIME_OUT)))
+ ;
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 2!\n");
+ }
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 1!\n");
+ }
+ }
+
+ denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
+ denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
+ denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
+ denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);
+
+ denali->dev_info.wONFIDevFeatures =
+ ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
+ denali->dev_info.wONFIOptCommands =
+ ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
+ denali->dev_info.wONFITimingMode =
+ ioread32(denali->flash_reg + ONFI_TIMING_MODE);
+ denali->dev_info.wONFIPgmCacheTimingMode =
+ ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
+
+ n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
+ blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
+ blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
+
+ blockperlun = (blks_lun_h << 16) | blks_lun_l;
+
+ denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
+
+ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
+ break;
+ }
+
+ NAND_ONFi_Timing_Mode(denali, i);
+
+ index_addr(denali, MODE_11 | 0, 0x90);
+ index_addr(denali, MODE_11 | 1, 0);
+
+ for (i = 0; i < 3; i++)
+ index_addr_read_data(denali, MODE_11 | 2, &id);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
+
+ denali->dev_info.MLCDevice = id & 0x0C;
+
+ /* By now, all the ONFI devices we know support the page cache */
+ /* rw feature. So here we enable the pipeline_rw_ahead feature */
+ /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali)
+{
+ uint8_t no_of_planes;
+ uint32_t blk_size;
+ uint64_t plane_size, capacity;
+ uint32_t id_bytes[5];
+ int i;
+
+ index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ id_bytes[0], id_bytes[1], id_bytes[2],
+ id_bytes[3], id_bytes[4]);
+
+ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ denali_write32(5, denali->flash_reg + ACC_CLKS);
+ denali_write32(20, denali->flash_reg + RE_2_WE);
+ denali_write32(12, denali->flash_reg + WE_2_RE);
+ denali_write32(14, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
+ }
+
+ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
+ plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
+ blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
+ capacity = (uint64_t)128 * plane_size * no_of_planes;
+
+ do_div(capacity, blk_size);
+ denali->dev_info.wTotalBlocks = capacity;
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t tmp;
+
+	/* Workaround for a controller bug that reports a wrong spare
+	 * area size for some kinds of Toshiba NAND devices */
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ }
+
+	/* Toshiba NAND cannot report its block count, so the user must
+	 * provide the correct block count in a scratch register before
+	 * the Linux NAND driver is loaded. If no valid value is found
+	 * in the scratch register, fall back to the default block count. */
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d\n",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t main_size, spare_size;
+
+ switch (denali->dev_info.wDeviceID) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ denali->dev_info.MLCDevice = 1;
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_WARN,
+			"Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
+			"Will use default parameter values instead.\n",
+ denali->dev_info.wDeviceID);
+ }
+
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d\n",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+/* Determines how many NAND chips are connected to the controller. Note that
+ * on Intel CE4100 devices we do not support more than one chip.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+ uint32_t id[LLD_MAX_FLASH_BANKS];
+ int i;
+
+ denali->total_used_banks = 1;
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (!(id[i] & 0x0ff))
+ break; /* WTF? */
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ denali->total_used_banks++;
+ else
+ break;
+ }
+ }
+
+	if (denali->platform == INTEL_CE4100) {
+		/* Platform limitations of the CE4100 device limit
+		 * users to a single chip solution for NAND.
+		 * Multichip support is not enabled.
+		 */
+		if (denali->total_used_banks != 1) {
+			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
+				"a single NAND device.\n");
+			BUG();
+		}
+	}
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
+ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ denali->dev_info.wSpectraStartBlock =
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MIN_VALUE) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
+ MIN_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wSpectraEndBlock =
+ (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
+ MAX_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+
+ if (denali->dev_info.wSpectraEndBlock >=
+ denali->dev_info.wTotalBlocks) {
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ }
+
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+}
+
+static void dump_device_info(struct denali_nand_info *denali)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
+ denali->dev_info.wDeviceMaker);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
+ denali->dev_info.wDeviceID);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
+ denali->dev_info.wDeviceType);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
+ denali->dev_info.wSpectraStartBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
+ denali->dev_info.wSpectraEndBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
+ denali->dev_info.wTotalBlocks);
+ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
+ denali->dev_info.wPagesPerBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
+ denali->dev_info.wPageSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
+ denali->dev_info.wPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
+ denali->dev_info.wPageSpareSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
+ denali->dev_info.wNumPageSpareFlag);
+ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
+ denali->dev_info.wECCBytesPerSector);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
+ denali->dev_info.wBlockSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
+ denali->dev_info.wBlockDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
+ denali->dev_info.wDataBlockNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
+ denali->dev_info.bPlaneNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
+ denali->dev_info.wDeviceMainAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
+ denali->dev_info.wDeviceSpareAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
+ denali->dev_info.wDevicesConnected);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
+ denali->dev_info.wDeviceWidth);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
+ denali->dev_info.wHWRevision);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
+ denali->dev_info.wHWFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
+ denali->dev_info.wONFIDevFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
+ denali->dev_info.wONFIOptCommands);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
+ denali->dev_info.wONFITimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
+ denali->dev_info.wONFIPgmCacheTimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
+ denali->dev_info.MLCDevice ? "Yes" : "No");
+ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
+ denali->dev_info.wSpareSkipBytes);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
+ denali->dev_info.nBitsInPageNumber);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
+ denali->dev_info.nBitsInPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
+ denali->dev_info.nBitsInBlockDataSize);
+}
+
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
+{
+ uint16_t status = PASS;
+ uint8_t no_of_planes;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
+ denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
+ denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
+ denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
+ denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);
+
+ denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;
+
+ if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para(denali))
+ return FAIL;
+ } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali);
+ } else {
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ }
+
+	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values: "
+			"acc_clks: %d, re_2_we: %d, we_2_re: %d, "
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
+ denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);
+
+ denali->dev_info.wDeviceMainAreaSize =
+ ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali->dev_info.wDeviceSpareAreaSize =
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+
+ denali->dev_info.wPageDataSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+
+	/* Note: when using the Micron 4K NAND device, the controller reports
+	 * the page spare size as 216 bytes, but Micron's datasheet says it is
+	 * 218 bytes. Forcing it to 218 bytes keeps the controller from working
+	 * correctly, so just leave the reported value alone. Keep in mind that
+	 * this bug may cause other problems in the future. - Yunpeng 2008-10-10
+	 */
+ denali->dev_info.wPageSpareSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+
+ denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);
+
+ denali->dev_info.wPageSize =
+ denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
+ denali->dev_info.wBlockSize =
+ denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
+ denali->dev_info.wBlockDataSize =
+ denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;
+
+ denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
+ denali->dev_info.wDeviceType =
+ ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);
+
+ denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+
+ denali->dev_info.wSpareSkipBytes =
+ ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
+ denali->dev_info.wDevicesConnected;
+
+ denali->dev_info.nBitsInPageNumber =
+ ilog2(denali->dev_info.wPagesPerBlock);
+ denali->dev_info.nBitsInPageDataSize =
+ ilog2(denali->dev_info.wPageDataSize);
+ denali->dev_info.nBitsInBlockDataSize =
+ ilog2(denali->dev_info.wBlockDataSize);
+
+ set_ecc_config(denali);
+
+ no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
+ NUMBER_OF_PLANES__VALUE;
+
+ switch (no_of_planes) {
+ case 0:
+ case 1:
+ case 3:
+ case 7:
+ denali->dev_info.bPlaneNum = no_of_planes + 1;
+ break;
+ default:
+ status = FAIL;
+ break;
+ }
+
+ find_valid_banks(denali);
+
+ detect_partition_feature(denali);
+
+ dump_device_info(denali);
+
+ /* If the user specified to override the default timings
+ * with a specific ONFI mode, we apply those changes here.
+ */
+	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
+
+ return status;
+}
+
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
+ uint16_t INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ else
+ denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/* validation function to verify that the controlling software is making
+ * a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+ return (flash_bank >= 0 && flash_bank < 4);
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+ uint32_t int_mask = 0;
+
+ /* Disable global interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+
+ int_mask = DENALI_IRQ_ALL;
+
+ /* Clear all status bits */
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+
+ denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+ free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
+{
+ denali_write32(int_mask, denali->flash_reg + INTR_EN0);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN1);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN2);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN3);
+}
+
+/* This function only returns when an interrupt that this driver cares about
+ * occurs. This is to reduce the overhead of servicing interrupts
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+ return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+ uint32_t status = 0x0;
+ spin_lock_irq(&denali->irq_lock);
+
+ status = read_interrupt_status(denali);
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
+ denali->idx %= 32;
+#endif
+
+ denali->irq_status = 0x0;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+#if DEBUG_DENALI
+static void print_irq_log(struct denali_nand_info *denali)
+{
+ int i = 0;
+
+ printk("ISR debug log index = %X\n", denali->idx);
+ for (i = 0; i < 32; i++)
+ {
+ printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
+ }
+}
+#endif
+
+/* This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared
+ * interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_nand_info *denali = dev_id;
+ uint32_t irq_status = 0x0;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&denali->irq_lock);
+
+ /* check to see if a valid NAND chip has
+ * been selected.
+ */
+	if (is_flash_bank_valid(denali->flash_bank)) {
+		/* check to see if the controller generated
+		 * the interrupt, since this is a shared interrupt */
+		irq_status = denali_irq_detected(denali);
+		if (irq_status != 0) {
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
+ denali->idx %= 32;
+
+ printk("IRQ status = 0x%04x\n", irq_status);
+#endif
+ /* handle interrupt */
+ /* first acknowledge it */
+ clear_interrupt(denali, irq_status);
+ /* store the status in the device context for someone
+ to read */
+ denali->irq_status |= irq_status;
+ /* notify anyone who cares that it happened */
+ complete(&denali->complete);
+ /* tell the OS that we've handled this */
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&denali->irq_lock);
+ return result;
+}
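+
+/* The flash bank (chip select) is encoded starting at bit 24 of the indexed
+ * address written to the controller (e.g. MODE_xx | BANK(x) | page). */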
+#define BANK(x) ((x) << 24)
+
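+/* Sleeps until the ISR reports one of the interrupts in irq_mask, or until
+ * the 1 second timeout expires. Returns the matching status bits, or 0 on
+ * timeout. */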
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ unsigned long comp_res = 0;
+ uint32_t intr_status = 0;
+ bool retry = false;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ do
+ {
+#if DEBUG_DENALI
+ printk("waiting for 0x%x\n", irq_mask);
+#endif
+ comp_res = wait_for_completion_timeout(&denali->complete, timeout);
+ spin_lock_irq(&denali->irq_lock);
+ intr_status = denali->irq_status;
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
+ denali->idx %= 32;
+#endif
+
+ if (intr_status & irq_mask)
+ {
+ denali->irq_status &= ~irq_mask;
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ if (retry) printk("status on retry = 0x%x\n", intr_status);
+#endif
+ /* our interrupt was detected */
+ break;
+ }
+ else
+ {
+ /* these are not the interrupts you are looking for -
+ need to wait again */
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ print_irq_log(denali);
+ printk("received irq nobody cared: irq_status = 0x%x,"
+ " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
+#endif
+ retry = true;
+ }
+ } while (comp_res != 0);
+
+ if (comp_res == 0)
+ {
+ /* timeout */
+ printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
+
+ intr_status = 0;
+ }
+ return intr_status;
+}
+
+/* This helper function sets up the registers for ECC and whether or not
+ the spare area will be transferred. */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare)
+{
+ int ecc_en_flag = 0, transfer_spare_flag = 0;
+
+ /* set ECC, transfer spare bits if needed */
+ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+ transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+ /* Enable spare area/ECC per user's request. */
+ denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/* sends a pipeline command operation to the controller. See the Denali NAND
+ controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare, int access_type,
+ int op)
+{
+ int status = PASS;
+ uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
+ irq_mask = 0;
+
+ if (op == DENALI_READ)
+ irq_mask = INTR_STATUS0__LOAD_COMP;
+ else if (op == DENALI_WRITE)
+ irq_mask = 0;
+ else
+ BUG();
+
+ setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ addr = BANK(denali->flash_bank) | denali->page;
+
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
+ {
+ /* read spare area */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_READ)
+ {
+ /* setup page read request for access type */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ /* page 33 of the NAND controller spec indicates we should not
+ use the pipeline commands in Spare area only mode. So we
+ don't.
+ */
+ if (access_type == SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else
+ {
+ index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "cmd, page, addr on timeout "
+ "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
+ status = FAIL;
+ }
+ else
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ }
+ }
+ return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* verify that the len is a multiple of 4. see comment in
+ * read_data_from_flash_mem() */
+ BUG_ON((len % 4) != 0);
+
+ /* write the data to the flash memory */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ denali_write32(*buf32++, denali->flash_mem + 0x10);
+ }
+ return i*4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* we assume that len will be a multiple of 4, if not
+ * it would be nice to know about it ASAP rather than
+ * have random failures...
+ *
+ * This assumption is based on the fact that this
+ * function is designed to be used to read flash pages,
+ * which are typically multiples of 4...
+ */
+
+ BUG_ON((len % 4) != 0);
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ *buf32++ = ioread32(denali->flash_mem + 0x10);
+ }
+ return i*4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+ int status = 0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS)
+ {
+ write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "OOB write failed\n");
+ status = -EIO;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "unable to send pipeline command\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;
+
+ denali->page = page;
+
+#if DEBUG_DENALI
+ printk("read_oob %d\n", page);
+#endif
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS)
+ {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
+ }
+
+ /* We set the device back to MAIN_ACCESS here as I observed
+ * instability with the controller if you do a block erase
+ * and the last transaction was a SPARE_ACCESS. Block erase
+ * is reliable (according to the MTD test infrastructure)
+ * if you are in MAIN_ACCESS.
+ */
+ addr = BANK(denali->flash_bank) | denali->page;
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+ }
+}
+
+/* this function examines a buffer to see if it contains all 0xFF bytes,
+ * which indicates that it is part of an erased region of flash.
+ */
+static bool is_erased(uint8_t *buf, int len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ {
+ if (buf[i] != 0xFF)
+ {
+ return false;
+ }
+ }
+ return true;
+}
+#define ECC_SECTOR_SIZE 512
+
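+/* The ECC_ERROR_ADDRESS and ERR_CORRECTION_INFO registers pack several fields
+ * into one value; the helpers below extract the failing sector, byte offset,
+ * correction value, device number and the "last error" flag. */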
+#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
+#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
+
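+/* Walks the controller's reported ECC errors and corrects single-byte errors
+ * in the data buffer. Returns true when an uncorrectable error was seen, so
+ * the caller can check whether the page is simply erased. */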
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint8_t *oobbuf, uint32_t irq_status)
+{
+ bool check_erased_page = false;
+
+ if (irq_status & INTR_STATUS0__ECC_ERR)
+ {
+ /* read the ECC errors. we'll ignore them for now */
+ uint32_t err_address = 0, err_correction_info = 0;
+ uint32_t err_byte = 0, err_sector = 0, err_device = 0;
+ uint32_t err_correction_value = 0;
+
+ do
+ {
+ err_address = ioread32(denali->flash_reg +
+ ECC_ERROR_ADDRESS);
+ err_sector = ECC_SECTOR(err_address);
+ err_byte = ECC_BYTE(err_address);
+
+
+ err_correction_info = ioread32(denali->flash_reg +
+ ERR_CORRECTION_INFO);
+ err_correction_value =
+ ECC_CORRECTION_VALUE(err_correction_info);
+ err_device = ECC_ERR_DEVICE(err_correction_info);
+
+ if (ECC_ERROR_CORRECTABLE(err_correction_info))
+ {
+ /* offset in our buffer is computed as:
+ sector number * sector size + offset in
+ sector
+ */
+ int offset = err_sector * ECC_SECTOR_SIZE +
+ err_byte;
+ if (offset < denali->mtd.writesize)
+ {
+ /* correct the ECC error */
+ buf[offset] ^= err_correction_value;
+ denali->mtd.ecc_stats.corrected++;
+ }
+ else
+ {
+ /* bummer, couldn't correct the error */
+ printk(KERN_ERR "ECC offset invalid\n");
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ else
+ {
+ /* if the error is not correctable, need to
+ * look at the page to see if it is an erased page.
+ * if so, then it's not a real ECC error */
+ check_erased_page = true;
+ }
+
+#if DEBUG_DENALI
+ printk("Detected ECC error in page %d: err_addr = 0x%08x,"
+ " info to fix is 0x%08x\n", denali->page, err_address,
+ err_correction_info);
+#endif
+ } while (!ECC_LAST_ERR(err_correction_info));
+ }
+ return check_erased_page;
+}
+
+/* programs the controller to enable or disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+ uint32_t reg_val = 0x0;
+
+ if (en) reg_val = DMA_ENABLE__FLAG;
+
+ denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
+ ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* sets up the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+ uint32_t mode = 0x0;
+ const int page_count = 1;
+ dma_addr_t addr = denali->buf.dma_buf;
+
+ mode = MODE_10 | BANK(denali->flash_bank);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+ /* 2. set the high 16 bits of the memory address (carried in bits 23:8 of the command) */
+ index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
+
+ /* 3. set the low 16 bits of the memory address (carried in bits 23:8 of the command) */
+ index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes*/
+ index_addr(denali, mode | 0x14000, 0x2400);
+}
+
+/* writes a page. The caller specifies whether this is a raw or ECC-protected
+ transfer, and this function handles the configuration details. */
+static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, bool raw_xfer)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+
+ /* if it is a raw xfer, we want to disable ecc, and send
+ * the spare area.
+ * !raw_xfer - enable ecc
+ * raw_xfer - transfer spare
+ */
+ setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+ /* copy buffer into DMA buffer */
+ memcpy(denali->buf.buf, buf, mtd->writesize);
+
+ if (raw_xfer)
+ {
+ /* transfer the data to the spare area */
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
+ }
+
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+
+ clear_interrupts(denali);
+ denali_enable_dma(denali, true);
+
+ denali_setup_dma(denali, DENALI_WRITE);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
+ denali->status =
+ (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+ }
+
+ denali_enable_dma(denali, false);
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+}
+
+/* NAND core entry points */
+
+/* this is the callback that the NAND core calls to write a page. Since
+ writing a page with ECC or without is similar, all the work is done
+ by write_page above. */
+static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for regular page writes, we let HW handle all the ECC
+ * data written to the device. */
+ write_page(mtd, chip, buf, false);
+}
+
+/* This is the callback that the NAND core calls to write a page without ECC.
+ raw access is similar to ECC page writes, so all the work is done in the
+ write_page() function above.
+ */
+static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for raw page writes, we want to disable ECC and simply write
+ whatever data is in the buffer. */
+ write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ read_oob_data(mtd, chip->oob_poi, page);
+
+ return 0; /* notify NAND core to send command to
+ * NAND device. */
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR;
+ bool check_erased_page = false;
+
+ setup_ecc_for_xfer(denali, true, false);
+
+ denali_enable_dma(denali, true);
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+
+ check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
+ denali_enable_dma(denali, false);
+
+ if (check_erased_page)
+ {
+ read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
+
+ /* check ECC failures that may have occurred on erased pages */
+ if (!is_erased(buf, denali->mtd.writesize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ if (!is_erased(buf, denali->mtd.oobsize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ return 0;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+
+ setup_ecc_for_xfer(denali, false, true);
+ denali_enable_dma(denali, true);
+
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ denali_enable_dma(denali, false);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+ memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+ return 0;
+}
+
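+/* The NAND core reads ID and status responses one byte at a time through this
+ * hook; the bytes were previously staged in denali->buf by denali_cmdfunc(). */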
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t result = 0xff;
+
+ if (denali->buf.head < denali->buf.tail)
+ {
+ result = denali->buf.buf[denali->buf.head++];
+ }
+
+#if DEBUG_DENALI
+ printk("read byte -> 0x%02x\n", result);
+#endif
+ return result;
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+#if DEBUG_DENALI
+ printk("denali select chip %d\n", chip);
+#endif
+ spin_lock_irq(&denali->irq_lock);
+ denali->flash_bank = chip;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status = denali->status;
+ denali->status = 0;
+
+#if DEBUG_DENALI
+ printk("waitfunc %d\n", status);
+#endif
+ return status;
+}
+
+static void denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ uint32_t cmd = 0x0, irq_status = 0;
+
+#if DEBUG_DENALI
+ printk("erase page: %d\n", page);
+#endif
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ /* setup page read request for access type */
+ cmd = MODE_10 | BANK(denali->flash_bank) | page;
+ index_addr(denali, (uint32_t)cmd, 0x1);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
+ INTR_STATUS0__ERASE_FAIL);
+
+ denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+}
+
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+#if DEBUG_DENALI
+ printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
+#endif
+ switch (cmd)
+ {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ reset_buf(denali);
+ if (denali->flash_bank < denali->total_used_banks)
+ {
+ /* write manufacturer information into nand
+ buffer for NAND subsystem to fetch.
+ */
+ write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
+ write_byte_to_buf(denali, denali->dev_info.wDeviceID);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < 5; i++)
+ write_byte_to_buf(denali, 0xff);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
+ break;
+ }
+}
+
+/* stubs for ECC functions not used by the NAND core */
+static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc_code)
+{
+ printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
+ BUG();
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+ denali_irq_init(denali);
+ NAND_Flash_Reset(denali);
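+ /* enable the ready/busy pin for all four banks */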
+ denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
+ denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+ /* Set sane default values for these registers at init time */
+ denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ denali_write32(1, denali->flash_reg + ECC_ENABLE);
+}
+
+/* ECC layout for SLC devices. The Denali spec indicates SLC is fixed at
+ 4 ECC bytes per sector */
+#define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_slc = {
+ .eccbytes = 4,
+ .eccpos = { 0, 1, 2, 3 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_SLC,
+ .length = 64 - ECC_BYTES_SLC
+ }}
+};
+
+#define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_mlc_14bit = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_MLC,
+ .length = 64 - ECC_BYTES_MLC
+ }}
+};
+
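+/* marker patterns used to locate the main and mirror bad block tables that
+ * are kept in the last blocks of the device (NAND_BBT_LASTBLOCK) */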
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* initialize driver data structures */
+void denali_drv_init(struct denali_nand_info *denali)
+{
+ denali->idx = 0;
+
+ /* setup interrupt handler */
+ /* the completion object will be used to notify
+ * the waiter that the interrupt has arrived */
+ init_completion(&denali->complete);
+
+ /* the spinlock will be used to synchronize the ISR
+ * with anything else that might access shared
+ * data (the interrupt status) */
+ spin_lock_init(&denali->irq_lock);
+
+ /* indicate that MTD has not selected a valid bank yet */
+ denali->flash_bank = CHIP_SELECT_INVALID;
+
+ /* initialize our irq_status variable to indicate no interrupts */
+ denali->irq_status = 0;
+}
+
+/* driver entry point */
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ goto failed_enable;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ /* Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
+ {
+ printk("Intel CE4100 only supports ONFI timing mode 1 "
+ "or below\n");
+ ret = -EINVAL;
+ goto failed_enable;
+ }
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: No second BAR for PCI device; assuming %08Lx\n",
+ (uint64_t)csr_base);
+ }
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+
+ if (ret)
+ {
+ printk(KERN_ERR "Spectra: no usable DMA configuration\n");
+ goto failed_enable;
+ }
+ denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
+ {
+ printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
+ goto failed_enable;
+ }
+
+ pci_set_master(dev);
+ denali->dev = dev;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Unable to request memory regions\n");
+ goto failed_req_csr;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
+ (uint64_t)csr_base, denali->flash_reg, csr_len);
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ iounmap(denali->flash_reg);
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped flash base address: "
+ "0x%p, len: %ld\n",
+ denali->flash_mem, mem_len);
+
+ denali_hw_init(denali);
+ denali_drv_init(denali);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+ if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ DENALI_NAND_NAME, denali)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto failed_request_irq;
+ }
+
+ /* now that our ISR is registered, we can enable interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, true);
+
+ pci_set_drvdata(dev, denali);
+
+ NAND_Read_Device_ID(denali);
+
+ /* The maximum page size MTD supports varies by kernel version.
+ Verify here that this kernel can handle the device.
+ */
+ if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+ {
+ ret = -ENODEV;
+ printk(KERN_ERR "Spectra: device size not supported by this "
+ "version of MTD.");
+ goto failed_nand;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->mtd.name = "Denali NAND";
+ denali->mtd.owner = THIS_MODULE;
+ denali->mtd.priv = &denali->nand;
+
+ /* register the driver with the NAND core subsystem */
+ denali->nand.select_chip = denali_select_chip;
+ denali->nand.cmdfunc = denali_cmdfunc;
+ denali->nand.read_byte = denali_read_byte;
+ denali->nand.waitfunc = denali_waitfunc;
+
+ /* scan for NAND devices attached to the controller
+ * this is the first stage in a two step process to register
+ * with the nand subsystem */
+ if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ /* second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management. */
+
+ /* Bad block management */
+ denali->nand.bbt_td = &bbt_main_descr;
+ denali->nand.bbt_md = &bbt_mirror_descr;
+
+ /* skip the scan for now until we have OOB read and write support */
+ denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ if (denali->dev_info.MLCDevice)
+ {
+ denali->nand.ecc.layout = &nand_oob_mlc_14bit;
+ denali->nand.ecc.bytes = ECC_BYTES_MLC;
+ }
+ else /* SLC */
+ {
+ denali->nand.ecc.layout = &nand_oob_slc;
+ denali->nand.ecc.bytes = ECC_BYTES_SLC;
+ }
+
+ /* These functions are required by the NAND core framework; otherwise
+ the NAND core will assert. However, we don't need them, so we stub
+ them out. */
+ denali->nand.ecc.calculate = denali_ecc_calculate;
+ denali->nand.ecc.correct = denali_ecc_correct;
+ denali->nand.ecc.hwctl = denali_ecc_hwctl;
+
+ /* override the default read operations */
+ denali->nand.ecc.size = denali->mtd.writesize;
+ denali->nand.ecc.read_page = denali_read_page;
+ denali->nand.ecc.read_page_raw = denali_read_page_raw;
+ denali->nand.ecc.write_page = denali_write_page;
+ denali->nand.ecc.write_page_raw = denali_write_page_raw;
+ denali->nand.ecc.read_oob = denali_read_oob;
+ denali->nand.ecc.write_oob = denali_write_oob;
+ denali->nand.erase_cmd = denali_erase;
+
+ if (nand_scan_tail(&denali->mtd))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ ret = add_mtd_device(&denali->mtd);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
+ goto failed_nand;
+ }
+ return 0;
+
+ failed_nand:
+ denali_irq_cleanup(dev->irq, denali);
+ failed_request_irq:
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ failed_remap_csr:
+ pci_release_regions(dev);
+ failed_req_csr:
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ failed_enable:
+ kfree(denali);
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_release(&denali->mtd);
+ del_mtd_device(&denali->mtd);
+
+ denali_irq_cleanup(dev->irq, denali);
+
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int __init denali_init(void)
+{
+ printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+
+/* module exit point; unregister the PCI driver */
+static void __exit denali_exit(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+
+module_init(denali_init);
+module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 00000000000..422a29ab2f6
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,816 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/mtd/nand.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
+#define INTR_STATUS0 0x410
+#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS0__ECC_ERR 0x0002
+#define INTR_STATUS0__DMA_CMD_COMP 0x0004
+#define INTR_STATUS0__TIME_OUT 0x0008
+#define INTR_STATUS0__PROGRAM_FAIL 0x0010
+#define INTR_STATUS0__ERASE_FAIL 0x0020
+#define INTR_STATUS0__LOAD_COMP 0x0040
+#define INTR_STATUS0__PROGRAM_COMP 0x0080
+#define INTR_STATUS0__ERASE_COMP 0x0100
+#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS0__LOCKED_BLK 0x0400
+#define INTR_STATUS0__UNSUP_CMD 0x0800
+#define INTR_STATUS0__INT_ACT 0x1000
+#define INTR_STATUS0__RST_COMP 0x2000
+#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS0__PAGE_XFER_INC 0x8000
+
+#define INTR_EN0 0x420
+#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN0__ECC_ERR 0x0002
+#define INTR_EN0__DMA_CMD_COMP 0x0004
+#define INTR_EN0__TIME_OUT 0x0008
+#define INTR_EN0__PROGRAM_FAIL 0x0010
+#define INTR_EN0__ERASE_FAIL 0x0020
+#define INTR_EN0__LOAD_COMP 0x0040
+#define INTR_EN0__PROGRAM_COMP 0x0080
+#define INTR_EN0__ERASE_COMP 0x0100
+#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN0__LOCKED_BLK 0x0400
+#define INTR_EN0__UNSUP_CMD 0x0800
+#define INTR_EN0__INT_ACT 0x1000
+#define INTR_EN0__RST_COMP 0x2000
+#define INTR_EN0__PIPE_CMD_ERR 0x4000
+#define INTR_EN0__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT0 0x430
+#define PAGE_CNT0__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR0 0x440
+#define ERR_PAGE_ADDR0__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR0 0x450
+#define ERR_BLOCK_ADDR0__VALUE 0xffff
+
+#define INTR_STATUS1 0x460
+#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS1__ECC_ERR 0x0002
+#define INTR_STATUS1__DMA_CMD_COMP 0x0004
+#define INTR_STATUS1__TIME_OUT 0x0008
+#define INTR_STATUS1__PROGRAM_FAIL 0x0010
+#define INTR_STATUS1__ERASE_FAIL 0x0020
+#define INTR_STATUS1__LOAD_COMP 0x0040
+#define INTR_STATUS1__PROGRAM_COMP 0x0080
+#define INTR_STATUS1__ERASE_COMP 0x0100
+#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS1__LOCKED_BLK 0x0400
+#define INTR_STATUS1__UNSUP_CMD 0x0800
+#define INTR_STATUS1__INT_ACT 0x1000
+#define INTR_STATUS1__RST_COMP 0x2000
+#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS1__PAGE_XFER_INC 0x8000
+
+#define INTR_EN1 0x470
+#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN1__ECC_ERR 0x0002
+#define INTR_EN1__DMA_CMD_COMP 0x0004
+#define INTR_EN1__TIME_OUT 0x0008
+#define INTR_EN1__PROGRAM_FAIL 0x0010
+#define INTR_EN1__ERASE_FAIL 0x0020
+#define INTR_EN1__LOAD_COMP 0x0040
+#define INTR_EN1__PROGRAM_COMP 0x0080
+#define INTR_EN1__ERASE_COMP 0x0100
+#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN1__LOCKED_BLK 0x0400
+#define INTR_EN1__UNSUP_CMD 0x0800
+#define INTR_EN1__INT_ACT 0x1000
+#define INTR_EN1__RST_COMP 0x2000
+#define INTR_EN1__PIPE_CMD_ERR 0x4000
+#define INTR_EN1__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT1 0x480
+#define PAGE_CNT1__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR1 0x490
+#define ERR_PAGE_ADDR1__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR1 0x4a0
+#define ERR_BLOCK_ADDR1__VALUE 0xffff
+
+#define INTR_STATUS2 0x4b0
+#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS2__ECC_ERR 0x0002
+#define INTR_STATUS2__DMA_CMD_COMP 0x0004
+#define INTR_STATUS2__TIME_OUT 0x0008
+#define INTR_STATUS2__PROGRAM_FAIL 0x0010
+#define INTR_STATUS2__ERASE_FAIL 0x0020
+#define INTR_STATUS2__LOAD_COMP 0x0040
+#define INTR_STATUS2__PROGRAM_COMP 0x0080
+#define INTR_STATUS2__ERASE_COMP 0x0100
+#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS2__LOCKED_BLK 0x0400
+#define INTR_STATUS2__UNSUP_CMD 0x0800
+#define INTR_STATUS2__INT_ACT 0x1000
+#define INTR_STATUS2__RST_COMP 0x2000
+#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS2__PAGE_XFER_INC 0x8000
+
+#define INTR_EN2 0x4c0
+#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN2__ECC_ERR 0x0002
+#define INTR_EN2__DMA_CMD_COMP 0x0004
+#define INTR_EN2__TIME_OUT 0x0008
+#define INTR_EN2__PROGRAM_FAIL 0x0010
+#define INTR_EN2__ERASE_FAIL 0x0020
+#define INTR_EN2__LOAD_COMP 0x0040
+#define INTR_EN2__PROGRAM_COMP 0x0080
+#define INTR_EN2__ERASE_COMP 0x0100
+#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN2__LOCKED_BLK 0x0400
+#define INTR_EN2__UNSUP_CMD 0x0800
+#define INTR_EN2__INT_ACT 0x1000
+#define INTR_EN2__RST_COMP 0x2000
+#define INTR_EN2__PIPE_CMD_ERR 0x4000
+#define INTR_EN2__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT2 0x4d0
+#define PAGE_CNT2__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR2 0x4e0
+#define ERR_PAGE_ADDR2__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR2 0x4f0
+#define ERR_BLOCK_ADDR2__VALUE 0xffff
+
+#define INTR_STATUS3 0x500
+#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS3__ECC_ERR 0x0002
+#define INTR_STATUS3__DMA_CMD_COMP 0x0004
+#define INTR_STATUS3__TIME_OUT 0x0008
+#define INTR_STATUS3__PROGRAM_FAIL 0x0010
+#define INTR_STATUS3__ERASE_FAIL 0x0020
+#define INTR_STATUS3__LOAD_COMP 0x0040
+#define INTR_STATUS3__PROGRAM_COMP 0x0080
+#define INTR_STATUS3__ERASE_COMP 0x0100
+#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS3__LOCKED_BLK 0x0400
+#define INTR_STATUS3__UNSUP_CMD 0x0800
+#define INTR_STATUS3__INT_ACT 0x1000
+#define INTR_STATUS3__RST_COMP 0x2000
+#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS3__PAGE_XFER_INC 0x8000
+
+#define INTR_EN3 0x510
+#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN3__ECC_ERR 0x0002
+#define INTR_EN3__DMA_CMD_COMP 0x0004
+#define INTR_EN3__TIME_OUT 0x0008
+#define INTR_EN3__PROGRAM_FAIL 0x0010
+#define INTR_EN3__ERASE_FAIL 0x0020
+#define INTR_EN3__LOAD_COMP 0x0040
+#define INTR_EN3__PROGRAM_COMP 0x0080
+#define INTR_EN3__ERASE_COMP 0x0100
+#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN3__LOCKED_BLK 0x0400
+#define INTR_EN3__UNSUP_CMD 0x0800
+#define INTR_EN3__INT_ACT 0x1000
+#define INTR_EN3__RST_COMP 0x2000
+#define INTR_EN3__PIPE_CMD_ERR 0x4000
+#define INTR_EN3__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT3 0x520
+#define PAGE_CNT3__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR3 0x530
+#define ERR_PAGE_ADDR3__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR3 0x540
+#define ERR_BLOCK_ADDR3__VALUE 0xffff
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID_0 0x830
+#define PERM_SRC_ID_0__SRCID 0x00ff
+#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_0 0x840
+#define MIN_BLK_ADDR_0__VALUE 0xffff
+
+#define MAX_BLK_ADDR_0 0x850
+#define MAX_BLK_ADDR_0__VALUE 0xffff
+
+#define MIN_MAX_BANK_0 0x860
+#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_1 0x870
+#define PERM_SRC_ID_1__SRCID 0x00ff
+#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_1 0x880
+#define MIN_BLK_ADDR_1__VALUE 0xffff
+
+#define MAX_BLK_ADDR_1 0x890
+#define MAX_BLK_ADDR_1__VALUE 0xffff
+
+#define MIN_MAX_BANK_1 0x8a0
+#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_2 0x8b0
+#define PERM_SRC_ID_2__SRCID 0x00ff
+#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_2 0x8c0
+#define MIN_BLK_ADDR_2__VALUE 0xffff
+
+#define MAX_BLK_ADDR_2 0x8d0
+#define MAX_BLK_ADDR_2__VALUE 0xffff
+
+#define MIN_MAX_BANK_2 0x8e0
+#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_3 0x8f0
+#define PERM_SRC_ID_3__SRCID 0x00ff
+#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_3 0x900
+#define MIN_BLK_ADDR_3__VALUE 0xffff
+
+#define MAX_BLK_ADDR_3 0x910
+#define MAX_BLK_ADDR_3__VALUE 0xffff
+
+#define MIN_MAX_BANK_3 0x920
+#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_4 0x930
+#define PERM_SRC_ID_4__SRCID 0x00ff
+#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_4 0x940
+#define MIN_BLK_ADDR_4__VALUE 0xffff
+
+#define MAX_BLK_ADDR_4 0x950
+#define MAX_BLK_ADDR_4__VALUE 0xffff
+
+#define MIN_MAX_BANK_4 0x960
+#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_5 0x970
+#define PERM_SRC_ID_5__SRCID 0x00ff
+#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_5 0x980
+#define MIN_BLK_ADDR_5__VALUE 0xffff
+
+#define MAX_BLK_ADDR_5 0x990
+#define MAX_BLK_ADDR_5__VALUE 0xffff
+
+#define MIN_MAX_BANK_5 0x9a0
+#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_6 0x9b0
+#define PERM_SRC_ID_6__SRCID 0x00ff
+#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_6 0x9c0
+#define MIN_BLK_ADDR_6__VALUE 0xffff
+
+#define MAX_BLK_ADDR_6 0x9d0
+#define MAX_BLK_ADDR_6__VALUE 0xffff
+
+#define MIN_MAX_BANK_6 0x9e0
+#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_7 0x9f0
+#define PERM_SRC_ID_7__SRCID 0x00ff
+#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_7 0xa00
+#define MIN_BLK_ADDR_7__VALUE 0xffff
+
+#define MAX_BLK_ADDR_7 0xa10
+#define MAX_BLK_ADDR_7__VALUE 0xffff
+
+#define MIN_MAX_BANK_7 0xa20
+#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
+
+/* flash.h */
+struct device_info_tag {
+ uint16_t wDeviceMaker;
+ uint16_t wDeviceID;
+ uint8_t bDeviceParam0;
+ uint8_t bDeviceParam1;
+ uint8_t bDeviceParam2;
+ uint32_t wDeviceType;
+ uint32_t wSpectraStartBlock;
+ uint32_t wSpectraEndBlock;
+ uint32_t wTotalBlocks;
+ uint16_t wPagesPerBlock;
+ uint16_t wPageSize;
+ uint16_t wPageDataSize;
+ uint16_t wPageSpareSize;
+ uint16_t wNumPageSpareFlag;
+ uint16_t wECCBytesPerSector;
+ uint32_t wBlockSize;
+ uint32_t wBlockDataSize;
+ uint32_t wDataBlockNum;
+ uint8_t bPlaneNum;
+ uint16_t wDeviceMainAreaSize;
+ uint16_t wDeviceSpareAreaSize;
+ uint16_t wDevicesConnected;
+ uint16_t wDeviceWidth;
+ uint16_t wHWRevision;
+ uint16_t wHWFeatures;
+
+ uint16_t wONFIDevFeatures;
+ uint16_t wONFIOptCommands;
+ uint16_t wONFITimingMode;
+ uint16_t wONFIPgmCacheTimingMode;
+
+ uint16_t MLCDevice;
+ uint16_t wSpareSkipBytes;
+
+ uint8_t nBitsInPageNumber;
+ uint8_t nBitsInPageDataSize;
+ uint8_t nBitsInBlockDataSize;
+};
+
+/* ffsdefs.h */
+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
+#define SET 1 /*use this to set a field instead of "pass"*/
+#define FAIL 1 /*failed flag*/
+#define PASS 0 /*success flag*/
+#define ERR -1 /*error flag*/
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* ffsport.h */
+#define VERBOSE 1
+
+#define NAND_DBG_WARN 1
+#define NAND_DBG_DEBUG 2
+#define NAND_DBG_TRACE 3
+
+#ifdef VERBOSE
+#define nand_dbg_print(level, args...) \
+ do { \
+ if (level <= nand_debug_level) \
+ printk(KERN_ALERT args); \
+ } while (0)
+#else
+#define nand_dbg_print(level, args...)
+#endif
+
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID 0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+#define NUM_FREE_BLOCKS_GATE 30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define CUSTOM_CONF_PARAMS 0
+
+#define ONFI_BLOOM_TIME 1
+#define MODE5_WORKAROUND 0
+
+/* lld_nand.h */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_NAND_
+#define _LLD_NAND_
+
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+#define LLD_MAX_FLASH_BANKS 4
+
+#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+
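+/* staging buffer shared by the byte-wise read path (head/tail indices) and
+ * the page DMA path; dma_buf is the bus address that buf is mapped to */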
+struct nand_buf
+{
+ int head;
+ int tail;
+ uint8_t buf[DENALI_BUF_SIZE];
+ dma_addr_t dma_buf;
+};
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+struct denali_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct device_info_tag dev_info;
+ int flash_bank; /* currently selected chip */
+ int status;
+ int platform;
+ struct nand_buf buf;
+ struct pci_dev *dev;
+ int total_used_banks;
+ uint32_t block; /* stored for future use */
+ uint16_t page;
+ void __iomem *flash_reg; /* Mapped CSR (register) base address */
+ void __iomem *flash_mem; /* Mapped data/command window base address */
+
+ /* elements used by ISR */
+ struct completion complete;
+ spinlock_t irq_lock;
+ uint32_t irq_status;
+ int irq_debug_array[32];
+ int idx;
+};
+
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
+
+#endif /*_LLD_NAND_*/
+
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6eed9..5084cc51794 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
- priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (ret)
goto err;
- ret = nand_scan_ident(&priv->mtd, 1);
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
if (ret)
goto err;
@@ -1030,14 +1030,14 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
init_waitqueue_head(&ctrl->controller.wq);
init_waitqueue_head(&ctrl->irq_wait);
- ctrl->regs = of_iomap(ofdev->node, 0);
+ ctrl->regs = of_iomap(ofdev->dev.of_node, 0);
if (!ctrl->regs) {
dev_err(&ofdev->dev, "failed to get memory region\n");
ret = -ENODEV;
goto err;
}
- ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+ ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (ctrl->irq == NO_IRQ) {
dev_err(&ofdev->dev, "failed to get irq resource\n");
ret = -ENODEV;
@@ -1058,7 +1058,7 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
goto err;
}
- for_each_child_of_node(ofdev->node, child)
+ for_each_child_of_node(ofdev->dev.of_node, child)
if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
fsl_elbc_chip_probe(ctrl, child);
@@ -1078,9 +1078,10 @@ static const struct of_device_id fsl_elbc_match[] = {
static struct of_platform_driver fsl_elbc_ctrl_driver = {
.driver = {
- .name = "fsl-elbc",
+ .name = "fsl-elbc",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_elbc_match,
},
- .match_table = fsl_elbc_match,
.probe = fsl_elbc_ctrl_probe,
.remove = fsl_elbc_ctrl_remove,
};
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4b96296af32..00aea6f7d1f 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
uint32_t wait_flags;
};
-#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
static int fun_chip_ready(struct mtd_info *mtd)
{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
- io_res.end - io_res.start + 1);
+ resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
@@ -350,15 +353,18 @@ static int __devexit fun_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id of_fun_match[] = {
+static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};
MODULE_DEVICE_TABLE(of, of_fun_match);
static struct of_platform_driver of_fun_driver = {
- .name = "fsl,upm-nand",
- .match_table = of_fun_match,
+ .driver = {
+ .name = "fsl,upm-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_fun_match,
+ },
.probe = fun_probe,
.remove = __devexit_p(fun_remove),
};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa8..0cde618bcc1 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
- if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+ if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
- release_mem_region(res1->start, res1->end - res1->start + 1);
+ release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, res0->end - res0->start + 1);
+ release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 00000000000..3d0867d829c
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
+#endif
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
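+
+/*
+ * Illustration (hypothetical chip, not taken from a datasheet): on a
+ * 2048-byte-page device with chip->pagemask == 0x1FFFF, addressing
+ * column 0 of page 0x1234 issues two column cycles (0x00, 0x00)
+ * followed by three row cycles (0x34, 0x12, 0x00), since the do/while
+ * loop above keeps shifting until pagemask is exhausted.
+ */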
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chips select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND device in which the spare area is not divided fully
+ * by the number of chunks, number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+	}
+}
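+
+/*
+ * Worked example (illustrative only, assuming a hypothetical 2048-byte
+ * page with a 64-byte spare area): the page is split into 2048 / 512 = 4
+ * chunks, so sbsize = (64 / 4) & ~1 = 16 bytes per spare buffer. A
+ * request for 10 bytes at spare offset 20 then selects buffer s = 1,
+ * local offset o = 20 - 16 = 4, and copies blksize = min(16 - 4, 10) =
+ * 10 bytes starting at NFC_SPARE_AREA(1) + 4.
+ */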
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
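+
+/*
+ * Example of the boundary handling above (hypothetical numbers): with a
+ * 2048-byte page, a 16-byte access starting at column 2040 copies
+ * l = min(16, 2048 - 2040) = 8 bytes from the main buffer and then
+ * recurses, so the remaining 8 bytes are routed through
+ * mpc5121_nfc_copy_spare() at spare offset 0.
+ */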
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * NFC is configured during reset on the basis of information stored
+ * in the Reset Config Word. There is no other way to set the NAND page
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ return -EBUSY;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev, "Configured for "
+ "%u-bit NAND, page size %u "
+ "with %u spare.\n",
+ rcw_width * 8, rcw_pagesize,
+ rcw_sparesize);
+ iounmap(rm);
+ of_node_put(rmnode);
+ return 0;
+}
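+
+/*
+ * Decoding example (illustrative, not from a real board): an RCWH value
+ * with bit 6 = 0, bit 7 = 1 and bits [22:21] = 00 gives rcw_width = 1
+ * (8-bit bus), ps = 1 and romloc = 0, so the switch index is
+ * (1 << 2) | 0 = 0x04 and the NFC is assumed to be strapped for a
+ * 2048-byte page with a 64-byte spare area.
+ */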
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk) {
+ clk_disable(prv->clk);
+ clk_put(prv->clk);
+ }
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int __devinit mpc5121_nfc_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *rootnode, *dn = op->node;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const uint *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ prv->clk = clk_get(dev, "nfc_clk");
+ if (!prv->clk) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ clk_enable(prv->clk);
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, *chips_no)) {
+ dev_err(dev, "NAND Flash not found !\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+#ifdef CONFIG_MTD_PARTITIONS
+ retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
+#ifdef CONFIG_MTD_OF_PARTS
+ if (retval == 0)
+ retval = of_mtd_parse_partitions(dev, dn, &parts);
+#endif
+ if (retval < 0) {
+ dev_err(dev, "Error parsing MTD partitions!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (retval > 0)
+ retval = add_mtd_partitions(mtd, parts, retval);
+ else
+#endif
+ retval = add_mtd_device(mtd);
+
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int __devexit mpc5121_nfc_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nand_release(mtd);
+ devm_free_irq(dev, prv->irq, mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct of_platform_driver mpc5121_nfc_driver = {
+ .match_table = mpc5121_nfc_match,
+ .probe = mpc5121_nfc_probe,
+ .remove = __devexit_p(mpc5121_nfc_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc5121_nfc_init(void)
+{
+ return of_register_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_init(mpc5121_nfc_init);
+
+static void __exit mpc5121_nfc_cleanup(void)
+{
+ of_unregister_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_exit(mpc5121_nfc_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8406d..82e94389824 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -38,7 +38,7 @@
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
- uint16_t tmp;
-
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK; /* Disable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ disable_irq_nosync(irq);
wake_up(&host->irq_waitq);
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
- uint32_t tmp;
- int max_retries = 2000;
+ uint16_t tmp;
+ int max_retries = 8000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_INT_MSK; /* Enable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ enable_irq(host->irq);
wait_event(host->irq_waitq,
readw(host->regs + NFC_CONFIG2) & NFC_INT);
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, host->regs + NFC_FLASH_CMD);
writew(NFC_CMD, host->regs + NFC_CONFIG2);
- /* Wait for operation to complete */
- wait_op_done(host, useirq);
+ if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 */
+ /* being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(host->regs + NFC_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
+ __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
}
/* This function sends an address (or partial address) to the
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
+static void preset(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t tmp;
+
+ /* enable interrupt, disable spare enable */
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_INT_MSK;
+ tmp &= ~NFC_SP_EN;
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ tmp |= NFC_ECC_EN;
+ } else {
+ tmp &= ~NFC_ECC_EN;
+ }
+ writew(tmp, host->regs + NFC_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, host->regs + NFC_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ } else if (nfc_is_v1()) {
+ writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, host->regs + NFC_WRPROT);
+}
+
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
+ case NAND_CMD_RESET:
+ send_cmd(host, command, false);
+ preset(mtd);
+ break;
case NAND_CMD_STATUS:
host->buf_start = 0;
@@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- uint16_t tmp;
int err = 0, nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
@@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
+ this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
host->regs = host->base;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
- } else
- BUG();
-
- /* disable interrupt and spare enable */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK;
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
- init_waitqueue_head(&host->irq_waitq);
-
- host->irq = platform_get_irq(pdev, 0);
-
- err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
- if (err)
- goto eirq;
-
- /* Reset NAND */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* preset operation */
- /* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
-
- /* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
this->ecc.bytes = 3;
} else
BUG();
- /* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
-
this->ecc.size = 512;
this->ecc.layout = oob_smallpage;
@@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
} else {
this->ecc.mode = NAND_ECC_SOFT;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
}
/* NAND bus width determines access funtions used by upper layer */
@@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_USE_FLASH_BBT;
}
+ init_waitqueue_head(&host->irq_waitq);
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ if (err)
+ goto eirq;
+
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (mtd) {
- ret = mtd->suspend(mtd);
- /* Disable the NFC clock */
- clk_disable(host->clk);
- }
+
+ ret = mtd->suspend(mtd);
+
+ /*
+ * nand_suspend locks the device for exclusive access, so
+ * the clock must already be off.
+ */
+ BUG_ON(!ret && host->clk_act);
return ret;
}
@@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev)
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- if (mtd) {
- /* Enable the NFC clock */
- clk_enable(host->clk);
- mtd->resume(mtd);
- }
+ mtd->resume(mtd);
return ret;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958fe214..4a7b86423ee 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ /* Do not allow past end of device */
+ if (ofs + len > mtd->size) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
@@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
@@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
- if ((bad & 0xFF) != 0xff)
- res = 1;
+ else
+ bad &= 0xFF;
} else {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- if (chip->read_byte(mtd) != 0xff)
- res = 1;
+ bad = chip->read_byte(mtd);
}
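+	/*
+	 * With badblockbits == 8 (the default, set up in
+	 * nand_get_flash_type()) any marker other than 0xFF flags the
+	 * block as bad; otherwise the block is considered bad when fewer
+	 * than badblockbits bits of the marker byte are still set.
+	 */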
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+
if (getchip)
nand_release_device(mtd);
@@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int block, ret;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
/* Get block number */
block = (int)(ofs >> chip->bbt_erase_shift);
if (chip->bbt)
@@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
+
+ /* broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
- } else {
- spin_unlock(lock);
- return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ * @param invert - when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * @return - unlock status
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ *
+ * @return - unlock status
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if size addresses end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(chip, mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start lock from
+ * @param len - length to lock
+ *
+ * @return - lock status
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts
+ * do have this feature, but it only allows locking all blocks, not a
+ * specified range of blocks.
+ *
+ * Implementing 'lock' feature by making use of 'unlock', for now.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(chip, mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
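+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): a board
+ * driver that needs the first four erase blocks of a block-locked
+ * device to be writable could call
+ *
+ *	err = nand_unlock(mtd, 0, 4ULL * mtd->erasesize);
+ *
+ * Both offset and length are expected to be erase-block aligned (see
+ * check_offs_len()); nand_lock() takes the same argument convention.
+ */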
+
+/**
* nand_read_page_raw - [Intern] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
@@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf += bytes;
if (unlikely(oob)) {
- /* Raw mode does data:oob:data:oob */
- if (ops->mode != MTD_OOB_RAW) {
- int toread = min(oobreadlen,
- chip->ecc.layout->oobavail);
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- } else
- buf = nand_transfer_oob(chip,
- buf, ops, mtd->oobsize);
+
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
}
if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* @oob: oob data buffer
* @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
- size_t len = ops->ooblen;
-
switch(ops->mode) {
case MTD_OOB_PLACE:
@@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int chipnr, realpage, page, blockmask, column;
struct nand_chip *chip = mtd->priv;
uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
@@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (likely(!oob))
memset(chip->oob_poi, 0xff, mtd->oobsize);
+ /* Don't allow multipage oob writes with offset */
+ if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ return -EINVAL;
+
while(1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
wbuf = chip->buffers->databuf;
}
- if (unlikely(oob))
- oob = nand_fill_oob(chip, oob, ops);
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ }
ret = chip->write_page(mtd, chip, wbuf, page, cached,
(ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops);
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
memset(chip->oob_poi, 0xff, mtd->oobsize);
@@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
- __func__);
- return -EINVAL;
- }
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
@@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id)
+ int busw, int *maf_id,
+ struct nand_flash_dev *type)
{
- struct nand_flash_dev *type = NULL;
int i, dev_id, maf_idx;
- int tmp_id, tmp_manf;
+ u8 id_data[8];
/* Select the device */
chip->select_chip(mtd, 0);
@@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- /* Read manufacturer and device IDs */
+ /* Read entire ID string */
- tmp_manf = chip->read_byte(mtd);
- tmp_id = chip->read_byte(mtd);
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
- if (tmp_manf != *maf_id || tmp_id != dev_id) {
+ if (id_data[0] != *maf_id || id_data[1] != dev_id) {
printk(KERN_INFO "%s: second ID read did not match "
"%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, dev_id, tmp_manf, tmp_id);
+ *maf_id, dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
- /* Lookup the flash id */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (dev_id == nand_flash_ids[i].id) {
- type = &nand_flash_ids[i];
- break;
- }
- }
-
if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (dev_id == type->id)
+ break;
+
+ if (!type->name)
return ERR_PTR(-ENODEV);
if (!mtd->name)
@@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!type->pagesize) {
int extid;
/* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = chip->read_byte(mtd);
+ chip->cellinfo = id_data[2];
/* The 4th id byte is the important one */
- extid = chip->read_byte(mtd);
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x3);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ extid = id_data[3];
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New style (6 byte ID): Samsung K9GAG08U0D (p.40)
+ *
+ * Check for wraparound + Samsung ID + nonzero 6th byte
+ * to decide what to do.
+ */
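+		/*
+		 * Purely illustrative example of the new-style decode
+		 * below: a 4th ID byte of 0x29 would yield a 4096-byte
+		 * page (2048 << 1), a 218-byte spare area and a 512KiB
+		 * erase block (128KiB << 2) on an 8-bit bus.
+		 */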
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ }
} else {
/*
* Old devices have chip data hardcoded in the device id table
@@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+ chip->badblockbits = 8;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ /*
+ * Bad block marker is stored in the last page of each block
+ * on Samsung and Hynix MLC devices
+ */
+ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX))
+ chip->options |= NAND_BB_LAST_PAGE;
+
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
chip->erase_cmd = multi_erase_cmd;
@@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
+ * @table: Alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It
* reads the flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips)
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, busw, nand_maf_id;
struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
BUG();
}
- ret = nand_scan_ident(mtd, maxchips);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
@@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->buffers);
}
+EXPORT_SYMBOL_GPL(nand_lock);
+EXPORT_SYMBOL_GPL(nand_unlock);
EXPORT_SYMBOL_GPL(nand_scan);
EXPORT_SYMBOL_GPL(nand_scan_ident);
EXPORT_SYMBOL_GPL(nand_scan_tail);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5cd21..ad97c0ce73b 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
+ int res;
ops.mode = MTD_OOB_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
- ops.oobbuf = buf;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
+
+ while (len > 0) {
+ if (len <= mtd->writesize) {
+ ops.oobbuf = buf + len;
+ ops.datbuf = buf;
+ ops.len = len;
+ return mtd->read_oob(mtd, offs, &ops);
+ } else {
+ ops.oobbuf = buf + mtd->writesize;
+ ops.datbuf = buf;
+ ops.len = mtd->writesize;
+ res = mtd->read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ }
+ return 0;
}
/*
@@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
+ if (this->options & NAND_BB_LAST_PAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
for (i = startblock; i < numblocks;) {
int ret;
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd9785..198b304d6f7 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
+ if (pageSize != NAND_DATA_ACCESS_SIZE) {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- while (numToRead > numEccBytes) {
- /* skip free oob region */
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = REG_NAND_DATA8;
#else
- REG_NAND_DATA8;
+ REG_NAND_DATA8;
#endif
- numToRead--;
- }
+ numToRead--;
+ }
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
+ numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
numToRead--;
}
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
- } else {
- /* skip BI */
+ }
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
#else
- REG_NAND_DATA8;
+ eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
}
}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 69ee2c90eb0..89907ed9900 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* 1 Gigabit */
{"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
{"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000fef2..261337efe0e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
-static unsigned long parts[MAX_MTD_DEVICES];
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
-MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
-MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
- struct mtd_partition partitions[MAX_MTD_DEVICES];
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
union ns_mem buf;
/* NAND flash "geometry" */
- struct nandsin_geometry {
+ struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
} geom;
/* NAND flash internal registers */
- struct nandsim_regs {
+ struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
} regs;
/* NAND flash lines state */
- struct ns_lines_status {
+ struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index b983cae8c29..98fd2bdf8be 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -239,14 +239,14 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, ndfc);
/* Read the reg property to get the chip select */
- reg = of_get_property(ofdev->node, "reg", &len);
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
if (reg == NULL || len != 12) {
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
ndfc->chip_select = reg[0];
- ndfc->ndfcbase = of_iomap(ofdev->node, 0);
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
dev_err(&ofdev->dev, "failed to get memory\n");
return -EIO;
@@ -255,20 +255,20 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
ccr = NDFC_CCR_BS(ndfc->chip_select);
/* It is ok if ccr does not exist - just default to 0 */
- reg = of_get_property(ofdev->node, "ccr", NULL);
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
if (reg)
ccr |= *reg;
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
/* Set the bank settings if given */
- reg = of_get_property(ofdev->node, "bank-settings", NULL);
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
if (reg) {
int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
out_be32(ndfc->ndfcbase + offset, *reg);
}
- err = ndfc_chip_init(ndfc, ofdev->node);
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
if (err) {
iounmap(ndfc->ndfcbase);
return err;
@@ -294,9 +294,10 @@ MODULE_DEVICE_TABLE(of, ndfc_match);
static struct of_platform_driver ndfc_driver = {
.driver = {
- .name = "ndfc",
+ .name = "ndfc",
+ .owner = THIS_MODULE,
+ .of_match_table = ndfc_match,
},
- .match_table = ndfc_match,
.probe = ndfc_probe,
.remove = __devexit_p(ndfc_remove),
};
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741af5d..8c0b6937522 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
- host->addr_va = ioremap(res->start, res->end - res->start + 1);
+ host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->data_va = ioremap(res->start, res->end - res->start + 1);
+ host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+ host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348..6eddf7361ed 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Nuvoton technology corporation.
+ * Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
-struct w90p910_nand {
+struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void w90p910_nand_read_buf(struct mtd_info *mtd,
- unsigned char *buf, int len)
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void w90p910_nand_write_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
-static int w90p910_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
-static int w90p910_check_rb(struct w90p910_nand *nand)
+static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
-static int w90p910_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct mtd_info *mtd)
{
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
int ready;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
- ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void w90p910_nand_command_lp(struct mtd_info *mtd,
- unsigned int command, int column, int page_addr)
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
- while (!w90p910_check_rb(nand))
+ while (!nuc900_check_rb(nand))
;
return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
-static void w90p910_nand_enable(struct w90p910_nand *nand)
+static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand;
+ struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
- w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
- if (!w90p910_nand)
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
return -ENOMEM;
- chip = &(w90p910_nand->chip);
+ chip = &(nuc900_nand->chip);
- w90p910_nand->mtd.priv = chip;
- w90p910_nand->mtd.owner = THIS_MODULE;
- spin_lock_init(&w90p910_nand->lock);
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
- w90p910_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(w90p910_nand->clk)) {
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
- clk_enable(w90p910_nand->clk);
-
- chip->cmdfunc = w90p910_nand_command_lp;
- chip->dev_ready = w90p910_nand_devready;
- chip->read_byte = w90p910_nand_read_byte;
- chip->write_buf = w90p910_nand_write_buf;
- chip->read_buf = w90p910_nand_read_buf;
- chip->verify_buf = w90p910_verify_buf;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
- w90p910_nand->reg = ioremap(res->start, resource_size(res));
- if (!w90p910_nand->reg) {
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
- w90p910_nand_enable(w90p910_nand);
+ nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
- add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
- platform_set_drvdata(pdev, w90p910_nand);
+ platform_set_drvdata(pdev, nuc900_nand);
return retval;
-fail3: iounmap(w90p910_nand->reg);
+fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(w90p910_nand);
+fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
- iounmap(w90p910_nand->reg);
+ iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- clk_disable(w90p910_nand->clk);
- clk_put(w90p910_nand->clk);
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
- kfree(w90p910_nand);
+ kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
-static struct platform_driver w90p910_nand_driver = {
- .probe = w90p910_nand_probe,
- .remove = __devexit_p(w90p910_nand_remove),
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
.driver = {
- .name = "w90p910-fmi",
+ .name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init w90p910_nand_init(void)
+static int __init nuc900_nand_init(void)
{
- return platform_driver_register(&w90p910_nand_driver);
+ return platform_driver_register(&nuc900_nand_driver);
}
-static void __exit w90p910_nand_exit(void)
+static void __exit nuc900_nand_exit(void)
{
- platform_driver_unregister(&w90p910_nand_driver);
+ platform_driver_unregister(&nuc900_nand_driver);
}
-module_init(w90p910_nand_init);
-module_exit(w90p910_nand_exit);
+module_init(nuc900_nand_init);
+module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-fmi");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568fce4..ee87325c771 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
u32 *p = (u32 *)buf;
/* take care of subpage reads */
- for (; len % 4 != 0; ) {
- *buf++ = __raw_readb(info->nand.IO_ADDR_R);
- len--;
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
}
- p = (u32 *) buf;
/* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_write_buf_pref(mtd, buf, len);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x1);
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
/**
@@ -1028,7 +1031,8 @@ out_free_info:
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct omap_nand_info *info = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
platform_set_drvdata(pdev, NULL);
if (use_dma)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d60fc5719fe..da6e7534305 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
+ struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
- io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
@@ -120,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
+ if (board->dev_ready)
+ nc->dev_ready = board->dev_ready;
+
platform_set_drvdata(pdev, mtd);
if (nand_scan(mtd, 1)) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a8b9376cf32..f02af24d033 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -93,7 +93,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct pci_dev *pdev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct nand_chip *chip;
int err = 0;
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id pasemi_nand_match[] =
+static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
@@ -221,8 +221,11 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct of_platform_driver pasemi_nand_driver =
{
- .name = (char*)driver_name,
- .match_table = pasemi_nand_match,
+ .driver = {
+ .name = (char*)driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = pasemi_nand_match,
+ },
.probe = pasemi_nand_probe,
.remove = pasemi_nand_remove,
};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152162c..e02fa4f0e3c 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] = { "cmdlinepart", NULL };
+ struct mtd_partition *parts;
+ int nr_parts;
+
+ nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
+
+ if (nr_parts)
+ return add_mtd_partitions(mtd, parts, nr_parts);
+ }
+
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 00000000000..78a42329547
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static int r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
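+/* usage note (illustrative, not from the original patch): both parameters can
+   be set at load time, e.g. "modprobe r852 debug=2 r852_enable_dma=0" gives
+   verbose logging with DMA disabled */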
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ return (struct r852_device *)chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA.
+ * Expects dev->dma_dir and dev->dma_state to be set
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ complete(&dev->dma_done);
+}
+
+/*
+ * Wait till dma is done, which includes both phases of it
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/Write one page using dma. Only whole 512-byte pages are transferred
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+ /* Set initial dma state: for a read, first fill the on-board buffer
+ from the device; for a write, first fill the buffer from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+ /* if incoming buffer is not page aligned, we should do bounce */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
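+ /* (note: a single wait covers both DMA stages - the interrupt handler
+ re-arms the second stage and only completes dma_done after stage 3
+ or on an error) */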
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+ /* Special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+ /* write DWORD chunks - faster */
+ while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len)
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+ /* since we can't signal an error here, at least return a
+ predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+ /* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+
+/*
+ * Readback the buffer to verify it
+ */
+int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* We can't be sure about anything here... */
+ if (dev->card_unstable)
+ return -1;
+
+ /* This will never happen, unless you wired up a nand chip
+ with a page size > 512 bytes to the reader */
+ if (len > SM_SECTOR_SIZE)
+ return 0;
+
+ r852_read_buf(mtd, dev->tmp_buffer, len);
+ return memcmp(buf, dev->tmp_buffer, len);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+ /* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till card is ready.
+ * based on nand_wait, but returns errors on DMA error
+ */
+int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = (struct r852_device *)chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+ /* Unfortunately, there is no way to send a detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+*/
+
+void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint16_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is copy of nand_read_oob_std
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/*
+ * Start the nand engine
+ */
+
+void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd, dev->sm))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ /* Update detection logic */
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ /* We can receive a shared interrupt while the pci device is suspended;
+ in that case reads will return 0xFFFFFFFF.... */
+ if (dev->insuspend)
+ goto out;
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+ /* we shouldn't receive any interrupts while we wait for the card
+ to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+ /* this will time out the DMA if active, but better than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+ /* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+ dbg("recieved dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ goto out;
+ }
+
+ /* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3)
+ r852_dma_done(dev, 0);
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+ chip->verify_buf = r852_verify_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+ /* shutdown everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+ /*register irq handler*/
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ /* kick initial present test */
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+ printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ /* Stop detect workqueue -
+ we are going to unregister the device anyway*/
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM
+int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 1;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* At that point, even if interrupt handler is running, it will quit */
+ /* So wait for this to happen explicitly */
+ synchronize_irq(dev->irq);
+
+ /* If the card was pulled out just during the suspend, which is very
+ unlikely, we will remove it on resume; it's too late now
+ anyway... */
+ dev->card_unstable = 0;
+
+ pci_save_state(to_pci_dev(device));
+ return pci_prepare_to_sleep(to_pci_dev(device));
+}
+
+int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ /* Turn on the hardware */
+ pci_back_from_sleep(to_pci_dev(device));
+ pci_restore_state(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+ /* Now it's safe for the IRQ to run */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 1000);
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#else
+#define r852_suspend NULL
+#define r852_resume NULL
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+static __init int r852_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r852_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r852_module_init);
+module_exit(r852_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 00000000000..8096cc280c7
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+ a byte write/read does one cycle on the nand data lines;
+ a dword write/read does 4 cycles.
+ if R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+ results of ecc correction, if a DMA read was done before.
+ If a write was done, two dword reads return the generated ecc checksums
+*/
+#define R852_DATALINE 0x00
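+/* (note: r852_ecc_calculate() in r852.c uses this - it sets R852_CTL_ECC_ACCESS
+   and then does two dword reads of R852_DATALINE to fetch the generated ecc
+   for both halves of the page) */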
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON 0x04 /* only seems to control the hd led, */
+ /* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of these,
+ one for each half of the page.
+ the first byte is the error byte location; the second is the bit location + flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
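+
+/* worked example (illustrative, mirrors the decode in r852_ecc_correct()):
+   a half-syndrome of 0x2305 has status byte 0x23 - R852_ECC_CORRECTABLE set,
+   error bit 3 - and error byte location 0x05, so bit 3 of byte 5 of that
+   256-byte half is flipped to correct the data */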
+
+#define R852_DMA_LEN 512
+
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
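+
+/* (note: a transfer runs in two stages - card <-> internal hw buffer
+   (DMA_INTERNAL) and internal hw buffer <-> host memory (DMA_MEMORY).
+   r852_do_dma() starts with the stage matching the transfer direction and
+   r852_irq() advances dma_stage, re-arming the DMA for the second stage) */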
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+ int card_unstable; /* whether the card is inserted
+ is not known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ int insuspend; /* device is suspended */
+
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe51..239aadfd01b 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info, 0, sizeof(*info));
platform_set_drvdata(pdev, info);
spin_lock_init(&info->controller.lock);
@@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
- size = res->end - res->start + 1;
+ size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
@@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kmalloc(size, GFP_KERNEL);
+ info->mtds = kzalloc(size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info->mtds, 0, size);
-
/* initialise all possible chips */
nmtd = info->mtds;
@@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
- (sets) ? sets->nr_chips : 1);
+ (sets) ? sets->nr_chips : 1,
+ NULL);
if (nmtd->scan_res == 0) {
s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fce079..546c2f0eb2e 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
- ret = nand_scan_ident(flctl_mtd, 1);
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 00000000000..ac80fb362e6
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include "sm_common.h"
+
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
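+/* (note: the eccpos values above land on the ecc2 (bytes 8-10) and ecc1
+   (bytes 13-15) fields of struct sm_oob in sm_common.h, and the oobfree
+   ranges cover the reserved and LBA copy fields) */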
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page-dependent oob layout */
+/* However it does preserve the bad block markers */
+/* If you use smftl, it will bypass this and work correctly */
+/* If you don't, then you break SmartMedia compliance anyway */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret, error = 0;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd->write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ error = -EIO;
+ } else
+ mtd->ecc_stats.badblocks++;
+
+ return error;
+}
+
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
+ {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
+ {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
+ {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
+ {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
+ {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
+ {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
+ {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
+ {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
+ {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
+ {NULL,}
+};
+
+#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+
+ {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {NULL,}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, smartmedia ?
+ nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+ if (ret)
+ return ret;
+
+ /* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return add_mtd_device(mtd);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 00000000000..00f4a83359b
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __attribute__((packed));
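+
+/* (note: byte offsets within the 16-byte oob: reserved 0-3, data_status 4,
+   block_status 5, lba_copy1 6-7, ecc2 8-10, lba_copy2 11-12, ecc1 13-15) */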
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one zone
+ have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a4519a7bd68..884852dc7eb 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, host);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto out;
}
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id socrates_nand_match[] =
+static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",
@@ -301,8 +301,11 @@ static struct of_device_id socrates_nand_match[] =
MODULE_DEVICE_TABLE(of, socrates_nand_match);
static struct of_platform_driver socrates_nand_driver = {
- .name = "socrates_nand",
- .match_table = socrates_nand_match,
+ .driver = {
+ .name = "socrates_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = socrates_nand_match,
+ },
.probe = socrates_nand_probe,
.remove = __devexit_p(socrates_nand_remove),
};
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01ae00..3041d1f7ae3 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc..00000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/mtd/nand/ts7250.c
- *
- * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TS-7250 board which utilizes a Samsung 32 Mbyte part.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/ts72xx.h>
-
-#include <asm/sizes.h>
-#include <asm/mach-types.h>
-
-/*
- * MTD structure for TS7250 board
- */
-static struct mtd_info *ts7250_mtd = NULL;
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
-#define NUM_PARTITIONS 3
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info32[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x01d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x01d04000,
- .size = 0x002fc000,
- },
-};
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info128[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x07d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x07d04000,
- .size = 0x002fc000,
- },
-};
-#endif
-
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 1
- * NAND_ALE: bit 2 -> bit 0
- */
-static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
- unsigned char bits;
-
- bits = (ctrl & NAND_NCE) << 2;
- bits |= ctrl & NAND_CLE;
- bits |= (ctrl & NAND_ALE) >> 2;
-
- __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ts7250_device_ready(struct mtd_info *mtd)
-{
- return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
-}
-
-/*
- * Main initialization routine
- */
-static int __init ts7250_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
-
- if (!machine_is_ts72xx() || board_is_ts7200())
- return -ENXIO;
-
- /* Allocate memory for MTD device structure and private data */
- ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ts7250_mtd) {
- printk("Unable to allocate TS7250 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ts7250_mtd[1]);
-
- /* Initialize structures */
- memset(ts7250_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ts7250_mtd->priv = this;
- ts7250_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->cmd_ctrl = ts7250_hwcontrol;
- this->dev_ready = ts7250_device_ready;
- this->chip_delay = 15;
- this->ecc.mode = NAND_ECC_SOFT;
-
- printk("Searching for NAND flash...\n");
- /* Scan to find existence of the device */
- if (nand_scan(ts7250_mtd, 1)) {
- kfree(ts7250_mtd);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_PARTITIONS
- ts7250_mtd->name = "ts7250-nand";
- mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info32;
- if (ts7250_mtd->size >= (128 * 0x100000))
- mtd_parts = partition_info128;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ts7250_init);
-
-/*
- * Clean up routine
- */
-static void __exit ts7250_cleanup(void)
-{
- /* Unregister the device */
- del_mtd_device(ts7250_mtd);
-
- /* Free the MTD device structure */
- kfree(ts7250_mtd);
-}
-
-module_exit(ts7250_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
-MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c3b69..054a41c0ef4 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
chip->ecc.size = mtd->writesize;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 1002e188299..a4578bf903a 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
- kfree(nftl);
}
/*
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 3a9f1578460..9a49d68ba5f 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -30,6 +30,13 @@ config MTD_ONENAND_OMAP2
Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
via the GPMC memory controller.
+config MTD_ONENAND_SAMSUNG
+	tristate "OneNAND on Samsung SoC controller support"
+ depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210)
+ help
+	  Support for a OneNAND flash device connected to a Samsung
+	  S3C64XX/S5PC1XX SoC controller.
+
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
select HAVE_MTD_OTP
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 64b6cc61a52..2b7884c7577 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
# Simulator
obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index fd406348fdf..9f322f1a7f2 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
if (*done)
break;
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 32f0ed33afe..26caf2590da 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -397,7 +397,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
+ ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -426,7 +427,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@@ -466,11 +467,11 @@ static inline int onenand_read_ecc(struct onenand_chip *this)
{
int ecc, i, result = 0;
- if (!FLEXONENAND(this))
+ if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
for (i = 0; i < 4; i++) {
- ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
if (likely(!ecc))
continue;
if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
@@ -1425,7 +1426,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@@ -1460,7 +1461,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@@ -1634,7 +1635,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *dataram;
int ret = 0;
int thislen, column;
@@ -1654,10 +1654,9 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
onenand_update_bufferram(mtd, addr, 1);
- dataram = this->base + ONENAND_DATARAM;
- dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
+ this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
- if (memcmp(buf, dataram + column, thislen))
+ if (memcmp(buf, this->verify_buf, thislen))
return -EBADMSG;
len -= thislen;
@@ -1926,7 +1925,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
* 2 PLANE, MLC, and Flex-OneNAND do not support
* write-while-program feature.
*/
- if (!ONENAND_IS_2PLANE(this) && !first) {
+ if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
ret = this->wait(mtd, FL_WRITING);
@@ -1957,7 +1956,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
*/
- if (ONENAND_IS_2PLANE(this)) {
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
@@ -2084,7 +2083,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- if (ONENAND_IS_MLC(this)) {
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -3027,7 +3026,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@@ -3372,7 +3371,10 @@ static void onenand_check_features(struct mtd_info *mtd)
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_4Gb:
- this->options |= ONENAND_HAS_2PLANE;
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else
+ this->options |= ONENAND_HAS_4KB_PAGE;
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@@ -3393,7 +3395,7 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@@ -3407,6 +3409,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip support all block unlock\n");
if (this->options & ONENAND_HAS_2PLANE)
printk(KERN_DEBUG "Chip has 2 plane\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
}
/**
@@ -3759,6 +3763,12 @@ static int onenand_probe(struct mtd_info *mtd)
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ /* Workaround */
+ if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) {
+ bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ }
+
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
@@ -3778,6 +3788,9 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
density = onenand_get_density(dev_id);
if (FLEXONENAND(this)) {
this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
@@ -3799,7 +3812,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@@ -3829,9 +3842,6 @@ static int onenand_probe(struct mtd_info *mtd)
else
mtd->size = this->chipsize;
- /* Check OneNAND features */
- onenand_check_features(mtd);
-
/*
* We emulate the 4KiB page and 256KiB erase block size
* But oobsize is still 64 bytes.
@@ -3926,6 +3936,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
__func__);
return -ENOMEM;
}
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->verify_buf) {
+ kfree(this->page_buf);
+ return -ENOMEM;
+ }
+#endif
this->options |= ONENAND_PAGEBUF_ALLOC;
}
if (!this->oob_buf) {
@@ -4053,8 +4070,12 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->bbm);
}
/* Buffers allocated by onenand_scan */
- if (this->options & ONENAND_PAGEBUF_ALLOC)
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
kfree(this->page_buf);
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ }
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
kfree(mtd->eraseregions);
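The onenand_base.c hunks above repeatedly widen the "ONENAND_IS_MLC(this)" test to "ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)". Below is a minimal sketch of how that recurring condition could be expressed once, assuming the ONENAND_HAS_4KB_PAGE option bit and its ONENAND_IS_4KB_PAGE() macro are added to <linux/mtd/onenand.h> by this series; the helper name is hypothetical and not part of the patch.

#include <linux/mtd/onenand.h>

static inline int example_onenand_mlc_like(struct onenand_chip *this)
{
	/* 4KiB-page parts are treated like MLC here: a single BufferRAM is
	 * used and the 2-plane / write-while-program paths are skipped. */
	return ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this);
}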
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
new file mode 100644
index 00000000000..2750317cb58
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,1071 @@
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ * Copyright © 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implementation:
+ * S3C64XX and S5PC100: emulate the pseudo BufferRAM
+ * S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach/flash.h>
+#include <plat/regs-onenand.h>
+
+#include <linux/io.h>
+
+enum soc_type {
+ TYPE_S3C6400,
+ TYPE_S3C6410,
+ TYPE_S5PC100,
+ TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#define MAP_00 (0x0)
+#define MAP_01 (0x1)
+#define MAP_10 (0x2)
+#define MAP_11 (0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT 24
+#define S5PC1XX_CMD_MAP_SHIFT 26
+
+#define S3C6400_FBA_SHIFT 10
+#define S3C6400_FPA_SHIFT 4
+#define S3C6400_FSA_SHIFT 2
+
+#define S3C6410_FBA_SHIFT 12
+#define S3C6410_FPA_SHIFT 6
+#define S3C6410_FSA_SHIFT 4
+
+#define S5PC100_FBA_SHIFT 13
+#define S5PC100_FPA_SHIFT 7
+#define S5PC100_FSA_SHIFT 5
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR 0x400
+#define S5PC110_DMA_SRC_CFG 0x404
+#define S5PC110_DMA_DST_ADDR 0x408
+#define S5PC110_DMA_DST_CFG 0x40C
+#define S5PC110_DMA_TRANS_SIZE 0x414
+#define S5PC110_DMA_TRANS_CMD 0x418
+#define S5PC110_DMA_TRANS_STATUS 0x41C
+#define S5PC110_DMA_TRANS_DIR 0x420
+
+#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC (0x0 << 8)
+#define S5PC110_DMA_CFG_CNT (0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ 0x0
+#define S5PC110_DMA_DIR_WRITE 0x1
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ struct platform_device *pdev;
+ enum soc_type type;
+ void __iomem *base;
+ struct resource *base_res;
+ void __iomem *ahb_addr;
+ struct resource *ahb_res;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ unsigned int (*cmd_map)(unsigned int type, unsigned int val);
+ void __iomem *dma_addr;
+ struct resource *dma_res;
+ unsigned long phys_base;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+};
+
+#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
+
+static struct s3c_onenand *onenand;
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+static inline int s3c_read_reg(int offset)
+{
+ return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+ writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+ int i;
+
+ for (i = 0; i < 0x400; i += 0x40) {
+ printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (unsigned int) onenand->base + i,
+ s3c_read_reg(i), s3c_read_reg(i + 0x10),
+ s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+ }
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S5PC1XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+ (fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+ (fsa << S3C6410_FSA_SHIFT);
+}
+
+static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
+ (fsa << S5PC100_FSA_SHIFT);
+}
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+	while (timeout--) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /* Clear interrupt */
+ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+ /* Clear the ECC status */
+ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+	/* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ case ONENAND_REG_DEVICE_ID:
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ case ONENAND_REG_VERSION_ID:
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+ case ONENAND_REG_TECHNOLOGY:
+ return s3c_read_reg(TECH_OFFSET);
+ case ONENAND_REG_SYS_CFG1:
+ return s3c_read_reg(MEM_CFG_OFFSET);
+
+ /* Used at unlock all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ if (word_addr == 1)
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ if (word_addr == 2)
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int reg = addr - this->base;
+ unsigned int word_addr = reg >> 1;
+
+	/* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ s3c_write_reg(value, MEM_CFG_OFFSET);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int)addr < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+
+ if (state != FL_READING)
+ cond_resched();
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+	/*
+	 * The spec says to check the controller status first.
+	 * However, to get correct information in the power-off
+	 * recovery (POR) test case, the ECC status must be read first.
+	 */
+ if (stat & LOAD_CMP) {
+ ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+ ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+ stat);
+ if (stat & LOCKED_BLK)
+ dev_info(dev, "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr, cmd_map_01, cmd_map_10;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+ cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+ cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate Two BufferRAMs and access with 4 bytes pointer
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
+static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ do {
+ status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
+
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
+
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+ return 0;
+}
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+ void __iomem *p;
+ void *buf = (void *) buffer;
+ dma_addr_t dma_src, dma_dst;
+ int err;
+
+ p = bufferram = this->base + area;
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ p += this->writesize;
+ else
+ p += mtd->oobsize;
+ }
+
+ if (offset & 3 || (size_t) buf & 3 ||
+ !onenand->dma_addr || count != mtd->writesize)
+ goto normal;
+
+ /* Handle vmalloc address */
+ if (buf >= high_memory) {
+ struct page *page;
+
+ if (((size_t) buf & PAGE_MASK) !=
+ ((size_t) (buf + count - 1) & PAGE_MASK))
+ goto normal;
+ page = vmalloc_to_page(buf);
+ if (!page)
+ goto normal;
+ buf = page_address(page) + ((size_t) buf & ~PAGE_MASK);
+ }
+
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(&onenand->pdev->dev,
+ buf, count, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) {
+ dev_err(&onenand->pdev->dev,
+ "Couldn't map a %d byte buffer for DMA\n", count);
+ goto normal;
+ }
+ err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
+ count, S5PC110_DMA_DIR_READ);
+ dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!err)
+ return 0;
+
+normal:
+ if (count != mtd->writesize) {
+ /* Copy the bufferram to memory to prevent unaligned access */
+ memcpy(this->page_buf, bufferram, mtd->writesize);
+ p = this->page_buf + offset;
+ }
+
+ memcpy(buffer, p, count);
+
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout;
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int block, end;
+ int tmp;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+ tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+ if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+ dev_err(dev, "block %d is write-protected!\n", block);
+ s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+ s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ onenand->mtd = mtd;
+
+ if (onenand->type == TYPE_S3C6400) {
+ onenand->mem_addr = s3c6400_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S3C6410) {
+ onenand->mem_addr = s3c6410_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC100) {
+ onenand->mem_addr = s5pc100_mem_addr;
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC110) {
+ /* Use generic onenand functions */
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ this->read_bufferram = s5pc110_read_bufferram;
+ return;
+ } else {
+ BUG();
+ }
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_platform_data *pdata;
+ struct onenand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int size, err;
+ unsigned long onenand_ctrl_cfg = 0;
+
+ pdata = pdev->dev.platform_data;
+	/* No need to check pdata; the platform data is optional */
+
+ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+ mtd = kzalloc(size, GFP_KERNEL);
+ if (!mtd) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
+ if (!onenand) {
+ err = -ENOMEM;
+ goto onenand_fail;
+ }
+
+ this = (struct onenand_chip *) &mtd[1];
+ mtd->priv = this;
+ mtd->dev.parent = &pdev->dev;
+ mtd->owner = THIS_MODULE;
+ onenand->pdev = pdev;
+ onenand->type = platform_get_device_id(pdev)->driver_data;
+
+ s3c_onenand_setup(mtd);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+		err = -ENOENT;
+ goto ahb_resource_failed;
+ }
+
+ onenand->base_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->base_res) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ err = -EBUSY;
+ goto resource_failed;
+ }
+
+ onenand->base = ioremap(r->start, resource_size(r));
+ if (!onenand->base) {
+ dev_err(&pdev->dev, "failed to map memory resource\n");
+ err = -EFAULT;
+ goto ioremap_failed;
+ }
+ /* Set onenand_chip also */
+ this->base = onenand->base;
+
+ /* Use runtime badblock check */
+ this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+ if (onenand->type != TYPE_S5PC110) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no buffer memory resource defined\n");
+			err = -ENOENT;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->ahb_res) {
+ dev_err(&pdev->dev, "failed to request buffer memory resource\n");
+ err = -EBUSY;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->ahb_addr) {
+ dev_err(&pdev->dev, "failed to map buffer memory resource\n");
+ err = -EINVAL;
+ goto ahb_ioremap_failed;
+ }
+
+ /* Allocate 4KiB BufferRAM */
+ onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!onenand->page_buf) {
+ err = -ENOMEM;
+ goto page_buf_fail;
+ }
+
+		/* Allocate 128 bytes of SpareRAM */
+ onenand->oob_buf = kzalloc(128, GFP_KERNEL);
+ if (!onenand->oob_buf) {
+ err = -ENOMEM;
+ goto oob_buf_fail;
+ }
+
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+
+ } else { /* S5PC110 */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no dma memory resource defined\n");
+			err = -ENOENT;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->dma_res) {
+ dev_err(&pdev->dev, "failed to request dma memory resource\n");
+ err = -EBUSY;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->dma_addr) {
+ dev_err(&pdev->dev, "failed to map dma memory resource\n");
+ err = -EINVAL;
+ goto dma_ioremap_failed;
+ }
+
+ onenand->phys_base = onenand->base_res->start;
+
+ onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100);
+ if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) &&
+ onenand->dma_addr)
+ writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE,
+ onenand->dma_addr + 0x100);
+ else
+ onenand_ctrl_cfg = 0;
+ }
+
+ if (onenand_scan(mtd, 1)) {
+ err = -EFAULT;
+ goto scan_failed;
+ }
+
+ if (onenand->type == TYPE_S5PC110) {
+ if (onenand_ctrl_cfg && onenand->dma_addr)
+ writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100);
+ } else {
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+ }
+
+ if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+ dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(mtd, onenand->parts, err);
+ else if (err <= 0 && pdata && pdata->parts)
+ add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ err = add_mtd_device(mtd);
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+scan_failed:
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+dma_ioremap_failed:
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+ kfree(onenand->oob_buf);
+oob_buf_fail:
+ kfree(onenand->page_buf);
+page_buf_fail:
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ahb_ioremap_failed:
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+dma_resource_failed:
+ahb_resource_failed:
+ iounmap(onenand->base);
+ioremap_failed:
+ if (onenand->base_res)
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+resource_failed:
+ kfree(onenand);
+onenand_fail:
+ kfree(mtd);
+ return err;
+}
+
+static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+ onenand_release(mtd);
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+
+ iounmap(onenand->base);
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(onenand->oob_buf);
+ kfree(onenand->page_buf);
+ kfree(onenand);
+ kfree(mtd);
+ return 0;
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->wait(mtd, FL_PM_SUSPENDED);
+ return mtd->suspend(mtd);
+}
+
+static int s3c_pm_ops_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ mtd->resume(mtd);
+ this->unlock_all(mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+ .suspend = s3c_pm_ops_suspend,
+ .resume = s3c_pm_ops_resume,
+};
+
+static struct platform_device_id s3c_onenand_driver_ids[] = {
+ {
+ .name = "s3c6400-onenand",
+ .driver_data = TYPE_S3C6400,
+ }, {
+ .name = "s3c6410-onenand",
+ .driver_data = TYPE_S3C6410,
+ }, {
+ .name = "s5pc100-onenand",
+ .driver_data = TYPE_S5PC100,
+ }, {
+ .name = "s5pc110-onenand",
+ .driver_data = TYPE_S5PC110,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+ .driver = {
+ .name = "samsung-onenand",
+ .pm = &s3c_pm_ops,
+ },
+ .id_table = s3c_onenand_driver_ids,
+ .probe = s3c_onenand_probe,
+ .remove = __devexit_p(s3c_onenand_remove),
+};
+
+static int __init s3c_onenand_init(void)
+{
+ return platform_driver_register(&s3c_onenand_driver);
+}
+
+static void __exit s3c_onenand_exit(void)
+{
+ platform_driver_unregister(&s3c_onenand_driver);
+}
+
+module_init(s3c_onenand_init);
+module_exit(s3c_onenand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
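To make the address packing in the driver above concrete, the following standalone user-space sketch (illustration only; the constants are copied from the driver) composes a MAP_01 data-access offset for the S3C6410: the block/page/sector address is packed with the *_SHIFT macros and then tagged with the MAP_01 selector via S3C64XX_CMD_MAP_SHIFT, which is the offset s3c_read_cmd() would be called with.

#include <stdio.h>

#define MAP_01			0x1
#define S3C64XX_CMD_MAP_SHIFT	24
#define S3C6410_FBA_SHIFT	12
#define S3C6410_FPA_SHIFT	6
#define S3C6410_FSA_SHIFT	4

static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
	       (fsa << S3C6410_FSA_SHIFT);
}

int main(void)
{
	/* flash block 5, page 3, sector 0 */
	unsigned int mem = s3c6410_mem_addr(5, 3, 0);
	unsigned int cmd = (MAP_01 << S3C64XX_CMD_MAP_SHIFT) | mem;

	printf("mem_addr = 0x%08x, MAP_01 offset = 0x%08x\n", mem, cmd);
	return 0;	/* prints 0x000050c0 and 0x010050c0 */
}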
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index d2aa9c46530..63b83c0d9a1 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
- kfree(part);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 00000000000..67822cf6c02
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+#include "nand/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+	"Timeout (in ms) for cache flush (1000 ms default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ---------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+ strncpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+
+ int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+
+	char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL); /* +1 for NUL */
+ memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+ vendor[vendor_len] = 0;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = vendor_len;
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+ GFP_KERNEL);
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ attr_group->attrs = attributes;
+ return attr_group;
+}
+
+void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+	/* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
+
+
+/*
+ * Read the LBA associated with a block
+ * returns -1 if the block is erased
+ * returns -2 if an error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+	/* Now check if the two copies of the LBA differ too much */
+ lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
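+
+/*
+ * Worked example of the encoding above (illustration only): for LBA 300
+ * (0x12C), tmp[0] = 0x10 | (300 >> 7) = 0x12 and tmp[1] = (300 << 1) & 0xFF
+ * = 0x58; the 16-bit value 0x1258 has odd parity (5 bits set), so bit 0 of
+ * tmp[1] is set and 0x12/0x59 is stored in both LBA copies. sm_get_lba()
+ * reverses this: (0x59 >> 1) | ((0x12 & 0x07) << 7) = 0x2C | 0x100 = 300.
+ */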
+
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
+ int *zone, int *block, int *boffset)
+{
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ uint8_t ecc[3];
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a sector + oob */
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops;
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+	/* The FTL table can contain -1 (unmapped) entries; these read back as all 0xFF */
+ if (block == -1) {
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+	/* The user might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ won't help anyway */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+	/* Unfortunately, the oob read will _always_ succeed,
+		despite card removal */
+ ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+	/* Test if the sector is marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+ " as bad" , block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (ret == -EBADMSG ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops;
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+ /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
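+/*
+ * Note: for small page NAND the driver uses the raw OOB mode, so the ECC
+ * bytes it computes itself (oob->ecc1/ecc2, see sm_write_block) are
+ * written verbatim.
+ */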
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block, using the data, the LBA and the invalid-sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+ sm_printk("sector %d of block at LBA %d of zone %d"
+ " couldn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ __nand_calculate_ecc(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1);
+
+ __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+ /* If the write fails, try to erase the block */
+ /* This is safe, because we never write to blocks
+ that contain valuable data.
+ This is intended to repair blocks that are marked
+ as erased, but aren't fully erased */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark the whole block as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+ /* This also fails on fake xD cards, but I guess these won't expose
+ any bad blocks till they fail completely */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If the erase succeeds, it updates the free block fifo, otherwise it marks the block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.mtd = mtd;
+ erase.callback = sm_erase_callback;
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+ erase.priv = (u_long)ftl;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd->erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (erase.state == MTD_ERASE_PENDING)
+ wait_for_completion(&ftl->erase_completion);
+
+ if (erase.state != MTD_ERASE_DONE) {
+ sm_printk("erase of block %d in zone %d failed after wait",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+static void sm_erase_callback(struct erase_info *self)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
+ complete(&ftl->erase_completion);
+}
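+/*
+ * Note: mtd->erase() may complete asynchronously; sm_erase_block() then
+ * waits on erase_completion, which the callback above signals.
+ */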
+
+/* Thoroughly test that the block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+ /* First just check that the block doesn't look fishy */
+ /* Only blocks that carry a single LBA, or are sliced into two
+ parts (two different LBAs), are accepted */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ /* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+ /* If the block is sliced (usually partially erased), erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
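+
+/* Table of reported CHS geometry, keyed by media size in MiB;
+   used by sm_getgeo() to report the disk geometry */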
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
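+/* Leading bytes of the SmartMedia/xD CIS (Card Information Structure),
+   used to recognize a valid CIS sector */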
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out the media parameters.
+ * This should ideally be based on the NAND id, but for now the device size is enough */
+int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+ /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+ /* 2 MiB flash SmartMedia (256 byte pages)*/
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+ /* The minimum xD size is 16 MiB. All xD cards have standard zone
+ sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
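+
+ /* Example: a 64 MiB xD card gets 4 zones of 1024 blocks each
+ (1000 usable LBAs per zone) with 16 KiB (32 sector) blocks */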
+
+ /* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use these functions for IO */
+ if (!mtd->read_oob || !mtd->write_oob)
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size: %d MiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for the first valid block within the first
+ (zone_size - max_lba) blocks of zone 0 */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+ /* Skip blocks up to and including the CIS block */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
+ return -EIO;
+
+ /* Test to see if the block is erased. It is enough to test the
+ first sector, because an erase happens in one shot */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+ /* If the block is marked as bad, skip it */
+ /* This assumes we can trust the first sector */
+ /* However, the way the block valid status is defined ensures
+ a very low probability of failure here */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+ /* An invalid LBA means that the block is damaged. */
+ /* We could try to erase it, or mark it as bad, but
+ let's leave that to a recovery application */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ just put the block in the FTL table */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid */
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Now test the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ /* If both blocks are valid and share the same LBA, they hold
+ different versions of the same data. It is not known which one
+ is more recent, thus just erase one of them
+ */
+ sm_printk("both blocks are valid, erasing the latter");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+ /* No free sectors means that the zone is heavily damaged; writes
+ won't work, but it can still be (partially) read */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize the first block we write to by rotating the free-sector
+ fifo a random number of times, spreading wear across mounts */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize the FTL mapping for one zone */
+struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
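+
+/*
+ * The FTL keeps a single-block write cache: sm_cache_put() copies a
+ * 512 byte sector into cache_data and clears its bit in
+ * cache_data_invalid_bitmap. sm_cache_flush() later fills in the
+ * still-missing sectors from the old block, writes everything to a
+ * fresh block taken from the free-sector fifo, updates the
+ * LBA -> physical table and erases the old block.
+ */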
+
+/* Initialize the one block cache */
+void sm_cache_init(struct sm_ftl *ftl)
+{
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+ /* If there are no spare blocks, we could still continue by
+ erasing/writing the current block, but for such worn-out media
+ it isn't worth the trouble and the danger */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+ /* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
+
+
+/* Cache flush timer; fires cache_timeout ms after the last write */
+static void sm_cache_flush_timer(unsigned long data)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)data;
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return;
+}
+
+/* ---------------- outside interface -------------------------------------- */
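+
+/*
+ * Each request below operates on a single 512 byte sector;
+ * "sect_no << 9" converts the sector number into the byte offset
+ * that sm_break_offset() splits into zone/block/boffset.
+ */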
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+ /* Translate the block and return if it doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, zone_num, block, boffset;
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+ /* No need for the flush timer to be running now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* If the requested block is not the cached one, flush the cache first */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static int sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ mutex_lock(&ftl->mutex);
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return 0;
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+ init_completion(&ftl->erase_completion);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+ ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
+ GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+ /* Allocate the cache*/
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+ /* Register device*/
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = -1,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static __init int sm_module_init(void)
+{
+ int error = 0;
+ cache_flush_workqueue = create_freezeable_workqueue("smflush");
+
+ /* workqueue creation returns NULL on failure, not an ERR_PTR */
+ if (!cache_flush_workqueue)
+ return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 00000000000..e30e48e7f63
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c, which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ int initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ int readonly; /* is FS readonly */
+ int unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+ long unsigned int cache_data_invalid_bitmap;
+ int cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Async erase stuff */
+ struct completion erase_completion;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
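+/* 3 partition bits -> 1 << 3 = 8 minor numbers (whole disk + partitions)
+   per FTL device */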
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+
+static void sm_erase_callback(struct erase_info *self);
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 3f67e00d98e..81c4ecdc11f 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 921a85df919..6bc1b8276c6 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -480,12 +480,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 7107fccbc7d..afe71aa15c4 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -141,12 +141,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 56ca62bb96b..161feeb7b8b 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -295,12 +295,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3854afec56d..531625fc925 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -221,12 +221,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 700237a3d12..11204e8aab5 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -354,12 +354,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 0a8c7ea764a..f702a163d8d 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,7 +27,7 @@ config MTD_UBI_WL_THRESHOLD
The default value should be OK for SLC NAND flashes, NOR flashes and
other flashes which have eraseblock life-cycle 100000 or more.
However, in case of MLC NAND flashes which typically have eraseblock
- life-cycle less then 10000, the threshold should be lessened (e.g.,
+ life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
config MTD_UBI_BEB_RESERVE
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 55c726dde94..13b05cb33b0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -42,7 +42,6 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
@@ -50,6 +49,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
@@ -832,34 +837,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
- * @n: reboot notifier object
- * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
- * @cmd: pointer to command string for RESTART2
- *
- * This function stops the UBI background thread so that the flash device
- * remains quiescent when Linux restarts the system. Any queued work will be
- * discarded, but this function will block until do_work() finishes if an
- * operation is already in progress.
- *
- * This function solves a real-life problem observed on NOR flashes when an
- * PEB erase operation starts, then the system is rebooted before the erase is
- * finishes, and the boot loader gets confused and dies. So we prefer to finish
- * the ongoing operation before rebooting.
- */
-static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
- void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, reboot_notifier);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
- ubi_sync(ubi->ubi_num);
- return NOTIFY_DONE;
-}
-
-/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -1016,11 +993,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- /* Flash device priority is 0 - UBI needs to shut down first */
- ubi->reboot_notifier.priority = 1;
- ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
- register_reboot_notifier(&ubi->reboot_notifier);
-
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1091,7 +1063,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
- unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -1241,9 +1212,24 @@ static int __init ubi_init(void)
p->vid_hdr_offs);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
- put_mtd_device(mtd);
ubi_err("cannot attach mtd%d", mtd->index);
- goto out_detach;
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+ * stopped whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
}
}
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 72ebb3f06b8..4dfa6b90c21 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int vol_cdev_fsync(struct file *file, int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 533b1a4b9af..4b979e34b15 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -64,9 +64,9 @@
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 17f287decc3..69fa4ef03c5 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
* eraseblock size, ant the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents is preserved. Returns zero in case of success and a negative error
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents has gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index dc5f688699d..aed19f33b8f 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -231,7 +231,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -452,7 +452,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (cmp_res & 1) {
/*
- * This logical eraseblock is newer then the one
+ * This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(vid_hdr, sv, pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5176d488651..a637f0283ad 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -350,7 +350,6 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
- * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
@@ -436,7 +435,6 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
- struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index cd90ff3b76b..14c10bed94e 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -414,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
- * a. if LEB 0 is OK, it must be containing the most resent data; then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
* b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
@@ -848,7 +848,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out_free;
/*
- * Get sure that the scanning information is consistent to the
+ * Make sure that the scanning information is consistent with the
* information stored in the volume table.
*/
err = check_scanning_info(ubi, si);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f64ddabd4ac..ee7b1d8fbb9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -350,7 +350,7 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @max: highest possible erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * @max and less than @max.
*/
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3ea42ff1765..1776ab61b05 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -480,7 +480,6 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fire ... Trigger xmit. */
outb(AX_XMIT, AX_CMD);
lp->loading = 0;
- dev->trans_start = jiffies;
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
@@ -727,7 +726,6 @@ static void el_receive(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
- return;
}
/**
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323c183..baac246561b 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
return retval;
}
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
static int
el2_open(struct net_device *dev)
{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
- retval = request_irq(*irqp, NULL, 0, "bogus", dev);
- if (retval >= 0) {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
/* Twinkle the interrupt, and check if it's seen. */
- unsigned long cookie = probe_irq_on();
+ seen = false;
+ smp_wmb();
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
- if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */
- ((retval = request_irq(dev->irq = *irqp,
- eip_interrupt, 0,
- dev->name, dev)) == 0))
- break;
- } else {
- if (retval != -EBUSY)
- return retval;
- }
+ msleep(1);
+ free_irq(*irqp, el2_probe_interrupt);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
} while (*++irqp);
+
if (*irqp == 0) {
+ err_disable:
outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
return -EAGAIN;
}
@@ -555,7 +573,6 @@ el2_block_output(struct net_device *dev, int count,
}
blocked:;
outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
/* Read the 4 byte, page aligned 8390 specific header. */
@@ -671,7 +688,6 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
}
blocked:;
outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d63bd..88d766ee0e1 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1055,7 +1055,7 @@ static void elp_timeout(struct net_device *dev)
(stat & ACRF) ? "interrupt" : "command");
if (elp_debug >= 1)
pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
@@ -1093,11 +1093,6 @@ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (elp_debug >= 3)
pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
- /*
- * start the transmit timeout
- */
- dev->trans_start = jiffies;
-
prime_rx(dev);
spin_unlock_irqrestore(&adapter->lock, flags);
netif_start_queue(dev);
@@ -1216,7 +1211,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
unsigned long flags;
@@ -1231,8 +1226,9 @@ static void elp_set_mc_list(struct net_device *dev)
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++],
+ ha->addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1710b..ea9b7a098c9 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
pr_debug("%s", version);
lp = netdev_priv(dev);
- memset(lp, 0, sizeof(*lp));
spin_lock_init(&lp->lock);
lp->base = ioremap(dev->mem_start, RX_BUF_END);
if (!lp->base) {
@@ -505,7 +504,7 @@ static void el16_tx_timeout (struct net_device *dev)
outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -529,7 +528,6 @@ static netdev_tx_t el16_send_packet (struct sk_buff *skb,
hardware_send_packet (dev, buf, skb->len, length - skb->len);
- dev->trans_start = jiffies;
/* Enable the 82586 interrupt input. */
outb (0x84, ioaddr + MISC_CTRL);
@@ -553,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
void __iomem *shmem;
if (dev == NULL) {
- pr_err("%s: net_interrupt(): irq %d for unknown device.\n",
- dev->name, irq);
+ pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
@@ -766,7 +763,6 @@ static void init_82586_mem(struct net_device *dev)
if (net_debug > 4)
pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
readw(shmem+iSCB_STATUS));
- return;
}
static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index ab9bb3c5200..91abb965fb4 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -807,7 +807,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -868,7 +868,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) > 1536)
netif_start_queue(dev);
else
@@ -1038,7 +1037,6 @@ static void update_stats(struct net_device *dev)
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
- return;
}
static int
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 2e17837be54..3bba835f1a2 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -958,7 +958,6 @@ static void corkscrew_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name);
#endif /* AUTOMEDIA */
- return;
}
static void corkscrew_timeout(struct net_device *dev)
@@ -992,7 +991,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
@@ -1055,7 +1054,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
prev_entry->status &= ~0x80000000;
netif_wake_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/* Put out the doubleword header... */
@@ -1091,7 +1089,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif /* bus master */
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -1518,7 +1515,6 @@ static void update_stats(int ioaddr, struct net_device *dev)
/* We change back to window 7 (not 1) with the Vortex. */
EL3WINDOW(7);
- return;
}
/* This new version of set_rx_mode() supports v1.4 kernels.
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079cc49..a7b0e5e43a5 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
break;
}
- memset(pr, 0, sizeof(struct priv));
pr->slot = slot;
pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
@@ -624,7 +623,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +786,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
@@ -1152,7 +1152,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->scb->cmd = CUC_START;
p->xmit_cmds[0]->cmd_status = 0;
elmc_attn586();
- dev->trans_start = jiffies;
if (!i) {
dev_kfree_skb(skb);
}
@@ -1176,7 +1175,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
#endif
@@ -1190,7 +1188,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
= make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b147ec9..38395dfa496 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc;
+ struct netdev_hw_addr *ha;
if(retry==0)
lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
block[0]=netdev_mc_count(dev);
bp=block+2;
- netdev_for_each_mc_addr(dmc, dev) {
- memcpy(bp, dmc->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(bp, ha->addr, 6);
bp+=6;
}
if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdbe66e..d75803e6e52 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1855,7 +1855,6 @@ leave_media_alone:
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
iowrite16(FakeIntr, ioaddr + EL3_CMD);
- return;
}
static void vortex_tx_timeout(struct net_device *dev)
@@ -1917,7 +1916,7 @@ static void vortex_tx_timeout(struct net_device *dev)
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
@@ -2063,7 +2062,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -2129,8 +2127,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len-skb->data_len, PCI_DMA_TODEVICE));
- vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+ skb_headlen(skb), PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2174,7 +2172,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2800,7 +2797,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
}
EL3WINDOW(old_window >> 13);
- return;
}
static int vortex_nway_reset(struct net_device *dev)
@@ -3122,7 +3118,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
- return;
}
/* ACPI: Advanced Configuration and Power Interface. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135723b..903bcb3ef5b 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -262,7 +262,7 @@ static int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
printk ("Lance restart=%d\n", status);
@@ -526,7 +526,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -574,7 +574,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
spin_lock_irqsave (&lp->devlock, flags);
@@ -594,7 +593,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -609,8 +608,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -620,7 +619,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce3eaa..9c149750e2b 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -882,7 +882,6 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -910,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
@@ -1225,8 +1224,6 @@ static void cp_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
-
- return;
}
#ifdef BROKEN
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f0d23de3296..4ba72933f0d 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1716,8 +1716,6 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
- dev->trans_start = jiffies;
-
tp->cur_tx++;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
@@ -2503,11 +2501,11 @@ static void __set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db4886..dd8dc15556c 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1050,7 +1050,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1060,7 +1060,6 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
dev->name, skb->len, skb->data));
@@ -1542,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1552,10 +1551,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = CmdMulticastList;
cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, ETH_ALEN);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
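
Another pattern that repeats through the series: dev->trans_start = jiffies disappears from the start_xmit() fast paths, since the core already records a per-queue transmit timestamp when it hands packets to the driver, but the assignment is kept (now tagged /* prevent tx timeout */) in timeout and reset handlers so the watchdog does not fire again right after the hardware restart. A hedged sketch of where the assignment survives, with hypothetical hardware helpers standing in for the chip-specific work:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

static void example_reset_hw(struct net_device *dev)
{
	/* chip-specific reset elided in this sketch */
}

static void example_queue_to_hw(struct net_device *dev, struct sk_buff *skb)
{
	/* pretend the hardware consumed the buffer */
	dev_kfree_skb(skb);
}

static void example_tx_timeout(struct net_device *dev)
{
	dev->stats.tx_errors++;
	example_reset_hw(dev);
	dev->trans_start = jiffies;	/* prevent an immediate repeat timeout */
	netif_wake_queue(dev);
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	example_queue_to_hw(dev, skb);
	/* no dev->trans_start update needed here any more */
	return NETDEV_TX_OK;
}
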
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c727f8..2decc597bda 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
This is the driver for the onboard card of the Xtensa XT2000 board.
config MIPS_AU1X00_ENET
- bool "MIPS AU1000 Ethernet support"
+ tristate "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
select PHYLIB
select CRC32
@@ -887,6 +887,13 @@ config BFIN_MAC_RMII
help
Use Reduced PHY MII Interface
+config BFIN_MAC_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
+ default y
+ help
+ To support the IEEE 1588 Precision Time Protocol (PTP), select y here
+
config SMC9194
tristate "SMC 9194 support"
depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -1453,20 +1460,6 @@ config FORCEDETH
To compile this driver as a module, choose M here. The module
will be called forcedeth.
-config FORCEDETH_NAPI
- bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
- depends on FORCEDETH && EXPERIMENTAL
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
@@ -1916,6 +1909,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2428,8 @@ config MV643XX_ETH
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on PPC || MICROBLAZE
select PHYLIB
- depends on PPC_DCR_NATIVE
help
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2612,11 @@ config EHEA
will be called ehea.
config ENIC
- tristate "Cisco 10G Ethernet NIC support"
+ tristate "Cisco VIC Ethernet NIC Support"
depends on PCI && INET
select INET_LRO
help
- This enables the support for the Cisco 10G Ethernet card.
+ This enables the support for the Cisco VIC Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2856,8 @@ source "drivers/ieee802154/Kconfig"
source "drivers/s390/net/Kconfig"
+source "drivers/net/caif/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -3180,17 +3176,12 @@ config PPPOATM
config PPPOL2TP
tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && INET
+ depends on EXPERIMENTAL && L2TP && PPP
help
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
- This kernel component handles only L2TP data packets: a
- userland daemon handles L2TP the control protocol (tunnel
- and session setup). One such daemon is OpenL2TP
- (http://openl2tp.sourceforge.net/).
-
config SLIP
tristate "SLIP (serial line) support"
---help---
@@ -3277,15 +3268,14 @@ config NET_FC
"SCSI generic support".
config NETCONSOLE
- tristate "Network console logging support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
- bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)"
- depends on NETCONSOLE && SYSFS && EXPERIMENTAL
+ bool "Dynamic reconfiguration of logging targets"
+ depends on NETCONSOLE && SYSFS
select CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 12b280afdd5..0a0512ae77d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
-obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -292,5 +292,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
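
The Kconfig and Makefile hunks above are two sides of the same mechanism: a tristate option (MIPS_AU1X00_ENET becoming tristate, CAIF gaining an obj-$(CONFIG_CAIF) line) can be built in or built as a module, and kbuild exposes that choice to C as CONFIG_<NAME> for =y and CONFIG_<NAME>_MODULE for =m. A minimal illustration; the option name comes from the hunks, the helper around it is hypothetical:

#include <linux/kernel.h>

/* Hypothetical: report how the AU1X00 Ethernet option was configured. */
static int example_au1x00_enet_configured(void)
{
#if defined(CONFIG_MIPS_AU1X00_ENET)
	return 1;	/* =y: driver built into the kernel image */
#elif defined(CONFIG_MIPS_AU1X00_ENET_MODULE)
	return 2;	/* =m: driver built as a module */
#else
	return 0;	/* option disabled */
#endif
}
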
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2..f142cc21e45 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -525,7 +525,7 @@ static inline int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance (lp);
@@ -588,7 +588,6 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
local_irq_restore(flags);
@@ -602,7 +601,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -617,8 +616,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -628,7 +627,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast (struct net_device *dev)
@@ -674,6 +672,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_AMERISTAR_A2065 },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
static struct zorro_driver a2065_driver = {
.name = "a2065",
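
The only functional change in the a2065 hunk (and the matching ariadne one further down) is MODULE_DEVICE_TABLE(zorro, ...): exporting the ID table lets depmod generate module aliases, so the driver can be loaded automatically when a matching Zorro board is detected instead of requiring a manual modprobe. A short sketch of the pattern; the table name is made up, the product ID is the one from the hunk:

#include <linux/module.h>
#include <linux/zorro.h>

static struct zorro_device_id example_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_AMERISTAR_A2065 },		/* one entry per supported board */
	{ 0 }					/* terminator */
};
MODULE_DEVICE_TABLE(zorro, example_zorro_tbl);
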
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index eac73382c08..b9115a776fd 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -307,8 +307,6 @@ static void ac_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd94df..b9a591604e5 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
- mapping = pci_unmap_addr(ringp,mapping);
+ mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
- pci_unmap_addr(rip, mapping),
+ dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
do {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ if (dma_unmap_len(info, maplen)) {
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
+ if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, skb->len);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
@@ -2553,8 +2549,8 @@ restart:
} else {
info->skb = NULL;
}
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, frag->size);
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
@@ -2923,8 +2919,6 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
dest += tsize;
size -= tsize;
}
-
- return;
}
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b927ff..0681da7e875 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -600,8 +600,8 @@ struct ring_info {
*/
struct tx_ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
- DECLARE_PCI_UNMAP_LEN(maplen)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
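
The acenic hunks above move from the pci_unmap_* bookkeeping macros to the generic dma_unmap_* family, and the tx-completion path now tests dma_unmap_len(info, maplen) rather than the saved address to decide whether a buffer is still mapped (a DMA address of 0 can be perfectly valid). A compact sketch of the pattern, with a hypothetical structure and helper names:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_tx_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* expands to nothing if the arch keeps no map state */
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_save_mapping(struct example_tx_info *info,
				 dma_addr_t mapping, unsigned int len)
{
	dma_unmap_addr_set(info, mapping, mapping);
	dma_unmap_len_set(info, maplen, len);
}

static void example_unmap(struct pci_dev *pdev, struct example_tx_info *info)
{
	if (dma_unmap_len(info, maplen)) {	/* the length, not the address, marks "mapped" */
		pci_unmap_page(pdev, dma_unmap_addr(info, mapping),
			       dma_unmap_len(info, maplen), PCI_DMA_TODEVICE);
		dma_unmap_len_set(info, maplen, 0);
	}
}
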
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a8f42..585c25f4b60 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1339,8 +1339,6 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
writel( VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0);
- dev->trans_start = jiffies;
-
if(amd8111e_tx_queue_avail(lp) < 0){
netif_stop_queue(dev);
}
@@ -1376,7 +1374,7 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
int bit_num;
@@ -1407,8 +1405,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mc_ptr, dev) {
- bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 1437f5d1212..2fe60f16810 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -521,7 +521,6 @@ apne_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static irqreturn_t apne_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 6f8d6206b5c..748c9f526e7 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -593,8 +593,6 @@ static void cops_load (struct net_device *dev)
tangent_wait_reset(ioaddr);
inb(ioaddr); /* Clear initial ready signal. */
}
-
- return;
}
/*
@@ -701,8 +699,6 @@ static void cops_poll(unsigned long ltdev)
/* poll 20 times per second */
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
-
- return;
}
/*
@@ -866,7 +862,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -919,9 +915,8 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
/* Done sending packet, update counters and cleanup. */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/*
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 6af65b656f3..adc07551739 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -641,7 +641,6 @@ done:
inb_p(base+7);
inb_p(base+7);
}
- return;
}
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d8f02930375..a746ba272f0 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -654,7 +654,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
}
}
retval = NETDEV_TX_OK;
- dev->trans_start = jiffies;
lp->next_tx = txbuf;
} else {
retval = NETDEV_TX_BUSY;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2c712af6c26..48a1dbf01e6 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
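
The com20020-pci hunk narrows the two 0x10B5 entries from PCI_ANY_ID to explicit subsystem IDs: the PLX 9030/9050 parts are generic PCI bridges used by many unrelated boards, so matching on vendor/device alone would claim hardware the driver cannot drive. A small sketch of the two matching styles; the narrowed IDs are the ones from the hunk, the driver_data values are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	/* match any board built around vendor 0x1571, device 0xa204 */
	{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	/* match only the PLX 9030 bridge carrying subsystem 0x10B5:0x2978 */
	{ 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
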
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f..39214e51245 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
static struct zorro_driver ariadne_driver = {
.name = "ariadne",
@@ -676,8 +677,6 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
lance->RDP = INEA|TDMD;
- dev->trans_start = jiffies;
-
if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
netif_stop_queue(dev);
priv->tx_full = 1;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5e27b..8c496fb1ac9 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
memset(multi_hash, 0xff, sizeof(multi_hash));
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(multi_hash, 0x00, sizeof(multi_hash));
- netdev_for_each_mc_addr(dmi, dev)
- am79c961_mc_hash(dmi->dmi_addr, multi_hash);
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -469,7 +469,6 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&priv->chip_lock, flags);
/*
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b5479b5..e07b314ed8f 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
*/
static void at91ether_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -824,7 +824,6 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set length of the packet in the Transmit Control register */
at91_emac_write(AT91_EMAC_TCR, skb->len);
- dev->trans_start = jiffies;
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cd17d09f385..4a5ec9470aa 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -374,8 +374,6 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
-
spin_lock_irq(&ep->tx_pending_lock);
ep->tx_pending++;
if (ep->tx_pending == TX_QUEUE_ENTRIES)
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e47c0d96285..b17ab5153f5 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -736,7 +736,6 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
local_irq_restore(flags);
/* handle transmit */
- dev->trans_start = jiffies;
/* check to see if we have room for a full sized ether frame */
tmp = priv(dev)->tx_head;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index d9de9bce239..1361b7367c2 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -529,7 +529,6 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY; /* unable to queue */
}
- dev->trans_start = jiffies;
ptr = 0x600 * priv(dev)->tx_head;
priv(dev)->tx_head = next_ptr;
next_ptr *= 0x600;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b098b8b..24df0325090 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -708,7 +708,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* NPE firmware pads short frames with zeros internally */
wmb();
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -736,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
@@ -749,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev)
memset(diffs, 0, ETH_ALEN);
addr = NULL;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!addr)
- addr = mclist->dmi_addr; /* first MAC address */
+ addr = ha->addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
- diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ diffs[i] |= addr[i] ^ ha->addr[i];
}
for (i = 0; i < ETH_ALEN; i++) {
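
The ixp4xx_eth hunk keeps the driver's trick of folding the whole multicast list into one address/mask pair: the first address is the base and diffs accumulates every bit in which a later address differs, so a mask derived from ~diffs keeps only the bits common to all addresses. A small sketch of that computation with hypothetical names; the mask convention (1 = bit must match) is an assumption of this sketch, not something the hunk shows:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical: derive a base address and match mask for a simple HW filter. */
static void example_mc_base_and_mask(struct net_device *dev,
				     u8 base[ETH_ALEN], u8 mask[ETH_ALEN])
{
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN];
	const u8 *first = NULL;
	int i;

	memset(diffs, 0, ETH_ALEN);

	netdev_for_each_mc_addr(ha, dev) {
		if (!first)
			first = ha->addr;	/* first MAC address is the base */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= first[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		base[i] = first ? (first[i] & ~diffs[i]) : 0;
		mask[i] = ~diffs[i];		/* assumed: 1 = bit must match */
	}
}
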
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f7380..54c6d849cf2 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
{
u32 low, high;
int i;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(dmi, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
- (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
- high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
+ low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+ (ha->addr[4] << 8) | (ha->addr[5]);
+ high = (ha->addr[0] << 8) | (ha->addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
@@ -1302,8 +1302,6 @@ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++ksp->tx_ring_used == MAX_TX_DESC)
netif_stop_queue(ndev);
- ndev->trans_start = jiffies;
-
/* Kick the TX DMA in case it decided to go IDLE */
ks8695_writereg(ksp, KS8695_DTSC, 0);
@@ -1472,7 +1470,6 @@ ks8695_probe(struct platform_device *pdev)
/* Configure our private structure a little */
ksp = netdev_priv(ndev);
- memset(ksp, 0, sizeof(struct ks8695_priv));
ksp->dev = &pdev->dev;
ksp->ndev = ndev;
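
The ks8695 hunk also drops a memset() of the private area right after netdev_priv(): alloc_etherdev() hands back the private region already zeroed, so clearing it again is redundant. A minimal sketch, assuming a made-up private structure:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv {
	struct net_device *ndev;
	int state;
};

static struct net_device *example_alloc(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;

	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);	/* already zeroed by alloc_etherdev() */
	priv->ndev = ndev;
	return ndev;
}
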
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1dfb1..2e852463382 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -483,7 +483,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -497,7 +497,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -634,8 +634,6 @@ static int w90p910_send_frame(struct net_device *dev,
txbd = &ether->tdesc->desclist[ether->cur_tx];
- dev->trans_start = jiffies;
-
if (txbd->mode & TX_OWEN_DMA)
netif_stop_queue(dev);
@@ -744,7 +742,6 @@ static void netdev_rx(struct net_device *dev)
return;
}
- skb->dev = dev;
skb_reserve(skb, 2);
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb9ae6..93185f5f09a 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -583,7 +583,7 @@ static void net_tx_timeout (struct net_device *dev)
outb (0x00, ioaddr + TX_START);
outb (0x03, ioaddr + COL16CNTL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
lp->tx_started = 0;
lp->tx_queue_ready = 1;
@@ -636,7 +636,6 @@ static netdev_tx_t net_send_packet (struct sk_buff *skb,
outb (0x80 | lp->tx_queue, ioaddr + TX_START);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue (dev);
} else if (lp->tx_queue_len < 4096 - 1502)
@@ -796,7 +795,6 @@ net_rx(struct net_device *dev)
printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
dev->name, inb(ioaddr + RX_MODE), i);
}
- return;
}
/* The inverse routine to net_open(). */
@@ -847,12 +845,12 @@ set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
}
outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
@@ -870,7 +868,6 @@ set_rx_mode(struct net_device *dev)
outw(saved_bank, ioaddr + CONFIG_0);
}
spin_unlock_irqrestore (&lp->lock, flags);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index a8686bfec7a..b57d7dee389 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -767,8 +767,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
@@ -836,7 +836,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Trigger an immediate send poll. */
DREG = CSR0_INEA | CSR0_TDMD;
- dev->trans_start = jiffies;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
TMD1_OWN_HOST)
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 32339243d61..7c521508313 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -263,8 +263,6 @@ static void atl1c_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531a02d..1c3c046d5f3 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -317,8 +317,6 @@ static void atl1c_common_task(struct work_struct *work)
if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
atl1c_check_link_status(adapter);
-
- return;
}
@@ -354,7 +352,7 @@ static void atl1c_set_multi(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data;
u32 hash_value;
@@ -377,8 +375,8 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
atl1c_hash_set(hw, hash_value);
}
}
@@ -1817,7 +1815,6 @@ rrs_checked:
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
- skb->dev = netdev;
atl1c_rx_checksum(adapter, skb, rrs);
if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
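
Several hunks in this series (atl1c above, and atl1e, atl2 and w90p910 elsewhere) delete an skb->dev = netdev assignment from the receive path: eth_type_trans() already stores the receiving device in the skb while it works out the protocol, so the explicit assignment was dead weight. A short sketch of the trimmed hand-off, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hypothetical rx completion: hand one filled buffer to the stack. */
static void example_rx_one(struct net_device *netdev, struct sk_buff *skb,
			   unsigned int length)
{
	skb_put(skb, length);
	/* eth_type_trans() sets both skb->protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);		/* or netif_receive_skb() from a NAPI poll */
}
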
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index ffd696ee7c8..6943a6c3b94 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -338,8 +338,6 @@ static void atl1e_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae468a..1acea5774e8 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data = 0;
u32 hash_value;
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1e_hash_mc_addr(hw, ha->addr);
atl1e_hash_set(hw, hash_value);
}
}
@@ -707,8 +707,6 @@ static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_vir_addr = NULL;
adapter->rx_ring.desc = NULL;
rwlock_init(&adapter->tx_ring.tx_lock);
-
- return;
}
/*
@@ -905,8 +903,6 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
/* Load all of base address above */
AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
-
- return;
}
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
@@ -950,7 +946,6 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
<< TXQ_CTRL_NUM_TPD_BURST_SHIFT)
| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
- return;
}
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
@@ -1004,7 +999,6 @@ static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
- return;
}
static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
@@ -1024,7 +1018,6 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
- return;
}
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
@@ -1428,7 +1421,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
"Memory squeeze, deferring packet\n");
goto skip_pkt;
}
- skb->dev = netdev;
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1672,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
- u16 buf_len = skb->len - skb->data_len;
+ u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
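
atl1e (above) and atl1 (below) switch to skb_headlen(skb) for the size of the linear buffer instead of spelling out skb->len - skb->data_len; the two are identical, the helper just states the intent. A small sketch of treating the linear part and the page fragments as separate DMA segments, with a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical: count the DMA segments a tx skb will need. */
static unsigned int example_count_segments(struct sk_buff *skb)
{
	unsigned int head = skb_headlen(skb);	/* == skb->len - skb->data_len */
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	/* one segment for the linear data (if any) plus one per page fragment */
	return (head ? 1 : 0) + nfrags;
}
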
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd8208f60..63b9ba0cc67 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1830,8 +1830,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
adapter->hw_csum_good++;
return;
}
-
- return;
}
/*
@@ -2347,7 +2345,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- int len = skb->len;
+ int len;
int tso;
int count = 1;
int ret_val;
@@ -2359,7 +2357,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
unsigned int f;
unsigned int proto_hdr_len;
- len -= skb->data_len;
+ len = skb_headlen(skb);
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
@@ -3390,7 +3388,6 @@ static void atl1_get_wol(struct net_device *netdev,
wol->wolopts = 0;
if (adapter->wol & ATLX_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
- return;
}
static int atl1_set_wol(struct net_device *netdev,
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f24f9b..8da87383fb3 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute mc addresses' hash value, and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl2_hash_mc_addr(hw, ha->addr);
atl2_hash_set(hw, hash_value);
}
}
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
netdev->stats.rx_dropped++;
break;
}
- skb->dev = netdev;
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -893,7 +892,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- netdev->trans_start = jiffies;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306352e..f979ea2d6d3 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct atlx_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value, and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atlx_hash_mc_addr(hw, ha->addr);
atlx_hash_set(hw, hash_value);
}
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d44dc4..bd2f9d331da 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -547,7 +547,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
@@ -586,7 +586,6 @@ static netdev_tx_t atp_send_packet(struct sk_buff *skb,
write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
write_reg_high(ioaddr, IMR, ISRh_RxErr);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -803,7 +802,6 @@ static void net_rx(struct net_device *dev)
done:
write_reg(ioaddr, CMR1, CMR1_NextPkt);
lp->last_rx_time = jiffies;
- return;
}
static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
@@ -882,11 +880,11 @@ static void set_rx_mode_8012(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b87b0..ece6128bef1 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
/*
* Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-static void enable_mac(struct net_device *dev, int force_reset)
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: read_MII busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "read_MII busy timeout!!\n");
return -1;
}
}
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_write busy timeout!!\n");
return;
}
}
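
The au1000_eth conversion above swaps printk(KERN_ERR "%s: ...", dev->name, ...) for netdev_err()/netdev_warn()/netdev_info(), which prefix the message with the device and interface names themselves, and uses dev_err(&pdev->dev, ...) in probe paths where the netdev name is not yet meaningful. A minimal sketch of the before/after shape; the messages are taken from the hunks, the wrapper function is hypothetical:

#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void example_report(struct net_device *dev, struct platform_device *pdev)
{
	/* old style: the interface name had to be passed by hand */
	printk(KERN_ERR "%s: no PHY found\n", dev->name);

	/* new style: the helpers add the device and interface names */
	netdev_err(dev, "no PHY found\n");
	dev_err(&pdev->dev, "failed to retrieve IRQ\n");
}
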
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
* _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return 0;
}
-static void hard_stop(struct net_device *dev)
+static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: hard stop\n", dev->name);
+ netif_dbg(aup, drv, dev, "hard stop\n");
aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
}
-static void enable_rx_tx(struct net_device *dev)
+static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
if (phydev->link && (aup->old_speed != phydev->speed)) {
- // speed changed
+ /* speed changed */
- switch(phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
case SPEED_100:
break;
default:
- printk(KERN_WARNING
- "%s: Speed (%d) is not 10/100 ???\n",
- dev->name, phydev->speed);
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
break;
}
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
}
if (phydev->link && (aup->old_duplex != phydev->duplex)) {
- // duplex mode changed
+ /* duplex mode changed */
/* switching duplex mode requires to disable rx and tx! */
- hard_stop(dev);
+ au1000_hard_stop(dev);
if (DUPLEX_FULL == phydev->duplex)
aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
| MAC_DISABLE_RX_OWN);
au_sync_delay(1);
- enable_rx_tx(dev);
+ au1000_enable_rx_tx(dev);
aup->old_duplex = phydev->duplex;
status_change = 1;
}
- if(phydev->link != aup->old_link) {
- // link state changed
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
if (!phydev->link) {
/* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
-static int mii_probe (struct net_device *dev)
+static int au1000_mii_probe (struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
if (aup->phy_addr)
phydev = aup->mii_bus->phy_map[aup->phy_addr];
else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ netdev_info(dev, "using PHY-less setup\n");
return 0;
} else {
int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
/* try harder to find a PHY */
if (!phydev && (aup->mac_id == 1)) {
/* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ dev_info(&dev->dev, "no PHY found on MAC1, "
"let's see if it's attached to MAC0...\n");
/* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
}
if (!phydev) {
- printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
aup->old_duplex = -1;
aup->phy_dev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1000_private *aup)
+static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
{
db_dest_t *pDB;
pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
db_dest_t *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
aup->pDBfree = pDB;
}
-static void reset_mac_unlocked(struct net_device *dev)
+static void au1000_reset_mac_unlocked(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
int i;
- hard_stop(dev);
+ au1000_hard_stop(dev);
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
}
-static void reset_mac(struct net_device *dev)
+static void au1000_reset_mac(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
unsigned long flags;
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: reset mac, aup %x\n",
- dev->name, (unsigned)aup);
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
* these are not descriptors sitting in memory.
*/
static void
-setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
int i;
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->regdump_len = 0;
}
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
static const struct ethtool_ops au1000_ethtool_ops = {
.get_settings = au1000_get_settings,
.set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
};
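
The new msglevel ethtool hooks pair with the aup->msg_enable field set later in the probe: verbose au1000_debug printks become netif_dbg()/netif_info() calls gated by NETIF_MSG_* bits, and those bits can be adjusted at run time through ethtool's msglvl setting. A sketch of the pattern with a hypothetical private structure:

#include <linux/netdevice.h>

struct example_msg_priv {
	u32 msg_enable;			/* NETIF_MSG_* bitmask */
};

static void example_init_msg(struct net_device *dev, int debug)
{
	struct example_msg_priv *priv = netdev_priv(dev);

	/* honour a module "debug" parameter, otherwise use the listed defaults */
	priv->msg_enable = netif_msg_init(debug,
			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
}

static void example_link_change(struct net_device *dev, int up)
{
	struct example_msg_priv *priv = netdev_priv(dev);

	/* printed only when NETIF_MSG_LINK is set in msg_enable */
	netif_dbg(priv, link, dev, "link %s\n", up ? "up" : "down");
}
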
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
int i;
u32 control;
- if (au1000_debug > 4)
- printk("%s: au1000_init\n", dev->name);
+ netif_dbg(aup, hw, dev, "au1000_init\n");
/* bring the device out of reset */
- enable_mac(dev, 1);
+ au1000_enable_mac(dev, 1);
spin_lock_irqsave(&aup->lock, flags);
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static inline void update_rx_stats(struct net_device *dev, u32 status)
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
struct net_device_stats *ps = &dev->stats;
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
ps->rx_crc_errors++;
if (status & RX_COLL)
ps->collisions++;
- }
- else
+ } else
ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
db_dest_t *pDB;
u32 frmlen;
- if (au1000_debug > 5)
- printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
prxd = aup->rx_dma_ring[aup->rx_head];
buff_stat = prxd->buff_stat;
while (buff_stat & RX_T_DONE) {
status = prxd->status;
pDB = aup->rx_db_inuse[aup->rx_head];
- update_rx_stats(dev, status);
+ au1000_update_rx_stats(dev, status);
if (!(status & RX_ERROR)) {
/* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = dev_alloc_skb(frmlen + 2);
if (skb == NULL) {
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
- }
- else {
+ } else {
if (au1000_debug > 4) {
if (status & RX_MISSED_FRAME)
printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
return 0;
}
-static void update_tx_stats(struct net_device *dev, u32 status)
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
ps->tx_errors++;
ps->tx_aborted_errors++;
}
- }
- else {
+ } else {
ps->tx_errors++;
ps->tx_aborted_errors++;
if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
ptxd = aup->tx_dma_ring[aup->tx_tail];
while (ptxd->buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->buff_stat &= ~TX_T_DONE;
ptxd->len = 0;
au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
int retval;
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: open: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
- if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
- dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
- if ((retval = au1000_init(dev))) {
- printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
free_irq(dev->irq, dev);
return retval;
}
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
netif_start_queue(dev);
- if (au1000_debug > 4)
- printk("%s: open: Initialization done.\n", dev->name);
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
return 0;
}
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
unsigned long flags;
struct au1000_private *const aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: close: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
if (aup->phy_dev)
phy_stop(aup->phy_dev);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
/* stop the device */
netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
db_dest_t *pDB;
int i;
- if (au1000_debug > 5)
- printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
- dev->name, (unsigned)aup, skb->len,
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
skb->data, aup->tx_head);
ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
- }
- else if (buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->len = 0;
}
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i=skb->len; i<ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++) {
((char *)pDB->vaddr)[i] = 0;
}
ptxd->len = ETH_ZLEN;
- }
- else
+ } else
ptxd->len = skb->len;
ps->tx_packets++;
@@ -925,7 +924,6 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
au_sync();
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -935,10 +933,10 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
*/
static void au1000_tx_timeout(struct net_device *dev)
{
- printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
- reset_mac(dev);
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -946,8 +944,7 @@ static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
+ netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +952,14 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
- printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+ netdev_info(dev, "Pass all multicast\n");
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev)
- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +972,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct au1000_private *aup = netdev_priv(dev);
- if (!netif_running(dev)) return -EINVAL;
+ if (!netif_running(dev))
+ return -EINVAL;
- if (!aup->phy_dev) return -EINVAL; // PHY not controllable
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
@@ -996,7 +995,7 @@ static const struct net_device_ops au1000_netdev_ops = {
static int __devinit au1000_probe(struct platform_device *pdev)
{
- static unsigned version_printed = 0;
+ static unsigned version_printed;
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
@@ -1007,40 +1006,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
err = -ENODEV;
goto out;
}
macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!macen) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
err = -ENODEV;
goto out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
}
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
err = -ENOMEM;
goto err_alloc;
}
@@ -1050,6 +1049,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1057,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
goto err_vaddr;
}
@@ -1065,7 +1065,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
@@ -1073,7 +1073,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* Setup some variables for quick register address access */
aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
if (!aup->enable) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
goto err_remap2;
}
@@ -1083,14 +1083,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
- printk(KERN_INFO "%s: No MAC address found\n",
- dev->name);
+ netdev_info(dev, "No MAC address found\n");
/* Use the hard coded MAC addresses */
}
- setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
} else if (pdev->id == 1)
- setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
* Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1103,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
pd = pdev->dev.platform_data;
if (!pd) {
- printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1115,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
"bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
@@ -1124,7 +1123,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus = mdiobus_alloc();
if (aup->mii_bus == NULL) {
- printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
}
@@ -1139,7 +1138,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (aup->mii_bus->irq == NULL)
goto err_out;
- for(i = 0; i < PHY_MAX_ADDR; ++i)
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
/* if known, set corresponding PHY IRQs */
if (aup->phy_static_config)
@@ -1148,11 +1147,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
err = mdiobus_register(aup->mii_bus);
if (err) {
- printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
goto err_mdiobus_reg;
}
- if (mii_probe(dev) != 0)
+ if (au1000_mii_probe(dev) != 0)
goto err_out;
pDBfree = NULL;
@@ -1168,7 +1167,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->pDBfree = pDBfree;
for (i = 0; i < NUM_RX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1176,7 +1175,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1195,17 +1194,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
- dev->name);
+ netdev_err(dev, "Cannot register net device, aborting.\n");
goto err_out;
}
- printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
- dev->name, (unsigned long)base->start, irq);
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
@@ -1217,15 +1215,15 @@ err_out:
/* here we should have a valid dev plus the aup-> register addresses,
* so we can reset the mac properly. */
- reset_mac(dev);
+ au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
}
for (i = 0; i < NUM_TX_DMA; i++) {
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
}
err_mdiobus_reg:
mdiobus_free(aup->mii_bus);
@@ -1261,11 +1259,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++)
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
for (i = 0; i < NUM_TX_DMA; i++)
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
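
The au1000 hunks above convert raw printk(KERN_ERR DRV_NAME ...) calls to dev_err()/netdev_err(), which prefix messages with the device name automatically. A minimal sketch of that logging idiom, assuming a hypothetical platform driver (the probe function, resource index and messages are illustrative, not taken from the driver):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *base;

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!base) {
		/* dev_err() already identifies the device, no DRV_NAME needed */
		dev_err(&pdev->dev, "failed to retrieve base register\n");
		return -ENODEV;
	}

	dev_info(&pdev->dev, "registers at 0x%08lx\n",
		 (unsigned long)base->start);
	return 0;
}
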
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a29b8f..d06ec008fbf 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
#define NUM_TX_BUFFS 4
#define MAX_BUF_SIZE 2048
-#define ETH_TX_TIMEOUT HZ/4
+#define ETH_TX_TIMEOUT (HZ/4)
#define MAC_MIN_PKT_SIZE 64
#define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
};
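
The msg_enable field added to struct au1000_private above is the standard hook for the netif_msg_*() helpers; a hedged sketch of how such a field is typically consumed (the private struct and the message are illustrative):

#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;		/* bitmask filled in at probe time */
};

static void example_report_link(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* netif_msg_link() tests the NETIF_MSG_LINK bit in msg_enable */
	if (netif_msg_link(priv))
		netdev_info(dev, "link is up\n");
}
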
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b718dc60afc..55c9958043c 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -303,7 +303,6 @@ static void ax_block_output(struct net_device *dev, int count,
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
/* definitions for accessing MII/EEPROM interface */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d368a..293f9c16e78 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1014,8 +1014,6 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
out_unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1681,15 +1679,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i, num_ents;
num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == num_ents)
break;
- __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
+ __b44_cam_write(bp, ha->addr, i++ + 1);
}
return i+1;
}
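
The b44 hunk above is one instance of the tree-wide switch from struct dev_mc_list/dmi_addr to struct netdev_hw_addr/ha->addr. The iteration idiom, with a hypothetical per-entry hook standing in for the driver-specific CAM or hash write, looks roughly like this:

#include <linux/netdevice.h>

static void example_program_mc_entry(const u8 *addr, int index)
{
	/* would write addr into hardware filter slot 'index' here */
}

static void example_load_mcast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	netdev_for_each_mc_addr(ha, dev)
		example_program_mc_entry(ha->addr, i++);
}
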
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460aba3ba..faf5add894d 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
skb_put(skb, len);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
- dev->last_rx = jiffies;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -567,7 +565,6 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_bytes += skb->len;
priv->stats.tx_packets++;
- dev->trans_start = jiffies;
ret = NETDEV_TX_OK;
out_unlock:
@@ -605,7 +602,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
struct bcm_enet_priv *priv;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 val;
int i;
@@ -633,14 +630,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
i = 0;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u8 *dmi_addr;
u32 tmp;
if (i == 3)
break;
/* update perfect match registers */
- dmi_addr = mc_list->dmi_addr;
+ dmi_addr = ha->addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
(dmi_addr[4] << 8) | dmi_addr[5];
enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +957,9 @@ static int bcm_enet_open(struct net_device *dev)
/* all set, enable mac and interrupts, start dma engine and
* kick rx dma channel */
wmb();
- enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_ENABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
@@ -1647,7 +1646,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
ret = compute_hw_mtu(priv, dev->mtu);
if (ret)
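
The ENET_CTL hunk above replaces a blind write of the enable mask with a read-modify-write so the other control bits are preserved. A hedged sketch of that pattern (the register offset and bit mask are illustrative, not the bcm63xx layout):

#include <linux/bitops.h>
#include <linux/io.h>

#define EXAMPLE_CTL_REG		0x00	/* hypothetical control register */
#define EXAMPLE_ENABLE_MASK	BIT(0)	/* hypothetical enable bit */

static void example_enable_mac(void __iomem *regs)
{
	u32 val;

	val = readl(regs + EXAMPLE_CTL_REG);	/* keep the other bits */
	val |= EXAMPLE_ENABLE_MASK;		/* set only the enable bit */
	writel(val, regs + EXAMPLE_CTL_REG);
}
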
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b191c9..b46be490cd2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define FW_VER_LEN 32
+#define BE_MAX_VF 32
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
- dma_addr_t bus;
+ DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
bool last_page_user;
};
@@ -281,8 +283,17 @@ struct be_adapter {
u8 port_type;
u8 transceiver;
u8 generation; /* BladeEngine ASIC generation */
+ u32 flash_status;
+ struct completion flash_compl;
+
+ bool sriov_enabled;
+ u32 vf_if_handle[BE_MAX_VF];
+ u32 vf_pmac_id[BE_MAX_VF];
+ u8 base_eq_id;
};
+#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
+
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
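
be.h above switches the rx page bookkeeping to DEFINE_DMA_UNMAP_ADDR(), which compiles the field away on configurations that do not need unmap state. A sketch of the accessor pair, using an illustrative struct name:

#include <linux/dma-mapping.h>
#include <linux/mm_types.h>

struct example_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);	/* may occupy zero bytes */
};

static void example_save_mapping(struct example_rx_page_info *info,
				 dma_addr_t addr)
{
	dma_unmap_addr_set(info, bus, addr);	/* remember for unmap time */
}

static dma_addr_t example_get_mapping(struct example_rx_page_info *info)
{
	return dma_unmap_addr(info, bus);
}
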
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac987c..9d11dbf5e4d 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -59,6 +59,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
CQE_STATUS_COMPL_MASK;
+
+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+ adapter->flash_status = compl_status;
+ complete(&adapter->flash_compl);
+ }
+
if (compl_status == MCC_STATUS_SUCCESS) {
if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
struct be_cmd_resp_get_stats *resp =
@@ -287,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter)
} else {
return 0;
}
- } while (timeout < 20);
+ } while (timeout < 40);
dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
return -1;
@@ -843,7 +850,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
* Uses mbox
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -860,6 +868,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+ req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1120,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_PROMISCUOUS, sizeof(*req));
+ /* In FW versions X.102.149/X.101.487 and later,
+ * only the port setting associated with the
+ * issuing PCI function will take effect
+ */
if (port_num)
req->port1_promiscuous = en;
else
@@ -1157,13 +1170,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
req->interface_id = if_id;
if (netdev) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
@@ -1411,6 +1424,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
int status;
spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1422,6 +1436,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
OPCODE_COMMON_WRITE_FLASHROM);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
@@ -1433,10 +1448,16 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
- status = be_mcc_notify_wait(adapter);
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(12000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
err:
- spin_unlock_bh(&adapter->mcc_lock);
return status;
}
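
The flashrom path above no longer blocks inside be_mcc_notify_wait(); it posts the command, drops the MCC lock and sleeps on a completion that the MCC completion handler signals. A hedged sketch of that split, with an illustrative adapter struct and the same 12-second timeout as the hunk:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct example_adapter {
	struct completion flash_compl;
	int flash_status;
};

/* called from the completion handler once the firmware answers */
static void example_flash_done(struct example_adapter *ad, int status)
{
	ad->flash_status = status;
	complete(&ad->flash_compl);
}

/* called from the issuing path after posting the command */
static int example_flash_wait(struct example_adapter *ad)
{
	if (!wait_for_completion_timeout(&ad->flash_compl,
					 msecs_to_jiffies(12000)))
		return -1;		/* firmware never completed the write */

	return ad->flash_status;
}
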
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9a371..763dc199e33 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id);
+ u32 *if_handle, u32 *pmac_id, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065e789..200e9851590 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -276,8 +276,6 @@ be_get_ethtool_stats(struct net_device *netdev,
data[i] = (et_stats[i].size == sizeof(u64)) ?
*(u64 *)p: *(u32 *)p;
}
-
- return;
}
static void
@@ -466,7 +464,6 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
- return;
}
static int
@@ -496,7 +493,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
&ddrdma_cmd.dma);
if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure \n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
}
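
be_test_ddr_dma() above obtains its test buffer with pci_alloc_consistent(); the allocate/check/free pairing is sketched below (the size parameter and the elided loopback test are illustrative):

#include <linux/pci.h>

static int example_ddr_dma_test(struct pci_dev *pdev, size_t size)
{
	dma_addr_t dma;
	void *va;

	va = pci_alloc_consistent(pdev, size, &dma);
	if (!va) {
		dev_err(&pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	/* ... run the DMA loopback pattern test against va/dma here ... */

	pci_free_consistent(pdev, size, va, dma);
	return 0;
}
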
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b82763..063026de495 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
/* Flashrom related descriptors */
#define IMAGE_TYPE_FIRMWARE 160
#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace80225..54b14272f33 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
+static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
adapter->if_handle, &adapter->pmac_id);
+netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ pci_unmap_single(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ }
+}
static int make_tx_wrbs(struct be_adapter *adapter,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
- u64 busaddr;
- u32 i, copied = 0;
+ dma_addr_t busaddr;
+ int i, copied = 0;
struct pci_dev *pdev = adapter->pdev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
hdr = queue_head_node(txq);
- atomic_add(wrb_cnt, &txq->used);
queue_head_inc(txq);
+ map_head = txq->head;
if (skb->len > skb->data_len) {
- int len = skb->len - skb->data_len;
+ int len = skb_headlen(skb);
busaddr = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
+ map_single = true;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, len);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
busaddr = pci_map_page(pdev, frag->page,
frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
+dma_err:
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(pdev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
* *BEFORE* ringing the tx doorbell, so that we serialize the
* tx compls of the current transmit which'll wake up the queue
*/
+ atomic_add(wrb_cnt, &txq->used);
if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
txq->len) {
netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 0;
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
return;
}
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
+ adapter->vf_pmac_id[vf]);
+
+ status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
+ &adapter->vf_pmac_id[vf]);
+ if (status)
+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
+ mac, vf);
+ return status;
+}
+
static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+ pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
rx_page_info->last_page_user = false;
}
@@ -757,7 +830,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
- return;
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -791,7 +863,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- skb->dev = adapter->netdev;
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -812,8 +883,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
} else {
netif_receive_skb(skb);
}
-
- return;
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
@@ -893,7 +962,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
}
be_rx_stats_update(adapter, pkt_size, num_rcvd);
- return;
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -959,7 +1027,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
}
page_offset = page_info->page_offset;
page_info->page = pagep;
- pci_unmap_addr_set(page_info, bus, page_dmaaddr);
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
@@ -987,8 +1055,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
/* Let be_worker replenish when memory is available */
adapter->rx_post_starved = true;
}
-
- return;
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
@@ -1012,35 +1078,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
struct be_eth_wrb *wrb;
struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
struct sk_buff *sent_skb;
- u64 busaddr;
- u16 cur_index, num_wrbs = 0;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
- cur_index = txq->tail;
- sent_skb = sent_skbs[cur_index];
+ sent_skb = sent_skbs[txq->tail];
BUG_ON(!sent_skb);
- sent_skbs[cur_index] = NULL;
- wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_single(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
- num_wrbs++;
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
queue_tail_inc(txq);
- while (cur_index != last_index) {
+ do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_page(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
+ unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
+ skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
num_wrbs++;
queue_tail_inc(txq);
- }
+ } while (cur_index != last_index);
atomic_sub(num_wrbs, &txq->used);
@@ -1255,6 +1312,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
+ adapter->base_eq_id = adapter->tx_eq.q.id;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1441,7 @@ rx_eq_free:
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id % 8;
+ return eq_id - adapter->base_eq_id;
}
static irqreturn_t be_intx(int irq, void *dev)
@@ -1557,7 +1616,27 @@ static void be_msix_enable(struct be_adapter *adapter)
BE_NUM_MSIX_VECTORS);
if (status == 0)
adapter->msix_enabled = true;
- return;
+}
+
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ int status;
+ if (be_physfn(adapter) && num_vfs) {
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
@@ -1617,6 +1696,9 @@ static int be_irq_register(struct be_adapter *adapter)
status = be_msix_register(adapter);
if (status == 0)
goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
}
/* INTx */
@@ -1651,7 +1733,6 @@ static void be_irq_unregister(struct be_adapter *adapter)
be_free_irq(adapter, &adapter->rx_eq);
done:
adapter->isr_registered = false;
- return;
}
static int be_open(struct net_device *netdev)
@@ -1690,14 +1771,17 @@ static int be_open(struct net_device *netdev)
goto ret_sts;
be_link_status_update(adapter, link_up);
- status = be_vid_config(adapter);
+ if (be_physfn(adapter))
+ status = be_vid_config(adapter);
if (status)
goto ret_sts;
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto ret_sts;
+ if (be_physfn(adapter)) {
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto ret_sts;
+ }
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
@@ -1723,7 +1807,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan \n");
+ "Could not enable Wake-on-lan\n");
pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
cmd.dma);
return status;
@@ -1745,22 +1829,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags;
+ u32 cap_flags, en_flags, vf = 0;
int status;
+ u8 mac[ETH_ALEN];
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ }
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id);
+ &adapter->if_handle, &adapter->pmac_id, 0);
if (status != 0)
goto do_none;
+ if (be_physfn(adapter)) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
+ | BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ mac, true, &adapter->vf_if_handle[vf],
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n", vf);
+ goto if_destroy;
+ }
+ vf++;
+ }
+ } else if (!be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
status = be_tx_queues_create(adapter);
if (status != 0)
goto if_destroy;
@@ -1782,6 +1892,9 @@ rx_qs_destroy:
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_if_handle[vf])
+ be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
return status;
@@ -2061,6 +2174,7 @@ static struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_register = be_vlan_register,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac
};
static void be_netdev_init(struct net_device *netdev)
@@ -2102,37 +2216,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
iounmap(adapter->csr);
if (adapter->db)
iounmap(adapter->db);
- if (adapter->pcicfg)
+ if (adapter->pcicfg && be_physfn(adapter))
iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg;
+ int pcicfg_reg, db_reg;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
- 128 * 1024);
- if (addr == NULL)
- goto pci_map_err;
- adapter->db = addr;
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
- if (adapter->generation == BE_GEN2)
+ if (adapter->generation == BE_GEN2) {
pcicfg_reg = 1;
- else
+ db_reg = 4;
+ } else {
pcicfg_reg = 0;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
if (addr == NULL)
goto pci_map_err;
- adapter->pcicfg = addr;
+ adapter->db = addr;
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(
+ pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
return 0;
pci_map_err:
@@ -2194,6 +2319,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ init_completion(&adapter->flash_compl);
pci_save_state(adapter->pdev);
return 0;
@@ -2246,6 +2372,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ be_sriov_disable(adapter);
+
be_msix_disable(adapter);
pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2398,20 @@ static int be_get_config(struct be_adapter *adapter)
return status;
memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac,
+
+ if (be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
- if (status)
- return status;
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ if (status)
+ return status;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
if (adapter->cap & 0x400)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2428,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
struct be_adapter *adapter;
struct net_device *netdev;
+
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -2344,23 +2477,29 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
+ be_sriov_enable(adapter);
+
status = be_ctrl_init(adapter);
if (status)
goto free_netdev;
/* sync up with fw's ready state */
- status = be_cmd_POST(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
goto ctrl_clean;
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
status = be_stats_init(adapter);
if (status)
@@ -2391,6 +2530,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_msix_disable(adapter);
+ be_sriov_disable(adapter);
free_netdev(adapter->netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -2474,8 +2614,6 @@ static void be_shutdown(struct pci_dev *pdev)
be_setup_wol(adapter, true);
pci_disable_device(pdev);
-
- return;
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
@@ -2557,7 +2695,6 @@ static void be_eeh_resume(struct pci_dev *pdev)
return;
err:
dev_err(&adapter->pdev->dev, "EEH resume failed\n");
- return;
}
static struct pci_error_handlers be_eeh_handlers = {
@@ -2587,6 +2724,13 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
+ if (num_vfs > 32) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param num_vfs must not be greater than 32."
+ "Using 32\n");
+ num_vfs = 32;
+ }
+
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
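
The be2net SR-IOV plumbing above pairs pci_enable_sriov() at probe time with pci_disable_sriov() on remove, both compiled out without CONFIG_PCI_IOV. A hedged sketch of that pairing (the wrapper names are illustrative):

#include <linux/pci.h>

/* returns true when VFs were actually enabled */
static bool example_sriov_enable(struct pci_dev *pdev, unsigned int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	if (num_vfs && pci_enable_sriov(pdev, num_vfs) == 0)
		return true;	/* VFs now appear as extra PCI functions */
#endif
	return false;
}

static void example_sriov_disable(struct pci_dev *pdev, bool enabled)
{
#ifdef CONFIG_PCI_IOV
	if (enabled)
		pci_disable_sriov(pdev);
#endif
}
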
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93cf03f..368f33313fb 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/div64.h>
#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
@@ -80,9 +81,6 @@ static u16 pin_req[] = P_RMII0;
static u16 pin_req[] = P_MII0;
#endif
-static void bfin_mac_disable(void);
-static void bfin_mac_enable(void);
-
static void desc_list_free(void)
{
struct net_dma_desc_rx *r;
@@ -202,6 +200,11 @@ static int desc_list_init(void)
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+ /* Invalidate the data cache of the skb->data range when it is a write-back
+ * cache. It will prevent overwriting the new data from DMA
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
r->skb = new_skb;
/*
@@ -254,7 +257,7 @@ init_error:
* MII operations
*/
/* Wait until the previous MDC/MDIO transaction has completed */
-static void bfin_mdio_poll(void)
+static int bfin_mdio_poll(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -264,22 +267,30 @@ static void bfin_mdio_poll(void)
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/* Read an off-chip register in a PHY through the MDC/MDIO port */
static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* read mode */
bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
SET_REGAD((u16) regnum) |
STABUSY);
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
return (int) bfin_read_EMAC_STADAT();
}
@@ -288,7 +299,11 @@ static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
u16 value)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
bfin_write_EMAC_STADAT((u32) value);
@@ -298,9 +313,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
STAOP |
STABUSY);
- bfin_mdio_poll();
-
- return 0;
+ return bfin_mdio_poll();
}
static int bfin_mdiobus_reset(struct mii_bus *bus)
@@ -458,6 +471,14 @@ static int mii_probe(struct net_device *dev)
* Ethtool support
*/
+/*
+ * interrupt routine for magic packet wakeup
+ */
+static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static int
bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -492,11 +513,57 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->bus_info, dev_name(&dev->dev));
}
+static void bfin_mac_ethtool_getwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = lp->wol;
+}
+
+static int bfin_mac_ethtool_setwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int rc;
+
+ if (wolinfo->wolopts & (WAKE_MAGICSECURE |
+ WAKE_UCAST |
+ WAKE_MCAST |
+ WAKE_BCAST |
+ WAKE_ARP))
+ return -EOPNOTSUPP;
+
+ lp->wol = wolinfo->wolopts;
+
+ if (lp->wol && !lp->irq_wake_requested) {
+ /* register wake irq handler */
+ rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
+ IRQF_DISABLED, "EMAC_WAKE", dev);
+ if (rc)
+ return rc;
+ lp->irq_wake_requested = true;
+ }
+
+ if (!lp->wol && lp->irq_wake_requested) {
+ free_irq(IRQ_MAC_WAKEDET, dev);
+ lp->irq_wake_requested = false;
+ }
+
+ /* Make sure the PHY driver doesn't suspend */
+ device_init_wakeup(&dev->dev, lp->wol);
+
+ return 0;
+}
+
static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_settings = bfin_mac_ethtool_getsettings,
.set_settings = bfin_mac_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
+ .get_wol = bfin_mac_ethtool_getwol,
+ .set_wol = bfin_mac_ethtool_setwol,
};
/**************************************************************************/
@@ -509,10 +576,11 @@ void setup_system_regs(struct net_device *dev)
* Configure checksum support and rcve frame word alignment
*/
sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- sysctl |= RXDWA | RXCKS;
+ sysctl |= RXCKS;
#else
- sysctl |= RXDWA;
+ sysctl &= ~RXCKS;
#endif
bfin_write_EMAC_SYSCTL(sysctl);
@@ -551,6 +619,309 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
+#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
+
+static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u16 ptpctl;
+ u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ ptpctl = bfin_read_EMAC_PTP_CTL();
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /*
+ * Don't allow any timestamping
+ */
+ ptpfv3 = 0xFFFFFFFF;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /*
+ * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
+ * to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register.
+ */
+ ptpfoff = 0x4A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * The default value (0xFFFC) allows the timestamping of both
+ * received Sync messages and Delay_Req messages.
+ */
+ ptpfv3 = 0xFFFFFFFC;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Clear all five comparison mask bits (bits[12:8]) in the
+ * EMAC_PTP_CTL register to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register, except set
+ * the PTPCOF field to 0x2A.
+ */
+ ptpfoff = 0x2A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
+ * the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /*
+ * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
+ * EFTM and PTPCM field comparison.
+ */
+ ptpctl &= ~0x1100;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of all the fields of the EMAC_PTP_FOFF
+ * register, except set the PTPCOF field to 0x0E.
+ */
+ ptpfoff = 0x0E24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
+ * corresponds to PTP messages on the MAC layer.
+ */
+ ptpfv1 = 0x110488F7;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp
+ * messages, set the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.tx_type == HWTSTAMP_TX_OFF &&
+ bfin_mac_hwtstamp_is_none(config.rx_filter)) {
+ ptpctl &= ~PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ SSYNC();
+ } else {
+ ptpctl |= PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ /*
+ * clear any existing timestamp
+ */
+ bfin_read_EMAC_PTP_RXSNAPLO();
+ bfin_read_EMAC_PTP_RXSNAPHI();
+
+ bfin_read_EMAC_PTP_TXSNAPLO();
+ bfin_read_EMAC_PTP_TXSNAPHI();
+
+ /*
+ * Set registers so that rollover occurs soon to test this.
+ */
+ bfin_write_EMAC_PTP_TIMELO(0x00000000);
+ bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
+
+ SSYNC();
+
+ lp->compare.last_update = 0;
+ timecounter_init(&lp->clock,
+ &lp->cycles,
+ ktime_to_ns(ktime_get_real()));
+ timecompare_update(&lp->compare, 0);
+ }
+
+ lp->stamp_cfg = config;
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
+{
+ ktime_t sys = ktime_get_real();
+
+ pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
+ __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
+ sys.tv.nsec, cmp->offset, cmp->skew);
+}
+
+static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ union skb_shared_tx *shtx = skb_tx(skb);
+
+ if (shtx->hardware) {
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* When doing time stamping, keep the connection to the socket
+ * a while longer
+ */
+ shtx->in_progress = 1;
+
+ /*
+ * The timestamping is done at the EMAC module's MII/RMII interface
+ * when the module sees the Start of Frame of an event message packet. This
+ * interface is the closest possible place to the physical Ethernet transmission
+ * medium, providing the best timing accuracy.
+ */
+ while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
+ udelay(1);
+ if (timeout_cnt == 0)
+ printk(KERN_ERR DRV_NAME
+ ": fails to timestamp the TX packet\n");
+ else {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 ns;
+ u64 regval;
+
+ regval = bfin_read_EMAC_PTP_TXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ns = timecounter_cyc2time(&lp->clock,
+ regval);
+ timecompare_update(&lp->compare, ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ shhwtstamps.syststamp =
+ timecompare_transform(&lp->compare, ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
+ }
+ }
+}
+
+static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u32 valid;
+ u64 regval, ns;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
+ return;
+
+ valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
+ if (!valid)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ regval = bfin_read_EMAC_PTP_RXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
+ ns = timecounter_cyc2time(&lp->clock, regval);
+ timecompare_update(&lp->compare, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+
+ bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+}
+
+/*
+ * bfin_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+{
+ u64 stamp;
+
+ stamp = bfin_read_EMAC_PTP_TIMELO();
+ stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+
+ return stamp;
+}
+
+#define PTP_CLK 25000000
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 append;
+
+ /* Initialize hardware timer */
+ append = PTP_CLK * (1ULL << 32);
+ do_div(append, get_sclk());
+ bfin_write_EMAC_PTP_ADDEND((u32)append);
+
+ memset(&lp->cycles, 0, sizeof(lp->cycles));
+ lp->cycles.read = bfin_read_clock;
+ lp->cycles.mask = CLOCKSOURCE_MASK(64);
+ lp->cycles.mult = 1000000000 / PTP_CLK;
+ lp->cycles.shift = 0;
+
+ /* Synchronize our NIC clock against system wall clock */
+ memset(&lp->compare, 0, sizeof(lp->compare));
+ lp->compare.source = &lp->clock;
+ lp->compare.target = ktime_get_real;
+ lp->compare.num_samples = 10;
+
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+}
+
+#else
+# define bfin_mac_hwtstamp_is_none(cfg) 0
+# define bfin_mac_hwtstamp_init(dev)
+# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_rx_hwtstamp(dev, skb)
+# define bfin_tx_hwtstamp(dev, skb)
+#endif
+
static void adjust_tx_list(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -608,18 +979,32 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
{
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
+ union skb_shared_tx *shtx = skb_tx(skb);
+
current_tx_ptr->skb = skb;
if (data_align == 0x2) {
/* move skb->data to current_tx_ptr payload */
data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
+ *data = (u16)(skb->len);
+ /*
+ * When transmitting an Ethernet packet, the PTP_TSYNC module requires
+ * a DMA_Length_Word field associated with the packet. The lower 12 bits
+ * of this field are the length of the packet payload in bytes and the higher
+ * 4 bits are the timestamping enable field.
+ */
+ if (shtx->hardware)
+ *data |= 0x1000;
+
current_tx_ptr->desc_a.start_addr = (u32)data;
/* this is important! */
blackfin_dcache_flush_range((u32)data,
(u32)((u8 *)data + skb->len + 4));
} else {
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ /* enable timestamping for the sent packet */
+ if (shtx->hardware)
+ *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
skb->len);
current_tx_ptr->desc_a.start_addr =
@@ -653,20 +1038,42 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
out:
adjust_tx_list();
+
+ bfin_tx_hwtstamp(dev, skb);
+
current_tx_ptr = current_tx_ptr->next;
- dev->trans_start = jiffies;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
return NETDEV_TX_OK;
}
+#define IP_HEADER_OFF 0
+#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
+ RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
+
static void bfin_mac_rx(struct net_device *dev)
{
struct sk_buff *skb, *new_skb;
unsigned short len;
+ struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned int i;
+ unsigned char fcs[ETH_FCS_LEN + 1];
+#endif
+
+ /* check if frame status word reports an error condition
+ * in which case we simply drop the packet
+ */
+ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
+ printk(KERN_NOTICE DRV_NAME
+ ": rx: receive error - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
+
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
@@ -676,34 +1083,59 @@ static void bfin_mac_rx(struct net_device *dev)
}
/* reserve 2 bytes for RXDWA padding */
skb_reserve(new_skb, NET_IP_ALIGN);
- current_rx_ptr->skb = new_skb;
- current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
-
/* Invalidate the data cache of the skb->data range when it is a write-back
* cache. It will prevent overwriting the new data from DMA
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ /* Deduce Ethernet FCS length from Ethernet payload length */
+ len -= ETH_FCS_LEN;
skb_put(skb, len);
- blackfin_dcache_invalidate_range((unsigned long)skb->head,
- (unsigned long)skb->tail);
skb->protocol = eth_type_trans(skb, dev);
+
+ bfin_rx_hwtstamp(dev, skb);
+
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- skb->csum = current_rx_ptr->status.ip_payload_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
+ /* Checksum offloading only works for IPv4 packets with the standard 20-byte
+ * IP header, because the Blackfin MAC checksum calculation is based on
+ * that assumption. We must NOT use the calculated checksum if the IP
+ * version or header length breaks that assumption.
+ */
+ if (skb->data[IP_HEADER_OFF] == 0x45) {
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ /*
+ * Deduce Ethernet FCS from hardware generated IP payload checksum.
+ * IP checksum is based on 16-bit one's complement algorithm.
+ * Removing a value from a one's-complement checksum is the same as
+ * adding its bitwise inversion. If the IP payload length is odd, the
+ * inverted FCS must also start at an odd offset, with the first byte zero.
+ */
+ if (skb->len % 2) {
+ fcs[0] = 0;
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i + 1] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
+ } else {
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
+ }
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
#endif
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+out:
current_rx_ptr->status.status_word = 0x00000000;
current_rx_ptr = current_rx_ptr->next;
-
-out:
- return;
}
/* interrupt routine to handle rx and error signal */
@@ -755,8 +1187,9 @@ static void bfin_mac_disable(void)
/*
* Enable Interrupts, Receive, and Transmit
*/
-static void bfin_mac_enable(void)
+static int bfin_mac_enable(void)
{
+ int ret;
u32 opmode;
pr_debug("%s: %s\n", DRV_NAME, __func__);
@@ -766,7 +1199,9 @@ static void bfin_mac_enable(void)
bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
/* Wait MII done */
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* We enable only RX here */
/* ASTP : Enable Automatic Pad Stripping
@@ -790,6 +1225,8 @@ static void bfin_mac_enable(void)
#endif
/* Turn on the EMAC rx */
bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -805,21 +1242,21 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable();
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* skip non-multicast addresses */
if (!(*addrs & 1))
@@ -836,8 +1273,6 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
bfin_write_EMAC_HASHHI(emac_hashhi);
bfin_write_EMAC_HASHLO(emac_hashlo);
-
- return;
}
/*
@@ -853,7 +1288,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= RAF;
+ sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
} else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
@@ -874,6 +1309,16 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
}
}
+static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/*
* this puts the device in an inactive state
*/
@@ -894,7 +1339,7 @@ static void bfin_mac_shutdown(struct net_device *dev)
static int bfin_mac_open(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- int retval;
+ int ret;
pr_debug("%s: %s\n", dev->name, __func__);
/*
@@ -908,18 +1353,21 @@ static int bfin_mac_open(struct net_device *dev)
}
/* initial rx and tx list */
- retval = desc_list_init();
-
- if (retval)
- return retval;
+ ret = desc_list_init();
+ if (ret)
+ return ret;
phy_start(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
+
bfin_mac_disable();
- bfin_mac_enable();
+ ret = bfin_mac_enable();
+ if (ret)
+ return ret;
pr_debug("hardware init finished\n");
+
netif_start_queue(dev);
netif_carrier_on(dev);
@@ -958,6 +1406,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_set_mac_address = bfin_mac_set_mac_address,
.ndo_tx_timeout = bfin_mac_timeout,
.ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_do_ioctl = bfin_mac_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1017,6 +1466,11 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
}
pd = pdev->dev.platform_data;
lp->mii_bus = platform_get_drvdata(pd);
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "Cannot get mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_mii_bus_probe;
+ }
lp->mii_bus->priv = ndev;
rc = mii_probe(ndev);
@@ -1049,6 +1503,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_reg_ndev;
}
+ bfin_mac_hwtstamp_init(ndev);
+
/* now, print out the card info, in a short format.. */
dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
@@ -1060,6 +1516,7 @@ out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
+out_err_mii_bus_probe:
peripheral_free_list(pin_req);
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
@@ -1092,9 +1549,16 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_close(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
+ bfin_write_EMAC_WKUP_CTL(MPKE);
+ enable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_close(net_dev);
+ }
return 0;
}
@@ -1102,9 +1566,16 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
static int bfin_mac_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_open(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+ bfin_write_EMAC_WKUP_CTL(0);
+ disable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_open(net_dev);
+ }
return 0;
}
@@ -1155,6 +1626,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
return 0;
out_err_mdiobus_register:
+ kfree(miibus->irq);
mdiobus_free(miibus);
out_err_alloc:
peripheral_free_list(pin_req);
@@ -1167,6 +1639,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
struct mii_bus *miibus = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
mdiobus_unregister(miibus);
+ kfree(miibus->irq);
mdiobus_free(miibus);
peripheral_free_list(pin_req);
return 0;
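
The PTP support added to bfin_mac.c wraps the MAC's free-running counter in a struct cyclecounter and converts raw readings to nanoseconds through a struct timecounter. A minimal sketch of that setup, assuming a 25 MHz counter and a hypothetical register read (the real driver computes the hardware addend from get_sclk()):

#include <linux/clocksource.h>
#include <linux/ktime.h>
#include <linux/string.h>

static cycle_t example_read_counter(const struct cyclecounter *cc)
{
	return 0;	/* would read the 64-bit hardware time registers here */
}

static void example_clock_init(struct cyclecounter *cc, struct timecounter *tc)
{
	memset(cc, 0, sizeof(*cc));
	cc->read = example_read_counter;
	cc->mask = CLOCKSOURCE_MASK(64);
	cc->mult = 40;		/* 1e9 / 25 MHz = 40 ns per tick */
	cc->shift = 0;

	/* start counting nanoseconds from the current wall-clock time */
	timecounter_init(tc, cc, ktime_to_ns(ktime_get_real()));
}
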
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 052b5dce3e3..1ae7b82ceee 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -7,6 +7,12 @@
*
* Licensed under the GPL-2 or later.
*/
+#ifndef _BFIN_MAC_H_
+#define _BFIN_MAC_H_
+
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
#define BFIN_MAC_CSUM_OFFLOAD
@@ -60,6 +66,9 @@ struct bfin_mac_local {
unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
+ int wol; /* Wake On Lan */
+ int irq_wake_requested;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
@@ -67,6 +76,15 @@ struct bfin_mac_local {
struct phy_device *phydev;
struct mii_bus *mii_bus;
+
+#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config stamp_cfg;
+#endif
};
extern void bfin_get_ether_addr(char *addr);
+
+#endif
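
For reference, the SIOCSHWTSTAMP handling that fills the new stamp_cfg field follows the usual copy-in/validate/copy-back shape; a hedged sketch with the hardware programming elided (the function name is illustrative):

#include <linux/if.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int example_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)	/* reserved for future extensions */
		return -EINVAL;

	/* ... program the MAC from config.tx_type / config.rx_filter ... */

	/* report back what was actually enabled */
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
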
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007f199..39250b2ca88 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -167,7 +167,6 @@ static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
- return;
}
static inline unsigned long
@@ -382,8 +381,6 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
bmwrite(dev, INTDISABLE, EnableNormal);
-
- return;
}
#if 0
@@ -972,7 +969,7 @@ bmac_remove_multi(struct net_device *dev,
*/
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct bmac_data *bp = netdev_priv(dev);
int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
@@ -1001,8 +998,8 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- netdev_for_each_mc_addr(dmi, dev)
- bmac_addhash(bp, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1013,7 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
unsigned short rx_cfg;
@@ -1040,8 +1037,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if(!(*addrs & 1))
continue;
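Both bmac_set_multicast() variants above take part in the tree-wide switch from
struct dev_mc_list/dmi_addr to struct netdev_hw_addr/addr. The new iteration
idiom, as a generic sketch (example_add_hash_filter() is a stand-in, not a bmac
function):

#include <linux/netdevice.h>

/* Hypothetical per-address filter update. */
extern void example_add_hash_filter(struct net_device *dev, const u8 *addr);

static void example_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	/* The multicast list is walked under the address lock taken by the
	 * caller of the set_multicast/set_rx_mode callback. */
	netdev_for_each_mc_addr(ha, dev)
		example_add_hash_filter(dev, ha->addr);	/* ETH_ALEN bytes */
}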
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a3828f6..188e356c30a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,11 +58,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.9"
-#define DRV_MODULE_RELDATE "April 27, 2010"
+#define DRV_MODULE_VERSION "2.0.15"
+#define DRV_MODULE_RELDATE "May 4, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
@@ -656,19 +656,11 @@ bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
if (stop_cnic)
bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
- int i;
-
bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
- /* prevent tx timeout */
- for (i = 0; i < bp->dev->num_tx_queues; i++) {
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(bp->dev, i);
- txq->trans_start = jiffies;
- }
}
bnx2_disable_int_sync(bp);
+ netif_carrier_off(bp->dev); /* prevent tx timeout */
}
static void
@@ -677,6 +669,10 @@ bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
+ spin_lock_bh(&bp->phy_lock);
+ if (bp->link_up)
+ netif_carrier_on(bp->dev);
+ spin_unlock_bh(&bp->phy_lock);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
if (start_cnic)
@@ -2672,7 +2668,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_pg->page = page;
- pci_unmap_addr_set(rx_pg, mapping, mapping);
+ dma_unmap_addr_set(rx_pg, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
return 0;
@@ -2687,7 +2683,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+ pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
PCI_DMA_FROMDEVICE);
__free_page(page);
@@ -2719,7 +2715,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ rx_buf->desc = (struct l2_fhdr *) skb->data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2818,7 +2815,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
}
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_buf->skb = NULL;
@@ -2828,7 +2825,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
pci_unmap_page(bp->pdev,
- pci_unmap_addr(
+ dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
@@ -2910,8 +2907,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
cons_rx_pg->page = NULL;
- pci_unmap_addr_set(prod_rx_pg, mapping,
- pci_unmap_addr(cons_rx_pg, mapping));
+ dma_unmap_addr_set(prod_rx_pg, mapping,
+ dma_unmap_addr(cons_rx_pg, mapping));
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2935,18 +2932,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
+ dma_unmap_addr(cons_rx_buf, mapping),
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
+ prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
if (cons == prod)
return;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3019,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
/* Don't unmap yet. If we're unable to allocate a new
* page, we need to recycle the page and the DMA addr.
*/
- mapping_old = pci_unmap_addr(rx_pg, mapping);
+ mapping_old = dma_unmap_addr(rx_pg, mapping);
if (i == pages - 1)
frag_len -= 4;
@@ -3074,6 +3072,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ struct pci_dev *pdev = bp->pdev;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
@@ -3086,7 +3085,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
while (sw_cons != hw_cons) {
unsigned int len, hdr_len;
u32 status;
- struct sw_bd *rx_buf;
+ struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
u16 vtag = 0;
@@ -3097,16 +3096,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
+ prefetchw(skb);
+ if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
+ next_rx_buf =
+ &rxr->rx_buf_ring[
+ RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+ }
rx_buf->skb = NULL;
- dma_addr = pci_unmap_addr(rx_buf, mapping);
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = (struct l2_fhdr *) skb->data;
+ rx_hdr = rx_buf->desc;
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3207,10 +3213,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
#ifdef BCM_VLAN
if (hw_vlan)
- vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
+ vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&bnapi->napi, skb);
rx_pkt++;
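The replacement of netif_receive_skb() with napi_gro_receive() (and of
vlan_hwaccel_receive_skb() with vlan_gro_receive()) routes completed receives
through the GRO engine, pairing with the NETIF_F_GRO feature flag added further
down. The delivery side of a NAPI poll handler then looks roughly like this
generic sketch (example_rx_next() is a stand-in for the driver's ring
bookkeeping, not a bnx2 function):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

extern struct sk_buff *example_rx_next(struct napi_struct *napi); /* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = example_rx_next(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}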
@@ -3548,7 +3554,6 @@ bnx2_set_rx_mode(struct net_device *dev)
}
else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
u32 mc_filter[NUM_MC_HASH_REGISTERS];
u32 regidx;
u32 bit;
@@ -3556,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
bit &= 0x1f;
@@ -5318,7 +5323,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
pci_unmap_single(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -5329,7 +5334,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
for (k = 0; k < last; k++, j++) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
}
@@ -5359,7 +5364,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
continue;
pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -5765,11 +5770,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
- rx_hdr = (struct l2_fhdr *) rx_skb->data;
+ rx_hdr = rx_buf->desc;
skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
@@ -6292,14 +6297,23 @@ static void
bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
+ u32 mcp_p0, mcp_p1;
netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
- netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_EMAC_RX_STATUS));
+ netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mcp_p0 = BNX2_MCP_STATE_P0;
+ mcp_p1 = BNX2_MCP_STATE_P1;
+ } else {
+ mcp_p0 = BNX2_MCP_STATE_P0_5708;
+ mcp_p1 = BNX2_MCP_STATE_P1_5708;
+ }
netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+ bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6429,7 +6443,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
- pci_unmap_addr_set(tx_buf, mapping, mapping);
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd = &txr->tx_desc_ring[ring_prod];
@@ -6454,7 +6468,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(bp->pdev, mapping))
goto dma_error;
- pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+ dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6491,7 +6505,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
/* unmap remaining mapped pages */
@@ -6499,7 +6513,7 @@ dma_error:
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
- pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -8297,7 +8311,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
dev->features |= NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637a..ddaa3fc9987 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6347,6 +6347,8 @@ struct l2_fhdr {
#define BNX2_MCP_SCRATCH 0x00160000
#define BNX2_MCP_STATE_P1 0x0016f9c8
#define BNX2_MCP_STATE_P0 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708 0x001699c8
+#define BNX2_MCP_STATE_P0_5708 0x00169dc8
#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH
#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000
@@ -6551,17 +6553,18 @@ struct l2_fhdr {
struct sw_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ struct l2_fhdr *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_pg {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned short is_gso;
unsigned short nr_frags;
};
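The bnx2.c/bnx2.h hunks above replace the pci_unmap_addr family with the generic
DMA-API equivalents; the descriptor-side pattern is the same wherever a mapping
must be remembered for a later unmap. A generic sketch, not bnx2-specific:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

struct example_rx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* empty when the arch needs no addr */
};

static int example_map_rx(struct device *dev, struct example_rx_buf *buf,
			  size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf->skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	dma_unmap_addr_set(buf, mapping, mapping);
	return 0;
}

static void example_unmap_rx(struct device *dev, struct example_rx_buf *buf,
			     size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapping), len,
			 DMA_FROM_DEVICE);
}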
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a6830..8bd23687c53 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
#define BCM_VLAN 1
#endif
+#define BNX2X_MULTI_QUEUE
+
+#define BNX2X_NEW_NAPI
+
+
+
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
#include <linux/mdio.h>
#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
##__args); \
-} while (0)
+ } while (0)
+
+#define BNX2X_ERROR(__fmt, __args...) do { \
+ pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
+ } while (0)
+
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
+#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
/* fast path */
struct sw_rx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 11
+#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 41
+#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
@@ -818,6 +838,12 @@ struct attn_route {
u32 sig[4];
};
+typedef enum {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+} bnx2x_recovery_state_t;
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
struct pci_dev *pdev;
atomic_t intr_sem;
+
+ bnx2x_recovery_state_t recovery_state;
+ int is_leader;
#ifdef BCM_CNIC
struct msix_entry msix_table[MAX_CONTEXT+2];
#else
@@ -842,7 +871,6 @@ struct bnx2x {
#endif
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-#define INT_MODE_MSIX 3
int tx_ring_size;
@@ -924,8 +952,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
- struct work_struct reset_task;
-
+ struct delayed_work reset_task;
struct timer_list timer;
int current_interval;
@@ -961,6 +988,8 @@ struct bnx2x {
u16 rx_quick_cons_trip;
u16 rx_ticks_int;
u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
u32 lin_cnt;
@@ -1075,6 +1104,7 @@ struct bnx2x {
#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+ char fw_ver[32];
const struct firmware *firmware;
};
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
#define DMAE_LEN32_RD_MAX 0x80
-#define DMAE_LEN32_WR_MAX 0x400
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0xe0d0d0ae
@@ -1294,8 +1325,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
-#define MULTI_FLAGS(bp) \
+#define RSS_FLAGS(bp) \
(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 760069345b1..fd1f29e0317 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -683,7 +683,7 @@ struct drv_func_mb {
#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
/*
- * The optic module verification commands requris bootcode
+ * The optic module verification commands require bootcode
* v5.0.6 or later
*/
#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c359e8..ff70be89876 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
pause_result);
}
}
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
}
- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n",
+ DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
" autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
} else if ((params->req_line_speed ==
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6c..57ff5b3bcce 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-7"
-#define DRV_MODULE_RELDATE "2010/02/28"
+#define DRV_MODULE_VERSION "1.52.53-1"
+#define DRV_MODULE_RELDATE "2010/18/04"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
+ "(1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
- while (len > DMAE_LEN32_WR_MAX) {
+ while (len > dmae_wr_max) {
bnx2x_write_dmae(bp, phys_addr + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
}
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
static void bnx2x_fw_dump(struct bnx2x *bp)
{
+ u32 addr;
u32 mark, offset;
__be32 data[9];
int word;
- mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
- mark = ((mark + 0x3) & ~0x3);
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+
+ addr = bp->common.shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
pr_err("begin fw dump (mark 0x%x)\n", mark);
pr_err("");
- for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+ for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
- for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
- " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
- " spq_prod_idx(%u)\n",
+ BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+ " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x)\n",
bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
- " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
- " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
- " fp_u_idx(%x) *sb_u_idx(%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_u_idx),
fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
- " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
- " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+ BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+ " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
fp->status_blk->c_status_block.status_block_index,
fp->tx_db.data.prod);
}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
* General service functions
*/
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* unmap first bd */
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
- pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
- pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
if (--nbd)
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
default:
BNX2X_ERR("unexpected MC reply (%d) "
- "fp->state is %x\n", command, fp->state);
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
}
mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
if (unlikely(page == NULL))
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, page, 0,
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
sw_buf->page = page;
- pci_unmap_addr_set(sw_buf, mapping, mapping);
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(cons_rx_buf, mapping),
+ RX_COPY_THRESH, DMA_FROM_DEVICE);
prod_rx_buf->skb = cons_rx_buf->skb;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
*prod_bd = *cons_bd;
}
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+ bp->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
max(frag_size, (u32)len_on_bd));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages >
- min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* Unmap the page as we r going to pass it to the stack */
- pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
#ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && is_vlan_cqe &&
(!is_not_hwaccel_vlan_cqe))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.
+ vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
} else {
DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
" - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
+ u8 cqe_fp_flags, cqe_fp_status_flags;
u16 len, pad;
comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
rx_buf = &fp->rx_buf_ring[bd_cons];
skb = rx_buf->skb;
prefetch(skb);
- prefetch((u8 *)skb + 256);
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
pad = cqe->fast_path_cqe.placement_offset;
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
}
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- PCI_DMA_FROMDEVICE);
- prefetch(skb);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
prefetch(((char *)(skb)) + 128);
/* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
} else
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -1684,6 +1726,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe_fp_status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash = le32_to_cpu(
+ cqe->fast_path_cqe.rss_hash_result);
+
skb->ip_summed = CHECKSUM_NONE;
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
next_rx:
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
- if (status)
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
status);
return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
static u8 bnx2x_link_test(struct bnx2x *bp)
{
- u8 rc;
+ u8 rc = 0;
- bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_release_phy_lock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
return rc;
}
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
than zero */
m_fair_vn.vn_credit_delta =
- max((u32)(vn_min_rate * (T_FAIR_COEF /
- (8 * bp->vn_weight_sum))),
- (u32)(bp->cmng.fair_vars.fair_threshold * 2));
- DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold * 2));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
+ u32 prev_link_status = bp->link_vars.link_status;
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status */
- bnx2x_link_report(bp);
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
if (IS_E1HMF(bp)) {
int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
{
struct eth_spe *spe;
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
- (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* CID needs port number to be encoded int it */
spe->hdr.conn_and_cmd_data =
- cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
- HW_CID(bp, cid)));
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bp->spq_left--;
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+ HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
- u32 i, j, val;
+ u32 j, val;
int rc = 0;
might_sleep();
- i = 100;
- for (j = 0; j < i*10; j++) {
+ for (j = 0; j < 1000; j++) {
val = (1UL << 31);
REG_WR(bp, GRCBASE_MCP + 0x9c, val);
val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
- u32 val = 0;
-
- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
aeu_mask, asserted);
- aeu_mask &= ~(asserted & 0xff);
+ aeu_mask &= ~(asserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
- "Please contact Dell Support for assistance.\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val |= (1 << 16);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+ return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+ if (idx)
+ pr_cont(", ");
+ pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block(par_num++, "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++, "PBCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ _print_next_block(par_num++, "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ _print_next_block(par_num++, "VAUX PCI CORE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "CSDM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ _print_next_block(par_num++, "MCP ROM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ _print_next_block(par_num++, "MCP UMP RX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ _print_next_block(par_num++, "MCP UMP TX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ _print_next_block(par_num++, "MCP SCPAD");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+ u32 sig2, u32 sig3)
+{
+ if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+ (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x "
+ "[2]:0x%08x [3]:0x%08x\n",
+ sig0 & HW_PRTY_ASSERT_SET_0,
+ sig1 & HW_PRTY_ASSERT_SET_1,
+ sig2 & HW_PRTY_ASSERT_SET_2,
+ sig3 & HW_PRTY_ASSERT_SET_3);
+ printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+ bp->dev->name);
+ par_num = bnx2x_print_blocks_with_parity0(
+ sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+ par_num = bnx2x_print_blocks_with_parity1(
+ sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+ par_num = bnx2x_print_blocks_with_parity2(
+ sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+ par_num = bnx2x_print_blocks_with_parity3(
+ sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+ printk("\n");
+ return true;
+ } else
+ return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
struct attn_route attn;
- struct attn_route group_mask;
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+ attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
int port = BP_PORT(bp);
int index;
u32 reg_addr;
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
+ if (bnx2x_chk_parity_attn(bp)) {
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ bnx2x_set_reset_in_progress(bp);
+ schedule_delayed_work(&bp->reset_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ bnx2x_release_alr(bp);
+ /* In case of parity errors don't handle attentions so that
+ * other function would "see" parity errors.
+ */
+ return;
+ }
+
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
if (deasserted & (1 << index)) {
- group_mask = bp->attn_group[index];
+ group_mask = &bp->attn_group[index];
DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
- index, group_mask.sig[0], group_mask.sig[1],
- group_mask.sig[2], group_mask.sig[3]);
+ index, group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3]);
bnx2x_attn_int_deasserted3(bp,
- attn.sig[3] & group_mask.sig[3]);
+ attn.sig[3] & group_mask->sig[3]);
bnx2x_attn_int_deasserted1(bp,
- attn.sig[1] & group_mask.sig[1]);
+ attn.sig[1] & group_mask->sig[1]);
bnx2x_attn_int_deasserted2(bp,
- attn.sig[2] & group_mask.sig[2]);
+ attn.sig[2] & group_mask->sig[2]);
bnx2x_attn_int_deasserted0(bp,
- attn.sig[0] & group_mask.sig[0]);
-
- if ((attn.sig[0] & group_mask.sig[0] &
- HW_PRTY_ASSERT_SET_0) ||
- (attn.sig[1] & group_mask.sig[1] &
- HW_PRTY_ASSERT_SET_1) ||
- (attn.sig[2] & group_mask.sig[2] &
- HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ attn.sig[0] & group_mask->sig[0]);
}
}
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
aeu_mask, deasserted);
- aeu_mask |= (deasserted & 0xff);
+ aeu_mask |= (deasserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
- if (status & 0x1)
+ if (status & 0x1) {
bnx2x_attn_int(bp);
+ status &= ~0x1;
+ }
+
+ /* CStorm events: STAT_QUERY */
+ if (status & 0x2) {
+ DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+ status &= ~0x2;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
IGU_INT_ENABLE, 1);
-
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
u32 lo;
u32 hi;
} diff;
- u32 nig_timer_max;
if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_start = ++pstats->host_port_stats_end;
- nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
- if (nig_timer_max != estats->nig_timer_max) {
- estats->nig_timer_max = nig_timer_max;
- BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
}
return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
- " xstorm counter (%d) != stats_counter (%d)\n",
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
i, xclient->stats_counter, bp->stats_counter);
return -1;
}
if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
- " tstorm counter (%d) != stats_counter (%d)\n",
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
i, tclient->stats_counter, bp->stats_counter);
return -2;
}
if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
- " ustorm counter (%d) != stats_counter (%d)\n",
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
i, uclient->stats_counter, bp->stats_counter);
return -4;
}
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
qstats->total_bytes_received_lo,
le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
qstats->valid_bytes_received_hi =
qstats->total_bytes_received_hi;
qstats->valid_bytes_received_lo =
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_drv_stats_update(bp);
if (netif_msg_timer(bp)) {
- struct bnx2x_fastpath *fp0_rx = bp->fp;
- struct bnx2x_fastpath *fp0_tx = bp->fp;
- struct tstorm_per_client_stats *old_tclient =
- &bp->fp->old_tclient;
- struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct net_device_stats *nstats = &bp->dev->stats;
int i;
- netdev_printk(KERN_DEBUG, bp->dev, "\n");
- printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
- " tx pkt (%lx)\n",
- bnx2x_tx_avail(fp0_tx),
- le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
- printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
- " rx pkt (%lx)\n",
- (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
- fp0_rx->rx_comp_cons),
- le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
- printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
- "brb truncate %u\n",
- (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
- qstats->driver_xoff,
+ printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+ bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- printk(KERN_DEBUG "tstats: checksum_discard %u "
- "packets_too_big_discard %lu no_buff_discard %lu "
- "mac_discard %u mac_filter_discard %u "
- "xxovrflow_discard %u brb_truncate_discard %u "
- "ttl0_discard %u\n",
- le32_to_cpu(old_tclient->checksum_discard),
- bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
- bnx2x_hilo(&qstats->no_buff_discard_hi),
- estats->mac_discard, estats->mac_filter_discard,
- estats->xxoverflow_discard, estats->brb_truncate_discard,
- le32_to_cpu(old_tclient->ttl0_discard));
for_each_queue(bp, i) {
- printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
- bnx2x_fp(bp, i, tx_pkt),
- bnx2x_fp(bp, i, rx_pkt),
- bnx2x_fp(bp, i, rx_calls));
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+ " rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(bp->dev, i);
+
+ printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+ " tx pkt(%lu) tx calls (%lu)"
+ " %s (Xoff events %u)\n",
+ fp->name, bnx2x_tx_avail(fp),
+ le16_to_cpu(*fp->tx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_transmitted_hi),
+ fp->tx_pkt,
+ (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+ qstats->driver_xoff);
}
}
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state = bp->stats_state;
+ if (unlikely(bp->panic))
+ return;
+
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
if (fp->tpa_state[i] == BNX2X_TPA_START)
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->disable_tpa = 1;
break;
}
- pci_unmap_addr_set((struct sw_rx_bd *)
+ dma_unmap_addr_set((struct sw_rx_bd *)
&bp->fp->tpa_pool[i],
mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_prod = ring_prod;
/* must not have more available CQEs than BDs */
- fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
- cqe_ring_prod);
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
/* Warning!
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
context->ustorm_st_context.common.flags |=
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
context->ustorm_st_context.common.sge_buff_size =
- (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
- (u32)0xffff);
+ (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+ 0xffff);
context->ustorm_st_context.common.sge_page_base_hi =
U64_HI(fp->rx_sge_mapping);
context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
u32 offset;
u16 max_agg_size;
- if (is_multi(bp)) {
- tstorm_config.config_flags = MULTI_FLAGS(bp);
+ tstorm_config.config_flags = RSS_FLAGS(bp);
+
+ if (is_multi(bp))
tstorm_config.rss_result_mask = MULTI_MASK;
- }
/* Enable TPA if needed */
if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
- max_agg_size =
- min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE),
- (u32)0xffff);
+ max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
- /* Store it to internal memory */
+ /* Store cmng structures to internal memory */
if (bp->port.pmf)
for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
- bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
- &bp->gunzip_mapping);
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
if (bp->gunzip_buf == NULL)
goto gunzip_nomem1;
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
bp->strm = NULL;
gunzip_nomem2:
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " un-compression\n");
return -ENOMEM;
}
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
bp->strm = NULL;
if (bp->gunzip_buf) {
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
}
}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
- bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
}
+static const struct {
+ u32 addr;
+ u32 mask;
+} bnx2x_parity_mask[] = {
+ {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+ {HC_REG_HC_PRTY_MASK, 0xffffffff},
+ {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+ {QM_REG_QM_PRTY_MASK, 0x0},
+ {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+ {CDU_REG_CDU_PRTY_MASK, 0x0},
+ {CFC_REG_CFC_PRTY_MASK, 0x0},
+ {DBG_REG_DBG_PRTY_MASK, 0x0},
+ {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+ {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+ {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+ {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+ {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+ {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+ {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+ int i, mask_arr_len =
+ sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+
+ for (i = 0; i < mask_arr_len; i++)
+ REG_WR(bp, bnx2x_parity_mask[i].addr,
+ bnx2x_parity_mask[i].mask);
+}
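Note: the length computation above open-codes sizeof(array)/sizeof(array[0]); the kernel's ARRAY_SIZE() macro expresses the same thing. A sketch of the equivalent loop, assuming the bnx2x_parity_mask table and the driver's REG_WR() macro are in scope exactly as above (enable_blocks_parity_sketch() is a hypothetical name):

static void enable_blocks_parity_sketch(struct bnx2x *bp)
{
	int i;

	/* identical behaviour, using ARRAY_SIZE() for the table length */
	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}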
+
static void bnx2x_reset_common(struct bnx2x *bp)
{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
+ int is_required;
u32 val;
- u8 port;
- u8 is_required = 0;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+ is_required = 0;
val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
SHARED_HW_CFG_FAN_FAILURE_MASK;
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
/* set to active low mode */
val = REG_RD(bp, MISC_REG_SPIO_INT);
val |= ((1 << MISC_REGISTERS_SPIO_5) <<
- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
REG_WR(bp, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
- REG_WR(bp, i, 0xc0cac01a);
- /* TODO: replace with something meaningful */
- }
+ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+ REG_WR(bp, i, random32());
bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- pr_alert("please adjust the size of cdu_context(%ld)\n",
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
enable_blocks_attention(bp);
+ if (CHIP_PARITY_SUPPORTED(bp))
+ enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
+
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
u32 addr, val;
int i;
- DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
/* set MSI reconfigure capability */
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
if (skb == NULL)
continue;
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
BNX2X_NUM_QUEUES(bp) + offset);
- if (rc) {
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* vectors available for FP */
+ int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->num_queues = min(bp->num_queues, fp_vec);
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
return rc;
}
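For context, pci_enable_msix() in this kernel generation returns 0 on success, a negative errno on hard failure, or a positive count of vectors the platform can actually provide; the hunk above retries with that smaller count and shrinks bp->num_queues to match. A condensed sketch of the pattern — request_msix_or_shrink() and its parameters are hypothetical, not driver code:

/* Sketch only: retry pci_enable_msix() with whatever vector count the
 * PCI core reports as available, provided it still meets our minimum. */
static int request_msix_or_shrink(struct pci_dev *pdev,
				  struct msix_entry *tbl,
				  int wanted, int minimum)
{
	int rc = pci_enable_msix(pdev, tbl, wanted);

	if (rc > 0 && rc >= minimum)
		rc = pci_enable_msix(pdev, tbl, rc);

	return rc;	/* 0: success; >0: still too few; <0: errno */
}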
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
-
- case INT_MODE_MSIX:
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
+ bnx2x_inc_load_cnt(bp);
return 0;
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
u32 reset_code = 0;
int i, cnt, rc;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
- /* Set "drop all" */
- bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
- /* Disable HW interrupts, NAPI and Tx */
- bnx2x_netif_stop(bp, 1);
-
- del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
- (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp, false);
-
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,70 @@ unload_error:
if (!BP_NOMCP(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+}
+
+static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else if (CHIP_IS_E1H(bp)) {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* must be called with rtnl_lock */
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+
+ if (bp->state == BNX2X_STATE_CLOSED) {
+ /* Interface has been removed - nothing to recover */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+
+ return -EINVAL;
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+ /* Set "drop all" */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Disable HW interrupts, NAPI and Tx */
+ bnx2x_netif_stop(bp, 1);
+ netif_carrier_off(bp->dev);
+
+ del_timer_sync(&bp->timer);
+ SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+ (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp, false);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8022,19 +8525,448 @@ unload_error:
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
+ /* The last driver must disable the "close the gate" functionality
+ * if there is no parity attention or "process kill" pending.
+ */
+ if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+ bnx2x_reset_is_done(bp))
+ bnx2x_disable_close_the_gate(bp);
+
+ /* Reset the MCP mailbox sequence if there is an ongoing recovery */
+ if (unload_mode == UNLOAD_RECOVERY)
+ bp->fw_seq = 0;
+
+ return 0;
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val, addr;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ /* #2 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ }
+
+ /* #3 */
+ addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ val = REG_RD(bp, addr);
+ REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* Restore the value of the `magic' bit.
+ *
+ * @param bp driver handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
+ SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp
+ * @param magic_val Old value of 'magic' bit.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ u32 shmem, cnt, validity_offset, val;
+ int rc = 0;
+
+ msleep(100);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (shmem == 0) {
+ BNX2X_ERR("Shmem 0 return failure\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Wait for MCP to come up */
+ for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+ /* TBD: it's best to check the validity map of the last port;
+ * currently this checks on port 0.
+ */
+ val = REG_RD(bp, shmem + validity_offset);
+ DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+ shmem + validity_offset, val);
+
+ /* check that shared memory is valid. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ break;
+
+ bnx2x_mcp_wait_one(bp);
+ }
+
+ DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+
+ /* Check that shared memory is valid. This indicates that MCP is up. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+ (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+ BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+exit_lbl:
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
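With the constants above, the wait loop makes MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 passes, so the MCP gets roughly five seconds to repopulate the shared-memory validity map on real silicon, and about ten times that on emulation/FPGA where bnx2x_mcp_wait_one() sleeps MCP_ONE_TIMEOUT*10 per pass.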
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else
+ reset_mask2 = 0x1ffff;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+ mmiowb();
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ msleep(1);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ msleep(1);
+
+ /* Prepare for chip reset: */
+ /* MCP */
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
+ * reset state, re-enable attentions. */
return 0;
}
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp)) {
+ printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
+ bp->dev->name);
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /* Clear "reset is in progress" bit and update the driver state */
+ bnx2x_set_reset_done(bp);
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+ return rc;
+}
+
+static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* Assumption: runs under rtnl lock. This, together with the fact
+ * that it's called only from bnx2x_reset_task(), ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08))
+ bp->is_leader = 1;
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+ /* Ensure "is_leader" and "recovery_state"
+ * update values are seen on other CPUs
+ */
+ smp_wmb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ u32 load_counter = bnx2x_get_load_cnt(bp);
+ if (load_counter) {
+ /* Wait until all other functions are
+ * down.
+ */
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions got down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp) ||
+ bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ printk(KERN_ERR"%s: Recovery "
+ "has failed. Power cycle is "
+ "needed.\n", bp->dev->name);
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Block ifup for all function
+ * of this ASIC until
+ * "process kill" or power
+ * cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+ /* Shut down the power */
+ bnx2x_set_power_state(bp,
+ PCI_D3hot);
+ return;
+ }
+
+ return;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp)) {
+ /* Try to get a LEADER_LOCK HW lock,
+ * since the former leader may have
+ * been unloaded by the user or may
+ * have released leadership for some
+ * other reason.
+ */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+
+ } else { /* A leader has completed
+ * the "process kill". It's an exit
+ * point for a non-leader.
+ */
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_wmb();
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
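To make the control flow above easier to follow: INIT elects a leader via the reserved HW lock and takes the local function down; WAIT then splits by role — the leader waits for the global load count to reach zero before running the "process kill" and reloading, while non-leaders either inherit leadership if the former leader disappeared, or wait for the leader to mark the reset as done and then reload. A very reduced, self-contained sketch of the two-role flow, with hypothetical predicate names standing in for the driver's HW-lock and load-count helpers (kernel bool/enum types assumed):

enum rec_state { REC_INIT, REC_WAIT, REC_DONE };

/* Illustrative only: one step of the leader/follower recovery flow. */
static enum rec_state recovery_step(enum rec_state s, bool leader,
				    bool all_functions_down, bool reset_done)
{
	switch (s) {
	case REC_INIT:
		/* every function unloads itself first, then waits */
		return REC_WAIT;
	case REC_WAIT:
		if (leader)
			/* the leader resets the chip once everyone is down */
			return all_functions_down ? REC_DONE : REC_WAIT;
		/* followers reload only after the leader has finished */
		return reset_done ? REC_DONE : REC_WAIT;
	default:
		return REC_DONE;
	}
}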
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
+ * scheduled on a general queue in order to prevent a deadlock.
+ */
static void bnx2x_reset_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+ struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
" so reset not done to allow debug dump,\n"
- " you will need to reboot when done\n");
+ KERN_ERR " you will need to reboot when done\n");
return;
#endif
@@ -8043,8 +8975,12 @@ static void bnx2x_reset_task(struct work_struct *work)
if (!netif_running(bp->dev))
goto reset_task_exit;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+ bnx2x_parity_recover(bp);
+ else {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
reset_task_exit:
rtnl_unlock();
@@ -8264,7 +9200,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERR("BAD MCP validity signature\n");
+ BNX2X_ERROR("BAD MCP validity signature\n");
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9224,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X,"
- " please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9246,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9525,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8604,11 +9541,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8619,11 +9556,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8635,11 +9572,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8650,11 +9587,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8665,11 +9602,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8682,19 +9619,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- bp->port.link_config);
+ BNX2X_ERROR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ bp->port.link_config);
bp->link_params.req_line_speed = SPEED_AUTO_NEG;
bp->port.advertising = bp->port.supported;
break;
@@ -8823,7 +9760,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->e1hov = 0;
bp->e1hmf = 0;
- if (CHIP_IS_E1H(bp)) {
+ if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
@@ -8844,14 +9781,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
"(0x%04x)\n",
func, bp->e1hov, bp->e1hov);
} else {
- BNX2X_ERR("!!! No valid E1HOV for func %d,"
- " aborting\n", func);
+ BNX2X_ERROR("No valid E1HOV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
} else {
if (BP_E1HVN(bp)) {
- BNX2X_ERR("!!! VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ BNX2X_ERROR("VN %d in single function mode,"
+ " aborting\n", BP_E1HVN(bp));
rc = -EPERM;
}
}
@@ -8887,7 +9824,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning random MAC workaround active\n");
+ BNX2X_ERROR("warning: random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -8895,6 +9832,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
return rc;
}
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
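For orientation, the VPD walk above follows the standard layout: pci_vpd_find_tag() locates the large-resource read-only data section, whose header is PCI_VPD_LRDT_TAG_SIZE (3) bytes — one tag byte plus the 16-bit length returned by pci_vpd_lrdt_size(). Each keyword entry inside it then carries a PCI_VPD_INFO_FLD_HDR_SIZE (3) byte header — the two-character keyword plus a one-byte field length from pci_vpd_info_field_size() — which is why the code advances rodi past that header before copying the vendor-specific version string into bp->fw_ver.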
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -8912,29 +9913,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
#endif
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
- INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+ INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
rc = bnx2x_get_hwinfo(bp);
+ bnx2x_read_fwinfo(bp);
/* need to reset chip if undi was active */
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- pr_err("FPGA detected\n");
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- pr_err("MCP disabled, must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- pr_err("Multi disabled since int_mode requested is not MSI-X\n");
+ dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+ "requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
+ bp->dev->features |= NETIF_F_GRO;
+
/* Set TPA flags */
if (disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10310,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ strncpy(info->fw_version, bp->fw_ver, 32);
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10850,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
return 0;
}
-#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnx2x *bp = netdev_priv(dev);
- bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
- if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
- bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
- if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
if (netif_running(dev))
bnx2x_update_coalesce(bp);
@@ -9885,6 +10892,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10982,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
int changed = 0;
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
if (!disable_tpa) {
@@ -9986,6 +11003,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
changed = 1;
}
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
if (changed && netif_running(dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11028,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
bp->rx_csum = data;
/* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11077,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
u32 wr_val = 0;
int port = BP_PORT(bp);
static const struct {
- u32 offset0;
- u32 offset1;
- u32 mask;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11146,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, wr_val);
+ REG_WR(bp, offset, (wr_val & mask));
val = REG_RD(bp, offset);
/* Restore the original register's value */
REG_WR(bp, offset, save_val);
- /* verify that value is as expected value */
- if ((val & mask) != (wr_val & mask))
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_PROBE,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
goto test_reg_exit;
+ }
}
}
@@ -10267,8 +11298,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
bd_prod = TX_BD(fp_tx->tx_bd_prod);
tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11375,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
int rc = 0, res;
+ if (BP_NOMCP(bp))
+ return rc;
+
if (!netif_running(bp->dev))
return BNX2X_LOOPBACK_FAILED;
@@ -10391,6 +11425,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
int i, rc;
u32 magic, crc;
+ if (BP_NOMCP(bp))
+ return 0;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11505,12 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
if (!netif_running(dev))
@@ -10556,7 +11599,11 @@ static const struct {
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_packets" }
+ 8, "[%d]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%d]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%d]: tx_bcast_packets" }
};
static const struct {
@@ -10618,16 +11665,20 @@ static const struct {
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_packets" },
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
- { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11694,11 @@ static const struct {
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11715,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
struct bnx2x *bp = netdev_priv(dev);
int i, num_stats;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11944,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
case PCI_D3hot:
+ /* If there are other clients above, don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= 3;
@@ -11182,6 +12241,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
u8 hlen = 0;
__le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -11205,6 +12266,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12309,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ tx_start_bd->general_data = (mac_type <<
+ ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
/* header nbd */
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
@@ -11314,8 +12385,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12443,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd == NULL)
total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, frag->page,
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12524,40 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
+ if (!bnx2x_reset_is_done(bp)) {
+ do {
+ /* Reset the MCP mailbox sequence if there is an
+ * ongoing recovery
+ */
+ bp->fw_seq = 0;
+
+ /* If it's the first function to load and "reset done" is
+ * still not cleared, it may mean that a previous recovery
+ * never completed. We don't check the attention state here
+ * because it may have already been cleared by a "common"
+ * reset, but we shall proceed with "process kill" anyway.
+ */
+ if ((bnx2x_get_load_cnt(bp) == 0) &&
+ bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08) &&
+ (!bnx2x_leader_reset(bp))) {
+ DP(NETIF_MSG_HW, "Recovered in open\n");
+ break;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+ " completed yet. Try again later. If u still see this"
+ " message after a few retries then power cycle is"
+ " required.\n", bp->dev->name);
+
+ return -EAGAIN;
+ } while (0);
+ }
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
return bnx2x_nic_load(bp, LOAD_OPEN);
}
@@ -11462,9 +12568,7 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
- if (atomic_read(&bp->pdev->enable_cnt) == 1)
- if (!CHIP_REV_IS_SLOW(bp))
- bnx2x_set_power_state(bp, PCI_D3hot);
+ bnx2x_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -11494,21 +12598,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
int i, old, offset;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[0]);
+ swab16(*(u16 *)&ha->addr[0]);
config->config_table[i].
cam_entry.middle_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[2]);
+ swab16(*(u16 *)&ha->addr[2]);
config->config_table[i].
cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[4]);
+ swab16(*(u16 *)&ha->addr[4]);
config->config_table[i].cam_entry.flags =
cpu_to_le16(port);
config->config_table[i].
@@ -11562,18 +12666,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
0);
} else { /* E1H */
/* Accept one or more multicasts */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- mclist->dmi_addr);
+ ha->addr);
- crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+ crc = crc32c_le(0, ha->addr, ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
@@ -11690,6 +12794,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
return -EINVAL;
@@ -11717,7 +12826,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_work(&bp->reset_task);
+ schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
@@ -11789,18 +12898,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- pr_err("Cannot find PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- pr_err("Cannot find second PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11808,7 +12920,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11818,28 +12931,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- pr_err("Cannot find power management capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- pr_err("Cannot find PCI Express capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+ " failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- pr_err("System does not support DMA, aborting\n");
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&bp->pdev->dev,
+ "System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11852,7 +12969,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- pr_err("Cannot map register space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11861,7 +12979,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- pr_err("Cannot map doorbell space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11876,6 +12995,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13085,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- pr_err("Section %d length is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11975,7 +13098,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- pr_err("Section offset %d is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11987,7 +13111,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13147,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
for (i = 0, j = 0; i < n/8; i++, j += 2) {
tmp = be32_to_cpu(source[j]);
target[i].op = (tmp >> 24) & 0xff;
- target[i].offset = tmp & 0xffffff;
- target[i].raw_data = be32_to_cpu(source[j+1]);
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
}
}
@@ -12057,20 +13182,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
if (CHIP_IS_E1(bp))
fw_file_name = FW_FILE_NAME_E1;
- else
+ else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
+ else {
+ dev_err(dev, "Unsupported chip revision\n");
+ return -EINVAL;
+ }
- pr_info("Loading %s\n", fw_file_name);
+ dev_info(dev, "Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- pr_err("Can't load firmware file %s\n", fw_file_name);
+ dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- pr_err("Corrupt firmware file %s\n", fw_file_name);
+ dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -12129,7 +13258,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- pr_err("Cannot allocate net device\n");
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
}
@@ -12151,7 +13280,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- pr_err("Error loading firmware\n");
+ dev_err(&pdev->dev, "Error loading firmware\n");
goto init_one_exit;
}
@@ -12162,11 +13291,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+ " IRQ %d, ", board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq);
+ pr_cont("node addr %pM\n", dev->dev_addr);
return 0;
@@ -12194,13 +13324,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
unregister_netdev(dev);
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->reset_task);
+
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
kfree(bp->init_data);
@@ -12227,7 +13360,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12259,11 +13392,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
rtnl_lock();
pci_restore_state(pdev);
@@ -12292,6 +13430,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_netif_stop(bp, 0);
+ netif_carrier_off(bp->dev);
del_timer_sync(&bp->timer);
bp->stats_state = STATS_STATE_DISABLED;
@@ -12318,8 +13457,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
-
return 0;
}
@@ -12430,6 +13567,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return;
+ }
+
rtnl_lock();
bnx2x_eeh_recover(bp);
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e78c8..a1f3bf0cd63 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
#define MCP_REG_MCPR_NVM_WRITE 0x86408
#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
/* [R 32] read first 32 bit after inversion of function 0. mapped as
follows: [0] NIG attention for function0; [1] NIG attention for
function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
#define MISC_REG_E1HMF_MODE 0xa5f8
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
these bits is written as a '1'; the corresponding SPIO bit will turn off
it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address - timer 2…address 7 -
+ in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
this client is waiting for the arbiter. */
#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+ block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
should update accoring to 'hst_discard_doorbells' register when the state
machine is idle */
#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
means this PSWHST is discarding inputs from this client. Each bit should
update accoring to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
#define MISC_REGISTERS_RESET_REG_2_SET 0x594
#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RESERVED_08 8
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf3cb7..969ffed86b9 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
if (!dev)
return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
return;
read_lock_bh(&idev->lock);
- ifa = idev->addr_list;
- if (ifa)
+ if (!list_empty(&idev->addr_list)) {
+ struct inet6_ifaddr *ifa
+ = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
ipv6_addr_copy(addr, &ifa->addr);
- else
+ } else
ipv6_addr_set(addr, 0, 0, 0, 0);
read_unlock_bh(&idev->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514bf32..5e12462a9d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
}
skb->priority = 1;
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np = bond->dev->npinfo->netpoll;
+ slave_dev->npinfo = bond->dev->npinfo;
+ np->real_dev = np->dev = skb->dev;
+ slave_dev->priv_flags |= IFF_IN_NETPOLL;
+ netpoll_send_skb(np, skb);
+ slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
+ np->dev = bond->dev;
+ } else
+#endif
+ dev_queue_xmit(skb);
return 0;
}
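
The netpoll branch just above temporarily retargets the bond's netpoll state at the transmitting slave; in outline (descriptive summary only, not code from the patch):

	/*
	 * 1. mark the slave as inside a netpoll transmit (IFF_IN_NETPOLL)
	 * 2. point np->real_dev/np->dev at the slave so netpoll_send_skb()
	 *    emits the frame on the real hardware
	 * 3. clear the flag and restore np->dev to the bond afterwards
	 */
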
@@ -762,32 +774,6 @@ static int bond_check_dev_link(struct bonding *bond,
/*----------------------------- Multicast list ------------------------------*/
/*
- * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
- */
-static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
- const struct dev_mc_list *dmi2)
-{
- return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
- dmi1->dmi_addrlen == dmi2->dmi_addrlen;
-}
-
-/*
- * returns dmi entry if found, NULL otherwise
- */
-static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
- struct dev_mc_list *mc_list)
-{
- struct dev_mc_list *idmi;
-
- for (idmi = mc_list; idmi; idmi = idmi->next) {
- if (bond_is_dmi_same(dmi, idmi))
- return idmi;
- }
-
- return NULL;
-}
-
-/*
* Push the promiscuity flag down to appropriate slaves
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +825,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* Add a Multicast address to slaves
* according to mode
*/
-static void bond_mc_add(struct bonding *bond, void *addr, int alen)
+static void bond_mc_add(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
+ dev_mc_add(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i)
- dev_mc_add(slave->dev, addr, alen, 0);
+ dev_mc_add(slave->dev, addr);
}
}
@@ -858,18 +844,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
* Remove a multicast address from slave
* according to mode
*/
-static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
+static void bond_mc_del(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_delete(bond->curr_active_slave->dev, addr,
- alen, 0);
+ dev_mc_del(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i) {
- dev_mc_delete(slave->dev, addr, alen, 0);
+ dev_mc_del(slave->dev, addr);
}
}
}
@@ -896,66 +881,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
}
/*
- * Totally destroys the mc_list in bond
- */
-static void bond_mc_list_destroy(struct bonding *bond)
-{
- struct dev_mc_list *dmi;
-
- dmi = bond->mc_list;
- while (dmi) {
- bond->mc_list = dmi->next;
- kfree(dmi);
- dmi = bond->mc_list;
- }
-
- bond->mc_list = NULL;
-}
-
-/*
- * Copy all the Multicast addresses from src to the bonding device dst
- */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
- gfp_t gfp_flag)
-{
- struct dev_mc_list *dmi, *new_dmi;
-
- for (dmi = mc_list; dmi; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
-
- if (!new_dmi) {
- /* FIXME: Potential memory leak !!! */
- return -ENOMEM;
- }
-
- new_dmi->next = bond->mc_list;
- bond->mc_list = new_dmi;
- new_dmi->dmi_addrlen = dmi->dmi_addrlen;
- memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
- new_dmi->dmi_users = dmi->dmi_users;
- new_dmi->dmi_gusers = dmi->dmi_gusers;
- }
-
- return 0;
-}
-
-/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static void bond_mc_list_flush(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_del(slave_dev, ha->addr);
if (bond->params.mode == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_del(slave_dev, lacpdu_multicast);
}
}
@@ -969,7 +910,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (!USES_PRIMARY(bond->params.mode))
/* nothing to do - mc list is already up-to-date on
@@ -984,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(old_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_del(old_active->dev, ha->addr);
}
if (new_active) {
@@ -997,9 +937,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(new_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_add(new_active->dev, ha->addr);
bond_resend_igmp_join_requests(bond);
}
}
@@ -1329,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
bond->slave_cnt--;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * You must hold read lock on bond->lock before calling this.
+ */
+static bool slaves_support_netpoll(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i = 0;
+ bool ret = true;
+
+ bond_for_each_slave(bond, slave, i) {
+ if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !slave->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ return i != 0 && ret;
+}
+
+static void bond_poll_controller(struct net_device *bond_dev)
+{
+ struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
+ if (dev != bond_dev)
+ netpoll_poll_dev(dev);
+}
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ const struct net_device_ops *ops;
+ int i;
+
+ read_lock(&bond->lock);
+ bond_dev->npinfo = NULL;
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev) {
+ ops = slave->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(slave->dev);
+ else
+ slave->dev->npinfo = NULL;
+ }
+ }
+ read_unlock(&bond->lock);
+}
+
+#else
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+}
+
+#endif
+
/*---------------------------------- IOCTL ----------------------------------*/
static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1411,7 +1405,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
int old_features = bond_dev->features;
@@ -1485,14 +1479,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name,
bond_dev->type, slave_dev->type);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
+ res = netdev_bonding_change(bond_dev,
+ NETDEV_PRE_TYPE_CHANGE);
+ res = notifier_to_errno(res);
+ if (res) {
+ pr_err("%s: refused to change device type\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_undo_flags;
+ }
+
+ /* Flush unicast and multicast addresses */
+ dev_uc_flush(bond_dev);
+ dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else
ether_setup(bond_dev);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
+ netdev_bonding_change(bond_dev,
+ NETDEV_POST_TYPE_CHANGE);
}
} else if (bond_dev->type != slave_dev->type) {
pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1600,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(slave_dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_add(slave_dev, ha->addr);
netif_addr_unlock_bh(bond_dev);
}
@@ -1603,7 +1609,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_add(slave_dev, lacpdu_multicast);
}
bond_add_vlans_on_slave(bond, slave_dev);
@@ -1735,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (slaves_support_netpoll(bond_dev)) {
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (bond_dev->npinfo)
+ slave_dev->npinfo = bond_dev->npinfo;
+ } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ pr_info("New slave device %s does not support netpoll\n",
+ slave_dev->name);
+ pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ }
+#endif
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
+ netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1929,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_set_master(slave_dev, NULL);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ read_lock_bh(&bond->lock);
+ if (slaves_support_netpoll(bond_dev))
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ read_unlock_bh(&bond->lock);
+ if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
+ slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
+ else
+ slave_dev->npinfo = NULL;
+#endif
+
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -3905,10 +3935,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static bool bond_addr_in_mc_list(unsigned char *addr,
+ struct netdev_hw_addr_list *list,
+ int addrlen)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_hw_addr_list_for_each(ha, list)
+ if (!memcmp(ha->addr, addr, addrlen))
+ return true;
+
+ return false;
+}
+
static void bond_set_multicast_list(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
+ bool found;
/*
* Do promisc before checking multicast_mode
@@ -3943,20 +3987,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
- bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, bond_dev) {
+ found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_add(bond, ha->addr);
}
/* looking for addresses to delete from slaves' list */
- for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
- bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
+ found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_del(bond, ha->addr);
}
/* save master's multicast list */
- bond_mc_list_destroy(bond);
- bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
+ __hw_addr_flush(&bond->mc_list);
+ __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
+ bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
read_unlock(&bond->lock);
}
@@ -4448,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_register = bond_vlan_rx_register,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
+ .ndo_poll_controller = bond_poll_controller,
+#endif
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4541,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ bond_netpoll_cleanup(bond_dev);
+
/* Release the bonded slaves */
bond_release_all(bond_dev);
@@ -4550,9 +4605,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_destroy(bond);
- netif_addr_unlock_bh(bond_dev);
+ __hw_addr_flush(&bond->mc_list);
}
/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4736,13 @@ static int bond_check_params(struct bond_params *params)
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
num_unsol_na);
num_unsol_na = 1;
}
@@ -4924,6 +4977,8 @@ static int bond_init(struct net_device *bond_dev)
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
+
+ __hw_addr_init(&bond->mc_list);
return 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4dfce..2aa33672059 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr_list mc_list;
int (*xmit_hash_policy)(struct sk_buff *, int);
__be32 master_ip;
u16 flags;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 00000000000..0b28e010769
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
+#
+# CAIF physical drivers
+#
+
+if CAIF
+
+comment "CAIF transport drivers"
+
+config CAIF_TTY
+ tristate "CAIF TTY transport driver"
+ default n
+ ---help---
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
+
+endif # CAIF
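
The help text above says the ldisc is opened from user space; a minimal user-space sketch of doing that is shown below. The serial port path and the fallback N_CAIF value are assumptions for illustration, not something defined by this patch.

	/* Hedged illustration of attaching the N_CAIF line discipline */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	#ifndef N_CAIF
	#define N_CAIF 20	/* assumed value; take the real one from <linux/tty.h> */
	#endif

	int main(void)
	{
		int ldisc = N_CAIF;
		int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* hypothetical port */

		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
			perror("attach N_CAIF");
			return 1;
		}
		/* ldisc_open() in caif_serial.c now creates a cf<tty> net device */
		pause();	/* keep the fd, and thus the ldisc, alive */
		return 0;
	}
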
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 00000000000..52b6d1f826f
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
+ifeq ($(CONFIG_CAIF_DEBUG),1)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+clean-dirs:= .tmp_versions
+clean-files:= Module.symvers modules.order *.cmd *~ \
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 00000000000..09257ca8f56
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING 1 /* Bit 1 = 0x02*/
+#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK 4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+/*This list is protected by the rtnl lock. */
+static LIST_HEAD(ser_list);
+
+static int ser_loop;
+module_param(ser_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static int ser_use_stx = 1;
+module_param(ser_use_stx, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static int ser_use_fcs = 1;
+
+module_param(ser_use_fcs, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, S_IRUGO);
+
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+ struct caif_dev_common common;
+ struct list_head node;
+ struct net_device *dev;
+ struct sk_buff_head head;
+ struct tty_struct *tty;
+ bool tx_started;
+ unsigned long state;
+ char *tty_name;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_tty_dir;
+ struct debugfs_blob_wrapper tx_blob;
+ struct debugfs_blob_wrapper rx_blob;
+ u8 rx_data[128];
+ u8 tx_data[128];
+ u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+ ser->tty_status =
+ ser->tty->stopped << 5 |
+ ser->tty->hw_stopped << 4 |
+ ser->tty->flow_stopped << 3 |
+ ser->tty->packet << 2 |
+ ser->tty->low_latency << 1 |
+ ser->tty->warned;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+ ser->debugfs_tty_dir =
+ debugfs_create_dir(tty->name, debugfsdir);
+ if (!IS_ERR(ser->debugfs_tty_dir)) {
+ debugfs_create_blob("last_tx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tx_blob);
+
+ debugfs_create_blob("last_rx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->rx_blob);
+
+ debugfs_create_x32("ser_state", S_IRUSR,
+ ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tty_status);
+
+ }
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = 0;
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+ debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->rx_data))
+ size = sizeof(ser->rx_data);
+ memcpy(ser->rx_data, data, size);
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->tx_data))
+ size = sizeof(ser->tx_data);
+ memcpy(ser->tx_data, data, size);
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
+{
+ struct sk_buff *skb = NULL;
+ struct ser_device *ser;
+ int ret;
+ u8 *p;
+ ser = tty->disc_data;
+
+ /*
+ * NOTE: flags may contain information about break or overrun.
+ * This is not yet handled.
+ */
+
+
+ /*
+ * Workaround for garbage at start of transmission,
+ * only enable if STX handling is not enabled.
+ */
+ if (!ser->common.use_stx && !ser->tx_started) {
+ dev_info(&ser->dev->dev,
+ "Bytes received before initial transmission -"
+ "bytes discarded.\n");
+ return;
+ }
+
+ BUG_ON(ser->dev == NULL);
+
+ /* Get a suitable caif packet and copy in data. */
+ skb = netdev_alloc_skb(ser->dev, count+1);
+ if (skb == NULL)
+ return;
+ p = skb_put(skb, count);
+ memcpy(p, data, count);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = ser->dev;
+ debugfs_rx(ser, data, count);
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+ if (!ret) {
+ ser->dev->stats.rx_packets++;
+ ser->dev->stats.rx_bytes += count;
+ } else
+ ++ser->dev->stats.rx_dropped;
+ update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+ struct tty_struct *tty;
+ struct sk_buff *skb;
+ int tty_wr, len, room;
+ tty = ser->tty;
+ ser->tx_started = true;
+
+ /* Enter critical section */
+ if (test_and_set_bit(CAIF_SENDING, &ser->state))
+ return 0;
+
+ /* skb_peek is safe because handle_tx is called after skb_queue_tail */
+ while ((skb = skb_peek(&ser->head)) != NULL) {
+
+ /* Make sure you don't write too much */
+ len = skb->len;
+ room = tty_write_room(tty);
+ if (!room)
+ break;
+ if (room > ser_write_chunk)
+ room = ser_write_chunk;
+ if (len > room)
+ len = room;
+
+ /* Write to tty or loopback */
+ if (!ser_loop) {
+ tty_wr = tty->ops->write(tty, skb->data, len);
+ update_tty_status(ser);
+ } else {
+ tty_wr = len;
+ ldisc_receive(tty, skb->data, NULL, len);
+ }
+ ser->dev->stats.tx_packets++;
+ ser->dev->stats.tx_bytes += tty_wr;
+
+ /* Error on TTY ?! */
+ if (tty_wr < 0)
+ goto error;
+ /* Reduce buffer written, and discard if empty */
+ skb_pull(skb, tty_wr);
+ if (skb->len == 0) {
+ struct sk_buff *tmp = skb_dequeue(&ser->head);
+ BUG_ON(tmp != skb);
+ if (in_interrupt())
+ dev_kfree_skb_irq(skb);
+ else
+ kfree_skb(skb);
+ }
+ }
+ /* Send flow off if queue is empty */
+ if (ser->head.qlen <= SEND_QUEUE_LOW &&
+ test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+ ser->common.flowctrl(ser->dev, ON);
+ clear_bit(CAIF_SENDING, &ser->state);
+ return 0;
+error:
+ clear_bit(CAIF_SENDING, &ser->state);
+ return tty_wr;
+}
+
+static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ser_device *ser;
+ BUG_ON(dev == NULL);
+ ser = netdev_priv(dev);
+
+ /* Send flow off once, on high water mark */
+ if (ser->head.qlen > SEND_QUEUE_HIGH &&
+ !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+
+ ser->common.flowctrl(ser->dev, OFF);
+
+ skb_queue_tail(&ser->head, skb);
+ return handle_tx(ser);
+}
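
Taken together, caif_xmit() and handle_tx() above implement simple high/low watermark flow control toward the CAIF stack; summarised here as a descriptive comment only:

	/*
	 * - caif_xmit(): queue length above SEND_QUEUE_HIGH (100) sends
	 *   flowctrl(OFF) once, guarded by the CAIF_FLOW_OFF_SENT bit
	 * - handle_tx(): once the queue drains to SEND_QUEUE_LOW (10) or less,
	 *   the bit is cleared again and flowctrl(ON) re-opens the flow
	 */
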
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ ser = tty->disc_data;
+ BUG_ON(ser == NULL);
+ BUG_ON(ser->tty != tty);
+ handle_tx(ser);
+}
+
+
+static int ldisc_open(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ struct net_device *dev;
+ char name[64];
+ int result;
+
+ /* No write no play */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+ return -EPERM;
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+ debugfs_init(ser, tty);
+ tty->receive_room = N_TTY_BUF_SIZE;
+ tty->disc_data = ser;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ rtnl_lock();
+ result = register_netdevice(dev);
+ if (result) {
+ rtnl_unlock();
+ free_netdev(dev);
+ return -ENODEV;
+ }
+
+ list_add(&ser->node, &ser_list);
+ rtnl_unlock();
+ netif_stop_queue(dev);
+ update_tty_status(ser);
+ return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+ struct ser_device *ser = tty->disc_data;
+ /* Remove may be called inside or outside of rtnl_lock */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ /* device is freed automagically by net-sysfs */
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(&ser->node);
+ debugfs_deinit(ser);
+ tty_kref_put(ser->tty);
+ if (!islocked)
+ rtnl_unlock();
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_caif",
+ .open = ldisc_open,
+ .close = ldisc_close,
+ .receive_buf = ldisc_receive,
+ .write_wakeup = ldisc_tx_wakeup
+};
+
+static int register_ldisc(void)
+{
+ int result;
+ result = tty_register_ldisc(N_CAIF, &caif_ldisc);
+ if (result < 0) {
+ pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
+ result);
+ return result;
+ }
+ return result;
+}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = caif_net_open,
+ .ndo_stop = caif_net_close,
+ .ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+ struct ser_device *serdev = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &netdev_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = CAIF_MAX_MTU;
+ dev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ dev->tx_queue_len = 0;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&serdev->head);
+ serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+ serdev->common.use_frag = true;
+ serdev->common.use_stx = ser_use_stx;
+ serdev->common.use_fcs = ser_use_fcs;
+ serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+ struct ser_device *ser;
+ ser = netdev_priv(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+ int ret;
+ ret = register_ldisc();
+ debugfsdir = debugfs_create_dir("caif_serial", NULL);
+ return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+ struct ser_device *ser = NULL;
+ struct list_head *node;
+ struct list_head *_tmp;
+ list_for_each_safe(node, _tmp, &ser_list) {
+ ser = list_entry(node, struct ser_device, node);
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(node);
+ }
+ tty_unregister_ldisc(N_CAIF);
+ debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 05b751719bd..2c5227c02fa 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -63,6 +63,16 @@ config CAN_BFIN
To compile this driver as a module, choose M here: the
module will be called bfin_can.
+config CAN_JANZ_ICAN3
+ tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
+ depends on CAN_DEV && MFD_JANZ_CMODIO
+ ---help---
+ Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
+ connects to a MODULbus carrier board.
+
+ This driver can also be built as a module. If so, the module will be
+ called janz-ican3.ko.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a702f28d01..9047cd066fe 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
+obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a38798..2d8bd86bc5e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -376,7 +375,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
at91_write(priv, AT91_MCR(mb), reg_mcr);
stats->tx_bytes += cf->can_dlc;
- dev->trans_start = jiffies;
/* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
@@ -662,7 +660,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
netif_receive_skb(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
@@ -899,7 +896,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 03489864376..b6e890d2836 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -270,8 +269,6 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill data length code */
bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
new file mode 100644
index 00000000000..6e533dcc36c
--- /dev/null
+++ b/drivers/net/can/janz-ican3.c
@@ -0,0 +1,1830 @@
+/*
+ * Janz MODULbus VMOD-ICAN3 CAN Interface Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <linux/mfd/janz.h>
+
+/* the DPM has 64k of memory, organized into 256x 256 byte pages */
+#define DPM_NUM_PAGES 256
+#define DPM_PAGE_SIZE 256
+#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE)
+
+/* JANZ ICAN3 "old-style" host interface queue page numbers */
+#define QUEUE_OLD_CONTROL 0
+#define QUEUE_OLD_RB0 1
+#define QUEUE_OLD_RB1 2
+#define QUEUE_OLD_WB0 3
+#define QUEUE_OLD_WB1 4
+
+/* Janz ICAN3 "old-style" host interface control registers */
+#define MSYNC_PEER 0x00 /* ICAN only */
+#define MSYNC_LOCL 0x01 /* host only */
+#define TARGET_RUNNING 0x02
+
+#define MSYNC_RB0 0x01
+#define MSYNC_RB1 0x02
+#define MSYNC_RBLW 0x04
+#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1)
+
+#define MSYNC_WB0 0x10
+#define MSYNC_WB1 0x20
+#define MSYNC_WBLW 0x40
+#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)
+
+/* Janz ICAN3 "new-style" host interface queue page numbers */
+#define QUEUE_TOHOST 5
+#define QUEUE_FROMHOST_MID 6
+#define QUEUE_FROMHOST_HIGH 7
+#define QUEUE_FROMHOST_LOW 8
+
+/* The first free page in the DPM is #9 */
+#define DPM_FREE_START 9
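
Pulling the page defines above together, the fixed part of the 64k DPM window maps out as follows (summary derived from the constants above, added for readability):

	/*
	 * DPM page map implied by the defines above (256 pages x 256 bytes):
	 *
	 *   page 0      QUEUE_OLD_CONTROL  (MSYNC_PEER/MSYNC_LOCL handshake)
	 *   pages 1-2   QUEUE_OLD_RB0/RB1  (old-style read mailboxes)
	 *   pages 3-4   QUEUE_OLD_WB0/WB1  (old-style write mailboxes)
	 *   page 5      QUEUE_TOHOST       (new-style rx descriptor page)
	 *   pages 6-8   QUEUE_FROMHOST_*   (new-style tx descriptor pages)
	 *   pages 9+    DPM_FREE_START     (buffers handed out at init time)
	 */
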
+
+/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */
+#define DESC_VALID 0x80
+#define DESC_WRAP 0x40
+#define DESC_INTERRUPT 0x20
+#define DESC_IVALID 0x10
+#define DESC_LEN(len) (len)
+
+/* Janz ICAN3 Firmware Messages */
+#define MSG_CONNECTI 0x02
+#define MSG_DISCONNECT 0x03
+#define MSG_IDVERS 0x04
+#define MSG_MSGLOST 0x05
+#define MSG_NEWHOSTIF 0x08
+#define MSG_INQUIRY 0x0a
+#define MSG_SETAFILMASK 0x10
+#define MSG_INITFDPMQUEUE 0x11
+#define MSG_HWCONF 0x12
+#define MSG_FMSGLOST 0x15
+#define MSG_CEVTIND 0x37
+#define MSG_CBTRREQ 0x41
+#define MSG_COFFREQ 0x42
+#define MSG_CONREQ 0x43
+#define MSG_CCONFREQ 0x47
+
+/*
+ * Janz ICAN3 CAN Inquiry Message Types
+ *
+ * NOTE: there appears to be a firmware bug here. You must send
+ * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED
+ * NOTE: response. The controller never responds to a message with
+ * NOTE: the INQUIRY_EXTENDED subspec :(
+ */
+#define INQUIRY_STATUS 0x00
+#define INQUIRY_TERMINATION 0x01
+#define INQUIRY_EXTENDED 0x04
+
+/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */
+#define SETAFILMASK_REJECT 0x00
+#define SETAFILMASK_FASTIF 0x02
+
+/* Janz ICAN3 CAN Hardware Configuration Message Types */
+#define HWCONF_TERMINATE_ON 0x01
+#define HWCONF_TERMINATE_OFF 0x00
+
+/* Janz ICAN3 CAN Event Indication Message Types */
+#define CEVTIND_EI 0x01
+#define CEVTIND_DOI 0x02
+#define CEVTIND_LOST 0x04
+#define CEVTIND_FULL 0x08
+#define CEVTIND_BEI 0x10
+
+#define CEVTIND_CHIP_SJA1000 0x02
+
+#define ICAN3_BUSERR_QUOTA_MAX 255
+
+/* Janz ICAN3 CAN Frame Conversion */
+#define ICAN3_ECHO 0x10
+#define ICAN3_EFF_RTR 0x40
+#define ICAN3_SFF_RTR 0x10
+#define ICAN3_EFF 0x80
+
+#define ICAN3_CAN_TYPE_MASK 0x0f
+#define ICAN3_CAN_TYPE_SFF 0x00
+#define ICAN3_CAN_TYPE_EFF 0x01
+
+#define ICAN3_CAN_DLC_MASK 0x0f
+
+/*
+ * SJA1000 Status and Error Register Definitions
+ *
+ * Copied from drivers/net/can/sja1000/sja1000.h
+ */
+
+/* status register content */
+#define SR_BS 0x80
+#define SR_ES 0x40
+#define SR_TS 0x20
+#define SR_RS 0x10
+#define SR_TCS 0x08
+#define SR_TBS 0x04
+#define SR_DOS 0x02
+#define SR_RBS 0x01
+
+#define SR_CRIT (SR_BS|SR_ES)
+
+/* ECC register */
+#define ECC_SEG 0x1F
+#define ECC_DIR 0x20
+#define ECC_ERR 6
+#define ECC_BIT 0x00
+#define ECC_FORM 0x40
+#define ECC_STUFF 0x80
+#define ECC_MASK 0xc0
+
+/* Number of buffers for use in the "new-style" host interface */
+#define ICAN3_NEW_BUFFERS 16
+
+/* Number of buffers for use in the "fast" host interface */
+#define ICAN3_TX_BUFFERS 512
+#define ICAN3_RX_BUFFERS 1024
+
+/* SJA1000 Clock Input */
+#define ICAN3_CAN_CLOCK 8000000
+
+/* Driver Name */
+#define DRV_NAME "janz-ican3"
+
+/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */
+struct ican3_dpm_control {
+ /* window address register */
+ u8 window_address;
+ u8 unused1;
+
+ /*
+ * Read access: clear interrupt from microcontroller
+ * Write access: send interrupt to microcontroller
+ */
+ u8 interrupt;
+ u8 unused2;
+
+ /* write-only: reset all hardware on the module */
+ u8 hwreset;
+ u8 unused3;
+
+ /* write-only: generate an interrupt to the TPU */
+ u8 tpuinterrupt;
+};
+
+struct ican3_dev {
+
+ /* must be the first member */
+ struct can_priv can;
+
+ /* CAN network device */
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ /* Device for printing */
+ struct device *dev;
+
+ /* module number */
+ unsigned int num;
+
+ /* base address of registers and IRQ */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+ struct ican3_dpm_control __iomem *dpmctrl;
+ void __iomem *dpm;
+ int irq;
+
+ /* CAN bus termination status */
+ struct completion termination_comp;
+ bool termination_enabled;
+
+ /* CAN bus error status registers */
+ struct completion buserror_comp;
+ struct can_berr_counter bec;
+
+ /* old and new style host interface */
+ unsigned int iftype;
+
+ /*
+ * Any function which changes the current DPM page must hold this
+ * lock while it is performing data accesses. This ensures that the
+ * function will not be preempted and end up reading data from a
+ * different DPM page than it expects.
+ */
+ spinlock_t lock;
+
+ /* new host interface */
+ unsigned int rx_int;
+ unsigned int rx_num;
+ unsigned int tx_num;
+
+ /* fast host interface */
+ unsigned int fastrx_start;
+ unsigned int fastrx_int;
+ unsigned int fastrx_num;
+ unsigned int fasttx_start;
+ unsigned int fasttx_num;
+
+ /* first free DPM page */
+ unsigned int free_page;
+};
+
+struct ican3_msg {
+ u8 control;
+ u8 spec;
+ __le16 len;
+ u8 data[252];
+};
+
+struct ican3_new_desc {
+ u8 control;
+ u8 pointer;
+};
+
+struct ican3_fast_desc {
+ u8 control;
+ u8 command;
+ u8 data[14];
+};
+
+/* write to the window basic address register */
+static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
+{
+ BUG_ON(page >= DPM_NUM_PAGES);
+ iowrite8(page, &mod->dpmctrl->window_address);
+}
+
+/*
+ * ICAN3 "old-style" host interface
+ */
+
+/*
+ * Receive a message from the ICAN3 "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no message exists
+ */
+static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_RB_MASK) == 0x00) {
+ dev_dbg(mod->dev, "no mbox for reading\n");
+ return -ENOMEM;
+ }
+
+ /* find the first free mbox to read */
+ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
+ mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
+ else
+ mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
+
+ /* copy the message */
+ mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /*
+ * notify the firmware that the read buffer is available
+ * for it to fill again
+ */
+ locl ^= mbox;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
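
As a worked example of the MSYNC handshake implemented above (values chosen purely for illustration):

	/*
	 *   peer = 0x01, locl = 0x00  ->  xord = 0x01
	 *   xord & MSYNC_RB_MASK == MSYNC_RB0, so only read-mailbox 0 is
	 *   pending; the message is copied from page QUEUE_OLD_RB0, then
	 *   locl ^= MSYNC_RB0 (locl becomes 0x01), which tells the firmware
	 *   that RB0 may be refilled.
	 */
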
+
+/*
+ * Send a message through the "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no free space exists
+ */
+static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
+ dev_err(mod->dev, "no mbox for writing\n");
+ return -ENOMEM;
+ }
+
+ /* calculate a free mbox to use */
+ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
+
+ /* copy the message to the DPM */
+ mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ locl ^= mbox;
+ if (mbox == MSYNC_WB1)
+ locl |= MSYNC_WBLW;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
+
+/*
+ * ICAN3 "new-style" Host Interface Setup
+ */
+
+static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_new_desc desc;
+ unsigned long flags;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* setup the internal datastructures for RX */
+ mod->rx_num = 0;
+ mod->rx_int = 0;
+
+ /* tohost queue descriptors are in page 5 */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ dst = mod->dpm;
+
+ /* initialize the tohost (rx) queue descriptors: pages 9-24 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost (tx) mid queue descriptors are in page 6 */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ dst = mod->dpm;
+
+ /* setup the internal datastructures for TX */
+ mod->tx_num = 0;
+
+ /* initialize the fromhost mid queue descriptors: pages 25-40 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost hi queue descriptors are in page 7 */
+ ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost hi queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ /* fromhost low queue descriptors are in page 8 */
+ ican3_set_page(mod, QUEUE_FROMHOST_LOW);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost low queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 Fast Host Interface Setup
+ */
+
+static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc desc;
+ unsigned long flags;
+ unsigned int addr;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* save the start recv page */
+ mod->fastrx_start = mod->free_page;
+ mod->fastrx_num = 0;
+ mod->fastrx_int = 0;
+
+ /* build a single fast tohost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = 0x00;
+ desc.command = 1;
+
+ /* build the tohost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_RX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ /* make sure we page-align the next queue */
+ if (addr != 0)
+ mod->free_page++;
+
+ /* save the start xmit page */
+ mod->fasttx_start = mod->free_page;
+ mod->fasttx_num = 0;
+
+ /* build a single fast fromhost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = DESC_VALID;
+ desc.command = 1;
+
+ /* build the fromhost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_TX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_TX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
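
Following the two init routines above, the free-page cursor advances deterministically. A quick back-of-the-envelope check, assuming struct ican3_fast_desc packs to 16 bytes (16 descriptors per 256-byte page):

	/*
	 * New-style interface: 16 rx pages (9-24) + 16 tx pages (25-40)
	 *                      + 1 unused hi page (41) + 1 unused low page (42)
	 * Fast interface:      fastrx_start = 43, 1024 descriptors * 16 B
	 *                      = 64 pages (43-106); fasttx_start = 107,
	 *                      512 descriptors * 16 B = 32 pages (107-138)
	 */
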
+
+/*
+ * ICAN3 "new-style" Host Interface Message Helpers
+ */
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc));
+
+ /* switch to the fromhost mid queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ /* switch back to the descriptor, set the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the tx number */
+ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1);
+ return 0;
+}
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc));
+
+ /* switch to the tohost queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /* switch back to the descriptor, toggle the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the rx number */
+ mod->rx_num = (desc.control & DESC_WRAP) ? 0 : (mod->rx_num + 1);
+ return 0;
+}
+
+/*
+ * Message Send / Recv Helpers
+ */
+
+static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_send_msg(mod, msg);
+ else
+ ret = ican3_new_send_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_recv_msg(mod, msg);
+ else
+ ret = ican3_new_recv_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+/*
+ * Quick Pre-constructed Messages
+ */
+
+static int __devinit ican3_msg_connect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CONNECTI;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_DISCONNECT;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_NEWHOSTIF;
+ msg.len = cpu_to_le16(0);
+
+ /* If we're not using the old interface, switching seems bogus */
+ WARN_ON(mod->iftype != 0);
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* mark the module as using the new host interface */
+ mod->iftype = 1;
+ return 0;
+}
+
+static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ unsigned int addr;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INITFDPMQUEUE;
+ msg.len = cpu_to_le16(8);
+
+ /* write the tohost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fastrx_start);
+ msg.data[0] = addr & 0xff;
+ msg.data[1] = (addr >> 8) & 0xff;
+ msg.data[2] = (addr >> 16) & 0xff;
+ msg.data[3] = (addr >> 24) & 0xff;
+
+ /* write the fromhost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fasttx_start);
+ msg.data[4] = addr & 0xff;
+ msg.data[5] = (addr >> 8) & 0xff;
+ msg.data[6] = (addr >> 16) & 0xff;
+ msg.data[7] = (addr >> 24) & 0xff;
+
+ /* If we're not using the new interface yet, we cannot do this */
+ WARN_ON(mod->iftype != 1);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Setup the CAN filter to either accept or reject all
+ * messages from the CAN bus.
+ */
+static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ /* Standard Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(5);
+ msg.data[0] = 0x00; /* IDLo LSB */
+ msg.data[1] = 0x00; /* IDLo MSB */
+ msg.data[2] = 0xff; /* IDHi LSB */
+ msg.data[3] = 0x07; /* IDHi MSB */
+
+ /* accept all frames for fast host if, or reject all frames */
+ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* Extended Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(13);
+ msg.data[0] = 0; /* MUX = 0 */
+ msg.data[1] = 0x00; /* IDLo LSB */
+ msg.data[2] = 0x00;
+ msg.data[3] = 0x00;
+ msg.data[4] = 0x20; /* IDLo MSB */
+ msg.data[5] = 0xff; /* IDHi LSB */
+ msg.data[6] = 0xff;
+ msg.data[7] = 0xff;
+ msg.data[8] = 0x3f; /* IDHi MSB */
+
+ /* accept all frames for fast host if, or reject all frames */
+ msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Bring the CAN bus online or offline
+ */
+static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_termination(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_HWCONF;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INQUIRY;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = subspec;
+ msg.data[1] = 0x00;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CCONFREQ;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = quota;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * ICAN3 to Linux CAN Frame Conversion
+ */
+
+static void ican3_to_can_frame(struct ican3_dev *mod,
+ struct ican3_fast_desc *desc,
+ struct can_frame *cf)
+{
+ if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) {
+ if (desc->data[1] & ICAN3_SFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ cf->can_id |= desc->data[0] << 3;
+ cf->can_id |= (desc->data[1] & 0xe0) >> 5;
+ cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
+ memcpy(cf->data, &desc->data[2], sizeof(cf->data));
+ } else {
+ cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
+ if (desc->data[0] & ICAN3_EFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ if (desc->data[0] & ICAN3_EFF) {
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_id |= desc->data[2] << 21; /* 28-21 */
+ cf->can_id |= desc->data[3] << 13; /* 20-13 */
+ cf->can_id |= desc->data[4] << 5; /* 12-5 */
+ cf->can_id |= (desc->data[5] & 0xf8) >> 3;
+ } else {
+ cf->can_id |= desc->data[2] << 3; /* 10-3 */
+ cf->can_id |= desc->data[3] >> 5; /* 2-0 */
+ }
+
+ memcpy(cf->data, &desc->data[6], sizeof(cf->data));
+ }
+}
+
+static void can_frame_to_ican3(struct ican3_dev *mod,
+ struct can_frame *cf,
+ struct ican3_fast_desc *desc)
+{
+ /* clear out any stale data in the descriptor */
+ memset(desc->data, 0, sizeof(desc->data));
+
+ /* we always use the extended format, with the ECHO flag set */
+ desc->command = ICAN3_CAN_TYPE_EFF;
+ desc->data[0] |= cf->can_dlc;
+ desc->data[1] |= ICAN3_ECHO;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ desc->data[0] |= ICAN3_EFF_RTR;
+
+ /* pack the id into the correct places */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ desc->data[0] |= ICAN3_EFF;
+ desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */
+ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */
+ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */
+ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */
+ } else {
+ desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */
+ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */
+ }
+
+ /* copy the data bits into the descriptor */
+ memcpy(&desc->data[6], cf->data, sizeof(cf->data));
+}
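
Editorial aside (not part of the driver): the extended-frame branch above splits the 29-bit identifier into four bytes, most significant bits first, with the low five ID bits left-aligned in data[5]; the receive path reverses the same shifts. A minimal stand-alone user-space check of that round trip, using a made-up example ID of 0x1abcdef3:

#include <assert.h>
#include <stdint.h>

/* Pack bits 28-0 of an extended CAN ID the way can_frame_to_ican3() does. */
static void eff_pack(uint32_t id, uint8_t d[4])
{
	d[0] = (id & 0x1fe00000) >> 21;	/* bits 28-21 */
	d[1] = (id & 0x001fe000) >> 13;	/* bits 20-13 */
	d[2] = (id & 0x00001fe0) >> 5;	/* bits 12-5  */
	d[3] = (id & 0x0000001f) << 3;	/* bits 4-0, left-aligned */
}

/* Reassemble the ID the way the EFF branch of ican3_to_can_frame() does. */
static uint32_t eff_unpack(const uint8_t d[4])
{
	return ((uint32_t)d[0] << 21) | (d[1] << 13) | (d[2] << 5) |
	       ((d[3] & 0xf8) >> 3);
}

int main(void)
{
	uint8_t d[4];

	eff_pack(0x1abcdef3, d);		/* d = { 0xd5, 0xe6, 0xf7, 0x98 } */
	assert(eff_unpack(d) == 0x1abcdef3);	/* round-trips cleanly */
	return 0;
}
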
+
+/*
+ * Interrupt Handling
+ */
+
+/*
+ * Handle an ID + Version message response from the firmware. We never generate
+ * this message in production code, but being able to display it is very
+ * useful when debugging.
+ */
+static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+}
+
+static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * Report that communication messages with the microcontroller firmware
+ * are being lost. These are never CAN frames, so we do not generate an
+ * error frame for userspace
+ */
+ if (msg->spec == MSG_MSGLOST) {
+ dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+ return;
+ }
+
+ /*
+ * Oops, this indicates that we have lost messages in the fast queue,
+ * which are exclusively CAN messages. Our driver isn't reading CAN
+ * frames fast enough.
+ *
+ * We'll pretend that the SJA1000 told us that it ran out of buffer
+ * space, because there is no better message for this.
+ */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/*
+ * Handle CAN Event Indication Messages from the firmware
+ *
+ * The ICAN3 firmware provides the values of some SJA1000 registers when it
+ * generates this message. The code below is largely copied from the
+ * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary
+ */
+static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ enum can_state state = mod->can.state;
+ u8 status, isrc, rxerr, txerr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* we can only handle the SJA1000 part */
+ if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
+ dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+ return -ENODEV;
+ }
+
+ /* check the message length for sanity */
+ if (le16_to_cpu(msg->len) < 6) {
+ dev_err(mod->dev, "error message too short\n");
+ return -EINVAL;
+ }
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ isrc = msg->data[0];
+ status = msg->data[3];
+ rxerr = msg->data[4];
+ txerr = msg->data[5];
+
+ /* data overrun interrupt */
+ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
+ dev_dbg(mod->dev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* error warning + passive interrupt */
+ if (isrc == CEVTIND_EI) {
+ dev_dbg(mod->dev, "error warning + passive interrupt\n");
+ if (status & SR_BS) {
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(dev);
+ } else if (status & SR_ES) {
+ if (rxerr >= 128 || txerr >= 128)
+ state = CAN_STATE_ERROR_PASSIVE;
+ else
+ state = CAN_STATE_ERROR_WARNING;
+ } else {
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ /* bus error interrupt */
+ if (isrc == CEVTIND_BEI) {
+ u8 ecc = msg->data[2];
+
+ dev_dbg(mod->dev, "bus error interrupt\n");
+ mod->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & ECC_SEG;
+ break;
+ }
+
+ if ((ecc & ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
+ state == CAN_STATE_ERROR_PASSIVE)) {
+ cf->can_id |= CAN_ERR_CRTL;
+ if (state == CAN_STATE_ERROR_WARNING) {
+ mod->can.can_stats.error_warning++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ mod->can.can_stats.error_passive++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ mod->can.state = state;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ return 0;
+}
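
For reference, the CEVTIND_EI branch above boils down to three checks on the register snapshot the firmware supplies. A condensed user-space restatement follows (an editorial sketch, not driver code; the note about the warning level refers to the SJA1000's default error warning limit of 96, which is not visible in this hunk):

#include <stdbool.h>
#include <stdint.h>

enum bus_state { ACTIVE, WARNING, PASSIVE, BUS_OFF };

/* Mirror the state decision in ican3_handle_cevtind(): bus-off (SR_BS) wins,
 * then the 128-error passive threshold, otherwise the error-warning state
 * reported via SR_ES (raised once a counter passes the warning limit). */
static enum bus_state cevtind_state(bool sr_bs, bool sr_es,
				    uint8_t rxerr, uint8_t txerr)
{
	if (sr_bs)
		return BUS_OFF;
	if (sr_es)
		return (rxerr >= 128 || txerr >= 128) ? PASSIVE : WARNING;
	return ACTIVE;
}
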
+
+static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ switch (msg->data[0]) {
+ case INQUIRY_STATUS:
+ case INQUIRY_EXTENDED:
+ mod->bec.rxerr = msg->data[5];
+ mod->bec.txerr = msg->data[6];
+ complete(&mod->buserror_comp);
+ break;
+ case INQUIRY_TERMINATION:
+ mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
+ complete(&mod->termination_comp);
+ break;
+ default:
+ dev_err(mod->dev, "recieved an unknown inquiry response\n");
+ break;
+ }
+}
+
+static void ican3_handle_unknown_message(struct ican3_dev *mod,
+ struct ican3_msg *msg)
+{
+ dev_warn(mod->dev, "recieved unknown message: spec 0x%.2x length %d\n",
+ msg->spec, le16_to_cpu(msg->len));
+}
+
+/*
+ * Handle a control message from the firmware
+ */
+static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+ mod->num, msg->spec, le16_to_cpu(msg->len));
+
+ switch (msg->spec) {
+ case MSG_IDVERS:
+ ican3_handle_idvers(mod, msg);
+ break;
+ case MSG_MSGLOST:
+ case MSG_FMSGLOST:
+ ican3_handle_msglost(mod, msg);
+ break;
+ case MSG_CEVTIND:
+ ican3_handle_cevtind(mod, msg);
+ break;
+ case MSG_INQUIRY:
+ ican3_handle_inquiry(mod, msg);
+ break;
+ default:
+ ican3_handle_unknown_message(mod, msg);
+ break;
+ }
+}
+
+/*
+ * Check that there is room in the TX ring to transmit another skb
+ *
+ * LOCKING: must hold mod->lock
+ */
+static bool ican3_txok(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc __iomem *desc;
+ u8 control;
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
+ control = ioread8(&desc->control);
+
+ /* if the control bits are not valid, then we have no more space */
+ if (!(control & DESC_VALID))
+ return false;
+
+ return true;
+}
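
The "/ 16" and "% 16" arithmetic above implies that sixteen fast descriptors fill one DPM page, so descriptor n lives at page fasttx_start + n/16, byte offset (n % 16) * sizeof(desc). A stand-alone illustration (editorial sketch; the 16-byte descriptor size and 256-byte page size are assumptions, since neither constant appears in this hunk):

#include <stdio.h>

#define DESC_PER_PAGE	16	/* implied by the "/ 16" and "% 16" above */
#define DESC_SIZE	16	/* assumed sizeof(struct ican3_fast_desc) */

int main(void)
{
	unsigned int start = 6;	/* example fasttx_start page */
	unsigned int num = 20;	/* example descriptor index */

	unsigned int page = start + num / DESC_PER_PAGE;
	unsigned int offset = (num % DESC_PER_PAGE) * DESC_SIZE;

	/* descriptor 20 -> page 7, offset 64; 16 descriptors fill a 256-byte page */
	printf("page %u, offset %u\n", page, offset);
	return 0;
}
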
+
+/*
+ * Receive one CAN frame from the hardware
+ *
+ * This is the core of the receive path. It is called from the NAPI poll
+ * routine below (ican3_napi), which also drains the firmware's control
+ * message queue, so all receive processing happens in one place. This was
+ * done to simplify locking.
+ *
+ * CONTEXT: NAPI polling (softirq); mod->lock is taken internally
+ */
+static int ican3_recv_skb(struct ican3_dev *mod)
+{
+ struct net_device *ndev = mod->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* copy the whole descriptor */
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
+ desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* check that we actually have a CAN frame */
+ if (!(desc.control & DESC_VALID))
+ return -ENOBUFS;
+
+ /* allocate an skb */
+ skb = alloc_can_skb(ndev, &cf);
+ if (unlikely(skb == NULL)) {
+ stats->rx_dropped++;
+ goto err_noalloc;
+ }
+
+ /* convert the ICAN3 frame into Linux CAN format */
+ ican3_to_can_frame(mod, &desc, cf);
+
+ /* receive the skb, update statistics */
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+err_noalloc:
+ /* toggle the valid bit and return the descriptor to the ring */
+ desc.control ^= DESC_VALID;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
+ memcpy_toio(desc_addr, &desc, 1);
+
+ /* update the next buffer pointer */
+ mod->fastrx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fastrx_num + 1);
+
+ /* there are still more buffers to process */
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return 0;
+}
+
+static int ican3_napi(struct napi_struct *napi, int budget)
+{
+ struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
+ struct ican3_msg msg;
+ unsigned long flags;
+ int received = 0;
+ int ret;
+
+ /* process all communication messages */
+ while (true) {
+ ret = ican3_recv_msg(mod, &msg);
+ if (ret)
+ break;
+
+ ican3_handle_message(mod, &msg);
+ }
+
+ /* process all CAN frames from the fast interface */
+ while (received < budget) {
+ ret = ican3_recv_skb(mod);
+ if (ret)
+ break;
+
+ received++;
+ }
+
+ /* We have processed all packets that the adapter had; since that
+ * was less than our budget, stop polling */
+ if (received < budget)
+ napi_complete(napi);
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* Wake up the transmit queue if necessary */
+ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod))
+ netif_wake_queue(mod->ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* re-enable interrupt generation */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+ return received;
+}
+
+static irqreturn_t ican3_irq(int irq, void *dev_id)
+{
+ struct ican3_dev *mod = dev_id;
+ u8 stat;
+
+ /*
+ * The interrupt status register on this device reports interrupts
+ * as zeroes instead of using ones like most other devices
+ */
+ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num);
+ if (stat == (1 << mod->num))
+ return IRQ_NONE;
+
+ /* clear the MODULbus interrupt from the microcontroller */
+ ioread8(&mod->dpmctrl->interrupt);
+
+ /* disable interrupt generation, schedule the NAPI poller */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ napi_schedule(&mod->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Firmware reset, startup, and shutdown
+ */
+
+/*
+ * Reset an ICAN module to its power-on state
+ *
+ * CONTEXT: no network device registered
+ * LOCKING: work function disabled
+ */
+static int ican3_reset_module(struct ican3_dev *mod)
+{
+ u8 val = 1 << mod->num;
+ unsigned long start;
+ u8 runold, runnew;
+
+ /* disable interrupts so no more work is scheduled */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+
+ /* flush any pending work */
+ flush_scheduled_work();
+
+ /* the first unallocated page in the DPM is #9 */
+ mod->free_page = DPM_FREE_START;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runold = ioread8(mod->dpm + TARGET_RUNNING);
+
+ /* reset the module */
+ iowrite8(val, &mod->ctrl->reset_assert);
+ iowrite8(val, &mod->ctrl->reset_deassert);
+
+ /* wait until the module has finished resetting and is running */
+ start = jiffies;
+ do {
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runnew = ioread8(mod->dpm + TARGET_RUNNING);
+ if (runnew == (runold ^ 0xff))
+ return 0;
+
+ msleep(10);
+ } while (time_before(jiffies, start + HZ / 4));
+
+ dev_err(mod->dev, "failed to reset CAN module\n");
+ return -ETIMEDOUT;
+}
+
+static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
+{
+ ican3_msg_disconnect(mod);
+ ican3_reset_module(mod);
+}
+
+/*
+ * Startup an ICAN module, bringing it into fast mode
+ */
+static int __devinit ican3_startup_module(struct ican3_dev *mod)
+{
+ int ret;
+
+ ret = ican3_reset_module(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to reset module\n");
+ return ret;
+ }
+
+ /* re-enable interrupts so we can send messages */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+
+ ret = ican3_msg_connect(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to connect to module\n");
+ return ret;
+ }
+
+ ican3_init_new_host_interface(mod);
+ ret = ican3_msg_newhostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to new-style interface\n");
+ return ret;
+ }
+
+ /* default to "termination on" */
+ ret = ican3_set_termination(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to enable termination\n");
+ return ret;
+ }
+
+ /* default to "bus errors enabled" */
+ ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ return ret;
+ }
+
+ ican3_init_fast_host_interface(mod);
+ ret = ican3_msg_fasthostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to fast host interface\n");
+ return ret;
+ }
+
+ ret = ican3_set_id_filter(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set acceptance filter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * CAN Network Device
+ */
+
+static int ican3_open(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ u8 quota;
+ int ret;
+
+ /* open the CAN layer */
+ ret = open_candev(ndev);
+ if (ret) {
+ dev_err(mod->dev, "unable to start CAN layer\n");
+ return ret;
+ }
+
+ /* set the bus error generation state appropriately */
+ if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ quota = ICAN3_BUSERR_QUOTA_MAX;
+ else
+ quota = 0;
+
+ ret = ican3_set_buserror(mod, quota);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_stop(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ /* stop the network device xmit routine */
+ netif_stop_queue(ndev);
+ mod->can.state = CAN_STATE_STOPPED;
+
+ /* bring the bus offline, stop receiving packets */
+ ret = ican3_set_bus_state(mod, false);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-off\n");
+ return ret;
+ }
+
+ /* close the CAN layer */
+ close_candev(ndev);
+ return 0;
+}
+
+static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* check that we can actually transmit */
+ if (!ican3_txok(mod)) {
+ dev_err(mod->dev, "no free descriptors, stopping queue\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc));
+ memset(&desc, 0, sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, 1);
+
+ /* convert the Linux CAN frame into ICAN3 format */
+ can_frame_to_ican3(mod, cf, &desc);
+
+ /*
+ * the programming manual says that you must set the IVALID bit, then
+ * interrupt, then set the valid bit. Quite weird, but it seems to be
+ * required for this to work
+ */
+ desc.control |= DESC_IVALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* generate a MODULbus interrupt to the microcontroller */
+ iowrite8(0x01, &mod->dpmctrl->interrupt);
+
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the next buffer pointer */
+ mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fasttx_num + 1);
+
+ /* update statistics */
+ stats->tx_packets++;
+ stats->tx_bytes += cf->can_dlc;
+ kfree_skb(skb);
+
+ /*
+ * This hardware doesn't have TX-done notifications, so we'll try to
+ * emulate them as best we can using ECHO skbs. Get the next TX
+ * descriptor, and see if we have room to send. If not, stop the queue.
+ * It will be woken when the ECHO skb for the current packet is received.
+ */
+
+ /* copy the control bits of the descriptor */
+ if (!ican3_txok(mod))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ican3_netdev_ops = {
+ .ndo_open = ican3_open,
+ .ndo_stop = ican3_stop,
+ .ndo_start_xmit = ican3_xmit,
+};
+
+/*
+ * Low-level CAN Device
+ */
+
+/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
+static struct can_bittiming_const ican3_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+/*
+ * This routine was stolen from drivers/net/can/sja1000/sja1000.c
+ *
+ * The bittiming register command for the ICAN3 just sets the bit timing
+ * registers on the SJA1000 chip directly
+ */
+static int ican3_set_bittiming(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct can_bittiming *bt = &mod->can.bittiming;
+ struct ican3_msg msg;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CBTRREQ;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0x00;
+ msg.data[1] = 0x00;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+
+ return ican3_send_msg(mod, &msg);
+}
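
As a worked example of the packing above (editorial sketch, not part of the patch): with brp=4, sjw=1, prop_seg+phase_seg1=13 and phase_seg2=2, the registers come out as BTR0=0x03, BTR1=0x1c. If the ICAN3's CAN clock is 8 MHz (this hunk references ICAN3_CAN_CLOCK but does not define it, so that value is an assumption), this corresponds to 125 kbit/s:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int brp = 4, sjw = 1;		/* prescaler, sync jump width */
	unsigned int tseg1 = 13, tseg2 = 2;	/* prop_seg + phase_seg1, phase_seg2 */

	uint8_t btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
	uint8_t btr1 = ((tseg1 - 1) & 0xf) | (((tseg2 - 1) & 0x7) << 4);

	/* bit time = 1 + 13 + 2 = 16 tq; assuming an 8 MHz CAN clock,
	 * 8 MHz / (4 * 16) = 125 kbit/s */
	printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);	/* 0x03 0x1c */
	return 0;
}
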
+
+static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ if (mode != CAN_MODE_START)
+ return -ENOTSUPP;
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_STATUS);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ bec->rxerr = mod->bec.rxerr;
+ bec->txerr = mod->bec.txerr;
+ return 0;
+}
+
+/*
+ * Sysfs Attributes
+ */
+
+static ssize_t ican3_sysfs_show_term(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
+}
+
+static ssize_t ican3_sysfs_set_term(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ unsigned long enable;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &enable))
+ return -EINVAL;
+
+ ret = ican3_set_termination(mod, enable);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
+ ican3_sysfs_set_term);
+
+static struct attribute *ican3_sysfs_attrs[] = {
+ &dev_attr_termination.attr,
+ NULL,
+};
+
+static struct attribute_group ican3_sysfs_attr_group = {
+ .attrs = ican3_sysfs_attrs,
+};
+
+/*
+ * Platform Device Subsystem
+ */
+
+static int __devinit ican3_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct net_device *ndev;
+ struct ican3_dev *mod;
+ struct resource *res;
+ struct device *dev;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENXIO;
+
+ dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno);
+
+ /* save the struct device for printing */
+ dev = &pdev->dev;
+
+ /* allocate the CAN device and private data */
+ ndev = alloc_candev(sizeof(*mod), 0);
+ if (!ndev) {
+ dev_err(dev, "unable to allocate CANdev\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ mod = netdev_priv(ndev);
+ mod->ndev = ndev;
+ mod->dev = &pdev->dev;
+ mod->num = pdata->modno;
+ netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ spin_lock_init(&mod->lock);
+ init_completion(&mod->termination_comp);
+ init_completion(&mod->buserror_comp);
+
+ /* setup device-specific sysfs attributes */
+ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group;
+
+ /* the first unallocated page in the DPM is 9 */
+ mod->free_page = DPM_FREE_START;
+
+ ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ mod->can.clock.freq = ICAN3_CAN_CLOCK;
+ mod->can.bittiming_const = &ican3_bittiming_const;
+ mod->can.do_set_bittiming = ican3_set_bittiming;
+ mod->can.do_set_mode = ican3_set_mode;
+ mod->can.do_get_berr_counter = ican3_get_berr_counter;
+ mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
+ | CAN_CTRLMODE_BERR_REPORTING;
+
+ /* find our IRQ number */
+ mod->irq = platform_get_irq(pdev, 0);
+ if (mod->irq < 0) {
+ dev_err(dev, "IRQ line not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ ndev->irq = mod->irq;
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ mod->dpm = ioremap(res->start, resource_size(res));
+ if (!mod->dpm) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_ndev;
+ }
+
+ mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE;
+
+ /* get access to the control registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "CONTROL registers not found\n");
+ ret = -ENODEV;
+ goto out_iounmap_dpm;
+ }
+
+ mod->ctrl = ioremap(res->start, resource_size(res));
+ if (!mod->ctrl) {
+ dev_err(dev, "CONTROL registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_iounmap_dpm;
+ }
+
+ /* disable our IRQ, then hookup the IRQ handler */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod);
+ if (ret) {
+ dev_err(dev, "unable to request IRQ\n");
+ goto out_iounmap_ctrl;
+ }
+
+ /* reset and initialize the CAN controller into fast mode */
+ napi_enable(&mod->napi);
+ ret = ican3_startup_module(mod);
+ if (ret) {
+ dev_err(dev, "%s: unable to start CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ /* register with the Linux CAN layer */
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "%s: unable to register CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+ return 0;
+
+out_free_irq:
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+out_iounmap_ctrl:
+ iounmap(mod->ctrl);
+out_iounmap_dpm:
+ iounmap(mod->dpm);
+out_free_ndev:
+ free_candev(ndev);
+out_return:
+ return ret;
+}
+
+static int __devexit ican3_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ican3_dev *mod = netdev_priv(ndev);
+
+ /* unregister the netdevice, stop interrupts */
+ unregister_netdev(ndev);
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+
+ /* put the module into reset */
+ ican3_shutdown_module(mod);
+
+ /* unmap all registers */
+ iounmap(mod->ctrl);
+ iounmap(mod->dpm);
+
+ free_candev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver ican3_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ican3_probe,
+ .remove = __devexit_p(ican3_remove),
+};
+
+static int __init ican3_init(void)
+{
+ return platform_driver_register(&ican3_driver);
+}
+
+static void __exit ican3_exit(void)
+{
+ platform_driver_unregister(&ican3_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ican3");
+
+module_init(ican3_init);
+module_exit(ican3_exit);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108318b..b11a0cb5ed8 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
*
*/
-#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/platform/mcp251x.h>
@@ -476,7 +475,6 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
netif_stop_queue(net);
priv->tx_skb = skb;
- net->trans_start = jiffies;
queue_work(priv->wq, &priv->tx_work);
return NETDEV_TX_OK;
@@ -923,12 +921,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
struct net_device *net;
struct mcp251x_priv *priv;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int model = spi_get_device_id(spi)->driver_data;
int ret = -ENODEV;
if (!pdata)
/* Platform data is required for osc freq */
goto error_out;
+ if (model)
+ pdata->model = model;
+
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
if (!net) {
@@ -1118,6 +1120,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
#define mcp251x_can_resume NULL
#endif
+static struct spi_device_id mcp251x_id_table[] = {
+ { "mcp251x", 0 /* Use pdata.model */ },
+ { "mcp2510", CAN_MCP251X_MCP2510 },
+ { "mcp2515", CAN_MCP251X_MCP2515 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
@@ -1125,6 +1136,7 @@ static struct spi_driver mcp251x_can_driver = {
.owner = THIS_MODULE,
},
+ .id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = __devexit_p(mcp251x_can_remove),
.suspend = mcp251x_can_suspend,
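
With the id table above in place, a board can select the chip model through the SPI modalias instead of filling in pdata->model. A hypothetical registration sketch follows (the bus number, chip select, IRQ and 16 MHz oscillator are made-up example values; the oscillator_frequency field name is assumed from the mcp251x platform header of this era):

#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

static struct mcp251x_platform_data board_mcp2515_pdata = {
	.oscillator_frequency	= 16000000,	/* 16 MHz crystal on the board */
};

static struct spi_board_info board_can_spi[] __initdata = {
	{
		.modalias	= "mcp2515",	/* matched by mcp251x_id_table */
		.platform_data	= &board_mcp2515_pdata,
		.max_speed_hz	= 10000000,
		.bus_num	= 1,		/* example SPI bus */
		.chip_select	= 0,
		.irq		= 42,		/* example interrupt line */
	},
};

/* registered from the board's init code, e.g.:
 * spi_register_board_info(board_can_spi, ARRAY_SIZE(board_can_spi)); */
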
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 03e7c48465a..8af8442c694 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
@@ -393,15 +392,17 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
};
static struct of_platform_driver mpc5xxx_can_driver = {
- .owner = THIS_MODULE,
- .name = "mpc5xxx_can",
+ .driver = {
+ .name = "mpc5xxx_can",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc5xxx_can_table,
+ },
.probe = mpc5xxx_can_probe,
.remove = __devexit_p(mpc5xxx_can_remove),
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
#endif
- .match_table = mpc5xxx_can_table,
};
static int __init mpc5xxx_can_init(void)
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd578d41..64c378cd0c3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d64a31..ae3505afd68 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
Driver supports now:
- Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
- Adlink PCI-7841/cPCI-7841 SE card
+ - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+ - esd CAN-PCI/PMC/266
+ - esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
-
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0bc40..36f4f9780c3 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776a7f5..ed004cebd31 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff4070db9..437b5c716a2 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
- "TEWS TECHNOLOGIES TPMC810");
+ "TEWS TECHNOLOGIES TPMC810, "
+ "esd CAN-PCI/CPCI/PCI104/200, "
+ "esd CAN-PCI/PMC/266, "
+ "esd CAN-PCIe/2000")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
int channels; /* detected channels count */
struct net_device *net_dev[PLX_PCI_MAX_CHAN];
void __iomem *conf_addr;
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
};
#define PLX_PCI_CAN_CLOCK (16000000 / 2)
-/* PLX90xx registers */
+/* PLX9030/9050/9052 registers */
#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
* Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+/* PLX9056 registers */
+#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
+#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
+
+#define PLX9056_LINTI (1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
+
/*
* The board configuration is probably following:
* RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
#define ADLINK_PCI_VENDOR_ID 0x144A
#define ADLINK_PCI_DEVICE_ID 0x7841
+#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+
#define MARATHON_PCI_DEVICE_ID 0x2715
#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
+ "esd CAN-PCI/CPCI/PCI104/200", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
+ "esd CAN-PCI/PMC/266", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
+ "esd CAN-PCIe/2000", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
(kernel_ulong_t)&plx_pci_card_info_adlink_se
},
{
+ /* esd CAN-PCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-CPCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI104/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PMC/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PCIE/2000 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd2000
+ },
+ {
/* Marathon CAN-bus-PCI card */
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
}
/*
- * PLX90xx software reset
+ * PLX9030/50/52 software reset
* Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
* For most cards it's enough for reset the SJA1000 chips.
*/
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
};
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ /* issue a local bus reset */
+ cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /* reload local configuration from EEPROM */
+ cntrl |= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /*
+ * There is no reliable way to poll for the end of the
+ * reconfiguration process, but waiting 10ms is known
+ * to be safe.
+ */
+ mdelay(10);
+
+ cntrl ^= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+};
+
/* Special reset function for Marathon card */
static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- plx_pci_reset_common(pdev);
+ card->reset_func(pdev);
/*
- * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
- * Local_2 interrupts
+ * Disable interrupts from PCI-card and disable local
+ * interrupts
*/
- iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ else
+ iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
if (card->conf_addr)
pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
card->conf_addr = addr + ci->conf_map.offset;
ci->reset_func(pdev);
+ card->reset_func = ci->reset_func;
/* Detect available channels */
for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- val = ioread32(card->conf_addr + PLX_INTCSR);
- val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
- iowrite32(val, card->conf_addr + PLX_INTCSR);
-
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+ val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+ else
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+ } else {
+ iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+ card->conf_addr + PLX9056_INTCSR);
+ }
return 0;
failure_cleanup:
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a731a5..0a8de01d52f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -84,6 +83,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
.brp_inc = 1,
};
+static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+{
+ unsigned long flags;
+
+ /*
+ * The command register needs some locking and time to settle
+ * the write_reg() operation - especially on SMP systems.
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+ priv->read_reg(priv, REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -293,11 +306,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < dlc; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
- priv->write_reg(priv, REG_CMR, CMD_TR);
+ sja1000_write_cmdreg(priv, CMD_TR);
return NETDEV_TX_OK;
}
@@ -346,7 +357,7 @@ static void sja1000_rx(struct net_device *dev)
cf->can_id = id;
/* release receive buffer */
- priv->write_reg(priv, REG_CMR, CMD_RRB);
+ sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
@@ -374,7 +385,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
}
if (isrc & IRQ_EI) {
@@ -588,6 +599,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_BERR_REPORTING;
+ spin_lock_init(&priv->cmdreg_lock);
+
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 97a622b9302..de8e778f683 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -167,6 +167,7 @@ struct sja1000_priv {
void __iomem *reg_base; /* ioremap'ed address to registers */
unsigned long irq_flags; /* for request_irq() */
+ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
u16 flags; /* custom mode flags */
u8 ocr; /* output control register */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f15596..496223e9e2f 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9dd076a626a..ac1a83d7c20 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
@@ -72,7 +71,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
dev_set_drvdata(&ofdev->dev, NULL);
@@ -91,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct sja1000_priv *priv;
struct resource res;
@@ -216,11 +215,13 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
static struct of_platform_driver sja1000_ofp_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = sja1000_ofp_table,
+ },
.probe = sja1000_ofp_probe,
.remove = __devexit_p(sja1000_ofp_remove),
- .match_table = sja1000_ofp_table,
};
static int __init sja1000_ofp_init(void)
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c2a05..d9fadc489b3 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_LICENSE("GPL v2");
-static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
+static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
{
return ioread8(priv->reg_base + reg);
}
-static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
+static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
{
iowrite8(val, priv->reg_base + reg);
}
+static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 2);
+}
+
+static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 2);
+}
+
+static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 4);
+}
+
+static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 4);
+}
+
static int sp_probe(struct platform_device *pdev)
{
int err;
@@ -90,14 +109,29 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
priv->reg_base = addr;
- priv->read_reg = sp_read_reg;
- priv->write_reg = sp_write_reg;
- priv->can.clock.freq = pdata->clock;
+ /* The CAN clock frequency is half the oscillator clock frequency */
+ priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
+ switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case IORESOURCE_MEM_8BIT:
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
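
The switch on IORESOURCE_MEM_TYPE_MASK in sp_probe() above picks the register stride from the memory resource flags, so a board with an SJA1000 wired to a 16-bit bus only needs to tag its resource accordingly. A hypothetical board-file sketch (the base address, window size, IRQ, OCR/CDR choices, 16 MHz oscillator and the "sja1000_platform" device name are illustrative assumptions; the flag and field names come from linux/ioport.h and the sja1000 platform header):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/can/platform/sja1000.h>

static struct resource board_sja1000_res[] = {
	{
		.start	= 0x10000000,			/* example base address */
		.end	= 0x10000000 + 0x80 - 1,	/* example window size */
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
	}, {
		.start	= 42,				/* example IRQ */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct sja1000_platform_data board_sja1000_pdata = {
	.osc_freq	= 16000000,	/* halved to 8 MHz by sp_probe() above */
	.ocr		= OCR_TX0_PUSHPULL,
	.cdr		= CDR_CBP,
};

static struct platform_device board_sja1000_dev = {
	.name		= "sja1000_platform",
	.id		= 0,
	.resource	= board_sja1000_res,
	.num_resources	= ARRAY_SIZE(board_sja1000_res),
	.dev		= {
		.platform_data	= &board_sja1000_pdata,
	},
};
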
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba0d17..4d07f1ee716 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d800b598ae3..1fc0871d2ef 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -300,8 +300,6 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
else if (err)
dev_err(netdev->dev.parent,
"failed resubmitting intr urb: %d\n", err);
-
- return;
}
static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -497,8 +495,6 @@ resubmit_urb:
else if (retval)
dev_err(netdev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
-
- return;
}
/*
@@ -516,8 +512,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev = dev->netdev;
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
atomic_dec(&dev->active_tx_urbs);
@@ -614,8 +610,8 @@ static int ems_usb_start(struct ems_usb *dev)
return -ENOMEM;
}
- buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent,
"No memory left for USB buffer\n");
@@ -635,8 +631,8 @@ static int ems_usb_start(struct ems_usb *dev)
netif_device_detach(dev->netdev);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
break;
}
@@ -777,7 +773,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
goto nomem;
}
- buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -820,7 +816,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
*/
if (!context) {
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_warn(netdev->dev.parent, "couldn't find free context\n");
@@ -845,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
can_free_echo_skb(netdev, context->echo_index);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e4111..04a03f7003a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2889,7 +2889,6 @@ static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
return NETDEV_TX_BUSY;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2957,20 +2956,20 @@ static void cas_process_mc_list(struct cas *cp)
{
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 1;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, cp->dev) {
+ netdev_for_each_mc_addr(ha, cp->dev) {
if (i <= CAS_MC_EXACT_MATCH_SIZE) {
/* use the alternate mac address registers for the
* first 15 multicast addresses
*/
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ writel((ha->addr[4] << 8) | ha->addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ writel((ha->addr[2] << 8) | ha->addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ writel((ha->addr[0] << 8) | ha->addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
i++;
}
@@ -2978,7 +2977,7 @@ static void cas_process_mc_list(struct cas *cp)
/* use hw hash table for the next series of
* multicast addresses
*/
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
@@ -4825,7 +4824,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
default:
break;
- };
+ }
mutex_unlock(&cp->pm_mutex);
return rc;
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9d394..7dbb16d36ff 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
- netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
- bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+ /* bit[23:28] */
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696..f01cfdb995d 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
*/
struct cmdQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct freelQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
again:
for (i = 0; i < MAX_NPORTS; i++) {
- s->port = ++s->port & (MAX_NPORTS - 1);
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
skbq = &s->p[s->port].skbq;
skb = skb_peek(skbq);
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
- if (likely(pci_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
if (q->sop)
q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, dma_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
pci_dma_sync_single_for_device(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
unsigned int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int i, len = skb->len - skb->data_len;
+ unsigned int i, len = skb_headlen(skb);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
*gen, nfrags == 0 && *desc_len == 0);
ce1->skb = NULL;
- pci_unmap_len_set(ce1, dma_len, 0);
+ dma_unmap_len_set(ce1, dma_len, 0);
*desc_mapping += SGE_TX_DESC_MAX_PLEN;
if (*desc_len) {
ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
desc_mapping = mapping;
- desc_len = skb->len - skb->data_len;
+ desc_len = skb_headlen(skb);
flags = F_CMD_DATAVALID | F_CMD_SOP |
V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e->addr_hi = (u64)desc_mapping >> 32;
e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
ce->skb = NULL;
- pci_unmap_len_set(ce, dma_len, 0);
+ dma_unmap_len_set(ce, dma_len, 0);
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, frag->size);
}
ce->skb = skb;
wmb();
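
For context on the sge.c hunks above: the old pci_unmap_addr()/pci_unmap_len() bookkeeping macros are replaced by the generic dma_unmap_addr()/dma_unmap_len() helpers, and skb_headlen() names the linear-area length previously spelled skb->len - skb->data_len. Below is a minimal sketch of that bookkeeping pattern, not part of the patch; struct and function names (my_rx_desc, my_record_mapping, my_free_buf) are invented, and it assumes the standard DMA-mapping API.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_rx_desc {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(dma_addr);        /* may compile to nothing */
        DEFINE_DMA_UNMAP_LEN(dma_len);
};

static void my_record_mapping(struct my_rx_desc *d, dma_addr_t mapping,
                              unsigned int len)
{
        dma_unmap_addr_set(d, dma_addr, mapping);
        dma_unmap_len_set(d, dma_len, len);
}

static void my_free_buf(struct device *dev, struct my_rx_desc *d)
{
        dma_unmap_single(dev, dma_unmap_addr(d, dma_addr),
                         dma_unmap_len(d, dma_len), DMA_FROM_DEVICE);
        kfree_skb(d->skb);
        d->skb = NULL;
}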
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4b451a7c03e..fe925663d39 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1143,12 +1143,12 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
- !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+ !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
- cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+ clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
@@ -2092,7 +2092,6 @@ end:
i += j;
j = 1;
}
- return;
}
static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2146,17 +2145,56 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
return last_cnt;
}
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+ u16 hw_cons, sw_cons;
+ union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+ (cp->l2_ring + (2 * BCM_PAGE_SIZE));
+ u32 cmd;
+ int comp = 0;
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+ return 0;
+
+ hw_cons = *cp->rx_cons_ptr;
+ if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+ hw_cons++;
+
+ sw_cons = cp->rx_cons;
+ while (sw_cons != hw_cons) {
+ u8 cqe_fp_flags;
+
+ cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+ cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+ cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+ cmd == RAMROD_CMD_ID_ETH_HALT)
+ comp++;
+ }
+ sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+ }
+ return comp;
+}
+
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
u16 rx_cons = *cp->rx_cons_ptr;
u16 tx_cons = *cp->tx_cons_ptr;
+ int comp = 0;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ comp = cnic_l2_completion(cp);
+
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
uio_event_notify(cp->cnic_uinfo);
}
+ if (comp)
+ clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static int cnic_service_bnx2(void *data, void *status_blk)
@@ -2325,7 +2363,6 @@ done:
status_idx, IGU_INT_ENABLE, 1);
cp->kcq_prod_idx = sw_prod;
- return;
}
static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3330,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
- struct cnic_local *cp = dev->cnic_priv;
u32 cid_addr;
int i;
- if (CHIP_NUM(cp) == CHIP_NUM_5709)
- return;
-
cid_addr = GET_CID_ADDR(cid);
for (i = 0; i < CTX_SIZE; i += 4)
@@ -3493,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
sb_id = cp->status_blk_num;
tx_cid = 20;
- cnic_init_context(dev, tx_cid);
- cnic_init_context(dev, tx_cid + 1);
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
- cnic_init_context(dev, tx_cid);
CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
(TX_TSS_CID << 7));
cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
@@ -3519,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else {
+ cnic_init_context(dev, tx_cid);
+ cnic_init_context(dev, tx_cid + 1);
+
offset0 = BNX2_L2CTX_TYPE;
offset1 = BNX2_L2CTX_CMD_TYPE;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
@@ -3692,7 +3725,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->max_kwq_idx = MAX_KWQ_IDX;
cp->kwq_prod_idx = 0;
cp->kwq_con_idx = 0;
- cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+ set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
@@ -4170,6 +4203,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
cnic_init_bnx2x_tx_ring(dev);
cnic_init_bnx2x_rx_ring(dev);
@@ -4177,6 +4212,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_SETUP did not complete\n");
+ cnic_kwq_completion(dev, 1);
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
}
}
@@ -4189,14 +4233,25 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
union l5cm_specific_data l5_data;
+ int i;
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
- msleep(10);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_HALT did not complete\n");
+ cnic_kwq_completion(dev, 1);
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
@@ -4317,7 +4372,15 @@ static void cnic_stop_hw(struct cnic_dev *dev)
{
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+ /* Need to wait for the ring shutdown event to complete
+ * before clearing the CNIC_UP flag.
+ */
+ while (cp->uio_dev != -1 && i < 15) {
+ msleep(100);
+ i++;
+ }
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -4628,7 +4691,6 @@ static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
- return;
}
module_init(cnic_init);
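
A note on the cnic_local_flags conversion above: the flags word becomes an unsigned long manipulated with the atomic set_bit()/clear_bit()/test_bit() helpers, and the defines change from bit masks to bit numbers. A tiny illustrative sketch of that pattern follows; the MY_FL_* and my_ctx names are invented.

#include <linux/bitops.h>

#define MY_FL_KWQ_INIT  0               /* bit numbers, not masks */
#define MY_FL_L2_WAIT   1

struct my_ctx {
        unsigned long flags;            /* bitops operate on unsigned long */
};

static void my_flag_demo(struct my_ctx *c)
{
        set_bit(MY_FL_KWQ_INIT, &c->flags);             /* atomic set */
        if (test_bit(MY_FL_KWQ_INIT, &c->flags))        /* atomic test */
                clear_bit(MY_FL_KWQ_INIT, &c->flags);   /* atomic clear */
}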
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a0d853dff98..08b1235d987 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -179,9 +179,9 @@ struct cnic_local {
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
- /* protected by ulp_lock */
- u32 cnic_local_flags;
-#define CNIC_LCL_FL_KWQ_INIT 0x00000001
+ unsigned long cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x0
+#define CNIC_LCL_FL_L2_WAIT 0x1
struct cnic_dev *dev;
@@ -349,6 +349,10 @@ struct bnx2x_bd_chain_next {
#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
+#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
+ (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
+ ((x) + 2) : ((x) + 1)
+
#define BNX2X_DEF_SB_ID 16
#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
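
The BNX2X_NEXT_RCQE() macro added above advances a completion-queue index while skipping the last slot of each descriptor page, which the ring reserves for its own use rather than for a normal completion entry. The stand-alone program below shows the stepping; the page size of 8 entries is invented purely for illustration.

#include <stdio.h>

#define DESC_PER_PAGE   8               /* illustrative value only */
#define MAX_DESC_CNT    (DESC_PER_PAGE - 1)
#define NEXT_RCQE(x)    ((((x) & MAX_DESC_CNT) == (MAX_DESC_CNT - 1)) ? \
                         ((x) + 2) : ((x) + 1))

int main(void)
{
        unsigned int idx = 0;
        int i;

        for (i = 0; i < 12; i++) {
                printf("%u ", idx);
                idx = NEXT_RCQE(idx);
        }
        printf("\n");   /* prints 0 1 2 3 4 5 6 8 9 10 11 12: slot 7 skipped */
        return 0;
}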
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 110c62072e6..0c55177db04 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.1"
-#define CNIC_MODULE_RELDATE "Feb 22, 2010"
+#define CNIC_MODULE_VERSION "2.1.2"
+#define CNIC_MODULE_RELDATE "May 26, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd90b3..3c58db59528 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *iter;
+ struct netdev_hw_addr *ha;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- netdev_for_each_mc_addr(iter, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
bit = 0;
- tmp = iter->dmi_addr[0];
+ tmp = ha->addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[1];
+ tmp = ha->addr[1];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[2];
+ tmp = ha->addr[2];
bit ^= (tmp >> 6) ^ tmp;
- tmp = iter->dmi_addr[3];
+ tmp = ha->addr[3];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[4];
+ tmp = ha->addr[4];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[5];
+ tmp = ha->addr[5];
bit ^= (tmp >> 6) ^ tmp;
bit &= 0x3f;
hash[bit / 32] |= 1 << (bit % 32);
@@ -579,7 +579,6 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&priv->lock);
- dev->trans_start = jiffies;
spin_unlock(&priv->lock);
desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc->skb = skb;
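
The cpmac hunks above only swap the iterator type; the driver's unusual (non-CRC32) multicast hash itself is unchanged. Below is a stand-alone reimplementation of that hash with an arbitrary example address, for readers who want to see which of the 64 filter bits a given MAC lands in; it is illustration only, not driver code.

#include <stdio.h>

static unsigned int cpmac_hash(const unsigned char *mac)
{
        unsigned int bit = 0;
        unsigned char tmp;

        tmp = mac[0]; bit ^= (tmp >> 2) ^ (tmp << 4);
        tmp = mac[1]; bit ^= (tmp >> 4) ^ (tmp << 2);
        tmp = mac[2]; bit ^= (tmp >> 6) ^ tmp;
        tmp = mac[3]; bit ^= (tmp >> 2) ^ (tmp << 4);
        tmp = mac[4]; bit ^= (tmp >> 4) ^ (tmp << 2);
        tmp = mac[5]; bit ^= (tmp >> 6) ^ tmp;

        return bit & 0x3f;              /* index into a 64-entry hash table */
}

int main(void)
{
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        unsigned int hash[2] = { 0, 0 };
        unsigned int bit = cpmac_hash(mac);

        hash[bit / 32] |= 1u << (bit % 32);
        printf("bit %u -> hash registers {0x%08x, 0x%08x}\n",
               bit, hash[0], hash[1]);
        return 0;
}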
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a33914e96..7e00027b9f8 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1108,7 +1108,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
- baddr = dmi->dmi_addr;
+ baddr = ha->addr;
hash_ix ^= (*baddr) & 0x3f;
hash_ix ^= ((*baddr) >> 6) & 0x03;
++baddr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4c38491b8ef..2ccb9f12805 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -902,7 +902,6 @@ get_dma_channel(struct net_device *dev)
return;
}
}
- return;
}
static void
@@ -1554,7 +1553,6 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
spin_unlock_irqrestore(&lp->lock, flags);
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/*
@@ -1673,7 +1671,6 @@ count_rx_errors(int status, struct net_local *lp)
/* per str 172 */
lp->stats.rx_crc_errors++;
if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
- return;
}
/* We have a good packet(s), get it/them out of the buffers. */
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 2f3ee721c3e..f452c400325 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -207,7 +207,6 @@ again:
*/
neigh_event_send(e->neigh, NULL);
}
- return;
}
EXPORT_SYMBOL(t3_l2t_send_event);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f..5962b911b5b 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
struct sk_buff *skb;
struct fl_pg_chunk pg_chunk;
};
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
- DECLARE_PCI_UNMAP_ADDR(addr);
+ DEFINE_DMA_UNMAP_ADDR(addr);
};
return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
break;
}
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
if (likely(skb != NULL)) {
__skb_put(skb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
goto recycle;
use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
skb = sd->skb;
skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr),
+ dma_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
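
One detail worth noting in the cxgb3 sge.c hunks: need_skb_unmap() keeps working after the macro rename because it only measures whether the unmap macro expands to a real struct member. A sketch of that compile-time test follows; my_need_unmap_info is an invented name, and the generic dma-mapping header is assumed.

#include <linux/dma-mapping.h>

static inline int my_need_unmap_info(void)
{
        /*
         * DEFINE_DMA_UNMAP_ADDR() only declares a member on platforms that
         * need unmap bookkeeping; with GNU C an empty struct has size 0,
         * so a non-zero size means the driver must save the mapping.
         */
        struct dummy {
                DEFINE_DMA_UNMAP_ADDR(addr);
        };

        return sizeof(struct dummy) != 0;
}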
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9..3af19a55037 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++,
- dmi->dmi_addr);
+ ha->addr);
else {
- int hash = hash_hw_addr(dmi->dmi_addr);
+ int hash = hash_hw_addr(ha->addr);
if (hash < 32)
hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b5..dd1770e075e 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
enum {
MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 16, /* Serial # length */
+ SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
};
@@ -477,7 +477,6 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned long registered_device_map;
- unsigned long open_device_map;
unsigned long flags;
const char *name;
@@ -651,14 +650,11 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
-void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -709,7 +705,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d32..58045b00cf4 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u16 filt_idx[7];
const u8 *addr[7];
int ret, naddr = 0;
- const struct dev_addr_list *d;
const struct netdev_hw_addr *ha;
int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- netdev_for_each_mc_addr(d, dev) {
- addr[naddr++] = d->dmi_addr;
- if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
naddr, addr, filt_idx, &mhash, sleep);
if (ret < 0)
@@ -290,7 +290,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
if (ret == 0)
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
- (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
return ret;
}
@@ -311,11 +311,11 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
- true);
+ pi->vlan_grp != NULL, true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, 0, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- false);
+ true);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -859,6 +859,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -922,6 +924,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
@@ -938,6 +942,8 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
}
}
@@ -1711,6 +1717,18 @@ static int set_tso(struct net_device *dev, u32 value)
return 0;
}
+static int set_flags(struct net_device *dev, u32 flags)
+{
+ if (flags & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (flags & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
@@ -1741,6 +1759,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.set_tso = set_tso,
+ .set_flags = set_flags,
.flash_device = set_flash,
};
@@ -2308,6 +2327,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
+
+ if (adap->flags & FULL_INIT_DONE)
+ ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
@@ -2414,23 +2436,17 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
*/
static int cxgb_up(struct adapter *adap)
{
- int err = 0;
+ int err;
- if (!(adap->flags & FULL_INIT_DONE)) {
- err = setup_sge_queues(adap);
- if (err)
- goto out;
- err = setup_rss(adap);
- if (err) {
- t4_free_sge_resources(adap);
- goto out;
- }
- if (adap->flags & USING_MSIX)
- name_msix_vecs(adap);
- adap->flags |= FULL_INIT_DONE;
- }
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err)
+ goto freeq;
if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (err)
@@ -2451,11 +2467,14 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
out:
return err;
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+ t4_free_sge_resources(adap);
goto out;
}
@@ -2471,6 +2490,9 @@ static void cxgb_down(struct adapter *adapter)
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ adapter->flags &= ~FULL_INIT_DONE;
}
/*
@@ -2482,11 +2504,13 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
- return err;
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
dev->real_num_tx_queues = pi->nqsets;
- set_bit(pi->tx_chan, &adapter->open_device_map);
link_start(dev);
netif_tx_start_all_queues(dev);
return 0;
@@ -2494,19 +2518,12 @@ static int cxgb_open(struct net_device *dev)
static int cxgb_close(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
-
- clear_bit(pi->tx_chan, &adapter->open_device_map);
-
- if (!adapter->open_device_map)
- cxgb_down(adapter);
- return 0;
+ return t4_enable_vi(adapter, 0, pi->viid, false, false);
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
@@ -2601,7 +2618,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
true);
if (!ret)
dev->mtu = new_mtu;
@@ -2632,7 +2649,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
- t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+ t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
+ true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3066,6 +3084,12 @@ static void __devinit print_port_info(struct adapter *adap)
int i;
char buf[80];
+ const char *spd = "";
+
+ if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ spd = " 2.5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+ spd = " 5 GT/s";
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -3085,10 +3109,10 @@ static void __devinit print_port_info(struct adapter *adap)
--bufp;
sprintf(bufp, "BASE-%s", base[pi->port_type]);
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id, adap->params.rev,
buf, is_offload(adap) ? "R" : "",
- adap->params.pci.width,
+ adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name)
@@ -3203,7 +3227,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | highdma;
+ netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_features = netdev->features & VLAN_FEAT;
@@ -3334,8 +3358,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
- t4_sge_stop(adapter);
- t4_free_sge_resources(adapter);
+ if (adapter->flags & FULL_INIT_DONE)
+ cxgb_down(adapter);
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
disable_msi(adapter);
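
Among the cxgb4_main.c changes above, set_addr_filters() now walks the multicast list with netdev_for_each_mc_addr() and a countdown from netdev_mc_count(), since the new list cannot be end-tested with a bare ->next check the way dev_mc_list could. A condensed sketch of that batch-and-flush pattern follows; my_collect_mc and my_flush are invented names, with my_flush standing in for the firmware filter call.

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int my_flush(const u8 **addr, int naddr)
{
        /* stand-in for the real firmware filter-programming call */
        return 0;
}

static int my_collect_mc(struct net_device *dev)
{
        const u8 *addr[7];
        const struct netdev_hw_addr *ha;
        int naddr = 0, ret;
        int mc_cnt = netdev_mc_count(dev);

        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                /* flush a full batch, or the final partial one */
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = my_flush(addr, naddr);
                        if (ret < 0)
                                return ret;
                        naddr = 0;
                }
        }
        return 0;
}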
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c..d1f8f225e45 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4_pktgl_free(const struct pkt_gl *gl)
+static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
return handle_trace_pkt(q->adap, si);
- pkt = (void *)&rsp[1];
+ pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec;
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
__skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
adap->sge.ingr_map[iq->cntxt_id] = iq;
if (fl) {
- fl->cntxt_id = htons(c.fl0id);
+ fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
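
A small but real fix hides in the t4_sge_alloc_rxq() hunk above: fl->cntxt_id comes back from the firmware in big-endian form, so it should be converted with ntohs() (big-endian to CPU), not htons(). The two helpers produce identical bits on every architecture, but only the former states the intent and helps sparse checking. An illustrative fragment, with the invented struct my_fw_reply and my_read_id:

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_fw_reply {
        __be16 fl0id;                   /* firmware fills this in big-endian */
};

static u16 my_read_id(const struct my_fw_reply *r)
{
        return be16_to_cpu(r->fl0id);   /* same bits as ntohs(), right intent */
}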
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe12..da272a98fdb 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp)
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
+#if 0
/**
* t4_write_indirect - write indirectly addressed registers
* @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
* Writes a sequential block of registers that are accessed indirectly
* through an address/data register pair.
*/
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, const u32 *vals,
- unsigned int nregs, unsigned int start_idx)
+static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx++);
t4_write_reg(adap, data_reg, *vals++);
}
}
+#endif
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-#define VPD_ENTRY(name, len) \
- u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
-
/*
* Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R sections.
+ * VPD-R header.
*/
-struct t4_vpd {
+struct t4_vpd_hdr {
u8 id_tag;
u8 id_len[2];
u8 id_data[ID_LEN];
u8 vpdr_tag;
u8 vpdr_len[2];
- VPD_ENTRY(pn, 16); /* part number */
- VPD_ENTRY(ec, EC_LEN); /* EC level */
- VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
- VPD_ENTRY(na, 12); /* MAC address base */
- VPD_ENTRY(port_type, 8); /* port types */
- VPD_ENTRY(gpio, 14); /* GPIO usage */
- VPD_ENTRY(cclk, 6); /* core clock */
- VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
- VPD_ENTRY(rv, 1); /* csum */
- u32 pad; /* for multiple-of-4 sizing and alignment */
};
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
+#define VPD_LEN 512
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int ret;
- struct t4_vpd vpd;
- u8 *q = (u8 *)&vpd, csum;
+ int i, ret;
+ int ec, sn, v2;
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len;
+ const struct t4_vpd_hdr *v;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- for (csum = 0; q <= vpd.rv_data; q++)
- csum += *q;
+ v = (const struct t4_vpd_hdr *)vpd;
+ vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
+ if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+ return -EINVAL;
+ }
+
+#define FIND_VPD_KW(var, name) do { \
+ var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
+ vpdr_len, name); \
+ if (var < 0) { \
+ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+ var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return -EINVAL;
}
- p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
- memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(v2, "V2");
+#undef FIND_VPD_KW
+
+ p->cclk = simple_strtoul(vpd + v2, NULL, 10);
+ memcpy(p->id, v->id_data, ID_LEN);
strim(p->id);
- memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+ memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
- memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+ i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
return 0;
}
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianess.
*/
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
@@ -870,22 +886,6 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/**
- * t4_set_vlan_accel - configure HW VLAN extraction
- * @adap: the adapter
- * @ports: bitmap of adapter ports to operate on
- * @on: enable (1) or disable (0) HW VLAN extraction
- *
- * Enables or disables HW extraction of VLAN tags for the ports specified
- * by @ports. @ports is a bitmap with the ith bit designating the port
- * associated with the ith adapter channel.
- */
-void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
-{
- ports <<= VLANEXTENABLE_SHIFT;
- t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
-}
-
struct intr_info {
unsigned int mask; /* bits to check in interrupt status */
const char *msg; /* message to print or NULL */
@@ -2608,12 +2608,14 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
{
struct fw_vi_rxmode_cmd c;
@@ -2626,15 +2628,18 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
if (bcast < 0)
bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb11744314..7a981b81afa 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
};
struct cpl_rx_pkt {
+ struct rss_header rsshdr;
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 3393d05a388..63991d68950 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -876,7 +876,7 @@ struct fw_vi_mac_cmd {
struct fw_vi_rxmode_cmd {
__be32 op_to_viid;
__be32 retval_len16;
- __be32 mtu_to_broadcasten;
+ __be32 mtu_to_vlanexen;
__be32 r4_lo;
};
@@ -888,6 +888,8 @@ struct fw_vi_rxmode_cmd {
#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
+#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
struct fw_vi_enable_cmd {
__be32 op_to_viid;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2efbf..08e82b1a0b3 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
+
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- netdev_for_each_mc_addr(mc_ptr, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *) mc_ptr->dmi_addr);
+ (u8 *) ha->addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1467,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
- ndev->trans_start = jiffies;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
if (unlikely(ret_code != 0)) {
if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 6b13f4fd2e9..23a65398d01 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -166,8 +166,8 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 5)
+ tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/20)
return NETDEV_TX_BUSY;
/* else */
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
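
The de600 hunk above is a behavioural fix, not just a rename: a raw tick count like 5 means different wall-clock times depending on CONFIG_HZ, while HZ/20 is always 50 ms, and dev_trans_start() fetches the transmit timestamp now kept per tx queue instead of poking dev->trans_start directly. A minimal illustration of the HZ-relative style, with the invented helper my_tx_stuck:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool my_tx_stuck(unsigned long last_tx)
{
        /* 50 ms on any CONFIG_HZ; time_after() also copes with jiffies wrap */
        return time_after(jiffies, last_tx + HZ / 20);
}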
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index a0a6830b5e6..f3650fd096f 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -535,7 +535,6 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
de620_write_block(dev, buffer, skb->len, len-skb->len);
- dev->trans_start = jiffies;
if(!(using_txbuf == (TXBF0 | TXBF1)))
netif_wake_queue(dev);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6f20e..1d973db27c3 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -874,7 +874,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -930,7 +930,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -940,7 +939,7 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -959,8 +958,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -970,7 +969,6 @@ static void lance_load_multicast(struct net_device *dev)
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast(struct net_device *dev)
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d45f4..e5667c55844 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int i; /* used as index in for loop */
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+ struct netdev_hw_addr *ha;
/* Enable LLC frame promiscuous mode, if necessary */
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
/* Copy addresses to multicast address table, then update adapter CAM */
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
- dmi->dmi_addr, FDDI_K_ALEN);
+ ha->addr, FDDI_K_ALEN);
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c1928dfc..bf66e9b3b19 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -921,7 +921,7 @@ static void depca_tx_timeout(struct net_device *dev)
STOP_DEPCA;
depca_init_ring(dev);
LoadCSRs(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
InitRestartDepca(dev);
}
@@ -954,7 +954,6 @@ static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
if (TX_BUFFS_AVAIL)
@@ -1204,8 +1203,6 @@ static void LoadCSRs(struct net_device *dev)
outw(ACON, DEPCA_DATA);
outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-
- return;
}
static int InitRestartDepca(struct net_device *dev)
@@ -1272,7 +1269,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,8 +1284,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
@@ -1303,8 +1300,6 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
}
-
- return;
}
static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
@@ -1909,8 +1904,6 @@ static void depca_dbg_open(struct net_device *dev)
outw(CSR3, DEPCA_ADDR);
printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
}
-
- return;
}
/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad82982..a2f238d20ca 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -596,8 +596,6 @@ alloc_list (struct net_device *dev)
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
-
- return;
}
static netdev_tx_t
@@ -1132,14 +1130,14 @@ set_multicast (struct net_device *dev)
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
- int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f718e..abcc838e18a 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -476,17 +476,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
return dm->rx_csum;
}
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
- unsigned long flags;
if (dm->can_csum) {
dm->rx_csum = data;
-
- spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
- spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
@@ -494,6 +490,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
return -EOPNOTSUPP;
}
+static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dm->lock, flags);
+ ret = dm9000_set_rx_csum_unlocked(dev, data);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ return ret;
+}
+
static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
@@ -722,20 +731,17 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
* Set DM9000 multicast address
*/
static void
-dm9000_hash_table(struct net_device *dev)
+dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
- unsigned long flags;
dm9000_dbg(db, 1, "entering %s\n", __func__);
- spin_lock_irqsave(&db->lock, flags);
-
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
@@ -753,8 +759,8 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -765,11 +771,21 @@ dm9000_hash_table(struct net_device *dev)
}
iow(db, DM9000_RCR, rcr);
+}
+
+static void
+dm9000_hash_table(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
/*
- * Initilize dm9000 board
+ * Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
@@ -784,7 +800,7 @@ dm9000_init_dm9000(struct net_device *dev)
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
- dm9000_set_rx_csum(dev, db->rx_csum);
+ dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
/* GPIO0 on pre-activate PHY */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -811,7 +827,7 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
- dm9000_hash_table(dev);
+ dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
@@ -825,7 +841,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = 0;
+ dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -843,7 +859,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
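
The dm9000 changes above follow a common pattern: dm9000_init_dm9000() runs with db->lock already held, so the checksum and hash-table setters are split into *_unlocked workers (callable under the lock) plus thin locking wrappers for the ethtool/ndo entry points, avoiding a self-deadlock on the non-recursive spinlock. A generic sketch of the split, with invented names (my_board, my_set_rx_csum*):

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_board {
        spinlock_t lock;
        u32 rx_csum;
};

static void my_set_rx_csum_unlocked(struct my_board *b, u32 on)
{
        /* caller must hold b->lock */
        b->rx_csum = on;
        /* ... program the hardware register here ... */
}

static void my_set_rx_csum(struct my_board *b, u32 on)
{
        unsigned long flags;

        spin_lock_irqsave(&b->lock, flags);
        my_set_rx_csum_unlocked(b, on);
        spin_unlock_irqrestore(&b->lock, flags);
}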
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 234685213f1..8b0f50bbf3e 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -594,8 +594,6 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -918,7 +916,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
bp->regs, mem_base, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
+ dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
(bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 791080303db..b194bad29ac 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
* - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -175,7 +177,6 @@
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
-#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16
@@ -201,10 +202,6 @@ module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
- __func__ , ## args))
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -690,12 +687,13 @@ static int e100_self_test(struct nic *nic)
/* Check results of self-test */
if (nic->mem->selftest.result != 0) {
- DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
- nic->mem->selftest.result);
+ netif_err(nic, hw, nic->netdev,
+ "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
return -ETIMEDOUT;
}
if (nic->mem->selftest.signature == 0) {
- DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+ netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
return -ETIMEDOUT;
}
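
The e100 hunks in this area replace the driver-private DPRINTK()/PFX macros with the generic netif_err()/netif_printk()/netdev_info() helpers, which check the device's msg_enable mask and prefix messages with the driver and interface names; the pr_fmt() define added near the top of the file covers any remaining bare pr_*() calls. A small illustrative sketch follows; struct my_priv and my_report are invented.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt    /* must precede the includes */

#include <linux/netdevice.h>

struct my_priv {
        struct net_device *netdev;
        u32 msg_enable;                 /* consulted by the netif_* helpers */
};

static void my_report(struct my_priv *priv, int err)
{
        if (err)
                netif_err(priv, hw, priv->netdev,
                          "self-test failed, err %d\n", err);
        else
                netdev_info(priv->netdev, "self-test passed\n");
}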
@@ -798,7 +796,7 @@ static int e100_eeprom_load(struct nic *nic)
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
- DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+ netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
if (!eeprom_bad_csum_allow)
return -EAGAIN;
}
@@ -954,8 +952,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
udelay(20);
}
if (unlikely(!i)) {
- printk("e100.mdio_ctrl(%s) won't go Ready\n",
- nic->netdev->name );
+ netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
spin_unlock_irqrestore(&nic->mdio_lock, flags);
return 0; /* No way to indicate timeout error */
}
@@ -967,9 +964,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
break;
}
spin_unlock_irqrestore(&nic->mdio_lock, flags);
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data, data_out);
return (u16)data_out;
}
@@ -1029,17 +1027,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
return ADVERTISE_10HALF |
ADVERTISE_10FULL;
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
} else {
switch (reg) {
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
}
@@ -1156,12 +1156,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
}
}
- DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
/*************************************************************************
@@ -1254,16 +1257,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
return ERR_PTR(err);
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
if (fw->size != UCODE_SIZE * 4 + 3) {
- DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
- fw_name, fw->size);
+ netif_err(nic, probe, nic->netdev,
+ "Firmware \"%s\" has wrong size %zu\n",
+ fw_name, fw->size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1275,9 +1280,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
min_size >= UCODE_SIZE) {
- DPRINTK(PROBE, ERR,
- "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
- fw_name, timer, bundle, min_size);
+ netif_err(nic, probe, nic->netdev,
+ "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+ fw_name, timer, bundle, min_size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1329,7 +1334,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return PTR_ERR(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
- DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+ netif_err(nic, probe, nic->netdev,
+ "ucode cmd failed with error %d\n", err);
/* must restart cuc */
nic->cuc_cmd = cuc_start;
@@ -1349,7 +1355,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
/* if the command failed, or is not OK, notify and return */
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
- DPRINTK(PROBE,ERR, "ucode load failed\n");
+ netif_err(nic, probe, nic->netdev, "ucode load failed\n");
err = -EPERM;
}
@@ -1387,8 +1393,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
* media is sensed automatically based on how the link partner
* is configured. This is, in essence, manual configuration.
*/
- DPRINTK(PROBE, INFO,
- "found MII-less i82503 or 80c24 or other PHY\n");
+ netif_info(nic, probe, nic->netdev,
+ "found MII-less i82503 or 80c24 or other PHY\n");
nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1435,18 +1441,20 @@ static int e100_phy_init(struct nic *nic)
return 0; /* simply return and hope for the best */
else {
/* for unknown cases log a fatal error */
- DPRINTK(HW, ERR,
- "Failed to locate any known PHY, aborting.\n");
+ netif_err(nic, hw, nic->netdev,
+ "Failed to locate any known PHY, aborting\n");
return -EAGAIN;
}
} else
- DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy_addr = %d\n", nic->mii.phy_id);
/* Get phy ID */
id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
nic->phy = (u32)id_hi << 16 | (u32)id_lo;
- DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy ID = 0x%08X\n", nic->phy);
/* Select the phy and isolate the rest */
for (addr = 0; addr < 32; addr++) {
@@ -1508,7 +1516,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
- DPRINTK(HW, ERR, "e100_hw_init\n");
+ netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
if (!in_interrupt() && (err = e100_self_test(nic)))
return err;
@@ -1538,16 +1546,16 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == count)
break;
- memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
}
@@ -1556,8 +1564,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev_mc_count(netdev), netdev->flags);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "mc_count=%d, flags=0x%04X\n",
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1630,7 +1639,8 @@ static void e100_update_stats(struct nic *nic)
if (e100_exec_cmd(nic, cuc_dump_reset, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1660,20 +1670,19 @@ static void e100_watchdog(unsigned long data)
struct nic *nic = (struct nic *)data;
struct ethtool_cmd cmd;
- DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+ netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+ "right now = %ld\n", jiffies);
/* mii library handles link maintenance tasks */
mii_ethtool_gset(&nic->mii, &cmd);
if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
- nic->netdev->name,
- cmd.speed == SPEED_100 ? "100" : "10",
- cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+ cmd.speed == SPEED_100 ? 100 : 10,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "NIC Link is Down\n");
}
mii_check_link(&nic->mii);
@@ -1733,7 +1742,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
if (e100_exec_cmd(nic, cuc_nop, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_nop failed\n");
udelay(1);
}
@@ -1742,17 +1752,18 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
switch (err) {
case -ENOSPC:
/* We queued the skb, but now we're out of space. */
- DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "No space for CB\n");
netif_stop_queue(netdev);
break;
case -ENOMEM:
/* This is a hard error - log it. */
- DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "Out of Tx resources, returning skb\n");
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -1768,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
for (cb = nic->cb_to_clean;
cb->status & cpu_to_le16(cb_complete);
cb = nic->cb_to_clean = cb->next) {
- DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
- (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
- cb->status);
+ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ "cb[%d]->status = 0x%04X\n",
+ (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+ cb->status);
if (likely(cb->skb != NULL)) {
dev->stats.tx_packets++;
@@ -1913,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
- DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+ netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ "status=0x%04X\n", rfd_status);
/* If data isn't ready, nothing to indicate */
if (unlikely(!(rfd_status & cb_complete))) {
@@ -2124,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
struct nic *nic = netdev_priv(netdev);
u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
- DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+ netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+ "stat_ack = 0x%02X\n", stat_ack);
if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2264,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
struct nic *nic = container_of(work, struct nic, tx_timeout_task);
struct net_device *netdev = nic->netdev;
- DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
- ioread8(&nic->csr->scb.status));
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
rtnl_lock();
if (netif_running(netdev)) {
@@ -2532,8 +2546,8 @@ static int e100_set_ringparam(struct net_device *netdev,
rfds->count = min(rfds->count, rfds->max);
cbs->count = max(ring->tx_pending, cbs->min);
cbs->count = min(cbs->count, cbs->max);
- DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
- rfds->count, cbs->count);
+ netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
+ rfds->count, cbs->count);
if (netif_running(netdev))
e100_up(nic);
@@ -2710,7 +2724,7 @@ static int e100_open(struct net_device *netdev)
netif_carrier_off(netdev);
if ((err = e100_up(nic)))
- DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+ netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
return err;
}
@@ -2744,7 +2758,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
if (((1 << debug) - 1) & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return -ENOMEM;
}
@@ -2762,35 +2776,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, netdev);
if ((err = pci_enable_device(pdev))) {
- DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
goto err_out_free_dev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
if ((err = pci_request_regions(pdev, DRV_NAME))) {
- DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
if (use_io)
- DPRINTK(PROBE, INFO, "using i/o access mode\n");
+ netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
if (!nic->csr) {
- DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -2824,7 +2837,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if ((err = e100_alloc(nic))) {
- DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
goto err_out_iounmap;
}
@@ -2837,13 +2850,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
if (!is_valid_ether_addr(netdev->perm_addr)) {
if (!eeprom_bad_csum_allow) {
- DPRINTK(PROBE, ERR, "Invalid MAC address from "
- "EEPROM, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
goto err_out_free;
} else {
- DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
- "you MUST configure one.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
}
}
@@ -2859,7 +2870,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
strcpy(netdev->name, "eth%d");
if ((err = register_netdev(netdev))) {
- DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
goto err_out_free;
}
nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2867,9 +2878,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
- DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
- (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
- pdev->irq, netdev->dev_addr);
+ netif_info(nic, probe, nic->netdev,
+ "addr 0x%llx, irq %d, MAC addr %pM\n",
+ (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+ pdev->irq, netdev->dev_addr);
return 0;
@@ -3027,7 +3039,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -3086,8 +3098,8 @@ static struct pci_driver e100_driver = {
static int __init e100_init_module(void)
{
if (((1 << debug) - 1) & NETIF_MSG_DRV) {
- printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
}
return pci_register_driver(&e100_driver);
}
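
Note on the e100.c hunks above, which replace the driver-private DPRINTK() macro with the generic netif_<level>()/netif_printk() helpers and the netdev_/pr_ printk wrappers: the new calls gate output on a per-interface msg_enable bitmap instead of a driver-local debug mask. A minimal sketch of the call pattern (not part of the patch), assuming the standard definitions in <linux/netdevice.h>; struct example_priv and example_log() are hypothetical names used only for illustration:

#include <linux/netdevice.h>

struct example_priv {                       /* hypothetical driver-private struct */
        struct net_device *netdev;
        u32 msg_enable;                     /* NETIF_MSG_* bits checked by netif_<level>() */
};

static void example_log(struct example_priv *priv, int err)
{
        /* printed at KERN_ERR only if NETIF_MSG_PROBE is set in msg_enable */
        netif_err(priv, probe, priv->netdev,
                  "ucode cmd failed with error %d\n", err);

        /* explicit-level form used where the old DPRINTK(..., DEBUG, ...) calls were */
        netif_printk(priv, tx_err, KERN_DEBUG, priv->netdev,
                     "exec cuc_nop failed\n");

        /* device-prefixed message with no msg_enable gating */
        netdev_info(priv->netdev, "NIC Link is Up\n");
}
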
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c213185..40b62b406b0 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#ifdef DBG
-#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
-#else
-#define E1000_DBG(args...)
-#endif
-
-#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
-
-#define PFX "e1000: "
-
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-do { \
- if (NETIF_MSG_##nlevel & adapter->msg_enable) \
- printk(KERN_##klevel PFX "%s: %s: " fmt, \
- adapter->netdev->name, __func__, ##args); \
-} while (0)
-
#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
__E1000_DOWN
};
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
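
The block of e_dbg()/e_err()/e_info() macros added above binds each message to a name expected to be in scope at the call site: e_dbg() resolves the net_device through e1000_get_hw_dev(hw), while the others use adapter->netdev or adapter->pdev, which is why e1000_hw.c below now includes e1000.h rather than e1000_hw.h. A short sketch of the intended call pattern (not part of the patch), assuming e1000.h is included; the example_* function names are hypothetical:

static s32 example_hw_helper(struct e1000_hw *hw)
{
        e_dbg("example_hw_helper");          /* expands via e1000_get_hw_dev(hw) */
        return E1000_SUCCESS;
}

static void example_adapter_helper(struct e1000_adapter *adapter)
{
        e_info("TSO is %s\n", "Enabled");    /* uses adapter->netdev */
        e_dev_warn("example warning\n");     /* uses &adapter->pdev->dev */
}
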
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e9311727..d5ff029aa7b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
- DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = true;
return 0;
}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & test[i], address);
read = readl(address);
if (read != (write & test[i] & mask)) {
- DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, read, (write & test[i] & mask));
+ e_info("pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
*data = reg;
return true;
}
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & mask, address);
read = readl(address);
if ((read & mask) != (write & mask)) {
- DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, (read & mask), (write & mask));
+ e_err("set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
*data = reg;
return true;
}
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
ew32(STATUS, toggle);
after = er32(STATUS) & toggle;
if (value != after) {
- DPRINTK(DRV, ERR, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- DPRINTK(HW, INFO, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (txdr->desc && txdr->buffer_info) {
for (i = 0; i < txdr->count; i++) {
if (txdr->buffer_info[i].dma)
- pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txdr->buffer_info[i].skb)
dev_kfree_skb(txdr->buffer_info[i].skb);
}
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->desc && rxdr->buffer_info) {
for (i = 0; i < rxdr->count; i++) {
if (rxdr->buffer_info[i].dma)
- pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
}
}
if (txdr->desc) {
- pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
txdr->desc = NULL;
}
if (rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
rxdr->desc = NULL;
}
@@ -1012,8 +1015,6 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info = NULL;
kfree(rxdr->buffer_info);
rxdr->buffer_info = NULL;
-
- return;
}
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
@@ -1039,7 +1040,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1070,8 +1072,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].skb = skb;
txdr->buffer_info[i].length = skb->len;
txdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1095,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
@@ -1126,8 +1129,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
@@ -1444,10 +1447,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
- txdr->buffer_info[k].dma,
- txdr->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ DMA_TO_DEVICE);
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
@@ -1455,10 +1458,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
- rxdr->buffer_info[l].dma,
- rxdr->buffer_info[l].length,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
@@ -1558,7 +1561,7 @@ static void e1000_diag_test(struct net_device *netdev,
u8 forced_speed_duplex = hw->forced_speed_duplex;
u8 autoneg = hw->autoneg;
- DPRINTK(HW, INFO, "offline testing starting\n");
+ e_info("offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1598,7 +1601,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- DPRINTK(HW, INFO, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1694,7 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
- DPRINTK(DRV, ERR, "Interface does not support "
+ e_err("Interface does not support "
"directed (unicast) frame wake-up packets\n");
break;
default:
@@ -1706,8 +1709,6 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1725,8 +1726,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
if (wol->wolopts & WAKE_UCAST) {
- DPRINTK(DRV, ERR, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
+ e_err("Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP;
}
break;
@@ -1803,7 +1804,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
if (adapter->hw.mac_type < e1000_82545)
return -EOPNOTSUPP;
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1822,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
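
Besides the logging conversion, the e1000_ethtool.c hunks above move the descriptor-ring test code from the PCI DMA wrappers to the generic DMA API: the dma_* calls take &pdev->dev and a DMA_* direction constant, and dma_alloc_coherent() adds an explicit gfp_t. A minimal sketch of that mapping (not part of the patch), assuming <linux/pci.h> and <linux/dma-mapping.h>; the example_* helpers are hypothetical:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
                                dma_addr_t *dma)
{
        /* was: pci_alloc_consistent(pdev, size, dma) */
        return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static dma_addr_t example_map_buf(struct pci_dev *pdev, void *data, size_t len)
{
        /* was: pci_map_single(pdev, data, len, PCI_DMA_TODEVICE) */
        return dma_map_single(&pdev->dev, data, len, DMA_TO_DEVICE);
}
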
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f1282..c7e242b69a1 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
* Shared functions for accessing and configuring the MAC
*/
-#include "e1000_hw.h"
+#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_phy_type");
+ e_dbg("e1000_set_phy_type");
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- DEBUGFUNC("e1000_phy_init_script");
+ e_dbg("e1000_phy_init_script");
if (hw->phy_init_script) {
msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_mac_type");
+ e_dbg("e1000_set_mac_type");
switch (hw->device_id) {
case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- DEBUGFUNC("e1000_set_media_type");
+ e_dbg("e1000_set_media_type");
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_reset_hw");
+ e_dbg("e1000_reset_hw");
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ e_dbg("Issuing a global reset to MAC\n");
switch (hw->mac_type) {
case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- DEBUGFUNC("e1000_init_hw");
+ e_dbg("e1000_init_hw");
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
- DEBUGOUT("Error Initializing Identification LED\n");
+ e_dbg("Error Initializing Identification LED\n");
return ret_val;
}
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
e1000_set_media_type(hw);
/* Disabling VLAN filtering. */
- DEBUGOUT("Initializing the IEEE VLAN\n");
+ e_dbg("Initializing the IEEE VLAN\n");
if (hw->mac_type < e1000_82545_rev_3)
ew32(VET, 0);
e1000_clear_vfta(hw);
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
ew32(RCTL, E1000_RCTL_RST);
E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
}
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ e_dbg("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- DEBUGFUNC("e1000_adjust_serdes_amplitude");
+ e_dbg("e1000_adjust_serdes_amplitude");
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_setup_link");
+ e_dbg("e1000_setup_link");
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
hw->original_fc = hw->fc;
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
/* Take the 4 bits from EEPROM word 0x0F that determine the initial
* polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
- DEBUGOUT
- ("Initializing the Flow Control address, type and timer regs\n");
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
ew32(FCT, FLOW_CONTROL_TYPE);
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- DEBUGFUNC("e1000_setup_fiber_serdes_link");
+ e_dbg("e1000_setup_fiber_serdes_link");
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
break;
}
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* link-up status bit will be set and the flow control enable bits (RFCE
* and TFCE) will be set according to their negotiated value.
*/
- DEBUGOUT("Auto-negotiation enabled\n");
+ e_dbg("Auto-negotiation enabled\n");
ew32(TXCW, txcw);
ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
- DEBUGOUT("Looking for Link\n");
+ e_dbg("Looking for Link\n");
for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msleep(10);
status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
if (i == (LINK_UP_TIMEOUT / 10)) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
* e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
+ e_dbg("Error while checking for link\n");
return ret_val;
}
hw->autoneg_failed = 0;
} else {
hw->autoneg_failed = 0;
- DEBUGOUT("Valid Link Found\n");
+ e_dbg("Valid Link Found\n");
}
} else {
- DEBUGOUT("No Signal Detected\n");
+ e_dbg("No Signal Detected\n");
}
return E1000_SUCCESS;
}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_preconfig");
+ e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
/* Make sure we have a valid PHY */
ret_val = e1000_detect_gig_phy(hw);
if (ret_val) {
- DEBUGOUT("Error, did not detect valid phy.\n");
+ e_dbg("Error, did not detect valid phy.\n");
return ret_val;
}
- DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+ e_dbg("Phy ID = %x\n", hw->phy_id);
/* Set PHY to class A mode (if necessary) */
ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_igp_setup");
+ e_dbg("e1000_copper_link_igp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, false);
if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
+ e_dbg("Error Disabling LPLU D3\n");
return ret_val;
}
}
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_mgp_setup");
+ e_dbg("e1000_copper_link_mgp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
/* SW Reset the PHY so all changes take effect */
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_autoneg");
+ e_dbg("e1000_copper_link_autoneg");
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ e_dbg("Error Setting up Auto-Negotiation\n");
return ret_val;
}
- DEBUGOUT("Restarting Auto-Neg\n");
+ e_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->wait_autoneg_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error while waiting for autoneg to complete\n");
return ret_val;
}
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_copper_link_postconfig");
+ e_dbg("e1000_copper_link_postconfig");
if (hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
+ e_dbg("Error configuring MAC to PHY settings\n");
return ret_val;
}
}
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error Configuring Flow Control\n");
+ e_dbg("Error Configuring Flow Control\n");
return ret_val;
}
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_config_dsp_after_link_change(hw, true);
if (ret_val) {
- DEBUGOUT("Error Configuring DSP after link up\n");
+ e_dbg("Error Configuring DSP after link up\n");
return ret_val;
}
}
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_setup_copper_link");
+ e_dbg("e1000_setup_copper_link");
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
- DEBUGOUT("Forcing speed and duplex\n");
+ e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
+ e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
}
}
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT("Valid link established!!!\n");
+ e_dbg("Valid link established!!!\n");
return E1000_SUCCESS;
}
udelay(10);
}
- DEBUGOUT("Unable to establish link!!!\n");
+ e_dbg("Unable to establish link!!!\n");
return E1000_SUCCESS;
}
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- DEBUGFUNC("e1000_phy_setup_autoneg");
+ e_dbg("e1000_phy_setup_autoneg");
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
- DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
+ e_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
+ e_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
+ e_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
+ e_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
- DEBUGOUT
+ e_dbg
("Advertise 1000mb Half duplex requested, request denied!\n");
}
/* Do we want to advertise 1000 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
+ e_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- DEBUGFUNC("e1000_phy_force_speed_duplex");
+ e_dbg("e1000_phy_force_speed_duplex");
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
- DEBUGOUT1("hw->fc = %d\n", hw->fc);
+ e_dbg("hw->fc = %d\n", hw->fc);
/* Read the Device Control Register. */
ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
/* We want to force half duplex so we CLEAR the full duplex bits in
* the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
+ e_dbg("Half Duplex\n");
}
/* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SPD_100;
mii_ctrl_reg |= MII_CR_SPEED_100;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- DEBUGOUT("Forcing 100mb ");
+ e_dbg("Forcing 100mb ");
} else {
/* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
mii_ctrl_reg |= MII_CR_SPEED_10;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb ");
+ e_dbg("Forcing 10mb ");
}
e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
if (hw->wait_autoneg_complete) {
/* We will wait for autoneg to complete. */
- DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* We didn't get link. Reset the DSP and wait again for link. */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting PHY DSP\n");
+ e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- DEBUGFUNC("e1000_config_collision_dist");
+ e_dbg("e1000_config_collision_dist");
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_config_mac_to_phy");
+ e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- DEBUGFUNC("e1000_force_mac_fc");
+ e_dbg("e1000_force_mac_fc");
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- DEBUGFUNC("e1000_config_fc_after_link_up");
+ e_dbg("e1000_config_fc_after_link_up");
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
&& (!hw->autoneg))) {
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
+ e_dbg("Error forcing flow control settings\n");
return ret_val;
}
}
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
- DEBUGOUT("Flow Control = FULL.\n");
+ e_dbg("Flow Control = FULL.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
}
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_TX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == E1000_FC_TX_PAUSE) ||
hw->fc_strict_ieee) {
hw->fc = E1000_FC_NONE;
- DEBUGOUT("Flow Control = NONE.\n");
+ e_dbg("Flow Control = NONE.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error forcing flow control settings\n");
return ret_val;
}
} else {
- DEBUGOUT
+ e_dbg
("Copper PHY and Auto Neg has not completed.\n");
}
}
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_check_for_serdes_link_generic");
+ e_dbg("e1000_check_for_serdes_link_generic");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->autoneg_failed = 1;
goto out;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, hw->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - forced.\n");
+ e_dbg("SERDES: Link up - forced.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - force failed.\n");
+ e_dbg("SERDES: Link down - force failed.\n");
}
}
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - autoneg "
+ e_dbg("SERDES: Link up - autoneg "
"completed successfully.\n");
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - invalid"
+ e_dbg("SERDES: Link down - invalid"
"codewords detected in autoneg.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - no sync.\n");
+ e_dbg("SERDES: Link down - no sync.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ e_dbg("SERDES: Link down - autoneg failed\n");
}
}
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_for_link");
+ e_dbg("e1000_check_for_link");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error configuring MAC to PHY settings\n");
return ret_val;
}
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
return ret_val;
}
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_get_speed_and_duplex");
+ e_dbg("e1000_get_speed_and_duplex");
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
+ e_dbg("1000 Mbs, ");
} else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
+ e_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
+ e_dbg("10 Mbs, ");
}
if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\n");
+ e_dbg(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\n");
+ e_dbg("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_wait_autoneg");
- DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_read_phy_reg");
+ e_dbg("e1000_read_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_read_phy_reg_ex");
+ e_dbg("e1000_read_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
+ e_dbg("MDI Read did not complete\n");
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
+ e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
*phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_write_phy_reg");
+ e_dbg("e1000_write_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_write_phy_reg_ex");
+ e_dbg("e1000_write_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
+ e_dbg("MDI Write did not complete\n");
return -E1000_ERR_PHY;
}
} else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_phy_hw_reset");
+ e_dbg("e1000_phy_hw_reset");
- DEBUGOUT("Resetting Phy...\n");
+ e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
/* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_reset");
+ e_dbg("e1000_phy_reset");
switch (hw->phy_type) {
case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- DEBUGFUNC("e1000_detect_gig_phy");
+ e_dbg("e1000_detect_gig_phy");
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
match = true;
break;
default:
- DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
}
phy_init_status = e1000_set_phy_type(hw);
if ((match) && (phy_init_status == E1000_SUCCESS)) {
- DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
return E1000_SUCCESS;
}
- DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
return -E1000_ERR_PHY;
}
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_phy_reset_dsp");
+ e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_igp_get_info");
+ e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_m88_get_info");
+ e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_get_info");
+ e_dbg("e1000_phy_get_info");
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
phy_info->remote_rx = e1000_1000t_rx_status_undefined;
if (hw->media_type != e1000_media_type_copper) {
- DEBUGOUT("PHY info is only valid for copper media\n");
+ e_dbg("PHY info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
return ret_val;
if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
- DEBUGOUT("PHY info is only valid if link is up\n");
+ e_dbg("PHY info is only valid if link is up\n");
return -E1000_ERR_CONFIG;
}
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_validate_mdi_settings");
+ e_dbg("e1000_validate_mdi_settings");
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
+ e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
return -E1000_ERR_CONFIG;
}
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- DEBUGFUNC("e1000_init_eeprom_params");
+ e_dbg("e1000_init_eeprom_params");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- DEBUGFUNC("e1000_acquire_eeprom");
+ e_dbg("e1000_acquire_eeprom");
eecd = er32(EECD);
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
+ e_dbg("Could not acquire EEPROM grant\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- DEBUGFUNC("e1000_release_eeprom");
+ e_dbg("e1000_release_eeprom");
eecd = er32(EECD);
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- DEBUGFUNC("e1000_spi_eeprom_ready");
+ e_dbg("e1000_spi_eeprom_ready");
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
* only 0-5mSec on 5V devices)
*/
if (retry_count >= EEPROM_MAX_RETRY_SPI) {
- DEBUGOUT("SPI EEPROM Status error\n");
+ e_dbg("SPI EEPROM Status error\n");
return -E1000_ERR_EEPROM;
}
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- DEBUGFUNC("e1000_read_eeprom");
+ e_dbg("e1000_read_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT2
- ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
- offset, eeprom->word_size);
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
return -E1000_ERR_EEPROM;
}
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_validate_eeprom_checksum");
+ e_dbg("e1000_validate_eeprom_checksum");
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
- DEBUGOUT("EEPROM Checksum Invalid\n");
+ e_dbg("EEPROM Checksum Invalid\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_update_eeprom_checksum");
+ e_dbg("e1000_update_eeprom_checksum");
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
}
checksum = (u16) EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
- DEBUGOUT("EEPROM Write Error\n");
+ e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
}
return E1000_SUCCESS;
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- DEBUGFUNC("e1000_write_eeprom");
+ e_dbg("e1000_write_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT("\"words\" parameter out of bounds\n");
+ e_dbg("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
}
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- DEBUGFUNC("e1000_write_eeprom_spi");
+ e_dbg("e1000_write_eeprom_spi");
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- DEBUGFUNC("e1000_write_eeprom_microwire");
+ e_dbg("e1000_write_eeprom_microwire");
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
udelay(50);
}
if (i == 200) {
- DEBUGOUT("EEPROM Write did not complete\n");
+ e_dbg("EEPROM Write did not complete\n");
return -E1000_ERR_EEPROM;
}
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- DEBUGFUNC("e1000_read_mac_addr");
+ e_dbg("e1000_read_mac_addr");
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- DEBUGFUNC("e1000_init_rx_addrs");
+ e_dbg("e1000_init_rx_addrs");
/* Setup the receive address. */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
+ e_dbg("Programming MAC Address into RAR[0]\n");
e1000_rar_set(hw, hw->mac_addr, 0);
rar_num = E1000_RAR_ENTRIES;
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ e_dbg("Clearing RAR[1-15]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- DEBUGFUNC("e1000_id_led_init");
+ e_dbg("e1000_id_led_init");
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
hw->ledctl_mode2 = hw->ledctl_default;
if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_setup_led");
+ e_dbg("e1000_setup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_cleanup_led");
+ e_dbg("e1000_cleanup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_on");
+ e_dbg("e1000_led_on");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_off");
+ e_dbg("e1000_led_off");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_reset_adaptive");
+ e_dbg("e1000_reset_adaptive");
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
hw->in_ifs_mode = false;
ew32(AIT, 0);
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_update_adaptive");
+ e_dbg("e1000_update_adaptive");
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
}
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- DEBUGFUNC("e1000_get_cable_length");
+ e_dbg("e1000_get_cable_length");
*min_length = *max_length = 0;
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_polarity");
+ e_dbg("e1000_check_polarity");
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_downshift");
+ e_dbg("e1000_check_downshift");
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
};
u16 min_length, max_length;
- DEBUGFUNC("e1000_config_dsp_after_link_change");
+ e_dbg("e1000_config_dsp_after_link_change");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (link_up) {
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
+ e_dbg("Error getting link speed and duplex\n");
return ret_val;
}
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_set_phy_mode");
+ e_dbg("e1000_set_phy_mode");
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_set_d3_lplu_state");
+ e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- DEBUGFUNC("e1000_set_vco_speed");
+ e_dbg("e1000_set_vco_speed");
switch (hw->mac_type) {
case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_auto_rd_done");
+ e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_phy_cfg_done");
+ e_dbg("e1000_get_phy_cfg_done");
mdelay(10);
return E1000_SUCCESS;
}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb0daf..ecd9f6c6bcd 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
#include "e1000_osdep.h"
+
/* Forward declarations of structures used by the shared code */
struct e1000_hw;
struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed8..ebdea089166 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
+ * e1000_get_hw_dev - return the net_device associated with an e1000_hw
+ * used by the hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
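The helper above gives the shared hw-layer code a route back to its net_device, which is what allows the DEBUGOUT/DEBUGFUNC calls in e1000_hw.c to be replaced by a netdev-aware e_dbg(). A minimal sketch of how such a macro could be wired on top of this helper (the exact definition lives in e1000.h and may differ):

	/* Sketch only -- assumes a local "hw" pointer, as in the shared code */
	#define e_dbg(format, arg...) \
		netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)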
+/**
* e1000_init_module - Driver Registration Routine
*
* e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- e1000_driver_string, e1000_driver_version);
+ pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
- printk(KERN_INFO "%s\n", e1000_copyright);
+ pr_info("%s\n", e1000_copyright);
ret = pci_register_driver(&e1000_driver);
if (copybreak != COPYBREAK_DEFAULT) {
if (copybreak == 0)
- printk(KERN_INFO "e1000: copybreak disabled\n");
+ pr_info("copybreak disabled\n");
else
- printk(KERN_INFO "e1000: copybreak enabled for "
- "packets <= %u bytes\n", copybreak);
+ pr_info("copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
}
return ret;
}
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ e_err("Unable to allocate interrupt Error: %d\n", err);
}
return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(WUC, 0);
if (e1000_init_hw(hw))
- DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
data = kmalloc(eeprom.len, GFP_KERNEL);
if (!data) {
- printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
- " data\n");
+ pr_err("Unable to allocate memory to dump EEPROM data\n");
return;
}
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
csum_new += data[i] + (data[i + 1] << 8);
csum_new = EEPROM_SUM - csum_new;
- printk(KERN_ERR "/*********************/\n");
- printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
- printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+ pr_err("/*********************/\n");
+ pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+ pr_err("Calculated : 0x%04x\n", csum_new);
- printk(KERN_ERR "Offset Values\n");
- printk(KERN_ERR "======== ======\n");
+ pr_err("Offset Values\n");
+ pr_err("======== ======\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
- printk(KERN_ERR "Include this output when contacting your support "
- "provider.\n");
- printk(KERN_ERR "This is not a software error! Something bad "
- "happened to your hardware or\n");
- printk(KERN_ERR "EEPROM image. Ignoring this "
- "problem could result in further problems,\n");
- printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
- printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
- "which is invalid\n");
- printk(KERN_ERR "and requires you to set the proper MAC "
- "address manually before continuing\n");
- printk(KERN_ERR "to enable this network device.\n");
- printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
- "to your hardware vendor\n");
- printk(KERN_ERR "or Intel Customer Support.\n");
- printk(KERN_ERR "/*********************/\n");
+ pr_err("Include this output when contacting your support provider.\n");
+ pr_err("This is not a software error! Something bad happened to\n");
+ pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+ pr_err("result in further problems, possibly loss of data,\n");
+ pr_err("corruption or system hangs!\n");
+ pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+ pr_err("which is invalid and requires you to set the proper MAC\n");
+ pr_err("address manually before continuing to enable this network\n");
+ pr_err("device. Please inspect the EEPROM dump and report the\n");
+ pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+ pr_err("/*********************/\n");
kfree(data);
}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
- E1000_ERR("No usable DMA configuration, "
- "aborting\n");
+ pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
}
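The probe path now negotiates addressing through the generic DMA API: try the 64-bit mask (and matching coherent mask) first, fall back to 32-bit, and only abort when neither sticks. A condensed sketch of that pattern, with an illustrative function name and return value:

	/* Sketch, not the driver's code: dma_set_* return 0 on success */
	static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
	{
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			*using_dac = 1;	/* device can DMA above 4 GB */
			return 0;
		}
		*using_dac = 0;
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;	/* no usable DMA configuration */
		return 0;
	}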
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize eeprom parameters */
if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
+ e_err("EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (e1000_validate_eeprom_checksum(hw) < 0) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e_err("The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
* set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else {
/* copy the MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw))
- DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ e_err("EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ e_err("Invalid MAC Address\n");
e1000_get_bus_info(hw);
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
/* print bus type/speed/width info */
- DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ e_info("(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
- printk("%pM\n", netdev->dev_addr);
-
- /* reset the hardware with the new settings */
- e1000_reset(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ e_info("%pM\n", netdev->dev_addr);
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+ e_info("Intel(R) PRO/1000 Network Connection\n");
cards_found++;
return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
if (e1000_set_mac_type(hw)) {
- DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ e_err("Unknown MAC Type\n");
return -EIO;
}
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->num_rx_queues = 1;
if (e1000_alloc_queues(adapter)) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ e_err("Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
void *olddesc = txdr->desc;
dma_addr_t olddma = txdr->dma;
- DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
- "at %p\n", txdr->size, txdr->desc);
+ e_err("txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
/* Try again, without freeing the previous */
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+ &txdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!txdr->desc) {
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
goto setup_tx_desc_die;
}
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
/* give up */
- pci_free_consistent(pdev, txdr->size, txdr->desc,
- txdr->dma);
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the transmit descriptor ring\n");
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
vfree(txdr->buffer_info);
return -ENOMEM;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
}
}
memset(txdr->desc, 0, txdr->size);
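The descriptor ring moves from pci_alloc_consistent() to dma_alloc_coherent(), and the errata-23 handling keeps its shape: if the ring straddles a 64 kB boundary, a second ring is allocated before the first is released (so the allocator cannot hand back the same region), and whichever copy is not kept is freed. A compressed sketch of that retry, with illustrative names (check_64k() stands in for e1000_check_64k_bound()):

	/* Sketch only: returns NULL if no 64 kB-safe ring could be obtained */
	static void *alloc_ring_64k_safe(struct device *dev, size_t size,
					 dma_addr_t *dma,
					 bool (*check_64k)(void *desc, size_t size))
	{
		void *desc = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		void *olddesc;
		dma_addr_t olddma;

		if (!desc || check_64k(desc, size))
			return desc;

		olddesc = desc;			/* hold the misaligned ring */
		olddma  = *dma;
		desc = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
		if (desc && !check_64k(desc, size)) {
			dma_free_coherent(dev, size, desc, *dma);
			desc = NULL;		/* give up */
		}
		dma_free_coherent(dev, size, olddesc, olddma);
		return desc;
	}

The Rx ring setup a few hunks below follows the same pattern.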
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Tx Queue %u failed\n", i);
+ e_err("Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->size = rxdr->count * desc_len;
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = rxdr->desc;
dma_addr_t olddma = rxdr->dma;
- DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
- "at %p\n", rxdr->size, rxdr->desc);
+ e_err("rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+ &rxdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate memory for the Rx descriptor "
+ "ring\n");
goto setup_rx_desc_die;
}
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
/* give up */
- pci_free_consistent(pdev, rxdr->size, rxdr->desc,
- rxdr->dma);
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory for the Rx "
+ "descriptor ring\n");
goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
}
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Rx Queue %u failed\n", i);
+ e_err("Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_rx_irq) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
bool use_uc = false;
- struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
- DPRINTK(PROBE, ERR, "memory allocation failed\n");
+ e_err("memory allocation failed\n");
return;
}
@@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
e1000_rar_set(hw, ha->addr, i++);
}
- WARN_ON(i == rar_entries);
-
- netdev_for_each_mc_addr(mc_ptr, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == rar_entries) {
/* load any remaining addresses into the hash table */
u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_value = e1000_hash_mc_addr(hw, ha->addr);
hash_reg = (hash_value >> 5) & 0x7F;
hash_bit = hash_value & 0x1F;
mta = (1 << hash_bit);
mcarray[hash_reg] |= mta;
} else {
- e1000_rar_set(hw, mc_ptr->da_addr, i++);
+ e1000_rar_set(hw, ha->addr, i++);
}
}
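Addresses that overflow the receive-address registers land in the multicast table array: 128 32-bit MTA registers form a 4096-bit hash filter, with bits 11:5 of the hash selecting the register and bits 4:0 the bit inside it. Worked example with a hypothetical hash value:

	hash_value = 0x0d2a;                 /* hypothetical */
	hash_reg   = (0x0d2a >> 5) & 0x7F;   /* = 0x69, one of 128 registers */
	hash_bit   = 0x0d2a & 0x1F;          /* = 0x0a, bit within it        */
	mcarray[0x69] |= 1 << 0x0a;          /* 128 x 32 = 4096 filter bits  */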
@@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data)
&adapter->link_duplex);
ctrl = er32(CTRL);
- printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl &
- E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
- E1000_CTRL_RFCE) ? "RX" : ((ctrl &
- E1000_CTRL_TFCE) ? "TX" : "None" )));
+ pr_info("%s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None")));
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "e1000: %s NIC Link is Down\n",
- netdev->name);
+ pr_info("%s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2384,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ u32 dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure rx ring is cleaned */
ew32(ICS, E1000_ICS_RXDMT0);
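In the new setting 4, the watchdog rescales the interrupt rate each interval from the Tx/Rx byte counters: perfectly symmetric traffic (dif == 0) yields 2000 interrupts/s, completely one-sided traffic (dif == goc) yields 8000, and the ITR register is written in 256 ns units. Worked example with hypothetical counters for one interval:

	/* gotcl = 9,000,000 bytes, gorcl = 1,000,000 bytes (hypothetical) */
	goc = (9000000 + 1000000) / 10000;      /* = 1000               */
	dif = (9000000 - 1000000) / 10000;      /* =  800               */
	itr = 800 * 6000 / 1000 + 2000;         /* = 6800 ints/s        */
	ew32(ITR, 1000000000 / (6800 * 256));   /* = 574 (256 ns units) */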
@@ -2525,8 +2544,6 @@ set_itr_now:
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
-
- return;
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit()))
- DPRINTK(DRV, WARNING,
- "checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n", skb->protocol);
break;
}
@@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
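Tx buffers switch to the streaming DMA API here: every mapping is validated with dma_mapping_error() before the address reaches a descriptor, and is later released by the matching unmap call with the same length and direction (the unmap side is the e1000_unmap_and_free_tx_resource() hunk earlier in this patch). A minimal sketch of the pairing, with illustrative names:

	/* Sketch only */
	dma = dma_map_single(&pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;		/* never post an unmapped address */
	/* ... hardware DMA completes ... */
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);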
@@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- DPRINTK(DRV, ERR,
- "__pskb_pull_tail failed.\n");
+ e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
break;
default:
/* do nothing */
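The open-coded skb->len - skb->data_len is replaced by skb_headlen(), which is the same quantity by definition:

	/* include/linux/skbuff.h:                                   */
	/*   skb_headlen(skb) == skb->len - skb->data_len            */
	/* i.e. the bytes in the linear head, excluding paged frags. */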
@@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ e_err("Invalid MTU setting\n");
return -EINVAL;
}
@@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
@@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ pr_info("%s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " Tx Queue <%lu>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
+ e_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+ e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
@@ -3769,6 +3785,31 @@ next_desc:
return cleaned;
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+ struct e1000_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
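The copy-break logic is factored out of the Rx clean loop: frames no larger than the copybreak threshold (COPYBREAK_DEFAULT unless overridden) are copied into a right-sized skb so the full-sized Rx buffer can be recycled, while larger frames are passed up untouched. The call site is the e1000_clean_rx_irq() hunk further down:

	/* From the later hunk -- replaces the old inline copybreak code */
	e1000_check_copybreak(netdev, buffer_info, length, &skb);
	skb_put(skb, length);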
/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
@@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- E1000_DBG("%s: Receive packet consumed multiple"
- " buffers\n", netdev->name);
+ e_info("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
@@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
total_rx_bytes += length;
total_rx_packets++;
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- /* else just continue with the old one */
- }
- /* end copybreak code */
+ e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
skb_put(skb, length);
/* Receive Checksum Offload */
@@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -3999,11 +4021,11 @@ check_page:
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
put_page(buffer_info->page);
dev_kfree_skb(skb);
buffer_info->page = NULL;
@@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->dma = 0;
@@ -4120,16 +4142,15 @@ map_skb:
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
adapter->rx_buffer_len)) {
- DPRINTK(RX_ERR, ERR,
- "dma align check failed: %u bytes at %p\n",
- adapter->rx_buffer_len,
- (void *)(unsigned long)buffer_info->dma);
+ e_err("dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
int ret_val = pci_set_mwi(adapter->pdev);
if (ret_val)
- DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+ e_err("Error in setting MWI\n");
}
void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
/* Fiber NICs only allow 1000 Mbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
@@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ pr_err("Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (e1000_up(adapter)) {
- printk("e1000: can't bring device back up after reset\n");
+ pr_info("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d9298522f5a..edd1c75aa89 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F "\n")
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
-
-
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
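With the DEBUGOUT family gone, e1000_osdep.h is reduced to the register accessors. er32() folds in the 82542 quirk of having its registers at different offsets; an expansion example:

	er32(CTRL)
	/* -> readl(hw->hw_addr + E1000_CTRL)       on 82543 and newer */
	/* -> readl(hw->hw_addr + E1000_82542_CTRL) on 82542           */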
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741ccae..10d8d98bb79 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -188,14 +188,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
-/* Enable Kumeran Lock Loss workaround
- *
- * Valid Range: 0, 1
- *
- * Default Value: 1 (enabled)
- */
-E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -226,17 +218,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- DPRINTK(PROBE, INFO,
- "%s set to %i\n", opt->name, *value);
+ e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -248,7 +239,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ e_dev_info("%s\n", ent->str);
return 0;
}
}
@@ -258,7 +249,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
@@ -283,9 +274,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- DPRINTK(PROBE, NOTICE,
- "Warning: no configuration for board #%i\n", bd);
- DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ e_dev_warn("Warning: no configuration for board #%i "
+ "using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
@@ -472,27 +462,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
+ e_dev_info("%s turned off\n", opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic mode\n",
+ opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic conservative mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic conservative "
+ "mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_dev_info("%s set to simplified "
+ "(2000-8000) ints mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
- /* save the setting, because the dynamic bits change itr */
- /* clear the lower two bits because they are
+ /* save the setting, because the dynamic bits
+ * change itr.
+ * clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
@@ -543,19 +537,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
- DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Speed not valid for fiber adapters, parameter "
+ "ignored\n");
}
if (num_Duplex > bd) {
- DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Duplex not valid for fiber adapters, parameter "
+ "ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
- DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
- "not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
+ "adapters, parameter ignored\n");
}
}
@@ -619,9 +612,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
- DPRINTK(PROBE, INFO,
- "AutoNeg specified along with Speed or Duplex, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
@@ -680,79 +672,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
- DPRINTK(PROBE, INFO,
- "Speed and duplex autonegotiation enabled\n");
+ e_dev_info("Speed and duplex autonegotiation "
+ "enabled\n");
break;
case HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Half Duplex only\n");
+ e_dev_info("Half Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Full Duplex only\n");
+ e_dev_info("Full Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
- DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ e_dev_info("10 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
- DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "100 Mbps only\n");
+ e_dev_info("100 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
- DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
- "Duplex\n");
+ e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO,
- "Half Duplex is not supported at 1000 Mbps\n");
+ e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
+ "only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -762,9 +747,8 @@ full_duplex_only:
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
- DPRINTK(PROBE, INFO,
- "Speed, AutoNeg and MDI-X specifications are "
- "incompatible. Setting MDI-X to a compatible value.\n");
+ e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
+ "Setting MDI-X to a compatible value.\n");
}
}
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 90155552ea0..f654db9121d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -234,9 +234,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -271,6 +268,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /*
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
break;
case e1000_82574:
case e1000_82583:
@@ -281,6 +288,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
break;
}
@@ -323,7 +333,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
/*
- * Initialze device specific counter of SMBI acquisition
+ * Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
@@ -993,9 +1003,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
case e1000_82574:
case e1000_82583:
- e1000e_enable_tx_pkt_filtering(hw);
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
@@ -1137,8 +1148,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -1642,8 +1651,6 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1845,7 +1852,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1862,7 +1869,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d689..4dc02c71ffd 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -138,6 +138,11 @@
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
@@ -214,6 +219,8 @@
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -622,6 +629,8 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ee32b9b27a9..c0b3db40bd7 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -43,25 +43,16 @@
struct e1000_info;
-#define e_printk(level, adapter, format, arg...) \
- printk(level "%s: %s: " format, pci_name(adapter->pdev), \
- adapter->netdev->name, ## arg)
-
-#ifdef DEBUG
#define e_dbg(format, arg...) \
- e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
-#else
-#define e_dbg(format, arg...) do { (void)(hw); } while (0)
-#endif
-
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
- e_printk(KERN_ERR, adapter, format, ## arg)
+ netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
- e_printk(KERN_INFO, adapter, format, ## arg)
+ netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
- e_printk(KERN_WARNING, adapter, format, ## arg)
+ netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
- e_printk(KERN_NOTICE, adapter, format, ## arg)
+ netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
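The e1000e wrappers now forward straight to the netdev_* printk helpers, so e_dbg() is governed by the kernel's dynamic debug facility (when enabled) rather than a driver-private DEBUG build switch, and output is automatically prefixed with the device name. The macros still rely on a variable being in scope: adapter for e_err()/e_info()/e_warn()/e_notice(), hw for e_dbg(). Illustrative use (messages invented for the example):

	/* in a function with a local "adapter" */
	e_info("link is up\n");        /* -> netdev_info(adapter->netdev, ...) */
	/* in shared code with a local "hw" */
	e_dbg("NVM read failed\n");    /* -> netdev_dbg(hw->adapter->netdev, ...) */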
@@ -159,6 +150,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
enum e1000_boards {
board_82571,
board_82572,
@@ -195,6 +189,8 @@ struct e1000_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
@@ -370,6 +366,8 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct led_blink_task;
struct work_struct print_hang_task;
+
+ bool idle_check;
};
struct e1000_info {
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 27d21589a69..38d79a66905 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -221,9 +221,12 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
@@ -1380,8 +1383,6 @@ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330..2c521218102 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
@@ -1736,6 +1737,12 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
+ clear_bit(__E1000_TESTING, &adapter->state);
+ dev_open(netdev);
+ set_bit(__E1000_TESTING, &adapter->state);
+ }
+
e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1747,6 +1754,9 @@ static void e1000_diag_test(struct net_device *netdev,
data[2] = 0;
data[3] = 0;
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT))
+ dev_close(netdev);
+
clear_bit(__E1000_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
@@ -1889,7 +1899,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1914,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
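
The coalescing change above gives rx_coalesce_usecs value 4 a special meaning. A small sketch of the resulting mapping; the final branch is inferred from how e1000_get_coalesce() reads the value back (1000000 / itr_setting), so treat it as an assumption rather than the driver's exact code:

#include "e1000.h"      /* struct e1000_adapter and its itr fields */

/* Illustrative helper: map ethtool's rx-usecs onto the driver's ITR state. */
static void example_set_itr_mode(struct e1000_adapter *adapter, u32 usecs)
{
        if (usecs == 4) {
                /* new "simple" dynamic mode, recomputed in the watchdog */
                adapter->itr = adapter->itr_setting = 4;
        } else if (usecs <= 3) {
                /* 0, 1 and 3 select fixed modes (2 is rejected earlier) */
                adapter->itr = 20000;
                adapter->itr_setting = usecs;
        } else {
                /* a literal interval: stored as interrupts per second */
                adapter->itr = 1000000 / usecs;
                adapter->itr_setting = adapter->itr;
        }
}
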
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 8bdcd5f24ef..5d1220d188d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -208,6 +208,8 @@ enum e1e_registers {
E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
+ E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
E1000_GCR = 0x05B00, /* PCI-Ex Control */
E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
@@ -380,6 +382,7 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@@ -828,6 +831,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
bool adaptive_ifs;
+ bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -898,6 +902,7 @@ struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c8..b2507d93de9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_MNG_IAMT_MODE 0x2
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ u32 ctrl;
s32 ret_val = 0;
phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. Toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode, but only if there is no
+ * firmware present otherwise firmware will have done it.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+ msleep(50);
+ }
+
+ /*
+ * Reset the PHY before any access to it. Doing so ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
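
For readability, the LANPHYPC toggle added in this hunk can be viewed as a self-contained sequence; this sketch pulls it into a hypothetical helper, using the same register macros and delays as the patch:

/* Force the MAC-PHY interconnect out of SMBus mode when no firmware runs. */
static void example_toggle_lanphypc_pch(struct e1000_hw *hw)
{
        u32 ctrl = er32(CTRL);

        /* Drive LANPHYPC low under software control ... */
        ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
        ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
        ew32(CTRL, ctrl);
        udelay(10);

        /* ... then hand the pin back to hardware and let the PHY settle. */
        ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        ew32(CTRL, ctrl);
        msleep(50);
}
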
@@ -300,6 +330,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
+ break;
case e1000_phy_82578:
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
@@ -472,8 +503,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
if (mac->type == e1000_ich8lan)
mac->rar_entry_count--;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -657,8 +690,6 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
mutex_unlock(&nvm_mutex);
-
- return;
}
static DEFINE_MUTEX(swflag_mutex);
@@ -737,8 +768,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
ew32(EXTCNF_CTRL, extcnf_ctrl);
mutex_unlock(&swflag_mutex);
-
- return;
}
/**
@@ -785,11 +814,16 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
+ s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
+ !(hw->mac.type == e1000_pchlan))
+ return ret_val;
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@@ -801,97 +835,87 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
- (hw->mac.type == e1000_pchlan)) {
- struct e1000_adapter *adapter = hw->adapter;
-
- /* Check if SW needs to configure the PHY */
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
- data = er32(FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
/*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
*/
- data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ data = er32(STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
goto out;
- cnf_size = er32(EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
goto out;
+ }
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
- * OEM and LCD Write Enable bits are set in the NVM.
- * When both NVM bits are cleared, SW will configure
- * them instead.
- */
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
- if (ret_val)
- goto out;
-
- data = er32(LEDCTL);
- ret_val = e1000_write_phy_reg_hv_locked(hw,
- HV_LED_CONFIG,
- (u16)data);
- if (ret_val)
- goto out;
- }
- /* Configure LCD from extended configuration region. */
+ /* Configure LCD from extended configuration region. */
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
- for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
- reg_addr &= PHY_REG_MASK;
- reg_addr |= phy_page;
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw,
- (u32)reg_addr,
- reg_data);
- if (ret_val)
- goto out;
- }
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
}
out:
@@ -1229,30 +1253,26 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
* @hw: pointer to the HW structure
- *
- * Resets the PHY
- * This is a function pointer entry point called by drivers
- * or other shared routines.
**/
-static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 reg;
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Allow time for h/w to get to a quiescent state after reset */
- mdelay(10);
+ if (e1000_check_reset_block(hw))
+ goto out;
/* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan) {
+ switch (hw->mac.type) {
+ case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
- return ret_val;
+ goto out;
+ break;
+ default:
+ break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1265,11 +1285,32 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
out:
- return 0;
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
+out:
+ return ret_val;
}
/**
@@ -1622,7 +1663,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.");
+ "SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -1671,7 +1712,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
- e_dbg("Flash controller busy, cannot get access");
+ e_dbg("Flash controller busy, cannot get access\n");
}
}
@@ -1822,7 +1863,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ "did not complete.\n");
break;
}
}
@@ -1908,18 +1949,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1975,8 +2012,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val) {
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
e_dbg("Flash commit failed.\n");
- nvm->ops.release(hw);
- goto out;
+ goto release;
}
/*
@@ -1987,18 +2023,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
+
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/*
* And invalidate the previously valid segment by setting
@@ -2008,10 +2041,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -2019,14 +2050,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
- e1000e_reload_nvm(hw);
- msleep(10);
+ if (!ret_val) {
+ e1000e_reload_nvm(hw);
+ msleep(10);
+ }
out:
if (ret_val)
@@ -2487,9 +2521,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
- }
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
@@ -2528,14 +2561,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
- /* Clear PHY Reset Asserted bit */
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- }
-
/*
- * PHY HW reset requires MAC CORE reset at the same
+ * Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
@@ -2549,39 +2576,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- /* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
- if (ctrl & E1000_CTRL_PHY_RST)
+ if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- e_dbg("Auto Read Done did not complete\n");
- }
- }
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
- e1e_rphy(hw, BM_WUC, &reg);
-
- ret_val = e1000_sw_lcd_config_ich8lan(hw);
- if (ret_val)
- goto out;
-
- if (hw->mac.type == e1000_pchlan) {
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
if (ret_val)
goto out;
}
+
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2748,8 +2752,6 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
ew32(RFCTL, reg);
-
- return;
}
/**
@@ -2799,6 +2801,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
ret_val = hw->phy.ops.write_reg(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 27),
hw->fc.pause_time);
@@ -3127,8 +3131,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -3265,33 +3267,50 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
/**
- * e1000_get_cfg_done_ich8lan - Read config done bit
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
* @hw: pointer to the HW structure
*
- * Read the management control register for the config done bit for
- * completion status. NOTE: silicon which is EEPROM-less will fail trying
- * to read the config done bit, so an error is *ONLY* logged and returns
- * 0. If we were to return with error, EEPROM-less silicon
- * would not be able to be reset or change link.
+ * Read the appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and the routine continues. If we were
+ * to return with error, EEPROM-less silicon would not be able to be reset
+ * or change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
+ s32 ret_val = 0;
u32 bank = 0;
+ u32 status;
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
+ e1000e_get_cfg_done(hw);
- if (status & E1000_STATUS_PHYRA)
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- else
- e_dbg("PHY Reset Asserted not set - needs delay\n");
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
}
- e1000e_get_cfg_done(hw);
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if ((hw->mac.type != e1000_ich10lan) &&
- (hw->mac.type != e1000_pchlan)) {
+ if (hw->mac.type <= e1000_ich9lan) {
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000e_phy_init_script_igp3(hw);
@@ -3300,11 +3319,11 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
/* Maybe we should do a basic PHY config */
e_dbg("EEPROM not present\n");
- return -E1000_ERR_CONFIG;
+ ret_val = -E1000_ERR_CONFIG;
}
}
- return 0;
+ return ret_val;
}
/**
@@ -3320,8 +3339,6 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c..a968e3a416a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
u32 status;
status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
- e_dbg("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
+ else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
- e_dbg("100 Mbs, ");
- } else {
+ else
*speed = SPEED_10;
- e_dbg("10 Mbs, ");
- }
- if (status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
- e_dbg("Full Duplex\n");
- } else {
+ else
*duplex = HALF_DUPLEX;
- e_dbg("Half Duplex\n");
- }
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
return 0;
}
@@ -2275,6 +2272,11 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
u32 hicr;
u8 i;
+ if (!(hw->mac.arc_subsystem_valid)) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
/* Check that the host interface is enabled. */
hicr = er32(HICR);
if ((hicr & E1000_HICR_EN) == 0) {
@@ -2518,10 +2520,11 @@ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
}
/**
- * e1000e_enable_mng_pass_thru - Enable processing of ARP's
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies that the hardware needs to leave the interface enabled so that frames can
+ * be directed to and from the management interface.
**/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -2531,11 +2534,10 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
manc = er32(MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
- return ret_val;
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
- if (hw->mac.arc_subsystem_valid) {
+ if (hw->mac.has_fwsm) {
fwsm = er32(FWSM);
factps = er32(FACTPS);
@@ -2543,16 +2545,28 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
((fwsm & E1000_FWSM_MODE_MASK) ==
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
ret_val = true;
- return ret_val;
+ goto out;
}
- } else {
- if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13))) {
ret_val = true;
- return ret_val;
+ goto out;
}
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
}
+out:
return ret_val;
}
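
The pass-through check above now has three paths (FWSM-based parts, 82574/82583 via an NVM word, everything else via MANC). The FWSM test, isolated as a sketch (the helper name is hypothetical; masks and e1000_mng_mode_pt come from the driver headers):

/* True when firmware reports pass-through mode and MNGCG is not set. */
static bool example_fwsm_pt_mode(u32 fwsm, u32 factps)
{
        return !(factps & E1000_FACTPS_MNGCG) &&
               ((fwsm & E1000_FWSM_MODE_MASK) ==
                (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT));
}
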
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb4..24507f3b8b1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -45,11 +47,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
};
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ struct e1000_rx_desc *rx_desc;
+ struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ printk(KERN_INFO "Rl[desc] [address 63:0 ] "
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)rx_desc;
+ printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
+ "%016llX %p",
+ i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->skb);
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
+ }
+ }
+
+exit:
+ return;
+}
+
/**
* e1000_desc_unused - calculate if we have unused descriptors
**/
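
The descriptor diagrams in e1000e_dump() above mirror the hardware layouts. As a reference, a sketch of the legacy transmit descriptor they describe; the field grouping follows the diagram and the usual driver convention, but it is shown for illustration only:

/* Legacy Tx descriptor: 64-bit buffer address plus two 32-bit command words. */
struct example_legacy_tx_desc {
        __le64 buffer_addr;             /* address of the transmit buffer */
        union {
                __le32 data;
                struct {
                        __le16 length;  /* bytes to send from the buffer */
                        u8 cso;         /* checksum offset */
                        u8 cmd;         /* EOP, RS, DEXT, ... */
                } flags;
        } lower;
        union {
                __le32 data;
                struct {
                        u8 status;      /* DD (descriptor done) in low bits */
                        u8 css;         /* checksum start */
                        __le16 special; /* VLAN tag on transmit */
                } fields;
        } upper;
};
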
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -190,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
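
The new test !(i & (E1000_RX_BUFFER_WRITE - 1)) batches tail-register writes: the doorbell is rung once every E1000_RX_BUFFER_WRITE descriptors (so that constant must be a power of two), with wmb() ordering the descriptor stores ahead of it. The idiom in isolation, independent of the driver (names and batch size are illustrative):

#include <linux/io.h>

#define EXAMPLE_BATCH 16        /* must be a power of two */

/* Fill 'count' descriptors, ringing the doorbell every EXAMPLE_BATCH entries. */
static void example_refill(void __iomem *tail_reg, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                /* ... write descriptor i here ... */
                if (!(i & (EXAMPLE_BATCH - 1))) {
                        wmb();                  /* descriptors before doorbell */
                        writel(i, tail_reg);
                }
        }
}
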
/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -366,10 +714,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
pci_disable_msi(adapter->pdev);
adapter->flags &= ~FLAG_MSI_ENABLED;
}
-
- return;
}
/**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/* Don't do anything; this is the system default */
break;
}
-
- return;
}
/**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 manc, manc2h;
+ u32 manc, manc2h, mdef, i, j;
if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
*/
manc |= E1000_MANC_EN_MNG2HOST;
manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << i);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
ew32(MANC2H, manc2h);
ew32(MANC, manc);
}
@@ -2524,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name, 55);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN),
- mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_set_multi(adapter->netdev);
e1000_restore_vlan(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->high_water = 0x5000;
fc->low_water = 0x3000;
}
+ fc->refresh_time = 0x1000;
} else {
if ((adapter->flags & FLAG_HAS_ERT) &&
(adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
- /* additional part of the flow-control workaround above */
- if (hw->mac.type == e1000_pchlan)
- ew32(FCRTV_PCH, 0x1000);
-
e1000_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2824,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
/* DMA latency requirement to workaround early-receive/jumbo issue */
if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
+ adapter->netdev->pm_qos_req =
+ pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* hardware has been reset, we need to reload some things */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
netif_wake_queue(adapter->netdev);
/* fire a link change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
return 0;
}
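
The pm_qos hunks in this file (e1000_configure_rx() and e1000e_up() above, e1000e_down() below) replace the old name-keyed pm_qos_*_requirement() calls with handle-based requests: a request is added once and its handle stored in netdev->pm_qos_req, updated while running, and removed on teardown. A condensed sketch of that lifecycle using the calls exactly as they appear in these hunks:

        /* add once when the interface comes up (e1000e_up) */
        adapter->netdev->pm_qos_req =
                pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
                                   PM_QOS_DEFAULT_VALUE);

        /* tighten or relax while running (e1000_configure_rx) */
        pm_qos_update_request(adapter->netdev->pm_qos_req, 55);
        pm_qos_update_request(adapter->netdev->pm_qos_req,
                              PM_QOS_DEFAULT_VALUE);

        /* release when the interface goes down (e1000e_down) */
        pm_qos_remove_request(adapter->netdev->pm_qos_req);
        adapter->netdev->pm_qos_req = NULL;
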
@@ -2887,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
- if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name);
+ if (adapter->flags & FLAG_HAS_ERT) {
+ pm_qos_remove_request(
+ adapter->netdev->pm_qos_req);
+ adapter->netdev->pm_qos_req = NULL;
+ }
/*
* TODO: for power management, we could drop the link and
@@ -3083,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -3101,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3109,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/*
- * If AMT is enabled, let the firmware know that the network
- * interface is now open
- */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
-
- /*
* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
@@ -3149,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
/* fire a link status change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -3162,6 +3551,7 @@ err_setup_rx:
e1000e_free_tx_resources(adapter);
err_setup_tx:
e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
return err;
}
@@ -3180,11 +3570,17 @@ err_setup_tx:
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
e1000_power_down_phy(adapter);
- e1000_free_irq(adapter);
e1000e_free_tx_resources(adapter);
e1000e_free_rx_resources(adapter);
@@ -3206,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
/**
@@ -3550,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
e1000e_enable_receives(adapter);
goto link_up;
}
@@ -3561,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
}
}
@@ -3705,6 +4113,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else falls between 2000 and 8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries)
ew32(ICS, adapter->rx_ring->ims_val);
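
A worked example of the simple-mode arithmetic above, with purely illustrative counter values: if gotc = 50,000,000 and gorc = 10,000,000 bytes since the last watchdog run, then goc = 6000, dif = 4000, itr = 4000 * 6000 / 6000 + 2000 = 6000 interrupts/s, and the ITR register is written with 1000000000 / (6000 * 256) = 651 in the hardware's 256 ns units:

        /* Illustrative numbers only, not taken from real traffic. */
        u32 gotc = 50000000, gorc = 10000000;
        u32 goc = (gotc + gorc) / 10000;                        /* 6000 */
        u32 dif = (gotc - gorc) / 10000;                        /* 4000 */
        u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;   /* 6000 */
        u32 itr_reg = 1000000000 / (itr * 256);                 /* 651  */
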
@@ -3879,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -3890,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3925,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3938,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
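
The segs/bytecount values stored above account for the headers TSO will replicate on the wire. For example (illustrative numbers): a 60,000-byte skb with a 66-byte linear header that is split into gso_segs = 42 segments is counted as (42 - 1) * 66 + 60000 = 62,706 transmitted bytes:

        /* Mirrors the accounting in the hunk above; values are made up. */
        unsigned int segs = 42;                 /* skb_shinfo(skb)->gso_segs */
        unsigned int headlen = 66;              /* skb_headlen(skb) */
        unsigned int bytecount = (segs - 1) * headlen + 60000;  /* 62706 */
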
@@ -4105,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4155,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
}
}
@@ -4241,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
e1000e_reinit_locked(adapter);
}
@@ -4475,13 +4908,15 @@ out:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
- u32 wufc = adapter->wol;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
int retval = 0;
netif_device_detach(netdev);
@@ -4651,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
+ return !!adapter->tx_ring->buffer_info;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4677,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
err = e1000_request_irq(adapter);
@@ -4729,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
e1000e_reset(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev))
e1000e_up(adapter);
@@ -4746,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
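+ /*
+  * Returning -EBUSY keeps the device from being runtime-suspended
+  * right away; when no link is present, the suspend scheduled above
+  * retries after roughly a second.
+  */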
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
bool wake = false;
- __e1000_shutdown(pdev, &wake);
+ __e1000_shutdown(pdev, &wake, false);
if (system_state == SYSTEM_POWER_OFF)
e1000_complete_shutdown(pdev, false, wake);
@@ -4826,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
+ pdev->state_saved = true;
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4855,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev)) {
if (e1000e_up(adapter)) {
@@ -4968,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -5008,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->irq = pdev->irq;
+
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
hw = &adapter->hw;
@@ -5228,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
return 0;
err_register:
@@ -5270,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ pm_runtime_get_sync(&pdev->dev);
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
*/
- set_bit(__E1000_DOWN, &adapter->state);
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -5289,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -5380,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5390,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -5414,10 +5933,9 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_name, e1000e_driver_version);
- printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
- e1000e_driver_name);
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae..a150e48a117 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
@@ -286,7 +286,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
default:
/*
* Save the setting, because the dynamic bits
@@ -381,7 +386,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Mode */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
.err = "defaulting to 2 (MSI-X)",
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* CRC Stripping */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Write-protect NVM */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 7f3ceb9dad6..b4ac82d51b2 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -3116,9 +3116,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex.
**/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
@@ -3137,23 +3135,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
-
- ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
- if (ret_val)
- goto out;
-
- e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
-
udelay(1);
if (phy->autoneg_wait_to_complete) {
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index ca93c9a9d37..06e72fbef86 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -328,7 +328,6 @@ e21_reset_8390(struct net_device *dev)
/* Set up the ASIC registers, just in case something changed them. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. We put the 2k window so the header page
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdbfa00..8d97f168f01 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s) \n");
+ printk(KERN_DEBUG "port(s)\n");
Word = lp->word[6];
printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
/* Grab the region so we can find another board if autoIRQ fails. */
if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
ioaddr);
return -EBUSY;
}
@@ -1161,8 +1161,7 @@ static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
/* we won't wake queue here because we're out of space */
dev->stats.tx_dropped++;
else {
- dev->stats.tx_bytes+=skb->len;
- dev->trans_start = jiffies;
+ dev->stats.tx_bytes+=skb->len;
netif_wake_queue(dev);
}
@@ -1286,7 +1285,7 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int mc_count = netdev_mc_count(dev);
if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1330,8 @@ set_multicast_list(struct net_device *dev)
outw(0, ioaddr + IO_PORT);
outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- netdev_for_each_mc_addr(dmi, dev) {
- eaddrs = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (unsigned short *) ha->addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b80ea..12c37d26410 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -543,7 +543,7 @@ static void unstick_cu(struct net_device *dev)
if (lp->started)
{
- if (time_after(jiffies, dev->trans_start + 50))
+ if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
{
if (lp->tx_link==lp->last_tx_restart)
{
@@ -1018,7 +1018,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
outw(lp->tx_head+0x16, ioaddr + DATAPORT);
outw(0, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
outw(lp->tx_head, ioaddr + DATAPORT);
@@ -1570,12 +1570,11 @@ static void eexp_hw_init586(struct net_device *dev)
#if NET_DEBUG > 6
printk("%s: leaving eexp_hw_init586()\n", dev->name);
#endif
- return;
}
static void eexp_setup_filter(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned short ioaddr = dev->base_addr;
int count = netdev_mc_count(dev);
int i;
@@ -1588,8 +1587,8 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- unsigned short *data = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned short *data = (unsigned short *) ha->addr;
if (i == count)
break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a95099..0630980a272 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0102"
+#define DRV_VERSION "EHEA_0103"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9ff09..f547894ff48 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -122,8 +122,11 @@ static struct of_device_id ehea_device_table[] = {
MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct of_platform_driver ehea_driver = {
- .name = "ehea",
- .match_table = ehea_device_table,
+ .driver = {
+ .name = "ehea",
+ .owner = THIS_MODULE,
+ .of_match_table = ehea_device_table,
+ },
.probe = ehea_probe_adapter,
.remove = ehea_remove,
};
@@ -791,11 +794,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Send Completion Error: Resetting port");
+ ehea_error("Bad send completion status=0x%04X",
+ cqe->status);
+
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
- ehea_schedule_port_reset(pr->port);
- break;
+
+ if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(pr->port);
+ break;
+ }
}
if (netif_msg_tx_done(pr->port))
@@ -814,7 +823,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
quota--;
cqe = ehea_poll_cq(send_cq);
- };
+ }
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
@@ -901,6 +910,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
struct ehea_eqe *eqe;
struct ehea_qp *qp;
u32 qp_token;
+ u64 resource_type, aer, aerr;
+ int reset_port = 0;
eqe = ehea_poll_eq(port->qp_eq);
@@ -910,11 +921,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
- ehea_error_data(port->adapter, qp->fw_handle);
+
+ resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+ &aer, &aerr);
+
+ if (resource_type == EHEA_AER_RESTYPE_QP) {
+ if ((aer & EHEA_AER_RESET_MASK) ||
+ (aerr & EHEA_AERR_RESET_MASK))
+ reset_port = 1;
+ } else
+ reset_port = 1; /* Reset in case of CQ or EQ error */
+
eqe = ehea_poll_eq(port->qp_eq);
}
- ehea_schedule_port_reset(port);
+ if (reset_port) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(port);
+ }
return IRQ_HANDLED;
}
@@ -1618,7 +1642,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
{
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
int headersize;
/* Packet is TCP with TSO enabled */
@@ -1629,7 +1653,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
*/
headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
- skb_data_size = skb->len - skb->data_len;
+ skb_data_size = skb_headlen(skb);
if (skb_data_size >= headersize) {
/* copy immediate data */
@@ -1651,7 +1675,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
static void write_swqe2_nonTSO(struct sk_buff *skb,
struct ehea_swqe *swqe, u32 lkey)
{
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -1860,7 +1884,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
port->promisc = enable;
out:
free_page((unsigned long)cb7);
- return;
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1967,7 +1990,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
- struct dev_mc_list *k_mcl_entry;
+ struct netdev_hw_addr *ha;
int ret;
if (dev->flags & IFF_PROMISC) {
@@ -1998,13 +2021,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out;
}
- netdev_for_each_mc_addr(k_mcl_entry, dev)
- ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ ehea_add_multicast_entry(port, ha->addr);
}
out:
ehea_update_bcmc_registrations();
- return;
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2108,8 +2130,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
} else {
/* first copy data from the skb->data buffer ... */
skb_copy_from_linear_data(skb, imm_data,
- skb->len - skb->data_len);
- imm_data += skb->len - skb->data_len;
+ skb_headlen(skb));
+ imm_data += skb_headlen(skb);
/* ... then copy data from the fragments */
for (i = 0; i < nfrags; i++) {
@@ -2220,7 +2242,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
@@ -2317,7 +2339,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
ehea_error("modify_ehea_port failed");
out:
free_page((unsigned long)cb1);
- return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2860,7 +2881,6 @@ static void ehea_reset_port(struct work_struct *work)
netif_wake_queue(dev);
out:
mutex_unlock(&port->port_lock);
- return;
}
static void ehea_rereg_mrs(struct work_struct *work)
@@ -2868,7 +2888,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
int ret, i;
struct ehea_adapter *adapter;
- mutex_lock(&dlpar_mem_lock);
ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2957,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
ehea_info("re-initializing driver complete");
out:
- mutex_unlock(&dlpar_mem_lock);
return;
}
@@ -3035,7 +3053,7 @@ static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
static void __devinit logical_port_release(struct device *dev)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
- of_node_put(port->ofdev.node);
+ of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
@@ -3043,7 +3061,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
{
int ret;
- port->ofdev.node = of_node_get(dn);
+ port->ofdev.dev.of_node = of_node_get(dn);
port->ofdev.dev.parent = &port->adapter->ofdev->dev;
port->ofdev.dev.bus = &ibmebus_bus_type;
@@ -3210,7 +3228,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
const u32 *dn_log_port_id;
int i = 0;
- lhea_dn = adapter->ofdev->node;
+ lhea_dn = adapter->ofdev->dev.of_node;
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3238,7 +3256,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
ehea_remove_adapter_mr(adapter);
i++;
- };
+ }
return 0;
}
@@ -3249,7 +3267,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
struct device_node *eth_dn = NULL;
const u32 *dn_log_port_id;
- lhea_dn = adapter->ofdev->node;
+ lhea_dn = adapter->ofdev->dev.of_node;
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3257,7 +3275,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
if (dn_log_port_id)
if (*dn_log_port_id == logical_port_id)
return eth_dn;
- };
+ }
return NULL;
}
@@ -3379,7 +3397,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
const u64 *adapter_handle;
int ret;
- if (!dev || !dev->node) {
+ if (!dev || !dev->dev.of_node) {
ehea_error("Invalid ibmebus device probed");
return -EINVAL;
}
@@ -3395,14 +3413,14 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
adapter->ofdev = dev;
- adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
+ adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
NULL);
if (adapter_handle)
adapter->handle = *adapter_handle;
if (!adapter->handle) {
dev_err(&dev->dev, "failed getting handle for adapter"
- " '%s'\n", dev->node->full_name);
+ " '%s'\n", dev->dev.of_node->full_name);
ret = -ENODEV;
goto out_free_ad;
}
@@ -3521,7 +3539,14 @@ void ehea_crash_handler(void)
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ int ret = NOTIFY_BAD;
struct memory_notify *arg = data;
+
+ if (!mutex_trylock(&dlpar_mem_lock)) {
+ ehea_info("ehea_mem_notifier must not be called parallelized");
+ goto out;
+ }
+
switch (action) {
case MEM_CANCEL_OFFLINE:
ehea_info("memory offlining canceled");
@@ -3530,14 +3555,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
ehea_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
case MEM_GOING_OFFLINE:
ehea_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
default:
@@ -3545,8 +3570,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
}
ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
- return NOTIFY_OK;
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+out:
+ return ret;
}
static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e5636..89128b6373e 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
int ehea_destroy_cq(struct ehea_cq *cq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!cq)
return 0;
hcp_epas_dtor(&cq->epas);
hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(cq->adapter, cq->fw_handle);
+ ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
int ehea_destroy_eq(struct ehea_eq *eq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!eq)
return 0;
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(eq->adapter, eq->fw_handle);
+ ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
int ehea_destroy_qp(struct ehea_qp *qp)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!qp)
return 0;
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(qp->adapter, qp->fw_handle);
+ ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
if (length > EHEA_PAGESIZE)
length = EHEA_PAGESIZE;
- if (type == 0x8) /* Queue Pair */
+ if (type == EHEA_AER_RESTYPE_QP)
ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
"port=%llX", resource, data[6], data[12], data[22]);
-
- if (type == 0x4) /* Completion Queue */
+ else if (type == EHEA_AER_RESTYPE_CQ)
ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
-
- if (type == 0x3) /* Event Queue */
+ else if (type == EHEA_AER_RESTYPE_EQ)
ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
ehea_dump(data, length, "error data");
}
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr)
{
unsigned long ret;
u64 *rblock;
+ u64 type = 0;
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
ehea_error("Cannot allocate rblock memory.");
- return;
+ goto out;
}
- ret = ehea_h_error_data(adapter->handle,
- res_handle,
- rblock);
+ ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
- if (ret == H_R_STATE)
- ehea_error("No error data is available: %llX.", res_handle);
- else if (ret == H_SUCCESS)
+ if (ret == H_SUCCESS) {
+ type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ *aer = rblock[6];
+ *aerr = rblock[12];
print_error_data(rblock);
- else
+ } else if (ret == H_R_STATE) {
+ ehea_error("No error data available: %llX.", res_handle);
+ } else
ehea_error("Error data could not be fetched: %llX", res_handle);
free_page((unsigned long)rblock);
+out:
+ return type;
}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a1..882c50c9c34 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
+/* Defines which bad send cqe status values lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
struct ehea_cqe {
u64 wr_id; /* work request ID from WQE */
u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_AER_RESTYPE_QP 0x8
+#define EHEA_AER_RESTYPE_CQ 0x4
+#define EHEA_AER_RESTYPE_EQ 0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
+
struct ehea_eqe {
u64 entry;
};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
int ehea_rem_mr(struct ehea_mr *mr);
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr);
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index ff27f728fd9..112c5aa9af7 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1293,8 +1293,6 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
*/
netif_stop_queue(dev);
- /* save the timestamp */
- priv->netdev->trans_start = jiffies;
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
schedule_work(&priv->tx_work);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 391c3bce5b7..e7b6c31880b 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o
+ enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612..337d1943af4 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
- u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- u16 q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d..85f2a2e7030 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
#include "vnic_rss.h"
#define DRV_NAME "enic"
-#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.241a"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION "1.3.1.1-pp"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -74,6 +74,13 @@ struct enic_msix_entry {
void *devid;
};
+struct enic_port_profile {
+ u8 request;
+ char name[PORT_PROFILE_MAX];
+ u8 instance_uuid[PORT_UUID_MAX];
+ u8 host_uuid[PORT_UUID_MAX];
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -95,6 +102,7 @@ struct enic {
u32 port_mtu;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
+ struct enic_port_profile pp;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b..6586b5c7e4b 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -40,6 +41,7 @@
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
+#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
@@ -49,10 +51,12 @@
#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
{ 0, } /* end of table */
};
@@ -113,6 +117,11 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static int enic_is_dynamic(struct enic *enic)
+{
+ return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -810,26 +819,90 @@ static void enic_reset_mcaddrs(struct enic *enic)
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic_is_dynamic(enic)) {
+ if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ } else {
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ }
memcpy(netdev->dev_addr, addr, netdev->addr_len);
return 0;
}
+static int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct sockaddr *saddr = p;
+ char *addr = saddr->sa_data;
+ int err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_del_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ err = enic_set_mac_addr(netdev, addr);
+ if (err)
+ return err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_add_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address(struct net_device *netdev, void *p)
+{
+ return -EOPNOTSUPP;
+}
+
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
@@ -852,10 +925,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
*/
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == mc_count)
break;
- memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
+ memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -922,6 +995,226 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
+static int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
+ char *name, u8 *instance_uuid, u8 *host_uuid)
+{
+ struct vic_provinfo *vp;
+ u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ u8 *uuid;
+ char uuid_str[38];
+ static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X-%02X%02X%02X%02X%0X%02X";
+ int err;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(name) + 1, name);
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, mac);
+
+ if (instance_uuid) {
+ uuid = instance_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7],
+ uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (host_uuid) {
+ uuid = host_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7],
+ uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ err = enic_vnic_dev_deinit(enic);
+ if (err)
+ goto err_out;
+
+ memset(&enic->pp, 0, sizeof(enic->pp));
+
+ err = enic_dev_init_prov(enic, vp);
+ if (err)
+ goto err_out;
+
+ enic->pp.request = request;
+ memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
+ if (instance_uuid)
+ memcpy(enic->pp.instance_uuid,
+ instance_uuid, PORT_UUID_MAX);
+ if (host_uuid)
+ memcpy(enic->pp.host_uuid,
+ host_uuid, PORT_UUID_MAX);
+
+err_out:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic)
+{
+ memset(&enic->pp, 0, sizeof(enic->pp));
+ return enic_vnic_dev_deinit(enic);
+}
+
+static int enic_set_vf_port(struct net_device *netdev, int vf,
+ struct nlattr *port[])
+{
+ struct enic *enic = netdev_priv(netdev);
+ char *name = NULL;
+ u8 *instance_uuid = NULL;
+ u8 *host_uuid = NULL;
+ u8 request = PORT_REQUEST_DISASSOCIATE;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ if (port[IFLA_PORT_REQUEST])
+ request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ switch (request) {
+ case PORT_REQUEST_ASSOCIATE:
+
+ /* If the interface mac addr hasn't been assigned,
+ * assign a random mac addr before setting port-
+ * profile.
+ */
+
+ if (is_zero_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+
+ if (port[IFLA_PORT_PROFILE])
+ name = nla_data(port[IFLA_PORT_PROFILE]);
+
+ if (port[IFLA_PORT_INSTANCE_UUID])
+ instance_uuid =
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]);
+
+ if (port[IFLA_PORT_HOST_UUID])
+ host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
+
+ return enic_set_port_profile(enic, request,
+ netdev->dev_addr, name,
+ instance_uuid, host_uuid);
+
+ case PORT_REQUEST_DISASSOCIATE:
+
+ return enic_unset_port_profile(enic);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int enic_get_vf_port(struct net_device *netdev, int vf,
+ struct sk_buff *skb)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int err, error, done;
+ u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ err = enic_dev_init_done(enic, &done, &error);
+
+ if (err)
+ return err;
+
+ switch (error) {
+ case ERR_SUCCESS:
+ if (!done)
+ response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ case ERR_EINVAL:
+ response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ default:
+ response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
+ NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
+ NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+ enic->pp.name);
+ NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+ enic->pp.instance_uuid);
+ NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+ enic->pp.host_uuid);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -1440,9 +1733,7 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- spin_lock(&enic->devcmd_lock);
- enic_add_station_addr(enic);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_add_station_addr(enic);
enic_set_multicast_list(netdev);
netif_wake_queue(netdev);
@@ -1489,6 +1780,8 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ enic_dev_del_station_addr(enic);
+
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
if (err)
@@ -1774,14 +2067,34 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static const struct net_device_ops enic_netdev_dynamic_ops = {
+ .ndo_open = enic_open,
+ .ndo_stop = enic_stop,
+ .ndo_start_xmit = enic_hard_start_xmit,
+ .ndo_get_stats = enic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address_dynamic,
+ .ndo_change_mtu = enic_change_mtu,
+ .ndo_vlan_rx_register = enic_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
+ .ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = enic_poll_controller,
+#endif
+};
+
static const struct net_device_ops enic_netdev_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2010,11 +2323,13 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
- err = vnic_dev_init(enic->vdev, 0);
- if (err) {
- printk(KERN_ERR PFX
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ if (!enic_is_dynamic(enic)) {
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ printk(KERN_ERR PFX
+ "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_close;
+ }
}
err = enic_dev_init(enic);
@@ -2054,12 +2369,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
- netdev->netdev_ops = &enic_netdev_ops;
+ if (enic_is_dynamic(enic))
+ netdev->netdev_ops = &enic_netdev_dynamic_ops;
+ else
+ netdev->netdev_ops = &enic_netdev_ops;
+
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
- netdev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (ENIC_SETTING(enic, TXCSUM))
netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 02839bf0fe8..9b18840cba9 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -103,11 +103,6 @@ int enic_get_vnic_config(struct enic *enic)
return 0;
}
-void enic_add_station_addr(struct enic *enic)
-{
- vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-}
-
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_add_addr(enic->vdev, addr);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index abc19741ab0..494664f7fcc 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -131,7 +131,6 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
struct enic;
int enic_get_vnic_config(struct enic *);
-void enic_add_station_addr(struct enic *enic);
void enic_add_multicast_addr(struct enic *enic, u8 *addr);
void enic_del_multicast_addr(struct enic *enic, u8 *addr);
void enic_add_vlan(struct enic *enic, u16 vlanid);
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de71014..2b3e16db5c8 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -530,7 +530,7 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
printk(KERN_ERR "Can't set packet filter\n");
}
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -543,9 +543,11 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
+
+ return err;
}
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -558,6 +560,8 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
+
+ return err;
}
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
@@ -574,22 +578,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
return err;
}
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
int wait = 1000;
int r;
- if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
- if (!vdev->notify)
- return -ENOMEM;
- memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
- }
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
- a0 = vdev->notify_pa;
+ a0 = (u64)notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
@@ -598,7 +598,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return r;
}
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr;
+ dma_addr_t notify_pa;
+
+ if (vdev->notify || vdev->notify_pa) {
+ printk(KERN_ERR "notify block %p still allocated",
+ vdev->notify);
+ return -EINVAL;
+ }
+
+ notify_addr = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa);
+ if (!notify_addr)
+ return -ENOMEM;
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
@@ -608,9 +628,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
vdev->notify_sz = 0;
}
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify) {
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ vnic_dev_notify_unsetcmd(vdev);
+}
+
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
@@ -652,6 +686,56 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return r;
}
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ *done = 0;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
+ if (ret)
+ return ret;
+
+ *done = (a0 == 0);
+
+ *err = (a0 == 0) ? a1 : 0;
+
+ return 0;
+}
+
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
+{
+ u64 a0, a1 = len;
+ int wait = 1000;
+ u64 prov_pa;
+ void *prov_buf;
+ int ret;
+
+ prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ if (!prov_buf)
+ return -ENOMEM;
+
+ memcpy(prov_buf, buf, len);
+
+ a0 = prov_pa;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
+
+ pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+
+ return ret;
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
+}
+
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb35a5..caccce36957 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -103,11 +103,14 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
@@ -121,6 +124,9 @@ int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efaf9da..cc580cfec41 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
new file mode 100644
index 00000000000..d769772998c
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "vnic_vic.h"
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
+{
+ struct vic_provinfo *vp;
+
+ if (!oui)
+ return NULL;
+
+ vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+ if (!vp)
+ return NULL;
+
+ memcpy(vp->oui, oui, sizeof(vp->oui));
+ vp->type = type;
+ vp->length = htonl(sizeof(vp->num_tlvs));
+
+ return vp;
+}
+
+void vic_provinfo_free(struct vic_provinfo *vp)
+{
+ kfree(vp);
+}
+
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value)
+{
+ struct vic_provinfo_tlv *tlv;
+
+ if (!vp || !value)
+ return -EINVAL;
+
+ if (ntohl(vp->length) + sizeof(*tlv) + length >
+ VIC_PROVINFO_MAX_TLV_DATA)
+ return -ENOMEM;
+
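+ /*
+  * vp->length covers the num_tlvs field plus all TLVs added so far,
+  * so subtract sizeof(num_tlvs) to find the append offset in tlv[].
+  */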
+ tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
+ ntohl(vp->length) - sizeof(vp->num_tlvs));
+
+ tlv->type = htons(type);
+ tlv->length = htons(length);
+ memcpy(tlv->value, value, length);
+
+ vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
+ vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+
+ return 0;
+}
+
+size_t vic_provinfo_size(struct vic_provinfo *vp)
+{
+ return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
+}
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
new file mode 100644
index 00000000000..085c2a274cb
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_VIC_H_
+#define _VNIC_VIC_H_
+
+/* Note: All integer fields in NETWORK byte order */
+
+/* Note: String field lengths include null char */
+
+#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
+#define VIC_PROVINFO_LINUX_TYPE 0x2
+
+enum vic_linux_prov_tlv_type {
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
+ VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+};
+
+struct vic_provinfo {
+ u8 oui[3]; /* OUI of data provider */
+ u8 type; /* provider-specific type */
+ u32 length; /* length of data below */
+ u32 num_tlvs; /* number of tlvs */
+ struct vic_provinfo_tlv {
+ u16 type;
+ u16 length;
+ u8 value[0];
+ } tlv[0];
+} __attribute__ ((packed));
+
+#define VIC_PROVINFO_MAX_DATA 1385
+#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
+ sizeof(struct vic_provinfo))
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type);
+void vic_provinfo_free(struct vic_provinfo *vp);
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value);
+size_t vic_provinfo_size(struct vic_provinfo *vp);
+
+#endif /* _VNIC_VIC_H_ */
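
For reference, a minimal sketch of how a caller might drive the vic_provinfo helpers declared above. The function name, the profile string and the error handling are illustrative only; the real enic caller is not part of this hunk.

/* Illustrative caller for the vic_provinfo_* helpers (not part of the patch). */
static int example_build_provinfo(void)
{
	u8 oui[] = VIC_PROVINFO_CISCO_OUI;
	char name[] = "example-port-profile";	/* hypothetical profile name */
	struct vic_provinfo *vp;
	int err;

	vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
	if (!vp)
		return -ENOMEM;

	/* string TLV lengths include the terminating NUL, per the note above */
	err = vic_provinfo_add_tlv(vp, VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
				   sizeof(name), name);
	if (!err)
		pr_info("provinfo takes %zu bytes\n", vic_provinfo_size(vp));

	vic_provinfo_free(vp);
	return err;
}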
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f984357f5..1378afbdfe6 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201e82..6838dfc9ef2 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -652,7 +652,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
break;
}
- return;
}
@@ -840,7 +839,6 @@ static void epic_restart(struct net_device *dev)
" interrupt %4.4x.\n",
dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
(int)inl(ioaddr + INTSTAT));
- return;
}
static void check_media(struct net_device *dev)
@@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev)
outl(TxQueued, dev->base_addr + COMMAND);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ep->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
@@ -958,7 +956,6 @@ static void epic_init_ring(struct net_device *dev)
(i+1)*sizeof(struct epic_tx_desc);
}
ep->tx_ring[i-1].next = ep->tx_ring_dma;
- return;
}
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1006,7 +1003,6 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
- dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
@@ -1399,12 +1395,12 @@ static void set_rx_mode(struct net_device *dev)
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit_nr =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
}
}
@@ -1414,7 +1410,6 @@ static void set_rx_mode(struct net_device *dev)
outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
}
- return;
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
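
This hunk, like the equivalent ones below in ethoc.c, ewrk3.c, fealnx.c, fec.c and fec_mpc52xx.c, converts the multicast walk from struct dev_mc_list/dmi_addr to netdev_for_each_mc_addr() over struct netdev_hw_addr. A condensed sketch of the new pattern, assuming a 64-bit CRC hash filter similar to epic100's; the helper name and filter layout are illustrative, not part of the patch.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

/* Illustrative only: fold the device's multicast list into a 64-bit
 * hash filter, one bit per ether_crc_le() bucket. */
static void example_build_mc_hash(struct net_device *dev, u16 mc_filter[4])
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 4 * sizeof(u16));
	netdev_for_each_mc_addr(ha, dev) {
		unsigned int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;

		mc_filter[bit_nr >> 4] |= 1 << (bit_nr & 0xf);
	}
}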
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index b34a2ddeef4..dda2c7944da 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -288,7 +288,7 @@ static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return eql_s_master_cfg(dev, ifr->ifr_data);
default:
return -EOPNOTSUPP;
- };
+ }
}
/* queue->lock must be held */
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 5569f2ffb62..0ba5e7b9058 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -319,8 +319,6 @@ static void es_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + ES_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index d4e24f08b3b..874973f558e 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1027,7 +1027,7 @@ static void eth16i_timeout(struct net_device *dev)
inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
"IRQ conflict" : "network cable problem");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Let's dump all registers */
if(eth16i_debug > 0) {
@@ -1047,7 +1047,7 @@ static void eth16i_timeout(struct net_device *dev)
}
dev->stats.tx_errors++;
eth16i_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
netif_wake_queue(dev);
}
@@ -1109,7 +1109,6 @@ static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d92503226..6ed2df14ec8 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -174,6 +174,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
+ * @io_region_size: I/O memory region size
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -193,6 +194,7 @@ struct ethoc {
void __iomem *iobase;
void __iomem *membase;
int dma_alloc;
+ resource_size_t io_region_size;
unsigned int num_tx;
unsigned int cur_tx;
@@ -756,7 +758,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -784,8 +786,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- netdev_for_each_mc_addr(mc, dev) {
- u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -851,7 +853,6 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
spin_unlock_irq(&priv->lock);
out:
dev_kfree_skb(skb);
@@ -944,6 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dma_alloc = 0;
+ priv->io_region_size = mmio->end - mmio->start + 1;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
resource_size(mmio));
@@ -1040,7 +1042,6 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->features |= 0;
/* setup NAPI */
- memset(&priv->napi, 0, sizeof(priv->napi));
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
spin_lock_init(&priv->rx_lock);
@@ -1049,20 +1050,34 @@ static int ethoc_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret < 0) {
dev_err(&netdev->dev, "failed to register interface\n");
- goto error;
+ goto error2;
}
goto out;
+error2:
+ netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
- if (priv->dma_alloc)
- dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
- netdev->mem_start);
+ if (priv) {
+ if (priv->dma_alloc)
+ dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+ netdev->mem_start);
+ else if (priv->membase)
+ devm_iounmap(&pdev->dev, priv->membase);
+ if (priv->iobase)
+ devm_iounmap(&pdev->dev, priv->iobase);
+ }
+ if (mem)
+ devm_release_mem_region(&pdev->dev, mem->start,
+ mem->end - mem->start + 1);
+ if (mmio)
+ devm_release_mem_region(&pdev->dev, mmio->start,
+ mmio->end - mmio->start + 1);
free_netdev(netdev);
out:
return ret;
@@ -1080,6 +1095,7 @@ static int ethoc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (netdev) {
+ netif_napi_del(&priv->napi);
phy_disconnect(priv->phy);
priv->phy = NULL;
@@ -1091,6 +1107,14 @@ static int ethoc_remove(struct platform_device *pdev)
if (priv->dma_alloc)
dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
netdev->mem_start);
+ else {
+ devm_iounmap(&pdev->dev, priv->membase);
+ devm_release_mem_region(&pdev->dev, netdev->mem_start,
+ netdev->mem_end - netdev->mem_start + 1);
+ }
+ devm_iounmap(&pdev->dev, priv->iobase);
+ devm_release_mem_region(&pdev->dev, netdev->base_addr,
+ priv->io_region_size);
unregister_netdev(netdev);
free_netdev(netdev);
}
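
The ethoc changes above add the missing netif_napi_del() on the register_netdev() failure path and in ethoc_remove(). A stripped-down probe skeleton showing that pairing, with placeholder example_* names standing in for the driver's own; this is a sketch, not the ethoc code.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

/* Illustrative skeleton: every netif_napi_add() must be undone with
 * netif_napi_del() on failure and removal, as the hunks above enforce. */
struct example_priv {
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	napi_complete(napi);
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct example_priv));
	struct example_priv *priv;
	int ret;

	if (!ndev)
		return -ENOMEM;
	priv = netdev_priv(ndev);

	netif_napi_add(ndev, &priv->napi, example_poll, 64);

	ret = register_netdev(ndev);
	if (ret) {
		netif_napi_del(&priv->napi);	/* undo in reverse order */
		free_netdev(ndev);
	}
	return ret;
}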
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3a9d6..380d0614a89 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -757,7 +757,7 @@ static void ewrk3_timeout(struct net_device *dev)
*/
ENABLE_IRQs;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -862,7 +862,6 @@ static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq (&lp->hw_lock);
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/* Check for free resources: stop Tx queue if there are none */
@@ -1169,7 +1168,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,8 +1212,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
@@ -1370,8 +1369,6 @@ static void __init EthwrkSignature(char *name, char *eeprom_image)
name[EWRK3_STRLEN] = '\0';
} else
name[0] = '\0';
-
- return;
}
/*
@@ -1776,8 +1773,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case EWRK3_SET_MCA: /* Set a multicast address */
if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > 1024)
- {
+ if (ioc->len > HASH_TABLE_LEN) {
status = -EINVAL;
break;
}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae5197f0..15f4f8d3d46 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1233,7 +1233,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
@@ -1374,7 +1374,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
++np->really_tx_count;
iowrite32(0, np->mem + TXPDR);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_OK;
@@ -1791,12 +1790,12 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
- bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
}
rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9b4e8f797a7..ddf7a86cd46 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
#include <asm/cacheflush.h>
@@ -61,7 +63,6 @@
* Define the fixed address of the FEC hardware.
*/
#if defined(CONFIG_M5272)
-#define HAVE_mii_link_interrupt
static unsigned char fec_mac_default[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +87,6 @@ static unsigned char fec_mac_default[] = {
#endif
#endif /* CONFIG_M5272 */
-/* Forward declarations of some structures to support different PHYs */
-
-typedef struct {
- uint mii_data;
- void (*funct)(uint mii_reg, struct net_device *dev);
-} phy_cmd_t;
-
-typedef struct {
- uint id;
- char *name;
-
- const phy_cmd_t *config;
- const phy_cmd_t *startup;
- const phy_cmd_t *ack_int;
- const phy_cmd_t *shutdown;
-} phy_info_t;
-
/* The number of Tx and Rx buffers. These are allocated from the page
* pool. The code may assume these are power of two, so it is best
* to keep them that size.
@@ -189,29 +173,22 @@ struct fec_enet_private {
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- /* hold while accessing the mii_list_t() elements */
- spinlock_t mii_lock;
-
- uint phy_id;
- uint phy_id_done;
- uint phy_status;
- uint phy_speed;
- phy_info_t const *phy;
- struct work_struct phy_task;
- uint sequence_done;
- uint mii_phy_task_queued;
+ struct platform_device *pdev;
- uint phy_addr;
+ int opened;
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
int index;
- int opened;
int link;
- int old_link;
int full_duplex;
};
-static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +196,20 @@ static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
-/* MII processing. We keep this as simple as possible. Requests are
- * placed on the list (if there is room). When the request is finished
- * by the MII, an optional function may be called.
- */
-typedef struct mii_list {
- uint mii_regval;
- void (*mii_func)(uint val, struct net_device *dev);
- struct mii_list *mii_next;
-} mii_list_t;
-
-#define NMII 20
-static mii_list_t mii_cmds[NMII];
-static mii_list_t *mii_free;
-static mii_list_t *mii_head;
-static mii_list_t *mii_tail;
-
-static int mii_queue(struct net_device *dev, int request,
- void (*func)(uint, struct net_device *));
-
-/* Make MII read/write commands for the FEC */
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
- (VAL & 0xffff))
-#define mk_mii_end 0
+#define FEC_MII_TIMEOUT 10000
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
-/* Register definitions for the PHY */
-
-#define MII_REG_CR 0 /* Control Register */
-#define MII_REG_SR 1 /* Status Register */
-#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
-#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
-#define MII_REG_ANAR 4 /* A-N Advertisement Register */
-#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
-#define MII_REG_ANER 6 /* A-N Expansion Register */
-#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
-#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
-
-/* values for phy_status */
-
-#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
-#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
-#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
-#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
-#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
-#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
-#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
-
-#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
-#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
-#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
-#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
-#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
-#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
-#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
-#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
-
-
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -347,8 +277,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- dev->trans_start = jiffies;
-
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -406,12 +334,6 @@ fec_enet_interrupt(int irq, void * dev_id)
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
-
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- fec_enet_mii(dev);
- }
-
} while (int_events);
return ret;
@@ -607,827 +529,313 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
-/* called from interrupt context */
-static void
-fec_enet_mii(struct net_device *dev)
-{
- struct fec_enet_private *fep;
- mii_list_t *mip;
-
- fep = netdev_priv(dev);
- spin_lock(&fep->mii_lock);
-
- if ((mip = mii_head) == NULL) {
- printk("MII and no head!\n");
- goto unlock;
- }
-
- if (mip->mii_func != NULL)
- (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
-
- mii_head = mip->mii_next;
- mip->mii_next = mii_free;
- mii_free = mip;
-
- if ((mip = mii_head) != NULL)
- writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
-
-unlock:
- spin_unlock(&fep->mii_lock);
-}
-
-static int
-mii_queue_unlocked(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
+/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_M5272
+static void __inline__ fec_get_mac(struct net_device *dev)
{
- struct fec_enet_private *fep;
- mii_list_t *mip;
- int retval;
-
- /* Add PHY address to register command */
- fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
- regval |= fep->phy_addr << 23;
- retval = 0;
-
- if ((mip = mii_free) != NULL) {
- mii_free = mip->mii_next;
- mip->mii_regval = regval;
- mip->mii_func = func;
- mip->mii_next = NULL;
- if (mii_head) {
- mii_tail->mii_next = mip;
- mii_tail = mip;
- } else {
- mii_head = mii_tail = mip;
- writel(regval, fep->hwp + FEC_MII_DATA);
- }
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = (unsigned char *)FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
} else {
- retval = 1;
+ *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+ *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
}
- return retval;
-}
-
-static int
-mii_queue(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
-{
- struct fec_enet_private *fep;
- unsigned long flags;
- int retval;
- fep = netdev_priv(dev);
- spin_lock_irqsave(&fep->mii_lock, flags);
- retval = mii_queue_unlocked(dev, regval, func);
- spin_unlock_irqrestore(&fep->mii_lock, flags);
- return retval;
-}
-
-static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
-{
- if(!c)
- return;
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
- for (; c->mii_data != mk_mii_end; c++)
- mii_queue(dev, c->mii_data, c->funct);
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
+#endif
-static void mii_parse_sr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
-
- if (mii_reg & 0x0004)
- status |= PHY_STAT_LINK;
- if (mii_reg & 0x0010)
- status |= PHY_STAT_FAULT;
- if (mii_reg & 0x0020)
- status |= PHY_STAT_ANC;
- *s = status;
-}
+/* ------------------------------------------------------------------------- */
-static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
-
- if (mii_reg & 0x1000)
- status |= PHY_CONF_ANE;
- if (mii_reg & 0x4000)
- status |= PHY_CONF_LOOP;
- *s = status;
-}
+ struct phy_device *phy_dev = fep->phy_dev;
+ unsigned long flags;
-static void mii_parse_anar(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_SPMASK);
-
- if (mii_reg & 0x0020)
- status |= PHY_CONF_10HDX;
- if (mii_reg & 0x0040)
- status |= PHY_CONF_10FDX;
- if (mii_reg & 0x0080)
- status |= PHY_CONF_100HDX;
- if (mii_reg & 0x00100)
- status |= PHY_CONF_100FDX;
- *s = status;
-}
+ int status_change = 0;
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT970 is used by many boards */
+ spin_lock_irqsave(&fep->hw_lock, flags);
-#define MII_LXT970_MIRROR 16 /* Mirror register */
-#define MII_LXT970_IER 17 /* Interrupt Enable Register */
-#define MII_LXT970_ISR 18 /* Interrupt Status Register */
-#define MII_LXT970_CONFIG 19 /* Configuration Register */
-#define MII_LXT970_CSR 20 /* Chip Status Register */
+ /* Avoid staying in PHY_HALTED after an MII error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ goto spin_unlock;
+ }
-static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* Duplex link change */
+ if (phy_dev->link) {
+ if (fep->full_duplex != phy_dev->duplex) {
+ fec_restart(dev, phy_dev->duplex);
+ status_change = 1;
+ }
+ }
- status = *s & ~(PHY_STAT_SPMASK);
- if (mii_reg & 0x0800) {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_10FDX;
+ /* Link on or off change */
+ if (phy_dev->link != fep->link) {
+ fep->link = phy_dev->link;
+ if (phy_dev->link)
+ fec_restart(dev, phy_dev->duplex);
else
- status |= PHY_STAT_10HDX;
+ fec_stop(dev);
+ status_change = 1;
}
- *s = status;
-}
-static phy_cmd_t const phy_cmd_lxt970_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
- /* read SR and ISR to acknowledge */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT970_ISR), NULL },
-
- /* find out the current status */
- { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt970 = {
- .id = 0x07810000,
- .name = "LXT970",
- .config = phy_cmd_lxt970_config,
- .startup = phy_cmd_lxt970_startup,
- .ack_int = phy_cmd_lxt970_ack_int,
- .shutdown = phy_cmd_lxt970_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT971 is used on some of my custom boards */
-
-/* register definitions for the 971 */
+spin_unlock:
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
-#define MII_LXT971_PCR 16 /* Port Control Register */
-#define MII_LXT971_SR2 17 /* Status Register 2 */
-#define MII_LXT971_IER 18 /* Interrupt Enable Register */
-#define MII_LXT971_ISR 19 /* Interrupt Status Register */
-#define MII_LXT971_LCR 20 /* LED Control Register */
-#define MII_LXT971_TCR 30 /* Transmit Control Register */
+ if (status_change)
+ phy_print_status(phy_dev);
+}
/*
- * I had some nice ideas of running the MDIO faster...
- * The 971 should support 8MHz and I tried it, but things acted really
- * weird, so 2.5 MHz ought to be enough for anyone...
+ * NOTE: an MII transaction takes around 25 us, so we simply poll for completion.
*/
-
-static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+ fep->mii_timeout = 0;
- if (mii_reg & 0x0400) {
- fep->link = 1;
- status |= PHY_STAT_LINK;
- } else {
- fep->link = 0;
- }
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x4000) {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_10FDX;
- else
- status |= PHY_STAT_10HDX;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
}
- if (mii_reg & 0x0008)
- status |= PHY_STAT_FAULT;
- *s = status;
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
-static phy_cmd_t const phy_cmd_lxt971_config[] = {
- /* limit to 10MBit because my prototype board
- * doesn't work with 100. */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
- /* Somehow does the 971 tell me that the link is down
- * the first read after power-up.
- * read here to get a valid value in ack_int */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
- /* acknowledge the int before reading status ! */
- { mk_mii_read(MII_LXT971_ISR), NULL },
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt971 = {
- .id = 0x0001378e,
- .name = "LXT971",
- .config = phy_cmd_lxt971_config,
- .startup = phy_cmd_lxt971_startup,
- .ack_int = phy_cmd_lxt971_ack_int,
- .shutdown = phy_cmd_lxt971_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
-
-/* register definitions */
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
-#define MII_QS6612_MCR 17 /* Mode Control Register */
-#define MII_QS6612_FTR 27 /* Factory Test Register */
-#define MII_QS6612_MCO 28 /* Misc. Control Register */
-#define MII_QS6612_ISR 29 /* Interrupt Source Register */
-#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
-#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
+ fep->mii_timeout = 0;
-static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- status = *s & ~(PHY_STAT_SPMASK);
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
- switch((mii_reg >> 2) & 7) {
- case 1: status |= PHY_STAT_10HDX; break;
- case 2: status |= PHY_STAT_100HDX; break;
- case 5: status |= PHY_STAT_10FDX; break;
- case 6: status |= PHY_STAT_100FDX; break;
+ return 0;
}
- *s = status;
+static int fec_enet_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
}
-static phy_cmd_t const phy_cmd_qs6612_config[] = {
- /* The PHY powers up isolated on the RPX,
- * so send a command to allow operation.
- */
- { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
-
- /* parse cr and anar to get some info */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
- /* we need to read ISR, SR and ANER to acknowledge */
- { mk_mii_read(MII_QS6612_ISR), NULL },
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_REG_ANER), NULL },
-
- /* read pcr to get info */
- { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_qs6612 = {
- .id = 0x00181440,
- .name = "QS6612",
- .config = phy_cmd_qs6612_config,
- .startup = phy_cmd_qs6612_startup,
- .ack_int = phy_cmd_qs6612_ack_int,
- .shutdown = phy_cmd_qs6612_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* AMD AM79C874 phy */
-
-/* register definitions for the 874 */
-
-#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
-#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
-#define MII_AM79C874_DR 18 /* Diagnostic Register */
-#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
-#define MII_AM79C874_MCR 21 /* ModeControl Register */
-#define MII_AM79C874_DC 23 /* Disconnect Counter */
-#define MII_AM79C874_REC 24 /* Recieve Error Counter */
-
-static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
-
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x0400)
- status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
- else
- status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
-
- *s = status;
-}
-
-static phy_cmd_t const phy_cmd_am79c874_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_AM79C874_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_am79c874 = {
- .id = 0x00022561,
- .name = "AM79C874",
- .config = phy_cmd_am79c874_config,
- .startup = phy_cmd_am79c874_startup,
- .ack_int = phy_cmd_am79c874_ack_int,
- .shutdown = phy_cmd_am79c874_shutdown
-};
+ struct phy_device *phy_dev = NULL;
+ int phy_addr;
+ fep->phy_dev = NULL;
-/* ------------------------------------------------------------------------- */
-/* Kendin KS8721BL phy */
-
-/* register definitions for the 8721 */
-
-#define MII_KS8721BL_RXERCR 21
-#define MII_KS8721BL_ICSR 27
-#define MII_KS8721BL_PHYCR 31
-
-static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_KS8721BL_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_ks8721bl = {
- .id = 0x00022161,
- .name = "KS8721BL",
- .config = phy_cmd_ks8721bl_config,
- .startup = phy_cmd_ks8721bl_startup,
- .ack_int = phy_cmd_ks8721bl_ack_int,
- .shutdown = phy_cmd_ks8721bl_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* register definitions for the DP83848 */
-
-#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
-
-static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
-
- *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
-
- /* Link up */
- if (mii_reg & 0x0001) {
- fep->link = 1;
- *s |= PHY_STAT_LINK;
- } else
- fep->link = 0;
- /* Status of link */
- if (mii_reg & 0x0010) /* Autonegotioation complete */
- *s |= PHY_STAT_ANC;
- if (mii_reg & 0x0002) { /* 10MBps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_10FDX;
- else
- *s |= PHY_STAT_10HDX;
- } else { /* 100 Mbps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_100FDX;
- else
- *s |= PHY_STAT_100HDX;
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (fep->mii_bus->phy_map[phy_addr]) {
+ phy_dev = fep->mii_bus->phy_map[phy_addr];
+ break;
+ }
}
- if (mii_reg & 0x0008)
- *s |= PHY_STAT_FAULT;
-}
-static phy_info_t phy_info_dp83848= {
- 0x020005c9,
- "DP83848",
-
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup - enable interrupts */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
+ if (!phy_dev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
-static phy_info_t phy_info_lan8700 = {
- 0x0007C0C,
- "LAN8700",
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* act_int */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
-/* ------------------------------------------------------------------------- */
+ /* attach the mac to the phy */
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+ &fec_enet_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
+ }
-static phy_info_t const * const phy_info[] = {
- &phy_info_lxt970,
- &phy_info_lxt971,
- &phy_info_qs6612,
- &phy_info_am79c874,
- &phy_info_ks8721bl,
- &phy_info_dp83848,
- &phy_info_lan8700,
- NULL
-};
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
-/* ------------------------------------------------------------------------- */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id);
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
-/*
- * This is specific to the MII interrupt setup of the M5272EVB.
- */
-static void __inline__ fec_request_mii_intr(struct net_device *dev)
-{
- if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
- printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
-}
+ printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
-static void __inline__ fec_disable_phy_intr(struct net_device *dev)
-{
- free_irq(66, dev);
+ return 0;
}
-#endif
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static int fec_enet_mii_init(struct platform_device *pdev)
{
+ struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
- unsigned char *iap, tmpaddr[ETH_ALEN];
+ int err = -ENXIO, i;
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
- iap = &tmpaddr[0];
- }
+ fep->mii_timeout = 0;
- memcpy(dev->dev_addr, iap, ETH_ALEN);
-
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
-}
-#endif
-
-/* ------------------------------------------------------------------------- */
-
-static void mii_display_status(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ */
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- if (!fep->link && !fep->old_link) {
- /* Link is still down - don't print anything */
- return;
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
}
- printk("%s: status: ", dev->name);
-
- if (!fep->link) {
- printk("link down");
- } else {
- printk("link up");
-
- switch(*s & PHY_STAT_SPMASK) {
- case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
- case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
- case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
- case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
- default:
- printk(", Unknown speed/duplex");
- }
-
- if (*s & PHY_STAT_ANC)
- printk(", auto-negotiation complete");
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
}
- if (*s & PHY_STAT_FAULT)
- printk(", remote fault");
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
- printk(".\n");
-}
-
-static void mii_display_config(struct work_struct *work)
-{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- uint status = fep->phy_status;
+ platform_set_drvdata(dev, fep->mii_bus);
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- printk("%s: config: auto-negotiation ", dev->name);
-
- if (status & PHY_CONF_ANE)
- printk("on");
- else
- printk("off");
+ if (mdiobus_register(fep->mii_bus))
+ goto err_out_free_mdio_irq;
- if (status & PHY_CONF_100FDX)
- printk(", 100FDX");
- if (status & PHY_CONF_100HDX)
- printk(", 100HDX");
- if (status & PHY_CONF_10FDX)
- printk(", 10FDX");
- if (status & PHY_CONF_10HDX)
- printk(", 10HDX");
- if (!(status & PHY_CONF_SPMASK))
- printk(", No speed/duplex selected?");
-
- if (status & PHY_CONF_LOOP)
- printk(", loopback enabled");
-
- printk(".\n");
+ return 0;
- fep->sequence_done = 1;
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
}
-static void mii_relink(struct work_struct *work)
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- int duplex;
-
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
- mii_display_status(dev);
- fep->old_link = fep->link;
-
- if (fep->link) {
- duplex = 0;
- if (fep->phy_status
- & (PHY_STAT_100FDX | PHY_STAT_10FDX))
- duplex = 1;
- fec_restart(dev, duplex);
- } else
- fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
}
-/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
-static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+static int fec_enet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- /*
- * We cannot queue phy_task twice in the workqueue. It
- * would cause an endless loop in the workqueue.
- * Fortunately, if the last mii_relink entry has not yet been
- * executed now, it will do the job for the current interrupt,
- * which is just what we want.
- */
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_relink);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_gset(phydev, cmd);
}
-/* mii_queue_config is called in interrupt context from fec_enet_mii */
-static void mii_queue_config(uint mii_reg, struct net_device *dev)
+static int fec_enet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_display_config);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_sset(phydev, cmd);
}
-phy_cmd_t const phy_cmd_relink[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_relink },
- { mk_mii_end, }
- };
-phy_cmd_t const phy_cmd_config[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_config },
- { mk_mii_end, }
- };
-
-/* Read remainder of PHY ID. */
-static void
-mii_discover_phy3(uint mii_reg, struct net_device *dev)
+static void fec_enet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep;
- int i;
-
- fep = netdev_priv(dev);
- fep->phy_id |= (mii_reg & 0xffff);
- printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
-
- for(i = 0; phy_info[i]; i++) {
- if(phy_info[i]->id == (fep->phy_id >> 4))
- break;
- }
-
- if (phy_info[i])
- printk(" -- %s\n", phy_info[i]->name);
- else
- printk(" -- unknown PHY!\n");
+ struct fec_enet_private *fep = netdev_priv(dev);
- fep->phy = phy_info[i];
- fep->phy_id_done = 1;
+ strcpy(info->driver, fep->pdev->dev.driver->name);
+ strcpy(info->version, "Revision: 1.0");
+ strcpy(info->bus_info, dev_name(&dev->dev));
}
-/* Scan all of the MII PHY addresses looking for someone to respond
- * with a valid ID. This usually happens quickly.
- */
-static void
-mii_discover_phy(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep;
- uint phytype;
-
- fep = netdev_priv(dev);
-
- if (fep->phy_addr < 32) {
- if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
-
- /* Got first part of ID, now get remainder */
- fep->phy_id = phytype << 16;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
- mii_discover_phy3);
- } else {
- fep->phy_addr++;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
- mii_discover_phy);
- }
- } else {
- printk("FEC: No PHY device found.\n");
- /* Disable external MII interface */
- writel(0, fep->hwp + FEC_MII_SPEED);
- fep->phy_speed = 0;
-#ifdef HAVE_mii_link_interrupt
- fec_disable_phy_intr(dev);
-#endif
- }
-}
+static struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
-/* This interrupt occurs when the PHY detects a link change */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id)
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ if (!netif_running(dev))
+ return -EINVAL;
- return IRQ_HANDLED;
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-#endif
static void fec_enet_free_buffers(struct net_device *dev)
{
@@ -1509,35 +917,13 @@ fec_enet_open(struct net_device *dev)
if (ret)
return ret;
- fep->sequence_done = 0;
- fep->link = 0;
-
- fec_restart(dev, 1);
-
- if (fep->phy) {
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, fep->phy->config);
- mii_do_cmd(dev, phy_cmd_config); /* display configuration */
-
- /* Poll until the PHY tells us its configuration
- * (not link state).
- * Request is initiated by mii_do_cmd above, but answer
- * comes by interrupt.
- * This should take about 25 usec per register at 2.5 MHz,
- * and we read approximately 5 registers.
- */
- while(!fep->sequence_done)
- schedule();
-
- mii_do_cmd(dev, fep->phy->startup);
+ /* Probe and connect to the PHY when the interface is opened */
+ ret = fec_enet_mii_probe(dev);
+ if (ret) {
+ fec_enet_free_buffers(dev);
+ return ret;
}
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
-
+ phy_start(fep->phy_dev);
netif_start_queue(dev);
fep->opened = 1;
return 0;
@@ -1553,6 +939,9 @@ fec_enet_close(struct net_device *dev)
netif_stop_queue(dev);
fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+
fec_enet_free_buffers(dev);
return 0;
@@ -1574,7 +963,7 @@ fec_enet_close(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
@@ -1604,16 +993,16 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now */
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dmi->dmi_addrlen; i++) {
- data = dmi->dmi_addr[i];
+ for (i = 0; i < dev->addr_len; i++) {
+ data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
(((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1666,6 +1055,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
@@ -1689,7 +1079,6 @@ static int fec_enet_init(struct net_device *dev, int index)
}
spin_lock_init(&fep->hw_lock);
- spin_lock_init(&fep->mii_lock);
fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1105,10 @@ static int fec_enet_init(struct net_device *dev, int index)
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
-#ifdef HAVE_mii_link_interrupt
- fec_request_mii_intr(dev);
-#endif
/* The FEC Ethernet specific entries in the device structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &fec_netdev_ops;
-
- for (i=0; i<NMII-1; i++)
- mii_cmds[i].mii_next = &mii_cmds[i+1];
- mii_free = mii_cmds;
-
- /* Set MII speed to 2.5 MHz */
- fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
- / 2500000) / 2) & 0x3F) << 1;
+ dev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1760,13 +1139,6 @@ static int fec_enet_init(struct net_device *dev, int index)
fec_restart(dev, 0);
- /* Queue up command to detect the PHY and initialize the
- * remainder of the interface.
- */
- fep->phy_id_done = 0;
- fep->phy_addr = 0;
- mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
-
return 0;
}
@@ -1830,13 +1202,27 @@ fec_restart(struct net_device *dev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
+#endif
+
/* And last, enable the transmit and receive processing */
writel(2, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
/* Enable interrupts we wish to service */
- writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
- fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
static void
@@ -1859,7 +1245,6 @@ fec_stop(struct net_device *dev)
/* Clear outstanding MII command interrupts. */
writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
@@ -1867,6 +1252,7 @@ static int __devinit
fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
+ struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
struct resource *r;
@@ -1891,6 +1277,7 @@ fec_probe(struct platform_device *pdev)
memset(fep, 0, sizeof(*fep));
ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->pdev = pdev;
if (!ndev->base_addr) {
ret = -ENOMEM;
@@ -1899,6 +1286,10 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+
/* This device has up to three irqs on some platforms */
for (i = 0; i < 3; i++) {
irq = platform_get_irq(pdev, i);
@@ -1926,6 +1317,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
@@ -1933,6 +1328,8 @@ fec_probe(struct platform_device *pdev)
return 0;
failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
failed_init:
clk_disable(fep->clk);
clk_put(fep->clk);
@@ -1959,6 +1356,7 @@ fec_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
fec_stop(ndev);
+ fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
iounmap((void __iomem *)ndev->base_addr);
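
Most of the fec.c diff replaces the driver's hand-rolled MII state machine with phylib: an mii_bus backed by fec_enet_mdio_read/write is registered at probe time, and fec_enet_mii_probe() attaches to the first PHY it finds. A compressed sketch of that attach sequence, with error handling trimmed and an illustrative function name; it mirrors the pattern above rather than reproducing the driver verbatim.

#include <linux/phy.h>

/* Sketch of the phylib attach pattern used by fec_enet_mii_probe() above. */
static int example_attach_phy(struct net_device *dev, struct mii_bus *bus,
			      void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev = NULL;
	int addr;

	/* take the first PHY that answered on the bus */
	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (bus->phy_map[addr]) {
			phydev = bus->phy_map[addr];
			break;
		}
	}
	if (!phydev)
		return -ENODEV;

	phydev = phy_connect(dev, dev_name(&phydev->dev), adjust_link,
			     0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* advertise only what the MAC supports */
	phydev->supported &= PHY_BASIC_FEATURES;
	phydev->advertising = phydev->supported;

	phy_start(phydev);	/* done from ndo_open in the real driver */
	return 0;
}

phylib then invokes adjust_link() on every link change, which is where fec_enet_adjust_link() restarts or stops the MAC.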
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index cc47f3f057c..2c48b25668d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -43,6 +43,8 @@
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
#else
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4a43e56b739..25e6cc6840b 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -327,7 +327,6 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock_irqsave(&priv->lock, flags);
- dev->trans_start = jiffies;
bd = (struct bcom_fec_bd *)
bcom_prepare_next_buffer(priv->tx_dmatsk);
@@ -436,7 +435,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
DMA_FROM_DEVICE);
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
- rskb->dev = dev;
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
@@ -576,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
@@ -873,7 +871,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->node, 0, &mem);
+ rv = of_address_to_resource(op->dev.of_node, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -921,7 +919,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->node, 0);
+ ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -944,20 +942,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->node, "current-speed", &prop_size);
+ prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -1065,9 +1063,11 @@ static struct of_device_id mpc52xx_fec_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
static struct of_platform_driver mpc52xx_fec_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = mpc52xx_fec_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_match,
+ },
.probe = mpc52xx_fec_probe,
.remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
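This file (and several later ones in the series) replaces the old struct dev_mc_list walk with netdev_for_each_mc_addr() over struct netdev_hw_addr, reading ha->addr instead of dmi->dmi_addr. A small sketch of the new iteration computing the two group-hash registers, with hypothetical names; ether_crc_le() and ETH_ALEN are the standard kernel helpers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* Minimal sketch of the new multicast walk: the top 6 CRC bits of each
 * address select one bit in a pair of 32-bit group-hash registers. */
static void example_mc_hash(struct net_device *dev, u32 *gaddr1, u32 *gaddr2)
{
	struct netdev_hw_addr *ha;
	u32 crc;

	*gaddr1 = 0;
	*gaddr2 = 0;
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
		if (crc >= 32)
			*gaddr1 |= 1u << (crc - 32);
		else
			*gaddr2 |= 1u << crc;
	}
}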
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 7658a082e39..006f64d9f96 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -66,7 +66,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
const struct of_device_id *match)
{
struct device *dev = &of->dev;
- struct device_node *np = of->node;
+ struct device_node *np = of->dev.of_node;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
struct resource res = {};
@@ -107,7 +107,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
/* set MII speed */
out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 1);
+ ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
@@ -159,10 +159,13 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct of_platform_driver mpc52xx_fec_mdio_driver = {
- .name = "mpc5200b-fec-phy",
+ .driver = {
+ .name = "mpc5200b-fec-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_mdio_match,
+ },
.probe = mpc52xx_fec_mdio_probe,
.remove = mpc52xx_fec_mdio_remove,
- .match_table = mpc52xx_fec_mdio_match,
};
/* let fec driver call it, since this has to be registered before it */
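The driver-registration hunks move .name, .owner and the match table out of the top-level of_platform_driver fields and into the embedded struct device_driver, with the table now called .of_match_table. A sketch of the new layout under invented example_* names (the compatible string is made up):

#include <linux/module.h>
#include <linux/of_platform.h>

static int example_probe(struct of_device *op, const struct of_device_id *match)
{
	return 0;	/* real setup omitted; this only shows the registration form */
}

static int example_remove(struct of_device *op)
{
	return 0;
}

static struct of_device_id example_match[] = {
	{ .compatible = "acme,example-mdio", },	/* hypothetical */
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct of_platform_driver example_driver = {
	.driver = {
		.name = "example-mdio",
		.owner = THIS_MODULE,
		.of_match_table = example_match,	/* was the top-level .match_table */
	},
	.probe = example_probe,
	.remove = example_remove,
};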
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c2242..268ea4d566d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
static void nv_napi_enable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_enable(&np->napi);
-#endif
}
static void nv_napi_disable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_disable(&np->napi);
-#endif
}
#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
}
/* If rx bufs are exhausted, this is called after 50ms to attempt a refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
}
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
- int retcode;
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(np->pci_dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
- if (!nv_optimized(np))
- retcode = nv_alloc_rx(dev);
- else
- retcode = nv_alloc_rx_optimized(dev);
- if (retcode) {
- spin_lock_irq(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
- }
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(np->pci_dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
-}
-#endif
static void nv_init_rx(struct net_device *dev)
{
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc* put_tx;
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
next_pkt:
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
dev->name, len, skb->protocol);
if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
} else {
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
- vlan_hwaccel_receive_skb(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
- vlan_hwaccel_rx(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+ vlan_gro_receive(&np->napi, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
} else {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
}
}
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
} else {
- struct dev_mc_list *walk;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(walk, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned char *addr = ha->addr;
u32 a, b;
- a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
- b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+ a = le32_to_cpu(*(__le32 *) addr);
+ b = le16_to_cpu(*(__le16 *) (&addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
return IRQ_HANDLED;
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx_optimized(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
return IRQ_RETVAL(i);
}
-#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int tx_work, rx_work;
+ int rx_count, tx_work=0, rx_work=0;
- if (!nv_optimized(np)) {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ do {
+ if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process(dev, budget);
- retcode = nv_alloc_rx(dev);
- } else {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ rx_count = nv_rx_process(dev, budget - rx_work);
+ retcode = nv_alloc_rx(dev);
+ } else {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process_optimized(dev, budget);
- retcode = nv_alloc_rx_optimized(dev);
- }
+ rx_count = nv_rx_process_optimized(dev,
+ budget - rx_work);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
+ } while (retcode == 0 &&
+ rx_count > 0 && (rx_work += rx_count) < budget);
if (retcode) {
spin_lock_irqsave(&np->lock, flags);
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
}
return rx_work;
}
-#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_GRO;
}
np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
else
dev->netdev_ops = &nv_netdev_ops_optimized;
-#ifdef CONFIG_FORCEDETH_NAPI
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* MSI-X has had reported issues when the irqmask is modified,
 as in the NAPI case, so it is disabled for now
 */
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
}
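forcedeth now builds its NAPI path unconditionally, delivers receive packets through napi_gro_receive()/vlan_gro_receive() (and advertises NETIF_F_GRO), and uses skb_headlen() for the linear part of a transmit skb instead of skb->len - skb->data_len. A rough sketch of the resulting poll routine, with example_* ring helpers standing in for the driver's own:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv {
	struct napi_struct napi;
	struct net_device *ndev;
};

struct sk_buff *example_rx_next(struct example_priv *priv);	/* hypothetical ring helper */
void example_irq_enable(struct example_priv *priv);		/* hypothetical */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_rx_next(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->ndev);
		napi_gro_receive(napi, skb);	/* replaces netif_rx()/netif_receive_skb() */
		work++;
	}

	if (work < budget) {
		napi_complete(napi);		/* all done: only now re-enable the NIC interrupt */
		example_irq_enable(priv);
	}
	return work;
}

The budget bookkeeping matters: returning less than budget tells the core the device is idle, so napi_complete() plus interrupt re-enable must happen only on that path.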
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0770e2f6da6..309a0eaddd8 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -674,8 +674,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb->len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, skb->len);
- dev->trans_start = jiffies;
-
/*
* If this was the last BD in the ring, start at the beginning again.
*/
@@ -1015,7 +1013,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
return -ENOMEM;
if (!IS_FEC(match)) {
- data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@@ -1027,8 +1025,8 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
fpi->rx_copybreak = 240;
fpi->use_napi = 1;
fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
- if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link",
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
NULL)))
goto out_free_fpi;
@@ -1061,7 +1059,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- mac_addr = of_get_mac_address(ofdev->node);
+ mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, 6);
@@ -1158,8 +1156,11 @@ static struct of_device_id fs_enet_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_match);
static struct of_platform_driver fs_enet_driver = {
- .name = "fs_enet",
- .match_table = fs_enet_match,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "fs_enet",
+ .of_match_table = fs_enet_match,
+ },
.probe = fs_enet_probe,
.remove = fs_enet_remove,
};
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a973e71876..5d45084b287 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -88,19 +88,19 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
goto out;
- fep->fcc.fccp = of_iomap(ofdev->node, 0);
+ fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
goto out;
- fep->fcc.ep = of_iomap(ofdev->node, 1);
+ fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->fcc.ep)
goto out_fccp;
- fep->fcc.fcccp = of_iomap(ofdev->node, 2);
+ fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
if (!fep->fcc.fcccp)
goto out_ep;
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
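do_pd_setup() here, like the other OF drivers in this series, now reaches the device tree node through ofdev->dev.of_node rather than the removed ofdev->node field. A compact sketch of that access pattern (exact header locations vary by tree, and example_hw_init() is hypothetical):

#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

void example_hw_init(void __iomem *regs, int irq);	/* hypothetical consumer */

static int example_of_probe(struct of_device *ofdev,
			    const struct of_device_id *match)
{
	struct device_node *np = ofdev->dev.of_node;	/* was ofdev->node */
	struct resource res;
	void __iomem *regs;
	int irq;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	regs = of_iomap(np, 0);				/* map register bank 0 */
	if (!regs)
		return -ENOMEM;

	irq = irq_of_parse_and_map(np, 0);		/* first interrupt specifier */
	example_hw_init(regs, irq);
	return 0;
}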
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ec81f50d591..7ca1642276d 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -98,11 +98,11 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct of_device *ofdev = to_of_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
- fep->fec.fecp = of_iomap(ofdev->node, 0);
+ fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
return -EINVAL;
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 34d3da751eb..a3c44544846 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -98,15 +98,15 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct of_device *ofdev = to_of_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
- fep->scc.sccp = of_iomap(ofdev->node, 0);
+ fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->scc.sccp)
return -EINVAL;
- fep->scc.ep = of_iomap(ofdev->node, 1);
+ fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->scc.ep) {
iounmap(fep->scc.sccp);
return -EINVAL;
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 24ff9f43a62..0f90685d3d1 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -224,8 +224,11 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct of_platform_driver fs_enet_bb_mdio_driver = {
- .name = "fsl-bb-mdio",
- .match_table = fs_enet_mdio_bb_match,
+ .driver = {
+ .name = "fsl-bb-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_bb_match,
+ },
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 5944b65082c..bddffd169b9 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -124,7 +124,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
new_bus->write = &fs_enet_fec_mii_write;
new_bus->reset = &fs_enet_fec_mii_reset;
- ret = of_address_to_resource(ofdev->node, 0, &res);
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
goto out_res;
@@ -135,7 +135,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
goto out_fec;
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->node);
+ clock = get_bus_freq(ofdev->dev.of_node);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
@@ -172,7 +172,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
- ret = of_mdiobus_register(new_bus, ofdev->node);
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
goto out_free_irqs;
@@ -222,8 +222,11 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct of_platform_driver fs_enet_fec_mdio_driver = {
- .name = "fsl-fec-mdio",
- .match_table = fs_enet_mdio_fec_match,
+ .driver = {
+ .name = "fsl-fec-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_fec_match,
+ },
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3acac5f930c..b4c41d72c42 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -267,7 +267,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
static int fsl_pq_mdio_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node *tbi;
struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
@@ -277,15 +277,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
int tbiaddr = -1;
const u32 *addrp;
u64 addr = 0, size = 0;
- int err = 0;
+ int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
new_bus = mdiobus_alloc();
- if (NULL == new_bus)
+ if (!new_bus) {
+ err = -ENOMEM;
goto err_free_priv;
+ }
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
@@ -469,10 +471,13 @@ static struct of_device_id fsl_pq_mdio_match[] = {
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
static struct of_platform_driver fsl_pq_mdio_driver = {
- .name = "fsl-pq_mdio",
+ .driver = {
+ .name = "fsl-pq_mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_pq_mdio_match,
+ },
.probe = fsl_pq_mdio_probe,
.remove = fsl_pq_mdio_remove,
- .match_table = fsl_pq_mdio_match,
};
int __init fsl_pq_mdio_init(void)
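The fsl_pq_mdio_probe() hunks stop pre-initialising err to 0 and report a failed mdiobus_alloc() as -ENOMEM instead of falling through the error path with a success code. A tiny sketch of the corrected shape (the private-data size is a stand-in):

#include <linux/phy.h>
#include <linux/slab.h>

static int example_mdio_setup(void **priv_out, struct mii_bus **bus_out)
{
	void *priv;
	struct mii_bus *bus;
	int err;

	priv = kzalloc(64, GFP_KERNEL);		/* stand-in private data */
	if (!priv)
		return -ENOMEM;

	bus = mdiobus_alloc();
	if (!bus) {
		err = -ENOMEM;			/* the fix: set a real error code */
		goto err_free_priv;
	}

	*priv_out = priv;
	*bus_out = bus;
	return 0;

err_free_priv:
	kfree(priv);
	return err;
}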
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5d3763fb347..1830f3199cb 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+ priv->padding = 8;
+ }
+
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || priv->rx_csum_enable;
+ return priv->vlgrp || priv->rx_csum_enable ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
@@ -599,7 +608,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
int err = 0, i;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node *child = NULL;
const u32 *stash;
const u32 *stash_len;
@@ -637,7 +646,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
priv->ndev = dev;
dev->num_tx_queues = num_tx_qs;
@@ -738,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -768,6 +778,48 @@ err_grp_init:
return err;
}
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en = 0;
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -776,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
if (!priv->phydev)
return -ENODEV;
@@ -884,7 +939,7 @@ static int gfar_probe(struct of_device *ofdev,
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -978,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
else
priv->padding = 0;
- if (dev->features & NETIF_F_IP_CSUM)
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->hard_header_len += GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1288,21 +1344,9 @@ static struct dev_pm_ops gfar_pm_ops = {
#define GFAR_PM_OPS (&gfar_pm_ops)
-static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
-{
- return gfar_suspend(&ofdev->dev);
-}
-
-static int gfar_legacy_resume(struct of_device *ofdev)
-{
- return gfar_resume(&ofdev->dev);
-}
-
#else
#define GFAR_PM_OPS NULL
-#define gfar_legacy_suspend NULL
-#define gfar_legacy_resume NULL
#endif
@@ -1683,7 +1727,7 @@ void gfar_start(struct net_device *dev)
gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1923,23 +1967,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct gfar __iomem *regs = NULL;
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *txbdp_start, *base;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0;
+ int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, length;
-
+ unsigned int nr_frags, nr_txbds, length;
+ union skb_shared_tx *shtx;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ shtx = skb_tx(skb);
+
+ /* check if time stamp should be generated */
+ if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ do_tstamp = 1;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+ (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+ unlikely(do_tstamp)) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
@@ -1956,8 +2006,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
/* check if there is space to queue this packet */
- if ((nr_frags+1) > tx_queue->num_txbdfree) {
+ if (nr_txbds > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
@@ -1969,9 +2025,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = txbdp->lstatus;
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
- lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (unlikely(do_tstamp))
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+ TXBD_INTERRUPT);
+ else
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2017,11 +2083,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_tx_vlan(skb, fcb);
}
- /* setup the TxBD length and buffer pointer for the first BD */
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ shtx->in_progress = 1;
+ if (fcb == NULL)
+ fcb = gfar_add_fcb(skb);
+ fcb->ptp = 1;
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ /*
+ * If time stamping is requested, one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - GMAC_FCB_LEN);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
/*
* We can work in parallel with gfar_clean_tx_ring(), except
@@ -2061,9 +2148,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* reduce TxBD free count */
- tx_queue->num_txbdfree -= (nr_frags + 1);
-
- dev->trans_start = jiffies;
+ tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
@@ -2251,16 +2336,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
struct net_device *dev = tx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
- struct txbd8 *bdp;
+ struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
- int frags = 0;
+ int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
u32 lstatus;
+ size_t buflen;
+ union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2270,7 +2357,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
- lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+ /*
+ * When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ shtx = skb_tx(skb);
+ if (unlikely(shtx->in_progress))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
lstatus = lbdp->lstatus;
@@ -2279,10 +2377,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- dma_unmap_single(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ if (unlikely(shtx->in_progress)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = next->length + GMAC_FCB_LEN;
+ } else
+ buflen = bdp->length;
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(shtx->in_progress)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next;
+ }
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2314,7 +2426,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
- tx_queue->num_txbdfree += frags + 1;
+ tx_queue->num_txbdfree += nr_txbds;
spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
@@ -2470,6 +2582,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb_pull(skb, amount_pull);
}
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2506,8 +2629,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
- priv->padding;
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2794,7 +2916,7 @@ static void adjust_link(struct net_device *dev)
* whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
@@ -2867,17 +2989,14 @@ static void gfar_set_multi(struct net_device *dev)
return;
/* Parse the list, and set the appropriate bits */
- netdev_for_each_mc_addr(mc_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx < em_num) {
- gfar_set_mac_for_addr(dev, idx,
- mc_ptr->dmi_addr);
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
idx++;
} else
- gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ gfar_set_hash_for_addr(dev, ha->addr);
}
}
-
- return;
}
@@ -2918,8 +3037,6 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
-
- return;
}
@@ -3050,14 +3167,14 @@ MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
- .name = "fsl-gianfar",
- .match_table = gfar_match,
-
+ .driver = {
+ .name = "fsl-gianfar",
+ .owner = THIS_MODULE,
+ .pm = GFAR_PM_OPS,
+ .of_match_table = gfar_match,
+ },
.probe = gfar_probe,
.remove = gfar_remove,
- .suspend = gfar_legacy_suspend,
- .resume = gfar_legacy_resume,
- .driver.pm = GFAR_PM_OPS,
};
static int __init gfar_init(void)
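gfar_hwtstamp_ioctl() above wires SIOCSHWTSTAMP into the driver: tx_type HWTSTAMP_TX_ON and any rx_filter other than NONE require FSL_GIANFAR_DEV_HAS_TIMER, and the driver reports HWTSTAMP_FILTER_ALL back for any accepted rx filter. A userspace sketch of issuing that request (fd is any open AF_INET datagram socket; the interface name is an assumption):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int request_hw_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* needs FSL_GIANFAR_DEV_HAS_TIMER */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* driver answers with FILTER_ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* 0 on success, -1 with errno on failure */
}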
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e71423..ac4a92e08c0 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
+#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
struct txfcb {
u8 flags;
- u8 reserved;
+ u8 ptp; /* Flag to enable tx timestamping */
u8 l4os; /* Level 4 Header Offset */
u8 l3os; /* Level 3 Header Offset */
u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
};
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
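gianfar.h gains RCTRL_TS_ENABLE and the hwts_rx_en/hwts_tx_en flags that drive the receive path added in gianfar.c, where the hardware places an 8-byte nanosecond counter in the padding ahead of each frame. A sketch of turning that counter into an skb hardware timestamp, assuming the same padding layout (the helper name is invented):

#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <linux/string.h>

static void example_attach_rx_tstamp(struct sk_buff *skb, unsigned int padding)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	u64 ns = *(u64 *)skb->data;		/* counter written by the eTSEC hardware */

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(ns);
	skb_pull(skb, padding);			/* strip the timestamp padding before the stack sees it */
}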
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a90430de91..f37a4c143dd 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
static void greth_set_hash_filter(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = greth_hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -1500,7 +1499,8 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
if (i == 6) {
const unsigned char *addr;
int len;
- addr = of_get_property(ofdev->node, "local-mac-address", &len);
+ addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
+ &len);
if (addr != NULL && len == 6) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
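greth_of_probe() reads the fallback MAC from the "local-mac-address" property of ofdev->dev.of_node. A short sketch of that lookup in isolation (helper name invented):

#include <linux/of.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static int example_mac_from_of(struct device_node *np, u8 *mac)
{
	int len;
	const u8 *addr = of_get_property(np, "local-mac-address", &len);

	if (!addr || len != ETH_ALEN)
		return -ENODEV;		/* property missing or not 6 bytes */
	memcpy(mac, addr, ETH_ALEN);
	return 0;
}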
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f1387959..61f2b1cfcd4 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -859,7 +859,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
- return;
}
@@ -1225,8 +1224,6 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Mark the last entry of the ring */
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
-
- return;
}
@@ -1857,12 +1854,12 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
- writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ netdev_for_each_mc_addr(ha, dev) {
+ writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992b3d1..3e25f10cabd 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
return -EINVAL;
}
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
- printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n",
+ printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
dev->base_addr);
return -EACCES;
}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f3a96b84391..9f64c863720 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1629,7 +1629,6 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
skb->protocol = ax25_type_trans(skb, scc->dev);
netif_rx(skb);
- return;
}
/* ----> transmit frame <---- */
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 694132e04af..4e7d1d0a234 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void)
dev = alloc_netdev(sizeof(struct yam_port), name,
yam_setup);
if (!dev) {
- printk(KERN_ERR "yam: cannot allocate net device %s\n",
- dev->name);
+ pr_err("yam: cannot allocate net device\n");
err = -ENOMEM;
goto error;
}
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index efdbcad63c6..82bffc3cabd 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -351,7 +351,6 @@ hpp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
/* The programmed-I/O version of reading the 4 byte 8390 specific header.
@@ -422,7 +421,6 @@ hpp_io_block_output(struct net_device *dev, int count,
int ioaddr = dev->base_addr - NIC_OFFSET;
outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
- return;
}
static void
@@ -436,8 +434,6 @@ hpp_mem_block_output(struct net_device *dev, int count,
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
outw(option_reg, ioaddr + HPP_OPTION);
-
- return;
}
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 5c4d78c1ff4..86ececd3c65 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -240,7 +240,6 @@ hp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
static void
@@ -360,7 +359,6 @@ hp_block_output(struct net_device *dev, int count,
dev->name, (start_page << 8) + count, addr);
}
outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
- return;
}
/* This function resets the ethercard if something screws up. */
@@ -371,7 +369,6 @@ hp_init_card(struct net_device *dev)
NS8390p_init(dev, 0);
outb_p(irqmap[irq&0x0f] | HP_RUN,
dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8cd56e..68e5ac8832a 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
@@ -1510,7 +1510,7 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
#endif
/* not waited long enough since last tx? */
- if (time_before(jiffies, dev->trans_start + HZ))
+ if (time_before(jiffies, dev_trans_start(dev) + HZ))
goto drop;
if (hp100_check_lan(dev))
@@ -1547,7 +1547,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1585,7 +1584,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
/* Update statistics */
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1663,7 +1661,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
#endif
/* not waited long enough since last failed tx try? */
- if (time_before(jiffies, dev->trans_start + HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
#ifdef HP100_DEBUG
printk("hp100: %s: trans_start timing problem\n",
dev->name);
@@ -1701,7 +1699,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
mdelay(1);
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1745,7 +1742,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
hp100_ints_on();
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2099,15 +2095,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
} else {
int i, idx;
u_char *addrs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
printk("hp100: %s: computing hash filter - mc_count = %i\n",
dev->name, netdev_mc_count(dev));
#endif
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 24724b4ad70..07d8e5b634f 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
static struct zorro_driver hydra_driver = {
.name = "hydra",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index dd873cc41c2..b150c102ca5 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -136,7 +136,8 @@ static inline void emac_report_timeout_error(struct emac_instance *dev,
EMAC_FTR_440EP_PHY_CLK_FIX))
DBG(dev, "%s" NL, error);
else if (net_ratelimit())
- printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error);
+ printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
+ error);
}
/* EMAC PHY clock workaround:
@@ -389,18 +390,19 @@ static void emac_hash_mc(struct emac_instance *dev)
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[regs];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- netdev_for_each_mc_addr(dmi, dev->ndev) {
+ netdev_for_each_mc_addr(ha, dev->ndev) {
int slot, reg, mask;
- DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
+ DBG2(dev, "mc %pM" NL, ha->addr);
- slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
@@ -1177,7 +1179,7 @@ static int emac_open(struct net_device *ndev)
netif_carrier_on(dev->ndev);
/* Required for Pause packet support in EMAC */
- dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+ dev_mc_add_global(ndev, default_mcast_addr);
emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
@@ -1700,7 +1702,6 @@ static int emac_poll_rx(void *param, int budget)
skb_put(skb, len);
push_packet:
- skb->dev = dev->ndev;
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
@@ -2185,7 +2186,7 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
strcpy(info->version, DRV_VERSION);
info->fw_version[0] = '\0';
sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->node->full_name);
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
@@ -2379,7 +2380,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
static int __devinit emac_init_phy(struct emac_instance *dev)
{
- struct device_node *np = dev->ofdev->node;
+ struct device_node *np = dev->ofdev->dev.of_node;
struct net_device *ndev = dev->ndev;
u32 phy_map, adv;
int i;
@@ -2514,7 +2515,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
static int __devinit emac_init_config(struct emac_instance *dev)
{
- struct device_node *np = dev->ofdev->node;
+ struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
unsigned int plen;
const char *pm, *phy_modes[] = {
@@ -2723,7 +2724,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
{
struct net_device *ndev;
struct emac_instance *dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node **blist = NULL;
int err, i;
@@ -2810,7 +2811,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
err = mal_register_commac(dev->mal, &dev->commac);
if (err) {
printk(KERN_ERR "%s: failed to register with mal %s!\n",
- np->full_name, dev->mal_dev->node->full_name);
+ np->full_name, dev->mal_dev->dev.of_node->full_name);
goto err_rel_deps;
}
dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
@@ -2995,9 +2996,11 @@ static struct of_device_id emac_match[] =
MODULE_DEVICE_TABLE(of, emac_match);
static struct of_platform_driver emac_driver = {
- .name = "emac",
- .match_table = emac_match,
-
+ .driver = {
+ .name = "emac",
+ .owner = THIS_MODULE,
+ .of_match_table = emac_match,
+ },
.probe = emac_probe,
.remove = emac_remove,
};
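emac_open() switches from the old dev_mc_add(ndev, addr, len, glbl) call to dev_mc_add_global() for the pause-frame multicast address. A minimal sketch of the new call; the address shown is the standard 802.3x pause destination, which the driver's default_mcast_addr is assumed to match:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static u8 pause_mcast[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };

/* Add the pause destination to the device's global multicast list. */
static int example_add_pause_addr(struct net_device *ndev)
{
	return dev_mc_add_global(ndev, pause_mcast);	/* old 4-argument dev_mc_add() is gone */
}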
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 775c850a425..3995fafc1e0 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -33,7 +33,7 @@ static void emac_desc_dump(struct emac_instance *p)
int i;
printk("** EMAC %s TX BDs **\n"
" tx_cnt = %d tx_slot = %d ack_slot = %d\n",
- p->ofdev->node->full_name,
+ p->ofdev->dev.of_node->full_name,
p->tx_cnt, p->tx_slot, p->ack_slot);
for (i = 0; i < NUM_TX_BUFF / 2; ++i)
printk
@@ -49,7 +49,7 @@ static void emac_desc_dump(struct emac_instance *p)
printk("** EMAC %s RX BDs **\n"
" rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
" rx_sg_skb = 0x%p\n",
- p->ofdev->node->full_name,
+ p->ofdev->dev.of_node->full_name,
p->rx_slot, p->commac.flags, p->rx_skb_size,
p->rx_sync_size, p->rx_sg_skb);
for (i = 0; i < NUM_RX_BUFF / 2; ++i)
@@ -77,7 +77,8 @@ static void emac_mac_dump(struct emac_instance *dev)
"MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
"RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
"IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
- dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1),
+ dev->ofdev->dev.of_node->full_name,
+ in_be32(&p->mr0), in_be32(&p->mr1),
in_be32(&p->tmr0), in_be32(&p->tmr1),
in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
@@ -128,7 +129,7 @@ static void emac_mal_dump(struct mal_instance *mal)
"CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
"TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
"RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
- mal->ofdev->node->full_name,
+ mal->ofdev->dev.of_node->full_name,
get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
get_mal_dcrn(mal, MAL_IER),
get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h
index b631842ec8d..e596c77ccdf 100644
--- a/drivers/net/ibm_newemac/debug.h
+++ b/drivers/net/ibm_newemac/debug.h
@@ -53,8 +53,8 @@ extern void emac_dbg_dump_all(void);
#endif
-#define EMAC_DBG(dev, name, fmt, arg...) \
- printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg)
+#define EMAC_DBG(d, name, fmt, arg...) \
+ printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
#if DBG_LEVEL > 0
# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 5b3d94419fe..fcff9e0bd38 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -538,11 +538,11 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->index = index;
mal->ofdev = ofdev;
- mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1;
+ mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
MAL_DBG(mal, "probe" NL);
- prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
+ prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-tx-chans property!\n",
@@ -552,7 +552,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->num_tx_chans = prop[0];
- prop = of_get_property(ofdev->node, "num-rx-chans", NULL);
+ prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-rx-chans property!\n",
@@ -562,14 +562,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->num_rx_chans = prop[0];
- dcr_base = dcr_resource_start(ofdev->node, 0);
+ dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
if (dcr_base == 0) {
printk(KERN_ERR
"mal%d: can't find DCR resource!\n", index);
err = -ENODEV;
goto fail;
}
- mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100);
+ mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
if (!DCR_MAP_OK(mal->dcr_host)) {
printk(KERN_ERR
"mal%d: failed to map DCRs !\n", index);
@@ -577,28 +577,28 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail;
}
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
#else
printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
- ofdev->node->full_name);
+ ofdev->dev.of_node->full_name);
err = -ENODEV;
goto fail;
#endif
}
- mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
- mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
- mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2);
+ mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
+ mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
mal->txde_irq = mal->rxde_irq = mal->serr_irq;
} else {
- mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3);
- mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4);
+ mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
+ mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
@@ -629,7 +629,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
/* Current Axon is not happy with priority being non-0, it can
* deadlock, fix it up here
*/
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon"))
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
/* Apply configuration */
@@ -701,7 +701,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
printk(KERN_INFO
"MAL v%d %s, %d TX channels, %d RX channels\n",
- mal->version, ofdev->node->full_name,
+ mal->version, ofdev->dev.of_node->full_name,
mal->num_tx_chans, mal->num_rx_chans);
/* Advertise this instance to the rest of the world */
@@ -790,9 +790,11 @@ static struct of_device_id mal_platform_match[] =
};
static struct of_platform_driver mal_of_driver = {
- .name = "mcmal",
- .match_table = mal_platform_match,
-
+ .driver = {
+ .name = "mcmal",
+ .owner = THIS_MODULE,
+ .of_match_table = mal_platform_match,
+ },
.probe = mal_probe,
.remove = mal_remove,
};
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 5b90d34c845..108919bcdf1 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -103,7 +103,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
/* Check if we need to attach to a RGMII */
if (input < 0 || !rgmii_valid_mode(mode)) {
printk(KERN_ERR "%s: unsupported settings !\n",
- ofdev->node->full_name);
+ ofdev->dev.of_node->full_name);
return -ENODEV;
}
@@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%s: input %d in %s mode\n",
- ofdev->node->full_name, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
++dev->users;
@@ -231,7 +231,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit rgmii_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
struct resource regs;
int rc;
@@ -264,11 +264,11 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
}
/* Check for RGMII flags */
- if (of_get_property(ofdev->node, "has-mdio", NULL))
+ if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
- if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon"))
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
@@ -279,7 +279,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
printk(KERN_INFO
"RGMII %s initialized with%s MDIO support\n",
- ofdev->node->full_name,
+ ofdev->dev.of_node->full_name,
(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
wmb();
@@ -319,9 +319,11 @@ static struct of_device_id rgmii_match[] =
};
static struct of_platform_driver rgmii_driver = {
- .name = "emac-rgmii",
- .match_table = rgmii_match,
-
+ .driver = {
+ .name = "emac-rgmii",
+ .owner = THIS_MODULE,
+ .of_match_table = rgmii_match,
+ },
.probe = rgmii_probe,
.remove = rgmii_remove,
};
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb55..044637144c4 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -57,7 +57,8 @@ void tah_reset(struct of_device *ofdev)
--n;
if (unlikely(!n))
- printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name);
+ printk(KERN_ERR "%s: reset timeout\n",
+ ofdev->dev.of_node->full_name);
/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
out_be32(&p->mr,
@@ -89,7 +90,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit tah_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
struct resource regs;
int rc;
@@ -127,7 +128,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
tah_reset(ofdev);
printk(KERN_INFO
- "TAH %s initialized\n", ofdev->node->full_name);
+ "TAH %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
return 0;
@@ -165,9 +166,11 @@ static struct of_device_id tah_match[] =
};
static struct of_platform_driver tah_driver = {
- .name = "emac-tah",
- .match_table = tah_match,
-
+ .driver = {
+ .name = "emac-tah",
+ .owner = THIS_MODULE,
+ .of_match_table = tah_match,
+ },
.probe = tah_probe,
.remove = tah_remove,
};
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 1f038f808ab..046dcd069c4 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -121,13 +121,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
dev->mode = *mode;
printk(KERN_NOTICE "%s: bridge in %s mode\n",
- ofdev->node->full_name, zmii_mode_name(dev->mode));
+ ofdev->dev.of_node->full_name,
+ zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
if (*mode != PHY_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%s: invalid mode %d specified for input %d\n",
- ofdev->node->full_name, *mode, input);
+ ofdev->dev.of_node->full_name, *mode, input);
mutex_unlock(&dev->lock);
return -EINVAL;
}
@@ -233,7 +234,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit zmii_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
struct resource regs;
int rc;
@@ -273,7 +274,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
out_be32(&dev->base->fer, 0);
printk(KERN_INFO
- "ZMII %s initialized\n", ofdev->node->full_name);
+ "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
dev_set_drvdata(&ofdev->dev, dev);
@@ -312,9 +313,11 @@ static struct of_device_id zmii_match[] =
};
static struct of_platform_driver zmii_driver = {
- .name = "emac-zmii",
- .match_table = zmii_match,
-
+ .driver = {
+ .name = "emac-zmii",
+ .owner = THIS_MODULE,
+ .of_match_table = zmii_match,
+ },
.probe = zmii_probe,
.remove = zmii_remove,
};
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf3340c1..294ccfb427c 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
int camcnt;
camentry_t cams[16];
u32 cammask;
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 rcrval;
/* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- netdev_for_each_mc_addr(mcptr, dev) {
- putcam(cams, &camcnt, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ putcam(cams, &camcnt, ha->addr);
if (camcnt == 16)
break;
}
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
/* if multicast addresses are still left or ALLMULTI is set, set the multicast
enable bit */
- if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL))
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
rcrval |= RCREG_AMC;
/* promiscuous mode ? */
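The ibmlana change above is one instance of a tree-wide multicast-list conversion that also appears in ibmveth, igb and igbvf later in this diff: struct dev_mc_list and its dmi_addr field give way to struct netdev_hw_addr and ha->addr, with netdev_for_each_mc_addr() hiding the list layout and netdev_mc_count() replacing pointer checks on the leftover list. A small sketch of the new-style walk, assuming a caller-supplied destination array (copy_mc_addrs and its limit are hypothetical, not part of the driver):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Copy at most max_entries multicast addresses out of the device's
 * filter list; returns how many were copied. */
static int copy_mc_addrs(struct net_device *dev, u8 (*dst)[ETH_ALEN],
			 int max_entries)
{
	struct netdev_hw_addr *ha;	/* replaces struct dev_mc_list */
	int n = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (n == max_entries)
			break;
		memcpy(dst[n++], ha->addr, ETH_ALEN);	/* was ->dmi_addr */
	}
	/*
	 * The old code tested the leftover list pointer to detect an
	 * overflowing CAM; the new-API equivalent is the comparison
	 * netdev_mc_count(dev) > n, as in the hunk above.
	 */
	return n;
}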
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8ee25..7acb3edc47e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
@@ -199,7 +200,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
if(!pool->skbuff) {
kfree(pool->dma_addr);
@@ -210,7 +211,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- memset(pool->skbuff, 0, sizeof(void*) * pool->size);
memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
for(i = 0; i < pool->size; ++i) {
@@ -957,7 +957,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
} else {
tx_packets++;
tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
}
if (!used_bounce)
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1421,7 +1421,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
if (!entry)
ibmveth_error_printk("Cannot create adapter proc entry");
}
- return;
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
@@ -1589,6 +1588,12 @@ static struct kobj_type ktype_veth_pool = {
.default_attrs = veth_pool_attrs,
};
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
@@ -1596,6 +1601,10 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
static struct vio_driver ibmveth_driver = {
.id_table = ibmveth_device_table,
.probe = ibmveth_probe,
@@ -1604,6 +1613,7 @@ static struct vio_driver ibmveth_driver = {
.driver = {
.name = ibmveth_driver_name,
.owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
}
};
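The ibmveth hunks above add legacy suspend/resume support through a struct dev_pm_ops hung off the bus driver's .driver.pm pointer; on resume the driver simply replays its interrupt handler so any buffers queued while the partition was suspended get processed. A minimal sketch of the same wiring for a hypothetical driver that stores its net_device as drvdata (as ibmveth does); netif_device_attach() here stands in for whatever reinitialization the real hardware needs:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

static int foo_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	/* bring the interface back; a real driver may instead replay
	 * its interrupt handler, as ibmveth_resume() does above */
	netif_device_attach(netdev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.resume	= foo_resume,
};
/* hooked up from the bus driver:  .driver = { ..., .pm = &foo_pm_ops } */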
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index f4081c0a2d9..ab9f675c5b8 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -182,7 +182,6 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed77c7..86438b59fa2 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
default:
return -E1000_ERR_MAC_INIT;
break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
/* reset */
- if (mac->type == e1000_82580)
+ if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if (hw->mac.type == e1000_82580) {
+ } else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = igb_phy_hw_reset;
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1205,8 +1214,6 @@ void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
igb_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1445,7 +1452,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1453,15 +1459,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1480,7 +1477,6 @@ out:
**/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1488,15 +1484,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99c193..cbd1e1259e4 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_OFF1_ON2))
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b696c..24d9be64342 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCIE_LINK_STATUS 0x12
#define PCIE_DEVICE_CONTROL2 0x28
-
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#define PHY_REVISION_MASK 0xFFFFFFF0
@@ -629,6 +625,7 @@
#define M88E1111_I_PHY_ID 0x01410CC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f5192..cb8db78b1a0 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/netdevice.h>
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
+ e1000_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
@@ -502,14 +508,11 @@ struct e1000_hw {
u8 revision_id;
};
-#ifdef DEBUG
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
+extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
- printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(format, arg...)
-#endif
-#endif
+ netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010e402..90c5e01e923 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
u16 pcie_link_status;
bus->type = e1000_bus_type_pci_express;
- bus->speed = e1000_bus_speed_2500;
ret_val = igb_read_pcie_cap_reg(hw,
- PCIE_LINK_STATUS,
- &pcie_link_status);
- if (ret_val)
+ PCI_EXP_LNKSTA,
+ &pcie_link_status);
+ if (ret_val) {
bus->width = e1000_bus_width_unknown;
- else
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT);
+ }
reg = rd32(E1000_STATUS);
bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
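igb_get_bus_info_pcie() above drops the driver-private PCIE_LINK_* defines (removed from e1000_defines.h earlier in this diff) in favour of the standard PCI_EXP_LNKSTA_* masks, and learns to report 5.0 GT/s links for gen2-capable parts. A compact sketch of the same decode applied to a raw Link Status value (the function name and the MT/s encoding are illustrative):

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Decode a raw PCIe Link Status word (as read from PCI_EXP_LNKSTA in
 * the PCI Express capability) into lane count and link speed in MT/s. */
static void decode_lnksta(u16 lnksta, unsigned int *lanes, unsigned int *mts)
{
	*lanes = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;

	switch (lnksta & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		*mts = 2500;	/* the only speed the old code assumed */
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		*mts = 5000;	/* gen2, negotiable by 82580/i350 parts */
		break;
	default:
		*mts = 0;	/* unknown or future speed */
		break;
	}
}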
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b822a5..6e63d9a7fc7 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_64 64 /* Used for packet split */
#define IGB_RXBUFFER_128 128 /* Used for packet split */
#define IGB_RXBUFFER_1024 1024
#define IGB_RXBUFFER_2048 2048
@@ -140,8 +141,10 @@ struct igb_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
u16 gso_segs;
+ union skb_shared_tx shtx;
+ u8 mapped_as_page;
};
/* RX */
struct {
@@ -185,7 +188,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
- struct pci_dev *pdev; /* pci device for dma mapping */
+ struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
@@ -323,6 +326,7 @@ struct igb_adapter {
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -336,7 +340,6 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 74303849010..f2ebf927e4b 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
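For reference, a PATTERN_TEST row in these tables names a register (plus an offset stride and array length), a read-back mask and a writable-bits mask; the ethtool self-test then pushes a few fixed patterns through the register and checks that the masked read-back matches. A sketch of that core check under those assumptions (accessor names and the restore policy are illustrative; the driver's own helper additionally reports which register failed):

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Core of a PATTERN_TEST: write a handful of patterns through the
 * writable bits, read back, and require the masked result to match. */
static bool pattern_test(void __iomem *reg, u32 mask, u32 writable)
{
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	u32 before = readl(reg);
	bool ok = true;
	int i;

	for (i = 0; i < ARRAY_SIZE(patterns); i++) {
		u32 wr = patterns[i] & writable;

		writel(wr, reg);
		if ((readl(reg) & mask) != (wr & mask)) {
			ok = false;
			break;
		}
	}
	writel(before, reg);	/* put the original value back */
	return ok;
}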
/* 82580 reg test */
static struct igb_reg_test reg_test_82580[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
u32 i, toggle;
switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
case e1000_82580:
ics_mask = 0x77DCFED5;
break;
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
default:
ics_mask = 0x7FFFFFFF;
break;
@@ -1344,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->pdev = adapter->pdev;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1358,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->pdev = adapter->pdev;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1554,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* verify contents of skb */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98c..3881918f538 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 4; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ int n = 0;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_buffer *buffer_info;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOCIStDDM Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO " %5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ rx_ring->rx_buffer_len, true);
+ if (rx_ring->rx_buffer_len
+ < IGB_RXBUFFER_1024)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+
/**
* igb_read_clock - read raw cycle counter (to be used by time counter)
*/
@@ -223,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
return stamp;
}
-#ifdef DEBUG
/**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
* used by hardware layer to print debugging information
**/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
struct igb_adapter *adapter = hw->back;
- return adapter->netdev->name;
-}
-
-/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
- char buffer[160])
-{
- cycle_t hw = adapter->cycles.read(&adapter->cycles);
- struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
- struct timespec sys;
- struct timespec delta;
- getnstimeofday(&sys);
-
- delta = timespec_sub(nic, sys);
-
- sprintf(buffer,
- "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
- hw,
- (long)nic.tv_sec, nic.tv_nsec,
- (long)sys.tv_sec, sys.tv_nsec,
- (long)delta.tv_sec, delta.tv_nsec);
-
- return buffer;
+ return adapter->netdev;
}
-#endif
/**
* igb_init_module - Driver Registration Routine
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
case e1000_82575:
case e1000_82580:
+ case e1000_i350:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = 1 << msix_vector;
break;
case e1000_82580:
+ case e1000_i350:
/* 82580 uses the same table-based approach as 82576 but has fewer
entries; as a result we carry over for queues greater than 4. */
if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
+ case e1000_i350:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -743,7 +1054,6 @@ msi_only:
out:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
- return;
}
/**
@@ -1253,6 +1563,7 @@ void igb_reset(struct igb_adapter *adapter)
* To take effect CTRL.RST is required.
*/
switch (mac->type) {
+ case e1000_i350:
case e1000_82580:
pba = rd32(E1000_RXPBS);
pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1727,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -1656,6 +1967,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
"unknown"),
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
(hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2138,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
memset(&adapter->cycles, 0, sizeof(adapter->cycles));
adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2409,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2422,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev,
- tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2122,7 +2436,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->buffer_info);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
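The descriptor-ring setup and teardown hunks in this file (and the matching igbvf changes later) swap the legacy pci_alloc_consistent()/pci_free_consistent() and pci_map_*/pci_unmap_* calls for the generic DMA API keyed on struct device, which also makes the GFP mask explicit. A condensed sketch of the coherent-allocation half of that pattern (struct foo_ring and the helpers are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct foo_ring {			/* hypothetical descriptor ring */
	void		*desc;		/* CPU address of the ring */
	dma_addr_t	dma;		/* bus address for the NIC */
	unsigned int	size;		/* length in bytes, 4 KiB aligned */
};

static int foo_ring_alloc(struct device *dev, struct foo_ring *ring)
{
	/* dma_alloc_coherent() takes the generic struct device and an
	 * explicit gfp mask; pci_alloc_consistent(pdev, ...) implied
	 * GFP_ATOMIC and required the pci_dev. */
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
					GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void foo_ring_free(struct device *dev, struct foo_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;
}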
@@ -2246,7 +2560,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2575,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2275,8 +2591,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- dev_err(&pdev->dev, "Unable to allocate memory for "
- "the receive descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
@@ -2339,6 +2655,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
/* 82575 and 82576 supports 2 RSS queues for VMDq */
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
num_rx_queues = 1;
shift = 0;
@@ -2590,6 +2907,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2968,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;
- pci_free_consistent(tx_ring->pdev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2674,15 +2993,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(tx_ring->pdev,
+ dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(tx_ring->pdev,
+ dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2753,8 +3072,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;
- pci_free_consistent(rx_ring->pdev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2790,10 +3109,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -2802,10 +3121,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
- pci_unmap_page(rx_ring->pdev,
+ dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -2876,7 +3195,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
int i;
@@ -2893,8 +3212,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3397,8 +3716,6 @@ set_itr_now:
q_vector->itr_val = new_itr;
q_vector->set_itr = 1;
}
-
- return;
}
#define IGB_TX_FLAGS_CSUM 0x00000001
@@ -3493,7 +3810,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3544,7 +3861,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3578,59 +3895,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
- struct pci_dev *pdev = tx_ring->pdev;
- unsigned int len = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ unsigned int hlen = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
+ u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = len;
+ BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+ buffer_info->length = hlen;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+ unsigned int len = frag->size;
count++;
i++;
if (i == tx_ring->count)
i = 0;
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
-
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+ tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ /* multiply data chunks by size of headers */
+ tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+ tx_ring->buffer_info[i].gso_segs = gso_segs;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
@@ -3868,6 +4187,8 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
}
@@ -3920,6 +4241,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
* i.e. RXBUFFER_2048 --> size-4096 slab
*/
+ if (adapter->hw.mac.type == e1000_82580)
+ max_frame += IGB_TS_HDR_LEN;
+
if (max_frame <= IGB_RXBUFFER_1024)
rx_buffer_len = IGB_RXBUFFER_1024;
else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4251,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
else
rx_buffer_len = IGB_RXBUFFER_128;
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+ if ((adapter->hw.mac.type == e1000_82580) &&
+ (rx_buffer_len == IGB_RXBUFFER_128))
+ rx_buffer_len += IGB_RXBUFFER_64;
+
if (netif_running(netdev))
igb_down(adapter);
@@ -4955,22 +5287,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer: pointer to igb_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
- union skb_shared_tx *shtx = skb_tx(skb);
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!shtx->hardware) ||
+ if (likely(!buffer_info->shtx.hardware) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -4978,7 +5309,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
@@ -4993,7 +5324,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct net_device *netdev = tx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
- struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int i, eop, count = 0;
@@ -5009,19 +5339,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
- skb = buffer_info->skb;
- if (skb) {
- unsigned int segs, bytecount;
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->bytecount;
/* gso_segs is currently only valid for tcp */
- segs = buffer_info->gso_segs;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
-
- igb_tx_hwtstamp(q_vector, skb);
+ total_packets += buffer_info->gso_segs;
+ igb_tx_hwtstamp(q_vector, buffer_info);
}
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5384,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(&tx_ring->pdev->dev,
+ dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5140,10 +5463,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5484,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
- return;
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (staterr & E1000_RXDADV_STAT_TSIP) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
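The RX timestamp rework above distinguishes two sources: the classic RXSTMPL/RXSTMPH register pair, and, when E1000_RXDADV_STAT_TSIP is set (enabled for 82580 in the hwtstamp ioctl hunk further down), a timestamp the MAC prepends to the packet itself as an IGB_TS_HDR_LEN (16) byte header that must be stripped with skb_pull() before the stack sees the frame. A sketch of just the in-packet parse, with the word offsets taken from the hunk above (names are illustrative; the raw value still has to go through the driver's timecounter conversion):

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/types.h>

#define FOO_TS_HDR_LEN	16	/* mirrors IGB_TS_HDR_LEN */

/* Extract a hardware timestamp prepended to the packet data and strip
 * the header so the payload starts at skb->data again. */
static u64 foo_pull_inline_timestamp(struct sk_buff *skb)
{
	__le32 *stamp = (__le32 *)skb->data;
	u64 regval;

	regval  = le32_to_cpu(stamp[2]);		/* low 32 bits */
	regval |= (u64)le32_to_cpu(stamp[3]) << 32;	/* high 32 bits */

	skb_pull(skb, FOO_TS_HDR_LEN);
	return regval;
}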
@@ -5190,7 +5518,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5230,9 +5558,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;
if (buffer_info->dma) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5242,11 +5570,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -5275,7 +5603,8 @@ send_up:
goto next_desc;
}
- igb_rx_hwtstamp(q_vector, staterr, skb);
+ if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+ igb_rx_hwtstamp(q_vector, staterr, skb);
total_bytes += skb->len;
total_packets++;
@@ -5350,12 +5679,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(rx_ring->pdev, buffer_info->page,
+ dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->page_dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5373,12 +5702,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_single(rx_ring->pdev,
+ buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5555,6 +5884,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping in all packets as
+ * long as one rx filter was configured.
+ */
+ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ }
+
/* enable/disable TX */
regval = rd32(E1000_TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6470,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- /* replication is not supported for 82575 */
- if (hw->mac.type == e1000_82575)
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
return;
-
- /* enable replication vlan tag stripping */
- reg = rd32(E1000_RPLOLR);
- reg |= E1000_RPLOLR_STRVLAN;
- wr32(E1000_RPLOLR, reg);
-
- /* notify HW that the MAC is adding vlan tags */
- reg = rd32(E1000_DTXCTL);
- reg |= E1000_DTXCTL_VLAN_ADDED;
- wr32(E1000_DTXCTL, reg);
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = rd32(E1000_DTXCTL);
+ reg |= E1000_DTXCTL_VLAN_ADDED;
+ wr32(E1000_DTXCTL, reg);
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = rd32(E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ wr32(E1000_RPLOLR, reg);
+ case e1000_i350:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
if (adapter->vfs_allocated_count) {
igb_vmdq_set_loopback_pf(hw, true);
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 8afff07ff55..103b3aa1afc 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -390,8 +390,6 @@ static void igbvf_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int igbvf_set_wol(struct net_device *netdev,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5..5e2b2a8c56c 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
#define DRV_VERSION "1.0.0-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
+struct pm_qos_request_list *igbvf_driver_pm_qos_req;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -164,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev, buffer_info->page,
+ dma_map_page(&pdev->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!buffer_info->skb) {
@@ -178,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -268,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
if (!skb_shinfo(skb)->nr_frags) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -369,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -438,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -480,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -549,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -574,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->rx_ps_hdr_size){
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
}
@@ -592,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page) {
if (buffer_info->page_dma)
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
buffer_info->page_dma = 0;
@@ -1398,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list = NULL;
int i;
@@ -1413,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -2104,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -2127,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
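The igbvf hunks above replace the legacy pci_map_*/pci_unmap_* wrappers with the generic DMA API, operating on &pdev->dev instead of the pci_dev. A minimal sketch of the new mapping pattern, assuming a pci_dev *pdev and a driver-owned buffer buf of len bytes:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int sketch_tx_map(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t dma;

		dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma))
			return -ENOMEM;	/* caller unwinds, as in the hunk above */

		/* ... hand dma to the hardware and wait for completion ... */

		dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
		return 0;
	}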
@@ -2644,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -2899,7 +2902,7 @@ static int __init igbvf_init_module(void)
printk(KERN_INFO "%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
+ igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
return ret;
@@ -2915,7 +2918,8 @@ module_init(igbvf_init_module);
static void __exit igbvf_exit_module(void)
{
pci_unregister_driver(&igbvf_driver);
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
+ pm_qos_remove_request(igbvf_driver_pm_qos_req);
+ igbvf_driver_pm_qos_req = NULL;
}
module_exit(igbvf_exit_module);
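The PM QoS hunks above move from the name-keyed pm_qos_add_requirement() call to the handle-based API used here: pm_qos_add_request() returns a request object that must be kept and later passed to pm_qos_remove_request(). A minimal sketch of that pairing (module and variable names are illustrative):

	#include <linux/module.h>
	#include <linux/pm_qos_params.h>	/* PM QoS header at the time of this series */

	static struct pm_qos_request_list *example_pm_qos_req;

	static int __init example_init(void)
	{
		example_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
							PM_QOS_DEFAULT_VALUE);
		return 0;
	}

	static void __exit example_exit(void)
	{
		pm_qos_remove_request(example_pm_qos_req);
		example_pm_qos_req = NULL;
	}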
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d647c..e3b5e949060 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1503,7 +1503,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
BARRIER();
- dev->trans_start = jiffies;
ip->tx_skbs[produce] = skb; /* Remember skb */
produce = (produce + 1) & 127;
ip->tx_pi = produce;
@@ -1665,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
@@ -1689,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- netdev_for_each_mc_addr(dmi, dev) {
- char *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addr = ha->addr;
if (!(*addr & 1))
continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9fb027..72e3d2da9e9 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
void __iomem *ioaddr = ipg_ioaddr(dev);
- struct dev_mc_list *mc_list_ptr;
+ struct netdev_hw_addr *ha;
unsigned int hashindex;
u32 hashtable[2];
u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- netdev_for_each_mc_addr(mc_list_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate CRC result for each multicast address. */
- hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
+ hashindex = crc32_le(0xffffffff, ha->addr,
ETH_ALEN);
/* Use only the least significant 6 bits. */
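A hedged sketch of the hashing scheme this loop implements: CRC each multicast address, keep the low 6 bits, and use the result as a bit index into the 64-bit filter table (the helper name and the exact bit-set step are illustrative, not copied from ipg.c):

	#include <linux/types.h>
	#include <linux/crc32.h>
	#include <linux/if_ether.h>

	static void mc_hash_set(const u8 *addr, u32 hashtable[2])
	{
		u32 index = crc32_le(0xffffffff, addr, ETH_ALEN) & 0x3f;

		hashtable[index >> 5] |= 1 << (index & 0x1f);
	}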
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
container_of(work, struct ipg_nic_private, task.work);
struct net_device *dev = sp->dev;
- IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
-
/*
* Acknowledge HostError interrupt by resetting
* IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
netif_stop_queue(dev);
- IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
- IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
- IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
IPG_DUMPTFDLIST(dev);
do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541bb55..6ce027355fc 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
/* GMII based PHY IDs */
#define NS 0x2000
#define MARVELL 0x0141
-#define ICPLUS_PHY 0x243
+#define ICPLUS_PHY 0x243
/* NIC Physical Layer Device MII register fields. */
#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
};
/* Ethernet MIB statistic register offsets. */
-#define IPG_OCTETRCVOK 0xA8
+#define IPG_OCTETRCVOK 0xA8
#define IPG_MCSTOCTETRCVDOK 0xAC
#define IPG_BCSTOCTETRCVOK 0xB0
#define IPG_FRAMESRCVDOK 0xB4
#define IPG_MCSTFRAMESRCVDOK 0xB8
#define IPG_BCSTFRAMESRCVDOK 0xBE
#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRRORS 0xC8
-#define IPG_INRANGELENGTHERRORS 0xCA
-#define IPG_FRAMECHECKSEQERRORS 0xCC
-#define IPG_FRAMESLOSTRXERRORS 0xCE
-#define IPG_OCTETXMTOK 0xD0
+#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_INRANGELENGTHERRORS 0xCA
+#define IPG_FRAMECHECKSEQERRORS 0xCC
+#define IPG_FRAMESLOSTRXERRORS 0xCE
+#define IPG_OCTETXMTOK 0xD0
#define IPG_MCSTOCTETXMTOK 0xD4
#define IPG_BCSTOCTETXMTOK 0xD8
#define IPG_FRAMESXMTDOK 0xDC
#define IPG_MCSTFRAMESXMTDOK 0xE0
-#define IPG_FRAMESWDEFERREDXMT 0xE4
+#define IPG_FRAMESWDEFERREDXMT 0xE4
#define IPG_LATECOLLISIONS 0xE8
#define IPG_MULTICOLFRAMES 0xEC
#define IPG_SINGLECOLFRAMES 0xF0
#define IPG_BCSTFRAMESXMTDOK 0xF6
-#define IPG_CARRIERSENSEERRORS 0xF8
+#define IPG_CARRIERSENSEERRORS 0xF8
#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
-#define IPG_FRAMESABORTXSCOLLS 0xFC
-#define IPG_FRAMESWEXDEFERRAL 0xFE
+#define IPG_FRAMESABORTXSCOLLS 0xFC
+#define IPG_FRAMESWEXDEFERRAL 0xFE
/* RMON statistic register offsets. */
#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
-#define IPG_ETHERSTATSFRAGMENTS 0x12C
-#define IPG_ETHERSTATSJABBERS 0x130
+#define IPG_ETHERSTATSFRAGMENTS 0x12C
+#define IPG_ETHERSTATSJABBERS 0x130
#define IPG_ETHERSTATSOCTETS 0x134
#define IPG_ETHERSTATSPKTS 0x138
#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
#define IPG_ETHERSTATSDROPEVENTS 0xCE
/* Serial EEPROM offsets */
-#define IPG_EEPROM_CONFIGPARAM 0x00
+#define IPG_EEPROM_CONFIGPARAM 0x00
#define IPG_EEPROM_ASICCTRL 0x01
#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
-#define IPG_EEPROM_SUBSYSTEMID 0x03
+#define IPG_EEPROM_SUBSYSTEMID 0x03
#define IPG_EEPROM_STATIONADDRESS0 0x10
#define IPG_EEPROM_STATIONADDRESS1 0x11
#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
/* IOBaseAddress */
#define IPG_PIB_RSVD_MASK 0xFFFFFE01
-#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
-#define IPG_PIB_IOBASEADDRIND 0x00000001
+#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
+#define IPG_PIB_IOBASEADDRIND 0x00000001
/* MemBaseAddress */
#define IPG_PMB_RSVD_MASK 0xFFFFFE07
-#define IPG_PMB_MEMBASEADDRIND 0x00000001
+#define IPG_PMB_MEMBASEADDRIND 0x00000001
#define IPG_PMB_MEMMAPTYPE 0x00000006
#define IPG_PMB_MEMMAPTYPE0 0x00000002
#define IPG_PMB_MEMMAPTYPE1 0x00000004
-#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
+#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
/* ConfigStatus */
#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
/* TFDList, TFC */
#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
-#define IPG_TFC_FRAMEID 0x000000000000FFFF
+#define IPG_TFC_FRAMEID 0x000000000000FFFF
#define IPG_TFC_WORDALIGN 0x0000000000030000
#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
#define IPG_TFC_TXINDICATE 0x0000000000400000
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
#define IPG_TFC_FRAGCOUNT 0x000000000F000000
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
-#define IPG_TFC_TFDDONE 0x0000000080000000
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
+#define IPG_TFC_TFDDONE 0x0000000080000000
#define IPG_TFC_VID 0x00000FFF00000000
#define IPG_TFC_CFI 0x0000100000000000
#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
/* TFDList, FragInfo */
#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
/* RFD data structure masks. */
/* RFDList, RFS */
#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
#define IPG_RFS_RXFCSERROR 0x0000000000080000
#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
#define IPG_RFS_VLANDETECTED 0x0000000000400000
#define IPG_RFS_TCPDETECTED 0x0000000000800000
#define IPG_RFS_TCPERROR 0x0000000001000000
#define IPG_RFS_UDPDETECTED 0x0000000002000000
#define IPG_RFS_UDPERROR 0x0000000004000000
#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
+#define IPG_RFS_IPERROR 0x0000000010000000
#define IPG_RFS_FRAMESTART 0x0000000020000000
#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
+#define IPG_RFS_RFDDONE 0x0000000080000000
#define IPG_RFS_TCI 0x0000FFFF00000000
/* RFDList, FragInfo */
#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
/* I/O Register masks. */
@@ -254,37 +254,37 @@ enum ipg_regs {
/* Statistics Mask */
#define IPG_SM_ALL 0x0FFFFFFF
-#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
-#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
-#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
+#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
+#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
+#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
#define IPG_SM_RXJUMBOFRAMES 0x00000008
#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
-#define IPG_SM_IPCHECKSUMERRORS 0x00000020
+#define IPG_SM_IPCHECKSUMERRORS 0x00000020
#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
#define IPG_SM_INRANGELENGTHERRORS 0x00000200
#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
-#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
-#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
-#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
+#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
+#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
+#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
-#define IPG_SM_LATECOLLISIONS 0x00010000
-#define IPG_SM_MULTICOLFRAMES 0x00020000
-#define IPG_SM_SINGLECOLFRAMES 0x00040000
+#define IPG_SM_LATECOLLISIONS 0x00010000
+#define IPG_SM_MULTICOLFRAMES 0x00020000
+#define IPG_SM_SINGLECOLFRAMES 0x00040000
#define IPG_SM_TXJUMBOFRAMES 0x00080000
#define IPG_SM_CARRIERSENSEERRORS 0x00100000
#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
-#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
+#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
/* Countdown */
#define IPG_CD_RSVD_MASK 0x0700FFFF
#define IPG_CD_COUNT 0x0000FFFF
-#define IPG_CD_COUNTDOWNSPEED 0x01000000
+#define IPG_CD_COUNTDOWNSPEED 0x01000000
#define IPG_CD_COUNTDOWNMODE 0x02000000
-#define IPG_CD_COUNTINTENABLED 0x04000000
+#define IPG_CD_COUNTINTENABLED 0x04000000
/* TxDMABurstThresh */
#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
* Miscellaneous macros.
*/
-/* Marco for printing debug statements. */
+/* Macros for printing debug statements. */
#ifdef IPG_DEBUG
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args)
# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
#else
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
# define IPG_DUMPRFDLIST(args)
# define IPG_DUMPTFDLIST(args)
#endif
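The rewritten macros above use the usual "if (0) printk()" idiom: the format string and arguments are still checked by the compiler in every configuration, while the dead branch is optimized away so no code or side effects remain. A generic sketch with a hypothetical DRV_DBG macro:

	#include <linux/kernel.h>

	#define DRV_DBG(fmt, args...)					\
	do {								\
		if (0)							\
			printk(KERN_DEBUG "drv: " fmt, ##args);	\
	} while (0)

	/* DRV_DBG("bad len %d\n", "oops") now triggers a format warning
	 * even when debugging is compiled out, instead of slipping by. */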
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97345c..25bb2a015e1 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
+config SH_IRDA
+ tristate "SuperH IrDA driver"
+ depends on IRDA && ARCH_SHMOBILE
+ help
+ Say Y here if you want to enable SuperH IrDA devices.
+
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47e279..dfc64537f62 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c815cb..a3cb109006a 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
+ IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
+ IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
if (diff < 0)
diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
}
}
else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
/* Put this entry back in fifo */
st_fifo->head--;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5cbd39d068..a3d696a9456 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -546,7 +546,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 911c082cee5..f940dfa1f7f 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -107,8 +107,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
case 57600:
case 115200:
- quot = (port->clk + (8 * speed)) / (16 * speed)\
- - ANOMALY_05000230;
+ /*
+ * IRDA is not affected by anomaly 05000230, so there is no
+ * need to tweak the divisor like the UART driver (which will
+ * slightly speed up the baud rate on us).
+ */
+ quot = (port->clk + (8 * speed)) / (16 * speed);
do {
udelay(utime);
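The replacement line computes a rounded rather than truncated divisor: adding 8 * speed (half of the 16 * speed divisor) before the integer division rounds to the nearest value. A minimal sketch of the same idiom:

	static unsigned int uart_quot(unsigned int clk, unsigned int speed)
	{
		unsigned int div = 16 * speed;

		return (clk + div / 2) / div;	/* == (clk + 8 * speed) / (16 * speed) */
	}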
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b7e6625ca75..48bd5ec9f29 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1002,8 +1002,6 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
toshoboe_checkstuck (self);
- dev->trans_start = jiffies;
-
/* Check if we need to change the speed */
/* But not now. Wait after transmission if mtt not required */
speed=irda_get_next_speed(skb);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af1661..4441fa3389c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index c0e0bb9401d..5b1036ac38d 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -434,8 +434,6 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
@@ -487,8 +485,6 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1a54f6bb68c..c192c31e4c5 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -556,7 +556,6 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1dcdce0631a..da2705061a6 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -715,8 +715,6 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 00000000000..9a828b06a57
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on sh_sir.c
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * CAUTION
+ *
+ * This driver is still very simple, so the following features are
+ * not supported yet:
+ * - MIR/FIR support
+ * - DMA transfer support
+ * - FIFO mode support
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#define DRIVER_NAME "sh_irda"
+
+#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
+#define __IRDARAM_LEN 0x13FF
+#else
+#define __IRDARAM_LEN 0x1039
+#endif
+
+#define IRTMR 0x1F00 /* Transfer mode */
+#define IRCFR 0x1F02 /* Configuration */
+#define IRCTR 0x1F04 /* IR control */
+#define IRTFLR 0x1F20 /* Transmit frame length */
+#define IRTCTR 0x1F22 /* Transmit control */
+#define IRRFLR 0x1F40 /* Receive frame length */
+#define IRRCTR 0x1F42 /* Receive control */
+#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
+#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
+#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
+#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
+#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
+#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
+#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
+#define CRCCTR 0x1F80 /* CRC engine control */
+#define CRCIR 0x1F86 /* CRC engine input data */
+#define CRCCR 0x1F8A /* CRC engine calculation */
+#define CRCOR 0x1F8E /* CRC engine output data */
+#define FIFOCP 0x1FC0 /* FIFO current pointer */
+#define FIFOFP 0x1FC2 /* FIFO follow pointer */
+#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
+#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
+#define FIFOSEL 0x1FC8 /* FIFO select */
+#define FIFORS 0x1FCA /* FIFO receive status */
+#define FIFORFL 0x1FCC /* FIFO receive frame length */
+#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
+#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
+#define BIFCTL 0x1FD2 /* BUS interface control */
+#define IRDARAM 0x0000 /* IrDA buffer RAM */
+#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
+
+/* IRTMR */
+#define TMD_MASK (0x3 << 14) /* Transfer Mode */
+#define TMD_SIR (0x0 << 14)
+#define TMD_MIR (0x3 << 14)
+#define TMD_FIR (0x2 << 14)
+
+#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
+#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
+#define SIM (1 << 0) /* SIR Interrupt Mask */
+#define xIM_MASK (FIFORIM | MIM | SIM)
+
+/* IRCFR */
+#define RTO_SHIFT 8 /* shift for Receive Timeout */
+#define RTO (0x3 << RTO_SHIFT)
+
+/* IRTCTR */
+#define ARMOD (1 << 15) /* Auto-Receive Mode */
+#define TE (1 << 0) /* Transmit Enable */
+
+/* IRRFLR */
+#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
+
+/* IRRCTR */
+#define RE (1 << 0) /* Receive Enable */
+
+/*
+ * SIRISR, SIRIMR, SIRICR,
+ * MFIRISR, MFIRIMR, MFIRICR
+ */
+#define FRE (1 << 15) /* Frame Receive End */
+#define TROV (1 << 11) /* Transfer Area Overflow */
+#define xIR_9 (1 << 9)
+#define TOT xIR_9 /* for SIR Timeout */
+#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
+#define xIR_8 (1 << 8)
+#define FER xIR_8 /* for SIR Framing Error */
+#define CRCER xIR_8 /* for MIR/FIR CRC error */
+#define FTE (1 << 7) /* Frame Transmit End */
+#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
+
+/* SIRBCR */
+#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
+
+/* CRCCTR */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
+
+/* CRCIR */
+#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
+
+/************************************************************************
+
+
+ enum / structure
+
+
+************************************************************************/
+enum sh_irda_mode {
+ SH_IRDA_NONE = 0,
+ SH_IRDA_SIR,
+ SH_IRDA_MIR,
+ SH_IRDA_FIR,
+};
+
+struct sh_irda_self;
+struct sh_irda_xir_func {
+ int (*xir_fre) (struct sh_irda_self *self);
+ int (*xir_trov) (struct sh_irda_self *self);
+ int (*xir_9) (struct sh_irda_self *self);
+ int (*xir_8) (struct sh_irda_self *self);
+ int (*xir_fte) (struct sh_irda_self *self);
+};
+
+struct sh_irda_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ enum sh_irda_mode mode;
+ spinlock_t lock;
+
+ struct sh_irda_xir_func *xir_func;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ iowrite16(data, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
+{
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ret = ioread16(self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return ret;
+}
+
+static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ unsigned long flags;
+ u16 old, new;
+
+ spin_lock_irqsave(&self->lock, flags);
+ old = ioread16(self->membase + offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ iowrite16(data, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
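A generic sketch of the locked read-modify-write idiom this helper is built around (names are illustrative; the sketch writes back the merged value and skips the write when nothing changes):

	#include <linux/io.h>
	#include <linux/spinlock.h>

	static void rmw16(void __iomem *reg, spinlock_t *lock, u16 mask, u16 set)
	{
		unsigned long flags;
		u16 old, new;

		spin_lock_irqsave(lock, flags);
		old = ioread16(reg);
		new = (old & ~mask) | set;
		if (old != new)
			iowrite16(new, reg);
		spin_unlock_irqrestore(lock, flags);
	}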
+
+/************************************************************************
+
+
+ mode function
+
+
+************************************************************************/
+/*=====================================
+ *
+ * common
+ *
+ *=====================================*/
+static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
+{
+ struct device *dev = &self->ndev->dev;
+
+ sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
+ dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
+}
+
+static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
+{
+ struct device *dev = &self->ndev->dev;
+
+ if (SH_IRDA_SIR != self->mode)
+ interval = 0;
+
+ if (interval < 0 || interval > 2) {
+ dev_err(dev, "unsupported timeout interval\n");
+ return -EINVAL;
+ }
+
+ sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
+ return 0;
+}
+
+static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 val;
+
+ if (baudrate < 0)
+ return 0;
+
+ if (SH_IRDA_SIR != self->mode) {
+ dev_err(dev, "it is not SIR mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Baud rate (bits/s) =
+ * (48 MHz / 26) / (baud rate counter value + 1) x 16
+ */
+ val = (48000000 / 26 / 16 / baudrate) - 1;
+ dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
+
+ sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
+
+ return 0;
+}
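As a worked example of the formula in the comment above (integer division at each step), requesting 9600 baud gives:

	val = 48000000 / 26 / 16 / 9600 - 1
	    = 1846153 / 16 / 9600 - 1
	    = 115384 / 9600 - 1
	    = 12 - 1
	    = 11 (0x0B)

so the counter runs at (48 MHz / 26) / ((11 + 1) * 16) ≈ 9615 bit/s, within about 0.2% of the requested rate.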
+
+static int xir_get_rcv_length(struct sh_irda_self *self)
+{
+ return RFL_MASK & sh_irda_read(self, IRRFLR);
+}
+
+/*=====================================
+ *
+ * NONE MODE
+ *
+ *=====================================*/
+static int xir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame recv\n");
+ return 0;
+}
+
+static int xir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: buffer ram over\n");
+ return 0;
+}
+
+static int xir_9(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: time over\n");
+ return 0;
+}
+
+static int xir_8(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: framing error\n");
+ return 0;
+}
+
+static int xir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame transmit end\n");
+ return 0;
+}
+
+static struct sh_irda_xir_func xir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * MIR/FIR MODE
+ *
+ * MIR/FIR are not supported now
+ *=====================================*/
+static struct sh_irda_xir_func mfir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * SIR MODE
+ *
+ *=====================================*/
+static int sir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 data16;
+ u8 *data = (u8 *)&data16;
+ int len = xir_get_rcv_length(self);
+ int i, j;
+
+ if (len > IRDARAM_LEN)
+ len = IRDARAM_LEN;
+
+ dev_dbg(dev, "frame recv length = %d\n", len);
+
+ for (i = 0; i < len; i++) {
+ j = i % 2;
+ if (!j)
+ data16 = sh_irda_read(self, IRDARAM + i);
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, data[j]);
+ }
+ self->ndev->last_rx = jiffies;
+
+ sh_irda_rcv_ctrl(self, 1);
+
+ return 0;
+}
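A hedged sketch of the word-to-byte handling in the receive loop above: one 16-bit read serves two byte-wise consumers, and which byte comes first follows the CPU's native endianness because the u16 is aliased through a u8 pointer (read_hw_word() and consume_byte() are hypothetical stand-ins):

	#include <linux/types.h>

	u16 word = 0;
	u8 *bytes = (u8 *)&word;
	int i;

	for (i = 0; i < len; i++) {
		if (!(i % 2))
			word = read_hw_word(i);	/* new word at every even offset */
		consume_byte(bytes[i % 2]);
	}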
+
+static int sir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "buffer ram over\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_tot(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "time over\n");
+ sh_irda_set_baudrate(self, 9600);
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fer(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "framing error\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_dbg(dev, "frame transmit end\n");
+ netif_wake_queue(self->ndev);
+
+ return 0;
+}
+
+static struct sh_irda_xir_func sir_func = {
+ .xir_fre = sir_fre,
+ .xir_trov = sir_trov,
+ .xir_9 = sir_tot,
+ .xir_8 = sir_fer,
+ .xir_fte = sir_fte,
+};
+
+static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
+{
+ struct device *dev = &self->ndev->dev;
+ struct sh_irda_xir_func *func;
+ const char *name;
+ u16 data;
+
+ switch (mode) {
+ case SH_IRDA_SIR:
+ name = "SIR";
+ data = TMD_SIR;
+ func = &sir_func;
+ break;
+ case SH_IRDA_MIR:
+ name = "MIR";
+ data = TMD_MIR;
+ func = &mfir_func;
+ break;
+ case SH_IRDA_FIR:
+ name = "FIR";
+ data = TMD_FIR;
+ func = &mfir_func;
+ break;
+ default:
+ name = "NONE";
+ data = 0;
+ func = &xir_func;
+ break;
+ }
+
+ self->mode = mode;
+ self->xir_func = func;
+ sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
+
+ dev_dbg(dev, "switch to %s mode", name);
+}
+
+/************************************************************************
+
+
+ irq function
+
+
+************************************************************************/
+static void sh_irda_set_irq_mask(struct sh_irda_self *self)
+{
+ u16 tmr_hole;
+ u16 xir_reg;
+
+ /* set all mask */
+ sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
+ sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
+
+ /* clear irq */
+ sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
+
+ switch (self->mode) {
+ case SH_IRDA_SIR:
+ tmr_hole = SIM;
+ xir_reg = SIRIMR;
+ break;
+ case SH_IRDA_MIR:
+ case SH_IRDA_FIR:
+ tmr_hole = MIM;
+ xir_reg = MFIRIMR;
+ break;
+ default:
+ tmr_hole = 0;
+ xir_reg = 0;
+ break;
+ }
+
+ /* open mask */
+ if (xir_reg) {
+ sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
+ sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
+ }
+}
+
+static irqreturn_t sh_irda_irq(int irq, void *dev_id)
+{
+ struct sh_irda_self *self = dev_id;
+ struct sh_irda_xir_func *func = self->xir_func;
+ u16 isr = sh_irda_read(self, SIRISR);
+
+ /* clear irq */
+ sh_irda_write(self, SIRICR, isr);
+
+ if (isr & FRE)
+ func->xir_fre(self);
+ if (isr & TROV)
+ func->xir_trov(self);
+ if (isr & xIR_9)
+ func->xir_9(self);
+ if (isr & xIR_8)
+ func->xir_8(self);
+ if (isr & FTE)
+ func->xir_fte(self);
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_irda_crc_reset(struct sh_irda_self *self)
+{
+ sh_irda_write(self, CRCCTR, CRC_RST);
+}
+
+static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
+{
+ sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
+}
+
+static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
+{
+ return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
+}
+
+static u16 sh_irda_crc_out(struct sh_irda_self *self)
+{
+ return sh_irda_read(self, CRCOR);
+}
+
+static int sh_irda_crc_init(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_irda_crc_reset(self);
+
+ sh_irda_crc_add(self, 0xCC);
+ sh_irda_crc_add(self, 0xF5);
+ sh_irda_crc_add(self, 0xF1);
+ sh_irda_crc_add(self, 0xA7);
+
+ val = sh_irda_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_irda_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error%x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_irda_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static void sh_irda_remove_iobuf(struct sh_irda_self *self)
+{
+ kfree(self->rx_buff.head);
+
+ self->tx_buff.head = NULL;
+ self->tx_buff.data = NULL;
+ self->rx_buff.head = NULL;
+ self->rx_buff.data = NULL;
+}
+
+static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
+{
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuff already exists\n");
+ return -EINVAL;
+ }
+
+ /* rx_buff */
+ self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
+ if (!self->rx_buff.head)
+ return -ENOMEM;
+
+ self->rx_buff.truesize = rxsize;
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* tx_buff */
+ self->tx_buff.head = self->membase + IRDARAM;
+ self->tx_buff.truesize = IRDARAM_LEN;
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ struct device *dev = &self->ndev->dev;
+ int speed = irda_get_next_speed(skb);
+ int ret;
+
+ dev_dbg(dev, "hard xmit\n");
+
+ netif_stop_queue(ndev);
+ sh_irda_rcv_ctrl(self, 0);
+
+ ret = sh_irda_set_baudrate(self, speed);
+ if (ret < 0)
+ return ret;
+
+ self->tx_buff.len = 0;
+ if (skb->len) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.len = async_wrap_skb(skb,
+ self->tx_buff.head,
+ self->tx_buff.truesize);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ if (self->tx_buff.len > self->tx_buff.truesize)
+ self->tx_buff.len = self->tx_buff.truesize;
+
+ sh_irda_write(self, IRTFLR, self->tx_buff.len);
+ sh_irda_write(self, IRTCTR, ARMOD | TE);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_irda_open(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_irda_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_irda_set_mode(self, SH_IRDA_SIR);
+ sh_irda_set_timeout(self, 2);
+ sh_irda_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ netif_start_queue(ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ sh_irda_set_irq_mask(self);
+
+ dev_info(&ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_irda_stop(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stoped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_irda_ndo = {
+ .ndo_open = sh_irda_open,
+ .ndo_stop = sh_irda_stop,
+ .ndo_start_xmit = sh_irda_hard_xmit,
+ .ndo_do_ioctl = sh_irda_ioctl,
+ .ndo_get_stats = sh_irda_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_irda_self *self;
+ struct resource *res;
+ char clk_name[8];
+ unsigned int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_irda_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+ spin_lock_init(&self->lock);
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
+ dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_irda_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_irda_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_irda_driver = {
+ .probe = sh_irda_probe,
+ .remove = __devexit_p(sh_irda_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_irda_init(void)
+{
+ return platform_driver_register(&sh_irda_driver);
+}
+
+static void __exit sh_irda_exit(void)
+{
+ platform_driver_unregister(&sh_irda_driver);
+}
+
+module_init(sh_irda_init);
+module_exit(sh_irda_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581c4b5..5c5f99d5034 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
sh_sir_set_baudrate(self, 9600);
self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENODEV;
goto open_err;
+ }
/*
* Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
struct sh_sir_self *self;
struct resource *res;
char clk_name[8];
- void __iomem *base;
unsigned int irq;
int err = -ENOMEM;
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
if (!ndev)
goto exit;
- base = ioremap_nocache(res->start, resource_size(res));
- if (!base) {
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap.\n");
goto err_mem_1;
}
- self = netdev_priv(ndev);
err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_sir_ndo;
ndev->irq = irq;
- self->membase = base;
self->ndev = ndev;
self->qos.baud_rate.bits &= IR_9600; /* FIXME */
self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index de91cd14016..1b051dab7b2 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -655,7 +655,6 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
if (likely(actual > 0)) {
dev->tx_skb = skb;
- ndev->trans_start = jiffies;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6af84d88cd0..d67e48418e5 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -868,7 +868,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
@@ -2822,7 +2822,6 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
tmpbyte |= mask;
pci_write_config_byte(dev, reg, tmpbyte);
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
- return;
}
static int __init preconfigure_through_ali(struct pci_dev *dev,
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index d9d1db03fa2..5a84822b5a4 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -774,7 +774,7 @@ static void SetBaudRate(__u16 iobase, __u32 rate)
break;
default:
break;
- };
+ }
} else if (IsMIROn(iobase)) {
value = 0; // will automatically be fixed in 1.152M
} else if (IsFIROn(iobase)) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bcface..c3d07382b7f 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1037,7 +1037,6 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
wmb();
outw(0, iobase+VLSI_PIO_PROMPT);
}
- ndev->trans_start = jiffies;
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
@@ -1742,7 +1741,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
@@ -1781,7 +1780,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index cb0cb758be6..1f9c3f08d1a 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -515,7 +515,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
w83977af_change_speed(self, speed);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -549,7 +548,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
switch_bank(iobase, SET0);
outb(ICR_ETXTHI, iobase+ICR);
}
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
/* Restore set register */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c8969..ba1de5973fb 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- u8 *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addr = ha->addr;
u64 xaddr = 0;
if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71d0c8..521c0c73299 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
#define PFX "ixgb: "
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
+#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
#else
-#define IXGB_DBG(args...)
+#define IXGB_DBG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+} while (0)
#endif
/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa7264a1..813993f9c65 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
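The pr_fmt() define added above takes effect because pr_debug() and friends expand to printk(KERN_DEBUG pr_fmt(fmt), ...); with KBUILD_MODNAME this prefixes every message from the file with "ixgb: ". A minimal sketch of the pattern:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example(void)
	{
		/* logs "ixgb: Checksum invalid" when debug output is enabled */
		pr_debug("Checksum invalid\n");
	}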
@@ -56,7 +58,6 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -75,7 +76,6 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -125,7 +125,6 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -190,7 +189,6 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
/* Set CS */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -224,7 +222,6 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -248,7 +245,6 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -268,7 +264,6 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, EECD, eecd_reg);
ixgb_clock_eeprom(hw);
- return;
}
/******************************************************************************
@@ -357,7 +352,6 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
checksum = (u16) EEPROM_SUM - checksum;
ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
- return;
}
/******************************************************************************
@@ -412,8 +406,6 @@ ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-
- return;
}
/******************************************************************************
@@ -467,11 +459,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
- DEBUGFUNC("ixgb_get_eeprom_data");
+ ENTER();
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ pr_debug("Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
}
if (checksum != (u16) EEPROM_SUM) {
- DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ pr_debug("Checksum invalid\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +481,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ pr_debug("Signature invalid\n");
return(false);
}
@@ -555,13 +547,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGFUNC("ixgb_get_ee_mac_addr");
+ ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
mac_addr[i] = ee_map->mac_addr[i];
- DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
}
+ pr_debug("eeprom mac address = %pM\n", mac_addr);
}
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84e680..397acabccab 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
* Shared functions for accessing and configuring the adapter
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ids.h"
+#include <linux/etherdevice.h>
+
/* Local function prototypes */
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
u32 ctrl_reg;
u32 icr_reg;
- DEBUGFUNC("ixgb_adapter_stop");
+ ENTER();
/* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware.
*/
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ pr_debug("Exiting because the adapter is already stopped!!!\n");
return false;
}
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
hw->adapter_stopped = true;
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_xpak_vendor");
+ ENTER();
/* Read the first few bytes of the vendor string from the XPAK NVR
* registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
ixgb_phy_type phy_type;
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_phy");
+ ENTER();
/* Infer the transceiver/phy type from the device id */
switch (hw->device_id) {
case IXGB_DEVICE_ID_82597EX:
- DEBUGOUT("Identified TXN17401 optics\n");
+ pr_debug("Identified TXN17401 optics\n");
phy_type = ixgb_phy_type_txn17401;
break;
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
* type of optics. */
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
case IXGB_DEVICE_ID_82597EX_LR:
- DEBUGOUT("Identified G6104 optics\n");
+ pr_debug("Identified G6104 optics\n");
phy_type = ixgb_phy_type_g6104;
break;
case IXGB_DEVICE_ID_82597EX_CX4:
- DEBUGOUT("Identified CX4\n");
+ pr_debug("Identified CX4\n");
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
default:
- DEBUGOUT("Unknown physical layer module\n");
+ pr_debug("Unknown physical layer module\n");
phy_type = ixgb_phy_type_unknown;
break;
}
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
u32 ctrl_reg;
bool status;
- DEBUGFUNC("ixgb_init_hw");
+ ENTER();
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not affect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
- DEBUGOUT("Issuing an EE reset to MAC\n");
+ pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
* If it is not valid, we fail hardware init.
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
- DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
return(false);
}
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
ixgb_get_bus_info(hw);
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ pr_debug("Zeroing the MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
u32 i;
- DEBUGFUNC("ixgb_init_rx_addrs");
+ ENTER();
/*
* If the current mac address is valid, assume it is a software override
@@ -391,35 +395,24 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Get the MAC address from the eeprom for later reference */
ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
- DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Keeping Permanent MAC Addr = %pM\n",
+ hw->curr_mac_addr);
} else {
/* Setup the receive address. */
- DEBUGOUT("Overriding MAC Address in RAR[0]\n");
- DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Overriding MAC Address in RAR[0]\n");
+ pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
ixgb_rar_set(hw, hw->curr_mac_addr, 0);
}
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
/* Write high reg first to disable the AV bit first */
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
}
-
- return;
}
/******************************************************************************
@@ -444,65 +437,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
u32 hash_value;
u32 i;
u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
+ u8 *mca;
- DEBUGFUNC("ixgb_mc_addr_list_update");
+ ENTER();
/* Set the new number of MC addresses that we are being requested to use. */
hw->num_mc_addrs = mc_addr_count;
/* Clear RAR[1-15] */
- DEBUGOUT(" Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
/* Clear the MTA */
- DEBUGOUT(" Clearing MTA\n");
+ pr_debug("Clearing MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Add the new addresses */
+ mca = mc_addr_list;
for (i = 0; i < mc_addr_count; i++) {
- DEBUGOUT(" Adding the multicast addresses:\n");
- DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 1],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 2],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 3],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 4],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 5]);
+ pr_debug("Adding the multicast addresses:\n");
+ pr_debug("MC Addr #%d = %pM\n", i, mca);
/* Place this multicast address in the RAR if there is room, *
* else put it in the MTA
*/
if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw,
- mc_addr_list +
- (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
- rar_used_count);
- DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ ixgb_rar_set(hw, mca, rar_used_count);
+ pr_debug("Added a multicast address to RAR[%d]\n", i);
rar_used_count++;
} else {
- hash_value = ixgb_hash_mc_addr(hw,
- mc_addr_list +
- (i *
- (IXGB_ETH_LENGTH_OF_ADDRESS
- + pad)));
+ hash_value = ixgb_hash_mc_addr(hw, mca);
- DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+ pr_debug("Hash value = 0x%03X\n", hash_value);
ixgb_mta_set(hw, hash_value);
}
+
+ mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
}
- DEBUGOUT("MC Update Complete\n");
- return;
+ pr_debug("MC Update Complete\n");
}
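/*
 * For context: the ixgb_mc_addr_list_update() hunk above replaces the
 * repeated mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)] index
 * arithmetic with a cursor ("mca") that advances by one address-plus-pad
 * stride per iteration.  A stand-alone sketch of the same walk (illustrative
 * only, not driver code; the entry count and padding are made up):
 */
#include <stdio.h>

#define ADDR_LEN 6	/* bytes per MAC address, as in the driver */

int main(void)
{
	unsigned char list[3 * ADDR_LEN] = { 0 };	/* three packed entries */
	const unsigned char *cur = list;		/* walking cursor */
	unsigned int i, pad = 0;

	for (i = 0; i < 3; i++) {
		printf("entry %u starts at offset %td\n", i, cur - list);
		cur += ADDR_LEN + pad;	/* same stride as the driver loop */
	}
	return 0;	/* prints offsets 0, 6 and 12 */
}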
/******************************************************************************
@@ -520,7 +498,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
{
u32 hash_value = 0;
- DEBUGFUNC("ixgb_hash_mc_addr");
+ ENTER();
/* The portion of the address that is used for the hash table is
* determined by the mc_filter_type setting.
@@ -547,7 +525,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
break;
default:
/* Invalid mc_filter_type, what should we do? */
- DEBUGOUT("MC filter type param set incorrectly\n");
+ pr_debug("MC filter type param set incorrectly\n");
ASSERT(0);
break;
}
@@ -585,8 +563,6 @@ ixgb_mta_set(struct ixgb_hw *hw,
mta_reg |= (1 << hash_bit);
IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-
- return;
}
/******************************************************************************
@@ -603,7 +579,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
{
u32 rar_low, rar_high;
- DEBUGFUNC("ixgb_rar_set");
+ ENTER();
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
@@ -619,7 +595,6 @@ ixgb_rar_set(struct ixgb_hw *hw,
IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- return;
}
/******************************************************************************
@@ -635,7 +610,6 @@ ixgb_write_vfta(struct ixgb_hw *hw,
u32 value)
{
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- return;
}
/******************************************************************************
@@ -650,7 +624,6 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
- return;
}
/******************************************************************************
@@ -666,7 +639,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
u32 pap_reg = 0; /* by default, assume no pause time */
bool status = true;
- DEBUGFUNC("ixgb_setup_fc");
+ ENTER();
/* Get the current control reg 0 settings */
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +683,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
break;
default:
/* We should never get here. The value should be 0-3. */
- DEBUGOUT("Flow control param set incorrectly\n");
+ pr_debug("Flow control param set incorrectly\n");
ASSERT(0);
break;
}
@@ -940,7 +913,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
u32 status_reg;
u32 xpcss_reg;
- DEBUGFUNC("ixgb_check_for_link");
+ ENTER();
xpcss_reg = IXGB_READ_REG(hw, XPCSS);
status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +923,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
hw->link_up = true;
} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
- DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ pr_debug("XPCSS Not Aligned while Status:LU is set\n");
hw->link_up = ixgb_link_reset(hw);
} else {
/*
@@ -981,8 +954,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
newRFC = IXGB_READ_REG(hw, RFC);
if ((hw->lastLFC + 250 < newLFC)
|| (hw->lastRFC + 250 < newRFC)) {
- DEBUGOUT
- ("BAD LINK! too many LFC/RFC since last check\n");
+ pr_debug("BAD LINK! too many LFC/RFC since last check\n");
bad_link_returncode = true;
}
hw->lastLFC = newLFC;
@@ -1002,11 +974,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile u32 temp_reg;
- DEBUGFUNC("ixgb_clear_hw_cntrs");
+ ENTER();
/* if we are stopped or resetting exit gracefully */
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ pr_debug("Exiting because the adapter is stopped!!!\n");
return;
}
@@ -1070,7 +1042,6 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
temp_reg = IXGB_READ_REG(hw, XOFFRXC);
temp_reg = IXGB_READ_REG(hw, XOFFTXC);
temp_reg = IXGB_READ_REG(hw, RJC);
- return;
}
/******************************************************************************
@@ -1086,7 +1057,6 @@ ixgb_led_on(struct ixgb_hw *hw)
/* To turn on the LED, clear software-definable pin 0 (SDP0). */
ctrl0_reg &= ~IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1102,7 +1072,6 @@ ixgb_led_off(struct ixgb_hw *hw)
/* To turn off the LED, set software-definable pin 0 (SDP0). */
ctrl0_reg |= IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1142,8 +1111,6 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
ixgb_bus_width_64 : ixgb_bus_width_32;
-
- return;
}
/******************************************************************************
@@ -1156,26 +1123,21 @@ static bool
mac_addr_valid(u8 *mac_addr)
{
bool is_valid = true;
- DEBUGFUNC("mac_addr_valid");
+ ENTER();
/* Make sure it is not a multicast address */
- if (IS_MULTICAST(mac_addr)) {
- DEBUGOUT("MAC address is multicast\n");
+ if (is_multicast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is multicast\n");
is_valid = false;
}
/* Not a broadcast address */
- else if (IS_BROADCAST(mac_addr)) {
- DEBUGOUT("MAC address is broadcast\n");
+ else if (is_broadcast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is broadcast\n");
is_valid = false;
}
/* Reject the zero address */
- else if (mac_addr[0] == 0 &&
- mac_addr[1] == 0 &&
- mac_addr[2] == 0 &&
- mac_addr[3] == 0 &&
- mac_addr[4] == 0 &&
- mac_addr[5] == 0) {
- DEBUGOUT("MAC address is all zeros\n");
+ else if (is_zero_ether_addr(mac_addr)) {
+ pr_debug("MAC address is all zeros\n");
is_valid = false;
}
return (is_valid);
@@ -1235,8 +1197,6 @@ ixgb_optics_reset(struct ixgb_hw *hw)
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD);
}
-
- return;
}
/******************************************************************************
@@ -1297,6 +1257,4 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
-
- return;
}
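/*
 * For context: mac_addr_valid() above now uses the generic helpers from
 * <linux/etherdevice.h> instead of the driver-private IS_MULTICAST and
 * IS_BROADCAST macros that the next file (ixgb_hw.h) removes.  A minimal
 * sketch of an equivalent check built only on those helpers (illustrative
 * only; the function name is made up):
 */
#include <linux/types.h>
#include <linux/etherdevice.h>

static bool example_addr_usable(const u8 *addr)
{
	/* reject multicast (which covers broadcast) and the all-zero address */
	return !is_multicast_ether_addr(addr) &&
	       !is_broadcast_ether_addr(addr) &&
	       !is_zero_ether_addr(addr);
}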
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3aab5a..873d32b89fb 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
u8 filler3[0xAAAA];
};
-/*
- * This is a little-endian specific check.
- */
-#define IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/*
- * Check whether an address is broadcast.
- */
-#define IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* Flow control parameters */
struct ixgb_fc {
u32 high_water; /* Flow Control High-water */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65cb98..c6b75c83100 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init
ixgb_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n",
- ixgb_driver_string, ixgb_driver_version);
-
- printk(KERN_INFO "%s\n", ixgb_copyright);
+ pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR
- "ixgb: No usable DMA configuration, aborting\n");
- goto err_dma_mask;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
}
- pci_using_dac = 0;
}
err = pci_request_regions(pdev, ixgb_driver_name);
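/*
 * For context: the probe hunk above moves from the PCI-layer wrappers
 * (pci_set_dma_mask()/pci_set_consistent_dma_mask()) to the generic DMA API
 * operating on &pdev->dev.  A minimal sketch of the usual "try 64-bit, fall
 * back to 32-bit" shape with that API (illustrative only; this is not the
 * driver's exact control flow):
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	return err;	/* non-zero: no usable DMA configuration */
}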
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->length = 0;
}
@@ -1058,7 +1065,7 @@ ixgb_set_multi(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
int i;
@@ -1089,9 +1096,9 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
+ netdev_for_each_mc_addr(ha, netdev)
memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
- mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
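/*
 * For context: ixgb_set_multi() above now walks struct netdev_hw_addr
 * entries with netdev_for_each_mc_addr() and reads ha->addr, replacing the
 * old struct dev_mc_list / dmi_addr iteration.  A minimal sketch of that
 * pattern (illustrative only; the destination buffer and function name are
 * made up):
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void example_copy_mc_list(struct net_device *netdev, u8 *dst)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* dst must hold netdev_mc_count(netdev) * ETH_ALEN bytes */
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(&dst[i++ * ETH_ALEN], ha->addr, ETH_ALEN);
}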
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
if (adapter->hw.link_up) {
if (!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
- "Full Duplex, Flow Control: %s\n",
- netdev->name,
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None")));
+ netdev_info(netdev,
+ "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
+ (adapter->hw.fc.type == ixgb_fc_full) ?
+ "RX/TX" :
+ (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
+ "RX" :
+ (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
+ "TX" : "None");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
}
}
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- pci_map_page(pdev, frag->page,
- offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ dma_map_page(&pdev->dev, frag->page,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1916,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
}
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+ struct ixgb_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
@@ -1952,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
prefetch(skb->data - NET_IP_ALIGN);
- if (++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count)
+ i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd);
- if ((j = i + 1) == rx_ring->count) j = 0;
+ j = i + 1;
+ if (j == rx_ring->count)
+ j = 0;
next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer);
@@ -1965,10 +1998,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1992,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done;
}
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- }
- /* end copybreak code */
+ ixgb_check_copybreak(netdev, buffer_info, length, &skb);
/* Good Receive */
skb_put(skb, length);
@@ -2091,10 +2106,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2337,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (ixgb_up(adapter)) {
- printk ("ixgb: can't bring device back up after reset\n");
+ pr_err("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be4d96..e361185920e 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
-#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F)
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
+
+#define ENTER() pr_debug("%s\n", __func__);
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
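/*
 * For context: with the DEBUG* macros gone, ixgb relies on the standard
 * pr_debug() machinery.  The pr_fmt() definition added at the top of each .c
 * file prefixes every pr_*() message with the module name (KBUILD_MODNAME is
 * supplied by kbuild), and ENTER() just logs the current function name.  A
 * minimal sketch of the arrangement in a hypothetical source file
 * (illustrative only):
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the includes */

#include <linux/kernel.h>

#define ENTER() pr_debug("%s\n", __func__)

static void example_trace_entry(void)
{
	ENTER();			/* logs "<module>: example_trace_entry" */
	pr_debug("doing work\n");	/* logs "<module>: doing work" */
}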
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1ddadd..88a08f05624 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- printk(KERN_INFO "%s Enabled\n", opt->name);
+ pr_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- printk(KERN_INFO "%s Disabled\n", opt->name);
+ pr_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ pr_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- printk(KERN_INFO "%s\n", ent->str);
+ pr_info("%s\n", ent->str);
return 0;
}
}
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
BUG();
}
- printk(KERN_INFO "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
- printk(KERN_NOTICE
- "Warning: no configuration for board #%i\n", bd);
- printk(KERN_NOTICE "Using defaults for all values\n");
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCHighThresh when no RxFC\n");
+ pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCLowThresh when no RxFC\n");
+ pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring FCReqTimeout when no RxFC\n");
+ pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
- printk(KERN_INFO
- "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
- "Using Defaults\n");
+ pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718..ffae480587a 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
u16 default_vf_vlan_id;
u16 vlans_enabled;
bool clear_to_send;
+ bool pf_set_mac;
int rar;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
/* wrapper around a pointer to a socket buffer,
@@ -357,6 +360,7 @@ struct ixgbe_adapter {
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -404,6 +408,8 @@ struct ixgbe_adapter {
u16 eeprom_version;
int node;
+ struct work_struct check_overtemp_task;
+ u32 interrupt_event;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 35a06b47587..9c02d6014cc 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -42,9 +42,9 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -1221,7 +1221,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
@@ -1236,6 +1236,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82598_info = {
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 12fc0e7ba2c..a4e2901f2f0 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -133,27 +133,6 @@ setup_sfp_out:
return ret_val;
}
-/**
- * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- u16 msix_count;
- pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
- &msix_count);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW, so increment to give proper value */
- msix_count++;
-
- return msix_count;
-}
-
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -165,7 +144,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
return 0;
}
@@ -642,6 +621,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
s32 i, j;
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ struct ixgbe_adapter *adapter = hw->back;
hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
@@ -726,64 +706,14 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+ " downgraded the link speed from the maximum"
+ " advertised\n");
return status;
}
/**
- * ixgbe_check_mac_link_82599 - Determine link and speed status
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @link_up: true when link is up
- * @link_up_wait_to_complete: bool used to wait for link up or not
- *
- * Reads the links register to determine if link is up and the current speed
- **/
-static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up,
- bool link_up_wait_to_complete)
-{
- u32 links_reg;
- u32 i;
-
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if (links_reg & IXGBE_LINKS_UP) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- }
- } else {
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
- }
-
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_100_FULL;
-
- /* if link is down, zero out the current_mode */
- if (*link_up == false) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = false;
- }
-
- return 0;
-}
-
-/**
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1045,243 +975,6 @@ reset_hw_out:
}
/**
- * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to disassociate
- * @vmdq: VMDq pool index to remove from the rar
- **/
-static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar_lo, mpsar_hi;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
- if (!mpsar_lo && !mpsar_hi)
- goto done;
-
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
-
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
-
-done:
- return 0;
-}
-
-/**
- * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq pool index
- **/
-static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
- return 0;
-}
-
-/**
- * ixgbe_set_vfta_82599 - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- u32 regindex;
- u32 vlvf_index;
- u32 bitindex;
- u32 bits;
- u32 first_empty_slot;
- u32 vt_ctl;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /*
- * this is a 2 part operation - first the VFTA, then the
- * VLVF and VLVFB if vind is set
- */
-
- /* Part 1
- * The VFTA is a bitstring made up of 128 32-bit registers
- * that enable the particular VLAN id, much like the MTA:
- * bits[11-5]: which register
- * bits[4-0]: which bit in the register
- */
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- if (vlan_on)
- bits |= (1 << bitindex);
- else
- bits &= ~(1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-
- /* Part 2
- * If VT mode is set
- * Either vlan_on
- * make sure the vlan is in VLVF
- * set the vind bit in the matching VLVFB
- * Or !vlan_on
- * clear the pool bit and possibly the vind
- */
- vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
- goto out;
-
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
- if (!bits && !first_empty_slot)
- first_empty_slot = vlvf_index;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
-
- if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- vlvf_index = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
- }
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
-
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- /* if bits is non-zero then some pools/VFs are still
- * using this VLAN ID. Force the VFTA entry to on */
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- bits |= (1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
- }
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-
-out:
- return 0;
-}
-
-/**
- * ixgbe_clear_vfta_82599 - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
-{
- int i;
- hw_dbg(hw, " Clearing UTA\n");
-
- for (i = 0; i < 128; i++)
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
- return 0;
-}
-
-/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
@@ -1303,7 +996,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
hw_dbg(hw ,"Flow Director previous command isn't complete, "
- "aborting table re-initialization. \n");
+ "aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -2462,10 +2155,14 @@ sfp_check:
goto out;
switch (hw->phy.type) {
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
@@ -2545,75 +2242,6 @@ static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
}
/**
- * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
- * @hw: pointer to hardware structure
- * @san_mac_offset: SAN MAC address offset
- *
- * This function will read the EEPROM location for the SAN MAC address
- * pointer, and returns the value at that location. This is used in both
- * get and set mac_addr routines.
- **/
-static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
-{
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available.
- */
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
- return 0;
-}
-
-/**
- * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Reads the SAN MAC address from the EEPROM, if it's available. This is
- * per-port, so set_lan_id() must be called before reading the addresses.
- * set_lan_id() is called by identify_sfp(), but this cannot be relied
- * upon for non-SFP connections, so we must call it here.
- **/
-static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- u16 san_mac_data, san_mac_offset;
- u8 i;
-
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available. If they're not, no point in calling set_lan_id() here.
- */
- ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
-
- /* make sure we know which port we need to program */
- hw->mac.ops.set_lan_id(hw);
- /* apply the port offset to the address offset */
- (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
- for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
- san_mac_addr[i * 2] = (u8)(san_mac_data);
- san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
- san_mac_offset++;
- }
-
-san_mac_addr_out:
- return 0;
-}
-
-/**
* ixgbe_verify_fw_version_82599 - verify fw version for 82599
* @hw: pointer to hardware structure
*
@@ -2715,7 +2343,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
- .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
@@ -2724,7 +2352,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
- .check_link = &ixgbe_check_mac_link_82599,
+ .check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
@@ -2732,23 +2360,23 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.blink_led_stop = &ixgbe_blink_led_stop_generic,
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
- .set_vmdq = &ixgbe_set_vmdq_82599,
- .clear_vmdq = &ixgbe_clear_vmdq_82599,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
- .clear_vfta = &ixgbe_clear_vfta_82599,
- .set_vfta = &ixgbe_set_vfta_82599,
- .fc_enable = &ixgbe_fc_enable_generic,
- .init_uta_tables = &ixgbe_init_uta_tables_82599,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
@@ -2757,7 +2385,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
- .init = &ixgbe_init_phy_ops_82599,
+ .init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
@@ -2767,6 +2395,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82599_info = {
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c..1159d9138f0 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -34,7 +34,6 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -595,14 +594,14 @@ out:
}
/**
- * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
u32 eerd;
s32 status;
@@ -614,15 +613,15 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
goto out;
}
- eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
- IXGBE_EEPROM_READ_REG_START;
+ eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eeprom_eerd_done(hw);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
if (status == 0)
*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_READ_REG_DATA);
+ IXGBE_EEPROM_RW_REG_DATA);
else
hw_dbg(hw, "Eeprom read timed out\n");
@@ -631,20 +630,26 @@ out:
}
/**
- * ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
*
- * Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
**/
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
- for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EERD);
- if (reg & IXGBE_EEPROM_READ_REG_DONE) {
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
status = 0;
break;
}
@@ -1392,14 +1397,17 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = true;
}
} else {
/* only disable if set by overflow, not by user */
- if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
+ !(hw->addr_ctrl.user_set_promisc)) {
hw_dbg(hw, " Leaving address overflow promisc mode\n");
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl &= ~IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = false;
}
}
@@ -1484,26 +1492,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @next: iterator function to walk the multicast address list
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the MC addrs from receive
* address registers and the multicast table. Uses unused receive address
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
u32 i;
- u32 vmdq;
/*
* Set the new number of MC addresses that we are being requested to
* use.
*/
- hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
hw->addr_ctrl.mta_in_use = 0;
/* Clear the MTA */
@@ -1512,9 +1518,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
/* Add the new addresses */
- for (i = 0; i < mc_addr_count; i++) {
+ netdev_for_each_mc_addr(ha, netdev) {
hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ ixgbe_set_mta(hw, ha->addr);
}
/* Enable mta */
@@ -2254,3 +2260,490 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
return 0;
}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function reads the EEPROM location for the SAN MAC address
+ * pointer and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
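/*
 * For context: the loop above unpacks three 16-bit EEPROM words into the six
 * MAC bytes, low byte first.  A stand-alone sketch of that unpacking with
 * made-up word values (illustrative only, not driver code):
 */
#include <stdio.h>

int main(void)
{
	const unsigned short words[3] = { 0x2211, 0x4433, 0x6655 };
	unsigned char mac[6];
	int i;

	for (i = 0; i < 3; i++) {
		mac[i * 2]     = (unsigned char)(words[i] & 0xff);	/* low byte  */
		mac[i * 2 + 1] = (unsigned char)(words[i] >> 8);	/* high byte */
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 11:22:33:44:55:66 */
}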
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 msix_count;
+ pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
+ &msix_count);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW, so increment to give proper value */
+ msix_count++;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+
+done:
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
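The scan above is a plain "return the matching slot, else the first empty one" search; a compact illustration against an ordinary array (sizes and values made up for the example) looks like this:

#include <stdio.h>

#define NUM_SLOTS 8
#define NO_SPACE  (-1)

/* Return the slot holding vlan, else the first empty slot, else NO_SPACE. */
static int find_slot(const unsigned int *slots, unsigned int vlan)
{
        int first_empty = 0; /* 0 doubles as "none found", like the driver */
        int i;

        for (i = 1; i < NUM_SLOTS; i++) {
                if (!slots[i] && !first_empty)
                        first_empty = i;
                else if ((slots[i] & 0x0FFF) == vlan)
                        return i;
        }
        return first_empty ? first_empty : NO_SPACE;
}

int main(void)
{
        unsigned int slots[NUM_SLOTS] = { 0, 10, 0, 30, 0, 0, 0, 0 };

        printf("vlan 30 -> slot %d\n", find_slot(slots, 30)); /* 3, matching entry */
        printf("vlan 99 -> slot %d\n", find_slot(slots, 99)); /* 2, first empty */
        return 0;
}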
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 bits;
+ u32 vt;
+ u32 targetbit;
+ bool vfta_changed = false;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits |= (1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits &= ~(1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ vfta_changed = false;
+ }
+ }
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
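The VFTA indexing described in Part 1 above can be checked with a tiny standalone example (hypothetical helper, not part of the driver): VLAN ID 100 lands in register 3, bit 4.

#include <stdio.h>

/* Mirrors the VFTA lookup math in ixgbe_set_vfta_generic above:
 * 4096 VLAN IDs spread across 128 32-bit registers. */
static void vfta_index(unsigned int vlan, unsigned int *regindex,
                       unsigned int *bitindex)
{
        *regindex = (vlan >> 5) & 0x7F; /* bits 11:5 pick the register */
        *bitindex = vlan & 0x1F;        /* bits 4:0 pick the bit */
}

int main(void)
{
        unsigned int reg, bit;

        vfta_index(100, &reg, &bit);
        printf("VLAN 100 -> VFTA[%u], bit %u\n", reg, bit); /* VFTA[3], bit 4 */
        return 0;
}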
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+
+ /* if link is down, zero out the current_mode */
+ if (*link_up == false) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
+
+ return 0;
+}
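For clarity, a short sketch of the speed decode above, using stand-in values for the LINKS register bits (the authoritative masks are the IXGBE_LINKS_* definitions in ixgbe_type.h):

#include <stdio.h>

/* stand-in bit definitions for this sketch only */
#define LINKS_UP         0x40000000
#define LINKS_SPEED_MASK 0x30000000
#define LINKS_SPEED_10G  0x30000000
#define LINKS_SPEED_1G   0x20000000

int main(void)
{
        unsigned int links_reg = LINKS_UP | LINKS_SPEED_10G; /* pretend HW read */

        if (!(links_reg & LINKS_UP))
                printf("link down\n");
        else if ((links_reg & LINKS_SPEED_MASK) == LINKS_SPEED_10G)
                printf("link up, 10G full\n");
        else if ((links_reg & LINKS_SPEED_MASK) == LINKS_SPEED_1G)
                printf("link up, 1G full\n");
        else
                printf("link up, 100M full\n");
        return 0;
}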
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the EEPROM from the alternative SAN MAC address
+ * block to check support for the alternative WWNN/WWPN prefixes.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
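The EEPROM access pattern above is "read a block pointer word, then read at pointer plus fixed offsets"; a toy model with invented offsets shows the idea:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* toy EEPROM image; all offsets here are invented for the example */
        uint16_t eeprom[64] = { 0 };
        const uint16_t blk_ptr_word = 0x27;
        const uint16_t caps_off = 0, wwnn_off = 1, wwpn_off = 2;
        uint16_t blk;

        eeprom[blk_ptr_word] = 0x30;      /* block lives at word 0x30 */
        eeprom[0x30 + caps_off] = 0x1;    /* "alternative WWN supported" bit */
        eeprom[0x30 + wwnn_off] = 0x2000; /* WWNN prefix */
        eeprom[0x30 + wwpn_off] = 0x2001; /* WWPN prefix */

        blk = eeprom[blk_ptr_word];
        if (blk != 0 && blk != 0xFFFF && (eeprom[blk + caps_off] & 0x1))
                printf("wwnn=%#x wwpn=%#x\n",
                       (unsigned int)eeprom[blk + wwnn_off],
                       (unsigned int)eeprom[blk + wwpn_off]);
        return 0;
}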
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c..3080afb12bd 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -45,20 +46,20 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
@@ -71,9 +72,16 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index dd4883f642b..71da325dfa8 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -488,7 +488,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
if (adapter->temp_dcb_cfg.pfc_mode_enable !=
adapter->dcb_cfg.pfc_mode_enable)
adapter->dcb_set_bitmap |= BIT_PFC;
- return;
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee7..c50a7541ffe 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -212,8 +212,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.disable_fc_autoneg = false;
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1493,8 +1493,6 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info = NULL;
kfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
-
- return;
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1520,8 +1518,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
@@ -1563,8 +1562,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1592,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
@@ -1661,8 +1661,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1775,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
@@ -1789,10 +1789,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val)
@@ -1971,8 +1971,6 @@ static void ixgbe_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & IXGBE_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2079,12 +2077,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ /* check the old value and enable RSC if necessary */
+ if ((adapter->rx_itr_setting == 0) &&
+ (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->netdev->features |= NETIF_F_LRO;
+ DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ return true;
+ }
+ return false;
+}
+
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
+ bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2113,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
+ u32 max_int;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ max_int = IXGBE_MAX_RSC_INT_RATE;
+ else
+ max_int = IXGBE_MAX_INT_RATE;
+
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ if ((1000000/ec->rx_coalesce_usecs > max_int) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2108,6 +2135,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
@@ -2116,14 +2146,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
- else
- adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
+
+ /*
+ * if hardware RSC is enabled, disable it when
+ * setting low latency mode, to avoid the errata; assume
+ * that when the user sets low latency mode they want
+ * it at the cost of everything else
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ netdev->features &= ~NETIF_F_LRO;
+ DPRINTK(PROBE, INFO,
+ "rx-usecs set to 0, disabling RSC\n");
+
+ need_reset = true;
+ }
}
if (ec->tx_coalesce_usecs > 1) {
+ /*
+ * don't have to worry about max_int as above because
+ * tx vectors don't do hardware RSC (an rx-only feature)
+ */
/* check the limits */
if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2213,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
ixgbe_write_eitr(q_vector);
}
+ /*
+ * do the reset here at the end to make sure the EITR==0 case is handled
+ * correctly w.r.t. stopping tx and changing TXDCTL.WTHRESH settings; it
+ * also locks in the RSC enable/disable, which requires a reset
+ */
+ if (need_reset) {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
return 0;
}
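A quick check of the microseconds-to-interrupt-rate arithmetic used in the limit tests above; the bounds are assumptions for the sketch, not the driver's exact IXGBE_MIN/MAX_INT_RATE values:

#include <stdio.h>

/* assumed bounds for this sketch only */
#define MIN_INT_RATE 1000   /* interrupts per second */
#define MAX_INT_RATE 500000

int main(void)
{
        unsigned int usecs = 125;            /* ethtool rx-usecs request */
        unsigned int rate = 1000000 / usecs; /* 8000 interrupts per second */

        printf("rx-usecs %u -> %u ints/sec (%s)\n", usecs, rate,
               (rate >= MIN_INT_RATE && rate <= MAX_INT_RATE) ? "accepted"
                                                              : "rejected");
        return 0;
}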
@@ -2178,10 +2236,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
/* if state changes we need to update adapter->flags and reset */
- if ((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- need_reset = true;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+ /*
+ * cast both to bool and verify if they are set the same
+ * but only enable RSC if itr is non-zero, as
+ * itr=0 and RSC are mutually exclusive
+ */
+ if (((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
+ adapter->rx_itr_setting) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ } else if (!adapter->rx_itr_setting) {
+ netdev->features &= ~ETH_FLAG_LRO;
+ }
}
/*
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6493049b663..45182ab41d6 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -32,6 +32,7 @@
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
@@ -312,10 +313,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr == IXGBE_FCERR_BADCRC)
skb->ip_summed = CHECKSUM_NONE;
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, skb_network_offset(skb) +
- sizeof(struct fcoe_hdr));
- fh = (struct fc_frame_header *)skb_transport_header(skb);
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
+ else
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct fcoe_hdr));
fctl = ntoh24(fh->fh_f_ctl);
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
@@ -536,12 +539,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe_i = f->mask;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee493a3..d571d101de0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -108,6 +108,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
board_82599 },
@@ -175,6 +177,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ printk(KERN_ERR "%-15s ", rname);
+ for (j = 0; j < 8; j++)
+ printk(KERN_CONT "%08x ", regs[i*8+j]);
+ printk(KERN_CONT "\n");
+ }
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
+ "leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "%5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2048)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -266,15 +607,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -286,16 +627,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_is_paused - check if the tx ring is paused
+ * ixgbe_tx_xon_state - check the tx ring xon state
* @adapter: the ixgbe adapter
* @tx_ring: the corresponding tx_ring
*
* If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
* corresponding TC of this tx_ring when checking TFCS.
*
- * Returns : true if paused
+ * Returns : true if in xon state (currently not paused)
*/
-static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +692,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
adapter->detect_tx_hung = false;
if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- !ixgbe_tx_is_paused(adapter, tx_ring)) {
+ ixgbe_tx_xon_state(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1062,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
@@ -743,9 +1084,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- skb->data));
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -821,6 +1162,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
struct ixgbe_rsc_cb {
dma_addr_t dma;
+ bool delay_unmap;
};
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -861,9 +1203,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (len > IXGBE_RX_HDR_SIZE)
- len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ if ((len > IXGBE_RX_HDR_SIZE) ||
+ (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
+ len = IXGBE_RX_HDR_SIZE;
} else {
len = le16_to_cpu(rx_desc->wb.upper.length);
}
@@ -876,7 +1219,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (rx_buffer_info->dma) {
if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev)))
+ (!(skb->prev))) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -884,18 +1227,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
* access the header after the writeback.
* Only unmap it when EOP is reached
*/
+ IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
- else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ } else {
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
+ }
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -936,11 +1282,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1538,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
itr_reg |= (itr_reg << 16);
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
/*
+ * 82599 can support a value of zero, so allow it for
+ * max interrupt rate, but there is an erratum where it
+ * cannot be zero with RSC
+ */
+ if (itr_reg == 8 &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ itr_reg = 0;
+
+ /*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
@@ -1261,8 +1618,48 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
ixgbe_write_eitr(q_vector);
}
+}
- return;
+/**
+ * ixgbe_check_overtemp_task - worker thread to check over temperature
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_check_overtemp_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ check_overtemp_task);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = adapter->interrupt_event;
+
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM: {
+ u32 autoneg;
+ bool link_up = false;
+
+ if (hw->mac.ops.check_link)
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+ (eicr & IXGBE_EICR_LSC))
+ /* Check if this is due to overtemp */
+ if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+ break;
+ }
+ return;
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ return;
+ break;
+ }
+ DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
+ "has overheated. Restart the computer. If the problem "
+ "persists, power off the system and replace the "
+ "adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+ }
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1336,6 +1733,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (hw->mac.type == ixgbe_mac_82599EB) {
ixgbe_check_sfp_event(adapter, eicr);
+ adapter->interrupt_event = eicr;
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ schedule_work(&adapter->check_overtemp_task);
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
@@ -1826,8 +2227,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
ixgbe_write_eitr(q_vector);
}
-
- return;
}
/**
@@ -1839,6 +2238,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
u32 mask;
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1899,6 +2300,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
ixgbe_check_fan_failure(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ schedule_work(&adapter->check_overtemp_task);
if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2372,7 +2776,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs);
+ ixgbe_set_vmolr(hw, adapter->num_vfs, true);
}
/* Program MRQC for the distribution of queues */
@@ -2482,12 +2886,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+#ifdef CONFIG_IXGBE_DCB
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+#endif
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ break;
+#endif
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl;
- int i, j;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2498,25 +2972,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* still receive traffic from a DCB-enabled host even if we're
* not in DCB mode.
*/
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
- /* Disable CFI check */
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
- /* enable VLAN tag stripping */
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 ctrl;
- j = adapter->rx_ring[i]->reg_idx;
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
- }
- }
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ixgbe_vlan_filter_enable(adapter);
ixgbe_vlan_rx_add_vid(netdev, 0);
@@ -2538,21 +2994,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
}
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2566,42 +3007,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl, vlnctrl;
- u8 *addr_list = NULL;
- int addr_count = 0;
+ u32 fctrl;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (netdev->flags & IFF_PROMISC) {
- hw->addr_ctrl.user_set_promisc = 1;
+ hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
- } else {
+ } else if (!hw->addr_ctrl.uc_set_promisc) {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- hw->addr_ctrl.user_set_promisc = 0;
+ ixgbe_vlan_filter_enable(adapter);
+ hw->addr_ctrl.user_set_promisc = false;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
}
@@ -2661,7 +3096,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl, vlnctrl;
+ u32 txdctl;
int i, j;
ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3114,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
/* Enable VLAN tag insert/strip */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB) {
- vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- vlnctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
- }
- }
+ ixgbe_vlan_filter_enable(adapter);
+
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}
@@ -2750,8 +3171,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
return true;
default:
return false;
@@ -2895,6 +3318,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
+ /* Enable Thermal over heat sensor interrupt */
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_SDP0_GPIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ }
+
/* Enable fan failure interrupt if media type is copper */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
@@ -2927,8 +3357,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
@@ -3131,9 +3566,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -3141,11 +3576,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
- if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ if (IXGBE_RSC_CB(this)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
}
skb = skb->prev;
dev_kfree_skb(this);
@@ -3154,8 +3591,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
@@ -3268,26 +3705,30 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- netif_tx_disable(netdev);
-
IXGBE_WRITE_FLUSH(hw);
msleep(10);
netif_tx_stop_all_queues(netdev);
- ixgbe_irq_disable(adapter);
-
- ixgbe_napi_disable_all(adapter);
-
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ cancel_work_sync(&adapter->check_overtemp_task);
+
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
@@ -3301,8 +3742,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- netif_carrier_off(netdev);
-
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
@@ -3379,6 +3818,8 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
}
@@ -3479,12 +3920,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
@@ -4095,7 +4536,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
- return;
}
/**
@@ -4268,6 +4708,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
if (dev->features & NETIF_F_NTUPLE) {
/* Flow Director perfect filter enabled */
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
@@ -4381,8 +4823,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4452,7 +4894,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
@@ -4513,7 +4956,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -4550,7 +4994,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5100,7 +5545,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
&(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters \n");
+ "ignored adding FDIR ATR filters\n");
}
/* Done FDIR Re-initialization, enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5865,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5901,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5697,7 +6142,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
+ skb->priority != TC_PRIO_CONTROL) {
tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5942,6 +6388,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -6039,13 +6489,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -6175,7 +6626,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
/* reset_hw fills in the perm_addr as well */
+ hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
hw->mac.type == ixgbe_mac_82598EB) {
/*
@@ -6344,6 +6797,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd38695..09e1911ff51 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -135,6 +135,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
**/
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ return 0;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -475,7 +480,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
msleep(edata);
break;
case IXGBE_DATA_NL:
- hw_dbg(hw, "DATA: \n");
+ hw_dbg(hw, "DATA:\n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
&phy_offset);
@@ -491,7 +496,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
break;
case IXGBE_CONTROL_NL:
data_offset++;
- hw_dbg(hw, "CONTROL: \n");
+ hw_dbg(hw, "CONTROL:\n");
if (edata == IXGBE_CONTROL_EOL_NL) {
hw_dbg(hw, "EOL\n");
end_data = true;
@@ -531,6 +536,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
+ u8 cable_spec = 0;
u16 enforce_sfp = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -580,14 +586,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
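+ /* Active DA cable: read the cable spec compliance byte to
+  * see whether the module is active limiting. */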
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
@@ -637,10 +659,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_tyco;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
- hw->phy.type = ixgbe_phy_sfp_ftl;
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@@ -650,7 +676,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_unknown;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
@@ -658,7 +688,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
/* All passive DA cables are supported */
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = 0;
goto out;
}
@@ -1319,3 +1350,28 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 phy_data = 0;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+out:
+ return status;
+}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9cf5f3b4cc5..ef4ba834c59 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -40,9 +40,12 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
@@ -77,6 +80,8 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
@@ -103,6 +108,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f3019..f6cee94ec8e 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
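+ /* Program the MTA registers directly instead of rebuilding the whole
+  * RX mode: bits [11:5] of each hash select one of the 128 32-bit MTA
+  * registers, bits [4:0] select the bit to set. */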
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
return 0;
}
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
{
- u32 ctrl;
-
- /* Check if global VLAN already set, if not set it */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
- /* enable VLAN tag insert/strip */
- ctrl |= IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
-
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_AUPE |
- IXGBE_VMOLR_ROMPE |
+ vmolr |= (IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
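+ /* A non-zero vid installs a port VLAN tag that is always inserted on
+  * the VF's traffic (VLANA_DEFAULT); vid == 0 clears the rule. */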
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
/* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf);
-
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_SET_MAC_ADDR:
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac))
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, new_mac);
else
- retval = -1;
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
}
break;
case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, false);
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, true);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a..184730ecdfb 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf);
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 534affcc38c..2eb6e151016 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -51,6 +51,7 @@
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -73,6 +74,7 @@
/* NVM Registers */
#define IXGBE_EEC 0x10010
#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
#define IXGBE_FLA 0x1001C
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -219,6 +221,7 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -698,6 +701,7 @@
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -1311,6 +1315,10 @@
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
@@ -1458,8 +1466,9 @@
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
-/* GSSR definitions */
+/* SW_FW_SYNC/GSSR definitions */
#define IXGBE_GSSR_EEP_SM 0x0001
#define IXGBE_GSSR_PHY0_SM 0x0002
#define IXGBE_GSSR_PHY1_SM 0x0004
@@ -1479,6 +1488,8 @@
#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
@@ -1534,10 +1545,12 @@
#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
/* EEPROM Read Register */
-#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */
-#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
@@ -1545,9 +1558,15 @@
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
-#ifndef IXGBE_EERD_ATTEMPTS
-/* Number of 5 microseconds we wait for EERD read to complete */
-#define IXGBE_EERD_ATTEMPTS 100000
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5 microsecond waits for an EERD read or
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flash update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
#endif
#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
@@ -2090,6 +2109,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2159,10 +2179,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
- ixgbe_phy_tw_tyco,
- ixgbe_phy_tw_unknown,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported,
@@ -2190,6 +2212,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2263,6 +2287,7 @@ struct ixgbe_addr_filter_info {
u32 mc_addr_in_rar_count;
u32 mta_in_use;
u32 overflow_promisc;
+ bool uc_set_promisc;
bool user_set_promisc;
};
@@ -2419,8 +2444,7 @@ struct ixgbe_mac_operations {
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -2447,6 +2471,7 @@ struct ixgbe_phy_operations {
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@@ -2471,6 +2496,7 @@ struct ixgbe_mac_info {
u32 mcft_size;
u32 vft_size;
u32 num_rar_entries;
+ u32 rar_highwater;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
@@ -2494,6 +2520,7 @@ struct ixgbe_phy_info {
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
+ bool reset_if_overtemp;
};
#include "ixgbe_mbx.h"
@@ -2577,8 +2604,11 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447..ca2c81f49a0 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfac..a16cff7e54a 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* packets not getting split correctly
*/
if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb->len - skb->data_len;
+ u32 header_fixup_len = skb_headlen(skb);
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
skb->protocol = eth_type_trans(skb, adapter->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
- adapter->netdev->last_rx = jiffies;
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
ixgbevf_write_eitr(adapter, v_idx, itr_reg);
}
-
- return;
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+ if (!hw->mbx.ops.check_for_ack(hw)) {
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it. Also
+ * avoid the read below because the code to read
+ * the mailbox will also clear the ack bit. This was
+ * causing lost acks. Just cache the bit and exit
+ * the IRQ handler.
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ goto out;
+ }
+
+ /* Not an ack interrupt, go ahead and read the message */
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
+out:
return IRQ_HANDLED;
}
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
}
}
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbevf_set_rx_mode - Multicast set
* @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u8 *addr_list = NULL;
- int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbevf_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
- ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- "10 Gbps\n" : "1 Gbps\n"));
+ hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ 10 : 1);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
} else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset,
size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
- netdev->trans_start = jiffies;
-
ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled \n");
+ hw_dbg(hw, "LRO is disabled\n");
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0ec14..f6f929958ba 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @next: caller supplied function to return next address in list
+ * @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
-static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr next)
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
- u32 vector;
u32 cnt, i;
- u32 vmdq;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* addresses except for in large enterprise network environments.
*/
- cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
- for (i = 0; i < cnt; i++) {
- vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
- vector_list[i] = vector;
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
else
*link_up = false;
- if (links_reg & IXGBE_LINKS_SPEED)
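+ /* LINKS bits 29:28 encode the speed: 0x3 is 10G, 0x2 is 1G */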
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b052d4b..94b750b8874 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/netdevice.h>
#include "defines.h"
#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5932ca3e27..78ddd8b79e7 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -64,8 +64,6 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
ixp2000_reg_write(RING_TX_PENDING,
TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
- dev->trans_start = jiffies;
-
local_irq_save(flags);
ip->tx_queue_entries++;
if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3a53a..99f24f5cac5 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -103,8 +103,6 @@ jme_mdio_write(struct net_device *netdev,
if (i == 0)
jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
-
- return;
}
static inline void
@@ -130,8 +128,6 @@ jme_reset_phy_processor(struct jme_adapter *jme)
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
MII_BMCR, val | BMCR_RESET);
-
- return;
}
static void
@@ -2010,12 +2006,12 @@ jme_set_multi(struct net_device *netdev)
} else if (netdev->flags & IFF_ALLMULTI) {
jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
} else if (netdev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- netdev_for_each_mc_addr(mclist, netdev) {
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
@@ -2839,7 +2835,7 @@ jme_init_one(struct pci_dev *pdev,
default:
jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
break;
- };
+ }
/*
* Must check before reset_mac_processor
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c2249812..26bf1b76b99 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb58d02..f852ab3ae9c 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
/*
- * ks8842_main.c timberdale KS8842 ethernet driver
+ * ks8842.c timberdale KS8842 ethernet driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -20,12 +20,15 @@
* The Micrel KS8842 behind the timberdale FPGA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/ks8842.h>
#define DRV_NAME "ks8842"
@@ -302,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
ks8842_write16(adapter, 39, mac, REG_MACAR3);
}
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+{
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -520,13 +537,14 @@ static int ks8842_open(struct net_device *netdev)
/* reset the HW */
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
adapter);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- adapter->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
return err;
}
@@ -567,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
static int ks8842_set_mac(struct net_device *netdev, void *p)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
struct sockaddr *addr = p;
char *mac = (u8 *)addr->sa_data;
- int i;
dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
@@ -579,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
- REG_MACAR1 + i);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
+ ks8842_write_mac_addr(adapter, mac);
return 0;
}
@@ -604,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
}
@@ -626,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
+ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
u16 id;
+ unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -657,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
netdev->netdev_ops = &ks8842_netdev_ops;
netdev->ethtool_ops = &ks8842_ethtool_ops;
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ /* Check if a MAC address was given */
+ i = netdev->addr_len;
+ if (pdata) {
+ for (i = 0; i < netdev->addr_len; i++)
+ if (pdata->macaddr[i] != 0)
+ break;
+
+ if (i < netdev->addr_len)
+ /* an address was passed, use it */
+ memcpy(netdev->dev_addr, pdata->macaddr,
+ netdev->addr_len);
+ }
+
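+ /* No (or all-zero) platform MAC was supplied: fall back to the
+  * address stored in the chip, or a random one if that is invalid. */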
+ if (i == netdev->addr_len) {
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+ }
id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
@@ -668,8 +700,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 9e9f9b34976..b4fb07a6f13 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG
#include <linux/module.h>
@@ -76,7 +78,9 @@ union ks8851_tx_hdr {
* @msg_enable: The message flags controlling driver output (see ethtool).
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -107,6 +111,8 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
+ u16 rc_ccr;
+ u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -125,11 +131,6 @@ struct ks8851_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
-
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -167,7 +168,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -197,7 +198,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -263,7 +264,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "read: spi_sync() failed\n");
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
else if (ks8851_rx_1msg(ks))
memcpy(rxb, trx + 2, rxl);
else
@@ -417,8 +418,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
u8 txb[1];
int ret;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
/* set the operation we're issuing */
txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +435,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -446,10 +447,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
*/
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
- ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ netdev_dbg(ks->netdev,
+ "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+ rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
}
/**
@@ -471,8 +473,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxfc = ks8851_rdreg8(ks, KS_RXFC);
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d packets\n", __func__, rxfc);
/* Currently we're issuing a read per packet, but we could possibly
* improve the code by issuing a single read, getting the receive
@@ -489,9 +491,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
- rxstat, rxlen);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
/* the length of the packet includes the 32bit CRC */
@@ -553,9 +554,8 @@ static void ks8851_irq_work(struct work_struct *work)
status = ks8851_rdreg16(ks, KS_ISR);
- if (netif_msg_intr(ks))
- dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
- __func__, status);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
if (status & IRQ_LCI) {
/* should do something about checking link status */
@@ -582,8 +582,8 @@ static void ks8851_irq_work(struct work_struct *work)
* system */
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
- if (netif_msg_intr(ks))
- ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, ks->tx_space);
}
if (status & IRQ_RXI)
@@ -659,9 +659,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
unsigned fid = 0;
int ret;
- if (netif_msg_tx_queued(ks))
- dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
fid = ks->fid++;
fid &= TXFR_TXFID_MASK;
@@ -685,7 +684,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -746,8 +745,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_PM_MASK;
@@ -771,8 +769,7 @@ static int ks8851_net_open(struct net_device *dev)
* else at the moment */
mutex_lock(&ks->lock);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "opening %s\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "opening\n");
/* bring chip out of any power saving mode it was in */
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -828,8 +825,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
return 0;
@@ -847,8 +843,7 @@ static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", dev->name);
+ netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
@@ -876,8 +871,8 @@ static int ks8851_net_stop(struct net_device *dev)
while (!skb_queue_empty(&ks->txq)) {
struct sk_buff *txb = skb_dequeue(&ks->txq);
- if (netif_msg_ifdown(ks))
- ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+ netif_dbg(ks, ifdown, ks->netdev,
+ "%s: freeing txb %p\n", __func__, txb);
dev_kfree_skb(txb);
}
@@ -906,9 +901,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
unsigned needed = calc_txlen(skb->len);
netdev_tx_t ret = NETDEV_TX_OK;
- if (netif_msg_tx_queued(ks))
- ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
- skb, skb->len, skb->data);
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
spin_lock(&ks->statelock);
@@ -968,13 +962,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u32 crc;
/* accept some multicast */
- netdev_for_each_mc_addr(mcptr, dev) {
- crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= (32 - 6); /* get top six bits */
rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1040,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+/* Companion eeprom access */
+
+enum { /* EEPROM programming states */
+ EEPROM_CONTROL,
+ EEPROM_ADDRESS,
+ EEPROM_DATA,
+ EEPROM_COMPLETE
+};
+
+/**
+ * ks8851_eeprom_read - read a 16-bit word from the ks8851 companion EEPROM
+ * @dev: The network device the EEPROM is attached to.
+ * @addr: EEPROM address to read
+ *
+ * eeprom_size: selects the address length clocked onto the bus. Can be
+ * changed through debugfs.
+ *
+ * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
+ * Warning: The READ feature is not supported on ks8851 revision 0.
+ *
+ * Rough programming model:
+ * - on period start: set clock high and read value on bus
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int ctrl = EEPROM_OP_READ;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int data = 0;
+ int dummy;
+ unsigned int addr_len;
+
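+ /* a 128-byte part holds 64 16-bit words, hence 6 address bits;
+  * other sizes are clocked with 8 address bits */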
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((ctrl >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ bit_count--;
+ break;
+ case EEPROM_DATA:
+ /* Change to receive mode */
+ eepcr &= ~EEPCR_EESRWA;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait read period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ mutex_lock(&ks->lock);
+ eepcr |= EEPCR_EESCK;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* Manage read */
+ switch (state) {
+ case EEPROM_ADDRESS:
+ if (bit_count < 0) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ }
+ break;
+ case EEPROM_DATA:
+ mutex_lock(&ks->lock);
+ dummy = ks8851_rdreg16(ks, KS_EEPCR);
+ mutex_unlock(&ks->lock);
+ data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ return data;
+}
+
+/**
+ * ks8851_eeprom_write - write a 16-bit word to the ks8851 companion EEPROM
+ * @dev: The network device the EEPROM is attached to.
+ * @op: EEPROM opcode (EEPROM_OP_WRITE, EEPROM_OP_EWEN or EEPROM_OP_EWDS)
+ * @addr: EEPROM address to write
+ * @data: data to write
+ *
+ * eeprom_size: selects the address length clocked onto the bus. Can be
+ * changed through debugfs.
+ *
+ * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
+ *
+ * Note that a write enable is required before writing data.
+ *
+ * Rough programming model:
+ * - on period start: set clock high
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
+ unsigned int addr, unsigned int data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ switch (op) {
+ case EEPROM_OP_EWEN:
+ addr = 0x30;
+ break;
+ case EEPROM_OP_EWDS:
+ addr = 0;
+ break;
+ }
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((op >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ if (op == EEPROM_OP_WRITE) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ } else {
+ state = EEPROM_COMPLETE;
+ }
+ }
+ break;
+ case EEPROM_DATA:
+ eepcr |= ((data >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ eepcr |= EEPCR_EESCK;
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+}
+
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1086,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->eeprom_size;
+}
+
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+
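+ /* The EEPROM is addressed as 16-bit words: convert the byte
+  * offset/length requested by ethtool into a word range. */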
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + i);
+
+ /* Device's eeprom is little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int ks8851_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
+ return -EFAULT;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ max_len = (last_word - first_word + 1) * 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1)
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word] =
+ ks8851_eeprom_read(dev, last_word);
+
+
+ /* Device's eeprom is little-endian, word addressable */
+ le16_to_cpus(&eeprom_buff[0]);
+ le16_to_cpus(&eeprom_buff[last_word - first_word]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
+ eeprom_buff[i]);
+ mdelay(EEPROM_WRITE_TIME);
+ }
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
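The two ethtool callbacks above work on 16-bit EEPROM words, while ethtool requests (e.g. `ethtool -e` / `ethtool -E`) are byte granular. A small worked example of the word arithmetic, illustrative only:

static void ks8851_eeprom_math_example(void)
{
	int offset = 3, len = 4;			/* hypothetical request */
	int first_word = offset >> 1;			/* = 1 */
	int last_word = (offset + len - 1) >> 1;	/* = 3 */
	int buf_len = (last_word - first_word + 1) * 2;	/* = 6 bytes buffered */

	/*
	 * offset is odd, so word 1 is read back and only its high byte is
	 * replaced; (offset + len) is odd as well, so word 3 is read back
	 * and only its low byte is replaced before the words are written.
	 */
	(void)buf_len;
}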
static const struct ethtool_ops ks8851_ethtool_ops = {
.get_drvinfo = ks8851_get_drvinfo,
.get_msglevel = ks8851_get_msglevel,
@@ -1094,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
.set_settings = ks8851_set_settings,
.get_link = ks8851_get_link,
.nway_reset = ks8851_nway_reset,
+ .get_eeprom_len = ks8851_get_eeprom_len,
+ .get_eeprom = ks8851_get_eeprom,
+ .set_eeprom = ks8851_set_eeprom,
};
/* MII interface controls */
@@ -1187,17 +1523,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fail\n");
+ netdev_err(ks->netdev, "TX memory selftest fail\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fail\n");
+ netdev_err(ks->netdev, "RX memory selftest fail\n");
ret |= 2;
}
@@ -1279,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_id;
}
+ /* cache the contents of the CCR register for EEPROM, etc. */
+ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
+
+ if (ks->rc_ccr & CCR_EEPROM)
+ ks->eeprom_size = 128;
+ else
+ ks->eeprom_size = 0;
+
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
@@ -1295,9 +1639,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
- CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+ ndev->dev_addr, ndev->irq);
return 0;
@@ -1316,7 +1660,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove");
+ dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312cc35..537fb06e593 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
#define OBCR_ODS_16mA (1 << 6)
#define KS_EEPCR 0x22
+#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
+#define EEPCR_EESB_OFFSET 3
+#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
+#define EEPROM_OP_LEN 3 /* bits: */
+#define EEPROM_OP_READ 0x06
+#define EEPROM_OP_EWEN 0x04
+#define EEPROM_OP_WRITE 0x05
+#define EEPROM_OP_EWDS 0x14
+
+#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
+#define EEPROM_WRITE_TIME 4 /* write ack time in ms */
+#define EEPROM_SK_PERIOD 400 /* in us */
+
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3a45a..2e2c69b2406 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
* KS8851 16bit MLL chip from Micrel Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8
-#define MAC_ADDR_LEN 6
/**
* union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
u16 promiscuous;
u16 all_mcast;
u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+ u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
u8 mcast_bits[HW_MCAST_SIZE];
u8 mac_addr[6];
u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define BE3 0x8000 /* Byte Enable 3 */
#define BE2 0x4000 /* Byte Enable 2 */
#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
ks_rdreg16(ks, KS_GRR);
pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
skb_put(skb, frame_hdr->len);
- skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
} else {
- printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+ pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
if (skb)
dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
netif_carrier_off(netdev);
link_up_status = false;
}
- if (netif_msg_link(ks))
- ks_dbg(ks, "%s: %s\n",
- __func__, link_up_status ? "UP" : "DOWN");
+ netif_dbg(ks, link, ks->netdev,
+ "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
/**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
* else at the moment.
*/
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "%s - entry\n", __func__);
+ netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
/* reset the HW */
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- ks->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
return err;
}
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
ks_enable_qmu(ks);
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", netdev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
return 0;
}
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", netdev->name);
+ netif_info(ks, ifdown, netdev, "shutting down\n");
netif_stop_queue(netdev);
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
static void ks_set_rx_mode(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- struct dev_mc_list *ptr;
+ struct netdev_hw_addr *ha;
/* Turn on/off promiscuous mode. */
if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- netdev_for_each_mc_addr(ptr, netdev) {
- if (!(*ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MCAST_LST)
break;
- memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
}
ks->mcast_lst_size = (u8)i;
ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
rd = ks_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fails\n");
+ netdev_err(ks->netdev, "TX memory selftest fails\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fails\n");
+ netdev_err(ks->netdev, "RX memory selftest fails\n");
ret |= 2;
}
- ks_info(ks, "the selftest passes\n");
+ netdev_info(ks->netdev, "the selftest passes\n");
return ret;
}
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->frame_head_info = (struct type_frame_head *) \
kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
- printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+ pr_err("Error: Fail to allocate frame memory\n");
return false;
}
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks->mii.mdio_read = ks_phy_read;
ks->mii.mdio_write = ks_phy_write;
- ks_info(ks, "message enable is %d\n", msg_enable);
+ netdev_info(netdev, "message enable is %d\n", msg_enable);
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
/* simple check for a valid chip being connected to the bus */
if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
if (ks_read_selftest(ks)) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
id = ks_rdreg16(ks, KS_CIDER);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
err_register:
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f359f..c80ca64277b 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
int promiscuous;
};
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
alloc >>= 1;
}
if (alloc != 1 || shift < MIN_DESC_SHIFT) {
- printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ pr_alert("Hardware descriptor numbers not right!\n");
while (alloc) {
shift++;
alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
DESC_ALIGNMENT) * DESC_ALIGNMENT);
if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
- printk(KERN_ALERT
- "Hardware descriptor size not right!\n");
+ pr_alert("Hardware descriptor size not right!\n");
ksz_check_desc_num(&hw->rx_desc_info);
ksz_check_desc_num(&hw->tx_desc_info);
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
int frag;
skb_frag_t *this_frag;
- dma_buf->len = skb->len - skb->data_len;
+ dma_buf->len = skb_headlen(skb);
dma_buf->dma = pci_map_single(
hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dma_buf->skb->data, packet_len);
} while (0);
- skb->dev = dev;
-
skb->protocol = eth_type_trans(skb, dev);
if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
priv->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- dev->last_rx = jiffies;
-
rx_status = netif_rx(skb);
return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
u32 data;
hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
- printk(KERN_INFO "Tx stopped\n");
+ pr_info("Tx stopped\n");
data = readl(hw->io + KS_DMA_TX_CTRL);
if (!(data & DMA_TX_ENABLE))
- printk(KERN_INFO "Tx disabled\n");
+ pr_info("Tx disabled\n");
break;
}
} while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
return 0;
}
+static void set_media_state(struct net_device *dev, int media_state)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ if (media_state == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ netif_info(priv, link, dev, "link %s\n",
+ media_state == priv->media_state ? "on" : "off");
+}
+
/**
* netdev_open - open network device
* @dev: Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
priv->media_state = port->linked->state;
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
-
+ set_media_state(dev, media_connected);
netif_start_queue(dev);
return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int multicast = (dev->flags & IFF_ALLMULTI);
dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
int i = 0;
/* List too big to support so turn on all multicast mode. */
- if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
if (MAX_MULTICAST_LIST != hw->multi_list_size) {
hw->multi_list_size = MAX_MULTICAST_LIST;
++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
return;
}
- netdev_for_each_mc_addr(mc_ptr, dev) {
- if (!(*mc_ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
{
if (priv->media_state != port->linked->state) {
priv->media_state = port->linked->state;
- if (netif_running(dev)) {
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
- }
+ if (netif_running(dev))
+ set_media_state(dev, media_connected);
}
}
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
int pi;
int port_count;
int result;
- char banner[80];
+ char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
result = -ENOMEM;
- info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
if (!info)
goto pcidev_init_dev_err;
- memset(info, 0, sizeof(struct platform_info));
hw_priv = &info->dev_info;
hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
cnt = hw_init(hw);
if (!cnt) {
if (msg_enable & NETIF_MSG_PROBE)
- printk(KERN_ALERT "chip not detected\n");
+ pr_alert("chip not detected\n");
result = -ENODEV;
goto pcidev_init_alloc_err;
}
- sprintf(banner, "%s\n", version);
- banner[13] = cnt + '0';
- ks_info(hw_priv, "%s", banner);
- ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+ snprintf(banner, sizeof(banner), "%s", version);
+ banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
+ dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+ dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
/* Assume device is KSZ8841. */
hw->dev_count = 1;
@@ -7064,10 +7049,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
mib_port_count = SWITCH_PORT_NUM;
}
hw->mib_port_cnt = TOTAL_PORT_NUM;
- hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
if (!hw->ksz_switch)
goto pcidev_init_alloc_err;
- memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
sw = hw->ksz_switch;
}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 7b9447646f8..21f8adaa87c 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -945,7 +945,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1011,8 +1011,6 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
outw(0x0000, ioaddr+LANCE_ADDR);
outw(0x0048, ioaddr+LANCE_DATA);
- dev->trans_start = jiffies;
-
if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
netif_stop_queue(dev);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b82ec..ce5d6e90921 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -963,7 +963,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -974,7 +974,6 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX, printk(KERN_DEBUG
"%s: i596_start_xmit(%x,%p) called\n",
@@ -1092,7 +1091,7 @@ static int __devinit i82596_probe(struct net_device *dev)
DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
(void *)dma, lp->dma_addr);
return i;
- };
+ }
DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
dev->name, dev->base_addr, dev->dev_addr,
@@ -1388,7 +1387,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1396,10 +1395,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = SWAP16(CmdMulticastList);
cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, 6);
+ memcpy(cp, ha->addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f48540..316bb70775b 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -257,7 +257,7 @@ static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -386,7 +386,6 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -445,14 +444,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
if (ei_local->irqlock)
{
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ /*
+ * This might just be an interrupt for a PCI device sharing
+ * this line
+ */
+ printk("%s: Interrupted while interrupts are masked!"
+ " isr=%#2x imr=%#2x.\n",
dev->name, ei_inb_p(e8390_base + EN0_ISR),
ei_inb_p(e8390_base + EN0_IMR));
-#endif
spin_unlock(&ei_local->page_lock);
return IRQ_NONE;
}
@@ -792,7 +791,6 @@ static void ei_receive(struct net_device *dev)
/* We used to also ack ENISR_OVER here, but that would sometimes mask
a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
- return;
}
/**
@@ -905,10 +903,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(dmi, dev) {
- u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1e691..522abe2ff25 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#endif
/* packet size info */
#define XTE_HDR_SIZE 14 /* size of Ethernet header */
@@ -290,11 +293,12 @@ This option defaults to enabled (set) */
#define TX_CONTROL_CALC_CSUM_MASK 1
-#define XTE_ALIGN 32
-#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
-
#define MULTICAST_CAM_TABLE_NUM 4
+/* TEMAC Synthesis features */
+#define TEMAC_FEATURE_RX_CSUM (1 << 0)
+#define TEMAC_FEATURE_TX_CSUM (1 << 1)
+
/* TX/RX CURDESC_PTR points to first descriptor */
/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
@@ -335,9 +339,15 @@ struct temac_local {
struct mii_bus *mii_bus; /* MII bus reference */
int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
- /* IO registers and IRQs */
+ /* IO registers, dma functions and IRQs */
void __iomem *regs;
+ void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
+#endif
+ u32 (*dma_in)(struct temac_local *, int);
+ void (*dma_out)(struct temac_local *, int, u32);
+
int tx_irq;
int rx_irq;
int emac_num;
@@ -347,6 +357,7 @@ struct temac_local {
struct mutex indirect_mutex;
u32 options; /* Current options word */
int last_link;
+ unsigned int temac_features;
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index ba617e3cf1b..52dcc849564 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
* or rx, so this should be okay.
*
* TODO:
- * - Fix driver to work on more than just Virtex5. Right now the driver
- * assumes that the locallink DMA registers are accessed via DCR
- * instructions.
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
+/**
+ * temac_dma_in32 - Memory mapped DMA read; the register argument is a
+ * DCR word address which is converted here to a memory mapped byte
+ * address
+ */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return dcr_read(lp->sdma_dcrs, reg);
+ return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
+/**
+ * temac_dma_out32 - Memory mapped DMA write; the register argument is a
+ * DCR word address which is converted here to a memory mapped byte
+ * address
+ */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
+ out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
dcr_write(lp->sdma_dcrs, reg, value);
}
/**
+ * temac_dcr_setup - If the DMA is DCR based, then set up the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ unsigned int dcrs;
+
+ /* setup the dcr address mapping if it's in the device tree */
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs != 0) {
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ lp->dma_in = temac_dma_dcr_in;
+ lp->dma_out = temac_dma_dcr_out;
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+ return 0;
+ }
+ /* no DCR in the device tree, indicate a failure */
+ return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ return -1;
+}
+
+#endif
+
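A minimal sketch (not part of the patch) of what the dma_in/dma_out indirection buys: callers stay backend-agnostic whether the registers are reached via DCR (PowerPC 440) or a memory mapping (PowerPC 405, MicroBlaze). It mirrors the reset sequence used later in temac_device_reset().

static void temac_dma_soft_reset_sketch(struct temac_local *lp)
{
	int timeout = 1000;

	/* same register names as the driver; backend chosen at probe time */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0)
			break;
	}
}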
+/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
- skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
- + XTE_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (skb == 0) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
return -1;
}
lp->rx_skb[i] = skb;
- skb_reserve(skb, BUFFER_ALIGN(skb->data));
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
- temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+ lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
- temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+ lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
- temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- temac_dma_out32(lp, RX_TAILDESC_PTR,
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
}
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
- multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
- (mclist->dmi_addr[2] << 16) |
- (mclist->dmi_addr[1] << 8) |
- (mclist->dmi_addr[0]));
+ multi_addr_msw = ((ha->addr[3] << 24) |
+ (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) |
+ (ha->addr[0]));
temac_indirect_out32(lp, XTE_MAW0_OFFSET,
multi_addr_msw);
- multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
- (mclist->dmi_addr[4]) | (i << 16));
+ multi_addr_lsw = ((ha->addr[5] << 8) |
+ (ha->addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
/* Reset Local Link (DMA) */
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
timeout = 1000;
- while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
break;
}
}
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
temac_dma_bd_init(ndev);
@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = 0;
+ ndev->trans_start = jiffies; /* prevent tx timeout */
}
void temac_adjust_link(struct net_device *ndev)
@@ -508,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
if (cur_p->app4)
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += cur_p->len;
@@ -523,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
netif_wake_queue(ndev);
}
+static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+{
+ struct cdmac_bd *cur_p;
+ int tail;
+
+ tail = lp->tx_bd_tail;
+ cur_p = &lp->tx_bd_v[tail];
+
+ do {
+ if (cur_p->app0)
+ return NETDEV_TX_BUSY;
+
+ tail++;
+ if (tail >= TX_BD_NUM)
+ tail = 0;
+
+ cur_p = &lp->tx_bd_v[tail];
+ num_frag--;
+ } while (num_frag >= 0);
+
+ return 0;
+}
+
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -537,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
+ if (temac_check_tx_bd_space(lp, num_frag)) {
if (!netif_queue_stopped(ndev)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
@@ -547,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
- int length = 0, start = 0, insert = 0;
-
- switch (ip->protocol) {
- case IPPROTO_TCP:
- start = sizeof(struct iphdr) + ETH_HLEN;
- insert = sizeof(struct iphdr) + ETH_HLEN + 16;
- length = ip->tot_len - sizeof(struct iphdr);
- break;
- case IPPROTO_UDP:
- start = sizeof(struct iphdr) + ETH_HLEN;
- insert = sizeof(struct iphdr) + ETH_HLEN + 6;
- length = ip->tot_len - sizeof(struct iphdr);
- break;
- default:
- break;
- }
- cur_p->app1 = ((start << 16) | insert);
- cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
- length, ip->protocol, 0);
- skb->data[insert] = 0;
- skb->data[insert + 1] = 0;
+ unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+ cur_p->app0 |= 1; /* TX Checksum Enabled */
+ cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ cur_p->app2 = 0; /* initial checksum seed */
}
+
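	/*
	 * Worked example, illustrative only (not part of the patch): for a
	 * plain TCP-over-IPv4 frame, skb_transport_offset() is
	 * 14 (Ethernet) + 20 (IP) = 34 and skb->csum_offset is 16 (the TCP
	 * checksum field), so the new checksum-offload path above programs
	 * csum_start_off = 34, csum_index_off = 50 and
	 * app1 = (34 << 16) | 50.
	 */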
cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb);
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
@@ -598,7 +676,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
/* Kick off the transfer */
- temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
}
@@ -612,7 +690,6 @@ static void ll_temac_recv(struct net_device *ndev)
struct cdmac_bd *cur_p;
dma_addr_t tail_p;
int length;
- unsigned long skb_vaddr;
unsigned long flags;
spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +703,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb = lp->rx_skb[lp->rx_bd_ci];
length = cur_p->app4 & 0x3FFF;
- skb_vaddr = virt_to_bus(skb->data);
- dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
@@ -635,21 +711,29 @@ static void ll_temac_recv(struct net_device *ndev)
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
+ /* if we're doing rx csum offload, set it up */
+ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
+ (skb->protocol == __constant_htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
+
+ skb->csum = cur_p->app3 & 0xFFFF;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
netif_rx(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
- GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (new_skb == 0) {
dev_err(&ndev->dev, "no memory for new sk_buff\n");
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
- skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
cur_p->app0 = STS_CTRL_APP0_IRQONEND;
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +748,7 @@ static void ll_temac_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
}
- temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
@@ -675,8 +759,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
- status = temac_dma_in32(lp, TX_IRQ_REG);
- temac_dma_out32(lp, TX_IRQ_REG, status);
+ status = lp->dma_in(lp, TX_IRQ_REG);
+ lp->dma_out(lp, TX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
@@ -693,8 +777,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
unsigned int status;
/* Read and clear the status registers */
- status = temac_dma_in32(lp, RX_IRQ_REG);
- temac_dma_out32(lp, RX_IRQ_REG, status);
+ status = lp->dma_in(lp, RX_IRQ_REG);
+ lp->dma_out(lp, RX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
@@ -795,7 +879,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
int i, len = 0;
for (i = 0; i < 0x11; i++)
- len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+ len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
(i % 8) == 7 ? "\n" : " ");
len += sprintf(buf + len, "\n");
@@ -820,8 +904,8 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
struct temac_local *lp;
struct net_device *ndev;
const void *addr;
+ __be32 *p;
int size, rc = 0;
- unsigned int dcrs;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
@@ -858,30 +942,49 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
mutex_init(&lp->indirect_mutex);
/* map device registers */
- lp->regs = of_iomap(op->node, 0);
+ lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
goto nodev;
}
+ /* Setup checksum offload, but default to off if not specified */
+ lp->temac_features = 0;
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+ if (p && be32_to_cpu(*p)) {
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->node, "llink-connected", 0);
+ np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
goto nodev;
}
- dcrs = dcr_resource_start(np, 0);
- if (dcrs == 0) {
- dev_err(&op->dev, "could not get DMA register address\n");
- goto nodev;
+ /* Setup the DMA register accesses, could be DCR or memory mapped */
+ if (temac_dcr_setup(lp, op, np)) {
+
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = of_iomap(np, 0);
+ if (lp->sdma_regs) {
+ lp->dma_in = temac_dma_in32;
+ lp->dma_out = temac_dma_out32;
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ goto nodev;
+ }
}
- lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
- dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1);
- if (!lp->rx_irq || !lp->tx_irq) {
+ if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto nodev;
@@ -890,7 +993,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
of_node_put(np); /* Finished with the DMA node; drop the reference */
/* Retrieve the MAC address */
- addr = of_get_property(op->node, "local-mac-address", &size);
+ addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
if ((!addr) || (size != 6)) {
dev_err(&op->dev, "could not find MAC address\n");
rc = -ENODEV;
@@ -898,11 +1001,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
}
temac_set_mac_address(ndev, (void *)addr);
- rc = temac_mdio_setup(lp, op->node);
+ rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
dev_warn(&op->dev, "error registering MDIO bus\n");
- lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+ lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
if (lp->phy_node)
dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
@@ -955,12 +1058,12 @@ static struct of_device_id temac_of_match[] __devinitdata = {
MODULE_DEVICE_TABLE(of, temac_of_match);
static struct of_platform_driver temac_of_driver = {
- .match_table = temac_of_match,
.probe = temac_of_probe,
.remove = __devexit_p(temac_of_remove),
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_temac",
+ .of_match_table = temac_of_match,
},
};
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 41cbaaef065..8a1097cf8a8 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -307,8 +307,6 @@ static void lne390_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + LNE390_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04defd..3df046a58b1 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -875,8 +875,6 @@ static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev)
length = ETH_ZLEN;
}
- dev->trans_start = jiffies;
-
tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
if (tx_cmd == NULL) {
printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
@@ -1256,7 +1254,7 @@ static void set_multicast_list(struct net_device *dev) {
dev->name, netdev_mc_count(dev));
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *cp;
cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1265,8 @@ static void set_multicast_list(struct net_device *dev) {
cmd->command = CmdMulticastList;
*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(cp, ha->addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index c8e68fde066..1136c9a22b6 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -661,7 +661,6 @@ static void mac8390_no_reset(struct net_device *dev)
ei_status.txing = 0;
if (ei_debug > 1)
pr_info("reset not supported\n");
- return;
}
static void interlan_reset(struct net_device *dev)
@@ -673,7 +672,6 @@ static void interlan_reset(struct net_device *dev)
target[0xC0000] = 0;
if (ei_debug > 1)
pr_cont("reset complete\n");
- return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index c0876e915ee..69fa4ef64dd 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -408,7 +408,6 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb->len+1);
local_irq_restore(flags);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6203c..40797fbdca9 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -666,8 +666,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -793,6 +791,7 @@ static void macb_init_hw(struct macb *bp)
config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +881,15 @@ static int hash_get_index(__u8 *addr)
*/
static void macb_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d0c8d..b6855a6476f 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
i = crc >> 26; /* bit number in multicast_filter */
multicast_filter[i >> 3] |= 1 << (i & 7);
}
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51c4c4..c685a465687 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -488,7 +488,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -509,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -518,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
/* bit number in multicast_filter */
i = crc >> 26;
multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa368b07..87e8d4cb405 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -145,19 +145,15 @@ static void macvlan_broadcast(struct sk_buff *skb,
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
+ struct sk_buff *skb)
{
const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_port *port;
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
struct net_device *dev;
unsigned int len;
- port = rcu_dereference(skb->dev->macvlan_port);
- if (port == NULL)
- return skb;
-
if (is_multicast_ether_addr(eth->h_dest)) {
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
@@ -243,7 +239,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
int ret;
ret = macvlan_queue_xmit(skb, dev);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
@@ -282,7 +278,7 @@ static int macvlan_open(struct net_device *dev)
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_unicast_add(lowerdev, dev->dev_addr);
+ err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +290,7 @@ static int macvlan_open(struct net_device *dev)
return 0;
del_unicast:
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
out:
return err;
}
@@ -308,7 +304,7 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_del(vlan);
return 0;
@@ -332,11 +328,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (macvlan_addr_busy(vlan->port, addr->sa_data))
return -EBUSY;
- err = dev_unicast_add(lowerdev, addr->sa_data);
+ err = dev_uc_add(lowerdev, addr->sa_data);
if (err)
return err;
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_change_addr(vlan, addr->sa_data);
}
@@ -638,11 +634,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
err = register_netdevice(dev);
if (err < 0)
- return err;
+ goto destroy_port;
list_add_tail(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
+
return 0;
+
+destroy_port:
+ if (list_empty(&port->vlans))
+ macvlan_port_destroy(lowerdev);
+
+ return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
@@ -748,6 +751,9 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid the underlying device from changing its type. */
+ return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f1..a8a94e2f6dd 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
struct macvtap_queue {
struct sock sk;
struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
struct macvlan_dev *vlan;
struct file *file;
unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
return -ENOLINK;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
- wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
return 0;
}
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
static void macvtap_sock_write_space(struct sock *sk)
{
+ wait_queue_head_t *wqueue;
+
if (!sock_writeable(sk) ||
!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!q)
goto out;
- init_waitqueue_head(&q->sock.wait);
+ q->sock.wq = &q->wq;
+ init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
goto out;
mask = 0;
- poll_wait(file, &q->sock.wait, wait);
+ poll_wait(file, &q->wq.wait, wait);
if (!skb_queue_empty(&q->sk.sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
int vnet_hdr_len = 0;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = sizeof(vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
if ((len -= vnet_hdr_len) < 0)
goto err;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- vnet_hdr_len);
+ sizeof(vnet_hdr));
if (err < 0)
goto err;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = sizeof (vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (ret)
return ret;
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(q->sk.sk_sleep, &wait);
+ add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
}
current->state = TASK_RUNNING;
- remove_wait_queue(q->sk.sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(&q->sk), &wait);
return ret;
}
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
+ int __user *sp = argp;
+ int s;
int ret;
switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = u;
return 0;
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ q->vnet_hdr_sz = s;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
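The new TUNGETVNETHDRSZ/TUNSETVNETHDRSZ cases take a plain int argument (note the int __user *sp above). An illustrative userspace sketch; the fd handling and error reporting are assumptions, not from the patch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

static int adjust_vnet_hdr_sz(int tap_fd)
{
	int s = sizeof(struct virtio_net_hdr);	/* smallest size accepted */

	if (ioctl(tap_fd, TUNSETVNETHDRSZ, &s) < 0)
		return -1;
	if (ioctl(tap_fd, TUNGETVNETHDRSZ, &s) < 0)
		return -1;
	printf("vnet header size is now %d\n", s);
	return 0;
}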
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 9f72cb45f4a..42e3294671d 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -746,10 +746,8 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
-
- return;
}
/*
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 86467b444ac..d5afd037cd7 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -140,8 +140,6 @@ static void mlx4_en_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20c645..96180c0ec20 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *plist = priv->mc_list;
- struct dev_mc_list *next;
- while (plist) {
- next = plist->next;
- kfree(plist);
- plist = next;
- }
- priv->mc_list = NULL;
+ kfree(priv->mc_addrs);
+ priv->mc_addrs_cnt = 0;
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *mclist;
- struct dev_mc_list *tmp;
- struct dev_mc_list *plist = NULL;
-
- for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
- tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
- if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
- mlx4_en_clear_list(dev);
- return;
- }
- memcpy(tmp, mclist, sizeof(struct dev_mc_list));
- tmp->next = NULL;
- if (plist)
- plist->next = tmp;
- else
- priv->mc_list = tmp;
- plist = tmp;
+ struct netdev_hw_addr *ha;
+ char *mc_addrs;
+ int mc_addrs_cnt = netdev_mc_count(dev);
+ int i;
+
+ mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ en_err(priv, "failed to allocate multicast list\n");
+ return;
}
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ priv->mc_addrs = mc_addrs;
+ priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
u64 mcast_addr = 0;
int err;
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
} else {
+ int i;
+
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ mcast_addr =
+ mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
- en_dbg(HW, priv, "Could not update stats \n");
+ en_dbg(HW, priv, "Could not update stats\n");
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags = prof->flags;
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
- priv->mc_list = NULL;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 7365bf488b8..423053482ed 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -239,7 +239,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
break;
- };
+ }
++eq->cons_index;
eqes_found = 1;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395..b07e4dee80a 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
cur_order, gfp_mask);
- if (!ret) {
- ++chunk->npages;
-
- if (coherent)
- ++chunk->nsg;
- else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ if (ret) {
+ if (--cur_order < 0)
+ goto fail;
+ else
+ continue;
+ }
- if (chunk->nsg <= 0)
- goto fail;
+ ++chunk->npages;
- chunk = NULL;
- }
+ if (coherent)
+ ++chunk->nsg;
+ else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
- npages -= 1 << cur_order;
- } else {
- --cur_order;
- if (cur_order < 0)
+ if (chunk->nsg <= 0)
goto fail;
}
+
+ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+ chunk = NULL;
+
+ npages -= 1 << cur_order;
}
if (!coherent && chunk) {
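The mlx4_alloc_icm() rework above flattens the nesting: an allocation failure now backs off to a smaller order up front and continues the loop, instead of handling success and failure in one large if/else. The back-off idea in isolation, as an assumed stand-alone helper rather than the driver's code:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Try progressively smaller orders until an allocation succeeds or order 0 fails. */
static struct page *alloc_pages_backoff(gfp_t gfp_mask, int max_order, int *order)
{
        int cur_order = max_order;

        for (;;) {
                struct page *page = alloc_pages(gfp_mask, cur_order);

                if (page) {
                        *order = cur_order;
                        return page;
                }
                if (--cur_order < 0)
                        return NULL;
        }
}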
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e4919..13343e88499 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/timer.h>
+#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc584e..b55e46c8b68 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
- struct dev_mc_list *mc_list;
+ char *mc_addrs;
+ int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52ddf1..e345ec8cb47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -882,7 +882,6 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_bytes += skb->len;
txq->tx_packets++;
- dev->trans_start = jiffies;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
if (entries_left < MAX_SKB_FRAGS + 1)
@@ -1770,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
- struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1794,8 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- netdev_for_each_mc_addr(addr, dev) {
- u8 *a = addr->da_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
u32 *table;
int entry;
@@ -2609,10 +2608,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
goto out;
ret = -ENOMEM;
- msp = kmalloc(sizeof(*msp), GFP_KERNEL);
+ msp = kzalloc(sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
goto out;
- memset(msp, 0, sizeof(*msp));
msp->base = ioremap(res->start, res->end - res->start + 1);
if (msp->base == NULL)
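The mv643xx_eth hunk above also swaps kmalloc() plus memset() for kzalloc(), which hands back already-zeroed memory. A trivial sketch (the struct here is illustrative, not the driver's shared-private type):

#include <linux/slab.h>

struct demo_shared {
        int count;
        char name[16];
};

static struct demo_shared *demo_alloc_shared(void)
{
        /* kzalloc() zeroes the allocation, so no explicit memset() is needed. */
        return kzalloc(sizeof(struct demo_shared), GFP_KERNEL);
}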
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde0876a78..e0b47cc8a86 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
@@ -1234,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
- pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
@@ -1266,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
- pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+ pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
@@ -1373,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->info[idx].last = 0;
}
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2094,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2757,12 +2757,12 @@ again:
}
/* map the skb for DMA */
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
@@ -2865,8 +2865,8 @@ again:
len = frag->size;
bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2903,19 @@ abort_linearize:
idx = tx->req & tx->mask;
tx->info[idx].skb = NULL;
do {
- len = pci_unmap_len(&tx->info[idx], len);
+ len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
else
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
@@ -3002,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
@@ -3039,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
}
/* Walk the multicast list, and add each address */
- netdev_for_each_mc_addr(mc_list, dev) {
- memcpy(data, &mc_list->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(data, &ha->addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
- err, mc_list->dmi_addr);
+ err, ha->addr);
goto abort;
}
}
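The myri10ge changes above convert the unmap-state bookkeeping from the old PCI-specific macros to the generic DMA API ones, and use skb_headlen() for the linear length. A hedged sketch of the same pattern with an illustrative descriptor struct (not the driver's own types; real code would also check dma_mapping_error()):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct tx_slot {
        struct sk_buff *skb;
        DEFINE_DMA_UNMAP_ADDR(bus);
        DEFINE_DMA_UNMAP_LEN(len);
};

static void tx_slot_fill(struct tx_slot *slot, struct device *dev,
                         struct sk_buff *skb)
{
        unsigned int len = skb_headlen(skb);    /* == skb->len - skb->data_len */
        dma_addr_t mapping = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);

        slot->skb = skb;
        dma_unmap_addr_set(slot, bus, mapping);
        dma_unmap_len_set(slot, len, len);
}

static void tx_slot_unmap(struct tx_slot *slot, struct device *dev)
{
        if (dma_unmap_len(slot, len))
                dma_unmap_single(dev, dma_unmap_addr(slot, bus),
                                 dma_unmap_len(slot, len), DMA_TO_DEVICE);
}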
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b72e749afdf..1a57c3da1f4 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -865,7 +865,7 @@ static inline void determine_reg_space_size(struct myri_eth *mp)
printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
mp->eeprom.cpuvers);
mp->reg_size = (3 * 128 * 1024) + 4096;
- };
+ }
}
#ifdef DEBUG_DETECT
@@ -928,7 +928,7 @@ static const struct net_device_ops myri_ops = {
static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
struct net_device *dev;
struct myri_eth *mp;
@@ -1161,8 +1161,11 @@ static const struct of_device_id myri_sbus_match[] = {
MODULE_DEVICE_TABLE(of, myri_sbus_match);
static struct of_platform_driver myri_sbus_driver = {
- .name = "myri",
- .match_table = myri_sbus_match,
+ .driver = {
+ .name = "myri",
+ .owner = THIS_MODULE,
+ .of_match_table = myri_sbus_match,
+ },
.probe = myri_sbus_probe,
.remove = __devexit_p(myri_sbus_remove),
};
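The myri_sbus hunks above follow the of_platform_driver conversion of this cycle: the driver name and match table move into the embedded struct device_driver, and the device node is reached through op->dev.of_node. A sketch of the converted shape (all names illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static int __devinit demo_probe(struct of_device *op,
                                const struct of_device_id *match)
{
        struct device_node *dp = op->dev.of_node;       /* was op->node */

        return dp ? 0 : -ENODEV;
}

static const struct of_device_id demo_match[] = {
        { .name = "demo", },    /* illustrative match entry */
        {},
};

static struct of_platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                .owner = THIS_MODULE,
                .of_match_table = demo_match,
        },
        .probe = demo_probe,
};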
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e5203878324..2a17b503fea 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1905,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(dev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -2119,8 +2119,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(np)) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
@@ -2493,12 +2491,12 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
rx_mode = RxFilterEnable | AcceptBroadcast
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 7bd6662d5b0..e0b0ef11f11 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -608,7 +608,6 @@ retry:
outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index f4347f88b6f..b8e2923a1d6 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -785,7 +785,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static int __init ne_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index ff3c4c81498..70cdc699634 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -730,7 +730,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 85aec4f1013..3c333cb5d34 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -631,7 +631,6 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static void ne2k_pci_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index a00bbfb9aed..243ed2aee88 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -255,8 +255,6 @@ static void ne3210_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + NE3210_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea3557..ca142c47b2e 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
struct netconsole_target *nt;
struct net_device *dev = ptr;
- if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER))
+ if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
+ event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
goto done;
spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_UNREGISTER:
- if (!nt->enabled)
- break;
netpoll_cleanup(&nt->np);
+ /* Fall through */
+ case NETDEV_GOING_DOWN:
+ case NETDEV_BONDING_DESLAVE:
nt->enabled = 0;
- printk(KERN_INFO "netconsole: network logging stopped"
- ", interface %s unregistered\n",
- dev->name);
break;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
+ printk(KERN_INFO "netconsole: network logging stopped, "
+ "interface %s %s\n", dev->name,
+ event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
done:
return NOTIFY_DONE;
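The netconsole change above extends the notifier so targets are also disabled on NETDEV_GOING_DOWN and NETDEV_BONDING_DESLAVE, not just on unregister. A minimal sketch of a netdevice notifier filtering on the same events (handler and message are illustrative):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_netdev_event(struct notifier_block *nb, unsigned long event,
                             void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_CHANGENAME:
        case NETDEV_UNREGISTER:
        case NETDEV_GOING_DOWN:
        case NETDEV_BONDING_DESLAVE:
                pr_info("demo: event %lu on %s\n", event, dev->name);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_notifier = {
        .notifier_call = demo_netdev_event,
};
/* registered elsewhere with register_netdevice_notifier(&demo_netdev_notifier) */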
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 64770298c4f..2e4b42175f3 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -126,7 +126,6 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
FIFO_PTR_FRAMENO(1) |
FIFO_PTR_FRAMELEN(len));
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f703838e21..ffa1b9ce1cc 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,6 +95,9 @@
#define ADDR_IN_WINDOW1(off) \
((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
/*
* normalize a 64MB crb address to 32MB PCI window
* To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
@@ -420,7 +423,6 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1353,6 +1355,8 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable);
int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e56cbe..20f7c58bd09 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -632,6 +632,9 @@ static int netxen_nic_reg_test(struct net_device *dev)
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
data_written = (u32)0xa5a5a5a5;
NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
@@ -703,6 +706,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 netxen_nic_get_tx_csum(struct net_device *dev)
+{
+ return dev->features & NETIF_F_IP_CSUM;
+}
+
static u32 netxen_nic_get_rx_csum(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +917,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.set_ringparam = netxen_nic_set_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
+ .get_tx_csum = netxen_nic_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = netxen_nic_get_tso,
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 622e4c8be93..d8bd73d7e29 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -681,14 +681,8 @@ enum {
#define MIU_TEST_AGT_ADDR_HI (0x08)
#define MIU_TEST_AGT_WRDATA_LO (0x10)
#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_RDDATA_LO (0x18)
#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
@@ -789,9 +783,7 @@ enum {
* for backward compatibility
*/
#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
#define INTR_SCHEME_PERPORT 0x1
#define MSI_MODE_MULTIFUNC 0x1
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a0c48..5c496f8d7c4 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -32,7 +32,6 @@
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
-#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
#define MS_WIN(addr) (addr & 0x0ffc0000)
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -63,9 +62,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
((adapter)->ahw.pci_base0 + (off))
#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
@@ -538,7 +534,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 null_addr[6];
int i;
@@ -572,8 +568,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_enable_mcast_filter(adapter);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
/* Clear out remaining addresses */
while (i < adapter->max_mc_count)
@@ -681,7 +677,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
@@ -708,8 +704,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev)
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
}
send_fw_cmd:
@@ -1391,18 +1387,8 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
-
- if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
- window = OCM_WIN_P3P(addr);
- else
- window = OCM_WIN(addr);
+ window = OCM_WIN(addr);
writel(window, adapter->ahw.ocm_win_crb);
/* read back to flush */
@@ -1419,7 +1405,7 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
{
void __iomem *addr, *mem_ptr = NULL;
resource_size_t mem_base;
- int ret = -EIO;
+ int ret;
u32 start;
spin_lock(&adapter->ahw.mem_lock);
@@ -1428,20 +1414,23 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
}
-
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
noremap:
if (op == 0) /* read */
*data = readq(addr);
@@ -1456,6 +1445,28 @@ unlock:
return ret;
}
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
static int
@@ -1621,9 +1632,8 @@ static int
netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
u64 off, u64 data)
{
- int i, j, ret;
+ int j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1650,44 +1660,17 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
- i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
-
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
-
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
- }
-
writel(data & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i));
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
writel((data >> 32) & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
@@ -1707,7 +1690,6 @@ correct:
} else
ret = 0;
-done:
spin_unlock(&adapter->ahw.mem_lock);
return ret;
@@ -1719,7 +1701,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1748,9 +1730,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
@@ -1771,13 +1751,8 @@ correct:
"failed to read through agent\n");
ret = -EIO;
} else {
- off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
- off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
-
- temp = readl(mem_crb + off8 + 4);
- val = (u64)temp << 32;
- val |= readl(mem_crb + off8);
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
*data = val;
ret = 0;
}
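The new netxen_pci_camqm_read_2M()/write_2M() helpers above serialize 64-bit accesses to an ioremapped window under ahw.mem_lock. The same locked readq/writeq idiom in isolation (parameter names illustrative; on 32-bit builds the driver supplies its own readq/writeq fallbacks, as the hw.c context shows):

#include <linux/io.h>
#include <linux/spinlock.h>

static u64 win_read64(void __iomem *base, unsigned long off, spinlock_t *lock)
{
        u64 val;

        spin_lock(lock);
        val = readq(base + off);
        spin_unlock(lock);
        return val;
}

static void win_write64(void __iomem *base, unsigned long off, u64 val,
                        spinlock_t *lock)
{
        spin_lock(lock);
        writeq(val, base + off);
        spin_unlock(lock);
}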
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f59cbb..045a7c8f5bd 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
return NULL;
}
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
static int
-nx_set_product_offs(struct netxen_adapter *adapter)
-{
- struct uni_table_desc *ptab_descr;
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+ {
const u8 *unirom = adapter->fw->data;
- uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
__le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
- return -1;
+ return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
goto nomn;
}
- return -1;
+ return -EINVAL;
}
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -934,27 +1074,23 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
- if (nx_set_product_offs(adapter))
+ if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
-
- min_size = NX_UNI_FW_MIN_SIZE;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
- min_size = NX_FW_MIN_SIZE;
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
}
- if (fw->size < min_size)
- return -EINVAL;
-
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
@@ -1225,10 +1361,12 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
return err;
NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
return err;
}
@@ -1763,6 +1901,5 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
}
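The validators added above all reduce to one check: a table's file offset plus the space for its entries must fit inside the downloaded firmware blob before any descriptor is dereferenced. A generic sketch of that bounds check (the struct layout is illustrative, not the real uni_table_desc):

#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/types.h>

struct demo_table {
        u32 findex;             /* file offset of the table */
        u32 num_entries;
        u32 entry_size;
};

static int demo_validate_table(const struct firmware *fw,
                               const struct demo_table *tab)
{
        /* 64-bit arithmetic so a corrupt header cannot wrap the bound */
        u64 end = (u64)tab->findex +
                  (u64)tab->num_entries * tab->entry_size;

        return end <= fw->size ? 0 : -EINVAL;
}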
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7c8b0..6ce6ce1df6d 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
- } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
- switch (adapter->ahw.board_type) {
- case NETXEN_BRDTYPE_P2_SB31_10G:
- case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
- adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
- break;
- default:
- break;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
}
}
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
}
break;
+ case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
ref_cnt = nx_decr_dev_ref_cnt(adapter);
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
adapter->fw_wait_cnt = 0;
@@ -2526,51 +2537,81 @@ static int
netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
- if (offset < NETXEN_PCI_CRBSPACE)
- return -EINVAL;
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = NXRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
return size;
}
static ssize_t
-netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- NXWR32(adapter, offset, data);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
return size;
}
@@ -2588,7 +2629,8 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
}
static ssize_t
-netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2608,7 +2650,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2742,7 +2784,6 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int netxen_netdev_event(struct notifier_block *this,
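The sysfs handlers in netxen_nic_main.c above pick up the updated binary-attribute callback signature, which now takes a struct file * as the first argument. A sketch of a read callback in the new shape (attribute name and body are illustrative):

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t demo_bin_read(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *attr,
                             char *buf, loff_t off, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        dev_dbg(dev, "read %zu bytes at %lld\n", count, off);
        return 0;
}

static struct bin_attribute demo_attr = {
        .attr = { .name = "demo", .mode = S_IRUGO },
        .read = demo_bin_read,
};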
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3892330f244..4d3f2e2b28b 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -444,7 +444,7 @@ static void ni5010_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
/* FIXME: Give it a real kick here */
chipset_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -460,7 +460,6 @@ static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -515,8 +514,6 @@ static void dump_packet(void *buf, int len)
if (i % 16 == 15) printk("\n");
}
printk("\n");
-
- return;
}
/* We have a good packet, get it out of the buffer. */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f707361..9bddb5fa7a9 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
writew(num_addrs * 6, &mc_cmd->mc_cnt);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
@@ -1147,7 +1147,7 @@ static void ni52_timeout(struct net_device *dev)
writeb(CUC_START, &p->scb->cmd_cuc);
ni_attn586();
wait_for_scb_cmd(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -1165,7 +1165,7 @@ static void ni52_timeout(struct net_device *dev)
ni52_close(dev);
ni52_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1218,7 +1218,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writeb(CUC_START, &p->scb->cmd_cuc);
}
ni_attn586();
- dev->trans_start = jiffies;
if (!i)
dev_kfree_skb(skb);
wait_for_scb_cmd(dev);
@@ -1240,7 +1239,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1256,7 +1254,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[p->xmit_count]),
&p->nop_cmds[p->xmit_count]->cmd_link);
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
unsigned long flags;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 9225c76cac4..da228a0dd6c 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -784,7 +784,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1150,7 +1150,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1213,7 +1213,6 @@ static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
netif_wake_queue(dev);
p->lock = 0;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&p->ring_lock, flags);
}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16bfc90..63e8e3893bd 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Nov 14, 2008"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Apr 22, 2010"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
struct rx_ring_info *rp)
{
unsigned int index = rp->rcr_index;
+ struct rx_pkt_hdr1 *rh;
struct sk_buff *skb;
int len, num_rcr;
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
if (num_rcr == 1) {
int ptype;
- off += 2;
- append_size -= 2;
-
ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
if ((ptype == RCR_PKT_TYPE_TCP ||
ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- }
- if (!(val & RCR_ENTRY_MULTI))
+ } else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
}
rp->rcr_index = index;
- skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
+ len += sizeof(*rh);
+ len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+ __pskb_pull_tail(skb, len);
+
+ rh = (struct rx_pkt_hdr1 *) skb->data;
+ if (np->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0);
+ skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
RX_DMA_CTL_STAT_RCRTO |
RX_DMA_CTL_STAT_RBR_EMPTY));
nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
- nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
+ nw64(RXDMA_CFIG2(channel),
+ ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+ RXDMA_CFIG2_FULL_HDR));
nw64(RBR_CFIG_A(channel),
((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
(rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -6314,7 +6322,6 @@ static void niu_set_rx_mode(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
int i, alt_cnt, err;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
unsigned long flags;
u16 hash[16] = { 0, };
@@ -6366,8 +6373,8 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
} else if (!netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(addr, dev) {
- u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -7911,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int niu_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static const struct ethtool_ops niu_ethtool_ops = {
.get_drvinfo = niu_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -7927,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.phys_id = niu_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .set_flags = niu_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9094,7 +9115,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
const u32 *int_prop;
int i;
- int_prop = of_get_property(op->node, "interrupts", NULL);
+ int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
if (!int_prop)
return -ENODEV;
@@ -9245,7 +9266,7 @@ static int __devinit niu_get_of_props(struct niu *np)
int prop_len;
if (np->parent->plat_type == PLAT_TYPE_NIU)
- dp = np->op->node;
+ dp = np->op->dev.of_node;
else
dp = pci_device_to_OF_node(np->pdev);
@@ -9755,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
}
}
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_RXHASH);
+}
+
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -9839,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
}
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
@@ -10056,10 +10083,10 @@ static int __devinit niu_of_probe(struct of_device *op,
niu_driver_version();
- reg = of_get_property(op->node, "reg", NULL);
+ reg = of_get_property(op->dev.of_node, "reg", NULL);
if (!reg) {
dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
- op->node->full_name);
+ op->dev.of_node->full_name);
return -ENODEV;
}
@@ -10072,7 +10099,7 @@ static int __devinit niu_of_probe(struct of_device *op,
np = netdev_priv(dev);
memset(&parent_id, 0, sizeof(parent_id));
- parent_id.of = of_get_parent(op->node);
+ parent_id.of = of_get_parent(op->dev.of_node);
np->parent = niu_get_parent(np, &parent_id,
PLAT_TYPE_NIU);
@@ -10081,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
goto err_out_free_dev;
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
@@ -10207,8 +10234,11 @@ static const struct of_device_id niu_match[] = {
MODULE_DEVICE_TABLE(of, niu_match);
static struct of_platform_driver niu_of_driver = {
- .name = "niu",
- .match_table = niu_match,
+ .driver = {
+ .name = "niu",
+ .owner = THIS_MODULE,
+ .of_match_table = niu_match,
+ },
.probe = niu_of_probe,
.remove = __devexit_p(niu_of_remove),
};
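The niu changes above add receive hashing: the hardware header is pulled so skb->rxhash can be filled, and ethtool's get_flags/set_flags toggle NETIF_F_RXHASH. A sketch of the ethtool flag plumbing on its own (ops struct and callback names are illustrative; the checks mirror the hunk above):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_set_flags(struct net_device *dev, u32 data)
{
        if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
                return -EOPNOTSUPP;

        if (data & ETH_FLAG_RXHASH)
                dev->features |= NETIF_F_RXHASH;
        else
                dev->features &= ~NETIF_F_RXHASH;
        return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
        .get_flags = ethtool_op_get_flags,
        .set_flags = demo_set_flags,
};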
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b5933d5..d6715465f35 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 inputport:2,
maccheck:1,
- class:4;
+ class:5;
u8 vlan:1,
llcsnap:1,
noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
tres:2,
tzfvld:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
- u8 class:4,
+ u8 class:5,
maccheck:1,
inputport:2;
u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
/* Bits 7:0 of hash value, H1. */
u8 hashval1_2;
+ u8 hwrsvd5;
+ u8 hwrsvd6;
+
u8 usrdata_0; /* Bits 39:32 of user data. */
u8 usrdata_1; /* Bits 31:24 of user data. */
u8 usrdata_2; /* Bits 23:16 of user data. */
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e2ddd..000e792d57c 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
DMA_BIDIRECTIONAL);
- spin_lock_irqsave(&p->tx_list.lock, flags);
-
re.d64 = p->tx_ring[p->tx_next_clean];
p->tx_next_clean =
(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -317,7 +324,6 @@ good:
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
- netdev->last_rx = jiffies;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -374,7 +380,6 @@ done:
mix_ircnt.s.ircnt = 1;
cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
return rc;
-
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -384,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
union cvmx_mixx_ircnt mix_ircnt;
int rc;
-
mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
while (work_done < budget && mix_ircnt.s.ircnt) {
@@ -475,13 +479,12 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
struct octeon_mgmt_cam_state cam_state;
- struct dev_addr_list *list;
- struct list_head *pos;
+ struct netdev_hw_addr *ha;
int available_cam_entries;
memset(&cam_state, 0, sizeof(cam_state));
- if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
cam_mode = 0;
available_cam_entries = 8;
} else {
@@ -489,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
- available_cam_entries = 7 - netdev->dev_addrs.count;
+ available_cam_entries = 7 - netdev->uc.count;
}
if (netdev->flags & IFF_MULTICAST) {
if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
netdev_mc_count(netdev) > available_cam_entries)
- multicast_mode = 2; /* 1 - Accept all multicast. */
+ multicast_mode = 2; /* 2 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
}
@@ -503,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
if (cam_mode == 1) {
/* Add primary address. */
octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
- list_for_each(pos, &netdev->dev_addrs.list) {
- struct netdev_hw_addr *hw_addr;
- hw_addr = list_entry(pos, struct netdev_hw_addr, list);
- octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
- list = list->next;
- }
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
if (multicast_mode == 0) {
- netdev_for_each_mc_addr(list, netdev)
- octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
-
spin_lock_irqsave(&p->lock, flags);
/* Disable packet I/O. */
@@ -524,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
agl_gmx_prtx.s.en = 0;
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
-
adr_ctl.u64 = 0;
adr_ctl.s.cam_mode = cam_mode;
adr_ctl.s.mcst = multicast_mode;
@@ -597,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port),
- cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
cvmx_read_csr(CVMX_MIXX_ISR(port));
if (mixx_isr.s.irthresh) {
@@ -832,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_irhwm.s.irhwm = 0;
cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
- /* Interrupt when we have 5 or more packets to clean. */
+ /* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 5;
+ mix_orhwm.s.orhwm = 1;
cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -928,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
octeon_mgmt_reset_hw(p);
-
free_irq(p->irq, netdev);
/* dma_unmap is a nop on Octeon, so just free everything. */
@@ -945,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
DMA_BIDIRECTIONAL);
kfree(p->tx_ring);
-
return 0;
}
@@ -955,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int port = p->port;
union mgmt_port_ring_entry re;
unsigned long flags;
+ int rv = NETDEV_TX_BUSY;
re.d64 = 0;
re.s.len = skb->len;
@@ -964,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock_irqsave(&p->tx_list.lock, flags);
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
if (unlikely(p->tx_current_fill >=
ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
spin_unlock_irqrestore(&p->tx_list.lock, flags);
-
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
-
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
+ goto out;
}
__skb_queue_tail(&p->tx_list, skb);
@@ -994,10 +992,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
- netdev->trans_start = jiffies;
- octeon_mgmt_clean_tx_buffers(p);
+ rv = NETDEV_TX_OK;
+out:
octeon_mgmt_update_tx_stats(netdev);
- return NETDEV_TX_OK;
+ return rv;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1007,7 +1005,6 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
octeon_mgmt_receive_packets(p, 16);
octeon_mgmt_update_rx_stats(netdev);
- return;
}
#endif
@@ -1107,7 +1104,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
-
/* The mgmt ports get the first N MACs. */
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
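The reworked octeon_mgmt_xmit() above stops the queue while one slot is still free and then re-checks the fill level under the lock, so NETDEV_TX_BUSY is returned only when the ring is genuinely full. The same stop-then-recheck pattern in isolation (private struct, counters and limits are illustrative; a completion handler is expected to wake the queue later):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_priv {
        spinlock_t lock;
        int fill;
        int limit;
};

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct demo_priv *p = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        if (p->fill >= p->limit - 1) {
                /* stop early, then recheck so a racing completion is not lost */
                spin_unlock_irqrestore(&p->lock, flags);
                netif_stop_queue(dev);
                spin_lock_irqsave(&p->lock, flags);
        }
        if (p->fill >= p->limit) {
                spin_unlock_irqrestore(&p->lock, flags);
                return NETDEV_TX_BUSY;          /* genuinely full */
        }
        p->fill++;                              /* queue the frame here */
        spin_unlock_irqrestore(&p->lock, flags);
        return NETDEV_TX_OK;
}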
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 370c147d08a..8ab6ae0a610 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1472,8 +1472,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
txring->next_to_fill = fill;
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
-
- return;
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 36785853a14..56f3fc45dba 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1354,7 +1354,6 @@ static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- dev->trans_start = jiffies;
atomic_inc(&tp->cur_tx);
if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1813,12 +1812,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 757f87bb1db..10ee106a161 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -93,7 +93,6 @@ earlier 3Com products.
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -200,7 +199,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
u16 advertising, partner; /* NWay media advertisement */
unsigned char phys; /* MII device address */
unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -283,8 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
spin_lock_init(&lp->window_lock);
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -311,8 +307,7 @@ static void tc574_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c574_detach()\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc574_release(link);
@@ -353,7 +348,7 @@ static int tc574_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
@@ -361,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -446,17 +441,13 @@ static int tc574_config(struct pcmcia_device *link)
}
}
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
"hw_addr %pM.\n",
dev->name, cardname, dev->base_addr, dev->irq,
@@ -622,8 +613,6 @@ static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
outw(MDIO_ENB_IN, mdio_addr);
outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
}
-
- return;
}
/* Reset and restore all of the 3c574 registers. */
@@ -739,7 +728,7 @@ static void el3_tx_timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -790,8 +779,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
- dev->trans_start = jiffies;
-
/* TxFree appears only in Window 1, not offset 0x1c. */
if (inw(ioaddr + TxFree) <= 1536) {
netif_stop_queue(dev);
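The 3c574_cs conversion above drops dev_node_t and the old irq descriptor: the handler is passed straight to pcmcia_request_irq() and the assigned line is read back from link->irq. A small sketch of that setup step (the helper name is illustrative):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <pcmcia/ds.h>

static int demo_request_irq(struct pcmcia_device *link, struct net_device *dev,
                            irq_handler_t handler)
{
        int ret = pcmcia_request_irq(link, handler);

        if (ret)
                return ret;
        dev->irq = link->irq;   /* assigned IRQ now lives directly in link->irq */
        return 0;
}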
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043..ce63c3773b4 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
/*======================================================================
A PCMCIA ethernet driver for the 3com 3c589 card.
-
+
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
3c589_cs.c 1.162 2001/10/13 00:08:50
The network driver code is based on Donald Becker's 3c589 code:
-
+
Written 1994 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
Donald Becker may be reached at becker@scyld.com
-
+
Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
======================================================================*/
@@ -69,31 +69,54 @@
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
- TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
- RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
- TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
- FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
- SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
- SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
- StatsDisable = 22<<11, StopCoax = 23<<11,
+ TotalReset = 0<<11,
+ SelectWindow = 1<<11,
+ StartCoax = 2<<11,
+ RxDisable = 3<<11,
+ RxEnable = 4<<11,
+ RxReset = 5<<11,
+ RxDiscard = 8<<11,
+ TxEnable = 9<<11,
+ TxDisable = 10<<11,
+ TxReset = 11<<11,
+ FakeIntr = 12<<11,
+ AckIntr = 13<<11,
+ SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11,
+ SetRxFilter = 16<<11,
+ SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11,
+ SetTxStart = 19<<11,
+ StatsEnable = 21<<11,
+ StatsDisable = 22<<11,
+ StopCoax = 23<<11
};
enum c509status {
- IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
- TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
- IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+ IntLatch = 0x0001,
+ AdapterFailure = 0x0002,
+ TxComplete = 0x0004,
+ TxAvailable = 0x0008,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntReq = 0x0040,
+ StatsFull = 0x0080,
+ CmdBusy = 0x1000
};
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
- RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+ RxStation = 1,
+ RxMulticast = 2,
+ RxBroadcast = 4,
+ RxProm = 8
};
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
-#define RX_STATUS 0x08
-#define TX_STATUS 0x0B
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
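The comment above spells out the EL3_CMD encoding: the top five bits select the command and the low 11 bits carry the parameter, which is why values such as SelectWindow = 1<<11 can simply be OR'd with a window number. A minimal sketch (not part of the patch), assuming ioaddr holds the card's I/O base as elsewhere in this driver:

/* Issue a 3c589 command with an 11-bit parameter; EL3_CMD and the
 * c509cmd values are the ones defined in the hunk above. */
static inline void el3_cmd(unsigned int ioaddr, u16 cmd, u16 param)
{
	outw(cmd | (param & 0x07ff), ioaddr + EL3_CMD);
}

/* The driver's EL3WINDOW(n) macro reduces to roughly: */
/*	outw(SelectWindow + (n), ioaddr + EL3_CMD);	*/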
@@ -106,13 +129,12 @@ enum RxFilter {
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
- /* For transceiver monitoring */
- struct timer_list media;
- u16 media_status;
- u16 fast_poll;
- unsigned long last_irq;
- spinlock_t lock;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
};
static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -164,15 +186,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
======================================================================*/
static const struct net_device_ops el3_netdev_ops = {
- .ndo_open = el3_open,
- .ndo_stop = el3_close,
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
- .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_tx_timeout = el3_tx_timeout,
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -194,8 +216,7 @@ static int tc589_probe(struct pcmcia_device *link)
spin_lock_init(&lp->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
+
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -223,8 +244,7 @@ static void tc589_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc589_release(link);
@@ -236,20 +256,19 @@ static void tc589_detach(struct pcmcia_device *link)
tc589_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
ethernet device available to the system.
-
+
======================================================================*/
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- struct el3_private *lp = netdev_priv(dev);
__be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
unsigned int ioaddr;
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
-
+
dev_dbg(&link->dev, "3c589_config\n");
phys_addr = (__be16 *)dev->dev_addr;
@@ -271,15 +290,15 @@ static int tc589_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
-
- dev->irq = link->irq.AssignedIRQ;
+
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
EL3WINDOW(0);
@@ -312,25 +331,20 @@ static int tc589_config(struct pcmcia_device *link)
dev->if_port = if_port;
else
printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
-
- link->dev_node = &lp->node;
+
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
- printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
return 0;
failed:
@@ -343,7 +357,7 @@ failed:
After a card is removed, tc589_release() will unregister the net
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
-
+
======================================================================*/
static void tc589_release(struct pcmcia_device *link)
@@ -365,7 +379,7 @@ static int tc589_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- if (link->open) {
+ if (link->open) {
tc589_reset(dev);
netif_device_attach(dev);
}
@@ -385,8 +399,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
- dev->name, cmd);
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
/*
@@ -412,7 +425,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
-
+
EL3WINDOW(0);
switch (if_port) {
case 0: case 1: outw(0, ioaddr + 6); break;
@@ -435,14 +448,13 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
- inw(ioaddr+TX_FREE));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -451,18 +463,18 @@ static void tc589_reset(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x0001, ioaddr + 4); /* Activate board. */
outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
+
/* Set the station address in window 2. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
tc589_set_xcvr(dev, dev->if_port);
-
+
/* Switch to the stats window, and clear all stats by reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
@@ -470,7 +482,7 @@ static void tc589_reset(struct net_device *dev)
inb(ioaddr+i);
inw(ioaddr + 10);
inw(ioaddr + 12);
-
+
/* Switch to register set 1 for normal use. */
EL3WINDOW(1);
@@ -504,8 +516,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else
return -EINVAL;
@@ -517,13 +528,13 @@ static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
-
+
if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
netif_start_queue(dev);
-
+
tc589_reset(dev);
init_timer(&lp->media);
lp->media.function = &media_check;
@@ -533,18 +544,18 @@ static int el3_open(struct net_device *dev)
dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
-
+
return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
+
+ netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -555,19 +566,18 @@ static void pop_tx_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
/* Clear the Tx status stack. */
for (i = 32; i > 0; i--) {
u_char tx_status = inb(ioaddr + TX_STATUS);
if (!(tx_status & 0x84)) break;
/* reset transmitter on jabber error or underrun */
if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
+ tc589_wait_for_completion(dev, TxReset);
if (tx_status & 0x38) {
- pr_debug("%s: transmit error: status 0x%02x\n",
- dev->name, tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
}
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
@@ -580,11 +590,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
- pr_debug("%s: el3_start_xmit(length = %ld) called, "
- "status %4.4x.\n", dev->name, (long)skb->len,
- inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ (long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -594,7 +603,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) <= 1536) {
netif_stop_queue(dev);
/* Interrupt us when the FIFO has room for max-sized packet. */
@@ -602,9 +610,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
}
pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
dev_kfree_skb(skb);
-
+
return NETDEV_TX_OK;
}
@@ -616,37 +624,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
-
+
if (!netif_device_present(dev))
return IRQ_NONE;
ioaddr = dev->base_addr;
- pr_debug("%s: interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
+ spin_lock(&lp->lock);
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
if ((status & 0xe000) != 0x2000) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
- handled = 0;
- break;
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
}
-
if (status & RxComplete)
- el3_rx(dev);
-
+ el3_rx(dev);
if (status & TxAvailable) {
- pr_debug(" TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
-
if (status & TxComplete)
- pop_tx_status(dev);
-
+ pop_tx_status(dev);
if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */
@@ -660,8 +663,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + 4);
EL3WINDOW(1);
- printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc589_wait_for_completion(dev, TxReset);
@@ -676,22 +679,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
}
}
-
if (++i > 10) {
- printk(KERN_ERR "%s: infinite loop in interrupt, "
- "status %4.4x.\n", dev->name, status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
-
lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- pr_debug("%s: exiting interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
return IRQ_RETVAL(handled);
}
@@ -710,7 +711,7 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_warn(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -727,7 +728,7 @@ static void media_check(unsigned long arg)
/* lp->lock guards the EL3 window. Window should always be 1 except
when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
EL3WINDOW(4);
media = inw(ioaddr+WN4_MEDIA) & 0xc810;
@@ -747,32 +748,30 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media & lp->media_status & 0x8000) &&
((lp->media_status ^ media) & 0x0800))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0800 ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0800 ? "lost" : "found"));
else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (lp->media_status & 0x0010 ? "ok" : "problem"));
+ netdev_info(dev, "coax cable %s\n",
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
if (dev->if_port == 0) {
if (media & 0x8000) {
if (media & 0x0800)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
- tc589_set_xcvr(dev, 2);
+ tc589_set_xcvr(dev, 2);
} else if (media & 0x4000) {
if (media & 0x0010)
tc589_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
lp->media_status = media;
}
-
+
EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
lp->media.expires = jiffies + HZ;
@@ -786,7 +785,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
struct pcmcia_device *link = lp->p_dev;
if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -798,21 +797,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
-
+
Caller must hold the lock for this
*/
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- pr_debug("%s: updating the statistics.\n", dev->name);
+ netdev_dbg(dev, "updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
+ /* Multiple collisions. */ inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -821,7 +820,7 @@ static void update_stats(struct net_device *dev)
/* Tx deferrals */ inb(ioaddr + 8);
/* Rx octets */ inw(ioaddr + 10);
/* Tx octets */ inw(ioaddr + 12);
-
+
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
@@ -832,9 +831,9 @@ static int el3_rx(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
-
- pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
- dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
worklimit--;
@@ -852,11 +851,11 @@ static int el3_rx(struct net_device *dev)
} else {
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
-
+
skb = dev_alloc_skb(pkt_len+5);
-
- pr_debug(" Receiving packet size %d status %4.4x.\n",
- pkt_len, rx_status);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2);
insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -866,8 +865,8 @@ static int el3_rx(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
} else {
- pr_debug("%s: couldn't allocate a sk_buff of"
- " size %d.\n", dev->name, pkt_len);
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ pkt_len);
dev->stats.rx_dropped++;
}
}
@@ -875,7 +874,7 @@ static int el3_rx(struct net_device *dev)
tc589_wait_for_completion(dev, RxDiscard);
}
if (worklimit == 0)
- printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name);
+ netdev_warn(dev, "too much work in el3_rx!\n");
return 0;
}
@@ -906,17 +905,17 @@ static int el3_close(struct net_device *dev)
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
unsigned int ioaddr = dev->base_addr;
-
+
dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
if (pcmcia_dev_present(link)) {
/* Turn off statistics ASAP. We update dev->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
-
+
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
-
+
if (dev->if_port == 2)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
@@ -925,12 +924,12 @@ static int el3_close(struct net_device *dev)
EL3WINDOW(4);
outw(0, ioaddr + WN4_MEDIA);
}
-
+
/* Switching back to window 0 disables the IRQ. */
EL3WINDOW(0);
/* But we explicitly zero the IRQ line select anyway. */
outw(0x0f00, ioaddr + WN0_IRQ);
-
+
/* Check if the card still exists */
if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
update_stats(dev);
@@ -939,7 +938,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
del_timer_sync(&lp->media);
-
+
return 0;
}
@@ -961,7 +960,7 @@ static struct pcmcia_driver tc589_driver = {
},
.probe = tc589_probe,
.remove = tc589_detach,
- .id_table = tc589_ids,
+ .id_table = tc589_ids,
.suspend = tc589_suspend,
.resume = tc589_resume,
};
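The 3c574/3c589 hunks above show the IRQ conversion repeated by every PCMCIA driver in this patch: the irq_req_t setup (link->irq.Attributes, link->irq.Handler) disappears, the handler is handed straight to pcmcia_request_irq() or pcmcia_request_exclusive_irq(), and the assigned line is read back from link->irq, which is now a plain integer. A condensed sketch of the new-style config path (not part of the patch; example_config and example_interrupt are hypothetical names):

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <pcmcia/ds.h>

static irqreturn_t example_interrupt(int irq, void *dev_id);	/* hypothetical handler */

static int example_config(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;
	int ret;

	ret = pcmcia_request_irq(link, example_interrupt);
	if (ret)
		return ret;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		return ret;

	dev->irq = link->irq;	/* was link->irq.AssignedIRQ */
	return 0;
}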
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14e..5b3dfb4ab27 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -113,7 +113,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
caddr_t base;
struct timer_list watchdog;
int stale, fast_poll;
@@ -168,7 +167,6 @@ static int axnet_probe(struct pcmcia_device *link)
info = PRIV(dev);
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -195,8 +193,7 @@ static void axnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
axnet_release(link);
@@ -265,12 +262,9 @@ static int try_io_port(struct pcmcia_device *link)
int j, ret;
if (link->io.NumPorts1 == 32) {
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (link->io.NumPorts2 > 0) {
- /* for master/slave multifunction cards */
+ /* for master/slave multifunction cards */
+ if (link->io.NumPorts2 > 0)
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
- }
} else {
/* This should be two 16-port windows */
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -336,8 +330,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -349,7 +342,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (!get_prom(link)) {
@@ -397,17 +390,13 @@ static int axnet_config(struct pcmcia_device *link)
}
info->phy_id = (i < 32) ? i : -1;
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
@@ -1005,7 +994,7 @@ static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -1510,8 +1499,6 @@ static void ei_receive(struct net_device *dev)
ei_local->current_page = next_frame;
outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
}
-
- return;
}
/**
@@ -1622,11 +1609,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 crc;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 21d9c9d815d..5643f94541b 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -122,7 +122,6 @@ static void com20020_detach(struct pcmcia_device *p_dev);
typedef struct com20020_dev_t {
struct net_device *dev;
- dev_node_t node;
} com20020_dev_t;
/*======================================================================
@@ -163,7 +162,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts1 = 16;
p_dev->io.IOAddrLines = 16;
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -196,18 +194,16 @@ static void com20020_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "com20020_detach\n");
- if (link->dev_node) {
- dev_dbg(&link->dev, "unregister...\n");
+ dev_dbg(&link->dev, "unregister...\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- /*
- * this is necessary because we register our IRQ separately
- * from card services.
- */
- if (dev->irq)
+ /*
+ * this is necessary because we register our IRQ separately
+ * from card services.
+ */
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
com20020_release(link);
@@ -275,15 +271,14 @@ static int com20020_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
dev_dbg(&link->dev, "request IRQ %d\n",
- link->irq.AssignedIRQ);
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
+ link->irq);
+ if (!link->irq)
{
dev_dbg(&link->dev, "requestIRQ failed totally!\n");
goto failed;
}
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,7 +294,6 @@ static int com20020_config(struct pcmcia_device *link)
lp->card_name = "PCMCIA COM20020";
lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = com20020_found(dev, 0); /* calls register_netdev */
@@ -307,12 +301,9 @@ static int com20020_config(struct pcmcia_device *link)
if (i != 0) {
dev_printk(KERN_NOTICE, &link->dev,
"com20020_cs: com20020_found() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
dev->name, dev->base_addr, dev->irq);
return 0;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04..7c27c50211a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -110,7 +110,6 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
*/
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
long open_time;
uint tx_started:1;
uint tx_queue;
@@ -254,10 +253,6 @@ static int fmvj18x_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = fjn_interrupt;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -278,8 +273,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "fmvj18x_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
fmvj18x_release(link);
@@ -425,8 +419,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
if (link->io.NumPorts2 != 0) {
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
@@ -437,14 +429,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
if (ret)
goto failed;
}
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, fjn_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (link->io.BasePort2 != 0) {
@@ -529,17 +521,13 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
lp->cardtype = cardtype;
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
/* print current configuration */
printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
"hw_addr %pM\n",
@@ -890,7 +878,6 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue(dev);
} else {
@@ -1082,8 +1069,6 @@ static void fjn_rx(struct net_device *dev)
"%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
}
*/
-
- return;
} /* fjn_rx */
/*====================================================================*/
@@ -1196,11 +1181,11 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
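The loop above is the usual 64-bin multicast hash: the top six bits of the little-endian CRC-32 of each address select one of 64 filter bits, packed into the 8-byte mc_filter[]. The same computation, pulled out as a stand-alone sketch (not part of the patch):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static void hash_mc_addr(u8 mc_filter[8], const u8 *addr)
{
	unsigned int bit = ether_crc_le(ETH_ALEN, addr) >> 26;	/* 0..63 */

	mc_filter[bit >> 3] |= 1 << (bit & 7);
}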
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 37f4a6fdc3e..67ee9851a8e 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -104,7 +104,6 @@ static void ibmtr_detach(struct pcmcia_device *p_dev);
typedef struct ibmtr_dev_t {
struct pcmcia_device *p_dev;
struct net_device *dev;
- dev_node_t node;
window_handle_t sram_win_handle;
struct tok_info *ti;
} ibmtr_dev_t;
@@ -156,8 +155,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 4;
link->io.IOAddrLines = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = ibmtr_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -192,8 +189,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
*/
ti->sram_phys |= 1;
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
del_timer_sync(&(ti->tr_timer));
@@ -238,11 +234,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
}
dev->base_addr = link->io.BasePort1;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
- ti->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
+ ti->irq = link->irq;
ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
/* Allocate the MMIO memory window */
@@ -291,18 +287,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
Adapters Technical Reference" SC30-3585 for this info. */
ibmtr_hw_setup(dev, mmiobase);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = ibmtr_probe_card(dev);
if (i != 0) {
printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO
"%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
dev->name, dev->base_addr, dev->irq,
@@ -402,8 +394,6 @@ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
/* 0x40 will release the card for use */
outb(0x40, dev->base_addr);
-
- return;
}
static struct pcmcia_device_id ibmtr_ids[] = {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11..9b63dec549c 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -363,7 +363,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
@@ -463,8 +462,6 @@ static int nmclan_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = mace_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -493,8 +490,7 @@ static void nmclan_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
nmclan_release(link);
@@ -652,14 +648,14 @@ static int nmclan_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link, &link->io);
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -698,18 +694,14 @@ static int nmclan_config(struct pcmcia_device *link)
else
printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
i = register_netdev(dev);
if (i != 0) {
printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
" hw_addr %pM\n",
dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
@@ -903,7 +895,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -945,8 +937,6 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
}
- dev->trans_start = jiffies;
-
#if MULTI_TX
if (lp->tx_free_frames > 0)
netif_start_queue(dev);
@@ -1315,8 +1305,6 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
/* lp->linux_stats.tx_window_errors; */
-
- return;
} /* update_stats */
/* ----------------------------------------------------------------------------
@@ -1475,7 +1463,7 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
{
@@ -1495,8 +1483,8 @@ static void set_multicast_list(struct net_device *dev)
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(adr, ha->addr, ETHER_ADDR_LEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 4c0368de181..6f77a768ba8 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -208,7 +208,6 @@ static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
typedef struct pcnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
u_int flags;
void __iomem *base;
struct timer_list watchdog;
@@ -264,7 +263,6 @@ static int pcnet_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -288,8 +286,7 @@ static void pcnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "pcnet_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
pcnet_release(link);
@@ -488,8 +485,6 @@ static int try_io_port(struct pcmcia_device *link)
if (link->io.NumPorts2 > 0) {
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
@@ -559,8 +554,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -574,7 +568,7 @@ static int pcnet_config(struct pcmcia_device *link)
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (info->flags & HAS_MISC_REG) {
if ((if_port == 1) || (if_port == 2))
@@ -643,17 +637,13 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022))
mii_phy_probe(dev);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index ccc553782a0..7b6fe89f9db 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -103,7 +103,6 @@ struct smc_private {
u_short manfid;
u_short cardid;
- dev_node_t node;
struct sk_buff *saved_skb;
int packets_waiting;
void __iomem *base;
@@ -323,14 +322,11 @@ static int smc91c92_probe(struct pcmcia_device *link)
return -ENOMEM;
smc = netdev_priv(dev);
smc->p_dev = link;
- link->priv = dev;
spin_lock_init(&smc->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 4;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &smc_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -363,8 +359,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "smc91c92_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
smc91c92_release(link);
@@ -453,7 +448,6 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -652,7 +646,6 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 64;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -877,7 +870,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (i)
goto config_failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, smc_interrupt);
if (i)
goto config_failed;
i = pcmcia_request_configuration(link, &link->conf);
@@ -887,7 +880,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (smc->manfid == MANFID_MOTOROLA)
mot_config(link);
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
if ((if_port >= 0) && (if_port <= 2))
dev->if_port = if_port;
@@ -960,17 +953,13 @@ static int smc91c92_config(struct pcmcia_device *link)
SMC_SELECT_BANK(0);
}
- link->dev_node = &smc->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto config_undo;
}
- strcpy(smc->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
@@ -1239,7 +1228,6 @@ static void smc_hardware_send_packet(struct net_device * dev)
dev_kfree_skb_irq(skb);
dev->trans_start = jiffies;
netif_start_queue(dev);
- return;
}
/*====================================================================*/
@@ -1254,7 +1242,7 @@ static void smc_tx_timeout(struct net_device *dev)
dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
@@ -1369,7 +1357,6 @@ static void smc_tx_err(struct net_device * dev)
smc->packets_waiting--;
outw(saved_packet, ioaddr + PNR_ARR);
- return;
}
/*====================================================================*/
@@ -1589,8 +1576,6 @@ static void smc_rx(struct net_device *dev)
}
/* Let the MMU free the memory of this packet. */
outw(MC_RELEASE, ioaddr + MMU_CMD);
-
- return;
}
/*======================================================================
@@ -1621,10 +1606,10 @@ static void set_rx_mode(struct net_device *dev)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_addr;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_addr, dev) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u_int position = ether_crc(6, ha->addr);
multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
}
}
@@ -1640,8 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
spin_unlock_irqrestore(&smc->lock, flags);
-
- return;
}
/*======================================================================
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457b..b6c3644888c 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -297,31 +297,9 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev);
static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
-/****************
- * A linked list of "instances" of the device. Each actual
- * PCMCIA card corresponds to one device instance, and is described
- * by one struct pcmcia_device structure (defined in ds.h).
- *
- * You may not want to use a linked list for this -- for example, the
- * memory card driver uses an array of struct pcmcia_device pointers, where minor
- * device numbers are used to derive the corresponding array index.
- */
-
-/****************
- * A driver needs to provide a dev_node_t structure for each device
- * on a card. In some cases, there is only one device per card (for
- * example, ethernet cards, modems). In other cases, there may be
- * many actual or logical devices (SCSI adapters, memory cards with
- * multiple partitions). The dev_node_t structures need to be kept
- * in a linked list starting at the 'dev' field of a struct pcmcia_device
- * structure. We allocate them in the card's private data structure,
- * because they generally can't be allocated dynamically.
- */
-
typedef struct local_info_t {
struct net_device *dev;
struct pcmcia_device *p_dev;
- dev_node_t node;
int card_type;
int probe_port;
@@ -555,7 +533,6 @@ xirc2ps_probe(struct pcmcia_device *link)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
- link->irq.Handler = xirc2ps_interrupt;
/* Fill in card specific entries */
dev->netdev_ops = &netdev_ops;
@@ -580,8 +557,7 @@ xirc2ps_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
xirc2ps_release(link);
@@ -841,7 +817,6 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts2 = 8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
@@ -866,7 +841,6 @@ xirc2ps_config(struct pcmcia_device * link)
}
printk(KNOT_XIRC "no ports available\n");
} else {
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->io.BasePort1 = ioaddr;
@@ -885,7 +859,7 @@ xirc2ps_config(struct pcmcia_device * link)
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- if ((err=pcmcia_request_irq(link, &link->irq)))
+ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
goto config_error;
/****************
@@ -982,23 +956,19 @@ xirc2ps_config(struct pcmcia_device * link)
printk(KNOT_XIRC "invalid if_port requested\n");
/* we can now register the device with the net subsystem */
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
- link->dev_node = &local->node;
SET_NETDEV_DEV(dev, &link->dev);
if ((err=register_netdev(dev))) {
printk(KNOT_XIRC "register_netdev() failed\n");
- link->dev_node = NULL;
goto config_error;
}
- strcpy(local->node.dev_name, dev->name);
-
/* give some infos about the hardware */
printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
@@ -1295,7 +1265,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1358,7 +1328,6 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
dev_kfree_skb (skb);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += pktlen;
netif_start_queue(dev);
return NETDEV_TX_OK;
@@ -1398,7 +1367,7 @@ static void set_addresses(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct set_address_info sa_info;
int i;
@@ -1413,10 +1382,10 @@ static void set_addresses(struct net_device *dev)
set_address(&sa_info, dev->dev_addr);
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i++ == 9)
break;
- set_address(&sa_info, dmi->dmi_addr);
+ set_address(&sa_info, ha->addr);
}
while (i++ < 9)
set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78dd163..c200c282173 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
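The recurring "/* prevent tx timeout */" stamps only make sense against the TX watchdog: the core now records trans_start on each transmit (hence the stamps removed from the start_xmit paths), and it declares a hang once the queue is stopped and jiffies has moved past the last stamp by more than watchdog_timeo. Roughly the check involved, as an illustration only and not the actual core code:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

static bool tx_looks_hung(struct net_device *dev)
{
	return netif_queue_stopped(dev) &&
	       time_after(jiffies, dev_trans_start(dev) + dev->watchdog_timeo);
}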
@@ -647,7 +647,6 @@ free_new_rx_ring:
(1 << size),
new_rx_ring,
new_ring_dma_addr);
- return;
}
static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -1215,7 +1214,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
dev->stats.rx_packets++;
- return;
}
static int pcnet32_rx(struct net_device *dev, int budget)
@@ -2398,7 +2396,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2449,8 +2447,6 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
/* Trigger an immediate send poll. */
lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
- dev->trans_start = jiffies;
-
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
@@ -2590,7 +2586,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2611,8 +2607,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -2625,7 +2621,6 @@ static void pcnet32_load_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
le16_to_cpu(mcast_table[i]));
- return;
}
/*
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e8350..c1281567983 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
module_init(bcm63xx_phy_init);
module_exit(bcm63xx_phy_exit);
+
+static struct mdio_device_id bcm63xx_tbl[] = {
+ { 0x00406000, 0xfffffc00 },
+ { 0x002bdc00, 0xfffffc00 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
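Each of the new mdio_device_id tables pairs a PHY ID with a mask, and MODULE_DEVICE_TABLE(mdio, ...) turns the table into module aliases so the PHY driver can be autoloaded when a matching device shows up on an MDIO bus. The intended match is masked equality; an illustrative sketch only, assuming the phy_id/phy_id_mask fields this series adds to mod_devicetable.h:

#include <linux/mod_devicetable.h>

static bool mdio_id_matches(const struct mdio_device_id *id, u32 phy_id)
{
	return (phy_id & id->phy_id_mask) == (id->phy_id & id->phy_id_mask);
}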
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4f8cf..cecdbbd549e 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
module_init(broadcom_init);
module_exit(broadcom_exit);
+
+static struct mdio_device_id broadcom_tbl[] = {
+ { 0x00206070, 0xfffffff0 },
+ { 0x002060e0, 0xfffffff0 },
+ { 0x002060c0, 0xfffffff0 },
+ { 0x002060b0, 0xfffffff0 },
+ { 0x0143bca0, 0xfffffff0 },
+ { 0x0143bcb0, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+ { PHY_ID_BCM57780, 0xfffffff0 },
+ { PHY_ID_BCMAC131, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b31d94..1a325d63756 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
module_init(cicada_init);
module_exit(cicada_exit);
+
+static struct mdio_device_id cicada_tbl[] = {
+ { 0x000fc410, 0x000ffff0 },
+ { 0x000fc440, 0x000fffc0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95853f..29c17617a2e 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
module_init(davicom_init);
module_exit(davicom_exit);
+
+static struct mdio_device_id davicom_tbl[] = {
+ { 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8a0, 0x0ffffff0 },
+ { 0x00181b80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebeba9b..13995f52d6a 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
module_init(et1011c_init);
module_exit(et1011c_exit);
+
+static struct mdio_device_id et1011c_tbl[] = {
+ { 0x0282f014, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b95d4..439adafeacb 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
module_init(ip175c_init);
module_exit(ip175c_exit);
+
+static struct mdio_device_id icplus_tbl[] = {
+ { 0x02430d80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaacde6..8ee929b796d 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
module_init(lxt_init);
module_exit(lxt_exit);
+
+static struct mdio_device_id lxt_tbl[] = {
+ { 0x78100000, 0xfffffff0 },
+ { 0x001378e0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe0a8e..78b74e83ce5 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
module_init(marvell_init);
module_exit(marvell_exit);
+
+static struct mdio_device_id marvell_tbl[] = {
+ { 0x01410c60, 0xfffffff0 },
+ { 0x01410c90, 0xfffffff0 },
+ { 0x01410cc0, 0xfffffff0 },
+ { 0x01410e10, 0xfffffff0 },
+ { 0x01410cb0, 0xfffffff0 },
+ { 0x01410cd0, 0xfffffff0 },
+ { 0x01410e30, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7e27a..65391891d8c 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
#include <linux/types.h>
#include <linux/delay.h>
-#define MDIO_READ 1
-#define MDIO_WRITE 0
+#define MDIO_READ 2
+#define MDIO_WRITE 1
+
+#define MDIO_C45 (1<<15)
+#define MDIO_C45_ADDR (MDIO_C45 | 0)
+#define MDIO_C45_READ (MDIO_C45 | 3)
+#define MDIO_C45_WRITE (MDIO_C45 | 1)
#define MDIO_SETUP_TIME 10
#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
/* Utility to send the preamble, address, and
* register (common to read and write).
*/
-static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
+static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
{
const struct mdiobb_ops *ops = ctrl->ops;
int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
for (i = 0; i < 32; i++)
mdiobb_send_bit(ctrl, 1);
- /* send the start bit (01) and the read opcode (10) or write (10) */
+ /* send the start bit (01) and the read opcode (10) or write (10).
+ Clause 45 operation uses 00 for the start and 11, 10 for
+ read/write */
mdiobb_send_bit(ctrl, 0);
- mdiobb_send_bit(ctrl, 1);
- mdiobb_send_bit(ctrl, read);
- mdiobb_send_bit(ctrl, !read);
+ if (op & MDIO_C45)
+ mdiobb_send_bit(ctrl, 0);
+ else
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, (op >> 1) & 1);
+ mdiobb_send_bit(ctrl, (op >> 0) & 1);
mdiobb_send_num(ctrl, phy, 5);
mdiobb_send_num(ctrl, reg, 5);
}
+/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
+ lower 16 bits of the 21 bit address. This transfer is done identically to a
+ MDIO_WRITE except for a different code. To enable clause 45 mode or
+ MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
+ can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+ phase. */
+static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+{
+ unsigned int dev_addr = (addr >> 16) & 0x1F;
+ unsigned int reg = addr & 0xFFFF;
+ mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
+
+ /* send the turnaround (10) */
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, 0);
+
+ mdiobb_send_num(ctrl, reg, 16);
+
+ ctrl->ops->set_mdio_dir(ctrl, 0);
+ mdiobb_get_bit(ctrl);
+
+ return dev_addr;
+}
static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 35897134a5d..fc5fef2a817 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -199,12 +199,12 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
if (!pdata)
return -ENOMEM;
- ret = of_get_gpio(ofdev->node, 0);
+ ret = of_get_gpio(ofdev->dev.of_node, 0);
if (ret < 0)
goto out_free;
pdata->mdc = ret;
- ret = of_get_gpio(ofdev->node, 1);
+ ret = of_get_gpio(ofdev->dev.of_node, 1);
if (ret < 0)
goto out_free;
pdata->mdio = ret;
@@ -213,7 +213,7 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
if (!new_bus)
goto out_free;
- ret = of_mdiobus_register(new_bus, ofdev->node);
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
mdio_gpio_bus_deinit(&ofdev->dev);
@@ -241,8 +241,11 @@ static struct of_device_id mdio_ofgpio_match[] = {
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct of_platform_driver mdio_ofgpio_driver = {
- .name = "mdio-gpio",
- .match_table = mdio_ofgpio_match,
+ .driver = {
+ .name = "mdio-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = mdio_ofgpio_match,
+ },
.probe = mdio_ofgpio_probe,
.remove = __devexit_p(mdio_ofgpio_remove),
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b70291bb..6a6b8199a0d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum)
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val)
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
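
Widening regnum from u16 to u32 in these accessors is what lets the clause 45 flag and device address travel from a MAC driver down to mdio-bitbang without any new entry points; a plain clause 22 register number still fits in the low five bits. A hedged, kernel-style sketch of a caller (read_c45_phy_id is a hypothetical helper, not an existing function; it assumes the MII_ADDR_C45 packing shown in the mdio-bitbang hunk and the clause 45 convention that registers 2 and 3 of an MMD hold its device identifier):

	/* Sketch only: read the clause 45 device identifier of MMD 1 (PMA/PMD)
	 * through the widened mdiobus_read(). Not an existing kernel function. */
	#include <linux/phy.h>
	#include <linux/mdio.h>

	static u32 read_c45_phy_id(struct mii_bus *bus, int phy_addr)
	{
		int hi, lo;

		hi = mdiobus_read(bus, phy_addr, MII_ADDR_C45 | (1 << 16) | 2);
		lo = mdiobus_read(bus, phy_addr, MII_ADDR_C45 | (1 << 16) | 3);
		if (hi < 0 || lo < 0)
			return 0;

		return ((u32)hi << 16) | (u32)lo;
	}
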
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e67691dca4a..0692f750c40 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -103,3 +103,12 @@ module_exit(ksphy_exit);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+ { PHY_ID_KSZ9021, 0x000fff10 },
+ { PHY_ID_VSC8201, 0x00fffff0 },
+ { PHY_ID_KS8001, 0x00fffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb7208..a73ba0bcc0c 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -97,7 +97,6 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
phy_write(phydev, LED_CTRL_REG, mode);
- return;
}
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
@@ -110,8 +109,6 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
(ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
-
- return;
}
static int ns_config_init(struct phy_device *phydev)
@@ -153,3 +150,10 @@ MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
+
+static struct mdio_device_id ns_tbl[] = {
+ { DP83865_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1794546c5..1a99bb24410 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
{
struct phy_device *dev;
+
/* We allocate the device, and initialize the
* default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+ /* Request the appropriate module unconditionally; don't
+ bother trying to do so only if it isn't already loaded,
+ because that gets complicated. A hotplug event would have
+ done an unconditional modprobe anyway.
+ We don't do normal hotplug because it won't work for MDIO
+ -- because it relies on the device staying around for long
+ enough for the driver to get loaded. With MDIO, the NIC
+ driver will get bored and give up as soon as it finds that
+ there's no driver _already_ loaded. */
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+
return dev;
}
EXPORT_SYMBOL(phy_device_create);
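
The unconditional request_module() above and the MODULE_DEVICE_TABLE(mdio, ...) entries in the PHY drivers meet in the module alias namespace: as far as that mechanism is understood here, each { id, mask } pair becomes a "mdio:" alias with one character per ID bit and '?' wherever the mask clears a bit, while MDIO_MODULE_PREFIX / MDIO_ID_FMT / MDIO_ID_ARGS expand the probed phy_id into the matching 32-character string for modprobe. A standalone sketch of that expansion (illustrative only; the authoritative format lives in the kernel headers and scripts/mod/file2alias.c):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: build the "mdio:..." alias pattern for a driver
	 * entry, one character per bit of the 32-bit PHY ID, '?' where the mask
	 * ignores the bit. */
	static void mdio_alias(uint32_t id, uint32_t mask, char out[38])
	{
		int i, pos = 0;

		pos += sprintf(out, "mdio:");
		for (i = 31; i >= 0; i--)
			out[pos++] = (mask & (1u << i)) ? ('0' + ((id >> i) & 1)) : '?';
		out[pos] = '\0';
	}

	int main(void)
	{
		char alias[38];

		/* Marvell entry from the table above; the masked-out revision
		 * nibble becomes trailing wildcards. */
		mdio_alias(0x01410e10, 0xfffffff0, alias);
		printf("%s\n", alias);
		return 0;
	}
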
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f73c3..6736b23f1b2 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
module_init(qs6612_init);
module_exit(qs6612_exit);
+
+static struct mdio_device_id qs6612_tbl[] = {
+ { 0x00181440, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a6744a5..f567c0e1aaa 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
module_init(realtek_init);
module_exit(realtek_exit);
+
+static struct mdio_device_id realtek_tbl[] = {
+ { 0x001cc912, 0x001fffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a5750..78fa988256f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
module_init(smsc_init);
module_exit(smsc_exit);
+
+static struct mdio_device_id smsc_tbl[] = {
+ { 0x0007c0a0, 0xfffffff0 },
+ { 0x0007c0b0, 0xfffffff0 },
+ { 0x0007c0c0, 0xfffffff0 },
+ { 0x0007c0d0, 0xfffffff0 },
+ { 0x0007c0f0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d53aaf..72290099e5e 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
module_init(ste10Xp_init);
module_exit(ste10Xp_exit);
+static struct mdio_device_id ste10Xp_tbl[] = {
+ { STE101P_PHY_ID, 0xfffffff0 },
+ { STE100P_PHY_ID, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
+
MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b2447e85..45cce50a279 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
+
+static struct mdio_device_id vitesse_tbl[] = {
+ { PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8221, 0x000ffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 9a2103a69e1..ec0349e84a8 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -979,7 +979,6 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: send request\n", dev->name);
spin_lock_irq(&nl->lock);
- dev->trans_start = jiffies;
snd->skb = skb;
snd->length.h = skb->len;
snd->state = PLIP_PK_TRIGGER;
@@ -1192,8 +1191,6 @@ plip_wakeup(void *handle)
/* Clear the data port. */
write_data (dev, 0x00);
}
-
- return;
}
static int
@@ -1309,7 +1306,6 @@ err_parport_unregister:
parport_unregister_device(nl->pardev);
err_free_dev:
free_netdev(dev);
- return;
}
/* plip_detach() is called (by the parport code) when a port is
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 8518a2e58e5..c5f8eb102bf 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2174,6 +2174,24 @@ int ppp_unit_number(struct ppp_channel *chan)
}
/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ char *name = NULL;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ name = pch->ppp->dev->name;
+ read_unlock_bh(&pch->upl);
+ }
+ return name;
+}
+
+
+/*
* Disconnect a channel from the generic layer.
* This must be called in process context.
*/
@@ -2901,11 +2919,12 @@ EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
-MODULE_ALIAS("/dev/ppp");
+MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS("devname:ppp");
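
ppp_dev_name() gives channel drivers a way to report which ppp interface a channel is currently attached to; the pointer it returns is the live dev->name, so it is only meaningful while the unit stays bound. A brief kernel-style sketch of the intended use, assuming the matching declaration is added to linux/ppp_channel.h alongside the other channel helpers (example_log_unit is illustrative, not part of the patch):

	/* Sketch only: how a channel driver might use the new export. */
	#include <linux/kernel.h>
	#include <linux/ppp_channel.h>

	static void example_log_unit(struct ppp_channel *chan)
	{
		char *name = ppp_dev_name(chan);	/* NULL while not attached to a unit */

		if (name)
			printk(KERN_DEBUG "channel bound to %s\n", name);
	}
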
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba100e..805b64d1e89 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
-static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
@@ -258,7 +257,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
- pn = net_generic(net, pppoe_net_id);
+ pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
@@ -290,12 +289,7 @@ static void pppoe_flush_dev(struct net_device *dev)
struct pppoe_net *pn;
int i;
- BUG_ON(dev == NULL);
-
pn = pppoe_pernet(dev_net(dev));
- if (!pn) /* already freed */
- return;
-
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
@@ -368,7 +362,7 @@ static int pppoe_device_event(struct notifier_block *this,
default:
break;
- };
+ }
return NOTIFY_DONE;
}
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a9825200..00000000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
-/*****************************************************************************
- * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
- *
- * PPPoX --- Generic PPP encapsulation socket family
- * PPPoL2TP --- PPP over L2TP (RFC 2661)
- *
- * Version: 1.0.0
- *
- * Authors: Martijn van Oosterhout <kleptog@svana.org>
- * James Chapman (jchapman@katalix.com)
- * Contributors:
- * Michal Ostrowski <mostrows@speakeasy.net>
- * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
- * David S. Miller (davem@redhat.com)
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-/* This driver handles only L2TP data frames; control frames are handled by a
- * userspace application.
- *
- * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
- * attaches it to a bound UDP socket with local tunnel_id / session_id and
- * peer tunnel_id / session_id set. Data can then be sent or received using
- * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
- * can be read or modified using ioctl() or [gs]etsockopt() calls.
- *
- * When a PPPoL2TP socket is connected with local and peer session_id values
- * zero, the socket is treated as a special tunnel management socket.
- *
- * Here's example userspace code to create a socket for sending/receiving data
- * over an L2TP session:-
- *
- * struct sockaddr_pppol2tp sax;
- * int fd;
- * int session_fd;
- *
- * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
- *
- * sax.sa_family = AF_PPPOX;
- * sax.sa_protocol = PX_PROTO_OL2TP;
- * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
- * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
- * sax.pppol2tp.addr.sin_port = addr->sin_port;
- * sax.pppol2tp.addr.sin_family = AF_INET;
- * sax.pppol2tp.s_tunnel = tunnel_id;
- * sax.pppol2tp.s_session = session_id;
- * sax.pppol2tp.d_tunnel = peer_tunnel_id;
- * sax.pppol2tp.d_session = peer_session_id;
- *
- * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
- *
- * A pppd plugin that allows PPP traffic to be carried over L2TP using
- * this driver is available from the OpenL2TP project at
- * http://openl2tp.sourceforge.net.
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <asm/uaccess.h>
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-
-#include <linux/netdevice.h>
-#include <linux/net.h>
-#include <linux/inetdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/if_pppox.h>
-#include <linux/if_pppol2tp.h>
-#include <net/sock.h>
-#include <linux/ppp_channel.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#include <linux/file.h>
-#include <linux/hash.h>
-#include <linux/sort.h>
-#include <linux/proc_fs.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/udp.h>
-#include <net/xfrm.h>
-
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-
-#define PPPOL2TP_DRV_VERSION "V1.0"
-
-/* L2TP header constants */
-#define L2TP_HDRFLAG_T 0x8000
-#define L2TP_HDRFLAG_L 0x4000
-#define L2TP_HDRFLAG_S 0x0800
-#define L2TP_HDRFLAG_O 0x0200
-#define L2TP_HDRFLAG_P 0x0100
-
-#define L2TP_HDR_VER_MASK 0x000F
-#define L2TP_HDR_VER 0x0002
-
-/* Space for UDP, L2TP and PPP headers */
-#define PPPOL2TP_HEADER_OVERHEAD 40
-
-/* Just some random numbers */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
-#define L2TP_SESSION_MAGIC 0x0C04EB7D
-
-#define PPPOL2TP_HASH_BITS 4
-#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
-
-/* Default trace flags */
-#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
-
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while(0)
-
-/* Number of bytes to build transmit L2TP headers.
- * Unfortunately the size is different depending on whether sequence numbers
- * are enabled.
- */
-#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
-#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
-
-struct pppol2tp_tunnel;
-
-/* Describes a session. It is the sk_user_data field in the PPPoL2TP
- * socket. Contains information to determine incoming packets and transmit
- * outgoing ones.
- */
-struct pppol2tp_session
-{
- int magic; /* should be
- * L2TP_SESSION_MAGIC */
- int owner; /* pid that opened the socket */
-
- struct sock *sock; /* Pointer to the session
- * PPPoX socket */
- struct sock *tunnel_sock; /* Pointer to the tunnel UDP
- * socket */
-
- struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
-
- struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
- * context */
-
- char name[20]; /* "sess xxxxx/yyyyy", where
- * x=tunnel_id, y=session_id */
- int mtu;
- int mru;
- int flags; /* accessed by PPPIOCGFLAGS.
- * Unused. */
- unsigned recv_seq:1; /* expect receive packets with
- * sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
- * numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
- * sequence numbers under
- * control of LNS. */
- int debug; /* bitmask of debug message
- * categories */
- int reorder_timeout; /* configured reorder timeout
- * (in jiffies) */
- u16 nr; /* session NR state (receive) */
- u16 ns; /* session NR state (send) */
- struct sk_buff_head reorder_q; /* receive reorder queue */
- struct pppol2tp_ioc_stats stats;
- struct hlist_node hlist; /* Hash list node */
-};
-
-/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
- * all the associated sessions so incoming packets can be sorted out
- */
-struct pppol2tp_tunnel
-{
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
- rwlock_t hlist_lock; /* protect session_hlist */
- struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
- /* hashed list of sessions,
- * hashed by id */
- int debug; /* bitmask of debug message
- * categories */
- char name[12]; /* "tunl xxxxx" */
- struct pppol2tp_ioc_stats stats;
-
- void (*old_sk_destruct)(struct sock *);
-
- struct sock *sock; /* Parent socket */
- struct list_head list; /* Keep a list of all open
- * prepared sockets */
- struct net *pppol2tp_net; /* the net we belong to */
-
- atomic_t ref_count;
-};
-
-/* Private data stored for received packets in the skb.
- */
-struct pppol2tp_skb_cb {
- u16 ns;
- u16 nr;
- u16 has_seq;
- u16 length;
- unsigned long expires;
-};
-
-#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
-
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
-
-static atomic_t pppol2tp_tunnel_count;
-static atomic_t pppol2tp_session_count;
-static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
-static const struct proto_ops pppol2tp_ops;
-
-/* per-net private data for this module */
-static int pppol2tp_net_id __read_mostly;
-struct pppol2tp_net {
- struct list_head pppol2tp_tunnel_list;
- rwlock_t pppol2tp_tunnel_list_lock;
-};
-
-static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
-{
- BUG_ON(!net);
-
- return net_generic(net, pppol2tp_net_id);
-}
-
-/* Helpers to obtain tunnel/session contexts from sockets.
- */
-static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
-{
- struct pppol2tp_session *session;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- session = (struct pppol2tp_session *)(sk->sk_user_data);
- if (session == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-out:
- return session;
-}
-
-static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
- if (tunnel == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-out:
- return tunnel;
-}
-
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- pppol2tp_tunnel_free(tunnel);
-}
-
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- unsigned long hash_val = (unsigned long) session_id;
- return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
-}
-
-/* Lookup a session by id
- */
-static struct pppol2tp_session *
-pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- struct hlist_head *session_list =
- pppol2tp_session_id_hash(tunnel, session_id);
- struct pppol2tp_session *session;
- struct hlist_node *walk;
-
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
- if (session->tunnel_addr.s_session == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
- }
- read_unlock_bh(&tunnel->hlist_lock);
-
- return NULL;
-}
-
-/* Lookup a tunnel by id
- */
-static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
-{
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
- if (tunnel->stats.tunnel_id == tunnel_id) {
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- return tunnel;
- }
- }
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return NULL;
-}
-
-/*****************************************************************************
- * Receive data handling
- *****************************************************************************/
-
-/* Queue a skb in order. We come here only if the skb has an L2TP sequence
- * number.
- */
-static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct sk_buff *skbp;
- struct sk_buff *tmp;
- u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
-
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
- if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
- __skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
- session->stats.rx_oos_packets++;
- goto out;
- }
- }
-
- __skb_queue_tail(&session->reorder_q, skb);
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-/* Dequeue a single skb.
- */
-static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel = session->tunnel;
- int length = PPPOL2TP_SKB_CB(skb)->length;
- struct sock *session_sock = NULL;
-
- /* We're about to requeue the skb, so return resources
- * to its current owner (a socket receive buffer).
- */
- skb_orphan(skb);
-
- tunnel->stats.rx_packets++;
- tunnel->stats.rx_bytes += length;
- session->stats.rx_packets++;
- session->stats.rx_bytes += length;
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- /* Bump our Nr */
- session->nr++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
- }
-
- /* If the socket is bound, send it in to PPP's input queue. Otherwise
- * queue it on the session socket.
- */
- session_sock = session->sock;
- if (session_sock->sk_state & PPPOX_BOUND) {
- struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, length);
-
- /* We need to forget all info related to the L2TP packet
- * gathered in the skb as we are going to reuse the same
- * skb for the inner packet.
- * Namely we need to:
- * - reset xfrm (IPSec) information as it applies to
- * the outer L2TP packet and not to the inner one
- * - release the dst to force a route lookup on the inner
- * IP packet since skb->dst currently points to the dst
- * of the UDP tunnel
- * - reset netfilter information as it doesn't apply
- * to the inner packet either
- */
- secpath_reset(skb);
- skb_dst_drop(skb);
- nf_reset(skb);
-
- po = pppox_sk(session_sock);
- ppp_input(&po->chan, skb);
- } else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
-
- /* Not bound. Nothing we can do, so discard. */
- session->stats.rx_errors++;
- kfree_skb(skb);
- }
-
- sock_put(session->sock);
-}
-
-/* Dequeue skbs from the session's reorder_q, subject to packet order.
- * Skbs that have been in the queue for too long are simply discarded.
- */
-static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
-{
- struct sk_buff *skb;
- struct sk_buff *tmp;
-
- /* If the pkt at the head of the queue has the nr that we
- * expect to send up next, dequeue it and any other
- * in-sequence packets behind it.
- */
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
- if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
- session->stats.rx_seq_discards++;
- session->stats.rx_errors++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded (too old), "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- __skb_unlink(skb, &session->reorder_q);
- kfree_skb(skb);
- sock_put(session->sock);
- continue;
- }
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %hu len %d, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto out;
- }
- }
- __skb_unlink(skb, &session->reorder_q);
-
- /* Process the skb. We release the queue lock while we
- * do so to let other contexts process the queue.
- */
- spin_unlock_bh(&session->reorder_q.lock);
- pppol2tp_recv_dequeue_skb(session, skb);
- spin_lock_bh(&session->reorder_q.lock);
- }
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
- struct sk_buff *skb)
-{
- struct udphdr *uh = udp_hdr(skb);
- u16 ulen = ntohs(uh->len);
- struct inet_sock *inet;
- __wsum psum;
-
- if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
- return 0;
-
- inet = inet_sk(sk);
- psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
- IPPROTO_UDP, 0);
-
- if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
- !csum_fold(csum_add(psum, skb->csum)))
- return 0;
-
- skb->csum = psum;
-
- return __skb_checksum_complete(skb);
-}
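
The helper above validates the UDP checksum by seeding the sum with the IPv4 pseudo-header (csum_tcpudp_nofold), adding whatever the driver already accumulated for the datagram, and accepting the packet when csum_fold() of the total is zero, i.e. when the one's-complement sum over pseudo-header plus datagram is 0xffff. A standalone sketch of the same end-to-end arithmetic, written against plain buffers rather than the kernel csum helpers:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	/* 16-bit one's-complement sum; the arithmetic behind the kernel's csum_*
	 * helpers, reimplemented here only for illustration. */
	static uint32_t ocsum(uint32_t sum, const uint8_t *p, size_t len)
	{
		while (len > 1) {
			sum += (p[0] << 8) | p[1];
			p += 2;
			len -= 2;
		}
		if (len)
			sum += p[0] << 8;
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return sum;
	}

	/* A received UDP datagram (header + payload, checksum field included) is
	 * valid when the sum over the IPv4 pseudo-header plus the datagram folds
	 * to 0xffff. Addresses are given as network-order byte arrays. */
	static int udp_csum_ok(const uint8_t saddr[4], const uint8_t daddr[4],
			       const uint8_t *udp, uint16_t ulen)
	{
		uint8_t pseudo[12];

		memcpy(pseudo + 0, saddr, 4);
		memcpy(pseudo + 4, daddr, 4);
		pseudo[8] = 0;
		pseudo[9] = 17;			/* IPPROTO_UDP */
		pseudo[10] = ulen >> 8;
		pseudo[11] = ulen & 0xff;

		return ocsum(ocsum(0, pseudo, 12), udp, ulen) == 0xffff;
	}

	int main(void)
	{
		/* 9-byte UDP datagram 127.0.0.1 -> 127.0.0.1, ports 53 -> 53,
		 * payload "a", checksum 0xa06f precomputed for this example. */
		const uint8_t ip[4] = { 127, 0, 0, 1 };
		const uint8_t dgram[] = { 0x00, 0x35, 0x00, 0x35, 0x00, 0x09,
					  0xa0, 0x6f, 'a' };

		printf("checksum ok: %d\n",
		       udp_csum_ok(ip, ip, dgram, sizeof(dgram)));
		return 0;
	}
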
-
-/* Internal receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
-{
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- unsigned char *ptr, *optr;
- u16 hdrflags;
- u16 tunnel_id, session_id;
- int length;
- int offset;
-
- tunnel = pppol2tp_sock_to_tunnel(sock);
- if (tunnel == NULL)
- goto no_tunnel;
-
- if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
- goto discard_bad_csum;
-
- /* UDP always verifies the packet length. */
- __skb_pull(skb, sizeof(struct udphdr));
-
- /* Short packet? */
- if (!pskb_may_pull(skb, 12)) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
- goto error;
- }
-
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
- /* Get L2TP header flags */
- hdrflags = ntohs(*(__be16*)ptr);
-
- /* Trace packet contents, if enabled */
- if (tunnel->debug & PPPOL2TP_MSG_DATA) {
- length = min(16u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
- }
-
- /* Get length of L2TP packet */
- length = skb->len;
-
- /* If type is control packet, it is handled by userspace. */
- if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
- goto error;
- }
-
- /* Skip flags */
- ptr += 2;
-
- /* If length is present, skip it */
- if (hdrflags & L2TP_HDRFLAG_L)
- ptr += 2;
-
- /* Extract tunnel and session ID */
- tunnel_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
- session_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Find the session context */
- session = pppol2tp_session_find(tunnel, session_id);
- if (!session) {
- /* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket found (%hu/%hu). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
- goto error;
- }
- sock_hold(session->sock);
-
- /* The ref count on the socket was increased by the above call since
- * we now hold a pointer to the session. Take care to do sock_put()
- * when exiting this function from now on...
- */
-
- /* Handle the optional sequence numbers. If we are the LAC,
- * enable/disable sequence numbers under the control of the LNS. If
- * no sequence numbers present but we were expecting them, discard
- * frame.
- */
- if (hdrflags & L2TP_HDRFLAG_S) {
- u16 ns, nr;
- ns = ntohs(*(__be16 *) ptr);
- ptr += 2;
- nr = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Received a packet with sequence numbers. If we're the LNS,
- * check if we are sending sequence numbers and if not,
- * configure it so.
- */
- if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
- session->send_seq = -1;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->ns = ns;
- PPPOL2TP_SKB_CB(skb)->nr = nr;
- PPPOL2TP_SKB_CB(skb)->has_seq = 1;
-
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
- session->name, ns, nr, session->nr);
- } else {
- /* No sequence numbers.
- * If user has configured mandatory sequence numbers, discard.
- */
- if (session->recv_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* If we're the LAC and we're sending sequence numbers, the
- * LNS has requested that we no longer send sequence numbers.
- * If we're the LNS and we're sending sequence numbers, the
- * LAC is broken. Discard the frame.
- */
- if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
- session->send_seq = 0;
- } else if (session->send_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->has_seq = 0;
- }
-
- /* If offset bit set, skip it. */
- if (hdrflags & L2TP_HDRFLAG_O) {
- offset = ntohs(*(__be16 *)ptr);
- ptr += 2 + offset;
- }
-
- offset = ptr - optr;
- if (!pskb_may_pull(skb, offset))
- goto discard;
-
- __skb_pull(skb, offset);
-
- /* Skip PPP header, if present. In testing, Microsoft L2TP clients
- * don't send the PPP header (PPP header compression enabled), but
- * other clients can include the header. So we cope with both cases
- * here. The PPP header is always FF03 when using L2TP.
- *
- * Note that skb->data[] isn't dereferenced from a u16 ptr here since
- * the field may be unaligned.
- */
- if (!pskb_may_pull(skb, 2))
- goto discard;
-
- if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
- skb_pull(skb, 2);
-
- /* Prepare skb for adding to the session's reorder_q. Hold
- * packets for max reorder_timeout or 1 second if not
- * reordering.
- */
- PPPOL2TP_SKB_CB(skb)->length = length;
- PPPOL2TP_SKB_CB(skb)->expires = jiffies +
- (session->reorder_timeout ? session->reorder_timeout : HZ);
-
- /* Add packet to the session's receive queue. Reordering is done here, if
- * enabled. Saved L2TP protocol info is stored in skb->cb[].
- */
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (session->reorder_timeout != 0) {
- /* Packet reordering enabled. Add skb to session's
- * reorder queue, in order of ns.
- */
- pppol2tp_recv_queue_skb(session, skb);
- } else {
- /* Packet reordering disabled. Discard out-of-sequence
- * packets
- */
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- session->stats.rx_seq_discards++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto discard;
- }
- skb_queue_tail(&session->reorder_q, skb);
- }
- } else {
- /* No sequence numbers. Add the skb to the tail of the
- * reorder queue. This ensures that it will be
- * delivered after all previous sequenced skbs.
- */
- skb_queue_tail(&session->reorder_q, skb);
- }
-
- /* Try to dequeue as many skbs from reorder_q as we can. */
- pppol2tp_recv_dequeue(session);
- sock_put(sock);
-
- return 0;
-
-discard:
- session->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(session->sock);
- sock_put(sock);
-
- return 0;
-
-discard_bad_csum:
- LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
- UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
- tunnel->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(sock);
-
- return 0;
-
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
- sock_put(sock);
-
-no_tunnel:
- return 1;
-}
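
pppol2tp_recv_core() above walks the variable-length L2TP data header: two bytes of flags/version, an optional length word (L bit), tunnel and session IDs, optional Ns/Nr (S bit) and an optional offset pad (O bit). A standalone sketch of just that walk, using the flag constants defined earlier in the file (no bounds checks, for brevity):

	#include <stdint.h>
	#include <stdio.h>

	#define L2TP_HDRFLAG_T 0x8000
	#define L2TP_HDRFLAG_L 0x4000
	#define L2TP_HDRFLAG_S 0x0800
	#define L2TP_HDRFLAG_O 0x0200

	static uint16_t get_be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

	/* Returns the offset of the payload inside buf, or -1 for a control frame.
	 * Mirrors the pointer arithmetic in pppol2tp_recv_core(). */
	static int l2tp_data_payload_offset(const uint8_t *buf,
					    uint16_t *tid, uint16_t *sid)
	{
		const uint8_t *p = buf;
		uint16_t flags = get_be16(p);

		if (flags & L2TP_HDRFLAG_T)	/* control frames go to userspace */
			return -1;
		p += 2;
		if (flags & L2TP_HDRFLAG_L)	/* optional length field */
			p += 2;
		*tid = get_be16(p); p += 2;
		*sid = get_be16(p); p += 2;
		if (flags & L2TP_HDRFLAG_S)	/* optional Ns/Nr */
			p += 4;
		if (flags & L2TP_HDRFLAG_O)	/* optional offset pad */
			p += 2 + get_be16(p);
		return p - buf;
	}

	int main(void)
	{
		/* ver 2, S bit set, tunnel 1, session 2, Ns=0, Nr=0 */
		const uint8_t hdr[] = { 0x08, 0x02, 0x00, 0x01, 0x00, 0x02,
					0x00, 0x00, 0x00, 0x00 };
		uint16_t tid, sid;
		int off = l2tp_data_payload_offset(hdr, &tid, &sid);

		printf("tunnel=%d session=%d payload at %d\n", tid, sid, off);
		return 0;
	}
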
-
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
- */
-static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = pppol2tp_sock_to_tunnel(sk);
- if (tunnel == NULL)
- goto pass_up;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
-
- if (pppol2tp_recv_core(sk, skb))
- goto pass_up_put;
-
- sock_put(sk);
- return 0;
-
-pass_up_put:
- sock_put(sk);
-pass_up:
- return 1;
-}
-
-/* Receive message. This is the recvmsg for the PPPoL2TP socket.
- */
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len,
- int flags)
-{
- int err;
- struct sk_buff *skb;
- struct sock *sk = sock->sk;
-
- err = -EIO;
- if (sk->sk_state & PPPOX_BOUND)
- goto end;
-
- msg->msg_namelen = 0;
-
- err = 0;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto end;
-
- if (len > skb->len)
- len = skb->len;
- else if (len < skb->len)
- msg->msg_flags |= MSG_TRUNC;
-
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
- if (likely(err == 0))
- err = len;
-
- kfree_skb(skb);
-end:
- return err;
-}
-
-/************************************************************************
- * Transmit handling
- ***********************************************************************/
-
-/* Tell how big L2TP headers are for a particular session. This
- * depends on whether sequence numbers are being used.
- */
-static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
-{
- if (session->send_seq)
- return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
-
- return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-}
-
-/* Build an L2TP header for the session into the buffer provided.
- */
-static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
- void *buf)
-{
- __be16 *bufp = buf;
- u16 flags = L2TP_HDR_VER;
-
- if (session->send_seq)
- flags |= L2TP_HDRFLAG_S;
-
- /* Setup L2TP header.
- * FIXME: Can this ever be unaligned? Is direct dereferencing of
- * 16-bit header fields safe here for all architectures?
- */
- *bufp++ = htons(flags);
- *bufp++ = htons(session->tunnel_addr.d_tunnel);
- *bufp++ = htons(session->tunnel_addr.d_session);
- if (session->send_seq) {
- *bufp++ = htons(session->ns);
- *bufp++ = 0;
- session->ns++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %hu\n", session->name, session->ns);
- }
-}
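
For a session with sequence numbers enabled, the builder above emits five big-endian 16-bit words: flags (version 2 with the S bit, i.e. 0x0802), peer tunnel ID, peer session ID, Ns, and a zero Nr, 10 bytes in total; without sequencing the last two words are dropped, giving the 6-byte form the HDR_SIZE constants describe. A tiny standalone check of that layout (tunnel 9, session 17 and Ns 1 are arbitrary example values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 0x0002 (ver 2) | 0x0800 (S bit) = 0x0802, then d_tunnel,
		 * d_session, Ns, Nr -- the order pppol2tp_build_l2tp_header()
		 * writes them in. */
		uint16_t words[5] = { 0x0002 | 0x0800, 9, 17, 1, 0 };
		int i;

		for (i = 0; i < 5; i++)
			printf("%02x %02x ", words[i] >> 8, words[i] & 0xff);
		printf("(%zu bytes)\n", sizeof(uint16_t) * 5);
		return 0;
	}
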
-
-/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
- * when a user application does a sendmsg() on the session socket. L2TP and
- * PPP headers must be inserted into the user's data.
- */
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len)
-{
- static const unsigned char ppph[2] = { 0xff, 0x03 };
- struct sock *sk = sock->sk;
- struct inet_sock *inet;
- __wsum csum;
- struct sk_buff *skb;
- int error;
- int hdr_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- struct udphdr *uh;
- unsigned int len;
- struct sock *sk_tun;
- u16 udp_len;
-
- error = -ENOTCONN;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto error;
-
- /* Get session and tunnel contexts */
- error = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto error;
-
- sk_tun = session->tunnel_sock;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto error_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Allocate a socket buffer */
- error = -ENOMEM;
- skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len +
- sizeof(ppph) + total_len,
- 0, GFP_KERNEL);
- if (!skb)
- goto error_put_sess_tun;
-
- /* Reserve space for headers. */
- skb_reserve(skb, NET_SKB_PAD);
- skb_reset_network_header(skb);
- skb_reserve(skb, sizeof(struct iphdr));
- skb_reset_transport_header(skb);
-
- /* Build UDP header */
- inet = inet_sk(sk_tun);
- udp_len = hdr_len + sizeof(ppph) + total_len;
- uh = (struct udphdr *) skb->data;
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
- skb_put(skb, sizeof(struct udphdr));
-
- /* Build L2TP header */
- pppol2tp_build_l2tp_header(session, skb->data);
- skb_put(skb, hdr_len);
-
- /* Add PPP header */
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
- skb_put(skb, 2);
-
- /* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
- if (error < 0) {
- kfree_skb(skb);
- goto error_put_sess_tun;
- }
- skb_put(skb, total_len);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%hu\n", session->name,
- total_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, total_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < total_len; i++) {
- printk(" %02X", *datap++);
- if (i == 15) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- error = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (error >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- return error;
-
-error_put_sess_tun:
- sock_put(session->tunnel_sock);
-error_put_sess:
- sock_put(sk);
-error:
- return error;
-}
-
-/* Automatically called when the skb is freed.
- */
-static void pppol2tp_sock_wfree(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* For data skbs that we transmit, we associate with the tunnel socket
- * but don't do accounting.
- */
-static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = pppol2tp_sock_wfree;
-}
-
-/* Transmit function called by generic PPP driver. Sends PPP frame
- * over PPPoL2TP socket.
- *
- * This is almost the same as pppol2tp_sendmsg(), but rather than
- * being called with a msghdr from userspace, it is called with a skb
- * from the kernel.
- *
- * The supplied skb from ppp doesn't have enough headroom for the
- * insertion of L2TP, UDP and IP headers so we need to allocate more
- * headroom in the skb. This will create a cloned skb. But we must be
- * careful in the error case because the caller will expect to free
- * the skb it supplied, not our cloned skb. So we take care to always
- * leave the original skb unfreed if we return an error.
- */
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
-{
- static const u8 ppph[2] = { 0xff, 0x03 };
- struct sock *sk = (struct sock *) chan->private;
- struct sock *sk_tun;
- int hdr_len;
- u16 udp_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int rc;
- int headroom;
- int data_len = skb->len;
- struct inet_sock *inet;
- __wsum csum;
- struct udphdr *uh;
- unsigned int len;
- int old_headroom;
- int new_headroom;
-
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto abort;
-
- /* Get session and tunnel contexts from the socket */
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto abort;
-
- sk_tun = session->tunnel_sock;
- if (sk_tun == NULL)
- goto abort_put_sess;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto abort_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Check that there's enough headroom in the skb to insert IP,
- * UDP and L2TP and PPP headers. If not enough, expand it to
- * make room. Adjust truesize.
- */
- headroom = NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len + sizeof(ppph);
- old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
- goto abort_put_sess_tun;
-
- new_headroom = skb_headroom(skb);
- skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
- /* Setup PPP header */
- __skb_push(skb, sizeof(ppph));
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
-
- /* Setup L2TP header */
- pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
-
- udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
-
- /* Setup UDP header */
- inet = inet_sk(sk_tun);
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes, ns=%hu\n", session->name,
- data_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes\n", session->name, data_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < data_len; i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
- nf_reset(skb);
-
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
- pppol2tp_skb_set_owner_w(skb, sk_tun);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
- (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- rc = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (rc >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- sock_put(sk_tun);
- sock_put(sk);
- return 1;
-
-abort_put_sess_tun:
- sock_put(sk_tun);
-abort_put_sess:
- sock_put(sk);
-abort:
- /* Free the original skb */
- kfree_skb(skb);
- return 1;
-}
-
-/*****************************************************************************
- * Session (and tunnel control) socket create/destroy.
- *****************************************************************************/
-
-/* When the tunnel UDP socket is closed, all the attached sockets need to go
- * too.
- */
-static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
-{
- int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
- struct pppol2tp_session *session;
- struct sock *sk;
-
- BUG_ON(tunnel == NULL);
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
-
- write_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct sk_buff *skb;
-
- session = hlist_entry(walk, struct pppol2tp_session, hlist);
-
- sk = session->sock;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
-
- hlist_del_init(&session->hlist);
-
- /* Since we should hold the sock lock while
- * doing any unbinding, we need to release the
- * lock we're holding before taking that lock.
- * Hold a reference to the sock so it doesn't
- * disappear as we're jumping between locks.
- */
- sock_hold(sk);
- write_unlock_bh(&tunnel->hlist_lock);
- lock_sock(sk);
-
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
- pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_DEAD;
- sk->sk_state_change(sk);
- }
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
-
- release_sock(sk);
- sock_put(sk);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- write_lock_bh(&tunnel->hlist_lock);
- goto again;
- }
- }
- write_unlock_bh(&tunnel->hlist_lock);
-}
-
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
-
- /* Remove from socket list */
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_del_init(&tunnel->list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- atomic_dec(&pppol2tp_tunnel_count);
- kfree(tunnel);
-}
-
-/* Tunnel UDP socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void pppol2tp_tunnel_destruct(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = sk->sk_user_data;
- if (tunnel == NULL)
- goto end;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
-
- /* Close all sessions */
- pppol2tp_tunnel_closeall(tunnel);
-
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = 0;
- (udp_sk(sk))->encap_rcv = NULL;
-
- /* Remove hooks into tunnel socket */
- tunnel->sock = NULL;
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
-
- /* Call original (UDP) socket destructor */
- if (sk->sk_destruct != NULL)
- (*sk->sk_destruct)(sk);
-
- pppol2tp_tunnel_dec_refcount(tunnel);
-
-end:
- return;
-}
-
-/* Really kill the session socket. (Called from sock_put() if
- * refcnt == 0.)
- */
-static void pppol2tp_session_destruct(struct sock *sk)
-{
- struct pppol2tp_session *session = NULL;
-
- if (sk->sk_user_data != NULL) {
- struct pppol2tp_tunnel *tunnel;
-
- session = sk->sk_user_data;
- if (session == NULL)
- goto out;
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
- /* Don't use pppol2tp_sock_to_tunnel() here to
- * get the tunnel context because the tunnel
- * socket might have already been closed (its
- * sk->sk_user_data will be NULL) so use the
- * session's private tunnel ptr instead.
- */
- tunnel = session->tunnel;
- if (tunnel != NULL) {
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* If session_id is zero, this is a null
- * session context, which was created for a
- * socket that is being used only to manage
- * tunnels.
- */
- if (session->tunnel_addr.s_session != 0) {
- /* Delete the session socket from the
- * hash
- */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_dec(&pppol2tp_session_count);
- }
-
- /* This will delete the tunnel context if this
- * is the last session on the tunnel.
- */
- session->tunnel = NULL;
- session->tunnel_sock = NULL;
- pppol2tp_tunnel_dec_refcount(tunnel);
- }
- }
-
- kfree(session);
-out:
- return;
-}
-
-/* Called when the PPPoX socket (session) is closed.
- */
-static int pppol2tp_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- int error;
-
- if (!sk)
- return 0;
-
- error = -EBADF;
- lock_sock(sk);
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto error;
-
- pppox_unbind_sock(sk);
-
- /* Signal the death of the socket. */
- sk->sk_state = PPPOX_DEAD;
- sock_orphan(sk);
- sock->sk = NULL;
-
- session = pppol2tp_sock_to_session(sk);
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- if (session != NULL) {
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
- sock_put(sk);
- }
-
- release_sock(sk);
-
- /* This will delete the session context via
- * pppol2tp_session_destruct() if the socket's refcnt drops to
- * zero.
- */
- sock_put(sk);
-
- return 0;
-
-error:
- release_sock(sk);
- return error;
-}
-
-/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
- * sockets attached to it.
- */
-static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
- int fd, u16 tunnel_id, int *error)
-{
- int err;
- struct socket *sock = NULL;
- struct sock *sk;
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn;
- struct sock *ret = NULL;
-
- /* Get the tunnel UDP socket from the fd, which was opened by
- * the userspace L2TP daemon.
- */
- err = -EBADF;
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
- tunnel_id, fd, err);
- goto err;
- }
-
- sk = sock->sk;
-
- /* Quick sanity checks */
- err = -EPROTONOSUPPORT;
- if (sk->sk_protocol != IPPROTO_UDP) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
- goto err;
- }
- err = -EAFNOSUPPORT;
- if (sock->ops->family != AF_INET) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong family, got %d, expected %d\n",
- tunnel_id, fd, sock->ops->family, AF_INET);
- goto err;
- }
-
- err = -ENOTCONN;
-
- /* Check if this socket has already been prepped */
- tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
- if (tunnel != NULL) {
- /* User-data field already set */
- err = -EBUSY;
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* This socket has already been prepped */
- ret = tunnel->sock;
- goto out;
- }
-
- /* This socket is available and needs prepping. Create a new tunnel
- * context and init it.
- */
- sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
- if (sk->sk_user_data == NULL) {
- err = -ENOMEM;
- goto err;
- }
-
- tunnel->magic = L2TP_TUNNEL_MAGIC;
- sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
-
- tunnel->stats.tunnel_id = tunnel_id;
- tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
-
- /* Hook on the tunnel socket destructor so that we can cleanup
- * if the tunnel socket goes away.
- */
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = pppol2tp_tunnel_destruct;
-
- tunnel->sock = sk;
- sk->sk_allocation = GFP_ATOMIC;
-
- /* Misc init */
- rwlock_init(&tunnel->hlist_lock);
-
- /* The net we belong to */
- tunnel->pppol2tp_net = net;
- pn = pppol2tp_pernet(net);
-
- /* Add tunnel to our list */
- INIT_LIST_HEAD(&tunnel->list);
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- atomic_inc(&pppol2tp_tunnel_count);
-
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero.
- */
- pppol2tp_tunnel_inc_refcount(tunnel);
-
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
- (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
-
- ret = tunnel->sock;
-
- *error = 0;
-out:
- if (sock)
- sockfd_put(sock);
-
- return ret;
-
-err:
- *error = err;
- goto out;
-}
-
-static struct proto pppol2tp_sk_proto = {
- .name = "PPPOL2TP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct pppox_sock),
-};
-
-/* socket() handler. Initialize a new struct sock.
- */
-static int pppol2tp_create(struct net *net, struct socket *sock)
-{
- int error = -ENOMEM;
- struct sock *sk;
-
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
- if (!sk)
- goto out;
-
- sock_init_data(sock, sk);
-
- sock->state = SS_UNCONNECTED;
- sock->ops = &pppol2tp_ops;
-
- sk->sk_backlog_rcv = pppol2tp_recv_core;
- sk->sk_protocol = PX_PROTO_OL2TP;
- sk->sk_family = PF_PPPOX;
- sk->sk_state = PPPOX_NONE;
- sk->sk_type = SOCK_STREAM;
- sk->sk_destruct = pppol2tp_session_destruct;
-
- error = 0;
-
-out:
- return error;
-}
-
-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
- */
-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
- struct pppox_sock *po = pppox_sk(sk);
- struct sock *tunnel_sock = NULL;
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- struct dst_entry *dst;
- int error = 0;
-
- lock_sock(sk);
-
- error = -EINVAL;
- if (sp->sa_protocol != PX_PROTO_OL2TP)
- goto end;
-
- /* Check for already bound sockets */
- error = -EBUSY;
- if (sk->sk_state & PPPOX_CONNECTED)
- goto end;
-
- /* We don't support rebinding anyway */
- error = -EALREADY;
- if (sk->sk_user_data)
- goto end; /* socket is already attached */
-
- /* Don't bind if s_tunnel is 0 */
- error = -EINVAL;
- if (sp->pppol2tp.s_tunnel == 0)
- goto end;
-
- /* Special case: prepare tunnel socket if s_session and
- * d_session is 0. Otherwise look up tunnel using supplied
- * tunnel id.
- */
- if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
- tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
- sp->pppol2tp.fd,
- sp->pppol2tp.s_tunnel,
- &error);
- if (tunnel_sock == NULL)
- goto end;
-
- sock_hold(tunnel_sock);
- tunnel = tunnel_sock->sk_user_data;
- } else {
- tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
-
- /* Error if we can't find the tunnel */
- error = -ENOENT;
- if (tunnel == NULL)
- goto end;
-
- tunnel_sock = tunnel->sock;
- }
-
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
- if (session != NULL)
- goto end;
-
- /* Allocate and initialize a new session context. */
- session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
- if (session == NULL) {
- error = -ENOMEM;
- goto end;
- }
-
- skb_queue_head_init(&session->reorder_q);
-
- session->magic = L2TP_SESSION_MAGIC;
- session->owner = current->pid;
- session->sock = sk;
- session->tunnel = tunnel;
- session->tunnel_sock = tunnel_sock;
- session->tunnel_addr = sp->pppol2tp;
- sprintf(&session->name[0], "sess %hu/%hu",
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session);
-
- session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
- session->stats.session_id = session->tunnel_addr.s_session;
-
- INIT_HLIST_NODE(&session->hlist);
-
- /* Inherit debug options from tunnel */
- session->debug = tunnel->debug;
-
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
- */
- session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
-
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(sk);
- if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(sk));
- if (pmtu != 0)
- session->mtu = session->mru = pmtu -
- PPPOL2TP_HEADER_OVERHEAD;
- dst_release(dst);
- }
-
- /* Special case: if source & dest session_id == 0x0000, this socket is
- * being created to manage the tunnel. Don't add the session to the
- * session hash list, just set up the internal context for use by
- * ioctl() and sockopt() handlers.
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- error = 0;
- sk->sk_user_data = session;
- goto out_no_ppp;
- }
-
- /* Get tunnel context from the tunnel socket */
- tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
- if (tunnel == NULL) {
- error = -EBADF;
- goto end;
- }
-
- /* Right now, because we don't have a way to push the incoming skb's
- * straight through the UDP layer, the only header we need to worry
- * about is the L2TP header. This size is different depending on
- * whether sequence numbers are enabled for the data channel.
- */
- po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-
- po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
- po->chan.mtu = session->mtu;
-
- error = ppp_register_net_channel(sock_net(sk), &po->chan);
- if (error)
- goto end_put_tun;
-
- /* This is how we get the session context from the socket. */
- sk->sk_user_data = session;
-
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- pppol2tp_session_id_hash(tunnel,
- session->tunnel_addr.s_session));
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_inc(&pppol2tp_session_count);
-
-out_no_ppp:
- pppol2tp_tunnel_inc_refcount(tunnel);
- sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
-
-end_put_tun:
- sock_put(tunnel_sock);
-end:
- release_sock(sk);
-
- if (error != 0) {
- if (session)
- PRINTK(session->debug,
- PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "%s: connect failed: %d\n",
- session->name, error);
- else
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "connect failed: %d\n", error);
- }
-
- return error;
-}
-
-/* getname() support.
- */
-static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
-{
- int len = sizeof(struct sockaddr_pppol2tp);
- struct sockaddr_pppol2tp sp;
- int error = 0;
- struct pppol2tp_session *session;
-
- error = -ENOTCONN;
- if (sock->sk->sk_state != PPPOX_CONNECTED)
- goto end;
-
- session = pppol2tp_sock_to_session(sock->sk);
- if (session == NULL) {
- error = -EBADF;
- goto end;
- }
-
- sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_OL2TP;
- memcpy(&sp.pppol2tp, &session->tunnel_addr,
- sizeof(struct pppol2tp_addr));
-
- memcpy(uaddr, &sp, len);
-
- *usockaddr_len = len;
-
- error = 0;
- sock_put(sock->sk);
-
-end:
- return error;
-}
-
-/****************************************************************************
- * ioctl() handlers.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. However, in order to control kernel tunnel features, we allow
- * userspace to create a special "tunnel" PPPoX socket which is used for
- * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
- * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
- * calls.
- ****************************************************************************/
-
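The comment block above describes a control-only "tunnel" PPPoX socket whose session IDs are zero. As a hedged illustration only (not part of this driver; the helper name, header choices and the tunnel_fd/tunnel_id parameters are assumptions), userspace might create such a socket roughly like this:

/* Sketch only: open a control-only PPPoX/L2TP socket for an existing
 * tunnel. tunnel_fd is the tunnel's UDP socket and tunnel_id the local
 * tunnel id; both are assumed to come from the application's own
 * L2TP control-plane code.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

static int open_tunnel_ctl_socket(int tunnel_fd, int tunnel_id)
{
	struct sockaddr_pppol2tp sax;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;		/* tunnel UDP socket */
	sax.pppol2tp.s_tunnel = tunnel_id;	/* must be non-zero */
	sax.pppol2tp.s_session = 0;		/* 0/0 selects the */
	sax.pppol2tp.d_session = 0;		/* control-only path */

	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* usable for the ioctl()/sockopt calls that follow */
}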
-/* Session ioctl helper.
- */
-static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
- unsigned int cmd, unsigned long arg)
-{
- struct ifreq ifr;
- int err = 0;
- struct sock *sk = session->sock;
- int val = (int) arg;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case SIOCGIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
- ifr.ifr_mtu = session->mtu;
- if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case SIOCSIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
-
- session->mtu = ifr.ifr_mtu;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case PPPIOCGMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (put_user(session->mru, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCSMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (get_user(val,(int __user *) arg))
- break;
-
- session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCGFLAGS:
- err = -EFAULT;
- if (put_user(session->flags, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCSFLAGS:
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
- session->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_to_user((void __user *) arg, &session->stats,
- sizeof(session->stats)))
- break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Tunnel ioctl helper.
- *
- * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
- * specifies a session_id, the session ioctl handler is called. This allows an
- * application to retrieve session stats via a tunnel socket.
- */
-static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- struct sock *sk = tunnel->sock;
- struct pppol2tp_ioc_stats stats_req;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
- cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_from_user(&stats_req, (void __user *) arg,
- sizeof(stats_req))) {
- err = -EFAULT;
- break;
- }
- if (stats_req.session_id != 0) {
- /* resend to session ioctl handler */
- struct pppol2tp_session *session =
- pppol2tp_session_find(tunnel, stats_req.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
- err = -EBADR;
- break;
- }
-#ifdef CONFIG_XFRM
- tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
-#endif
- if (copy_to_user((void __user *) arg, &tunnel->stats,
- sizeof(tunnel->stats))) {
- err = -EFAULT;
- break;
- }
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
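As a hedged sketch of the session_id redirect described above pppol2tp_tunnel_ioctl() (the function name, header choices and the tunnel_ctl_fd/sid parameters are assumptions, not part of this driver), an application could pull one session's counters through the tunnel control socket like this:

/* Sketch only: fetch per-session statistics via the tunnel control
 * socket. A non-zero session_id in the request makes the driver hand
 * the ioctl to pppol2tp_session_ioctl(), as noted above.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ppp.h>	/* PPPIOCGL2TPSTATS (header name assumed) */
#include <linux/if_pppol2tp.h>	/* struct pppol2tp_ioc_stats */

static int get_session_stats(int tunnel_ctl_fd, unsigned int sid,
			     struct pppol2tp_ioc_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->session_id = sid;

	return ioctl(tunnel_ctl_fd, PPPIOCGL2TPSTATS, stats);
}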
-/* Main ioctl() handler.
- * Dispatch to tunnel or session helpers depending on the socket.
- */
-static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int err;
-
- if (!sk)
- return 0;
-
- err = -EBADF;
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto end;
-
- err = -ENOTCONN;
- if ((sk->sk_user_data == NULL) ||
- (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session's session_id is zero, treat ioctl as a
- * tunnel ioctl
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- sock_put(session->tunnel_sock);
- goto end_put_sess;
- }
-
- err = pppol2tp_session_ioctl(session, cmd, arg);
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * setsockopt() / getsockopt() support.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. In order to control kernel tunnel features, we allow userspace to
- * create a special "tunnel" PPPoX socket which is used for control only.
- * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
- * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
- *****************************************************************************/
-
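Both option tables below are reached through the same SOL_PPPOL2TP level. As a hedged example (the helper name and its parameters are assumed; the constants are the driver's own), setting the debug mask from userspace amounts to:

/* Sketch only: set the PPPoL2TP debug mask on a session or tunnel
 * control socket. The driver routes this to the tunnel or session
 * setsockopt helper depending on the socket's session_id.
 */
#include <sys/socket.h>
#include <linux/if_pppol2tp.h>	/* SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG */

static int set_l2tp_debug(int fd, int debug_mask)
{
	return setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
			  &debug_mask, sizeof(debug_mask));
}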
-/* Tunnel setsockopt() helper.
- */
-static int pppol2tp_tunnel_setsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session setsockopt helper.
- */
-static int pppol2tp_session_setsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name,
- session->recv_seq);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->send_seq = val ? -1 : 0;
- {
- struct sock *ssk = session->sock;
- struct pppox_sock *po = pppox_sk(ssk);
- po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
- PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
- }
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name,
- session->lns_mode);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name,
- session->reorder_timeout);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Main setsockopt() entry point.
- * Does API checks, then calls either the tunnel or session setsockopt
- * handler, according to whether the PPPoL2TP socket is for a regular
- * session or the special tunnel type.
- */
-static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(val, (int __user *)optval))
- return -EFAULT;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_setsockopt(sk, session, optname, val);
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/* Tunnel getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_tunnel_getsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- *val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_session_getsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- *val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- *val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- *val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- *val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- *val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
- break;
-
- default:
- err = -ENOPROTOOPT;
- }
-
- return err;
-}
-
-/* Main getsockopt() entry point.
- * Does API checks, then calls either the tunnel or session getsockopt
- * handler, according to whether the PPPoX socket is for a regular session
- * or the special tunnel type.
- */
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val, len;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
-
- if (get_user(len, (int __user *) optlen))
- return -EFAULT;
-
- len = min_t(unsigned int, len, sizeof(int));
-
- if (len < 0)
- return -EINVAL;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get the session context */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_getsockopt(sk, session, optname, &val);
-
- err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
- goto end_put_sess;
-
- if (copy_to_user((void __user *) optval, &val, len))
- goto end_put_sess;
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * /proc filesystem for debug
- *****************************************************************************/
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-struct pppol2tp_seq_data {
- struct seq_net_private p;
- struct pppol2tp_tunnel *tunnel; /* current tunnel */
- struct pppol2tp_session *session; /* NULL means get first session in tunnel */
-};
-
-static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
-{
- struct pppol2tp_session *session = NULL;
- struct hlist_node *walk;
- int found = 0;
- int next = 0;
- int i;
-
- read_lock_bh(&tunnel->hlist_lock);
- for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
- if (curr == NULL) {
- found = 1;
- goto out;
- }
- if (session == curr) {
- next = 1;
- continue;
- }
- if (next) {
- found = 1;
- goto out;
- }
- }
- }
-out:
- read_unlock_bh(&tunnel->hlist_lock);
- if (!found)
- session = NULL;
-
- return session;
-}
-
-static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
- struct pppol2tp_tunnel *curr)
-{
- struct pppol2tp_tunnel *tunnel = NULL;
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
- goto out;
- }
- tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
-out:
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return tunnel;
-}
-
-static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
-{
- struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
- struct pppol2tp_net *pn;
- loff_t pos = *offs;
-
- if (!pos)
- goto out;
-
- BUG_ON(m->private == NULL);
- pd = m->private;
- pn = pppol2tp_pernet(seq_file_net(m));
-
- if (pd->tunnel == NULL) {
- if (!list_empty(&pn->pppol2tp_tunnel_list))
- pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
- } else {
- pd->session = next_session(pd->tunnel, pd->session);
- if (pd->session == NULL) {
- pd->tunnel = next_tunnel(pn, pd->tunnel);
- }
- }
-
- /* NULL tunnel and session indicates end of list */
- if ((pd->tunnel == NULL) && (pd->session == NULL))
- pd = NULL;
-
-out:
- return pd;
-}
-
-static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
- (*pos)++;
- return NULL;
-}
-
-static void pppol2tp_seq_stop(struct seq_file *p, void *v)
-{
- /* nothing to do */
-}
-
-static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_tunnel *tunnel = v;
-
- seq_printf(m, "\nTUNNEL '%s', %c %d\n",
- tunnel->name,
- (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
- atomic_read(&tunnel->ref_count) - 1);
- seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
- tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
-}
-
-static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_session *session = v;
-
- seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
- "%04X/%04X %d %c\n",
- session->name,
- ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
- ntohs(session->tunnel_addr.addr.sin_port),
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session,
- session->tunnel_addr.d_tunnel,
- session->tunnel_addr.d_session,
- session->sock->sk_state,
- (session == session->sock->sk_user_data) ?
- 'Y' : 'N');
- seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
- session->mtu, session->mru,
- session->recv_seq ? 'R' : '-',
- session->send_seq ? 'S' : '-',
- session->lns_mode ? "LNS" : "LAC",
- session->debug,
- jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
- session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
-}
-
-static int pppol2tp_seq_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_seq_data *pd = v;
-
- /* display header on line 1 */
- if (v == SEQ_START_TOKEN) {
- seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
- seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
- seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- seq_puts(m, " SESSION name, addr/port src-tid/sid "
- "dest-tid/sid state user-data-ok\n");
- seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
- seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- goto out;
- }
-
- /* Show the tunnel or session context.
- */
- if (pd->session == NULL)
- pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
- pppol2tp_seq_session_show(m, pd->session);
-
-out:
- return 0;
-}
-
-static const struct seq_operations pppol2tp_seq_ops = {
- .start = pppol2tp_seq_start,
- .next = pppol2tp_seq_next,
- .stop = pppol2tp_seq_stop,
- .show = pppol2tp_seq_show,
-};
-
-/* Called when our /proc file is opened. We allocate data for use when
- * iterating our tunnel / session contexts and store it in the private
- * data of the seq_file.
- */
-static int pppol2tp_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &pppol2tp_seq_ops,
- sizeof(struct pppol2tp_seq_data));
-}
-
-static const struct file_operations pppol2tp_proc_fops = {
- .owner = THIS_MODULE,
- .open = pppol2tp_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*****************************************************************************
- * Init and cleanup
- *****************************************************************************/
-
-static const struct proto_ops pppol2tp_ops = {
- .family = AF_PPPOX,
- .owner = THIS_MODULE,
- .release = pppol2tp_release,
- .bind = sock_no_bind,
- .connect = pppol2tp_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = pppol2tp_getname,
- .poll = datagram_poll,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = pppol2tp_setsockopt,
- .getsockopt = pppol2tp_getsockopt,
- .sendmsg = pppol2tp_sendmsg,
- .recvmsg = pppol2tp_recvmsg,
- .mmap = sock_no_mmap,
- .ioctl = pppox_ioctl,
-};
-
-static struct pppox_proto pppol2tp_proto = {
- .create = pppol2tp_create,
- .ioctl = pppol2tp_ioctl
-};
-
-static __net_init int pppol2tp_init_net(struct net *net)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
- struct proc_dir_entry *pde;
-
- INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
- rwlock_init(&pn->pppol2tp_tunnel_list_lock);
-
- pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
-#ifdef CONFIG_PROC_FS
- if (!pde)
- return -ENOMEM;
-#endif
-
- return 0;
-}
-
-static __net_exit void pppol2tp_exit_net(struct net *net)
-{
- proc_net_remove(net, "pppol2tp");
-}
-
-static struct pernet_operations pppol2tp_net_ops = {
- .init = pppol2tp_init_net,
- .exit = pppol2tp_exit_net,
- .id = &pppol2tp_net_id,
- .size = sizeof(struct pppol2tp_net),
-};
-
-static int __init pppol2tp_init(void)
-{
- int err;
-
- err = proto_register(&pppol2tp_sk_proto, 0);
- if (err)
- goto out;
- err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
- if (err)
- goto out_unregister_pppol2tp_proto;
-
- err = register_pernet_device(&pppol2tp_net_ops);
- if (err)
- goto out_unregister_pppox_proto;
-
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
-
-out:
- return err;
-out_unregister_pppox_proto:
- unregister_pppox_proto(PX_PROTO_OL2TP);
-out_unregister_pppol2tp_proto:
- proto_unregister(&pppol2tp_sk_proto);
- goto out;
-}
-
-static void __exit pppol2tp_exit(void)
-{
- unregister_pppox_proto(PX_PROTO_OL2TP);
- unregister_pernet_device(&pppol2tp_net_ops);
- proto_unregister(&pppol2tp_sk_proto);
-}
-
-module_init(pppol2tp_init);
-module_exit(pppol2tp_exit);
-
-MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
- "James Chapman <jchapman@katalix.com>");
-MODULE_DESCRIPTION("PPP over L2TP over UDP");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229bb34c..87d6b8f3630 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
- dev_info(ctodev(card), "%s: ERROR status \n", __func__);
+ dev_info(ctodev(card), "%s: ERROR status\n", __func__);
/* we need to round up the buffer size to a multiple of 128 */
bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
@@ -547,7 +547,7 @@ out:
void gelic_net_set_multi(struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned int i;
uint8_t *p;
u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- netdev_for_each_mc_addr(mc, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
addr = 0;
- p = mc->dmi_addr;
+ p = ha->addr;
for (i = 0; i < ETH_ALEN; i++) {
addr <<= 8;
addr |= *p++;
@@ -903,9 +903,6 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
gelic_descr_release_tx(card, descr->next);
card->tx_chain.tail = descr->next->next;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
- } else {
- /* OK, DMA started/reserved */
- netdev->trans_start = jiffies;
}
spin_unlock_irqrestore(&card->tx_lock, flags);
@@ -1435,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
container_of(work, struct gelic_card, tx_timeout_task);
struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
- dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
+ dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
if (!(netdev->flags & IFF_UP))
goto out;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f0be507e532..43b8d7797f0 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -96,7 +96,7 @@ static inline int precise_ie(void)
* post_eurus_cmd helpers
*/
struct eurus_cmd_arg_info {
- int pre_arg; /* command requres arg1, arg2 at POST COMMAND */
+ int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
int post_arg; /* command requires arg1, arg2 at GET_RESULT */
};
@@ -301,7 +301,6 @@ static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
/* 16 bits of MSB has available channels */
wl->ch_info = ch_info_raw >> 48;
}
- return;
}
/* SIOGIWRANGE */
@@ -528,7 +527,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
u8 item_len;
u8 item_id;
- pr_debug("%s: data=%p len=%ld \n", __func__,
+ pr_debug("%s: data=%p len=%ld\n", __func__,
data, len);
memset(ie_info, 0, sizeof(struct ie_info));
@@ -897,7 +896,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
default:
ret = -EOPNOTSUPP;
break;
- };
+ }
if (!ret)
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -979,7 +978,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
pr_debug("%s: essid = '%s'\n", __func__, extra);
set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
} else {
- pr_debug("%s: ESSID any \n", __func__);
+ pr_debug("%s: ESSID any\n", __func__);
clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
}
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +986,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
gelic_wl_try_associate(netdev); /* FIXME */
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return 0;
}
@@ -998,7 +997,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
unsigned long irqflag;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
mutex_lock(&wl->assoc_stat_lock);
spin_lock_irqsave(&wl->lock, irqflag);
if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1010,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
mutex_unlock(&wl->assoc_stat_lock);
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
+ pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
return 0;
}
@@ -1028,7 +1027,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1087,7 +1086,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1101,7 +1100,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
unsigned int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
key_index = enc->flags & IW_ENCODE_INDEX;
pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1214,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
int key_index;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
alg = ext->alg;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1287,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
}
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1304,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
int ret = 0;
int max_key_len;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
max_key_len = enc->length - sizeof(struct iw_encode_ext);
if (max_key_len < 0)
@@ -1359,7 +1358,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
}
out:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
/* SIOC{S,G}IWMODE */
@@ -1370,7 +1369,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
__u32 mode = data->mode;
int ret;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
if (mode == IW_MODE_INFRA)
ret = 0;
else
@@ -1384,7 +1383,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
union iwreq_data *data, char *extra)
{
__u32 *mode = &data->mode;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
*mode = IW_MODE_INFRA;
pr_debug("%s: ->\n", __func__);
return 0;
@@ -1992,7 +1991,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
case GELIC_WL_WPA_LEVEL_WPA2:
ret = gelic_wl_do_wpa_setup(wl);
break;
- };
+ }
if (ret) {
pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
@@ -2022,7 +2021,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
if (!rc) {
/* timed out. Maybe key or crypt mode is wrong */
- pr_info("%s: connect timeout \n", __func__);
+ pr_info("%s: connect timeout\n", __func__);
cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
NULL, 0);
kfree(cmd);
@@ -2063,7 +2062,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
}
if (desired_event == event) {
- pr_debug("%s: completed \n", __func__);
+ pr_debug("%s: completed\n", __func__);
complete(&wl->assoc_done);
netif_carrier_on(port_to_netdev(wl_port(wl)));
} else
@@ -2280,26 +2279,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
/*
* driver helpers
*/
-#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
static const iw_handler gelic_wl_wext_handler[] =
{
- IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
- IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
- IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
- IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
- IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
- IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
- IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
- IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
- IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
- IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
- IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
- IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
- IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
+ IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+ IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+ IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+ IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+ IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+ IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+ IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+ IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+ IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+ IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+ IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+ IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+ IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
};
static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2316,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
pr_debug("%s:start\n", __func__);
netdev = alloc_etherdev(sizeof(struct gelic_port) +
sizeof(struct gelic_wl_info));
- pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card);
+ pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
if (!netdev)
return NULL;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afbcbe1..54ebb65ada1 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -222,7 +222,6 @@ static void ql_write_common_reg_l(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return;
}
static void ql_write_common_reg(struct ql3_adapter *qdev,
@@ -230,7 +229,6 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
{
writel(value, reg);
readl(reg);
- return;
}
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
@@ -239,7 +237,6 @@ static void ql_write_nvram_reg(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
udelay(1);
- return;
}
static void ql_write_page0_reg(struct ql3_adapter *qdev,
@@ -249,7 +246,6 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,0);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -262,7 +258,6 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,1);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -275,7 +270,6 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,2);
writel(value, reg);
readl(reg);
- return;
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
@@ -343,8 +337,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
}
@@ -1924,8 +1918,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
--qdev->lrg_buf_skb_check;
@@ -2041,16 +2035,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[i],
+ dma_unmap_addr(&tx_cb->map[i],
mapaddr),
- pci_unmap_len(&tx_cb->map[i], maplen),
+ dma_unmap_len(&tx_cb->map[i], maplen),
PCI_DMA_TODEVICE);
}
}
@@ -2119,8 +2113,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2159,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb_put(skb2, length); /* Just the second buffer length here. */
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
@@ -2258,7 +2252,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
"%x.\n",
ndev->name, net_rsp->opcode);
printk(KERN_ERR PFX
- "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long int)tmp[0],
(unsigned long int)tmp[1],
(unsigned long int)tmp[2],
@@ -2454,8 +2448,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(len);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
seg++;
if (seg_cnt == 1) {
@@ -2488,9 +2482,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->len =
cpu_to_le32(sizeof(struct oal) |
OAL_CONT_ENTRY);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
sizeof(struct oal));
oal_entry = (struct oal_entry *)oal;
oal++;
@@ -2512,8 +2506,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
frag->size);
}
/* Terminate the last segment. */
@@ -2539,22 +2533,22 @@ map_error:
(seg == 12 && seg_cnt > 13) || /* but necessary. */
(seg == 17 && seg_cnt > 18)) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
oal++;
seg++;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_addr(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_addr(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
return NETDEV_TX_BUSY;
@@ -2841,8 +2835,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb, mapaddr),
- pci_unmap_len(lrg_buf_cb, maplen),
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
PCI_DMA_FROMDEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
@@ -2912,8 +2906,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3787,13 @@ static void ql_reset_work(struct work_struct *work)
"%s: Freeing lost SKB.\n",
qdev->ndev->name);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
for(j=1;j<tx_cb->seg_count;j++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[j],mapaddr),
- pci_unmap_len(&tx_cb->map[j],maplen),
+ dma_unmap_addr(&tx_cb->map[j],mapaddr),
+ dma_unmap_len(&tx_cb->map[j],maplen),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(tx_cb->skb);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a..3362a661248 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
struct ql_rcv_buf_cb {
struct ql_rcv_buf_cb *next;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
__le32 buf_phy_addr_low;
__le32 buf_phy_addr_high;
int index;
@@ -1029,8 +1029,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct ql_tx_buf_cb {
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b208db..896d40df9a1 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,9 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 0
-#define QLCNIC_LINUX_VERSIONID "5.0.0"
+#define _QLCNIC_LINUX_SUBVERSION 2
+#define QLCNIC_LINUX_VERSIONID "5.0.2"
+#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -98,8 +99,6 @@
#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
#define QLCNIC_LRO_BUFFER_EXTRA 2048
-#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
-
/* Opcodes to be used with the commands */
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
@@ -133,7 +132,6 @@
#define RCV_RING_NORMAL 0
#define RCV_RING_JUMBO 1
-#define RCV_RING_LRO 2
#define MIN_CMD_DESCRIPTORS 64
#define MIN_RCV_DESCRIPTORS 64
@@ -144,7 +142,6 @@
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
-#define MAX_LRO_RCV_DESCRIPTORS 8
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
@@ -152,8 +149,6 @@
#define get_next_index(index, length) \
(((index) + 1) & ((length) - 1))
-#define MPORT_MULTI_FUNCTION_MODE 0x2222
-
/*
* Following data structures describe the descriptors that will be used.
* Added fields of tcpHdrSize and ipHdrSize. The driver needs to do it only when
@@ -399,13 +394,9 @@ struct qlcnic_hardware_context {
unsigned long pci_len0;
- u32 ocm_win;
- u32 crb_win;
-
rwlock_t crb_lock;
struct mutex mem_lock;
- u8 cut_through;
u8 revision_id;
u8 pci_func;
u8 linkup;
@@ -428,6 +419,10 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
+ u64 null_skb;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
};
/*
@@ -916,14 +911,12 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
- u16 num_lro_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
u8 driver_mismatch;
u8 msix_supported;
u8 rx_csum;
- u8 pci_using_dac;
u8 portnum;
u8 physical_port;
@@ -958,11 +951,15 @@ struct qlcnic_adapter {
u8 dev_state;
u8 diag_test;
u8 diag_cnt;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
u8 rsrd1;
- u16 rsrd2;
+ u16 msg_enable;
u8 mac_addr[ETH_ALEN];
+ u64 dev_rst_time;
+
struct qlcnic_adapter_stats stats;
struct qlcnic_recv_context recv_ctx;
@@ -994,6 +991,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
+void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
(qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1037,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1131,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 0a6a39914ae..c2c1f5cc16c 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -421,7 +421,8 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_free;
}
tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15fe3e1..3bd514ec7e8 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
+ {"null skb",
+ QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
+ {"null rxbuf",
+ QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
};
@@ -404,7 +412,6 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_pending = adapter->num_rxd;
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
- ring->rx_jumbo_pending += adapter->num_lro_rxd;
ring->tx_pending = adapter->num_txd;
if (adapter->ahw.port_type == QLCNIC_GBE) {
@@ -598,19 +605,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u32 data_read, data_written;
+ u32 data_read;
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
- data_written = (u32)0xa5a5a5a5;
-
- QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
- data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
- if (data_written != data_read)
- return 1;
-
return 0;
}
@@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
return 0;
}
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglvl;
+}
+
const struct ethtool_ops qlcnic_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.set_settings = qlcnic_set_settings,
@@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_flags = ethtool_op_get_flags,
.set_flags = qlcnic_set_flags,
.phys_id = qlcnic_blink_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84360a..ad9d167723c 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
-#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -448,7 +449,7 @@ enum {
#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
-#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
/*
* Register offsets for MN
@@ -562,39 +563,16 @@ enum {
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
-#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
-
#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
#define CRB_V2P_0 (QLCNIC_REG(0x290))
#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
-#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
-#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
-#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
-
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
- * capabilities register, can be used to selectively enable/disable features
- * for backward compability
- */
-#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
-#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
-
-#define INTR_SCHEME_PERPORT 0x1
-#define MSI_MODE_MULTIFUNC 0x1
-
-/* used for ethtool tests */
-#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
-
-/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
* which can be read by the Phantom host to get producer/consumer indexes from
* Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
@@ -693,15 +671,24 @@ enum {
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
-
- /* Device State */
-#define QLCNIC_DEV_COLD 1
-#define QLCNIC_DEV_INITALIZING 2
-#define QLCNIC_DEV_READY 3
-#define QLCNIC_DEV_NEED_RESET 4
-#define QLCNIC_DEV_NEED_QUISCENT 5
-#define QLCNIC_DEV_FAILED 6
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -709,9 +696,8 @@ enum {
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
-#define FW_POLL_DELAY (2 * HZ)
-#define FW_FAIL_THRESH 3
-#define FW_POLL_THRESH 10
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba455aa2..0c2e1f08f45 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
-#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base0 + (off))
-
-static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
- unsigned long off)
-{
- if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
- return PCI_OFFSET_FIRST_RANGE(adapter, off);
-
- return NULL;
-}
-
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
if (done == 1)
break;
- if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock;reg_id=%d\n",
+ sem, id_reg);
return -EIO;
+ }
msleep(1);
}
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ qlcnic_nic_add_mac(adapter, ha->addr);
}
}
@@ -787,9 +776,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
window = CRB_HI(off);
- if (adapter->ahw.crb_win == window)
- return;
-
writel(window, addr);
if (readl(addr) != window) {
if (printk_ratelimit())
@@ -797,7 +783,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
"failed to set CRB window to %d off 0x%lx\n",
window, off);
}
- adapter->ahw.crb_win = window;
}
int
@@ -878,13 +863,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
-
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
window = OCM_WIN_P3P(addr);
@@ -892,7 +870,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
/* read back to flush */
readl(adapter->ahw.ocm_win_crb);
- adapter->ahw.ocm_win = window;
*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
return 0;
}
@@ -901,8 +878,7 @@ static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
u64 *data, int op)
{
- void __iomem *addr, *mem_ptr = NULL;
- resource_size_t mem_base;
+ void __iomem *addr;
int ret;
u32 start;
@@ -912,21 +888,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
-
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
- }
+ addr = adapter->ahw.pci_base0 + start;
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
-noremap:
if (op == 0) /* read */
*data = readq(addr);
else /* write */
@@ -935,11 +898,31 @@ noremap:
unlock:
mutex_unlock(&adapter->ahw.mem_lock);
- if (mem_ptr)
- iounmap(mem_ptr);
return ret;
}
+void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
int
@@ -948,7 +931,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
{
int i, j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -957,7 +939,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -975,9 +957,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -985,30 +965,28 @@ correct:
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
}
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+
writel(data & 0xffffffff,
mem_crb + MIU_TEST_AGT_WRDATA(i));
writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1022,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1053,7 +1031,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -1073,9 +1051,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -1097,7 +1073,7 @@ correct:
ret = -EIO;
} else {
off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
+ if (off & 0xf)
off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
temp = readl(mem_crb + off8 + 4);
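For reference, the qlcnic_hw.c changes above replace per-access ioremap() of the test window with direct offsets into the already-mapped BAR 0, and the new CAMQM helpers serialize their 64-bit accesses with the adapter's memory mutex. A minimal sketch of that access pattern, using hypothetical names (my_dev, bar0, mem_lock, MY_WIN_BASE) rather than the driver's real structures:

#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_dev {
	void __iomem *bar0;		/* BAR 0, ioremapped once at probe */
	struct mutex mem_lock;		/* serializes indirect memory access */
};

#define MY_WIN_BASE	0x100000	/* hypothetical window offset in BAR 0 */

/* 64-bit read through the fixed window; readq/writeq assume a 64-bit
 * capable platform (32-bit arches may have to split the access). */
static u64 my_win_read(struct my_dev *d, unsigned long off)
{
	u64 val;

	mutex_lock(&d->mem_lock);
	val = readq(d->bar0 + MY_WIN_BASE + off);
	mutex_unlock(&d->mem_lock);

	return val;
}

static void my_win_write(struct my_dev *d, unsigned long off, u64 val)
{
	mutex_lock(&d->mem_lock);
	writeq(val, d->bar0 + MY_WIN_BASE + off);
	mutex_unlock(&d->mem_lock);
}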
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124048f..71a4e664ad7 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -210,7 +210,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- return -ENOMEM;
+ goto err_out;
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
@@ -221,7 +221,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring = kzalloc(size, GFP_KERNEL);
if (rds_ring == NULL) {
dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
- return -ENOMEM;
+ goto err_out;
}
recv_ctx->rds_rings = rds_ring;
@@ -230,17 +230,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
- if (adapter->ahw.cut_through) {
- rds_ring->dma_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- rds_ring->skb_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- } else {
- rds_ring->dma_size =
- QLCNIC_P3_RX_BUF_MAX_LEN;
- rds_ring->skb_size =
- rds_ring->dma_size + NET_IP_ALIGN;
- }
+ rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_JUMBO:
@@ -254,13 +245,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
break;
-
- case RCV_RING_LRO:
- rds_ring->num_desc = adapter->num_lro_rxd;
- rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
- rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
- break;
-
}
rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
@@ -530,6 +514,36 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
return 0;
}
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = (val >> (adapter->portnum * 4)) & 0xf;
+
+ if ((val & 0x3) != 1) {
+ dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
+ val);
+ return -EIO;
+ }
+
+ adapter->physical_port = (val >> 2);
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = 30;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = 10;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
@@ -540,12 +554,10 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
- if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
- if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
- return 1;
- }
return 0;
}
@@ -612,7 +624,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -621,7 +633,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -647,7 +659,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -655,7 +667,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -950,6 +962,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -1162,9 +1184,6 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
- QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
@@ -1254,13 +1273,13 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (!adapter->ahw.cut_through)
- skb_reserve(skb, 2);
+ skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return -ENOMEM;
@@ -1285,8 +1304,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb)
+ if (!skb) {
+ adapter->stats.null_skb++;
goto no_skb;
+ }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1476,6 +1497,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
skip:
for (; desc_cnt > 0; desc_cnt--) {
@@ -1523,9 +1546,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
- spin_lock(&rds_ring->lock);
head = &rds_ring->free_list;
while (!list_empty(head)) {
@@ -1547,13 +1571,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
producer = get_next_index(producer, rds_ring->num_desc);
}
- spin_unlock(&rds_ring->lock);
if (count) {
rds_ring->producer = producer;
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
+ spin_unlock(&rds_ring->lock);
}
static void
@@ -1565,10 +1589,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
int producer, count = 0;
struct list_head *head;
- producer = rds_ring->producer;
if (!spin_trylock(&rds_ring->lock))
return;
+ producer = rds_ring->producer;
+
head = &rds_ring->free_list;
while (!list_empty(head)) {
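The qlcnic_validate_bootld()/qlcnic_validate_fw() hunks above are endianness fixes: entry_size and findex are stored little-endian in the firmware image, so the byte swap has to wrap the raw field before it takes part in host-order arithmetic. (The driver uses cpu_to_le32, which performs the same swap; le32_to_cpu is the semantically matching helper and is what the sketch below uses.) A small illustration with a hypothetical on-image descriptor:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Hypothetical image table descriptor: fields stored little-endian. */
struct img_table_desc {
	__le32 findex;
	__le32 num_entries;
	__le32 entry_size;
};

static u32 img_table_end(const struct img_table_desc *tab, u32 idx)
{
	/*
	 * Convert each on-image field first, then multiply in host order.
	 * Wrapping the whole product instead, as in
	 * cpu_to_le32(entry_size * (idx + 1)), byte-swaps the result of a
	 * mixed-endian multiply and produces garbage on big-endian hosts.
	 */
	return le32_to_cpu(tab->findex) +
	       le32_to_cpu(tab->entry_size) * (idx + 1);
}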
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1f998..23ea9caa526 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -61,6 +61,10 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+static int load_fw_file;
+module_param(load_fw_file, int, 0644);
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+
static int __devinit qlcnic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -84,6 +88,7 @@ static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -208,6 +213,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
@@ -222,6 +230,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
@@ -233,67 +244,6 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
-}
-
-static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u64 mask, cmask;
-
- adapter->pci_using_dac = 0;
-
- mask = DMA_BIT_MASK(39);
- cmask = mask;
-
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
- adapter->pci_using_dac = 1;
- return 0;
- }
-
- return -EIO;
-}
-
-/* Update addressable range if firmware supports it */
-static int
-qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
-{
- int change, shift, err;
- u64 mask, old_mask, old_cmask;
- struct pci_dev *pdev = adapter->pdev;
-
- change = 0;
-
- shift = QLCRD32(adapter, CRB_DMA_SHIFT);
- if (shift > 32)
- return 0;
-
- if (shift > 9)
- change = 1;
-
- if (change) {
- old_mask = pdev->dma_mask;
- old_cmask = pdev->dev.coherent_dma_mask;
-
- mask = DMA_BIT_MASK(32+shift);
-
- err = pci_set_dma_mask(pdev, mask);
- if (err)
- goto err_out;
-
- err = pci_set_consistent_dma_mask(pdev, mask);
- if (err)
- goto err_out;
- dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
- }
-
- return 0;
-
-err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
- return err;
}
static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
@@ -512,13 +462,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
- /*
- * Set the CRB window to invalid. If any register in window 0 is
- * accessed it should set the window to 0 and then reset it to 1.
- */
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
@@ -556,7 +499,9 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
qlcnic_boards[i].device == pdev->device &&
qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
qlcnic_boards[i].sub_device == pdev->subsystem_device) {
- strcpy(name, qlcnic_boards[i].short_name);
+ sprintf(name, "%pM: %s",
+ adapter->mac_addr,
+ qlcnic_boards[i].short_name);
found = 1;
break;
}
@@ -605,22 +550,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
brd_name, adapter->ahw.revision_id);
}
- if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
- adapter->driver_mismatch = 1;
- dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
- fw_major, fw_minor, fw_build);
- return;
- }
-
- i = QLCRD32(adapter, QLCNIC_SRE_MISC);
- adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
-
- dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
- fw_major, fw_minor, fw_build,
- adapter->ahw.cut_through ? "cut-through" : "legacy");
+ dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
+ fw_major, fw_minor, fw_build);
- if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
- adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
adapter->flags &= ~QLCNIC_LRO_ENABLED;
@@ -637,7 +570,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_txd = MAX_CMD_DESCRIPTORS;
- adapter->num_lro_rxd = 0;
adapter->max_rds_rings = 2;
}
@@ -646,11 +578,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
int val, err, first_boot;
- err = qlcnic_set_dma_mask(adapter);
- if (err)
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
return err;
-
- if (!qlcnic_can_start_firmware(adapter))
+ else if (!err)
goto wait_init;
first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -658,7 +589,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
/* This is the first boot after power up */
QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
- qlcnic_request_firmware(adapter);
+ if (load_fw_file)
+ qlcnic_request_firmware(adapter);
+ else
+ adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
err = qlcnic_need_fw_reset(adapter);
if (err < 0)
@@ -672,7 +606,6 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
msleep(1);
}
- QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
@@ -696,16 +629,18 @@ wait_init:
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
-
- qlcnic_update_dma_mask(adapter);
+ qlcnic_idc_debug_info(adapter, 1);
qlcnic_check_options(adapter);
adapter->need_fw_reset = 0;
- /* fall through and release firmware */
+ qlcnic_release_firmware(adapter);
+ return 0;
err_out:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
return err;
}
@@ -937,6 +872,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
struct qlcnic_host_sds_ring *sds_ring;
int ring;
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -950,11 +886,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->max_sds_rings = max_sds_rings;
if (qlcnic_attach(adapter))
- return;
+ goto out;
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
-
+out:
netif_device_attach(netdev);
}
@@ -976,8 +912,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->diag_test = test;
ret = qlcnic_attach(adapter);
- if (ret)
+ if (ret) {
+ netif_device_attach(netdev);
return ret;
+ }
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -985,6 +923,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_enable_int(sds_ring);
}
}
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -1010,23 +949,19 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (!err)
- err = __qlcnic_up(adapter, netdev);
-
- if (err)
- goto done;
+ __qlcnic_up(adapter, netdev);
}
netif_device_attach(netdev);
}
-done:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
+ struct net_device *netdev, u8 pci_using_dac)
{
int err;
struct pci_dev *pdev = adapter->pdev;
@@ -1049,7 +984,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- if (adapter->pci_using_dac) {
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
@@ -1079,6 +1014,22 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
return 0;
}
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+{
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ *pci_using_dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ *pci_using_dac = 0;
+ else {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1087,6 +1038,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
+ uint8_t pci_using_dac;
err = pci_enable_device(pdev);
if (err)
@@ -1097,6 +1049,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
+ err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+ if (err)
+ goto err_out_disable_pdev;
+
err = pci_request_regions(pdev, qlcnic_driver_name);
if (err)
goto err_out_disable_pdev;
@@ -1115,6 +1071,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->dev_rst_time = jiffies;
adapter->ahw.pci_func = pci_func_id;
revision_id = pdev->revision;
@@ -1139,21 +1096,23 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (qlcnic_setup_idc_param(adapter))
+ goto err_out_iounmap;
err = qlcnic_start_firmware(adapter);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
goto err_out_decr_ref;
-
- /*
- * See if the firmware gave us a virtual-physical port mapping.
- */
- adapter->physical_port = adapter->portnum;
+ }
qlcnic_clear_stats(adapter);
qlcnic_setup_intr(adapter);
- err = qlcnic_setup_netdev(adapter, netdev);
+ err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
goto err_out_disable_msi;
@@ -1304,9 +1263,6 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
@@ -1334,6 +1290,7 @@ err_out_detach:
qlcnic_detach(adapter);
err_out:
qlcnic_clr_all_drv_state(adapter);
+ netif_device_attach(netdev);
return err;
}
#endif
@@ -1570,6 +1527,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1586,8 +1548,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
- if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
goto drop_packet;
+ }
pbuf->skb = skb;
pbuf->frag_count = frag_count;
@@ -1739,6 +1703,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ QLCDB(adapter, DRV, "Resetting adapter\n");
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1750,7 +1715,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
- stats->rx_bytes = adapter->stats.rxbytes;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
stats->tx_bytes = adapter->stats.txbytes;
stats->rx_dropped = adapter->stats.rxdropped;
stats->tx_dropped = adapter->stats.txdropped;
@@ -1944,7 +1909,20 @@ static void qlcnic_poll_controller(struct net_device *netdev)
#endif
static void
-qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
{
u32 val;
@@ -1952,18 +1930,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
state != QLCNIC_DEV_NEED_QUISCENT);
if (qlcnic_api_lock(adapter))
- return ;
+ return -EIO;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
- val |= ((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
- val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
+
+ return 0;
}
static int
@@ -1975,7 +1955,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
return -EBUSY;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -1992,14 +1972,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
goto err;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2009,6 +1989,7 @@ err:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
+/* Grab api lock, before checking state */
static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
@@ -2024,73 +2005,103 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
return 1;
}
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
u32 val, prev_state;
- int cnt = 0;
- int portnum = adapter->portnum;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- if (!(val & ((int)0x1 << (portnum * 4)))) {
- val |= ((u32)0x1 << (portnum * 4));
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
- } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
- goto start_fw;
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
-start_fw:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
case QLCNIC_DEV_NEED_RESET:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << (portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
qlcnic_api_unlock(adapter);
return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
}
qlcnic_api_unlock(adapter);
- msleep(1000);
- while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
- ++cnt < 20)
+
+ do {
msleep(1000);
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (prev_state == QLCNIC_DEV_QUISCENT)
+ continue;
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
- if (cnt >= 20)
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timed out\n");
return -1;
+ }
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
}
static void
@@ -2098,44 +2109,84 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- int dev_state;
+ u32 dev_state = 0xf;
- if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ if (qlcnic_api_lock(adapter))
goto err_ret;
- if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (qlcnic_check_drv_state(adapter)) {
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
+ if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_QUISCENT);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ QLCDB(adapter, DRV, "Quiscing the driver\n");
+ qlcnic_idc_debug_info(adapter, 0);
+
+ qlcnic_api_unlock(adapter);
return;
}
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ qlcnic_api_unlock(adapter);
+
if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
-
goto err_ret;
}
+ qlcnic_api_unlock(adapter);
+
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
switch (dev_state) {
- case QLCNIC_DEV_READY:
- if (!qlcnic_start_firmware(adapter)) {
- qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
- return;
- }
+ case QLCNIC_DEV_QUISCENT:
+ case QLCNIC_DEV_NEED_QUISCENT:
+ case QLCNIC_DEV_NEED_RESET:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
case QLCNIC_DEV_FAILED:
break;
default:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
- return;
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
}
err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
qlcnic_clr_all_drv_state(adapter);
}
@@ -2163,7 +2214,8 @@ qlcnic_detach_work(struct work_struct *work)
if (adapter->temp == QLCNIC_TEMP_PANIC)
goto err_ret;
- qlcnic_set_drv_state(adapter, adapter->dev_state);
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ goto err_ret;
adapter->fw_wait_cnt = 0;
@@ -2172,10 +2224,14 @@ qlcnic_detach_work(struct work_struct *work)
return;
err_ret:
+ dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
+ status, adapter->temp);
+ netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter);
}
+/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
@@ -2186,9 +2242,10 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
- set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
}
qlcnic_api_unlock(adapter);
@@ -2233,9 +2290,8 @@ qlcnic_attach_work(struct work_struct *work)
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
- netif_device_attach(netdev);
-
done:
+ netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2253,10 +2309,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (qlcnic_check_temp(adapter))
goto detach;
- if (adapter->need_fw_reset) {
+ if (adapter->need_fw_reset)
qlcnic_dev_request_reset(adapter);
- goto detach;
- }
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
@@ -2285,8 +2339,11 @@ detach:
QLCNIC_DEV_NEED_RESET;
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
- !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ }
return 1;
}
@@ -2387,51 +2444,72 @@ static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
- if (offset < QLCNIC_PCI_CRBSPACE)
- return -EINVAL;
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
return size;
}
static ssize_t
-qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
return size;
}
@@ -2449,7 +2527,8 @@ qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t
-qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2470,7 +2549,8 @@ qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2553,24 +2633,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
-static int
-qlcnic_destip_supported(struct qlcnic_adapter *adapter)
-{
- if (adapter->ahw.cut_through)
- return 0;
-
- return 1;
-}
-
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
struct in_device *indev;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- if (!qlcnic_destip_supported(adapter))
- return;
-
indev = in_dev_get(dev);
if (!indev)
return;
@@ -2591,7 +2659,6 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2650,7 +2717,7 @@ recheck:
adapter = netdev_priv(dev);
- if (!adapter || !qlcnic_destip_supported(adapter))
+ if (!adapter)
goto done;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
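The qlcnic_main.c changes above drop the old 39-bit mask and the firmware-driven mask update in favour of the usual probe-time sequence: try a 64-bit DMA mask, fall back to 32-bit, and remember the result so NETIF_F_HIGHDMA can be advertised later. A generic sketch of that sequence (the qlcnic version is essentially the same, minus driver-specific naming):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pdev, u8 *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		*using_dac = 1;		/* 64-bit DMA, advertise NETIF_F_HIGHDMA */
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		*using_dac = 0;		/* fall back to 32-bit addressing */
	} else {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		return -EIO;
	}

	return 0;
}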
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b639ce..20624ba44a3 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
} p;
__le64 *addr;
u32 index;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
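The qlge.h hunk above switches from the PCI-specific unmap-state macros to the generic DMA API ones; the accessors used in qlge_main.c below change the same way (pci_unmap_addr becomes dma_unmap_addr, pci_unmap_len_set becomes dma_unmap_len_set, and so on). A minimal sketch of how the new macros are typically used, with a hypothetical rx_buf structure (mapping-error handling omitted):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct rx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* compiles to nothing if the arch */
	DEFINE_DMA_UNMAP_LEN(maplen);	/* does not need unmap state */
};

static void rx_buf_map(struct pci_dev *pdev, struct rx_buf *buf, size_t len)
{
	dma_addr_t map = pci_map_single(pdev, buf->skb->data, len,
					PCI_DMA_FROMDEVICE);

	dma_unmap_addr_set(buf, mapaddr, map);
	dma_unmap_len_set(buf, maplen, len);
}

static void rx_buf_unmap(struct pci_dev *pdev, struct rx_buf *buf)
{
	pci_unmap_single(pdev, dma_unmap_addr(buf, mapaddr),
			 dma_unmap_len(buf, maplen), PCI_DMA_FROMDEVICE);
}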
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 36266462893..68a1c9b91e7 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
for (i = 0; i < count; i += 8) {
printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x \n", i,
+ "%.08x %.08x %.08x\n", i,
tmp[i + 0],
tmp[i + 1],
tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- printk(KERN_ERR PFX "flags3 = %s %s %s \n",
+ printk(KERN_ERR PFX "flags3 = %s %s %s\n",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 7e09ff4a575..4892d64f4e0 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -181,8 +181,6 @@ quit:
spin_unlock(&qdev->stats_lock);
QL_DUMP_STAT(qdev);
-
- return;
}
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
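The multicast-list conversion seen in the qlcnic hunks above and in the qlge_main.c, r6040 and r8169 hunks below is mechanical: struct dev_mc_list / dmi_addr becomes struct netdev_hw_addr / addr, while netdev_for_each_mc_addr() keeps the same shape. A tiny sketch of the new-style walk, with a hypothetical hw_add_mc_filter() callback standing in for the per-driver register programming:

#include <linux/netdevice.h>

/* hypothetical helper that programs one multicast filter entry */
static void hw_add_mc_filter(struct net_device *dev, const u8 *mac);

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;

	netdev_for_each_mc_addr(ha, dev)	/* was: dev_mc_list / dmi_addr */
		hw_add_mc_filter(dev, ha->addr);
}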
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f266c0a..fa4b24c49f4 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr(lbq_desc, mapaddr),
+ dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
- pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen,
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
sbq_desc->p.skb = NULL;
return;
}
- pci_unmap_addr_set(sbq_desc, mapaddr, map);
- pci_unmap_len_set(sbq_desc, maplen,
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len = cpu_to_le32(len);
tbd->addr = cpu_to_le64(map);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
map_idx++;
/*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len =
cpu_to_le32((sizeof(struct tx_buf_desc) *
(frag_cnt - frag_idx)) | TX_DESC_C);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
sizeof(struct oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->addr = cpu_to_le64(map);
tbd->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
frag->size);
}
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc, mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(skb, length),
sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc,
mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
ql_realign_skb(skb, length);
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc,
+ dma_unmap_addr(sbq_desc,
mapaddr),
- pci_unmap_len(sbq_desc,
+ dma_unmap_len(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
return NULL;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
+ dma_unmap_addr(lbq_desc,
mapaddr),
- pci_unmap_len(lbq_desc, maplen),
+ dma_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
if (sbq_desc->p.skb) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (status)
goto exit;
i = 0;
- netdev_for_each_mc_addr(mc_ptr, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c1dcb..9a251acf5ab 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
+ netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
@@ -400,9 +400,6 @@ static void r6040_init_mac_regs(struct net_device *dev)
* we may got called by r6040_tx_timeout which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
-
- /* Check media */
- mii_check_media(&lp->mii_if, 1, 1);
}
static void r6040_tx_timeout(struct net_device *dev)
@@ -410,9 +407,9 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
+ netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x, PHY status %4.4x\n",
- dev->name, ioread16(ioaddr + MIER),
+ ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR),
r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
@@ -530,8 +527,6 @@ static int r6040_phy_mode_chk(struct net_device *dev)
phy_dat = 0x0000;
}
- mii_check_media(&lp->mii_if, 0, 1);
-
return phy_dat;
};
@@ -813,6 +808,9 @@ static void r6040_timer(unsigned long data)
/* Timer active again */
mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
+
+ /* Check media */
+ mii_check_media(&lp->mii_if, 1, 1);
}
/* Read/set MAC address routines */
@@ -897,7 +895,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
- printk(KERN_ERR DRV_NAME ": no tx descriptor\n");
+ netdev_err(dev, "no tx descriptor\n");
return NETDEV_TX_BUSY;
}
@@ -924,7 +922,6 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -937,7 +934,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
/* MAC Address */
@@ -972,8 +969,8 @@ static void r6040_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -990,9 +987,9 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Multicast Address 1~4 case */
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i < MCAST_MAX) {
- adrp = (u16 *) dmi->dmi_addr;
+ adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1087,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses "
"not supported by the card\n");
goto err_out;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses "
"not supported by the card\n");
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
- printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
@@ -1112,7 +1109,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
+ dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
@@ -1122,14 +1119,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
- printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
@@ -1156,7 +1152,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
@@ -1184,7 +1180,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Check the vendor ID on the PHY, if 0xffff assume none attached */
if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
- printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n");
+ dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
err = -ENODEV;
goto err_out_unmap;
}
@@ -1192,7 +1188,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Register net device. After this dev->name assign */
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
+ dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_unmap;
}
return 0;
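The r6040 hunks above convert raw printk(KERN_* DRV_NAME ...) calls to dev_err()/netdev_err()/netdev_warn(), which prefix the driver and device name automatically, so DRV_NAME, dev->name and pci_name() drop out of the format strings. A short before/after sketch with hypothetical messages:

#include <linux/netdevice.h>
#include <linux/pci.h>

static void example_report(struct pci_dev *pdev, struct net_device *dev)
{
	/* before: printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
	 *                pci_name(pdev)); */
	dev_err(&pdev->dev, "ioremap failed for device\n");

	/* before: printk(KERN_WARNING "%s: transmit timed out\n", dev->name); */
	netdev_warn(dev, "transmit timed out\n");
}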
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dd8106ff35a..217e709bda3 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
+ u32 saved_wolopts;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
+ /* This is to cancel a scheduled suspend if there's one. */
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
-static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u8 options;
-
- wol->wolopts = 0;
-
-#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
- wol->supported = WAKE_ANY;
-
- spin_lock_irq(&tp->lock);
+ u32 wolopts = 0;
options = RTL_R8(Config1);
if (!(options & PMEnable))
- goto out_unlock;
+ return 0;
options = RTL_R8(Config3);
if (options & LinkUp)
- wol->wolopts |= WAKE_PHY;
+ wolopts |= WAKE_PHY;
if (options & MagicPacket)
- wol->wolopts |= WAKE_MAGIC;
+ wolopts |= WAKE_MAGIC;
options = RTL_R8(Config5);
if (options & UWF)
- wol->wolopts |= WAKE_UCAST;
+ wolopts |= WAKE_UCAST;
if (options & BWF)
- wol->wolopts |= WAKE_BCAST;
+ wolopts |= WAKE_BCAST;
if (options & MWF)
- wol->wolopts |= WAKE_MCAST;
+ wolopts |= WAKE_MCAST;
-out_unlock:
- spin_unlock_irq(&tp->lock);
+ return wolopts;
}
-static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
void __iomem *ioaddr = tp->mmio_addr;
unsigned int i;
static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{ WAKE_ANY, Config5, LanWake }
};
- spin_lock_irq(&tp->lock);
-
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
- if (wol->wolopts & cfg[i].opt)
+ if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
spin_unlock_irq(&tp->lock);
@@ -3192,6 +3208,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_runtime_idle(&pdev->dev);
+
out:
return rc;
@@ -3213,10 +3235,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ pm_runtime_get_sync(&pdev->dev);
+
flush_scheduled_work();
unregister_netdev(dev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -3243,6 +3273,7 @@ static int rtl8169_open(struct net_device *dev)
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
+ pm_runtime_get_sync(&pdev->dev);
/*
 * Note that we use a magic value here, it's weird, I know
@@ -3263,7 +3294,7 @@ static int rtl8169_open(struct net_device *dev)
tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr);
if (!tp->TxDescArray)
- goto out;
+ goto err_pm_runtime_put;
tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr);
@@ -3290,6 +3321,9 @@ static int rtl8169_open(struct net_device *dev)
rtl8169_request_timer(dev);
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
return retval;
@@ -3299,9 +3333,13 @@ err_release_ring_2:
err_free_rx_1:
pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
err_free_tx_0:
pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4720,6 +4758,8 @@ static int rtl8169_close(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ pm_runtime_get_sync(&pdev->dev);
+
/* update counters before going down */
rtl8169_update_counters(dev);
@@ -4734,6 +4774,8 @@ static int rtl8169_close(struct net_device *dev)
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -4759,12 +4801,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
@@ -4832,21 +4874,74 @@ static int rtl8169_suspend(struct device *device)
return 0;
}
+static void __rtl8169_resume(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- if (!netif_running(dev))
- goto out;
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
- netif_device_attach(dev);
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ __rtl8169_resume(dev);
- rtl8169_schedule_work(dev, rtl8169_reset_task);
-out:
return 0;
}
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ return -EBUSY;
+}
+
static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
@@ -4854,6 +4949,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
.thaw = rtl8169_resume,
.poweroff = rtl8169_suspend,
.restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
};
#define RTL8169_PM_OPS (&rtl8169_pm_ops)
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index f2e335f0d1b..e26e107f93e 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1467,7 +1467,6 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&rrpriv->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3de39..668327ccd8d 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
return NULL;
}
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt) {
txds++;
@@ -2943,7 +2943,6 @@ static void s2io_netpoll(struct net_device *dev)
}
}
enable_irq(dev->irq);
- return;
}
#endif
@@ -4202,7 +4201,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
}
- frg_len = skb->len - skb->data_len;
+ frg_len = skb_headlen(skb);
if (offload_type == SKB_GSO_UDP) {
int ufo_size;
@@ -4756,7 +4755,6 @@ reset:
s2io_stop_all_tx_queue(sp);
schedule_work(&sp->rst_timer_task);
sw_stat->soft_reset_cnt++;
- return;
}
/**
@@ -4965,7 +4963,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
static void s2io_set_multicast(struct net_device *dev)
{
int i, j, prev_cnt;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5092,12 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(sp->usr_addrs[i].addr, ha->addr,
ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= mclist->dmi_addr[j];
+ mac_addr |= ha->addr[j];
mac_addr <<= 8;
}
mac_addr >>= 8;
@@ -8645,7 +8643,6 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
first->truesize += skb->truesize;
lro->last_frag = skb;
swstats->clubbed_frms_cnt++;
- return;
}
/**
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f26344b36..a7ff8ea342b 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
} else {
skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
& S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx(skb);
@@ -853,8 +852,8 @@ static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
{
struct s6gmac *pd = netdev_priv(dev);
unsigned long flags;
+
spin_lock_irqsave(&pd->lock, flags);
- dev->trans_start = jiffies;
writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
0 << S6_GMAC_BURST_PREWR_CFE |
1 << S6_GMAC_BURST_PREWR_PPE |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index abc8eefdd4b..a9ae505e1ba 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -426,7 +426,6 @@ sb1000_send_command(const int ioaddr[], const char* name,
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
"%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
- return;
}
/* Card Read Status (to be used during frame rx) */
@@ -438,7 +437,6 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
in[3] = inb(ioaddr[0] + 3);
in[4] = inb(ioaddr[0] + 4);
in[0] = inb(ioaddr[0] + 5);
- return;
}
/* Issue Read Command (to be used during frame rx) */
@@ -450,7 +448,6 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
sb1000_wait_for_ready_clear(ioaddr, name);
outb(0xa0, ioaddr[0] + 6);
sb1000_send_command(ioaddr, name, Command0);
- return;
}
@@ -733,7 +730,6 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
printk("\n");
}
}
- return;
}
/*
@@ -926,7 +922,6 @@ sb1000_error_dpc(struct net_device *dev)
sb1000_read_status(ioaddr, st);
if (st[1] & 0x10)
lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
- return;
}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 04efc0c1bda..1f3acc3a5df 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
#include <asm/io.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-/* This is only here until the firmware is ready. In that case,
- the firmware leaves the ethernet address in the register for us. */
-#ifdef CONFIG_SIBYTE_STANDALONE
-#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
-#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
-#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
-#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
-#endif
-
-
-/* These identify the driver base version and may not be removed. */
-#if 0
-static char version1[] __initdata =
-"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
-#endif
-
-
/* Operational parameters that usually are not changed. */
#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
********************************************************************* */
static char sbmac_string[] = "sb1250-mac";
-static char sbmac_pretty[] = "SB1250 MAC";
static char sbmac_mdio_string[] = "sb1250-mac-mdio";
@@ -2086,8 +2068,6 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->sbm_lock, flags);
return NETDEV_TX_OK;
@@ -2112,7 +2092,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
uint64_t reg;
void __iomem *port;
int idx;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct net_device *dev = sc->sbm_dev;
/*
@@ -2161,10 +2141,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx == MAC_ADDR_COUNT)
break;
- reg = sbmac_addr2reg(mclist->dmi_addr);
+ reg = sbmac_addr2reg(ha->addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
@@ -2182,85 +2162,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
}
}
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-/**********************************************************************
- * SBMAC_PARSE_XDIGIT(str)
- *
- * Parse a hex digit, returning its value
- *
- * Input parameters:
- * str - character
- *
- * Return value:
- * hex value, or -1 if invalid
- ********************************************************************* */
-
-static int sbmac_parse_xdigit(char str)
-{
- int digit;
-
- if ((str >= '0') && (str <= '9'))
- digit = str - '0';
- else if ((str >= 'a') && (str <= 'f'))
- digit = str - 'a' + 10;
- else if ((str >= 'A') && (str <= 'F'))
- digit = str - 'A' + 10;
- else
- return -1;
-
- return digit;
-}
-
-/**********************************************************************
- * SBMAC_PARSE_HWADDR(str,hwaddr)
- *
- * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
- * Ethernet address.
- *
- * Input parameters:
- * str - string
- * hwaddr - pointer to hardware address
- *
- * Return value:
- * 0 if ok, else -1
- ********************************************************************* */
-
-static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
-{
- int digit1,digit2;
- int idx = 6;
-
- while (*str && (idx > 0)) {
- digit1 = sbmac_parse_xdigit(*str);
- if (digit1 < 0)
- return -1;
- str++;
- if (!*str)
- return -1;
-
- if ((*str == ':') || (*str == '-')) {
- digit2 = digit1;
- digit1 = 0;
- }
- else {
- digit2 = sbmac_parse_xdigit(*str);
- if (digit2 < 0)
- return -1;
- str++;
- }
-
- *hwaddr++ = (digit1 << 4) | digit2;
- idx--;
-
- if (*str == '-')
- str++;
- if (*str == ':')
- str++;
- }
- return 0;
-}
-#endif
-
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
if (new_mtu > ENET_PACKET_SIZE)
@@ -2585,7 +2486,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
@@ -2662,7 +2563,6 @@ static int sbmac_close(struct net_device *dev)
static int sbmac_poll(struct napi_struct *napi, int budget)
{
struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
- struct net_device *dev = sc->sbm_dev;
int work_done;
work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@ -2766,162 +2666,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
return 0;
}
-
-static struct platform_device **sbmac_pldev;
-static int sbmac_max_units;
-
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-static void __init sbmac_setup_hwaddr(int idx, char *addr)
-{
- void __iomem *sbm_base;
- unsigned long start, end;
- uint8_t eaddr[6];
- uint64_t val;
-
- if (idx >= sbmac_max_units)
- return;
-
- start = A_MAC_CHANNEL_BASE(idx);
- end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- sbm_base = ioremap_nocache(start, end - start + 1);
- if (!sbm_base) {
- printk(KERN_ERR "%s: unable to map device registers\n",
- sbmac_string);
- return;
- }
-
- sbmac_parse_hwaddr(addr, eaddr);
- val = sbmac_addr2reg(eaddr);
- __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
- val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
-
- iounmap(sbm_base);
-}
-#endif
-
-static int __init sbmac_platform_probe_one(int idx)
-{
- struct platform_device *pldev;
- struct {
- struct resource r;
- char name[strlen(sbmac_pretty) + 4];
- } *res;
- int err;
-
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- printk(KERN_ERR "%s.%d: unable to allocate memory\n",
- sbmac_string, idx);
- err = -ENOMEM;
- goto out_err;
- }
-
- /*
- * This is the base address of the MAC.
- */
- snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
- res->r.name = res->name;
- res->r.flags = IORESOURCE_MEM;
- res->r.start = A_MAC_CHANNEL_BASE(idx);
- res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
- if (IS_ERR(pldev)) {
- printk(KERN_ERR "%s.%d: unable to register platform device\n",
- sbmac_string, idx);
- err = PTR_ERR(pldev);
- goto out_kfree;
- }
-
- if (!pldev->dev.driver) {
- err = 0; /* No hardware at this address. */
- goto out_unregister;
- }
-
- sbmac_pldev[idx] = pldev;
- return 0;
-
-out_unregister:
- platform_device_unregister(pldev);
-
-out_kfree:
- kfree(res);
-
-out_err:
- return err;
-}
-
-static void __init sbmac_platform_probe(void)
-{
- int i;
-
- /* Set the number of available units based on the SOC type. */
- switch (soc_type) {
- case K_SYS_SOC_TYPE_BCM1250:
- case K_SYS_SOC_TYPE_BCM1250_ALT:
- sbmac_max_units = 3;
- break;
- case K_SYS_SOC_TYPE_BCM1120:
- case K_SYS_SOC_TYPE_BCM1125:
- case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
- sbmac_max_units = 2;
- break;
- case K_SYS_SOC_TYPE_BCM1x55:
- case K_SYS_SOC_TYPE_BCM1x80:
- sbmac_max_units = 4;
- break;
- default:
- return; /* none */
- }
-
- /*
- * For bringup when not using the firmware, we can pre-fill
- * the MAC addresses using the environment variables
- * specified in this file (or maybe from the config file?)
- */
-#ifdef SBMAC_ETH0_HWADDR
- sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
-#endif
-#ifdef SBMAC_ETH1_HWADDR
- sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
-#endif
-#ifdef SBMAC_ETH2_HWADDR
- sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
-#endif
-#ifdef SBMAC_ETH3_HWADDR
- sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
-#endif
-
- sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
- GFP_KERNEL);
- if (!sbmac_pldev) {
- printk(KERN_ERR "%s: unable to allocate memory\n",
- sbmac_string);
- return;
- }
-
- /*
- * Walk through the Ethernet controllers and find
- * those who have their MAC addresses set.
- */
- for (i = 0; i < sbmac_max_units; i++)
- if (sbmac_platform_probe_one(i))
- break;
-}
-
-
-static void __exit sbmac_platform_cleanup(void)
-{
- int i;
-
- for (i = 0; i < sbmac_max_units; i++)
- platform_device_unregister(sbmac_pldev[i]);
- kfree(sbmac_pldev);
-}
-
-
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
.remove = __exit_p(sbmac_remove),
@@ -2932,20 +2676,11 @@ static struct platform_driver sbmac_driver = {
static int __init sbmac_init_module(void)
{
- int err;
-
- err = platform_driver_register(&sbmac_driver);
- if (err)
- return err;
-
- sbmac_platform_probe();
-
- return err;
+ return platform_driver_register(&sbmac_driver);
}
static void __exit sbmac_cleanup_module(void)
{
- sbmac_platform_cleanup();
platform_driver_unregister(&sbmac_driver);
}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c4787fff..8c4067af32b 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 crc;
unsigned bit = 0;
- crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
+ crc = ~ether_crc(ETH_ALEN, ha->addr);
crc >>= 24;
if (crc & 0x01) bit |= 0x02;
@@ -987,8 +987,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
mmiowb();
- dev->trans_start = jiffies;
-
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 374832cca11..d2fce98f557 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -390,7 +390,7 @@ static void seeq8005_timeout(struct net_device *dev)
tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* Try to restart the adaptor. */
seeq8005_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -411,7 +411,6 @@ static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
netif_stop_queue(dev);
hardware_send_packet(dev, buf, length);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += length;
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
@@ -579,7 +578,6 @@ static void seeq8005_rx(struct net_device *dev)
/* If any worth-while packets have been received, netif_rx()
has done a mark_bh(NET_BH) for us and will work on them
when we get to the bottom-half routine. */
- return;
}
/* The inverse routine to net_open(). */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 649a264d6a8..15646052723 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
-static int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
- int rx_packets;
+ int spent;
if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
!channel->enabled))
return 0;
- rx_packets = efx_nic_process_eventq(channel, rx_quota);
- if (rx_packets == 0)
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
return 0;
/* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
- return rx_packets;
+ return spent;
}
/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
- int rx_packets;
+ int spent;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
- rx_packets = efx_process_channel(channel, budget);
+ spent = efx_process_channel(channel, budget);
- if (rx_packets < budget) {
+ if (spent < budget) {
struct efx_nic *efx = channel->efx;
- if (channel->used_flags & EFX_USED_BY_RX &&
+ if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
efx_channel_processed(channel);
}
- return rx_packets;
+ return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUG_ON(!channel->used_flags);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
number = channel->channel;
- if (efx->n_channels > efx->n_rx_queues) {
- if (channel->channel < efx->n_rx_queues) {
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
- number -= efx->n_rx_queues;
+ number -= efx->n_rx_channels;
}
}
snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
-
- channel->used_flags = 0;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
- int wanted_ints;
- int rx_queues;
+ int n_channels;
- /* We want one RX queue and interrupt per CPU package
- * (or as specified by the rss_cpus module parameter).
- * We will need one channel per interrupt.
- */
- rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
- wanted_ints = min(wanted_ints, max_channels);
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
- for (i = 0; i < wanted_ints; i++)
+ for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, wanted_ints);
+ " available (%d < %d).\n", rc, n_channels);
EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= wanted_ints);
- wanted_ints = rc;
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- wanted_ints);
+ n_channels);
}
if (rc == 0) {
- efx->n_rx_queues = min(rx_queues, wanted_ints);
- efx->n_channels = wanted_ints;
- for (i = 0; i < wanted_ints; i++)
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ for (i = 0; i < n_channels; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_rx_queues = 1;
efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_rx_queues = 1;
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- efx_for_each_tx_queue(tx_queue, efx) {
- if (separate_tx_channels)
- tx_queue->channel = &efx->channel[efx->n_channels-1];
- else
- tx_queue->channel = &efx->channel[0];
- tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+ channel->tx_queue = &efx->tx_queue[
+ (channel->channel - tx_channel_offset) *
+ EFX_TXQ_TYPES];
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->channel = channel;
+ }
}
- efx_for_each_rx_queue(rx_queue, efx) {
+ efx_for_each_rx_queue(rx_queue, efx)
rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and RX queues by trying to hook
+ /* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
efx_set_channels(efx);
+ efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
- efx_wake_queue(efx);
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ if (efx_dev_registered(efx))
+ efx_wake_queue(channel);
efx_start_channel(channel);
+ }
efx_nic_enable_interrupts(efx);
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- efx_stop_queue(efx);
+ struct efx_channel *channel;
+ efx_for_each_channel(channel, efx)
+ efx_stop_queue(channel);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
- " resetting channels\n",
- atomic_read(&efx->netif_stop_count), efx->port_enabled);
+ EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1603,7 +1612,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
@@ -1615,8 +1624,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(mc_list, net_dev) {
- crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->net_dev = net_dev;
efx->rx_checksum_enabled = true;
- spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
- atomic_set(&efx->netif_stop_count, 1);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
channel = &efx->channel[i];
channel->efx = efx;
channel->channel = i;
channel->work_pending = false;
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
}
- for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+ for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
tx_queue = &efx->tx_queue[i];
tx_queue->efx = efx;
tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev(sizeof(*efx));
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb..ffd708c5304 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
#define EFX_TXQ_SIZE 1024
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928..22026bfbc4c 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
{
struct efx_tx_queue *tx_queue;
- efx_for_each_tx_queue(tx_queue, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
channel = tx_queue->channel;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
- if (channel->used_flags != EFX_USED_BY_RX_TX)
+ if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
channel->irq_moderation;
else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+ if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
EFX_ERR(efx, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 08278e7302b..655b697b45b 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- /* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
-
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
EFX_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* Ensure the correct MAC is selected before statistics
* are re-enabled by the caller */
efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ /* This can run even when the GMAC is selected */
+ falcon_setup_xaui(efx);
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a2..c84a2ce2ccb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
*************************************************************************/
/* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+void falcon_setup_xaui(struct efx_nic *efx)
{
efx_oword_t sdctl, txdrv;
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
+static void falcon_ack_status_intr(struct efx_nic *efx)
{
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
- /* We expect xgmii faults if the wireside link is up */
+ /* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
return;
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
if (efx->xmac_poll_required)
return;
- /* Flush the ISR */
- if (enable)
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MSK_RMTFLT, !enable,
- FRF_AB_XM_MSK_LCLFLT, !enable);
- efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
- falcon_mask_status_intr(efx, false);
-
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
return 0;
}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
!efx->xmac_poll_required)
return;
- falcon_mask_status_intr(efx, false);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
}
struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c7741..93cc3c1b945 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
}
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
- return rc;
+ goto fail1;
type = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = efx_mcdi_nvram_test(efx, type);
if (rc)
- return rc;
+ goto fail2;
}
type++;
nvram_types >>= 1;
}
return 0;
+
+fail2:
+ EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+fail1:
+ EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
}
static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
if (rc)
return rc;
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
- return -EINVAL;
+ return -EIO;
/* Print out any recorded assertion state */
flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
goto fail;
if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
goto fail;
if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412..39182631ac9 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
- int period = 1000;
+ int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- if (enable)
- EFX_POPULATE_DWORD_6(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 1,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
- else
- EFX_POPULATE_DWORD_5(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 0,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+ EFX_POPULATE_DWORD_7(*cmd_ptr,
+ MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+ MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b..90359e64400 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
* bist output. The driver should only consume the BIST output
* after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * If a driver can't succesfully parse the BIST output, it should
+ * If a driver can't successfully parse the BIST output, it should
* still respect the pass/Fail in OUT.RESULT
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* SFT9001-specific: */
-/* (offset 4 unused?) */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
/* MC_CMD_PHY_STATS:
* Get generic PHY statistics
*
- * This call returns the statistics for a generic PHY, by direct DMA
- * into host memory, in a sparse array (indexed by the enumerate).
- * Each value is represented by a 32bit number.
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumeration). Each value is represented by
+ * a 32-bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are DMAed to that (page-aligned) location.
*
* Locks required: None
* Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
#define MC_CMD_PHY_STATS_IN_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_LEN 0
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
/* Unified MAC statistics enumeration */
#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes in ways that would never be
+ * useful in normal operation and so need a special command to change. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP),
+ * followed by 4 32-bit values: min(warning), max(warning), min(fatal), max(fatal). Which
+ * of these limits are meaningful and how they are interpreted is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
* used for post-3.0 extensions. If you run out of space, look for gaps or
* commands that are unused in the existing range. */
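Below is a minimal sketch, assuming the MCDI helpers used elsewhere in this series (MCDI_SET_DWORD, efx_mcdi_rpc), of how a driver-side wrapper could issue the new MC_CMD_SENSOR_SET_LIMS request; the helper name efx_mcdi_sensor_set_lims is hypothetical and not part of the patch.

static int efx_mcdi_sensor_set_lims(struct efx_nic *efx, u32 sensor,
				    u32 low0, u32 hi0, u32 low1, u32 hi1)
{
	u8 inbuf[MC_CMD_SENSOR_SET_LIMS_IN_LEN];

	/* Fill the 20-byte request laid out by the _OFST defines above */
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_SENSOR, sensor);
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW0, low0);
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI0, hi0);
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_LOW1, low1);
	MCDI_SET_DWORD(inbuf, SENSOR_SET_LIMS_IN_HI1, hi1);

	/* No output payload is defined; failures surface as ENOENT/EINVAL */
	return efx_mcdi_rpc(efx, MC_CMD_SENSOR_SET_LIMS, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}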
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f235469666..6032c0e1f1f 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mdio_10g.h"
+#include "nic.h"
+#include "selftest.h"
struct efx_mcdi_phy_cfg {
u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EMSGSIZE;
+ return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ u8 *buf, *ptr;
+ int rc;
+
+ buf = kzalloc(0x100, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+ NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ buf, 0x100, &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, run the long (link-breaking)
+ * cable test only when an offline self-test was requested; otherwise
+ * run the one we support */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.test_alive = efx_mcdi_phy_test_alive,
- .run_tests = NULL,
- .test_name = NULL,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e27209..2e6fd89f2a7 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD 1
+#define EFX_TXQ_TYPES 2
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
/**
* struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
- int queue;
+ unsigned queue;
struct efx_channel *channel;
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
};
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
char name[IFNAMSIZ + 6];
- int used_flags;
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
+ struct efx_tx_queue *tx_queue;
+ atomic_t tx_stop_count;
+ spinlock_t tx_stop_lock;
};
enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
* @rx_queue: RX DMA queues
* @channel: Channels
* @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
* @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
* This register is written with the SMP processor ID whenever an
* interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
* @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+ struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
unsigned next_buffer_table;
- int n_rx_queues;
- int n_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -755,7 +760,8 @@ struct efx_nic {
struct efx_buffer irq_status;
volatile signed int last_irq_cpu;
- unsigned long irq_zero_count;
+ unsigned irq_zero_count;
+ unsigned fatal_irq_level;
struct efx_spi_device *spi_flash;
struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- atomic_t netif_stop_count;
- spinlock_t netif_stop_lock;
-
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &_efx->channel[0]; \
- _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
- _channel++) \
- if (!_channel->used_flags) \
- continue; \
- else
+ for (_channel = &((_efx)->channel[0]); \
+ _channel < &((_efx)->channel[(efx)->n_channels]); \
+ _channel++)
/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &_efx->tx_queue[0]; \
- _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
+ for (_tx_queue = &((_efx)->tx_queue[0]); \
+ _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
+ (_efx)->n_tx_channels]); \
_tx_queue++)
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = &_channel->efx->tx_queue[0]; \
- _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
- _tx_queue++) \
- if (_tx_queue->channel != _channel) \
- continue; \
- else
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ for (_rx_queue = &((_efx)->rx_queue[0]); \
+ _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
_rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
+ for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
_rx_queue; \
_rx_queue = NULL) \
- if (_rx_queue->channel != _channel) \
+ if (_rx_queue->channel != (_channel)) \
continue; \
else
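A minimal sketch of the queue layout introduced above (editor's illustration, not part of the patch; the helper name efx_core_txq_to_hw is hypothetical): core TX queue i owns hardware queues i * EFX_TXQ_TYPES and i * EFX_TXQ_TYPES + EFX_TXQ_TYPE_OFFLOAD, matching the channel->tx_queue assignment in efx_set_channels() and the checksum test in nic.c below.

static inline struct efx_tx_queue *
efx_core_txq_to_hw(struct efx_nic *efx, unsigned core_queue, bool csum_offload)
{
	/* The low bit of the hardware queue number selects the variant
	 * with checksum offload enabled (EFX_TXQ_TYPE_OFFLOAD). */
	return &efx->tx_queue[core_queue * EFX_TXQ_TYPES +
			      (csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0)];
}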
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e34830..5d3aaec5855 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_oword_t reg;
/* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static void
+static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- channel->irq_mod_score +=
- (tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK;
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ EFX_TXQ_MASK);
+ channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
+
+ return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
}
}
-int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int rx_packets = 0;
+ int tx_packets = 0;
+ int spent = 0;
read_ptr = channel->eventq_read_ptr;
- do {
+ for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
+ /* Increment read pointer */
+ read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
- ++rx_packets;
+ if (++spent == budget)
+ goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- efx_handle_tx_event(channel, &event);
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets >= EFX_TXQ_SIZE) {
+ spent = budget;
+ goto out;
+ }
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
" (data " EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
+ }
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
-
- } while (rx_packets < rx_quota);
-
+out:
channel->eventq_read_ptr = read_ptr;
- return rx_packets;
+ return spent;
}
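
The reworked efx_nic_process_eventq() above takes a NAPI-style budget, counts RX work as "spent", and leaves unprocessed events on the ring once the budget (or a TX-completion ceiling) is hit, so the next poll resumes from the saved read pointer. A self-contained sketch of that budget loop with the hardware event format stripped out; all names here are placeholders:

#include <stdio.h>

enum ev_code { EV_NONE, EV_RX, EV_TX };

struct evq {
        enum ev_code ev[16];
        unsigned int read_ptr;
};

/* Drain events up to 'budget' units of RX work; return how much was
 * spent so the caller can decide whether to re-enable interrupts
 * (spent < budget) or ask to be polled again (spent == budget). */
static int process_eventq(struct evq *q, int budget)
{
        int spent = 0;

        for (;;) {
                enum ev_code code = q->ev[q->read_ptr];

                if (code == EV_NONE)            /* queue empty */
                        break;
                q->ev[q->read_ptr] = EV_NONE;   /* clear the slot */
                q->read_ptr = (q->read_ptr + 1) & 15;

                switch (code) {
                case EV_RX:
                        if (++spent == budget)
                                goto out;
                        break;
                case EV_TX:                     /* completions cost no budget */
                        break;
                default:
                        break;
                }
        }
out:
        return spent;
}

int main(void)
{
        struct evq q = { { EV_RX, EV_TX, EV_RX, EV_RX, EV_RX } };

        printf("spent %d of budget 3\n", process_eventq(&q, 3));
        return 0;
}
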
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
- if (ev_queue < EFX_TX_QUEUE_COUNT) {
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx->tx_queue + ev_queue;
tx_queue->flushed = FLUSH_DONE;
}
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (ev_queue < efx->n_rx_queues) {
+ if (ev_queue < efx->n_rx_channels) {
rx_queue = efx->rx_queue + ev_queue;
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
- unsigned int level = 0;
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Set the level always even if we're generating a test
- * interrupt, because our legacy interrupt handler is safe */
- level = 0x1f;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, level,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
- if (error == 0)
- goto out;
/* If this is a memory parity error dump which blocks are offending */
- mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
}
result = IRQ_HANDLED;
- } else if (EFX_WORKAROUND_15783(efx) &&
- efx->irq_zero_count++ == 0) {
+ } else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
- /* Ensure we rearm all event queues */
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
}
-
- result = IRQ_HANDLED;
}
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
- } else if (EFX_WORKAROUND_15783(efx)) {
- /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
- * because this might be a shared interrupt, but we do need to
- * check the channel every time and preemptively rearm it if
- * it's idle. */
- efx_for_each_channel(channel, efx) {
- if (!channel->work_pending)
- efx_nic_eventq_read_ack(channel);
- }
}
return result;
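
With EFX_WORKAROUND_15783 now applying everywhere, the legacy handler claims an ISR value of zero only the first time it sees one, so a genuinely shared interrupt line is not swallowed forever, and it either schedules or re-arms each event queue on that path. A toy model of the claim-once rule, using userspace stand-ins rather than the driver's types:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct nic {
        unsigned int irq_zero_count;
};

/* On a shared line we must not keep claiming interrupts we cannot
 * attribute to the device, or a stuck interrupt from another driver
 * would never be detected.  Claim a zero ISR read exactly once. */
static enum irqreturn legacy_isr(struct nic *nic, unsigned int isr)
{
        if (isr != 0)
                return IRQ_HANDLED;             /* clearly ours */

        return nic->irq_zero_count++ == 0 ? IRQ_HANDLED : IRQ_NONE;
}

int main(void)
{
        struct nic nic = { 0 };
        int a = legacy_isr(&nic, 0);            /* first zero read: claimed */
        int b = legacy_isr(&nic, 0);            /* later zero reads: not ours */
        int c = legacy_isr(&nic, 4);            /* real status: claimed */

        printf("%d %d %d\n", a, b, c);
        return 0;
}
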
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
/* Schedule processing of the channel */
efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_queues);
+ i % efx->n_rx_channels);
efx_writed(efx, &dword, offset);
i++;
}
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
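
efx_nic_init_common() now decides the fatal-interrupt level once, based on the workaround and interrupt mode, and stores it in efx->fatal_irq_level; efx_nic_interrupts() and the per-channel handlers then just compare against the stored value. A small sketch of hoisting that per-call decision into init time; the field and constant names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct nic {
        unsigned int fatal_irq_level;   /* decided once at init time */
        bool use_msi;
        bool workaround_17213;
};

static void nic_init_common(struct nic *nic)
{
        /* Pick the level once: an out-of-range level when the legacy
         * interrupt workaround applies, otherwise MSI-X vector 0. */
        if (nic->workaround_17213 && !nic->use_msi)
                nic->fatal_irq_level = 0x1f;
        else
                nic->fatal_irq_level = 0;
}

/* Per-channel MSI handler: only the designated channel pays for the
 * fatal-error check. */
static void msi_isr(const struct nic *nic, unsigned int channel)
{
        if (channel == nic->fatal_irq_level)
                printf("channel %u checks the fatal interrupt status\n",
                       channel);
}

int main(void)
{
        struct nic nic = { .use_msi = true };

        nic_init_common(&nic);
        msi_isr(&nic, 0);
        msi_isr(&nic, 1);
        return 0;
}
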
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 3166bafdfbe..bbc2c0c2f84 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
+ * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
u64 fw_version;
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
+ u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae..371e86cc090 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test every TX queue */
- efx_for_each_tx_queue(tx_queue, efx) {
- state->offload_csum = (tx_queue->queue ==
- EFX_TX_QUEUE_OFFLOAD_CSUM);
+ /* Test both types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99..aed495a4dad 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TX_QUEUE_COUNT];
- int tx_done[EFX_TX_QUEUE_COUNT];
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e0c46f59d1f..727b4228e08 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ get_random_bytes(&nic_data->ipv6_rss_key,
+ sizeof(nic_data->ipv6_rss_key));
+
return 0;
fail5:
@@ -293,6 +297,7 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
EFX_ERR(efx, "ignoring RX flow control thresholds\n");
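
The Siena hunk programs a random 40-byte Toeplitz key into three registers: two full 128-bit writes plus an 8-byte remainder, which is exactly what the BUILD_BUG_ON pins down (40 == 2 * 16 + TKEY_HI_WIDTH / 8). A reduced sketch of splitting such a key across fixed-width register writes; the register numbers and helper are stand-ins, not the efx_writeo() interface:

#include <stdio.h>
#include <string.h>

#define KEY_BYTES       40
#define REG_BYTES       16              /* one 128-bit register */

/* Stand-in for a 128-bit register write */
static void write_reg(int reg, const unsigned char *val, size_t len)
{
        unsigned char buf[REG_BYTES] = { 0 };

        memcpy(buf, val, len);          /* short final chunk is zero-padded */
        printf("reg %d <= %zu key bytes\n", reg, len);
}

/* A 40-byte Toeplitz key does not fit one register, so it goes out as
 * two full registers plus an 8-byte remainder in a third. */
static void load_rss_key(const unsigned char key[KEY_BYTES])
{
        write_reg(1, key, REG_BYTES);
        write_reg(2, key + REG_BYTES, REG_BYTES);
        write_reg(3, key + 2 * REG_BYTES, KEY_BYTES - 2 * REG_BYTES);
}

int main(void)
{
        unsigned char key[KEY_BYTES] = { 0 };   /* the driver randomizes this */

        load_rss_key(key);
        return 0;
}
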
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f7..6bb12a87ef2 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
*/
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
{
- spin_lock_bh(&efx->netif_stop_lock);
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
+ spin_lock_bh(&channel->tx_stop_lock);
EFX_TRACE(efx, "stop TX queue\n");
- atomic_inc(&efx->netif_stop_count);
- netif_stop_queue(efx->net_dev);
+ atomic_inc(&channel->tx_stop_count);
+ netif_tx_stop_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock_bh(&efx->netif_stop_lock);
+ spin_unlock_bh(&channel->tx_stop_lock);
}
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
local_bh_disable();
- if (atomic_dec_and_lock(&efx->netif_stop_count,
- &efx->netif_stop_lock)) {
+ if (atomic_dec_and_lock(&channel->tx_stop_count,
+ &channel->tx_stop_lock)) {
EFX_TRACE(efx, "waking TX queue\n");
- netif_wake_queue(efx->net_dev);
- spin_unlock(&efx->netif_stop_lock);
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
+ spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
}
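
efx_stop_queue()/efx_wake_queue() now operate per channel and keep a nesting count, since two hardware queues (checksummed and non-checksummed) feed one core netdev queue and either may ask for a stop. The sketch below keeps only the counting discipline and drops the spinlock and bottom-half protection the driver needs; the names are placeholders:

#include <stdio.h>

/* Userspace stand-in for the per-channel stop state */
struct channel {
        int tx_stop_count;
};

static void queue_stop(struct channel *ch)
{
        if (ch->tx_stop_count++ == 0)
                printf("netif queue stopped\n");
}

static void queue_wake(struct channel *ch)
{
        if (--ch->tx_stop_count == 0)
                printf("netif queue woken\n");
}

int main(void)
{
        struct channel ch = { 0 };

        queue_stop(&ch);        /* checksummed hardware queue filled up */
        queue_stop(&ch);        /* non-checksummed queue filled up too */
        queue_wake(&ch);        /* first one drained: still stopped */
        queue_wake(&ch);        /* both drained: core queue woken */
        return 0;
}
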
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
rc = NETDEV_TX_BUSY;
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
+ tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
- else
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+ tx_queue += EFX_TXQ_TYPE_OFFLOAD;
return efx_enqueue_skb(tx_queue, skb);
}
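
efx_hard_start_xmit() now derives the hardware queue from the skb's core queue mapping: each core queue owns EFX_TXQ_TYPES adjacent hardware queues, and the checksum-offload queue is selected by adding EFX_TXQ_TYPE_OFFLOAD. A standalone illustration of that index arithmetic; the constants mirror the idea, not the driver headers:

#include <stdio.h>

#define TXQ_TYPES               2       /* with and without checksum offload */
#define TXQ_TYPE_OFFLOAD        1

/* Each core (per-CPU) queue owns TXQ_TYPES adjacent hardware queues;
 * the checksum-offload queue is the second of the pair. */
static unsigned int hw_txq_index(unsigned int core_queue, int needs_csum)
{
        unsigned int index = core_queue * TXQ_TYPES;

        if (needs_csum)
                index += TXQ_TYPE_OFFLOAD;
        return index;
}

int main(void)
{
        printf("core 0, no csum -> hw %u\n", hw_txq_index(0, 0));
        printf("core 0, csum    -> hw %u\n", hw_txq_index(0, 1));
        printf("core 3, csum    -> hw %u\n", hw_txq_index(3, 1));
        return 0;
}
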
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_lock(efx->net_dev);
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(efx);
+ efx_wake_queue(tx_queue->channel);
}
netif_tx_unlock(efx->net_dev);
}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->efx);
+ efx_wake_queue(tx_queue->channel);
}
}
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Stop the queue if it wasn't stopped before. */
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e48..518f7fc9147 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index c8fc896fc46..cc4bd8c65f8 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -574,7 +574,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -638,8 +638,6 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
kick_tx(dev, sp, hregs);
- dev->trans_start = jiffies;
-
if (!TX_BUFFS_AVAIL(sp))
netif_stop_queue(dev);
spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -652,7 +650,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
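
The trans_start changes here and in the following drivers follow one convention: the core now records the transmit time per TX queue, so start_xmit no longer touches dev->trans_start, while timeout handlers still refresh it (hence the "prevent tx timeout" comment) so the watchdog does not refire straight after a reset. A toy watchdog showing why that refresh matters; all types here are userspace stand-ins:

#include <stdio.h>

/* "jiffies" is a plain counter and the watchdog fires when the queue
 * is stopped and trans_start is too old. */
static unsigned long jiffies;

struct dev {
        unsigned long trans_start;
        int queue_stopped;
};

#define WATCHDOG_TIMEO  5

static int watchdog_expired(const struct dev *d)
{
        return d->queue_stopped && jiffies - d->trans_start > WATCHDOG_TIMEO;
}

/* The timeout handler resets the hardware, then refreshes trans_start
 * so the watchdog does not fire again right after the reset. */
static void tx_timeout(struct dev *d)
{
        /* ... reset hardware here ... */
        d->trans_start = jiffies;       /* prevent another tx timeout */
        d->queue_stopped = 0;           /* netif_wake_queue() equivalent */
}

int main(void)
{
        struct dev d = { .trans_start = 0, .queue_stopped = 1 };

        jiffies = 10;
        if (watchdog_expired(&d))
                tx_timeout(&d);
        printf("expired again? %d\n", watchdog_expired(&d));
        return 0;
}
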
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6242b85d5d1..501a55ffce5 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1148,8 +1148,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
- ndev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1296,6 +1294,9 @@ static int sh_mdio_release(struct net_device *ndev)
/* remove mdio bus info from net_device */
dev_set_drvdata(&ndev->dev, NULL);
+ /* free interrupts memory */
+ kfree(bus->irq);
+
/* free bitbang info */
free_mdio_bitbang(bus);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce752bbf..a5d6a6bd0c1 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
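
The dev_mc_list to netdev_hw_addr conversion seen here recurs in sis900, skfp, skge, sky2, smc911x and smc9194 below: iterate the device's multicast list, hash each address and set one bit per bucket in a small filter. A self-contained model of that hash-filter loop; the hash is only a stand-in for ether_crc(), and the bucket width matches the 6-bit filters used above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy hash standing in for ether_crc(); only the bucket selection
 * matters for the illustration. */
static uint32_t toy_hash(const uint8_t *addr, size_t len)
{
        uint32_t h = 0;
        size_t i;

        for (i = 0; i < len; i++)
                h = h * 31 + addr[i];
        return h;
}

/* Build a 64-bit multicast hash filter: one bit per 6-bit bucket,
 * the same shape as the sis190 loop above. */
static void build_mc_filter(uint32_t mc_filter[2],
                            const uint8_t (*addrs)[6], size_t n)
{
        size_t i;

        memset(mc_filter, 0, 2 * sizeof(mc_filter[0]));
        for (i = 0; i < n; i++) {
                unsigned int bit_nr = toy_hash(addrs[i], 6) & 0x3f;

                mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
        }
}

int main(void)
{
        const uint8_t addrs[2][6] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
                { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 },
        };
        uint32_t mc_filter[2];

        build_mc_filter(mc_filter, addrs, 2);
        printf("filter = %08x %08x\n", (unsigned)mc_filter[1],
               (unsigned)mc_filter[0]);
        return 0;
}
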
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731c4f0..bbbded76ff1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -858,7 +858,6 @@ static void mdio_reset(long mdio_addr)
outl(MDDIR | MDIO | MDC, mdio_addr);
mdio_delay();
}
- return;
}
/**
@@ -953,8 +952,6 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
mdio_delay();
}
outl(0x00, mdio_addr);
-
- return;
}
@@ -1264,7 +1261,6 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
(reg14h | 0x2000) & 0xBFFF);
}
- return;
}
/**
@@ -1499,7 +1495,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
}
if(netif_msg_link(sis_priv))
- printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
net_dev->name,
*speed == HW_SPEED_100_MBPS ?
"100mbps" : "10mbps",
@@ -1523,7 +1519,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
int i;
if(netif_msg_tx_err(sis_priv))
- printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
/* Disable interrupts by clearing the interrupt mask. */
@@ -1553,14 +1549,13 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
+ net_dev->trans_start = jiffies; /* prevent tx timeout */
/* load Transmit Descriptor Register */
outl(sis_priv->tx_ring_dma, ioaddr + txdp);
/* Enable all known interrupts by setting the interrupt mask. */
outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
- return;
}
/**
@@ -1623,8 +1618,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(sis_priv))
printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
"to slot %d.\n",
@@ -2298,12 +2291,14 @@ static void set_rx_mode(struct net_device *net_dev)
/* Accept Broadcast packet, destination address matchs our
* MAC address, use Receive Filter to reject unwanted MCAST
* packets */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = RFAAB;
- netdev_for_each_mc_addr(mclist, net_dev) {
- unsigned int bit_nr =
- sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ unsigned int bit_nr;
+
+ bit_nr = sis900_mcast_bitnr(ha->addr,
+ sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
}
}
@@ -2330,8 +2325,6 @@ static void set_rx_mode(struct net_device *net_dev)
/* restore cr */
outl(cr_saved, ioaddr + cr);
}
-
- return;
}
/**
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb3b28..9d8d1ac4817 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
- DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n",
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee05ed..ba45bc794d7 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
mib = phy->mib ;
- DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ;
+ DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
bit++ ;
switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
- DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ;
+ DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
}
/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b75abc..31b2dabf094 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -844,7 +844,6 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&bp->DriverLock, Flags);
skfp_ctl_set_multicast_list_wo_lock(dev);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
- return;
} // skfp_ctl_set_multicast_list
@@ -852,7 +851,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -876,13 +875,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* use exact filtering */
// point to first multicast addr
- netdev_for_each_mc_addr(dmi, dev) {
- mac_add_multicast(smc,
- (struct fddi_addr *)dmi->dmi_addr,
- 1);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_add_multicast(smc,
+ (struct fddi_addr *)ha->addr,
+ 1);
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- dmi->dmi_addr);
+ ha->addr);
}
} else { // more MC addresses than HW supports
@@ -898,7 +897,6 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Update adapter filters */
mac_update_multicast(smc);
}
- return;
} // skfp_ctl_set_multicast_list_wo_lock
@@ -1076,7 +1074,6 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} // skfp_send_pkt
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 83d16fecfac..6f35bb77595 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -574,7 +574,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
if (smt_check_para(smc,sm,plist_nif)) {
DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
break ;
- } ;
+ }
switch (sm->smt_type) {
case SMT_ANNOUNCE :
case SMT_REQUEST :
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713b744..40882b3faba 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb70609f2..40e5c46e757 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -984,8 +984,8 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
wmb();
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, bufsize);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, bufsize);
}
/* Resume receiving using existing skb,
@@ -1018,8 +1018,8 @@ static void skge_rx_clean(struct skge_port *skge)
rd->control = 0;
if (e->skb) {
pci_unmap_single(hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(e->skb);
e->skb = NULL;
@@ -2756,8 +2756,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, len);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, len);
td->dma_lo = map;
td->dma_hi = map >> 32;
@@ -2799,8 +2799,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, frag->size);
tf->control = BMU_OWN | BMU_SW | control | frag->size;
}
@@ -2837,12 +2837,12 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
/* skb header vs. fragment */
if (control & BMU_STF)
- pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
else
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u32 mode;
u8 filter[8];
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- genesis_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
}
xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- yukon_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
}
@@ -3060,11 +3060,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_dma_sync_single_for_cpu(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
@@ -3075,8 +3075,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_unmap_single(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring: \n");
+ seq_printf(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 831de1b6e96..507addcaffa 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2393,8 +2393,8 @@ struct skge_element {
struct skge_element *next;
void *desc;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct skge_ring {
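
The skge and sky2 hunks swap the deprecated DECLARE_PCI_UNMAP_*/pci_unmap_* helpers for the generic DEFINE_DMA_UNMAP_*/dma_unmap_* ones from <linux/dma-mapping.h>; either way, the point is that the bookkeeping fields and their accessors compile away on configurations that never need the address or length at unmap time. A reduced userspace model of that pattern, with shortened macro names rather than the kernel's:

#include <stdio.h>

/* Reduced model of DEFINE_DMA_UNMAP_ADDR()/dma_unmap_addr_set(): when
 * the platform never needs the DMA address at unmap time, the field
 * and every access to it compile away to nothing. */
#ifndef OMIT_UNMAP_STATE
#define DEFINE_UNMAP_ADDR(name)         unsigned long long name
#define unmap_addr_set(p, name, val)    ((p)->name = (val))
#define unmap_addr(p, name)             ((p)->name)
#else
#define DEFINE_UNMAP_ADDR(name)
#define unmap_addr_set(p, name, val)    do { } while (0)
#define unmap_addr(p, name)             (0)
#endif

struct ring_elem {
        void *buf;
        DEFINE_UNMAP_ADDR(mapaddr);
};

int main(void)
{
        struct ring_elem e = { 0 };

        unmap_addr_set(&e, mapaddr, 0x1000);
        printf("sizeof(ring_elem) = %zu, mapaddr = %llx\n", sizeof(e),
               (unsigned long long)unmap_addr(&e, mapaddr));
        return 0;
}

Building with -DOMIT_UNMAP_STATE shows the struct shrinking and the accessors becoming no-ops; the empty expansion leaves a trailing semicolon in the struct, which compilers tolerate just as they do for the kernel macros.
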
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797eb73..2111c7bbf57 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -53,7 +53,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.27"
+#define DRV_VERSION "1.28"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -70,18 +70,15 @@
VLAN:GSO + CKSUM + Data + skb_frags * DMA */
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
-#define TX_MAX_PENDING 4096
+#define TX_MAX_PENDING 1024
#define TX_DEF_PENDING 127
-#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
-#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG (5 * HZ)
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
-
#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
static const u32 default_msg =
@@ -227,7 +224,7 @@ static void sky2_power_on(struct sky2_hw *hw)
/* disable Core Clock Division, */
sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
/* enable bits are inverted */
sky2_write8(hw, B2_Y2_CLK_GATE,
Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +266,7 @@ static void sky2_power_on(struct sky2_hw *hw)
static void sky2_power_aux(struct sky2_hw *hw)
{
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
/* enable bits are inverted */
@@ -652,7 +649,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +821,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
/* WA DEV_472 -- looks like crossed wires on port 2 */
/* clear GMAC 1 Control reset */
sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +877,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (hw->dev[port]->mtu > ETH_DATA_LEN)
reg |= GM_SMOD_JUMBO_ENA;
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
gma_write16(hw, port, GM_SERIAL_MODE, reg);
/* virtual address for data */
@@ -1126,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
if (pci_dma_mapping_error(pdev, re->data_addr))
goto mapping_error;
- pci_unmap_len_set(re, data_size, size);
+ dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1148,7 +1151,7 @@ map_page_error:
PCI_DMA_FROMDEVICE);
}
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
mapping_error:
@@ -1163,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
struct sk_buff *skb = re->skb;
int i;
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1190,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (dev->features & NETIF_F_RXHASH) {
+ u32 key[nkeys];
+
+ get_random_bytes(key, nkeys * sizeof(u32));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
/*
* The RX Stop command will not work for Yukon-2 if the BMU does not
* reach the end of packet and since we can't make sure that we have
@@ -1414,8 +1450,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
/* These chips have no ram buffer?
* MAC Rx RAM Read is controlled by hardware */
if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
- (hw->chip_rev == CHIP_REV_YU_EC_U_A1 ||
- hw->chip_rev == CHIP_REV_YU_EC_U_B0))
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1423,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev);
+
/* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
@@ -1657,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
- pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
else if (re->flags & TX_MAP_PAGE)
- pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
re->flags = 0;
}
@@ -1773,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_SINGLE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, len);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1802,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, frag->size);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2142,7 +2180,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
- if (sky2_autoneg_done(sky2, phystat) == 0)
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
sky2_link_up(sky2);
goto out;
}
@@ -2236,8 +2275,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_stop_queue(dev);
napi_disable(&hw->napi);
+ netif_tx_disable(dev);
synchronize_irq(hw->pdev->irq);
@@ -2531,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
}
}
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->rxhash = le32_to_cpu(status);
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2552,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (!(opcode & HW_OWNER))
break;
- hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
port = le->css & CSS_LINK_BIT;
dev = hw->dev[port];
@@ -2603,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2_rx_checksum(sky2, status);
break;
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
case OP_TXINDEXLE:
/* TX index reports status for both ports */
sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2957,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2982,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE_P:
@@ -3192,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
for (i = 0; i < hw->ports; i++)
sky2_gmac_reset(hw, i);
- memset(hw->st_le, 0, STATUS_LE_BYTES);
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
hw->st_idx = 0;
sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3202,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
/* Set the list last index */
- sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
sky2_write16(hw, STAT_TX_IDX_TH, 10);
sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -3258,18 +3312,14 @@ static int sky2_reattach(struct net_device *dev)
return err;
}
-static void sky2_restart(struct work_struct *work)
+static void sky2_all_down(struct sky2_hw *hw)
{
- struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
- u32 imask;
int i;
- rtnl_lock();
-
- napi_disable(&hw->napi);
- synchronize_irq(hw->pdev->irq);
- imask = sky2_read32(hw, B0_IMSK);
+ sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3282,8 +3332,12 @@ static void sky2_restart(struct work_struct *work)
netif_tx_disable(dev);
sky2_hw_down(sky2);
}
+}
- sky2_reset(hw);
+static void sky2_all_up(struct sky2_hw *hw)
+{
+ u32 imask = Y2_IS_BASE;
+ int i;
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3293,6 +3347,8 @@ static void sky2_restart(struct work_struct *work)
continue;
sky2_hw_up(sky2);
+ sky2_set_multicast(dev);
+ imask |= portirq_msk[i];
netif_wake_queue(dev);
}
@@ -3301,6 +3357,17 @@ static void sky2_restart(struct work_struct *work)
sky2_read32(hw, B0_Y2_SP_LISR);
napi_enable(&hw->napi);
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+ struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ sky2_reset(hw);
+ sky2_all_up(hw);
rtnl_unlock();
}
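
sky2_restart() is now just all-down, reset, all-up, and the same two helpers are reused by suspend and resume further down, so the ordering rules (mask interrupts, synchronize, disable NAPI, stop the ports, then the mirror image) live in one place. A skeletal illustration of that factoring, with userspace stand-ins only:

#include <stdio.h>

struct hw {
        int ports;
};

static void all_down(struct hw *hw)
{
        /* Ordering matters: mask interrupts, wait for handlers,
         * stop polling, then take each port down. */
        printf("mask irq, synchronize, napi_disable\n");
        for (int i = 0; i < hw->ports; i++)
                printf("  port %d down\n", i);
}

static void all_up(struct hw *hw)
{
        /* Mirror image of all_down(), finishing with napi_enable(). */
        for (int i = 0; i < hw->ports; i++)
                printf("  port %d up\n", i);
        printf("unmask irq, napi_enable\n");
}

static void hw_reset(struct hw *hw)
{
        printf("chip reset (%d ports)\n", hw->ports);
}

/* restart, suspend and resume all reuse the same two helpers */
static void restart(struct hw *hw)
{
        all_down(hw);
        hw_reset(hw);
        all_up(hw);
}

int main(void)
{
        struct hw hw = { .ports = 2 };

        restart(&hw);
        return 0;
}
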
@@ -3622,7 +3689,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3646,8 +3713,8 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- sky2_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
}
gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -4109,6 +4176,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
+static int sky2_set_flags(struct net_device *dev, u32 data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH) {
+ if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
+ return -EINVAL;
+
+ dev->features |= NETIF_F_RXHASH;
+ } else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ rx_set_rss(dev);
+
+ return 0;
+}
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
@@ -4140,6 +4226,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.phys_id = sky2_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .set_flags = sky2_set_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4250,12 +4337,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_disable(&hw->napi);
last = sky2_read16(hw, STAT_PUT_IDX);
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
if (hw->st_idx == last)
seq_puts(seq, "Status ring (empty)\n");
else {
seq_puts(seq, "Status ring\n");
- for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
- idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
const struct sky2_status_le *le = hw->st_le + idx;
seq_printf(seq, "[%d] %#x %d %#x\n",
idx, le->opcode, le->length, le->status);
@@ -4492,6 +4580,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
+ /* Enable receive hashing unless hardware is known broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->features |= NETIF_F_RXHASH;
+
#ifdef SKY2_VLAN_TAG_USED
/* The workaround for FE+ status conflicts with VLAN tag detection. */
if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4683,15 +4775,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- /* ring for status responses */
- hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
- if (!hw->st_le)
- goto err_out_iounmap;
-
err = sky2_init(hw);
if (err)
goto err_out_iounmap;
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
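
The status ring is now sized at probe time, after sky2_init() has established hw->ports: ports times the next power of two of the worst-case pending responses (allowing for several status entries per receive, e.g. data, checksum and the new hash, plus TX index reports), so RING_NEXT() can keep wrapping with a mask. A small sketch of that sizing calculation; the ring depths below are placeholders, not the driver's values:

#include <stdio.h>

/* Placeholder ring depths; the real driver derives these from its
 * list-element sizes, so the numbers below are illustrative only. */
#define RX_MAX_PENDING  1024
#define TX_MAX_PENDING  1024

/* Round up so the ring index can wrap with a simple mask. */
static unsigned int roundup_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        for (int ports = 1; ports <= 2; ports++) {
                unsigned int st_size =
                        ports * roundup_pow2(3 * RX_MAX_PENDING +
                                             TX_MAX_PENDING);

                printf("%d port(s): %u status entries\n", ports, st_size);
        }
        return 0;
}
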
@@ -4765,8 +4859,10 @@ err_out_unregister:
err_out_free_netdev:
free_netdev(dev);
err_out_free_pci:
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
sky2_write8(hw, B0_CTST, CS_RST_SET);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
@@ -4804,7 +4900,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
free_irq(pdev->irq, hw);
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4829,12 +4926,12 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
cancel_work_sync(&hw->restart_work);
rtnl_lock();
+
+ sky2_all_down(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- sky2_detach(dev);
-
if (sky2->wol)
sky2_wol_init(sky2);
@@ -4843,8 +4940,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
device_set_wakeup_enable(&pdev->dev, wol != 0);
- sky2_write32(hw, B0_IMSK, 0);
- napi_disable(&hw->napi);
sky2_power_aux(hw);
rtnl_unlock();
@@ -4859,12 +4954,11 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, err;
+ int err;
if (!hw)
return 0;
- rtnl_lock();
err = pci_set_power_state(pdev, PCI_D0);
if (err)
goto out;
@@ -4882,20 +4976,13 @@ static int sky2_resume(struct pci_dev *pdev)
goto out;
}
+ rtnl_lock();
sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
-
- for (i = 0; i < hw->ports; i++) {
- err = sky2_reattach(hw->dev[i]);
- if (err)
- goto out;
- }
+ sky2_all_up(hw);
rtnl_unlock();
return 0;
out:
- rtnl_unlock();
dev_err(&pdev->dev, "resume failed (%d)\n", err);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182dd981..084eff21b67 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
enum yukon_ec_rev {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
CHIP_REV_YU_EC_U_A0 = 1,
CHIP_REV_YU_EC_U_A1 = 2,
CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
};
enum yukon_fe_rev {
CHIP_REV_YU_FE_A1 = 1,
@@ -685,8 +694,21 @@ enum {
TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
};
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
enum {
B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -1775,10 +1797,13 @@ enum {
/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
enum {
GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
- GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
- GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
- GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
- GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
};
#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
@@ -2157,14 +2182,14 @@ struct tx_ring_info {
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
#define TX_MAP_PAGE 0x0002
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
- DECLARE_PCI_UNMAP_LEN(data_size);
+ DEFINE_DMA_UNMAP_LEN(data_size);
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
@@ -2249,6 +2274,7 @@ struct sky2_hw {
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
u8 chip_id;
u8 chip_rev;
@@ -2256,6 +2282,7 @@ struct sky2_hw {
u8 ports;
struct sky2_status_le *st_le;
+ u32 st_size;
u32 st_idx;
dma_addr_t st_dma;
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 140d63f3caf..ac279fad9d4 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -731,7 +731,6 @@ void
slhc_free(struct slcompress *comp)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
- return;
}
struct slcompress *
slhc_init(int rslots, int tslots)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 89696156c05..fa434fb8fb7 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -458,7 +458,7 @@ static void sl_tx_timeout(struct net_device *dev)
* 14 Oct 1994 Dmitry Gorodchanin.
*/
#ifdef SL_CHECK_TRANSMIT
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
goto out;
}
@@ -1269,7 +1269,7 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGLEASE:
*p = sl->leased;
- };
+ }
spin_unlock_bh(&sl->lock);
return 0;
}
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index a93f122e9a9..d07c39cb4da 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -460,7 +460,6 @@ static void ultramca_reset_8390(struct net_device *dev)
if (ei_debug > 1)
printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 0291ea098a0..d2dd8e6113a 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -421,7 +421,6 @@ ultra_reset_8390(struct net_device *dev)
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index 7a554adc70f..e459c3b2510 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -352,7 +352,6 @@ static void ultra32_reset_8390(struct net_device *dev)
outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
outb(0x01, ioaddr + 6); /* Enable Interrupts. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d42b1..66831f37839 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
dev->name, __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
dev->name, status, mask);
/* Dump the current TX FIFO contents and restart */
@@ -1289,7 +1289,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* Set the Hash perfec mode */
mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
- continue;
+ if (!(*ha->addr & 1))
+ continue;
/* upper 6 bits are used as hash index */
- position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26;
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
multicast_table[position>>5] |= 1 << (position&0x1f);
}
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f7843aa4..7486d090806 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
/*
- . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
. Purpose:
. This sets the internal hardware table to filter out unwanted multicast
. packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
- /* do we have a pointer here? */
- if ( !cur_addr )
- break;
/* make sure this is a multicast address - shouldn't this
be a given if we have it here ? */
- if ( !( *cur_addr->dmi_addr & 1 ) )
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f;
+ position = ether_crc_le(6, ha->addr) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
numPages = ((length & 0xfffe) + 6) / 256;
if (numPages > 7 ) {
- printk(CARDNAME": Far too big packet error. \n");
+ printk(CARDNAME": Far too big packet error.\n");
/* freeing the packet is a good thing here... but should
. any packets of this size get down here? */
dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
if ( !time_out ) {
/* oh well, wait until the chip finds memory later */
SMC_ENABLE_INT( IM_ALLOC_INT );
- PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ PRINTK2((CARDNAME": memory allocation deferred.\n"));
/* it's deferred, but I'll handle it later */
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* or YES! I can send the packet now.. */
smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
ioaddr = dev->base_addr;
if ( !skb ) {
- PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ PRINTK((CARDNAME": In XMIT with no packet to send\n"));
return;
}
length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* and let the chipset deal with it */
outw( MC_ENQUEUE , ioaddr + MMU_CMD );
- PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+ PRINTK2((CARDNAME": Sent packet of length %d\n", length));
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
@@ -694,8 +691,6 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* we can send another packet */
netif_wake_queue(dev);
-
- return;
}
/*-------------------------------------------------------------------------
@@ -937,7 +932,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
/* I don't recognize this chip, so... */
printk(CARDNAME ": IO %x: Unrecognized revision register:"
- " %x, Contact author. \n", ioaddr, revision_register );
+ " %x, Contact author.\n", ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
@@ -1045,9 +1040,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
*/
printk("ADDR: %pM\n", dev->dev_addr);
- /* set the private data to zero by default */
- memset(netdev_priv(dev), 0, sizeof(struct smc_local));
-
/* Grab the IRQ */
retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
if (retval) {
@@ -1074,7 +1066,7 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d \n", length );
+ printk("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
@@ -1170,7 +1162,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
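The trans_start changes follow one pattern throughout this series: assignments in the hot xmit path are dropped, since the networking core already refreshes the per-queue trans_start when a packet is handed to the driver, while the assignment in the watchdog handler stays and is annotated "prevent tx timeout" so the freshly reset device is not immediately declared hung again. A hedged sketch of the timeout-handler shape these drivers converge on; my_reset_hw()/my_enable_hw() are hypothetical stand-ins for the chip-specific calls:

/* Hypothetical stand-ins for smc_reset()/smc_enable() and friends. */
static void my_reset_hw(struct net_device *dev) { }
static void my_enable_hw(struct net_device *dev) { }

/* Sketch of the common tx_timeout shape used by the drivers in this patch. */
static void example_tx_timeout(struct net_device *dev)
{
	my_reset_hw(dev);		/* "kick" the adapter */
	my_enable_hw(dev);

	dev->trans_start = jiffies;	/* prevent an immediate re-timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);		/* let the stack hand us packets again */
}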
@@ -1201,7 +1193,7 @@ static void smc_rcv(struct net_device *dev)
if ( packet_number & FP_RXEMPTY ) {
/* we got called , but nothing was on the FIFO */
- PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
/* don't need to restore anything */
return;
}
@@ -1257,14 +1249,14 @@ static void smc_rcv(struct net_device *dev)
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
- PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ PRINTK3((" Reading %d dwords (and %d bytes)\n",
packet_length >> 2, packet_length & 3 ));
insl(ioaddr + DATA_1 , data, packet_length >> 2 );
/* read the left over bytes */
insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
packet_length & 0x3 );
#else
- PRINTK3((" Reading %d words and %d byte(s) \n",
+ PRINTK3((" Reading %d words and %d byte(s)\n",
(packet_length >> 1 ), packet_length & 1 ));
insw(ioaddr + DATA_1 , data, packet_length >> 1);
if ( packet_length & 1 ) {
@@ -1333,7 +1325,7 @@ static void smc_tx( struct net_device * dev )
outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
tx_status = inw( ioaddr + DATA_1 );
- PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1339,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt \n");
+ printk(CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1361,7 +1353,6 @@ static void smc_tx( struct net_device * dev )
lp->packets_waiting--;
outb( saved_packet, ioaddr + PNR_ARR );
- return;
}
/*--------------------------------------------------------------------
@@ -1393,7 +1384,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
int handled = 0;
- PRINTK3((CARDNAME": SMC interrupt started \n"));
+ PRINTK3((CARDNAME": SMC interrupt started\n"));
saved_bank = inw( ioaddr + BANK_SELECT );
@@ -1408,7 +1399,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
/* set a timeout value, so I don't stay here forever */
timeout = 4;
- PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1409,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
- ": Handling interrupt status %x \n", status ));
+ ": Handling interrupt status %x\n", status));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
@@ -1452,7 +1443,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
- ": Allocation interrupt \n"));
+ ": Allocation interrupt\n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
@@ -1470,9 +1461,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
dev->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
} else if (status & IM_ERCV_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
@@ -1482,7 +1473,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
- PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d51d5..10cf0cbc218 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
+ PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
}
} while (--timeout);
@@ -1360,7 +1360,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+ position = crc32_le(~0, ha->addr, 6) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
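smc91x computes the same 6-bit bucket as smc9194 but spells the CRC call differently: ether_crc_le(6, addr) is a thin wrapper around crc32_le(~0, addr, 6) in <linux/crc32.h>, so both drivers hash multicast addresses identically after the conversion. A small sketch of the equivalence:

/* ether_crc_le() is defined in <linux/crc32.h> roughly as:
 *	#define ether_crc_le(length, data)  crc32_le(~0, data, length)
 * so the two drivers end up in the same bucket for any address. */
static u32 bucket_smc9194(const u8 *addr) { return ether_crc_le(6, addr) & 0x3f; }
static u32 bucket_smc91x(const u8 *addr)  { return crc32_le(~0, addr, 6) & 0x3f; }
/* bucket_smc9194(a) == bucket_smc91x(a) for every multicast address a */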
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cbf520d38ea..cc559741b0f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -736,7 +736,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
SMSC_TRACE(HW, "configuring for carrier OK");
if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
- /* Restore orginal GPIO configuration */
+ /* Restore original GPIO configuration */
pdata->gpio_setting = pdata->gpio_orig_setting;
smsc911x_reg_write(pdata, GPIO_CFG,
pdata->gpio_setting);
@@ -750,7 +750,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
/* Force 10/100 LED off, after saving
- * orginal GPIO configuration */
+ * original GPIO configuration */
pdata->gpio_orig_setting = pdata->gpio_setting;
pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
@@ -1335,7 +1335,6 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -1382,13 +1381,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, dev) {
- unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bitnum = smsc911x_hash(ha->addr);
unsigned int mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
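The smsc911x conversion keeps the existing hash scheme: smsc911x_hash() yields a 6-bit bucket, the low five bits pick a position inside a 32-bit register and bit 5 selects between the high and low halves of the hash filter. The hunk is cut off after the if(); a sketch of the full split, with the else branch assumed from the surrounding logic:

/* Sketch: distribute the 6-bit buckets over the high/low hash registers.
 * The else branch is an assumption -- only the if() is visible above. */
static void example_build_hash(struct net_device *dev,
			       u32 *hash_high, u32 *hash_low)
{
	struct netdev_hw_addr *ha;

	*hash_high = 0;
	*hash_low = 0;
	netdev_for_each_mc_addr(ha, dev) {
		unsigned int bitnum = smsc911x_hash(ha->addr);	/* 0..63 */
		unsigned int mask = 0x01 << (bitnum & 0x1f);

		if (bitnum & 0x20)
			*hash_high |= mask;	/* buckets 32..63 */
		else
			*hash_low |= mask;	/* buckets 0..31 */
	}
}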
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf4574..6cdee6a15f9 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1034,8 +1034,6 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
smsc9420_pci_flush_write(pd);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1064,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- netdev_for_each_mc_addr(mc_list, dev) {
- u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251075e..26e25d7f582 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -263,8 +263,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -531,7 +529,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *addr;
int i;
@@ -550,8 +548,8 @@ static void sonic_multicast_list(struct net_device *dev)
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i = 1;
- netdev_for_each_mc_addr(dmi, dev) {
- addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addr = ha->addr;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
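The SONIC driver programs perfect-filter CAM entries rather than a hash: each six-byte multicast address is packed little-endian into three 16-bit "capability" words, and the loop starts at entry 1 because entry 0 always carries the device's own address. A sketch of the packing for one entry, as in the loop above:

/* Sketch: pack one MAC address into the three 16-bit CAM words used above. */
static void example_load_cam_entry(struct net_device *dev, int entry,
				   const u8 *addr)
{
	sonic_cda_put(dev, entry, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
	sonic_cda_put(dev, entry, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
	sonic_cda_put(dev, entry, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
}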
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f2d21..1636a34d95d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
static void
spider_net_set_multi(struct net_device *netdev)
{
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 hash;
int i;
u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- netdev_for_each_mc_addr(mc, netdev) {
- hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash = spider_net_get_multicast_hash(netdev, ha->addr);
set_bit(hash, bitmask);
}
@@ -2095,8 +2095,6 @@ static void spider_net_link_phy(unsigned long data)
card->netdev->name, phy->speed,
phy->duplex == 1 ? "Full" : "Half",
phy->autoneg == 1 ? "" : "no ");
-
- return;
}
/**
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa6989901..74b7ae76906 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1173,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1221,8 +1221,6 @@ static void init_ring(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++)
memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
-
- return;
}
@@ -1312,8 +1310,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1766,7 +1762,7 @@ static void set_rx_mode(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
u32 rx_mode = MinVLANPrio;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
#ifdef VLAN_SUPPORT
@@ -1804,8 +1800,8 @@ static void set_rx_mode(struct net_device *dev)
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (__be16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1821,10 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
- int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
*fptr |= cpu_to_le32(1 << (bit_nr & 31));
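Starfire's hash path uses the upper 9 bits of the little-endian CRC, i.e. a 512-bit table stored as 32 little-endian 16-bit words, and sets the chosen bit through a dword-aligned 32-bit access. A sketch of the index arithmetic shown above:

/* Sketch: a 9-bit hash selects one bit in a 512-bit table of __le16 words. */
static void example_build_starfire_hash(struct net_device *dev,
					__le16 mc_filter[32])
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 32 * sizeof(__le16));
	netdev_for_each_mc_addr(ha, dev) {
		/* upper 9 bits of the little-endian CRC-32 */
		int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
		/* dword-aligned pointer into the __le16 array */
		__le32 *fptr = (__le32 *)&mc_filter[(bit_nr >> 4) & ~1];

		*fptr |= cpu_to_le32(1 << (bit_nr & 31));
	}
}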
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af15fe1..9691733ddb8 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100.o $(stmmac-y)
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172e986..144f76fd3e3 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include "descs.h"
#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
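common.h now centralizes the debug switches for the MAC core, DMA and descriptor code: CHIP_DBG() expands to printk() only while CHIP_DEBUG_PRINT is defined, and to an empty do/while otherwise, so call sites cost nothing in production builds. The new dwmac_dma_flush_tx_fifo() prototype likewise makes the transmit-FIFO flush reachable from outside dwmac1000_dma.c. A minimal usage sketch of the macro, with DMA_BUS_MODE taken from dwmac_dma.h:

/* Compiles to nothing unless CHIP_DEBUG_PRINT is defined in common.h. */
static void example_dump_bus_mode(unsigned long ioaddr)
{
	CHIP_DBG(KERN_DEBUG "DMA CSR0 (bus mode): 0x%08x\n",
		 readl(ioaddr + DMA_BUS_MODE));
}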
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca614fc..00000000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*******************************************************************************
- This is the driver for the MAC 10/100 on-chip Ethernet controller
- currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
-
- DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
- this code.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "common.h"
-#include "dwmac100.h"
-#include "dwmac_dma.h"
-
-#undef DWMAC100_DEBUG
-/*#define DWMAC100_DEBUG*/
-#ifdef DWMAC100_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
-static void dwmac100_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
-
-#ifdef STMMAC_VLAN_TAG_USED
- writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
-#endif
- return;
-}
-
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
-{
- pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
- pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
- pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
- pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
- pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
- pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
- pr_info("\tflow control (offset 0x%x): 0x%08x\n",
- MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
- pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
- pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
- pr_info("\n\tMAC management counter registers\n");
- pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
- pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
- pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
- pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
- pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
- return;
-}
-
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
- u32 dma_rx)
-{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
-
- /* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
- ioaddr + DMA_BUS_MODE);
-
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
-
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
-}
-
-/* Store and Forward capability is not used at all..
- * The transmit threshold can be programmed by
- * setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
- int rxmode)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
-
- if (txmode <= 32)
- csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
- csr6 |= DMA_CONTROL_TTC_64;
- else
- csr6 |= DMA_CONTROL_TTC_128;
-
- writel(csr6, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
-{
- int i;
-
- DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
- for (i = 0; i < 9; i++)
- pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
- (DMA_BUS_MODE + i * 4),
- readl(ioaddr + DMA_BUS_MODE + i * 4));
- DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
- DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
- return;
-}
-
-/* DMA controller has two counters to track the number of
- * the receive missed frames. */
-static void dwmac100_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x,
- unsigned long ioaddr)
-{
- struct net_device_stats *stats = (struct net_device_stats *)data;
- u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
-
- if (unlikely(csr8)) {
- if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
- x->rx_overflow_cntr += 0x800;
- } else {
- unsigned int ove_cntr;
- ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
- x->rx_overflow_cntr += ove_cntr;
- }
-
- if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
- x->rx_missed_cntr += 0xffff;
- } else {
- unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
- x->rx_missed_cntr += miss_f;
- }
- }
- return;
-}
-
-static int dwmac100_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
- x->tx_underflow++;
- stats->tx_fifo_errors++;
- }
- if (unlikely(p->des01.tx.no_carrier)) {
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.tx.loss_carrier)) {
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
- }
- if (unlikely(p->des01.tx.heartbeat_fail)) {
- x->tx_heartbeat++;
- stats->tx_heartbeat_errors++;
- ret = -1;
- }
- if (unlikely(p->des01.tx.deferred))
- x->tx_deferred++;
-
- return ret;
-}
-
-static int dwmac100_get_tx_len(struct dma_desc *p)
-{
- return p->des01.tx.buffer1_size;
-}
-
-/* This function verifies if each incoming frame has some errors
- * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
- * is not able to compute the csum in HW. */
-static int dwmac100_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p)
-{
- int ret = csum_none;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("dwmac100 Error: Oversized Ethernet "
- "frame spanned multiple buffers\n");
- stats->rx_length_errors++;
- return discard_frame;
- }
-
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
- x->rx_desc++;
- if (unlikely(p->des01.rx.partial_frame_error))
- x->rx_partial++;
- if (unlikely(p->des01.rx.run_frame))
- x->rx_runt++;
- if (unlikely(p->des01.rx.frame_too_long))
- x->rx_toolong++;
- if (unlikely(p->des01.rx.collision)) {
- x->rx_collision++;
- stats->collisions++;
- }
- if (unlikely(p->des01.rx.crc_error)) {
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.dribbling))
- ret = discard_frame;
-
- if (unlikely(p->des01.rx.length_error)) {
- x->rx_length++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.mii_error)) {
- x->rx_mii++;
- ret = discard_frame;
- }
- if (p->des01.rx.multicast_frame) {
- x->rx_multicast++;
- stats->multicast++;
- }
- return ret;
-}
-
-static void dwmac100_irq_status(unsigned long ioaddr)
-{
- return;
-}
-
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- if (dev->flags & IFF_PROMISC) {
- value |= MAC_CONTROL_PR;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
- MAC_CONTROL_HP);
- } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value |= MAC_CONTROL_PM;
- value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
- writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (netdev_mc_empty(dev)) { /* no multicast */
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
- MAC_CONTROL_HO | MAC_CONTROL_HP);
- } else {
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Perfect filter mode for physical address and Hash
- filter for multicast */
- value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
- MAC_CONTROL_IF | MAC_CONTROL_HO);
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table */
- int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
- }
-
- writel(value, ioaddr + MAC_CONTROL);
-
- DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
- "HI 0x%08x, LO 0x%08x\n",
- __func__, readl(ioaddr + MAC_CONTROL),
- readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
- return;
-}
-
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = MAC_FLOW_CTRL_ENABLE;
-
- if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + MAC_FLOW_CTRL);
-
- return;
-}
-
-/* No PMT module supported for this Ethernet Controller.
- * Tested on ST platforms only.
- */
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
-{
- return;
-}
-
-static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
- p++;
- }
- return;
-}
-
-static int dwmac100_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.tx.own;
-}
-
-static int dwmac100_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
-}
-
-static void dwmac100_set_tx_owner(struct dma_desc *p)
-{
- p->des01.tx.own = 1;
-}
-
-static void dwmac100_set_rx_owner(struct dma_desc *p)
-{
- p->des01.rx.own = 1;
-}
-
-static int dwmac100_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.tx.last_segment;
-}
-
-static void dwmac100_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.tx.end_ring;
-
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
- /* set termination field */
- p->des01.tx.end_ring = ter;
-
- return;
-}
-
-static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
-}
-
-static void dwmac100_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
-}
-
-static void dwmac100_close_tx_desc(struct dma_desc *p)
-{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
-}
-
-static int dwmac100_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.rx.frame_length;
-}
-
-struct stmmac_ops dwmac100_ops = {
- .core_init = dwmac100_core_init,
- .dump_regs = dwmac100_dump_mac_regs,
- .host_irq_status = dwmac100_irq_status,
- .set_filter = dwmac100_set_filter,
- .flow_ctrl = dwmac100_flow_ctrl,
- .pmt = dwmac100_pmt,
- .set_umac_addr = dwmac100_set_umac_addr,
- .get_umac_addr = dwmac100_get_umac_addr,
-};
-
-struct stmmac_dma_ops dwmac100_dma_ops = {
- .init = dwmac100_dma_init,
- .dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
- .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
- .enable_dma_transmission = dwmac_enable_dma_transmission,
- .enable_dma_irq = dwmac_enable_dma_irq,
- .disable_dma_irq = dwmac_disable_dma_irq,
- .start_tx = dwmac_dma_start_tx,
- .stop_tx = dwmac_dma_stop_tx,
- .start_rx = dwmac_dma_start_rx,
- .stop_rx = dwmac_dma_stop_rx,
- .dma_interrupt = dwmac_dma_interrupt,
-};
-
-struct stmmac_desc_ops dwmac100_desc_ops = {
- .tx_status = dwmac100_get_tx_frame_status,
- .rx_status = dwmac100_get_rx_frame_status,
- .get_tx_len = dwmac100_get_tx_len,
- .init_rx_desc = dwmac100_init_rx_desc,
- .init_tx_desc = dwmac100_init_tx_desc,
- .get_tx_owner = dwmac100_get_tx_owner,
- .get_rx_owner = dwmac100_get_rx_owner,
- .release_tx_desc = dwmac100_release_tx_desc,
- .prepare_tx_desc = dwmac100_prepare_tx_desc,
- .clear_tx_ic = dwmac100_clear_tx_ic,
- .close_tx_desc = dwmac100_close_tx_desc,
- .get_tx_ls = dwmac100_get_tx_ls,
- .set_tx_owner = dwmac100_set_tx_owner,
- .set_rx_owner = dwmac100_set_rx_owner,
- .get_rx_frame_len = dwmac100_get_rx_frame_len,
-};
-
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- pr_info("\tDWMAC100\n");
-
- mac->mac = &dwmac100_ops;
- mac->desc = &dwmac100_desc_ops;
- mac->dma = &dwmac100_dma_ops;
-
- mac->pmt = PMT_NOT_SUPPORTED;
- mac->link.port = MAC_CONTROL_PS;
- mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
- mac->mii.addr = MAC_MII_ADDR;
- mac->mii.data = MAC_MII_DATA;
-
- return mac;
-}
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004..97956cbf1cb 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
/*----------------------------------------------------------------------------
* MAC BLOCK defines
*---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */
+
+extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e384e..d8d0f355377 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
deac_full_minus_4 = 0x00401800,
};
#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
enum ttc_control {
DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
-#undef DWMAC1000_DEBUG
-/* #define DWMAC1000__DEBUG */
-#undef FRAME_FILTER_DEBUG
-/* #define FRAME_FILTER_DEBUG */
-#ifdef DWMAC1000__DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
extern struct stmmac_dma_ops dwmac1000_dma_ops;
-extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95ebfe49..917b4e16923 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -48,7 +48,6 @@ static void dwmac1000_core_init(unsigned long ioaddr)
/* Tag detection without filtering */
writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
- return;
}
static void dwmac1000_dump_regs(unsigned long ioaddr)
@@ -61,7 +60,6 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
offset, readl(ioaddr + offset));
}
- return;
}
static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -83,8 +81,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
unsigned long ioaddr = dev->base_addr;
unsigned int value = 0;
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+ CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +93,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Hash filter for multicast */
value = GMAC_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
index the contens of the hash table */
int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
* within the register. */
@@ -136,11 +134,9 @@ static void dwmac1000_set_filter(struct net_device *dev)
#endif
writel(value, ioaddr + GMAC_FRAME_FILTER);
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
"HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
}
static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
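The GMAC hash filter keeps the same arithmetic, only the iterator changes: the CRC-32 of the address is computed little-endian, inverted and bit-reversed, and its top 6 bits index a 64-bit table; bit 5 of the index chooses between GMAC_HASH_HIGH and GMAC_HASH_LOW and the low 5 bits select the bit inside that register. A sketch of just the bucket computation used above:

/* Sketch of the bucket computation; crc32_le() and bitrev32() are the
 * in-kernel helpers from <linux/crc32.h> and <linux/bitrev.h>. */
static inline int gmac_hash_bucket(const u8 *addr)
{
	/* upper 6 bits of the bit-reversed, inverted little-endian CRC-32 */
	return bitrev32(~crc32_le(~0, addr, 6)) >> 26;
}

/* usage, as in the loop above:
 *	int bit_nr = gmac_hash_bucket(ha->addr);
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */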
@@ -148,23 +144,22 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
{
unsigned int flow = 0;
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_RFE;
}
if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_TFE;
}
if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
}
writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
}
static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
@@ -172,15 +167,14 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
pmt |= power_down | magic_pkt_en;
} else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
pmt |= global_unicast;
}
writel(pmt, ioaddr + GMAC_PMT);
- return;
}
@@ -190,22 +184,20 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
}
-
- return;
}
struct stmmac_ops dwmac1000_ops = {
@@ -230,7 +222,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
mac->mac = &dwmac1000_ops;
- mac->desc = &dwmac1000_desc_ops;
mac->dma = &dwmac1000_dma_ops;
mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a2da6..415805057cb 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
- This contains the functions to handle the dma and descriptors.
+ This contains the functions to handle the dma.
Copyright (C) 2007-2009 STMicroelectronics Ltd
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-/* Transmit FIFO flush operation */
-static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
- writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
-
- do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
-}
-
static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
if (txmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
/* Operating on second frame increase the performance
* especially when transmit store-and-forward is used.*/
csr6 |= DMA_CONTROL_OSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
" (threshold = %d)\n", txmode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
if (rxmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
csr6 |= DMA_CONTROL_RSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
" (threshold = %d)\n", rxmode);
csr6 &= ~DMA_CONTROL_RSF;
csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -116,7 +107,6 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
writel(csr6, ioaddr + DMA_CONTROL);
- return;
}
/* Not yet implemented --- no RMON module */
@@ -138,306 +128,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
readl(ioaddr + DMA_BUS_MODE + offset));
}
}
- return;
-}
-
-static int dwmac1000_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.etx.error_summary)) {
- DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
- if (unlikely(p->des01.etx.jabber_timeout)) {
- DBG(KERN_ERR "\tjabber_timeout error\n");
- x->tx_jabber++;
- }
-
- if (unlikely(p->des01.etx.frame_flushed)) {
- DBG(KERN_ERR "\tframe_flushed error\n");
- x->tx_frame_flushed++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- if (unlikely(p->des01.etx.loss_carrier)) {
- DBG(KERN_ERR "\tloss_carrier error\n");
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.no_carrier)) {
- DBG(KERN_ERR "\tno_carrier error\n");
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_collisions)) {
- DBG(KERN_ERR "\texcessive_collisions\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_deferral)) {
- DBG(KERN_INFO "\texcessive tx_deferral\n");
- x->tx_deferred++;
- }
-
- if (unlikely(p->des01.etx.underflow_error)) {
- DBG(KERN_ERR "\tunderflow error\n");
- dwmac1000_flush_tx_fifo(ioaddr);
- x->tx_underflow++;
- }
-
- if (unlikely(p->des01.etx.ip_header_error)) {
- DBG(KERN_ERR "\tTX IP header csum error\n");
- x->tx_ip_header_error++;
- }
-
- if (unlikely(p->des01.etx.payload_error)) {
- DBG(KERN_ERR "\tAddr/Payload csum error\n");
- x->tx_payload_error++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- ret = -1;
- }
-
- if (unlikely(p->des01.etx.deferred)) {
- DBG(KERN_INFO "GMAC TX status: tx deferred\n");
- x->tx_deferred++;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame) {
- DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
- x->tx_vlan++;
- }
-#endif
-
- return ret;
-}
-
-static int dwmac1000_get_tx_len(struct dma_desc *p)
-{
- return p->des01.etx.buffer1_size;
-}
-
-static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
-{
- int ret = good_frame;
- u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
-
- /* bits 5 7 0 | Frame status
- * ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
- * 1 0 0 | IPv4/6 No CSUM errorS.
- * 1 0 1 | IPv4/6 CSUM PAYLOAD error
- * 1 1 0 | IPv4/6 CSUM IP HR error
- * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
- * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
- * 0 1 1 | COE bypassed.. no IPv4/6 frame
- * 0 1 0 | Reserved.
- */
- if (status == 0x0) {
- DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
- ret = good_frame;
- } else if (status == 0x4) {
- DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
- ret = good_frame;
- } else if (status == 0x5) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x6) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
- ret = csum_none;
- } else if (status == 0x7) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 Header and Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x1) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
- ret = discard_frame;
- } else if (status == 0x3) {
- DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
- ret = discard_frame;
- }
- return ret;
-}
-
-static int dwmac1000_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x, struct dma_desc *p)
-{
- int ret = good_frame;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.erx.error_summary)) {
- DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
- if (unlikely(p->des01.erx.descriptor_error)) {
- DBG(KERN_ERR "\tdescriptor error\n");
- x->rx_desc++;
- stats->rx_length_errors++;
- }
- if (unlikely(p->des01.erx.overflow_error)) {
- DBG(KERN_ERR "\toverflow error\n");
- x->rx_gmac_overflow++;
- }
-
- if (unlikely(p->des01.erx.ipc_csum_error))
- DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
-
- if (unlikely(p->des01.erx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions++;
- stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog)) {
- DBG(KERN_ERR "\treceive_watchdog error\n");
- x->rx_watchdog++;
- }
- if (unlikely(p->des01.erx.error_gmii)) {
- DBG(KERN_ERR "\tReceive Error\n");
- x->rx_mii++;
- }
- if (unlikely(p->des01.erx.crc_error)) {
- DBG(KERN_ERR "\tCRC error\n");
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
-
- /* After a payload csum error, the ES bit is set.
- * It doesn't match with the information reported into the databook.
- * At any rate, we need to understand if the CSUM hw computation is ok
- * and report this info to the upper layers. */
- ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
-
- if (unlikely(p->des01.erx.dribbling)) {
- DBG(KERN_ERR "GMAC RX: dribbling error\n");
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.sa_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
- x->sa_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.da_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
- x->da_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.length_error)) {
- DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_length++;
- ret = discard_frame;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag) {
- DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
- x->rx_vlan++;
- }
-#endif
- return ret;
-}
-
-static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
- p++;
- }
-
- return;
-}
-
-static int dwmac1000_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.etx.own;
-}
-
-static int dwmac1000_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
-}
-
-static void dwmac1000_set_tx_owner(struct dma_desc *p)
-{
- p->des01.etx.own = 1;
-}
-
-static void dwmac1000_set_rx_owner(struct dma_desc *p)
-{
- p->des01.erx.own = 1;
-}
-
-static int dwmac1000_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.etx.last_segment;
-}
-
-static void dwmac1000_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.etx.end_ring;
-
- memset(p, 0, sizeof(struct dma_desc));
- p->des01.etx.end_ring = ter;
-
- return;
-}
-
-static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
- if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
-
-static void dwmac1000_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
-}
-
-static void dwmac1000_close_tx_desc(struct dma_desc *p)
-{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
-}
-
-static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.erx.frame_length;
}
struct stmmac_dma_ops dwmac1000_dma_ops = {
@@ -454,21 +144,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
};
-
-struct stmmac_desc_ops dwmac1000_desc_ops = {
- .tx_status = dwmac1000_get_tx_frame_status,
- .rx_status = dwmac1000_get_rx_frame_status,
- .get_tx_len = dwmac1000_get_tx_len,
- .init_rx_desc = dwmac1000_init_rx_desc,
- .init_tx_desc = dwmac1000_init_tx_desc,
- .get_tx_owner = dwmac1000_get_tx_owner,
- .get_rx_owner = dwmac1000_get_rx_owner,
- .release_tx_desc = dwmac1000_release_tx_desc,
- .prepare_tx_desc = dwmac1000_prepare_tx_desc,
- .clear_tx_ic = dwmac1000_clear_tx_ic,
- .close_tx_desc = dwmac1000_close_tx_desc,
- .get_tx_ls = dwmac1000_get_tx_ls,
- .set_tx_owner = dwmac1000_set_tx_owner,
- .set_rx_owner = dwmac1000_set_rx_owner,
- .get_rx_frame_len = dwmac1000_get_rx_frame_len,
-};
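The chip-local dwmac1000_flush_tx_fifo() goes away together with the descriptor handlers, and common.h instead declares dwmac_dma_flush_tx_fifo(), so the relocated enhanced-descriptor error paths can still trigger the flush through the shared DMA code. A hedged sketch of that shared helper, mirroring the removed static version; the exact home of the function (presumably dwmac_lib.c) and the new location of DMA_CONTROL_FTF are not shown in this hunk and are assumptions:

/* Sketch only: mirrors the removed dwmac1000_flush_tx_fifo(); assumes
 * DMA_CONTROL and DMA_CONTROL_FTF keep their dwmac_dma.h-style values. */
void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	writel(csr6 | DMA_CONTROL_FTF, ioaddr + DMA_CONTROL);

	/* the FTF bit self-clears once the transmit FIFO has drained */
	do {} while (readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF);
}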
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 00000000000..6f270a0e151
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,196 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+ pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+}
+
+static void dwmac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contens of the hash table */
+ * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+
+ return mac;
+}
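dwmac100_setup() wires the chip-specific tables into a struct mac_device_info: ->mac points at the core operations defined in this file and ->dma at the table exported by dwmac100_dma.c; the descriptor table is no longer attached here, since descriptor handling moves to the shared norm/enh descriptor objects listed in the Makefile. A sketch of how a caller might consume the returned structure; the function name and parameters are illustrative only:

/* Sketch: attach the dwmac100 tables and bring the MAC and DMA up.
 * pbl/tx_phys/rx_phys stand in for the caller's burst length and
 * descriptor-ring DMA addresses. */
static int example_attach_dwmac100(struct net_device *dev, int pbl,
				   u32 tx_phys, u32 rx_phys)
{
	struct mac_device_info *hw = dwmac100_setup(dev->base_addr);

	if (!hw)
		return -ENOMEM;

	hw->mac->core_init(dev->base_addr);		/* MAC_CORE_INIT bits */
	hw->mac->set_umac_addr(dev->base_addr, dev->dev_addr, 0);

	/* soft-reset the DMA and point CSR3/CSR4 at the descriptor rings */
	return hw->dma->init(dev->base_addr, pbl, tx_phys, rx_phys);
}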
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 00000000000..2fece7b7272
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
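+	/* Poll until the HW clears the self-clearing software reset bit */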
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register. */
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* The DMA controller has two counters that track the number of
+ * missed receive frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
+
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9f606..7b815a1b7b8 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1eaa44..a85415216ef 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -52,7 +52,6 @@ void dwmac_dma_start_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_stop_tx(unsigned long ioaddr)
@@ -60,7 +59,6 @@ void dwmac_dma_stop_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_start_rx(unsigned long ioaddr)
@@ -68,8 +66,6 @@ void dwmac_dma_start_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
void dwmac_dma_stop_rx(unsigned long ioaddr)
@@ -77,8 +73,6 @@ void dwmac_dma_stop_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
#ifdef DWMAC_DMA_DEBUG
@@ -111,7 +105,6 @@ static void show_tx_process_state(unsigned int status)
default:
break;
}
- return;
}
static void show_rx_process_state(unsigned int status)
@@ -149,7 +142,6 @@ static void show_rx_process_state(unsigned int status)
default:
break;
}
- return;
}
#endif
@@ -227,6 +219,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
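+/* Set the FTF bit and poll until the DMA controller clears it, i.e. the
+ * transmit FIFO flush has completed. */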
+void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
@@ -237,8 +236,6 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
writel(data, ioaddr + high);
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
-
- return;
}
void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -257,7 +254,5 @@ void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
addr[3] = (lo_addr >> 24) & 0xff;
addr[4] = hi_addr & 0xff;
addr[5] = (hi_addr >> 8) & 0xff;
-
- return;
}
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 00000000000..3c18ebece04
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+	 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+	 * 1 0 0 | IPv4/6 No CSUM errors.
+	 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+	 * 1 1 0 | IPv4/6 CSUM IP HR error
+	 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
+	 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+	 * 0 1 1 | COE bypassed, no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+			stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+	/* After a payload csum error, the ES bit is set.
+	 * This does not match the information reported in the databook.
+	 * In any case, we need to check whether the HW csum computation
+	 * succeeded and report this info to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
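+/* Reset the descriptor for reuse while preserving the end-of-ring flag */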
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+}
+
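+/* Buffers larger than 4 KiB are split across the two buffer-size fields
+ * of the same descriptor. */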
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 00000000000..31ad5364379
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function checks each incoming frame for errors
+ * and, if required, updates the multicast statistics.
+ * On success it returns csum_none because the device
+ * is not able to compute the checksum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+}
+
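+/* Normal descriptors cannot request HW checksum insertion, so the
+ * csum_flag argument is not used here. */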
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e6943cf..ebebc644b1b 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Jan_2010"
+#define DRV_MODULE_VERSION "Apr_2010"
#include <linux/stmmac.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
+ int enh_desc;
};
#ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern struct stmmac_desc_ops enh_desc_ops;
+extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index c021eaa3ca6..f080509923f 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -102,7 +102,6 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
info->n_stats = STMMAC_STATS_LEN;
- return;
}
int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -194,8 +193,6 @@ void stmmac_ethtool_gregs(struct net_device *dev,
reg_space[i + 55] =
readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
}
-
- return;
}
int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
@@ -233,7 +230,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
spin_unlock(&priv->lock);
- return;
}
static int
@@ -292,8 +288,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
-
- return;
}
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
@@ -323,7 +317,6 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
WARN_ON(1);
break;
}
- return;
}
/* Currently only support WOL through Magic packet. */
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85ec80..a31d580f306 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -169,8 +169,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
-
- return;
}
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -184,7 +182,6 @@ static void print_pkt(unsigned char *buf, int len)
pr_info(" %02x", buf[j]);
}
pr_info("\n");
- return;
}
#endif
@@ -514,7 +511,6 @@ static void init_dma_desc_rings(struct net_device *dev)
pr_info("TX descriptor ring:\n");
display_ring(priv->dma_tx, txsize);
}
- return;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -529,7 +525,6 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
}
priv->rx_skbuff[i] = NULL;
}
- return;
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -547,7 +542,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
priv->tx_skbuff[i] = NULL;
}
}
- return;
}
static void free_dma_desc_resources(struct stmmac_priv *priv)
@@ -567,8 +561,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
kfree(priv->rx_skbuff_dma);
kfree(priv->rx_skbuff);
kfree(priv->tx_skbuff);
-
- return;
}
/**
@@ -598,8 +590,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
tx_coe = priv->tx_coe;
-
- return;
}
/**
@@ -675,7 +665,6 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
- return;
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -731,8 +720,6 @@ void stmmac_schedule(struct net_device *dev)
priv->xstats.sched_timer_n++;
_stmmac_schedule(priv);
-
- return;
}
static void stmmac_no_timer_started(unsigned int x)
@@ -763,8 +750,6 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
-
- return;
}
@@ -788,8 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
-
- return;
}
/**
@@ -837,7 +820,7 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
- pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
return -ENOMEM;
}
priv->tm->freq = tmrate;
@@ -1197,7 +1180,6 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
priv->hw->desc->set_rx_owner(p + entry);
}
- return;
}
static int stmmac_rx(struct stmmac_priv *priv, int limit)
@@ -1280,7 +1262,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
- priv->dev->last_rx = jiffies;
}
entry = next_entry;
p = p_next; /* use prefetched values */
@@ -1332,7 +1313,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
/* Clear Tx resources and restart transmitting again */
stmmac_tx_err(priv);
- return;
}
/* Configuration changes (passed on by ifconfig) */
@@ -1374,7 +1354,6 @@ static void stmmac_multicast_list(struct net_device *dev)
spin_lock(&priv->lock);
priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
- return;
}
/**
@@ -1490,8 +1469,6 @@ static void stmmac_vlan_rx_register(struct net_device *dev,
spin_lock(&priv->lock);
priv->vlgrp = grp;
spin_unlock(&priv->lock);
-
- return;
}
#endif
@@ -1587,6 +1564,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
else
device = dwmac100_setup(ioaddr);
 	if (!device)
 		return -ENOMEM;
+
+	if (priv->enh_desc) {
+		device->desc = &enh_desc_ops;
+		pr_info("\tEnhanced descriptor structure\n");
+	} else
+		device->desc = &ndesc_ops;
@@ -1727,6 +1710,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+ priv->enh_desc = plat_dat->enh_desc;
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index 679f61ffb1f..2a0e1abde7e 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -31,8 +31,6 @@ static void stmmac_timer_handler(void *data)
struct net_device *dev = (struct net_device *)data;
stmmac_schedule(dev);
-
- return;
}
#define STMMAC_TIMER_MSG(timer, freq) \
@@ -47,13 +45,11 @@ static void stmmac_rtc_start(unsigned int new_freq)
{
rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
- return;
}
static void stmmac_rtc_stop(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
@@ -102,13 +98,11 @@ static void stmmac_tmu_start(unsigned int new_freq)
{
clk_set_rate(timer_clock, new_freq);
clk_enable(timer_clock);
- return;
}
static void stmmac_tmu_stop(void)
{
clk_disable(timer_clock);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index 87a6b8eabc6..d85f0a84bc7 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -280,7 +280,6 @@ stnic_init (struct net_device *dev)
{
stnic_reset (dev);
NS8390_init (dev, 0);
- return;
}
static void __exit stnic_cleanup(void)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89a9a7..15131234224 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
mc_cmd->mc_cnt = swab16(num_addrs * 6);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
- dmi->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
@@ -985,7 +985,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -998,7 +998,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1062,7 +1062,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
}
sun3_attn586();
- dev->trans_start = jiffies;
if(!i)
dev_kfree_skb(skb);
WAIT_4_SCB_CMD();
@@ -1082,7 +1081,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1097,7 +1095,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1694ca5bfb4..358c22f9acb 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -523,8 +523,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Transmitter timeout, serious problems. */
if (netif_queue_stopped(dev)) {
- int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 20)
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
return NETDEV_TX_BUSY;
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
@@ -559,7 +559,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
netif_start_queue(dev);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -637,8 +636,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
AREG = CSR0;
DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
dev->name, DREG ));
- dev->trans_start = jiffies;
- dev_kfree_skb( skb );
+ dev_kfree_skb(skb);
lp->lock = 0;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index ed7865a0b5b..367e96f317d 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -362,7 +362,7 @@ static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -401,7 +401,7 @@ static unsigned short bigmac_tcvr_read(struct bigmac *bp,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return 0xffff;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -982,8 +982,6 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -999,7 +997,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
u32 tmp, crc;
@@ -1028,8 +1026,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1133,8 +1131,8 @@ static int __devinit bigmac_ether_init(struct of_device *op,
goto fail_and_cleanup;
/* Get supported SBUS burst sizes. */
- bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff);
- bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff);
+ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
+ bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
bsizes &= 0xff;
if (bsizes_more != 0xff)
@@ -1186,7 +1184,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
}
/* Get the board revision of this BigMAC. */
- bp->board_rev = of_getintprop_default(bp->bigmac_op->node,
+ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
"board-version", 1);
/* Init auto-negotiation timer state. */
@@ -1292,8 +1290,11 @@ static const struct of_device_id bigmac_sbus_match[] = {
MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
static struct of_platform_driver bigmac_sbus_driver = {
- .name = "sunbmac",
- .match_table = bigmac_sbus_match,
+ .driver = {
+ .name = "sunbmac",
+ .owner = THIS_MODULE,
+ .of_match_table = bigmac_sbus_match,
+ },
.probe = bigmac_sbus_probe,
.remove = __devexit_p(bigmac_sbus_remove),
};
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a394a4e..2678588ea4b 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -788,7 +788,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay();
}
- return;
}
static int mdio_wait_link(struct net_device *dev, int wait)
@@ -972,7 +971,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
@@ -1022,7 +1021,6 @@ static void init_ring(struct net_device *dev)
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = 0;
}
- return;
}
static void tx_poll (unsigned long data)
@@ -1049,7 +1047,6 @@ static void tx_poll (unsigned long data)
if (ioread32 (np->base + TxListPtr) == 0)
iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
np->base + TxListPtr);
- return;
}
static netdev_tx_t
@@ -1084,7 +1081,6 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
} else {
netif_stop_queue (dev);
}
- dev->trans_start = jiffies;
if (netif_msg_tx_queued(np)) {
printk (KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
@@ -1379,7 +1375,6 @@ not_done:
if (np->budget <= 0)
np->budget = RX_BUDGET;
tasklet_schedule(&np->rx_tasklet);
- return;
}
static void refill_rx (struct net_device *dev)
@@ -1410,7 +1405,6 @@ static void refill_rx (struct net_device *dev)
np->rx_ring[entry].status = 0;
cnt++;
}
- return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
@@ -1522,13 +1516,13 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
mc_filter[index/16] |= (1 << (index % 16));
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1c4e8..434f9d73533 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1136,7 +1136,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
return NETDEV_TX_OK;
}
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, gp->dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, gp->dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2923,7 +2923,6 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(dev_addr + 3, 3);
- return;
}
#endif /* not Sparc and not PPC */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b17dbb11bd6..3d9650b8d38 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -855,7 +855,7 @@ static void happy_meal_timer(unsigned long data)
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
break;
- };
+ }
if (restart_timer) {
hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
@@ -1488,7 +1488,7 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("external, disable MII, "));
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return -EAGAIN;
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, hp->dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, hp->dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1734,7 +1734,7 @@ static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
case external:
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return;
@@ -2341,8 +2341,6 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
spin_unlock_irq(&hp->happy_lock);
- dev->trans_start = jiffies;
-
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
}
@@ -2362,7 +2360,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -2380,8 +2378,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
u16 hash_table[4];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2483,7 +2481,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
else {
const struct linux_prom_registers *regs;
struct of_device *op = hp->happy_dev;
- regs = of_get_property(op->node, "regs", NULL);
+ regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d",
regs->which_io);
@@ -2643,14 +2641,14 @@ static const struct net_device_ops hme_netdev_ops = {
#ifdef CONFIG_SBUS
static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
{
- struct device_node *dp = op->node, *sbus_dp;
+ struct device_node *dp = op->dev.of_node, *sbus_dp;
struct quattro *qp = NULL;
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
int err = -ENODEV;
- sbus_dp = to_of_device(op->dev.parent)->node;
+ sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
/* We can match PCI devices too, do not accept those here. */
if (strcmp(sbus_dp->name, "sbus"))
@@ -2945,7 +2943,6 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(&dev_addr[3], 3);
- return;
}
#endif /* !(CONFIG_SPARC) */
@@ -3004,7 +3001,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->base_addr = (long) pdev;
hp = netdev_priv(dev);
- memset(hp, 0, sizeof(*hp));
hp->happy_dev = pdev;
hp->dma_dev = &pdev->dev;
@@ -3241,7 +3237,7 @@ static void happy_meal_pci_exit(void)
#ifdef CONFIG_SBUS
static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
int is_qfe = (match->data != NULL);
@@ -3295,8 +3291,11 @@ static const struct of_device_id hme_sbus_match[] = {
MODULE_DEVICE_TABLE(of, hme_sbus_match);
static struct of_platform_driver hme_sbus_driver = {
- .name = "hme",
- .match_table = hme_sbus_match,
+ .driver = {
+ .name = "hme",
+ .owner = THIS_MODULE,
+ .of_match_table = hme_sbus_match,
+ },
.probe = hme_sbus_probe,
.remove = __devexit_p(hme_sbus_remove),
};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0c21653ff9f..7d9c33dd9d1 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1003,7 +1003,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -1054,7 +1054,7 @@ static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int l
}
src = (char *) p16;
break;
- };
+ }
if (len >= 2) {
u16 val = src[0] << 8 | src[1];
sbus_writew(val, piobuf);
@@ -1160,7 +1160,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&lp->lock);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1170,7 +1169,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
u32 val;
@@ -1195,8 +1194,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -1324,7 +1323,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
struct of_device *ledma,
struct of_device *lebuffer)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
struct lance_private *lp;
struct net_device *dev;
@@ -1411,7 +1410,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
lp->burst_sizes = 0;
if (lp->ledma) {
- struct device_node *ledma_dp = ledma->node;
+ struct device_node *ledma_dp = ledma->dev.of_node;
struct device_node *sbus_dp;
unsigned int sbmask;
const char *prop;
@@ -1507,7 +1506,7 @@ fail:
static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
struct of_device *parent = to_of_device(op->dev.parent);
- struct device_node *parent_dp = parent->node;
+ struct device_node *parent_dp = parent->dev.of_node;
int err;
if (!strcmp(parent_dp->name, "ledma")) {
@@ -1546,8 +1545,11 @@ static const struct of_device_id sunlance_sbus_match[] = {
MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
static struct of_platform_driver sunlance_sbus_driver = {
- .name = "sunlance",
- .match_table = sunlance_sbus_match,
+ .driver = {
+ .name = "sunlance",
+ .owner = THIS_MODULE,
+ .of_match_table = sunlance_sbus_match,
+ },
.probe = sunlance_sbus_probe,
.remove = __devexit_p(sunlance_sbus_remove),
};
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index be637dce944..72b579c8d81 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -602,7 +602,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
- dev->trans_start = jiffies;
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -651,8 +650,8 @@ static void qe_set_multicast(struct net_device *dev)
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -696,7 +695,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->version, "3.0");
op = qep->op;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d", regs->which_io);
@@ -800,7 +799,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
if (qec_global_reset(qecp->gregs))
goto fail;
- qecp->qec_bursts = qec_get_burst(op->node);
+ qecp->qec_bursts = qec_get_burst(op->dev.of_node);
qec_init_once(qecp, op);
@@ -858,7 +857,7 @@ static int __devinit qec_ether_init(struct of_device *op)
res = -ENODEV;
- i = of_getintprop_default(op->node, "channel#", -1);
+ i = of_getintprop_default(op->dev.of_node, "channel#", -1);
if (i == -1)
goto fail;
qe->channel = i;
@@ -978,8 +977,11 @@ static const struct of_device_id qec_sbus_match[] = {
MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct of_platform_driver qec_sbus_driver = {
- .name = "qec",
- .match_table = qec_sbus_match,
+ .driver = {
+ .name = "qec",
+ .owner = THIS_MODULE,
+ .of_match_table = qec_sbus_match,
+ },
.probe = qec_sbus_probe,
.remove = __devexit_p(qec_sbus_remove),
};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7cea7f6..d281a7b3470 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -717,7 +717,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
out_dropped_unlock:
@@ -763,12 +762,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
- struct dev_addr_list *p;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
struct vnet_mcast_entry *m;
- m = __vnet_mc_find(vp, p->dmi_addr);
+ m = __vnet_mc_find(vp, ha->addr);
if (m) {
m->hit = 1;
continue;
@@ -778,7 +777,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
continue;
- memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ memcpy(m->addr, ha->addr, ETH_ALEN);
m->hit = 1;
m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c0d58..be08b75dbc1 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1357,8 +1357,6 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
}
lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
- dev->trans_start = jiffies;
-
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* layer to send no more.
@@ -1954,16 +1952,16 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
i = 0;
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* entry 0,1 is reserved. */
- tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
+ tc35815_set_cam_entry(dev, i + 2, ha->addr);
ena_bits |= CAM_Ena_Bit(i + 2);
i++;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f5493092521..20ab1619232 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
} else if (!netdev_mc_empty(ndev)) {
u8 hash;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 reg, val;
/* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames throu IMF */
/* accept the rest of addresses throu IMF */
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
- hash ^= mclist->dmi_addr[i];
+ hash ^= ha->addr[i];
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
val = READ_REG(priv, reg);
val |= (1 << (hash % 32));
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
priv->net_stats.rx_bytes += len;
skb_put(skb, len);
- skb->dev = priv->ndev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, priv->ndev);
@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- db->wptr->len = skb->len - skb->data_len;
+ db->wptr->len = skb_headlen(skb);
db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
db->wptr->len, PCI_DMA_TODEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -2034,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
- memset(priv, 0, sizeof(struct bdx_priv));
priv->pBdxRegs = nic->regs + port * 0x8000;
priv->port = port;
priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ecc41cffb47..573054ae7b5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define DRV_MODULE_VERSION "3.108"
-#define DRV_MODULE_RELDATE "February 17, 2010"
+#define DRV_MODULE_VERSION "3.110"
+#define DRV_MODULE_RELDATE "April 9, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+#define TG3_RX_DMA_ALIGN 16
+#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
+
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RSS_MIN_NUM_MSIX_VECS 2
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
+
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
@@ -152,6 +175,8 @@
#define TG3_NUM_TEST 6
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
-#define TG3_RSS_MIN_NUM_MSIX_VECS 2
-
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off));
+ return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->aperegs + off));
+ return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off + GRCMBOX_BASE));
+ return readl(tp->regs + off + GRCMBOX_BASE);
}
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off + GRCMBOX_BASE);
}
-#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
-#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
-#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
-#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
-#define tw32(reg,val) tp->write32(tp, reg, val)
-#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
-#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
-#define tr32(reg) tp->read32(tp, reg)
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return -EINVAL;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
}
off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
}
off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
+
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
+ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- netdev_warn(tp->dev, "No PHY devices\n");
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
- linkmesg = 1;
+ linkmesg = 1;
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- netdev_err(tp->dev, "Could not attach to PHY\n");
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Set Extended packet length bit for jumbo frames */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
- }
- else {
+ } else {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
@@ -1974,8 +1997,7 @@ out:
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
u32 phy_reg;
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->rxconfig = rx_cfg_reg;
ret = ANEG_OK;
- switch(ap->state) {
+ switch (ap->state) {
case ANEG_STATE_UNKNOWN:
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
/* fallthru */
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
- if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (delta > ANEG_STATE_SETTLE_TIME)
ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
- } else {
+ else
ret = ANEG_TIMER_ENAB;
- }
break;
case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
break;
case ANEG_STATE_ABILITY_DETECT:
- if (ap->ability_match != 0 && ap->rxconfig != 0) {
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
ap->state = ANEG_STATE_ACK_DETECT_INIT;
- }
break;
case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
- }
- else
+ } else {
current_link_up = 0;
+ }
}
}
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->serdes_counter--;
return;
}
+
if (!netif_carrier_ok(tp->dev) &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
}
}
- }
- else if (netif_carrier_ok(tp->dev) &&
- (tp->link_config.autoneg == AUTONEG_ENABLE) &&
- (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
u32 phy2;
/* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
int err;
- if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
err = tg3_setup_fiber_phy(tp, force_reset);
- } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
err = tg3_setup_fiber_mii_phy(tp, force_reset);
- } else {
+ else
err = tg3_setup_copper_phy(tp, force_reset);
- }
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
- "Please report the problem to the driver maintainer and include system chipset information.\n");
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
tx_bug = 1;
pci_unmap_page(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
sw_idx = NEXT_TX(sw_idx);
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
if (!ri->skb)
return;
- pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(ri->skb);
ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
}
map->skb = skb;
- pci_unmap_addr_set(map, mapping, mapping);
+ dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- int dest_idx;
struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ int dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
}
dest_map->skb = src_map->skb;
- pci_unmap_addr_set(dest_map, mapping,
- pci_unmap_addr(src_map, mapping));
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->prodring[0].rx_std_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
} else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
- if (len > RX_COPY_THRESHOLD &&
- tp->rx_offset == NET_IP_ALIGN) {
- /* rx_offset will likely not equal NET_IP_ALIGN
- * if this is a 5701 card running in PCI-X mode
- * [see tg3_get_invariants()]
- */
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev,
- len + TG3_RAW_IP_ALIGN);
+ copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- vlan_gro_receive(&tnapi->napi, tp->vlgrp,
- desc->err_vlan & RXD_VLAN_MASK, skb);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN,
+ ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+#if TG3_VLAN_TAG_USED
+ if (hw_vlan)
+ vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
+ else
#endif
napi_gro_receive(&tnapi->napi, skb);
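
A standalone sketch of the software 802.1Q re-insertion added above for the case where no vlan group is registered: the two MAC addresses are slid back into four bytes of headroom and the TPID/TCI pair is written behind them, leaving the original EtherType and payload in place. The buffer layout and demo frame are assumptions for illustration:

#include <arpa/inet.h>	/* htons() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define VLAN_HLEN 4

/* buf has VLAN_HLEN bytes of headroom; the Ethernet frame starts at
 * buf + VLAN_HLEN.  After the call the tagged frame starts at buf. */
static void insert_vlan_tag(uint8_t *buf, uint16_t vtag)
{
	uint16_t tpid = htons(0x8100);	/* ETH_P_8021Q */
	uint16_t tci  = htons(vtag);

	memmove(buf, buf + VLAN_HLEN, 2 * ETH_ALEN);	/* slide MACs down */
	memcpy(buf + 2 * ETH_ALEN, &tpid, sizeof(tpid));
	memcpy(buf + 2 * ETH_ALEN + 2, &tci, sizeof(tci));
}

int main(void)
{
	/* 4 bytes headroom + dst MAC + src MAC + EtherType 0x0800 */
	uint8_t buf[64] = {
		0, 0, 0, 0,
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
		0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,
		0x08, 0x00,
	};
	int i;

	insert_vlan_tag(buf, 42);
	for (i = 0; i < 22; i++)
		printf("%02x%c", buf[i], i == 21 ? '\n' : ' ');
	return 0;
}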
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
if (unlikely(work_done >= budget))
break;
- /* tp->last_tag is used in tg3_restart_ints() below
+ /* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
rmb();
/* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
- *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
napi_complete(napi);
/* Reenable interrupts. */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
len = skb_shinfo(skb)->frags[i-1].size;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
len, PCI_DMA_TODEVICE);
if (i == 0) {
tnapi->tx_buffers[entry].skb = new_skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
new_addr);
} else {
tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
tcp_hdr(skb)->check = 0;
- }
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
+
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto dma_error;
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
- return (tg3_tso_bug(tp, skb));
+ return tg3_tso_bug(tp, skb);
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
would_hit_hwbug = 0;
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
len, PCI_DMA_TODEVICE);
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
ethtool_op_set_tso(dev, 0);
- }
- else
+ } else {
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ }
} else {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
}
-/* Initialize tx/rx rings for packet processing.
+/* Initialize rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_jumbo_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
if (i == 0)
goto initfail;
tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
pci_unmap_page(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
- ofs, enable_bit);
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
return -ENODEV;
}
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- __func__, tr32(MAC_TX_MODE));
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
return;
switch (kind) {
- case RESET_KIND_INIT:
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
- APE_HOST_SEG_SIG_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
- APE_HOST_SEG_LEN_MAGIC);
- apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
- tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
- tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
- APE_HOST_DRIVER_ID_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
- APE_HOST_BEHAV_NO_PHYLOCK);
-
- event = APE_EVENT_STATUS_STATE_START;
- break;
- case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
- event = APE_EVENT_STATUS_STATE_UNLOAD;
- break;
- case RESET_KIND_SUSPEND:
- event = APE_EVENT_STATUS_STATE_SUSPEND;
- break;
- default:
- return;
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
}
event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
__func__);
return -EINVAL;
}
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
__func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
- if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
- }
if (reset_phy)
tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
- (RX_STD_MAX_SIZE << 2);
+ (TG3_RX_STD_DMA_SZ << 2);
else
- val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
FWCMD_NICDRV_ALIVE3);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
- /* 5 seconds timeout */
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
tg3_generate_fw_event(tp);
}
@@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
- "Please report this failure to the PCI maintainer and include system chipset information\n");
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8739,7 +8806,8 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
goto defcfg;
}
@@ -8914,236 +8982,6 @@ err_out1:
return err;
}
-#if 0
-/*static*/ void tg3_dump_state(struct tg3 *tp)
-{
- u32 val32, val32_2, val32_3, val32_4, val32_5;
- u16 val16;
- int i;
- struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
-
- pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
- printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
- val16, val32);
-
- /* MAC block */
- printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
- tr32(MAC_MODE), tr32(MAC_STATUS));
- printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
- tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
- printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
- tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
- printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
-
- /* Send data initiator control block */
- printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
- tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
- printk(" SNDDATAI_STATSCTRL[%08x]\n",
- tr32(SNDDATAI_STATSCTRL));
-
- /* Send data completion control block */
- printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
-
- /* Send BD ring selector block */
- printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
- tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
-
- /* Send BD initiator control block */
- printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
- tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
-
- /* Send BD completion control block */
- printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
-
- /* Receive list placement control block */
- printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
- tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
- printk(" RCVLPC_STATSCTRL[%08x]\n",
- tr32(RCVLPC_STATSCTRL));
-
- /* Receive data and receive BD initiator control block */
- printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
- tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
-
- /* Receive data completion control block */
- printk("DEBUG: RCVDCC_MODE[%08x]\n",
- tr32(RCVDCC_MODE));
-
- /* Receive BD initiator control block */
- printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
- tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
-
- /* Receive BD completion control block */
- printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
- tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
-
- /* Receive list selector control block */
- printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
- tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
-
- /* Mbuf cluster free block */
- printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
- tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
-
- /* Host coalescing control block */
- printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
- tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
- printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATS_BLK_NIC_ADDR));
- printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
-
- /* Memory arbiter control block */
- printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
- tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
-
- /* Buffer manager control block */
- printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
- tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
- printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
- printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
- "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_DMA_DESC_POOL_ADDR),
- tr32(BUFMGR_DMA_DESC_POOL_SIZE));
-
- /* Read DMA control block */
- printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
- tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
-
- /* Write DMA control block */
- printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
- tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
-
- /* DMA completion block */
- printk("DEBUG: DMAC_MODE[%08x]\n",
- tr32(DMAC_MODE));
-
- /* GRC block */
- printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
- tr32(GRC_MODE), tr32(GRC_MISC_CFG));
- printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
- tr32(GRC_LOCAL_CTRL));
-
- /* TG3_BDINFOs */
- printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_JUMBO_BD + 0x0),
- tr32(RCVDBDI_JUMBO_BD + 0x4),
- tr32(RCVDBDI_JUMBO_BD + 0x8),
- tr32(RCVDBDI_JUMBO_BD + 0xc));
- printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_STD_BD + 0x0),
- tr32(RCVDBDI_STD_BD + 0x4),
- tr32(RCVDBDI_STD_BD + 0x8),
- tr32(RCVDBDI_STD_BD + 0xc));
- printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_MINI_BD + 0x0),
- tr32(RCVDBDI_MINI_BD + 0x4),
- tr32(RCVDBDI_MINI_BD + 0x8),
- tr32(RCVDBDI_MINI_BD + 0xc));
-
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
- printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4, val32_5);
-
- /* SW status block */
- printk(KERN_DEBUG
- "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- sblk->status,
- sblk->status_tag,
- sblk->rx_jumbo_consumer,
- sblk->rx_consumer,
- sblk->rx_mini_consumer,
- sblk->idx[0].rx_producer,
- sblk->idx[0].tx_consumer);
-
- /* SW statistics block */
- printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
- ((u32 *)tp->hw_stats)[0],
- ((u32 *)tp->hw_stats)[1],
- ((u32 *)tp->hw_stats)[2],
- ((u32 *)tp->hw_stats)[3]);
-
- /* Mailboxes */
- printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
-
- /* NIC side send descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long txd;
-
- txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
- + (i * sizeof(struct tg3_tx_buffer_desc));
- printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
- i,
- readl(txd + 0x0), readl(txd + 0x4),
- readl(txd + 0x8), readl(txd + 0xc));
- }
-
- /* NIC side RX descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-}
-#endif
-
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
@@ -9162,9 +9000,6 @@ static int tg3_close(struct net_device *dev)
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
-#if 0
- tg3_dump_state(tp);
-#endif
tg3_disable_ints(tp);
@@ -9406,9 +9241,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
- if (tmp) {
+ if (tmp)
reg ^= 0xedb88320;
- }
}
}
@@ -9452,20 +9286,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode |= RX_MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
- tg3_set_multi (tp, 1);
+ tg3_set_multi(tp, 1);
} else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
- tg3_set_multi (tp, 0);
+ tg3_set_multi(tp, 0);
} else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- netdev_for_each_mc_addr(mclist, dev) {
- crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
bit &= 0x1f;
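
For reference, a self-contained sketch of how __tg3_set_rx_mode() buckets a multicast address into the four 32-bit hash registers: the CRC is the same reflected CRC-32 the driver's calc_crc() computes, the top two bits of the inverted CRC select the register and the low five bits select the bit within it. The sample address is only an example:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(const uint8_t *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			uint32_t lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;
		}
	}
	return ~reg;
}

int main(void)
{
	uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[4] = { 0, };

	uint32_t crc = crc32_le(mc_addr, 6);
	uint32_t bit = ~crc & 0x7f;		/* 7-bit bucket, 0..127 */
	uint32_t regidx = (bit & 0x60) >> 5;	/* which of the 4 registers */

	bit &= 0x1f;				/* bit within that register */
	mc_filter[regidx] |= 1u << bit;
	printf("crc=%08x -> HASH_%u bit %u (filter[%u]=%08x)\n",
	       (unsigned int)crc, (unsigned int)regidx + 1,
	       (unsigned int)bit, (unsigned int)regidx,
	       (unsigned int)mc_filter[regidx]);
	return 0;
}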
@@ -9618,7 +9452,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
memcpy(data, ((char*)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
- eeprom->len += b_count;
+ eeprom->len += b_count;
}
/* read bytes upto the last 4 byte boundary */
@@ -10166,8 +10000,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
spin_lock_bh(&tp->lock);
if (data)
@@ -10186,8 +10020,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10197,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
-static int tg3_get_sset_count (struct net_device *dev, int sset)
+static int tg3_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -10209,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
}
}
-static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
@@ -10256,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
return 0;
}
-static void tg3_get_ethtool_stats (struct net_device *dev,
+static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10362,8 +10196,7 @@ static int tg3_test_nvram(struct tg3 *tp)
for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
parity[k++] = buf8[i] & msk;
i++;
- }
- else if (i == 16) {
+ } else if (i == 16) {
int l;
u8 msk;
@@ -10461,7 +10294,7 @@ static int tg3_test_registers(struct tg3 *tp)
{ MAC_ADDR_0_HIGH, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_ADDR_0_LOW, 0x0000,
- 0x00000000, 0xffffffff },
+ 0x00000000, 0xffffffff },
{ MAC_RX_MTU_SIZE, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_TX_MODE, 0x0000,
@@ -10649,7 +10482,8 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- pr_err("Register test failed at offset %x\n", offset);
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10825,9 +10659,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
- }
- else
+ } else {
return -EINVAL;
+ }
err = -EIO;
@@ -10909,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
rx_skb = tpr->rx_std_buffers[desc_idx].skb;
- map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
for (i = 14; i < tx_len; i++) {
@@ -11083,7 +10917,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phydev, data, cmd);
}
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
@@ -11776,7 +11610,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
__func__);
return;
}
@@ -11895,7 +11730,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
if (ret)
break;
- page_off = offset & pagemask;
+ page_off = offset & pagemask;
size = pagesize;
if (len < size)
size = len;
@@ -11923,7 +11758,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
break;
/* Issue another write enable to start the write. */
@@ -11977,7 +11812,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
memcpy(&data, buf + i, 4);
tw32(NVRAM_WRDATA, be32_to_cpu(data));
- page_off = offset % tp->nvram_pagesize;
+ page_off = offset % tp->nvram_pagesize;
phy_addr = tg3_nvram_phys_addr(tp, offset);
@@ -11985,7 +11820,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
- if ((page_off == 0) || (i == 0))
+ if (page_off == 0 || i == 0)
nvram_cmd |= NVRAM_CMD_FIRST;
if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
@@ -12028,8 +11863,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
- }
- else {
+ } else {
u32 grc_mode;
ret = tg3_nvram_lock(tp);
@@ -12049,8 +11883,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
ret = tg3_nvram_write_block_buffered(tp, offset, len,
buf);
- }
- else {
+ } else {
ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
buf);
}
@@ -12545,11 +12378,11 @@ skip_phy_reset:
return err;
}
-static void __devinit tg3_read_partno(struct tg3 *tp)
+static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
+ u8 vpd_data[TG3_NVM_VPD_LEN];
unsigned int block_end, rosize, len;
- int i = 0;
+ int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12598,6 +12431,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
if (block_end > TG3_NVM_VPD_LEN)
goto out_not_found;
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
+ }
+
+partno:
i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_PARTNO);
if (i < 0)
@@ -12667,7 +12526,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
u32 val, offset, start, ver_offset;
- int i;
+ int i, dst_off;
bool newver = false;
if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12687,8 +12546,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
newver = true;
}
+ dst_off = strlen(tp->fw_ver);
+
if (newver) {
- if (tg3_nvram_read(tp, offset + 8, &ver_offset))
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
return;
offset = offset + ver_offset - start;
@@ -12697,7 +12559,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
if (tg3_nvram_read_be32(tp, offset + i, &v))
return;
- memcpy(tp->fw_ver + i, &v, sizeof(v));
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
}
} else {
u32 major, minor;
@@ -12708,7 +12570,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
TG3_NVM_BCVER_MAJSFT;
minor = ver_offset & TG3_NVM_BCVER_MINMSK;
- snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
}
}
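
A minimal sketch of the bounded string building the reworked version code relies on: each stage appends at strlen() with the remaining space as the snprintf limit, so the VPD prefix, the boot-code tag and the version numbers cannot overflow the fixed buffer. VER_SIZE and the sample contents below are assumptions:

#include <stdio.h>
#include <string.h>

#define VER_SIZE 32	/* stands in for TG3_VER_SIZE */

static void append_bc_ver(char *fw_ver, unsigned int major, unsigned int minor)
{
	size_t off = strlen(fw_ver);

	/* append "vM.mm" without ever writing past VER_SIZE */
	snprintf(fw_ver + off, VER_SIZE - off, "v%u.%02u", major, minor);
}

int main(void)
{
	char fw_ver[VER_SIZE] = "5761-v1.36 bc ";	/* e.g. set from VPD */

	append_bc_ver(fw_ver, 5, 2);
	printf("%s\n", fw_ver);		/* "5761-v1.36 bc v5.02" */
	return 0;
}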
@@ -12732,9 +12595,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
u32 offset, major, minor, build;
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
return;
@@ -12771,11 +12632,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
if (minor > 99 || build > 26)
return;
- snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
if (build > 0) {
- tp->fw_ver[8] = 'a' + build - 1;
- tp->fw_ver[9] = '\0';
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
}
}
@@ -12862,12 +12726,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
+ bool vpd_vers = false;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ strcat(tp->fw_ver, "sb");
return;
}
@@ -12884,11 +12749,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
return;
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
- return;
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
+ goto done;
tg3_read_mgmtfw_ver(tp);
+done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -12898,9 +12764,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ PCI_DEVICE_ID_AMD_FE_GATE_700C) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -13066,8 +12932,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
- }
- else {
+ } else {
struct pci_dev *bridge = NULL;
do {
@@ -13129,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->dev->features |= NETIF_F_IPV6_CSUM;
+ tp->dev->features |= NETIF_F_GRO;
}
/* Determine TSO capabilities */
@@ -13189,8 +13055,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13224,7 +13090,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- pr_err("Cannot find PCI-X capability, aborting\n");
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13421,7 +13288,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
+ dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
return err;
}
@@ -13595,13 +13462,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
err = tg3_phy_probe(tp);
if (err) {
- pr_err("(%s) phy probe failed, err %d\n",
- pci_name(tp->pdev), err);
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
}
- tg3_read_partno(tp);
+ tg3_read_vpd(tp);
tg3_read_fw_ver(tp);
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13639,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
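
A worked sketch of the rx_offset/rx_copy_thresh selection above, assuming NET_IP_ALIGN = 2, VLAN_HLEN = 4 and TG3_RX_DMA_ALIGN = 16; the 5701-in-PCI-X and unaligned-access cases are reduced to a flag for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NET_IP_ALIGN	2
#define VLAN_HLEN	4
#define RX_DMA_ALIGN	16
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define RX_HEADROOM	ALIGN(VLAN_HLEN, RX_DMA_ALIGN)	/* 16 */
#define COPY_THRESHOLD	256

int main(void)
{
	bool is_5701_pcix_no_fast_unaligned = true;	/* assumption */
	unsigned int rx_offset = NET_IP_ALIGN + RX_HEADROOM;	/* 18 */
	uint16_t rx_copy_thresh = COPY_THRESHOLD;

	if (is_5701_pcix_no_fast_unaligned) {
		rx_offset -= NET_IP_ALIGN;	/* keep buffers dword aligned */
		rx_copy_thresh = (uint16_t)~0;	/* force the copy path */
	}
	printf("rx_offset=%u rx_copy_thresh=%u\n",
	       rx_offset, (unsigned int)rx_copy_thresh);
	return 0;
}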
@@ -13965,11 +13836,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
}
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- if (to_device) {
+ if (to_device)
tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
- } else {
+ else
tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
- }
ret = -ENODEV;
for (i = 0; i < 40; i++) {
@@ -14105,8 +13975,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- pr_err("tg3_test_dma() Write the buffer failed %d\n",
- ret);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
break;
}
@@ -14116,8 +13987,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
- val, i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14126,9 +13998,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- pr_err("tg3_test_dma() Read the buffer failed %d\n",
- ret);
-
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
break;
}
@@ -14144,8 +14015,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
- p[i], i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14172,10 +14044,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (pci_dev_present(dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
- }
- else
+ } else {
/* Safe to use the calculated DMA boundary. */
tp->dma_rwctrl = saved_dma_rwctrl;
+ }
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}
@@ -14437,13 +14309,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14452,14 +14324,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- pr_err("Cannot find PowerManagement capability, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14509,7 +14382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- netdev_err(dev, "Cannot map device registers, aborting\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14525,7 +14398,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14560,7 +14434,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14568,7 +14443,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- netdev_err(dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14617,14 +14493,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- netdev_err(dev, "Cannot map APE registers, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14648,7 +14526,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- netdev_err(dev, "DMA engine test failed, aborting\n");
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14709,7 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- netdev_err(dev, "Cannot register net device, aborting\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
@@ -14722,11 +14600,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
} else
- netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tg3_phy_string(tp),
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d])\n", tg3_phy_string(tp),
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
"10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc4d35..ce9c4918c31 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
-
#define TG3_RX_INTERNAL_RING_SZ_5906 32
-#define RX_STD_MAX_SIZE 1536
#define RX_STD_MAX_SIZE_5705 512
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
@@ -183,6 +180,7 @@
#define METAL_REV_B2 0x02
#define TG3PCI_DMA_RW_CTRL 0x0000006c
#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
/* 0x94 --> 0x98 unused */
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
-/* 0xa0 --> 0xb8 unused */
+/* 0xa8 --> 0xb8 unused */
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
#define TG3_PCIE_TLDLPL_PORT 0x00007c00
#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_AADJ1CH3 0x601f
#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
-#define MII_TG3_DSP_EXP8 0x0708
+#define MII_TG3_DSP_EXP8 0x0f08
#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
#define MII_TG3_DSP_EXP8_AEDW 0x0200
#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
*/
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
struct tg3_ethtool_stats {
/* Statistics maintained by Receive MAC. */
- u64 rx_octets;
+ u64 rx_octets;
u64 rx_fragments;
u64 rx_ucast_packets;
u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
+ u32 rx_copy_thresh;
u32 rx_pending;
u32 rx_jumbo_pending;
u32 rx_std_max_post;
+ u32 rx_offset;
u32 rx_pkt_map_sz;
#if TG3_VLAN_TAG_USED
struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
unsigned long last_event_jiffies;
};
- u32 rx_offset;
u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
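/*
 * Editor's note (not part of the patch): the ring_info hunk above replaces the
 * PCI-specific DECLARE_PCI_UNMAP_ADDR() with the bus-agnostic
 * DEFINE_DMA_UNMAP_ADDR() from <linux/dma-mapping.h>; on architectures that do
 * not need unmap state the field may compile away, so it is only touched
 * through the accessor macros.  A hedged sketch of the usual pattern;
 * example_ring_info and the helpers around it are illustrative, not from tg3.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static void example_store_mapping(struct device *dev,
				  struct example_ring_info *ri,
				  void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* accessor hides whether the field physically exists */
	dma_unmap_addr_set(ri, mapping, addr);
}

static void example_drop_mapping(struct device *dev,
				 struct example_ring_info *ri, size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(ri, mapping), len, DMA_TO_DEVICE);
}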
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c101c..ccee3eddc5f 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1034,7 +1034,7 @@ static void TLan_tx_timeout(struct net_device *dev)
TLan_ResetLists( dev );
TLan_ReadAndClearStats( dev, TLAN_IGNORE );
TLan_ResetAdapter( dev );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue( dev );
}
@@ -1147,7 +1147,6 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} /* TLan_StartTx */
@@ -1314,7 +1313,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1336,12 +1335,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
- (char *) &dmi->dmi_addr );
+ (char *) &ha->addr);
} else {
- offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ offset = TLan_HashFunc((u8 *)&ha->addr);
if ( offset < 32 )
hash1 |= ( 1 << offset );
else
@@ -2464,7 +2463,7 @@ static void TLan_PhyPrint( struct net_device *dev )
printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
} else if ( phy <= TLAN_PHY_MAX_ADDR ) {
printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ printk( "TLAN: Off. +0 +1 +2 +3\n" );
for ( i = 0; i < 0x20; i+= 4 ) {
printk( "TLAN: 0x%02x", i );
TLan_MiiReadReg( dev, phy, i, &data0 );
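/*
 * Editor's sketch (not part of the patch): tlan.c above, and several of the
 * token-ring and tulip drivers that follow, convert their multicast walk from
 * the old struct dev_mc_list / dmi_addr pair to struct netdev_hw_addr / addr,
 * which is what netdev_for_each_mc_addr() now hands out.  A minimal,
 * hypothetical set_rx_mode showing only that iteration pattern:
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u8 func_addr[4] = { 0, 0, 0, 0 };

	/* fold bytes 2..5 of every multicast address into a functional mask,
	 * the same scheme the drivers in this patch use */
	netdev_for_each_mc_addr(ha, dev) {
		func_addr[0] |= ha->addr[2];
		func_addr[1] |= ha->addr[3];
		func_addr[2] |= ha->addr[4];
		func_addr[3] |= ha->addr[5];
	}

	/* a real driver would now program func_addr into its hardware filter */
	(void)func_addr;
}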
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3eef1ab..10800f16a23 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
#define FW_NAME "3com/3C359.bin"
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
-MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
MODULE_FIRMWARE(FW_NAME);
/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head,
+ printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
- printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n");
+ printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
for (i = 0; i < 16; i++) {
txd = &(xl_priv->xl_tx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
}
- printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) );
+ printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x\n",netif_running(dev) ) ;
}
static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ;
- printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n");
+ printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
+ printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
for (i = 0; i < 16; i++) {
/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
rxd = &(xl_priv->xl_rx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
}
- printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) );
+ printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
+ printk("Queue status = %0x\n",netif_running(dev));
}
#endif
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
struct xl_private *xl_priv = netdev_priv(dev);
int err;
- printk(KERN_INFO "%s \n", version);
+ printk(KERN_INFO "%s\n", version);
printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
#if XL_DEBUG
- printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif
if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
#if XL_DEBUG
writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
- printk(KERN_INFO "Default ring speed 4 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 4 mbps\n");
} else {
- printk(KERN_INFO "Default ring speed 16 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 16 mbps\n");
}
printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
if (open_err != 0) { /* Something went wrong with the open command */
if (open_err & 0x07) { /* Wrong speed, retry at different speed */
- printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
switchsettings = switchsettings ^ 2 ;
xl_ee_write(dev,0x08,switchsettings) ;
xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
}
if (i==0) {
- printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
free_irq(dev->irq,dev) ;
kfree(xl_priv->xl_tx_ring);
kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
- printk(", ARB: %04x \n",xl_priv->arb ) ;
+ printk(", ARB: %04x\n",xl_priv->arb );
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
}
ver_str[i] = '\0' ;
- printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str);
+ printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
}
/*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
if (skb==NULL) { /* Still need to fix the rx ring */
- printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
+ printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
adv_rx_ring(dev) ;
dev->stats.rx_dropped++ ;
writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus == 0x0001) {
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
- printk(KERN_INFO "%s: 00001 int received \n",dev->name) ;
+ printk(KERN_INFO "%s: 00001 int received\n",dev->name);
} else {
if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus & HOSTERRINT) {
- printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ;
+ printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
Must put a timeout check here ! */
/* Empty Loop */
}
- printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ;
+ printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
} /* TxUnderRun */
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
macstatus = readw(xl_mmio + MMIO_MACDATA) ;
printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
if (macstatus & (1<<14))
- printk(KERN_WARNING "tchk error: Unrecoverable error \n") ;
+ printk(KERN_WARNING "tchk error: Unrecoverable error\n");
if (macstatus & (1<<3))
- printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ;
+ printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
if (macstatus & (1<<2))
- printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ;
+ printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
} else {
- printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ;
+ printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
}
}
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
- printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ;
+ printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
} else {
writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if (readb(xl_mmio + MMIO_MACDATA)==0) {
- printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ;
+ printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
u16 options ;
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
break ;
case 4:
- printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
break ;
case 6:
- printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
break ;
case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
break ;
case SET_FUNC_ADDRESS:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
break ;
case CLOSE_NIC:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ;
+ printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
break ;
case SET_MULTICAST_MODE:
if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
case SET_RECEIVE_MODE:
if(xl_priv->xl_message_level) {
if (xl_priv->xl_copy_all_options == 0x0004)
- printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ;
+ printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
else
- printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ;
+ printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
}
break ;
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
xl_freemem(dev) ;
free_irq(dev->irq,dev);
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (xl_priv->xl_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_CO) {
if (xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
xl_srb_cmd(dev, READ_LOG) ;
}
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
} /* Lan.change.status */
else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
#if XL_DEBUG
- printk(KERN_INFO "Received.Data \n") ;
+ printk(KERN_INFO "Received.Data\n");
#endif
writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
xl_asb_cmd(dev) ;
} else {
- printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ;
+ printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
}
/* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
ret_code = readb(xl_mmio + MMIO_MACDATA) ;
switch (ret_code) {
case 0x01:
- printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
break ;
case 0x26:
- printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
break ;
case 0x40:
- printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
break ;
}
xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a0967246e2..91e6c78271a 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
static void tok_set_multicast_list(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
unsigned char address[4];
int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- address[0] |= mclist->dmi_addr[2];
- address[1] |= mclist->dmi_addr[3];
- address[2] |= mclist->dmi_addr[4];
- address[3] |= mclist->dmi_addr[5];
+ netdev_for_each_mc_addr(ha, dev) {
+ address[0] |= ha->addr[2];
+ address[1] |= ha->addr[3];
+ address[2] |= ha->addr[4];
+ address[3] |= ha->addr[5];
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
@@ -1041,7 +1041,6 @@ static netdev_tx_t tok_send_packet(struct sk_buff *skb,
writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
spin_unlock_irqrestore(&(ti->lock), flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5a9d7..5bd14070453 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
pcr |= PCI_COMMAND_SERR;
pci_write_config_word (pdev, PCI_COMMAND, pcr);
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
streamer_priv->streamer_card_name,
(unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
#if STREAMER_DEBUG
writew(readw(streamer_mmio + LAPWWO),
streamer_mmio + LAPA);
- printk("srb open request: \n");
+ printk("srb open request:\n");
for (i = 0; i < 16; i++) {
printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
}
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
if (srb_word != 0) {
if (srb_word == 0x07) {
if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n",
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n",
dev->name);
open_finished = 0;
} else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
((error_code & 0x0f) == 0x0d))
{
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
free_irq(dev->irq, dev);
return -EIO;
}
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
if (rx_desc->status & 0x7E830000) { /* errors */
if (streamer_priv->streamer_message_level) {
- printk(KERN_WARNING "%s: Rx Error %x \n",
+ printk(KERN_WARNING "%s: Rx Error %x\n",
dev->name, rx_desc->status);
}
} else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
if (skb == NULL)
{
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
dev->stats.rx_dropped++;
} else { /* we allocated an skb OK */
if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
netdev_priv(dev);
__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
__u8 options = 0;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[5];
writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n", dev->name);
+ printk(KERN_INFO "%s: Group address not found\n", dev->name);
break;
default:
break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
+ printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
}
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
(streamer_mmio + LAPDINC)));
}
- printk("next %04x, fs %02x, len %04x \n", next,
+ printk("next %04x, fs %02x, len %04x\n", next,
status, len);
}
#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
/* If serious error */
if (streamer_priv->streamer_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n", dev->name);
+ printk(KERN_INFO "%s: Beaconing\n", dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1716,7 +1716,7 @@ drop_frame:
streamer_priv->streamer_lan_status = lan_status;
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
switch (rc) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 53f631ebb16..785ad1a2157 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -109,7 +109,6 @@ static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsign
SIFWRITEB(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
/*
@@ -140,7 +139,6 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
SIFWRITEW(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
static struct net_device_ops madgemc_netdev_ops __read_mostly;
@@ -505,8 +503,6 @@ static void madgemc_setregpage(struct net_device *dev, int page)
dev->base_addr + MC_CONTROL_REG1);
}
reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-
- return;
}
/*
@@ -527,8 +523,6 @@ static void madgemc_setsifsel(struct net_device *dev, int val)
dev->base_addr + MC_CONTROL_REG0);
}
reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
-
- return;
}
/*
@@ -550,8 +544,6 @@ static void madgemc_setint(struct net_device *dev, int val)
outb(reg1 | MC_CONTROL_REG1_SINTEN,
dev->base_addr + MC_CONTROL_REG1);
}
-
- return;
}
/*
@@ -594,8 +586,6 @@ static void madgemc_chipset_close(struct net_device *dev)
madgemc_setint(dev, 0);
/* unmap SIF registers */
madgemc_setsifsel(dev, 0);
-
- return;
}
/*
@@ -656,8 +646,6 @@ static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
/* Restore original register values */
outb(reg0, ioaddr + MC_CONTROL_REG0);
outb(reg1, ioaddr + MC_CONTROL_REG1);
-
- return;
}
static int madgemc_open(struct net_device *dev)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e0434ae..3d2fbe60b46 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
olympic_priv=netdev_priv(dev);
olympic_mmio=olympic_priv->olympic_mmio;
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
- printk("Before the open command \n");
+ printk("Before the open command\n");
#endif
do {
memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
break;
}
if (time_after(jiffies, t + 10*HZ)) {
- printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
olympic_priv->srb_queued=0;
break ;
}
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
break;
case 0x07:
if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
open_finished = 0 ;
continue;
}
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
} else {
printk(KERN_WARNING "%s: %s - %s\n", dev->name,
open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
- printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
+ printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
if (l_status_buffercnt & 0x3B000000) {
if (olympic_priv->olympic_message_level) {
if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
- printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
- printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
if (l_status_buffercnt & (1<<27)) /* No receive buffers */
- printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
+ printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
- printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
+ printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
- printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
+ printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
}
if (skb == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
}
if (t == 0) {
- printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
}
olympic_priv->srb_queued=0;
}
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n",dev->name);
+ printk(KERN_INFO "%s: Group address not found\n",dev->name);
break ;
default:
break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
printk("Loc %d = %02x\n",i,readb(frame_data + i));
}
- printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+ printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
if (olympic_priv->olympic_network_monitor) {
struct trh_hdr *mac_hdr;
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (olympic_priv->olympic_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1551,7 +1551,7 @@ drop_frame:
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
if (olympic_priv->asb_queued == 2) {
switch (readb(asb_block+2)) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break ;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break ;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index e40560137c4..0929fff5982 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4562,7 +4562,7 @@ static void smctr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in sktr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -5147,8 +5147,6 @@ static void smctr_set_multicast_list(struct net_device *dev)
{
if(smctr_debug > 10)
printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
-
- return;
}
static int smctr_set_page(struct net_device *dev, __u8 *buf)
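/*
 * Editor's sketch (not part of the patch): the trans_start hunks in this and
 * the surrounding drivers follow one rule -- ndo_start_xmit no longer stamps
 * dev->trans_start (the core updates the tx queue itself), while tx_timeout
 * handlers that reset the hardware still refresh it so the watchdog does not
 * fire again immediately.  A hypothetical timeout handler in that style:
 */
#include <linux/netdevice.h>
#include <linux/jiffies.h>

static void example_tx_timeout(struct net_device *dev)
{
	/* hardware reset / ring reinit would go here */

	dev->trans_start = jiffies;	/* prevent tx timeout from re-firing */
	netif_wake_queue(dev);
}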
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c92241..435ef7d5470 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -325,8 +325,6 @@ static void tms380tr_timer_end_wait(unsigned long data)
tp->Sleeping = 0;
wake_up_interruptible(&tp->wait_for_tok_int);
}
-
- return;
}
/*
@@ -460,8 +458,6 @@ static void tms380tr_init_net_local(struct net_device *dev)
tp->RplHead = &tp->Rpl[0];
tp->RplTail = &tp->Rpl[RPL_NUM-1];
tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
-
- return;
}
/*
@@ -481,8 +477,6 @@ static void tms380tr_init_ipb(struct net_local *tp)
tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
tp->ipb.SCB_Addr = 0;
tp->ipb.SSB_Addr = 0;
-
- return;
}
/*
@@ -527,8 +521,6 @@ static void tms380tr_init_opb(struct net_device *dev)
tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
-
- return;
}
/*
@@ -543,8 +535,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
tp->OpenCommandIssued = 1;
tms380tr_exec_cmd(dev, OC_OPEN);
-
- return;
}
/*
@@ -554,8 +544,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
static void tms380tr_disable_interrupts(struct net_device *dev)
{
SIFWRITEB(0, SIFACL);
-
- return;
}
/*
@@ -565,8 +553,6 @@ static void tms380tr_disable_interrupts(struct net_device *dev)
static void tms380tr_enable_interrupts(struct net_device *dev)
{
SIFWRITEB(ACL_SINTEN, SIFACL);
-
- return;
}
/*
@@ -578,8 +564,6 @@ static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
tp->CMDqueue |= Command;
tms380tr_chk_outstanding_cmds(dev);
-
- return;
}
static void tms380tr_timeout(struct net_device *dev)
@@ -592,7 +576,7 @@ static void tms380tr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in tms380tr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -712,8 +696,6 @@ static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
SRBit = frame[8] & 0x80;
memcpy(&frame[8], hw_addr, 6);
frame[8] |= SRBit;
-
- return;
}
/*
@@ -743,8 +725,6 @@ static void tms380tr_timer_chk(unsigned long data)
return;
tp->ReOpenInProgress = 1;
tms380tr_open_adapter(dev);
-
- return;
}
/*
@@ -863,8 +843,6 @@ static void tms380tr_reset_interrupt(struct net_device *dev)
* and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
*/
tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
-
- return;
}
/*
@@ -1119,8 +1097,6 @@ static void tms380tr_cmd_status_irq(struct net_device *dev)
tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
}
-
- return;
}
/*
@@ -1211,17 +1187,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
- mclist->dmi_addr[2];
+ ha->addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
- mclist->dmi_addr[3];
+ ha->addr[3];
((char *)(&tp->ocpl.FunctAddr))[2] |=
- mclist->dmi_addr[4];
+ ha->addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
- mclist->dmi_addr[5];
+ ha->addr[5];
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1229,7 +1205,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
tp->ocpl.OPENOptions = OpenOptions;
tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
- return;
}
/*
@@ -1247,7 +1222,6 @@ void tms380tr_wait(unsigned long time)
#else
udelay(time);
#endif
- return;
}
/*
@@ -1266,8 +1240,6 @@ static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue
SifStsValue = SIFREADW(SIFSTS);
} while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
SIFWRITEW(cmd, SIFCMD);
-
- return;
}
/*
@@ -1390,7 +1362,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
Status &= STS_MASK;
if(tms380tr_debug > 3)
- printk(KERN_DEBUG " %04X \n", Status);
+ printk(KERN_DEBUG " %04X\n", Status);
/* BUD successfully completed */
if(Status == STS_INITIALIZE)
return (1);
@@ -1700,8 +1672,6 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
/* Execute SCB and generate IRQ when done. */
tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
-
- return;
}
/*
@@ -1774,8 +1744,6 @@ static void tms380tr_ring_status_irq(struct net_device *dev)
tp->AdapterOpenFlag = 0;
tms380tr_open_adapter(dev);
}
-
- return;
}
/*
@@ -1846,7 +1814,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
break;
case DMA_WRITE_ABORT:
- printk(KERN_INFO "%s: DMA write operation aborted: \n",
+ printk(KERN_INFO "%s: DMA write operation aborted:\n",
dev->name);
switch (AdapterCheckBlock[1])
{
@@ -1932,8 +1900,6 @@ static void tms380tr_chk_irq(struct net_device *dev)
/* Restart of firmware successful */
tp->AdapterOpenFlag = 1;
}
-
- return;
}
/*
@@ -1988,8 +1954,6 @@ static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
/* Restore original values */
SIFWRITEW(old_sifadx, SIFADX);
SIFWRITEW(old_sifadr, SIFADR);
-
- return;
}
/*
@@ -2021,8 +1985,6 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(tpl->Skb);
}
-
- return;
}
/*
@@ -2094,7 +2056,6 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
if(!tp->TplFree->NextTPLPtr->BusyFlag)
netif_wake_queue(dev);
- return;
}
/*
@@ -2255,8 +2216,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
/* Inform adapter about RPL valid. */
tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
}
-
- return;
}
/*
@@ -2269,8 +2228,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
{
rpl->Status = Status;
-
- return;
}
/*
@@ -2287,8 +2244,6 @@ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPt
/* Test functional bit */
if(DataPtr[2] & GROUP_BIT)
tp->MacStat.multicast++;
-
- return;
}
static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
@@ -2318,8 +2273,6 @@ static void tms380tr_dump(unsigned char *Data, int length)
Data[j+0],Data[j+1],Data[j+2],Data[j+3],
Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
}
-
- return;
}
#endif
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3c3b5..a03730bd1da 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
if (i == 0) {
data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
- skb->len - skb->data_len, DMA_TO_DEVICE);
- data->txring[tx].len = skb->len - skb->data_len;
+ skb_headlen(skb), DMA_TO_DEVICE);
+ data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- netdev_for_each_mc_addr(mc, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 hash, crc;
- crc = ether_crc(6, mc->dmi_addr);
+ crc = ether_crc(6, ha->addr);
hash = crc >> 23;
__set_bit(hash, &data->mc_hash[0]);
}
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
udelay(10);
}
if (i == 0)
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
if (data->phy_type == TSI108_PHY_BCM54XX) {
tsi108_write_mii(data, 0x09, 0x0300);
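/*
 * Editor's note (not part of the patch): the tsi108 transmit hunk above uses
 * skb_headlen(skb), which is exactly skb->len - skb->data_len, i.e. the bytes
 * in the linear head of the skb before any paged fragments.  A small
 * illustrative helper mapping just that head; example_map_head and its output
 * parameters are invented for illustration.
 */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void example_map_head(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *buf, unsigned int *len)
{
	/* linear head only; paged fragments would be mapped one per slot */
	*len = skb_headlen(skb);	/* == skb->len - skb->data_len */
	*buf = dma_map_single(dev, skb->data, *len, DMA_TO_DEVICE);
}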
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2b418..c0e70006374 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -654,7 +654,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
/* Trigger an immediate transmit demand. */
dw32(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -671,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -700,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b57193a16..75a64c88cf7 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1337,7 +1337,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
START_DE4X5;
@@ -1507,7 +1507,6 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
- dev->trans_start = jiffies;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1884,8 +1883,6 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
}
-
- return;
}
/*
@@ -1937,7 +1934,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
}
}
@@ -1951,7 +1948,7 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i, bit, byte;
u16 hashcode;
@@ -1966,8 +1963,8 @@ SetMulticastFilter(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1980,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
@@ -1992,8 +1989,6 @@ SetMulticastFilter(struct net_device *dev)
}
}
outl(omr, DE4X5_OMR);
-
- return;
}
#ifdef CONFIG_EISA
@@ -2188,8 +2183,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
return;
}
}
-
- return;
}
/*
@@ -3292,8 +3285,6 @@ de4x5_init_connection(struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);
netif_wake_queue(dev);
-
- return;
}
/*
@@ -3665,8 +3656,6 @@ de4x5_free_rx_buffs(struct net_device *dev)
lp->rx_ring[i].status = 0;
lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
}
-
- return;
}
static void
@@ -3709,8 +3698,6 @@ de4x5_save_skbs(struct net_device *dev)
lp->cache.save_cnt++;
START_DE4X5;
}
-
- return;
}
static void
@@ -3742,8 +3729,6 @@ de4x5_rst_desc_ring(struct net_device *dev)
lp->cache.save_cnt--;
START_DE4X5;
}
-
- return;
}
static void
@@ -3772,8 +3757,6 @@ de4x5_cache_state(struct net_device *dev, int flag)
}
break;
}
-
- return;
}
static void
@@ -3846,8 +3829,6 @@ de4x5_setup_intr(struct net_device *dev)
outl(sts, DE4X5_STS);
ENABLE_IRQs;
}
-
- return;
}
/*
@@ -3880,8 +3861,6 @@ reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
outl(csr13, DE4X5_SICR);
mdelay(10);
-
- return;
}
/*
@@ -3902,8 +3881,6 @@ create_packet(struct net_device *dev, char *frame, int len)
*buf++ = 0; /* Packet length (2 bytes) */
*buf++ = 1;
-
- return;
}
/*
@@ -4007,8 +3984,6 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
}
de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
}
-
- return;
}
/*
@@ -4046,8 +4021,6 @@ enet_addr_rst(u_long aprom_addr)
}
}
}
-
- return;
}
/*
@@ -4187,8 +4160,6 @@ srom_repair(struct net_device *dev, int card)
lp->useSROM = true;
break;
}
-
- return;
}
/*
@@ -4262,8 +4233,6 @@ srom_latch(u_int command, u_long addr)
sendto_srom(command, addr);
sendto_srom(command | DT_CLK, addr);
sendto_srom(command, addr);
-
- return;
}
static void
@@ -4272,8 +4241,6 @@ srom_command(u_int command, u_long addr)
srom_latch(command, addr);
srom_latch(command, addr);
srom_latch((command & 0x0000ff00) | DT_CS, addr);
-
- return;
}
static void
@@ -4288,8 +4255,6 @@ srom_address(u_int command, u_long addr, u_char offset)
udelay(1);
i = (getfrom_srom(addr) >> 3) & 0x01;
-
- return;
}
static short
@@ -4323,8 +4288,6 @@ srom_busy(u_int command, u_long addr)
}
sendto_srom(command & 0x0000ff00, addr);
-
- return;
}
*/
@@ -4333,8 +4296,6 @@ sendto_srom(u_int command, u_long addr)
{
outl(command, addr);
udelay(1);
-
- return;
}
static int
@@ -4433,8 +4394,6 @@ srom_init(struct net_device *dev)
p += ((*p & BLOCK_LEN) + 1);
}
}
-
- return;
}
/*
@@ -4463,8 +4422,6 @@ srom_exec(struct net_device *dev, u_char *p)
outl(lp->cache.csr14, DE4X5_STRR);
outl(lp->cache.csr13, DE4X5_SICR);
}
-
- return;
}
/*
@@ -4889,8 +4846,6 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
data = mii_swap(data, 16); /* Swap data bit ordering */
mii_wdata(data, 16, ioaddr); /* Write data */
-
- return;
}
static int
@@ -4916,8 +4871,6 @@ mii_wdata(int data, int len, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, data, ioaddr);
data >>= 1;
}
-
- return;
}
static void
@@ -4930,8 +4883,6 @@ mii_address(u_char addr, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
addr >>= 1;
}
-
- return;
}
static void
@@ -4943,8 +4894,6 @@ mii_ta(u_long rw, u_long ioaddr)
} else {
getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
}
-
- return;
}
static int
@@ -4971,8 +4920,6 @@ sendto_mii(u32 command, int data, u_long ioaddr)
udelay(1);
outl(command | MII_MDC | j, ioaddr);
udelay(1);
-
- return;
}
static int
@@ -5077,7 +5024,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
lp->mii_cnt++;
lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
j = de4x5_debug;
de4x5_debug |= DEBUG_MII;
de4x5_dbg_mii(dev, k);
@@ -5186,8 +5133,6 @@ gep_wr(s32 data, struct net_device *dev)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
}
-
- return;
}
static int
@@ -5247,8 +5192,6 @@ yawn(struct net_device *dev, int state)
break;
}
}
-
- return;
}
static void
@@ -5290,8 +5233,6 @@ de4x5_parse_params(struct net_device *dev)
}
*q = t;
}
-
- return;
}
static void
@@ -5337,12 +5278,10 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
+ printk("Ring size:\nRX: %d\nTX: %d\n",
(short)lp->rxRingSize,
(short)lp->txRingSize);
}
-
- return;
}
static void
@@ -5369,8 +5308,6 @@ de4x5_dbg_mii(struct net_device *dev, int k)
printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
}
}
-
- return;
}
static void
@@ -5395,8 +5332,6 @@ de4x5_dbg_media(struct net_device *dev)
}
lp->c_media = lp->media;
}
-
- return;
}
static void
@@ -5417,8 +5352,6 @@ de4x5_dbg_srom(struct de4x5_srom *p)
printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
}
}
-
- return;
}
static void
@@ -5440,8 +5373,6 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
printk("\n");
}
}
-
- return;
}
/*
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156dea9..29e6c63d39f 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1118,7 +1118,6 @@ static void dmfe_ethtool_get_wol(struct net_device *dev,
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = db->wol_mode;
- return;
}
@@ -1180,11 +1179,11 @@ static void dmfe_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
/* TX Timeout */
- if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
+ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
dev_warn(&dev->dev, "Tx timeout - resetting\n");
@@ -1453,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void dm9132_id_table(struct DEVICE *dev)
{
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 * addrptr;
unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
u32 hash_val;
@@ -1477,8 +1476,8 @@ static void dm9132_id_table(struct DEVICE *dev)
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1496,7 +1495,7 @@ static void dm9132_id_table(struct DEVICE *dev)
static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1519,8 +1518,8 @@ static void send_filter_frame(struct DEVICE *dev)
*suptr++ = 0xffff;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
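The dmfe, uli526x and pnic hunks in this series replace direct reads of dev->trans_start with the dev_trans_start() helper when checking for stalled transmits. A minimal sketch of that check, assuming kernel context; MY_TX_TIMEOUT and my_tx_stalled() are made-up names for illustration only:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

#define MY_TX_TIMEOUT (6 * HZ)	/* hypothetical timeout, not from the patch */

static bool my_tx_stalled(struct net_device *dev)
{
	/* dev_trans_start() returns the newest trans_start across TX queues */
	return time_after(jiffies, dev_trans_start(dev) + MY_TX_TIMEOUT);
}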
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index 68b170ae4d1..a0c770ee4b6 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -396,8 +396,6 @@ void tulip_select_media(struct net_device *dev, int startup)
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
mdelay(1);
-
- return;
}
/*
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index 966efa1a27d..a63e64b6863 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -67,7 +67,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
*/
if (tulip_media_cap[dev->if_port] & MediaIsMII)
return;
- if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) {
+ if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9dc2d..254643ed945 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -707,8 +707,6 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -991,15 +989,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
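The remaining tulip_core hunks below, like the dmfe, winbond-840, typhoon and USB driver conversions elsewhere in this series, all follow the same shape: walk the multicast list with netdev_for_each_mc_addr() over struct netdev_hw_addr and hash ha->addr where the removed dmi_addr field used to be read. A minimal kernel-context sketch of that pattern; build_mc_filter() is a made-up helper name:

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void build_mc_filter(struct net_device *dev, u32 mc_filter[2])
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 2 * sizeof(u32));
	netdev_for_each_mc_addr(ha, dev) {
		/* top 6 CRC bits pick one of 64 filter positions */
		int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

		mc_filter[bit >> 5] |= 1 << (bit & 31);
	}
}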
@@ -1019,13 +1017,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1060,7 @@ static void set_rx_mode(struct net_device *dev)
} else if (tp->flags & MC_HASH_ONLY) {
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1068,21 @@ static void set_rx_mode(struct net_device *dev)
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (tp->flags & COMET_MAC_ADDR)
- filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ filterbit = ether_crc_le(ETH_ALEN,
+ ha->addr);
else
- filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit = ether_crc(ETH_ALEN,
+ ha->addr) >> 26;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
dev_info(&dev->dev,
"Added filter for %pM %08x bit %d\n",
- mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ ha->addr,
+ ether_crc(ETH_ALEN, ha->addr),
+ filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd34891..96de5829b94 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1040,11 +1040,11 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); // Tx polling again
// TX Timeout
- if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+ if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
printk( "%s: Tx timeout - resetting\n",
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
struct uli526x_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6cc1d6..608b279b921 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -626,7 +626,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
- return;
}
@@ -969,9 +968,8 @@ static void tx_timeout(struct net_device *dev)
enable_irq(dev->irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
- return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
@@ -1055,8 +1053,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irq(&np->lock);
- dev->trans_start = jiffies;
-
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
dev->name, np->cur_tx, entry);
@@ -1366,13 +1362,15 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
- filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ netdev_for_each_mc_addr(ha, dev) {
+ int filbit;
+
+ filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ filbit &= 0x3f;
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
}
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb98056..a439e93be22 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",
+ printk("tx status 0x%08x 0x%08x\n",
card->tx_buffer[0], card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",
+ printk("rx status 0x%08x 0x%08x\n",
card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d46..6ad6fe70631 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
struct tap_filter txflt;
struct socket socket;
+ struct socket_wq wq;
+
+ int vnet_hdr_sz;
#ifdef TUN_DEBUG
int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tun->socket.wait);
+ wake_up_all(&tun->wq.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
@@ -393,12 +396,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
- dev->trans_start = jiffies;
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
@@ -415,7 +417,6 @@ static void tun_net_mclist(struct net_device *dev)
* _rx_ path and has nothing to do with the _tx_ path.
* In rx path we always accept everything userspace gives us.
*/
- return;
}
#define MIN_MTU 68
@@ -498,7 +499,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->socket.wait, wait);
+ poll_wait(file, &tun->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -525,6 +526,8 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
struct sk_buff *skb;
int err;
+ sock_update_classid(sk);
+
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
@@ -563,7 +566,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= sizeof(gso)) > count)
+ if ((len -= tun->vnet_hdr_sz) > count)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +578,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
if (gso.hdr_len > len)
return -EINVAL;
- offset += sizeof(gso);
+ offset += tun->vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +721,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (tun->flags & TUN_VNET_HDR) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= sizeof(gso)) < 0)
+ if ((len -= tun->vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -749,7 +752,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += sizeof(gso);
+ total += tun->vnet_hdr_sz;
}
len = min_t(int, skb->len, len);
@@ -773,7 +776,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- add_wait_queue(&tun->socket.wait, &wait);
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -804,7 +807,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->socket.wait, &wait);
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
@@ -861,6 +864,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
struct tun_struct *tun;
+ wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
return;
@@ -868,8 +872,9 @@ static void tun_sock_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
@@ -1033,13 +1038,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->dev = dev;
tun->flags = flags;
tun->txflt.count = 0;
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = -ENOMEM;
sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
if (!sk)
goto err_free_dev;
- init_waitqueue_head(&tun->socket.wait);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1181,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
+ int vnet_hdr_sz;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1327,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNGETVNETHDRSZ:
+ vnet_hdr_sz = tun->vnet_hdr_sz;
+ if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETHDRSZ:
+ if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tun->vnet_hdr_sz = vnet_hdr_sz;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -1342,7 +1369,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
unlock:
rtnl_unlock();
@@ -1624,3 +1651,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
+MODULE_ALIAS("devname:net/tun");
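The tun.c hunks above add per-device virtio header sizing through two new ioctls, TUNGETVNETHDRSZ and TUNSETVNETHDRSZ. A userspace sketch of how a program might use the setter, assuming a uapi header that exports those constants; error handling is minimal and "tap0" is an assumed interface name:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tap_with_hdr(int hdr_sz)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* each read/write now carries hdr_sz header bytes */
}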
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818daa77..22bde49262c 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -881,8 +881,6 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
wmb();
iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
- dev->trans_start = jiffies;
-
/* If we don't have room to put the worst case packet on the
* queue, then we must stop the queue. We need 2 extra
* descriptors -- one to prevent ring wrap, and one for the
@@ -920,11 +918,11 @@ typhoon_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1b0aef37e49..4a34833b85d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
+ hw_add_addr_in_hash(ugeth, ha->addr);
}
}
}
@@ -3148,8 +3148,6 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
out_be32((u32 __iomem *)bd, bd_status);
- dev->trans_start = jiffies;
-
/* Move to next BD in the ring */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
@@ -3721,7 +3719,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
@@ -3883,7 +3881,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
}
if (netif_msg_probe(&debug))
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
@@ -3965,8 +3963,11 @@ static struct of_device_id ucc_geth_match[] = {
MODULE_DEVICE_TABLE(of, ucc_geth_match);
static struct of_platform_driver ucc_geth_driver = {
- .name = DRV_NAME,
- .match_table = ucc_geth_match,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ucc_geth_match,
+ },
.probe = ucc_geth_probe,
.remove = ucc_geth_remove,
.suspend = ucc_geth_suspend,
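The ucc_geth registration change above is part of the of_platform_driver move to the embedded struct device_driver: name, owner and the match table now live under .driver. A minimal sketch of the new layout, assuming the same kernel era as the hunk; the "acme,example" compatible string and the my_* names are placeholders:

#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id my_match[] = {
	{ .compatible = "acme,example" },	/* placeholder compatible string */
	{}
};

static int my_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	return 0;	/* device setup would go here */
}

static int my_remove(struct of_device *ofdev)
{
	return 0;
}

static struct of_platform_driver my_driver = {
	.driver = {
		.name = "my-driver",
		.owner = THIS_MODULE,
		.of_match_table = my_match,
	},
	.probe = my_probe,
	.remove = my_remove,
};
/* registered as before with of_register_platform_driver(&my_driver) */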
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc8280..1f802e90474 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -224,10 +224,9 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
cmd, value, index, size);
if (data) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, size);
}
err = usb_control_msg(
@@ -322,8 +321,29 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* get the packet length */
size = (u16) (header & 0x0000ffff);
- if ((skb->len) - ((size + 1) & 0xfffe) == 0)
+ if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
+ u8 alignment = (unsigned long)skb->data & 0x3;
+ if (alignment != 0x2) {
+ /*
+ * not 16bit aligned so use the room provided by
+ * the 32 bit header to align the data
+ *
+ * note we want 16-bit alignment: the MAC header is
+ * 14 bytes, so the IP header then sits on a 32-bit
+ * boundary and accessing its fields through a cast
+ * to struct iphdr won't cause unaligned accesses.
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(skb->data - realignment,
+ skb->data,
+ size);
+ skb->data -= realignment;
+ skb_set_tail_pointer(skb, size);
+ }
return 2;
+ }
+
if (size > ETH_FRAME_LEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
@@ -331,7 +351,18 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
+ u8 alignment = (unsigned long)packet & 0x3;
ax_skb->len = size;
+
+ if (alignment != 0x2) {
+ /*
+ * not 16bit aligned, so use the room provided by
+ * the 32 bit header to align the data
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(packet - realignment, packet, size);
+ packet -= realignment;
+ }
ax_skb->data = packet;
skb_set_tail_pointer(ax_skb, size);
usbnet_skb_return(dev, ax_skb);
@@ -558,16 +589,14 @@ static void asix_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
@@ -794,16 +823,14 @@ static void ax88172_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123b274..97687d33590 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- netdev_for_each_mc_addr(mc, netdev) {
- u32 crc = ether_crc_le(6, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc_le(6, ha->addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
} else {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3547cf13d21..b3fe0de4046 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
#endif
+static const u8 mbm_guid[16] = {
+ 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
+ 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
+};
+
/*
* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
int rndis;
struct usb_driver *driver = driver_of(intf);
+ struct usb_cdc_mdlm_desc *desc = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
if (sizeof dev->data < sizeof *info)
return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* side link address we were given.
*/
break;
+ case USB_CDC_MDLM_TYPE:
+ if (desc) {
+ dev_dbg(&intf->dev, "extra MDLM descriptor\n");
+ goto bad_desc;
+ }
+
+ desc = (void *)buf;
+
+ if (desc->bLength != sizeof(*desc))
+ goto bad_desc;
+
+ if (memcmp(&desc->bGUID, mbm_guid, 16))
+ goto bad_desc;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (detail) {
+ dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
+ goto bad_desc;
+ }
+
+ detail = (void *)buf;
+
+ if (detail->bGuidDescriptorType == 0) {
+ if (detail->bLength < (sizeof(*detail) + 1))
+ goto bad_desc;
+ } else
+ goto bad_desc;
+ break;
}
next_desc:
len -= buf [0]; /* bLength */
@@ -543,80 +578,10 @@ static const struct usb_device_id products [] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
}, {
- /* Ericsson F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3507g ver. 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 3 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&mbm_info,
+
},
{ }, // END
};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5dfed9297b2..02b622e3b9f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,10 +93,9 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
if (data) {
- buf = kmalloc(length, GFP_KERNEL);
+ buf = kmemdup(data, length, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, length);
}
err = usb_control_msg(dev->udev,
@@ -387,10 +386,10 @@ static void dm9601_set_multicast(struct net_device *net)
netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
} else if (!netdev_mc_empty(net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, net) {
- u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
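Several hunks in this series (asix, dm9601, mcs7830, pegasus) collapse the kmalloc()-then-memcpy() idiom into kmemdup(), which allocates and copies in one call and returns NULL on failure just as kmalloc() does. A minimal kernel-context sketch; dup_for_ctrl_msg() is a made-up name:

#include <linux/slab.h>
#include <linux/string.h>

static void *dup_for_ctrl_msg(const void *data, size_t len)
{
	/* one call replaces kmalloc(len, GFP_KERNEL) + memcpy(buf, data, len) */
	return kmemdup(data, len, GFP_KERNEL);
}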
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index be0cc99e881..0a3c41faea9 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -475,6 +475,9 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x8302)},
{USB_DEVICE(0x0af0, 0x8304)},
{USB_DEVICE(0x0af0, 0x8400)},
+ {USB_DEVICE(0x0af0, 0x8600)},
+ {USB_DEVICE(0x0af0, 0x8800)},
+ {USB_DEVICE(0x0af0, 0x8900)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -834,8 +837,6 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
} else {
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
- /* And tell the kernel when the last transmit started. */
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
/* we're done */
@@ -1474,7 +1475,6 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
spin_unlock_irqrestore(&serial->serial_lock, flags);
/* done */
- return;
}
/* how many characters in the buffer */
@@ -1994,7 +1994,6 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
hso_kick_transmit(serial);
D1(" ");
- return;
}
/* called for writing diag or CS serial port */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 418825d26f9..197c352c47f 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -128,17 +128,13 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (rx_urb == NULL)
goto free_tx_urb;
- tx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &tx_urb->transfer_dma);
+ tx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &tx_urb->transfer_dma);
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &rx_urb->transfer_dma);
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -150,8 +146,8 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
return 0;
free_tx_buf:
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
- tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+ tx_urb->transfer_dma);
free_rx_urb:
usb_free_urb(rx_urb);
free_tx_urb:
@@ -162,10 +158,10 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
- iphone->rx_urb->transfer_dma);
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
- iphone->tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
usb_free_urb(iphone->rx_urb);
usb_free_urb(iphone->tx_urb);
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c4c334d9770..d6078b8c427 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -856,7 +856,6 @@ skip:
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
spin_unlock_irq(&kaweth->device_lock);
@@ -1156,13 +1155,13 @@ err_fw:
if (!kaweth->irq_urb)
goto err_tx_and_rx;
- kaweth->intbuffer = usb_buffer_alloc( kaweth->dev,
+ kaweth->intbuffer = usb_alloc_coherent( kaweth->dev,
INTBUFFERSIZE,
GFP_KERNEL,
&kaweth->intbufferhandle);
if (!kaweth->intbuffer)
goto err_tx_and_rx_and_irq;
- kaweth->rx_buf = usb_buffer_alloc( kaweth->dev,
+ kaweth->rx_buf = usb_alloc_coherent( kaweth->dev,
KAWETH_BUF_SIZE,
GFP_KERNEL,
&kaweth->rxbufferhandle);
@@ -1203,9 +1202,9 @@ err_fw:
err_intfdata:
usb_set_intfdata(intf, NULL);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
err_all_but_rxbuf:
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
err_tx_and_rx_and_irq:
usb_free_urb(kaweth->irq_urb);
err_tx_and_rx:
@@ -1242,8 +1241,8 @@ static void kaweth_disconnect(struct usb_interface *intf)
usb_free_urb(kaweth->tx_urb);
usb_free_urb(kaweth->irq_urb);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
free_netdev(netdev);
}
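The ipheth and kaweth hunks above are a pure rename: usb_buffer_alloc()/usb_buffer_free() become usb_alloc_coherent()/usb_free_coherent() with the same arguments and the same DMA-coherent semantics. A minimal kernel-context sketch of the paired calls; BUF_SZ and the helper names are made up:

#include <linux/slab.h>
#include <linux/usb.h>

#define BUF_SZ 1516	/* hypothetical transfer size */

static void *alloc_xfer_buf(struct usb_device *udev, struct urb *urb)
{
	/* same arguments usb_buffer_alloc() took, new name */
	return usb_alloc_coherent(udev, BUF_SZ, GFP_KERNEL, &urb->transfer_dma);
}

static void free_xfer_buf(struct usb_device *udev, struct urb *urb, void *buf)
{
	usb_free_coherent(udev, BUF_SZ, buf, urb->transfer_dma);
}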
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f871e..a6281e3987b 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -142,12 +142,10 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *
int ret;
void *buffer;
- buffer = kmalloc(size, GFP_NOIO);
+ buffer = kmemdup(data, size, GFP_NOIO);
if (buffer == NULL)
return -ENOMEM;
- memcpy(buffer, data, size);
-
ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
MCS7830_WR_BMREQ, 0x0000, index, buffer,
size, MCS7830_CTRL_TIMEOUT);
@@ -453,12 +451,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 41838773b56..974d17f0263 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -203,13 +203,12 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
char *buffer;
DECLARE_WAITQUEUE(wait, current);
- buffer = kmalloc(size, GFP_KERNEL);
+ buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(buffer, data, size);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -255,13 +254,12 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
char *tmp;
DECLARE_WAITQUEUE(wait, current);
- tmp = kmalloc(1, GFP_KERNEL);
+ tmp = kmemdup(&data, 1, GFP_KERNEL);
if (!tmp) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(tmp, &data, 1);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
@@ -808,7 +806,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies;
+ net->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(net);
}
@@ -909,7 +907,6 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index b90d8766ab7..29f5211e645 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -256,7 +256,7 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
DEFAULT_GPIO_RESET)
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4adf48c..28d3ee175e7 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
struct cdc_state *info = (void *) &dev->data;
+ struct usb_cdc_notification notification;
int master_ifnum;
int retval;
+ int partial;
unsigned count;
__le32 rsp;
u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
if (unlikely(retval < 0 || xid == 0))
return retval;
- // FIXME Seems like some devices discard responses when
- // we time out and cancel our "get response" requests...
- // so, this is fragile. Probably need to poll for status.
+ /* Some devices don't respond on the control channel until
+ * polled on the status channel, so do that first. */
+ retval = usb_interrupt_msg(
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
+ &notification, sizeof(notification), &partial,
+ RNDIS_CONTROL_TIMEOUT_MS);
+ if (unlikely(retval < 0))
+ return retval;
- /* ignore status endpoint, just poll the control channel;
- * the request probably completed immediately
- */
+ /* Poll the control channel; the request probably completed immediately */
rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
for (count = 0; count < 10; count++) {
memset(buf, 0, CONTROL_BUFFER_SIZE);
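The rndis_command() change above stops ignoring the status endpoint: one CDC notification is read from the interrupt pipe before the control channel is polled, since some devices hold back their control response until then. A sketch of that poll pulled out into a helper, assuming a usbnet device with a valid status endpoint; poll_status_once() is a made-up name:

#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>

static int poll_status_once(struct usbnet *dev, int timeout_ms)
{
	struct usb_cdc_notification notification;
	int partial;

	/* one blocking read of the interrupt (status) endpoint */
	return usb_interrupt_msg(dev->udev,
		usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
		&notification, sizeof(notification), &partial, timeout_ms);
}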
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1b79e..753ee6eb7ed 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc75xx_hash(ha->addr);
pdata->multicast_hash_table[bitnum / 32] |=
(1 << (bitnum % 32));
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af63d37..12a3c88c528 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc78dc..a95c73de582 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
- }
+ if (length % dev->maxpacket == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
+ } else
+ urb->transfer_flags |= URB_ZERO_PACKET;
}
spin_lock_irqsave(&dev->txq.lock, flags);
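The usbnet_start_xmit() rework above handles transfers that land exactly on a max-packet boundary: devices flagged FLAG_SEND_ZLP get an explicit zero-length packet via URB_ZERO_PACKET, everything else gets a one-byte pad. A simplified kernel-context sketch of that decision; finish_tx_urb() is a made-up name and the padding path is reduced to the tailroom case:

#include <linux/skbuff.h>
#include <linux/usb.h>

static void finish_tx_urb(struct urb *urb, struct sk_buff *skb,
			  int maxpacket, bool can_take_zlp)
{
	if (urb->transfer_buffer_length % maxpacket)
		return;		/* a short packet already terminates the transfer */

	if (can_take_zlp) {
		urb->transfer_flags |= URB_ZERO_PACKET;
	} else if (skb_tailroom(skb)) {
		skb->data[skb->len] = 0;	/* one byte of padding */
		__skb_put(skb, 1);
		urb->transfer_buffer_length++;
	}
}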
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751aa66e..4930f9dbc49 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1209,7 +1209,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1294,8 +1294,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
@@ -1703,11 +1701,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4ee89..42dffd3e579 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
u32 status = 0;
u16 ANAR;
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
status |= VELOCITY_LINK_FAIL;
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
status |= (VELOCITY_SPEED_1000);
else {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if (ANAR & ANAR_TXFD)
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
- else if (ANAR & ANAR_TX)
+ else if (ANAR & ADVERTISE_100HALF)
status |= VELOCITY_SPEED_100;
- else if (ANAR & ANAR_10FD)
+ else if (ANAR & ADVERTISE_10FULL)
status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
else
status |= (VELOCITY_SPEED_10);
}
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
/*Enable or Disable PAUSE in ANAR */
switch (vptr->options.flow_cntl) {
case FLOW_CNTL_TX:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_TX_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
default:
break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
*/
static void mii_set_auto_on(struct velocity_info *vptr)
{
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
else
- MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
else
status |= VELOCITY_SPEED_100;
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
*/
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
/*
* If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
/* clear force MAC mode bit */
BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
/* set duplex mode of MAC according to duplex mode of MII */
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
}
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
if (mii_status & VELOCITY_SPEED_100) {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_TXFD;
+ ANAR |= ADVERTISE_100FULL;
else
- ANAR |= ANAR_TX;
+ ANAR |= ADVERTISE_100HALF;
} else {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_10FD;
+ ANAR |= ADVERTISE_10FULL;
else
- ANAR |= ANAR_10;
+ ANAR |= ADVERTISE_10HALF;
}
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
}
/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
struct mac_regs __iomem *regs = vptr->mac_regs;
u8 rx_mode;
int i;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
mac_get_cam_mask(regs, vptr->mCAMmask);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
i++;
}
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
/*
* Turn on Link/Activity LED enable bit for CIS8201
*/
- MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
break;
case PHYID_VT3216_32BIT:
case PHYID_VT3216_64BIT:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
break;
case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
break;
default:
;
}
- velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
- if (BMCR & BMCR_ISO) {
- BMCR &= ~BMCR_ISO;
- velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
}
}
@@ -2606,7 +2606,6 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
out:
return NETDEV_TX_OK;
@@ -2953,13 +2952,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
}
if (vptr->mii_status & VELOCITY_SPEED_1000)
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f64ba1..c38191179fa 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
u32 pattern[8];
};
-
-/*
- * MII registers.
- */
-
-
/*
* Registers in the MII (offset unit is WORD)
*/
-#define MII_REG_BMCR 0x00 // physical address
-#define MII_REG_BMSR 0x01 //
-#define MII_REG_PHYID1 0x02 // OUI
-#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
-#define MII_REG_ANAR 0x04 //
-#define MII_REG_ANLPAR 0x05 //
-#define MII_REG_G1000CR 0x09 //
-#define MII_REG_G1000SR 0x0A //
-#define MII_REG_MODCFG 0x10 //
-#define MII_REG_TCSR 0x16 //
-#define MII_REG_PLED 0x1B //
-// NS, MYSON only
-#define MII_REG_PCR 0x17 //
-// ESI only
-#define MII_REG_PCSR 0x17 //
-#define MII_REG_AUXCR 0x1C //
-
// Marvell 88E1000/88E1000S
#define MII_REG_PSCR 0x10 // PHY specific control register
//
-// Bits in the BMCR register
-//
-#define BMCR_RESET 0x8000 //
-#define BMCR_LBK 0x4000 //
-#define BMCR_SPEED100 0x2000 //
-#define BMCR_AUTO 0x1000 //
-#define BMCR_PD 0x0800 //
-#define BMCR_ISO 0x0400 //
-#define BMCR_REAUTO 0x0200 //
-#define BMCR_FDX 0x0100 //
-#define BMCR_SPEED1G 0x0040 //
-//
-// Bits in the BMSR register
-//
-#define BMSR_AUTOCM 0x0020 //
-#define BMSR_LNK 0x0004 //
-
-//
-// Bits in the ANAR register
-//
-#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANAR_T4 0x0200 //
-#define ANAR_TXFD 0x0100 //
-#define ANAR_TX 0x0080 //
-#define ANAR_10FD 0x0040 //
-#define ANAR_10 0x0020 //
-//
-// Bits in the ANLPAR register
-//
-#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANLPAR_T4 0x0200 //
-#define ANLPAR_TXFD 0x0100 //
-#define ANLPAR_TX 0x0080 //
-#define ANLPAR_10FD 0x0040 //
-#define ANLPAR_10 0x0020 //
-
-//
-// Bits in the G1000CR register
-//
-#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
-#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
-
-//
-// Bits in the G1000SR register
+// Bits in the Silicon revision register
//
-#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
-#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
#define TCSR_ECHODIS 0x2000 //
#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
#define PHYID_REV_ID_MASK 0x0000000FUL
-#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
#define MII_GET_PHY_ID(p) ({\
u32 id;\
- velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
- velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
+ velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
(id);})
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42..78eb3190b9b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
#define VIRTNET_SEND_COMMAND_SG_MAX 2
-struct virtnet_info
-{
+struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
/* Chain pages by the private ptr. */
struct page *pages;
+
+ /* fragments + linear part + virtio header */
+ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
+ struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};
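The new rx_sg/tx_sg members replace the scatterlist arrays that were previously declared on the stack and sg_init_table()'d on every receive/transmit call; the table is now initialised once in virtnet_probe() (see the hunk further down) and only the needed entries are refilled per packet. Fragment, reusing the names from add_recvbuf_small() below:

	/* probe time, once: */
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* per received buffer: slot 0 carries the virtio header, slot 1.. the data */
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);	/* 0 out, 2 in */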
struct skb_vnet_hdr {
@@ -119,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */
- svq->vq_ops->disable_cb(svq);
+ virtqueue_disable_cb(svq);
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
@@ -207,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
return -EINVAL;
}
- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers);
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- struct scatterlist sg[2];
int err;
- sg_init_table(sg, 2);
skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, sg + 1, 0, skb->len);
+ skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
if (err < 0)
dev_kfree_skb(skb);
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
struct page *first, *list = NULL;
char *p;
int i, err, offset;
- sg_init_table(sg, MAX_SKB_FRAGS + 2);
- /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
first = get_a_page(vi, gfp);
if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
give_pages(vi, list);
return -ENOMEM;
}
- sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
+ sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
/* chain new page in list head to match sg */
first->private = (unsigned long)list;
@@ -376,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
}
p = page_address(first);
- /* sg[0], sg[1] share the same page */
- /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
- sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+ /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
+ /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
+ sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
- /* sg[1] for data packet, from offset */
+ /* vi->rx_sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
- sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+ sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
/* chain first in list head */
first->private = (unsigned long)list;
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
first);
if (err < 0)
give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
struct page *page;
- struct scatterlist sg;
int err;
page = get_a_page(vi, gfp);
if (!page)
return -ENOMEM;
- sg_init_one(&sg, page_address(page), PAGE_SIZE);
+ sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
if (err < 0)
give_pages(vi, page);
@@ -435,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
+ virtqueue_kick(vi->rvq);
return !oom;
}
@@ -444,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) {
- rvq->vq_ops->disable_cb(rvq);
+ virtqueue_disable_cb(rvq);
__napi_schedule(&vi->napi);
}
}
@@ -473,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
again:
while (received < budget &&
- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
receive_buf(vi->dev, buf, len);
--vi->num;
received++;
@@ -487,9 +485,9 @@ again:
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+ if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
napi_schedule_prep(napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(napi);
goto again;
}
@@ -503,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- struct scatterlist sg[2+MAX_SKB_FRAGS];
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
-
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
+ sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+ hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+ 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -578,14 +574,14 @@ again:
if (unlikely(capacity < 0)) {
netif_stop_queue(dev);
dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ virtqueue_disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
return NETDEV_TX_BUSY;
}
- vi->svq->vq_ops->kick(vi->svq);
+ virtqueue_kick(vi->svq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -595,12 +591,12 @@ again:
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
netif_start_queue(dev);
- vi->svq->vq_ops->disable_cb(vi->svq);
+ virtqueue_disable_cb(vi->svq);
}
}
}
@@ -645,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
* now. virtnet_poll wants re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(&vi->napi);
}
return 0;
@@ -682,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
- vi->cvq->vq_ops->kick(vi->cvq);
+ virtqueue_kick(vi->cvq);
/*
* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -722,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct scatterlist sg[2];
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
int mc_count;
@@ -779,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
mac_data->entries = mc_count;
i = 0;
- netdev_for_each_mc_addr(addr, dev)
- memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
+ sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
+ sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1006,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf)
break;
dev_kfree_skb(buf);
}
while (1) {
- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf)
break;
if (vi->mergeable_rx_bufs || vi->big_packets)
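Every vi->*vq->vq_ops->op(vq, ...) call in this file becomes a direct virtqueue_*() call. A condensed, hypothetical helper showing the wrapper API as used by this patch (the real transmit path also tracks ring capacity and NAPI state):

#include <linux/virtio.h>

static int example_queue_and_reap(struct virtqueue *vq, struct scatterlist *sg,
				  unsigned int out, unsigned int in, void *token)
{
	unsigned int len;
	void *done;

	if (virtqueue_add_buf(vq, sg, out, in, token) < 0)
		return -ENOSPC;			/* ring full */
	virtqueue_kick(vq);			/* notify the host */

	virtqueue_disable_cb(vq);		/* poll without completion interrupts */
	while ((done = virtqueue_get_buf(vq, &len)) != NULL)
		;				/* 'done' is the token given to add_buf */

	if (!virtqueue_enable_cb(vq))		/* more completions raced in */
		return 1;
	return 0;
}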
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485d967..989b742551a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -992,7 +992,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
tq->tx_ring.next2fill);
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1174,7 +1173,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
netif_receive_skb(skb);
}
- adapter->netdev->last_rx = jiffies;
ctx->skb = NULL;
}
@@ -1371,13 +1369,12 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = kmalloc(sz, GFP_KERNEL);
+ bi = kzalloc(sz, GFP_KERNEL);
if (!bi) {
printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
adapter->netdev->name);
goto err;
}
- memset(bi, 0, sz);
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
@@ -1675,11 +1672,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
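Two recurring cleanups in the vmxnet3 hunks: kmalloc()+memset(0) folded into kzalloc(), and the old dev_mc_list/dmi_addr walk replaced by netdev_for_each_mc_addr() over struct netdev_hw_addr. A small self-contained sketch of both idioms (example_copy_mc_list() is hypothetical):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static u8 *example_copy_mc_list(struct net_device *netdev, int mc_count)
{
	struct netdev_hw_addr *ha;
	int i = 0;
	u8 *buf = kzalloc(mc_count * ETH_ALEN, GFP_ATOMIC);	/* already zeroed */

	if (!buf)
		return NULL;

	netdev_for_each_mc_addr(ha, netdev)	/* replaces dev_mc_list iteration */
		memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);

	return buf;
}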
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b..297f0d20207 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -183,8 +183,6 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
pci_save_state(hldev->pdev);
-
- return;
}
/*
@@ -342,8 +340,6 @@ void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
hldev->minor_revision =
(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-
- return;
}
/*
@@ -357,8 +353,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
switch (host_type) {
case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ if (func_id == 0) {
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ }
break;
case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -426,8 +424,6 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->first_vp_id = i;
break;
}
-
- return;
}
/*
@@ -633,8 +629,10 @@ vxge_hw_device_initialize(
__vxge_hw_device_pci_e_init(hldev);
status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK)
+ if (status != VXGE_HW_OK) {
+ vfree(hldev);
goto exit;
+ }
__vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -1213,19 +1211,16 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
/* link this RxD block with previous one */
__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
}
-
- return;
}
/*
- * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
+ * __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
- int i = 0;
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -1246,11 +1241,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
}
vxge_hw_ring_rxd_post(ring, rxd);
- if (min_flag) {
- i++;
- if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
- break;
- }
}
status = VXGE_HW_OK;
exit:
@@ -1355,7 +1345,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
@@ -1417,7 +1407,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
goto exit;
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK)
goto exit;
}
@@ -2320,8 +2310,6 @@ __vxge_hw_fifo_mempool_item_alloc(
txdl_priv->first_txdp = txdp;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->alloc_frags = 0;
-
- return;
}
/*
@@ -2578,7 +2566,6 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
writeq(0, &vpath_reg->rts_access_steer_data1);
wmb();
- return;
}
@@ -3486,7 +3473,6 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
writeq(val64, &vp_reg->prc_cfg4);
- return;
}
/*
@@ -3905,7 +3891,6 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
}
}
- return;
}
/*
* __vxge_hw_vpath_initialize
@@ -5039,8 +5024,6 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
if (status == VXGE_HW_OK)
__vxge_hw_blockpool_blocks_remove(blockpool);
}
-
- return;
}
/*
@@ -5096,6 +5079,4 @@ __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
}
__vxge_hw_blockpool_blocks_remove(blockpool);
-
- return;
}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f..4ae2625d4d8 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
u32 func_id;
u64 vpath_mask;
struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
+ int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
-
- size += VXGE_CACHE_LINE_SIZE;
-
+realloc:
vaddr = kmalloc((size), flags);
if (vaddr == NULL)
return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
+ misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
VXGE_CACHE_LINE_SIZE);
+ if (realloc_flag)
+ goto out;
+
+ if (misaligned) {
+ /* misaligned, free current one and try allocating
+ * size + VXGE_CACHE_LINE_SIZE memory
+ */
+ kfree((void *) vaddr);
+ size += VXGE_CACHE_LINE_SIZE;
+ realloc_flag = 1;
+ goto realloc;
+ }
+out:
*(unsigned long *)p_dma_acch = misaligned;
vaddr = (void *)((u8 *)vaddr + misaligned);
return vaddr;
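The reworked vxge_os_dma_malloc() now tries a plain kmalloc() of the requested size first and only pads by VXGE_CACHE_LINE_SIZE and reallocates when the first pointer comes back misaligned, instead of always over-allocating. A stand-alone sketch of that strategy, assuming 'align' is a power of two; the real function additionally stores the offset in *p_dma_acch so it can be undone at free time:

#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_alloc_aligned(size_t size, unsigned long align, gfp_t gfp)
{
	void *vaddr = kmalloc(size, gfp);
	unsigned long off;

	if (!vaddr)
		return NULL;

	off = ALIGN((unsigned long)vaddr, align) - (unsigned long)vaddr;
	if (!off)
		return vaddr;			/* already aligned, no padding needed */

	kfree(vaddr);				/* retry once, with room to shift */
	vaddr = kmalloc(size + align, gfp);
	if (!vaddr)
		return NULL;

	off = ALIGN((unsigned long)vaddr, align) - (unsigned long)vaddr;
	return (u8 *)vaddr + off;		/* caller must remember 'off' for kfree() */
}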
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
struct vxge_hw_rth_hash_types *hash_type,
u16 bucket_size);
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd32..cadef8549c0 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u8 *reg_space = (u8 *) space;
+ u64 *reg_space = (u64 *) space;
struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
__func__, __LINE__);
return;
}
-
- memcpy((reg_space + offset), &reg, 8);
+ *reg_space++ = reg;
}
}
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c3..b504bd56136 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ring->ndev->name, __func__, __LINE__);
ring->pkts_processed = 0;
- vxge_hw_ring_replenish(ringh, 0);
+ vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
*/
static void vxge_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct vxgedev *vdev;
int i, mcast_cnt = 0;
struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id, alarm_msix_id;
- int tim_msix_id[4] = {[0 ...3] = 0};
+ int msix_id = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+ int alarm_msix_id = VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_intr_enable(vpath->handle);
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
- tim_msix_id[0] = msix_id;
- tim_msix_id[1] = msix_id + 1;
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
alarm_msix_id);
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
/* enable the alarm vector */
- vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
}
}
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
/* disable the alarm vector */
- msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
}
}
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
- return;
}
#endif
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
enum vxge_hw_status status;
struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
struct vxgedev *vdev = vpath->vdev;
- int alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ int msix_id = (vpath->handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ msix_id);
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
static int vxge_alloc_msix(struct vxgedev *vdev)
{
int j, i, ret = 0;
- int intr_cnt = 0;
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int msix_intr_vect = 0, temp;
vdev->intr_cnt = 0;
+start:
/* Tx/Rx MSIX Vectors count */
vdev->intr_cnt = vdev->no_of_vpath * 2;
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
- vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+ vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries =
+ kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- /* Last vector in the list is used for alarm */
- alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
- for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+ for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
}
/* Initialize the alarm vector */
- vdev->entries[j].entry = alarm_msix_id;
- vdev->vxge_entries[j].entry = alarm_msix_id;
+ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- /* if driver request exceeeds available irq's, request with a small
- * number.
- */
- if (ret > 0) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, available: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
- vdev->max_vpath_supported = vdev->no_of_vpath;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
- /* Reset the alarm vector setting */
- vdev->entries[j].entry = 0;
- vdev->vxge_entries[j].entry = 0;
-
- /* Initialize the alarm vector with new setting */
- vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- if (!ret)
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enabled for %d vectors",
- VXGE_DRIVER_NAME, intr_cnt);
- }
+ ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret) {
+ if (ret > 0) {
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
+ VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
kfree(vdev->entries);
kfree(vdev->vxge_entries);
vdev->entries = NULL;
vdev->vxge_entries = NULL;
+
+ if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+ return -ENODEV;
+ /* Retry with fewer vectors by reducing the vpath count */
+ temp = (ret - 1)/2;
+ vxge_close_vpaths(vdev, temp);
+ vdev->no_of_vpath = temp;
+ goto start;
+ } else if (ret < 0)
return -ENODEV;
- }
+
return 0;
}
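vxge_alloc_msix() now sizes its msix_entry arrays from the vpaths actually in use (two vectors per vpath plus one alarm vector) and, when pci_enable_msix() returns a positive count (the number of vectors actually available), tears everything down, closes the surplus vpaths and retries with (ret - 1) / 2 vpaths. Simplified, hypothetical sketch of that loop; 'entries' is assumed to be sized for the largest request, and the real driver also refuses to shrink when the vpath count was forced via max_config_vpath:

#include <linux/pci.h>

static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			       int *nvpaths)
{
	int want, ret;

retry:
	want = *nvpaths * 2 + 1;		/* tx + rx per vpath, plus alarm */
	ret = pci_enable_msix(pdev, entries, want);
	if (ret == 0)
		return 0;			/* got everything */
	if (ret < 3)
		return -ENODEV;			/* error, or not even one vpath + alarm */

	*nvpaths = (ret - 1) / 2;		/* keep what fits and try again */
	goto retry;
}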
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
{
int i, ret = 0;
- enum vxge_hw_status status;
/* 0 - Tx, 1 - Rx */
- int tim_msix_id[4];
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+
vdev->intr_cnt = 0;
/* allocate msix vectors */
ret = vxge_alloc_msix(vdev);
if (!ret) {
- /* Last vector in the list is used for alarm */
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
for (i = 0; i < vdev->no_of_vpath; i++) {
/* If fifo or ring are not enabled
the MSIX vector for that should be set to 0
Hence initializing this array to all 0s.
*/
- memset(tim_msix_id, 0, sizeof(tim_msix_id));
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
- tim_msix_id[0] = msix_intr_vect;
-
- tim_msix_id[1] = msix_intr_vect + 1;
- vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
+ vdev->vpaths[i].ring.rx_vector_no =
+ (vdev->vpaths[i].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
- status = vxge_hw_vpath_msix_set(
- vdev->vpaths[i].handle,
- tim_msix_id, alarm_msix_id);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_msix_set "
- "failed with status : %x", status);
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- pci_disable_msix(vdev->pdev);
- return -ENODEV;
- }
+ vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+ tim_msix_id, VXGE_ALARM_MSIX_ID);
}
}
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
int intr_cnt;
- for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+ for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
intr_cnt++) {
if (vdev->vxge_entries[intr_cnt].in_use) {
synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
if (irq_req) {
/* We requested for this msix interrupt */
vdev->vxge_entries[intr_cnt].in_use = 1;
+ msix_idx += vdev->vpaths[vp_idx].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(
vdev->vpaths[vp_idx].handle,
- intr_idx);
+ msix_idx);
intr_cnt++;
}
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
vp_idx++;
}
- intr_cnt = vdev->max_vpath_supported * 2;
+ intr_cnt = vdev->no_of_vpath * 2;
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge Alarm fn: %d MSI-X: %d",
- vdev->ndev->name, pci_fun,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Alarm - fn:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun);
/* For Alarm interrupts */
ret = request_irq(vdev->entries[intr_cnt].vector,
vxge_alarm_msix_handle, 0,
vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx]);
+ &vdev->vpaths[0]);
if (ret) {
vxge_debug_init(VXGE_ERR,
"%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
goto INTA_MODE;
}
+ msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- intr_idx - 2);
+ msix_idx);
vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+ vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
}
INTA_MODE:
#endif
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
if (vdev->config.intr_type == INTA) {
+ snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+ "%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++)
netif_napi_del(&vdev->vpaths[i].ring.napi);
}
- return;
}
int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
bw_percentage[i] = bw_percentage[0];
}
-
- return;
}
/*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+ u32 num_functions = 0;
+
+ switch (function_mode) {
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+ num_functions = 8;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ num_functions = 1;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+ num_functions = 17;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+ num_functions = 4;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+ num_functions = 2;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+ num_functions = 8; /* TODO */
+ break;
+ }
+ return num_functions;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
u8 *macaddr;
struct vxge_mac_addrs *entry;
static int bus = -1, device = -1;
+ u32 host_type;
u8 new_device = 0;
+ enum vxge_hw_status is_privileged;
+ u32 function_mode;
+ u32 num_vfs = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
attr.pdev = pdev;
- if (bus != pdev->bus->number)
- new_device = 1;
- if (device != PCI_SLOT(pdev->devfn))
+ /* In SRIOV-17 mode, functions of the same adapter
+ * can be deployed on different buses */
+ if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+ (device != PCI_SLOT(pdev->devfn))))
new_device = 1;
bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->total_dev_cnt);
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
- driver_config->g_no_cpus = 0;
}
-
+ /* The CPU-based vpath count calculation now applies to
+ * individual functions as well.
+ */
+ driver_config->g_no_cpus = 0;
driver_config->vpath_per_dev = max_config_vpath;
driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s:%d Vpath mask = %llx", __func__, __LINE__,
(unsigned long long)vpath_mask);
+ function_mode = ll_config.device_hw_info.function_mode;
+ host_type = ll_config.device_hw_info.host_type;
+ is_privileged = __vxge_hw_device_is_privilaged(host_type,
+ ll_config.device_hw_info.func_id);
+
/* Check how many vpaths are available */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
max_vpath_supported++;
}
+ if (new_device)
+ num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
- ll_config.device_hw_info.function_mode) &&
- (max_config_dev > 1) && (pdev->is_physfn)) {
- ret = pci_enable_sriov(pdev, max_config_dev - 1);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed to enable SRIOV: %d \n", ret);
+ if (is_sriov(function_mode) && (max_config_dev > 1) &&
+ (ll_config.intr_type != INTA) &&
+ (is_privileged == VXGE_HW_OK)) {
+ ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+ ? (max_config_dev - 1) : num_vfs);
+ if (ret)
+ vxge_debug_ll_config(VXGE_ERR,
+ "Failed in enabling SRIOV mode: %d\n", ret);
}
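SR-IOV is now only enabled from a privileged (PF) function running with MSI-X, and the VF count requested from pci_enable_sriov() is capped both by max_config_dev and by what the firmware function mode supports (vxge_get_num_vfs() minus the PF itself). Equivalent fragment, names as in the hunk above:

	if (is_sriov(function_mode) && max_config_dev > 1 &&
	    ll_config.intr_type != INTA && is_privileged == VXGE_HW_OK) {
		/* never ask for more VFs than the function mode provides */
		u32 vfs = min_t(u32, max_config_dev - 1, num_vfs);
		int rc = pci_enable_sriov(pdev, vfs);

		if (rc)
			vxge_debug_ll_config(VXGE_ERR,
				"Failed in enabling SRIOV mode: %d\n", rc);
	}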
/*
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d..60276b20fa5 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
#define VXGE_HW_RXSYNC_FREQ_CNT 4
#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+#define is_sriov(function_mode) \
+ ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
enum vxge_reset_event {
/* reset events */
VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce46..6cc1dd79b40 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->set_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
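The mask/unmask helpers now derive the register and bit purely from the absolute MSI-X vector number: the bit written is vxge_mBIT(msix_id / 4) and the register is [set|clear]_msix_mask_vect[msix_id % 4], with no first_vp_id offset. Sketch of just the index arithmetic (the register layout and the vxge_mBIT/vxge_bVALn encodings are the driver's own):

	/* hypothetical helper: split a vector number into mask-bit and register index */
	static void example_msix_index(int msix_id, unsigned int *bit, unsigned int *reg)
	{
		*bit = msix_id >> 2;	/* one bit per group of VXGE_HW_VPATH_MSIX_ACTIVE (4) vectors */
		*reg = msix_id & 3;	/* which [set|clear]_msix_mask_vect[] register to poke */
	}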
/**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
val64 = readq(&hldev->common_reg->titan_general_int_status);
vxge_hw_device_unmask_all(hldev);
-
- return;
}
/**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
vxge_hw_vpath_intr_disable(
VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
}
-
- return;
}
/**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
-
- return;
}
/*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
wmb();
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(channel, rxdh);
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
struct __vxge_hw_channel *channel;
struct vxge_hw_ring_rxd_1 *rxdp;
enum vxge_hw_status status = VXGE_HW_OK;
+ u64 control_0, own;
channel = &ring->channel;
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
goto exit;
}
+ control_0 = rxdp->control_0;
+ own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
/* check whether it is not the end */
- if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+ if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
++ring->cmpl_cnt;
vxge_hw_channel_dtr_complete(channel);
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
* such as unknown UPV6 header), Drop it !!!
*/
- if (t_code == 0 || t_code == 5) {
+ if (t_code == VXGE_HW_RING_T_CODE_OK ||
+ t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
status = VXGE_HW_OK;
goto exit;
}
- if (t_code > 0xF) {
+ if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
@@ -2216,29 +2204,24 @@ exit:
* This API will associate a given MSIX vector numbers with the four TIM
* interrupts and alarm interrupt.
*/
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
int alarm_msix_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 first_vp_id = vpath->hldev->first_vp_id;
+ u32 vp_id = vp->vpath->vp_id;
val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[0]) |
+ (vp_id * 4) + tim_msix_id[0]) |
VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[1]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[2]);
-
- val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[3]);
+ (vp_id * 4) + tim_msix_id[1]);
writeq(val64, &vp_reg->interrupt_cfg0);
writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (first_vp_id * 4) + alarm_msix_id),
+ (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
&vp_reg->interrupt_cfg2);
if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
0, 32), &vp_reg->one_shot_vect3_en);
}
-
- return VXGE_HW_OK;
}
/**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id / 4)), 0, 32),
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-
- return;
}
/**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
-
- return;
}
/**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-
- return;
}
/**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e8..c252f3d3f65 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
u32 rth_hash_type;
u32 rth_value;
};
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ * presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
+ * such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
+ * error (such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error. The RxD buffer(s)
+ * were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
+ * Segment1 exceeded the capacity of Buffer1 and the remainder
+ * was placed in Buffer2. Segment2 now starts in Buffer3.
+ * No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
+ * assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
+ * VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
+ * transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+ VXGE_HW_RING_T_CODE_OK = 0x0,
+ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+ VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+ VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+ VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+ VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+ VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+ VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
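This enum names the raw transfer codes that vxge_hw_ring_handle_tcode() used to compare as magic numbers (0, 5, 0xF). With it, the check rewritten earlier in this patch reads, as a fragment, like:

	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR)	/* unparseable L3 packet: not fatal */
		return VXGE_HW_OK;
	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR)	/* outside the defined code space */
		return VXGE_HW_ERR_INVALID_TCODE;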
/**
* enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
void *rxdh);
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
void
vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
/*
* struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 skip_alarms);
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
int *tim_msix_id, int alarm_msix_id);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b..5da7ab1fd30 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "6"
-#define VXGE_VERSION_BUILD "18937"
+#define VXGE_VERSION_FIX "8"
+#define VXGE_VERSION_BUILD "20182"
#define VXGE_VERSION_FOR "k"
#endif
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95c5bd..cf9e15fd8d9 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
}
} else { /* chan->protocol == ETH_P_X25 */
switch (skb->data[0]) {
- case 0: break;
- case 1: /* Connect request */
+ case X25_IFACE_DATA:
+ break;
+ case X25_IFACE_CONNECT:
cycx_x25_chan_connect(dev);
goto free_packet;
- case 2: /* Disconnect request */
+ case X25_IFACE_DISCONNECT:
cycx_x25_chan_disconnect(dev);
goto free_packet;
default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
reset_timer(dev);
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 1);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_CONNECT);
break;
case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
}
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 2);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_DISCONNECT);
netif_wake_queue(dev);
break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a4859f7a7cc..d45b08d1dbc 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1175,8 +1175,6 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
spin_unlock(&dpriv->lock);
#endif
- dev->trans_start = jiffies;
-
if (debug > 2)
dscc4_tx_print(dev, dpriv, "Xmit");
/* To be cleaned(unsigned int)/optimized. Later, ok ? */
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 4dde2ea4a18..a3ea27ce04f 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -658,7 +658,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = next_desc(port, port->txin, 1);
sca_outw(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index aad9ed45c25..ea476cbd38b 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -585,7 +585,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = (port->txin + 1) % card->tx_ring_buffers;
sca_outl(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb79f7c..70527e5a54a 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
static void x25_connected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 1);
+ x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 2);
+ x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* X.25 to LAPB */
switch (skb->data[0]) {
- case 0: /* Data to be transmitted */
+ case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- case 1:
+ case X25_IFACE_CONNECT:
if ((result = lapb_connect_request(dev))!= LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
}
break;
- case 2:
+ case X25_IFACE_DISCONNECT:
if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c2cdde686a..88e363033e2 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -891,7 +891,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99903d..4d4dc38c729 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0x00;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01:
+ case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
goto drop;
- case 0x02:
+ case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
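All of these WAN drivers exchange a one-byte pseudo-header with the X.25 layer in front of each frame; the magic 0x00/0x01/0x02 values are now written with the X25_IFACE_* constants. A minimal dispatch sketch of the convention (the include is an assumption; the drivers pick the constants up via the headers they already use):

#include <linux/if_x25.h>	/* assumed location of the X25_IFACE_* values */

	switch (skb->data[0]) {
	case X25_IFACE_DATA:		/* 0x00: payload follows, strip the byte, pass to LAPB */
		break;
	case X25_IFACE_CONNECT:		/* 0x01: establish the LAPB link */
		break;
	case X25_IFACE_DISCONNECT:	/* 0x02: release the LAPB link */
		break;
	default:			/* anything else is dropped */
		break;
	}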
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b2785037712..e2c6f7f4f51 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1506,8 +1506,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
/* send now! */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_start_xmit_out");
@@ -2103,7 +2101,7 @@ static void lmc_driver_timeout(struct net_device *dev)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev->trans_start < TX_TIMEOUT)
+ if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
goto bug_out;
/*
@@ -2135,7 +2133,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
bug_out:
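The scattered dev->trans_start = jiffies updates in the xmit paths are removed because the networking core now takes care of updating the transmit queue's trans_start; where a driver still consults the timestamp, as in the watchdog above, it goes through dev_trans_start(). Hypothetical sketch of the timeout test:

#include <linux/netdevice.h>
#include <linux/jiffies.h>

static bool example_tx_timed_out(struct net_device *dev, unsigned long tx_timeout)
{
	/* dev_trans_start() reports the most recent trans_start across the tx queues */
	return time_after(jiffies, dev_trans_start(dev) + tx_timeout);
}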
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c64309..c6aa66e5b52 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
u16 next_bd = card->chan[ch].tx_next_bd;
u32 scabase = card->hw.scabase;
- printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
+ printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
first_bd, TX_BD_ADDR(ch, first_bd),
next_bd, TX_BD_ADDR(ch, next_bd));
@@ -1790,7 +1790,7 @@ static void cpc_tx_timeout(struct net_device *dev)
cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
~(CPLD_REG2_FALC_LED1 << (2 * ch)));
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
CPC_UNLOCK(card, flags);
netif_wake_queue(dev);
}
@@ -1849,7 +1849,6 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
if (d->trace_on) {
cpc_trace(dev, skb, 'T');
}
- dev->trans_start = jiffies;
/* Start transmission */
CPC_LOCK(card, flags);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94943b..4293889e287 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
int res;
if (!tty || !tty->driver_data ) {
- CPC_TTY_DBG("hdlx-tty: no TTY in close \n");
+ CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
return;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af2246..43ae6f440bf 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
return(-EINVAL);
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
+ printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
return(-EINVAL);
}
base = map->base_addr;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 541c700dcee..db73a7be199 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -298,7 +298,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
desc->stat = PACKET_FULL;
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
- dev->trans_start = jiffies;
port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c5834a0..166e77dfffd 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
-#include <linux/x25.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <net/x25device.h>
#include "x25_asy.h"
#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
}
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01: /* Connection request .. do nothing */
+ case X25_IFACE_CONNECT: /* Connection request .. do nothing */
err = lapb_connect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
kfree_skb(skb);
return NETDEV_TX_OK;
- case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+ case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
err = lapb_disconnect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index d8322d2d1e2..746a5ee32f3 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -395,7 +395,6 @@ wd_reset_8390(struct net_device *dev)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 6180772dcc0..d86e8f31e7f 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -83,6 +83,21 @@
#define D_SUBMODULE control
#include "debug-levels.h"
+static int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+ "If true, the device will not enable idle mode negotiation "
+ "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+ "If true, the driver will not tell the device to enter "
+ "power saving mode when it reports it is ready for it. "
+ "False by default (so the device is told to do power "
+ "saving).");
+
int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
@@ -346,7 +361,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
i2400m_state);
i2400m_reset(i2400m, I2400M_RT_WARM);
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
i2400m, ss, i2400m_state);
}
@@ -395,7 +410,7 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
default:
dev_err(dev, "HW BUG? unknown media status %u\n",
status);
- };
+ }
d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
i2400m, ms, status);
}
@@ -524,7 +539,7 @@ void i2400m_report_hook(struct i2400m *i2400m,
}
}
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
i2400m, l3l4_hdr, size);
}
@@ -567,8 +582,7 @@ void i2400m_msg_ack_hook(struct i2400m *i2400m,
size);
}
break;
- };
- return;
+ }
}
@@ -740,7 +754,7 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
break;
default:
ack_timeout = HZ;
- };
+ }
if (unlikely(i2400m->trace_msg_from_user))
wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
@@ -1419,5 +1433,4 @@ void i2400m_dev_shutdown(struct i2400m *i2400m)
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 94dc83c3969..9c8b78d4abd 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -75,25 +75,6 @@
#include "debug-levels.h"
-int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
-module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
-MODULE_PARM_DESC(idle_mode_disabled,
- "If true, the device will not enable idle mode negotiation "
- "with the base station (when connected) to save power.");
-
-int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
-module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
-MODULE_PARM_DESC(rx_reorder_disabled,
- "If true, RX reordering will be disabled.");
-
-int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
-module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
-MODULE_PARM_DESC(power_save_disabled,
- "If true, the driver will not tell the device to enter "
- "power saving mode when it reports it is ready for it. "
- "False by default (so the device is told to do power "
- "saving).");
-
static char i2400m_debug_params[128];
module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
0644);
@@ -395,6 +376,16 @@ retry:
result = i2400m_dev_initialize(i2400m);
if (result < 0)
goto error_dev_initialize;
+
+ /* We don't want any unwanted error recovery triggered from any
+ * other context, so if anything went wrong before this point,
+ * keep i2400m->error_recovery untouched and leave it to
+ * dev_reset_handle(). */
+
+ atomic_dec(&i2400m->error_recovery);
+ /* Everything has worked so far, so we are now ready to
+ * take error recovery if it's required. */
+
/* At this point, reports will come for the device and set it
* to the right state if it is different than UNINITIALIZED */
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ retry:
error_dev_initialize:
error_check_mac_addr:
+error_fw_check:
i2400m->ready = 0;
wmb(); /* see i2400m->ready's documentation */
flush_workqueue(i2400m->work_queue);
-error_fw_check:
if (i2400m->bus_dev_stop)
i2400m->bus_dev_stop(i2400m);
error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = __i2400m_dev_start(i2400m, bm_flags);
if (result >= 0) {
i2400m->updown = 1;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 1;
+ wmb();/* see i2400m->updown and i2400m->alive's doc */
}
}
mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
if (i2400m->updown) {
__i2400m_dev_stop(i2400m);
i2400m->updown = 0;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 0;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
}
mutex_unlock(&i2400m->init_mutex);
}
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
error_dev_start:
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
-error_bus_setup:
/* even if the device was up, it could not be recovered, so we
* mark it as down. */
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
return result;
}
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
+
result = 0;
if (mutex_trylock(&i2400m->init_mutex) == 0) {
/* We are still in i2400m_dev_start() [let it fail] or
@@ -679,39 +675,68 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
complete(&i2400m->msg_completion);
goto out;
}
- if (i2400m->updown == 0) {
- dev_info(dev, "%s: device is down, doing nothing\n", reason);
- goto out_unlock;
- }
+
dev_err(dev, "%s: reinitializing driver\n", reason);
- __i2400m_dev_stop(i2400m);
- result = __i2400m_dev_start(i2400m,
- I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
- if (result < 0) {
+ rmb();
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
- dev_err(dev, "%s: cannot start the device: %d\n",
- reason, result);
- result = -EUCLEAN;
}
-out_unlock:
+
+ if (i2400m->alive) {
+ result = __i2400m_dev_start(i2400m,
+ I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "%s: cannot start the device: %d\n",
+ reason, result);
+ result = -EUCLEAN;
+ if (atomic_read(&i2400m->bus_reset_retries)
+ >= I2400M_BUS_RESET_RETRIES) {
+ result = -ENODEV;
+ dev_err(dev, "tried too many times to "
+ "reset the device, giving up\n");
+ }
+ }
+ }
+
if (i2400m->reset_ctx) {
ctx->result = result;
complete(&ctx->completion);
}
mutex_unlock(&i2400m->init_mutex);
if (result == -EUCLEAN) {
+ /*
+ * We come here because the reset during operational mode
+ * wasn't successfully done and we need to proceed to a bus
+ * reset. For dev_reset_handle() to be able to handle
+ * the reset event later properly, we restore boot_mode back
+ * to the state before the previous reset, i.e. just as if we
+ * were issuing the bus reset for the first time.
+ */
+ i2400m->boot_mode = 0;
+ wmb();
+
+ atomic_inc(&i2400m->bus_reset_retries);
/* ops, need to clean up [w/ init_mutex not held] */
result = i2400m_reset(i2400m, I2400M_RT_BUS);
if (result >= 0)
result = -ENODEV;
+ } else {
+ rmb();
+ if (i2400m->alive) {
+ /* great, dev_start() succeeded, so the device state
+ * is expected to be up; mark it as such */
+ i2400m->updown = 1;
+ wmb();
+ atomic_set(&i2400m->bus_reset_retries, 0);
+ }
}
out:
i2400m_put(i2400m);
kfree(iw);
d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
ws, i2400m, reason);
- return;
}
@@ -729,14 +754,72 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
{
- i2400m->boot_mode = 1;
- wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
GFP_ATOMIC, &reason, sizeof(reason));
}
EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
+ /*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+ struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+ struct i2400m *i2400m = iw->i2400m;
+
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+
+ i2400m_put(i2400m);
+ kfree(iw);
+ return;
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring the device back to some
+ * known state whenever TX sees -110 (-ETIMEDOUT) on copying the data to
+ * the device. The TX failure could mean the device bus is stuck, so the
+ * current error recovery implementation is to trigger a bus reset to the
+ * device in the hope of bringing it back.
+ *
+ * The actual work of error recovery has to be in a thread context because
+ * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
+ * destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may already be a queue of TX works that all hit
+ * the -ETIMEDOUT error condition because the device is already stuck.
+ * Since a bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced to prevent unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again once the previous one has
+ * completed. The error_recovery flag is set when the error recovery
+ * mechanism is scheduled, and is checked when we need to schedule another
+ * error recovery. If one is already in place, we shouldn't schedule
+ * another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
+ if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
+ GFP_ATOMIC, NULL, 0) < 0) {
+ dev_err(dev, "run out of memory for "
+ "scheduling an error recovery ?\n");
+ atomic_dec(&i2400m->error_recovery);
+ }
+ } else
+ atomic_dec(&i2400m->error_recovery);
+ return;
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
+
/*
* Alloc the command and ack buffers for boot mode
*
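The schedule-once guard in i2400m_error_recovery() above works because atomic_add_return() reports the 0 -> 1 transition to exactly one caller. Below is a minimal userspace sketch of that pattern, assuming C11 atomics in place of the kernel's atomic_t; schedule_recovery_work() is a hypothetical stand-in for i2400m_schedule_work(), not a driver function.

/* Sketch only: C11 userspace analogue of the error_recovery guard. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int error_recovery = 1;	/* 1 = not ready, as in i2400m_init() */

/* Hypothetical stand-in for i2400m_schedule_work(); returns 0 on success. */
static int schedule_recovery_work(void)
{
	printf("bus reset scheduled\n");
	return 0;
}

static void mark_device_ready(void)
{
	/* Mirrors the atomic_dec() done after a successful dev_start(). */
	atomic_fetch_sub(&error_recovery, 1);
}

static void request_error_recovery(void)
{
	/* Only the 0 -> 1 transition schedules work; any other previous
	 * value means a recovery is already pending or we are not ready. */
	if (atomic_fetch_add(&error_recovery, 1) == 0) {
		if (schedule_recovery_work() < 0)
			atomic_fetch_sub(&error_recovery, 1);
	} else {
		atomic_fetch_sub(&error_recovery, 1);
	}
}

int main(void)
{
	mark_device_ready();
	request_error_recovery();	/* schedules a reset */
	request_error_recovery();	/* no-op while the first is pending */
	return 0;
}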
@@ -803,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
mutex_init(&i2400m->init_mutex);
/* wake_tx_ws is initialized in i2400m_tx_setup() */
+ atomic_set(&i2400m->bus_reset_retries, 0);
+
+ i2400m->alive = 0;
+
+ /* initialize error_recovery to 1 to denote that we
+ * are not yet ready to take any error recovery */
+ atomic_set(&i2400m->error_recovery, 1);
}
EXPORT_SYMBOL_GPL(i2400m_init);
@@ -996,7 +1086,6 @@ void __exit i2400m_driver_exit(void)
/* for scheds i2400m_dev_reset_handle() */
flush_scheduled_work();
i2400m_barker_db_exit();
- return;
}
module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index b9c4bed3b45..360d4fb195f 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -99,7 +99,10 @@ enum {
*
* @tx_workqueue: workqeueue used for data TX; we don't use the
* system's workqueue as that might cause deadlocks with code in
- * the bus-generic driver.
+ * the bus-generic driver. Read/write access to the queue
+ * is protected with a spinlock (tx_lock in struct i2400m) to avoid
+ * the queue being destroyed in the middle of a queue read/write
+ * operation.
*
* @debugfs_dentry: dentry for the SDIO specific debugfs files
*
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 820b128705e..fa74777fd65 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -160,6 +160,16 @@
#include <linux/wimax/i2400m.h>
#include <asm/byteorder.h>
+enum {
+/* netdev interface */
+ /*
+ * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+ *
+ * The MTU is 1400 or less
+ */
+ I2400M_MAX_MTU = 1400,
+};
+
/* Misc constants */
enum {
/* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
I2400M_BM_ACK_BUF_SIZE = 256,
};
+enum {
+ /* Maximum number of bus reset can be retried */
+ I2400M_BUS_RESET_RETRIES = 3,
+};
+
/**
* struct i2400m_poke_table - Hardware poke table for the Intel 2400m
*
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
* so we have a tx_blk_size variable that the bus layer sets to
* tell the engine how much of that we need.
*
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ * TX queue's buffer space for message header. SDIO requires
+ * 224 bytes and USB 16 bytes. Refer to the bus-specific
+ * driver code for details.
+ *
* @bus_pl_size_max: [fill] Maximum payload size.
*
* @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
*
* @tx_size_max: biggest TX message sent.
*
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
*
* @rx_pl_num: total number of payloads received
*
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
* delivered. Then the driver can release them to the host. See
* drivers/net/i2400m/rx.c for details.
*
+ * @rx_roq_refcount: refcount for rx_roq. This refcounts any access to
+ * rx_roq, thus preventing rx_roq from being destroyed while it
+ * is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
* @rx_reports: reports received from the device that couldn't be
* processed because the driver wasn't still ready; when ready,
* they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
* same.
*
* @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ * this boot. It's not for tracking the number of bus resets during
+ * the whole driver life cycle (from insmod to rmmod) but for the
+ * number of dev_start() calls executed until dev_start() returns
+ * success (i.e. a good boot means a dev_stop() followed by a
+ * successful dev_start()). dev_reset_handler() increments this
+ * counter whenever it triggers a bus reset. It checks this counter
+ * to decide if a subsequent bus reset should be retried.
+ * dev_reset_handler() retries the bus reset until dev_start()
+ * succeeds or the counter reaches I2400M_BUS_RESET_RETRIES. The
+ * counter is cleared to 0 in dev_reset_handle() when dev_start()
+ * returns a success, i.e. a successful boot is completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag
+ * behaves just like @updown (see doc for @updown) except that it
+ * reflects the device state *we expect* rather than the actual state
+ * as denoted by @updown. It is set to 1 whenever @updown is set to 1
+ * in dev_start(). Then the device is expected to be alive all the
+ * time (i2400m->alive remains 1) until the driver is removed.
+ * Therefore all the device reboot events detected can still be
+ * handled properly by either dev_reset_handle() or
+ * .pre_reset/.post_reset as long as the driver is present. It is set
+ * to 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ * 0 means ready to take an error recovery; 1 means not ready. It is
+ * initialized to 1 in probe() since we do not want to take any error
+ * recovery during probe(). It is decremented by 1 whenever dev_start()
+ * succeeds, to indicate we are ready to take error recovery from then
+ * on. It is checked every time we want to schedule an error recovery.
+ * If an error recovery is already in place (error_recovery was set
+ * to 1), we should not schedule another one until the last one is
+ * done.
*/
struct i2400m {
struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
wait_queue_head_t state_wq; /* Woken up when on state updates */
size_t bus_tx_block_size;
+ size_t bus_tx_room_min;
size_t bus_pl_size_max;
unsigned bus_bm_retries;
@@ -550,10 +607,12 @@ struct i2400m {
tx_num, tx_size_acc, tx_size_min, tx_size_max;
/* RX stuff */
- spinlock_t rx_lock; /* protect RX state */
+ /* protect RX state and rx_roq_refcount */
+ spinlock_t rx_lock;
unsigned rx_pl_num, rx_pl_max, rx_pl_min,
rx_num, rx_size_acc, rx_size_min, rx_size_max;
- struct i2400m_roq *rx_roq; /* not under rx_lock! */
+ struct i2400m_roq *rx_roq; /* access is refcounted */
+ struct kref rx_roq_refcount; /* refcount access to rx_roq */
u8 src_mac_addr[ETH_HLEN];
struct list_head rx_reports; /* under rx_lock! */
struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
struct i2400m_barker_db *barker;
struct notifier_block pm_notifier;
+
+ /* counting bus reset retries in this boot */
+ atomic_t bus_reset_retries;
+
+ /* if the device is expected to be alive */
+ unsigned alive;
+
+ /* 0 if we are ready for error recovery; 1 if not ready */
+ atomic_t error_recovery;
+
};
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
extern int i2400m_pre_reset(struct i2400m *);
extern int i2400m_post_reset(struct i2400m *);
+extern void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
extern void i2400m_tx_msg_sent(struct i2400m *);
-extern int i2400m_power_save_disabled;
/*
* Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
extern void i2400m_barker_db_exit(void);
-/* Module parameters */
-
-extern int i2400m_idle_mode_disabled;
-extern int i2400m_rx_reorder_disabled;
-
#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index b811c2f1f5e..94742e1eafe 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -84,17 +84,15 @@
enum {
/* netdev interface */
- /*
- * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
- *
- * The MTU is 1400 or less
- */
- I2400M_MAX_MTU = 1400,
/* 20 secs? yep, this is the maximum timeout that the device
* might take to get out of IDLE / negotiate it with the base
* station. We add 1sec for good measure. */
I2400M_TX_TIMEOUT = 21 * HZ,
- I2400M_TX_QLEN = 5,
+ /*
+ * Experimentation has determined 20 to be a good value
+ * for minimizing jitter in the throughput.
+ */
+ I2400M_TX_QLEN = 20,
};
@@ -255,7 +253,6 @@ void i2400m_net_wake_stop(struct i2400m *i2400m)
kfree_skb(wake_tx_skb);
}
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
@@ -434,7 +431,6 @@ void i2400m_tx_timeout(struct net_device *net_dev)
* this, there might be data pending to be sent or not...
*/
net_dev->stats.tx_errors++;
- return;
}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b..8cc9e319f43 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
#define D_SUBMODULE rx
#include "debug-levels.h"
+static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+ "If true, RX reordering will be disabled.");
+
struct i2400m_report_hook_args {
struct sk_buff *skb_rx;
const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
goto error_waiter_cancelled;
}
- if (ack_skb == NULL) {
+ if (IS_ERR(ack_skb))
dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
- i2400m->ack_skb = ERR_PTR(-ENOMEM);
- } else
- i2400m->ack_skb = ack_skb;
+ i2400m->ack_skb = ack_skb;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
complete(&i2400m->msg_completion);
return;
error_waiter_cancelled:
- kfree_skb(ack_skb);
+ if (!IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
error_no_waiter:
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
- return;
}
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
out:
d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
i2400m, roq, skb, sn, nsn);
- return;
}
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
unsigned new_nws, nsn_itr;
new_nws = __i2400m_roq_nsn(roq, sn);
- if (unlikely(new_nws >= 1024) && d_test(1)) {
- dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
- new_nws, sn, roq->ws);
- WARN_ON(1);
- i2400m_roq_log_dump(i2400m, roq);
- }
+ /*
+ * For type 2 (update_window_start) RX messages, there is no need
+ * to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
}
roq->ws = 0;
d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
- return;
}
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
}
d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
i2400m, roq, skb, lbn);
- return;
}
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
old_ws, len, sn, nsn, roq->ws);
d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
- return;
}
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m, roq, skb, sn);
len = skb_queue_len(&roq->queue);
nsn = __i2400m_roq_nsn(roq, sn);
+ /*
+ * For type 3 (queue_update_window_start) RX messages, there is no need
+ * to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
old_ws = roq->ws;
- if (unlikely(nsn >= 1024)) {
- dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
- nsn, sn, roq->ws);
- i2400m_roq_log_dump(i2400m, roq);
- i2400m_reset(i2400m, I2400M_RT_WARM);
- } else {
- /* if the queue is empty, don't bother as we'd queue
- * it and inmediately unqueue it -- just deliver it */
- if (len == 0) {
- struct i2400m_roq_data *roq_data;
- roq_data = (struct i2400m_roq_data *) &skb->cb;
- i2400m_net_erx(i2400m, skb, roq_data->cs);
- }
- else
- __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
- __i2400m_roq_update_ws(i2400m, roq, sn + 1);
- i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
- old_ws, len, sn, nsn, roq->ws);
- }
+ /* If the queue is empty, don't bother as we'd queue
+ * it and immediately unqueue it -- just deliver it.
+ */
+ if (len == 0) {
+ struct i2400m_roq_data *roq_data;
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ i2400m_net_erx(i2400m, skb, roq_data->cs);
+ } else
+ __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+ old_ws, len, sn, nsn, roq->ws);
+
d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
i2400m, roq, skb, sn);
- return;
}
/*
+ * This routine destroys the memory allocated for rx_roq when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence the memory must only be destroyed when
+ * rx_roq_refcount drops to zero; this routine is executed at that
+ * point.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+ unsigned itr;
+ struct i2400m *i2400m
+ = container_of(ref, struct i2400m, rx_roq_refcount);
+ for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+ __skb_queue_purge(&i2400m->rx_roq[itr].queue);
+ kfree(i2400m->rx_roq[0].log);
+ kfree(i2400m->rx_roq);
+ i2400m->rx_roq = NULL;
+}
+
+/*
* Receive and send up an extended data packet
*
* @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
unsigned ro_needed, ro_type, ro_cin, ro_sn;
struct i2400m_roq *roq;
struct i2400m_roq_data *roq_data;
+ unsigned long flags;
BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->rx_roq == NULL) {
+ kfree_skb(skb); /* rx_roq is already destroyed */
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ goto error;
+ }
roq = &i2400m->rx_roq[ro_cin];
+ kref_get(&i2400m->rx_roq_refcount);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
roq_data = (struct i2400m_roq_data *) &skb->cb;
roq_data->sn = ro_sn;
roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
default:
dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
}
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
else
i2400m_net_erx(i2400m, skb, cs);
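The hunk above follows a lock-then-reference discipline: the NULL check, kref_get() and the final kref_put() all happen under rx_lock, while the actual use of rx_roq happens outside it. A compact userspace sketch of the same discipline, assuming a pthread mutex and a plain counter as stand-ins for the kernel spinlock and struct kref (all names here are illustrative):

/* Sketch only: refcounted access to a table that may be torn down. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;		/* stands in for i2400m->rx_roq */
static unsigned refcount;	/* stands in for rx_roq_refcount */

static void table_destroy(void)	/* analogous to i2400m_rx_roq_destroy() */
{
	free(table);
	table = NULL;
}

static void table_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0)
		table_destroy();
	pthread_mutex_unlock(&lock);
}

static void use_entry(int idx)
{
	int *t;

	pthread_mutex_lock(&lock);
	if (table == NULL) {		/* already torn down: drop the work */
		pthread_mutex_unlock(&lock);
		return;
	}
	refcount++;			/* pin the table while we use it */
	t = table;
	pthread_mutex_unlock(&lock);

	t[idx]++;			/* safe: cannot be freed under us */

	table_put();
}

int main(void)
{
	table = calloc(16, sizeof(*table));
	refcount = 1;			/* the "setup" reference */
	use_entry(3);
	table_put();			/* release: frees once the last user is done */
	return 0;
}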
@@ -1041,7 +1073,6 @@ error_skb_clone:
error:
d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
- return;
}
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
__i2400m_roq_init(&i2400m->rx_roq[itr]);
i2400m->rx_roq[itr].log = &rd[itr];
}
+ kref_init(&i2400m->rx_roq_refcount);
}
return 0;
@@ -1357,12 +1389,12 @@ error_roq_alloc:
/* Tear down the RX queue and infrastructure */
void i2400m_rx_release(struct i2400m *i2400m)
{
+ unsigned long flags;
+
if (i2400m->rx_reorder) {
- unsigned itr;
- for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
- __skb_queue_purge(&i2400m->rx_roq[itr].queue);
- kfree(i2400m->rx_roq[0].log);
- kfree(i2400m->rx_roq);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
/* at this point, nothing can be received... */
i2400m_report_hook_flush(i2400m);
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index d619da33f20..8b809c2ead6 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -197,7 +197,6 @@ error_alloc_skb:
error_get_size:
error_bad_size:
d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
- return;
}
@@ -229,7 +228,6 @@ void i2400ms_irq(struct sdio_func *func)
i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
- return;
}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index de66d068c9c..b53cd1c80e3 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
tx_msg_size, result);
}
+ if (result == -ETIMEDOUT) {
+ i2400m_error_recovery(i2400m);
+ break;
+ }
d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
}
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
{
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = &i2400ms->func->dev;
+ unsigned long flags;
d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
/* schedule tx work, this is because tx may block, therefore
* it has to run in a thread context.
*/
- queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (i2400ms->tx_workqueue != NULL)
+ queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
int result;
struct device *dev = &i2400ms->func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
"%s-tx", i2400m->wimax_dev.name);
- i2400ms->tx_workqueue =
+ tx_workqueue =
create_singlethread_workqueue(i2400ms->tx_wq_name);
- if (NULL == i2400ms->tx_workqueue) {
+ if (tx_workqueue == NULL) {
dev_err(dev, "TX: failed to create workqueue\n");
result = -ENOMEM;
} else
result = 0;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = tx_workqueue;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
return result;
}
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
- if (i2400ms->tx_workqueue) {
- destroy_workqueue(i2400ms->tx_workqueue);
- i2400ms->tx_workqueue = NULL;
- }
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
+
+ tx_workqueue = i2400ms->tx_workqueue;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (tx_workqueue)
+ destroy_workqueue(tx_workqueue);
}
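The teardown above publishes the NULL pointer under tx_lock before destroying the workqueue, so a concurrent kick either sees the queue and uses it, or sees NULL and does nothing. A minimal userspace sketch of that hand-off, assuming a pthread mutex in place of the spinlock; submit_fn() is a hypothetical stand-in for queue_work():

/* Sketch only: tearing down a work submission target under a lock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { const char *name; };

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct workqueue *tx_workqueue;

static void submit_fn(struct workqueue *wq)	/* stand-in for queue_work() */
{
	printf("work queued on %s\n", wq->name);
}

static void tx_kick(void)
{
	pthread_mutex_lock(&tx_lock);
	if (tx_workqueue != NULL)	/* the queue may already be gone */
		submit_fn(tx_workqueue);
	pthread_mutex_unlock(&tx_lock);
}

static void tx_release(void)
{
	struct workqueue *wq;

	pthread_mutex_lock(&tx_lock);
	wq = tx_workqueue;
	tx_workqueue = NULL;		/* unpublish before destroying */
	pthread_mutex_unlock(&tx_lock);

	free(wq);			/* destroy outside the lock */
}

int main(void)
{
	tx_workqueue = malloc(sizeof(*tx_workqueue));
	tx_workqueue->name = "i2400m-tx";
	tx_kick();			/* queued */
	tx_release();
	tx_kick();			/* silently ignored: queue is gone */
	return 0;
}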
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 7632f80954e..9bfc26e1bc6 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
sdio_set_drvdata(func, i2400ms);
i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+ /*
+ * Room required in the TX queue for an SDIO message to accommodate
+ * the smallest payload while allocating header space is 224 bytes,
+ * which is the smallest message size (the block size, 256 bytes)
+ * minus the smallest message header size (32 bytes).
+ */
+ i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
i2400m->bus_setup = i2400ms_bus_setup;
i2400m->bus_dev_start = i2400ms_bus_dev_start;
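A compile-time restatement of the arithmetic in that comment, assuming the values the comment itself gives (I2400MS_BLK_SIZE = 256 and I2400M_PL_ALIGN = 16; both are defined elsewhere in the driver and only assumed here):

/* Sketch only: the 224-byte figure follows from the stated constants. */
enum {
	I2400MS_BLK_SIZE_ASSUMED = 256,	/* smallest SDIO message size */
	I2400M_PL_ALIGN_ASSUMED  = 16,	/* payload alignment unit */
};

_Static_assert(I2400MS_BLK_SIZE_ASSUMED - 2 * I2400M_PL_ALIGN_ASSUMED == 224,
	       "smallest message minus smallest header (2 * 16 bytes) is 224");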
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf..3f819efc06b 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
* Doc says maximum transaction is 16KiB. If we had 16KiB en
* route and 16KiB being queued, it boils down to needing
* 32KiB.
+ * 32KiB is insufficient for a 1400-byte MTU, hence the
+ * TX buffer size is increased to 64KiB.
*/
- I2400M_TX_BUF_SIZE = 32768,
+ I2400M_TX_BUF_SIZE = 65536,
/**
* Message header and payload descriptors have to be 16
* aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
* at the end there are less, we pad up to the nearest
* multiple of 16.
*/
- I2400M_TX_PLD_MAX = 12,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum number of payloads per message can be
+ * up to 60. Increasing the number of payloads to 60 per message
+ * helps to accommodate smaller payloads in a single transaction.
+ */
+ I2400M_TX_PLD_MAX = 60,
I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
I2400M_TX_SKIP = 0x80000000,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum size of each message can be up to 16KiB.
+ */
+ I2400M_TX_MSG_SIZE = 16384,
};
#define TAIL_FULL ((void *)~(unsigned long)NULL)
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* @padding: ensure that there is at least this many bytes of free
* contiguous space in the fifo. This is needed because later on
* we might need to add padding.
+ * @try_head: specify whether to allocate head room or tail room space
+ * in the TX FIFO. This boolean is required to avoid a system hang
+ * due to an infinite loop caused by i2400m_tx_fifo_push().
+ * The caller must always try to allocate tail room space first by
+ * calling this routine with try_head = 0. If there is not enough
+ * tail room space but there is enough head room space
+ * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
+ * room space by calling this routine again with try_head = 1.
*
* Returns:
*
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* fail and return TAIL_FULL and let the caller figure out if we wants to
* skip the tail room and try to allocate from the head.
*
+ * There is a corner case in which i2400m_tx_new() can get into
+ * an infinite loop calling i2400m_tx_fifo_push().
+ * In certain situations, tx_in may have reached the top of the TX FIFO
+ * and i2400m_tx_tail_room() returns 0, as described below:
+ *
+ * N ___________ tail room is zero
+ * |<- IN ->|
+ * | |
+ * | |
+ * | |
+ * | data |
+ * |<- OUT ->|
+ * | |
+ * | |
+ * | head room |
+ * 0 -----------
+ * While the tail room is zero in the TX FIFO, if there is a request
+ * to add a payload to the TX FIFO, the following call chain results:
+ * i2400m_tx()
+ * ->calls i2400m_tx_close()
+ * ->calls i2400m_tx_skip_tail()
+ * goto try_new;
+ * ->calls i2400m_tx_new()
+ * |----> [try_head:]
+ * infinite loop | ->calls i2400m_tx_fifo_push()
+ * | if (tail_room < needed)
+ * | if (head_room => needed)
+ * | return TAIL_FULL;
+ * |<---- goto try_head;
+ *
+ * i2400m_tx() calls i2400m_tx_close() to close the message, since there
+ * is no tail room to accommodate the payload, and calls
+ * i2400m_tx_skip_tail() to skip the tail space. i2400m_tx() then calls
+ * i2400m_tx_new() to allocate space for a new message header, calling
+ * i2400m_tx_fifo_push(), which returns TAIL_FULL since there is no tail
+ * space to accommodate the message header but there is enough head space.
+ * i2400m_tx_new() keeps retrying by calling i2400m_tx_fifo_push(),
+ * ending up in a loop and causing a system freeze.
+ *
+ * This corner case is avoided by passing a try_head boolean
+ * as an argument to i2400m_tx_fifo_push().
+ *
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* pop data off the queue
*/
static
-void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
+ size_t padding, bool try_head)
{
struct device *dev = i2400m_dev(i2400m);
size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
}
/* Is there space at the tail? */
tail_room = __i2400m_tx_tail_room(i2400m);
- if (tail_room < needed_size) {
- if (i2400m->tx_out % I2400M_TX_BUF_SIZE
- < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+ if (!try_head && tail_room < needed_size) {
+ /*
+ * If the tail room space is not enough to push the message
+ * into the TX FIFO, then there are two possibilities:
+ * 1. There is enough head room space to accommodate
+ * this message in the TX FIFO.
+ * 2. There is not enough space in either the head room or
+ * the tail room of the TX FIFO to accommodate the message.
+ * In case (1), return TAIL_FULL so that the caller can decide
+ * whether it wants to push the message into the head room space.
+ * In case (2), return NULL, indicating that the TX FIFO
+ * cannot accommodate the message.
+ */
+ if (room - tail_room >= needed_size) {
d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
size, padding);
return TAIL_FULL; /* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
+ bool try_head = 0;
BUG_ON(i2400m->tx_msg != NULL);
+ /*
+ * In certain situations, the TX queue might have enough space to
+ * accommodate the new message header (I2400M_TX_PLD_SIZE) but not
+ * enough space to accommodate the payloads.
+ * Adding bus_tx_room_min padding while allocating a new TX message
+ * increases the likelihood of including at least one payload of
+ * size <= bus_tx_room_min.
+ */
try_head:
- tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+ tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
+ i2400m->bus_tx_room_min, try_head);
if (tx_msg == NULL)
goto out;
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
+ try_head = 1;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
- pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+ pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
if (unlikely(WARN_ON(pad_buf == NULL
|| pad_buf == TAIL_FULL))) {
/* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
+ bool try_head = 0;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
* current one is out of payload slots or we have a singleton,
* close it and start a new one */
spin_lock_irqsave(&i2400m->tx_lock, flags);
- result = -ESHUTDOWN;
- if (i2400m->tx_buf == NULL)
+ /* If tx_buf is NULL, device is shutdown */
+ if (i2400m->tx_buf == NULL) {
+ result = -ESHUTDOWN;
goto error_tx_new;
+ }
try_new:
if (unlikely(i2400m->tx_msg == NULL))
i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
}
if (i2400m->tx_msg == NULL)
goto error_tx_new;
- if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+ /*
+ * Check if this skb will fit in the TX queue's currently active
+ * TX message. The total message size must not exceed the maximum
+ * message size, I2400M_TX_MSG_SIZE. If it would, close the
+ * current message and push this skb into a new message.
+ */
+ if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
/* So we have a current message header; now append space for
* the message -- if there is not enough, try the head */
ptr = i2400m_tx_fifo_push(i2400m, padded_len,
- i2400m->bus_tx_block_size);
+ i2400m->bus_tx_block_size, try_head);
if (ptr == TAIL_FULL) { /* Tail is full, try head */
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
+ try_head = 1;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
pl_type, buf_len);
tx_msg->num_pls = le16_to_cpu(num_pls+1);
tx_msg->size += padded_len;
- d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
d_printf(2, dev,
"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
* i2400m_tx_setup - Initialize the TX queue and infrastructure
*
* Make sure we reset the TX sequence to zero, as when this function
- * is called, the firmware has been just restarted.
+ * is called, the firmware has just been restarted. The same rationale
+ * applies to tx_in, tx_out, tx_msg_size and tx_msg: we reset them since
+ * the memory for the TX queue is reallocated.
*/
int i2400m_tx_setup(struct i2400m *i2400m)
{
- int result;
+ int result = 0;
+ void *tx_buf;
+ unsigned long flags;
/* Do this here only once -- can't do on
* i2400m_hard_start_xmit() as we'll cause race conditions if
* the WS was scheduled on another CPU */
INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
- i2400m->tx_sequence = 0;
- i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
- if (i2400m->tx_buf == NULL)
+ tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
+ if (tx_buf == NULL) {
result = -ENOMEM;
- else
- result = 0;
+ goto error_kmalloc;
+ }
+
+ /*
+ * Fail the build if we can't fit at least two maximum size messages
+ * on the TX FIFO [one being delivered while one is constructed].
+ */
+ BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_sequence = 0;
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ i2400m->tx_msg_size = 0;
+ i2400m->tx_msg = NULL;
+ i2400m->tx_buf = tx_buf;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
/* Huh? the bus layer has to define this... */
BUG_ON(i2400m->bus_tx_block_size == 0);
+error_kmalloc:
return result;
}
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 7b6a1d98bd7..d44b545f408 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -178,7 +178,6 @@ error_submit:
out:
d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
urb, urb->status, urb->actual_length);
- return;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d8c4d6497fd..0d5081d77dc 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
/* Our firmware file name */
static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+ I2400MU_FW_FILE_NAME_v1_5,
#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
I2400MU_FW_FILE_NAME_v1_4,
NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
usb_set_intfdata(iface, i2400mu);
i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+ /*
+ * Room required in the TX queue for a USB message to accommodate
+ * the smallest payload while allocating header space is 16 bytes.
+ * Adding this room for the new TX message increases the
+ * likelihood of including any payload with size <= 16 bytes.
+ */
+ i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
i2400m->bus_setup = NULL;
i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -505,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
device_init_wakeup(dev, 1);
usb_dev->autosuspend_delay = 15 * HZ;
- usb_dev->autosuspend_disabled = 0;
+ usb_enable_autosuspend(usb_dev);
#endif
result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
@@ -778,4 +787,5 @@ MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
"(5x50 & 6050)");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 58894366075..174e3442d51 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
menuconfig WLAN
bool "Wireless LAN"
depends on !S390
+ depends on NET
select WIRELESS
default y
---help---
@@ -38,6 +39,12 @@ config LIBERTAS_THINFIRM
---help---
A library for Marvell Libertas 8xxx devices using thinfirm.
+config LIBERTAS_THINFIRM_DEBUG
+ bool "Enable full debugging output in the Libertas thin firmware module."
+ depends on LIBERTAS_THINFIRM
+ ---help---
+ Debugging support.
+
config LIBERTAS_THINFIRM_USB
tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +217,7 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
-config RTL8180
- tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8180 and RTL8185 based cards.
- These are PCI based chips found in cards such as:
-
- (RTL8185 802.11g)
- A-Link WL54PC
-
- (RTL8180 802.11b)
- Belkin F5D6020 v3
- Belkin F5D6020 v3
- Dlink DWL-610
- Dlink DWL-510
- Netgear MA521
- Level-One WPC-0101
- Acer Aspire 1357 LMi
- VCTnet PC-11B1
- Ovislink AirLive WL-1120PCM
- Mentor WL-PCI
- Linksys WPC11 v4
- TrendNET TEW-288PI
- D-Link DWL-520 Rev D
- Repotec RP-WP7126
- TP-Link TL-WN250/251
- Zonet ZEW1000
- Longshine LCS-8031-R
- HomeLine HLW-PCC200
- GigaFast WF721-AEX
- Planet WL-3553
- Encore ENLWI-PCI1-NT
- TrendNET TEW-266PC
- Gigabyte GN-WLMR101
- Siemens-fujitsu Amilo D1840W
- Edimax EW-7126
- PheeNet WL-11PCIR
- Tonze PC-2100T
- Planet WL-8303
- Dlink DWL-650 v M1
- Edimax EW-7106
- Q-Tec 770WC
- Topcom Skyr@cer 4011b
- Roper FreeLan 802.11b (edition 2004)
- Wistron Neweb Corp CB-200B
- Pentagram HorNET
- QTec 775WC
- TwinMOS Booming B Series
- Micronet SP906BB
- Sweex LC700010
- Surecom EP-9428
- Safecom SWLCR-1100
-
- Thanks to Realtek for their support!
-
-config RTL8187
- tristate "Realtek 8187 and 8187B USB support"
- depends on MAC80211 && USB
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8187 and RTL8187B based cards.
- These are USB based chips found in devices such as:
-
- Netgear WG111v2
- Level 1 WNC-0301USB
- Micronet SP907GK V5
- Encore ENUWI-G2
- Trendnet TEW-424UB
- ASUS P5B Deluxe/P5K Premium motherboards
- Toshiba Satellite Pro series of laptops
- Asus Wireless Link
- Linksys WUSB54GC-EU v2
- (v1 = rt73usb; v3 is rt2070-based,
- use staging/rt3070 or try rt2800usb)
-
- Thanks to Realtek for their support!
-
-# If possible, automatically enable LEDs for RTL8187.
-
-config RTL8187_LEDS
- bool
- depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
- default y
+source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b558d..880ad9d170c 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
- unsigned int bit_nr, i;
+ unsigned int bit_nr;
u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
bit_nr &= 0x3F;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- mclist = mclist->next;
}
return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
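The hunk above keeps the hash scheme and only changes the iteration: the top 6 bits of the CRC select one bit of a 64-bit filter split across two 32-bit words. A standalone sketch of just that bit mapping follows; the CRC values are fed in directly here, whereas in the driver they come from ether_crc(), which is not reimplemented.

/* Sketch only: mapping a 32-bit CRC to the 2 x 32-bit multicast filter. */
#include <stdint.h>
#include <stdio.h>

static uint64_t hash_to_filter(const uint32_t *crcs, int n)
{
	uint32_t mc_filter[2] = { 0, 0 };
	int i;

	for (i = 0; i < n; i++) {
		unsigned int bit_nr = (crcs[i] >> 26) & 0x3F;	/* top 6 bits */

		mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	}
	return mc_filter[0] | ((uint64_t)mc_filter[1] << 32);
}

int main(void)
{
	/* Example CRCs only; real values come from ether_crc(ETH_ALEN, addr). */
	uint32_t crcs[] = { 0x04000000u, 0xfc000000u };

	printf("filter = 0x%016llx\n",
	       (unsigned long long)hash_to_filter(crcs, 2));
	return 0;
}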
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a6d9e..3b7ab20a5c5 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
- airo_print_info(dev->name, "Firmware version %x.%x.%02x",
+ airo_print_info(dev->name, "Firmware version %x.%x.%02d",
((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
(le16_to_cpu(cap_rid.softVer) & 0xFF),
le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
{
u8 reason = status & 0xFF;
- switch (status) {
+ switch (status & 0xFF00) {
case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
+ switch (status) {
+ case STAT_NOBEACON:
+ airo_print_dbg(devname, "link lost (missed beacons)");
+ break;
+ case STAT_MAXRETRIES:
+ case STAT_MAXARL:
+ airo_print_dbg(devname, "link lost (max retries)");
+ break;
+ case STAT_FORCELOSS:
+ airo_print_dbg(devname, "link lost (local choice)");
+ break;
+ case STAT_TSFSYNC:
+ airo_print_dbg(devname, "link lost (TSF sync lost)");
+ break;
+ default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
+ break;
+ }
break;
case STAT_DEAUTH:
airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
airo_print_dbg(devname, "authentication failed (reason: %d)",
reason);
break;
+ case STAT_ASSOC:
+ case STAT_REASSOC:
+ break;
default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
break;
}
}
@@ -5151,13 +5162,6 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
enable_MAC(ai, 1);
}
-static inline u8 hexVal(char c) {
- if (c>='0' && c<='9') return c -= '0';
- if (c>='a' && c<='f') return c -= 'a'-10;
- if (c>='A' && c<='F') return c -= 'A'-10;
- return 0;
-}
-
static void proc_APList_on_close( struct inode *inode, struct file *file ) {
struct proc_data *data = (struct proc_data *)file->private_data;
struct proc_dir_entry *dp = PDE(inode);
@@ -5177,11 +5181,11 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
switch(j%3) {
case 0:
APList_rid.ap[i][j/3]=
- hexVal(data->wbuffer[j+i*6*3])<<4;
+ hex_to_bin(data->wbuffer[j+i*6*3])<<4;
break;
case 1:
APList_rid.ap[i][j/3]|=
- hexVal(data->wbuffer[j+i*6*3]);
+ hex_to_bin(data->wbuffer[j+i*6*3]);
break;
}
}
@@ -5329,10 +5333,10 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
switch(i%3) {
case 0:
- key[i/3] = hexVal(data->wbuffer[i+j])<<4;
+ key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
break;
case 1:
- key[i/3] |= hexVal(data->wbuffer[i+j]);
+ key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
break;
}
}
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f6036fb4231..33bdc6a84e8 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -75,42 +75,7 @@ static void airo_release(struct pcmcia_device *link);
static void airo_detach(struct pcmcia_device *p_dev);
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the aironet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers,
- where minor device numbers are used to derive the corresponding
- array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
- dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
@@ -132,10 +97,6 @@ static int airo_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "airo_attach()\n");
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -212,9 +173,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -300,16 +259,8 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -320,26 +271,17 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
((local_info_t *)link->priv)->eth_dev =
- init_airo_card(link->irq.AssignedIRQ,
+ init_airo_card(link->irq,
link->io.BasePort1, 1, &link->dev);
if (!((local_info_t *)link->priv)->eth_dev)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb419936df..8a2d4afc74f 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1223,7 +1223,6 @@ static void at76_rx_callback(struct urb *urb)
priv->rx_tasklet.data = (unsigned long)urb;
tasklet_schedule(&priv->rx_tasklet);
- return;
}
static int at76_submit_rx_urb(struct at76_priv *priv)
@@ -1889,6 +1888,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
}
static int at76_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd695c..0a75be027af 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
depends on CFG80211
---help---
This will enable the support for the Atheros wireless drivers.
- ath5k, ath9k and ar9170 drivers share some common code, this option
+ ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
enables the common ath.ko module which shares common helpers.
For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index dc662b76a1c..4f845f80c09 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,41 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_TID 16
-#define WME_BA_BMP_SIZE 64
-#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
-
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
-#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
-
-#define BAW_WITHIN(_start, _bawsz, _seqno) \
- ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
-
-enum ar9170_tid_state {
- AR9170_TID_STATE_INVALID,
- AR9170_TID_STATE_SHUTDOWN,
- AR9170_TID_STATE_PROGRESS,
- AR9170_TID_STATE_COMPLETE,
-};
-
-struct ar9170_sta_tid {
- struct list_head list;
- struct sk_buff_head queue;
- u8 addr[ETH_ALEN];
- u16 ssn;
- u16 tid;
- enum ar9170_tid_state state;
- bool active;
-};
-
struct ar9170_tx_queue_stats {
unsigned int len;
unsigned int limit;
@@ -152,14 +117,11 @@ struct ar9170_tx_queue_stats {
#define AR9170_QUEUE_TIMEOUT 64
#define AR9170_TX_TIMEOUT 8
-#define AR9170_BA_TIMEOUT 4
#define AR9170_JANITOR_DELAY 128
#define AR9170_TX_INVALID_RATE 0xffffffff
-#define AR9170_NUM_TX_STATUS 128
-#define AR9170_NUM_TX_AGG_MAX 30
-#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
-#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -234,11 +196,6 @@ struct ar9170 {
struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
struct delayed_work tx_janitor;
- /* tx ampdu */
- struct sk_buff_head tx_status_ampdu;
- spinlock_t tx_ampdu_list_lock;
- struct list_head tx_ampdu_list;
- atomic_t tx_ampdu_pending;
/* rxstream mpdu merge */
struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -250,11 +207,6 @@ struct ar9170 {
u8 global_ampdu_factor;
};
-struct ar9170_sta_info {
- struct ar9170_sta_tid agg[AR9170_NUM_TID];
- unsigned int ampdu_max_len;
-};
-
struct ar9170_tx_info {
unsigned long timeout;
};
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e6b27..ec8134b4b94 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
if (__nreg) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
+ 8 * __nreg, \
(u8 *) &__ar->cmdbuf[1], \
0, NULL); \
__nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1d..6c466388342 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
__le16 checksum;
__le16 version;
u8 operating_flags;
-#define AR9170_OPFLAG_5GHZ 1
-#define AR9170_OPFLAG_2GHZ 2
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
u8 misc;
__le16 reg_domain[2];
u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c28e68..06f1f3c951a 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c5369298099..2abc8757899 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_ht;
-module_param_named(ht, modparam_ht, bool, S_IRUGO);
-MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
-
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
};
static void ar9170_tx(struct ar9170 *ar);
-static bool ar9170_tx_ampdu(struct ar9170 *ar);
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
-static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 ar9170_get_tid(struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
- return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
-}
-
-#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
-#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
-
-#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
+#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
- printk(KERN_DEBUG "index:%d => \n", i++);
+ printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
"mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
-#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
+#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
-#ifdef AR9170_TXAGG_DEBUG
-static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
- printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
- spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
-}
-
-#endif /* AR9170_TXAGG_DEBUG */
-
/* caller must guarantee exclusive access for _bin_ queue. */
static void ar9170_recycle_expired(struct ar9170 *ar,
struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
- "recycle \n", wiphy_name(ar->hw->wiphy),
+ "recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
-static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
-{
- struct sk_buff_head success;
- struct sk_buff *skb;
- unsigned int i;
- unsigned long queue_bitmap = 0;
-
- skb_queue_head_init(&success);
-
- while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
- __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
-
- ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
- wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
- __ar9170_dump_txqueue(ar, &success);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = __skb_dequeue(&success))) {
- struct ieee80211_tx_info *txinfo;
-
- queue_bitmap |= BIT(skb_get_queue_mapping(skb));
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- txinfo->flags |= IEEE80211_TX_STAT_ACK;
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- }
-
- for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
-#ifdef AR9170_QUEUE_STOP_DEBUG
- printk(KERN_DEBUG "%s: wake queue %d\n",
- wiphy_name(ar->hw->wiphy), i);
- __ar9170_dump_txstats(ar);
-#endif /* AR9170_QUEUE_STOP_DEBUG */
- ieee80211_wake_queue(ar->hw, i);
- }
-
- if (queue_bitmap)
- ar9170_tx(ar);
-}
-
-static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
-
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_BA_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status_ampdu, skb);
- ar9170_tx_fake_ampdu_status(ar);
-
- if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
- !list_empty(&ar->tx_ampdu_list))
- ar9170_tx_ampdu(ar);
-}
-
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
- skb_queue_tail(&ar->tx_status[queue], skb);
- }
+ skb_queue_tail(&ar->tx_status[queue], skb);
}
if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
return NULL;
}
-static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
-{
- struct sk_buff *skb;
- struct ieee80211_tx_info *txinfo;
-
- while (count) {
- skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
- if (!skb)
- break;
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- /* FIXME: maybe more ? */
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- count--;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- if (count) {
- printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
- "suitable frames left in tx_status queue.\n",
- wiphy_name(ar->hw->wiphy), count);
-
- ar9170_dump_tx_status_ampdu(ar);
- }
-#endif /* AR9170_TXAGG_DEBUG */
-}
-
/*
* This worker tries to keep and maintain the tx_status queues.
* So we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
resched = true;
}
- ar9170_tx_fake_ampdu_status(ar);
-
if (!resched)
return;
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
case 0xc5:
/* BlockACK events */
- ar9170_handle_block_ack(ar,
- le16_to_cpu(cmd->ba_fail_cnt.failed),
- le16_to_cpu(cmd->ba_fail_cnt.rate));
- ar9170_tx_fake_ampdu_status(ar);
break;
case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
/* firmware debug */
case 0xca:
- printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+ printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
+ (char *)buf + 4);
break;
case 0xcb:
len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
- status->noise = ar->noise[0];
}
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
ar->global_ampdu_density = 6;
ar->global_ampdu_factor = 3;
- atomic_set(&ar->tx_ampdu_pending, 0);
ar->bad_hw_nagger = jiffies;
err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
skb_queue_purge(&ar->tx_pending[i]);
skb_queue_purge(&ar->tx_status[i]);
}
- skb_queue_purge(&ar->tx_status_ampdu);
mutex_unlock(&ar->mutex);
}
-static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
-}
-
-static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
- struct sk_buff *src)
-{
- struct ar9170_tx_control *dst_txc, *src_txc;
- struct ieee80211_tx_info *dst_info, *src_info;
- struct ar9170_tx_info *dst_arinfo, *src_arinfo;
-
- src_txc = (void *) src->data;
- src_info = IEEE80211_SKB_CB(src);
- src_arinfo = (void *) src_info->rate_driver_data;
-
- dst_txc = (void *) dst->data;
- dst_info = IEEE80211_SKB_CB(dst);
- dst_arinfo = (void *) dst_info->rate_driver_data;
-
- dst_txc->phy_control = src_txc->phy_control;
-
- /* same MCS for the whole aggregate */
- memcpy(dst_info->driver_rates, src_info->driver_rates,
- sizeof(dst_info->driver_rates));
-}
-
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- } else {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- }
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
}
return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
-static bool ar9170_tx_ampdu(struct ar9170 *ar)
-{
- struct sk_buff_head agg;
- struct ar9170_sta_tid *tid_info = NULL, *tmp;
- struct sk_buff *skb, *first = NULL;
- unsigned long flags, f2;
- unsigned int i = 0;
- u16 seq, queue, tmpssn;
- bool run = false;
-
- skb_queue_head_init(&agg);
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (list_empty(&ar->tx_ampdu_list)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: aggregation list is empty.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- goto out_unlock;
- }
-
- list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
- if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- if (++i > 64) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: enough frames aggregated.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- break;
- }
-
- queue = TID_TO_WME_AC(tid_info->tid);
-
- if (skb_queue_len(&ar->tx_pending[queue]) >=
- AR9170_NUM_TX_AGG_MAX) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queue %d full.\n",
- wiphy_name(ar->hw->wiphy), queue);
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- list_del_init(&tid_info->list);
-
- spin_lock_irqsave(&tid_info->queue.lock, f2);
- tmpssn = seq = tid_info->ssn;
- first = skb_peek(&tid_info->queue);
-
- if (likely(first))
- tmpssn = ar9170_get_seq(first);
-
- if (unlikely(tmpssn != seq)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
- wiphy_name(ar->hw->wiphy), seq, tmpssn);
-#endif /* AR9170_TXAGG_DEBUG */
- tid_info->ssn = tmpssn;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
- "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid, tid_info->ssn,
- skb_queue_len(&tid_info->queue));
- __ar9170_dump_txqueue(ar, &tid_info->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = skb_peek(&tid_info->queue))) {
- if (unlikely(ar9170_get_seq(skb) != seq))
- break;
-
- __skb_unlink(skb, &tid_info->queue);
- tid_info->ssn = seq = GET_NEXT_SEQ(seq);
-
- if (unlikely(skb_get_queue_mapping(skb) != queue)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
- "!match.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid,
- TID_TO_WME_AC(tid_info->tid),
- skb_get_queue_mapping(skb));
-#endif /* AR9170_TXAGG_DEBUG */
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (unlikely(first == skb)) {
- ar9170_tx_prepare_phy(ar, skb);
- __skb_queue_tail(&agg, skb);
- first = skb;
- } else {
- ar9170_tx_copy_phy(ar, skb, first);
- __skb_queue_tail(&agg, skb);
- }
-
- if (unlikely(skb_queue_len(&agg) ==
- AR9170_NUM_TX_AGG_MAX))
- break;
- }
-
- if (skb_queue_empty(&tid_info->queue))
- tid_info->active = false;
- else
- list_add_tail(&tid_info->list,
- &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&tid_info->queue.lock, f2);
-
- if (unlikely(skb_queue_empty(&agg))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queued empty list!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- /*
- * tell the FW/HW that this is the last frame,
- * that way it will wait for the immediate block ack.
- */
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &agg);
-#endif /* AR9170_TXAGG_DEBUG */
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-
- spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
- skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
- spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
- run = true;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- }
-
-out_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- __skb_queue_purge(&agg);
-
- return run;
-}
-
static void ar9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
- printk(KERN_DEBUG "%s: stuck frames: ===> \n",
+ printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_inc(&ar->tx_ampdu_pending);
-
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: send frame q:%d =>\n",
wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_dec(&ar->tx_ampdu_pending);
-
frames_failed++;
dev_kfree_skb_any(skb);
} else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo;
- struct ar9170_sta_info *sta_info;
- struct ar9170_sta_tid *agg;
- struct sk_buff *iter;
- unsigned long flags, f2;
- unsigned int max;
- u16 tid, seq, qseq;
- bool run = false, queue = false;
-
- tid = ar9170_get_tid(skb);
- seq = ar9170_get_seq(skb);
- txinfo = IEEE80211_SKB_CB(skb);
- sta_info = (void *) txinfo->control.sta->drv_priv;
- agg = &sta_info->agg[tid];
- max = sta_info->ampdu_max_len;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-
- if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
- "for ESS:%pM tid:%d state:%d.\n",
- wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
- agg->state);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- if (!agg->active) {
- agg->active = true;
- agg->ssn = seq;
- queue = true;
- }
-
- /* check if seq is within the BA window */
- if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
- "fit into BA window (%d - %d)\n",
- wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
- (agg->ssn + max) & 0xfff);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- spin_lock_irqsave(&agg->queue.lock, f2);
-
- skb_queue_reverse_walk(&agg->queue, iter) {
- qseq = ar9170_get_seq(iter);
-
- if (GET_NEXT_SEQ(qseq) == seq) {
- __skb_queue_after(&agg->queue, iter, skb);
- goto queued;
- }
- }
-
- __skb_queue_head(&agg->queue, skb);
-
-queued:
- spin_unlock_irqrestore(&agg->queue.lock, f2);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
- wiphy_name(ar->hw->wiphy), skb);
- __ar9170_dump_txqueue(ar, &agg->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
- run = true;
-
- if (queue)
- list_add_tail(&agg->list, &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- return run;
-
-err_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- dev_kfree_skb_irq(skb);
- return false;
-}
-
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
+ unsigned int queue;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(ar9170_tx_prepare(ar, skb)))
goto err_free;
+ queue = skb_get_queue_mapping(skb);
info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- bool run = ar9170_tx_ampdu_queue(ar, skb);
-
- if (run || !atomic_read(&ar->tx_ampdu_pending))
- ar9170_tx_ampdu(ar);
- } else {
- unsigned int queue = skb_get_queue_mapping(skb);
-
- ar9170_tx_prepare_phy(ar, skb);
- skb_queue_tail(&ar->tx_pending[queue], skb);
- }
+ ar9170_tx_prepare_phy(ar, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
return err;
}
-static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mclist)
+static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
u64 mchash;
- int i;
+ struct netdev_hw_addr *ha;
/* always get broadcast frames */
mchash = 1ULL << (0xff >> 2);
- for (i = 0; i < mc_count; i++) {
- if (WARN_ON(!mclist))
- break;
- mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
- mclist = mclist->next;
- }
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ mchash |= 1ULL << (ha->addr[5] >> 2);
return mchash;
}
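For reference, the rewritten ar9170_op_prepare_multicast() above maps each multicast address to one of 64 hash buckets chosen by the top six bits of its last octet, with bit 63 reserved for broadcast. A minimal standalone sketch of that bucket rule, assuming only standard C; the sample address and the program itself are illustrative and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
            uint64_t mchash = 1ULL << (0xff >> 2);  /* broadcast -> bit 63 */

            mchash |= 1ULL << (mac[5] >> 2);        /* 0xfb >> 2 == 62 */
            printf("multicast hash mask: 0x%016llx\n",
                   (unsigned long long)mchash);
            return 0;
    }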
@@ -2330,57 +1900,6 @@ out:
return err;
}
-static int ar9170_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- memset(sta_info, 0, sizeof(*sta_info));
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
-
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
-
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
-
- return 0;
-}
-
-static int ar9170_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
-
- return 0;
-}
-
static int ar9170_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
- unsigned long flags;
-
- if (!modparam_ht)
- return -EOPNOTSUPP;
-
switch (action) {
- case IEEE80211_AMPDU_TX_START:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
- !list_empty(&tid_info->list)) {
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
- "is in a very bad state!\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- return -EBUSY;
- }
-
- *ssn = tid_info->ssn;
- tid_info->state = AR9170_TID_STATE_PROGRESS;
- tid_info->active = false;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_STOP:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- tid_info->state = AR9170_TID_STATE_SHUTDOWN;
- list_del_init(&tid_info->list);
- tid_info->active = false;
- skb_queue_purge(&tid_info->queue);
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_OPERATIONAL:
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- break;
-
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_add = ar9170_sta_add,
- .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
.ampdu_action = ar9170_ampdu_action,
};
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
- spin_lock_init(&ar->tx_ampdu_list_lock);
- skb_queue_head_init(&ar->tx_status_ampdu);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
- INIT_LIST_HEAD(&ar->tx_ampdu_list);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
BIT(NL80211_IFTYPE_ADHOC);
ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
-
- if (modparam_ht) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- } else {
- ar9170_band_2GHz.ht_cap.ht_supported = false;
- ar9170_band_5GHz.ht_cap.ht_supported = false;
- }
+ IEEE80211_HW_SIGNAL_DBM;
ar->hw->queues = __AR9170_NUM_TXQ;
ar->hw->extra_tx_headroom = 8;
- ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
ar->hw->max_rates = 1;
ar->hw->max_rate_tries = 3;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e1c2fcaa8be..82ab532a492 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -42,6 +42,7 @@
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/device.h>
#include <net/mac80211.h>
#include "ar9170.h"
#include "cmd.h"
@@ -67,18 +68,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
@@ -99,6 +110,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0409, 0x0249) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
+ /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
+ { USB_DEVICE(0x1668, 0x1200) },
/* terminate */
{}
@@ -202,7 +215,7 @@ resubmit:
return;
free:
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
}
static void ar9170_usb_rx_completed(struct urb *urb)
@@ -283,7 +296,7 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
if (!urb)
goto out;
- ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
+ ibuf = usb_alloc_coherent(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
if (!ibuf)
goto out;
@@ -296,8 +309,8 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
usb_unanchor_urb(urb);
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer,
+ urb->transfer_dma);
}
out:
@@ -731,10 +744,10 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
/* unbind anything failed */
if (parent)
- down(&parent->sem);
+ device_lock(parent);
device_release_driver(&aru->udev->dev);
if (parent)
- up(&parent->sem);
+ device_unlock(parent);
usb_put_dev(aru->udev);
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960814f..d32f2828b09 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
struct reg_dmn_pair_mapping {
u16 regDmnEnum;
u16 reg_5ghz_ctl;
@@ -65,17 +71,30 @@ struct ath_regulatory {
struct reg_dmn_pair_mapping *regpair;
};
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @disable_write_buffer: Disable multiple register writes
+ * @write_flush: Flush buffered register writes
+ */
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
- void (*write)(void *, u32 val, u32 reg_offset);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*disable_write_buffer)(void *);
+ void (*write_flush) (void *);
};
struct ath_common;
struct ath_bus_ops {
- void (*read_cachesize)(struct ath_common *common, int *csz);
- bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
- void (*bt_coex_prep)(struct ath_common *common);
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ void (*bt_coex_prep)(struct ath_common *common);
};
struct ath_common {
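The enable_write_buffer/disable_write_buffer/write_flush hooks added above let a bus backend batch several register writes before pushing them to the device. A hedged sketch of how a caller might drive them; the helper itself and the field names common->ops and common->ah are assumptions for illustration, not taken from this patch:

    static void example_burst_write(struct ath_common *common,
                                    const u32 *regs, const u32 *vals, int n)
    {
            int i;

            /* announce that a burst of writes follows (optional hook) */
            if (common->ops->enable_write_buffer)
                    common->ops->enable_write_buffer(common->ah);

            for (i = 0; i < n; i++)
                    common->ops->write(common->ah, vals[i], regs[i]);

            /* push out any buffered writes, then leave buffered mode */
            if (common->ops->write_flush)
                    common->ops->write_flush(common->ah);
            if (common->ops->disable_write_buffer)
                    common->ops->disable_write_buffer(common->ah);
    }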
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d268a..cc09595b781 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
+ath5k-y += ani.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 00000000000..f2311ab3550
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath5k.h"
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+
+/**
+ * DOC: Basic ANI Operation
+ *
+ * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
+ * depending on the amount of interference in the environment, increasing
+ * or reducing sensitivity as necessary.
+ *
+ * The parameters are:
+ * - "noise immunity"
+ * - "spur immunity"
+ * - "firstep level"
+ * - "OFDM weak signal detection"
+ * - "CCK weak signal detection"
+ *
+ * Basically we look at the amount of OFDM and CCK timing errors we get and then
+ * raise or lower immunity accordingly by setting one or more of these
+ * parameters.
+ * Newer chipsets have PHY error counters in hardware which will generate a MIB
+ * interrupt when they overflow. Older hardware has to enable PHY error frames
+ * by setting an RX flag and then count every single PHY error. When a specified
+ * threshold of errors has been reached we will raise immunity.
+ * Also we regularly check the amount of errors and lower or raise immunity as
+ * necessary.
+ */
+
+
+/*** ANI parameter control ***/
+
+/**
+ * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
+ */
+void
+ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
+{
+ /* TODO:
+ * ANI documents suggest the following five levels to use, but the HAL
+ * and ath9k use only use the last two levels, making this
+ * essentially an on/off option. There *may* be a reason for this (???),
+ * so i stick with the HAL version for now...
+ */
+#if 0
+ const s8 hi[] = { -18, -18, -16, -14, -12 };
+ const s8 lo[] = { -52, -56, -60, -64, -70 };
+ const s8 sz[] = { -34, -41, -48, -55, -62 };
+ const s8 fr[] = { -70, -72, -75, -78, -80 };
+#else
+ const s8 sz[] = { -55, -62 };
+ const s8 lo[] = { -64, -70 };
+ const s8 hi[] = { -14, -12 };
+ const s8 fr[] = { -78, -80 };
+#endif
+ if (level < 0 || level >= ARRAY_SIZE(sz)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_LO, lo[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_HI, hi[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRPWR, fr[level]);
+
+ ah->ah_sc->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
+ *
+ * @level: level between 0 and @max_spur_level (the maximum level is dependent
+ * on the chip revision).
+ */
+void
+ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val) ||
+ level > ah->ah_sc->ani_state.max_spur_level) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
+
+ ah->ah_sc->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_firstep_level() - Set "firstep" level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
+ */
+void
+ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 0, 4, 8 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRSTEP, val[level]);
+
+ ah->ah_sc->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
+ * detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int m1l[] = { 127, 50 };
+ const int m2l[] = { 127, 40 };
+ const int m1[] = { 127, 0x4d };
+ const int m2[] = { 127, 0x40 };
+ const int m2cnt[] = { 31, 16 };
+ const int m2lcnt[] = { 63, 48 };
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
+
+ if (on)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+
+ ah->ah_sc->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/**
+ * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int val[] = { 8, 6 };
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
+ AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
+ ah->ah_sc->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/*** ANI algorithm ***/
+
+/**
+ * ath5k_ani_raise_immunity() - Increase noise immunity
+ *
+ * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
+ * the algorithm will tune more parameters then.
+ *
+ * Try to raise noise immunity (=decrease sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
+ bool ofdm_trigger)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ofdm_trigger ? "OFDM" : "CCK");
+
+ /* first: raise noise immunity */
+ if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
+ return;
+ }
+
+ /* only OFDM: raise spur immunity level */
+ if (ofdm_trigger &&
+ as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
+ return;
+ }
+
+ /* AP mode */
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+
+ /* STA and IBSS mode */
+
+ /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
+ * for each neighbour node and use the minimum of these, to make sure we
+ * don't shut out a remote node by raising immunity too high. */
+
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI high");
+ /* only OFDM: beacon RSSI is high, we can disable OFDM weak
+ * signal detection */
+ if (ofdm_trigger && as->ofdm_weak_sig == true) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ return;
+ }
+ /* as a last resort or CCK: raise firstep level */
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI in mid range, we need OFDM weak signal detect,
+ * but can raise firstep level */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI mid");
+ if (ofdm_trigger && as->ofdm_weak_sig == false)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ /* beacon RSSI is low. In B/G mode turn off OFDM weak signal
+ * detect and zero firstep level to maximize CCK sensitivity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI low, 2GHz");
+ if (ofdm_trigger && as->ofdm_weak_sig == true)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ if (as->firstep_level > 0)
+ ath5k_ani_set_firstep_level(ah, 0);
+ return;
+ }
+
+ /* TODO: why not?:
+ if (as->cck_weak_sig == true) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+ */
+}
+
+
+/**
+ * ath5k_ani_lower_immunity() - Decrease noise immunity
+ *
+ * Try to lower noise immunity (=increase sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
+
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ /* AP mode */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* STA and IBSS mode (see TODO above) */
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ /* beacon signal is high, leave OFDM weak signal
+ * detection off or it may oscillate
+ * TODO: who said it's off??? */
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI is mid-range: turn on OFDM weak signal
+ * detection and next, lower firstep level */
+ if (as->ofdm_weak_sig == false) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah,
+ true);
+ return;
+ }
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* beacon signal is low: only reduce firstep level */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ }
+ }
+
+ /* all modes */
+ if (as->spur_level > 0) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
+ return;
+ }
+
+ /* finally, reduce noise immunity */
+ if (as->noise_imm_level > 0) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
+ return;
+ }
+}
+
+
+/**
+ * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
+ *
+ * Return an approximation of the time spent "listening" in milliseconds (ms)
+ * since the last call of this function by deducting the cycles spent
+ * transmitting and receiving from the total cycle count.
+ * Save profile count values for debugging/statistics and because we might want
+ * to use them later.
+ *
+ * We assume no one else clears these registers!
+ */
+static int
+ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int listen;
+
+ /* freeze */
+ ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
+ /* read */
+ as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
+ as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
+ as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
+ as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
+ /* clear */
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
+ /* un-freeze */
+ ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
+
+ /* TODO: where does 44000 come from? (11g clock rate?) */
+ listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
+
+ if (as->pfc_cycles == 0 || listen < 0)
+ return 0;
+ return listen;
+}
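To make the arithmetic above concrete, a worked example with assumed counter values; the divisor of 44000 treats the profile counters as ticking at roughly 44 MHz, i.e. 44,000 cycles per millisecond:

    cycles = 44,000,000   (about one second of wall time)
    rx     =  4,400,000   (10% of the cycles spent receiving)
    tx     =  2,200,000   (5% of the cycles spent transmitting)

    listen = (44,000,000 - 4,400,000 - 2,200,000) / 44,000 = 850 ms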
+
+
+/**
+ * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ *
+ * Clear the PHY error counters as soon as possible, since this might be called
+ * from a MIB interrupt and we want to make sure we don't get interrupted again.
+ * Add the count of CCK and OFDM errors to our internal state, so it can be used
+ * by the algorithm later.
+ *
+ * Will be called from interrupt and tasklet context.
+ * Returns 0 if both counters are zero.
+ */
+static int
+ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
+ struct ath5k_ani_state *as)
+{
+ unsigned int ofdm_err, cck_err;
+
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return 0;
+
+ ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
+ cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
+
+ /* reset counters first, we might be in a hurry (interrupt) */
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+
+ ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
+ cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
+
+ /* sometimes both can be zero, especially when there is a superfluous
+ * second interrupt. Detect that here and return zero. */
+ if (ofdm_err <= 0 && cck_err <= 0)
+ return 0;
+
+ /* avoid negative values should one of the registers overflow */
+ if (ofdm_err > 0) {
+ as->ofdm_errors += ofdm_err;
+ as->sum_ofdm_errors += ofdm_err;
+ }
+ if (cck_err > 0) {
+ as->cck_errors += cck_err;
+ as->sum_cck_errors += cck_err;
+ }
+ return 1;
+}
+
+
+/**
+ * ath5k_ani_period_restart() - Restart ANI period
+ *
+ * Just reset counters, so they are clear for the next "ani period".
+ */
+static void
+ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ /* keep last values for debugging */
+ as->last_ofdm_errors = as->ofdm_errors;
+ as->last_cck_errors = as->cck_errors;
+ as->last_listen = as->listen_time;
+
+ as->ofdm_errors = 0;
+ as->cck_errors = 0;
+ as->listen_time = 0;
+}
+
+
+/**
+ * ath5k_ani_calibration() - The main ANI calibration function
+ *
+ * We count OFDM and CCK errors relative to the time where we did not send or
+ * receive ("listen" time) and raise or lower immunity accordingly.
+ * This is called regularly (every second) from the calibration timer, but also
+ * when an error threshold has been reached.
+ *
+ * In order to synchronize access from different contexts, this should be
+ * called only indirectly by scheduling the ANI tasklet!
+ */
+void
+ath5k_ani_calibration(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ int listen, ofdm_high, ofdm_low, cck_high, cck_low;
+
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* get listen time since last call and add it to the counter because we
+ * might not have restarted the "ani period" last time */
+ listen = ath5k_hw_ani_get_listen_time(ah, as);
+ as->listen_time += listen;
+
+ ath5k_ani_save_and_clear_phy_errors(ah, as);
+
+ ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
+ cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
+ ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
+ cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "listen %d (now %d)", as->listen_time, listen);
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check high ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
+
+ if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
+ /* too many PHY errors - we have to raise immunity */
+ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ ath5k_ani_raise_immunity(ah, as, ofdm_flag);
+ ath5k_ani_period_restart(ah, as);
+
+ } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
+ /* If more than 5 (TODO: why 5?) periods have passed and we got
+ * relatively few errors we can try to lower immunity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check low ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
+
+ if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
+ ath5k_ani_lower_immunity(ah, as);
+
+ ath5k_ani_period_restart(ah, as);
+ }
+}
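Continuing the worked example above (850 ms of accumulated listen time) with the default trigger levels from ani.h, which are scaled per 1000 ms of listen time, the thresholds computed here come out as:

    ofdm_high = 850 * 500 / 1000 = 425     ofdm_low = 850 * 200 / 1000 = 170
    cck_high  = 850 * 200 / 1000 = 170     cck_low  = 850 * 100 / 1000 =  85

More than 425 OFDM or 170 CCK errors in that window raises immunity; once more than 5 * ATH5K_ANI_LISTEN_PERIOD of listen time has accumulated and both counts stay at or below the low marks, immunity is lowered. The numbers are illustrative only.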
+
+
+/*** INTERRUPT HANDLER ***/
+
+/**
+ * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ *
+ * Just read & reset the registers quickly, so they don't generate more
+ * interrupts, save the counters and schedule the tasklet to decide whether
+ * to raise immunity or not.
+ *
+ * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
+ * should take care of all "normal" MIB interrupts.
+ */
+void
+ath5k_ani_mib_intr(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ /* nothing to do here if HW does not have PHY error counters - they
+ * can't be the reason for the MIB interrupt then */
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return;
+
+ /* not in use but clear anyway */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+
+ if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* if one of the errors triggered, we can get a superfluous second
+ * interrupt, even though we have already reset the register. the
+ * function detects that so we can return early */
+ if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
+ return;
+
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
+ as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+}
+
+
+/**
+ * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ *
+ * This is used by hardware without PHY error counters to report PHY errors
+ * on a frame-by-frame basis, instead of the interrupt.
+ */
+void
+ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
+ as->ofdm_errors++;
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
+ as->cck_errors++;
+ if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ }
+}
+
+
+/*** INIT ***/
+
+/**
+ * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ *
+ * Enable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ *
+ * Disable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_ani_init() - Initialize ANI
+ * @mode: Which mode to use (auto, manual high, manual low, off)
+ *
+ * Initialize ANI according to mode.
+ */
+void
+ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
+{
+ /* ANI is only possible on 5212 and newer */
+ if (ah->ah_version < AR5K_AR5212)
+ return;
+
+ /* clear old state information */
+ memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
+
+ /* older hardware has more spur levels than newer */
+ if (ah->ah_mac_srev < AR5K_SREV_AR2414)
+ ah->ah_sc->ani_state.max_spur_level = 7;
+ else
+ ah->ah_sc->ani_state.max_spur_level = 2;
+
+ /* initial values for our ani parameters */
+ if (mode == ATH5K_ANI_MODE_OFF) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual low -> high sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual high -> low sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ah_sc->ani_state.max_spur_level);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (mode == ATH5K_ANI_MODE_AUTO) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+
+ /* newer hardware has PHY error counter registers which we can use to
+ * get OFDM and CCK error counts. older hardware has to set rxfilter and
+ * report every single PHY error by calling ath5k_ani_phy_error_report()
+ */
+ if (mode == ATH5K_ANI_MODE_AUTO) {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_enable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
+ AR5K_RX_FILTER_PHYERR);
+ } else {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_disable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
+ ~AR5K_RX_FILTER_PHYERR);
+ }
+
+ ah->ah_sc->ani_state.ani_mode = mode;
+}
+
+
+/*** DEBUG ***/
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+void
+ath5k_ani_print_counters(struct ath5k_hw *ah)
+{
+ /* clears too */
+ printk(KERN_NOTICE "ACK fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+ printk(KERN_NOTICE "RTS fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+ printk(KERN_NOTICE "RTS success\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+ printk(KERN_NOTICE "FCS error\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+
+ /* no clear */
+ printk(KERN_NOTICE "tx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+ printk(KERN_NOTICE "rx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+ printk(KERN_NOTICE "busy\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+ printk(KERN_NOTICE "cycles\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+ printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+ printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 00000000000..55cf26d8522
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef ANI_H
+#define ANI_H
+
+/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
+#define ATH5K_ANI_LISTEN_PERIOD 100
+#define ATH5K_ANI_OFDM_TRIG_HIGH 500
+#define ATH5K_ANI_OFDM_TRIG_LOW 200
+#define ATH5K_ANI_CCK_TRIG_HIGH 200
+#define ATH5K_ANI_CCK_TRIG_LOW 100
+
+/* average beacon RSSI thresholds */
+#define ATH5K_ANI_RSSI_THR_HIGH 40
+#define ATH5K_ANI_RSSI_THR_LOW 7
+
+/* maximum available levels */
+#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
+#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
+
+
+/**
+ * enum ath5k_ani_mode - mode for ANI / noise sensitivity
+ *
+ * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
+ */
+enum ath5k_ani_mode {
+ ATH5K_ANI_MODE_OFF = 0,
+ ATH5K_ANI_MODE_MANUAL_LOW = 1,
+ ATH5K_ANI_MODE_MANUAL_HIGH = 2,
+ ATH5K_ANI_MODE_AUTO = 3
+};
+
+
+/**
+ * struct ath5k_ani_state - ANI state and associated counters
+ *
+ * @max_spur_level: the maximum spur level is chip dependent
+ */
+struct ath5k_ani_state {
+ enum ath5k_ani_mode ani_mode;
+
+ /* state */
+ int noise_imm_level;
+ int spur_level;
+ int firstep_level;
+ bool ofdm_weak_sig;
+ bool cck_weak_sig;
+
+ int max_spur_level;
+
+ /* used by the algorithm */
+ unsigned int listen_time;
+ unsigned int ofdm_errors;
+ unsigned int cck_errors;
+
+ /* debug/statistics only: numbers from last ANI calibration */
+ unsigned int pfc_tx;
+ unsigned int pfc_rx;
+ unsigned int pfc_busy;
+ unsigned int pfc_cycles;
+ unsigned int last_listen;
+ unsigned int last_ofdm_errors;
+ unsigned int last_cck_errors;
+ unsigned int sum_ofdm_errors;
+ unsigned int sum_cck_errors;
+};
+
+void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
+void ath5k_ani_mib_intr(struct ath5k_hw *ah);
+void ath5k_ani_calibration(struct ath5k_hw *ah);
+void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr);
+
+/* for manual control */
+void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
+void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
+
+void ath5k_ani_print_counters(struct ath5k_hw *ah);
+
+#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02e26d..2785946f659 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define AR5K_TUNE_HWTXTRIES 4
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
-#if 0
-/**
- * struct ath5k_beacon_state - Per-station beacon timer state.
- * @bs_interval: in TU's, can also include the above flags
- * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
- * Point Coordination Function capable AP
- */
-struct ath5k_beacon_state {
- u32 bs_next_beacon;
- u32 bs_next_dtim;
- u32 bs_interval;
- u8 bs_dtim_period;
- u8 bs_cfp_period;
- u16 bs_cfp_max_duration;
- u16 bs_cfp_du_remain;
- u16 bs_tim_offset;
- u16 bs_sleep_duration;
- u16 bs_bmiss_threshold;
- u32 bs_cfp_next;
-};
-#endif
-
/*
* TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
- * @AR5K_INT_MIB: Indicates the Management Information Base counters should be
- * checked. We should do this with ath5k_hw_update_mib_counters() but
- * it seems we should also then do some noise immunity work.
+ * @AR5K_INT_MIB: Indicates that either the Management Information Base
+ * counters or one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
-/* Software interrupts used for calibration */
-enum ath5k_software_interrupt {
- AR5K_SWI_FULL_CALIBRATION = 0x01,
- AR5K_SWI_SHORT_CALIBRATION = 0x02,
+/* mask which calibration is active at the moment */
+enum ath5k_calibration_mask {
+ AR5K_CALIBRATION_FULL = 0x01,
+ AR5K_CALIBRATION_SHORT = 0x02,
+ AR5K_CALIBRATION_ANI = 0x04,
};
/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
struct {
u8 q_tx_num;
} cap_queues;
+
+ bool cap_has_phyerr_counters;
};
/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
+/**
+ * struct ath5k_avg_val - Helper structure for average calculation
+ * @avg: contains the actual average value
+ * @avg_weight: used internally during the calculation to avoid rounding errors
+ */
+struct ath5k_avg_val {
+ int avg;
+ int avg_weight;
+};
/***************************************\
HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
/* TODO: Clean up and merge with ath5k_softc */
struct ath5k_hw {
- u32 ah_magic;
struct ath_common common;
struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
- enum nl80211_iftype ah_op_mode;
struct ieee80211_channel *ah_current_channel;
bool ah_turbo;
bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
- u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
u8 ah_def_ant;
bool ah_software_retry;
- int ah_gpio_npins;
-
struct ath5k_capabilities ah_capabilities;
struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
+ /* average beacon RSSI in our BSS (used by ANI) */
+ struct ath5k_avg_val ah_beacon_rssi_avg;
+
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
/* Calibration timestamp */
- unsigned long ah_cal_tstamp;
-
- /* Calibration interval (secs) */
- u8 ah_cal_intval;
+ unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_ani;
- /* Software interrupt mask */
- u8 ah_swi_mask;
+ /* Calibration mask */
+ u8 ah_cal_mask;
/*
* Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
+ unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, unsigned int);
int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
*/
/* Attach/Detach Functions */
-extern int ath5k_hw_attach(struct ath5k_softc *sc);
-extern void ath5k_hw_detach(struct ath5k_hw *ah);
+int ath5k_hw_attach(struct ath5k_softc *sc);
+void ath5k_hw_detach(struct ath5k_hw *ah);
/* LED functions */
-extern int ath5k_init_leds(struct ath5k_softc *sc);
-extern void ath5k_led_enable(struct ath5k_softc *sc);
-extern void ath5k_led_off(struct ath5k_softc *sc);
-extern void ath5k_unregister_leds(struct ath5k_softc *sc);
+int ath5k_init_leds(struct ath5k_softc *sc);
+void ath5k_led_enable(struct ath5k_softc *sc);
+void ath5k_led_off(struct ath5k_softc *sc);
+void ath5k_unregister_leds(struct ath5k_softc *sc);
/* Reset Functions */
-extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
-extern int ath5k_hw_on_hold(struct ath5k_hw *ah);
-extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+int ath5k_hw_on_hold(struct ath5k_hw *ah);
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set);
/* Power management functions */
-extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
/* DMA Related Functions */
-extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
-extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
-extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
-extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
+int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
u32 phys_addr);
-extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
/* Interrupt handling */
-extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
-extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
-extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
-ath5k_int new_mask);
-extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
/* EEPROM access functions */
-extern int ath5k_eeprom_init(struct ath5k_hw *ah);
-extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
-extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
-extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
+int ath5k_eeprom_init(struct ath5k_hw *ah);
+void ath5k_eeprom_detach(struct ath5k_hw *ah);
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
/* Protocol Control Unit Functions */
-extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
-extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
+extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
-extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
-extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
-extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
+void ath5k_hw_set_associd(struct ath5k_hw *ah);
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
/* Receive start/stop functions */
-extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
/* RX Filter functions */
-extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
-extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
/* Beacon control functions */
-extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
-extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
-extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
-extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
-#if 0
-extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
-extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
-extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
-#endif
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
+void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
+void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
/* ACK bit rate */
void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
-/* ACK/CTS Timeouts */
-extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
-extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
/* Clock rate related functions */
unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
-extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
-extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
+int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
+int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
+ const struct ieee80211_key_conf *key, const u8 *mac);
+int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
/* Queue Control Unit, DFS Control Unit Functions */
-extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
- const struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
- enum ath5k_tx_queue queue_type,
- struct ath5k_txq_info *queue_info);
-extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
-extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
-extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info);
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+ enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info);
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
-extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
/* GPIO Functions */
-extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
-extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
-extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
-extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level);
/* rfkill Functions */
-extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
-extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
/* Misc functions */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
-extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
-extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
-extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+int ath5k_hw_get_capability(struct ath5k_hw *ah,
+ enum ath5k_capability_type cap_type, u32 capability,
+ u32 *result);
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
/* Initial register settings functions */
-extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* Initialize RF */
-extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel,
- unsigned int mode);
-extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
-extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
-extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
+int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode);
+int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
+enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
-extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
-extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
+int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
-extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
-extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
-extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
-extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
+int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
/* Misc PHY functions */
-extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
-extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
-extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
-extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
+void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
/* TX power setup */
-extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 ee_mode, u8 txpower);
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
/*
* Functions used internaly
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->ah_iobase + reg);
}
-#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
-/*
- * Check if a register write has been completed
- */
-static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
- u32 val, bool is_set)
-{
- int i;
- u32 data;
-
- for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
- data = ath5k_hw_reg_read(ah, reg);
- if (is_set && (data & flag))
- break;
- else if ((data & flag) == val)
- break;
- udelay(15);
- }
-
- return (i <= 0) ? -EAGAIN : 0;
-}
-#endif
-
static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
{
u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
-static inline int ath5k_pad_size(int hdrlen)
+#define AVG_SAMPLES 8
+#define AVG_FACTOR 1000
+
+/**
+ * ath5k_moving_average - Exponentially weighted moving average
+ * @avg: average structure
+ * @val: current value
+ *
+ * This implementation makes use of a struct ath5k_avg_val to prevent rounding
+ * errors.
+ */
+static inline struct ath5k_avg_val
+ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
{
- return (hdrlen < 24) ? 0 : hdrlen & 3;
+ struct ath5k_avg_val new;
+ new.avg_weight = avg.avg_weight ?
+ (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
+ (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
+ (val * (AVG_FACTOR));
+ new.avg = new.avg_weight / (AVG_FACTOR);
+ return new;
}
#endif
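A minimal usage sketch for the moving-average helper above (illustration only, not part of the patch; example_beacon_rssi_avg() is a hypothetical function). The numbers assume AVG_SAMPLES = 8 and AVG_FACTOR = 1000 as defined in ath5k.h, which is how ath5k_update_beacon_rssi() later feeds beacon RSSI samples into ah_beacon_rssi_avg:

static void example_beacon_rssi_avg(void)
{
	struct ath5k_avg_val avg = { 0, 0 };

	avg = ath5k_moving_average(avg, 40);	/* avg_weight = 40000, avg = 40 */
	avg = ath5k_moving_average(avg, 31);	/* (40000 * 7 + 31000) / 8 = 38875, avg = 38 */

	/* the fractional 0.875 stays in avg_weight and is carried into the
	 * next sample, where a plain integer average would have truncated it */
}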
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786cc263..e0c244b02f0 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* HW information
*/
- ah->ah_op_mode = NL80211_IFTYPE_STATION;
ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
ah->ah_turbo = false;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
+ ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
+ ah->ah_noise_floor = -95; /* until first NF calibration is run */
+ sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
/*
* Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Get MAC, PHY and RADIO revisions */
ah->ah_mac_srev = srev;
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
- ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_associd(ah);
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe7513ab..cc6d41dec33 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
#include "base.h"
#include "reg.h"
#include "debug.h"
+#include "ani.h"
-static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
static int ath5k_pci_suspend(struct device *dev);
static int ath5k_pci_resume(struct device *dev);
-SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mc_list);
+ struct netdev_hw_addr_list *mc_list);
static void ath5k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
+static int ath5k_get_survey(struct ieee80211_hw *hw,
+ int idx, struct survey_info *survey);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.configure_filter = ath5k_configure_filter,
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
+ .get_survey = ath5k_get_survey,
.conf_tx = NULL,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
@@ -308,7 +311,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
- struct ath5k_txq *txq);
+ struct ath5k_txq *txq, int padsize);
static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
@@ -365,6 +368,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
static void ath5k_beacon_config(struct ath5k_softc *sc);
static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
static void ath5k_tasklet_beacon(unsigned long data);
+static void ath5k_tasklet_ani(unsigned long data);
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
@@ -544,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(hw, &pdev->dev);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
@@ -830,6 +833,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
+ tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
@@ -1138,8 +1142,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
- ah->ah_op_mode = sc->opmode;
-
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1150,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
/* configure operational mode */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
@@ -1211,6 +1214,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct ath5k_hw *ah = sc->ah;
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
+ int ret;
if (!skb) {
skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
@@ -1237,9 +1241,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
- ah->ah_setup_rx_desc(ah, ds,
- skb_tailroom(skb), /* buffer size */
- 0);
+ ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret)
+ return ret;
if (sc->rxlink != NULL)
*sc->rxlink = bf->daddr;
@@ -1272,7 +1276,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
- struct ath5k_txq *txq)
+ struct ath5k_txq *txq, int padsize)
{
struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1328,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
@@ -1636,7 +1640,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
sc->txqs[i].link);
}
}
- ieee80211_wake_queues(sc->hw); /* XXX move to callers */
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
if (sc->txqs[i].setup)
@@ -1807,6 +1810,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
static void
+ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ /* only beacons from our BSSID */
+ if (!ieee80211_is_beacon(mgmt->frame_control) ||
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+ return;
+
+ ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
+ rssi);
+
+ /* in IBSS mode we should keep RSSI statistics per neighbour */
+ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
+}
+
+/*
+ * Compute the padding position. The skb must contain an IEEE 802.11 frame
+ */
+static int ath5k_common_padpos(struct sk_buff *skb)
+{
+ struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
+
+ if (ieee80211_has_a4(frame_control)) {
+ padpos += ETH_ALEN;
+ }
+ if (ieee80211_is_data_qos(frame_control)) {
+ padpos += IEEE80211_QOS_CTL_LEN;
+ }
+
+ return padpos;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough headroom.
+ */
+
+static int ath5k_add_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len>padpos) {
+
+ if (skb_headroom(skb) < padsize)
+ return -1;
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data+padsize, padpos);
+ return padsize;
+ }
+
+ return 0;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+
+static int ath5k_remove_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len>=padpos+padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
+
+ return 0;
+}
+
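A worked example of the padding rule used by the two functions above (illustration only, not part of the patch; example_padsize() is a hypothetical helper). Since IEEE80211_QOS_CTL_LEN is 2, a normal three-address QoS data frame has a 26-byte header and needs 2 bytes of padding, while a four-address QoS frame (32 bytes) is already aligned:

static int example_padsize(bool has_a4, bool is_qos)
{
	int padpos = 24 + (has_a4 ? ETH_ALEN : 0) +
			  (is_qos ? IEEE80211_QOS_CTL_LEN : 0);

	return padpos & 3;	/* QoS data: 26 & 3 = 2; 4-addr QoS: 32 & 3 = 0 */
}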
+static void
ath5k_tasklet_rx(unsigned long data)
{
struct ieee80211_rx_status *rxs;
@@ -1819,8 +1902,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int hdrlen;
- int padsize;
int rx_flag;
spin_lock(&sc->rxbuflock);
@@ -1845,18 +1926,24 @@ ath5k_tasklet_rx(unsigned long data)
break;
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
+ sc->stats.rxerr_proc++;
spin_unlock(&sc->rxbuflock);
return;
}
- if (unlikely(rs.rs_more)) {
- ATH5K_WARN(sc, "unsupported jumbo\n");
- goto next;
- }
+ sc->stats.rx_all_count++;
if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_PHY)
+ if (rs.rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs.rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs.rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
goto next;
+ }
if (rs.rs_status & AR5K_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
@@ -1868,12 +1955,14 @@ ath5k_tasklet_rx(unsigned long data)
*
* XXX do key cache faulting
*/
+ sc->stats.rxerr_decrypt++;
if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs.rs_status & AR5K_RXERR_CRC))
goto accept;
}
if (rs.rs_status & AR5K_RXERR_MIC) {
rx_flag |= RX_FLAG_MMIC_ERROR;
+ sc->stats.rxerr_mic++;
goto accept;
}
@@ -1883,6 +1972,12 @@ ath5k_tasklet_rx(unsigned long data)
sc->opmode != NL80211_IFTYPE_MONITOR)
goto next;
}
+
+ if (unlikely(rs.rs_more)) {
+ sc->stats.rxerr_jumbo++;
+ goto next;
+
+ }
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
@@ -1905,12 +2000,8 @@ accept:
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
+ ath5k_remove_padding(skb);
+
rxs = IEEE80211_SKB_RXCB(skb);
/*
@@ -1939,10 +2030,15 @@ accept:
rxs->freq = sc->curchan->center_freq;
rxs->band = sc->curband->band;
- rxs->noise = sc->ah->ah_noise_floor;
- rxs->signal = rxs->noise + rs.rs_rssi;
+ rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
rxs->antenna = rs.rs_antenna;
+
+ if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
+ sc->stats.antenna_rx[rs.rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -1952,6 +2048,8 @@ accept:
ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
+
/* check beacons in IBSS mode */
if (sc->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2086,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ds = bf->desc;
+ /*
+ * It's possible that the hardware can say the buffer is
+ * completed when it hasn't yet loaded the ds_link from
+ * host memory and moved on. If there are more TX
+ * descriptors in the queue, wait for TXDP to change
+ * before processing this one.
+ */
+ if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+ !list_is_last(&bf->list, &txq->q))
+ break;
+
ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
@@ -1997,6 +2106,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
break;
}
+ sc->stats.tx_all_count++;
skb = bf->skb;
info = IEEE80211_SKB_CB(skb);
bf->skb = NULL;
@@ -2022,14 +2132,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
info->status.rates[ts.ts_final_idx].count++;
if (unlikely(ts.ts_status)) {
- sc->ll_stats.dot11ACKFailureCount++;
- if (ts.ts_status & AR5K_TXERR_FILT)
+ sc->stats.ack_fail++;
+ if (ts.ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ sc->stats.txerr_filt++;
+ }
+ if (ts.ts_status & AR5K_TXERR_XRETRY)
+ sc->stats.txerr_retry++;
+ if (ts.ts_status & AR5K_TXERR_FIFO)
+ sc->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts.ts_rssi;
}
+ /*
+ * Remove MAC header padding before giving the frame
+ * back to mac80211.
+ */
+ ath5k_remove_padding(skb);
+
+ if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
+ sc->stats.antenna_tx[ts.ts_antenna]++;
+ else
+ sc->stats.antenna_tx[0]++; /* invalid */
+
ieee80211_tx_status(sc->hw, skb);
spin_lock(&sc->txbuflock);
@@ -2073,6 +2200,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
int ret = 0;
u8 antenna;
u32 flags;
+ const int padsize = 0;
bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2120,7 +2248,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2535,6 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
- /* Set PHY calibration interval */
- ah->ah_cal_intval = ath5k_calinterval;
-
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2421,7 +2546,8 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
+ AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+
ret = ath5k_reset(sc, NULL);
if (ret)
goto done;
@@ -2435,8 +2561,7 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
ath5k_hw_reset_key(ah, i);
- /* Set ack to be sent at low bit-rates */
- ath5k_hw_set_ack_bitrate_high(ah, false);
+ ath5k_hw_set_ack_bitrate_high(ah, true);
ret = 0;
done:
mmiowb();
@@ -2533,12 +2658,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->calib);
tasklet_kill(&sc->beacontq);
+ tasklet_kill(&sc->ani_tasklet);
ath5k_rfkill_hw_stop(sc->ah);
return ret;
}
+static void
+ath5k_intr_calibration_poll(struct ath5k_hw *ah)
+{
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
+ /* run ANI only when full calibration is not active */
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ tasklet_schedule(&ah->ah_sc->calib);
+ }
+ /* we could use SWI to generate enough interrupts to meet our
+ * calibration interval requirements, if necessary:
+ * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
+}
+
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
@@ -2562,7 +2708,20 @@ ath5k_intr(int irq, void *dev_id)
*/
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
- tasklet_schedule(&sc->restq);
+ /*
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ * Older chipsets need a reset to come out of this
+ * condition, but we treat it as RX for newer chips.
+ * We don't know exactly which versions need a reset -
+ * this guess is copied from the HAL.
+ */
+ sc->stats.rxorn_intr++;
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ tasklet_schedule(&sc->restq);
+ else
+ tasklet_schedule(&sc->rxtq);
} else {
if (status & AR5K_INT_SWBA) {
tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2746,10 @@ ath5k_intr(int irq, void *dev_id)
if (status & AR5K_INT_BMISS) {
/* TODO */
}
- if (status & AR5K_INT_SWI) {
- tasklet_schedule(&sc->calib);
- }
if (status & AR5K_INT_MIB) {
- /*
- * These stats are also used for ANI i think
- * so how about updating them more often ?
- */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ sc->stats.mib_intr++;
+ ath5k_hw_update_mib_counters(ah);
+ ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2760,7 @@ ath5k_intr(int irq, void *dev_id)
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
- ath5k_hw_calibration_poll(ah);
+ ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
@@ -2630,8 +2784,7 @@ ath5k_tasklet_calibrate(unsigned long data)
struct ath5k_hw *ah = sc->ah;
/* Only full calibration for now */
- if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
- return;
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
/* Stop queues so that calibration
* doesn't interfere with tx */
@@ -2647,18 +2800,29 @@ ath5k_tasklet_calibrate(unsigned long data)
* to load new gain values.
*/
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ath5k_reset_wake(sc);
+ ath5k_reset(sc, sc->curchan);
}
if (ath5k_hw_phy_calibrate(ah, sc->curchan))
ATH5K_ERR(sc, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
- ah->ah_swi_mask = 0;
-
/* Wake queues */
ieee80211_wake_queues(sc->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+}
+
+
+static void
+ath5k_tasklet_ani(unsigned long data)
+{
+ struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = sc->ah;
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
+ ath5k_ani_calibration(ah);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
@@ -2680,7 +2844,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_softc *sc = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
- int hdrlen;
int padsize;
ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2855,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
* the hardware expects the header padded to 4 byte boundaries
* if this is not the case we add the padding after the header
*/
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
-
- if (skb_headroom(skb) < padsize) {
- ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
- " headroom to pad %d\n", hdrlen, padsize);
- goto drop_packet;
- }
- skb_push(skb, padsize);
- memmove(skb->data, skb->data+padsize, hdrlen);
+ padsize = ath5k_add_padding(skb);
+ if (padsize < 0) {
+ ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
+ " headroom to pad");
+ goto drop_packet;
}
spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2721,7 +2878,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
- if (ath5k_txbuf_setup(sc, bf, txq)) {
+ if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
bf->skb = NULL;
spin_lock_irqsave(&sc->txbuflock, flags);
list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2925,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
+ ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@@ -2836,6 +2995,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
+
ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
@@ -2906,7 +3067,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
* then we must allow the user to set how many tx antennas we
* have available
*/
- ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&sc->lock);
@@ -2914,22 +3075,20 @@ unlock:
}
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
- int i;
u8 pos;
+ struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 1;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
- val = get_unaligned_le32(mclist->dmi_addr + 0);
+ val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = get_unaligned_le32(mclist->dmi_addr + 3);
+ val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3096,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
* but not sure, needs testing, if we do use this we'd
* neet to inform below to not reset the mcast */
/* ath5k_hw_set_mcast_filterindex(ah,
- * mclist->dmi_addr[5]); */
- mclist = mclist->next;
+ * ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
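The hash in the loop above treats the 48-bit multicast address as eight 6-bit values whose XOR selects one of the 64 filter bits. As an illustration only (not part of the patch), a byte-wise equivalent written as a hypothetical helper should compute the same position as the unaligned 32-bit reads used above:

static unsigned int example_mcast_filter_pos(const u8 addr[ETH_ALEN])
{
	u64 mac = 0;
	unsigned int pos = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)		/* little-endian 48-bit value */
		mac |= (u64)addr[i] << (8 * i);

	for (i = 0; i < 8; i++)			/* XOR of eight 6-bit chunks */
		pos ^= (mac >> (6 * i)) & 0x3f;

	return pos;	/* bit 'pos' is then set in mfilt[pos / 32] */
}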
@@ -3124,12 +3282,30 @@ ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
/* Force update */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ ath5k_hw_update_mib_counters(sc->ah);
+
+ stats->dot11ACKFailureCount = sc->stats.ack_fail;
+ stats->dot11RTSFailureCount = sc->stats.rts_fail;
+ stats->dot11RTSSuccessCount = sc->stats.rts_ok;
+ stats->dot11FCSErrorCount = sc->stats.fcs_error;
+
+ return 0;
+}
+
+static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
- memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = sc->ah->ah_noise_floor;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a5abd..56221bc7c8c 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
#include "ath5k.h"
#include "debug.h"
+#include "ani.h"
#include "../regd.h"
#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
struct tasklet_struct toggleq;
};
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+};
+
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26+26+26+200+200)
#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
int power_level; /* Requested tx power in dbm */
bool assoc; /* associate state */
bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
};
#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc..74f007126f4 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
}
}
- /* GPIO */
- ah->ah_gpio_npins = AR5K_NUM_GPIO;
-
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ /* newer hardware has PHY error counters */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
+ ah->ah_capabilities.cap_has_phyerr_counters = true;
+ else
+ ah->ah_capabilities.cap_has_phyerr_counters = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c15d3..6fb5c5ffa5b 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
#include <linux/seq_file.h>
#include "reg.h"
+#include "ani.h"
static struct dentry *ath5k_global_debugfs;
@@ -307,6 +308,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
+ { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
};
+/* debugfs: antenna */
+
+static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+ unsigned int v;
+
+ len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
+ sc->ah->ah_ant_mode);
+ len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
+ sc->ah->ah_def_ant);
+ len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
+ sc->ah->ah_tx_ant);
+
+ len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "[antenna %d]\t%d\t%d\n",
+ i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
+ }
+ len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
+ sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
+ (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
+ (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
+ (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
+ (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_antenna(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "diversity", 9) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ printk(KERN_INFO "ath5k debug: enable diversity\n");
+ } else if (strncmp(buf, "fixed-a", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+ } else if (strncmp(buf, "fixed-b", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+ } else if (strncmp(buf, "clear", 5) == 0) {
+ for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ sc->stats.antenna_rx[i] = 0;
+ sc->stats.antenna_tx[i] = 0;
+ }
+ printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_antenna = {
+ .read = read_file_antenna,
+ .write = write_file_antenna,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: frameerrors */
+
+static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[700];
+ unsigned int len = 0;
+ int i;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "RX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+ st->rxerr_crc,
+ st->rx_all_count > 0 ?
+ st->rxerr_crc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+ st->rxerr_phy,
+ st->rx_all_count > 0 ?
+ st->rxerr_phy*100/st->rx_all_count : 0);
+ for (i = 0; i < 32; i++) {
+ if (st->rxerr_phy_code[i])
+ len += snprintf(buf+len, sizeof(buf)-len,
+ " phy_err[%d]\t%d\n",
+ i, st->rxerr_phy_code[i]);
+ }
+
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->rxerr_fifo,
+ st->rx_all_count > 0 ?
+ st->rxerr_fifo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+ st->rxerr_decrypt,
+ st->rx_all_count > 0 ?
+ st->rxerr_decrypt*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+ st->rxerr_mic,
+ st->rx_all_count > 0 ?
+ st->rxerr_mic*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+ st->rxerr_proc,
+ st->rx_all_count > 0 ?
+ st->rxerr_proc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+ st->rxerr_jumbo,
+ st->rx_all_count > 0 ?
+ st->rxerr_jumbo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+ st->rx_all_count);
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nTX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+ st->txerr_retry,
+ st->tx_all_count > 0 ?
+ st->txerr_retry*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->txerr_fifo,
+ st->tx_all_count > 0 ?
+ st->txerr_fifo*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+ st->txerr_filt,
+ st->tx_all_count > 0 ?
+ st->txerr_filt*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+ st->tx_all_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_frameerrors(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "clear", 5) == 0) {
+ st->rxerr_crc = 0;
+ st->rxerr_phy = 0;
+ st->rxerr_fifo = 0;
+ st->rxerr_decrypt = 0;
+ st->rxerr_mic = 0;
+ st->rxerr_proc = 0;
+ st->rxerr_jumbo = 0;
+ st->rx_all_count = 0;
+ st->txerr_retry = 0;
+ st->txerr_fifo = 0;
+ st->txerr_filt = 0;
+ st->tx_all_count = 0;
+ printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_frameerrors = {
+ .read = read_file_frameerrors,
+ .write = write_file_frameerrors,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: ani */
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_ani_state *as = &sc->ani_state;
+
+ char buf[700];
+ unsigned int len = 0;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW has PHY error counters:\t%s\n",
+ sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+ "yes" : "no");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW max spur immunity level:\t%d\n",
+ as->max_spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nANI state\n--------------------------------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
+ switch (as->ani_mode) {
+ case ATH5K_ANI_MODE_OFF:
+ len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_LOW:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL LOW\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_HIGH:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL HIGH\n");
+ break;
+ case ATH5K_ANI_MODE_AUTO:
+ len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
+ break;
+ default:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "??? (not good)\n");
+ break;
+ }
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "noise immunity level:\t\t%d\n",
+ as->noise_imm_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "spur immunity level:\t\t%d\n",
+ as->spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
+ as->firstep_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM weak signal detection:\t%s\n",
+ as->ofdm_weak_sig ? "on" : "off");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK weak signal detection:\t%s\n",
+ as->cck_weak_sig ? "on" : "off");
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nMIB INTERRUPTS:\t\t%u\n",
+ st->mib_intr);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "beacon RSSI average:\t%d\n",
+ sc->ah->ah_beacon_rssi_avg.avg);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
+ as->pfc_tx,
+ as->pfc_cycles > 0 ?
+ as->pfc_tx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
+ as->pfc_rx,
+ as->pfc_cycles > 0 ?
+ as->pfc_rx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
+ as->pfc_busy,
+ as->pfc_cycles > 0 ?
+ as->pfc_busy*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
+ as->pfc_cycles);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "listen time\t\t%d\tlast: %d\n",
+ as->listen_time, as->last_listen);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->ofdm_errors, as->last_ofdm_errors,
+ as->sum_ofdm_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->cck_errors, as->last_cck_errors,
+ as->sum_cck_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
+ ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
+ ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "sens-low", 8) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ } else if (strncmp(buf, "sens-high", 9) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ } else if (strncmp(buf, "ani-off", 7) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
+ } else if (strncmp(buf, "ani-on", 6) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
+ } else if (strncmp(buf, "noise-low", 9) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "noise-high", 10) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ } else if (strncmp(buf, "spur-low", 8) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "spur-high", 9) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah,
+ sc->ani_state.max_spur_level);
+ } else if (strncmp(buf, "fir-low", 7) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, 0);
+ } else if (strncmp(buf, "fir-high", 8) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ } else if (strncmp(buf, "ofdm-off", 8) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "ofdm-on", 7) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
+ } else if (strncmp(buf, "cck-off", 7) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "cck-on", 6) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
+ }
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
/* init */
void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
sc->debug.debugfs_phydir, sc, &fops_reset);
+
+ sc->debug.debugfs_antenna = debugfs_create_file("antenna",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc, &fops_antenna);
+
+ sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_frameerrors);
+
+ sc->debug.debugfs_ani = debugfs_create_file("ani",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_ani);
}
void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_registers);
debugfs_remove(sc->debug.debugfs_beacon);
debugfs_remove(sc->debug.debugfs_reset);
+ debugfs_remove(sc->debug.debugfs_antenna);
+ debugfs_remove(sc->debug.debugfs_frameerrors);
+ debugfs_remove(sc->debug.debugfs_ani);
debugfs_remove(sc->debug.debugfs_phydir);
}
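
The three new debugfs entries are ordinary files, so they can be exercised from
userspace with plain file I/O. The sketch below is illustrative only: the
debugfs mount point and the "phy0" directory name are assumptions that depend
on the system, and the program is not part of the patch.

/* Hypothetical userspace sketch: dump and clear the frameerrors stats */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	/* path is an assumption; adjust to the actual debugfs layout */
	int fd = open("/sys/kernel/debug/ath5k/phy0/frameerrors", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);	/* formatted RX/TX error table */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	if (write(fd, "clear", 5) < 0)		/* handled by write_file_frameerrors() */
		perror("write");

	close(fd);
	return 0;
}
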
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55..ddd5b3a99e8 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
struct dentry *debugfs_registers;
struct dentry *debugfs_beacon;
struct dentry *debugfs_reset;
+ struct dentry *debugfs_antenna;
+ struct dentry *debugfs_frameerrors;
+ struct dentry *debugfs_ani;
};
/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_TRACE = 0x00001000,
+ ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6..7d7b646ab65 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
+ unsigned int pkt_len, unsigned int hdr_len, int padsize,
+ enum ath5k_pkt_type type,
unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
}
- /*Diferences between 5210-5211*/
+ /*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
unsigned int tx_tries0, unsigned int key_index,
unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
- tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
+ tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
ah->ah_version != AR5K_AR5212)
return -ENOTSUPP;
- /* XXX: What is this magic value and where is it used ? */
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
- else if (ah->ah_version == AR5K_AR5211)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
-
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
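
Passing padsize explicitly makes the descriptor setup independent of how the
caller aligned the 802.11 header. The stand-alone sketch below shows the
padding arithmetic this parameter is expected to carry, assuming the usual
4-byte header alignment; the pad_size() helper is a stand-in for illustration,
not the driver's ath5k_pad_size().

#include <stdio.h>

#define FCS_LEN 4

/* stand-in: bytes inserted after the 802.11 header to reach 4-byte alignment */
static int pad_size(int hdrlen)
{
	return (hdrlen & 3) ? 4 - (hdrlen & 3) : 0;
}

int main(void)
{
	int hdr_len = 26;		/* e.g. a QoS data header */
	int pkt_len = 128;		/* padded length handed to the hardware */
	int padsize = pad_size(hdr_len);

	/* what the descriptor setup programs as the on-air frame length */
	printf("padsize=%d frame_len=%d\n", padsize, pkt_len - padsize + FCS_LEN);
	return 0;
}
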
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3..64538fbe416 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
-/* PHY Error codes */
-#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
-#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
-#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
-#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60
-#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80
-#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0
-#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
-#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
+/**
+ * enum ath5k_phy_error_code - PHY Error codes
+ */
+enum ath5k_phy_error_code {
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
+ AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
+ AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
+ AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
+ AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
+ /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
+ AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
+ AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
+ AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
+ AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
+ AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
+ AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
+ AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
+ AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
+ AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
+ AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
+ AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
+};
/*
* 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cdc7af..ed0263672d6 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_x_gain[mode] = (val >> 1) & 0xf;
ee->ee_xpd[mode] = val & 0x1;
- if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode != AR5K_EEPROM_MODE_11B)
ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
if (mode == AR5K_EEPROM_MODE_11A)
ee->ee_xr_power[mode] = val & 0x3f;
else {
+ /* b_DB_11[bg] and b_OB_11[bg] */
ee->ee_ob[mode][0] = val & 0x7;
ee->ee_db[mode][0] = (val >> 3) & 0x7;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483bb9c..c4a6d5f26af 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
* SERDES infos are present */
#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
-#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
-#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
-#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
@@ -78,9 +75,9 @@
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
-#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
-#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2GHz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
+#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
@@ -101,7 +98,7 @@
#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
-#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
-#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3)
-#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3)
+#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
+#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
-#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1)
-#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1)
-#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1)
-#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1)
-#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf)
-#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf)
+#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
+#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
+#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
+#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
+#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
+#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
-#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8)
-#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8)
-#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1)
-#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1)
-#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1)
-#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1)
+#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
+#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
+#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
+#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
+#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
+#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
bool flag;
};
-/* EEPROM calibration data */
+/**
+ * struct ath5k_eeprom_info - EEPROM calibration data
+ *
+ * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
+ * flags
+ * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
+ * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
+ * OFDM and CCK packets
+ * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
+ * (11Mbps) rate in G mode. 0.1dB steps
+ * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
+ *
+ * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
+ * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
+ * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
+ * @ee_switch_settling: RX/TX Switch settling time
+ * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
+ * @ee_ant_control: Antenna Control Settings
+ * @ee_ob: Bias current for Output stage of PA
+ * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
+ * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
+ * @ee_db: Bias current for Output stage of PA. see @ee_ob
+ * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
+ * to when the external LNA is activated
+ * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
+ * to when the external PA switch is deactivated
+ * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
+ * external PA switch is activated
+ * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
+ * (IEEE 802.11a section 17.3.10.5)
+ * @ee_xlna_gain: Total gain of the LNA (information only)
+ * @ee_xpd: Use external (1) or internal power detector
+ * @ee_x_gain: Gain for external power detector output (differences in EEMAP
+ * versions!)
+ * @ee_i_gain: Initial gain value after reset
+ * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
+ *
+ * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
+ * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
+ * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
+ * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
+ * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
+ */
struct ath5k_eeprom_info {
/* Header information */
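
The accessors in this header are all shift-and-mask extractors over 16-bit
EEPROM words. The snippet below only illustrates how the header capability
word decodes, assuming AR5K_EEPROM_MODE_11A/11B/11G are 0/1/2 as defined
elsewhere in this file; the sample value is invented.

#include <stdio.h>

int main(void)
{
	unsigned int hdr = 0x4807;	/* invented sample header word */

	/* same shifts/masks as AR5K_EEPROM_HDR_11A/11B/11G() */
	printf("11a:%u 11b:%u 11g:%u\n",
	       hdr & 0x1, (hdr >> 1) & 0x1, (hdr >> 2) & 0x1);
	/* AR5K_EEPROM_HDR_DEVICE(): 1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP */
	printf("device type: %u\n", (hdr >> 11) & 0x7);
	/* AR5K_EEPROM_HDR_RFKILL() */
	printf("rfkill: %s\n", ((hdr >> 14) & 0x1) ? "yes" : "no");
	return 0;
}
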
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f9c04..5212e275f1c 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
* ath5k_hw_set_opmode - Set PCU operating mode
*
* @ah: The &struct ath5k_hw
+ * @op_mode: &enum nl80211_iftype operating mode
*
* Initialize PCU for the various operating modes (AP/STA etc)
- *
- * NOTE: ah->ah_op_mode must be set before calling this.
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah)
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
ATH5K_TRACE(ah->ah_sc);
- switch (ah->ah_op_mode) {
+ switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update - Update mib counters (mac layer statistics)
+ * ath5k_hw_update - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
- * @stats: The &struct ieee80211_low_level_stats we use to track
- * statistics on the driver
*
- * Reads MIB counters from PCU and updates sw statistics. Must be
- * called after a MIB interrupt.
+ * Reads MIB counters from PCU and updates sw statistics. This is called
+ * after a MIB interrupt, because one of these counters might have reached
+ * its maximum and triggered the MIB interrupt, to let us read and clear
+ * the counter.
+ *
+ * Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
- struct ieee80211_low_level_stats *stats)
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
+ struct ath5k_statistics *stats = &ah->ah_sc->stats;
/* Read-And-Clear */
- stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
- stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
- stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
- stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
-
- /* XXX: Should we use this to track beacon count ?
- * -we read it anyway to clear the register */
- ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
-
- /* Reset profile count registers on 5212*/
- if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
- }
-
- /* TODO: Handle ANI stats */
+ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+ stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+ stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+ stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+ stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
- else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
\******************/
/**
- * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
-}
-
-/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
-}
-
-/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
- * TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
-/*
- * Set multicast filter by index
- */
-int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
-/*
- * Clear Multicast filter by index
- */
-int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
@@ -571,18 +496,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
* Beacon control *
\****************/
-/**
- * ath5k_hw_get_tsf32 - Get a 32bit TSF
- *
- * @ah: The &struct ath5k_hw
- *
- * Returns lower 32 bits of current TSF
- */
-u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-}
+#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
@@ -593,10 +507,35 @@ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
- u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ /*
+ * While reading TSF upper and then lower part, the clock is still
+ * counting (or jumping in case of IBSS merge) so we might get
+ * inconsistent values. To avoid this, we read the upper part again
+ * and check that it has not changed. We assume that at most 3 changes
+ * can happen in a row (we use 10 as a safe value).
+ *
+ * Impact on performance is pretty small, since in most cases, only
+ * 3 register reads are needed.
+ */
+
+ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
+ tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
+
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
/**
@@ -651,7 +590,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the additional timers by mode
*/
- switch (ah->ah_op_mode) {
+ switch (ah->ah_sc->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
@@ -688,8 +627,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
- if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
- ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
+ ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +661,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
-#if 0
-/*
- * Set beacon timers
- */
-int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
- const struct ath5k_beacon_state *state)
-{
- u32 cfp_period, next_cfp, dtim, interval, next_beacon;
-
- /*
- * TODO: should be changed through *state
- * review struct ath5k_beacon_state struct
- *
- * XXX: These are used for cfp period bellow, are they
- * ok ? Is it O.K. for tsf here to be 0 or should we use
- * get_tsf ?
- */
- u32 dtim_count = 0; /* XXX */
- u32 cfp_count = 0; /* XXX */
- u32 tsf = 0; /* XXX */
-
- ATH5K_TRACE(ah->ah_sc);
- /* Return on an invalid beacon state */
- if (state->bs_interval < 1)
- return -EINVAL;
-
- interval = state->bs_interval;
- dtim = state->bs_dtim_period;
-
- /*
- * PCF support?
- */
- if (state->bs_cfp_period > 0) {
- /*
- * Enable PCF mode and set the CFP
- * (Contention Free Period) and timer registers
- */
- cfp_period = state->bs_cfp_period * state->bs_dtim_period *
- state->bs_interval;
- next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
- state->bs_interval;
-
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
- ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
- AR5K_CFP_DUR);
- ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
- next_cfp)) << 3, AR5K_TIMER2);
- } else {
- /* Disable PCF mode */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- }
-
- /*
- * Enable the beacon timer register
- */
- ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
-
- /*
- * Start the beacon timers
- */
- ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
- ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
- AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
- AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
- AR5K_BEACON_PERIOD), AR5K_BEACON);
-
- /*
- * Write new beacon miss threshold, if it appears to be valid
- * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
- * and return if its not in range. We can test this by reading value and
- * setting value to a largest value and seeing which values register.
- */
-
- AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
- state->bs_bmiss_threshold);
-
- /*
- * Set sleep control register
- * XXX: Didn't find this in 5210 code but since this register
- * exists also in ar5k's 5210 headers i leave it as common code.
- */
- AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
- (state->bs_sleep_duration - 3) << 3);
-
- /*
- * Set enhanced sleep registers on 5212
- */
- if (ah->ah_version == AR5K_AR5212) {
- if (state->bs_sleep_duration > state->bs_interval &&
- roundup(state->bs_sleep_duration, interval) ==
- state->bs_sleep_duration)
- interval = state->bs_sleep_duration;
-
- if (state->bs_sleep_duration > dtim && (dtim == 0 ||
- roundup(state->bs_sleep_duration, dtim) ==
- state->bs_sleep_duration))
- dtim = state->bs_sleep_duration;
-
- if (interval > dtim)
- return -EINVAL;
-
- next_beacon = interval == dtim ? state->bs_next_dtim :
- state->bs_next_beacon;
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
- AR5K_SLEEP0_NEXT_DTIM) |
- AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
- AR5K_SLEEP0_ENH_SLEEP_EN |
- AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
- AR5K_SLEEP1_NEXT_TIM) |
- AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
- AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
- }
-
- return 0;
-}
-
-/*
- * Reset beacon timers
- */
-void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- /*
- * Disable beacon timer
- */
- ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
-
- /*
- * Disable some beacon register values
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
-}
-
-/*
- * Wait for beacon queue to finish
- */
-int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
-{
- unsigned int i;
- int ret;
-
- ATH5K_TRACE(ah->ah_sc);
-
- /* 5210 doesn't have QCU*/
- if (ah->ah_version == AR5K_AR5210) {
- /*
- * Wait for beaconn queue to finish by checking
- * Control Register and Beacon Status Register.
- */
- for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
- if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
- ||
- !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
- break;
- udelay(10);
- }
-
- /* Timeout... */
- if (i <= 0) {
- /*
- * Re-schedule the beacon queue
- */
- ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
- ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
- AR5K_BCR);
-
- return -EIO;
- }
- ret = 0;
- } else {
- /*5211/5212*/
- ret = ath5k_hw_register_timeout(ah,
- AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
- AR5K_QCU_STS_FRMPENDCNT, 0, false);
-
- if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
- return -EIO;
- }
-
- return ret;
-}
-#endif
-
/*********************\
* Key table functions *
@@ -971,19 +713,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
return 0;
}
-/*
- * Check if a table entry is valid
- */
-int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
-{
- ATH5K_TRACE(ah->ah_sc);
- AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
- /* Check the validation flag at the end of the entry */
- return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
- AR5K_KEYTABLE_VALID;
-}
-
static
int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bccd90d..1b81c477880 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
*
*/
-#define _ATH5K_PHY
-
#include <linux/delay.h>
#include <linux/slab.h>
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
} else
return -EINVAL;
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
-void
-ath5k_hw_calibration_poll(struct ath5k_hw *ah)
-{
- /* Calibration interval in jiffies */
- unsigned long cal_intval;
-
- cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
-
- /* Initialize timestamp if needed */
- if (!ah->ah_cal_tstamp)
- ah->ah_cal_tstamp = jiffies;
-
- /* For now we always do full calibration
- * Mark software interrupt mask and fire software
- * interrupt (bit gets auto-cleared) */
- if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
- ah->ah_cal_tstamp = jiffies;
- ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
- AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
- }
-}
-
static int sign_extend(int val, const int nbits)
{
int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
* The median of the values in the history is then loaded into the
* hardware for its own use for RSSI and CCA measurements.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
- q_coffd = q_pwr >> 7;
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coffd = q_pwr >> 6;
+ else
+ q_coffd = q_pwr >> 7;
/* protect against divide by 0 and loss of sign bits */
if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
- q_coff = (i_pwr / q_coffd) - 128;
+ if (ah->ah_version == AR5K_AR5211)
+ q_coff = (i_pwr / q_coffd) - 64;
+ else
+ q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
* Antenna control *
\*****************/
-void /*TODO:Boundary check*/
+static void /*TODO:Boundary check*/
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version != AR5K_AR5210)
- return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
-
- return false; /*XXX: What do we return for 5210 ?*/
-}
-
/*
* Enable/disable fast rx antenna diversity
*/
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
ah->ah_tx_ant = tx_ant;
ah->ah_ant_mode = ant_mode;
+ ah->ah_def_ant = def_ant;
sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2171,8 +2145,6 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
done:
*pcinfo_l = &pcinfo[idx_l];
*pcinfo_r = &pcinfo[idx_r];
-
- return;
}
/*
@@ -2441,19 +2413,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0x40;
-#if 0
- /* If both min and max power limits are in lower
- * power curve's range, only use the low power curve.
- * TODO: min/max levels are related to target
- * power values requested from driver/user
- * XXX: Is this really needed ? */
- if (min_pwr < table_max[1] &&
- max_pwr < table_max[1]) {
- edge_flag = 0;
- pcdac_tmp = pcdac_low_pwr;
- max_pwr_idx = (table_max[1] - table_min[1])/2;
- }
-#endif
} else {
pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2559,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
/* Fill pdadc_out table */
- while (pdadc_0 < max_idx)
+ while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */
@@ -3144,5 +3103,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
}
-
-#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f4..f5831da33f7 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
}
/*
- * Get slot time from DCU
- */
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
-{
- unsigned int slot_time_clock;
-
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version == AR5K_AR5210)
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
- else
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
-
- return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
-}
-
-/*
* Set slot time on DCU
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89b249..55b4ac6d236 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
-#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
+#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
-#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
-#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
+#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
/*
* Timeout prescale register
@@ -1139,8 +1139,8 @@
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
-#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
-#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
+#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
AR5K_NAV_5210 : AR5K_NAV_5211)
/*
- * RTS success register
+ * MIB counters:
+ *
+ * The maximum value is 0xc000; when it is reached we get a MIB interrupt.
+ * The counters can be controlled via AR5K_MIBC and are cleared on read.
+ */
+
+/*
+ * RTS success (MIB counter)
*/
#define AR5K_RTS_OK_5210 0x8090
#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
/*
- * RTS failure register
+ * RTS failure (MIB counter)
*/
#define AR5K_RTS_FAIL_5210 0x8094
#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
/*
- * ACK failure register
+ * ACK failure (MIB counter)
*/
#define AR5K_ACK_FAIL_5210 0x8098
#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
/*
- * FCS failure register
+ * FCS failure (MIB counter)
*/
#define AR5K_FCS_FAIL_5210 0x809c
#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
/*
* Profile count registers
+ *
+ * These registers can be cleared and frozen with AR5K_MIBC, but they do not
+ * generate a MIB interrupt.
+ * Instead of overflowing, they shift one bit to the right: when one of them
+ * reaches the maximum, all of them shift together at the same time, so the
+ * values always stay consistent relative to each other.
*/
#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
-#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
-#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
/*
* Quiet period control registers
@@ -1758,7 +1771,7 @@
#define AR5K_CCK_FIL_CNT 0x8128
/*
- * PHY Error Counters (?)
+ * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
*/
#define AR5K_PHYERR_CNT1 0x812c
#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
#define AR5K_PHYERR_CNT2 0x8134
#define AR5K_PHYERR_CNT2_MASK 0x8138
+/* If the PHY Error Counters reach this maximum, we get MIB interrupts */
+#define ATH5K_PHYERR_CNT_MAX 0x00c00000
+
/*
* TSF Threshold register (?)
*/
@@ -1974,7 +1990,7 @@
#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
#define AR5K_PHY_SETTLING_AGC_S 0
-#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
#define AR5K_PHY_SETTLING_SWITCH_S 7
/*
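
ATH5K_PHYERR_CNT_MAX ties into how the debugfs "ani" reader interprets
AR5K_PHYERR_CNT1/2: the counters are presumably preloaded to (CNT_MAX minus
the trigger threshold), so that reaching CNT_MAX raises a MIB interrupt after
exactly that many PHY errors, and the current error count can be recovered
from the raw register. A small stand-alone model of that arithmetic follows;
the trigger value is illustrative, not necessarily the driver's default.

#include <stdio.h>

#define CNT_MAX		0x00c00000	/* ATH5K_PHYERR_CNT_MAX */
#define OFDM_TRIG	500		/* illustrative trigger threshold */

int main(void)
{
	unsigned int preload = CNT_MAX - OFDM_TRIG;	/* written after (re)init */
	unsigned int reg = preload + 123;		/* 123 OFDM errors later */

	/* same expression the debugfs code prints: TRIG - (CNT_MAX - reg) */
	printf("errors so far: %d\n", OFDM_TRIG - (int)(CNT_MAX - reg));
	return 0;
}
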
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e37984..307f80e83f9 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
*
*/
-#define _ATH5K_RESET
-
/*****************************\
Reset functions and helpers
\*****************************/
@@ -34,6 +32,27 @@
#include "base.h"
#include "debug.h"
+/*
+ * Check if a register write has been completed
+ */
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set)
+{
+ int i;
+ u32 data;
+
+ for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+ data = ath5k_hw_reg_read(ah, reg);
+ if (is_set && (data & flag))
+ break;
+ else if ((data & flag) == val)
+ break;
+ udelay(15);
+ }
+
+ return (i <= 0) ? -EAGAIN : 0;
+}
+
/**
* ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
*
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
/*
* Sleep control
*/
-int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
- bool set_chip, u16 sleep_duration)
+static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+ bool set_chip, u16 sleep_duration)
{
unsigned int i;
u32 staid, data;
@@ -608,7 +627,6 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
}
- return;
}
/* TODO: Half/Quarter rate */
@@ -864,8 +882,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/* Heavy clipping -disable for now */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
-
- return;
}
/*
@@ -1017,11 +1033,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret)
return ret;
- /*
- * Initialize operating mode
- */
- ah->ah_op_mode = op_mode;
-
/* PHY access enable */
if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1203,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_set_associd(ah);
/* Set PCU config */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, op_mode);
/* Clear any pending interrupts
* PISR/SISR Not available on 5210 */
@@ -1378,7 +1389,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* external 32KHz crystal when sleeping if one
* exists */
if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_op_mode != NL80211_IFTYPE_AP)
+ op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
/*
@@ -1388,5 +1399,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reset_tsf(ah);
return 0;
}
-
-#undef _ATH5K_RESET
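
ath5k_hw_register_timeout() is a simple bounded busy-poll: it breaks out
either when the flag is set (is_set) or when the masked value equals val, and
reports -EAGAIN after AR5K_TUNE_REGISTER_TIMEOUT iterations of 15us. The
stand-alone model below mirrors that loop against a fake register to show the
two exit conditions; it is an illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* fake register that "drains" after three polls */
static unsigned int fake_reg_read(void)
{
	static int calls;
	return (++calls > 3) ? 0x0 : 0x00003f80;
}

static int register_timeout(unsigned int flag, unsigned int val, bool is_set)
{
	unsigned int data;
	int i;

	for (i = 1000; i > 0; i--) {	/* stands in for AR5K_TUNE_REGISTER_TIMEOUT */
		data = fake_reg_read();
		if (is_set && (data & flag))
			break;
		else if ((data & flag) == val)
			break;
		usleep(15);
	}
	return (i <= 0) ? -1 : 0;
}

int main(void)
{
	/* wait until the masked field reads back as zero, e.g. a queue drained */
	printf("result: %d\n", register_timeout(0x00003f80, 0, false));
	return 0;
}
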
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea23a3..35f23bdc442 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
+
+config ATH9K_HTC_DEBUGFS
+ bool "Atheros ath9k_htc debugging"
+ depends on ATH9K_HTC && DEBUG_FS
+ ---help---
+ Say Y, if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5eb9ec..dd112be218a 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,18 +13,38 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
obj-$(CONFIG_ATH9K) += ath9k.o
-ath9k_hw-y:= hw.o \
+ath9k_hw-y:= \
+ ar9002_hw.o \
+ ar9003_hw.o \
+ hw.o \
+ ar9003_phy.o \
+ ar9002_phy.o \
+ ar5008_phy.o \
+ ar9002_calib.o \
+ ar9003_calib.o \
+ calib.o \
eeprom.o \
eeprom_def.o \
eeprom_4k.o \
eeprom_9287.o \
- calib.o \
ani.o \
- phy.o \
btcoex.o \
mac.o \
+ ar9002_mac.o \
+ ar9003_mac.o \
+ ar9003_eeprom.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o
+
+ath9k_htc-y += htc_hst.o \
+ hif_usb.o \
+ wmi.o \
+ htc_drv_txrx.o \
+ htc_drv_main.o \
+ htc_drv_beacon.o \
+ htc_drv_init.o
+
+obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f1315..85fdd26039c 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64c2bf..ba8b20f0159 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
return 0;
}
-static bool ath9k_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
-{
- struct ar5416AniState *aniState = ah->curani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
- u32 on = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH,
- m2Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR,
- m2CountThr[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH,
- m2Thresh[on]);
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] =
- { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
"Writing ofdmbase=%u cckbase=%u\n",
aniState->ofdmPhyErrBase,
aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
ath9k_ani_restart(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_FILT_OFDM, 0);
REG_WRITE(ah, AR_FILT_CCK, 0);
REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
& 0x0f);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
ah->ani[0].cckPhyErrBase);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_enable_mib_counters(ah);
ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
-
-void ath9k_hw_ani_disable(struct ath_hw *ah)
-{
- ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
-
- ath9k_hw_disable_mib_counters(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
-}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94a515..3356762ea38 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-void ath9k_hw_ani_disable(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 00000000000..025c31ac614
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_AR5008_H
+#define INITVALS_AR5008_H
+
+static const u32 ar5416Modes[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
+ { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
+ { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
+ { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000000 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008264, 0x88000010 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00070000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a002e },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5d50e188 },
+ { 0x00009958, 0x00081fff },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x001fff00 },
+ { 0x000099ac, 0x00000000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x000000aa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x00000bb5 },
+ { 0x0000a22c, 0x00000011 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0e79e5c6 },
+ { 0x0000b26c, 0x0e79e5c6 },
+ { 0x0000c26c, 0x0e79e5c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x051701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa1f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x08000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x201400df, 0x201400df },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007081, 0x00007081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x0000000c },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000030 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000060 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000058 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9100[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
+ { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
+#ifdef TB243
+ { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
+#else
+ { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+#endif
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 00000000000..b2c17c98bb3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "../regd.h"
+#include "ar9002_phy.h"
+
+/* All code below is for non single-chip solutions */
+
+/**
+ * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf: RF bank data buffer to modify
+ * @reg32: value whose bits are packed into the buffer
+ * @numBits: number of bits of @reg32 to write
+ * @firstBit: first bit position in the buffer (1-based)
+ * @column: byte column within each 32-bit buffer entry
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
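+	/*
+	 * Scatter the bit-reversed value into the buffer, up to 8 bits
+	 * per array entry, within the requested byte column.
+	 */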
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
+
+/*
+ * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
+ * rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ * <approved>
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
+static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000)
+ return;
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the channel value. Assumes writes are enabled to the analog bus and that the
+ * bank6 register cache is in ah->analogBank6Data.
+ */
+static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
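+		/* 2 GHz channel */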
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ar5008_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan:
+ *
+ * For non single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
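+	/* Set pilot and channel mask bits for bins within +/-100 of the spur */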
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
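+	/* Build the Viterbi mask entries for bins within +/-120 of the spur */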
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+		| (mask_m[38] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
+/**
+ * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+ if (!bank) { \
+ ath_print(common, ATH_DBG_FATAL, \
+ "Cannot allocate RF banks\n"); \
+ return -ENOMEM; \
+ } \
+	} while (0)
+
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+ ATH_ALLOC_BANK(ah->addac5416_21,
+ ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ * Frees the bank scratch buffers used for the external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+ kfree(bank); \
+ bank = NULL; \
+	} while (0)
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_FREE_BANK(ah->analogBank0Data);
+ ATH_FREE_BANK(ah->analogBank1Data);
+ ATH_FREE_BANK(ah->analogBank2Data);
+ ATH_FREE_BANK(ah->analogBank3Data);
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+ ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan:
+ * @modesIndex:
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+
+ /*
+ * Software does not need to program bank data
+ * for single chip devices, that is AR9280 or anything
+ * after that.
+ */
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ return true;
+
+ /* Setup rf parameters */
+ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+
+ /* Setup Bank 0 Write */
+ RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
+
+ /* Setup Bank 1 Write */
+ RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
+
+ /* Setup Bank 2 Write */
+ RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
+
+	/* Setup Bank 3 Write */
+ RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
+ modesIndex);
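+	/* Fill Bank 6 data from the Bank 6 TPC table for this mode */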
+ {
+ int i;
+ for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
+ ah->analogBank6Data[i] =
+ INI_RA(&ah->iniBank6TPC, i, modesIndex);
+ }
+ }
+
+ /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
+ db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
+ db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+	/* Setup Bank 7 Write */
+ RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
+
+ /* Write Analog registers */
+ REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
+ regWrites);
+
+ return true;
+}
+
+static void ar5008_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+{
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ah->rxchainmask;
+ tx_chainmask = ah->txchainmask;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ switch (rx_chainmask) {
+ case 0x5:
+ DISABLE_REGWRITE_BUFFER(ah);
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ ENABLE_REGWRITE_BUFFER(ah);
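+		/* fall through */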
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
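+		/* fall through */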
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
+
+static void ar5008_hw_override_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 val;
+
+ /*
+	 * Set the RX_ABORT and RX_DIS bits and clear them only after
+	 * RXE is set for the MAC. This prevents frames with corrupted
+ * descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2);
+
+ if (!AR_SREV_9271(ah))
+ val &= ~AR_PCU_MISC_MODE2_HWWAR1;
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
+
+ if (!AR_SREV_5416_20_OR_LATER(ah) ||
+ AR_SREV_9280_10_OR_LATER(ah))
+ return;
+ /*
+ * Disable BB clock gating
+ * Necessary to avoid issues on AR5416 2.0
+ */
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
+}
+
+static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
+ AR_PHY_FC_ENABLE_DAC_FIFO);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+
+static int ar5008_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ int i, regWrites = 0;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
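+	/* Select the ini-table column for the current channel mode */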
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ /* Enable ASYNC FIFO */
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+
+ /*
+ * Set correct baseband to analog shift setting to
+ * access analog chips.
+ */
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Write ADDAC shifts */
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+ ah->eep_ops->set_addac(ah, chan);
+
+ if (AR_SREV_5416_22_OR_LATER(ah)) {
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ } else {
+ struct ar5416IniArray temp;
+ u32 addacSize =
+ sizeof(u32) * ah->iniAddac.ia_rows *
+ ah->iniAddac.ia_columns;
+
+ /* For AR5416 2.0/2.1 */
+ memcpy(ah->addac5416_21,
+ ah->iniAddac.ia_array, addacSize);
+
+ /* override CLKDRV value at [row, column] = [31, 1] */
+ (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
+
+ temp.ia_array = ah->addac5416_21;
+ temp.ia_columns = ah->iniAddac.ia_columns;
+ temp.ia_rows = ah->iniAddac.ia_rows;
+ REG_WRITE_ARRAY(&temp, 1, regWrites);
+ }
+
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes, i, 0);
+ u32 val = INI_RA(&ah->iniModes, i, modesIndex);
+
+ if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
+ val &= ~AR_AN_TOP2_PWDCLKIND;
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
+ AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9271_10(ah))
+ REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
+ modesIndex, regWrites);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write common array parameters */
+ for (i = 0; i < ah->iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniCommon, i, 0);
+ u32 val = INI_RA(&ah->iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9271(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
+ REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ modesIndex, regWrites);
+ else
+ REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ modesIndex, regWrites);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
+ regWrites);
+ }
+
+ ar5008_hw_override_ini(ah, chan);
+ ar5008_hw_set_channel_regs(ah, chan);
+ ar5008_hw_init_chain_masks(ah);
+ ath9k_olc_init(ah);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ /* Write analog registers */
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "ar5416SetRfRegs failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ?
+ AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+static void ar5008_hw_rfbus_done(struct ath_hw *ah)
+{
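+ /*
+  * Wait for the frequency synthesizer to settle (delay derived from
+  * AR_PHY_RX_DELAY, with a different scale factor for CCK-only
+  * channels) before releasing the RF bus request.
+  */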
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
+{
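+ /* Route the RF-silent signal to the baseband and configure the rfkill GPIO as an input */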
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar5008_restore_chainmask(struct ath_hw *ah)
+{
+ int rx_chainmask = ah->rxchainmask;
+
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+}
+
+static void ar5008_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (chan && IS_CHAN_5GHZ(chan))
+ return 0x1450;
+ return 0x1458;
+}
+
+static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
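+ /* Reference divider of 5, optional CLKSEL for half/quarter rate, and a band-dependent PLL divider */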
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+
+ return pll;
+}
+
+static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+
+ return pll;
+}
+
+static bool ar5008_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
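+ /* Each noise immunity level indexes a set of AGC desired-size, coarse low/high and FIRPWR thresholds stored in ath_hw */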
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR,
+ ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
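+ /* Column 0 of each threshold table applies with weak-signal detection off, column 1 with it on */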
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar5008_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
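+ /*
+  * The minimum CCA power fields are 9-bit two's complement values;
+  * sign-extend readings with bit 8 set to get a signed noise floor.
+  */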
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ int i, j;
+ int32_t val;
+ const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ u8 chainmask, rx_chain_status;
+
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ chainmask = 0x9;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
+
+ h = ah->nfCalHist;
+
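+ /*
+  * Write the stored noise floor history (privNF) into the CCA registers
+  * for each active chain, then trigger an NF calibration so the hardware
+  * loads the values.
+  */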
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ for (j = 0; j < 5; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(50);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
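+ /*
+  * Restore a default of -50 to the CCA registers, likely so the values
+  * just loaded do not cap the next hardware noise floor measurement.
+  */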
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar5008_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
+
+ priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
+ priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
+ priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
+ priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
+ priv_ops->init_bb = ar5008_hw_init_bb;
+ priv_ops->process_ini = ar5008_hw_process_ini;
+ priv_ops->set_rfmode = ar5008_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar5008_hw_rfbus_req;
+ priv_ops->rfbus_done = ar5008_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
+ priv_ops->restore_chainmask = ar5008_restore_chainmask;
+ priv_ops->set_diversity = ar5008_set_diversity;
+ priv_ops->ani_control = ar5008_hw_ani_control;
+ priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->loadnf = ar5008_hw_loadnf;
+
+ if (AR_SREV_9100(ah))
+ priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
+ else if (AR_SREV_9160_10_OR_LATER(ah))
+ priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
+ else
+ priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 00000000000..0b94bd385b0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
+
+static const u32 ar5416Common_9100[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00020010, 0x00000003 },
+ { 0x00020038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00004000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889ae },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9100[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9100[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9100[][2] = {
+ { 0x000098b0, 0x02108421},
+ { 0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2_9100[][2] = {
+ { 0x000098b0, 0x0e73ff17},
+ { 0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3_9100[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014000f, 0x0014000f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x000180d6, 0x000180d6 },
+ { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
+ { 0x0000989c, 0x000000b1, 0x000000b1 },
+ { 0x0000989c, 0x00002000, 0x00002000 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9100[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000010 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000015 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
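+/* Each row: a register address followed by one value per channel-mode index (selected via modesIndex). */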
+static const u32 ar5416Modes_9160[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000020 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00ff0000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x00750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000e000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79bfaa03 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9160[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9160[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9160[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9160[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9160[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9160[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9160[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000008 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Addac_91601_1[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 00000000000..5fdbb53b47e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9002_phy.h"
+
+#define AR9285_CLCAL_REDO_THRESH 1
+
+static void ar9002_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
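+ /* Program the measurement count limit, select the calibration mode, then set DO_CAL to start the engine */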
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC Gain Calibration\n");
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC DC Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Init ADC DC Calibration\n");
+ break;
+ case TEMP_COMP_CAL:
+ break; /* Not supported */
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static bool ar9002_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ bool iscaldone = false;
+
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah, numChains);
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ ar9002_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+/* Assumes you are talking about the currently configured channel */
+static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
+ conf_is_ht20(conf)))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+ (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ iCoff = iCoff & 0x3f;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+ else if (qCoff <= -16)
+ qCoff = -16;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
+
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct ath9k_percal_data *calData =
+ ah->cal_list_curr->calData;
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
+
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0) {
+ /*
+ * A zero value indicates that no frames have been transmitted
+ * yet; temperature compensation cannot be done until frames
+ * have been transmitted.
+ */
+ return;
+ } else {
+ slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - ah->initPDADC)*4) / slope;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ }
+}
+
+static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata, i;
+ int delta, currPDADC, regval;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0)
+ return;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+ delta = (currPDADC - ah->initPDADC + 4) / 8;
+ else
+ delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+ if (delta != ah->PDADCdelta) {
+ ah->PDADCdelta = delta;
+ for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+ regval = ah->originalGain[i] - delta;
+ if (regval < 0)
+ regval = 0;
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_TX_GAIN_TBL1 + i * 4,
+ AR_PHY_TX_GAIN, regval);
+ }
+ }
+}
+
+static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ u32 regVal;
+ unsigned int i;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ { 0x7828, 0 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ /* 786c,b23,1, pwddac=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ /* 7854, b5,1, pdrxtxbb=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ /* 7854, b7,1, pdv2i=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ /* 7854, b8,1, pddacinterface=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ /* 7824,b12,0, offcal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ /* 7838, b1,0, pwddb=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ /* 7820,b11,0, enpacal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ /* 7820,b25,1, pdpadrv1=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ /* 7820,b24,0, pdpadrv2=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ /* 7820,b23,0, pdpaout=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ /* 783c,b14-16,7, padrvgn2tab_0=7 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ /*
+ * 7838,b29-31,0, padrvgn1tab_0=0
+ * does not matter since we turn it off
+ */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+
+ /* Set:
+ * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
+ * txon=1,paon=1,oscon=1,synthon_force=1
+ */
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
+
+ /* find off_6_1; */
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ /* regVal = REG_READ(ah, 0x7834); */
+ regVal &= (~(0x1 << (20 + i)));
+ regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+ << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ regVal = (regVal >> 20) & 0x7f;
+
+ /* Update PA cal info */
+ if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = regVal;
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regVal;
+ int i, offset, offs_6_1, offs_0;
+ u32 ccomp_org, reg_field;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ };
+
+ ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+
+ /* PA CAL is not needed for high power solution */
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
+ AR5416_EEP_TXGAIN_HIGH_POWER)
+ return;
+
+ if (AR_SREV_9285_11(ah)) {
+ REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
+ udelay(10);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
+
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
+
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1 << (19 + i)));
+ reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
+ regVal |= (reg_field << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
+ udelay(1);
+ reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
+ offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
+ offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
+
+ offset = (offs_6_1<<1) | offs_0;
+ offset = offset - 0;
+ offs_6_1 = offset>>1;
+ offs_0 = offset & 1;
+
+ if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = offset;
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
+
+ if (AR_SREV_9285_11(ah))
+ REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
+
+}
+
+static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ if (AR_SREV_9271(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9271_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ } else if (AR_SREV_9285_11_OR_LATER(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9285_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ }
+}
+
+static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ if (OLC_FOR_AR9287_10_LATER)
+ ar9287_hw_olc_temp_compensation(ah);
+ else if (OLC_FOR_AR9280_20_LATER)
+ ar9280_hw_olc_temp_compensation(ah);
+}
+
+static bool ar9002_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9002_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+
+ /*
+ * Get the value from the previous NF cal and update the
+ * history buffer.
+ */
+ ath9k_hw_getnf(ah, chan);
+
+ /*
+ * Load the NF from the history buffer of the current channel.
+ * The NF varies slowly over time, so it is OK to use a
+ * historical value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
+
+/* Carrier leakage calibration fix */
+static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ if (IS_CHAN_HT20(chan)) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset "
+ "calibration failed to complete in "
+ "1ms; noisy ??\n");
+ return false;
+ }
+ REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ }
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
+ "failed to complete in 1ms; noisy ??\n");
+ return false;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return true;
+}
+
+static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u_int32_t txgain_max;
+ u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
+ u_int32_t reg_clc_I0, reg_clc_Q0;
+ u_int32_t i0_num = 0;
+ u_int32_t q0_num = 0;
+ u_int32_t total_num = 0;
+ u_int32_t reg_rf2g5_org;
+ bool retv = true;
+
+ if (!(ar9285_hw_cl_cal(ah, chan)))
+ return false;
+
+ txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
+ AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
+
+ for (i = 0; i < (txgain_max+1); i++) {
+ clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
+ AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
+ if (!(gain_mask & (1 << clc_gain))) {
+ gain_mask |= (1 << clc_gain);
+ clc_num++;
+ }
+ }
+
+ for (i = 0; i < clc_num; i++) {
+ reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
+ reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
+ if (reg_clc_I0 == 0)
+ i0_num++;
+
+ if (reg_clc_Q0 == 0)
+ q0_num++;
+ }
+ total_num = i0_num + q0_num;
+ if (total_num > AR9285_CLCAL_REDO_THRESH) {
+ reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
+ if (AR_SREV_9285E_20(ah)) {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_XE_SET);
+ } else {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_SET);
+ }
+ retv = ar9285_hw_cl_cal(ah, chan);
+ REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
+ }
+ return retv;
+}
+
+static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (!ar9285_hw_clc(ah, chan))
+ return false;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+ }
+
+ /* Do PA Calibration */
+ ar9002_hw_pa_cal(ah, true);
+
+ /* Do NF Calibration after DC offset and other calibrations */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
+ INIT_CAL(&ah->adcgain_caldata);
+ INSERT_CAL(ah, &ah->adcgain_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC Gain Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
+ INIT_CAL(&ah->adcdc_caldata);
+ INSERT_CAL(ah, &ah->adcdc_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC DC Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+ }
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+static const struct ath9k_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+
+static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah)) {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+ return;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_single_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_single_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ } else {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_multi_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_multi_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ }
+ ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+ }
+}
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
+ priv_ops->init_cal = ar9002_hw_init_cal;
+ priv_ops->setup_calibration = ar9002_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9002_hw_iscal_supported;
+
+ ops->calibrate = ar9002_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 00000000000..a8a8cdc04af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar5008_initvals.h"
+#include "ar9001_initvals.h"
+#include "ar9002_initvals.h"
+
+/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
+
+static bool ar9002_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9271(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
+ ARRAY_SIZE(ar9271Modes_9271), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
+ ARRAY_SIZE(ar9271Common_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
+ ar9271Common_normal_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
+ ar9271Common_japan_2484_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
+ ar9271Modes_9271_1_0_only,
+ ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
+ ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
+ INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ ar9271Modes_high_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
+ INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ ar9271Modes_normal_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
+ return;
+ }
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
+ ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
+ 2);
+ } else if (AR_SREV_9287_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
+ ARRAY_SIZE(ar9287Common_9287_1_0), 2);
+
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
+ 2);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+
+
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
+ ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
+ ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
+ 2);
+ }
+ } else if (AR_SREV_9285_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
+ ARRAY_SIZE(ar9285Modes_9285), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
+ ARRAY_SIZE(ar9285Common_9285), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
+ }
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
+ ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
+ ARRAY_SIZE(ar9280Common_9280_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ }
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9280Modes_fast_clock_9280_2,
+ ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ } else if (AR_SREV_9280_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
+ ARRAY_SIZE(ar9280Modes_9280), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
+ ARRAY_SIZE(ar9280Common_9280), 2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
+ ARRAY_SIZE(ar5416Modes_9160), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
+ ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
+ ARRAY_SIZE(ar5416Bank0_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
+ ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
+ ARRAY_SIZE(ar5416Bank1_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
+ ARRAY_SIZE(ar5416Bank2_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
+ ARRAY_SIZE(ar5416Bank3_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
+ ARRAY_SIZE(ar5416Bank6_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
+ ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
+ ARRAY_SIZE(ar5416Bank7_9160), 2);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniAddac,
+ ar5416Addac_91601_1,
+ ARRAY_SIZE(ar5416Addac_91601_1), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
+ ARRAY_SIZE(ar5416Addac_9160), 2);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
+ ARRAY_SIZE(ar5416Modes_9100), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
+ ARRAY_SIZE(ar5416Common_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
+ ARRAY_SIZE(ar5416Bank0_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
+ ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
+ ARRAY_SIZE(ar5416Bank1_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
+ ARRAY_SIZE(ar5416Bank2_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
+ ARRAY_SIZE(ar5416Bank3_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
+ ARRAY_SIZE(ar5416Bank6_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
+ ARRAY_SIZE(ar5416Bank7_9100), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
+ ARRAY_SIZE(ar5416Addac_9100), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
+ ARRAY_SIZE(ar5416Modes), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
+ ARRAY_SIZE(ar5416Common), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
+ ARRAY_SIZE(ar5416BB_RfGain), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
+ ARRAY_SIZE(ar5416Bank1), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
+ ARRAY_SIZE(ar5416Bank2), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
+ ARRAY_SIZE(ar5416Bank3), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
+ ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+}
+
+/* Support for Japan ch.14 (2484) spread */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniCckfirNormal,
+ ar9287Common_normal_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
+ 2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
+ 2);
+ }
+}
+
+static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
+{
+ u32 rxgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_17) {
+ rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
+
+ if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_13db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
+ else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_23db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ }
+}
+
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
+{
+ u32 txgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_19) {
+ txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_high_power_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ }
+}
+
+static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
+ else if (AR_SREV_9287_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
+ else if (AR_SREV_9280_20(ah))
+ ar9280_20_hw_init_rxgain_ini(ah);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
+ } else if (AR_SREV_9287_10(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
+ } else if (AR_SREV_9280_20(ah)) {
+ ar9280_20_hw_init_txgain_ini(ah);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ /* txgain table */
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_high_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_high_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_high_power_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_high_power_tx_gain_9285_1_2), 6);
+ }
+ } else {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_normal_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_normal_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_original_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_original_tx_gain_9285_1_2), 6);
+ }
+ }
+ }
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable the PLL when in L0s, and the receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288-bit serial shift
+ * register as the other analog registers. Hence the 9 writes.
+ */
+static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ u8 i;
+ u32 val;
+
+ if (ah->is_pciexpress != true)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 or later chips use the SerDes values from
+ * initvals.h, selected per chipset during
+ * __ath9k_hw_init().
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+ }
+
+ udelay(1000);
+
+ /* Set bit 19 to allow forcing the PCIe core into the L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe messages to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * On AR9280 chips, bit 22 of 0x4004 needs to be
+ * set, otherwise the card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
+
+ REG_WRITE(ah, AR_WA, val);
+ }
+
+ if (power_off) {
+ /*
+ * Set the PCIe workaround bits:
+ * bit 14 in the WA register (disable L1) should only
+ * be set when the device enters D3 and cleared
+ * when the device comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
+ }
+}
+
+static int ar9002_hw_get_radiorev(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+int ar9002_hw_rf_claim(struct ath_hw *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ar9002_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ ah->hw_version.analog5GhzRev = val;
+
+ return 0;
+}
+
+/*
+ * Enable ASYNC FIFO
+ *
+ * If the async FIFO is enabled, the following counters change because the
+ * MAC now runs at 117 MHz instead of the 88/44 MHz used when the async
+ * FIFO is disabled.
+ *
+ * The values below were tested for HT40 with two chains.
+ * They overwrite the delay/timeout values initialized during INI processing.
+ */
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
+ AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
+ AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
+ AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
+
+ REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
+
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ }
+}
+
+/*
+ * We don't enable WEP aggregation on mac80211 but we keep this
+ * around for HAL unification purposes.
+ */
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+}
+
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
+void ar9002_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9002_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9002_hw_configpcipowersave;
+
+ ar5008_hw_attach_phy_ops(ah);
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ar9002_hw_attach_phy_ops(ah);
+
+ ar9002_hw_attach_calib_ops(ah);
+ ar9002_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 8a3bf3ab998..dae7f3304eb 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
- { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
- { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
- { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
- { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000000 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8000010 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00070000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a002e },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5d50e188 },
- { 0x00009958, 0x00081fff },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x001fff00 },
- { 0x000099ac, 0x00000000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x000000aa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x00000bb5 },
- { 0x0000a22c, 0x00000011 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000000 },
- { 0x0000a26c, 0x0e79e5c6 },
- { 0x0000b26c, 0x0e79e5c6 },
- { 0x0000c26c, 0x0e79e5c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x051701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa1f },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x08000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x201400df, 0x201400df },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007081, 0x00007081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x0000000c },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000030 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000060 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000058 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9100[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
- { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
- { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
- { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
- { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
-#ifdef TB243
- { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
-#else
- { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
-#endif
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9100[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00020010, 0x00000003 },
- { 0x00020038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00004000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0x00000000 },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00000000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889ae },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa33 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9100[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9100[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9100[][2] = {
- { 0x000098b0, 0x02108421},
- { 0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9100[][2] = {
- { 0x000098b0, 0x0e73ff17},
- { 0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9100[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014000f, 0x0014000f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x000180d6, 0x000180d6 },
- { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
- { 0x0000989c, 0x000000b1, 0x000000b1 },
- { 0x0000989c, 0x00002000, 0x00002000 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-
-static const u32 ar5416Bank6TPC_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9100[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac_9100[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000010 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000015 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9160[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
- { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9160[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000020 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00ff0000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5f3ca3de },
- { 0x00009958, 0x2108ecff },
- { 0x00009940, 0x00750604 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000e000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79bfaa03 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9160[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9160[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9160[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2_9160[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3_9160[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9160[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
+#ifndef INITVALS_9002_10_H
+#define INITVALS_9002_10_H
-static u32 ar5416Addac_9160[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000008 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static u32 ar5416Addac_91601_1[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-/* XXX 9280 1 */
static const u32 ar9280Modes_9280[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
};
/* AR9285 Revision 10 */
-static const u_int32_t ar9285Modes_9285[][6] = {
+static const u32 ar9285Modes_9285[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285[][2] = {
+static const u32 ar9285Common_9285[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
};
/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
-static const u_int32_t ar9285Modes_9285_1_2[][6] = {
+static const u32 ar9285Modes_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4184,7 +2211,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +2225,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +2339,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +2353,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285_1_2[][2] = {
+static const u32 ar9285Common_9285_1_2[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -4731,17 +2758,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007808, 0x54214514 },
{ 0x0000780c, 0x02025830 },
{ 0x00007810, 0x71c0d388 },
- { 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
{ 0x00007824, 0x00d86fff },
- { 0x00007828, 0x26d2491b },
{ 0x0000782c, 0x6e36d97b },
- { 0x00007830, 0xedb6d96e },
{ 0x00007834, 0x71400087 },
- { 0x0000783c, 0x0001fffe },
- { 0x00007840, 0xffeb1a20 },
{ 0x00007844, 0x000c0db6 },
- { 0x00007848, 0x6db61b6f },
+ { 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@@ -4753,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4777,7 +2799,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
};
-static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4813,7 +2840,52 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
+ { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
+ { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+ { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+};
+
+static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
+ { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,7 +2897,47 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
+static const u32 ar9285Modes_XE2_0_high_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
+ { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
+ { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
+ { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+ { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+};
+
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4838,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4852,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
};
/* AR9287 Revision 10 */
-static const u_int32_t ar9287Modes_9287_1_0[][6] = {
+static const u32 ar9287Modes_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4899,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_0[][2] = {
+static const u32 ar9287Common_9287_1_0[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -5073,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -5270,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x000078b8, 0x2a850160 },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5320,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5582,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5595,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5610,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
/* AR9287 Revision 11 */
-static const u_int32_t ar9287Modes_9287_1_1[][6] = {
+static const u32 ar9287Modes_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5657,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_1[][2] = {
+static const u32 ar9287Common_9287_1_1[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -6027,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
/*
* For Japanese regulatory requirements, 2484 MHz requires the following three
- * registers be programmed differently from the channel between 2412 and 2472 MHz.
+ * registers be programmed differently from the channels between 2412 and
+ * 2472 MHz.
*/
-static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00fffeff },
{ 0x0000a1f8, 0x00f5f9ff },
{ 0x0000a1fc, 0xb79f6427 },
};
-static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00000000 },
{ 0x0000a1f8, 0xefff0301 },
{ 0x0000a1fc, 0xca9228ee },
};
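
The comment above the two FIR tables says that only channel 14 (2484 MHz, the Japan-only channel) takes the special CCK FIR coefficients; every other 2.4 GHz channel keeps the normal set. The following is a minimal selection sketch, not part of this patch: it assumes a struct ath_hw *ah and the centre frequency in MHz are in scope and writes the three registers directly, whereas the driver itself plays these tables back through its ini-array machinery (see the freq == 2484 branch in ar9002_hw_set_channel() later in this diff).

/*
 * Hypothetical helper (not in the patch): pick and program the CCK FIR
 * coefficient table for the current centre frequency. Both tables hold
 * exactly three { register, value } pairs.
 */
static void ar9287_write_cck_fir(struct ath_hw *ah, u16 freq_mhz)
{
	const u32 (*fir)[2] = (freq_mhz == 2484) ?
		ar9287Common_japan_2484_cck_fir_coeff_92871_1 :
		ar9287Common_normal_cck_fir_coeff_92871_1;
	int i;

	for (i = 0; i < 3; i++)	/* the three registers named above */
		REG_WRITE(ah, fir[i][0], fir[i][1]);
}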
-static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6090,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
{ 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6352,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6365,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6380,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
/* AR9271 initialization values automatically created: 06/04/09 */
-static const u_int32_t ar9271Modes_9271[][6] = {
+static const u32 ar9271Modes_9271[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6441,7 +4554,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +4568,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +4682,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +4696,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,29 +4796,10 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
{ 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
- { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
- { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
- { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
- { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
- { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
- { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
- { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
- { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
- { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
- { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
- { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
- { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
- { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
- { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
- { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
- { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9271Common_9271[][2] = {
+static const u32 ar9271Common_9271[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -6910,13 +5004,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007810, 0x71c0d388 },
{ 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
- { 0x00007820, 0x00000c04 },
- { 0x00007824, 0x00d8abff },
{ 0x00007828, 0x66964300 },
{ 0x0000782c, 0x8db6d961 },
{ 0x00007830, 0x8db6d96c },
{ 0x00007834, 0x6140008b },
- { 0x00007838, 0x00000029 },
{ 0x0000783c, 0x72ee0a72 },
{ 0x00007840, 0xbbfffffc },
{ 0x00007844, 0x000c0db6 },
@@ -6929,7 +5020,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007860, 0x21084210 },
{ 0x00007864, 0xf7d7ffde },
{ 0x00007868, 0xc2034080 },
- { 0x0000786c, 0x48609eb4 },
{ 0x00007870, 0x10142c00 },
{ 0x00009808, 0x00000000 },
{ 0x0000980c, 0xafe68e30 },
@@ -6982,9 +5072,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x000099e8, 0x3c466478 },
{ 0x000099ec, 0x0cc80caa },
{ 0x000099f0, 0x00000000 },
- { 0x0000a1f4, 0x00000000 },
- { 0x0000a1f8, 0x71733d01 },
- { 0x0000a1fc, 0xd0ad5c12 },
{ 0x0000a208, 0x803e68c8 },
{ 0x0000a210, 0x4080a333 },
{ 0x0000a214, 0x00206c10 },
@@ -7004,13 +5091,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a260, 0xdfa90f01 },
{ 0x0000a268, 0x00000000 },
{ 0x0000a26c, 0x0ebae9e6 },
- { 0x0000a278, 0x3bdef7bd },
- { 0x0000a27c, 0x050e83bd },
{ 0x0000a388, 0x0c000000 },
{ 0x0000a38c, 0x20202020 },
{ 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x3bdef7bd },
- { 0x0000a398, 0x000003bd },
{ 0x0000a39c, 0x00000001 },
{ 0x0000a3a0, 0x00000000 },
{ 0x0000a3a4, 0x00000000 },
@@ -7025,8 +5108,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a3cc, 0x20202020 },
{ 0x0000a3d0, 0x20202020 },
{ 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x3bdef7bd },
- { 0x0000a3e0, 0x000003bd },
{ 0x0000a3e4, 0x00000000 },
{ 0x0000a3e8, 0x18c43433 },
{ 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +5127,104 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000d384, 0xf3307ff0 },
};
-static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
+static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00fffeff },
+ { 0x0000a1f8, 0x00f5f9ff },
+ { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00000000 },
+ { 0x0000a1f8, 0xefff0301 },
+ { 0x0000a1fc, 0xca9228ee },
+};
+
+static const u32 ar9271Modes_9271_1_0_only[][6] = {
{ 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
};
+
+static const u32 ar9271Modes_9271_ANI_reg[][6] = {
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
+ { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
+ { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+};
+
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
+ { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
+ { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
+ { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+ { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+};
+
+static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
+ { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
+ { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
+ { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+ { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+};
+
+#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 00000000000..2be20d2070c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+
+#define AR_BufLen 0x00000fff
+
+static void ar9002_hw_rx_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
+static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ ((struct ath_desc*) ds)->ds_link = ds_link;
+}
+
+static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ *ds_link = &((struct ath_desc *)ds)->ds_link;
+}
+
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
+ }
+
+ isr = REG_READ(ah, AR_ISR_RAC);
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation) {
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RX;
+ }
+
+ if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "receive FIFO overrun interrupt\n");
+ }
+
+ if (!AR_SREV_9100(ah)) {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
+ if (isr5 & AR_ISR_S5_TIM_TIMER)
+ *masked |= ATH9K_INT_TIM_TIMER;
+ }
+ }
+
+ *masked |= mask2;
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5_s;
+
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (isr & AR_ISR_GENTMR) {
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ }
+ }
+
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+
+ return true;
+}
+
+static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_data = buf_addr;
+
+ if (is_firstseg) {
+ ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen;
+ ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
+ ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
+ } else {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen | AR_TxMore;
+ ads->ds_ctl2 = 0;
+ ads->ds_ctl3 = 0;
+ }
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+
+static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
+ ts->ts_tstamp = ads->AR_SendTimestamp;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->ds_txstatus1 & AR_FrmXmitOK)
+ ts->ts_status |= ATH9K_TX_ACKED;
+ if (ads->ds_txstatus1 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->ds_txstatus1 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus9 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->ds_txstatus1 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus0 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
+ switch (ts->ts_rateindex) {
+ case 0:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
+ break;
+ case 1:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
+ break;
+ case 2:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
+ break;
+ case 3:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
+ break;
+ }
+
+ ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
+ ts->evm0 = ads->AR_TxEVM0;
+ ts->evm1 = ads->AR_TxEVM1;
+ ts->evm2 = ads->AR_TxEVM2;
+ ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ return 0;
+}
+
+static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ txPower += ah->txpower_indexoffset;
+ if (txPower > 63)
+ txPower = 63;
+
+ ads->ds_ctl0 = (pktLen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txPower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
+
+ ads->ds_ctl1 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ds_ctl6 = SM(keyType, AR_EncrType);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+}
+
+static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ar5416_desc *last_ads = AR5416DESC(lastds);
+ u32 ds_ctl0;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ds_ctl0 = ads->ds_ctl0;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ds_ctl0 &= ~AR_CTSEnable;
+ ds_ctl0 |= AR_RTSEnable;
+ } else {
+ ds_ctl0 &= ~AR_RTSEnable;
+ ds_ctl0 |= AR_CTSEnable;
+ }
+
+ ads->ds_ctl0 = ds_ctl0;
+ } else {
+ ads->ds_ctl0 =
+ (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ds_ctl2 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ds_ctl3 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ds_ctl7 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ last_ads->ds_ctl2 = ads->ds_ctl2;
+ last_ads->ds_ctl3 = ads->ds_ctl3;
+}
+
+static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+ ads->ds_ctl6 &= ~AR_AggrLen;
+ ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ unsigned int ctl6;
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ctl6 = ads->ds_ctl6;
+ ctl6 &= ~AR_PadDelim;
+ ctl6 |= SM(numDelims, AR_PadDelim);
+ ads->ds_ctl6 = ctl6;
+}
+
+static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= AR_IsAggr;
+ ads->ds_ctl1 &= ~AR_MoreAggr;
+ ads->ds_ctl6 &= ~AR_PadDelim;
+}
+
+static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl2 &= ~AR_BurstDur;
+ ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
+}
+
+static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (vmf)
+ ads->ds_ctl0 |= AR_VirtMoreFrag;
+ else
+ ads->ds_ctl0 &= ~AR_VirtMoreFrag;
+}
+
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ ads->ds_rxstatus8 &= ~AR_RxDone;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ memset(&(ads->u), 0, sizeof(ads->u));
+}
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ops->rx_enable = ar9002_hw_rx_enable;
+ ops->set_desc_link = ar9002_hw_set_desc_link;
+ ops->get_desc_link = ar9002_hw_get_desc_link;
+ ops->get_isr = ar9002_hw_get_isr;
+ ops->fill_txdesc = ar9002_hw_fill_txdesc;
+ ops->proc_txdesc = ar9002_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 00000000000..ed314e89bfe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family starting with the AR9280
+ * have their analog front end radios, MAC/BB and host PCIe/USB interface embedded
+ * into a single-chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9271 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+#include "ar9002_phy.h"
+
+/**
+ * ar9002_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: the channel to tune to
+ *
+ * This is the function used to change the channel on single-chip devices,
+ * that is, the AR9280 and all later devices.
+ *
+ * This function takes the channel value in MHz and sets the hardware
+ * channel value. It assumes that writes to the analog bus have been enabled.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
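+ *
+ * For example, for the 2.4 GHz channel at 2412 MHz the synthesizer needs
+ * chansel[8:0] + chanfrac[16:0]/2^17 = 2412 / ((3/4) * 40) = 80.4;
+ * CHANSEL_2G() encodes that value into the fixed-point channelSel word
+ * written to AR_PHY_SYNTH_CONTROL below.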
+ */
+static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = CHANSEL_2G(freq);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if ((freq % 20) == 0)
+ aModeRefSel = 3;
+ else if ((freq % 10) == 0)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
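+ /* otherwise fall through and program fractional mode */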
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = CHANSEL_5G(freq);
+
+ /* RefDivA setting */
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
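+ /*
+ * Assemble the synthesizer programming word: bMode selects the 2 GHz
+ * band, fracMode enables fractional-N synthesis, aModeRefSel picks the
+ * 5 GHz reference divider and channelSel carries the fixed-point
+ * channel value computed above.
+ */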
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ar9002_hw_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: the channel for which spur mitigation is performed
+ *
+ * For single-chip solutions. Converts the spur frequency to a baseband spur
+ * offset for the given channel frequency and computes the register settings
+ * below.
+ */
+static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
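+ /*
+ * Scale the spur offset (in MHz relative to the synth center) into the
+ * bin units used by the mask-generation loops below, which step in
+ * increments of 100.
+ */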
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
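+ /*
+ * Walk the Viterbi bins from +6100 down to -6100 in steps of 100.
+ * Bins within 120 of the spur are considered, and those within 75 of
+ * it are masked (mask_m for negative bins, mask_p for positive ones).
+ */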
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
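+ /*
+ * Pack the computed bin masks, two bits per bin, into the bin mask
+ * and MASK2 registers.
+ */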
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[38] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
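+/*
+ * Open-loop power control initialization: AR9287 and later switch the TX
+ * power control to temperature-sensing mode, while older AR9280-class
+ * parts snapshot the current TX gain table and reset the PDADC delta.
+ */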
+static void ar9002_olc_init(struct ath_hw *ah)
+{
+ u32 i;
+
+ if (!OLC_FOR_AR9280_20_LATER)
+ return;
+
+ if (OLC_FOR_AR9287_10_LATER) {
+ REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_S,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ udelay(100);
+ } else {
+ for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
+ ah->originalGain[i] =
+ MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
+ AR_PHY_TX_GAIN);
+ ah->PDADCdelta = 0;
+ }
+}
+
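+/*
+ * Build the AR_RTC PLL control word: a reference divider of 5, a clock
+ * select for half/quarter rate channels, and a PLL divider of 0x28 for
+ * 5 GHz (with fixed words for fast-clock and AR9280 2.0) or 0x2c for
+ * 2.4 GHz.
+ */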
+static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ pll = 0x142c;
+ else if (AR_SREV_9280_20(ah))
+ pll = 0x2850;
+ else
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ return pll;
+}
+
+static void ar9002_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+
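+ /* The minCCApwr readings are 9-bit two's complement; sign-extend them. */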
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[0] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+ }
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[3] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR9280_PHY_CH1_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+ }
+}
+
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->set_rf_regs = NULL;
+ priv_ops->rf_alloc_ext_banks = NULL;
+ priv_ops->rf_free_ext_banks = NULL;
+ priv_ops->rf_set_freq = ar9002_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
+ priv_ops->olc_init = ar9002_olc_init;
+ priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
+ priv_ops->do_getnf = ar9002_hw_do_getnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 00000000000..81bf6e5840e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef AR9002_PHY_H
+#define AR9002_PHY_H
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR_PHY_TEST2 0x9808
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_TSTDAC_CONST 0x983c
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR_PHY_9285_ANT_DIV_CTL_S 24
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR_PHY_9285_ANT_DIV_LNA1 2
+#define AR_PHY_9285_ANT_DIV_LNA2 1
+#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
+#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_ASYNCFIFO 0x80
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_TX_PWRCTRL4 0xa264
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR_PHY_TX_PWRCTRL6_0 0xa270
+#define AR_PHY_TX_PWRCTRL6_1 0xb270
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR_PHY_TX_PWRCTRL7 0xa274
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR_PHY_TX_PWRCTRL9 0xa27C
+#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
+#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR_PHY_TX_GAIN_TBL1 0xa300
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
+#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR_PHY_CL_CAL_CTL 0xA358
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 00000000000..56a9e5fa6d6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+
+static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Select calibration to run */
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * Start calibration with
+ * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+ case TEMP_COMP_CAL:
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_LOCAL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_START, 1);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ /* Not yet */
+ break;
+ }
+}
+
+/*
+ * Generic calibration routine.
+ * Recalibrate the lower PHY chips to account for temperature/environment
+ * changes.
+ */
+static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ /* Cal is assumed not done until explicitly set below */
+ bool iscaldone = false;
+
+ /* Calibration in progress. */
+ if (currCal->calState == CAL_RUNNING) {
+ /* Check to see if it has finished. */
+ if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ /*
+ * Accumulate cal measures for active chains
+ */
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ /*
+ * Process accumulated data
+ */
+ currCal->calData->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ /*
+ * Set-up collection of another sub-sample until we
+ * get desired number
+ */
+ ar9003_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ /* If current cal is marked invalid in channel, kick it off */
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static bool ar9003_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ /*
+ * For given calibration:
+ * 1. Call generic cal routine
+ * 2. When this cal is done (isCalDone), if we have more cals waiting
+ * (e.g. after reset), hide that from the upper layers by not
+ * propagating isCalDone when it is set to TRUE.
+ * Instead, change isCalDone to FALSE and set up the waiting cal(s)
+ * to be run.
+ */
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9003_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ /* start NF calibration, without updating BB NF register */
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
+
+static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ /* Accumulate IQ cal measures for active chains */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+ const u_int32_t offset_array[3] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ /* Force bounds on iCoff */
+ if (iCoff >= 63)
+ iCoff = 63;
+ else if (iCoff <= -63)
+ iCoff = -63;
+
+ /* Negate iCoff if iqCorrNeg == 0 */
+ if (iqCorrNeg == 0x0)
+ iCoff = -iCoff;
+
+ /* Force bounds on qCoff */
+ if (qCoff >= 63)
+ qCoff = 63;
+ else if (qCoff <= -63)
+ qCoff = -63;
+
+ iCoff = iCoff & 0x7f;
+ qCoff = qCoff & 0x7f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) "
+ "before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
+
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QI COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QQ COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled "
+ "(bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+}
+
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9003_hw_iqcal_collect,
+ ar9003_hw_iqcalibrate
+};
+
+static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+}
+
+static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * XXX: Run IQ Mismatch for non-CCK only
+ * Note that CHANNEL_B is never set though.
+ */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ return false;
+ case TEMP_COMP_CAL:
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * solve 4x4 linear equation used in loopback iq cal.
+ */
+static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
+ s32 sin_2phi_1,
+ s32 cos_2phi_1,
+ s32 sin_2phi_2,
+ s32 cos_2phi_2,
+ s32 mag_a0_d0,
+ s32 phs_a0_d0,
+ s32 mag_a1_d0,
+ s32 phs_a1_d0,
+ s32 solved_eq[])
+{
+ s32 f1 = cos_2phi_1 - cos_2phi_2,
+ f3 = sin_2phi_1 - sin_2phi_2,
+ f2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ const s32 result_shift = 1 << 15;
+ struct ath_common *common = ath9k_hw_common(ah);
+
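+ /*
+ * f2 is (f1^2 + f3^2) scaled down by 2^15; if it ends up zero the
+ * system below cannot be solved.
+ */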
+ f2 = (f1 * f1 + f3 * f3) / result_shift;
+
+ if (!f2) {
+ ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ return false;
+ }
+
+ /* mag mismatch, tx */
+ mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
+ /* phs mismatch, tx */
+ phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
+
+ mag_tx = (mag_tx / f2);
+ phs_tx = (phs_tx / f2);
+
+ /* mag mismatch, rx */
+ mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
+ result_shift;
+ /* phs mismatch, rx */
+ phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
+ result_shift;
+
+ solved_eq[0] = mag_tx;
+ solved_eq[1] = phs_tx;
+ solved_eq[2] = mag_rx;
+ solved_eq[3] = phs_rx;
+
+ return true;
+}
+
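+/*
+ * Approximate sqrt(re^2 + im^2) without a square root, using the
+ * alpha*max + beta*min estimate with alpha = 31/32 and beta = 3/8.
+ */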
+static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
+{
+ s32 abs_i = abs(in_re),
+ abs_q = abs(in_im),
+ max_abs, min_abs;
+
+ if (abs_i > abs_q) {
+ max_abs = abs_i;
+ min_abs = abs_q;
+ } else {
+ max_abs = abs_q;
+ min_abs = abs_i;
+ }
+
+ return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
+}
+
+#define DELPT 32
+
+static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
+ s32 chain_idx,
+ const s32 iq_res[],
+ s32 iqc_coeff[])
+{
+ s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
+ i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
+ i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
+ i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
+ s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
+ phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
+ q_q_coff, q_i_coff;
+ const s32 res_scale = 1 << 15;
+ const s32 delpt_shift = 1 << 8;
+ s32 mag1, mag2;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
+ i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
+ iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
+
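+ /*
+ * The unpacked measurements are 12-bit two's complement values;
+ * convert anything above 0x800 to its signed equivalent.
+ */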
+ if (i2_m_q2_a0_d0 > 0x800)
+ i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
+
+ if (i2_p_q2_a0_d0 > 0x800)
+ i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
+
+ if (iq_corr_a0_d0 > 0x800)
+ iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
+
+ i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
+ i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
+ iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
+
+ if (i2_m_q2_a0_d1 > 0x800)
+ i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
+
+ if (i2_p_q2_a0_d1 > 0x800)
+ i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+
+ if (iq_corr_a0_d1 > 0x800)
+ iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
+
+ i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
+ i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
+ iq_corr_a1_d0 = iq_res[4] & 0xfff;
+
+ if (i2_m_q2_a1_d0 > 0x800)
+ i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
+
+ if (i2_p_q2_a1_d0 > 0x800)
+ i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
+
+ if (iq_corr_a1_d0 > 0x800)
+ iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
+
+ i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
+ i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
+ iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
+
+ if (i2_m_q2_a1_d1 > 0x800)
+ i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
+
+ if (i2_p_q2_a1_d1 > 0x800)
+ i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
+
+ if (iq_corr_a1_d1 > 0x800)
+ iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
+
+ if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
+ (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0:\na0_d0=%d\n"
+ "a0_d1=%d\na1_d0=%d\na1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ return false;
+ }
+
+ mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+ phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+
+ mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+ phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+
+ mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+ phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+
+ mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+ phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+
+ /* w/o analog phase shift */
+ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
+ /* w/o analog phase shift */
+ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
+
+ /*
+ * force sin^2 + cos^2 = 1;
+ * find magnitude by approximation
+ */
+ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
+ mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
+
+ if ((mag1 == 0) || (mag2 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
+ return false;
+ }
+
+ /* normalization sin and cos by mag */
+ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
+ cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
+ sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
+ cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
+
+ /* calculate IQ mismatch */
+ if (!ar9003_hw_solve_iq_cal(ah,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2,
+ mag_a0_d0, phs_a0_d0,
+ mag_a1_d0,
+ phs_a1_d0, solved_eq)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ return false;
+ }
+
+ mag_tx = solved_eq[0];
+ phs_tx = solved_eq[1];
+ mag_rx = solved_eq[2];
+ phs_rx = solved_eq[3];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+
+ if (res_scale == mag_tx) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Tx IQ correction factor */
+ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
+ phs_corr_tx = -phs_tx;
+
+ q_q_coff = (mag_corr_tx * 128 / res_scale);
+ q_i_coff = (phs_corr_tx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
+
+ if (-mag_rx == res_scale) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Rx IQ correction factors */
+ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
+ phs_corr_rx = -phs_rx;
+
+ q_q_coff = (mag_corr_rx * 128 / res_scale);
+ q_i_coff = (phs_corr_rx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
+ };
+ const u32 rx_corr[AR9300_MAX_CHAINS] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ s32 iq_res[6];
+ s32 iqc_coeff[2];
+ s32 i, j;
+ u32 num_chains = 0;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ AR_PHY_TX_IQCAL_START_DO_CAL);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal not complete.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto TX_IQ_CAL_FAILED;
+ }
+
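+ /*
+ * Each measurement is wider than 32 bits, so it is read from the
+ * chan_info table in two passes: the low 32 bits with
+ * AR_PHY_CHAN_INFO_TAB_S2_READ cleared, the upper 16 bits with it set.
+ */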
+ for (j = 0; j < 3; j++) {
+ u_int8_t idx = 2 * j,
+ offset = 4 * j;
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+
+ /* 16 bits */
+ iq_res[idx+1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx+1, iq_res[idx+1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
+ iqc_coeff[0], iqc_coeff[1]);
+
+ REG_RMW_FIELD(ah, tx_corr_coeff[i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ iqc_coeff[0]);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
+ iqc_coeff[1] >> 7);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
+ iqc_coeff[1]);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ return;
+
+TX_IQ_CAL_FAILED:
+ ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+}
+
+static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+ * 0x7 = 0b111; the AR9003 needs to be configured for 3-chain mode before
+ * running AGC/TxIQ cals
+ */
+ ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ /* Do Tx IQ Calibration */
+ if (ah->config.tx_iq_calibration)
+ ar9003_hw_tx_iq_cal(ah);
+
+ /* Revert chainmasks to their original values before NF cal */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
+ INIT_CAL(&ah->tempCompCalData);
+ INSERT_CAL(ah, &ah->tempCompCalData);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling Temperature Compensation Calibration.\n");
+ }
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
+ priv_ops->init_cal = ar9003_hw_init_cal;
+ priv_ops->setup_calibration = ar9003_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9003_hw_iscal_supported;
+
+ ops->calibrate = ar9003_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 00000000000..23eb60ea545
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
+
+#define COMP_HDR_LEN 4
+#define COMP_CKSUM_LEN 2
+
+#define AR_CH0_TOP (0x00016288)
+#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL_S (8)
+
+#define AR_CH0_THERM (0x00016290)
+#define AR_CH0_THERM_SPARE (0x3f)
+#define AR_CH0_THERM_SPARE_S (0)
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+#define LE16(x) __constant_cpu_to_le16(x)
+#define LE32(x) __constant_cpu_to_le32(x)
+
+static const struct ar9300_eeprom ar9300_default = {
+ .eepromVersion = 2,
+ .templateVersion = 2,
+ .macAddr = {1, 2, 3, 4, 5, 6},
+ .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0c,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 3,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 36,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {0, 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = { /* [32] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {36, 36, 36, 36} },
+ { {36, 36, 36, 36} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .calTargetPower2GHT40 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */
+ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+
+ { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x000), LE16(0x000), LE16(0x000),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+	 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+	.db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 1}, {60, 0},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 0}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ }
+ },
+ }
+};
+
+static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_MAC_LSW:
+ return eep->macAddr[0] << 8 | eep->macAddr[1];
+ case EEP_MAC_MID:
+ return eep->macAddr[2] << 8 | eep->macAddr[3];
+ case EEP_MAC_MSW:
+ return eep->macAddr[4] << 8 | eep->macAddr[5];
+ case EEP_REG_0:
+ return le16_to_cpu(pBase->regDmn[0]);
+ case EEP_REG_1:
+ return le16_to_cpu(pBase->regDmn[1]);
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags.opFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_TX_MASK:
+ return (pBase->txrxMask >> 4) & 0xf;
+ case EEP_RX_MASK:
+ return pBase->txrxMask & 0xf;
+ case EEP_DRIVE_STRENGTH:
+#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
+ return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
+ case EEP_INTERNAL_REGULATOR:
+ /* Bit 4 is internal regulator flag */
+ return (pBase->featureEnable & 0x10) >> 4;
+ case EEP_SWREG:
+ return le32_to_cpu(pBase->swreg);
+ default:
+ return 0;
+ }
+}
+
+static bool ar9300_eeprom_read_byte(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
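+	/* even addresses select the low byte of the word, odd the high byte */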
+ *buffer = (val >> (8 * (address % 2))) & 0xff;
+ return true;
+}
+
+static bool ar9300_eeprom_read_word(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
+ buffer[0] = val >> 8;
+ buffer[1] = val & 0xff;
+
+ return true;
+}
+
+static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i;
+
+ if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "eeprom address not in range\n");
+ return false;
+ }
+
+ /*
+ * Since we're reading the bytes in reverse order from a little-endian
+ * word stream, an even address means we only use the lower half of
+ * the 16-bit word at that address
+ */
+ if (address % 2 == 0) {
+ if (!ar9300_eeprom_read_byte(common, address--, buffer++))
+ goto error;
+
+ count--;
+ }
+
+ for (i = 0; i < count / 2; i++) {
+ if (!ar9300_eeprom_read_word(common, address, buffer))
+ goto error;
+
+ address -= 2;
+ buffer += 2;
+ }
+
+ if (count % 2)
+ if (!ar9300_eeprom_read_byte(common, address, buffer))
+ goto error;
+
+ return true;
+
+error:
+ ath_print(common, ATH_DBG_EEPROM,
+ "unable to read eeprom region at offset %d\n", address);
+ return false;
+}
+
+static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
+ int *length, int *major, int *minor)
+{
+ unsigned long value[4];
+
+ value[0] = best[0];
+ value[1] = best[1];
+ value[2] = best[2];
+ value[3] = best[3];
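+
+	/*
+	 * Header layout (4 bytes):
+	 *   byte 0: bits 7:5 = compression code, bits 4:0 = reference[4:0]
+	 *   byte 1: bit 7 = reference[5], bits 6:0 = length[10:4]
+	 *   byte 2: bits 7:4 = length[3:0], bits 3:0 = major version
+	 *   byte 3: minor version
+	 */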
+ *code = ((value[0] >> 5) & 0x0007);
+ *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
+ *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
+ *major = (value[2] & 0x000f);
+ *minor = (value[3] & 0x00ff);
+}
+
+static u16 ar9300_comp_cksum(u8 *data, int dsize)
+{
+ int it, checksum = 0;
+
+ for (it = 0; it < dsize; it++) {
+ checksum += data[it];
+ checksum &= 0xffff;
+ }
+
+ return checksum;
+}
+
+static bool ar9300_uncompress_block(struct ath_hw *ah,
+ u8 *mptr,
+ int mdataSize,
+ u8 *block,
+ int size)
+{
+ int it;
+ int spot;
+ int offset;
+ int length;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ spot = 0;
+
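+	/*
+	 * Each record in the block is one byte of offset (advance 'spot'), one
+	 * byte of length, then 'length' bytes of payload copied to mptr[spot].
+	 */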
+ for (it = 0; it < size; it += (length+2)) {
+ offset = block[it];
+ offset &= 0xff;
+ spot += offset;
+ length = block[it+1];
+ length &= 0xff;
+
+ if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ memcpy(&mptr[spot], &block[it+2], length);
+ spot += length;
+ } else if (length > 0) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Bad restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int ar9300_compress_decision(struct ath_hw *ah,
+ int it,
+ int code,
+ int reference,
+ u8 *mptr,
+ u8 *word, int length, int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 *dptr;
+
+ switch (code) {
+ case _CompressNone:
+ if (length != mdata_size) {
+ ath_print(common, ATH_DBG_EEPROM,
+				  "EEPROM structure size mismatch "
+				  "memory=%d eeprom=%d\n", mdata_size, length);
+ return -1;
+ }
+ memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
+ " uncompressed, length %d\n", it, length);
+ break;
+ case _CompressBlock:
+ if (reference == 0) {
+ dptr = mptr;
+ } else {
+ if (reference != 2) {
+ ath_print(common, ATH_DBG_EEPROM,
+					  "can't find reference eeprom "
+					  "struct %d\n", reference);
+ return -1;
+ }
+ memcpy(mptr, &ar9300_default, mdata_size);
+ }
+ ath_print(common, ATH_DBG_EEPROM,
+ "restore eeprom %d: block, reference %d,"
+ " length %d\n", it, reference, length);
+ ar9300_uncompress_block(ah, mptr, mdata_size,
+ (u8 *) (word + COMP_HDR_LEN), length);
+ break;
+ default:
+ ath_print(common, ATH_DBG_EEPROM, "unknown compression"
+ " code %d\n", code);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Read the configuration data from the eeprom.
+ * The data can be put in any specified memory buffer.
+ *
+ * Returns -1 on error.
+ * Returns address of next memory location on success.
+ */
+static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
+ u8 *mptr, int mdata_size)
+{
+#define MDEFAULT 15
+#define MSTATE 100
+ int cptr;
+ u8 *word;
+ int code;
+ int reference, length, major, minor;
+ int osize;
+ int it;
+ u16 checksum, mchecksum;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ word = kzalloc(2048, GFP_KERNEL);
+ if (!word)
+ return -1;
+
+ memcpy(mptr, &ar9300_default, mdata_size);
+
+ cptr = AR9300_BASE_ADDR;
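+	/* compressed blocks are laid out downwards from AR9300_BASE_ADDR */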
+ for (it = 0; it < MSTATE; it++) {
+ if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
+ goto fail;
+
+ if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
+ word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
+ && word[2] == 0xff && word[3] == 0xff))
+ break;
+
+ ar9300_comp_hdr_unpack(word, &code, &reference,
+ &length, &major, &minor);
+ ath_print(common, ATH_DBG_EEPROM,
+			  "Found block at %x: code=%d ref=%d "
+ "length=%d major=%d minor=%d\n", cptr, code,
+ reference, length, major, minor);
+ if (length >= 1024) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Skipping bad header\n");
+ cptr -= COMP_HDR_LEN;
+ continue;
+ }
+
+ osize = length;
+ ar9300_read_eeprom(ah, cptr, word,
+ COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
+ mchecksum = word[COMP_HDR_LEN + osize] |
+ (word[COMP_HDR_LEN + osize + 1] << 8);
+ ath_print(common, ATH_DBG_EEPROM,
+ "checksum %x %x\n", checksum, mchecksum);
+ if (checksum == mchecksum) {
+ ar9300_compress_decision(ah, it, code, reference, mptr,
+ word, length, mdata_size);
+ } else {
+ ath_print(common, ATH_DBG_EEPROM,
+ "skipping block with bad checksum\n");
+ }
+ cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ }
+
+ kfree(word);
+ return cptr;
+
+fail:
+ kfree(word);
+ return -1;
+}
+
+/*
+ * Restore the configuration structure by reading the eeprom.
+ * This function destroys any existing in-memory structure
+ * content.
+ */
+static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
+{
+ u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;
+
+ if (ar9300_eeprom_restore_internal(ah, mptr,
+ sizeof(struct ar9300_eeprom)) < 0)
+ return false;
+
+ return true;
+}
+
+/* XXX: review hardware docs */
+static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ah->eeprom.ar9300_eep.eepromVersion;
+}
+
+/* XXX: could be read from the eepromVersion, not sure yet */
+static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
+ enum ieee80211_band freq_band)
+{
+ return 1;
+}
+
+static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return -EINVAL;
+}
+
+static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.xpaBiasLvl;
+ else
+ return eep->modalHeader5G.xpaBiasLvl;
+}
+
+static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
+{
+ int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
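+
+	/*
+	 * bias[1:0] is written to AR_CH0_TOP_XPABIASLVL and bias[3:2] to
+	 * AR_CH0_THERM_SPARE.
+	 */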
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
+ ((bias >> 2) & 0x3));
+}
+
+static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon;
+ else
+ val = eep->modalHeader5G.antCtrlCommon;
+ return le32_to_cpu(val);
+}
+
+static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon2;
+ else
+ val = eep->modalHeader5G.antCtrlCommon2;
+ return le32_to_cpu(val);
+}
+
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ int chain,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le16 val = 0;
+
+ if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlChain[chain];
+ else
+ val = eep->modalHeader5G.antCtrlChain[chain];
+ }
+
+ return le16_to_cpu(val);
+}
+
+static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+{
+ u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
+}
+
+static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
+{
+ int drive_strength;
+ unsigned long reg;
+
+ drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
+
+ if (!drive_strength)
+ return;
+
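+	/* force every 3-bit bias field in CH0_BIAS1, BIAS2 and BIAS4 to 5 */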
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
+ reg &= ~0x00ffffc0;
+ reg |= 0x5 << 21;
+ reg |= 0x5 << 18;
+ reg |= 0x5 << 15;
+ reg |= 0x5 << 12;
+ reg |= 0x5 << 9;
+ reg |= 0x5 << 6;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
+ reg &= ~0xffffffe0;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ reg |= 0x5 << 20;
+ reg |= 0x5 << 17;
+ reg |= 0x5 << 14;
+ reg |= 0x5 << 11;
+ reg |= 0x5 << 8;
+ reg |= 0x5 << 5;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
+ reg &= ~0xff800000;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
+}
+
+static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+{
+ int internal_regulator =
+ ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+
+ if (internal_regulator) {
+ /* Internal regulator is ON. Write swreg register. */
+ int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ } else {
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK,
+ (REG_READ(ah,
+ AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD));
+ }
+}
+
+static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_internal_regulator_apply(ah);
+}
+
+static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+}
+
+/*
+ * Returns the interpolated y value corresponding to the specified x value
+ * from the np ordered pairs of data (px,py).
+ * The pairs do not have to be in any order.
+ * If the specified x value is less than any of the px,
+ * the returned y value is equal to the py for the lowest px.
+ * If the specified x value is greater than any of the px,
+ * the returned y value is equal to the py for the highest px.
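+ * For example, pairs (5180, 10) and (5320, 24) give y = 17 at x = 5250.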
+ */
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np)
+{
+ int ip = 0;
+ int lx = 0, ly = 0, lhave = 0;
+ int hx = 0, hy = 0, hhave = 0;
+ int dx = 0;
+ int y = 0;
+
+ lhave = 0;
+ hhave = 0;
+
+ /* identify best lower and higher x calibration measurement */
+ for (ip = 0; ip < np; ip++) {
+ dx = x - px[ip];
+
+ /* this measurement is higher than our desired x */
+ if (dx <= 0) {
+ if (!hhave || dx > (x - hx)) {
+ /* new best higher x measurement */
+ hx = px[ip];
+ hy = py[ip];
+ hhave = 1;
+ }
+ }
+ /* this measurement is lower than our desired x */
+ if (dx >= 0) {
+ if (!lhave || dx < (x - lx)) {
+ /* new best lower x measurement */
+ lx = px[ip];
+ ly = py[ip];
+ lhave = 1;
+ }
+ }
+ }
+
+ /* the low x is good */
+ if (lhave) {
+ /* so is the high x */
+ if (hhave) {
+ /* they're the same, so just pick one */
+ if (hx == lx)
+ y = ly;
+ else /* interpolate */
+ y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
+ } else /* only low is good, use it */
+ y = ly;
+ } else if (hhave) /* only high is good, use it */
+ y = hy;
+	else /* nothing is good; this should never happen unless np == 0 */
+ y = -(1 << 30);
+ return y;
+}
+
+static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2G;
+ pFreqBin = eep->calTarget_freqbin_2G;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5G;
+ pFreqBin = eep->calTarget_freqbin_5G;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT20;
+ pFreqBin = eep->calTarget_freqbin_2GHT20;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT20;
+ pFreqBin = eep->calTarget_freqbin_5GHT20;
+ }
+
+ /*
+ * create array of channels and targetpower
+ * from targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT40;
+ pFreqBin = eep->calTarget_freqbin_2GHT40;
+ } else {
+ numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT40;
+ pFreqBin = eep->calTarget_freqbin_5GHT40;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq)
+{
+ u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
+ s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
+ u8 *pFreqBin = eep->calTarget_freqbin_Cck;
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+/* Set tx power registers to array of values passed in */
+static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 *pPwrArray)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
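+	/*
+	 * POW_SM() packs one 6-bit target-power value into the byte lane
+	 * starting at bit _s, so each 32-bit write below carries four rates.
+	 */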
+ /* make sure forced gain is not set */
+ REG_WRITE(ah, 0xa458, 0);
+
+ /* Write the OFDM power per rate set */
+
+ /* 6 (LSB), 9, 12, 18 (MSB) */
+ REG_WRITE(ah, 0xa3c0,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* 24 (LSB), 36, 48, 54 (MSB) */
+ REG_WRITE(ah, 0xa3c4,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* Write the CCK power per rate set */
+
+ /* 1L (LSB), reserved, 2L, 2S (MSB) */
+ REG_WRITE(ah, 0xa3c8,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
+
+ /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
+ REG_WRITE(ah, 0xa3cc,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+ REG_WRITE(ah, 0xa3d0,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3d4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3e4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
+ );
+
+ /* Mixed HT20 and HT40 rates */
+
+ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
+ REG_WRITE(ah, 0xa3e8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
+ );
+
+ /*
+ * Write the HT40 power per rate set
+ * correct PAR difference between HT40 and HT20/LEGACY
+ * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
+ */
+ REG_WRITE(ah, 0xa3d8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3dc,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3ec,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
+ );
+
+ return 0;
+#undef POW_SM
+}
+
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+{
+ u8 targetPowerValT2[ar9300RateSize];
+ /* XXX: hard code for now, need to get from eeprom struct */
+ u8 ht40PowerIncForPdadc = 0;
+ bool is2GHz = false;
+ unsigned int i = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (freq < 4000)
+ is2GHz = true;
+
+ targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_36] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_48] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_54] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
+ freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_5S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+ targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq, is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_4] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_5] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_6] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_7] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_12] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_13] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_14] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_15] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_20] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_21] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_22] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_23] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_4] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_5] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_6] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_7] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_12] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_13] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_14] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_15] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_20] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_21] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_22] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_23] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+
+ while (i < ar9300RateSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ i++;
+ }
+
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+}
+
+static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
+ int mode,
+ int ipier,
+ int ichain,
+ int *pfrequency,
+ int *pcorrection,
+ int *ptemperature, int *pvoltage)
+{
+ u8 *pCalPier;
+ struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
+ int is2GHz;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ichain >= AR9300_MAX_CHAINS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
+ return -1;
+ }
+
+ if (mode) { /* 5GHz */
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 5GHz cal pier index, must "
+ "be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
+ is2GHz = 0;
+ } else {
+ if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 2GHz cal pier index, must "
+ "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
+ return -1;
+ }
+
+ pCalPier = &(eep->calFreqPier2G[ipier]);
+ pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
+ is2GHz = 1;
+ }
+
+ *pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
+ *pcorrection = pCalPierStruct->refPower;
+ *ptemperature = pCalPierStruct->tempMeas;
+ *pvoltage = pCalPierStruct->voltMeas;
+
+ return 0;
+}
+
+static int ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
+{
+ int tempSlope = 0;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ REG_RMW(ah, AR_PHY_TPC_11_B0,
+ (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+
+ /* enable open loop power control on chip */
+ REG_RMW(ah, AR_PHY_TPC_6_B0,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+
+ /*
+ * enable temperature compensation
+ * Need to use register names
+ */
+ if (frequency < 4000)
+ tempSlope = eep->modalHeader2G.tempSlope;
+ else
+ tempSlope = eep->modalHeader5G.tempSlope;
+
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
+ temperature[0]);
+
+ return 0;
+}
+
+/* Apply the recorded correction values. */
+static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
+{
+ int ichain, ipier, npier;
+ int mode;
+ int lfrequency[AR9300_MAX_CHAINS],
+ lcorrection[AR9300_MAX_CHAINS],
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ int hfrequency[AR9300_MAX_CHAINS],
+ hcorrection[AR9300_MAX_CHAINS],
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ int fdiff;
+ int correction[AR9300_MAX_CHAINS],
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mode = (frequency >= 4000);
+ if (mode)
+ npier = AR9300_NUM_5G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_2G_CAL_PIERS;
+
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ lfrequency[ichain] = 0;
+ hfrequency[ichain] = 100000;
+ }
+ /* identify best lower and higher frequency calibration measurement */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ for (ipier = 0; ipier < npier; ipier++) {
+ if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ &pfrequency, &pcorrection,
+ &ptemperature, &pvoltage)) {
+ fdiff = frequency - pfrequency;
+
+ /*
+ * this measurement is higher than
+ * our desired frequency
+ */
+ if (fdiff <= 0) {
+ if (hfrequency[ichain] <= 0 ||
+ hfrequency[ichain] >= 100000 ||
+ fdiff >
+ (frequency - hfrequency[ichain])) {
+ /*
+ * new best higher
+ * frequency measurement
+ */
+ hfrequency[ichain] = pfrequency;
+ hcorrection[ichain] =
+ pcorrection;
+ htemperature[ichain] =
+ ptemperature;
+ hvoltage[ichain] = pvoltage;
+ }
+ }
+ if (fdiff >= 0) {
+ if (lfrequency[ichain] <= 0
+ || fdiff <
+ (frequency - lfrequency[ichain])) {
+ /*
+ * new best lower
+ * frequency measurement
+ */
+ lfrequency[ichain] = pfrequency;
+ lcorrection[ichain] =
+ pcorrection;
+ ltemperature[ichain] =
+ ptemperature;
+ lvoltage[ichain] = pvoltage;
+ }
+ }
+ }
+ }
+ }
+
+ /* interpolate */
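+	/*
+	 * A cal pier within 1000 MHz of the operating frequency is usable:
+	 * interpolate between two usable piers, copy a single usable pier
+	 * as-is, and fall back to 0 when none qualifies.
+	 */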
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
+ /* they're the same, so just pick one */
+ if (hfrequency[ichain] == lfrequency[ichain]) {
+ correction[ichain] = lcorrection[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ }
+ /* the low frequency is good */
+ else if (frequency - lfrequency[ichain] < 1000) {
+ /* so is the high frequency, interpolate */
+ if (hfrequency[ichain] - frequency < 1000) {
+
+ correction[ichain] = lcorrection[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (hcorrection[ichain] -
+ lcorrection[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ temperature[ichain] = ltemperature[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (htemperature[ichain] -
+ ltemperature[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ voltage[ichain] =
+ lvoltage[ichain] +
+ (((frequency -
+ lfrequency[ichain]) * (hvoltage[ichain] -
+ lvoltage[ichain]))
+ / (hfrequency[ichain] -
+ lfrequency[ichain]));
+ }
+ /* only low is good, use it */
+ else {
+ correction[ichain] = lcorrection[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ }
+ }
+ /* only high is good, use it */
+ else if (hfrequency[ichain] - frequency < 1000) {
+ correction[ichain] = hcorrection[ichain];
+ temperature[ichain] = htemperature[ichain];
+ voltage[ichain] = hvoltage[ichain];
+ } else { /* nothing is good, presume 0???? */
+ correction[ichain] = 0;
+ temperature[ichain] = 0;
+ voltage[ichain] = 0;
+ }
+ }
+
+ ar9003_hw_power_control_override(ah, frequency, correction, voltage,
+ temperature);
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
+
+ return 0;
+}
+
+static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ ah->txpower_limit = powerLimit;
+ ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+ ar9003_hw_calibration_apply(ah, chan->channel);
+}
+
+static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return AR_NO_SPUR;
+}
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
+}
+
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
+}
+
+const struct eeprom_ops eep_ar9300_ops = {
+ .check_eeprom = ath9k_hw_ar9300_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9300_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
+ .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
+ .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
+ .set_board_values = ath9k_hw_ar9300_set_board_values,
+ .set_addac = ath9k_hw_ar9300_set_addac,
+ .set_txpower = ath9k_hw_ar9300_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 00000000000..23fb353c3bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
+#ifndef AR9003_EEPROM_H
+#define AR9003_EEPROM_H
+
+#include <linux/types.h>
+
+#define AR9300_EEP_VER 0xD000
+#define AR9300_EEP_VER_MINOR_MASK 0xFFF
+#define AR9300_EEP_MINOR_VER_1 0x1
+#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
+
+/* 16-bit offset location start of calibration struct */
+#define AR9300_EEP_START_LOC 256
+#define AR9300_NUM_5G_CAL_PIERS 8
+#define AR9300_NUM_2G_CAL_PIERS 3
+#define AR9300_NUM_5G_20_TARGET_POWERS 8
+#define AR9300_NUM_5G_40_TARGET_POWERS 8
+#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
+#define AR9300_NUM_2G_20_TARGET_POWERS 3
+#define AR9300_NUM_2G_40_TARGET_POWERS 3
+/* #define AR9300_NUM_CTLS 21 */
+#define AR9300_NUM_CTLS_5G 9
+#define AR9300_NUM_CTLS_2G 12
+#define AR9300_CTL_MODE_M 0xF
+#define AR9300_NUM_BAND_EDGES_5G 8
+#define AR9300_NUM_BAND_EDGES_2G 4
+#define AR9300_NUM_PD_GAINS 4
+#define AR9300_PD_GAINS_IN_MASK 4
+#define AR9300_PD_GAIN_ICEPTS 5
+#define AR9300_EEPROM_MODAL_SPURS 5
+#define AR9300_MAX_RATE_POWER 63
+#define AR9300_NUM_PDADC_VALUES 128
+#define AR9300_NUM_RATES 16
+#define AR9300_BCHAN_UNUSED 0xFF
+#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR9300_OPFLAGS_11A 0x01
+#define AR9300_OPFLAGS_11G 0x02
+#define AR9300_OPFLAGS_5G_HT40 0x04
+#define AR9300_OPFLAGS_2G_HT40 0x08
+#define AR9300_OPFLAGS_5G_HT20 0x10
+#define AR9300_OPFLAGS_2G_HT20 0x20
+#define AR9300_EEPMISC_BIG_ENDIAN 0x01
+#define AR9300_EEPMISC_WOW 0x02
+#define AR9300_CUSTOMER_DATA_SIZE 20
+
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + (x)) : (4800 + 5 * (x)))
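+/*
+ * Examples: FREQ2FBIN(2437, 1) == 137 and FREQ2FBIN(5180, 0) == 76;
+ * FBIN2FREQ() is the inverse, e.g. FBIN2FREQ(76, 0) == 5180.
+ */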
+#define AR9300_MAX_CHAINS 3
+#define AR9300_ANT_16S 25
+#define AR9300_FUTURE_MODAL_SZ 6
+
+#define AR9300_NUM_ANT_CHAIN_FIELDS 7
+#define AR9300_NUM_ANT_COMMON_FIELDS 4
+#define AR9300_SIZE_ANT_CHAIN_FIELD 3
+#define AR9300_SIZE_ANT_COMMON_FIELD 4
+#define AR9300_ANT_CHAIN_MASK 0x7
+#define AR9300_ANT_COMMON_MASK 0xf
+#define AR9300_CHAIN_0_IDX 0
+#define AR9300_CHAIN_1_IDX 1
+#define AR9300_CHAIN_2_IDX 2
+
+#define AR928X_NUM_ANT_CHAIN_FIELDS 6
+#define AR928X_SIZE_ANT_CHAIN_FIELD 2
+#define AR928X_ANT_CHAIN_MASK 0x3
+
+/* Delta from which to start power to pdadc table */
+/* This offset is used in both open loop and closed loop power control
+ * schemes. In open loop power control, it is not really needed, but for
+ * the "sake of consistency" it was kept. For certain AP designs, this
+ * value is overwritten by the value in the flag "pwrTableOffset" just
+ * before writing the pdadc vs pwr into the chip registers.
+ */
+#define AR9300_PWR_TABLE_OFFSET 0
+
+/* enable flags for voltage and temp compensation */
+#define ENABLE_TEMP_COMPENSATION 0x01
+#define ENABLE_VOLT_COMPENSATION 0x02
+/* byte addressable */
+#define AR9300_EEPROM_SIZE (16*1024)
+#define FIXED_CCA_THRESHOLD 15
+
+#define AR9300_BASE_ADDR 0x3ff
+
+enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+ HT_TARGET_RATE_1_3_9_11_17_19,
+ HT_TARGET_RATE_4,
+ HT_TARGET_RATE_5,
+ HT_TARGET_RATE_6,
+ HT_TARGET_RATE_7,
+ HT_TARGET_RATE_12,
+ HT_TARGET_RATE_13,
+ HT_TARGET_RATE_14,
+ HT_TARGET_RATE_15,
+ HT_TARGET_RATE_20,
+ HT_TARGET_RATE_21,
+ HT_TARGET_RATE_22,
+ HT_TARGET_RATE_23
+};
+
+enum targetPowerLegacyRates {
+ LEGACY_TARGET_RATE_6_24,
+ LEGACY_TARGET_RATE_36,
+ LEGACY_TARGET_RATE_48,
+ LEGACY_TARGET_RATE_54
+};
+
+enum targetPowerCckRates {
+ LEGACY_TARGET_RATE_1L_5L,
+ LEGACY_TARGET_RATE_5S,
+ LEGACY_TARGET_RATE_11L,
+ LEGACY_TARGET_RATE_11S
+};
+
+enum ar9300_Rates {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54,
+ ALL_TARGET_LEGACY_1L_5L,
+ ALL_TARGET_LEGACY_5S,
+ ALL_TARGET_LEGACY_11L,
+ ALL_TARGET_LEGACY_11S,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+ ar9300RateSize,
+};
+
+struct eepFlags {
+ u8 opFlags;
+ u8 eepMisc;
+} __packed;
+
+enum CompressAlgorithm {
+ _CompressNone = 0,
+ _CompressLzma,
+ _CompressPairs,
+ _CompressBlock,
+ _Compress4,
+ _Compress5,
+ _Compress6,
+ _Compress7,
+};
+
+struct ar9300_base_eep_hdr {
+ __le16 regDmn[2];
+ /* 4 bits tx and 4 bits rx */
+ u8 txrxMask;
+ struct eepFlags opCapFlags;
+ u8 rfSilent;
+ u8 blueToothOptions;
+ u8 deviceCap;
+ /* takes lower byte in eeprom location */
+ u8 deviceType;
+ /* offset in dB to be added to beginning
+ * of pdadc table in calibration
+ */
+ int8_t pwrTableOffset;
+ u8 params_for_tuning_caps[2];
+ /*
+ * bit0 - enable tx temp comp
+ * bit1 - enable tx volt comp
+ * bit2 - enable fastClock - default to 1
+ * bit3 - enable doubling - default to 1
+ * bit4 - enable internal regulator - default to 1
+ */
+ u8 featureEnable;
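+	/*
+	 * Note: bit0 and bit1 above presumably correspond to the
+	 * ENABLE_TEMP_COMPENSATION and ENABLE_VOLT_COMPENSATION masks
+	 * defined earlier in this header, i.e. a consumer would test
+	 * (featureEnable & ENABLE_TEMP_COMPENSATION).
+	 */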
+ /* misc flags: bit0 - turn down drivestrength */
+ u8 miscConfiguration;
+ u8 eepromWriteEnableGpio;
+ u8 wlanDisableGpio;
+ u8 wlanLedGpio;
+ u8 rxBandSelectGpio;
+ u8 txrxgain;
+ /* SW controlled internal regulator fields */
+ __le32 swreg;
+} __packed;
+
+struct ar9300_modal_eep_header {
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ __le32 antCtrlCommon;
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ __le32 antCtrlCommon2;
+ /* 6 idle, t, r, rx1, rx12, b (2 bits each) */
+ __le16 antCtrlChain[AR9300_MAX_CHAINS];
+ /* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ u8 xatten1DB[AR9300_MAX_CHAINS];
+	/* 3 xatten1_margin for merlin (0xa20c/b20c 16:12) */
+ u8 xatten1Margin[AR9300_MAX_CHAINS];
+ int8_t tempSlope;
+ int8_t voltSlope;
+ /* spur channels in usual fbin coding format */
+ u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
+ /* 3 Check if the register is per chain */
+ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
+ u8 ob[AR9300_MAX_CHAINS];
+ u8 db_stage2[AR9300_MAX_CHAINS];
+ u8 db_stage3[AR9300_MAX_CHAINS];
+ u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 txClip;
+ int8_t antennaGain;
+ u8 switchSettling;
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 futureModal[32];
+} __packed;
+
+struct ar9300_cal_data_per_freq_op_loop {
+ int8_t refPower;
+ /* pdadc voltage at power measurement */
+ u8 voltMeas;
+ /* pcdac used for power measurement */
+ u8 tempMeas;
+	/* range is -60 to -127; create a mapping equation with 1 dB resolution */
+ int8_t rxNoisefloorCal;
+	/* range is the same as noisefloor */
+ int8_t rxNoisefloorPower;
+ /* temp measured when noisefloor cal was performed */
+ u8 rxTempMeas;
+} __packed;
+
+struct cal_tgt_pow_legacy {
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_tgt_pow_ht {
+ u8 tPow2x[14];
+} __packed;
+
+struct cal_ctl_edge_pwr {
+ u8 tPower:6,
+ flag:2;
+} __packed;
+
+struct cal_ctl_data_2g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+} __packed;
+
+struct cal_ctl_data_5g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
+struct ar9300_eeprom {
+ u8 eepromVersion;
+ u8 templateVersion;
+ u8 macAddr[6];
+ u8 custData[AR9300_CUSTOMER_DATA_SIZE];
+
+ struct ar9300_base_eep_hdr baseEepHeader;
+
+ struct ar9300_modal_eep_header modalHeader2G;
+ u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
+ u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
+ u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
+ struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
+ struct ar9300_modal_eep_header modalHeader5G;
+ u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
+ u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
+ u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
+ struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
+} __packed;
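+
+/*
+ * The structures above are declared __packed and use explicit __le16/__le32
+ * types because they mirror the byte layout of the calibration data as it
+ * is stored in the little-endian EEPROM image, so a raw EEPROM dump maps
+ * directly onto the structure.
+ */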
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 00000000000..b15309caf1d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_initvals.h"
+
+/* General hardware code for the AR9003 hardware family */
+
+static bool ar9003_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_9300:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
+/*
+ * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
+ * ensuring it does not affect hardware bring up
+ */
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p0_mac_core,
+ ARRAY_SIZE(ar9300_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p0_mac_postamble,
+ ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p0_baseband_core,
+ ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p0_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p0_radio_core,
+ ARRAY_SIZE(ar9300_2p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p0_radio_postamble,
+ ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p0_soc_preamble,
+ ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p0_soc_postamble,
+ ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_clkreq_enable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p0,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
+ 3);
+}
+
+static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_tx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 2:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ }
+}
+
+static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_rx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+ 2);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ break;
+ }
+}
+
+/* set gain table pointers according to values read from the eeprom */
+static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ ar9003_tx_gain_table_apply(ah);
+ ar9003_rx_gain_table_apply(ah);
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288 bit serial shift
+ * register as the other analog registers. Hence the 9 writes (nine
+ * 32-bit words = 288 bits).
+ */
+static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+	if (!ah->is_pciexpress)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+		/* Several PCIe messages to ensure proper behaviour */
+ if (ah->config.pcie_waen)
+ REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+ }
+}
+
+/* Sets up the AR9003 hardware family callbacks */
+void ar9003_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9003_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9003_hw_configpcipowersave;
+
+ ar9003_hw_attach_phy_ops(ah);
+ ar9003_hw_attach_calib_ops(ah);
+ ar9003_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 00000000000..db019dd220b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_H
+#define INITVALS_9003_H
+
+/* AR9003 2.0 */
+
+static const u32 ar9300_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x40000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9300_2p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9200_merlin_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9300_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e3c, 0xcf946222},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a22c, 0x01036a1e},
+ {0x0000a234, 0x10000fff},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000246},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00000000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x00000000},
+ {0x0000a628, 0x00000000},
+ {0x0000a62c, 0x00000000},
+ {0x0000a630, 0x00000000},
+ {0x0000a634, 0x00000000},
+ {0x0000a638, 0x00000000},
+ {0x0000a63c, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00000637},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2dc, 0x00000000},
+ {0x0000b2e0, 0x00000000},
+ {0x0000b2e4, 0x00000000},
+ {0x0000b2e8, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2dc, 0x00000000},
+ {0x0000c2e0, 0x00000000},
+ {0x0000c2e4, 0x00000000},
+ {0x0000c2e8, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x98a00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 00000000000..37ba37481a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "hw.h"
+#include "ar9003_mac.h"
+
+static void ar9003_hw_rx_enable(struct ath_hw *hw)
+{
+ REG_WRITE(hw, AR_CR, 0);
+}
+
+static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+{
+ int checksum;
+
+ checksum = ads->info + ads->link
+ + ads->data0 + ads->ctl3
+ + ads->data1 + ads->ctl5
+ + ads->data2 + ads->ctl7
+ + ads->data3 + ads->ctl9;
+
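+ /* Fold the upper 16 bits of the sum into the lower 16 and mask to the
+  * pointer-checksum field.
+  */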
+ return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
+}
+
+static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->link = ds_link;
+ ads->ctl10 &= ~AR_TxPtrChkSum;
+ ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
+}
+
+static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ *ds_link = &ads->link;
+}
+
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON)
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+
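+ /*
+  * Remap the secondary (ISR_S2) status bits into the bit
+  * positions used by the driver's interrupt mask.
+  */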
+ mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
+ MAP_ISR_S2_TIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
+ MAP_ISR_S2_DTIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
+ MAP_ISR_S2_DTIMSYNC);
+ mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
+ MAP_ISR_S2_CABEND);
+ mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
+ MAP_ISR_S2_GTT);
+ mask2 |= ((isr2 & AR_ISR_S2_CST) <<
+ MAP_ISR_S2_CST);
+ mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
+ MAP_ISR_S2_TSFOOR);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
+ if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation)
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (ah->config.tx_intr_mitigation)
+ if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
+ *masked |= ATH9K_INT_TX;
+
+ if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (isr & AR_ISR_HP_RXOK)
+ *masked |= ATH9K_INT_RXHP;
+
+ if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
+ *masked |= ATH9K_INT_TX;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ u32 s0, s1;
+ s0 = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0);
+ s1 = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1);
+
+ isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+ }
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ s5 = REG_READ(ah, AR_ISR_S5_S);
+ else
+ s5 = REG_READ(ah, AR_ISR_S5);
+
+ ah->intr_gen_timer_trigger =
+ MS(s5, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5);
+ isr &= ~AR_ISR_GENTMR;
+ }
+
+ }
+
+ *masked |= mask2;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+
+ (void) REG_READ(ah, AR_ISR);
+ }
+ }
+
+ if (sync_cause) {
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+
+ }
+ return true;
+}
+
+static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int descid = 0;
+
+ ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (qcu << AR_TxQcuNum_S) | 0x17;
+
+ ads->data0 = buf_addr;
+ ads->data1 = 0;
+ ads->data2 = 0;
+ ads->data3 = 0;
+
+ ads->ctl3 = (seglen << AR_BufLen_S);
+ ads->ctl3 &= AR_BufLen;
+
+ /* Fill in pointer checksum and descriptor id */
+ ads->ctl10 = ar9003_calc_ptr_chksum(ads);
+ ads->ctl10 |= (descid << AR_TxDescId_S);
+
+ if (is_firstseg) {
+ ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ctl11 = 0;
+ ads->ctl12 = 0;
+ ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
+ ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
+ } else {
+ /* XXX Intermediate descriptor in a multi-descriptor frame. */
+ ads->ctl11 = 0;
+ ads->ctl12 = AR_TxMore;
+ ads->ctl13 = 0;
+ ads->ctl14 = 0;
+ }
+}
+
+static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar9003_txs *ads;
+
+ ads = &ah->ts_ring[ah->ts_tail];
+
+ if ((ads->status8 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+
+ if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
+ (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
+ memset(ads, 0, sizeof(*ads));
+ return -EIO;
+ }
+
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ ts->desc_id = MS(ads->status1, AR_TxDescId);
+ ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
+ ts->ts_tstamp = ads->status4;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->status3 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->status3 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->status3 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status8 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->status3 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->status3 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->status3 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status3 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status2 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
+
+ ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
+ ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ ts->tid = MS(ads->status8, AR_TxTid);
+
+ memset(ads, 0, sizeof(*ads));
+
+ return 0;
+}
+
+static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
+ u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (txpower > ah->txpower_limit)
+ txpower = ah->txpower_limit;
+
+ txpower += ah->txpower_indexoffset;
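+ /* Clamp to 63, presumably the maximum of the 6-bit transmit power field */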
+ if (txpower > 63)
+ txpower = 63;
+
+ ads->ctl11 = (pktlen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txpower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
+
+ ads->ctl12 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ctl17 = SM(keyType, AR_EncrType) |
+ (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ ads->ctl18 = 0;
+ ads->ctl19 = AR_Not_Sounding;
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+}
+
+static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
+ u_int32_t ctl11;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ctl11 = ads->ctl11;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ctl11 &= ~AR_CTSEnable;
+ ctl11 |= AR_RTSEnable;
+ } else {
+ ctl11 &= ~AR_RTSEnable;
+ ctl11 |= AR_CTSEnable;
+ }
+
+ ads->ctl11 = ctl11;
+ } else {
+ ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ctl13 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ctl14 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ctl15 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ctl16 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ctl18 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ ads->ctl19 = AR_Not_Sounding;
+
+ last_ads->ctl13 = ads->ctl13;
+ last_ads->ctl14 = ads->ctl14;
+}
+
+static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ ads->ctl17 &= ~AR_AggrLen;
+ ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int ctl17;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ /*
+ * We use a stack variable to manipulate ctl17 to reduce uncached
+ * read-modify-write operations.
+ */
+ ctl17 = ads->ctl17;
+ ctl17 &= ~AR_PadDelim;
+ ctl17 |= SM(numDelims, AR_PadDelim);
+ ads->ctl17 = ctl17;
+}
+
+static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= AR_IsAggr;
+ ads->ctl12 &= ~AR_MoreAggr;
+ ads->ctl17 &= ~AR_PadDelim;
+}
+
+static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl13 &= ~AR_BurstDur;
+ ads->ctl13 |= SM(burstDuration, AR_BurstDur);
+}
+
+static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (vmf)
+ ads->ctl11 |= AR_VirtMoreFrag;
+ else
+ ads->ctl11 &= ~AR_VirtMoreFrag;
+}
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(hw);
+
+ ops->rx_enable = ar9003_hw_rx_enable;
+ ops->set_desc_link = ar9003_hw_set_desc_link;
+ ops->get_desc_link = ar9003_hw_get_desc_link;
+ ops->get_isr = ar9003_hw_get_isr;
+ ops->fill_txdesc = ar9003_hw_fill_txdesc;
+ ops->proc_txdesc = ar9003_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
+}
+
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
+{
+ REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
+}
+EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
+
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype)
+{
+ if (qtype == ATH9K_RX_QUEUE_HP)
+ REG_WRITE(ah, AR_HP_RXDP, rxdp);
+ else
+ REG_WRITE(ah, AR_LP_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ void *buf_addr)
+{
+ struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ unsigned int phyerr;
+
+ /* TODO: byte swap on big endian for ar9300_10 */
+
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
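+ /* 0x168c is the Atheros PCI vendor ID used to tag valid descriptors */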
+ if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
+ return -EINVAL;
+
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
+
+ if (!rxs)
+ return 0;
+
+ rxs->rs_status = 0;
+ rxs->rs_flags = 0;
+
+ rxs->rs_datalen = rxsp->status2 & AR_DataLen;
+ rxs->rs_tstamp = rxsp->status3;
+
+ /* XXX: Keycache */
+ rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
+ rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+
+ if (rxsp->status11 & AR_RxKeyIdxValid)
+ rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
+ else
+ rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
+ rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+
+ rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
+ rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
+ rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
+ rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
+ rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ rxs->evm0 = rxsp->status6;
+ rxs->evm1 = rxsp->status7;
+ rxs->evm2 = rxsp->status8;
+ rxs->evm3 = rxsp->status9;
+ rxs->evm4 = (rxsp->status10 & 0xffff);
+
+ if (rxsp->status11 & AR_PreDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+
+ if (rxsp->status11 & AR_PostDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+
+ if (rxsp->status11 & AR_DecryptBusyErr)
+ rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((rxsp->status11 & AR_RxFrameOK) == 0) {
+ if (rxsp->status11 & AR_CRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_CRC;
+ } else if (rxsp->status11 & AR_PHYErr) {
+ rxs->rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(rxsp->status11, AR_PHYErrCode);
+ rxs->rs_phyerr = phyerr;
+ } else if (rxsp->status11 & AR_DecryptCRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ } else if (rxsp->status11 & AR_MichaelErr) {
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
+{
+ ah->ts_tail = 0;
+
+ memset((void *) ah->ts_ring, 0,
+ ah->ts_size * sizeof(struct ar9003_txs));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
+
+ REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
+ REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
+}
+
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size)
+{
+ ah->ts_paddr_start = ts_paddr_start;
+ ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
+ ah->ts_size = size;
+ ah->ts_ring = (struct ar9003_txs *) ts_start;
+
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setup_statusring);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 00000000000..f17558b1453
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MAC_H
+#define AR9003_MAC_H
+
+#define AR_DescId 0xffff0000
+#define AR_DescId_S 16
+#define AR_CtrlStat 0x00004000
+#define AR_CtrlStat_S 14
+#define AR_TxRxDesc 0x00008000
+#define AR_TxRxDesc_S 15
+#define AR_TxQcuNum 0x00000f00
+#define AR_TxQcuNum_S 8
+
+#define AR_BufLen 0x0fff0000
+#define AR_BufLen_S 16
+
+#define AR_TxDescId 0xffff0000
+#define AR_TxDescId_S 16
+#define AR_TxPtrChkSum 0x0000ffff
+
+#define AR_TxTid 0xf0000000
+#define AR_TxTid_S 28
+
+#define AR_LowRxChain 0x00004000
+
+#define AR_Not_Sounding 0x20000000
+
+#define MAP_ISR_S2_CST 6
+#define MAP_ISR_S2_GTT 6
+#define MAP_ISR_S2_TIM 3
+#define MAP_ISR_S2_CABEND 0
+#define MAP_ISR_S2_DTIMSYNC 7
+#define MAP_ISR_S2_DTIM 7
+#define MAP_ISR_S2_TSFOOR 4
+
+#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
+
+struct ar9003_rxs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ u32 status10;
+ u32 status11;
+} __packed;
+
+/* Transmit Control Descriptor */
+struct ar9003_txc {
+ u32 info; /* descriptor information */
+ u32 link; /* link pointer */
+ u32 data0; /* data pointer to 1st buffer */
+ u32 ctl3; /* DMA control 3 */
+ u32 data1; /* data pointer to 2nd buffer */
+ u32 ctl5; /* DMA control 5 */
+ u32 data2; /* data pointer to 3rd buffer */
+ u32 ctl7; /* DMA control 7 */
+ u32 data3; /* data pointer to 4th buffer */
+ u32 ctl9; /* DMA control 9 */
+ u32 ctl10; /* DMA control 10 */
+ u32 ctl11; /* DMA control 11 */
+ u32 ctl12; /* DMA control 12 */
+ u32 ctl13; /* DMA control 13 */
+ u32 ctl14; /* DMA control 14 */
+ u32 ctl15; /* DMA control 15 */
+ u32 ctl16; /* DMA control 16 */
+ u32 ctl17; /* DMA control 17 */
+ u32 ctl18; /* DMA control 18 */
+ u32 ctl19; /* DMA control 19 */
+ u32 ctl20; /* DMA control 20 */
+ u32 ctl21; /* DMA control 21 */
+ u32 ctl22; /* DMA control 22 */
+ u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
+} __packed;
+
+struct ar9003_txs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+} __packed;
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
+ struct ath_rx_status *rxs,
+ void *buf_addr);
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 00000000000..80431a2f6dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+/**
+ * ar9003_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function to change channel on single-chip devices, that is
+ * all devices after ar9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ */
+static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode = 0, aModeRefSel = 0;
+ u32 freq, channelSel = 0, reg32 = 0;
+ struct chan_centers centers;
+ int loadSynthChannel;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
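+ /*
+  * Illustration, per the kernel-doc formula above with freq_ref =
+  * 40 MHz: chansel + chanfrac/2^17 = freq / 30, so 2412 MHz gives
+  * an integer part of 80 and a fractional part of roughly
+  * 0.4 * 2^17 ~= 52429. CHANSEL_2G() presumably packs these into
+  * the channelSel value programmed below.
+  */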
+ channelSel = CHANSEL_2G(freq);
+ /* Set to 2G mode */
+ bMode = 1;
+ } else {
+ channelSel = CHANSEL_5G(freq);
+ /* Doubler is ON, so, divide channelSel by 2. */
+ channelSel >>= 1;
+ /* Set to 5G mode */
+ bMode = 0;
+ }
+
+ /* Enable fractional mode for all channels */
+ fracMode = 1;
+ aModeRefSel = 0;
+ loadSynthChannel = 0;
+
+ reg32 = (bMode << 29);
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ /* Enable Long shift Select for Synthesizer */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
+ AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
+
+ /* Program Synth. setting */
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ /* Toggle Load Synth channel bit */
+ loadSynthChannel = 1;
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
+
+/**
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel to operate on
+ *
+ * For single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and compute register settings below.
+ *
+ * Spur mitigation for MRC CCK
+ */
+static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ int cur_bb_spur, negative = 0, cck_spur_freq;
+ int i;
+
+ /*
+ * Check whether the spur is within +/- 10 MHz of the control channel;
+ * otherwise it is out-of-band and can be ignored.
+ */
+
+ for (i = 0; i < 4; i++) {
+ negative = 0;
+ cur_bb_spur = spur_freq[i] - chan->channel;
+
+ if (cur_bb_spur < 0) {
+ negative = 1;
+ cur_bb_spur = -cur_bb_spur;
+ }
+ if (cur_bb_spur < 10) {
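+ /*
+  * (cur_bb_spur << 19) / 11 appears to express the spur offset
+  * (MHz) as a signed fraction of the 11 MHz CCK chip rate in
+  * 2^19 fixed point.
+  */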
+ cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
+
+ if (negative == 1)
+ cck_spur_freq = -cck_spur_freq;
+
+ cck_spur_freq = cck_spur_freq & 0xfffff;
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
+ 0x2);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
+ 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
+ cck_spur_freq);
+
+ return;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
+}
+
+/* Clean all spur register fields */
+static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
+}
+
+static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
+ int freq_offset,
+ int spur_freq_sd,
+ int spur_delta_phase,
+ int spur_subchannel_sd)
+{
+ int mask_index = 0;
+
+ /* OFDM Spur mitigation */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
+
+ if (REG_READ_FIELD(ah, AR_PHY_MODE,
+ AR_PHY_MODE_DYNAMIC) == 0x1)
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
+
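+ /*
+  * Convert the spur offset (MHz) to a subcarrier index: multiplying
+  * by 16/5 divides by 0.3125 MHz, the 312.5 kHz OFDM subcarrier
+  * spacing.
+  */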
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
+}
+
+static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int freq_offset)
+{
+ int spur_freq_sd = 0;
+ int spur_subchannel_sd = 0;
+ int spur_delta_phase = 0;
+
+ if (IS_CHAN_HT40(chan)) {
+ if (freq_offset < 0) {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 1;
+ else
+ spur_subchannel_sd = 0;
+
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+
+ } else {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 0;
+ else
+ spur_subchannel_sd = 1;
+
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+
+ }
+
+ spur_delta_phase = (freq_offset << 17) / 5;
+
+ } else {
+ spur_subchannel_sd = 0;
+ spur_freq_sd = (freq_offset << 9) / 11;
+ spur_delta_phase = (freq_offset << 18) / 5;
+ }
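+
+ /*
+  * spur_delta_phase appears to be the spur offset as a fraction of
+  * the sampling rate in 2^20 fixed point: x << 17 / 5 equals
+  * x * 2^20 / 40 (HT40) and x << 18 / 5 equals x * 2^20 / 20 (HT20).
+  */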
+
+ spur_freq_sd = spur_freq_sd & 0x3ff;
+ spur_delta_phase = spur_delta_phase & 0xfffff;
+
+ ar9003_hw_spur_ofdm(ah,
+ freq_offset,
+ spur_freq_sd,
+ spur_delta_phase,
+ spur_subchannel_sd);
+}
+
+/* Spur mitigation for OFDM */
+static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int synth_freq;
+ int range = 10;
+ int freq_offset = 0;
+ int mode;
+ u8* spurChansPtr;
+ unsigned int i;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
+ mode = 0;
+ } else {
+ spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
+ mode = 1;
+ }
+
+ if (spurChansPtr[0] == 0)
+ return; /* No spur in the mode */
+
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ synth_freq = chan->channel - 10;
+ else
+ synth_freq = chan->channel + 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+
+ ar9003_hw_spur_ofdm_clear(ah);
+
+ for (i = 0; i < 5 && spurChansPtr[i]; i++) {
+ freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
+ if (abs(freq_offset) < range) {
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+ break;
+ }
+ }
+}
+
+static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ ar9003_hw_spur_mitigate_ofdm(ah, chan);
+}
+
+static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
+
+ return pll;
+}
+
+static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ enableDacFifo =
+ (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
+
+ /* Enable 11n HT, 20 MHz */
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
+ AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ /* Configure baseband for dynamic 20/40 operation */
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_GC_DYN2040_EN;
+ /* Configure control (primary) channel at +-10MHz */
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_GC_DYN2040_PRI_CH;
+
+ }
+
+ /* make sure we preserve INI settings */
+ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
+ /* turn off Green Field detection for STA for now */
+ phymode &= ~AR_PHY_GC_GF_DETECT_EN;
+
+ REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
+
+ /* Configure MAC for 20/40 operation */
+ ath9k_hw_set11nmac2040(ah);
+
+ /* global transmit timeout (25 TUs default)*/
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ /* carrier sense timeout */
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ar9003_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ /*
+ * Wait for the frequency synth to settle (synth goes on
+ * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
+ * Value is in 100ns increments.
+ */
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ /* Activate the PHY (includes baseband activate + synthesizer on) */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * There is an issue if the AP starts the calibration before
+ * the baseband timeout completes. This could result in
+ * rx_clear falsely triggering. As a workaround we wait an
+ * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
+ * does not happen.
+ */
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+{
+ switch (rx) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
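+ /* fall through */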
+ case 0x3:
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ if (tx == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+}
+
+/*
+ * Override INI values with chip specific configuration.
+ */
+static void ar9003_hw_override_ini(struct ath_hw *ah)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS bits and clear them only after
+ * RXE is set for the MAC. This prevents frames with
+ * corrupted descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID. By default,
+ * this feature is enabled. But since the driver is not using this
+ * feature, we switch it off; otherwise multicast search based on
+ * MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
+ REG_WRITE(ah, AR_PCU_MISC_MODE2,
+ val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
+}
+
+static void ar9003_hw_prog_ini(struct ath_hw *ah,
+ struct ar5416IniArray *iniArr,
+ int column)
+{
+ unsigned int i, regWrites = 0;
+
+ /* New INI format: Array may be undefined (pre, core, post arrays) */
+ if (!iniArr->ia_array)
+ return;
+
+ /*
+ * New INI format: Pre, core, and post arrays for a given subsystem
+ * may be modal (> 2 columns) or non-modal (2 columns). Determine if
+ * the array is non-modal and force the column to 1.
+ */
+ if (column >= iniArr->ia_columns)
+ column = 1;
+
+ for (i = 0; i < iniArr->ia_rows; i++) {
+ u32 reg = INI_RA(iniArr, i, 0);
+ u32 val = INI_RA(iniArr, i, column);
+
+ REG_WRITE(ah, reg, val);
+
+ /*
+ * Determine if this is a shift register value, and insert the
+ * configured delay if so.
+ */
+ if (reg >= 0x16000 && reg < 0x17000
+ && ah->config.analog_shiftreg)
+ udelay(100);
+
+ DO_DELAY(regWrites);
+ }
+}
+
+static int ar9003_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ unsigned int regWrites = 0, i;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional,
+ modesIndex, regWrites);
+
+ ar9003_hw_override_ini(ah);
+ ar9003_hw_set_channel_regs(ah, chan);
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ return 0;
+}
+
+static void ar9003_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ /*
+ * Half and quarter rate channels divide the scaled clock by 2 or 4
+ * to scale it for the selected channel bandwidth.
+ */
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ /*
+ * ALGO -> coef = 1e8/fcarrier*fclock/40;
+ * The coefficient is kept in scaled fixed point to provide precision
+ * for this otherwise floating-point calculation.
+ */
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ /*
+ * For Short GI, the scaled coefficient is 9/10 of the
+ * normal coefficient.
+ */
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ /* for short gi */
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_EXP, ds_coef_exp);
+}
+
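A worked instance of the fixed-point scaling used above (illustrative arithmetic, not part of the patch): 0x64000000 is 100 << 24, i.e. the 100 MHz term carried with 24 fractional bits, so coef_scaled ends up as roughly 100 / f_synth[MHz] in Q24 format.

/* Example for a full-rate 5 GHz channel with synth center 5180 MHz */
u32 clock_scaled = 0x64000000;			/* 100 << 24              */
u32 synth_center = 5180;			/* MHz                    */
u32 coef_scaled = clock_scaled / synth_center;	/* = 323884, ~0.0193 Q24  */
/* ath9k_hw_get_delta_slope_vals() then splits this into man/exp fields */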
+static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+/*
+ * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
+ * Read the phy active delay register. Value is in 100ns increments.
+ */
+static void ar9003_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
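Reading the conversion above with concrete numbers (illustrative only; is_chan_b below stands in for the IS_CHAN_B(ah->curchan) check): the delay register counts in 100 ns units, so the non-CCK path divides by 10 to get microseconds, while the CCK path rescales it by 4/22.

/* Example: AR_PHY_RX_DELAY_DELAY reads back as 200 (200 * 100 ns = 20 us) */
u32 synth_delay = 200;
u32 wait_us = is_chan_b ? (4 * synth_delay) / 22	/* 36 us for CCK  */
			: synth_delay / 10;		/* 20 us for OFDM */
/* the function then waits wait_us + BASE_ACTIVATE_DELAY microseconds */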
+/*
+ * Set the interrupt and GPIO values so the ISR can disable RF
+ * on a switch signal. Assumes GPIO port and interrupt polarity
+ * are set prior to call.
+ */
+static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
+{
+ /* Connect rfsilent_bb_l to baseband */
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ /* Set input mux for rfsilent_bb_l to GPIO #0 */
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ /*
+ * Configure the desired GPIO port for input and
+ * enable baseband rf silence.
+ */
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
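A hedged sketch of how this callback is exercised: the shared ANI code adjusts one parameter at a time through the ani_control op and checks the return value, roughly along these lines (call-site shape assumed, not part of this hunk):

/* Raise spur immunity by one step; the op rejects out-of-range levels */
if (!ath9k_hw_private_ops(ah)->ani_control(ah,
					    ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
					    aniState->spurImmunityLevel + 1))
	ath_print(common, ATH_DBG_ANI,
		  "spur immunity level could not be raised\n");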
+static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_2g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_2g_max);
+ *nf = ah->nf_2g_max;
+ } else if (*nf < ah->nf_2g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_2g_min);
+ *nf = ah->nf_2g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_5g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX",
+ *nf, ah->nf_5g_max);
+ *nf = ah->nf_5g_max;
+ } else if (*nf < ah->nf_5g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN",
+ *nf, ah->nf_5g_min);
+ *nf = ah->nf_5g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+{
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ar9003_hw_nf_sanitize_2g(ah, nf);
+ else
+ ar9003_hw_nf_sanitize_5g(ah, nf);
+}
+
+static void ar9003_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
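The minCCApwr fields decoded above are 9-bit two's-complement quantities, which is what the `nf & 0x100` test and the XOR/add-one step implement. A worked example (illustrative only):

/* A raw 9-bit reading of 0x1a5 (421) has bit 8 set, so it is negative */
int16_t nf = 0x1a5;
if (nf & 0x100)
	nf = 0 - ((nf ^ 0x1ff) + 1);	/* -> -91, i.e. 421 - 512 */
/* ar9003_hw_nf_sanitize() then clamps the result to the per-band limits */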
+void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
+ ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
+}
+
+/*
+ * Find out which of the RX chains are enabled
+ */
+static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
+{
+ u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ /*
+ * Bits [2:0] indicate the rx chain mask and are to be
+ * interpreted as follows:
+ * 00x => only chain 0 is enabled
+ * 01x => chains 1 and 0 are enabled
+ * 1xx => chains 2, 1 and 0 are enabled
+ */
+ return chain & 0x7;
+}
+
+static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ unsigned i, j;
+ int32_t val;
+ const u32 ar9300_cca_regs[6] = {
+ AR_PHY_CCA_0,
+ AR_PHY_CCA_1,
+ AR_PHY_CCA_2,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_1,
+ AR_PHY_EXT_CCA_2,
+ };
+ u8 chainmask, rx_chain_status;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ rx_chain_status = ar9003_hw_get_rx_chainmask(ah);
+
+ chainmask = 0x3F;
+ h = ah->nfCalHist;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+
+ /*
+ * Load the software-filtered NF value into the baseband's internal
+ * minCCApwr variable.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ /*
+ * Wait for the load to complete; this should be fast, a few tens of us.
+ * The max delay was changed from the original 250us to 10000us, since
+ * 250us often results in an NF load timeout and causes a deaf condition
+ * during stress testing (12/12/2009).
+ */
+ for (j = 0; j < 1000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * If we timed out waiting for the noise floor to load, it is probably
+ * due to an in-progress rx. Simply return here and allow the load
+ * plenty of time to complete before the next calibration interval.
+ * We need to avoid trying to load -50 (which happens below) while the
+ * previous load is still in progress, as this can cause rx deafness.
+ * Instead, by returning here, the baseband nf cal will simply be
+ * capped by our present noise floor until the next calibration timer.
+ */
+ if (j == 1000) {
+ ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
+ "to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
+ return;
+ }
+
+ /*
+ * Restore the maxCCAPower register parameter again so that we're not
+ * capped by the median we just loaded. This will be the initial (and
+ * max) value of the next noise floor calibration the baseband does.
+ */
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+}
+
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar9003_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
+ priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
+ priv_ops->init_bb = ar9003_hw_init_bb;
+ priv_ops->process_ini = ar9003_hw_process_ini;
+ priv_ops->set_rfmode = ar9003_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar9003_hw_rfbus_req;
+ priv_ops->rfbus_done = ar9003_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
+ priv_ops->set_diversity = ar9003_hw_set_diversity;
+ priv_ops->ani_control = ar9003_hw_ani_control;
+ priv_ops->do_getnf = ar9003_hw_do_getnf;
+ priv_ops->loadnf = ar9003_hw_loadnf;
+}
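For orientation, these private ops are not called directly; the generic ath9k_hw code dispatches through them, so the routines in this file are reached via thin wrappers of roughly this shape (assumed from the hw-ops split in this series, shown only as a sketch):

/* Sketch of the dispatch wrapper (see hw-ops.h for the real definitions) */
static inline int ath9k_hw_process_ini(struct ath_hw *ah,
				       struct ath9k_channel *chan)
{
	return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
}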
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 00000000000..f08cc8bda00
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
+/*
+ * Copyright (c) 2002-2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_PHY_H
+#define AR9003_PHY_H
+
+/*
+ * Channel Register Map
+ */
+#define AR_CHAN_BASE 0x9800
+
+#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
+#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
+#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
+#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
+#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
+#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
+#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
+#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
+#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
+#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
+
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
+
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
+
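Every field in this header follows the driver's mask-plus-shift convention: a mask constant paired with a companion constant ending in _S that gives the bit offset. The driver's MS() and REG_RMW_FIELD() helpers (defined elsewhere in ath9k; the macro below is a stand-in used only to illustrate the convention) extract and rewrite such fields:

/* Stand-in for the driver's MS() helper, illustrating the _S convention */
#define MS_SKETCH(_v, _f)	(((_v) & _f) >> _f##_S)

u32 timing11 = REG_READ(ah, AR_PHY_TIMING11);
u32 spur_sd = MS_SKETCH(timing11, AR_PHY_TIMING11_SPUR_FREQ_SD); /* bits 29:20 */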
+#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
+
+#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
+#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
+#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
+
+#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
+#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
+#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
+#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
+#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
+#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
+
+#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
+#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
+#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
+#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
+
+/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
+#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
+#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
+#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
+
+#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
+#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
+#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
+
+/*
+ * Channel Field Definitions
+ */
+#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
+#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
+
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
+
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
+#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
+#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
+#define AR_PHY_TIMING5_RSSI_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
+#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
+#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
+#define AR_PHY_RADAR_LB_DC_CAP_S 23
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
+#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
+#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
+#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
+
+/*
+ * MRC Register Map
+ */
+#define AR_MRC_BASE 0x9c00
+
+#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
+#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
+#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
+#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
+#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
+#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
+#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
+#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
+#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
+
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+
+/*
+ * MRC Field Definitions
+ */
+#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_SGI_DSC_MAN_S 4
+#define AR_PHY_SGI_DSC_EXP 0x0000000F
+#define AR_PHY_SGI_DSC_EXP_S 0
+/*
+ * BBB Register Map
+ */
+#define AR_BBB_BASE 0x9d00
+
+/*
+ * AGC Register Map
+ */
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
+#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
+#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
+#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
+#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
+#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
+#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
+#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
+#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
+#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
+#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
+#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
+#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
+#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
+#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
+#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
+#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
+#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
+
+#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+
+#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
+
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+
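These are the per-band limits installed by ar9003_hw_set_nf_limits() earlier in this patch and enforced by the nf_sanitize helpers. A small illustrative clamp (not part of the patch) showing how a reported value is bounded:

/* Example: a 2 GHz reading of -90 is "too good" and gets clamped */
s16 nf = -90;
if (nf > AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ)	/* -90 > -95 */
	nf = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;	/* clamp to -95 */
else if (nf < AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ)
	nf = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;	/* floor at -125 */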
+/*
+ * AGC Field Definitions
+ */
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+#define AR_PHY_MINCCA_PWR 0x1FF00000
+#define AR_PHY_MINCCA_PWR_S 20
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_COARSE_LOW_S 7
+#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_COARSE_HIGH_S 15
+#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
+#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
+#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
+#define AR_PHY_FIND_SIG_RELPWR_S 6
+#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
+#define AR_PHY_FIND_SIG_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+#define AR_PHY_RESTART_ENA 0x01
+#define AR_PHY_DC_RESTART_DIS 0x40000000
+
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
+#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
+#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
+
+/*
+ * SM Register Map
+ */
+#define AR_SM_BASE 0xa200
+
+#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
+#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
+#define AR_PHY_MODE (AR_SM_BASE + 0x8)
+#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
+#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
+#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
+#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
+#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
+#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
+#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
+#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
+#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
+#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
+
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
+#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
+#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
+#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
+#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
+#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
+#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
+#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
+#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
+#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
+#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
+#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
+#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
+#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
+#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
+#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
+#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
+
+#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+
+#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
+#define AR_PHY_TEST_BBB_OBS_SEL_S 19
+
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
+
+#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
+#define AR_PHY_TEST_CHAIN_SEL_S 30
+
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
+#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
+#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
+#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
+#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
+#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
+#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
+#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
+#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+
+
+#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+
+#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
+#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+
+#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+
+#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
+#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
+#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
+
+#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
+#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
+#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
+#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_BIAS1 0x160c0
+#define AR_PHY_65NM_CH0_BIAS2 0x160c4
+#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH0_THERM 0x16290
+
+#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
+#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
+#define AR_PHY_65NM_CH0_THERM_START 0x20000000
+#define AR_PHY_65NM_CH0_THERM_START_S 29
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+
+#define AR_PHY_65NM_CH0_RXTX1 0x16100
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX1 0x16500
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX1 0x16900
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
+
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
+#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
+#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
+#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
+#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
+#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
+#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
+#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
+#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
+
+/*
+ * SM Field Definitions
+ */
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
+
+#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
+#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
+
+#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
+#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
+#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
+#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
+#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
+#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
+#define AR_PHY_GC_DYN2040_PRI_CH_S 4
+#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
+#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
+#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
+#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
+#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
+#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
+#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+#define AR_PHY_MODE_OFDM 0x00000000
+#define AR_PHY_MODE_CCK 0x00000001
+#define AR_PHY_MODE_DYNAMIC 0x00000004
+#define AR_PHY_MODE_DYNAMIC_S 2
+#define AR_PHY_MODE_HALF 0x00000020
+#define AR_PHY_MODE_QUARTER 0x00000040
+#define AR_PHY_MAC_CLK_MODE 0x00000080
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
+#define AR_PHY_MODE_SVD_HALF 0x00000200
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
+#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
+#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
+
+#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
+
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+/*
+ * Channel 1 Register Map
+ */
+#define AR_CHAN1_BASE 0xa800
+
+#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
+
+/*
+ * Channel 1 Field Definitions
+ */
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+/*
+ * AGC 1 Register Map
+ */
+#define AR_AGC1_BASE 0xae00
+
+#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
+#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
+#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
+#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
+
+/*
+ * AGC 1 Field Definitions
+ */
+#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH1_MINCCA_PWR_S 20
+
+/*
+ * SM 1 Register Map
+ */
+#define AR_SM1_BASE 0xb200
+
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
+
+/*
+ * Channel 2 Register Map
+ */
+#define AR_CHAN2_BASE 0xb800
+
+#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
+
+/*
+ * Channel 2 Field Definitions
+ */
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
+/*
+ * AGC 2 Register Map
+ */
+#define AR_AGC2_BASE 0xbe00
+
+#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
+#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
+#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
+
+/*
+ * AGC 2 Field Definitions
+ */
+#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH2_MINCCA_PWR_S 20
+
+/*
+ * SM 2 Register Map
+ */
+#define AR_SM2_BASE 0xc200
+
+#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
+#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
+#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
+#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
+#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
+#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
+
+#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
+
+/*
+ * AGC 3 Register Map
+ */
+#define AR_AGC3_BASE 0xce00
+
+#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+
+/*
+ * Misc helper defines
+ */
+#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
+
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
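A quick arithmetic check of these helpers (illustrative, not part of the patch): AR_PHY_CHAIN_OFFSET = AR_CHAN1_BASE - AR_CHAN_BASE = 0xa800 - 0x9800 = 0x1000, so AR_PHY_SWITCH_CHAIN(1) = (0xa200 + 0x84) + 0x1000 = 0xb284, which is exactly AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84) from the SM 1 map above.

/* Optional compile-time sanity check (illustrative; place inside a function) */
BUILD_BUG_ON(AR_PHY_SWITCH_CHAIN(1) != AR_PHY_SWITCH_CHAIN_1);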
+#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
+#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
+#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_BB_WD_STATUS 0x00000007
+#define AR_PHY_BB_WD_STATUS_S 0
+#define AR_PHY_BB_WD_DET_HANG 0x00000008
+#define AR_PHY_BB_WD_DET_HANG_S 3
+#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
+#define AR_PHY_BB_WD_RADAR_SM_S 4
+#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
+#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
+#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
+#define AR_PHY_BB_WD_RX_CCK_SM_S 12
+#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
+#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
+#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
+#define AR_PHY_BB_WD_TX_CCK_SM_S 20
+#define AR_PHY_BB_WD_AGC_SM 0x0F000000
+#define AR_PHY_BB_WD_AGC_SM_S 24
+#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
+#define AR_PHY_BB_WD_SRCH_SM_S 28
+
+#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4c007..fbb7dec6dde 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
+#define ATH_TXSTATUS_RING_SIZE 64
+
struct ath_descdma {
- struct ath_desc *dd_desc;
+ void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc);
+ int nbuf, int ndesc, bool is_tx);
void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head);
@@ -178,9 +180,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
-#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
-#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
-#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -191,6 +190,7 @@ enum ATH_AGGR_STATUS {
ATH_AGGR_LIMITED,
};
+#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
u32 axq_qnum;
u32 *axq_link;
@@ -200,6 +200,10 @@ struct ath_txq {
bool stopped;
bool axq_tx_inprogress;
struct list_head axq_acq;
+ struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
+ struct list_head txq_fifo_pending;
+ u8 txq_headidx;
+ u8 txq_tailidx;
};
#define AGGR_CLEANUP BIT(1)
@@ -226,6 +230,12 @@ struct ath_tx {
struct ath_descdma txdma;
};
+struct ath_rx_edma {
+ struct sk_buff_head rx_fifo;
+ struct sk_buff_head rx_buffers;
+ u32 rx_fifo_hwsize;
+};
+
struct ath_rx {
u8 defant;
u8 rxotherant;
@@ -235,6 +245,8 @@ struct ath_rx {
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
};
int ath_startrecv(struct ath_softc *sc);
@@ -243,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
-int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -261,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
+void ath_tx_edma_tasklet(struct ath_softc *sc);
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -483,7 +496,6 @@ struct ath_softc {
bool ps_enabled;
bool ps_idle;
unsigned long ps_usecount;
- enum ath9k_int imask;
struct ath_config config;
struct ath_rx rx;
@@ -511,6 +523,8 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct ath_btcoex btcoex;
+
+ struct ath_descdma txsdma;
};
struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a43a62..f43d85a302c 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -76,24 +76,13 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
ds = bf->bf_desc;
flags = ATH9K_TXDESC_NOACK;
- if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) &&
- (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
- ds->ds_link = bf->bf_daddr; /* self-linked */
- flags |= ATH9K_TXDESC_VEOL;
- /* Let hardware handle antenna switching. */
- antenna = 0;
- } else {
- ds->ds_link = 0;
- /*
- * Switch antenna every beacon.
- * Should only switch every beacon period, not for every SWBA
- * XXX assumes two antennae
- */
- antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
- }
-
- ds->ds_data = bf->bf_buf_addr;
+ ds->ds_link = 0;
+ /*
+ * Switch antenna every beacon.
+ * Should only switch every beacon period, not for every SWBA
+ * XXX assumes two antennae
+ */
+ antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
@@ -109,7 +98,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
/* NB: beacon's BufLen must be a multiple of 4 bytes */
ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
- true, true, ds);
+ true, true, ds, bf->bf_buf_addr,
+ sc->beacon.beaconq);
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
series[0].Tries = 1;
@@ -216,36 +206,6 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
return bf;
}
-/*
- * Startup beacon transmission for adhoc mode when they are sent entirely
- * by the hardware using the self-linked descriptor + veol trick.
-*/
-static void ath_beacon_start_adhoc(struct ath_softc *sc,
- struct ieee80211_vif *vif)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
- struct ath_vif *avp;
- struct sk_buff *skb;
-
- avp = (void *)vif->drv_priv;
-
- if (avp->av_bcbuf == NULL)
- return;
-
- bf = avp->av_bcbuf;
- skb = bf->bf_mpdu;
-
- ath_beacon_setup(sc, avp, bf, 0);
-
- /* NB: caller is known to have already stopped tx dma */
- ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
- ath9k_hw_txstart(ah, sc->beacon.beaconq);
- ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
- sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
-}
-
int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
{
struct ath_softc *sc = aphy->sc;
@@ -266,7 +226,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
list_del(&avp->av_bcbuf->list);
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
+ sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
+ sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
int slot;
/*
* Assign the vif to a beacon xmit slot. As
@@ -275,17 +236,11 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
avp->av_bslot = 0;
for (slot = 0; slot < ATH_BCBUF; slot++)
if (sc->beacon.bslot[slot] == NULL) {
- /*
- * XXX hack, space out slots to better
- * deal with misses
- */
- if (slot+1 < ATH_BCBUF &&
- sc->beacon.bslot[slot+1] == NULL) {
- avp->av_bslot = slot+1;
- break;
- }
avp->av_bslot = slot;
+
/* NB: keep looking for a double slot */
+ if (slot == 0 || !sc->beacon.bslot[slot-1])
+ break;
}
BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
sc->beacon.bslot[avp->av_bslot] = vif;
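The reworked loop above records each free slot it encounters and only stops early when it reaches a slot whose predecessor is also free, so a vif prefers a "double slot" with an empty neighbour. A standalone sketch of that selection (illustrative only, using a hypothetical pick_bslot() helper and a private ATH_BCBUF_SKETCH constant):

#define ATH_BCBUF_SKETCH 4

/* Returns the slot the loop above would settle on for a given occupancy */
static int pick_bslot(void *bslot[ATH_BCBUF_SKETCH])
{
	int slot, pick = 0;

	for (slot = 0; slot < ATH_BCBUF_SKETCH; slot++)
		if (bslot[slot] == NULL) {
			pick = slot;
			/* NB: keep looking for a double slot */
			if (slot == 0 || !bslot[slot - 1])
				break;
		}
	return pick;
}
/* With bslot = { vifA, NULL, NULL, NULL } this yields 2: slot 1 is free but
 * adjacent to vifA, so the search continues to the first double slot. */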
@@ -524,6 +479,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
static void ath_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
+ struct ath_hw *ah = sc->sc_ah;
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +495,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* prepare beacon frames.
*/
intval |= ATH9K_BEACON_ENA;
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed AP beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Clear the reset TSF flag, so that subsequent beacon updation
will not reset the HW TSF. */
@@ -566,7 +522,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
static void ath_beacon_config_sta(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
@@ -605,7 +562,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
@@ -678,17 +635,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* Set the computed STA beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
- ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs);
- sc->imask |= ATH9K_INT_BMISS;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ ah->imask |= ATH9K_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u64 tsf;
u32 tsftu, intval, nexttbtt;
@@ -703,7 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
else if (intval)
nexttbtt = roundup(nexttbtt, intval);
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
do {
nexttbtt += intval;
@@ -719,21 +677,16 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
* self-linked tx descriptor and let the hardware deal with things.
*/
intval |= ATH9K_BEACON_ENA;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed ADHOC beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
-
- /* FIXME: Handle properly when vif is NULL */
- if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
- ath_beacon_start_adhoc(sc, vif);
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
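The adhoc path above derives the next TBTT by converting the 64-bit TSF (microseconds) into time units (1 TU = 1024 us) and stepping forward in whole beacon intervals until the target lies in the future. A rough standalone sketch with FUDGE and the TSF_TO_TU() macro folded into plain arithmetic (values illustrative):

#include <stdint.h>

static uint32_t next_tbtt_tu(uint64_t tsf_us, uint32_t intval_tu,
			     uint32_t nexttbtt_tu)
{
	/* TSF in TU plus a small fudge, as the driver does via TSF_TO_TU() */
	uint32_t tsftu = (uint32_t)(tsf_us >> 10) + 2;

	do {
		nexttbtt_tu += intval_tu;
	} while (nexttbtt_tu < tsftu);

	return nexttbtt_tu;
}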
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a5744d8e..07b8fa6fb62 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,6 +15,9 @@
*/
#include "hw.h"
+#include "hw-ops.h"
+
+/* Common calibration code */
/* We can tune this as we go by monitoring really low values */
#define ATH9K_NF_TOO_LOW -60
@@ -83,93 +86,11 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
}
}
- return;
}
-static void ath9k_hw_do_getnf(struct ath_hw *ah,
- int16_t nfarray[NUM_NF_READINGS])
-{
- struct ath_common *common = ath9k_hw_common(ah);
- int16_t nf;
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 0] is %d\n", nf);
- nfarray[0] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR9280_PHY_CH1_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR_PHY_CH1_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 1] is %d\n", nf);
- nfarray[1] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
- AR_PHY_CH2_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 2] is %d\n", nf);
- nfarray[2] = nf;
- }
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR9280_PHY_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR_PHY_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 0] is %d\n", nf);
- nfarray[3] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR9280_PHY_CH1_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR_PHY_CH1_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 1] is %d\n", nf);
- nfarray[4] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
- AR_PHY_CH2_EXT_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 2] is %d\n", nf);
- nfarray[5] = nf;
- }
- }
-}
-
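The minCCA power fields read by the removed ath9k_hw_do_getnf() (which this series moves out of the common calibration code) are 9-bit two's-complement register fields; the recurring "if (nf & 0x100)" test sign-extends them into ordinary signed values. A self-contained illustration of that conversion:

#include <stdint.h>

static int16_t sign_extend_9bit(uint16_t raw)
{
	int16_t nf = raw & 0x1ff;	/* 9-bit field from the CCA register */

	if (nf & 0x100)			/* sign bit set: negative reading */
		nf = 0 - ((nf ^ 0x1ff) + 1);

	return nf;			/* e.g. 0x1a6 -> -90 */
}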
-static bool getNoiseFloorThresh(struct ath_hw *ah,
- enum ieee80211_band band,
- int16_t *nft)
+static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
+ enum ieee80211_band band,
+ int16_t *nft)
{
switch (band) {
case IEEE80211_BAND_5GHZ:
@@ -186,44 +107,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
return true;
}
-static void ath9k_hw_setup_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
-
- switch (currCal->calData->calType) {
- case IQ_MISMATCH_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
- break;
- case ADC_GAIN_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
- break;
- case ADC_DC_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
- break;
- case ADC_DC_INIT_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting Init ADC DC Calibration\n");
- break;
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_DO_CAL);
-}
-
-static void ath9k_hw_reset_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
{
int i;
@@ -241,324 +126,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
ah->cal_samples = 0;
}
-static bool ath9k_hw_per_calibration(struct ath_hw *ah,
- struct ath9k_channel *ichan,
- u8 rxchainmask,
- struct ath9k_cal_list *currCal)
-{
- bool iscaldone = false;
-
- if (currCal->calState == CAL_RUNNING) {
- if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
- AR_PHY_TIMING_CTRL4_DO_CAL)) {
-
- currCal->calData->calCollect(ah);
- ah->cal_samples++;
-
- if (ah->cal_samples >= currCal->calData->calNumSamples) {
- int i, numChains = 0;
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- if (rxchainmask & (1 << i))
- numChains++;
- }
-
- currCal->calData->calPostProc(ah, numChains);
- ichan->CalValid |= currCal->calData->calType;
- currCal->calState = CAL_DONE;
- iscaldone = true;
- } else {
- ath9k_hw_setup_calibration(ah, currCal);
- }
- }
- } else if (!(ichan->CalValid & currCal->calData->calType)) {
- ath9k_hw_reset_calibration(ah, currCal);
- }
-
- return iscaldone;
-}
-
-/* Assumes you are talking about the currently configured channel */
-static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
- enum ath9k_cal_types calType)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- switch (calType & ah->supp_cals) {
- case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
- return true;
- case ADC_GAIN_CAL:
- case ADC_DC_CAL:
- if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
- conf_is_ht20(conf)))
- return true;
- break;
- }
- return false;
-}
-
-static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalPowerMeasI[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalPowerMeasQ[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalIqCorrMeas[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
- }
-}
-
-static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcIOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcIEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcQOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcQEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcIOddPhase[i],
- ah->totalAdcIEvenPhase[i],
- ah->totalAdcQOddPhase[i],
- ah->totalAdcQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcDcOffsetIOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcDcOffsetIEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcDcOffsetQOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcDcOffsetQEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcDcOffsetIOddPhase[i],
- ah->totalAdcDcOffsetIEvenPhase[i],
- ah->totalAdcDcOffsetQOddPhase[i],
- ah->totalAdcDcOffsetQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 powerMeasQ, powerMeasI, iqCorrMeas;
- u32 qCoffDenom, iCoffDenom;
- int32_t qCoff, iCoff;
- int iqCorrNeg, i;
-
- for (i = 0; i < numChains; i++) {
- powerMeasI = ah->totalPowerMeasI[i];
- powerMeasQ = ah->totalPowerMeasQ[i];
- iqCorrMeas = ah->totalIqCorrMeas[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
-
- iqCorrNeg = 0;
-
- if (iqCorrMeas > 0x80000000) {
- iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
- iqCorrNeg = 1;
- }
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
-
- iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
- qCoffDenom = powerMeasQ / 64;
-
- if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
- (qCoffDenom != 0)) {
- iCoff = iqCorrMeas / iCoffDenom;
- qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
-
- iCoff = iCoff & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
- if (iqCorrNeg == 0x0)
- iCoff = 0x40 - iCoff;
-
- if (qCoff > 15)
- qCoff = 15;
- else if (qCoff <= -16)
- qCoff = 16;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
- iCoff);
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
- qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
- }
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
-}
-
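Condensed form of the I/Q mismatch math in the removed ath9k_hw_iqcalibrate(): the accumulated power and correlation sums are reduced to small correction coefficients destined for AR_PHY_TIMING_CTRL4. Sign handling and clamping are omitted here; this is a sketch, not the per-chip replacement code:

#include <stdint.h>

static void iq_coeffs(uint32_t pwr_i, uint32_t pwr_q, uint32_t iq_corr,
		      int32_t *icoff, int32_t *qcoff)
{
	uint32_t i_den = (pwr_i / 2 + pwr_q / 2) / 128;
	uint32_t q_den = pwr_q / 64;

	if (pwr_q == 0 || i_den == 0 || q_den == 0) {
		*icoff = *qcoff = 0;		/* not enough signal to correct */
		return;
	}

	*icoff = (iq_corr / i_den) & 0x3f;	/* 6-bit I coefficient */
	*qcoff = pwr_i / q_den - 64;		/* Q coefficient around mid-scale */
}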
-static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
- u32 qGainMismatch, iGainMismatch, val, i;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC Gain Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
-
- if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
- iGainMismatch =
- ((iEvenMeasOffset * 32) /
- iOddMeasOffset) & 0x3f;
- qGainMismatch =
- ((qOddMeasOffset * 32) /
- qEvenMeasOffset) & 0x3f;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xfffff000;
- val |= (qGainMismatch) | (iGainMismatch << 6);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC Gain Cal done for Chain %d\n", i);
- }
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
-}
-
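The removed ADC gain calibration boils down to two divisions: the odd/even phase power ratios become 6-bit mismatch fields packed into the low 12 bits of AR_PHY_NEW_ADC_DC_GAIN_CORR. A sketch of just that packing, with register access omitted:

#include <stdint.h>

static uint32_t adc_gain_corr(uint32_t i_odd, uint32_t i_even,
			      uint32_t q_odd, uint32_t q_even, uint32_t reg)
{
	uint32_t i_mis, q_mis;

	if (i_odd == 0 || q_even == 0)
		return reg;			/* avoid divide by zero */

	i_mis = ((i_even * 32) / i_odd) & 0x3f;
	q_mis = ((q_odd * 32) / q_even) & 0x3f;

	return (reg & 0xfffff000) | q_mis | (i_mis << 6);
}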
-static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, val, i;
- int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
- const struct ath9k_percal_data *calData =
- ah->cal_list_curr->calData;
- u32 numSamples =
- (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC DC Offset Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
-
- iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
- numSamples) & 0x1ff;
- qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
- numSamples) & 0x1ff;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xc0000fff;
- val |= (qDcMismatch << 12) | (iDcMismatch << 21);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC DC Offset Cal done for Chain %d\n", i);
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
-}
-
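Likewise, the removed ADC DC-offset calibration averages the per-phase DC sums over (1 << (calCountMax + 5)) * calNumSamples samples and stores the result as two 9-bit fields at bits 12 and 21 of the same correction register. Sketch only:

#include <stdint.h>

static uint32_t adc_dc_corr(int32_t i_odd, int32_t i_even,
			    int32_t q_odd, int32_t q_even,
			    int32_t num_samples, uint32_t reg)
{
	uint32_t i_mis = (((i_even - i_odd) * 2) / num_samples) & 0x1ff;
	uint32_t q_mis = (((q_odd - q_even) * 2) / num_samples) & 0x1ff;

	return (reg & 0xc0000fff) | (q_mis << 12) | (i_mis << 21);
}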
/* This is done for the currently configured channel */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
@@ -605,72 +172,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_nfcal_hist *h;
- int i, j;
- int32_t val;
- const u32 ar5416_cca_regs[6] = {
- AR_PHY_CCA,
- AR_PHY_CH1_CCA,
- AR_PHY_CH2_CCA,
- AR_PHY_EXT_CCA,
- AR_PHY_CH1_EXT_CCA,
- AR_PHY_CH2_EXT_CCA
- };
- u8 chainmask, rx_chain_status;
-
- rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
- if (AR_SREV_9285(ah))
- chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
- if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- } else {
- if (rx_chain_status & 0x4)
- chainmask = 0x3F;
- else if (rx_chain_status & 0x2)
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- }
-
- h = ah->nfCalHist;
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
-
- for (j = 0; j < 5; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_NF) == 0)
- break;
- udelay(50);
- }
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (-50) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-}
-
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -690,7 +191,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
} else {
ath9k_hw_do_getnf(ah, nfarray);
nf = nfarray[0];
- if (getNoiseFloorThresh(ah, c->band, &nfThresh)
+ if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
ath_print(common, ATH_DBG_CALIBRATE,
"noise floor failed detected; "
@@ -715,7 +216,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
if (AR_SREV_9280(ah))
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
- else if (AR_SREV_9285(ah))
+ else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
else if (AR_SREV_9287(ah))
noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -748,508 +249,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
return nf;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);
-
-static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
-{
- u32 rddata;
- int32_t delta, currPDADC, slope;
-
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- /*
- * Zero value indicates that no frames have been transmitted yet,
- * can't do temperature compensation until frames are transmitted.
- */
- return;
- } else {
- slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
-
- if (slope == 0) { /* to avoid divide by zero case */
- delta = 0;
- } else {
- delta = ((currPDADC - ah->initPDADC)*4) / slope;
- }
- REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- }
-}
-
-static void ath9k_olc_temp_compensation(struct ath_hw *ah)
-{
- u32 rddata, i;
- int delta, currPDADC, regval;
-
- if (OLC_FOR_AR9287_10_LATER) {
- ath9k_olc_temp_compensation_9287(ah);
- } else {
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- return;
- } else {
- if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
- delta = (currPDADC - ah->initPDADC + 4) / 8;
- else
- delta = (currPDADC - ah->initPDADC + 5) / 10;
-
- if (delta != ah->PDADCdelta) {
- ah->PDADCdelta = delta;
- for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
- regval = ah->originalGain[i] - delta;
- if (regval < 0)
- regval = 0;
-
- REG_RMW_FIELD(ah,
- AR_PHY_TX_GAIN_TBL1 + i * 4,
- AR_PHY_TX_GAIN, regval);
- }
- }
- }
- }
-}
-
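The open-loop power-control temperature compensation removed above compares the current averaged PDADC reading with the value latched at reset; the divisor depends on the EEPROM's 5 GHz DAC high-power flag, and each gain-table entry is then lowered by the resulting delta, clamped at zero. A minimal sketch of the non-AR9287 delta computation:

#include <stdbool.h>

static int olc_gain_delta(int init_pdadc, int curr_pdadc, bool dac_hpwr_5g)
{
	if (init_pdadc == 0 || curr_pdadc == 0)
		return 0;	/* nothing transmitted yet, no compensation */

	return dac_hpwr_5g ? (curr_pdadc - init_pdadc + 4) / 8
			   : (curr_pdadc - init_pdadc + 5) / 10;
}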
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- u32 regVal;
- unsigned int i;
- u32 regList [][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 } ,
- { 0x7828, 0 } ,
- };
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- /* 786c,b23,1, pwddac=1 */
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- /* 7854, b5,1, pdrxtxbb=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- /* 7854, b7,1, pdv2i=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- /* 7854, b8,1, pddacinterface=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- /* 7824,b12,0, offcal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- /* 7838, b1,0, pwddb=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- /* 7820,b11,0, enpacal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- /* 7820,b25,1, pdpadrv1=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- /* 7820,b24,0, pdpadrv2=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
- /* 7820,b23,0, pdpaout=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- /* 783c,b14-16,7, padrvgn2tab_0=7 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- /*
- * 7838,b29-31,0, padrvgn1tab_0=0
- * does not matter since we turn it off
- */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
-
- /* Set:
- * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
- * txon=1,paon=1,oscon=1,synthon_force=1
- */
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
-
- /* find off_6_1; */
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- //regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (20 + i)));
- regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
- << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- regVal = (regVal >>20) & 0x7f;
-
- /* Update PA cal info */
- if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = regVal;
- }
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-}
-
-static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 regVal;
- int i, offset, offs_6_1, offs_0;
- u32 ccomp_org, reg_field;
- u32 regList[][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 },
- };
-
- ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
-
- /* PA CAL is not needed for high power solution */
- if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
- AR5416_EEP_TXGAIN_HIGH_POWER)
- return;
-
- if (AR_SREV_9285_11(ah)) {
- REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
- udelay(10);
- }
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
- ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
-
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
-
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (19 + i)));
- reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
- regVal |= (reg_field << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
- udelay(1);
- reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
- offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
- offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
-
- offset = (offs_6_1<<1) | offs_0;
- offset = offset - 0;
- offs_6_1 = offset>>1;
- offs_0 = offset & 1;
-
- if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = offset;
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
-
- if (AR_SREV_9285_11(ah))
- REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
-
-}
-
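Both PA calibration routines above share the same skip-count bookkeeping: when the measured offset has not changed since the last run, the interval between PA cals doubles up to a cap; any change, or a reset, drops it back to running every time. A standalone sketch of that backoff, with a local struct standing in for ath9k_pacal_info:

#include <stdbool.h>
#include <stdint.h>

struct pacal_backoff {
	int32_t prev_offset;
	int32_t max_skipcount;
	int32_t skipcount;
};

static void pacal_update(struct pacal_backoff *p, int32_t offset,
			 bool is_reset, int32_t cap)
{
	if (!is_reset && p->prev_offset == offset) {
		if (p->max_skipcount < cap)
			p->max_skipcount *= 2;
		p->skipcount = p->max_skipcount;
	} else {
		p->max_skipcount = 1;
		p->skipcount = 0;
		p->prev_offset = offset;
	}
}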
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal)
-{
- bool iscaldone = true;
- struct ath9k_cal_list *currCal = ah->cal_list_curr;
-
- if (currCal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ath9k_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
- }
- }
-
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /* Do periodic PAOffset Cal */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, false);
- else if (AR_SREV_9285_11_OR_LATER(ah)) {
- if (!ah->pacal_info.skipcount)
- ath9k_hw_9285_pa_cal(ah, false);
- else
- ah->pacal_info.skipcount--;
- }
-
- if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
- ath9k_olc_temp_compensation(ah);
-
- /* Get the value from the previous NF cal and update history buffer */
- ath9k_hw_getnf(ah, chan);
-
- /*
- * Load the NF from history buffer of the current channel.
- * NF is slow time-variant, so it is OK to use a historical value.
- */
- ath9k_hw_loadnf(ah, ah->curchan);
-
- ath9k_hw_start_nfcal(ah);
- }
-
- return iscaldone;
-}
-EXPORT_SYMBOL(ath9k_hw_calibrate);
-
-/* Carrier leakage Calibration fix */
-static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- if (IS_CHAN_HT20(chan)) {
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset "
- "calibration failed to complete in "
- "1ms; noisy ??\n");
- return false;
- }
- REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- }
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
- "failed to complete in 1ms; noisy ??\n");
- return false;
- }
-
- REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
-
- return true;
-}
-
-bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
- if (!ar9285_clc(ah, chan))
- return false;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
- return false;
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_SET_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
- }
-
- /* Do PA Calibration */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, true);
- else if (AR_SREV_9285_11_OR_LATER(ah))
- ath9k_hw_9285_pa_cal(ah, true);
-
- /* Do NF Calibration after DC offset and other calibrations */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
-
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- /* Enable IQ, ADC Gain and ADC DC offset CALs */
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
- if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
- INIT_CAL(&ah->adcgain_caldata);
- INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
- INIT_CAL(&ah->adcdc_caldata);
- INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
- }
-
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
- }
-
- chan->CalValid = 0;
-
- return true;
-}
-
-const struct ath9k_percal_data iq_cal_multi_sample = {
- IQ_MISMATCH_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data iq_cal_single_sample = {
- IQ_MISMATCH_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data adc_gain_cal_multi_sample = {
- ADC_GAIN_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_gain_cal_single_sample = {
- ADC_GAIN_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_multi_sample = {
- ADC_DC_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_single_sample = {
- ADC_DC_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_init_dc_cal = {
- ADC_DC_INIT_CAL,
- MIN_CAL_SAMPLES,
- INIT_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e9748..24538bdb912 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
#include "hw.h"
-extern const struct ath9k_percal_data iq_cal_multi_sample;
-extern const struct ath9k_percal_data iq_cal_single_sample;
-extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
-extern const struct ath9k_percal_data adc_gain_cal_single_sample;
-extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
-extern const struct ath9k_percal_data adc_dc_cal_single_sample;
-extern const struct ath9k_percal_data adc_init_dc_cal;
-
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
ADC_DC_INIT_CAL = 0x1,
ADC_GAIN_CAL = 0x2,
ADC_DC_CAL = 0x4,
- IQ_MISMATCH_CAL = 0x8
+ IQ_MISMATCH_CAL = 0x8,
+ TEMP_COMP_CAL = 0x10,
};
enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal);
-bool ath9k_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan);
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+
#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae141d..7707341cd0d 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -57,13 +57,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
* rs_more indicates chained descriptors which can be used
* to link buffers together for a sort of scatter-gather
* operation.
- *
+ * Reject the frame: we don't support scatter-gather yet, and
+ * the frame is probably corrupt anyway.
+ */
+ if (rx_stats->rs_more)
+ return false;
+
+ /*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
- if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
+ if (rx_stats->rs_status != 0) {
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
@@ -102,11 +108,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
return true;
}
-static u8 ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- struct sk_buff *skb)
+static int ath9k_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
@@ -122,25 +128,32 @@ static u8 ath9k_process_rate(struct ath_common *common,
rxs->flag |= RX_FLAG_40MHZ;
if (rx_stats->rs_flags & ATH9K_RX_GI)
rxs->flag |= RX_FLAG_SHORT_GI;
- return rx_stats->rs_rate & 0x7f;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
- return i;
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
- return i;
+ rxs->rate_idx = i;
+ return 0;
}
}
- /* No valid hardware bitrate found -- we should not get here */
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
"0x%02x using 1 Mbit\n", rx_stats->rs_rate);
if ((common->debug_mask & ATH_DBG_XMIT))
print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
- return 0;
+ return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
@@ -202,17 +215,22 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
struct ath_hw *ah = common->ah;
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups for each frame.
+ */
if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
return -EINVAL;
ath9k_process_rssi(common, hw, skb, rx_stats);
- rx_status->rate_idx = ath9k_process_rate(common, hw,
- rx_stats, rx_status, skb);
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
+ return -EINVAL;
+
rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
- rx_status->noise = common->ani.noise_floor;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_TSFT;
@@ -255,7 +273,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
keyix = rx_stats->rs_keyix;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +305,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
}
EXPORT_SYMBOL(ath9k_cmn_padpos);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.hw_key) {
+ if (tx_info->control.hw_key->alg == ALG_WEP)
+ return ATH9K_KEY_TYPE_WEP;
+ else if (tx_info->control.hw_key->alg == ALG_TKIP)
+ return ATH9K_KEY_TYPE_TKIP;
+ else if (tx_info->control.hw_key->alg == ALG_CCMP)
+ return ATH9K_KEY_TYPE_AES;
+ }
+
+ return ATH9K_KEY_TYPE_CLEAR;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
+
+static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ u32 chanmode = 0;
+
+ switch (chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_G_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_G_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_G_HT40MINUS;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_A_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_A_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_A_HT40MINUS;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return chanmode;
+}
+
+/*
+ * Update internal channel flags.
+ */
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan)
+{
+ struct ieee80211_channel *chan = hw->conf.channel;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ ichan->chanmode = CHANNEL_G;
+ ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
+ } else {
+ ichan->chanmode = CHANNEL_A;
+ ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
+ }
+
+ if (conf_is_ht(conf))
+ ichan->chanmode = ath9k_get_extchanmode(chan,
+ conf->channel_type);
+}
+EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
+
+/*
+ * Get the internal channel reference.
+ */
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah)
+{
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *channel;
+ u8 chan_idx;
+
+ chan_idx = curchan->hw_value;
+ channel = &ah->channels[chan_idx];
+ ath9k_cmn_update_ichannel(hw, channel);
+
+ return channel;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
+ struct ath9k_keyval *hk, const u8 *addr,
+ bool authenticator)
+{
+ struct ath_hw *ah = common->ah;
+ const u8 *key_rxmic;
+ const u8 *key_txmic;
+
+ key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /*
+ * Group key installation - only two key cache entries are used
+ * regardless of splitmic capability since group key is only
+ * used either for TX or RX.
+ */
+ if (authenticator) {
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
+ } else {
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
+ }
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+ if (!common->splitmic) {
+ /* TX and RX keys share the same key cache entry. */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+
+ /* Separate key cache entries for TX and RX */
+
+ /* TX key goes at first index, RX key at +32. */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
+ /* TX MIC entry failed. No need to proceed further */
+ ath_print(common, ATH_DBG_FATAL,
+ "Setting TX MIC Key Failed\n");
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
+}
+
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
+{
+ int i;
+
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap))
+ continue; /* At least one part of TKIP key allocated */
+ if (common->splitmic &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ continue; /* At least one part of TKIP key allocated */
+
+ /* Found a free slot for a TKIP key */
+ return i;
+ }
+ return -1;
+}
+
+static int ath_reserve_key_cache_slot(struct ath_common *common)
+{
+ int i;
+
+ /* First, try to find slots that would not be available for TKIP. */
+ if (common->splitmic) {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+ if (!test_bit(i, common->keymap) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i;
+ if (!test_bit(i + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 32;
+ if (!test_bit(i + 64, common->keymap) &&
+ (test_bit(i , common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 64;
+ if (!test_bit(i + 64 + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap)))
+ return i + 64 + 32;
+ }
+ } else {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (!test_bit(i, common->keymap) &&
+ test_bit(i + 64, common->keymap))
+ return i;
+ if (test_bit(i, common->keymap) &&
+ !test_bit(i + 64, common->keymap))
+ return i + 64;
+ }
+ }
+
+ /* No partially used TKIP slots, pick any available slot */
+ for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
+ /* Do not allow slots that could be needed for TKIP group keys
+ * to be used. This limitation could be removed if we know that
+ * TKIP will not be used. */
+ if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
+ continue;
+ if (common->splitmic) {
+ if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
+ continue;
+ if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
+ continue;
+ }
+
+ if (!test_bit(i, common->keymap))
+ return i; /* Found a free slot for a key */
+ }
+
+ /* No free slot found */
+ return -1;
+}
+
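The slot-reservation helpers above depend on a fixed key cache layout: for a key at index i, entry i + 64 holds the RX MIC, and on split-MIC hardware i + 32 and i + 64 + 32 hold the TX key and TX MIC. A minimal sketch of marking one TKIP key in a keymap bitmap, mirroring the set_bit() calls in ath9k_cmn_key_config() further down (the bitmap size and helper name are assumptions for illustration):

#include <linux/bitops.h>
#include <linux/types.h>

static void mark_tkip_slots(unsigned long *keymap, int idx, bool splitmic)
{
	set_bit(idx, keymap);			/* main key (TX+RX on non-split) */
	set_bit(idx + 64, keymap);		/* RX MIC entry */
	if (splitmic) {
		set_bit(idx + 32, keymap);	/* TX key entry */
		set_bit(idx + 64 + 32, keymap);	/* TX MIC entry */
	}
}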
+/*
+ * Configure encryption in the HW.
+ */
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+ struct ath9k_keyval hk;
+ const u8 *mac = NULL;
+ int ret = 0;
+ int idx;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->alg) {
+ case ALG_WEP:
+ hk.kv_type = ATH9K_CIPHER_WEP;
+ break;
+ case ALG_TKIP:
+ hk.kv_type = ATH9K_CIPHER_TKIP;
+ break;
+ case ALG_CCMP:
+ hk.kv_type = ATH9K_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hk.kv_len = key->keylen;
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ } else if (key->keyidx) {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (vif->type != NL80211_IFTYPE_AP) {
+ /* Only keyidx 0 should be used with unicast key, but
+ * allow this for client mode for now. */
+ idx = key->keyidx;
+ } else
+ return -EIO;
+ } else {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+ }
+
+ if (key->alg == ALG_TKIP)
+ ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
+ vif->type == NL80211_IFTYPE_AP);
+ else
+ ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ set_bit(idx, common->keymap);
+ if (key->alg == ALG_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ if (common->splitmic) {
+ set_bit(idx + 32, common->keymap);
+ set_bit(idx + 64 + 32, common->keymap);
+ }
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL(ath9k_cmn_key_config);
+
+/*
+ * Delete Key.
+ */
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+
+ ath9k_hw_keyreset(ah, key->hw_key_idx);
+ if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
+ if (key->alg != ALG_TKIP)
+ return;
+
+ clear_bit(key->hw_key_idx + 64, common->keymap);
+ if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
+ clear_bit(key->hw_key_idx + 32, common->keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_key_delete);
+
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c2fe9..e08f7e5a26e 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,9 +20,12 @@
#include "../debug.h"
#include "hw.h"
+#include "hw-ops.h"
/* Common header for Atheros 802.11n base driver cores */
+#define IEEE80211_WEP_NKID 4
+
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -74,11 +77,12 @@ struct ath_buf {
an aggregate) */
struct ath_buf *bf_next; /* next subframe in the aggregate */
struct sk_buff *bf_mpdu; /* enclosing frame structure */
- struct ath_desc *bf_desc; /* virtual addr of desc */
+ void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer */
bool bf_stale;
bool bf_isnullfunc;
+ bool bf_tx_aborted;
u16 bf_flags;
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
@@ -125,3 +129,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
bool decrypt_error);
int ath9k_cmn_padpos(__le16 frame_control);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan);
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah);
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e0085ed4..29898f8d189 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -78,6 +78,90 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
+static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->tx_chainmask = mask;
+ sc->sc_ah->caps.tx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_tx_chainmask = {
+ .read = read_file_tx_chainmask,
+ .write = write_file_tx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+
+static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->rx_chainmask = mask;
+ sc->sc_ah->caps.rx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_rx_chainmask = {
+ .read = read_file_rx_chainmask,
+ .write = write_file_rx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
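The two chainmask files land under the per-wiphy debugfs directory created in ath9k_init_debug(). A hedged user-space example of driving them follows; the /sys/kernel/debug/ath9k/phy0 path assumes the default debugfs mount point and wiphy name, neither of which is guaranteed by this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ath9k/phy0/tx_chainmask";
	char buf[32];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "0x3", 3) != 3)		/* TX on chains 0 and 1 */
		perror("write");
	if (lseek(fd, 0, SEEK_SET) == 0 &&
	    (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("tx_chainmask is now %s", buf);
	}
	close(fd);
	return 0;
}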
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -157,10 +241,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -180,8 +264,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
if (status)
sc->debug.stats.istats.total++;
- if (status & ATH9K_INT_RX)
- sc->debug.stats.istats.rxok++;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXLP)
+ sc->debug.stats.istats.rxlp++;
+ if (status & ATH9K_INT_RXHP)
+ sc->debug.stats.istats.rxhp++;
+ } else {
+ if (status & ATH9K_INT_RX)
+ sc->debug.stats.istats.rxok++;
+ }
if (status & ATH9K_INT_RXEOL)
sc->debug.stats.istats.rxeol++;
if (status & ATH9K_INT_RXORN)
@@ -223,8 +314,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+ } else {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ }
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
len += snprintf(buf + len, sizeof(buf) - len,
@@ -557,10 +655,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf, struct ath_tx_status *ts)
{
- struct ath_desc *ds = bf->bf_desc;
-
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +666,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, completed);
}
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
+ if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
+ if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+ if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
+ if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
}
@@ -663,30 +759,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
#undef PHY_ERR
}
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
- struct ath_desc *ds = bf->bf_desc;
u32 phyerr;
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ if (rs->rs_status & ATH9K_RXERR_CRC)
RX_STAT_INC(crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
RX_STAT_INC(decrypt_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ if (rs->rs_status & ATH9K_RXERR_MIC)
RX_STAT_INC(mic_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
RX_STAT_INC(pre_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
RX_STAT_INC(post_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
RX_STAT_INC(decrypt_busy_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
- phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ phyerr = rs->rs_phyerr & 0x24;
RX_PHY_ERR_INC(phyerr);
}
@@ -700,6 +795,86 @@ static const struct file_operations fops_recv = {
.owner = THIS_MODULE
};
+static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long regidx;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regidx))
+ return -EINVAL;
+
+ sc->debug.regidx = regidx;
+ return count;
+}
+
+static const struct file_operations fops_regidx = {
+ .read = read_file_regidx,
+ .write = write_file_regidx,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_regval(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+ u32 regval;
+
+ regval = REG_READ_D(ah, sc->debug.regidx);
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long regval;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regval))
+ return -EINVAL;
+
+ REG_WRITE_D(ah, sc->debug.regidx, regval);
+ return count;
+}
+
+static const struct file_operations fops_regval = {
+ .read = read_file_regval,
+ .write = write_file_regval,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
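/*
 * Note on usage (not part of the patch): the regidx/regval pair above forms a
 * simple peek/poke interface — write a register offset to "regidx", then read
 * or write "regval" to access that register.  A minimal userspace sketch is
 * below; the debugfs mount point and phy directory name are assumptions and
 * will differ per system (echo/cat from a shell works just as well).
 */
#include <stdio.h>

#define DBG_DIR "/sys/kernel/debug/ath9k/phy0"	/* assumed path */

int main(void)
{
	unsigned int val;
	FILE *f;

	f = fopen(DBG_DIR "/regidx", "w");	/* select a register offset */
	if (!f)
		return 1;
	fprintf(f, "0x%x\n", 0x8048);		/* hypothetical offset */
	fclose(f);

	f = fopen(DBG_DIR "/regval", "r");	/* read the register back */
	if (!f)
		return 1;
	if (fscanf(f, "0x%x", &val) == 1)
		printf("reg 0x8048 = 0x%08x\n", val);
	fclose(f);
	return 0;
}
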
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -711,54 +886,55 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
ath9k_debugfs_root);
if (!sc->debug.debugfs_phy)
- goto err;
+ return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG
- sc->debug.debugfs_debug = debugfs_create_file("debug",
- S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
- if (!sc->debug.debugfs_debug)
+ if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_debug))
goto err;
#endif
- sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR,
- sc->debug.debugfs_phy, sc, &fops_dma);
- if (!sc->debug.debugfs_dma)
+ if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_dma))
+ goto err;
+
+ if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_interrupt))
+ goto err;
+
+ if (!debugfs_create_file("rcstat", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_rcstat))
+ goto err;
+
+ if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_wiphy))
+ goto err;
+
+ if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_xmit))
goto err;
- sc->debug.debugfs_interrupt = debugfs_create_file("interrupt",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_interrupt);
- if (!sc->debug.debugfs_interrupt)
+ if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_recv))
goto err;
- sc->debug.debugfs_rcstat = debugfs_create_file("rcstat",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_rcstat);
- if (!sc->debug.debugfs_rcstat)
+ if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_rx_chainmask))
goto err;
- sc->debug.debugfs_wiphy = debugfs_create_file(
- "wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc,
- &fops_wiphy);
- if (!sc->debug.debugfs_wiphy)
+ if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
goto err;
- sc->debug.debugfs_xmit = debugfs_create_file("xmit",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_xmit);
- if (!sc->debug.debugfs_xmit)
+ if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regidx))
goto err;
- sc->debug.debugfs_recv = debugfs_create_file("recv",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_recv);
- if (!sc->debug.debugfs_recv)
+ if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regval))
goto err;
+ sc->debug.regidx = 0;
return 0;
err:
ath9k_exit_debug(ah);
@@ -770,14 +946,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- debugfs_remove(sc->debug.debugfs_recv);
- debugfs_remove(sc->debug.debugfs_xmit);
- debugfs_remove(sc->debug.debugfs_wiphy);
- debugfs_remove(sc->debug.debugfs_rcstat);
- debugfs_remove(sc->debug.debugfs_interrupt);
- debugfs_remove(sc->debug.debugfs_dma);
- debugfs_remove(sc->debug.debugfs_debug);
- debugfs_remove(sc->debug.debugfs_phy);
+ debugfs_remove_recursive(sc->debug.debugfs_phy);
}
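/*
 * Illustration (not part of the patch): the cleanup above now relies on
 * debugfs_remove_recursive(), so only the top-level dentry has to be kept.
 * A hedged sketch of the same pattern in a hypothetical module, using the
 * debugfs API of this kernel era:
 */
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u32 demo_counter;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (!demo_dir)
		return -ENOMEM;

	/* Individual file dentries no longer need to be saved. */
	if (!debugfs_create_u32("counter", 0400, demo_dir, &demo_counter)) {
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* One call tears down the directory and everything inside it. */
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
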
int ath9k_debug_create_root(void)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e68b31..5147b8709e1 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
* struct ath_interrupt_stats - Contains statistics about interrupts
* @total: Total no. of interrupts generated so far
* @rxok: RX with no errors
+ * @rxlp: RX interrupts on the low priority (EDMA) queue
+ * @rxhp: RX interrupts on the high priority (EDMA) queue, uapsd only
* @rxeol: RX with no more RXDESC available
* @rxorn: RX FIFO overrun
* @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
struct ath_interrupt_stats {
u32 total;
u32 rxok;
+ u32 rxlp;
+ u32 rxhp;
u32 rxeol;
u32 rxorn;
u32 txok;
@@ -149,13 +153,7 @@ struct ath_stats {
struct ath9k_debug {
struct dentry *debugfs_phy;
- struct dentry *debugfs_debug;
- struct dentry *debugfs_dma;
- struct dentry *debugfs_interrupt;
- struct dentry *debugfs_rcstat;
- struct dentry *debugfs_wiphy;
- struct dentry *debugfs_xmit;
- struct dentry *debugfs_recv;
+ u32 regidx;
struct ath_stats stats;
};
@@ -167,8 +165,8 @@ void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf);
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
+ struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -204,12 +202,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf,
+ struct ath_tx_status *ts)
{
}
static inline void ath_debug_stat_rx(struct ath_softc *sc,
- struct ath_buf *bf)
+ struct ath_rx_status *rs)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae93414..ca8704a9d7a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -36,8 +36,6 @@ void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
if (ah->config.analog_shiftreg)
udelay(100);
-
- return;
}
int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -256,14 +254,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
- if (AR_SREV_9287(ah)) {
- ah->eep_map = EEP_MAP_AR9287;
- ah->eep_ops = &eep_AR9287_ops;
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
+		ah->eep_ops = &eep_ar9300_ops;
+	} else if (AR_SREV_9287(ah)) {
+		ah->eep_ops = &eep_ar9287_ops;
} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
- ah->eep_map = EEP_MAP_4KBITS;
ah->eep_ops = &eep_4k_ops;
} else {
- ah->eep_map = EEP_MAP_DEFAULT;
ah->eep_ops = &eep_def_ops;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b50e2..21354c15a9a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
#include "../ath.h"
#include <net/cfg80211.h>
+#include "ar9003_eeprom.h"
#define AH_USE_EEPROM 0x1
@@ -93,7 +94,6 @@
*/
#define AR9285_RDEXT_DEFAULT 0x1F
-#define AR_EEPROM_MAC(i) (0x1d+(i))
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
#define AR5416_BCHAN_UNUSED 0xFF
#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
#define AR5416_MAX_CHAINS 3
+#define AR9300_MAX_CHAINS 3
#define AR5416_PWR_TABLE_OFFSET_DB -5
/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
EEP_MINOR_REV,
EEP_TX_MASK,
EEP_RX_MASK,
+ EEP_FSTCLK_5G,
EEP_RXGAIN_TYPE,
- EEP_TXGAIN_TYPE,
EEP_OL_PWRCTRL,
+ EEP_TXGAIN_TYPE,
EEP_RC_CHAIN_MASK,
EEP_DAC_HPWR_5G,
EEP_FRAC_N_5G,
EEP_DEV_TYPE,
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
- EEP_PWR_TABLE_OFFSET
+ EEP_PWR_TABLE_OFFSET,
+ EEP_DRIVE_STRENGTH,
+ EEP_INTERNAL_REGULATOR,
+ EEP_SWREG
};
enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
u32 binBuildNumber;
u8 deviceType;
u8 pwdclkind;
- u8 futureBase_1[2];
+ u8 fastClk5g;
+ u8 divChain;
u8 rxGainType;
u8 dacHiPwrMode_5G;
u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
u8 iso[3];
};
-enum ath9k_eep_map {
- EEP_MAP_DEFAULT = 0x0,
- EEP_MAP_4KBITS,
- EEP_MAP_AR9287,
- EEP_MAP_MAX
-};
-
struct eeprom_ops {
int (*check_eeprom)(struct ath_hw *hw);
u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
extern const struct eeprom_ops eep_def_ops;
extern const struct eeprom_ops eep_4k_ops;
-extern const struct eeprom_ops eep_AR9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9300_ops;
#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db16690ab..41a77d1bd43 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
{
@@ -43,7 +44,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
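/*
 * Illustration (not part of the patch): the EEP_MAC_LSW/MID/MSW cases above
 * each return two MAC octets packed as (macAddr[n] << 8 | macAddr[n + 1]).
 * A standalone sketch of turning the three 16-bit words back into a 6-byte
 * address; the sample word values are made up:
 */
#include <stdio.h>
#include <stdint.h>

static void mac_from_words(uint16_t lsw, uint16_t mid, uint16_t msw,
			   uint8_t mac[6])
{
	uint16_t w[3] = { lsw, mid, msw };
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = w[i] >> 8;	/* high byte carries the first octet */
		mac[2 * i + 1] = w[i] & 0xff;
	}
}

int main(void)
{
	uint8_t mac[6];

	mac_from_words(0x0003, 0x7f11, 0x2233, mac);	/* hypothetical words */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
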
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
&tMinCalPower, gainBoundaries,
pdadcValues, numXpdGain);
+ ENABLE_REGWRITE_BUFFER(ah);
+
if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
regOffset += 4;
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
}
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
/* OFDM power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a1df2..b471db5fb82 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -44,7 +45,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
if (!ath9k_hw_nvram_read(common,
addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
#undef EEP_MAP9287_SPURCHAN
}
-const struct eeprom_ops eep_AR9287_ops = {
+const struct eeprom_ops eep_ar9287_ops = {
.check_eeprom = ath9k_hw_AR9287_check_eeprom,
.get_eeprom = ath9k_hw_AR9287_get_eeprom,
.fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a0341242..7e1ed78d0e6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static void ath9k_get_txgain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
@@ -49,7 +50,6 @@ static void ath9k_get_txgain_index(struct ath_hw *ah,
i++;
*pcdacIdx = i;
- return;
}
static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
@@ -222,6 +222,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return -EINVAL;
}
+ /* Enable fixup for AR_AN_TOP2 if necessary */
+ if (AR_SREV_9280_10_OR_LATER(ah) &&
+ (eep->baseEepHeader.version & 0xff) > 0x0a &&
+ eep->baseEepHeader.pwdclkind == 0)
+ ah->need_an_top2_fixup = 1;
+
return 0;
}
@@ -237,11 +243,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pModal[0].noiseFloorThreshCh[0];
case EEP_NFTHRESH_2:
return pModal[1].noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -267,6 +273,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pBase->txMask;
case EEP_RX_MASK:
return pBase->rxMask;
+ case EEP_FSTCLK_5G:
+ return pBase->fastClk5g;
case EEP_RXGAIN_TYPE:
return pBase->rxGainType;
case EEP_TXGAIN_TYPE:
@@ -742,8 +750,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
pPDADCValues[k] = pPDADCValues[k - 1];
k++;
}
-
- return;
}
static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8beb068..0ee75e79fe3 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
u32 timer_next,
u32 timer_period)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
+ "no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 00000000000..77b359162d6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define ATH9K_FW_USB_DEV(devid, fw) \
+ { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
+
+static struct usb_device_id ath9k_hif_usb_ids[] = {
+ ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
+ ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
+
+static int __hif_usb_tx(struct hif_device_usb *hif_dev);
+
+static void hif_usb_regout_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+ if (cmd) {
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, 1);
+ kfree(cmd);
+ }
+
+ return;
+free:
+ kfree_skb(cmd->skb);
+ kfree(cmd);
+}
+
+static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ skb->data, skb->len,
+ hif_usb_regout_cb, cmd, 1);
+
+ usb_anchor_urb(urb, &hif_dev->regout_submitted);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ dev_kfree_skb_any(skb);
+ TX_STAT_INC(skb_dropped);
+ }
+}
+
+static void hif_usb_tx_cb(struct urb *urb)
+{
+ struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct hif_device_usb *hif_dev;
+ struct sk_buff *skb;
+
+ if (!tx_buf || !tx_buf->hif_dev)
+ return;
+
+ hif_dev = tx_buf->hif_dev;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /*
+ * The URB has been killed, free the SKBs
+ * and return.
+ */
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ return;
+ default:
+ break;
+ }
+
+ /* Check if TX has been stopped */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ goto add_free;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ /* Complete the queued SKBs. */
+ while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, 1);
+ TX_STAT_INC(skb_completed);
+ }
+
+add_free:
+ /* Re-initialize the SKB queue */
+ tx_buf->len = tx_buf->offset = 0;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ /* Add this TX buffer to the free list */
+ spin_lock(&hif_dev->tx.tx_lock);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
+ __hif_usb_tx(hif_dev); /* Check for pending SKBs */
+ TX_STAT_INC(buf_completed);
+ spin_unlock(&hif_dev->tx.tx_lock);
+}
+
+/* TX lock has to be taken */
+static int __hif_usb_tx(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL;
+ struct sk_buff *nskb = NULL;
+ int ret = 0, i;
+ u16 *hdr, tx_skb_cnt = 0;
+ u8 *buf;
+
+ if (hif_dev->tx.tx_skb_cnt == 0)
+ return 0;
+
+ /* Check if a free TX buffer is available */
+ if (list_empty(&hif_dev->tx.tx_buf))
+ return 0;
+
+ tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
+ hif_dev->tx.tx_buf_cnt--;
+
+ tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
+
+ for (i = 0; i < tx_skb_cnt; i++) {
+ nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
+
+ /* Should never be NULL */
+ BUG_ON(!nskb);
+
+ hif_dev->tx.tx_skb_cnt--;
+
+ buf = tx_buf->buf;
+ buf += tx_buf->offset;
+ hdr = (u16 *)buf;
+ *hdr++ = nskb->len;
+ *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ buf += 4;
+ memcpy(buf, nskb->data, nskb->len);
+ tx_buf->len = nskb->len + 4;
+
+ if (i < (tx_skb_cnt - 1))
+ tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
+
+ if (i == (tx_skb_cnt - 1))
+ tx_buf->len += tx_buf->offset;
+
+ __skb_queue_tail(&tx_buf->skb_queue, nskb);
+ TX_STAT_INC(skb_queued);
+ }
+
+ usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ tx_buf->buf, tx_buf->len,
+ hif_usb_tx_cb, tx_buf);
+
+ ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
+ if (ret) {
+ tx_buf->len = tx_buf->offset = 0;
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ __skb_queue_head_init(&tx_buf->skb_queue);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ }
+
+ if (!ret)
+ TX_STAT_INC(buf_queued);
+
+ return ret;
+}
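+
+/*
+ * Illustration (not part of the patch): __hif_usb_tx() above packs several
+ * frames into one bulk transfer; each frame gets a 4-byte prefix (16-bit
+ * length plus the 16-bit stream-mode tag) and every frame but the last is
+ * padded out to a 4-byte boundary.  A standalone sketch of just that offset
+ * arithmetic, with made-up frame lengths:
+ */
#include <stdio.h>

#define STREAM_HDR_LEN	4

int main(void)
{
	const unsigned int pkt_len[] = { 1534, 66, 257 };	/* hypothetical */
	unsigned int offset = 0, len = 0, i, n = 3;

	for (i = 0; i < n; i++) {
		len = pkt_len[i] + STREAM_HDR_LEN;
		printf("frame %u at offset %u, %u bytes\n", i, offset, len);
		if (i < n - 1)
			offset += ((len - 1) / 4 + 1) * 4;	/* round up to 4 */
	}
	printf("total transfer length: %u\n", offset + len);
	return 0;
}
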
+
+static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENODEV;
+ }
+
+ /* Check if the max queue count has been reached */
+ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENOMEM;
+ }
+
+ __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+ hif_dev->tx.tx_skb_cnt++;
+
+ /* Send normal frames immediately */
+	if (!tx_ctl || (tx_ctl->type == ATH9K_HTC_NORMAL))
+ __hif_usb_tx(hif_dev);
+
+ /* Check if AMPDUs have to be sent immediately */
+ if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
+ (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+ (hif_dev->tx.tx_skb_cnt < 2)) {
+ __hif_usb_tx(hif_dev);
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ return 0;
+}
+
+static void hif_usb_start(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ hif_dev->flags |= HIF_USB_START;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static void hif_usb_stop(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
+ hif_dev->tx.tx_skb_cnt = 0;
+ hif_dev->tx.flags |= HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ int ret = 0;
+
+ switch (pipe_id) {
+ case USB_WLAN_TX_PIPE:
+ ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
+ break;
+ case USB_REG_OUT_PIPE:
+ ret = hif_usb_send_regout(hif_dev, skb);
+ break;
+ default:
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static struct ath9k_htc_hif hif_usb = {
+ .transport = ATH9K_HIF_USB,
+ .name = "ath9k_hif_usb",
+
+ .control_ul_pipe = USB_REG_OUT_PIPE,
+ .control_dl_pipe = USB_REG_IN_PIPE,
+
+ .start = hif_usb_start,
+ .stop = hif_usb_stop,
+ .send = hif_usb_send,
+};
+
+static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
+ int index = 0, i = 0, chk_idx, len = skb->len;
+ int rx_remain_len = 0, rx_pkt_len = 0;
+ u16 pkt_len, pkt_tag, pool_index = 0;
+ u8 *ptr;
+
+ spin_lock(&hif_dev->rx_lock);
+
+ rx_remain_len = hif_dev->rx_remain_len;
+ rx_pkt_len = hif_dev->rx_transfer_len;
+
+ if (rx_remain_len != 0) {
+ struct sk_buff *remain_skb = hif_dev->remain_skb;
+
+ if (remain_skb) {
+ ptr = (u8 *) remain_skb->data;
+
+ index = rx_remain_len;
+ rx_remain_len -= hif_dev->rx_pad_len;
+ ptr += rx_pkt_len;
+
+ memcpy(ptr, skb->data, rx_remain_len);
+
+ rx_pkt_len += rx_remain_len;
+ hif_dev->rx_remain_len = 0;
+ skb_put(remain_skb, rx_pkt_len);
+
+ skb_pool[pool_index++] = remain_skb;
+
+ } else {
+ index = rx_remain_len;
+ }
+ }
+
+ spin_unlock(&hif_dev->rx_lock);
+
+ while (index < len) {
+ ptr = (u8 *) skb->data;
+
+ pkt_len = ptr[index] + (ptr[index+1] << 8);
+ pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+
+ if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
+ u16 pad_len;
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32,
+ GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ spin_unlock(&hif_dev->rx_lock);
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
+ }
+ } else {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+ }
+
+err:
+ for (i = 0; i < pool_index; i++) {
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
+ skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ RX_STAT_INC(skb_completed);
+ }
+}
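+
+/*
+ * Illustration (not part of the patch): ath9k_hif_usb_rx_stream() above walks
+ * a bulk-in buffer carrying several frames, each prefixed by a 16-bit length
+ * and the 16-bit stream tag (0x4e00), padded to a 4-byte boundary.  A
+ * standalone parser sketch over an in-memory buffer with the same layout:
+ */
#include <stdio.h>
#include <stdint.h>

#define RX_STREAM_TAG	0x4e00

static void parse_stream(const uint8_t *buf, unsigned int len)
{
	unsigned int i = 0;

	while (i + 4 <= len) {
		uint16_t pkt_len = buf[i] | (buf[i + 1] << 8);
		uint16_t tag     = buf[i + 2] | (buf[i + 3] << 8);
		unsigned int pad = (4 - (pkt_len & 3)) & 3;

		if (tag != RX_STREAM_TAG) {
			printf("bad tag 0x%04x, dropping rest\n", tag);
			return;
		}
		printf("frame of %u bytes at offset %u\n", pkt_len, i + 4);
		i += 4 + pkt_len + pad;
	}
}

int main(void)
{
	/* Two tiny frames, hand-built for illustration only. */
	uint8_t buf[] = { 0x02, 0x00, 0x00, 0x4e, 0xaa, 0xbb, 0x00, 0x00,
			  0x01, 0x00, 0x00, 0x4e, 0xcc, 0x00, 0x00, 0x00 };

	parse_stream(buf, sizeof(buf));
	return 0;
}
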
+
+static void ath9k_hif_usb_rx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+ ath9k_hif_usb_rx_stream(hif_dev, skb);
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+}
+
+static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct sk_buff *nskb;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+ /* Process the command first */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+ nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+ urb->context = NULL;
+ return;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ nskb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ kfree_skb(nskb);
+ urb->context = NULL;
+ }
+
+ return;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ goto free;
+
+ return;
+free:
+ kfree_skb(skb);
+ urb->context = NULL;
+}
+
+static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+}
+
+static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf;
+ int i;
+
+ INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
+ INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
+ spin_lock_init(&hif_dev->tx.tx_lock);
+ __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+ tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+ tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
+ if (!tx_buf->buf)
+ goto err;
+
+ tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_buf->urb)
+ goto err;
+
+ tx_buf->hif_dev = hif_dev;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ }
+
+ hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
+
+ return 0;
+err:
+ if (tx_buf) {
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+}
+
+static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+ spin_lock_init(&hif_dev->rx_lock);
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+ ath9k_hif_usb_rx_cb, skb);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+ /*
+ * Drop reference count.
+ * This ensures that the URB is freed when killing them.
+ */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+}
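+
+/*
+ * Illustration (not part of the patch): the RX setup above anchors every URB
+ * and then drops its own reference with usb_free_urb(), so the single
+ * usb_kill_anchored_urbs() in the teardown path both stops and releases them.
+ * A condensed, hedged sketch of that lifecycle for one URB — device,
+ * callback and buffer size are hypothetical and error handling is trimmed:
+ */
#include <linux/usb.h>
#include <linux/slab.h>

#define DEMO_BUF_SIZE	512

static void demo_rx_complete(struct urb *urb)
{
	/* inspect urb->status, then resubmit or free urb->transfer_buffer */
}

static int demo_rx_start(struct usb_device *udev, struct usb_anchor *anchor,
			 unsigned int pipe)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	void *buf = kmalloc(DEMO_BUF_SIZE, GFP_KERNEL);
	int ret;

	if (!urb || !buf)
		goto err;

	usb_fill_bulk_urb(urb, udev, pipe, buf, DEMO_BUF_SIZE,
			  demo_rx_complete, NULL);
	usb_anchor_urb(urb, anchor);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_unanchor_urb(urb);
		goto err;
	}

	/* Drop our reference; killing the anchor later also frees the URB. */
	usb_free_urb(urb);
	return 0;
err:
	usb_free_urb(urb);
	kfree(buf);
	return -ENOMEM;
}
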
+
+static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ if (hif_dev->reg_in_urb) {
+ usb_kill_urb(hif_dev->reg_in_urb);
+ if (hif_dev->reg_in_urb->context)
+ kfree_skb((void *)hif_dev->reg_in_urb->context);
+ usb_free_urb(hif_dev->reg_in_urb);
+ hif_dev->reg_in_urb = NULL;
+ }
+}
+
+static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ struct sk_buff *skb;
+
+ hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (hif_dev->reg_in_urb == NULL)
+ return -ENOMEM;
+
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err;
+
+ usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, skb, 1);
+
+ if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ return -ENOMEM;
+}
+
+static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
+{
+ /* Register Write */
+ init_usb_anchor(&hif_dev->regout_submitted);
+
+ /* TX */
+ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* RX */
+ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* Register Read */
+ if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
+ goto err;
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+}
+
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+{
+ int transfer, err;
+ const void *data = hif_dev->firmware->data;
+ size_t len = hif_dev->firmware->size;
+ u32 addr = AR9271_FIRMWARE;
+ u8 *buf = kzalloc(4096, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ while (len) {
+ transfer = min_t(int, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(hif_dev->udev,
+ usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, HZ);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ /*
+ * Issue FW download complete command to firmware.
+ */
+ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD_COMP,
+ 0x40 | USB_DIR_OUT,
+ AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+ if (err)
+ return -EIO;
+
+	dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %lu\n",
+ "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+
+ return 0;
+}
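+
+/*
+ * Illustration (not part of the patch): the firmware download above streams
+ * the image in 4 KB control transfers, advancing the target address with each
+ * chunk (wValue carries addr >> 8).  A standalone sketch of just that
+ * chunking loop, with the USB transfer replaced by a print stub and a
+ * stand-in image size:
+ */
#include <stdio.h>

#define CHUNK		4096
#define FW_LOAD_ADDR	0x501000	/* AR9271_FIRMWARE, per hif_usb.h */

static int stub_transfer(unsigned int addr, const unsigned char *buf,
			 unsigned int len)
{
	printf("chunk: %4u bytes -> 0x%06x (wValue 0x%04x)\n",
	       len, addr, addr >> 8);
	return 0;
}

int main(void)
{
	static unsigned char image[10000];	/* stand-in firmware image */
	unsigned int addr = FW_LOAD_ADDR, off = 0, len = sizeof(image);

	while (len) {
		unsigned int transfer = len < CHUNK ? len : CHUNK;

		if (stub_transfer(addr, image + off, transfer))
			return 1;
		len -= transfer;
		off += transfer;
		addr += transfer;
	}
	return 0;
}
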
+
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
+ const char *fw_name)
+{
+ int ret;
+
+ /* Request firmware */
+ ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s not found\n", fw_name);
+ goto err_fw_req;
+ }
+
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
+ /* Download firmware */
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s download failed\n", fw_name);
+ goto err_fw_download;
+ }
+
+ return 0;
+
+err_fw_download:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+err_urb:
+ release_firmware(hif_dev->firmware);
+err_fw_req:
+ hif_dev->firmware = NULL;
+ return ret;
+}
+
+static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
+{
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ if (hif_dev->firmware)
+ release_firmware(hif_dev->firmware);
+}
+
+static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev;
+ const char *fw_name = (const char *) id->driver_info;
+ int ret = 0;
+
+ hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
+ if (!hif_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ usb_get_dev(udev);
+ hif_dev->udev = udev;
+ hif_dev->interface = interface;
+ hif_dev->device_id = id->idProduct;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif
+ usb_set_intfdata(interface, hif_dev);
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
+ &hif_dev->udev->dev);
+ if (hif_dev->htc_handle == NULL) {
+ ret = -ENOMEM;
+ goto err_htc_hw_alloc;
+ }
+
+ ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_hif_init_usb;
+ }
+
+ ret = ath9k_htc_hw_init(hif_dev->htc_handle,
+ &hif_dev->udev->dev, hif_dev->device_id);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
+
+ return 0;
+
+err_htc_hw_init:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_hif_init_usb:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+err_htc_hw_alloc:
+ usb_set_intfdata(interface, NULL);
+ kfree(hif_dev);
+ usb_put_dev(udev);
+err_alloc:
+ return ret;
+}
+
+static void ath9k_hif_usb_reboot(struct usb_device *udev)
+{
+ u32 reboot_cmd = 0xffffffff;
+ void *buf;
+ int ret;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memcpy(buf, &reboot_cmd, 4);
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ buf, 4, NULL, HZ);
+ if (ret)
+ dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
+
+ kfree(buf);
+}
+
+static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+	if (hif_dev) {
+		ath9k_htc_hw_deinit(hif_dev->htc_handle,
+			(udev->state == USB_STATE_NOTATTACHED) ? true : false);
+		ath9k_htc_hw_free(hif_dev->htc_handle);
+		ath9k_hif_usb_dev_deinit(hif_dev);
+		usb_set_intfdata(interface, NULL);
+
+		/* Only reboot the target if the USB layer had been started */
+		if (hif_dev->flags & HIF_USB_START)
+			ath9k_hif_usb_reboot(udev);
+
+		kfree(hif_dev);
+	}
+
+	dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
+	usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int ath9k_hif_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_resume(struct usb_interface *interface)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+ int ret;
+
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret)
+ return ret;
+
+ if (hif_dev->firmware) {
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret)
+ goto fail_resume;
+ } else {
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ ret = ath9k_htc_resume(hif_dev->htc_handle);
+
+ if (ret)
+ goto fail_resume;
+
+ return 0;
+
+fail_resume:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return ret;
+}
+#endif
+
+static struct usb_driver ath9k_hif_usb_driver = {
+ .name = "ath9k_hif_usb",
+ .probe = ath9k_hif_usb_probe,
+ .disconnect = ath9k_hif_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = ath9k_hif_usb_suspend,
+ .resume = ath9k_hif_usb_resume,
+ .reset_resume = ath9k_hif_usb_resume,
+#endif
+ .id_table = ath9k_hif_usb_ids,
+ .soft_unbind = 1,
+};
+
+int ath9k_hif_usb_init(void)
+{
+ return usb_register(&ath9k_hif_usb_driver);
+}
+
+void ath9k_hif_usb_exit(void)
+{
+ usb_deregister(&ath9k_hif_usb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 00000000000..0aca49b6fcb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_USB_H
+#define HTC_USB_H
+
+#define AR9271_FIRMWARE 0x501000
+#define AR9271_FIRMWARE_TEXT 0x903000
+
+#define FIRMWARE_DOWNLOAD 0x30
+#define FIRMWARE_DOWNLOAD_COMP 0x31
+
+#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
+#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
+
+/* FIXME: Verify these numbers (with Windows) */
+#define MAX_TX_URB_NUM 8
+#define MAX_TX_BUF_NUM 1024
+#define MAX_TX_BUF_SIZE 32768
+#define MAX_TX_AGGR_NUM 20
+
+#define MAX_RX_URB_NUM 8
+#define MAX_RX_BUF_SIZE 16384
+#define MAX_PKT_NUM_IN_TRANSFER 10
+
+#define MAX_REG_OUT_URB_NUM 1
+#define MAX_REG_OUT_BUF_NUM 8
+
+#define MAX_REG_IN_BUF_SIZE 64
+
+/* USB Endpoint definition */
+#define USB_WLAN_TX_PIPE 1
+#define USB_WLAN_RX_PIPE 2
+#define USB_REG_IN_PIPE 3
+#define USB_REG_OUT_PIPE 4
+
+#define HIF_USB_MAX_RXPIPES 2
+#define HIF_USB_MAX_TXPIPES 4
+
+struct tx_buf {
+ u8 *buf;
+ u16 len;
+ u16 offset;
+ struct urb *urb;
+ struct sk_buff_head skb_queue;
+ struct hif_device_usb *hif_dev;
+ struct list_head list;
+};
+
+#define HIF_USB_TX_STOP BIT(0)
+
+struct hif_usb_tx {
+ u8 flags;
+ u8 tx_buf_cnt;
+ u16 tx_skb_cnt;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head tx_buf;
+ struct list_head tx_pending;
+ spinlock_t tx_lock;
+};
+
+struct cmd_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
+#define HIF_USB_START BIT(0)
+
+struct hif_device_usb {
+ u16 device_id;
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ const struct firmware *firmware;
+ struct htc_target *htc_handle;
+ struct hif_usb_tx tx;
+ struct urb *reg_in_urb;
+ struct usb_anchor regout_submitted;
+ struct usb_anchor rx_submitted;
+ struct sk_buff *remain_skb;
+ int rx_remain_len;
+ int rx_pkt_len;
+ int rx_transfer_len;
+ int rx_pad_len;
+ spinlock_t rx_lock;
+ u8 flags; /* HIF_USB_* */
+};
+
+int ath9k_hif_usb_init(void);
+void ath9k_hif_usb_exit(void);
+
+#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 00000000000..c251603ab03
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "htc_hst.h"
+#include "hif_usb.h"
+#include "wmi.h"
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
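+/*
+ * Illustration (not part of the patch): the two helpers above convert to time
+ * units — 1 TU is 1024 us, so milliseconds scale by 1000/1024 and a 64-bit
+ * TSF (in microseconds) drops its low 10 bits.  A standalone check of both
+ * conversions with a sample TSF value:
+ */
#include <stdio.h>
#include <stdint.h>

#define MS_TO_TU(x)	(((x) * 1000) / 1024)
#define TSF_TO_TU(h, l)	((((uint32_t)(h)) << 22) | (((uint32_t)(l)) >> 10))

int main(void)
{
	uint64_t tsf = 123456789ULL;		/* sample TSF in microseconds */
	uint32_t tu = TSF_TO_TU(tsf >> 32, tsf);

	printf("100 ms        = %u TU\n", (unsigned int)MS_TO_TU(100));
	printf("TSF %llu us -> %u TU\n", (unsigned long long)tsf, tu);
	return 0;
}
+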
+extern struct ieee80211_ops ath9k_htc_ops;
+extern int htc_modparam_nohwcrypt;
+
+enum htc_phymode {
+ HTC_MODE_AUTO = 0,
+ HTC_MODE_11A = 1,
+ HTC_MODE_11B = 2,
+ HTC_MODE_11G = 3,
+ HTC_MODE_FH = 4,
+ HTC_MODE_TURBO_A = 5,
+ HTC_MODE_TURBO_G = 6,
+ HTC_MODE_11NA = 7,
+ HTC_MODE_11NG = 8
+};
+
+enum htc_opmode {
+ HTC_M_STA = 1,
+ HTC_M_IBSS = 0,
+ HTC_M_AHDEMO = 3,
+ HTC_M_HOSTAP = 6,
+ HTC_M_MONITOR = 8,
+ HTC_M_WDS = 2
+};
+
+#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
+#define ATH9K_HTC_AMPDU 1
+#define ATH9K_HTC_NORMAL 2
+
+#define ATH9K_HTC_TX_CTSONLY 0x1
+#define ATH9K_HTC_TX_RTSCTS 0x2
+#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
+
+struct tx_frame_hdr {
+ u8 data_type;
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u32 flags; /* ATH9K_HTC_TX_* */
+ u8 key_type;
+ u8 keyix;
+ u8 reserved[26];
+} __packed;
+
+struct tx_mgmt_hdr {
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u8 flags;
+ u8 key_type;
+ u8 keyix;
+ u16 reserved;
+} __packed;
+
+struct tx_beacon_header {
+ u8 len_changed;
+ u8 vif_index;
+ u16 rev;
+} __packed;
+
+struct ath9k_htc_target_hw {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_cap_target {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_target_vif {
+ u8 index;
+ u8 des_bssid[ETH_ALEN];
+ __be32 opmode;
+ u8 myaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u32 flags;
+ u32 flags_ext;
+ u16 ps_sta;
+ __be16 rtsthreshold;
+ u8 ath_cap;
+ u8 node;
+ s8 mcast_rate;
+} __packed;
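+
+/*
+ * Illustration (not part of the patch): fields tagged __be32/__be16 in the
+ * target structures above go to the firmware in big-endian order, so the host
+ * fills them through cpu_to_be*().  A small hedged fragment that would sit
+ * next to htc.h; the vif settings are made-up sample values, not driver code:
+ */
#include "htc.h"

static void demo_fill_target_vif(struct ath9k_htc_target_vif *tvif,
				 const u8 *myaddr, const u8 *bssid)
{
	memset(tvif, 0, sizeof(*tvif));
	tvif->index = 0;
	memcpy(tvif->myaddr, myaddr, ETH_ALEN);
	memcpy(tvif->bssid, bssid, ETH_ALEN);
	tvif->opmode = cpu_to_be32(HTC_M_STA);		/* station mode */
	tvif->rtsthreshold = cpu_to_be16(2304);		/* sample threshold */
}
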
+
+#define ATH_HTC_STA_AUTH 0x0001
+#define ATH_HTC_STA_QOS 0x0002
+#define ATH_HTC_STA_ERP 0x0004
+#define ATH_HTC_STA_HT 0x0008
+
+/* FIXME: UAPSD variables */
+struct ath9k_htc_target_sta {
+ u16 associd;
+ u16 txpower;
+ u32 ucastkey;
+ u8 macaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 sta_index;
+ u8 vif_index;
+ u8 vif_sta;
+ __be16 flags; /* ATH_HTC_STA_* */
+ u16 htcap;
+ u8 valid;
+ u16 capinfo;
+ struct ath9k_htc_target_hw *hw;
+ struct ath9k_htc_target_vif *vif;
+ u16 txseqmgmt;
+ u8 is_vif_sta;
+ u16 maxampdu;
+ u16 iv16;
+ u32 iv32;
+} __packed;
+
+struct ath9k_htc_target_aggr {
+ u8 sta_index;
+ u8 tidno;
+ u8 aggr_enable;
+ u8 padding;
+} __packed;
+
+#define ATH_HTC_RATE_MAX 30
+
+#define WLAN_RC_DS_FLAG 0x01
+#define WLAN_RC_40_FLAG 0x02
+#define WLAN_RC_SGI_FLAG 0x04
+#define WLAN_RC_HT_FLAG 0x08
+
+struct ath9k_htc_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_HTC_RATE_MAX];
+};
+
+struct ath9k_htc_rate {
+ struct ath9k_htc_rateset legacy_rates;
+ struct ath9k_htc_rateset ht_rates;
+} __packed;
+
+struct ath9k_htc_target_rate {
+ u8 sta_index;
+ u8 isnew;
+ __be32 capflags;
+ struct ath9k_htc_rate rates;
+};
+
+struct ath9k_htc_target_stats {
+ __be32 tx_shortretry;
+ __be32 tx_longretry;
+ __be32 tx_xretries;
+ __be32 ht_txunaggr_xretry;
+ __be32 ht_tx_xretries;
+} __packed;
+
+struct ath9k_htc_vif {
+ u8 index;
+};
+
+#define ATH9K_HTC_MAX_STA 8
+#define ATH9K_HTC_MAX_TID 8
+
+enum tid_aggr_state {
+ AGGR_STOP = 0,
+ AGGR_PROGRESS,
+ AGGR_START,
+ AGGR_OPERATIONAL
+};
+
+struct ath9k_htc_sta {
+ u8 index;
+ enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+};
+
+struct ath9k_htc_aggr_work {
+ u16 tid;
+ u8 sta_addr[ETH_ALEN];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum ieee80211_ampdu_mlme_action action;
+ struct mutex mutex;
+};
+
+#define ATH9K_HTC_RXBUF 256
+#define HTC_RX_FRAME_HEADER_SIZE 40
+
+struct ath9k_htc_rxbuf {
+ bool in_process;
+ struct sk_buff *skb;
+ struct ath_htc_rx_status rxstatus;
+ struct list_head list;
+};
+
+struct ath9k_htc_rx {
+ int last_rssi; /* FIXME: per-STA */
+ struct list_head rxbuf;
+ spinlock_t rxbuflock;
+};
+
+struct ath9k_htc_tx_ctl {
+ u8 type; /* ATH9K_HTC_* */
+};
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+
+struct ath_tx_stats {
+ u32 buf_queued;
+ u32 buf_completed;
+ u32 skb_queued;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath_rx_stats {
+ u32 skb_allocated;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ struct dentry *debugfs_tgt_stats;
+ struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
+ struct ath_tx_stats tx_stats;
+ struct ath_rx_stats rx_stats;
+ u32 txrate;
+};
+
+#else
+
+#define TX_STAT_INC(c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 8
+#define ATH_LED_PIN_9271 15
+#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
+#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
+
+enum ath_led_type {
+ ATH_LED_RADIO,
+ ATH_LED_ASSOC,
+ ATH_LED_TX,
+ ATH_LED_RX
+};
+
+struct ath_led {
+ struct ath9k_htc_priv *priv;
+ struct led_classdev led_cdev;
+ enum ath_led_type led_type;
+ struct delayed_work brightness_work;
+ char name[32];
+ bool registered;
+ int brightness;
+};
+
+struct htc_beacon_config {
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+};
+
+#define OP_INVALID BIT(0)
+#define OP_SCANNING BIT(1)
+#define OP_FULL_RESET BIT(2)
+#define OP_LED_ASSOCIATED BIT(3)
+#define OP_LED_ON BIT(4)
+#define OP_PREAMBLE_SHORT BIT(5)
+#define OP_PROTECT_ENABLE BIT(6)
+#define OP_TXAGGR BIT(7)
+#define OP_ASSOCIATED BIT(8)
+#define OP_ENABLE_BEACON BIT(9)
+#define OP_LED_DEINIT BIT(10)
+#define OP_UNPLUGGED BIT(11)
+
+struct ath9k_htc_priv {
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_hw *ah;
+ struct htc_target *htc;
+ struct wmi *wmi;
+
+ enum htc_endpoint_id wmi_cmd_ep;
+ enum htc_endpoint_id beacon_ep;
+ enum htc_endpoint_id cab_ep;
+ enum htc_endpoint_id uapsd_ep;
+ enum htc_endpoint_id mgmt_ep;
+ enum htc_endpoint_id data_be_ep;
+ enum htc_endpoint_id data_bk_ep;
+ enum htc_endpoint_id data_vi_ep;
+ enum htc_endpoint_id data_vo_ep;
+
+ u16 op_flags;
+ u16 curtxpow;
+ u16 txpowlimit;
+ u16 nvifs;
+ u16 nstations;
+ u16 seq_no;
+ u32 bmiss_cnt;
+
+ spinlock_t beacon_lock;
+
+ bool tx_queues_stop;
+ spinlock_t tx_lock;
+
+ struct ieee80211_vif *vif;
+ struct htc_beacon_config cur_beacon_conf;
+ unsigned int rxfilter;
+ struct tasklet_struct wmi_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ath9k_htc_rx rx;
+ struct tasklet_struct tx_tasklet;
+ struct sk_buff_head tx_queue;
+ struct ath9k_htc_aggr_work aggr_work;
+ struct delayed_work ath9k_aggr_work;
+ struct delayed_work ath9k_ani_work;
+ struct work_struct ps_work;
+
+ struct mutex htc_pm_lock;
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
+
+ struct ath_led radio_led;
+ struct ath_led assoc_led;
+ struct ath_led tx_led;
+ struct ath_led rx_led;
+ struct delayed_work ath9k_led_blink_work;
+ int led_on_duration;
+ int led_off_duration;
+ int led_on_cnt;
+ int led_off_cnt;
+ int hwq_map[ATH9K_WME_AC_VO+1];
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct ath9k_htc_target_rate tgt_rate;
+
+ struct mutex mutex;
+};
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
+
+void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id);
+void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
+ bool txok);
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok);
+
+void ath9k_htc_station_work(struct work_struct *work);
+void ath9k_htc_aggr_work(struct work_struct *work);
+void ath9k_ani_work(struct work_struct *work);
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv);
+void ath9k_tx_tasklet(unsigned long data);
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype qtype);
+int get_hw_qnum(u16 queue, int *hwq_map);
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo);
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_tasklet(unsigned long data);
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
+void ath9k_ps_work(struct work_struct *work);
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_init_leds(struct ath9k_htc_priv *priv);
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid);
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle);
+#endif
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+int ath9k_htc_debug_create_root(void);
+void ath9k_htc_debug_remove_root(void);
+int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_exit_debug(struct ath_hw *ah);
+#else
+static inline int ath9k_htc_debug_create_root(void) { return 0; }
+static inline void ath9k_htc_debug_remove_root(void) {}
+static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }
+static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {}
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 00000000000..c10c7d002eb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define FUDGE 2
+
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
+ enum ath9k_int imask = 0;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount, bmiss_timeout;
+ u32 nexttbtt = 0, intval, tsftu;
+ __be32 htc_imask = 0;
+ u64 tsf;
+ int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&bs, 0, sizeof(bs));
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = bss_conf->dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = 1;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0;
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+
+ num_beacons = tsftu / intval + 1;
+ offset = tsftu % intval;
+ nexttbtt = tsftu - offset;
+ if (offset)
+ nexttbtt += intval;
+
+ /* DTIM Beacon every dtimperiod Beacon */
+ dtim_dec_count = num_beacons % dtimperiod;
+ /* CFP every cfpperiod DTIM Beacon */
+ cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
+ if (dtim_dec_count)
+ cfp_dec_count++;
+
+ dtimcount -= dtim_dec_count;
+ if (dtimcount < 0)
+ dtimcount += dtimperiod;
+
+ cfpcount -= cfp_dec_count;
+ if (cfpcount < 0)
+ cfpcount += cfpperiod;
+
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU, so we only
+ * need to calculate based on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period, then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_print(common, ATH_DBG_BEACON,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+
+ /* Set the computed STA beacon timers */
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
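
To make the TBTT/DTIM arithmetic in ath9k_htc_beacon_config_sta() above easier to follow, here is a small standalone model of the same computation. It assumes the usual convention that TSF_TO_TU() converts the 64-bit microsecond TSF to 1024-microsecond time units (i.e. tsf >> 10); the sample TSF, interval and DTIM values are made up.

#include <stdio.h>

#define FUDGE 2

int main(void)
{
	unsigned long long tsf = 1234567890ULL;	/* made-up TSF, microseconds */
	unsigned int intval = 100;		/* beacon interval, TU */
	int dtimperiod = 2, dtimcount = 1;
	unsigned int tsftu, offset, nexttbtt;
	int num_beacons, dtim_dec_count;

	/* TSF_TO_TU(tsf >> 32, tsf): 1 TU = 1024 us, so this is tsf >> 10 */
	tsftu = (unsigned int)(tsf >> 10) + FUDGE;

	num_beacons = tsftu / intval + 1;
	offset = tsftu % intval;
	nexttbtt = tsftu - offset;
	if (offset)
		nexttbtt += intval;		/* next TBTT, in TU */

	/* pull the DTIM count forward by the beacons that already elapsed */
	dtim_dec_count = num_beacons % dtimperiod;
	dtimcount -= dtim_dec_count;
	if (dtimcount < 0)
		dtimcount += dtimperiod;

	printf("tsftu=%u nexttbtt=%u dtimcount=%d\n", tsftu, nexttbtt, dtimcount);
	return 0;
}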
+
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ nexttbtt = intval;
+ intval |= ATH9K_BEACON_ENA;
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_print(common, ATH_DBG_BEACON,
+ "IBSS Beacon config, intval: %d, imask: 0x%x\n",
+ bss_conf->beacon_interval, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
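
The WMI_CMD()/WMI_CMD_BUF() invocations in the two beacon-config helpers above expand in place and use the callers' otherwise-untouched ret and cmd_rsp locals. The macros themselves live in htc.h and are not part of this hunk; the following is only a hypothetical sketch of that pattern, built around the ath9k_wmi_cmd() signature visible later in this patch, not the driver's actual definition.

/*
 * HYPOTHETICAL sketch only -- the real macros may differ in detail
 * (e.g. the timeout value).  The shape below is meant to explain why
 * callers declare 'int ret' and 'u8 cmd_rsp' without touching them
 * explicitly: the macros issue the command through ath9k_wmi_cmd()
 * and assign the result to the caller's 'ret'.
 */
#define WMI_CMD_TIMEOUT		100	/* placeholder timeout */

#define WMI_CMD(_wmi_cmd)						\
	do {								\
		ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0,	\
				    (u8 *) &cmd_rsp, sizeof(cmd_rsp),	\
				    WMI_CMD_TIMEOUT);			\
	} while (0)

#define WMI_CMD_BUF(_wmi_cmd, _buf)					\
	do {								\
		ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd,		\
				    (u8 *) _buf, sizeof(*_buf),		\
				    (u8 *) &cmd_rsp, sizeof(cmd_rsp),	\
				    WMI_CMD_TIMEOUT);			\
	} while (0)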
+
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
+{
+ struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
+ struct tx_beacon_header beacon_hdr;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *beacon;
+ u8 *tx_fhdr;
+
+ memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ /* FIXME: Handle BMISS */
+ if (beacon_pending != 0) {
+ priv->bmiss_cnt++;
+ return;
+ }
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ if (unlikely(priv->op_flags & OP_SCANNING)) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /* Get a new beacon */
+ beacon = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (!beacon) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) beacon->data;
+ priv->seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ }
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ beacon_hdr.vif_index = avp->index;
+ tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
+ memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
+
+ htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl);
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ default:
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 00000000000..dc015077a8d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Atheros driver 802.11n HTC based wireless devices");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int htc_modparam_nohwcrypt;
+module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+static struct ieee80211_channel ath9k_2ghz_channels[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1e */
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1d */
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1c */
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
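
For reference, this runnable snippet reuses the document's SHPCHECK() macro to show where the 0x1e/0x1d/0x1c values in the comments above come from: the short-preamble hardware code is just the long-preamble code with 0x04 ORed in, and rates without the short-preamble flag get no short variant. The flag value is redefined locally only so the snippet compiles on its own; the real constant comes from mac80211.

#include <stdio.h>

#define IEEE80211_RATE_SHORT_PREAMBLE 0x1	/* local placeholder for the mac80211 flag */

#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

int main(void)
{
	/* 2 Mb/s CCK: long-preamble code 0x1a, short-preamble code 0x1e */
	printf("0x%02x\n", SHPCHECK(0x1a, IEEE80211_RATE_SHORT_PREAMBLE));

	/* OFDM rates carry no short-preamble variant, so hw_value_short is 0 */
	printf("0x%02x\n", SHPCHECK(0x0b, 0));
	return 0;
}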
+
+static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
+{
+ int time_left;
+
+ if (atomic_read(&priv->htc->tgt_ready) > 0) {
+ atomic_dec(&priv->htc->tgt_ready);
+ return 0;
+ }
+
+ /* Firmware can take up to 50ms to get ready; to be safe, wait up to 1 second */
+ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
+ if (!time_left) {
+ dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
+ return -ETIMEDOUT;
+ }
+
+ atomic_dec(&priv->htc->tgt_ready);
+
+ return 0;
+}
+
+static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
+{
+ ath9k_htc_exit_debug(priv->ah);
+ ath9k_hw_deinit(priv->ah);
+ tasklet_kill(&priv->wmi_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ kfree(priv->ah);
+ priv->ah = NULL;
+}
+
+static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+
+ wiphy_rfkill_stop_polling(hw->wiphy);
+ ath9k_deinit_leds(priv);
+ ieee80211_unregister_hw(hw);
+ ath9k_rx_cleanup(priv);
+ ath9k_tx_cleanup(priv);
+ ath9k_deinit_priv(priv);
+}
+
+static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
+ u16 service_id,
+ void (*tx) (void *,
+ struct sk_buff *,
+ enum htc_endpoint_id,
+ bool txok),
+ enum htc_endpoint_id *ep_id)
+{
+ struct htc_service_connreq req;
+
+ memset(&req, 0, sizeof(struct htc_service_connreq));
+
+ req.service_id = service_id;
+ req.ep_callbacks.priv = priv;
+ req.ep_callbacks.rx = ath9k_htc_rxep;
+ req.ep_callbacks.tx = tx;
+
+ return htc_connect_service(priv->htc, &req, ep_id);
+}
+
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+{
+ int ret;
+
+ /* WMI CMD */
+ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
+ if (ret)
+ goto err;
+
+ /* Beacon */
+ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
+ &priv->beacon_ep);
+ if (ret)
+ goto err;
+
+ /* CAB */
+ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
+ &priv->cab_ep);
+ if (ret)
+ goto err;
+
+ /* UAPSD */
+ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
+ &priv->uapsd_ep);
+ if (ret)
+ goto err;
+
+ /* MGMT */
+ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
+ &priv->mgmt_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BE */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
+ &priv->data_be_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BK */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
+ &priv->data_bk_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VI */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
+ &priv->data_vi_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VO */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
+ &priv->data_vo_ep);
+ if (ret)
+ goto err;
+
+ ret = htc_init(priv->htc);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
+ return ret;
+}
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ return ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
+}
+
+static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 val, reg = cpu_to_be32(reg_offset);
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *) &reg, sizeof(reg),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
+ return -EIO;
+ }
+
+ return be32_to_cpu(val);
+}
+
+static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 buf[2] = {
+ cpu_to_be32(reg_offset),
+ cpu_to_be32(val),
+ };
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
+ }
+}
+
+static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
+ cpu_to_be32(val);
+
+ priv->wmi->multi_write_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (atomic_read(&priv->wmi->mwrite_cnt))
+ ath9k_regwrite_buffer(hw_priv, val, reg_offset);
+ else
+ ath9k_regwrite_single(hw_priv, val, reg_offset);
+}
+
+static void ath9k_enable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_inc(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_disable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_dec(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_regwrite_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ if (priv->wmi->multi_write_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_regread,
+ .write = ath9k_regwrite,
+ .enable_write_buffer = ath9k_enable_regwrite_buffer,
+ .disable_write_buffer = ath9k_disable_regwrite_buffer,
+ .write_flush = ath9k_regwrite_flush,
+};
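
A sketch of how the buffered write path registered above is meant to be driven. The ops are invoked directly here only for illustration (real call sites go through the driver's register-write helpers), and the register offsets are made up; the point is the enable, queued writes, flush, disable sequencing, with the queued writes accumulated in wmi->multi_write and sent as a single WMI_REG_WRITE_CMDID.

/*
 * Illustration only.  Note the callback argument order is
 * (hw_priv, value, register offset), matching ath9k_regwrite() above.
 */
static void example_batched_regwrites(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	common->ops->enable_write_buffer(ah);	/* mwrite_cnt: 0 -> 1 */

	/* these land in wmi->multi_write[], nothing goes on the wire yet */
	common->ops->write(ah, 0x1, 0x9800);	/* value 0x1 to a made-up reg */
	common->ops->write(ah, 0x0, 0x9804);	/* value 0x0 to a made-up reg */

	/* one WMI_REG_WRITE_CMDID carrying both register/value pairs */
	common->ops->write_flush(ah);

	common->ops->disable_write_buffer(ah);	/* mwrite_cnt: 1 -> 0 */
}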
+
+static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT))
+ return false;
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static const struct ath_bus_ops ath9k_usb_bus_ops = {
+ .ath_bus_type = ATH_USB,
+ .read_cachesize = ath_usb_read_cachesize,
+ .eeprom_read = ath_usb_eeprom_read,
+};
+
+static void setup_ht_cap(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ ht_info->mcs.rx_mask[0] = 0xff;
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static int ath9k_init_queues(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
+ priv->hwq_map[i] = -1;
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = priv->ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(priv->ah, (u16) i);
+
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+}
+
+static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
+{
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
+ priv->sbands[IEEE80211_BAND_2GHZ].channels =
+ ath9k_2ghz_channels;
+ priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_channels);
+ priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+}
+
+static void ath9k_init_misc(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ common->tx_chainmask = priv->ah->caps.tx_chainmask;
+ common->rx_chainmask = priv->ah->caps.rx_chainmask;
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ priv->op_flags |= OP_TXAGGR;
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+}
+
+static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, csz = 0;
+
+ priv->op_flags |= OP_INVALID;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = 0; /* FIXME */
+ priv->ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = &ath9k_usb_bus_ops;
+ common->ah = ah;
+ common->hw = priv->hw;
+ common->priv = priv;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&priv->wmi->wmi_lock);
+ spin_lock_init(&priv->beacon_lock);
+ spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->aggr_work.mutex);
+ mutex_init(&priv->htc_pm_lock);
+ tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
+ INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ INIT_WORK(&priv->ps_work, ath9k_ps_work);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_htc_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(priv);
+ if (ret)
+ goto err_queues;
+
+ ath9k_init_crypto(priv);
+ ath9k_init_channels_rates(priv);
+ ath9k_init_misc(priv);
+
+ return 0;
+
+err_queues:
+ ath9k_htc_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+
+ kfree(ah);
+ priv->ah = NULL;
+
+ return ret;
+}
+
+static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->vif_data_size = sizeof(struct ath9k_htc_vif);
+ hw->sta_data_size = sizeof(struct ath9k_htc_sta);
+
+ /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
+ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
+ sizeof(struct htc_frame_hdr) + 4;
+
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->sbands[IEEE80211_BAND_2GHZ];
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ setup_ht_cap(priv,
+ &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_priv(priv, devid);
+ if (error != 0)
+ goto err_init;
+
+ ah = priv->ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(priv, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto err_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX */
+ error = ath9k_tx_init(priv);
+ if (error != 0)
+ goto err_tx;
+
+ /* Setup RX */
+ error = ath9k_rx_init(priv);
+ if (error != 0)
+ goto err_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto err_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto err_world;
+ }
+
+ ath9k_init_leds(priv);
+ ath9k_start_rfkill_poll(priv);
+
+ return 0;
+
+err_world:
+ ieee80211_unregister_hw(hw);
+err_register:
+ ath9k_rx_cleanup(priv);
+err_rx:
+ ath9k_tx_cleanup(priv);
+err_tx:
+ /* Nothing */
+err_regd:
+ ath9k_deinit_priv(priv);
+err_init:
+ return error;
+}
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid)
+{
+ struct ieee80211_hw *hw;
+ struct ath9k_htc_priv *priv;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->htc = htc_handle;
+ priv->dev = dev;
+ htc_handle->drv_priv = priv;
+ SET_IEEE80211_DEV(hw, priv->dev);
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ goto err_free;
+
+ priv->wmi = ath9k_init_wmi(priv);
+ if (!priv->wmi) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = ath9k_init_htc_services(priv);
+ if (ret)
+ goto err_init;
+
+ /* The device may have been unplugged earlier. */
+ priv->op_flags &= ~OP_UNPLUGGED;
+
+ ret = ath9k_init_device(priv, devid);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ ath9k_deinit_wmi(priv);
+err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+{
+ if (htc_handle->drv_priv) {
+
+ /* Check if the device has been yanked out. */
+ if (hotunplug)
+ htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+ ath9k_deinit_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+}
+
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle)
+{
+ int ret;
+
+ ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+ if (ret)
+ return ret;
+
+ ret = ath9k_init_htc_services(htc_handle->drv_priv);
+ return ret;
+}
+#endif
+
+static int __init ath9k_htc_init(void)
+{
+ int error;
+
+ error = ath9k_htc_debug_create_root();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: Unable to create debugfs root: %d\n",
+ error);
+ goto err_dbg;
+ }
+
+ error = ath9k_hif_usb_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: No USB devices found,"
+ " driver not installed.\n");
+ error = -ENODEV;
+ goto err_usb;
+ }
+
+ return 0;
+
+err_usb:
+ ath9k_htc_debug_remove_root();
+err_dbg:
+ return error;
+}
+module_init(ath9k_htc_init);
+
+static void __exit ath9k_htc_exit(void)
+{
+ ath9k_hif_usb_exit();
+ ath9k_htc_debug_remove_root();
+ printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
+}
+module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 00000000000..9d371c18eb4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1775 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+static struct dentry *ath9k_debugfs_root;
+#endif
+
+/*************/
+/* Utilities */
+/*************/
+
+static void ath_update_txpow(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ u32 txpow;
+
+ if (priv->curtxpow != priv->txpowlimit) {
+ ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
+ priv->curtxpow = txpow;
+ }
+}
+
+/* HACK Alert: Use 11NG for 2.4 GHz, 11NA for 5 GHz */
+static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
+ struct ath9k_channel *ichan)
+{
+ enum htc_phymode mode;
+
+ mode = HTC_MODE_AUTO;
+
+ switch (ichan->chanmode) {
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ mode = HTC_MODE_11NG;
+ break;
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ mode = HTC_MODE_11NA;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+
+static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
+{
+ bool ret;
+
+ mutex_lock(&priv->htc_pm_lock);
+ ret = ath9k_hw_setpower(priv->ah, mode);
+ mutex_unlock(&priv->htc_pm_lock);
+
+ return ret;
+}
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (++priv->ps_usecount != 1)
+ goto unlock;
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (--priv->ps_usecount != 0)
+ goto unlock;
+
+ if (priv->ps_idle)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
+ else if (priv->ps_enabled)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
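
A minimal usage sketch of the two helpers above: every block of hardware access is bracketed by a wakeup/restore pair, and only the transitions of ps_usecount to and from zero actually change the chip's power state. The placeholder body stands in for any WMI or register traffic.

/* Illustration of the intended calling pattern. */
static void example_hw_access(struct ath9k_htc_priv *priv)
{
	ath9k_htc_ps_wakeup(priv);	/* ps_usecount 0 -> 1: chip forced AWAKE */

	/* ... access the hardware here ... */

	ath9k_htc_ps_restore(priv);	/* ps_usecount 1 -> 0: back to
					 * FULL_SLEEP or NETWORK_SLEEP,
					 * depending on ps_idle/ps_enabled */
}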
+
+void ath9k_ps_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+
+ /*
+ * The chip wakes up after receiving the first beacon
+ * while network sleep is enabled. For the driver to
+ * be in sync with the hw, set the chip to awake and
+ * only then set it to sleep.
+ */
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+}
+
+static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ bool fastcc = true;
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ if (priv->op_flags & OP_INVALID)
+ return -EIO;
+
+ if (priv->op_flags & OP_FULL_RESET)
+ fastcc = false;
+
+ /* Fiddle around with fastcc later on, for now just use full reset */
+ fastcc = false;
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
+
+ ret = ath9k_hw_reset(ah, hchan, fastcc);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset channel (%u Mhz) "
+ "reset status %d\n", channel->center_freq, ret);
+ goto err;
+ }
+
+ ath_update_txpow(priv);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ if (ret)
+ goto err;
+
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, hchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ if (ret)
+ goto err;
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ if (ret)
+ goto err;
+
+ htc_start(priv->htc);
+
+ priv->op_flags &= ~OP_FULL_RESET;
+err:
+ ath9k_htc_ps_restore(priv);
+ return ret;
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->nvifs > 0)
+ return -ENOBUFS;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+
+ hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
+ hvif.index = priv->nvifs;
+
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ return ret;
+
+ priv->nvifs++;
+ return 0;
+}
+
+static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = 0; /* Should do for now */
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ return ret;
+}
+
+static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_sta tsta;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp;
+
+ if (priv->nstations >= ATH9K_HTC_MAX_STA)
+ return -ENOBUFS;
+
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
+ memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
+ tsta.associd = common->curaid;
+ tsta.is_vif_sta = 0;
+ tsta.valid = true;
+ ista->index = priv->nstations;
+ } else {
+ memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
+ tsta.is_vif_sta = 1;
+ }
+
+ tsta.sta_index = priv->nstations;
+ tsta.vif_index = avp->index;
+ tsta.maxampdu = 0xffff;
+ if (sta && sta->ht_cap.ht_supported)
+ tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to add station entry for: %pM\n", sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+
+ priv->nstations++;
+ return 0;
+}
+
+static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp, sta_idx;
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+
+ priv->nstations--;
+ return 0;
+}
+
+static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_cap_target tcap;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
+
+ /* FIXME: Values are hardcoded */
+ tcap.flags = 0x240c40;
+ tcap.flags_ext = 0x80601000;
+ tcap.ampdu_limit = 0xffff0000;
+ tcap.ampdu_subframes = 20;
+ tcap.tx_chainmask_legacy = 1;
+ tcap.protmode = 1;
+ tcap.tx_chainmask = 1;
+
+ WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
+
+ return ret;
+}
+
+static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ struct ieee80211_supported_band *sband;
+ struct ath9k_htc_target_rate trate;
+ u32 caps = 0;
+ u8 cmd_rsp;
+ int i, j, ret;
+
+ memset(&trate, 0, sizeof(trate));
+
+ /* Only 2GHz is supported */
+ sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[sband->band] & BIT(i)) {
+ priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+
+ if (sta->ht_cap.ht_supported) {
+ for (i = 0, j = 0; i < 77; i++) {
+ if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+ if (j == ATH_HTC_RATE_MAX)
+ break;
+ }
+ priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+
+ caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ caps |= WLAN_RC_40_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ caps |= WLAN_RC_SGI_FLAG;
+
+ }
+
+ priv->tgt_rate.sta_index = ista->index;
+ priv->tgt_rate.isnew = 1;
+ trate = priv->tgt_rate;
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize Rate information on target\n");
+ return ret;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
+ return 0;
+}
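
A small standalone check of the legacy-rate conversion used in ath9k_htc_init_rate() above: mac80211 reports bitrates in units of 100 kb/s, while the target rate table expects the 802.11 supported-rates encoding in units of 500 kb/s, hence (bitrate * 2) / 10. The sample values match entries of ath9k_legacy_rates.

#include <stdio.h>

int main(void)
{
	int bitrates_100kbps[] = { 10, 20, 55, 110, 60, 540 };
	int i;

	for (i = 0; i < 6; i++)
		printf("%2d.%d Mb/s -> rs_rate %d\n",
		       bitrates_100kbps[i] / 10, bitrates_100kbps[i] % 10,
		       (bitrates_100kbps[i] * 2) / 10);

	return 0;
}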
+
+static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (!conf_is_ht(conf))
+ return false;
+
+ if (!(priv->op_flags & OP_ASSOCIATED) ||
+ (priv->op_flags & OP_SCANNING))
+ return false;
+
+ if (conf_is_ht40(conf)) {
+ if (priv->ah->curchan->chanmode &
+ (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
+ return false;
+ } else {
+ *cw40 = true;
+ return true;
+ }
+ } else { /* ht20 */
+ if (priv->ah->curchan->chanmode & CHANNEL_HT20)
+ return false;
+ else
+ return true;
+ }
+}
+
+static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
+{
+ struct ath9k_htc_target_rate trate;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
+ u8 cmd_rsp;
+
+ memset(&trate, 0, sizeof(trate));
+
+ trate = priv->tgt_rate;
+
+ if (is_cw40)
+ caps |= WLAN_RC_40_FLAG;
+ else
+ caps &= ~WLAN_RC_40_FLAG;
+
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to update Rate information on target\n");
+ return;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
+ "caps:0x%x on target\n", priv->tgt_rate.capflags);
+}
+
+static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ u8 *sta_addr, u8 tid, bool oper)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_aggr aggr;
+ struct ieee80211_sta *sta = NULL;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (tid >= ATH9K_HTC_MAX_TID)
+ return -EINVAL;
+
+ memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
+
+ rcu_read_lock();
+
+ /* Check if we are able to retrieve the station */
+ sta = ieee80211_find_sta(vif, sta_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ if (oper)
+ ista->tid_state[tid] = AGGR_START;
+ else
+ ista->tid_state[tid] = AGGR_STOP;
+
+ aggr.sta_index = ista->index;
+
+ rcu_read_unlock();
+
+ aggr.tidno = tid;
+ aggr.aggr_enable = oper;
+
+ WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (oper) ? "start" : "stop", sta->addr, tid);
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "%s aggregation for (%pM, %d)\n",
+ (oper) ? "Starting" : "Stopping", sta->addr, tid);
+
+ return ret;
+}
+
+void ath9k_htc_aggr_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_aggr_work.work);
+ struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
+
+ mutex_lock(&wk->mutex);
+
+ switch (wk->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, true);
+ if (!ret)
+ ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
+ wk->tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, false);
+ ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ mutex_unlock(&wk->mutex);
+}
+
+/*********/
+/* DEBUG */
+/*********/
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+static int ath9k_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ struct ath9k_htc_target_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_TGT_STATS_CMDID);
+ if (ret)
+ return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Short Retries",
+ be32_to_cpu(cmd_rsp.tx_shortretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Long Retries",
+ be32_to_cpu(cmd_rsp.tx_longretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries",
+ be32_to_cpu(cmd_rsp.tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Unaggr. Xretries",
+ be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries (HT)",
+ be32_to_cpu(cmd_rsp.ht_tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Rate", priv->debug.txrate);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.tx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs dropped",
+ priv->debug.tx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_xmit = {
+ .read = read_file_xmit,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+int ath9k_htc_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
+ ath9k_debugfs_root);
+ if (!priv->debug.debugfs_phy)
+ goto err;
+
+ priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_tgt_stats);
+ if (!priv->debug.debugfs_tgt_stats)
+ goto err;
+
+ priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_xmit);
+ if (!priv->debug.debugfs_xmit)
+ goto err;
+
+ priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_recv);
+ if (!priv->debug.debugfs_recv)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_htc_exit_debug(ah);
+ return -ENOMEM;
+}
+
+void ath9k_htc_exit_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ debugfs_remove(priv->debug.debugfs_recv);
+ debugfs_remove(priv->debug.debugfs_xmit);
+ debugfs_remove(priv->debug.debugfs_tgt_stats);
+ debugfs_remove(priv->debug.debugfs_phy);
+}
+
+int ath9k_htc_debug_create_root(void)
+{
+ ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ return 0;
+}
+
+void ath9k_htc_debug_remove_root(void)
+{
+ debugfs_remove(ath9k_debugfs_root);
+ ath9k_debugfs_root = NULL;
+}
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+/*******/
+/* ANI */
+/*******/
+
+static void ath_start_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
+void ath9k_ani_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_ani_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval;
+
+ short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+ longcal = true;
+ ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >=
+ short_cal_interval) {
+ shortcal = true;
+ ath_print(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Skip all processing if there's nothing to do. */
+ if (longcal || shortcal || aniflag) {
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Call ANI routine if necessary */
+ if (aniflag)
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ common->rx_chainmask,
+ longcal);
+
+ if (longcal)
+ common->ani.noise_floor =
+ ath9k_hw_getchan_noise(ah, ah->curchan);
+
+ ath_print(common, ATH_DBG_ANI,
+ " calibrate chan %u/%x nf: %d\n",
+ ah->curchan->channel,
+ ah->curchan->channelFlags,
+ common->ani.noise_floor);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ }
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (priv->ah->config.enable_ani)
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(cal_interval));
+}
+
+/*******/
+/* LED */
+/*******/
+
+static void ath9k_led_blink_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ ath9k_led_blink_work.work);
+
+ if (!(priv->op_flags & OP_LED_ASSOCIATED))
+ return;
+
+ if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->op_flags & OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work,
+ (priv->op_flags & OP_LED_ON) ?
+ msecs_to_jiffies(priv->led_off_duration) :
+ msecs_to_jiffies(priv->led_on_duration));
+
+ priv->led_on_duration = priv->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ priv->led_off_duration = priv->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ priv->led_on_cnt = priv->led_off_cnt = 0;
+
+ if (priv->op_flags & OP_LED_ON)
+ priv->op_flags &= ~OP_LED_ON;
+ else
+ priv->op_flags |= OP_LED_ON;
+}
+
+static void ath9k_led_brightness_work(struct work_struct *work)
+{
+ struct ath_led *led = container_of(work, struct ath_led,
+ brightness_work.work);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ switch (led->brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ priv->op_flags &= ~OP_LED_ON;
+ } else {
+ priv->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ priv->op_flags |= OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ priv->op_flags |= OP_LED_ON;
+ } else {
+ priv->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ led->brightness = brightness;
+ if (!(priv->op_flags & OP_LED_DEINIT))
+ ieee80211_queue_delayed_work(priv->hw,
+ &led->brightness_work, 0);
+}
+
+static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->radio_led.brightness_work);
+ cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
+ cancel_delayed_work_sync(&priv->tx_led.brightness_work);
+ cancel_delayed_work_sync(&priv->rx_led.brightness_work);
+}
+
+static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->priv = priv;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+
+ INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
+
+ return ret;
+}
+
+static void ath9k_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ priv->op_flags |= OP_LED_DEINIT;
+ ath9k_unregister_led(&priv->assoc_led);
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ ath9k_unregister_led(&priv->tx_led);
+ ath9k_unregister_led(&priv->rx_led);
+ ath9k_unregister_led(&priv->radio_led);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure the LED GPIO pin for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->radio_led, trigger);
+ priv->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
+ priv->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->tx_led, trigger);
+ priv->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->rx_led, trigger);
+ priv->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ priv->op_flags &= ~OP_LED_DEINIT;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_deinit_leds(priv);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+}
+
+static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
+
+/**********************/
+/* mac80211 Callbacks */
+/**********************/
+
+static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_priv *priv = hw->priv;
+ int padpos, padsize, ret;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -1;
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ ret = ath9k_htc_tx_start(priv, skb);
+ if (ret != 0) {
+ if (ret == -ENOMEM) {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Stopping TX queues\n");
+ ieee80211_stop_queues(hw);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = true;
+ spin_unlock_bh(&priv->tx_lock);
+ } else {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Tx failed");
+ }
+ goto fail_tx;
+ }
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *init_channel;
+ int ret = 0;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* setup initial channel */
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+
+ ath9k_hw_htc_resetinit(ah);
+ ret = ath9k_hw_reset(ah, init_channel, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, curchan->center_freq);
+ return ret;
+ }
+
+ ath_update_txpow(priv);
+
+ mode = ath9k_htc_get_curmode(priv, init_channel);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ WMI_CMD(WMI_ATH_INIT_CMDID);
+ WMI_CMD(WMI_START_RECV_CMDID);
+
+ ath9k_host_rx_init(priv);
+
+ priv->op_flags &= ~OP_INVALID;
+ htc_start(priv->htc);
+
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (led) {
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ ieee80211_wake_queues(hw);
+
+ return ret;
+}
+
+static int ath9k_htc_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ret = ath9k_htc_radio_enable(hw, false);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->op_flags & OP_INVALID) {
+ ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ return;
+ }
+
+ if (led) {
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ cancel_delayed_work_sync(&priv->ath9k_aggr_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_led_stop_brightness(priv);
+
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Remove monitor interface here */
+ if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ath9k_htc_remove_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove monitor interface\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor interface removed\n");
+ }
+
+ priv->op_flags |= OP_INVALID;
+
+ ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+}
+
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_radio_disable(hw, false);
+ mutex_unlock(&priv->mutex);
+}
+
+
+static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ /* Only one interface for now */
+ if (priv->nvifs > 0) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ath9k_htc_ps_wakeup(priv);
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ hvif.opmode = cpu_to_be32(HTC_M_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ hvif.opmode = cpu_to_be32(HTC_M_IBSS);
+ break;
+ default:
+ ath_print(common, ATH_DBG_FATAL,
+ "Interface type %d not yet supported\n", vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ priv->ah->opmode = vif->type;
+
+ /* Index starts from zero on the target */
+ avp->index = hvif.index = priv->nvifs;
+ hvif.rtsthreshold = cpu_to_be16(2304);
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto out;
+
+ priv->nvifs++;
+
+ /*
+ * We need a node in target to tx mgmt frames
+ * before association.
+ */
+ ret = ath9k_htc_add_station(priv, vif, NULL);
+ if (ret)
+ goto out;
+
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG, "Failed to update"
+ " capability in target \n");
+
+ priv->vif = vif;
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+
+ mutex_lock(&priv->mutex);
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+ hvif.index = avp->index;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ ath9k_htc_remove_station(priv, vif, NULL);
+ priv->vif = NULL;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ mutex_lock(&priv->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ bool enable_radio = false;
+ bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+
+ if (!idle && priv->ps_idle)
+ enable_radio = true;
+
+ priv->ps_idle = idle;
+
+ if (enable_radio) {
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ ath9k_htc_radio_enable(hw, true);
+ ath_print(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos = curchan->hw_value;
+ bool is_cw40 = false;
+
+ ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ if (check_rc_update(hw, &is_cw40))
+ ath9k_htc_rc_update(priv, is_cw40);
+
+ ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+
+ if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to set channel\n");
+ mutex_unlock(&priv->mutex);
+ return -EINVAL;
+ }
+
+ }
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+ priv->ps_enabled = true;
+ } else {
+ priv->ps_enabled = false;
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Failed to set monitor mode\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
+ if (priv->ps_idle) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "idle: disabling radio\n");
+ ath9k_htc_radio_disable(hw, true);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u32 rfilt;
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_ps_wakeup(priv);
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ priv->rxfilter = *total_flags;
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(priv->ah, rfilt);
+
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret)
+ ath9k_htc_init_rate(priv, vif, sta);
+ break;
+ case STA_NOTIFY_REMOVE:
+ ath9k_htc_remove_station(priv, vif, sta);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= WME_NUM_AC)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop;
+
+ qnum = get_hw_qnum(queue, priv->hwq_map);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ret = ath_htc_txq_update(priv, qnum, &qi);
+ if (ret)
+ ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+
+ if (htc_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ mutex_lock(&priv->mutex);
+ ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath9k_cmn_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->alg == ALG_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath9k_cmn_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ if (bss_conf->assoc) {
+ priv->op_flags |= OP_ASSOCIATED;
+ ath_start_ani(priv);
+ } else {
+ priv->op_flags &= ~OP_ASSOCIATED;
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ /* Set BSSID */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) ||
+ (changed & BSS_CHANGED_BEACON) ||
+ ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ bss_conf->enable_beacon)) {
+ priv->op_flags |= OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ priv->op_flags |= OP_PREAMBLE_SHORT;
+ else
+ priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+ priv->op_flags |= OP_PROTECT_ENABLE;
+ else
+ priv->op_flags &= ~OP_PROTECT_ENABLE;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ah->slottime = 9;
+ else
+ ah->slottime = 20;
+
+ ath9k_hw_init_global_settings(ah);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&priv->mutex);
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ mutex_unlock(&priv->mutex);
+
+ return tsf;
+}
+
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_hw_settsf64(priv->ah, tsf);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ ath9k_hw_reset_tsf(priv->ah);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_aggr_work *work = &priv->aggr_work;
+ struct ath9k_htc_sta *ista;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ if (!(priv->op_flags & OP_TXAGGR))
+ return -ENOTSUPP;
+ memcpy(work->sta_addr, sta->addr, ETH_ALEN);
+ work->hw = hw;
+ work->vif = vif;
+ work->action = action;
+ work->tid = tid;
+ ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ ista->tid_state[tid] = AGGR_OPERATIONAL;
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags &= ~OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_FULL_RESET;
+ if (priv->op_flags & OP_ASSOCIATED)
+ ath9k_htc_beacon_config(priv, priv->vif);
+ ath_start_ani(priv);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ priv->ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(priv->ah);
+ mutex_unlock(&priv->mutex);
+}
+
+struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+ .add_interface = ath9k_htc_add_interface,
+ .remove_interface = ath9k_htc_remove_interface,
+ .config = ath9k_htc_config,
+ .configure_filter = ath9k_htc_configure_filter,
+ .sta_notify = ath9k_htc_sta_notify,
+ .conf_tx = ath9k_htc_conf_tx,
+ .bss_info_changed = ath9k_htc_bss_info_changed,
+ .set_key = ath9k_htc_set_key,
+ .get_tsf = ath9k_htc_get_tsf,
+ .set_tsf = ath9k_htc_set_tsf,
+ .reset_tsf = ath9k_htc_reset_tsf,
+ .ampdu_action = ath9k_htc_ampdu_action,
+ .sw_scan_start = ath9k_htc_sw_scan_start,
+ .sw_scan_complete = ath9k_htc_sw_scan_complete,
+ .set_rts_threshold = ath9k_htc_set_rts_threshold,
+ .rfkill_poll = ath9k_htc_rfkill_poll_state,
+ .set_coverage_class = ath9k_htc_set_coverage_class,
+};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 00000000000..2571b443ac8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******/
+/* TX */
+/******/
+
+int get_hw_qnum(u16 queue, int *hwq_map)
+{
+ switch (queue) {
+ case 0:
+ return hwq_map[ATH9K_WME_AC_VO];
+ case 1:
+ return hwq_map[ATH9K_WME_AC_VI];
+ case 2:
+ return hwq_map[ATH9K_WME_AC_BE];
+ case 3:
+ return hwq_map[ATH9K_WME_AC_BK];
+ default:
+ return hwq_map[ATH9K_WME_AC_BE];
+ }
+}
+
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = priv->ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+ u8 sta_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+ avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ if (ieee80211_is_data(fc)) {
+ struct tx_frame_hdr tx_hdr;
+ u8 *qc;
+
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+ tx_hdr.vif_idx = avp->index;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+ tx_hdr.data_type = ATH9K_HTC_AMPDU;
+ } else {
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ tx_hdr.data_type = ATH9K_HTC_NORMAL;
+ }
+
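+ /* Only QoS data frames carry a QoS control field with the TID */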
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ /* Check for RTS protection */
+ if ((priv->hw->wiphy->rts_threshold != (u32) -1) &&
+ (skb->len > priv->hw->wiphy->rts_threshold))
+ tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+
+ /* CTS-to-self */
+ if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+ (priv->op_flags & OP_PROTECT_ENABLE))
+ tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+
+ tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(tx_hdr));
+ memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
+
+ qnum = skb_get_queue_mapping(skb);
+ hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
+
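+ /* Map the hardware queue number to the corresponding HTC data endpoint */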
+ switch (hw_qnum) {
+ case 0:
+ epid = priv->data_be_ep;
+ break;
+ case 2:
+ epid = priv->data_vi_ep;
+ break;
+ case 3:
+ epid = priv->data_vo_ep;
+ break;
+ case 1:
+ default:
+ epid = priv->data_bk_ep;
+ break;
+ }
+ } else {
+ struct tx_mgmt_hdr mgmt_hdr;
+
+ memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+ mgmt_hdr.vif_idx = avp->index;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
+ mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
+ memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
+ epid = priv->mgmt_ep;
+ }
+
+ return htc_send(priv->htc, skb, epid, &tx_ctl);
+}
+
+void ath9k_tx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb = NULL;
+ __le16 fc;
+
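+ /* Complete all queued frames back to mac80211 */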
+ while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ memset(&tx_info->status, 0, sizeof(tx_info->status));
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (!sta) {
+ rcu_read_unlock();
+ ieee80211_tx_status(priv->hw, skb);
+ continue;
+ }
+
+ /* Check if we need to start aggregation */
+
+ if (conf_is_ht(&priv->hw->conf) &&
+ (priv->op_flags & OP_TXAGGR) &&
+ skb->protocol != cpu_to_be16(ETH_P_PAE)) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+ struct ath9k_htc_sta *ista;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ista = (struct ath9k_htc_sta *)sta->drv_priv;
+
+ if ((tid < ATH9K_HTC_MAX_TID) &&
+ ista->tid_state[tid] == AGGR_STOP) {
+ ieee80211_start_tx_ba_session(sta, tid);
+ ista->tid_state[tid] = AGGR_PROGRESS;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
+ /* Wake TX queues if needed */
+ spin_lock_bh(&priv->tx_lock);
+ if (priv->tx_queues_stop) {
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Waking up TX queues\n");
+ ieee80211_wake_queues(priv->hw);
+ return;
+ }
+ spin_unlock_bh(&priv->tx_lock);
+}
+
+void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return;
+
+ if (ep_id == priv->mgmt_ep) {
+ skb_pull(skb, sizeof(struct tx_mgmt_hdr));
+ } else if ((ep_id == priv->data_bk_ep) ||
+ (ep_id == priv->data_be_ep) ||
+ (ep_id == priv->data_vi_ep) ||
+ (ep_id == priv->data_vo_ep)) {
+ skb_pull(skb, sizeof(struct tx_frame_hdr));
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unsupported TX EPID: %d\n", ep_id);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (txok)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ skb_queue_tail(&priv->tx_queue, skb);
+ tasklet_schedule(&priv->tx_tasklet);
+}
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv)
+{
+ skb_queue_head_init(&priv->tx_queue);
+ return 0;
+}
+
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
+{
+
+}
+
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype subtype)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memset(&qi, 0, sizeof(qi));
+
+ qi.tqi_subtype = subtype;
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+
+ qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
+ if (qnum == -1)
+ return false;
+
+ if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "qnum %u out of range, max %u!\n",
+ qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return false;
+ }
+
+ priv->hwq_map[subtype] = qnum;
+ return true;
+}
+
+/******/
+/* RX */
+/******/
+
+/*
+ * Calculate the RX filter to be set in the HW.
+ */
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* If not a STA, enable processing of Probe Requests */
+ if (ah->opmode != NL80211_IFTYPE_STATION)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
+ * mode interface or when in monitor mode. AP mode does not need this
+ * since it receives all in-BSS frames anyway.
+ */
+ if (((ah->opmode != NL80211_IFTYPE_AP) &&
+ (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
+ (ah->opmode == NL80211_IFTYPE_MONITOR))
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if (priv->rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION) &&
+ !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if (conf_is_ht(&priv->hw->conf))
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+
+ return rfilt;
+
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Recv initialization for opmode change.
+ */
+static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ ath_hw_setbssidmask(common);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* Handle any link-level address change. */
+ ath9k_hw_setmac(ah, common->macaddr);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
+{
+ ath9k_hw_rxena(priv->ah);
+ ath9k_htc_opmode_init(priv);
+ ath9k_hw_startpcureceive(priv->ah);
+ priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+}
+
+static void ath9k_process_rate(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *rxs,
+ u8 rx_rate, u8 rs_flags)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+
+ if (rx_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ if (rs_flags & ATH9K_RX_2040)
+ rxs->flag |= RX_FLAG_40MHZ;
+ if (rs_flags & ATH9K_RX_GI)
+ rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->rate_idx = rx_rate & 0x7f;
+ return;
+ }
+
+ band = hw->conf.channel->band;
+ sband = hw->wiphy->bands[band];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_rate) {
+ rxs->rate_idx = i;
+ return;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_rate) {
+ rxs->rate_idx = i;
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ return;
+ }
+ }
+}
+
+static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_rxbuf *rxbuf,
+ struct ieee80211_rx_status *rx_status)
+
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_hw *hw = priv->hw;
+ struct sk_buff *skb = rxbuf->skb;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_htc_rx_status *rxstatus;
+ int hdrlen, padpos, padsize;
+ int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ __le16 fc;
+
+ if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX frame, dropping\n");
+ goto rx_next;
+ }
+
+ rxstatus = (struct ath_htc_rx_status *)skb->data;
+
+ if (be16_to_cpu(rxstatus->rs_datalen) !=
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX data len, dropping "
+ "(dlen: %d, skblen: %d)\n",
+ be16_to_cpu(rxstatus->rs_datalen), skb->len);
+ goto rx_next;
+ }
+
+ /* Get the RX status information */
+ memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padpos = ath9k_cmn_padpos(fc);
+
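+ /* Strip the alignment padding inserted after the 802.11 header */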
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
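+ /* Translate hardware error status into mac80211 RX flags; drop unrecoverable frames */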
+ if (rxbuf->rxstatus.rs_status != 0) {
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
+ goto rx_next;
+
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
+ /* FIXME */
+ } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+ * Remove these mic errors.
+ */
+ rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
+ else
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ }
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ goto rx_next;
+ } else {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ goto rx_next;
+ }
+ }
+ }
+
+ if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
+ u8 keyix;
+ keyix = rxbuf->rxstatus.rs_keyix;
+ if (keyix != ATH9K_RXKEYIX_INVALID) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc) &&
+ skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+ if (test_bit(keyix, common->keymap))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+ }
+
+ ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
+ rxbuf->rxstatus.rs_flags);
+
+ if (priv->op_flags & OP_ASSOCIATED) {
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
+
+ last_rssi = priv->rx.last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
+
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
+
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+ rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->flag |= RX_FLAG_TSFT;
+
+ return true;
+
+rx_next:
+ return false;
+}
+
+/*
+ * FIXME: Handle FLUSH later on.
+ */
+void ath9k_rx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr;
+
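+ /* Process every buffer marked as in_process and hand the frames to mac80211 */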
+ do {
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+
+ if (rxbuf == NULL) {
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ break;
+ }
+
+ if (!rxbuf->skb)
+ goto requeue;
+
+ if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
+ dev_kfree_skb_any(rxbuf->skb);
+ goto requeue;
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
+ sizeof(struct ieee80211_rx_status));
+ skb = rxbuf->skb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
+ ieee80211_queue_work(priv->hw, &priv->ps_work);
+
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+
+ ieee80211_rx(priv->hw, skb);
+
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+requeue:
+ rxbuf->in_process = false;
+ rxbuf->skb = NULL;
+ list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
+ rxbuf = NULL;
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ } while (1);
+}
+
+void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+
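+ /* Claim a free RX buffer for the incoming frame */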
+ spin_lock(&priv->rx.rxbuflock);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (!tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+ spin_unlock(&priv->rx.rxbuflock);
+
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_ANY,
+ "No free RX buffer\n");
+ goto err;
+ }
+
+ spin_lock(&priv->rx.rxbuflock);
+ rxbuf->skb = skb;
+ rxbuf->in_process = true;
+ spin_unlock(&priv->rx.rxbuflock);
+
+ tasklet_schedule(&priv->rx_tasklet);
+ return;
+err:
+ dev_kfree_skb_any(skb);
+}
+
+/* FIXME: Locking for cleanup/init */
+
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_rxbuf *rxbuf, *tbuf;
+
+ list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
+ list_del(&rxbuf->list);
+ if (rxbuf->skb)
+ dev_kfree_skb_any(rxbuf->skb);
+ kfree(rxbuf);
+ }
+}
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf;
+ int i = 0;
+
+ INIT_LIST_HEAD(&priv->rx.rxbuf);
+ spin_lock_init(&priv->rx.rxbuflock);
+
+ for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
+ rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to allocate RX buffers\n");
+ goto err;
+ }
+ list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
+ }
+
+ return 0;
+
+err:
+ ath9k_rx_cleanup(priv);
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 00000000000..064397fd738
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static int htc_issue_send(struct htc_target *target, struct sk_buff *skb,
+ u16 len, u8 flags, u8 epid,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct htc_frame_hdr *hdr;
+ struct htc_endpoint *endpoint = &target->endpoint[epid];
+ int status;
+
+ hdr = (struct htc_frame_hdr *)
+ skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr->endpoint_id = epid;
+ hdr->flags = flags;
+ hdr->payload_len = cpu_to_be16(len);
+
+ status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
+ tx_ctl);
+ return status;
+}
+
+static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
+{
+ enum htc_endpoint_id avail_epid;
+
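+ /* Search downwards from the last endpoint for an unclaimed slot */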
+ for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--)
+ if (endpoint[avail_epid].service_id == 0)
+ return &endpoint[avail_epid];
+ return NULL;
+}
+
+static u8 service_to_ulpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 4;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u8 service_to_dlpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 3;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
+static void htc_process_target_rdy(struct htc_target *target,
+ void *buf)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+
+ target->credits = be16_to_cpu(htc_ready_msg->credits);
+ target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
+
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->service_id = HTC_CTRL_RSVD_SVC;
+ endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ atomic_inc(&target->tgt_ready);
+ complete(&target->target_wait);
+}
+
+static void htc_process_conn_rsp(struct htc_target *target,
+ struct htc_frame_hdr *htc_hdr)
+{
+ struct htc_conn_svc_rspmsg *svc_rspmsg;
+ struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
+ u16 service_id;
+ u16 max_msglen;
+ enum htc_endpoint_id epid, tepid;
+
+ svc_rspmsg = (struct htc_conn_svc_rspmsg *)
+ ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+
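+ /* Find the temporary endpoint that carried this connect request */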
+ for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) {
+ tmp_endpoint = &target->endpoint[tepid];
+ if (tmp_endpoint->service_id == service_id) {
+ tmp_endpoint->service_id = 0;
+ break;
+ }
+ }
+
+ if (tepid == ENDPOINT0)
+ return;
+
+ endpoint->service_id = service_id;
+ endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
+ endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
+ endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
+ endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
+ endpoint->max_msglen = max_msglen;
+ target->conn_rsp_epid = epid;
+ complete(&target->cmd_wait);
+ } else {
+ target->conn_rsp_epid = ENDPOINT_UNUSED;
+ }
+}
+
+static int htc_config_pipe_credits(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_config_pipe_msg *cp_msg;
+ int ret, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ cp_msg = (struct htc_config_pipe_msg *)
+ skb_put(skb, sizeof(struct htc_config_pipe_msg));
+
+ cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
+ cp_msg->pipe_id = USB_WLAN_TX_PIPE;
+ cp_msg->credits = 28;
+
+ target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int htc_setup_complete(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_comp_msg *comp_msg;
+ int ret = 0, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ comp_msg = (struct htc_comp_msg *)
+ skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
+
+ target->htc_flags |= HTC_OP_START_WAIT;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/* HTC APIs */
+
+int htc_init(struct htc_target *target)
+{
+ int ret;
+
+ ret = htc_config_pipe_credits(target);
+ if (ret)
+ return ret;
+
+ return htc_setup_complete(target);
+}
+
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_epid)
+{
+ struct sk_buff *skb;
+ struct htc_endpoint *endpoint;
+ struct htc_conn_svc_msg *conn_msg;
+ int ret, time_left;
+
+ /* Find an available endpoint */
+ endpoint = get_next_avail_ep(target->endpoint);
+ if (!endpoint) {
+ dev_err(target->dev, "Endpoint is not available for"
+ "service %d\n", service_connreq->service_id);
+ return -EINVAL;
+ }
+
+ endpoint->service_id = service_connreq->service_id;
+ endpoint->max_txqdepth = service_connreq->max_send_qdepth;
+ endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
+ endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
+ endpoint->ep_callbacks = service_connreq->ep_callbacks;
+
+ skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
+ sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "Failed to allocate buf to send"
+ "service connect req\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ conn_msg = (struct htc_conn_svc_msg *)
+ skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
+ conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
+ conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
+ conn_msg->dl_pipeid = endpoint->dl_pipeid;
+ conn_msg->ul_pipeid = endpoint->ul_pipeid;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+ return -ETIMEDOUT;
+ }
+
+ *conn_rsp_epid = target->conn_rsp_epid;
+ return 0;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
+}
+
+void htc_stop(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
+ }
+}
+
+void htc_start(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->start(target->hif_dev,
+ endpoint->ul_pipeid);
+ }
+}
+
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_frame_hdr *htc_hdr = NULL;
+
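+ /* Completions for internal HTC control messages just wake up the waiting caller */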
+ if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
+ goto ret;
+ }
+
+ if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
+ goto ret;
+ }
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ if (endpoint->ep_callbacks.tx) {
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
+ }
+ }
+
+ return;
+ret:
+ /* HTC-generated packets are freed here. */
+ if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+}
+
+/*
+ * HTC Messages are handled directly here and the obtained SKB
+ * is freed.
+ *
+ * Service messages (Data, WMI) are passed to the corresponding
+ * endpoint RX handlers, which have to free the SKB.
+ */
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id)
+{
+ struct htc_frame_hdr *htc_hdr;
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+ __be16 *msg_id;
+
+ if (!htc_handle || !skb)
+ return;
+
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ epid = htc_hdr->endpoint_id;
+
+ if (epid >= ENDPOINT_MAX) {
+ if (pipe_id != USB_REG_IN_PIPE)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid == ENDPOINT0) {
+
+ /* Handle trailer */
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ /* Move past the Watchdog pattern */
+ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ }
+
+ /* Get the message ID */
+ msg_id = (__be16 *) ((void *) htc_hdr +
+ sizeof(struct htc_frame_hdr));
+
+ /* Now process HTC messages */
+ switch (be16_to_cpu(*msg_id)) {
+ case HTC_MSG_READY_ID:
+ htc_process_target_rdy(htc_handle, htc_hdr);
+ break;
+ case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ htc_process_conn_rsp(htc_handle, htc_hdr);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+
+ } else {
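+ /* Remove the trailer indicated by HTC_FLAGS_RECV_TRAILER before handing the frame to the endpoint */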
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
+ skb_trim(skb, len - htc_hdr->control[0]);
+
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ endpoint = &htc_handle->endpoint[epid];
+ if (endpoint->ep_callbacks.rx)
+ endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ skb, epid);
+ }
+}
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_target *target;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (!target) {
+ printk(KERN_ERR "Unable to allocate memory for"
+ "target device\n");
+ return NULL;
+ }
+
+ init_completion(&target->target_wait);
+ init_completion(&target->cmd_wait);
+
+ target->hif = hif;
+ target->hif_dev = hif_handle;
+ target->dev = dev;
+
+ /* Assign control endpoint pipe IDs */
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->ul_pipeid = hif->control_ul_pipe;
+ endpoint->dl_pipeid = hif->control_dl_pipe;
+
+ atomic_set(&target->tgt_ready, 0);
+
+ return target;
+}
+
+void ath9k_htc_hw_free(struct htc_target *htc)
+{
+ kfree(htc);
+}
+
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid)
+{
+ if (ath9k_htc_probe_device(target, dev, devid)) {
+ printk(KERN_ERR "Failed to initialize the device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
+{
+ if (target)
+ ath9k_htc_disconnect_device(target, hot_unplug);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 00000000000..faba6790328
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HST_H
+#define HTC_HST_H
+
+struct ath9k_htc_priv;
+struct htc_target;
+struct ath9k_htc_tx_ctl;
+
+enum ath9k_hif_transports {
+ ATH9K_HIF_USB,
+};
+
+struct ath9k_htc_hif {
+ struct list_head list;
+ const enum ath9k_hif_transports transport;
+ const char *name;
+
+ u8 control_dl_pipe;
+ u8 control_ul_pipe;
+
+ void (*start) (void *hif_handle, u8 pipe);
+ void (*stop) (void *hif_handle, u8 pipe);
+ int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
+ struct ath9k_htc_tx_ctl *tx_ctl);
+};
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT0 = 0,
+ ENDPOINT1 = 1,
+ ENDPOINT2 = 2,
+ ENDPOINT3 = 3,
+ ENDPOINT4 = 4,
+ ENDPOINT5 = 5,
+ ENDPOINT6 = 6,
+ ENDPOINT7 = 7,
+ ENDPOINT8 = 8,
+ ENDPOINT_MAX = 22
+};
+
+/* Htc frame hdr flags */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1)
+
+struct htc_frame_hdr {
+ u8 endpoint_id;
+ u8 flags;
+ __be16 payload_len;
+ u8 control[4];
+} __packed;
+
+struct htc_ready_msg {
+ __be16 message_id;
+ __be16 credits;
+ __be16 credit_size;
+ u8 max_endpoints;
+ u8 pad;
+} __packed;
+
+struct htc_config_pipe_msg {
+ __be16 message_id;
+ u8 pipe_id;
+ u8 credits;
+} __packed;
+
+struct htc_packet {
+ void *pktcontext;
+ u8 *buf;
+ u8 *buf_payload;
+ u32 buflen;
+ u32 payload_len;
+
+ int endpoint;
+ int status;
+
+ void *context;
+ u32 reserved;
+};
+
+struct htc_ep_callbacks {
+ void *priv;
+ void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
+ void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
+};
+
+#define HTC_TX_QUEUE_SIZE 256
+
+struct htc_txq {
+ struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
+ u32 txqdepth;
+ u16 txbuf_cnt;
+ u16 txq_head;
+ u16 txq_tail;
+};
+
+struct htc_endpoint {
+ u16 service_id;
+
+ struct htc_ep_callbacks ep_callbacks;
+ struct htc_txq htc_txq;
+ u32 max_txqdepth;
+ int max_msglen;
+
+ u8 ul_pipeid;
+ u8 dl_pipeid;
+};
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
+#define HTC_CONTROL_BUFFER_SIZE \
+ (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
+
+struct htc_control_buf {
+ struct htc_packet htc_pkt;
+ u8 buf[HTC_CONTROL_BUFFER_SIZE];
+};
+
+#define HTC_OP_START_WAIT BIT(0)
+#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
+
+struct htc_target {
+ void *hif_dev;
+ struct ath9k_htc_priv *drv_priv;
+ struct device *dev;
+ struct ath9k_htc_hif *hif;
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct completion target_wait;
+ struct completion cmd_wait;
+ struct list_head list;
+ enum htc_endpoint_id conn_rsp_epid;
+ u16 credits;
+ u16 credit_size;
+ u8 htc_flags;
+ atomic_t tgt_ready;
+};
+
+enum htc_msg_id {
+ HTC_MSG_READY_ID = 1,
+ HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
+ HTC_MSG_SETUP_COMPLETE_ID,
+ HTC_MSG_CONFIG_PIPE_ID,
+ HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
+};
+
+struct htc_service_connreq {
+ u16 service_id;
+ u16 con_flags;
+ u32 max_send_qdepth;
+ struct htc_ep_callbacks ep_callbacks;
+};
+
+/* Current service IDs */
+
+enum htc_service_group_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)(group) << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
+
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
+
+struct htc_conn_svc_msg {
+ __be16 msg_id;
+ __be16 service_id;
+ __be16 con_flags;
+ u8 dl_pipeid;
+ u8 ul_pipeid;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+#define HTC_SERVICE_NO_RESOURCES 3
+#define HTC_SERVICE_NO_MORE_EP 4
+
+struct htc_conn_svc_rspmsg {
+ __be16 msg_id;
+ __be16 service_id;
+ u8 status;
+ u8 endpoint_id;
+ __be16 max_msg_len;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_comp_msg {
+ __be16 msg_id;
+} __packed;
+
+int htc_init(struct htc_target *target);
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_eid);
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
+void htc_stop(struct htc_target *target);
+void htc_start(struct htc_target *target);
+
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id);
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok);
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev);
+void ath9k_htc_hw_free(struct htc_target *htc);
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid);
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
+
+#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 00000000000..624422a8169
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_HW_OPS_H
+#define ATH9K_HW_OPS_H
+
+#include "hw.h"
+
+/* Hardware core and driver accessible callbacks */
+
+static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
+}
+
+static inline void ath9k_hw_rxena(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->rx_enable(ah);
+}
+
+static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
+ u32 link)
+{
+ ath9k_hw_ops(ah)->set_desc_link(ds, link);
+}
+
+static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
+ u32 **link)
+{
+ ath9k_hw_ops(ah)->get_desc_link(ds, link);
+}
+static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
+}
+
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ return ath9k_hw_ops(ah)->get_isr(ah, masked);
+}
+
+static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
+ ds0, buf_addr, qcu);
+}
+
+static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
+}
+
+static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
+ keyType, flags);
+}
+
+static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
+ rtsctsRate, rtsctsDuration, series,
+ nseries, flags);
+}
+
+static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
+}
+
+static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
+}
+
+static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
+}
+
+static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
+}
+
+static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
+}
+
+static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
+}
+
+/* Private hardware call ops */
+
+/* PHY ops */
+
+static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
+}
+
+static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
+}
+
+static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
+ return 0;
+
+ return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
+}
+
+static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
+ return;
+
+ ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
+}
+
+static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ if (!ath9k_hw_private_ops(ah)->set_rf_regs)
+ return true;
+
+ return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
+}
+
+static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
+}
+
+static inline int ath9k_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
+}
+
+static inline void ath9k_olc_init(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->olc_init)
+ return;
+
+ return ath9k_hw_private_ops(ah)->olc_init(ah);
+}
+
+static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
+}
+
+static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
+}
+
+static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
+}
+
+static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_req(ah);
+}
+
+static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_done(ah);
+}
+
+static inline void ath9k_enable_rfkill(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
+}
+
+static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->restore_chainmask)
+ return;
+
+ return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
+}
+
+static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
+}
+
+static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
+}
+
+static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
+}
+
+static inline void ath9k_hw_loadnf(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->loadnf(ah, chan);
+}
+
+static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
+}
+
+static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
+}
+
+static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
+}
+
+#endif /* ATH9K_HW_OPS_H */
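All of the wrappers in this header follow one pattern: each hardware family fills a table of function pointers at attach time, the rest of the driver calls through thin static inlines, and optional callbacks are guarded by a NULL check (compare ath9k_hw_rf_alloc_ext_banks() or ath9k_olc_init() above). A self-contained sketch of that pattern, with toy names rather than ath9k symbols:

	struct toy_hw;

	struct toy_ops {
		void (*rx_enable)(struct toy_hw *hw);		/* mandatory callback */
		void (*restore_chainmask)(struct toy_hw *hw);	/* optional callback */
	};

	struct toy_hw {
		const struct toy_ops *ops;	/* filled in by the family's attach routine */
	};

	static inline void toy_rx_enable(struct toy_hw *hw)
	{
		hw->ops->rx_enable(hw);
	}

	static inline void toy_restore_chainmask(struct toy_hw *hw)
	{
		if (!hw->ops->restore_chainmask)
			return;			/* this family does not need it */
		hw->ops->restore_chainmask(hw);
	}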
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b571129c9..c33f17dbe6f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,16 @@
#include <asm/unaligned.h>
#include "hw.h"
+#include "hw-ops.h"
#include "rc.h"
-#include "initvals.h"
+#include "ar9003_mac.h"
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -49,6 +47,39 @@ static void __exit ath9k_exit(void)
}
module_exit(ath9k_exit);
+/* Private hardware callbacks */
+
+static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_mode_regs(ah);
+}
+
+static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ return priv_ops->macversion_supported(ah->hw_version.macVersion);
+}
+
+static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -61,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
return usecs *ATH9K_CLOCK_RATE_CCK;
if (conf->channel->band == IEEE80211_BAND_2GHZ)
return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
- return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
+ return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
}
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
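As a worked example of the conversion above: a 10 us interval becomes 10 * 22 = 220 MAC clocks on CCK, 10 * 44 = 440 on 2 GHz OFDM, 10 * 44 = 440 on a 5 GHz channel when ATH9K_HW_CAP_FASTCLOCK is set, and 10 * 40 = 400 on the standard 5 GHz OFDM clock.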
@@ -236,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
}
}
-static int ath9k_hw_get_radiorev(struct ath_hw *ah)
-{
- u32 val;
- int i;
-
- REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
-
- for (i = 0; i < 8; i++)
- REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
- val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
- val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
-
- return ath9k_hw_reverse_bits(val, 8);
-}
-
/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/
@@ -260,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -271,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
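This is the first use of the register-write buffering bracket that recurs throughout the rest of the patch: a run of REG_WRITE() calls is enclosed so that, where the underlying bus supports it, the writes can be batched into one transfer. A hedged usage sketch (the macros are defined elsewhere in ath9k; the AR_EXAMPLE_* register names are placeholders):

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_EXAMPLE_REG_A, 0x1);	/* placeholder register */
	REG_WRITE(ah, AR_EXAMPLE_REG_B, 0x2);	/* placeholder register */

	REGWRITE_BUFFER_FLUSH(ah);		/* push any queued writes to the device */
	DISABLE_REGWRITE_BUFFER(ah);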
+/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
u32 patternData[4] = { 0x55555555,
0xaaaaaaaa,
0x66666666,
0x99999999 };
- int i, j;
+ int i, j, loop_max;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ loop_max = 2;
+ regAddr[1] = AR_PHY_BASE + (8 << 2);
+ } else
+ loop_max = 1;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < loop_max; i++) {
u32 addr = regAddr[i];
u32 wrData, rdData;
@@ -339,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
- ah->config.enable_ani = 1;
+
+ /*
+	 * For now, ANI is disabled for AR9003; it is still
+ * being tested.
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->config.enable_ani = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -354,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rx_intr_mitigation = true;
/*
+ * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
+ * used by AR9003, but it is showing reliability issues.
+	 * It will take a while to fix, so this is currently disabled.
+ */
+
+ /*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
@@ -372,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}
-EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
@@ -386,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.subvendorid = 0;
ah->ah_flags = 0;
- if (ah->hw_version.devid == AR5416_AR9100_DEVID)
- ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!AR_SREV_9100(ah))
ah->ah_flags = AH_USE_EEPROM;
@@ -400,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
}
-static int ath9k_hw_rf_claim(struct ath_hw *ah)
-{
- u32 val;
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
-
- val = ath9k_hw_get_radiorev(ah);
- switch (val & AR_RADIO_SREV_MAJOR) {
- case 0:
- val = AR_RAD5133_SREV_MAJOR;
- break;
- case AR_RAD5133_SREV_MAJOR:
- case AR_RAD5122_SREV_MAJOR:
- case AR_RAD2133_SREV_MAJOR:
- case AR_RAD2122_SREV_MAJOR:
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Radio Chip Rev 0x%02X not supported\n",
- val & AR_RADIO_SREV_MAJOR);
- return -EOPNOTSUPP;
- }
-
- ah->hw_version.analog5GhzRev = val;
-
- return 0;
-}
-
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sum;
int i;
u16 eeval;
+ u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
- eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -448,64 +462,20 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
-{
- u32 rxgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
- rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
-
- if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
- else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- }
-}
-
-static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
-{
- u32 txgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
- txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- }
-}
-
static int ath9k_hw_post_init(struct ath_hw *ah)
{
int ecode;
- if (!ath9k_hw_chip_test(ah))
- return -ENODEV;
+ if (!AR_SREV_9271(ah)) {
+ if (!ath9k_hw_chip_test(ah))
+ return -ENODEV;
+ }
- ecode = ath9k_hw_rf_claim(ah);
- if (ecode != 0)
- return ecode;
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ecode = ar9002_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+ }
ecode = ath9k_hw_eeprom_init(ah);
if (ecode != 0)
@@ -516,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- if (!AR_SREV_9280_10_OR_LATER(ah)) {
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed allocating banks for "
- "external radio\n");
- return ecode;
- }
+ ecode = ath9k_hw_rf_alloc_ext_banks(ah);
+ if (ecode) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Failed allocating banks for "
+ "external radio\n");
+ return ecode;
}
if (!AR_SREV_9100(ah)) {
@@ -534,321 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return 0;
}
-static bool ath9k_hw_devid_supported(u16 devid)
+static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
- switch (devid) {
- case AR5416_DEVID_PCI:
- case AR5416_DEVID_PCIE:
- case AR5416_AR9100_DEVID:
- case AR9160_DEVID_PCI:
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- case AR9285_DEVID_PCIE:
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- case AR9271_USB:
- case AR2427_DEVID_PCIE:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static bool ath9k_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- if (AR_SREV_9160_10_OR_LATER(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_single_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_single_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- } else {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_multi_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_multi_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- }
- ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
- }
-}
-
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
- ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
- return;
- }
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
- 2);
- } else if (AR_SREV_9287_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
- ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
- ARRAY_SIZE(ar9287Common_9287_1_0), 2);
-
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
- 2);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
-
-
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
- 2);
- }
- } else if (AR_SREV_9285_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
- ARRAY_SIZE(ar9285Modes_9285), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
- ARRAY_SIZE(ar9285Common_9285), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
- }
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
- }
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
- } else if (AR_SREV_9280_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
- ARRAY_SIZE(ar9280Modes_9280), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
- ARRAY_SIZE(ar9280Common_9280), 2);
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
- ARRAY_SIZE(ar5416Bank0_9160), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
- ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
- ARRAY_SIZE(ar5416Bank1_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
- ARRAY_SIZE(ar5416Bank2_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
- ARRAY_SIZE(ar5416Bank3_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
- ARRAY_SIZE(ar5416Bank6_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
- ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
- ARRAY_SIZE(ar5416Bank7_9160), 2);
- if (AR_SREV_9160_11(ah)) {
- INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_91601_1,
- ARRAY_SIZE(ar5416Addac_91601_1), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
- }
- } else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
- ARRAY_SIZE(ar5416Bank0_9100), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
- ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
- ARRAY_SIZE(ar5416Bank1_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
- ARRAY_SIZE(ar5416Bank2_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
- ARRAY_SIZE(ar5416Bank3_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
- ARRAY_SIZE(ar5416Bank7_9100), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
- }
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9287_11_OR_LATER(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
- else if (AR_SREV_9287_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
- else if (AR_SREV_9280_20(ah))
- ath9k_hw_init_rxgain_ini(ah);
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
- } else if (AR_SREV_9287_10(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
- } else if (AR_SREV_9280_20(ah)) {
- ath9k_hw_init_txgain_ini(ah);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
- u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- /* txgain table */
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
- }
-
- }
-}
-
-static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
-{
- u32 i, j;
-
- if (ah->hw_version.devid == AR9280_DEVID_PCI) {
-
- /* EEPROM Fixup */
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
-
- for (j = 1; j < ah->iniModes.ia_columns; j++) {
- u32 val = INI_RA(&ah->iniModes, i, j);
-
- INI_RA(&ah->iniModes, i, j) =
- ath9k_hw_ini_fixup(ah,
- &ah->eeprom.def,
- reg, val);
- }
- }
- }
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_attach_ops(ah);
+ else
+ ar9002_hw_attach_ops(ah);
}
-int ath9k_hw_init(struct ath_hw *ah)
+/* Called for all hardware families */
+static int __ath9k_hw_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unsupported device ID: 0x%0x\n",
- ah->hw_version.devid);
- return -EOPNOTSUPP;
- }
-
- ath9k_hw_init_defaults(ah);
- ath9k_hw_init_config(ah);
+ if (ah->hw_version.devid == AR5416_AR9100_DEVID)
+ ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_print(common, ATH_DBG_FATAL,
@@ -856,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
+ ath9k_hw_init_defaults(ah);
+ ath9k_hw_init_config(ah);
+
+ ath9k_hw_attach_ops(ah);
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
return -EIO;
@@ -880,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
- if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
+ if (!ath9k_hw_macversion_supported(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Mac Chip Rev 0x%02x.%x is not supported by "
"this driver\n", ah->hw_version.macVersion,
@@ -888,45 +562,45 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EOPNOTSUPP;
}
- if (AR_SREV_9100(ah)) {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->supp_cals = IQ_MISMATCH_CAL;
- ah->is_pciexpress = false;
- }
-
- if (AR_SREV_9271(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
-
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
- } else {
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
- }
ath9k_hw_init_mode_regs(ah);
+ /*
+	 * Configure PCIe after INI init. SERDES values now come from the INI file.
+ * This enables PCIe low power mode.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ u32 regval;
+ unsigned int i;
+
+ /* Set Bits 16 and 17 in the AR_WA register. */
+ regval = REG_READ(ah, AR_WA);
+ regval |= 0x00030000;
+ REG_WRITE(ah, AR_WA, regval);
+
+ for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
+ }
+ }
+
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
- /* Support for Japan ch.14 (2484) spread */
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
- INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
- }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_cck_chan14_spread(ah);
r = ath9k_hw_post_init(ah);
if (r)
@@ -937,8 +611,6 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_eeprom_fix(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_print(common, ATH_DBG_FATAL,
@@ -951,6 +623,9 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_set_nf_limits(ah);
+
ath9k_init_nfcal_hist_buffer(ah);
common->state = ATH_HW_INITIALIZED;
@@ -958,24 +633,50 @@ int ath9k_hw_init(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_bb(struct ath_hw *ah,
- struct ath9k_channel *chan)
+int ath9k_hw_init(struct ath_hw *ah)
{
- u32 synthDelay;
+ int ret;
+ struct ath_common *common = ath9k_hw_common(ah);
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
+ /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
+ switch (ah->hw_version.devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR5416_AR9100_DEVID:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ case AR9285_DEVID_PCIE:
+ case AR9287_DEVID_PCI:
+ case AR9287_DEVID_PCIE:
+ case AR2427_DEVID_PCIE:
+ case AR9300_DEVID_PCIE:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ break;
+ ath_print(common, ATH_DBG_FATAL,
+ "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
+ return -EOPNOTSUPP;
+ }
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ ret = __ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ return ret;
+ }
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ return 0;
}
+EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -989,105 +690,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
-}
-
-static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
-{
- u32 lcr;
- u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
-
- lcr = REG_READ(ah , 0x5100c);
- lcr |= 0x80;
- REG_WRITE(ah, 0x5100c, lcr);
- REG_WRITE(ah, 0x51004, (baud_divider >> 8));
- REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
-
- lcr &= ~0x80;
- REG_WRITE(ah, 0x5100c, lcr);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 pll;
-
- if (AR_SREV_9100(ah)) {
- if (chan && IS_CHAN_5GHZ(chan))
- pll = 0x1450;
- else
- pll = 0x1458;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan)) {
- pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
-
-
- if (AR_SREV_9280_20(ah)) {
- if (((chan->channel % 20) == 0)
- || ((chan->channel % 10) == 0))
- pll = 0x2850;
- else
- pll = 0x142c;
- }
- } else {
- pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
- }
-
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
-
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
- else
- pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
- } else {
- pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+ u32 pll = ath9k_hw_compute_pll_control(ah, chan);
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0xa, AR_RTC_PLL_DIV);
- else
- pll |= SM(0xb, AR_RTC_PLL_DIV);
- }
- }
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
/* Switch the core clock for ar9271 to 117Mhz */
if (AR_SREV_9271(ah)) {
- if ((pll == 0x142c) || (pll == 0x2850) ) {
- udelay(500);
- /* set CLKOBS to output AHB clock */
- REG_WRITE(ah, 0x7020, 0xe);
- /*
- * 0x304: 117Mhz, ahb_ratio: 1x1
- * 0x306: 40Mhz, ahb_ratio: 1x1
- */
- REG_WRITE(ah, 0x50040, 0x304);
- /*
- * makes adjustments for the baud dividor to keep the
- * targetted baud rate based on the used core clock.
- */
- ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
- AR9271_TARGET_BAUD_RATE);
- }
+ udelay(500);
+ REG_WRITE(ah, 0x50040, 0x304);
}
udelay(RTC_PLL_SETTLE_DELAY);
@@ -1095,70 +713,58 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
}
-static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
-{
- int rx_chainmask, tx_chainmask;
-
- rx_chainmask = ah->rxchainmask;
- tx_chainmask = ah->txchainmask;
-
- switch (rx_chainmask) {
- case 0x5:
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
- break;
- }
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- break;
- default:
- break;
- }
-
- REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
- if (tx_chainmask == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
- if (AR_SREV_9100(ah))
- REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
- REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
-}
-
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
- ah->mask_reg = AR_IMR_TXERR |
+ u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.rx_intr_mitigation)
- ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
- ah->mask_reg |= AR_IMR_RXOK;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ imr_reg |= AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK_LP;
- ah->mask_reg |= AR_IMR_TXOK;
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK;
+ }
+
+ if (ah->config.tx_intr_mitigation)
+ imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
+ else
+ imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
- ah->mask_reg |= AR_IMR_MIB;
+ imr_reg |= AR_IMR_MIB;
+
+ ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_IMR, ah->mask_reg);
- REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+ REG_WRITE(ah, AR_IMR, imr_reg);
+ ah->imrs2_reg |= AR_IMR_S2_GTT;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ }
}
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1241,19 +847,13 @@ void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (common->state <= ATH_HW_INITIALIZED)
+ if (common->state < ATH_HW_INITIALIZED)
goto free_hw;
- if (!AR_SREV_9100(ah))
- ath9k_hw_ani_disable(ah);
-
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
free_hw:
- if (!AR_SREV_9280_10_OR_LATER(ah))
- ath9k_hw_rf_free_ext_banks(ah);
- kfree(ah);
- ah = NULL;
+ ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -1261,136 +861,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
/* INI */
/*******/
-static void ath9k_hw_override_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 val;
-
- if (AR_SREV_9271(ah)) {
- /*
- * Enable spectral scan to solution for issues with stuck
- * beacons on AR9271 1.0. The beacon stuck issue is not seeon on
- * AR9271 1.1
- */
- if (AR_SREV_9271_10(ah)) {
- val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
- AR_PHY_SPECTRAL_SCAN_ENABLE;
- REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
- }
- else if (AR_SREV_9271_11(ah))
- /*
- * change AR_PHY_RF_CTL3 setting to fix MAC issue
- * present on AR9271 1.1
- */
- REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
- return;
- }
-
- /*
- * Set the RX_ABORT and RX_DIS and clear if off only after
- * RXE is set for MAC. This prevents frames with corrupted
- * descriptor status.
- */
- REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2) &
- (~AR_PCU_MISC_MODE2_HWWAR1);
-
- if (AR_SREV_9287_10_OR_LATER(ah))
- val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
-
- REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
- }
-
- if (!AR_SREV_5416_20_OR_LATER(ah) ||
- AR_SREV_9280_10_OR_LATER(ah))
- return;
- /*
- * Disable BB clock gating
- * Necessary to avoid issues on AR5416 2.0
- */
- REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
-
- /*
- * Disable RIFS search on some chips to avoid baseband
- * hang issues.
- */
- if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
- val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
- val &= ~AR_PHY_RIFS_INIT_DELAY;
- REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
- }
-}
-
-static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- struct base_eep_header *pBase = &(pEepData->baseEepHeader);
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (ah->hw_version.devid) {
- case AR9280_DEVID_PCI:
- if (reg == 0x7894) {
- ath_print(common, ATH_DBG_EEPROM,
- "ini VAL: %x EEPROM: %x\n", value,
- (pBase->version & 0xff));
-
- if ((pBase->version & 0xff) > 0x0a) {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND: %d\n",
- pBase->pwdclkind);
- value &= ~AR_AN_TOP2_PWDCLKIND;
- value |= AR_AN_TOP2_PWDCLKIND &
- (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
- } else {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND Earlier Rev\n");
- }
-
- ath_print(common, ATH_DBG_EEPROM,
- "final ini VAL: %x\n", value);
- }
- break;
- }
-
- return value;
-}
-
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- if (ah->eep_map == EEP_MAP_4KBITS)
- return value;
- else
- return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
-}
-
-static void ath9k_olc_init(struct ath_hw *ah)
-{
- u32 i;
-
- if (OLC_FOR_AR9287_10_LATER) {
- REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
- AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
- AR9287_AN_TXPC0_TXPCMODE,
- AR9287_AN_TXPC0_TXPCMODE_S,
- AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
- udelay(100);
- } else {
- for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
- ah->originalGain[i] =
- MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
- AR_PHY_TX_GAIN);
- ah->PDADCdelta = 0;
- }
-}
-
-static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
- struct ath9k_channel *chan)
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
@@ -1404,173 +875,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
return ctl;
}
-static int ath9k_hw_process_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- int i, regWrites = 0;
- struct ieee80211_channel *channel = chan->chan;
- u32 modesIndex, freqIndex;
-
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
- }
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
- ah->eep_ops->set_addac(ah, chan);
-
- if (AR_SREV_5416_22_OR_LATER(ah)) {
- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
- } else {
- struct ar5416IniArray temp;
- u32 addacSize =
- sizeof(u32) * ah->iniAddac.ia_rows *
- ah->iniAddac.ia_columns;
-
- memcpy(ah->addac5416_21,
- ah->iniAddac.ia_array, addacSize);
-
- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
- temp.ia_array = ah->addac5416_21;
- temp.ia_columns = ah->iniAddac.ia_columns;
- temp.ia_rows = ah->iniAddac.ia_rows;
- REG_WRITE_ARRAY(&temp, 1, regWrites);
- }
-
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
-
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
- u32 val = INI_RA(&ah->iniModes, i, modesIndex);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
-
- if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
- AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
-
- for (i = 0; i < ah->iniCommon.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniCommon, i, 0);
- u32 val = INI_RA(&ah->iniCommon, i, 1);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- ath9k_hw_write_regs(ah, freqIndex, regWrites);
-
- if (AR_SREV_9271_10(ah))
- REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
- modesIndex, regWrites);
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
- regWrites);
- }
-
- ath9k_hw_override_ini(ah, chan);
- ath9k_hw_set_regs(ah, chan);
- ath9k_hw_init_chain_masks(ah);
-
- if (OLC_FOR_AR9280_20_LATER)
- ath9k_olc_init(ah);
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
-
- if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "ar5416SetRfRegs failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/
-static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 rfMode = 0;
-
- if (chan == NULL)
- return;
-
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
-
- if (!AR_SREV_9280_10_OR_LATER(ah))
- rfMode |= (IS_CHAN_5GHZ(chan)) ?
- AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
- rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
-
- REG_WRITE(ah, AR_PHY_MODE, rfMode);
-}
-
-static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-}
-
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
u32 regval;
+ ENABLE_REGWRITE_BUFFER(ah);
+
/*
* set AHB_MODE not to do cacheline prefetches
*/
- regval = REG_READ(ah, AR_AHB_MODE);
- REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ regval = REG_READ(ah, AR_AHB_MODE);
+ REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ }
/*
* let mac dma reads be in 128 byte chunks
@@ -1578,12 +900,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* Restore TX Trigger Level to its pre-reset value.
* The initial value depends on whether aggregation is enabled, and is
* adjusted whenever underruns are detected.
*/
- REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+
+ ENABLE_REGWRITE_BUFFER(ah);
/*
* let mac dma writes be in 128 byte chunks
@@ -1596,6 +924,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
*/
REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+ }
+
/*
* reduce the number of usable entries in PCU TXBUF to avoid
* wrap around issues.
@@ -1611,6 +947,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_PCU_TXBUF_CTRL_USABLE_SIZE);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_reset_txstatus_ring(ah);
}
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1638,10 +980,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
}
}
-static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
- u32 coef_scaled,
- u32 *coef_mantissa,
- u32 *coef_exponent)
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent)
{
u32 coef_exp, coef_man;
@@ -1657,40 +997,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
*coef_exponent = coef_exp - 16;
}
-static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 coef_scaled, ds_coef_exp, ds_coef_man;
- u32 clockMhzScaled = 0x64000000;
- struct chan_centers centers;
-
- if (IS_CHAN_HALF_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 1;
- else if (IS_CHAN_QUARTER_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 2;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- coef_scaled = clockMhzScaled / centers.synth_center;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
-
- coef_scaled = (9 * coef_scaled) / 10;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
-}
-
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1704,6 +1010,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1715,11 +1023,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg &
(AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- } else {
+
+ val = AR_RC_HOSTIF;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ val |= AR_RC_AHB;
+ REG_WRITE(ah, AR_RC, val);
+
+ } else if (!AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- }
rst_flags = AR_RTC_RC_MAC_WARM;
if (type == ATH9K_RESET_COLD)
@@ -1727,6 +1040,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
REG_WRITE(ah, AR_RTC_RC, rst_flags);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
udelay(50);
REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1747,16 +1064,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
- if (!AR_SREV_9100(ah))
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ udelay(2);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1792,34 +1116,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
}
}
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 phymode;
- u32 enableDacFifo = 0;
-
- if (AR_SREV_9285_10_OR_LATER(ah))
- enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
- AR_PHY_FC_ENABLE_DAC_FIFO);
-
- phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
- | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
-
- if (IS_CHAN_HT40(chan)) {
- phymode |= AR_PHY_FC_DYN2040_EN;
-
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
- phymode |= AR_PHY_FC_DYN2040_PRI_CH;
-
- }
- REG_WRITE(ah, AR_PHY_TURBO, phymode);
-
- ath9k_hw_set11nmac2040(ah);
-
- REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
- REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
-}
-
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1845,7 +1141,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = chan->chan;
- u32 synthDelay, qnum;
+ u32 qnum;
int r;
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1857,17 +1153,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
}
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
- if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
- AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Could not kill baseband RX\n");
return false;
}
- ath9k_hw_set_regs(ah, chan);
+ ath9k_hw_set_channel_regs(ah, chan);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
ath_print(common, ATH_DBG_FATAL,
"Failed to set channel\n");
@@ -1881,20 +1175,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
min((u32) MAX_RATE_POWER,
(u32) regulatory->power_limit));
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
-
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
-
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+ ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
if (!chan->oneTimeCalsDone)
chan->oneTimeCalsDone = true;
@@ -1902,17 +1188,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return true;
}
-static void ath9k_enable_rfkill(struct ath_hw *ah)
+bool ath9k_hw_check_alive(struct ath_hw *ah)
{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ int count = 50;
+ u32 reg;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ return true;
+
+ do {
+ reg = REG_READ(ah, AR_OBS_BUS_1);
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
+ if ((reg & 0x7E7FFFEF) == 0x00702400)
+ continue;
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+ switch (reg & 0x7E000B00) {
+ case 0x1E000000:
+ case 0x52000B00:
+ case 0x18000B00:
+ continue;
+ default:
+ return true;
+ }
+ } while (count-- > 0);
+
+ return false;
}
+EXPORT_SYMBOL(ath9k_hw_check_alive);
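ath9k_hw_check_alive() repeatedly samples AR_OBS_BUS_1 and returns false only if the observed state keeps matching one of the known hang signatures. A hedged sketch of how a caller might use the new export from a watchdog path (the helper around it is a placeholder, not from this patch):

	if (!ath9k_hw_check_alive(ah)) {
		/* MAC looks hung: schedule a full chip reset */
		example_queue_reset_work(priv);	/* placeholder helper */
	}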
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange)
@@ -1923,11 +1225,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
- int i, rx_chainmask, r;
+ int i, r;
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
+ if (!ah->chip_fullsleep) {
+ ath9k_hw_abortpcurecv(ah);
+ if (!ath9k_hw_stopdmarecv(ah))
+ ath_print(common, ATH_DBG_XMIT,
+ "Failed to stop receive dma\n");
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1940,8 +1249,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
- IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
+ !AR_SREV_9280(ah)) {
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -1966,6 +1274,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1287,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return -EINVAL;
}
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
ah->htc_reset_init = false;
REG_WRITE(ah,
@@ -1993,16 +1303,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- /* Enable ASYNC FIFO */
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
- REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
- REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- }
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
@@ -2027,9 +1327,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
| macStaId1
@@ -2037,25 +1341,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
| (ah->config.
ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ath_hw_setbssidmask(common);
-
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
-
ath9k_hw_write_associd(ah);
-
REG_WRITE(ah, AR_ISR, ~0);
-
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
+ ENABLE_REGWRITE_BUFFER(ah);
+
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ah->intr_txqs = 0;
for (i = 0; i < ah->caps.total_queues; i++)
ath9k_hw_resettxqueue(ah, i);
@@ -2068,25 +1374,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
- AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
- AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
- AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
-
- REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
-
- REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
- AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
- REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
- AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
- }
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ar9002_hw_enable_async_fifo(ah);
+ ar9002_hw_enable_wep_aggregation(ah);
}
REG_WRITE(ah, AR_STA_ID1,
@@ -2101,19 +1391,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
+ if (ah->config.tx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
+ }
+
ath9k_hw_init_bb(ah, chan);
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- rx_chainmask = ah->rxchainmask;
- if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- }
+ ENABLE_REGWRITE_BUFFER(ah);
+ ath9k_hw_restore_chainmask(ah);
REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* For big endian systems turn on swapping for descriptors
*/
@@ -2143,6 +1438,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, curchan);
+ ath9k_hw_start_nfcal(ah);
+ }
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2429,21 +1729,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
/* Power Management (Chipset) */
/******************************/
+/*
+ * Notify Power Mgt is disabled in self-generated frames.
+ * If requested, force chip to sleep.
+ */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- if(!AR_SREV_5416(ah))
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
REG_CLR_BIT(ah, (AR_RTC_RESET),
AR_RTC_RESET_EN);
}
}
+/*
+ * Notify Power Management is enabled in self-generated
+ * frames. If requested, set the power mode of the chip to
+ * auto/normal. Duration is in units of 128us (1/8 TU).
+ */
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2451,9 +1765,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
struct ath9k_hw_capabilities *pCap = &ah->caps;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
}
@@ -2472,7 +1791,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
ATH9K_RESET_POWER_ON) != true) {
return false;
}
- ath9k_hw_init_pll(ah, NULL);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2542,424 +1862,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
}
EXPORT_SYMBOL(ath9k_hw_setpower);
-/*
- * Helper for ASPM support.
- *
- * Disable PLL when in L0s as well as receiver clock when in L1.
- * This power saving option must be enabled through the SerDes.
- *
- * Programming the SerDes must go through the same 288 bit serial shift
- * register as the other analog registers. Hence the 9 writes.
- */
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
-{
- u8 i;
- u32 val;
-
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
- return;
-
- /* Nothing to do on restore for 11N */
- if (!restore) {
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
-
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
-
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
-
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
-
- udelay(1000);
-
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
- val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else if (AR_SREV_9280(ah)) {
- /*
- * On AR9280 chips bit 22 of 0x4004 needs to be
- * set otherwise card may disappear.
- */
- val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else
- val = AR_WA_DEFAULT;
- }
-
- REG_WRITE(ah, AR_WA, val);
- }
-
- if (power_off) {
- /*
- * Set PCIe workaround bits
- * bit 14 in WA register (disable L1) should only
- * be set when device enters D3 and be cleared
- * when device comes back to D0.
- */
- if (ah->config.pcie_waen) {
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- } else {
- if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- }
- }
- }
-}
-EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
-
-/**********************/
-/* Interrupt Handling */
-/**********************/
-
-bool ath9k_hw_intrpend(struct ath_hw *ah)
-{
- u32 host_isr;
-
- if (AR_SREV_9100(ah))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
- if ((host_isr & AR_INTR_SYNC_DEFAULT)
- && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ath9k_hw_intrpend);
-
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
-{
- u32 isr = 0;
- u32 mask2 = 0;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
- bool fatal_int = false;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
- == AR_RTC_STATUS_ON) {
- isr = REG_READ(ah, AR_ISR);
- }
- }
-
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
- AR_INTR_SYNC_DEFAULT;
-
- *masked = 0;
-
- if (!isr && !sync_cause)
- return false;
- } else {
- *masked = 0;
- isr = REG_READ(ah, AR_ISR);
- }
-
- if (isr) {
- if (isr & AR_ISR_BCNMISC) {
- u32 isr2;
- isr2 = REG_READ(ah, AR_ISR_S2);
- if (isr2 & AR_ISR_S2_TIM)
- mask2 |= ATH9K_INT_TIM;
- if (isr2 & AR_ISR_S2_DTIM)
- mask2 |= ATH9K_INT_DTIM;
- if (isr2 & AR_ISR_S2_DTIMSYNC)
- mask2 |= ATH9K_INT_DTIMSYNC;
- if (isr2 & (AR_ISR_S2_CABEND))
- mask2 |= ATH9K_INT_CABEND;
- if (isr2 & AR_ISR_S2_GTT)
- mask2 |= ATH9K_INT_GTT;
- if (isr2 & AR_ISR_S2_CST)
- mask2 |= ATH9K_INT_CST;
- if (isr2 & AR_ISR_S2_TSFOOR)
- mask2 |= ATH9K_INT_TSFOOR;
- }
-
- isr = REG_READ(ah, AR_ISR_RAC);
- if (isr == 0xffffffff) {
- *masked = 0;
- return false;
- }
-
- *masked = isr & ATH9K_INT_COMMON;
-
- if (ah->config.rx_intr_mitigation) {
- if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
- *masked |= ATH9K_INT_RX;
- }
-
- if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
- *masked |= ATH9K_INT_RX;
- if (isr &
- (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
- AR_ISR_TXEOL)) {
- u32 s0_s, s1_s;
-
- *masked |= ATH9K_INT_TX;
-
- s0_s = REG_READ(ah, AR_ISR_S0_S);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
- }
-
- if (isr & AR_ISR_RXORN) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "receive FIFO overrun interrupt\n");
- }
-
- if (!AR_SREV_9100(ah)) {
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
- if (isr5 & AR_ISR_S5_TIM_TIMER)
- *masked |= ATH9K_INT_TIM_TIMER;
- }
- }
-
- *masked |= mask2;
- }
-
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
- u32 s5_s;
-
- s5_s = REG_READ(ah, AR_ISR_S5_S);
- if (isr & AR_ISR_GENTMR) {
- ah->intr_gen_timer_trigger =
- MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
-
- ah->intr_gen_timer_thresh =
- MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
-
- if (ah->intr_gen_timer_trigger)
- *masked |= ATH9K_INT_GENTIMER;
-
- }
- }
-
- if (sync_cause) {
- fatal_int =
- (sync_cause &
- (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
- ? true : false;
-
- if (fatal_int) {
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI FATAL interrupt\n");
- }
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI PERR interrupt\n");
- }
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
- REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
- REG_WRITE(ah, AR_RC, 0);
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- }
-
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
- }
-
- return true;
-}
-EXPORT_SYMBOL(ath9k_hw_getisr);
-
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
-{
- u32 omask = ah->mask_reg;
- u32 mask, mask2;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
- if (omask & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
- (void) REG_READ(ah, AR_IER);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
- }
- }
-
- mask = ints & ATH9K_INT_COMMON;
- mask2 = 0;
-
- if (ints & ATH9K_INT_TX) {
- if (ah->txok_interrupt_mask)
- mask |= AR_IMR_TXOK;
- if (ah->txdesc_interrupt_mask)
- mask |= AR_IMR_TXDESC;
- if (ah->txerr_interrupt_mask)
- mask |= AR_IMR_TXERR;
- if (ah->txeol_interrupt_mask)
- mask |= AR_IMR_TXEOL;
- }
- if (ints & ATH9K_INT_RX) {
- mask |= AR_IMR_RXERR;
- if (ah->config.rx_intr_mitigation)
- mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
- else
- mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- mask |= AR_IMR_GENTMR;
- }
-
- if (ints & (ATH9K_INT_BMISC)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_TIM)
- mask2 |= AR_IMR_S2_TIM;
- if (ints & ATH9K_INT_DTIM)
- mask2 |= AR_IMR_S2_DTIM;
- if (ints & ATH9K_INT_DTIMSYNC)
- mask2 |= AR_IMR_S2_DTIMSYNC;
- if (ints & ATH9K_INT_CABEND)
- mask2 |= AR_IMR_S2_CABEND;
- if (ints & ATH9K_INT_TSFOOR)
- mask2 |= AR_IMR_S2_TSFOOR;
- }
-
- if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_GTT)
- mask2 |= AR_IMR_S2_GTT;
- if (ints & ATH9K_INT_CST)
- mask2 |= AR_IMR_S2_CST;
- }
-
- ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
- REG_WRITE(ah, AR_IMR, mask);
- mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
- AR_IMR_S2_DTIM |
- AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND |
- AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR |
- AR_IMR_S2_GTT | AR_IMR_S2_CST);
- REG_WRITE(ah, AR_IMR_S2, mask | mask2);
- ah->mask_reg = ints;
-
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if (ints & ATH9K_INT_TIM_TIMER)
- REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- else
- REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- }
-
- if (ints & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
- AR_INTR_SYNC_DEFAULT);
- REG_WRITE(ah, AR_INTR_SYNC_MASK,
- AR_INTR_SYNC_DEFAULT);
- }
- ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
- REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
- }
-
- return omask;
-}
-EXPORT_SYMBOL(ath9k_hw_set_interrupts);
-
/*******************/
/* Beacon Handling */
/*******************/
@@ -2970,6 +1872,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
ah->beacon_interval = beacon_period;
+ ENABLE_REGWRITE_BUFFER(ah);
+
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
@@ -3013,6 +1917,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
beacon_period &= ~ATH9K_BEACON_ENA;
if (beacon_period & ATH9K_BEACON_RESET_TSF) {
ath9k_hw_reset_tsf(ah);
@@ -3029,6 +1936,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -3036,6 +1945,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
@@ -3058,6 +1970,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_DTIM,
TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3077,6 +1991,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_SET_BIT(ah, AR_TIMER_MODE,
AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
AR_DTIM_TIMER_EN);
@@ -3219,7 +2136,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
- if (AR_SREV_9285_10_OR_LATER(ah))
+ if (AR_SREV_9271(ah))
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_SREV_9285_10_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_10_OR_LATER(ah))
pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +2165,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
-
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+ if (AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3291,6 +2212,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
+ ATH9K_HW_CAP_FASTCLOCK;
+ pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
+ pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
+ pCap->rx_status_len = sizeof(struct ar9003_rxs);
+ pCap->tx_desc_len = sizeof(struct ar9003_txc);
+ pCap->txs_len = sizeof(struct ar9003_txs);
+ } else {
+ pCap->tx_desc_len = sizeof(struct ath_desc);
+ if (AR_SREV_9280_20(ah) &&
+ ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
+ AR5416_EEP_MINOR_VER_16) ||
+ ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+
return 0;
}
@@ -3323,10 +2264,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
case ATH9K_CAP_TKIP_SPLIT:
return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
false : true;
- case ATH9K_CAP_DIVERSITY:
- return (REG_READ(ah, AR_PHY_CCK_DETECT) &
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
- true : false;
case ATH9K_CAP_MCAST_KEYSRCH:
switch (capability) {
case 0:
@@ -3369,8 +2306,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status)
{
- u32 v;
-
switch (type) {
case ATH9K_CAP_TKIP_MIC:
if (setting)
@@ -3380,14 +2315,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
ah->sta_id1_defaults &=
~AR_STA_ID1_CRPT_MIC_ENABLE;
return true;
- case ATH9K_CAP_DIVERSITY:
- v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (setting)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
- return true;
case ATH9K_CAP_MCAST_KEYSRCH:
if (setting)
ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3455,7 +2382,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
- if (AR_SREV_9287_10_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return MS_REG_READ(AR9300, gpio) != 0;
+ else if (AR_SREV_9271(ah))
+ return MS_REG_READ(AR9271, gpio) != 0;
+ else if (AR_SREV_9287_10_OR_LATER(ah))
return MS_REG_READ(AR9287, gpio) != 0;
else if (AR_SREV_9285_10_OR_LATER(ah))
return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +2415,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
+ if (AR_SREV_9271(ah))
+ val = ~val;
+
REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
}
@@ -3523,6 +2457,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
@@ -3538,6 +2474,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
else
REG_WRITE(ah, AR_RXCFG,
REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);
@@ -3610,14 +2549,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_write_associd);
+#define ATH9K_MAX_TSF_READ 10
+
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
- u64 tsf;
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ tsf_upper1 = REG_READ(ah, AR_TSF_U32);
+ for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
+ tsf_lower = REG_READ(ah, AR_TSF_L32);
+ tsf_upper2 = REG_READ(ah, AR_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
- tsf = REG_READ(ah, AR_TSF_U32);
- tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
+ WARN_ON(i == ATH9K_MAX_TSF_READ);
- return tsf;
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);
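
The rewritten ath9k_hw_gettsf64() above guards against the TSF low word rolling over between the two 32-bit reads: the high word is sampled before and after the low word, and the sequence is retried (up to ATH9K_MAX_TSF_READ times) until two consecutive high-word samples agree. The same pattern in a generic, self-contained form, using the kernel's u32/u64 types and hypothetical read_hi()/read_lo() accessors standing in for the register reads:

/*
 * Generic high/low/high re-read pattern for a free-running 64-bit counter
 * exposed as two 32-bit registers. read_hi() and read_lo() are placeholder
 * accessors, not driver functions.
 */
static u64 read_split_counter(u32 (*read_hi)(void), u32 (*read_lo)(void))
{
        u32 hi = read_hi();
        u32 lo, hi2;
        int tries = 10;

        do {
                lo = read_lo();
                hi2 = read_hi();
                if (hi2 == hi)
                        break;          /* no rollover between the reads */
                hi = hi2;
        } while (--tries);

        return ((u64)hi << 32) | lo;
}
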
@@ -3868,6 +2818,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath_gen_timer_isr);
+/********/
+/* HTC */
+/********/
+
+void ath9k_hw_htc_resetinit(struct ath_hw *ah)
+{
+ ah->htc_reset_init = true;
+}
+EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
+
static struct {
u32 version;
const char * name;
@@ -3882,6 +2842,7 @@ static struct {
{ AR_SREV_VERSION_9285, "9285" },
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
+ { AR_SREV_VERSION_9300, "9300" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca5f97..77245dff599 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,18 +41,16 @@
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
#define AR2427_DEVID_PCIE 0x002c
+#define AR9287_DEVID_PCI 0x002d
+#define AR9287_DEVID_PCIE 0x002e
+#define AR9300_DEVID_PCIE 0x0030
#define AR5416_AR9100_DEVID 0x000b
-#define AR9271_USB 0x9271
-
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
#define AR5416_MAGIC 0x19641014
-#define AR5416_DEVID_AR9287_PCI 0x002D
-#define AR5416_DEVID_AR9287_PCIE 0x002E
-
#define AR9280_COEX2WIRE_SUBSYSID 0x309b
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -70,6 +68,24 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
+ } while (0)
+
+#define DISABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
+ } while (0)
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
+ } while (0)
+
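
A minimal usage sketch of the three buffering macros above (not part of the patch): on AR9271 the common ops' enable_write_buffer/write_flush/disable_write_buffer hooks let a run of register writes be batched into one buffered transfer, presumably to cut down on per-register transactions over the AR9271's USB/HTC transport; on every other chip the macros expand to nothing and each REG_WRITE goes out immediately. example_buffered_writes() is a hypothetical helper, and the two registers written are simply taken from the reset path earlier in this patch.

static void example_buffered_writes(struct ath_hw *ah)
{
        ENABLE_REGWRITE_BUFFER(ah);

        /* Any run of plain register writes can sit between enable and flush. */
        REG_WRITE(ah, AR_ISR, ~0);
        REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

        REGWRITE_BUFFER_FLUSH(ah);
        DISABLE_REGWRITE_BUFFER(ah);
}
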
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
#define REG_RMW(_a, _r, _set, _clr) \
@@ -77,6 +93,8 @@
#define REG_RMW_FIELD(_a, _r, _f, _v) \
REG_WRITE(_a, _r, \
(REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define REG_READ_FIELD(_a, _r, _f) \
+ (((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
#define REG_CLR_BIT(_a, _r, _f) \
@@ -137,6 +155,16 @@
#define TU_TO_USEC(_tu) ((_tu) << 10)
+#define ATH9K_HW_RX_HP_QDEPTH 16
+#define ATH9K_HW_RX_LP_QDEPTH 128
+
+enum ath_ini_subsys {
+ ATH_INI_PRE = 0,
+ ATH_INI_CORE,
+ ATH_INI_POST,
+ ATH_INI_NUM_SPLIT,
+};
+
enum wireless_mode {
ATH9K_MODE_11A = 0,
ATH9K_MODE_11G,
@@ -167,13 +195,16 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
+ ATH9K_HW_CAP_EDMA = BIT(17),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
+ ATH9K_HW_CAP_LDPC = BIT(19),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(20),
};
enum ath9k_capability_type {
ATH9K_CAP_CIPHER = 0,
ATH9K_CAP_TKIP_MIC,
ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_DIVERSITY,
ATH9K_CAP_TXPOW,
ATH9K_CAP_MCAST_KEYSRCH,
ATH9K_CAP_DS
@@ -194,6 +225,11 @@ struct ath9k_hw_capabilities {
u8 num_gpio_pins;
u8 num_antcfg_2ghz;
u8 num_antcfg_5ghz;
+ u8 rx_hp_qdepth;
+ u8 rx_lp_qdepth;
+ u8 rx_status_len;
+ u8 tx_desc_len;
+ u8 txs_len;
};
struct ath9k_ops_config {
@@ -214,6 +250,7 @@ struct ath9k_ops_config {
u32 enable_ani;
int serialize_regmode;
bool rx_intr_mitigation;
+ bool tx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -225,6 +262,7 @@ struct ath9k_ops_config {
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
+ bool tx_iq_calibration; /* Only available for >= AR9003 */
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
@@ -233,6 +271,8 @@ struct ath9k_ops_config {
enum ath9k_int {
ATH9K_INT_RX = 0x00000001,
ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXHP = 0x00000001,
+ ATH9K_INT_RXLP = 0x00000002,
ATH9K_INT_RXNOFRM = 0x00000008,
ATH9K_INT_RXEOL = 0x00000010,
ATH9K_INT_RXORN = 0x00000020,
@@ -329,10 +369,9 @@ struct ath9k_channel {
#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
-#define IS_CHAN_A_5MHZ_SPACED(_c) \
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- (((_c)->channel % 20) != 0) && \
- (((_c)->channel % 10) != 0))
+ ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
/* These macros check chanmode and not channelFlags */
#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -365,6 +404,12 @@ enum ser_reg_mode {
SER_REG_MODE_AUTO = 2,
};
+enum ath9k_rx_qtype {
+ ATH9K_RX_QUEUE_HP,
+ ATH9K_RX_QUEUE_LP,
+ ATH9K_RX_QUEUE_MAX,
+};
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -442,6 +487,124 @@ struct ath_gen_timer_table {
} timer_mask;
};
+/**
+ * struct ath_hw_private_ops - callbacks used internally by hardware code
+ *
+ * This structure contains private callbacks designed to only be used internally
+ * by the hardware core.
+ *
+ * @init_cal_settings: setup types of calibrations supported
+ * @init_cal: starts actual calibration
+ *
+ * @init_mode_regs: Initializes mode registers
+ * @init_mode_gain_regs: Initialize TX/RX gain registers
+ * @macversion_supported: If this specific mac revision is supported
+ *
+ * @rf_set_freq: change frequency
+ * @spur_mitigate_freq: spur mitigation
+ * @rf_alloc_ext_banks: allocate the external analog banks (non single-chip radios)
+ * @rf_free_ext_banks: free the external analog banks
+ * @set_rf_regs: write the RF/analog registers for the given channel
+ * @compute_pll_control: compute the PLL control value to use for
+ * AR_RTC_PLL_CONTROL for a given channel
+ * @setup_calibration: set up calibration
+ * @iscal_supported: used to query if a type of calibration is supported
+ * @loadnf: load noise floor read from each chain on the CCA registers
+ */
+struct ath_hw_private_ops {
+ /* Calibration ops */
+ void (*init_cal_settings)(struct ath_hw *ah);
+ bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ void (*init_mode_regs)(struct ath_hw *ah);
+ void (*init_mode_gain_regs)(struct ath_hw *ah);
+ bool (*macversion_supported)(u32 macversion);
+ void (*setup_calibration)(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+ bool (*iscal_supported)(struct ath_hw *ah,
+ enum ath9k_cal_types calType);
+
+ /* PHY ops */
+ int (*rf_set_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ void (*spur_mitigate_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*rf_alloc_ext_banks)(struct ath_hw *ah);
+ void (*rf_free_ext_banks)(struct ath_hw *ah);
+ bool (*set_rf_regs)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+ void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*init_bb)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*olc_init)(struct ath_hw *ah);
+ void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*mark_phy_inactive)(struct ath_hw *ah);
+ void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
+ bool (*rfbus_req)(struct ath_hw *ah);
+ void (*rfbus_done)(struct ath_hw *ah);
+ void (*enable_rfkill)(struct ath_hw *ah);
+ void (*restore_chainmask)(struct ath_hw *ah);
+ void (*set_diversity)(struct ath_hw *ah, bool value);
+ u32 (*compute_pll_control)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
+ int param);
+ void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+};
+
+/**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+ *
+ * This structure contains callbacks designed to be used internally by
+ * hardware code and also by the lower level driver.
+ *
+ * @config_pci_powersave: configure the PCIe SerDes/ASPM power save settings
+ * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ */
+struct ath_hw_ops {
+ void (*config_pci_powersave)(struct ath_hw *ah,
+ int restore,
+ int power_off);
+ void (*rx_enable)(struct ath_hw *ah);
+ void (*set_desc_link)(void *ds, u32 link);
+ void (*get_desc_link)(void *ds, u32 **link);
+ bool (*calibrate)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu);
+ int (*proc_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts);
+ void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags);
+ void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags);
+ void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
+ u32 aggrLen);
+ void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
+ u32 numDelims);
+ void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
+ void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
+ void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
+ u32 burstDuration);
+ void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
+ u32 vmf);
+};
+
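
These two tables replace the per-field callbacks that previously lived directly in struct ath_hw (the removed ath9k_hw_rf_set_freq and ath9k_hw_spur_mitigate_freq pointers); each hardware family populates them through the attach helpers declared at the end of this header. A rough sketch of what such an attach helper does, under the assumption of placeholder implementation names (ar9002_set_channel and ar9002_spur_mitigate are illustrative, not the real function names):

/* Placeholder callback implementations, named for illustration only. */
static int ar9002_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
        return 0;       /* real code programs the synthesizer here */
}

static void ar9002_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
{
        /* real code programs the spur mitigation registers here */
}

void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
        struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

        priv_ops->rf_set_freq = ar9002_set_channel;
        priv_ops->spur_mitigate_freq = ar9002_spur_mitigate;
}
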
struct ath_hw {
struct ieee80211_hw *hw;
struct ath_common common;
@@ -455,13 +618,18 @@ struct ath_hw {
struct ar5416_eeprom_def def;
struct ar5416_eeprom_4k map4k;
struct ar9287_eeprom map9287;
+ struct ar9300_eeprom ar9300_eep;
} eeprom;
const struct eeprom_ops *eep_ops;
- enum ath9k_eep_map eep_map;
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool need_an_top2_fixup;
u16 tx_trig_level;
+ s16 nf_2g_max;
+ s16 nf_2g_min;
+ s16 nf_5g_max;
+ s16 nf_5g_min;
u16 rfsilent;
u32 rfkill_gpio;
u32 rfkill_polarity;
@@ -478,7 +646,8 @@ struct ath_hw {
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
int16_t curchan_rad_index;
- u32 mask_reg;
+ enum ath9k_int imask;
+ u32 imrs2_reg;
u32 txok_interrupt_mask;
u32 txerr_interrupt_mask;
u32 txdesc_interrupt_mask;
@@ -493,6 +662,7 @@ struct ath_hw {
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_calinitdata;
struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
DONT_USE_32KHZ,
} enable_32kHz_clock;
- /* Callback for radio frequency change */
- int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
-
- /* Callback for baseband spur frequency */
- void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
- struct ath9k_channel *chan);
+ /* Private to hardware code */
+ struct ath_hw_private_ops private_ops;
+ /* Accessed by the lower level driver */
+ struct ath_hw_ops ops;
/* Used to program the radio on non single-chip devices */
u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
u32 *addac5416_21;
u32 *bank6Temp;
+ u8 txpower_limit;
int16_t txpower_indexoffset;
int coverage_class;
u32 beacon_interval;
@@ -592,16 +761,34 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+ struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
+ struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
+ struct ar5416IniArray iniModes_9271_ANI_reg;
+ struct ar5416IniArray iniModes_high_power_tx_gain_9271;
+ struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
+
+ struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
u32 intr_gen_timer_trigger;
u32 intr_gen_timer_thresh;
struct ath_gen_timer_table hw_gen_timers;
+
+ struct ar9003_txs *ts_ring;
+ void *ts_start;
+ u32 ts_paddr_start;
+ u32 ts_paddr_end;
+ u16 ts_tail;
+ u8 ts_size;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -614,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
return &(ath9k_hw_common(ah)->regulatory);
}
+static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
+{
+ return &ah->private_ops;
+}
+
+static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
+{
+ return &ah->ops;
+}
+
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
void ath9k_hw_deinit(struct ath_hw *ah);
@@ -625,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 *result);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status);
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -673,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
-
-/* Interrupt Handling */
-bool ath9k_hw_intrpend(struct ath_hw *ah);
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -701,6 +893,39 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+/* HTC */
+void ath9k_hw_htc_resetinit(struct ath_hw *ah);
+
+/* PHY */
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent);
+
+/*
+ * Code Specific to AR5008, AR9001 or AR9002,
+ * we stuff these here to avoid callbacks for AR9003.
+ */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
+int ar9002_hw_rf_claim(struct ath_hw *ah);
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
+
+/*
+ * Code specific to AR9003, we stuff these here to avoid callbacks
+ * for older families
+ */
+void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+
+/* Hardware family op attach helpers */
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_ops(struct ath_hw *ah);
+void ar9003_hw_attach_ops(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897add6..d457cb3bd77 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
+static int count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+
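
count_streams() above is a capped popcount: each pass of the loop clears the lowest set bit of the chainmask, so the return value is the number of active chains, limited to max, and never less than 1 because the do/while body runs once even for a zero mask. A few worked examples (the assertions are illustrative only, not driver code):

#include <assert.h>

static void count_streams_examples(void)
{
        assert(count_streams(0x7, 3) == 3);     /* chains 0, 1, 2 */
        assert(count_streams(0x5, 3) == 2);     /* chains 0 and 2 */
        assert(count_streams(0x3, 1) == 1);     /* capped at max */
        assert(count_streams(0x0, 3) == 1);     /* zero mask still yields 1 */
}
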
/**************************/
/* Initialization */
/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
static void setup_ht_cap(struct ath_softc *sc,
struct ieee80211_sta_ht_cap *ht_info)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u8 tx_streams, rx_streams;
+ int i, max_streams;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
+ tx_streams = count_streams(common->tx_chainmask, max_streams);
+ rx_streams = count_streams(common->rx_chainmask, max_streams);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
}
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc)
+ int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
+ u8 *ds;
struct ath_buf *bf;
- int i, bsize, error;
+ int i, bsize, error, desc_len;
ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
+
+ if (is_tx)
+ desc_len = sc->sc_ah->caps.tx_desc_len;
+ else
+ desc_len = sizeof(struct ath_desc);
+
/* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
+ if ((desc_len % 4) != 0) {
ath_print(common, ATH_DBG_FATAL,
"ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
}
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+ dd->dd_desc_len = desc_len * nbuf * ndesc;
/*
* Need additional DMA memory because we can't use
@@ -272,11 +304,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
u32 dma_len;
while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dma_len = ndesc_skipped * desc_len;
dd->dd_desc_len += dma_len;
ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
+ }
}
/* allocate descriptors */
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
error = -ENOMEM;
goto fail;
}
- ds = dd->dd_desc;
+ ds = (u8 *) dd->dd_desc;
ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
dd->dd_bufptr = bf;
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
((caddr_t) dd->dd_desc +
dd->dd_desc_len));
- ds += ndesc;
+ ds += (desc_len * ndesc);
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ ath9k_hw_set_diversity(sc->sc_ah, true);
sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
+ /* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ if (ret)
goto err_hw;
- }
ret = ath9k_init_debug(ah);
if (ret) {
@@ -760,6 +789,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(sc->sc_ah);
+ sc->sc_ah = NULL;
}
void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420cd42b..0e425cb4bbb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,14 +25,21 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
ah->txurn_interrupt_mask);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_IMR_S0,
SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
| SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
REG_WRITE(ah, AR_IMR_S1,
SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
| SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
- REG_RMW_FIELD(ah, AR_IMR_S2,
- AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
+
+ ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
+ ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -55,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_txstart);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
+
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
u32 npend;
@@ -103,7 +122,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
- omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
+ omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@@ -205,280 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (firstSeg) {
- ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
- } else if (lastSeg) {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen;
- ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
- ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
- } else {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen | AR_TxMore;
- ads->ds_ctl2 = 0;
- ads->ds_ctl3 = 0;
- }
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_filltxdesc);
-
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
-
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if ((ads->ds_txstatus9 & AR_TxDone) == 0)
- return -EINPROGRESS;
-
- ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
- ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
- ds->ds_txstat.ts_status = 0;
- ds->ds_txstat.ts_flags = 0;
-
- if (ads->ds_txstatus1 & AR_FrmXmitOK)
- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
- if (ads->ds_txstatus1 & AR_ExcessiveRetries)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->ds_txstatus1 & AR_Filtered)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
- if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus9 & AR_TxOpExceeded)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->ds_txstatus1 & AR_TxTimerExpired)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->ds_txstatus1 & AR_DescCfgErr)
- ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus0 & AR_TxBaStatus) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
- ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
- ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
- }
-
- ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
- switch (ds->ds_txstat.ts_rateindex) {
- case 0:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
- break;
- case 1:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
- break;
- case 2:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
- break;
- case 3:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
- break;
- }
-
- ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
- ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
- ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
- ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
- ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
- ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
- ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
- ds->ds_txstat.evm0 = ads->AR_TxEVM0;
- ds->ds_txstat.evm1 = ads->AR_TxEVM1;
- ds->ds_txstat.evm2 = ads->AR_TxEVM2;
- ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
- ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
- ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
- ds->ds_txstat.ts_antenna = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(ath9k_hw_txprocdesc);
-
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- txPower += ah->txpower_indexoffset;
- if (txPower > 63)
- txPower = 63;
-
- ads->ds_ctl0 = (pktLen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txPower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
- | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
-
- ads->ds_ctl1 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ds_ctl6 = SM(keyType, AR_EncrType);
-
- if (AR_SREV_9285(ah)) {
- ads->ds_ctl8 = 0;
- ads->ds_ctl9 = 0;
- ads->ds_ctl10 = 0;
- ads->ds_ctl11 = 0;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
-
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ar5416_desc *last_ads = AR5416DESC(lastds);
- u32 ds_ctl0;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ds_ctl0 = ads->ds_ctl0;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ds_ctl0 &= ~AR_CTSEnable;
- ds_ctl0 |= AR_RTSEnable;
- } else {
- ds_ctl0 &= ~AR_RTSEnable;
- ds_ctl0 |= AR_CTSEnable;
- }
-
- ads->ds_ctl0 = ds_ctl0;
- } else {
- ads->ds_ctl0 =
- (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ds_ctl2 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ds_ctl3 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ds_ctl7 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- last_ads->ds_ctl2 = ads->ds_ctl2;
- last_ads->ds_ctl3 = ads->ds_ctl3;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
-
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
- ads->ds_ctl6 &= ~AR_AggrLen;
- ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
-
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- unsigned int ctl6;
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
-
- ctl6 = ads->ds_ctl6;
- ctl6 &= ~AR_PadDelim;
- ctl6 |= SM(numDelims, AR_PadDelim);
- ads->ds_ctl6 = ctl6;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
-
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= AR_IsAggr;
- ads->ds_ctl1 &= ~AR_MoreAggr;
- ads->ds_ctl6 &= ~AR_PadDelim;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
-
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
-
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl2 &= ~AR_BurstDur;
- ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
-
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (vmf)
- ads->ds_ctl0 |= AR_VirtMoreFrag;
- else
- ads->ds_ctl0 &= ~AR_VirtMoreFrag;
-}
-
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
*txqs &= ah->intr_txqs;
@@ -730,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
} else
cwMin = qi->tqi_cwmin;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_DLCL_IFS(q),
SM(cwMin, AR_D_LCL_IFS_CWMIN) |
SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -744,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q),
AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+ REGWRITE_BUFFER_FLUSH(ah);
+
if (qi->tqi_cbrPeriod) {
REG_WRITE(ah, AR_QCBRCFG(q),
SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -759,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_Q_RDYTIMECFG_EN);
}
+ REGWRITE_BUFFER_FLUSH(ah);
+
REG_WRITE(ah, AR_DCHNTIME(q),
SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -776,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_READ(ah, AR_DMISC(q)) |
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
REG_WRITE(ah, AR_DMISC(q),
REG_READ(ah, AR_DMISC(q)) |
@@ -783,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
}
switch (qi->tqi_type) {
case ATH9K_TX_QUEUE_BEACON:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_BEACON_USE
@@ -793,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
| AR_D_MISC_BEACON_USE
| AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ /* cwmin and cwmax should be 0 for beacon queue */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
+ | SM(0, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+ }
break;
case ATH9K_TX_QUEUE_CAB:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_CBR_INCR_DIS1
@@ -808,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
break;
case ATH9K_TX_QUEUE_PSPOLL:
REG_WRITE(ah, AR_QMISC(q),
@@ -829,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
+
if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
ah->txok_interrupt_mask |= 1 << q;
else
@@ -856,7 +632,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf)
+ struct ath_rx_status *rs, u64 tsf)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,92 +643,76 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
ads.u.rx = adsp->u.rx;
- ds->ds_rxstat.rs_status = 0;
- ds->ds_rxstat.rs_flags = 0;
+ rs->rs_status = 0;
+ rs->rs_flags = 0;
- ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
- ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
+ rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ rs->rs_tstamp = ads.AR_RcvTimestamp;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
- ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
} else {
- ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
- ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
else
- ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
+ rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
- ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
- ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
+ rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
- ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- ds->ds_rxstat.rs_moreaggr =
+ rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
- ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
- ds->ds_rxstat.rs_flags =
+ rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+ rs->rs_flags =
(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
- ds->ds_rxstat.rs_flags |=
+ rs->rs_flags |=
(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+ rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
if (ads.ds_rxstatus8 & AR_CRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
+ rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
+ rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
- ds->ds_rxstat.rs_phyerr = phyerr;
+ rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
+ rs->rs_status |= ATH9K_RXERR_MIC;
}
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
-void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 size, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
-
- ads->ds_ctl1 = size & AR_BufLen;
- if (flags & ATH9K_RXDESC_INTREQ)
- ads->ds_ctl1 |= AR_RxIntrReq;
-
- ads->ds_rxstatus8 &= ~AR_RxDone;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- memset(&(ads->u), 0, sizeof(ads->u));
-}
-EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
-
/*
 * This can stop or re-enable RX.
*
@@ -996,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_rxena(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
-}
-EXPORT_SYMBOL(ath9k_hw_rxena);
-
void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
ath9k_enable_mib_counters(ah);
@@ -1020,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
+
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1065,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
+
+bool ath9k_hw_intrpend(struct ath_hw *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_intrpend);
+
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints)
+{
+ enum ath9k_int omask = ah->imask;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+
+ if (omask & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+ }
+
+ /* TODO: global int Ref count */
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ah->config.tx_intr_mitigation)
+ mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
+ else {
+ if (ah->txok_interrupt_mask)
+ mask |= AR_IMR_TXOK;
+ if (ah->txdesc_interrupt_mask)
+ mask |= AR_IMR_TXDESC;
+ }
+ if (ah->txerr_interrupt_mask)
+ mask |= AR_IMR_TXERR;
+ if (ah->txeol_interrupt_mask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation) {
+ mask &= ~AR_IMR_RXOK_LP;
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ } else {
+ mask |= AR_IMR_RXOK_LP;
+ }
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= AR_IMR_S2_CABEND;
+ if (ints & ATH9K_INT_TSFOOR)
+ mask2 |= AR_IMR_S2_TSFOOR;
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg |= mask2;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ if (ints & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+ }
+
+ return omask;
+}
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
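For context, a minimal sketch of how the two helpers added above are meant to be driven from the ISR path: ath9k_hw_intrpend() screens a (possibly shared) interrupt line, and ath9k_hw_set_interrupts() masks everything but SWBA until the bottom half has run and restores ah->imask afterwards. This mirrors the ath_isr()/ath9k_tasklet() changes to main.c further down in this patch; the function name and the omitted locking/error handling are illustrative only.

/* Illustrative sketch, not part of this patch. */
static irqreturn_t example_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hw *ah = sc->sc_ah;
	enum ath9k_int status;

	if (!ath9k_hw_intrpend(ah))		/* shared line, not our interrupt */
		return IRQ_NONE;

	ath9k_hw_getisr(ah, &status);		/* NB: clears ISR */
	status &= ah->imask;			/* drop bits we did not ask for */
	if (!status)
		return IRQ_NONE;

	/* keep only SWBA enabled until the tasklet has run */
	ath9k_hw_set_interrupts(ah, ah->imask & ATH9K_INT_SWBA);
	tasklet_schedule(&sc->intr_tq);

	return IRQ_HANDLED;
}

/* ...and the tasklet re-arms the full mask when it is done:
 *	ath9k_hw_set_interrupts(ah, ah->imask);
 */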
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6376a..00f3e0c7528 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
AR_2040_##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
+ AR_STBC##_index : 0) \
|SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
#define ATH9K_TX_DESC_CFG_ERR 0x04
#define ATH9K_TX_DATA_UNDERRUN 0x08
#define ATH9K_TX_DELIM_UNDERRUN 0x10
-#define ATH9K_TX_SW_ABORTED 0x40
#define ATH9K_TX_SW_FILTERED 0x80
/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
int8_t ts_rssi_ext0;
int8_t ts_rssi_ext1;
int8_t ts_rssi_ext2;
- u8 pad[3];
+ u8 qid;
+ u16 desc_id;
+ u8 tid;
+ u8 pad[2];
u32 ba_low;
u32 ba_high;
u32 evm0;
@@ -148,6 +152,34 @@ struct ath_rx_status {
u32 evm0;
u32 evm1;
u32 evm2;
+ u32 evm3;
+ u32 evm4;
+};
+
+struct ath_htc_rx_status {
+ __be64 rs_tstamp;
+ __be16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u8 rs_dummy;
+ __be32 evm0;
+ __be32 evm1;
+ __be32 evm2;
};
#define ATH9K_RXERR_CRC 0x01
@@ -207,18 +239,9 @@ struct ath_desc {
u32 ds_ctl0;
u32 ds_ctl1;
u32 ds_hw[20];
- union {
- struct ath_tx_status tx;
- struct ath_rx_status rx;
- void *stats;
- } ds_us;
void *ds_vdata;
} __packed;
-#define ds_txstat ds_us.tx
-#define ds_rxstat ds_us.rx
-#define ds_stat ds_us.stats
-
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
#define ATH9K_TXDESC_RTSENA 0x0004
@@ -242,7 +265,8 @@ struct ath_desc {
#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
#define ATH9K_TXDESC_VMF 0x0100
#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
-#define ATH9K_TXDESC_CAB 0x0400
+#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
+#define ATH9K_TXDESC_LDPC 0x00010000
#define ATH9K_RXDESC_INTREQ 0x0020
@@ -336,7 +360,6 @@ struct ar5416_desc {
#define AR_DestIdxValid 0x40000000
#define AR_CTSEnable 0x80000000
-#define AR_BufLen 0x00000fff
#define AR_TxMore 0x00001000
#define AR_DestIdx 0x000fe000
#define AR_DestIdx_S 13
@@ -393,6 +416,7 @@ struct ar5416_desc {
#define AR_EncrType 0x0c000000
#define AR_EncrType_S 26
#define AR_TxCtlRsvd61 0xf0000000
+#define AR_LDPC 0x80000000
#define AR_2040_0 0x00000001
#define AR_GI0 0x00000002
@@ -412,7 +436,10 @@ struct ar5416_desc {
#define AR_ChainSel3_S 17
#define AR_RTSCTSRate 0x0ff00000
#define AR_RTSCTSRate_S 20
-#define AR_TxCtlRsvd70 0xf0000000
+#define AR_STBC0 0x10000000
+#define AR_STBC1 0x20000000
+#define AR_STBC2 0x40000000
+#define AR_STBC3 0x80000000
#define AR_TxRSSIAnt00 0x000000ff
#define AR_TxRSSIAnt00_S 0
@@ -476,7 +503,6 @@ struct ar5416_desc {
#define AR_RxCTLRsvd00 0xffffffff
-#define AR_BufLen 0x00000fff
#define AR_RxCtlRsvd00 0x00001000
#define AR_RxIntrReq 0x00002000
#define AR_RxCtlRsvd01 0xffffc000
@@ -626,6 +652,7 @@ enum ath9k_rx_filter {
#define ATH9K_RATESERIES_RTS_CTS 0x0001
#define ATH9K_RATESERIES_2040 0x0002
#define ATH9K_RATESERIES_HALFGI 0x0004
+#define ATH9K_RATESERIES_STBC 0x0008
struct ath9k_11n_rate_series {
u32 Tries;
@@ -669,33 +696,10 @@ struct ath9k_channel;
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0);
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags);
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags);
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen);
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims);
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration);
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
@@ -706,15 +710,22 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf);
+ struct ath_rx_status *rs, u64 tsf);
void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
u32 size, u32 flags);
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
-void ath9k_hw_rxena(struct ath_hw *ah);
void ath9k_hw_startpcureceive(struct ath_hw *ah);
void ath9k_hw_stoppcurecv(struct ath_hw *ah);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+/* Interrupt Handling */
+bool ath9k_hw_intrpend(struct ath_hw *ah);
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
+
#endif /* MAC_H */
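A hedged illustration of the call-side effect of the new ath9k_hw_rxprocdesc() prototype declared above: with the ds_us union removed from struct ath_desc, rx status now lives in a caller-supplied struct ath_rx_status instead of ds->ds_rxstat. Only the prototype and the ATH9K_RXERR_CRC/rs_* names come from this patch; the wrapper function and its return conventions are assumptions for illustration.

/* Illustrative sketch; the real caller is the rx tasklet in recv.c. */
static int example_process_rx_desc(struct ath_hw *ah, struct ath_desc *ds,
				   u64 tsf)
{
	struct ath_rx_status rs;	/* caller-owned, replaces ds->ds_rxstat */
	int ret;

	memset(&rs, 0, sizeof(rs));
	ret = ath9k_hw_rxprocdesc(ah, ds, &rs, tsf);
	if (ret)
		return ret;		/* descriptor not done yet, or error */

	if (rs.rs_status & ATH9K_RXERR_CRC)
		return -EIO;		/* drop frames that failed CRC */

	return rs.rs_datalen;		/* payload length reported by hardware */
}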
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115e1aeedb5..abfa0493236 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ps_restore:
ath9k_ps_restore(sc);
@@ -401,23 +401,41 @@ void ath9k_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
u32 status = sc->intrstatus;
+ u32 rxmask;
ath9k_ps_wakeup(sc);
- if (status & ATH9K_INT_FATAL) {
+ if ((status & ATH9K_INT_FATAL) ||
+ !ath9k_hw_check_alive(ah)) {
ath_reset(sc, false);
ath9k_ps_restore(sc);
return;
}
- if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN);
+ else
+ rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+
+ if (status & rxmask) {
spin_lock_bh(&sc->rx.rxflushlock);
- ath_rx_tasklet(sc, 0);
+
+ /* Check for high priority Rx first */
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ (status & ATH9K_INT_RXHP))
+ ath_rx_tasklet(sc, 0, true);
+
+ ath_rx_tasklet(sc, 0, false);
spin_unlock_bh(&sc->rx.rxflushlock);
}
- if (status & ATH9K_INT_TX)
- ath_tx_tasklet(sc);
+ if (status & ATH9K_INT_TX) {
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_tasklet(sc);
+ else
+ ath_tx_tasklet(sc);
+ }
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
@@ -434,7 +452,7 @@ void ath9k_tasklet(unsigned long data)
ath_gen_timer_isr(sc->sc_ah);
/* re-enable hardware interrupt */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ath9k_ps_restore(sc);
}
@@ -445,6 +463,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_RXORN | \
ATH9K_INT_RXEOL | \
ATH9K_INT_RX | \
+ ATH9K_INT_RXLP | \
+ ATH9K_INT_RXHP | \
ATH9K_INT_TX | \
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
@@ -477,7 +497,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 * value to ensure we only process bits we requested.
*/
ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
- status &= sc->imask; /* discard unasked-for bits */
+ status &= ah->imask; /* discard unasked-for bits */
/*
* If there are no status bits set, then this interrupt was not
@@ -496,7 +516,8 @@ irqreturn_t ath_isr(int irq, void *dev)
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
*/
- if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
+ if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
+ !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
goto chip_reset;
if (status & ATH9K_INT_SWBA)
@@ -505,6 +526,13 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & ATH9K_INT_TXURN)
ath9k_hw_updatetxtriglevel(ah, true);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXEOL) {
+ ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ }
+ }
+
if (status & ATH9K_INT_MIB) {
/*
* Disable interrupts until we service the MIB
@@ -518,7 +546,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* the interrupt.
*/
ath9k_hw_procmibevent(ah);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -536,7 +564,7 @@ chip_reset:
if (sched) {
/* turn off every interrupt except SWBA */
- ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
+ ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
tasklet_schedule(&sc->intr_tq);
}
@@ -887,7 +915,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_beacon_config(sc, NULL); /* restart beacons */
/* Re-Enable interrupts */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Enable LED */
ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -977,7 +1005,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (retry_tx) {
int i;
@@ -1162,23 +1190,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
}
/* Setup our intr mask. */
- sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
- | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
- | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
+ ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
+ ATH9K_INT_RXORN | ATH9K_INT_FATAL |
+ ATH9K_INT_GLOBAL;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+ else
+ ah->imask |= ATH9K_INT_RX;
if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
- sc->imask |= ATH9K_INT_GTT;
+ ah->imask |= ATH9K_INT_GTT;
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- sc->imask |= ATH9K_INT_CST;
+ ah->imask |= ATH9K_INT_CST;
ath_cache_conf_rate(sc, &hw->conf);
sc->sc_flags &= ~SC_OP_INVALID;
/* Disable BMISS interrupt when we're not associated */
- sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ ath9k_hw_set_interrupts(ah, ah->imask);
ieee80211_wake_queues(hw);
@@ -1372,14 +1405,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
mutex_lock(&sc->mutex);
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
sc->nvifs > 0) {
ret = -ENOBUFS;
goto out;
@@ -1414,19 +1448,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->nvifs++;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
ath9k_set_bssid_mask(hw);
if (sc->nvifs > 1)
goto out; /* skip global settings for secondary vif */
if (ic_opmode == NL80211_IFTYPE_AP) {
- ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
+ ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
}
/* Set the device opmode */
- sc->sc_ah->opmode = ic_opmode;
+ ah->opmode = ic_opmode;
/*
* Enable MIB interrupts when there are hardware phy counters.
@@ -1435,11 +1469,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_ADHOC) ||
(vif->type == NL80211_IFTYPE_MESH_POINT)) {
- sc->imask |= ATH9K_INT_MIB;
- sc->imask |= ATH9K_INT_TSFOOR;
+ if (ah->config.enable_ani)
+ ah->imask |= ATH9K_INT_MIB;
+ ah->imask |= ATH9K_INT_TSFOOR;
}
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
@@ -1495,15 +1530,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
void ath9k_enable_ps(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+
sc->ps_enabled = true;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ ah->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_hw_setrxabort(ah, 1);
}
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1579,10 +1615,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK);
- if (sc->imask & ATH9K_INT_TIM_TIMER) {
- sc->imask &= ~ATH9K_INT_TIM_TIMER;
+ if (ah->imask & ATH9K_INT_TIM_TIMER) {
+ ah->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
+ ah->imask);
}
}
}
@@ -1986,6 +2022,25 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
+static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = common->ani.noise_floor;
+
+ return 0;
+}
+
static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2057,6 +2112,7 @@ struct ieee80211_ops ath9k_ops = {
.set_tsf = ath9k_set_tsf,
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
+ .get_survey = ath9k_get_survey,
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
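The tasklet hunk above splits rx handling for EDMA-capable (AR9003-class) hardware into a high-priority and a low-priority ring; the sketch below isolates that dispatch pattern, assuming the same sc/ah pointers and the new three-argument ath_rx_tasklet(). Everything else is illustrative and not taken from this patch.

/* Illustrative sketch of the RXHP/RXLP dispatch. */
static void example_rx_dispatch(struct ath_softc *sc, u32 status)
{
	struct ath_hw *ah = sc->sc_ah;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		/* drain the high-priority ring first... */
		if (status & ATH9K_INT_RXHP)
			ath_rx_tasklet(sc, 0, true);
		/* ...then always drain the low-priority ring */
		ath_rx_tasklet(sc, 0, false);
	} else {
		/* legacy chips have a single rx ring */
		ath_rx_tasklet(sc, 0, false);
	}
}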
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9441c6718a3..1ec836cf1c0 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -88,6 +88,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
}
static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
deleted file mode 100644
index 2547b3c4a26..00000000000
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * DOC: Programming Atheros 802.11n analog front end radios
- *
- * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
- * devices have either an external AR2133 analog front end radio for single
- * band 2.4 GHz communication or an AR5133 analog front end radio for dual
- * band 2.4 GHz / 5 GHz communication.
- *
- * All devices after the AR5416 and AR5418 family starting with the AR9280
- * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
- * into a single-chip and require less programming.
- *
- * The following single-chips exist with a respective embedded radio:
- *
- * AR9280 - 11n dual-band 2x2 MIMO for PCIe
- * AR9281 - 11n single-band 1x2 MIMO for PCIe
- * AR9285 - 11n single-band 1x1 for PCIe
- * AR9287 - 11n single-band 2x2 MIMO for PCIe
- *
- * AR9220 - 11n dual-band 2x2 MIMO for PCI
- * AR9223 - 11n single-band 2x2 MIMO for PCI
- *
- * AR9287 - 11n single-band 1x1 MIMO for USB
- */
-
-#include <linux/slab.h>
-
-#include "hw.h"
-
-/**
- * ath9k_hw_write_regs - ??
- *
- * @ah: atheros hardware structure
- * @freqIndex:
- * @regWrites:
- *
- * Used for both the chipsets with an external AR2133/AR5133 radios and
- * single-chip devices.
- */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
-{
- REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
-}
-
-/**
- * ath9k_hw_ar9280_set_channel - set channel on single-chip device
- * @ah: atheros hardware structure
- * @chan:
- *
- * This is the function to change channel on single-chip devices, that is
- * all devices after ar9280.
- *
- * This function takes the channel value in MHz and sets
- * hardware channel value. Assumes writes have been enabled to analog bus.
- *
- * Actual Expression,
- *
- * For 2GHz channel,
- * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
- * (freq_ref = 40MHz)
- *
- * For 5GHz channel,
- * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
- * (freq_ref = 40MHz/(24>>amodeRefSel))
- */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u16 bMode, fracMode, aModeRefSel = 0;
- u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
- struct chan_centers centers;
- u32 refDivA = 24;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
- reg32 &= 0xc0000000;
-
- if (freq < 4800) { /* 2 GHz, fractional mode */
- u32 txctl;
- int regWrites = 0;
-
- bMode = 1;
- fracMode = 1;
- aModeRefSel = 0;
- channelSel = (freq * 0x10000) / 15;
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
- 1, regWrites);
- } else {
- REG_WRITE_ARRAY(&ah->iniCckfirNormal,
- 1, regWrites);
- }
- } else {
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
- /* Enable channel spreading for channel 14 */
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
- }
- }
- } else {
- bMode = 0;
- fracMode = 0;
-
- switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
- case 0:
- if ((freq % 20) == 0) {
- aModeRefSel = 3;
- } else if ((freq % 10) == 0) {
- aModeRefSel = 2;
- }
- if (aModeRefSel)
- break;
- case 1:
- default:
- aModeRefSel = 0;
- /*
- * Enable 2G (fractional) mode for channels
- * which are 5MHz spaced.
- */
- fracMode = 1;
- refDivA = 1;
- channelSel = (freq * 0x8000) / 15;
-
- /* RefDivA setting */
- REG_RMW_FIELD(ah, AR_AN_SYNTH9,
- AR_AN_SYNTH9_REFDIVA, refDivA);
-
- }
-
- if (!fracMode) {
- ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
- channelSel = ndiv & 0x1ff;
- channelFrac = (ndiv & 0xfffffe00) * 2;
- channelSel = (channelSel << 17) | channelFrac;
- }
- }
-
- reg32 = reg32 |
- (bMode << 29) |
- (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
-
- REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
-
-/**
- * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
- * @ah: atheros hardware structure
- * @chan:
- *
- * For single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int freq;
- int bin, cur_bin;
- int bb_spur_off, spur_subchannel_sd;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, newVal;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
- struct chan_centers centers;
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- ah->config.spurmode = SPUR_ENABLE_EEPROM;
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-
- if (is2GHz)
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
- else
- cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
-
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - freq;
-
- if (IS_CHAN_HT40(chan)) {
- if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
- bb_spur = cur_bb_spur;
- break;
- }
- } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
- (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur) {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- return;
- } else {
- REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
- AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
- }
-
- bin = bb_spur * 320;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-
- newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
-
- newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
-
- if (IS_CHAN_HT40(chan)) {
- if (bb_spur < 0) {
- spur_subchannel_sd = 1;
- bb_spur_off = bb_spur + 10;
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur - 10;
- }
- } else {
- spur_subchannel_sd = 0;
- bb_spur_off = bb_spur;
- }
-
- if (IS_CHAN_HT40(chan))
- spur_delta_phase =
- ((bb_spur * 262144) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
- else
- spur_delta_phase =
- ((bb_spur * 524288) /
- 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
- spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
-
- newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, newVal);
-
- newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
- REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/* All code below is for non single-chip solutions */
-
-/**
- * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
- * @rfbuf:
- * @reg32:
- * @numBits:
- * @firstBit:
- * @column:
- *
- * Performs analog "swizzling" of parameters into their location.
- * Used on external AR2133/AR5133 radios.
- */
-static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
- u32 numBits, u32 firstBit,
- u32 column)
-{
- u32 tmp32, mask, arrayEntry, lastBit;
- int32_t bitPosition, bitsLeft;
-
- tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
- arrayEntry = (firstBit - 1) / 8;
- bitPosition = (firstBit - 1) % 8;
- bitsLeft = numBits;
- while (bitsLeft > 0) {
- lastBit = (bitPosition + bitsLeft > 8) ?
- 8 : bitPosition + bitsLeft;
- mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
- (column * 8);
- rfBuf[arrayEntry] &= ~mask;
- rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
- (column * 8)) & mask;
- bitsLeft -= 8 - bitPosition;
- tmp32 = tmp32 >> (8 - bitPosition);
- bitPosition = 0;
- arrayEntry++;
- }
-}
-
-/*
- * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
- * rf_pwd_icsyndiv.
- *
- * Theoretical Rules:
- * if 2 GHz band
- * if forceBiasAuto
- * if synth_freq < 2412
- * bias = 0
- * else if 2412 <= synth_freq <= 2422
- * bias = 1
- * else // synth_freq > 2422
- * bias = 2
- * else if forceBias > 0
- * bias = forceBias & 7
- * else
- * no change, use value from ini file
- * else
- * no change, invalid band
- *
- * 1st Mod:
- * 2422 also uses value of 2
- * <approved>
- *
- * 2nd Mod:
- * Less than 2412 uses value of 0, 2412 and above uses value of 2
- */
-static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 tmp_reg;
- int reg_writes = 0;
- u32 new_bias = 0;
-
- if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
- return;
- }
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- if (synth_freq < 2412)
- new_bias = 0;
- else if (synth_freq < 2422)
- new_bias = 1;
- else
- new_bias = 2;
-
- /* pre-reverse this field */
- tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
-
- ath_print(common, ATH_DBG_CONFIG,
- "Force rf_pwd_icsyndiv to %1d on %4d\n",
- new_bias, synth_freq);
-
- /* swizzle rf_pwd_icsyndiv */
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
-
- /* write Bank 6 with new params */
- REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
-}
-
-/**
- * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For the external AR2133/AR5133 radios, takes the MHz channel value and set
- * the channel value. Assumes writes enabled to analog bus and bank6 register
- * cache in ah->analogBank6Data.
- */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 channelSel = 0;
- u32 bModeSynth = 0;
- u32 aModeRefSel = 0;
- u32 reg32 = 0;
- u16 freq;
- struct chan_centers centers;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- freq = centers.synth_center;
-
- if (freq < 4800) {
- u32 txctl;
-
- if (((freq - 2192) % 5) == 0) {
- channelSel = ((freq - 672) * 2 - 3040) / 10;
- bModeSynth = 0;
- } else if (((freq - 2224) % 5) == 0) {
- channelSel = ((freq - 704) * 2 - 3040) / 10;
- bModeSynth = 1;
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- channelSel = (channelSel << 2) & 0xff;
- channelSel = ath9k_hw_reverse_bits(channelSel, 8);
-
- txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
- if (freq == 2484) {
-
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
- } else {
- REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
- txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
- }
-
- } else if ((freq % 20) == 0 && freq >= 5120) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 10) == 0) {
- channelSel =
- ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
- aModeRefSel = ath9k_hw_reverse_bits(2, 2);
- else
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else if ((freq % 5) == 0) {
- channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
- aModeRefSel = ath9k_hw_reverse_bits(1, 2);
- } else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
- return -EINVAL;
- }
-
- ath9k_hw_force_bias(ah, freq);
-
- reg32 =
- (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
- (1 << 5) | 0x1;
-
- REG_WRITE(ah, AR_PHY(0x37), reg32);
-
- ah->curchan = chan;
- ah->curchan_rad_index = -1;
-
- return 0;
-}
-
-/**
- * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- int bb_spur = AR_NO_SPUR;
- int bin, cur_bin;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
- int upper, lower, cur_vit_mask;
- int tmp, new;
- int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- int inc[4] = { 0, 100, 0, 0 };
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - (chan->channel * 10);
- if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur)
- return;
-
- bin = bb_spur * 32;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
- new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
- new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
- spur_delta_phase = ((bb_spur * 524288) / 100) &
- AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
- spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
- new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, new);
-
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
-}
-
-/**
- * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
- * @ah: atheros hardware structure
- *
- * Only required for older devices with external AR2133/AR5133 radios.
- */
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
-{
-#define ATH_ALLOC_BANK(bank, size) do { \
- bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
- if (!bank) { \
- ath_print(common, ATH_DBG_FATAL, \
- "Cannot allocate RF banks\n"); \
- return -ENOMEM; \
- } \
- } while (0);
-
- struct ath_common *common = ath9k_hw_common(ah);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
- ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
- ATH_ALLOC_BANK(ah->addac5416_21,
- ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
- ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
-
- return 0;
-#undef ATH_ALLOC_BANK
-}
-
-
-/**
- * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
- * @ah: atheros hardware structure
- * For the external AR2133/AR5133 radios banks.
- */
-void
-ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
- kfree(bank); \
- bank = NULL; \
- } while (0);
-
- BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
-
- ATH_FREE_BANK(ah->analogBank0Data);
- ATH_FREE_BANK(ah->analogBank1Data);
- ATH_FREE_BANK(ah->analogBank2Data);
- ATH_FREE_BANK(ah->analogBank3Data);
- ATH_FREE_BANK(ah->analogBank6Data);
- ATH_FREE_BANK(ah->analogBank6TPCData);
- ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->addac5416_21);
- ATH_FREE_BANK(ah->bank6Temp);
-
-#undef ATH_FREE_BANK
-}
-
-/* *
- * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
- * @ah: atheros hardware structure
- * @chan:
- * @modesIndex:
- *
- * Used for the external AR2133/AR5133 radios.
- *
- * Reads the EEPROM header info from the device structure and programs
- * all rf registers. This routine requires access to the analog
- * rf device. This is not required for single-chip devices.
- */
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
- u16 modesIndex)
-{
- u32 eepMinorRev;
- u32 ob5GHz = 0, db5GHz = 0;
- u32 ob2GHz = 0, db2GHz = 0;
- int regWrites = 0;
-
- /*
- * Software does not need to program bank data
- * for single chip devices, that is AR9280 or anything
- * after that.
- */
- if (AR_SREV_9280_10_OR_LATER(ah))
- return true;
-
- /* Setup rf parameters */
- eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
-
- /* Setup Bank 0 Write */
- RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
-
- /* Setup Bank 1 Write */
- RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
-
- /* Setup Bank 2 Write */
- RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
-
- /* Setup Bank 6 Write */
- RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
- modesIndex);
- {
- int i;
- for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
- ah->analogBank6Data[i] =
- INI_RA(&ah->iniBank6TPC, i, modesIndex);
- }
- }
-
- /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
- if (eepMinorRev >= 2) {
- if (IS_CHAN_2GHZ(chan)) {
- ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
- db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob2GHz, 3, 197, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db2GHz, 3, 194, 0);
- } else {
- ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
- db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- ob5GHz, 3, 203, 0);
- ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
- db5GHz, 3, 200, 0);
- }
- }
-
- /* Setup Bank 7 Setup */
- RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
-
- /* Write Analog registers */
- REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
- regWrites);
- REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
- regWrites);
-
- return true;
-}
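The kernel-doc removed with ath9k_hw_ar9280_set_channel() above documents the fractional-N synthesizer formula that the new CHANSEL_2G() macro in phy.h (below) now captures: channel_MHz = (3/4) * 40 MHz * (N / 2^17) with N = (freq * 0x10000) / 15. A small host-side check of that arithmetic, assuming nothing beyond the formula itself:

/* Standalone check of the 2 GHz channel-select arithmetic; not kernel code. */
#include <stdio.h>

#define CHANSEL_DIV		15
#define CHANSEL_2G(_freq)	(((_freq) * 0x10000) / CHANSEL_DIV)

int main(void)
{
	unsigned int freq = 2412;		/* 2.4 GHz channel 1 */
	unsigned int n = CHANSEL_2G(freq);	/* 17-bit fixed-point freq/30 */
	double back = 0.75 * 40.0 * ((double)n / (1 << 17));

	/* prints N = 0xa0cccc and a synthesized value of ~2412.000 MHz */
	printf("freq=%u MHz  N=0x%x  synthesized=%.3f MHz\n", freq, n, back);
	return 0;
}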
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0999a495fd4..e724c2c1ae2 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,589 +17,25 @@
#ifndef PHY_H
#define PHY_H
-/* Common between single chip and non single-chip solutions */
-void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
-
-/* Single chip radio settings */
-int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-/* Routines below are for non single-chip solutions */
-int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
-void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
-
-int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
-void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
-
-bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- u16 modesIndex);
+#define CHANSEL_DIV 15
+#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
+#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
#define AR_PHY_BASE 0x9800
#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
-#define AR_PHY_TEST 0x9800
-#define PHY_AGC_CLR 0x10000000
-#define RFSILENT_BB 0x00002000
-
-#define AR_PHY_TURBO 0x9804
-#define AR_PHY_FC_TURBO_MODE 0x00000001
-#define AR_PHY_FC_TURBO_SHORT 0x00000002
-#define AR_PHY_FC_DYN2040_EN 0x00000004
-#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
-#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
-/* For 25 MHz channel spacing -- not used but supported by hw */
-#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
-#define AR_PHY_FC_HT_EN 0x00000040
-#define AR_PHY_FC_SHORT_GI_40 0x00000080
-#define AR_PHY_FC_WALSH 0x00000100
-#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
-#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
-
-#define AR_PHY_TEST2 0x9808
-
-#define AR_PHY_TIMING2 0x9810
-#define AR_PHY_TIMING3 0x9814
-#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
-#define AR_PHY_TIMING3_DSC_MAN_S 17
-#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
-#define AR_PHY_TIMING3_DSC_EXP_S 13
-
-#define AR_PHY_CHIP_ID 0x9818
-#define AR_PHY_CHIP_ID_REV_0 0x80
-#define AR_PHY_CHIP_ID_REV_1 0x81
-#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
-
-#define AR_PHY_ACTIVE 0x981C
-#define AR_PHY_ACTIVE_EN 0x00000001
-#define AR_PHY_ACTIVE_DIS 0x00000000
-
-#define AR_PHY_RF_CTL2 0x9824
-#define AR_PHY_TX_END_DATA_START 0x000000FF
-#define AR_PHY_TX_END_DATA_START_S 0
-#define AR_PHY_TX_END_PA_ON 0x0000FF00
-#define AR_PHY_TX_END_PA_ON_S 8
-
-#define AR_PHY_RF_CTL3 0x9828
-#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
-#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
-
-#define AR_PHY_ADC_CTL 0x982C
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
-#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
-#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
-#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
-#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
-#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
-
-#define AR_PHY_ADC_SERIAL_CTL 0x9830
-#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
-#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
-
-#define AR_PHY_RF_CTL4 0x9834
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
-#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
-#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
-#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
-#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
-
-#define AR_PHY_TSTDAC_CONST 0x983c
-
-#define AR_PHY_SETTLING 0x9844
-#define AR_PHY_SETTLING_SWITCH 0x00003F80
-#define AR_PHY_SETTLING_SWITCH_S 7
-
-#define AR_PHY_RXGAIN 0x9848
-#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
-#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
-#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
-#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
-#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
-#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
-
-#define AR_PHY_DESIRED_SZ 0x9850
-#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
-#define AR_PHY_DESIRED_SZ_ADC_S 0
-#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
-#define AR_PHY_DESIRED_SZ_PGA_S 8
-#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
-#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
-
-#define AR_PHY_FIND_SIG 0x9858
-#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
-#define AR_PHY_FIND_SIG_FIRSTEP_S 12
-#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
-#define AR_PHY_FIND_SIG_FIRPWR_S 18
-
-#define AR_PHY_AGC_CTL1 0x985C
-#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
-#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
-#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
-#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
-
-#define AR_PHY_AGC_CONTROL 0x9860
-#define AR_PHY_AGC_CONTROL_CAL 0x00000001
-#define AR_PHY_AGC_CONTROL_NF 0x00000002
-#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
-#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
-#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
-
-#define AR_PHY_CCA 0x9864
-#define AR_PHY_MINCCA_PWR 0x0FF80000
-#define AR_PHY_MINCCA_PWR_S 19
-#define AR_PHY_CCA_THRESH62 0x0007F000
-#define AR_PHY_CCA_THRESH62_S 12
-#define AR9280_PHY_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_MINCCA_PWR_S 20
-#define AR9280_PHY_CCA_THRESH62 0x000FF000
-#define AR9280_PHY_CCA_THRESH62_S 12
-
-#define AR_PHY_SFCORR_LOW 0x986C
-#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
-#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
-
-#define AR_PHY_SFCORR 0x9868
-#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
-#define AR_PHY_SFCORR_M2COUNT_THR_S 0
-#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
-#define AR_PHY_SFCORR_M1_THRESH_S 17
-#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
-#define AR_PHY_SFCORR_M2_THRESH_S 24
-
-#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
-#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
-#define AR_PHY_SYNTH_CONTROL 0x9874
-#define AR_PHY_SLEEP_SCAL 0x9878
-
-#define AR_PHY_PLL_CTL 0x987c
-#define AR_PHY_PLL_CTL_40 0xaa
-#define AR_PHY_PLL_CTL_40_5413 0x04
-#define AR_PHY_PLL_CTL_44 0xab
-#define AR_PHY_PLL_CTL_44_2133 0xeb
-#define AR_PHY_PLL_CTL_40_2133 0xea
-
-#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
-#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
-#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
-#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
-#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
-#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
-#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
-
-#define AR_PHY_RX_DELAY 0x9914
-#define AR_PHY_SEARCH_START_DELAY 0x9918
-#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
-
-#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
-#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
-#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
-#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
-#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
-
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
-#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
-
-#define AR_PHY_TIMING5 0x9924
-#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
-#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
-
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
-#define AR_PHY_POWER_TX_RATE_MAX 0x993c
-#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
-
-#define AR_PHY_FRAME_CTL 0x9944
-#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
-#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
-
-#define AR_PHY_TXPWRADJ 0x994C
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
-#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
-#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
-
-#define AR_PHY_RADAR_EXT 0x9940
-#define AR_PHY_RADAR_EXT_ENA 0x00004000
-
-#define AR_PHY_RADAR_0 0x9954
-#define AR_PHY_RADAR_0_ENA 0x00000001
-#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
-#define AR_PHY_RADAR_0_INBAND 0x0000003e
-#define AR_PHY_RADAR_0_INBAND_S 1
-#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
-#define AR_PHY_RADAR_0_PRSSI_S 6
-#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
-#define AR_PHY_RADAR_0_HEIGHT_S 12
-#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
-#define AR_PHY_RADAR_0_RRSSI_S 18
-#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
-#define AR_PHY_RADAR_0_FIRPWR_S 24
-
-#define AR_PHY_RADAR_1 0x9958
-#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
-#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
-#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
-#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
-#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
-#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
-#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
-#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
-#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
-#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
-#define AR_PHY_RADAR_1_MAXLEN_S 0
-
-#define AR_PHY_SWITCH_CHAIN_0 0x9960
-#define AR_PHY_SWITCH_COM 0x9964
-
-#define AR_PHY_SIGMA_DELTA 0x996C
-#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
-#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
-#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
-#define AR_PHY_SIGMA_DELTA_FILT2_S 3
-#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
-#define AR_PHY_SIGMA_DELTA_FILT1_S 8
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
-#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
-
-#define AR_PHY_RESTART 0x9970
-#define AR_PHY_RESTART_DIV_GC 0x001C0000
-#define AR_PHY_RESTART_DIV_GC_S 18
-
-#define AR_PHY_RFBUS_REQ 0x997C
-#define AR_PHY_RFBUS_REQ_EN 0x00000001
-
-#define AR_PHY_TIMING7 0x9980
-#define AR_PHY_TIMING8 0x9984
-#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
-
-#define AR_PHY_BIN_MASK2_1 0x9988
-#define AR_PHY_BIN_MASK2_2 0x998c
-#define AR_PHY_BIN_MASK2_3 0x9990
-#define AR_PHY_BIN_MASK2_4 0x9994
-
-#define AR_PHY_BIN_MASK_1 0x9900
-#define AR_PHY_BIN_MASK_2 0x9904
-#define AR_PHY_BIN_MASK_3 0x9908
-
-#define AR_PHY_MASK_CTL 0x990c
-
-#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
-#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
-
-#define AR_PHY_TIMING9 0x9998
-#define AR_PHY_TIMING10 0x999c
-#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
-#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
-
-#define AR_PHY_TIMING11 0x99a0
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
-#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
-#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
-#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
-#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
-#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
-
-#define AR_PHY_RX_CHAINMASK 0x99a4
-#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
-#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
-#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
-
-#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
-#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
-#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
-#define AR_PHY_9285_ANT_DIV_CTL_S 24
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
-#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
-#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
-#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
-#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_9285_ANT_DIV_LNA1 2
-#define AR_PHY_9285_ANT_DIV_LNA2 1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
-#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
+#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
+#define AR_PHY_TX_GAIN_CLC 0x0000001E
+#define AR_PHY_TX_GAIN_CLC_S 1
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
-#define AR_PHY_EXT_CCA0 0x99b8
-#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
-#define AR_PHY_EXT_CCA0_THRESH62_S 0
-
-#define AR_PHY_EXT_CCA 0x99bc
-#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
-#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
-#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
-#define AR_PHY_EXT_CCA_THRESH62_S 16
-#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_SFCORR_EXT 0x99c0
-#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
-#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
-#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
-#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
-#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
-#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
-#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
-
-#define AR_PHY_HALFGI 0x99D0
-#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
-#define AR_PHY_HALFGI_DSC_MAN_S 4
-#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
-#define AR_PHY_HALFGI_DSC_EXP_S 0
-
-#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
-#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
-
-#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
-
-#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
-#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
-
-#define AR_PHY_M_SLEEP 0x99f0
-#define AR_PHY_REFCLKDLY 0x99f4
-#define AR_PHY_REFCLKPD 0x99f8
-
-#define AR_PHY_CALMODE 0x99f0
-
-#define AR_PHY_CALMODE_IQ 0x00000000
-#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
-#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
-#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
-
-#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
-#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
-
-#define AR_PHY_CURRENT_RSSI 0x9c1c
-#define AR9280_PHY_CURRENT_RSSI 0x9c3c
-
-#define AR_PHY_RFBUS_GRANT 0x9C20
-#define AR_PHY_RFBUS_GRANT_EN 0x00000001
-
-#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
-#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
-
-#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
-
-#define AR_PHY_MODE 0xA200
-#define AR_PHY_MODE_ASYNCFIFO 0x80
-#define AR_PHY_MODE_AR2133 0x08
-#define AR_PHY_MODE_AR5111 0x00
-#define AR_PHY_MODE_AR5112 0x08
-#define AR_PHY_MODE_DYNAMIC 0x04
-#define AR_PHY_MODE_RF2GHZ 0x02
-#define AR_PHY_MODE_RF5GHZ 0x00
-#define AR_PHY_MODE_CCK 0x01
-#define AR_PHY_MODE_OFDM 0x00
-#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
-
-#define AR_PHY_CCK_TX_CTRL 0xA204
-#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
-#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
-
-#define AR_PHY_CCK_DETECT 0xA208
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
-#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
-/* [12:6] settling time for antenna switch */
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
-#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
-
-#define AR_PHY_GAIN_2GHZ 0xA20C
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
-#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
-#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
-#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
-
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
-#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
-#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
-#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
-#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
-
-#define AR_PHY_CCK_RXCTRL4 0xA21C
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
-#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
-
-#define AR_PHY_DAG_CTRLCCK 0xA228
-#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
-#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
-
-#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
-#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
-
-#define AR_PHY_POWER_TX_RATE3 0xA234
-#define AR_PHY_POWER_TX_RATE4 0xA238
-
-#define AR_PHY_SCRM_SEQ_XR 0xA23C
-#define AR_PHY_HEADER_DETECT_XR 0xA240
-#define AR_PHY_CHIRP_DETECTED_XR 0xA244
-#define AR_PHY_BLUETOOTH 0xA254
-
-#define AR_PHY_TPCRG1 0xA258
-#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
-#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
-
-#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
-#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
-#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
-#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
-#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
-#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
-
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
-#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
-
-#define AR_PHY_TX_PWRCTRL4 0xa264
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
-#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
-#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
-
-#define AR_PHY_TX_PWRCTRL6_0 0xa270
-#define AR_PHY_TX_PWRCTRL6_1 0xb270
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
-#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
-
-#define AR_PHY_TX_PWRCTRL7 0xa274
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
-#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
-
-#define AR_PHY_TX_PWRCTRL9 0xa27C
-#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
-#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
-#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
-
-#define AR_PHY_TX_GAIN_TBL1 0xa300
-#define AR_PHY_TX_GAIN 0x0007F000
-#define AR_PHY_TX_GAIN_S 12
-
-#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
-#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
-#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
-
-#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
-#define AR_PHY_MASK2_M_31_45 0xa3a4
-#define AR_PHY_MASK2_M_16_30 0xa3a8
-#define AR_PHY_MASK2_M_00_15 0xa3ac
-#define AR_PHY_MASK2_P_15_01 0xa3b8
-#define AR_PHY_MASK2_P_30_16 0xa3bc
-#define AR_PHY_MASK2_P_45_31 0xa3c0
-#define AR_PHY_MASK2_P_61_45 0xa3c4
-#define AR_PHY_SPUR_REG 0x994c
-
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
-#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
-
-#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
-#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
-#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
-#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
-
-#define AR_PHY_PILOT_MASK_01_30 0xa3b0
-#define AR_PHY_PILOT_MASK_31_60 0xa3b4
-
-#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
-#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
-
-#define AR_PHY_ANALOG_SWAP 0xa268
-#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
-
-#define AR_PHY_TPCRG5 0xA26C
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
-#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
-#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
-
-/* Carrier leak calibration control, do it after AGC calibration */
-#define AR_PHY_CL_CAL_CTL 0xA358
-#define AR_PHY_CL_CAL_ENABLE 0x00000002
-#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
-
-#define AR_PHY_POWER_TX_RATE5 0xA38C
-#define AR_PHY_POWER_TX_RATE6 0xA390
-
-#define AR_PHY_CAL_CHAINMASK 0xA39C
-
-#define AR_PHY_POWER_TX_SUB 0xA3C8
-#define AR_PHY_POWER_TX_RATE7 0xA3CC
-#define AR_PHY_POWER_TX_RATE8 0xA3D0
-#define AR_PHY_POWER_TX_RATE9 0xA3D4
-
-#define AR_PHY_XPA_CFG 0xA3D8
-#define AR_PHY_FORCE_XPA_CFG 0x000000001
-#define AR_PHY_FORCE_XPA_CFG_S 0
-
-#define AR_PHY_CH1_CCA 0xa864
-#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH1_MINCCA_PWR_S 19
-#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
-#define AR9280_PHY_CH1_MINCCA_PWR_S 20
-
-#define AR_PHY_CH2_CCA 0xb864
-#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
-#define AR_PHY_CH2_MINCCA_PWR_S 19
-
-#define AR_PHY_CH1_EXT_CCA 0xa9bc
-#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
-#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
-
-#define AR_PHY_CH2_EXT_CCA 0xb9bc
-#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
-#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+#define AR_PHY_CLC_TBL1 0xa35c
+#define AR_PHY_CLC_I0 0x07ff0000
+#define AR_PHY_CLC_I0_S 16
+#define AR_PHY_CLC_Q0 0x0000ffd0
+#define AR_PHY_CLC_Q0_S 5
#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
int r; \
@@ -615,6 +51,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define ANTSWAP_AB 0x0001
#define REDUCE_CHAIN_0 0x00000050
#define REDUCE_CHAIN_1 0x00000051
+#define AR_PHY_CHIP_ID 0x9818
#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
int i; \
@@ -622,4 +59,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
(_bank)[i] = INI_RA((_iniarray), i, _col);; \
} while (0)
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
#endif
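
The PHY register constants above follow the driver's mask/shift convention: every multi-bit field gets a mask (e.g. AR_PHY_TX_GAIN) and a companion shift with an _S suffix (AR_PHY_TX_GAIN_S). A minimal sketch of how such pairs are consumed, with local SM()/MS() helpers written out for illustration (ath9k defines equivalents elsewhere; the register I/O itself is omitted here):

	#include <linux/types.h>

	/* Illustrative copies of the usual pack/extract helpers: SM() shifts a
	 * value into field position under the mask, MS() pulls it back out. */
	#define SM(_v, _f)	(((_v) << _f##_S) & _f)
	#define MS(_v, _f)	(((_v) & _f) >> _f##_S)

	/* Values as added in the hunk above. */
	#define AR_PHY_TX_GAIN		0x0007F000
	#define AR_PHY_TX_GAIN_S	12

	/* Replace the TX gain field of a previously read register value. */
	static u32 set_tx_gain(u32 regval, u32 gain)
	{
		regval &= ~AR_PHY_TX_GAIN;		/* clear the field */
		regval |= SM(gain, AR_PHY_TX_GAIN);	/* insert the new value */
		return regval;
	}

	/* Extract the TX gain field from a register value. */
	static u32 get_tx_gain(u32 regval)
	{
		return MS(regval, AR_PHY_TX_GAIN);
	}
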
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 244e1c62917..8519452c95f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -691,6 +691,19 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
+ /*
+ * If we're in HT mode and both we and our peer support LDPC, mark the
+ * frame for LDPC. We don't need to check our own device's capabilities,
+ * as our HT capabilities have already been intersected with our peer's.
+ */
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ tx_info->flags |= IEEE80211_TX_CTL_LDPC;
+
+ if (conf_is_ht(&sc->hw->conf) &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
+ tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
+
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
@@ -1228,8 +1241,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
long_retry = rate->count - 1;
}
- if (!priv_sta || !ieee80211_is_data(fc) ||
- !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC))
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
return;
if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
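
For reference, the ath_get_rate() hunk above can be read as the following self-contained sketch. Here sta_caps stands in for sta->ht_cap.cap, which mac80211 has already intersected with our own HT capabilities, so only the peer's advertised bits need checking; treating IEEE80211_TX_CTL_STBC_SHIFT as the low bit of the STBC field mirrors the patch and is an assumption about the mac80211 flag layout of this era:

	#include <net/mac80211.h>

	/* Condensed restatement of the LDPC/STBC marking added above; not the
	 * driver function itself, just the decision logic in isolation. */
	static void mark_ht_tx_flags(struct ieee80211_tx_info *tx_info,
				     u16 sta_caps, bool is_ht_conf)
	{
		if (!is_ht_conf)
			return;

		/* Peer can accept LDPC-coded frames */
		if (sta_caps & IEEE80211_HT_CAP_LDPC_CODING)
			tx_info->flags |= IEEE80211_TX_CTL_LDPC;

		/* STBC bit checked as in the hunk above; request one STBC stream */
		if (sta_caps & IEEE80211_HT_CAP_TX_STBC)
			tx_info->flags |= 1 << IEEE80211_TX_CTL_STBC_SHIFT;
	}
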
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 4f6d6fd442f..3d8d40cdc99 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -110,8 +110,8 @@ struct ath_rate_table {
int rate_cnt;
int mcs_start;
struct {
- int valid;
- int valid_single_stream;
+ u8 valid;
+ u8 valid_single_stream;
u8 phy;
u32 ratekbps;
u32 user_ratekbps;
@@ -172,14 +172,13 @@ struct ath_rate_priv {
#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
-#define ATH_TX_INFO_UPDATE_RC (1 << 2)
#define ATH_TX_INFO_XRETRY (1 << 3)
#define ATH_TX_INFO_UNDERRUN (1 << 4)
enum ath9k_internal_frame_type {
- ATH9K_NOT_INTERNAL,
- ATH9K_INT_PAUSE,
- ATH9K_INT_UNPAUSE
+ ATH9K_IFT_NOT_INTERNAL,
+ ATH9K_IFT_PAUSE,
+ ATH9K_IFT_UNPAUSE
};
int ath_rate_control_register(void);
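
With ATH_TX_INFO_UPDATE_RC removed just above, the tx-status path in rc.c no longer relies on a driver-private pad flag; it filters on mac80211's own aggregation flags instead. A condensed sketch of that check, under the assumption (as in the hunk) that an A-MPDU subframe without IEEE80211_TX_STAT_AMPDU carries no usable status:

	#include <net/mac80211.h>

	/* Returns true when rate control should ignore this tx status. */
	static bool rc_skip_status(struct ieee80211_tx_info *tx_info, bool is_data)
	{
		if (!is_data)
			return true;

		/* Sent as part of an A-MPDU, but no aggregate status reported;
		 * the usable status is carried with the aggregate instead. */
		if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
		    !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
			return true;

		return false;
	}
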
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5148c..ca6065b71b4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,15 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
+
+static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
+{
+ return sc->ps_enabled &&
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
+}
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
struct ieee80211_hdr *hdr)
@@ -115,56 +124,244 @@ static void ath_opmode_init(struct ath_softc *sc)
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_rx_edma *rx_edma;
struct sk_buff *skb;
struct ath_buf *bf;
- int error = 0;
- spin_lock_init(&sc->rx.rxflushlock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_lock_init(&sc->rx.rxbuflock);
+ rx_edma = &sc->rx.rx_edma[qtype];
+ if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+ return false;
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ list_del_init(&bf->list);
- ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- common->cachelsz, common->rx_bufsize);
+ skb = bf->bf_mpdu;
+
+ ATH_RXBUF_RESET(bf);
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
+
+ SKB_CB_ATHBUF(skb) = bf;
+ ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+ skb_queue_tail(&rx_edma->rx_fifo, skb);
+
+ return true;
+}
- /* Initialize rx descriptors */
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype, int size)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 nbuf = 0;
- error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
- "rx", nbufs, 1);
- if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "failed to allocate rx descriptors: %d\n", error);
- goto err;
+ if (list_empty(&sc->rx.rxbuf)) {
+ ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ return;
}
+ while (!list_empty(&sc->rx.rxbuf)) {
+ nbuf++;
+
+ if (!ath_rx_edma_buf_link(sc, qtype))
+ break;
+
+ if (nbuf >= size)
+ break;
+ }
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_buf *bf;
+ struct ath_rx_edma *rx_edma;
+ struct sk_buff *skb;
+
+ rx_edma = &sc->rx.rx_edma[qtype];
+
+ while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ if (bf->bf_mpdu)
+ dev_kfree_skb_any(bf->bf_mpdu);
+ }
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+ kfree(sc->rx.rx_bufptr);
+ sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+ skb_queue_head_init(&rx_edma->rx_fifo);
+ skb_queue_head_init(&rx_edma->rx_buffers);
+ rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0, i;
+ u32 size;
+
+
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+ ah->caps.rx_status_len,
+ min(common->cachelsz, (u16)64));
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+ ah->caps.rx_lp_qdepth);
+ ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+ ah->caps.rx_hp_qdepth);
+
+ size = sizeof(struct ath_buf) * nbufs;
+ bf = kzalloc(size, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&sc->rx.rxbuf);
+ sc->rx.rx_bufptr = bf;
+
+ for (i = 0; i < nbufs; i++, bf++) {
skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
- if (skb == NULL) {
+ if (!skb) {
error = -ENOMEM;
- goto err;
+ goto rx_init_fail;
}
+ memset(skb->data, 0, common->rx_bufsize);
bf->bf_mpdu = skb;
+
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(sc->dev,
- bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto rx_init_fail;
+ }
+
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ }
+
+ return 0;
+
+rx_init_fail:
+ ath_rx_edma_cleanup(sc);
+ return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+
+ ath9k_hw_rxena(sc->sc_ah);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+ ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+ sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+ spin_unlock_bh(&sc->rx.rxbuflock);
+
+ ath_opmode_init(sc);
+
+ ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+ spin_lock_bh(&sc->rx.rxbuflock);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+ ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+ spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int error = 0;
+
+ spin_lock_init(&sc->rx.rxflushlock);
+ sc->sc_flags &= ~SC_OP_RXFLUSH;
+ spin_lock_init(&sc->rx.rxbuflock);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ return ath_rx_edma_init(sc, nbufs);
+ } else {
+ common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(common->cachelsz, (u16)64));
+
+ ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
+
+ /* Initialize rx descriptors */
+
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+ "rx", nbufs, 1, 0);
+ if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error() on RX init\n");
- error = -ENOMEM;
+ "failed to allocate rx descriptors: %d\n",
+ error);
goto err;
}
- bf->bf_dmacontext = bf->bf_buf_addr;
+
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+ GFP_KERNEL);
+ if (skb == NULL) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ bf->bf_mpdu = skb;
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
+ bf->bf_buf_addr))) {
+ dev_kfree_skb_any(skb);
+ bf->bf_mpdu = NULL;
+ ath_print(common, ATH_DBG_FATAL,
+ "dma_mapping_error() on RX init\n");
+ error = -ENOMEM;
+ goto err;
+ }
+ bf->bf_dmacontext = bf->bf_buf_addr;
+ }
+ sc->rx.rxlink = NULL;
}
- sc->rx.rxlink = NULL;
err:
if (error)
@@ -180,17 +377,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
struct sk_buff *skb;
struct ath_buf *bf;
- list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- skb = bf->bf_mpdu;
- if (skb) {
- dma_unmap_single(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_rx_edma_cleanup(sc);
+ return;
+ } else {
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = bf->bf_mpdu;
+ if (skb) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
}
- }
- if (sc->rx.rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ if (sc->rx.rxdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+ }
}
/*
@@ -273,6 +476,11 @@ int ath_startrecv(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_buf *bf, *tbf;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ ath_edma_start_recv(sc);
+ return 0;
+ }
+
spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -306,7 +514,11 @@ bool ath_stoprecv(struct ath_softc *sc)
ath9k_hw_stoppcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
- sc->rx.rxlink = NULL;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_edma_stop_recv(sc);
+ else
+ sc->rx.rxlink = NULL;
return stopped;
}
@@ -315,7 +527,9 @@ void ath_flushrecv(struct ath_softc *sc)
{
spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
- ath_rx_tasklet(sc, 1);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_unlock_bh(&sc->rx.rxflushlock);
}
@@ -408,8 +622,8 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
- ieee80211_is_beacon(hdr->frame_control))
+ if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
+ && ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
@@ -469,15 +683,148 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
ieee80211_rx(hw, skb);
}
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+ enum ath9k_rx_qtype qtype)
{
-#define PA2DESC(_sc, _pa) \
- ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
- ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct sk_buff *skb;
+ struct ath_buf *bf;
+ int ret;
+
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return false;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize, DMA_FROM_DEVICE);
+
+ ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+ if (ret == -EINPROGRESS)
+ return false;
+
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ if (ret == -EINVAL) {
+ /* corrupt descriptor, skip this one and the following one */
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ skb = skb_peek(&rx_edma->rx_fifo);
+ if (!skb)
+ return true;
+
+ bf = SKB_CB_ATHBUF(skb);
+ BUG_ON(!bf);
+ __skb_unlink(skb, &rx_edma->rx_fifo);
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ return true;
+ }
+ skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+ return true;
+}
+
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs,
+ enum ath9k_rx_qtype qtype)
+{
+ struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+ struct sk_buff *skb;
struct ath_buf *bf;
+
+ while (ath_edma_get_buffers(sc, qtype));
+ skb = __skb_dequeue(&rx_edma->rx_buffers);
+ if (!skb)
+ return NULL;
+
+ bf = SKB_CB_ATHBUF(skb);
+ ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+ return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_desc *ds;
- struct ath_rx_status *rx_stats;
+ struct ath_buf *bf;
+ int ret;
+
+ if (list_empty(&sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ ds = bf->bf_desc;
+
+ /*
+ * Must provide the virtual address of the current
+ * descriptor, the physical address, and the virtual
+ * address of the next descriptor in the h/w chain.
+ * This allows the HAL to look ahead to see if the
+ * hardware is done with a descriptor by checking the
+ * done bit in the following descriptor and the address
+ * of the current descriptor the DMA engine is working
+ * on. All this is necessary because of our use of
+ * a self-linked list to avoid rx overruns.
+ */
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+ if (ret == -EINPROGRESS) {
+ struct ath_rx_status trs;
+ struct ath_buf *tbf;
+ struct ath_desc *tds;
+
+ memset(&trs, 0, sizeof(trs));
+ if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
+ return NULL;
+ }
+
+ tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+ /*
+ * On some hardware the descriptor status words could
+ * get corrupted, including the done bit. Because of
+ * this, check if the next descriptor's done bit is
+ * set or not.
+ *
+ * If the next descriptor's done bit is set, the current
+ * descriptor has been corrupted. Force s/w to discard
+ * this descriptor and continue...
+ */
+
+ tds = tbf->bf_desc;
+ ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+ if (ret == -EINPROGRESS)
+ return NULL;
+ }
+
+ if (!bf->bf_mpdu)
+ return bf;
+
+ /*
+ * Synchronize the DMA transfer with CPU before
+ * 1. accessing the frame
+ * 2. requeueing the same buffer to h/w
+ */
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_FROM_DEVICE);
+
+ return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+ struct ath_buf *bf;
struct sk_buff *skb = NULL, *requeue_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +838,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+ int dma_type;
+ if (edma)
+ dma_type = DMA_FROM_DEVICE;
+ else
+ dma_type = DMA_BIDIRECTIONAL;
+
+ qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
spin_lock_bh(&sc->rx.rxbuflock);
do {
@@ -499,79 +856,25 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
break;
- if (list_empty(&sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
-
- bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
- ds = bf->bf_desc;
-
- /*
- * Must provide the virtual address of the current
- * descriptor, the physical address, and the virtual
- * address of the next descriptor in the h/w chain.
- * This allows the HAL to look ahead to see if the
- * hardware is done with a descriptor by checking the
- * done bit in the following descriptor and the address
- * of the current descriptor the DMA engine is working
- * on. All this is necessary because of our use of
- * a self-linked list to avoid rx overruns.
- */
- retval = ath9k_hw_rxprocdesc(ah, ds,
- bf->bf_daddr,
- PA2DESC(sc, ds->ds_link),
- 0);
- if (retval == -EINPROGRESS) {
- struct ath_buf *tbf;
- struct ath_desc *tds;
-
- if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
- sc->rx.rxlink = NULL;
- break;
- }
+ memset(&rs, 0, sizeof(rs));
+ if (edma)
+ bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+ else
+ bf = ath_get_next_rx_buf(sc, &rs);
- tbf = list_entry(bf->list.next, struct ath_buf, list);
-
- /*
- * On some hardware the descriptor status words could
- * get corrupted, including the done bit. Because of
- * this, check if the next descriptor's done bit is
- * set or not.
- *
- * If the next descriptor's done bit is set, the current
- * descriptor has been corrupted. Force s/w to discard
- * this descriptor and continue...
- */
-
- tds = tbf->bf_desc;
- retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
- PA2DESC(sc, tds->ds_link), 0);
- if (retval == -EINPROGRESS) {
- break;
- }
- }
+ if (!bf)
+ break;
skb = bf->bf_mpdu;
if (!skb)
continue;
- /*
- * Synchronize the DMA transfer with CPU before
- * 1. accessing the frame
- * 2. requeueing the same buffer to h/w
- */
- dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
- common->rx_bufsize,
- DMA_FROM_DEVICE);
-
hdr = (struct ieee80211_hdr *) skb->data;
rxs = IEEE80211_SKB_RXCB(skb);
hw = ath_get_virt_hw(sc, hdr);
- rx_stats = &ds->ds_rxstat;
- ath_debug_stat_rx(sc, bf);
+ ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
@@ -580,7 +883,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
if (flush)
goto requeue;
- retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+ retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
rxs, &decrypt_error);
if (retval)
goto requeue;
@@ -599,18 +902,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
- skb_put(skb, rx_stats->rs_datalen);
+ skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+ if (ah->caps.rx_status_len)
+ skb_pull(skb, ah->caps.rx_status_len);
- ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
common->rx_bufsize,
- DMA_FROM_DEVICE);
+ dma_type);
if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(requeue_skb);
@@ -626,27 +931,32 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
- if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+ if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rx_stats->rs_antenna);
+ ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
- PS_WAIT_FOR_CAB |
- PS_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(ath9k_check_auto_sleep(sc) ||
+ (sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA))))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
requeue:
- list_move_tail(&bf->list, &sc->rx.rxbuf);
- ath_rx_buf_link(sc, bf);
+ if (edma) {
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+ list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+ }
} while (1);
spin_unlock_bh(&sc->rx.rxbuflock);
return 0;
-#undef PA2DESC
}
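
A note on SKB_CB_ATHBUF(), introduced at the top of this file: the EDMA receive path has no descriptor ring to walk back from, so the owning ath_buf is stashed in the skb's control-buffer scratch area while the skb sits in the hardware FIFO and recovered on completion. A minimal stand-alone sketch of that pattern (the struct here is a placeholder, not the driver's ath_buf):

	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	struct ath_buf_stub;	/* placeholder for the driver's struct ath_buf */

	#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf_stub **)__skb->cb))

	/* On queueing to the RX FIFO: remember which buffer owns this skb. */
	static void stash_owner(struct sk_buff *skb, struct ath_buf_stub *bf)
	{
		BUILD_BUG_ON(sizeof(struct ath_buf_stub *) > sizeof(skb->cb));
		SKB_CB_ATHBUF(skb) = bf;
	}

	/* On completion: recover the owner without any descriptor lookup. */
	static struct ath_buf_stub *recover_owner(struct sk_buff *skb)
	{
		return SKB_CB_ATHBUF(skb);
	}
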
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 72cfa8ebd9a..d4371a43bda 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
#include "../reg.h"
#define AR_CR 0x0008
-#define AR_CR_RXE 0x00000004
+#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
#define AR_CR_RXD 0x00000020
#define AR_CR_SWI 0x00000040
@@ -39,6 +39,12 @@
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
+#define AR_RXBP_THRESH 0x0018
+#define AR_RXBP_THRESH_HP 0x0000000f
+#define AR_RXBP_THRESH_HP_S 0
+#define AR_RXBP_THRESH_LP 0x00003f00
+#define AR_RXBP_THRESH_LP_S 8
+
#define AR_MIRT 0x0020
#define AR_MIRT_VAL 0x0000ffff
#define AR_MIRT_VAL_S 16
@@ -144,6 +150,9 @@
#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
#define AR_MACMISC_MISC_OBS_BUS_1 1
+#define AR_DATABUF_SIZE 0x0060
+#define AR_DATABUF_SIZE_MASK 0x00000FFF
+
#define AR_GTXTO 0x0064
#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
@@ -160,9 +169,14 @@
#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
#define AR_CST_TIMEOUT_LIMIT_S 16
+#define AR_HP_RXDP 0x0074
+#define AR_LP_RXDP 0x0078
+
#define AR_ISR 0x0080
#define AR_ISR_RXOK 0x00000001
#define AR_ISR_RXDESC 0x00000002
+#define AR_ISR_HP_RXOK 0x00000001
+#define AR_ISR_LP_RXOK 0x00000002
#define AR_ISR_RXERR 0x00000004
#define AR_ISR_RXNOPKT 0x00000008
#define AR_ISR_RXEOL 0x00000010
@@ -232,7 +246,6 @@
#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
#define AR_ISR_S5_TIM_TIMER 0x00000010
#define AR_ISR_S5_DTIM_TIMER 0x00000020
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5 0x00b8
#define AR_IMR_S5_TIM_TIMER 0x00000010
#define AR_IMR_S5_DTIM_TIMER 0x00000020
@@ -240,7 +253,6 @@
#define AR_ISR_S5_GENTIMER_TRIG_S 0
#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
#define AR_ISR_S5_GENTIMER_THRESH_S 16
-#define AR_ISR_S5_S 0x00d8
#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
#define AR_IMR_S5_GENTIMER_TRIG_S 0
#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
@@ -249,6 +261,8 @@
#define AR_IMR 0x00a0
#define AR_IMR_RXOK 0x00000001
#define AR_IMR_RXDESC 0x00000002
+#define AR_IMR_RXOK_HP 0x00000001
+#define AR_IMR_RXOK_LP 0x00000002
#define AR_IMR_RXERR 0x00000004
#define AR_IMR_RXNOPKT 0x00000008
#define AR_IMR_RXEOL 0x00000010
@@ -332,10 +346,10 @@
#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
#define AR_ISR_S1_QCU_TXEOL_S 16
-#define AR_ISR_S2_S 0x00cc
-#define AR_ISR_S3_S 0x00d0
-#define AR_ISR_S4_S 0x00d4
-#define AR_ISR_S5_S 0x00d8
+#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
#define AR_DMADBG_0 0x00e0
#define AR_DMADBG_1 0x00e4
#define AR_DMADBG_2 0x00e8
@@ -369,6 +383,9 @@
#define AR_Q9_TXDP 0x0824
#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
+#define AR_Q_STATUS_RING_START 0x830
+#define AR_Q_STATUS_RING_END 0x834
+
#define AR_Q_TXE 0x0840
#define AR_Q_TXE_M 0x000003FF
@@ -461,6 +478,10 @@
#define AR_Q_RDYTIMESHDN 0x0a40
#define AR_Q_RDYTIMESHDN_M 0x000003FF
+/* MAC Descriptor CRC check */
+#define AR_Q_DESC_CRCCHK 0xa44
+/* Enable CRC check on the descriptor fetched from host */
+#define AR_Q_DESC_CRCCHK_EN 1
#define AR_NUM_DCU 10
#define AR_DCU_0 0x0001
@@ -679,7 +700,7 @@
#define AR_WA 0x4004
#define AR_WA_D3_L1_DISABLE (1 << 14)
-#define AR9285_WA_DEFAULT 0x004a05cb
+#define AR9285_WA_DEFAULT 0x004a050b
#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
@@ -759,6 +780,8 @@
#define AR_SREV_VERSION_9271 0x140
#define AR_SREV_REVISION_9271_10 0
#define AR_SREV_REVISION_9271_11 1
+#define AR_SREV_VERSION_9300 0x1c0
+#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -844,6 +867,19 @@
#define AR_SREV_9271_11(_ah) \
(AR_SREV_9271(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
+#define AR_SREV_9300(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
+#define AR_SREV_9300_20(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
+#define AR_SREV_9300_20_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
+
+#define AR_SREV_9285E_20(_ah) \
+ (AR_SREV_9285_12_OR_LATER(_ah) && \
+ ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -940,6 +976,8 @@ enum {
#define AR928X_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
#define AR9287_NUM_GPIO 11
+#define AR9271_NUM_GPIO 16
+#define AR9300_NUM_GPIO 17
#define AR_GPIO_IN_OUT 0x4048
#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -950,19 +988,23 @@ enum {
#define AR9285_GPIO_IN_VAL_S 12
#define AR9287_GPIO_IN_VAL 0x003FF800
#define AR9287_GPIO_IN_VAL_S 11
+#define AR9271_GPIO_IN_VAL 0xFFFF0000
+#define AR9271_GPIO_IN_VAL_S 16
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
-#define AR_GPIO_OE_OUT 0x404c
+#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
#define AR_GPIO_OE_OUT_DRV_HI 0x2
#define AR_GPIO_OE_OUT_DRV_ALL 0x3
-#define AR_GPIO_INTR_POL 0x4050
-#define AR_GPIO_INTR_POL_VAL 0x00001FFF
+#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
+#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
-#define AR_GPIO_INPUT_EN_VAL 0x4054
+#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -980,13 +1022,13 @@ enum {
#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
#define AR_GPIO_JTAG_DISABLE 0x00020000
-#define AR_GPIO_INPUT_MUX1 0x4058
+#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
-#define AR_GPIO_INPUT_MUX2 0x405c
+#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)
#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
#define AR_GPIO_INPUT_MUX2_CLK25_S 0
#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -994,13 +1036,13 @@ enum {
#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
-#define AR_GPIO_OUTPUT_MUX1 0x4060
-#define AR_GPIO_OUTPUT_MUX2 0x4064
-#define AR_GPIO_OUTPUT_MUX3 0x4068
+#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)
+#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)
+#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)
-#define AR_INPUT_STATE 0x406c
+#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)
-#define AR_EEPROM_STATUS_DATA 0x407c
+#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1008,13 +1050,24 @@ enum {
#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
-#define AR_OBS 0x4080
+#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)
-#define AR_GPIO_PDPU 0x4088
+#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
-#define AR_PCIE_MSI 0x4094
+#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)
#define AR_PCIE_MSI_ENABLE 0x00000001
+#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4
+#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
+#define AR_INTR_PRIO_SYNC_MASK 0x40cc
+#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
+
+#define AR_RTC_9300_PLL_DIV 0x000003ff
+#define AR_RTC_9300_PLL_DIV_S 0
+#define AR_RTC_9300_PLL_REFDIV 0x00003C00
+#define AR_RTC_9300_PLL_REFDIV_S 10
+#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
+#define AR_RTC_9300_PLL_CLKSEL_S 14
#define AR_RTC_9160_PLL_DIV 0x000003ff
#define AR_RTC_9160_PLL_DIV_S 0
@@ -1032,6 +1085,16 @@ enum {
#define AR_RTC_RC_COLD_RESET 0x00000004
#define AR_RTC_RC_WARM_RESET 0x00000008
+/* Crystal Control */
+#define AR_RTC_XTAL_CONTROL 0x7004
+
+/* Reg Control 0 */
+#define AR_RTC_REG_CONTROL0 0x7008
+
+/* Reg Control 1 */
+#define AR_RTC_REG_CONTROL1 0x700c
+#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
+
#define AR_RTC_PLL_CONTROL \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
@@ -1062,6 +1125,7 @@ enum {
#define AR_RTC_SLEEP_CLK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
#define AR_RTC_FORCE_DERIVED_CLK 0x2
+#define AR_RTC_FORCE_SWREG_PRD 0x00000004
#define AR_RTC_FORCE_WAKE \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
@@ -1178,6 +1242,13 @@ enum {
#define AR9285_AN_RF2G4_DB2_4 0x00003800
#define AR9285_AN_RF2G4_DB2_4_S 11
+#define AR9285_RF2G5 0x7830
+#define AR9285_RF2G5_IC50TX 0xfffff8ff
+#define AR9285_RF2G5_IC50TX_SET 0x00000400
+#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
+#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
+#define AR9285_RF2G5_IC50TX_CLEAR_S 8
+
/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
#define AR9271_AN_RF2G3_OB_cck 0x001C0000
#define AR9271_AN_RF2G3_OB_cck_S 18
@@ -1519,7 +1590,7 @@ enum {
#define AR_TSFOOR_THRESHOLD 0x813c
#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
-#define AR_PHY_ERR_EIFS_MASK 8144
+#define AR_PHY_ERR_EIFS_MASK 0x8144
#define AR_PHY_ERR_3 0x8168
#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
@@ -1585,24 +1656,26 @@ enum {
#define AR_FIRST_NDP_TIMER 7
#define AR_NDP2_PERIOD 0x81a0
#define AR_NDP2_TIMER_MODE 0x81c0
-#define AR_NEXT_TBTT_TIMER 0x8200
-#define AR_NEXT_DMA_BEACON_ALERT 0x8204
-#define AR_NEXT_SWBA 0x8208
-#define AR_NEXT_CFP 0x8208
-#define AR_NEXT_HCF 0x820C
-#define AR_NEXT_TIM 0x8210
-#define AR_NEXT_DTIM 0x8214
-#define AR_NEXT_QUIET_TIMER 0x8218
-#define AR_NEXT_NDP_TIMER 0x821C
-
-#define AR_BEACON_PERIOD 0x8220
-#define AR_DMA_BEACON_PERIOD 0x8224
-#define AR_SWBA_PERIOD 0x8228
-#define AR_HCF_PERIOD 0x822C
-#define AR_TIM_PERIOD 0x8230
-#define AR_DTIM_PERIOD 0x8234
-#define AR_QUIET_PERIOD 0x8238
-#define AR_NDP_PERIOD 0x823C
+
+#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
+#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
+#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
+#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
+#define AR_NEXT_CFP AR_GEN_TIMERS(2)
+#define AR_NEXT_HCF AR_GEN_TIMERS(3)
+#define AR_NEXT_TIM AR_GEN_TIMERS(4)
+#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
+#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
+#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
+
+#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
+#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
+#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
+#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
+#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
+#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
+#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
+#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
#define AR_TIMER_MODE 0x8240
#define AR_TBTT_TIMER_EN 0x00000001
@@ -1716,4 +1789,32 @@ enum {
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
+#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
+#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the Multicast search
+ * based on both MAC Address and Key ID.
+ * If bit is 0, then Multicast search is
+ * based on MAC address only.
+ * For Merlin and above only.
+ */
+#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables AGG_WEP feature,
+ * when it is enable, AGG_WEP would takes
+ * charge of the encryption interface of
+ * pcu_txsm.
+ */
+
+#define AR9300_SM_BASE 0xa200
+#define AR9002_PHY_AGC_CONTROL 0x9860
+#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
+#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
+#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
+#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
+#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
+#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
+#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
+#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
+#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
+#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
+#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
+
#endif
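
Several of the register macros reworked above (AR_CR_RXE, AR_ISR_S2_S through AR_ISR_S5_S, and the GPIO/EEPROM block) now expand to a ternary on AR_SREV_9300_20_OR_LATER(ah), so they can only be used where a struct ath_hw *ah is in scope. A hedged sketch of how such a relocated register is typically touched; REG_READ()/REG_WRITE() stand for the driver's register accessors, and the two-bits-of-drive-per-GPIO layout is an assumption carried over from the existing ath9k GPIO code:

	#include "hw.h"	/* struct ath_hw, REG_READ/REG_WRITE, GPIO field macros */

	/* Drive one GPIO line's output-enable field. AR_GPIO_OE_OUT resolves to
	 * 0x4050 on AR9300 2.0+ and 0x404c on earlier parts; the macro hides
	 * that behind the local 'ah'. */
	static void gpio_set_oe_sketch(struct ath_hw *ah, u32 gpio, u32 drv)
	{
		u32 gpio_shift = gpio * 2;
		u32 val;

		val = REG_READ(ah, AR_GPIO_OE_OUT);
		val &= ~(AR_GPIO_OE_OUT_DRV << gpio_shift);
		val |= (drv & AR_GPIO_OE_OUT_DRV) << gpio_shift;
		REG_WRITE(ah, AR_GPIO_OE_OUT, val);
	}
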
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 00c0e21a4af..105ad40968f 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
- txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE;
+ txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
goto exit;
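
The new wmi.c added below implements a synchronous command/response channel over HTC: the caller records where the response should land, sends the command, and sleeps on a completion that the control-endpoint RX callback signals after copying the payload. A condensed, self-contained sketch of that pattern with illustrative names (send_over_htc() is a placeholder for the real htc_send() path, not the file's API):

	#include <linux/kernel.h>
	#include <linux/completion.h>
	#include <linux/mutex.h>
	#include <linux/skbuff.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	struct wmi_ctx {
		struct mutex op_mutex;		/* one command in flight at a time */
		struct completion cmd_wait;	/* signalled by the RX callback */
		void *rsp_buf;
		u32 rsp_len;
	};

	/* Placeholder transport hook; the driver calls into HTC here. */
	static int send_over_htc(struct sk_buff *skb)
	{
		return 0;
	}

	static int wmi_do_cmd(struct wmi_ctx *w, struct sk_buff *cmd,
			      void *rsp, u32 rsp_len, unsigned long timeout)
	{
		int ret;

		mutex_lock(&w->op_mutex);
		w->rsp_buf = rsp;		/* record where the reply goes */
		w->rsp_len = rsp_len;

		ret = send_over_htc(cmd);
		if (!ret && !wait_for_completion_timeout(&w->cmd_wait, timeout))
			ret = -ETIMEDOUT;

		mutex_unlock(&w->op_mutex);
		return ret;
	}

	/* Control-endpoint RX side: copy the payload and wake the waiter. */
	static void wmi_rsp(struct wmi_ctx *w, const void *data, u32 len)
	{
		if (w->rsp_buf && w->rsp_len)
			memcpy(w->rsp_buf, data, min(len, w->rsp_len));
		complete(&w->cmd_wait);
	}
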
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 00000000000..e23172c9caa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
+{
+ switch (wmi_cmd) {
+ case WMI_ECHO_CMDID:
+ return "WMI_ECHO_CMDID";
+ case WMI_ACCESS_MEMORY_CMDID:
+ return "WMI_ACCESS_MEMORY_CMDID";
+ case WMI_DISABLE_INTR_CMDID:
+ return "WMI_DISABLE_INTR_CMDID";
+ case WMI_ENABLE_INTR_CMDID:
+ return "WMI_ENABLE_INTR_CMDID";
+ case WMI_RX_LINK_CMDID:
+ return "WMI_RX_LINK_CMDID";
+ case WMI_ATH_INIT_CMDID:
+ return "WMI_ATH_INIT_CMDID";
+ case WMI_ABORT_TXQ_CMDID:
+ return "WMI_ABORT_TXQ_CMDID";
+ case WMI_STOP_TX_DMA_CMDID:
+ return "WMI_STOP_TX_DMA_CMDID";
+ case WMI_STOP_DMA_RECV_CMDID:
+ return "WMI_STOP_DMA_RECV_CMDID";
+ case WMI_ABORT_TX_DMA_CMDID:
+ return "WMI_ABORT_TX_DMA_CMDID";
+ case WMI_DRAIN_TXQ_CMDID:
+ return "WMI_DRAIN_TXQ_CMDID";
+ case WMI_DRAIN_TXQ_ALL_CMDID:
+ return "WMI_DRAIN_TXQ_ALL_CMDID";
+ case WMI_START_RECV_CMDID:
+ return "WMI_START_RECV_CMDID";
+ case WMI_STOP_RECV_CMDID:
+ return "WMI_STOP_RECV_CMDID";
+ case WMI_FLUSH_RECV_CMDID:
+ return "WMI_FLUSH_RECV_CMDID";
+ case WMI_SET_MODE_CMDID:
+ return "WMI_SET_MODE_CMDID";
+ case WMI_RESET_CMDID:
+ return "WMI_RESET_CMDID";
+ case WMI_NODE_CREATE_CMDID:
+ return "WMI_NODE_CREATE_CMDID";
+ case WMI_NODE_REMOVE_CMDID:
+ return "WMI_NODE_REMOVE_CMDID";
+ case WMI_VAP_REMOVE_CMDID:
+ return "WMI_VAP_REMOVE_CMDID";
+ case WMI_VAP_CREATE_CMDID:
+ return "WMI_VAP_CREATE_CMDID";
+ case WMI_BEACON_UPDATE_CMDID:
+ return "WMI_BEACON_UPDATE_CMDID";
+ case WMI_REG_READ_CMDID:
+ return "WMI_REG_READ_CMDID";
+ case WMI_REG_WRITE_CMDID:
+ return "WMI_REG_WRITE_CMDID";
+ case WMI_RC_STATE_CHANGE_CMDID:
+ return "WMI_RC_STATE_CHANGE_CMDID";
+ case WMI_RC_RATE_UPDATE_CMDID:
+ return "WMI_RC_RATE_UPDATE_CMDID";
+ case WMI_DEBUG_INFO_CMDID:
+ return "WMI_DEBUG_INFO_CMDID";
+ case WMI_HOST_ATTACH:
+ return "WMI_HOST_ATTACH";
+ case WMI_TARGET_IC_UPDATE_CMDID:
+ return "WMI_TARGET_IC_UPDATE_CMDID";
+ case WMI_TGT_STATS_CMDID:
+ return "WMI_TGT_STATS_CMDID";
+ case WMI_TX_AGGR_ENABLE_CMDID:
+ return "WMI_TX_AGGR_ENABLE_CMDID";
+ case WMI_TGT_DETACH_CMDID:
+ return "WMI_TGT_DETACH_CMDID";
+ case WMI_TGT_TXQ_ENABLE_CMDID:
+ return "WMI_TGT_TXQ_ENABLE_CMDID";
+ }
+
+ return "Bogus";
+}
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ wmi->drv_priv = priv;
+ wmi->stopped = false;
+ mutex_init(&wmi->op_mutex);
+ mutex_init(&wmi->multi_write_mutex);
+ init_completion(&wmi->cmd_wait);
+
+ return wmi;
+}
+
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
+{
+ struct wmi *wmi = priv->wmi;
+
+ mutex_lock(&wmi->op_mutex);
+ wmi->stopped = true;
+ mutex_unlock(&wmi->op_mutex);
+
+ kfree(priv->wmi);
+}
+
+void ath9k_wmi_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct wmi_cmd_hdr *hdr;
+ struct wmi_swba *swba_hdr;
+ enum wmi_event_id event;
+ struct sk_buff *skb;
+ void *wmi_event;
+ unsigned long flags;
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ __be32 txrate;
+#endif
+
+ spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
+ skb = priv->wmi->wmi_skb;
+ spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ event = be16_to_cpu(hdr->command_id);
+ wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ ath_print(common, ATH_DBG_WMI,
+ "WMI Event: 0x%x\n", event);
+
+ switch (event) {
+ case WMI_TGT_RDY_EVENTID:
+ break;
+ case WMI_SWBA_EVENTID:
+ swba_hdr = (struct wmi_swba *) wmi_event;
+ ath9k_htc_swba(priv, swba_hdr->beacon_pending);
+ break;
+ case WMI_FATAL_EVENTID:
+ break;
+ case WMI_TXTO_EVENTID:
+ break;
+ case WMI_BMISS_EVENTID:
+ break;
+ case WMI_WLAN_TXCOMP_EVENTID:
+ break;
+ case WMI_DELBA_EVENTID:
+ break;
+ case WMI_TXRATE_EVENTID:
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
+ priv->debug.txrate = be32_to_cpu(txrate);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
+ memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
+
+ complete(&wmi->cmd_wait);
+}
+
+static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid)
+{
+ struct wmi *wmi = (struct wmi *) priv;
+ struct wmi_cmd_hdr *hdr;
+ u16 cmd_id;
+
+ if (unlikely(wmi->stopped))
+ goto free_skb;
+
+ hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_id = be16_to_cpu(hdr->command_id);
+
+ if (cmd_id & 0x1000) {
+ spin_lock(&wmi->wmi_lock);
+ wmi->wmi_skb = skb;
+ spin_unlock(&wmi->wmi_lock);
+ tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
+ return;
+ }
+
+ /* Check if there has been a timeout. */
+ spin_lock(&wmi->wmi_lock);
+ if (cmd_id != wmi->last_cmd_id) {
+ spin_unlock(&wmi->wmi_lock);
+ goto free_skb;
+ }
+ spin_unlock(&wmi->wmi_lock);
+
+ /* WMI command response */
+ ath9k_wmi_rsp_callback(wmi, skb);
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id epid, bool txok)
+{
+ kfree_skb(skb);
+}
+
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid)
+{
+ struct htc_service_connreq connect;
+ int ret;
+
+ wmi->htc = htc;
+
+ memset(&connect, 0, sizeof(connect));
+
+ connect.ep_callbacks.priv = wmi;
+ connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
+ connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
+ connect.service_id = WMI_CONTROL_SVC;
+
+ ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
+ if (ret)
+ return ret;
+
+ *wmi_ctrl_epid = wmi->ctrl_epid;
+
+ return 0;
+}
+
+static int ath9k_wmi_cmd_issue(struct wmi *wmi,
+ struct sk_buff *skb,
+ enum wmi_cmd_id cmd, u16 len)
+{
+ struct wmi_cmd_hdr *hdr;
+
+ hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
+ hdr->command_id = cpu_to_be16(cmd);
+ hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
+
+ return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL);
+}
+
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout)
+{
+ struct ath_hw *ah = wmi->drv_priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 headroom = sizeof(struct htc_frame_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+ struct sk_buff *skb;
+ u8 *data;
+ int time_left, ret = 0;
+ unsigned long flags;
+
+ if (!wmi)
+ return -EINVAL;
+
+ if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
+ return 0;
+
+ skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+
+ if (cmd_len != 0 && cmd_buf != NULL) {
+ data = (u8 *) skb_put(skb, cmd_len);
+ memcpy(data, cmd_buf, cmd_len);
+ }
+
+ mutex_lock(&wmi->op_mutex);
+
+ /* check if wmi stopped flag is set */
+ if (unlikely(wmi->stopped)) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ /* record the rsp buffer and length */
+ wmi->cmd_rsp_buf = rsp_buf;
+ wmi->cmd_rsp_len = rsp_len;
+
+ spin_lock_irqsave(&wmi->wmi_lock, flags);
+ wmi->last_cmd_id = cmd_id;
+ spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+
+ ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
+ if (ret)
+ goto out;
+
+ time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
+ if (!time_left) {
+ ath_print(common, ATH_DBG_WMI,
+ "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&wmi->op_mutex);
+
+ return 0;
+
+out:
+ ath_print(common, ATH_DBG_WMI,
+ "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ mutex_unlock(&wmi->op_mutex);
+ kfree_skb(skb);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 00000000000..765db5faa2d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+
+struct wmi_event_txrate {
+ __be32 txrate;
+ struct {
+ u8 rssi_thresh;
+ u8 per;
+ } rc_stats;
+} __packed;
+
+struct wmi_cmd_hdr {
+ __be16 command_id;
+ __be16 seq_no;
+} __packed;
+
+struct wmi_swba {
+ u8 beacon_pending;
+} __packed;
+
+enum wmi_cmd_id {
+ WMI_ECHO_CMDID = 0x0001,
+ WMI_ACCESS_MEMORY_CMDID,
+
+ /* Commands to Target */
+ WMI_DISABLE_INTR_CMDID,
+ WMI_ENABLE_INTR_CMDID,
+ WMI_RX_LINK_CMDID,
+ WMI_ATH_INIT_CMDID,
+ WMI_ABORT_TXQ_CMDID,
+ WMI_STOP_TX_DMA_CMDID,
+ WMI_STOP_DMA_RECV_CMDID,
+ WMI_ABORT_TX_DMA_CMDID,
+ WMI_DRAIN_TXQ_CMDID,
+ WMI_DRAIN_TXQ_ALL_CMDID,
+ WMI_START_RECV_CMDID,
+ WMI_STOP_RECV_CMDID,
+ WMI_FLUSH_RECV_CMDID,
+ WMI_SET_MODE_CMDID,
+ WMI_RESET_CMDID,
+ WMI_NODE_CREATE_CMDID,
+ WMI_NODE_REMOVE_CMDID,
+ WMI_VAP_REMOVE_CMDID,
+ WMI_VAP_CREATE_CMDID,
+ WMI_BEACON_UPDATE_CMDID,
+ WMI_REG_READ_CMDID,
+ WMI_REG_WRITE_CMDID,
+ WMI_RC_STATE_CHANGE_CMDID,
+ WMI_RC_RATE_UPDATE_CMDID,
+ WMI_DEBUG_INFO_CMDID,
+ WMI_HOST_ATTACH,
+ WMI_TARGET_IC_UPDATE_CMDID,
+ WMI_TGT_STATS_CMDID,
+ WMI_TX_AGGR_ENABLE_CMDID,
+ WMI_TGT_DETACH_CMDID,
+ WMI_TGT_TXQ_ENABLE_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_TGT_RDY_EVENTID = 0x1001,
+ WMI_SWBA_EVENTID,
+ WMI_FATAL_EVENTID,
+ WMI_TXTO_EVENTID,
+ WMI_BMISS_EVENTID,
+ WMI_WLAN_TXCOMP_EVENTID,
+ WMI_DELBA_EVENTID,
+ WMI_TXRATE_EVENTID,
+};
+
+#define MAX_CMD_NUMBER 62
+
+struct register_write {
+ __be32 reg;
+ __be32 val;
+};
+
+struct wmi {
+ struct ath9k_htc_priv *drv_priv;
+ struct htc_target *htc;
+ enum htc_endpoint_id ctrl_epid;
+ struct mutex op_mutex;
+ struct completion cmd_wait;
+ enum wmi_cmd_id last_cmd_id;
+ u16 tx_seq_id;
+ u8 *cmd_rsp_buf;
+ u32 cmd_rsp_len;
+ bool stopped;
+
+ struct sk_buff *wmi_skb;
+ spinlock_t wmi_lock;
+
+ atomic_t mwrite_cnt;
+ struct register_write multi_write[MAX_CMD_NUMBER];
+ u32 multi_write_idx;
+ struct mutex multi_write_mutex;
+};
+
+struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
+int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
+ enum htc_endpoint_id *wmi_ctrl_epid);
+int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ u8 *cmd_buf, u32 cmd_len,
+ u8 *rsp_buf, u32 rsp_len,
+ u32 timeout);
+void ath9k_wmi_tasklet(unsigned long data);
+
+#define WMI_CMD(_wmi_cmd) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
+ (u8 *) &cmd_rsp, \
+ sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#define WMI_CMD_BUF(_wmi_cmd, _buf) \
+ do { \
+ ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
+ (u8 *) _buf, sizeof(*_buf), \
+ &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
+ } while (0)
+
+#endif /* WMI_H */
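Both helper macros above assume three identifiers in the calling scope: an int ret, a response variable cmd_rsp (whose sizeof() is passed as the response length) and a struct ath9k_htc_priv *priv. A minimal call-site sketch, with the u32 response type and the choice of WMI_START_RECV_CMDID purely illustrative:

	static int example_start_recv(struct ath9k_htc_priv *priv)
	{
		u32 cmd_rsp;	/* placeholder response buffer */
		int ret;

		/* Expands to ath9k_wmi_cmd(priv->wmi, WMI_START_RECV_CMDID,
		 * NULL, 0, (u8 *) &cmd_rsp, sizeof(cmd_rsp), HZ * 2); */
		WMI_CMD(WMI_START_RECV_CMDID);
		return ret;
	}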
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 294b486bc3e..3db19172b43 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -15,10 +15,11 @@
*/
#include "ath9k.h"
+#include "ar9003_mac.h"
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
+#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -33,7 +34,7 @@
#define OFDM_SIFS_TIME 16
-static u32 bits_per_symbol[][2] = {
+static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
{ 26, 54 }, /* 0: BPSK */
{ 52, 108 }, /* 1: QPSK 1/2 */
@@ -43,14 +44,6 @@ static u32 bits_per_symbol[][2] = {
{ 208, 432 }, /* 5: 64-QAM 2/3 */
{ 234, 486 }, /* 6: 64-QAM 3/4 */
{ 260, 540 }, /* 7: 64-QAM 5/6 */
- { 52, 108 }, /* 8: BPSK */
- { 104, 216 }, /* 9: QPSK 1/2 */
- { 156, 324 }, /* 10: QPSK 3/4 */
- { 208, 432 }, /* 11: 16-QAM 1/2 */
- { 312, 648 }, /* 12: 16-QAM 3/4 */
- { 416, 864 }, /* 13: 64-QAM 2/3 */
- { 468, 972 }, /* 14: 64-QAM 3/4 */
- { 520, 1080 }, /* 15: 64-QAM 5/6 */
};
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
@@ -59,40 +52,50 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid,
struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar);
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+ struct ath_tx_status *ts, int txok);
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc);
enum {
- MCS_DEFAULT,
+ MCS_HT20,
+ MCS_HT20_SGI,
MCS_HT40,
MCS_HT40_SGI,
};
-static int ath_max_4ms_framelen[3][16] = {
- [MCS_DEFAULT] = {
- 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180,
- 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320,
+static int ath_max_4ms_framelen[4][32] = {
+ [MCS_HT20] = {
+ 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
+ 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
+ 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
+ 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
+ },
+ [MCS_HT20_SGI] = {
+ 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
+ 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
+ 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
+ 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
},
[MCS_HT40] = {
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600,
+ 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
+ 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
+ 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
+ 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
},
[MCS_HT40_SGI] = {
- /* TODO: Only MCS 7 and 15 updated, recalculate the rest */
- 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200,
- 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400,
+ 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
+ 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
+ 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
+ 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
}
};
-
/*********************/
/* Aggregation logic */
/*********************/
@@ -223,6 +226,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
@@ -236,7 +242,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, bf->bf_seqno);
spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
spin_lock(&txq->axq_lock);
}
@@ -259,25 +265,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
-static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
- struct ath_buf *tbf;
+ struct ath_buf *bf = NULL;
spin_lock_bh(&sc->tx.txbuflock);
- if (WARN_ON(list_empty(&sc->tx.txbuf))) {
+
+ if (unlikely(list_empty(&sc->tx.txbuf))) {
spin_unlock_bh(&sc->tx.txbuflock);
return NULL;
}
- tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&tbf->list);
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+ list_del(&bf->list);
+
spin_unlock_bh(&sc->tx.txbuflock);
+ return bf;
+}
+
+static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
+{
+ spin_lock_bh(&sc->tx.txbuflock);
+ list_add_tail(&bf->list, &sc->tx.txbuf);
+ spin_unlock_bh(&sc->tx.txbuflock);
+}
+
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
+{
+ struct ath_buf *tbf;
+
+ tbf = ath_tx_get_buffer(sc);
+ if (WARN_ON(!tbf))
+ return NULL;
+
ATH_TXBUF_RESET(tbf);
tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
- *(tbf->bf_desc) = *(bf->bf_desc);
+ memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
tbf->bf_state = bf->bf_state;
tbf->bf_dmacontext = bf->bf_dmacontext;
@@ -286,7 +313,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@@ -296,7 +323,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
struct list_head bf_head, bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +351,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memset(ba, 0, WME_BA_BMP_SIZE >> 3);
if (isaggr && txok) {
- if (ATH_DS_TX_BA(ds)) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds),
- WME_BA_BMP_SIZE >> 3);
+ if (ts->ts_flags & ATH9K_TX_BA) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
} else {
/*
* AR5416 can become deaf/mute when BA
@@ -345,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
- nbad = ath_tx_num_badfrms(sc, bf, txok);
+ nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
while (bf) {
txfail = txpending = 0;
bf_next = bf->bf_next;
@@ -359,7 +384,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
acked_cnt++;
} else {
if (!(tid->state & AGGR_CLEANUP) &&
- ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
+ !bf_last->bf_tx_aborted) {
if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
ath_tx_set_retry(sc, txq, bf);
txpending = 1;
@@ -378,7 +403,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
}
- if (bf_next == NULL) {
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf_next == NULL) {
/*
* Make sure the last desc is reclaimed if it
* not a holding desc.
@@ -402,45 +428,53 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
- ath_tx_rc_status(bf, ds, nbad, txok, true);
+ ath_tx_rc_status(bf, ts, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ds, nbad, txok, false);
+ ath_tx_rc_status(bf, ts, nbad, txok, false);
}
- ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ !txfail, sendbar);
} else {
/* retry the un-acked ones */
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the frame with
- * failed status if we run out of tx buf
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid,
- bf->bf_seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, nbad,
- 0, false);
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head, 0, 0);
- break;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+ if (bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ spin_lock_bh(&txq->axq_lock);
+ ath_tx_update_baw(sc, tid,
+ bf->bf_seqno);
+ spin_unlock_bh(&txq->axq_lock);
+
+ bf->bf_state.bf_type |=
+ BUF_XRETRY;
+ ath_tx_rc_status(bf, ts, nbad,
+ 0, false);
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head,
+ ts, 0, 0);
+ break;
+ }
+
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ tbf->bf_desc);
+ list_add_tail(&tbf->list, &bf_head);
+ } else {
+ /*
+ * Clear descriptor status words for
+ * software retry
+ */
+ ath9k_hw_cleartxdesc(sc->sc_ah,
+ bf->bf_desc);
}
-
- ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
- list_add_tail(&tbf->list, &bf_head);
- } else {
- /*
- * Clear descriptor status words for
- * software retry
- */
- ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
}
/*
@@ -508,12 +542,13 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
break;
}
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx = MCS_HT40_SGI;
- else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
modeidx = MCS_HT40;
else
- modeidx = MCS_DEFAULT;
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
@@ -558,7 +593,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
u32 nsymbits, nsymbols;
u16 minlen;
u8 flags, rix;
- int width, half_gi, ndelim, mindelim;
+ int width, streams, half_gi, ndelim, mindelim;
/* Select standard number of delimiters based on frame length alone */
ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -598,7 +633,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
if (nsymbols == 0)
nsymbols = 1;
- nsymbits = bits_per_symbol[rix][width];
+ streams = HT_RC_2_STREAMS(rix);
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
if (frmlen < minlen) {
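For reference, the per-stream lookup above reproduces the rows dropped from the old 16-entry bits_per_symbol table; e.g. for HT MCS 15 (two streams of 64-QAM 5/6) at 20 MHz:

	streams  = HT_RC_2_STREAMS(15);				/* ((15 & 0x78) >> 3) + 1 = 2 */
	nsymbits = bits_per_symbol[15 % 8][0] * streams;	/* 260 * 2 = 520, the removed row-15 value */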
@@ -664,7 +700,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bpad = PADBYTES(al_delta) + (ndelim << 2);
bf->bf_next = NULL;
- bf->bf_desc->ds_link = 0;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
/* link buffers of this frame to the aggregate */
ath_tx_addto_baw(sc, tid, bf);
@@ -672,7 +708,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
list_move_tail(&bf->list, bf_q);
if (bf_prev) {
bf_prev->bf_next = bf;
- bf_prev->bf_desc->ds_link = bf->bf_daddr;
+ ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
+ bf->bf_daddr);
}
bf_prev = bf;
@@ -752,8 +789,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+ struct ath_tx_status ts;
struct ath_buf *bf;
struct list_head bf_head;
+
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +820,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
}
list_move_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, txtid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_unlock_bh(&txq->axq_lock);
@@ -849,7 +889,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
- int qnum;
+ int qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype;
@@ -873,11 +913,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
* The UAPSD queue is an exception, since we take a desc-
* based intr on the EOSP frames.
*/
- if (qtype == ATH9K_TX_QUEUE_UAPSD)
- qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
- else
- qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
- TXQ_FLAG_TXDESCINT_ENABLE;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
+ TXQ_FLAG_TXERRINT_ENABLE;
+ } else {
+ if (qtype == ATH9K_TX_QUEUE_UAPSD)
+ qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
+ else
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
+ TXQ_FLAG_TXDESCINT_ENABLE;
+ }
qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
if (qnum == -1) {
/*
@@ -904,6 +949,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_depth = 0;
txq->axq_tx_inprogress = false;
sc->tx.txqsetup |= 1<<qnum;
+
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+ INIT_LIST_HEAD(&txq->txq_fifo[i]);
+ INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
return &sc->tx.txq[qnum];
}
@@ -1028,45 +1078,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
+ struct ath_tx_status ts;
+ memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
for (;;) {
spin_lock_bh(&txq->axq_lock);
- if (list_empty(&txq->axq_q)) {
- txq->axq_link = NULL;
- spin_unlock_bh(&txq->axq_lock);
- break;
- }
-
- bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ txq->txq_headidx = txq->txq_tailidx = 0;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ } else {
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ }
+ } else {
+ if (list_empty(&txq->axq_q)) {
+ txq->axq_link = NULL;
+ spin_unlock_bh(&txq->axq_lock);
+ break;
+ }
+ bf = list_first_entry(&txq->axq_q, struct ath_buf,
+ list);
- if (bf->bf_stale) {
- list_del(&bf->list);
- spin_unlock_bh(&txq->axq_lock);
+ if (bf->bf_stale) {
+ list_del(&bf->list);
+ spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- continue;
+ ath_tx_return_buffer(sc, bf);
+ continue;
+ }
}
lastbf = bf->bf_lastbf;
if (!retry_tx)
- lastbf->bf_desc->ds_txstat.ts_flags =
- ATH9K_TX_SW_ABORTED;
+ lastbf->bf_tx_aborted = true;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ list_cut_position(&bf_head,
+ &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ } else {
+ /* remove ath_buf's of the same mpdu from txq */
+ list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+ }
- /* remove ath_buf's of the same mpdu from txq */
- list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
txq->axq_depth--;
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
spin_lock_bh(&txq->axq_lock);
@@ -1081,6 +1149,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
spin_unlock_bh(&txq->axq_lock);
}
}
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(&txq->txq_fifo_pending)) {
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head,
+ &ts, 0);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &ts, 0, 0);
+ spin_lock_bh(&txq->axq_lock);
+ }
+ spin_unlock_bh(&txq->axq_lock);
+ }
}
void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1218,44 +1307,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
- list_splice_tail_init(head, &txq->axq_q);
- txq->axq_depth++;
-
ath_print(common, ATH_DBG_QUEUE,
"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
- if (txq->axq_link == NULL) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
+ list_splice_tail_init(head, &txq->txq_fifo_pending);
+ return;
+ }
+ if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
+ ath_print(common, ATH_DBG_XMIT,
+ "Initializing tx fifo %d which "
+ "is non-empty\n",
+ txq->txq_headidx);
+ INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
+ list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+ INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_print(common, ATH_DBG_XMIT,
"TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
} else {
- *txq->axq_link = bf->bf_daddr;
- ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
- txq->axq_qnum, txq->axq_link,
- ito64(bf->bf_daddr), bf->bf_desc);
- }
- txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
- ath9k_hw_txstart(ah, txq->axq_qnum);
-}
+ list_splice_tail_init(head, &txq->axq_q);
-static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
-{
- struct ath_buf *bf = NULL;
-
- spin_lock_bh(&sc->tx.txbuflock);
-
- if (unlikely(list_empty(&sc->tx.txbuf))) {
- spin_unlock_bh(&sc->tx.txbuflock);
- return NULL;
+ if (txq->axq_link == NULL) {
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ ath_print(common, ATH_DBG_XMIT,
+ "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr),
+ bf->bf_desc);
+ } else {
+ *txq->axq_link = bf->bf_daddr;
+ ath_print(common, ATH_DBG_XMIT,
+ "link[%u] (%p)=%llx (%p)\n",
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
+ }
+ ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
+ &txq->axq_link);
+ ath9k_hw_txstart(ah, txq->axq_qnum);
}
-
- bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
- list_del(&bf->list);
-
- spin_unlock_bh(&sc->tx.txbuflock);
-
- return bf;
+ txq->axq_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1402,8 +1494,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
-static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_txq *txq)
+static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
int flags = 0;
@@ -1414,6 +1505,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= ATH9K_TXDESC_NOACK;
+ if (use_ldpc)
+ flags |= ATH9K_TXDESC_LDPC;
+
return flags;
}
@@ -1432,8 +1526,9 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
/* find number of symbols: PLCP + data */
+ streams = HT_RC_2_STREAMS(rix);
nbits = (pktlen << 3) + OFDM_PLCP_BITS;
- nsymbits = bits_per_symbol[rix][width];
+ nsymbits = bits_per_symbol[rix % 8][width] * streams;
nsymbols = (nbits + nsymbits - 1) / nsymbits;
if (!half_gi)
@@ -1442,7 +1537,6 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
duration = SYMBOL_TIME_HALFGI(nsymbols);
/* addup duration for legacy/ht training and signal fields */
- streams = HT_RC_2_STREAMS(rix);
duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
return duration;
@@ -1513,6 +1607,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Rate = rix | 0x80;
series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
is_40, is_sgi, is_sp);
+ if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ series[i].RateFlags |= ATH9K_RATESERIES_STBC;
continue;
}
@@ -1565,15 +1661,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
int hdrlen;
__le16 fc;
int padpos, padsize;
+ bool use_ldpc = false;
tx_info->pad[0] = 0;
switch (txctl->frame_type) {
- case ATH9K_NOT_INTERNAL:
+ case ATH9K_IFT_NOT_INTERNAL:
break;
- case ATH9K_INT_PAUSE:
+ case ATH9K_IFT_PAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
/* fall through */
- case ATH9K_INT_UNPAUSE:
+ case ATH9K_IFT_UNPAUSE:
tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
break;
}
@@ -1591,10 +1688,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf))
+ if (conf_is_ht(&hw->conf)) {
bf->bf_state.bf_type |= BUF_HT;
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+ use_ldpc = true;
+ }
- bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+ bf->bf_flags = setup_tx_flags(skb, use_ldpc);
bf->bf_keytype = get_hw_crypto_keytype(skb);
if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1653,8 +1753,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
list_add_tail(&bf->list, &bf_head);
ds = bf->bf_desc;
- ds->ds_link = 0;
- ds->ds_data = bf->bf_buf_addr;
+ ath9k_hw_set_desc_link(ah, ds, 0);
ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1663,7 +1762,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
skb->len, /* segment length */
true, /* first segment */
true, /* last segment */
- ds); /* first descriptor */
+ ds, /* first descriptor */
+ bf->bf_buf_addr,
+ txctl->txq->axq_qnum);
spin_lock_bh(&txctl->txq->axq_lock);
@@ -1732,9 +1833,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
}
spin_unlock_bh(&txq->axq_lock);
- spin_lock_bh(&sc->tx.txbuflock);
- list_add_tail(&bf->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
+ ath_tx_return_buffer(sc, bf);
return r;
}
@@ -1852,9 +1951,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_txq *txq,
- struct list_head *bf_q,
- int txok, int sendbar)
+ struct ath_txq *txq, struct list_head *bf_q,
+ struct ath_tx_status *ts, int txok, int sendbar)
{
struct sk_buff *skb = bf->bf_mpdu;
unsigned long flags;
@@ -1872,7 +1970,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
ath_tx_complete(sc, skb, bf->aphy, tx_flags);
- ath_debug_stat_tx(sc, txq, bf);
+ ath_debug_stat_tx(sc, txq, bf, ts);
/*
* Return the list of ath_buf of this mpdu to free queue
@@ -1883,23 +1981,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
}
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- int txok)
+ struct ath_tx_status *ts, int txok)
{
- struct ath_buf *bf_last = bf->bf_lastbf;
- struct ath_desc *ds = bf_last->bf_desc;
u16 seq_st = 0;
u32 ba[WME_BA_BMP_SIZE >> 5];
int ba_index;
int nbad = 0;
int isaggr = 0;
- if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+ if (bf->bf_tx_aborted)
return 0;
isaggr = bf_isaggr(bf);
if (isaggr) {
- seq_st = ATH_DS_BA_SEQ(ds);
- memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
}
while (bf) {
@@ -1913,7 +2009,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
return nbad;
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
int nbad, int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -1923,24 +2019,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
u8 i, tx_rateindex;
if (txok)
- tx_info->status.ack_signal = ds->ds_txstat.ts_rssi;
+ tx_info->status.ack_signal = ts->ts_rssi;
- tx_rateindex = ds->ds_txstat.ts_rateindex;
+ tx_rateindex = ts->ts_rateindex;
WARN_ON(tx_rateindex >= hw->max_rates);
- if (update_rc)
- tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+ if (ts->ts_status & ATH9K_TXERR_FILT)
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+ if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
if (ieee80211_is_data(hdr->frame_control)) {
- if (ds->ds_txstat.ts_flags &
+ if (ts->ts_flags &
(ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
- if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
- (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
+ if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
+ (ts->ts_status & ATH9K_TXERR_FIFO))
tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
tx_info->status.ampdu_len = bf->bf_nframes;
tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -1978,6 +2074,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_buf *bf, *lastbf, *bf_held = NULL;
struct list_head bf_head;
struct ath_desc *ds;
+ struct ath_tx_status ts;
int txok;
int status;
@@ -2017,7 +2114,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
lastbf = bf->bf_lastbf;
ds = lastbf->bf_desc;
- status = ath9k_hw_txprocdesc(ah, ds);
+ memset(&ts, 0, sizeof(ts));
+ status = ath9k_hw_txprocdesc(ah, ds, &ts);
if (status == -EINPROGRESS) {
spin_unlock_bh(&txq->axq_lock);
break;
@@ -2028,7 +2126,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* can disable RX.
*/
if (bf->bf_isnullfunc &&
- (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
+ (ts.ts_status & ATH9K_TX_ACKED)) {
if ((sc->ps_flags & PS_ENABLED))
ath9k_enable_ps(sc);
else
@@ -2047,31 +2145,30 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
+ txok = !(ts.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
+ if (bf_held)
+ list_del(&bf_held->list);
spin_unlock_bh(&txq->axq_lock);
- if (bf_held) {
- spin_lock_bh(&sc->tx.txbuflock);
- list_move_tail(&bf_held->list, &sc->tx.txbuf);
- spin_unlock_bh(&sc->tx.txbuflock);
- }
+ if (bf_held)
+ ath_tx_return_buffer(sc, bf_held);
if (!bf_isampdu(bf)) {
/*
* This frame is sent out as a single frame.
* Use hardware retry status for this frame.
*/
- bf->bf_retries = ds->ds_txstat.ts_longretry;
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_retries = ts.ts_longretry;
+ if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, ds, 0, txok, true);
+ ath_tx_rc_status(bf, &ts, 0, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
ath_wake_mac80211_queue(sc, txq);
@@ -2133,10 +2230,121 @@ void ath_tx_tasklet(struct ath_softc *sc)
}
}
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+ struct ath_tx_status txs;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_txq *txq;
+ struct ath_buf *bf, *lastbf;
+ struct list_head bf_head;
+ int status;
+ int txok;
+
+ for (;;) {
+ status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+ if (status == -EINPROGRESS)
+ break;
+ if (status == -EIO) {
+ ath_print(common, ATH_DBG_XMIT,
+ "Error processing tx status\n");
+ break;
+ }
+
+ /* Skip beacon completions */
+ if (txs.qid == sc->beacon.beaconq)
+ continue;
+
+ txq = &sc->tx.txq[txs.qid];
+
+ spin_lock_bh(&txq->axq_lock);
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ spin_unlock_bh(&txq->axq_lock);
+ return;
+ }
+
+ bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+ struct ath_buf, list);
+ lastbf = bf->bf_lastbf;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
+ &lastbf->list);
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+ txq->axq_depth--;
+ txq->axq_tx_inprogress = false;
+ spin_unlock_bh(&txq->axq_lock);
+
+ txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+
+ if (!bf_isampdu(bf)) {
+ bf->bf_retries = txs.ts_longretry;
+ if (txs.ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ ath_tx_rc_status(bf, &txs, 0, txok, true);
+ }
+
+ if (bf_isampdu(bf))
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+ else
+ ath_tx_complete_buf(sc, bf, txq, &bf_head,
+ &txs, txok, 0);
+
+ ath_wake_mac80211_queue(sc, txq);
+
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->txq_fifo_pending)) {
+ INIT_LIST_HEAD(&bf_head);
+ bf = list_first_entry(&txq->txq_fifo_pending,
+ struct ath_buf, list);
+ list_cut_position(&bf_head, &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
+ ath_tx_txqaddbuf(sc, txq, &bf_head);
+ } else if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
/*****************/
/* Init, Cleanup */
/*****************/
+static int ath_txstatus_setup(struct ath_softc *sc, int size)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+ u8 txs_len = sc->sc_ah->caps.txs_len;
+
+ dd->dd_desc_len = size * txs_len;
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ath_tx_edma_init(struct ath_softc *sc)
+{
+ int err;
+
+ err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
+ if (!err)
+ ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
+ sc->txsdma.dd_desc_paddr,
+ ATH_TXSTATUS_RING_SIZE);
+
+ return err;
+}
+
+static void ath_tx_edma_cleanup(struct ath_softc *sc)
+{
+ struct ath_descdma *dd = &sc->txsdma;
+
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+}
+
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2145,7 +2353,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
spin_lock_init(&sc->tx.txbuflock);
error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
- "tx", nbufs, 1);
+ "tx", nbufs, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate tx descriptors: %d\n", error);
@@ -2153,7 +2361,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
- "beacon", ATH_BCBUF, 1);
+ "beacon", ATH_BCBUF, 1, 1);
if (error != 0) {
ath_print(common, ATH_DBG_FATAL,
"Failed to allocate beacon descriptors: %d\n", error);
@@ -2162,6 +2370,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ error = ath_tx_edma_init(sc);
+ if (error)
+ goto err;
+ }
+
err:
if (error != 0)
ath_tx_cleanup(sc);
@@ -2176,6 +2390,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
if (sc->tx.txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_tx_edma_cleanup(sc);
}
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 8263633c003..873bf526e11 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -59,6 +59,7 @@ enum ATH_DEBUG {
ATH_DBG_PS = 0x00000800,
ATH_DBG_HWTIMER = 0x00001000,
ATH_DBG_BTCOEX = 0x00002000,
+ ATH_DBG_WMI = 0x00004000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index ecc9eb01f4f..a8f81ea09f1 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -19,8 +19,8 @@
#include "ath.h"
#include "reg.h"
-#define REG_READ common->ops->read
-#define REG_WRITE common->ops->write
+#define REG_READ (common->ops->read)
+#define REG_WRITE (common->ops->write)
/**
* ath_hw_set_bssid_mask - filter out bssids we listen
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 00489c40be0..3f4244f56ce 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -50,6 +50,7 @@
#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5470_5850
+
/* This one skips what we call "mid band" */
#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
ATH9K_5GHZ_5725_5850
@@ -332,7 +333,6 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
ath_reg_apply_active_scan_flags(wiphy, initiator);
break;
}
- return;
}
int ath_reg_notifier_apply(struct wiphy *wiphy,
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
{
- u16 rd = ath_regd_get_eepromRD(reg);
+ u16 rd = ath_regd_get_eepromRD(reg);
int i;
if (rd & COUNTRY_ERD_FLAG) {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 3edbbcf0f54..c8f7090b27d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -865,7 +865,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* low bit of first byte of destination tells us if broadcast */
tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += len;
spin_unlock_irqrestore(&priv->irqlock, flags);
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 32407911842..c2746fc7f2b 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -85,41 +85,7 @@ static void atmel_release(struct pcmcia_device *link);
static void atmel_detach(struct pcmcia_device *p_dev);
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the atmelnet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
- dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
@@ -141,10 +107,6 @@ static int atmel_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "atmel_attach()\n");
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -226,9 +188,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -278,15 +238,9 @@ static int atmel_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, atmel_config_check, NULL))
goto failed;
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
+ if (!link->irq) {
+ dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
+ goto failed;
}
/*
@@ -298,14 +252,8 @@ static int atmel_config(struct pcmcia_device *link)
if (ret)
goto failed;
- if (link->irq.AssignedIRQ == 0) {
- printk(KERN_ALERT
- "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
- goto failed;
- }
-
((local_info_t*)link->priv)->eth_dev =
- init_atmel_card(link->irq.AssignedIRQ,
+ init_atmel_card(link->irq,
link->io.BasePort1,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
@@ -315,14 +263,6 @@ static int atmel_config(struct pcmcia_device *link)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
return 0;
failed:
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b8807fb12c9..3a003e6803a 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -104,6 +104,7 @@
#define B43_MMIO_MACFILTER_CONTROL 0x420
#define B43_MMIO_MACFILTER_DATA 0x422
#define B43_MMIO_RCMTA_COUNT 0x43C
+#define B43_MMIO_PSM_PHY_HDR 0x492
#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
#define B43_MMIO_GPIO_CONTROL 0x49C
#define B43_MMIO_GPIO_MASK 0x49E
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9a374ef83a2..7965b70efba 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4349,11 +4349,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_phytxctl_defaults(dev);
/* Minimum Contention Window */
- if (phy->type == B43_PHYTYPE_B) {
+ if (phy->type == B43_PHYTYPE_B)
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F);
- } else {
+ else
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF);
- }
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
@@ -4572,6 +4571,23 @@ static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
mutex_unlock(&wl->mutex);
}
+static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43_wl *wl = hw_to_b43_wl(hw);
+ struct b43_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
static const struct ieee80211_ops b43_hw_ops = {
.tx = b43_op_tx,
.conf_tx = b43_op_conf_tx,
@@ -4591,6 +4607,7 @@ static const struct ieee80211_ops b43_hw_ops = {
.sta_notify = b43_op_sta_notify,
.sw_scan_start = b43_op_sw_scan_start_notifier,
.sw_scan_complete = b43_op_sw_scan_complete_notifier,
+ .get_survey = b43_op_get_survey,
.rfkill_poll = b43_rfkill_poll,
};
@@ -4906,8 +4923,7 @@ static int b43_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 609e7051e01..0e99b634267 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -98,10 +98,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
if (res != 0)
goto err_disable;
- dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- dev->irq.Handler = NULL; /* The handler is registered later. */
- res = pcmcia_request_irq(dev, &dev->irq);
- if (res != 0)
+ if (!dev->irq)
goto err_disable;
res = pcmcia_request_configuration(dev, &dev->conf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9c7cd282e46..3d6b3377596 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -73,6 +73,22 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
u16 value, u8 core, bool off);
static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
u16 value, u8 core);
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
+
+static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
+{
+ return !chanspec->channel && !chanspec->sideband &&
+ !chanspec->b_width && !chanspec->b_freq;
+}
+
+static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
+ struct b43_chanspec *chanspec2)
+{
+ return (chanspec1->channel == chanspec2->channel &&
+ chanspec1->sideband == chanspec2->sideband &&
+ chanspec1->b_width == chanspec2->b_width &&
+ chanspec1->b_freq == chanspec2->b_freq);
+}
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -89,34 +105,44 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
}
static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
-{
- b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry *e)
+ const struct b43_phy_n_sfo_cfg *e)
{
b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
@@ -131,34 +157,20 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
//TODO
}
-/* Tune the hardware to a new channel. */
-static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
-{
- const struct b43_nphy_channeltab_entry *tabent;
- tabent = b43_nphy_get_chantabent(dev, channel);
- if (!tabent)
- return -ESRCH;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
+static void b43_radio_2055_setup(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
+{
+ B43_WARN_ON(dev->phy.rev >= 3);
- //FIXME enable/disable band select upper20 in RXCTL
- if (0 /*FIXME 5Ghz*/)
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
- else
- b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
- b43_chantab_radio_upload(dev, tabent);
+ b43_chantab_radio_upload(dev, e);
udelay(50);
- b43_radio_write16(dev, B2055_VCO_CAL10, 5);
- b43_radio_write16(dev, B2055_VCO_CAL10, 45);
- b43_radio_write16(dev, B2055_VCO_CAL10, 65);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
udelay(300);
- if (0 /*FIXME 5Ghz*/)
- b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
- else
- b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
- b43_chantab_phy_upload(dev, tabent);
- b43_nphy_tx_power_fix(dev);
-
- return 0;
}
static void b43_radio_init2055_pre(struct b43_wldev *dev)
@@ -174,52 +186,64 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
static void b43_radio_init2055_post(struct b43_wldev *dev)
{
+ struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
int i;
u16 val;
+ bool workaround = false;
+
+ if (sprom->revision < 4)
+ workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
+ binfo->type != 0x46D ||
+ binfo->rev < 0x41);
+ else
+ workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
- msleep(1);
- if ((sprom->revision != 4) ||
- !(sprom->boardflags_hi & B43_BFH_RSSIINV)) {
- if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
- (binfo->type != 0x46D) ||
- (binfo->rev < 0x41)) {
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
- msleep(1);
- }
+ if (workaround) {
+ b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
+ b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F);
}
- b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C);
- msleep(1);
- b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
- msleep(1);
+ b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
+ b43_radio_write(dev, B2055_CAL_MISC, 0x3C);
b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
- msleep(1);
b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
- msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x1);
msleep(1);
b43_radio_set(dev, B2055_CAL_MISC, 0x40);
- msleep(1);
- for (i = 0; i < 100; i++) {
- val = b43_radio_read16(dev, B2055_CAL_COUT2);
- if (val & 0x80)
+ for (i = 0; i < 200; i++) {
+ val = b43_radio_read(dev, B2055_CAL_COUT2);
+ if (val & 0x80) {
+ i = 0;
break;
+ }
udelay(10);
}
- msleep(1);
+ if (i)
+ b43err(dev->wl, "radio post init timeout\n");
b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
- msleep(1);
nphy_channel_switch(dev, dev->phy.channel);
- b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
- b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
- b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
+ b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
+ b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
+ b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
+ b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
+ if (!nphy->gain_boost) {
+ b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2);
+ b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2);
+ } else {
+ b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD);
+ b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD);
+ }
+ udelay(2);
}
-/* Initialize a Broadcom 2055 N-radio */
+/*
+ * Initialize a Broadcom 2055 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
+ */
static void b43_radio_init2055(struct b43_wldev *dev)
{
b43_radio_init2055_pre(dev);
@@ -230,16 +254,15 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-void b43_nphy_radio_turn_on(struct b43_wldev *dev)
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
{
- b43_radio_init2055(dev);
+ /* TODO */
}
-void b43_nphy_radio_turn_off(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_EN);
-}
/*
* Upload the N-PHY tables.
@@ -647,6 +670,41 @@ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00,
+ 0xFC00);
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
{
@@ -723,7 +781,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- unsigned int channel;
+ u8 channel = nphy->radio_chanspec.channel;
int tone[2] = { 57, 58 };
u32 noise[2] = { 0x3FF, 0x3FF };
@@ -732,8 +790,6 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- /* FIXME: channel = radio_chanspec */
-
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
if (channel == 11 && dev->phy.is_40mhz)
@@ -779,6 +835,62 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * nphy->radio_chanspec.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * nphy->radio_chanspec.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
{
@@ -863,7 +975,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
(code << 8 | 0x7C));
- /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+ b43_nphy_adjust_lna_gain_table(dev);
if (nphy->elna_gain_config) {
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
@@ -1970,12 +2082,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
u16 *rssical_phy_regs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (!nphy->rssical_chanspec_2G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_2G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
- if (!nphy->rssical_chanspec_5G)
+ if (b43_empty_chanspec(&nphy->rssical_chanspec_5G))
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2395,7 +2507,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
u16 *txcal_radio_regs = NULL;
- u8 *iqcal_chanspec;
+ struct b43_chanspec *iqcal_chanspec;
u16 *table = NULL;
if (nphy->hang_avoid)
@@ -2451,12 +2563,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (nphy->iqcal_chanspec_2G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G))
return;
table = nphy->cal_cache.txcal_coeffs_2G;
loft = &nphy->cal_cache.txcal_coeffs_2G[5];
} else {
- if (nphy->iqcal_chanspec_5G == 0)
+ if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G))
return;
table = nphy->cal_cache.txcal_coeffs_5G;
loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2689,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
}
b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
buffer);
- b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
buffer);
b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
buffer);
@@ -2701,8 +2813,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
nphy->txiqlocal_bestc);
nphy->txiqlocal_coeffsvalid = true;
- /* TODO: Set nphy->txiqlocal_chanspec to
- the current channel */
+ nphy->txiqlocal_chanspec = nphy->radio_chanspec;
} else {
length = 11;
if (dev->phy.rev < 3)
@@ -2737,7 +2848,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
u16 buffer[7];
bool equal = true;
- if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ if (!nphy->txiqlocal_coeffsvalid ||
+ b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec))
return;
b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3092,9 +3204,11 @@ int b43_phy_initn(struct b43_wldev *dev)
do_rssi_cal = false;
if (phy->rev >= 3) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_2G);
else
- do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+ do_rssi_cal =
+ b43_empty_chanspec(&nphy->rssical_chanspec_5G);
if (do_rssi_cal)
b43_nphy_rssi_cal(dev);
@@ -3106,9 +3220,9 @@ int b43_phy_initn(struct b43_wldev *dev)
if (!((nphy->measure_hold & 0x6) != 0)) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- do_cal = (nphy->iqcal_chanspec_2G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G);
else
- do_cal = (nphy->iqcal_chanspec_5G == 0);
+ do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G);
if (nphy->mute)
do_cal = false;
@@ -3117,7 +3231,7 @@ int b43_phy_initn(struct b43_wldev *dev)
target = b43_nphy_get_tx_gains(dev);
if (nphy->antsel_type == 2)
- ;/*TODO NPHY Superswitch Init with argument 1*/
+ b43_nphy_superswitch_init(dev, true);
if (nphy->perical != 2) {
b43_nphy_rssi_cal(dev);
if (phy->rev >= 3) {
@@ -3155,6 +3269,133 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
+static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 tmp;
+ u32 tmp32;
+
+ tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (chanspec.b_freq == 1 && tmp == 0) {
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (chanspec.b_freq == 1) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+ tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000);
+ b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+
+ tmp = chanspec.channel;
+ if (chanspec.b_freq == 1)
+ tmp |= 0x0100;
+ if (chanspec.b_width == 3)
+ tmp |= 0x0200;
+ b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
+
+ if (nphy->radio_chanspec.channel == 14) {
+ b43_nphy_classifier(dev, 2, 0);
+ b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
+ } else {
+ b43_nphy_classifier(dev, 2, 2);
+ if (chanspec.b_freq == 2)
+ b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
+ }
+
+ if (nphy->txpwrctrl)
+ b43_nphy_tx_power_fix(dev);
+
+ if (dev->phy.rev < 3)
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ b43_nphy_tx_lp_fbw(dev);
+
+ if (dev->phy.rev >= 3 && 0) {
+ /* TODO */
+ }
+
+ b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
+
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
+static int b43_nphy_set_chanspec(struct b43_wldev *dev,
+ struct b43_chanspec chanspec)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
+ const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
+
+ u8 tmp;
+ u8 channel = chanspec.channel;
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ tabent_r3 = NULL;
+ if (!tabent_r3)
+ return -ESRCH;
+ } else {
+ tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
+ if (!tabent_r2)
+ return -ESRCH;
+ }
+
+ nphy->radio_chanspec = chanspec;
+
+ if (chanspec.b_width != nphy->b_width)
+ ; /* TODO: BMAC BW Set (chanspec.b_width) */
+
+ /* TODO: use defines */
+ if (chanspec.b_width == 3) {
+ if (chanspec.sideband == 2)
+ b43_phy_set(dev, B43_NPHY_RXCTL,
+ B43_NPHY_RXCTL_BSELU20);
+ else
+ b43_phy_mask(dev, B43_NPHY_RXCTL,
+ ~B43_NPHY_RXCTL_BSELU20);
+ }
+
+ if (dev->phy.rev >= 3) {
+ tmp = (chanspec.b_freq == 1) ? 4 : 0;
+ b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
+ /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
+ b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
+ } else {
+ tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
+ b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
+ b43_radio_2055_setup(dev, tabent_r2);
+ b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
+ }
+
+ return 0;
+}
+
+/* Tune the hardware to a new channel */
+static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_chanspec chanspec;
+ chanspec = nphy->radio_chanspec;
+ chanspec.channel = channel;
+
+ return b43_nphy_set_chanspec(dev, chanspec);
+}
+
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -3243,9 +3484,43 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
-{//TODO
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
+ b43err(dev->wl, "MAC not suspended\n");
+
+ if (blocked) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ if (dev->phy.rev >= 3) {
+ b43_radio_mask(dev, 0x09, ~0x2);
+
+ b43_radio_write(dev, 0x204D, 0);
+ b43_radio_write(dev, 0x2053, 0);
+ b43_radio_write(dev, 0x2058, 0);
+ b43_radio_write(dev, 0x205E, 0);
+ b43_radio_mask(dev, 0x2062, ~0xF0);
+ b43_radio_write(dev, 0x2064, 0);
+
+ b43_radio_write(dev, 0x304D, 0);
+ b43_radio_write(dev, 0x3053, 0);
+ b43_radio_write(dev, 0x3058, 0);
+ b43_radio_write(dev, 0x305E, 0);
+ b43_radio_mask(dev, 0x3062, ~0xF0);
+ b43_radio_write(dev, 0x3064, 0);
+ }
+ } else {
+ if (dev->phy.rev >= 3) {
+ b43_radio_init2056(dev);
+ b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
+ } else {
+ b43_radio_init2055(dev);
+ }
+ }
}
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
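Note on the chanspec helpers: the new paths above test cached calibration state with b43_empty_chanspec() and b43_eq_chanspecs(), whose definitions are not part of this hunk. Judging from the b43_chanspec structure added to phy_n.h below, they are presumably plain field comparisons along these lines — a hedged sketch, not the committed implementation:

static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
{
	/* An all-zero chanspec marks "no calibration data cached yet". */
	return !chanspec->channel && !chanspec->sideband &&
	       !chanspec->b_width && !chanspec->b_freq;
}

static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
				    struct b43_chanspec *chanspec2)
{
	/* Two chanspecs match only if every field matches. */
	return (chanspec1->channel == chanspec2->channel &&
		chanspec1->sideband == chanspec2->sideband &&
		chanspec1->b_width == chanspec2->b_width &&
		chanspec1->b_freq == chanspec2->b_freq);
}

This is why the cached-calibration checks change from comparing a bare u8 channel against 0 to calling b43_empty_chanspec() on the cached struct.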
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 403aad3f894..8b6d570dd0a 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -711,6 +711,8 @@
#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
+#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
+#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
/* Broadcom 2055 radio registers */
@@ -924,6 +926,13 @@
struct b43_wldev;
+struct b43_chanspec {
+ u8 channel;
+ u8 sideband;
+ u8 b_width;
+ u8 b_freq;
+};
+
struct b43_phy_n_iq_comp {
s16 a0;
s16 b0;
@@ -975,7 +984,8 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
- u16 radio_chanspec;
+ u8 b_width;
+ struct b43_chanspec radio_chanspec;
bool gain_boost;
bool elna_gain_config;
@@ -991,6 +1001,7 @@ struct b43_phy_n {
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
u16 tx_rx_cal_phy_saveregs[11];
@@ -1006,12 +1017,12 @@ struct b43_phy_n {
bool gband_spurwar_en;
bool ipa2g_on;
- u8 iqcal_chanspec_2G;
- u8 rssical_chanspec_2G;
+ struct b43_chanspec iqcal_chanspec_2G;
+ struct b43_chanspec rssical_chanspec_2G;
bool ipa5g_on;
- u8 iqcal_chanspec_5G;
- u8 rssical_chanspec_5G;
+ struct b43_chanspec iqcal_chanspec_5G;
+ struct b43_chanspec rssical_chanspec_5G;
struct b43_phy_n_rssical_cache rssical_cache;
struct b43_phy_n_cal_cache cal_cache;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index a00d509150f..d96e870ab8f 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
.radio_c2_tx_mxbgtrim = r21
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
- .phy_bw1a = r0, \
- .phy_bw2 = r1, \
- .phy_bw3 = r2, \
- .phy_bw4 = r3, \
- .phy_bw5 = r4, \
- .phy_bw6 = r5
-
-static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
+ .phy_regs.phy_bw1a = r0, \
+ .phy_regs.phy_bw2 = r1, \
+ .phy_regs.phy_bw3 = r2, \
+ .phy_regs.phy_bw4 = r3, \
+ .phy_regs.phy_bw5 = r4, \
+ .phy_regs.phy_bw6 = r5
+
+static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
@@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
},
};
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
{
- const struct b43_nphy_channeltab_entry *e;
+ const struct b43_nphy_channeltab_entry_rev2 *e;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9c1c6ecd367..8fc1da9f8fe 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -4,9 +4,22 @@
#include <linux/types.h>
-struct b43_nphy_channeltab_entry {
+struct b43_phy_n_sfo_cfg {
+ u16 phy_bw1a;
+ u16 phy_bw2;
+ u16 phy_bw3;
+ u16 phy_bw4;
+ u16 phy_bw5;
+ u16 phy_bw6;
+};
+
+struct b43_nphy_channeltab_entry_rev2 {
/* The channel number */
u8 channel;
+ /* The channel frequency in MHz */
+ u16 freq;
+ /* An unknown value */
+ u16 unk2;
/* Radio register values on channelswitch */
u8 radio_pll_ref;
u8 radio_rf_pllmod0;
@@ -31,16 +44,18 @@ struct b43_nphy_channeltab_entry {
u8 radio_c2_tx_pgapadtn;
u8 radio_c2_tx_mxbgtrim;
/* PHY register values on channelswitch */
- u16 phy_bw1a;
- u16 phy_bw2;
- u16 phy_bw3;
- u16 phy_bw4;
- u16 phy_bw5;
- u16 phy_bw6;
+ struct b43_phy_n_sfo_cfg phy_regs;
+};
+
+struct b43_nphy_channeltab_entry_rev3 {
+ /* The channel number */
+ u8 channel;
/* The channel frequency in MHz */
u16 freq;
- /* An unknown value */
- u16 unk2;
+ /* Radio register values on channelswitch */
+ /* TODO */
+ /* PHY register values on channelswitch */
+ struct b43_phy_n_sfo_cfg phy_regs;
};
@@ -77,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
/* Get the NPHY Channel Switch Table entry for a channel number.
* Returns NULL on failure to find an entry. */
-const struct b43_nphy_channeltab_entry *
-b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
/* The N-PHY tables. */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index eda06529ef5..e6b0528f3b5 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -610,7 +610,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
/* Link quality statistics */
- status.noise = dev->stats.link_noise;
if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
// s8 rssi = max(rxhdr->power0, rxhdr->power1);
//TODO: Find out what the rssi value is (dBm or percentage?)
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index bb2dd9329aa..1713f5f7a58 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3482,6 +3482,23 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
return 0;
}
+static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
+ struct b43legacy_wldev *dev = wl->current_dev;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = dev->stats.link_noise;
+
+ return 0;
+}
+
static const struct ieee80211_ops b43legacy_hw_ops = {
.tx = b43legacy_op_tx,
.conf_tx = b43legacy_op_conf_tx,
@@ -3494,6 +3511,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
+ .get_survey = b43legacy_op_get_survey,
.rfkill_poll = b43legacy_rfkill_poll,
};
@@ -3769,8 +3787,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 9c8882d9275..7d177d97f1f 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -548,7 +548,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
(phystat0 & B43legacy_RX_PHYST0_OFDM),
(phystat0 & B43legacy_RX_PHYST0_GAINCTL),
(phystat3 & B43legacy_RX_PHYST3_TRSTATE));
- status.noise = dev->stats.link_noise;
/* change to support A PHY */
if (phystat0 & B43legacy_RX_PHYST0_OFDM)
status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index f4c56121d38..e0b3e8d406b 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -355,8 +355,7 @@ static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
list_del(&bss->list);
local->num_bss_info--;
} else {
- bss = (struct hostap_bss_info *)
- kmalloc(sizeof(*bss), GFP_ATOMIC);
+ bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
if (bss == NULL)
return NULL;
}
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 7e72ac1de49..231dbd77f5f 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -349,7 +349,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
default:
policy_txt = "unknown";
break;
- };
+ }
p += sprintf(p, "MAC policy: %s\n", policy_txt);
p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
p += sprintf(p, "MAC list:\n");
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index a36501dbbe0..db72461c486 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
/* struct local_info::hw_priv */
struct hostap_cs_priv {
- dev_node_t node;
struct pcmcia_device *link;
int sandisk_connectplus;
};
@@ -556,15 +555,7 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
- else if (!(p_dev->conf.Attributes & CONF_ENABLE_IRQ)) {
- /* At least Compaq WL200 does not have IRQInfo1 set,
- * but it does not work without interrupts.. */
- printk(KERN_WARNING "Config has no IRQ info, but trying to "
- "enable IRQ anyway..\n");
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
- }
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
@@ -633,21 +624,10 @@ static int prism2_config(struct pcmcia_device *link)
local = iface->local;
local->hw_priv = hw_priv;
hw_priv->link = link;
- strcpy(hw_priv->node.dev_name, dev->name);
- link->dev_node = &hw_priv->node;
- /*
- * Allocate an interrupt line. Note that this does not assign a
- * handler to the interrupt, unless the 'Handler' member of the
- * irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = prism2_interrupt;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ ret = pcmcia_request_irq(link, prism2_interrupt);
+ if (ret)
+ goto failed;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -658,7 +638,7 @@ static int prism2_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
/* Finally, report what we've done */
@@ -668,7 +648,7 @@ static int prism2_config(struct pcmcia_device *link)
printk(", Vpp %d.%d", link->conf.Vpp / 10,
link->conf.Vpp % 10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -682,11 +662,9 @@ static int prism2_config(struct pcmcia_device *link)
sandisk_enable_wireless(dev);
ret = prism2_hw_config(dev, 1);
- if (!ret) {
+ if (!ret)
ret = hostap_hw_ready(dev);
- if (ret == 0 && local->ddev)
- strcpy(hw_priv->node.dev_name, local->ddev->name);
- }
+
return ret;
failed:
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index 89d3849abfe..e73bf739fd9 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -744,7 +744,7 @@ static int prism2_download(local_info_t *local,
local->dev->name, param->dl_cmd);
ret = -EINVAL;
break;
- };
+ }
out:
if (ret == 0 && dl &&
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 9a082308a9d..a85e43a8d75 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3039,8 +3039,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p->length > 1024 || !p->pointer)
return -EINVAL;
- param = (struct prism2_download_param *)
- kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a774..0bd4dfa59a8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,6 +174,8 @@ that only one external action is invoked at a time.
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
+struct pm_qos_request_list *ipw2100_pm_qos_req;
+
/* Debugging stuff */
#ifdef CONFIG_IPW2100_DEBUG
#define IPW2100_RX_DEBUG /* Reception debugging */
@@ -1739,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175);
+ pm_qos_update_request(ipw2100_pm_qos_req, 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1887,8 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -2140,7 +2141,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
DECLARE_SSID_BUF(ssid);
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -3239,7 +3240,6 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
txq->next);
}
- return;
}
static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
@@ -3285,7 +3285,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
if (inta & IPW2100_INTA_PARITY_ERROR) {
printk(KERN_ERR DRV_NAME
- ": ***** PARITY ERROR INTERRUPT !!!! \n");
+ ": ***** PARITY ERROR INTERRUPT !!!!\n");
priv->inta_other++;
write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
}
@@ -6102,7 +6102,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Look into using netdev destructor to shutdown ieee80211? */
+/* Look into using netdev destructor to shutdown libipw? */
static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
void __iomem * base_addr,
@@ -6112,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
struct ipw2100_priv *priv;
struct net_device *dev;
- dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
+ dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
if (!dev)
return NULL;
priv = libipw_priv(dev);
@@ -6425,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
sysfs_remove_group(&pci_dev->dev.kobj,
&ipw2100_attribute_group);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
pci_set_drvdata(pci_dev, NULL);
}
@@ -6483,10 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
if (dev->base_addr)
iounmap((void __iomem *)dev->base_addr);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(dev, 0);
+ free_libipw(dev, 0);
}
pci_release_regions(pci_dev);
@@ -6669,7 +6669,7 @@ static int __init ipw2100_init(void)
if (ret)
goto out;
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
@@ -6692,7 +6692,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");
+ pm_qos_remove_request(ipw2100_pm_qos_req);
}
module_init(ipw2100_init);
@@ -6753,7 +6753,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
err = -EOPNOTSUPP;
goto done;
} else { /* Set the channel */
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
err = ipw2100_set_channel(priv, fwrq->m, 0);
}
@@ -6782,7 +6782,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
else
wrqu->freq.m = 0;
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -6794,7 +6794,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
int err = 0;
- IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
+ IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
if (wrqu->mode == priv->ieee->iw_mode)
return 0;
@@ -7149,7 +7149,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
memset(priv->nick, 0, sizeof(priv->nick));
memcpy(priv->nick, extra, wrqu->data.length);
- IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
+ IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
return 0;
}
@@ -7168,7 +7168,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
memcpy(extra, priv->nick, wrqu->data.length);
wrqu->data.flags = 1; /* active */
- IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
+ IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
return 0;
}
@@ -7207,7 +7207,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
err = ipw2100_set_tx_rates(priv, rate, 0);
- IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
+ IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7258,7 +7258,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = 0;
}
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7294,7 +7294,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
err = ipw2100_set_rts_threshold(priv, value);
- IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
+ IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
done:
mutex_unlock(&priv->action_mutex);
return err;
@@ -7316,7 +7316,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
/* If RTS is set to the default value, then it is disabled */
wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
return 0;
}
@@ -7355,7 +7355,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
err = ipw2100_set_tx_power(priv, value);
- IPW_DEBUG_WX("SET TX Power -> %d \n", value);
+ IPW_DEBUG_WX("SET TX Power -> %d\n", value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7384,7 +7384,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
wrqu->txpower.flags = IW_TXPOW_DBM;
- IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
+ IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
return 0;
}
@@ -7414,7 +7414,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
priv->frag_threshold = priv->ieee->fts;
}
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
return 0;
}
@@ -7432,7 +7432,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -7458,14 +7458,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (wrqu->retry.flags & IW_RETRY_SHORT) {
err = ipw2100_set_short_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
if (wrqu->retry.flags & IW_RETRY_LONG) {
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
+ IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
wrqu->retry.value);
goto done;
}
@@ -7474,7 +7474,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
if (!err)
err = ipw2100_set_long_retry(priv, wrqu->retry.value);
- IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
done:
mutex_unlock(&priv->action_mutex);
@@ -7508,7 +7508,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->short_retry_limit;
}
- IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
return 0;
}
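Note on the pm_qos change: the hunks above move ipw2100 from the old name-keyed pm_qos_*_requirement() calls to the handle-based request API, where the driver keeps the struct pm_qos_request_list pointer returned by pm_qos_add_request() and passes it to every later update/remove call. A condensed sketch of the lifecycle exactly as the hunks above use it (the 175 microsecond bound is the hardware limit noted in ipw2100_up()):

/* module init: register a request with no constraint yet */
ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
					PM_QOS_DEFAULT_VALUE);

/* interface up: the ipw2100 cannot tolerate PM delays above ~175 usec */
pm_qos_update_request(ipw2100_pm_qos_req, 175);

/* interface down: relax the constraint again */
pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);

/* module exit: drop the request */
pm_qos_remove_request(ipw2100_pm_qos_req);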
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d72e3d1958..3aa3bb18f61 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
u32 word;
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
- IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
+ IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
word = _ipw_read32(priv, IPW_INDIRECT_DATA);
return (word >> ((reg & 0x3) * 8)) & 0xff;
}
@@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
value = _ipw_read32(priv, IPW_INDIRECT_DATA);
- IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
+ IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
return value;
}
@@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
+static void ipw_abort_scan(struct ipw_priv *priv);
+
+#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
static void ipw_scan_check(void *data)
{
struct ipw_priv *priv = data;
- if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
+
+ if (priv->status & STATUS_SCAN_ABORTING) {
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
queue_work(priv->workqueue, &priv->adapter_restart);
+ } else if (priv->status & STATUS_SCANNING) {
+ IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
+ "after (%dms).\n",
+ jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+ ipw_abort_scan(priv);
+ queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
}
}
@@ -2598,8 +2607,6 @@ static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
/* the eeprom requires some time to complete the operation */
udelay(p->eeprom_delay);
-
- return;
}
/* perform a chip select operation */
@@ -2739,7 +2746,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{ /* start dma engine but no transfers yet */
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
/* Start the dma */
ipw_fw_dma_reset_command_blocks(priv);
@@ -2747,7 +2754,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
/* Write CB base address */
ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
- IPW_DEBUG_FW("<< : \n");
+ IPW_DEBUG_FW("<< :\n");
return 0;
}
@@ -2762,7 +2769,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
priv->sram_desc.last_cb_index = 0;
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
}
static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@@ -2813,29 +2820,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
IPW_DEBUG_FW(">> :\n");
address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
- IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
+ IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
/* Read the DMA Controlor register */
register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
- IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
/* Print the CB values */
cb_fields_address = address;
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
+ IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
register_value);
cb_fields_address += sizeof(u32);
register_value = ipw_read_reg32(priv, cb_fields_address);
- IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
+ IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
IPW_DEBUG_FW(">> :\n");
}
@@ -2851,7 +2858,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
sizeof(struct command_block);
- IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
+ IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
current_cb_index, current_cb_address);
IPW_DEBUG_FW(">> :\n");
@@ -2910,7 +2917,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
int ret, i;
u32 size;
- IPW_DEBUG_FW(">> \n");
+ IPW_DEBUG_FW(">>\n");
IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
nr, dest_address, len);
@@ -2927,7 +2934,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
IPW_DEBUG_FW_INFO(": Added new cb\n");
}
- IPW_DEBUG_FW("<< \n");
+ IPW_DEBUG_FW("<<\n");
return 0;
}
@@ -2936,7 +2943,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
u32 current_index = 0, previous_index;
u32 watchdog = 0;
- IPW_DEBUG_FW(">> : \n");
+ IPW_DEBUG_FW(">> :\n");
current_index = ipw_fw_dma_command_block_index(priv);
IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@@ -2965,7 +2972,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
ipw_set_bit(priv, IPW_RESET_REG,
IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
- IPW_DEBUG_FW("<< dmaWaitSync \n");
+ IPW_DEBUG_FW("<< dmaWaitSync\n");
return 0;
}
@@ -3026,7 +3033,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
{
int rc;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
/* stop master. typical delay - 0 */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
@@ -3045,7 +3052,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
static void ipw_arc_release(struct ipw_priv *priv)
{
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
mdelay(5);
ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@@ -3067,7 +3074,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
image = (__le16 *) data;
- IPW_DEBUG_TRACE(">> \n");
+ IPW_DEBUG_TRACE(">>\n");
rc = ipw_stop_master(priv);
@@ -3181,7 +3188,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
void **virts;
dma_addr_t *phys;
- IPW_DEBUG_TRACE("<< : \n");
+ IPW_DEBUG_TRACE("<< :\n");
virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
GFP_KERNEL);
@@ -4482,7 +4489,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case CMAS_ASSOCIATED:{
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "associated: '%s' %pM \n",
+ "associated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4563,7 +4570,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DL_ASSOC,
"deauthenticated: '%s' "
"%pM"
- ": (0x%04X) - %s \n",
+ ": (0x%04X) - %s\n",
print_ssid(ssid,
priv->
essid,
@@ -4614,7 +4621,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
IPW_DL_ASSOC,
- "disassociated: '%s' %pM \n",
+ "disassociated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -4652,7 +4659,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
switch (auth->state) {
case CMAS_AUTHENTICATED:
IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
- "authenticated: '%s' %pM \n",
+ "authenticated: '%s' %pM\n",
print_ssid(ssid, priv->essid,
priv->essid_len),
priv->bssid);
@@ -6925,7 +6932,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
} else {
mode = priv->ieee->mode;
}
- IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
+ IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
return mode;
}
@@ -6965,7 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
&def_parameters_OFDM, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
- IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
+ IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
schedule_work(&priv->qos_activate);
}
@@ -7542,7 +7549,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
return err;
}
- IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
+ IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
print_ssid(ssid, priv->essid, priv->essid_len),
priv->bssid);
@@ -8793,7 +8800,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
}
}
- IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
mutex_lock(&priv->mutex);
ret = ipw_set_channel(priv, channel);
mutex_unlock(&priv->mutex);
@@ -8835,7 +8842,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
wrqu->freq.m = 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+ IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
return 0;
}
@@ -9230,7 +9237,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
wrqu->sens.value = priv->roaming_threshold;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
+ IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9358,7 +9365,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
wrqu->bitrate.value = priv->last_rate;
wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+ IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
return 0;
}
@@ -9381,7 +9388,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
ipw_send_rts_threshold(priv, priv->rts_threshold);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
+ IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
return 0;
}
@@ -9395,7 +9402,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
wrqu->rts.fixed = 0; /* no auto select */
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
+ IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
return 0;
}
@@ -9445,7 +9452,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET TX Power -> %s %d \n",
+ IPW_DEBUG_WX("GET TX Power -> %s %d\n",
wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
return 0;
@@ -9471,7 +9478,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
ipw_send_frag_threshold(priv, wrqu->frag.value);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9485,7 +9492,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+ IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
return 0;
}
@@ -9549,7 +9556,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
}
mutex_unlock(&priv->mutex);
- IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
+ IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
return 0;
}
@@ -9996,49 +10003,48 @@ static int ipw_wx_sw_reset(struct net_device *dev,
}
/* Rebase the WE IOCTLs to zero for the handler array */
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
static iw_handler ipw_wx_handlers[] = {
- IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
- IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
- IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
- IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
- IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
- IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
- IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
- IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
- IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
- IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
- IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
- IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
- IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
- IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
- IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
- IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
- IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
- IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
- IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
- IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
- IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
- IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
- IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
- IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
- IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
- IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
- IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
- IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
- IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
- IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
- IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
- IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
- IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
- IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
- IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
- IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
- IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
+ IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
+ IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
+ IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
+ IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
+ IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
+ IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
+ IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
+ IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
+ IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
+ IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
+ IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
+ IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
+ IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
+ IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
};
enum {
@@ -11667,7 +11673,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
if (priv->prom_net_dev)
return -EPERM;
- priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
+ priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
if (priv->prom_net_dev == NULL)
return -ENOMEM;
@@ -11686,7 +11692,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
rc = register_netdev(priv->prom_net_dev);
if (rc) {
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
return rc;
}
@@ -11700,7 +11706,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
return;
unregister_netdev(priv->prom_net_dev);
- free_ieee80211(priv->prom_net_dev, 1);
+ free_libipw(priv->prom_net_dev, 1);
priv->prom_net_dev = NULL;
}
@@ -11728,7 +11734,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
struct ipw_priv *priv;
int i;
- net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
+ net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
if (net_dev == NULL) {
err = -ENOMEM;
goto out;
@@ -11748,7 +11754,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
mutex_init(&priv->mutex);
if (pci_enable_device(pdev)) {
err = -ENODEV;
- goto out_free_ieee80211;
+ goto out_free_libipw;
}
pci_set_master(pdev);
@@ -11875,8 +11881,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
out_pci_disable_device:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- out_free_ieee80211:
- free_ieee80211(priv->net_dev, 0);
+ out_free_libipw:
+ free_libipw(priv->net_dev, 0);
out:
return err;
}
@@ -11943,11 +11949,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- /* wiphy_unregister needs to be here, before free_ieee80211 */
+ /* wiphy_unregister needs to be here, before free_libipw */
wiphy_unregister(priv->ieee->wdev.wiphy);
kfree(priv->ieee->a_band.channels);
kfree(priv->ieee->bg_band.channels);
- free_ieee80211(priv->net_dev, 0);
+ free_libipw(priv->net_dev, 0);
free_firmware();
}
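Note on the handler table: the rewrite above drops the driver-local IW_IOCTL() indexing macro in favour of IW_HANDLER() from include/net/iw_handler.h. The upstream macro presumably expands to the same designated-initializer trick, rebased on SIOCIWFIRST (which shares the value 0x8B00 with SIOCSIWCOMMIT), so the table layout is unchanged while each entry now keeps the ioctl number and its handler together:

/* Presumed definitions from include/net/iw_handler.h; not part of this diff. */
#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
#define IW_HANDLER(id, func)	[IW_IOCTL_IDX(id)] = func

/* e.g. IW_HANDLER(SIOCGIWNAME, fn) places fn at the same array index
 * that the removed IW_IOCTL(SIOCGIWNAME) designator used. */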
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index a6d5e42647e..284b0e4cb81 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -64,7 +64,7 @@
extern u32 libipw_debug_level;
#define LIBIPW_DEBUG(level, fmt, args...) \
do { if (libipw_debug_level & (level)) \
- printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
+ printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
static inline bool libipw_ratelimit_debug(u32 level)
{
@@ -116,8 +116,8 @@ static inline bool libipw_ratelimit_debug(u32 level)
#define LIBIPW_DL_RX (1<<9)
#define LIBIPW_DL_QOS (1<<31)
-#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
-#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
+#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
+#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
@@ -905,7 +905,7 @@ struct libipw_device {
struct libipw_reassoc_request * req);
/* This must be the last item so that it points to the data
- * allocated beyond this structure by alloc_ieee80211 */
+ * allocated beyond this structure by alloc_libipw */
u8 priv[0];
};
@@ -1017,9 +1017,9 @@ static inline int libipw_is_cck_rate(u8 rate)
return 0;
}
-/* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev, int monitor);
-extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+/* libipw.c */
+extern void free_libipw(struct net_device *dev, int monitor);
+extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 2fa55867bd8..55965408ff3 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -53,7 +53,7 @@
#include "libipw.h"
#define DRV_DESCRIPTION "802.11 data/management/control stack"
-#define DRV_NAME "ieee80211"
+#define DRV_NAME "libipw"
#define DRV_VERSION LIBIPW_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
@@ -140,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL(libipw_change_mtu);
-struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
+struct net_device *alloc_libipw(int sizeof_priv, int monitor)
{
struct libipw_device *ieee;
struct net_device *dev;
@@ -222,8 +222,9 @@ failed_free_netdev:
failed:
return NULL;
}
+EXPORT_SYMBOL(alloc_libipw);
-void free_ieee80211(struct net_device *dev, int monitor)
+void free_libipw(struct net_device *dev, int monitor)
{
struct libipw_device *ieee = netdev_priv(dev);
@@ -237,6 +238,7 @@ void free_ieee80211(struct net_device *dev, int monitor)
free_netdev(dev);
}
+EXPORT_SYMBOL(free_libipw);
#ifdef CONFIG_LIBIPW_DEBUG
@@ -291,7 +293,7 @@ static int __init libipw_init(void)
struct proc_dir_entry *e;
libipw_debug_level = debug;
- libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
+ libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
if (libipw_proc == NULL) {
LIBIPW_ERROR("Unable to create " DRV_NAME
" proc directory\n");
@@ -331,6 +333,3 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_exit(libipw_exit);
module_init(libipw_init);
-
-EXPORT_SYMBOL(alloc_ieee80211);
-EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 39a34da52d5..0de1b189322 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -918,7 +918,6 @@ void libipw_rx_any(struct libipw_device *ieee,
drop_free:
dev_kfree_skb_irq(skb);
ieee->dev->stats.rx_dropped++;
- return;
}
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4e378faee65..7c723538551 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,7 +9,10 @@ CFLAGS_iwl-devtrace.o := -I$(src)
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
+iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
+iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
+iwlagn-objs += iwl-agn-lib.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
@@ -19,5 +22,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 3bf2e6e9b2d..6be2992f8f2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -42,9 +42,11 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 3
@@ -117,7 +119,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -125,13 +127,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -161,25 +163,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.apm_ops = {
.init = iwl_apm_init,
@@ -189,40 +191,47 @@ static struct iwl_lib_ops iwl1000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl1000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "1000 Series BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -230,10 +239,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -248,10 +257,15 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl1000_bg_cfg = {
- .name = "1000 Series BG",
+ .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
.fw_name_pre = IWL1000_FW_PRE,
.ucode_api_max = IWL1000_UCODE_API_MAX,
.ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -259,10 +273,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ops = &iwl1000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -270,12 +284,16 @@ struct iwl_cfg iwl1000_bg_cfg = {
.use_bsm = false,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 128,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
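
The iwl-1000.c hunk above swaps the 5000-series-specific callbacks for the shared iwlagn_* ones, so several device configurations can reference a single table of function pointers. The following standalone C sketch, using invented demo_* names rather than the driver's own symbols, illustrates that shared ops-table pattern:

/*
 * Minimal userspace sketch of the "shared ops table" pattern: several
 * per-device configs reference one table of function pointers.
 * All names here are illustrative, not taken from the iwlwifi driver.
 */
#include <stdio.h>

struct demo_lib_ops {
	int (*set_hw_params)(const char *name);   /* per-family hook */
	void (*send_tx_power)(const char *name);  /* shared hook */
};

struct demo_cfg {
	const char *name;
	const struct demo_lib_ops *ops;  /* many cfgs, one ops table */
};

static int demo_set_hw_params(const char *name)
{
	printf("%s: set hw params\n", name);
	return 0;
}

static void demo_send_tx_power(const char *name)
{
	printf("%s: send tx power\n", name);
}

/* One shared ops table ... */
static const struct demo_lib_ops demo_agn_lib = {
	.set_hw_params = demo_set_hw_params,
	.send_tx_power = demo_send_tx_power,
};

/* ... referenced by more than one device config. */
static const struct demo_cfg demo_bgn_cfg = { "demo BGN", &demo_agn_lib };
static const struct demo_cfg demo_bg_cfg  = { "demo BG",  &demo_agn_lib };

int main(void)
{
	const struct demo_cfg *cfgs[] = { &demo_bgn_cfg, &demo_bg_cfg };

	for (unsigned i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++) {
		cfgs[i]->ops->set_hw_params(cfgs[i]->name);
		cfgs[i]->ops->send_tx_power(cfgs[i]->name);
	}
	return 0;
}
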
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
new file mode 100644
index 00000000000..6a9c64a50e3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -0,0 +1,500 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-3945-debugfs.h"
+
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
+ sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_statistics_rx_non_phy *general, *accum_general;
+ struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode;
+ * it might not reflect the current uCode activity.
+ */
+ ofdm = &priv->_3945.statistics.rx.ofdm;
+ cck = &priv->_3945.statistics.rx.cck;
+ general = &priv->_3945.statistics.rx.general;
+ accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_3945.accum_statistics.rx.cck;
+ accum_general = &priv->_3945.accum_statistics.rx.general;
+ delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_3945.delta_statistics.rx.cck;
+ delta_general = &priv->_3945.delta_statistics.rx.general;
+ max_ofdm = &priv->_3945.max_delta.rx.ofdm;
+ max_cck = &priv->_3945.max_delta.rx.cck;
+ max_general = &priv->_3945.max_delta.rx.general;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "accumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "accumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "accumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode;
+ * it might not reflect the current uCode activity.
+ */
+ tx = &priv->_3945.statistics.tx;
+ accum_tx = &priv->_3945.accum_statistics.tx;
+ delta_tx = &priv->_3945.delta_statistics.tx;
+ max_tx = &priv->_3945.max_delta.tx;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "accumulative delta max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_statistics_general *general, *accum_general;
+ struct iwl39_statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics information displayed here is based on
+ * the last statistics notification from the uCode;
+ * it might not reflect the current uCode activity.
+ */
+ general = &priv->_3945.statistics.general;
+ dbg = &priv->_3945.statistics.general.dbg;
+ div = &priv->_3945.statistics.general.div;
+ accum_general = &priv->_3945.accum_statistics.general;
+ delta_general = &priv->_3945.delta_statistics.general;
+ max_general = &priv->_3945.max_delta.general;
+ accum_dbg = &priv->_3945.accum_statistics.general.dbg;
+ delta_dbg = &priv->_3945.delta_statistics.general.dbg;
+ max_dbg = &priv->_3945.max_delta.general.dbg;
+ accum_div = &priv->_3945.accum_statistics.general.div;
+ delta_div = &priv->_3945.delta_statistics.general.div;
+ max_div = &priv->_3945.max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "accumulative delta max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
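
Each of the three read handlers in the new file above follows the same pattern: snapshot the current, accumulated, delta and maximum counters, format them row by row into a kzalloc()'d buffer with scnprintf(), then return the result through simple_read_from_buffer(). A rough userspace analogue of that formatting flow, with made-up counter names and values, might look like this:

/*
 * Userspace analogue of the debugfs read handlers above: format a
 * "current / accumulated / delta / max" row per counter into a bounded
 * buffer, mirroring the scnprintf() + simple_read_from_buffer() flow.
 * Counter names and values are invented for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_counter {
	const char *name;
	unsigned int current, accum, delta, max;
};

static size_t format_stats(char *buf, size_t bufsz,
			   const struct demo_counter *c, size_t n)
{
	size_t pos = 0;

	pos += snprintf(buf + pos, bufsz - pos,
			"%-32s current accumulative delta max\n",
			"Statistics_Rx - OFDM:");
	for (size_t i = 0; i < n && pos < bufsz; i++)
		pos += snprintf(buf + pos, bufsz - pos,
				"  %-30s %10u %10u %10u %10u\n",
				c[i].name, c[i].current, c[i].accum,
				c[i].delta, c[i].max);
	return pos < bufsz ? pos : bufsz - 1;
}

int main(void)
{
	const struct demo_counter counters[] = {
		{ "plcp_err:",  3, 120, 1, 7 },
		{ "crc32_err:", 9, 450, 2, 15 },
	};
	size_t bufsz = 4096;
	char *buf = calloc(1, bufsz);	/* kzalloc() in the kernel code */

	if (!buf)
		return 1;
	fwrite(buf, 1,
	       format_stats(buf, bufsz, counters,
			    sizeof(counters) / sizeof(counters[0])),
	       stdout);
	free(buf);			/* kfree() in the kernel code */
	return 0;
}
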
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
new file mode 100644
index 00000000000..70809c53c21
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#else
+static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
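
The header above exposes the real read handlers only when CONFIG_IWLWIFI_DEBUGFS is set and falls back to stubs returning 0 otherwise, so the debugfs_ops tables compile either way. A tiny userspace sketch of that compile-time gating, with DEMO_DEBUGFS standing in for the Kconfig symbol, is:

/*
 * Sketch of the feature-gating pattern used by the header: identical
 * signatures in both branches, a no-op stub when the option is off.
 * Build with or without -DDEMO_DEBUGFS to see both variants.
 */
#include <stdio.h>

#ifdef DEMO_DEBUGFS
static long demo_rx_stats_read(char *buf, long count)
{
	return snprintf(buf, count, "real statistics would go here\n");
}
#else
static long demo_rx_stats_read(char *buf, long count)
{
	(void)buf;
	(void)count;
	return 0;	/* nothing to report when debugfs support is off */
}
#endif

int main(void)
{
	char buf[64];
	long n = demo_rx_stats_read(buf, sizeof(buf));

	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	else
		puts("(debug statistics disabled)");
	return 0;
}
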
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 3a876a8ece3..91bcb4e3cdf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,13 +71,11 @@
#include "iwl-eeprom.h"
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
+#define IWL_DEFAULT_TX_POWER 0x0F
+
/*
* EEPROM related constants, enums, and structures.
*/
@@ -228,7 +226,6 @@ struct iwl3945_eeprom {
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
-#define IWL_NUM_SCAN_RATES (2)
#define IWL_DEFAULT_TX_RETRY 15
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 902c4d4293e..8e84a08ff95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -330,16 +330,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &priv->hw->conf;
+ struct iwl3945_sta_priv *psta;
+ struct iwl3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
int i;
- IWL_DEBUG_RATE(priv, "enter\n");
+ IWL_DEBUG_INFO(priv, "enter\n");
+ if (sta_id == priv->hw_params.bcast_sta_id)
+ goto out;
- spin_lock_init(&rs_sta->lock);
+ psta = (struct iwl3945_sta_priv *) sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
rs_sta->priv = priv;
@@ -352,9 +361,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
rs_sta->last_flush = jiffies;
rs_sta->flush_time = IWL_RATE_FLUSH;
rs_sta->last_tx_packets = 0;
- rs_sta->ibss_sta_added = 0;
- init_timer(&rs_sta->rate_scale_flush);
rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
@@ -373,16 +380,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
}
}
- priv->sta_supp_rates = sta->supp_rates[sband->band];
+ priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
if (sband->band == IEEE80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->sta_supp_rates = priv->sta_supp_rates <<
+ priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
IWL_FIRST_OFDM_RATE;
}
+out:
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- IWL_DEBUG_RATE(priv, "leave\n");
+ IWL_DEBUG_INFO(priv, "leave\n");
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -406,6 +415,9 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
rs_sta = &psta->rs_sta;
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
IWL_DEBUG_RATE(priv, "leave\n");
return rs_sta;
@@ -414,13 +426,14 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
+ struct iwl3945_rs_sta *rs_sta = priv_sta;
- IWL_DEBUG_RATE(priv, "enter\n");
+ /*
+ * Be careful not to use any members of iwl3945_rs_sta (such as its
+ * iwl_priv pointer for debug printing), since the structure may not
+ * be fully initialized at this point.
+ */
del_timer_sync(&rs_sta->rate_scale_flush);
- IWL_DEBUG_RATE(priv, "leave\n");
}
@@ -459,6 +472,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
return;
}
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (!rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
+ return;
+ }
+
+
rs_sta->tx_packets++;
scale_rate_index = first_index;
@@ -525,8 +545,6 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
spin_unlock_irqrestore(&rs_sta->lock, flags);
IWL_DEBUG_RATE(priv, "leave\n");
-
- return;
}
static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
@@ -626,14 +644,19 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
s8 max_rate_idx = -1;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
IWL_DEBUG_RATE(priv, "enter\n");
+ /* Treat uninitialized rate scaling data the same as non-existent. */
+ if (rs_sta && !rs_sta->priv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
if (rate_control_send_low(sta, priv_sta, txrc))
return;
@@ -651,20 +674,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
if (sband->band == IEEE80211_BAND_5GHZ)
rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !rs_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
- }
- if (sta_id != IWL_INVALID_STATION)
- rs_sta->ibss_sta_added = 1;
- }
-
spin_lock_irqsave(&rs_sta->lock, flags);
/* for recent assoc, choose best rate regarding
@@ -884,12 +893,22 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
@@ -900,7 +919,6 @@ static struct rate_control_ops rs_ops = {
#endif
};
-
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
struct iwl_priv *priv = hw->priv;
@@ -917,6 +935,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
sta = ieee80211_find_sta(priv->vif,
priv->stations[sta_id].sta.sta.addr);
if (!sta) {
+ IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
rcu_read_unlock();
return;
}
@@ -947,7 +966,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
spin_unlock_irqrestore(&rs_sta->lock, flags);
- rssi = priv->last_rx_rssi;
+ rssi = priv->_3945.last_rx_rssi;
if (rssi == 0)
rssi = IWL_MIN_RSSI_VAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0728054a22d..068f7f8435c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -50,6 +50,7 @@
#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
+#include "iwl-3945-debugfs.h"
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -192,12 +193,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
}
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
+ case TX_3945_STATUS_SUCCESS:
return "SUCCESS";
TX_STATUS_ENTRY(SHORT_LIMIT);
TX_STATUS_ENTRY(LONG_LIMIT);
@@ -243,7 +244,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
next_rate = IWL_RATE_6M_INDEX;
break;
case IEEE80211_BAND_2GHZ:
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
* iwl3945_rx_reply_tx - Handle Tx response
*/
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+ struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -351,18 +352,143 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * Based on the assumption that all statistics counters are 32-bit (DWORD).
+ * FIXME: This function is for debugging only; it does not handle
+ * the case of counter roll-over.
+ */
+static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *)&priv->_3945.statistics;
+ accum_stats = (u32 *)&priv->_3945.accum_statistics;
+ delta = (u32 *)&priv->_3945.delta_statistics;
+ max_delta = (u32 *)&priv->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ priv->_3945.accum_statistics.general.temperature =
+ priv->_3945.statistics.general.temperature;
+ priv->_3945.accum_statistics.general.ttl_timestamp =
+ priv->_3945.statistics.general.ttl_timestamp;
+}
+#endif
+
+/**
+ * iwl3945_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error is exceeding the thresholds, reset the radio
+ * to improve the throughput.
+ */
+static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ struct iwl3945_notif_statistics current_stat;
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
+
+ memcpy(&current_stat, pkt->u.raw, sizeof(struct
+ iwl3945_notif_statistics));
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %d, %u mSecs\n",
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(current_stat.rx.ofdm.plcp_err),
+ combined_plcp_delta, plcp_msec);
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ rc = false;
+ }
+ }
+ return rc;
+}
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
+
+ memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
+}
+
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *)&pkt->u.raw;
- memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
+ if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+ memset(&priv->_3945.accum_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.delta_statistics, 0,
+ sizeof(struct iwl3945_notif_statistics));
+ memset(&priv->_3945.max_delta, 0,
+ sizeof(struct iwl3945_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl3945_hw_rx_statistics(priv, rxb);
}
+
/******************************************************************************
*
* Misc. internal state and helper functions
@@ -487,7 +613,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
* but you can hack it to show more, if you'd like to. */
if (dataframe)
IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%d, \n",
+ "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
title, le16_to_cpu(fc), header->addr1[5],
length, rssi, channel, rate);
else {
@@ -549,7 +675,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
u16 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
- int ret;
__le16 fc = hdr->frame_control;
/* We received data from the HW, so stop the watchdog */
@@ -566,9 +691,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
return;
}
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+ skb = dev_alloc_skb(128);
if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
@@ -577,37 +702,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
- skb_reserve(skb, IWL_LINK_HDR_MAX);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (pkt) after
- * here. It might have already been freed by the above skb change.
- */
-
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
- out:
priv->alloc_rxb_page--;
rxb->page = NULL;
}
@@ -623,9 +724,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- int snr;
- u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
u8 network_packet;
rx_status.flag = 0;
@@ -663,53 +763,29 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
/* Convert 3945's rssi indicator to dBm */
rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
- /* Set default noise value to -127 */
- if (priv->last_rx_noise == 0)
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- /* 3945 provides noise info for OFDM frames only.
- * sig_avg and noise_diff are measured by the 3945's digital signal
- * processor (DSP), and indicate linear levels of signal level and
- * distortion/noise within the packet preamble after
- * automatic gain control (AGC). sig_avg should stay fairly
- * constant if the radio's AGC is working well.
- * Since these values are linear (not dB or dBm), linear
- * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
- * Convert linear SNR to dB SNR, then subtract that from rssi dBm
- * to obtain noise level in dBm.
- * Calculate rx_status.signal (quality indicator in %) based on SNR. */
- if (rx_stats_noise_diff) {
- snr = rx_stats_sig_avg / rx_stats_noise_diff;
- rx_status.noise = rx_status.signal -
- iwl3945_calc_db_from_ratio(snr);
- } else {
- rx_status.noise = priv->last_rx_noise;
- }
-
-
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise,
- rx_stats_sig_avg, rx_stats_noise_diff);
+ IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_stats_sig_avg,
+ rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
network_packet = iwl3945_is_network_packet(priv, header);
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
+ IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
network_packet ? '*' : ' ',
le16_to_cpu(rx_hdr->channel),
rx_status.signal, rx_status.signal,
- rx_status.noise, rx_status.rate_idx);
+ rx_status.rate_idx);
/* Set "1" to report good data frames in groups of 100 */
iwl3945_dbg_report_frame(priv, pkt, header, 1);
iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
if (network_packet) {
- priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
- priv->last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->last_rx_rssi = rx_status.signal;
- priv->last_rx_noise = rx_status.noise;
+ priv->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ priv->_3945.last_rx_rssi = rx_status.signal;
}
iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
@@ -871,7 +947,8 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
}
-u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
+static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
+ u16 tx_rate, u8 flags)
{
unsigned long flags_spin;
struct iwl_station_entry *station;
@@ -957,7 +1034,7 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->shared_phys);
+ priv->_3945.shared_phys);
iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
@@ -1049,7 +1126,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type \n");
+ IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -1607,7 +1684,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
int power;
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* Get this channel's rate-to-current-power settings table */
power_info = ch_info->power_info;
@@ -1701,6 +1778,11 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
int ref_temp;
int temperature = priv->temperature;
+ if (priv->disable_tx_power_cal ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
/* set up new Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
@@ -1733,7 +1815,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
}
/* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
for (scan_tbl_index = 0;
@@ -1911,6 +1993,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
"configuration (%d).\n", rc);
return rc;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1941,7 +2025,10 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- iwl_clear_stations_table(priv);
+ if (!new_assoc) {
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ }
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
@@ -1951,19 +2038,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
return rc;
}
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
- if (iwl_is_associated(priv) &&
- (priv->iw_mode == NL80211_IFTYPE_STATION))
- if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
- true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding AP address for transmit\n");
- return -EIO;
- }
-
/* Init the hardware's rate fallback order based on the band */
rc = iwl3945_init_hw_rate_table(priv);
if (rc) {
@@ -1998,13 +2072,13 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
reschedule:
queue_delayed_work(priv->workqueue,
- &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ);
+ &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
}
static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- thermal_periodic.work);
+ _3945.thermal_periodic.work);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2140,7 +2214,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
* power peaks, without too much distortion (clipping).
*/
/* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers;
+ clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
/* divide factory saturation power by 2 to find -3dB level */
satur_pwr = (s8) (group->saturation_power >> 1);
@@ -2224,7 +2298,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
/* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers;
+ clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
/* calculate power index *adjustment* value according to
* diff between current temperature and factory temperature */
@@ -2332,7 +2406,7 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
int txq_id = txq->q.id;
- struct iwl3945_shared *shared_data = priv->shared_virt;
+ struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
@@ -2385,6 +2459,30 @@ static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
return (u16)sizeof(struct iwl3945_addsta_cmd);
}
+static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
+ (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
+ CMD_ASYNC);
+ iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
/**
* iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
@@ -2432,7 +2530,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
- if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
+ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
index = IWL_FIRST_CCK_RATE;
@@ -2471,12 +2569,12 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys, GFP_KERNEL);
- if (!priv->shared_virt) {
+ priv->_3945.shared_virt =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->_3945.shared_phys, GFP_KERNEL);
+ if (!priv->_3945.shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
- mutex_unlock(&priv->mutex);
return -ENOMEM;
}
@@ -2537,13 +2635,13 @@ void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
- INIT_DELAYED_WORK(&priv->thermal_periodic,
+ INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
iwl3945_bg_reg_txpower_periodic);
}
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
- cancel_delayed_work(&priv->thermal_periodic);
+ cancel_delayed_work(&priv->_3945.thermal_periodic);
}
/* check contents of special bootstrap uCode SRAM */
@@ -2714,48 +2812,10 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
return 0;
}
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl3945_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl3945_hcmd = {
.rxon_assoc = iwl3945_send_rxon_assoc,
.commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_ucode_ops iwl3945_ucode = {
- .get_header_size = iwl3945_ucode_get_header_size,
- .get_build = iwl3945_ucode_get_build,
- .get_inst_size = iwl3945_ucode_get_inst_size,
- .get_data_size = iwl3945_ucode_get_data_size,
- .get_init_size = iwl3945_ucode_get_init_size,
- .get_init_data_size = iwl3945_ucode_get_init_data_size,
- .get_boot_size = iwl3945_ucode_get_boot_size,
- .get_data = iwl3945_ucode_get_data,
+ .send_bt_config = iwl_send_bt_config,
};
static struct iwl_lib_ops iwl3945_lib = {
@@ -2791,17 +2851,24 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
- .add_bcast_station = iwl3945_add_bcast_station,
+ .manage_ibss_station = iwl3945_manage_ibss_station,
+ .check_plcp_health = iwl3945_good_plcp_health,
+
+ .debugfs_ops = {
+ .rx_stats_read = iwl3945_ucode_rx_stats_read,
+ .tx_stats_read = iwl3945_ucode_tx_stats_read,
+ .general_stats_read = iwl3945_ucode_general_stats_read,
+ },
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+ .request_scan = iwl3945_request_scan,
};
static const struct iwl_ops iwl3945_ops = {
- .ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
.utils = &iwl3945_hcmd_utils,
@@ -2826,7 +2893,10 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2844,7 +2914,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
};
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
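
In the iwl-3945.c hunks above, iwl3945_accumulative_statistics() walks the previous and current statistics blocks as flat arrays of 32-bit counters, recording a per-counter delta, a running accumulation and the largest delta seen, while iwl3945_good_plcp_health() turns the plcp_err delta into an errors-per-100-ms rate and compares it with plcp_delta_threshold. A minimal standalone sketch of that arithmetic, with illustrative array sizes and sample numbers in place of the driver's structures, is:

/*
 * Standalone sketch of the statistics bookkeeping added above: compute
 * per-counter deltas between two snapshots, accumulate them, track the
 * maximum delta, and rate-check one counter against a threshold.
 * Array layout, threshold and sample values are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static void accumulate_stats(const uint32_t *curr, uint32_t *prev,
			     uint32_t *accum, uint32_t *delta,
			     uint32_t *max_delta)
{
	for (int i = 0; i < NUM_COUNTERS; i++) {
		/* Like the driver, ignore roll-over: only count increases. */
		if (curr[i] > prev[i]) {
			delta[i] = curr[i] - prev[i];
			accum[i] += delta[i];
			if (delta[i] > max_delta[i])
				max_delta[i] = delta[i];
		}
		prev[i] = curr[i];	/* keep the snapshot for next time */
	}
}

/* Return false when the error rate (per 100 ms) exceeds the threshold. */
static bool good_plcp_health(uint32_t plcp_delta, unsigned int msec,
			     unsigned int threshold)
{
	if (!msec)		/* avoid division by zero */
		return true;
	return (plcp_delta * 100) / msec <= threshold;
}

int main(void)
{
	uint32_t prev[NUM_COUNTERS] = { 10, 20, 30, 40 };
	uint32_t curr[NUM_COUNTERS] = { 15, 20, 90, 41 };
	uint32_t accum[NUM_COUNTERS] = { 0 }, delta[NUM_COUNTERS] = { 0 };
	uint32_t max_delta[NUM_COUNTERS] = { 0 };

	accumulate_stats(curr, prev, accum, delta, max_delta);
	for (int i = 0; i < NUM_COUNTERS; i++)
		printf("counter %d: delta %u accum %u max %u\n",
		       i, delta[i], accum[i], max_delta[i]);

	/* e.g. 60 plcp errors over 2000 ms against a threshold of 50/100ms */
	printf("plcp health ok: %d\n", good_plcp_health(60, 2000, 50));
	return 0;
}
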
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 452dfd5456c..bb2aeebf365 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -95,7 +95,6 @@ struct iwl3945_rs_sta {
u8 tgg;
u8 flush_pending;
u8 start_rate;
- u8 ibss_sta_added;
struct timer_list rate_scale_flush;
struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -107,7 +106,12 @@ struct iwl3945_rs_sta {
};
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
+ */
struct iwl3945_sta_priv {
+ struct iwl_station_priv_common common;
struct iwl3945_rs_sta rs_sta;
};
@@ -212,13 +216,6 @@ extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-/*
- * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
- * call this... todo... fix that.
-*/
-extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
-
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
@@ -265,10 +262,14 @@ extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl3945_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
+extern void iwl3945_post_associate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+extern void iwl3945_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
/**
* iwl3945_hw_find_station - Find station id for a given BSSID
@@ -287,14 +288,15 @@ extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
- u16 tx_rate, u8 flags);
extern const struct iwl_channel_info *iwl3945_get_channel_info(
const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
+/* scanning */
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
/* Requires full declaration of iwl_priv before including */
#include "iwl-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562e8db..cd4b61ae25b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@
*/
#define IWL49_FIRST_AMPDU_QUEUE 7
-/* Time constants */
-#define SHORT_SLOT_TIME 9
-#define LONG_SLOT_TIME 20
-
-/* RSSI to dBm */
-#define IWL49_RSSI_OFFSET 44
-
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL_NUM_SCAN_RATES (2)
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
* location(s) in command (struct iwl4965_txpowertable_cmd).
*/
-/* Limit range of txpower output target to be between these values */
-#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
-#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
-
/**
* When MIMO is used (2 transmitters operating simultaneously), driver should
* limit each transmitter to deliver a max of 3 dB below the regulatory limit
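As a quick check of the 3 dB figure in the comment above: two transmitters radiating equal power double the combined output, and doubling power is 10*log10(2) ≈ 3.01 dB, so backing each chain off by 3 dB keeps the sum at the regulatory limit. A trivial arithmetic sketch (the 16 dBm limit is taken from the constant removed above; compile with -lm):

/* Sketch: per-chain back-off that keeps 2-chain MIMO at the limit. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double limit_dbm = 16.0;				/* example limit */
	double per_chain = limit_dbm - 10.0 * log10(2.0);	/* ~3.01 dB back-off */
	double total_mw  = 2.0 * pow(10.0, per_chain / 10.0);

	printf("per-chain %.2f dBm, combined %.2f dBm\n",
	       per_chain, 10.0 * log10(total_mw));
	return 0;
}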
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8972166386c..d3afddae8d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,8 @@
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
+#include "iwl-agn.h"
+#include "iwl-agn-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -60,14 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* module parameters */
-static struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
@@ -417,7 +411,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
sizeof(cmd), &cmd);
if (ret)
IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD \n");
+ "REPLY_PHY_CALIBRATION_CMD\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
@@ -502,14 +496,14 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
-static const u16 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
+static const s8 default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
};
static int iwl4965_alive_notify(struct iwl_priv *priv)
@@ -589,9 +583,15 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
/* reset to 0 to enable all the queue first */
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
+
iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
@@ -1613,19 +1613,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
/* get absolute value */
if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
temp_diff = -temp_diff;
} else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp, \n");
+ IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff);
+ IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
return 0;
}
- IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n");
+ IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
return 1;
}
@@ -1874,7 +1874,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@@ -1953,6 +1953,60 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
return 0;
}
+static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+{
+ int i;
+ int start = 0;
+ int ret = IWL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
+ (priv->iw_mode == NL80211_IFTYPE_AP))
+ start = IWL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return priv->hw_params.bcast_sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = start; i < priv->hw_params.max_stations; i++)
+ if (priv->stations[i].used &&
+ (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr))) {
+ ret = i;
+ goto out;
+ }
+
+ IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
+ addr, priv->num_stations);
+
+ out:
+ /*
+ * More commands interacting with stations may arrive before we
+ * have finished processing the addition of this station.
+ */
+ if (ret != IWL_INVALID_STATION &&
+ (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
+ ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
+ (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
+ IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IWL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+}
+
+static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+{
+ if (priv->iw_mode == NL80211_IFTYPE_STATION) {
+ return IWL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return iwl_find_station(priv, da);
+ }
+}
+
/**
* iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
*/
@@ -2014,7 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc)
iwl_free_tfds_in_queue(priv, sta_id,
tid, freed);
@@ -2031,7 +2085,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
+ iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
@@ -2042,7 +2096,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
@@ -2053,10 +2107,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2090,7 +2143,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
@@ -2098,7 +2151,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
}
@@ -2113,50 +2166,13 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->txpower_work);
}
-#define IWL4965_UCODE_GET(item) \
-static u32 iwl4965_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- return le32_to_cpu(ucode->u.v1.item); \
-}
-
-static u32 iwl4965_ucode_get_header_size(u32 api_ver)
-{
- return UCODE_HEADER_SIZE(1);
-}
-static u32 iwl4965_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return 0;
-}
-static u8 *iwl4965_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- return (u8 *) ucode->u.v1.data;
-}
-
-IWL4965_UCODE_GET(inst_size);
-IWL4965_UCODE_GET(data_size);
-IWL4965_UCODE_GET(init_size);
-IWL4965_UCODE_GET(init_data_size);
-IWL4965_UCODE_GET(boot_size);
-
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
+ .send_bt_config = iwl_send_bt_config,
};
-static struct iwl_ucode_ops iwl4965_ucode = {
- .get_header_size = iwl4965_ucode_get_header_size,
- .get_build = iwl4965_ucode_get_build,
- .get_inst_size = iwl4965_ucode_get_inst_size,
- .get_data_size = iwl4965_ucode_get_data_size,
- .get_init_size = iwl4965_ucode_get_init_size,
- .get_init_data_size = iwl4965_ucode_get_init_data_size,
- .get_boot_size = iwl4965_ucode_get_boot_size,
- .get_data = iwl4965_ucode_get_data,
-};
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
@@ -2164,6 +2180,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.gain_computation = iwl4965_gain_computation,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
.calc_rssi = iwl4965_calc_rssi,
+ .request_scan = iwlagn_request_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
@@ -2184,6 +2201,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.load_ucode = iwl4965_load_bsm,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_fh = iwl_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
@@ -2216,11 +2234,16 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .check_plcp_health = iwl_good_plcp_health,
};
static const struct iwl_ops iwl4965_ops = {
- .ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
@@ -2228,7 +2251,7 @@ static const struct iwl_ops iwl4965_ops = {
};
struct iwl_cfg iwl4965_agn_cfg = {
- .name = "4965AGN",
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
.ucode_api_min = IWL4965_UCODE_API_MIN,
@@ -2239,7 +2262,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.ops = &iwl4965_ops,
.num_of_queues = IWL49_NUM_QUEUES,
.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .mod_params = &iwl4965_mod_params,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -2251,27 +2274,20 @@ struct iwl_cfg iwl4965_agn_cfg = {
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .temperature_kelvin = true,
+ .max_event_log_size = 512,
+ .tx_power_by_driver = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(
- disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
-
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-/* 11n */
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-
-module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
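The iwl-4965.c hunk above replaces the HCCA entries in the queue-to-FIFO map with IWL_TX_FIFO_UNUSED sentinels and adds a BUILD_BUG_ON() size check before walking the table, skipping unused slots. A standalone model of that pattern is sketched below; the table values and names are illustrative, not the driver's, and _Static_assert stands in for BUILD_BUG_ON.

/* Sketch: queue-to-FIFO map with an UNUSED sentinel and a size check. */
#include <stdio.h>

#define DEMO_FIFO_UNUSED	(-1)
#define DEMO_CMD_FIFO		4

/* Queue index -> FIFO number; unused queues carry a sentinel. */
static const signed char demo_queue_to_fifo[] = {
	3, 2, 1, 0,			/* VO, VI, BE, BK */
	DEMO_CMD_FIFO,
	DEMO_FIFO_UNUSED,
	DEMO_FIFO_UNUSED,
};

/* Compile-time equivalent of the BUILD_BUG_ON() size check. */
_Static_assert(sizeof(demo_queue_to_fifo) == 7, "table must have 7 entries");

int main(void)
{
	for (unsigned i = 0; i < sizeof(demo_queue_to_fifo); i++) {
		int fifo = demo_queue_to_fifo[i];

		if (fifo == DEMO_FIFO_UNUSED)	/* activate queue, skip mapping */
			continue;
		printf("queue %u -> fifo %d\n", i, fifo);
	}
	return 0;
}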
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032f621..146e6431ae9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@
#ifndef __iwl_5000_hw_h__
#define __iwl_5000_hw_h__
-#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
-
-#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
-
-#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
- IWL50_RTC_INST_LOWER_BOUND)
-#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
- IWL50_RTC_DATA_LOWER_BOUND)
-
-/* EEPROM */
-#define IWL_5000_EEPROM_IMG_SIZE 2048
-
-#define IWL50_CMD_FIFO_NUM 7
-#define IWL50_NUM_QUEUES 20
-#define IWL50_NUM_AMPDU_QUEUES 10
-#define IWL50_FIRST_AMPDU_QUEUE 10
-
/* 5150 only */
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
-/* Fixed (non-configurable) rx data from phy */
-
-/**
- * struct iwl5000_schedq_bc_tbl scheduler byte count table
- * base physical address of iwl5000_shared
- * is provided to SCD_DRAM_BASE_ADDR
- * @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
- */
-struct iwl5000_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __attribute__ ((packed));
-
-
#endif /* __iwl_5000_hw_h__ */
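The retained iwl_temp_calib_to_offset() helper above computes the 5150 calibration offset as temperature - voltage / coefficient, with the coefficient fixed at -5. A standalone sketch of that arithmetic follows; the raw readings are made-up example values, not EEPROM data, and the names are illustrative.

/* Sketch: 5150-style temperature/voltage calibration offset. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_5150_VOLT_TO_TEMP_COEFF	(-5)

static int32_t demo_temp_calib_to_offset(uint16_t temperature, uint16_t voltage)
{
	/* Same arithmetic as the inline helper above: temp - volt / coeff. */
	return (int32_t)(temperature - voltage / DEMO_5150_VOLT_TO_TEMP_COEFF);
}

int main(void)
{
	/* Hypothetical raw calibration words. */
	uint16_t temperature = 420, voltage = 300;

	printf("offset = %d\n", demo_temp_calib_to_offset(temperature, voltage));
	return 0;
}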
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e476acb53aa..a28af7eb67e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@
* file called LICENSE.
*
* Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
@@ -43,9 +44,11 @@
#include "iwl-io.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
+#include "iwl-agn.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-hw.h"
#include "iwl-5000-hw.h"
-#include "iwl-6000-hw.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 2
@@ -63,18 +66,8 @@
#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
-static const u16 iwl5000_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL50_CMD_FIFO_NUM,
- IWL_TX_FIFO_HCCA_1,
- IWL_TX_FIFO_HCCA_2
-};
-
/* NIC configuration for 5000 series */
-void iwl5000_nic_config(struct iwl_priv *priv)
+static void iwl5000_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
u16 radio_cfg;
@@ -107,162 +100,6 @@ void iwl5000_nic_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
-
-/*
- * EEPROM
- */
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
- break;
- default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
- address & INDIRECT_TYPE_MSK);
- break;
- }
-
- /* translate the offset from words to byte */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
-{
- struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- u16 voltage;
- } *hdr;
-
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_CALIB_ALL);
- return hdr->version;
-
-}
-
-static void iwl5000_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i;
- s32 delta_g;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- /*
- * Find Gain Code for the chains based on "default chain"
- */
- for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
- if ((data->disconn_array[i])) {
- data->delta_gain_code[i] = 0;
- continue;
- }
-
- delta_g = (priv->cfg->chain_noise_scale *
- ((s32)average_noise[default_chain] -
- (s32)average_noise[i])) / 1500;
-
- /* bound gain by 2 bits value max, 3rd bit is sign */
- data->delta_gain_code[i] =
- min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- if (delta_g < 0)
- /*
- * set negative sign ...
- * note to Intel developers: This is uCode API format,
- * not the format of any internal device registers.
- * Do not change this format for e.g. 6050 or similar
- * devices. Change format only if more resolution
- * (i.e. more than 2 bits magnitude) is needed.
- */
- data->delta_gain_code[i] |= (1 << 2);
- }
-
- IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
- data->delta_gain_code[1], data->delta_gain_code[2]);
-
- if (!data->radio_write) {
- struct iwl_calib_chain_noise_gain_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.delta_gain_1 = data->delta_gain_code[1];
- cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd, NULL);
-
- data->radio_write = 1;
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-}
-
-static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
- int ret;
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
- struct iwl_calib_chain_noise_reset_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags)
-{
- if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
- else
- *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
-}
-
static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 95,
.max_nrg_cck = 0, /* not used, set to 0 */
@@ -314,14 +151,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
.nrg_th_cca = 62,
};
-const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset)
-{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->eeprom_size);
- return &priv->eeprom[address];
-}
-
static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
@@ -337,356 +166,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
}
-/*
- * Calibration
- */
-static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
-{
- struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
-
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
- cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
-}
-
-static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
-{
- struct iwl_calib_cfg_cmd calib_cfg_cmd;
- struct iwl_host_cmd cmd = {
- .id = CALIBRATION_CFG_CMD,
- .len = sizeof(struct iwl_calib_cfg_cmd),
- .data = &calib_cfg_cmd,
- };
-
- memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
- calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
-
- return iwl_send_cmd(priv, &cmd);
-}
-
-static void iwl5000_rx_calib_result(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
- int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
-
- /* reduce the size of the length field itself */
- len -= 4;
-
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to their
- * index. We sort them here */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
-}
-
-static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/*
- * ucode
- */
-static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
- struct fw_desc *image, u32 dst_addr)
-{
- dma_addr_t phy_addr = image->p_addr;
- u32 byte_cnt = image->len;
- int ret;
-
- priv->ucode_write_complete = 0;
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
-
- iwl_write_direct32(priv,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
-
- iwl_write_direct32(priv,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
-
- IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the %s uCode section due "
- "to interrupt\n", name);
- return ret;
- }
- if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
- name);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, "INST", inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- return iwl5000_load_section(priv, "DATA", data_image,
- IWL50_RTC_DATA_LOWER_BOUND);
-}
-
-int iwl5000_load_ucode(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* check whether init ucode should be loaded, or rather runtime ucode */
- if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
- IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_init, &priv->ucode_init_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
- priv->ucode_type = UCODE_INIT;
- }
- } else {
- IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
- "Loading runtime ucode...\n");
- ret = iwl5000_load_given_ucode(priv,
- &priv->ucode_code, &priv->ucode_data);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
- priv->ucode_type = UCODE_RT;
- }
- }
-
- return ret;
-}
-
-void iwl5000_init_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* initialize uCode was loaded... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- iwl_clear_stations_table(priv);
- ret = priv->cfg->ops->lib->alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition: %d\n", ret);
- goto restart;
- }
-
- iwl5000_send_calib_cfg(priv);
- return;
-
-restart:
- /* real restart (first load init_ucode) */
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
- IWL50_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-int iwl5000_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
- IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
- iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
-
- iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
- /* map qos queues to fifos one-to-one */
- for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
- int ac = iwl5000_default_queue_to_tx_fifo[i];
- iwl_txq_ctx_activate(priv, i);
- iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- /*
- * TODO - need to initialize these queues and map them to FIFOs
- * in the loop above, not only mark them as active. We do this
- * because we want the first aggregation queue to be queue #10,
- * but do not use 8 or 9 otherwise yet.
- */
- iwl_txq_ctx_activate(priv, 7);
- iwl_txq_ctx_activate(priv, 8);
- iwl_txq_ctx_activate(priv, 9);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
-
- iwl_send_wimax_coex(priv);
-
- iwl5000_set_Xtal_calib(priv);
- iwl_send_calib_results(priv);
-
- return 0;
-}
-
-int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -694,13 +177,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = 0;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -717,571 +200,61 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_5150:
- priv->hw_params.sens = &iwl5150_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.sens = &iwl5000_sensitivity;
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
-
- return 0;
-}
-
-/**
- * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
- u8 sec_ctl = 0;
- u8 sta_id = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM) {
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
- }
-
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != IWL_CMD_QUEUE_NUM)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
-static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
-
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
-
- /* enable aggregations for the queue */
- iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw_params.sens = &iwl5000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
-int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
+static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
- <= txq_id)) {
- IWL_ERR(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL50_FIRST_AMPDU_QUEUE,
- IWL50_FIRST_AMPDU_QUEUE +
- priv->cfg->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl5000_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
-}
-
-
-static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
-static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl5000_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = &tx_resp->status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
-
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* # frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- /* FIXME: code repetition */
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- /* FIXME: code repetition end */
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
+ priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc),
- hdr->seq_ctrl);
- return -1;
- }
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl5150_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
- if (bitmap)
- agg->wait_for_ba = 1;
- }
return 0;
}
-static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le16_to_cpu(tx_resp->status.status);
- int tid;
- int sta_id;
- int freed;
-
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
-
- tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
- sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
-
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- scd_ssn , index, txq_id, txq->swq_id);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
- if (agg->state == IWL_AGG_OFF)
- iwl_wake_queue(priv, txq_id);
- else
- iwl_wake_queue(priv, txq->swq_id);
- }
- }
- } else {
- BUG_ON(txq_id != txq->swq_id);
-
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwl_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
- "0x%x retries %d\n",
- txq_id,
- iwl_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq_id);
- }
-
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
-
- if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-/* Currently 5000 is the superset of everything */
-u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
-{
- return len;
-}
-
-void iwl5000_setup_deferred_work(struct iwl_priv *priv)
-{
- /* in 5000 the tx power calibration is done in uCode */
- priv->disable_tx_power_cal = 1;
-}
-
-void iwl5000_rx_handler_setup(struct iwl_priv *priv)
-{
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwl5000_rx_calib_result;
- priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
- iwl5000_rx_calib_complete;
- priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
-}
-
-
-int iwl5000_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL50_RTC_DATA_UPPER_BOUND);
-}
-
-static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl5000_rxon_assoc_cmd rxon_assoc;
- const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
- const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->ofdm_ht_triple_stream_basic_rates ==
- rxon2->ofdm_ht_triple_stream_basic_rates) &&
- (rxon1->acquisition_data == rxon2->acquisition_data) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = priv->staging_rxon.flags;
- rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
- rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
- rxon_assoc.reserved1 = 0;
- rxon_assoc.reserved2 = 0;
- rxon_assoc.reserved3 = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
- rxon_assoc.ofdm_ht_triple_stream_basic_rates =
- priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
- rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
-
- ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
- if (ret)
- return ret;
-
- return ret;
-}
-int iwl5000_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
- u8 tx_ant_cfg_cmd;
-
- /* half dBm need to multiply */
- tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
-
- if (priv->tx_power_lmt_in_half_dbm &&
- priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
- /*
- * For the newer devices which using enhanced/extend tx power
- * table in EEPROM, the format is in half dBm. driver need to
- * convert to dBm format before report to mac80211.
- * By doing so, there is a possibility of 1/2 dBm resolution
- * lost. driver will perform "round-up" operation before
- * reporting, but it will cause 1/2 dBm tx power over the
- * regulatory limit. Perform the checking here, if the
- * "tx_power_user_lmt" is higher than EEPROM value (in
- * half-dBm format), lower the tx power based on EEPROM
- */
- tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
- }
- tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
- tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
-
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
- else
- tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
-
- return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
- sizeof(tx_power_cmd), &tx_power_cmd,
- NULL);
-}
-
-void iwl5000_temperature(struct iwl_priv *priv)
-{
- /* store temperature from statistics (in Celsius) */
- priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
- iwl_tt_handler(priv);
-}
-
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
@@ -1294,100 +267,6 @@ static void iwl5150_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-/* Calc max signal level (dBm) among 3 possible receivers */
-int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host
- */
- struct iwl5000_non_cfg_phy *ncphy =
- (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
- u8 agc;
-
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
- agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
- rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
- rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
- val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
- rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
-
- max_rssi = max_t(u32, rssi_a, rssi_b);
- max_rssi = max_t(u32, max_rssi, rssi_c);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- rssi_a, rssi_b, rssi_c, max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL49_RSSI_OFFSET;
-}
-
-static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
-{
- struct iwl_tx_ant_config_cmd tx_ant_cmd = {
- .valid = cpu_to_le32(valid_tx_ant),
- };
-
- if (IWL_UCODE_API(priv->ucode_ver) > 1) {
- IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
- sizeof(struct iwl_tx_ant_config_cmd),
- &tx_ant_cmd);
- } else {
- IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
-
-#define IWL5000_UCODE_GET(item) \
-static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
- u32 api_ver) \
-{ \
- if (api_ver <= 2) \
- return le32_to_cpu(ucode->u.v1.item); \
- return le32_to_cpu(ucode->u.v2.item); \
-}
-
-static u32 iwl5000_ucode_get_header_size(u32 api_ver)
-{
- if (api_ver <= 2)
- return UCODE_HEADER_SIZE(1);
- return UCODE_HEADER_SIZE(2);
-}
-
-static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return 0;
- return le32_to_cpu(ucode->u.v2.build);
-}
-
-static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode,
- u32 api_ver)
-{
- if (api_ver <= 2)
- return (u8 *) ucode->u.v1.data;
- return (u8 *) ucode->u.v2.data;
-}
-
-IWL5000_UCODE_GET(inst_size);
-IWL5000_UCODE_GET(data_size);
-IWL5000_UCODE_GET(init_size);
-IWL5000_UCODE_GET(init_data_size);
-IWL5000_UCODE_GET(boot_size);
-
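The deleted IWL5000_UCODE_GET() macro stamps out one getter per firmware-header field, selecting the v1 or v2 union layout by API version. A simplified standalone sketch of that accessor-generating pattern (the header layout below is a stand-in, not the real struct iwl_ucode_header):

    /* Illustrative only: version-keyed accessor generation. */
    #include <stdio.h>
    #include <stdint.h>

    struct fw_header {
            union {
                    struct { uint32_t inst_size, data_size; } v1;
                    struct { uint32_t build, inst_size, data_size; } v2;
            } u;
    };

    /* One macro generates a getter per field, switching layouts on api_ver. */
    #define FW_GET(item)                                                      \
    static uint32_t fw_get_##item(const struct fw_header *hdr, uint32_t api_ver) \
    {                                                                         \
            if (api_ver <= 2)                                                 \
                    return hdr->u.v1.item;                                    \
            return hdr->u.v2.item;                                            \
    }

    FW_GET(inst_size)
    FW_GET(data_size)

    int main(void)
    {
            struct fw_header h = { .u.v2 = { .build = 7, .inst_size = 1024,
                                             .data_size = 512 } };
            printf("inst=%u data=%u\n", (unsigned)fw_get_inst_size(&h, 3),
                   (unsigned)fw_get_data_size(&h, 3));
            return 0;
    }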
static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
struct iwl5000_channel_switch_cmd cmd;
@@ -1420,54 +299,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
return iwl_send_cmd_sync(priv, &hcmd);
}
-struct iwl_hcmd_ops iwl5000_hcmd = {
- .rxon_assoc = iwl5000_send_rxon_assoc,
- .commit_rxon = iwl_commit_rxon,
- .set_rxon_chain = iwl_set_rxon_chain,
- .set_tx_ant = iwl5000_send_tx_ant_config,
-};
-
-struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .gain_computation = iwl5000_gain_computation,
- .chain_noise_reset = iwl5000_chain_noise_reset,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
-};
-
-struct iwl_ucode_ops iwl5000_ucode = {
- .get_header_size = iwl5000_ucode_get_header_size,
- .get_build = iwl5000_ucode_get_build,
- .get_inst_size = iwl5000_ucode_get_inst_size,
- .get_data_size = iwl5000_ucode_get_data_size,
- .get_init_size = iwl5000_ucode_get_init_size,
- .get_init_data_size = iwl5000_ucode_get_init_data_size,
- .get_boot_size = iwl5000_ucode_get_boot_size,
- .get_data = iwl5000_ucode_get_data,
-};
-
-struct iwl_lib_ops iwl5000_lib = {
+static struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1478,50 +330,58 @@ struct iwl_lib_ops iwl5000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static struct iwl_lib_ops iwl5150_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl5150_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
- .load_ucode = iwl5000_load_ucode,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .load_ucode = iwlagn_load_ucode,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
.apm_ops = {
@@ -1532,19 +392,19 @@ static struct iwl_lib_ops iwl5150_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
- EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
@@ -1553,45 +413,44 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl5000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static const struct iwl_ops iwl5150_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
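iwl5000_ops and iwl5150_ops select per-device behaviour through nested tables of function pointers, so both variants can share the common iwlagn_* handlers and override only what differs (here set_hw_params and temperature). A minimal sketch of that dispatch pattern, with hypothetical names:

    /* Illustrative only: ops-table dispatch as used by iwl5000_ops/iwl5150_ops. */
    #include <stdio.h>

    struct device_ctx { int temperature; };

    struct lib_ops {
            void (*temperature)(struct device_ctx *ctx);    /* variant hook */
    };

    struct device_ops {
            const struct lib_ops *lib;      /* shared or per-variant table */
    };

    /* Shared handler, analogous to the common iwlagn_* callbacks. */
    static void common_temperature(struct device_ctx *ctx)
    {
            ctx->temperature = 35;
    }

    /* Variant override, analogous to iwl5150_temperature(). */
    static void variant_b_temperature(struct device_ctx *ctx)
    {
            ctx->temperature = 35 - 5;      /* this chip applies an offset */
    }

    static const struct lib_ops variant_a_lib = { .temperature = common_temperature };
    static const struct lib_ops variant_b_lib = { .temperature = variant_b_temperature };
    static const struct device_ops variant_a_ops = { .lib = &variant_a_lib };
    static const struct device_ops variant_b_ops = { .lib = &variant_b_lib };

    static void read_temperature(struct device_ctx *ctx, const struct device_ops *ops)
    {
            ops->lib->temperature(ctx);     /* core code never names the variant */
    }

    int main(void)
    {
            struct device_ctx a = { 0 }, b = { 0 };

            read_temperature(&a, &variant_a_ops);
            read_temperature(&b, &variant_b_ops);
            printf("a=%d b=%d\n", a.temperature, b.temperature);
            return 0;
    }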
-struct iwl_mod_params iwl50_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-
struct iwl_cfg iwl5300_agn_cfg = {
- .name = "5300AGN",
+ .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1603,21 +462,26 @@ struct iwl_cfg iwl5300_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
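Each SKU above gets its own struct iwl_cfg that differs only in data (marketing name, firmware prefix, antenna masks, and the new monitor/event-log tunables) while pointing at shared ops and mod_params. A compact sketch of that data-driven configuration pattern, with hypothetical names:

    /* Illustrative only: per-SKU config tables sharing one ops object. */
    #include <stdio.h>

    struct demo_ops { const char *family; };

    struct demo_cfg {
            const char *name;               /* marketing name, as in .name */
            const char *fw_name_pre;        /* firmware file prefix */
            unsigned int valid_tx_ant;      /* antenna bitmask */
            const struct demo_ops *ops;     /* shared behaviour table */
    };

    static const struct demo_ops agn_ops = { .family = "agn" };

    static const struct demo_cfg cfg_a = {
            .name = "Demo 5300 AGN", .fw_name_pre = "demo-5000-",
            .valid_tx_ant = 0x7 /* A|B|C */, .ops = &agn_ops,
    };

    static const struct demo_cfg cfg_b = {
            .name = "Demo 5100 AGN", .fw_name_pre = "demo-5000-",
            .valid_tx_ant = 0x2 /* B only */, .ops = &agn_ops,
    };

    static void probe(const struct demo_cfg *cfg)
    {
            printf("%s: fw prefix %s, ops %s, tx antennas 0x%x\n",
                   cfg->name, cfg->fw_name_pre, cfg->ops->family,
                   cfg->valid_tx_ant);
    }

    int main(void)
    {
            probe(&cfg_a);
            probe(&cfg_b);
            return 0;
    }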
struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "5100BGN",
+ .name = "Intel(R) WiFi Link 5100 BGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1629,21 +493,26 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_abg_cfg = {
- .name = "5100ABG",
+ .name = "Intel(R) WiFi Link 5100 ABG",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1653,21 +522,26 @@ struct iwl_cfg iwl5100_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5100_agn_cfg = {
- .name = "5100AGN",
+ .name = "Intel(R) WiFi Link 5100 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_B,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1679,21 +553,26 @@ struct iwl_cfg iwl5100_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5350_agn_cfg = {
- .name = "5350AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5000_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1705,21 +584,26 @@ struct iwl_cfg iwl5350_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_agn_cfg = {
- .name = "5150AGN",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1731,21 +615,26 @@ struct iwl_cfg iwl5150_agn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl5150_abg_cfg = {
- .name = "5150ABG",
+ .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
.fw_name_pre = IWL5150_FW_PRE,
.ucode_api_max = IWL5150_UCODE_API_MAX,
.ucode_api_min = IWL5150_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
.ops = &iwl5150_ops,
- .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
+ .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_A,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1755,20 +644,12 @@ struct iwl_cfg iwl5150_abg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
-
-module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
-MODULE_PARM_DESC(swcrypto50,
- "using software crypto engine (default 0 [hardware])\n");
-module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
-module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
-module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
-module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
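The removed module_param_named() lines bound the 50xx-only parameters (swcrypto50, queues_num50, ...) to fields of iwl50_mod_params; with the shared iwlagn module parameters these duplicates go away. A minimal kernel-module sketch of the same binding pattern (hypothetical names; this is not the replacement iwlagn code):

    /* Illustrative only: binding module parameters to a params struct. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    struct demo_mod_params {
            int sw_crypto;
            int num_of_queues;
    };

    static struct demo_mod_params demo_params; /* unset fields default to 0 */

    module_param_named(swcrypto, demo_params.sw_crypto, int, 0444);
    MODULE_PARM_DESC(swcrypto, "use the software crypto engine (default 0)");
    module_param_named(queues_num, demo_params.num_of_queues, int, 0444);
    MODULE_PARM_DESC(queues_num, "number of hw queues");

    static int __init demo_init(void) { return 0; }
    static void __exit demo_exit(void) { }
    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");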
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 92b3e64fc14..9fbf54cd3e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -42,18 +42,22 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
+#include "iwl-agn.h"
#include "iwl-helpers.h"
-#include "iwl-5000-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 4
+#define IWL6000G2_UCODE_API_MAX 4
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -63,6 +67,11 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
+#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
+#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+
+
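The new IWL6000G2A firmware macros above build the .ucode file name by stringizing the numeric API level and relying on string-literal concatenation; the two-level macro exists so the argument is macro-expanded before '#' stringizes it. A standalone sketch of that preprocessor pattern (prefix and API value are stand-ins):

    /* Illustrative only: firmware name built by stringize + concatenation. */
    #include <stdio.h>

    #define FW_PRE                    "iwlwifi-6000g2a-"
    #define _MODULE_FIRMWARE(api)     FW_PRE #api ".ucode"
    #define MODULE_FIRMWARE_NAME(api) _MODULE_FIRMWARE(api) /* expand 'api' first */

    #define UCODE_API_MAX 4

    int main(void)
    {
            /* Prints "iwlwifi-6000g2a-4.ucode" */
            puts(MODULE_FIRMWARE_NAME(UCODE_API_MAX));
            return 0;
    }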
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
/* want Celsius */
@@ -136,7 +145,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
@@ -144,7 +153,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
- sizeof(struct iwl5000_scd_bc_tbl);
+ sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL5000_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -168,24 +177,56 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_6x50:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_DC) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
-
- break;
- default:
- priv->hw_params.calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- break;
- }
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
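calib_init_cfg is a bitmask composed of BIT(IWL_CALIB_*) flags; the reworked 6000 path above always uses the no-DC set, while the new iwl6050_hw_set_hw_params() below adds BIT(IWL_CALIB_DC). A standalone sketch of composing and testing such a mask (the enum values are hypothetical stand-ins for IWL_CALIB_*):

    /* Illustrative only: composing and testing a calibration bitmask. */
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum calib { CALIB_XTAL, CALIB_DC, CALIB_LO, CALIB_TX_IQ, CALIB_BASE_BAND };

    int main(void)
    {
            /* 6000-series init set (no DC calibration)... */
            unsigned int cfg_6000 = BIT(CALIB_XTAL) | BIT(CALIB_LO) |
                                    BIT(CALIB_TX_IQ) | BIT(CALIB_BASE_BAND);
            /* ...while the 6050 set adds DC calibration. */
            unsigned int cfg_6050 = cfg_6000 | BIT(CALIB_DC);

            printf("6000 runs DC calib: %s\n",
                   (cfg_6000 & BIT(CALIB_DC)) ? "yes" : "no");
            printf("6050 runs DC calib: %s\n",
                   (cfg_6050 & BIT(CALIB_DC)) ? "yes" : "no");
            return 0;
    }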
+static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl6000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
return 0;
}
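Both hw_set_hw_params variants accept the num_of_queues module parameter only when it falls inside the supported range; otherwise the cfg default stands. A standalone sketch of that validation (the bounds are made-up stand-ins for IWL_MIN_NUM_QUEUES and IWLAGN_NUM_QUEUES):

    /* Illustrative only: accept a user queue count only when in range. */
    #include <stdio.h>

    #define MIN_NUM_QUEUES 10   /* assumed stand-in for IWL_MIN_NUM_QUEUES */
    #define MAX_NUM_QUEUES 20   /* assumed stand-in for IWLAGN_NUM_QUEUES */

    static int effective_num_queues(int cfg_default, int mod_param)
    {
            if (mod_param >= MIN_NUM_QUEUES && mod_param <= MAX_NUM_QUEUES)
                    return mod_param;   /* user override accepted */
            return cfg_default;         /* otherwise keep the cfg default */
    }

    int main(void)
    {
            printf("%d\n", effective_num_queues(20, 16));  /* -> 16 */
            printf("%d\n", effective_num_queues(20, 64));  /* -> 20 */
            return 0;
    }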
@@ -225,25 +266,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
static struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -254,60 +295,67 @@ static struct iwl_lib_ops iwl6000_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6000_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
static struct iwl_lib_ops iwl6050_lib = {
- .set_hw_params = iwl6000_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
- .txq_set_sched = iwl5000_txq_set_sched,
- .txq_agg_enable = iwl5000_txq_agg_enable,
- .txq_agg_disable = iwl5000_txq_agg_disable,
+ .set_hw_params = iwl6050_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
- .rx_handler_setup = iwl5000_rx_handler_setup,
- .setup_deferred_work = iwl5000_setup_deferred_work,
- .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
- .load_ucode = iwl5000_load_ucode,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
- .init_alive_start = iwl5000_init_alive_start,
- .alive_notify = iwl5000_alive_notify,
- .send_tx_power = iwl5000_send_tx_power,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
@@ -318,45 +366,90 @@ static struct iwl_lib_ops iwl6050_lib = {
},
.eeprom_ops = {
.regulatory_bands = {
- EEPROM_5000_REG_BAND_1_CHANNELS,
- EEPROM_5000_REG_BAND_2_CHANNELS,
- EEPROM_5000_REG_BAND_3_CHANNELS,
- EEPROM_5000_REG_BAND_4_CHANNELS,
- EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl5000_eeprom_calib_version,
- .query_addr = iwl5000_eeprom_query_addr,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
- .temperature = iwl5000_temperature,
+ .temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
.set_calib_version = iwl6050_set_calib_version,
},
- .add_bcast_station = iwl_add_bcast_station,
+ .manage_ibss_station = iwlagn_manage_ibss_station,
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ },
+ .recover_from_tx_stall = iwl_bg_monitor_recover,
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
};
static const struct iwl_ops iwl6050_ops = {
- .ucode = &iwl5000_ucode,
.lib = &iwl6050_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl5000_hcmd_utils,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
+
+struct iwl_cfg iwl6000g2a_2agn_cfg = {
+ .name = "6000 Series 2x2 AGN Gen2a",
+ .fw_name_pre = IWL6000G2A_FW_PRE,
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX,
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN,
+ .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
+ .ops = &iwl6000_ops,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_AB,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .pa_type = IWL_PA_SYSTEM,
+ .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+ .shadow_ram_support = true,
+ .ht_greenfield_support = true,
+ .led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .supports_idle = true,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
/*
* "i": Internal configuration, use internal Power Amplifier
*/
struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "6000 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -364,10 +457,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -385,10 +478,15 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "6000 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -396,10 +494,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -408,7 +506,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -416,10 +513,15 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "6000 Series 2x2 BG",
+ .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -427,10 +529,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_BC,
.valid_rx_ant = ANT_BC,
.pll_cfg_val = 0,
@@ -439,7 +541,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.pa_type = IWL_PA_INTERNAL,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -447,10 +548,15 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "6050 Series 2x2 AGN",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -458,10 +564,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -479,10 +585,15 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "6050 Series 2x2 ABG",
+ .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -490,10 +601,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ops = &iwl6050_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_AB,
.pll_cfg_val = 0,
@@ -502,7 +613,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.pa_type = IWL_PA_SYSTEM,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
- .ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.supports_idle = true,
@@ -510,10 +620,15 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "6000 Series 3x3 AGN",
+ .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -521,10 +636,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .num_of_queues = IWL50_NUM_QUEUES,
- .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
- .mod_params = &iwl50_mod_params,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_ABC,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
@@ -542,7 +657,13 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
+ .monitor_recover_period = IWL_MONITORING_PERIOD,
+ .max_event_log_size = 1024,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
new file mode 100644
index 00000000000..48c023b4ca3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -0,0 +1,850 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+
+#include "iwl-agn-debugfs.h"
+
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+ {
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct statistics_rx_non_phy *general, *accum_general;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here come from the most recent
+ * statistics notification from the uCode and may not reflect
+ * the uCode's current activity.
+ */
+ ofdm = &priv->statistics.rx.ofdm;
+ cck = &priv->statistics.rx.cck;
+ general = &priv->statistics.rx.general;
+ ht = &priv->statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->accum_statistics.rx.ofdm;
+ accum_cck = &priv->accum_statistics.rx.cck;
+ accum_general = &priv->accum_statistics.rx.general;
+ accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:", le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+ max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err, delta_cck->overrun_err,
+ max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+ max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout, delta_cck->fina_timeout,
+ max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill),
+ accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err),
+ accum_cck->mh_format_err, delta_cck->mh_format_err,
+ max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts, delta_general->bogus_cts,
+ max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack, delta_general->bogus_ack,
+ max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err),
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good),
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt),
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
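[Editor's note] The read handler above (and the Tx/General variants that follow) all use the same debugfs pattern: allocate a bounded kernel buffer, accumulate formatted text with scnprintf(), then hand the result to userspace with simple_read_from_buffer(). A minimal sketch of that pattern, assuming an ordinary kernel-module context; the handler name, counter values and buffer size below are illustrative, not the driver's:

    #include <linux/kernel.h>
    #include <linux/fs.h>
    #include <linux/slab.h>

    /* Hypothetical handler showing the pattern only. */
    static ssize_t example_stats_read(struct file *file, char __user *user_buf,
    				  size_t count, loff_t *ppos)
    {
    	u32 cur = 1, accum = 2, delta = 3, max = 4;
    	int pos = 0, bufsz = 256;
    	ssize_t ret;
    	char *buf;

    	buf = kzalloc(bufsz, GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	/* scnprintf() never writes past bufsz - pos and returns how many
    	 * characters were actually stored, so pos can be accumulated. */
    	pos += scnprintf(buf + pos, bufsz - pos,
    			 " %-30s %10u %10u %10u %10u\n",
    			 "example_counter:", cur, accum, delta, max);

    	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
    	kfree(buf);
    	return ret;
    }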
+
+ssize_t iwl_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The statistics displayed here are based on the last statistics
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+ tx = &priv->statistics.tx;
+ accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+				 "accumulative       delta         max\n",
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt),
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout),
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg underrun:",
+ le32_to_cpu(tx->agg.underrun),
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "tx power: (1/2 dB step)\n");
+ if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna A: 0x%X\n",
+ tx->tx_power.ant_a);
+ if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna B: 0x%X\n",
+ tx->tx_power.ant_b);
+ if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tantenna C: 0x%X\n",
+ tx->tx_power.ant_c);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct statistics_general *general, *accum_general;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The statistics displayed here are based on the last statistics
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+ general = &priv->statistics.general;
+ dbg = &priv->statistics.general.dbg;
+ div = &priv->statistics.general.div;
+ accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
+ accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
+ accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
+ pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
+				 "accumulative       delta         max\n",
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature:",
+ le32_to_cpu(general->temperature));
+ pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
+ "temperature_m:",
+ le32_to_cpu(general->temperature_m));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
new file mode 100644
index 00000000000..59b1f25f0d8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+#else
+static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
new file mode 100644
index 00000000000..44ef5d93bef
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn.h"
+
+static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+{
+ int ret = 0;
+ struct iwl5000_rxon_assoc_cmd rxon_assoc;
+ const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
+ const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+
+ if ((rxon1->flags == rxon2->flags) &&
+ (rxon1->filter_flags == rxon2->filter_flags) &&
+ (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+ (rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates) &&
+ (rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates) &&
+ (rxon1->ofdm_ht_triple_stream_basic_rates ==
+ rxon2->ofdm_ht_triple_stream_basic_rates) &&
+ (rxon1->acquisition_data == rxon2->acquisition_data) &&
+ (rxon1->rx_chain == rxon2->rx_chain) &&
+ (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+ IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = priv->staging_rxon.flags;
+ rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
+ rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+ rxon_assoc.reserved1 = 0;
+ rxon_assoc.reserved2 = 0;
+ rxon_assoc.reserved3 = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+ rxon_assoc.ofdm_ht_triple_stream_basic_rates =
+ priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
+ rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
+
+ ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+ sizeof(rxon_assoc), &rxon_assoc, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+ IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
+ sizeof(struct iwl_tx_ant_config_cmd),
+ &tx_ant_cmd);
+ } else {
+ IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Currently this is the superset of everything */
+static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ return len;
+}
+
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+	/* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
+static void iwlagn_gain_computation(struct iwl_priv *priv,
+ u32 average_noise[NUM_RX_CHAINS],
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i;
+ s32 delta_g;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ /*
+ * Find Gain Code for the chains based on "default chain"
+ */
+ for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
+ if ((data->disconn_array[i])) {
+ data->delta_gain_code[i] = 0;
+ continue;
+ }
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
+ (s32)average_noise[i])) / 1500;
+
+ /* bound gain by 2 bits value max, 3rd bit is sign */
+ data->delta_gain_code[i] =
+ min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ if (delta_g < 0)
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
+ data->delta_gain_code[i] |= (1 << 2);
+ }
+
+ IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
+ data->delta_gain_code[1], data->delta_gain_code[2]);
+
+ if (!data->radio_write) {
+ struct iwl_calib_chain_noise_gain_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.delta_gain_1 = data->delta_gain_code[1];
+ cmd.delta_gain_2 = data->delta_gain_code[2];
+ iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd, NULL);
+
+ data->radio_write = 1;
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+}
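[Editor's note] As the inline comment in iwlagn_gain_computation() explains, each delta gain code packs a clamped 2-bit magnitude with a sign flag in bit 2. A small standalone sketch of that encoding; the limit constant is an assumption for the demo, not the driver's CHAIN_NOISE_MAX_DELTA_GAIN_CODE:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MAX_DELTA_GAIN_CODE 3	/* 2-bit magnitude limit (assumed) */

    static unsigned char encode_delta_gain(int delta_g)
    {
    	unsigned char code = abs(delta_g);

    	if (code > DEMO_MAX_DELTA_GAIN_CODE)
    		code = DEMO_MAX_DELTA_GAIN_CODE;	/* clamp magnitude */
    	if (delta_g < 0)
    		code |= 1 << 2;				/* sign bit */
    	return code;
    }

    int main(void)
    {
    	printf("+2 -> 0x%x, -2 -> 0x%x, -7 -> 0x%x\n",
    	       encode_delta_gain(2), encode_delta_gain(-2),
    	       encode_delta_gain(-7));
    	return 0;
    }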
+
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+{
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ int ret;
+
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
+ struct iwl_calib_chain_noise_reset_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv,
+ "Could not send REPLY_PHY_CALIBRATION_CMD\n");
+ data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+ IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+ }
+}
+
+static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
+ __le32 *tx_flags)
+{
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
+ (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+ *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
+ else
+ *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwl5000_non_cfg_phy *ncphy =
+ (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
+ agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
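[Editor's note] The conversion above reduces to: take the largest of the three per-chain DSP RSSI values, then subtract the AGC gain and a fixed offset. A standalone sketch of that arithmetic; the offset mirrors IWLAGN_RSSI_OFFSET but is repeated locally only so the example compiles on its own:

    #include <stdio.h>

    #define DEMO_RSSI_OFFSET 44

    static int demo_calc_rssi(unsigned rssi_a, unsigned rssi_b,
    			  unsigned rssi_c, unsigned agc)
    {
    	unsigned max_rssi = rssi_a;

    	if (rssi_b > max_rssi)
    		max_rssi = rssi_b;
    	if (rssi_c > max_rssi)
    		max_rssi = rssi_c;

    	/* Higher AGC (more radio gain) means a weaker received signal. */
    	return (int)max_rssi - (int)agc - DEMO_RSSI_OFFSET;
    }

    int main(void)
    {
    	printf("dBm = %d\n", demo_calc_rssi(97, 89, 92, 30));
    	return 0;
    }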
+
+struct iwl_hcmd_ops iwlagn_hcmd = {
+ .rxon_assoc = iwlagn_send_rxon_assoc,
+ .commit_rxon = iwl_commit_rxon,
+ .set_rxon_chain = iwl_set_rxon_chain,
+ .set_tx_ant = iwlagn_send_tx_ant_config,
+ .send_bt_config = iwl_send_bt_config,
+};
+
+struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
+ .get_hcmd_size = iwlagn_get_hcmd_size,
+ .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
+ .gain_computation = iwlagn_gain_computation,
+ .chain_noise_reset = iwlagn_chain_noise_reset,
+ .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+ .calc_rssi = iwlagn_calc_rssi,
+ .request_scan = iwlagn_request_scan,
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 00000000000..f9a3fbb6338
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,118 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
+ */
+
+#ifndef __iwl_agn_hw_h__
+#define __iwl_agn_hw_h__
+
+#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
+#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
+
+#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
+#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
+
+#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
+ IWLAGN_RTC_INST_LOWER_BOUND)
+#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
+ IWLAGN_RTC_DATA_LOWER_BOUND)
+
+/* RSSI to dBm */
+#define IWLAGN_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IWLAGN_DEFAULT_TX_RETRY 15
+
+/* Limit range of txpower output target to be between these values */
+#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
+#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
+
+/* EEPROM */
+#define IWLAGN_EEPROM_IMG_SIZE 2048
+
+#define IWLAGN_CMD_FIFO_NUM 7
+#define IWLAGN_NUM_QUEUES 20
+#define IWLAGN_NUM_AMPDU_QUEUES 10
+#define IWLAGN_FIRST_AMPDU_QUEUE 10
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * base physical address provided by SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ */
+struct iwlagn_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __attribute__ ((packed));
+
+
+#endif /* __iwl_agn_hw_h__ */
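[Editor's note] The kernel-doc above describes each 16-bit tfd_offset entry as a tx command byte count plus a station index packed together. A standalone sketch of one plausible packing (low 12 bits for the byte count, top 4 bits for the station index); this is an illustration of the described layout, not the driver's actual accessor:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field split: bits 0-11 byte count, bits 12-15 station index. */
    static uint16_t pack_bc_entry(uint16_t byte_cnt, uint8_t sta_id)
    {
    	return (uint16_t)((byte_cnt & 0x0fff) | ((sta_id & 0x0f) << 12));
    }

    int main(void)
    {
    	uint16_t entry = pack_bc_entry(300, 5);

    	printf("entry=0x%04x byte_cnt=%u sta=%u\n",
    	       entry, entry & 0x0fff, entry >> 12);
    	return 0;
    }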
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
new file mode 100644
index 00000000000..c92b2c0cbd9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -0,0 +1,308 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+ if (priv->_agn.ict_tbl_vir) {
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->_agn.ict_tbl_vir,
+ priv->_agn.ict_tbl_dma);
+ priv->_agn.ict_tbl_vir = NULL;
+ }
+}
+
+
+/* Allocate the DRAM shared table; it must be PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+ if (priv->cfg->use_isr_legacy)
+ return 0;
+	/* allocate shared data table */
+ priv->_agn.ict_tbl_vir =
+ dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->_agn.ict_tbl_dma, GFP_KERNEL);
+ if (!priv->_agn.ict_tbl_vir)
+ return -ENOMEM;
+
+	/* align table to PAGE_SIZE boundary */
+ priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
+
+ IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)priv->_agn.ict_tbl_dma,
+ (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
+ (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
+
+ IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+ priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
+ (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(priv->_agn.ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ priv->_agn.ict_index = 0;
+
+ /* add periodic RX interrupt */
+ priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
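[Editor's note] The allocation above reserves one extra PAGE_SIZE of slack so that a PAGE_SIZE-aligned DMA address can be carved out of the buffer, and then advances the CPU-side pointer by the same number of bytes. A standalone sketch of that alignment arithmetic with made-up values; DEMO_ALIGN follows the kernel's round-up ALIGN() semantics:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096ULL
    #define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	uint64_t dma = 0x12345678ULL;			/* unaligned DMA address */
    	uint64_t aligned = DEMO_ALIGN(dma, DEMO_PAGE_SIZE);
    	uint64_t diff = aligned - dma;			/* same shift applied to the CPU pointer */

    	printf("dma=0x%llx aligned=0x%llx diff=%llu bytes\n",
    	       (unsigned long long)dma, (unsigned long long)aligned,
    	       (unsigned long long)diff);
    	return 0;
    }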
+
+/* The device is going up: inform it that the ICT interrupt table will be
+ * used, and switch the driver over to ICT interrupt handling.
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!priv->_agn.ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+
+ memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
+
+ iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+ priv->_agn.use_ict = true;
+ priv->_agn.ict_index = 0;
+ iwl_write32(priv, CSR_INT, priv->inta_mask);
+ iwl_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* The device is going down: disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->_agn.use_ict = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ priv->_agn.inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+ /* re-enable interrupts here since we don't have anything to service. */
+	/* only re-enable if disabled by irq and no tasklet was scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table. With this handler the driver stops
+ * reading the (expensive) INTA register to learn about device interrupts.
+ * Instead, the device writes interrupt causes into the ICT DRAM table,
+ * increments its index and fires an interrupt to the driver. The driver ORs
+ * all ICT table entries from the current index up to the first entry with a
+ * 0 value; the result is the set of interrupts to service. The driver then
+ * sets the consumed entries back to 0 and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!priv)
+ return IRQ_NONE;
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (!priv->_agn.use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+	/* read all non-zero entries, starting at ict_index */
+ while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
+
+ val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
+ IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+ priv->_agn.ict_index,
+ le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
+ priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
+ priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
+ ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+	/*
+	 * This is a workaround for a hardware bug. The bug may cause the Rx
+	 * bit (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
+	 */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= priv->inta_mask;
+ priv->_agn.inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
+		/* Re-enable interrupts only if they were disabled by this
+		 * handler and no tasklet was scheduled; if a tasklet is
+		 * pending, it will re-enable them itself.
+		 */
+ iwl_enable_interrupts(priv);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+	/* re-enable interrupts here since we don't have anything to service.
+	 * only re-enable if disabled by irq.
+	 */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
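[Editor's note] The post-processing in iwl_isr_ict() has three steps: OR the ICT entries together up to the first zero, apply the Rx-bit workaround (bits 18/19 imply bit 15), then spread the two low bytes of the accumulated value back into the 32-bit INTA layout. A standalone sketch of those steps with made-up table contents:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ict_to_inta(const uint32_t *entries, int n)
    {
    	uint32_t val = 0;
    	int i;

    	for (i = 0; i < n && entries[i]; i++)	/* stop at the first 0 entry */
    		val |= entries[i];

    	if (val & 0xC0000)			/* h/w bug workaround: bits 18/19 set => Rx bit set */
    		val |= 0x8000;

    	/* low byte stays put, second byte is shifted up to bits 24-31 */
    	return (0xff & val) | ((0xff00 & val) << 16);
    }

    int main(void)
    {
    	uint32_t tbl[] = { 0x00000002, 0x00048000, 0 };

    	printf("inta=0x%08x\n", ict_to_inta(tbl, 3));
    	return 0;
    }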
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 00000000000..1004cfc403b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,1530 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+#include "iwl-sta.h"
+
+static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
+
+static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl5000_tx_resp *tx_resp,
+ int txq_id, u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = &tx_resp->status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+
+ if (agg->wait_for_ba)
+ IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* # frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ /* FIXME: code repetition */
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
+
+ /* FIXME: code repetition end */
+
+ IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
+ status & 0xff, tx_resp->failure_frame);
+ IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx window */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_INDEX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n",
+ idx, SEQ_TO_SN(sc),
+ hdr->seq_ctrl);
+ return -1;
+ }
+
+ IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
+ i, idx, SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
+ start, (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
+ }
+}
+
+static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le16_to_cpu(tx_resp->status.status);
+ int tid;
+ int sta_id;
+ int freed;
+
+ if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
+ "is out of range [0-%d] %d %d\n", txq_id,
+ index, txq->q.n_bd, txq->q.write_ptr,
+ txq->q.read_ptr);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+
+ tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
+ sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
+
+ if (txq->sched_retry) {
+ const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
+ struct iwl_ht_agg *agg = NULL;
+
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
+ "scd_ssn=%d idx=%d txq=%d swq=%d\n",
+ scd_ssn , index, txq_id, txq->swq_id);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
+ if (agg->state == IWL_AGG_OFF)
+ iwl_wake_queue(priv, txq_id);
+ else
+ iwl_wake_queue(priv, txq->swq_id);
+ }
+ }
+ } else {
+ BUG_ON(txq_id != txq->swq_id);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= iwl_tx_status_to_mac80211(status);
+ iwlagn_hwrate_to_tx_control(priv,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
+ "0x%x retries %d\n",
+ txq_id,
+ iwl_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if (priv->mac80211_registered &&
+ (iwl_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_wake_queue(priv, txq_id);
+ }
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+
+ iwl_check_abort_status(priv, tx_resp->frame_count, status);
+}
+
+void iwlagn_rx_handler_setup(struct iwl_priv *priv)
+{
+ /* init calibration handlers */
+ priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+ iwlagn_rx_calib_result;
+ priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
+ iwlagn_rx_calib_complete;
+ priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+}
+
+void iwlagn_setup_deferred_work(struct iwl_priv *priv)
+{
+ /* in agn, the tx power calibration is done in uCode */
+ priv->disable_tx_power_cal = 1;
+}
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
+ (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
+}
+
+int iwlagn_send_tx_power(struct iwl_priv *priv)
+{
+ struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
+ u8 tx_ant_cfg_cmd;
+
+	/* the limit is in half-dBm units, so multiply the dBm value by 2 */
+ tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
+
+ if (priv->tx_power_lmt_in_half_dbm &&
+ priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+		/*
+		 * Newer devices use an enhanced/extended tx power table in
+		 * EEPROM whose format is in half dBm, and the driver has to
+		 * convert it to dBm before reporting to mac80211. That
+		 * conversion can lose 1/2 dBm of resolution: the driver
+		 * rounds up before reporting, which could push the tx power
+		 * 1/2 dBm over the regulatory limit. Check here: if
+		 * "tx_power_user_lmt" is higher than the EEPROM value (in
+		 * half-dBm format), lower the tx power based on the EEPROM
+		 * limit.
+		 */
+ tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+ }
+ tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
+ tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
+
+ if (IWL_UCODE_API(priv->ucode_ver) == 1)
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
+ else
+ tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
+
+ return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
+ sizeof(tx_power_cmd), &tx_power_cmd,
+ NULL);
+}
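[Editor's note] The comment above explains that the command carries its limit in half-dBm steps and that an EEPROM limit already expressed in half-dBm may only lower it. A standalone sketch of that clamp with illustrative values; the function name and values are not the driver's:

    #include <stdio.h>

    static signed char tx_power_limit_half_dbm(int user_lmt_dbm,
    					   signed char eeprom_lmt_half_dbm)
    {
    	signed char lmt = (signed char)(2 * user_lmt_dbm);	/* dBm -> half-dBm */

    	if (eeprom_lmt_half_dbm && eeprom_lmt_half_dbm < lmt)
    		lmt = eeprom_lmt_half_dbm;	/* never exceed the EEPROM limit */
    	return lmt;
    }

    int main(void)
    {
    	/* 16 dBm requested, EEPROM allows 29 half-dBm (14.5 dBm) */
    	printf("limit = %d (half-dBm)\n", tx_power_limit_half_dbm(16, 29));
    	return 0;
    }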
+
+void iwlagn_temperature(struct iwl_priv *priv)
+{
+ /* store temperature from statistics (in Celsius) */
+ priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
+ iwl_tt_handler(priv);
+}
+
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+{
+ struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ u16 voltage;
+ } *hdr;
+
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ EEPROM_CALIB_ALL);
+ return hdr->version;
+
+}
+
+/*
+ * EEPROM
+ */
+static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+{
+ u16 offset = 0;
+
+ if ((address & INDIRECT_ADDRESS) == 0)
+ return address;
+
+ switch (address & INDIRECT_TYPE_MSK) {
+ case INDIRECT_HOST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ break;
+ case INDIRECT_GENERAL:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ break;
+ case INDIRECT_REGULATORY:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ break;
+ case INDIRECT_CALIBRATION:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ break;
+ case INDIRECT_PROCESS_ADJST:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ break;
+ case INDIRECT_OTHERS:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ break;
+ default:
+ IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ address & INDIRECT_TYPE_MSK);
+ break;
+ }
+
+	/* translate the offset from words to bytes */
+ return (address & ADDRESS_MSK) + (offset << 1);
+}
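[Editor's note] The translation above reads a link offset expressed in 16-bit words and shifts it left by one to turn it into a byte offset before adding it to the masked address. A standalone sketch of that step; the mask values below are placeholders, not the driver's real INDIRECT_* and ADDRESS_MSK constants:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_INDIRECT_ADDRESS 0x00100000	/* assumed "indirect" flag bit */
    #define DEMO_ADDRESS_MSK      0x0000ffff	/* assumed address mask */

    static uint32_t demo_indirect_address(uint32_t address, uint16_t link_words)
    {
    	if (!(address & DEMO_INDIRECT_ADDRESS))
    		return address;			/* already a direct address */

    	/* word offset -> byte offset */
    	return (address & DEMO_ADDRESS_MSK) + ((uint32_t)link_words << 1);
    }

    int main(void)
    {
    	printf("0x%x\n",
    	       demo_indirect_address(DEMO_INDIRECT_ADDRESS | 0x10, 0x40));
    	return 0;
    }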
+
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset)
+{
+ u32 address = eeprom_indirect_address(priv, offset);
+ BUG_ON(address >= priv->cfg->eeprom_size);
+ return &priv->eeprom[address];
+}
+
+struct iwl_mod_params iwlagn_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ if (!priv->cfg->use_isr_legacy)
+ rb_timeout = RX_RB_TIMEOUT;
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->dma_addr >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+int iwlagn_hw_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
+
+ priv->cfg->ops->lib->apm_ops.config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = iwl_rx_queue_alloc(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ iwlagn_rx_queue_reset(priv, rxq);
+
+ iwlagn_rx_replenish(priv);
+
+ iwlagn_rx_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwlagn_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwlagn_txq_ctx_reset(priv);
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
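+ /* For example, if write_actual is 8, the device pointer is not advanced
+ * until rxq->write reaches 16, since any value in 8..15 masked with ~0x7
+ * is still 8. */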
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_allocate - Move all used buffers from rx_used to rx_free
+ *
+ * When moving to rx_free, a receive page is allocated and DMA-mapped for
+ * the slot.
+ *
+ * The callers (iwlagn_rx_replenish and iwlagn_rx_replenish_now) then
+ * restock the Rx queue via iwlagn_rx_queue_restock; replenish runs as a
+ * scheduled work item (except during initialization).
+ */
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
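+ /* (DMA_BIT_MASK(8) is 0xFF, so any mapping that is not 256-byte
+ * aligned trips the check above; pages mapped at offset 0 are
+ * page-aligned, so this should not happen in practice) */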
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ priv->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwlagn_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(priv);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to
+ * NULL.  This free routine walks the list of pool entries and, where the
+ * page is non-NULL, unmaps and frees it.
+ */
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int iwlagn_rxq_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
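+
+/*
+ * Example of the two formats handled above: for an HT frame the MCS number
+ * in the low byte of rate_n_flags is returned as-is, while for a legacy
+ * 5 GHz frame the PLCP value is looked up in iwl_rates[] and the index is
+ * returned relative to IWL_FIRST_OFDM_RATE, since mac80211's 5 GHz rate
+ * table does not contain the CCK rates.
+ */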
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+/**
+ * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
+ *
+ * You may hack this function to show different aspects of received frames,
+ * including selective frame dumps.
+ * group100 parameter selects whether to show 1 out of 100 good data frames.
+ * All beacon and probe response frames are printed.
+ */
+static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *phy_res, u16 length,
+ struct ieee80211_hdr *header, int group100)
+{
+ u32 to_us;
+ u32 print_summary = 0;
+ u32 print_dump = 0; /* set to 1 to dump all frames' contents */
+ u32 hundred = 0;
+ u32 dataframe = 0;
+ __le16 fc;
+ u16 seq_ctl;
+ u16 channel;
+ u16 phy_flags;
+ u32 rate_n_flags;
+ u32 tsf_low;
+ int rssi;
+
+ if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
+ return;
+
+ /* MAC header */
+ fc = header->frame_control;
+ seq_ctl = le16_to_cpu(header->seq_ctrl);
+
+ /* metadata */
+ channel = le16_to_cpu(phy_res->channel);
+ phy_flags = le16_to_cpu(phy_res->phy_flags);
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* signal statistics */
+ rssi = iwlagn_calc_rssi(priv, phy_res);
+ tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
+
+ to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
+
+ /* if data frame is to us and all is good,
+ * (optionally) print summary for only 1 out of every 100 */
+ if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
+ cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
+ dataframe = 1;
+ if (!group100)
+ print_summary = 1; /* print each frame */
+ else if (priv->framecnt_to_us < 100) {
+ priv->framecnt_to_us++;
+ print_summary = 0;
+ } else {
+ priv->framecnt_to_us = 0;
+ print_summary = 1;
+ hundred = 1;
+ }
+ } else {
+ /* print summary for all other frames */
+ print_summary = 1;
+ }
+
+ if (print_summary) {
+ char *title;
+ int rate_idx;
+ u32 bitrate;
+
+ if (hundred)
+ title = "100Frames";
+ else if (ieee80211_has_retry(fc))
+ title = "Retry";
+ else if (ieee80211_is_assoc_resp(fc))
+ title = "AscRsp";
+ else if (ieee80211_is_reassoc_resp(fc))
+ title = "RasRsp";
+ else if (ieee80211_is_probe_resp(fc)) {
+ title = "PrbRsp";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_beacon(fc)) {
+ title = "Beacon";
+ print_dump = 1; /* dump frame contents */
+ } else if (ieee80211_is_atim(fc))
+ title = "ATIM";
+ else if (ieee80211_is_auth(fc))
+ title = "Auth";
+ else if (ieee80211_is_deauth(fc))
+ title = "DeAuth";
+ else if (ieee80211_is_disassoc(fc))
+ title = "DisAssoc";
+ else
+ title = "Frame";
+
+ rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+ if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
+ bitrate = 0;
+ WARN_ON_ONCE(1);
+ } else {
+ bitrate = iwl_rates[rate_idx].ieee / 2;
+ }
+
+ /* print frame summary.
+ * MAC addresses show just the last byte (for brevity),
+ * but you can hack it to show more, if you'd like to. */
+ if (dataframe)
+ IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
+ "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ length, rssi, channel, bitrate);
+ else {
+ /* src/dst addresses assume managed mode */
+ IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
+ "len=%u, rssi=%d, tim=%lu usec, "
+ "phy=0x%02x, chnl=%d\n",
+ title, le16_to_cpu(fc), header->addr1[5],
+ header->addr3[5], length, rssi,
+ tsf_low - priv->scan_start_tsf,
+ phy_flags, channel);
+ }
+ }
+ if (print_dump)
+ iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
+}
+#endif
+
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl4965_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_agn.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_agn.last_phy_res;
+ amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.rate_idx =
+ iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow a smooth user experience,
+ * this W/A doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_TSFT;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ /* Set "1" to report good data frames in groups of 100 */
+ if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
+ iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
+#endif
+ iwl_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ priv->_agn.last_phy_res_valid = true;
+ memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan single channel, good enough to reset the RF */
+ /* pick the first valid not in-use channel */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = ieee80211_frequency_to_channel(chan->center_freq);
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_get_channel_info(priv, band, channel);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = sizeof(struct iwl_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct iwl_scan_cmd *scan;
+ struct ieee80211_conf *conf = NULL;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+
+ conf = ieee80211_get_hw_conf(priv->hw);
+
+ cancel_delayed_work(&priv->scan_check);
+
+ if (!iwl_is_ready(priv)) {
+ IWL_WARN(priv, "request scan called when driver not ready.\n");
+ goto done;
+ }
+
+ /* Make sure the scan wasn't canceled before this queued work
+ * was given the chance to run... */
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ goto done;
+
+ /* This should never be called or scheduled if there is currently
+ * a scan active in the hardware. */
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
+ "Ignoring second request.\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
+ goto done;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
+ goto done;
+ }
+
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
+ goto done;
+ }
+
+ if (!test_bit(STATUS_READY, &priv->status)) {
+ IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
+ goto done;
+ }
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ goto done;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (iwl_is_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+ unsigned long flags;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ spin_lock_irqsave(&priv->lock, flags);
+ interval = vif ? vif->bss_conf.beacon_int : 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
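+ /* The encoding above puts the number of whole beacon intervals in
+ * bits 22 and up, and the remainder converted from TU to usec
+ * (x 1024) in the low bits.  For example, suspend_time == 100 with
+ * a 100 TU beacon interval gives extra == 1 << 22 and a zero
+ * remainder term. */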
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band count\n");
+ goto done;
+ }
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_antennas[band])
+ rx_ant = priv->cfg->scan_antennas[band];
+
+ priv->scan_tx_ant[band] =
+ iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
+ rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = first_antenna(active_chains);
+ }
+ /* MIMO is not used here, but value is required */
+ rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ goto done;
+ }
+
+ cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+ if (iwl_send_cmd_sync(priv, &cmd))
+ goto done;
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
+ IWL_SCAN_CHECK_WATCHDOG);
+
+ return;
+
+ done:
+ /* Cannot perform scan. Make sure we clear the scanning
+ * bits from status so the next scan request can be performed.
+ * If we don't clear the scanning status bit here, all
+ * subsequent scans will fail.
+ */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+ /* inform mac80211 scan aborted */
+ queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
+ &vif_priv->ibss_bssid_sta_id);
+ return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116d329..cf4a95bae4f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -295,11 +295,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
return tl->total;
}
-static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
- int ret;
+ int ret = -EAGAIN;
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -313,29 +313,29 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
*/
IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
tid);
- ret = ieee80211_stop_tx_ba_session(sta, tid,
+ ieee80211_stop_tx_ba_session(sta, tid,
WLAN_BACK_INITIATOR);
}
- }
+ } else
+ IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
+ return ret;
}
static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if ((tid < TID_MAX_LOAD_COUNT))
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else if (tid == IWL_AGG_ALL_TID)
- for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
- rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- if (priv->cfg->use_rts_for_ht) {
- /*
- * switch to RTS/CTS if it is the prefer protection method
- * for HT traffic
- */
- IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
- priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
- iwlcore_commit_rxon(priv);
+ if ((tid < TID_MAX_LOAD_COUNT) &&
+ !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
+ if (priv->cfg->use_rts_for_ht) {
+ /*
+ * switch to RTS/CTS if it is the preferred protection
+ * method for HT traffic
+ */
+ IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
+ priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+ iwlcore_commit_rxon(priv);
+ }
}
}
@@ -611,10 +611,6 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
- if (hdr && is_multicast_ether_addr(hdr->addr1) &&
- lq_sta->active_rate_basic)
- return lq_sta->active_rate_basic;
-
if (is_legacy(rate_type)) {
return lq_sta->active_legacy_rate;
} else {
@@ -775,6 +771,15 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@@ -784,10 +789,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added)
- return;
-
/*
* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
@@ -833,7 +834,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@@ -867,14 +868,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_ack_len,
- info->status.ampdu_ack_map);
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_map;
- lq_sta->total_failed += (info->status.ampdu_ack_len -
- info->status.ampdu_ack_map);
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
}
} else {
/*
@@ -1913,7 +1914,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
/* Update uCode's rate table. */
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
return rate;
}
@@ -2002,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
- IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask);
+ IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
@@ -2077,10 +2078,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
}
/* Else we have enough samples; calculate estimate of
* actual average throughput */
-
- /* Sanity-check TPT calculations */
- BUG_ON(window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128));
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
@@ -2289,7 +2292,7 @@ lq_update:
IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
@@ -2334,11 +2337,22 @@ out:
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
i = index;
lq_sta->last_txrate_idx = i;
-
- return;
}
-
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
static void rs_initialize_lq(struct iwl_priv *priv,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
@@ -2357,10 +2371,6 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- if ((lq_sta->lq.sta_id == 0xff) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- goto out;
-
valid_tx_ant = priv->hw_params.valid_tx_ant;
if (!lq_sta->search_better_tbl)
@@ -2388,7 +2398,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
- iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
+ priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
out:
return;
}
@@ -2399,10 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
@@ -2420,30 +2428,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
lq_sta->max_rate_idx = -1;
}
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, priv_sta, txrc))
return;
rate_idx = lq_sta->last_txrate_idx;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !lq_sta->ibss_sta_added) {
- u8 sta_id = iwl_find_station(priv, hdr->addr1);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
- hdr->addr1);
- sta_id = iwl_add_station(priv, hdr->addr1,
- false, CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- lq_sta->ibss_sta_added = 1;
- rs_initialize_lq(priv, conf, sta, lq_sta);
- }
- }
-
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
@@ -2493,16 +2489,25 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
return lq_sta;
}
-static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
int i, j;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
- lq_sta->lq.sta_id = 0xff;
+
+ lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
@@ -2514,39 +2519,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
- IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n");
+ IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
+ sta_id);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
- lq_sta->ibss_sta_added = 0;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
- u8 sta_id = iwl_find_station(priv,
- sta->addr);
-
- /* for IBSS the call are from tasklet */
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
- sta_id = iwl_add_station(priv, sta->addr, false,
- CMD_ASYNC, ht_cap);
- }
- if ((sta_id != IWL_INVALID_STATION)) {
- lq_sta->lq.sta_id = sta_id;
- lq_sta->lq.rs_table[0].rate_n_flags = 0;
- }
- /* FIXME: this is w/a remove it later */
- priv->assoc_station_added = 1;
- }
-
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->active_rate_basic = priv->active_rate_basic;
lq_sta->band = priv->band;
/*
* active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2574,8 +2558,17 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->active_mimo3_rate);
/* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2794,7 +2787,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
if (lq_sta->dbg_fixed_rate) {
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
+ iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
}
return count;
@@ -2950,12 +2943,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
- desc += sprintf(buff+desc,
- "Signal Level= %d dBm\tNoise Level= %d dBm\n",
- priv->last_rx_rssi, priv->last_rx_noise);
- desc += sprintf(buff+desc,
- "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
- priv->last_tsf, priv->last_beacon_time);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
return ret;
@@ -2995,12 +2982,21 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
}
#endif
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
- .rate_init = rs_rate_init,
+ .rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index e71923961e6..8292f6d48ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -403,7 +403,6 @@ struct iwl_lq_sta {
u8 is_green;
u8 is_dup;
enum ieee80211_band band;
- u8 ibss_sta_added;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
@@ -411,7 +410,6 @@ struct iwl_lq_sta {
u16 active_siso_rate;
u16 active_mimo2_rate;
u16 active_mimo3_rate;
- u16 active_rate_basic;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
@@ -479,6 +477,12 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
*/
extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+/* Initialize station's rate scaling information after adding station */
+extern void iwl_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 00000000000..c402bfc83f3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1340 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ /* this matches the mac80211 numbers */
+ 2, 3, 3, 2, 1, 1, 0, 0
+};
+
+static const u8 ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static inline int get_fifo_from_ac(u8 ac)
+{
+ return ac_to_fifo[ac];
+}
+
+static inline int get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int get_fifo_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return get_fifo_from_ac(tid_to_ac[tid]);
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
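+
+/*
+ * Example of the mapping above: a QoS data frame with TID 5 maps to AC 1
+ * (VI) via tid_to_ac[], and get_fifo_from_tid() then returns
+ * IWL_TX_FIFO_VI from ac_to_fifo[].  TIDs 8-15 are rejected with -EINVAL
+ * since they are not supported yet.
+ */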
+
+/**
+ * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM) {
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
+ }
+ }
+
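+ /* Pack the adjusted length into the low 12 bits and the station id
+ * into the top 4 bits of the byte-count table entry (hence the
+ * WARN_ON(len > 0xFFF) above). */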
+ bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
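+ /* The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end
+ * of the table, presumably so the scheduler can always read a
+ * contiguous window of byte counts across the wrap point. */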
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != IWL_CMD_QUEUE_NUM)
+ sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+
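+ /* Each translation-table dword holds two queue entries: odd-numbered
+ * queues use the upper 16 bits, even-numbered queues the lower 16. */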
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_write_prph(priv,
+ IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index)
+{
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
+ IWLAGN_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+}
+
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
+ <= txq_id)) {
+ IWL_ERR(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
+
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* Assumes that ssn_idx is valid (!= 0xFFF) */
+ iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under priv->lock and mac access
+ */
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
+}
+
+static inline int get_queue_from_ac(u16 ac)
+{
+ return ac;
+}
+
+/*
+ * handle build REPLY_TX command notification.
+ */
+static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr,
+ u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
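+ /* The low 4 bits of seq_ctrl are the fragment number, so only the
+ * first fragment of a probe response gets the TSF flag. */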
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+
+ if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+ tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For the 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
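+ /* (mac80211 numbers 5 GHz rates starting at 6 Mbps, while iwl_rates[]
+ * also contains the CCK rates, hence the IWL_FIRST_OFDM_RATE offset) */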
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up RTS and CTS flags for certain packets */
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->alg) {
+ case ALG_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+ break;
+
+ case ALG_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_key(keyconf, skb_frag,
+ IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case ALG_WEP:
+ tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+ (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+ "with key %d\n", keyconf->keyidx);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+ break;
+ }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_cmd *tx_cmd;
+ int swq_id, txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, len_org, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_is_rfkill(priv)) {
+ IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (ieee80211_is_auth(fc))
+ IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
+ sta_id = priv->hw_params.bcast_sta_id;
+ else
+ sta_id = iwl_sta_id(info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
+
+ IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+ sta_priv->asleep) {
+ WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
+ txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop_unlock;
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+ txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
+ }
+
+ txq = &priv->txq[txq_id];
+ swq_id = txq->swq_id;
+ q = &txq->q;
+
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock;
+
+ if (ieee80211_is_data_qos(fc))
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb[0] = skb;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
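+ /* Illustration (packing assumed from QUEUE_TO_SEQ/INDEX_TO_SEQ): the
+ * queue number and TFD index share one 16-bit sequence value, so the
+ * Tx response can be mapped straight back to this write_ptr slot. */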
+ out_cmd->hdr.cmd = REPLY_TX;
+ out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+ iwl_dbg_log_tx_data_frame(priv, len, hdr);
+
+ iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+ iwl_update_stats(priv, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+
+ len_org = len;
+ firstlen = len = (len + 3) & ~3;
+
+ if (len_org != len)
+ len_org = 1;
+ else
+ len_org = 0;
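+ /* len_org is reused as a flag: e.g. an unaligned len of 142 rounds up
+ * to 144, len_org becomes 1, and the two pad bytes after the MAC header
+ * are reported to the device via TX_CMD_FLG_MH_PAD_MSK below. */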
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (len_org)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = pci_map_single(priv->pci_dev,
+ &out_cmd->hdr, len,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ pci_unmap_len_set(out_meta, len, len);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, len, 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ if (qc)
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = len = skb->len - hdr_len;
+ if (len) {
+ phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+ len, PCI_DMA_TODEVICE);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, len,
+ 0, 0);
+ }
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of what happens below; the remaining checks only
+ * decide whether the write pointer needs another update.
+ */
+
+ /* avoid atomic ops if it isn't an associated client */
+ if (sta_priv && sta_priv->client)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&priv->lock, flags);
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ iwl_stop_queue(priv, txq->swq_id);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -1;
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwlagn_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ iwl_cmd_queue_free(priv);
+ else
+ iwl_tx_queue_free(priv, txq_id);
+ }
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_free_txq_mem(priv);
+}
+
+/**
+ * iwlagn_txq_ctx_alloc - allocate TX queue context
+ * @priv: driver private data
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ iwlagn_hw_txq_ctx_free(priv);
+
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = iwl_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+ error:
+ iwlagn_hw_txq_ctx_free(priv);
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+ return ret;
+}
+
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int ch;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because queues 0-6 should already
+ * be in use as EDCA ACs (0-3), command (4), and reserved (5, 6).
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ tx_fifo = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+ __func__, sta->addr, tid);
+
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = iwlagn_txq_ctx_activate_free(priv);
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
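+ /* swq_id combines the mac80211 AC and the hardware aggregation queue
+ * (encoding assumed from iwl_virtual_agg_queue_num) so later queue
+ * wake/stop calls can resolve both. */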
+ priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
+ sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ return ret;
+}
+
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn = -1;
+ struct iwl_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = get_fifo_from_tid(tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state ==
+ IWL_EMPTYING_HW_QUEUE_ADDBA) {
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+ return 0;
+ }
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+
+ tid_data = &priv->stations[sta_id].tid[tid];
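+ /* seq_number is kept pre-shifted by 4 (the low bits hold the fragment
+ * field), so masking and shifting recovers the 12-bit starting SSN. */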
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+ write_ptr = priv->txq[txq_id].q.write_ptr;
+ read_ptr = priv->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+ priv->stations[sta_id].tid[tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ return 0;
+ }
+
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ tx_fifo_id);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_queue *q = &priv->txq[txq_id].q;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+ struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = get_fifo_from_tid(tid);
+ IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
+ ssn, tx_fifo);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+ }
+ break;
+ }
+ return 0;
+}
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ index, q->n_bd, q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (index = iwl_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ iwlagn_tx_status(priv, tx_info->skb[0]);
+
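+ /* Only QoS data frames count toward nfreed, mirroring the
+ * tfds_in_queue accounting done when the frame was queued. */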
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+ tx_info->skb[0] = NULL;
+
+ if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+ priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl_compressed_ba_resp *ba_resp)
+
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ u64 bitmap;
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ IWL_ERR(priv, "Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx window bits */
+ sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+ if (sh < 0) /* something is wrong with the indices */
+ sh += 0x100;
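+ /* The +0x100 corrects for the 8-bit TFD index wrapping around between
+ * the BA window start and agg->start_idx (modulo-256 index arithmetic,
+ * as read from the computation above). */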
+
+ /* align the block-ack bitmap with the start of our Tx window */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ if (agg->frame_count > (64 - sh)) {
+ IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+ return -1;
+ }
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ bitmap &= agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ for (i = 0; i < agg->frame_count ; i++) {
+ ack = bitmap & (1ULL << i);
+ successes += !!ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ }
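+ /* Illustrative example: with agg->frame_count = 4 and a masked bitmap
+ * of 0xb (0b1011), frames 0, 1 and 3 are ACKed, successes ends up as 3,
+ * and frame 2 is logged as a NACK above. */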
+
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_ack_map = bitmap;
+ info->status.ampdu_len = agg->frame_count;
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
+
+ return 0;
+}
+
+/**
+ * iwlagn_hwrate_to_tx_control - translate uCode rate_n_flags to mac80211 tx status values
+ */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_tx_queue *txq = NULL;
+ struct iwl_ht_agg *agg;
+ int index;
+ int sta_id;
+ int tid;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= priv->hw_params.max_txq_num) {
+ IWL_ERR(priv,
+ "BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &priv->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &priv->stations[sta_id].tid[tid].agg;
+
+ /* Find index just before block-ack window */
+ index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ /* TODO: Need to get this copy more safely - now good for debug */
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+ agg->wait_for_ba,
+ (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
+ "%d, scd_ssn = %d\n",
+ ba_resp->tid,
+ le16_to_cpu(ba_resp->seq_ctl),
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ le16_to_cpu(ba_resp->scd_flow),
+ le16_to_cpu(ba_resp->scd_ssn));
+ IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+ agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in window */
+ iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+ priv->mac80211_registered &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_wake_queue(priv, txq->swq_id);
+
+ iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
+ }
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 00000000000..637286c396f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,425 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-agn.h"
+
+static const s8 iwlagn_default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWLAGN_CMD_FIFO_NUM,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+};
+
+static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
+ {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
+ 0, COEX_UNASSOC_IDLE_FLAGS},
+ {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
+ 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
+ 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
+ 0, COEX_CALIBRATION_FLAGS},
+ {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
+ 0, COEX_PERIODIC_CALIBRATION_FLAGS},
+ {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
+ 0, COEX_CONNECTION_ESTAB_FLAGS},
+ {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
+ 0, COEX_ASSOCIATED_IDLE_FLAGS},
+ {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
+ 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
+ {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
+ 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
+ {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
+ 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
+ {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
+ {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
+ {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
+ 0, COEX_STAND_ALONE_DEBUG_FLAGS},
+ {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
+ 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
+ {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
+ {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
+};
+
+/*
+ * ucode
+ */
+static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
+{
+ dma_addr_t phy_addr = image->p_addr;
+ u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+ iwl_write_direct32(priv,
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+ iwl_write_direct32(priv,
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ priv->ucode_write_complete, 5 * HZ);
+ if (ret == -ERESTARTSYS) {
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
+ return ret;
+ }
+ if (!ret) {
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int iwlagn_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
+
+ ret = iwlagn_load_section(priv, "INST", inst_image,
+ IWLAGN_RTC_INST_LOWER_BOUND);
+ if (ret)
+ return ret;
+
+ return iwlagn_load_section(priv, "DATA", data_image,
+ IWLAGN_RTC_DATA_LOWER_BOUND);
+}
+
+int iwlagn_load_ucode(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* check whether init ucode should be loaded, or rather runtime ucode */
+ if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
+ IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_init, &priv->ucode_init_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
+ priv->ucode_type = UCODE_INIT;
+ }
+ } else {
+ IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
+ "Loading runtime ucode...\n");
+ ret = iwlagn_load_given_ucode(priv,
+ &priv->ucode_code, &priv->ucode_data);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
+ priv->ucode_type = UCODE_RT;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Calibration
+ */
+static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+{
+ struct iwl_calib_xtal_freq_cmd cmd;
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
+ cmd.hdr.first_group = 0;
+ cmd.hdr.groups_num = 1;
+ cmd.hdr.data_valid = 1;
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+ return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
+ (u8 *)&cmd, sizeof(cmd));
+}
+
+static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+{
+ struct iwl_calib_cfg_cmd calib_cfg_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = CALIBRATION_CFG_CMD,
+ .len = sizeof(struct iwl_calib_cfg_cmd),
+ .data = &calib_cfg_cmd,
+ };
+
+ memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+ calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
+
+ return iwl_send_cmd(priv, &cmd);
+}
+
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
+ int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ int index;
+
+ /* subtract the 4-byte length field itself from the reported length */
+ len -= 4;
+
+ /* Define the order in which the results will be sent to the runtime
+ * uCode. iwl_send_calib_results sends them in order of their index,
+ * so map each op_code to its calibration index here.
+ */
+ switch (hdr->op_code) {
+ case IWL_PHY_CALIBRATE_DC_CMD:
+ index = IWL_CALIB_DC;
+ break;
+ case IWL_PHY_CALIBRATE_LO_CMD:
+ index = IWL_CALIB_LO;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_CMD:
+ index = IWL_CALIB_TX_IQ;
+ break;
+ case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
+ index = IWL_CALIB_TX_IQ_PERD;
+ break;
+ case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
+ index = IWL_CALIB_BASE_BAND;
+ break;
+ default:
+ IWL_ERR(priv, "Unknown calibration notification %d\n",
+ hdr->op_code);
+ return;
+ }
+ iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+}
+
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+void iwlagn_init_alive_start(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* Check alive response for "valid" sign from uCode */
+ if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* The initialize uCode was loaded; verify the instruction image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (iwl_verify_ucode(priv)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ ret = priv->cfg->ops->lib->alive_notify(priv);
+ if (ret) {
+ IWL_WARN(priv,
+ "Could not complete ALIVE transition: %d\n", ret);
+ goto restart;
+ }
+
+ iwlagn_send_calib_cfg(priv);
+ return;
+
+restart:
+ /* real restart (first load init_ucode) */
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+{
+ struct iwl_wimax_coex_cmd coex_cmd;
+
+ if (priv->cfg->support_wimax_coexist) {
+ /* UnMask wake up src at associated sleep */
+ coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+
+ /* UnMask wake up src at unassociated sleep */
+ coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
+ memcpy(coex_cmd.sta_prio, cu_priorities,
+ sizeof(struct iwl_wimax_coex_event_entry) *
+ COEX_NUM_OF_EVENTS);
+
+ /* enabling the coexistence feature */
+ coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
+
+ /* enabling the priorities tables */
+ coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
+ } else {
+ /* coexistence is disabled */
+ memset(&coex_cmd, 0, sizeof(coex_cmd));
+ }
+ return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
+ sizeof(coex_cmd), &coex_cmd);
+}
+
+int iwlagn_alive_notify(struct iwl_priv *priv)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+
+ iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
+ IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
+ iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
+
+ /* initiate the queues */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+ iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
+ IWL_MASK(0, priv->hw_params.max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
+
+ iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+
+ /* make sure no queues are stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all queues first */
+ priv->txq_ctx_active_msk = 0;
+ /* map qos queues to fifos one-to-one */
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
+
+ for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
+ int ac = iwlagn_default_queue_to_tx_fifo[i];
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwlagn_send_wimax_coex(priv);
+
+ iwlagn_set_Xtal_calib(priv);
+ iwl_send_calib_results(priv);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bdff56583e1..aef4f71f198 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -55,6 +55,7 @@
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-calib.h"
+#include "iwl-agn.h"
/******************************************************************************
@@ -83,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
-/*************** STATION TABLE MANAGEMENT ****
- * mac80211 should be examined to determine if sta_info is duplicating
- * the functionality provided here
- */
-
-/**************************************************************/
-
/**
* iwl_commit_rxon - commit staging_rxon to hardware
*
@@ -144,9 +138,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
return 0;
}
- /* station table will be cleared */
- priv->assoc_station_added = 0;
-
/* If we are currently associated and the new config requires
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
@@ -166,6 +157,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
return ret;
}
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -179,9 +177,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
/* Apply the new configuration
- * RXON unassoc clears the station table in uCode, send it before
- * we add the bcast station. If assoc bit is set, we will send RXON
- * after having added the bcast and bssid station.
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
*/
if (!new_assoc) {
ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -190,35 +187,19 @@ int iwl_commit_rxon(struct iwl_priv *priv)
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
return ret;
}
+ IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
+ iwl_clear_ucode_stations(priv);
+ iwl_restore_stations(priv);
+ ret = iwl_restore_default_wep_keys(priv);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
}
- iwl_clear_stations_table(priv);
-
priv->start_calib = 0;
-
- /* Add the broadcast address so we can send broadcast frames */
- priv->cfg->ops->lib->add_bcast_station(priv);
-
-
- /* If we have set the ASSOC_MSK and we are in BSS mode then
- * add the IWL_AP_ID to the station rate table */
if (new_assoc) {
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- ret = iwl_rxon_add_station(priv,
- priv->active_rxon.bssid_addr, 1);
- if (ret == IWL_INVALID_STATION) {
- IWL_ERR(priv,
- "Error adding AP address for TX.\n");
- return -EIO;
- }
- priv->assoc_station_added = 1;
- if (priv->default_wep_key &&
- iwl_send_static_wepkey_cmd(priv, 0))
- IWL_ERR(priv,
- "Could not send WEP static key.\n");
- }
-
/*
* allow CTS-to-self if possible for new association.
* this is relevant only for 5000 series and up,
@@ -907,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
+ priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
+ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
@@ -1038,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
count = 0;
}
}
@@ -1047,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwl_rx_replenish_now(priv);
+ iwlagn_rx_replenish_now(priv);
else
- iwl_rx_queue_restock(priv);
+ iwlagn_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
@@ -1267,9 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
+ iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
- inta = priv->inta;
+ inta = priv->_agn.inta;
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
@@ -1282,8 +1263,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
- /* saved interrupt in inta variable now we can reset priv->inta */
- priv->inta = 0;
+ /* saved interrupt in inta variable now we can reset priv->_agn.inta */
+ priv->_agn.inta = 0;
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1448,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
iwl_enable_interrupts(priv);
}
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is zero and the aggregated BA timeout retries
+ * exceed BA_TIMEOUT_MAX, reload the firmware and bring the system back
+ * to its normal operation state.
+ */
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ bool rc = true;
+ int actual_ack_cnt_delta, expected_ack_cnt_delta;
+ int ba_timeout_delta;
+
+ actual_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
+ expected_ack_cnt_delta =
+ le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
+ le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
+ ba_timeout_delta =
+ le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
+ le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
+ if ((priv->_agn.agg_tids_count > 0) &&
+ (expected_ack_cnt_delta > 0) &&
+ (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
+ < ACK_CNT_RATIO) &&
+ (ba_timeout_delta > BA_TIMEOUT_CNT)) {
+ IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
+ " expected_ack_cnt = %d\n",
+ actual_ack_cnt_delta, expected_ack_cnt_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
+ priv->delta_statistics.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta = %d\n",
+ priv->delta_statistics.tx.
+ ack_or_ba_timeout_collision);
+#endif
+ IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
+ ba_timeout_delta);
+ if (!actual_ack_cnt_delta &&
+ (ba_timeout_delta >= BA_TIMEOUT_MAX))
+ rc = false;
+ }
+ return rc;
+}
+
/******************************************************************************
*
@@ -1471,9 +1506,13 @@ static void iwl_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+};
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwl_mac_setup_register(struct iwl_priv *priv);
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
@@ -1500,6 +1539,199 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
iwl_ucode_callback);
}
+struct iwlagn_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+
+ u32 build;
+};
+
+static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ /*
+ * The 4965 does not revise the firmware file format
+ * along with the API version; it always uses the v1
+ * file format.
+ */
+ if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) !=
+ CSR_HW_REV_TYPE_4965) {
+ hdr_size = 28;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = le32_to_cpu(ucode->u.v2.build);
+ pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size);
+ src = ucode->u.v2.data;
+ break;
+ }
+ /* fall through for 4965 */
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->build = 0;
+ pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size);
+ src = ucode->u.v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size + pieces->boot_size) {
+
+ IWL_ERR(priv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+static int iwlagn_wanted_ucode_alternative = 1;
+
+static int iwlagn_load_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwlagn_firmware_pieces *pieces,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+ struct iwl_ucode_tlv *tlv;
+ size_t len = ucode_raw->size;
+ const u8 *data;
+ int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
+ u64 alternatives;
+
+ if (len < sizeof(*ucode))
+ return -EINVAL;
+
+ if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC))
+ return -EINVAL;
+
+ /*
+ * Check which alternatives are present, and "downgrade"
+ * when the chosen alternative is not present, warning
+ * the user when that happens. Some files may not have
+ * any alternatives, so don't warn in that case.
+ */
+ alternatives = le64_to_cpu(ucode->alternatives);
+ tmp = wanted_alternative;
+ if (wanted_alternative > 63)
+ wanted_alternative = 63;
+ while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
+ wanted_alternative--;
+ if (wanted_alternative && wanted_alternative != tmp)
+ IWL_WARN(priv,
+ "uCode alternative %d not available, choosing %d\n",
+ tmp, wanted_alternative);
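+ /* Example: if alternative 3 is requested but the file only advertises
+ * alternatives 0 and 1, the loop above settles on 1 and warns once. */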
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ pieces->build = le32_to_cpu(ucode->build);
+ data = ucode->data;
+
+ len -= sizeof(*ucode);
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len;
+ enum iwl_ucode_tlv_type tlv_type;
+ u16 tlv_alt;
+ const u8 *tlv_data;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_alt = le16_to_cpu(tlv->alternative);
+ tlv_data = tlv->data;
+
+ if (len < tlv_len)
+ return -EINVAL;
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ /*
+ * Alternative 0 is always valid.
+ *
+ * Skip alternative TLVs that are not selected.
+ */
+ if (tlv_alt != 0 && tlv_alt != wanted_alternative)
+ continue;
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_INST:
+ pieces->inst = tlv_data;
+ pieces->inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_DATA:
+ pieces->data = tlv_data;
+ pieces->data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT:
+ pieces->init = tlv_data;
+ pieces->init_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_INIT_DATA:
+ pieces->init_data = tlv_data;
+ pieces->init_data_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_BOOT:
+ pieces->boot = tlv_data;
+ pieces->boot_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len != 4)
+ return -EINVAL;
+ capa->max_probe_length =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (len)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* iwl_ucode_callback - callback when firmware was loaded
*
@@ -1510,14 +1742,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
{
struct iwl_priv *priv = context;
struct iwl_ucode_header *ucode;
+ int err;
+ struct iwlagn_firmware_pieces pieces;
const unsigned int api_max = priv->cfg->ucode_api_max;
const unsigned int api_min = priv->cfg->ucode_api_min;
- u8 *src;
- size_t len;
- u32 api_ver, build;
- u32 inst_size, data_size, init_size, init_data_size, boot_size;
- int err;
- u16 eeprom_ver;
+ u32 api_ver;
+ char buildstr[25];
+ u32 build;
+ struct iwlagn_ucode_capabilities ucode_capa = {
+ .max_probe_length = 200,
+ };
+
+ memset(&pieces, 0, sizeof(pieces));
if (!ucode_raw) {
IWL_ERR(priv, "request for firmware file '%s' failed.\n",
@@ -1528,8 +1764,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
priv->firmware_name, ucode_raw->size);
- /* Make sure that we got at least the v1 header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
IWL_ERR(priv, "File size way too small!\n");
goto try_again;
}
@@ -1537,21 +1773,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Data from ucode file: header followed by uCode images */
ucode = (struct iwl_ucode_header *)ucode_raw->data;
- priv->ucode_ver = le32_to_cpu(ucode->ver);
+ if (ucode->ver)
+ err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
+ else
+ err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
+ &ucode_capa);
+
+ if (err)
+ goto try_again;
+
api_ver = IWL_UCODE_API(priv->ucode_ver);
- build = priv->cfg->ops->ucode->get_build(ucode, api_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
+ build = pieces.build;
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
if (api_ver < api_min || api_ver > api_max) {
IWL_ERR(priv, "Driver unable to support your firmware API. "
"Driver supports v%u, firmware is v%u.\n",
@@ -1565,40 +1803,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
"from http://www.intellinuxwireless.org.\n",
api_max, api_ver);
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
+ if (build)
+ sprintf(buildstr, " build %u", build);
+ else
+ buildstr[0] = '\0';
+
+ IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
snprintf(priv->hw->wiphy->fw_version,
sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
+ "%u.%u.%u.%u%s",
IWL_UCODE_MAJOR(priv->ucode_ver),
IWL_UCODE_MINOR(priv->ucode_ver),
IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- if (build)
- IWL_DEBUG_INFO(priv, "Build %u\n", build);
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", eeprom_ver);
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
+ IWL_UCODE_SERIAL(priv->ucode_ver),
+ buildstr);
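For reference, all four "%u" fields above are unpacked from the single 32-bit priv->ucode_ver word by the IWL_UCODE_* macros. A sketch of that unpacking, assuming a one-byte-per-field major.minor.api.serial layout (an assumption for illustration, not taken from this patch):

/* Illustrative only: assumed packing, most-significant byte = major. */
static void example_split_ucode_ver(uint32_t ver, unsigned int *major,
				    unsigned int *minor, unsigned int *api,
				    unsigned int *serial)
{
	*major  = (ver >> 24) & 0xff;
	*minor  = (ver >> 16) & 0xff;
	*api    = (ver >> 8) & 0xff;
	*serial = ver & 0xff;
}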
/*
* For any of the failures below (before allocating pci memory)
@@ -1606,43 +1830,47 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* user just got a corrupted version of the latest API.
*/
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size !=
- priv->cfg->ops->ucode->get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- goto try_again;
- }
+ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+ priv->ucode_ver);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+ pieces.boot_size);
/* Verify that uCode images will fit in card's SRAM */
- if (inst_size > priv->hw_params.max_inst_size) {
- IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
- inst_size);
+ if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
goto try_again;
}
- if (data_size > priv->hw_params.max_data_size) {
- IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
- data_size);
+ if (pieces.data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
goto try_again;
}
- if (init_size > priv->hw_params.max_inst_size) {
- IWL_INFO(priv, "uCode init instr len %d too large to fit in\n",
- init_size);
+
+ if (pieces.init_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
goto try_again;
}
- if (init_data_size > priv->hw_params.max_data_size) {
- IWL_INFO(priv, "uCode init data len %d too large to fit in\n",
- init_data_size);
+
+ if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
goto try_again;
}
- if (boot_size > priv->hw_params.max_bsm_size) {
- IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n",
- boot_size);
+
+ if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+ IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
goto try_again;
}
@@ -1651,13 +1879,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
+ priv->ucode_code.len = pieces.inst_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
- priv->ucode_data.len = data_size;
+ priv->ucode_data.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
- priv->ucode_data_backup.len = data_size;
+ priv->ucode_data_backup.len = pieces.data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
@@ -1665,11 +1893,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto err_pci_alloc;
/* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
+ if (pieces.init_size && pieces.init_data_size) {
+ priv->ucode_init.len = pieces.init_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
- priv->ucode_init_data.len = init_data_size;
+ priv->ucode_init_data.len = pieces.init_data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
@@ -1677,8 +1905,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
}
/* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
+ if (pieces.boot_size) {
+ priv->ucode_boot.len = pieces.boot_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
if (!priv->ucode_boot.v_addr)
@@ -1688,51 +1916,48 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Copy images into buffers for card's bus-master reads ... */
/* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
- /* Runtime data (2nd block)
- * NOTE: Copy into backup buffer will be done in iwl_up() */
- len = data_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in iwl_up()
+ */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
- len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
+ pieces.init_size);
+ memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
}
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
+ /* Initialization data */
+ if (pieces.init_data_size) {
IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
- len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
+ pieces.init_data_size);
+ memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
}
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
+ /* Bootstrap instructions */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
/**************************************************
* This is still part of probe() in a sense...
*
* 9. Setup and register with mac80211 and debugfs
**************************************************/
- err = iwl_mac_setup_register(priv);
+ err = iwl_mac_setup_register(priv, &ucode_capa);
if (err)
goto out_unbind;
@@ -1742,6 +1967,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
+ complete(&priv->_agn.firmware_loading_complete);
return;
try_again:
@@ -1755,6 +1981,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
IWL_ERR(priv, "failed to allocate pci memory\n");
iwl_dealloc_ucode_pci(priv);
out_unbind:
+ complete(&priv->_agn.firmware_loading_complete);
device_release_driver(&priv->pci_dev->dev);
release_firmware(ucode_raw);
}
@@ -1809,6 +2036,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
u32 data2, line;
u32 desc, time, count, base, data1;
u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1831,6 +2059,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
+ pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@@ -1839,6 +2068,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
+ hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
blink1, blink2, ilink1, ilink2);
@@ -1847,10 +2077,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
"data1 data2 line\n");
IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
- ilink1, ilink2);
-
+ IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+ pc, blink1, blink2, ilink1, ilink2, hcmd);
}
#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -1966,9 +2195,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -2001,16 +2227,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, MAX_EVENT_LOG_SIZE);
- capacity = MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, MAX_EVENT_LOG_SIZE);
- next_entry = MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
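The clamping above distrusts the geometry reported by the device, and size then follows the usual circular-log rule: dump the whole buffer once the log has wrapped, otherwise only the entries written so far. A standalone restatement of that rule:

#include <stdint.h>

/* Illustrative helper mirroring the checks above; not part of the patch. */
static uint32_t example_log_entries_to_dump(uint32_t capacity,
					    uint32_t next_entry,
					    uint32_t num_wraps,
					    uint32_t hard_limit)
{
	if (capacity > hard_limit)	/* bogus device-reported value */
		capacity = hard_limit;
	if (next_entry > hard_limit)
		next_entry = hard_limit;

	return num_wraps ? capacity : next_entry;
}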
@@ -2095,7 +2321,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
ret = priv->cfg->ops->lib->alive_notify(priv);
if (ret) {
IWL_WARN(priv,
@@ -2106,13 +2331,19 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send host commands to the uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
if (priv->cfg->ops->hcmd->set_tx_ant)
@@ -2126,7 +2357,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2135,7 +2366,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
iwl_reset_run_time_calib(priv);
@@ -2152,18 +2383,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
wake_up_interruptible(&priv->wait_command_queue);
iwl_power_update_mode(priv, true);
+ IWL_DEBUG_INFO(priv, "Updated power mode\n");
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
return;
@@ -2183,7 +2404,9 @@ static void __iwl_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2231,8 +2454,8 @@ static void __iwl_down(struct iwl_priv *priv)
/* device going down, Stop using ICT table */
iwl_disable_ict(priv);
- iwl_txq_ctx_stop(priv);
- iwl_rxq_stop(priv);
+ iwlagn_txq_ctx_stop(priv);
+ iwlagn_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -2292,7 +2515,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
{
int ret = 0;
- IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n");
+ IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
ret = iwl_set_hw_ready(priv);
if (priv->hw_ready)
@@ -2330,6 +2553,10 @@ static int __iwl_up(struct iwl_priv *priv)
return -EIO;
}
+ ret = iwl_alloc_bcast_station(priv, true);
+ if (ret)
+ return ret;
+
iwl_prepare_card_hw(priv);
if (!priv->hw_ready) {
@@ -2353,7 +2580,7 @@ static int __iwl_up(struct iwl_priv *priv)
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- ret = iwl_hw_nic_init(priv);
+ ret = iwlagn_hw_nic_init(priv);
if (ret) {
IWL_ERR(priv, "Unable to init nic\n");
return ret;
@@ -2380,8 +2607,6 @@ static int __iwl_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
@@ -2467,7 +2692,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
mutex_unlock(&priv->mutex);
- return;
}
static void iwl_bg_restart(struct work_struct *data)
@@ -2505,34 +2729,28 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
return;
mutex_lock(&priv->mutex);
- iwl_rx_replenish(priv);
+ iwlagn_rx_replenish(priv);
mutex_unlock(&priv->mutex);
}
#define IWL_DELAY_NEXT_SCAN (HZ*2)
-void iwl_post_associate(struct iwl_priv *priv)
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct ieee80211_conf *conf = NULL;
int ret = 0;
- unsigned long flags;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
-
-
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
-
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -2540,7 +2758,7 @@ void iwl_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2554,56 +2772,44 @@ void iwl_post_associate(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
+
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
break;
-
case NL80211_IFTYPE_ADHOC:
-
- /* assume default assoc id */
- priv->assoc_id = 1;
-
- iwl_rxon_add_station(priv, priv->bssid, 0);
iwl_send_beacon_cmd(priv);
-
break;
-
default:
IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ __func__, vif->type);
break;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
- priv->assoc_station_added = 1;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* the chain noise calibration will enable PM upon completion
* If chain noise has already been run, then we need to enable
* power management here */
@@ -2628,7 +2834,8 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_mac_setup_register(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2636,7 +2843,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -2649,6 +2855,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2664,7 +2872,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -2770,17 +2978,16 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
- if (iwl_tx_skb(priv, skb))
+ if (iwlagn_tx_skb(priv, skb))
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
return NETDEV_TX_OK;
}
-void iwl_config_ap(struct iwl_priv *priv)
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int ret = 0;
- unsigned long flags;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -2793,7 +3000,7 @@ void iwl_config_ap(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
/* RXON Timing */
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (ret)
@@ -2807,9 +3014,10 @@ void iwl_config_ap(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -2817,26 +3025,21 @@ void iwl_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_reset_qos(priv);
- spin_lock_irqsave(&priv->lock, flags);
- iwl_activate_qos(priv, 1);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_add_bcast_station(priv);
}
iwl_send_beacon_cmd(priv);
@@ -2855,8 +3058,7 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf,
- sta ? sta->addr : iwl_bcast_addr,
+ iwl_update_tkip_key(priv, keyconf, sta,
iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2868,7 +3070,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret;
u8 sta_id;
bool is_default_wep_key = false;
@@ -2879,25 +3080,29 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ sta_id = priv->hw_params.bcast_sta_id;
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
- /* If we are getting WEP group key and we didn't receive any key mapping
+ /*
+ * If we are getting a WEP group key and we didn't receive any key mapping
* so far, we are in legacy wep mode (group key only), otherwise we are
* in 1X mode.
- * In legacy wep mode, we use another host command to the uCode */
- if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_AP) {
+ * In legacy wep mode, we use another host command to the uCode.
+ */
+ if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
if (cmd == SET_KEY)
is_default_wep_key = !priv->key_mapping_key;
else
@@ -2926,6 +3131,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
@@ -2933,8 +3139,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
struct iwl_priv *priv = hw->priv;
int ret;
@@ -2948,20 +3154,31 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
IWL_DEBUG_HT(priv, "start Rx\n");
- return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
+ return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
case IEEE80211_AMPDU_RX_STOP:
IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid);
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
return ret;
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
- return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ if (ret == 0) {
+ priv->_agn.agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
+ return ret;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl_tx_agg_stop(priv, sta->addr, tid);
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
+ priv->_agn.agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
+ priv->_agn.agg_tids_count);
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return 0;
else
@@ -2977,18 +3194,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static int iwl_mac_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
-
- priv = hw->priv;
- IWL_DEBUG_MAC80211(priv, "enter\n");
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -2998,18 +3203,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
- /*
- * TODO: We really should use this callback to
- * actually maintain the station table in
- * the device.
- */
-
switch (cmd) {
- case STA_NOTIFY_ADD:
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
- break;
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
sta_priv->asleep = true;
@@ -3021,7 +3215,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
if (!sta_priv->asleep)
break;
sta_priv->asleep = false;
- sta_id = iwl_find_station(priv, sta->addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id != IWL_INVALID_STATION)
iwl_sta_modify_ps_wake(priv, sta_id);
break;
@@ -3030,6 +3224,44 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
}
}
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
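+ /* in station mode, the peer station we add here is our AP */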
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
+
/*****************************************************************************
*
* sysfs attributes
@@ -3130,125 +3362,6 @@ static ssize_t store_tx_power(struct device *d,
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-static ssize_t show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
-}
-
-static ssize_t store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
- priv->staging_rxon.flags = cpu_to_le32(flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
-
-static ssize_t show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->active_rxon.filter_flags));
-}
-
-static ssize_t store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- u32 filter_flags;
- int ret = strict_strtoul(buf, 0, &val);
- if (ret)
- return ret;
- filter_flags = (u32)val;
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- priv->staging_rxon.filter_flags =
- cpu_to_le32(filter_flags);
- iwlcore_commit_rxon(priv);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
- store_filter_flags);
-
-
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_rts_ht_protection(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3316,6 +3429,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->ucode_trace.data = (unsigned long)priv;
priv->ucode_trace.function = iwl_bg_ucode_trace;
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
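Only the timer setup is shown here; with the data/function timer API used above, a stall monitor of this kind typically inspects the tx queues and re-arms itself from its own handler. A hedged sketch (the handler body is hypothetical; the re-arming pattern matches the mod_timer() call in iwl_alive_start() above):

/* Sketch only: a periodic tx-stall monitor that re-arms itself. */
static void example_monitor_recover(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	/* ... check tx queue progress, kick a recovery if stalled ... */

	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
}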
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3336,6 +3456,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3373,9 +3495,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3383,6 +3502,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ priv->_agn.agg_tids_count = 0;
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3396,16 +3516,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
- priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
ret = iwl_init_channel_map(priv);
if (ret) {
@@ -3433,13 +3547,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
iwl_calib_free_results(priv);
iwlcore_free_geos(priv);
iwl_free_channel_map(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
}
static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_statistics.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
&dev_attr_rts_ht_protection.attr,
@@ -3464,13 +3575,14 @@ static struct ieee80211_ops iwl_hw_ops = {
.configure_filter = iwl_configure_filter,
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
- .get_stats = iwl_mac_get_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
.ampdu_action = iwl_mac_ampdu_action,
.hw_scan = iwl_mac_hw_scan,
.sta_notify = iwl_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3574,7 +3686,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
iwl_hw_detect(priv);
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
+ IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3672,6 +3784,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+ init_completion(&priv->_agn.firmware_loading_complete);
+
err = iwl_request_firmware(priv, true);
if (err)
goto out_remove_sysfs;
@@ -3712,6 +3826,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
if (!priv)
return;
+ wait_for_completion(&priv->_agn.firmware_loading_complete);
+
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
@@ -3752,10 +3868,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
- iwl_rx_queue_free(priv, &priv->rxq);
- iwl_hw_txq_ctx_free(priv);
+ iwlagn_rx_queue_free(priv, &priv->rxq);
+ iwlagn_hw_txq_ctx_free(priv);
- iwl_clear_stations_table(priv);
iwl_eeprom_free(priv);
@@ -3870,6 +3985,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+/* 6x00 Series Gen2a */
+ {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
+
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
@@ -3951,3 +4071,38 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
+module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
+MODULE_PARM_DESC(swcrypto50,
+ "using crypto in software (default 0 [hardware]) (deprecated)");
+module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num50,
+ iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num50,
+ "number of hw queues in 50xx series (deprecated)");
+module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K50,
+ "enable 8K amsdu size in 50XX series (deprecated)");
+module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart50,
+ "restart firmware in case of error (deprecated)");
+module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+module_param_named(
+ disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
+
+module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
+ S_IRUGO);
+MODULE_PARM_DESC(ucode_alternative,
+ "specify ucode alternative to use from ucode file");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 00000000000..2d748053358
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_agn_h__
+#define __iwl_agn_h__
+
+#include "iwl-dev.h"
+
+extern struct iwl_mod_params iwlagn_mod_params;
+extern struct iwl_hcmd_ops iwlagn_hcmd;
+extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
+
+int iwl_reset_ict(struct iwl_priv *priv);
+void iwl_disable_ict(struct iwl_priv *priv);
+int iwl_alloc_isr_ict(struct iwl_priv *priv);
+void iwl_free_isr_ict(struct iwl_priv *priv);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+/* tx queue */
+void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index);
+void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx);
+int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo);
+void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/* uCode */
+int iwlagn_load_ucode(struct iwl_priv *priv);
+void iwlagn_rx_calib_result(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_calib_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_init_alive_start(struct iwl_priv *priv);
+int iwlagn_alive_notify(struct iwl_priv *priv);
+
+/* lib */
+void iwl_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status);
+void iwlagn_rx_handler_setup(struct iwl_priv *priv);
+void iwlagn_setup_deferred_work(struct iwl_priv *priv);
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+int iwlagn_send_tx_power(struct iwl_priv *priv);
+void iwlagn_temperature(struct iwl_priv *priv);
+u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset);
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_hw_nic_init(struct iwl_priv *priv);
+
+/* rx */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv);
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
+void iwlagn_rx_replenish(struct iwl_priv *priv);
+void iwlagn_rx_replenish_now(struct iwl_priv *priv);
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwlagn_rxq_stop(struct iwl_priv *priv);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/* tx */
+void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
+void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
+
+static inline u32 iwl_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool iwl_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS) ||
+ (status == TX_STATUS_DIRECT_DONE);
+}
+
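A hedged usage sketch of these two helpers in a TX-response handler (the surrounding variables are illustrative, not the driver's actual reply layout):

	/* 'status' is the status byte from the TX response, 'info' the skb's tx_info */
	info->flags |= iwl_tx_status_to_mac80211(status);
	if (!iwl_is_tx_success(status))
		IWL_DEBUG_TX(priv, "TX failed, status 0x%x\n", status);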
+/* scan */
+void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+
+#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 8b516c5ff0b..7e822777321 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n");
+ IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
return;
}
@@ -638,8 +638,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
iwl_sensitivity_write(priv);
-
- return;
}
EXPORT_SYMBOL(iwl_sensitivity_calibration);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f4e59ae07f8..9aab020c474 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -106,7 +106,7 @@ enum {
REPLY_TX = 0x1c,
REPLY_RATE_SCALE = 0x47, /* 3945 only */
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
/* WiMAX coexistence */
COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
*
* Entries without timestamps contain only event_id and data.
*
+ *
* 2) error_event_table_ptr indicates base of the error log. This contains
- * information about any uCode error that occurs. For 4965, the format
+ * information about any uCode error that occurs. For agn, the format
* of the error log is:
*
* __le32 valid; (nonzero) valid, (0) log is empty
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
* __le32 bcon_time; beacon timer
* __le32 tsf_low; network timestamp function timer
* __le32 tsf_hi; network timestamp function timer
+ * __le32 gp1; GP1 timer register
+ * __le32 gp2; GP2 timer register
+ * __le32 gp3; GP3 timer register
+ * __le32 ucode_ver; uCode version
+ * __le32 hw_ver; HW Silicon version
+ * __le32 brd_ver; HW board version
+ * __le32 log_pc; log program counter
+ * __le32 frame_ptr; frame pointer
+ * __le32 stack_ptr; stack pointer
+ * __le32 hcmd; last host command
+ * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
+ * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
+ * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
+ * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
+ * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
+ * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
+ * __le32 wait_event; wait event() caller address
+ * __le32 l2p_control; L2pControlField
+ * __le32 l2p_duration; L2pDurationField
+ * __le32 l2p_mhvalid; L2pMhValidBits
+ * __le32 l2p_addr_match; L2pAddrMatchStat
+ * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
+ * __le32 u_timestamp; indicates the date and time of the compilation
+ * __le32 reserved;
*
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
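These words are read back one at a time from device memory at fixed offsets from error_event_table_ptr; the new pc and hcmd entries correspond to the reads this patch adds to iwl_dump_nic_error_log(), for example:

	/* 'base' is the error_event_table_ptr reported in the alive response */
	desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
	pc   = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
	hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));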
@@ -1418,7 +1443,7 @@ struct iwl4965_rx_mpdu_res_start {
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
-#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
+#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
struct ieee80211_hdr hdr[0];
} __attribute__ ((packed));
-/* TX command response is sent after *all* transmission attempts.
+/* TX command response is sent after *3945* transmission attempts.
*
* NOTES:
*
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd {
* control line. Receiving is still allowed in this case.
*/
enum {
+ TX_3945_STATUS_SUCCESS = 0x01,
+ TX_3945_STATUS_DIRECT_DONE = 0x02,
+ TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+ TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+ TX_3945_STATUS_FAIL_ABORTED = 0x89,
+ TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+ TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *agn* transmission attempts.
+ *
+ * Both postpone and abort statuses are expected behavior from the uCode; no
+ * special operation is required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all the tx frames in the queues.
+ */
+enum {
TX_STATUS_SUCCESS = 0x01,
TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
TX_STATUS_FAIL_LONG_LIMIT = 0x83,
TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
- TX_STATUS_FAIL_MGMNT_ABORT = 0x85,
- TX_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
TX_STATUS_FAIL_DEST_PS = 0x88,
- TX_STATUS_FAIL_ABORTED = 0x89,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
TX_STATUS_FAIL_BT_RETRY = 0x8a,
TX_STATUS_FAIL_STA_INVALID = 0x8b,
TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
TX_STATUS_FAIL_TID_DISABLE = 0x8d,
- TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
- TX_STATUS_FAIL_TX_LOCKED = 0x90,
- TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+ /* uCode drop due to FW drop request */
+ TX_STATUS_FAIL_FW_DROP = 0x90,
+ /*
+ * uCode drop due to station color mismatch
+ * between tx command and station table
+ */
+ TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
};
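The helpers that interpret these codes, iwl_tx_status_to_mac80211() and iwl_is_tx_success(), now live in iwl-agn.h (shown earlier in this patch). As a small illustrative addition on top of them, using only the ranges defined above (the helper name is hypothetical):

static inline bool example_tx_status_is_postpone(u32 status)
{
	status &= TX_STATUS_MSK;
	return status >= TX_STATUS_POSTPONE_DELAY &&
	       status <= TX_STATUS_POSTPONE_CALC_TTAK;
}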
#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1704,30 +1770,6 @@ enum {
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
-static inline u32 iwl_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-
-
/* *******************************
* TX aggregation status
******************************* */
@@ -2626,7 +2668,6 @@ struct iwl_ssid_ie {
#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
-#define IWL_MAX_PROBE_REQUEST 200
/*
* REPLY_SCAN_CMD = 0x80 (command)
@@ -3086,6 +3127,11 @@ struct statistics_tx {
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
struct statistics_tx_non_phy_agg agg;
+ /*
+ * "tx_power" are optional parameters provided by uCode,
+ * 6000 series is the only device provide the information,
+ * Those are reserved fields for all the other devices
+ */
struct statistics_tx_power tx_power;
__le32 reserved1;
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 049b652bcb5..5a7eca8fb78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -66,38 +66,7 @@ MODULE_LICENSE("GPL");
*/
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
-
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
- {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
- 0, COEX_UNASSOC_IDLE_FLAGS},
- {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
- 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
- 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
- 0, COEX_CALIBRATION_FLAGS},
- {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
- 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
- 0, COEX_CONNECTION_ESTAB_FLAGS},
- {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
- 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
- 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
- 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
- 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
- {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
- {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
- 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
- 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
- {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,8 +84,6 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);
-static irqreturn_t iwl_isr(int irq, void *data);
-
/*
* Parameter order:
* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -143,30 +110,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
};
EXPORT_SYMBOL(iwl_rates);
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
-
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
@@ -198,27 +141,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
int i;
@@ -268,74 +190,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_hw_detect);
-int iwl_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl_rx_queue_reset(priv, rxq);
-
- iwl_rx_replenish(priv);
-
- iwl_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_hw_nic_init);
-
/*
* QoS support
*/
-void iwl_activate_qos(struct iwl_priv *priv, u8 force)
+static void iwl_update_qos(struct iwl_priv *priv)
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
priv->qos_data.def_qos_parm.qos_flags = 0;
- if (priv->qos_data.qos_cap.q_AP.queue_request &&
- !priv->qos_data.qos_cap.q_AP.txop_request)
- priv->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_TXOP_TYPE_MSK;
if (priv->qos_data.qos_active)
priv->qos_data.def_qos_parm.qos_flags |=
QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@@ -343,118 +207,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
if (priv->current_ht_config.is_ht)
priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- if (force || iwl_is_associated(priv)) {
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- priv->qos_data.qos_active,
- priv->qos_data.def_qos_parm.qos_flags);
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ priv->qos_data.qos_active,
+ priv->qos_data.def_qos_parm.qos_flags);
- iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
- sizeof(struct iwl_qosparam_cmd),
- &priv->qos_data.def_qos_parm, NULL);
- }
+ iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
+ sizeof(struct iwl_qosparam_cmd),
+ &priv->qos_data.def_qos_parm, NULL);
}
-EXPORT_SYMBOL(iwl_activate_qos);
-
-/*
- * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
- * (802.11b) (802.11a/g)
- * AC_BK 15 1023 7 0 0
- * AC_BE 15 1023 3 0 0
- * AC_VI 7 15 2 6.016ms 3.008ms
- * AC_VO 3 7 2 3.264ms 1.504ms
- */
-void iwl_reset_qos(struct iwl_priv *priv)
-{
- u16 cw_min = 15;
- u16 cw_max = 1023;
- u8 aifs = 2;
- bool is_legacy = false;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->lock, flags);
- /* QoS always active in AP and ADHOC mode
- * In STA mode wait for association
- */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- priv->iw_mode == NL80211_IFTYPE_AP)
- priv->qos_data.qos_active = 1;
- else
- priv->qos_data.qos_active = 0;
-
- /* check for legacy mode */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
- (priv->iw_mode == NL80211_IFTYPE_STATION &&
- (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
- cw_min = 31;
- is_legacy = 1;
- }
-
- if (priv->qos_data.qos_active)
- aifs = 3;
-
- /* AC_BE */
- priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
-
- if (priv->qos_data.qos_active) {
- /* AC_BK */
- i = 1;
- priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VI */
- i = 2;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(6016);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3008);
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
-
- /* AC_VO */
- i = 3;
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16((cw_min + 1) / 4 - 1);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16((cw_min + 1) / 2 - 1);
- priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- if (is_legacy)
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(3264);
- else
- priv->qos_data.def_qos_parm.ac[i].edca_txop =
- cpu_to_le16(1504);
- } else {
- for (i = 1; i < 4; i++) {
- priv->qos_data.def_qos_parm.ac[i].cw_min =
- cpu_to_le16(cw_min);
- priv->qos_data.def_qos_parm.ac[i].cw_max =
- cpu_to_le16(cw_max);
- priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
- priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
- priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
- }
- }
- IWL_DEBUG_QOS(priv, "set QoS to default \n");
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_reset_qos);
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -721,7 +481,7 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
return new_val;
}
-void iwl_setup_rxon_timing(struct iwl_priv *priv)
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
u64 tsf;
s32 interval_tm, rem;
@@ -735,15 +495,14 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv)
priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- beacon_int = priv->beacon_int;
- priv->rxon_timing.atim_window = 0;
- } else {
- beacon_int = priv->vif->bss_conf.beacon_int;
+ beacon_int = vif->bss_conf.beacon_int;
+ if (vif->type == NL80211_IFTYPE_ADHOC) {
/* TODO: we need to get atim_window from upper stack
* for now we set to 0 */
priv->rxon_timing.atim_window = 0;
+ } else {
+ priv->rxon_timing.atim_window = 0;
}
beacon_int = iwl_adjust_beacon_interval(beacon_int,
@@ -903,23 +662,10 @@ EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
{
- int i;
- int rate_mask;
-
- /* Set rate mask*/
- if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
- rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
- else
- rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
-
- /* Find lowest valid rate */
- for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
- i = iwl_rates[i].next_ieee) {
- if (rate_mask & (1 << i))
- return iwl_rates[i].plcp;
- }
-
- /* No valid rate was found. Assign the lowest one */
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
return IWL_RATE_1M_PLCP;
else
@@ -991,7 +737,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
"extension channel offset 0x%x\n",
le32_to_cpu(rxon->flags), ht_conf->ht_protection,
ht_conf->extension_chan_offset);
- return;
}
EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -1051,19 +796,6 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
}
/**
- * iwl_is_monitor_mode - Determine if interface in monitor mode
- *
- * priv->iw_mode is set in add_interface, but add_interface is
- * never called for monitor mode. The only way mac80211 informs us about
- * monitor mode is through configuring filters (call to configure_filter).
- */
-bool iwl_is_monitor_mode(struct iwl_priv *priv)
-{
- return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
-}
-EXPORT_SYMBOL(iwl_is_monitor_mode);
-
-/**
* iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
@@ -1106,19 +838,6 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
- /* copied from 'iwl_bg_request_scan()' */
- /* Force use of chains B and C (0x6) for Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- * MIMO is not used here, but value is required */
- if (iwl_is_monitor_mode(priv) &&
- !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
- rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- }
-
priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -1174,8 +893,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
}
EXPORT_SYMBOL(iwl_set_rxon_channel);
-void iwl_set_flags_for_band(struct iwl_priv *priv,
- enum ieee80211_band band)
+static void iwl_set_flags_for_band(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
if (band == IEEE80211_BAND_5GHZ) {
priv->staging_rxon.flags &=
@@ -1184,12 +904,12 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
} else {
/* Copied from iwl_post_associate() */
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif && vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -1201,13 +921,18 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
/*
* initialize rxon structure with default values from eeprom
*/
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
const struct iwl_channel_info *ch_info;
+ enum nl80211_iftype type = NL80211_IFTYPE_STATION;
+
+ if (vif)
+ type = vif->type;
memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
- switch (mode) {
+ switch (type) {
case NL80211_IFTYPE_AP:
priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
break;
@@ -1225,7 +950,7 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
break;
default:
- IWL_ERR(priv, "Unsupported interface type %d\n", mode);
+ IWL_ERR(priv, "Unsupported interface type %d\n", type);
break;
}
@@ -1244,18 +969,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
if (!ch_info)
ch_info = &priv->channel_info[0];
- /*
- * in some case A channels are all non IBSS
- * in this case force B/G channel
- */
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
- !(is_channel_ibss(ch_info)))
- ch_info = &priv->channel_info[0];
-
priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
priv->band = ch_info->band;
- iwl_set_flags_for_band(priv, priv->band);
+ iwl_set_flags_for_band(priv, priv->band, vif);
priv->staging_rxon.ofdm_basic_rates =
(IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1286,7 +1003,6 @@ static void iwl_set_rate(struct iwl_priv *priv)
}
priv->active_rate = 0;
- priv->active_rate_basic = 0;
for (i = 0; i < hw->n_bitrates; i++) {
rate = &(hw->bitrates[i]);
@@ -1294,30 +1010,13 @@ static void iwl_set_rate(struct iwl_priv *priv)
priv->active_rate |= (1 << rate->hw_value);
}
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
- priv->active_rate, priv->active_rate_basic);
+ IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
- /*
- * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
- * otherwise set it to the default of all CCK rates and 6, 12, 24 for
- * OFDM
- */
- if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
- priv->staging_rxon.cck_basic_rates =
- ((priv->active_rate_basic &
- IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
- else
- priv->staging_rxon.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
- priv->staging_rxon.ofdm_basic_rates =
- ((priv->active_rate_basic &
- (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
- IWL_FIRST_OFDM_RATE) & 0xFF;
- else
- priv->staging_rxon.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ priv->staging_rxon.cck_basic_rates =
+ (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ priv->staging_rxon.ofdm_basic_rates =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
@@ -1374,6 +1073,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_ERR(priv, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
priv->cfg->ops->lib->dump_nic_error_log(priv);
if (priv->cfg->ops->lib->dump_csr)
priv->cfg->ops->lib->dump_csr(priv);
@@ -1401,7 +1103,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_irq_handle_error);
-int iwl_apm_stop_master(struct iwl_priv *priv)
+static int iwl_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
@@ -1417,7 +1119,6 @@ int iwl_apm_stop_master(struct iwl_priv *priv)
return ret;
}
-EXPORT_SYMBOL(iwl_apm_stop_master);
void iwl_apm_stop(struct iwl_priv *priv)
{
@@ -1561,41 +1262,33 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct iwl_priv *priv = hw->priv;
- __le32 *filter_flags = &priv->staging_rxon.filter_flags;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
changed_flags, *total_flags);
- if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
- if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
- *filter_flags |= RXON_FILTER_PROMISC_MSK;
- else
- *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
- }
- if (changed_flags & FIF_ALLMULTI) {
- if (*total_flags & FIF_ALLMULTI)
- *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
- else
- *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
- }
- if (changed_flags & FIF_CONTROL) {
- if (*total_flags & FIF_CONTROL)
- *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
- else
- *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
- }
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
- *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
- else
- *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
- }
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
- /* We avoid iwl_commit_rxon here to commit the new filter flags
- * since mac80211 will call ieee80211_hw_config immediately.
- * (mc_list is not supported at this time). Otherwise, we need to
- * queue a background iwl_commit_rxon work.
- */
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ priv->staging_rxon.filter_flags &= ~filter_nand;
+ priv->staging_rxon.filter_flags |= filter_or;
+
+ iwlcore_commit_rxon(priv);
+
+ mutex_unlock(&priv->mutex);
*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
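For reference (not part of the patch), each CHK() invocation above expands to a per-flag test on *total_flags that accumulates into filter_or/filter_nand, instead of testing changed_flags and writing the staging RXON directly as the removed branches did. For example, CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK) is equivalent to:

	if (*total_flags & FIF_ALLMULTI)
		filter_or |= RXON_FILTER_ACCEPT_GRP_MSK;
	else
		filter_nand |= RXON_FILTER_ACCEPT_GRP_MSK;

The accumulated masks are then applied to priv->staging_rxon.filter_flags under priv->mutex and committed at once via iwlcore_commit_rxon().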
@@ -1626,10 +1319,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
int ret = 0;
s8 prev_tx_power = priv->tx_power_user_lmt;
- if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
- IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
+ if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
- IWL_TX_POWER_TARGET_POWER_MIN);
+ IWLAGN_TX_POWER_TARGET_POWER_MIN);
return -EINVAL;
}
@@ -1668,286 +1362,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
}
EXPORT_SYMBOL(iwl_set_tx_power);
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->ict_tbl_vir) {
- dma_free_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->ict_tbl_vir, priv->ict_tbl_dma);
- priv->ict_tbl_vir = NULL;
- }
-}
-EXPORT_SYMBOL(iwl_free_isr_ict);
-
-
-/* allocate dram shared table it is a PAGE_SIZE aligned
- * also reset all data related to ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- if (priv->cfg->use_isr_legacy)
- return 0;
- /* allocate shrared data table */
- priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma, GFP_KERNEL);
- if (!priv->ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundry */
- priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->ict_tbl_dma,
- (unsigned long long)priv->aligned_ict_tbl_dma,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- priv->ict_tbl = priv->ict_tbl_vir +
- (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->ict_tbl, priv->ict_tbl_vir,
- (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
-EXPORT_SYMBOL(iwl_alloc_isr_ict);
-
-/* Device is going up inform it about using ICT interrupt table,
- * also we need to tell the driver to start using ICT interrupt.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val, (unsigned long long)priv->aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->use_ict = true;
- priv->ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_reset_ict);
-
-/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_disable_ict);
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->use_ict)
- return iwl_isr(irq, data);
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->ict_tbl[priv->ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that not 0 start with ict_index */
- while (priv->ict_tbl[priv->ict_index]) {
-
- val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->ict_index,
- le32_to_cpu(priv->ict_tbl[priv->ict_index]));
- priv->ict_tbl[priv->ict_index] = 0;
- priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_isr_ict);
-
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock(&priv->lock);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock(&priv->lock);
- return IRQ_NONE;
-}
-
irqreturn_t iwl_isr_legacy(int irq, void *data)
{
struct iwl_priv *priv = data;
u32 inta, inta_mask;
u32 inta_fh;
+ unsigned long flags;
if (!priv)
return IRQ_NONE;
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1985,7 +1409,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
tasklet_schedule(&priv->irq_tasklet);
unplugged:
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_HANDLED;
none:
@@ -1993,12 +1417,12 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
/* only Re-enable if diabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
-int iwl_send_bt_config(struct iwl_priv *priv)
+void iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
.lead_time = BT_LEAD_TIME_DEF,
@@ -2015,8 +1439,9 @@ int iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd);
+ if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(struct iwl_bt_cmd), &bt_cmd))
+ IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_send_bt_config);
@@ -2306,12 +1731,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
cpu_to_le16((params->txop * 32));
priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- priv->qos_data.qos_active = 1;
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwl_activate_qos(priv, 1);
- else if (priv->assoc_id && iwl_is_associated(priv))
- iwl_activate_qos(priv, 0);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2321,12 +1740,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
EXPORT_SYMBOL(iwl_mac_conf_tx);
static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_bss_conf *bss_conf)
+ struct ieee80211_vif *vif)
{
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- IWL_DEBUG_MAC80211(priv, "enter: \n");
+ IWL_DEBUG_MAC80211(priv, "enter:\n");
if (!ht_conf->is_ht)
return;
@@ -2338,10 +1758,10 @@ static void iwl_ht_conf(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = false;
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->bssid);
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int maxstreams;
@@ -2379,7 +1799,6 @@ static void iwl_ht_conf(struct iwl_priv *priv,
static inline void iwl_set_no_assoc(struct iwl_priv *priv)
{
- priv->assoc_id = 0;
iwl_led_disassociate(priv);
/*
* inform the ucode that there is no longer an
@@ -2392,7 +1811,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
}
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -2408,14 +1826,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_BEACON &&
- priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
dev_kfree_skb(priv->ibss_beacon);
priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
}
if (changes & BSS_CHANGED_BEACON_INT) {
- priv->beacon_int = bss_conf->beacon_int;
/* TODO: in AP mode, do something to make this take effect */
}
@@ -2435,8 +1851,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
/* mac80211 only sets assoc when in STATION mode */
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
- bss_conf->assoc) {
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
@@ -2454,7 +1869,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
* mac80211 decides to do both changes at once because
* it will invoke post_associate.
*/
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -2497,7 +1912,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, bss_conf);
+ iwl_ht_conf(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2506,28 +1921,17 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- priv->assoc_id = bss_conf->aid;
- priv->beacon_int = bss_conf->beacon_int;
priv->timestamp = bss_conf->timestamp;
- priv->assoc_capability = bss_conf->assoc_capability;
iwl_led_associate(priv);
- /*
- * We have just associated, don't start scan too early
- * leave time for EAPOL exchange to complete.
- *
- * XXX: do this in mac80211
- */
- priv->next_scan_jiffies = jiffies +
- IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
- priv->cfg->ops->lib->post_associate(priv);
+ priv->cfg->ops->lib->post_associate(priv, vif);
} else
iwl_set_no_assoc(priv);
}
- if (changes && iwl_is_associated(priv) && priv->assoc_id) {
+ if (changes && iwl_is_associated(priv) && bss_conf->aid) {
IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
changes);
ret = iwl_send_rxon_assoc(priv);
@@ -2544,11 +1948,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
memcpy(priv->staging_rxon.bssid_addr,
bss_conf->bssid, ETH_ALEN);
memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ iwlcore_config_ap(priv, vif);
} else
iwl_set_no_assoc(priv);
}
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2568,11 +1981,6 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
return -EIO;
}
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
- return -EIO;
- }
-
spin_lock_irqsave(&priv->lock, flags);
if (priv->ibss_beacon)
@@ -2580,59 +1988,31 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
priv->ibss_beacon = skb;
- priv->assoc_id = 0;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
priv->timestamp = le64_to_cpu(timestamp);
IWL_DEBUG_MAC80211(priv, "leave\n");
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
- priv->cfg->ops->lib->post_associate(priv);
-
+ priv->cfg->ops->lib->post_associate(priv, priv->vif);
return 0;
}
EXPORT_SYMBOL(iwl_mac_beacon_update);
-int iwl_set_mode(struct iwl_priv *priv, int mode)
+static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- if (mode == NL80211_IFTYPE_ADHOC) {
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv,
- priv->band,
- le16_to_cpu(priv->staging_rxon.channel));
-
- if (!ch_info || !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d not IBSS channel\n",
- le16_to_cpu(priv->staging_rxon.channel));
- return -EINVAL;
- }
- }
-
- iwl_connection_init_rx_config(priv, mode);
+ iwl_connection_init_rx_config(priv, vif);
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv);
memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
- iwl_clear_stations_table(priv);
-
- /* dont commit rxon if rf-kill is on*/
- if (!iwl_is_ready_rf(priv))
- return -EAGAIN;
-
- iwlcore_commit_rxon(priv);
-
- return 0;
+ return iwlcore_commit_rxon(priv);
}
-EXPORT_SYMBOL(iwl_set_mode);
-int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
int err = 0;
@@ -2641,6 +2021,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ if (WARN_ON(!iwl_is_ready_rf(priv))) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
err = -EOPNOTSUPP;
@@ -2650,15 +2035,18 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
priv->vif = vif;
priv->iw_mode = vif->type;
- if (vif->addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
- memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
- }
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+
+ err = iwl_set_mode(priv, vif);
+ if (err)
+ goto out_err;
- if (iwl_set_mode(priv, vif->type) == -EAGAIN)
- /* we are not ready, will run again when ready */
- set_bit(STATUS_MODE_PENDING, &priv->status);
+ goto out;
+ out_err:
+ priv->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
out:
mutex_unlock(&priv->mutex);
@@ -2668,7 +2056,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2694,10 +2082,6 @@ EXPORT_SYMBOL(iwl_mac_remove_interface);
/**
* iwl_mac_config - mac80211 config callback
- *
- * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
- * be set inappropriately and the driver currently sets the hardware up to
- * use it whenever needed.
*/
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -2752,15 +2136,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
goto set_ch_out;
}
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !is_channel_ibss(ch_info)) {
- IWL_ERR(priv, "channel %d in band %d not "
- "IBSS channel\n",
- conf->channel->hw_value, conf->channel->band);
- ret = -EINVAL;
- goto set_ch_out;
- }
-
spin_lock_irqsave(&priv->lock, flags);
/* Configure HT40 channels */
@@ -2794,7 +2169,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_rxon_channel(priv, conf->channel);
iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, conf->channel->band);
+ iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
spin_unlock_irqrestore(&priv->lock, flags);
if (iwl_is_associated(priv) &&
(le16_to_cpu(priv->active_rxon.channel) != ch) &&
@@ -2833,6 +2208,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
+ if (changed & IEEE80211_CONF_CHANGE_QOS) {
+ bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->qos_data.qos_active = qos_active;
+ iwl_update_qos(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2867,12 +2251,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_reset_qos(priv);
-
spin_lock_irqsave(&priv->lock, flags);
- priv->assoc_id = 0;
- priv->assoc_capability = 0;
- priv->assoc_station_added = 0;
/* new association get rid of ibss beacon skb */
if (priv->ibss_beacon)
@@ -2880,10 +2259,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
priv->ibss_beacon = NULL;
- priv->beacon_int = priv->vif->bss_conf.beacon_int;
priv->timestamp = 0;
- if ((priv->iw_mode == NL80211_IFTYPE_STATION))
- priv->beacon_int = 0;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2896,17 +2272,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
/* we are restarting association process
* clear RXON_FILTER_ASSOC_MSK bit
*/
- if (priv->iw_mode != NL80211_IFTYPE_AP) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv);
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
- IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
- mutex_unlock(&priv->mutex);
- return;
- }
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwlcore_commit_rxon(priv);
iwl_set_rate(priv);
@@ -2923,7 +2291,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
GFP_KERNEL);
if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq \n");
+ IWL_ERR(priv, "Not enough memory for txq\n");
return -ENOMEM;
}
return 0;
@@ -2937,34 +2305,6 @@ void iwl_free_txq_mem(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_free_txq_mem);
-int iwl_send_wimax_coex(struct iwl_priv *priv)
-{
- struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
-
- if (priv->cfg->support_wimax_coexist) {
- /* UnMask wake up src at associated sleep */
- coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
- /* UnMask wake up src at unassociated sleep */
- coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
- memcpy(coex_cmd.sta_prio, cu_priorities,
- sizeof(struct iwl_wimax_coex_event_entry) *
- COEX_NUM_OF_EVENTS);
-
- /* enabling the coexistence feature */
- coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
- /* enabling the priorities tables */
- coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
- } else {
- /* coexistence is disabled */
- memset(&coex_cmd, 0, sizeof(coex_cmd));
- }
- return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
- sizeof(coex_cmd), &coex_cmd);
-}
-EXPORT_SYMBOL(iwl_send_wimax_coex);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -3403,6 +2743,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
}
return 0;
}
+EXPORT_SYMBOL(iwl_force_reset);
+
+/**
+ * iwl_bg_monitor_recover - Timer callback to check for stuck queues and recover
+ *
+ * Under normal conditions (no queue is stuck), the timer is re-armed to
+ * fire monitor_recover_period milliseconds after it last expired. When a
+ * queue's read_ptr has not moved, the timer is shortened to 100 ms, both
+ * 1) to reduce the chance that the read_ptr has merely wrapped around
+ *    (i.e. the queue is not actually stuck), and
+ * 2) to detect stuck queues more quickly, before the station and AP
+ *    disassociate from each other.
+ *
+ * This function monitors all the tx queues and recovers if any of them
+ * is stuck.
+ * 1. It first checks the cmd queue for a stuck condition. If it is stuck,
+ *    it recovers by reloading the firmware and returns.
+ * 2. Then, if the station is associated, it checks the other queues.
+ *    If any of them is stuck, it recovers by reloading the firmware.
+ * Note: the queue read_ptr must be seen at the same place MAX_REPEAT+1
+ * times before the queue is considered stuck.
+ */
+/*
+ * The maximum number of times the tx queue read pointer may be found at
+ * the same place without the queue being considered stuck.
+ */
+#define MAX_REPEAT (2)
+static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ /* queue is empty, skip */
+ if (q->read_ptr != q->write_ptr) {
+ if (q->read_ptr == q->last_read_ptr) {
+ /* a queue has not been read from last time */
+ if (q->repeat_same_read_ptr > MAX_REPEAT) {
+ IWL_ERR(priv,
+					"queue %d stuck %d times. Fw reload.\n",
+ q->id, q->repeat_same_read_ptr);
+ q->repeat_same_read_ptr = 0;
+ iwl_force_reset(priv, IWL_FW_RESET);
+ } else {
+ q->repeat_same_read_ptr++;
+ IWL_DEBUG_RADIO(priv,
+					"queue %d, not read %d times\n",
+ q->id,
+ q->repeat_same_read_ptr);
+ mod_timer(&priv->monitor_recover, jiffies +
+ msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
+ }
+ return 1;
+ } else {
+ q->last_read_ptr = q->read_ptr;
+ q->repeat_same_read_ptr = 0;
+ }
+ }
+ return 0;
+}
+
+void iwl_bg_monitor_recover(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+ int cnt;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (iwl_is_associated(priv)) {
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == IWL_CMD_QUEUE_NUM)
+ continue;
+ if (iwl_check_stuck_queue(priv, cnt))
+ return;
+ }
+ }
+ /*
+ * Reschedule the timer to occur in
+ * priv->cfg->monitor_recover_period
+ */
+ mod_timer(&priv->monitor_recover,
+ jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
+}
+EXPORT_SYMBOL(iwl_bg_monitor_recover);
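A minimal sketch (not part of this patch, and the setup location is hypothetical) of how the recovery timer could be wired up during driver setup, assuming priv->monitor_recover is the struct timer_list referenced above and the device's recover_from_tx_stall op points at iwl_bg_monitor_recover:

	init_timer(&priv->monitor_recover);
	priv->monitor_recover.data = (unsigned long)priv;
	priv->monitor_recover.function =
		priv->cfg->ops->lib->recover_from_tx_stall;

	/* arm it once the firmware is alive, if the device provides the op */
	if (priv->cfg->ops->lib->recover_from_tx_stall)
		mod_timer(&priv->monitor_recover, jiffies +
			  msecs_to_jiffies(priv->cfg->monitor_recover_period));

The callback then either re-arms itself for monitor_recover_period milliseconds or, when a read pointer has not advanced, for the shorter 100 ms interval described in the comment above.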
#ifdef CONFIG_PM
@@ -3432,6 +2865,12 @@ int iwl_pci_resume(struct pci_dev *pdev)
struct iwl_priv *priv = pci_get_drvdata(pdev);
int ret;
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
pci_set_power_state(pdev, PCI_D0);
ret = pci_enable_device(pdev);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 36940a9ec6b..7e5a5ba41fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,6 +90,7 @@ struct iwl_hcmd_ops {
int (*commit_rxon)(struct iwl_priv *priv);
void (*set_rxon_chain)(struct iwl_priv *priv);
int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
+ void (*send_bt_config)(struct iwl_priv *priv);
};
struct iwl_hcmd_utils_ops {
@@ -105,6 +106,7 @@ struct iwl_hcmd_utils_ops {
__le32 *tx_flags);
int (*calc_rssi)(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp);
+ void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
};
struct iwl_apm_ops {
@@ -114,23 +116,21 @@ struct iwl_apm_ops {
int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
};
+struct iwl_debugfs_ops {
+ ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+};
+
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
void (*set_ct_kill)(struct iwl_priv *priv);
void (*set_calib_version)(struct iwl_priv *priv);
};
-struct iwl_ucode_ops {
- u32 (*get_header_size)(u32);
- u32 (*get_build)(const struct iwl_ucode_header *, u32);
- u32 (*get_inst_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_init_data_size)(const struct iwl_ucode_header *, u32);
- u32 (*get_boot_size)(const struct iwl_ucode_header *, u32);
- u8 * (*get_data)(const struct iwl_ucode_header *, u32);
-};
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
@@ -180,8 +180,9 @@ struct iwl_lib_ops {
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
- void (*post_associate) (struct iwl_priv *priv);
- void (*config_ap) (struct iwl_priv *priv);
+ void (*post_associate)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
+ void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
irqreturn_t (*isr) (int irq, void *data);
/* eeprom operations (as defined in iwl-eeprom.h) */
@@ -190,7 +191,17 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
/* station management */
- void (*add_bcast_station)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+ /* recover from tx queue stall */
+ void (*recover_from_tx_stall)(unsigned long data);
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ /* check for ack health */
+ bool (*check_ack_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+ struct iwl_debugfs_ops debugfs_ops;
};
struct iwl_led_ops {
@@ -200,7 +211,6 @@ struct iwl_led_ops {
};
struct iwl_ops {
- const struct iwl_ucode_ops *ucode;
const struct iwl_lib_ops *lib;
const struct iwl_hcmd_ops *hcmd;
const struct iwl_hcmd_utils_ops *utils;
@@ -237,6 +247,18 @@ struct iwl_mod_params {
* @support_wimax_coexist: support wimax/wifi co-exist
* @plcp_delta_threshold: plcp error rate threshold used to trigger
* radio tuning when there is a high receiving plcp error rate
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @monitor_recover_period: default timer used to check stuck queues
+ * @temperature_kelvin: temperature reported by uCode in kelvin
+ * @max_event_log_size: size of the buffer used for ucode event logging
+ * @tx_power_by_driver: tx power calibration performed by driver
+ * instead of uCode
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver is capable of performing the
+ *	sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver is capable of performing the
+ *	chain noise calibration operation
+ * @scan_antennas: available antenna for scan operation
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -295,6 +317,15 @@ struct iwl_cfg {
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
+ /* timer period for monitor the driver queues */
+ u32 monitor_recover_period;
+ bool temperature_kelvin;
+ u32 max_event_log_size;
+ const bool tx_power_by_driver;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+ u8 scan_antennas[IEEE80211_NUM_BANDS];
};
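Purely for illustration (no such device entry exists in this patch), the new iwl_cfg fields might be populated in a per-device configuration as follows; the structure name and all values are made up:

static struct iwl_cfg iwl_example_cfg = {
	/* existing fields (name, ucode_api_max, ops, ...) omitted */
	.plcp_delta_threshold = 50,		/* illustrative threshold */
	.monitor_recover_period = 1000,		/* ms between stuck-queue checks */
	.temperature_kelvin = true,
	.max_event_log_size = 512,
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
	.scan_antennas[IEEE80211_BAND_2GHZ] = ANT_ABC,
	.scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};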
/***************************
@@ -304,8 +335,7 @@ struct iwl_cfg {
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
struct ieee80211_ops *hw_ops);
void iwl_hw_detect(struct iwl_priv *priv);
-void iwl_reset_qos(struct iwl_priv *priv);
-void iwl_activate_qos(struct iwl_priv *priv, u8 force);
+void iwl_activate_qos(struct iwl_priv *priv);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@@ -316,8 +346,8 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *sta_ht_inf);
-void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
-void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode);
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+ struct ieee80211_vif *vif);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
@@ -326,29 +356,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
void iwl_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
-int iwl_hw_nic_init(struct iwl_priv *priv);
int iwl_set_hw_params(struct iwl_priv *priv);
-bool iwl_is_monitor_mode(struct iwl_priv *priv);
-void iwl_post_associate(struct iwl_priv *priv);
+void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
-int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_config_ap(struct iwl_priv *priv);
+void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
__le32 *tx_flags);
-int iwl_send_wimax_coex(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -411,26 +437,24 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
/*****************************************************
* RX
******************************************************/
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_replenish(struct iwl_priv *priv);
-void iwl_rx_replenish_now(struct iwl_priv *priv);
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+bool iwl_good_ack_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -442,14 +466,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* TX
******************************************************/
-int iwl_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl_txq_ctx_reset(struct iwl_priv *priv);
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@@ -460,9 +480,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
/*****************************************************
* TX power
****************************************************/
@@ -472,10 +489,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
* Rate
******************************************************************************/
-void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
-int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
@@ -505,7 +519,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void iwl_bg_start_internal_scan(struct work_struct *work);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -515,7 +532,8 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
u8 n_probes);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band);
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
void iwl_bg_scan_check(struct work_struct *data);
void iwl_bg_abort_scan(struct work_struct *work);
void iwl_bg_scan_completed(struct work_struct *work);
@@ -530,6 +548,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
/*******************************************************************************
* Calibrations - implemented in iwl-calib.c
@@ -563,11 +582,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
* PCI *
*****************************************************/
irqreturn_t iwl_isr_legacy(int irq, void *data);
-int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
{
@@ -577,6 +591,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
+
+void iwl_bg_monitor_recover(unsigned long data);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
int iwl_pci_resume(struct pci_dev *pdev);
@@ -625,7 +642,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
#define STATUS_SCAN_HW 15
#define STATUS_POWER_PMI 16
#define STATUS_FW_ERROR 17
-#define STATUS_MODE_PENDING 18
static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -672,23 +688,16 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
}
extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
-extern int iwl_send_bt_config(struct iwl_priv *priv);
+extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
extern int iwl_verify_ucode(struct iwl_priv *priv);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags);
-extern void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init);
void iwl_apm_stop(struct iwl_priv *priv);
-int iwl_apm_stop_master(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
-void iwl_setup_rxon_timing(struct iwl_priv *priv);
+void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv);
@@ -697,9 +706,10 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
{
return priv->cfg->ops->hcmd->commit_rxon(priv);
}
-static inline void iwlcore_config_ap(struct iwl_priv *priv)
+static inline void iwlcore_config_ap(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
- priv->cfg->ops->lib->config_ap(priv);
+ priv->cfg->ops->lib->config_ap(priv, vif);
}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
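
The iwl_bg_monitor_recover() timer callback declared above pairs with the last_read_ptr/repeat_same_read_ptr bookkeeping added to struct iwl_queue in the iwl-dev.h hunks below. A minimal, stand-alone sketch of that stuck-queue watchdog idea follows; the thresholds, types and function names here are illustrative assumptions, not the driver's actual code.

#include <stdbool.h>
#include <stdio.h>

struct txq_state {
	int read_ptr;                  /* consumer index, advanced by the device */
	int write_ptr;                 /* producer index, advanced by the host */
	int last_read_ptr;             /* snapshot taken at the previous check */
	unsigned repeat_same_read_ptr; /* consecutive checks with no progress */
};

/* returns true when the queue made no progress for max_repeats checks
 * even though work is still pending, i.e. it looks stuck */
static bool monitor_recover_check(struct txq_state *q, unsigned max_repeats)
{
	if (q->read_ptr == q->last_read_ptr && q->read_ptr != q->write_ptr) {
		if (++q->repeat_same_read_ptr >= max_repeats)
			return true;
	} else {
		q->repeat_same_read_ptr = 0;
	}
	q->last_read_ptr = q->read_ptr;
	return false;
}

int main(void)
{
	struct txq_state q = { .read_ptr = 3, .write_ptr = 7 };
	int i;

	for (i = 0; i < 5; i++)
		if (monitor_recover_check(&q, 3))
			printf("check %d: queue stuck, force a firmware reload\n", i);
	return 0;
}
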
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 808b7146bea..254c35ae8b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
+#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
#define CSR_HW_REV_TYPE_NONE (0x00000F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 1c7b53d511c..5c2bcef5df0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -78,6 +78,8 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_dbgfs_unregister(struct iwl_priv *priv);
+extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
+ int bufsz);
#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b6e1b0ebe23..9659c5d01df 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,6 +106,26 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
.open = iwl_dbgfs_open_file_generic, \
};
+int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(priv->statistics.flag));
+ if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(priv->statistics.flag) &
+ UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+ return p;
+}
+EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
@@ -561,8 +581,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_POWER_PMI, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
test_bit(STATUS_FW_ERROR, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
- test_bit(STATUS_MODE_PENDING, &priv->status));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -661,7 +679,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
for (i = 0; i < AC_NUM; i++) {
pos += scnprintf(buf + pos, bufsz - pos,
@@ -673,8 +690,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
priv->qos_data.def_qos_parm.ac[i].aifsn,
priv->qos_data.def_qos_parm.ac[i].edca_txop);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
@@ -684,7 +700,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"allow blinking: %s\n",
@@ -698,8 +713,7 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
priv->last_blink_time);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
@@ -712,7 +726,6 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"Thermal Throttling Mode: %s\n",
@@ -732,8 +745,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
"HT mode: %d\n",
restriction->is_ht);
}
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
@@ -770,13 +782,11 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos,
"11n 40MHz Mode: %s\n",
priv->disable_ht40 ? "Disabled" : "Enabled");
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
@@ -1044,474 +1054,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
- int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p,
- "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->statistics.flag));
- if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p,
- "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p,
- "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-static const char ucode_stats_header[] =
- "%-32s current acumulative delta max\n";
-static const char ucode_stats_short_format[] =
- " %-30s %10u\n";
-static const char ucode_stats_format[] =
- " %-30s %10u %10u %10u %10u\n";
-
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->statistics.rx.ofdm;
- cck = &priv->statistics.rx.cck;
- general = &priv->statistics.rx.general;
- ht = &priv->statistics.rx.ofdm_ht;
- accum_ofdm = &priv->accum_statistics.rx.ofdm;
- accum_cck = &priv->accum_statistics.rx.cck;
- accum_general = &priv->accum_statistics.rx.general;
- accum_ht = &priv->accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->delta_statistics.rx.ofdm;
- delta_cck = &priv->delta_statistics.rx.cck;
- delta_general = &priv->delta_statistics.rx.general;
- delta_ht = &priv->delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->max_delta.rx.ofdm;
- max_cck = &priv->max_delta.rx.cck;
- max_general = &priv->max_delta.rx.general;
- max_ht = &priv->max_delta.rx.ofdm_ht;
-
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err,
- delta_ofdm->overrun_err, max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good,
- delta_ofdm->crc32_good, max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill,
- delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err,
- delta_cck->mh_format_err, max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
@@ -1519,173 +1068,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->statistics.tx;
- accum_tx = &priv->accum_statistics.tx;
- delta_tx = &priv->delta_statistics.tx;
- max_tx = &priv->max_delta.tx;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1693,107 +1077,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general *general, *accum_general;
- struct statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /* the statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->statistics.general;
- dbg = &priv->statistics.general.dbg;
- div = &priv->statistics.general.div;
- accum_general = &priv->accum_statistics.general;
- delta_general = &priv->delta_statistics.general;
- max_general = &priv->max_delta.general;
- accum_dbg = &priv->accum_statistics.general.dbg;
- delta_dbg = &priv->delta_statistics.general.dbg;
- max_dbg = &priv->max_delta.general.dbg;
- accum_div = &priv->accum_statistics.general.div;
- delta_div = &priv->delta_statistics.general.div;
- max_div = &priv->max_delta.general.div;
- pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
- "temperature_m:",
- le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
- "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+ user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -1935,46 +1220,6 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[128];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct statistics_tx *tx;
-
- if (!iwl_is_alive(priv))
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- else {
- tx = &priv->statistics.tx;
- if (tx->tx_power.ant_a ||
- tx->tx_power.ant_b ||
- tx->tx_power.ant_c) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
- tx->tx_power.ant_a)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna A: 0x%X\n",
- tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
- tx->tx_power.ant_b)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna B: 0x%X\n",
- tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
- tx->tx_power.ant_c)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tantenna C: 0x%X\n",
- tx->tx_power.ant_c);
- } else
- pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2052,7 +1297,6 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
int pos = 0;
char buf[128];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
priv->event_log.ucode_trace ? "On" : "Off");
@@ -2063,8 +1307,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
priv->event_log.wraps_more_count);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
@@ -2096,6 +1339,31 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->active_rxon.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2125,13 +1393,11 @@ static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
priv->missed_beacon_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
@@ -2160,27 +1426,6 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
return count;
}
-static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int scan;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &scan) != 1)
- return -EINVAL;
-
- iwl_internal_short_hw_scan(priv);
-
- return count;
-}
-
static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -2189,13 +1434,11 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
int pos = 0;
char buf[12];
const size_t bufsz = sizeof(buf);
- ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
priv->cfg->plcp_delta_threshold);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- return ret;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
@@ -2288,7 +1531,6 @@ DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
@@ -2296,9 +1538,10 @@ DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_WRITE_FILE_OPS(internal_scan);
DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
/*
* Create the debugfs files and directories
@@ -2334,8 +1577,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ if (!priv->cfg->broken_powersave) {
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
+ S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ }
DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
@@ -2343,29 +1589,33 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (priv->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (priv->cfg->chain_noise_calib_by_driver)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ if (priv->cfg->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- }
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ if (priv->cfg->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &priv->disable_sens_cal);
+ if (priv->cfg->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &priv->disable_chain_noise_cal);
+ if (priv->cfg->tx_power_by_driver)
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
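
The hunks above strip the aggregate uCode rx/tx/general statistics formatting out of iwl-debugfs.c and leave thin wrappers that forward to per-device callbacks (priv->cfg->ops->lib->debugfs_ops.rx_stats_read and friends). Below is a minimal user-space sketch of that dispatch shape, assuming a simplified ops table; only the callback names are taken from the diff, everything else is illustrative.

#include <stdio.h>
#include <sys/types.h>

/* models the debugfs_ops table the wrappers above dispatch through */
struct dbgfs_ops {
	ssize_t (*rx_stats_read)(char *buf, size_t count);
	ssize_t (*tx_stats_read)(char *buf, size_t count);
	ssize_t (*general_stats_read)(char *buf, size_t count);
};

/* a device-specific implementation supplies the actual formatting */
static ssize_t agn_rx_stats_read(char *buf, size_t count)
{
	return snprintf(buf, count, "Statistics_Rx - OFDM: ...\n");
}

/* the core wrapper only forwards, as iwl_dbgfs_ucode_rx_stats_read now does */
static ssize_t core_rx_stats_read(const struct dbgfs_ops *ops,
				  char *buf, size_t count)
{
	return ops->rx_stats_read(buf, count);
}

int main(void)
{
	struct dbgfs_ops ops = { .rx_stats_read = agn_rx_stats_read };
	char buf[64];

	core_rx_stats_read(&ops, buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}
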
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ef1720a852e..f3f3473c5c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,6 +43,7 @@
#include "iwl-debug.h"
#include "iwl-4965-hw.h"
#include "iwl-3945-hw.h"
+#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
@@ -56,6 +57,7 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
+extern struct iwl_cfg iwl6000g2a_2agn_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
struct iwl_tx_queue;
-/* shared structures from iwl-5000.c */
-extern struct iwl_mod_params iwl50_mod_params;
-extern struct iwl_ucode_ops iwl5000_ucode;
-extern struct iwl_lib_ops iwl5000_lib;
-extern struct iwl_hcmd_ops iwl5000_hcmd;
-extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
-
-/* shared functions from iwl-5000.c */
-extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
-extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
- u8 *data);
-extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
- __le32 *tx_flags);
-extern int iwl5000_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp);
-extern void iwl5000_nic_config(struct iwl_priv *priv);
-extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
-extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl5000_load_ucode(struct iwl_priv *priv);
-extern void iwl5000_init_alive_start(struct iwl_priv *priv);
-extern int iwl5000_alive_notify(struct iwl_priv *priv);
-extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx);
-extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo);
-extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
-extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
-extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
-extern int iwl5000_send_tx_power(struct iwl_priv *priv);
-extern void iwl5000_temperature(struct iwl_priv *priv);
-
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -183,6 +146,10 @@ struct iwl_queue {
int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
+ /* used for monitoring and recovering a stuck queue */
+ int last_read_ptr; /* stores the previous read_ptr */
+ /* number of times read_ptr and last_read_ptr have been the same */
+ u8 repeat_same_read_ptr;
dma_addr_t dma_addr; /* physical addr for BD's */
int n_window; /* safe queue window */
u32 id;
@@ -304,13 +271,11 @@ struct iwl_channel_info {
struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
};
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
+#define IWL_TX_FIFO_BK 0
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_UNUSED -1
/* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate the 4 standard TX queues, 1 command
@@ -361,13 +326,6 @@ enum {
#define DEF_CMD_PAYLOAD_SIZE 320
-/*
- * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
- * SNAP header and alignment. It should also be big enough for 802.11
- * control frames.
- */
-#define IWL_LINK_HDR_MAX 64
-
/**
* struct iwl_device_cmd
*
@@ -519,38 +477,28 @@ struct iwl_ht_config {
u8 non_GF_STA_present;
};
-union iwl_qos_capabity {
- struct {
- u8 edca_count:4; /* bit 0-3 */
- u8 q_ack:1; /* bit 4 */
- u8 queue_request:1; /* bit 5 */
- u8 txop_request:1; /* bit 6 */
- u8 reserved:1; /* bit 7 */
- } q_AP;
- struct {
- u8 acvo_APSD:1; /* bit 0 */
- u8 acvi_APSD:1; /* bit 1 */
- u8 ac_bk_APSD:1; /* bit 2 */
- u8 ac_be_APSD:1; /* bit 3 */
- u8 q_ack:1; /* bit 4 */
- u8 max_len:2; /* bit 5-6 */
- u8 more_data_ack:1; /* bit 7 */
- } q_STA;
- u8 val;
-};
-
/* QoS structures */
struct iwl_qos_info {
int qos_active;
- union iwl_qos_capabity qos_cap;
struct iwl_qosparam_cmd def_qos_parm;
};
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
+ * held.
+ */
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used;
struct iwl_hw_key keyinfo;
+ struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+ u8 sta_id;
};
/*
@@ -559,14 +507,28 @@ struct iwl_station_entry {
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by the driver. This structure is placed in that
* space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and agn!
*/
struct iwl_station_priv {
+ struct iwl_station_priv_common common;
struct iwl_lq_sta lq_sta;
atomic_t pending_frames;
bool client;
bool asleep;
};
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+ u8 ibss_bssid_sta_id;
+};
+
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
void *v_addr; /* access by driver */
@@ -574,7 +536,7 @@ struct fw_desc {
u32 len; /* bytes */
};
-/* uCode file layout */
+/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
union {
@@ -597,7 +559,62 @@ struct iwl_ucode_header {
} v2;
} u;
};
-#define UCODE_HEADER_SIZE(ver) ((ver) == 1 ? 24 : 28)
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, each of which specifies
+ * some piece of data. To facilitate "groups", for example
+ * different instruction images with different capabilities
+ * bundled with the same init image, an alternative mechanism
+ * is provided:
+ * When the alternative field is 0, that means that the item
+ * is always valid. When it is non-zero, then it is only
+ * valid in conjunction with items of the same alternative,
+ * in which case the driver (user) selects one alternative
+ * to use.
+ */
+
+enum iwl_ucode_tlv_type {
+ IWL_UCODE_TLV_INVALID = 0, /* unused */
+ IWL_UCODE_TLV_INST = 1,
+ IWL_UCODE_TLV_DATA = 2,
+ IWL_UCODE_TLV_INIT = 3,
+ IWL_UCODE_TLV_INIT_DATA = 4,
+ IWL_UCODE_TLV_BOOT = 5,
+ IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
+};
+
+struct iwl_ucode_tlv {
+ __le16 type; /* see above */
+ __le16 alternative; /* see comment */
+ __le32 length; /* not including type/length fields */
+ u8 data[0];
+} __attribute__ ((packed));
+
+#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwl_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by its first four bytes being
+ * zero, which is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ __le32 zero;
+ __le32 magic;
+ u8 human_readable[64];
+ __le32 ver; /* major/minor/API/serial */
+ __le32 build;
+ __le64 alternatives; /* bitmask of valid alternatives */
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ u8 data[0];
+};
struct iwl4965_ibss_seq {
u8 mac[ETH_ALEN];
@@ -1039,6 +1056,11 @@ struct iwl_event_log {
#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+/* timer constants used to monitor and recover stuck tx queues, in msecs */
+#define IWL_MONITORING_PERIOD (1000)
+#define IWL_ONE_HUNDRED_MSECS (100)
+#define IWL_SIXTY_SECS (60000)
+
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
@@ -1092,10 +1114,6 @@ struct iwl_priv {
struct iwl_channel_info *channel_info; /* channel info array */
u8 channel_count; /* # of channels */
- /* each calibration channel group in the EEPROM has a derived
- * clip setting for each rate. 3945 only.*/
- const struct iwl3945_clip_group clip39_groups[5];
-
/* thermal calibration */
s32 temperature; /* degrees Kelvin */
s32 last_temperature;
@@ -1104,12 +1122,10 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long next_scan_jiffies;
unsigned long scan_start;
- unsigned long scan_pass_start;
unsigned long scan_start_tsf;
- void *scan;
- int scan_bands;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
@@ -1168,16 +1184,13 @@ struct iwl_priv {
u64 led_tpt;
u16 active_rate;
- u16 active_rate_basic;
- u8 assoc_station_added;
u8 start_calib;
struct iwl_sensitivity_data sensitivity_data;
struct iwl_chain_noise_data chain_noise_data;
__le16 sensitivity_tbl[HD_TABLE_SIZE];
struct iwl_ht_config current_ht_config;
- u8 last_phy_res[100];
/* Rate scaling data */
u8 retry_rate;
@@ -1197,9 +1210,6 @@ struct iwl_priv {
unsigned long status;
- int last_rx_rssi; /* From Rx packet statistics */
- int last_rx_noise; /* From beacon statistics */
-
/* counts mgmt, ctl, and data packets */
struct traffic_stats tx_stats;
struct traffic_stats rx_stats;
@@ -1218,18 +1228,14 @@ struct iwl_priv {
#endif
/* context information */
- u16 rates_mask;
-
- u8 bssid[ETH_ALEN];
- u16 rts_threshold;
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
u8 mac_addr[ETH_ALEN];
/*station table variables */
spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 default_wep_key;
+ struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
u8 key_mapping_key;
unsigned long ucode_key_table;
@@ -1244,10 +1250,6 @@ struct iwl_priv {
u8 mac80211_registered;
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1259,29 +1261,67 @@ struct iwl_priv {
/* Last Rx'd beacon timestamp */
u64 timestamp;
- u16 beacon_int;
struct ieee80211_vif *vif;
- /*Added for 3945 */
- void *shared_virt;
- dma_addr_t shared_phys;
- /*End*/
- struct iwl_hw_params hw_params;
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
- /* INT ICT Table */
- __le32 *ict_tbl;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- void *ict_tbl_vir;
- u32 inta;
- bool use_ict;
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl3945_notif_statistics accum_statistics;
+ struct iwl3945_notif_statistics delta_statistics;
+ struct iwl3945_notif_statistics max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet statistics */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct iwl3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
+ struct {
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ /*
+ * number of TIDs that currently have aggregation (AGG)
+ * enabled; 0 means no aggregation
+ */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+ } _agn;
+#endif
+ };
+
+ struct iwl_hw_params hw_params;
u32 inta_mask;
- /* Current association information needed to configure the
- * hardware */
- u16 assoc_id;
- u16 assoc_capability;
struct iwl_qos_info qos_data;
@@ -1291,7 +1331,6 @@ struct iwl_priv {
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
struct work_struct ct_enter;
@@ -1304,10 +1343,6 @@ struct iwl_priv {
struct delayed_work alive_start;
struct delayed_work scan_check;
- /*For 3945 only*/
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
/* TX Power */
s8 tx_power_user_lmt;
s8 tx_power_device_lmt;
@@ -1339,13 +1374,8 @@ struct iwl_priv {
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
struct timer_list ucode_trace;
+ struct timer_list monitor_recover;
bool hw_ready;
- /*For 3945*/
-#define IWL_DEFAULT_TX_POWER 0x0F
-
- struct iwl3945_notif_statistics statistics_39;
-
- u32 sta_supp_rates;
struct iwl_event_log event_log;
}; /*iwl_priv */
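
struct iwl_ucode_tlv and struct iwl_tlv_ucode_header above define the new TLV firmware image layout: a zero word plus magic, followed by TLVs whose payloads are padded to a multiple of 4. The following is a minimal stand-alone sketch of walking that layout under those assumptions; endianness handling and the alternatives bitmask are ignored, and parse_tlv_ucode() with its mirrored struct types is an illustrative helper, not driver API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IWL_TLV_UCODE_MAGIC 0x0a4c5749

struct tlv_hdr {                /* models struct iwl_tlv_ucode_header */
	uint32_t zero;
	uint32_t magic;
	uint8_t  human_readable[64];
	uint32_t ver;
	uint32_t build;
	uint64_t alternatives;
	/* TLV data follows */
};

struct tlv {                    /* models struct iwl_ucode_tlv */
	uint16_t type;
	uint16_t alternative;
	uint32_t length;            /* payload only, excluding this header */
	/* payload follows, padded to a multiple of 4 */
};

static int parse_tlv_ucode(const uint8_t *img, size_t len)
{
	struct tlv_hdr hdr;
	size_t off = sizeof(hdr);

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, img, sizeof(hdr));
	/* a zero first word plus the magic distinguishes TLV images
	 * from the v1/v2 header, whose first word is never zero */
	if (hdr.zero != 0 || hdr.magic != IWL_TLV_UCODE_MAGIC)
		return -1;

	while (off + sizeof(struct tlv) <= len) {
		struct tlv t;

		memcpy(&t, img + off, sizeof(t));
		off += sizeof(t);
		if (t.length > len - off)
			return -1;                  /* truncated image */
		printf("TLV type %u, alt %u, %u bytes\n",
		       (unsigned)t.type, (unsigned)t.alternative,
		       (unsigned)t.length);
		off += ((size_t)t.length + 3) & ~(size_t)3; /* 4-byte padding */
	}
	return 0;
}

int main(void)
{
	uint8_t img[sizeof(struct tlv_hdr) + sizeof(struct tlv) + 4] = { 0 };
	struct tlv_hdr hdr = { .zero = 0, .magic = IWL_TLV_UCODE_MAGIC };
	struct tlv t = { .type = 6 /* IWL_UCODE_TLV_PROBE_MAX_LEN */, .length = 4 };

	memcpy(img, &hdr, sizeof(hdr));
	memcpy(img + sizeof(hdr), &t, sizeof(t));
	return parse_tlv_ucode(img, sizeof(img));
}
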
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2ffc2edbf4f..4a487639d93 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index fb5bb487f3b..ee11452519e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -590,9 +590,16 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[addr / 2] = cpu_to_le16(r >> 16);
}
}
+
+ IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
+ (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ ? "OTP" : "EEPROM",
+ iwl_eeprom_query16(priv, EEPROM_VERSION));
+
ret = 0;
done:
priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
err:
if (ret)
iwl_eeprom_free(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 8171c701e4e..95aa202c85e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -172,35 +172,35 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5000_TX_POWER_VERSION (4)
#define EEPROM_5000_EEPROM_VERSION (0x11A)
-/*5000 calibrations */
-#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
-#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL)
-
-/* 5000 links */
-#define EEPROM_5000_LINK_HOST (2*0x64)
-#define EEPROM_5000_LINK_GENERAL (2*0x65)
-#define EEPROM_5000_LINK_REGULATORY (2*0x66)
-#define EEPROM_5000_LINK_CALIBRATION (2*0x67)
-#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_5000_LINK_OTHERS (2*0x69)
-
-/* 5000 regulatory - indirect access */
-#define EEPROM_5000_REG_SKU_ID ((0x02)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
-#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\
+/* 5000 and up calibration */
+#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
+
+/* 5000 temperature */
+#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+
+/* agn links */
+#define EEPROM_LINK_HOST (2*0x64)
+#define EEPROM_LINK_GENERAL (2*0x65)
+#define EEPROM_LINK_REGULATORY (2*0x66)
+#define EEPROM_LINK_CALIBRATION (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
+#define EEPROM_LINK_OTHERS (2*0x69)
+
+/* agn regulatory - indirect access */
+#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
-#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\
+#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
-#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\
+#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\
+#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\
+#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
-#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\
+#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\
+#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
/* 6000 regulatory - indirect access */
@@ -265,14 +265,21 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_5050_EEPROM_VERSION (0x21E)
/* 1000 Specific */
+#define EEPROM_1000_TX_POWER_VERSION (4)
#define EEPROM_1000_EEPROM_VERSION (0x15C)
/* 6x00 Specific */
+#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x434)
/* 6x50 Specific */
+#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
+/* 6x00g2 Specific */
+#define EEPROM_6000G2_TX_POWER_VERSION (6)
+#define EEPROM_6000G2_EEPROM_VERSION (0x709)
+
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 73681c4fefe..51f89e7ba68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 51a67fb2e18..3ff6b9d25a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -31,6 +31,9 @@
#define __iwl_helpers_h__
#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 16eb3ced9b3..0203a3bbf87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
struct iwl_priv *priv, u32 reg)
{
u32 value = _iwl_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
+ IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
f, l);
return value;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a6f9c918aab..db5bfcb036c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -46,7 +46,7 @@
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
- "(default 0)\n");
+ "(default 0)");
static const struct {
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 548dac2f6a9..cda6a94d6cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -318,10 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
- if (priv->vif)
- dtimper = priv->hw->conf.ps_dtim_period;
- else
- dtimper = 1;
+ dtimper = priv->hw->conf.ps_dtim_period ?: 1;
if (priv->cfg->broken_powersave)
iwl_power_sleep_cam_cmd(priv, &cmd);
@@ -384,10 +381,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS unless the device reports Kelvin */
bool within_margin = false;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -840,12 +837,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
static void iwl_bg_tt_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
- s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
+ s32 temp = priv->temperature; /* degrees CELSIUS unless the device reports Kelvin */
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
+ if (priv->cfg->temperature_kelvin)
temp = KELVIN_TO_CELSIUS(priv->temperature);
if (!priv->thermal_throttle.advanced_tt)
@@ -875,7 +872,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
struct iwl_tt_trans *transaction;
- IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n");
+ IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
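Two small things in the iwl-power.c hunks above are easy to read past: the DTIM fallback now uses the GNU C "?:" form (a ?: b yields a when a is non-zero, otherwise b, evaluating a only once), and the Kelvin conversion is keyed off the per-device cfg->temperature_kelvin flag instead of a 4965 hardware-revision test, so future Kelvin-reporting devices only need a config entry. A tiny standalone illustration of the "?:" idiom (variable names are made up):

#include <stdio.h>

int main(void)
{
	int ps_dtim_period = 0;			/* 0 here stands for "not yet known" */
	int dtimper = ps_dtim_period ?: 1;	/* GNU extension: fall back to 1 */

	printf("dtimper = %d\n", dtimper);	/* prints 1 */
	return 0;
}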
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d2d2a917490..b1f101caf19 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,7 @@
* device. A queue maps to only one (selectable by driver) Tx DMA channel,
* but one DMA channel may take input from several queues.
*
- * Tx DMA channels have dedicated purposes. For 4965, they are used as follows
+ * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
* (cf. default_queue_to_tx_fifo in iwl-4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
@@ -262,20 +262,20 @@
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
* 4 -- Commands (e.g. RXON, etc.)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
* 7 -- not used by driver (device-internal only)
*
- * For 5000 series and up, they are used slightly differently
+ * For 5000 series and up, they are used differently
* (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
* 2 -- EDCA VI (video) frames, higher priority
* 3 -- EDCA VO (voice) and management frames, highest priority
- * 4 -- (TBD)
- * 5 -- HCCA short frames
- * 6 -- HCCA long frames
+ * 4 -- unused
+ * 5 -- unused
+ * 6 -- unused
* 7 -- Commands
*
* Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
@@ -529,48 +529,48 @@
#define IWL_SCD_TXFIFO_POS_RA (4)
#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
-/* 5000 SCD */
-#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4)
-#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
-
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
-#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
-#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
-#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
-
-#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600)
-#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
-
-#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\
- (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-
-#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
-
-#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
+/* agn SCD */
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
+#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
+#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+
+#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
+#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
+
+#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
+ (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
+
+#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
(~(1<<IWL_CMD_QUEUE_NUM)))
-#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
-
-#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
-#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8)
-#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c)
-#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10)
-#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14)
-#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4)
-#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4)
-#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8)
-#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248)
-#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
-#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
+#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
+
+#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
+#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
+#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
+#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
+#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
+#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
+#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
+#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
+#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
+#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
+#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
/*********************** END TX SCHEDULER *************************************/
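The comment rewritten above says that on 5000-series and newer devices only FIFOs 0-3 carry EDCA traffic and FIFO 7 carries host commands, with 4-6 unused. A hypothetical queue-to-FIFO table in that spirit (this is not the driver's iwl5000_default_queue_to_tx_fifo array, just an illustration of the shape such a mapping takes):

#include <stdio.h>

/* Illustrative FIFO identifiers; the real driver defines its own enum. */
enum tx_fifo {
	TX_FIFO_BK,		/* background, lowest priority  */
	TX_FIFO_BE,		/* best effort, normal priority */
	TX_FIFO_VI,		/* video, higher priority       */
	TX_FIFO_VO,		/* voice + management, highest  */
	TX_FIFO_UNUSED,		/* FIFOs 4-6 on 5000 and up     */
	TX_FIFO_CMD,		/* host commands                */
};

static const enum tx_fifo queue_to_fifo[] = {
	TX_FIFO_BK, TX_FIFO_BE, TX_FIFO_VI, TX_FIFO_VO,
	TX_FIFO_UNUSED, TX_FIFO_UNUSED, TX_FIFO_UNUSED, TX_FIFO_CMD,
};

int main(void)
{
	for (unsigned int q = 0; q < sizeof(queue_to_fifo) / sizeof(queue_to_fifo[0]); q++)
		printf("queue %u -> fifo %d\n", q, queue_to_fifo[q]);
	return 0;
}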
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339107d..0a5d7cf2519 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-EXPORT_SYMBOL(iwl_rx_queue_restock);
-
-
-/**
- * iwl_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_rx_replenish);
-
-void iwl_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl_rx_allocate(priv, GFP_ATOMIC);
-
- iwl_rx_queue_restock(priv);
-}
-EXPORT_SYMBOL(iwl_rx_replenish_now);
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@@ -396,98 +205,6 @@ err_bd:
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
-void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- if (!priv->cfg->use_isr_legacy)
- rb_timeout = RX_RB_TIMEOUT;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->dma_addr >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-int iwl_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_rxq_stop);
-
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
int bcn_silence_c =
le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+ int last_rx_noise;
if (bcn_silence_a) {
total_silence += bcn_silence_a;
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
/* Average among active antennas */
if (num_active_rx)
- priv->last_rx_noise = (total_silence / num_active_rx) - 107;
+ last_rx_noise = (total_silence / num_active_rx) - 107;
else
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
bcn_silence_a, bcn_silence_b, bcn_silence_c,
- priv->last_rx_noise);
+ last_rx_noise);
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
-#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+/**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error is exceeding the thresholds, reset the radio
+ * to improve the throughput.
+ */
+bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ bool rc = true;
int combined_plcp_delta;
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(priv->statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->statistics.general.temperature !=
- pkt->u.stats.general.temperature) ||
- ((priv->statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
/*
* check for plcp_err and trigger radio reset if it exceeds
* the plcp error threshold plcp_delta.
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
+ ((combined_plcp_delta * 100) / plcp_msec) >
priv->cfg->plcp_delta_threshold) {
/*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
+ * if plcp_err exceeds the threshold,
+ * the following data is printed in csv format:
* Text: plcp_err exceeded %d,
* Received ofdm.plcp_err,
* Current ofdm.plcp_err,
@@ -673,22 +382,76 @@ void iwl_rx_statistics(struct iwl_priv *priv,
* combined_plcp_delta,
* plcp_msec
*/
- IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
+ "%u, %u, %u, %u, %d, %u mSecs\n",
priv->cfg->plcp_delta_threshold,
le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
le32_to_cpu(
- priv->statistics.rx.ofdm_ht.plcp_err),
+ priv->statistics.rx.ofdm_ht.plcp_err),
combined_plcp_delta, plcp_msec);
+ rc = false;
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL(iwl_good_plcp_health);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- iwl_force_reset(priv, IWL_RF_RESET);
+void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+ if (iwl_is_associated(priv)) {
+ if (priv->cfg->ops->lib->check_ack_health) {
+ if (!priv->cfg->ops->lib->check_ack_health(
+ priv, pkt)) {
+ /*
+ * low ack count detected
+ * restart Firmware
+ */
+ IWL_ERR(priv, "low ack count detected, "
+ "restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET))
+ return;
+ }
+ }
+ if (priv->cfg->ops->lib->check_plcp_health) {
+ if (!priv->cfg->ops->lib->check_plcp_health(
+ priv, pkt)) {
+ /*
+ * high plcp error detected
+ * reset Radio
+ */
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
}
}
+}
+EXPORT_SYMBOL(iwl_recover_from_statistics);
+
+void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+
+ IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(priv->statistics),
+ le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->statistics.general.temperature !=
+ pkt->u.stats.general.temperature) ||
+ ((priv->statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+ iwl_recover_from_statistics(priv, pkt);
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
@@ -731,139 +494,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-/* Calc max signal level (dBm) among 3 possible receivers */
-static inline int iwl_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-/**
- * iwl_dbg_report_frame - dump frame to syslog during debug sessions
- *
- * You may hack this function to show different aspects of received frames,
- * including selective frame dumps.
- * group100 parameter selects whether to show 1 out of 100 good data frames.
- * All beacon and probe response frames are printed.
- */
-static void iwl_dbg_report_frame(struct iwl_priv *priv,
- struct iwl_rx_phy_res *phy_res, u16 length,
- struct ieee80211_hdr *header, int group100)
-{
- u32 to_us;
- u32 print_summary = 0;
- u32 print_dump = 0; /* set to 1 to dump all frames' contents */
- u32 hundred = 0;
- u32 dataframe = 0;
- __le16 fc;
- u16 seq_ctl;
- u16 channel;
- u16 phy_flags;
- u32 rate_n_flags;
- u32 tsf_low;
- int rssi;
-
- if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
- return;
-
- /* MAC header */
- fc = header->frame_control;
- seq_ctl = le16_to_cpu(header->seq_ctrl);
-
- /* metadata */
- channel = le16_to_cpu(phy_res->channel);
- phy_flags = le16_to_cpu(phy_res->phy_flags);
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* signal statistics */
- rssi = iwl_calc_rssi(priv, phy_res);
- tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
-
- to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
-
- /* if data frame is to us and all is good,
- * (optionally) print summary for only 1 out of every 100 */
- if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
- cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
- dataframe = 1;
- if (!group100)
- print_summary = 1; /* print each frame */
- else if (priv->framecnt_to_us < 100) {
- priv->framecnt_to_us++;
- print_summary = 0;
- } else {
- priv->framecnt_to_us = 0;
- print_summary = 1;
- hundred = 1;
- }
- } else {
- /* print summary for all other frames */
- print_summary = 1;
- }
-
- if (print_summary) {
- char *title;
- int rate_idx;
- u32 bitrate;
-
- if (hundred)
- title = "100Frames";
- else if (ieee80211_has_retry(fc))
- title = "Retry";
- else if (ieee80211_is_assoc_resp(fc))
- title = "AscRsp";
- else if (ieee80211_is_reassoc_resp(fc))
- title = "RasRsp";
- else if (ieee80211_is_probe_resp(fc)) {
- title = "PrbRsp";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_beacon(fc)) {
- title = "Beacon";
- print_dump = 1; /* dump frame contents */
- } else if (ieee80211_is_atim(fc))
- title = "ATIM";
- else if (ieee80211_is_auth(fc))
- title = "Auth";
- else if (ieee80211_is_deauth(fc))
- title = "DeAuth";
- else if (ieee80211_is_disassoc(fc))
- title = "DisAssoc";
- else
- title = "Frame";
-
- rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
- if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
- bitrate = 0;
- WARN_ON_ONCE(1);
- } else {
- bitrate = iwl_rates[rate_idx].ieee / 2;
- }
-
- /* print frame summary.
- * MAC addresses show just the last byte (for brevity),
- * but you can hack it to show more, if you'd like to. */
- if (dataframe)
- IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
- "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
- title, le16_to_cpu(fc), header->addr1[5],
- length, rssi, channel, bitrate);
- else {
- /* src/dst addresses assume managed mode */
- IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
- "len=%u, rssi=%d, tim=%lu usec, "
- "phy=0x%02x, chnl=%d\n",
- title, le16_to_cpu(fc), header->addr1[5],
- header->addr3[5], length, rssi,
- tsf_low - priv->scan_start_tsf,
- phy_flags, channel);
- }
- }
- if (print_dump)
- iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
-}
-#endif
-
/*
* returns non-zero if packet should be dropped
*/
@@ -911,305 +541,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);
-
-static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- };
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- int ret = 0;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(priv, "alloc_skb failed\n");
- return;
- }
-
- skb_reserve(skb, IWL_LINK_HDR_MAX);
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- /* mac80211 currently doesn't support paged SKB. Convert it to
- * linear SKB for management frame and data frame requires
- * software decryption or software defragementation. */
- if (ieee80211_is_mgmt(fc) ||
- ieee80211_has_protected(fc) ||
- ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
- (ieee80211_is_data_qos(fc) &&
- *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
- ret = skb_linearize(skb);
- else
- ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
- 0 : -ENOMEM;
-
- if (ret) {
- kfree_skb(skb);
- goto out;
- }
-
- /*
- * XXX: We cannot touch the page and its virtual memory (hdr) after
- * here. It might have already been freed by the above skb change.
- */
-
- iwl_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- out:
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl4965_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res[0]) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
- amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.rate_idx =
- iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_TSFT;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl_calc_rssi(priv, phy_res);
-
- /* Meaningful noise values are available only from beacon statistics,
- * which are gathered only when associated, and indicate noise
- * only for the associated network channel ...
- * Ignore these noise values while scanning (other channels) */
- if (iwl_is_associated(priv) &&
- !test_bit(STATUS_SCANNING, &priv->status)) {
- rx_status.noise = priv->last_rx_noise;
- } else {
- rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- }
-
- /* Reset beacon noise level if not associated. */
- if (!iwl_is_associated(priv))
- priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- /* Set "1" to report good data frames in groups of 100 */
- if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
- iwl_dbg_report_frame(priv, phy_res, len, header, 1);
-#endif
- iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
- rx_status.signal, rx_status.noise,
- (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- if (iwl_is_network_packet(priv, header)) {
- priv->last_rx_rssi = rx_status.signal;
- priv->last_beacon_time = priv->ucode_beacon_time;
- priv->last_tsf = le64_to_cpu(phy_res->timestamp);
- }
-
- iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx);
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->last_phy_res[0] = 1;
- memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
- sizeof(struct iwl_rx_phy_res));
-}
-EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
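The iwl-rx.c refactor above splits policy from detection: iwl_good_plcp_health() only reports whether the PLCP error rate stayed under cfg->plcp_delta_threshold, and iwl_recover_from_statistics() then picks the remedy, a firmware restart when the ACK health check fails or an RF reset when the PLCP check fails. The detection itself is a rate comparison: the combined OFDM plus OFDM-HT PLCP error delta, scaled to a percentage of the elapsed milliseconds, against the threshold. A standalone sketch of just that arithmetic (function name and numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

/*
 * Restatement of the threshold test: unhealthy when
 * (delta * 100) / elapsed_msec exceeds the configured threshold.
 * Not the driver's function, only its arithmetic.
 */
static bool plcp_health_ok(unsigned int ofdm_delta, unsigned int ofdm_ht_delta,
			   unsigned int elapsed_msec, unsigned int threshold)
{
	unsigned int combined = ofdm_delta + ofdm_ht_delta;

	if (!elapsed_msec || !combined)
		return true;		/* nothing measurable yet */

	return (combined * 100) / elapsed_msec <= threshold;
}

int main(void)
{
	/* 60 new PLCP errors over 500 ms is 12 per 100 ms, threshold 10 */
	printf("healthy: %d\n", plcp_health_ok(40, 20, 500, 10));	/* 0 */
	printf("healthy: %d\n", plcp_health_ok(10,  5, 500, 10));	/* 1 */
	return 0;
}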
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 741e65ec830..5d3f51ff2f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -69,9 +69,8 @@ int iwl_scan_cancel(struct iwl_priv *priv)
}
if (test_bit(STATUS_SCANNING, &priv->status)) {
- if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
- set_bit(STATUS_SCAN_ABORTING, &priv->status);
queue_work(priv->workqueue, &priv->abort_scan);
} else
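Replacing the test_bit()/set_bit() pair with test_and_set_bit() above makes raising STATUS_SCAN_ABORTING atomic, so two contexts racing to cancel a scan cannot both queue the abort work; the completion path below does the mirror image with test_and_clear_bit(). A userspace approximation of the idea using C11 atomics (this is not the kernel bitops API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag scan_aborting = ATOMIC_FLAG_INIT;

/* Only the caller that flips the flag from clear to set queues the work. */
static int try_queue_abort(void)
{
	/* test-and-set returns the previous value of the flag */
	return !atomic_flag_test_and_set(&scan_aborting);
}

int main(void)
{
	printf("first caller queues abort: %d\n", try_queue_abort());	/* 1 */
	printf("second caller is a no-op:  %d\n", try_queue_abort());	/* 0 */
	return 0;
}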
@@ -201,9 +200,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
le32_to_cpu(notif->statistics[0]),
le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -223,49 +219,24 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n",
- (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
- "2.4" : "5.2",
+ IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n",
+ (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(elapsed_jiffies
- (priv->scan_pass_start, jiffies)));
-
- /* Remove this scanned band from the list of pending
- * bands to scan, band G precedes A in order of scanning
- * as seen in iwl_bg_request_scan */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
- else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
- priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
+ (priv->scan_start, jiffies)));
- /* If a request to abort was given, or the scan did not succeed
+ /*
+ * If a request to abort was given, or the scan did not succeed
* then we reset the scan state machine and terminate,
- * re-queuing another scan if one has been requested */
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ * re-queuing another scan if one has been requested
+ */
+ if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- } else {
- /* If there are more bands on this scan pass reschedule */
- if (priv->scan_bands)
- goto reschedule;
- }
-
- if (!priv->is_internal_short_scan)
- priv->next_scan_jiffies = 0;
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
- IWL_DEBUG_INFO(priv, "Scan took %dms\n",
- jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
-
queue_work(priv->workqueue, &priv->scan_completed);
-
- return;
-
-reschedule:
- priv->scan_pass_start = jiffies;
- queue_work(priv->workqueue, &priv->request_scan);
}
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -294,7 +265,8 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_get_active_dwell_time);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
{
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
@@ -304,7 +276,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the beacon interval (minus
* 2 * channel tune time) */
- passive = priv->beacon_int;
+ passive = vif ? vif->bss_conf.beacon_int : 0;
if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
passive = IWL_PASSIVE_DWELL_BASE;
passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
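To make the dwell-time clamp above concrete: an associated interface now contributes vif->bss_conf.beacon_int, which is capped at IWL_PASSIVE_DWELL_BASE and then reduced to 98% minus twice the channel-tune time. A standalone sketch with stand-in constants (the driver's actual IWL_PASSIVE_DWELL_BASE and IWL_CHANNEL_TUNE_TIME values are not reproduced here):

#include <stdio.h>

#define PASSIVE_DWELL_BASE	100	/* illustrative value, in TU */
#define CHANNEL_TUNE_TIME	5	/* illustrative value, in TU */

static unsigned int passive_dwell(unsigned int beacon_int)
{
	unsigned int passive = beacon_int;	/* 0 when there is no vif */

	if (passive > PASSIVE_DWELL_BASE || !passive)
		passive = PASSIVE_DWELL_BASE;
	return (passive * 98) / 100 - CHANNEL_TUNE_TIME * 2;
}

int main(void)
{
	printf("%u\n", passive_dwell(0));	/* no vif: 98 - 10 = 88 */
	printf("%u\n", passive_dwell(50));	/* short beacon interval: 49 - 10 = 39 */
	return 0;
}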
@@ -314,150 +286,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int i, added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- /* only scan single channel, good enough to reset the RF */
- /* pick the first valid not in-use channel */
- if (band == IEEE80211_BAND_5GHZ) {
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel = priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- } else {
- for (i = 0; i < 14; i++) {
- if (priv->channel_info[i].channel !=
- le16_to_cpu(priv->staging_rxon.channel)) {
- channel =
- priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv,
- band, channel);
- if (is_channel_valid(ch_info))
- break;
- }
- }
- }
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
-static int iwl_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = ieee80211_frequency_to_channel(chan->center_freq);
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
- return added;
-}
-
void iwl_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
@@ -468,7 +296,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_init_scan_params);
-static int iwl_scan_initiate(struct iwl_priv *priv)
+static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
WARN_ON(!mutex_is_locked(&priv->mutex));
@@ -476,26 +304,28 @@ static int iwl_scan_initiate(struct iwl_priv *priv)
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
- priv->scan_pass_start = priv->scan_start;
- queue_work(priv->workqueue, &priv->request_scan);
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ priv->cfg->ops->utils->request_scan(priv, vif);
return 0;
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
int iwl_mac_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
{
- unsigned long flags;
struct iwl_priv *priv = hw->priv;
- int ret, i;
+ int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
+ if (req->n_channels == 0)
+ return -EINVAL;
+
mutex_lock(&priv->mutex);
- spin_lock_irqsave(&priv->lock, flags);
if (!iwl_is_ready_rf(priv)) {
ret = -EIO;
@@ -515,30 +345,15 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* We don't schedule scan within next_scan_jiffies period.
- * Avoid scanning during possible EAPOL exchange, return
- * success immediately.
- */
- if (priv->next_scan_jiffies &&
- time_after(priv->next_scan_jiffies, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
- priv->scan_bands = 0;
- for (i = 0; i < req->n_channels; i++)
- priv->scan_bands |= BIT(req->channels[i]->band);
-
+ /* mac80211 will only ask for one band at a time */
+ priv->scan_band = req->channels[0]->band;
priv->scan_request = req;
- ret = iwl_scan_initiate(priv);
+ ret = iwl_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
out_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
mutex_unlock(&priv->mutex);
return ret;
@@ -554,13 +369,18 @@ void iwl_internal_short_hw_scan(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->start_internal_scan);
}
-static void iwl_bg_start_internal_scan(struct work_struct *work)
+void iwl_bg_start_internal_scan(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, start_internal_scan);
mutex_lock(&priv->mutex);
+ if (priv->is_internal_short_scan == true) {
+ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+ goto unlock;
+ }
+
if (!iwl_is_ready_rf(priv)) {
IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
goto unlock;
@@ -576,22 +396,20 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
goto unlock;
}
- priv->scan_bands = 0;
- if (priv->band == IEEE80211_BAND_5GHZ)
- priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
- else
- priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+ priv->scan_band = priv->band;
IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
priv->is_internal_short_scan = true;
- queue_work(priv->workqueue, &priv->request_scan);
+
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ goto unlock;
+
+ priv->cfg->ops->utils->request_scan(priv, NULL);
unlock:
mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL(iwl_internal_short_hw_scan);
-
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
+EXPORT_SYMBOL(iwl_bg_start_internal_scan);
void iwl_bg_scan_check(struct work_struct *data)
{
@@ -654,289 +472,15 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (WARN_ON(left < ie_len))
return len;
- if (ies)
+ if (ies && ie_len) {
memcpy(pos, ies, ie_len);
- len += ie_len;
- left -= ie_len;
+ len += ie_len;
+ }
return (u16)len;
}
EXPORT_SYMBOL(iwl_fill_probe_req);
-static void iwl_bg_request_scan(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
-
- conf = ieee80211_get_hw_conf(priv->hw);
-
- mutex_lock(&priv->mutex);
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_is_ready(priv)) {
- IWL_WARN(priv, "request scan called when driver not ready.\n");
- goto done;
- }
-
- /* Make sure the scan wasn't canceled before this queued work
- * was given the chance to run... */
- if (!test_bit(STATUS_SCANNING, &priv->status))
- goto done;
-
- /* This should never be called or scheduled if there is currently
- * a scan active in the hardware. */
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
- "Ignoring second request.\n");
- ret = -EIO;
- goto done;
- }
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
- goto done;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
- goto done;
- }
-
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
- goto done;
- }
-
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
- goto done;
- }
-
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- ret = -ENOMEM;
- goto done;
- }
- }
- scan = priv->scan;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_is_associated(priv)) {
- u16 interval = 0;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
- unsigned long flags;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
- band = IEEE80211_BAND_2GHZ;
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
- band = IEEE80211_BAND_5GHZ;
- rate = IWL_RATE_6M_PLCP;
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- /* Force use of chains B and C (0x6) for scan Rx for 4965
- * Avoid A (0x1) because of its off-channel reception on A-band.
- */
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
- rx_ant = ANT_BC;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
- goto done;
- }
-
- priv->scan_tx_ant[band] =
- iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
- rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = first_antenna(active_chains);
- }
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
- }
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- if (priv->is_internal_short_scan) {
- scan->channel_count =
- iwl_get_single_channel_for_scan(priv, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band,
- is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- }
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- goto done;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_send_cmd_sync(priv, &cmd);
- if (ret)
- goto done;
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- mutex_unlock(&priv->mutex);
- return;
-
- done:
- /* Cannot perform scan. Make sure we clear scanning
- * bits from status so next scan request can be performed.
- * If we don't clear scanning status bit here all next scan
- * will fail
- */
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCANNING, &priv->status);
- /* inform mac80211 scan aborted */
- queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
-}
-
void iwl_bg_abort_scan(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
@@ -958,17 +502,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, scan_completed);
+ bool internal = false;
IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
cancel_delayed_work(&priv->scan_check);
- if (!priv->is_internal_short_scan)
- ieee80211_scan_completed(priv->hw, false);
- else {
+ mutex_lock(&priv->mutex);
+ if (priv->is_internal_short_scan) {
priv->is_internal_short_scan = false;
IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ internal = true;
}
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Do not hold mutex here since this will cause mac80211 to call
+ * into driver again into functions that will attempt to take
+ * mutex.
+ */
+ if (!internal)
+ ieee80211_scan_completed(priv->hw, false);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
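The comment in this hunk captures a common locking rule: decide what to do while holding priv->mutex, drop it, and only then call back into mac80211, since the callback may re-enter the driver and try to take the same lock. A minimal user-space sketch of that pattern; the pthread lock, flag and helper names are illustrative, not driver code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool internal_scan;

static void notify_upper_layer(void)
{
	/* Stands in for ieee80211_scan_completed(): it may call back into
	 * code that needs state_lock, so it must not run under it. */
	pthread_mutex_lock(&state_lock);
	/* ... re-entrant work ... */
	pthread_mutex_unlock(&state_lock);
	printf("upper layer notified\n");
}

static void scan_completed(void)
{
	bool internal;

	pthread_mutex_lock(&state_lock);
	internal = internal_scan;	/* snapshot state under the lock */
	internal_scan = false;
	pthread_mutex_unlock(&state_lock);

	if (!internal)			/* callback runs lock-free */
		notify_upper_layer();
}

int main(void)
{
	scan_completed();
	return 0;
}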
@@ -984,7 +538,6 @@ EXPORT_SYMBOL(iwl_bg_scan_completed);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4a6686fa6b3..83a26361a9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -29,57 +29,12 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->hw_params.bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(iwl_find_station);
-
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl_find_station(priv, da);
- }
-}
-EXPORT_SYMBOL(iwl_get_ra_sta_id);
-
/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
@@ -132,7 +87,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
sta_id);
break;
case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
sta_id);
break;
default:
@@ -158,13 +113,6 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
-
- /*
- * Determine if we wanted to modify or add a station,
- * if adding a station succeeded we have some more initialization
- * to do when using station notification. TODO
- */
-
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
@@ -190,6 +138,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
.flags = flags,
.data = data,
};
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
if (flags & CMD_ASYNC)
cmd.callback = iwl_add_sta_callback;
@@ -263,18 +215,19 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
}
/**
- * iwl_add_station - Add station to tables in driver and device
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * Should be called with sta_lock held.
*/
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info)
+static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info)
{
struct iwl_station_entry *station;
- unsigned long flags_spin;
int i;
- int sta_id = IWL_INVALID_STATION;
+ u8 sta_id = IWL_INVALID_STATION;
u16 rate;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
if (is_ap)
sta_id = IWL_AP_ID;
else if (is_broadcast_ether_addr(addr))
@@ -292,20 +245,32 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
sta_id = i;
}
- /* These two conditions have the same outcome, but keep them separate
- since they have different meanings */
- if (unlikely(sta_id == IWL_INVALID_STATION)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate since they have different meanings
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
return sta_id;
}
- if (priv->stations[sta_id].used &&
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
!compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
return sta_id;
}
-
station = &priv->stations[sta_id];
station->used = IWL_STA_DRIVER_ACTIVE;
IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
@@ -319,10 +284,12 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
station->sta.sta.sta_id = sta_id;
station->sta.station_flags = 0;
- /* BCAST station and IBSS stations do not work in HT mode */
- if (sta_id != priv->hw_params.bcast_sta_id &&
- priv->iw_mode != NL80211_IFTYPE_ADHOC)
- iwl_set_ht_add_station(priv, sta_id, ht_info);
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL ht_info, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_set_ht_add_station(priv, sta_id, ht_info);
/* 3945 only */
rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -330,86 +297,221 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+ return sta_id;
+
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common - add a station to the driver and uCode station tables
+ */
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
+ sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
/* Add station to device's station table */
- iwl_send_add_sta(priv, &station->sta, flags);
- return sta_id;
+ ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(iwl_add_station_common);
+
+static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
+ u8 sta_id)
+{
+ int i, r;
+ struct iwl_link_quality_cmd *link_cmd;
+ u32 rate_flags;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ rate_flags = 0;
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+
+ link_cmd->rs_table[i].rate_n_flags =
+ iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+ r = iwl_get_prev_ieee_rate(r);
+ }
+
+ link_cmd->general_params.single_stream_ant_msk =
+ first_antenna(priv->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~first_antenna(priv->hw_params.valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
}
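iwl_sta_alloc_lq() above fills the retry table by starting at a per-band rate and repeatedly stepping to the previous IEEE rate, ending up spinning on the lowest rate for the remaining retries. A small standalone sketch of that fallback-table shape; the rate list, table size and starting index are made up for the example and are not the driver's tables:

#include <stdio.h>

#define RETRY_SLOTS	16

/* Legacy rates in ascending (IEEE) order, in kbps. */
static const int rate_kbps[] = { 1000, 2000, 5500, 6000, 9000, 11000,
				 12000, 18000, 24000, 36000, 48000, 54000 };

static int prev_ieee_rate(int idx)
{
	return idx > 0 ? idx - 1 : 0;	/* clamp ("spin") at the lowest rate */
}

int main(void)
{
	int retry_table[RETRY_SLOTS];
	int i, r = 7;		/* some initial rate index; the driver picks 6M or 1M per band */

	/* Fill each retry slot, falling back one rate per slot. */
	for (i = 0; i < RETRY_SLOTS; i++) {
		retry_table[i] = rate_kbps[r];
		r = prev_ieee_rate(r);
	}

	for (i = 0; i < RETRY_SLOTS; i++)
		printf("retry %2d -> %d kbps\n", i, retry_table[i]);
	return 0;
}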
-EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
+/*
+ * iwl_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r)
{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
- u8 sta_id = iwl_find_station(priv, addr);
- BUG_ON(sta_id == IWL_INVALID_STATION);
+ if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
- IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr);
+ ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- /* Ucode must be active and driver must be non active */
- if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %d\n", sta_id);
+ if (init_rs) {
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+ ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
+
+ return 0;
}
+EXPORT_SYMBOL(iwl_add_bssid_station);
-static void iwl_remove_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
{
- struct iwl_rem_sta_cmd *rm_sta =
- (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const u8 *addr = rm_sta->addr;
+ /* Ucode must be active and driver must be non active */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- return;
- }
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
- break;
- default:
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
}
-static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
- u8 flags)
+static int iwl_send_remove_station(struct iwl_priv *priv,
+ struct iwl_station_entry *station)
{
struct iwl_rx_packet *pkt;
int ret;
+ unsigned long flags_spin;
struct iwl_rem_sta_cmd rm_sta_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
.len = sizeof(struct iwl_rem_sta_cmd),
- .flags = flags,
+ .flags = CMD_SYNC,
.data = &rm_sta_cmd,
};
memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN);
+ memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_remove_sta_callback;
- else
- cmd.flags |= CMD_WANT_SKB;
ret = iwl_send_cmd(priv, &cmd);
- if (ret || (flags & CMD_ASYNC))
+ if (ret)
return ret;
pkt = (struct iwl_rx_packet *)cmd.reply_page;
@@ -422,7 +524,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
if (!ret) {
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
- iwl_sta_ucode_deactivate(priv, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
@@ -439,45 +543,48 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
/**
* iwl_remove_station - Remove driver's knowledge of station.
*/
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
{
- int sta_id = IWL_INVALID_STATION;
- int i, ret = -EINVAL;
+ struct iwl_station_entry *station;
unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
- if (is_ap)
- sta_id = IWL_AP_ID;
- else if (is_broadcast_ether_addr(addr))
- sta_id = priv->hw_params.bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- !compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
- if (unlikely(sta_id == IWL_INVALID_STATION))
- goto out;
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
+ spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non DRIVER active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
addr);
- goto out;
+ goto out_err;
}
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_ERR(priv, "Removing %pM but non UCODE active\n",
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
addr);
- goto out;
+ goto out_err;
}
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -485,47 +592,112 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
BUG_ON(priv->num_stations < 0);
+ station = &priv->stations[sta_id];
spin_unlock_irqrestore(&priv->sta_lock, flags);
- ret = iwl_send_remove_station(priv, addr, CMD_ASYNC);
- return ret;
-out:
+ return iwl_send_remove_station(priv, station);
+out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(iwl_remove_station);
/**
- * iwl_clear_stations_table - Clear the driver's station table
+ * iwl_clear_ucode_stations - clear ucode station table bits
*
- * NOTE: This does not clear or otherwise alter the device's station table.
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
*/
-void iwl_clear_stations_table(struct iwl_priv *priv)
+void iwl_clear_ucode_stations(struct iwl_priv *priv)
{
- unsigned long flags;
int i;
+ unsigned long flags_spin;
+ bool cleared = false;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
- if (iwl_is_alive(priv) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
- iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
- IWL_ERR(priv, "Couldn't clear the station table\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_clear_ucode_stations);
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv)
+{
+ struct iwl_station_entry *station;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
- priv->num_stations = 0;
- memset(priv->stations, 0, sizeof(priv->stations));
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
+ return;
+ }
- /* clean ucode key table bit map */
- priv->ucode_key_table = 0;
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
- /* keep track of static keys */
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- if (priv->wep_keys[i].key_size)
- set_bit(i, &priv->ucode_key_table);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ station = &priv->stations[i];
+ ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
+ if (ret) {
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ station->sta.sta.addr);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (station->lq)
+ iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
}
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
}
-EXPORT_SYMBOL(iwl_clear_stations_table);
+EXPORT_SYMBOL(iwl_restore_stations);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
@@ -539,7 +711,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -549,9 +721,11 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.data = wep_cmd,
- .flags = CMD_ASYNC,
+ .flags = CMD_SYNC,
};
+ might_sleep();
+
memset(wep_cmd, 0, cmd_size +
(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
@@ -581,33 +755,34 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
else
return 0;
}
-EXPORT_SYMBOL(iwl_send_static_wepkey_cmd);
+
+int iwl_restore_default_wep_keys(struct iwl_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
+ return iwl_send_static_wepkey_cmd(priv, 0);
+}
+EXPORT_SYMBOL(iwl_restore_default_wep_keys);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_lock, flags);
+ WARN_ON(!mutex_is_locked(&priv->mutex));
+
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
- if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- keyconf->keyidx);
-
- priv->default_wep_key--;
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ /* but keys in device are clear anyway so return success */
return 0;
}
ret = iwl_send_static_wepkey_cmd(priv, 1);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -617,7 +792,8 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf)
{
int ret;
- unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&priv->mutex));
if (keyconf->keylen != WEP_KEY_LEN_128 &&
keyconf->keylen != WEP_KEY_LEN_64) {
@@ -629,13 +805,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->default_wep_key++;
-
- if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
- IWL_ERR(priv, "index %d already used in uCode key table.\n",
- keyconf->keyidx);
-
priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
@@ -643,7 +812,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
ret = iwl_send_static_wepkey_cmd(priv, 0);
IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
keyconf->keylen, keyconf->keyidx, ret);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
@@ -798,18 +966,23 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key)
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
- u8 sta_id = IWL_INVALID_STATION;
+ u8 sta_id;
unsigned long flags;
int i;
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return;
- }
+ if (sta) {
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
+ sta->addr);
+ return;
+ }
+ } else
+ sta_id = priv->hw_params.bcast_sta_id;
+
if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
@@ -885,7 +1058,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
+ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
@@ -948,253 +1121,149 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
}
#endif
-int iwl_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq, u8 flags)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if ((lq->sta_id == 0xFF) &&
- (priv->iw_mode == NL80211_IFTYPE_ADHOC))
- return -EINVAL;
-
- if (lq->sta_id == 0xFF)
- lq->sta_id = IWL_AP_ID;
-
- iwl_dump_lq_cmd(priv, lq);
-
- if (iwl_is_associated(priv) && priv->assoc_station_added)
- return iwl_send_cmd(priv, &cmd);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_send_lq_cmd);
-
/**
- * iwl_sta_init_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 first tells us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
*/
-static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+static bool is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
+ int i;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (is_ap)
- r = IWL_RATE_54M_INDEX;
- else if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
+ if (ht_conf->is_ht)
+ return true;
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ priv->active_rxon.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
-
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
}
-
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
+ return true;
}
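To make the race described above concrete, here is a minimal standalone model of the check: on a channel that is no longer HT, any table entry still carrying an HT/MCS flag makes the whole LQ table invalid. The flag bit, table size and contents below are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RATE_FLAG_HT	(1u << 8)	/* illustrative stand-in for RATE_MCS_HT_MSK */
#define RETRY_SLOTS	4

static bool lq_table_valid(bool channel_is_ht, const uint32_t *rates, int n)
{
	int i;

	if (channel_is_ht)
		return true;		/* HT rates are fine on an HT channel */

	for (i = 0; i < n; i++)
		if (rates[i] & RATE_FLAG_HT)
			return false;	/* stale HT rate on a legacy channel */
	return true;
}

int main(void)
{
	uint32_t rates[RETRY_SLOTS] = { RATE_FLAG_HT | 7, 11, 5, 1 };

	printf("valid on legacy channel: %d\n",
	       lq_table_valid(false, rates, RETRY_SLOTS));
	printf("valid on HT channel:     %d\n",
	       lq_table_valid(true, rates, RETRY_SLOTS));
	return 0;
}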
/**
- * iwl_rxon_add_station - add station into station table.
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
*
- * there is only one AP station with id= IWL_AP_ID
- * NOTE: mutex must be held before calling this function
+ * The link quality command is sent as the last step of station creation.
+ * In the special case where init is set, once the command completes we
+ * clear the state indicating that station creation is still in progress.
*/
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
+int iwl_send_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
- struct ieee80211_sta *sta;
- struct ieee80211_sta_ht_cap ht_config;
- struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
- u8 sta_id;
+ int ret = 0;
+ unsigned long flags_spin;
- /*
- * Set HT capabilities. It is ok to set this struct even if not using
- * HT config: the priv->current_ht_config.is_ht flag will just be false
- */
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct iwl_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
- /* Add station to device's station table */
- sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_lq(priv, addr, is_ap);
+ iwl_dump_lq_cmd(priv, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
- return sta_id;
+ if (is_lq_table_valid(priv, lq))
+ ret = iwl_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ return ret;
}
-EXPORT_SYMBOL(iwl_rxon_add_station);
+EXPORT_SYMBOL(iwl_send_lq_cmd);
/**
- * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
+ * iwl_alloc_bcast_station - add broadcast station into driver's station table.
*
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
*/
-static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
{
- int i, r;
- struct iwl_link_quality_cmd link_cmd = {
- .reserved1 = 0,
- };
- u32 rate_flags;
-
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- rate_flags = 0;
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- link_cmd.rs_table[i].rate_n_flags =
- iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
- r = iwl_get_prev_ieee_rate(r);
+ return -EINVAL;
}
- link_cmd.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
- link_cmd.general_params.dual_stream_ant_msk = 3;
- link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd.agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- /* Update the rate scaling for control frame Tx to AP */
- link_cmd.sta_id = priv->hw_params.bcast_sta_id;
-
- iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
- sizeof(link_cmd), &link_cmd, NULL);
-}
-
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
-/**
- * iwl_add_bcast_station - add broadcast station into station table.
- */
-void iwl_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ if (init_lq) {
+ link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
- /* Set up default rate scaling table in device's station table */
- iwl_sta_init_bcast_lq(priv);
-}
-EXPORT_SYMBOL(iwl_add_bcast_station);
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ }
-/**
- * iwl3945_add_bcast_station - add broadcast station into station table.
- */
-void iwl3945_add_bcast_station(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
- iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+ return 0;
}
-EXPORT_SYMBOL(iwl3945_add_bcast_station);
+EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
-/**
- * iwl_get_sta_id - Find station's index within station table
- *
- * If new IBSS station, create new entry in station table
- */
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+void iwl_dealloc_bcast_station(struct iwl_priv *priv)
{
- int sta_id;
- __le16 fc = hdr->frame_control;
-
- /* If this frame is broadcast or management, use broadcast station id */
- if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
- return priv->hw_params.bcast_sta_id;
-
- switch (priv->iw_mode) {
-
- /* If we are a client station in a BSS network, use the special
- * AP station entry (that's the only station we communicate with) */
- case NL80211_IFTYPE_STATION:
- return IWL_AP_ID;
-
- /* If we are an AP, then find the station, or use BCAST */
- case NL80211_IFTYPE_AP:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
- return priv->hw_params.bcast_sta_id;
-
- /* If this frame is going out to an IBSS network, find the station,
- * or create a new station table entry */
- case NL80211_IFTYPE_ADHOC:
- sta_id = iwl_find_station(priv, hdr->addr1);
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- /* Create new station table entry */
- sta_id = iwl_add_station(priv, hdr->addr1, false,
- CMD_ASYNC, NULL);
-
- if (sta_id != IWL_INVALID_STATION)
- return sta_id;
-
- IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
- "Defaulting to broadcast...\n",
- hdr->addr1);
- iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
- return priv->hw_params.bcast_sta_id;
+ unsigned long flags;
+ int i;
- default:
- IWL_WARN(priv, "Unknown mode of operation: %d\n",
- priv->iw_mode);
- return priv->hw_params.bcast_sta_id;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ BUG_ON(priv->num_stations < 0);
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
}
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-EXPORT_SYMBOL(iwl_get_sta_id);
+EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
/**
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
@@ -1214,13 +1283,13 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
}
EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn)
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION)
return -ENXIO;
@@ -1233,16 +1302,17 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
- CMD_ASYNC);
+ CMD_ASYNC);
}
EXPORT_SYMBOL(iwl_sta_rx_agg_start);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
{
unsigned long flags;
int sta_id;
- sta_id = iwl_find_station(priv, addr);
+ sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
@@ -1291,3 +1361,22 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
+EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
+
+int iwl_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 2dc35fe28f5..c2a453a1a99 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,44 +29,82 @@
#ifndef __iwl_sta_h__
#define __iwl_sta_h__
+#include "iwl-dev.h"
+
#define HW_KEY_DYNAMIC 0
#define HW_KEY_DEFAULT 1
-/**
- * iwl_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- */
-u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
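The flag bits above let the driver track a station's state on both sides independently: known to the driver, known to the uCode, or mid-way through being pushed to the uCode. A toy standalone model of that bookkeeping; the bit values, helpers and single global state are illustrative only, not the driver's API:

#include <stdio.h>

#define STA_DRIVER_ACTIVE	(1u << 0)
#define STA_UCODE_ACTIVE	(1u << 1)
#define STA_UCODE_INPROGRESS	(1u << 2)

static unsigned int sta_state;

static int add_station(void)
{
	if (sta_state & STA_UCODE_INPROGRESS)
		return -1;		/* an add is already in flight */
	if ((sta_state & (STA_DRIVER_ACTIVE | STA_UCODE_ACTIVE)) ==
	    (STA_DRIVER_ACTIVE | STA_UCODE_ACTIVE))
		return -1;		/* already fully added */

	sta_state |= STA_DRIVER_ACTIVE | STA_UCODE_INPROGRESS;
	/* ... a synchronous ADD_STA command would run here ... */
	sta_state |= STA_UCODE_ACTIVE;
	sta_state &= ~STA_UCODE_INPROGRESS;
	return 0;
}

static void clear_ucode_side(void)
{
	/* e.g. after an unassociated RXON: the uCode forgot the station,
	 * but the driver still wants it restored later */
	sta_state &= ~STA_UCODE_ACTIVE;
}

int main(void)
{
	int ret = add_station();

	printf("add: %d, state 0x%x\n", ret, sta_state);
	clear_ucode_side();
	printf("after RXON, state 0x%x (driver still active)\n", sta_state);
	ret = add_station();
	printf("re-add: %d, state 0x%x\n", ret, sta_state);
	return 0;
}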
-int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key);
+int iwl_restore_default_wep_keys(struct iwl_priv *priv);
int iwl_set_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *key, u8 sta_id);
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
- const u8 *addr, u32 iv32, u16 *phase1key);
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_add_bcast_station(struct iwl_priv *priv);
-void iwl3945_add_bcast_station(struct iwl_priv *priv);
-int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
-void iwl_clear_stations_table(struct iwl_priv *priv);
+void iwl_restore_stations(struct iwl_priv *priv);
+void iwl_clear_ucode_stations(struct iwl_priv *priv);
+int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
+void iwl_dealloc_bcast_station(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
-int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
-u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
- struct ieee80211_sta_ht_cap *ht_info);
+int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
+ u8 *sta_id_r);
+int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
+ bool is_ap,
+ struct ieee80211_sta_ht_cap *ht_info,
+ u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr);
+int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
-int iwl_sta_rx_agg_start(struct iwl_priv *priv,
- const u8 *addr, int tid, u16 ssn);
-int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid);
void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() so that, when we get there via a
+ * hardware restart, mac80211 will be able to reconfigure stations;
+ * in the normal down flow the stations will already have been cleared.
+ */
+static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c036d54..1ece2ea0977 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,47 +38,6 @@
#include "iwl-io.h"
#include "iwl-helpers.h"
-static const u16 default_tid_to_tx_fifo[] = {
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC0,
- IWL_TX_FIFO_AC1,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC2,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_AC3,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_NONE,
- IWL_TX_FIFO_AC3
-};
-
-static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
@@ -310,6 +269,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
q->high_mark = 2;
q->write_ptr = q->read_ptr = 0;
+ q->last_read_ptr = 0;
+ q->repeat_same_read_ptr = 0;
return 0;
}
@@ -454,611 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
}
EXPORT_SYMBOL(iwl_tx_queue_reset);
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == IWL_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
- else
- iwl_tx_queue_free(priv, txq_id);
- }
- iwl_free_dma_ptr(priv, &priv->kw);
-
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_free_txq_mem(priv);
-}
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
-
-/**
- * iwl_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl_hw_txq_ctx_free(priv);
-
- ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl_hw_txq_ctx_free(priv);
- iwl_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
- }
-}
-
-/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_txq_ctx_stop);
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
- if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
- tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_HCCA_RETRY_LIMIT 3
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc, int is_hcca)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses*/
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
- RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /**
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up RTS and CTS flags for certain packets */
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
- rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
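iwl_tx_cmd_build_rate() falls back to the band's lowest legacy rate whenever mac80211 reports an MCS entry or an out-of-range index, then offsets 5 GHz indices past the CCK rates. A standalone sketch of just that clamping step; RATE_COUNT_LEGACY and FIRST_OFDM_RATE are illustrative stand-ins for the driver's constants:

#include <stdbool.h>

#define RATE_COUNT_LEGACY 12  /* illustrative: 4 CCK + 8 OFDM rates */
#define FIRST_OFDM_RATE    4  /* illustrative offset of the first OFDM rate */

/* Map a mac80211 rate index to a driver rate-table index, falling back
 * to the band's lowest supported rate when the index is an MCS entry
 * or out of range. */
static int pick_legacy_rate(int mac80211_idx, bool is_mcs, bool is_5ghz,
                            int lowest_supported_idx)
{
        int idx = mac80211_idx;

        if (is_mcs || idx < 0 || idx >= RATE_COUNT_LEGACY)
                idx = lowest_supported_idx;

        /* 5 GHz has no CCK rates, so driver indices start at the first OFDM rate. */
        if (is_5ghz)
                idx += FIRST_OFDM_RATE;

        return idx;
}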
-
-static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->alg) {
- case ALG_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case ALG_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_key(keyconf, skb_frag,
- IEEE80211_TKIP_P2_KEY, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case ALG_WEP:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
- break;
- }
-}
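The WEP branch of iwl_tx_cmd_build_hwcrypto() packs the key index into the security-control byte next to the cipher selector. A minimal illustration of that packing; the masks, shift, and flag values are placeholders, not the hardware's real TX_CMD_SEC_* definitions:

#include <stdint.h>

#define SEC_WEP        0x01u  /* illustrative cipher selector  */
#define SEC_KEY128     0x08u  /* illustrative 104-bit key flag */
#define SEC_KEY_MSK    0x03u  /* illustrative key-index mask   */
#define SEC_KEY_SHIFT     6   /* illustrative key-index shift  */

/* Pack a WEP key index and key length into a one-byte security field. */
static uint8_t pack_wep_sec_ctl(unsigned int key_idx, unsigned int key_len_bytes)
{
        uint8_t sec = SEC_WEP | ((key_idx & SEC_KEY_MSK) << SEC_KEY_SHIFT);

        if (key_len_bytes == 13)  /* WEP-104, often called a 128-bit key */
                sec |= SEC_KEY128;

        return sec;
}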
-
-/*
- * start REPLY_TX command process
- */
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- int swq_id, txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, len_org, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
- !priv->assoc_station_added)) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
- sta_id = priv->hw_params.bcast_sta_id;
- else
- sta_id = iwl_get_sta_id(priv, hdr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
- sta_priv->asleep) {
- WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- txq_id = skb_get_queue_mapping(skb);
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop_unlock;
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- }
- }
-
- txq = &priv->txq[txq_id];
- swq_id = txq->swq_id;
- q = &txq->q;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
- goto drop_unlock;
-
- if (ieee80211_is_data_qos(fc))
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb[0] = skb;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
- iwl_dbg_log_tx_data_frame(priv, len, hdr);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
-
- iwl_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
-
- len_org = len;
- firstlen = len = (len + 3) & ~3;
-
- if (len_org != len)
- len_org = 1;
- else
- len_org = 0;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (len_org)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, len,
- PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
- pci_unmap_len_set(out_meta, len, len);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /* avoid atomic ops if it isn't an associated client */
- if (sta_priv && sta_priv->client)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_stop_queue(priv, txq->swq_id);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-EXPORT_SYMBOL(iwl_tx_skb);
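One detail of the removed iwl_tx_skb() worth spelling out: the Tx command plus MAC header is rounded up to a 4-byte boundary before being DMA-mapped, and the device is told whenever padding bytes were inserted after the MAC header. A self-contained sketch of that alignment step:

#include <stdbool.h>
#include <stddef.h>

/* Round @len up to the next multiple of four and report whether padding
 * was needed, so the caller can set its MAC-header-padding flag. */
static size_t dword_align(size_t len, bool *padded)
{
        size_t aligned = (len + 3) & ~(size_t)3;

        *padded = (aligned != len);
        return aligned;
}

/* Example: a 62-byte command-plus-header block aligns to 64 bytes, so two
 * padding bytes sit between the MAC header and the payload. */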
-
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
/**
@@ -1192,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return idx;
}
-static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- sta = ieee80211_find_sta(priv->vif, hdr->addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
-
- ieee80211_tx_status_irqsafe(priv->hw, skb);
-}
-
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- iwl_tx_status(priv, tx_info->skb[0]);
-
- hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
- if (hdr && ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
- tx_info->skb[0] = NULL;
-
- if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
- priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-EXPORT_SYMBOL(iwl_tx_queue_reclaim);
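The reclaim loop in iwl_tx_queue_reclaim() advances a circular read pointer until it catches up with the index reported by the device. The wrap-around increment it relies on can be sketched independently of the driver structures; this is a plain model of what iwl_queue_inc_wrap() does for the loop above:

/* Advance a circular-buffer index by one, wrapping back to zero at
 * @n_entries. */
static int queue_inc_wrap(int index, int n_entries)
{
        return (index + 1 < n_entries) ? index + 1 : 0;
}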
-
-
/**
* iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1340,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
@@ -1348,358 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
- */
-static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
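iwl_txq_ctx_activate_free() claims the lowest clear bit in the active-queue bitmask, relying on test_and_set_bit() for atomicity. The same search in a non-atomic userspace sketch, just to show the shape of the loop:

/* Return the index of the lowest clear bit in @mask, claiming it, or -1
 * if all @max_queues bits are already set. Not atomic. */
static int claim_free_queue(unsigned long *mask, int max_queues)
{
        int i;

        for (i = 0; i < max_queues; i++) {
                if (!(*mask & (1UL << i))) {
                        *mask |= 1UL << i;
                        return i;
                }
        }
        return -1;
}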
-
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, ra, tid);
-
- sta_id = iwl_find_station(priv, ra);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- } else {
- IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_tx_agg_start);
-
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn = -1;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- if (!ra) {
- IWL_ERR(priv, "ra = NULL\n");
- return -EINVAL;
- }
-
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
- tx_fifo_id = default_tid_to_tx_fifo[tid];
- else
- return -EINVAL;
-
- sta_id = iwl_find_station(priv, ra);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state ==
- IWL_EMPTYING_HW_QUEUE_ADDBA) {
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
- return 0;
- }
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- spin_lock_irqsave(&priv->lock, flags);
- /*
- * The only reason this call can fail is a queue number out of range,
- * which can happen if the uCode is reloaded and all the station
- * information is lost. If it is outside the range, there is no need
- * to deactivate the uCode queue; just return "success" to allow
- * mac80211 to clean up its own data.
- */
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
- tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_tx_agg_stop);
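Both aggregation paths above derive the starting sequence number (SSN) from the 16-bit 802.11 sequence-control field, whose low four bits carry the fragment number. The extraction, spelled out on its own:

#include <stdint.h>

#define SCTL_SEQ 0xFFF0  /* sequence-number bits of seq_ctrl (IEEE80211_SCTL_SEQ) */

/* Extract the 12-bit sequence number from a sequence-control value. */
static uint16_t seq_ctrl_to_sn(uint16_t seq_ctrl)
{
        return (seq_ctrl & SCTL_SEQ) >> 4;
}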
-
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = default_tid_to_tx_fifo[tid];
- IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
- ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
- }
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_txq_check_empty);
-
-/**
- * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- u64 bitmap;
- int successes = 0;
- struct ieee80211_tx_info *info;
-
- if (unlikely(!agg->wait_for_ba)) {
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0) /* btw: something is wrong with indices */
- sh += 0x100;
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- bitmap &= agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- for (i = 0; i < agg->frame_count ; i++) {
- ack = bitmap & (1ULL << i);
- successes += !!ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- }
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_map = successes;
- info->status.ampdu_ack_len = agg->frame_count;
- iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
-
- return 0;
-}
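iwl_tx_status_reply_compressed_ba() lines the device's block-ack bitmap up with the driver's Tx window by shifting it, masks it against the frames actually transmitted, and counts the surviving bits. A compact, standalone sketch of that accounting, with sequence arithmetic reduced to the modulo-256 index space used above:

#include <stdint.h>

/* Count how many of @frame_count transmitted frames were acknowledged.
 * @ba_bitmap starts at @ba_start_idx as reported by the device;
 * @tx_bitmap marks the frames actually sent, starting at @tx_start_idx. */
static int count_ba_successes(uint64_t ba_bitmap, int ba_start_idx,
                              uint64_t tx_bitmap, int tx_start_idx,
                              int frame_count)
{
        int sh = tx_start_idx - ba_start_idx;
        int i, successes = 0;

        if (sh < 0)              /* indices wrapped in the 256-entry space */
                sh += 0x100;

        ba_bitmap >>= sh;        /* align BA bits with the Tx window */
        ba_bitmap &= tx_bitmap;  /* only frames we transmitted can count */

        for (i = 0; i < frame_count; i++)
                if (ba_bitmap & (1ULL << i))
                        successes++;

        return successes;
}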
-
-/**
- * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- /* Find index just before block-ack window */
- index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- /* TODO: Need to get this copy more safely - now good for debug */
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq->swq_id);
-
- iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-}
-EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
-
#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
const char *iwl_get_tx_fail_reason(u32 status)
{
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
}
return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b74a56c48d2..3e5bffb6034 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -352,11 +352,11 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
- if (priv->shared_virt)
+ if (priv->_3945.shared_virt)
dma_free_coherent(&priv->pci_dev->dev,
sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ priv->_3945.shared_virt,
+ priv->_3945.shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -505,24 +505,15 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
- /* drop all non-injected data frame if we are not associated */
- if (ieee80211_is_data(fc) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!iwl_is_associated(priv) ||
- ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
- IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
- goto drop_unlock;
- }
-
spin_unlock_irqrestore(&priv->lock, flags);
hdr_len = ieee80211_hdrlen(fc);
- /* Find (or create) index into station table for destination station */
- if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ /* Find index into station table for destination station */
+ if (!info->control.sta)
sta_id = priv->hw_params.bcast_sta_id;
else
- sta_id = iwl_get_sta_id(priv, hdr);
+ sta_id = iwl_sta_id(info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@@ -607,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq->need_update = 0;
}
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
ieee80211_hdrlen(fc));
@@ -754,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
add_time =
iwl3945_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
+ le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(priv->rxon_timing.beacon_interval));
memset(&spectrum, 0, sizeof(spectrum));
@@ -768,7 +759,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
if (iwl_is_associated(priv))
spectrum.start_time =
- iwl3945_add_beacon_time(priv->last_beacon_time,
+ iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
add_time,
le16_to_cpu(priv->rxon_timing.beacon_interval));
else
@@ -857,7 +848,6 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
#endif
IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
- return;
}
static void iwl3945_bg_beacon_update(struct work_struct *work)
@@ -966,7 +956,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
+ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
@@ -1613,9 +1603,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
return pos;
}
-/* For sanity check only. Actual size is determined by uCode, typ. 512 */
-#define IWL3945_MAX_EVENT_LOG_SIZE (512)
-
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -1642,16 +1629,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
- if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (capacity > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, IWL3945_MAX_EVENT_LOG_SIZE);
- capacity = IWL3945_MAX_EVENT_LOG_SIZE;
+ capacity, priv->cfg->max_event_log_size);
+ capacity = priv->cfg->max_event_log_size;
}
- if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
+ if (next_entry > priv->cfg->max_event_log_size) {
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
- next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
+ next_entry, priv->cfg->max_event_log_size);
+ next_entry = priv->cfg->max_event_log_size;
}
size = num_wraps ? capacity : next_entry;
@@ -1860,7 +1847,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch)
+ struct iwl3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
@@ -1874,7 +1862,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
return 0;
active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
@@ -1947,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
added++;
}
- IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
return added;
}
@@ -2122,6 +2110,28 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
iwl_write32(priv, CSR_RESET, 0);
}
+#define IWL3945_UCODE_GET(item) \
+static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->u.v1.item); \
+}
+
+static u32 iwl3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
+
+static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
+{
+ return (u8 *) ucode->u.v1.data;
+}
+
+IWL3945_UCODE_GET(inst_size);
+IWL3945_UCODE_GET(data_size);
+IWL3945_UCODE_GET(init_size);
+IWL3945_UCODE_GET(init_data_size);
+IWL3945_UCODE_GET(boot_size);
+
/**
* iwl3945_read_ucode - Read uCode images from disk file.
*
@@ -2170,7 +2180,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
goto error;
/* Make sure that we got at least our header! */
- if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) {
+ if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
IWL_ERR(priv, "File size way too small!\n");
ret = -EINVAL;
goto err_release;
@@ -2181,13 +2191,12 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
priv->ucode_ver = le32_to_cpu(ucode->ver);
api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
- data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
- init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
- init_data_size =
- priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
- boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
- src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
+ inst_size = iwl3945_ucode_get_inst_size(ucode);
+ data_size = iwl3945_ucode_get_data_size(ucode);
+ init_size = iwl3945_ucode_get_init_size(ucode);
+ init_data_size = iwl3945_ucode_get_init_data_size(ucode);
+ boot_size = iwl3945_ucode_get_boot_size(ucode);
+ src = iwl3945_ucode_get_data(ucode);
/* api_ver should match the api version forming part of the
* firmware filename ... but we don't check for that and only rely
@@ -2236,7 +2245,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != priv->cfg->ops->ucode->get_header_size(api_ver) +
+ if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
inst_size + data_size + init_size +
init_data_size + boot_size) {
@@ -2490,8 +2499,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- iwl_clear_stations_table(priv);
-
rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
@@ -2513,13 +2520,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(STATUS_ALIVE, &priv->status);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ /* Enable timer to monitor the driver queues */
+ mod_timer(&priv->monitor_recover,
+ jiffies +
+ msecs_to_jiffies(priv->cfg->monitor_recover_period));
+ }
+
if (iwl_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = priv->rates_mask;
- priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK;
iwl_power_update_mode(priv, true);
@@ -2531,11 +2544,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, priv->iw_mode);
+ iwl_connection_init_rx_config(priv, NULL);
}
/* Configure Bluetooth device coexistence support */
- iwl_send_bt_config(priv);
+ priv->cfg->ops->hcmd->send_bt_config(priv);
/* Configure the adapter for unassociated operation */
iwlcore_commit_rxon(priv);
@@ -2548,17 +2561,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
- /* reassociate for ADHOC mode */
- if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
- struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
- priv->vif);
- if (beacon)
- iwl_mac_beacon_update(priv->hw, beacon);
- }
-
- if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
- iwl_set_mode(priv, priv->iw_mode);
-
return;
restart:
@@ -2580,7 +2582,10 @@ static void __iwl3945_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
- iwl_clear_stations_table(priv);
+ /* Station information will now be cleared in device */
+ iwl_clear_ucode_stations(priv);
+ iwl_dealloc_bcast_station(priv);
+ iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2661,6 +2666,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
{
int rc, i;
+ rc = iwl_alloc_bcast_station(priv, false);
+ if (rc)
+ return rc;
+
if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
return -EIO;
@@ -2714,12 +2723,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
for (i = 0; i < MAX_HW_RESTARTS; i++) {
- iwl_clear_stations_table(priv);
-
/* load bootstrap state machine,
* load bootstrap program into processor's memory,
* prepare to load the "initialize" uCode */
- priv->cfg->ops->lib->load_ucode(priv);
+ rc = priv->cfg->ops->lib->load_ucode(priv);
if (rc) {
IWL_ERR(priv,
@@ -2787,7 +2794,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
static void iwl3945_rfkill_poll(struct work_struct *data)
{
struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rfkill_poll.work);
+ container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
& CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -2806,22 +2813,18 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
/* Keep this running, even if radio now enabled. This will be
* cancelled in mac_start() if system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
}
-#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
-static void iwl3945_bg_request_scan(struct work_struct *data)
+void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, request_scan);
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = sizeof(struct iwl3945_scan_cmd),
.flags = CMD_SIZE_HUGE,
};
- int rc = 0;
struct iwl3945_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
u8 n_probes = 0;
@@ -2830,8 +2833,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
conf = ieee80211_get_hw_conf(priv->hw);
- mutex_lock(&priv->mutex);
-
cancel_delayed_work(&priv->scan_check);
if (!iwl_is_ready(priv)) {
@@ -2849,7 +2850,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
"Ignoring second request.\n");
- rc = -EIO;
goto done;
}
@@ -2875,20 +2875,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
goto done;
}
- if (!priv->scan_bands) {
- IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
- goto done;
- }
-
- if (!priv->scan) {
- priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan) {
- rc = -ENOMEM;
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
goto done;
}
}
- scan = priv->scan;
+ scan = priv->scan_cmd;
memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
@@ -2904,7 +2899,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
spin_lock_irqsave(&priv->lock, flags);
- interval = priv->beacon_int;
+ interval = vif ? vif->bss_conf.beacon_int : 0;
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
@@ -2927,7 +2922,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2955,12 +2952,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* flags + rate selection */
- if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
scan->good_CRC_th = 0;
band = IEEE80211_BAND_2GHZ;
- } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
+ break;
+ case IEEE80211_BAND_5GHZ:
scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
/*
* If active scaning is requested but a certain channel
@@ -2970,27 +2969,32 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
IWL_GOOD_CRC_TH_DISABLED;
band = IEEE80211_BAND_5GHZ;
- } else {
- IWL_WARN(priv, "Invalid scan band count\n");
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
goto done;
}
- scan->tx_cmd.len = cpu_to_le16(
+ if (!priv->is_internal_short_scan) {
+ scan->tx_cmd.len = cpu_to_le16(
iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
-
+ } else {
+ scan->tx_cmd.len = cpu_to_le16(
+ iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan)));
+ }
/* select Rx antennas */
scan->flags |= iwl3945_get_antenna_flags(priv);
- if (iwl_is_monitor_mode(priv))
- scan->filter_flags = RXON_FILTER_PROMISC_MSK;
-
scan->channel_count =
iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+ (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
@@ -3003,14 +3007,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
+ if (iwl_send_cmd_sync(priv, &cmd))
goto done;
queue_delayed_work(priv->workqueue, &priv->scan_check,
IWL_SCAN_CHECK_WATCHDOG);
- mutex_unlock(&priv->mutex);
return;
done:
@@ -3024,7 +3026,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
- mutex_unlock(&priv->mutex);
}
static void iwl3945_bg_restart(struct work_struct *data)
@@ -3066,28 +3067,25 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
-void iwl3945_post_associate(struct iwl_priv *priv)
+void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
- if (priv->iw_mode == NL80211_IFTYPE_AP) {
+ if (!vif || !priv->is_open)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
return;
}
-
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- priv->assoc_id, priv->active_rxon.bssid_addr);
+ vif->bss_conf.aid, priv->active_rxon.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (!priv->vif || !priv->is_open)
- return;
-
iwl_scan_cancel_timeout(priv, 200);
conf = ieee80211_get_hw_conf(priv->hw);
@@ -3096,7 +3094,7 @@ void iwl3945_post_associate(struct iwl_priv *priv)
iwlcore_commit_rxon(priv);
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing), &priv->rxon_timing);
if (rc)
@@ -3105,57 +3103,40 @@ void iwl3945_post_associate(struct iwl_priv *priv)
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
+ priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- priv->assoc_id, priv->beacon_int);
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
}
iwlcore_commit_rxon(priv);
- switch (priv->iw_mode) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
break;
-
case NL80211_IFTYPE_ADHOC:
-
- priv->assoc_id = 1;
- iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
- iwl3945_sync_sta(priv, IWL_STA_ID,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
- CMD_ASYNC);
- iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
iwl3945_send_beacon_cmd(priv);
-
break;
-
default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, priv->iw_mode);
+ IWL_ERR(priv, "%s Should not be called in %d mode\n",
+ __func__, vif->type);
break;
}
-
- iwl_activate_qos(priv, 0);
-
- /* we have just associated, don't start scan too early */
- priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
}
/*****************************************************************************
@@ -3214,7 +3195,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* ucode is running and will send rfkill notifications,
* no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->rfkill_poll);
+ cancel_delayed_work(&priv->_3945.rfkill_poll);
iwl_led_start(priv);
@@ -3255,7 +3236,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
/* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
round_jiffies_relative(2 * HZ));
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3277,7 +3258,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return NETDEV_TX_OK;
}
-void iwl3945_config_ap(struct iwl_priv *priv)
+void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
@@ -3293,7 +3274,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
/* RXON Timing */
memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
- iwl_setup_rxon_timing(priv);
+ iwl_setup_rxon_timing(priv, vif);
rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
sizeof(priv->rxon_timing),
&priv->rxon_timing);
@@ -3301,9 +3282,10 @@ void iwl3945_config_ap(struct iwl_priv *priv)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
- /* FIXME: what should be the assoc_id for AP? */
- priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
- if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ priv->staging_rxon.assoc_id = 0;
+
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_PREAMBLE)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
@@ -3311,22 +3293,21 @@ void iwl3945_config_ap(struct iwl_priv *priv)
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
- if (priv->assoc_capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME)
+ if (vif->bss_conf.assoc_capability &
+ WLAN_CAPABILITY_SHORT_SLOT_TIME)
priv->staging_rxon.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
+ if (vif->type == NL80211_IFTYPE_ADHOC)
priv->staging_rxon.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
- iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
}
iwl3945_send_beacon_cmd(priv);
@@ -3341,7 +3322,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
- const u8 *addr;
int ret = 0;
u8 sta_id = IWL_INVALID_STATION;
u8 static_key;
@@ -3353,21 +3333,24 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- addr = sta ? sta->addr : iwl_bcast_addr;
static_key = !iwl_is_associated(priv);
if (!static_key) {
- sta_id = iwl_find_station(priv, addr);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
- addr);
- return -EINVAL;
+ if (!sta) {
+ sta_id = priv->hw_params.bcast_sta_id;
+ } else {
+ sta_id = iwl_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_MAC80211(priv,
+ "leave - %pM not in station map.\n",
+ sta->addr);
+ return -EINVAL;
+ }
}
}
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
- mutex_unlock(&priv->mutex);
switch (cmd) {
case SET_KEY:
@@ -3388,11 +3371,45 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = -EINVAL;
}
+ mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
+static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+
+ ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
+ &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl3945_rs_rate_init(priv, sta, sta_id);
+
+ return 0;
+}
/*****************************************************************************
*
* sysfs attributes
@@ -3592,7 +3609,7 @@ static ssize_t store_measurement(struct device *d,
struct iwl_priv *priv = dev_get_drvdata(d);
struct ieee80211_measurement_params params = {
.channel = le16_to_cpu(priv->active_rxon.channel),
- .start_time = cpu_to_le64(priv->last_tsf),
+ .start_time = cpu_to_le64(priv->_3945.last_tsf),
.duration = cpu_to_le16(1),
};
u8 type = IWL_MEASURE_BASIC;
@@ -3656,44 +3673,6 @@ static ssize_t show_channels(struct device *d,
static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
-static ssize_t show_statistics(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 size = sizeof(struct iwl3945_notif_statistics);
- u32 len = 0, ofs = 0;
- u8 *data = (u8 *)&priv->statistics_39;
- int rc = 0;
-
- if (!iwl_is_alive(priv))
- return -EAGAIN;
-
- mutex_lock(&priv->mutex);
- rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (rc) {
- len = sprintf(buf,
- "Error sending statistics request: 0x%08X\n", rc);
- return len;
- }
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
-
static ssize_t show_antenna(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3775,14 +3754,21 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
+ INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
- INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
iwl3945_hw_setup_deferred_work(priv);
+ if (priv->cfg->ops->lib->recover_from_tx_stall) {
+ init_timer(&priv->monitor_recover);
+ priv->monitor_recover.data = (unsigned long)priv;
+ priv->monitor_recover.function =
+ priv->cfg->ops->lib->recover_from_tx_stall;
+ }
+
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl3945_irq_tasklet, (unsigned long)priv);
}
@@ -3794,7 +3780,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->scan_check);
cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->start_internal_scan);
cancel_work_sync(&priv->beacon_update);
+ if (priv->cfg->ops->lib->recover_from_tx_stall)
+ del_timer_sync(&priv->monitor_recover);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3805,7 +3794,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_filter_flags.attr,
&dev_attr_measurement.attr,
&dev_attr_retry_rate.attr,
- &dev_attr_statistics.attr,
&dev_attr_status.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
@@ -3832,7 +3820,9 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
- .hw_scan = iwl_mac_hw_scan
+ .hw_scan = iwl_mac_hw_scan,
+ .sta_add = iwl3945_mac_sta_add,
+ .sta_remove = iwl_mac_sta_remove,
};
static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3851,9 +3841,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
mutex_init(&priv->mutex);
mutex_init(&priv->sync_cmd_mutex);
- /* Clear the driver's (not device's) station table */
- iwl_clear_stations_table(priv);
-
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3861,12 +3848,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
- iwl_reset_qos(priv);
-
- priv->qos_data.qos_active = 0;
- priv->qos_data.qos_cap.val = 0;
-
- priv->rates_mask = IWL_RATES_MASK;
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@ -3902,6 +3883,8 @@ err:
return ret;
}
+#define IWL3945_MAX_PROBE_REQUEST 200
+
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
int ret;
@@ -3909,10 +3892,10 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->rate_control_algorithm = "iwl-3945-rs";
hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SPECTRUM_MGMT;
if (!priv->cfg->broken_powersave)
@@ -3928,7 +3911,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+ hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@@ -4131,7 +4114,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
/* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
+ queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2 * HZ);
return 0;
@@ -4205,7 +4188,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- cancel_delayed_work_sync(&priv->rfkill_poll);
+ cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
@@ -4214,7 +4197,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);
- iwl_clear_stations_table(priv);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -4236,7 +4218,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
iwl_free_channel_map(priv);
iwlcore_free_geos(priv);
- kfree(priv->scan);
+ kfree(priv->scan_cmd);
if (priv->ibss_beacon)
dev_kfree_skb(priv->ibss_beacon);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index b9d34a76696..03f998d098c 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -17,7 +17,7 @@ config IWM
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
depends on IWM && DEBUG_FS
- ---help---
+ help
This option will enable debug tracing and setting for iwm
You can set the debug level and module through debugfs. By
@@ -30,3 +30,10 @@ config IWM_DEBUG
Or, if you want the full debug, for all modules:
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
+
+config IWM_TRACING
+ bool "Enable event tracing for iwmc3200wifi"
+ depends on IWM && EVENT_TRACING
+ help
+ Say Y here to trace all the commands and responses between
+ the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index d34291b652d..cdc7e07ba11 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -3,3 +3,8 @@ iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
+iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
+
+CFLAGS_trace.o := -I$(src)
+
+ccflags-y += -D__CHECK_ENDIAN__
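
Review note: trace.o is only compiled when CONFIG_IWM_TRACING is set, and the new trace.h (added further down) redefines TRACE_EVENT to an empty static inline stub when the option is off, so the trace_iwm_* call sites added to hal.c and rx.c build either way. A minimal stand-alone C sketch of that compile-time stub pattern, with made-up names that are not the driver's macros:

#include <stdio.h>

/* Toggle this the way CONFIG_IWM_TRACING toggles the real build. */
/* #define EXAMPLE_TRACING 1 */

#ifdef EXAMPLE_TRACING
#define DEFINE_TRACE(name, proto, arg) \
	static void trace_##name proto \
	{ printf("trace %s: %d\n", #name, (int)(arg)); }
#else
/* Tracing disabled: the same call site compiles into an inline no-op. */
#define DEFINE_TRACE(name, proto, arg) \
	static inline void trace_##name proto { (void)(arg); }
#endif

DEFINE_TRACE(tx_frame, (int len), len)

int main(void)
{
	trace_tx_frame(128);	/* a no-op unless EXAMPLE_TRACING is defined */
	return 0;
}
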
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
index 836663eec25..62edd5888a7 100644
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -31,7 +31,7 @@ struct iwm_if_ops {
int (*disable)(struct iwm_priv *iwm);
int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
- int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
+ void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
void (*debugfs_exit)(struct iwm_priv *iwm);
const char *umac_name;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a1d45cce0eb..902e95f70f6 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -264,7 +264,7 @@ static int iwm_cfg80211_get_station(struct wiphy *wiphy,
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
{
struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
struct iwm_umac_notif_bss_info *umac_bss;
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
@@ -272,7 +272,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
s32 signal;
int freq;
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
+ list_for_each_entry(bss, &iwm->bss_list, node) {
umac_bss = bss->bss;
mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
@@ -726,23 +726,26 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
CFG_POWER_INDEX, iwm->conf.power_index);
}
-int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
}
-int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
+static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
}
-int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *netdev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct cfg80211_pmksa pmksa;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 42df7262f9f..330c7d9cf10 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -507,7 +507,7 @@ static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
return ret;
}
- /* When succeding, the send_target routine returns the seq number */
+ /* When succeeding, the send_target routine returns the seq number */
seq_num = ret;
ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
@@ -782,10 +782,9 @@ int iwm_send_mlme_profile(struct iwm_priv *iwm)
return 0;
}
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
{
struct iwm_umac_invalidate_profile invalid;
- int ret;
invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
invalid.hdr.buf_size =
@@ -794,7 +793,14 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
invalid.reason = WLAN_REASON_UNSPECIFIED;
- ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+ return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
+}
+
+int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
+{
+ int ret;
+
+ ret = __iwm_invalidate_mlme_profile(iwm);
if (ret)
return ret;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 3dfd9f0e900..7e16bcf5997 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -488,6 +488,7 @@ int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
void *payload, u16 payload_size);
int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
int iwm_send_mlme_profile(struct iwm_priv *iwm);
+int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
index e35c9b693d1..a0c13a49ab3 100644
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -113,13 +113,10 @@ struct iwm_debugfs {
};
#ifdef CONFIG_IWM_DEBUG
-int iwm_debugfs_init(struct iwm_priv *iwm);
+void iwm_debugfs_init(struct iwm_priv *iwm);
void iwm_debugfs_exit(struct iwm_priv *iwm);
#else
-static inline int iwm_debugfs_init(struct iwm_priv *iwm)
-{
- return 0;
-}
+static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index cbb81befdb5..53b0b7711f0 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -48,12 +48,11 @@ static struct {
#define add_dbg_module(dbg, name, id, initlevel) \
do { \
- struct dentry *d; \
dbg.dbg_module[id] = (initlevel); \
- d = debugfs_create_x8(name, 0600, dbg.dbgdir, \
- &(dbg.dbg_module[id])); \
- if (!IS_ERR(d)) \
- dbg.dbg_module_dentries[id] = d; \
+ dbg.dbg_module_dentries[id] = \
+ debugfs_create_x8(name, 0600, \
+ dbg.dbgdir, \
+ &(dbg.dbg_module[id])); \
} while (0)
static int iwm_debugfs_u32_read(void *data, u64 *val)
@@ -266,7 +265,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct iwm_priv *iwm = filp->private_data;
- struct iwm_rx_ticket_node *ticket, *next;
+ struct iwm_rx_ticket_node *ticket;
char *buf;
int buf_len = 4096, i;
size_t len = 0;
@@ -281,7 +280,8 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
if (!buf)
return -ENOMEM;
- list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
+ spin_lock(&iwm->ticket_lock);
+ list_for_each_entry(ticket, &iwm->rx_tickets, node) {
len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
ticket->ticket->id);
len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
@@ -289,14 +289,17 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
ticket->ticket->flags);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
- struct iwm_rx_packet *packet, *nxt;
+ struct iwm_rx_packet *packet;
struct list_head *pkt_list = &iwm->rx_packets[i];
+
if (!list_empty(pkt_list)) {
len += snprintf(buf + len, buf_len - len,
"Packet hash #%d\n", i);
- list_for_each_entry_safe(packet, nxt, pkt_list, node) {
+ spin_lock(&iwm->packet_lock[i]);
+ list_for_each_entry(packet, pkt_list, node) {
len += snprintf(buf + len, buf_len - len,
"\tPacket id: %d\n",
packet->id);
@@ -304,6 +307,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
"\tPacket length: %lu\n",
packet->pkt_size);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -418,89 +422,29 @@ static const struct file_operations iwm_debugfs_fw_err_fops = {
.read = iwm_debugfs_fw_err_read,
};
-int iwm_debugfs_init(struct iwm_priv *iwm)
+void iwm_debugfs_init(struct iwm_priv *iwm)
{
- int i, result;
- char devdir[16];
+ int i;
iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- result = PTR_ERR(iwm->dbg.rootdir);
- if (!result || IS_ERR(iwm->dbg.rootdir)) {
- if (result == -ENODEV) {
- IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- result = 0; /* No debugfs support */
- }
- IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
- goto error;
- }
-
- snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
-
- iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
- result = PTR_ERR(iwm->dbg.devdir);
- if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
- goto error;
- }
-
+ iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
+ iwm->dbg.rootdir);
iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.dbgdir);
- if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
- goto error;
- }
-
iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.rxdir);
- if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.txdir);
- if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
- goto error;
- }
-
iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
- result = PTR_ERR(iwm->dbg.busdir);
- if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
- goto error;
- }
-
- if (iwm->bus_ops->debugfs_init) {
- result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
- if (result < 0) {
- IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
- goto error;
- }
- }
-
+ if (iwm->bus_ops->debugfs_init)
+ iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
iwm->dbg.dbg_level = IWM_DL_NONE;
iwm->dbg.dbg_level_dentry =
debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_level);
- result = PTR_ERR(iwm->dbg.dbg_level_dentry);
- if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
- goto error;
- }
-
iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
iwm->dbg.dbg_modules_dentry =
debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
&fops_iwm_dbg_modules);
- result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
- if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
- goto error;
- }
for (i = 0; i < __IWM_DM_NR; i++)
add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
@@ -509,44 +453,15 @@ int iwm_debugfs_init(struct iwm_priv *iwm)
iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_txq_fops);
- result = PTR_ERR(iwm->dbg.txq_dentry);
- if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
- goto error;
- }
-
iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
iwm->dbg.txdir, iwm,
&iwm_debugfs_tx_credit_fops);
- result = PTR_ERR(iwm->dbg.tx_credit_dentry);
- if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
- goto error;
- }
-
iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
iwm->dbg.rxdir, iwm,
&iwm_debugfs_rx_ticket_fops);
- result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
- if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
- goto error;
- }
-
iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
iwm->dbg.dbgdir, iwm,
&iwm_debugfs_fw_err_fops);
- result = PTR_ERR(iwm->dbg.fw_err_dentry);
- if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result);
- goto error;
- }
-
-
- return 0;
-
- error:
- return result;
}
void iwm_debugfs_exit(struct iwm_priv *iwm)
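
Review note: iwm_debugfs_init() now returns void and drops every IS_ERR/PTR_ERR check, and the bus debugfs_init hook is changed to void as well (bus.h earlier, sdio.c further down), so a missing or failing debugfs can no longer fail the probe path. A rough stand-alone C sketch of that "optional facility, never propagate failure" shape; the names below are invented and are not the kernel debugfs API:

#include <stdio.h>
#include <stdlib.h>

struct dbg_dir { const char *name; };

/* Stand-in for a debugfs-style constructor that may fail. */
static struct dbg_dir *dbg_create_dir(const char *name)
{
	struct dbg_dir *d = malloc(sizeof(*d));

	if (d)
		d->name = name;
	return d;
}

/* Best-effort init: returns void, so callers never treat a missing
 * debug directory as a reason to abort device setup. */
static void example_debug_init(struct dbg_dir **root)
{
	*root = dbg_create_dir("example");
	if (!*root)
		fprintf(stderr, "debug dir unavailable, continuing\n");
}

int main(void)
{
	struct dbg_dir *root;

	example_debug_init(&root);
	printf("probe done (debug %savailable)\n", root ? "" : "un");
	free(root);
	return 0;
}
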
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 229de990379..9531b18cf72 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -105,6 +105,7 @@
#include "hal.h"
#include "umac.h"
#include "debug.h"
+#include "trace.h"
static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
struct iwm_nonwifi_cmd *cmd,
@@ -207,9 +208,9 @@ void iwm_cmd_flush(struct iwm_priv *iwm)
struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
{
- struct iwm_wifi_cmd *cmd, *next;
+ struct iwm_wifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
list_del(&cmd->pending);
return cmd;
@@ -218,12 +219,12 @@ struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
return NULL;
}
-struct iwm_nonwifi_cmd *
-iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode)
+struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
+ u8 seq_num, u8 cmd_opcode)
{
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if ((cmd->seq_num == seq_num) &&
(cmd->udma_cmd.opcode == cmd_opcode) &&
(cmd->resp_received)) {
@@ -277,6 +278,7 @@ static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
udma_cmd->op1_sz, udma_cmd->op2);
+ trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
@@ -363,6 +365,7 @@ static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
return ret;
}
+ trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
return iwm_bus_send_chunk(iwm, buf->start, buf->len);
}
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
index 0adfdc85765..c20936d9b6b 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -75,7 +75,8 @@ do { \
/* UDMA IN OP CODE -- cmd bits [3:0] */
-#define UDMA_IN_OPCODE_MASK 0xF
+#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
+#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
#define UDMA_IN_OPCODE_READ_RESP 0x1
@@ -130,7 +131,7 @@ do { \
#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
IWM_MAX_WIFI_HEADERS_SIZE)
-#define IWM_HAL_CONCATENATE_BUF_SIZE 8192
+#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
struct iwm_wifi_cmd_buff {
u16 len;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 79ffa3b98d7..13266c3842f 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -48,6 +48,7 @@
#include "umac.h"
#include "lmac.h"
#include "eeprom.h"
+#include "trace.h"
#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
#define IWM_AUTHOR "<ilw@linux.intel.com>"
@@ -268,7 +269,9 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
+ spinlock_t ticket_lock;
struct list_head rx_packets[IWM_RX_ID_HASH];
+ spinlock_t packet_lock[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 23856d359e1..362002735b1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -277,8 +277,11 @@ int iwm_priv_init(struct iwm_priv *iwm)
skb_queue_head_init(&iwm->rx_list);
INIT_LIST_HEAD(&iwm->rx_tickets);
- for (i = 0; i < IWM_RX_ID_HASH; i++)
+ spin_lock_init(&iwm->ticket_lock);
+ for (i = 0; i < IWM_RX_ID_HASH; i++) {
INIT_LIST_HEAD(&iwm->rx_packets[i]);
+ spin_lock_init(&iwm->packet_lock[i]);
+ }
INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
@@ -424,9 +427,9 @@ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
u8 source)
{
- struct iwm_notif *notif, *next;
+ struct iwm_notif *notif;
- list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
+ list_for_each_entry(notif, &iwm->pending_notif, pending) {
if ((notif->cmd_id == cmd) && (notif->src == source)) {
list_del(&notif->pending);
return notif;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 3257d4fad83..e1184deca55 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -343,15 +343,17 @@ static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
{
u8 id_hash = IWM_RX_ID_GET_HASH(id);
- struct list_head *packet_list;
- struct iwm_rx_packet *packet, *next;
-
- packet_list = &iwm->rx_packets[id_hash];
+ struct iwm_rx_packet *packet;
- list_for_each_entry_safe(packet, next, packet_list, node)
- if (packet->id == id)
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
+ if (packet->id == id) {
+ list_del(&packet->node);
+ spin_unlock(&iwm->packet_lock[id_hash]);
return packet;
+ }
+ spin_unlock(&iwm->packet_lock[id_hash]);
return NULL;
}
@@ -389,18 +391,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
struct iwm_rx_packet *packet, *np;
int i;
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
list_del(&ticket->node);
iwm_rx_ticket_node_free(ticket);
}
+ spin_unlock(&iwm->ticket_lock);
for (i = 0; i < IWM_RX_ID_HASH; i++) {
+ spin_lock(&iwm->packet_lock[i]);
list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
node) {
list_del(&packet->node);
kfree_skb(packet->skb);
kfree(packet);
}
+ spin_unlock(&iwm->packet_lock[i]);
}
}
@@ -425,10 +431,13 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
return PTR_ERR(ticket_node);
IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
- ticket->action == IWM_RX_TICKET_RELEASE ?
+ __le16_to_cpu(ticket->action) ==
+ IWM_RX_TICKET_RELEASE ?
"RELEASE" : "DROP",
ticket->id);
+ spin_lock(&iwm->ticket_lock);
list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+ spin_unlock(&iwm->ticket_lock);
/*
* We received an Rx ticket, most likely there's
@@ -461,6 +470,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
struct iwm_rx_packet *packet;
u16 id, buf_offset;
u32 packet_size;
+ u8 id_hash;
IWM_DBG_RX(iwm, DBG, "\n");
@@ -478,7 +488,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
if (IS_ERR(packet))
return PTR_ERR(packet);
- list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+ id_hash = IWM_RX_ID_GET_HASH(id);
+ spin_lock(&iwm->packet_lock[id_hash]);
+ list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
+ spin_unlock(&iwm->packet_lock[id_hash]);
/* We might (unlikely) have received the packet _after_ the ticket */
queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -519,6 +532,8 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
unsigned long buf_size,
struct iwm_wifi_cmd *cmd)
{
+ struct wiphy *wiphy = iwm_to_wiphy(iwm);
+ struct ieee80211_channel *chan;
struct iwm_umac_notif_assoc_complete *complete =
(struct iwm_umac_notif_assoc_complete *)buf;
@@ -527,6 +542,18 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
+ chan = ieee80211_get_channel(wiphy,
+ ieee80211_channel_to_frequency(complete->channel));
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+ /* Associated to a disallowed channel, disassociate. */
+ __iwm_invalidate_mlme_profile(iwm);
+ IWM_WARN(iwm, "Couldn't associate with %pM because "
+ "channel %d is disabled. Check your local "
+ "regulatory setting.\n",
+ complete->bssid, complete->channel);
+ goto failure;
+ }
+
set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
iwm->channel = complete->channel;
@@ -563,6 +590,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
GFP_KERNEL);
break;
case UMAC_ASSOC_COMPLETE_FAILURE:
+ failure:
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -757,7 +785,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
(struct iwm_umac_notif_bss_info *)buf;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
- struct iwm_bss_info *bss, *next;
+ struct iwm_bss_info *bss;
s32 signal;
int freq;
u16 frame_len = le16_to_cpu(umac_bss->frame_len);
@@ -776,7 +804,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
+ list_for_each_entry(bss, &iwm->bss_list, node)
if (bss->bss->table_idx == umac_bss->table_idx)
break;
@@ -843,16 +871,15 @@ static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
int i;
for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
- table_idx = (le16_to_cpu(bss_rm->entries[i])
- & IWM_BSS_REMOVE_INDEX_MSK);
+ table_idx = le16_to_cpu(bss_rm->entries[i]) &
+ IWM_BSS_REMOVE_INDEX_MSK;
list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
struct ieee80211_mgmt *mgmt;
mgmt = (struct ieee80211_mgmt *)
(bss->bss->frame_buf);
- IWM_DBG_MLME(iwm, ERR,
- "BSS removed: %pM\n",
+ IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
mgmt->bssid);
list_del(&bss->node);
kfree(bss->bss);
@@ -1224,18 +1251,24 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
u8 source, cmd_id;
u16 seq_num;
u32 count;
- u8 resp;
wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
-
source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
if (source >= IWM_SRC_NUM) {
IWM_CRIT(iwm, "invalid source %d\n", source);
return -EINVAL;
}
- count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT));
+ if (cmd_id == REPLY_RX_MPDU_CMD)
+ trace_iwm_rx_packet(iwm, buf, buf_size);
+ else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
+ (source == UMAC_HDI_IN_SOURCE_FW))
+ trace_iwm_rx_ticket(iwm, buf, buf_size);
+ else
+ trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
+
+ count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
count += sizeof(struct iwm_umac_wifi_in_hdr) -
sizeof(struct iwm_dev_cmd_hdr);
if (count > buf_size) {
@@ -1243,8 +1276,6 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
return -EINVAL;
}
- resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
-
seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
@@ -1317,8 +1348,9 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
{
u8 seq_num;
struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
- struct iwm_nonwifi_cmd *cmd, *next;
+ struct iwm_nonwifi_cmd *cmd;
+ trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
/*
@@ -1329,7 +1361,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
* That means we only support synchronised non wifi command response
* schemes.
*/
- list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending)
+ list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
cmd->resp_received = 1;
cmd->buf.len = buf_size;
@@ -1648,6 +1680,7 @@ void iwm_rx_worker(struct work_struct *work)
* We stop whenever a ticket is missing its packet, as we're
* supposed to send the packets in order.
*/
+ spin_lock(&iwm->ticket_lock);
list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
struct iwm_rx_packet *packet =
iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1656,12 +1689,12 @@ void iwm_rx_worker(struct work_struct *work)
IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
"to be handled first\n",
le16_to_cpu(ticket->ticket->id));
- return;
+ break;
}
list_del(&ticket->node);
- list_del(&packet->node);
iwm_rx_process_packet(iwm, packet, ticket);
}
+ spin_unlock(&iwm->ticket_lock);
}
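
Review note: the rx path now takes iwm->ticket_lock and the per-bucket packet_lock[] around every list walk, and iwm_rx_packet_get() unlinks the packet while still holding the bucket lock, which is what lets the unlocked list_for_each_entry_safe() walks go away. A rough user-space C sketch of that locked lookup-and-unlink shape, with a pthread mutex standing in for the spinlock and all names invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Single list with one lock; the driver keys packets by hash bucket and
 * gives each bucket its own lock, but the locking shape is the same. */
struct pkt {
	int id;
	struct pkt *next;
};

static struct pkt *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void pkt_add(int id)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->id = id;
	pthread_mutex_lock(&list_lock);
	p->next = head;			/* insert under the lock */
	head = p;
	pthread_mutex_unlock(&list_lock);
}

/* Find a packet by id and unlink it before dropping the lock, so no
 * other walker can ever see a half-removed node. */
static struct pkt *pkt_get(int id)
{
	struct pkt **pp, *p;

	pthread_mutex_lock(&list_lock);
	for (pp = &head; (p = *pp) != NULL; pp = &p->next) {
		if (p->id == id) {
			*pp = p->next;
			pthread_mutex_unlock(&list_lock);
			return p;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	struct pkt *p;

	pkt_add(1);
	pkt_add(2);
	p = pkt_get(1);
	printf("got packet %d\n", p ? p->id : -1);
	free(p);
	free(pkt_get(2));
	return 0;
}
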
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 1eafd6dec3f..edcb52330cf 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -366,21 +366,13 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
.read = iwm_debugfs_sdio_read,
};
-static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
+static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
{
- int result;
struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
hw->cccr_dentry = debugfs_create_file("cccr", 0200,
parent_dir, iwm,
&iwm_debugfs_sdio_fops);
- result = PTR_ERR(hw->cccr_dentry);
- if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
- IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
- return result;
- }
-
- return 0;
}
static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
@@ -440,11 +432,7 @@ static int iwm_sdio_probe(struct sdio_func *func,
hw = iwm_private(iwm);
hw->iwm = iwm;
- ret = iwm_debugfs_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Debugfs registration failed\n");
- goto if_free;
- }
+ iwm_debugfs_init(iwm);
sdio_set_drvdata(func, hw);
@@ -473,7 +461,6 @@ static int iwm_sdio_probe(struct sdio_func *func,
destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
- if_free:
iwm_if_free(iwm);
return ret;
}
@@ -492,8 +479,6 @@ static void iwm_sdio_remove(struct sdio_func *func)
sdio_set_drvdata(func, NULL);
dev_info(dev, "IWM SDIO remove\n");
-
- return;
}
static const struct sdio_device_id iwm_sdio_ids[] = {
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
new file mode 100644
index 00000000000..904d36f2231
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.c
@@ -0,0 +1,3 @@
+#include "iwm.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
new file mode 100644
index 00000000000..abb4805fa8d
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -0,0 +1,283 @@
+#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWM_TRACE_H__
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWM_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwm
+
+#define IWM_ENTRY __array(char, ndev_name, 16)
+#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
+#define IWM_PR_FMT "%s"
+#define IWM_PR_ARG __entry->ndev_name
+
+TRACE_EVENT(iwm_tx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, hw)
+ __field(u16, seq)
+ __field(u32, addr)
+ __field(u32, op1)
+ __field(u32, op2)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
+ __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
+ __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->addr = le32_to_cpu(hdr->addr);
+ __entry->op1 = le32_to_cpu(hdr->op1_sz);
+ __entry->op2 = le32_to_cpu(hdr->op2);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
+ "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
+ __entry->hw, __entry->seq, __entry->addr, __entry->op1,
+ __entry->op2
+ )
+);
+
+TRACE_EVENT(iwm_tx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u8, lmac)
+ __field(u8, resp)
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->opcode = hdr->sw_hdr.cmd.cmd;
+ __entry->lmac = 0;
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
+ __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
+ __entry->lmac = 1;
+ __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
+ "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
+ IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
+ __entry->resp, __entry->eot, __entry->seq, __entry->color,
+ __entry->ra_tid, __entry->credit_group
+ )
+);
+
+TRACE_EVENT(iwm_tx_packets,
+ TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, eot)
+ __field(u8, ra_tid)
+ __field(u8, credit_group)
+ __field(u8, color)
+ __field(u16, seq)
+ __field(u8, npkt)
+ __field(u32, bytes)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_out_hdr *hdr =
+ (struct iwm_umac_wifi_out_hdr *)buf;
+
+ IWM_ASSIGN;
+ __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
+ __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
+ __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
+ __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
+ __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->npkt = 1;
+ __entry->bytes = len;
+
+ if (!__entry->eot) {
+ int count;
+ u8 *ptr = buf;
+
+ __entry->npkt = 0;
+ while (ptr < buf + len) {
+ count = GET_VAL32(hdr->sw_hdr.meta_data,
+ UMAC_FW_CMD_BYTE_COUNT);
+ ptr += ALIGN(sizeof(*hdr) + count, 16);
+ hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
+ __entry->npkt++;
+ }
+ }
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
+ "ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
+ IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
+ __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
+ __entry->credit_group, __entry->npkt, __entry->bytes
+ )
+);
+
+TRACE_EVENT(iwm_rx_nonwifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, opcode)
+ __field(u16, seq)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_udma_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
+ __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
+ __entry->len = len;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
+ IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
+ )
+);
+
+TRACE_EVENT(iwm_rx_wifi_cmd,
+ TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
+
+ TP_ARGS(iwm, hdr),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, cmd)
+ __field(u8, source)
+ __field(u16, seq)
+ __field(u32, count)
+ ),
+
+ TP_fast_assign(
+ IWM_ASSIGN;
+ __entry->cmd = hdr->sw_hdr.cmd.cmd;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
+ __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
+ __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
+ __entry->cmd, __entry->seq, __entry->count
+ )
+);
+
+#define iwm_ticket_action_symbol \
+ { IWM_RX_TICKET_DROP, "DROP" }, \
+ { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
+ { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
+ { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
+
+TRACE_EVENT(iwm_rx_ticket,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, action)
+ __field(u8, reason)
+ __field(u16, id)
+ __field(u16, flags)
+ ),
+
+ TP_fast_assign(
+ struct iwm_rx_ticket *ticket =
+ ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
+
+ IWM_ASSIGN;
+ __entry->id = le16_to_cpu(ticket->id);
+ __entry->action = le16_to_cpu(ticket->action);
+ __entry->flags = le16_to_cpu(ticket->flags);
+ __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
+ IWM_PR_ARG, __entry->id,
+ __print_symbolic(__entry->action, iwm_ticket_action_symbol),
+ __entry->reason ? "reason" : "flags",
+ __entry->reason ? __entry->reason : __entry->flags,
+ __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
+ )
+);
+
+TRACE_EVENT(iwm_rx_packet,
+ TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
+
+ TP_ARGS(iwm, buf, len),
+
+ TP_STRUCT__entry(
+ IWM_ENTRY
+ __field(u8, source)
+ __field(u16, id)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ struct iwm_umac_wifi_in_hdr *hdr = buf;
+
+ IWM_ASSIGN;
+ __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
+ __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
+ __entry->len = len - sizeof(*hdr);
+ ),
+
+ TP_printk(
+ IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
+ IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
+ "LMAC" : "UMAC", __entry->id, __entry->len
+ )
+);
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index f6a02f123f3..3216621fc55 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -302,8 +302,8 @@ void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
-static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
- int pool_id, u8 *buf)
+static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
+ int pool_id, u8 *buf)
{
struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
struct iwm_udma_wifi_cmd udma_cmd;
@@ -347,6 +347,7 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
/* mark EOP for the last packet */
iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
+ trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
txq->concat_count = 0;
@@ -451,7 +452,6 @@ void iwm_tx_worker(struct work_struct *work)
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct iwm_priv *iwm = ndev_to_iwm(netdev);
- struct net_device *ndev = iwm_to_ndev(iwm);
struct wireless_dev *wdev = iwm_to_wdev(iwm);
struct iwm_tx_info *tx_info;
struct iwm_tx_queue *txq;
@@ -518,12 +518,12 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
drop:
- ndev->stats.tx_dropped++;
+ netdev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 7f54a145ca6..0cbba3ecc81 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -362,7 +362,7 @@ struct iwm_udma_out_wifi_hdr {
#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
#define IWM_RX_TICKET_AMSDU_MSK 0x8
#define IWM_RX_TICKET_DROP_REASON_POS 4
-#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS)
+#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
#define IWM_RX_DROP_NO_DROP 0x0
#define IWM_RX_DROP_BAD_CRC 0x1
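
Review note: the IWM_RX_TICKET_DROP_REASON_MSK fix matters in this same series because the new trace.h extracts the drop reason with that mask and IWM_RX_TICKET_DROP_REASON_POS; the old definition shifted by a POS name that does not match the one defined just above it, so the mask could not expand cleanly where it is now used. A tiny C sketch of the usual POS/MSK field extraction (layout and names below are made up for illustration):

#include <stdio.h>

/* Assumed layout for the example only: low bits are status flags,
 * bits 4-8 carry a 5-bit drop-reason code. */
#define EX_DROP_REASON_POS	4
#define EX_DROP_REASON_MSK	(0x1F << EX_DROP_REASON_POS)

static unsigned int drop_reason(unsigned int flags)
{
	/* Mask the field out first, then shift it back down to bit 0. */
	return (flags & EX_DROP_REASON_MSK) >> EX_DROP_REASON_POS;
}

int main(void)
{
	unsigned int flags = (0x3u << EX_DROP_REASON_POS) | 0x8u;

	printf("reason %u\n", drop_reason(flags));	/* prints 3 */
	return 0;
}
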
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 12a2ef9dace..aa06070e5ea 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -32,6 +32,9 @@ u8 lbs_bg_rates[MAX_RATES] =
0x00, 0x00 };
+static int assoc_helper_wep_keys(struct lbs_private *priv,
+ struct assoc_request *assoc_req);
+
/**
* @brief This function finds common rates between rates and card rates.
*
@@ -611,7 +614,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
if (status_code) {
lbs_mac_event_disconnected(priv);
- ret = -1;
+ ret = status_code;
goto done;
}
@@ -814,7 +817,24 @@ static int lbs_try_associate(struct lbs_private *priv,
goto out;
ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
+ /* If the association fails with the current auth mode, retry
+ * with the other auth mode
+ */
+ if ((priv->authtype_auto) &&
+ (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
+ (assoc_req->secinfo.wep_enabled) &&
+ (priv->connect_status != LBS_CONNECTED)) {
+ if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
+ priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
+ else
+ priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
+ if (!assoc_helper_wep_keys(priv, assoc_req))
+ ret = lbs_associate(priv, assoc_req,
+ CMD_802_11_ASSOCIATE);
+ }
+ if (ret)
+ ret = -1;
out:
lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
return ret;
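
Review note: lbs_assoc_post() now returns the raw 802.11 status code instead of a flat -1, and lbs_try_associate() uses that to retry once with the other WEP auth algorithm when the AP reports WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG, but only while priv->authtype_auto is set (wext.c below clears it as soon as userspace forces open or shared-key). A stand-alone C sketch of that "retry once with the alternate mode on a specific rejection" shape; every name in it is invented:

#include <stdio.h>

enum auth_alg { AUTH_OPEN, AUTH_SHARED };

#define ERR_BAD_AUTH_ALG	13	/* stand-in for the 802.11 status */

/* Pretend association attempt: only shared-key succeeds here, so the
 * first (open-system) try is rejected with the auth-alg status code. */
static int do_associate(enum auth_alg alg)
{
	return alg == AUTH_SHARED ? 0 : ERR_BAD_AUTH_ALG;
}

/* Retry with the other algorithm only when auto selection is allowed
 * and the failure was specifically an auth-algorithm rejection. */
static int associate_auto(enum auth_alg *alg, int auto_auth)
{
	int ret = do_associate(*alg);

	if (auto_auth && ret == ERR_BAD_AUTH_ALG) {
		*alg = (*alg == AUTH_OPEN) ? AUTH_SHARED : AUTH_OPEN;
		ret = do_associate(*alg);
	}
	return ret;
}

int main(void)
{
	enum auth_alg alg = AUTH_OPEN;
	int ret = associate_auto(&alg, 1);

	printf("result %d, final alg %s\n", ret,
	       alg == AUTH_OPEN ? "open" : "shared-key");
	return 0;
}
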
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ce7bec402a3..9d5d3ccf08c 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -79,6 +79,7 @@ static const u32 cipher_suites[] = {
static int lbs_cfg_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a48ccaffb28..de2caac11dd 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
return -ENOMEM;
pos += snprintf(buf+pos, len-pos,
- "# | ch | rssi | bssid | cap | Qual | SSID \n");
+ "# | ch | rssi | bssid | cap | Qual | SSID\n");
mutex_lock(&priv->lock);
list_for_each_entry (iter_bss, &priv->network_list, list) {
@@ -757,15 +757,12 @@ void lbs_debugfs_init(void)
{
if (!lbs_dir)
lbs_dir = debugfs_create_dir("lbs_wireless", NULL);
-
- return;
}
void lbs_debugfs_remove(void)
{
if (lbs_dir)
debugfs_remove(lbs_dir);
- return;
}
void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6875e1498bd..a54880e4ad2 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -134,6 +134,7 @@ struct lbs_private {
u8 wpa_ie_len;
u16 wep_tx_keyidx;
struct enc_key wep_keys[4];
+ u8 authtype_auto;
/* Wake On LAN */
uint32_t wol_criteria;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 6d55439a7b9..08e4e390800 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -777,7 +777,7 @@ static void if_cs_release(struct pcmcia_device *p_dev)
lbs_deb_enter(LBS_DEB_CS);
- free_irq(p_dev->irq.AssignedIRQ, card);
+ free_irq(p_dev->irq, card);
pcmcia_disable_device(p_dev);
if (card->iobase)
ioport_unmap(card->iobase);
@@ -807,8 +807,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
p_dev->io.NumPorts1 = cfg->io.win[0].len;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
if (cfg->io.nwin != 1) {
@@ -837,9 +836,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
card->p_dev = p_dev;
p_dev->priv = card;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
p_dev->conf.Attributes = 0;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -854,13 +850,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* a handler to the interrupt, unless the 'Handler' member of
* the irq structure is initialized.
*/
- if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(p_dev, &p_dev->irq);
- if (ret) {
- lbs_pr_err("error in pcmcia_request_irq\n");
- goto out1;
- }
- }
+ if (!p_dev->irq)
+ goto out1;
/* Initialize io access */
card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
@@ -883,7 +874,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Finally, report what we've done */
lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
- p_dev->irq.AssignedIRQ, p_dev->io.BasePort1,
+ p_dev->irq, p_dev->io.BasePort1,
p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
/*
@@ -940,7 +931,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
priv->fw_ready = 1;
/* Now actually get the IRQ */
- ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt,
+ ret = request_irq(p_dev->irq, if_cs_interrupt,
IRQF_SHARED, DRV_NAME, card);
if (ret) {
lbs_pr_err("error in request_irq\n");
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7d1a3c6b6ce..64dd345d30f 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -35,6 +35,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/host.h>
#include "host.h"
#include "decl.h"
@@ -313,12 +315,30 @@ out:
return ret;
}
+static int if_sdio_wait_status(struct if_sdio_card *card, const u8 condition)
+{
+ u8 status;
+ unsigned long timeout;
+ int ret = 0;
+
+ timeout = jiffies + HZ;
+ while (1) {
+ status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
+ if (ret)
+ return ret;
+ if ((status & condition) == condition)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ mdelay(1);
+ }
+ return ret;
+}
+
static int if_sdio_card_to_host(struct if_sdio_card *card)
{
int ret;
- u8 status;
u16 size, type, chunk;
- unsigned long timeout;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -333,19 +353,9 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
goto out;
}
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto out;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto out;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret)
+ goto out;
/*
* The transfer must be in one transaction or the firmware
@@ -412,8 +422,6 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
{
struct if_sdio_card *card;
struct if_sdio_packet *packet;
- unsigned long timeout;
- u8 status;
int ret;
unsigned long flags;
@@ -433,25 +441,15 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
sdio_claim_host(card->func);
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if (status & IF_SDIO_IO_RDY)
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
+ ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
+ if (ret == 0) {
+ ret = sdio_writesb(card->func, card->ioport,
+ packet->buffer, packet->nb);
}
- ret = sdio_writesb(card->func, card->ioport,
- packet->buffer, packet->nb);
if (ret)
- goto release;
-release:
+ lbs_pr_err("error %d sending packet to firmware\n", ret);
+
sdio_release_host(card->func);
kfree(packet);
@@ -464,10 +462,11 @@ release:
/* Firmware */
/********************************************************************/
+#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
+
static int if_sdio_prog_helper(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -499,20 +498,14 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
+
+ /* On some platforms (like Davinci) the chip needs more time
+ * between helper blocks.
+ */
+ mdelay(2);
chunk_size = min(size, (size_t)60);
@@ -582,7 +575,6 @@ out:
static int if_sdio_prog_real(struct if_sdio_card *card)
{
int ret;
- u8 status;
const struct firmware *fw;
unsigned long timeout;
u8 *chunk_buffer;
@@ -614,20 +606,9 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
size = fw->size;
while (size) {
- timeout = jiffies + HZ;
- while (1) {
- status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
- if (ret)
- goto release;
- if ((status & IF_SDIO_IO_RDY) &&
- (status & IF_SDIO_DL_RDY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- goto release;
- }
- mdelay(1);
- }
+ ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
+ if (ret)
+ goto release;
req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
if (ret)
@@ -943,6 +924,7 @@ static int if_sdio_probe(struct sdio_func *func,
int ret, i;
unsigned int model;
struct if_sdio_packet *packet;
+ struct mmc_host *host = func->card->host;
lbs_deb_enter(LBS_DEB_SDIO);
@@ -1023,6 +1005,25 @@ static int if_sdio_probe(struct sdio_func *func,
if (ret)
goto disable;
+ /* For 1-bit transfers to the 8686 model, we need to enable the
+ * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
+ * bit to allow access to non-vendor registers. */
+ if ((card->model == IF_SDIO_MODEL_8686) &&
+ (host->caps & MMC_CAP_SDIO_IRQ) &&
+ (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
+ u8 reg;
+
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+
+ reg |= SDIO_BUS_ECSI;
+ sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
+ if (ret)
+ goto release_int;
+ }
+
card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
if (ret)
goto release_int;
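
Review note: the four open-coded "read IF_SDIO_STATUS until the ready bits show up or a one-second timeout expires" loops are folded into the new if_sdio_wait_status() helper, with each caller picking the bits to wait for (IF_SDIO_IO_RDY alone, or together with IF_SDIO_DL_RDY via FW_DL_READY_STATUS for firmware download). A rough user-space C sketch of that poll-with-deadline helper; the register read and names below are stand-ins, not the SDIO API:

#include <stdio.h>
#include <time.h>
#include <errno.h>

#define EX_IO_RDY	0x01
#define EX_DL_RDY	0x02

/* Stand-in for reading the card status register: the ready bits only
 * appear after a few polls, to exercise the wait loop. */
static unsigned char read_status(void)
{
	static int calls;

	return ++calls > 3 ? (EX_IO_RDY | EX_DL_RDY) : 0;
}

/* Poll until every bit in 'condition' is set, or give up after ~1s. */
static int wait_status(unsigned char condition)
{
	time_t deadline = time(NULL) + 1;

	while ((read_status() & condition) != condition) {
		if (time(NULL) > deadline)
			return -ETIMEDOUT;
		/* the driver sleeps about 1 ms per iteration here */
	}
	return 0;
}

int main(void)
{
	printf("wait: %d\n", wait_status(EX_IO_RDY | EX_DL_RDY));
	return 0;
}
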
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index fcea5741ba6..f41594c7ac1 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -133,8 +133,6 @@ static void if_usb_write_bulk_callback(struct urb *urb)
/* print the failure status number for debug */
lbs_pr_info("URB in failure status: %d\n", urb->status);
}
-
- return;
}
/**
@@ -651,8 +649,6 @@ static void if_usb_receive_fwload(struct urb *urb)
if_usb_submit_rx_urb_fwload(cardp);
kfree(syncfwheader);
-
- return;
}
#define MRVDRV_MIN_PKT_LEN 30
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 598080414b1..d9b8ee130c4 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -229,7 +229,7 @@ static void lbs_tx_timeout(struct net_device *dev)
lbs_pr_err("tx watch dog timeout\n");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (priv->currenttxskb)
lbs_send_tx_feedback(priv, 0);
@@ -319,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
struct net_device *dev, int nr_addrs)
{
int i = nr_addrs;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
int cnt;
if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
@@ -327,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
netif_addr_lock_bh(dev);
cnt = netdev_mc_count(dev);
- netdev_for_each_mc_addr(mc_list, dev) {
- if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) {
lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
cnt--;
continue;
}
if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
break;
- memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
+ memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN);
lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
- mc_list->dmi_addr);
+ ha->addr);
i++;
cnt--;
}
@@ -836,6 +836,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->is_auto_deep_sleep_enabled = 0;
priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
+ priv->authtype_auto = 1;
mutex_init(&priv->lock);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 784dae71470..a115bfa9513 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -39,10 +39,10 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
/**
- * @brief This function computes the avgSNR .
+ * @brief This function computes the avgSNR .
*
- * @param priv A pointer to struct lbs_private structure
- * @return avgSNR
+ * @param priv A pointer to struct lbs_private structure
+ * @return avgSNR
*/
static u8 lbs_getavgsnr(struct lbs_private *priv)
{
@@ -57,10 +57,10 @@ static u8 lbs_getavgsnr(struct lbs_private *priv)
}
/**
- * @brief This function computes the AvgNF
+ * @brief This function computes the AvgNF
*
- * @param priv A pointer to struct lbs_private structure
- * @return AvgNF
+ * @param priv A pointer to struct lbs_private structure
+ * @return AvgNF
*/
static u8 lbs_getavgnf(struct lbs_private *priv)
{
@@ -75,11 +75,11 @@ static u8 lbs_getavgnf(struct lbs_private *priv)
}
/**
- * @brief This function save the raw SNR/NF to our internel buffer
+ * @brief This function save the raw SNR/NF to our internel buffer
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -90,15 +90,14 @@ static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
priv->nextSNRNF++;
if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
priv->nextSNRNF = 0;
- return;
}
/**
- * @brief This function computes the RSSI in received packet.
+ * @brief This function computes the RSSI in received packet.
*
- * @param priv A pointer to struct lbs_private structure
- * @param prxpd A pointer to rxpd structure of received packet
- * @return n/a
+ * @param priv A pointer to struct lbs_private structure
+ * @param prxpd A pointer to rxpd structure of received packet
+ * @return n/a
*/
static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
{
@@ -135,9 +134,9 @@ static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
* @brief This function processes received packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
{
@@ -197,7 +196,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
* before the snap_type.
*/
p_ethhdr = (struct ethhdr *)
- ((u8 *) & p_rx_pkt->eth803_hdr
+ ((u8 *) &p_rx_pkt->eth803_hdr
+ sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
- sizeof(p_rx_pkt->eth803_hdr.dest_addr)
- sizeof(p_rx_pkt->eth803_hdr.src_addr)
@@ -214,7 +213,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
} else {
lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
- (u8 *) & p_rx_pkt->rfc1042_hdr,
+ (u8 *) &p_rx_pkt->rfc1042_hdr,
sizeof(p_rx_pkt->rfc1042_hdr));
/* Chop off the rxpd */
@@ -255,8 +254,8 @@ EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
* @brief This function converts Tx/Rx rates from the Marvell WLAN format
* (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
*
- * @param rate Input rate
- * @return Output Rate (0 if invalid)
+ * @param rate Input rate
+ * @return Output Rate (0 if invalid)
*/
static u8 convert_mv_rate_to_radiotap(u8 rate)
{
@@ -295,9 +294,9 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
* @brief This function processes a received 802.11 packet and forwards it
* to kernel/upper layer
*
- * @param priv A pointer to struct lbs_private
- * @param skb A pointer to skb which includes the received packet
- * @return 0 or -1
+ * @param priv A pointer to struct lbs_private
+ * @param skb A pointer to skb which includes the received packet
+ * @return 0 or -1
*/
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb)
@@ -314,7 +313,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
p_rx_pkt = (struct rx80211packethdr *) skb->data;
prxpd = &p_rx_pkt->rx_pd;
- // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100));
+ /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 52d244ea3d9..a9bf658659e 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -147,8 +147,6 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
-
if (priv->monitormode) {
/* Keep the skb to echo it back once Tx feedback is
received from FW */
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 9b555884b08..f96a96031a5 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1441,8 +1441,10 @@ static int lbs_set_encode(struct net_device *dev,
set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
@@ -1621,8 +1623,10 @@ static int lbs_set_encodeext(struct net_device *dev,
goto out;
if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
} else if (dwrq->flags & IW_ENCODE_OPEN) {
+ priv->authtype_auto = 0;
assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
}
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index b620daf59ef..8945afd6ce3 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include "libertas_tf.h"
@@ -82,6 +84,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
int ret = -1;
u32 i;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -104,6 +108,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff,
priv->fwcapinfo);
+ lbtf_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
+ cmd.hwifversion, cmd.version);
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some
@@ -118,8 +124,10 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
}
/* if it's unidentified region code, use the default (USA) */
- if (i >= MRVDRV_MAX_REGION_CODE)
+ if (i >= MRVDRV_MAX_REGION_CODE) {
priv->regioncode = 0x10;
+ pr_info("unidentified region code; using the default (USA)\n");
+ }
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
@@ -128,6 +136,7 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
lbtf_geo_init(priv);
out:
+ lbtf_deb_leave(LBTF_DEB_CMD);
return ret;
}
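
The GET_HW_SPEC handling above clamps the 16-bit region code to 8 bits and falls back to 0x10 (USA) when the value is not recognised. A minimal standalone sketch of that validation, using a made-up region table purely for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative region table; the driver's real list is MRVDRV_MAX_REGION_CODE
 * entries long and lives elsewhere in the source. */
static const uint16_t region_codes[] = { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
#define NUM_REGION_CODES (sizeof(region_codes) / sizeof(region_codes[0]))

static uint16_t validate_region(uint16_t fw_code)
{
	uint16_t code = fw_code & 0xff;	/* clamp: only 8 bits are meaningful per FW spec */
	size_t i;

	for (i = 0; i < NUM_REGION_CODES; i++)
		if (region_codes[i] == code)
			return code;

	/* unidentified region code: fall back to the default (USA, 0x10) */
	fprintf(stderr, "unidentified region code 0x%x; using default 0x10\n", fw_code);
	return 0x10;
}

int main(void)
{
	printf("0x1030 -> 0x%x\n", validate_region(0x1030)); /* clamped to 0x30 */
	printf("0x99   -> 0x%x\n", validate_region(0x99));   /* unknown -> 0x10 */
	return 0;
}
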
@@ -141,13 +150,18 @@ out:
*/
int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
{
+ int ret = 0;
struct cmd_ds_802_11_rf_channel cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
cmd.channel = cpu_to_le16(channel);
- return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
+ return ret;
}
int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
@@ -155,20 +169,28 @@ int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
struct cmd_ds_802_11_beacon_set cmd;
int size;
- if (beacon->len > MRVL_MAX_BCN_SIZE)
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
+ if (beacon->len > MRVL_MAX_BCN_SIZE) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", -1);
return -1;
+ }
size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
cmd.hdr.size = cpu_to_le16(size);
cmd.len = cpu_to_le16(beacon->len);
memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", 0);
return 0;
}
int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
- int beacon_int) {
+ int beacon_int)
+{
struct cmd_ds_802_11_beacon_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -176,6 +198,8 @@ int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
cmd.beacon_period = cpu_to_le16(beacon_int);
lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -183,17 +207,28 @@ static void lbtf_queue_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
- if (!cmdnode)
- return;
+ if (!cmdnode) {
+ lbtf_deb_host("QUEUE_CMD: cmdnode is NULL\n");
+ goto qcmd_done;
+ }
- if (!cmdnode->cmdbuf->size)
- return;
+ if (!cmdnode->cmdbuf->size) {
+ lbtf_deb_host("DNLD_CMD: cmd size is zero\n");
+ goto qcmd_done;
+ }
cmdnode->result = 0;
spin_lock_irqsave(&priv->driver_lock, flags);
list_add_tail(&cmdnode->list, &priv->cmdpendingq);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+ lbtf_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+
+qcmd_done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_submit_command(struct lbtf_private *priv,
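
Most hunks in this file follow the same shape: early returns become gotos to a single exit label so the lbtf_deb_leave() trace always runs. A small userspace sketch of that pattern, with printf() stand-ins for the real enter/leave macros:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the driver's lbtf_deb_enter()/lbtf_deb_leave() macros. */
#define trace_enter()	printf("enter %s()\n", __func__)
#define trace_leave()	printf("leave %s()\n", __func__)

struct cmd {
	size_t size;
};

static void queue_cmd(struct cmd *cmd)
{
	trace_enter();

	if (!cmd) {
		printf("QUEUE_CMD: cmd is NULL\n");
		goto out;		/* single exit keeps the leave trace */
	}
	if (!cmd->size) {
		printf("QUEUE_CMD: cmd size is zero\n");
		goto out;
	}

	printf("QUEUE_CMD: queued command of %zu bytes\n", cmd->size);

out:
	trace_leave();
}

int main(void)
{
	struct cmd c = { .size = 16 };

	queue_cmd(NULL);
	queue_cmd(&c);
	return 0;
}
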
@@ -206,22 +241,33 @@ static void lbtf_submit_command(struct lbtf_private *priv,
int timeo = 5 * HZ;
int ret;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
priv->cur_cmd = cmdnode;
cmdsize = le16_to_cpu(cmd->size);
command = le16_to_cpu(cmd->command);
+
+ lbtf_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
+ command, le16_to_cpu(cmd->seqnum), cmdsize);
+ lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
+
ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
spin_unlock_irqrestore(&priv->driver_lock, flags);
- if (ret)
+ if (ret) {
+ pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
/* Let the timer kick in and retry, and potentially reset
the whole thing if the condition persists */
timeo = HZ;
+ }
/* Setup the timer after transmit command */
mod_timer(&priv->command_timer, jiffies + timeo);
+
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
/**
@@ -231,8 +277,10 @@ static void lbtf_submit_command(struct lbtf_private *priv,
static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
struct cmd_ctrl_node *cmdnode)
{
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!cmdnode)
- return;
+ goto cl_ins_out;
cmdnode->callback = NULL;
cmdnode->callback_arg = 0;
@@ -240,6 +288,9 @@ static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
list_add_tail(&cmdnode->list, &priv->cmdfreeq);
+
+cl_ins_out:
+ lbtf_deb_leave(LBTF_DEB_HOST);
}
static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
@@ -268,29 +319,41 @@ int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
{
struct cmd_ds_mac_multicast_addr cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
+
+ lbtf_deb_cmd("MULTICAST_ADR: setting %d addresses\n", cmd.nr_of_adrs);
+
memcpy(cmd.maclist, priv->multicastlist,
priv->nr_of_multicastmacaddr * ETH_ALEN);
lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
{
struct cmd_ds_set_mode cmd;
+ lbtf_deb_enter(LBTF_DEB_WEXT);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.mode = cpu_to_le16(mode);
+ lbtf_deb_wext("Switching to mode: 0x%x\n", mode);
lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_WEXT);
}
void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
{
struct cmd_ds_set_bssid cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.activate = activate ? 1 : 0;
@@ -298,11 +361,13 @@ void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
memcpy(cmd.bssid, bssid, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
{
struct cmd_ds_802_11_mac_address cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -310,6 +375,7 @@ int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
memcpy(cmd.macadd, mac_addr, ETH_ALEN);
lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
+ lbtf_deb_leave(LBTF_DEB_CMD);
return 0;
}
@@ -318,6 +384,8 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
int ret = 0;
struct cmd_ds_802_11_radio_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -341,19 +409,28 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
else
cmd.control &= cpu_to_le16(~TURN_ON_RF);
+ lbtf_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
+ priv->preamble);
+
ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
+
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
void lbtf_set_mac_control(struct lbtf_private *priv)
{
struct cmd_ds_mac_control cmd;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(priv->mac_control);
cmd.reserved = 0;
lbtf_cmd_async(priv, CMD_MAC_CONTROL,
&cmd.hdr, sizeof(cmd));
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -365,29 +442,43 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
*/
int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
{
+ int ret = 0;
u32 bufsize;
u32 i;
struct cmd_ctrl_node *cmdarray;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* Allocate and initialize the command array */
bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
cmdarray = kzalloc(bufsize, GFP_KERNEL);
- if (!cmdarray)
- return -1;
+ if (!cmdarray) {
+ lbtf_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n");
+ ret = -1;
+ goto done;
+ }
priv->cmd_array = cmdarray;
/* Allocate and initialize each command buffer in the command array */
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
- if (!cmdarray[i].cmdbuf)
- return -1;
+ if (!cmdarray[i].cmdbuf) {
+ lbtf_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
+ ret = -1;
+ goto done;
+ }
}
for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- return 0;
+
+ ret = 0;
+
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
+ return ret;
}
/**
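
lbtf_allocate_cmd_buffer() now funnels every failure through one exit so the result is traced; cleanup of partially allocated buffers is left to lbtf_free_cmd_buffer(). The standalone sketch below mirrors the allocation loop but frees on failure so it stays self-contained; names and sizes are illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define NUM_CMD_BUFFERS	10
#define CMD_BUFFER_SIZE	512

struct cmd_node {
	void *buf;
};

/* Allocate a pool of command nodes plus one buffer per node, reporting the
 * result through a single exit point. */
static int allocate_cmd_pool(struct cmd_node **out)
{
	struct cmd_node *pool;
	int ret = 0;
	unsigned int i;

	pool = calloc(NUM_CMD_BUFFERS, sizeof(*pool));
	if (!pool) {
		ret = -1;
		goto done;
	}

	for (i = 0; i < NUM_CMD_BUFFERS; i++) {
		pool[i].buf = calloc(1, CMD_BUFFER_SIZE);
		if (!pool[i].buf) {
			while (i--)
				free(pool[i].buf);
			free(pool);
			pool = NULL;
			ret = -1;
			goto done;
		}
	}

	*out = pool;
done:
	printf("allocate_cmd_pool: ret %d\n", ret);
	return ret;
}

int main(void)
{
	struct cmd_node *pool = NULL;

	if (!allocate_cmd_pool(&pool)) {
		for (unsigned int i = 0; i < NUM_CMD_BUFFERS; i++)
			free(pool[i].buf);
		free(pool);
	}
	return 0;
}
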
@@ -402,9 +493,13 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdarray;
unsigned int i;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
/* need to check if cmd array is allocated or not */
- if (priv->cmd_array == NULL)
- return 0;
+ if (priv->cmd_array == NULL) {
+ lbtf_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
+ goto done;
+ }
cmdarray = priv->cmd_array;
@@ -418,6 +513,8 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
kfree(priv->cmd_array);
priv->cmd_array = NULL;
+done:
+ lbtf_deb_leave(LBTF_DEB_HOST);
return 0;
}
@@ -433,6 +530,8 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
struct cmd_ctrl_node *tempnode;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
if (!priv)
return NULL;
@@ -442,11 +541,14 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
tempnode = list_first_entry(&priv->cmdfreeq,
struct cmd_ctrl_node, list);
list_del(&tempnode->list);
- } else
+ } else {
+ lbtf_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
tempnode = NULL;
+ }
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_HOST);
return tempnode;
}
@@ -462,16 +564,20 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
struct cmd_ctrl_node *cmdnode = NULL;
struct cmd_header *cmd;
unsigned long flags;
+ int ret = 0;
- /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
+ /* Debug group is LBTF_DEB_THREAD and not LBTF_DEB_HOST, because the
* only caller to us is lbtf_thread() and we get called even when a
* data packet is received */
+ lbtf_deb_enter(LBTF_DEB_THREAD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (priv->cur_cmd) {
+ pr_alert("EXEC_NEXT_CMD: already processing command!\n");
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return -1;
+ ret = -1;
+ goto done;
}
if (!list_empty(&priv->cmdpendingq)) {
@@ -483,11 +589,17 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
cmd = cmdnode->cmdbuf;
list_del(&cmdnode->list);
+ lbtf_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
+ le16_to_cpu(cmd->command));
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbtf_submit_command(priv, cmdnode);
} else
spin_unlock_irqrestore(&priv->driver_lock, flags);
- return 0;
+
+ ret = 0;
+done:
+ lbtf_deb_leave(LBTF_DEB_THREAD);
+ return ret;
}
static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
@@ -498,14 +610,22 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
{
struct cmd_ctrl_node *cmdnode;
- if (priv->surpriseremoved)
- return ERR_PTR(-ENOENT);
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
+ if (priv->surpriseremoved) {
+ lbtf_deb_host("PREP_CMD: card removed\n");
+ cmdnode = ERR_PTR(-ENOENT);
+ goto done;
+ }
cmdnode = lbtf_get_cmd_ctrl_node(priv);
if (cmdnode == NULL) {
+ lbtf_deb_host("PREP_CMD: cmdnode is NULL\n");
+
/* Wake up main thread to execute next command */
queue_work(lbtf_wq, &priv->cmd_work);
- return ERR_PTR(-ENOBUFS);
+ cmdnode = ERR_PTR(-ENOBUFS);
+ goto done;
}
cmdnode->callback = callback;
@@ -520,17 +640,24 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
+
+ lbtf_deb_host("PREP_CMD: command 0x%04x\n", command);
+
cmdnode->cmdwaitqwoken = 0;
lbtf_queue_cmd(priv, cmdnode);
queue_work(lbtf_wq, &priv->cmd_work);
+ done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %p", cmdnode);
return cmdnode;
}
void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
struct cmd_header *in_cmd, int in_cmd_size)
{
+ lbtf_deb_enter(LBTF_DEB_CMD);
__lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
@@ -543,30 +670,35 @@ int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
unsigned long flags;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_HOST);
+
cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
callback, callback_arg);
- if (IS_ERR(cmdnode))
- return PTR_ERR(cmdnode);
+ if (IS_ERR(cmdnode)) {
+ ret = PTR_ERR(cmdnode);
+ goto done;
+ }
might_sleep();
ret = wait_event_interruptible(cmdnode->cmdwait_q,
cmdnode->cmdwaitqwoken);
- if (ret) {
- printk(KERN_DEBUG
- "libertastf: command 0x%04x interrupted by signal",
- command);
- return ret;
+ if (ret) {
+ pr_info("PREP_CMD: command 0x%04x interrupted by signal: %d\n",
+ command, ret);
+ goto done;
}
spin_lock_irqsave(&priv->driver_lock, flags);
ret = cmdnode->result;
if (ret)
- printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
+ pr_info("PREP_CMD: command 0x%04x failed: %d\n",
command, ret);
__lbtf_cleanup_and_insert_cmd(priv, cmdnode);
spin_unlock_irqrestore(&priv->driver_lock, flags);
+done:
+ lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(__lbtf_cmd);
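
__lbtf_cmd_async() reports failure by folding an errno into the returned pointer with ERR_PTR(), and the synchronous wrapper above unwraps it with IS_ERR()/PTR_ERR(). A simplified userspace model of that kernel idiom (the real helpers live in include/linux/err.h and reserve the top 4095 pointer values):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Simplified copies of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(): a negative
 * errno is folded into an (invalid) pointer value so a single return slot
 * can carry either a real pointer or an error code. */
static inline void *err_ptr(long error)
{
	return (void *)error;
}

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

static void *prepare_node(int card_removed)
{
	static int node = 42;			/* stand-in for a real cmd_ctrl_node */

	if (card_removed)
		return err_ptr(-ENOENT);	/* no node: report the errno instead */

	return &node;
}

int main(void)
{
	void *node = prepare_node(1);

	if (is_err(node))
		printf("prepare failed: %ld\n", ptr_err(node));
	else
		printf("got node %p\n", node);
	return 0;
}
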
@@ -587,6 +719,8 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
unsigned long flags;
uint16_t result;
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
mutex_lock(&priv->lock);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -602,7 +736,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
result = le16_to_cpu(resp->result);
if (net_ratelimit())
- printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
+ pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
respcmd, le16_to_cpu(resp->seqnum),
le16_to_cpu(resp->size));
@@ -639,7 +773,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
switch (respcmd) {
case CMD_RET(CMD_GET_HW_SPEC):
case CMD_RET(CMD_802_11_RESET):
- printk(KERN_DEBUG "libertastf: reset failed\n");
+ pr_info("libertastf: reset failed\n");
break;
}
@@ -666,5 +800,6 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
done:
mutex_unlock(&priv->lock);
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
new file mode 100644
index 00000000000..ae753962d8b
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -0,0 +1,104 @@
+/**
+ * This header file contains global constant/enum definitions,
+ * and global variable declarations.
+ */
+#ifndef _LBS_DEB_DEFS_H_
+#define _LBS_DEB_DEFS_H_
+
+#ifndef DRV_NAME
+#define DRV_NAME "libertas_tf"
+#endif
+
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_LIBERTAS_THINFIRM_DEBUG
+#define DEBUG
+#define PROC_DEBUG
+#endif
+
+#define LBTF_DEB_ENTER 0x00000001
+#define LBTF_DEB_LEAVE 0x00000002
+#define LBTF_DEB_MAIN 0x00000004
+#define LBTF_DEB_NET 0x00000008
+#define LBTF_DEB_MESH 0x00000010
+#define LBTF_DEB_WEXT 0x00000020
+#define LBTF_DEB_IOCTL 0x00000040
+#define LBTF_DEB_SCAN 0x00000080
+#define LBTF_DEB_ASSOC 0x00000100
+#define LBTF_DEB_JOIN 0x00000200
+#define LBTF_DEB_11D 0x00000400
+#define LBTF_DEB_DEBUGFS 0x00000800
+#define LBTF_DEB_ETHTOOL 0x00001000
+#define LBTF_DEB_HOST 0x00002000
+#define LBTF_DEB_CMD 0x00004000
+#define LBTF_DEB_RX 0x00008000
+#define LBTF_DEB_TX 0x00010000
+#define LBTF_DEB_USB 0x00020000
+#define LBTF_DEB_CS 0x00040000
+#define LBTF_DEB_FW 0x00080000
+#define LBTF_DEB_THREAD 0x00100000
+#define LBTF_DEB_HEX 0x00200000
+#define LBTF_DEB_SDIO 0x00400000
+#define LBTF_DEB_MACOPS 0x00800000
+
+extern unsigned int lbtf_debug;
+
+
+#ifdef DEBUG
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
+do { if ((lbtf_debug & (grp)) == (grp)) \
+ printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
+ in_interrupt() ? " (INT)" : "", ## args); } while (0)
+#else
+#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
+#endif
+
+#define lbtf_deb_enter(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s()\n", __func__);
+#define lbtf_deb_enter_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
+#define lbtf_deb_leave(grp) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s()\n", __func__);
+#define lbtf_deb_leave_args(grp, fmt, args...) \
+ LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
+ __func__, ##args);
+#define lbtf_deb_main(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MAIN, " main", fmt, ##args)
+#define lbtf_deb_net(fmt, args...) LBTF_DEB_LL(LBTF_DEB_NET, " net", fmt, ##args)
+#define lbtf_deb_mesh(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MESH, " mesh", fmt, ##args)
+#define lbtf_deb_wext(fmt, args...) LBTF_DEB_LL(LBTF_DEB_WEXT, " wext", fmt, ##args)
+#define lbtf_deb_ioctl(fmt, args...) LBTF_DEB_LL(LBTF_DEB_IOCTL, " ioctl", fmt, ##args)
+#define lbtf_deb_scan(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SCAN, " scan", fmt, ##args)
+#define lbtf_deb_assoc(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ASSOC, " assoc", fmt, ##args)
+#define lbtf_deb_join(fmt, args...) LBTF_DEB_LL(LBTF_DEB_JOIN, " join", fmt, ##args)
+#define lbtf_deb_11d(fmt, args...) LBTF_DEB_LL(LBTF_DEB_11D, " 11d", fmt, ##args)
+#define lbtf_deb_debugfs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_DEBUGFS, " debugfs", fmt, ##args)
+#define lbtf_deb_ethtool(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ETHTOOL, " ethtool", fmt, ##args)
+#define lbtf_deb_host(fmt, args...) LBTF_DEB_LL(LBTF_DEB_HOST, " host", fmt, ##args)
+#define lbtf_deb_cmd(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", fmt, ##args)
+#define lbtf_deb_rx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_RX, " rx", fmt, ##args)
+#define lbtf_deb_tx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_TX, " tx", fmt, ##args)
+#define lbtf_deb_fw(fmt, args...) LBTF_DEB_LL(LBTF_DEB_FW, " fw", fmt, ##args)
+#define lbtf_deb_usb(fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usb", fmt, ##args)
+#define lbtf_deb_usbd(dev, fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
+#define lbtf_deb_cs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CS, " cs", fmt, ##args)
+#define lbtf_deb_thread(fmt, args...) LBTF_DEB_LL(LBTF_DEB_THREAD, " thread", fmt, ##args)
+#define lbtf_deb_sdio(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SDIO, " sdio", fmt, ##args)
+#define lbtf_deb_macops(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MACOPS, " macops", fmt, ##args)
+
+#ifdef DEBUG
+static inline void lbtf_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+{
+ char newprompt[32];
+
+ if (len &&
+ (lbtf_debug & LBTF_DEB_HEX) &&
+ (lbtf_debug & grp)) {
+ snprintf(newprompt, sizeof(newprompt), DRV_NAME " %s: ", prompt);
+ print_hex_dump_bytes(newprompt, DUMP_PREFIX_NONE, buf, len);
+ }
+}
+#else
+#define lbtf_deb_hex(grp, prompt, buf, len) do {} while (0)
+#endif
+
+#endif
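
Every message added by this patch is gated on a per-group bit in lbtf_debug, exported in main.c as the libertas_tf_debug module parameter; the test (lbtf_debug & grp) == grp means enter/leave traces need both the group bit and the ENTER/LEAVE bit set. A tiny userspace model of that filtering, reusing a few of the bit values defined above:

#include <stdio.h>
#include <stdarg.h>

#define DEB_ENTER	0x00000001
#define DEB_LEAVE	0x00000002
#define DEB_HOST	0x00002000
#define DEB_CMD		0x00004000

static unsigned int debug_mask;	/* in the driver this is the lbtf_debug module parameter */

/* Mirror of LBTF_DEB_LL(): print only when every bit in grp is enabled. */
static void deb_printf(unsigned int grp, const char *fmt, ...)
{
	va_list ap;

	if ((debug_mask & grp) != grp)
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	debug_mask = DEB_CMD | DEB_ENTER;	/* e.g. modprobe ... libertas_tf_debug=0x4001 */

	deb_printf(DEB_CMD | DEB_ENTER, "cmd enter trace fires\n");
	deb_printf(DEB_CMD | DEB_LEAVE, "cmd leave trace is filtered out\n");
	deb_printf(DEB_HOST, "host messages are filtered out too\n");
	return 0;
}
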
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 8cc9db60c14..c445500ffc6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -7,6 +7,13 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define DRV_NAME "lbtf_usb"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "libertas_tf.h"
+#include "if_usb.h"
+
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -14,10 +21,8 @@
#include <linux/slab.h>
#include <linux/usb.h>
-#define DRV_NAME "lbtf_usb"
-
-#include "libertas_tf.h"
-#include "if_usb.h"
+#define INSANEDEBUG 0
+#define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0)
#define MESSAGE_HEADER_LEN 4
@@ -53,9 +58,14 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
*/
static void if_usb_write_bulk_callback(struct urb *urb)
{
- if (urb->status != 0)
- printk(KERN_INFO "libertastf: URB in failure status: %d\n",
- urb->status);
+ if (urb->status != 0) {
+ /* print the failure status number for debug */
+ pr_info("URB in failure status: %d\n", urb->status);
+ } else {
+ lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n");
+ lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
+ urb->actual_length);
+ }
}
/**
@@ -65,6 +75,8 @@ static void if_usb_write_bulk_callback(struct urb *urb)
*/
static void if_usb_free(struct if_usb_card *cardp)
{
+ lbtf_deb_enter(LBTF_DEB_USB);
+
/* Unlink tx & rx urb */
usb_kill_urb(cardp->tx_urb);
usb_kill_urb(cardp->rx_urb);
@@ -81,6 +93,8 @@ static void if_usb_free(struct if_usb_card *cardp)
kfree(cardp->ep_out_buf);
cardp->ep_out_buf = NULL;
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_setup_firmware(struct lbtf_private *priv)
@@ -88,23 +102,33 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
struct if_usb_card *cardp = priv->card;
struct cmd_ds_set_boot2_ver b2_cmd;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if_usb_submit_rx_urb(cardp);
b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
b2_cmd.action = 0;
b2_cmd.version = cardp->boot2_version;
if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
- printk(KERN_INFO "libertastf: setting boot2 version failed\n");
+ lbtf_deb_usb("Setting boot2 version failed\n");
+
+ lbtf_deb_leave(LBTF_DEB_USB);
}
static void if_usb_fw_timeo(unsigned long priv)
{
struct if_usb_card *cardp = (void *)priv;
- if (!cardp->fwdnldover)
+ lbtf_deb_enter(LBTF_DEB_USB);
+ if (!cardp->fwdnldover) {
/* Download timed out */
cardp->priv->surpriseremoved = 1;
+ pr_err("Download timed out\n");
+ } else {
+ lbtf_deb_usb("Download complete, no event. Assuming success\n");
+ }
wake_up(&cardp->fw_wq);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -125,11 +149,14 @@ static int if_usb_probe(struct usb_interface *intf,
struct if_usb_card *cardp;
int i;
+ lbtf_deb_enter(LBTF_DEB_USB);
udev = interface_to_usbdev(intf);
cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
- if (!cardp)
+ if (!cardp) {
+ pr_err("Out of memory allocating private data.\n");
goto error;
+ }
setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
init_waitqueue_head(&cardp->fw_wq);
@@ -137,38 +164,62 @@ static int if_usb_probe(struct usb_interface *intf,
cardp->udev = udev;
iface_desc = intf->cur_altsetting;
+ lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
+ " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n",
+ le16_to_cpu(udev->descriptor.bcdUSB),
+ udev->descriptor.bDeviceClass,
+ udev->descriptor.bDeviceSubClass,
+ udev->descriptor.bDeviceProtocol);
+
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(endpoint)) {
cardp->ep_in_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_in = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in);
+ lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size);
} else if (usb_endpoint_is_bulk_out(endpoint)) {
cardp->ep_out_size =
le16_to_cpu(endpoint->wMaxPacketSize);
cardp->ep_out = usb_endpoint_num(endpoint);
+
+ lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out);
+ lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
+ cardp->ep_out_size);
}
}
- if (!cardp->ep_out_size || !cardp->ep_in_size)
+ if (!cardp->ep_out_size || !cardp->ep_in_size) {
+ lbtf_deb_usbd(&udev->dev, "Endpoints not found\n");
/* Endpoints not found */
goto dealloc;
+ }
cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->rx_urb)
+ if (!cardp->rx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
goto dealloc;
+ }
cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->tx_urb)
+ if (!cardp->tx_urb) {
+ lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
goto dealloc;
+ }
cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!cardp->cmd_urb)
+ if (!cardp->cmd_urb) {
+ lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
goto dealloc;
+ }
cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
GFP_KERNEL);
- if (!cardp->ep_out_buf)
+ if (!cardp->ep_out_buf) {
+ lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n");
goto dealloc;
+ }
priv = lbtf_add_card(cardp, &udev->dev);
if (!priv)
@@ -189,6 +240,7 @@ static int if_usb_probe(struct usb_interface *intf,
dealloc:
if_usb_free(cardp);
error:
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return -ENOMEM;
}
@@ -202,6 +254,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
struct if_usb_card *cardp = usb_get_intfdata(intf);
struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
if_usb_reset_device(cardp);
if (priv)
@@ -212,6 +266,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
+
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
/**
@@ -226,6 +282,8 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
struct fwdata *fwdata = cardp->ep_out_buf;
u8 *firmware = (u8 *) cardp->fw->data;
+ lbtf_deb_enter(LBTF_DEB_FW);
+
/* If we got a CRC failure on the last block, back
up and retry it */
if (!cardp->CRC_OK) {
@@ -233,6 +291,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
cardp->fwseqnum--;
}
+ lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n",
+ cardp->totalbytes);
+
/* struct fwdata (which we sent to the card) has an
extra __le32 field in between the header and the data,
which is not in the struct fwheader in the actual
@@ -246,18 +307,33 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
memcpy(fwdata->data, &firmware[cardp->totalbytes],
le32_to_cpu(fwdata->hdr.datalength));
+ lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n",
+ le32_to_cpu(fwdata->hdr.datalength));
+
fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
le32_to_cpu(fwdata->hdr.datalength), 0);
- if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
+ if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n",
+ cardp->fwseqnum, cardp->totalbytes);
+ } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
+
/* Host has finished FW downloading
* Downloading FW JUMP BLOCK
*/
cardp->fwfinalblk = 1;
+ }
+ lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n",
+ cardp->totalbytes);
+
+ lbtf_deb_leave(LBTF_DEB_FW);
return 0;
}
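
if_usb_send_fw_pkt() streams the firmware image block by block, backing up by one block and reusing the previous sequence number whenever the device reported a CRC failure. A standalone sketch of just that bookkeeping, with a fixed block size standing in for the per-block length taken from the firmware header:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FW_BLOCK_SIZE	512

struct fw_state {
	const uint8_t *fw;
	size_t fw_len;
	size_t totalbytes;	/* bytes of firmware already consumed */
	size_t lastblock;	/* size of the most recent block, for CRC retries */
	unsigned int seqnum;
	int crc_ok;		/* set from the device's sync header */
};

/* On a CRC failure the previous block is backed out and resent with a
 * fresh sequence number, mirroring the driver's bookkeeping. */
static size_t next_fw_block(struct fw_state *st, uint8_t *out)
{
	size_t len;

	if (!st->crc_ok) {
		st->totalbytes -= st->lastblock;	/* back up and retry last block */
		st->seqnum--;
	}

	len = st->fw_len - st->totalbytes;
	if (len > FW_BLOCK_SIZE)
		len = FW_BLOCK_SIZE;

	memcpy(out, st->fw + st->totalbytes, len);
	st->lastblock = len;
	st->totalbytes += len;
	st->seqnum++;

	return len;	/* zero length means the image is fully consumed */
}

int main(void)
{
	static uint8_t fw[1200], block[FW_BLOCK_SIZE];
	struct fw_state st = { .fw = fw, .fw_len = sizeof(fw), .crc_ok = 1 };

	while (next_fw_block(&st, block))
		printf("sent block seq %u, total %zu\n", st.seqnum, st.totalbytes);
	return 0;
}
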
@@ -266,6 +342,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
int ret;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
@@ -280,6 +358,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
ret = usb_reset_device(cardp->udev);
msleep(100);
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_reset_device);
@@ -297,11 +377,15 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
uint16_t nb, u8 data)
{
+ int ret = -1;
struct urb *urb;
+ lbtf_deb_enter(LBTF_DEB_USB);
/* check if device is removed */
- if (cardp->priv->surpriseremoved)
- return -1;
+ if (cardp->priv->surpriseremoved) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
+ goto tx_ret;
+ }
if (data)
urb = cardp->tx_urb;
@@ -315,19 +399,34 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(urb, GFP_ATOMIC))
- return -1;
- return 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
+ goto tx_ret;
+ }
+
+ lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
+
+ ret = 0;
+
+tx_ret:
+ lbtf_deb_leave(LBTF_DEB_USB);
+ return ret;
}
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
void (*callbackfn)(struct urb *urb))
{
struct sk_buff *skb;
+ int ret = -1;
+
+ lbtf_deb_enter(LBTF_DEB_USB);
skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
- if (!skb)
+ if (!skb) {
+ pr_err("No free skb\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
+ }
cardp->rx_skb = skb;
@@ -339,12 +438,19 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
- if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
+ lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
+ ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
+ if (ret) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
kfree_skb(skb);
cardp->rx_skb = NULL;
+ lbtf_deb_leave(LBTF_DEB_USB);
return -1;
- } else
+ } else {
+ lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return 0;
+ }
}
static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
@@ -364,8 +470,12 @@ static void if_usb_receive_fwload(struct urb *urb)
struct fwsyncheader *syncfwheader;
struct bootcmdresp bcmdresp;
+ lbtf_deb_enter(LBTF_DEB_USB);
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "URB status is failed during fw load\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -373,12 +483,17 @@ static void if_usb_receive_fwload(struct urb *urb)
__le32 *tmp = (__le32 *)(skb->data);
if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
- tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
+ tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
/* Firmware ready event received */
+ pr_info("Firmware ready event received\n");
wake_up(&cardp->fw_wq);
- else
+ } else {
+ lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
+ le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1]));
if_usb_submit_rx_urb_fwload(cardp);
+ }
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (cardp->bootcmdresp <= 0) {
@@ -389,34 +504,60 @@ static void if_usb_receive_fwload(struct urb *urb)
if_usb_submit_rx_urb_fwload(cardp);
cardp->bootcmdresp = 1;
/* Received valid boot command response */
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
- bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
+ bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
+ if (!cardp->bootcmdresp)
+ pr_info("Firmware already seems alive; resetting\n");
cardp->bootcmdresp = -1;
- } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
- bcmdresp.result == BOOT_CMD_RESP_OK)
+ } else {
+ pr_info("boot cmd response wrong magic number (0x%x)\n",
+ le32_to_cpu(bcmdresp.magic));
+ }
+ } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
+ pr_info("boot cmd response cmd_tag error (%d)\n",
+ bcmdresp.cmd);
+ } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
+ pr_info("boot cmd response result error (%d)\n",
+ bcmdresp.result);
+ } else {
cardp->bootcmdresp = 1;
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Received valid boot command response\n");
+ }
kfree_skb(skb);
if_usb_submit_rx_urb_fwload(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
if (!syncfwheader) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
- if (!syncfwheader->cmd)
+ if (!syncfwheader->cmd) {
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
+ lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
+ le32_to_cpu(syncfwheader->seqnum));
cardp->CRC_OK = 1;
- else
+ } else {
+ lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n");
cardp->CRC_OK = 0;
+ }
+
kfree_skb(skb);
/* reschedule timer for 200ms hence */
@@ -434,7 +575,7 @@ static void if_usb_receive_fwload(struct urb *urb)
kfree(syncfwheader);
- return;
+ lbtf_deb_leave(LBTF_DEB_USB);
}
#define MRVDRV_MIN_PKT_LEN 30
@@ -445,6 +586,7 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
{
if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
|| recvlength < MRVDRV_MIN_PKT_LEN) {
+ lbtf_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n");
kfree_skb(skb);
return;
}
@@ -460,6 +602,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
struct lbtf_private *priv)
{
if (recvlength > LBS_CMD_BUFFER_SIZE) {
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "The receive buffer is too large\n");
kfree_skb(skb);
return;
}
@@ -489,16 +633,24 @@ static void if_usb_receive(struct urb *urb)
uint32_t recvtype = 0;
__le32 *pkt = (__le32 *) skb->data;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
if (recvlength) {
if (urb->status) {
+ lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
+ urb->status);
kfree_skb(skb);
goto setup_for_next;
}
recvbuff = skb->data;
recvtype = le32_to_cpu(pkt[0]);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "Recv length = 0x%x, Recv type = 0x%X\n",
+ recvlength, recvtype);
} else if (urb->status) {
kfree_skb(skb);
+ lbtf_deb_leave(LBTF_DEB_USB);
return;
}
@@ -515,6 +667,7 @@ static void if_usb_receive(struct urb *urb)
{
/* Event cause handling */
u32 event_cause = le32_to_cpu(pkt[1]);
+ lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause);
/* Icky undocumented magic special case */
if (event_cause & 0xffff0000) {
@@ -529,21 +682,22 @@ static void if_usb_receive(struct urb *urb)
} else if (event_cause == LBTF_EVENT_BCN_SENT)
lbtf_bcn_sent(priv);
else
- printk(KERN_DEBUG
+ lbtf_deb_usbd(&cardp->udev->dev,
"Unsupported notification %d received\n",
event_cause);
kfree_skb(skb);
break;
}
default:
- printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
- recvtype);
+ lbtf_deb_usbd(&cardp->udev->dev,
+ "libertastf: unknown command type 0x%X\n", recvtype);
kfree_skb(skb);
break;
}
setup_for_next:
if_usb_submit_rx_urb(cardp);
+ lbtf_deb_leave(LBTF_DEB_USB);
}
/**
@@ -562,6 +716,9 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
struct if_usb_card *cardp = priv->card;
u8 data = 0;
+ lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
+ lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);
+
if (type == MVMS_CMD) {
*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
} else {
@@ -639,8 +796,10 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
} while (!exit);
if (ret)
- printk(KERN_INFO
- "libertastf: firmware file format check failed\n");
+ pr_err("firmware file format check FAIL\n");
+ else
+ lbtf_deb_fw("firmware file format check PASS\n");
+
return ret;
}
@@ -651,10 +810,12 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
static int reset_count = 10;
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_USB);
+
ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
if (ret < 0) {
- printk(KERN_INFO "libertastf: firmware %s not found\n",
- lbtf_fw_name);
+ pr_err("request_firmware() failed with %#x\n", ret);
+ pr_err("firmware %s not found\n", lbtf_fw_name);
goto done;
}
@@ -663,6 +824,7 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
restart:
if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
+ lbtf_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
ret = -1;
goto release_fw;
}
@@ -709,14 +871,13 @@ restart:
usb_kill_urb(cardp->rx_urb);
if (!cardp->fwdnldover) {
- printk(KERN_INFO "libertastf: failed to load fw,"
- " resetting device!\n");
+ pr_info("failed to load fw, resetting device!\n");
if (--reset_count >= 0) {
if_usb_reset_device(cardp);
goto restart;
}
- printk(KERN_INFO "libertastf: fw download failure\n");
+ pr_info("FW download failure, time = %d ms\n", i * 100);
ret = -1;
goto release_fw;
}
@@ -730,6 +891,7 @@ restart:
if_usb_setup_firmware(cardp->priv);
done:
+ lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
@@ -751,13 +913,19 @@ static int __init if_usb_init_module(void)
{
int ret = 0;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
ret = usb_register(&if_usb_driver);
+
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
return ret;
}
static void __exit if_usb_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
usb_deregister(&if_usb_driver);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(if_usb_init_module);
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 4cc42dd5a00..fbbaaae7a1a 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -13,6 +13,8 @@
#include <linux/kthread.h>
#include <net/mac80211.h>
+#include "deb_defs.h"
+
#ifndef DRV_NAME
#define DRV_NAME "libertas_tf"
#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7945ff5aa33..6a04c2157f7 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,10 +7,12 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
+#include <linux/etherdevice.h>
#include "libertas_tf.h"
-#include "linux/etherdevice.h"
#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
@@ -18,7 +20,17 @@
#define LBTF_FW_VER_MAX 0x0584ffff
#define QOS_CONTROL_LEN 2
-static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
+/* Module parameters */
+unsigned int lbtf_debug;
+EXPORT_SYMBOL_GPL(lbtf_debug);
+module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
+
+static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
+#ifdef DEBUG
+ "-dbg"
+#endif
+ "";
+
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
@@ -81,6 +93,9 @@ static void lbtf_cmd_work(struct work_struct *work)
{
struct lbtf_private *priv = container_of(work, struct lbtf_private,
cmd_work);
+
+ lbtf_deb_enter(LBTF_DEB_CMD);
+
spin_lock_irq(&priv->driver_lock);
/* command response? */
if (priv->cmd_response_rxed) {
@@ -108,11 +123,16 @@ static void lbtf_cmd_work(struct work_struct *work)
priv->cmd_timed_out = 0;
spin_unlock_irq(&priv->driver_lock);
- if (!priv->fw_ready)
+ if (!priv->fw_ready) {
+ lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
return;
+ }
+
/* Execute the next command */
if (!priv->cur_cmd)
lbtf_execute_next_command(priv);
+
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
@@ -126,6 +146,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
{
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_FW);
/*
* Read priv address from HW
*/
@@ -141,6 +162,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
ret = 0;
done:
+ lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
return ret;
}
@@ -152,6 +174,7 @@ static void command_timer_fn(unsigned long data)
{
struct lbtf_private *priv = (struct lbtf_private *)data;
unsigned long flags;
+ lbtf_deb_enter(LBTF_DEB_CMD);
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -168,10 +191,12 @@ static void command_timer_fn(unsigned long data)
queue_work(lbtf_wq, &priv->cmd_work);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
+ lbtf_deb_leave(LBTF_DEB_CMD);
}
static int lbtf_init_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
memset(priv->current_addr, 0xff, ETH_ALEN);
mutex_init(&priv->lock);
@@ -188,13 +213,16 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
if (lbtf_allocate_cmd_buffer(priv))
return -1;
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void lbtf_free_adapter(struct lbtf_private *priv)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_free_cmd_buffer(priv);
del_timer(&priv->command_timer);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -221,14 +249,18 @@ static void lbtf_tx_work(struct work_struct *work)
struct sk_buff *skb = NULL;
int err;
+ lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
+
if ((priv->vif->type == NL80211_IFTYPE_AP) &&
(!skb_queue_empty(&priv->bc_ps_buf)))
skb = skb_dequeue(&priv->bc_ps_buf);
else if (priv->skb_to_tx) {
skb = priv->skb_to_tx;
priv->skb_to_tx = NULL;
- } else
+ } else {
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
+ }
len = skb->len;
info = IEEE80211_SKB_CB(skb);
@@ -236,6 +268,7 @@ static void lbtf_tx_work(struct work_struct *work)
if (priv->surpriseremoved) {
dev_kfree_skb_any(skb);
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
}
@@ -249,6 +282,7 @@ static void lbtf_tx_work(struct work_struct *work)
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
+ lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
BUG_ON(priv->tx_skb);
spin_lock_irq(&priv->driver_lock);
priv->tx_skb = skb;
@@ -257,7 +291,9 @@ static void lbtf_tx_work(struct work_struct *work)
if (err) {
dev_kfree_skb_any(skb);
priv->tx_skb = NULL;
+ pr_err("TX error: %d", err);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
}
static int lbtf_op_start(struct ieee80211_hw *hw)
@@ -266,6 +302,8 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
void *card = priv->card;
int ret = -1;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
if (!priv->fw_ready)
/* Upload firmware */
if (priv->hw_prog_firmware(card))
@@ -286,10 +324,12 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
}
printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
err_prog_firmware:
priv->hw_reset_device(card);
+ lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programing fw; ret=%d", ret);
return ret;
}
@@ -300,6 +340,9 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
struct sk_buff *skb;
struct cmd_ctrl_node *cmdnode;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
/* Flush pending command nodes */
spin_lock_irqsave(&priv->driver_lock, flags);
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
@@ -316,13 +359,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
priv->radioon = RADIO_OFF;
lbtf_set_radio_control(priv);
- return;
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif != NULL)
return -EOPNOTSUPP;
@@ -340,6 +384,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
lbtf_set_mac_address(priv, (u8 *) vif->addr);
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
@@ -347,6 +392,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif->type == NL80211_IFTYPE_AP ||
priv->vif->type == NL80211_IFTYPE_MESH_POINT)
@@ -354,37 +400,38 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
lbtf_set_bssid(priv, 0, NULL);
priv->vif = NULL;
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (conf->channel->center_freq != priv->cur_freq) {
priv->cur_freq = conf->channel->center_freq;
lbtf_set_channel(priv, conf->channel->hw_value);
}
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct lbtf_private *priv = hw->priv;
int i;
+ struct netdev_hw_addr *ha;
+ int mc_count = netdev_hw_addr_list_count(mc_list);
if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
return mc_count;
priv->nr_of_multicastmacaddr = mc_count;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- memcpy(&priv->multicastlist[i], mclist->da_addr,
- ETH_ALEN);
- mclist = mclist->next;
- }
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
return mc_count;
}
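
This hunk is part of the tree-wide switch from struct dev_addr_list to netdev_hw_addr_list, replacing the hand-rolled pointer walk with netdev_hw_addr_list_for_each(). The standalone sketch below performs the same copy over a simplified singly linked list; the struct here is illustrative, not the real netdev type:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN		6
#define MAX_MULTICAST_LIST	32

/* Illustrative stand-in for netdev_hw_addr; the real kernel type carries
 * more fields and hangs off a list_head. */
struct hw_addr {
	uint8_t addr[ETH_ALEN];
	struct hw_addr *next;
};

/* Copy up to MAX_MULTICAST_LIST addresses into a flat array and return how
 * many were copied, mirroring what prepare_multicast() feeds the firmware. */
static int copy_mc_list(const struct hw_addr *list,
			uint8_t dest[][ETH_ALEN])
{
	int i = 0;

	for (; list && i < MAX_MULTICAST_LIST; list = list->next)
		memcpy(dest[i++], list->addr, ETH_ALEN);

	return i;
}

int main(void)
{
	struct hw_addr b = { .addr = { 0x01, 0x00, 0x5e, 0, 0, 2 } };
	struct hw_addr a = { .addr = { 0x01, 0x00, 0x5e, 0, 0, 1 }, .next = &b };
	uint8_t mc[MAX_MULTICAST_LIST][ETH_ALEN];
	int n = copy_mc_list(&a, mc);

	printf("copied %d multicast addresses, first ends in %02x\n",
	       n, mc[0][ETH_ALEN - 1]);
	return 0;
}
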
@@ -397,11 +444,16 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
int old_mac_control = priv->mac_control;
+
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
+
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- if (!changed_flags)
+ if (!changed_flags) {
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
return;
+ }
if (*new_flags & (FIF_PROMISC_IN_BSS))
priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
@@ -427,6 +479,8 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
if (priv->mac_control != old_mac_control)
lbtf_set_mac_control(priv);
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -436,6 +490,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
+ lbtf_deb_enter(LBTF_DEB_MACOPS);
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
switch (priv->vif->type) {
@@ -466,6 +521,8 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
priv->preamble = CMD_TYPE_LONG_PREAMBLE;
lbtf_set_radio_control(priv);
}
+
+ lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static const struct ieee80211_ops lbtf_ops = {
@@ -488,6 +545,8 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
unsigned int flags;
struct ieee80211_hdr *hdr;
+ lbtf_deb_enter(LBTF_DEB_RX);
+
prxpd = (struct rxpd *) skb->data;
stats.flag = 0;
@@ -496,7 +555,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.freq = priv->cur_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
- stats.noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
@@ -518,7 +576,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
}
memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
+
+ lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
+ skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
+ lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
+ min_t(unsigned int, skb->len, 100));
+
ieee80211_rx_irqsafe(priv->hw, skb);
+
+ lbtf_deb_leave(LBTF_DEB_RX);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
@@ -535,6 +601,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
struct ieee80211_hw *hw;
struct lbtf_private *priv = NULL;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
if (!hw)
goto done;
@@ -577,6 +645,7 @@ err_init_adapter:
priv = NULL;
done:
+ lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
@@ -586,6 +655,8 @@ int lbtf_remove_card(struct lbtf_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
+ lbtf_deb_enter(LBTF_DEB_MAIN);
+
priv->surpriseremoved = 1;
del_timer(&priv->command_timer);
lbtf_free_adapter(priv);
@@ -593,6 +664,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
@@ -651,17 +723,21 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_wq = create_workqueue("libertastf");
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
}
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void __exit lbtf_exit_module(void)
{
+ lbtf_deb_enter(LBTF_DEB_MAIN);
destroy_workqueue(lbtf_wq);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(lbtf_init_module);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd5f56662f..6f8cb3ee6fe 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -291,7 +291,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
- bool started, idle;
+ bool started, idle, scanning;
+ struct mutex mutex;
struct timer_list beacon_timer;
enum ps_mode {
PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -651,17 +652,17 @@ static void mac80211_hwsim_beacon(unsigned long arg)
add_timer(&data->beacon_timer);
}
+static const char *hwsim_chantypes[] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
- static const char *chantypes[4] = {
- [NL80211_CHAN_NO_HT] = "noht",
- [NL80211_CHAN_HT20] = "ht20",
- [NL80211_CHAN_HT40MINUS] = "ht40-",
- [NL80211_CHAN_HT40PLUS] = "ht40+",
- };
static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
[IEEE80211_SMPS_AUTOMATIC] = "auto",
[IEEE80211_SMPS_OFF] = "off",
@@ -672,7 +673,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
- chantypes[conf->channel_type],
+ hwsim_chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
!!(conf->flags & IEEE80211_CONF_PS),
smps_modes[conf->smps_mode]);
@@ -760,9 +761,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_HT) {
- printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n",
+ printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n",
wiphy_name(hw->wiphy),
- info->ht_operation_mode);
+ info->ht_operation_mode,
+ hwsim_chantypes[info->channel_type]);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -829,6 +831,33 @@ static int mac80211_hwsim_conf_tx(
return 0;
}
+static int mac80211_hwsim_get_survey(
+ struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+
+ printk(KERN_DEBUG "%s:%s (idx=%d)\n",
+ wiphy_name(hw->wiphy), __func__, idx);
+
+ if (idx != 0)
+ return -ENOENT;
+
+ /* Current channel */
+ survey->channel = conf->channel;
+
+ /*
+ * Magically conjured noise level --- this is only ok for simulated hardware.
+ *
+ * A real driver which cannot determine the real channel noise MUST NOT
+ * report any noise, especially not a magically conjured one :-)
+ */
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = -92;
+
+ return 0;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -946,6 +975,7 @@ static void hw_scan_done(struct work_struct *work)
}
static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
@@ -957,9 +987,9 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
hsd->hw = hw;
INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
- printk(KERN_DEBUG "hwsim scan request\n");
+ printk(KERN_DEBUG "hwsim hw_scan request\n");
for (i = 0; i < req->n_channels; i++)
- printk(KERN_DEBUG "hwsim scan freq %d\n",
+ printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
req->channels[i]->center_freq);
ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -967,6 +997,36 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
return 0;
}
+static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ if (hwsim->scanning) {
+ printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
+ goto out;
+ }
+
+ printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+ hwsim->scanning = true;
+
+out:
+ mutex_unlock(&hwsim->mutex);
+}
+
+static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct mac80211_hwsim_data *hwsim = hw->priv;
+
+ mutex_lock(&hwsim->mutex);
+
+ printk(KERN_DEBUG "hwsim sw_scan_complete\n");
+ hwsim->scanning = false;
+
+ mutex_unlock(&hwsim->mutex);
+}
+
static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
@@ -982,8 +1042,11 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
+ .get_survey = mac80211_hwsim_get_survey,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
.ampdu_action = mac80211_hwsim_ampdu_action,
+ .sw_scan_start = mac80211_hwsim_sw_scan,
+ .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
.flush = mac80211_hwsim_flush,
};
@@ -1179,8 +1242,11 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
- if (fake_hw_scan)
+ if (fake_hw_scan) {
mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+ mac80211_hwsim_ops.sw_scan_start = NULL;
+ mac80211_hwsim_ops.sw_scan_complete = NULL;
+ }
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1235,7 +1301,8 @@ static int __init init_mac80211_hwsim(void)
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_AMPDU_AGGREGATION;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -1285,6 +1352,7 @@ static int __init init_mac80211_hwsim(void)
}
/* By default all radios are belonging to the first group */
data->group = 1;
+ mutex_init(&data->mutex);
/* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 12fdcb25fd3..808adb90909 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -750,7 +750,6 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_floor;
if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
@@ -852,7 +851,6 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
memset(status, 0, sizeof(*status));
status->signal = -rxd->rssi;
- status->noise = -rxd->noise_level;
status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
@@ -1939,11 +1937,15 @@ struct mwl8k_cmd_mac_multicast_adr {
static struct mwl8k_cmd_pkt *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_mac_multicast_adr *cmd;
int size;
+ int mc_count = 0;
+
+ if (mc_list)
+ mc_count = netdev_hw_addr_list_count(mc_list);
if (allmulti || mc_count > priv->num_mcaddrs) {
allmulti = 1;
@@ -1964,17 +1966,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
if (allmulti) {
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
} else if (mc_count) {
- int i;
+ struct netdev_hw_addr *ha;
+ int i = 0;
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
- for (i = 0; i < mc_count && mclist; i++) {
- if (mclist->da_addrlen != ETH_ALEN) {
- kfree(cmd);
- return NULL;
- }
- memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
}
}
@@ -3553,7 +3551,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct mwl8k_cmd_pkt *cmd;
@@ -3564,7 +3562,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
* we'll end up throwing this packet away and creating a new
* one in mwl8k_configure_filter().
*/
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
return (unsigned long)cmd;
}
@@ -3687,7 +3685,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
if (*total_flags & FIF_ALLMULTI) {
kfree(cmd);
- cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
+ cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
}
if (cmd != NULL) {
@@ -3984,8 +3982,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- /* Set rssi and noise values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
+ /* Set rssi values to dBm */
+ hw->flags |= IEEE80211_HW_SIGNAL_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
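The mwl8k hunks above switch the multicast path from the old dev_addr_list chain to struct netdev_hw_addr_list. For readers unfamiliar with the new helpers, here is a minimal sketch of the counting/iteration pattern the driver now relies on; the helper name and the fallback policy are invented for illustration and are not part of this patch:

static int copy_mc_addrs(u8 (*dst)[ETH_ALEN], int max,
			 struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	int i = 0;

	/* netdev_hw_addr_list_count() replaces the old mc_count argument */
	if (netdev_hw_addr_list_count(mc_list) > max)
		return -ENOSPC;	/* caller should fall back to all-multicast */

	/* netdev_hw_addr_list_for_each() replaces walking mclist->next */
	netdev_hw_addr_list_for_each(ha, mc_list)
		memcpy(dst[i++], ha->addr, ETH_ALEN);

	return i;
}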
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index e2a2c18920a..60819bcf437 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -27,6 +27,17 @@ config HERMES
configure your card and that /etc/pcmcia/wireless.opts works :
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
+config HERMES_PRISM
+ bool "Support Prism 2/2.5 chipset"
+ depends on HERMES
+ ---help---
+
+ Say Y to enable support for Prism 2 and 2.5 chipsets. These
+ chipsets are better handled by the hostap driver. This driver
+ does not support WPA or firmware download for Prism chipsets.
+
+ If you are not sure, say N.
+
config HERMES_CACHE_FW_ON_INIT
bool "Cache Hermes firmware on driver initialisation"
depends on HERMES
@@ -86,7 +97,7 @@ config NORTEL_HERMES
config PCI_HERMES
tristate "Prism 2.5 PCI 802.11b adaptor support"
- depends on PCI && HERMES
+ depends on PCI && HERMES && HERMES_PRISM
help
Enable support for PCI and mini-PCI 802.11b wireless NICs based on
the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
@@ -121,3 +132,10 @@ config PCMCIA_SPECTRUM
This driver requires firmware download on startup. Utilities
for downloading Symbol firmware are available at
<http://sourceforge.net/projects/orinoco/>
+
+config ORINOCO_USB
+ tristate "Agere Orinoco USB support"
+ depends on USB && HERMES
+ select FW_LOADER
+ ---help---
+ This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 9abd6329bcb..bfdefb85abc 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -11,3 +11,7 @@ obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
+obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
+
+# Orinoco should be endian clean.
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index c60df2c1aca..9bcee10c930 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -77,9 +77,9 @@ airport_resume(struct macio_dev *mdev)
enable_irq(card->irq);
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_up(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return err;
}
@@ -195,7 +195,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
ssleep(1);
/* Reset it before we get the interrupt */
- hermes_init(hw);
+ hw->ops->init(hw);
if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
@@ -210,7 +210,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
}
/* Register an interface with the stack */
- if (orinoco_if_add(priv, phys_addr, card->irq) != 0) {
+ if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 27f2d334264..8c4169c227a 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -88,7 +88,9 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
wiphy->rts_threshold = priv->rts_thresh;
if (!priv->has_mwo)
- wiphy->frag_threshold = priv->frag_thresh;
+ wiphy->frag_threshold = priv->frag_thresh + 1;
+ wiphy->retry_short = priv->short_retry_limit;
+ wiphy->retry_long = priv->long_retry_limit;
return wiphy_register(wiphy);
}
@@ -157,6 +159,7 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
}
static int orinoco_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
@@ -187,7 +190,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
channel, NULL);
}
@@ -196,8 +199,92 @@ static int orinoco_set_channel(struct wiphy *wiphy,
return err;
}
+static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct orinoco_private *priv = wiphy_priv(wiphy);
+ int frag_value = -1;
+ int rts_value = -1;
+ int err = 0;
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT) {
+ /* Setting short retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_RETRY_LONG) {
+ /* Setting long retry not supported */
+ err = -EINVAL;
+ }
+
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ /* Set fragmentation */
+ if (priv->has_mwo) {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 0;
+ else {
+ printk(KERN_WARNING "%s: Fixed fragmentation "
+ "is not supported on this firmware. "
+ "Using MWO robust instead.\n",
+ priv->ndev->name);
+ frag_value = 1;
+ }
+ } else {
+ if (wiphy->frag_threshold < 0)
+ frag_value = 2346;
+ else if ((wiphy->frag_threshold < 257) ||
+ (wiphy->frag_threshold > 2347))
+ err = -EINVAL;
+ else
+ /* cfg80211 value is 257-2347 (odd only)
+ * orinoco rid has range 256-2346 (even only) */
+ frag_value = wiphy->frag_threshold & ~0x1;
+ }
+ }
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ /* Set RTS.
+ *
+ * Prism documentation suggests default of 2432,
+ * and a range of 0-3000.
+ *
+ * Current implementation uses 2347 as the default and
+ * the upper limit.
+ */
+
+ if (wiphy->rts_threshold < 0)
+ rts_value = 2347;
+ else if (wiphy->rts_threshold > 2347)
+ err = -EINVAL;
+ else
+ rts_value = wiphy->rts_threshold;
+ }
+
+ if (!err) {
+ unsigned long flags;
+
+ if (orinoco_lock(priv, &flags) != 0)
+ return -EBUSY;
+
+ if (frag_value >= 0) {
+ if (priv->has_mwo)
+ priv->mwo_robust = frag_value;
+ else
+ priv->frag_thresh = frag_value;
+ }
+ if (rts_value >= 0)
+ priv->rts_thresh = rts_value;
+
+ err = orinoco_commit(priv);
+
+ orinoco_unlock(priv, &flags);
+ }
+
+ return err;
+}
+
const struct cfg80211_ops orinoco_cfg_ops = {
.change_virtual_intf = orinoco_change_vif,
.set_channel = orinoco_set_channel,
.scan = orinoco_scan,
+ .set_wiphy_params = orinoco_set_wiphy_params,
};
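The fragmentation mapping in orinoco_set_wiphy_params() above is the part most easily misread: cfg80211 uses odd thresholds in the range 257-2347 while the Hermes RID stores even values in 256-2346, which is also why orinoco_wiphy_register() now reports priv->frag_thresh + 1. A short worked round trip, assuming a hypothetical user-requested threshold of 1537:

	/* cfg80211 -> driver: clear bit 0 before storing the RID value */
	frag_value = 1537 & ~0x1;		/* = 1536 (even, 256-2346) */

	/* driver -> cfg80211: report the stored RID value plus one */
	wiphy->frag_threshold = 1536 + 1;	/* = 1537 (odd, 257-2347)  */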
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 5ea0f7cf85b..3e1947d097c 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -122,7 +122,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
/* Read current plug data */
- err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
+ err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
dev_dbg(dev, "Read PDA returned %d\n", err);
if (err)
goto free;
@@ -149,7 +149,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
}
/* Enable aux port to allow programming */
- err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
+ err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
dev_dbg(dev, "Program init returned %d\n", err);
if (err != 0)
goto abort;
@@ -177,7 +177,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
goto abort;
/* Tell card we've finished */
- err = hermesi_program_end(hw);
+ err = hw->ops->program_end(hw);
dev_dbg(dev, "Program end returned %d\n", err);
if (err != 0)
goto abort;
@@ -224,7 +224,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
if (!pda)
return -ENOMEM;
- ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
+ ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
if (ret)
goto free;
}
@@ -260,7 +260,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
}
/* Reset hermes chip and make sure it responds */
- ret = hermes_init(hw);
+ ret = hw->ops->init(hw);
/* hermes_reset() should return 0 with the secondary firmware */
if (secondary && ret != 0)
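With this hunk the Agere download path in orinoco_dl_firmware() is expressed entirely through hw->ops. A condensed sketch of the resulting sequence, assuming the PDA buffer and image pointers have already been set up as in the function above (error handling and PDA plugging trimmed; this is an outline, not code from the patch):

static int orinoco_dl_sketch(struct hermes *hw, const struct fw_info *fw,
			     __le16 *pda, const char *first_block,
			     const void *end, u32 entry_point)
{
	int err;

	/* Read the plug data area through the bus-specific backend */
	err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
	if (err)
		return err;

	/* Open the AUX port and enable volatile programming */
	err = hw->ops->program_init(hw, entry_point);
	if (err)
		return err;

	/* hermes_program() walks the image blocks and hands each one
	 * to hw->ops->program() */
	err = hermes_program(hw, first_block, end);

	/* Close the AUX port and re-initialise; keep the first error */
	if (hw->ops->program_end(hw) && !err)
		err = -EIO;

	return err;
}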
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 1a2fca76fd3..6c6a23e08df 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -52,6 +52,26 @@
#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
/*
+ * AUX port access. To unlock the AUX port write the access keys to the
+ * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
+ * register. Then read it and make sure it's HERMES_AUX_ENABLED.
+ */
+#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
+#define HERMES_AUX_DISABLE 0x4000 /* Disable auxiliary port access */
+#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
+#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
+
+#define HERMES_AUX_PW0 0xFE01
+#define HERMES_AUX_PW1 0xDC23
+#define HERMES_AUX_PW2 0xBA45
+
+/* HERMES_CMD_DOWNLD */
+#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
+#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
+
+/*
* Debugging helpers
*/
@@ -70,6 +90,7 @@
#endif /* ! HERMES_DEBUG */
+static const struct hermes_ops hermes_ops_local;
/*
* Internal functions
@@ -111,9 +132,9 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
*/
/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp)
+static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp)
{
int err = 0;
int k;
@@ -163,17 +184,18 @@ int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
out:
return err;
}
-EXPORT_SYMBOL(hermes_doicmd_wait);
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
{
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
+ hw->eeprom_pda = false;
+ hw->ops = &hermes_ops_local;
}
EXPORT_SYMBOL(hermes_struct_init);
-int hermes_init(hermes_t *hw)
+static int hermes_init(hermes_t *hw)
{
u16 reg;
int err = 0;
@@ -217,7 +239,6 @@ int hermes_init(hermes_t *hw)
return err;
}
-EXPORT_SYMBOL(hermes_init);
/* Issue a command to the chip, and (busy!) wait for it to
* complete.
@@ -228,8 +249,8 @@ EXPORT_SYMBOL(hermes_init);
* > 0 on error returned by the firmware
*
* Callable from any context, but locking is your problem. */
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp)
+static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
{
int err;
int k;
@@ -291,9 +312,8 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
out:
return err;
}
-EXPORT_SYMBOL(hermes_docmd_wait);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
{
int err = 0;
int k;
@@ -333,7 +353,6 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
return 0;
}
-EXPORT_SYMBOL(hermes_allocate);
/* Set up a BAP to read a particular chunk of data from card's internal buffer.
*
@@ -403,8 +422,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -422,7 +441,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pread);
/* Write a block of data to the chip's buffer, via the
* BAP. Synchronization/serialization is the caller's problem.
@@ -432,8 +450,8 @@ EXPORT_SYMBOL(hermes_bap_pread);
* 0 on success
* > 0 on error from firmware
*/
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
+ u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -451,7 +469,6 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
out:
return err;
}
-EXPORT_SYMBOL(hermes_bap_pwrite);
/* Read a Length-Type-Value record from the card.
*
@@ -461,8 +478,8 @@ EXPORT_SYMBOL(hermes_bap_pwrite);
* practice.
*
* Callable from user or bh context. */
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
- u16 *length, void *buf)
+static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
+ u16 *length, void *buf)
{
int err = 0;
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -505,10 +522,9 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
return 0;
}
-EXPORT_SYMBOL(hermes_read_ltv);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value)
+static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *value)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -533,4 +549,228 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
return err;
}
-EXPORT_SYMBOL(hermes_write_ltv);
+
+/*** Hermes AUX control ***/
+
+static inline void
+hermes_aux_setaddr(hermes_t *hw, u32 addr)
+{
+ hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
+ hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
+}
+
+static inline int
+hermes_aux_control(hermes_t *hw, int enabled)
+{
+ int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
+ int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
+ int i;
+
+ /* Already open? */
+ if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
+ return 0;
+
+ hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
+ hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
+ hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
+ hermes_write_reg(hw, HERMES_CONTROL, action);
+
+ for (i = 0; i < 20; i++) {
+ udelay(10);
+ if (hermes_read_reg(hw, HERMES_CONTROL) ==
+ desired_state)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/*** Hermes programming ***/
+
+/* About to start programming data (Hermes I)
+ * offset is the entry point
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_init(hermes_t *hw, u32 offset)
+{
+ int err;
+
+ /* Disable interrupts?*/
+ /*hw->inten = 0x0;*/
+ /*hermes_write_regn(hw, INTEN, 0);*/
+ /*hermes_set_irqmask(hw, 0);*/
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Using init_cmd_wait rather than cmd_wait */
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0100 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hw->ops->init_cmd_wait(hw,
+ 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", err);
+
+ if (err)
+ return err;
+
+ pr_debug("Enabling volatile, EP 0x%08x\n", offset);
+ err = hw->ops->init_cmd_wait(hw,
+ HERMES_PROGRAM_ENABLE_VOLATILE,
+ offset & 0xFFFFu,
+ offset >> 16,
+ 0,
+ NULL);
+ pr_debug("PROGRAM_ENABLE returned %d\n", err);
+
+ return err;
+}
+
+/* Done programming data (Hermes I)
+ *
+ * Spectrum_cs' Symbol fw does not require this
+ * wl_lkm Agere fw does
+ * Don't know about intersil
+ */
+static int hermesi_program_end(hermes_t *hw)
+{
+ struct hermes_response resp;
+ int rc = 0;
+ int err;
+
+ rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
+
+ pr_debug("PROGRAM_DISABLE returned %d, "
+ "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
+ rc, resp.resp0, resp.resp1, resp.resp2);
+
+ if ((rc == 0) &&
+ ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
+ rc = -EIO;
+
+ err = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", err);
+
+ /* Acknowledge any outstanding command */
+ hermes_write_regn(hw, EVACK, 0xFFFF);
+
+ /* Reinitialise, ignoring return */
+ (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
+ 0, 0, 0, NULL);
+
+ return rc ? rc : err;
+}
+
+static int hermes_program_bytes(struct hermes *hw, const char *data,
+ u32 addr, u32 len)
+{
+ /* wl lkm splits the programming into chunks of 2000 bytes.
+ * This restriction appears to come from USB. The PCMCIA
+ * adapters can program the whole lot in one go */
+ hermes_aux_setaddr(hw, addr);
+ hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
+ return 0;
+}
+
+/* Read PDA from the adapter */
+static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
+{
+ int ret;
+ u16 pda_size;
+ u16 data_len = pda_len;
+ __le16 *data = pda;
+
+ if (hw->eeprom_pda) {
+ /* PDA of spectrum symbol is in eeprom */
+
+ /* Issue command to read EEPROM */
+ ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
+ if (ret)
+ return ret;
+ } else {
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+ data_len = pda_len - 4;
+ data = pda + 2;
+ }
+
+ /* Open auxiliary port */
+ ret = hermes_aux_control(hw, 1);
+ pr_debug("AUX enable returned %d\n", ret);
+ if (ret)
+ return ret;
+
+ /* Read PDA */
+ hermes_aux_setaddr(hw, pda_addr);
+ hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
+
+ /* Close aux port */
+ ret = hermes_aux_control(hw, 0);
+ pr_debug("AUX disable returned %d\n", ret);
+
+ /* Check PDA length */
+ pda_size = le16_to_cpu(pda[0]);
+ pr_debug("Actual PDA length %d, Max allowed %d\n",
+ pda_size, pda_len);
+ if (pda_size > pda_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hermes_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_irqsave(lock, *flags);
+}
+
+static void hermes_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_irqrestore(lock, *flags);
+}
+
+static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_irq(lock);
+}
+
+static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_irq(lock);
+}
+
+/* Hermes operations for local buses */
+static const struct hermes_ops hermes_ops_local = {
+ .init = hermes_init,
+ .cmd_wait = hermes_docmd_wait,
+ .init_cmd_wait = hermes_doicmd_wait,
+ .allocate = hermes_allocate,
+ .read_ltv = hermes_read_ltv,
+ .write_ltv = hermes_write_ltv,
+ .bap_pread = hermes_bap_pread,
+ .bap_pwrite = hermes_bap_pwrite,
+ .read_pda = hermes_read_pda,
+ .program_init = hermesi_program_init,
+ .program_end = hermesi_program_end,
+ .program = hermes_program_bytes,
+ .lock_irqsave = hermes_lock_irqsave,
+ .unlock_irqrestore = hermes_unlock_irqrestore,
+ .lock_irq = hermes_lock_irq,
+ .unlock_irq = hermes_unlock_irq,
+};
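hermes_ops_local exists so that another backend can supply its own table. A hypothetical bus driver (all my_bus_* names are invented for illustration; the real USB backend added by this series lives in orinoco_usb.c and is not shown here) keeps hermes_struct_init() for the common fields and then swaps in its own ops and private data:

/* Assumes my_bus_init(), my_bus_cmd_wait(), ... are implemented elsewhere */
static const struct hermes_ops hermes_ops_mybus = {
	.init			= my_bus_init,
	.cmd_wait		= my_bus_cmd_wait,
	.init_cmd_wait		= my_bus_init_cmd_wait,
	.allocate		= my_bus_allocate,
	.read_ltv		= my_bus_read_ltv,
	.write_ltv		= my_bus_write_ltv,
	.bap_pread		= my_bus_bap_pread,
	.bap_pwrite		= my_bus_bap_pwrite,
	.read_pda		= my_bus_read_pda,
	.program_init		= my_bus_program_init,
	.program_end		= my_bus_program_end,
	.program		= my_bus_program,
	.lock_irqsave		= my_bus_lock_irqsave,
	.unlock_irqrestore	= my_bus_unlock_irqrestore,
	.lock_irq		= my_bus_lock_irq,
	.unlock_irq		= my_bus_unlock_irq,
};

static void my_bus_attach_hermes(struct hermes *hw, void *bus_priv)
{
	hw->ops = &hermes_ops_mybus;
	hw->priv = bus_priv;		/* new field added in hermes.h below */
	hw->eeprom_pda = false;
}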
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 2dddbb597c4..9ca34e722b4 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -374,6 +374,37 @@ struct hermes_multicast {
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
+struct hermes;
+
+/* Functions to access hardware */
+struct hermes_ops {
+ int (*init)(struct hermes *hw);
+ int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp);
+ int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
+ u16 parm0, u16 parm1, u16 parm2,
+ struct hermes_response *resp);
+ int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
+ int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
+ u16 *length, void *buf);
+ int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
+ u16 length, const void *value);
+ int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
+ u16 id, u16 offset);
+ int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
+ int len, u16 id, u16 offset);
+ int (*read_pda)(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len);
+ int (*program_init)(struct hermes *hw, u32 entry_point);
+ int (*program_end)(struct hermes *hw);
+ int (*program)(struct hermes *hw, const char *buf,
+ u32 addr, u32 len);
+ void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
+ void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
+ void (*lock_irq)(spinlock_t *lock);
+ void (*unlock_irq)(spinlock_t *lock);
+};
+
/* Basic control structure */
typedef struct hermes {
void __iomem *iobase;
@@ -381,6 +412,9 @@ typedef struct hermes {
#define HERMES_16BIT_REGSPACING 0
#define HERMES_32BIT_REGSPACING 1
u16 inten; /* Which interrupts should be enabled? */
+ bool eeprom_pda;
+ const struct hermes_ops *ops;
+ void *priv;
} hermes_t;
/* Register access convenience macros */
@@ -394,22 +428,6 @@ typedef struct hermes {
/* Function prototypes */
void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
-int hermes_init(hermes_t *hw);
-int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp);
-int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp);
-int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
-
-int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
- u16 id, u16 offset);
-int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset);
-int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
- u16 *length, void *buf);
-int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
- u16 length, const void *value);
/* Inline functions */
@@ -426,13 +444,13 @@ static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
static inline int hermes_enable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
0, NULL);
}
static inline int hermes_disable_port(hermes_t *hw, int port)
{
- return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
+ return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
0, NULL);
}
@@ -440,7 +458,7 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
* information frame in __orinoco_ev_info() */
static inline int hermes_inquire(hermes_t *hw, u16 rid)
{
- return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
+ return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
}
#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
@@ -475,10 +493,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
}
#define HERMES_READ_RECORD(hw, bap, rid, buf) \
- (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
+ (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
- (hermes_write_ltv((hw), (bap), (rid), \
- HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
+ (hw->ops->write_ltv((hw), (bap), (rid), \
+ HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index fb157eb889c..6da85e75fce 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -46,37 +46,11 @@
#define PFX "hermes_dld: "
-/*
- * AUX port access. To unlock the AUX port write the access keys to the
- * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
- * register. Then read it and make sure it's HERMES_AUX_ENABLED.
- */
-#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
-#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
-#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
-#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
-
-#define HERMES_AUX_PW0 0xFE01
-#define HERMES_AUX_PW1 0xDC23
-#define HERMES_AUX_PW2 0xBA45
-
-/* HERMES_CMD_DOWNLD */
-#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
-
/* End markers used in dblocks */
#define PDI_END 0x00000000 /* End of PDA */
#define BLOCK_END 0xFFFFFFFF /* Last image block */
#define TEXT_END 0x1A /* End of text header */
-/* Limit the amout we try to download in a single shot.
- * Size is in bytes.
- */
-#define MAX_DL_SIZE 1024
-#define LIMIT_PROGRAM_SIZE 0
-
/*
* The following structures have little-endian fields denoted by
* the leading underscore. Don't access them directly - use inline
@@ -165,41 +139,6 @@ pdi_len(const struct pdi *pdi)
return 2 * (le16_to_cpu(pdi->len) - 1);
}
-/*** Hermes AUX control ***/
-
-static inline void
-hermes_aux_setaddr(hermes_t *hw, u32 addr)
-{
- hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
- hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
-}
-
-static inline int
-hermes_aux_control(hermes_t *hw, int enabled)
-{
- int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
- int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
- int i;
-
- /* Already open? */
- if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
- return 0;
-
- hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
- hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
- hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
- hermes_write_reg(hw, HERMES_CONTROL, action);
-
- for (i = 0; i < 20; i++) {
- udelay(10);
- if (hermes_read_reg(hw, HERMES_CONTROL) ==
- desired_state)
- return 0;
- }
-
- return -EBUSY;
-}
-
/*** Plug Data Functions ***/
/*
@@ -271,62 +210,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
return -EINVAL;
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
-
- return 0;
-}
-
-/* Read PDA from the adapter */
-int hermes_read_pda(hermes_t *hw,
- __le16 *pda,
- u32 pda_addr,
- u16 pda_len,
- int use_eeprom) /* can we get this into hw? */
-{
- int ret;
- u16 pda_size;
- u16 data_len = pda_len;
- __le16 *data = pda;
-
- if (use_eeprom) {
- /* PDA of spectrum symbol is in eeprom */
-
- /* Issue command to read EEPROM */
- ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
- if (ret)
- return ret;
- } else {
- /* wl_lkm does not include PDA size in the PDA area.
- * We will pad the information into pda, so other routines
- * don't have to be modified */
- pda[0] = cpu_to_le16(pda_len - 2);
- /* Includes CFG_PROD_DATA but not itself */
- pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
- data_len = pda_len - 4;
- data = pda + 2;
- }
-
- /* Open auxiliary port */
- ret = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", ret);
- if (ret)
- return ret;
-
- /* read PDA from EEPROM */
- hermes_aux_setaddr(hw, pda_addr);
- hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
-
- /* Close aux port */
- ret = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", ret);
-
- /* Check PDA length */
- pda_size = le16_to_cpu(pda[0]);
- pr_debug(PFX "Actual PDA length %d, Max allowed %d\n",
- pda_size, pda_len);
- if (pda_size > pda_len)
- return -EINVAL;
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
return 0;
}
@@ -389,101 +273,13 @@ hermes_blocks_length(const char *first_block, const void *end)
/*** Hermes programming ***/
-/* About to start programming data (Hermes I)
- * offset is the entry point
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_init(hermes_t *hw, u32 offset)
-{
- int err;
-
- /* Disable interrupts?*/
- /*hw->inten = 0x0;*/
- /*hermes_write_regn(hw, INTEN, 0);*/
- /*hermes_set_irqmask(hw, 0);*/
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Using doicmd_wait rather than docmd_wait */
- err = hermes_doicmd_wait(hw,
- 0x0100 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_doicmd_wait(hw,
- 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_aux_control(hw, 1);
- pr_debug(PFX "AUX enable returned %d\n", err);
-
- if (err)
- return err;
-
- pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
- err = hermes_doicmd_wait(hw,
- HERMES_PROGRAM_ENABLE_VOLATILE,
- offset & 0xFFFFu,
- offset >> 16,
- 0,
- NULL);
- pr_debug(PFX "PROGRAM_ENABLE returned %d\n", err);
-
- return err;
-}
-
-/* Done programming data (Hermes I)
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about intersil
- */
-int hermesi_program_end(hermes_t *hw)
-{
- struct hermes_response resp;
- int rc = 0;
- int err;
-
- rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
-
- pr_debug(PFX "PROGRAM_DISABLE returned %d, "
- "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
- rc, resp.resp0, resp.resp1, resp.resp2);
-
- if ((rc == 0) &&
- ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
- rc = -EIO;
-
- err = hermes_aux_control(hw, 0);
- pr_debug(PFX "AUX disable returned %d\n", err);
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Reinitialise, ignoring return */
- (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
-
- return rc ? rc : err;
-}
-
/* Program the data blocks */
int hermes_program(hermes_t *hw, const char *first_block, const void *end)
{
const struct dblock *blk;
u32 blkaddr;
u32 blklen;
-#if LIMIT_PROGRAM_SIZE
- u32 addr;
- u32 len;
-#endif
+ int err = 0;
blk = (const struct dblock *) first_block;
@@ -498,30 +294,10 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
pr_debug(PFX "Programming block of length %d "
"to address 0x%08x\n", blklen, blkaddr);
-#if !LIMIT_PROGRAM_SIZE
- /* wl_lkm driver splits this into writes of 2000 bytes */
- hermes_aux_setaddr(hw, blkaddr);
- hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
- blklen);
-#else
- len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
- addr = blkaddr;
-
- while (addr < (blkaddr + blklen)) {
- pr_debug(PFX "Programming subblock of length %d "
- "to address 0x%08x. Data @ %p\n",
- len, addr, &blk->data[addr - blkaddr]);
-
- hermes_aux_setaddr(hw, addr);
- hermes_write_bytes(hw, HERMES_AUXDATA,
- &blk->data[addr - blkaddr],
- len);
-
- addr += len;
- len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
- (blkaddr + blklen - addr) : MAX_DL_SIZE;
- }
-#endif
+ err = hw->ops->program(hw, blk->data, blkaddr, blklen);
+ if (err)
+ break;
+
blk = (const struct dblock *) &blk->data[blklen];
if ((void *) blk > (end - sizeof(*blk)))
@@ -530,7 +306,7 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
blkaddr = dblock_addr(blk);
blklen = dblock_len(blk);
}
- return 0;
+ return err;
}
/*** Default plugging data for Hermes I ***/
@@ -690,9 +466,8 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
if ((pdi_len(pdi) == pdr_len(pdr)) &&
((void *) pdi->data + pdi_len(pdi) < pda_end)) {
/* do the actual plugging */
- hermes_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_bytes(hw, HERMES_AUXDATA,
- pdi->data, pdi_len(pdi));
+ hw->ops->program(hw, pdi->data, pdr_addr(pdr),
+ pdi_len(pdi));
}
}
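The comment in hermes_program_bytes() above notes that wl_lkm splits programming into 2000-byte chunks because of USB. A backend with size-limited transfers would implement hw->ops->program() along these lines; the chunk size and the my_bus_write_aux() helper are assumptions for illustration, not part of this patch:

#define MY_BUS_MAX_DL_CHUNK 2000	/* assumed per-transfer limit */

static int my_bus_program_bytes(struct hermes *hw, const char *data,
				u32 addr, u32 len)
{
	while (len) {
		u32 chunk = min_t(u32, len, MY_BUS_MAX_DL_CHUNK);
		int err;

		/* hypothetical helper: set the AUX address and push
		 * 'chunk' bytes over the bus */
		err = my_bus_write_aux(hw, addr, data, chunk);
		if (err)
			return err;

		data += chunk;
		addr += chunk;
		len -= chunk;
	}

	return 0;
}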
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index e6369242e49..6fbd7885012 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -177,9 +177,9 @@ int determine_fw_capabilities(struct orinoco_private *priv,
/* 3Com MAC : 00:50:DA:* */
memset(tmp, 0, sizeof(tmp));
/* Get the Symbol firmware version */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SECONDARYVERSION_SYMBOL,
- SYMBOL_MAX_VER_LEN, NULL, &tmp);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SECONDARYVERSION_SYMBOL,
+ SYMBOL_MAX_VER_LEN, NULL, &tmp);
if (err) {
dev_warn(dev, "Error %d reading Symbol firmware info. "
"Wildly guessing capabilities...\n", err);
@@ -262,6 +262,13 @@ int determine_fw_capabilities(struct orinoco_private *priv,
if (fw_name)
dev_info(dev, "Firmware determined as %s\n", fw_name);
+#ifndef CONFIG_HERMES_PRISM
+ if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
+ dev_err(dev, "Support for Prism chipset is not enabled\n");
+ return -ENODEV;
+ }
+#endif
+
return 0;
}
@@ -279,8 +286,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
u16 reclen;
/* Get the MAC address */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- ETH_ALEN, NULL, dev_addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ ETH_ALEN, NULL, dev_addr);
if (err) {
dev_warn(dev, "Failed to read MAC address!\n");
goto out;
@@ -289,8 +296,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
dev_dbg(dev, "MAC address %pM\n", dev_addr);
/* Get the station name */
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- sizeof(nickbuf), &reclen, &nickbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ sizeof(nickbuf), &reclen, &nickbuf);
if (err) {
dev_err(dev, "failed to read station name\n");
goto out;
@@ -367,6 +374,32 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFPREAMBLE_SYMBOL,
&priv->preamble);
+ if (err) {
+ dev_err(dev, "Failed to read preamble setup\n");
+ goto out;
+ }
+ }
+
+ /* Retry settings */
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
+ &priv->short_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read short retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
+ &priv->long_retry_limit);
+ if (err) {
+ dev_err(dev, "Failed to read long retry limit\n");
+ goto out;
+ }
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
+ &priv->retry_lifetime);
+ if (err) {
+ dev_err(dev, "Failed to read max retry lifetime\n");
+ goto out;
}
out:
@@ -380,11 +413,11 @@ int orinoco_hw_allocate_fid(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
/* Try workaround for old Symbol firmware bug */
priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
- err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
+ err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
dev_warn(dev, "Firmware ALLOC bug detected "
"(old Symbol firmware?). Work around %s\n",
@@ -430,8 +463,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
struct hermes_idstring idbuf;
/* Set the MAC address */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
+ HERMES_BYTES_TO_RECLEN(ETH_ALEN),
+ dev->dev_addr);
if (err) {
printk(KERN_ERR "%s: Error %d setting MAC address\n",
dev->name, err);
@@ -494,7 +528,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
/* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -502,7 +536,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
dev->name, err);
return err;
}
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
&idbuf);
if (err) {
@@ -514,9 +548,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
/* Set the station name */
idbuf.len = cpu_to_le16(strlen(priv->nick));
memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
- err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
- &idbuf);
+ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ &idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting nickname\n",
dev->name, err);
@@ -631,12 +665,12 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Enable monitor mode */
dev->type = ARPHRD_IEEE80211;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_MONITOR, 0, NULL);
} else {
/* Disable monitor mode */
dev->type = ARPHRD_ETHER;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_STOP, 0, NULL);
}
if (err)
@@ -662,8 +696,8 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
if ((key < 0) || (key >= 4))
return -EINVAL;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
- sizeof(tsc_arr), NULL, &tsc_arr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
+ sizeof(tsc_arr), NULL, &tsc_arr);
if (!err)
memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
@@ -842,7 +876,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
memcpy(key, priv->keys[i].key,
priv->keys[i].key_len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFDEFAULTKEY0 + i,
HERMES_BYTES_TO_RECLEN(keylen),
key);
@@ -1049,17 +1083,17 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
* group address if either we want to multicast, or if we were
* multicasting and want to stop */
if (!promisc && (mc_count || priv->mc_count)) {
- struct dev_mc_list *p;
+ struct netdev_hw_addr *ha;
struct hermes_multicast mclist;
int i = 0;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == mc_count)
break;
- memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
+ memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
}
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFGROUPADDRESSES,
HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
&mclist);
@@ -1101,15 +1135,15 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
HERMES_RID_CNFDESIREDSSID;
- err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
- NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
+ NULL, &essidbuf);
if (err)
goto fail_unlock;
} else {
*active = 0;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
- sizeof(essidbuf), NULL, &essidbuf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
+ sizeof(essidbuf), NULL, &essidbuf);
if (err)
goto fail_unlock;
}
@@ -1180,8 +1214,8 @@ int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
- sizeof(list), NULL, &list);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
+ sizeof(list), NULL, &list);
orinoco_unlock(priv, &flags);
if (err)
@@ -1248,7 +1282,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
idbuf.len = cpu_to_le16(len);
memcpy(idbuf.val, ssid->ssid, len);
- err = hermes_write_ltv(hw, USER_BAP,
+ err = hw->ops->write_ltv(hw, USER_BAP,
HERMES_RID_CNFSCANSSID_AGERE,
HERMES_BYTES_TO_RECLEN(len + 2),
&idbuf);
@@ -1312,8 +1346,8 @@ int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
hermes_t *hw = &priv->hw;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, addr);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, addr);
return err;
}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 9799a1d14a6..97af71e7995 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -22,7 +22,6 @@
/* Forward declarations */
struct orinoco_private;
-struct dev_addr_list;
int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
size_t fw_name_len, u32 *hw_ver);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 413e9ab6cab..ca71f08709b 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -254,7 +254,7 @@ void set_port_type(struct orinoco_private *priv)
/* Device methods */
/********************************************************************/
-static int orinoco_open(struct net_device *dev)
+int orinoco_open(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -272,8 +272,9 @@ static int orinoco_open(struct net_device *dev)
return err;
}
+EXPORT_SYMBOL(orinoco_open);
-static int orinoco_stop(struct net_device *dev)
+int orinoco_stop(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
@@ -281,25 +282,27 @@ static int orinoco_stop(struct net_device *dev)
/* We mustn't use orinoco_lock() here, because we need to be
able to close the interface even if hw_unavailable is set
(e.g. as we're released after a PC Card removal) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->open = 0;
err = __orinoco_down(priv);
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return err;
}
+EXPORT_SYMBOL(orinoco_stop);
-static struct net_device_stats *orinoco_get_stats(struct net_device *dev)
+struct net_device_stats *orinoco_get_stats(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
return &priv->stats;
}
+EXPORT_SYMBOL(orinoco_get_stats);
-static void orinoco_set_multicast_list(struct net_device *dev)
+void orinoco_set_multicast_list(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
@@ -313,8 +316,9 @@ static void orinoco_set_multicast_list(struct net_device *dev)
__orinoco_set_multicast_list(dev);
orinoco_unlock(priv, &flags);
}
+EXPORT_SYMBOL(orinoco_set_multicast_list);
-static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
+int orinoco_change_mtu(struct net_device *dev, int new_mtu)
{
struct orinoco_private *priv = ndev_priv(dev);
@@ -330,23 +334,115 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+EXPORT_SYMBOL(orinoco_change_mtu);
/********************************************************************/
/* Tx path */
/********************************************************************/
+/* Add encapsulation and MIC to the existing SKB.
+ * The main xmit routine will then send the whole lot to the card.
+ * Need 8 bytes headroom
+ * Need 8 bytes tailroom
+ *
+ *                          With encapsulated Ethernet-II frame
+ *                          --------
+ *                          802.3 header (14 bytes)
+ *                           dst[6]
+ * --------                  src[6]
+ * 802.3 header (14 bytes)   len[2]
+ *  dst[6]                  802.2 header (8 bytes)
+ *  src[6]                   encaps[6]
+ *  len[2] <- leave alone -> len[2]
+ * --------                 -------- <-- 0
+ * Payload                  Payload
+ * ...                      ...
+ *
+ * --------                 --------
+ *                          MIC (8 bytes)
+ *                          --------
+ *
+ * returns 0 on success, -ENOMEM on error.
+ */
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic_buf)
+{
+ struct orinoco_tkip_key *key;
+ struct ethhdr *eh;
+ int do_mic;
+
+ key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
+
+ do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
+ (key != NULL));
+
+ if (do_mic)
+ *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
+ HERMES_TXCTRL_MIC;
+
+ eh = (struct ethhdr *)skb->data;
+
+ /* Encapsulate Ethernet-II frames */
+ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
+ struct header_struct {
+ struct ethhdr eth; /* 802.3 header */
+ u8 encap[6]; /* 802.2 header */
+ } __attribute__ ((packed)) hdr;
+ int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
+
+ if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "%s: Not enough headroom for 802.2 headers %d\n",
+ dev->name, skb_headroom(skb));
+ return -ENOMEM;
+ }
+
+ /* Fill in new header */
+ memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
+ hdr.eth.h_proto = htons(len);
+ memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
+
+ /* Make room for the new header, and copy it in */
+ eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
+ memcpy(eh, &hdr, sizeof(hdr));
+ }
+
+ /* Calculate Michael MIC */
+ if (do_mic) {
+ size_t len = skb->len - ETH_HLEN;
+ u8 *mic = &mic_buf[0];
+
+ /* Have to write to an even address, so copy the spare
+ * byte across */
+ if (skb->len % 2) {
+ *mic = skb->data[skb->len - 1];
+ mic++;
+ }
+
+ orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
+ eh->h_dest, eh->h_source, 0 /* priority */,
+ skb->data + ETH_HLEN,
+ len, mic);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(orinoco_process_xmit_skb);
+
static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
- struct orinoco_tkip_key *key;
hermes_t *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
- struct ethhdr *eh;
int tx_control;
unsigned long flags;
- int do_mic;
+ u8 mic_buf[MICHAEL_MIC_LEN+1];
if (!netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
@@ -378,16 +474,12 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < ETH_HLEN)
goto drop;
- key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
-
- do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
- (key != NULL));
-
tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
- if (do_mic)
- tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
- HERMES_TXCTRL_MIC;
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic_buf[0]);
+ if (err)
+ goto drop;
if (priv->has_alt_txcntl) {
/* WPA enabled firmwares have tx_cntl at the end of
@@ -400,8 +492,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
*txcntl = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -414,8 +506,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&desc, 0, sizeof(desc));
desc.tx_control = cpu_to_le16(tx_control);
- err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
+ txfid, 0);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx "
@@ -430,68 +522,24 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
}
- eh = (struct ethhdr *)skb->data;
-
- /* Encapsulate Ethernet-II frames */
- if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct {
- struct ethhdr eth; /* 802.3 header */
- u8 encap[6]; /* 802.2 header */
- } __attribute__ ((packed)) hdr;
-
- /* Strip destination and source from the data */
- skb_pull(skb, 2 * ETH_ALEN);
-
- /* And move them to a separate header */
- memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
- hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
- memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
-
- /* Insert the SNAP header */
- if (skb_headroom(skb) < sizeof(hdr)) {
- printk(KERN_ERR
- "%s: Not enough headroom for 802.2 headers %d\n",
- dev->name, skb_headroom(skb));
- goto drop;
- }
- eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
- memcpy(eh, &hdr, sizeof(hdr));
- }
-
- err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
- txfid, HERMES_802_3_OFFSET);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
+ txfid, HERMES_802_3_OFFSET);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
dev->name, err);
goto busy;
}
- /* Calculate Michael MIC */
- if (do_mic) {
- u8 mic_buf[MICHAEL_MIC_LEN + 1];
- u8 *mic;
- size_t offset;
- size_t len;
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ size_t offset = HERMES_802_3_OFFSET + skb->len;
+ size_t len = MICHAEL_MIC_LEN;
- if (skb->len % 2) {
- /* MIC start is on an odd boundary */
- mic_buf[0] = skb->data[skb->len - 1];
- mic = &mic_buf[1];
- offset = skb->len - 1;
- len = MICHAEL_MIC_LEN + 1;
- } else {
- mic = &mic_buf[0];
- offset = skb->len;
- len = MICHAEL_MIC_LEN;
+ if (offset % 2) {
+ offset--;
+ len++;
}
-
- orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
- eh->h_dest, eh->h_source, 0 /* priority */,
- skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
-
- /* Write the MIC */
- err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
- txfid, HERMES_802_3_OFFSET + offset);
+ err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
+ txfid, offset);
if (err) {
printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
dev->name, err);
@@ -502,7 +550,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Finally, we actually initiate the send */
netif_stop_queue(dev);
- err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
txfid, NULL);
if (err) {
netif_start_queue(dev);
@@ -512,7 +560,6 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
goto busy;
}
- dev->trans_start = jiffies;
stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
goto ok;
@@ -572,9 +619,9 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
return; /* Nothing's really happened */
/* Read part of the frame header - we need status and addr1 */
- err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- sizeof(struct hermes_txexc_data),
- fid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
+ sizeof(struct hermes_txexc_data),
+ fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
stats->tx_errors++;
@@ -615,7 +662,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
netif_wake_queue(dev);
}
-static void orinoco_tx_timeout(struct net_device *dev)
+void orinoco_tx_timeout(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -630,6 +677,7 @@ static void orinoco_tx_timeout(struct net_device *dev)
schedule_work(&priv->reset_work);
}
+EXPORT_SYMBOL(orinoco_tx_timeout);
/********************************************************************/
/* Rx path (data frames) */
@@ -764,9 +812,9 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
/* If any, copy the data from the card to the skb */
if (datalen > 0) {
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
- ALIGN(datalen, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
+ ALIGN(datalen, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading monitor frame\n",
dev->name, err);
@@ -792,7 +840,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
stats->rx_dropped++;
}
-static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -814,8 +862,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
rxfid = hermes_read_regn(hw, RXFID);
- err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
- rxfid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
+ rxfid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading Rx descriptor. "
"Frame dropped.\n", dev->name, err);
@@ -882,9 +930,9 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
nothing is removed. 2 is for aligning the IP header. */
skb_reserve(skb, ETH_HLEN + 2);
- err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length),
- ALIGN(length, 2), rxfid,
- HERMES_802_2_OFFSET);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
+ ALIGN(length, 2), rxfid,
+ HERMES_802_2_OFFSET);
if (err) {
printk(KERN_ERR "%s: error %d reading frame. "
"Frame dropped.\n", dev->name, err);
@@ -913,6 +961,7 @@ update_stats:
out:
kfree(desc);
}
+EXPORT_SYMBOL(__orinoco_ev_rx);
static void orinoco_rx(struct net_device *dev,
struct hermes_rx_descriptor *desc,
@@ -1145,9 +1194,9 @@ static void orinoco_join_ap(struct work_struct *work)
goto out;
/* Read scan results from the firmware */
- err = hermes_read_ltv(hw, USER_BAP,
- HERMES_RID_SCANRESULTSTABLE,
- MAX_SCAN_LEN, &len, buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_SCANRESULTSTABLE,
+ MAX_SCAN_LEN, &len, buf);
if (err) {
printk(KERN_ERR "%s: Cannot read scan results\n",
dev->name);
@@ -1194,8 +1243,8 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
union iwreq_data wrqu;
int err;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
+ ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
if (err != 0)
return;
@@ -1217,8 +1266,8 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1247,8 +1296,9 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
if (!priv->has_wpa)
return;
- err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
- sizeof(buf), NULL, &buf);
+ err = hw->ops->read_ltv(hw, USER_BAP,
+ HERMES_RID_CURRENT_ASSOC_RESP_INFO,
+ sizeof(buf), NULL, &buf);
if (err != 0)
return;
@@ -1353,7 +1403,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
spin_unlock_irqrestore(&priv->scan_lock, flags);
}
-static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
u16 infofid;
@@ -1371,8 +1421,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
infofid = hermes_read_regn(hw, INFOFID);
/* Read the info frame header - don't try too hard */
- err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info),
- infofid, 0);
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
+ infofid, 0);
if (err) {
printk(KERN_ERR "%s: error %d reading info frame. "
"Frame dropped.\n", dev->name, err);
@@ -1393,8 +1443,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
len = sizeof(tallies);
}
- err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
+ infofid, sizeof(info));
if (err)
break;
@@ -1429,8 +1479,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
}
- err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
+ infofid, sizeof(info));
if (err)
break;
newstatus = le16_to_cpu(linkstatus.linkstatus);
@@ -1494,8 +1544,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
}
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
+ infofid, sizeof(info));
if (err) {
kfree(buf);
qabort_scan(priv);
@@ -1547,8 +1597,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
break;
/* Read scan data */
- err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
- infofid, sizeof(info));
+ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
+ infofid, sizeof(info));
if (err)
kfree(bss);
else
@@ -1568,9 +1618,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
/* We don't actually do anything about it */
break;
}
-
- return;
}
+EXPORT_SYMBOL(__orinoco_ev_info);
static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
{
@@ -1647,7 +1696,7 @@ static int orinoco_reinit_firmware(struct orinoco_private *priv)
struct hermes *hw = &priv->hw;
int err;
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (priv->do_fw_download && !err) {
err = orinoco_download(priv);
if (err)
@@ -1735,7 +1784,7 @@ void orinoco_reset(struct work_struct *work)
}
/* This has to be called from user context */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
@@ -1750,7 +1799,7 @@ void orinoco_reset(struct work_struct *work)
dev->trans_start = jiffies;
}
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
return;
disable:
@@ -1984,7 +2033,7 @@ int orinoco_init(struct orinoco_private *priv)
priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
/* Initialize the firmware */
- err = hermes_init(hw);
+ err = hw->ops->init(hw);
if (err != 0) {
dev_err(dev, "Failed to initialize firmware (err = %d)\n",
err);
@@ -2067,9 +2116,9 @@ int orinoco_init(struct orinoco_private *priv)
/* Make the hardware available, as long as it hasn't been
* removed elsewhere (e.g. by PCMCIA hot unplug) */
- spin_lock_irq(&priv->lock);
+ orinoco_lock_irq(priv);
priv->hw_unavailable--;
- spin_unlock_irq(&priv->lock);
+ orinoco_unlock_irq(priv);
dev_dbg(dev, "Ready\n");
@@ -2192,7 +2241,8 @@ EXPORT_SYMBOL(alloc_orinocodev);
*/
int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq)
+ unsigned int irq,
+ const struct net_device_ops *ops)
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct wireless_dev *wdev;
@@ -2211,16 +2261,21 @@ int orinoco_if_add(struct orinoco_private *priv,
/* Setup / override net_device fields */
dev->ieee80211_ptr = wdev;
- dev->netdev_ops = &orinoco_netdev_ops;
dev->watchdog_timeo = HZ; /* 1 second timeout */
dev->wireless_handlers = &orinoco_handler_def;
#ifdef WIRELESS_SPY
dev->wireless_data = &priv->wireless_data;
#endif
+ /* Default to standard ops if not set */
+ if (ops)
+ dev->netdev_ops = ops;
+ else
+ dev->netdev_ops = &orinoco_netdev_ops;
+
/* we use the default eth_mac_addr for setting the MAC addr */
/* Reserve space in skb for the SNAP header */
- dev->hard_header_len += ENCAPS_OVERHEAD;
+ dev->needed_headroom = ENCAPS_OVERHEAD;
netif_carrier_off(dev);
@@ -2305,7 +2360,7 @@ int orinoco_up(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = orinoco_reinit_firmware(priv);
if (err) {
@@ -2325,7 +2380,7 @@ int orinoco_up(struct orinoco_private *priv)
}
exit:
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
return 0;
}
@@ -2337,7 +2392,7 @@ void orinoco_down(struct orinoco_private *priv)
unsigned long flags;
int err;
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
err = __orinoco_down(priv);
if (err)
printk(KERN_WARNING "%s: Error %d downing interface\n",
@@ -2345,7 +2400,7 @@ void orinoco_down(struct orinoco_private *priv)
netif_device_detach(dev);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
}
EXPORT_SYMBOL(orinoco_down);
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 21ab36cd76c..4dadf9880a9 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -33,18 +33,6 @@ int orinoco_commit(struct orinoco_private *priv);
void orinoco_reset(struct work_struct *work);
/* Information element helpers - find a home for these... */
-static inline u8 *orinoco_get_ie(u8 *data, size_t len,
- enum ieee80211_eid eid)
-{
- u8 *p = data;
- while ((p + 2) < (data + len)) {
- if (p[0] == eid)
- return p;
- p += p[1] + 2;
- }
- return NULL;
-}
-
#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
#define WPA_SELECTOR_LEN 4
static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 665ef56f838..a6da86e0a70 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -131,6 +131,8 @@ struct orinoco_private {
u16 ap_density, rts_thresh;
u16 pm_on, pm_mcast, pm_period, pm_timeout;
u16 preamble;
+ u16 short_retry_limit, long_retry_limit;
+ u16 retry_lifetime;
#ifdef WIRELESS_SPY
struct iw_spy_data spy_data; /* iwspy support */
struct iw_public_data wireless_data;
@@ -188,12 +190,30 @@ extern void free_orinocodev(struct orinoco_private *priv);
extern int orinoco_init(struct orinoco_private *priv);
extern int orinoco_if_add(struct orinoco_private *priv,
unsigned long base_addr,
- unsigned int irq);
+ unsigned int irq,
+ const struct net_device_ops *ops);
extern void orinoco_if_del(struct orinoco_private *priv);
extern int orinoco_up(struct orinoco_private *priv);
extern void orinoco_down(struct orinoco_private *priv);
extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
+extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+
+int orinoco_process_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ struct orinoco_private *priv,
+ int *tx_control,
+ u8 *mic);
+
+/* Common ndo functions exported for reuse by orinoco_usb */
+int orinoco_open(struct net_device *dev);
+int orinoco_stop(struct net_device *dev);
+struct net_device_stats *orinoco_get_stats(struct net_device *dev);
+void orinoco_set_multicast_list(struct net_device *dev);
+int orinoco_change_mtu(struct net_device *dev, int new_mtu);
+void orinoco_tx_timeout(struct net_device *dev);
+
/********************************************************************/
/* Locking and synchronization functions */
/********************************************************************/
@@ -201,11 +221,11 @@ extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
static inline int orinoco_lock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_lock_irqsave(&priv->lock, *flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, flags);
if (priv->hw_unavailable) {
DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
priv->ndev);
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
return -EBUSY;
}
return 0;
@@ -214,7 +234,17 @@ static inline int orinoco_lock(struct orinoco_private *priv,
static inline void orinoco_unlock(struct orinoco_private *priv,
unsigned long *flags)
{
- spin_unlock_irqrestore(&priv->lock, *flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
+}
+
+static inline void orinoco_lock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->lock_irq(&priv->lock);
+}
+
+static inline void orinoco_unlock_irq(struct orinoco_private *priv)
+{
+ priv->hw.ops->unlock_irq(&priv->lock);
}
/*** Navigate from net_device to orinoco_private ***/
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 1d4ada188ed..b16d5db52a4 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
* struct orinoco_private */
struct orinoco_pccard {
struct pcmcia_device *p_dev;
- dev_node_t node;
/* Used to handle hard reset */
/* yuck, we need this hack to work around the insanity of the
@@ -119,10 +118,6 @@ orinoco_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = orinoco_interrupt;
-
/* General socket configuration defaults can go here. In this
* client, we assume very little, and rely on the CIS for
* almost everything. In most clients, many details (i.e.,
@@ -144,8 +139,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- if (link->dev_node)
- orinoco_if_del(priv);
+ orinoco_if_del(priv);
orinoco_cs_release(link);
@@ -230,7 +224,6 @@ static int
orinoco_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
int ret;
void __iomem *mem;
@@ -258,12 +251,7 @@ orinoco_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- * Allocate an interrupt line. Note that this does not assign
- * a handler to the interrupt, unless the 'Handler' member of
- * the irq structure is initialized.
- */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
if (ret)
goto failed;
@@ -285,9 +273,6 @@ orinoco_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /* Ok, we have the configuration, prepare to register the netdev */
- card->node.major = card->node.minor = 0;
-
/* Initialise the main driver */
if (orinoco_init(priv) != 0) {
printk(KERN_ERR PFX "orinoco_init() failed\n");
@@ -296,17 +281,11 @@ orinoco_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq.AssignedIRQ) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
- /* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev_node. */
- strcpy(card->node.dev_name, priv->ndev->name);
- link->dev_node = &card->node; /* link->dev_node being non-NULL is also
- * used to indicate that the
- * net_device has been registered */
return 0;
failed:
@@ -327,9 +306,9 @@ orinoco_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
@@ -374,87 +353,90 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
"Pavel Roskin <proski@gnu.org>, et al)";
static struct pcmcia_device_id orinoco_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
- PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
- PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
+ PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
+ PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
+ PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
+ PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
+ PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
+ PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
+ PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
+ PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
+ PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
+ PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
+ PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
+ PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
+ PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
+ PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+#ifdef CONFIG_HERMES_PRISM
+ /* Only entries that certainly identify the Prism chipset */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
+ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
+ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
+ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
- PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
- PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+ PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
- PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
- PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
- PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+ PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
+ PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
- PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
- PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
- PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
- PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
- PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+ PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
- PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
- PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
- PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
- PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+#endif
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 075f446b313..bc3ea0b67a4 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -220,7 +220,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index bda5317cc59..468197f8667 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -170,7 +170,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index e0d5874ab42..9358f4d2307 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -259,7 +259,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 88cbc7902aa..784605f0af1 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -156,7 +156,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
goto fail;
}
- err = orinoco_if_add(priv, 0, 0);
+ err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
new file mode 100644
index 00000000000..78f089baa8c
--- /dev/null
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -0,0 +1,1795 @@
+/*
+ * USB Orinoco driver
+ *
+ * Copyright (c) 2003 Manuel Estrada Sainz
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ *
+ * Queueing code based on linux-wlan-ng 0.2.1-pre5
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ *
+ * The license is the same as above.
+ *
+ * Initially based on USB Skeleton driver - 0.7
+ *
+ * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * NOTE: The original USB Skeleton driver is GPL, but all that code is
+ * gone so MPL/GPL applies.
+ */
+
+#define DRIVER_NAME "orinoco_usb"
+#define PFX DRIVER_NAME ": "
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+
+#include "mic.h"
+#include "orinoco.h"
+
+#ifndef URB_ASYNC_UNLINK
+#define URB_ASYNC_UNLINK 0
+#endif
+
+/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
+static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
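+/* 6-byte LLC/SNAP prefix plus the 2-byte ethertype */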
+#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
+
+struct header_struct {
+ /* 802.3 */
+ u8 dest[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+ /* 802.2 */
+ u8 dsap;
+ u8 ssap;
+ u8 ctrl;
+ /* SNAP */
+ u8 oui[3];
+ __be16 ethertype;
+} __attribute__ ((packed));
+
+struct ez_usb_fw {
+ u16 size;
+ const u8 *code;
+};
+
+static struct ez_usb_fw firmware = {
+ .size = 0,
+ .code = NULL,
+};
+
+#ifdef CONFIG_USB_DEBUG
+static int debug = 1;
+#else
+static int debug;
+#endif
+
+/* Debugging macros */
+#undef dbg
+#define dbg(format, arg...) \
+ do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \
+ __func__ , ## arg); } while (0)
+#undef err
+#define err(format, arg...) \
+ do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
+
+/* Module parameters */
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug enabled or not");
+
+MODULE_FIRMWARE("orinoco_ezusb_fw");
+
+/*
+ * Under some conditions, the card gets stuck and stops paying attention
+ * to the world (i.e. data communication stalls) until we do something to
+ * it. Sending an INQ_TALLIES command seems to be enough and should be
+ * harmless otherwise. This behaviour has been observed when using the
+ * driver on a systemimager client during installation. In the past a
+ * timer was used to send INQ_TALLIES commands when there was no other
+ * activity, but it was troublesome and was removed.
+ */
+
+#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
+#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
+#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
+#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */
+
+#define USB_MELCO_VENDOR_ID 0x0411
+#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
+#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
+#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */
+
+#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
+#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */
+
+#define USB_AVAYA8_VENDOR_ID 0x0D98
+#define USB_AVAYAE_VENDOR_ID 0x0D9E
+#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */
+
+#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
+#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */
+#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */
+#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */
+
+#define USB_ELSA_VENDOR_ID 0x05CC
+#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */
+
+#define USB_LEGEND_VENDOR_ID 0x0E7C
+#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */
+
+#define USB_SAMSUNG_VENDOR_ID 0x04E8
+#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
+#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */
+
+#define USB_IGATE_VENDOR_ID 0x0681
+#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */
+
+#define USB_FUJITSU_VENDOR_ID 0x0BF8
+#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */
+
+#define USB_2WIRE_VENDOR_ID 0x1630
+#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */
+
+
+#define EZUSB_REQUEST_FW_TRANS 0xA0
+#define EZUSB_REQUEST_TRIGER 0xAA
+#define EZUSB_REQUEST_TRIG_AC 0xAC
+#define EZUSB_CPUCS_REG 0x7F92
+
+#define EZUSB_RID_TX 0x0700
+#define EZUSB_RID_RX 0x0701
+#define EZUSB_RID_INIT1 0x0702
+#define EZUSB_RID_ACK 0x0710
+#define EZUSB_RID_READ_PDA 0x0800
+#define EZUSB_RID_PROG_INIT 0x0852
+#define EZUSB_RID_PROG_SET_ADDR 0x0853
+#define EZUSB_RID_PROG_BYTES 0x0854
+#define EZUSB_RID_PROG_END 0x0855
+#define EZUSB_RID_DOCMD 0x0860
+
+/* Recognize info frames */
+#define EZUSB_IS_INFO(id) (((id) >= 0xF000) && ((id) <= 0xF2FF))
+
+#define EZUSB_MAGIC 0x0210
+
+#define EZUSB_FRAME_DATA 1
+#define EZUSB_FRAME_CONTROL 2
+
+#define DEF_TIMEOUT (3*HZ)
+
+#define BULK_BUF_SIZE 2048
+
+#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))
+
+#define FW_BUF_SIZE 64
+#define FW_VAR_OFFSET_PTR 0x359
+#define FW_VAR_VALUE 0
+#define FW_HOLE_START 0x100
+#define FW_HOLE_END 0x300
+
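+/* Transport framing used on the bulk pipes: a small header followed
+ * by the Hermes LTV payload. */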
+struct ezusb_packet {
+ __le16 magic; /* 0x0210 */
+ u8 req_reply_count;
+ u8 ans_reply_count;
+ __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
+ __le16 size; /* transport size */
+ __le16 crc; /* CRC up to here */
+ __le16 hermes_len;
+ __le16 hermes_rid;
+ u8 data[0];
+} __attribute__ ((packed));
+
+/* Table of devices that work or may work with this driver */
+static struct usb_device_id ezusb_table[] = {
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
+ {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
+ {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
+ {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
+ {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
+ {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
+ {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
+ {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
+ 0, 0)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
+ {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
+ {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
+ {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
+ {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
+ {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ezusb_table);
+
+/* Structure to hold all of our device specific stuff */
+struct ezusb_priv {
+ struct usb_device *udev;
+ struct net_device *dev;
+ struct mutex mtx;
+ spinlock_t req_lock;
+ struct list_head req_pending;
+ struct list_head req_active;
+ spinlock_t reply_count_lock;
+ u16 hermes_reg_fake[0x40];
+ u8 *bap_buf;
+ struct urb *read_urb;
+ int read_pipe;
+ int write_pipe;
+ u8 reply_count;
+};
+
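+/* Lifecycle of a request context.  The usual successful path is
+ * START -> QUEUED -> REQ_SUBMITTED -> REQ_COMPLETE -> COMPLETE;
+ * the other states cover out-of-order replies, failures and timeouts. */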
+enum ezusb_state {
+ EZUSB_CTX_START,
+ EZUSB_CTX_QUEUED,
+ EZUSB_CTX_REQ_SUBMITTED,
+ EZUSB_CTX_REQ_COMPLETE,
+ EZUSB_CTX_RESP_RECEIVED,
+ EZUSB_CTX_REQ_TIMEOUT,
+ EZUSB_CTX_REQ_FAILED,
+ EZUSB_CTX_RESP_TIMEOUT,
+ EZUSB_CTX_REQSUBMIT_FAIL,
+ EZUSB_CTX_COMPLETE,
+};
+
+struct request_context {
+ struct list_head list;
+ atomic_t refcount;
+ struct completion done; /* Signals that CTX is dead */
+ int killed;
+ struct urb *outurb; /* OUT for req pkt */
+ struct ezusb_priv *upriv;
+ struct ezusb_packet *buf;
+ int buf_length;
+ struct timer_list timer; /* Timeout handling */
+ enum ezusb_state state; /* Current state */
+ /* the RID that we will wait for */
+ u16 out_rid;
+ u16 in_rid;
+};
+
+
+/* Forward declarations */
+static void ezusb_ctx_complete(struct request_context *ctx);
+static void ezusb_req_queue_run(struct ezusb_priv *upriv);
+static void ezusb_bulk_in_callback(struct urb *urb);
+
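+/* Sequence counter used to match replies to requests: counts up
+ * through 1..0x7F and then wraps back to 1. */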
+static inline u8 ezusb_reply_inc(u8 count)
+{
+ if (count < 0x7F)
+ return count + 1;
+ else
+ return 1;
+}
+
+static void ezusb_request_context_put(struct request_context *ctx)
+{
+ if (!atomic_dec_and_test(&ctx->refcount))
+ return;
+
+ WARN_ON(!ctx->done.done);
+ BUG_ON(ctx->outurb->status == -EINPROGRESS);
+ BUG_ON(timer_pending(&ctx->timer));
+ usb_free_urb(ctx->outurb);
+ kfree(ctx->buf);
+ kfree(ctx);
+}
+
+static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
+ struct timer_list *timer,
+ unsigned long expire)
+{
+ if (!upriv->udev)
+ return;
+ mod_timer(timer, expire);
+}
+
+static void ezusb_request_timerfn(u_long _ctx)
+{
+ struct request_context *ctx = (void *) _ctx;
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
+ ctx->state = EZUSB_CTX_REQ_TIMEOUT;
+ } else {
+ ctx->state = EZUSB_CTX_RESP_TIMEOUT;
+ dbg("couldn't unlink");
+ atomic_inc(&ctx->refcount);
+ ctx->killed = 1;
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ }
+};
+
+static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
+ u16 out_rid, u16 in_rid)
+{
+ struct request_context *ctx;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return NULL;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
+ if (!ctx->buf) {
+ kfree(ctx);
+ return NULL;
+ }
+ ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!ctx->outurb) {
+ kfree(ctx->buf);
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->upriv = upriv;
+ ctx->state = EZUSB_CTX_START;
+ ctx->out_rid = out_rid;
+ ctx->in_rid = in_rid;
+
+ atomic_set(&ctx->refcount, 1);
+ init_completion(&ctx->done);
+
+ init_timer(&ctx->timer);
+ ctx->timer.function = ezusb_request_timerfn;
+ ctx->timer.data = (u_long) ctx;
+ return ctx;
+}
+
+
+/* Hopefully the real complete_all will soon be exported; in the
+ * meantime this should work. */
+static inline void ezusb_complete_all(struct completion *comp)
+{
+ complete(comp);
+ complete(comp);
+ complete(comp);
+ complete(comp);
+}
+
+static void ezusb_ctx_complete(struct request_context *ctx)
+{
+ struct ezusb_priv *upriv = ctx->upriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ list_del_init(&ctx->list);
+ if (upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_req_queue_run(upriv);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+
+ switch (ctx->state) {
+ case EZUSB_CTX_COMPLETE:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ if (ctx->state != EZUSB_CTX_COMPLETE)
+ stats->tx_errors++;
+ else
+ stats->tx_packets++;
+
+ netif_wake_queue(dev);
+ }
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (!upriv->udev) {
+ /* This is normal, as all request contexts get flushed
+ * when the device is disconnected */
+ err("Called, CTX not terminating, but device gone");
+ ezusb_complete_all(&ctx->done);
+ ezusb_request_context_put(ctx);
+ break;
+ }
+
+ err("Called, CTX not in terminating state.");
+ /* Things are really bad if this happens. Just leak
+ * the CTX because it may still be linked to the
+ * queue or the OUT urb may still be active.
+ * Just leaking at least prevents an Oops or Panic.
+ */
+ break;
+ }
+}
+
+/**
+ * ezusb_req_queue_run:
+ * Description:
+ * Note: Only one active CTX at any one time, because there's no
+ * other (reliable) way to match the response URB to the correct
+ * CTX.
+ **/
+static void ezusb_req_queue_run(struct ezusb_priv *upriv)
+{
+ unsigned long flags;
+ struct request_context *ctx;
+ int result;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!list_empty(&upriv->req_active))
+ goto unlock;
+
+ if (list_empty(&upriv->req_pending))
+ goto unlock;
+
+ ctx =
+ list_entry(upriv->req_pending.next, struct request_context,
+ list);
+
+ if (!ctx->upriv->udev)
+ goto unlock;
+
+ /* We need to split this off to avoid a race condition */
+ list_move_tail(&ctx->list, &upriv->req_active);
+
+ if (ctx->state == EZUSB_CTX_QUEUED) {
+ atomic_inc(&ctx->refcount);
+ result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
+ if (result) {
+ ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Fatal, failed to submit command urb."
+ " error=%d\n", result);
+
+ ezusb_ctx_complete(ctx);
+ ezusb_request_context_put(ctx);
+ goto done;
+ }
+
+ ctx->state = EZUSB_CTX_REQ_SUBMITTED;
+ ezusb_mod_timer(ctx->upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ done:
+ return;
+}
+
+static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ if (!ctx->upriv->udev) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ goto done;
+ }
+ atomic_inc(&ctx->refcount);
+ list_add_tail(&ctx->list, &upriv->req_pending);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ctx->state = EZUSB_CTX_QUEUED;
+ ezusb_req_queue_run(upriv);
+
+ done:
+ return;
+}
+
+static void ezusb_request_out_callback(struct urb *urb)
+{
+ unsigned long flags;
+ enum ezusb_state state;
+ struct request_context *ctx = urb->context;
+ struct ezusb_priv *upriv = ctx->upriv;
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+
+ del_timer(&ctx->timer);
+
+ if (ctx->killed) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ pr_warning("interrupt called with dead ctx");
+ goto out;
+ }
+
+ state = ctx->state;
+
+ if (urb->status == 0) {
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (ctx->in_rid) {
+ ctx->state = EZUSB_CTX_REQ_COMPLETE;
+ /* reply URB still pending */
+ ezusb_mod_timer(upriv, &ctx->timer,
+ jiffies + DEF_TIMEOUT);
+ spin_unlock_irqrestore(&upriv->req_lock,
+ flags);
+ break;
+ }
+ /* fall through */
+ case EZUSB_CTX_RESP_RECEIVED:
+ /* IN already received before this OUT-ACK */
+ ctx->state = EZUSB_CTX_COMPLETE;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ } else {
+ /* If someone cancels the OUT URB then its status
+ * should be either -ECONNRESET or -ENOENT.
+ */
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_RESP_RECEIVED:
+ ctx->state = EZUSB_CTX_REQ_FAILED;
+ /* fall through */
+
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_REQ_TIMEOUT:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ err("Unexpected state(0x%x, %d) in OUT URB",
+ state, urb->status);
+ break;
+ }
+ }
+ out:
+ ezusb_request_context_put(ctx);
+}
+
+static void ezusb_request_in_callback(struct ezusb_priv *upriv,
+ struct urb *urb)
+{
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ struct request_context *ctx = NULL;
+ enum ezusb_state state;
+ unsigned long flags;
+
+ /* Find the CTX on the active queue that requested this URB */
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ if (upriv->udev) {
+ struct list_head *item;
+
+ list_for_each(item, &upriv->req_active) {
+ struct request_context *c;
+ int reply_count;
+
+ c = list_entry(item, struct request_context, list);
+ reply_count =
+ ezusb_reply_inc(c->buf->req_reply_count);
+ if ((ans->ans_reply_count == reply_count)
+ && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
+ ctx = c;
+ break;
+ }
+ dbg("Skipped (0x%x/0x%x) (%d/%d)",
+ le16_to_cpu(ans->hermes_rid),
+ c->in_rid, ans->ans_reply_count, reply_count);
+ }
+ }
+
+ if (ctx == NULL) {
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ err("%s: got unexpected RID: 0x%04X", __func__,
+ le16_to_cpu(ans->hermes_rid));
+ ezusb_req_queue_run(upriv);
+ return;
+ }
+
+ /* The data we want is in the in buffer, exchange */
+ urb->transfer_buffer = ctx->buf;
+ ctx->buf = (void *) ans;
+ ctx->buf_length = urb->actual_length;
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_REQ_SUBMITTED:
+ /* We have received our response URB before
+ * our request has been acknowledged. Do NOT
+ * destroy our CTX yet, because our OUT URB
+ * is still alive ...
+ */
+ ctx->state = EZUSB_CTX_RESP_RECEIVED;
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Let the machine continue running. */
+ break;
+
+ case EZUSB_CTX_REQ_COMPLETE:
+ /* This is the usual path: our request
+ * has already been acknowledged, and
+ * we have now received the reply.
+ */
+ ctx->state = EZUSB_CTX_COMPLETE;
+
+ /* Stop the IN-reply timeout timer */
+ del_timer(&ctx->timer);
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ /* Call the completion handler */
+ ezusb_ctx_complete(ctx);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ pr_warning("Matched IN URB, unexpected context state(0x%x)",
+ state);
+ /* Throw this CTX away and try submitting another */
+ del_timer(&ctx->timer);
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ usb_unlink_urb(ctx->outurb);
+ ezusb_req_queue_run(upriv);
+ break;
+ } /* switch */
+}
+
+
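+/* Wait for a request that expects a reply to complete, fail or
+ * time out. */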
+static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
+ struct request_context *ctx)
+{
+ switch (ctx->state) {
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ case EZUSB_CTX_REQ_COMPLETE:
+ case EZUSB_CTX_RESP_RECEIVED:
+ if (in_softirq()) {
+ /* If we get called from a timer, timeout timers don't
+ * get the chance to run themselves. So we make sure
+ * that we don't sleep forever */
+ int msecs = DEF_TIMEOUT * (1000 / HZ);
+ while (!ctx->done.done && msecs--)
+ udelay(1000);
+ } else {
+ wait_event_interruptible(ctx->done.wait,
+ ctx->done.done);
+ }
+ break;
+ default:
+ /* Done or failed - nothing to wait for */
+ break;
+ }
+}
+
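+/* Not a true CRC: a simple shift-and-add checksum over the first
+ * eight bytes of the packet header. */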
+static inline u16 build_crc(struct ezusb_packet *data)
+{
+ u16 crc = 0;
+ u8 *bytes = (u8 *)data;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) + bytes[i];
+
+ return crc;
+}
+
+/**
+ * ezusb_fill_req:
+ *
+ * if data == NULL and length > 0 the data is assumed to be already in
+ * the target buffer and only the header is filled.
+ *
+ */
+static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
+ const void *data, u16 frame_type, u8 reply_count)
+{
+ int total_size = sizeof(*req) + length;
+
+ BUG_ON(total_size > BULK_BUF_SIZE);
+
+ req->magic = cpu_to_le16(EZUSB_MAGIC);
+ req->req_reply_count = reply_count;
+ req->ans_reply_count = 0;
+ req->frame_type = cpu_to_le16(frame_type);
+ req->size = cpu_to_le16(length + 4);
+ req->crc = cpu_to_le16(build_crc(req));
+ req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
+ req->hermes_rid = cpu_to_le16(rid);
+ if (data)
+ memcpy(req->data, data, length);
+ return total_size;
+}
+
+static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
+{
+ int retval = 0;
+ void *cur_buf = upriv->read_urb->transfer_buffer;
+
+ if (upriv->read_urb->status == -EINPROGRESS) {
+ dbg("urb busy, not resubmiting");
+ retval = -EBUSY;
+ goto exit;
+ }
+ usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
+ cur_buf, BULK_BUF_SIZE,
+ ezusb_bulk_in_callback, upriv);
+ upriv->read_urb->transfer_flags = 0;
+ retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
+ if (retval)
+ err("%s submit failed %d", __func__, retval);
+
+ exit:
+ return retval;
+}
+
+static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
+{
+ u8 res_val = reset; /* avoid argument promotion */
+
+ if (!upriv->udev) {
+ err("%s: !upriv->udev", __func__);
+ return -EFAULT;
+ }
+ return usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
+ sizeof(res_val), DEF_TIMEOUT);
+}
+
+static int ezusb_firmware_download(struct ezusb_priv *upriv,
+ struct ez_usb_fw *fw)
+{
+ u8 fw_buffer[FW_BUF_SIZE];
+ int retval, addr;
+ int variant_offset;
+
+ /*
+ * This byte is 1 and should be replaced with 0. The offset is
+ * 0x10AD in version 0.0.6. The byte in question should follow
+ * the end of the code pointed to by the jump in the beginning
+ * of the firmware. Also, it is read by code located at 0x358.
+ */
+ variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
+ if (variant_offset >= fw->size) {
+ printk(KERN_ERR PFX "Invalid firmware variant offset: "
+ "0x%04x\n", variant_offset);
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 1);
+ if (retval < 0)
+ goto fail;
+ for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
+ /* 0x100-0x300 should be left alone; it contains
+ * card-specific data, like USB enumeration information */
+ if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
+ continue;
+
+ memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
+ if (variant_offset >= addr &&
+ variant_offset < addr + FW_BUF_SIZE) {
+ dbg("Patching card_variant byte at 0x%04X",
+ variant_offset);
+ fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
+ }
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_FW_TRANS,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE
+ | USB_DIR_OUT,
+ addr, 0x0,
+ fw_buffer, FW_BUF_SIZE,
+ DEF_TIMEOUT);
+
+ if (retval < 0)
+ goto fail;
+ }
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0)
+ goto fail;
+
+ goto exit;
+ fail:
+ printk(KERN_ERR PFX "Firmware download failed, error %d\n",
+ retval);
+ exit:
+ return retval;
+}
+
+static int ezusb_access_ltv(struct ezusb_priv *upriv,
+ struct request_context *ctx,
+ u16 length, const void *data, u16 frame_type,
+ void *ans_buff, int ans_size, u16 *ans_length)
+{
+ int req_size;
+ int retval = 0;
+ enum ezusb_state state;
+
+ BUG_ON(in_irq());
+
+ if (!upriv->udev) {
+ dbg("Device disconnected");
+ return -ENODEV;
+ }
+
+ if (upriv->read_urb->status != -EINPROGRESS)
+ err("%s: in urb not pending", __func__);
+
+ /* protect upriv->reply_count, guarantee sequential numbers */
+ spin_lock_bh(&upriv->reply_count_lock);
+ req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
+ frame_type, upriv->reply_count);
+ usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
+ ctx->buf, req_size,
+ ezusb_request_out_callback, ctx);
+
+ if (ctx->in_rid)
+ upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
+
+ ezusb_req_enqueue_run(upriv, ctx);
+
+ spin_unlock_bh(&upriv->reply_count_lock);
+
+ if (ctx->in_rid)
+ ezusb_req_ctx_wait(upriv, ctx);
+
+ state = ctx->state;
+ switch (state) {
+ case EZUSB_CTX_COMPLETE:
+ retval = ctx->outurb->status;
+ break;
+
+ case EZUSB_CTX_QUEUED:
+ case EZUSB_CTX_REQ_SUBMITTED:
+ if (!ctx->in_rid)
+ break;
+ default:
+ err("%s: Unexpected context state %d", __func__,
+ state);
+ /* fall through */
+ case EZUSB_CTX_REQ_TIMEOUT:
+ case EZUSB_CTX_REQ_FAILED:
+ case EZUSB_CTX_RESP_TIMEOUT:
+ case EZUSB_CTX_REQSUBMIT_FAIL:
+ printk(KERN_ERR PFX "Access failed, resetting (state %d,"
+ " reply_count %d)\n", state, upriv->reply_count);
+ upriv->reply_count = 0;
+ if (state == EZUSB_CTX_REQ_TIMEOUT
+ || state == EZUSB_CTX_RESP_TIMEOUT) {
+ printk(KERN_ERR PFX "ctx timed out\n");
+ retval = -ETIMEDOUT;
+ } else {
+ printk(KERN_ERR PFX "ctx failed\n");
+ retval = -EFAULT;
+ }
+ goto exit;
+ break;
+ }
+ if (ctx->in_rid) {
+ struct ezusb_packet *ans = ctx->buf;
+ int exp_len;
+
+ if (ans->hermes_len != 0)
+ exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
+ else
+ exp_len = 14;
+
+ if (exp_len != ctx->buf_length) {
+ err("%s: length mismatch for RID 0x%04x: "
+ "expected %d, got %d", __func__,
+ ctx->in_rid, exp_len, ctx->buf_length);
+ retval = -EIO;
+ goto exit;
+ }
+
+ if (ans_buff)
+ memcpy(ans_buff, ans->data,
+ min_t(int, exp_len, ans_size));
+ if (ans_length)
+ *ans_length = le16_to_cpu(ans->hermes_len);
+ }
+ exit:
+ ezusb_request_context_put(ctx);
+ return retval;
+}
+
+static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
+ u16 length, const void *data)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ u16 frame_type;
+ struct request_context *ctx;
+
+ if (length == 0)
+ return -EINVAL;
+
+ length = HERMES_RECLEN_TO_BYTES(length);
+
+ /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
+ * set to be empty, but the USB bridge doesn't like it */
+ if (length == 0)
+ return 0;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (rid == EZUSB_RID_TX)
+ frame_type = EZUSB_FRAME_DATA;
+ else
+ frame_type = EZUSB_FRAME_CONTROL;
+
+ return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
+ NULL, 0, NULL);
+}
+
+static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
+ unsigned bufsize, u16 *length, void *buf)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ if ((bufsize < 0) || (bufsize % 2))
+ return -EINVAL;
+
+ ctx = ezusb_alloc_ctx(upriv, rid, rid);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
+ buf, bufsize, length);
+}
+
+static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
+ u16 parm2, struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ cpu_to_le16(parm1),
+ cpu_to_le16(parm2),
+ };
+ dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X",
+ cmd, parm0, parm1, parm2);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+ struct hermes_response *resp)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ __le16 data[4] = {
+ cpu_to_le16(cmd),
+ cpu_to_le16(parm0),
+ 0,
+ 0,
+ };
+ dbg("0x%04X, parm0 0x%04X", cmd, parm0);
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_bap_pread(struct hermes *hw, int bap,
+ void *buf, int len, u16 id, u16 offset)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
+ int actual_length = upriv->read_urb->actual_length;
+
+ if (id == EZUSB_RID_RX) {
+ if ((sizeof(*ans) + offset + len) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in rx frame\n");
+ return -EINVAL;
+ }
+ memcpy(buf, ans->data + offset, len);
+ return 0;
+ }
+
+ if (EZUSB_IS_INFO(id)) {
+ /* Include 4 bytes for length/type */
+ if ((sizeof(*ans) + offset + len - 4) > actual_length) {
+ printk(KERN_ERR PFX "BAP read beyond buffer end "
+ "in info frame\n");
+ return -EFAULT;
+ }
+ memcpy(buf, ans->data + offset - 4, len);
+ } else {
+ printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
+ u32 pda_addr, u16 pda_len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le16 data[] = {
+ cpu_to_le16(pda_addr & 0xffff),
+ cpu_to_le16(pda_len - 4)
+ };
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* wl_lkm does not include PDA size in the PDA area.
+ * We will pad the information into pda, so other routines
+ * don't have to be modified */
+ pda[0] = cpu_to_le16(pda_len - 2);
+ /* Includes CFG_PROD_DATA but not itself */
+ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
+ NULL);
+}
+
+static int ezusb_program_init(struct hermes *hw, u32 entry_point)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(entry_point);
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_end(struct hermes *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, 0, NULL,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program_bytes(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ struct request_context *ctx;
+ __le32 data = cpu_to_le32(addr);
+ int err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+ if (err)
+ return err;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
+ if (!ctx)
+ return -ENOMEM;
+
+ return ezusb_access_ltv(upriv, ctx, len, buf,
+ EZUSB_FRAME_CONTROL, NULL, 0, NULL);
+}
+
+static int ezusb_program(struct hermes *hw, const char *buf,
+ u32 addr, u32 len)
+{
+ u32 ch_addr;
+ u32 ch_len;
+ int err = 0;
+
+ /* We can only send 2048 bytes out of the bulk xmit at a time,
+ * so we have to split any programming into chunks of <2048
+ * bytes. */
+
+ ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
+ ch_addr = addr;
+
+ while (ch_addr < (addr + len)) {
+ pr_debug("Programming subblock of length %d "
+ "to address 0x%08x. Data @ %p\n",
+ ch_len, ch_addr, &buf[ch_addr - addr]);
+
+ err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
+ ch_addr, ch_len);
+ if (err)
+ break;
+
+ ch_addr += ch_len;
+ ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
+ (addr + len - ch_addr) : MAX_DL_SIZE;
+ }
+
+ return err;
+}
+
+static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct orinoco_private *priv = ndev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+ struct ezusb_priv *upriv = priv->card;
+ u8 mic[MICHAEL_MIC_LEN+1];
+ int err = 0;
+ int tx_control;
+ unsigned long flags;
+ struct request_context *ctx;
+ u8 *buf;
+ int tx_size;
+
+ if (!netif_running(dev)) {
+ printk(KERN_ERR "%s: Tx on stopped device!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_queue_stopped(dev)) {
+ printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (orinoco_lock(priv, &flags) != 0) {
+ printk(KERN_ERR
+ "%s: ezusb_xmit() called while hw_unavailable\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (!netif_carrier_ok(dev) ||
+ (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
+ /* Oops, the firmware hasn't established a connection,
+ silently drop the packet (this seems to be the
+ safest approach). */
+ goto drop;
+ }
+
+ /* Check packet length */
+ if (skb->len < ETH_HLEN)
+ goto drop;
+
+ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
+ if (!ctx)
+ goto busy;
+
+ memset(ctx->buf, 0, BULK_BUF_SIZE);
+ buf = ctx->buf->data;
+
+ tx_control = 0;
+
+ err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
+ &mic[0]);
+ if (err)
+ goto drop;
+
+ {
+ __le16 *tx_cntl = (__le16 *)buf;
+ *tx_cntl = cpu_to_le16(tx_control);
+ buf += sizeof(*tx_cntl);
+ }
+
+ memcpy(buf, skb->data, skb->len);
+ buf += skb->len;
+
+ if (tx_control & HERMES_TXCTRL_MIC) {
+ u8 *m = mic;
+ /* Mic has been offset so it can be copied to an even
+ * address. We're copying everything anyway, so we
+ * don't need to copy that first byte. */
+ if (skb->len % 2)
+ m++;
+ memcpy(buf, m, MICHAEL_MIC_LEN);
+ buf += MICHAEL_MIC_LEN;
+ }
+
+ /* Finally, we actually initiate the send */
+ netif_stop_queue(dev);
+
+ /* The card may behave better if we send evenly sized usb transfers */
+ tx_size = ALIGN(buf - ctx->buf->data, 2);
+
+ err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
+ EZUSB_FRAME_DATA, NULL, 0, NULL);
+
+ if (err) {
+ netif_start_queue(dev);
+ if (net_ratelimit())
+ printk(KERN_ERR "%s: Error %d transmitting packet\n",
+ dev->name, err);
+ goto busy;
+ }
+
+ dev->trans_start = jiffies;
+ stats->tx_bytes += skb->len;
+ goto ok;
+
+ drop:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+
+ ok:
+ orinoco_unlock(priv, &flags);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
+ busy:
+ orinoco_unlock(priv, &flags);
+ return NETDEV_TX_BUSY;
+}
+
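+/* The USB bridge has no NIC buffer to allocate; transmits always use
+ * the fixed EZUSB_RID_TX identifier. */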
+static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
+{
+ *fid = EZUSB_RID_TX;
+ return 0;
+}
+
+
+static int ezusb_hard_reset(struct orinoco_private *priv)
+{
+ struct ezusb_priv *upriv = priv->card;
+ int retval = ezusb_8051_cpucs(upriv, 1);
+
+ if (retval < 0) {
+ err("Failed to reset");
+ return retval;
+ }
+
+ retval = ezusb_8051_cpucs(upriv, 0);
+ if (retval < 0) {
+ err("Failed to unreset");
+ return retval;
+ }
+
+ dbg("sending control message");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIGER,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x0, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
+ return retval;
+ }
+#if 0
+ dbg("Sending EZUSB_REQUEST_TRIG_AC");
+ retval = usb_control_msg(upriv->udev,
+ usb_sndctrlpipe(upriv->udev, 0),
+ EZUSB_REQUEST_TRIG_AC,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
+ DEF_TIMEOUT);
+ if (retval < 0) {
+ err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
+ return retval;
+ }
+#endif
+
+ return 0;
+}
+
+
+static int ezusb_init(hermes_t *hw)
+{
+ struct ezusb_priv *upriv = hw->priv;
+ int retval;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ upriv->reply_count = 0;
+ /* Write the MAGIC number on the simulated registers to keep
+ * orinoco.c happy */
+ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
+ hermes_write_regn(hw, RXFID, EZUSB_RID_RX);
+
+ usb_kill_urb(upriv->read_urb);
+ ezusb_submit_in_urb(upriv);
+
+ retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
+ HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
+ if (retval < 0) {
+ printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
+ return retval;
+ }
+
+ retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
+ if (retval < 0) {
+ printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void ezusb_bulk_in_callback(struct urb *urb)
+{
+ struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
+ struct ezusb_packet *ans = urb->transfer_buffer;
+ u16 crc;
+ u16 hermes_rid;
+
+ if (upriv->udev == NULL) {
+ dbg("disconnected");
+ return;
+ }
+
+ if (urb->status == -ETIMEDOUT) {
+ /* When a device gets unplugged we get this every time
+ * we resubmit, flooding the logs. Since we don't use
+		 * USB timeouts, it shouldn't happen at any other time. */
+		pr_warning("%s: urb timed out, not resubmitting", __func__);
+ return;
+ }
+ if (urb->status == -ECONNABORTED) {
+ pr_warning("%s: connection abort, resubmiting urb",
+ __func__);
+ goto resubmit;
+ }
+ if ((urb->status == -EILSEQ)
+ || (urb->status == -ENOENT)
+ || (urb->status == -ECONNRESET)) {
+ dbg("status %d, not resubmiting", urb->status);
+ return;
+ }
+ if (urb->status)
+ dbg("status: %d length: %d",
+ urb->status, urb->actual_length);
+ if (urb->actual_length < sizeof(*ans)) {
+ err("%s: short read, ignoring", __func__);
+ goto resubmit;
+ }
+ crc = build_crc(ans);
+ if (le16_to_cpu(ans->crc) != crc) {
+ err("CRC error, ignoring packet");
+ goto resubmit;
+ }
+
+ hermes_rid = le16_to_cpu(ans->hermes_rid);
+ if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
+ ezusb_request_in_callback(upriv, urb);
+ } else if (upriv->dev) {
+ struct net_device *dev = upriv->dev;
+ struct orinoco_private *priv = ndev_priv(dev);
+ hermes_t *hw = &priv->hw;
+
+ if (hermes_rid == EZUSB_RID_RX) {
+ __orinoco_ev_rx(dev, hw);
+ } else {
+ hermes_write_regn(hw, INFOFID,
+ le16_to_cpu(ans->hermes_rid));
+ __orinoco_ev_info(dev, hw);
+ }
+ }
+
+ resubmit:
+ if (upriv->udev)
+ ezusb_submit_in_urb(upriv);
+}
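
The status handling above boils down to a small policy: give up on statuses that indicate the URB was killed, unlinked or the device vanished, and resubmit otherwise. A condensed, hedged view of that decision:

    /* Sketch only: condensed form of the resubmit policy above. */
    static bool ezusb_should_resubmit(int status)
    {
            switch (status) {
            case -ETIMEDOUT:        /* device is probably gone */
            case -EILSEQ:
            case -ENOENT:           /* urb was killed */
            case -ECONNRESET:       /* urb was unlinked */
                    return false;
            default:                /* including 0 and -ECONNABORTED */
                    return true;
            }
    }
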
+
+static inline void ezusb_delete(struct ezusb_priv *upriv)
+{
+ struct net_device *dev;
+ struct list_head *item;
+ struct list_head *tmp_item;
+ unsigned long flags;
+
+ BUG_ON(in_interrupt());
+ BUG_ON(!upriv);
+
+ dev = upriv->dev;
+ mutex_lock(&upriv->mtx);
+
+ upriv->udev = NULL; /* No timer will be rearmed from here */
+
+ usb_kill_urb(upriv->read_urb);
+
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ list_for_each_safe(item, tmp_item, &upriv->req_active) {
+ struct request_context *ctx;
+ int err;
+
+ ctx = list_entry(item, struct request_context, list);
+ atomic_inc(&ctx->refcount);
+
+ ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
+ err = usb_unlink_urb(ctx->outurb);
+
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+ if (err == -EINPROGRESS)
+ wait_for_completion(&ctx->done);
+
+ del_timer_sync(&ctx->timer);
+		/* FIXME: there is a slight chance that the irq handler is
+		 * still running */
+ if (!list_empty(&ctx->list))
+ ezusb_ctx_complete(ctx);
+
+ ezusb_request_context_put(ctx);
+ spin_lock_irqsave(&upriv->req_lock, flags);
+ }
+ spin_unlock_irqrestore(&upriv->req_lock, flags);
+
+ list_for_each_safe(item, tmp_item, &upriv->req_pending)
+ ezusb_ctx_complete(list_entry(item,
+ struct request_context, list));
+
+ if (upriv->read_urb->status == -EINPROGRESS)
+ printk(KERN_ERR PFX "Some URB in progress\n");
+
+ mutex_unlock(&upriv->mtx);
+
+ kfree(upriv->read_urb->transfer_buffer);
+ if (upriv->bap_buf != NULL)
+ kfree(upriv->bap_buf);
+ if (upriv->read_urb != NULL)
+ usb_free_urb(upriv->read_urb);
+ if (upriv->dev) {
+ struct orinoco_private *priv = ndev_priv(upriv->dev);
+ orinoco_if_del(priv);
+ free_orinocodev(priv);
+ }
+}
+
+static void ezusb_lock_irqsave(spinlock_t *lock,
+ unsigned long *flags) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irqrestore(spinlock_t *lock,
+ unsigned long *flags) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
+
+static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
+{
+ spin_lock_bh(lock);
+}
+
+static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
+{
+ spin_unlock_bh(lock);
+}
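
These helpers back the lock_* members of the ops table below: the USB back end implements the "irqsave" ops with the _bh spinlock variants, and core code always goes through the ops, as the spectrum_cs change later in this patch shows. Usage sketch of that pattern:

    priv->hw.ops->lock_irqsave(&priv->lock, &flags);
    priv->hw_unavailable++;
    priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
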
+
+static const struct hermes_ops ezusb_ops = {
+ .init = ezusb_init,
+ .cmd_wait = ezusb_docmd_wait,
+ .init_cmd_wait = ezusb_doicmd_wait,
+ .allocate = ezusb_allocate,
+ .read_ltv = ezusb_read_ltv,
+ .write_ltv = ezusb_write_ltv,
+ .bap_pread = ezusb_bap_pread,
+ .read_pda = ezusb_read_pda,
+ .program_init = ezusb_program_init,
+ .program_end = ezusb_program_end,
+ .program = ezusb_program,
+ .lock_irqsave = ezusb_lock_irqsave,
+ .unlock_irqrestore = ezusb_unlock_irqrestore,
+ .lock_irq = ezusb_lock_irq,
+ .unlock_irq = ezusb_unlock_irq,
+};
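
With hw->ops pointing at this table, bus-independent orinoco code issues commands through the indirection instead of touching Hermes registers directly. A minimal sketch (hypothetical wrapper name, same call shape as the wext.c hunk further down):

    static int set_channel_fast(struct hermes *hw, int chan)
    {
            /* Sketch only: dispatches via the ops table installed above. */
            return hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
                                     HERMES_TEST_SET_CHANNEL, chan, NULL);
    }
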
+
+static const struct net_device_ops ezusb_netdev_ops = {
+ .ndo_open = orinoco_open,
+ .ndo_stop = orinoco_stop,
+ .ndo_start_xmit = ezusb_xmit,
+ .ndo_set_multicast_list = orinoco_set_multicast_list,
+ .ndo_change_mtu = orinoco_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = orinoco_tx_timeout,
+ .ndo_get_stats = orinoco_get_stats,
+};
+
+static int ezusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct orinoco_private *priv;
+ hermes_t *hw;
+ struct ezusb_priv *upriv = NULL;
+ struct usb_interface_descriptor *iface_desc;
+ struct usb_endpoint_descriptor *ep;
+	const struct firmware *fw_entry = NULL;
+ int retval = 0;
+ int i;
+
+ priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
+ ezusb_hard_reset, NULL);
+ if (!priv) {
+ err("Couldn't allocate orinocodev");
+ goto exit;
+ }
+
+ hw = &priv->hw;
+
+ upriv = priv->card;
+
+ mutex_init(&upriv->mtx);
+ spin_lock_init(&upriv->reply_count_lock);
+
+ spin_lock_init(&upriv->req_lock);
+ INIT_LIST_HEAD(&upriv->req_pending);
+ INIT_LIST_HEAD(&upriv->req_active);
+
+ upriv->udev = udev;
+
+ hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
+ hw->reg_spacing = HERMES_16BIT_REGSPACING;
+ hw->priv = upriv;
+ hw->ops = &ezusb_ops;
+
+ /* set up the endpoint information */
+ /* check out the endpoints */
+
+ iface_desc = &interface->altsetting[0].desc;
+ for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
+ ep = &interface->altsetting[0].endpoint[i].desc;
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_IN) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk in endpoint */
+ if (upriv->read_urb != NULL) {
+ pr_warning("Found a second bulk in ep, ignored");
+ continue;
+ }
+
+ upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!upriv->read_urb) {
+ err("No free urbs available");
+ goto error;
+ }
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk in: wMaxPacketSize!= 64");
+ if (ep->bEndpointAddress != (2 | USB_DIR_IN))
+ pr_warning("bulk in: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->read_pipe = usb_rcvbulkpipe(udev,
+ ep->
+ bEndpointAddress);
+ upriv->read_urb->transfer_buffer =
+ kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->read_urb->transfer_buffer) {
+ err("Couldn't allocate IN buffer");
+ goto error;
+ }
+ }
+
+ if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ == USB_DIR_OUT) &&
+ ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ == USB_ENDPOINT_XFER_BULK)) {
+ /* we found a bulk out endpoint */
+ if (upriv->bap_buf != NULL) {
+ pr_warning("Found a second bulk out ep, ignored");
+ continue;
+ }
+
+ if (le16_to_cpu(ep->wMaxPacketSize) != 64)
+ pr_warning("bulk out: wMaxPacketSize != 64");
+ if (ep->bEndpointAddress != 2)
+ pr_warning("bulk out: bEndpointAddress: %d",
+ ep->bEndpointAddress);
+ upriv->write_pipe = usb_sndbulkpipe(udev,
+ ep->
+ bEndpointAddress);
+ upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
+ if (!upriv->bap_buf) {
+ err("Couldn't allocate bulk_out_buffer");
+ goto error;
+ }
+ }
+ }
+ if (!upriv->bap_buf || !upriv->read_urb) {
+ err("Didn't find the required bulk endpoints");
+ goto error;
+ }
+
+ if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
+ &interface->dev) == 0) {
+ firmware.size = fw_entry->size;
+ firmware.code = fw_entry->data;
+ }
+ if (firmware.size && firmware.code) {
+ ezusb_firmware_download(upriv, &firmware);
+ } else {
+ err("No firmware to download");
+ goto error;
+ }
+
+ if (ezusb_hard_reset(priv) < 0) {
+ err("Cannot reset the device");
+ goto error;
+ }
+
+	/* If the firmware is already downloaded, orinoco.c will call
+	 * ezusb_init; but if the firmware is not there yet, that call
+	 * would make the kernel very unstable, so we try initializing
+	 * here and bail out on error. */
+ if (ezusb_init(hw) < 0) {
+ err("Couldn't initialize the device");
+ err("Firmware may not be downloaded or may be wrong.");
+ goto error;
+ }
+
+ /* Initialise the main driver */
+ if (orinoco_init(priv) != 0) {
+ err("orinoco_init() failed\n");
+ goto error;
+ }
+
+ if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
+ upriv->dev = NULL;
+ err("%s: orinoco_if_add() failed", __func__);
+ goto error;
+ }
+ upriv->dev = priv->ndev;
+
+ goto exit;
+
+ error:
+ ezusb_delete(upriv);
+	if (!upriv->dev) {
+		/* upriv->dev was NULL, so ezusb_delete() didn't free priv */
+ free_orinocodev(priv);
+ }
+ upriv = NULL;
+ retval = -EFAULT;
+ exit:
+ if (fw_entry) {
+ firmware.code = NULL;
+ firmware.size = 0;
+ release_firmware(fw_entry);
+ }
+ usb_set_intfdata(interface, upriv);
+ return retval;
+}
+
+
+static void ezusb_disconnect(struct usb_interface *intf)
+{
+ struct ezusb_priv *upriv = usb_get_intfdata(intf);
+ usb_set_intfdata(intf, NULL);
+ ezusb_delete(upriv);
+ printk(KERN_INFO PFX "Disconnected\n");
+}
+
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver orinoco_driver = {
+ .name = DRIVER_NAME,
+ .probe = ezusb_probe,
+ .disconnect = ezusb_disconnect,
+ .id_table = ezusb_table,
+};
+
+/* Can't be declared "const" or the whole __initdata section will
+ * become const */
+static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
+ " (Manuel Estrada Sainz)";
+
+static int __init ezusb_module_init(void)
+{
+ int err;
+
+ printk(KERN_DEBUG "%s\n", version);
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&orinoco_driver);
+ if (err < 0) {
+ printk(KERN_ERR PFX "usb_register failed, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit ezusb_module_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&orinoco_driver);
+}
+
+
+module_init(ezusb_module_init);
+module_exit(ezusb_module_exit);
+
+MODULE_AUTHOR("Manuel Estrada Sainz");
+MODULE_DESCRIPTION
+ ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
+MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 330d42d4533..4300d9db7d8 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -127,7 +127,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
- u8 *ie;
+ const u8 *ie;
u64 timestamp;
s32 signal;
u16 capability;
@@ -136,7 +136,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
int chan, freq;
ie_len = len - sizeof(*bss);
- ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS);
+ ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
freq = ieee80211_dsss_chan_to_freq(chan);
channel = ieee80211_get_channel(wiphy, freq);
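
cfg80211_find_ie() returns a pointer to the start of the matching element (ie[0] is the ID, ie[1] the length, ie[2] the first data byte), which is why the channel is read from ie[2] above. A hedged sketch of the same lookup as a hypothetical helper:

    static int channel_from_ies(const u8 *ies, size_t ie_len)
    {
            /* Sketch: DS Parameter Set carries the current channel. */
            const u8 *ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ie_len);

            return (ie && ie[1] >= 1) ? ie[2] : 0;
    }
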
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 59bda240fdc..b51a9adc80f 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
* struct orinoco_private */
struct orinoco_pccard {
struct pcmcia_device *p_dev;
- dev_node_t node;
};
/********************************************************************/
@@ -193,10 +192,6 @@ spectrum_cs_probe(struct pcmcia_device *link)
card->p_dev = link;
link->priv = priv;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = orinoco_interrupt;
-
/* General socket configuration defaults can go here. In this
* client, we assume very little, and rely on the CIS for
* almost everything. In most clients, many details (i.e.,
@@ -218,8 +213,7 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- if (link->dev_node)
- orinoco_if_del(priv);
+ orinoco_if_del(priv);
spectrum_cs_release(link);
@@ -304,7 +298,6 @@ static int
spectrum_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
hermes_t *hw = &priv->hw;
int ret;
void __iomem *mem;
@@ -332,12 +325,7 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- * Allocate an interrupt line. Note that this does not assign
- * a handler to the interrupt, unless the 'Handler' member of
- * the irq structure is initialized.
- */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
if (ret)
goto failed;
@@ -349,6 +337,7 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ hw->eeprom_pda = true;
/*
* This actually configures the PCMCIA socket -- setting up
@@ -359,9 +348,6 @@ spectrum_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /* Ok, we have the configuration, prepare to register the netdev */
- card->node.major = card->node.minor = 0;
-
/* Reset card */
if (spectrum_cs_hard_reset(priv) != 0)
goto failed;
@@ -374,17 +360,11 @@ spectrum_cs_config(struct pcmcia_device *link)
/* Register an interface with the stack */
if (orinoco_if_add(priv, link->io.BasePort1,
- link->irq.AssignedIRQ) != 0) {
+ link->irq, NULL) != 0) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto failed;
}
- /* At this point, the dev_node_t structure(s) needs to be
- * initialized and arranged in a linked list at link->dev_node. */
- strcpy(card->node.dev_name, priv->ndev->name);
- link->dev_node = &card->node; /* link->dev_node being non-NULL is also
- * used to indicate that the
- * net_device has been registered */
return 0;
failed:
@@ -405,9 +385,9 @@ spectrum_cs_release(struct pcmcia_device *link)
/* We're committed to taking the device away now, so mark the
* hardware as unavailable */
- spin_lock_irqsave(&priv->lock, flags);
+ priv->hw.ops->lock_irqsave(&priv->lock, &flags);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
pcmcia_disable_device(link);
if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index fbcc6e1a2e1..5775124e2ae 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -458,7 +458,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
hermes_t *hw = &priv->hw;
- err = hermes_docmd_wait(hw, HERMES_CMD_TEST |
+ err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
chan, NULL);
}
@@ -538,125 +538,6 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-static int orinoco_ioctl_setrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int val = rrq->value;
- unsigned long flags;
-
- if (rrq->disabled)
- val = 2347;
-
- if ((val < 0) || (val > 2347))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->rts_thresh = val;
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getrts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- rrq->value = priv->rts_thresh;
- rrq->disabled = (rrq->value == 2347);
- rrq->fixed = 1;
-
- return 0;
-}
-
-static int orinoco_ioctl_setfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- if (frq->disabled)
- priv->mwo_robust = 0;
- else {
- if (frq->fixed)
- printk(KERN_WARNING "%s: Fixed fragmentation "
- "is not supported on this firmware. "
- "Using MWO robust instead.\n",
- dev->name);
- priv->mwo_robust = 1;
- }
- } else {
- if (frq->disabled)
- priv->frag_thresh = 2346;
- else {
- if ((frq->value < 256) || (frq->value > 2346))
- err = -EINVAL;
- else
- /* must be even */
- priv->frag_thresh = frq->value & ~0x1;
- }
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getfrag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *frq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err;
- u16 val;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (priv->has_mwo) {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMWOROBUST_AGERE,
- &val);
- if (err)
- val = 0;
-
- frq->value = val ? 2347 : 0;
- frq->disabled = !val;
- frq->fixed = 0;
- } else {
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
- &val);
- if (err)
- val = 0;
-
- frq->value = val;
- frq->disabled = (val >= 2346);
- frq->fixed = 1;
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_setrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq,
@@ -1201,60 +1082,6 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
return ret;
}
-static int orinoco_ioctl_getretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rrq,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
- int err = 0;
- u16 short_limit, long_limit, lifetime;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
- &short_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
- &long_limit);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
- &lifetime);
- if (err)
- goto out;
-
- rrq->disabled = 0; /* Can't be disabled */
-
- /* Note : by default, display the retry number */
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- rrq->flags = IW_RETRY_LIFETIME;
- rrq->value = lifetime * 1000; /* ??? */
- } else {
- /* By default, display the min number */
- if ((rrq->flags & IW_RETRY_LONG)) {
- rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- rrq->value = long_limit;
- } else {
- rrq->flags = IW_RETRY_LIMIT;
- rrq->value = short_limit;
- if (short_limit != long_limit)
- rrq->flags |= IW_RETRY_SHORT;
- }
- }
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
static int orinoco_ioctl_reset(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
@@ -1446,8 +1273,8 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
- extra);
+ err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
+ extra);
if (err)
goto out;
@@ -1506,46 +1333,44 @@ static const struct iw_priv_args orinoco_privtab[] = {
* Structures to export the Wireless Handlers
*/
-#define STD_IW_HANDLER(id, func) \
- [IW_IOCTL_IDX(id)] = (iw_handler) func
static const iw_handler orinoco_handler[] = {
- STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
- STD_IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
- STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
- STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
- STD_IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode),
- STD_IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode),
- STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
- STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
- STD_IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange),
- STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
- STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
- STD_IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan),
- STD_IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan),
- STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
- STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
- STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
- STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
- STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
- STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
- STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
- STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
- STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
- STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
- STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
- STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
- STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
- STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
- STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
- STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
- STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
- STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
- STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
- STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
+ IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
+ IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+ IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
+ IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
+ IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
+ IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
+ IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
+ IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
+ IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
+ IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
+ IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
+ IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
+ IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
+ IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
+ IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
+ IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
+ IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
+ IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
+ IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
+ IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
+ IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
+ IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
+ IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
+ IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
+ IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
+ IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
+ IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
+ IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
+ IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
+ IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
};
@@ -1553,15 +1378,15 @@ static const iw_handler orinoco_handler[] = {
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
- [0] = (iw_handler) orinoco_ioctl_reset,
- [1] = (iw_handler) orinoco_ioctl_reset,
- [2] = (iw_handler) orinoco_ioctl_setport3,
- [3] = (iw_handler) orinoco_ioctl_getport3,
- [4] = (iw_handler) orinoco_ioctl_setpreamble,
- [5] = (iw_handler) orinoco_ioctl_getpreamble,
- [6] = (iw_handler) orinoco_ioctl_setibssport,
- [7] = (iw_handler) orinoco_ioctl_getibssport,
- [9] = (iw_handler) orinoco_ioctl_getrid,
+ [0] = (iw_handler)orinoco_ioctl_reset,
+ [1] = (iw_handler)orinoco_ioctl_reset,
+ [2] = (iw_handler)orinoco_ioctl_setport3,
+ [3] = (iw_handler)orinoco_ioctl_getport3,
+ [4] = (iw_handler)orinoco_ioctl_setpreamble,
+ [5] = (iw_handler)orinoco_ioctl_getpreamble,
+ [6] = (iw_handler)orinoco_ioctl_setibssport,
+ [7] = (iw_handler)orinoco_ioctl_getibssport,
+ [9] = (iw_handler)orinoco_ioctl_getrid,
};
const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a7cb9eb759a..c072f41747c 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,7 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index c24067f1a0c..07c4528f6e6 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -132,7 +132,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- struct sk_buff **rx_buf)
+ struct sk_buff **rx_buf, u32 index)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
idx = le32_to_cpu(ring_control->host_idx[ring_index]);
limit = idx;
- limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
+ limit -= index;
limit = ring_limit - limit;
i = idx % ring_limit;
@@ -232,7 +232,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
}
- p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
+ p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
@@ -445,10 +445,10 @@ static int p54p_open(struct ieee80211_hw *dev)
priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
- ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+ ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
- ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
+ ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
P54P_READ(ring_control_base);
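
Passing the caller's snapshot of the device index into p54p_refill_rx_ring() keeps the occupancy math consistent with the entries the caller has just processed. The arithmetic itself reduces to:

    /* Sketch of the calculation in p54p_refill_rx_ring(); unsigned
     * subtraction handles index wrap-around. */
    static u32 refillable_slots(u32 host_idx, u32 device_idx_snapshot,
                                u32 ring_limit)
    {
            return ring_limit - (host_idx - device_idx_snapshot);
    }
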
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 743a6c68b29..d5b197b4d5b 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -875,7 +875,6 @@ static void p54u_stop(struct ieee80211_hw *dev)
the hardware is still usable next time we want to start it.
until then, we just stop listening to the hardware.. */
p54u_free_urbs(dev);
- return;
}
static int __devinit p54u_probe(struct usb_interface *intf,
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 66057999a93..4e6891099d4 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,7 +38,7 @@ static void p54_dump_tx_queue(struct p54_common *priv)
u32 largest_hole = 0, free;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
- printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
+ printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n",
wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));
prev_addr = priv->rx_start;
@@ -350,7 +350,6 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
- rx_status->noise = priv->noise;
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index a45818ebfdf..8d1190c0f06 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -210,8 +210,6 @@ prism54_update_stats(struct work_struct *work)
priv->local_iwstatistics.discard.retries = r.u;
mutex_unlock(&priv->stats_lock);
-
- return;
}
struct iw_statistics *
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 689d59a13d5..2c8cc954d1b 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -228,14 +228,14 @@ islpci_interrupt(int irq, void *config)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_FUNCTION_CALLS,
- "IRQ: Identification register 0x%p 0x%x \n", device, reg);
+ "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif
/* check for each bit in the register separately */
if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
/* Queue has been updated */
- DEBUG(SHOW_TRACING, "IRQ: Update flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
DEBUG(SHOW_QUEUE_INDEXES,
"CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
@@ -301,7 +301,7 @@ islpci_interrupt(int irq, void *config)
ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "Received frame in Data Low Queue \n");
+ "Received frame in Data Low Queue\n");
#endif
islpci_eth_receive(priv);
}
@@ -326,7 +326,7 @@ islpci_interrupt(int irq, void *config)
/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "IRQ: Init flag, device initialized \n");
+ "IRQ: Init flag, device initialized\n");
#endif
wake_up(&priv->reset_done);
}
@@ -334,7 +334,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_SLEEP) {
/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
isl38xx_handle_sleep_request(priv->control_block,
&powerstate,
@@ -344,7 +344,7 @@ islpci_interrupt(int irq, void *config)
if (reg & ISL38XX_INT_IDENT_WAKEUP) {
/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
+ DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif
isl38xx_handle_wakeup(priv->control_block,
@@ -635,7 +635,7 @@ islpci_alloc_memory(islpci_private *priv)
ioremap(pci_resource_start(priv->pdev, 0),
ISL38XX_PCI_MEM_SIZE))) {
/* error in remapping the PCI device memory address range */
- printk(KERN_ERR "PCI memory remapping failed \n");
+ printk(KERN_ERR "PCI memory remapping failed\n");
return -1;
}
@@ -902,7 +902,7 @@ islpci_setup(struct pci_dev *pdev)
if (register_netdev(ndev)) {
DEBUG(SHOW_ERROR_MESSAGES,
- "ERROR: register_netdev() failed \n");
+ "ERROR: register_netdev() failed\n");
goto do_islpci_free_memory;
}
@@ -946,7 +946,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
if (!priv->state_off)
priv->state = new_state;
break;
- };
+ }
#if 0
printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
priv->ndev->name, old_state, new_state, priv->state_off);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ac99eaaeabc..2fc52bc2d7d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -90,7 +90,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif
/* lock the driver code */
@@ -141,7 +141,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
}
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
+ DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
src, skb->len);
#endif
} else {
@@ -224,8 +224,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
priv->data_low_tx_full = 1;
}
- /* set the transmission time */
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -320,7 +318,7 @@ islpci_eth_receive(islpci_private *priv)
int discard = 0;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
#endif
/* the device has written an Ethernet frame in the data area
@@ -432,7 +430,7 @@ islpci_eth_receive(islpci_private *priv)
skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
if (unlikely(skb == NULL)) {
/* error allocating an sk_buff structure elements */
- DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
+ DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
break;
}
skb_reserve(skb, (4 - (long) skb->data) & 0x03);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index adb289723a9..a5224f6160e 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -114,7 +114,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif
while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
@@ -212,7 +212,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
{
pimfor_header_t *h = buf.mem;
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
h->operation, oid, h->device_id, h->flags, length);
/* display the buffer contents for debugging */
@@ -280,7 +280,7 @@ islpci_mgt_receive(struct net_device *ndev)
u32 curr_frag;
#if VERBOSE > SHOW_ERROR_MESSAGES
- DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
+ DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif
/* Only once per interrupt, determine fragment range to
@@ -339,7 +339,7 @@ islpci_mgt_receive(struct net_device *ndev)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_PIMFOR_FRAMES,
- "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
+ "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
header->operation, header->oid, header->device_id,
header->flags, header->length);
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index d66933d70fb..9b796cae4af 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -820,7 +820,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
for (i = 0; i < list->nr; i++)
k += snprintf(str + k, PRIV_STR_SIZE - k,
- "bss[%u] : \nage=%u\nchannel=%u\n"
+ "bss[%u] :\nage=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n",
i, list->bsslist[i].age,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 11865ea2187..abff8934db1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -51,7 +51,6 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
@@ -321,10 +320,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup. For PCMCIA, driver takes what's given */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = &ray_interrupt;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -383,8 +378,7 @@ static void ray_detach(struct pcmcia_device *link)
del_timer(&local->timer);
if (link->priv) {
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
free_netdev(dev);
}
dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
@@ -417,10 +411,10 @@ static int ray_config(struct pcmcia_device *link)
/* Now allocate an interrupt line. Note that this does not
actually assign a handler to the interrupt.
*/
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, ray_interrupt);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
/* This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping.
@@ -493,9 +487,6 @@ static int ray_config(struct pcmcia_device *link)
return i;
}
- strcpy(local->node.dev_name, dev->name);
- link->dev_node = &local->node;
-
printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
dev->name, dev->irq, dev->dev_addr);
@@ -555,7 +546,7 @@ static int ray_init(struct net_device *dev)
local->fw_ver = local->startup_res.firmware_version[0];
local->fw_bld = local->startup_res.firmware_version[1];
local->fw_var = local->startup_res.firmware_version[2];
- dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
+ dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
local->fw_bld);
local->tib_length = 0x20;
@@ -735,8 +726,6 @@ static void verify_dl_startup(u_long data)
start_net((u_long) local);
else
join_net((u_long) local);
-
- return;
} /* end verify_dl_startup */
/*===========================================================================*/
@@ -764,7 +753,6 @@ static void start_net(u_long data)
return;
}
local->card_status = CARD_DOING_ACQ;
- return;
} /* end start_net */
/*===========================================================================*/
@@ -795,7 +783,6 @@ static void join_net(u_long data)
return;
}
local->card_status = CARD_DOING_ACQ;
- return;
}
/*============================================================================
@@ -941,7 +928,6 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
case XMIT_MSG_BAD:
case XMIT_OK:
default:
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
@@ -1112,10 +1098,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
/*
* Wireless Handler : get protocol name
*/
-static int ray_get_name(struct net_device *dev,
- struct iw_request_info *info, char *cwrq, char *extra)
+static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- strcpy(cwrq, "IEEE 802.11-FH");
+ strcpy(wrqu->name, "IEEE 802.11-FH");
return 0;
}
@@ -1123,9 +1109,8 @@ static int ray_get_name(struct net_device *dev,
/*
* Wireless Handler : set frequency
*/
-static int ray_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1135,10 +1120,10 @@ static int ray_set_freq(struct net_device *dev,
return -EBUSY;
/* Setting by channel number */
- if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
+ if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
err = -EOPNOTSUPP;
else
- local->sparm.b5.a_hop_pattern = fwrq->m;
+ local->sparm.b5.a_hop_pattern = wrqu->freq.m;
return err;
}
@@ -1147,14 +1132,13 @@ static int ray_set_freq(struct net_device *dev,
/*
* Wireless Handler : get frequency
*/
-static int ray_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- fwrq->m = local->sparm.b5.a_hop_pattern;
- fwrq->e = 0;
+ wrqu->freq.m = local->sparm.b5.a_hop_pattern;
+ wrqu->freq.e = 0;
return 0;
}
@@ -1162,9 +1146,8 @@ static int ray_get_freq(struct net_device *dev,
/*
* Wireless Handler : set ESSID
*/
-static int ray_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1173,19 +1156,17 @@ static int ray_set_essid(struct net_device *dev,
return -EBUSY;
/* Check if we asked for `any' */
- if (dwrq->flags == 0) {
+ if (wrqu->essid.flags == 0)
/* Corey : can you do that ? */
return -EOPNOTSUPP;
- } else {
- /* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE) {
- return -E2BIG;
- }
- /* Set the ESSID in the card */
- memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
- memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
- }
+ /* Check the size of the string */
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
+ return -E2BIG;
+
+ /* Set the ESSID in the card */
+ memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
+ memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
return -EINPROGRESS; /* Call commit handler */
}
@@ -1194,9 +1175,8 @@ static int ray_set_essid(struct net_device *dev,
/*
* Wireless Handler : get ESSID
*/
-static int ray_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1204,8 +1184,8 @@ static int ray_get_essid(struct net_device *dev,
memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
/* Push it out ! */
- dwrq->length = strlen(extra);
- dwrq->flags = 1; /* active */
+ wrqu->essid.length = strlen(extra);
+ wrqu->essid.flags = 1; /* active */
return 0;
}
@@ -1214,14 +1194,13 @@ static int ray_get_essid(struct net_device *dev,
/*
* Wireless Handler : get AP address
*/
-static int ray_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *awrq, char *extra)
+static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
+ memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
return 0;
}
@@ -1230,9 +1209,8 @@ static int ray_get_wap(struct net_device *dev,
/*
* Wireless Handler : set Bit-Rate
*/
-static int ray_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
@@ -1241,15 +1219,15 @@ static int ray_set_rate(struct net_device *dev,
return -EBUSY;
/* Check if rate is in range */
- if ((vwrq->value != 1000000) && (vwrq->value != 2000000))
+ if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
return -EINVAL;
/* Hack for 1.5 Mb/s instead of 2 Mb/s */
if ((local->fw_ver == 0x55) && /* Please check */
- (vwrq->value == 2000000))
+ (wrqu->bitrate.value == 2000000))
local->net_default_tx_rate = 3;
else
- local->net_default_tx_rate = vwrq->value / 500000;
+ local->net_default_tx_rate = wrqu->bitrate.value / 500000;
return 0;
}
@@ -1258,17 +1236,16 @@ static int ray_set_rate(struct net_device *dev,
/*
* Wireless Handler : get Bit-Rate
*/
-static int ray_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->net_default_tx_rate == 3)
- vwrq->value = 2000000; /* Hum... */
+ wrqu->bitrate.value = 2000000; /* Hum... */
else
- vwrq->value = local->net_default_tx_rate * 500000;
- vwrq->fixed = 0; /* We are in auto mode */
+ wrqu->bitrate.value = local->net_default_tx_rate * 500000;
+ wrqu->bitrate.fixed = 0; /* We are in auto mode */
return 0;
}
@@ -1277,19 +1254,18 @@ static int ray_get_rate(struct net_device *dev,
/*
* Wireless Handler : set RTS threshold
*/
-static int ray_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int rthr = vwrq->value;
+ int rthr = wrqu->rts.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.rts.fixed == 0) we should complain */
- if (vwrq->disabled)
+ if (wrqu->rts.disabled)
rthr = 32767;
else {
if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
@@ -1305,16 +1281,15 @@ static int ray_set_rts(struct net_device *dev,
/*
* Wireless Handler : get RTS threshold
*/
-static int ray_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
+ local->sparm.b5.a_rts_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->rts.disabled = (wrqu->rts.value == 32767);
+ wrqu->rts.fixed = 1;
return 0;
}
@@ -1323,19 +1298,18 @@ static int ray_get_rts(struct net_device *dev,
/*
* Wireless Handler : set Fragmentation threshold
*/
-static int ray_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- int fthr = vwrq->value;
+ int fthr = wrqu->frag.value;
/* Reject if card is already initialised */
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
/* if(wrq->u.frag.fixed == 0) should complain */
- if (vwrq->disabled)
+ if (wrqu->frag.disabled)
fthr = 32767;
else {
if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
@@ -1351,16 +1325,15 @@ static int ray_set_frag(struct net_device *dev,
/*
* Wireless Handler : get Fragmentation threshold
*/
-static int ray_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
+static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
- vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
+ local->sparm.b5.a_frag_threshold[1];
- vwrq->disabled = (vwrq->value == 32767);
- vwrq->fixed = 1;
+ wrqu->frag.disabled = (wrqu->frag.value == 32767);
+ wrqu->frag.fixed = 1;
return 0;
}
@@ -1369,8 +1342,8 @@ static int ray_get_frag(struct net_device *dev,
/*
* Wireless Handler : set Mode of Operation
*/
-static int ray_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
@@ -1380,7 +1353,7 @@ static int ray_set_mode(struct net_device *dev,
if (local->card_status != CARD_AWAITING_PARAM)
return -EBUSY;
- switch (*uwrq) {
+ switch (wrqu->mode) {
case IW_MODE_ADHOC:
card_mode = 0;
/* Fall through */
@@ -1398,15 +1371,15 @@ static int ray_set_mode(struct net_device *dev,
/*
* Wireless Handler : get Mode of Operation
*/
-static int ray_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
ray_dev_t *local = netdev_priv(dev);
if (local->sparm.b5.a_network_type)
- *uwrq = IW_MODE_INFRA;
+ wrqu->mode = IW_MODE_INFRA;
else
- *uwrq = IW_MODE_ADHOC;
+ wrqu->mode = IW_MODE_ADHOC;
return 0;
}
@@ -1415,16 +1388,15 @@ static int ray_get_mode(struct net_device *dev,
/*
* Wireless Handler : get range info
*/
-static int ray_get_range(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
- memset((char *)range, 0, sizeof(struct iw_range));
+ memset(range, 0, sizeof(struct iw_range));
/* Set the length (very important for backward compatibility) */
- dwrq->length = sizeof(struct iw_range);
+ wrqu->data.length = sizeof(struct iw_range);
/* Set the Wireless Extension versions */
range->we_version_compiled = WIRELESS_EXT;
@@ -1447,8 +1419,7 @@ static int ray_get_range(struct net_device *dev,
/*
* Wireless Private Handler : set framing mode
*/
-static int ray_set_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
translate = *(extra); /* Set framing mode */
@@ -1460,8 +1431,7 @@ static int ray_set_framing(struct net_device *dev,
/*
* Wireless Private Handler : get framing mode
*/
-static int ray_get_framing(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = translate;
@@ -1473,8 +1443,7 @@ static int ray_get_framing(struct net_device *dev,
/*
* Wireless Private Handler : get country
*/
-static int ray_get_country(struct net_device *dev,
- struct iw_request_info *info,
+static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
*(extra) = country;
@@ -1486,10 +1455,9 @@ static int ray_get_country(struct net_device *dev,
/*
* Commit handler : called after a bunch of SET operations
*/
-static int ray_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
- char *extra)
-{ /* NULL */
+static int ray_commit(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
return 0;
}
@@ -1530,28 +1498,28 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev)
*/
static const iw_handler ray_handler[] = {
- [SIOCSIWCOMMIT - SIOCIWFIRST] = (iw_handler) ray_commit,
- [SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) ray_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) ray_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = (iw_handler) ray_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = (iw_handler) ray_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = (iw_handler) ray_get_mode,
- [SIOCGIWRANGE - SIOCIWFIRST] = (iw_handler) ray_get_range,
+ IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
+ IW_HANDLER(SIOCGIWNAME, ray_get_name),
+ IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
+ IW_HANDLER(SIOCSIWMODE, ray_set_mode),
+ IW_HANDLER(SIOCGIWMODE, ray_get_mode),
+ IW_HANDLER(SIOCGIWRANGE, ray_get_range),
#ifdef WIRELESS_SPY
- [SIOCSIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
#endif /* WIRELESS_SPY */
- [SIOCGIWAP - SIOCIWFIRST] = (iw_handler) ray_get_wap,
- [SIOCSIWESSID - SIOCIWFIRST] = (iw_handler) ray_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = (iw_handler) ray_get_essid,
- [SIOCSIWRATE - SIOCIWFIRST] = (iw_handler) ray_set_rate,
- [SIOCGIWRATE - SIOCIWFIRST] = (iw_handler) ray_get_rate,
- [SIOCSIWRTS - SIOCIWFIRST] = (iw_handler) ray_set_rts,
- [SIOCGIWRTS - SIOCIWFIRST] = (iw_handler) ray_get_rts,
- [SIOCSIWFRAG - SIOCIWFIRST] = (iw_handler) ray_set_frag,
- [SIOCGIWFRAG - SIOCIWFIRST] = (iw_handler) ray_get_frag,
+ IW_HANDLER(SIOCGIWAP, ray_get_wap),
+ IW_HANDLER(SIOCSIWESSID, ray_set_essid),
+ IW_HANDLER(SIOCGIWESSID, ray_get_essid),
+ IW_HANDLER(SIOCSIWRATE, ray_set_rate),
+ IW_HANDLER(SIOCGIWRATE, ray_get_rate),
+ IW_HANDLER(SIOCSIWRTS, ray_set_rts),
+ IW_HANDLER(SIOCGIWRTS, ray_get_rts),
+ IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
+ IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
};
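
All of the handlers above now share the generic signature taking union iwreq_data *, picking the per-ioctl member (wrqu->freq, wrqu->essid, wrqu->mode, ...) inside the function. A minimal hedged sketch of that shape (hypothetical handler, mirroring ray_get_mode):

    static int example_get_mode(struct net_device *dev,
                                struct iw_request_info *info,
                                union iwreq_data *wrqu, char *extra)
    {
            wrqu->mode = IW_MODE_INFRA;     /* pick the member for this ioctl */
            return 0;
    }
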
#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1559,9 +1527,9 @@ static const iw_handler ray_handler[] = {
#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
static const iw_handler ray_private_handler[] = {
- [0] = (iw_handler) ray_set_framing,
- [1] = (iw_handler) ray_get_framing,
- [3] = (iw_handler) ray_get_country,
+ [0] = ray_set_framing,
+ [1] = ray_get_framing,
+ [3] = ray_get_country,
};
static const struct iw_priv_args ray_private_args[] = {
@@ -1645,7 +1613,6 @@ static int ray_dev_close(struct net_device *dev)
static void ray_reset(struct net_device *dev)
{
pr_debug("ray_reset entered\n");
- return;
}
/*===========================================================================*/
@@ -1892,17 +1859,17 @@ static void ray_update_multi_list(struct net_device *dev, int all)
writeb(0xff, &pccs->var);
local->num_multi = 0xff;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 0;
/* Copy the kernel's list of MC addresses to card */
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy_toio(p, ha->addr, ETH_ALEN);
dev_dbg(&link->dev,
"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
- dmi->dmi_addr[0], dmi->dmi_addr[1],
- dmi->dmi_addr[2], dmi->dmi_addr[3],
- dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ ha->addr[0], ha->addr[1],
+ ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
p += ETH_ALEN;
i++;
}
@@ -2251,7 +2218,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2262,7 +2229,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
(dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
FCS_LEN)) {
pr_debug(
- "ray_cs invalid packet length %d received \n",
+ "ray_cs invalid packet length %d received\n",
rx_len);
return;
}
@@ -2770,11 +2737,11 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Hop dwell = %d Kus\n",
pfh->dwell_time[0] +
256 * pfh->dwell_time[1]);
- seq_printf(m, "Hop set = %d \n",
+ seq_printf(m, "Hop set = %d\n",
pfh->hop_set);
- seq_printf(m, "Hop pattern = %d \n",
+ seq_printf(m, "Hop pattern = %d\n",
pfh->hop_pattern);
- seq_printf(m, "Hop index = %d \n",
+ seq_printf(m, "Hop index = %d\n",
pfh->hop_index);
p += p[1] + 2;
} else {
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 1e23b7f4cca..9f01ddb1974 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -25,7 +25,6 @@ struct beacon_rx {
typedef struct ray_dev_t {
int card_status;
int authentication_state;
- dev_node_t node;
window_handle_t amem_handle; /* handle to window for attribute memory */
window_handle_t rmem_handle; /* handle to window for rx buffer on card */
void __iomem *sram; /* pointer to beginning of shared RAM */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1de5b22d3ef..4bd61ee627c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -118,6 +118,7 @@ MODULE_PARM_DESC(workaround_interval,
#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
+#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122)
#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
@@ -359,6 +360,30 @@ struct ndis_80211_assoc_info {
__le32 offset_resp_ies;
} __attribute__((packed));
+struct ndis_80211_auth_encr_pair {
+ __le32 auth_mode;
+ __le32 encr_mode;
+} __attribute__((packed));
+
+struct ndis_80211_capability {
+ __le32 length;
+ __le32 version;
+ __le32 num_pmkids;
+ __le32 num_auth_encr_pair;
+ struct ndis_80211_auth_encr_pair auth_encr_pair[0];
+} __attribute__((packed));
+
+struct ndis_80211_bssid_info {
+ u8 bssid[6];
+ u8 pmkid[16];
+};
+
+struct ndis_80211_pmkid {
+ __le32 length;
+ __le32 bssid_info_count;
+ struct ndis_80211_bssid_info bssid_info[0];
+};
+
/*
* private data
*/
@@ -477,13 +502,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
struct rndis_wlan_encr_key encr_keys[4];
- enum nl80211_auth_type wpa_auth_type;
int wpa_version;
- int wpa_keymgmt;
- int wpa_ie_len;
- u8 *wpa_ie;
- int wpa_cipher_pair;
- int wpa_cipher_group;
u8 command_buffer[COMMAND_BUFFER_SIZE];
};
@@ -516,7 +535,7 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
@@ -535,6 +554,14 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa);
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
+
static struct cfg80211_ops rndis_config_ops = {
.change_virtual_intf = rndis_change_virtual_intf,
.scan = rndis_scan,
@@ -551,6 +578,9 @@ static struct cfg80211_ops rndis_config_ops = {
.set_default_key = rndis_set_default_key,
.get_station = rndis_get_station,
.dump_station = rndis_dump_station,
+ .set_pmksa = rndis_set_pmksa,
+ .del_pmksa = rndis_del_pmksa,
+ .flush_pmksa = rndis_flush_pmksa,
};
static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -705,6 +735,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
struct rndis_query_c *get_c;
} u;
int ret, buflen;
+ int resplen, respoffs, copylen;
buflen = *len + sizeof(*u.get);
if (buflen < CONTROL_BUFFER_SIZE)
@@ -734,11 +765,34 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status));
if (ret == 0) {
- memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+ resplen = le32_to_cpu(u.get_c->len);
+ respoffs = le32_to_cpu(u.get_c->offset) + 8;
+
+ if (respoffs > buflen) {
+ /* Device returned data offset outside buffer, error. */
+ netdev_dbg(dev->net, "%s(%s): received invalid "
+ "data offset: %d > %d\n", __func__,
+ oid_to_string(oid), respoffs, buflen);
+
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ if ((resplen + respoffs) > buflen) {
+ /* Device would have returned more data if buffer would
+			/* Device would have returned more data if the buffer
+			 * had been big enough. Copy just the bits that we got.
+ copylen = buflen - respoffs;
+ } else {
+ copylen = resplen;
+ }
+
+ if (copylen > *len)
+ copylen = *len;
- ret = le32_to_cpu(u.get_c->len);
- if (ret > *len)
- *len = ret;
+ memcpy(data, u.buf + respoffs, copylen);
+
+ *len = resplen;
ret = rndis_error_status(u.get_c->status);
if (ret < 0)
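
The rndis_query_oid() change above stops trusting the device-reported response offset and length, clamping the copy to the local buffer before touching the data. A standalone sketch of the same clamping, with made-up struct and field names:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Hypothetical response layout: the device reports where its payload
 * starts (offset) and how long it is (len); either may be bogus. */
struct response {
	unsigned int offset;
	unsigned int len;
	unsigned char buf[64];
};

/* Copy at most *out_len payload bytes, never reading past resp->buf. */
static int copy_response(const struct response *resp,
			 unsigned char *out, size_t *out_len)
{
	size_t copylen;

	if (resp->offset > sizeof(resp->buf))
		return -EINVAL;			/* offset outside buffer */

	if ((size_t)resp->offset + resp->len > sizeof(resp->buf))
		copylen = sizeof(resp->buf) - resp->offset;	/* truncated */
	else
		copylen = resp->len;

	if (copylen > *out_len)
		copylen = *out_len;		/* caller's buffer is smaller */

	memcpy(out, resp->buf + resp->offset, copylen);
	*out_len = copylen;
	return 0;
}

int main(void)
{
	struct response r = { .offset = 8, .len = 200 };	/* lies about len */
	unsigned char out[32];
	size_t len = sizeof(out);

	memset(r.buf, 0xab, sizeof(r.buf));
	printf("ret=%d copied=%zu\n", copy_response(&r, out, &len), len);
	return 0;
}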
@@ -747,6 +801,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
le32_to_cpu(u.get_c->status), ret);
}
+exit_unlock:
mutex_unlock(&priv->command_lock);
if (u.buf != priv->command_buffer)
@@ -1092,8 +1147,6 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
}
priv->wpa_version = wpa_version;
- priv->wpa_auth_type = auth_type;
- priv->wpa_keymgmt = keymgmt;
return 0;
}
@@ -1118,7 +1171,6 @@ static int set_priv_filter(struct usbnet *usbdev)
static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
int encr_mode, ret;
@@ -1147,8 +1199,6 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
return ret;
}
- priv->wpa_cipher_pair = pairwise;
- priv->wpa_cipher_group = groupwise;
return 0;
}
@@ -1496,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
static void set_multicast_list(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
__le32 filter, basefilter;
int ret;
char *mc_addrs = NULL;
@@ -1535,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev)
return;
}
- netdev_for_each_mc_addr(mclist, usbdev->net)
+ netdev_for_each_mc_addr(ha, usbdev->net)
memcpy(mc_addrs + i++ * ETH_ALEN,
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
}
netif_addr_unlock_bh(usbdev->net);
@@ -1569,6 +1619,194 @@ set_filter:
le32_to_cpu(filter), ret);
}
+#ifdef DEBUG
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ int i, len, count, max_pmkids, entry_len;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
+
+ netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
+ "%d)\n", func_str, count, len, entry_len);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++) {
+ u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
+
+ netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
+ "pmkid: %08X:%08X:%08X:%08X\n",
+ func_str, pmkids->bssid_info[i].bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+ }
+}
+#else
+static void debug_print_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ const char *func_str)
+{
+ return;
+}
+#endif
+
+static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ndis_80211_pmkid *pmkids;
+ int len, ret, max_pmkids;
+
+ max_pmkids = priv->wdev.wiphy->max_num_pmkids;
+ len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = kzalloc(len, GFP_KERNEL);
+ if (!pmkids)
+ return ERR_PTR(-ENOMEM);
+
+ pmkids->length = cpu_to_le32(len);
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
+ " -> %d\n", __func__, len, max_pmkids, ret);
+
+ kfree(pmkids);
+ return ERR_PTR(ret);
+ }
+
+ if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
+ pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ return pmkids;
+}
+
+static int set_device_pmkids(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids)
+{
+ int ret, len, num_pmkids;
+
+ num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
+ len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
+ pmkids->length = cpu_to_le32(len);
+
+ debug_print_pmkids(usbdev, pmkids, __func__);
+
+ ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
+ le32_to_cpu(pmkids->length));
+ if (ret < 0) {
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
+ "\n", __func__, len, num_pmkids, ret);
+ }
+
+ kfree(pmkids);
+ return ret;
+}
+
+static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, len, count, newlen, err;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ for (i = 0; i < count; i++)
+ if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ break;
+
+ /* pmkid not found */
+ if (i == count) {
+ netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
+ __func__, pmksa->bssid);
+ err = -ENOENT;
+ goto error;
+ }
+
+ for (; i + 1 < count; i++)
+ pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
+
+ count--;
+ newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
+
+static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
+ struct ndis_80211_pmkid *pmkids,
+ struct cfg80211_pmksa *pmksa,
+ int max_pmkids)
+{
+ int i, err, len, count, newlen;
+
+ len = le32_to_cpu(pmkids->length);
+ count = le32_to_cpu(pmkids->bssid_info_count);
+
+ if (count > max_pmkids)
+ count = max_pmkids;
+
+ /* update with new pmkid */
+ for (i = 0; i < count; i++) {
+ if (compare_ether_addr(pmkids->bssid_info[i].bssid,
+ pmksa->bssid))
+ continue;
+
+ memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
+ WLAN_PMKID_LEN);
+
+ return pmkids;
+ }
+
+ /* out of space, return error */
+ if (i == max_pmkids) {
+ netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
+ err = -ENOSPC;
+ goto error;
+ }
+
+ /* add new pmkid */
+ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
+
+ pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
+ if (!pmkids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ pmkids->length = cpu_to_le32(newlen);
+ pmkids->bssid_info_count = cpu_to_le32(count + 1);
+
+ memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
+ memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+
+ return pmkids;
+error:
+ kfree(pmkids);
+ return ERR_PTR(err);
+}
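
update_pmkid() and remove_pmkid() above maintain a fixed-capacity BSSID-to-PMKID table: an update either replaces a matching entry or appends a new one, and a removal compacts the array. A plain-C sketch of that bookkeeping with simplified, invented types (the driver itself works on little-endian NDIS structures and krealloc()):

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define ETH_ALEN	6
#define PMKID_LEN	16
#define MAX_ENTRIES	3

struct entry {
	unsigned char bssid[ETH_ALEN];
	unsigned char pmkid[PMKID_LEN];
};

struct cache {
	int count;
	struct entry e[MAX_ENTRIES];
};

/* Replace the PMKID for a known BSSID, or append a new entry. */
static int cache_update(struct cache *c, const unsigned char *bssid,
			const unsigned char *pmkid)
{
	int i;

	for (i = 0; i < c->count; i++) {
		if (!memcmp(c->e[i].bssid, bssid, ETH_ALEN)) {
			memcpy(c->e[i].pmkid, pmkid, PMKID_LEN);
			return 0;
		}
	}

	if (c->count == MAX_ENTRIES)
		return -ENOSPC;			/* out of space */

	memcpy(c->e[c->count].bssid, bssid, ETH_ALEN);
	memcpy(c->e[c->count].pmkid, pmkid, PMKID_LEN);
	c->count++;
	return 0;
}

/* Drop the entry for a BSSID and close the gap. */
static int cache_remove(struct cache *c, const unsigned char *bssid)
{
	int i;

	for (i = 0; i < c->count; i++)
		if (!memcmp(c->e[i].bssid, bssid, ETH_ALEN))
			break;

	if (i == c->count)
		return -ENOENT;			/* bssid not found */

	for (; i + 1 < c->count; i++)
		c->e[i] = c->e[i + 1];

	c->count--;
	return 0;
}

int main(void)
{
	struct cache c = { 0 };
	unsigned char bssid[ETH_ALEN] = { 0, 1, 2, 3, 4, 5 };
	unsigned char pmkid[PMKID_LEN] = { 0 };

	printf("update=%d\n", cache_update(&c, bssid, pmkid));
	printf("remove=%d\n", cache_remove(&c, bssid));
	return 0;
}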
+
/*
* cfg80211 ops
*/
@@ -2053,7 +2291,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return deauthenticate(usbdev);
}
-static int rndis_set_channel(struct wiphy *wiphy,
+static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
@@ -2179,6 +2417,78 @@ static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, list full, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid *pmkids;
+ u32 *tmp = (u32 *)pmksa->pmkid;
+
+ netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
+ pmksa->bssid,
+ cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
+
+ pmkids = get_device_pmkids(usbdev);
+ if (IS_ERR(pmkids)) {
+ /* Couldn't read PMKID cache from device */
+ return PTR_ERR(pmkids);
+ }
+
+ pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
+ if (IS_ERR(pmkids)) {
+ /* not found, etc */
+ return PTR_ERR(pmkids);
+ }
+
+ return set_device_pmkids(usbdev, pmkids);
+}
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ struct ndis_80211_pmkid pmkid;
+
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
+
+ memset(&pmkid, 0, sizeof(pmkid));
+
+ pmkid.length = cpu_to_le32(sizeof(pmkid));
+ pmkid.bssid_info_count = cpu_to_le32(0);
+
+ return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
+}
+
/*
* workers, indication handlers, device poller
*/
@@ -2262,14 +2572,18 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
{
- union iwreq_data evt;
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- netif_carrier_off(usbdev->net);
+ if (priv->connected) {
+ priv->connected = false;
+ memset(priv->bssid, 0, ETH_ALEN);
- evt.data.flags = 0;
- evt.data.length = 0;
- memset(evt.ap_addr.sa_data, 0, ETH_ALEN);
- wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL);
+ deauthenticate(usbdev);
+
+ cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+ }
+
+ netif_carrier_off(usbdev->net);
}
static void rndis_wlan_worker(struct work_struct *work)
@@ -2523,12 +2837,14 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
}
}
-static int rndis_wlan_get_caps(struct usbnet *usbdev)
+static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
{
struct {
__le32 num_items;
__le32 items[8];
} networks_supported;
+ struct ndis_80211_capability *caps;
+ u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
int len, retval, i, n;
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
@@ -2556,6 +2872,21 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev)
}
}
+ /* get device 802.11 capabilities, number of PMKIDs */
+ caps = (struct ndis_80211_capability *)caps_buf;
+ len = sizeof(caps_buf);
+ retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
+ if (retval >= 0) {
+ netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
+ "ver %d, pmkids %d, auth-encr-pairs %d\n",
+ le32_to_cpu(caps->length),
+ le32_to_cpu(caps->version),
+ le32_to_cpu(caps->num_pmkids),
+ le32_to_cpu(caps->num_auth_encr_pair));
+ wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
+ } else
+ wiphy->max_num_pmkids = 0;
+
return retval;
}
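
rndis_wlan_get_caps() above reads OID_802_11_CAPABILITY into a buffer sized for the header plus a fixed number of trailing auth/encr pairs, then takes the counts from the header. A small standalone sketch of sizing and walking such a length-prefixed structure; the names are hypothetical and a C99 flexible array member stands in for the driver's zero-length array:

#include <stdio.h>
#include <stdlib.h>

struct pair {
	unsigned int auth_mode;
	unsigned int encr_mode;
};

struct capability {
	unsigned int length;		/* total size of the blob in bytes */
	unsigned int num_pairs;		/* how many pairs follow the header */
	struct pair pairs[];		/* flexible array member */
};

int main(void)
{
	/* Room for the header plus up to 16 trailing pairs, the same way
	 * the driver sizes its caps_buf. */
	unsigned int max_pairs = 16, i;
	size_t bufsize = sizeof(struct capability) + max_pairs * sizeof(struct pair);
	struct capability *caps = calloc(1, bufsize);

	if (!caps)
		return 1;

	/* Pretend the device filled in two pairs. */
	caps->num_pairs = 2;
	caps->length = sizeof(*caps) + caps->num_pairs * sizeof(struct pair);
	caps->pairs[0].auth_mode = 1;
	caps->pairs[1].auth_mode = 2;

	/* Never trust the reported count beyond what the buffer can hold. */
	if (caps->num_pairs > max_pairs)
		caps->num_pairs = max_pairs;

	for (i = 0; i < caps->num_pairs; i++)
		printf("pair %u: auth %u encr %u\n", i,
		       caps->pairs[i].auth_mode, caps->pairs[i].encr_mode);

	free(caps);
	return 0;
}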
@@ -2803,7 +3134,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
wiphy->max_scan_ssids = 1;
/* TODO: fill-out band/encr information based on priv->caps */
- rndis_wlan_get_caps(usbdev);
+ rndis_wlan_get_caps(usbdev, wiphy);
memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2863,9 +3194,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
- if (priv && priv->wpa_ie_len)
- kfree(priv->wpa_ie);
-
rndis_unbind(usbdev, intf);
wiphy_unregister(priv->wdev.wiphy);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 5239e082cd0..eea1ef2f502 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -87,7 +87,7 @@ if RT2800PCI
config RT2800PCI_RT30XX
bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800pci driver.
@@ -156,7 +156,7 @@ if RT2800USB
config RT2800USB_RT30XX
bool "rt2800usb - Include support for rt30xx (USB) devices"
- default n
+ default y
---help---
This adds support for rt30xx wireless chipset family to the
rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5f5204b8289..ad2c98af7e9 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -526,6 +526,10 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -922,7 +926,7 @@ static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev)
static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- u32 reg;
+ u32 reg, reg2;
unsigned int i;
char put_to_sleep;
char bbp_state;
@@ -943,11 +947,12 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg);
- bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE);
- rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE);
+ rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2);
+ bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
+ rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
if (bbp_state == state && rf_state == state)
return 0;
+ rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg);
msleep(10);
}
@@ -1003,19 +1008,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len);
- rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
@@ -1036,6 +1041,11 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
+ /*
+	 * Writing TXD word 0 must be the last to prevent a race condition
+	 * with the device, whereby the device may take hold of the TXD
+	 * before we have finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1051,12 +1061,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt2400pci_write_beacon(struct queue_entry *entry)
+static void rt2400pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1072,20 +1089,19 @@ static void rt2400pci_write_beacon(struct queue_entry *entry)
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- /*
- * Replace rt2x00lib allocated descriptor with the
- * pointer to the _real_ hardware descriptor.
- * After that, map the beacon to DMA and update the
- * descriptor.
- */
- memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry_priv->desc;
-
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1093,17 +1109,6 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
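
The write_tx_desc() rework above fills in TXD word 0, which carries the OWNER_NIC and VALID bits, only after every other descriptor word is complete, so the hardware cannot grab a half-built descriptor. A simplified, single-threaded illustration of that ordering with an invented descriptor layout (real hardware additionally relies on the DMA and barrier semantics of the rt2x00 register helpers):

#include <stdio.h>
#include <stdint.h>

#define DESC_OWNER_NIC	(1u << 0)	/* hypothetical "device owns this" bit */

struct tx_desc {
	uint32_t w0;			/* control word, carries the owner bit */
	uint32_t buf_addr;
	uint32_t length;
};

static void write_tx_desc(volatile struct tx_desc *d,
			  uint32_t addr, uint32_t len)
{
	/* Fill in everything the device will look at first... */
	d->buf_addr = addr;
	d->length = len;

	/* ...and only then flip ownership to the device.  If w0 were
	 * written first, the device could start DMA on stale fields. */
	d->w0 = DESC_OWNER_NIC;
}

int main(void)
{
	struct tx_desc d = { 0 };

	write_tx_desc(&d, 0x1000, 64);
	printf("w0=%#x addr=%#x len=%u\n", (unsigned)d.w0,
	       (unsigned)d.buf_addr, (unsigned)d.length);
	return 0;
}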
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2a73f593aab..41da3d218c6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -574,6 +574,10 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
rt2x00pci_register_write(rt2x00dev, CSR20, reg);
+ } else {
+ rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
+ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR20, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1080,7 +1084,7 @@ static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev)
static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- u32 reg;
+ u32 reg, reg2;
unsigned int i;
char put_to_sleep;
char bbp_state;
@@ -1101,11 +1105,12 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg);
- bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE);
- rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE);
+ rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2);
+ bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
+ rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
if (bbp_state == state && rf_state == state)
return 0;
+ rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg);
msleep(10);
}
@@ -1161,15 +1166,15 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
* Start writing the descriptor words.
*/
- rt2x00_desc_read(entry_priv->desc, 1, &word);
+ rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
- rt2x00_desc_write(entry_priv->desc, 1, word);
+ rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
@@ -1190,6 +1195,11 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_desc_write(txd, 10, word);
+ /*
+	 * Writing TXD word 0 must be the last to prevent a race condition
+	 * with the device, whereby the device may take hold of the TXD
+	 * before we have finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1205,15 +1215,22 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt2500pci_write_beacon(struct queue_entry *entry)
+static void rt2500pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1229,20 +1246,19 @@ static void rt2500pci_write_beacon(struct queue_entry *entry)
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- /*
- * Replace rt2x00lib allocated descriptor with the
- * pointer to the _real_ hardware descriptor.
- * After that, map the beacon to DMA and update the
- * descriptor.
- */
- memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry_priv->desc;
-
rt2x00queue_map_txskb(rt2x00dev, entry->skb);
rt2x00_desc_read(entry_priv->desc, 1, &word);
rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1250,17 +1266,6 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
- rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, CSR14, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 8ebb705fe10..9ae96a626e6 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -649,6 +649,10 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
+ } else {
+ rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
+ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
+ rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
}
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1030,12 +1034,30 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
u32 word;
/*
* Start writing the descriptor words.
*/
+ rt2x00_desc_read(txd, 0, &word);
+ rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
+ rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_OFDM,
+ (txdesc->rate_mode == RATE_MODE_OFDM));
+ rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
+ test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
+ rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
+ rt2x00_desc_write(txd, 0, word);
+
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
@@ -1055,23 +1077,11 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
}
- rt2x00_desc_read(txd, 0, &word);
- rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
- rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_OFDM,
- (txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
- test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
- rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
- rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
- rt2x00_desc_write(txd, 0, word);
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
@@ -1079,22 +1089,15 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
*/
static void rt2500usb_beacondone(struct urb *urb);
-static void rt2500usb_write_beacon(struct queue_entry *entry)
+static void rt2500usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
int length;
- u16 reg;
-
- /*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
+ u16 reg, reg0;
/*
* Disable beaconing while we are reloading the beacon data,
@@ -1105,6 +1108,11 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
/*
+ * Take the descriptor in front of the skb into account.
+ */
+ skb_push(entry->skb, TXD_DESC_SIZE);
+
+ /*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
@@ -1129,6 +1137,26 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
* Send out the guardian byte.
*/
usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
+ reg0 = reg;
+ rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
+ /*
+ * Beacon generation will fail initially.
+ * To prevent this we need to change the TXRX_CSR19
+ * register several times (reg0 is the same as reg
+ * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
+ * and 1 in reg).
+ */
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
@@ -1145,37 +1173,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u16 reg, reg0;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) {
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
- reg0 = reg;
- rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
- /*
- * Beacon generation will fail initially.
- * To prevent this we need to change the TXRX_CSR19
- * register several times (reg0 is the same as reg
- * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
- * and 1 in reg).
- */
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
- rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1210,11 +1207,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
- if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
- rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
+ if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
+ rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -1644,11 +1639,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
- * Disable powersaving as default.
- */
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -1781,7 +1771,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
- .kick_tx_queue = rt2500usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2500usb_fill_rxdone,
.config_shared_key = rt2500usb_config_key,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 74c0433dba3..2aa03751c34 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -56,15 +56,20 @@
#define RF3021 0x0007
#define RF3022 0x0008
#define RF3052 0x0009
+#define RF3320 0x000b
/*
- * Chipset version.
+ * Chipset revisions.
*/
-#define RT2860C_VERSION 0x0100
-#define RT2860D_VERSION 0x0101
-#define RT2880E_VERSION 0x0200
-#define RT2883_VERSION 0x0300
-#define RT3070_VERSION 0x0200
+#define REV_RT2860C 0x0100
+#define REV_RT2860D 0x0101
+#define REV_RT2870D 0x0101
+#define REV_RT2872E 0x0200
+#define REV_RT3070E 0x0200
+#define REV_RT3070F 0x0201
+#define REV_RT3071E 0x0211
+#define REV_RT3090E 0x0211
+#define REV_RT3390E 0x0211
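
Later hunks in this patch replace raw rt2x00_rev() comparisons against the old *_VERSION constants with rt2x00_rt_rev()/rt2x00_rt_rev_lt()-style checks keyed on the REV_* values above. A toy analogue of that chip-plus-revision test, using invented struct and helper names rather than the rt2x00 API:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical chip identifiers and revision codes in the spirit of
 * the RT/REV_* constants defined above. */
#define CHIP_RT3070	0x3070
#define REV_3070E	0x0200
#define REV_3070F	0x0201

struct chip_id {
	unsigned short rt;	/* chipset */
	unsigned short rev;	/* revision */
};

static bool is_rt(const struct chip_id *id, unsigned short rt)
{
	return id->rt == rt;
}

static bool is_rt_rev_lt(const struct chip_id *id,
			 unsigned short rt, unsigned short rev)
{
	return is_rt(id, rt) && id->rev < rev;
}

int main(void)
{
	struct chip_id id = { CHIP_RT3070, REV_3070E };

	/* Pre-F RT3070 chips get different TX_SW_CFG values in the patch. */
	printf("older than 3070F: %d\n",
	       is_rt_rev_lt(&id, CHIP_RT3070, REV_3070F));
	return 0;
}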
/*
* Signal information.
@@ -90,13 +95,19 @@
#define NUM_TX_QUEUES 4
/*
- * USB registers.
+ * Registers.
*/
/*
+ * OPT_14: Unknown register used by rt3xxx devices.
+ */
+#define OPT_14_CSR 0x0114
+#define OPT_14_CSR_BIT0 FIELD32(0x00000001)
+
+/*
* INT_SOURCE_CSR: Interrupt source register.
* Write one to clear corresponding bit.
- * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
+ * TX_FIFO_STATUS: FIFO Statistics is full, sw should read TX_STA_FIFO
*/
#define INT_SOURCE_CSR 0x0200
#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
@@ -398,6 +409,31 @@
#define EFUSE_DATA3 0x059c
/*
+ * LDO_CFG0
+ */
+#define LDO_CFG0 0x05d4
+#define LDO_CFG0_DELAY3 FIELD32(0x000000ff)
+#define LDO_CFG0_DELAY2 FIELD32(0x0000ff00)
+#define LDO_CFG0_DELAY1 FIELD32(0x00ff0000)
+#define LDO_CFG0_BGSEL FIELD32(0x03000000)
+#define LDO_CFG0_LDO_CORE_VLEVEL FIELD32(0x1c000000)
+#define LD0_CFG0_LDO25_LEVEL FIELD32(0x60000000)
+#define LDO_CFG0_LDO25_LARGEA FIELD32(0x80000000)
+
+/*
+ * GPIO_SWITCH
+ */
+#define GPIO_SWITCH 0x05dc
+#define GPIO_SWITCH_0 FIELD32(0x00000001)
+#define GPIO_SWITCH_1 FIELD32(0x00000002)
+#define GPIO_SWITCH_2 FIELD32(0x00000004)
+#define GPIO_SWITCH_3 FIELD32(0x00000008)
+#define GPIO_SWITCH_4 FIELD32(0x00000010)
+#define GPIO_SWITCH_5 FIELD32(0x00000020)
+#define GPIO_SWITCH_6 FIELD32(0x00000040)
+#define GPIO_SWITCH_7 FIELD32(0x00000080)
+
+/*
* MAC Control/Status Registers(CSR).
* Some values are set in TU, whereas 1 TU == 1024 us.
*/
@@ -809,7 +845,7 @@
* TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
*/
#define TX_BAND_CFG 0x132c
-#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
+#define TX_BAND_CFG_HT40_MINUS FIELD32(0x00000001)
#define TX_BAND_CFG_A FIELD32(0x00000002)
#define TX_BAND_CFG_BG FIELD32(0x00000004)
@@ -1483,7 +1519,7 @@ struct mac_iveiv_entry {
* BBP 3: RX Antenna
*/
#define BBP3_RX_ANTENNA FIELD8(0x18)
-#define BBP3_HT40_PLUS FIELD8(0x20)
+#define BBP3_HT40_MINUS FIELD8(0x20)
/*
* BBP 4: Bandwidth
@@ -1492,14 +1528,32 @@ struct mac_iveiv_entry {
#define BBP4_BANDWIDTH FIELD8(0x18)
/*
+ * BBP 138: Unknown
+ */
+#define BBP138_RX_ADC1 FIELD8(0x02)
+#define BBP138_RX_ADC2 FIELD8(0x04)
+#define BBP138_TX_DAC1 FIELD8(0x20)
+#define BBP138_TX_DAC2 FIELD8(0x40)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
/*
+ * RFCSR 1:
+ */
+#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
+#define RFCSR1_RX0_PD FIELD8(0x04)
+#define RFCSR1_TX0_PD FIELD8(0x08)
+#define RFCSR1_RX1_PD FIELD8(0x10)
+#define RFCSR1_TX1_PD FIELD8(0x20)
+
+/*
* RFCSR 6:
*/
-#define RFCSR6_R FIELD8(0x03)
+#define RFCSR6_R1 FIELD8(0x03)
+#define RFCSR6_R2 FIELD8(0x40)
/*
* RFCSR 7:
@@ -1512,6 +1566,33 @@ struct mac_iveiv_entry {
#define RFCSR12_TX_POWER FIELD8(0x1f)
/*
+ * RFCSR 13:
+ */
+#define RFCSR13_TX_POWER FIELD8(0x1f)
+
+/*
+ * RFCSR 15:
+ */
+#define RFCSR15_TX_LO2_EN FIELD8(0x08)
+
+/*
+ * RFCSR 17:
+ */
+#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
+#define RFCSR17_TX_LO1_EN FIELD8(0x08)
+#define RFCSR17_R FIELD8(0x20)
+
+/*
+ * RFCSR 20:
+ */
+#define RFCSR20_RX_LO1_EN FIELD8(0x08)
+
+/*
+ * RFCSR 21:
+ */
+#define RFCSR21_RX_LO2_EN FIELD8(0x08)
+
+/*
* RFCSR 22:
*/
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
@@ -1522,6 +1603,14 @@ struct mac_iveiv_entry {
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
/*
+ * RFCSR 27:
+ */
+#define RFCSR27_R1 FIELD8(0x03)
+#define RFCSR27_R2 FIELD8(0x04)
+#define RFCSR27_R3 FIELD8(0x30)
+#define RFCSR27_R4 FIELD8(0x40)
+
+/*
* RFCSR 30:
*/
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
@@ -1603,6 +1692,8 @@ struct mac_iveiv_entry {
#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
+#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
+#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
/*
* EEPROM frequency
@@ -1659,6 +1750,12 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
/*
+ * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
+ */
+#define EEPROM_TXMIXER_GAIN_BG 0x0024
+#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
+
+/*
* EEPROM RSSI A offset
*/
#define EEPROM_RSSI_A 0x0025
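
The register map above describes every register as a set of FIELD32()/FIELD8() masks that rt2x00_set_field32() and rt2x00_get_field32() pack and unpack. A generic, standalone sketch of that mask-based field access (a reimplementation for illustration, not the rt2x00 macros themselves):

#include <stdio.h>
#include <stdint.h>

/* A field is just a contiguous mask inside a 32-bit register word. */
#define FIELD32(mask)	(mask)

#define CFG_ON_PERIOD	FIELD32(0x000000ff)
#define CFG_OFF_PERIOD	FIELD32(0x0000ff00)

/* Lowest set bit of the mask gives the field's shift. */
static unsigned int field_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg = (*reg & ~mask) | ((value << field_shift(mask)) & mask);
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> field_shift(mask);
}

int main(void)
{
	uint32_t reg = 0;

	set_field32(&reg, CFG_ON_PERIOD, 70);
	set_field32(&reg, CFG_OFF_PERIOD, 30);
	printf("reg=%#x on=%u off=%u\n", (unsigned)reg,
	       (unsigned)get_field32(reg, CFG_ON_PERIOD),
	       (unsigned)get_field32(reg, CFG_OFF_PERIOD));
	return 0;
}

Keeping the shift implicit in the mask is what lets the header above define a register field with a single constant.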
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c015ce9fdd0..db4250d1c8b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -41,9 +41,6 @@
#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
-#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
-#include "rt2x00pci.h"
-#endif
#include "rt2800lib.h"
#include "rt2800.h"
#include "rt2800usb.h"
@@ -76,6 +73,23 @@ MODULE_LICENSE("GPL");
rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
H2M_MAILBOX_CSR_OWNER, (__reg))
+static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+ /* check for rt2872 on SoC */
+ if (!rt2x00_is_soc(rt2x00dev) ||
+ !rt2x00_rt(rt2x00dev, RT2872))
+ return false;
+
+ /* we know for sure that these rf chipsets are used on rt305x boards */
+ if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ return true;
+
+ NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
+ return false;
+}
+
static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, const u8 value)
{
@@ -268,6 +282,104 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
+{
+ __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
+ u32 word;
+
+ /*
+ * Initialize TX Info descriptor
+ */
+ rt2x00_desc_read(txwi, 0, &word);
+ rt2x00_set_field32(&word, TXWI_W0_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
+ rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
+ rt2x00_set_field32(&word, TXWI_W0_TS,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_AMPDU,
+ test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
+ rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
+ rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+ rt2x00_set_field32(&word, TXWI_W0_BW,
+ test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
+ test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+ rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
+ rt2x00_desc_write(txwi, 0, word);
+
+ rt2x00_desc_read(txwi, 1, &word);
+ rt2x00_set_field32(&word, TXWI_W1_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_NSEQ,
+ test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
+ rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+ rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
+ test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
+ txdesc->key_idx : 0xff);
+ rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
+ txdesc->length);
+ rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1);
+ rt2x00_desc_write(txwi, 1, word);
+
+ /*
+ * Always write 0 to IV/EIV fields, hardware will insert the IV
+ * from the IVEIV register when TXD_W3_WIV is set to 0.
+ * When TXD_W3_WIV is set to 1 it will use the IV data
+ * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
+ * crypto entry in the registers should be used to encrypt the frame.
+ */
+ _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
+ _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
+}
+EXPORT_SYMBOL_GPL(rt2800_write_txwi);
+
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
+{
+ __le32 *rxwi = (__le32 *) skb->data;
+ u32 word;
+
+ rt2x00_desc_read(rxwi, 0, &word);
+
+ rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
+ rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+
+ rt2x00_desc_read(rxwi, 1, &word);
+
+ if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
+ rxdesc->flags |= RX_FLAG_SHORT_GI;
+
+ if (rt2x00_get_field32(word, RXWI_W1_BW))
+ rxdesc->flags |= RX_FLAG_40MHZ;
+
+ /*
+ * Detect RX rate, always use MCS as signal type.
+ */
+ rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
+ rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
+ rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);
+
+ /*
+	 * Mask out the 0x8 bit to remove the short preamble flag.
+ */
+ if (rxdesc->rate_mode == RATE_MODE_CCK)
+ rxdesc->signal &= ~0x8;
+
+ rt2x00_desc_read(rxwi, 2, &word);
+
+ rxdesc->rssi =
+ (rt2x00_get_field32(word, RXWI_W2_RSSI0) +
+ rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2;
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(skb, RXWI_DESC_SIZE);
+}
+EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -360,11 +472,6 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
- rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
- rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
return 0;
@@ -610,10 +717,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
{
u32 reg;
- rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
- rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
- rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
-
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
!!erp->short_preamble);
@@ -632,15 +735,10 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
- rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
- rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
@@ -718,10 +816,10 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
rt2x00dev->lna_gain = lna_gain;
}
-static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
@@ -787,10 +885,10 @@ static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
-static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_conf *conf,
- struct rf_channel *rf,
- struct channel_info *info)
+static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
u8 rfcsr;
@@ -798,7 +896,7 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
@@ -806,6 +904,11 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
TXPOWER_G_TO_DEV(info->tx_power1));
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+ TXPOWER_G_TO_DEV(info->tx_power2));
+ rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -827,15 +930,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(rt2x00dev, RT3070) ||
- rt2x00_rt(rt2x00dev, RT3090)) &&
- (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022)))
- rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
+ if (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022))
+ rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
else
- rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
+ rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
/*
* Change BBP settings
@@ -863,7 +964,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
- rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
@@ -896,11 +997,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 4, bbp);
rt2800_bbp_read(rt2x00dev, 3, &bbp);
- rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
+ rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -988,10 +1088,6 @@ static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
libconf->conf->short_frame_max_tx_count);
rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
libconf->conf->long_frame_max_tx_count);
- rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
- rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
- rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
@@ -1015,13 +1111,13 @@ static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
} else {
- rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
+
+ rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
}
}
@@ -1062,9 +1158,10 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1095,8 +1192,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
/*
@@ -1114,8 +1210,17 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
+ u16 eeprom;
unsigned int i;
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+ rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+
if (rt2x00_is_usb(rt2x00dev)) {
/*
* Wait until BBP and RF are ready.
@@ -1135,8 +1240,25 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
reg & ~0x00002000);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ /*
+ * Reset DMA indexes
+ */
+ rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+ rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+ rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+ rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+ }
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
@@ -1181,12 +1303,42 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
+ rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
+
+ rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
+ rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
+ rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
- rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000002c);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000000f);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
+ } else if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+ }
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1205,19 +1357,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
+ rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if ((rt2x00_rt(rt2x00dev, RT2872) &&
- (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
rt2x00_rt(rt2x00dev, RT2883) ||
- rt2x00_rt(rt2x00dev, RT2890) ||
- rt2x00_rt(rt2x00dev, RT3052) ||
- (rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
+ rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
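The rt2x00_rt_rev_lt()/rt2x00_rt_rev_gte() helpers used in this hunk replace the earlier open-coded rt2x00_rt() + rt2x00_rev() comparisons. A minimal sketch of what such a revision-gated check amounts to, assuming a chip record carrying the RT chipset id and its silicon revision (the struct and function names below are illustrative, not the rt2x00.h definitions):

/* Illustrative sketch only; the real helpers live in rt2x00.h. */
struct sketch_chip {
	unsigned short rt;	/* chipset id, e.g. RT2872 or RT3070 */
	unsigned short rev;	/* silicon revision, e.g. REV_RT3070E */
};

static int sketch_rt_rev_gte(const struct sketch_chip *c,
			     unsigned short rt, unsigned short rev)
{
	return c->rt == rt && c->rev >= rev;
}

static int sketch_rt_rev_lt(const struct sketch_chip *c,
			    unsigned short rt, unsigned short rev)
{
	return c->rt == rt && c->rev < rev;
}

Read this way, the MAX_PSDU = 2 branch above is taken for RT2872 at or above revision E, for RT2883, or for RT3070 below revision E; everything else falls through to MAX_PSDU = 1.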
@@ -1225,38 +1373,61 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+ rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
+ rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
+ rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
+ rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
+ rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
+ rt2800_register_write(rt2x00dev, LED_CFG, reg);
+
rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
+ rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
+ rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
+
rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
+ rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1269,11 +1440,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL,
+ !rt2x00_is_usb(rt2x00dev));
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -1281,6 +1454,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
@@ -1293,6 +1467,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
@@ -1305,6 +1480,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
if (rt2x00_is_usb(rt2x00dev)) {
@@ -1334,6 +1510,22 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
+
+ /*
+ * Usually the CCK SIFS time should be set to 10 and the OFDM SIFS
+ * time should be set to 16. However, the original Ralink driver uses
+ * 16 for both and indeed using a value of 10 for CCK SIFS results in
+ * connection problems with 11g + CTS protection. Hence, use the same
+ * defaults as the Ralink driver: 16 for both CCK and OFDM SIFS.
+ */
+ rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314);
+ rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
+ rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
+
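The read/modify/write sequences throughout this function rely on the rt2x00 register field helpers. As a rough sketch of what a 32-bit field update like the XIFS_TIME_CFG programming above boils down to, assuming a field descriptor made of a bit mask and a bit offset (illustrative names, not the exact rt2x00reg.h definitions):

/* Sketch of a masked 32-bit field update (illustrative only). */
struct sketch_field32 {
	unsigned int bit_offset;
	unsigned int bit_mask;
};

static void sketch_set_field32(unsigned int *reg,
			       struct sketch_field32 field,
			       unsigned int value)
{
	*reg &= ~field.bit_mask;
	*reg |= (value << field.bit_offset) & field.bit_mask;
}

/*
 * Usage mirrors the pattern above: read the register into 'reg',
 * update the individual fields (e.g. both SIFS times to 16), then
 * write the whole word back in a single register access.
 */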
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
/*
@@ -1481,45 +1673,79 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2800_bbp_write(rt2x00dev, 69, 0x12);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x16);
+ rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+ }
+
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 73, 0x10);
- rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
+ rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+ rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+ }
+
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+ if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
+ rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
rt2800_bbp_write(rt2x00dev, 86, 0x00);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
rt2800_bbp_write(rt2x00dev, 92, 0x00);
- rt2800_bbp_write(rt2x00dev, 103, 0x00);
- rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 69, 0x16);
- rt2800_bbp_write(rt2x00dev, 73, 0x12);
- }
-
- if (rt2x00_rt(rt2x00dev, RT2860) &&
- (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
- rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ else
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
- rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ if (rt2800_is_305x_soc(rt2x00dev))
+ rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- }
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT3052)) {
- rt2800_bbp_write(rt2x00dev, 31, 0x08);
- rt2800_bbp_write(rt2x00dev, 78, 0x0e);
- rt2800_bbp_write(rt2x00dev, 80, 0x08);
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_bbp_read(rt2x00dev, 138, &value);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ value |= 0x20;
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ value &= ~0x02;
+
+ rt2800_bbp_write(rt2x00dev, 138, value);
}
+
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -1598,19 +1824,16 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
u8 rfcsr;
u8 bbp;
+ u32 reg;
+ u16 eeprom;
- if (rt2x00_is_usb(rt2x00dev) &&
- rt2x00_rt(rt2x00dev, RT3070) &&
- (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
+ if (!rt2x00_rt(rt2x00dev, RT3070) &&
+ !rt2x00_rt(rt2x00dev, RT3071) &&
+ !rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2800_is_305x_soc(rt2x00dev))
return 0;
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
- if (!rt2x00_rf(rt2x00dev, RF3020) &&
- !rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022))
- return 0;
- }
-
/*
* Init RF calibration.
*/
@@ -1621,13 +1844,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
- rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
@@ -1640,9 +1865,41 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
- rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+ } else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1673,15 +1930,57 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
+ return 0;
+ }
+
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ else
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+ }
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
}
/*
* Set RX Filter calibration for 20MHz and 40MHz
*/
- rt2x00dev->calibration[0] =
- rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
- rt2x00dev->calibration[1] =
- rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
+ } else if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00dev->calibration[0] =
+ rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
+ rt2x00dev->calibration[1] =
+ rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
+ }
/*
* Set back to initial state
@@ -1699,6 +1998,81 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+
+ rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
+ rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
+ rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+ }
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ rt2x00_get_field16(eeprom,
+ EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ if (rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_bbp_read(rt2x00dev, 138, &bbp);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
+ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
+
+ rt2800_bbp_write(rt2x00dev, 138, bbp);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
+ rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
@@ -1774,10 +2148,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2870) ||
- rt2x00_rt(rt2x00dev, RT2872) ||
- rt2x00_rt(rt2x00dev, RT2880) ||
- (rt2x00_rt(rt2x00dev, RT2883) &&
- (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
+ rt2x00_rt(rt2x00dev, RT2872)) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1882,10 +2253,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT2860) &&
!rt2x00_rt(rt2x00dev, RT2870) &&
!rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2880) &&
!rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT2890) &&
- !rt2x00_rt(rt2x00dev, RT3052) &&
!rt2x00_rt(rt2x00dev, RT3070) &&
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
@@ -1954,7 +2322,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
/*
- * RF value list for rt28x0
+ * RF value list for rt28xx
* Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
*/
static const struct rf_channel rf_vals[] = {
@@ -2029,10 +2397,10 @@ static const struct rf_channel rf_vals[] = {
};
/*
- * RF value list for rt3070
- * Supports: 2.4 GHz
+ * RF value list for rt3xxx
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
*/
-static const struct rf_channel rf_vals_302x[] = {
+static const struct rf_channel rf_vals_3x[] = {
{1, 241, 2, 2 },
{2, 241, 2, 7 },
{3, 242, 2, 2 },
@@ -2047,6 +2415,51 @@ static const struct rf_channel rf_vals_302x[] = {
{12, 246, 2, 7 },
{13, 247, 2, 2 },
{14, 248, 2, 4 },
+
+ /* 802.11 UNI / HyperLan 2 */
+ {36, 0x56, 0, 4},
+ {38, 0x56, 0, 6},
+ {40, 0x56, 0, 8},
+ {44, 0x57, 0, 0},
+ {46, 0x57, 0, 2},
+ {48, 0x57, 0, 4},
+ {52, 0x57, 0, 8},
+ {54, 0x57, 0, 10},
+ {56, 0x58, 0, 0},
+ {60, 0x58, 0, 4},
+ {62, 0x58, 0, 6},
+ {64, 0x58, 0, 8},
+
+ /* 802.11 HyperLan 2 */
+ {100, 0x5b, 0, 8},
+ {102, 0x5b, 0, 10},
+ {104, 0x5c, 0, 0},
+ {108, 0x5c, 0, 4},
+ {110, 0x5c, 0, 6},
+ {112, 0x5c, 0, 8},
+ {116, 0x5d, 0, 0},
+ {118, 0x5d, 0, 2},
+ {120, 0x5d, 0, 4},
+ {124, 0x5d, 0, 8},
+ {126, 0x5d, 0, 10},
+ {128, 0x5e, 0, 0},
+ {132, 0x5e, 0, 4},
+ {134, 0x5e, 0, 6},
+ {136, 0x5e, 0, 8},
+ {140, 0x5f, 0, 0},
+
+ /* 802.11 UNII */
+ {149, 0x5f, 0, 9},
+ {151, 0x5f, 0, 11},
+ {153, 0x60, 0, 1},
+ {157, 0x60, 0, 5},
+ {159, 0x60, 0, 7},
+ {161, 0x60, 0, 9},
+ {165, 0x61, 0, 1},
+ {167, 0x61, 0, 3},
+ {169, 0x61, 0, 5},
+ {171, 0x61, 0, 7},
+ {173, 0x61, 0, 9},
};
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
@@ -2087,11 +2500,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
if (rt2x00_rf(rt2x00dev, RF2820) ||
- rt2x00_rf(rt2x00dev, RF2720) ||
- rt2x00_rf(rt2x00dev, RF3052)) {
+ rt2x00_rf(rt2x00dev, RF2720)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) ||
+ rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
@@ -2099,8 +2512,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022)) {
- spec->num_channels = ARRAY_SIZE(rf_vals_302x);
- spec->channels = rf_vals_302x;
+ spec->num_channels = 14;
+ spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF3052)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals_3x);
+ spec->channels = rf_vals_3x;
}
/*
@@ -2111,8 +2528,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
else
spec->ht.ht_supported = false;
+ /*
+ * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
+ * reception problems with HT40 capable 11n APs
+ */
spec->ht.cap =
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index ebabeae62d1..94de999e229 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -111,6 +111,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
+void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
+void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc);
+
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 91cce2d0f6d..b2f23272c3a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -60,6 +60,12 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
unsigned int i;
u32 reg;
+ /*
+ * SOC devices don't support MCU requests.
+ */
+ if (rt2x00_is_soc(rt2x00dev))
+ return;
+
for (i = 0; i < 200; i++) {
rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
@@ -341,19 +347,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
struct queue_entry_priv_pci *entry_priv;
u32 reg;
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
/*
* Initialize registers.
*/
@@ -620,64 +613,31 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
+static int rt2800pci_write_tx_data(struct queue_entry* entry,
+ struct txentry_desc *txdesc)
+{
+ int ret;
+
+ ret = rt2x00pci_write_tx_data(entry, txdesc);
+ if (ret)
+ return ret;
+
+ rt2800_write_txwi(entry->skb, txdesc);
+
+ return 0;
+}
+
+
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
- __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom);
+ struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
- * Initialize TX Info descriptor
- */
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
-
- /*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXD_W3_WIV is set to 0.
- * When TXD_W3_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
* The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
* must contain a TXWI structure + 802.11 header + padding + 802.11
* data. We choose to have SD_PTR0/SD_LEN0 only contain TXWI and
@@ -698,15 +658,14 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W1_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
- rt2x00dev->ops->extra_tx_headroom);
+ rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
- skbdesc->skb_dma + rt2x00dev->ops->extra_tx_headroom);
+ skbdesc->skb_dma + TXWI_DESC_SIZE);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
@@ -714,15 +673,21 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
rt2x00_desc_write(txd, 3, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
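The SD_PTR0/SD_LEN0 versus SD_PTR1/SD_LEN1 split described in the comment above now uses the fixed TXWI size rather than ops->extra_tx_headroom. A standalone sketch of the resulting two DMA segments (the 16-byte TXWI size is an assumption stated for illustration; the driver takes it from TXWI_DESC_SIZE):

/* Sketch of the TX DMA segment split programmed above (illustrative). */
#define SKETCH_TXWI_DESC_SIZE 16	/* assumed: four 32-bit TXWI words */

struct sketch_tx_segments {
	unsigned long ptr0;	/* SD_PTR0: start of the TXWI */
	unsigned long len0;	/* SD_LEN0: TXWI only */
	unsigned long ptr1;	/* SD_PTR1: 802.11 header + padding + data */
};

static struct sketch_tx_segments sketch_split_tx(unsigned long skb_dma)
{
	struct sketch_tx_segments s;

	s.ptr0 = skb_dma;
	s.len0 = SKETCH_TXWI_DESC_SIZE;
	s.ptr1 = skb_dma + SKETCH_TXWI_DESC_SIZE;
	return s;
}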
/*
* TX data initialization
*/
-static void rt2800pci_write_beacon(struct queue_entry *entry)
+static void rt2800pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
@@ -735,15 +700,25 @@ static void rt2800pci_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
- * Write entire beacon with descriptor to register.
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
+ * Write entire beacon with TXWI to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base,
- skbdesc->desc, skbdesc->desc_len);
- rt2800_register_multiwrite(rt2x00dev,
- beacon_base + skbdesc->desc_len,
- entry->skb->data, entry->skb->len);
+ rt2800_register_multiwrite(rt2x00dev, beacon_base,
+ entry->skb->data, entry->skb->len);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
* Clean up beacon skb.
@@ -757,18 +732,6 @@ static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
struct data_queue *queue;
unsigned int idx, qidx = 0;
- u32 reg;
-
- if (queue_idx == QID_BEACON) {
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
- return;
- }
if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
return;
@@ -811,34 +774,21 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
- __le32 *rxwi = (__le32 *)entry->skb->data;
- u32 rxd3;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
-
- rt2x00_desc_read(rxd, 3, &rxd3);
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
-
- if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
+ u32 word;
+
+ rt2x00_desc_read(rxd, 3, &word);
+
+ if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- /*
- * Unfortunately we don't know the cipher type used during
- * decryption. This prevents us from correct providing
- * correct statistics through debugfs.
- */
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
- }
+ /*
+ * Unfortunately we don't know the cipher type used during
+ * decryption. This prevents us from providing
+ * correct statistics through debugfs.
+ */
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -853,51 +803,22 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
/*
- * Detect RX rate, always use MCS as signal type.
+ * Process the RXWI structure that is at the start of the buffer.
*/
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
-
- /*
- * Mask of 0x8 bit to remove the short preamble flag.
- */
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
+ rt2800_process_rxwi(entry->skb, rxdesc);
/*
* Set RX IDX in register to inform hardware that we have handled
* this entry and it is available for reuse again.
*/
rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
-
- /*
- * Remove TXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, RXWI_DESC_SIZE);
}
/*
@@ -907,14 +828,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
struct queue_entry *entry;
- struct queue_entry *entry_done;
- struct queue_entry_priv_pci *entry_priv;
+ __le32 *txwi;
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
u32 old_reg;
- unsigned int type;
- unsigned int index;
+ int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
/*
@@ -936,76 +855,89 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
old_reg = reg;
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+
/*
* Skip this entry when it contains an invalid
* queue identification number.
*/
- type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
- if (type >= QID_RX)
+ if (pid <= 0 || pid > QID_RX)
continue;
- queue = rt2x00queue_get_queue(rt2x00dev, type);
+ queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
if (unlikely(!queue))
continue;
/*
- * Skip this entry when it contains an invalid
- * index number.
+ * Inside each queue, we process each entry in chronological
+ * order. We first check that the queue is not empty.
*/
- index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
- if (unlikely(index >= queue->limit))
+ if (rt2x00queue_empty(queue))
continue;
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- entry = &queue->entries[index];
- entry_priv = entry->priv_data;
- rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);
+ /* Check if we got a match by looking at WCID/ACK/PID
+ * fields */
+ txwi = (__le32 *)(entry->skb->data -
+ rt2x00dev->ops->extra_tx_headroom);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- while (entry != entry_done) {
- /*
- * Catch up.
- * Just report any entries we missed as failed.
- */
- WARNING(rt2x00dev,
- "TX status report missed for entry %d\n",
- entry_done->entry_idx);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
+ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
- txdesc.flags = 0;
- __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
- txdesc.retry = 0;
-
- rt2x00lib_txdone(entry_done, &txdesc);
- entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- }
+ if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
+ WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
/*
* Obtain the status about this packet.
*/
txdesc.flags = 0;
- if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
- __set_bit(TXDONE_SUCCESS, &txdesc.flags);
- else
- __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ rt2x00_desc_read(txwi, 0, &word);
+ mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
+ real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
/*
* Ralink has a retry mechanism using a global fallback
- * table. We setup this fallback table to try immediate
- * lower rate for all rates. In the TX_STA_FIFO,
- * the MCS field contains the MCS used for the successfull
- * transmission. If the first transmission succeed,
- * we have mcs == tx_mcs. On the second transmission,
- * we have mcs = tx_mcs - 1. So the number of
- * retry is (tx_mcs - mcs).
+ * table. We set up this fallback table to try the immediate
+ * lower rate for all rates. In the TX_STA_FIFO, the MCS field
+ * always contains the MCS used for the last transmission, be
+ * it successful or not.
*/
- mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
- real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
+ if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
+ /*
+ * Transmission succeeded. The number of retries is
+ * mcs - real_mcs
+ */
+ __set_bit(TXDONE_SUCCESS, &txdesc.flags);
+ txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
+ } else {
+ /*
+ * Transmission failed. The number of retries is
+ * always 7 in this case (for a total number of 8
+ * frames sent).
+ */
+ __set_bit(TXDONE_FAILURE, &txdesc.flags);
+ txdesc.retry = 7;
+ }
+
__set_bit(TXDONE_FALLBACK, &txdesc.flags);
- txdesc.retry = mcs - min(mcs, real_mcs);
+
rt2x00lib_txdone(entry, &txdesc);
}
}
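The retry accounting added to rt2800pci_txdone() is pure arithmetic on the MCS written into the TXWI versus the MCS reported back in TX_STA_FIFO: the fallback table steps the rate down by one per retry, so a successful frame was retried mcs - real_mcs times, while a failed frame has used all 8 attempts. A condensed standalone sketch of that logic (hypothetical names):

/* Sketch of the retry derivation used above (illustrative only). */
struct sketch_tx_status {
	int success;	/* TX_STA_FIFO_TX_SUCCESS */
	int mcs;	/* MCS programmed into the TXWI */
	int real_mcs;	/* MCS reported in TX_STA_FIFO for the last attempt */
};

static int sketch_tx_retry_count(const struct sketch_tx_status *st)
{
	if (st->success)
		return st->mcs > st->real_mcs ? st->mcs - st->real_mcs : 0;
	/* Failure: the hardware exhausted all 8 transmissions, i.e. 7 retries. */
	return 7;
}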
+static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -1030,6 +962,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
rt2800pci_txdone(rt2x00dev);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ rt2800pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
@@ -1128,7 +1063,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
.write_tx_desc = rt2800pci_write_tx_desc,
- .write_tx_data = rt2x00pci_write_tx_data,
+ .write_tx_data = rt2800pci_write_tx_data,
.write_beacon = rt2800pci_write_beacon,
.kick_tx_queue = rt2800pci_kick_tx_queue,
.kill_tx_queue = rt2800pci_kill_tx_queue,
@@ -1184,6 +1119,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
+#ifdef CONFIG_RT2800PCI_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1208,9 +1144,11 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
{ 0, }
};
+#endif /* CONFIG_RT2800PCI_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index d27d7d5d850..699161327d6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -400,64 +400,20 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txi = skbdesc->desc;
- __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)];
+ __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE);
u32 word;
/*
- * Initialize TX Info descriptor
+ * Initialize TXWI descriptor
*/
- rt2x00_desc_read(txwi, 0, &word);
- rt2x00_set_field32(&word, TXWI_W0_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
- rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
- rt2x00_set_field32(&word, TXWI_W0_TS,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_AMPDU,
- test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
- rt2x00_set_field32(&word, TXWI_W0_BW,
- test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
- test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
- rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
- rt2x00_desc_write(txwi, 0, word);
-
- rt2x00_desc_read(txwi, 1, &word);
- rt2x00_set_field32(&word, TXWI_W1_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_NSEQ,
- test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
- rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
- test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
- txdesc->key_idx : 0xff);
- rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
- skb->len - txdesc->l2pad);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID,
- skbdesc->entry->queue->qid + 1);
- rt2x00_desc_write(txwi, 1, word);
+ rt2800_write_txwi(skb, txdesc);
/*
- * Always write 0 to IV/EIV fields, hardware will insert the IV
- * from the IVEIV register when TXINFO_W0_WIV is set to 0.
- * When TXINFO_W0_WIV is set to 1 it will use the IV data
- * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
- * crypto entry in the registers should be used to encrypt the frame.
- */
- _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
- _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
-
- /*
- * Initialize TX descriptor
+ * Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- skb->len + TXWI_DESC_SIZE);
+ skb->len - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -466,26 +422,25 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_desc_write(txi, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txi;
+ skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
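On the USB TX path the descriptors now sit in the skb headroom as TXINFO followed by TXWI, which is why txi is computed by stepping back TXWI_DESC_SIZE + TXINFO_DESC_SIZE from skb->data. A small sketch of that pointer arithmetic (the 4- and 16-byte sizes are assumptions for illustration; the driver uses TXINFO_DESC_SIZE and TXWI_DESC_SIZE):

/* Sketch of the USB TX headroom layout handled above (illustrative). */
#define SKETCH_TXINFO_DESC_SIZE	4	/* assumed: one 32-bit TXINFO word */
#define SKETCH_TXWI_DESC_SIZE	16	/* assumed: four 32-bit TXWI words */

/* Layout in memory: | TXINFO | TXWI | frame data (skb->data points here) | */
static unsigned char *sketch_locate_txinfo(unsigned char *skb_data)
{
	return skb_data - SKETCH_TXWI_DESC_SIZE - SKETCH_TXINFO_DESC_SIZE;
}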
/*
* TX data initialization
*/
-static void rt2800usb_write_beacon(struct queue_entry *entry)
+static void rt2800usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
/*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
-
- /*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
@@ -494,6 +449,12 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
/*
+ * Add the TXWI for the beacon to the skb.
+ */
+ rt2800_write_txwi(entry->skb, txdesc);
+ skb_push(entry->skb, TXWI_DESC_SIZE);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -503,6 +464,14 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
@@ -524,84 +493,53 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- }
-}
-
/*
* RX control handlers
*/
static void rt2800usb_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *rxi = (__le32 *)entry->skb->data;
- __le32 *rxwi;
__le32 *rxd;
- u32 rxi0;
- u32 rxwi0;
- u32 rxwi1;
- u32 rxwi2;
- u32 rxwi3;
- u32 rxd0;
+ u32 word;
int rx_pkt_len;
/*
+ * Copy descriptor to the skbdesc->desc buffer, so it is not affected
+ * when rt2x00usb moves the frame data.
+ */
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+
+ /*
* RX frame format is :
* | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
* |<------------ rx_pkt_len -------------->|
*/
- rt2x00_desc_read(rxi, 0, &rxi0);
- rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
-
- rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+ rt2x00_desc_read(rxi, 0, &word);
+ rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
/*
- * FIXME : we need to check for rx_pkt_len validity
+ * Remove the RXINFO structure from the skb.
*/
- rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
+ skb_pull(entry->skb, RXINFO_DESC_SIZE);
/*
- * Copy descriptor to the skbdesc->desc buffer, making it safe from
- * moving of frame data in rt2x00usb.
+ * FIXME: we need to check for rx_pkt_len validity
*/
- memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
+ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxwi, 0, &rxwi0);
- rt2x00_desc_read(rxwi, 1, &rxwi1);
- rt2x00_desc_read(rxwi, 2, &rxwi2);
- rt2x00_desc_read(rxwi, 3, &rxwi3);
- rt2x00_desc_read(rxd, 0, &rxd0);
+ rt2x00_desc_read(rxd, 0, &word);
- if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
+ if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
- rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR);
- if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -616,45 +554,21 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
+ if (rt2x00_get_field32(word, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
+ if (rt2x00_get_field32(word, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
- rxdesc->flags |= RX_FLAG_SHORT_GI;
-
- if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
- rxdesc->flags |= RX_FLAG_40MHZ;
-
/*
- * Detect RX rate, always use MCS as signal type.
+ * Remove RXD descriptor from end of buffer.
*/
- rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
- rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
- rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
+ skb_trim(entry->skb, rx_pkt_len);
/*
- * Mask of 0x8 bit to remove the short preamble flag.
+ * Process the RXWI structure.
*/
- if (rxdesc->rate_mode == RATE_MODE_CCK)
- rxdesc->signal &= ~0x8;
-
- rxdesc->rssi =
- (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
- rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
-
- rxdesc->noise =
- (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
- rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
-
- rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
-
- /*
- * Remove RXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, skbdesc->desc_len);
+ rt2800_process_rxwi(entry->skb, rxdesc);
}
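The USB RX parsing above follows the layout given in the comment (RXINFO, then rx_pkt_len bytes of RXWI plus frame, then the RXD trailer): pull the RXINFO, locate the RXD at rx_pkt_len past the new data pointer, trim the skb to rx_pkt_len and hand the RXWI-prefixed buffer to rt2800_process_rxwi(). A compact sketch of the offset arithmetic (the RXINFO size is an assumption for illustration):

/* Sketch of the USB RX buffer walk performed above (illustrative only). */
#define SKETCH_RXINFO_DESC_SIZE	4	/* assumed: one 32-bit RXINFO word */

struct sketch_rx_layout {
	unsigned char *rxwi;	/* start of RXWI + frame (rx_pkt_len bytes) */
	unsigned char *rxd;	/* RXD trailer that follows the frame */
};

static struct sketch_rx_layout sketch_parse_usb_rx(unsigned char *buf,
						   unsigned int rx_pkt_len)
{
	struct sketch_rx_layout l;

	l.rxwi = buf + SKETCH_RXINFO_DESC_SIZE;	/* after skb_pull(RXINFO) */
	l.rxd = l.rxwi + rx_pkt_len;		/* sits behind the frame */
	return l;
}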
/*
@@ -747,7 +661,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt2800usb_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
- .kick_tx_queue = rt2800usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
@@ -806,6 +720,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
@@ -841,13 +759,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
{ USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -876,6 +799,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -905,8 +830,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AirTies */
{ USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Corega */
@@ -916,20 +850,46 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* I-O DATA */
{ USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -944,14 +904,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Cisco */
@@ -966,37 +934,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
* Unclear what kind of devices these are (they aren't supported by the
- * vendor driver).
+ * vendor Linux driver).
*/
- /* Allwin */
- { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amigo */
{ USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Askey */
- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Buffalo */
@@ -1015,24 +973,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* EnGenius */
- { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Hawking */
- { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1042,43 +989,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* MSI */
- { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Para */
- { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Sitecom */
- { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index d1d8ae94b4d..2bca6a71a7f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,8 +79,6 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
-#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -113,44 +111,6 @@
#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
/*
- * RX WI structure
- */
-
-/*
- * Word0
- */
-#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
-#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
-#define RXWI_W0_BSSID FIELD32(0x00001c00)
-#define RXWI_W0_UDF FIELD32(0x0000e000)
-#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
-#define RXWI_W0_TID FIELD32(0xf0000000)
-
-/*
- * Word1
- */
-#define RXWI_W1_FRAG FIELD32(0x0000000f)
-#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
-#define RXWI_W1_MCS FIELD32(0x007f0000)
-#define RXWI_W1_BW FIELD32(0x00800000)
-#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
-#define RXWI_W1_STBC FIELD32(0x06000000)
-#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
-
-/*
- * Word2
- */
-#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
-#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
-#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
-
-/*
- * Word3
- */
-#define RXWI_W3_SNR0 FIELD32(0x000000ff)
-#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
-
-/*
* RX descriptor format for RX Ring.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d9daa9c406f..6c1ff4c15c8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -177,16 +177,15 @@ struct rt2x00_chip {
#define RT2573 0x2573
#define RT2860 0x2860 /* 2.4GHz PCI/CB */
#define RT2870 0x2870
-#define RT2872 0x2872
-#define RT2880 0x2880 /* WSOC */
+#define RT2872 0x2872 /* WSOC */
#define RT2883 0x2883 /* WSOC */
-#define RT2890 0x2890 /* 2.4GHz PCIe */
-#define RT3052 0x3052 /* WSOC */
#define RT3070 0x3070
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
+#define RT3593 0x3593 /* PCIe */
+#define RT3883 0x3883 /* WSOC */
u16 rf;
u16 rev;
@@ -550,8 +549,10 @@ struct rt2x00lib_ops {
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
struct txentry_desc *txdesc);
- int (*write_tx_data) (struct queue_entry *entry);
- void (*write_beacon) (struct queue_entry *entry);
+ int (*write_tx_data) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
+ void (*write_beacon) (struct queue_entry *entry,
+ struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
@@ -930,12 +931,12 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
+static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
+static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
return (rt2x00dev->chip.rf == rf);
}
@@ -945,6 +946,24 @@ static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
return rt2x00dev->chip.rev;
}
+static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
+}
+
+static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
+}
+
+static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
+ const u16 rt, const u16 rev)
+{
+ return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
+}
+
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
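The rt2x00_rt_rev*() helpers added above fold the chipset check and the revision comparison into a single call. A minimal usage sketch, assuming hypothetical revision cutoffs and a placeholder workaround function (real drivers use the revision constants from their own headers):

    /*
     * Sketch only: gate a chip-specific quirk on chipset and revision.
     * The revision values and apply_old_rt3070_quirk() are placeholders.
     */
    static void example_apply_quirk(struct rt2x00_dev *rt2x00dev)
    {
    	/* Exact chip and revision match. */
    	if (rt2x00_rt_rev(rt2x00dev, RT3070, 0x0201))
    		apply_old_rt3070_quirk(rt2x00dev);

    	/* Any RT3071 older than the (hypothetical) fixed revision. */
    	if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, 0x0211))
    		apply_old_rt3070_quirk(rt2x00dev);
    }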
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index d291c7862e1..583dacd8d24 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -128,6 +128,7 @@ void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
/* Pull buffer to correct size */
skb_pull(skb, txdesc->iv_len);
+ txdesc->length -= txdesc->iv_len;
/* IV/EIV data has officially been stripped */
skbdesc->flags |= SKBDESC_IV_STRIPPED;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 9569fb4e5bc..e9fe93fd804 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -156,10 +156,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
enum rt2x00_dump_type type, struct sk_buff *skb)
{
struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
- struct skb_frame_desc *desc = get_skb_frame_desc(skb);
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
struct timeval timestamp;
+ u32 data_len;
do_gettimeofday(&timestamp);
@@ -171,7 +172,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
return;
}
- skbcopy = alloc_skb(sizeof(*dump_hdr) + desc->desc_len + skb->len,
+ data_len = skb->len;
+ if (skbdesc->flags & SKBDESC_DESC_IN_SKB)
+ data_len -= skbdesc->desc_len;
+
+ skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len,
GFP_ATOMIC);
if (!skbcopy) {
DEBUG(rt2x00dev, "Failed to copy skb for dump.\n");
@@ -181,18 +186,20 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr));
dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION);
dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr));
- dump_hdr->desc_length = cpu_to_le32(desc->desc_len);
- dump_hdr->data_length = cpu_to_le32(skb->len);
+ dump_hdr->desc_length = cpu_to_le32(skbdesc->desc_len);
+ dump_hdr->data_length = cpu_to_le32(data_len);
dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
- dump_hdr->queue_index = desc->entry->queue->qid;
- dump_hdr->entry_index = desc->entry->entry_idx;
+ dump_hdr->queue_index = skbdesc->entry->queue->qid;
+ dump_hdr->entry_index = skbdesc->entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
- memcpy(skb_put(skbcopy, desc->desc_len), desc->desc, desc->desc_len);
+ if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB))
+ memcpy(skb_put(skbcopy, skbdesc->desc_len), skbdesc->desc,
+ skbdesc->desc_len);
memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len);
skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy);
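Whether or not SKBDESC_DESC_IN_SKB is set, the dumped record keeps the same layout: the header, then desc_length bytes of hardware descriptor, then data_length bytes of frame payload (when the descriptor already sits at the start of the skb it is simply not copied a second time and data_length is reduced accordingly). A kernel-style sketch of how a consumer could split one record; process_descriptor() and process_frame() are placeholders:

    /* Sketch: split one dumped record into header, descriptor and frame. */
    static void example_parse_dump(const u8 *buf)
    {
    	const struct rt2x00dump_hdr *hdr = (const struct rt2x00dump_hdr *)buf;
    	const u8 *desc = buf + le32_to_cpu(hdr->header_length);
    	const u8 *data = desc + le32_to_cpu(hdr->desc_length);

    	process_descriptor(desc, le32_to_cpu(hdr->desc_length));
    	process_frame(data, le32_to_cpu(hdr->data_length));
    }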
@@ -700,8 +707,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
exit:
rt2x00debug_deregister(rt2x00dev);
ERROR(rt2x00dev, "Failed to register debug handler.\n");
-
- return;
}
void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index eda73ba735a..3ae468c4d76 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -435,7 +435,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
rx_status->mactime = rxdesc.timestamp;
rx_status->rate_idx = rate_idx;
rx_status->signal = rxdesc.rssi;
- rx_status->noise = rxdesc.noise;
rx_status->flag = rxdesc.flags;
rx_status->antenna = rt2x00dev->link.ant.active.rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 727019a748e..ed303b423e4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -62,11 +62,14 @@
* the tx event which has either succeeded or failed. A frame
* with this type should also have been reported with as a
* %DUMP_FRAME_TX frame.
+ * @DUMP_FRAME_BEACON: This beacon frame is queued for transmission to the
+ * hardware.
*/
enum rt2x00_dump_type {
DUMP_FRAME_RXDONE = 1,
DUMP_FRAME_TX = 2,
DUMP_FRAME_TXDONE = 3,
+ DUMP_FRAME_BEACON = 4,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 34beb00c434..b818a43c467 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -79,7 +79,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
ERROR(rt2x00dev,
"Current firmware does not support detected chipset.\n");
goto exit;
- };
+ }
rt2x00dev->fw = fw;
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 1056c92143a..5a407602ce3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -35,6 +35,7 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
txdesc->mpdu_density =
@@ -66,4 +67,20 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+ /*
+ * Determine IFS values
+ * - Use TXOP_BACKOFF for management frames
+ * - Use TXOP_SIFS for fragment bursts
+ * - Use TXOP_HTTXOP for everything else
+ *
+ * Note: rt2800 devices won't use CTS protection (if used)
+ * for frames not transmitted with TXOP_HTTXOP
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txdesc->txop = TXOP_BACKOFF;
+ else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->txop = TXOP_SIFS;
+ else
+ txdesc->txop = TXOP_HTTXOP;
}
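The rules spelled out in the comment reduce to a three-way choice on the new txop field. Restated as a small pure function over the enum txop values defined in rt2x00reg.h (illustration only, not driver code):

    /* Illustration of the txop selection performed above. */
    static enum txop example_select_txop(bool is_mgmt, bool first_fragment)
    {
    	if (is_mgmt)
    		return TXOP_BACKOFF;	/* management frames */
    	if (!first_fragment)
    		return TXOP_SIFS;	/* follow-up fragments of a burst */
    	return TXOP_HTTXOP;		/* everything else */
    }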
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cf3f1c0c438..f71eee67f97 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -63,11 +63,10 @@ EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
/*
* TX data handlers.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry)
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct queue_entry_priv_pci *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc;
/*
* This should not happen, we already checked the entry
@@ -82,13 +81,6 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry)
return -EINVAL;
}
- /*
- * Fill in skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
-
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
@@ -214,7 +206,7 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
/*
* Free irq line.
*/
- free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev);
+ free_irq(rt2x00dev->irq, rt2x00dev);
/*
* Free DMA
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 8149ff68410..51bcef3839c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -92,7 +92,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
* This function will initialize the DMA and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00pci_write_tx_data(struct queue_entry *entry);
+int rt2x00pci_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_pci: Per entry PCI specific information
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a0bd36fc4d2..20dbdd6fb90 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -334,12 +334,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
txdesc->aifs = entry->queue->aifs;
/*
- * Header and alignment information.
+ * Header and frame information.
*/
+ txdesc->length = entry->skb->len;
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
- (entry->skb->len > txdesc->header_length))
- txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -423,6 +421,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
{
struct data_queue *queue = entry->queue;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ enum rt2x00_dump_type dump_type;
rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
@@ -430,21 +429,26 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
- rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
+ dump_type = (txdesc->queue == QID_BEACON) ?
+ DUMP_FRAME_BEACON : DUMP_FRAME_TX;
+ rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
+}
+
+static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
+{
+ struct data_queue *queue = entry->queue;
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
/*
* Check if we need to kick the queue, there are however a few rules
- * 1) Don't kick beacon queue
- * 2) Don't kick unless this is the last in frame in a burst.
+ * 1) Don't kick unless this is the last frame in a burst.
* When the burst flag is set, this frame is always followed
 * by another frame which is in some way related to it.
* This is true for fragments, RTS or CTS-to-self frames.
- * 3) Rule 2 can be broken when the available entries
+ * 2) Rule 1 can be broken when the available entries
 * in the queue are less than a certain threshold.
*/
- if (entry->queue->qid == QID_BEACON)
- return;
-
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
@@ -526,7 +530,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* call failed. Since we always return NETDEV_TX_OK to mac80211,
* this frame will simply be dropped.
*/
- if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
+ if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
+ &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
return -EIO;
@@ -539,6 +544,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_index_inc(queue, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(entry, &txdesc);
return 0;
}
@@ -550,7 +556,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
struct rt2x00_intf *intf = vif_to_intf(vif);
struct skb_frame_desc *skbdesc;
struct txentry_desc txdesc;
- __le32 desc[16];
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -583,19 +588,10 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
/*
- * For the descriptor we use a local array from where the
- * driver can move it to the correct location required for
- * the hardware.
- */
- memset(desc, 0, sizeof(desc));
-
- /*
* Fill in skb descriptor
*/
skbdesc = get_skb_frame_desc(intf->beacon->skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->desc = desc;
- skbdesc->desc_len = intf->beacon->queue->desc_size;
skbdesc->entry = intf->beacon;
/*
@@ -604,12 +600,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
/*
- * Send beacon to hardware.
- * Also enable beacon generation, which might have been disabled
- * by the driver during the config_beacon() callback function.
+ * Send beacon to hardware and enable beacon generation.
*/
- rt2x00dev->ops->lib->write_beacon(intf->beacon);
- rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
+ rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
mutex_unlock(&intf->beacon_skb_mutex);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index c1e482bb37b..f79170849ad 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -94,12 +94,15 @@ enum data_queue_qid {
* mac80211 but was stripped for processing by the driver.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
+ * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
+ * skb, instead of in the desc field.
*/
enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
SKBDESC_NOT_MAC80211 = 1 << 3,
+ SKBDESC_DESC_IN_SKB = 1 << 4,
};
/**
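A brief sketch of the intended use of the new flag: a driver that builds its TX descriptor directly in the skb headroom records that fact so rt2x00debug_dump_frame() does not copy the descriptor twice. TXD_DESC_SIZE stands in for whatever descriptor size the driver uses:

    /* Sketch only: descriptor has been written at the start of the skb. */
    skbdesc = get_skb_frame_desc(entry->skb);
    skbdesc->desc = entry->skb->data;
    skbdesc->desc_len = TXD_DESC_SIZE;
    skbdesc->flags |= SKBDESC_DESC_IN_SKB;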
@@ -183,7 +186,6 @@ enum rxdone_entry_desc_flags {
* @timestamp: RX Timestamp
* @signal: Signal of the received frame.
* @rssi: RSSI of the received frame.
- * @noise: Measured noise during frame reception.
* @size: Data size of the received frame.
* @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
* @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -197,7 +199,6 @@ struct rxdone_entry_desc {
u64 timestamp;
int signal;
int rssi;
- int noise;
int size;
int flags;
int dev_flags;
@@ -287,8 +288,8 @@ enum txentry_desc_flags {
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
* @queue: Queue identification (See &enum data_queue_qid).
+ * @length: Length of the entire frame.
* @header_length: Length of 802.11 header.
- * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary.
* @length_high: PLCP length high word.
* @length_low: PLCP length low word.
* @signal: PLCP signal.
@@ -301,6 +302,7 @@ enum txentry_desc_flags {
* @retry_limit: Max number of retries.
* @aifs: AIFS value.
* @ifs: IFS value.
+ * @txop: IFS value for 11n capable chips.
* @cw_min: cwmin value.
* @cw_max: cwmax value.
* @cipher: Cipher type used for encryption.
@@ -313,8 +315,8 @@ struct txentry_desc {
enum data_queue_qid queue;
+ u16 length;
u16 header_length;
- u16 l2pad;
u16 length_high;
u16 length_low;
@@ -330,6 +332,7 @@ struct txentry_desc {
short retry_limit;
short aifs;
short ifs;
+ short txop;
short cw_min;
short cw_max;
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 603bfc0adaa..b9fe94873ee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -101,6 +101,16 @@ enum ifs {
};
/*
+ * IFS backoff values for HT devices
+ */
+enum txop {
+ TXOP_HTTXOP = 0,
+ TXOP_PIFS = 1,
+ TXOP_SIFS = 2,
+ TXOP_BACKOFF = 3,
+};
+
+/*
* Cipher types for hardware encryption
*/
enum cipher {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f9a7f8b1741..bd1546ba7ad 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -216,12 +216,12 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
rt2x00lib_txdone(entry, &txdesc);
}
-int rt2x00usb_write_tx_data(struct queue_entry *entry)
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
- struct skb_frame_desc *skbdesc;
u32 length;
/*
@@ -231,13 +231,6 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
memset(entry->skb->data, 0, entry->queue->desc_size);
/*
- * Fill in skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry->skb->data;
- skbdesc->desc_len = entry->queue->desc_size;
-
- /*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3da6841b5d4..621d0f82925 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -376,7 +376,8 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
* This function will initialize the URB and skb descriptor
* to prepare the entry for the actual TX operation.
*/
-int rt2x00usb_write_tx_data(struct queue_entry *entry);
+int rt2x00usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc);
/**
* struct queue_entry_priv_usb: Per entry USB specific information
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 432e75f960b..6a74baf4e93 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1689,7 +1689,7 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
- u32 reg;
+ u32 reg, reg2;
unsigned int i;
char put_to_sleep;
@@ -1706,10 +1706,11 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg);
- state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE);
+ rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2);
+ state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
if (state == !put_to_sleep)
return 0;
+ rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg);
msleep(10);
}
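In both rt61pci_set_state() and rt73usb_set_state() the command word written to MAC_CSR12 is now kept in reg while the status poll reads into reg2, so the sleep/wakeup request can be re-issued on every iteration instead of being overwritten by the first read. An annotated, kernel-style restatement of the resulting pattern (PCI accessors shown; the USB variant is identical apart from the register helpers):

    /* reg still holds the sleep/wakeup command previously written. */
    for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
    	rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2);
    	state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
    	if (state == !put_to_sleep)
    		return 0;	/* BBP reached the requested state */
    	/* Not there yet: repeat the request before polling again. */
    	rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg);
    	msleep(10);
    }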
@@ -1764,7 +1765,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
+ __le32 *txd = entry_priv->desc;
u32 word;
/*
@@ -1802,17 +1804,23 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
- rt2x00_desc_read(txd, 6, &word);
- rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
- skbdesc->skb_dma);
- rt2x00_desc_write(txd, 6, word);
+ if (txdesc->queue != QID_BEACON) {
+ rt2x00_desc_read(txd, 6, &word);
+ rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
+ skbdesc->skb_dma);
+ rt2x00_desc_write(txd, 6, word);
- if (skbdesc->desc_len > TXINFO_SIZE) {
rt2x00_desc_read(txd, 11, &word);
- rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len);
+ rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
+ txdesc->length);
rt2x00_desc_write(txd, 11, word);
}
+ /*
+ * Writing TXD word 0 must be the last to prevent a race condition with
+ * the device, whereby the device may take hold of the TXD before we
+ * have finished updating it.
+ */
rt2x00_desc_read(txd, 0, &word);
rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1832,20 +1840,28 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_BURST,
test_bit(ENTRY_TXD_BURST, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
rt2x00_desc_write(txd, 0, word);
+
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len =
+ (txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt61pci_write_beacon(struct queue_entry *entry)
+static void rt61pci_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+ struct queue_entry_priv_pci *entry_priv = entry->priv_data;
unsigned int beacon_base;
u32 reg;
@@ -1861,14 +1877,25 @@ static void rt61pci_write_beacon(struct queue_entry *entry)
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2x00pci_register_multiwrite(rt2x00dev,
- beacon_base,
- skbdesc->desc, skbdesc->desc_len);
- rt2x00pci_register_multiwrite(rt2x00dev,
- beacon_base + skbdesc->desc_len,
+ rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
+ entry_priv->desc, TXINFO_SIZE);
+ rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
entry->skb->data, entry->skb->len);
/*
+ * Enable beaconing again.
+ *
+ * For Wi-Fi fairly generated beacons between participating
+ * stations. Set TBTT phase adaptive adjustment step to 8us.
+ */
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up beacon skb.
*/
dev_kfree_skb_any(entry->skb);
@@ -1880,23 +1907,6 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
- if (queue == QID_BEACON) {
- /*
- * For Wi-Fi faily generated beacons between participating
- * stations. Set TBTT phase adaptive adjustment step to 8us.
- */
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
- return;
- }
-
rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
@@ -1968,12 +1978,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
@@ -2118,6 +2124,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
+static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+ struct ieee80211_conf conf = { .flags = 0 };
+ struct rt2x00lib_conf libconf = { .conf = &conf };
+
+ rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -2165,6 +2179,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
rt2x00pci_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
+ /*
+ * 4 - MCU Autowakeup interrupt.
+ */
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ rt61pci_wakeup(rt2x00dev);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index bb58d797fb7..6e0d82efe92 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -861,15 +861,15 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_SLEEP, REGISTER_TIMEOUT);
} else {
- rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
- USB_MODE_WAKEUP, REGISTER_TIMEOUT);
-
rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
+
+ rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
+ USB_MODE_WAKEUP, REGISTER_TIMEOUT);
}
}
@@ -1366,7 +1366,7 @@ static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev)
static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
{
- u32 reg;
+ u32 reg, reg2;
unsigned int i;
char put_to_sleep;
@@ -1383,10 +1383,11 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
* device has entered the correct state.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg);
- state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE);
+ rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2);
+ state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
if (state == !put_to_sleep)
return 0;
+ rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
msleep(10);
}
@@ -1441,12 +1442,38 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- __le32 *txd = skbdesc->desc;
+ __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
u32 word;
/*
* Start writing the descriptor words.
*/
+ rt2x00_desc_read(txd, 0, &word);
+ rt2x00_set_field32(&word, TXD_W0_BURST,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_VALID, 1);
+ rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
+ test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_ACK,
+ test_bit(ENTRY_TXD_ACK, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
+ test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_OFDM,
+ (txdesc->rate_mode == RATE_MODE_OFDM));
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
+ test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
+ test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
+ test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
+ rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
+ rt2x00_set_field32(&word, TXD_W0_BURST2,
+ test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+ rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
+ rt2x00_desc_write(txd, 0, word);
+
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
@@ -1475,51 +1502,24 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
- rt2x00_desc_read(txd, 0, &word);
- rt2x00_set_field32(&word, TXD_W0_BURST,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_VALID, 1);
- rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
- test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_ACK,
- test_bit(ENTRY_TXD_ACK, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
- test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_OFDM,
- (txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
- rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
- test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
- test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
- test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
- rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
- rt2x00_set_field32(&word, TXD_W0_BURST2,
- test_bit(ENTRY_TXD_BURST, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
- rt2x00_desc_write(txd, 0, word);
+ /*
+ * Register descriptor details in skb frame descriptor.
+ */
+ skbdesc->desc = txd;
+ skbdesc->desc_len = TXD_DESC_SIZE;
}
/*
* TX data initialization
*/
-static void rt73usb_write_beacon(struct queue_entry *entry)
+static void rt73usb_write_beacon(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
u32 reg;
/*
- * Add the descriptor in front of the skb.
- */
- skb_push(entry->skb, entry->queue->desc_size);
- memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
- skbdesc->desc = entry->skb->data;
-
- /*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
@@ -1528,6 +1528,11 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
/*
+ * Take the descriptor in front of the skb into account.
+ */
+ skb_push(entry->skb, TXD_DESC_SIZE);
+
+ /*
* Write entire beacon with descriptor to register.
*/
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -1537,6 +1542,19 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
REGISTER_TIMEOUT32(entry->skb->len));
/*
+ * Enable beaconing again.
+ *
+ * For Wi-Fi fairly generated beacons between participating stations.
+ * Set TBTT phase adaptive adjustment step to 8us (default 16us)
+ */
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
+
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
* Clean up the beacon skb.
*/
dev_kfree_skb(entry->skb);
@@ -1557,31 +1575,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- u32 reg;
-
- if (queue != QID_BEACON) {
- rt2x00usb_kick_tx_queue(rt2x00dev, queue);
- return;
- }
-
- /*
- * For Wi-Fi faily generated beacons between participating stations.
- * Set TBTT phase adaptive adjustment step to 8us (default 16us)
- */
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1645,12 +1638,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
- rxdesc->cipher =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
- rxdesc->cipher_status =
- rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
- }
+ rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
+ rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
if (rxdesc->cipher != CIPHER_NONE) {
_rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -2266,7 +2255,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.write_tx_data = rt2x00usb_write_tx_data,
.write_beacon = rt73usb_write_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
- .kick_tx_queue = rt73usb_kick_tx_queue,
+ .kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
new file mode 100644
index 00000000000..17d80fe556d
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -0,0 +1,88 @@
+#
+# RTL818X Wireless LAN device configuration
+#
+config RTL8180
+ tristate "Realtek 8180/8185 PCI support"
+ depends on MAC80211 && PCI && EXPERIMENTAL
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8180 and RTL8185 based cards.
+ These are PCI based chips found in cards such as:
+
+ (RTL8185 802.11g)
+ A-Link WL54PC
+
+ (RTL8180 802.11b)
+ Belkin F5D6020 v3
+ Dlink DWL-610
+ Dlink DWL-510
+ Netgear MA521
+ Level-One WPC-0101
+ Acer Aspire 1357 LMi
+ VCTnet PC-11B1
+ Ovislink AirLive WL-1120PCM
+ Mentor WL-PCI
+ Linksys WPC11 v4
+ TrendNET TEW-288PI
+ D-Link DWL-520 Rev D
+ Repotec RP-WP7126
+ TP-Link TL-WN250/251
+ Zonet ZEW1000
+ Longshine LCS-8031-R
+ HomeLine HLW-PCC200
+ GigaFast WF721-AEX
+ Planet WL-3553
+ Encore ENLWI-PCI1-NT
+ TrendNET TEW-266PC
+ Gigabyte GN-WLMR101
+ Siemens-fujitsu Amilo D1840W
+ Edimax EW-7126
+ PheeNet WL-11PCIR
+ Tonze PC-2100T
+ Planet WL-8303
+ Dlink DWL-650 v M1
+ Edimax EW-7106
+ Q-Tec 770WC
+ Topcom Skyr@cer 4011b
+ Roper FreeLan 802.11b (edition 2004)
+ Wistron Neweb Corp CB-200B
+ Pentagram HorNET
+ QTec 775WC
+ TwinMOS Booming B Series
+ Micronet SP906BB
+ Sweex LC700010
+ Surecom EP-9428
+ Safecom SWLCR-1100
+
+ Thanks to Realtek for their support!
+
+config RTL8187
+ tristate "Realtek 8187 and 8187B USB support"
+ depends on MAC80211 && USB
+ select EEPROM_93CX6
+ ---help---
+ This is a driver for RTL8187 and RTL8187B based cards.
+ These are USB based chips found in devices such as:
+
+ Netgear WG111v2
+ Level 1 WNC-0301USB
+ Micronet SP907GK V5
+ Encore ENUWI-G2
+ Trendnet TEW-424UB
+ ASUS P5B Deluxe/P5K Premium motherboards
+ Toshiba Satellite Pro series of laptops
+ Asus Wireless Link
+ Linksys WUSB54GC-EU v2
+ (v1 = rt73usb; v3 is rt2070-based,
+ use staging/rt3070 or try rt2800usb)
+
+ Thanks to Realtek for their support!
+
+# If possible, automatically enable LEDs for RTL8187.
+
+config RTL8187_LEDS
+ bool
+ depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
+ default y
+
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index de3844fe06d..4baf0cf0826 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -55,6 +55,14 @@ struct rtl8180_tx_ring {
struct sk_buff_head queue;
};
+struct rtl8180_vif {
+ struct ieee80211_hw *dev;
+
+ /* beaconing */
+ struct delayed_work beacon_work;
+ bool enable_beacon;
+};
+
struct rtl8180_priv {
/* common between rtl818x drivers */
struct rtl818x_csr __iomem *map;
@@ -78,6 +86,9 @@ struct rtl8180_priv {
u32 anaparam;
u16 rfparam;
u8 csthreshold;
+
+ /* sequence # */
+ u16 seqno;
};
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a442831..515817de290 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.rates[0].count = (flags & 0xFF) + 1;
+ info->status.rates[1].idx = -1;
ieee80211_tx_status_irqsafe(dev, skb);
if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -233,6 +234,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtl8180_priv *priv = dev->priv;
struct rtl8180_tx_ring *ring;
struct rtl8180_tx_desc *entry;
@@ -284,6 +286,14 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
}
spin_lock_irqsave(&priv->lock, flags);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ priv->seqno += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
+ }
+
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
entry = &ring->desc[idx];
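The sequence-control field packs the fragment number in bits 3:0 and the sequence number in bits 15:4, which is why the software counter advances in steps of 0x10 and only the fragment bits from mac80211 are preserved. An annotated restatement of the block above (priv->seqno is assumed to be touched only under priv->lock, as it is here):

    /* seq_ctrl: [15:4] sequence number, [3:0] fragment number. */
    if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
    	if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
    		priv->seqno += 0x10;	/* next sequence number */
    	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);	/* keep fragment bits */
    	hdr->seq_ctrl |= cpu_to_le16(priv->seqno);		/* insert sequence bits */
    }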
@@ -297,7 +307,8 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
entry->flags = cpu_to_le32(tx_flags);
__skb_queue_tail(&ring->queue, skb);
if (ring->entries - skb_queue_len(&ring->queue) < 2)
- ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(dev, prio);
+
spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -652,10 +663,59 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
rtl8180_free_tx_ring(dev, i);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
+void rtl8180_beacon_work(struct work_struct *work)
+{
+ struct rtl8180_vif *vif_priv =
+ container_of(work, struct rtl8180_vif, beacon_work.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
+ struct ieee80211_hw *dev = vif_priv->dev;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int err = 0;
+
+ /* don't overflow the tx ring */
+ if (ieee80211_queue_stopped(dev, 0))
+ goto resched;
+
+ /* grab a fresh beacon */
+ skb = ieee80211_beacon_get(dev, vif);
+
+ /*
+ * update beacon timestamp w/ TSF value
+ * TODO: make hardware update beacon timestamp
+ */
+ mgmt = (struct ieee80211_mgmt *)skb->data;
+ mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev));
+
+ /* TODO: use actual beacon queue */
+ skb_set_queue_mapping(skb, 0);
+
+ err = rtl8180_tx(dev, skb);
+ WARN_ON(err);
+
+resched:
+ /*
+ * schedule next beacon
+ * TODO: use hardware support for beacon timing
+ */
+ schedule_delayed_work(&vif_priv->beacon_work,
+ usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
+}
+
static int rtl8180_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
/*
* We only support one active interface at a time.
@@ -665,6 +725,7 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
break;
default:
return -EOPNOTSUPP;
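rtl8180_beacon_work() above reschedules itself once per beacon interval; bss_conf.beacon_int is expressed in time units (1 TU = 1024 microseconds), hence the factor of 1024 before converting to jiffies. A small sketch of that conversion with a hypothetical helper name:

    /* Sketch: beacon interval in TU (1 TU = 1024 us) to jiffies. */
    static unsigned long example_beacon_int_to_jiffies(u16 beacon_int_tu)
    {
    	return usecs_to_jiffies(1024 * (unsigned long)beacon_int_tu);
    }
    /* The common 100 TU interval is 102400 us, roughly 102 ms. */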
@@ -672,6 +733,12 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
priv->vif = vif;
+ /* Initialize driver private area */
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+ vif_priv->dev = dev;
+ INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8180_beacon_work);
+ vif_priv->enable_beacon = false;
+
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
le32_to_cpu(*(__le32 *)vif->addr));
@@ -705,8 +772,11 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
u32 changed)
{
struct rtl8180_priv *priv = dev->priv;
+ struct rtl8180_vif *vif_priv;
int i;
+ vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
+
if (changed & BSS_CHANGED_BSSID) {
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->BSSID[i],
@@ -721,13 +791,22 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
}
if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
- priv->rf->conf_erp(dev, info);
+ priv->rf->conf_erp(dev, info);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ vif_priv->enable_beacon = info->enable_beacon;
+
+ if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
+ cancel_delayed_work_sync(&vif_priv->beacon_work);
+ if (vif_priv->enable_beacon)
+ schedule_work(&vif_priv->beacon_work.work);
+ }
}
-static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8180_configure_filter(struct ieee80211_hw *dev,
@@ -762,14 +841,6 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
-static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
-{
- struct rtl8180_priv *priv = dev->priv;
-
- return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
- (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
-}
-
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -827,6 +898,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
const char *chip_name, *rf_name = NULL;
u32 reg;
u16 eeprom_val;
+ u8 mac_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -855,8 +927,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
goto err_free_reg;
}
- if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) ||
- (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) {
+ if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
+ (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -905,7 +977,9 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SIGNAL_UNSPEC;
- dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ dev->vif_data_size = sizeof(struct rtl8180_vif);
+ dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
dev->queues = 1;
dev->max_signal = 65;
@@ -987,12 +1061,13 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
}
- eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
/* CCK TX power */
for (i = 0; i < 14; i += 2) {
@@ -1024,7 +1099,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
}
printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->rf->name);
return 0;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 1d30792973f..891b8490e34 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1194,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
- int mc_count, struct dev_addr_list *mc_list)
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void rtl8187_configure_filter(struct ieee80211_hw *dev,
@@ -1333,6 +1333,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
u16 txpwr, reg;
u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
int err, i;
+ u8 mac_addr[ETH_ALEN];
dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
if (!dev) {
@@ -1390,12 +1391,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
udelay(10);
eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
- (__le16 __force *)dev->wiphy->perm_addr, 3);
- if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
+ (__le16 __force *)mac_addr, 3);
+ if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
"generated MAC address\n");
- random_ether_addr(dev->wiphy->perm_addr);
+ random_ether_addr(mac_addr);
}
+ SET_IEEE80211_PERM_ADDR(dev, mac_addr);
channel = priv->channels;
for (i = 0; i < 3; i++) {
@@ -1526,7 +1528,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->b_tx_status.queue);
printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
+ wiphy_name(dev->wiphy), mac_addr,
chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
#ifdef CONFIG_RTL8187_LEDS
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 785e0244e30..337fc7bec5a 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -51,3 +51,27 @@ config WL1271
If you choose to build a module, it'll be called wl1271. Say N if
unsure.
+
+config WL1271_SPI
+ tristate "TI wl1271 SPI support"
+ depends on WL1271 && SPI_MASTER
+ ---help---
+ This module adds support for the SPI interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SPI bus.
+
+ If you choose to build a module, it'll be called wl1251_spi.
+ Say N if unsure.
+
+config WL1271_SDIO
+ tristate "TI wl1271 SDIO support"
+ depends on WL1271 && MMC && ARM
+ ---help---
+ This module adds support for the SDIO interface of adapters using
+ TI wl1271 chipset. Select this if your platform is using
+ the SDIO bus.
+
+ If you choose to build a module, it'll be called
+ wl1271_sdio. Say N if unsure.
+
+
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index f47ec94c16d..27ddd2be0a9 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
-wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
+wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
wl1271_event.o wl1271_tx.o wl1271_rx.o \
wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o wl1271_io.o
+ wl1271_init.o wl1271_debugfs.o
wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
obj-$(CONFIG_WL1271) += wl1271.o
+obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
+obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 37c61c19cae..4f5f02a26e6 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -256,6 +256,8 @@ struct wl1251_debugfs {
struct wl1251_if_operations {
void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len);
void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
+ void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
+ void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
void (*reset)(struct wl1251 *wl);
void (*enable_irq)(struct wl1251 *wl);
void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index d5ac79aeaa7..2545123931e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -497,7 +497,8 @@ int wl1251_boot(struct wl1251 *wl)
/* 2. start processing NVS file */
if (wl->use_eeprom) {
wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
- msleep(4000);
+ /* Wait for EEPROM NVS burst read to complete */
+ msleep(40);
wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
} else {
ret = wl1251_boot_upload_nvs(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h
index b89d2ac62ef..c545e9d5f51 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl12xx/wl1251_io.h
@@ -48,6 +48,26 @@ static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
wl->if_ops->write(wl, addr, &val, sizeof(u32));
}
+static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
+{
+ u32 response;
+
+ if (wl->if_ops->read_elp)
+ wl->if_ops->read_elp(wl, addr, &response);
+ else
+ wl->if_ops->read(wl, addr, &response, sizeof(u32));
+
+ return response;
+}
+
+static inline void wl1251_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ if (wl->if_ops->write_elp)
+ wl->if_ops->write_elp(wl, addr, val);
+ else
+ wl->if_ops->write(wl, addr, &val, sizeof(u32));
+}
+
/* Memory target IO, address is translated to partition 0 */
void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len);
void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1c8226eee40..00b24282fc7 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -147,8 +147,8 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
u32 elp_reg;
elp_reg = ELPCTRL_WAKE_UP;
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
if (!(elp_reg & ELPCTRL_WLAN_READY))
wl1251_warning("WLAN not ready");
@@ -202,8 +202,8 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
goto out;
}
- /* No NVS from netlink, try to get it from the filesystem */
- if (wl->nvs == NULL) {
+ if (wl->nvs == NULL && !wl->use_eeprom) {
+ /* No NVS from netlink, try to get it from the filesystem */
ret = wl1251_fetch_nvs(wl);
if (ret < 0)
goto out;
@@ -857,6 +857,7 @@ out:
}
static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1251 *wl = hw->priv;
@@ -1196,6 +1197,66 @@ static const struct ieee80211_ops wl1251_ops = {
.conf_tx = wl1251_op_conf_tx,
};
+static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
+{
+ unsigned long timeout;
+
+ wl1251_reg_write32(wl, EE_ADDR, offset);
+ wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
+
+ /* EE_CTL_READ clears when data is ready */
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ *data = wl1251_reg_read32(wl, EE_DATA);
+ return 0;
+}
+
+static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
+ u8 *data, size_t len)
+{
+ size_t i;
+ int ret;
+
+ wl1251_reg_write32(wl, EE_START, 0);
+
+ for (i = 0; i < len; i++) {
+ ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1251_read_eeprom_mac(struct wl1251 *wl)
+{
+ u8 mac[ETH_ALEN];
+ int i, ret;
+
+ wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
+
+ ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
+ if (ret < 0) {
+ wl1251_warning("failed to read MAC address from EEPROM");
+ return ret;
+ }
+
+ /* MAC is stored in reverse order */
+ for (i = 0; i < ETH_ALEN; i++)
+ wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
+
+ return 0;
+}
+
static int wl1251_register_hw(struct wl1251 *wl)
{
int ret;
@@ -1231,7 +1292,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->channel_change_time = 10000;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_UAPSD;
@@ -1242,6 +1302,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->queues = 4;
+ if (wl->use_eeprom)
+ wl1251_read_eeprom_mac(wl);
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 851dfb65e47..b55cb2bd459 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -45,7 +45,7 @@ void wl1251_elp_work(struct work_struct *work)
goto out;
wl1251_debug(DEBUG_PSM, "chip to elp");
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
wl->elp = true;
out:
@@ -79,9 +79,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
- wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+ wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
/*
* FIXME: we should wait for irq from chip but, as a temporary
@@ -93,7 +93,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
return -ETIMEDOUT;
}
msleep(1);
- elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
+ elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 0ca3b432605..d16edd9bf06 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -46,7 +46,14 @@
#define SOR_CFG (REGISTERS_BASE + 0x0800)
#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
#define HI_CFG (REGISTERS_BASE + 0x0808)
+
+/* EEPROM registers */
#define EE_START (REGISTERS_BASE + 0x080C)
+#define EE_CTL (REGISTERS_BASE + 0x2000)
+#define EE_DATA (REGISTERS_BASE + 0x2004)
+#define EE_ADDR (REGISTERS_BASE + 0x2008)
+
+#define EE_CTL_READ 2
#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 6f229e0990f..851515836a7 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,12 +74,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
status->signal = desc->rssi;
- /*
- * FIXME: guessing that snr needs to be divided by two, otherwise
- * the values don't make any sense
- */
- status->noise = desc->rssi - desc->snr / 2;
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
status->flag |= RX_FLAG_TSFT;
@@ -189,6 +183,4 @@ void wl1251_rx(struct wl1251 *wl)
/* Finally, we need to ACK the RX */
wl1251_rx_ack(wl);
-
- return;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22bdce..d234285c2c8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -20,20 +20,14 @@
* Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
*/
#include <linux/module.h>
-#include <linux/crc7.h>
#include <linux/mod_devicetable.h>
-#include <linux/irq.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
+#include <linux/spi/wl12xx.h>
+#include <linux/irq.h>
#include "wl1251.h"
-#include "wl12xx_80211.h"
-#include "wl1251_reg.h"
-#include "wl1251_ps.h"
-#include "wl1251_io.h"
-#include "wl1251_tx.h"
-#include "wl1251_debugfs.h"
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x104c
@@ -43,6 +37,8 @@
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#endif
+static struct wl12xx_platform_data *wl12xx_board_data;
+
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
return wl->if_priv;
@@ -65,7 +61,8 @@ static const struct sdio_device_id wl1251_devices[] = {
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
-void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_read(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -77,7 +74,8 @@ void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
+static void wl1251_sdio_write(struct wl1251 *wl, int addr,
+ void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
@@ -89,7 +87,33 @@ void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
sdio_release_host(func);
}
-void wl1251_sdio_reset(struct wl1251 *wl)
+static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ *val = sdio_readb(func, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_readb failed (%d)", ret);
+}
+
+static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ sdio_claim_host(func);
+ sdio_writeb(func, val, addr, &ret);
+ sdio_release_host(func);
+
+ if (ret)
+ wl1251_error("sdio_writeb failed (%d)", ret);
+}
+
+static void wl1251_sdio_reset(struct wl1251 *wl)
{
}
@@ -111,19 +135,64 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
sdio_release_host(func);
}
-void wl1251_sdio_set_power(bool enable)
+/* Interrupts when using dedicated WLAN_IRQ pin */
+static irqreturn_t wl1251_line_irq(int irq, void *cookie)
+{
+ struct wl1251 *wl = cookie;
+
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1251_enable_line_irq(struct wl1251 *wl)
{
+ enable_irq(wl->irq);
}
-struct wl1251_if_operations wl1251_sdio_ops = {
+static void wl1251_disable_line_irq(struct wl1251 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1251_sdio_set_power(bool enable)
+{
+}
+
+static struct wl1251_if_operations wl1251_sdio_ops = {
.read = wl1251_sdio_read,
.write = wl1251_sdio_write,
+ .write_elp = wl1251_sdio_write_elp,
+ .read_elp = wl1251_sdio_read_elp,
.reset = wl1251_sdio_reset,
- .enable_irq = wl1251_sdio_enable_irq,
- .disable_irq = wl1251_sdio_disable_irq,
};
-int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
+static int wl1251_platform_probe(struct platform_device *pdev)
+{
+ if (pdev->id != -1) {
+ wl1251_error("can only handle single device");
+ return -ENODEV;
+ }
+
+ wl12xx_board_data = pdev->dev.platform_data;
+ return 0;
+}
+
+/*
+ * Dummy platform_driver for passing platform_data to this driver,
+ * until we have a way to pass this through the SDIO subsystem or
+ * some other way.
+ */
+static struct platform_driver wl1251_platform_driver = {
+ .driver = {
+ .name = "wl1251_data",
+ .owner = THIS_MODULE,
+ },
+ .probe = wl1251_platform_probe,
+};
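The dummy platform driver above only latches the platform_data pointer, so board code has to register a matching "wl1251_data" device. A minimal sketch, with made-up IRQ/power details and only the fields this probe path actually consumes (set_power, irq, use_eeprom):

/* Hypothetical board-file sketch; the IRQ number and power hook are invented. */
#include <linux/platform_device.h>
#include <linux/spi/wl12xx.h>

static void board_wl1251_set_power(bool enable)
{
	/* toggle the board's WLAN power/enable GPIO here (board specific) */
}

static struct wl12xx_platform_data board_wl1251_data = {
	.set_power  = board_wl1251_set_power,
	.irq        = 42,		/* assumed dedicated WLAN_IRQ line */
	.use_eeprom = true,		/* MAC/NVS live in the on-module EEPROM */
};

static struct platform_device board_wl1251_device = {
	.name = "wl1251_data",		/* must match wl1251_platform_driver */
	.id   = -1,			/* the probe above rejects any other id */
	.dev  = {
		.platform_data = &board_wl1251_data,
	},
};

The board init would register board_wl1251_device with platform_device_register() early enough that wl12xx_board_data is populated before the SDIO function is probed.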
+
+static int wl1251_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
int ret;
struct wl1251 *wl;
@@ -141,20 +210,50 @@ int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
goto release;
sdio_set_block_size(func, 512);
+ sdio_release_host(func);
SET_IEEE80211_DEV(hw, &func->dev);
wl->if_priv = func;
wl->if_ops = &wl1251_sdio_ops;
wl->set_power = wl1251_sdio_set_power;
- sdio_release_host(func);
+ if (wl12xx_board_data != NULL) {
+ wl->set_power = wl12xx_board_data->set_power;
+ wl->irq = wl12xx_board_data->irq;
+ wl->use_eeprom = wl12xx_board_data->use_eeprom;
+ }
+
+ if (wl->irq) {
+ ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+ if (ret < 0) {
+ wl1251_error("request_irq() failed: %d", ret);
+ goto disable;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ disable_irq(wl->irq);
+
+ wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+
+ wl1251_info("using dedicated interrupt line");
+ } else {
+ wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+
+ wl1251_info("using SDIO interrupt");
+ }
+
ret = wl1251_init_ieee80211(wl);
if (ret)
- goto disable;
+ goto out_free_irq;
sdio_set_drvdata(func, wl);
return ret;
+out_free_irq:
+ if (wl->irq)
+ free_irq(wl->irq, wl);
disable:
sdio_claim_host(func);
sdio_disable_func(func);
@@ -167,6 +266,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
wl1251_free_hw(wl);
sdio_claim_host(func);
@@ -186,6 +287,12 @@ static int __init wl1251_sdio_init(void)
{
int err;
+ err = platform_driver_register(&wl1251_platform_driver);
+ if (err) {
+ wl1251_error("failed to register platform driver: %d", err);
+ return err;
+ }
+
err = sdio_register_driver(&wl1251_sdio_driver);
if (err)
wl1251_error("failed to register sdio driver: %d", err);
@@ -195,6 +302,7 @@ static int __init wl1251_sdio_init(void)
static void __exit wl1251_sdio_exit(void)
{
sdio_unregister_driver(&wl1251_sdio_driver);
+ platform_driver_unregister(&wl1251_platform_driver);
wl1251_notice("unloaded");
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 3bfb59bd463..e81474203a2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -310,7 +310,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
- .name = "wl1251",
+ .name = DRIVER_NAME,
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 97ea5096bc8..6f1b6b5640c 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -53,6 +53,9 @@ enum {
DEBUG_MAC80211 = BIT(11),
DEBUG_CMD = BIT(12),
DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
DEBUG_ALL = ~0,
};
@@ -110,6 +113,9 @@ enum {
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
+#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
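A stand-alone sanity check of the two helpers (plain userspace C, not part of the patch; the example value is arbitrary) shows how the low 48 bits of the security sequence counter are split:

#include <stdio.h>
#include <stdint.h>

/* local copies of the macros above, using stdint types */
#define WL1271_TX_SECURITY_LO16(s) ((uint16_t)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((uint32_t)(((s) >> 16) & 0xffffffff))

int main(void)
{
	int64_t seq = 0x123456789LL;	/* arbitrary counter value */

	/* prints "lo=0x6789 hi=0x12345" */
	printf("lo=0x%04x hi=0x%05x\n",
	       (unsigned int)WL1271_TX_SECURITY_LO16(seq),
	       (unsigned int)WL1271_TX_SECURITY_HI32(seq));
	return 0;
}

These macros pair with the s64 tx_security_seq counter introduced further down in this header.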
+
/* NVS data structure */
#define WL1271_NVS_SECTION_SIZE 468
@@ -142,14 +148,7 @@ struct wl1271_nvs_file {
*/
#undef WL1271_80211A_ENABLED
-/*
- * FIXME: for the wl1271, a busy word count of 1 here will result in a more
- * optimal SPI interface. There is some SPI bug however, causing RXS time outs
- * with this mode occasionally on boot, so lets have three for now. A value of
- * three should make sure, that the chipset will always be ready, though this
- * will impact throughput and latencies slightly.
- */
-#define WL1271_BUSY_WORD_CNT 3
+#define WL1271_BUSY_WORD_CNT 1
#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
#define WL1271_ELP_HW_STATE_ASLEEP 0
@@ -334,11 +333,27 @@ struct wl1271_scan {
u8 probe_requests;
};
+struct wl1271_if_operations {
+ void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+ void (*reset)(struct wl1271 *wl);
+ void (*init)(struct wl1271 *wl);
+ void (*power)(struct wl1271 *wl, bool enable);
+ struct device* (*dev)(struct wl1271 *wl);
+ void (*enable_irq)(struct wl1271 *wl);
+ void (*disable_irq)(struct wl1271 *wl);
+};
+
struct wl1271 {
+ struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
- struct spi_device *spi;
+ void *if_priv;
+
+ struct wl1271_if_operations *if_ops;
void (*set_power)(bool enable);
int irq;
@@ -357,6 +372,9 @@ struct wl1271 {
#define WL1271_FLAG_IN_ELP (6)
#define WL1271_FLAG_PSM (7)
#define WL1271_FLAG_PSM_REQUESTED (8)
+#define WL1271_FLAG_IRQ_PENDING (9)
+#define WL1271_FLAG_IRQ_RUNNING (10)
+#define WL1271_FLAG_IDLE (11)
unsigned long flags;
struct wl1271_partition_set part;
@@ -370,9 +388,12 @@ struct wl1271 {
size_t fw_len;
struct wl1271_nvs_file *nvs;
+ s8 hw_pg_ver;
+
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
u8 bss_type;
+ u8 set_bss_type;
u8 ssid[IW_ESSID_MAX_SIZE + 1];
u8 ssid_len;
int channel;
@@ -382,13 +403,13 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
- u8 tx_results_count;
+ u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
- int tx_packets_count;
+ u32 tx_packets_count;
/* Time-offset between host and chipset clocks */
- int time_offset;
+ s64 time_offset;
/* Session counter for the chipset */
int session_counter;
@@ -403,8 +424,7 @@ struct wl1271 {
/* Security sequence number counters */
u8 tx_security_last_seq;
- u16 tx_security_seq_16;
- u32 tx_security_seq_32;
+ s64 tx_security_seq;
/* FW Rx counter */
u32 rx_counter;
@@ -430,14 +450,19 @@ struct wl1271 {
/* currently configured rate set */
u32 sta_rate_set;
u32 basic_rate_set;
+ u32 basic_rate;
u32 rate_set;
/* The current band */
enum ieee80211_band band;
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
/* Default key (for WEP) */
u32 default_key;
+ unsigned int filters;
unsigned int rx_config;
unsigned int rx_filter;
@@ -450,10 +475,13 @@ struct wl1271 {
/* in dBm */
int power_level;
+ int rssi_thold;
+ int last_rssi_event;
+
struct wl1271_stats stats;
struct wl1271_debugfs debugfs;
- u32 buffer_32;
+ __le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
@@ -465,6 +493,8 @@ struct wl1271 {
/* Current chipset configuration */
struct conf_drv_settings conf;
+ bool sg_enabled;
+
struct list_head list;
};
@@ -477,7 +507,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_DEFAULT_POWER_LEVEL 0
-#define WL1271_TX_QUEUE_MAX_LENGTH 20
+#define WL1271_TX_QUEUE_LOW_WATERMARK 10
+#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case it has been shut down shortly before */
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 308782421fc..e19e2f8f1e5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -32,7 +32,6 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_ps.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
@@ -137,12 +136,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- /*
- * FIXME: This is a workaround needed while we don't the correct
- * calibration, to avoid distortions
- */
- /* acx->current_tx_power = power * 10; */
- acx->current_tx_power = 120;
+ acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
if (ret < 0) {
@@ -511,12 +505,17 @@ out:
return ret;
}
-int wl1271_acx_conn_monit_params(struct wl1271 *wl)
+#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
+
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
{
struct acx_conn_monit_params *acx;
+ u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
+ u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
int ret;
- wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
+ wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
+ enable ? "enabled" : "disabled");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
@@ -524,8 +523,13 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl)
goto out;
}
- acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
- acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
+ if (enable) {
+ threshold = wl->conf.conn.synch_fail_thold;
+ timeout = wl->conf.conn.bss_lose_timeout;
+ }
+
+ acx->synch_fail_thold = cpu_to_le32(threshold);
+ acx->bss_lose_timeout = cpu_to_le32(timeout);
ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
acx, sizeof(*acx));
@@ -541,7 +545,7 @@ out:
}
-int wl1271_acx_sg_enable(struct wl1271 *wl)
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
{
struct acx_bt_wlan_coex *pta;
int ret;
@@ -554,7 +558,10 @@ int wl1271_acx_sg_enable(struct wl1271 *wl)
goto out;
}
- pta->enable = SG_ENABLE;
+ if (enable)
+ pta->enable = wl->conf.sg.state;
+ else
+ pta->enable = CONF_SG_DISABLE;
ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
if (ret < 0) {
@@ -571,7 +578,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
{
struct acx_bt_wlan_coex_param *param;
struct conf_sg_settings *c = &wl->conf.sg;
- int ret;
+ int i, ret;
wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -582,19 +589,9 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
}
/* BT-WLAN coex parameters */
- param->per_threshold = cpu_to_le32(c->per_threshold);
- param->max_scan_compensation_time =
- cpu_to_le32(c->max_scan_compensation_time);
- param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
- param->load_ratio = c->load_ratio;
- param->auto_ps_mode = c->auto_ps_mode;
- param->probe_req_compensation = c->probe_req_compensation;
- param->scan_window_compensation = c->scan_window_compensation;
- param->antenna_config = c->antenna_config;
- param->beacon_miss_threshold = c->beacon_miss_threshold;
- param->rate_adaptation_threshold =
- cpu_to_le32(c->rate_adaptation_threshold);
- param->rate_adaptation_snr = c->rate_adaptation_snr;
+ for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
+ param->params[i] = cpu_to_le32(c->params[i]);
+ param->param_idx = CONF_SG_PARAMS_ALL;
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
if (ret < 0) {
@@ -806,7 +803,7 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
/* configure one basic rate class */
idx = ACX_TX_BASIC_RATE;
- acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
acx->rate_class[idx].aflags = c->aflags;
@@ -1143,3 +1140,129 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+{
+ struct wl1271_acx_keep_alive_mode *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enabled = enable;
+
+ ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive mode failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+{
+ struct wl1271_acx_keep_alive_config *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx keep alive config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
+ acx->index = index;
+ acx->tpl_validation = tpl_valid;
+ acx->trigger = ACX_KEEP_ALIVE_NO_TX;
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx keep alive config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst)
+{
+ struct wl1271_acx_rssi_snr_trigger *acx = NULL;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl->last_rssi_event = -1;
+
+ acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
+ acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
+ acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
+ if (enable)
+ acx->enable = WL1271_ACX_TRIG_ENABLE;
+ else
+ acx->enable = WL1271_ACX_TRIG_DISABLE;
+
+ acx->index = WL1271_ACX_TRIG_IDX_RSSI;
+ acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
+ acx->threshold = cpu_to_le16(thold);
+ acx->hysteresis = hyst;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+{
+ struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
+ struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rssi_beacon = c->avg_weight_rssi_beacon;
+ acx->rssi_data = c->avg_weight_rssi_data;
+ acx->snr_beacon = c->avg_weight_snr_beacon;
+ acx->snr_data = c->avg_weight_snr_data;
+
+ ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
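The keep-alive helpers added above pair with the KLV null-data template introduced in wl1271_cmd.c later in this patch. A minimal sketch of a plausible call sequence (the ordering and the caller are assumptions, not taken from this patch; the functions and constants used do exist in it):

/* Hypothetical helper showing one way to wire the new keep-alive pieces. */
static int example_enable_keep_alive(struct wl1271 *wl)
{
	int ret;

	/* upload a null-data frame into KLV template slot 0 */
	ret = wl1271_cmd_build_klv_null_data(wl);
	if (ret < 0)
		return ret;

	/* switch the keep-alive machinery on ... */
	ret = wl1271_acx_keep_alive_mode(wl, true);
	if (ret < 0)
		return ret;

	/* ... and mark slot 0 as valid so the template is sent periodically */
	return wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
					    ACX_KEEP_ALIVE_TPL_VALID);
}

The period itself is not a parameter here; wl1271_acx_keep_alive_config() takes it from wl->conf.conn.keep_alive_interval.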
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index aeccc98581e..420e7e2fc02 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -392,81 +392,27 @@ struct acx_conn_monit_params {
__le32 bss_lose_timeout; /* number of TU's from synch fail */
} __attribute__ ((packed));
-enum {
- SG_ENABLE = 0,
- SG_DISABLE,
- SG_SENSE_NO_ACTIVITY,
- SG_SENSE_ACTIVE
-};
-
struct acx_bt_wlan_coex {
struct acx_header header;
- /*
- * 0 -> PTA enabled
- * 1 -> PTA disabled
- * 2 -> sense no active mode, i.e.
- * an interrupt is sent upon
- * BT activity.
- * 3 -> PTA is switched on in response
- * to the interrupt sending.
- */
u8 enable;
u8 pad[3];
} __attribute__ ((packed));
-struct acx_dco_itrim_params {
+struct acx_bt_wlan_coex_param {
struct acx_header header;
- u8 enable;
+ __le32 params[CONF_SG_PARAMS_MAX];
+ u8 param_idx;
u8 padding[3];
- __le32 timeout;
} __attribute__ ((packed));
-#define PTA_ANTENNA_TYPE_DEF (0)
-#define PTA_BT_HP_MAXTIME_DEF (2000)
-#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
-#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
-#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
-#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
-#define PTA_SIGNALING_TYPE_DEF (1)
-#define PTA_AFH_LEVERAGE_ON_DEF (0)
-#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
-#define PTA_MAX_NUM_CTS_DEF (3)
-#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
-#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
-#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
-#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
-#define PTA_CYCLE_TIME_FAST_DEF (8700)
-#define PTA_RX_FOR_AVALANCHE_DEF (5)
-#define PTA_ELP_HP_DEF (0)
-#define PTA_ANTI_STARVE_PERIOD_DEF (500)
-#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
-#define PTA_ALLOW_PA_SD_DEF (1)
-#define PTA_TIME_BEFORE_BEACON_DEF (6300)
-#define PTA_HPDM_MAX_TIME_DEF (1600)
-#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
-#define PTA_AUTO_MODE_NO_CTS_DEF (0)
-#define PTA_BT_HP_RESPECTED_DEF (3)
-#define PTA_WLAN_RX_MIN_RATE_DEF (24)
-#define PTA_ACK_MODE_DEF (1)
-
-struct acx_bt_wlan_coex_param {
+struct acx_dco_itrim_params {
struct acx_header header;
- __le32 per_threshold;
- __le32 max_scan_compensation_time;
- __le16 nfs_sample_interval;
- u8 load_ratio;
- u8 auto_ps_mode;
- u8 probe_req_compensation;
- u8 scan_window_compensation;
- u8 antenna_config;
- u8 beacon_miss_threshold;
- __le32 rate_adaptation_threshold;
- s8 rate_adaptation_snr;
+ u8 enable;
u8 padding[3];
+ __le32 timeout;
} __attribute__ ((packed));
struct acx_energy_detection {
@@ -969,6 +915,84 @@ struct wl1271_acx_pm_config {
u8 padding[3];
} __attribute__ ((packed));
+struct wl1271_acx_keep_alive_mode {
+ struct acx_header header;
+
+ u8 enabled;
+ u8 padding[3];
+} __attribute__ ((packed));
+
+enum {
+ ACX_KEEP_ALIVE_NO_TX = 0,
+ ACX_KEEP_ALIVE_PERIOD_ONLY
+};
+
+enum {
+ ACX_KEEP_ALIVE_TPL_INVALID = 0,
+ ACX_KEEP_ALIVE_TPL_VALID
+};
+
+struct wl1271_acx_keep_alive_config {
+ struct acx_header header;
+
+ __le32 period;
+ u8 index;
+ u8 tpl_validation;
+ u8 trigger;
+ u8 padding;
+} __attribute__ ((packed));
+
+enum {
+ WL1271_ACX_TRIG_TYPE_LEVEL = 0,
+ WL1271_ACX_TRIG_TYPE_EDGE,
+};
+
+enum {
+ WL1271_ACX_TRIG_DIR_LOW = 0,
+ WL1271_ACX_TRIG_DIR_HIGH,
+ WL1271_ACX_TRIG_DIR_BIDIR,
+};
+
+enum {
+ WL1271_ACX_TRIG_ENABLE = 1,
+ WL1271_ACX_TRIG_DISABLE,
+};
+
+enum {
+ WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
+ WL1271_ACX_TRIG_METRIC_RSSI_DATA,
+ WL1271_ACX_TRIG_METRIC_SNR_BEACON,
+ WL1271_ACX_TRIG_METRIC_SNR_DATA,
+};
+
+enum {
+ WL1271_ACX_TRIG_IDX_RSSI = 0,
+ WL1271_ACX_TRIG_COUNT = 8,
+};
+
+struct wl1271_acx_rssi_snr_trigger {
+ struct acx_header header;
+
+ __le16 threshold;
+ __le16 pacing; /* 0 - 60000 ms */
+ u8 metric;
+ u8 type;
+ u8 dir;
+ u8 hysteresis;
+ u8 index;
+ u8 enable;
+ u8 padding[2];
+};
+
+struct wl1271_acx_rssi_snr_avg_weights {
+ struct acx_header header;
+
+ u8 rssi_beacon;
+ u8 rssi_data;
+ u8 snr_beacon;
+ u8 snr_data;
+};
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1017,8 +1041,8 @@ enum {
ACX_FRAG_CFG = 0x004F,
ACX_BET_ENABLE = 0x0050,
ACX_RSSI_SNR_TRIGGER = 0x0051,
- ACX_RSSI_SNR_WEIGHTS = 0x0051,
- ACX_KEEP_ALIVE_MODE = 0x0052,
+ ACX_RSSI_SNR_WEIGHTS = 0x0052,
+ ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
@@ -1058,8 +1082,8 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl);
-int wl1271_acx_sg_enable(struct wl1271 *wl);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
@@ -1085,5 +1109,10 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
int wl1271_acx_pm_config(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
+ s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 02435626306..1a36d8a2196 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_boot.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
@@ -230,6 +229,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_len = sizeof(wl->nvs->nvs);
nvs_ptr = (u8 *)wl->nvs->nvs;
+ /* update current MAC address to NVS */
+ nvs_ptr[11] = wl->mac_addr[0];
+ nvs_ptr[10] = wl->mac_addr[1];
+ nvs_ptr[6] = wl->mac_addr[2];
+ nvs_ptr[5] = wl->mac_addr[3];
+ nvs_ptr[4] = wl->mac_addr[4];
+ nvs_ptr[3] = wl->mac_addr[5];
+
/*
* Layout before the actual NVS tables:
* 1 byte : burst length.
@@ -300,7 +307,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
{
- enable_irq(wl->irq);
+ wl1271_enable_interrupts(wl);
wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
@@ -344,7 +351,7 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
static int wl1271_boot_run_firmware(struct wl1271 *wl)
{
int loop, ret;
- u32 chip_id, interrupt;
+ u32 chip_id, intr;
wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
@@ -361,15 +368,15 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
- if (interrupt == 0xffffffff) {
+ if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
"init indication");
return -EIO;
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
- else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
+ else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1271_ACX_INTR_INIT_COMPLETE);
break;
@@ -404,7 +411,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
- PS_REPORT_EVENT_ID;
+ PS_REPORT_EVENT_ID |
+ JOIN_EVENT_COMPLETE_ID |
+ DISCONNECT_EVENT_COMPLETE_ID |
+ RSSI_SNR_TRIGGER_0_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -431,11 +441,23 @@ static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
return 0;
}
+static void wl1271_boot_hw_version(struct wl1271 *wl)
+{
+ u32 fuse;
+
+ fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
+ fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
+
+ wl->hw_pg_ver = (s8)fuse;
+}
+
int wl1271_boot(struct wl1271 *wl)
{
int ret = 0;
u32 tmp, clk, pause;
+ wl1271_boot_hw_version(wl);
+
if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
@@ -445,11 +467,15 @@ int wl1271_boot(struct wl1271 *wl)
if (REF_CLOCK != 0) {
u16 val;
- /* Set clock type */
+ /* Set clock type (open drain) */
val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
val &= FREF_CLK_TYPE_BITS;
- val |= CLK_REQ_PRCM;
wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+
+ /* Set clock pull mode (no pull) */
+ val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
+ val |= NO_PULL;
+ wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
} else {
u16 val;
/* Set clock polarity */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index 412443ee655..f829699d597 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -53,10 +53,16 @@ struct wl1271_static_data {
#define OCP_REG_POLARITY 0x0064
#define OCP_REG_CLK_TYPE 0x0448
#define OCP_REG_CLK_POLARITY 0x0cb2
+#define OCP_REG_CLK_PULL 0x0cb4
-#define CMD_MBOX_ADDRESS 0x407B4
+#define REG_FUSE_DATA_2_1 0x050a
+#define PG_VER_MASK 0x3c
+#define PG_VER_OFFSET 2
-#define POLARITY_LOW BIT(1)
+#define CMD_MBOX_ADDRESS 0x407B4
+
+#define POLARITY_LOW BIT(1)
+#define NO_PULL (BIT(14) | BIT(15))
#define FREF_CLK_TYPE_BITS 0xfffffe7f
#define CLK_REQ_PRCM 0x100
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index e7832f3318e..19393e236e2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -26,15 +26,18 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_acx.h"
#include "wl12xx_80211.h"
#include "wl1271_cmd.h"
+#include "wl1271_event.h"
+
+#define WL1271_CMD_FAST_POLL_COUNT 50
/*
* send command to firmware
@@ -52,6 +55,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
u32 intr;
int ret = 0;
u16 status;
+ u16 poll_count = 0;
cmd = buf;
cmd->id = cpu_to_le16(id);
@@ -73,7 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
goto out;
}
- msleep(1);
+ poll_count++;
+ if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
+ udelay(10);
+ else
+ msleep(1);
intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
@@ -249,7 +257,36 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
return ret;
}
-int wl1271_cmd_join(struct wl1271 *wl)
+/*
+ * Poll the mailbox event field until any of the bits in the mask is set or a
+ * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
+ */
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ u32 events_vector, event;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+
+ do {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+
+ /* read from both event fields */
+ wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
+ sizeof(events_vector), false);
+ event = events_vector & mask;
+ wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
+ sizeof(events_vector), false);
+ event |= events_vector & mask;
+ } while (!event);
+
+ return 0;
+}
+
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
static bool do_cal = true;
struct wl1271_cmd_join *join;
@@ -280,30 +317,13 @@ int wl1271_cmd_join(struct wl1271 *wl)
join->rx_config_options = cpu_to_le32(wl->rx_config);
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
- join->bss_type = wl->bss_type;
+ join->bss_type = bss_type;
+ join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- /*
- * FIXME: disable temporarily all filters because after commit
- * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
- * association. The filter logic needs to be implemented properly
- * and once that is done, this hack can be removed.
- */
- join->rx_config_options = cpu_to_le32(0);
- join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
-
- if (wl->band == IEEE80211_BAND_2GHZ)
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS |
- CONF_HW_BIT_RATE_5_5MBPS |
- CONF_HW_BIT_RATE_11MBPS);
- else {
+ if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
- join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
- CONF_HW_BIT_RATE_12MBPS |
- CONF_HW_BIT_RATE_24MBPS);
- }
- join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
+ join->beacon_interval = cpu_to_le16(wl->beacon_int);
join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
join->channel = wl->channel;
@@ -320,8 +340,7 @@ int wl1271_cmd_join(struct wl1271 *wl)
/* reset TX security counters */
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
@@ -329,11 +348,9 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- /*
- * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
- * simplify locking we just sleep instead, for now
- */
- msleep(10);
+ ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd join event completion error");
out_free:
kfree(join);
@@ -465,7 +482,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", cmd->channel);
- return ret;
+ goto out;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
@@ -499,7 +516,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = send;
ps_params->retries = 5;
- ps_params->hang_over_period = 128;
+ ps_params->hang_over_period = 1;
ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -549,25 +566,29 @@ out:
return ret;
}
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests)
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests)
{
struct wl1271_cmd_trigger_scan_to *trigger = NULL;
struct wl1271_cmd_scan *params = NULL;
struct ieee80211_channel *channels;
+ u32 rate;
int i, j, n_ch, ret;
u16 scan_options = 0;
u8 ieee_band;
- if (band == WL1271_SCAN_BAND_2_4_GHZ)
+ if (band == WL1271_SCAN_BAND_2_4_GHZ) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_2GHZ;
- else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
+ rate = wl->conf.tx.basic_rate;
+ } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
ieee_band = IEEE80211_BAND_5GHZ;
- else
+ rate = wl->conf.tx.basic_rate_5;
+ } else
return -EINVAL;
if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
@@ -594,8 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.scan_options = cpu_to_le16(scan_options);
params->params.num_probe_requests = probe_requests;
- /* Let the fw autodetect suitable tx_rate for probes */
- params->params.tx_rate = 0;
+ params->params.tx_rate = cpu_to_le32(rate);
params->params.tid_trigger = 0;
params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -622,12 +642,13 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
params->params.num_channels = j;
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ params->params.ssid_len = ssid_len;
+ memcpy(params->params.ssid, ssid, ssid_len);
}
- ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
+ ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
+ ie, ie_len, ieee_band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -658,9 +679,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl->scan.active = active_scan;
wl->scan.high_prio = high_prio;
wl->scan.probe_requests = probe_requests;
- if (len && ssid) {
- wl->scan.ssid_len = len;
- memcpy(wl->scan.ssid, ssid, len);
+ if (ssid_len && ssid) {
+ wl->scan.ssid_len = ssid_len;
+ memcpy(wl->scan.ssid, ssid, ssid_len);
} else
wl->scan.ssid_len = 0;
}
@@ -675,11 +696,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
out:
kfree(params);
+ kfree(trigger);
return ret;
}
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len)
+ void *buf, size_t buf_len, int index, u32 rates)
{
struct wl1271_cmd_template_set *cmd;
int ret = 0;
@@ -697,9 +719,10 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
- cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
+ cmd->enabled_rates = cpu_to_le32(rates);
cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->index = index;
if (buf)
memcpy(cmd->template_data, buf, buf_len);
@@ -717,155 +740,129 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(u8 *rates, u8 band)
+int wl1271_cmd_build_null_data(struct wl1271 *wl)
{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- } else {
- wl1271_error("build_basic_rates invalid band: %d", band);
- }
+ struct sk_buff *skb = NULL;
+ int size;
+ void *ptr;
+ int ret = -ENOMEM;
- return index;
-}
-static int wl1271_build_extended_rates(u8 *rates, u8 band)
-{
- u8 index = 0;
-
- if (band == IEEE80211_BAND_2GHZ) {
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
- } else if (band == IEEE80211_BAND_5GHZ) {
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
- rates[index++] =
- IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
} else {
- wl1271_error("build_basic_rates invalid band: %d", band);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+ size = skb->len;
+ ptr = skb->data;
}
- return index;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
+ WL1271_RATE_AUTOMATIC);
+
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1271_warning("cmd buld null data failed %d", ret);
+
+ return ret;
+
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
{
- struct wl12xx_null_data_template template;
+ struct sk_buff *skb = NULL;
+ int ret = -ENOMEM;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
+ skb->data, skb->len,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
+ WL1271_RATE_AUTOMATIC);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1271_warning("cmd build klv null data failed %d", ret);
- return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
- sizeof(template));
+ return ret;
}
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
{
- struct wl12xx_ps_poll_template template;
-
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
-
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ struct sk_buff *skb;
+ int ret = 0;
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
- return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
- sizeof(template));
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
+ skb->len, 0, wl->basic_rate);
+out:
+ dev_kfree_skb(skb);
+ return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band)
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band)
{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
+ struct sk_buff *skb;
int ret;
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1271_build_basic_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1271_build_extended_rates(rates->rates, band);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ ie, ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate);
else
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- &template, size);
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate_5);
+
+out:
+ dev_kfree_skb(skb);
return ret;
}
+int wl1271_build_qos_null_data(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr template;
+
+ memset(&template, 0, sizeof(template));
+
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
+
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
+
+ return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
+ sizeof(template), 0,
+ WL1271_RATE_AUTOMATIC);
+}
+
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
{
struct wl1271_cmd_set_keys *cmd;
@@ -976,6 +973,10 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
goto out_free;
}
+ ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
+ if (ret < 0)
+ wl1271_error("cmd disconnect event completion error");
+
out_free:
kfree(cmd);
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 2dc06c73532..f2820b42a94 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,7 +33,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len);
int wl1271_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
-int wl1271_cmd_join(struct wl1271 *wl);
+int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -41,15 +41,18 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
-int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 band,
- u8 probe_requests);
+int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 active_scan,
+ u8 high_prio, u8 band, u8 probe_requests);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
- void *buf, size_t buf_len);
+ void *buf, size_t buf_len, int index, u32 rates);
int wl1271_cmd_build_null_data(struct wl1271 *wl);
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
- u8 band);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len, u8 band);
+int wl1271_build_qos_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
@@ -99,6 +102,11 @@ enum wl1271_commands {
#define MAX_CMD_PARAMS 572
+enum {
+ CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
+ CMD_TEMPL_KLV_IDX_MAX = 4
+};
+
enum cmd_templ {
CMD_TEMPL_NULL_DATA = 0,
CMD_TEMPL_BEACON,
@@ -121,6 +129,7 @@ enum cmd_templ {
/* unit ms */
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_MAX_SIZE 252
+#define WL1271_EVENT_TIMEOUT 750
struct wl1271_cmd_header {
__le16 id;
@@ -243,6 +252,8 @@ struct cmd_enabledisable_path {
u8 padding[3];
} __attribute__ ((packed));
+#define WL1271_RATE_AUTOMATIC 0
+
struct wl1271_cmd_template_set {
struct wl1271_cmd_header header;
@@ -509,6 +520,8 @@ enum wl1271_disconnect_type {
};
struct wl1271_cmd_disconnect {
+ struct wl1271_cmd_header header;
+
__le32 rx_config_options;
__le32 rx_filter_options;
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 6f9e75cc564..d046d044b5b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -65,110 +65,344 @@ enum {
CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
};
-struct conf_sg_settings {
+enum {
+ CONF_HW_RXTX_RATE_MCS7 = 0,
+ CONF_HW_RXTX_RATE_MCS6,
+ CONF_HW_RXTX_RATE_MCS5,
+ CONF_HW_RXTX_RATE_MCS4,
+ CONF_HW_RXTX_RATE_MCS3,
+ CONF_HW_RXTX_RATE_MCS2,
+ CONF_HW_RXTX_RATE_MCS1,
+ CONF_HW_RXTX_RATE_MCS0,
+ CONF_HW_RXTX_RATE_54,
+ CONF_HW_RXTX_RATE_48,
+ CONF_HW_RXTX_RATE_36,
+ CONF_HW_RXTX_RATE_24,
+ CONF_HW_RXTX_RATE_22,
+ CONF_HW_RXTX_RATE_18,
+ CONF_HW_RXTX_RATE_12,
+ CONF_HW_RXTX_RATE_11,
+ CONF_HW_RXTX_RATE_9,
+ CONF_HW_RXTX_RATE_6,
+ CONF_HW_RXTX_RATE_5_5,
+ CONF_HW_RXTX_RATE_2,
+ CONF_HW_RXTX_RATE_1,
+ CONF_HW_RXTX_RATE_MAX,
+ CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
+};
+
+enum {
+ CONF_SG_DISABLE = 0,
+ CONF_SG_PROTECTIVE,
+ CONF_SG_OPPORTUNISTIC
+};
+
+enum {
/*
- * Defines the PER threshold in PPM of the BT voice of which reaching
- * this value will trigger raising the priority of the BT voice by
- * the BT IP until next NFS sample interval time as defined in
- * nfs_sample_interval.
+ * PER threshold in PPM of the BT voice
*
- * Unit: PER value in PPM (parts per million)
- * #Error_packets / #Total_packets
+ * Range: 0 - 10000000
+ */
+ CONF_SG_BT_PER_THRESHOLD = 0,
- * Range: u32
+ /*
+ * Number of consecutive RX_ACTIVE activities to override BT voice
+ * frames to ensure WLAN connection
+ *
+ * Range: 0 - 100
+ */
+ CONF_SG_HV3_MAX_OVERRIDE,
+
+ /*
+ * Sample interval for the BT voice PER measurement
+ *
+ * Range: 0 - 65000 (ms)
+ */
+ CONF_SG_BT_NFS_SAMPLE_INTERVAL,
+
+ /*
+ * Defines the load ratio of BT
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BT_LOAD_RATIO,
+
+ /*
+ * Defines whether the SG will force WLAN host to enter/exit PSM
+ *
+ * Range: 1 - SG can force, 0 - host handles PSM
+ */
+ CONF_SG_AUTO_PS_MODE,
+
+ /*
+ * Compensation percentage of probe requests when a scan is initiated
+ * during a BT voice/ACL link.
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_AUTO_SCAN_PROBE_REQ,
+
+ /*
+ * Compensation percentage of probe requests when an active scan is
+ * initiated during BT voice
+ *
+ * Range: 0 - 255 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
+
+ /*
+ * Defines antenna configuration (single/dual antenna)
+ *
+ * Range: 0 - single antenna, 1 - dual antenna
+ */
+ CONF_SG_ANTENNA_CONFIGURATION,
+
+ /*
+ * The threshold (percent) of max consecutive beacon misses before
+ * increasing priority of beacon reception.
+ *
+ * Range: 0 - 100 (%)
+ */
+ CONF_SG_BEACON_MISS_PERCENT,
+
+ /*
+ * The rate threshold below which receiving a data frame from the AP
+ * will increase the priority of the data frame above BT traffic.
+ *
+ * Range: 0, 2, 5 (= 5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
+ */
+ CONF_SG_RATE_ADAPT_THRESH,
+
+ /*
+ * Not used currently.
+ *
+ * Range: 0
+ */
+ CONF_SG_RATE_ADAPT_SNR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
+
+ /*
+ * The time after which no new WLAN trigger frame is transmitted
+ * in WLAN PSM / BT slave EDR
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * RX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN trigger frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+ CONF_SG_RXT,
+
+ /*
+ * TX guard time before the beginning of a new BT voice frame during
+ * which no new WLAN frame is transmitted.
+ *
+ * Range: 0 - 100000 (us)
+ */
+
+ CONF_SG_TXT,
+
+ /*
+ * Enable adaptive RXT/TXT algorithm. If disabled, the host values
+ * will be utilized.
+ *
+ * Range: 0 - disable, 1 - enable
+ */
+ CONF_SG_ADAPTIVE_RXT_TXT,
+
+ /*
+ * The used WLAN legacy service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_PS_POLL_TIMEOUT,
+
+ /*
+ * The used WLAN UPSD service period during active BT ACL link
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 per_threshold;
+ CONF_SG_UPSD_TIMEOUT,
/*
- * This value is an absolute time in micro-seconds to limit the
- * maximum scan duration compensation while in SG
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT master EDR
+ *
+ * Range: 0 - 255 (ms)
*/
- u32 max_scan_compensation_time;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
- /* Defines the PER threshold of the BT voice of which reaching this
- * value will trigger raising the priority of the BT voice until next
- * NFS sample interval time as defined in sample_interval.
+ /*
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT master EDR
*
- * Unit: msec
- * Range: 1-65000
+ * Range: 0 - 255 (ms)
*/
- u16 nfs_sample_interval;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
/*
- * Defines the load ratio for the BT.
- * The WLAN ratio is: 100 - load_ratio
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT slave EDR
*
- * Unit: Percent
- * Range: 0-100
+ * Range: 0 - 255 (ms)
*/
- u8 load_ratio;
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
/*
- * true - Co-ex is allowed to enter/exit P.S automatically and
- * transparently to the host
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT slave EDR
*
- * false - Co-ex is disallowed to enter/exit P.S and will trigger an
- * event to the host to notify for the need to enter/exit P.S
- * due to BT change state
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
+
+ /*
+ * Configure the min and max time BT gains the antenna
+ * in WLAN Active / BT basic rate
+ *
+ * Range: 0 - 255 (ms)
+ */
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
+ CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
+
+ /*
+ * The maximum time WLAN can gain the antenna for
+ * in WLAN Active / BT basic rate
*
+ * Range: 0 - 255 (ms)
*/
- u8 auto_ps_mode;
+ CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
/*
- * This parameter defines the compensation percentage of num of probe
- * requests in case scan is initiated during BT voice/BT ACL
- * guaranteed link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT voice
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 probe_req_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
/*
- * This parameter defines the compensation percentage of scan window
- * size in case scan is initiated during BT voice/BT ACL Guaranteed
- * link.
+ * Compensation percentage of WLAN passive scan window if initiated
+ * during BT A2DP
*
- * Unit: Percent
- * Range: 0-255 (0 - No compensation)
+ * Range: 0 - 1000 (%)
*/
- u8 scan_window_compensation;
+ CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
/*
- * Defines the antenna configuration.
+ * Fixed time ensured for BT traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 0 - Single Antenna; 1 - Dual Antenna
+ * Range: 0 - 1000 ms
*/
- u8 antenna_config;
+ CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
/*
- * The percent out of the Max consecutive beacon miss roaming trigger
- * which is the threshold for raising the priority of beacon
- * reception.
+ * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
+ * passive scan.
*
- * Range: 1-100
- * N = MaxConsecutiveBeaconMiss
- * P = coexMaxConsecutiveBeaconMissPrecent
- * Threshold = MIN( N-1, round(N * P / 100))
+ * Range: 0 - 1000 ms
*/
- u8 beacon_miss_threshold;
+ CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
/*
- * The RX rate threshold below which rate adaptation is assumed to be
- * occurring at the AP which will raise priority for ACTIVE_RX and RX
- * SP.
+ * Number of consecutive BT voice frames not interrupted by WLAN
*
- * Range: HW_BIT_RATE_*
+ * Range: 0 - 100
*/
- u32 rate_adaptation_threshold;
+ CONF_SG_HV3_MAX_SERVED,
/*
- * The SNR above which the RX rate threshold indicating AP rate
- * adaptation is valid
+ * Protection time of the DHCP procedure.
*
- * Range: -128 - 127
+ * Range: 0 - 100000 (ms)
*/
- s8 rate_adaptation_snr;
+ CONF_SG_DHCP_TIME,
+
+ /*
+ * Compensation percentage of WLAN active scan window if initiated
+ * during BT A2DP
+ *
+ * Range: 0 - 1000 (%)
+ */
+ CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
+ CONF_SG_TEMP_PARAM_1,
+ CONF_SG_TEMP_PARAM_2,
+ CONF_SG_TEMP_PARAM_3,
+ CONF_SG_TEMP_PARAM_4,
+ CONF_SG_TEMP_PARAM_5,
+ CONF_SG_PARAMS_MAX,
+ CONF_SG_PARAMS_ALL = 0xff
+};
+
+struct conf_sg_settings {
+ u32 params[CONF_SG_PARAMS_MAX];
+ u8 state;
};
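The soft-gemini (BT coexistence) configuration is now a flat u32 table indexed by the CONF_SG_* enum instead of a struct of named fields, so new firmware parameters only need new enum entries. A minimal sketch of how a caller might tweak one entry and push the whole table, assuming the wl->conf copy of these settings and the wl1271_acx_sg_cfg() helper that this patch already calls from wl1271_init_pta(); the wrapper function name is illustrative only:

/* Sketch: raise the DHCP protection window and re-send the SG table.
 * Assumes wl->mutex is held by the caller, as elsewhere in the driver. */
static int example_sg_set_dhcp_time(struct wl1271 *wl, u32 ms)
{
	wl->conf.sg.params[CONF_SG_DHCP_TIME] = ms;	/* e.g. 5000 ms */
	return wl1271_acx_sg_cfg(wl);			/* pushes all params */
}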
enum conf_rx_queue_type {
@@ -440,6 +674,19 @@ struct conf_tx_settings {
*/
u16 tx_compl_threshold;
+ /*
+ * The rate used for control messages and scanning on the 2.4GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate;
+
+ /*
+ * The rate used for control messages and scanning on the 5GHz band
+ *
+ * Range: CONF_HW_BIT_RATE_* bit mask
+ */
+ u32 basic_rate_5;
};
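basic_rate and basic_rate_5 are CONF_HW_BIT_RATE_* bitmasks, one per band, later narrowed to a single lowest rate for control traffic (see wl1271_min_rate_get() further down in this patch). A hedged illustration of composing such a mask; the CONF_HW_BIT_RATE_5_5MBPS/11MBPS names are assumed to follow the convention already used in this header:

/* Illustration only: a CCK-only basic-rate set for the 2.4 GHz band. */
u32 basic_2ghz = CONF_HW_BIT_RATE_1MBPS | CONF_HW_BIT_RATE_2MBPS |
		 CONF_HW_BIT_RATE_5_5MBPS | CONF_HW_BIT_RATE_11MBPS;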
enum {
@@ -509,65 +756,6 @@ enum {
CONF_TRIG_EVENT_DIR_BIDIR
};
-
-struct conf_sig_trigger {
- /*
- * The RSSI / SNR threshold value.
- *
- * FIXME: what is the range?
- */
- s16 threshold;
-
- /*
- * Minimum delay between two trigger events for this trigger in ms.
- *
- * Range: 0 - 60000
- */
- u16 pacing;
-
- /*
- * The measurement data source for this trigger.
- *
- * Range: CONF_TRIG_METRIC_*
- */
- u8 metric;
-
- /*
- * The trigger type of this trigger.
- *
- * Range: CONF_TRIG_EVENT_TYPE_*
- */
- u8 type;
-
- /*
- * The direction of the trigger.
- *
- * Range: CONF_TRIG_EVENT_DIR_*
- */
- u8 direction;
-
- /*
- * Hysteresis range of the trigger around the threshold (in dB)
- *
- * Range: u8
- */
- u8 hysteresis;
-
- /*
- * Index of the trigger rule.
- *
- * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
- */
- u8 index;
-
- /*
- * Enable / disable this rule (to use for clearing rules.)
- *
- * Range: 1 - Enabled, 2 - Not enabled
- */
- u8 enable;
-};
-
struct conf_sig_weights {
/*
@@ -686,12 +874,6 @@ struct conf_conn_settings {
u8 ps_poll_threshold;
/*
- * Configuration of signal (rssi/snr) triggers.
- */
- u8 sig_trigger_count;
- struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
-
- /*
* Configuration of signal average weights.
*/
struct conf_sig_weights sig_weights;
@@ -721,6 +903,22 @@ struct conf_conn_settings {
* Range 0 - 255
*/
u8 psm_entry_retries;
+
+ /*
+ *
+ * Specifies the interval of the connection keep-alive null-func
+ * frame in ms.
+ *
+ * Range: 1000 - 3600000
+ */
+ u32 keep_alive_interval;
+
+ /*
+ * Maximum listen interval supported by the driver in units of beacons.
+ *
+ * Range: u8
+ */
+ u8 max_listen_interval;
};
enum {
@@ -782,6 +980,43 @@ struct conf_pm_config_settings {
bool host_fast_wakeup_support;
};
+struct conf_roam_trigger_settings {
+ /*
+ * The minimum interval between two trigger events.
+ *
+ * Range: 0 - 60000 ms
+ */
+ u16 trigger_pacing;
+
+ /*
+ * The weight for rssi/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_beacon;
+
+ /*
+ * The weight for rssi/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_rssi_data;
+
+ /*
+ * The weight for snr/beacon average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_beacon;
+
+ /*
+ * The weight for snr/data frame average calculation
+ *
+ * Range: 0 - 255
+ */
+ u8 avg_weight_snr_data;
+};
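The avg_weight_* fields parameterize the firmware's running RSSI/SNR averages that feed the roaming trigger. The exact firmware formula is not part of this patch; a minimal sketch of the kind of exponentially weighted average an 8-bit weight usually implies, for illustration only:

/* Illustration: EWMA with an 8-bit weight w in [0, 255]; a new sample
 * contributes w/255 of the result, the old average the remainder. */
static s8 ewma_update(s8 avg, s8 sample, u8 w)
{
	return ((255 - w) * avg + w * sample) / 255;
}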
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -790,6 +1025,7 @@ struct conf_drv_settings {
struct conf_init_settings init;
struct conf_itrim_settings itrim;
struct conf_pm_config_settings pm_config;
+ struct conf_roam_trigger_settings roam_trigger;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 3f7ff8d0cf5..c239ef4d0b8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -29,6 +29,7 @@
#include "wl1271.h"
#include "wl1271_acx.h"
#include "wl1271_ps.h"
+#include "wl1271_io.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -277,13 +278,10 @@ static ssize_t gpio_power_write(struct file *file,
goto out;
}
- if (value) {
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- } else {
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
- }
+ if (value)
+ wl1271_power_on(wl);
+ else
+ wl1271_power_off(wl);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 7468ef10194..cf37aa6eb13 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -23,7 +23,6 @@
#include "wl1271.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_ps.h"
@@ -32,34 +31,24 @@
static int wl1271_event_scan_complete(struct wl1271 *wl,
struct event_mailbox *mbox)
{
- int size = sizeof(struct wl12xx_probe_req_template);
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
- wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
/* 2.4 GHz band scanned, scan 5 GHz band, pretend
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
+ /* FIXME: ie missing! */
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
+ NULL, 0,
wl->scan.active,
wl->scan.high_prio,
WL1271_SCAN_BAND_5_GHZ,
wl->scan.probe_requests);
} else {
- if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_2_4,
- NULL, size);
- else
- wl1271_cmd_template_set(wl,
- CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, size);
-
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
@@ -92,16 +81,9 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
} else {
- wl1271_error("PSM entry failed, giving up.\n");
- /* FIXME: this may need to be reconsidered. for now it
- is not possible to indicate to the mac80211
- afterwards that PSM entry failed. To maximize
- functionality (receiving data and remaining
- associated) make sure that we are in sync with the
- AP in regard of PSM mode. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- false);
+ wl1271_info("No ack to nullfunc from AP.");
wl->psm_entry_retry = 0;
+ *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -143,6 +125,24 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
return ret;
}
+static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct event_mailbox *mbox)
+{
+ enum nl80211_cqm_rssi_threshold_event event;
+ s8 metric = mbox->rssi_snr_trigger_metric[0];
+
+ wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
+
+ if (metric <= wl->rssi_thold)
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+
+ if (event != wl->last_rssi_event)
+ ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
+ wl->last_rssi_event = event;
+}
+
static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
{
wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -172,10 +172,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
+ *
+ * As there is a possibility that the driver disables PSM before receiving
+ * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
+ *
*/
- if (vector & BSS_LOSE_EVENT_ID &&
- test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
+ if (vector & BSS_LOSE_EVENT_ID) {
+ wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
@@ -188,17 +191,15 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (wl->vif && beacon_loss) {
- /* Obviously, it's dangerous to release the mutex while
- we are holding many of the variables in the wl struct.
- That's why it's done last in the function, and care must
- be taken that nothing more is done after this function
- returns. */
- mutex_unlock(&wl->mutex);
- ieee80211_beacon_loss(wl->vif);
- mutex_lock(&wl->mutex);
+ if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
+ if (wl->vif)
+ wl1271_event_rssi_trigger(wl, mbox);
}
+ if (wl->vif && beacon_loss)
+ ieee80211_connection_loss(wl->vif);
+
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 278f9206aa5..58371008f27 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -38,6 +38,14 @@
*/
enum {
+ RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0),
+ RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1),
+ RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2),
+ RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3),
+ RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4),
+ RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
+ RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
+ RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
MEASUREMENT_START_EVENT_ID = BIT(8),
MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
SCAN_COMPLETE_EVENT_ID = BIT(10),
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index d189e8fe05a..4447af1557f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -52,50 +52,65 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
int wl1271_init_templates_config(struct wl1271 *wl)
{
- int ret;
+ int ret, i;
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- sizeof(struct wl12xx_probe_req_template));
+ sizeof(struct wl12xx_probe_req_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
if (wl1271_11a_enabled()) {
+ size_t size = sizeof(struct wl12xx_probe_req_template);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL,
- sizeof(struct wl12xx_probe_req_template));
+ NULL, size, 0,
+ WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template));
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
- sizeof(struct wl12xx_ps_poll_template));
+ sizeof(struct wl12xx_ps_poll_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template));
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
sizeof
- (struct wl12xx_probe_resp_template));
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
sizeof
- (struct wl12xx_beacon_template));
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE, i,
+ WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -161,11 +176,11 @@ int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
- ret = wl1271_acx_sg_enable(wl);
+ ret = wl1271_acx_sg_cfg(wl);
if (ret < 0)
return ret;
- ret = wl1271_acx_sg_cfg(wl);
+ ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
if (ret < 0)
return ret;
@@ -237,7 +252,7 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -325,6 +340,24 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
out_free_memmap:
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
index 5cd94d5666c..c8759acef13 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -28,30 +28,29 @@
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
-static int wl1271_translate_addr(struct wl1271 *wl, int addr)
+#define OCP_CMD_LOOP 32
+
+#define OCP_CMD_WRITE 0x1
+#define OCP_CMD_READ 0x2
+
+#define OCP_READY_MASK BIT(18)
+#define OCP_STATUS_MASK (BIT(16) | BIT(17))
+
+#define OCP_STATUS_NO_RESP 0x00000
+#define OCP_STATUS_OK 0x10000
+#define OCP_STATUS_REQ_FAILED 0x20000
+#define OCP_STATUS_RESP_ERROR 0x30000
+
+void wl1271_disable_interrupts(struct wl1271 *wl)
{
- /*
- * To translate, first check to which window of addresses the
- * particular address belongs. Then subtract the starting address
- * of that window from the address. Then, add offset of the
- * translated region.
- *
- * The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
- * get the offset to the new region.
- *
- * Currently, only the two first regions are addressed, and the
- * assumption is that all addresses will fall into either of those
- * two.
- */
- if ((addr >= wl->part.reg.start) &&
- (addr < wl->part.reg.start + wl->part.reg.size))
- return addr - wl->part.reg.start + wl->part.mem.size;
- else
- return addr - wl->part.mem.start;
+ wl->if_ops->disable_irq(wl);
+}
+
+void wl1271_enable_interrupts(struct wl1271 *wl)
+{
+ wl->if_ops->enable_irq(wl);
}
/* Set the SPI partitions to access the chip addresses
@@ -117,54 +116,12 @@ int wl1271_set_partition(struct wl1271 *wl,
void wl1271_io_reset(struct wl1271 *wl)
{
- wl1271_spi_reset(wl);
+ wl->if_ops->reset(wl);
}
void wl1271_io_init(struct wl1271 *wl)
{
- wl1271_spi_init(wl);
-}
-
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_write(wl, addr, buf, len, fixed);
-}
-
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- wl1271_spi_raw_read(wl, addr, buf, len, fixed);
-}
-
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_read(wl, physical, buf, len, fixed);
-}
-
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_write(wl, physical, buf, len, fixed);
-}
-
-u32 wl1271_read32(struct wl1271 *wl, int addr)
-{
- return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
-}
-
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+ wl->if_ops->init(wl);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index fa9a0b35788..bc806c74c63 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -25,44 +25,145 @@
#ifndef __WL1271_IO_H__
#define __WL1271_IO_H__
+#include "wl1271_reg.h"
+
+#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
+
+#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0
+#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
+#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
+#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
+#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
+#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
+#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+
+#define HW_ACCESS_REGISTER_SIZE 4
+
+#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
+
struct wl1271;
+void wl1271_disable_interrupts(struct wl1271 *wl);
+void wl1271_enable_interrupts(struct wl1271 *wl);
+
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-/* Raw target IO, address is not translated */
-void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
+static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
+{
+ return wl->if_ops->dev(wl);
+}
-/* Translated target IO */
-void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-u32 wl1271_read32(struct wl1271 *wl, int addr);
-void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+/* Raw target IO, address is not translated */
+static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->write(wl, addr, buf, len, fixed);
+}
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p);
+static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl->if_ops->read(wl, addr, buf, len, fixed);
+}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
{
wl1271_raw_read(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
- return wl->buffer_32;
+ return le32_to_cpu(wl->buffer_32);
}
static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
{
- wl->buffer_32 = val;
+ wl->buffer_32 = cpu_to_le32(val);
wl1271_raw_write(wl, addr, &wl->buffer_32,
sizeof(wl->buffer_32), false);
}
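The added cpu_to_le32()/le32_to_cpu() conversions make 32-bit register access correct on big-endian hosts. For the conversions to be type-clean, the scratch word they go through would need to be declared little-endian in struct wl1271; this declaration is an assumption and is not part of the hunk above:

/* Assumed scratch word for 32-bit register I/O in struct wl1271. */
__le32 buffer_32;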
+
+/* Translated target IO */
+static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
+{
+ /*
+ * To translate, first check to which window of addresses the
+ * particular address belongs. Then subtract the starting address
+ * of that window from the address. Then, add offset of the
+ * translated region.
+ *
+ * The translated regions occur next to each other in physical device
+ * memory, so just add the sizes of the preceding address regions to
+ * get the offset to the new region.
+ *
+ * Currently, only the first two regions are addressed, and the
+ * assumption is that all addresses will fall into either of those
+ * two.
+ */
+ if ((addr >= wl->part.reg.start) &&
+ (addr < wl->part.reg.start + wl->part.reg.size))
+ return addr - wl->part.reg.start + wl->part.mem.size;
+ else
+ return addr - wl->part.mem.start;
+}
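A worked example of the translation above, using hypothetical partition values: if part.mem = {start 0x0, size 0x16800} and part.reg = {start 0x300000, size 0x8800}, then register address 0x300004 falls into the register window and maps to 0x300004 - 0x300000 + 0x16800 = 0x16804, while memory address 0x1000 maps straight through to 0x1000.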
+
+static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_read(wl, physical, buf, len, fixed);
+}
+
+static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_write(wl, physical, buf, len, fixed);
+}
+
+static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+{
+ return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
+
+static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+}
+
+static inline void wl1271_power_off(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+static inline void wl1271_power_on(struct wl1271 *wl)
+{
+ wl->if_ops->power(wl, true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+}
+
+
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p);
+
+/* Functions from wl1271_main.c */
+
+int wl1271_register_hw(struct wl1271 *wl);
+void wl1271_unregister_hw(struct wl1271 *wl);
+int wl1271_init_ieee80211(struct wl1271 *wl);
+struct ieee80211_hw *wl1271_alloc_hw(void);
+int wl1271_free_hw(struct wl1271 *wl);
+
#endif
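Everything in this header now goes through wl->if_ops, which is what lets the SPI glue (and later an SDIO front-end) share the core driver. A sketch of what such an ops table could look like; the struct name and member list below are inferred only from the calls visible in this patch, not taken from another file:

struct wl1271_if_operations {
	void (*read)(struct wl1271 *wl, int addr, void *buf,
		     size_t len, bool fixed);
	void (*write)(struct wl1271 *wl, int addr, void *buf,
		      size_t len, bool fixed);
	void (*reset)(struct wl1271 *wl);
	void (*init)(struct wl1271 *wl);
	void (*power)(struct wl1271 *wl, bool enable);
	struct device *(*dev)(struct wl1271 *wl);
	void (*enable_irq)(struct wl1271 *wl);
	void (*disable_irq)(struct wl1271 *wl);
};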
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 65a1aeba241..b7d9137851a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -22,23 +22,19 @@
*/
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
-#include <linux/spi/wl12xx.h>
#include <linux/inetdevice.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_tx.h"
@@ -54,17 +50,57 @@
static struct conf_drv_settings default_conf = {
.sg = {
- .per_threshold = 7500,
- .max_scan_compensation_time = 120000,
- .nfs_sample_interval = 400,
- .load_ratio = 50,
- .auto_ps_mode = 0,
- .probe_req_compensation = 170,
- .scan_window_compensation = 50,
- .antenna_config = 0,
- .beacon_miss_threshold = 60,
- .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
- .rate_adaptation_snr = 0
+ .params = {
+ [CONF_SG_BT_PER_THRESHOLD] = 7500,
+ [CONF_SG_HV3_MAX_OVERRIDE] = 0,
+ [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
+ [CONF_SG_BT_LOAD_RATIO] = 50,
+ [CONF_SG_AUTO_PS_MODE] = 0,
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_RATE_ADAPT_THRESH] = 12,
+ [CONF_SG_RATE_ADAPT_SNR] = 0,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
+ /* Note: with UPSD, this should be 4 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
+ /* Note: with UPSD, this should be 15 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
+ /* Note: with UPSD, this should be 50 */
+ [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
+ /* Note: with UPSD, this should be 10 */
+ [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
+ [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
+ [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
+ [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
+ [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ },
+ .state = CONF_SG_PROTECTIVE,
},
.rx = {
.rx_msdu_life_time = 512000,
@@ -81,8 +117,7 @@ static struct conf_drv_settings default_conf = {
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
- CONF_HW_BIT_RATE_2MBPS,
+ .enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -179,11 +214,13 @@ static struct conf_drv_settings default_conf = {
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.tx_compl_timeout = 700,
- .tx_compl_threshold = 4
+ .tx_compl_threshold = 4,
+ .basic_rate = CONF_HW_BIT_RATE_1MBPS,
+ .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
- .listen_interval = 0,
+ .listen_interval = 1,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 1,
.bcn_filt_ie = {
@@ -198,38 +235,11 @@ static struct conf_drv_settings default_conf = {
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 20,
- .sig_trigger_count = 2,
- .sig_trigger = {
- [0] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_LOW,
- .hysteresis = 2,
- .index = 0,
- .enable = 1
- },
- [1] = {
- .threshold = -75,
- .pacing = 500,
- .metric = CONF_TRIG_METRIC_RSSI_BEACON,
- .type = CONF_TRIG_EVENT_TYPE_EDGE,
- .direction = CONF_TRIG_EVENT_DIR_HIGH,
- .hysteresis = 2,
- .index = 1,
- .enable = 1
- }
- },
- .sig_weights = {
- .rssi_bcn_avg_weight = 10,
- .rssi_pkt_avg_weight = 10,
- .snr_bcn_avg_weight = 10,
- .snr_pkt_avg_weight = 10
- },
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 10,
- .psm_entry_retries = 3
+ .psm_entry_retries = 3,
+ .keep_alive_interval = 55000,
+ .max_listen_interval = 20,
},
.init = {
.radioparam = {
@@ -243,9 +253,32 @@ static struct conf_drv_settings default_conf = {
.pm_config = {
.host_clk_settling_time = 5000,
.host_fast_wakeup_support = false
+ },
+ .roam_trigger = {
+ /* FIXME: due to firmware bug, must use value 1 for now */
+ .trigger_pacing = 1,
+ .avg_weight_rssi_beacon = 20,
+ .avg_weight_rssi_data = 10,
+ .avg_weight_snr_beacon = 20,
+ .avg_weight_snr_data = 10
}
};
+static void wl1271_device_release(struct device *dev)
+{
+
+}
+
+static struct platform_device wl1271_device = {
+ .name = "wl1271",
+ .id = -1,
+
+ /* the device model insists on having a release function */
+ .dev = {
+ .release = wl1271_device_release,
+ },
+};
+
static LIST_HEAD(wl_list);
static void wl1271_conf_init(struct wl1271 *wl)
@@ -298,7 +331,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
goto out_free_memmap;
/* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl);
+ ret = wl1271_acx_conn_monit_params(wl, false);
if (ret < 0)
goto out_free_memmap;
@@ -365,30 +398,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl1271_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_power_off(struct wl1271 *wl)
-{
- wl->set_power(false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
-static void wl1271_power_on(struct wl1271 *wl)
-{
- wl->set_power(true);
- set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-}
-
static void wl1271_fw_status(struct wl1271 *wl,
struct wl1271_fw_status *status)
{
+ struct timespec ts;
u32 total = 0;
int i;
- wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -413,14 +430,19 @@ static void wl1271_fw_status(struct wl1271 *wl,
ieee80211_queue_work(wl->hw, &wl->tx_work);
/* update the host-chipset time offset */
- wl->time_offset = jiffies_to_usecs(jiffies) -
- le32_to_cpu(status->fw_localtime);
+ getnstimeofday(&ts);
+ wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+ (s64)le32_to_cpu(status->fw_localtime);
}
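The new offset computation approximates microseconds by shifting nanoseconds right by 10, i.e. dividing by 1024 instead of 1000 (about 2.3% low, but far cheaper than a 64-bit divide); for example 1 s = 1,000,000,000 ns >> 10 = 976,562, against a true 1,000,000 us.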
+#define WL1271_IRQ_MAX_LOOPS 10
+
static void wl1271_irq_work(struct work_struct *work)
{
int ret;
u32 intr;
+ int loopcount = WL1271_IRQ_MAX_LOOPS;
+ unsigned long flags;
struct wl1271 *wl =
container_of(work, struct wl1271, irq_work);
@@ -428,91 +450,78 @@ static void wl1271_irq_work(struct work_struct *work)
wl1271_debug(DEBUG_IRQ, "IRQ work");
- if (wl->state == WL1271_STATE_OFF)
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, true);
if (ret < 0)
goto out;
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
-
- wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
- if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- goto out_sleep;
- }
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
+ clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ loopcount--;
+
+ wl1271_fw_status(wl, wl->fw_status);
+ intr = le32_to_cpu(wl->fw_status->intr);
+ if (!intr) {
+ wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ continue;
+ }
- intr &= WL1271_INTR_MASK;
+ intr &= WL1271_INTR_MASK;
- if (intr & WL1271_ACX_INTR_EVENT_A) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0);
- }
+ if (intr & WL1271_ACX_INTR_DATA) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- if (intr & WL1271_ACX_INTR_EVENT_B) {
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1);
- }
+ /* check for tx results */
+ if (wl->fw_status->tx_results_counter !=
+ (wl->tx_results_count & 0xff))
+ wl1271_tx_complete(wl);
- if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
- wl1271_debug(DEBUG_IRQ,
- "WL1271_ACX_INTR_INIT_COMPLETE");
+ wl1271_rx(wl, wl->fw_status);
+ }
- if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
+ if (intr & WL1271_ACX_INTR_EVENT_A) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
+ wl1271_event_handle(wl, 0);
+ }
- if (intr & WL1271_ACX_INTR_DATA) {
- u8 tx_res_cnt = wl->fw_status->tx_results_counter -
- wl->tx_results_count;
+ if (intr & WL1271_ACX_INTR_EVENT_B) {
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
+ wl1271_event_handle(wl, 1);
+ }
- wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+ if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
+ wl1271_debug(DEBUG_IRQ,
+ "WL1271_ACX_INTR_INIT_COMPLETE");
- /* check for tx results */
- if (tx_res_cnt)
- wl1271_tx_complete(wl, tx_res_cnt);
+ if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
+ wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
- wl1271_rx(wl, wl->fw_status);
+ spin_lock_irqsave(&wl->wl_lock, flags);
}
-out_sleep:
- wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ else
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
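The work function above now drains interrupts in a bounded loop keyed off WL1271_FLAG_IRQ_PENDING and WL1271_FLAG_IRQ_RUNNING; the hard IRQ handler itself is removed from this file below and is expected to live in the bus glue. A minimal sketch of what such a bus-side handler would do under that scheme, using the flag names from this patch; it is an illustration, not the handler the patch adds elsewhere:

/* Sketch of a bus-glue IRQ handler cooperating with wl1271_irq_work(). */
static irqreturn_t example_hw_irq(int irq, void *cookie)
{
	struct wl1271 *wl = cookie;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
	if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->irq_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return IRQ_HANDLED;
}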
-static irqreturn_t wl1271_irq(int irq, void *cookie)
-{
- struct wl1271 *wl;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- wl = cookie;
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_HANDLED;
-}
-
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -545,46 +554,12 @@ out:
return ret;
}
-static int wl1271_update_mac_addr(struct wl1271 *wl)
-{
- int ret = 0;
- u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
-
- /* get mac address from the NVS */
- wl->mac_addr[0] = nvs_ptr[11];
- wl->mac_addr[1] = nvs_ptr[10];
- wl->mac_addr[2] = nvs_ptr[6];
- wl->mac_addr[3] = nvs_ptr[5];
- wl->mac_addr[4] = nvs_ptr[4];
- wl->mac_addr[5] = nvs_ptr[3];
-
- /* FIXME: if it is a zero-address, we should bail out. Now, instead,
- we randomize an address */
- if (is_zero_ether_addr(wl->mac_addr)) {
- static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
- memcpy(wl->mac_addr, nokia_oui, 3);
- get_random_bytes(wl->mac_addr + 3, 3);
-
- /* update this address to the NVS */
- nvs_ptr[11] = wl->mac_addr[0];
- nvs_ptr[10] = wl->mac_addr[1];
- nvs_ptr[6] = wl->mac_addr[2];
- nvs_ptr[5] = wl->mac_addr[3];
- nvs_ptr[4] = wl->mac_addr[4];
- nvs_ptr[3] = wl->mac_addr[5];
- }
-
- SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
-
- return ret;
-}
-
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev);
+ ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get nvs file: %d", ret);
@@ -608,8 +583,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
- ret = wl1271_update_mac_addr(wl);
-
out:
release_firmware(fw);
@@ -826,15 +799,13 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) {
- ieee80211_stop_queues(wl->hw);
+ if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
- /*
- * FIXME: this is racy, the variable is not properly
- * protected. Maybe fix this by removing the stupid
- * variable altogether and checking the real queue state?
- */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_stop_queues(wl->hw);
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return NETDEV_TX_OK;
@@ -882,7 +853,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl == wl_temp)
break;
}
- if (wl == NULL)
+ if (wl != wl_temp)
return NOTIFY_DONE;
/* Get the interface IP address for the device. "ifa" will become
@@ -929,13 +900,60 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+
+ /*
+ * We have to delay the booting of the hardware because
+ * we need to know the local MAC address before downloading and
+ * initializing the firmware. The MAC address cannot be changed
+ * after boot, and without the proper MAC address, the firmware
+ * will not function properly.
+ *
+ * The MAC address is first known when the corresponding interface
+ * is added. That is where we will initialize the hardware.
+ */
+
+ return 0;
+}
+
+static void wl1271_op_stop(struct ieee80211_hw *hw)
+{
+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
struct wl1271 *wl = hw->priv;
int retries = WL1271_BOOT_RETRIES;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 start");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
+ if (wl->vif) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ wl->vif = vif;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ wl->bss_type = BSS_TYPE_STA_BSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ wl->bss_type = BSS_TYPE_IBSS;
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
if (wl->state != WL1271_STATE_OFF) {
wl1271_error("cannot start because not in off state: %d",
@@ -991,19 +1009,20 @@ out:
return ret;
}
-static void wl1271_op_stop(struct ieee80211_hw *hw)
+static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int i;
- wl1271_info("down");
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
-
unregister_inetaddr_notifier(&wl1271_dev_notifier);
- list_del(&wl->list);
mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+
+ wl1271_info("down");
+
+ list_del(&wl->list);
WARN_ON(wl->state != WL1271_STATE_ON);
@@ -1032,6 +1051,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
wl->ssid_len = 0;
wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
@@ -1041,163 +1061,142 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->tx_security_last_seq = 0;
- wl->tx_security_seq_16 = 0;
- wl->tx_security_seq_32 = 0;
+ wl->tx_security_seq = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->flags = 0;
+ wl->vif = NULL;
+ wl->filters = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
mutex_unlock(&wl->mutex);
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- struct wl1271 *wl = hw->priv;
- int ret = 0;
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- vif->type, vif->addr);
+ /* combine requested filters with current filter config */
+ filters = wl->filters | filters;
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- ret = -EBUSY;
- goto out;
+ wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
+
+ if (filters & FIF_PROMISC_IN_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
+ wl->rx_config &= ~CFG_UNI_FILTER_EN;
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_BCN_PRBRESP_PROMISC) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ wl->rx_config &= ~CFG_SSID_FILTER_EN;
}
+ if (filters & FIF_OTHER_BSS) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ }
+ if (filters & FIF_CONTROL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
+ wl->rx_filter |= CFG_RX_CTL_EN;
+ }
+ if (filters & FIF_FCSFAIL) {
+ wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
+ wl->rx_filter |= CFG_RX_FCS_ERROR;
+ }
+}
- wl->vif = vif;
+static int wl1271_dummy_join(struct wl1271 *wl)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- break;
- case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- break;
- default:
- ret = -EOPNOTSUPP;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ /* pass through frames from all BSS */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
+
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
+ if (ret < 0)
goto out;
- }
- /* FIXME: what if conf->mac_addr changes? */
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
out:
- mutex_unlock(&wl->mutex);
return ret;
}
-static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct wl1271 *wl = hw->priv;
-
- mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
- wl->vif = NULL;
- mutex_unlock(&wl->mutex);
-}
-
-#if 0
-static int wl1271_op_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
+static int wl1271_join(struct wl1271 *wl, bool set_assoc)
{
- struct wl1271 *wl = hw->priv;
- struct sk_buff *beacon;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM",
- conf->bssid);
- wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid,
- conf->ssid_len);
+ /*
+ * One of the side effects of the JOIN command is that it clears
+ * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
+ * to a WPA/WPA2 access point will therefore kill the data-path.
+ * Currently there is no supported scenario for JOIN during
+ * association - if it becomes a supported scenario, the WPA/WPA2 keys
+ * must be handled somehow.
+ *
+ */
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_info("JOIN while associated.");
- mutex_lock(&wl->mutex);
+ if (set_assoc)
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_cmd_join(wl, wl->set_bss_type);
if (ret < 0)
goto out;
- if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
- wl1271_debug(DEBUG_MAC80211, "bssid changed");
-
- memcpy(wl->bssid, conf->bssid, ETH_ALEN);
-
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- goto out_sleep;
-
- ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0)
- goto out_sleep;
- }
-
- wl->ssid_len = conf->ssid_len;
- if (wl->ssid_len)
- memcpy(wl->ssid, conf->ssid, wl->ssid_len);
-
- if (conf->changed & IEEE80211_IFCC_BEACON) {
- beacon = ieee80211_beacon_get(hw, vif);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data, beacon->len);
-
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE,
- beacon->data, beacon->len);
-
- dev_kfree_skb(beacon);
-
- if (ret < 0)
- goto out_sleep;
- }
-
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-
- return ret;
-}
-#endif
-
-static int wl1271_join_channel(struct wl1271 *wl, int channel)
-{
- int ret = 0;
- /* we need to use a dummy BSSID for now */
- static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
- 0xad, 0xbe, 0xef };
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
- /* the dummy join is not required for ad-hoc */
- if (wl->bss_type == BSS_TYPE_IBSS)
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- /* disable mac filter, so we hear everything */
- wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ /*
+ * The join command disables the keep-alive mode, shuts down its process,
+ * and also clears the template config, so we need to reset it all after
+ * the join. The acx_aid starts the keep-alive process, and the order
+ * of the commands below is relevant.
+ */
+ ret = wl1271_acx_keep_alive_mode(wl, true);
+ if (ret < 0)
+ goto out;
- wl->channel = channel;
- memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+ ret = wl1271_acx_aid(wl, wl->aid);
+ if (ret < 0)
+ goto out;
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_cmd_build_klv_null_data(wl);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
+ ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_VALID);
+ if (ret < 0)
+ goto out;
out:
return ret;
}
-static int wl1271_unjoin_channel(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl)
{
int ret;
@@ -1207,14 +1206,41 @@ static int wl1271_unjoin_channel(struct wl1271 *wl)
goto out;
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
- wl->channel = 0;
memset(wl->bssid, 0, ETH_ALEN);
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+ /* stop filtering packets based on bssid */
+ wl1271_configure_filters(wl, FIF_OTHER_BSS);
out:
return ret;
}
+static void wl1271_set_band_rate(struct wl1271 *wl)
+{
+ if (wl->band == IEEE80211_BAND_2GHZ)
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ else
+ wl->basic_rate_set = wl->conf.tx.basic_rate_5;
+}
+
+static u32 wl1271_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
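wl1271_min_rate_get() simply picks the lowest set bit of the basic rate mask as the rate for control frames. An equivalent one-liner using the kernel's ffs() helper, shown only as an illustration of the same selection:

/* Equivalent lowest-set-bit selection; illustration only. */
u32 min_rate = wl->basic_rate_set ? BIT(ffs(wl->basic_rate_set) - 1) : 0;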
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1231,38 +1257,62 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&wl->mutex);
- wl->band = conf->channel->band;
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
+ /* if the channel changes while joined, join again */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
+ ((wl->band != conf->channel->band) ||
+ (wl->channel != channel))) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+
+ /*
+ * FIXME: the mac80211 should really provide a fixed rate
+ * to use here. for now, just use the smallest possible rate
+ * for the band as a fixed rate for association frames and
+ * other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
+
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ wl1271_warning("rate policy for update channel "
+ "failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join to update channel "
+ "failed %d", ret);
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (conf->flags & IEEE80211_CONF_IDLE &&
test_bit(WL1271_FLAG_JOINED, &wl->flags))
- wl1271_unjoin_channel(wl);
+ wl1271_unjoin(wl);
else if (!(conf->flags & IEEE80211_CONF_IDLE))
- wl1271_join_channel(wl, channel);
+ wl1271_dummy_join(wl);
if (conf->flags & IEEE80211_CONF_IDLE) {
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = wl1271_min_rate_get(wl);
wl->sta_rate_set = 0;
wl1271_acx_rate_policies(wl);
- }
+ wl1271_acx_keep_alive_config(
+ wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ } else
+ clear_bit(WL1271_FLAG_IDLE, &wl->flags);
}
- /* if the channel changes while joined, join again */
- if (channel != wl->channel &&
- test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- wl->channel = channel;
- /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
- ret = wl1271_cmd_join(wl);
- if (ret < 0)
- wl1271_warning("cmd join to update channel failed %d",
- ret);
- } else
- wl->channel = channel;
-
if (conf->flags & IEEE80211_CONF_PS &&
!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1273,13 +1323,13 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* through the bss_info_changed() hook.
*/
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- wl1271_info("psm enabled");
+ wl1271_debug(DEBUG_PSM, "psm enabled");
ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- wl1271_info("psm disabled");
+ wl1271_debug(DEBUG_PSM, "psm disabled");
clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1311,11 +1361,15 @@ struct wl1271_filter_params {
u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
-static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
struct wl1271_filter_params *fp;
- int i;
+ struct netdev_hw_addr *ha;
+ struct wl1271 *wl = hw->priv;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ return 0;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
@@ -1324,21 +1378,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
}
/* update multicast filtering parameters */
- fp->enabled = true;
- if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
- mc_count = 0;
- fp->enabled = false;
- }
-
fp->mc_list_length = 0;
- for (i = 0; i < mc_count; i++) {
- if (mc_list->da_addrlen == ETH_ALEN) {
+ if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
+ fp->enabled = false;
+ } else {
+ fp->enabled = true;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_list[fp->mc_list_length],
- mc_list->da_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
fp->mc_list_length++;
- } else
- wl1271_warning("Unknown mc address length.");
- mc_list = mc_list->next;
+ }
}
return (u64)(unsigned long)fp;
@@ -1363,15 +1412,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- if (wl->state == WL1271_STATE_OFF)
+ *total &= WL1271_SUPPORTED_FILTERS;
+ changed &= WL1271_SUPPORTED_FILTERS;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
- *total &= WL1271_SUPPORTED_FILTERS;
- changed &= WL1271_SUPPORTED_FILTERS;
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
@@ -1382,14 +1432,14 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- kfree(fp);
-
- /* FIXME: We still need to set our filters properly */
-
/* determine, whether supported filter values have changed */
if (changed == 0)
goto out_sleep;
+ /* configure filters */
+ wl->filters = *total;
+ wl1271_configure_filters(wl, 0);
+
/* apply configured filters */
ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
if (ret < 0)
@@ -1400,6 +1450,7 @@ out_sleep:
out:
mutex_unlock(&wl->mutex);
+ kfree(fp);
}
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1450,15 +1501,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
case ALG_CCMP:
key_type = KEY_AES;
key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = wl->tx_security_seq_32;
- tx_seq_16 = wl->tx_security_seq_16;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -1508,8 +1559,6 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
default:
wl1271_error("Unsupported key cmd 0x%x", cmd);
ret = -EOPNOTSUPP;
- goto out_sleep;
-
break;
}
@@ -1524,6 +1573,7 @@ out:
}
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
@@ -1545,10 +1595,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
if (wl1271_11a_enabled())
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_DUAL, 3);
else
- ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
+ ret = wl1271_cmd_scan(hw->priv, ssid, len,
+ req->ie, req->ie_len, 1, 0,
WL1271_SCAN_BAND_2_4_GHZ, 3);
wl1271_ps_elp_sleep(wl);
@@ -1562,10 +1614,13 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
- int ret;
+ int ret = 0;
mutex_lock(&wl->mutex);
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
@@ -1607,6 +1662,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
enum wl1271_cmd_ps_mode mode;
struct wl1271 *wl = hw->priv;
bool do_join = false;
+ bool set_assoc = false;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1617,20 +1673,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type == BSS_TYPE_IBSS) {
- /* FIXME: This implements rudimentary ad-hoc support -
- proper templates are on the wish list and notification
- on when they change. This patch will update the templates
- on every call to this function. */
+ if ((changed & BSS_CHANGED_BEACON_INT) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+ bss_conf->beacon_int);
+
+ wl->beacon_int = bss_conf->beacon_int;
+ do_join = true;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+
if (beacon) {
struct ieee80211_hdr *hdr;
wl1271_ssid_set(wl, beacon);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
if (ret < 0) {
dev_kfree_skb(beacon);
@@ -1645,7 +1710,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_template_set(wl,
CMD_TEMPL_PROBE_RESPONSE,
beacon->data,
- beacon->len);
+ beacon->len, 0,
+ wl1271_min_rate_get(wl));
dev_kfree_skb(beacon);
if (ret < 0)
goto out_sleep;
@@ -1655,20 +1721,48 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ (wl->bss_type == BSS_TYPE_IBSS)) {
+ wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
+ bss_conf->enable_beacon ? "enabled" : "disabled");
+
+ if (bss_conf->enable_beacon)
+ wl->set_bss_type = BSS_TYPE_IBSS;
+ else
+ wl->set_bss_type = BSS_TYPE_STA_BSS;
+ do_join = true;
+ }
+
+ if (changed & BSS_CHANGED_CQM) {
+ bool enable = false;
+ if (bss_conf->cqm_rssi_thold)
+ enable = true;
+ ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ bss_conf->cqm_rssi_thold,
+ bss_conf->cqm_rssi_hyst);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if ((changed & BSS_CHANGED_BSSID) &&
/*
* Now we know the correct bssid, so we send a new join command
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- wl->rx_config |= CFG_BSSID_FILTER_EN;
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+
ret = wl1271_cmd_build_null_data(wl);
- if (ret < 0) {
- wl1271_warning("cmd buld null data failed %d",
- ret);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_build_qos_null_data(wl);
+ if (ret < 0)
goto out_sleep;
- }
+
+ /* filter out all packets not from this BSSID */
+ wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
@@ -1676,8 +1770,21 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
+ u32 rates;
wl->aid = bss_conf->aid;
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_assoc = true;
+
+ /*
+ * use basic rates from AP, and determine lowest rate
+ * to use with control frames.
+ */
+ rates = bss_conf->basic_rates;
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
+ rates);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
/*
* with wl1271, we don't need to update the
@@ -1689,7 +1796,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_aid(wl, wl->aid);
+ /*
+ * The SSID is intentionally set to NULL here - the
+ * firmware will set the probe request with a
+ * broadcast SSID regardless of what we set in the
+ * template.
+ */
+ ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
+ NULL, 0, wl->band);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* enable the connection monitoring feature */
+ ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
goto out_sleep;
@@ -1705,6 +1822,22 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* use defaults when not associated */
clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
+
+ /* revert back to minimum rates for the current band */
+ wl1271_set_band_rate(wl);
+ wl->basic_rate = wl1271_min_rate_get(wl);
+ ret = wl1271_acx_rate_policies(wl);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* disable connection monitor features */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* Disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+
+ if (ret < 0)
+ goto out_sleep;
}
}
@@ -1739,12 +1872,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (do_join) {
- ret = wl1271_cmd_join(wl);
+ ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out_sleep;
}
- set_bit(WL1271_FLAG_JOINED, &wl->flags);
}
out_sleep:
@@ -1758,6 +1890,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ u8 ps_scheme;
int ret;
mutex_lock(&wl->mutex);
@@ -1768,17 +1901,22 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (ret < 0)
goto out;
+ /* mac80211 passes the txop in units of 32 us; the firmware expects microseconds */
ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
- params->aifs, params->txop);
+ params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
+ if (params->uapsd)
+ ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = CONF_PS_SCHEME_LEGACY;
+
ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
- CONF_PS_SCHEME_LEGACY_PSPOLL,
- CONF_ACK_POLICY_LEGACY, 0, 0);
+ ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
if (ret < 0)
goto out_sleep;
@@ -1852,6 +1990,36 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
+/* mapping to indexes for wl1271_rates */
+static const u8 wl1271_rate_to_idx_2ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 11, /* CONF_HW_RXTX_RATE_54 */
+ 10, /* CONF_HW_RXTX_RATE_48 */
+ 9, /* CONF_HW_RXTX_RATE_36 */
+ 8, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 7, /* CONF_HW_RXTX_RATE_18 */
+ 6, /* CONF_HW_RXTX_RATE_12 */
+ 3, /* CONF_HW_RXTX_RATE_11 */
+ 5, /* CONF_HW_RXTX_RATE_9 */
+ 4, /* CONF_HW_RXTX_RATE_6 */
+ 2, /* CONF_HW_RXTX_RATE_5_5 */
+ 1, /* CONF_HW_RXTX_RATE_2 */
+ 0 /* CONF_HW_RXTX_RATE_1 */
+};
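+/*
+ * The table is indexed by the firmware CONF_HW_RXTX_RATE_* value and
+ * yields the matching index into the mac80211 bitrate table for the
+ * band, or CONF_HW_RXTX_RATE_UNSUPPORTED; the 5 GHz variant below
+ * follows the same layout.
+ */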
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
.channels = wl1271_channels,
@@ -1934,6 +2102,35 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
{ .hw_value = 165, .center_freq = 5825},
};
+/* mapping to indexes for wl1271_rates_5ghz */
+static const u8 wl1271_rate_to_idx_5ghz[] = {
+ /* MCS rates are used only with 11n */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+
+ 7, /* CONF_HW_RXTX_RATE_54 */
+ 6, /* CONF_HW_RXTX_RATE_48 */
+ 5, /* CONF_HW_RXTX_RATE_36 */
+ 4, /* CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
+
+ 3, /* CONF_HW_RXTX_RATE_18 */
+ 2, /* CONF_HW_RXTX_RATE_12 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
+ 1, /* CONF_HW_RXTX_RATE_9 */
+ 0, /* CONF_HW_RXTX_RATE_6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
+};
static struct ieee80211_supported_band wl1271_band_5ghz = {
.channels = wl1271_channels_5ghz,
@@ -1942,13 +2139,17 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
+static const u8 *wl1271_band_rate_to_idx[] = {
+ [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
+ [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
+};
+
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.config = wl1271_op_config,
-/* .config_interface = wl1271_op_config_interface, */
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
@@ -1960,7 +2161,113 @@ static const struct ieee80211_ops wl1271_ops = {
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
-static int wl1271_register_hw(struct wl1271 *wl)
+
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
+{
+ u8 idx;
+
+ BUG_ON(wl->band >= ARRAY_SIZE(wl1271_band_rate_to_idx));
+
+ if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
+ wl1271_error("Illegal RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ idx = wl1271_band_rate_to_idx[wl->band][rate];
+ if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
+ wl1271_error("Unsupported RX rate from HW: %d", rate);
+ return 0;
+ }
+
+ return idx;
+}
+
+static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
+ wl->sg_enabled);
+ mutex_unlock(&wl->mutex);
+
+ return len;
+
+}
+
+static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ unsigned long res;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &res);
+
+ if (ret < 0) {
+ wl1271_warning("incorrect value written to bt_coex_mode");
+ return count;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ res = !!res;
+
+ if (res == wl->sg_enabled)
+ goto out;
+
+ wl->sg_enabled = res;
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ wl1271_acx_sg_enable(wl, wl->sg_enabled);
+ wl1271_ps_elp_sleep(wl);
+
+ out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
+ wl1271_sysfs_show_bt_coex_state,
+ wl1271_sysfs_store_bt_coex_state);
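+
+/*
+ * Usage example (the exact path depends on where the "wl1271" platform
+ * device is registered, typically /sys/devices/platform/wl1271):
+ *   cat /sys/devices/platform/wl1271/bt_coex_state      # 1 = SG enabled
+ *   echo 0 > /sys/devices/platform/wl1271/bt_coex_state # disable SG
+ */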
+
+static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+
+ /* FIXME: what's the maximum length of buf? page size? */
+ len = 500;
+
+ mutex_lock(&wl->mutex);
+ if (wl->hw_pg_ver >= 0)
+ len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ else
+ len = snprintf(buf, len, "n/a\n");
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+/* read-only: there is no store handler */
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
+ wl1271_sysfs_show_hw_pg_ver, NULL);
+
+int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -1981,8 +2288,17 @@ static int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_register_hw);
+
+void wl1271_unregister_hw(struct wl1271 *wl)
+{
+ ieee80211_unregister_hw(wl->hw);
+ wl->mac80211_registered = false;
+
+}
+EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-static int wl1271_init_ieee80211(struct wl1271 *wl)
+int wl1271_init_ieee80211(struct wl1271 *wl)
{
/* The tx descriptor buffer and the TKIP space. */
wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
@@ -1991,11 +2307,15 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* unit us */
/* FIXME: find a proper value */
wl->hw->channel_change_time = 10000;
+ wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_PS;
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -2005,51 +2325,53 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
if (wl1271_11a_enabled())
wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
- SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
+ wl->hw->queues = 4;
+ wl->hw->max_rates = 1;
- return 0;
-}
-
-static void wl1271_device_release(struct device *dev)
-{
+ SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ return 0;
}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-static struct ieee80211_hw *wl1271_alloc_hw(void)
+struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
+ struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i;
+ int i, ret;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_hw_alloc;
+ }
+
+ plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL);
+ if (!plat_dev) {
+ wl1271_error("could not allocate platform_device");
+ ret = -ENOMEM;
+ goto err_plat_alloc;
}
+ memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
+
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
wl->hw = hw;
+ wl->plat_dev = plat_dev;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
+ wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
@@ -2057,11 +2379,14 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
+ wl->sg_enabled = true;
+ wl->hw_pg_ver = -1;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
@@ -2074,167 +2399,72 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- return hw;
-}
-
-int wl1271_free_hw(struct wl1271 *wl)
-{
- ieee80211_unregister_hw(wl->hw);
-
- wl1271_debugfs_exit(wl);
-
- kfree(wl->target_mem_map);
- vfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->nvs);
- wl->nvs = NULL;
-
- kfree(wl->fw_status);
- kfree(wl->tx_res_if);
-
- ieee80211_free_hw(wl->hw);
-
- return 0;
-}
-
-static int __devinit wl1271_probe(struct spi_device *spi)
-{
- struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- int ret;
+ wl1271_debugfs_init(wl);
- pdata = spi->dev.platform_data;
- if (!pdata) {
- wl1271_error("no platform data");
- return -ENODEV;
+ /* Register platform device */
+ ret = platform_device_register(wl->plat_dev);
+ if (ret) {
+ wl1271_error("couldn't register platform device");
+ goto err_hw;
}
+ dev_set_drvdata(&wl->plat_dev->dev, wl);
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
-
- dev_set_drvdata(&spi->dev, wl);
- wl->spi = spi;
-
- /* This is the only SPI value that we need to set here, the rest
- * comes from the board-peripherals file */
- spi->bits_per_word = 32;
-
- ret = spi_setup(spi);
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
- }
-
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto err_platform;
}
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
-
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
-
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
- disable_irq(wl->irq);
-
- ret = platform_device_register(&wl1271_device);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto out_irq;
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto err_bt_coex_state;
}
- dev_set_drvdata(&wl1271_device.dev, wl);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_platform;
-
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_platform;
-
- wl1271_debugfs_init(wl);
- wl1271_notice("initialized");
+ return hw;
- return 0;
+err_bt_coex_state:
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- out_platform:
- platform_device_unregister(&wl1271_device);
+err_platform:
+ platform_device_unregister(wl->plat_dev);
- out_irq:
- free_irq(wl->irq, wl);
+err_hw:
+ wl1271_debugfs_exit(wl);
+ kfree(plat_dev);
- out_free:
+err_plat_alloc:
ieee80211_free_hw(hw);
- return ret;
-}
-
-static int __devexit wl1271_remove(struct spi_device *spi)
-{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+err_hw_alloc:
- platform_device_unregister(&wl1271_device);
- free_irq(wl->irq, wl);
-
- wl1271_free_hw(wl);
-
- return 0;
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-
-static struct spi_driver wl1271_spi_driver = {
- .driver = {
- .name = "wl1271",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
-
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
+int wl1271_free_hw(struct wl1271 *wl)
{
- int ret;
+ platform_device_unregister(wl->plat_dev);
+ kfree(wl->plat_dev);
- ret = spi_register_driver(&wl1271_spi_driver);
- if (ret < 0) {
- wl1271_error("failed to register spi driver: %d", ret);
- goto out;
- }
+ wl1271_debugfs_exit(wl);
-out:
- return ret;
-}
+ vfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
-static void __exit wl1271_exit(void)
-{
- spi_unregister_driver(&wl1271_spi_driver);
+ kfree(wl->fw_status);
+ kfree(wl->tx_res_if);
- wl1271_notice("unloaded");
-}
+ ieee80211_free_hw(wl->hw);
-module_init(wl1271_init);
-module_exit(wl1271_exit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wl1271_free_hw);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index e2b1ebf096e..a5e60e0403e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -23,7 +23,6 @@
#include "wl1271_reg.h"
#include "wl1271_ps.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -41,7 +40,8 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- !test_bit(WL1271_FLAG_PSM, &wl->flags))
+ (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
+ !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,8 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
+ test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index c723d9c7e13..b98fb643fab 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -27,7 +27,6 @@
#include "wl1271_acx.h"
#include "wl1271_reg.h"
#include "wl1271_rx.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
@@ -44,66 +43,6 @@ static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 11, /* WL1271_RATE_54 */
- 10, /* WL1271_RATE_48 */
- 9, /* WL1271_RATE_36 */
- 8, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 7, /* WL1271_RATE_18 */
- 6, /* WL1271_RATE_12 */
- 3, /* WL1271_RATE_11 */
- 5, /* WL1271_RATE_9 */
- 4, /* WL1271_RATE_6 */
- 2, /* WL1271_RATE_5_5 */
- 1, /* WL1271_RATE_2 */
- 0 /* WL1271_RATE_1 */
-};
-
-/* The values of this table must match the wl1271_rates[] array */
-static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
- /* MCS rates are used only with 11n */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
-
- 7, /* WL1271_RATE_54 */
- 6, /* WL1271_RATE_48 */
- 5, /* WL1271_RATE_36 */
- 4, /* WL1271_RATE_24 */
-
- /* TI-specific rate */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
-
- 3, /* WL1271_RATE_18 */
- 2, /* WL1271_RATE_12 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
- 1, /* WL1271_RATE_9 */
- 0, /* WL1271_RATE_6 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
- WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
- WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
-};
-
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
@@ -111,20 +50,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
- if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_BG) {
- status->band = IEEE80211_BAND_2GHZ;
- status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
- } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
- WL1271_RX_DESC_BAND_A) {
- status->band = IEEE80211_BAND_5GHZ;
- status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
- } else
- wl1271_warning("unsupported band 0x%x",
- desc->flags & WL1271_RX_DESC_BAND_MASK);
-
- if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
- wl1271_warning("unsupported rate");
+ status->band = wl->band;
+ status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
/*
* FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
@@ -134,13 +61,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
status->signal = desc->rssi;
- /*
- * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
- * need to divide by two for now, but TI has been discussing about
- * changing it. This needs to be rechecked.
- */
- status->noise = desc->rssi - (desc->snr >> 1);
-
status->freq = ieee80211_channel_to_frequency(desc->channel);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
@@ -162,6 +82,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
u8 *buf;
u8 beacon = 0;
+ /*
+ * In PLT mode we seem to get frames and mac80211 warns about them,
+ * workaround this by not retrieving them at all.
+ */
+ if (unlikely(wl->state == WL1271_STATE_PLT))
+ return;
+
skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
@@ -186,6 +113,8 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
beacon ? "beacon" : "");
+ skb_trim(skb, skb->len - desc->pad_len);
+
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
ieee80211_rx_ni(wl->hw, skb);
}
@@ -220,6 +149,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
wl->rx_counter++;
drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
+
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index 1ae6d1783ed..b89be4758e7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -43,7 +43,6 @@
#define RX_MAX_PACKET_ID 3
#define NUM_RX_PKT_DESC_MOD_MASK 7
-#define WL1271_RX_RATE_UNSUPPORTED 0xFF
#define RX_DESC_VALID_FCS 0x0001
#define RX_DESC_MATCH_RXADDR1 0x0002
@@ -117,5 +116,6 @@ struct wl1271_rx_descriptor {
} __attribute__ ((packed));
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
new file mode 100644
index 00000000000..d3d6f302f70
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -0,0 +1,291 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/vmalloc.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <plat/gpio.h>
+
+#include "wl1271.h"
+#include "wl12xx_80211.h"
+#include "wl1271_io.h"
+
+
+#define RX71_WL1271_IRQ_GPIO 42
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static const struct sdio_device_id wl1271_devices[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
+ {}
+};
+MODULE_DEVICE_TABLE(sdio, wl1271_devices);
+
+static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+
+static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_func(wl)->dev);
+}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_sdio_reset(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_init(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
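+ /* the ELP control register is accessed byte-wide through SDIO function 0 */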
+ ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ if (fixed)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+
+ wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ }
+
+ if (ret)
+ wl1271_error("sdio read failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+
+ if (fixed)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ }
+ if (ret)
+ wl1271_error("sdio write failed (%d)", ret);
+
+}
+
+static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+{
+ struct sdio_func *func = wl_to_func(wl);
+
+ /* Let the SDIO stack handle wlan_enable control, so we
+ * keep host claimed while wlan is in use to keep wl1271
+ * alive.
+ */
+ if (enable) {
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ } else {
+ sdio_disable_func(func);
+ sdio_release_host(func);
+ }
+}
+
+static struct wl1271_if_operations sdio_ops = {
+ .read = wl1271_sdio_raw_read,
+ .write = wl1271_sdio_raw_write,
+ .reset = wl1271_sdio_reset,
+ .init = wl1271_sdio_init,
+ .power = wl1271_sdio_set_power,
+ .dev = wl1271_sdio_wl_to_dev,
+ .enable_irq = wl1271_sdio_enable_interrupts,
+ .disable_irq = wl1271_sdio_disable_interrupts
+};
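+
+/*
+ * The core driver talks to the bus only through wl1271_if_operations,
+ * so the same wl1271_main code runs on top of either this SDIO glue or
+ * the SPI glue in wl1271_spi.c.
+ */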
+
+static int __devinit wl1271_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ /* We are only able to handle the wlan function */
+ if (func->num != 0x02)
+ return -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ wl->if_priv = func;
+ wl->if_ops = &sdio_ops;
+
+ /* Grab access to FN0 for ELP reg. */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+ wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO);
+ if (wl->irq < 0) {
+ ret = wl->irq;
+ wl1271_error("could not get irq!");
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ sdio_set_drvdata(func, wl);
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static void __devexit wl1271_remove(struct sdio_func *func)
+{
+ struct wl1271 *wl = sdio_get_drvdata(func);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+}
+
+static struct sdio_driver wl1271_sdio_driver = {
+ .name = "wl1271_sdio",
+ .id_table = wl1271_devices,
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&wl1271_sdio_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register sdio driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ sdio_unregister_driver(&wl1271_sdio_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 053c84aceb4..5189b812f93 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -21,18 +21,69 @@
*
*/
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
+#include <linux/spi/wl12xx.h>
#include <linux/slab.h>
#include "wl1271.h"
#include "wl12xx_80211.h"
-#include "wl1271_spi.h"
+#include "wl1271_io.h"
+
+#include "wl1271_reg.h"
+
+#define WSPI_CMD_READ 0x40000000
+#define WSPI_CMD_WRITE 0x00000000
+#define WSPI_CMD_FIXED 0x20000000
+#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
+#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
+#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
+
+#define WSPI_INIT_CMD_CRC_LEN 5
+
+#define WSPI_INIT_CMD_START 0x00
+#define WSPI_INIT_CMD_TX 0x40
+/* the extra bypass bit is sampled by the TNET as '1' */
+#define WSPI_INIT_CMD_BYPASS_BIT 0x80
+#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
+#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
+#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
+#define WSPI_INIT_CMD_IOD 0x40
+#define WSPI_INIT_CMD_IP 0x20
+#define WSPI_INIT_CMD_CS 0x10
+#define WSPI_INIT_CMD_WS 0x08
+#define WSPI_INIT_CMD_WSPI 0x01
+#define WSPI_INIT_CMD_END 0x01
+
+#define WSPI_INIT_CMD_LEN 8
+
+#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
+ ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
+#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
+
+static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_spi(wl)->dev);
+}
-void wl1271_spi_reset(struct wl1271 *wl)
+static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
+{
+ disable_irq(wl->irq);
+}
+
+static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
+{
+ enable_irq(wl->irq);
+}
+
+static void wl1271_spi_reset(struct wl1271 *wl)
{
u8 *cmd;
struct spi_transfer t;
@@ -53,12 +104,13 @@ void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+ kfree(cmd);
}
-void wl1271_spi_init(struct wl1271 *wl)
+static void wl1271_spi_init(struct wl1271 *wl)
{
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
@@ -107,48 +159,25 @@ void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+ kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-/* FIXME: Check busy words, removed due to SPI bug */
-#if 0
-static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
+static int wl1271_spi_read_busy(struct wl1271 *wl)
{
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
int num_busy_bytes = 0;
- wl1271_info("spi read BUSY!");
-
- /*
- * Look for the non-busy word in the read buffer, and if found,
- * read in the remaining data into the buffer.
- */
- busy_buf = (u32 *)buf;
- for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
- num_busy_bytes += sizeof(u32);
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- memmove(buf, busy_buf, len - num_busy_bytes);
- t[0].rx_buf = buf + (len - num_busy_bytes);
- t[0].len = num_busy_bytes;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
- }
-
/*
* Read further busy words from SPI until a non-busy word is
* encountered, then read the data itself into the buffer.
*/
- wl1271_info("spi read BUSY-polling needed!");
num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
busy_buf = wl->buffer_busyword;
@@ -158,28 +187,21 @@ static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
memset(t, 0, sizeof(t));
t[0].rx_buf = busy_buf;
t[0].len = sizeof(u32);
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
-
- if (*busy_buf & 0x1) {
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
- t[0].rx_buf = buf;
- t[0].len = len;
- spi_message_add_tail(&t[0], &m);
- spi_sync(wl->spi, &m);
- return;
- }
+ spi_sync(wl_to_spi(wl), &m);
+
+ if (*busy_buf & 0x1)
+ return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- memset(buf, 0, len);
wl1271_error("SPI read busy-word timeout!\n");
+ return -ETIMEDOUT;
}
-#endif
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
{
struct spi_transfer t[3];
struct spi_message m;
@@ -202,28 +224,38 @@ void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].tx_buf = cmd;
t[0].len = 4;
+ t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
/* Busy and non busy words read */
t[1].rx_buf = busy_buf;
t[1].len = WL1271_BUSY_WORD_LEN;
+ t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- t[2].rx_buf = buf;
- t[2].len = len;
- spi_message_add_tail(&t[2], &m);
+ spi_sync(wl_to_spi(wl), &m);
- spi_sync(wl->spi, &m);
+ if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
+ wl1271_spi_read_busy(wl)) {
+ memset(buf, 0, len);
+ return;
+ }
- /* FIXME: Check busy words, removed due to SPI bug */
- /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
- wl1271_spi_read_busy(wl, buf, len); */
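+ /*
+  * The data words are read with a separate SPI message; the cs_change
+  * flags on the earlier transfers are intended to keep the chip selected
+  * across messages, so the read resumes where the busy-word polling
+  * stopped.
+  */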
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].rx_buf = buf;
+ t[0].len = len;
+ t[0].cs_change = true;
+ spi_message_add_tail(&t[0], &m);
+
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
}
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
struct spi_transfer t[2];
@@ -251,8 +283,181 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl->spi, &m);
+ spi_sync(wl_to_spi(wl), &m);
wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
+
+static irqreturn_t wl1271_irq(int irq, void *cookie)
+{
+ struct wl1271 *wl;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ wl = cookie;
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->irq_work);
+ set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void wl1271_spi_set_power(struct wl1271 *wl, bool enable)
+{
+ if (wl->set_power)
+ wl->set_power(enable);
+}
+
+static struct wl1271_if_operations spi_ops = {
+ .read = wl1271_spi_raw_read,
+ .write = wl1271_spi_raw_write,
+ .reset = wl1271_spi_reset,
+ .init = wl1271_spi_init,
+ .power = wl1271_spi_set_power,
+ .dev = wl1271_spi_wl_to_dev,
+ .enable_irq = wl1271_spi_enable_interrupts,
+ .disable_irq = wl1271_spi_disable_interrupts
+};
+
+static int __devinit wl1271_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl1271_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ dev_set_drvdata(&spi->dev, wl);
+ wl->if_priv = spi;
+
+ wl->if_ops = &spi_ops;
+
+ /* This is the only SPI value that we need to set here, the rest
+ * comes from the board-peripherals file */
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ wl1271_error("spi_setup failed");
+ goto out_free;
+ }
+
+ wl->set_power = pdata->set_power;
+ if (!wl->set_power) {
+ wl1271_error("set power function missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ wl->irq = spi->irq;
+ if (wl->irq < 0) {
+ wl1271_error("irq missing in platform data");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free;
+ }
+
+ set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ wl1271_notice("initialized");
+
+ return 0;
+
+ out_irq:
+ free_irq(wl->irq, wl);
+
+ out_free:
+ wl1271_free_hw(wl);
+
+ return ret;
+}
+
+static int __devexit wl1271_remove(struct spi_device *spi)
+{
+ struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+
+ free_irq(wl->irq, wl);
+
+ wl1271_unregister_hw(wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+
+static struct spi_driver wl1271_spi_driver = {
+ .driver = {
+ .name = "wl1271_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&wl1271_spi_driver);
+ if (ret < 0) {
+ wl1271_error("failed to register spi driver: %d", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void __exit wl1271_exit(void)
+{
+ spi_unregister_driver(&wl1271_spi_driver);
+
+ wl1271_notice("unloaded");
+}
+
+module_init(wl1271_init);
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
+MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
deleted file mode 100644
index a803596dad4..00000000000
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __WL1271_SPI_H__
-#define __WL1271_SPI_H__
-
-#include "wl1271_reg.h"
-
-#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
-
-#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
-#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
-#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
-#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
-#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
-#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
-#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
-
-#define HW_ACCESS_REGISTER_SIZE 4
-
-#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
-
-#define WSPI_CMD_READ 0x40000000
-#define WSPI_CMD_WRITE 0x00000000
-#define WSPI_CMD_FIXED 0x20000000
-#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
-#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
-#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
-
-#define WSPI_INIT_CMD_CRC_LEN 5
-
-#define WSPI_INIT_CMD_START 0x00
-#define WSPI_INIT_CMD_TX 0x40
-/* the extra bypass bit is sampled by the TNET as '1' */
-#define WSPI_INIT_CMD_BYPASS_BIT 0x80
-#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
-#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
-#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
-#define WSPI_INIT_CMD_IOD 0x40
-#define WSPI_INIT_CMD_IP 0x20
-#define WSPI_INIT_CMD_CS 0x10
-#define WSPI_INIT_CMD_WS 0x08
-#define WSPI_INIT_CMD_WSPI 0x01
-#define WSPI_INIT_CMD_END 0x01
-
-#define WSPI_INIT_CMD_LEN 8
-
-#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
- ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
-#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
-
-#define OCP_CMD_LOOP 32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ 0x2
-
-#define OCP_READY_MASK BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP 0x00000
-#define OCP_STATUS_OK 0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-/* Raw target IO, address is not translated */
-void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed);
-
-/* INIT and RESET words */
-void wl1271_spi_reset(struct wl1271 *wl);
-void wl1271_spi_init(struct wl1271 *wl);
-#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 5c1c4f565fd..554deb4d024 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -26,7 +26,6 @@
#include <net/genetlink.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_acx.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 811e739d05b..62db79508dd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include "wl1271.h"
-#include "wl1271_spi.h"
#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
@@ -47,7 +46,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
- u32 total_blocks, excluded;
+ u32 total_blocks;
int id, ret = -EBUSY;
/* allocate free identifier for the packet */
@@ -57,12 +56,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
/* approximate the number of blocks required for this packet
in the firmware */
- /* FIXME: try to figure out what is done here and make it cleaner */
- total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
- excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
- total_blocks += (excluded > 252) ? 2 : 1;
- total_blocks += TX_HW_BLOCK_SPARE;
-
+ total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
+ total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
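+ /* ceiling division: DIV_ROUND_UP(total_len, TX_HW_BLOCK_SIZE) firmware
+  * memory blocks, plus TX_HW_BLOCK_SPARE extra blocks */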
if (total_blocks <= wl->tx_blocks_available) {
desc = (struct wl1271_tx_hw_descr *)skb_push(
skb, total_len - skb->len);
@@ -87,8 +82,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control)
{
+ struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int pad, ac;
+ s64 hosttime;
u16 tx_attr;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -102,8 +99,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* configure packet life time */
- desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
- wl->time_offset);
+ getnstimeofday(&ts);
+ hosttime = (timespec_to_ns(&ts) >> 10);
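+ /* ns >> 10 approximates microseconds (divides by 1024 instead of 1000);
+  * the small error does not matter for the packet lifetime accounting */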
+ desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
/* configure the tx attributes */
@@ -170,7 +168,6 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
/* write packet new counter into the write access register */
wl->tx_packets_count++;
- wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
desc = (struct wl1271_tx_hw_descr *) skb->data;
wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -223,7 +220,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
-static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
@@ -245,6 +242,7 @@ void wl1271_tx_work(struct work_struct *work)
struct sk_buff *skb;
bool woken_up = false;
u32 sta_rates = 0;
+ u32 prev_tx_packets_count;
int ret;
/* check if the rates supported by the AP have changed */
@@ -261,6 +259,8 @@ void wl1271_tx_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ prev_tx_packets_count = wl->tx_packets_count;
+
/* if rates have changed, re-configure the rate policy */
if (unlikely(sta_rates)) {
wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
@@ -271,31 +271,26 @@ void wl1271_tx_work(struct work_struct *work)
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
- goto out;
+ goto out_ack;
woken_up = true;
}
ret = wl1271_tx_frame(wl, skb);
if (ret == -EBUSY) {
- /* firmware buffer is full, stop queues */
- wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
- "stop queues");
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ /* firmware buffer is full, lets stop transmitting. */
skb_queue_head(&wl->tx_queue, skb);
- goto out;
+ goto out_ack;
} else if (ret < 0) {
dev_kfree_skb(skb);
- goto out;
- } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
- &wl->flags)) {
- /* firmware buffer has space, restart queues */
- wl1271_debug(DEBUG_TX,
- "complete_packet: waking queues");
- ieee80211_wake_queues(wl->hw);
+ goto out_ack;
}
}
+out_ack:
+ /* interrupt the firmware with the new packets */
+ if (prev_tx_packets_count != wl->tx_packets_count)
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+
out:
if (woken_up)
wl1271_ps_elp_sleep(wl);
@@ -308,11 +303,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- u16 seq;
int id = result->id;
+ int rate = -1;
+ u8 retries = 0;
/* check for id legality */
- if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
+ if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
wl1271_warning("TX result illegal id: %d", id);
return;
}
@@ -320,31 +316,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
- /* update packet status */
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- if (result->status == TX_SUCCESS)
+ /* update the TX status info */
+ if (result->status == TX_SUCCESS) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (result->status & TX_RETRY_EXCEEDED) {
- /* FIXME */
- /* info->status.excessive_retries = 1; */
- wl->stats.excessive_retries++;
- }
+ rate = wl1271_rate_to_idx(wl, result->rate_class_index);
+ retries = result->ack_failures;
+ } else if (result->status == TX_RETRY_EXCEEDED) {
+ wl->stats.excessive_retries++;
+ retries = result->ack_failures;
}
- /* FIXME */
- /* info->status.retry_count = result->ack_failures; */
+ info->status.rates[0].idx = rate;
+ info->status.rates[0].count = retries;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
wl->stats.retry_count += result->ack_failures;
/* update security sequence number */
- seq = wl->tx_security_seq_16 +
- (result->lsb_security_sequence_number -
- wl->tx_security_last_seq);
+ /* the firmware reports only the low 8 bits of the sequence number, so
+  * take the delta modulo 256 to keep the 64-bit counter monotonic
+  * across wrap-around */
+ wl->tx_security_seq += (result->lsb_security_sequence_number -
+ wl->tx_security_last_seq) & 0xff;
wl->tx_security_last_seq = result->lsb_security_sequence_number;
- if (seq < wl->tx_security_seq_16)
- wl->tx_security_seq_32++;
- wl->tx_security_seq_16 = seq;
-
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
@@ -367,23 +361,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
}
/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl, u32 count)
+void wl1271_tx_complete(struct wl1271 *wl)
{
struct wl1271_acx_mem_map *memmap =
(struct wl1271_acx_mem_map *)wl->target_mem_map;
+ u32 count, fw_counter;
u32 i;
- wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
-
/* read the tx results from the chipset */
wl1271_read(wl, le32_to_cpu(memmap->tx_result),
wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
+
+ /* write host counter to chipset (to ack) */
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+
+ count = fw_counter - wl->tx_results_count;
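+ /* tx_results_count is the host-side running total of processed
+  * results, so the difference is the number of new entries to handle */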
+ wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* verify that the result buffer is not getting overrun */
- if (count > TX_HW_RESULT_QUEUE_LEN) {
+ if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
wl1271_warning("TX result overflow from chipset: %d", count);
- count = TX_HW_RESULT_QUEUE_LEN;
- }
/* process the results */
for (i = 0; i < count; i++) {
@@ -397,11 +397,18 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
wl->tx_results_count++;
}
- /* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter),
- le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
+ if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
+ skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+ unsigned long flags;
+
+ /* firmware buffer has space, restart queues */
+ wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queues(wl->hw);
+ clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
}
/* caller must hold wl->mutex */
@@ -409,31 +416,19 @@ void wl1271_tx_flush(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
/* control->flags = 0; FIXME */
while ((skb = skb_dequeue(&wl->tx_queue))) {
- info = IEEE80211_SKB_CB(skb);
-
wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
ieee80211_tx_status(wl->hw, skb);
}
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
- info = IEEE80211_SKB_CB(skb);
-
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
- continue;
-
- ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[i] = NULL;
+ ieee80211_tx_status(wl->hw, skb);
}
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 17e405a09ca..3b8b7ac253f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -26,7 +26,7 @@
#define __WL1271_TX_H__
#define TX_HW_BLOCK_SPARE 2
-#define TX_HW_BLOCK_SHIFT_DIV 8
+#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
/* The chipset reference driver states, that the "aid" value 1
@@ -125,9 +125,6 @@ struct wl1271_tx_hw_res_if {
static inline int wl1271_tx_get_queue(int queue)
{
- /* FIXME: use best effort until WMM is enabled */
- return CONF_TX_AC_BE;
-
switch (queue) {
case 0:
return CONF_TX_AC_VO;
@@ -160,7 +157,9 @@ static inline int wl1271_tx_ac_to_tid(int ac)
}
void wl1271_tx_work(struct work_struct *work);
-void wl1271_tx_complete(struct wl1271 *wl, u32 count);
+void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
+u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
#endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8bce1a550a2..8816e371fd0 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -610,7 +610,6 @@ struct wl3501_card {
struct iw_statistics wstats;
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
- struct dev_node_t node;
struct pcmcia_device *p_dev;
};
#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7b9621de239..376c6b964a9 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1307,7 +1307,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
else {
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1326,7 +1326,6 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&this->lock, flags);
enabled = wl3501_block_interrupt(this);
- dev->trans_start = jiffies;
rc = wl3501_send_pkt(this, skb->data, skb->len);
if (enabled)
wl3501_unblock_interrupt(this);
@@ -1451,10 +1450,10 @@ static void wl3501_detach(struct pcmcia_device *link)
netif_device_detach(dev);
wl3501_release(link);
+ unregister_netdev(dev);
+
if (link->priv)
free_netdev(link->priv);
-
- return;
}
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
@@ -1834,32 +1833,32 @@ out:
}
static const iw_handler wl3501_handler[] = {
- [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name,
- [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq,
- [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq,
- [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode,
- [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode,
- [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens,
- [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range,
- [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy,
- [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy,
- [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy,
- [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy,
- [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap,
- [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap,
- [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan,
- [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan,
- [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid,
- [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid,
- [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick,
- [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick,
- [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate,
- [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold,
- [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold,
- [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow,
- [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry,
- [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode,
- [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power,
+ IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
+ IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
+ IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
+ IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
+ IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
+ IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
+ IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
+ IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+ IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+ IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+ IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+ IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
+ IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
+ IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
+ IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
+ IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
+ IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
+ IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
+ IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
+ IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
+ IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
+ IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
+ IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
+ IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
+ IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
};
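
[Editor's note] The table rewrite above swaps open-coded "[SIOCGIWNAME - SIOCIWFIRST] = fn" entries for the IW_HANDLER() helper, which expands to roughly the same designated array initializer with the index arithmetic hidden. The underlying C pattern can be shown stand-alone; the command numbers below are made up and have nothing to do with the real wireless-extensions ioctls:

#include <stdio.h>

#define CMD_FIRST   0x8B00
#define CMD_GETNAME 0x8B01
#define CMD_SETFREQ 0x8B04

typedef int (*handler_t)(void);

static int get_name(void) { return 1; }
static int set_freq(void) { return 2; }

/* One readable entry per command; the "index = command - base" math is
 * kept in a single place. */
#define HANDLER(cmd, fn) [(cmd) - CMD_FIRST] = (fn)

static const handler_t handlers[] = {
        HANDLER(CMD_GETNAME, get_name),
        HANDLER(CMD_SETFREQ, set_freq),
};

int main(void)
{
        printf("%d\n", handlers[CMD_GETNAME - CMD_FIRST]());  /* 1 */
        printf("%d\n", handlers[CMD_SETFREQ - CMD_FIRST]());  /* 2 */
        return 0;
}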
static const struct iw_handler_def wl3501_handler_def = {
@@ -1897,10 +1896,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = wl3501_interrupt;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -1961,7 +1956,7 @@ static int wl3501_config(struct pcmcia_device *link)
/* Now allocate an interrupt line. Note that this does not actually
* assign a handler to the interrupt. */
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, wl3501_interrupt);
if (ret)
goto failed;
@@ -1972,7 +1967,7 @@ static int wl3501_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev)) {
@@ -1981,20 +1976,15 @@ static int wl3501_config(struct pcmcia_device *link)
}
this = netdev_priv(dev);
- /*
- * At this point, the dev_node_t structure(s) should be initialized and
- * arranged in a linked list at link->dev_node.
- */
- link->dev_node = &this->node;
this->base_addr = dev->base_addr;
if (!wl3501_get_flash_mac_addr(this)) {
printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
dev->name);
+ unregister_netdev(dev);
goto failed;
}
- strcpy(this->node.dev_name, dev->name);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
@@ -2038,12 +2028,6 @@ failed:
*/
static void wl3501_release(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
-
- /* Unlink the device chain */
- if (link->dev_node)
- unregister_netdev(dev);
-
pcmcia_disable_device(link);
}
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 9d127787464..390d77f762c 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -134,7 +134,6 @@ static void zd1201_usbfree(struct urb *urb)
kfree(urb->transfer_buffer);
usb_free_urb(urb);
- return;
}
/* cmdreq message:
@@ -185,7 +184,6 @@ static void zd1201_usbtx(struct urb *urb)
{
struct zd1201 *zd = urb->context;
netif_wake_queue(zd->dev);
- return;
}
/* Incoming data */
@@ -407,7 +405,6 @@ exit:
wake_up(&zd->rxdataq);
kfree(urb->transfer_buffer);
}
- return;
}
static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
@@ -827,7 +824,6 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
} else {
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
}
kfree_skb(skb);
@@ -845,7 +841,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
usb_unlink_urb(zd->tx_urb);
dev->stats.tx_errors++;
/* Restart the timeout to quiet the watchdog: */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
static int zd1201_set_mac_address(struct net_device *dev, void *p)
@@ -876,7 +872,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
static void zd1201_set_multicast(struct net_device *dev)
{
struct zd1201 *zd = netdev_priv(dev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
@@ -884,8 +880,8 @@ static void zd1201_set_multicast(struct net_device *dev)
return;
i = 0;
- netdev_for_each_mc_addr(mc, dev)
- memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
netdev_mc_count(dev) * ETH_ALEN, 0);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 16fa289ad77..b0b666019a9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -948,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work)
}
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_mc_hash hash;
- int i;
+ struct netdev_hw_addr *ha;
zd_mc_clear(&hash);
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr);
- zd_mc_add_addr(&hash, mclist->dmi_addr);
- mclist = mclist->next;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
+ zd_mc_add_addr(&hash, ha->addr);
}
return hash.low | ((u64)hash.high << 32);
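
[Editor's note] One detail worth calling out in the unchanged return statement above: the (u64) cast has to happen before the 32-bit shift. Assuming the hash halves are 32-bit values, as the cast suggests, shifting the high half without the cast would discard it (shifting a 32-bit value by 32 is undefined). A tiny stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t low = 0x00000001u, high = 0x80000000u;

        /* Widen first, then shift: the full 64-bit hash survives. */
        uint64_t hash = low | ((uint64_t)high << 32);

        printf("hash = 0x%016llx\n", (unsigned long long)hash);
        return 0;
}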
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index d91ad1a612a..c257940b71b 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -664,15 +664,15 @@ static struct urb *alloc_rx_urb(struct zd_usb *usb)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
- buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buffer) {
usb_free_urb(urb);
return NULL;
}
usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
- buffer, USB_MAX_RX_SIZE,
+ buffer, USB_MAX_RX_SIZE,
rx_urb_complete, usb);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -683,8 +683,8 @@ static void free_rx_urb(struct urb *urb)
{
if (!urb)
return;
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
}
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 1e783ccc306..d04c5b26205 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -558,7 +558,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -590,7 +590,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -639,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev)
}
skb_put(skb, len); /* Tell the skb how much data we got */
- skb->dev = dev; /* Fill out required meta-data */
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_NONE;
@@ -1055,7 +1054,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
- dev->trans_start = jiffies;
return 0;
}
@@ -1090,7 +1088,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev)
*/
static bool get_bool(struct of_device *ofdev, const char *s)
{
- u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL);
+ u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
if (p) {
return (bool)*p;
@@ -1132,14 +1130,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
dev_info(dev, "Device Tree Probing\n");
/* Get iospace for the device */
- rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+ rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
if (rc) {
dev_err(dev, "invalid address\n");
return rc;
}
/* Get IRQ for the device */
- rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+ rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
if (rc == NO_IRQ) {
dev_err(dev, "no IRQ found\n");
return rc;
@@ -1172,7 +1170,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
/* Get the virtual base address for the device */
- lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1);
+ lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
if (NULL == lp->base_addr) {
dev_err(dev, "EmacLite: Could not allocate iomem\n");
rc = -EIO;
@@ -1184,7 +1182,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
lp->next_rx_buf_to_use = 0x0;
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
- mac_address = of_get_mac_address(ofdev->node);
+ mac_address = of_get_mac_address(ofdev->dev.of_node);
if (mac_address)
/* Set the MAC address. */
@@ -1199,7 +1197,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
- lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
rc = xemaclite_mdio_setup(lp, &ofdev->dev);
if (rc)
dev_warn(&ofdev->dev, "error registering MDIO bus\n");
@@ -1225,7 +1223,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
return 0;
error1:
- release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1);
+ release_mem_region(ndev->mem_start, resource_size(&r_mem));
error2:
xemaclite_remove_ndev(ndev);
@@ -1293,8 +1291,11 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
static struct of_platform_driver xemaclite_of_driver = {
- .name = DRIVER_NAME,
- .match_table = xemaclite_of_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xemaclite_of_match,
+ },
.probe = xemaclite_of_probe,
.remove = __devexit_p(xemaclite_of_remove),
};
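
[Editor's note] Several hunks above replace the open-coded "r_mem.end - r_mem.start + 1" with resource_size(). The helper simply computes the length of an inclusive [start, end] range; a stand-alone sketch with a made-up address range:

#include <stdio.h>

struct resource {
        unsigned long start;
        unsigned long end;      /* inclusive */
};

/* Same arithmetic as the kernel helper: end is inclusive, so add 1. */
static unsigned long resource_size(const struct resource *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        struct resource r_mem = { .start = 0x81000000, .end = 0x8100ffff };

        printf("iomem length: 0x%lx bytes\n", resource_size(&r_mem)); /* 0x10000 */
        return 0;
}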
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ede5b2436f2..4eb67aed68d 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -564,7 +564,6 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
for (i = 10000; i >= 0; i--)
if ((ioread16(ioaddr + MII_Status) & 1) == 0)
break;
- return;
}
@@ -1299,25 +1298,25 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
slots must be set for each address. */
if (yp->drv_flags & HasMulticastBug) {
- bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
- bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
- bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
+ bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
/* Copy the hash table to the chip. */
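
[Editor's note] The hash-filter indexing in the loop above can be reproduced outside the kernel. ether_crc_le(len, addr) is a little-endian CRC-32 over the first len bytes, with initial value ~0 and no final inversion; the driver then takes "(crc >> 3) & 0x3f" as the filter slot, repeating the computation over 3-, 4-, 5- and 6-byte prefixes because of the early-silicon quirk noted in the comment. A sketch of the slot computation for one hypothetical multicast address (illustrative only, no claim about which slot a real address lands in):

#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time CRC-32, LSB first, init ~0, no final XOR. */
static uint32_t crc32_le_bits(const uint8_t *data, int len)
{
        uint32_t crc = ~0u;
        int i, bit;

        for (i = 0; i < len; i++) {
                uint8_t byte = data[i];

                for (bit = 0; bit < 8; bit++, byte >>= 1) {
                        if ((crc ^ byte) & 1)
                                crc = (crc >> 1) ^ 0xedb88320u;
                        else
                                crc >>= 1;
                }
        }
        return crc;
}

int main(void)
{
        /* Hypothetical multicast address, for illustration only. */
        const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        unsigned int bit = (crc32_le_bits(addr, 6) >> 3) & 0x3f;

        printf("full-address filter slot: %u (word %u)\n", bit, bit >> 4);
        return 0;
}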
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index dbfef8d70f2..c3a32920451 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -587,7 +587,6 @@ static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore (&znet->lock, flags);
- dev->trans_start = jiffies;
netif_start_queue (dev);
if (znet_debug > 4)
@@ -802,7 +801,6 @@ static void znet_rx(struct net_device *dev)
/* If any worth-while packets have been received, dev_rint()
has done a mark_bh(INET_BH) for us and will work on them
when we get to the bottom-half routine. */
- return;
}
/* The inverse routine to znet_open(). */
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 81c753a617a..b78a38d9172 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
static struct zorro_driver zorro8390_driver = {
.name = "zorro8390",
@@ -430,7 +431,6 @@ static void zorro8390_block_output(struct net_device *dev, int count,
z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static void __devexit zorro8390_remove_one(struct zorro_dev *z)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 224ae6bc67b..7d18f8e0b01 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -10,8 +10,7 @@
#include <asm/errno.h>
/**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
+ * of_match_device - Tell if a struct device matches an of_device_id list
* @ids: array of of device match structures to search in
* @dev: the of device structure to match against
*
@@ -19,11 +18,11 @@
* system is in its list of supported devices.
*/
const struct of_device_id *of_match_device(const struct of_device_id *matches,
- const struct of_device *dev)
+ const struct device *dev)
{
- if (!dev->node)
+ if (!dev->of_node)
return NULL;
- return of_match_node(matches, dev->node);
+ return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
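
[Editor's note] The prototype change above makes of_match_device() take a plain struct device and consult dev->of_node. Functionally it is still a walk over a sentinel-terminated id table. A stand-alone sketch of that matching idea, simplified to a single "compatible" string (not the real of_device_id logic, which also matches on name and type):

#include <stdio.h>
#include <string.h>

struct match_id {
        const char *compatible;
};

struct fake_device {
        const char *compatible;    /* stands in for data hung off dev->of_node */
};

/* Return the first table entry matching the device, or NULL. */
static const struct match_id *match_device(const struct match_id *ids,
                                           const struct fake_device *dev)
{
        if (!dev->compatible)
                return NULL;
        for (; ids->compatible; ids++)
                if (!strcmp(ids->compatible, dev->compatible))
                        return ids;
        return NULL;
}

int main(void)
{
        static const struct match_id ids[] = {
                { .compatible = "xlnx,xps-ethernetlite-1.00.a" },
                { /* sentinel */ }
        };
        struct fake_device dev = { .compatible = "xlnx,xps-ethernetlite-1.00.a" };

        printf("%s\n", match_device(ids, &dev) ? "match" : "no match");
        return 0;
}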
@@ -54,7 +53,7 @@ static ssize_t devspec_show(struct device *dev,
struct of_device *ofdev;
ofdev = to_of_device(dev);
- return sprintf(buf, "%s\n", ofdev->node->full_name);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
static ssize_t name_show(struct device *dev,
@@ -63,7 +62,7 @@ static ssize_t name_show(struct device *dev,
struct of_device *ofdev;
ofdev = to_of_device(dev);
- return sprintf(buf, "%s\n", ofdev->node->name);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
}
static ssize_t modalias_show(struct device *dev,
@@ -97,14 +96,14 @@ void of_release_dev(struct device *dev)
struct of_device *ofdev;
ofdev = to_of_device(dev);
- of_node_put(ofdev->node);
+ of_node_put(ofdev->dev.of_node);
kfree(ofdev);
}
EXPORT_SYMBOL(of_release_dev);
int of_device_register(struct of_device *ofdev)
{
- BUG_ON(ofdev->node == NULL);
+ BUG_ON(ofdev->dev.of_node == NULL);
device_initialize(&ofdev->dev);
@@ -112,7 +111,7 @@ int of_device_register(struct of_device *ofdev)
* the parent. If there is no parent defined, set the node
* explicitly */
if (!ofdev->dev.parent)
- set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->node));
+ set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));
return device_add(&ofdev->dev);
}
@@ -132,11 +131,11 @@ ssize_t of_device_get_modalias(struct of_device *ofdev,
ssize_t tsize, csize, repend;
/* Name & Type */
- csize = snprintf(str, len, "of:N%sT%s",
- ofdev->node->name, ofdev->node->type);
+ csize = snprintf(str, len, "of:N%sT%s", ofdev->dev.of_node->name,
+ ofdev->dev.of_node->type);
/* Get compatible property if any */
- compat = of_get_property(ofdev->node, "compatible", &cplen);
+ compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
if (!compat)
return csize;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index dee4fb56b09..b6987bba855 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -556,6 +556,21 @@ void __init unflatten_device_tree(void)
pr_debug(" -> unflatten_device_tree()\n");
+ if (!initial_boot_params) {
+ pr_debug("No device tree pointer\n");
+ return;
+ }
+
+ pr_debug("Unflattening device tree:\n");
+ pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic));
+ pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize));
+ pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version));
+
+ if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
+ pr_err("Invalid device tree blob header\n");
+ return;
+ }
+
/* First pass, scan for size */
start = ((unsigned long)initial_boot_params) +
be32_to_cpu(initial_boot_params->off_dt_struct);
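
[Editor's note] The lines added to unflatten_device_tree() refuse to parse a flattened tree whose pointer is missing or whose header magic is not OF_DT_HEADER, instead of crashing later. A stand-alone sketch of that early-exit validation; the struct below is a cut-down stand-in for the real FDT header, and ntohl() plays the role of be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define OF_DT_HEADER 0xd00dfeed

struct fdt_header {
        uint32_t magic;          /* stored big-endian */
        uint32_t totalsize;
        uint32_t version;
};

/* Reject a missing or mislabelled blob before doing any real parsing. */
static int blob_looks_valid(const struct fdt_header *hdr)
{
        if (!hdr)
                return 0;
        return ntohl(hdr->magic) == OF_DT_HEADER;
}

int main(void)
{
        struct fdt_header good = { .magic = htonl(OF_DT_HEADER) };
        struct fdt_header bad  = { .magic = htonl(0xdeadbeef) };

        printf("good: %d, bad: %d\n", blob_looks_valid(&good),
               blob_looks_valid(&bad));
        return 0;
}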
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index a3a708e590d..ab6522c8e4f 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -42,7 +42,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
info.addr = be32_to_cpup(addr);
- dev_archdata_set_node(&dev_ad, node);
+ info.of_node = node;
info.archdata = &dev_ad;
request_module("%s", info.type);
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(of_register_i2c_devices);
static int of_dev_node_match(struct device *dev, void *data)
{
- return dev_archdata_get_node(&dev->archdata) == data;
+ return dev->of_node == data;
}
/* must call put_device() when done with returned i2c_client device */
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b4748337223..42a6715f8e8 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -79,7 +79,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* Associate the OF node with the device structure so it
* can be looked up later */
of_node_get(child);
- dev_archdata_set_node(&phy->dev.archdata, child);
+ phy->dev.of_node = child;
/* All data is now stored in the phy struct; register it */
rc = phy_device_register(phy);
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(of_mdiobus_register);
/* Helper function for of_phy_find_device */
static int of_phy_match(struct device *dev, void *phy_np)
{
- return dev_archdata_get_node(&dev->archdata) == phy_np;
+ return dev->of_node == phy_np;
}
/**
@@ -166,7 +166,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
if (!dev->dev.parent)
return NULL;
- net_np = dev_archdata_get_node(&dev->dev.parent->archdata);
+ net_np = dev->dev.parent->of_node;
if (!net_np)
return NULL;
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index f65f48b9844..5fed7e3c7da 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -79,7 +79,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
/* Store a pointer to the node in the device structure */
of_node_get(nc);
- spi->dev.archdata.of_node = nc;
+ spi->dev.of_node = nc;
/* Register the new device */
request_module(spi->modalias);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index d58ade170c4..7dacc1ebe91 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,14 +21,12 @@ extern struct device_attribute of_platform_device_attrs[];
static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
{
- struct of_device *of_dev = to_of_device(dev);
- struct of_platform_driver *of_drv = to_of_platform_driver(drv);
- const struct of_device_id *matches = of_drv->match_table;
+ const struct of_device_id *matches = drv->of_match_table;
if (!matches)
return 0;
- return of_match_device(matches, of_dev) != NULL;
+ return of_match_device(matches, dev) != NULL;
}
static int of_platform_device_probe(struct device *dev)
@@ -46,7 +44,7 @@ static int of_platform_device_probe(struct device *dev)
of_dev_get(of_dev);
- match = of_match_device(drv->match_table, of_dev);
+ match = of_match_device(drv->driver.of_match_table, dev);
if (match)
error = drv->probe(of_dev, match);
if (error)
@@ -386,11 +384,6 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
{
- /* initialize common driver fields */
- if (!drv->driver.name)
- drv->driver.name = drv->name;
- if (!drv->driver.owner)
- drv->driver.owner = drv->owner;
drv->driver.bus = bus;
/* register with core */
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622..219f79e2210 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
#define OP_BUFFER_FLAGS 0
-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
void free_cpu_buffers(void)
{
- if (op_ring_buffer_read)
- ring_buffer_free(op_ring_buffer_read);
- op_ring_buffer_read = NULL;
- if (op_ring_buffer_write)
- ring_buffer_free(op_ring_buffer_write);
- op_ring_buffer_write = NULL;
+ if (op_ring_buffer)
+ ring_buffer_free(op_ring_buffer);
+ op_ring_buffer = NULL;
}
#define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
RB_EVENT_HDR_SIZE);
- op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_read)
- goto fail;
- op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
- if (!op_ring_buffer_write)
+ op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+ if (!op_ring_buffer)
goto fail;
for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
entry->event = ring_buffer_lock_reserve
- (op_ring_buffer_write, sizeof(struct op_sample) +
+ (op_ring_buffer, sizeof(struct op_sample) +
size * sizeof(entry->sample->data[0]));
- if (entry->event)
- entry->sample = ring_buffer_event_data(entry->event);
- else
- entry->sample = NULL;
-
- if (!entry->sample)
+ if (!entry->event)
return NULL;
-
+ entry->sample = ring_buffer_event_data(entry->event);
entry->size = size;
entry->data = entry->sample->data;
@@ -180,25 +153,16 @@ struct op_sample
int op_cpu_buffer_write_commit(struct op_entry *entry)
{
- return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+ return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
struct ring_buffer_event *e;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- if (ring_buffer_swap_cpu(op_ring_buffer_read,
- op_ring_buffer_write,
- cpu))
+ e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
+ if (!e)
return NULL;
- e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
- if (e)
- goto event;
- return NULL;
-event:
entry->event = e;
entry->sample = ring_buffer_event_data(e);
entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:
unsigned long op_cpu_buffer_entries(int cpu)
{
- return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
- + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+ return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}
static int
@@ -356,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
- int is_kernel = !user_mode(regs);
- unsigned long pc = profile_pc(regs);
+ int is_kernel;
+ unsigned long pc;
+
+ if (likely(regs)) {
+ is_kernel = !user_mode(regs);
+ pc = profile_pc(regs);
+ } else {
+ is_kernel = 0; /* This value will not be used */
+ pc = ESCAPE_CODE; /* as this causes an early return. */
+ }
__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
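
[Editor's note] Earlier in this file's diff, op_cpu_buffer_write_reserve() is reduced to a single early return when the reservation fails, and only then is the sample pointer filled in. The shape of that reserve / fill / commit control flow, as a toy stand-alone program (hypothetical names and types, nothing to do with the real ring_buffer API):

#include <stdio.h>
#include <stdlib.h>

struct sample { unsigned long pc; };
struct entry  { struct sample *sample; };

/* Fail fast: if no space can be reserved, return NULL and leave the entry
 * untouched rather than carrying half-initialized state around. */
static struct sample *entry_reserve(struct entry *e, int buffer_full)
{
        if (buffer_full)
                return NULL;
        e->sample = malloc(sizeof(*e->sample));
        return e->sample;
}

static void entry_commit(struct entry *e)
{
        printf("committed sample, pc=0x%lx\n", e->sample->pc);
        free(e->sample);
}

int main(void)
{
        struct entry e;

        if (!entry_reserve(&e, 0)) {
                fprintf(stderr, "dropped: buffer full\n");
                return 1;
        }
        e.sample->pc = 0xc0ffee;
        entry_commit(&e);
        return 0;
}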
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dc8a0428260..b336cd9ee7a 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -253,22 +253,26 @@ static int __init oprofile_init(void)
int err;
err = oprofile_arch_init(&oprofile_ops);
-
if (err < 0 || timer) {
printk(KERN_INFO "oprofile: using timer interrupt.\n");
- oprofile_timer_init(&oprofile_ops);
+ err = oprofile_timer_init(&oprofile_ops);
+ if (err)
+ goto out_arch;
}
-
err = oprofilefs_register();
if (err)
- oprofile_arch_exit();
+ goto out_arch;
+ return 0;
+out_arch:
+ oprofile_arch_exit();
return err;
}
static void __exit oprofile_exit(void)
{
+ oprofile_timer_exit();
oprofilefs_unregister();
oprofile_arch_exit();
}
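
[Editor's note] The oprofile_init() rework above introduces an out_arch label so that any failure after oprofile_arch_init() unwinds it exactly once. The error-unwinding pattern itself, as a small stand-alone program with stub init functions (illustrative only):

#include <stdio.h>

static int arch_init(void)   { puts("arch init");          return 0; }
static void arch_exit(void)  { puts("arch exit (unwind)"); }
static int fs_register(void) { puts("fs register fails");  return -1; }

static int my_init(void)
{
        int err;

        err = arch_init();
        if (err)
                return err;

        err = fs_register();
        if (err)
                goto out_arch;          /* undo the step that did succeed */

        return 0;

out_arch:
        arch_exit();
        return err;
}

int main(void)
{
        return my_init() ? 1 : 0;
}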
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index cb92f5c98c1..47e12cb4ee8 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -34,7 +34,8 @@ struct super_block;
struct dentry;
void oprofile_create_files(struct super_block *sb, struct dentry *root);
-void oprofile_timer_init(struct oprofile_operations *ops);
+int oprofile_timer_init(struct oprofile_operations *ops);
+void oprofile_timer_exit(void);
int oprofile_set_backtrace(unsigned long depth);
int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c..dc0ae4d14df 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,94 @@
#include <linux/oprofile.h>
#include <linux/profile.h>
#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include "oprof.h"
-static int timer_notify(struct pt_regs *regs)
+static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+
+static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ oprofile_add_sample(get_irq_regs(), 0);
+ hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
+ return HRTIMER_RESTART;
+}
+
+static void __oprofile_hrtimer_start(void *unused)
+{
+ struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = oprofile_hrtimer_notify;
+
+ hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static int oprofile_hrtimer_start(void)
{
- oprofile_add_sample(regs, 0);
+ on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
return 0;
}
-static int timer_start(void)
+static void __oprofile_hrtimer_stop(int cpu)
{
- return register_timer_hook(timer_notify);
+ struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+
+ hrtimer_cancel(hrtimer);
}
+static void oprofile_hrtimer_stop(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ __oprofile_hrtimer_stop(cpu);
+}
-static void timer_stop(void)
+static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
- unregister_timer_hook(timer_notify);
+ long cpu = (long) hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ smp_call_function_single(cpu, __oprofile_hrtimer_start,
+ NULL, 1);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ __oprofile_hrtimer_stop(cpu);
+ break;
+ }
+ return NOTIFY_OK;
}
+static struct notifier_block __refdata oprofile_cpu_notifier = {
+ .notifier_call = oprofile_cpu_notify,
+};
-void __init oprofile_timer_init(struct oprofile_operations *ops)
+int __init oprofile_timer_init(struct oprofile_operations *ops)
{
+ int rc;
+
+ rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
+ if (rc)
+ return rc;
ops->create_files = NULL;
ops->setup = NULL;
ops->shutdown = NULL;
- ops->start = timer_start;
- ops->stop = timer_stop;
+ ops->start = oprofile_hrtimer_start;
+ ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
+ return 0;
+}
+
+void __exit oprofile_timer_exit(void)
+{
+ unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 1586e1caa2f..8bef6d60f88 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -18,6 +18,8 @@
#include <linux/parport.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/irq.h>
@@ -31,7 +33,6 @@
#define DPRINTK(x...) do { } while (0)
#endif
-static struct parport *this_port = NULL;
static void amiga_write_data(struct parport *p, unsigned char data)
{
@@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = {
/* ----------- Initialisation code --------------------------------- */
-static int __init parport_amiga_init(void)
+static int __init amiga_parallel_probe(struct platform_device *pdev)
{
struct parport *p;
int err;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL))
- return -ENODEV;
-
- err = -EBUSY;
- if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel"))
- goto out_mem;
-
ciaa.ddrb = 0xff;
ciab.ddra &= 0xf8;
mb();
@@ -246,41 +240,63 @@ static int __init parport_amiga_init(void)
p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
PARPORT_DMA_NONE, &pp_amiga_ops);
if (!p)
- goto out_port;
+ return -EBUSY;
- err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p);
+ err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
+ p);
if (err)
goto out_irq;
- this_port = p;
printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_announce_port(p);
+ platform_set_drvdata(pdev, p);
+
return 0;
out_irq:
parport_put_port(p);
-out_port:
- release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
-out_mem:
return err;
}
-static void __exit parport_amiga_exit(void)
+static int __exit amiga_parallel_remove(struct platform_device *pdev)
+{
+ struct parport *port = platform_get_drvdata(pdev);
+
+ parport_remove_port(port);
+ if (port->irq != PARPORT_IRQ_NONE)
+ free_irq(IRQ_AMIGA_CIAA_FLG, port);
+ parport_put_port(port);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver amiga_parallel_driver = {
+ .remove = __exit_p(amiga_parallel_remove),
+ .driver = {
+ .name = "amiga-parallel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_parallel_init(void)
+{
+ return platform_driver_probe(&amiga_parallel_driver,
+ amiga_parallel_probe);
+}
+
+module_init(amiga_parallel_init);
+
+static void __exit amiga_parallel_exit(void)
{
- parport_remove_port(this_port);
- if (this_port->irq != PARPORT_IRQ_NONE)
- free_irq(IRQ_AMIGA_CIAA_FLG, this_port);
- parport_put_port(this_port);
- release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
+ platform_driver_unregister(&amiga_parallel_driver);
}
+module_exit(amiga_parallel_exit);
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
MODULE_LICENSE("GPL");
-
-module_init(parport_amiga_init)
-module_exit(parport_amiga_exit)
+MODULE_ALIAS("platform:amiga-parallel");
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 7dd370fa343..fd8cfe95f0a 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -75,7 +75,6 @@ INT_MODULE_PARM(epp_mode, 1);
typedef struct parport_info_t {
struct pcmcia_device *p_dev;
int ndev;
- dev_node_t node;
struct parport *port;
} parport_info_t;
@@ -105,7 +104,6 @@ static int parport_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -174,20 +172,19 @@ static int parport_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
- link->irq.AssignedIRQ, PARPORT_DMA_NONE,
+ link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
"0x%3x, irq %u failed\n", link->io.BasePort1,
- link->irq.AssignedIRQ);
+ link->irq);
goto failed;
}
@@ -195,11 +192,7 @@ static int parport_config(struct pcmcia_device *link)
if (epp_mode)
p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
info->ndev = 1;
- info->node.major = LP_MAJOR;
- info->node.minor = p->number;
info->port = p;
- strcpy(info->node.dev_name, p->name);
- link->dev_node = &info->node;
return 0;
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 065f229580d..9a5b4b89416 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -382,8 +382,11 @@ static const struct of_device_id bpp_match[] = {
MODULE_DEVICE_TABLE(of, bpp_match);
static struct of_platform_driver bpp_sbus_driver = {
- .name = "bpp",
- .match_table = bpp_match,
+ .driver = {
+ .name = "bpp",
+ .owner = THIS_MODULE,
+ .of_match_table = bpp_match,
+ },
.probe = bpp_probe,
.remove = __devexit_p(bpp_remove),
};
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7858a117e80..34ef70d562b 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -19,7 +19,7 @@ config PCI_MSI
by using the 'pci=nomsi' option. This disables MSI for the
entire system.
- If you don't know what to do here, say N.
+ If you don't know what to do here, say Y.
config PCI_DEBUG
bool "PCI Debugging"
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2f646fe1260..531bc697d80 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_SPINLOCK(pci_lock);
+static DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -33,10 +33,10 @@ int pci_bus_read_config_##size \
unsigned long flags; \
u32 data = 0; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -47,9 +47,9 @@ int pci_bus_write_config_##size \
int res; \
unsigned long flags; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -79,10 +79,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
struct pci_ops *old_ops;
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
old_ops = bus->ops;
bus->ops = ops;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
@@ -136,9 +136,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev)
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&pci_lock);
+ raw_spin_unlock_irq(&pci_lock);
schedule();
- spin_lock_irq(&pci_lock);
+ raw_spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
@@ -150,11 +150,11 @@ int pci_user_read_config_##size \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
@@ -165,11 +165,11 @@ int pci_user_write_config_##size \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
return ret; \
}
@@ -220,8 +220,13 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
return 0;
}
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "vpd r/w failed. This is likely a firmware "
+ "bug on this device. Contact the card "
+ "vendor for a firmware update.");
return -ETIMEDOUT;
+ }
if (fatal_signal_pending(current))
return -EINTR;
if (!cond_resched())
@@ -396,10 +401,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev)
unsigned long flags;
int was_blocked;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
was_blocked = dev->block_ucfg_access;
dev->block_ucfg_access = 1;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
@@ -417,7 +422,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
@@ -425,6 +430,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
dev->block_ucfg_access = 0;
wake_up_all(&pci_ucfg_wait);
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
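
[Editor's note] Two things are happening in this file: pci_lock becomes a raw_spinlock_t, which keeps it a busy-waiting lock even on preempt-rt style kernels where ordinary spinlocks may sleep, and every accessor touched is one instance of a macro-generated family (hence the ##size token pasting and trailing backslashes in the hunks). The token-pasting technique can be shown stand-alone; the register semantics below are made up and have nothing to do with PCI configuration space:

#include <stdio.h>
#include <stdint.h>

/* One macro stamps out a family of functions differing only in value type. */
#define DEFINE_READ(size, type)                                   \
static int read_config_##size(uint32_t reg, type *value)         \
{                                                                 \
        *value = (type)(0xdeadbeefu >> ((reg & 3) * 8));          \
        return 0;                                                 \
}

DEFINE_READ(byte, uint8_t)
DEFINE_READ(word, uint16_t)
DEFINE_READ(dword, uint32_t)

int main(void)
{
        uint8_t b; uint16_t w; uint32_t d;

        read_config_byte(0, &b);
        read_config_word(0, &w);
        read_config_dword(0, &d);
        printf("%02x %04x %08x\n", b, w, d);   /* ef beef deadbeef */
        return 0;
}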
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 33ead97f0c4..0a19708074c 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -131,9 +131,10 @@ static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
- else
+ else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ "Unsupported device scope\n");
+ }
start += scope->length;
}
if (*cnt == 0)
@@ -309,6 +310,8 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ dev = pci_physfn(dev);
+
list_for_each_entry(atsru, &dmar_atsr_units, list) {
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (atsr->segment == pci_domain_nr(dev->bus))
@@ -358,12 +361,14 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
}
- WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
+ WARN_TAINT(
+ 1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
return 0;
}
@@ -507,7 +512,7 @@ parse_dmar_table(void)
return ret;
}
-int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
struct pci_dev *dev)
{
int index;
@@ -530,6 +535,8 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
struct dmar_drhd_unit *dmaru = NULL;
struct acpi_dmar_hardware_unit *drhd;
+ dev = pci_physfn(dev);
+
list_for_each_entry(dmaru, &dmar_drhd_units, list) {
drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit,
@@ -614,7 +621,17 @@ int __init dmar_table_init(void)
return 0;
}
-static int bios_warned;
+static void warn_invalid_dmar(u64 addr, const char *message)
+{
+ WARN_TAINT_ONCE(
+ 1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; DMAR reported at address %llx%s!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ addr, message,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+}
int __init check_zero_address(void)
{
@@ -640,13 +657,7 @@ int __init check_zero_address(void)
drhd = (void *)entry_header;
if (!drhd->address) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
+ warn_invalid_dmar(0, "");
goto failed;
}
@@ -659,14 +670,8 @@ int __init check_zero_address(void)
ecap = dmar_readq(addr + DMAR_ECAP_REG);
early_iounmap(addr, VTD_PAGE_SIZE);
if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->address,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
+ warn_invalid_dmar(drhd->address,
+ " returns all ones");
goto failed;
}
}
@@ -731,14 +736,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
int msagaw = 0;
if (!drhd->reg_base_addr) {
- if (!bios_warned) {
- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
+ warn_invalid_dmar(0, "");
return -EINVAL;
}
@@ -758,16 +756,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- if (!bios_warned) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
+ warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
goto err_unmap;
}
@@ -806,7 +795,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
}
ver = readl(iommu->reg + DMAR_VER_REG);
- pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr,
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
(unsigned long long)iommu->cap,
@@ -1457,9 +1447,11 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
/*
* Check interrupt remapping support in DMAR table description.
*/
-int dmar_ir_support(void)
+int __init dmar_ir_support(void)
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *)dmar_tbl;
+ if (!dmar)
+ return 0;
return dmar->flags & 0x1;
}
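
[Editor's note] The dmar.c hunks above collapse several copy-pasted WARN() blocks and the hand-rolled bios_warned flag into one warn_invalid_dmar() helper that taints the kernel and prints at most once. The "warn once" idea, as a stand-alone sketch (plain stderr instead of WARN_TAINT_ONCE, illustrative only):

#include <stdio.h>

static void warn_invalid_dmar(unsigned long long addr, const char *message)
{
        static int warned;

        if (warned)
                return;                 /* later callers are silenced */
        warned = 1;
        fprintf(stderr,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n",
                addr, message);
}

int main(void)
{
        warn_invalid_dmar(0, "");                            /* prints    */
        warn_invalid_dmar(0xfed90000ull, " returns all ones"); /* suppressed */
        return 0;
}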
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 6ecbfb27db9..e525263210e 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -108,7 +108,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status);
static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
@@ -351,6 +351,7 @@ read_table_done:
/**
* ibm_read_apci_table - callback for the sysfs apci_table file
+ * @filp: the open sysfs file
* @kobj: the kobject this binary attribute is a part of
* @bin_attr: struct bin_attribute for this file
* @buffer: the kernel space buffer to fill
@@ -364,7 +365,7 @@ read_table_done:
* things get really tricky here...
* our solution is to only allow reading the table in all at once.
*/
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f184d1d2ecb..b3e5580c837 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -848,7 +848,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsytem ID's
+ /* Check for the proper subsystem ID's
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
@@ -1075,13 +1075,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make our own copy of the pci bus structure,
* as we like tweaking it a lot */
- ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL);
+ ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL);
if (!ctrl->pci_bus) {
err("out of memory\n");
rc = -ENOMEM;
goto err_free_ctrl;
}
- memcpy(ctrl->pci_bus, pdev->bus, sizeof(*ctrl->pci_bus));
ctrl->bus = pdev->bus->number;
ctrl->rev = pdev->revision;
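
[Editor's note] The hunk above replaces a kmalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call. A user-space equivalent of what the helper does (malloc in place of kmalloc, illustrative struct contents):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Allocate len bytes and copy src into them; NULL on allocation failure. */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

int main(void)
{
        struct { int number; int speed; } bus = { 3, 33 }, *copy;

        copy = memdup(&bus, sizeof(bus));
        if (!copy)
                return 1;
        printf("bus %d @ %d MHz\n", copy->number, copy->speed);
        free(copy);
        return 0;
}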
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 0a16444c14c..2fce726758d 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -84,12 +84,6 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
if (!dev)
continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot hot-add display device %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
pciehp_add_bridge(dev);
@@ -133,15 +127,9 @@ int pciehp_unconfigure_device(struct slot *p_slot)
presence = 0;
for (j = 0; j < 8; j++) {
- struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j));
+ struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j));
if (!temp)
continue;
- if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
- continue;
- }
if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
@@ -149,7 +137,8 @@ int pciehp_unconfigure_device(struct slot *p_slot)
"Cannot remove display device %s\n",
pci_name(temp));
pci_dev_put(temp);
- continue;
+ rc = EINVAL;
+ break;
}
}
pci_remove_bus_device(temp);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528dd..796828fce34 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -491,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
domain->iommu_coherency = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
@@ -507,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
domain->iommu_snooping = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_sc_support(g_iommus[i]->ecap)) {
domain->iommu_snooping = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
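
[Editor's note] The loops above replace the find_first_bit()/find_next_bit() idiom with for_each_set_bit(), which visits only the set positions of the bitmap (domain->iommu_bmp here appears to be a single word, given that its address is passed). For a one-word bitmap the same traversal looks like this in plain C (illustrative, not the kernel macro):

#include <stdio.h>

int main(void)
{
        unsigned long iommu_bmp = 0x29;          /* bits 0, 3 and 5 set */
        unsigned int i;

        /* Walk every bit position, skipping the clear ones. */
        for (i = 0; i < 8 * sizeof(iommu_bmp); i++) {
                if (!(iommu_bmp >> i & 1UL))
                        continue;
                printf("domain is attached to iommu %u\n", i);
        }
        return 0;
}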
@@ -1068,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages)
+ unsigned long pfn, unsigned int pages, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1089,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
DMA_TLB_PSI_FLUSH);
/*
- * In caching mode, domain ID 0 is reserved for non-present to present
- * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ * In caching mode, changes of pages from non-present to present require
+ * flush. However, device IOTLB doesn't need to be flushed in this case.
*/
- if (!cap_caching_mode(iommu->cap) || did)
+ if (!cap_caching_mode(iommu->cap) || !map)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
@@ -1154,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+ pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
+ ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1194,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
+ for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
@@ -1207,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
domain_exit(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
}
@@ -1292,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
spin_lock_irqsave(&iommu->lock, flags);
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found) {
@@ -1485,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
/* find an available domain id for this device in iommu */
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
id = num;
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found == 0) {
@@ -1558,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
@@ -2333,14 +2320,16 @@ int __init init_dmars(void)
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
}
}
@@ -2621,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+ iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -2661,15 +2650,24 @@ static void flush_unmaps(void)
if (!deferred_flush[i].next)
continue;
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ /* In caching mode, global flushes turn emulation expensive */
+ if (!cap_caching_mode(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) {
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
-
- mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+ /* On real hardware multiple invalidations are expensive */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain->id,
+ iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+ else {
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ }
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
@@ -2750,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2840,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2874,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
@@ -2928,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3436,22 +3433,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;
-static int vm_domain_min_agaw(struct dmar_domain *domain)
-{
- int i;
- int min_agaw = domain->agaw;
-
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
- if (min_agaw > g_iommus[i]->agaw)
- min_agaw = g_iommus[i]->agaw;
-
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
- }
-
- return min_agaw;
-}
-
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
struct dmar_domain *domain;
@@ -3512,8 +3493,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
iommu = drhd->iommu;
ndomains = cap_ndoms(iommu->cap);
- i = find_first_bit(iommu->domain_ids, ndomains);
- for (; i < ndomains; ) {
+ for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
spin_lock_irqsave(&iommu->lock, flags);
clear_bit(i, iommu->domain_ids);
@@ -3521,7 +3501,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
spin_unlock_irqrestore(&iommu->lock, flags);
break;
}
- i = find_next_bit(iommu->domain_ids, ndomains, i+1);
}
}
}
@@ -3582,7 +3561,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct pci_dev *pdev = to_pci_dev(dev);
struct intel_iommu *iommu;
int addr_width;
- u64 end;
/* normally pdev is not mapped */
if (unlikely(domain_context_mapped(pdev))) {
@@ -3605,14 +3583,30 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
- end = DOMAIN_MAX_ADDR(addr_width);
- end = end & VTD_PAGE_MASK;
- if (end < dmar_domain->max_addr) {
- printk(KERN_ERR "%s: iommu agaw (%d) is not "
+ if (addr_width > cap_mgaw(iommu->cap))
+ addr_width = cap_mgaw(iommu->cap);
+
+ if (dmar_domain->max_addr > (1LL << addr_width)) {
+ printk(KERN_ERR "%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
- __func__, iommu->agaw, dmar_domain->max_addr);
+ __func__, addr_width, dmar_domain->max_addr);
return -EFAULT;
}
+ dmar_domain->gaw = addr_width;
+
+ /*
+ * Knock out extra levels of page tables if necessary
+ */
+ while (iommu->agaw < dmar_domain->agaw) {
+ struct dma_pte *pte;
+
+ pte = dmar_domain->pgd;
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(dmar_domain->pgd);
+ dmar_domain->pgd = (struct dma_pte *)dma_pte_addr(pte);
+ }
+ dmar_domain->agaw--;
+ }
return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
@@ -3626,14 +3620,14 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(dmar_domain, pdev);
}
-static int intel_iommu_map_range(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t hpa,
+ int gfp_order, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
- int addr_width;
int prot = 0;
+ size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3643,20 +3637,17 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
+ size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
- int min_agaw;
u64 end;
/* check if minimum agaw is sufficient for mapped address */
- min_agaw = vm_domain_min_agaw(dmar_domain);
- addr_width = agaw_to_width(min_agaw);
- end = DOMAIN_MAX_ADDR(addr_width);
- end = end & VTD_PAGE_MASK;
+ end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
if (end < max_addr) {
- printk(KERN_ERR "%s: iommu agaw (%d) is not "
+ printk(KERN_ERR "%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
- __func__, min_agaw, max_addr);
+ __func__, dmar_domain->gaw, max_addr);
return -EFAULT;
}
dmar_domain->max_addr = max_addr;
@@ -3669,19 +3660,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
return ret;
}
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, int gfp_order)
{
struct dmar_domain *dmar_domain = domain->priv;
-
- if (!size)
- return;
+ size_t size = PAGE_SIZE << gfp_order;
dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
+
+ return gfp_order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3705,8 @@ static struct iommu_ops intel_iommu_ops = {
.domain_destroy = intel_iommu_domain_destroy,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map_range,
- .unmap = intel_iommu_unmap_range,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
};
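
With .map/.unmap replacing .map_range/.unmap_range, the generic IOMMU API of this kernel generation passes a page order rather than a byte size, and unmap reports the order it actually tore down. A hedged sketch of a caller using those signatures (the wrapper function is made up; only iommu_map()/iommu_unmap() are from the API):

#include <linux/iommu.h>

/* Map a single 4 KiB page (order 0) read/write, then unmap it again.
 * 'domain' is assumed to be an iommu_domain already attached to a device. */
static int map_and_unmap_one_page(struct iommu_domain *domain,
                                  unsigned long iova, phys_addr_t paddr)
{
    int ret, order;

    ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
    if (ret)
        return ret;

    order = iommu_unmap(domain, iova, 0);    /* returns the unmapped order */
    return order < 0 ? order : 0;
}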
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 6ee98a56946..1315ac688aa 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -832,9 +832,9 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
return -1;
}
- printk(KERN_INFO "IOAPIC id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+ printk(KERN_INFO "IOAPIC id %d under DRHD base "
+ " 0x%Lx IOMMU %d\n", scope->enumeration_id,
+ drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fad93983bfe..afd2fbf7d79 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -21,6 +21,7 @@
#include <linux/stat.h>
#include <linux/topology.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
@@ -357,7 +358,8 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static ssize_t
-pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -366,7 +368,7 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (capable(CAP_SYS_ADMIN)) {
+ if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
@@ -430,7 +432,8 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_config(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -487,7 +490,8 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -502,7 +506,8 @@ read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -519,6 +524,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer to store results
@@ -529,7 +535,8 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_read).
*/
static ssize_t
-pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -545,6 +552,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_write_legacy_io - write byte(s) to legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer containing value to be written
@@ -555,7 +563,8 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_write).
*/
static ssize_t
-pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -570,6 +579,7 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_mmap_legacy_mem - map legacy PCI memory into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -579,7 +589,8 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* memory space.
*/
static int
-pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -591,6 +602,7 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
/**
* pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -600,7 +612,8 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
* memory space. Returns -ENOSYS if the operation isn't supported
*/
static int
-pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -750,14 +763,16 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
}
static int
-pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int
-pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
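
Every sysfs binary-attribute callback in this file gains a leading struct file * argument, matching the updated prototypes in struct bin_attribute. A minimal sketch of a read callback written against the new signature (attribute name and backing buffer are illustrative):

#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/string.h>

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *bin_attr,
                                char *buf, loff_t off, size_t count)
{
    /* filp exposes the opener's credentials (filp->f_cred), which is
     * exactly what pci_read_config() starts using above. */
    if (off >= bin_attr->size)
        return 0;
    if (off + count > bin_attr->size)
        count = bin_attr->size - off;
    memcpy(buf, (char *)bin_attr->private + off, count);
    return count;
}

static struct bin_attribute example_attr = {
    .attr    = { .name = "example", .mode = 0444 },
    .size    = 0,            /* filled in before registration */
    .private = NULL,         /* points at the backing buffer */
    .read    = example_bin_read,
};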
@@ -861,6 +876,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
/**
* pci_write_rom - used to enable access to the PCI ROM display
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: user input
@@ -870,7 +886,8 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t
-pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -885,6 +902,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_read_rom - read a PCI ROM
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: where to put the data we read from the ROM
@@ -895,7 +913,8 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
* device corresponding to @kobj.
*/
static ssize_t
-pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -960,7 +979,12 @@ static ssize_t reset_store(struct device *dev,
if (val != 1)
return -EINVAL;
- return pci_reset_function(pdev);
+
+ result = pci_reset_function(pdev);
+ if (result < 0)
+ return result;
+
+ return count;
}
static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
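
The reset_store() fix above follows the usual sysfs convention: a store method returns the number of bytes it consumed on success and a negative errno on failure, instead of forwarding a zero return from the helper. The pattern in isolation (do_device_operation() is a hypothetical stand-in for pci_reset_function()):

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
    unsigned long val;
    ssize_t result;

    if (strict_strtoul(buf, 0, &val))
        return -EINVAL;

    result = do_device_operation(dev, val);    /* hypothetical helper */
    if (result < 0)
        return result;        /* propagate the error */

    return count;             /* report the whole input as consumed */
}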
@@ -1011,6 +1035,39 @@ error:
return retval;
}
+static void pci_remove_slot_links(struct pci_dev *dev)
+{
+ char func[10];
+ struct pci_slot *slot;
+
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int pci_create_slot_links(struct pci_dev *dev)
+{
+ int result = 0;
+ char func[10];
+ struct pci_slot *slot;
+
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto out;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ }
+out:
+ return result;
+}
+
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
{
int retval;
@@ -1073,6 +1130,8 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
if (retval)
goto err_vga_file;
+ pci_create_slot_links(pdev);
+
return 0;
err_vga_file:
@@ -1122,6 +1181,8 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
+ pci_remove_slot_links(pdev);
+
pci_remove_capabilities_sysfs(pdev);
if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 37499127c80..60f30e7f1c8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1193,7 +1193,7 @@ void pci_disable_enabled_device(struct pci_dev *dev)
* anymore. This only involves disabling PCI bus-mastering, if active.
*
* Note we don't actually disable the device until all callers of
- * pci_device_enable() have called pci_device_disable().
+ * pci_enable_device() have called pci_disable_device().
*/
void
pci_disable_device(struct pci_dev *dev)
@@ -1503,7 +1503,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
* @dev: Device to handle.
*
- * Disable device's sytem wake-up capability and put it into D0.
+ * Disable device's system wake-up capability and put it into D0.
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
@@ -1631,7 +1631,6 @@ void pci_pm_init(struct pci_dev *dev)
* let the user space enable it to wake up the system as needed.
*/
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
} else {
@@ -1655,7 +1654,6 @@ void platform_pci_wakeup_init(struct pci_dev *dev)
return;
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
platform_pci_sleep_wake(dev, false);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4eb10f48d27..f8077b3c8c8 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -244,7 +244,7 @@ struct pci_ats {
int stu; /* Smallest Translation Unit */
int qdep; /* Invalidate Queue Depth */
int ref_cnt; /* Physical Function reference count */
- int is_enabled:1; /* Enable bit is set */
+ unsigned int is_enabled:1; /* Enable bit is set */
};
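
The one-line pci.h change matters because a plain int one-bit bitfield is, with GCC's usual treatment, signed and can only hold 0 and -1, so tests against 1 silently fail. A user-space sketch of the pitfall (illustrative structs, not kernel code):

#include <stdio.h>

struct flags_signed   { int          is_enabled:1; };
struct flags_unsigned { unsigned int is_enabled:1; };

int main(void)
{
    struct flags_signed   s = { .is_enabled = 1 };
    struct flags_unsigned u = { .is_enabled = 1 };

    /* The signed field actually stores -1, so this prints 0 (false). */
    printf("signed   == 1: %d\n", s.is_enabled == 1);
    /* The unsigned field stores 1 as intended: prints 1 (true). */
    printf("unsigned == 1: %d\n", u.is_enabled == 1);
    return 0;
}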
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index f8f425b8731..909924692b8 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -168,7 +168,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
target = &err->root_status;
rw1cs = 1;
break;
- case PCI_ERR_ROOT_COR_SRC:
+ case PCI_ERR_ROOT_ERR_SRC:
target = &err->source_id;
break;
}
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 7a711ee314b..484cc55194b 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,13 +72,120 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+ bool enable = *((bool *)data);
+
+ if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
+ (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
+
+ if (enable)
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+ bool enable)
+{
+ set_device_error_reporting(dev, &enable);
+
+ if (!dev->subordinate)
+ return;
+ pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
+/**
+ * aer_enable_rootport - enable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus loads AER service driver.
+ */
+static void aer_enable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ int pos, aer_pos;
+ u16 reg16;
+ u32 reg32;
+
+ pos = pci_pcie_cap(pdev);
+ /* Clear PCIe Capability's Device Status */
+ pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
+ pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
+
+ /* Disable system error generation in response to error messages */
+ pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
+ reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
+ pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
+
+ aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Clear error status */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+
+ /*
+ * Enable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, true);
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
+/**
+ * aer_disable_rootport - disable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus unloads AER service driver.
+ */
+static void aer_disable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ u32 reg32;
+ int pos;
+
+ /*
+ * Disable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, false);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ /* Clear Root's error status reg */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+}
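
Note how both aer_enable_rootport() and aer_disable_rootport() now touch only the interrupt-enable bits of the Root Error Command register instead of writing the whole register, preserving whatever else firmware may have programmed there. The read-modify-write idiom, condensed into one hypothetical helper:

/* Flip only ROOT_PORT_INTR_ON_MESG_MASK in the Root Error Command register.
 * 'pos' is the AER extended capability offset obtained earlier via
 * pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR). */
static void aer_set_root_intr(struct pci_dev *pdev, int pos, bool enable)
{
    u32 reg32;

    pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
    if (enable)
        reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
    else
        reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
    pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
}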
+
/**
* aer_irq - Root Port's ISR
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
* Invoked when Root Port detects AER messages.
- **/
+ */
irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
@@ -97,13 +204,13 @@ irqreturn_t aer_irq(int irq, void *context)
/* Read error status */
pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & ROOT_ERR_STATUS_MASKS)) {
+ if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
spin_unlock_irqrestore(&rpc->e_lock, flags);
return IRQ_NONE;
}
/* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_COR_SRC, &id);
+ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
/* Store error source for later DPC handler */
@@ -135,7 +242,7 @@ EXPORT_SYMBOL_GPL(aer_irq);
* @dev: pointer to the pcie_dev data structure
*
* Invoked when Root Port's AER service is loaded.
- **/
+ */
static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
@@ -144,15 +251,11 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
if (!rpc)
return NULL;
- /*
- * Initialize Root lock access, e_lock, to Root Error Status Reg,
- * Root Error ID Reg, and Root error producer/consumer index.
- */
+ /* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
- rpc->prod_idx = rpc->cons_idx = 0;
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
@@ -167,7 +270,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
* @dev: pointer to the pcie_dev data structure
*
* Invoked when PCI Express bus unloads or AER probe fails.
- **/
+ */
static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
@@ -179,7 +282,8 @@ static void aer_remove(struct pcie_device *dev)
wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
- aer_delete_rootport(rpc);
+ aer_disable_rootport(rpc);
+ kfree(rpc);
set_service_data(dev, NULL);
}
}
@@ -190,7 +294,7 @@ static void aer_remove(struct pcie_device *dev)
* @id: pointer to the service id data structure
*
* Invoked when PCI Express bus loads AER service driver.
- **/
+ */
static int __devinit aer_probe(struct pcie_device *dev)
{
int status;
@@ -230,47 +334,30 @@ static int __devinit aer_probe(struct pcie_device *dev)
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver when performing link reset at Root Port.
- **/
+ */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
- u16 p2p_ctrl;
- u32 status;
+ u32 reg32;
int pos;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
/* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
-
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * we should send hot reset message for 2ms to allow it time to
- * propogate to all downstream ports
- */
- msleep(2);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
+ aer_do_secondary_bus_reset(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
+ /* Clear Root Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
- pci_write_config_dword(dev,
- pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -281,7 +368,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
* @error: error severity being notified by port bus
*
* Invoked by Port Bus driver during error recovery.
- **/
+ */
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
@@ -294,7 +381,7 @@ static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver during nonfatal recovery.
- **/
+ */
static void aer_error_resume(struct pci_dev *dev)
{
int pos;
@@ -321,7 +408,7 @@ static void aer_error_resume(struct pci_dev *dev)
* aer_service_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
- **/
+ */
static int __init aer_service_init(void)
{
if (pcie_aer_disable)
@@ -335,7 +422,7 @@ static int __init aer_service_init(void)
* aer_service_exit - unregister AER root service driver
*
* Invoked when AER root service driver is unloaded.
- **/
+ */
static void __exit aer_service_exit(void)
{
pcie_port_service_unregister(&aerdriver);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bd833ea3ba4..80c11d13149 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -17,9 +17,6 @@
#define AER_FATAL 1
#define AER_CORRECTABLE 2
-/* Root Error Status Register Bits */
-#define ROOT_ERR_STATUS_MASKS 0x0f
-
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
@@ -117,8 +114,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-extern void aer_enable_rootport(struct aer_rpc *rpc);
-extern void aer_delete_rootport(struct aer_rpc *rpc);
+extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
@@ -134,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
}
#endif
+#ifdef CONFIG_ACPI_APEI
+extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+#else
+static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
+{
+ if (pci_dev->__aer_firmware_first_valid)
+ return pci_dev->__aer_firmware_first;
+ return 0;
+}
+#endif
+
+static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
+ int enable)
+{
+ pci_dev->__aer_firmware_first = !!enable;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 04814087658..f278d7b0d95 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/delay.h>
+#include <acpi/apei.h>
#include "aerdrv.h"
/**
@@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev)
return 0;
}
+
+#ifdef CONFIG_ACPI_APEI
+static inline int hest_match_pci(struct acpi_hest_aer_common *p,
+ struct pci_dev *pci)
+{
+ return (0 == pci_domain_nr(pci->bus) &&
+ p->bus == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn));
+}
+
+struct aer_hest_parse_info {
+ struct pci_dev *pci_dev;
+ int firmware_first;
+};
+
+static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct aer_hest_parse_info *info = data;
+ struct acpi_hest_aer_common *p;
+ u8 pcie_type = 0;
+ u8 bridge = 0;
+ int ff = 0;
+
+ switch (hest_hdr->type) {
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ pcie_type = PCI_EXP_TYPE_ROOT_PORT;
+ break;
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ pcie_type = PCI_EXP_TYPE_ENDPOINT;
+ break;
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ bridge = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ if (p->flags & ACPI_HEST_GLOBAL) {
+ if ((info->pci_dev->is_pcie &&
+ info->pci_dev->pcie_type == pcie_type) || bridge)
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ } else
+ if (hest_match_pci(p, info->pci_dev))
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ info->firmware_first = ff;
+
+ return 0;
+}
+
+static void aer_set_firmware_first(struct pci_dev *pci_dev)
+{
+ int rc;
+ struct aer_hest_parse_info info = {
+ .pci_dev = pci_dev,
+ .firmware_first = 0,
+ };
+
+ rc = apei_hest_parse(aer_hest_parse, &info);
+
+ if (rc)
+ pci_dev->__aer_firmware_first = 0;
+ else
+ pci_dev->__aer_firmware_first = info.firmware_first;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
+
+int pcie_aer_get_firmware_first(struct pci_dev *dev)
+{
+ if (!dev->__aer_firmware_first_valid)
+ aer_set_firmware_first(dev);
+ return dev->__aer_firmware_first;
+}
+#endif
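
apei_hest_parse() walks every HEST entry and hands each header to the callback, which returns non-zero to abort the walk. As a rough illustration of the callback contract used by aer_hest_parse() above, a sketch that merely counts AER-related entries (names ending in _example are made up):

#include <linux/acpi.h>
#include <acpi/apei.h>

static int count_aer_entries_example(struct acpi_hest_header *hest_hdr,
                                     void *data)
{
    int *count = data;

    switch (hest_hdr->type) {
    case ACPI_HEST_TYPE_AER_ROOT_PORT:
    case ACPI_HEST_TYPE_AER_ENDPOINT:
    case ACPI_HEST_TYPE_AER_BRIDGE:
        (*count)++;
        break;
    }
    return 0;    /* zero: keep walking the table */
}

static int hest_aer_entry_count_example(void)
{
    int count = 0;

    if (apei_hest_parse(count_aer_entries_example, &count))
        return -ENOENT;    /* no HEST or walk aborted */
    return count;
}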
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aceb04b67b6..8af4f619bba 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
- if (dev->aer_firmware_first)
+ if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -47,13 +47,12 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 |
- PCI_EXP_DEVCTL_CERE |
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 |= (PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE;
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -64,19 +63,19 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
- if (dev->aer_firmware_first)
+ if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_pcie_cap(dev);
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 & ~(PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 &= ~(PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -99,99 +98,46 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-static int set_device_error_reporting(struct pci_dev *dev, void *data)
-{
- bool enable = *((bool *)data);
-
- if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
- (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
- if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
- }
-
- if (enable)
- pcie_set_ecrc_checking(dev);
-
- return 0;
-}
-
/**
- * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
- * @dev: pointer to root port's pci_dev data structure
- * @enable: true = enable error reporting, false = disable error reporting.
+ * add_error_device - list device to be handled
+ * @e_info: pointer to error info
+ * @dev: pointer to pci_dev to be added
*/
-static void set_downstream_devices_error_reporting(struct pci_dev *dev,
- bool enable)
-{
- set_device_error_reporting(dev, &enable);
-
- if (!dev->subordinate)
- return;
- pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
-}
-
-static inline int compare_device_id(struct pci_dev *dev,
- struct aer_err_info *e_info)
-{
- if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- return 1;
- }
-
- return 0;
-}
-
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
e_info->dev[e_info->error_dev_num] = dev;
e_info->error_dev_num++;
- return 1;
+ return 0;
}
-
- return 0;
+ return -ENOSPC;
}
-
#define PCI_BUS(x) (((x) >> 8) & 0xff)
-static int find_device_iter(struct pci_dev *dev, void *data)
+/**
+ * is_error_source - check whether the device is source of reported error
+ * @dev: pointer to pci_dev to be checked
+ * @e_info: pointer to reported error info
+ */
+static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
int pos;
- u32 status;
- u32 mask;
+ u32 status, mask;
u16 reg16;
- int result;
- struct aer_err_info *e_info = (struct aer_err_info *)data;
/*
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
- result = compare_device_id(dev, e_info);
- if (result)
- add_error_device(e_info, dev);
+ /* Device ID match? */
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
+ return true;
- /*
- * If there is no multiple error, we stop
- * or continue based on the id comparing.
- */
+ /* Continue id comparing if there is no multiple error */
if (!e_info->multi_error_valid)
- return result;
-
- /*
- * If there are multiple errors and id does match,
- * We need continue to search other devices under
- * the root port. Return 0 means that.
- */
- if (result)
- return 0;
+ return false;
}
/*
@@ -200,71 +146,94 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* 2) bus id is equal to 0. Some ports might lose the bus
* id of error source id;
* 3) There are multiple errors and prior id comparing fails;
- * We check AER status registers to find the initial reporter.
+ * We check AER status registers to find a possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
- return 0;
+ return false;
pos = pci_pcie_cap(dev);
if (!pos)
- return 0;
+ return false;
+
/* Check if AER is enabled */
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
if (!(reg16 & (
PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE)))
- return 0;
+ return false;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
- return 0;
+ return false;
- status = 0;
- mask = 0;
+ /* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
} else {
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
}
+ if (status & ~mask)
+ return true;
- return 0;
+ return false;
+}
-added:
- if (e_info->multi_error_valid)
- return 0;
- else
- return 1;
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ if (is_error_source(dev, e_info)) {
+ /* List this device */
+ if (add_error_device(e_info, dev)) {
+ /* We cannot handle more... Stop iteration */
+ /* TODO: Should print error message here? */
+ return 1;
+ }
+
+ /* If there is only a single error, stop iteration */
+ if (!e_info->multi_error_valid)
+ return 1;
+ }
+ return 0;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @err_info: including detailed error information such like id
+ * @e_info: including detailed error information such as id
*
- * Invoked when error is detected at the Root Port.
+ * Return true if found.
+ *
+ * Invoked by DPC when error is detected at the Root Port.
+ * Caller of this function must set id, severity, and multi_error_valid of
+ * struct aer_err_info pointed to by @e_info properly. This function must fill
+ * e_info->error_dev_num and e_info->dev[], based on the given information.
*/
-static void find_source_device(struct pci_dev *parent,
+static bool find_source_device(struct pci_dev *parent,
struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
+ /* Must reset in this function */
+ e_info->error_dev_num = 0;
+
/* Is Root Port an agent that sends error message? */
result = find_device_iter(dev, e_info);
if (result)
- return;
+ return true;
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+
+ if (!e_info->error_dev_num) {
+ dev_printk(KERN_DEBUG, &parent->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ return false;
+ }
+ return true;
}
static int report_error_detected(struct pci_dev *dev, void *data)
@@ -403,43 +372,77 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
return result_data.result;
}
-struct find_aer_service_data {
- struct pcie_port_service_driver *aer_driver;
- int is_downstream;
-};
-
-static int find_aer_service_iter(struct device *device, void *data)
+/**
+ * aer_do_secondary_bus_reset - perform secondary bus reset
+ * @dev: pointer to bridge's pci_dev data structure
+ *
+ * Invoked when performing link reset at Root Port or Downstream Port.
+ */
+void aer_do_secondary_bus_reset(struct pci_dev *dev)
{
- struct device_driver *driver;
- struct pcie_port_service_driver *service_driver;
- struct find_aer_service_data *result;
+ u16 p2p_ctrl;
+
+ /* Assert Secondary Bus Reset */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
+ p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * we should send hot reset message for 2ms to allow it time to
+ * propagate to all downstream ports
+ */
+ msleep(2);
+
+ /* De-assert Secondary Bus Reset */
+ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * System software must wait for at least 100ms from the end
+ * of a reset of one or more device before it is permitted
+ * to issue Configuration Requests to those devices.
+ */
+ msleep(200);
+}
- result = (struct find_aer_service_data *) data;
+/**
+ * default_downstream_reset_link - default reset function for Downstream Port
+ * @dev: pointer to downstream port's pci_dev data structure
+ *
+ * Invoked when performing link reset at a Downstream Port with no AER driver.
+ */
+static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
+{
+ aer_do_secondary_bus_reset(dev);
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Downstream Port link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
- if (device->bus == &pcie_port_bus_type) {
- struct pcie_device *pcie = to_pcie_device(device);
+static int find_aer_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver, **drv;
- if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
- result->is_downstream = 1;
+ drv = (struct pcie_port_service_driver **) data;
- driver = device->driver;
- if (driver) {
- service_driver = to_service_driver(driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- result->aer_driver = service_driver;
- return 1;
- }
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == PCIE_PORT_SERVICE_AER) {
+ *drv = service_driver;
+ return 1;
}
}
return 0;
}
-static void find_aer_service(struct pci_dev *dev,
- struct find_aer_service_data *data)
+static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
- int retval;
- retval = device_for_each_child(&dev->dev, data, find_aer_service_iter);
+ struct pcie_port_service_driver *drv = NULL;
+
+ device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
+
+ return drv;
}
static pci_ers_result_t reset_link(struct pcie_device *aerdev,
@@ -447,38 +450,34 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
{
struct pci_dev *udev;
pci_ers_result_t status;
- struct find_aer_service_data data;
+ struct pcie_port_service_driver *driver;
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
+ if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
udev = dev;
- else
+ } else {
+ /* Reset the upstream component (likely downstream port) */
udev = dev->bus->self;
+ }
- data.is_downstream = 0;
- data.aer_driver = NULL;
- find_aer_service(udev, &data);
+ /* Try the AER driver of the component first */
+ driver = find_aer_service(udev);
- /*
- * Use the aer driver of the error agent firstly.
- * If it hasn't the aer driver, use the root port's
- */
- if (!data.aer_driver || !data.aer_driver->reset_link) {
- if (data.is_downstream &&
- aerdev->device.driver &&
- to_service_driver(aerdev->device.driver)->reset_link) {
- data.aer_driver =
- to_service_driver(aerdev->device.driver);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
- "support\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ status = default_downstream_reset_link(udev);
+ } else {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
}
- status = data.aer_driver->reset_link(udev);
if (status != PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
- "device %s failed\n", pci_name(udev));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "link reset at upstream device %s failed\n",
+ pci_name(udev));
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -495,8 +494,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
- struct pci_dev *dev,
+static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
@@ -514,10 +512,8 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
if (severity == AER_FATAL) {
result = reset_link(aerdev, dev);
- if (result != PCI_ERS_RESULT_RECOVERED) {
- /* TODO: Should panic here? */
- return result;
- }
+ if (result != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
}
if (status == PCI_ERS_RESULT_CAN_RECOVER)
@@ -538,13 +534,22 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
report_slot_reset);
}
- if (status == PCI_ERS_RESULT_RECOVERED)
- broadcast_error_message(dev,
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
state,
"resume",
report_resume);
- return status;
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver successfully recovered\n");
+ return;
+
+failed:
+ /* TODO: Should kernel panic here? */
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver didn't recover\n");
}
/**
@@ -559,7 +564,6 @@ static void handle_error_source(struct pcie_device *aerdev,
struct pci_dev *dev,
struct aer_err_info *info)
{
- pci_ers_result_t status = 0;
int pos;
if (info->severity == AER_CORRECTABLE) {
@@ -571,114 +575,8 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else {
- status = do_recovery(aerdev, dev, info->severity);
- if (status == PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
- "successfully recovered\n");
- } else {
- /* TODO: Should kernel panic here? */
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
- "recover\n");
- }
- }
-}
-
-/**
- * aer_enable_rootport - enable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus loads AER service driver.
- */
-void aer_enable_rootport(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- int pos, aer_pos;
- u16 reg16;
- u32 reg32;
-
- pos = pci_pcie_cap(pdev);
- /* Clear PCIe Capability's Device Status */
- pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
- pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
-
- /* Disable system error generation in response to error messages */
- pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
- reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
- pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
-
- aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Clear error status */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
-
- /*
- * Enable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, true);
-
- /* Enable Root Port's interrupt in response to error messages */
- pci_write_config_dword(pdev,
- aer_pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
-}
-
-/**
- * disable_root_aer - disable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus unloads AER service driver.
- */
-static void disable_root_aer(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- u32 reg32;
- int pos;
-
- /*
- * Disable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, false);
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
-
- /* Clear Root's error status reg */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
-}
-
-/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- *
- * Invoked by DPC handler to consume an error.
- */
-static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
-{
- struct aer_err_source *e_source;
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return NULL;
- }
- e_source = &rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return e_source;
+ } else
+ do_recovery(aerdev, dev, info->severity);
}
/**
@@ -687,11 +585,14 @@ static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
* @info: pointer to structure to store the error record
*
* Return 1 on success, 0 on error.
+ *
+ * Note that @info is reused among all error devices. Clear fields properly.
*/
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
+ /* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -744,12 +645,6 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
{
int i;
- if (!e_info->dev[0]) {
- dev_printk(KERN_DEBUG, &p_device->port->dev,
- "can't find device of ID%04x\n",
- e_info->id);
- }
-
/* Report all before handle them, not to lost records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
@@ -770,11 +665,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
struct aer_err_info *e_info;
- int i;
/* struct aer_err_info might be big, so we allocate it with slab */
e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
- if (e_info == NULL) {
+ if (!e_info) {
dev_printk(KERN_DEBUG, &p_device->port->dev,
"Can't allocate mem when processing AER errors\n");
return;
@@ -784,37 +678,72 @@ static void aer_isr_one_error(struct pcie_device *p_device,
* There is a possibility that both correctable error and
* uncorrectable error being logged. Report correctable error first.
*/
- for (i = 1; i & ROOT_ERR_STATUS_MASKS ; i <<= 2) {
- if (i > 4)
- break;
- if (!(e_src->status & i))
- continue;
-
- memset(e_info, 0, sizeof(struct aer_err_info));
-
- /* Init comprehensive error information */
- if (i & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
- } else {
- e_info->id = ERR_UNCOR_ID(e_src->id);
- e_info->severity = ((e_src->status >> 6) & 1);
- }
- if (e_src->status &
- (PCI_ERR_ROOT_MULTI_COR_RCV |
- PCI_ERR_ROOT_MULTI_UNCOR_RCV))
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
+
+ aer_print_port_info(p_device->port, e_info);
+
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
+ }
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ e_info->severity = AER_FATAL;
+ else
+ e_info->severity = AER_NONFATAL;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+ e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
aer_print_port_info(p_device->port, e_info);
- find_source_device(p_device->port, e_info);
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
}
kfree(e_info);
}
/**
+ * get_e_source - retrieve an error source
+ * @rpc: pointer to the root port which holds an error
+ * @e_src: pointer to store retrieved error source
+ *
+ * Return 1 if an error source is retrieved, otherwise 0.
+ *
+ * Invoked by DPC handler to consume an error.
+ */
+static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /* Lock access to Root error producer/consumer index */
+ spin_lock_irqsave(&rpc->e_lock, flags);
+ if (rpc->prod_idx != rpc->cons_idx) {
+ *e_src = rpc->e_sources[rpc->cons_idx];
+ rpc->cons_idx++;
+ if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
+ rpc->cons_idx = 0;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+
+ return ret;
+}
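
get_e_source() now copies the entry out while holding the producer/consumer spinlock rather than returning a pointer into the ring, so aer_isr() never reads a slot the interrupt handler may be rewriting. The same single-producer/single-consumer copy-out pattern, reduced to a standalone sketch (the ring structure here is illustrative, not the real struct aer_rpc):

#define EXAMPLE_RING_SIZE 16    /* illustrative capacity */

struct example_ring {
    spinlock_t lock;
    unsigned int prod, cons;
    struct aer_err_source slot[EXAMPLE_RING_SIZE];
};

/* Copy one entry out; returns 1 if something was consumed, 0 if empty. */
static int example_ring_consume(struct example_ring *r,
                                struct aer_err_source *out)
{
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&r->lock, flags);
    if (r->prod != r->cons) {
        *out = r->slot[r->cons];
        if (++r->cons == EXAMPLE_RING_SIZE)
            r->cons = 0;
        ret = 1;
    }
    spin_unlock_irqrestore(&r->lock, flags);

    return ret;
}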
+
+/**
* aer_isr - consume errors detected by root port
* @work: definition of this work item
*
@@ -824,34 +753,17 @@ void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
- struct aer_err_source *e_src;
+ struct aer_err_source e_src;
mutex_lock(&rpc->rpc_mutex);
- e_src = get_e_source(rpc);
- while (e_src) {
- aer_isr_one_error(p_device, e_src);
- e_src = get_e_source(rpc);
- }
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
wake_up(&rpc->wait_release);
}
/**
- * aer_delete_rootport - disable root port aer and delete service data
- * @rpc: pointer to a root port device being deleted
- *
- * Invoked when AER service unloaded on a specific Root Port
- */
-void aer_delete_rootport(struct aer_rpc *rpc)
-{
- /* Disable root port AER itself */
- disable_root_aer(rpc);
-
- kfree(rpc);
-}
-
-/**
* aer_init - provide AER initialization
* @dev: pointer to AER pcie device
*
@@ -859,7 +771,7 @@ void aer_delete_rootport(struct aer_rpc *rpc)
*/
int aer_init(struct pcie_device *dev)
{
- if (dev->port->aer_firmware_first) {
+ if (pcie_aer_get_firmware_first(dev->port)) {
dev_printk(KERN_DEBUG, &dev->device,
"PCIe errors handled by platform firmware.\n");
goto out;
@@ -873,7 +785,7 @@ out:
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
- dev->port->aer_firmware_first = 0;
+ pcie_aer_force_firmware_first(dev->port, 0);
return 0;
}
return -ENXIO;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c82548afcd5..f4adba2d1dd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
-#include <acpi/acpi_hest.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
-static void set_pci_aer_firmware_first(struct pci_dev *pdev)
-{
- if (acpi_hest_firmware_first_pci(pdev))
- pdev->aer_firmware_first = 1;
-}
-
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev)
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
- set_pci_aer_firmware_first(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 27c0e6eb713..b7512cf08c5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2127,6 +2127,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
@@ -2218,15 +2219,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
-/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
+/* The P5N32-SLI motherboards from Asus have a problem with msi
* for the MCP55 NIC. It is not yet determined whether the msi problem
* also affects other devices. As for now, turn off msi for this device.
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
- if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
+ if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
+ dmi_name_in_vendors("P5N32-E SLI")) {
dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
+ "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
@@ -2552,6 +2554,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
+/* Allow manual resource allocation for PCI hotplug bridges
+ * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
+ * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
+ * the kernel fails to allocate resources when a hotplug device is
+ * inserted and the PCI bus is rescanned.
+ */
+static void __devinit quirk_hotplug_bridge(struct pci_dev *dev)
+{
+ dev->is_hotplug_bridge = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+
/*
* This is a quirk for the Ricoh MMC controller found as a part of
* some mulifunction chips.
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48..e0189cf7c55 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -97,6 +97,50 @@ static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
return bus_speed_read(slot->bus->cur_bus_speed, buf);
}
+static void remove_sysfs_files(struct pci_slot *slot)
+{
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int create_sysfs_files(struct pci_slot *slot)
+{
+ int result;
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto fail;
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ if (result)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ remove_sysfs_files(slot);
+ return result;
+}
+
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
@@ -109,6 +153,8 @@ static void pci_slot_release(struct kobject *kobj)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
+ remove_sysfs_files(slot);
+
list_del(&slot->list);
kfree(slot);
@@ -300,6 +346,8 @@ placeholder:
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ create_sysfs_files(slot);
+
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index d189e4743e6..d0f5ad30607 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -49,26 +49,6 @@ config PCMCIA_LOAD_CIS
If unsure, say Y.
-config PCMCIA_IOCTL
- bool "PCMCIA control ioctl (obsolete)"
- depends on PCMCIA && ARM && !SMP && !PREEMPT
- default y
- help
- If you say Y here, the deprecated ioctl interface to the PCMCIA
- subsystem will be built. It is needed by the deprecated pcmcia-cs
- tools (cardmgr, cardctl) to function properly.
-
- You should use the new pcmciautils package instead (see
- <file:Documentation/Changes> for location and details).
-
- This config option will most likely be removed from kernel 2.6.35,
- the associated code from kernel 2.6.36.
-
- As the PCMCIA ioctl is not locking safe, it depends on !SMP and
- !PREEMPT.
-
- If unsure, say N.
-
config CARDBUS
bool "32-bit CardBus support"
depends on PCI
@@ -234,7 +214,8 @@ config PCMCIA_PXA2XX
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2)
+ || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \
+ || MACH_VPAC270)
select PCMCIA_SOC_COMMON
help
Say Y here to include support for the PXA2xx PCMCIA controller
@@ -317,7 +298,7 @@ config ELECTRA_CF
PA Semi Electra eval board.
config PCCARD_NONSTATIC
- tristate
+ bool
config PCCARD_IODYN
bool
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 381b031d9d7..d006e8beab9 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -2,15 +2,18 @@
# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
#
-pcmcia_core-y += cs.o rsrc_mgr.o socket_sysfs.o
+pcmcia_core-y += cs.o socket_sysfs.o
pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
obj-$(CONFIG_PCCARD) += pcmcia_core.o
-pcmcia-y += ds.o pcmcia_resource.o cistpl.o
+pcmcia-y += ds.o pcmcia_resource.o cistpl.o pcmcia_cis.o
pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
obj-$(CONFIG_PCMCIA) += pcmcia.o
-obj-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
+pcmcia_rsrc-y += rsrc_mgr.o
+pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
+pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
+obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
# socket drivers
@@ -66,6 +69,7 @@ pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
+pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 9e84d039de4..eae9cbe37a3 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -113,7 +113,7 @@ static int bfin_cf_get_status(struct pcmcia_socket *s, u_int *sp)
if (bfin_cf_present(cf->cd_pfx)) {
*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
- s->irq.AssignedIRQ = 0;
+ s->pcmcia_irq = 0;
s->pci_irq = cf->irq;
} else
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index e6ab2a47d8c..9a58862f140 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -94,7 +94,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- s->irq.AssignedIRQ = s->pci_irq;
return 0;
}
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 854959cada3..8844bc3e311 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -129,6 +129,8 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s,
/**
* pcmcia_read_cis_mem() - low-level function to read CIS memory
+ *
+ * must be called with ops_mutex held
*/
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
@@ -138,7 +140,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- mutex_lock(&s->ops_mutex);
if (attr & IS_INDIRECT) {
/* Indirect accesses use a bunch of special registers at fixed
locations in common memory */
@@ -153,7 +154,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
return -1;
}
@@ -184,7 +184,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
return -1;
}
end = sys + s->map_size;
@@ -198,7 +197,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
addr = 0;
}
}
- mutex_unlock(&s->ops_mutex);
dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
*(u_char *)(ptr+0), *(u_char *)(ptr+1),
*(u_char *)(ptr+2), *(u_char *)(ptr+3));
@@ -209,7 +207,8 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
/**
* pcmcia_write_cis_mem() - low-level function to write CIS memory
*
- * Probably only useful for writing one-byte registers.
+ * Probably only useful for writing one-byte registers. Must be called
+ * with ops_mutex held.
*/
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
@@ -220,7 +219,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
dev_dbg(&s->dev,
"pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- mutex_lock(&s->ops_mutex);
if (attr & IS_INDIRECT) {
/* Indirect accesses use a bunch of special registers at fixed
locations in common memory */
@@ -234,7 +232,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
((cis_width) ? MAP_16BIT : 0));
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
return; /* FIXME: Error */
}
@@ -260,7 +257,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
sys = set_cis_map(s, card_offset, flags);
if (!sys) {
dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
return; /* FIXME: error */
}
@@ -275,7 +271,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
addr = 0;
}
}
- mutex_unlock(&s->ops_mutex);
}
@@ -314,7 +309,6 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
return 0;
}
}
- mutex_unlock(&s->ops_mutex);
ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr);
@@ -326,11 +320,11 @@ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
cis->len = len;
cis->attr = attr;
memcpy(cis->cache, ptr, len);
- mutex_lock(&s->ops_mutex);
list_add(&cis->node, &s->cis_cache);
- mutex_unlock(&s->ops_mutex);
}
}
+ mutex_unlock(&s->ops_mutex);
+
return ret;
}
@@ -386,6 +380,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
"no memory for verifying CIS\n");
return -ENOMEM;
}
+ mutex_lock(&s->ops_mutex);
list_for_each_entry(cis, &s->cis_cache, node) {
int len = cis->len;
@@ -395,10 +390,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
ret = pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
if (ret || memcmp(buf, cis->cache, len) != 0) {
kfree(buf);
+ mutex_unlock(&s->ops_mutex);
return -1;
}
}
kfree(buf);
+ mutex_unlock(&s->ops_mutex);
return 0;
}
@@ -1362,106 +1359,6 @@ EXPORT_SYMBOL(pcmcia_parse_tuple);
/**
- * pccard_read_tuple() - internal CIS tuple access
- * @s: the struct pcmcia_socket where the card is inserted
- * @function: the device function we loop for
- * @code: which CIS code shall we look for?
- * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
- *
- * pccard_read_tuple() reads out one tuple and attempts to parse it
- */
-int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, void *parse)
-{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kmalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
- if (function == BIND_FN_ALL)
- tuple.Attributes = TUPLE_RETURN_COMMON;
- ret = pccard_get_first_tuple(s, function, &tuple);
- if (ret != 0)
- goto done;
- tuple.TupleData = buf;
- tuple.TupleOffset = 0;
- tuple.TupleDataMax = 255;
- ret = pccard_get_tuple_data(s, &tuple);
- if (ret != 0)
- goto done;
- ret = pcmcia_parse_tuple(&tuple, parse);
-done:
- kfree(buf);
- return ret;
-}
-
-
-/**
- * pccard_loop_tuple() - loop over tuples in the CIS
- * @s: the struct pcmcia_socket where the card is inserted
- * @function: the device function we loop for
- * @code: which CIS code shall we look for?
- * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
- * @priv_data: private data to be passed to the loop_tuple function.
- * @loop_tuple: function to call for each CIS entry of type @function. IT
- * gets passed the raw tuple, the paresed tuple (if @parse is
- * set) and @priv_data.
- *
- * pccard_loop_tuple() loops over all CIS entries of type @function, and
- * calls the @loop_tuple function for each entry. If the call to @loop_tuple
- * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
- */
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
- cisparse_t *parse,
- void *priv_data))
-{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
-
- tuple.TupleData = buf;
- tuple.TupleDataMax = 255;
- tuple.TupleOffset = 0;
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
-
- ret = pccard_get_first_tuple(s, function, &tuple);
- while (!ret) {
- if (pccard_get_tuple_data(s, &tuple))
- goto next_entry;
-
- if (parse)
- if (pcmcia_parse_tuple(&tuple, parse))
- goto next_entry;
-
- ret = loop_tuple(&tuple, parse, priv_data);
- if (!ret)
- break;
-
-next_entry:
- ret = pccard_get_next_tuple(s, function, &tuple);
- }
-
- kfree(buf);
- return ret;
-}
-
-
-/**
* pccard_validate_cis() - check whether card has a sensible CIS
* @s: the struct pcmcia_socket we are to check
* @info: returns the number of tuples in the (valid) CIS, or 0
@@ -1634,7 +1531,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf,
}
-static ssize_t pccard_show_cis(struct kobject *kobj,
+static ssize_t pccard_show_cis(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -1665,7 +1562,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
}
-static ssize_t pccard_store_cis(struct kobject *kobj,
+static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index c3383750e33..976d80706ea 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -337,7 +337,6 @@ static void socket_shutdown(struct pcmcia_socket *s)
s->socket = dead_socket;
s->ops->init(s);
s->ops->set_socket(s, &s->socket);
- s->irq.AssignedIRQ = s->irq.Config = 0;
s->lock_count = 0;
kfree(s->fake_cis);
s->fake_cis = NULL;
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index f95864c2191..4126a75445e 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -52,13 +52,11 @@ struct cis_cache_entry {
struct pccard_resource_ops {
int (*validate_mem) (struct pcmcia_socket *s);
- int (*adjust_io_region) (struct resource *res,
- unsigned long r_start,
- unsigned long r_end,
- struct pcmcia_socket *s);
- struct resource* (*find_io) (unsigned long base, int num,
- unsigned long align,
- struct pcmcia_socket *s);
+ int (*find_io) (struct pcmcia_socket *s,
+ unsigned int attr,
+ unsigned int *base,
+ unsigned int num,
+ unsigned int align);
struct resource* (*find_mem) (unsigned long base, unsigned long num,
unsigned long align, int low,
struct pcmcia_socket *s);
@@ -89,6 +87,14 @@ struct pccard_resource_ops {
/*
+ * Stuff internal to module "pcmcia_rsrc":
+ */
+extern int static_init(struct pcmcia_socket *s);
+extern struct resource *pcmcia_make_resource(unsigned long start,
+ unsigned long end,
+ int flags, const char *name);
+
+/*
* Stuff internal to module "pcmcia_core":
*/
@@ -149,6 +155,8 @@ extern struct resource *pcmcia_find_mem_region(u_long base,
int low,
struct pcmcia_socket *s);
+void pcmcia_cleanup_irq(struct pcmcia_socket *s);
+int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
extern struct bin_attribute pccard_cis_attr;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 041eee43fd8..7ef7adee5e4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -371,8 +371,6 @@ static int pcmcia_device_remove(struct device *dev)
if (p_drv->remove)
p_drv->remove(p_dev);
- p_dev->dev_node = NULL;
-
/* check for proper unloading */
if (p_dev->_irq || p_dev->_io || p_dev->_locked)
dev_printk(KERN_INFO, dev,
@@ -479,15 +477,6 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
}
-/* device_add_lock is needed to avoid double registration by cardmgr and kernel.
- * Serializes pcmcia_device_add; will most likely be removed in future.
- *
- * While it has the caveat that adding new PCMCIA devices inside(!) device_register()
- * won't work, this doesn't matter much at the moment: the driver core doesn't
- * support it either.
- */
-static DEFINE_MUTEX(device_add_lock);
-
struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
{
struct pcmcia_device *p_dev, *tmp_dev;
@@ -497,8 +486,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
if (!s)
return NULL;
- mutex_lock(&device_add_lock);
-
pr_debug("adding device to %d, function %d\n", s->sock, function);
p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -538,8 +525,8 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
/*
* p_dev->function_config must be the same for all card functions.
- * Note that this is serialized by the device_add_lock, so that
- * only one such struct will be created.
+ * Note that this is serialized by ops_mutex, so that only one
+ * such struct will be created.
*/
list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
if (p_dev->func == tmp_dev->func) {
@@ -552,28 +539,31 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
/* Add to the list in pcmcia_bus_socket */
list_add(&p_dev->socket_device_list, &s->devices_list);
- mutex_unlock(&s->ops_mutex);
+ if (pcmcia_setup_irq(p_dev))
+ dev_warn(&p_dev->dev,
+ "IRQ setup failed -- device might not work\n");
if (!p_dev->function_config) {
dev_dbg(&p_dev->dev, "creating config_t\n");
p_dev->function_config = kzalloc(sizeof(struct config_t),
GFP_KERNEL);
- if (!p_dev->function_config)
+ if (!p_dev->function_config) {
+ mutex_unlock(&s->ops_mutex);
goto err_unreg;
+ }
kref_init(&p_dev->function_config->ref);
}
+ mutex_unlock(&s->ops_mutex);
dev_printk(KERN_NOTICE, &p_dev->dev,
- "pcmcia: registering new device %s\n",
- p_dev->devname);
+ "pcmcia: registering new device %s (IRQ: %d)\n",
+ p_dev->devname, p_dev->irq);
pcmcia_device_query(p_dev);
if (device_register(&p_dev->dev))
goto err_unreg;
- mutex_unlock(&device_add_lock);
-
return p_dev;
err_unreg:
@@ -591,7 +581,6 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
kfree(p_dev->devname);
kfree(p_dev);
err_put:
- mutex_unlock(&device_add_lock);
pcmcia_put_socket(s);
return NULL;
@@ -1258,6 +1247,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
handle_event(skt, event);
mutex_lock(&s->ops_mutex);
destroy_cis_cache(s);
+ pcmcia_cleanup_irq(s);
mutex_unlock(&s->ops_mutex);
break;
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 2e59fe947d2..f94d8281cfb 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -185,7 +185,7 @@ static int __devinit electra_cf_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct electra_cf_socket *cf;
struct resource mem, io;
int status;
@@ -357,8 +357,11 @@ static const struct of_device_id electra_cf_match[] = {
MODULE_DEVICE_TABLE(of, electra_cf_match);
static struct of_platform_driver electra_cf_driver = {
- .name = (char *)driver_name,
- .match_table = electra_cf_match,
+ .driver = {
+ .name = (char *)driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = electra_cf_match,
+ },
.probe = electra_cf_probe,
.remove = electra_cf_remove,
};
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 41cc954a5ff..1a648b90b63 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1298,8 +1298,11 @@ static const struct of_device_id m8xx_pcmcia_match[] = {
MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
static struct of_platform_driver m8xx_pcmcia_driver = {
- .name = driver_name,
- .match_table = m8xx_pcmcia_match,
+ .driver = {
+ .name = driver_name,
+ .owner = THIS_MODULE,
+ .match_table = m8xx_pcmcia_match,
+ },
.probe = m8xx_probe,
.remove = m8xx_remove,
};
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index a7cfc7964c7..0ad06a3bd56 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -117,7 +117,7 @@ static int omap_cf_get_status(struct pcmcia_socket *s, u_int *sp)
*sp = SS_READY | SS_DETECT | SS_POWERON | SS_3VCARD;
cf = container_of(s, struct omap_cf_socket, socket);
- s->irq.AssignedIRQ = 0;
+ s->pcmcia_irq = 0;
s->pci_irq = cf->irq;
} else
*sp = 0;
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
new file mode 100644
index 00000000000..4a65eaf96b0
--- /dev/null
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -0,0 +1,356 @@
+/*
+ * PCMCIA high-level CIS access functions
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Copyright (C) 1999 David A. Hinds
+ * Copyright (C) 2004-2009 Dominik Brodowski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/ds.h>
+#include "cs_internal.h"
+
+
+/**
+ * pccard_read_tuple() - internal CIS tuple access
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function we loop for
+ * @code: which CIS code shall we look for?
+ * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
+ *
+ * pccard_read_tuple() reads out one tuple and attempts to parse it
+ */
+int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, void *parse)
+{
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+ if (function == BIND_FN_ALL)
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ if (ret != 0)
+ goto done;
+ tuple.TupleData = buf;
+ tuple.TupleOffset = 0;
+ tuple.TupleDataMax = 255;
+ ret = pccard_get_tuple_data(s, &tuple);
+ if (ret != 0)
+ goto done;
+ ret = pcmcia_parse_tuple(&tuple, parse);
+done:
+ kfree(buf);
+ return ret;
+}
+
+
+/**
+ * pccard_loop_tuple() - loop over tuples in the CIS
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function we loop for
+ * @code: which CIS code shall we look for?
+ * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
+ * @priv_data: private data to be passed to the loop_tuple function.
+ * @loop_tuple: function to call for each CIS entry of type @function. It
+ * gets passed the raw tuple, the parsed tuple (if @parse is
+ * set) and @priv_data.
+ *
+ * pccard_loop_tuple() loops over all CIS entries of type @function, and
+ * calls the @loop_tuple function for each entry. If the call to @loop_tuple
+ * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
+ */
+int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, cisparse_t *parse, void *priv_data,
+ int (*loop_tuple) (tuple_t *tuple,
+ cisparse_t *parse,
+ void *priv_data))
+{
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = 255;
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ while (!ret) {
+ if (pccard_get_tuple_data(s, &tuple))
+ goto next_entry;
+
+ if (parse)
+ if (pcmcia_parse_tuple(&tuple, parse))
+ goto next_entry;
+
+ ret = loop_tuple(&tuple, parse, priv_data);
+ if (!ret)
+ break;
+
+next_entry:
+ ret = pccard_get_next_tuple(s, function, &tuple);
+ }
+
+ kfree(buf);
+ return ret;
+}
+
+struct pcmcia_cfg_mem {
+ struct pcmcia_device *p_dev;
+ void *priv_data;
+ int (*conf_check) (struct pcmcia_device *p_dev,
+ cistpl_cftable_entry_t *cfg,
+ cistpl_cftable_entry_t *dflt,
+ unsigned int vcc,
+ void *priv_data);
+ cisparse_t parse;
+ cistpl_cftable_entry_t dflt;
+};
+
+/**
+ * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
+ *
+ * pcmcia_do_loop_config() is the internal callback for the call from
+ * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
+ * by a struct pcmcia_cfg_mem.
+ */
+static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
+{
+ cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
+ struct pcmcia_cfg_mem *cfg_mem = priv;
+
+ /* default values */
+ cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+ cfg_mem->dflt = *cfg;
+
+ return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
+ cfg_mem->p_dev->socket->socket.Vcc,
+ cfg_mem->priv_data);
+}
+
+/**
+ * pcmcia_loop_config() - loop over configuration options
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @conf_check: function to call for each configuration option.
+ * It gets passed the struct pcmcia_device, the CIS data
+ * describing the configuration option, and private data
+ * being passed to pcmcia_loop_config()
+ * @priv_data: private data to be passed to the conf_check function.
+ *
+ * pcmcia_loop_config() loops over all configuration options, and calls
+ * the driver-specific conf_check() for each one, checking whether
+ * it is a valid one. Returns 0 on success or an error code otherwise.
+ */
+int pcmcia_loop_config(struct pcmcia_device *p_dev,
+ int (*conf_check) (struct pcmcia_device *p_dev,
+ cistpl_cftable_entry_t *cfg,
+ cistpl_cftable_entry_t *dflt,
+ unsigned int vcc,
+ void *priv_data),
+ void *priv_data)
+{
+ struct pcmcia_cfg_mem *cfg_mem;
+ int ret;
+
+ cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
+ if (cfg_mem == NULL)
+ return -ENOMEM;
+
+ cfg_mem->p_dev = p_dev;
+ cfg_mem->conf_check = conf_check;
+ cfg_mem->priv_data = priv_data;
+
+ ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
+ CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
+ cfg_mem, pcmcia_do_loop_config);
+
+ kfree(cfg_mem);
+ return ret;
+}
+EXPORT_SYMBOL(pcmcia_loop_config);
+
+
+struct pcmcia_loop_mem {
+ struct pcmcia_device *p_dev;
+ void *priv_data;
+ int (*loop_tuple) (struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data);
+};
+
+/**
+ * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
+ *
+ * pcmcia_do_loop_tuple() is the internal callback for the call from
+ * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
+ * by a struct pcmcia_loop_mem.
+ */
+static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
+{
+ struct pcmcia_loop_mem *loop = priv;
+
+ return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
+};
+
+/**
+ * pcmcia_loop_tuple() - loop over tuples in the CIS
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @code: which CIS code shall we look for?
+ * @priv_data: private data to be passed to the loop_tuple function.
+ * @loop_tuple: function to call for each CIS entry of type @code. It
+ * gets passed the raw tuple and @priv_data.
+ *
+ * pcmcia_loop_tuple() loops over all CIS entries of type @code, and
+ * calls the @loop_tuple function for each entry. If the call to @loop_tuple
+ * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
+ */
+int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
+ int (*loop_tuple) (struct pcmcia_device *p_dev,
+ tuple_t *tuple,
+ void *priv_data),
+ void *priv_data)
+{
+ struct pcmcia_loop_mem loop = {
+ .p_dev = p_dev,
+ .loop_tuple = loop_tuple,
+ .priv_data = priv_data};
+
+ return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
+ &loop, pcmcia_do_loop_tuple);
+}
+EXPORT_SYMBOL(pcmcia_loop_tuple);
+
+
+struct pcmcia_loop_get {
+ size_t len;
+ cisdata_t **buf;
+};
+
+/**
+ * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
+ *
+ * pcmcia_do_get_tuple() is the internal callback for the call from
+ * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
+ * the first tuple, return 0 unconditionally. Create a memory buffer large
+ * enough to hold the content of the tuple, and fill it with the tuple data.
+ * The caller is responsible to free the buffer.
+ */
+static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
+ void *priv)
+{
+ struct pcmcia_loop_get *get = priv;
+
+ *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
+ if (*get->buf) {
+ get->len = tuple->TupleDataLen;
+ memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
+ } else
+ dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
+ return 0;
+}
+
+/**
+ * pcmcia_get_tuple() - get first tuple from CIS
+ * @p_dev: the struct pcmcia_device which we need to loop for.
+ * @code: which CIS code shall we look for?
+ * @buf: pointer to store the buffer to.
+ *
+ * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
+ * It returns the buffer length (or zero). The caller is responsible to free
+ * the buffer passed in @buf.
+ */
+size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
+ unsigned char **buf)
+{
+ struct pcmcia_loop_get get = {
+ .len = 0,
+ .buf = buf,
+ };
+
+ *get.buf = NULL;
+ pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
+
+ return get.len;
+}
+EXPORT_SYMBOL(pcmcia_get_tuple);
+
+
+/**
+ * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
+ *
+ * pcmcia_do_get_mac() is the internal callback for the call from
+ * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
+ * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
+ * to struct net_device->dev_addr[i].
+ */
+static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
+ void *priv)
+{
+ struct net_device *dev = priv;
+ int i;
+
+ if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
+ return -EINVAL;
+ if (tuple->TupleDataLen < ETH_ALEN + 2) {
+ dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
+ "LAN_NODE_ID\n");
+ return -EINVAL;
+ }
+
+ if (tuple->TupleData[1] != ETH_ALEN) {
+ dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = tuple->TupleData[i+2];
+ return 0;
+}
+
+/**
+ * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
+ * @p_dev: the struct pcmcia_device for which we want the address.
+ * @dev: a properly prepared struct net_device to store the info to.
+ *
+ * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
+ * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
+ * must be set up properly by the driver (see examples!).
+ */
+int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
+{
+ return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
+}
+EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
+
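
For illustration only, a hypothetical driver fragment sketching how the helpers exported by this new file are typically combined: pcmcia_loop_config() to pick a configuration table entry, then pcmcia_get_mac_from_cis() to fill in the MAC address. The demo_* names and the entry-selection criterion are invented for this sketch and are not part of the tree.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Accept the first configuration table entry that offers an I/O window. */
static int demo_conf_check(struct pcmcia_device *p_dev,
			   cistpl_cftable_entry_t *cfg,
			   cistpl_cftable_entry_t *dflt,
			   unsigned int vcc, void *priv_data)
{
	if (cfg->io.nwin > 0 || dflt->io.nwin > 0)
		return 0;	/* returning 0 ends the loop: entry accepted */
	return -ENODEV;		/* keep looking at further entries */
}

static int demo_config(struct pcmcia_device *p_dev, struct net_device *ndev)
{
	int ret;

	/* Walk all CISTPL_CFTABLE_ENTRY tuples until demo_conf_check()
	 * accepts one; ConfigIndex is set up by the loop helper. */
	ret = pcmcia_loop_config(p_dev, demo_conf_check, NULL);
	if (ret)
		return ret;

	/* Copy the LAN_NODE_ID from CISTPL_FUNCE into ndev->dev_addr. */
	return pcmcia_get_mac_from_cis(p_dev, ndev);
}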
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 7631faa0cad..d007a2a0383 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -301,7 +301,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
(c->IntType & (INT_MEMORY_AND_IO | INT_ZOOMED_VIDEO))) {
u_char reg;
if (c->CardValues & PRESENT_PIN_REPLACE) {
+ mutex_lock(&s->ops_mutex);
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_PRR)>>1, 1, &reg);
+ mutex_unlock(&s->ops_mutex);
status->CardState |=
(reg & PRR_WP_STATUS) ? CS_EVENT_WRITE_PROTECT : 0;
status->CardState |=
@@ -315,7 +317,9 @@ static int pccard_get_status(struct pcmcia_socket *s,
status->CardState |= CS_EVENT_READY_CHANGE;
}
if (c->CardValues & PRESENT_EXT_STATUS) {
+ mutex_lock(&s->ops_mutex);
pcmcia_read_cis_mem(s, 1, (c->ConfigBase+CISREG_ESR)>>1, 1, &reg);
+ mutex_unlock(&s->ops_mutex);
status->CardState |=
(reg & ESR_REQ_ATTN) ? CS_EVENT_REQUEST_ATTENTION : 0;
}
@@ -351,7 +355,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
if (s->state & SOCKET_CARDBUS_CONFIG) {
config->Attributes = CONF_VALID_CLIENT;
config->IntType = INT_CARDBUS;
- config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->AssignedIRQ = s->pcmcia_irq;
if (config->AssignedIRQ)
config->Attributes |= CONF_ENABLE_IRQ;
if (s->io[0].res) {
@@ -391,7 +395,7 @@ static int pccard_get_configuration_info(struct pcmcia_socket *s,
config->ExtStatus = c->ExtStatus;
config->Present = config->CardValues = c->CardValues;
config->IRQAttributes = c->irq.Attributes;
- config->AssignedIRQ = s->irq.AssignedIRQ;
+ config->AssignedIRQ = s->pcmcia_irq;
config->BasePort1 = c->io.BasePort1;
config->NumPorts1 = c->io.NumPorts1;
config->Attributes1 = c->io.Attributes1;
@@ -571,7 +575,6 @@ static struct pci_bus *pcmcia_lookup_bus(struct pcmcia_socket *s)
static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int first)
{
- dev_node_t *node;
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
int ret = 0;
@@ -633,21 +636,13 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
goto err_put;
}
- if (first)
- node = p_dev->dev_node;
- else
- for (node = p_dev->dev_node; node; node = node->next)
- if (node == bind_info->next)
- break;
- if (!node) {
+ if (!first) {
ret = -ENODEV;
goto err_put;
}
- strlcpy(bind_info->name, node->dev_name, DEV_NAME_LEN);
- bind_info->major = node->major;
- bind_info->minor = node->minor;
- bind_info->next = node->next;
+ strlcpy(bind_info->name, dev_name(&p_dev->dev), DEV_NAME_LEN);
+ bind_info->next = NULL;
err_put:
pcmcia_put_dev(p_dev);
@@ -818,8 +813,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
/*====================================================================*/
-static int ds_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg)
+static int ds_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct pcmcia_socket *s;
void __user *uarg = (char __user *)arg;
@@ -1026,13 +1020,25 @@ free_out:
return err;
} /* ds_ioctl */
+static long ds_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ds_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+
/*====================================================================*/
static const struct file_operations ds_fops = {
.owner = THIS_MODULE,
.open = ds_open,
.release = ds_release,
- .ioctl = ds_ioctl,
+ .unlocked_ioctl = ds_unlocked_ioctl,
.read = ds_read,
.write = ds_write,
.poll = ds_poll,
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 7c3d03bb4f3..29f91fac1df 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -23,6 +23,8 @@
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <asm/irq.h>
+
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
@@ -38,29 +40,6 @@ static int io_speed;
module_param(io_speed, int, 0444);
-#ifdef CONFIG_PCMCIA_PROBE
-#include <asm/irq.h>
-/* mask of IRQs already reserved by other cards, we should avoid using them */
-static u8 pcmcia_used_irq[NR_IRQS];
-#endif
-
-static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
- unsigned long end, struct pcmcia_socket *s)
-{
- if (s->resource_ops->adjust_io_region)
- return s->resource_ops->adjust_io_region(res, start, end, s);
- return -ENOMEM;
-}
-
-static struct resource *pcmcia_find_io_region(unsigned long base, int num,
- unsigned long align,
- struct pcmcia_socket *s)
-{
- if (s->resource_ops->find_io)
- return s->resource_ops->find_io(base, num, align, s);
- return NULL;
-}
-
int pcmcia_validate_mem(struct pcmcia_socket *s)
{
if (s->resource_ops->validate_mem)
@@ -86,8 +65,7 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
unsigned int *base, unsigned int num, u_int lines)
{
- int i;
- unsigned int try, align;
+ unsigned int align;
align = (*base) ? (lines ? 1<<lines : 0) : 1;
if (align && (align < num)) {
@@ -104,50 +82,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
*base, align);
align = 0;
}
- if ((s->features & SS_CAP_STATIC_MAP) && s->io_offset) {
- *base = s->io_offset | (*base & 0x0fff);
- return 0;
- }
- /* Check for an already-allocated window that must conflict with
- * what was asked for. It is a hack because it does not catch all
- * potential conflicts, just the most obvious ones.
- */
- for (i = 0; i < MAX_IO_WIN; i++)
- if ((s->io[i].res) && *base &&
- ((s->io[i].res->start & (align-1)) == *base))
- return 1;
- for (i = 0; i < MAX_IO_WIN; i++) {
- if (!s->io[i].res) {
- s->io[i].res = pcmcia_find_io_region(*base, num, align, s);
- if (s->io[i].res) {
- *base = s->io[i].res->start;
- s->io[i].res->flags = (s->io[i].res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS);
- s->io[i].InUse = num;
- break;
- } else
- return 1;
- } else if ((s->io[i].res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS))
- continue;
- /* Try to extend top of window */
- try = s->io[i].res->end + 1;
- if ((*base == 0) || (*base == try))
- if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start,
- s->io[i].res->end + num, s) == 0) {
- *base = try;
- s->io[i].InUse += num;
- break;
- }
- /* Try to extend bottom of window */
- try = s->io[i].res->start - num;
- if ((*base == 0) || (*base == try))
- if (pcmcia_adjust_io_region(s->io[i].res, s->io[i].res->start - num,
- s->io[i].res->end, s) == 0) {
- *base = try;
- s->io[i].InUse += num;
- break;
- }
- }
- return (i == MAX_IO_WIN);
+
+ return s->resource_ops->find_io(s, attr, base, num, align);
} /* alloc_io_space */
@@ -187,6 +123,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
config_t *c;
int addr;
u_char val;
+ int ret = 0;
if (!p_dev || !p_dev->function_config)
return -EINVAL;
@@ -203,11 +140,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
}
addr = (c->ConfigBase + reg->Offset) >> 1;
- mutex_unlock(&s->ops_mutex);
switch (reg->Action) {
case CS_READ:
- pcmcia_read_cis_mem(s, 1, addr, 1, &val);
+ ret = pcmcia_read_cis_mem(s, 1, addr, 1, &val);
reg->Value = val;
break;
case CS_WRITE:
@@ -216,10 +152,11 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
break;
default:
dev_dbg(&s->dev, "Invalid conf register request\n");
- return -EINVAL;
+ ret = -EINVAL;
break;
}
- return 0;
+ mutex_unlock(&s->ops_mutex);
+ return ret;
} /* pcmcia_access_configuration_register */
EXPORT_SYMBOL(pcmcia_access_configuration_register);
@@ -275,19 +212,9 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
goto unlock;
}
- if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
- if (mod->Attributes & CONF_ENABLE_IRQ) {
- c->Attributes |= CONF_ENABLE_IRQ;
- s->socket.io_irq = s->irq.AssignedIRQ;
- } else {
- c->Attributes &= ~CONF_ENABLE_IRQ;
- s->socket.io_irq = 0;
- }
- s->ops->set_socket(s, &s->socket);
- }
-
- if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
- dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
+ if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
+ dev_dbg(&s->dev,
+ "changing Vcc or IRQ is not allowed at this time\n");
ret = -EINVAL;
goto unlock;
}
@@ -422,52 +349,6 @@ out:
} /* pcmcia_release_io */
-static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
-{
- struct pcmcia_socket *s = p_dev->socket;
- config_t *c;
- int ret = -EINVAL;
-
- mutex_lock(&s->ops_mutex);
-
- c = p_dev->function_config;
-
- if (!p_dev->_irq)
- goto out;
-
- p_dev->_irq = 0;
-
- if (c->state & CONFIG_LOCKED)
- goto out;
-
- if (c->irq.Attributes != req->Attributes) {
- dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
- goto out;
- }
- if (s->irq.AssignedIRQ != req->AssignedIRQ) {
- dev_dbg(&s->dev, "IRQ must match assigned one\n");
- goto out;
- }
- if (--s->irq.Config == 0) {
- c->state &= ~CONFIG_IRQ_REQ;
- s->irq.AssignedIRQ = 0;
- }
-
- if (req->Handler)
- free_irq(req->AssignedIRQ, p_dev->priv);
-
-#ifdef CONFIG_PCMCIA_PROBE
- pcmcia_used_irq[req->AssignedIRQ]--;
-#endif
- ret = 0;
-
-out:
- mutex_unlock(&s->ops_mutex);
-
- return ret;
-} /* pcmcia_release_irq */
-
-
int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
{
struct pcmcia_socket *s = p_dev->socket;
@@ -551,12 +432,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (req->Attributes & CONF_ENABLE_SPKR)
s->socket.flags |= SS_SPKR_ENA;
if (req->Attributes & CONF_ENABLE_IRQ)
- s->socket.io_irq = s->irq.AssignedIRQ;
+ s->socket.io_irq = s->pcmcia_irq;
else
s->socket.io_irq = 0;
s->ops->set_socket(s, &s->socket);
s->lock_count++;
- mutex_unlock(&s->ops_mutex);
/* Set up CIS configuration registers */
base = c->ConfigBase = req->ConfigBase;
@@ -574,9 +454,9 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
if (req->Present & PRESENT_IOBASE_0)
c->Option |= COR_ADDR_DECODE;
}
- if (c->state & CONFIG_IRQ_REQ)
- if (!(c->irq.Attributes & IRQ_FORCED_PULSE))
- c->Option |= COR_LEVEL_REQ;
+ if ((req->Attributes & CONF_ENABLE_IRQ) &&
+ !(req->Attributes & CONF_ENABLE_PULSE_IRQ))
+ c->Option |= COR_LEVEL_REQ;
pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option);
mdelay(40);
}
@@ -605,7 +485,6 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
/* Configure I/O windows */
if (c->state & CONFIG_IO_REQ) {
- mutex_lock(&s->ops_mutex);
iomap.speed = io_speed;
for (i = 0; i < MAX_IO_WIN; i++)
if (s->io[i].res) {
@@ -624,11 +503,11 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
s->ops->set_io_map(s, &iomap);
s->io[i].Config++;
}
- mutex_unlock(&s->ops_mutex);
}
c->state |= CONFIG_LOCKED;
p_dev->_locked = 1;
+ mutex_unlock(&s->ops_mutex);
return 0;
} /* pcmcia_request_configuration */
EXPORT_SYMBOL(pcmcia_request_configuration);
@@ -706,137 +585,176 @@ out:
EXPORT_SYMBOL(pcmcia_request_io);
-/** pcmcia_request_irq
+/**
+ * pcmcia_request_irq() - attempt to request an IRQ for a PCMCIA device
*
- * Request_irq() reserves an irq for this client.
+ * pcmcia_request_irq() is a wrapper around request_irq which will allow
+ * the PCMCIA core to clean up the registration in pcmcia_disable_device().
+ * Drivers are free to use request_irq() directly, but then they need to
+ * call free_irq() themselves, too. Also, only IRQF_SHARED-capable IRQ
+ * handlers are allowed.
+ */
+int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler)
+{
+ int ret;
+
+ if (!p_dev->irq)
+ return -EINVAL;
+
+ ret = request_irq(p_dev->irq, handler, IRQF_SHARED,
+ p_dev->devname, p_dev->priv);
+ if (!ret)
+ p_dev->_irq = 1;
+
+ return ret;
+}
+EXPORT_SYMBOL(pcmcia_request_irq);
+
+
+/**
+ * pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first
*
- * Also, since Linux only reserves irq's when they are actually
- * hooked, we don't guarantee that an irq will still be available
- * when the configuration is locked. Now that I think about it,
- * there might be a way to fix this using a dummy handler.
+ * pcmcia_request_exclusive_irq() is a wrapper around request_irq which
+ * attempts first to request an exclusive IRQ. If it fails, it also accepts
+ * a shared IRQ, but prints out a warning. PCMCIA drivers should allow for
+ * IRQ sharing and either use request_irq directly (then they need to call
+ * free_irq themselves, too), or the pcmcia_request_irq() function.
*/
+int __must_check
+__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
+ irq_handler_t handler)
+{
+ int ret;
+
+ if (!p_dev->irq)
+ return -EINVAL;
+
+ ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv);
+ if (ret) {
+ ret = pcmcia_request_irq(p_dev, handler);
+ dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
+ "request for exclusive IRQ could not be fulfilled.\n");
+ dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
+ "needs updating to supported shared IRQ lines.\n");
+ }
+ if (ret)
+ dev_printk(KERN_INFO, &p_dev->dev, "request_irq() failed\n");
+ else
+ p_dev->_irq = 1;
+
+ return ret;
+} /* pcmcia_request_exclusive_irq */
+EXPORT_SYMBOL(__pcmcia_request_exclusive_irq);
+
#ifdef CONFIG_PCMCIA_PROBE
+
+/* mask of IRQs already reserved by other cards, we should avoid using them */
+static u8 pcmcia_used_irq[NR_IRQS];
+
static irqreturn_t test_action(int cpl, void *dev_id)
{
return IRQ_NONE;
}
-#endif
-int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
+/**
+ * pcmcia_setup_isa_irq() - determine whether an ISA IRQ can be used
+ * @p_dev - the associated PCMCIA device
+ *
+ * locking note: must be called with ops_mutex locked.
+ */
+static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
{
struct pcmcia_socket *s = p_dev->socket;
- config_t *c;
- int ret = -EINVAL, irq = 0;
- int type;
+ unsigned int try, irq;
+ u32 mask = s->irq_mask;
+ int ret = -ENODEV;
- mutex_lock(&s->ops_mutex);
+ for (try = 0; try < 64; try++) {
+ irq = try % 32;
- if (!(s->state & SOCKET_PRESENT)) {
- dev_dbg(&s->dev, "No card present\n");
- goto out;
- }
- c = p_dev->function_config;
- if (c->state & CONFIG_LOCKED) {
- dev_dbg(&s->dev, "Configuration is locked\n");
- goto out;
- }
- if (c->state & CONFIG_IRQ_REQ) {
- dev_dbg(&s->dev, "IRQ already configured\n");
- goto out;
+ /* marked as available by driver, not blocked by userspace? */
+ if (!((mask >> irq) & 1))
+ continue;
+
+ /* avoid an IRQ which is already used by another PCMCIA card */
+ if ((try < 32) && pcmcia_used_irq[irq])
+ continue;
+
+ /* register a dummy handler to check whether the IRQ is
+ * available, i.e. not already marked as used by the kernel
+ * resource management core */
+ ret = request_irq(irq, test_action, type, p_dev->devname,
+ p_dev);
+ if (!ret) {
+ free_irq(irq, p_dev);
+ p_dev->irq = s->pcmcia_irq = irq;
+ pcmcia_used_irq[irq]++;
+ break;
+ }
}
- /* Decide what type of interrupt we are registering */
- type = 0;
- if (s->functions > 1) /* All of this ought to be handled higher up */
- type = IRQF_SHARED;
- else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
- type = IRQF_SHARED;
- else
- printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
+ return ret;
+}
- /* If the interrupt is already assigned, it must be the same */
- if (s->irq.AssignedIRQ != 0)
- irq = s->irq.AssignedIRQ;
+void pcmcia_cleanup_irq(struct pcmcia_socket *s)
+{
+ pcmcia_used_irq[s->pcmcia_irq]--;
+ s->pcmcia_irq = 0;
+}
-#ifdef CONFIG_PCMCIA_PROBE
- if (!irq) {
- int try;
- u32 mask = s->irq_mask;
- void *data = p_dev; /* something unique to this device */
+#else /* CONFIG_PCMCIA_PROBE */
- for (try = 0; try < 64; try++) {
- irq = try % 32;
+static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type)
+{
+ return -EINVAL;
+}
- /* marked as available by driver, and not blocked by userspace? */
- if (!((mask >> irq) & 1))
- continue;
+void pcmcia_cleanup_irq(struct pcmcia_socket *s)
+{
+ s->pcmcia_irq = 0;
+ return;
+}
- /* avoid an IRQ which is already used by a PCMCIA card */
- if ((try < 32) && pcmcia_used_irq[irq])
- continue;
+#endif /* CONFIG_PCMCIA_PROBE */
- /* register the correct driver, if possible, of check whether
- * registering a dummy handle works, i.e. if the IRQ isn't
- * marked as used by the kernel resource management core */
- ret = request_irq(irq,
- (req->Handler) ? req->Handler : test_action,
- type,
- p_dev->devname,
- (req->Handler) ? p_dev->priv : data);
- if (!ret) {
- if (!req->Handler)
- free_irq(irq, data);
- break;
- }
- }
- }
-#endif
- /* only assign PCI irq if no IRQ already assigned */
- if (ret && !s->irq.AssignedIRQ) {
- if (!s->pci_irq) {
- dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
- goto out;
- }
- type = IRQF_SHARED;
- irq = s->pci_irq;
- }
- if (ret && req->Handler) {
- ret = request_irq(irq, req->Handler, type,
- p_dev->devname, p_dev->priv);
- if (ret) {
- dev_printk(KERN_INFO, &s->dev,
- "request_irq() failed\n");
- goto out;
- }
- }
+/**
+ * pcmcia_setup_irq() - determine IRQ to be used for device
+ * @p_dev - the associated PCMCIA device
+ *
+ * locking note: must be called with ops_mutex locked.
+ */
+int pcmcia_setup_irq(struct pcmcia_device *p_dev)
+{
+ struct pcmcia_socket *s = p_dev->socket;
- /* Make sure the fact the request type was overridden is passed back */
- if (type == IRQF_SHARED && !(req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)) {
- req->Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
- dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: "
- "request for exclusive IRQ could not be fulfilled.\n");
- dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver "
- "needs updating to supported shared IRQ lines.\n");
+ if (p_dev->irq)
+ return 0;
+
+ /* already assigned? */
+ if (s->pcmcia_irq) {
+ p_dev->irq = s->pcmcia_irq;
+ return 0;
}
- c->irq.Attributes = req->Attributes;
- s->irq.AssignedIRQ = req->AssignedIRQ = irq;
- s->irq.Config++;
- c->state |= CONFIG_IRQ_REQ;
- p_dev->_irq = 1;
+ /* prefer an exclusive ISA irq */
+ if (!pcmcia_setup_isa_irq(p_dev, 0))
+ return 0;
-#ifdef CONFIG_PCMCIA_PROBE
- pcmcia_used_irq[irq]++;
-#endif
+ /* but accept a shared ISA irq */
+ if (!pcmcia_setup_isa_irq(p_dev, IRQF_SHARED))
+ return 0;
- ret = 0;
-out:
- mutex_unlock(&s->ops_mutex);
- return ret;
-} /* pcmcia_request_irq */
-EXPORT_SYMBOL(pcmcia_request_irq);
+ /* but use the PCI irq otherwise */
+ if (s->pci_irq) {
+ p_dev->irq = s->pcmcia_irq = s->pci_irq;
+ return 0;
+ }
+
+ return -EINVAL;
+}
/** pcmcia_request_window
@@ -939,237 +857,9 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev)
{
pcmcia_release_configuration(p_dev);
pcmcia_release_io(p_dev, &p_dev->io);
- pcmcia_release_irq(p_dev, &p_dev->irq);
+ if (p_dev->_irq)
+ free_irq(p_dev->irq, p_dev->priv);
if (p_dev->win)
pcmcia_release_window(p_dev, p_dev->win);
}
EXPORT_SYMBOL(pcmcia_disable_device);
-
-
-struct pcmcia_cfg_mem {
- struct pcmcia_device *p_dev;
- void *priv_data;
- int (*conf_check) (struct pcmcia_device *p_dev,
- cistpl_cftable_entry_t *cfg,
- cistpl_cftable_entry_t *dflt,
- unsigned int vcc,
- void *priv_data);
- cisparse_t parse;
- cistpl_cftable_entry_t dflt;
-};
-
-/**
- * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
- *
- * pcmcia_do_loop_config() is the internal callback for the call from
- * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
- * by a struct pcmcia_cfg_mem.
- */
-static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
-{
- cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
- struct pcmcia_cfg_mem *cfg_mem = priv;
-
- /* default values */
- cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
- if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
- cfg_mem->dflt = *cfg;
-
- return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
- cfg_mem->p_dev->socket->socket.Vcc,
- cfg_mem->priv_data);
-}
-
-/**
- * pcmcia_loop_config() - loop over configuration options
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @conf_check: function to call for each configuration option.
- * It gets passed the struct pcmcia_device, the CIS data
- * describing the configuration option, and private data
- * being passed to pcmcia_loop_config()
- * @priv_data: private data to be passed to the conf_check function.
- *
- * pcmcia_loop_config() loops over all configuration options, and calls
- * the driver-specific conf_check() for each one, checking whether
- * it is a valid one. Returns 0 on success or errorcode otherwise.
- */
-int pcmcia_loop_config(struct pcmcia_device *p_dev,
- int (*conf_check) (struct pcmcia_device *p_dev,
- cistpl_cftable_entry_t *cfg,
- cistpl_cftable_entry_t *dflt,
- unsigned int vcc,
- void *priv_data),
- void *priv_data)
-{
- struct pcmcia_cfg_mem *cfg_mem;
- int ret;
-
- cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
- if (cfg_mem == NULL)
- return -ENOMEM;
-
- cfg_mem->p_dev = p_dev;
- cfg_mem->conf_check = conf_check;
- cfg_mem->priv_data = priv_data;
-
- ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
- CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
- cfg_mem, pcmcia_do_loop_config);
-
- kfree(cfg_mem);
- return ret;
-}
-EXPORT_SYMBOL(pcmcia_loop_config);
-
-
-struct pcmcia_loop_mem {
- struct pcmcia_device *p_dev;
- void *priv_data;
- int (*loop_tuple) (struct pcmcia_device *p_dev,
- tuple_t *tuple,
- void *priv_data);
-};
-
-/**
- * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_config()
- *
- * pcmcia_do_loop_tuple() is the internal callback for the call from
- * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
- * by a struct pcmcia_cfg_mem.
- */
-static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
-{
- struct pcmcia_loop_mem *loop = priv;
-
- return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
-};
-
-/**
- * pcmcia_loop_tuple() - loop over tuples in the CIS
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @code: which CIS code shall we look for?
- * @priv_data: private data to be passed to the loop_tuple function.
- * @loop_tuple: function to call for each CIS entry of type @function. IT
- * gets passed the raw tuple and @priv_data.
- *
- * pcmcia_loop_tuple() loops over all CIS entries of type @function, and
- * calls the @loop_tuple function for each entry. If the call to @loop_tuple
- * returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
- */
-int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
- int (*loop_tuple) (struct pcmcia_device *p_dev,
- tuple_t *tuple,
- void *priv_data),
- void *priv_data)
-{
- struct pcmcia_loop_mem loop = {
- .p_dev = p_dev,
- .loop_tuple = loop_tuple,
- .priv_data = priv_data};
-
- return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
- &loop, pcmcia_do_loop_tuple);
-}
-EXPORT_SYMBOL(pcmcia_loop_tuple);
-
-
-struct pcmcia_loop_get {
- size_t len;
- cisdata_t **buf;
-};
-
-/**
- * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
- *
- * pcmcia_do_get_tuple() is the internal callback for the call from
- * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
- * the first tuple, return 0 unconditionally. Create a memory buffer large
- * enough to hold the content of the tuple, and fill it with the tuple data.
- * The caller is responsible to free the buffer.
- */
-static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
- void *priv)
-{
- struct pcmcia_loop_get *get = priv;
-
- *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
- if (*get->buf) {
- get->len = tuple->TupleDataLen;
- memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
- } else
- dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
- return 0;
-}
-
-/**
- * pcmcia_get_tuple() - get first tuple from CIS
- * @p_dev: the struct pcmcia_device which we need to loop for.
- * @code: which CIS code shall we look for?
- * @buf: pointer to store the buffer to.
- *
- * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
- * It returns the buffer length (or zero). The caller is responsible to free
- * the buffer passed in @buf.
- */
-size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
- unsigned char **buf)
-{
- struct pcmcia_loop_get get = {
- .len = 0,
- .buf = buf,
- };
-
- *get.buf = NULL;
- pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
-
- return get.len;
-}
-EXPORT_SYMBOL(pcmcia_get_tuple);
-
-
-/**
- * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
- *
- * pcmcia_do_get_mac() is the internal callback for the call from
- * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
- * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
- * to struct net_device->dev_addr[i].
- */
-static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
- void *priv)
-{
- struct net_device *dev = priv;
- int i;
-
- if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
- return -EINVAL;
- if (tuple->TupleDataLen < ETH_ALEN + 2) {
- dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
- "LAN_NODE_ID\n");
- return -EINVAL;
- }
-
- if (tuple->TupleData[1] != ETH_ALEN) {
- dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
- return -EINVAL;
- }
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
- return 0;
-}
-
-/**
- * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
- * @p_dev: the struct pcmcia_device for which we want the address.
- * @dev: a properly prepared struct net_device to store the info to.
- *
- * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
- * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
- * must be set up properly by the driver (see examples!).
- */
-int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
-{
- return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
-}
-EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
-
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
new file mode 100644
index 00000000000..55627eccee8
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -0,0 +1,229 @@
+/*
+ * linux/drivers/pcmcia/pxa2xx_vpac270.c
+ *
+ * Driver for Voipac PXA270 PCMCIA and CF sockets
+ *
+ * Copyright (C) 2010
+ * Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/gpio.h>
+#include <mach/vpac270.h>
+
+#include "soc_common.h"
+
+static struct pcmcia_irqs cd_irqs[] = {
+ {
+ .sock = 0,
+ .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
+ .str = "PCMCIA CD"
+ },
+ {
+ .sock = 1,
+ .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
+ .str = "CF CD"
+ },
+};
+
+static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+{
+ int ret;
+
+ if (skt->nr == 0) {
+ ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY);
+ if (ret)
+ goto err3;
+
+ ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN");
+ if (ret)
+ goto err3;
+ ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0);
+ if (ret)
+ goto err4;
+
+ ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET");
+ if (ret)
+ goto err4;
+ ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0);
+ if (ret)
+ goto err5;
+
+ skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
+
+ return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
+
+err5:
+ gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
+err4:
+ gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
+err3:
+ gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
+err2:
+ gpio_free(GPIO84_VPAC270_PCMCIA_CD);
+err1:
+ return ret;
+
+ } else {
+ ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD");
+ if (ret)
+ goto err6;
+ ret = gpio_direction_input(GPIO17_VPAC270_CF_CD);
+ if (ret)
+ goto err7;
+
+ ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY");
+ if (ret)
+ goto err7;
+ ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY);
+ if (ret)
+ goto err8;
+
+ ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET");
+ if (ret)
+ goto err8;
+ ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0);
+ if (ret)
+ goto err9;
+
+ skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
+
+ return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
+
+err9:
+ gpio_free(GPIO16_VPAC270_CF_RESET);
+err8:
+ gpio_free(GPIO12_VPAC270_CF_RDY);
+err7:
+ gpio_free(GPIO17_VPAC270_CF_CD);
+err6:
+ return ret;
+
+ }
+}
+
+static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+{
+ gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
+ gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
+ gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
+ gpio_free(GPIO84_VPAC270_PCMCIA_CD);
+ gpio_free(GPIO16_VPAC270_CF_RESET);
+ gpio_free(GPIO12_VPAC270_CF_RDY);
+ gpio_free(GPIO17_VPAC270_CF_CD);
+}
+
+static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ if (skt->nr == 0) {
+ state->detect = !gpio_get_value(GPIO84_VPAC270_PCMCIA_CD);
+ state->ready = !!gpio_get_value(GPIO35_VPAC270_PCMCIA_RDY);
+ } else {
+ state->detect = !gpio_get_value(GPIO17_VPAC270_CF_CD);
+ state->ready = !!gpio_get_value(GPIO12_VPAC270_CF_RDY);
+ }
+ state->bvd1 = 1;
+ state->bvd2 = 1;
+ state->wrprot = 0;
+ state->vs_3v = 1;
+ state->vs_Xv = 0;
+}
+
+static int
+vpac270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
+ const socket_state_t *state)
+{
+ if (skt->nr == 0) {
+ gpio_set_value(GPIO11_VPAC270_PCMCIA_RESET,
+ (state->flags & SS_RESET));
+ gpio_set_value(GPIO107_VPAC270_PCMCIA_PPEN,
+ !(state->Vcc == 33 || state->Vcc == 50));
+ } else {
+ gpio_set_value(GPIO16_VPAC270_CF_RESET,
+ (state->flags & SS_RESET));
+ }
+
+ return 0;
+}
+
+static void vpac270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
+{
+}
+
+static void vpac270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+{
+}
+
+static struct pcmcia_low_level vpac270_pcmcia_ops = {
+ .owner = THIS_MODULE,
+
+ .first = 0,
+ .nr = 2,
+
+ .hw_init = vpac270_pcmcia_hw_init,
+ .hw_shutdown = vpac270_pcmcia_hw_shutdown,
+
+ .socket_state = vpac270_pcmcia_socket_state,
+ .configure_socket = vpac270_pcmcia_configure_socket,
+
+ .socket_init = vpac270_pcmcia_socket_init,
+ .socket_suspend = vpac270_pcmcia_socket_suspend,
+};
+
+static struct platform_device *vpac270_pcmcia_device;
+
+static int __init vpac270_pcmcia_init(void)
+{
+ int ret;
+
+ if (!machine_is_vpac270())
+ return -ENODEV;
+
+ vpac270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+ if (!vpac270_pcmcia_device)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(vpac270_pcmcia_device,
+ &vpac270_pcmcia_ops, sizeof(vpac270_pcmcia_ops));
+
+ if (!ret)
+ ret = platform_device_add(vpac270_pcmcia_device);
+
+ if (ret)
+ platform_device_put(vpac270_pcmcia_device);
+
+ return ret;
+}
+
+static void __exit vpac270_pcmcia_exit(void)
+{
+ platform_device_unregister(vpac270_pcmcia_device);
+}
+
+module_init(vpac270_pcmcia_init);
+module_exit(vpac270_pcmcia_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("PCMCIA support for Voipac PXA270");
+MODULE_ALIAS("platform:pxa2xx-pcmcia");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
new file mode 100644
index 00000000000..d0bf3502106
--- /dev/null
+++ b/drivers/pcmcia/rsrc_iodyn.c
@@ -0,0 +1,172 @@
+/*
+ * rsrc_iodyn.c -- Resource management routines for MEM-static sockets.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * (C) 1999 David A. Hinds
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include "cs_internal.h"
+
+
+struct pcmcia_align_data {
+ unsigned long mask;
+ unsigned long offset;
+};
+
+static resource_size_t pcmcia_align(void *align_data,
+ const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pcmcia_align_data *data = align_data;
+ resource_size_t start;
+
+ start = (res->start & ~data->mask) + data->offset;
+ if (start < res->start)
+ start += data->mask + 1;
+
+#ifdef CONFIG_X86
+ if (res->flags & IORESOURCE_IO) {
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+#endif
+
+#ifdef CONFIG_M68K
+ if (res->flags & IORESOURCE_IO) {
+ if ((res->start + size - 1) >= 1024)
+ start = res->end;
+ }
+#endif
+
+ return start;
+}
+
+
+static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
+ unsigned long base, int num,
+ unsigned long align)
+{
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
+ dev_name(&s->dev));
+ struct pcmcia_align_data data;
+ unsigned long min = base;
+ int ret;
+
+ data.mask = align - 1;
+ data.offset = base & data.mask;
+
+#ifdef CONFIG_PCI
+ if (s->cb_dev) {
+ ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
+ min, 0, pcmcia_align, &data);
+ } else
+#endif
+ ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
+ 1, pcmcia_align, &data);
+
+ if (ret != 0) {
+ kfree(res);
+ res = NULL;
+ }
+ return res;
+}
+
+static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
+{
+ int i, ret = 0;
+
+ /* Check for an already-allocated window that must conflict with
+ * what was asked for. It is a hack because it does not catch all
+ * potential conflicts, just the most obvious ones.
+ */
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+
+ if (!*base)
+ continue;
+
+ if ((s->io[i].res->start & (align-1)) == *base)
+ return -EBUSY;
+ }
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ struct resource *res = s->io[i].res;
+ unsigned int try;
+
+ if (res && (res->flags & IORESOURCE_BITS) !=
+ (attr & IORESOURCE_BITS))
+ continue;
+
+ if (!res) {
+ if (align == 0)
+ align = 0x10000;
+
+ res = s->io[i].res = __iodyn_find_io_region(s, *base,
+ num, align);
+ if (!res)
+ return -EINVAL;
+
+ *base = res->start;
+ s->io[i].res->flags =
+ ((res->flags & ~IORESOURCE_BITS) |
+ (attr & IORESOURCE_BITS));
+ s->io[i].InUse = num;
+ return 0;
+ }
+
+ /* Try to extend top of window */
+ try = res->end + 1;
+ if ((*base == 0) || (*base == try)) {
+ if (adjust_resource(s->io[i].res, res->start,
+ res->end - res->start + num + 1))
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+
+ /* Try to extend bottom of window */
+ try = res->start - num;
+ if ((*base == 0) || (*base == try)) {
+ if (adjust_resource(s->io[i].res,
+ res->start - num,
+ res->end - res->start + num + 1))
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+
+struct pccard_resource_ops pccard_iodyn_ops = {
+ .validate_mem = NULL,
+ .find_io = iodyn_find_io,
+ .find_mem = NULL,
+ .add_io = NULL,
+ .add_mem = NULL,
+ .init = static_init,
+ .exit = NULL,
+};
+EXPORT_SYMBOL(pccard_iodyn_ops);
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index ffa5f3cae57..142efac3c38 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -22,7 +22,7 @@
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
-static int static_init(struct pcmcia_socket *s)
+int static_init(struct pcmcia_socket *s)
{
/* the good thing about SS_CAP_STATIC_MAP sockets is
* that they don't need a resource database */
@@ -32,118 +32,44 @@ static int static_init(struct pcmcia_socket *s)
return 0;
}
-
-struct pccard_resource_ops pccard_static_ops = {
- .validate_mem = NULL,
- .adjust_io_region = NULL,
- .find_io = NULL,
- .find_mem = NULL,
- .add_io = NULL,
- .add_mem = NULL,
- .init = static_init,
- .exit = NULL,
-};
-EXPORT_SYMBOL(pccard_static_ops);
-
-
-#ifdef CONFIG_PCCARD_IODYN
-
-static struct resource *
-make_resource(unsigned long b, unsigned long n, int flags, char *name)
+struct resource *pcmcia_make_resource(unsigned long start, unsigned long end,
+ int flags, const char *name)
{
struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
if (res) {
res->name = name;
- res->start = b;
- res->end = b + n - 1;
+ res->start = start;
+ res->end = start + end - 1;
res->flags = flags;
}
return res;
}
-struct pcmcia_align_data {
- unsigned long mask;
- unsigned long offset;
-};
-
-static resource_size_t pcmcia_align(void *align_data,
- const struct resource *res,
- resource_size_t size, resource_size_t align)
+static int static_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
{
- struct pcmcia_align_data *data = align_data;
- resource_size_t start;
+ if (!s->io_offset)
+ return -EINVAL;
+ *base = s->io_offset | (*base & 0x0fff);
- start = (res->start & ~data->mask) + data->offset;
- if (start < res->start)
- start += data->mask + 1;
-
-#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
- }
-#endif
-
-#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
- if ((res->start + size - 1) >= 1024)
- start = res->end;
- }
-#endif
-
- return start;
-}
-
-
-static int iodyn_adjust_io_region(struct resource *res, unsigned long r_start,
- unsigned long r_end, struct pcmcia_socket *s)
-{
- return adjust_resource(res, r_start, r_end - r_start + 1);
+ return 0;
}
-static struct resource *iodyn_find_io_region(unsigned long base, int num,
- unsigned long align, struct pcmcia_socket *s)
-{
- struct resource *res = make_resource(0, num, IORESOURCE_IO,
- dev_name(&s->dev));
- struct pcmcia_align_data data;
- unsigned long min = base;
- int ret;
-
- if (align == 0)
- align = 0x10000;
-
- data.mask = align - 1;
- data.offset = base & data.mask;
-
-#ifdef CONFIG_PCI
- if (s->cb_dev) {
- ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
- min, 0, pcmcia_align, &data);
- } else
-#endif
- ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
- 1, pcmcia_align, &data);
-
- if (ret != 0) {
- kfree(res);
- res = NULL;
- }
- return res;
-}
-
-struct pccard_resource_ops pccard_iodyn_ops = {
+struct pccard_resource_ops pccard_static_ops = {
.validate_mem = NULL,
- .adjust_io_region = iodyn_adjust_io_region,
- .find_io = iodyn_find_io_region,
+ .find_io = static_find_io,
.find_mem = NULL,
.add_io = NULL,
.add_mem = NULL,
.init = static_init,
.exit = NULL,
};
-EXPORT_SYMBOL(pccard_iodyn_ops);
+EXPORT_SYMBOL(pccard_static_ops);
+
-#endif /* CONFIG_PCCARD_IODYN */
+MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("rsrc_nonstatic");
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index a6eb7b59ba9..dcd1a4ad3d6 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -34,8 +34,10 @@
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
+/* moved to rsrc_mgr.c
MODULE_AUTHOR("David A. Hinds, Dominik Brodowski");
MODULE_LICENSE("GPL");
+*/
/* Parameters that can be set with 'insmod' */
@@ -70,27 +72,13 @@ struct socket_data {
======================================================================*/
static struct resource *
-make_resource(resource_size_t b, resource_size_t n, int flags, const char *name)
-{
- struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
-
- if (res) {
- res->name = name;
- res->start = b;
- res->end = b + n - 1;
- res->flags = flags;
- }
- return res;
-}
-
-static struct resource *
claim_region(struct pcmcia_socket *s, resource_size_t base,
resource_size_t size, int type, char *name)
{
struct resource *res, *parent;
parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource;
- res = make_resource(base, size, type | IORESOURCE_BUSY, name);
+ res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name);
if (res) {
#ifdef CONFIG_PCI
@@ -661,8 +649,9 @@ pcmcia_align(void *align_data, const struct resource *res,
* Adjust an existing IO region allocation, but making sure that we don't
* encroach outside the resources which the user supplied.
*/
-static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_start,
- unsigned long r_end, struct pcmcia_socket *s)
+static int __nonstatic_adjust_io_region(struct pcmcia_socket *s,
+ unsigned long r_start,
+ unsigned long r_end)
{
struct resource_map *m;
struct socket_data *s_data = s->resource_data;
@@ -675,8 +664,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
if (start > r_start || r_end > end)
continue;
- ret = adjust_resource(res, r_start, r_end - r_start + 1);
- break;
+ ret = 0;
}
return ret;
@@ -695,18 +683,17 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
======================================================================*/
-static struct resource *nonstatic_find_io_region(unsigned long base, int num,
- unsigned long align, struct pcmcia_socket *s)
+static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s,
+ unsigned long base, int num,
+ unsigned long align)
{
- struct resource *res = make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev));
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
+ dev_name(&s->dev));
struct socket_data *s_data = s->resource_data;
struct pcmcia_align_data data;
unsigned long min = base;
int ret;
- if (align == 0)
- align = 0x10000;
-
data.mask = align - 1;
data.offset = base & data.mask;
data.map = &s_data->io_db;
@@ -727,10 +714,97 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
return res;
}
+static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
+ unsigned int *base, unsigned int num,
+ unsigned int align)
+{
+ int i, ret = 0;
+
+ /* Check for an already-allocated window that must conflict with
+ * what was asked for. It is a hack because it does not catch all
+ * potential conflicts, just the most obvious ones.
+ */
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ if (!s->io[i].res)
+ continue;
+
+ if (!*base)
+ continue;
+
+ if ((s->io[i].res->start & (align-1)) == *base)
+ return -EBUSY;
+ }
+
+ for (i = 0; i < MAX_IO_WIN; i++) {
+ struct resource *res = s->io[i].res;
+ unsigned int try;
+
+ if (res && (res->flags & IORESOURCE_BITS) !=
+ (attr & IORESOURCE_BITS))
+ continue;
+
+ if (!res) {
+ if (align == 0)
+ align = 0x10000;
+
+ res = s->io[i].res = __nonstatic_find_io_region(s,
+ *base, num,
+ align);
+ if (!res)
+ return -EINVAL;
+
+ *base = res->start;
+ s->io[i].res->flags =
+ ((res->flags & ~IORESOURCE_BITS) |
+ (attr & IORESOURCE_BITS));
+ s->io[i].InUse = num;
+ return 0;
+ }
+
+ /* Try to extend top of window */
+ try = res->end + 1;
+ if ((*base == 0) || (*base == try)) {
+ ret = __nonstatic_adjust_io_region(s, res->start,
+ res->end + num);
+ if (!ret) {
+ ret = adjust_resource(s->io[i].res, res->start,
+ res->end - res->start + num + 1);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+
+ /* Try to extend bottom of window */
+ try = res->start - num;
+ if ((*base == 0) || (*base == try)) {
+ ret = __nonstatic_adjust_io_region(s,
+ res->start - num,
+ res->end);
+ if (!ret) {
+ ret = adjust_resource(s->io[i].res,
+ res->start - num,
+ res->end - res->start + num + 1);
+ if (ret)
+ continue;
+ *base = try;
+ s->io[i].InUse += num;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+
static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
u_long align, int low, struct pcmcia_socket *s)
{
- struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev));
+ struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM,
+ dev_name(&s->dev));
struct socket_data *s_data = s->resource_data;
struct pcmcia_align_data data;
unsigned long min, max;
@@ -861,23 +935,42 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
return -ENODEV;
#if defined(CONFIG_X86)
- /* If this is the root bus, the risk of hitting
- * some strange system devices which aren't protected
- * by either ACPI resource tables or properly requested
- * resources is too big. Therefore, don't do auto-adding
- * of resources at the moment.
+ /* If this is the root bus, the risk of hitting some strange
+ * system devices is too high: If a driver isn't loaded, the
+ * resources are not claimed; even if a driver is loaded, it
+ * may not request all resources or even the wrong one. We
+ * can neither trust the rest of the kernel nor ACPI/PNP and
+ * CRS parsing to get it right. Therefore, use several
+ * safeguards:
+ *
+ * - Do not auto-add resources if the CardBus bridge is on
+ * the PCI root bus
+ *
+ * - Avoid any I/O ports < 0x100.
+ *
+ * - On PCI-PCI bridges, only use resources which are set up
+ * exclusively for the secondary PCI bus: the risk of hitting
+ * system devices is quite low, as they usually aren't
+ * connected to the secondary PCI bus.
*/
if (s->cb_dev->bus->number == 0)
return -EINVAL;
-#endif
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ res = s->cb_dev->bus->resource[i];
+#else
pci_bus_for_each_resource(s->cb_dev->bus, res, i) {
+#endif
if (!res)
continue;
if (res->flags & IORESOURCE_IO) {
+ /* safeguard against the root resource, where the
+ * risk of hitting any other device would be too
+ * high */
if (res == &ioport_resource)
continue;
+
dev_printk(KERN_INFO, &s->cb_dev->dev,
"pcmcia: parent PCI bridge window: %pR\n",
res);
@@ -887,8 +980,12 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
}
if (res->flags & IORESOURCE_MEM) {
+ /* safeguard against the root resource, where the
+ * risk of hitting any other device would be too
+ * high */
if (res == &iomem_resource)
continue;
+
dev_printk(KERN_INFO, &s->cb_dev->dev,
"pcmcia: parent PCI bridge window: %pR\n",
res);
@@ -956,8 +1053,7 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
struct pccard_resource_ops pccard_nonstatic_ops = {
.validate_mem = pcmcia_nonstatic_validate_mem,
- .adjust_io_region = nonstatic_adjust_io_region,
- .find_io = nonstatic_find_io_region,
+ .find_io = nonstatic_find_io,
.find_mem = nonstatic_find_mem_region,
.add_io = adjust_io,
.add_mem = adjust_memory,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 83ace277426..424e576f3ac 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1303,13 +1303,6 @@ static int yenta_dev_suspend_noirq(struct device *dev)
pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
pci_disable_device(pdev);
- /*
- * Some laptops (IBM T22) do not like us putting the Cardbus
- * bridge into D3. At a guess, some other laptop will
- * probably require this, so leave it commented out for now.
- */
- /* pci_set_power_state(dev, 3); */
-
return 0;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6c3320d7505..3e1b8a28871 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -390,6 +390,7 @@ config EEEPC_WMI
depends on ACPI_WMI
depends on INPUT
depends on EXPERIMENTAL
+ depends on BACKLIGHT_CLASS_DEVICE
select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
@@ -527,4 +528,13 @@ config ACPI_CMPC
keys as input device, backlight device, tablet and accelerometer
devices.
+config INTEL_SCU_IPC
+ bool "Intel SCU IPC Support"
+ depends on X86_MRST
+ default y
+ ---help---
+	  IPC is used to bridge communications between the kernel and the SCU
+	  on some embedded Intel x86 platforms. This is not needed for PC-type
+ machines.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a906490e353..8770bfe7143 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
+obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 7f9e5ddc949..3bf399fe2bb 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -24,6 +24,7 @@
#include <acpi/acpi_drivers.h>
#include <linux/backlight.h>
#include <linux/input.h>
+#include <linux/rfkill.h>
MODULE_LICENSE("GPL");
@@ -37,7 +38,7 @@ struct cmpc_accel {
#define CMPC_ACCEL_HID "ACCE0000"
#define CMPC_TABLET_HID "TBLT0000"
-#define CMPC_BL_HID "IPML200"
+#define CMPC_IPML_HID "IPML200"
#define CMPC_KEYS_HID "FnBT0000"
/*
@@ -461,43 +462,168 @@ static const struct backlight_ops cmpc_bl_ops = {
.update_status = cmpc_bl_update_status
};
-static int cmpc_bl_add(struct acpi_device *acpi)
+/*
+ * RFKILL code.
+ */
+
+static acpi_status cmpc_get_rfkill_wlan(acpi_handle handle,
+ unsigned long long *value)
{
- struct backlight_properties props;
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0xC1;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "GRDI", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static acpi_status cmpc_set_rfkill_wlan(acpi_handle handle,
+ unsigned long long value)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+ unsigned long long output;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0xC1;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = value;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_integer(handle, "GWRI", &input, &output);
+ return status;
+}
+
+static void cmpc_rfkill_query(struct rfkill *rfkill, void *data)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long state;
+ bool blocked;
+
+ handle = data;
+ status = cmpc_get_rfkill_wlan(handle, &state);
+ if (ACPI_SUCCESS(status)) {
+ blocked = state & 1 ? false : true;
+ rfkill_set_sw_state(rfkill, blocked);
+ }
+}
+
+static int cmpc_rfkill_block(void *data, bool blocked)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long state;
+
+ handle = data;
+ status = cmpc_get_rfkill_wlan(handle, &state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ if (blocked)
+ state &= ~1;
+ else
+ state |= 1;
+ status = cmpc_set_rfkill_wlan(handle, state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ return 0;
+}
+
+static const struct rfkill_ops cmpc_rfkill_ops = {
+ .query = cmpc_rfkill_query,
+ .set_block = cmpc_rfkill_block,
+};
+
+/*
+ * Common backlight and rfkill code.
+ */
+
+struct ipml200_dev {
struct backlight_device *bd;
+ struct rfkill *rf;
+};
+
+static int cmpc_ipml_add(struct acpi_device *acpi)
+{
+ int retval;
+ struct ipml200_dev *ipml;
+ struct backlight_properties props;
+
+ ipml = kmalloc(sizeof(*ipml), GFP_KERNEL);
+ if (ipml == NULL)
+ return -ENOMEM;
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 7;
- bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle,
- &cmpc_bl_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
- dev_set_drvdata(&acpi->dev, bd);
+ ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev,
+ acpi->handle, &cmpc_bl_ops,
+ &props);
+ if (IS_ERR(ipml->bd)) {
+ retval = PTR_ERR(ipml->bd);
+ goto out_bd;
+ }
+
+ ipml->rf = rfkill_alloc("cmpc_rfkill", &acpi->dev, RFKILL_TYPE_WLAN,
+ &cmpc_rfkill_ops, acpi->handle);
+ /* rfkill_alloc may fail if RFKILL is disabled. We should still work
+ * anyway. */
+ if (!IS_ERR(ipml->rf)) {
+ retval = rfkill_register(ipml->rf);
+ if (retval) {
+ rfkill_destroy(ipml->rf);
+ ipml->rf = NULL;
+ }
+ } else {
+ ipml->rf = NULL;
+ }
+
+ dev_set_drvdata(&acpi->dev, ipml);
return 0;
+
+out_bd:
+ kfree(ipml);
+ return retval;
}
-static int cmpc_bl_remove(struct acpi_device *acpi, int type)
+static int cmpc_ipml_remove(struct acpi_device *acpi, int type)
{
- struct backlight_device *bd;
+ struct ipml200_dev *ipml;
+
+ ipml = dev_get_drvdata(&acpi->dev);
+
+ backlight_device_unregister(ipml->bd);
+
+ if (ipml->rf) {
+ rfkill_unregister(ipml->rf);
+ rfkill_destroy(ipml->rf);
+ }
+
+ kfree(ipml);
- bd = dev_get_drvdata(&acpi->dev);
- backlight_device_unregister(bd);
return 0;
}
-static const struct acpi_device_id cmpc_bl_device_ids[] = {
- {CMPC_BL_HID, 0},
+static const struct acpi_device_id cmpc_ipml_device_ids[] = {
+ {CMPC_IPML_HID, 0},
{"", 0}
};
-static struct acpi_driver cmpc_bl_acpi_driver = {
+static struct acpi_driver cmpc_ipml_acpi_driver = {
.owner = THIS_MODULE,
.name = "cmpc",
.class = "cmpc",
- .ids = cmpc_bl_device_ids,
+ .ids = cmpc_ipml_device_ids,
.ops = {
- .add = cmpc_bl_add,
- .remove = cmpc_bl_remove
+ .add = cmpc_ipml_add,
+ .remove = cmpc_ipml_remove
}
};
@@ -580,7 +706,7 @@ static int cmpc_init(void)
if (r)
goto failed_keys;
- r = acpi_bus_register_driver(&cmpc_bl_acpi_driver);
+ r = acpi_bus_register_driver(&cmpc_ipml_acpi_driver);
if (r)
goto failed_bl;
@@ -598,7 +724,7 @@ failed_accel:
acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
failed_tablet:
- acpi_bus_unregister_driver(&cmpc_bl_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver);
failed_bl:
acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
@@ -611,7 +737,7 @@ static void cmpc_exit(void)
{
acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
- acpi_bus_unregister_driver(&cmpc_bl_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver);
acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
}
@@ -621,7 +747,7 @@ module_exit(cmpc_exit);
static const struct acpi_device_id cmpc_device_ids[] = {
{CMPC_ACCEL_HID, 0},
{CMPC_TABLET_HID, 0},
- {CMPC_BL_HID, 0},
+ {CMPC_IPML_HID, 0},
{CMPC_KEYS_HID, 0},
{"", 0}
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index b227eb469f4..9dc50fbf3d0 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -206,7 +206,7 @@ static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
{
struct backlight_device *bd = eeepc->backlight_device;
int old = bd->props.brightness;
- int new;
+ int new = old;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
new = code - NOTIFY_BRNUP_MIN + 1;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 47b4fd75aa3..e325aeb37d2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1090,10 +1090,9 @@ static int __init fujitsu_init(void)
if (acpi_disabled)
return -ENODEV;
- fujitsu = kmalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
+ fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
if (!fujitsu)
return -ENOMEM;
- memset(fujitsu, 0, sizeof(struct fujitsu_t));
fujitsu->keycode1 = KEY_PROG1;
fujitsu->keycode2 = KEY_PROG2;
fujitsu->keycode3 = KEY_PROG3;
@@ -1150,12 +1149,11 @@ static int __init fujitsu_init(void)
/* Register hotkey driver */
- fujitsu_hotkey = kmalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
+ fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
if (!fujitsu_hotkey) {
ret = -ENOMEM;
goto fail_hotkey;
}
- memset(fujitsu_hotkey, 0, sizeof(struct fujitsu_hotkey_t));
result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
if (result < 0) {
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
new file mode 100644
index 00000000000..576c3ed9243
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -0,0 +1,829 @@
+/*
+ * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
+ *
+ * (C) Copyright 2008-2010 Intel Corporation
+ * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * The SCU running on the ARC processor communicates with other entities
+ * running on the IA core through an IPC mechanism, which handles messaging
+ * between the IA core and the SCU. The SCU has two IPC channels, IPC-1 and
+ * IPC-2: IPC-1 is used between IA32 and the SCU, while IPC-2 is used between
+ * the P-Unit and the SCU. This driver deals with IPC-1 and provides an API
+ * for the power control unit registers (e.g. MSIC) along with other APIs.
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <asm/setup.h>
+#include <asm/intel_scu_ipc.h>
+
+/* IPC defines the following message types */
+#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
+#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */
+#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */
+#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */
+#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */
+
+/* Command id associated with message IPCMSG_PCNTRL */
+#define IPC_CMD_PCNTRL_W 0 /* Register write */
+#define IPC_CMD_PCNTRL_R 1 /* Register read */
+#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
+
+/* Miscellaneous command ids */
+#define IPC_CMD_INDIRECT_RD 2 /* 32bit indirect read */
+#define IPC_CMD_INDIRECT_WR 5 /* 32bit indirect write */
+
+/*
+ * IPC register summary
+ *
+ * IPC register blocks are memory mapped at a fixed address of 0xFF11C000.
+ * To read or write information to the SCU, the driver writes to the IPC-1
+ * memory-mapped registers (base address 0xFF11C000). The IPC mechanism
+ * works as follows:
+ *
+ * 1. IA core cDMI interface claims this transaction and converts it to a
+ * Transaction Layer Packet (TLP) message which is sent across the cDMI.
+ *
+ * 2. South Complex cDMI block receives this message and writes it to
+ * the IPC-1 register block, causing an interrupt to the SCU
+ *
+ * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
+ * message handler is called within firmware.
+ */
+
+#define IPC_BASE_ADDR 0xFF11C000 /* IPC1 base register address */
+#define IPC_MAX_ADDR 0x100 /* Maximum IPC registers */
+#define IPC_WWBUF_SIZE 16 /* IPC Write buffer Size */
+#define IPC_RWBUF_SIZE 16 /* IPC Read buffer Size */
+#define IPC_I2C_BASE 0xFF12B000 /* I2C control register base address */
+#define IPC_I2C_MAX_ADDR 0x10 /* Maximum I2C registers */
+
+static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id);
+static void ipc_remove(struct pci_dev *pdev);
+
+struct intel_scu_ipc_dev {
+ struct pci_dev *pdev;
+ void __iomem *ipc_base;
+ void __iomem *i2c_base;
+};
+
+static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+
+static int platform = 1;
+module_param(platform, int, 0);
+MODULE_PARM_DESC(platform, "1 for Moorestown platform");
+
+
+
+
+/*
+ * IPC Read Buffer (Read Only):
+ * 16 byte buffer for receiving data from SCU, if IPC command
+ * processing results in response data
+ */
+#define IPC_READ_BUFFER 0x90
+
+#define IPC_I2C_CNTRL_ADDR 0
+#define I2C_DATA_ADDR 0x04
+
+static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
+
+/*
+ * Command Register (Write Only):
+ * A write to this register results in an interrupt to the SCU core processor
+ * Format:
+ * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
+ */
+static inline void ipc_command(u32 cmd) /* Send ipc command */
+{
+ writel(cmd, ipcdev.ipc_base);
+}
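As a reading aid only (not part of the patch), the command word layout described above can be packed with a small helper; the name ipc_pack_command is invented for illustration, and the driver open-codes the same packing at its call sites, e.g. (count * 3) << 16 | id << 12 | 0 << 8 | op in pwr_reg_rdwr() below.

static inline u32 ipc_pack_command(u8 op, u8 cmd_id, bool ioc, u8 size)
{
        /* |rfu2(8)|size(8)|command id(4)|rfu1(3)|ioc(1)|command(8)| */
        return ((u32)size << 16) | ((u32)(cmd_id & 0xf) << 12) |
               ((ioc ? 1u : 0u) << 8) | op;
}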
+
+/*
+ * IPC Write Buffer (Write Only):
+ * 16-byte buffer for sending data associated with IPC command to
+ * SCU. Size of the data is specified in the IPC_COMMAND_REG register
+ */
+static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
+{
+ writel(data, ipcdev.ipc_base + 0x80 + offset);
+}
+
+/*
+ * IPC destination Pointer (Write Only):
+ * Use content as pointer for destination write
+ */
+static inline void ipc_write_dptr(u32 data) /* Write dptr data */
+{
+ writel(data, ipcdev.ipc_base + 0x0C);
+}
+
+/*
+ * IPC Source Pointer (Write Only):
+ * Use content as pointer for read location
+ */
+static inline void ipc_write_sptr(u32 data) /* Write sptr data */
+{
+ writel(data, ipcdev.ipc_base + 0x08);
+}
+
+/*
+ * Status Register (Read Only):
+ * Driver will read this register to get the ready/busy status of the IPC
+ * block and error status of the IPC command that was just processed by SCU
+ * Format:
+ * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
+ */
+
+static inline u8 ipc_read_status(void)
+{
+ return __raw_readl(ipcdev.ipc_base + 0x04);
+}
+
+static inline u8 ipc_data_readb(u32 offset) /* Read ipc byte data */
+{
+ return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
+{
+ return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
+}
+
+static inline int busy_loop(void) /* Wait until the SCU is no longer busy */
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = ipc_read_status();
+ while (status & 1) {
+ udelay(1); /* SCU processing time is a few microseconds */
+ status = ipc_read_status();
+ loop_count++;
+ /* give up if the SCU does not clear the busy bit after many retries */
+ if (loop_count > 100000) {
+ dev_err(&ipcdev.pdev->dev, "IPC timed out");
+ return -ETIMEDOUT;
+ }
+ }
+ return (status >> 1) & 1;
+}
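Purely illustrative helpers for decoding the status word documented above (the names are invented; busy_loop() itself only reports the error bit in bit 1):

static inline bool scu_status_busy(u32 status)    { return status & 0x1; }
static inline bool scu_status_error(u32 status)   { return (status >> 1) & 0x1; }
static inline u8 scu_status_errcode(u32 status)   { return (status >> 16) & 0xff; }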
+
+/* Read/Write power control (PMIC in Langwell, MSIC in Penwell) registers */
+static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
+{
+ int nc;
+ u32 offset = 0;
+ u32 err = 0;
+ u8 cbuf[IPC_WWBUF_SIZE] = { '\0' };
+ u32 *wbuf = (u32 *)&cbuf;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+
+ if (platform == 1) {
+ /* Entry is 4 bytes for read/write, 5 bytes for read modify */
+ for (nc = 0; nc < count; nc++) {
+ cbuf[offset] = addr[nc];
+ cbuf[offset + 1] = addr[nc] >> 8;
+ if (id != IPC_CMD_PCNTRL_R)
+ cbuf[offset + 2] = data[nc];
+ if (id == IPC_CMD_PCNTRL_M) {
+ cbuf[offset + 3] = data[nc + 1];
+ offset += 1;
+ }
+ offset += 3;
+ }
+ for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
+ ipc_data_writel(wbuf[nc], offset); /* Write wbuff */
+
+ } else {
+ for (nc = 0, offset = 0; nc < count; nc++, offset += 2)
+ ipc_data_writel(addr[nc], offset); /* Write addresses */
+ if (id != IPC_CMD_PCNTRL_R) {
+ for (nc = 0; nc < count; nc++, offset++)
+ ipc_data_writel(data[nc], offset); /* Write data */
+ if (id == IPC_CMD_PCNTRL_M)
+ ipc_data_writel(data[nc + 1], offset); /* Mask value*/
+ }
+ }
+
+ if (id != IPC_CMD_PCNTRL_M)
+ ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
+ else
+ ipc_command((count * 4) << 16 | id << 12 | 0 << 8 | op);
+
+ err = busy_loop();
+
+ if (id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
+ /* Workaround: values are read as 0 without memcpy_fromio */
+ memcpy_fromio(cbuf, ipcdev.ipc_base + IPC_READ_BUFFER, 16);
+ if (platform == 1) {
+ for (nc = 0, offset = 2; nc < count; nc++, offset += 3)
+ data[nc] = ipc_data_readb(offset);
+ } else {
+ for (nc = 0; nc < count; nc++)
+ data[nc] = ipc_data_readb(nc);
+ }
+ }
+ mutex_unlock(&ipclock);
+ return err;
+}
+
+/**
+ * intel_scu_ipc_ioread8 - read a byte via the SCU
+ * @addr: register on SCU
+ * @data: return pointer for read byte
+ *
+ * Read a single register. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_ioread8(u16 addr, u8 *data)
+{
+ return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread8);
+
+/**
+ * intel_scu_ipc_ioread16 - read a word via the SCU
+ * @addr: register on SCU
+ * @data: return pointer for read word
+ *
+ * Read a register pair. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_ioread16(u16 addr, u16 *data)
+{
+ u16 x[2] = {addr, addr + 1 };
+ return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread16);
+
+/**
+ * intel_scu_ipc_ioread32 - read a dword via the SCU
+ * @addr: register on SCU
+ * @data: return pointer for read dword
+ *
+ * Read four registers. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_ioread32(u16 addr, u32 *data)
+{
+ u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
+ return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_ioread32);
+
+/**
+ * intel_scu_ipc_iowrite8 - write a byte via the SCU
+ * @addr: register on SCU
+ * @data: byte to write
+ *
+ * Write a single register. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_iowrite8(u16 addr, u8 data)
+{
+ return pwr_reg_rdwr(&addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
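A hypothetical caller of the byte accessors might look like the sketch below; the register offset 0x3b and the function name are made up for illustration:

static int example_toggle_pmic_bit(void)
{
        u8 val;
        int ret;

        ret = intel_scu_ipc_ioread8(0x3b, &val);  /* hypothetical register */
        if (ret)
                return ret;
        return intel_scu_ipc_iowrite8(0x3b, val | 0x01);
}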
+
+/**
+ * intel_scu_ipc_iowrite16 - write a word via the SCU
+ * @addr: register on SCU
+ * @data: word to write
+ *
+ * Write two registers. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_iowrite16(u16 addr, u16 data)
+{
+ u16 x[2] = {addr, addr + 1 };
+ return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
+
+/**
+ * intel_scu_ipc_iowrite32 - write a dword via the SCU
+ * @addr: register on SCU
+ * @data: dword to write
+ *
+ * Write four registers. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_iowrite32(u16 addr, u32 data)
+{
+ u16 x[4] = {addr, addr + 1, addr + 2, addr + 3};
+ return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_iowrite32);
+
+/**
+ * intel_scu_ipc_readv - read a set of registers
+ * @addr: register list
+ * @data: bytes to return
+ * @len: length of array
+ *
+ * Read registers. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * The largest array length permitted by the hardware is 5 items.
+ *
+ * This function may sleep.
+ */
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len)
+{
+ return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
+}
+EXPORT_SYMBOL(intel_scu_ipc_readv);
+
+/**
+ * intel_scu_ipc_writev - write a set of registers
+ * @addr: register list
+ * @data: bytes to write
+ * @len: length of array
+ *
+ * Write registers. Returns 0 on success or an error code. All
+ * locking between SCU accesses is handled for the caller.
+ *
+ * The largest array length permitted by the hardware is 5 items.
+ *
+ * This function may sleep.
+ *
+ */
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
+{
+ return pwr_reg_rdwr(addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
+}
+EXPORT_SYMBOL(intel_scu_ipc_writev);
+
+
+/**
+ * intel_scu_ipc_update_register - r/m/w a register
+ * @addr: register address
+ * @bits: bits to update
+ * @mask: mask of bits to update
+ *
+ * Read-modify-write a power control unit register. @bits supplies the new
+ * bit values and @mask is a bitmap selecting which bits to update:
+ * 0 = leave this bit unchanged, 1 = modify this bit.
+ * Returns 0 on success or an error code.
+ *
+ * This function may sleep. Locking between SCU accesses is handled
+ * for the caller.
+ */
+int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask)
+{
+ u8 data[2] = { bits, mask };
+ return pwr_reg_rdwr(&addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
+}
+EXPORT_SYMBOL(intel_scu_ipc_update_register);
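A usage sketch for the bits/mask convention, with an invented register offset:

static int example_update(void)
{
        /* Update only bits 1 and 3 of a hypothetical register 0x24:
         * mask = 0x0a selects the bits, bits = 0x02 sets bit 1 and
         * clears bit 3; everything else is left untouched. */
        return intel_scu_ipc_update_register(0x24, 0x02, 0x0a);
}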
+
+/**
+ * intel_scu_ipc_register_read - 32bit indirect read
+ * @addr: register address
+ * @value: 32bit value return
+ *
+ * Performs IA 32 bit indirect read, returns 0 on success, or an
+ * error code.
+ *
+ * Can be used when SCCB(System Controller Configuration Block) register
+ * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+ *
+ * This function may sleep. Locking for SCU accesses is handled for
+ * the caller.
+ */
+int intel_scu_ipc_register_read(u32 addr, u32 *value)
+{
+ u32 err = 0;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+ ipc_write_sptr(addr);
+ ipc_command(4 << 16 | IPC_CMD_INDIRECT_RD);
+ err = busy_loop();
+ *value = ipc_data_readl(0);
+ mutex_unlock(&ipclock);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_register_read);
+
+/**
+ * intel_scu_ipc_register_write - 32bit indirect write
+ * @addr: register address
+ * @value: 32bit value to write
+ *
+ * Performs IA 32 bit indirect write, returns 0 on success, or an
+ * error code.
+ *
+ * Can be used when SCCB(System Controller Configuration Block) register
+ * HRIM(Honor Restricted IPC Messages) is set (bit 23)
+ *
+ * This function may sleep. Locking for SCU accesses is handled for
+ * the caller.
+ */
+int intel_scu_ipc_register_write(u32 addr, u32 value)
+{
+ u32 err = 0;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+ ipc_write_dptr(addr);
+ ipc_data_writel(value, 0);
+ ipc_command(4 << 16 | IPC_CMD_INDIRECT_WR);
+ err = busy_loop();
+ mutex_unlock(&ipclock);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_register_write);
+
+/**
+ * intel_scu_ipc_simple_command - send a simple command
+ * @cmd: command
+ * @sub: sub type
+ *
+ * Issue a simple command to the SCU. Do not use this interface if
+ * you must then access data as any data values may be overwritten
+ * by another SCU access by the time this function returns.
+ *
+ * This function may sleep. Locking for SCU accesses is handled for
+ * the caller.
+ */
+int intel_scu_ipc_simple_command(int cmd, int sub)
+{
+ u32 err = 0;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+ ipc_command(cmd << 12 | sub);
+ err = busy_loop();
+ mutex_unlock(&ipclock);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_simple_command);
+
+/**
+ * intel_scu_ipc_command - command with data
+ * @cmd: command
+ * @sub: sub type
+ * @in: input data
+ * @inlen: input length
+ * @out: output data
+ * @outlen: output length
+ *
+ * Issue a command to the SCU which involves data transfers. Do the
+ * data copies under the lock but leave it for the caller to interpret
+ */
+
+int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+ u32 *out, int outlen)
+{
+ u32 err = 0;
+ int i = 0;
+
+ mutex_lock(&ipclock);
+ if (ipcdev.pdev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < inlen; i++)
+ ipc_data_writel(*in++, 4 * i);
+
+ ipc_command(cmd << 12 | sub);
+ err = busy_loop();
+
+ for (i = 0; i < outlen; i++)
+ *out++ = ipc_data_readl(4 * i);
+
+ mutex_unlock(&ipclock);
+ return err;
+}
+EXPORT_SYMBOL(intel_scu_ipc_command);
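A hedged usage sketch: IPCMSG_FW_REVISION is defined above, but the sub-command value 0 and the output length are assumptions for illustration only:

static int example_read_fw_revision(u32 *rev, int dwords)
{
        /* No input payload; request the firmware revision and copy
         * @dwords 32-bit words of response data into @rev. */
        return intel_scu_ipc_command(IPCMSG_FW_REVISION, 0, NULL, 0,
                                     rev, dwords);
}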
+
+/*I2C commands */
+#define IPC_I2C_WRITE 1 /* I2C Write command */
+#define IPC_I2C_READ 2 /* I2C Read command */
+
+/**
+ * intel_scu_ipc_i2c_cntrl - I2C read/write operations
+ * @addr: I2C address + command bits
+ * @data: data to read/write
+ *
+ * Perform an I2C read/write operation via the SCU. All locking is
+ * handled for the caller. This function may sleep.
+ *
+ * Returns an error code or 0 on success.
+ *
+ * This has to be in the IPC driver for the locking.
+ */
+int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
+{
+ u32 cmd = 0;
+
+ mutex_lock(&ipclock);
+ cmd = (addr >> 24) & 0xFF;
+ if (cmd == IPC_I2C_READ) {
+ writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
+ /* Write not getting updated without delay */
+ mdelay(1);
+ *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
+ } else if (cmd == IPC_I2C_WRITE) {
+ writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+ mdelay(1);
+ writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
+ } else {
+ dev_err(&ipcdev.pdev->dev,
+ "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
+
+ mutex_unlock(&ipclock);
+ return -1;
+ }
+ mutex_unlock(&ipclock);
+ return 0;
+}
+EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
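A sketch of how @addr is assembled, assuming the top byte selects the operation as the code above does; the device address 0x48 is invented:

static int example_i2c_read(u32 *data)
{
        /* Top byte of the address word = operation; the low bits carry
         * the (hypothetical) device address/command bits for the SCU. */
        return intel_scu_ipc_i2c_cntrl((IPC_I2C_READ << 24) | 0x48, data);
}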
+
+#define IPC_FW_LOAD_ADDR 0xFFFC0000 /* Storage location for FW image */
+#define IPC_FW_UPDATE_MBOX_ADDR 0xFFFFDFF4 /* Mailbox between ipc and scu */
+#define IPC_MAX_FW_SIZE 262144 /* 256K storage size for loading the FW image */
+#define IPC_FW_MIP_HEADER_SIZE 2048 /* Firmware MIP header size */
+/* IPC inform SCU to get ready for update process */
+#define IPC_CMD_FW_UPDATE_READY 0x10FE
+/* IPC inform SCU to go for update process */
+#define IPC_CMD_FW_UPDATE_GO 0x20FE
+/* Status code for fw update */
+#define IPC_FW_UPDATE_SUCCESS 0x444f4e45 /* Status code 'DONE' */
+#define IPC_FW_UPDATE_BADN 0x4241444E /* Status code 'BADN' */
+#define IPC_FW_TXHIGH 0x54784849 /* Status code 'TxHI' */
+#define IPC_FW_TXLOW 0x54784c4f /* Status code 'TxLO' */
+
+struct fw_update_mailbox {
+ u32 status;
+ u32 scu_flag;
+ u32 driver_flag;
+};
+
+
+/**
+ * intel_scu_ipc_fw_update - Firmware update utility
+ * @buffer: firmware buffer
+ * @length: size of firmware buffer
+ *
+ * This function provides an interface to load the firmware into
+ * the SCU. Returns 0 on success or -1 on failure
+ */
+int intel_scu_ipc_fw_update(u8 *buffer, u32 length)
+{
+ void __iomem *fw_update_base;
+ struct fw_update_mailbox __iomem *mailbox = NULL;
+ int retry_cnt = 0;
+ u32 status;
+
+ mutex_lock(&ipclock);
+ fw_update_base = ioremap_nocache(IPC_FW_LOAD_ADDR, (128*1024));
+ if (fw_update_base == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENOMEM;
+ }
+ mailbox = ioremap_nocache(IPC_FW_UPDATE_MBOX_ADDR,
+ sizeof(struct fw_update_mailbox));
+ if (mailbox == NULL) {
+ iounmap(fw_update_base);
+ mutex_unlock(&ipclock);
+ return -ENOMEM;
+ }
+
+ ipc_command(IPC_CMD_FW_UPDATE_READY);
+
+ /* Initialize mailbox */
+ writel(0, &mailbox->status);
+ writel(0, &mailbox->scu_flag);
+ writel(0, &mailbox->driver_flag);
+
+ /* Driver copies the 2KB MIP header to SRAM at 0xFFFC0000*/
+ memcpy_toio(fw_update_base, buffer, 0x800);
+
+ /* Driver sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02).
+ * Upon receiving this command, SCU will write the 2K MIP header
+ * from 0xFFFC0000 into NAND.
+ * SCU will write a status code into the Mailbox, and then set scu_flag.
+ */
+
+ ipc_command(IPC_CMD_FW_UPDATE_GO);
+
+ /* Driver stalls until scu_flag is set */
+ while (readl(&mailbox->scu_flag) != 1) {
+ rmb();
+ mdelay(1);
+ }
+
+ /* Driver checks Mailbox status.
+ * If the status is 'BADN', then abort (bad NAND).
+ * If the status is 'IPC_FW_TXLOW', then continue.
+ */
+ while (readl(&mailbox->status) != IPC_FW_TXLOW) {
+ rmb();
+ mdelay(10);
+ }
+ mdelay(10);
+
+update_retry:
+ if (retry_cnt > 5)
+ goto update_end;
+
+ if (readl(&mailbox->status) != IPC_FW_TXLOW)
+ goto update_end;
+ buffer = buffer + 0x800;
+ memcpy_toio(fw_update_base, buffer, 0x20000);
+ writel(1, &mailbox->driver_flag);
+ while (readl(&mailbox->scu_flag) == 1) {
+ rmb();
+ mdelay(1);
+ }
+
+ /* check for 'BADN' */
+ if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN)
+ goto update_end;
+
+ while (readl(&mailbox->status) != IPC_FW_TXHIGH) {
+ rmb();
+ mdelay(10);
+ }
+ mdelay(10);
+
+ if (readl(&mailbox->status) != IPC_FW_TXHIGH)
+ goto update_end;
+
+ buffer = buffer + 0x20000;
+ memcpy_toio(fw_update_base, buffer, 0x20000);
+ writel(0, &mailbox->driver_flag);
+
+ while (readl(&mailbox->scu_flag) == 0) {
+ rmb();
+ mdelay(1);
+ }
+
+ /* check for 'BADN' */
+ if (readl(&mailbox->status) == IPC_FW_UPDATE_BADN)
+ goto update_end;
+
+ if (readl(&mailbox->status) == IPC_FW_TXLOW) {
+ ++retry_cnt;
+ goto update_retry;
+ }
+
+update_end:
+ status = readl(&mailbox->status);
+
+ iounmap(fw_update_base);
+ iounmap(mailbox);
+ mutex_unlock(&ipclock);
+
+ if (status == IPC_FW_UPDATE_SUCCESS)
+ return 0;
+ return -1;
+}
+EXPORT_SYMBOL(intel_scu_ipc_fw_update);
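A hypothetical caller, not part of this patch, could obtain the image through the standard firmware loader; the firmware file name below is invented:

#include <linux/firmware.h>

static int example_scu_fw_update(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "intel-scu-fw.bin", dev);
        if (ret)
                return ret;
        ret = intel_scu_ipc_fw_update((u8 *)fw->data, fw->size);
        release_firmware(fw);
        return ret;
}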
+
+/*
+ * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is
+ * set to 1. In that case the calling API must wait for the interrupt handler,
+ * which in turn unlocks it. This is currently unused.
+ *
+ * This is edge triggered so we need take no action to clear anything
+ */
+static irqreturn_t ioc(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * ipc_probe - probe an Intel SCU IPC
+ * @dev: the PCI device matching
+ * @id: entry in the match table
+ *
+ * Enable and install an Intel SCU IPC. This appears in the PCI space
+ * but uses some hard coded addresses as well.
+ */
+static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int err;
+ resource_size_t pci_resource;
+
+ if (ipcdev.pdev) /* We support only one SCU */
+ return -EBUSY;
+
+ ipcdev.pdev = pci_dev_get(dev);
+
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+
+ err = pci_request_regions(dev, "intel_scu_ipc");
+ if (err)
+ return err;
+
+ pci_resource = pci_resource_start(dev, 0);
+ if (!pci_resource)
+ return -ENOMEM;
+
+ if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
+ return -EBUSY;
+
+ ipcdev.ipc_base = ioremap_nocache(IPC_BASE_ADDR, IPC_MAX_ADDR);
+ if (!ipcdev.ipc_base)
+ return -ENOMEM;
+
+ ipcdev.i2c_base = ioremap_nocache(IPC_I2C_BASE, IPC_I2C_MAX_ADDR);
+ if (!ipcdev.i2c_base) {
+ iounmap(ipcdev.ipc_base);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * ipc_remove - remove a bound IPC device
+ * @pdev: PCI device
+ *
+ * In practice the SCU is not removable but this function is also
+ * called for each device on a module unload or cleanup which is the
+ * path that will get used.
+ *
+ * Free up the mappings and release the PCI resources
+ */
+static void ipc_remove(struct pci_dev *pdev)
+{
+ free_irq(pdev->irq, &ipcdev);
+ pci_release_regions(pdev);
+ pci_dev_put(ipcdev.pdev);
+ iounmap(ipcdev.ipc_base);
+ iounmap(ipcdev.i2c_base);
+ ipcdev.pdev = NULL;
+}
+
+static const struct pci_device_id pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver ipc_driver = {
+ .name = "intel_scu_ipc",
+ .id_table = pci_ids,
+ .probe = ipc_probe,
+ .remove = ipc_remove,
+};
+
+
+static int __init intel_scu_ipc_init(void)
+{
+ return pci_register_driver(&ipc_driver);
+}
+
+static void __exit intel_scu_ipc_exit(void)
+{
+ pci_unregister_driver(&ipc_driver);
+}
+
+MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
+MODULE_DESCRIPTION("Intel SCU IPC driver");
+MODULE_LICENSE("GPL");
+
+module_init(intel_scu_ipc_init);
+module_exit(intel_scu_ipc_exit);
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 996223a7c00..afd762b58ad 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -59,6 +59,7 @@
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
+#include <linux/i8042.h>
#define MSI_DRIVER_VERSION "0.5"
@@ -118,7 +119,8 @@ static int set_lcd_level(int level)
buf[0] = 0x80;
buf[1] = (u8) (level*31);
- return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1);
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
+ NULL, 0, 1);
}
static int get_lcd_level(void)
@@ -126,7 +128,8 @@ static int get_lcd_level(void)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
+ &rdata, 1, 1);
if (result < 0)
return result;
@@ -138,7 +141,8 @@ static int get_auto_brightness(void)
u8 wdata = 4, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
+ &rdata, 1, 1);
if (result < 0)
return result;
@@ -152,14 +156,16 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
- result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
+ &rdata, 1, 1);
if (result < 0)
return result;
wdata[0] = 0x84;
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
- return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1);
+ return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
+ NULL, 0, 1);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -254,7 +260,7 @@ static int bl_update_status(struct backlight_device *b)
return set_lcd_level(b->props.brightness);
}
-static struct backlight_ops msibl_ops = {
+static const struct backlight_ops msibl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
@@ -353,7 +359,8 @@ static ssize_t store_lcd_level(struct device *dev,
int level, ret;
- if (sscanf(buf, "%i", &level) != 1 || (level < 0 || level >= MSI_LCD_LEVEL_MAX))
+ if (sscanf(buf, "%i", &level) != 1 ||
+ (level < 0 || level >= MSI_LCD_LEVEL_MAX))
return -EINVAL;
ret = set_lcd_level(level);
@@ -393,7 +400,8 @@ static ssize_t store_auto_brightness(struct device *dev,
}
static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
-static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness, store_auto_brightness);
+static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
+ store_auto_brightness);
static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
@@ -424,8 +432,9 @@ static struct platform_device *msipf_device;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- printk("msi-laptop: Identified laptop model '%s'.\n", id->ident);
- return 0;
+ printk(KERN_INFO "msi-laptop: Identified laptop model '%s'.\n",
+ id->ident);
+ return 0;
}
static struct dmi_system_id __initdata msi_dmi_table[] = {
@@ -435,7 +444,8 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR,
+ "MICRO-STAR INT'L CO.,LTD")
},
.callback = dmi_check_cb
},
@@ -465,7 +475,8 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR,
+ "MICRO-STAR INT'L CO.,LTD")
},
.callback = dmi_check_cb
},
@@ -484,6 +495,35 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
},
.callback = dmi_check_cb
},
+ {
+ .ident = "MSI N051",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-N051"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD")
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI N014",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI CR620",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
+ },
+ .callback = dmi_check_cb
+ },
{ }
};
@@ -552,11 +592,71 @@ static void rfkill_cleanup(void)
}
}
+static void msi_update_rfkill(struct work_struct *ignored)
+{
+ get_wireless_state_ec_standard();
+
+ if (rfk_wlan)
+ rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ if (rfk_bluetooth)
+ rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ if (rfk_threeg)
+ rfkill_set_sw_state(rfk_threeg, !threeg_s);
+}
+static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill);
+
+static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & 0x20)
+ return false;
+
+ /* 0x54 wwan, 0x62 bluetooth, 0x76 wlan */
+ if (unlikely(data == 0xe0)) {
+ extended = true;
+ return false;
+ } else if (unlikely(extended)) {
+ switch (data) {
+ case 0x54:
+ case 0x62:
+ case 0x76:
+ schedule_delayed_work(&msi_rfkill_work,
+ round_jiffies_relative(0.5 * HZ));
+ break;
+ }
+ extended = false;
+ }
+
+ return false;
+}
+
+static void msi_init_rfkill(struct work_struct *ignored)
+{
+ if (rfk_wlan) {
+ rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ rfkill_wlan_set(NULL, !wlan_s);
+ }
+ if (rfk_bluetooth) {
+ rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ rfkill_bluetooth_set(NULL, !bluetooth_s);
+ }
+ if (rfk_threeg) {
+ rfkill_set_sw_state(rfk_threeg, !threeg_s);
+ rfkill_threeg_set(NULL, !threeg_s);
+ }
+}
+static DECLARE_DELAYED_WORK(msi_rfkill_init, msi_init_rfkill);
+
static int rfkill_init(struct platform_device *sdev)
{
/* add rfkill */
int retval;
+ /* keep the hardware wireless state */
+ get_wireless_state_ec_standard();
+
rfk_bluetooth = rfkill_alloc("msi-bluetooth", &sdev->dev,
RFKILL_TYPE_BLUETOOTH,
&rfkill_bluetooth_ops, NULL);
@@ -590,6 +690,10 @@ static int rfkill_init(struct platform_device *sdev)
goto err_threeg;
}
+ /* schedule the initial rfkill state update */
+ schedule_delayed_work(&msi_rfkill_init,
+ round_jiffies_relative(1 * HZ));
+
return 0;
err_threeg:
@@ -653,9 +757,24 @@ static int load_scm_model_init(struct platform_device *sdev)
/* initial rfkill */
result = rfkill_init(sdev);
if (result < 0)
- return result;
+ goto fail_rfkill;
+
+ result = i8042_install_filter(msi_laptop_i8042_filter);
+ if (result) {
+ printk(KERN_ERR
+ "msi-laptop: Unable to install key filter\n");
+ goto fail_filter;
+ }
return 0;
+
+fail_filter:
+ rfkill_cleanup();
+
+fail_rfkill:
+
+ return result;
+
}
static int __init msi_init(void)
@@ -714,7 +833,8 @@ static int __init msi_init(void)
goto fail_platform_device1;
}
- ret = sysfs_create_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+ ret = sysfs_create_group(&msipf_device->dev.kobj,
+ &msipf_attribute_group);
if (ret)
goto fail_platform_device2;
@@ -739,6 +859,11 @@ static int __init msi_init(void)
fail_platform_device2:
+ if (load_scm_model) {
+ i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_rfkill_work);
+ rfkill_cleanup();
+ }
platform_device_del(msipf_device);
fail_platform_device1:
@@ -758,6 +883,11 @@ fail_backlight:
static void __exit msi_cleanup(void)
{
+ if (load_scm_model) {
+ i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_rfkill_work);
+ rfkill_cleanup();
+ }
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
if (!old_ec_model && threeg_exists)
@@ -766,8 +896,6 @@ static void __exit msi_cleanup(void)
platform_driver_unregister(&msipf_driver);
backlight_device_unregister(msibl_device);
- rfkill_cleanup();
-
/* Enable automatic brightness control again */
if (auto_brightness != 2)
set_auto_brightness(1);
@@ -788,3 +916,6 @@ MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1058:pvr0581:rvnMSI:rnMS-105
MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1412:*:rvnMSI:rnMS-1412:*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
MODULE_ALIAS("dmi:*:svnNOTEBOOK:pnSAM2000:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 63290b33c87..4bdb13796e2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -122,8 +122,14 @@ enum {
TP_NVRAM_POS_LEVEL_VOLUME = 0,
};
+/* Misc NVRAM-related */
+enum {
+ TP_NVRAM_LEVEL_VOLUME_MAX = 14,
+};
+
/* ACPI HIDs */
#define TPACPI_ACPI_HKEY_HID "IBM0068"
+#define TPACPI_ACPI_EC_HID "PNP0C09"
/* Input IDs */
#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
@@ -299,8 +305,8 @@ static struct {
u32 hotkey_tablet:1;
u32 light:1;
u32 light_status:1;
- u32 bright_16levels:1;
u32 bright_acpimode:1;
+ u32 bright_unkfw:1;
u32 wan:1;
u32 uwb:1;
u32 fan_ctrl_status_undef:1;
@@ -363,6 +369,9 @@ struct tpacpi_led_classdev {
unsigned int led;
};
+/* brightness level capabilities */
+static unsigned int bright_maxlvl; /* 0 = unknown */
+
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
static int dbg_wlswemul;
static int tpacpi_wlsw_emulstate;
@@ -480,6 +489,15 @@ static unsigned long __init tpacpi_check_quirks(
return 0;
}
+static inline bool __pure __init tpacpi_is_lenovo(void)
+{
+ return thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO;
+}
+
+static inline bool __pure __init tpacpi_is_ibm(void)
+{
+ return thinkpad_id.vendor == PCI_VENDOR_ID_IBM;
+}
/****************************************************************************
****************************************************************************
@@ -494,21 +512,13 @@ static unsigned long __init tpacpi_check_quirks(
*/
static acpi_handle root_handle;
+static acpi_handle ec_handle;
#define TPACPI_HANDLE(object, parent, paths...) \
static acpi_handle object##_handle; \
- static acpi_handle *object##_parent = &parent##_handle; \
- static char *object##_path; \
- static char *object##_paths[] = { paths }
-
-TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
- "\\_SB.PCI.ISA.EC", /* 570 */
- "\\_SB.PCI0.ISA0.EC0", /* 600e/x, 770e, 770x */
- "\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */
- "\\_SB.PCI0.AD4S.EC0", /* i1400, R30 */
- "\\_SB.PCI0.ICH3.EC0", /* R31 */
- "\\_SB.PCI0.LPC.EC", /* all others */
- );
+ static const acpi_handle *object##_parent __initdata = \
+ &parent##_handle; \
+ static char *object##_paths[] __initdata = { paths }
TPACPI_HANDLE(ecrd, ec, "ECRD"); /* 570 */
TPACPI_HANDLE(ecwr, ec, "ECWR"); /* 570 */
@@ -528,6 +538,7 @@ TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
"\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
"\\_SB.PCI0.VID0", /* 770e */
"\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
+ "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
"\\_SB.PCI0.AGP.VID", /* all others */
); /* R30, R31 */
@@ -594,9 +605,10 @@ static int acpi_evalf(acpi_handle handle,
switch (res_type) {
case 'd': /* int */
- if (res)
+ success = (status == AE_OK &&
+ out_obj.type == ACPI_TYPE_INTEGER);
+ if (success && res)
*(int *)res = out_obj.integer.value;
- success = status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER;
break;
case 'v': /* void */
success = status == AE_OK;
@@ -609,8 +621,8 @@ static int acpi_evalf(acpi_handle handle,
}
if (!success && !quiet)
- printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
- method, fmt0, status);
+ printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %s\n",
+ method, fmt0, acpi_format_exception(status));
return success;
}
@@ -661,11 +673,11 @@ static int issue_thinkpad_cmos_command(int cmos_cmd)
#define TPACPI_ACPIHANDLE_INIT(object) \
drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
- object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
+ object##_paths, ARRAY_SIZE(object##_paths))
-static void drv_acpi_handle_init(char *name,
- acpi_handle *handle, acpi_handle parent,
- char **paths, int num_paths, char **path)
+static void __init drv_acpi_handle_init(const char *name,
+ acpi_handle *handle, const acpi_handle parent,
+ char **paths, const int num_paths)
{
int i;
acpi_status status;
@@ -676,10 +688,9 @@ static void drv_acpi_handle_init(char *name,
for (i = 0; i < num_paths; i++) {
status = acpi_get_handle(parent, paths[i], handle);
if (ACPI_SUCCESS(status)) {
- *path = paths[i];
dbg_printk(TPACPI_DBG_INIT,
"Found ACPI handle %s for %s\n",
- *path, name);
+ paths[i], name);
return;
}
}
@@ -689,6 +700,43 @@ static void drv_acpi_handle_init(char *name,
*handle = NULL;
}
+static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
+ u32 level, void *context, void **return_value)
+{
+ *(acpi_handle *)return_value = handle;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static void __init tpacpi_acpi_handle_locate(const char *name,
+ const char *hid,
+ acpi_handle *handle)
+{
+ acpi_status status;
+ acpi_handle device_found;
+
+ BUG_ON(!name || !hid || !handle);
+ vdbg_printk(TPACPI_DBG_INIT,
+ "trying to locate ACPI handle for %s, using HID %s\n",
+ name, hid);
+
+ memset(&device_found, 0, sizeof(device_found));
+ status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
+ (void *)name, &device_found);
+
+ *handle = NULL;
+
+ if (ACPI_SUCCESS(status)) {
+ *handle = device_found;
+ dbg_printk(TPACPI_DBG_INIT,
+ "Found ACPI handle for %s\n", name);
+ } else {
+ vdbg_printk(TPACPI_DBG_INIT,
+ "Could not locate an ACPI handle for %s: %s\n",
+ name, acpi_format_exception(status));
+ }
+}
+
static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ibm_struct *ibm = data;
@@ -736,8 +784,8 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
"handling %s events\n", ibm->name);
} else {
printk(TPACPI_ERR
- "acpi_install_notify_handler(%s) failed: %d\n",
- ibm->name, status);
+ "acpi_install_notify_handler(%s) failed: %s\n",
+ ibm->name, acpi_format_exception(status));
}
return -ENODEV;
}
@@ -1035,80 +1083,6 @@ static void tpacpi_disable_brightness_delay(void)
"ACPI backlight control delay disabled\n");
}
-static int __init tpacpi_query_bcl_levels(acpi_handle handle)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- int rc;
-
- if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
- obj = (union acpi_object *)buffer.pointer;
- if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(TPACPI_ERR "Unknown _BCL data, "
- "please report this to %s\n", TPACPI_MAIL);
- rc = 0;
- } else {
- rc = obj->package.count;
- }
- } else {
- return 0;
- }
-
- kfree(buffer.pointer);
- return rc;
-}
-
-static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
- u32 lvl, void *context, void **rv)
-{
- char name[ACPI_PATH_SEGMENT_LENGTH];
- struct acpi_buffer buffer = { sizeof(name), &name };
-
- if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
- !strncmp("_BCL", name, sizeof(name) - 1)) {
- BUG_ON(!rv || !*rv);
- **(int **)rv = tpacpi_query_bcl_levels(handle);
- return AE_CTRL_TERMINATE;
- } else {
- return AE_OK;
- }
-}
-
-/*
- * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
- */
-static int __init tpacpi_check_std_acpi_brightness_support(void)
-{
- int status;
- int bcl_levels = 0;
- void *bcl_ptr = &bcl_levels;
-
- if (!vid_handle) {
- TPACPI_ACPIHANDLE_INIT(vid);
- }
- if (!vid_handle)
- return 0;
-
- /*
- * Search for a _BCL method, and execute it. This is safe on all
- * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
- * BIOS in ACPI backlight control mode. We do NOT have to care
- * about calling the _BCL method in an enabled video device, any
- * will do for our purposes.
- */
-
- status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
- tpacpi_acpi_walk_find_bcl, NULL, NULL,
- &bcl_ptr);
-
- if (ACPI_SUCCESS(status) && bcl_levels > 2) {
- tp_features.bright_acpimode = 1;
- return (bcl_levels - 2);
- }
-
- return 0;
-}
-
static void printk_deprecated_attribute(const char * const what,
const char * const details)
{
@@ -1872,34 +1846,9 @@ static bool __init tpacpi_is_fw_known(void)
****************************************************************************/
/*************************************************************************
- * thinkpad-acpi init subdriver
+ * thinkpad-acpi metadata subdriver
*/
-static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
-{
- printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
- printk(TPACPI_INFO "%s\n", TPACPI_URL);
-
- printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
- (thinkpad_id.bios_version_str) ?
- thinkpad_id.bios_version_str : "unknown",
- (thinkpad_id.ec_version_str) ?
- thinkpad_id.ec_version_str : "unknown");
-
- if (thinkpad_id.vendor && thinkpad_id.model_str)
- printk(TPACPI_INFO "%s %s, model %s\n",
- (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
- "IBM" : ((thinkpad_id.vendor ==
- PCI_VENDOR_ID_LENOVO) ?
- "Lenovo" : "Unknown vendor"),
- thinkpad_id.model_str,
- (thinkpad_id.nummodel_str) ?
- thinkpad_id.nummodel_str : "unknown");
-
- tpacpi_check_outdated_fw();
- return 0;
-}
-
static int thinkpad_acpi_driver_read(struct seq_file *m)
{
seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
@@ -2405,6 +2354,36 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
tpacpi_hotkey_send_key(__scancode); \
} while (0)
+ void issue_volchange(const unsigned int oldvol,
+ const unsigned int newvol)
+ {
+ unsigned int i = oldvol;
+
+ while (i > newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ i--;
+ }
+ while (i < newvol) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ i++;
+ }
+ }
+
+ void issue_brightnesschange(const unsigned int oldbrt,
+ const unsigned int newbrt)
+ {
+ unsigned int i = oldbrt;
+
+ while (i > newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ i--;
+ }
+ while (i < newbrt) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ i++;
+ }
+ }
+
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2414,41 +2393,61 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
- /* handle volume */
- if (oldn->volume_toggle != newn->volume_toggle) {
- if (oldn->mute != newn->mute) {
+ /*
+ * Handle volume
+ *
+ * This code is supposed to duplicate the IBM firmware behaviour:
+ * - Pressing MUTE issues mute hotkey message, even when already mute
+ * - Pressing Volume up/down issues volume up/down hotkey messages,
+ * even when already at maximum or minimum volume
+ * - The act of unmuting issues volume up/down notification,
+ * depending on which key was used to unmute
+ *
+ * We are constrained to what the NVRAM can tell us, which is not much
+ * and certainly not enough if more than one volume hotkey was pressed
+ * since the last poll cycle.
+ *
+ * Just to make our life interesting, some newer Lenovo ThinkPads have
+ * bugs in the BIOS and may fail to update volume_toggle properly.
+ */
+ if (newn->mute) {
+ /* muted */
+ if (!oldn->mute ||
+ oldn->volume_toggle != newn->volume_toggle ||
+ oldn->volume_level != newn->volume_level) {
+ /* recently muted, or repeated mute keypress, or
+ * multiple presses ending in mute */
+ issue_volchange(oldn->volume_level, newn->volume_level);
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
}
- if (oldn->volume_level > newn->volume_level) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
- } else if (oldn->volume_level < newn->volume_level) {
+ } else {
+ /* unmute */
+ if (oldn->mute) {
+ /* recently unmuted, issue 'unmute' keypress */
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
- } else if (oldn->mute == newn->mute) {
- /* repeated key presses that didn't change state */
- if (newn->mute) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
- } else if (newn->volume_level != 0) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
- } else {
+ }
+ if (oldn->volume_level != newn->volume_level) {
+ issue_volchange(oldn->volume_level, newn->volume_level);
+ } else if (oldn->volume_toggle != newn->volume_toggle) {
+ /* repeated vol up/down keypress at end of scale? */
+ if (newn->volume_level == 0)
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
- }
+ else if (newn->volume_level >= TP_NVRAM_LEVEL_VOLUME_MAX)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
}
}
/* handle brightness */
- if (oldn->brightness_toggle != newn->brightness_toggle) {
- if (oldn->brightness_level < newn->brightness_level) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
- } else if (oldn->brightness_level > newn->brightness_level) {
+ if (oldn->brightness_level != newn->brightness_level) {
+ issue_brightnesschange(oldn->brightness_level,
+ newn->brightness_level);
+ } else if (oldn->brightness_toggle != newn->brightness_toggle) {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level == 0)
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
- } else {
- /* repeated key presses that didn't change state */
- if (newn->brightness_level != 0) {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
- } else {
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
- }
- }
+ else if (newn->brightness_level >= bright_maxlvl
+ && !tp_features.bright_unkfw)
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
}
#undef TPACPI_COMPARE_KEY
@@ -3353,7 +3352,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
goto err_exit;
}
- if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
+ if (tpacpi_is_lenovo()) {
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"using Lenovo default hot key map\n");
memcpy(hotkey_keycode_map, &lenovo_keycode_map,
@@ -3391,11 +3390,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
}
/* Do not issue duplicate brightness change events to
- * userspace */
- if (!tp_features.bright_acpimode)
- /* update bright_acpimode... */
- tpacpi_check_std_acpi_brightness_support();
-
+ * userspace. tpacpi_detect_brightness_capabilities() must have
+ * been called before this point */
if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
printk(TPACPI_INFO
"This ThinkPad has standard ACPI backlight "
@@ -4422,7 +4418,8 @@ static int __init video_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
TPACPI_ACPIHANDLE_INIT(vid);
- TPACPI_ACPIHANDLE_INIT(vid2);
+ if (tpacpi_is_ibm())
+ TPACPI_ACPIHANDLE_INIT(vid2);
if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
/* G41, assume IVGA doesn't change */
@@ -4431,10 +4428,12 @@ static int __init video_init(struct ibm_init_struct *iibm)
if (!vid_handle)
/* video switching not supported on R30, R31 */
video_supported = TPACPI_VIDEO_NONE;
- else if (acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd"))
+ else if (tpacpi_is_ibm() &&
+ acpi_evalf(vid_handle, &video_orig_autosw, "SWIT", "qd"))
/* 570 */
video_supported = TPACPI_VIDEO_570;
- else if (acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd"))
+ else if (tpacpi_is_ibm() &&
+ acpi_evalf(vid_handle, &video_orig_autosw, "^VADL", "qd"))
/* 600e/x, 770e, 770x */
video_supported = TPACPI_VIDEO_770;
else
@@ -4811,8 +4810,10 @@ static int __init light_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
- TPACPI_ACPIHANDLE_INIT(ledb);
- TPACPI_ACPIHANDLE_INIT(lght);
+ if (tpacpi_is_ibm()) {
+ TPACPI_ACPIHANDLE_INIT(ledb);
+ TPACPI_ACPIHANDLE_INIT(lght);
+ }
TPACPI_ACPIHANDLE_INIT(cmos);
INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker);
@@ -5007,11 +5008,7 @@ enum { /* For TPACPI_LED_OLD */
static enum led_access_mode led_supported;
-TPACPI_HANDLE(led, ec, "SLED", /* 570 */
- "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, */
- /* T20-22, X20-21 */
- "LED", /* all others */
- ); /* R30, R31 */
+static acpi_handle led_handle;
#define TPACPI_LED_NUMLEDS 16
static struct tpacpi_led_classdev *tpacpi_leds;
@@ -5271,6 +5268,32 @@ static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
#undef TPACPI_LEDQ_IBM
#undef TPACPI_LEDQ_LNV
+static enum led_access_mode __init led_init_detect_mode(void)
+{
+ acpi_status status;
+
+ if (tpacpi_is_ibm()) {
+ /* 570 */
+ status = acpi_get_handle(ec_handle, "SLED", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_570;
+
+ /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+ status = acpi_get_handle(ec_handle, "SYSL", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_OLD;
+ }
+
+ /* most others */
+ status = acpi_get_handle(ec_handle, "LED", &led_handle);
+ if (ACPI_SUCCESS(status))
+ return TPACPI_LED_NEW;
+
+ /* R30, R31, and unknown firmwares */
+ led_handle = NULL;
+ return TPACPI_LED_NONE;
+}
+
static int __init led_init(struct ibm_init_struct *iibm)
{
unsigned int i;
@@ -5279,20 +5302,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
- TPACPI_ACPIHANDLE_INIT(led);
-
- if (!led_handle)
- /* led not supported on R30, R31 */
- led_supported = TPACPI_LED_NONE;
- else if (strlencmp(led_path, "SLED") == 0)
- /* 570 */
- led_supported = TPACPI_LED_570;
- else if (strlencmp(led_path, "SYSL") == 0)
- /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
- led_supported = TPACPI_LED_OLD;
- else
- /* all others */
- led_supported = TPACPI_LED_NEW;
+ led_supported = led_init_detect_mode();
vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
str_supported(led_supported), led_supported);
@@ -5741,11 +5751,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
}
} else if (acpi_tmp7) {
- if (acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
+ if (tpacpi_is_ibm() &&
+ acpi_evalf(ec_handle, NULL, "UPDT", "qv")) {
/* 600e/x, 770e, 770x */
thermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;
} else {
- /* Standard ACPI TMPx access, max 8 sensors */
+ /* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
}
} else {
@@ -5954,7 +5965,7 @@ static unsigned int tpacpi_brightness_nvram_get(void)
lnvram = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
& TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
- lnvram &= (tp_features.bright_16levels) ? 0x0f : 0x07;
+ lnvram &= bright_maxlvl;
return lnvram;
}
@@ -6063,8 +6074,7 @@ static int brightness_set(unsigned int value)
{
int res;
- if (value > ((tp_features.bright_16levels)? 15 : 7) ||
- value < 0)
+ if (value > bright_maxlvl || value < 0)
return -EINVAL;
vdbg_printk(TPACPI_DBG_BRGHT,
@@ -6139,6 +6149,80 @@ static struct backlight_ops ibm_backlight_data = {
/* --------------------------------------------------------------------- */
+static int __init tpacpi_query_bcl_levels(acpi_handle handle)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int rc;
+
+ if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+ obj = (union acpi_object *)buffer.pointer;
+ if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
+ printk(TPACPI_ERR "Unknown _BCL data, "
+ "please report this to %s\n", TPACPI_MAIL);
+ rc = 0;
+ } else {
+ rc = obj->package.count;
+ }
+ } else {
+ return 0;
+ }
+
+ kfree(buffer.pointer);
+ return rc;
+}
+
+static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ char name[ACPI_PATH_SEGMENT_LENGTH];
+ struct acpi_buffer buffer = { sizeof(name), &name };
+
+ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
+ !strncmp("_BCL", name, sizeof(name) - 1)) {
+ BUG_ON(!rv || !*rv);
+ **(int **)rv = tpacpi_query_bcl_levels(handle);
+ return AE_CTRL_TERMINATE;
+ } else {
+ return AE_OK;
+ }
+}
+
+/*
+ * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
+ */
+static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
+{
+ int status;
+ int bcl_levels = 0;
+ void *bcl_ptr = &bcl_levels;
+
+ if (!vid_handle)
+ TPACPI_ACPIHANDLE_INIT(vid);
+
+ if (!vid_handle)
+ return 0;
+
+ /*
+ * Search for a _BCL method, and execute it. This is safe on all
+ * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
+ * BIOS in ACPI backlight control mode. We do NOT have to care
+ * about calling the _BCL method in an enabled video device, any
+ * will do for our purposes.
+ */
+
+ status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
+ tpacpi_acpi_walk_find_bcl, NULL, NULL,
+ &bcl_ptr);
+
+ if (ACPI_SUCCESS(status) && bcl_levels > 2) {
+ tp_features.bright_acpimode = 1;
+ return bcl_levels - 2;
+ }
+
+ return 0;
+}
+
/*
* These are only useful for models that have only one possibility
* of GPU. If the BIOS model handles both ATI and Intel, don't use
@@ -6169,6 +6253,47 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
TPACPI_Q_IBM('7', '5', TPACPI_BRGHT_Q_NOEC), /* X41 Tablet */
};
+/*
+ * Detects the firmware brightness interface and sets
+ * tp_features.bright_* and bright_maxlvl accordingly.
+ */
+static void __init tpacpi_detect_brightness_capabilities(void)
+{
+ unsigned int b;
+
+ vdbg_printk(TPACPI_DBG_INIT,
+ "detecting firmware brightness interface capabilities\n");
+
+ /* we could run a quirks check here (same table used by
+ * brightness_init) if needed */
+
+ /*
+ * We always attempt to detect acpi support, so as to switch
+ * Lenovo Vista BIOS to ACPI brightness mode even if we are not
+ * going to publish a backlight interface
+ */
+ b = tpacpi_check_std_acpi_brightness_support();
+ switch (b) {
+ case 16:
+ bright_maxlvl = 15;
+ printk(TPACPI_INFO
+ "detected a 16-level brightness capable ThinkPad\n");
+ break;
+ case 8:
+ case 0:
+ bright_maxlvl = 7;
+ printk(TPACPI_INFO
+ "detected a 8-level brightness capable ThinkPad\n");
+ break;
+ default:
+ printk(TPACPI_ERR
+ "Unsupported brightness interface, "
+ "please contact %s\n", TPACPI_MAIL);
+ tp_features.bright_unkfw = 1;
+ bright_maxlvl = b - 1;
+ }
+}
+
static int __init brightness_init(struct ibm_init_struct *iibm)
{
struct backlight_properties props;
@@ -6182,14 +6307,13 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
quirks = tpacpi_check_quirks(brightness_quirk_table,
ARRAY_SIZE(brightness_quirk_table));
- /*
- * We always attempt to detect acpi support, so as to switch
- * Lenovo Vista BIOS to ACPI brightness mode even if we are not
- * going to publish a backlight interface
- */
- b = tpacpi_check_std_acpi_brightness_support();
- if (b > 0) {
+ /* tpacpi_detect_brightness_capabilities() must have run already */
+
+ /* if it is unknown, we don't handle it: it wouldn't be safe */
+ if (tp_features.bright_unkfw)
+ return 1;
+ if (tp_features.bright_acpimode) {
if (acpi_video_backlight_support()) {
if (brightness_enable > 1) {
printk(TPACPI_NOTICE
@@ -6218,15 +6342,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
}
- if (b > 16) {
- printk(TPACPI_ERR
- "Unsupported brightness interface, "
- "please contact %s\n", TPACPI_MAIL);
- return 1;
- }
- if (b == 16)
- tp_features.bright_16levels = 1;
-
/*
* Check for module parameter bogosity, note that we
* init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
@@ -6249,7 +6364,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
}
/* Safety */
- if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM &&
+ if (!tpacpi_is_ibm() &&
(brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM ||
brightness_mode == TPACPI_BRGHT_MODE_EC))
return -EINVAL;
@@ -6257,12 +6372,9 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (tpacpi_brightness_get_raw(&b) < 0)
return 1;
- if (tp_features.bright_16levels)
- printk(TPACPI_INFO
- "detected a 16-level brightness capable ThinkPad\n");
-
memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = (tp_features.bright_16levels) ? 15 : 7;
+ props.max_brightness = bright_maxlvl;
+ props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
NULL, NULL,
&ibm_backlight_data,
@@ -6285,7 +6397,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
"or not on your ThinkPad\n", TPACPI_MAIL);
}
- ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
+ /* Added by mistake in early 2007. Probably useless, but it could
+ * be working around some unknown firmware problem where the value
+ * read at startup doesn't match the real hardware state... so leave
+ * it in place just in case */
backlight_update_status(ibm_backlight_device);
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
@@ -6328,9 +6443,8 @@ static int brightness_read(struct seq_file *m)
} else {
seq_printf(m, "level:\t\t%d\n", level);
seq_printf(m, "commands:\tup, down\n");
- seq_printf(m, "commands:\tlevel <level>"
- " (<level> is 0-%d)\n",
- (tp_features.bright_16levels) ? 15 : 7);
+ seq_printf(m, "commands:\tlevel <level> (<level> is 0-%d)\n",
+ bright_maxlvl);
}
return 0;
@@ -6341,7 +6455,6 @@ static int brightness_write(char *buf)
int level;
int rc;
char *cmd;
- int max_level = (tp_features.bright_16levels) ? 15 : 7;
level = brightness_get(NULL);
if (level < 0)
@@ -6349,13 +6462,13 @@ static int brightness_write(char *buf)
while ((cmd = next_cmd(&buf))) {
if (strlencmp(cmd, "up") == 0) {
- if (level < max_level)
+ if (level < bright_maxlvl)
level++;
} else if (strlencmp(cmd, "down") == 0) {
if (level > 0)
level--;
} else if (sscanf(cmd, "level %d", &level) == 1 &&
- level >= 0 && level <= max_level) {
+ level >= 0 && level <= bright_maxlvl) {
/* new level set */
} else
return -EINVAL;
@@ -6669,6 +6782,8 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ tpacpi_disclose_usertask("ALSA", "set volume to %ld\n",
+ ucontrol->value.integer.value[0]);
return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
}
@@ -6692,6 +6807,9 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ tpacpi_disclose_usertask("ALSA", "%smute\n",
+ ucontrol->value.integer.value[0] ?
+ "un" : "");
return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
}
@@ -7968,9 +8086,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
tp_features.second_fan = 0;
fan_control_desired_level = 7;
- TPACPI_ACPIHANDLE_INIT(fans);
- TPACPI_ACPIHANDLE_INIT(gfan);
- TPACPI_ACPIHANDLE_INIT(sfan);
+ if (tpacpi_is_ibm()) {
+ TPACPI_ACPIHANDLE_INIT(fans);
+ TPACPI_ACPIHANDLE_INIT(gfan);
+ TPACPI_ACPIHANDLE_INIT(sfan);
+ }
quirks = tpacpi_check_quirks(fan_quirk_table,
ARRAY_SIZE(fan_quirk_table));
@@ -8662,6 +8782,10 @@ static int __init probe_for_thinkpad(void)
if (acpi_disabled)
return -ENODEV;
+ /* It would be dangerous to run the driver in this case */
+ if (!tpacpi_is_ibm() && !tpacpi_is_lenovo())
+ return -ENODEV;
+
/*
* Non-ancient models have better DMI tagging, but very old models
* don't. tpacpi_is_fw_known() is a cheat to help in that case.
@@ -8670,8 +8794,8 @@ static int __init probe_for_thinkpad(void)
(thinkpad_id.ec_model != 0) ||
tpacpi_is_fw_known();
- /* ec is required because many other handles are relative to it */
- TPACPI_ACPIHANDLE_INIT(ec);
+ /* The EC handle is required */
+ tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
if (!ec_handle) {
if (is_thinkpad)
printk(TPACPI_ERR
@@ -8685,12 +8809,34 @@ static int __init probe_for_thinkpad(void)
return 0;
}
+static void __init thinkpad_acpi_init_banner(void)
+{
+ printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ printk(TPACPI_INFO "%s\n", TPACPI_URL);
+
+ printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
+ (thinkpad_id.bios_version_str) ?
+ thinkpad_id.bios_version_str : "unknown",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ BUG_ON(!thinkpad_id.vendor);
+
+ if (thinkpad_id.model_str)
+ printk(TPACPI_INFO "%s %s, model %s\n",
+ (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
+ "IBM" : ((thinkpad_id.vendor ==
+ PCI_VENDOR_ID_LENOVO) ?
+ "Lenovo" : "Unknown vendor"),
+ thinkpad_id.model_str,
+ (thinkpad_id.nummodel_str) ?
+ thinkpad_id.nummodel_str : "unknown");
+}
/* Module init, exit, parameters */
static struct ibm_init_struct ibms_init[] __initdata = {
{
- .init = thinkpad_acpi_driver_init,
.data = &thinkpad_acpi_driver_data,
},
{
@@ -8960,6 +9106,9 @@ static int __init thinkpad_acpi_module_init(void)
/* Driver initialization */
+ thinkpad_acpi_init_banner();
+ tpacpi_check_outdated_fw();
+
TPACPI_ACPIHANDLE_INIT(ecrd);
TPACPI_ACPIHANDLE_INIT(ecwr);
@@ -9059,13 +9208,16 @@ static int __init thinkpad_acpi_module_init(void)
tpacpi_inputdev->name = "ThinkPad Extra Buttons";
tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
tpacpi_inputdev->id.bustype = BUS_HOST;
- tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
- thinkpad_id.vendor :
- PCI_VENDOR_ID_IBM;
+ tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
}
+
+ /* Init subdriver dependencies */
+ tpacpi_detect_brightness_capabilities();
+
+ /* Init subdrivers */
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ret = ibm_init(&ibms_init[i]);
if (ret >= 0 && *ibms_init[i].param)
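The thinkpad_acpi changes above drop the table of hard-coded EC paths in favour of looking the EC up by its ACPI HID (PNP0C09) with acpi_get_devices(). A minimal sketch of that lookup, assuming <linux/acpi.h> pulls in the ACPICA prototypes; the function names are placeholders:

#include <linux/acpi.h>

static acpi_status example_find_cb(acpi_handle handle, u32 level,
                                   void *context, void **return_value)
{
        *(acpi_handle *)return_value = handle;
        return AE_CTRL_TERMINATE;       /* stop at the first match */
}

static acpi_handle example_locate_by_hid(const char *hid)
{
        acpi_handle found = NULL;

        if (ACPI_FAILURE(acpi_get_devices(hid, example_find_cb,
                                          NULL, &found)))
                return NULL;
        return found;
}

/* e.g. acpi_handle ec = example_locate_by_hid("PNP0C09"); */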
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 39ec5b6c2e3..e4eaa14ed98 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -81,6 +81,16 @@ static struct wmi_block wmi_blocks;
#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
+static int debug_event;
+module_param(debug_event, bool, 0444);
+MODULE_PARM_DESC(debug_event,
+ "Log WMI Events [0/1]");
+
+static int debug_dump_wdg;
+module_param(debug_dump_wdg, bool, 0444);
+MODULE_PARM_DESC(debug_dump_wdg,
+ "Dump available WMI interfaces [0/1]");
+
static int acpi_wmi_remove(struct acpi_device *device, int type);
static int acpi_wmi_add(struct acpi_device *device);
static void acpi_wmi_notify(struct acpi_device *device, u32 event);
@@ -477,6 +487,64 @@ const struct acpi_buffer *in)
}
EXPORT_SYMBOL_GPL(wmi_set_block);
+static void wmi_dump_wdg(struct guid_block *g)
+{
+ char guid_string[37];
+
+ wmi_gtoa(g->guid, guid_string);
+ printk(KERN_INFO PREFIX "%s:\n", guid_string);
+ printk(KERN_INFO PREFIX "\tobject_id: %c%c\n",
+ g->object_id[0], g->object_id[1]);
+ printk(KERN_INFO PREFIX "\tnotify_id: %02X\n", g->notify_id);
+ printk(KERN_INFO PREFIX "\treserved: %02X\n", g->reserved);
+ printk(KERN_INFO PREFIX "\tinstance_count: %d\n", g->instance_count);
+ printk(KERN_INFO PREFIX "\tflags: %#x", g->flags);
+ if (g->flags) {
+ printk(" ");
+ if (g->flags & ACPI_WMI_EXPENSIVE)
+ printk("ACPI_WMI_EXPENSIVE ");
+ if (g->flags & ACPI_WMI_METHOD)
+ printk("ACPI_WMI_METHOD ");
+ if (g->flags & ACPI_WMI_STRING)
+ printk("ACPI_WMI_STRING ");
+ if (g->flags & ACPI_WMI_EVENT)
+ printk("ACPI_WMI_EVENT ");
+ }
+ printk("\n");
+
+}
+
+static void wmi_notify_debug(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ wmi_get_event_data(value, &response);
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (!obj)
+ return;
+
+ printk(KERN_INFO PREFIX "DEBUG Event ");
+ switch (obj->type) {
+ case ACPI_TYPE_BUFFER:
+ printk("BUFFER_TYPE - length %d\n", obj->buffer.length);
+ break;
+ case ACPI_TYPE_STRING:
+ printk("STRING_TYPE - %s\n", obj->string.pointer);
+ break;
+ case ACPI_TYPE_INTEGER:
+ printk("INTEGER_TYPE - %llu\n", obj->integer.value);
+ break;
+ case ACPI_TYPE_PACKAGE:
+ printk("PACKAGE_TYPE - %d elements\n", obj->package.count);
+ break;
+ default:
+ printk("object type 0x%X\n", obj->type);
+ }
+}
+
/**
* wmi_install_notify_handler - Register handler for WMI events
* @handler: Function to handle notifications
@@ -496,7 +564,7 @@ wmi_notify_handler handler, void *data)
if (!find_guid(guid, &block))
return AE_NOT_EXIST;
- if (block->handler)
+ if (block->handler && block->handler != wmi_notify_debug)
return AE_ALREADY_ACQUIRED;
block->handler = handler;
@@ -516,7 +584,7 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
- acpi_status status;
+ acpi_status status = AE_OK;
if (!guid)
return AE_BAD_PARAMETER;
@@ -524,14 +592,16 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!find_guid(guid, &block))
return AE_NOT_EXIST;
- if (!block->handler)
+ if (!block->handler || block->handler == wmi_notify_debug)
return AE_NULL_ENTRY;
- status = wmi_method_enable(block, 0);
-
- block->handler = NULL;
- block->handler_data = NULL;
-
+ if (debug_event) {
+ block->handler = wmi_notify_debug;
+ } else {
+ status = wmi_method_enable(block, 0);
+ block->handler = NULL;
+ block->handler_data = NULL;
+ }
return status;
}
EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
@@ -756,12 +826,10 @@ static __init acpi_status parse_wdg(acpi_handle handle)
total = obj->buffer.length / sizeof(struct guid_block);
- gblock = kzalloc(obj->buffer.length, GFP_KERNEL);
+ gblock = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
if (!gblock)
return AE_NO_MEMORY;
- memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
-
for (i = 0; i < total; i++) {
/*
Some WMI devices, like those for nVidia hooks, have a
@@ -776,12 +844,19 @@ static __init acpi_status parse_wdg(acpi_handle handle)
guid_string);
continue;
}
+ if (debug_dump_wdg)
+ wmi_dump_wdg(&gblock[i]);
+
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
if (!wblock)
return AE_NO_MEMORY;
wblock->gblock = gblock[i];
wblock->handle = handle;
+ if (debug_event) {
+ wblock->handler = wmi_notify_debug;
+ status = wmi_method_enable(wblock, 1);
+ }
list_add_tail(&wblock->list, &wmi_blocks.list);
}
@@ -840,6 +915,7 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
struct guid_block *block;
struct wmi_block *wblock;
struct list_head *p;
+ char guid_string[37];
list_for_each(p, &wmi_blocks.list) {
wblock = list_entry(p, struct wmi_block, list);
@@ -849,6 +925,11 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
(block->notify_id == event)) {
if (wblock->handler)
wblock->handler(event, wblock->handler_data);
+ if (debug_event) {
+ wmi_gtoa(wblock->gblock.guid, guid_string);
+ printk(KERN_INFO PREFIX "DEBUG Event GUID:"
+ " %s\n", guid_string);
+ }
acpi_bus_generate_netlink_event(
device->pnp.device_class, dev_name(&device->dev),
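One of the smaller cleanups in the wmi.c hunk above replaces a kzalloc() followed by memcpy() with a single kmemdup() call. The two forms below are equivalent for a full-length copy (the zeroing done by kzalloc() is redundant because every byte is overwritten); the names are illustrative:

#include <linux/slab.h>
#include <linux/string.h>

/* before: allocate, then copy */
static void *copy_old(const void *src, size_t len)
{
        void *dst = kzalloc(len, GFP_KERNEL);

        if (dst)
                memcpy(dst, src, len);
        return dst;
}

/* after: one call with the same result */
static void *copy_new(const void *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);
}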
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index faaa9b4d0d0..8e9ba177d81 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -57,6 +57,11 @@ config WM8350_POWER
Say Y here to enable support for the power management unit
provided by the Wolfson Microelectronics WM8350 PMIC.
+config TEST_POWER
+ tristate "Test power driver"
+ help
+ This driver is used for testing. It's safe to say M here.
+
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
select W1
@@ -65,10 +70,10 @@ config BATTERY_DS2760
Say Y here to enable support for batteries with ds2760 chip.
config BATTERY_DS2782
- tristate "DS2782 standalone gas-gauge"
+ tristate "DS2782/DS2786 standalone gas-gauge"
depends on I2C
help
- Say Y here to enable support for the DS2782 standalone battery
+ Say Y here to enable support for the DS2782/DS2786 standalone battery
gas-gauge.
config BATTERY_PMU
@@ -125,6 +130,12 @@ config BATTERY_MAX17040
in handheld and portable equipment. The MAX17040 is configured
to operate with a single lithium cell
+config BATTERY_Z2
+ tristate "Z2 battery driver"
+ depends on I2C && MACH_ZIPIT2
+ help
+ Say Y to include support for the battery on the Zipit Z2.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index a2ba7c85c97..00050809a6c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
+obj-$(CONFIG_TEST_POWER) += test_power.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
@@ -31,4 +32,5 @@ obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 3bf8d1f622e..4d3b27228a2 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -304,6 +304,28 @@ static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
}
+static void ds2760_battery_write_active_full(struct ds2760_device_info *di,
+ int active_full)
+{
+ unsigned char tmp[2] = {
+ active_full >> 8,
+ active_full & 0xff
+ };
+
+ if (tmp[0] == di->raw[DS2760_ACTIVE_FULL] &&
+ tmp[1] == di->raw[DS2760_ACTIVE_FULL + 1])
+ return;
+
+ w1_ds2760_write(di->w1_dev, tmp, DS2760_ACTIVE_FULL, sizeof(tmp));
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
+
+ /* Write to the di->raw[] buffer directly - the DS2760_ACTIVE_FULL
+ * values won't be read back by ds2760_battery_read_status() */
+ di->raw[DS2760_ACTIVE_FULL] = tmp[0];
+ di->raw[DS2760_ACTIVE_FULL + 1] = tmp[1];
+}
+
static void ds2760_battery_work(struct work_struct *work)
{
struct ds2760_device_info *di = container_of(work,
@@ -426,6 +448,45 @@ static int ds2760_battery_get_property(struct power_supply *psy,
return 0;
}
+static int ds2760_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ds2760_device_info *di = to_ds2760_device_info(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ /* the interface counts in uAh, convert the value */
+ ds2760_battery_write_active_full(di, val->intval / 1000L);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ /* ds2760_battery_set_current_accum() does the conversion */
+ ds2760_battery_set_current_accum(di, val->intval);
+ break;
+
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ds2760_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return 1;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static enum power_supply_property ds2760_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -460,6 +521,9 @@ static int ds2760_battery_probe(struct platform_device *pdev)
di->bat.properties = ds2760_battery_props;
di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
di->bat.get_property = ds2760_battery_get_property;
+ di->bat.set_property = ds2760_battery_set_property;
+ di->bat.property_is_writeable =
+ ds2760_battery_property_is_writeable;
di->bat.set_charged = ds2760_battery_set_charged;
di->bat.external_power_changed =
ds2760_battery_external_power_changed;
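The ds2760 hunk above makes CHARGE_FULL and CHARGE_NOW writable through sysfs by wiring up the power-supply class's set_property and property_is_writeable hooks. A minimal sketch of the shape of those two hooks as used here; the write itself is a placeholder:

#include <linux/errno.h>
#include <linux/power_supply.h>

static int example_set_property(struct power_supply *psy,
                                enum power_supply_property psp,
                                const union power_supply_propval *val)
{
        switch (psp) {
        case POWER_SUPPLY_PROP_CHARGE_FULL:
                /* placeholder: push val->intval out to the gas gauge */
                return 0;
        default:
                return -EPERM;  /* property exists, but is read-only */
        }
}

static int example_property_is_writeable(struct power_supply *psy,
                                         enum power_supply_property psp)
{
        return psp == POWER_SUPPLY_PROP_CHARGE_FULL;
}

/* in probe:
 *      bat.set_property = example_set_property;
 *      bat.property_is_writeable = example_property_is_writeable;
 */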
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 99c89976a90..d762a0cbc6a 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -5,6 +5,8 @@
*
* Author: Ryan Mallon <ryan@bluewatersys.com>
*
+ * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -20,12 +22,13 @@
#include <linux/idr.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/ds2782_battery.h>
#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */
-#define DS2782_REG_VOLT_MSB 0x0c
-#define DS2782_REG_TEMP_MSB 0x0a
-#define DS2782_REG_CURRENT_MSB 0x0e
+#define DS278x_REG_VOLT_MSB 0x0c
+#define DS278x_REG_TEMP_MSB 0x0a
+#define DS278x_REG_CURRENT_MSB 0x0e
/* EEPROM Block */
#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */
@@ -33,18 +36,33 @@
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
#define DS2782_CURRENT_UNITS 1563
-#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery)
+#define DS2786_REG_RARC 0x02 /* Remaining active relative capacity */
+
+#define DS2786_CURRENT_UNITS 25
+
+struct ds278x_info;
+
+struct ds278x_battery_ops {
+ int (*get_current)(struct ds278x_info *info, int *current_uA);
+ int (*get_voltage)(struct ds278x_info *info, int *voltage_uA);
+ int (*get_capacity)(struct ds278x_info *info, int *capacity_uA);
+
+};
+
+#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
-struct ds2782_info {
+struct ds278x_info {
struct i2c_client *client;
struct power_supply battery;
+ struct ds278x_battery_ops *ops;
int id;
+ int rsns;
};
static DEFINE_IDR(battery_id);
static DEFINE_MUTEX(battery_lock);
-static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
+static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val)
{
int ret;
@@ -58,7 +76,7 @@ static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
return 0;
}
-static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
+static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb,
s16 *val)
{
int ret;
@@ -73,7 +91,7 @@ static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
return 0;
}
-static int ds2782_get_temp(struct ds2782_info *info, int *temp)
+static int ds278x_get_temp(struct ds278x_info *info, int *temp)
{
s16 raw;
int err;
@@ -84,14 +102,14 @@ static int ds2782_get_temp(struct ds2782_info *info, int *temp)
* celsius. The temperature value is stored as a 10 bit number, plus
* sign in the upper bits of a 16 bit register.
*/
- err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_TEMP_MSB, &raw);
if (err)
return err;
*temp = ((raw / 32) * 125) / 100;
return 0;
}
-static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
+static int ds2782_get_current(struct ds278x_info *info, int *current_uA)
{
int sense_res;
int err;
@@ -102,7 +120,7 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
* The units of measurement for current are dependent on the value of
* the sense resistor.
*/
- err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
+ err = ds278x_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
if (err)
return err;
if (sense_res_raw == 0) {
@@ -113,14 +131,14 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n",
sense_res);
- err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
if (err)
return err;
*current_uA = raw * (DS2782_CURRENT_UNITS / sense_res);
return 0;
}
-static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA)
+static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
{
s16 raw;
int err;
@@ -129,36 +147,77 @@ static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA)
* Voltage is measured in units of 4.88mV. The voltage is stored as
* a 10-bit number plus sign, in the upper bits of a 16-bit register
*/
- err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw);
+ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
return err;
*voltage_uA = (raw / 32) * 4800;
return 0;
}
-static int ds2782_get_capacity(struct ds2782_info *info, int *capacity)
+static int ds2782_get_capacity(struct ds278x_info *info, int *capacity)
{
int err;
u8 raw;
- err = ds2782_read_reg(info, DS2782_REG_RARC, &raw);
+ err = ds278x_read_reg(info, DS2782_REG_RARC, &raw);
if (err)
return err;
*capacity = raw;
return raw;
}
-static int ds2782_get_status(struct ds2782_info *info, int *status)
+static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
+{
+ int err;
+ s16 raw;
+
+ err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
+ if (err)
+ return err;
+ *current_uA = (raw / 16) * (DS2786_CURRENT_UNITS / info->rsns);
+ return 0;
+}
+
+static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
+{
+ s16 raw;
+ int err;
+
+ /*
+ * Voltage is measured in units of 1.22mV. The voltage is stored as
+ * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ */
+ err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
+ if (err)
+ return err;
+ *voltage_uA = (raw / 8) * 1220;
+ return 0;
+}
+
+static int ds2786_get_capacity(struct ds278x_info *info, int *capacity)
+{
+ int err;
+ u8 raw;
+
+ err = ds278x_read_reg(info, DS2786_REG_RARC, &raw);
+ if (err)
+ return err;
+ /* Relative capacity is displayed with a resolution of 0.5% */
+ *capacity = raw / 2;
+ return 0;
+}
+
+static int ds278x_get_status(struct ds278x_info *info, int *status)
{
int err;
int current_uA;
int capacity;
- err = ds2782_get_current(info, &current_uA);
+ err = info->ops->get_current(info, &current_uA);
if (err)
return err;
- err = ds2782_get_capacity(info, &capacity);
+ err = info->ops->get_capacity(info, &capacity);
if (err)
return err;
@@ -174,32 +233,32 @@ static int ds2782_get_status(struct ds2782_info *info, int *status)
return 0;
}
-static int ds2782_battery_get_property(struct power_supply *psy,
+static int ds278x_battery_get_property(struct power_supply *psy,
enum power_supply_property prop,
union power_supply_propval *val)
{
- struct ds2782_info *info = to_ds2782_info(psy);
+ struct ds278x_info *info = to_ds278x_info(psy);
int ret;
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- ret = ds2782_get_status(info, &val->intval);
+ ret = ds278x_get_status(info, &val->intval);
break;
case POWER_SUPPLY_PROP_CAPACITY:
- ret = ds2782_get_capacity(info, &val->intval);
+ ret = info->ops->get_capacity(info, &val->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ds2782_get_voltage(info, &val->intval);
+ ret = info->ops->get_voltage(info, &val->intval);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ds2782_get_current(info, &val->intval);
+ ret = info->ops->get_current(info, &val->intval);
break;
case POWER_SUPPLY_PROP_TEMP:
- ret = ds2782_get_temp(info, &val->intval);
+ ret = ds278x_get_temp(info, &val->intval);
break;
default:
@@ -209,7 +268,7 @@ static int ds2782_battery_get_property(struct power_supply *psy,
return ret;
}
-static enum power_supply_property ds2782_battery_props[] = {
+static enum power_supply_property ds278x_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -217,18 +276,18 @@ static enum power_supply_property ds2782_battery_props[] = {
POWER_SUPPLY_PROP_TEMP,
};
-static void ds2782_power_supply_init(struct power_supply *battery)
+static void ds278x_power_supply_init(struct power_supply *battery)
{
battery->type = POWER_SUPPLY_TYPE_BATTERY;
- battery->properties = ds2782_battery_props;
- battery->num_properties = ARRAY_SIZE(ds2782_battery_props);
- battery->get_property = ds2782_battery_get_property;
+ battery->properties = ds278x_battery_props;
+ battery->num_properties = ARRAY_SIZE(ds278x_battery_props);
+ battery->get_property = ds278x_battery_get_property;
battery->external_power_changed = NULL;
}
-static int ds2782_battery_remove(struct i2c_client *client)
+static int ds278x_battery_remove(struct i2c_client *client)
{
- struct ds2782_info *info = i2c_get_clientdata(client);
+ struct ds278x_info *info = i2c_get_clientdata(client);
power_supply_unregister(&info->battery);
kfree(info->battery.name);
@@ -237,19 +296,45 @@ static int ds2782_battery_remove(struct i2c_client *client)
idr_remove(&battery_id, info->id);
mutex_unlock(&battery_lock);
- i2c_set_clientdata(client, info);
-
kfree(info);
return 0;
}
-static int ds2782_battery_probe(struct i2c_client *client,
+enum ds278x_num_id {
+ DS2782 = 0,
+ DS2786,
+};
+
+static struct ds278x_battery_ops ds278x_ops[] = {
+ [DS2782] = {
+ .get_current = ds2782_get_current,
+ .get_voltage = ds2782_get_voltage,
+ .get_capacity = ds2782_get_capacity,
+ },
+ [DS2786] = {
+ .get_current = ds2786_get_current,
+ .get_voltage = ds2786_get_voltage,
+ .get_capacity = ds2786_get_capacity,
+ }
+};
+
+static int ds278x_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct ds2782_info *info;
+ struct ds278x_platform_data *pdata = client->dev.platform_data;
+ struct ds278x_info *info;
int ret;
int num;
+ /*
+ * ds2786 should have the sense resistor value set
+ * in the platform data
+ */
+ if (id->driver_data == DS2786 && !pdata) {
+ dev_err(&client->dev, "missing platform data for ds2786\n");
+ return -EINVAL;
+ }
+
/* Get an ID for this battery */
ret = idr_pre_get(&battery_id, GFP_KERNEL);
if (ret == 0) {
@@ -269,15 +354,20 @@ static int ds2782_battery_probe(struct i2c_client *client,
goto fail_info;
}
- info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num);
+ info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num);
if (!info->battery.name) {
ret = -ENOMEM;
goto fail_name;
}
+ if (id->driver_data == DS2786)
+ info->rsns = pdata->rsns;
+
i2c_set_clientdata(client, info);
info->client = client;
- ds2782_power_supply_init(&info->battery);
+ info->id = num;
+ info->ops = &ds278x_ops[id->driver_data];
+ ds278x_power_supply_init(&info->battery);
ret = power_supply_register(&client->dev, &info->battery);
if (ret) {
@@ -290,7 +380,6 @@ static int ds2782_battery_probe(struct i2c_client *client,
fail_register:
kfree(info->battery.name);
fail_name:
- i2c_set_clientdata(client, info);
kfree(info);
fail_info:
mutex_lock(&battery_lock);
@@ -300,31 +389,32 @@ fail_id:
return ret;
}
-static const struct i2c_device_id ds2782_id[] = {
- {"ds2782", 0},
+static const struct i2c_device_id ds278x_id[] = {
+ {"ds2782", DS2782},
+ {"ds2786", DS2786},
{},
};
-static struct i2c_driver ds2782_battery_driver = {
+static struct i2c_driver ds278x_battery_driver = {
.driver = {
.name = "ds2782-battery",
},
- .probe = ds2782_battery_probe,
- .remove = ds2782_battery_remove,
- .id_table = ds2782_id,
+ .probe = ds278x_battery_probe,
+ .remove = ds278x_battery_remove,
+ .id_table = ds278x_id,
};
-static int __init ds2782_init(void)
+static int __init ds278x_init(void)
{
- return i2c_add_driver(&ds2782_battery_driver);
+ return i2c_add_driver(&ds278x_battery_driver);
}
-module_init(ds2782_init);
+module_init(ds278x_init);
-static void __exit ds2782_exit(void)
+static void __exit ds278x_exit(void)
{
- i2c_del_driver(&ds2782_battery_driver);
+ i2c_del_driver(&ds278x_battery_driver);
}
-module_exit(ds2782_exit);
+module_exit(ds278x_exit);
MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 8fefe5a7355..baefcf1cffc 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -354,7 +354,7 @@ static enum power_supply_property olpc_bat_props[] = {
#define EEPROM_END 0x80
#define EEPROM_SIZE (EEPROM_END - EEPROM_START)
-static ssize_t olpc_bat_eeprom_read(struct kobject *kobj,
+static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
uint8_t ec_byte;
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index a232de6a570..69f8aa3a6a4 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -404,6 +404,13 @@ static int usb_wakeup_enabled;
static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
{
+ if (pdata->suspend) {
+ int ret = pdata->suspend(state);
+
+ if (ret)
+ return ret;
+ }
+
if (device_may_wakeup(&pdev->dev)) {
if (ac_irq)
ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
@@ -423,6 +430,9 @@ static int pda_power_resume(struct platform_device *pdev)
disable_irq_wake(ac_irq->start);
}
+ if (pdata->resume)
+ return pdata->resume();
+
return 0;
}
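/*
 * Illustrative platform-data sketch for the suspend/resume hooks consumed
 * above. Only the .suspend and .resume members are assumed here (they match
 * the pdata->suspend(state) and pdata->resume() calls in this file, using
 * the usual struct pda_power_pdata); the board_* names are made up:
 *
 *	static int board_power_suspend(pm_message_t state)
 *	{
 *		return 0;	// quiesce board-specific charger logic here
 *	}
 *
 *	static int board_power_resume(void)
 *	{
 *		return 0;	// undo whatever suspend did
 *	}
 *
 *	static struct pda_power_pdata board_power_pdata = {
 *		.suspend	= board_power_suspend,
 *		.resume		= board_power_resume,
 *	};
 */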
#else
diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
index f38ba482be7..018de2b2699 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/power_supply.h
@@ -12,15 +12,12 @@
#ifdef CONFIG_SYSFS
-extern int power_supply_create_attrs(struct power_supply *psy);
-extern void power_supply_remove_attrs(struct power_supply *psy);
+extern void power_supply_init_attrs(struct device_type *dev_type);
extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
#else
-static inline int power_supply_create_attrs(struct power_supply *psy)
-{ return 0; }
-static inline void power_supply_remove_attrs(struct power_supply *psy) {}
+static inline void power_supply_init_attrs(struct device_type *dev_type) {}
#define power_supply_uevent NULL
#endif /* CONFIG_SYSFS */
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index cce75b40b43..91606bb5531 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
@@ -22,6 +23,8 @@
struct class *power_supply_class;
EXPORT_SYMBOL_GPL(power_supply_class);
+static struct device_type power_supply_dev_type;
+
static int __power_supply_changed_work(struct device *dev, void *data)
{
struct power_supply *psy = (struct power_supply *)data;
@@ -144,22 +147,39 @@ struct power_supply *power_supply_get_by_name(char *name)
}
EXPORT_SYMBOL_GPL(power_supply_get_by_name);
+static void power_supply_dev_release(struct device *dev)
+{
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ kfree(dev);
+}
+
int power_supply_register(struct device *parent, struct power_supply *psy)
{
- int rc = 0;
+ struct device *dev;
+ int rc;
- psy->dev = device_create(power_supply_class, parent, 0, psy,
- "%s", psy->name);
- if (IS_ERR(psy->dev)) {
- rc = PTR_ERR(psy->dev);
- goto dev_create_failed;
- }
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
- INIT_WORK(&psy->changed_work, power_supply_changed_work);
+ device_initialize(dev);
- rc = power_supply_create_attrs(psy);
+ dev->class = power_supply_class;
+ dev->type = &power_supply_dev_type;
+ dev->parent = parent;
+ dev->release = power_supply_dev_release;
+ dev_set_drvdata(dev, psy);
+ psy->dev = dev;
+
+ rc = kobject_set_name(&dev->kobj, "%s", psy->name);
+ if (rc)
+ goto kobject_set_name_failed;
+
+ rc = device_add(dev);
if (rc)
- goto create_attrs_failed;
+ goto device_add_failed;
+
+ INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_create_triggers(psy);
if (rc)
@@ -170,10 +190,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
- power_supply_remove_attrs(psy);
-create_attrs_failed:
device_unregister(psy->dev);
-dev_create_failed:
+kobject_set_name_failed:
+device_add_failed:
+ kfree(dev);
success:
return rc;
}
@@ -183,7 +203,6 @@ void power_supply_unregister(struct power_supply *psy)
{
flush_scheduled_work();
power_supply_remove_triggers(psy);
- power_supply_remove_attrs(psy);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
@@ -196,6 +215,7 @@ static int __init power_supply_class_init(void)
return PTR_ERR(power_supply_class);
power_supply_class->dev_uevent = power_supply_uevent;
+ power_supply_init_attrs(&power_supply_dev_type);
return 0;
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 5b6e352ac7c..9d30eeb8c81 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -31,9 +31,9 @@
#define POWER_SUPPLY_ATTR(_name) \
{ \
- .attr = { .name = #_name, .mode = 0444 }, \
+ .attr = { .name = #_name }, \
.show = power_supply_show_property, \
- .store = NULL, \
+ .store = power_supply_store_property, \
}
static struct device_attribute power_supply_attrs[];
@@ -41,6 +41,9 @@ static struct device_attribute power_supply_attrs[];
static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
+ static char *type_text[] = {
+ "Battery", "UPS", "Mains", "USB"
+ };
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
};
@@ -58,12 +61,15 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *capacity_level_text[] = {
"Unknown", "Critical", "Low", "Normal", "High", "Full"
};
- ssize_t ret;
+ ssize_t ret = 0;
struct power_supply *psy = dev_get_drvdata(dev);
const ptrdiff_t off = attr - power_supply_attrs;
union power_supply_propval value;
- ret = psy->get_property(psy, off, &value);
+ if (off == POWER_SUPPLY_PROP_TYPE)
+ value.intval = psy->type;
+ else
+ ret = psy->get_property(psy, off, &value);
if (ret < 0) {
if (ret == -ENODATA)
@@ -85,12 +91,37 @@ static ssize_t power_supply_show_property(struct device *dev,
return sprintf(buf, "%s\n", technology_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_TYPE)
+ return sprintf(buf, "%s\n", type_text[value.intval]);
else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
return sprintf(buf, "%s\n", value.strval);
return sprintf(buf, "%d\n", value.intval);
}
+static ssize_t power_supply_store_property(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count) {
+ ssize_t ret;
+ struct power_supply *psy = dev_get_drvdata(dev);
+ const ptrdiff_t off = attr - power_supply_attrs;
+ union power_supply_propval value;
+ long long_val;
+
+ /* TODO: support other types than int */
+ ret = strict_strtol(buf, 10, &long_val);
+ if (ret < 0)
+ return ret;
+
+ value.intval = long_val;
+
+ ret = psy->set_property(psy, off, &value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
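/*
 * Rough driver-side sketch of how a supply exposes a writable property
 * through the store path above; the foo_* names and the chosen property
 * are purely illustrative:
 *
 *	static int foo_set_property(struct power_supply *psy,
 *				    enum power_supply_property psp,
 *				    const union power_supply_propval *val)
 *	{
 *		if (psp != POWER_SUPPLY_PROP_CHARGE_FULL)
 *			return -EINVAL;
 *		return foo_write_charge_full_uah(psy, val->intval);
 *	}
 *
 *	static int foo_property_is_writeable(struct power_supply *psy,
 *					     enum power_supply_property psp)
 *	{
 *		return psp == POWER_SUPPLY_PROP_CHARGE_FULL;
 *	}
 *
 * power_supply_attr_is_visible() below then adds S_IWUSR for that
 * attribute, and a sysfs write lands in foo_set_property() via
 * power_supply_store_property().
 */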
+
/* Must be in the same order as POWER_SUPPLY_PROP_* */
static struct device_attribute power_supply_attrs[] = {
/* Properties of type `int' */
@@ -132,67 +163,61 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(time_to_empty_avg),
POWER_SUPPLY_ATTR(time_to_full_now),
POWER_SUPPLY_ATTR(time_to_full_avg),
+ POWER_SUPPLY_ATTR(type),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
POWER_SUPPLY_ATTR(serial_number),
};
-static ssize_t power_supply_show_static_attrs(struct device *dev,
- struct device_attribute *attr,
- char *buf) {
- static char *type_text[] = { "Battery", "UPS", "Mains", "USB" };
+static struct attribute *
+__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
+
+static mode_t power_supply_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int attrno)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
struct power_supply *psy = dev_get_drvdata(dev);
+ mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+ int i;
- return sprintf(buf, "%s\n", type_text[psy->type]);
-}
+ if (attrno == POWER_SUPPLY_PROP_TYPE)
+ return mode;
-static struct device_attribute power_supply_static_attrs[] = {
- __ATTR(type, 0444, power_supply_show_static_attrs, NULL),
-};
+ for (i = 0; i < psy->num_properties; i++) {
+ int property = psy->properties[i];
-int power_supply_create_attrs(struct power_supply *psy)
-{
- int rc = 0;
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++) {
- rc = device_create_file(psy->dev,
- &power_supply_static_attrs[i]);
- if (rc)
- goto statics_failed;
- }
+ if (property == attrno) {
+ if (psy->property_is_writeable &&
+ psy->property_is_writeable(psy, property) > 0)
+ mode |= S_IWUSR;
- for (j = 0; j < psy->num_properties; j++) {
- rc = device_create_file(psy->dev,
- &power_supply_attrs[psy->properties[j]]);
- if (rc)
- goto dynamics_failed;
+ return mode;
+ }
}
- goto succeed;
-
-dynamics_failed:
- while (j--)
- device_remove_file(psy->dev,
- &power_supply_attrs[psy->properties[j]]);
-statics_failed:
- while (i--)
- device_remove_file(psy->dev, &power_supply_static_attrs[i]);
-succeed:
- return rc;
+ return 0;
}
-void power_supply_remove_attrs(struct power_supply *psy)
+static struct attribute_group power_supply_attr_group = {
+ .attrs = __power_supply_attrs,
+ .is_visible = power_supply_attr_is_visible,
+};
+
+static const struct attribute_group *power_supply_attr_groups[] = {
+ &power_supply_attr_group,
+ NULL,
+};
+
+void power_supply_init_attrs(struct device_type *dev_type)
{
int i;
- for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++)
- device_remove_file(psy->dev, &power_supply_static_attrs[i]);
+ dev_type->groups = power_supply_attr_groups;
- for (i = 0; i < psy->num_properties; i++)
- device_remove_file(psy->dev,
- &power_supply_attrs[psy->properties[i]]);
+ for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
+ __power_supply_attrs[i] = &power_supply_attrs[i].attr;
}
static char *kstruprdup(const char *str, gfp_t gfp)
@@ -236,36 +261,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
if (!prop_buf)
return -ENOMEM;
- for (j = 0; j < ARRAY_SIZE(power_supply_static_attrs); j++) {
- struct device_attribute *attr;
- char *line;
-
- attr = &power_supply_static_attrs[j];
-
- ret = power_supply_show_static_attrs(dev, attr, prop_buf);
- if (ret < 0)
- goto out;
-
- line = strchr(prop_buf, '\n');
- if (line)
- *line = 0;
-
- attrname = kstruprdup(attr->attr.name, GFP_KERNEL);
- if (!attrname) {
- ret = -ENOMEM;
- goto out;
- }
-
- dev_dbg(dev, "Static prop %s=%s\n", attrname, prop_buf);
-
- ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
- kfree(attrname);
- if (ret)
- goto out;
- }
-
- dev_dbg(dev, "%zd dynamic props\n", psy->num_properties);
-
for (j = 0; j < psy->num_properties; j++) {
struct device_attribute *attr;
char *line;
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
new file mode 100644
index 00000000000..0cd9f67d33e
--- /dev/null
+++ b/drivers/power/test_power.c
@@ -0,0 +1,163 @@
+/*
+ * Power supply driver for testing.
+ *
+ * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/vermagic.h>
+
+static int test_power_ac_online = 1;
+static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING;
+
+static int test_power_get_ac_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = test_power_ac_online;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int test_power_get_battery_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "Test battery";
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Linux";
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = UTS_RELEASE;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = test_power_battery_status;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = 50;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ val->intval = 3600;
+ break;
+ default:
+ pr_info("%s: some properties deliberately report errors.\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property test_power_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property test_power_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static char *test_power_ac_supplied_to[] = {
+ "test_battery",
+};
+
+static struct power_supply test_power_supplies[] = {
+ {
+ .name = "test_ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .supplied_to = test_power_ac_supplied_to,
+ .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to),
+ .properties = test_power_ac_props,
+ .num_properties = ARRAY_SIZE(test_power_ac_props),
+ .get_property = test_power_get_ac_property,
+ }, {
+ .name = "test_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = test_power_battery_props,
+ .num_properties = ARRAY_SIZE(test_power_battery_props),
+ .get_property = test_power_get_battery_property,
+ },
+};
+
+static int __init test_power_init(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) {
+ ret = power_supply_register(NULL, &test_power_supplies[i]);
+ if (ret) {
+ pr_err("%s: failed to register %s\n", __func__,
+ test_power_supplies[i].name);
+ goto failed;
+ }
+ }
+
+ return 0;
+failed:
+ while (--i >= 0)
+ power_supply_unregister(&test_power_supplies[i]);
+ return ret;
+}
+module_init(test_power_init);
+
+static void __exit test_power_exit(void)
+{
+ int i;
+
+ /* Let's see how we handle changes... */
+ test_power_ac_online = 0;
+ test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
+ power_supply_changed(&test_power_supplies[i]);
+ pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
+ __func__);
+ ssleep(10);
+
+ for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
+ power_supply_unregister(&test_power_supplies[i]);
+}
+module_exit(test_power_exit);
+
+MODULE_DESCRIPTION("Power supply driver for testing");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 2eab35aab31..ee04936b2db 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -61,7 +61,7 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
mutex_lock(&bat_lock);
gpio_set_value(bat->gpio_bat, 1);
msleep(5);
- value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
+ value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
bat->adc_bat);
gpio_set_value(bat->gpio_bat, 0);
mutex_unlock(&bat_lock);
@@ -81,7 +81,7 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
mutex_lock(&bat_lock);
gpio_set_value(bat->gpio_temp, 1);
msleep(5);
- value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data,
+ value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
bat->adc_temp);
gpio_set_value(bat->gpio_temp, 0);
mutex_unlock(&bat_lock);
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 875c4d0f776..fbcc36dae47 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -537,9 +537,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_battery;
irq = platform_get_irq_byname(pdev, "SYSLO");
- ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
- IRQF_TRIGGER_RISING, "SYSLO",
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+ IRQF_TRIGGER_RISING, "System power low",
+ power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
@@ -547,9 +547,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq,
- IRQF_TRIGGER_RISING, "Power source",
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+ IRQF_TRIGGER_RISING, "Power source",
+ power);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
irq, ret);
@@ -558,10 +558,10 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq,
- IRQF_TRIGGER_RISING,
- wm831x_bat_irqs[i],
- power);
+ ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+ IRQF_TRIGGER_RISING,
+ wm831x_bat_irqs[i],
+ power);
if (ret != 0) {
dev_err(&pdev->dev,
"Failed to request %s IRQ %d: %d\n",
@@ -575,13 +575,13 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
err_bat_irq:
for (; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
- wm831x_free_irq(wm831x, irq, power);
+ free_irq(irq, power);
err_usb:
power_supply_unregister(usb);
err_battery:
@@ -596,19 +596,18 @@ err_kmalloc:
static __devexit int wm831x_power_remove(struct platform_device *pdev)
{
struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
- struct wm831x *wm831x = wm831x_power->wm831x;
int irq, i;
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
irq = platform_get_irq_byname(pdev, "SYSLO");
- wm831x_free_irq(wm831x, irq, wm831x_power);
+ free_irq(irq, wm831x_power);
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 94c70650aaf..4e8afce0c81 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -308,6 +308,9 @@ static void __exit wm97xx_bat_exit(void)
platform_driver_unregister(&wm97xx_bat_driver);
}
+/* This interface is deprecated, as is linux/wm97xx_batt.h */
+void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
+
void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
{
gpdata = data;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
new file mode 100644
index 00000000000..9cca465436e
--- /dev/null
+++ b/drivers/power/z2_battery.c
@@ -0,0 +1,328 @@
+/*
+ * Battery measurement code for Zipit Z2
+ *
+ * Copyright (C) 2009 Peter Edwards <sweetlilmre@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/z2_battery.h>
+
+#define Z2_DEFAULT_NAME "Z2"
+
+struct z2_charger {
+ struct z2_battery_info *info;
+ int bat_status;
+ struct i2c_client *client;
+ struct power_supply batt_ps;
+ struct mutex work_lock;
+ struct work_struct bat_work;
+};
+
+static unsigned long z2_read_bat(struct z2_charger *charger)
+{
+ int data;
+ data = i2c_smbus_read_byte_data(charger->client,
+ charger->info->batt_I2C_reg);
+ if (data < 0)
+ return 0;
+
+ return data * charger->info->batt_mult / charger->info->batt_div;
+}
+
+static int z2_batt_get_property(struct power_supply *batt_ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
+ batt_ps);
+ struct z2_battery_info *info = charger->info;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = charger->bat_status;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = info->batt_tech;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (info->batt_I2C_reg >= 0)
+ val->intval = z2_read_bat(charger);
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ if (info->max_voltage >= 0)
+ val->intval = info->max_voltage;
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (info->min_voltage >= 0)
+ val->intval = info->min_voltage;
+ else
+ return -EINVAL;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void z2_batt_ext_power_changed(struct power_supply *batt_ps)
+{
+ struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
+ batt_ps);
+ schedule_work(&charger->bat_work);
+}
+
+static void z2_batt_update(struct z2_charger *charger)
+{
+ int old_status = charger->bat_status;
+ struct z2_battery_info *info;
+
+ info = charger->info;
+
+ mutex_lock(&charger->work_lock);
+
+ charger->bat_status = (info->charge_gpio >= 0) ?
+ (gpio_get_value(info->charge_gpio) ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING) :
+ POWER_SUPPLY_STATUS_UNKNOWN;
+
+ if (old_status != charger->bat_status) {
+ pr_debug("%s: %i -> %i\n", charger->batt_ps.name, old_status,
+ charger->bat_status);
+ power_supply_changed(&charger->batt_ps);
+ }
+
+ mutex_unlock(&charger->work_lock);
+}
+
+static void z2_batt_work(struct work_struct *work)
+{
+ struct z2_charger *charger;
+ charger = container_of(work, struct z2_charger, bat_work);
+ z2_batt_update(charger);
+}
+
+static irqreturn_t z2_charge_switch_irq(int irq, void *devid)
+{
+ struct z2_charger *charger = devid;
+ schedule_work(&charger->bat_work);
+ return IRQ_HANDLED;
+}
+
+static int z2_batt_ps_init(struct z2_charger *charger, int props)
+{
+ int i = 0;
+ enum power_supply_property *prop;
+ struct z2_battery_info *info = charger->info;
+
+ if (info->batt_tech >= 0)
+ props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
+ if (info->batt_I2C_reg >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */
+ if (info->max_voltage >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */
+ if (info->min_voltage >= 0)
+ props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
+
+ prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ prop[i++] = POWER_SUPPLY_PROP_PRESENT;
+ if (info->charge_gpio >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_STATUS;
+ if (info->batt_tech >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
+ if (info->batt_I2C_reg >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ if (info->max_voltage >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX;
+ if (info->min_voltage >= 0)
+ prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN;
+
+ if (!info->batt_name) {
+ dev_info(&charger->client->dev,
+ "Please consider setting proper battery "
+ "name in platform definition file, falling "
+			"back to name \"" Z2_DEFAULT_NAME "\"\n");
+ charger->batt_ps.name = Z2_DEFAULT_NAME;
+ } else
+ charger->batt_ps.name = info->batt_name;
+
+ charger->batt_ps.properties = prop;
+ charger->batt_ps.num_properties = props;
+ charger->batt_ps.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->batt_ps.get_property = z2_batt_get_property;
+ charger->batt_ps.external_power_changed = z2_batt_ext_power_changed;
+ charger->batt_ps.use_for_apm = 1;
+
+ return 0;
+}
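/*
 * Rough example of the platform data this driver expects, limited to the
 * z2_battery_info fields actually read above; the register, GPIO and
 * scaling values are placeholders rather than real Zipit Z2 numbers:
 *
 *	static struct z2_battery_info z2_batt_pdata = {
 *		.batt_I2C_reg	= 0x02,
 *		.batt_mult	= 1000,
 *		.batt_div	= 16,
 *		.batt_tech	= POWER_SUPPLY_TECHNOLOGY_LION,
 *		.min_voltage	= 2400000,
 *		.max_voltage	= 3700000,
 *		.charge_gpio	= 0,
 *		.batt_name	= "Z2",
 *	};
 *
 * It would typically be attached as .platform_data of the "aer915" I2C
 * board info entry registered by the machine code.
 */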
+
+static int __devinit z2_batt_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
+ struct z2_charger *charger;
+ struct z2_battery_info *info = client->dev.platform_data;
+
+ if (info == NULL) {
+ dev_err(&client->dev,
+ "Please set platform device platform_data"
+ " to a valid z2_battery_info pointer!\n");
+ return -EINVAL;
+ }
+
+ charger = kzalloc(sizeof(*charger), GFP_KERNEL);
+ if (charger == NULL)
+ return -ENOMEM;
+
+ charger->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
+ charger->info = info;
+ charger->client = client;
+ i2c_set_clientdata(client, charger);
+
+ mutex_init(&charger->work_lock);
+
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
+ ret = gpio_request(info->charge_gpio, "BATT CHRG");
+ if (ret)
+ goto err;
+
+ ret = gpio_direction_input(info->charge_gpio);
+ if (ret)
+ goto err2;
+
+ set_irq_type(gpio_to_irq(info->charge_gpio),
+ IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(gpio_to_irq(info->charge_gpio),
+ z2_charge_switch_irq, IRQF_DISABLED,
+ "AC Detect", charger);
+ if (ret)
+ goto err3;
+ }
+
+ ret = z2_batt_ps_init(charger, props);
+ if (ret)
+ goto err3;
+
+ INIT_WORK(&charger->bat_work, z2_batt_work);
+
+ ret = power_supply_register(&client->dev, &charger->batt_ps);
+ if (ret)
+ goto err4;
+
+ schedule_work(&charger->bat_work);
+
+ return 0;
+
+err4:
+ kfree(charger->batt_ps.properties);
+err3:
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
+ free_irq(gpio_to_irq(info->charge_gpio), charger);
+err2:
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
+ gpio_free(info->charge_gpio);
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit z2_batt_remove(struct i2c_client *client)
+{
+ struct z2_charger *charger = i2c_get_clientdata(client);
+ struct z2_battery_info *info = charger->info;
+
+ flush_scheduled_work();
+ power_supply_unregister(&charger->batt_ps);
+
+ kfree(charger->batt_ps.properties);
+ if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
+ free_irq(gpio_to_irq(info->charge_gpio), charger);
+ gpio_free(info->charge_gpio);
+ }
+
+ kfree(charger);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
+{
+ flush_scheduled_work();
+ return 0;
+}
+
+static int z2_batt_resume(struct i2c_client *client)
+{
+ struct z2_charger *charger = i2c_get_clientdata(client);
+
+ schedule_work(&charger->bat_work);
+ return 0;
+}
+#else
+#define z2_batt_suspend NULL
+#define z2_batt_resume NULL
+#endif
+
+static const struct i2c_device_id z2_batt_id[] = {
+ { "aer915", 0 },
+ { }
+};
+
+static struct i2c_driver z2_batt_driver = {
+ .driver = {
+ .name = "z2-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = z2_batt_probe,
+ .remove = z2_batt_remove,
+ .suspend = z2_batt_suspend,
+ .resume = z2_batt_resume,
+ .id_table = z2_batt_id,
+};
+
+static int __init z2_batt_init(void)
+{
+ return i2c_add_driver(&z2_batt_driver);
+}
+
+static void __exit z2_batt_exit(void)
+{
+ i2c_del_driver(&z2_batt_driver);
+}
+
+module_init(z2_batt_init);
+module_exit(z2_batt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
+MODULE_DESCRIPTION("Zipit Z2 battery driver");
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index 3cbaf1811bd..d37c445f0ed 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -119,7 +119,7 @@ enum ps3_sys_manager_service_id {
* enum ps3_sys_manager_attr - Notification attribute (bit position mask).
* @PS3_SM_ATTR_POWER: Power button.
* @PS3_SM_ATTR_RESET: Reset button, not available on retail console.
- * @PS3_SM_ATTR_THERMAL: Sytem thermal alert.
+ * @PS3_SM_ATTR_THERMAL: System thermal alert.
* @PS3_SM_ATTR_CONTROLLER: Remote controller event.
* @PS3_SM_ATTR_ALL: Logical OR of all.
*
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822ad84a..070211a5955 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT
---help---
Amount of time a discovery node waits for a host to complete
enumeration before giving up.
+
+config RAPIDIO_ENABLE_RX_TX_PORTS
+ bool "Enable RapidIO Input/Output Ports"
+ depends on RAPIDIO
+ ---help---
+	  The RapidIO specification describes an Output port transmit
+	  enable and an Input port receive enable. The recommended state
+	  for Input and Output ports is disabled. When this switch is set,
+	  the RapidIO subsystem will enable all ports for Input/Output
+	  direction to allow traffic other than Maintenance transfers.
+
+source "drivers/rapidio/switches/Kconfig"
+
+config RAPIDIO_DEBUG
+ bool "RapidIO subsystem debug messages"
+ depends on RAPIDIO
+ help
+ Say Y here if you want the RapidIO subsystem to produce a bunch of
+ debug messages to the system log. Select this if you are having a
+ problem with the RapidIO subsystem and want to see more of what is
+ going on.
+
+ If you are unsure about this, say N here.
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e1818de5..b6139fe187b 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO) += switches/
+
+ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 45415096c29..8070e074c73 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write/Error Management initialization and handling
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - Added Input- Output- enable functionality, to allow full communication
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -31,15 +39,16 @@
LIST_HEAD(rio_devices);
static LIST_HEAD(rio_switches);
-#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef
-
static void rio_enum_timeout(unsigned long);
+static void rio_init_em(struct rio_dev *rdev);
+
DEFINE_SPINLOCK(rio_global_list_lock);
static int next_destid = 0;
static int next_switchid = 0;
static int next_net = 0;
+static int next_comptag;
static struct timer_list rio_enum_timer =
TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = {
-1,
};
-static int rio_sport_phys_table[] = {
- RIO_EFB_PAR_EP_FREE_ID,
- RIO_EFB_SER_EP_FREE_ID,
- -1,
-};
-
/**
* rio_get_device_id - Get the base/extended device id for a device
* @port: RIO master port
@@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port)
u32 result;
int ret = 0;
- /* Write component tag CSR magic complete value */
- rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR,
- RIO_ENUM_CMPL_MAGIC);
- list_for_each_entry(rdev, &rio_devices, global_list)
- rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR,
- RIO_ENUM_CMPL_MAGIC);
+ /* Assign component tag to all devices */
+ next_comptag = 1;
+ rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
+
+ list_for_each_entry(rdev, &rio_devices, global_list) {
+ /* Mark device as discovered */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ &result);
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+ result | RIO_PORT_GEN_DISCOVERED);
+
+ rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
+ rdev->comp_tag = next_comptag++;
+ if (next_comptag >= 0x10000) {
+ pr_err("RIO: Component Tag Counter Overflow\n");
+ break;
+ }
+ }
/* Release host device id locks */
rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev)
}
/**
- * rio_route_set_ops- Sets routing operations for a particular vendor switch
+ * rio_switch_init - Sets switch operations for a particular vendor switch
* @rdev: RIO device
+ * @do_enum: Enumeration/Discovery mode flag
*
- * Searches the RIO route ops table for known switch types. If the vid
- * and did match a switch table entry, then set the add_entry() and
- * get_entry() ops to the table entry values.
+ * Searches the RIO switch ops table for known switch types. If the vid
+ * and did match a switch table entry, then call switch initialization
+ * routine to setup switch-specific routines.
*/
-static void rio_route_set_ops(struct rio_dev *rdev)
+static void rio_switch_init(struct rio_dev *rdev, int do_enum)
{
- struct rio_route_ops *cur = __start_rio_route_ops;
- struct rio_route_ops *end = __end_rio_route_ops;
+ struct rio_switch_ops *cur = __start_rio_switch_ops;
+ struct rio_switch_ops *end = __end_rio_switch_ops;
while (cur < end) {
if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
- pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev));
- rdev->rswitch->add_entry = cur->add_hook;
- rdev->rswitch->get_entry = cur->get_hook;
+ pr_debug("RIO: calling init routine for %s\n",
+ rio_name(rdev));
+ cur->init_hook(rdev, do_enum);
+ break;
}
cur++;
}
+ if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
+ pr_debug("RIO: adding STD routing ops for %s\n",
+ rio_name(rdev));
+ rdev->rswitch->add_entry = rio_std_route_add_entry;
+ rdev->rswitch->get_entry = rio_std_route_get_entry;
+ rdev->rswitch->clr_table = rio_std_route_clr_table;
+ }
+
if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
printk(KERN_ERR "RIO: missing routing ops for %s\n",
rio_name(rdev));
@@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
}
/**
+ * rio_enable_rx_tx_port - enable input receiver and output transmitter of
+ * given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 selects the local port, otherwise a far-end device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port (-number on switch) to enable on a far-end device
+ *
+ * Returns 0 on success (or when CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set)
+ * or -EIO if accessing the Port n Control CSR (EXT_PTR+0x3C) of a far-end
+ * device fails.
+ */
+inline int rio_enable_rx_tx_port(struct rio_mport *port,
+ int local, u16 destid,
+ u8 hopcount, u8 port_num) {
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+ u32 regval;
+ u32 ext_ftr_ptr;
+
+ /*
+ * enable rx input tx output port
+ */
+ pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
+ "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+ ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+ if (local) {
+ rio_local_read_config_32(port, ext_ftr_ptr +
+ RIO_PORT_N_CTL_CSR(0),
+ &regval);
+ } else {
+ if (rio_mport_read_config_32(port, destid, hopcount,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+ return -EIO;
+ }
+
+ if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+ /* serial */
+ regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+ | RIO_PORT_N_CTL_EN_TX_SER;
+ } else {
+ /* parallel */
+ regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+ | RIO_PORT_N_CTL_EN_TX_PAR;
+ }
+
+ if (local) {
+ rio_local_write_config_32(port, ext_ftr_ptr +
+ RIO_PORT_N_CTL_CSR(0), regval);
+ } else {
+ if (rio_mport_write_config_32(port, destid, hopcount,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+ return -EIO;
+ }
+#endif
+ return 0;
+}
+
+/**
* rio_setup_device- Allocates and sets up a RIO device
* @net: RIO network
* @port: Master port to send transactions
@@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rdev->asm_rev = result >> 16;
rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
&rdev->pef);
- if (rdev->pef & RIO_PEF_EXT_FEATURES)
+ if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = result & 0xffff;
+ rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
+ hopcount);
+
+ rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+ hopcount, RIO_EFB_ERR_MGMNT);
+ }
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
&rdev->src_ops);
@@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
if (rio_is_switch(rdev)) {
rio_mport_read_config_32(port, destid, hopcount,
RIO_SWP_INFO_CAR, &rdev->swpinfo);
- rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL);
+ rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
if (!rswitch)
goto cleanup;
rswitch->switchid = next_switchid;
rswitch->hopcount = hopcount;
rswitch->destid = destid;
+ rswitch->port_ok = 0;
rswitch->route_table = kzalloc(sizeof(u8)*
RIO_MAX_ROUTE_ENTRIES(port->sys_size),
GFP_KERNEL);
@@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
rdev->rswitch = rswitch;
dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
rdev->rswitch->switchid);
- rio_route_set_ops(rdev);
+ rio_switch_init(rdev, do_enum);
+
+ if (do_enum && rdev->rswitch->clr_table)
+ rdev->rswitch->clr_table(port, destid, hopcount,
+ RIO_GLOBAL_TABLE);
list_add_tail(&rswitch->node, &rio_switches);
- } else
+ } else {
+ if (do_enum)
+			/* Enable Input Output Port (transmitter, receiver) */
+ rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
+
dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
rdev->destid);
+ }
rdev->dev.bus = &rio_bus_type;
@@ -414,23 +516,29 @@ cleanup:
*
* Reads the port error status CSR for a particular switch port to
* determine if the port has an active link. Returns
- * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
+ * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
* inactive.
*/
static int
rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
{
- u32 result;
+ u32 result = 0;
u32 ext_ftr_ptr;
- int *entry = rio_sport_phys_table;
-
- do {
- if ((ext_ftr_ptr =
- rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
+ ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
+ while (ext_ftr_ptr) {
+ rio_mport_read_config_32(port, destid, hopcount,
+ ext_ftr_ptr, &result);
+ result = RIO_GET_BLOCK_ID(result);
+ if ((result == RIO_EFB_SER_EP_FREE_ID) ||
+ (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
+ (result == RIO_EFB_SER_EP_FREC_ID))
break;
- } while (*++entry >= 0);
+
+ ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
+ ext_ftr_ptr);
+ }
if (ext_ftr_ptr)
rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
RIO_PORT_N_ERR_STS_CSR(sport),
&result);
- return (result & PORT_N_ERR_STS_PORT_OK);
+ return result & RIO_PORT_N_ERR_STS_PORT_OK;
+}
+
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire the host device lock for the specified device.
+ * Returns 0 if the device lock was acquired or -EINVAL if the timeout expires.
+ */
+static int
+rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
+{
+ u32 result;
+ int tcnt = 0;
+
+ /* Attempt to acquire device lock */
+ rio_mport_write_config_32(port, destid, hopcount,
+ RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_HOST_DID_LOCK_CSR, &result);
+
+ while (result != port->host_deviceid) {
+ if (wait_ms != 0 && tcnt == wait_ms) {
+ pr_debug("RIO: timeout when locking device %x:%x\n",
+ destid, hopcount);
+ return -EINVAL;
+ }
+
+ /* Delay a bit */
+ mdelay(1);
+ tcnt++;
+ /* Try to acquire device lock again */
+ rio_mport_write_config_32(port, destid,
+ hopcount,
+ RIO_HOST_DID_LOCK_CSR,
+ port->host_deviceid);
+ rio_mport_read_config_32(port, destid,
+ hopcount,
+ RIO_HOST_DID_LOCK_CSR, &result);
+ }
+
+ return 0;
+}
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if the device lock was released or -EINVAL on failure.
+ */
+static int
+rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
+{
+ u32 result;
+
+ /* Release device lock */
+ rio_mport_write_config_32(port, destid,
+ hopcount,
+ RIO_HOST_DID_LOCK_CSR,
+ port->host_deviceid);
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_HOST_DID_LOCK_CSR, &result);
+ if ((result & 0xffff) != 0xffff) {
+ pr_debug("RIO: badness when releasing device lock %x:%x\n",
+ destid, hopcount);
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
* @table: Routing table ID
* @route_destid: Destination ID to be routed
* @route_port: Port number to be routed
+ * @lock: lock switch device flag
*
* Calls the switch specific add_entry() method to add a route entry
* on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
* %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
* on failure.
*/
-static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
- u16 table, u16 route_destid, u8 route_port)
+static int
+rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+ u16 table, u16 route_destid, u8 route_port, int lock)
{
- return rswitch->add_entry(mport, rswitch->destid,
+ int rc;
+
+ if (lock) {
+ rc = rio_lock_device(mport, rswitch->destid,
+ rswitch->hopcount, 1000);
+ if (rc)
+ return rc;
+ }
+
+ rc = rswitch->add_entry(mport, rswitch->destid,
rswitch->hopcount, table,
route_destid, route_port);
+ if (lock)
+ rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+
+ return rc;
}
/**
@@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
* @table: Routing table ID
* @route_destid: Destination ID to be routed
* @route_port: Pointer to read port number into
+ * @lock: lock switch device flag
*
* Calls the switch specific get_entry() method to read a route entry
* in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
*/
static int
rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
- u16 route_destid, u8 * route_port)
+ u16 route_destid, u8 *route_port, int lock)
{
- return rswitch->get_entry(mport, rswitch->destid,
+ int rc;
+
+ if (lock) {
+ rc = rio_lock_device(mport, rswitch->destid,
+ rswitch->hopcount, 1000);
+ if (rc)
+ return rc;
+ }
+
+ rc = rswitch->get_entry(mport, rswitch->destid,
rswitch->hopcount, table,
route_destid, route_port);
+ if (lock)
+ rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+
+ return rc;
}
/**
@@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
sw_inport = rio_get_swpinfo_inport(port,
RIO_ANY_DESTID(port->sys_size), hopcount);
rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
- port->host_deviceid, sw_inport);
+ port->host_deviceid, sw_inport, 0);
rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
for (destid = 0; destid < next_destid; destid++) {
if (destid == port->host_deviceid)
continue;
rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
- destid, sw_inport);
+ destid, sw_inport, 0);
rdev->rswitch->route_table[destid] = sw_inport;
}
@@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
rio_name(rdev), rdev->vid, rdev->did, num_ports);
sw_destid = next_destid;
for (port_num = 0; port_num < num_ports; port_num++) {
- if (sw_inport == port_num)
+		/* Enable Input Output Port (transmitter, receiver) */
+ rio_enable_rx_tx_port(port, 0,
+ RIO_ANY_DESTID(port->sys_size),
+ hopcount, port_num);
+
+ if (sw_inport == port_num) {
+ rdev->rswitch->port_ok |= (1 << port_num);
continue;
+ }
cur_destid = next_destid;
@@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
+ rdev->rswitch->port_ok |= (1 << port_num);
rio_route_add_entry(port, rdev->rswitch,
RIO_GLOBAL_TABLE,
RIO_ANY_DESTID(port->sys_size),
- port_num);
+ port_num, 0);
if (rio_enum_peer(net, port, hopcount + 1) < 0)
return -1;
@@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
rio_route_add_entry(port, rdev->rswitch,
RIO_GLOBAL_TABLE,
destid,
- port_num);
+ port_num,
+ 0);
rdev->rswitch->
route_table[destid] =
port_num;
}
}
+ } else {
+ /* If switch supports Error Management,
+ * set PORT_LOCKOUT bit for unused port
+ */
+ if (rdev->em_efptr)
+ rio_set_port_lockout(rdev, port_num, 1);
+
+ rdev->rswitch->port_ok &= ~(1 << port_num);
}
}
+	/* Direct Port-write messages to the enumerating host */
+ if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
+ (rdev->em_efptr)) {
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
+ (port->host_deviceid << 16) |
+ (port->sys_size << 15));
+ }
+
+ rio_init_em(rdev);
+
/* Check for empty switch */
if (next_destid == sw_destid) {
next_destid++;
@@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
* rio_enum_complete- Tests if enumeration of a network is complete
* @port: Master port to send transaction
*
- * Tests the Component Tag CSR for presence of the magic enumeration
- * complete flag. Return %1 if enumeration is complete or %0 if
+ * Tests the Component Tag CSR for non-zero value (enumeration
+ * complete flag). Return %1 if enumeration is complete or %0 if
* enumeration is incomplete.
*/
static int rio_enum_complete(struct rio_mport *port)
{
u32 tag_csr;
- int ret = 0;
rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
-
- if (tag_csr == RIO_ENUM_CMPL_MAGIC)
- ret = 1;
-
- return ret;
+ return (tag_csr & 0xffff) ? 1 : 0;
}
/**
@@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
+
+ rio_lock_device(port, destid, hopcount, 1000);
+
for (ndestid = 0;
ndestid < RIO_ANY_DESTID(port->sys_size);
ndestid++) {
rio_route_get_entry(port, rdev->rswitch,
RIO_GLOBAL_TABLE,
ndestid,
- &route_port);
+ &route_port, 0);
if (route_port == port_num)
break;
}
+ rio_unlock_device(port, destid, hopcount);
if (rio_disc_peer
(net, port, ndestid, hopcount + 1) < 0)
return -1;
@@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
*
* Reads the port error status CSR for the master port to
* determine if the port has an active link. Returns
- * %PORT_N_ERR_STS_PORT_OK if the master port is active
+ * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
* or %0 if it is inactive.
*/
static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port)
RIO_PORT_N_ERR_STS_CSR(port->index),
&result);
- return (result & PORT_N_ERR_STS_PORT_OK);
+ return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
/**
@@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port)
continue;
if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+			/* Skip if destid ends in an empty switch */
+ if (rswitch->destid == destid)
+ continue;
sport = rio_get_swpinfo_inport(port,
rswitch->destid, rswitch->hopcount);
if (rswitch->add_entry) {
- rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport);
+ rio_route_add_entry(port, rswitch,
+ RIO_GLOBAL_TABLE, destid,
+ sport, 0);
rswitch->route_table[destid] = sport;
}
}
@@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port)
}
/**
+ * rio_init_em - Initializes RIO Error Management (for switches)
+ * @rdev: RIO device
+ *
+ * For each enumerated switch, call device-specific error management
+ * initialization routine (if supplied by the switch driver).
+ */
+static void rio_init_em(struct rio_dev *rdev)
+{
+ if (rio_is_switch(rdev) && (rdev->em_efptr) &&
+ (rdev->rswitch->em_init)) {
+ rdev->rswitch->em_init(rdev);
+ }
+}
+
+/**
+ * rio_pw_enable - Enables/disables port-write handling by a master port
+ * @port: Master port associated with port-write handling
+ * @enable: 1=enable, 0=disable
+ */
+static void rio_pw_enable(struct rio_mport *port, int enable)
+{
+ if (port->ops->pwenable)
+ port->ops->pwenable(port, enable);
+}
+
+/**
* rio_enum_mport- Start enumeration through a master port
* @mport: Master port to send transactions
*
@@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
rc = -ENOMEM;
goto out;
}
+
+	/* Enable Input Output Port (transmitter, receiver) */
+ rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
+
if (rio_enum_peer(net, mport, 0) < 0) {
/* A higher priority host won enumeration, bail. */
printk(KERN_INFO
@@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
}
rio_update_route_tables(mport);
rio_clear_locks(mport);
+ rio_pw_enable(mport, 1);
} else {
printk(KERN_INFO "RIO: master port %d link inactive\n",
mport->id);
@@ -945,15 +1219,22 @@ static void rio_build_route_tables(void)
u8 sport;
list_for_each_entry(rdev, &rio_devices, global_list)
- if (rio_is_switch(rdev))
- for (i = 0;
- i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
- i++) {
- if (rio_route_get_entry
- (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE,
- i, &sport) < 0)
- continue;
- rdev->rswitch->route_table[i] = sport;
+ if (rio_is_switch(rdev)) {
+ rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
+ rdev->rswitch->hopcount, 1000);
+ for (i = 0;
+ i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
+ i++) {
+ if (rio_route_get_entry
+ (rdev->net->hport, rdev->rswitch,
+ RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+ continue;
+ rdev->rswitch->route_table[i] = sport;
+ }
+
+ rio_unlock_device(rdev->net->hport,
+ rdev->rswitch->destid,
+ rdev->rswitch->hopcount);
}
}
@@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
del_timer_sync(&rio_enum_timer);
pr_debug("done\n");
+
+ /* Read DestID assigned by enumerator */
+ rio_local_read_config_32(mport, RIO_DID_CSR,
+ &mport->host_deviceid);
+ mport->host_deviceid = RIO_GET_DID(mport->sys_size,
+ mport->host_deviceid);
+
if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
0) < 0) {
printk(KERN_INFO
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index ba742e82c57..00b47565835 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -68,7 +68,8 @@ struct device_attribute rio_dev_attrs[] = {
};
static ssize_t
-rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
@@ -139,7 +140,8 @@ rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-rio_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+rio_write_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c780008..08fa453af97 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write/Error Management initialization and handling
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -333,6 +337,331 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
}
/**
+ * rio_request_inb_pwrite - request inbound port-write message service
+ * @rdev: RIO device to which register inbound port-write callback routine
+ * @pwcback: Callback routine to execute when port-write is received
+ *
+ * Binds a port-write callback function to the RapidIO device.
+ * Returns 0 if the request has been satisfied.
+ */
+int rio_request_inb_pwrite(struct rio_dev *rdev,
+ int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
+{
+ int rc = 0;
+
+ spin_lock(&rio_global_list_lock);
+ if (rdev->pwcback != NULL)
+ rc = -ENOMEM;
+ else
+ rdev->pwcback = pwcback;
+
+ spin_unlock(&rio_global_list_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
+
+/**
+ * rio_release_inb_pwrite - release inbound port-write message service
+ * @rdev: RIO device which registered for inbound port-write callback
+ *
+ * Removes callback from the rio_dev structure. Returns 0 if the request
+ * has been satisfied.
+ */
+int rio_release_inb_pwrite(struct rio_dev *rdev)
+{
+ int rc = -ENOMEM;
+
+ spin_lock(&rio_global_list_lock);
+ if (rdev->pwcback) {
+ rdev->pwcback = NULL;
+ rc = 0;
+ }
+
+ spin_unlock(&rio_global_list_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
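/*
 * Minimal sketch of a RIO device driver hooking the port-write service
 * registered above; foo_pw_handler is an illustrative name and the
 * registration would normally happen from that driver's probe routine:
 *
 *	static int foo_pw_handler(struct rio_dev *rdev,
 *				  union rio_pw_msg *msg, int step)
 *	{
 *		// handle the device-specific part of the message here;
 *		// returning 0 tells rio_inb_pwrite_handler() that no
 *		// further standard switch processing is needed
 *		return 0;
 *	}
 *
 *	if (rio_request_inb_pwrite(rdev, foo_pw_handler))
 *		dev_warn(&rdev->dev, "port-write callback already set\n");
 *
 * and rio_release_inb_pwrite(rdev) undoes the registration on removal.
 */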
+
+/**
+ * rio_mport_get_physefb - Helper function that returns register offset
+ * for Physical Layer Extended Features Block.
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ */
+u32
+rio_mport_get_physefb(struct rio_mport *port, int local,
+ u16 destid, u8 hopcount)
+{
+ u32 ext_ftr_ptr;
+ u32 ftr_header;
+
+ ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
+
+ while (ext_ftr_ptr) {
+ if (local)
+ rio_local_read_config_32(port, ext_ftr_ptr,
+ &ftr_header);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ ext_ftr_ptr, &ftr_header);
+
+ ftr_header = RIO_GET_BLOCK_ID(ftr_header);
+ switch (ftr_header) {
+
+ case RIO_EFB_SER_EP_ID_V13P:
+ case RIO_EFB_SER_EP_REC_ID_V13P:
+ case RIO_EFB_SER_EP_FREE_ID_V13P:
+ case RIO_EFB_SER_EP_ID:
+ case RIO_EFB_SER_EP_REC_ID:
+ case RIO_EFB_SER_EP_FREE_ID:
+ case RIO_EFB_SER_EP_FREC_ID:
+
+ return ext_ftr_ptr;
+
+ default:
+ break;
+ }
+
+ ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
+ hopcount, ext_ftr_ptr);
+ }
+
+ return ext_ftr_ptr;
+}
+
+/**
+ * rio_get_comptag - Begin or continue searching for a RIO device by component tag
+ * @comp_tag: RIO component tag to match
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @comp_tag, a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned. A new search
+ * is initiated by passing %NULL to the @from argument. Otherwise, if
+ * @from is not %NULL, searches continue from next device on the global
+ * list.
+ */
+static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
+{
+ struct list_head *n;
+ struct rio_dev *rdev;
+
+ spin_lock(&rio_global_list_lock);
+ n = from ? from->global_list.next : rio_devices.next;
+
+ while (n && (n != &rio_devices)) {
+ rdev = rio_dev_g(n);
+ if (rdev->comp_tag == comp_tag)
+ goto exit;
+ n = n->next;
+ }
+ rdev = NULL;
+exit:
+ spin_unlock(&rio_global_list_lock);
+ return rdev;
+}
+
+/**
+ * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
+ * @rdev: Pointer to RIO device control structure
+ * @pnum: Switch port number to set LOCKOUT bit
+ * @lock: Operation : set (=1) or clear (=0)
+ */
+int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
+{
+ u8 hopcount = 0xff;
+ u16 destid = rdev->destid;
+ u32 regval;
+
+ if (rdev->rswitch) {
+ destid = rdev->rswitch->destid;
+ hopcount = rdev->rswitch->hopcount;
+ }
+
+ rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
+ &regval);
+ if (lock)
+ regval |= RIO_PORT_N_CTL_LOCKOUT;
+ else
+ regval &= ~RIO_PORT_N_CTL_LOCKOUT;
+
+ rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
+ regval);
+ return 0;
+}
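
A short sketch of the intended use (illustrative only, helper name hypothetical): fence a suspect switch port while it is being serviced, then release it.

/* Illustration only: temporarily lock out traffic on one switch port */
static void service_switch_port(struct rio_dev *sw_rdev, u32 pnum)
{
	rio_set_port_lockout(sw_rdev, pnum, 1);	/* set LOCKOUT */
	/* ...device-specific port recovery would run here... */
	rio_set_port_lockout(sw_rdev, pnum, 0);	/* clear LOCKOUT */
}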
+
+/**
+ * rio_inb_pwrite_handler - process inbound port-write message
+ * @pw_msg: pointer to inbound port-write message
+ *
+ * Processes an inbound port-write message. Returns 0 if the request
+ * has been satisfied.
+ */
+int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
+{
+ struct rio_dev *rdev;
+ struct rio_mport *mport;
+ u8 hopcount;
+ u16 destid;
+ u32 err_status;
+ int rc, portnum;
+
+ rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+ if (rdev == NULL) {
+ /* Something bad here (probably enumeration error) */
+ pr_err("RIO: %s No matching device for CTag 0x%08x\n",
+ __func__, pw_msg->em.comptag);
+ return -EIO;
+ }
+
+ pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
+
+#ifdef DEBUG_PW
+ {
+ u32 i;
+ for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
+ pr_debug("0x%02x: %08x %08x %08x %08x",
+ i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
+ pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
+ i += 4;
+ }
+ pr_debug("\n");
+ }
+#endif
+
+ /* Call an external service function (if one is registered
+ * for this device). This may be the service for endpoints that send
+ * device-specific port-write messages. End-point messages are expected
+ * to be handled completely by the EP-specific device driver.
+ * For switches, rc == 0 signals that no standard processing is required.
+ */
+ if (rdev->pwcback != NULL) {
+ rc = rdev->pwcback(rdev, pw_msg, 0);
+ if (rc == 0)
+ return 0;
+ }
+
+ /* For End-point devices processing stops here */
+ if (!(rdev->pef & RIO_PEF_SWITCH))
+ return 0;
+
+ if (rdev->phys_efptr == 0) {
+ pr_err("RIO_PW: Bad switch initialization for %s\n",
+ rio_name(rdev));
+ return 0;
+ }
+
+ mport = rdev->net->hport;
+ destid = rdev->rswitch->destid;
+ hopcount = rdev->rswitch->hopcount;
+
+ /*
+ * Process the port-write notification from switch
+ */
+
+ portnum = pw_msg->em.is_port & 0xFF;
+
+ if (rdev->rswitch->em_handle)
+ rdev->rswitch->em_handle(rdev, portnum);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ &err_status);
+ pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
+
+ if (pw_msg->em.errdetect) {
+ pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
+ portnum, pw_msg->em.errdetect);
+ /* Clear EM Port N Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
+ }
+
+ if (pw_msg->em.ltlerrdet) {
+ pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
+ pw_msg->em.ltlerrdet);
+ /* Clear EM L/T Layer Error Detect CSR */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
+ }
+
+ /* Clear Port Errors */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
+
+ if (rdev->rswitch->port_ok & (1 << portnum)) {
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
+ rdev->rswitch->port_ok &= ~(1 << portnum);
+ rio_set_port_lockout(rdev, portnum, 1);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr +
+ RIO_PORT_N_ACK_STS_CSR(portnum),
+ RIO_PORT_N_ACK_CLEAR);
+
+ /* Schedule Extraction Service */
+ pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
+ rio_name(rdev), portnum);
+ }
+ } else {
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
+ rdev->rswitch->port_ok |= (1 << portnum);
+ rio_set_port_lockout(rdev, portnum, 0);
+
+ /* Schedule Insertion Service */
+ pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
+ rio_name(rdev), portnum);
+ }
+ }
+
+ /* Clear Port-Write Pending bit */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_PORT_N_ERR_STS_PW_PEND);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
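
This entry point is meant to be called by the mport driver once it has pulled a port-write message off the hardware. A hedged sketch of that hand-off (the controller-specific code that fills the message is assumed, not shown; helper name hypothetical):

/* Illustration only: hand a received port-write message to the RIO core */
static void my_mport_dispatch_pw(union rio_pw_msg *msg)
{
	if (rio_inb_pwrite_handler(msg) != 0)
		pr_debug("RIO: unclaimed port-write, comptag 0x%08x\n",
			 msg->em.comptag);
}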
+
+/**
+ * rio_mport_get_efb - get pointer to next extended features block
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @from: Offset of current Extended Feature block header (if 0, the search
+ * starts from ExtFeaturePtr)
+ */
+u32
+rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u32 from)
+{
+ u32 reg_val;
+
+ if (from == 0) {
+ if (local)
+ rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
+ &reg_val);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ RIO_ASM_INFO_CAR, &reg_val);
+ return reg_val & RIO_EXT_FTR_PTR_MASK;
+ } else {
+ if (local)
+ rio_local_read_config_32(port, from, &reg_val);
+ else
+ rio_mport_read_config_32(port, destid, hopcount,
+ from, &reg_val);
+ return RIO_GET_BLOCK_ID(reg_val);
+ }
+}
+
+/**
* rio_mport_get_feature - query for devices' extended features
* @port: Master port to issue transaction
* @local: Indicate a local master port or remote device access
@@ -451,6 +780,110 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
}
+/**
+ * rio_std_route_add_entry - Add switch route table entry using standard
+ * registers defined in RIO specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: destination port for specified destID
+ */
+int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ (u32)route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (u32)route_port);
+ }
+
+ udelay(10);
+ return 0;
+}
+
+/**
+ * rio_std_route_get_entry - Read switch route table entry (port number)
+ * associated with the specified destID using standard registers defined in RIO
+ * specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: returned destination port for specified destID
+ */
+int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ *route_port = (u8)result;
+ }
+
+ return 0;
+}
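
Taken together, the two standard-register helpers give enumeration code a spec-compliant way to program and verify a route when a switch needs no device-specific ops. A minimal sketch (illustrative only, helper name hypothetical):

/* Illustration only: program one global route entry and read it back */
static int program_and_check_route(struct rio_mport *mport, u16 sw_destid,
				   u8 hopcount, u16 route_destid, u8 port)
{
	u8 readback = RIO_INVALID_ROUTE;

	rio_std_route_add_entry(mport, sw_destid, hopcount,
				RIO_GLOBAL_TABLE, route_destid, port);
	rio_std_route_get_entry(mport, sw_destid, hopcount,
				RIO_GLOBAL_TABLE, route_destid, &readback);

	return (readback == port) ? 0 : -EIO;
}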
+
+/**
+ * rio_std_route_clr_table - Clear switch route table using standard registers
+ * defined in RIO specification rev.1.3.
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ */
+int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 max_destid = 0xff;
+ u32 i, pef, id_inc = 1, ext_cfg = 0;
+ u32 port_sel = RIO_INVALID_ROUTE;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_PEF_CAR, &pef);
+
+ if (mport->sys_size) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWITCH_RT_LIMIT,
+ &max_destid);
+ max_destid &= RIO_RT_MAX_DESTID;
+ }
+
+ if (pef & RIO_PEF_EXT_RT) {
+ ext_cfg = 0x80000000;
+ id_inc = 4;
+ port_sel = (RIO_INVALID_ROUTE << 24) |
+ (RIO_INVALID_ROUTE << 16) |
+ (RIO_INVALID_ROUTE << 8) |
+ RIO_INVALID_ROUTE;
+ }
+
+ for (i = 0; i <= max_destid;) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ ext_cfg | i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ port_sel);
+ i += id_inc;
+ }
+ }
+
+ udelay(10);
+ return 0;
+}
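
During enumeration a switch's routing table is typically wiped to RIO_INVALID_ROUTE before individual entries are installed; a hedged sketch of that sequence using only the helpers above (function name hypothetical):

/* Illustration only: reset a switch's global routing table, then route the
 * host's destination ID back out through a known port.
 */
static void reset_switch_routing(struct rio_mport *mport, u16 sw_destid,
				 u8 hopcount, u16 host_destid, u8 host_port)
{
	rio_std_route_clr_table(mport, sw_destid, hopcount, RIO_GLOBAL_TABLE);
	rio_std_route_add_entry(mport, sw_destid, hopcount, RIO_GLOBAL_TABLE,
				host_destid, host_port);
}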
+
static void rio_fixup_device(struct rio_dev *dev)
{
}
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02581f..f27b7a9c47d 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
u8 hopcount, int ftr);
+extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
+ u16 destid, u8 hopcount);
+extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u32 from);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_enum_mport(struct rio_mport *mport);
extern int rio_disc_mport(struct rio_mport *mport);
+extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
+ u8 hopcount, u16 table, u16 route_destid,
+ u8 route_port);
+extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
+ u8 hopcount, u16 table, u16 route_destid,
+ u8 *route_port);
+extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
+ u8 hopcount, u16 table);
+extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
/* Structures internal to the RIO core code */
extern struct device_attribute rio_dev_attrs[];
extern spinlock_t rio_global_list_lock;
-extern struct rio_route_ops __start_rio_route_ops[];
-extern struct rio_route_ops __end_rio_route_ops[];
+extern struct rio_switch_ops __start_rio_switch_ops[];
+extern struct rio_switch_ops __end_rio_switch_ops[];
/* Helpers internal to the RIO core code */
-#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \
- static struct rio_route_ops __rio_route_ops __used \
- __section(section)= { vid, did, add_hook, get_hook };
+#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
+ static const struct rio_switch_ops __rio_switch_##name __used \
+ __section(section) = { vid, did, init_hook };
/**
- * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations
+ * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
* @vid: RIO vendor ID
* @did: RIO device ID
- * @add_hook: Callback that adds a route entry
- * @get_hook: Callback that gets a route entry
+ * @init_hook: Callback that performs switch-specific initialization
*
- * Manipulating switch route tables in RIO is switch specific. This
- * registers a switch by vendor and device ID with two callbacks for
- * modifying and retrieving route entries in a switch. A &struct
- * rio_route_ops is initialized with the ops and placed into a
- * RIO-specific kernel section.
+ * Manipulating switch route tables and error management in RIO
+ * is switch specific. This registers a switch by vendor and device ID with an
+ * initialization callback for setting up switch operations and (if required)
+ * hardware initialization. A &struct rio_switch_ops is initialized with a
+ * pointer to the init routine and placed into a RIO-specific kernel section.
*/
-#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \
- DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \
- vid, did, add_hook, get_hook)
+#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \
+ DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
+ vid, did, init_hook)
#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 00000000000..2b4e9b2b663
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
+#
+# RapidIO switches configuration
+#
+config RAPIDIO_TSI57X
+ bool "IDT Tsi57x SRIO switches support"
+ depends on RAPIDIO
+ ---help---
+ Includes support for IDT Tsi57x family of serial RapidIO switches.
+
+config RAPIDIO_CPS_XX
+ bool "IDT CPS-xx SRIO switches support"
+ depends on RAPIDIO
+ ---help---
+ Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
+
+config RAPIDIO_TSI568
+ bool "Tsi568 SRIO switch support"
+ depends on RAPIDIO
+ default n
+ ---help---
+ Includes support for IDT Tsi568 serial RapidIO switch.
+
+config RAPIDIO_TSI500
+ bool "Tsi500 Parallel RapidIO switch support"
+ depends on RAPIDIO
+ default n
+ ---help---
+ Includes support for IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f830176..fe4adc3e8d5 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
# Makefile for RIO switches
#
-obj-$(CONFIG_RAPIDIO) += tsi500.o
+obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
+obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
+obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
+obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
+
+ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 00000000000..2c790c144f8
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
+/*
+ * IDT CPS RapidIO switches support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include "../rio.h"
+
+#define CPS_DEFAULT_ROUTE 0xde
+#define CPS_NO_ROUTE 0xdf
+
+#define IDTCPS_RIO_DOMAIN 0xf20020
+
+static int
+idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ result = (0xffffff00 & result) | (u32)route_port;
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
+ }
+
+ return 0;
+}
+
+static int
+idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ if (CPS_DEFAULT_ROUTE == (u8)result ||
+ CPS_NO_ROUTE == (u8)result)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)result;
+ }
+
+ return 0;
+}
+
+static int
+idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ for (i = 0x80000000; i <= 0x800000ff;) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (CPS_DEFAULT_ROUTE << 24) |
+ (CPS_DEFAULT_ROUTE << 16) |
+ (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
+ i += 4;
+ }
+ }
+
+ return 0;
+}
+
+static int
+idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDTCPS_RIO_DOMAIN, (u32)sw_domain);
+ return 0;
+}
+
+static int
+idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDTCPS_RIO_DOMAIN, &regval);
+
+ *sw_domain = (u8)(regval & 0xff);
+
+ return 0;
+}
+
+static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = idtcps_route_add_entry;
+ rdev->rswitch->get_entry = idtcps_route_get_entry;
+ rdev->rswitch->clr_table = idtcps_route_clr_table;
+ rdev->rswitch->set_domain = idtcps_set_domain;
+ rdev->rswitch->get_domain = idtcps_get_domain;
+ rdev->rswitch->em_init = NULL;
+ rdev->rswitch->em_handle = NULL;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23bd984..914eddd5aa4 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
/*
* RapidIO Tsi500 switch support
*
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * - Modified switch operations initialization.
+ *
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
@@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab
return ret;
}
-DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry);
+static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = tsi500_route_add_entry;
+ rdev->rswitch->get_entry = tsi500_route_get_entry;
+ rdev->rswitch->clr_table = NULL;
+ rdev->rswitch->set_domain = NULL;
+ rdev->rswitch->get_domain = NULL;
+ rdev->rswitch->em_init = NULL;
+ rdev->rswitch->em_handle = NULL;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 00000000000..f7fd7898606
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
+/*
+ * RapidIO Tsi568 switch support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * - Added EM support
+ * - Modified switch operations initialization.
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID 0x10070
+#define SPBC_ROUTE_CFG_PORT 0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
+
+#define TSI568_SP_MODE_BC 0x10004
+#define TSI568_SP_MODE_PW_DIS 0x08000000
+
+static int
+tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, route_port);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table),
+ route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), route_port);
+ }
+
+ udelay(10);
+
+ return 0;
+}
+
+static int
+tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ int ret = 0;
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, &result);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table),
+ route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), &result);
+ }
+
+ *route_port = result;
+ if (*route_port > 15)
+ ret = -1;
+
+ return ret;
+}
+
+static int
+tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 route_idx;
+ u32 lut_size;
+
+ lut_size = (mport->sys_size) ? 0x1ff : 0xff;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT,
+ RIO_INVALID_ROUTE);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table),
+ 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table),
+ RIO_INVALID_ROUTE);
+ }
+
+ return 0;
+}
+
+static int
+tsi568_em_init(struct rio_dev *rdev)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+
+ pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+
+ /* Make sure that Port-Writes are disabled (for all ports) */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE_BC, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
+
+ return 0;
+}
+
+static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = tsi568_route_add_entry;
+ rdev->rswitch->get_entry = tsi568_route_get_entry;
+ rdev->rswitch->clr_table = tsi568_route_clr_table;
+ rdev->rswitch->set_domain = NULL;
+ rdev->rswitch->get_domain = NULL;
+ rdev->rswitch->em_init = tsi568_em_init;
+ rdev->rswitch->em_handle = NULL;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 00000000000..d34df722d95
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
+/*
+ * RapidIO Tsi57x switch family support
+ *
+ * Copyright 2009-2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * - Added EM support
+ * - Modified switch operations initialization.
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID 0x10070
+#define SPBC_ROUTE_CFG_PORT 0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
+
+#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
+#define TSI578_SP_MODE_GLBL 0x10004
+#define TSI578_SP_MODE_PW_DIS 0x08000000
+#define TSI578_SP_MODE_LUT_512 0x01000000
+
+#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
+#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
+#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
+#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
+
+#define TSI578_GLBL_ROUTE_BASE 0x10078
+
+static int
+tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, route_port);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), route_port);
+ }
+
+ udelay(10);
+
+ return 0;
+}
+
+static int
+tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ int ret = 0;
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ /* Use local RT of the ingress port to avoid possible
+ race condition */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &result);
+ table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
+ }
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), &result);
+
+ *route_port = (u8)result;
+ if (*route_port > 15)
+ ret = -1;
+
+ return ret;
+}
+
+static int
+tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 route_idx;
+ u32 lut_size;
+
+ lut_size = (mport->sys_size) ? 0x1ff : 0xff;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT,
+ RIO_INVALID_ROUTE);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
+ }
+
+ return 0;
+}
+
+static int
+tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+
+ /* Turn off flat (LUT_512) mode */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_MODE_GLBL, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
+ regval & ~TSI578_SP_MODE_LUT_512);
+ /* Set switch domain base */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_GLBL_ROUTE_BASE,
+ (u32)(sw_domain << 24));
+ return 0;
+}
+
+static int
+tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_GLBL_ROUTE_BASE, &regval);
+
+ *sw_domain = (u8)(regval >> 24);
+
+ return 0;
+}
+
+static int
+tsi57x_em_init(struct rio_dev *rdev)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 regval;
+ int portnum;
+
+ pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+
+ for (portnum = 0; portnum < 16; portnum++) {
+ /* Make sure that Port-Writes are enabled (for all ports) */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_MODE(portnum), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_SP_MODE(portnum),
+ regval & ~TSI578_SP_MODE_PW_DIS);
+
+ /* Clear all pending interrupts */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(portnum),
+ &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(portnum),
+ regval & 0x07120214);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_INT_STATUS(portnum), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_SP_INT_STATUS(portnum),
+ regval & 0x000700bd);
+
+ /* Enable all interrupts to allow ports to send a port-write */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_CTL_INDEP(portnum), &regval);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_SP_CTL_INDEP(portnum),
+ regval | 0x000b0000);
+
+ /* Skip next (odd) port if the current port is in x4 mode */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ &regval);
+ if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
+ portnum++;
+ }
+
+ return 0;
+}
+
+static int
+tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u16 destid = rdev->rswitch->destid;
+ u8 hopcount = rdev->rswitch->hopcount;
+ u32 intstat, err_status;
+ int sendcount, checkcount;
+ u8 route_port;
+ u32 regval;
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ &err_status);
+
+ if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
+ (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+ /* Remove any queued packets by locking/unlocking port */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ &regval);
+ if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ regval | RIO_PORT_N_CTL_LOCKOUT);
+ udelay(50);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ regval);
+ }
+
+ /* Read from link maintenance response register to clear
+ * valid bit
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
+ &regval);
+
+ /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
+ * symbol to recover from IES/OES
+ */
+ sendcount = 3;
+ while (sendcount) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_SP_CS_TX(portnum), 0x40fc8000);
+ checkcount = 3;
+ while (checkcount--) {
+ udelay(50);
+ rio_mport_read_config_32(
+ mport, destid, hopcount,
+ rdev->phys_efptr +
+ RIO_PORT_N_MNT_RSP_CSR(portnum),
+ &regval);
+ if (regval & RIO_PORT_N_MNT_RSP_RVAL)
+ goto exit_es;
+ }
+
+ sendcount--;
+ }
+ }
+
+exit_es:
+ /* Clear implementation specific error status bits */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_INT_STATUS(portnum), &intstat);
+ pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
+ destid, hopcount, portnum, intstat);
+
+ if (intstat & 0x10000) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_LUT_PEINF(portnum), &regval);
+ regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
+ route_port = rdev->rswitch->route_table[regval];
+ pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
+ rio_name(rdev), portnum, regval);
+ tsi57x_route_add_entry(mport, destid, hopcount,
+ RIO_GLOBAL_TABLE, regval, route_port);
+ }
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_SP_INT_STATUS(portnum),
+ intstat & 0x000700bd);
+
+ return 0;
+}
+
+static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ rdev->rswitch->add_entry = tsi57x_route_add_entry;
+ rdev->rswitch->get_entry = tsi57x_route_get_entry;
+ rdev->rswitch->clr_table = tsi57x_route_clr_table;
+ rdev->rswitch->set_domain = tsi57x_set_domain;
+ rdev->rswitch->get_domain = tsi57x_get_domain;
+ rdev->rswitch->em_init = tsi57x_em_init;
+ rdev->rswitch->em_handle = tsi57x_em_handler;
+
+ return 0;
+}
+
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 5fb83e2ced2..7d149a8d8d9 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -23,9 +23,9 @@ struct pm8607_regulator_info {
struct regulator_dev *regulator;
struct i2c_client *i2c;
- int min_uV;
- int max_uV;
- int step_uV;
+ unsigned int *vol_table;
+ unsigned int *vol_suspend;
+
int vol_reg;
int vol_shift;
int vol_nbits;
@@ -36,83 +36,189 @@ struct pm8607_regulator_info {
int slope_double;
};
-static inline int check_range(struct pm8607_regulator_info *info,
- int min_uV, int max_uV)
-{
- if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
- return -EINVAL;
+static const unsigned int BUCK1_table[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+};
- return 0;
-}
+static const unsigned int BUCK1_suspend_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int BUCK2_table[] = {
+ 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
+ 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
+ 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
+ 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
+ 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
+};
+
+static const unsigned int BUCK2_suspend_table[] = {
+ 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
+ 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
+ 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
+ 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
+ 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
+};
+
+static const unsigned int BUCK3_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int BUCK3_suspend_table[] = {
+ 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
+ 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
+ 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
+ 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
+ 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int LDO1_table[] = {
+ 1800000, 1200000, 2800000, 0,
+};
+
+static const unsigned int LDO1_suspend_table[] = {
+ 1800000, 1200000, 0, 0,
+};
+
+static const unsigned int LDO2_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO2_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO3_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO3_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO4_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000,
+};
+
+static const unsigned int LDO4_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000,
+};
+
+static const unsigned int LDO5_table[] = {
+ 2900000, 3000000, 3100000, 3300000,
+};
+
+static const unsigned int LDO5_suspend_table[] = {
+ 2900000, 0, 0, 0,
+};
+
+static const unsigned int LDO6_table[] = {
+ 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000,
+};
+
+static const unsigned int LDO6_suspend_table[] = {
+ 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000,
+};
+
+static const unsigned int LDO7_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO7_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO8_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO8_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO9_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+};
+
+static const unsigned int LDO9_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+};
+
+static const unsigned int LDO10_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO10_suspend_table[] = {
+ 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO12_table[] = {
+ 1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO12_suspend_table[] = {
+ 1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000,
+ 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
+};
+
+static const unsigned int LDO13_table[] = {
+ 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
+};
+
+static const unsigned int LDO13_suspend_table[] = {
+ 0,
+};
+
+static const unsigned int LDO14_table[] = {
+ 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000,
+};
+
+static const unsigned int LDO14_suspend_table[] = {
+ 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000,
+};
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
int ret = -EINVAL;
- switch (info->desc.id) {
- case PM8607_ID_BUCK1:
- ret = (index < 0x1d) ? (index * 25000 + 800000) :
- ((index < 0x20) ? 1500000 :
- ((index < 0x40) ? ((index - 0x20) * 25000) :
- -EINVAL));
- break;
- case PM8607_ID_BUCK3:
- ret = (index < 0x3d) ? (index * 25000) :
- ((index < 0x40) ? 1500000 : -EINVAL);
- if (ret < 0)
- break;
+ if (info->vol_table && (index < (2 << info->vol_nbits))) {
+ ret = info->vol_table[index];
if (info->slope_double)
ret <<= 1;
- break;
- case PM8607_ID_LDO1:
- ret = (index == 0) ? 1800000 :
- ((index == 1) ? 1200000 :
- ((index == 2) ? 2800000 : -EINVAL));
- break;
- case PM8607_ID_LDO5:
- ret = (index == 0) ? 2900000 :
- ((index == 1) ? 3000000 :
- ((index == 2) ? 3100000 : 3300000));
- break;
- case PM8607_ID_LDO7:
- case PM8607_ID_LDO8:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- -EINVAL);
- break;
- case PM8607_ID_LDO12:
- ret = (index < 2) ? (index * 100000 + 1800000) :
- ((index < 7) ? (index * 100000 + 2500000) :
- ((index == 7) ? 3300000 : 1200000));
- break;
- case PM8607_ID_LDO2:
- case PM8607_ID_LDO3:
- case PM8607_ID_LDO9:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- 3300000);
- break;
- case PM8607_ID_LDO4:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 6) ? (index * 50000 + 2550000) :
- ((index == 6) ? 2900000 : 3300000));
- break;
- case PM8607_ID_LDO6:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2500000) :
- 3300000);
- break;
- case PM8607_ID_LDO10:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- ((index == 7) ? 3300000 : 1200000));
- break;
- case PM8607_ID_LDO14:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2600000) :
- 3300000);
- break;
}
return ret;
}
@@ -120,174 +226,26 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- int val = -ENOENT;
- int ret;
+ int i, ret = -ENOENT;
- switch (info->desc.id) {
- case PM8607_ID_BUCK1:
- if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
- val = (min_uV - 775001) / 25000;
- else { /* 25mV ~ 775mV / 25mV */
- val = (min_uV + 249999) / 25000;
- val += 32;
- }
- break;
- case PM8607_ID_BUCK3:
- if (info->slope_double)
- min_uV = min_uV >> 1;
- val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
-
- break;
- case PM8607_ID_LDO1:
- if (min_uV > 1800000)
- val = 2;
- else if (min_uV > 1200000)
- val = 0;
- else
- val = 1;
- break;
- case PM8607_ID_LDO5:
- if (min_uV > 3100000)
- val = 3;
- else /* 2900mV ~ 3100mV / 100mV */
- val = (min_uV - 2800001) / 100000;
- break;
- case PM8607_ID_LDO7:
- case PM8607_ID_LDO8:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0; /* 1800mv */
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO10:
- if (min_uV > 2850000)
- val = 7;
- else if (min_uV <= 1200000)
- val = 8;
- else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
- val = (min_uV - 1750001) / 50000;
- else { /* 2700mV ~ 2850mV / 50mV */
- val = (min_uV - 2650001) / 50000;
- val += 3;
- }
- break;
- case PM8607_ID_LDO12:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
- if (min_uV <= 1200000)
- val = 8; /* 1200mV */
- else if (min_uV <= 1800000)
- val = 0; /* 1800mV */
- else if (min_uV <= 1900000)
- val = (min_uV - 1700001) / 100000;
- else
- val = 2; /* 2700mV */
- } else { /* 2700mV ~ 3100mV / 100mV */
- if (min_uV <= 3100000) {
- val = (min_uV - 2600001) / 100000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO2:
- case PM8607_ID_LDO3:
- case PM8607_ID_LDO9:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2850mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO4:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2800mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 2900000)
- val = 6;
- else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO6:
- if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2600mV */
- } else { /* 2600mV ~ 2800mV / 50mV */
- if (min_uV <= 2800000) {
- val = (min_uV - 2550001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
- case PM8607_ID_LDO14:
- if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (info->slope_double) {
+ min_uV = min_uV >> 1;
+ max_uV = max_uV >> 1;
}
- if (val >= 0) {
- ret = pm8607_list_voltage(rdev, val);
- if (ret > max_uV) {
- pr_err("exceed voltage range (%d %d) uV",
- min_uV, max_uV);
- return -EINVAL;
+ if (info->vol_table) {
+ for (i = 0; i < (2 << info->vol_nbits); i++) {
+ if (!info->vol_table[i])
+ break;
+ if ((min_uV <= info->vol_table[i])
+ && (max_uV >= info->vol_table[i])) {
+ ret = i;
+ break;
+ }
}
- } else
- pr_err("invalid voltage range (%d %d) uV", min_uV, max_uV);
- return val;
+ }
+ if (ret < 0)
+ pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV);
+ return ret;
}
static int pm8607_set_voltage(struct regulator_dev *rdev,
@@ -297,7 +255,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
uint8_t val, mask;
int ret;
- if (check_range(info, min_uV, max_uV)) {
+ if (min_uV > max_uV) {
pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
return -EINVAL;
}
@@ -375,18 +333,15 @@ static struct regulator_ops pm8607_regulator_ops = {
.is_enabled = pm8607_is_enabled,
};
-#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+#define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit) \
{ \
.desc = { \
- .name = "BUCK" #_id, \
+ .name = #vreg, \
.ops = &pm8607_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
- .id = PM8607_ID_BUCK##_id, \
+ .id = PM8607_ID_##vreg, \
.owner = THIS_MODULE, \
}, \
- .min_uV = (min) * 1000, \
- .max_uV = (max) * 1000, \
- .step_uV = (step) * 1000, \
.vol_reg = PM8607_##vreg, \
.vol_shift = (0), \
.vol_nbits = (nbits), \
@@ -395,9 +350,11 @@ static struct regulator_ops pm8607_regulator_ops = {
.enable_reg = PM8607_##ereg, \
.enable_bit = (ebit), \
.slope_double = (0), \
+ .vol_table = (unsigned int *)&vreg##_table, \
+ .vol_suspend = (unsigned int *)&vreg##_suspend_table, \
}
-#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
+#define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit) \
{ \
.desc = { \
.name = "LDO" #_id, \
@@ -406,33 +363,34 @@ static struct regulator_ops pm8607_regulator_ops = {
.id = PM8607_ID_LDO##_id, \
.owner = THIS_MODULE, \
}, \
- .min_uV = (min) * 1000, \
- .max_uV = (max) * 1000, \
- .step_uV = (step) * 1000, \
.vol_reg = PM8607_##vreg, \
.vol_shift = (shift), \
.vol_nbits = (nbits), \
.enable_reg = PM8607_##ereg, \
.enable_bit = (ebit), \
.slope_double = (0), \
+ .vol_table = (unsigned int *)&LDO##_id##_table, \
+ .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
}
static struct pm8607_regulator_info pm8607_regulator_info[] = {
- PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
- PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
-
- PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
- PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
- PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
- PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
- PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
- PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
- PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
- PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
- PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
- PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
- PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
- PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
+ PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
+ PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1),
+ PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
+
+ PM8607_LDO( 1, LDO1, 0, 2, SUPPLIES_EN11, 3),
+ PM8607_LDO( 2, LDO2, 0, 3, SUPPLIES_EN11, 4),
+ PM8607_LDO( 3, LDO3, 0, 3, SUPPLIES_EN11, 5),
+ PM8607_LDO( 4, LDO4, 0, 3, SUPPLIES_EN11, 6),
+ PM8607_LDO( 5, LDO5, 0, 2, SUPPLIES_EN11, 7),
+ PM8607_LDO( 6, LDO6, 0, 3, SUPPLIES_EN12, 0),
+ PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1),
+ PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2),
+ PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3),
+ PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4),
+ PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5),
+ PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0),
+ PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
};
static inline struct pm8607_regulator_info *find_regulator_info(int id)
@@ -484,60 +442,29 @@ static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
{
struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(info->regulator);
return 0;
}
-#define PM8607_REGULATOR_DRIVER(_name) \
-{ \
- .driver = { \
- .name = "88pm8607-" #_name, \
- .owner = THIS_MODULE, \
- }, \
- .probe = pm8607_regulator_probe, \
- .remove = __devexit_p(pm8607_regulator_remove), \
-}
-
-static struct platform_driver pm8607_regulator_driver[] = {
- PM8607_REGULATOR_DRIVER(buck1),
- PM8607_REGULATOR_DRIVER(buck2),
- PM8607_REGULATOR_DRIVER(buck3),
- PM8607_REGULATOR_DRIVER(ldo1),
- PM8607_REGULATOR_DRIVER(ldo2),
- PM8607_REGULATOR_DRIVER(ldo3),
- PM8607_REGULATOR_DRIVER(ldo4),
- PM8607_REGULATOR_DRIVER(ldo5),
- PM8607_REGULATOR_DRIVER(ldo6),
- PM8607_REGULATOR_DRIVER(ldo7),
- PM8607_REGULATOR_DRIVER(ldo8),
- PM8607_REGULATOR_DRIVER(ldo9),
- PM8607_REGULATOR_DRIVER(ldo10),
- PM8607_REGULATOR_DRIVER(ldo12),
- PM8607_REGULATOR_DRIVER(ldo14),
+static struct platform_driver pm8607_regulator_driver = {
+ .driver = {
+ .name = "88pm860x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm8607_regulator_probe,
+ .remove = __devexit_p(pm8607_regulator_remove),
};
static int __init pm8607_regulator_init(void)
{
- int i, count, ret;
-
- count = ARRAY_SIZE(pm8607_regulator_driver);
- for (i = 0; i < count; i++) {
- ret = platform_driver_register(&pm8607_regulator_driver[i]);
- if (ret != 0)
- pr_err("Failed to register regulator driver: %d\n",
- ret);
- }
- return 0;
+ return platform_driver_register(&pm8607_regulator_driver);
}
subsys_initcall(pm8607_regulator_init);
static void __exit pm8607_regulator_exit(void)
{
- int i, count;
-
- count = ARRAY_SIZE(pm8607_regulator_driver);
- for (i = 0; i < count; i++)
- platform_driver_unregister(&pm8607_regulator_driver[i]);
+ platform_driver_unregister(&pm8607_regulator_driver);
}
module_exit(pm8607_regulator_exit);
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7de950959ed..7b14a67bdca 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -41,7 +41,7 @@
* struct ab3100_regulator
* A struct passed around the individual regulator functions
* @platform_device: platform device holding this regulator
- * @ab3100: handle to the AB3100 parent chip
+ * @dev: handle to the device
* @plfdata: AB3100 platform data passed in at probe time
* @regreg: regulator register number in the AB3100
* @fixed_voltage: a fixed voltage for this regulator, if this
@@ -52,7 +52,7 @@
*/
struct ab3100_regulator {
struct regulator_dev *rdev;
- struct ab3100 *ab3100;
+ struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
int fixed_voltage;
@@ -183,7 +183,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
int err;
u8 regval;
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_warn(&reg->dev, "failed to get regid %d value\n",
@@ -197,7 +197,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
regval |= AB3100_REG_ON_MASK;
- err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
regval);
if (err) {
dev_warn(&reg->dev, "failed to set regid %d value\n",
@@ -245,14 +245,14 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
if (abreg->regreg == AB3100_LDO_D) {
dev_info(&reg->dev, "disabling LDO D - shut down system\n");
/* Setting LDO D to 0x00 cuts the power to the SoC */
- return ab3100_set_register_interruptible(abreg->ab3100,
+ return abx500_set_register_interruptible(abreg->dev, 0,
AB3100_LDO_D, 0x00U);
}
/*
* All other regulators are handled here
*/
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -260,7 +260,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
return err;
}
regval &= ~AB3100_REG_ON_MASK;
- return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg,
+ return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
regval);
}
@@ -270,7 +270,7 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
u8 regval;
int err;
- err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg,
+ err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
&regval);
if (err) {
dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -305,7 +305,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
* For variable types, read out setting and index into
* supplied voltage list.
*/
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
abreg->regreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -373,7 +373,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
if (bestindex < 0)
return bestindex;
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
abreg->regreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -386,7 +386,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
regval &= ~0xE0;
regval |= (bestindex << 5);
- err = ab3100_set_register_interruptible(abreg->ab3100,
+ err = abx500_set_register_interruptible(abreg->dev, 0,
abreg->regreg, regval);
if (err)
dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -414,7 +414,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
/* LDO E and BUCK have special suspend voltages you can set */
bestindex = ab3100_get_best_voltage_index(reg, uV, uV);
- err = ab3100_get_register_interruptible(abreg->ab3100,
+ err = abx500_get_register_interruptible(abreg->dev, 0,
targetreg, &regval);
if (err) {
dev_warn(&reg->dev,
@@ -427,7 +427,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
regval &= ~0xE0;
regval |= (bestindex << 5);
- err = ab3100_set_register_interruptible(abreg->ab3100,
+ err = abx500_set_register_interruptible(abreg->dev, 0,
targetreg, regval);
if (err)
dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -492,18 +492,21 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.id = AB3100_LDO_A,
.ops = &regulator_ops_fixed,
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_C",
.id = AB3100_LDO_C,
.ops = &regulator_ops_fixed,
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_D",
.id = AB3100_LDO_D,
.ops = &regulator_ops_fixed,
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_E",
@@ -511,6 +514,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable_sleepable,
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_F",
@@ -518,6 +522,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_f_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_G",
@@ -525,6 +530,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_g_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_H",
@@ -532,6 +538,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_h_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_K",
@@ -539,12 +546,14 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_k_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "LDO_EXT",
.id = AB3100_LDO_EXT,
.ops = &regulator_ops_external,
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
{
.name = "BUCK",
@@ -552,6 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.ops = &regulator_ops_variable_sleepable,
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
.type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
},
};
@@ -564,13 +574,12 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
- struct ab3100 *ab3100 = platform_get_drvdata(pdev);
int err = 0;
u8 data;
int i;
/* Check chip state */
- err = ab3100_get_register_interruptible(ab3100,
+ err = abx500_get_register_interruptible(&pdev->dev, 0,
AB3100_LDO_D, &data);
if (err) {
dev_err(&pdev->dev, "could not read initial status of LDO_D\n");
@@ -585,7 +594,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
/* Set up regulators */
for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- err = ab3100_set_register_interruptible(ab3100,
+ err = abx500_set_register_interruptible(&pdev->dev, 0,
ab3100_reg_init_order[i],
plfdata->reg_initvals[i]);
if (err) {
@@ -607,7 +616,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
* see what it looks like for a certain machine, go
* into the machine I2C setup.
*/
- reg->ab3100 = ab3100;
+ reg->dev = &pdev->dev;
reg->plfdata = plfdata;
/*
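The ab3100 regulator conversion above swaps the ab3100-specific accessors for the generic abx500 ones, which take the regulator's struct device plus a register bank (0 on AB3100) instead of a struct ab3100 pointer. A minimal sketch of the resulting read-modify-write pattern, assuming only the two accessor prototypes used in this patch:

#include <linux/mfd/abx500.h>

/* Sketch: clear the ON bit of one AB3100 regulator register through the
 * generic abx500 API (bank 0, mask taken from the driver above). */
static int ab3100_reg_clear_on(struct device *dev, u8 regreg)
{
	u8 regval;
	int err;

	err = abx500_get_register_interruptible(dev, 0, regreg, &regval);
	if (err)
		return err;

	regval &= ~AB3100_REG_ON_MASK;
	return abx500_set_register_interruptible(dev, 0, regreg, regval);
}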
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index d08cd9b66c6..068d488a4f7 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -78,6 +78,7 @@ static struct regulator_desc bq24022_desc = {
.name = "bq24022",
.ops = &bq24022_ops,
.type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
};
static int __init bq24022_probe(struct platform_device *pdev)
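The .owner = THIS_MODULE fields added to these regulator_desc tables (here and in the ab3100 descriptors above) let the regulator core take a module reference on the providing driver while a consumer holds the regulator, so the driver cannot be unloaded out from under an active consumer.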
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2b4e40d3119..2248087b9be 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -944,8 +944,13 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
has_dev = 0;
list_for_each_entry(node, &regulator_map_list, list) {
- if (consumer_dev_name != node->dev_name)
+ if (node->dev_name && consumer_dev_name) {
+ if (strcmp(node->dev_name, consumer_dev_name) != 0)
+ continue;
+ } else if (node->dev_name || consumer_dev_name) {
continue;
+ }
+
if (strcmp(node->supply, supply) != 0)
continue;
@@ -976,29 +981,6 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
return 0;
}
-static void unset_consumer_device_supply(struct regulator_dev *rdev,
- const char *consumer_dev_name, struct device *consumer_dev)
-{
- struct regulator_map *node, *n;
-
- if (consumer_dev && !consumer_dev_name)
- consumer_dev_name = dev_name(consumer_dev);
-
- list_for_each_entry_safe(node, n, &regulator_map_list, list) {
- if (rdev != node->regulator)
- continue;
-
- if (consumer_dev_name && node->dev_name &&
- strcmp(consumer_dev_name, node->dev_name))
- continue;
-
- list_del(&node->list);
- kfree(node->dev_name);
- kfree(node);
- return;
- }
-}
-
static void unset_regulator_supplies(struct regulator_dev *rdev)
{
struct regulator_map *node, *n;
@@ -1008,7 +990,6 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
list_del(&node->list);
kfree(node->dev_name);
kfree(node);
- return;
}
}
}
@@ -1540,7 +1521,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
* Context: can sleep
*
* Returns a voltage that can be passed to @regulator_set_voltage(),
- * zero if this selector code can't be used on this sytem, or a
+ * zero if this selector code can't be used on this system, or a
* negative errno.
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
@@ -1764,6 +1745,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
{
struct regulator_dev *rdev = regulator->rdev;
int ret;
+ int regulator_curr_mode;
mutex_lock(&rdev->mutex);
@@ -1773,6 +1755,15 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
goto out;
}
+ /* return if the same mode is requested */
+ if (rdev->desc->ops->get_mode) {
+ regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
+ if (regulator_curr_mode == mode) {
+ ret = 0;
+ goto out;
+ }
+ }
+
/* constraints check */
ret = regulator_check_mode(rdev, mode);
if (ret < 0)
@@ -2328,7 +2319,37 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
goto scrub;
/* set supply regulator if it exists */
+ if (init_data->supply_regulator && init_data->supply_regulator_dev) {
+ dev_err(dev,
+ "Supply regulator specified by both name and dev\n");
+ goto scrub;
+ }
+
+ if (init_data->supply_regulator) {
+ struct regulator_dev *r;
+ int found = 0;
+
+ list_for_each_entry(r, &regulator_list, list) {
+ if (strcmp(rdev_get_name(r),
+ init_data->supply_regulator) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(dev, "Failed to find supply %s\n",
+ init_data->supply_regulator);
+ goto scrub;
+ }
+
+ ret = set_supply(rdev, r);
+ if (ret < 0)
+ goto scrub;
+ }
+
if (init_data->supply_regulator_dev) {
+ dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
ret = set_supply(rdev,
dev_get_drvdata(init_data->supply_regulator_dev));
if (ret < 0)
@@ -2341,13 +2362,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
init_data->consumer_supplies[i].dev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- if (ret < 0) {
- for (--i; i >= 0; i--)
- unset_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev_name,
- init_data->consumer_supplies[i].dev);
- goto scrub;
- }
+ if (ret < 0)
+ goto unset_supplies;
}
list_add(&rdev->list, &regulator_list);
@@ -2355,6 +2371,9 @@ out:
mutex_unlock(&regulator_list_mutex);
return rdev;
+unset_supplies:
+ unset_regulator_supplies(rdev);
+
scrub:
device_unregister(&rdev->dev);
/* device core frees rdev */
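Two behavioural points in the core.c hunks above are easy to miss: set_consumer_device_supply() now compares consumer device names by content rather than by pointer, treating two NULL names as a match and a NULL/non-NULL pair as a mismatch, and registration failures now unwind through the common unset_regulator_supplies() path instead of a hand-rolled loop. The matching rule reduces to something like this sketch (not the kernel's exact code):

#include <linux/types.h>
#include <linux/string.h>

/* Name-matching rule extracted from set_consumer_device_supply():
 * both NULL -> match, exactly one NULL -> no match, else strcmp(). */
static bool consumer_name_matches(const char *a, const char *b)
{
	if (a && b)
		return strcmp(a, b) == 0;
	return a == b;
}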
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ad036dd8da1..4597d508a22 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -440,8 +440,8 @@ static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
- if (min_uV > mc13783_regulators[id].voltages[0] &&
- max_uV < mc13783_regulators[id].voltages[0])
+ if (min_uV >= mc13783_regulators[id].voltages[0] &&
+ max_uV <= mc13783_regulators[id].voltages[0])
return 0;
else
return -EINVAL;
@@ -649,6 +649,6 @@ static void __exit mc13783_regulator_exit(void)
module_exit(mc13783_regulator_exit);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 74841abcc9c..14b4576281c 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -22,68 +22,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-
-/* Register definitions */
-#define TPS6507X_REG_PPATH1 0X01
-#define TPS6507X_REG_INT 0X02
-#define TPS6507X_REG_CHGCONFIG0 0X03
-#define TPS6507X_REG_CHGCONFIG1 0X04
-#define TPS6507X_REG_CHGCONFIG2 0X05
-#define TPS6507X_REG_CHGCONFIG3 0X06
-#define TPS6507X_REG_REG_ADCONFIG 0X07
-#define TPS6507X_REG_TSCMODE 0X08
-#define TPS6507X_REG_ADRESULT_1 0X09
-#define TPS6507X_REG_ADRESULT_2 0X0A
-#define TPS6507X_REG_PGOOD 0X0B
-#define TPS6507X_REG_PGOODMASK 0X0C
-#define TPS6507X_REG_CON_CTRL1 0X0D
-#define TPS6507X_REG_CON_CTRL2 0X0E
-#define TPS6507X_REG_CON_CTRL3 0X0F
-#define TPS6507X_REG_DEFDCDC1 0X10
-#define TPS6507X_REG_DEFDCDC2_LOW 0X11
-#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
-#define TPS6507X_REG_DEFDCDC3_LOW 0X13
-#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
-#define TPS6507X_REG_DEFSLEW 0X15
-#define TPS6507X_REG_LDO_CTRL1 0X16
-#define TPS6507X_REG_DEFLDO2 0X17
-#define TPS6507X_REG_WLED_CTRL1 0X18
-#define TPS6507X_REG_WLED_CTRL2 0X19
-
-/* CON_CTRL1 bitfields */
-#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
-#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
-#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
-#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
-#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
-
-/* DEFDCDC1 bitfields */
-#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
-#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
-
-/* DEFDCDC2_LOW bitfields */
-#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
-
-/* DEFDCDC2_HIGH bitfields */
-#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
-
-/* DEFDCDC3_LOW bitfields */
-#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
-
-/* DEFDCDC3_HIGH bitfields */
-#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
-
-/* TPS6507X_REG_LDO_CTRL1 bitfields */
-#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
-
-/* TPS6507X_REG_DEFLDO2 bitfields */
-#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
-
-/* VDCDC MASK */
-#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
+#include <linux/mfd/tps6507x.h>
/* DCDC's */
#define TPS6507X_DCDC_1 0
@@ -162,101 +103,146 @@ struct tps_info {
const u16 *table;
};
-struct tps_pmic {
+static const struct tps_info tps6507x_pmic_regs[] = {
+ {
+ .name = "VDCDC1",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "VDCDC3",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
+ .table = VDCDCx_VSEL_table,
+ },
+ {
+ .name = "LDO1",
+ .min_uV = 1000000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO1_VSEL_table),
+ .table = LDO1_VSEL_table,
+ },
+ {
+ .name = "LDO2",
+ .min_uV = 725000,
+ .max_uV = 3300000,
+ .table_len = ARRAY_SIZE(LDO2_VSEL_table),
+ .table = LDO2_VSEL_table,
+ },
+};
+
+struct tps6507x_pmic {
struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
- struct i2c_client *client;
+ struct tps6507x_dev *mfd;
struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
const struct tps_info *info[TPS6507X_NUM_REGULATOR];
struct mutex io_lock;
};
-
-static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg)
+static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
{
- return i2c_smbus_read_byte_data(tps->client, reg);
+ u8 val;
+ int err;
+
+ err = tps->mfd->read_dev(tps->mfd, reg, 1, &val);
+
+ if (err)
+ return err;
+
+ return val;
}
-static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val)
+static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
- return i2c_smbus_write_byte_data(tps->client, reg, val);
+ return tps->mfd->write_dev(tps->mfd, reg, 1, &val);
}
-static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data |= mask;
- err = tps_6507x_write(tps, reg, data);
+ err = tps6507x_pmic_write(tps, reg, data);
if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
+static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
{
int err, data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
err = data;
goto out;
}
data &= ~mask;
- err = tps_6507x_write(tps, reg, data);
+ err = tps6507x_pmic_write(tps, reg, data);
if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
out:
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg)
+static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg)
{
int data;
mutex_lock(&tps->io_lock);
- data = tps_6507x_read(tps, reg);
+ data = tps6507x_pmic_read(tps, reg);
if (data < 0)
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return data;
}
-static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
+static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
{
int err;
mutex_lock(&tps->io_lock);
- err = tps_6507x_write(tps, reg, val);
+ err = tps6507x_pmic_write(tps, reg, val);
if (err < 0)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
+ dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
mutex_unlock(&tps->io_lock);
return err;
}
-static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
u8 shift;
@@ -264,7 +250,7 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+ data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
if (data < 0)
return data;
@@ -272,9 +258,9 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
return (data & 1<<shift) ? 1 : 0;
}
-static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_is_enabled(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
u8 shift;
@@ -282,7 +268,7 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1);
+ data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
if (data < 0)
return data;
@@ -290,9 +276,9 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
return (data & 1<<shift) ? 1 : 0;
}
-static int tps6507x_dcdc_enable(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_enable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
u8 shift;
@@ -300,12 +286,12 @@ static int tps6507x_dcdc_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
}
-static int tps6507x_dcdc_disable(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_disable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
u8 shift;
@@ -313,12 +299,13 @@ static int tps6507x_dcdc_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - dcdc;
- return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
+ 1 << shift);
}
-static int tps6507x_ldo_enable(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_enable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
u8 shift;
@@ -326,12 +313,12 @@ static int tps6507x_ldo_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
}
-static int tps6507x_ldo_disable(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_disable(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
u8 shift;
@@ -339,12 +326,13 @@ static int tps6507x_ldo_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS6507X_MAX_REG_ID - ldo;
- return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
+ return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
+ 1 << shift);
}
-static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
+static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
u8 reg;
@@ -362,7 +350,7 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
return -EINVAL;
}
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
@@ -370,10 +358,10 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
return tps->info[dcdc]->table[data] * 1000;
}
-static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, dcdc = rdev_get_id(dev);
u8 reg;
@@ -411,19 +399,19 @@ static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
return -EINVAL;
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= ~TPS6507X_DEFDCDCX_DCDC_MASK;
data |= vsel;
- return tps_6507x_reg_write(tps, reg, data);
+ return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
+static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
u8 reg, mask;
@@ -437,7 +425,7 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
TPS6507X_REG_DEFLDO2_LDO2_MASK);
}
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
@@ -445,10 +433,10 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
return tps->info[ldo]->table[data] * 1000;
}
-static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
u8 reg, mask;
@@ -479,20 +467,20 @@ static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[ldo]->table_len)
return -EINVAL;
- data = tps_6507x_reg_read(tps, reg);
+ data = tps6507x_pmic_reg_read(tps, reg);
if (data < 0)
return data;
data &= ~mask;
data |= vsel;
- return tps_6507x_reg_write(tps, reg, data);
+ return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev,
unsigned selector)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
@@ -504,10 +492,10 @@ static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
return tps->info[dcdc]->table[selector] * 1000;
}
-static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
+static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev,
unsigned selector)
{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
+ struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
int ldo = rdev_get_id(dev);
if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
@@ -520,47 +508,54 @@ static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
}
/* Operations permitted on VDCDCx */
-static struct regulator_ops tps6507x_dcdc_ops = {
- .is_enabled = tps6507x_dcdc_is_enabled,
- .enable = tps6507x_dcdc_enable,
- .disable = tps6507x_dcdc_disable,
- .get_voltage = tps6507x_dcdc_get_voltage,
- .set_voltage = tps6507x_dcdc_set_voltage,
- .list_voltage = tps6507x_dcdc_list_voltage,
+static struct regulator_ops tps6507x_pmic_dcdc_ops = {
+ .is_enabled = tps6507x_pmic_dcdc_is_enabled,
+ .enable = tps6507x_pmic_dcdc_enable,
+ .disable = tps6507x_pmic_dcdc_disable,
+ .get_voltage = tps6507x_pmic_dcdc_get_voltage,
+ .set_voltage = tps6507x_pmic_dcdc_set_voltage,
+ .list_voltage = tps6507x_pmic_dcdc_list_voltage,
};
/* Operations permitted on LDOx */
-static struct regulator_ops tps6507x_ldo_ops = {
- .is_enabled = tps6507x_ldo_is_enabled,
- .enable = tps6507x_ldo_enable,
- .disable = tps6507x_ldo_disable,
- .get_voltage = tps6507x_ldo_get_voltage,
- .set_voltage = tps6507x_ldo_set_voltage,
- .list_voltage = tps6507x_ldo_list_voltage,
+static struct regulator_ops tps6507x_pmic_ldo_ops = {
+ .is_enabled = tps6507x_pmic_ldo_is_enabled,
+ .enable = tps6507x_pmic_ldo_enable,
+ .disable = tps6507x_pmic_ldo_disable,
+ .get_voltage = tps6507x_pmic_ldo_get_voltage,
+ .set_voltage = tps6507x_pmic_ldo_set_voltage,
+ .list_voltage = tps6507x_pmic_ldo_list_voltage,
};
-static int __devinit tps_6507x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static __devinit
+int tps6507x_pmic_probe(struct platform_device *pdev)
{
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
static int desc_id;
- const struct tps_info *info = (void *)id->driver_data;
+ const struct tps_info *info = &tps6507x_pmic_regs[0];
struct regulator_init_data *init_data;
struct regulator_dev *rdev;
- struct tps_pmic *tps;
+ struct tps6507x_pmic *tps;
+ struct tps6507x_board *tps_board;
int i;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
+ /**
+ * tps_board points to pmic related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = dev_get_platdata(tps6507x_dev->dev);
+ if (!tps_board)
+ return -EINVAL;
/**
* init_data points to array of regulator_init structures
* coming from the board-evm file.
*/
- init_data = client->dev.platform_data;
+ init_data = tps_board->tps6507x_pmic_init_data;
if (!init_data)
- return -EIO;
+ return -EINVAL;
tps = kzalloc(sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -569,7 +564,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
mutex_init(&tps->io_lock);
/* common for all regulators */
- tps->client = client;
+ tps->mfd = tps6507x_dev;
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
/* Register the regulators */
@@ -578,15 +573,16 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
tps->desc[i].id = desc_id++;
tps->desc[i].n_voltages = num_voltages[i];
tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
- &tps6507x_ldo_ops : &tps6507x_dcdc_ops);
+ &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&tps->desc[i],
- &client->dev, init_data, tps);
+ tps6507x_dev->dev, init_data, tps);
if (IS_ERR(rdev)) {
- dev_err(&client->dev, "failed to register %s\n",
- id->name);
+ dev_err(tps6507x_dev->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
error = PTR_ERR(rdev);
goto fail;
}
@@ -595,7 +591,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
tps->rdev[i] = rdev;
}
- i2c_set_clientdata(client, tps);
+ tps6507x_dev->pmic = tps;
return 0;
@@ -608,19 +604,17 @@ fail:
}
/**
- * tps_6507x_remove - TPS6507x driver i2c remove handler
+ * tps6507x_pmic_remove - TPS6507x driver remove handler
- * @client: i2c driver client device structure
+ * @pdev: platform device structure
*
- * Unregister TPS driver as an i2c client device driver
+ * Unregister TPS driver as a platform device driver
*/
-static int __devexit tps_6507x_remove(struct i2c_client *client)
+static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
{
- struct tps_pmic *tps = i2c_get_clientdata(client);
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_pmic *tps = tps6507x_dev->pmic;
int i;
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
-
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
@@ -629,83 +623,38 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
return 0;
}
-static const struct tps_info tps6507x_regs[] = {
- {
- .name = "VDCDC1",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "VDCDC2",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "VDCDC3",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
- .table = VDCDCx_VSEL_table,
- },
- {
- .name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(LDO1_VSEL_table),
- .table = LDO1_VSEL_table,
- },
- {
- .name = "LDO2",
- .min_uV = 725000,
- .max_uV = 3300000,
- .table_len = ARRAY_SIZE(LDO2_VSEL_table),
- .table = LDO2_VSEL_table,
- },
-};
-
-static const struct i2c_device_id tps_6507x_id[] = {
- {.name = "tps6507x",
- .driver_data = (unsigned long) tps6507x_regs,},
- { },
-};
-MODULE_DEVICE_TABLE(i2c, tps_6507x_id);
-
-static struct i2c_driver tps_6507x_i2c_driver = {
+static struct platform_driver tps6507x_pmic_driver = {
.driver = {
- .name = "tps6507x",
+ .name = "tps6507x-pmic",
.owner = THIS_MODULE,
},
- .probe = tps_6507x_probe,
- .remove = __devexit_p(tps_6507x_remove),
- .id_table = tps_6507x_id,
+ .probe = tps6507x_pmic_probe,
+ .remove = __devexit_p(tps6507x_pmic_remove),
};
/**
- * tps_6507x_init
+ * tps6507x_pmic_init
*
* Module init function
*/
-static int __init tps_6507x_init(void)
+static int __init tps6507x_pmic_init(void)
{
- return i2c_add_driver(&tps_6507x_i2c_driver);
+ return platform_driver_register(&tps6507x_pmic_driver);
}
-subsys_initcall(tps_6507x_init);
+subsys_initcall(tps6507x_pmic_init);
/**
- * tps_6507x_cleanup
+ * tps6507x_pmic_cleanup
*
* Module exit function
*/
-static void __exit tps_6507x_cleanup(void)
+static void __exit tps6507x_pmic_cleanup(void)
{
- i2c_del_driver(&tps_6507x_i2c_driver);
+ platform_driver_unregister(&tps6507x_pmic_driver);
}
-module_exit(tps_6507x_cleanup);
+module_exit(tps6507x_pmic_cleanup);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 9729d760fb4..7e5892efc43 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -49,6 +49,7 @@ struct twlreg_info {
/* chip constraints on regulator behavior */
u16 min_mV;
+ u16 max_mV;
/* used by regulator core */
struct regulator_desc desc;
@@ -318,31 +319,8 @@ static const u16 VIO_VSEL_table[] = {
static const u16 VINTANA2_VSEL_table[] = {
2500, 2750,
};
-static const u16 VAUX1_6030_VSEL_table[] = {
- 1000, 1300, 1800, 2500,
- 2800, 2900, 3000, 3000,
-};
-static const u16 VAUX2_6030_VSEL_table[] = {
- 1200, 1800, 2500, 2750,
- 2800, 2800, 2800, 2800,
-};
-static const u16 VAUX3_6030_VSEL_table[] = {
- 1000, 1200, 1300, 1800,
- 2500, 2800, 3000, 3000,
-};
-static const u16 VMMC_VSEL_table[] = {
- 1200, 1800, 2800, 2900,
- 3000, 3000, 3000, 3000,
-};
-static const u16 VPP_VSEL_table[] = {
- 1800, 1900, 2000, 2100,
- 2200, 2300, 2400, 2500,
-};
-static const u16 VUSIM_VSEL_table[] = {
- 1200, 1800, 2500, 2900,
-};
-static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int mV = info->table[index];
@@ -351,7 +329,7 @@ static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -375,7 +353,7 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
return -EDOM;
}
-static int twlldo_get_voltage(struct regulator_dev *rdev)
+static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
@@ -388,11 +366,67 @@ static int twlldo_get_voltage(struct regulator_dev *rdev)
return LDO_MV(info->table[vsel]) * 1000;
}
-static struct regulator_ops twlldo_ops = {
- .list_voltage = twlldo_list_voltage,
+static struct regulator_ops twl4030ldo_ops = {
+ .list_voltage = twl4030ldo_list_voltage,
- .set_voltage = twlldo_set_voltage,
- .get_voltage = twlldo_get_voltage,
+ .set_voltage = twl4030ldo_set_voltage,
+ .get_voltage = twl4030ldo_get_voltage,
+
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
+
+ .set_mode = twlreg_set_mode,
+
+ .get_status = twlreg_get_status,
+};
+
+static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+
+ return ((info->min_mV + (index * 100)) * 1000);
+}
+
+static int
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel;
+
+ if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV))
+ return -EDOM;
+
+ /*
+ * Calculate vsel from the requested voltage using
+ * mV = 1000 mV + 100 mV * (vsel - 1)
+ */
+ vsel = (min_uV/1000 - 1000)/100 + 1;
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
+
+}
+
+static int twl6030ldo_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE);
+
+ if (vsel < 0)
+ return vsel;
+
+ /*
+ * Convert vsel back to microvolts using
+ * mV = 1000 mV + 100 mV * (vsel - 1)
+ */
+ return (1000 + (100 * (vsel - 1))) * 1000;
+}
+
+static struct regulator_ops twl6030ldo_ops = {
+ .list_voltage = twl6030ldo_list_voltage,
+
+ .set_voltage = twl6030ldo_set_voltage,
+ .get_voltage = twl6030ldo_get_voltage,
.enable = twlreg_enable,
.disable = twlreg_disable,
@@ -438,24 +472,16 @@ static struct regulator_ops twlfixed_ops = {
/*----------------------------------------------------------------------*/
-#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
- TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf, TWL4030)
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030)
-#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf) \
- TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
- remap_conf, TWL6030)
#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL6030)
-#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
- family) { \
+#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
.base = offset, \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
@@ -464,14 +490,32 @@ static struct regulator_ops twlfixed_ops = {
.remap = remap_conf, \
.desc = { \
.name = #label, \
- .id = family##_REG_##label, \
+ .id = TWL4030_REG_##label, \
.n_voltages = ARRAY_SIZE(label##_VSEL_table), \
- .ops = &twlldo_ops, \
+ .ops = &twl4030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
+ remap_conf) { \
+ .base = offset, \
+ .id = num, \
+ .min_mV = min_mVolts, \
+ .max_mV = max_mVolts, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL6030_REG_##label, \
+ .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .ops = &twl6030ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
family) { \
.base = offset, \
@@ -519,12 +563,12 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
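The new TWL6030 LDO helpers encode the selector with mV = 1000 + 100 * (vsel - 1), so a 1.8 V request maps to vsel = (1800 - 1000)/100 + 1 = 9, and reading vsel = 9 back yields 1800000 µV. The same arithmetic, isolated from the register accessors:

/* Selector <-> microvolt conversion used by twl6030ldo_set_voltage()
 * and twl6030ldo_get_voltage(); pure arithmetic, no TWL access. */
static int twl6030_uV_to_vsel(int uV)
{
	return (uV / 1000 - 1000) / 100 + 1;
}

static int twl6030_vsel_to_uV(int vsel)
{
	return (1000 + 100 * (vsel - 1)) * 1000;
}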
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6a130375943..10ba12c8c5e 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@ config RTC_DRV_AB3100
Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
+config RTC_DRV_AB8500
+ tristate "ST-Ericsson AB8500 RTC"
+ depends on AB8500_CORE
+ help
+ Select this to enable the ST-Ericsson AB8500 power management IC RTC
+ support. This chip contains a battery- and capacitor-backed RTC.
+
config RTC_DRV_NUC900
tristate "NUC910/NUC920 RTC driver"
depends on RTC_CLASS && ARCH_W90X900
@@ -620,6 +627,16 @@ config RTC_DRV_NUC900
comment "on-CPU RTC drivers"
+config RTC_DRV_DAVINCI
+ tristate "TI DaVinci RTC"
+ depends on ARCH_DAVINCI_DM365
+ help
+ If you say yes here you get support for the RTC on the
+ DaVinci platforms (DM365).
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-davinci.
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -630,7 +647,7 @@ config RTC_DRV_OMAP
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 44ef194a957..5adbba7cf89 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
+obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
+obj-$(CONFIG_RTC_DRV_DAVINCI) += rtc-davinci.o
obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 4704aac2b5a..d26780ea254 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* Clock rate in Hz */
#define AB3100_RTC_CLOCK_RATE 32768
@@ -45,7 +45,6 @@
*/
static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
AB3100_TI3, AB3100_TI4, AB3100_TI5};
unsigned char buf[6];
@@ -61,27 +60,26 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
buf[5] = (fat_time >> 40) & 0xFF;
for (i = 0; i < 6; i++) {
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(dev, 0,
regs[i], buf[i]);
if (err)
return err;
}
/* Set the flag to mark that the clock is now set */
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
+ return abx500_mask_and_set_register_interruptible(dev, 0,
AB3100_RTC,
- 0xFE, 0x01);
+ 0x01, 0x01);
}
static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
unsigned long time;
u8 rtcval;
int err;
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(dev, 0,
AB3100_RTC, &rtcval);
if (err)
return err;
@@ -94,7 +92,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 buf[6];
/* Read out time registers */
- err = ab3100_get_register_page_interruptible(ab3100_data,
+ err = abx500_get_register_page_interruptible(dev, 0,
AB3100_TI0,
buf, 6);
if (err != 0)
@@ -114,7 +112,6 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
unsigned long time;
u64 fat_time;
u8 buf[6];
@@ -122,7 +119,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
int err;
/* Figure out if alarm is enabled or not */
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(dev, 0,
AB3100_RTC, &rtcval);
if (err)
return err;
@@ -133,7 +130,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* No idea how this could be represented */
alarm->pending = 0;
/* Read out alarm registers, only 4 bytes */
- err = ab3100_get_register_page_interruptible(ab3100_data,
+ err = abx500_get_register_page_interruptible(dev, 0,
AB3100_AL0, buf, 4);
if (err)
return err;
@@ -148,7 +145,6 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3};
unsigned char buf[4];
unsigned long secs;
@@ -165,21 +161,19 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
/* Set the alarm */
for (i = 0; i < 4; i++) {
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(dev, 0,
regs[i], buf[i]);
if (err)
return err;
}
/* Then enable the alarm */
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
alarm->enabled << 2);
}
static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
{
- struct ab3100 *ab3100_data = dev_get_drvdata(dev);
-
/*
* It's not possible to enable/disable the alarm IRQ for this RTC.
* It does not actually trigger any IRQ: instead its only function is
@@ -188,12 +182,12 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
* and need to be handled there instead.
*/
if (enabled)
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
1 << 2);
else
- return ab3100_mask_and_set_register_interruptible(ab3100_data,
- AB3100_RTC, ~(1 << 2),
+ return abx500_mask_and_set_register_interruptible(dev, 0,
+ AB3100_RTC, (1 << 2),
0);
}
@@ -210,10 +204,9 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
int err;
u8 regval;
struct rtc_device *rtc;
- struct ab3100 *ab3100_data = platform_get_drvdata(pdev);
/* The first RTC register needs special treatment */
- err = ab3100_get_register_interruptible(ab3100_data,
+ err = abx500_get_register_interruptible(&pdev->dev, 0,
AB3100_RTC, &regval);
if (err) {
dev_err(&pdev->dev, "unable to read RTC register\n");
@@ -231,7 +224,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
* This bit remains until RTC power is lost.
*/
regval = 1 | RTC_SETTING;
- err = ab3100_set_register_interruptible(ab3100_data,
+ err = abx500_set_register_interruptible(&pdev->dev, 0,
AB3100_RTC, regval);
/* Ignore any error on this write */
}
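Besides dropping the private struct ab3100 pointer, the RTC conversion above flips the sense of the mask argument: ab3100_mask_and_set_register_interruptible() was passed the bits to keep (0xFE, ~(1 << 2)), whereas abx500_mask_and_set_register_interruptible() is passed the bits to modify (0x01, 1 << 2). Schematically, the update both end up performing is:

/* Read-modify-write performed by
 * abx500_mask_and_set_register_interruptible(dev, bank, reg, mask, value):
 * bits set in 'mask' are taken from 'value', all other bits are kept. */
static u8 mask_and_set(u8 old, u8 mask, u8 value)
{
	return (old & ~mask) | (value & mask);
}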
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 00000000000..2fda03125e5
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
+ *
+ * RTC clock driver for the RTC part of the AB8500 Power management chip.
+ * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
+ * Linus Walleij <linus.walleij@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/mfd/ab8500.h>
+#include <linux/delay.h>
+
+#define AB8500_RTC_SOFF_STAT_REG 0x0F00
+#define AB8500_RTC_CC_CONF_REG 0x0F01
+#define AB8500_RTC_READ_REQ_REG 0x0F02
+#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB8500_RTC_STAT_REG 0x0F0B
+#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB8500_RTC_CALIB_REG 0x0F0E
+#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
+#define AB8500_REV_REG 0x1080
+
+/* RtcReadRequest bits */
+#define RTC_READ_REQUEST 0x01
+#define RTC_WRITE_REQUEST 0x02
+
+/* RtcCtrl bits */
+#define RTC_ALARM_ENA 0x04
+#define RTC_STATUS_DATA 0x01
+
+#define COUNTS_PER_SEC (0xF000 / 60)
+#define AB8500_RTC_EPOCH 2000
+
+static const unsigned long ab8500_rtc_time_regs[] = {
+ AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
+ AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
+ AB8500_RTC_WATCH_TSECMID_REG
+};
+
+static const unsigned long ab8500_rtc_alarm_regs[] = {
+ AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
+ AB8500_RTC_ALRM_MIN_LOW_REG
+};
+
+/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */
+static unsigned long get_elapsed_seconds(int year)
+{
+ unsigned long secs;
+ struct rtc_time tm = {
+ .tm_year = year - 1900,
+ .tm_mday = 1,
+ };
+
+ /*
+ * This function calculates secs from 1970 and not from
+ * 1900, even if we supply the offset from year 1900.
+ */
+ rtc_tm_to_time(&tm, &secs);
+ return secs;
+}
+
+static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+ unsigned long timeout = jiffies + HZ;
+ int retval, i;
+ unsigned long mins, secs;
+ unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
+
+ /* Request a data read */
+ retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
+ RTC_READ_REQUEST);
+ if (retval < 0)
+ return retval;
+
+ /* Early AB8500 chips will not clear the rtc read request bit */
+ if (ab8500->revision == 0) {
+ msleep(1);
+ } else {
+ /* Wait for some cycles after enabling the rtc read in ab8500 */
+ while (time_before(jiffies, timeout)) {
+ retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
+ if (retval < 0)
+ return retval;
+
+ if (!(retval & RTC_READ_REQUEST))
+ break;
+
+ msleep(1);
+ }
+ }
+
+ /* Read the Watchtime registers */
+ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
+ retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
+ if (retval < 0)
+ return retval;
+ buf[i] = retval;
+ }
+
+ mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+
+ secs = (buf[3] << 8) | buf[4];
+ secs = secs / COUNTS_PER_SEC;
+ secs = secs + (mins * 60);
+
+ /* Add back the initially subtracted number of seconds */
+ secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+ rtc_time_to_tm(secs, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+ int retval, i;
+ unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
+ unsigned long no_secs, no_mins, secs = 0;
+
+ if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
+ dev_dbg(dev, "year should be equal to or greater than %d\n",
+ AB8500_RTC_EPOCH);
+ return -EINVAL;
+ }
+
+ /* Get the number of seconds since 1970 */
+ rtc_tm_to_time(tm, &secs);
+
+ /*
+ * Convert it to the number of seconds since 01-01-2000 00:00:00, since
+ * we only have a small counter in the RTC.
+ */
+ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+ no_mins = secs / 60;
+
+ no_secs = secs % 60;
+ /* Make the seconds count as per the RTC resolution */
+ no_secs = no_secs * COUNTS_PER_SEC;
+
+ buf[4] = no_secs & 0xFF;
+ buf[3] = (no_secs >> 8) & 0xFF;
+
+ buf[2] = no_mins & 0xFF;
+ buf[1] = (no_mins >> 8) & 0xFF;
+ buf[0] = (no_mins >> 16) & 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
+ retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Request a data write */
+ return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
+}
+
+static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+ int retval, i;
+ int rtc_ctrl;
+ unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
+ unsigned long secs, mins;
+
+ /* Check if the alarm is enabled or not */
+ rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
+ if (rtc_ctrl < 0)
+ return rtc_ctrl;
+
+ if (rtc_ctrl & RTC_ALARM_ENA)
+ alarm->enabled = 1;
+ else
+ alarm->enabled = 0;
+
+ alarm->pending = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
+ retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
+ if (retval < 0)
+ return retval;
+ buf[i] = retval;
+ }
+
+ mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
+ secs = mins * 60;
+
+ /* Add back the initially subtracted number of seconds */
+ secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+ rtc_time_to_tm(secs, &alarm->time);
+
+ return rtc_valid_tm(&alarm->time);
+}
+
+static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+
+ return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
+ enabled ? RTC_ALARM_ENA : 0);
+}
+
+static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
+ int retval, i;
+ unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
+ unsigned long mins, secs = 0;
+
+ if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
+ dev_dbg(dev, "year should be equal to or greater than %d\n",
+ AB8500_RTC_EPOCH);
+ return -EINVAL;
+ }
+
+ /* Get the number of seconds since 1970 */
+ rtc_tm_to_time(&alarm->time, &secs);
+
+ /*
+ * Convert it to the number of seconds since 01-01-2000 00:00:00, since
+ * we only have a small counter in the RTC.
+ */
+ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
+
+ mins = secs / 60;
+
+ buf[2] = mins & 0xFF;
+ buf[1] = (mins >> 8) & 0xFF;
+ buf[0] = (mins >> 16) & 0xFF;
+
+ /* Set the alarm time */
+ for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
+ retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
+ if (retval < 0)
+ return retval;
+ }
+
+ return ab8500_rtc_irq_enable(dev, alarm->enabled);
+}
+
+static irqreturn_t rtc_alarm_handler(int irq, void *data)
+{
+ struct rtc_device *rtc = data;
+ unsigned long events = RTC_IRQF | RTC_AF;
+
+ dev_dbg(&rtc->dev, "%s\n", __func__);
+ rtc_update_irq(rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops ab8500_rtc_ops = {
+ .read_time = ab8500_rtc_read_time,
+ .set_time = ab8500_rtc_set_time,
+ .read_alarm = ab8500_rtc_read_alarm,
+ .set_alarm = ab8500_rtc_set_alarm,
+ .alarm_irq_enable = ab8500_rtc_irq_enable,
+};
+
+static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ int err;
+ struct rtc_device *rtc;
+ int rtc_ctrl;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "ALARM");
+ if (irq < 0)
+ return irq;
+
+ /* For RTC supply test */
+ err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
+ RTC_STATUS_DATA);
+ if (err < 0)
+ return err;
+
+ /* Wait for reset by the PorRtc */
+ msleep(1);
+
+ rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
+ if (rtc_ctrl < 0)
+ return rtc_ctrl;
+
+ /* Check if the RTC Supply fails */
+ if (!(rtc_ctrl & RTC_STATUS_DATA)) {
+ dev_err(&pdev->dev, "RTC supply failure\n");
+ return -ENODEV;
+ }
+
+ rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "Registration failed\n");
+ err = PTR_ERR(rtc);
+ return err;
+ }
+
+ err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
+ "ab8500-rtc", rtc);
+ if (err < 0) {
+ rtc_device_unregister(rtc);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+}
+
+static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq_byname(pdev, "ALARM");
+
+ free_irq(irq, rtc);
+ rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_rtc_driver = {
+ .driver = {
+ .name = "ab8500-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_rtc_probe,
+ .remove = __devexit_p(ab8500_rtc_remove),
+};
+
+static int __init ab8500_rtc_init(void)
+{
+ return platform_driver_register(&ab8500_rtc_driver);
+}
+
+static void __exit ab8500_rtc_exit(void)
+{
+ platform_driver_unregister(&ab8500_rtc_driver);
+}
+
+module_init(ab8500_rtc_init);
+module_exit(ab8500_rtc_exit);
+MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 RTC Driver");
+MODULE_LICENSE("GPL v2");
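The new AB8500 RTC stores time as a 24-bit minute counter plus a 16-bit sub-minute tick counter running at COUNTS_PER_SEC = 0xF000 / 60 = 1024 ticks per second, both counted from 01-01-2000. A worked sketch of the packing done in ab8500_rtc_set_time(): 125 seconds past that epoch gives 2 minutes plus 5 * 1024 = 0x1400 ticks.

/* Pack "seconds past 2000-01-01" into the five watchtime register values
 * the way ab8500_rtc_set_time() does (pure arithmetic sketch). */
static void ab8500_pack_time(unsigned long secs, unsigned char buf[5])
{
	unsigned long mins  = secs / 60;
	unsigned long ticks = (secs % 60) * (0xF000 / 60);	/* 1024 ticks/s */

	buf[0] = (mins >> 16) & 0xFF;	/* TMIN_HI  */
	buf[1] = (mins >> 8) & 0xFF;	/* TMIN_MID */
	buf[2] = mins & 0xFF;		/* TMIN_LOW */
	buf[3] = (ticks >> 8) & 0xFF;	/* TSECHI   */
	buf[4] = ticks & 0xFF;		/* TSECMID  */
}
/* e.g. secs = 125  ->  buf = { 0x00, 0x00, 0x02, 0x14, 0x00 } */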
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e9aa814ddd2..11b8ea29d2b 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -238,31 +238,32 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
rtc_control = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
- /* REVISIT this assumes PC style usage: always BCD */
-
- if (((unsigned)t->time.tm_sec) < 0x60)
- t->time.tm_sec = bcd2bin(t->time.tm_sec);
- else
- t->time.tm_sec = -1;
- if (((unsigned)t->time.tm_min) < 0x60)
- t->time.tm_min = bcd2bin(t->time.tm_min);
- else
- t->time.tm_min = -1;
- if (((unsigned)t->time.tm_hour) < 0x24)
- t->time.tm_hour = bcd2bin(t->time.tm_hour);
- else
- t->time.tm_hour = -1;
-
- if (cmos->day_alrm) {
- if (((unsigned)t->time.tm_mday) <= 0x31)
- t->time.tm_mday = bcd2bin(t->time.tm_mday);
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ if (((unsigned)t->time.tm_sec) < 0x60)
+ t->time.tm_sec = bcd2bin(t->time.tm_sec);
else
- t->time.tm_mday = -1;
- if (cmos->mon_alrm) {
- if (((unsigned)t->time.tm_mon) <= 0x12)
- t->time.tm_mon = bcd2bin(t->time.tm_mon) - 1;
+ t->time.tm_sec = -1;
+ if (((unsigned)t->time.tm_min) < 0x60)
+ t->time.tm_min = bcd2bin(t->time.tm_min);
+ else
+ t->time.tm_min = -1;
+ if (((unsigned)t->time.tm_hour) < 0x24)
+ t->time.tm_hour = bcd2bin(t->time.tm_hour);
+ else
+ t->time.tm_hour = -1;
+
+ if (cmos->day_alrm) {
+ if (((unsigned)t->time.tm_mday) <= 0x31)
+ t->time.tm_mday = bcd2bin(t->time.tm_mday);
else
- t->time.tm_mon = -1;
+ t->time.tm_mday = -1;
+
+ if (cmos->mon_alrm) {
+ if (((unsigned)t->time.tm_mon) <= 0x12)
+ t->time.tm_mon = bcd2bin(t->time.tm_mon)-1;
+ else
+ t->time.tm_mon = -1;
+ }
}
}
t->time.tm_year = -1;
@@ -322,29 +323,26 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char mon, mday, hrs, min, sec;
+ unsigned char mon, mday, hrs, min, sec, rtc_control;
if (!is_valid_irq(cmos->irq))
return -EIO;
- /* REVISIT this assumes PC style usage: always BCD */
-
- /* Writing 0xff means "don't care" or "match all". */
-
mon = t->time.tm_mon + 1;
- mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
-
mday = t->time.tm_mday;
- mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
-
hrs = t->time.tm_hour;
- hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
-
min = t->time.tm_min;
- min = (min < 60) ? bin2bcd(min) : 0xff;
-
sec = t->time.tm_sec;
- sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ /* Writing 0xff means "don't care" or "match all". */
+ mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
+ mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
+ hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
+ min = (min < 60) ? bin2bcd(min) : 0xff;
+ sec = (sec < 60) ? bin2bcd(sec) : 0xff;
+ }
spin_lock_irq(&rtc_lock);
@@ -478,7 +476,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
"update_IRQ\t: %s\n"
"HPET_emulated\t: %s\n"
// "square_wave\t: %s\n"
- // "BCD\t\t: %s\n"
+ "BCD\t\t: %s\n"
"DST_enable\t: %s\n"
"periodic_freq\t: %d\n"
"batt_status\t: %s\n",
@@ -486,7 +484,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
(rtc_control & RTC_UIE) ? "yes" : "no",
is_hpet_enabled() ? "yes" : "no",
// (rtc_control & RTC_SQWE) ? "yes" : "no",
- // (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
+ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
(rtc_control & RTC_DST_EN) ? "yes" : "no",
cmos->rtc->irq_freq,
(valid & RTC_VRT) ? "okay" : "dead");
@@ -519,7 +517,8 @@ static const struct rtc_class_ops cmos_rtc_ops = {
#define NVRAM_OFFSET (RTC_REG_D + 1)
static ssize_t
-cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int retval;
@@ -547,7 +546,8 @@ cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-cmos_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+cmos_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct cmos_rtc *cmos;
@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
}
}
+ cmos_rtc.dev = dev;
+ dev_set_drvdata(dev, &cmos_rtc);
+
cmos_rtc.rtc = rtc_device_register(driver_name, dev,
&cmos_rtc_ops, THIS_MODULE);
if (IS_ERR(cmos_rtc.rtc)) {
@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup0;
}
- cmos_rtc.dev = dev;
- dev_set_drvdata(dev, &cmos_rtc);
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
spin_lock_irq(&rtc_lock);
@@ -749,12 +750,11 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
spin_unlock_irq(&rtc_lock);
- /* FIXME teach the alarm code how to handle binary mode;
+	/* FIXME: the driver does not handle 12-hour mode;
* <asm-generic/rtc.h> doesn't know 12-hour mode either.
*/
- if (is_valid_irq(rtc_irq) &&
- (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))) {
- dev_dbg(dev, "only 24-hr BCD mode supported\n");
+ if (is_valid_irq(rtc_irq) && !(rtc_control & RTC_24H)) {
+ dev_warn(dev, "only 24-hr supported\n");
retval = -ENXIO;
goto cleanup1;
}
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
new file mode 100644
index 00000000000..92a8f6cacda
--- /dev/null
+++ b/drivers/rtc/rtc-davinci.c
@@ -0,0 +1,673 @@
+/*
+ * DaVinci Power Management and Real Time Clock Driver for TI platforms
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * The DaVinci RTC is a simple RTC with the following counters:
+ * Sec:  0 - 59             : BCD count
+ * Min:  0 - 59             : BCD count
+ * Hour: 0 - 23             : BCD count
+ * Day:  0 - 0x7FFF (32767) : binary count (over 89 years)
+ */
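+
+/*
+ * Illustrative mapping of the day counter (see convertfromdays() and
+ * convert2days() below, which use a 2000-01-01 epoch): a count of 0 is
+ * 2000-01-01 and a count of 366 is 2001-01-01, since 2000 is a leap year.
+ */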
+
+/* PRTC interface registers */
+#define DAVINCI_PRTCIF_PID 0x00
+#define PRTCIF_CTLR 0x04
+#define PRTCIF_LDATA 0x08
+#define PRTCIF_UDATA 0x0C
+#define PRTCIF_INTEN 0x10
+#define PRTCIF_INTFLG 0x14
+
+/* PRTCIF_CTLR bit fields */
+#define PRTCIF_CTLR_BUSY BIT(31)
+#define PRTCIF_CTLR_SIZE BIT(25)
+#define PRTCIF_CTLR_DIR BIT(24)
+#define PRTCIF_CTLR_BENU_MSB BIT(23)
+#define PRTCIF_CTLR_BENU_3RD_BYTE BIT(22)
+#define PRTCIF_CTLR_BENU_2ND_BYTE BIT(21)
+#define PRTCIF_CTLR_BENU_LSB BIT(20)
+#define PRTCIF_CTLR_BENU_MASK (0x00F00000)
+#define PRTCIF_CTLR_BENL_MSB BIT(19)
+#define PRTCIF_CTLR_BENL_3RD_BYTE BIT(18)
+#define PRTCIF_CTLR_BENL_2ND_BYTE BIT(17)
+#define PRTCIF_CTLR_BENL_LSB BIT(16)
+#define PRTCIF_CTLR_BENL_MASK (0x000F0000)
+
+/* PRTCIF_INTEN bit fields */
+#define PRTCIF_INTEN_RTCSS BIT(1)
+#define PRTCIF_INTEN_RTCIF BIT(0)
+#define PRTCIF_INTEN_MASK (PRTCIF_INTEN_RTCSS \
+ | PRTCIF_INTEN_RTCIF)
+
+/* PRTCIF_INTFLG bit fields */
+#define PRTCIF_INTFLG_RTCSS BIT(1)
+#define PRTCIF_INTFLG_RTCIF BIT(0)
+#define PRTCIF_INTFLG_MASK (PRTCIF_INTFLG_RTCSS \
+ | PRTCIF_INTFLG_RTCIF)
+
+/* PRTC subsystem registers */
+#define PRTCSS_RTC_INTC_EXTENA1 (0x0C)
+#define PRTCSS_RTC_CTRL (0x10)
+#define PRTCSS_RTC_WDT (0x11)
+#define PRTCSS_RTC_TMR0 (0x12)
+#define PRTCSS_RTC_TMR1 (0x13)
+#define PRTCSS_RTC_CCTRL (0x14)
+#define PRTCSS_RTC_SEC (0x15)
+#define PRTCSS_RTC_MIN (0x16)
+#define PRTCSS_RTC_HOUR (0x17)
+#define PRTCSS_RTC_DAY0 (0x18)
+#define PRTCSS_RTC_DAY1 (0x19)
+#define PRTCSS_RTC_AMIN (0x1A)
+#define PRTCSS_RTC_AHOUR (0x1B)
+#define PRTCSS_RTC_ADAY0 (0x1C)
+#define PRTCSS_RTC_ADAY1 (0x1D)
+#define PRTCSS_RTC_CLKC_CNT (0x20)
+
+/* PRTCSS_RTC_INTC_EXTENA1 */
+#define PRTCSS_RTC_INTC_EXTENA1_MASK (0x07)
+
+/* PRTCSS_RTC_CTRL bit fields */
+#define PRTCSS_RTC_CTRL_WDTBUS BIT(7)
+#define PRTCSS_RTC_CTRL_WEN BIT(6)
+#define PRTCSS_RTC_CTRL_WDRT BIT(5)
+#define PRTCSS_RTC_CTRL_WDTFLG BIT(4)
+#define PRTCSS_RTC_CTRL_TE BIT(3)
+#define PRTCSS_RTC_CTRL_TIEN BIT(2)
+#define PRTCSS_RTC_CTRL_TMRFLG BIT(1)
+#define PRTCSS_RTC_CTRL_TMMD BIT(0)
+
+/* PRTCSS_RTC_CCTRL bit fields */
+#define PRTCSS_RTC_CCTRL_CALBUSY BIT(7)
+#define PRTCSS_RTC_CCTRL_DAEN BIT(5)
+#define PRTCSS_RTC_CCTRL_HAEN BIT(4)
+#define PRTCSS_RTC_CCTRL_MAEN BIT(3)
+#define PRTCSS_RTC_CCTRL_ALMFLG BIT(2)
+#define PRTCSS_RTC_CCTRL_AIEN BIT(1)
+#define PRTCSS_RTC_CCTRL_CAEN BIT(0)
+
+static DEFINE_SPINLOCK(davinci_rtc_lock);
+
+struct davinci_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ resource_size_t pbase;
+ size_t base_size;
+ int irq;
+};
+
+static inline void rtcif_write(struct davinci_rtc *davinci_rtc,
+ u32 val, u32 addr)
+{
+ writel(val, davinci_rtc->base + addr);
+}
+
+static inline u32 rtcif_read(struct davinci_rtc *davinci_rtc, u32 addr)
+{
+ return readl(davinci_rtc->base + addr);
+}
+
+static inline void rtcif_wait(struct davinci_rtc *davinci_rtc)
+{
+ while (rtcif_read(davinci_rtc, PRTCIF_CTLR) & PRTCIF_CTLR_BUSY)
+ cpu_relax();
+}
+
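+/*
+ * PRTC subsystem registers are not mapped directly; the helpers below go
+ * through the PRTCIF interface: program PRTCIF_CTLR with the register
+ * address, byte enable and transfer direction, move the byte through
+ * PRTCIF_LDATA, and wait for the BUSY bit to clear around each transfer.
+ */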
+static inline void rtcss_write(struct davinci_rtc *davinci_rtc,
+ unsigned long val, u8 addr)
+{
+ rtcif_wait(davinci_rtc);
+
+ rtcif_write(davinci_rtc, PRTCIF_CTLR_BENL_LSB | addr, PRTCIF_CTLR);
+ rtcif_write(davinci_rtc, val, PRTCIF_LDATA);
+
+ rtcif_wait(davinci_rtc);
+}
+
+static inline u8 rtcss_read(struct davinci_rtc *davinci_rtc, u8 addr)
+{
+ rtcif_wait(davinci_rtc);
+
+ rtcif_write(davinci_rtc, PRTCIF_CTLR_DIR | PRTCIF_CTLR_BENL_LSB | addr,
+ PRTCIF_CTLR);
+
+ rtcif_wait(davinci_rtc);
+
+ return rtcif_read(davinci_rtc, PRTCIF_LDATA);
+}
+
+static inline void davinci_rtcss_calendar_wait(struct davinci_rtc *davinci_rtc)
+{
+ while (rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_CALBUSY)
+ cpu_relax();
+}
+
+static irqreturn_t davinci_rtc_interrupt(int irq, void *class_dev)
+{
+ struct davinci_rtc *davinci_rtc = class_dev;
+ unsigned long events = 0;
+ u32 irq_flg;
+ u8 alm_irq, tmr_irq;
+ u8 rtc_ctrl, rtc_cctrl;
+ int ret = IRQ_NONE;
+
+ irq_flg = rtcif_read(davinci_rtc, PRTCIF_INTFLG) &
+ PRTCIF_INTFLG_RTCSS;
+
+ alm_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_ALMFLG;
+
+ tmr_irq = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL) &
+ PRTCSS_RTC_CTRL_TMRFLG;
+
+ if (irq_flg) {
+ if (alm_irq) {
+ events |= RTC_IRQF | RTC_AF;
+ rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_ALMFLG;
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+ } else if (tmr_irq) {
+ events |= RTC_IRQF | RTC_PF;
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TMRFLG;
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+ }
+
+ rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS,
+ PRTCIF_INTFLG);
+ rtc_update_irq(davinci_rtc->rtc, 1, events);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int
+davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u8 rtc_ctrl;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+
+ switch (cmd) {
+ case RTC_WIE_ON:
+ rtc_ctrl |= PRTCSS_RTC_CTRL_WEN | PRTCSS_RTC_CTRL_WDTFLG;
+ break;
+ case RTC_WIE_OFF:
+ rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
+ break;
+ case RTC_UIE_OFF:
+ case RTC_UIE_ON:
+ ret = -ENOTTY;
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return ret;
+}
+
+static int convertfromdays(u16 days, struct rtc_time *tm)
+{
+ int tmp_days, year, mon;
+
+ for (year = 2000;; year++) {
+ tmp_days = rtc_year_days(1, 12, year);
+ if (days >= tmp_days)
+ days -= tmp_days;
+ else {
+ for (mon = 0;; mon++) {
+ tmp_days = rtc_month_days(mon, year);
+ if (days >= tmp_days) {
+ days -= tmp_days;
+ } else {
+ tm->tm_year = year - 1900;
+ tm->tm_mon = mon;
+ tm->tm_mday = days + 1;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static int convert2days(u16 *days, struct rtc_time *tm)
+{
+ int i;
+ *days = 0;
+
+	/* tm_year counts from 1900; the day counter epoch here is 2000 */
+ if (tm->tm_year < 100 || tm->tm_year > 199)
+ return -EINVAL;
+
+ for (i = 2000; i < 1900 + tm->tm_year; i++)
+ *days += rtc_year_days(1, 12, i);
+
+ *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year);
+
+ return 0;
+}
+
+static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days = 0;
+ u8 day0, day1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_sec = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_SEC));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_MIN));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ tm->tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_HOUR));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_DAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ days |= day1;
+ days <<= 8;
+ days |= day0;
+
+ if (convertfromdays(days, tm) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days;
+ u8 rtc_cctrl;
+ unsigned long flags;
+
+ if (convert2days(&days, tm) < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_sec), PRTCSS_RTC_SEC);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_min), PRTCSS_RTC_MIN);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(tm->tm_hour), PRTCSS_RTC_HOUR);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_DAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_DAY1);
+
+ rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_CAEN;
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 rtc_cctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL);
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ if (enabled)
+ rtc_cctrl |= PRTCSS_RTC_CCTRL_DAEN |
+ PRTCSS_RTC_CCTRL_HAEN |
+ PRTCSS_RTC_CCTRL_MAEN |
+ PRTCSS_RTC_CCTRL_ALMFLG |
+ PRTCSS_RTC_CCTRL_AIEN;
+ else
+ rtc_cctrl &= ~PRTCSS_RTC_CCTRL_AIEN;
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, rtc_cctrl, PRTCSS_RTC_CCTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ u16 days = 0;
+ u8 day0, day1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ alm->time.tm_min = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AMIN));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ alm->time.tm_hour = bcd2bin(rtcss_read(davinci_rtc, PRTCSS_RTC_AHOUR));
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day0 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ day1 = rtcss_read(davinci_rtc, PRTCSS_RTC_ADAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+ days |= day1;
+ days <<= 8;
+ days |= day0;
+
+ if (convertfromdays(days, &alm->time) < 0)
+ return -EINVAL;
+
+ alm->pending = !!(rtcss_read(davinci_rtc,
+ PRTCSS_RTC_CCTRL) &
+ PRTCSS_RTC_CCTRL_AIEN);
+ alm->enabled = alm->pending && device_may_wakeup(dev);
+
+ return 0;
+}
+
+static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u16 days;
+
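+	/*
+	 * If only a time of day was given (date fields left at -1), alarm on
+	 * the next occurrence of that time: today, or tomorrow if it has
+	 * already passed.
+	 */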
+ if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0
+ && alm->time.tm_year < 0) {
+ struct rtc_time tm;
+ unsigned long now, then;
+
+ davinci_rtc_read_time(dev, &tm);
+ rtc_tm_to_time(&tm, &now);
+
+ alm->time.tm_mday = tm.tm_mday;
+ alm->time.tm_mon = tm.tm_mon;
+ alm->time.tm_year = tm.tm_year;
+ rtc_tm_to_time(&alm->time, &then);
+
+ if (then < now) {
+ rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+ alm->time.tm_mday = tm.tm_mday;
+ alm->time.tm_mon = tm.tm_mon;
+ alm->time.tm_year = tm.tm_year;
+ }
+ }
+
+ if (convert2days(&days, &alm->time) < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_min), PRTCSS_RTC_AMIN);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, bin2bcd(alm->time.tm_hour), PRTCSS_RTC_AHOUR);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, days & 0xFF, PRTCSS_RTC_ADAY0);
+
+ davinci_rtcss_calendar_wait(davinci_rtc);
+ rtcss_write(davinci_rtc, (days & 0xFF00) >> 8, PRTCSS_RTC_ADAY1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
+ u8 rtc_ctrl;
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
+
+ if (enabled) {
+ while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
+ & PRTCSS_RTC_CTRL_WDTBUS)
+ cpu_relax();
+
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
+
+ rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
+ PRTCSS_RTC_CTRL_TMMD |
+ PRTCSS_RTC_CTRL_TMRFLG;
+ } else
+ rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
+
+ rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
+{
+ struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
+ unsigned long flags;
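+	/*
+	 * Timer compare value: 0x8000 (32768) scaled down by the requested
+	 * rate, e.g. freq == 2 gives 0x4000; this assumes freq is a non-zero
+	 * power of two.
+	 */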
+ u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
+
+ spin_lock_irqsave(&davinci_rtc_lock, flags);
+
+ rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
+ rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
+
+ spin_unlock_irqrestore(&davinci_rtc_lock, flags);
+
+ return 0;
+}
+
+static struct rtc_class_ops davinci_rtc_ops = {
+ .ioctl = davinci_rtc_ioctl,
+ .read_time = davinci_rtc_read_time,
+ .set_time = davinci_rtc_set_time,
+ .alarm_irq_enable = davinci_rtc_alarm_irq_enable,
+ .read_alarm = davinci_rtc_read_alarm,
+ .set_alarm = davinci_rtc_set_alarm,
+ .irq_set_state = davinci_rtc_irq_set_state,
+ .irq_set_freq = davinci_rtc_irq_set_freq,
+};
+
+static int __init davinci_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_rtc *davinci_rtc;
+ struct resource *res, *mem;
+ int ret = 0;
+
+ davinci_rtc = kzalloc(sizeof(struct davinci_rtc), GFP_KERNEL);
+ if (!davinci_rtc) {
+ dev_dbg(dev, "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ davinci_rtc->irq = platform_get_irq(pdev, 0);
+ if (davinci_rtc->irq < 0) {
+ dev_err(dev, "no RTC irq\n");
+ ret = davinci_rtc->irq;
+ goto fail1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no mem resource\n");
+ ret = -EINVAL;
+ goto fail1;
+ }
+
+ davinci_rtc->pbase = res->start;
+ davinci_rtc->base_size = resource_size(res);
+
+ mem = request_mem_region(davinci_rtc->pbase, davinci_rtc->base_size,
+ pdev->name);
+ if (!mem) {
+ dev_err(dev, "RTC registers at %08x are not free\n",
+ davinci_rtc->pbase);
+ ret = -EBUSY;
+ goto fail1;
+ }
+
+ davinci_rtc->base = ioremap(davinci_rtc->pbase, davinci_rtc->base_size);
+ if (!davinci_rtc->base) {
+ dev_err(dev, "unable to ioremap MEM resource\n");
+ ret = -ENOMEM;
+ goto fail2;
+ }
+
+	platform_set_drvdata(pdev, davinci_rtc);
+
+	davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+				    &davinci_rtc_ops, THIS_MODULE);
+ if (IS_ERR(davinci_rtc->rtc)) {
+ dev_err(dev, "unable to register RTC device, err %ld\n",
+ PTR_ERR(davinci_rtc->rtc));
+		ret = PTR_ERR(davinci_rtc->rtc);
+		goto fail3;
+ }
+
+ rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG);
+ rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_INTC_EXTENA1);
+
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CTRL);
+ rtcss_write(davinci_rtc, 0, PRTCSS_RTC_CCTRL);
+
+ ret = request_irq(davinci_rtc->irq, davinci_rtc_interrupt,
+ IRQF_DISABLED, "davinci_rtc", davinci_rtc);
+ if (ret < 0) {
+ dev_err(dev, "unable to register davinci RTC interrupt\n");
+ goto fail4;
+ }
+
+ /* Enable interrupts */
+ rtcif_write(davinci_rtc, PRTCIF_INTEN_RTCSS, PRTCIF_INTEN);
+ rtcss_write(davinci_rtc, PRTCSS_RTC_INTC_EXTENA1_MASK,
+ PRTCSS_RTC_INTC_EXTENA1);
+
+ rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ return 0;
+
+fail4:
+ rtc_device_unregister(davinci_rtc->rtc);
+fail3:
+ iounmap(davinci_rtc->base);
+fail2:
+ release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+fail1:
+ kfree(davinci_rtc);
+
+ return ret;
+}
+
+static int __devexit davinci_rtc_remove(struct platform_device *pdev)
+{
+ struct davinci_rtc *davinci_rtc = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ rtcif_write(davinci_rtc, 0, PRTCIF_INTEN);
+
+ free_irq(davinci_rtc->irq, davinci_rtc);
+
+ rtc_device_unregister(davinci_rtc->rtc);
+
+ iounmap(davinci_rtc->base);
+ release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(davinci_rtc);
+
+ return 0;
+}
+
+static struct platform_driver davinci_rtc_driver = {
+ .probe = davinci_rtc_probe,
+ .remove = __devexit_p(davinci_rtc_remove),
+ .driver = {
+ .name = "rtc_davinci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rtc_init(void)
+{
+ return platform_driver_probe(&davinci_rtc_driver, davinci_rtc_probe);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+ platform_driver_unregister(&davinci_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_AUTHOR("Miguel Aguilar <miguel.aguilar@ridgerun.com>");
+MODULE_DESCRIPTION("Texas Instruments DaVinci PRTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 532acf9b05d..359d1e04626 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -16,7 +16,6 @@
#include <linux/rtc.h>
#include <linux/io.h>
#include <linux/bcd.h>
-#include <asm/rtc.h>
#define DRV_NAME "rtc-ds1302"
#define DRV_VERSION "0.1.1"
@@ -34,14 +33,55 @@
#define RTC_ADDR_MIN 0x01 /* Address of minute register */
#define RTC_ADDR_SEC 0x00 /* Address of second register */
+#ifdef CONFIG_SH_SECUREEDGE5410
+#include <asm/rtc.h>
+#include <mach/snapgear.h>
+
#define RTC_RESET 0x1000
#define RTC_IODATA 0x0800
#define RTC_SCLK 0x0400
-#ifdef CONFIG_SH_SECUREEDGE5410
-#include <mach/snapgear.h>
#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
#define get_dp() SECUREEDGE_READ_IOPORT()
+#define ds1302_set_tx()
+#define ds1302_set_rx()
+
+static inline int ds1302_hw_init(void)
+{
+ return 0;
+}
+
+static inline void ds1302_reset(void)
+{
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+}
+
+static inline void ds1302_clock(void)
+{
+ set_dp(get_dp() | RTC_SCLK); /* clock high */
+ set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+}
+
+static inline void ds1302_start(void)
+{
+ set_dp(get_dp() | RTC_RESET);
+}
+
+static inline void ds1302_stop(void)
+{
+ set_dp(get_dp() & ~RTC_RESET);
+}
+
+static inline void ds1302_txbit(int bit)
+{
+ set_dp((get_dp() & ~RTC_IODATA) | (bit ? RTC_IODATA : 0));
+}
+
+static inline int ds1302_rxbit(void)
+{
+ return !!(get_dp() & RTC_IODATA);
+}
+
#else
#error "Add support for your platform"
#endif
@@ -50,11 +90,11 @@ static void ds1302_sendbits(unsigned int val)
{
int i;
+ ds1302_set_tx();
+
for (i = 8; (i); i--, val >>= 1) {
- set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ?
- RTC_IODATA : 0));
- set_dp(get_dp() | RTC_SCLK); /* clock high */
- set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ ds1302_txbit(val & 0x1);
+ ds1302_clock();
}
}
@@ -63,10 +103,11 @@ static unsigned int ds1302_recvbits(void)
unsigned int val;
int i;
+ ds1302_set_rx();
+
for (i = 0, val = 0; (i < 8); i++) {
- val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i);
- set_dp(get_dp() | RTC_SCLK); /* clock high */
- set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ val |= (ds1302_rxbit() << i);
+ ds1302_clock();
}
return val;
@@ -76,23 +117,24 @@ static unsigned int ds1302_readbyte(unsigned int addr)
{
unsigned int val;
- set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+ ds1302_reset();
- set_dp(get_dp() | RTC_RESET);
+ ds1302_start();
ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
val = ds1302_recvbits();
- set_dp(get_dp() & ~RTC_RESET);
+ ds1302_stop();
return val;
}
static void ds1302_writebyte(unsigned int addr, unsigned int val)
{
- set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
- set_dp(get_dp() | RTC_RESET);
+ ds1302_reset();
+
+ ds1302_start();
ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
ds1302_sendbits(val);
- set_dp(get_dp() & ~RTC_RESET);
+ ds1302_stop();
}
static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -167,13 +209,20 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
+ if (ds1302_hw_init()) {
+		dev_err(&pdev->dev, "Failed to init communication channel\n");
+ return -EINVAL;
+ }
+
/* Reset */
- set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+ ds1302_reset();
/* Write a magic value to the DS1302 RAM, and see if it sticks. */
ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
- if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42)
+ if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) {
+		dev_err(&pdev->dev, "Failed to probe\n");
return -ENODEV;
+ }
rtc = rtc_device_register("ds1302", &pdev->dev,
&ds1302_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 7836c9cec55..48da85e97ca 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -542,7 +542,8 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
}
static ssize_t
-ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct spi_device *spi;
@@ -572,7 +573,8 @@ ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1305_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct spi_device *spi;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c4ec5c158aa..de033b7ac21 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -556,7 +556,8 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
#define NVRAM_SIZE 56
static ssize_t
-ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
@@ -580,7 +581,8 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ds1307_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 06b8566c453..37268e97de4 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -423,8 +423,9 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
};
static ssize_t
-ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+ds1511_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
{
ssize_t count;
@@ -452,8 +453,9 @@ ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
}
static ssize_t
-ds1511_nvram_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+ds1511_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
ssize_t count;
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 244f9994bcb..ff432e2ca27 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -252,7 +252,7 @@ static const struct rtc_class_ops ds1553_rtc_ops = {
.update_irq_enable = ds1553_rtc_update_irq_enable,
};
-static ssize_t ds1553_nvram_read(struct kobject *kobj,
+static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -267,7 +267,7 @@ static ssize_t ds1553_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t ds1553_nvram_write(struct kobject *kobj,
+static ssize_t ds1553_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2b4b0bc42d6..042630c90dd 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -128,7 +128,7 @@ static const struct rtc_class_ops ds1742_rtc_ops = {
.set_time = ds1742_rtc_set_time,
};
-static ssize_t ds1742_nvram_read(struct kobject *kobj,
+static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -143,7 +143,7 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t ds1742_nvram_write(struct kobject *kobj,
+static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 054e05294af..468200c38ec 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -462,39 +462,16 @@ isl1208_sysfs_store_usr(struct device *dev,
static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
isl1208_sysfs_store_usr);
-static int
-isl1208_sysfs_register(struct device *dev)
-{
- int err;
-
- err = device_create_file(dev, &dev_attr_atrim);
- if (err)
- return err;
-
- err = device_create_file(dev, &dev_attr_dtrim);
- if (err) {
- device_remove_file(dev, &dev_attr_atrim);
- return err;
- }
-
- err = device_create_file(dev, &dev_attr_usr);
- if (err) {
- device_remove_file(dev, &dev_attr_atrim);
- device_remove_file(dev, &dev_attr_dtrim);
- }
-
- return 0;
-}
-
-static int
-isl1208_sysfs_unregister(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_dtrim);
- device_remove_file(dev, &dev_attr_atrim);
- device_remove_file(dev, &dev_attr_usr);
+static struct attribute *isl1208_rtc_attrs[] = {
+ &dev_attr_atrim.attr,
+ &dev_attr_dtrim.attr,
+ &dev_attr_usr.attr,
+ NULL
+};
- return 0;
-}
+static const struct attribute_group isl1208_rtc_sysfs_files = {
+ .attrs = isl1208_rtc_attrs,
+};
static int
isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -529,7 +506,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_warn(&client->dev, "rtc power failure detected, "
"please set clock.\n");
- rc = isl1208_sysfs_register(&client->dev);
+ rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
if (rc)
goto exit_unregister;
@@ -546,7 +523,7 @@ isl1208_remove(struct i2c_client *client)
{
struct rtc_device *rtc = i2c_get_clientdata(client);
- isl1208_sysfs_unregister(&client->dev);
+ sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
rtc_device_unregister(rtc);
return 0;
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 60fe266f0f4..6dc4e624141 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@ static void wdt_disable(void)
static ssize_t wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- /* Can't seek (pwrite) on this device
- if (ppos != &file->f_pos)
- return -ESPIPE;
- */
if (count) {
wdt_ping();
return 1;
@@ -623,7 +619,7 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
* according to their available features. We only actually usefully support
* querying capabilities and current status.
*/
-static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+static int wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int new_margin, rv;
@@ -676,6 +672,18 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
return -ENOTTY;
}
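+/*
+ * Wrapper that keeps the old handler under the big kernel lock while the
+ * driver moves from the .ioctl file operation to .unlocked_ioctl.
+ */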
+static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = wdt_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/**
* wdt_open:
* @inode: inode of device
@@ -695,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file)
*/
wdt_is_open = 1;
unlock_kernel();
- return 0;
+ return nonseekable_open(inode, file);
}
return -ENODEV;
}
@@ -736,7 +744,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.read = wdt_read,
- .ioctl = wdt_ioctl,
+ .unlocked_ioctl = wdt_unlocked_ioctl,
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 365ff3ac234..be8359fdb65 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -343,7 +343,7 @@ static const struct rtc_class_ops m48t02_rtc_ops = {
.set_time = m48t59_rtc_set_time,
};
-static ssize_t m48t59_nvram_read(struct kobject *kobj,
+static ssize_t m48t59_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -363,7 +363,7 @@ static ssize_t m48t59_nvram_read(struct kobject *kobj,
return cnt;
}
-static ssize_t m48t59_nvram_write(struct kobject *kobj,
+static ssize_t m48t59_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index d71fe61db1d..25ec921db07 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -379,7 +379,6 @@ static struct rtc_class_ops mxc_rtc_ops = {
static int __init mxc_rtc_probe(struct platform_device *pdev)
{
- struct clk *clk;
struct resource *res;
struct rtc_device *rtc;
struct rtc_plat_data *pdata = NULL;
@@ -402,14 +401,15 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- clk = clk_get(&pdev->dev, "ckil");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
+ pdata->clk = clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(pdata->clk)) {
+ dev_err(&pdev->dev, "unable to get clock!\n");
+ ret = PTR_ERR(pdata->clk);
goto exit_free_pdata;
}
- rate = clk_get_rate(clk);
- clk_put(clk);
+ clk_enable(pdata->clk);
+ rate = clk_get_rate(pdata->clk);
if (rate == 32768)
reg = RTC_INPUT_CLK_32768HZ;
@@ -420,7 +420,7 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
else {
dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
ret = -EINVAL;
- goto exit_free_pdata;
+ goto exit_put_clk;
}
reg |= RTC_ENABLE_BIT;
@@ -428,18 +428,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
dev_err(&pdev->dev, "hardware module can't be enabled!\n");
ret = -EIO;
- goto exit_free_pdata;
- }
-
- pdata->clk = clk_get(&pdev->dev, "rtc");
- if (IS_ERR(pdata->clk)) {
- dev_err(&pdev->dev, "unable to get clock!\n");
- ret = PTR_ERR(pdata->clk);
- goto exit_free_pdata;
+ goto exit_put_clk;
}
- clk_enable(pdata->clk);
-
rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index c9522f3bc21..9718aaaa821 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -1,8 +1,8 @@
/*
* An I2C driver for the Epson RX8581 RTC
*
- * Author: Martyn Welch <martyn.welch@gefanuc.com>
- * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -272,7 +272,7 @@ static void __exit rx8581_exit(void)
i2c_del_driver(&rx8581_driver);
}
-MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("Epson RX-8581 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4969b6059c8..e5972b2c17b 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -29,6 +29,11 @@
#include <asm/irq.h>
#include <plat/regs-rtc.h>
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C64XX,
+};
+
/* I have yet to find an S3C implementation with more than one
* of these rtc blocks in */
@@ -37,6 +42,7 @@ static struct resource *s3c_rtc_mem;
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno = NO_IRQ;
static int s3c_rtc_tickno = NO_IRQ;
+static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
@@ -80,12 +86,25 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
pr_debug("%s: pie=%d\n", __func__, enabled);
spin_lock_irq(&s3c_rtc_pie_lock);
- tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
- if (enabled)
- tmp |= S3C2410_TICNT_ENABLE;
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+ tmp &= ~S3C64XX_RTCCON_TICEN;
+
+ if (enabled)
+ tmp |= S3C64XX_RTCCON_TICEN;
+
+ writeb(tmp, s3c_rtc_base + S3C2410_RTCCON);
+ } else {
+ tmp = readb(s3c_rtc_base + S3C2410_TICNT);
+ tmp &= ~S3C2410_TICNT_ENABLE;
+
+ if (enabled)
+ tmp |= S3C2410_TICNT_ENABLE;
+
+ writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
+ }
- writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
return 0;
@@ -93,15 +112,21 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
static int s3c_rtc_setfreq(struct device *dev, int freq)
{
- unsigned int tmp;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+ unsigned int tmp = 0;
if (!is_power_of_2(freq))
return -EINVAL;
spin_lock_irq(&s3c_rtc_pie_lock);
- tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
- tmp |= (128 / freq)-1;
+ if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+ tmp = readb(s3c_rtc_base + S3C2410_TICNT);
+ tmp &= S3C2410_TICNT_ENABLE;
+ }
+
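+	/* tick count = (max_user_freq / freq) - 1 */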
+ tmp |= (rtc_dev->max_user_freq / freq)-1;
writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
@@ -283,10 +308,17 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
+ unsigned int ticnt;
- seq_printf(seq, "periodic_IRQ\t: %s\n",
- (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt &= S3C64XX_RTCCON_TICEN;
+ } else {
+ ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
+ ticnt &= S3C2410_TICNT_ENABLE;
+ }
+
+ seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
return 0;
}
@@ -353,10 +385,16 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
if (!en) {
tmp = readb(base + S3C2410_RTCCON);
- writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
-
- tmp = readb(base + S3C2410_TICNT);
- writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+ tmp &= ~S3C64XX_RTCCON_TICEN;
+ tmp &= ~S3C2410_RTCCON_RTCEN;
+ writeb(tmp, base + S3C2410_RTCCON);
+
+ if (s3c_rtc_cpu_type == TYPE_S3C2410) {
+ tmp = readb(base + S3C2410_TICNT);
+ tmp &= ~S3C2410_TICNT_ENABLE;
+ writeb(tmp, base + S3C2410_TICNT);
+ }
} else {
/* re-enable the device, and check it is ok */
@@ -472,7 +510,12 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- rtc->max_user_freq = 128;
+	s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+
+	if (s3c_rtc_cpu_type == TYPE_S3C64XX)
+		rtc->max_user_freq = 32768;
+	else
+		rtc->max_user_freq = 128;
platform_set_drvdata(pdev, rtc);
return 0;
@@ -492,20 +535,30 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
/* RTC Power management control */
-static int ticnt_save;
+static int ticnt_save, ticnt_en_save;
static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
/* save TICNT for anyone using periodic interrupts */
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
+ ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ }
s3c_rtc_enable(pdev, 0);
return 0;
}
static int s3c_rtc_resume(struct platform_device *pdev)
{
+ unsigned int tmp;
+
s3c_rtc_enable(pdev, 1);
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
+ tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
+ writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ }
return 0;
}
#else
@@ -513,13 +566,27 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
-static struct platform_driver s3c2410_rtc_driver = {
+static struct platform_device_id s3c_rtc_driver_ids[] = {
+ {
+ .name = "s3c2410-rtc",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c64xx-rtc",
+ .driver_data = TYPE_S3C64XX,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids);
+
+static struct platform_driver s3c_rtc_driver = {
.probe = s3c_rtc_probe,
.remove = __devexit_p(s3c_rtc_remove),
.suspend = s3c_rtc_suspend,
.resume = s3c_rtc_resume,
+ .id_table = s3c_rtc_driver_ids,
.driver = {
- .name = "s3c2410-rtc",
+ .name = "s3c-rtc",
.owner = THIS_MODULE,
},
};
@@ -529,12 +596,12 @@ static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics
static int __init s3c_rtc_init(void)
{
printk(banner);
- return platform_driver_register(&s3c2410_rtc_driver);
+ return platform_driver_register(&s3c_rtc_driver);
}
static void __exit s3c_rtc_exit(void)
{
- platform_driver_unregister(&s3c2410_rtc_driver);
+ platform_driver_unregister(&s3c_rtc_driver);
}
module_init(s3c_rtc_init);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 875ba099e7a..3b943673cd3 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -1,7 +1,7 @@
/*
* A RTC driver for the Simtek STK17TA8
*
- * By Thomas Hommel <thomas.hommel@gefanuc.com>
+ * By Thomas Hommel <thomas.hommel@ge.com>
*
* Based on the DS1553 driver from
* Atsushi Nemoto <anemo@mba.ocn.ne.jp>
@@ -244,7 +244,7 @@ static const struct rtc_class_ops stk17ta8_rtc_ops = {
.alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
};
-static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
@@ -259,7 +259,7 @@ static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
+static ssize_t stk17ta8_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
@@ -382,7 +382,7 @@ static __exit void stk17ta8_exit(void)
module_init(stk17ta8_init);
module_exit(stk17ta8_exit);
-MODULE_AUTHOR("Thomas Hommel <thomas.hommel@gefanuc.com>");
+MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 20bfc64a15c..ec6313d1535 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -188,7 +188,7 @@ static const struct rtc_class_ops tx4939_rtc_ops = {
.alarm_irq_enable = tx4939_rtc_alarm_irq_enable,
};
-static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -207,7 +207,7 @@ static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
return count;
}
-static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
+static ssize_t tx4939_rtc_nvram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index b16cfe57a48..82931dc65c0 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -449,17 +449,17 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = wm831x_request_irq(wm831x, per_irq, wm831x_per_irq,
- IRQF_TRIGGER_RISING, "wm831x_rtc_per",
- wm831x_rtc);
+ ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
+ IRQF_TRIGGER_RISING, "RTC period",
+ wm831x_rtc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
per_irq, ret);
}
- ret = wm831x_request_irq(wm831x, alm_irq, wm831x_alm_irq,
- IRQF_TRIGGER_RISING, "wm831x_rtc_alm",
- wm831x_rtc);
+ ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
+ IRQF_TRIGGER_RISING, "RTC alarm",
+ wm831x_rtc);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
alm_irq, ret);
@@ -478,8 +478,8 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
- wm831x_free_irq(wm831x_rtc->wm831x, alm_irq, wm831x_rtc);
- wm831x_free_irq(wm831x_rtc->wm831x, per_irq, wm831x_rtc);
+ free_irq(alm_irq, wm831x_rtc);
+ free_irq(per_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
kfree(wm831x_rtc);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fa2339cb168..33975e922d6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -65,6 +65,7 @@ static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
+static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
@@ -115,6 +116,7 @@ struct dasd_device *dasd_alloc_device(void)
device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
+ INIT_WORK(&device->reload_device, do_reload_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
@@ -521,6 +523,26 @@ void dasd_kick_device(struct dasd_device *device)
}
/*
+ * dasd_reload_device will schedule a call to do_reload_device to the kernel
+ * event daemon.
+ */
+static void do_reload_device(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ reload_device);
+ device->discipline->reload(device);
+ dasd_put_device(device);
+}
+
+void dasd_reload_device(struct dasd_device *device)
+{
+ dasd_get_device(device);
+	/* queue call to do_reload_device to the kernel event daemon. */
+ schedule_work(&device->reload_device);
+}
+EXPORT_SYMBOL(dasd_reload_device);
+
+/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
* event daemon.
*/
@@ -1164,6 +1186,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_schedule_device_bh(device);
}
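+/*
+ * Unsolicited interrupt handler: forward the interrupt to the discipline's
+ * handler when the device is online and fully initialized, and always ask
+ * the common I/O layer to retry.
+ */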
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
+{
+ struct dasd_device *device;
+
+ device = dasd_device_from_cdev_locked(cdev);
+
+ if (IS_ERR(device))
+ goto out;
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
+ device->state != device->target ||
+	    !device->discipline->handle_unsolicited_interrupt) {
+ dasd_put_device(device);
+ goto out;
+ }
+
+ dasd_device_clear_timer(device);
+ device->discipline->handle_unsolicited_interrupt(device, irb);
+ dasd_put_device(device);
+out:
+ return UC_TODO_RETRY;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
+
/*
* If we have an error on a dasd_block layer request then we cancel
* and return all further requests from the same dasd_block as well.
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6632649dd6a..85bfd879485 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1418,9 +1418,29 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *cqr = erp->refers;
+ char *sense;
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
+
+ sense = dasd_get_sense(&erp->refers->irb);
+ /*
+ * dynamic pav may have changed base alias mapping
+ */
+ if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
+ && (sense[0] == 0x10) && (sense[7] == 0x0F)
+ && (sense[8] == 0x67)) {
+ /*
+ * remove device from alias handling to prevent new
+ * requests from being scheduled on the
+ * wrong alias device
+ */
+ dasd_alias_remove_device(cqr->startdev);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(cqr->startdev);
+ }
+
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 8c4814258e9..4155805dcdf 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -190,20 +190,21 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
int is_lcu_known;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
is_lcu_known = 1;
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newserver = _allocate_server(uid);
+ newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
@@ -214,14 +215,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
}
}
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
- newlcu = _allocate_lcu(uid);
+ newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
@@ -256,20 +257,20 @@ void dasd_alias_lcu_setup_complete(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -282,20 +283,20 @@ void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
unsigned long flags;
struct alias_server *server;
struct alias_lcu *lcu;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
+ device->discipline->get_uid(device, &uid);
lcu = NULL;
spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(uid);
+ server = _find_server(&uid);
if (server)
- lcu = _find_lcu(server, uid);
+ lcu = _find_lcu(server, &uid);
spin_unlock_irqrestore(&aliastree.lock, flags);
if (!lcu) {
DBF_EVENT_DEVID(DBF_ERR, device->cdev,
"could not find lcu for %04x %02x",
- uid->ssid, uid->real_unit_addr);
+ uid.ssid, uid.real_unit_addr);
WARN_ON(1);
return;
}
@@ -314,9 +315,11 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
+ struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
@@ -353,7 +356,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
- server = _find_server(&private->uid);
+ server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
@@ -366,19 +369,30 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
+
static int _add_device_to_lcu(struct alias_lcu *lcu,
- struct dasd_device *device)
+ struct dasd_device *device,
+ struct dasd_device *pos)
{
struct dasd_eckd_private *private;
struct alias_pav_group *group;
- struct dasd_uid *uid;
+ struct dasd_uid uid;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
- uid = &private->uid;
- uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
- uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
- dasd_set_uid(device->cdev, &private->uid);
+
+ /* only lock if not already locked */
+ if (device != pos)
+ spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
+ CDEV_NESTED_SECOND);
+ private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
+ private->uid.base_unit_addr =
+ lcu->uac->unit[private->uid.real_unit_addr].base_ua;
+ uid = private->uid;
+
+ if (device != pos)
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
@@ -386,25 +400,25 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
return 0;
}
- group = _find_group(lcu, uid);
+ group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
- memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
- memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
- group->uid.ssid = uid->ssid;
- if (uid->type == UA_BASE_DEVICE)
- group->uid.base_unit_addr = uid->real_unit_addr;
+ memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
+ memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
+ group->uid.ssid = uid.ssid;
+ if (uid.type == UA_BASE_DEVICE)
+ group->uid.base_unit_addr = uid.real_unit_addr;
else
- group->uid.base_unit_addr = uid->base_unit_addr;
- memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
+ group->uid.base_unit_addr = uid.base_unit_addr;
+ memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
- if (uid->type == UA_BASE_DEVICE)
+ if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
@@ -525,7 +539,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
if (rc)
return rc;
- spin_lock_irqsave(&lcu->lock, flags);
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
+ CDEV_NESTED_FIRST);
+ spin_lock(&lcu->lock);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -542,9 +559,10 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
- _add_device_to_lcu(lcu, device);
+ _add_device_to_lcu(lcu, device, refdev);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock(&lcu->lock);
+ spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
return 0;
}
@@ -628,9 +646,12 @@ int dasd_alias_add_device(struct dasd_device *device)
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
- spin_lock_irqsave(&lcu->lock, flags);
+
+ /* need to take cdev lock before lcu lock */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(&lcu->lock);
if (!(lcu->flags & UPDATE_PENDING)) {
- rc = _add_device_to_lcu(lcu, device);
+ rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
@@ -638,10 +659,19 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
- spin_unlock_irqrestore(&lcu->lock, flags);
+ spin_unlock(&lcu->lock);
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return rc;
}
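+/*
+ * Force a re-evaluation of the base/alias mapping: flag the lcu as needing
+ * an update and add the device again, which schedules the lcu update worker.
+ */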
+int dasd_alias_update_add_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
+ private->lcu->flags |= UPDATE_PENDING;
+ return dasd_alias_add_device(device);
+}
+
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
@@ -740,19 +770,30 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
+ unsigned long flags;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = (struct dasd_eckd_private *) device->private;
- if (private->uid.type != UA_BASE_DEVICE)
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (private->uid.type != UA_BASE_DEVICE) {
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
+ flags);
continue;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index eff9c812c5c..34d51dd4c53 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,7 +49,6 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
- struct dasd_uid uid;
};
/*
@@ -936,42 +935,46 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
-static ssize_t
-dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_alias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
- int alias;
+ struct dasd_device *device;
+ struct dasd_uid uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
return sprintf(buf, "0\n");
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ if (uid.type == UA_BASE_PAV_ALIAS ||
+ uid.type == UA_HYPER_PAV_ALIAS)
+ return sprintf(buf, "1\n");
}
- if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
- devmap->uid.type == UA_HYPER_PAV_ALIAS)
- alias = 1;
- else
- alias = 0;
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, alias ? "1\n" : "0\n");
+ dasd_put_device(device);
+
+ return sprintf(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
-static ssize_t
-dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dasd_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char *vendor;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
- vendor = devmap->uid.vendor;
- else
- vendor = "";
- spin_unlock(&dasd_devmap_lock);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ vendor = "";
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid))
+ vendor = uid.vendor;
+
+ dasd_put_device(device);
return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
}
@@ -985,48 +988,51 @@ static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ struct dasd_uid uid;
char uid_string[UID_STRLEN];
char ua_string[3];
- struct dasd_uid *uid;
- devmap = dasd_find_busid(dev_name(dev));
- spin_lock(&dasd_devmap_lock);
- if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
- spin_unlock(&dasd_devmap_lock);
- return sprintf(buf, "\n");
- }
- uid = &devmap->uid;
- switch (uid->type) {
- case UA_BASE_DEVICE:
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
- case UA_BASE_PAV_ALIAS:
- sprintf(ua_string, "%02x", uid->base_unit_addr);
- break;
- case UA_HYPER_PAV_ALIAS:
- sprintf(ua_string, "xx");
- break;
- default:
- /* should not happen, treat like base device */
- sprintf(ua_string, "%02x", uid->real_unit_addr);
- break;
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ uid_string[0] = 0;
+ if (IS_ERR(device))
+ return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+
+ if (device->discipline && device->discipline->get_uid &&
+ !device->discipline->get_uid(device, &uid)) {
+ switch (uid.type) {
+ case UA_BASE_DEVICE:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ case UA_BASE_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.base_unit_addr);
+ break;
+ case UA_HYPER_PAV_ALIAS:
+ snprintf(ua_string, sizeof(ua_string), "xx");
+ break;
+ default:
+ /* should not happen, treat like base device */
+ snprintf(ua_string, sizeof(ua_string), "%02x",
+ uid.real_unit_addr);
+ break;
+ }
+
+ if (strlen(uid.vduit) > 0)
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit);
+ else
+ snprintf(uid_string, sizeof(uid_string),
+ "%s.%s.%04x.%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string);
}
- if (strlen(uid->vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string,
- uid->vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid->vendor, uid->serial,
- uid->ssid, ua_string);
- spin_unlock(&dasd_devmap_lock);
+ dasd_put_device(device);
+
return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
}
-
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
/*
@@ -1094,50 +1100,6 @@ static struct attribute_group dasd_attr_group = {
};
/*
- * Return copy of the device unique identifier.
- */
-int
-dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
- spin_lock(&dasd_devmap_lock);
- *uid = devmap->uid;
- spin_unlock(&dasd_devmap_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_get_uid);
-
-/*
- * Register the given device unique identifier into devmap struct.
- * In addition check if the related storage server subsystem ID is already
- * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
- * create new entry.
- * Return 0 if server was already in serverlist,
- * 1 if the server was added successful
- * <0 in case of error.
- */
-int
-dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
-{
- struct dasd_devmap *devmap;
-
- devmap = dasd_find_busid(dev_name(&cdev->dev));
- if (IS_ERR(devmap))
- return PTR_ERR(devmap);
-
- spin_lock(&dasd_devmap_lock);
- devmap->uid = *uid;
- spin_unlock(&dasd_devmap_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(dasd_set_uid);
-
-/*
* Return value of the specified feature.
*/
int
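
The devmap attributes above no longer read a cached uid under dasd_devmap_lock; they take a reference on the device and ask the discipline for a snapshot. A condensed sketch of that pattern (example_uid_show is illustrative, not driver code):

static ssize_t example_uid_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dasd_device *device;
	struct dasd_uid uid;
	ssize_t len = 0;

	device = dasd_device_from_cdev(to_ccwdev(dev));	/* takes a reference */
	if (IS_ERR(device))
		return sprintf(buf, "\n");
	if (device->discipline && device->discipline->get_uid &&
	    !device->discipline->get_uid(device, &uid))
		len = sprintf(buf, "%s.%s.%04x\n",
			      uid.vendor, uid.serial, uid.ssid);
	dasd_put_device(device);			/* drop the reference */
	return len ? len : sprintf(buf, "\n");
}
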
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0cb23311685..ab84da5592e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -692,18 +692,20 @@ dasd_eckd_cdl_reclen(int recid)
/*
* Generate device unique id that specifies the physical device.
*/
-static int dasd_eckd_generate_uid(struct dasd_device *device,
- struct dasd_uid *uid)
+static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private;
+ struct dasd_uid *uid;
int count;
+ unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
return -ENODEV;
-
+ uid = &private->uid;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -726,9 +728,25 @@ static int dasd_eckd_generate_uid(struct dasd_device *device,
private->vdsneq->uit[count]);
}
}
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
+static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ if (device->private) {
+ private = (struct dasd_eckd_private *)device->private;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ *uid = private->uid;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return 0;
+ }
+ return -EINVAL;
+}
+
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
void *rcd_buffer,
struct ciw *ciw, __u8 lpm)
@@ -1088,6 +1106,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_block *block;
+ struct dasd_uid temp_uid;
int is_known, rc;
int readonly;
@@ -1124,13 +1143,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err1;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err1;
- dasd_set_uid(device->cdev, &private->uid);
- if (private->uid.type == UA_BASE_DEVICE) {
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
@@ -1451,6 +1470,7 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
+ cancel_work_sync(&device->reload_device);
return dasd_alias_remove_device(device);
};
@@ -1709,10 +1729,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
{
char mask;
char *sense = NULL;
+ struct dasd_eckd_private *private;
+ private = (struct dasd_eckd_private *) device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
+ /* for alias only and not in offline processing */
+ if (!device->block && private->lcu &&
+ !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ /*
+ * the state change could be caused by an alias
+ * reassignment; remove the device from alias handling
+ * to prevent new requests from being scheduled on
+ * the wrong alias device
+ */
+ dasd_alias_remove_device(device);
+
+ /* schedule worker to reload device */
+ dasd_reload_device(device);
+ }
+
dasd_generic_handle_state_change(device);
return;
}
@@ -3259,7 +3296,7 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
dasd_eckd_dump_sense_ccw(device, req, irb);
}
-int dasd_eckd_pm_freeze(struct dasd_device *device)
+static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
/*
* the device should be disconnected from our LCU structure
@@ -3272,7 +3309,7 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
return 0;
}
-int dasd_eckd_restore_device(struct dasd_device *device)
+static int dasd_eckd_restore_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_eckd_characteristics temp_rdc_data;
@@ -3287,15 +3324,16 @@ int dasd_eckd_restore_device(struct dasd_device *device)
if (rc)
goto out_err;
- /* Generate device unique id and register in devmap */
- rc = dasd_eckd_generate_uid(device, &private->uid);
- dasd_get_uid(device->cdev, &temp_uid);
+ dasd_eckd_get_uid(device, &temp_uid);
+ /* Generate device unique id */
+ rc = dasd_eckd_generate_uid(device);
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
dev_err(&device->cdev->dev, "The UID of the DASD has "
"changed\n");
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (rc)
goto out_err;
- dasd_set_uid(device->cdev, &private->uid);
/* register lcu with alias handling, enable PAV if this is a new lcu */
is_known = dasd_alias_make_device_known_to_lcu(device);
@@ -3336,6 +3374,56 @@ out_err:
return -1;
}
+static int dasd_eckd_reload_device(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ int rc, old_base;
+ char print_uid[60];
+ struct dasd_uid uid;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ old_base = private->uid.base_unit_addr;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ /* Read Configuration Data */
+ rc = dasd_eckd_read_conf(device);
+ if (rc)
+ goto out_err;
+
+ rc = dasd_eckd_generate_uid(device);
+ if (rc)
+ goto out_err;
+ /*
+ * update unit address configuration and
+ * add device to alias management
+ */
+ dasd_alias_update_add_device(device);
+
+ dasd_eckd_get_uid(device, &uid);
+
+ if (old_base != uid.base_unit_addr) {
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
+ uid.ssid, uid.base_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x", uid.vendor, uid.serial,
+ uid.ssid, uid.base_unit_addr);
+
+ dev_info(&device->cdev->dev,
+ "An Alias device was reassigned to a new base device "
+ "with UID: %s\n", print_uid);
+ }
+ return 0;
+
+out_err:
+ return -1;
+}
+
static struct ccw_driver dasd_eckd_driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
@@ -3348,6 +3436,7 @@ static struct ccw_driver dasd_eckd_driver = {
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
+ .uc_handler = dasd_generic_uc_handler,
};
/*
@@ -3389,6 +3478,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ioctl = dasd_eckd_ioctl,
.freeze = dasd_eckd_pm_freeze,
.restore = dasd_eckd_restore_device,
+ .reload = dasd_eckd_reload_device,
+ .get_uid = dasd_eckd_get_uid,
};
static int __init
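
The generic half of the reload mechanism, dasd_reload_device() and the reload_device work item declared in dasd_int.h further down, lives in dasd.c and is not part of this excerpt. A plausible sketch, assuming it follows the existing kick_work/restore_device helpers:

static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue the work; drop the reference if it was already pending */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
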
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 864d53c0420..dd6385a5af1 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -426,7 +426,6 @@ struct alias_pav_group {
struct dasd_device *next;
};
-
struct dasd_eckd_private {
struct dasd_eckd_characteristics rdc_data;
u8 *conf_data;
@@ -463,4 +462,5 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
+int dasd_alias_update_add_device(struct dasd_device *);
#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a91d4a97d4f..49b431d135e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -81,6 +81,10 @@ struct dasd_block;
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
+/* lock class for nested cdev lock */
+#define CDEV_NESTED_FIRST 1
+#define CDEV_NESTED_SECOND 2
+
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
@@ -229,6 +233,24 @@ struct dasd_ccw_req {
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
+ * Unique identifier for dasd device.
+ */
+#define UA_NOT_CONFIGURED 0x00
+#define UA_BASE_DEVICE 0x01
+#define UA_BASE_PAV_ALIAS 0x02
+#define UA_HYPER_PAV_ALIAS 0x03
+
+struct dasd_uid {
+ __u8 type;
+ char vendor[4];
+ char serial[15];
+ __u16 ssid;
+ __u8 real_unit_addr;
+ __u8 base_unit_addr;
+ char vduit[33];
+};
+
+/*
* the struct dasd_discipline is
* sth like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
@@ -312,28 +334,15 @@ struct dasd_discipline {
/* suspend/resume functions */
int (*freeze) (struct dasd_device *);
int (*restore) (struct dasd_device *);
-};
-extern struct dasd_discipline *dasd_diag_discipline_pointer;
-
-/*
- * Unique identifier for dasd device.
- */
-#define UA_NOT_CONFIGURED 0x00
-#define UA_BASE_DEVICE 0x01
-#define UA_BASE_PAV_ALIAS 0x02
-#define UA_HYPER_PAV_ALIAS 0x03
+ /* reload device after state change */
+ int (*reload) (struct dasd_device *);
-struct dasd_uid {
- __u8 type;
- char vendor[4];
- char serial[15];
- __u16 ssid;
- __u8 real_unit_addr;
- __u8 base_unit_addr;
- char vduit[33];
+ int (*get_uid) (struct dasd_device *, struct dasd_uid *);
};
+extern struct dasd_discipline *dasd_diag_discipline_pointer;
+
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
@@ -386,6 +395,7 @@ struct dasd_device {
struct tasklet_struct tasklet;
struct work_struct kick_work;
struct work_struct restore_device;
+ struct work_struct reload_device;
struct timer_list timer;
debug_info_t *debug_area;
@@ -582,6 +592,7 @@ void dasd_enable_device(struct dasd_device *);
void dasd_set_target_state(struct dasd_device *, int);
void dasd_kick_device(struct dasd_device *);
void dasd_restore_device(struct dasd_device *);
+void dasd_reload_device(struct dasd_device *);
void dasd_add_request_head(struct dasd_ccw_req *);
void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -606,6 +617,7 @@ int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
+enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
@@ -629,8 +641,6 @@ void dasd_devmap_exit(void);
struct dasd_device *dasd_create_device(struct ccw_device *);
void dasd_delete_device(struct dasd_device *);
-int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
-int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
int dasd_get_feature(struct ccw_device *, int);
int dasd_set_feature(struct ccw_device *, int, int);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 4e34d3686c2..40834f18754 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -148,13 +148,12 @@ config VMLOGRDR
This driver depends on the IUCV support driver.
config VMCP
- tristate "Support for the z/VM CP interface (VM only)"
+ bool "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
-
config MONREADER
tristate "API for reading z/VM monitor service records"
depends on IUCV
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0eabcca3c92..857dfcb7b35 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -484,6 +484,7 @@ fs3270_open(struct inode *inode, struct file *filp)
raw3270_del_view(&fp->view);
goto out;
}
+ nonseekable_open(inode, filp);
filp->private_data = fp;
out:
mutex_unlock(&fs3270_mutex);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index cb6bffe7141..18d9a497863 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -49,7 +49,7 @@ static unsigned char ret_diacr[NR_DEAD] = {
struct kbd_data *
kbd_alloc(void) {
struct kbd_data *kbd;
- int i, len;
+ int i;
kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
if (!kbd)
@@ -59,12 +59,11 @@ kbd_alloc(void) {
goto out_kbd;
for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
if (key_maps[i]) {
- kbd->key_maps[i] =
- kmalloc(sizeof(u_short)*NR_KEYS, GFP_KERNEL);
+ kbd->key_maps[i] = kmemdup(key_maps[i],
+ sizeof(u_short) * NR_KEYS,
+ GFP_KERNEL);
if (!kbd->key_maps[i])
goto out_maps;
- memcpy(kbd->key_maps[i], key_maps[i],
- sizeof(u_short)*NR_KEYS);
}
}
kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
@@ -72,23 +71,21 @@ kbd_alloc(void) {
goto out_maps;
for (i = 0; i < ARRAY_SIZE(func_table); i++) {
if (func_table[i]) {
- len = strlen(func_table[i]) + 1;
- kbd->func_table[i] = kmalloc(len, GFP_KERNEL);
+ kbd->func_table[i] = kstrdup(func_table[i],
+ GFP_KERNEL);
if (!kbd->func_table[i])
goto out_func;
- memcpy(kbd->func_table[i], func_table[i], len);
}
}
kbd->fn_handler =
kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
if (!kbd->fn_handler)
goto out_func;
- kbd->accent_table =
- kmalloc(sizeof(struct kbdiacruc)*MAX_DIACR, GFP_KERNEL);
+ kbd->accent_table = kmemdup(accent_table,
+ sizeof(struct kbdiacruc) * MAX_DIACR,
+ GFP_KERNEL);
if (!kbd->accent_table)
goto out_fn_handler;
- memcpy(kbd->accent_table, accent_table,
- sizeof(struct kbdiacruc)*MAX_DIACR);
kbd->accent_table_size = accent_table_size;
return kbd;
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 62c2647f37f..4a51e3f0968 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -102,7 +102,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* set system name */
set_data(evb->system_name, system_name);
- /* set sytem level */
+ /* set system level */
evb->system_level = system_level;
/* set sysplex name */
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 5bb59d36a6d..04e532eec03 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,24 +1,20 @@
/*
- * Copyright IBM Corp. 2004,2007
+ * Copyright IBM Corp. 2004,2010
* Interface implementation for communication with the z/VM control program
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
-
+ *
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
-#define KMSG_COMPONENT "vmcp"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
@@ -26,10 +22,6 @@
#include <asm/uaccess.h>
#include "vmcp.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM CP interface");
-
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
@@ -197,11 +189,8 @@ static int __init vmcp_init(void)
{
int ret;
- if (!MACHINE_IS_VM) {
- pr_warning("The z/VM CP interface device driver cannot be "
- "loaded without z/VM\n");
- return -ENODEV;
- }
+ if (!MACHINE_IS_VM)
+ return 0;
vmcp_debug = debug_register("vmcp", 1, 1, 240);
if (!vmcp_debug)
@@ -214,19 +203,8 @@ static int __init vmcp_init(void)
}
ret = misc_register(&vmcp_dev);
- if (ret) {
+ if (ret)
debug_unregister(vmcp_debug);
- return ret;
- }
-
- return 0;
-}
-
-static void __exit vmcp_exit(void)
-{
- misc_deregister(&vmcp_dev);
- debug_unregister(vmcp_debug);
+ return ret;
}
-
-module_init(vmcp_init);
-module_exit(vmcp_exit);
+device_initcall(vmcp_init);
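
With the Kconfig switch from tristate to bool, vmcp can no longer be built as a module, so the module boilerplate gives way to a device_initcall that simply succeeds when the machine is not running under z/VM. A minimal sketch of that built-in pattern (example_init is illustrative and omits the debug-area setup):

static int __init example_init(void)
{
	if (!MACHINE_IS_VM)
		return 0;	/* built-in driver: absence is not an error */
	return misc_register(&vmcp_dev);
}
device_initcall(example_init);
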
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 7217966f7d3..f5ea3384a4b 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -445,7 +445,7 @@ static int zcore_memmap_open(struct inode *inode, struct file *filp)
}
kfree(chunk_array);
filp->private_data = buf;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_memmap_release(struct inode *inode, struct file *filp)
@@ -473,7 +473,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
- return 0;
+ return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
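
Several character-device open handlers in this series now end in nonseekable_open(). The call cannot fail, so returning its value keeps the usual success path while marking the file descriptor non-seekable. A minimal sketch (example_open is illustrative):

static int example_open(struct inode *inode, struct file *filp)
{
	/* driver-specific setup would go here */
	filp->private_data = NULL;
	/* clears the seekable file-mode bits and always returns 0 */
	return nonseekable_open(inode, filp);
}
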
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5f97ea2ee6b..97b25d68e3e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -123,8 +123,10 @@ ccwgroup_release (struct device *dev)
for (i = 0; i < gdev->count; i++) {
if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
}
}
@@ -262,11 +264,14 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
goto error;
}
/* Don't allow a device to belong to more than one group. */
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
}
/* Check for sufficient number of bus ids. */
if (i < num_devices && !curr_buf) {
@@ -303,8 +308,10 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
error:
for (i = 0; i < num_devices; i++)
if (gdev->cdev[i]) {
+ spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ spin_unlock_irq(gdev->cdev[i]->ccwlock);
put_device(&gdev->cdev[i]->dev);
gdev->cdev[i] = NULL;
}
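
The ccwgroup hunks establish the rule that a ccw device's driver data, which records its group membership, is only read or written under that device's ccwlock. A sketch of the claim-or-fail step this protects (example_claim_for_group is illustrative):

static int example_claim_for_group(struct ccw_device *cdev,
				   struct ccwgroup_device *gdev)
{
	int rc = 0;

	spin_lock_irq(cdev->ccwlock);
	if (dev_get_drvdata(&cdev->dev))
		rc = -EINVAL;		/* already grouped elsewhere */
	else
		dev_set_drvdata(&cdev->dev, gdev);
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}
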
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 37df42af05e..7f206ed44fd 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -159,6 +159,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
struct irb *irb = &cdev->private->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
/* Perform BASIC SENSE if needed. */
if (ccw_device_accumulate_and_sense(cdev, lcirb))
@@ -178,6 +179,20 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
+ /* Ask the driver what to do */
+ if (cdev->drv && cdev->drv->uc_handler) {
+ todo = cdev->drv->uc_handler(cdev, lcirb);
+ switch (todo) {
+ case UC_TODO_RETRY:
+ return IO_STATUS_ERROR;
+ case UC_TODO_RETRY_ON_NEW_PATH:
+ return IO_PATH_ERROR;
+ case UC_TODO_STOP:
+ return IO_REJECTED;
+ default:
+ return IO_STATUS_ERROR;
+ }
+ }
/* Assume that unexpected SENSE data implies an error. */
return IO_STATUS_ERROR;
}
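
ccwreq_status() now consults an optional per-driver unit-check handler whose enum uc_todo return value selects retry on the same path, retry on a different path, or giving up. A hypothetical handler wired into struct ccw_driver's .uc_handler slot might look like this (the sense-byte test is purely illustrative):

static enum uc_todo example_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	char *sense = dasd_get_sense(irb);

	/* illustrative condition: treat one particular unit check as a
	 * hint that the current path is bad and another should be tried
	 */
	if (sense && sense[7] == 0x0d)
		return UC_TODO_RETRY_ON_NEW_PATH;
	return UC_TODO_RETRY;
}
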
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1d16189f2f2..6c9fa15aac7 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+static ssize_t chp_measurement_chars_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -182,7 +183,7 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj,
+static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3b6f4adc509..a83877c664a 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -803,6 +803,7 @@ static long chsc_ioctl(struct file *filp, unsigned int cmd,
static const struct file_operations chsc_fops = {
.owner = THIS_MODULE,
+ .open = nonseekable_open,
.unlocked_ioctl = chsc_ioctl,
.compat_ioctl = chsc_ioctl,
};
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5feea1a371e..f4e6cf3aceb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -616,7 +616,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- s390_idle_check();
+ s390_idle_check(regs, S390_lowcore.int_clock,
+ S390_lowcore.async_enter_timer);
irq_enter();
__get_cpu_var(s390_idle).nohz_delay = 1;
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 511649115bd..ac94ac75145 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -648,6 +648,8 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
+ struct cpuid cpu_id;
+
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
@@ -658,8 +660,9 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
- css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
+ get_cpu_id(&cpu_id);
+ css->global_pgid.cpu_id = cpu_id.ident;
+ css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -1062,6 +1065,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
}
static const struct file_operations cio_settle_proc_fops = {
+ .open = nonseekable_open,
.write = cio_settle_write,
};
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 75926279263..fac06155773 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -23,21 +23,6 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(struct subchannel_id schid, struct schib *addr)
-{
- register struct subchannel_id reg1 asm ("1") = schid;
- int ccode;
-
- asm volatile(
- " stsch 0(%3)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode), "=m" (*addr)
- : "d" (reg1), "a" (addr)
- : "cc");
- return ccode;
-}
-
static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 48aa0647432..f0037eefd44 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -13,8 +13,8 @@
#include <asm/debug.h>
#include "chsc.h"
-#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
-#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
+#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
* if an asynchronous HiperSockets queue runs full, the 10 seconds timer wait
@@ -296,10 +296,8 @@ struct qdio_q {
struct qdio_irq *irq_ptr;
struct sl *sl;
/*
- * Warning: Leave this member at the end so it won't be cleared in
- * qdio_fill_qs. A page is allocated under this pointer and used for
- * slib and sl. slib is 2048 bytes big and sl points to offset
- * PAGE_SIZE / 2.
+ * A page is allocated under this pointer and used for slib and sl.
+ * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
@@ -372,11 +370,6 @@ static inline int multicast_outbound(struct qdio_q *q)
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
-static inline unsigned long long get_usecs(void)
-{
- return monotonic_clock() >> 12;
-}
-
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
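
The constants above switch from microseconds to raw TOD clock units: on s390 the TOD clock advances 4096 units per microsecond, so a threshold of N microseconds becomes N << 12 and the per-call shift that get_usecs() used to perform disappears. A small sketch of the comparison this enables (usecs_to_tod and example_busy_bit_expired are illustrative):

#define usecs_to_tod(us)	((unsigned long long)(us) << 12)

static inline int example_busy_bit_expired(unsigned long long start)
{
	/* both sides are raw get_clock() values, no conversion needed */
	return (get_clock() - start) >= usecs_to_tod(100);
}
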
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 88be7b9ea6e..00520f9a7a8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -336,10 +336,10 @@ again:
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
if (!start_time) {
- start_time = get_usecs();
+ start_time = get_clock();
goto again;
}
- if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+ if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
return cc;
@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_usecs();
+ q->u.in.timestamp = get_clock();
return 1;
} else
return 0;
@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
* At this point we know, that inbound first_to_check
* has (probably) not moved (see qdio_inbound_processing).
*/
- if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+ if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
q->first_to_check);
return 1;
@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
-again:
+
if (!qdio_inbound_q_moved(q))
return;
@@ -615,7 +615,10 @@ again:
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
}
qdio_stop_polling(q);
@@ -625,7 +628,8 @@ again:
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- goto again;
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
}
}
@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
+ if (irq_ptr->perf_stat_enabled)
+ irq_ptr->perf_stat.qdio_int++;
+
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-/**
- * qdio_cleanup - shutdown queues and free data structures
- * @cdev: associated ccw device
- * @how: use halt or clear to shutdown
- *
- * This function calls qdio_shutdown() for @cdev with method @how.
- * and qdio_free(). The qdio_free() return value is ignored since
- * !irq_ptr is already checked.
- */
-int qdio_cleanup(struct ccw_device *cdev, int how)
-{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- int rc;
-
- if (!irq_ptr)
- return -ENODEV;
-
- rc = qdio_shutdown(cdev, how);
-
- qdio_free(cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_cleanup);
-
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1157,28 +1140,6 @@ int qdio_free(struct ccw_device *cdev)
EXPORT_SYMBOL_GPL(qdio_free);
/**
- * qdio_initialize - allocate and establish queues for a qdio subchannel
- * @init_data: initialization data
- *
- * This function first allocates queues via qdio_allocate() and on success
- * establishes them via qdio_establish().
- */
-int qdio_initialize(struct qdio_initialize *init_data)
-{
- int rc;
-
- rc = qdio_allocate(init_data);
- if (rc)
- return rc;
-
- rc = qdio_establish(init_data);
- if (rc)
- qdio_free(init_data->cdev);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_initialize);
-
-/**
* qdio_allocate - allocate qdio queues and associated data
* @init_data: initialization data
*/
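
With qdio_cleanup() and qdio_initialize() removed, callers such as qeth (see the qeth_qdio_establish() and qeth_qdio_clear_card() hunks further down) pair the primitives themselves, which lets each error path decide how much to unwind. A condensed sketch of the new calling convention (the example_* names are illustrative):

static int example_qdio_setup(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;
	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);	/* undo the allocation */
	return rc;
}

static void example_qdio_teardown(struct ccw_device *cdev)
{
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	qdio_free(cdev);
}
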
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 7f4a7546514..6326b67c45d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -106,10 +106,12 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
- /* must be cleared by every qdio_establish */
- memset(q, 0, ((char *)&q->slib) - ((char *)q));
- memset(q->slib, 0, PAGE_SIZE);
+ struct slib *slib = q->slib;
+ /* queue must be cleared for qdio_establish */
+ memset(q, 0, sizeof(*q));
+ memset(slib, 0, PAGE_SIZE);
+ q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ce5f8910ff8..8daf1b99f15 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -95,7 +95,7 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
for_each_input_queue(irq_ptr, q, i)
list_add_rcu(&q->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1);
+ xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -173,7 +173,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
/* prevent racing */
if (*tiqdio_alsi)
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
+ xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
}
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 304caf54997..41e0aaefafd 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -302,7 +302,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
static int zcrypt_open(struct inode *inode, struct file *filp)
{
atomic_inc(&zcrypt_open_count);
- return 0;
+ return nonseekable_open(inode, filp);
}
/**
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e35713dd050..4ecafbf9121 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1364,8 +1364,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
- ch->discontact_th = (struct th_header *)
- kzalloc(TH_HEADER_LENGTH, gfp_type());
+ ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
if (ch->discontact_th == NULL)
goto nomem_return;
@@ -1379,8 +1378,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
} else
ccw_num = 8;
- ch->ccw = (struct ccw1 *)
- kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (ch->ccw == NULL)
goto nomem_return;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 5978b390153..87c24d2936d 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -669,8 +669,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
goto done;
}
- header = (struct th_sweep *)
- kmalloc(sizeof(struct th_sweep), gfp_type());
+ header = kmalloc(sizeof(struct th_sweep), gfp_type());
if (!header) {
dev_kfree_skb_any(sweep_skb);
@@ -1191,8 +1190,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
- mpcginfo = (struct mpcg_info *)
- kmalloc(sizeof(struct mpcg_info), gfp_type());
+ mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
if (mpcginfo == NULL)
goto done;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 9b19ea13b4d..0f19d540b65 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1238,8 +1238,7 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
ipm = lcs_check_addr_entry(card, im4, buf);
if (ipm != NULL)
continue; /* Address already in list. */
- ipm = (struct lcs_ipm_list *)
- kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+ ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
if (ipm == NULL) {
pr_info("Not enough memory to add"
" new multicast entry!\n");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fcd005aad98..7a44c38aaf6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -179,25 +179,23 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
((prot == QETH_PROT_IPV6) ? \
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
-#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101
-#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101
+#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108
#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108
#define QETH_MODELLIST_ARRAY \
- {{0x1731, 0x01, 0x1732, 0x01, QETH_CARD_TYPE_OSAE, 1, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x05, 0x1732, 0x05, QETH_CARD_TYPE_IQD, 0, \
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \
- QETH_MAX_QUEUES, 0x103}, \
- {0x1731, 0x06, 0x1732, 0x06, QETH_CARD_TYPE_OSN, 0, \
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \
- QETH_MAX_QUEUES, 0}, \
- {0, 0, 0, 0, 0, 0, 0, 0, 0} }
+ {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
+ {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
+ {0, 0, 0, 0, 0, 0} }
+#define QETH_CU_TYPE_IND 0
+#define QETH_CU_MODEL_IND 1
+#define QETH_DEV_TYPE_IND 2
+#define QETH_DEV_MODEL_IND 3
+#define QETH_QUEUE_NO_IND 4
+#define QETH_MULTICAST_IND 5
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
@@ -351,7 +349,7 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
-#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/
+#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
@@ -630,6 +628,7 @@ struct qeth_card_info {
int unique_id;
struct qeth_card_blkt blkt;
__u32 csum_mask;
+ __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
};
@@ -739,6 +738,7 @@ struct qeth_card {
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
+ struct mutex conf_mutex;
};
struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3ba738b2e27..13ef46b9d38 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -53,7 +53,7 @@ struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct device *qeth_core_root_dev;
-static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY;
+static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_send_control_data_cb(struct qeth_channel *,
@@ -111,21 +111,29 @@ static inline const char *qeth_get_cardname(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " Guest LAN QDIO";
case QETH_CARD_TYPE_IQD:
return " Guest LAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return " Guest LAN QDIO - OSM";
+ case QETH_CARD_TYPE_OSX:
+ return " Guest LAN QDIO - OSX";
default:
return " unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSN:
return " OSN QDIO";
+ case QETH_CARD_TYPE_OSM:
+ return " OSM QDIO";
+ case QETH_CARD_TYPE_OSX:
+ return " OSX QDIO";
default:
return " unknown";
}
@@ -138,16 +146,20 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
{
if (card->info.guestlan) {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
return "GuestLAN QDIO";
case QETH_CARD_TYPE_IQD:
return "GuestLAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return "GuestLAN OSM";
+ case QETH_CARD_TYPE_OSX:
+ return "GuestLAN OSX";
default:
return "unknown";
}
} else {
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
return "OSD_100";
@@ -172,6 +184,10 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "HiperSockets";
case QETH_CARD_TYPE_OSN:
return "OSN";
+ case QETH_CARD_TYPE_OSM:
+ return "OSM_1000";
+ case QETH_CARD_TYPE_OSX:
+ return "OSX_10GIG";
default:
return "unknown";
}
@@ -419,7 +435,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
-static int qeth_check_idx_response(unsigned char *buffer)
+static int qeth_check_idx_response(struct qeth_card *card,
+ unsigned char *buffer)
{
if (!buffer)
return 0;
@@ -434,6 +451,12 @@ static int qeth_check_idx_response(unsigned char *buffer)
QETH_DBF_TEXT(TRACE, 2, "ckidxres");
QETH_DBF_TEXT(TRACE, 2, " idxterm");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
+ if (buffer[4] == 0xf6) {
+ dev_err(&card->gdev->dev,
+ "The qeth device is not configured "
+ "for the OSI layer required by z/VM\n");
+ return -EPERM;
+ }
return -EIO;
}
return 0;
@@ -528,18 +551,19 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
struct qeth_ipa_cmd *cmd;
unsigned long flags;
int keep_reply;
+ int rc = 0;
QETH_DBF_TEXT(TRACE, 4, "sndctlcb");
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data)) {
+ rc = qeth_check_idx_response(card, iob->data);
+ switch (rc) {
+ case 0:
+ break;
+ case -EIO:
qeth_clear_ipacmd_list(card);
- if (((iob->data[2] & 0xc0) == 0xc0) && iob->data[4] == 0xf6)
- dev_err(&card->gdev->dev,
- "The qeth device is not configured "
- "for the OSI layer required by z/VM\n");
- else
- qeth_schedule_recovery(card);
+ qeth_schedule_recovery(card);
+ default:
goto out;
}
@@ -606,7 +630,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
QETH_DBF_TEXT(SETUP, 2, "setupch");
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data = (char *)
+ channel->iob[cnt].data =
kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
if (channel->iob[cnt].data == NULL)
break;
@@ -719,7 +743,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ",
+ QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
dev_name(&cdev->dev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
@@ -998,9 +1022,8 @@ static void qeth_clean_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
}
-static int qeth_is_1920_device(struct qeth_card *card)
+static void qeth_get_channel_path_desc(struct qeth_card *card)
{
- int single_queue = 0;
struct ccw_device *ccwdev;
struct channelPath_dsc {
u8 flags;
@@ -1013,17 +1036,25 @@ static int qeth_is_1920_device(struct qeth_card *card)
u8 chpp;
} *chp_dsc;
- QETH_DBF_TEXT(SETUP, 2, "chk_1920");
+ QETH_DBF_TEXT(SETUP, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
if (chp_dsc != NULL) {
/* CHPP field bit 6 == 1 -> single queue */
- single_queue = ((chp_dsc->chpp & 0x02) == 0x02);
+ if ((chp_dsc->chpp & 0x02) == 0x02)
+ card->qdio.no_out_queues = 1;
+ card->info.func_level = 0x4100 + chp_dsc->desc;
kfree(chp_dsc);
}
- QETH_DBF_TEXT_(SETUP, 2, "rc:%x", single_queue);
- return single_queue;
+ if (card->qdio.no_out_queues == 1) {
+ card->qdio.default_out_queue = 0;
+ dev_info(&card->gdev->dev,
+ "Priority Queueing not supported\n");
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
+ QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ return;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1100,6 +1131,7 @@ static int qeth_setup_card(struct qeth_card *card)
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
+ mutex_init(&card->conf_mutex);
card->thread_start_mask = 0;
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
@@ -1170,18 +1202,17 @@ static int qeth_determine_card_type(struct qeth_card *card)
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- while (known_devices[i][4]) {
- if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) &&
- (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) {
- card->info.type = known_devices[i][4];
- card->qdio.no_out_queues = known_devices[i][8];
- card->info.is_multicast_different = known_devices[i][9];
- if (qeth_is_1920_device(card)) {
- dev_info(&card->gdev->dev,
- "Priority Queueing not supported\n");
- card->qdio.no_out_queues = 1;
- card->qdio.default_out_queue = 0;
- }
+ while (known_devices[i][QETH_DEV_MODEL_IND]) {
+ if ((CARD_RDEV(card)->id.dev_type ==
+ known_devices[i][QETH_DEV_TYPE_IND]) &&
+ (CARD_RDEV(card)->id.dev_model ==
+ known_devices[i][QETH_DEV_MODEL_IND])) {
+ card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
+ card->qdio.no_out_queues =
+ known_devices[i][QETH_QUEUE_NO_IND];
+ card->info.is_multicast_different =
+ known_devices[i][QETH_MULTICAST_IND];
+ qeth_get_channel_path_desc(card);
return 0;
}
i++;
@@ -1292,13 +1323,14 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (card->info.type == QETH_CARD_TYPE_IQD)
- rc = qdio_cleanup(CARD_DDEV(card),
+ rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
- rc = qdio_cleanup(CARD_DDEV(card),
+ rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_DBF_TEXT_(TRACE, 3, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -1398,22 +1430,20 @@ static void qeth_init_tokens(struct qeth_card *card)
static void qeth_init_func_level(struct qeth_card *card)
{
- if (card->ipato.enabled) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
- else
- card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT;
- } else {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- /*FIXME:why do we have same values for dis and ena for
- osae??? */
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_IQD:
+ if (card->ipato.enabled)
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT;
else
card->info.func_level =
- QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT;
+ QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT;
+ break;
+ case QETH_CARD_TYPE_OSD:
+ card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
+ break;
+ default:
+ break;
}
}
@@ -1560,7 +1590,7 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
@@ -1596,27 +1626,35 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
}
card = CARD_FROM_CDEV(channel->ccwdev);
- if (qeth_check_idx_response(iob->data))
+ if (qeth_check_idx_response(card, iob->data))
goto out;
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
dev_err(&card->write.ccwdev->dev,
"The adapter is used exclusively by another "
"host\n");
- else
+ break;
+ case QETH_IDX_ACT_ERR_AUTH:
+ dev_err(&card->read.ccwdev->dev,
+ "Setting the device online failed because of "
+ "insufficient LPAR authorization\n");
+ break;
+ default:
QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
" negative reply\n",
dev_name(&card->read.ccwdev->dev));
+ }
goto out;
}
/**
- * temporary fix for microcode bug
- * to revert it,replace OR by AND
- */
+ * temporary fix for microcode bug
+ * to revert it, replace OR by AND
+ */
if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
- (card->info.type == QETH_CARD_TYPE_OSAE))
+ (card->info.type == QETH_CARD_TYPE_OSD))
card->info.portname_required = 1;
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
@@ -1825,7 +1863,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
return 1500;
case QETH_CARD_TYPE_IQD:
return card->info.max_mtu;
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR:
@@ -1833,6 +1871,9 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
default:
return 1492;
}
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
+ return 1492;
default:
return 1500;
}
@@ -1843,8 +1884,10 @@ static inline int qeth_get_max_mtu_for_card(int cardtype)
switch (cardtype) {
case QETH_CARD_TYPE_UNKNOWN:
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return 61440;
case QETH_CARD_TYPE_IQD:
return 57344;
@@ -1882,7 +1925,9 @@ static inline int qeth_get_mtu_outof_framesize(int framesize)
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
return ((mtu >= 576) && (mtu <= 61440));
case QETH_CARD_TYPE_IQD:
return ((mtu >= 576) &&
@@ -1933,6 +1978,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
card->info.link_type = link_type;
} else
card->info.link_type = 0;
+ QETH_DBF_TEXT_(SETUP, 2, "link%d", link_type);
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -1976,6 +2022,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
struct qeth_cmd_buffer *iob;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
@@ -1983,8 +2030,15 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_connection_r,
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
+ if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
+ 3)) {
+ QETH_DBF_TEXT(SETUP, 2, "olmlimit");
+ dev_err(&card->gdev->dev, "A connection could not be "
+ "established because of an OLM limit\n");
+ rc = -EMLINK;
+ }
QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
- return 0;
+ return rc;
}
static int qeth_ulp_setup(struct qeth_card *card)
@@ -2237,7 +2291,9 @@ static void qeth_print_status_no_portname(struct qeth_card *card)
void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSM:
+ case QETH_CARD_TYPE_OSX:
/* VM will use a non-zero first character
* to indicate a HiperSockets like reporting
* of the level OSA sets the first character to zero
@@ -2544,9 +2600,11 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
QETH_DBF_TEXT(TRACE, 3, "quyadpcb");
cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f)
+ if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+ QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
+ }
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
@@ -2936,7 +2994,8 @@ EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
- if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE))
+ if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX))
return card->qdio.default_out_queue;
switch (card->qdio.no_out_queues) {
case 4:
@@ -3498,13 +3557,14 @@ int qeth_set_access_ctrl_online(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 4, "setactlo");
- if (card->info.type == QETH_CARD_TYPE_OSAE &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation);
if (rc) {
QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
+ "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
card->gdev->dev.kobj.name,
rc);
}
@@ -3810,10 +3870,18 @@ static int qeth_qdio_establish(struct qeth_card *card)
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
- rc = qdio_initialize(&init_data);
- if (rc)
+ rc = qdio_allocate(&init_data);
+ if (rc) {
+ atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ goto out;
+ }
+ rc = qdio_establish(&init_data);
+ if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ qdio_free(CARD_DDEV(card));
+ }
}
+out:
kfree(out_sbal_ptrs);
kfree(in_sbal_ptrs);
kfree(qib_param_field);
@@ -3836,9 +3904,16 @@ static void qeth_core_free_card(struct qeth_card *card)
}
static struct ccw_device_id qeth_ids[] = {
- {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE},
- {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD},
- {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
+ .driver_info = QETH_CARD_TYPE_OSD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
+ .driver_info = QETH_CARD_TYPE_IQD},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
+ .driver_info = QETH_CARD_TYPE_OSN},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
+ .driver_info = QETH_CARD_TYPE_OSM},
+ {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
+ .driver_info = QETH_CARD_TYPE_OSX},
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -4242,25 +4317,25 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (card->info.type == QETH_CARD_TYPE_OSN)
rc = qeth_core_create_osn_attributes(dev);
- if (rc)
- goto err_card;
+ else
+ rc = qeth_core_create_device_attributes(dev);
+ if (rc)
+ goto err_card;
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSN:
+ case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
- if (rc) {
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
+ if (rc)
+ goto err_attr;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc) {
- qeth_core_free_discipline(card);
- qeth_core_remove_osn_attributes(dev);
- goto err_card;
- }
- } else {
- rc = qeth_core_create_device_attributes(dev);
if (rc)
- goto err_card;
+ goto err_disc;
+ case QETH_CARD_TYPE_OSD:
+ case QETH_CARD_TYPE_OSX:
+ default:
+ break;
}
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
@@ -4270,6 +4345,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_determine_capabilities(card);
return 0;
+err_disc:
+ qeth_core_free_discipline(card);
+err_attr:
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ qeth_core_remove_osn_attributes(dev);
+ else
+ qeth_core_remove_device_attributes(dev);
err_card:
qeth_core_free_card(card);
err_dev:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 104a3351e02..f9ed24de751 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -48,9 +48,11 @@ extern unsigned char IPA_PDU_HEADER[];
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
- QETH_CARD_TYPE_OSAE = 10,
- QETH_CARD_TYPE_IQD = 1234,
- QETH_CARD_TYPE_OSN = 11,
+ QETH_CARD_TYPE_OSD = 1,
+ QETH_CARD_TYPE_IQD = 5,
+ QETH_CARD_TYPE_OSN = 6,
+ QETH_CARD_TYPE_OSM = 3,
+ QETH_CARD_TYPE_OSX = 2,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -614,6 +616,8 @@ extern unsigned char IDX_ACTIVATE_WRITE[];
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
+#define QETH_IDX_ACT_ERR_EXCL 0x19
+#define QETH_IDX_ACT_ERR_AUTH 0x1E
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 25dfd5abd19..2eb022ff261 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -122,23 +122,32 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
unsigned int portno, limit;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO)
- return -EINVAL;
+ if (portno > QETH_MAX_PORTNO) {
+ rc = -EINVAL;
+ goto out;
+ }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
- if (portno > limit)
- return -EINVAL;
-
+ if (portno > limit) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portno = portno;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
@@ -165,18 +174,23 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
- if ((strlen(tmp) > 8) || (strlen(tmp) == 0))
- return -EINVAL;
+ if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
+ rc = -EINVAL;
+ goto out;
+ }
card->info.portname[0] = strlen(tmp);
/* for beauty reasons */
@@ -184,8 +198,9 @@ static ssize_t qeth_dev_portname_store(struct device *dev,
card->info.portname[i] = ' ';
strcpy(card->info.portname + 1, tmp);
ASCEBC(card->info.portname + 1, 8);
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
@@ -215,20 +230,25 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
/* check if 1920 devices are supported;
 * if so, we must not permit priority queueing
 */
if (card->qdio.no_out_queues == 1) {
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
- return -EPERM;
+ rc = -EPERM;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
@@ -251,10 +271,11 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
} else if (!strcmp(tmp, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
@@ -277,14 +298,17 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int cnt, old_cnt;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
@@ -293,7 +317,9 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
if (old_cnt != cnt) {
rc = qeth_realloc_buffer_pool(card, cnt);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
@@ -337,25 +363,27 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
- return count;
+ goto out;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
@@ -377,15 +405,17 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i, rc;
+ int i, rc = 0;
enum qeth_discipline_id newdis;
if (!card)
return -EINVAL;
- if (((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER)))
- return -EPERM;
+ mutex_lock(&card->conf_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
switch (i) {
@@ -396,12 +426,13 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
- if (card->options.layer2 == newdis) {
- return count;
- } else {
+ if (card->options.layer2 == newdis)
+ goto out;
+ else {
if (card->discipline.ccwgdriver) {
card->discipline.ccwgdriver->remove(card->gdev);
qeth_core_free_discipline(card);
@@ -410,12 +441,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
rc = qeth_core_load_discipline(card, newdis);
if (rc)
- return rc;
+ goto out;
rc = card->discipline.ccwgdriver->probe(card->gdev);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
@@ -454,13 +485,13 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
char *tmp, *curtoken;
curtoken = (char *) buf;
- if (!card) {
- rc = -EINVAL;
- goto out;
- }
+ if (!card)
+ return -EINVAL;
+ mutex_lock(&card->conf_mutex);
/* check for unknown, too, in case we do not yet know who we are */
- if (card->info.type != QETH_CARD_TYPE_OSAE &&
+ if (card->info.type != QETH_CARD_TYPE_OSD &&
+ card->info.type != QETH_CARD_TYPE_OSX &&
card->info.type != QETH_CARD_TYPE_UNKNOWN) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
@@ -491,6 +522,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
rc = ipa_rc;
}
out:
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -510,22 +542,25 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
-
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value) {
+ if (i <= max_value)
*value = i;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
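The qeth_core_sys.c hunks above all converge on one store-attribute shape: take card->conf_mutex, reject the write unless the card is DOWN or RECOVER, record failures in rc instead of returning early, and leave through a single unlock that reports either the error or the consumed byte count. A hedged sketch of that shape, with qeth_dev_example_store() as a made-up name and the actual option parsing elided:

static ssize_t qeth_dev_example_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	int rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER)) {
		rc = -EPERM;
		goto out;
	}
	/* parse buf and update card->options here */
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}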
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6a801dc3bf8..d43f57a4ac6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -56,7 +56,9 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -309,6 +311,10 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct qeth_vlan_vid *id;
QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "aidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "aidREC");
return;
@@ -329,6 +335,10 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct qeth_card *card = dev->ml_priv;
QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ QETH_DBF_TEXT(TRACE, 3, "kidOSM");
+ return;
+ }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_DBF_TEXT(TRACE, 3, "kidREC");
return;
@@ -559,8 +569,10 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
"device %s: x%x\n", CARD_BUS_ID(card), rc);
}
- if ((card->info.type == QETH_CARD_TYPE_IQD) ||
- (card->info.guestlan)) {
+ if (card->info.type == QETH_CARD_TYPE_IQD ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX ||
+ card->info.guestlan) {
rc = qeth_setadpparms_change_macaddr(card);
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -589,8 +601,10 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -EOPNOTSUPP;
}
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- QETH_DBF_TEXT(TRACE, 3, "setmcOSN");
+ if (card->info.type == QETH_CARD_TYPE_OSN ||
+ card->info.type == QETH_CARD_TYPE_OSM ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
+ QETH_DBF_TEXT(TRACE, 3, "setmcTYP");
return -EOPNOTSUPP;
}
QETH_DBF_TEXT_(TRACE, 3, "%s", CARD_BUS_ID(card));
@@ -608,7 +622,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- struct dev_addr_list *dm;
struct netdev_hw_addr *ha;
if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -620,8 +633,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
return;
qeth_l2_del_all_mc(card);
spin_lock_bh(&card->mclock);
- for (dm = dev->mc_list; dm; dm = dm->next)
- qeth_l2_add_mc(card, dm->da_addr, 0);
+ netdev_for_each_mc_addr(ha, dev)
+ qeth_l2_add_mc(card, ha->addr, 0);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
@@ -886,9 +899,6 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_OSAE:
- card->dev = alloc_etherdev(0);
- break;
case QETH_CARD_TYPE_IQD:
card->dev = alloc_netdev(0, "hsi%d", ether_setup);
break;
@@ -925,6 +935,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -957,18 +968,21 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
} else
card->lan_online = 1;
- if (card->info.type != QETH_CARD_TYPE_OSN) {
+ if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+ (card->info.type == QETH_CARD_TYPE_OSX))
/* configure isolation level */
qeth_set_access_ctrl_online(card);
+
+ if (card->info.type != QETH_CARD_TYPE_OSN &&
+ card->info.type != QETH_CARD_TYPE_OSM)
qeth_l2_process_vlans(card, 0);
- }
netif_tx_disable(card->dev);
@@ -996,6 +1010,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
@@ -1008,6 +1024,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -1023,6 +1040,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -1041,6 +1059,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fc6ca1da8b9..61adae21a46 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -54,16 +54,16 @@ int qeth_l3_set_large_send(struct qeth_card *card,
if (card->options.large_send == QETH_LARGE_SEND_TSO) {
if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM;
+ NETIF_F_IP_CSUM;
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
rc = -EOPNOTSUPP;
}
} else {
card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
- NETIF_F_HW_CSUM);
+ NETIF_F_IP_CSUM);
card->options.large_send = QETH_LARGE_SEND_NO;
}
return rc;
@@ -1108,6 +1108,13 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
QETH_DBF_TEXT_(TRACE, 3, "csum:%d", card->info.csum_mask);
}
+ if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
+ cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ card->info.tx_csum_mask =
+ cmd->data.setassparms.data.flags_32bit;
+ QETH_DBF_TEXT_(TRACE, 3, "tcsu:%d", card->info.tx_csum_mask);
+ }
+
return 0;
}
@@ -1536,6 +1543,28 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
return rc;
}
+static int qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
+{
+ int rc = 0;
+
+ if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ return rc;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_START, 0);
+ if (rc)
+ goto err_out;
+ rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
+ IPA_CMD_ASS_ENABLE, card->info.tx_csum_mask);
+ if (rc)
+ goto err_out;
+ dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
+ return rc;
+err_out:
+ dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
+ "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+ return rc;
+}
+
static int qeth_l3_start_ipa_tso(struct qeth_card *card)
{
int rc;
@@ -1578,6 +1607,7 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
qeth_l3_start_ipa_checksum(card); /* go on*/
+ qeth_l3_start_ipa_tx_checksum(card);
qeth_l3_start_ipa_tso(card); /* go on*/
return 0;
}
@@ -1929,7 +1959,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
if (!in6_dev)
return;
- for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) {
+ list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr,
@@ -2681,7 +2711,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSAE) &&
+ if ((card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) &&
!card->info.guestlan)
return 1;
return 0;
@@ -2817,6 +2848,21 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
+static inline void qeth_l3_hdr_csum(struct qeth_card *card,
+ struct qeth_hdr *hdr, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* tcph->check already contains the pseudo header checksum,
+ * so just set the header flags
+ */
+ if (iph->protocol == IPPROTO_UDP)
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+}
+
static void qeth_tso_fill_header(struct qeth_card *card,
struct qeth_hdr *qhdr, struct sk_buff *skb)
{
@@ -2852,21 +2898,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
}
}
-static void qeth_tx_csum(struct sk_buff *skb)
-{
- __wsum csum;
- int offset;
-
- skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
- offset = skb->csum_start - skb_headroom(skb);
- BUG_ON(offset >= skb_headlen(skb));
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
-
- offset += skb->csum_offset;
- BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-}
-
static inline int qeth_l3_tso_elements(struct sk_buff *skb)
{
unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
@@ -2923,12 +2954,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_is_gso(skb))
large_send = card->options.large_send;
- else
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
- }
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
@@ -3007,6 +3032,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
cast_type);
hdr->hdr.l3.length = new_skb->len - data_offset;
}
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qeth_l3_hdr_csum(card, hdr, new_skb);
}
elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
@@ -3132,10 +3160,25 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
return rc;
}
+static int qeth_l3_ethtool_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (data) {
+ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ return -EPERM;
+ } else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .set_tx_csum = qeth_l3_ethtool_set_tx_csum,
.get_rx_csum = qeth_l3_ethtool_get_rx_csum,
.set_rx_csum = qeth_l3_ethtool_set_rx_csum,
.get_sg = ethtool_op_get_sg,
@@ -3206,7 +3249,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
- if (card->info.type == QETH_CARD_TYPE_OSAE) {
+ if (card->info.type == QETH_CARD_TYPE_OSD ||
+ card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
#ifdef CONFIG_TR
@@ -3336,6 +3380,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
enum qeth_card_states recover_flag;
BUG_ON(!card);
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
@@ -3367,7 +3412,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
card->lan_online = 0;
- return 0;
+ goto out;
}
rc = -ENODEV;
goto out_remove;
@@ -3414,6 +3459,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
+out:
+ mutex_unlock(&card->conf_mutex);
return 0;
out_remove:
card->use_hard_stop = 1;
@@ -3425,6 +3472,7 @@ out_remove:
card->state = CARD_STATE_RECOVER;
else
card->state = CARD_STATE_DOWN;
+ mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -3440,6 +3488,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
int rc = 0, rc2 = 0, rc3 = 0;
enum qeth_card_states recover_flag;
+ mutex_lock(&card->conf_mutex);
QETH_DBF_TEXT(SETUP, 3, "setoffl");
QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *));
@@ -3458,6 +3507,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&card->conf_mutex);
return 0;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 25b3e7aae44..fb5318b30e9 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -70,10 +70,10 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
{
enum qeth_routing_types old_route_type = route->type;
char *tmp;
- int rc;
+ int rc = 0;
tmp = strsep((char **) &buf, "\n");
-
+ mutex_lock(&card->conf_mutex);
if (!strcmp(tmp, "no_router")) {
route->type = NO_ROUTER;
} else if (!strcmp(tmp, "primary_connector")) {
@@ -87,7 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
} else if (!strcmp(tmp, "multicast_router")) {
route->type = MULTICAST_ROUTER;
} else {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
if (((card->state == CARD_STATE_SOFTSETUP) ||
(card->state == CARD_STATE_UP)) &&
@@ -97,7 +98,9 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
else if (prot == QETH_PROT_IPV6)
rc = qeth_l3_setrouting_v6(card);
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
@@ -157,22 +160,26 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1))
card->options.fake_broadcast = i;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
@@ -200,31 +207,35 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
tmp = strsep((char **) &buf, "\n");
- if (!strcmp(tmp, "local")) {
+ if (!strcmp(tmp, "local"))
card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
- return count;
- } else if (!strcmp(tmp, "all_rings")) {
+ else if (!strcmp(tmp, "all_rings"))
card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
- return count;
- } else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
@@ -251,18 +262,22 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- int i;
+ int i, rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
(card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
i = simple_strtoul(buf, &tmp, 16);
@@ -270,10 +285,11 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
card->options.macaddr_mode = i?
QETH_TR_MACADDR_CANONICAL :
QETH_TR_MACADDR_NONCANONICAL;
- else {
- return -EINVAL;
- }
- return count;
+ else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
@@ -297,11 +313,12 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_checksum_types csum_type;
char *tmp;
- int rc;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "sw_checksumming"))
csum_type = SW_CHECKSUMMING;
@@ -309,13 +326,15 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
csum_type = HW_CHECKSUMMING;
else if (!strcmp(tmp, "no_checksumming"))
csum_type = NO_CHECKSUMMING;
- else
- return -EINVAL;
+ else {
+ rc = -EINVAL;
+ goto out;
+ }
rc = qeth_l3_set_rx_csum(card, csum_type);
- if (rc)
- return rc;
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
@@ -336,7 +355,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- int ret;
+ int rc = 0;
unsigned long i;
if (!card)
@@ -345,19 +364,24 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
- ret = strict_strtoul(buf, 16, &i);
- if (ret)
- return -EINVAL;
+ rc = strict_strtoul(buf, 16, &i);
+ if (rc) {
+ rc = -EINVAL;
+ goto out;
+ }
switch (i) {
case 0:
card->options.sniffer = i;
break;
case 1:
- ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
card->options.sniffer = i;
if (card->qdio.init_pool.buf_count !=
@@ -366,11 +390,13 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
QETH_IN_BUF_COUNT_MAX);
break;
} else
- return -EPERM;
+ rc = -EPERM;
default: /* fall through */
- return -EINVAL;
+ rc = -EINVAL;
}
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
@@ -412,12 +438,11 @@ static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
else
return -EINVAL;
- if (card->options.large_send == type)
- return count;
- rc = qeth_l3_set_large_send(card, type);
- if (rc)
- return rc;
- return count;
+ mutex_lock(&card->conf_mutex);
+ if (card->options.large_send != type)
+ rc = qeth_l3_set_large_send(card, type);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
@@ -455,13 +480,17 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
- (card->state != CARD_STATE_RECOVER))
- return -EPERM;
+ (card->state != CARD_STATE_RECOVER)) {
+ rc = -EPERM;
+ goto out;
+ }
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
@@ -470,10 +499,11 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.enabled = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
@@ -497,10 +527,12 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
@@ -508,10 +540,10 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
card->ipato.invert4 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert4 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
@@ -593,27 +625,28 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_ipato_entry *ipatoe;
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
if (rc)
- return rc;
+ goto out;
ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
if (!ipatoe) {
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ipatoe->proto = proto;
memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
- if (rc) {
+ if (rc)
kfree(ipatoe);
- return rc;
- }
-
- return count;
+out:
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
@@ -636,15 +669,14 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
{
u8 addr[16];
int mask_bits;
- int rc;
+ int rc = 0;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
- if (rc)
- return rc;
-
- qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
-
- return count;
+ if (!rc)
+ qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
@@ -677,10 +709,12 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
+ int rc = 0;
if (!card)
return -EINVAL;
+ mutex_lock(&card->conf_mutex);
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "toggle")) {
card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
@@ -688,10 +722,10 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
card->ipato.invert6 = 1;
} else if (!strcmp(tmp, "0")) {
card->ipato.invert6 = 0;
- } else {
- return -EINVAL;
- }
- return count;
+ } else
+ rc = -EINVAL;
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
@@ -813,15 +847,12 @@ static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_vipa(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
@@ -845,13 +876,12 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_vipae(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_vipa(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_vipa(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
@@ -979,15 +1009,12 @@ static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
u8 addr[16] = {0, };
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- rc = qeth_l3_add_rxip(card, proto, addr);
- if (rc)
- return rc;
-
- return count;
+ if (!rc)
+ rc = qeth_l3_add_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
@@ -1011,13 +1038,12 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
u8 addr[16];
int rc;
+ mutex_lock(&card->conf_mutex);
rc = qeth_l3_parse_rxipe(buf, proto, addr);
- if (rc)
- return rc;
-
- qeth_l3_del_rxip(card, proto, addr);
-
- return count;
+ if (!rc)
+ qeth_l3_del_rxip(card, proto, addr);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
}
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1e6183a86ce..e331df2122f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -425,7 +425,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
while (atomic_read(&adapter->stat_miss) > 0)
if (zfcp_fsf_status_read(adapter->qdio)) {
- if (atomic_read(&adapter->stat_miss) >= 16) {
+ if (atomic_read(&adapter->stat_miss) >=
+ adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
NULL);
return 1;
@@ -545,6 +546,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ /* report size limit per scatter-gather segment */
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
if (!zfcp_adapter_scsi_register(adapter))
return adapter;
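The stat_miss check above no longer compares against a literal 16: the threshold now lives in the adapter as stat_read_buf_num. Pulling the related hunks together (the zfcp_def.h hunk adds the field, the zfcp_fsf.c hunk fills it from the exchange-config response, the zfcp_erp.c hunk primes the counter from it), the flow reads, in sketch form:

	/* exchange-config handler: take the firmware value, but at least 16 */
	adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);

	/* error recovery primes the missing-status counter from the same value */
	atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);

	/* the refill path reopens the adapter once that budget is exhausted */
	if (atomic_read(&adapter->stat_miss) >= adapter->stat_read_buf_num)
		zfcp_erp_adapter_reopen(adapter, 0, "axsref1", NULL);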
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 25d9e0ae9c5..1a2db0a3573 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -254,6 +254,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
}
static const struct file_operations zfcp_cfdc_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = zfcp_cfdc_dev_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zfcp_cfdc_dev_ioctl
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7131c7db1f0..9fa1b064893 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -44,23 +44,6 @@ struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ \
- (FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
- /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
- /* max. number of (data buffer) SBALEs in largest SBAL chain
- multiplied with number of sectors per 4k block */
-
/********************* FSF SPECIFIC DEFINES *********************************/
/* ATTENTION: value must not be used by hardware */
@@ -181,6 +164,7 @@ struct zfcp_adapter {
stack abort/command
completion races */
atomic_t stat_miss; /* # missing status reads*/
+ unsigned int stat_read_buf_num;
struct work_struct stat_work;
atomic_t status; /* status of this adapter */
struct list_head erp_ready_head; /* error recovery for this
@@ -205,6 +189,7 @@ struct zfcp_adapter {
struct work_struct scan_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
+ struct device_dma_parameters dma_parms;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0be5e7ea282..e3dbeda9717 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,7 +714,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- atomic_set(&act->adapter->stat_miss, 16);
+ atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
if (zfcp_status_read_refill(act->adapter))
return ZFCP_ERP_FAILED;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8786a79c7f8..48a8f93b72f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_EXT_H
@@ -143,9 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_qdio_req *, unsigned long,
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2a1cbb74b99..6f8ab43a485 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -400,7 +400,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_adapter *adapter = port->adapter;
int ret;
- adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
+ adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
if (!adisc)
return -ENOMEM;
@@ -493,7 +493,7 @@ static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
if (!gpn_ft)
return NULL;
- req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+ req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
if (!req) {
kfree(gpn_ft);
gpn_ft = NULL;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b3b1d2f7939..9ac6a6e4a60 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,6 +496,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
if (fc_host_permanent_port_name(shost) == -1)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -640,37 +641,6 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
-{
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
- spin_lock_bh(&qdio->req_q_lock);
- if (atomic_read(&req_q->count))
- return 1;
- spin_unlock_bh(&qdio->req_q_lock);
- return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
-{
- struct zfcp_adapter *adapter = qdio->adapter;
- long ret;
-
- spin_unlock_bh(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_fsf_sbal_check(qdio), 5 * HZ);
- if (ret > 0)
- return 0;
- if (!ret) {
- atomic_inc(&qdio->req_q_full);
- /* assume hanging outbound queue, try queue recovery */
- zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
- }
-
- spin_lock_bh(&qdio->req_q_lock);
- return -EIO;
-}
-
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
struct zfcp_fsf_req *req;
@@ -705,10 +675,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
}
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
- u32 fsf_cmd, mempool_t *pool)
+ u32 fsf_cmd, u32 sbtype,
+ mempool_t *pool)
{
- struct qdio_buffer_element *sbale;
- struct zfcp_qdio_queue *req_q = &qdio->req_q;
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
@@ -725,14 +694,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->qdio_req.sbal_number = 1;
- req->qdio_req.sbal_first = req_q->first;
- req->qdio_req.sbal_last = req_q->first;
- req->qdio_req.sbale_curr = 1;
-
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].addr = (void *) req->req_id;
- sbale[0].flags |= SBAL_FLAGS0_COMMAND;
if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
if (likely(pool))
@@ -753,10 +714,11 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- sbale[1].addr = (void *) req->qtcb;
- sbale[1].length = sizeof(struct fsf_qtcb);
}
+ zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+ req->qtcb, sizeof(struct fsf_qtcb));
+
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
zfcp_fsf_req_free(req);
return ERR_PTR(-EIO);
@@ -803,24 +765,19 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- struct qdio_buffer_element *sbale;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->qdio_req.sbale_curr = 2;
-
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
retval = -ENOMEM;
@@ -828,9 +785,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
- sbale->addr = (void *) sr_buf;
- sbale->length = sizeof(*sr_buf);
+
+ zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
retval = zfcp_fsf_req_send(req);
if (retval)
@@ -907,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
struct zfcp_unit *unit)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.scsi_abort);
if (IS_ERR(req)) {
req = NULL;
@@ -925,9 +882,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = unit;
req->handler = zfcp_fsf_abort_fcp_command_handler;
@@ -996,21 +951,14 @@ skip_fsfstatus:
ct->handler(ct->handler_data);
}
-static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp)
{
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
- sbale[2].addr = sg_virt(sg_req);
- sbale[2].length = sg_req->length;
- sbale[3].addr = sg_virt(sg_resp);
- sbale[3].length = sg_resp->length;
- sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-}
-
-static int zfcp_fsf_one_sbal(struct scatterlist *sg)
-{
- return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+ zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+ zfcp_qdio_set_sbale_last(qdio, q_req);
}
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
@@ -1019,35 +967,34 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
int max_sbals)
{
struct zfcp_adapter *adapter = req->adapter;
- struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
- if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+ if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+ !zfcp_qdio_sg_one_sbale(sg_resp))
return -EOPNOTSUPP;
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
/* use single, unchained SBAL if it can hold the request */
- if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
- zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+ if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+ zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+ sg_req, sg_resp);
return 0;
}
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
- SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
@@ -1091,10 +1038,11 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+ SBAL_FLAGS0_TYPE_WRITE_READ, pool);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1103,7 +1051,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ, timeout);
+ ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
if (ret)
goto failed_send;
@@ -1187,10 +1135,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
int ret = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+ SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -1224,16 +1173,16 @@ out:
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1242,9 +1191,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_CFDC |
@@ -1269,24 +1216,22 @@ out:
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_config_data_handler;
req->qtcb->bottom.config.feature_selection =
@@ -1320,7 +1265,6 @@ out_unlock:
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
int retval = -EIO;
@@ -1328,10 +1272,11 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1340,9 +1285,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
@@ -1368,7 +1311,6 @@ out:
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
@@ -1376,10 +1318,11 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
return -EOPNOTSUPP;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out_unlock;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+ SBAL_FLAGS0_TYPE_READ, NULL);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -1389,9 +1332,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_exchange_port_data_handler;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1485,17 +1426,17 @@ out:
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_port *port = erp_action->port;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1504,9 +1445,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_port_handler;
hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@ -1556,16 +1495,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1574,9 +1513,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_port_handler;
req->data = erp_action->port;
@@ -1633,16 +1570,16 @@ out:
*/
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1651,9 +1588,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_open_wka_port_handler;
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@ -1688,16 +1623,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (unlikely(IS_ERR(req))) {
@@ -1706,9 +1641,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->handler = zfcp_fsf_close_wka_port_handler;
req->data = wka_port;
@@ -1782,16 +1715,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1800,9 +1733,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->data = erp_action->port;
req->qtcb->header.port_handle = erp_action->port->handle;
@@ -1954,17 +1885,17 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+ SBAL_FLAGS0_TYPE_READ,
adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -1973,9 +1904,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@ -2041,16 +1970,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
*/
int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = erp_action->adapter->qdio;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+ SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
if (IS_ERR(req)) {
@@ -2059,9 +1988,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
req->qtcb->header.port_handle = erp_action->port->handle;
req->qtcb->header.lun_handle = erp_action->unit->handle;
@@ -2289,8 +2216,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}
+ if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+ sbtype = SBAL_FLAGS0_TYPE_WRITE;
+
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
- adapter->pool.scsi_req);
+ sbtype, adapter->pool.scsi_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2298,7 +2228,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2323,20 +2252,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
- sbtype = SBAL_FLAGS0_TYPE_WRITE;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
}
+ get_device(&unit->dev);
+
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd),
- FSF_MAX_SBALS_PER_REQ);
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2371,7 +2301,6 @@ out:
*/
struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
{
- struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd *fcp_cmnd;
struct zfcp_qdio *qdio = unit->port->adapter->qdio;
@@ -2381,10 +2310,11 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
return NULL;
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+ SBAL_FLAGS0_TYPE_WRITE,
qdio->adapter->pool.scsi_req);
if (IS_ERR(req)) {
@@ -2401,9 +2331,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
- sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+ zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@ -2432,7 +2360,6 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_fsf_cfdc *fsf_cfdc)
{
- struct qdio_buffer_element *sbale;
struct zfcp_qdio *qdio = adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
@@ -2453,10 +2380,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
}
spin_lock_bh(&qdio->req_q_lock);
- if (zfcp_fsf_req_sbal_get(qdio))
+ if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+ req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
if (IS_ERR(req)) {
retval = -EPERM;
goto out;
@@ -2464,16 +2391,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
- sbale[0].flags |= direction;
-
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
- direction, fsf_cfdc->sg,
- FSF_MAX_SBALS_PER_REQ);
+ fsf_cfdc->sg,
+ ZFCP_FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index b3de682b64c..519083fd6e8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef FSF_H
@@ -152,7 +152,12 @@
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ 36
+#define ZFCP_FSF_MAX_SBALS_PER_REQ 36
+
+/* max. number of (data buffer) SBALEs in largest SBAL chain
+ * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+#define ZFCP_FSF_MAX_SBALES_PER_REQ \
+ (ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
@@ -361,7 +366,7 @@ struct fsf_qtcb_bottom_config {
u32 adapter_type;
u8 res0;
u8 peer_d_id[3];
- u8 res1[2];
+ u16 status_read_buf_num;
u16 timer_interval;
u8 res2[9];
u8 s_id[3];
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbfa312a7f5..28117e130e2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -151,8 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
@@ -180,17 +179,16 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned int sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
- if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
- return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+ if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+ return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
@@ -206,62 +204,38 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
zfcp_qdio_zero_sbals(sbal, first, count);
}
-static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_qdio_req *q_req,
- unsigned int sbtype, void *start_addr,
- unsigned int total_length)
-{
- struct qdio_buffer_element *sbale;
- unsigned long remaining, length;
- void *addr;
-
- /* split segment up */
- for (addr = start_addr, remaining = total_length; remaining > 0;
- addr += length, remaining -= length) {
- sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
- if (!sbale) {
- atomic_inc(&qdio->req_q_full);
- zfcp_qdio_undo_sbals(qdio, q_req);
- return -EINVAL;
- }
-
- /* new piece must not exceed next page boundary */
- length = min(remaining,
- (PAGE_SIZE - ((unsigned long)addr &
- (PAGE_SIZE - 1))));
- sbale->addr = addr;
- sbale->length = length;
- }
- return 0;
-}
-
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* @max_sbals: upper bound for number of SBALs to be used
* Returns: number of bytes, or error (negative)
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
- unsigned long sbtype, struct scatterlist *sg,
- int max_sbals)
+ struct scatterlist *sg, int max_sbals)
{
struct qdio_buffer_element *sbale;
- int retval, bytes = 0;
+ int bytes = 0;
/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->flags |= sbtype;
+ sbale->flags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
- retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
- sg_virt(sg), sg->length);
- if (retval < 0)
- return retval;
+ sbale = zfcp_qdio_sbale_next(qdio, q_req);
+ if (!sbale) {
+ atomic_inc(&qdio->req_q_full);
+ zfcp_qdio_undo_sbals(qdio, q_req);
+ return -EINVAL;
+ }
+
+ sbale->addr = sg_virt(sg);
+ sbale->length = sg->length;
+
bytes += sg->length;
}
@@ -272,6 +246,46 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
return bytes;
}
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+ struct zfcp_qdio_queue *req_q = &qdio->req_q;
+
+ spin_lock_bh(&qdio->req_q_lock);
+ if (atomic_read(&req_q->count))
+ return 1;
+ spin_unlock_bh(&qdio->req_q_lock);
+ return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+ long ret;
+
+ spin_unlock_bh(&qdio->req_q_lock);
+ ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ if (ret > 0)
+ return 0;
+ if (!ret) {
+ atomic_inc(&qdio->req_q_full);
+ /* assume hanging outbound queue, try queue recovery */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+ }
+
+ spin_lock_bh(&qdio->req_q_lock);
+ return -EIO;
+}
+
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
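[Editor's sketch] The new zfcp_qdio_sbal_get() above centralizes the "wait up to 5 seconds for a free SBAL, otherwise trigger adapter recovery" logic, and the zfcp_fsf.c hunks earlier in this patch all converge on the same caller pattern. The following is a minimal illustrative sketch of that pattern only, not code from the patch: the function name is invented, zfcp_fsf_req_send() is assumed to be the usual send helper in zfcp_fsf.c, and error handling is simplified.

        /* Sketch only: lock is taken by the caller; zfcp_qdio_sbal_get() may
         * sleep and drops/re-takes req_q_lock internally while waiting. */
        static int example_send_simple_fsf_req(struct zfcp_qdio *qdio)
        {
                struct zfcp_fsf_req *req;
                int retval = -EIO;

                spin_lock_bh(&qdio->req_q_lock);
                if (zfcp_qdio_sbal_get(qdio))   /* no free SBAL after waiting */
                        goto out;

                req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
                                          SBAL_FLAGS0_TYPE_READ,
                                          qdio->adapter->pool.erp_req);
                if (IS_ERR(req))
                        goto out;

                zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
                retval = zfcp_fsf_req_send(req);  /* assumed helper in zfcp_fsf.c */
        out:
                spin_unlock_bh(&qdio->req_q_lock);
                return retval;
        }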
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8cca54631e1..138fba577b4 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -11,6 +11,14 @@
#include <asm/qdio.h>
+#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
/**
* struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
* @sbal: qdio buffers
@@ -49,6 +57,7 @@ struct zfcp_qdio {
/**
* struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
* @sbal_number: number of free sbals
* @sbal_first: first sbal for this request
* @sbal_last: last sbal for this request
@@ -59,6 +68,7 @@ struct zfcp_qdio {
* @qdio_inb_usage: usage of inbound queue
*/
struct zfcp_qdio_req {
+ u32 sbtype;
u8 sbal_number;
u8 sbal_first;
u8 sbal_last;
@@ -106,4 +116,98 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
q_req->sbale_curr);
}
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue, the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ unsigned long req_id, u32 sbtype, void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+ q_req->sbal_number = 1;
+ q_req->sbtype = sbtype;
+
+ sbale = zfcp_qdio_sbale_req(qdio, q_req);
+ sbale->addr = (void *) req_id;
+ sbale->flags |= SBAL_FLAGS0_COMMAND;
+ sbale->flags |= sbtype;
+
+ q_req->sbale_curr = 1;
+ sbale++;
+ sbale->addr = data;
+ if (likely(data))
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ *
+ * This is only required for single sbal requests, calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+ void *data, u32 len)
+{
+ struct qdio_buffer_element *sbale;
+
+ BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+ q_req->sbale_curr++;
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->addr = data;
+ sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+ struct zfcp_qdio_req *q_req)
+{
+ struct qdio_buffer_element *sbale;
+
+ sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+ return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+{
+ q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+}
+
#endif /* ZFCP_QDIO_H */
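[Editor's sketch] The inline helpers added to zfcp_qdio.h above (zfcp_qdio_req_init, zfcp_qdio_fill_next, zfcp_qdio_set_sbale_last) replace the open-coded SBALE setup removed from zfcp_fsf.c earlier in this patch. Below is a minimal sketch, using only the signatures visible in the hunk, of how they would typically be chained for a single-SBAL request; the function name and parameters are illustrative, and the request-queue lock is assumed to be held from init until zfcp_qdio_send().

        static void example_build_single_sbal_req(struct zfcp_qdio *qdio,
                                                  struct zfcp_qdio_req *q_req,
                                                  unsigned long req_id,
                                                  void *data, u32 len)
        {
                /* SBALE 0 gets the request id and command flag,
                 * SBALE 1 the first data block */
                zfcp_qdio_req_init(qdio, q_req, req_id, SBAL_FLAGS0_TYPE_WRITE,
                                   data, len);

                /* further data blocks within the same SBAL would go through
                 * zfcp_qdio_fill_next(qdio, q_req, more_data, more_len); */

                /* close the chain before handing the request to zfcp_qdio_send() */
                zfcp_qdio_set_sbale_last(qdio, q_req);
        }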
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 174b6d57d57..be5d2c60453 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -175,7 +175,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
struct zfcp_fsf_req *old_req, *abrt_req;
unsigned long flags;
unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
char *dbf_tag;
@@ -200,7 +200,9 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -231,7 +233,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_fsf_req *fsf_req = NULL;
- int retval = SUCCESS;
+ int retval = SUCCESS, ret;
int retry = 3;
while (retry--) {
@@ -240,7 +242,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
break;
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
+
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -276,10 +281,13 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_unit *unit = scpnt->device->hostdata;
struct zfcp_adapter *adapter = unit->port->adapter;
+ int ret;
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
zfcp_erp_wait(adapter);
- fc_block_scsi_eh(scpnt);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret)
+ return ret;
return SUCCESS;
}
@@ -669,11 +677,12 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
+ .sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
+ .max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
},
};
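[Editor's sketch] The three zfcp_scsi.c hunks above stop ignoring the return value of fc_block_scsi_eh() and hand a non-zero result (e.g. FAST_IO_FAIL while the rport is blocked) straight back to the SCSI midlayer. A minimal sketch of that shared pattern, with an invented function name and the adapter/command pointers assumed to come from the surrounding error handler:

        static int example_eh_wait_and_block(struct zfcp_adapter *adapter,
                                             struct scsi_cmnd *scpnt)
        {
                int ret;

                zfcp_erp_wait(adapter);
                ret = fc_block_scsi_eh(scpnt);  /* non-zero, e.g. FAST_IO_FAIL */
                if (ret)
                        return ret;             /* propagate to the midlayer */

                return SUCCESS;
        }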
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index b4951eb0358..103fdf6b0b8 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -565,9 +565,9 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
int devidx = 0;
while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) {
- if (!strcmp(op->node->name, "temperature"))
+ if (!strcmp(op->dev.of_node->name, "temperature"))
attach_one_temp(bp, op, temp_index++);
- if (!strcmp(op->node->name, "fan-control"))
+ if (!strcmp(op->dev.of_node->name, "fan-control"))
attach_one_fan(bp, op, fan_index++);
}
if (temp_index != 0 && fan_index != 0) {
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 7e30e5f6e03..8bfdd63a1fc 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -97,7 +97,7 @@ struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device *
client->bp = bp;
client->op = op;
- reg = of_get_property(op->node, "reg", NULL);
+ reg = of_get_property(op->dev.of_node, "reg", NULL);
if (!reg) {
kfree(client);
return NULL;
@@ -327,7 +327,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde
spin_lock_init(&bp->lock);
entry = 0;
- for (dp = op->node->child;
+ for (dp = op->dev.of_node->child;
dp && entry < 8;
dp = dp->sibling, entry++) {
struct of_device *child_op;
@@ -414,8 +414,11 @@ static const struct of_device_id bbc_i2c_match[] = {
MODULE_DEVICE_TABLE(of, bbc_i2c_match);
static struct of_platform_driver bbc_i2c_driver = {
- .name = "bbc_i2c",
- .match_table = bbc_i2c_match,
+ .driver = {
+ .name = "bbc_i2c",
+ .owner = THIS_MODULE,
+ .of_match_table = bbc_i2c_match,
+ },
.probe = bbc_i2c_probe,
.remove = __devexit_p(bbc_i2c_remove),
};
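[Editor's sketch] The bbc_i2c.c hunk above, and the display7seg, envctrl, flash and uctrl hunks that follow, all apply the same of_platform_driver conversion: the legacy .name/.match_table fields move into the embedded struct device_driver, and the device node is reached through op->dev.of_node instead of op->node. A minimal illustrative sketch of the resulting shape (all "example_*" names are invented):

        static const struct of_device_id example_match[] = {
                { .name = "example", },
                {},
        };

        static int __devinit example_probe(struct of_device *op,
                                           const struct of_device_id *match)
        {
                struct device_node *dp = op->dev.of_node;   /* was op->node */

                dev_info(&op->dev, "probing %s\n", dp->full_name);
                return 0;
        }

        static int __devexit example_remove(struct of_device *op)
        {
                return 0;
        }

        static struct of_platform_driver example_driver = {
                .driver = {
                        .name           = "example",
                        .owner          = THIS_MODULE,
                        .of_match_table = example_match,
                },
                .probe  = example_probe,
                .remove = __devexit_p(example_remove),
        };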
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 3e59189f413..7baf1b64403 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -216,7 +216,7 @@ static int __devinit d7s_probe(struct of_device *op,
writeb(regs, p->regs);
printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n",
- op->node->full_name,
+ op->dev.of_node->full_name,
(regs & D7S_FLIP) ? " (FLIPPED)" : "",
op->resource[0].start,
sol_compat ? "in sol_compat mode" : "");
@@ -266,8 +266,11 @@ static const struct of_device_id d7s_match[] = {
MODULE_DEVICE_TABLE(of, d7s_match);
static struct of_platform_driver d7s_driver = {
- .name = DRIVER_NAME,
- .match_table = d7s_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = d7s_match,
+ },
.probe = d7s_probe,
.remove = __devexit_p(d7s_remove),
};
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index c6e2eff1940..c8166ecf527 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1043,7 +1043,7 @@ static int __devinit envctrl_probe(struct of_device *op,
return -ENOMEM;
index = 0;
- dp = op->node->child;
+ dp = op->dev.of_node->child;
while (dp) {
if (!strcmp(dp->name, "gpio")) {
i2c_childlist[index].i2ctype = I2C_GPIO;
@@ -1131,8 +1131,11 @@ static const struct of_device_id envctrl_match[] = {
MODULE_DEVICE_TABLE(of, envctrl_match);
static struct of_platform_driver envctrl_driver = {
- .name = DRIVER_NAME,
- .match_table = envctrl_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = envctrl_match,
+ },
.probe = envctrl_probe,
.remove = __devexit_p(envctrl_remove),
};
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 19f255b97c8..368d66294d8 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -105,9 +105,9 @@ static ssize_t
flash_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long p = file->f_pos;
+ loff_t p = *ppos;
int i;
-
+
if (count > flash.read_size - p)
count = flash.read_size - p;
@@ -118,7 +118,7 @@ flash_read(struct file * file, char __user * buf,
buf++;
}
- file->f_pos += count;
+ *ppos += count;
return count;
}
@@ -162,7 +162,7 @@ static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
static int __devinit flash_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct device_node *parent;
parent = dp->parent;
@@ -184,7 +184,7 @@ static int __devinit flash_probe(struct of_device *op,
flash.busy = 0;
printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
- op->node->full_name,
+ op->dev.of_node->full_name,
flash.read_base, flash.read_size,
flash.write_base, flash.write_size);
@@ -207,8 +207,11 @@ static const struct of_device_id flash_match[] = {
MODULE_DEVICE_TABLE(of, flash_match);
static struct of_platform_driver flash_driver = {
- .name = "flash",
- .match_table = flash_match,
+ .driver = {
+ .name = "flash",
+ .owner = THIS_MODULE,
+ .of_match_table = flash_match,
+ },
.probe = flash_probe,
.remove = __devexit_p(flash_remove),
};
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index fc2f676e984..d53e62ab09d 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -298,9 +298,9 @@ static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsiz
/*
* SunOS and Solaris /dev/openprom ioctl calls.
*/
-static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg,
- struct device_node *dp)
+static long openprom_sunos_ioctl(struct file * file,
+ unsigned int cmd, unsigned long arg,
+ struct device_node *dp)
{
DATA *data = file->private_data;
struct openpromio *opp = NULL;
@@ -316,6 +316,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
if (bufsize < 0)
return bufsize;
+ lock_kernel();
+
switch (cmd) {
case OPROMGETOPT:
case OPROMGETPROP:
@@ -365,6 +367,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
}
kfree(opp);
+ unlock_kernel();
+
return error;
}
@@ -547,13 +551,14 @@ static int opiocgetnext(unsigned int cmd, void __user *argp)
return 0;
}
-static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
+static int openprom_bsd_ioctl(struct file * file,
unsigned int cmd, unsigned long arg)
{
DATA *data = (DATA *) file->private_data;
void __user *argp = (void __user *)arg;
int err;
+ lock_kernel();
switch (cmd) {
case OPIOCGET:
err = opiocget(argp, data);
@@ -570,10 +575,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
case OPIOCGETOPTNODE:
BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
+ err = 0;
if (copy_to_user(argp, &options_node->phandle, sizeof(phandle)))
- return -EFAULT;
-
- return 0;
+ err = -EFAULT;
+ break;
case OPIOCGETNEXT:
case OPIOCGETCHILD:
@@ -581,9 +586,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
break;
default:
- return -EINVAL;
-
+ err = -EINVAL;
+ break;
};
+ unlock_kernel();
return err;
}
@@ -592,8 +598,8 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
/*
* Handoff control to the correct ioctl handler.
*/
-static int openprom_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static long openprom_ioctl(struct file * file,
+ unsigned int cmd, unsigned long arg)
{
DATA *data = (DATA *) file->private_data;
@@ -602,14 +608,14 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
case OPROMNXTOPT:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
- return openprom_sunos_ioctl(inode, file, cmd, arg,
+ return openprom_sunos_ioctl(file, cmd, arg,
options_node);
case OPROMSETOPT:
case OPROMSETOPT2:
if ((file->f_mode & FMODE_WRITE) == 0)
return -EPERM;
- return openprom_sunos_ioctl(inode, file, cmd, arg,
+ return openprom_sunos_ioctl(file, cmd, arg,
options_node);
case OPROMNEXT:
@@ -618,7 +624,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
case OPROMNXTPROP:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
- return openprom_sunos_ioctl(inode, file, cmd, arg,
+ return openprom_sunos_ioctl(file, cmd, arg,
data->current_node);
case OPROMU2P:
@@ -630,7 +636,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
case OPROMPATH2NODE:
if ((file->f_mode & FMODE_READ) == 0)
return -EPERM;
- return openprom_sunos_ioctl(inode, file, cmd, arg, NULL);
+ return openprom_sunos_ioctl(file, cmd, arg, NULL);
case OPIOCGET:
case OPIOCNEXTPROP:
@@ -639,12 +645,12 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
case OPIOCGETCHILD:
if ((file->f_mode & FMODE_READ) == 0)
return -EBADF;
- return openprom_bsd_ioctl(inode,file,cmd,arg);
+ return openprom_bsd_ioctl(file,cmd,arg);
case OPIOCSET:
if ((file->f_mode & FMODE_WRITE) == 0)
return -EBADF;
- return openprom_bsd_ioctl(inode,file,cmd,arg);
+ return openprom_bsd_ioctl(file,cmd,arg);
default:
return -EINVAL;
@@ -676,7 +682,7 @@ static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
case OPROMSETCUR:
case OPROMPCI2NODE:
case OPROMPATH2NODE:
- rval = openprom_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ rval = openprom_ioctl(file, cmd, arg);
break;
}
@@ -709,7 +715,7 @@ static int openprom_release(struct inode * inode, struct file * file)
static const struct file_operations openprom_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .ioctl = openprom_ioctl,
+ .unlocked_ioctl = openprom_ioctl,
.compat_ioctl = openprom_compat_ioctl,
.open = openprom_open,
.release = openprom_release,
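[Editor's sketch] The openprom.c hunks above, like the 3ware driver hunks further down, convert the character-device entry point from .ioctl to .unlocked_ioctl: the handler loses the inode argument, returns long, and takes the big kernel lock explicitly instead of relying on the old ioctl path to do so. A minimal sketch of that conversion pattern; example_do_cmd is a placeholder for the driver's real command dispatch, and the other names are invented:

        static long example_do_cmd(struct inode *inode, unsigned int cmd,
                                   unsigned long arg)
        {
                return -EINVAL;         /* placeholder command dispatch */
        }

        static long example_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
        {
                struct inode *inode = file->f_path.dentry->d_inode;
                long ret;

                lock_kernel();          /* what the old .ioctl path took implicitly */
                ret = example_do_cmd(inode, cmd, arg);
                unlock_kernel();
                return ret;
        }

        static const struct file_operations example_fops = {
                .owner          = THIS_MODULE,
                .unlocked_ioctl = example_ioctl,
        };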
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 2c56fd56ec6..5f253665a1d 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -382,7 +382,7 @@ static int __devinit uctrl_probe(struct of_device *op,
sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr);
printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n",
- op->node->full_name, p->regs, p->irq);
+ op->dev.of_node->full_name, p->regs, p->irq);
uctrl_get_event_status(p);
uctrl_get_external_status(p);
@@ -425,8 +425,11 @@ static const struct of_device_id uctrl_match[] = {
MODULE_DEVICE_TABLE(of, uctrl_match);
static struct of_platform_driver uctrl_driver = {
- .name = "uctrl",
- .match_table = uctrl_match,
+ .driver = {
+ .name = "uctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = uctrl_match,
+ },
.probe = uctrl_probe,
.remove = __devexit_p(uctrl_remove),
};
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e9788f55ab1..e20b7bdd4c7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,10 +1,11 @@
/*
3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
Note: This version of the driver does not contain a bundled firmware
image.
@@ -77,6 +78,7 @@
Use pci_resource_len() for ioremap().
2.26.02.012 - Add power management support.
2.26.02.013 - Fix bug in twa_load_sgl().
+ 2.26.02.014 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -102,14 +104,14 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.013"
+#define TW_DRIVER_VERSION "2.26.02.014"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;
/* Module parameters */
-MODULE_AUTHOR ("AMCC");
+MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -123,7 +125,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
-static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
@@ -218,7 +220,7 @@ static struct device_attribute *twa_host_attrs[] = {
/* File operations struct for character device */
static const struct file_operations twa_fops = {
.owner = THIS_MODULE,
- .ioctl = twa_chrdev_ioctl,
+ .unlocked_ioctl = twa_chrdev_ioctl,
.open = twa_chrdev_open,
.release = NULL
};
@@ -635,8 +637,9 @@ out:
} /* End twa_check_srl() */
/* This function handles ioctl for the character device */
-static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
long timeout;
unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
dma_addr_t dma_handle;
@@ -655,6 +658,8 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
+ lock_kernel();
+
/* Only let one of these through at a time */
if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
retval = TW_IOCTL_ERROR_OS_EINTR;
@@ -874,6 +879,7 @@ out3:
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
+ unlock_kernel();
return retval;
} /* End twa_chrdev_ioctl() */
@@ -1990,6 +1996,15 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
+/* This function gets called when a disk is coming on-line */
+static int twa_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End twa_slave_configure() */
+
/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1999,6 +2014,7 @@ static struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = twa_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = twa_slave_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 2893eec78ed..3343824855d 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,10 +1,11 @@
/*
3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
- Modifications By: Tom Couch <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
+ Modifications By: Tom Couch <linuxraid@lsi.com>
Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+ Copyright (C) 2010 LSI Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 54c5ffb1eaa..f481e734aad 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -98,7 +98,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
/* Functions */
/* This function returns AENs through sysfs */
-static ssize_t twl_sysfs_aen_read(struct kobject *kobj,
+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
@@ -129,7 +129,7 @@ static struct bin_attribute twl_sysfs_aen_read_attr = {
};
/* This function returns driver compatibility info through sysfs */
-static ssize_t twl_sysfs_compat_info(struct kobject *kobj,
+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
@@ -750,19 +750,22 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
/* This function handles ioctl for the character device
This interface is used by smartmontools open source software */
-static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long timeout;
unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
dma_addr_t dma_handle;
int request_id = 0;
TW_Ioctl_Driver_Command driver_command;
+ struct inode *inode = file->f_dentry->d_inode;
TW_Ioctl_Buf_Apache *tw_ioctl;
TW_Command_Full *full_command_packet;
TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
int retval = -EFAULT;
void __user *argp = (void __user *)arg;
+ lock_kernel();
+
/* Only let one of these through at a time */
if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
retval = -EINTR;
@@ -858,6 +861,7 @@ out3:
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
+ unlock_kernel();
return retval;
} /* End twl_chrdev_ioctl() */
@@ -884,7 +888,7 @@ out:
/* File operations struct for character device */
static const struct file_operations twl_fops = {
.owner = THIS_MODULE,
- .ioctl = twl_chrdev_ioctl,
+ .unlocked_ioctl = twl_chrdev_ioctl,
.open = twl_chrdev_open,
.release = NULL
};
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 5faf903ca8c..30d735ad35b 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,12 +1,12 @@
/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -47,10 +47,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
History
-------
@@ -194,6 +194,7 @@
1.26.02.002 - Free irq handler in __tw_shutdown().
Turn on RCD bit for caching mode page.
Serialize reset code.
+ 1.26.02.003 - Force 60 second timeout default.
*/
#include <linux/module.h>
@@ -219,13 +220,13 @@
#include "3w-xxxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "1.26.02.002"
+#define TW_DRIVER_VERSION "1.26.02.003"
static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT];
static int tw_device_extension_count = 0;
static int twe_major = -1;
/* Module parameters */
-MODULE_AUTHOR("AMCC");
+MODULE_AUTHOR("LSI");
MODULE_DESCRIPTION("3ware Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
@@ -880,7 +881,7 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
} /* End tw_allocate_memory() */
/* This function handles ioctl for the character device */
-static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int request_id;
dma_addr_t dma_handle;
@@ -888,6 +889,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
unsigned long flags;
unsigned int data_buffer_length = 0;
unsigned long data_buffer_length_adjusted = 0;
+ struct inode *inode = file->f_dentry->d_inode;
unsigned long *cpu_addr;
long timeout;
TW_New_Ioctl *tw_ioctl;
@@ -898,9 +900,12 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n");
+ lock_kernel();
/* Only let one of these through at a time */
- if (mutex_lock_interruptible(&tw_dev->ioctl_lock))
+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
+ unlock_kernel();
return -EINTR;
+ }
/* First copy down the buffer length */
if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int)))
@@ -1029,6 +1034,7 @@ out2:
dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
out:
mutex_unlock(&tw_dev->ioctl_lock);
+ unlock_kernel();
return retval;
} /* End tw_chrdev_ioctl() */
@@ -1051,7 +1057,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
/* File operations struct for character device */
static const struct file_operations tw_fops = {
.owner = THIS_MODULE,
- .ioctl = tw_chrdev_ioctl,
+ .unlocked_ioctl = tw_chrdev_ioctl,
.open = tw_chrdev_open,
.release = NULL
};
@@ -2245,6 +2251,15 @@ static void tw_shutdown(struct pci_dev *pdev)
__tw_shutdown(tw_dev);
} /* End tw_shutdown() */
+/* This function gets called when a disk is coming online */
+static int tw_slave_configure(struct scsi_device *sdev)
+{
+ /* Force 60 second timeout */
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+ return 0;
+} /* End tw_slave_configure() */
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "3ware Storage Controller",
@@ -2253,6 +2268,7 @@ static struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = tw_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
+ .slave_configure = tw_slave_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index a5a2ba2561d..8b9f9d17e7f 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,12 +1,12 @@
/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
- Written By: Adam Radford <linuxraid@amcc.com>
+ Written By: Adam Radford <linuxraid@lsi.com>
Modifications By: Joel Jacobson <linux@3ware.com>
Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
- Copyright (C) 1999-2009 3ware Inc.
+ Copyright (C) 1999-2010 3ware Inc.
Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -45,10 +45,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bugs/Comments/Suggestions should be mailed to:
- linuxraid@amcc.com
+ linuxraid@lsi.com
For more information, goto:
- http://www.amcc.com
+ http://www.lsi.com
*/
#ifndef _3W_XXXX_H
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 92a8c500b23..1c7ac49be64 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -162,6 +162,7 @@ scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+scsi_mod-y += scsi_trace.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index d8fe5b76fee..1bb5d3f0e26 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,227 +1,172 @@
#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zorro.h>
-#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <linux/spinlock.h>
#include "scsi.h"
-#include <scsi/scsi_host.h>
#include "wd33c93.h"
#include "a2091.h"
-#include<linux/stat.h>
-#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
-
-static int a2091_release(struct Scsi_Host *instance);
+struct a2091_hostdata {
+ struct WD33C93_hostdata wh;
+ struct a2091_scsiregs *regs;
+};
-static irqreturn_t a2091_intr (int irq, void *_instance)
+static irqreturn_t a2091_intr(int irq, void *data)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->ISTR;
- if (!(status & (ISTR_INT_F|ISTR_INT_P)) || !(status & ISTR_INTS))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ struct Scsi_Host *instance = data;
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->ISTR;
+ unsigned long flags;
+
+ if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- struct Scsi_Host *instance = cmd->device->host;
-
- /* don't allow DMA if the physical address is bad */
- if (addr & A2091_XFER_MASK)
- {
- HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(instance)->dma_bounce_buffer =
- kmalloc (HDATA(instance)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(instance)->dma_bounce_buffer) {
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
- }
-
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);
-
- /* the bounce buffer may not be in the first 16M of physmem */
+ struct Scsi_Host *instance = cmd->device->host;
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a2091_scsiregs *regs = hdata->regs;
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- /* we could use chipmem... maybe later */
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- return 1;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(instance)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ /* get the physical address of the bounce buffer */
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+
+ /* the bounce buffer may not be in the first 16M of physmem */
+ if (addr & A2091_XFER_MASK) {
+ /* we could use chipmem... maybe later */
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- }
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* remember direction */
- HDATA(cmd->device->host)->dma_dir = dir_in;
+ /* remember direction */
+ wh->dma_dir = dir_in;
- DMA(cmd->device->host)->CNTR = cntr;
+ regs->CNTR = cntr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
- if (dir_in){
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- }else{
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
- }
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+ /* start DMA */
+ regs->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- /* disable SCSI interrupts */
- DMA(instance)->CNTR = cntr;
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- ;
- }
-
- /* clear a possible interrupt */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if( HDATA(instance)->dma_dir )
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
-}
+ struct a2091_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a2091_scsiregs *regs = hdata->regs;
-static int __init a2091_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- struct zorro_dev *z = NULL;
- wd33c93_regs regs;
- int num_a2091 = 0;
-
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
-
- tpnt->proc_name = "A2091";
- tpnt->proc_info = &wd33c93_proc_info;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
- z->id != ZORRO_PROD_CBM_A590_A2091_2)
- continue;
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
-
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if (instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
- instance->irq = IRQ_AMIGA_PORTS;
- instance->unique_id = z->slotaddr;
- DMA(instance)->DAWR = DAWR_A2091;
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
- if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- num_a2091++;
- continue;
-
-unregister:
- scsi_unregister(instance);
- wd33c93_release();
-release:
- release_mem_region(address, 256);
- }
-
- return num_a2091;
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!wh->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ /* disable SCSI interrupts */
+ regs->CNTR = cntr;
+
+ /* flush if we were reading */
+ if (wh->dma_dir) {
+ regs->FLUSH = 1;
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ ;
+ }
+
+ /* clear a possible interrupt */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (wh->dma_dir)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
}
static int a2091_bus_reset(struct scsi_cmnd *cmd)
{
+ struct Scsi_Host *instance = cmd->device->host;
+
/* FIXME perform bus-specific reset */
/* FIXME 2: kill this function, and let midlayer fall back
to the same action, calling wd33c93_host_reset() */
- spin_lock_irq(cmd->device->host->host_lock);
+ spin_lock_irq(instance->host_lock);
wd33c93_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ spin_unlock_irq(instance->host_lock);
return SUCCESS;
}
-#define HOSTS_C
-
-static struct scsi_host_template driver_template = {
- .proc_name = "A2901",
+static struct scsi_host_template a2091_scsi_template = {
+ .module = THIS_MODULE,
.name = "Commodore A2091/A590 SCSI",
- .detect = a2091_detect,
- .release = a2091_release,
+ .proc_info = wd33c93_proc_info,
+ .proc_name = "A2901",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
.eh_bus_reset_handler = a2091_bus_reset,
@@ -233,18 +178,103 @@ static struct scsi_host_template driver_template = {
.use_clustering = DISABLE_CLUSTERING
};
+static int __devinit a2091_probe(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct Scsi_Host *instance;
+ int error;
+ struct a2091_scsiregs *regs;
+ wd33c93_regs wdregs;
+ struct a2091_hostdata *hdata;
+
+ if (!request_mem_region(z->resource.start, 256, "wd33c93"))
+ return -EBUSY;
+
+ instance = scsi_host_alloc(&a2091_scsi_template,
+ sizeof(struct a2091_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ instance->irq = IRQ_AMIGA_PORTS;
+ instance->unique_id = z->slotaddr;
+
+ regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
+ regs->DAWR = DAWR_A2091;
+
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+
+ hdata = shost_priv(instance);
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
-#include "scsi_module.c"
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
+ error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
+ "A2091 SCSI", instance);
+ if (error)
+ goto fail_irq;
-static int a2091_release(struct Scsi_Host *instance)
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ zorro_set_drvdata(z, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_alloc:
+ release_mem_region(z->resource.start, 256);
+ return error;
+}
+
+static void __devexit a2091_remove(struct zorro_dev *z)
{
-#ifdef MODULE
- DMA(instance)->CNTR = 0;
- release_mem_region(ZTWO_PADDR(instance->base), 256);
+ struct Scsi_Host *instance = zorro_get_drvdata(z);
+ struct a2091_hostdata *hdata = shost_priv(instance);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
-#endif
- return 1;
+ scsi_host_put(instance);
+ release_mem_region(z->resource.start, 256);
+}
+
+static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_CBM_A590_A2091_1 },
+ { ZORRO_PROD_CBM_A590_A2091_2 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl);
+
+static struct zorro_driver a2091_driver = {
+ .name = "a2091",
+ .id_table = a2091_zorro_tbl,
+ .probe = a2091_probe,
+ .remove = __devexit_p(a2091_remove),
+};
+
+static int __init a2091_init(void)
+{
+ return zorro_register_driver(&a2091_driver);
+}
+module_init(a2091_init);
+
+static void __exit a2091_exit(void)
+{
+ zorro_unregister_driver(&a2091_driver);
}
+module_exit(a2091_exit);
+MODULE_DESCRIPTION("Commodore A2091/A590 SCSI");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 252528f2672..794b8e65c71 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -12,39 +12,39 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A2091_XFER_MASK (0xff000001)
+#define A2091_XFER_MASK (0xff000001)
-typedef struct {
- unsigned char pad1[64];
- volatile unsigned short ISTR;
- volatile unsigned short CNTR;
- unsigned char pad2[60];
- volatile unsigned int WTC;
- volatile unsigned long ACR;
- unsigned char pad3[6];
- volatile unsigned short DAWR;
- unsigned char pad4;
- volatile unsigned char SASR;
- unsigned char pad5;
- volatile unsigned char SCMD;
- unsigned char pad6[76];
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short CINT;
- unsigned char pad7[2];
- volatile unsigned short FLUSH;
-} a2091_scsiregs;
+struct a2091_scsiregs {
+ unsigned char pad1[64];
+ volatile unsigned short ISTR;
+ volatile unsigned short CNTR;
+ unsigned char pad2[60];
+ volatile unsigned int WTC;
+ volatile unsigned long ACR;
+ unsigned char pad3[6];
+ volatile unsigned short DAWR;
+ unsigned char pad4;
+ volatile unsigned char SASR;
+ unsigned char pad5;
+ volatile unsigned char SCMD;
+ unsigned char pad6[76];
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short CINT;
+ unsigned char pad7[2];
+ volatile unsigned short FLUSH;
+};
#define DAWR_A2091 (3)
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c35fc55f1c9..d9468027fb6 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,226 +1,187 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
-#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include <asm/irq.h>
#include "scsi.h"
-#include <scsi/scsi_host.h>
#include "wd33c93.h"
#include "a3000.h"
-#include<linux/stat.h>
-
-#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
-
-static struct Scsi_Host *a3000_host = NULL;
-static int a3000_release(struct Scsi_Host *instance);
+struct a3000_hostdata {
+ struct WD33C93_hostdata wh;
+ struct a3000_scsiregs *regs;
+};
-static irqreturn_t a3000_intr (int irq, void *dummy)
+static irqreturn_t a3000_intr(int irq, void *data)
{
+ struct Scsi_Host *instance = data;
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->ISTR;
unsigned long flags;
- unsigned int status = DMA(a3000_host)->ISTR;
if (!(status & ISTR_INT_P))
return IRQ_NONE;
- if (status & ISTR_INTS)
- {
- spin_lock_irqsave(a3000_host->host_lock, flags);
- wd33c93_intr (a3000_host);
- spin_unlock_irqrestore(a3000_host->host_lock, flags);
+ if (status & ISTR_INTS) {
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
- printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
+ pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
return IRQ_NONE;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /*
- * if the physical address has the wrong alignment, or if
- * physical address is bad, or if it is a write and at the
- * end of a physical memory chunk, then allocate a bounce
- * buffer
- */
- if (addr & A3000_XFER_MASK)
- {
- HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
- HDATA(a3000_host)->dma_bounce_buffer =
- kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL);
-
- /* can't allocate memory; use PIO */
- if (!HDATA(a3000_host)->dma_bounce_buffer) {
- HDATA(a3000_host)->dma_bounce_len = 0;
- return 1;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(a3000_host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ struct Scsi_Host *instance = cmd->device->host;
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a3000_scsiregs *regs = hdata->regs;
+ unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /*
+ * if the physical address has the wrong alignment, or if
+ * physical address is bad, or if it is a write and at the
+ * end of a physical memory chunk, then allocate a bounce
+ * buffer
+ */
+ if (addr & A3000_XFER_MASK) {
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
+ GFP_KERNEL);
+
+ /* can't allocate memory; use PIO */
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
+
+ addr = virt_to_bus(wh->dma_bounce_buffer);
}
- addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
- }
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= CNTR_DDIR;
- /* setup dma direction */
- if (!dir_in)
- cntr |= CNTR_DDIR;
+ /* remember direction */
+ wh->dma_dir = dir_in;
- /* remember direction */
- HDATA(a3000_host)->dma_dir = dir_in;
+ regs->CNTR = cntr;
- DMA(a3000_host)->CNTR = cntr;
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
- /* setup DMA *physical* address */
- DMA(a3000_host)->ACR = addr;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- /* start DMA */
- mb(); /* make sure setup is completed */
- DMA(a3000_host)->ST_DMA = 1;
- mb(); /* make sure DMA has started before next IO */
+ /* start DMA */
+ mb(); /* make sure setup is completed */
+ regs->ST_DMA = 1;
+ mb(); /* make sure DMA has started before next IO */
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* disable SCSI interrupts */
- unsigned short cntr = CNTR_PDMD;
-
- if (!HDATA(instance)->dma_dir)
- cntr |= CNTR_DDIR;
-
- DMA(instance)->CNTR = cntr;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* flush if we were reading */
- if (HDATA(instance)->dma_dir) {
- DMA(instance)->FLUSH = 1;
- mb(); /* don't allow prefetch */
- while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
- barrier();
- mb(); /* no IO until FLUSH is done */
- }
-
- /* clear a possible interrupt */
- /* I think that this CINT is only necessary if you are
- * using the terminal count features. HM 7 Mar 1994
- */
- DMA(instance)->CINT = 1;
-
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- mb(); /* make sure DMA is stopped before next IO */
-
- /* restore the CONTROL bits (minus the direction flag) */
- DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
- mb(); /* make sure CNTR is updated before next IO */
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (SCpnt) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- } else {
- kfree (HDATA(instance)->dma_bounce_buffer);
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct a3000_scsiregs *regs = hdata->regs;
+
+ /* disable SCSI interrupts */
+ unsigned short cntr = CNTR_PDMD;
+
+ if (!wh->dma_dir)
+ cntr |= CNTR_DDIR;
+
+ regs->CNTR = cntr;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* flush if we were reading */
+ if (wh->dma_dir) {
+ regs->FLUSH = 1;
+ mb(); /* don't allow prefetch */
+ while (!(regs->ISTR & ISTR_FE_FLG))
+ barrier();
+ mb(); /* no IO until FLUSH is done */
}
- }
-}
-static int __init a3000_detect(struct scsi_host_template *tpnt)
-{
- wd33c93_regs regs;
-
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
- return 0;
- if (!request_mem_region(0xDD0000, 256, "wd33c93"))
- return 0;
-
- tpnt->proc_name = "A3000";
- tpnt->proc_info = &wd33c93_proc_info;
-
- a3000_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (a3000_host == NULL)
- goto fail_register;
-
- a3000_host->base = ZTWO_VADDR(0xDD0000);
- a3000_host->irq = IRQ_AMIGA_PORTS;
- DMA(a3000_host)->DAWR = DAWR_A3000;
- regs.SASR = &(DMA(a3000_host)->SASR);
- regs.SCMD = &(DMA(a3000_host)->SCMD);
- HDATA(a3000_host)->no_sync = 0xff;
- HDATA(a3000_host)->fast = 0;
- HDATA(a3000_host)->dma_mode = CTRL_DMA;
- wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
- if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
- a3000_intr))
- goto fail_irq;
- DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
-
- return 1;
-
-fail_irq:
- wd33c93_release();
- scsi_unregister(a3000_host);
-fail_register:
- release_mem_region(0xDD0000, 256);
- return 0;
+ /* clear a possible interrupt */
+ /* I think that this CINT is only necessary if you are
+ * using the terminal count features. HM 7 Mar 1994
+ */
+ regs->CINT = 1;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+ mb(); /* make sure DMA is stopped before next IO */
+
+ /* restore the CONTROL bits (minus the direction flag) */
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ mb(); /* make sure CNTR is updated before next IO */
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (SCpnt) {
+ if (wh->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ } else {
+ kfree(wh->dma_bounce_buffer);
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
+ }
}
static int a3000_bus_reset(struct scsi_cmnd *cmd)
{
+ struct Scsi_Host *instance = cmd->device->host;
+
/* FIXME perform bus-specific reset */
-
+
/* FIXME 2: kill this entire function, which should
cause mid-layer to call wd33c93_host_reset anyway? */
- spin_lock_irq(cmd->device->host->host_lock);
+ spin_lock_irq(instance->host_lock);
wd33c93_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ spin_unlock_irq(instance->host_lock);
return SUCCESS;
}
-#define HOSTS_C
-
-static struct scsi_host_template driver_template = {
- .proc_name = "A3000",
+static struct scsi_host_template amiga_a3000_scsi_template = {
+ .module = THIS_MODULE,
.name = "Amiga 3000 built-in SCSI",
- .detect = a3000_detect,
- .release = a3000_release,
+ .proc_info = wd33c93_proc_info,
+ .proc_name = "A3000",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
.eh_bus_reset_handler = a3000_bus_reset,
@@ -232,16 +193,104 @@ static struct scsi_host_template driver_template = {
.use_clustering = ENABLE_CLUSTERING
};
+static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct Scsi_Host *instance;
+ int error;
+ struct a3000_scsiregs *regs;
+ wd33c93_regs wdregs;
+ struct a3000_hostdata *hdata;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
+ return -EBUSY;
+
+ instance = scsi_host_alloc(&amiga_a3000_scsi_template,
+ sizeof(struct a3000_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ instance->irq = IRQ_AMIGA_PORTS;
+
+ regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
+ regs->DAWR = DAWR_A3000;
+
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
+
+ hdata = shost_priv(instance);
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
+
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
+ error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
+ "A3000 SCSI", instance);
+ if (error)
+ goto fail_irq;
+
+ regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
+
+ platform_set_drvdata(pdev, instance);
+
+ scsi_scan_host(instance);
+ return 0;
+
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_alloc:
+ release_mem_region(res->start, resource_size(res));
+ return error;
+}
+
+static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
+{
+ struct Scsi_Host *instance = platform_get_drvdata(pdev);
+ struct a3000_hostdata *hdata = shost_priv(instance);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
+ free_irq(IRQ_AMIGA_PORTS, instance);
+ scsi_host_put(instance);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
+
+static struct platform_driver amiga_a3000_scsi_driver = {
+ .remove = __exit_p(amiga_a3000_scsi_remove),
+ .driver = {
+ .name = "amiga-a3000-scsi",
+ .owner = THIS_MODULE,
+ },
+};
-#include "scsi_module.c"
+static int __init amiga_a3000_scsi_init(void)
+{
+ return platform_driver_probe(&amiga_a3000_scsi_driver,
+ amiga_a3000_scsi_probe);
+}
+module_init(amiga_a3000_scsi_init);
-static int a3000_release(struct Scsi_Host *instance)
+static void __exit amiga_a3000_scsi_exit(void)
{
- wd33c93_release();
- DMA(instance)->CNTR = 0;
- release_mem_region(0xDD0000, 256);
- free_irq(IRQ_AMIGA_PORTS, a3000_intr);
- return 1;
+ platform_driver_unregister(&amiga_a3000_scsi_driver);
}
+module_exit(amiga_a3000_scsi_exit);
+MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-a3000-scsi");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index c7afe16fd6e..49db4a335aa 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -12,41 +12,41 @@
#include <linux/types.h>
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define A3000_XFER_MASK (0x00000003)
+#define A3000_XFER_MASK (0x00000003)
-typedef struct {
- unsigned char pad1[2];
- volatile unsigned short DAWR;
- volatile unsigned int WTC;
- unsigned char pad2[2];
- volatile unsigned short CNTR;
- volatile unsigned long ACR;
- unsigned char pad3[2];
- volatile unsigned short ST_DMA;
- unsigned char pad4[2];
- volatile unsigned short FLUSH;
- unsigned char pad5[2];
- volatile unsigned short CINT;
- unsigned char pad6[2];
- volatile unsigned short ISTR;
- unsigned char pad7[30];
- volatile unsigned short SP_DMA;
- unsigned char pad8;
- volatile unsigned char SASR;
- unsigned char pad9;
- volatile unsigned char SCMD;
-} a3000_scsiregs;
+struct a3000_scsiregs {
+ unsigned char pad1[2];
+ volatile unsigned short DAWR;
+ volatile unsigned int WTC;
+ unsigned char pad2[2];
+ volatile unsigned short CNTR;
+ volatile unsigned long ACR;
+ unsigned char pad3[2];
+ volatile unsigned short ST_DMA;
+ unsigned char pad4[2];
+ volatile unsigned short FLUSH;
+ unsigned char pad5[2];
+ volatile unsigned short CINT;
+ unsigned char pad6[2];
+ volatile unsigned short ISTR;
+ unsigned char pad7[30];
+ volatile unsigned short SP_DMA;
+ unsigned char pad8;
+ volatile unsigned char SASR;
+ unsigned char pad9;
+ volatile unsigned char SCMD;
+};
#define DAWR_A3000 (3)
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 11ae6be8aea..23c76f41883 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -20,10 +20,6 @@
#include "53c700.h"
-MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
-MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
-MODULE_LICENSE("GPL");
-
static struct scsi_host_template a4000t_scsi_driver_template = {
.name = "A4000T builtin SCSI",
@@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = {
.module = THIS_MODULE,
};
-static struct platform_device *a4000t_scsi_device;
-#define A4000T_SCSI_ADDR 0xdd0040
+#define A4000T_SCSI_OFFSET 0x40
-static int __devinit a4000t_probe(struct platform_device *dev)
+static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
{
- struct Scsi_Host *host;
+ struct resource *res;
+ phys_addr_t scsi_addr;
struct NCR_700_Host_Parameters *hostdata;
+ struct Scsi_Host *host;
- if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI)))
- goto out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
- if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000,
+ if (!request_mem_region(res->start, resource_size(res),
"A4000T builtin SCSI"))
- goto out;
+ return -EBUSY;
- hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
+ GFP_KERNEL);
if (!hostdata) {
- printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n");
+ dev_err(&pdev->dev, "Failed to allocate host data\n");
goto out_release;
}
+ scsi_addr = res->start + A4000T_SCSI_OFFSET;
+
/* Fill in the required pieces of hostdata */
- hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR);
+ hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
hostdata->clock = 50;
hostdata->chip710 = 1;
hostdata->dmode_extra = DMODE_FC2;
@@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev)
/* and register the chip */
host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata,
- &dev->dev);
+ &pdev->dev);
if (!host) {
- printk(KERN_ERR "a4000t-scsi: No host detected; "
- "board configuration problem?\n");
+ dev_err(&pdev->dev,
+ "No host detected; board configuration problem?\n");
goto out_free;
}
host->this_id = 7;
- host->base = A4000T_SCSI_ADDR;
+ host->base = scsi_addr;
host->irq = IRQ_AMIGA_PORTS;
if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
host)) {
- printk(KERN_ERR "a4000t-scsi: request_irq failed\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto out_put_host;
}
- platform_set_drvdata(dev, host);
+ platform_set_drvdata(pdev, host);
scsi_scan_host(host);
-
return 0;
out_put_host:
@@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev)
out_free:
kfree(hostdata);
out_release:
- release_mem_region(A4000T_SCSI_ADDR, 0x1000);
- out:
+ release_mem_region(res->start, resource_size(res));
return -ENODEV;
}
-static __devexit int a4000t_device_remove(struct platform_device *dev)
+static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
{
- struct Scsi_Host *host = platform_get_drvdata(dev);
+ struct Scsi_Host *host = platform_get_drvdata(pdev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scsi_remove_host(host);
-
NCR_700_release(host);
kfree(hostdata);
free_irq(host->irq, host);
- release_mem_region(A4000T_SCSI_ADDR, 0x1000);
-
+ release_mem_region(res->start, resource_size(res));
return 0;
}
-static struct platform_driver a4000t_scsi_driver = {
- .driver = {
- .name = "a4000t-scsi",
- .owner = THIS_MODULE,
+static struct platform_driver amiga_a4000t_scsi_driver = {
+ .remove = __exit_p(amiga_a4000t_scsi_remove),
+ .driver = {
+ .name = "amiga-a4000t-scsi",
+ .owner = THIS_MODULE,
},
- .probe = a4000t_probe,
- .remove = __devexit_p(a4000t_device_remove),
};
-static int __init a4000t_scsi_init(void)
+static int __init amiga_a4000t_scsi_init(void)
{
- int err;
-
- err = platform_driver_register(&a4000t_scsi_driver);
- if (err)
- return err;
-
- a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
- -1, NULL, 0);
- if (IS_ERR(a4000t_scsi_device)) {
- platform_driver_unregister(&a4000t_scsi_driver);
- return PTR_ERR(a4000t_scsi_device);
- }
-
- return err;
+ return platform_driver_probe(&amiga_a4000t_scsi_driver,
+ amiga_a4000t_scsi_probe);
}
-static void __exit a4000t_scsi_exit(void)
+module_init(amiga_a4000t_scsi_init);
+
+static void __exit amiga_a4000t_scsi_exit(void)
{
- platform_device_unregister(a4000t_scsi_device);
- platform_driver_unregister(&a4000t_scsi_driver);
+ platform_driver_unregister(&amiga_a4000t_scsi_driver);
}
-module_init(a4000t_scsi_init);
-module_exit(a4000t_scsi_exit);
+module_exit(amiga_a4000t_scsi_exit);
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / "
+ "Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-a4000t-scsi");
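
The probe above no longer hardcodes 0xdd0040; it takes the board's base address from the platform resource and adds A4000T_SCSI_OFFSET, unwinding with gotos if any later step fails. A rough sketch of that resource handling, with placeholder names and a made-up 0x100 mapping size:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define EXAMPLE_CHIP_OFFSET 0x40	/* like A4000T_SCSI_OFFSET above */

struct example_priv {
	void __iomem *base;
	struct resource *res;
};

static int __init example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;
	int error;

	/* the platform code, not the driver, decides where the board lives */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), "example"))
		return -EBUSY;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto out_release;
	}

	/* the chip's registers sit at a fixed offset inside the region */
	priv->base = ioremap(res->start + EXAMPLE_CHIP_OFFSET, 0x100);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_free;
	}
	priv->res = res;

	platform_set_drvdata(pdev, priv);
	return 0;

out_free:
	kfree(priv);
out_release:
	release_mem_region(res->start, resource_size(res));
	return error;
}

static int __exit example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	/* undo probe in reverse order */
	iounmap(priv->base);
	release_mem_region(priv->res->start, resource_size(priv->res));
	kfree(priv);
	return 0;
}

The driver itself keeps using ZTWO_VADDR() rather than ioremap(), since that address range is statically mapped on Amiga; ioremap() here is just the generic equivalent.
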
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7e26ebc2666..7df2dd1d2c6 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -328,6 +328,16 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
return status;
}
+static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
+{
+ char inq_data;
+ scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
+ inq_data &= 0xdf;
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ }
+}
+
/**
* aac_get_containers - list containers
* @common: adapter to probe
@@ -1598,6 +1608,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
@@ -1647,6 +1658,22 @@ static int aac_read(struct scsi_cmnd * scsicmd)
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
break;
}
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -1688,6 +1715,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
@@ -1727,6 +1755,22 @@ static int aac_write(struct scsi_cmnd * scsicmd)
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
fua = scsicmd->cmnd[1] & 0x8;
}
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -2573,6 +2617,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsi_dma_unmap(scsicmd);
+ /* expose physical device if expose_physicald flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
/*
* First check the fib status
*/
@@ -2678,8 +2727,22 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->cmnd[0],
le32_to_cpu(srbreply->scsi_status));
#endif
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
- break;
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
}
if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
int len;
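
The two hunks added to aac_read() and aac_write() above reject any request whose LBA range runs past the container size reported in fsa_dev[].size, completing the command with a CHECK CONDITION instead of sending it to the firmware. The test itself is the plain comparison (lba + count) > size; the standalone sketch below shows the same check written with an explicit wrap-around guard, which is equivalent whenever the addition cannot overflow:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept a transfer of 'count' blocks starting at 'lba' on a container
 * of 'capacity' blocks. Written so the addition can never wrap; the
 * driver's simple (lba + count) > size test behaves the same when the
 * operands are far from the type's limits. */
static bool lba_range_ok(uint64_t lba, uint32_t count, uint64_t capacity)
{
	if (lba >= capacity)
		return false;
	return (uint64_t)count <= capacity - lba;
}

int main(void)
{
	/* a 1 GiB container of 512-byte blocks */
	const uint64_t capacity = (1ULL << 30) / 512;

	printf("%d\n", lba_range_ok(0, 8, capacity));            /* 1 */
	printf("%d\n", lba_range_ok(capacity - 1, 1, capacity)); /* 1 */
	printf("%d\n", lba_range_ok(capacity - 1, 2, capacity)); /* 0 */
	printf("%d\n", lba_range_ok(capacity, 1, capacity));     /* 0 */
	return 0;
}
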
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 619c02d9c86..4dbcc055ac7 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 24702
+# define AAC_DRIVER_BUILD 26400
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -26,6 +26,8 @@
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
+#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
+
/*
* These macros convert from physical channels to virtual channels
*/
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c9117853..1a5bf572475 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
- kfree (usg);
- dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
+ kfree(usg);
rcode = -ENOMEM;
goto cleanup;
}
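
The commctrl.c hunk is a use-after-free fix in an error path: the old code called kfree(usg) and then still read usg->sg[i].count and usg->count inside the debug message. The patch only reorders the two statements: print while the buffer is still valid, then free. The same rule in a standalone miniature (hypothetical types; the real code frees with kfree):

#include <stdio.h>
#include <stdlib.h>

struct sg_entry {
	unsigned int count;
};

/* report a failed allocation for entry 'i', then release the list */
static int fail_entry(struct sg_entry *usg, size_t i, size_t total)
{
	/* read usg while it is still valid ... */
	fprintf(stderr, "could not allocate %u bytes (entry %zu of %zu)\n",
		usg[i].count, i, total);
	/* ... and only free it afterwards (the old code had these swapped) */
	free(usg);
	return -1;
}

int main(void)
{
	struct sg_entry *usg = calloc(4, sizeof(*usg));

	if (!usg)
		return 1;
	usg[2].count = 4096;
	return fail_entry(usg, 2, 4) == -1 ? 0 : 1;
}
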
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 94d2954d79a..70079146e20 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -966,6 +966,16 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
device_config_needed =
(((__le32 *)aifcmd->data)[0] ==
cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ channel,
+ id,
+ lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
break;
case AifEnEnclosureManagement:
@@ -1123,6 +1133,9 @@ retry_next:
if (device) {
switch (device_config_needed) {
case DELETE:
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
if (scsi_device_online(device)) {
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
@@ -1131,6 +1144,7 @@ retry_next:
"array deleted" :
"enclosure services event");
}
+#endif
break;
case ADD:
if (!scsi_device_online(device)) {
@@ -1145,12 +1159,16 @@ retry_next:
case CHANGE:
if ((channel == CONTAINER_CHANNEL)
&& (!dev->fsa_dev[container].valid)) {
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
if (!scsi_device_online(device))
break;
scsi_device_set_state(device, SDEV_OFFLINE);
sdev_printk(KERN_INFO, device,
"Device offlined - %s\n",
"array failed");
+#endif
break;
}
scsi_rescan_device(&device->sdev_gendev);
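
In the AifEnAddJBOD path added above, the driver first looks up any scsi_device already known at that channel/id/lun and removes it before the ADD is handled, so a re-inserted JBOD gets a clean attach. The lookup, remove, put sequence is the standard reference-counting pattern; a sketch with placeholder names:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Drop a possibly stale scsi_device so a following scan re-adds it fresh. */
static void example_forget_device(struct Scsi_Host *shost,
				  unsigned int channel, unsigned int id,
				  unsigned int lun)
{
	struct scsi_device *sdev;

	/* scsi_device_lookup() returns the device with a reference held */
	sdev = scsi_device_lookup(shost, channel, id, lun);
	if (sdev) {
		scsi_remove_device(sdev);	/* detach it from the host */
		scsi_device_put(sdev);		/* drop the lookup reference */
	}
}
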
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e9373a2d14f..33898b61fdb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -705,12 +705,17 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
* Bugs: Needs to handle hot plugging
*/
-static int aac_cfg_ioctl(struct inode *inode, struct file *file,
+static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ int ret;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ lock_kernel();
+ ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ unlock_kernel();
+
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -1029,7 +1034,7 @@ ssize_t aac_get_serial_number(struct device *device, char *buf)
static const struct file_operations aac_cfg_fops = {
.owner = THIS_MODULE,
- .ioctl = aac_cfg_ioctl,
+ .unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = aac_compat_cfg_ioctl,
#endif
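
The linit.c change converts aac_cfg_ioctl from the old file_operations .ioctl hook to .unlocked_ioctl. The old hook ran with the Big Kernel Lock held, so the conversion pushes an explicit lock_kernel()/unlock_kernel() pair into the handler to keep the locking behaviour unchanged. The general shape of that push-down, for a kernel of this vintage and with placeholder names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel(), pre-BKL-removal kernels */

/* stand-in for the driver's real per-command dispatcher */
static long example_do_ioctl(void *priv, unsigned int cmd, void __user *arg)
{
	return 0;
}

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();	/* preserve the serialization .ioctl used to provide */
	ret = example_do_ioctl(file->private_data, cmd, (void __user *)arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,	/* was .ioctl before */
};
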
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e580d6..ce5371b3cdd 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27"
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
@@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD
#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
+#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
/*
*************************************************************
** structure for holding DMA address data
@@ -341,13 +344,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- void __iomem *drv2iop_doorbell_reg;
- void __iomem *drv2iop_doorbell_mask_reg;
- void __iomem *iop2drv_doorbell_reg;
- void __iomem *iop2drv_doorbell_mask_reg;
- void __iomem *msgcode_rwbuffer_reg;
- void __iomem *ioctl_wbuffer_reg;
- void __iomem *ioctl_rbuffer_reg;
+ uint32_t __iomem *drv2iop_doorbell_reg;
+ uint32_t __iomem *drv2iop_doorbell_mask_reg;
+ uint32_t __iomem *iop2drv_doorbell_reg;
+ uint32_t __iomem *iop2drv_doorbell_mask_reg;
+ uint32_t __iomem *msgcode_rwbuffer_reg;
+ uint32_t __iomem *ioctl_wbuffer_reg;
+ uint32_t __iomem *ioctl_rbuffer_reg;
};
/*
@@ -375,6 +378,7 @@ struct AdapterControlBlock
/* message unit ATU inbound base address0 */
uint32_t acb_flags;
+ uint8_t adapter_index;
#define ACB_F_SCSISTOPADAPTER 0x0001
#define ACB_F_MSG_STOP_BGRB 0x0002
/* stop RAID background rebuild */
@@ -390,7 +394,7 @@ struct AdapterControlBlock
#define ACB_F_BUS_RESET 0x0080
#define ACB_F_IOP_INITED 0x0100
/* iop init */
-
+ #define ACB_F_FIRMWARE_TRAP 0x0400
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
struct list_head ccb_free_list;
@@ -423,12 +427,19 @@ struct AdapterControlBlock
#define ARECA_RAID_GOOD 0xaa
uint32_t num_resets;
uint32_t num_aborts;
+ uint32_t signature;
uint32_t firm_request_len;
uint32_t firm_numbers_queue;
uint32_t firm_sdram_size;
uint32_t firm_hd_channels;
char firm_model[12];
char firm_version[20];
+ char device_map[20]; /*21,84-99*/
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
+ unsigned short fw_state;
+ atomic_t rq_map_token;
+ int ante_token_value;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 5877f29a600..07fdfe57e38 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -59,7 +59,8 @@
struct device_attribute *arcmsr_host_attrs[];
-static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
@@ -105,7 +106,8 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
return (allxfer_len);
}
-static ssize_t arcmsr_sysfs_iop_message_write(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
@@ -153,7 +155,8 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct kobject *kobj,
}
}
-static ssize_t arcmsr_sysfs_iop_message_clear(struct kobject *kobj,
+static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
+ struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
@@ -189,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
+ .owner = THIS_MODULE,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
@@ -198,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
+ .owner = THIS_MODULE,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
@@ -207,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
+ .owner = THIS_MODULE,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
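
The arcmsr_attr.c hunks track a sysfs API change: binary attribute read/write handlers now take the opening struct file * as their first argument, with the rest of the signature unchanged. A minimal binary attribute against the new prototype, with placeholder names and the size copied from the driver's 1032-byte message buffer:

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	/* filp is the new first argument; kobj still identifies the device */
	return 0;
}

static struct bin_attribute example_bin_attr = {
	.attr = {
		.name = "example_blob",
		.mode = S_IRUSR,
	},
	.size = 1032,
	.read = example_bin_read,
};
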
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffbe2192da3..ffa54792bb3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -72,8 +72,16 @@
#include <scsi/scsicam.h>
#include "arcmsr.h"
+#ifdef CONFIG_SCSI_ARCMSR_RESET
+ static int sleeptime = 20;
+ static int retrycount = 12;
+ module_param(sleeptime, int, S_IRUGO|S_IWUSR);
+ MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
+ module_param(retrycount, int, S_IRUGO|S_IWUSR);
+ MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
+#endif
MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
+MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
@@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
+static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_message_isr_bh_fn(struct work_struct *work);
+static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
+static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
+
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
@@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
static struct scsi_host_template arcmsr_scsi_host_template = {
.module = THIS_MODULE,
- .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
+ .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
ARCMSR_DRIVER_VERSION,
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
@@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
};
-#ifdef CONFIG_SCSI_ARCMSR_AER
-static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
-static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state);
-
-static struct pci_error_handlers arcmsr_pci_error_handlers = {
- .error_detected = arcmsr_pci_error_detected,
- .slot_reset = arcmsr_pci_slot_reset,
-};
-#endif
static struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
@@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = {
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.shutdown = arcmsr_shutdown,
- #ifdef CONFIG_SCSI_ARCMSR_AER
- .err_handler = &arcmsr_pci_error_handlers,
- #endif
};
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
void *dma_coherent;
dma_addr_t dma_coherent_handle, dma_addr;
struct CommandControlBlock *ccb_tmp;
- uint32_t intmask_org;
int i, j;
- acb->pmuA = pci_ioremap_bar(pdev, 0);
+ acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
if (!acb->pmuA) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
acb->host->host_no);
@@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
acb->devstate[i][j] = ARECA_RAID_GONE;
-
- /*
- ** here we need to tell iop 331 our ccb_tmp.HighPart
- ** if ccb_tmp.HighPart is not zero
- */
- intmask_org = arcmsr_disable_outbound_ints(acb);
}
break;
@@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
void __iomem *mem_base0, *mem_base1;
void *dma_coherent;
dma_addr_t dma_coherent_handle, dma_addr;
- uint32_t intmask_org;
struct CommandControlBlock *ccb_tmp;
int i, j;
@@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
acb->pmuB = reg;
- mem_base0 = pci_ioremap_bar(pdev, 0);
+ mem_base0 = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (!mem_base0)
goto out;
- mem_base1 = pci_ioremap_bar(pdev, 2);
+ mem_base1 = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
if (!mem_base1) {
iounmap(mem_base0);
goto out;
@@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
acb->devstate[i][j] = ARECA_RAID_GOOD;
-
- /*
- ** here we need to tell iop 331 our ccb_tmp.HighPart
- ** if ccb_tmp.HighPart is not zero
- */
- intmask_org = arcmsr_disable_outbound_ints(acb);
}
break;
}
@@ -374,6 +364,88 @@ out:
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
return -ENOMEM;
}
+static void arcmsr_message_isr_bh_fn(struct work_struct *work)
+{
+ struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ char *acb_dev_map = (char *)acb->device_map;
+ uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
+ char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
+ int target, lun;
+ struct scsi_device *psdev;
+ char diff;
+
+ atomic_inc(&acb->rq_map_token);
+ if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
+ for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
+ diff = (*acb_dev_map)^readb(devicemap);
+ if (diff != 0) {
+ char temp;
+ *acb_dev_map = readb(devicemap);
+ temp = *acb_dev_map;
+ for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
+ if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
+ scsi_add_device(acb->host, 0, target, lun);
+ } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
+ psdev = scsi_device_lookup(acb->host, 0, target, lun);
+ if (psdev != NULL) {
+ scsi_remove_device(psdev);
+ scsi_device_put(psdev);
+ }
+ }
+ temp >>= 1;
+ diff >>= 1;
+ }
+ }
+ devicemap++;
+ acb_dev_map++;
+ }
+ }
+ break;
+ }
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ char *acb_dev_map = (char *)acb->device_map;
+ uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
+ char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
+ int target, lun;
+ struct scsi_device *psdev;
+ char diff;
+
+ atomic_inc(&acb->rq_map_token);
+ if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
+ for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
+ diff = (*acb_dev_map)^readb(devicemap);
+ if (diff != 0) {
+ char temp;
+ *acb_dev_map = readb(devicemap);
+ temp = *acb_dev_map;
+ for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
+ if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
+ scsi_add_device(acb->host, 0, target, lun);
+ } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
+ psdev = scsi_device_lookup(acb->host, 0, target, lun);
+ if (psdev != NULL) {
+ scsi_remove_device(psdev);
+ scsi_device_put(psdev);
+ }
+ }
+ temp >>= 1;
+ diff >>= 1;
+ }
+ }
+ devicemap++;
+ acb_dev_map++;
+ }
+ }
+ }
+ }
+}
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
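
arcmsr_message_isr_bh_fn() above is the worker behind the new message interrupt: it reads the firmware's per-target device map out of the rwbuffer, XORs each byte with the driver's cached copy, and walks the changed bits, calling scsi_add_device() for a LUN whose bit appeared and scsi_remove_device() for one whose bit vanished. The core of that walk in isolation, as a standalone sketch with stand-in attach/detach callbacks:

#include <stdint.h>
#include <stdio.h>

#define MAX_LUN 8	/* one map byte carries eight LUN bits for a target */

static void add_lun(int target, int lun)
{
	printf("attach  target %d lun %d\n", target, lun);
}

static void remove_lun(int target, int lun)
{
	printf("detach  target %d lun %d\n", target, lun);
}

/* 'cached' is the last known LUN bitmap for this target, 'fresh' is what
 * the firmware reports now. Returns the new value to cache. */
static uint8_t sync_target_map(int target, uint8_t cached, uint8_t fresh)
{
	uint8_t diff = cached ^ fresh;	/* bits that changed either way */
	uint8_t temp = fresh;

	for (int lun = 0; lun < MAX_LUN; lun++) {
		if ((temp & 0x01) && (diff & 0x01))
			add_lun(target, lun);		/* bit appeared */
		else if (!(temp & 0x01) && (diff & 0x01))
			remove_lun(target, lun);	/* bit vanished */
		temp >>= 1;
		diff >>= 1;
	}
	return fresh;
}

int main(void)
{
	/* LUNs 0 and 2 were present; now LUN 2 is gone and LUN 3 appeared */
	uint8_t cached = 0x05, fresh = 0x09;

	cached = sync_target_map(0, cached, fresh);
	printf("cached map now 0x%02x\n", cached);
	return 0;
}

The driver repeats this walk for each byte of the 16-byte map, and the timer added further down keeps posting ARCMSR_INBOUND_MESG0_GET_CONFIG so fresh maps arrive every few seconds.
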
@@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev,
ACB_F_MESSAGE_WQBUFFER_READED);
acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
INIT_LIST_HEAD(&acb->ccb_free_list);
-
+ INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
error = arcmsr_alloc_ccb_pool(acb);
if (error)
goto out_release_regions;
+ arcmsr_iop_init(acb);
error = request_irq(pdev->irq, arcmsr_do_interrupt,
IRQF_SHARED, "arcmsr", acb);
if (error)
goto out_free_ccb_pool;
- arcmsr_iop_init(acb);
pci_set_drvdata(pdev, host);
if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
@@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev,
#ifdef CONFIG_SCSI_ARCMSR_AER
pci_enable_pcie_error_reporting(pdev);
#endif
+ atomic_set(&acb->rq_map_token, 16);
+ acb->fw_state = true;
+ init_timer(&acb->eternal_timer);
+ acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
+ acb->eternal_timer.data = (unsigned long) acb;
+ acb->eternal_timer.function = &arcmsr_request_device_map;
+ add_timer(&acb->eternal_timer);
+
return 0;
out_free_sysfs:
out_free_irq:
@@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
return 0xff;
}
-static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
- if (arcmsr_hba_wait_msgint_ready(acb))
+ if (arcmsr_hba_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
"arcmsr%d: wait 'abort all outstanding command' timeout \n"
, acb->host->host_no);
+ return 0xff;
+ }
+ return 0x00;
}
-static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
- if (arcmsr_hbb_wait_msgint_ready(acb))
+ if (arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
"arcmsr%d: wait 'abort all outstanding command' timeout \n"
, acb->host->host_no);
+ return 0xff;
+ }
+ return 0x00;
}
-static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
+ uint8_t rtnval = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_abort_hba_allcmd(acb);
+ rtnval = arcmsr_abort_hba_allcmd(acb);
}
break;
case ACB_ADAPTER_TYPE_B: {
- arcmsr_abort_hbb_allcmd(acb);
+ rtnval = arcmsr_abort_hbb_allcmd(acb);
}
}
+ return rtnval;
}
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_A : {
struct MessageUnit_A __iomem *reg = acb->pmuA;
- orig_mask = readl(&reg->outbound_intmask)|\
- ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
+ orig_mask = readl(&reg->outbound_intmask);
writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
&reg->outbound_intmask);
}
@@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B : {
struct MessageUnit_B *reg = acb->pmuB;
- orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
- (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
+ orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
writel(0, reg->iop2drv_doorbell_mask_reg);
}
break;
@@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
int poll_count = 0;
-
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
+ flush_scheduled_work();
+ del_timer_sync(&acb->eternal_timer);
+ arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
- arcmsr_disable_outbound_ints(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
@@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
-
+ del_timer_sync(&acb->eternal_timer);
+ arcmsr_disable_outbound_ints(acb);
+ flush_scheduled_work();
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
@@ -861,7 +950,7 @@ static void arcmsr_module_exit(void)
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
-static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
u32 intmask_org)
{
u32 mask;
@@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
case ACB_ADAPTER_TYPE_A : {
struct MessageUnit_A __iomem *reg = acb->pmuA;
mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
- ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
+ ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
+ ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
writel(mask, &reg->outbound_intmask);
acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
}
@@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
case ACB_ADAPTER_TYPE_B : {
struct MessageUnit_B *reg = acb->pmuB;
- mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
- ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
+ mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
+ ARCMSR_IOP2DRV_DATA_READ_OK |
+ ARCMSR_IOP2DRV_CDB_DONE |
+ ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
writel(mask, reg->iop2drv_doorbell_mask_reg);
acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
}
@@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
- iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
+ iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
+ iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
dma_free_coherent(&acb->pdev->dev,
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
@@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
reg->doneq_index = index;
}
}
+/*
+**********************************************************************************
+** Handle a message interrupt
+**
+** The only message interrupt we expect is in response to a query for the current adapter config.
+** We want this in order to compare the drivemap so that we can detect newly-attached drives.
+**********************************************************************************
+*/
+static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A *reg = acb->pmuA;
+
+ /*clear interrupt and message state*/
+ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = acb->pmuB;
+ /*clear interrupt and message state*/
+ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_intstatus;
struct MessageUnit_A __iomem *reg = acb->pmuA;
- outbound_intstatus = readl(&reg->outbound_intstatus) & \
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
acb->outbound_int_enable;
if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
return 1;
@@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
arcmsr_hba_postqueue_isr(acb);
}
+ if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
+ /* messenger of "driver to iop commands" */
+ arcmsr_hba_message_isr(acb);
+ }
return 0;
}
@@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
uint32_t outbound_doorbell;
struct MessageUnit_B *reg = acb->pmuB;
- outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
+ outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
acb->outbound_int_enable;
if (!outbound_doorbell)
return 1;
writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
- /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
+ /*in case the last action of doorbell interrupt clearance is cached,
+ this action can push HW to write down the clear bit*/
readl(reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
@@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
arcmsr_hbb_postqueue_isr(acb);
}
+ if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+ /* messenger of "driver to iop commands" */
+ arcmsr_hbb_message_isr(acb);
+ }
return 0;
}
@@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
}
}
-static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
+static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
struct scsi_cmnd *cmd)
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
@@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
+
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
+
ptmpQbuffer = ver_addr;
while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
&& (allxfer_len < 1031)) {
@@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
+
ptmpuserbuffer = ver_addr;
user_len = pcmdmessagefld->cmdmessage.Length;
memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
@@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
uint8_t *pQbuffer = acb->rqbuffer;
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
uint8_t *pQbuffer = acb->wqbuffer;
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
uint8_t *pQbuffer;
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
break;
case ARCMSR_MESSAGE_RETURN_CODE_3F: {
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
}
break;
case ARCMSR_MESSAGE_SAY_HELLO: {
int8_t *hello_string = "Hello! I am ARCMSR";
-
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
memcpy(pcmdmessagefld->messagedatabuffer, hello_string
, (int16_t)strlen(hello_string));
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
@@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
break;
case ARCMSR_MESSAGE_SAY_GOODBYE:
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
arcmsr_iop_parking(acb);
break;
case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
+ if (!acb->fw_state) {
+ pcmdmessagefld->cmdmessage.ReturnCode =
+ ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+ goto message_out;
+ }
arcmsr_flush_adapter_cache(acb);
break;
@@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
struct CommandControlBlock *ccb;
int target = cmd->device->id;
int lun = cmd->device->lun;
-
+ uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
+
+ if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
+ if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
+ cmd->result = (DID_NO_CONNECT << 16);
+ }
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
if (acb->acb_flags & ACB_F_BUS_RESET) {
- printk(KERN_NOTICE "arcmsr%d: bus reset"
- " and return busy \n"
- , acb->host->host_no);
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ uint32_t intmask_org, outbound_doorbell;
+
+ if ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
+ printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
+ acb->host->host_no);
return SCSI_MLQUEUE_HOST_BUSY;
}
+
+ acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
+ printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
+ acb->host->host_no);
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb, 1);
+ /*start background rebuild*/
+ arcmsr_start_adapter_bgrb(acb);
+ /* clear Qbuffer if door bell ringed */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ /*clear interrupt */
+ writel(outbound_doorbell, &reg->outbound_doorbell);
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
+ &reg->inbound_doorbell);
+ /* enable outbound Post Queue,outbound doorbell Interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B: {
+ }
+ }
+ }
+
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
@@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
return 0;
}
-static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
+static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
+ char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
int count;
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
if (arcmsr_hba_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
+ return NULL;
}
+ if (mode == 1) {
count = 8;
while (count) {
*acb_firm_model = readb(iop_firm_model);
@@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
count--;
}
+ count = 16;
+ while (count) {
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+
printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
, acb->host->host_no
, acb->firm_version);
-
+ acb->signature = readl(&reg->message_rwbuffer[0]);
acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
}
-
-static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
+ return reg->message_rwbuffer;
+}
+static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
{
struct MessageUnit_B *reg = acb->pmuB;
uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
+ char *acb_device_map = acb->device_map;
char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
/*firm_model,15,60-67*/
char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
/*firm_version,17,68-83*/
+ char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
+ /*firm_version,21,84-99*/
int count;
writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
if (arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
+ return NULL;
}
+ if (mode == 1) {
count = 8;
while (count)
{
@@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
count--;
}
+ count = 16;
+ while (count) {
+ *acb_device_map = readb(iop_device_map);
+ acb_device_map++;
+ iop_device_map++;
+ count--;
+ }
+
printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
acb->host->host_no,
acb->firm_version);
- lrwbuffer++;
+ acb->signature = readl(lrwbuffer++);
+ /*firm_signature,1,00-03*/
acb->firm_request_len = readl(lrwbuffer++);
/*firm_request_len,1,04-07*/
acb->firm_numbers_queue = readl(lrwbuffer++);
@@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
acb->firm_hd_channels = readl(lrwbuffer);
/*firm_ide_channels,4,16-19*/
}
-
-static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
+ return reg->msgcode_rwbuffer_reg;
+}
+static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
{
+ void *rtnval = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_get_hba_config(acb);
+ rtnval = arcmsr_get_hba_config(acb, mode);
}
break;
case ACB_ADAPTER_TYPE_B: {
- arcmsr_get_hbb_config(acb);
+ rtnval = arcmsr_get_hbb_config(acb, mode);
}
break;
}
+ return rtnval;
}
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
@@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
}
}
+static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
+ acb->fw_state = false;
+ } else {
+ /*to prevent rq_map_token from changing by other interrupt, then
+ avoid the dead-lock*/
+ acb->fw_state = true;
+ atomic_dec(&acb->rq_map_token);
+ if (!(acb->fw_state) ||
+ (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ acb->ante_token_value = atomic_read(&acb->rq_map_token);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ }
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
+ return;
+}
+
+static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B __iomem *reg = acb->pmuB;
+
+ if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
+ acb->fw_state = false;
+ } else {
+ /*to prevent rq_map_token from changing by other interrupt, then
+ avoid the dead-lock*/
+ acb->fw_state = true;
+ atomic_dec(&acb->rq_map_token);
+ if (!(acb->fw_state) ||
+ (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
+ atomic_set(&acb->rq_map_token, 16);
+ }
+ acb->ante_token_value = atomic_read(&acb->rq_map_token);
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
+ }
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
+ return;
+}
+
+static void arcmsr_request_device_map(unsigned long pacb)
+{
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_request_hba_device_map(acb);
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_request_hbb_device_map(acb);
+ }
+ break;
+ }
+}
+
static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
return;
}
+static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
+{
+ uint8_t value[64];
+ int i;
+
+ /* backup pci config data */
+ for (i = 0; i < 64; i++) {
+ pci_read_config_byte(acb->pdev, i, &value[i]);
+ }
+ /* hardware reset signal */
+ pci_write_config_byte(acb->pdev, 0x84, 0x20);
+ msleep(1000);
+ /* write back pci config data */
+ for (i = 0; i < 64; i++) {
+ pci_write_config_byte(acb->pdev, i, value[i]);
+ }
+ msleep(1000);
+ return;
+}
+/*
+****************************************************************************
+****************************************************************************
+*/
+#ifdef CONFIG_SCSI_ARCMSR_RESET
+ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
+ {
+ struct Scsi_Host *shost = NULL;
+ spinlock_t *host_lock = NULL;
+ int i, isleep;
+
+ shost = cmd->device->host;
+ host_lock = shost->host_lock;
+
+ printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
+ shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
+ isleep = sleeptime / 10;
+ spin_unlock_irq(host_lock);
+ if (isleep > 0) {
+ for (i = 0; i < isleep; i++) {
+ msleep(10000);
+ printk(KERN_NOTICE "^%d^\n", i);
+ }
+ }
+
+ isleep = sleeptime % 10;
+ if (isleep > 0) {
+ msleep(isleep * 1000);
+ printk(KERN_NOTICE "^v^\n");
+ }
+ spin_lock_irq(host_lock);
+ printk(KERN_NOTICE "***** wake up *****\n");
+ return 0;
+ }
+#endif
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_wait_firmware_ready(acb);
arcmsr_iop_confirm(acb);
- arcmsr_get_firmware_spec(acb);
+ arcmsr_get_firmware_spec(acb, 1);
/*start background rebuild*/
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
@@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
acb->acb_flags |= ACB_F_IOP_INITED;
}
-static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
struct CommandControlBlock *ccb;
uint32_t intmask_org;
+ uint8_t rtnval = 0x00;
int i = 0;
if (atomic_read(&acb->ccboutstandingcount) != 0) {
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
/* talk to iop 331 outstanding command aborted */
- arcmsr_abort_allcmd(acb);
-
+ rtnval = arcmsr_abort_allcmd(acb);
/* wait for 3 sec for all command aborted*/
ssleep(3);
-
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
- ccb->startdone = ARCMSR_CCB_ABORTED;
arcmsr_ccb_complete(ccb, 1);
}
}
+ atomic_set(&acb->ccboutstandingcount, 0);
/* enable all outbound interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
+ return rtnval;
}
+ return rtnval;
}
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)cmd->device->host->hostdata;
- int i;
+ int retry = 0;
- acb->num_resets++;
+ if (acb->acb_flags & ACB_F_BUS_RESET)
+ return SUCCESS;
+
+ printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
acb->acb_flags |= ACB_F_BUS_RESET;
- for (i = 0; i < 400; i++) {
- if (!atomic_read(&acb->ccboutstandingcount))
+ acb->num_resets++;
+ while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
+ arcmsr_interrupt(acb);
+ retry++;
+ }
+
+ if (arcmsr_iop_reset(acb)) {
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
+ acb->adapter_index, acb->num_resets, acb->num_aborts);
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+ #ifdef CONFIG_SCSI_ARCMSR_RESET
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ uint32_t intmask_org, outbound_doorbell;
+ int retry_count = 0;
+sleep_again:
+ arcmsr_sleep_for_bus_reset(cmd);
+ if ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
+ printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
+ acb->host->host_no, retry_count);
+ if (retry_count > retrycount) {
+ printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
+ acb->host->host_no);
+ return SUCCESS;
+ }
+ retry_count++;
+ goto sleep_again;
+ }
+ acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
+ acb->acb_flags |= ACB_F_IOP_INITED;
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
+ acb->host->host_no);
+ /* disable all outbound interrupt */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_get_firmware_spec(acb, 1);
+ /*start background rebuild*/
+ arcmsr_start_adapter_bgrb(acb);
+ /* clear Qbuffer if door bell ringed */
+ outbound_doorbell = readl(&reg->outbound_doorbell);
+ writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+ /* enable outbound Post Queue,outbound doorbell Interrupt */
+ arcmsr_enable_outbound_ints(acb, intmask_org);
+ atomic_set(&acb->rq_map_token, 16);
+ init_timer(&acb->eternal_timer);
+ acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
+ acb->eternal_timer.data = (unsigned long) acb;
+ acb->eternal_timer.function = &arcmsr_request_device_map;
+ add_timer(&acb->eternal_timer);
+ #endif
+ }
break;
- arcmsr_interrupt(acb);/* FIXME: need spinlock */
- msleep(25);
+ case ACB_ADAPTER_TYPE_B: {
}
- arcmsr_iop_reset(acb);
+ }
+ } else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
+ }
return SUCCESS;
}
@@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host)
ARCMSR_DRIVER_VERSION);
return buf;
}
-#ifdef CONFIG_SCSI_ARCMSR_AER
-static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
-{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- struct AdapterControlBlock *acb =
- (struct AdapterControlBlock *) host->hostdata;
- uint32_t intmask_org;
- int i, j;
-
- if (pci_enable_device(pdev)) {
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- intmask_org = arcmsr_disable_outbound_ints(acb);
- acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
- ACB_F_MESSAGE_RQBUFFER_CLEARED |
- ACB_F_MESSAGE_WQBUFFER_READED);
- acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
- for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
- for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
- acb->devstate[i][j] = ARECA_RAID_GONE;
-
- arcmsr_wait_firmware_ready(acb);
- arcmsr_iop_confirm(acb);
- /* disable all outbound interrupt */
- arcmsr_get_firmware_spec(acb);
- /*start background rebuild*/
- arcmsr_start_adapter_bgrb(acb);
- /* empty doorbell Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_eoi_mode(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- acb->acb_flags |= ACB_F_IOP_INITED;
-
- pci_enable_pcie_error_reporting(pdev);
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
-{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
- struct CommandControlBlock *ccb;
- uint32_t intmask_org;
- int i = 0;
-
- if (atomic_read(&acb->ccboutstandingcount) != 0) {
- /* talk to iop 331 outstanding command aborted */
- arcmsr_abort_allcmd(acb);
- /* wait for 3 sec for all command aborted*/
- ssleep(3);
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- /* clear all outbound posted Q */
- arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
- ccb = acb->pccb_pool[i];
- if (ccb->startdone == ARCMSR_CCB_START) {
- ccb->startdone = ARCMSR_CCB_ABORTED;
- arcmsr_ccb_complete(ccb, 1);
- }
- }
- /* enable all outbound interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- }
- pci_disable_device(pdev);
-}
-
-static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
-{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- struct AdapterControlBlock *acb = \
- (struct AdapterControlBlock *)host->hostdata;
-
- arcmsr_stop_adapter_bgrb(acb);
- arcmsr_flush_adapter_cache(acb);
-}
-
-static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- switch (state) {
- case pci_channel_io_frozen:
- arcmsr_pci_ers_need_reset_forepart(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- arcmsr_pci_ers_disconnect_forepart(pdev);
- return PCI_ERS_RESULT_DISCONNECT;
- break;
- default:
- return PCI_ERS_RESULT_NEED_RESET;
- }
-}
-#endif
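
The arcmsr hunks above add an eternal_timer that periodically re-requests the controller's device map and re-arms itself from its own handler. A minimal sketch of that self-rearming pattern, using the same pre-timer_setup() timer API (init_timer()/mod_timer()) as the patch; the my_* names and the 6-second period are only illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* hypothetical per-adapter state for the sketch */
struct my_adapter {
	struct timer_list poll_timer;
};

static void my_poll_device_map(unsigned long data)
{
	struct my_adapter *adap = (struct my_adapter *)data;

	/* ... issue the "get config" message to the controller here ... */

	/* re-arm from the handler, as arcmsr_request_device_map() does */
	mod_timer(&adap->poll_timer, jiffies + msecs_to_jiffies(6000));
}

static void my_start_polling(struct my_adapter *adap)
{
	init_timer(&adap->poll_timer);
	adap->poll_timer.data = (unsigned long)adap;
	adap->poll_timer.function = my_poll_device_map;
	adap->poll_timer.expires = jiffies + msecs_to_jiffies(6000);
	add_timer(&adap->poll_timer);
}
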
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index e641922f20b..350cbeaae16 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
&nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
SE_DEBUG(DBG_LVL_1,
- "Failed to allocate memory for"
- "mgmt_invalidate_icds \n");
+ "Failed to allocate memory for mgmt_invalidate_icds\n");
spin_unlock(&ctrl->mbox_lock);
- return -1;
+ return 0;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
req = nonemb_cmd.va;
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 961fe439daa..53a616f5f50 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -117,35 +117,6 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
}
/**
- * Get SG element for the I/O request given the SG element index
- */
-static inline union bfi_addr_u
-bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u64 addr;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- addr = (u64) sg_dma_address(sge);
-
- return *((union bfi_addr_u *) &addr);
-}
-
-static inline u32
-bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
- struct scatterlist *sge;
- u32 len;
-
- sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
- len = sg_dma_len(sge);
-
- return len;
-}
-
-/**
* Get Command Reference Number for the I/O request. 0 if none.
*/
static inline u8
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0c08e185a76..3a7b3f88932 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
+ dm_len += bfa_port_meminfo();
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
+static void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+ struct bfa_port_s *port = &bfa->modules.port;
+ uint32_t dm_len;
+ uint8_t *dm_kva;
+ uint64_t dm_pa;
+
+ dm_len = bfa_port_meminfo();
+ dm_kva = bfa_meminfo_dma_virt(mi);
+ dm_pa = bfa_meminfo_dma_phys(mi);
+
+ memset(port, 0, sizeof(struct bfa_port_s));
+ bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
+ bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+ bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+ bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}
+
/**
* Use this function to do attach the driver instance with the BFA
* library. This function will not trigger any HW initialization
@@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
+ bfa_com_port_attach(bfa, meminfo);
}
/**
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 5b107abe46e..687f3d6e252 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -731,6 +731,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
static struct fcp_cmnd_s cmnd_z0 = { 0 };
struct bfi_sge_s *sge;
u32 pgdlen = 0;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
/**
* check for room in queue to send request now
@@ -754,8 +757,10 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
*/
sge = &m->sges[0];
if (ioim->nsges) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0);
- pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0);
+ sg = (struct scatterlist *)scsi_sglist(cmnd);
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ pgdlen = sg_dma_len(sg);
sge->sg_len = pgdlen;
sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
@@ -868,10 +873,16 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
struct bfi_sge_s *sge;
struct bfa_sgpg_s *sgpg;
u32 pgcumsz;
+ u64 addr;
+ struct scatterlist *sg;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
sgeid = BFI_SGE_INLINE;
ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
+ sg = scsi_sglist(cmnd);
+ sg = sg_next(sg);
+
do {
sge = sgpg->sgpg->sges;
nsges = ioim->nsges - sgeid;
@@ -879,9 +890,10 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
nsges = BFI_SGPG_DATA_SGES;
pgcumsz = 0;
- for (i = 0; i < nsges; i++, sge++, sgeid++) {
- sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid);
- sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid);
+ for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
+ addr = bfa_os_sgaddr(sg_dma_address(sg));
+ sge->sga = *(union bfi_addr_u *) &addr;
+ sge->sg_len = sg_dma_len(sg);
pgcumsz += sge->sg_len;
/**
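
With bfa_cb_ioim_get_sgaddr()/bfa_cb_ioim_get_sglen() removed, bfa_ioim now walks the DMA-mapped scatterlist directly with sg_next(), sg_dma_address() and sg_dma_len(). A minimal sketch of that iteration over a scsi_cmnd, assuming the list has already been mapped for DMA (walk_sg is a placeholder name):

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Walk the DMA-mapped scatterlist of a scsi_cmnd the way the new
 * bfa_ioim_send_ioreq()/bfa_ioim_sgpg_setup() code does. */
static void walk_sg(struct scsi_cmnd *cmnd)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), i) {
		u64 addr = sg_dma_address(sg);	/* bus address of this element */
		u32 len  = sg_dma_len(sg);	/* DMA length of this element */

		/* ... program addr/len into the adapter's SGE here ... */
		(void)addr;
		(void)len;
	}
}
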
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 10a89f75fa9..bd1cd3ee302 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -50,6 +50,10 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
+#ifdef __BIG_ENDIAN
+#define __BIGENDIAN
+#endif
+
#define BFA_ERR KERN_ERR
#define BFA_WARNING KERN_WARNING
#define BFA_NOTICE KERN_NOTICE
@@ -123,6 +127,15 @@ int bfa_os_MWB(void *);
(((_x) & 0x00ff0000) >> 8) | \
(((_x) & 0xff000000) >> 24))
+#define bfa_os_swap_sgaddr(_x) ((u64)( \
+ (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
+ (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
+ (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
+ (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
+ (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
+ (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
#ifndef __BIGENDIAN
#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
@@ -133,6 +146,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
#define bfa_os_wtole(_x) (_x)
+#define bfa_os_sgaddr(_x) (_x)
#else
@@ -141,6 +155,7 @@ int bfa_os_MWB(void *);
#define bfa_os_hton3b(_x) (_x)
#define bfa_os_htonll(_x) (_x)
#define bfa_os_wtole(_x) bfa_os_swap32(_x)
+#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
#endif
@@ -161,12 +176,12 @@ int bfa_os_MWB(void *);
#define bfa_os_addr_t char __iomem *
#define bfa_os_panic()
-#define bfa_os_reg_read(_raddr) bfa_os_wtole(readl(_raddr))
-#define bfa_os_reg_write(_raddr, _val) writel(bfa_os_wtole((_val)), (_raddr))
+#define bfa_os_reg_read(_raddr) readl(_raddr)
+#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
#define bfa_os_mem_read(_raddr, _off) \
- bfa_os_ntohl(readl(((_raddr) + (_off))))
+ bfa_os_swap32(readl(((_raddr) + (_off))))
#define bfa_os_mem_write(_raddr, _off, _val) \
- writel(bfa_os_htonl((_val)), ((_raddr) + (_off)))
+ writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
#define BFA_TRC_TS(_trcm) \
({ \
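
Each term of the new bfa_os_swap_sgaddr() macro moves one byte of the low 32 bits up by 32 or one byte of the high 32 bits down by 32, so byte order within each half is untouched and the whole expression reduces to swapping the two 32-bit words of the address. An equivalent, shorter formulation for comparison (not part of the patch):

#include <linux/types.h>

/* Swap the two 32-bit halves of a 64-bit S/G address; equivalent to the
 * eight-term mask-and-shift form of bfa_os_swap_sgaddr() above. */
static inline u64 sgaddr_word_swap(u64 x)
{
	return (x << 32) | (x >> 32);
}
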
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 13f5feb308c..d4fc4287ebd 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -33,7 +33,7 @@
#include <fcb/bfa_fcb.h>
BFA_TRC_FILE(LDRV, BFAD);
-static DEFINE_MUTEX(bfad_mutex);
+DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);
static int bfad_inst;
int bfad_supported_fc4s;
@@ -299,8 +299,6 @@ bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
complete(vport_drv->comp_del);
return;
}
-
- kfree(vport_drv);
}
/**
@@ -483,7 +481,7 @@ ext:
*/
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg)
+ struct bfa_port_cfg_s *port_cfg, struct device *dev)
{
struct bfad_vport_s *vport;
int rc = BFA_STATUS_OK;
@@ -506,7 +504,8 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
goto ext_free_vport;
if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
- rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
+ dev);
if (rc != BFA_STATUS_OK)
goto ext_free_fcs_vport;
}
@@ -591,7 +590,6 @@ bfad_init_timer(struct bfad_s *bfad)
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
- unsigned long bar0_len;
int rc = -ENODEV;
if (pci_enable_device(pdev)) {
@@ -611,9 +609,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
goto out_release_region;
}
- bfad->pci_bar0_map = pci_resource_start(pdev, 0);
- bar0_len = pci_resource_len(pdev, 0);
- bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);
+ bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (bfad->pci_bar0_kva == NULL) {
BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
@@ -646,11 +642,7 @@ out:
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
-#if defined(__ia64__)
pci_iounmap(pdev, bfad->pci_bar0_kva);
-#else
- iounmap(bfad->pci_bar0_kva);
-#endif
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -848,7 +840,8 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
goto out;
}
- rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port);
+ rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
+ &bfad->pcidev->dev);
if (rc != BFA_STATUS_OK)
goto out;
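
bfad_pci_init() now maps BAR 0 with pci_iomap() instead of open-coding pci_resource_start() plus ioremap(), which lets bfad_pci_uninit() call pci_iounmap() unconditionally rather than only on ia64. A minimal sketch of the pairing under the same assumptions (map_bar0/unmap_bar0 are placeholder names):

#include <linux/pci.h>

static void __iomem *map_bar0(struct pci_dev *pdev)
{
	/* a maxlen of 0 would map the whole BAR; passing the resource
	 * length, as the patch does, is equivalent here */
	return pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
}

static void unmap_bar0(struct pci_dev *pdev, void __iomem *bar0)
{
	pci_iounmap(pdev, bar0);	/* valid for MMIO and port I/O BARs alike */
}
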
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 6a2efdd5ef2..e477bfbfa7d 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -364,6 +364,152 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
}
+static int
+bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ char *vname = fc_vport->symbolic_name;
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfa_port_cfg_s port_cfg;
+ int status = 0, rc;
+ unsigned long flags;
+
+ memset(&port_cfg, 0, sizeof(port_cfg));
+
+ port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+ port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
+
+ if (strlen(vname) > 0)
+ strcpy((char *)&port_cfg.sym_name, vname);
+
+ port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
+ rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
+
+ if (rc == BFA_STATUS_OK) {
+ struct bfad_vport_s *vport;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
+ port_cfg.pwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return VPCERR_BAD_WWN;
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ vport = fcs_vport->vport_drv;
+ vshost = vport->drv_port.im_port->shost;
+ fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn);
+ fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn);
+ fc_vport->dd_data = vport;
+ vport->drv_port.im_port->fc_vport = fc_vport;
+
+ } else if (rc == BFA_STATUS_INVALID_WWN)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_EXISTS)
+ return VPCERR_BAD_WWN;
+ else if (rc == BFA_STATUS_VPORT_MAX)
+ return VPCERR_NO_FABRIC_SUPP;
+ else if (rc == BFA_STATUS_VPORT_WWN_BP)
+ return VPCERR_BAD_WWN;
+ else
+ return FC_VPORT_FAILED;
+
+ return status;
+}
+
+static int
+bfad_im_vport_delete(struct fc_vport *fc_vport)
+{
+ struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) vport->drv_port.im_port;
+ struct bfad_s *bfad = im_port->bfad;
+ struct bfad_port_s *port;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ int rc;
+ unsigned long flags;
+ struct completion fcomp;
+
+ if (im_port->flags & BFAD_PORT_DELETE)
+ goto free_scsi_host;
+
+ port = im_port->port;
+
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost));
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ vport->drv_port.flags |= BFAD_PORT_DELETE;
+
+ vport->comp_del = &fcomp;
+ init_completion(vport->comp_del);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ rc = bfa_fcs_vport_delete(&vport->fcs_vport);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(vport->comp_del);
+
+free_scsi_host:
+ bfad_os_scsi_host_free(bfad, im_port);
+
+ kfree(vport);
+
+ return 0;
+}
+
+static int
+bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct bfad_vport_s *vport;
+ struct bfad_s *bfad;
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct Scsi_Host *vshost;
+ wwn_t pwwn;
+ unsigned long flags;
+
+ vport = (struct bfad_vport_s *)fc_vport->dd_data;
+ bfad = vport->drv_port.bfad;
+ vshost = vport->drv_port.im_port->shost;
+ pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (fcs_vport == NULL)
+ return VPCERR_BAD_WWN;
+
+ if (disable) {
+ bfa_fcs_vport_stop(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else {
+ bfa_fcs_vport_start(fcs_vport);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+ }
+
+ return 0;
+}
+
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
@@ -413,6 +559,61 @@ struct fc_function_template bfad_im_fc_function_template = {
.show_rport_dev_loss_tmo = 1,
.get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
+
+ .vport_create = bfad_im_vport_create,
+ .vport_delete = bfad_im_vport_delete,
+ .vport_disable = bfad_im_vport_disable,
+};
+
+struct fc_function_template bfad_im_vport_fc_function_template = {
+
+ /* Target dynamic attributes */
+ .get_starget_port_id = bfad_im_get_starget_port_id,
+ .show_starget_port_id = 1,
+ .get_starget_node_name = bfad_im_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = bfad_im_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ /* Host dynamic attribute */
+ .get_host_port_id = bfad_im_get_host_port_id,
+ .show_host_port_id = 1,
+
+ /* Host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* More host dynamic attributes */
+ .show_host_port_type = 1,
+ .get_host_port_type = bfad_im_get_host_port_type,
+ .show_host_port_state = 1,
+ .get_host_port_state = bfad_im_get_host_port_state,
+ .show_host_active_fc4s = 1,
+ .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
+ .show_host_speed = 1,
+ .get_host_speed = bfad_im_get_host_speed,
+ .show_host_fabric_name = 1,
+ .get_host_fabric_name = bfad_im_get_host_fabric_name,
+
+ .show_host_symbolic_name = 1,
+
+ /* Statistics */
+ .get_fc_host_stats = bfad_im_get_stats,
+ .reset_fc_host_stats = bfad_im_reset_stats,
+
+ /* Allocation length for host specific data */
+ .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
+
+ /* Remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+ .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
/**
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 107848cd3b6..6c920c1b53a 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -162,7 +162,6 @@ struct bfad_s {
const char *pci_name;
struct bfa_pcidev_s hal_pcidev;
struct bfa_ioc_pci_attr_s pci_attr;
- unsigned long pci_bar0_map;
void __iomem *pci_bar0_kva;
struct completion comp;
struct completion suspend;
@@ -254,7 +253,7 @@ do { \
bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
- struct bfa_port_cfg_s *port_cfg);
+ struct bfa_port_cfg_s *port_cfg, struct device *dev);
bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
@@ -294,5 +293,6 @@ extern struct list_head bfad_list;
extern int bfa_lun_queue_depth;
extern int bfad_supported_fc4s;
extern int bfa_linkup_delay;
+extern struct mutex bfad_mutex;
#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 78f42aa5736..5b7cf539e50 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -30,6 +30,7 @@ BFA_TRC_FILE(LDRV, IM);
DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
+struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *));
@@ -252,7 +253,6 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
struct bfa_itnim_s *bfa_itnim;
bfa_status_t rc = BFA_STATUS_OK;
- bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
if (!tskim) {
BFA_DEV_PRINTF(bfad, BFA_ERR,
@@ -513,11 +513,14 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
* Allocate a Scsi_Host for a port.
*/
int
-bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
+ struct device *dev)
{
int error = 1;
+ mutex_lock(&bfad_mutex);
if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_pre_get failure\n");
goto out;
}
@@ -525,10 +528,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
error = idr_get_new(&bfad_im_port_index, im_port,
&im_port->idr_id);
if (error) {
+ mutex_unlock(&bfad_mutex);
printk(KERN_WARNING "idr_get_new failure\n");
goto out;
}
+ mutex_unlock(&bfad_mutex);
+
im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
if (!im_port->shost) {
error = 1;
@@ -542,12 +548,15 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
im_port->shost->max_lun = MAX_FCP_LUN;
im_port->shost->max_cmd_len = 16;
im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
- im_port->shost->transportt = bfad_im_scsi_transport_template;
+ if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
+ im_port->shost->transportt = bfad_im_scsi_transport_template;
+ else
+ im_port->shost->transportt =
+ bfad_im_scsi_vport_transport_template;
- error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
+ error = scsi_add_host(im_port->shost, dev);
if (error) {
- printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
- error);
+ printk(KERN_WARNING "scsi_add_host failure %d\n", error);
goto out_fc_rel;
}
@@ -559,7 +568,9 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
out_fc_rel:
scsi_host_put(im_port->shost);
out_free_idr:
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
+ mutex_unlock(&bfad_mutex);
out:
return error;
}
@@ -567,8 +578,6 @@ out:
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- unsigned long flags;
-
bfa_trc(bfad, bfad->inst_no);
bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
im_port->shost->host_no);
@@ -578,9 +587,9 @@ bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
scsi_remove_host(im_port->shost);
scsi_host_put(im_port->shost);
- spin_lock_irqsave(&bfad->bfad_lock, flags);
+ mutex_lock(&bfad_mutex);
idr_remove(&bfad_im_port_index, im_port->idr_id);
- spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ mutex_unlock(&bfad_mutex);
}
static void
@@ -589,9 +598,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
struct bfad_im_port_s *im_port =
container_of(work, struct bfad_im_port_s, port_delete_work);
- bfad_im_scsi_host_free(im_port->bfad, im_port);
- bfad_im_port_clean(im_port);
- kfree(im_port);
+ if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
+ im_port->flags |= BFAD_PORT_DELETE;
+ fc_vport_terminate(im_port->fc_vport);
+ }
+
}
bfa_status_t
@@ -690,23 +701,6 @@ bfad_im_probe_undo(struct bfad_s *bfad)
}
}
-
-
-
-int
-bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
- struct bfad_s *bfad)
-{
- struct device *dev;
-
- if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
- dev = &bfad->pcidev->dev;
- else
- dev = &bfad->pport.im_port->shost->shost_gendev;
-
- return scsi_add_host(shost, dev);
-}
-
struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
@@ -725,7 +719,8 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
- flush_workqueue(bfad->im->drv_workq);
+ if (!(im_port->flags & BFAD_PORT_DELETE))
+ flush_workqueue(bfad->im->drv_workq);
bfad_im_scsi_host_free(im_port->bfad, im_port);
bfad_im_port_clean(im_port);
kfree(im_port);
@@ -830,6 +825,13 @@ bfad_im_module_init(void)
if (!bfad_im_scsi_transport_template)
return BFA_STATUS_ENOMEM;
+ bfad_im_scsi_vport_transport_template =
+ fc_attach_transport(&bfad_im_vport_fc_function_template);
+ if (!bfad_im_scsi_vport_transport_template) {
+ fc_release_transport(bfad_im_scsi_transport_template);
+ return BFA_STATUS_ENOMEM;
+ }
+
return BFA_STATUS_OK;
}
@@ -838,6 +840,8 @@ bfad_im_module_exit(void)
{
if (bfad_im_scsi_transport_template)
fc_release_transport(bfad_im_scsi_transport_template);
+ if (bfad_im_scsi_vport_transport_template)
+ fc_release_transport(bfad_im_scsi_vport_transport_template);
}
void
@@ -938,6 +942,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
fc_host_port_name(host) =
bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
+ fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
fc_host_supported_classes(host) = FC_COS_CLASS3;
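
The bfad_im changes above protect the host-index IDR with bfad_mutex rather than the adapter spinlock, since idr_pre_get(GFP_KERNEL) may sleep. A minimal sketch of the two-step idr_pre_get()/idr_get_new() pattern of this kernel generation (later replaced by idr_alloc()); the my_* names are illustrative:

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(my_port_index);
static DEFINE_MUTEX(my_idr_mutex);

/* Allocate an ID for @ptr; returns 0 on success or a negative errno. */
static int my_alloc_id(void *ptr, int *id)
{
	int err;

	mutex_lock(&my_idr_mutex);
	if (!idr_pre_get(&my_port_index, GFP_KERNEL)) {
		mutex_unlock(&my_idr_mutex);
		return -ENOMEM;
	}
	err = idr_get_new(&my_port_index, ptr, id);
	mutex_unlock(&my_idr_mutex);
	return err;
}

static void my_free_id(int id)
{
	mutex_lock(&my_idr_mutex);
	idr_remove(&my_port_index, id);
	mutex_unlock(&my_idr_mutex);
}
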
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 85ab2da2132..973cab4d09c 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -34,7 +34,7 @@ void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
void bfad_im_port_clean(struct bfad_im_port_s *im_port);
int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
- struct bfad_im_port_s *im_port);
+ struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
@@ -64,9 +64,11 @@ struct bfad_im_port_s {
struct work_struct port_delete_work;
int idr_id;
u16 cur_scsi_id;
+ u16 flags;
struct list_head binding_list;
struct Scsi_Host *shost;
struct list_head itnim_mapped_list;
+ struct fc_vport *fc_vport;
};
enum bfad_itnim_state {
@@ -140,6 +142,8 @@ void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
extern struct scsi_host_template bfad_im_scsi_host_template;
extern struct scsi_host_template bfad_im_vport_template;
extern struct fc_function_template bfad_im_fc_function_template;
+extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
+extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
#endif
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index b0ac9ac15c5..6eaf519eccd 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -50,7 +50,7 @@ struct bfa_cee_lldp_str_s {
};
-/* LLDP paramters */
+/* LLDP parameters */
struct bfa_cee_lldp_cfg_s {
struct bfa_cee_lldp_str_s chassis_id;
struct bfa_cee_lldp_str_s port_id;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index 4374494bd56..ec78b4cb121 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -223,9 +223,9 @@ enum bfa_status {
BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to
* Non-Zero Value */
BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network
- * Subsytem Lock Failed.Please
+ * Subsystem Lock Failed.Please
* try after some time */
- BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem
+ BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
* handle Failed. Please try
* after some time */
BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 18352ff8210..3a66ca24c7b 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -347,6 +347,7 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;
login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
login_wqe->resp_bd_list_addr_hi =
@@ -356,7 +357,6 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
(bnx2i_conn->gen_pdu.resp_buf_size <<
ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
login_wqe->resp_buffer = dword;
- login_wqe->flags = 0;
login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
login_wqe->bd_list_addr_hi =
(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 5d9296c599f..af6a00a600f 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.1.0"
-#define DRV_MODULE_RELDATE "Dec 06, 2009"
+#define DRV_MODULE_VERSION "2.1.1"
+#define DRV_MODULE_RELDATE "Mar 24, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -26,7 +26,8 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+ " iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -289,6 +290,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
int rc;
mutex_lock(&bnx2i_dev_lock);
+ hba->cnic = cnic;
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (!rc) {
hba->age++;
@@ -335,8 +337,7 @@ void bnx2i_ulp_init(struct cnic_dev *dev)
if (bnx2i_init_one(hba, dev)) {
printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
bnx2i_free_hba(hba);
- } else
- hba->cnic = dev;
+ }
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index d0ab23a5835..685af369851 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -104,8 +104,10 @@ static int __init cxgb3i_init_module(void)
return err;
err = cxgb3i_pdu_init();
- if (err < 0)
+ if (err < 0) {
+ cxgb3i_iscsi_cleanup();
return err;
+ }
cxgb3_register_client(&t3c_client);
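
The cxgb3i fix unwinds the already-initialized iSCSI layer when PDU initialization fails, instead of returning with half the module set up. A minimal sketch of that staged init-with-rollback idiom; the stage_* functions are placeholders standing in for cxgb3i_iscsi_init(), cxgb3i_pdu_init() and cxgb3i_iscsi_cleanup():

#include <linux/module.h>

static int stage_one_init(void)     { return 0; }	/* placeholder */
static void stage_one_cleanup(void) { }			/* placeholder */
static int stage_two_init(void)     { return 0; }	/* placeholder */

static int __init my_init_module(void)
{
	int err;

	err = stage_one_init();
	if (err < 0)
		return err;

	err = stage_two_init();
	if (err < 0) {
		/* undo the first stage before reporting failure,
		 * as the cxgb3i fix above now does */
		stage_one_cleanup();
		return err;
	}
	return 0;
}
module_init(my_init_module);
MODULE_LICENSE("GPL");
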
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e8a0bc3efd4..6faf472f753 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -285,13 +285,11 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
switch (cmd) {
case MODE_SELECT:
len = sizeof(short_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[4] = len;
break;
case MODE_SELECT_10:
len = sizeof(long_trespass);
- rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
rq->cmd[8] = len;
break;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0435d044c9d..b0c576f84b2 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -114,12 +114,13 @@ static int hba_count = 0;
static struct class *adpt_sysfs_class;
+static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif
static const struct file_operations adpt_fops = {
- .ioctl = adpt_ioctl,
+ .unlocked_ioctl = adpt_unlocked_ioctl,
.open = adpt_open,
.release = adpt_close,
#ifdef CONFIG_COMPAT
@@ -2069,8 +2070,7 @@ static int adpt_system_info(void __user *buffer)
return 0;
}
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
- ulong arg)
+static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
int minor;
int error = 0;
@@ -2153,6 +2153,20 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
return error;
}
+static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ struct inode *inode;
+ long ret;
+
+ inode = file->f_dentry->d_inode;
+
+ lock_kernel();
+ ret = adpt_ioctl(inode, file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f01b9b44e8a..44a07593de5 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -74,6 +74,7 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
static int fcoe_percpu_receive_thread(void *);
static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
+static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
@@ -146,6 +147,7 @@ static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
+static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -153,6 +155,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.ddp_done = fcoe_ddp_done,
.elsct_send = fcoe_elsct_send,
.get_lesb = fcoe_get_lesb,
+ .lport_set_port_id = fcoe_set_port_id,
};
struct fc_function_template fcoe_transport_function = {
@@ -309,10 +312,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* for multiple unicast MACs.
*/
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_add(netdev, flogi_maddr);
+ dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_add(netdev, fip->ctl_src_addr);
- dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_add(netdev, fip->ctl_src_addr);
+ dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
/*
* setup the receive function from ethernet driver
@@ -395,10 +398,10 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Delete secondary MAC addresses */
memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
- dev_unicast_delete(netdev, flogi_maddr);
+ dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
- dev_unicast_delete(netdev, fip->ctl_src_addr);
- dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+ dev_uc_del(netdev, fip->ctl_src_addr);
+ dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
/* Tell the LLD we are done w/ FCoE */
ops = netdev->netdev_ops;
@@ -491,9 +494,9 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(fcoe->netdev, port->data_src_addr);
+ dev_uc_del(fcoe->netdev, port->data_src_addr);
if (!is_zero_ether_addr(addr))
- dev_unicast_add(fcoe->netdev, addr);
+ dev_uc_add(fcoe->netdev, addr);
memcpy(port->data_src_addr, addr, ETH_ALEN);
rtnl_unlock();
}
@@ -629,6 +632,8 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
+ fcoe_link_speed_update(lport);
+
if (!lport->vport) {
/*
* Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
@@ -653,15 +658,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/**
* fcoe_shost_config() - Set up the SCSI host associated with a local port
* @lport: The local port
- * @shost: The SCSI host to associate with the local port
* @dev: The device associated with the SCSI host
*
* Must be called after fcoe_lport_config() and fcoe_netdev_config()
*
* Returns: 0 for success
*/
-static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
- struct device *dev)
+static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
{
int rc = 0;
@@ -669,6 +672,8 @@ static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
lport->host->max_lun = FCOE_MAX_LUN;
lport->host->max_id = FCOE_MAX_FCP_TARGET;
lport->host->max_channel = 0;
+ lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
if (lport->vport)
lport->host->transportt = fcoe_vport_transport_template;
else
@@ -683,7 +688,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
}
if (!lport->vport)
- fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
"%s v%s over %s", FCOE_NAME, FCOE_VERSION,
@@ -796,6 +801,12 @@ skip_oem:
/**
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
+ *
+ * Locking: must be called with the RTNL mutex held. The RTNL mutex is
+ * dropped by this function, because holding it across the synchronous
+ * FIP worker cancellation in fcoe_interface_put(), invoked by this
+ * function, would trigger a circular-locking warning.
+ *
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
@@ -818,9 +829,8 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
- dev_unicast_delete(netdev, port->data_src_addr);
+ dev_uc_del(netdev, port->data_src_addr);
rtnl_unlock();
/* receives may not be stopped until after this */
@@ -841,6 +851,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
+ module_put(THIS_MODULE);
}
/**
@@ -897,7 +908,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport = NULL;
struct fcoe_port *port;
- struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -919,7 +929,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
rc = -ENOMEM;
goto out;
}
- shost = lport->host;
port = lport_priv(lport);
port->lport = lport;
port->fcoe = fcoe;
@@ -934,7 +943,8 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
if (npiv) {
- FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
+ FCOE_NETDEV_DBG(netdev, "Setting vport names, "
+ "%16.16llx %16.16llx\n",
vport->node_name, vport->port_name);
fc_set_wwnn(lport, vport->node_name);
fc_set_wwpn(lport, vport->port_name);
@@ -949,7 +959,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* configure lport scsi host properties */
- rc = fcoe_shost_config(lport, shost, parent);
+ rc = fcoe_shost_config(lport, parent);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
"interface\n");
@@ -1073,7 +1083,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
- unsigned targ_cpu = smp_processor_id();
+ unsigned targ_cpu = get_cpu();
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1129,6 +1139,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
+ put_cpu();
#else
/*
* This a non-SMP scenario where the singular Rx thread is
@@ -1297,8 +1308,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return 0;
err:
- fc_lport_get_stats(lport)->ErrorFrames++;
-
+ per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
+ put_cpu();
err2:
kfree_skb(skb);
return -1;
@@ -1444,7 +1455,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return 0;
}
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
+ if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
return 0;
@@ -1527,9 +1538,10 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
+ put_cpu();
/* send down to lld */
fr_dev(fp) = lport;
@@ -1563,7 +1575,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
- u8 *mac = NULL;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -1583,13 +1594,9 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_end_pointer(skb), skb->csum,
skb->dev ? skb->dev->name : "<NULL>");
- /*
- * Save source MAC address before discarding header.
- */
port = lport_priv(lport);
if (skb_is_nonlinear(skb))
skb_linearize(skb); /* not ideal */
- mac = eth_hdr(skb)->h_source;
/*
* Frame length checks and setting up the header pointers
@@ -1598,7 +1605,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
hp = (struct fcoe_hdr *) skb_network_header(skb);
fh = (struct fc_frame_header *) skb_transport_header(skb);
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
@@ -1607,9 +1614,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
"initiator supports version "
"%x\n", FC_FCOE_DECAPS_VER(hp),
FC_FCOE_VER);
- stats->ErrorFrames++;
- kfree_skb(skb);
- return;
+ goto drop;
}
skb_pull(skb, sizeof(struct fcoe_hdr));
@@ -1624,16 +1629,12 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_sof(fp) = hp->fcoe_sof;
/* Copy out the CRC and EOF trailer for access */
- if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
- kfree_skb(skb);
- return;
- }
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
+ goto drop;
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
- if (pskb_trim(skb, fr_len)) {
- kfree_skb(skb);
- return;
- }
+ if (pskb_trim(skb, fr_len))
+ goto drop;
/*
* We only check CRC if no offload is available and if it is
@@ -1647,25 +1648,27 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
fh = fc_frame_header_get(fp);
- if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
- fh->fh_type == FC_TYPE_FCP) {
- fc_exch_recv(lport, fp);
- return;
- }
- if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
+ fh->fh_type != FC_TYPE_FCP) &&
+ (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping "
"frame with CRC error\n");
stats->InvalidCRCCount++;
- stats->ErrorFrames++;
- fc_frame_free(fp);
- return;
+ goto drop;
}
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
}
+ put_cpu();
fc_exch_recv(lport, fp);
+ return;
+
+drop:
+ stats->ErrorFrames++;
+ put_cpu();
+ kfree_skb(skb);
}
/**
@@ -1835,11 +1838,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
"from netdev netlink\n", event);
}
+
+ fcoe_link_speed_update(lport);
+
if (link_possible && !fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
+ put_cpu();
fcoe_clean_pending_queue(lport);
}
out:
@@ -1901,13 +1908,19 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
fc_fabric_logoff(fcoe->ctlr.lp);
- else
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -1950,13 +1963,20 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (fcoe)
+ if (fcoe) {
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
rc = fc_fabric_login(fcoe->ctlr.lp);
- else
+ } else
rc = -ENODEV;
dev_put(netdev);
@@ -1999,7 +2019,12 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
- rtnl_lock();
+ if (!rtnl_trylock()) {
+ dev_put(netdev);
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
@@ -2008,9 +2033,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
}
list_del(&fcoe->list);
fcoe_interface_cleanup(fcoe);
- rtnl_unlock();
+ /* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
- module_put(THIS_MODULE);
out_putdev:
dev_put(netdev);
@@ -2029,6 +2053,8 @@ static void fcoe_destroy_work(struct work_struct *work)
port = container_of(work, struct fcoe_port, destroy_work);
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
+ /* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(port->lport);
mutex_unlock(&fcoe_config_mutex);
}
@@ -2050,6 +2076,12 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
+
+ if (!rtnl_trylock()) {
+ mutex_unlock(&fcoe_config_mutex);
+ return restart_syscall();
+ }
+
#ifdef CONFIG_FCOE_MODULE
/*
* Make sure the module has been initialized, and is not about to be
@@ -2058,7 +2090,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nodev;
+ goto out_nomod;
}
#endif
@@ -2067,7 +2099,6 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
goto out_nomod;
}
- rtnl_lock();
netdev = fcoe_if_to_netdev(buffer);
if (!netdev) {
rc = -ENODEV;
@@ -2122,35 +2153,27 @@ out_free:
out_putdev:
dev_put(netdev);
out_nodev:
- rtnl_unlock();
module_put(THIS_MODULE);
out_nomod:
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
}
/**
- * fcoe_link_ok() - Check if the link is OK for a local port
- * @lport: The local port to check link on
- *
- * Any permanently-disqualifying conditions have been previously checked.
- * This also updates the speed setting, which may change with link for 100/1000.
- *
- * This function should probably be checking for PAUSE support at some point
- * in the future. Currently Per-priority-pause is not determinable using
- * ethtool, so we shouldn't be restrictive until that problem is resolved.
- *
- * Returns: 0 if link is OK for use by FCoE.
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
*
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
*/
-int fcoe_link_ok(struct fc_lport *lport)
+int fcoe_link_speed_update(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
struct net_device *netdev = port->fcoe->netdev;
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
- (!dev_ethtool_get_settings(netdev, &ecmd))) {
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
lport->link_supported_speeds &=
~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2170,6 +2193,23 @@ int fcoe_link_ok(struct fc_lport *lport)
}
/**
+ * fcoe_link_ok() - Check if the link is OK for a local port
+ * @lport: The local port to check link on
+ *
+ * Returns: 0 if link is UP and OK, -1 if not
+ *
+ */
+int fcoe_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct net_device *netdev = port->fcoe->netdev;
+
+ if (netif_oper_up(netdev))
+ return 0;
+ return -1;
+}
+
+/**
* fcoe_percpu_clean() - Clear all pending skbs for an local port
* @lport: The local port whose skbs are to be cleared
*
@@ -2631,3 +2671,25 @@ static void fcoe_get_lesb(struct fc_lport *lport,
lesb->lesb_miss_fka = htonl(mdac);
lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
}
+
+/**
+ * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
+ * @lport: the local port
+ * @port_id: the port ID
+ * @fp: the received frame, if any, that caused the port_id to be set.
+ *
+ * This routine handles the case where we received a FLOGI and are
+ * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
+ * so it can set the non-mapped mode and gateway address.
+ *
+ * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
+ */
+static void fcoe_set_port_id(struct fc_lport *lport,
+ u32 port_id, struct fc_frame *fp)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->fcoe;
+
+ if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
+ fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+}
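
The fcoe.c hunks above replace the removed fc_lport_get_stats() helper with direct per-CPU statistics updates bracketed by get_cpu()/put_cpu(), so the counter is bumped on the local CPU with preemption disabled only for the update itself. A minimal sketch of that pattern, reusing the struct fcoe_dev_stats/ErrorFrames names seen in the diff:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <scsi/libfc.h>

/* Bump the error-frame counter on the local CPU's statistics block. */
static void count_error_frame(struct fc_lport *lport)
{
	struct fcoe_dev_stats *stats;

	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	stats->ErrorFrames++;
	put_cpu();
}
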
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 3440da48d16..50aaa4bcfc5 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -51,7 +51,7 @@ MODULE_LICENSE("GPL v2");
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
static void fcoe_ctlr_timeout(unsigned long);
-static void fcoe_ctlr_link_work(struct work_struct *);
+static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
@@ -116,7 +116,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
spin_lock_init(&fip->lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
- INIT_WORK(&fip->link_work, fcoe_ctlr_link_work);
+ INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
}
@@ -164,7 +164,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
fcoe_ctlr_reset_fcfs(fip);
spin_unlock_bh(&fip->lock);
del_timer_sync(&fip->timer);
- cancel_work_sync(&fip->link_work);
+ cancel_work_sync(&fip->timer_work);
}
EXPORT_SYMBOL(fcoe_ctlr_destroy);
@@ -257,14 +257,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
{
spin_lock_bh(&fip->lock);
if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
fc_linkup(fip->lp);
} else if (fip->state == FIP_ST_LINK_WAIT) {
fip->state = fip->mode;
- fip->last_link = 1;
- fip->link = 1;
spin_unlock_bh(&fip->lock);
if (fip->state == FIP_ST_AUTO)
LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
@@ -306,9 +302,7 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
LIBFCOE_FIP_DBG(fip, "link down.\n");
spin_lock_bh(&fip->lock);
fcoe_ctlr_reset(fip);
- link_dropped = fip->link;
- fip->link = 0;
- fip->last_link = 0;
+ link_dropped = fip->state != FIP_ST_LINK_WAIT;
fip->state = FIP_ST_LINK_WAIT;
spin_unlock_bh(&fip->lock);
@@ -349,7 +343,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
fcf = fip->sel_fcf;
lp = fip->lp;
- if (!fcf || !fc_host_port_id(lp->host))
+ if (!fcf || !lp->port_id)
return;
len = sizeof(*kal) + ports * sizeof(*vn);
@@ -380,8 +374,8 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
- hton24(vn->fd_fc_id, fc_host_port_id(lp->host));
- put_unaligned_be64(lp->wwpn, &vn->fd_wwpn);
+ hton24(vn->fd_fc_id, lport->port_id);
+ put_unaligned_be64(lport->wwpn, &vn->fd_wwpn);
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
@@ -445,13 +439,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
- memset(mac, 0, sizeof(mac));
+ memset(mac, 0, sizeof(*mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
- if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC)
+ if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
- else if (fip->spma)
+ } else if (fip_flags & FIP_FL_SPMA) {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
+ } else {
+ LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
+ /* FPMA only FLOGI must leave the MAC desc set to all 0s */
+ }
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
@@ -556,7 +555,7 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
* fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
* @fip: The FCoE controller to free FCFs on
*
- * Called with lock held.
+ * Called with lock held and preemption disabled.
*
* An FCF is considered old if we have missed three advertisements.
* That is, there have been no valid advertisement from it for three
@@ -573,17 +572,20 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
struct fcoe_fcf *next;
unsigned long sel_time = 0;
unsigned long mda_time = 0;
+ struct fcoe_dev_stats *stats;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
mda_time = fcf->fka_period + (fcf->fka_period >> 1);
if ((fip->sel_fcf == fcf) &&
(time_after(jiffies, fcf->time + mda_time))) {
mod_timer(&fip->timer, jiffies + mda_time);
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->MissDiscAdvCount++;
printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
- "Advertisement for fab %llx count %lld\n",
+ "Advertisement for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
- fc_lport_get_stats(fip->lp)->MissDiscAdvCount);
+ stats->MissDiscAdvCount);
}
if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
@@ -593,7 +595,9 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
WARN_ON(!fip->fcf_count);
fip->fcf_count--;
kfree(fcf);
- fc_lport_get_stats(fip->lp)->VLinkFailureCount++;
+ stats = per_cpu_ptr(fip->lp->dev_stats,
+ smp_processor_id());
+ stats->VLinkFailureCount++;
} else if (fcoe_ctlr_mtu_valid(fcf) &&
(!sel_time || time_before(sel_time, fcf->time))) {
sel_time = fcf->time;
@@ -776,7 +780,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found) {
- LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n",
+ LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
+ "map %x val %d\n",
fcf->fabric_name, fcf->fc_map, mtu_valid);
}
@@ -906,9 +911,10 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_eof(fp) = FC_EOF_T;
fr_dev(fp) = lport;
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
+ put_cpu();
fc_exch_recv(lport, fp);
return;
@@ -942,9 +948,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
u32 desc_mask;
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf)
- return;
- if (!fcf || !fc_host_port_id(lport->host))
+
+ if (!fcf || !lport->port_id)
return;
/*
@@ -982,8 +987,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
if (compare_ether_addr(vp->fd_mac,
fip->get_src_addr(lport)) == 0 &&
get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
- ntoh24(vp->fd_fc_id) ==
- fc_host_port_id(lport->host))
+ ntoh24(vp->fd_fc_id) == lport->port_id)
desc_mask &= ~BIT(FIP_DT_VN_ID);
break;
default:
@@ -1006,7 +1010,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
spin_lock_bh(&fip->lock);
- fc_lport_get_stats(lport)->VLinkFailureCount++;
+ per_cpu_ptr(lport->dev_stats,
+ smp_processor_id())->VLinkFailureCount++;
fcoe_ctlr_reset(fip);
spin_unlock_bh(&fip->lock);
@@ -1102,15 +1107,17 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
struct fcoe_fcf *best = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
- LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x "
- "val %d\n", fcf->fabric_name, fcf->vfid,
+ LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
+ "VFID %d map %x val %d\n",
+ fcf->fabric_name, fcf->vfid,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
if (!fcoe_ctlr_fcf_usable(fcf)) {
- LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid "
- "%savailable\n", fcf->fabric_name,
- fcf->fc_map, (fcf->flags & FIP_FL_SOL)
- ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
- ? "" : "un");
+ LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
+ "map %x %svalid %savailable\n",
+ fcf->fabric_name, fcf->fc_map,
+ (fcf->flags & FIP_FL_SOL) ? "" : "in",
+ (fcf->flags & FIP_FL_AVAIL) ?
+ "" : "un");
continue;
}
if (!best) {
@@ -1175,7 +1182,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
"Starting FCF discovery.\n",
fip->lp->host->host_no);
fip->reset_req = 1;
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
}
}
@@ -1201,43 +1208,31 @@ static void fcoe_ctlr_timeout(unsigned long arg)
mod_timer(&fip->timer, next_timer);
}
if (fip->send_ctlr_ka || fip->send_port_ka)
- schedule_work(&fip->link_work);
+ schedule_work(&fip->timer_work);
spin_unlock_bh(&fip->lock);
}
/**
- * fcoe_ctlr_link_work() - Worker thread function for link changes
+ * fcoe_ctlr_timer_work() - Worker thread function for timer work
* @work: Handle to a FCoE controller
*
- * See if the link status has changed and if so, report it.
- *
- * This is here because fc_linkup() and fc_linkdown() must not
+ * Sends keep-alives and resets which must not
* be called from the timer directly, since they use a mutex.
*/
-static void fcoe_ctlr_link_work(struct work_struct *work)
+static void fcoe_ctlr_timer_work(struct work_struct *work)
{
struct fcoe_ctlr *fip;
struct fc_lport *vport;
u8 *mac;
- int link;
- int last_link;
int reset;
- fip = container_of(work, struct fcoe_ctlr, link_work);
+ fip = container_of(work, struct fcoe_ctlr, timer_work);
spin_lock_bh(&fip->lock);
- last_link = fip->last_link;
- link = fip->link;
- fip->last_link = link;
reset = fip->reset_req;
fip->reset_req = 0;
spin_unlock_bh(&fip->lock);
- if (last_link != link) {
- if (link)
- fc_linkup(fip->lp);
- else
- fc_linkdown(fip->lp);
- } else if (reset && link)
+ if (reset)
fc_lport_reset(fip->lp);
if (fip->send_ctlr_ka) {
@@ -1334,9 +1329,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
memcpy(fip->dest_addr, sa, ETH_ALEN);
fip->map_dest = 0;
- if (fip->state == FIP_ST_NON_FIP)
- LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, "
- "using non-FIP mode\n");
+ if (fip->state == FIP_ST_AUTO)
+ LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
+ "Setting non-FIP mode\n");
fip->state = FIP_ST_NON_FIP;
}
spin_unlock_bh(&fip->lock);
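
Editor's note (not part of the patch): the libfcoe hunks above replace fc_lport_get_stats() with direct per-CPU accesses, in two flavors: per_cpu_ptr() with smp_processor_id() where fip->lock is already held (preemption disabled), and get_cpu()/put_cpu() in the otherwise-preemptible ELS receive path. Below is a minimal sketch of the second pattern; the demo_* names and counter layout are illustrative only and are not taken from the patch.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct demo_stats {			/* hypothetical counter block */
	u64 RxFrames;
	u64 RxWords;
};

/*
 * Bump per-CPU counters from a preemptible path. The stats pointer would
 * come from alloc_percpu(struct demo_stats) at init time.
 */
static void demo_count_frame(struct demo_stats __percpu *stats,
			     unsigned int words)
{
	struct demo_stats *s;

	s = per_cpu_ptr(stats, get_cpu());	/* get_cpu() also disables preemption */
	s->RxFrames++;
	s->RxWords += words;
	put_cpu();				/* re-enable preemption */
}
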
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3966c71d009..19338e0ba2c 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.98"
+#define DRV_VERSION "1.4.0.145"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -45,7 +45,7 @@
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
-#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */
+
/*
* Tag bits used for special requests.
*/
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5259888fbfb..2b48d79bad9 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -617,7 +617,7 @@ void fnic_flush_tx(struct fnic *fnic)
struct sk_buff *skb;
struct fc_frame *fp;
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ while ((skb = skb_dequeue(&fnic->tx_queue))) {
fp = (struct fc_frame *)skb;
fnic_send_frame(fnic, fp);
}
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 97b212570bc..265e73d9cd6 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,7 +556,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
}
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FNIC_MAX_CMD_LEN;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
fnic_get_res_counts(fnic);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 35a4b3073ec..f672d6213ee 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -180,8 +180,8 @@ static const char *gdth_ctr_name(gdth_ha_str *ha);
static int gdth_open(struct inode *inode, struct file *filep);
static int gdth_close(struct inode *inode, struct file *filep);
-static int gdth_ioctl(struct inode *inode, struct file *filep,
- unsigned int cmd, unsigned long arg);
+static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg);
static void gdth_flush(gdth_ha_str *ha);
static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
@@ -369,7 +369,7 @@ MODULE_LICENSE("GPL");
/* ioctl interface */
static const struct file_operations gdth_fops = {
- .ioctl = gdth_ioctl,
+ .unlocked_ioctl = gdth_unlocked_ioctl,
.open = gdth_open,
.release = gdth_close,
};
@@ -3842,7 +3842,7 @@ int __init option_setup(char *str)
TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
- while (cur && isdigit(*cur) && i <= MAXHA) {
+ while (cur && isdigit(*cur) && i < MAXHA) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL) cur++;
}
@@ -4462,8 +4462,7 @@ free_fail:
return rc;
}
-static int gdth_ioctl(struct inode *inode, struct file *filep,
- unsigned int cmd, unsigned long arg)
+static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
gdth_ha_str *ha;
Scsi_Cmnd *scp;
@@ -4611,6 +4610,17 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
return 0;
}
+static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = gdth_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
/* flush routine */
static void gdth_flush(gdth_ha_str *ha)
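
Editor's note (not part of the patch): the gdth hunks above are the usual big-kernel-lock push-down: the fops .ioctl entry goes away and an .unlocked_ioctl wrapper takes the BKL explicitly around the old handler. A sketch of the same shape for a hypothetical character device follows; all demo_* names are made up for illustration.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* Original handler, unchanged apart from losing the inode argument. */
static int demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return 0;	/* real command dispatch would live here */
}

/* New entry point: take the BKL explicitly to keep the old .ioctl semantics. */
static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = demo_ioctl(filp, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_unlocked_ioctl,
};
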
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 48f406850c6..2ce26eb7a1e 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,229 +1,214 @@
#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/zorro.h>
-#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <linux/spinlock.h>
#include "scsi.h"
-#include <scsi/scsi_host.h>
#include "wd33c93.h"
#include "gvp11.h"
-#include<linux/stat.h>
-#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
+#define CHECK_WD33C93
-static irqreturn_t gvp11_intr (int irq, void *_instance)
+struct gvp11_hostdata {
+ struct WD33C93_hostdata wh;
+ struct gvp11_scsiregs *regs;
+};
+
+static irqreturn_t gvp11_intr(int irq, void *data)
{
- unsigned long flags;
- unsigned int status;
- struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
-
- status = DMA(instance)->CNTR;
- if (!(status & GVP11_DMAC_INT_PENDING))
- return IRQ_NONE;
-
- spin_lock_irqsave(instance->host_lock, flags);
- wd33c93_intr(instance);
- spin_unlock_irqrestore(instance->host_lock, flags);
- return IRQ_HANDLED;
+ struct Scsi_Host *instance = data;
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ unsigned int status = hdata->regs->CNTR;
+ unsigned long flags;
+
+ if (!(status & GVP11_DMAC_INT_PENDING))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(instance->host_lock, flags);
+ wd33c93_intr(instance);
+ spin_unlock_irqrestore(instance->host_lock, flags);
+ return IRQ_HANDLED;
}
static int gvp11_xfer_mask = 0;
-void gvp11_setup (char *str, int *ints)
+void gvp11_setup(char *str, int *ints)
{
- gvp11_xfer_mask = ints[1];
+ gvp11_xfer_mask = ints[1];
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
- int bank_mask;
- static int scsi_alloc_out_of_range = 0;
-
- /* use bounce buffer if the physical address is bad */
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask)
- {
- HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
- & ~0x1ff;
-
- if( !scsi_alloc_out_of_range ) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- kmalloc (HDATA(cmd->device->host)->dma_bounce_len, GFP_KERNEL);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_SCSI_ALLOCED;
- }
+ struct Scsi_Host *instance = cmd->device->host;
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct gvp11_scsiregs *regs = hdata->regs;
+ unsigned short cntr = GVP11_DMAC_INT_ENABLE;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ int bank_mask;
+ static int scsi_alloc_out_of_range = 0;
+
+ /* use bounce buffer if the physical address is bad */
+ if (addr & wh->dma_xfer_mask) {
+ wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+
+ if (!scsi_alloc_out_of_range) {
+ wh->dma_bounce_buffer =
+ kmalloc(wh->dma_bounce_len, GFP_KERNEL);
+ wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
+ }
- if (scsi_alloc_out_of_range ||
- !HDATA(cmd->device->host)->dma_bounce_buffer) {
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
+ if (scsi_alloc_out_of_range ||
+ !wh->dma_bounce_buffer) {
+ wh->dma_bounce_buffer =
+ amiga_chip_alloc(wh->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
+ wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
-
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask) {
- /* fall back to Chip RAM if address out of range */
- if( HDATA(cmd->device->host)->dma_buffer_pool == BUF_SCSI_ALLOCED) {
- kfree (HDATA(cmd->device->host)->dma_bounce_buffer);
- scsi_alloc_out_of_range = 1;
- } else {
- amiga_chip_free (HDATA(cmd->device->host)->dma_bounce_buffer);
- }
-
- HDATA(cmd->device->host)->dma_bounce_buffer =
- amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len,
- "GVP II SCSI Bounce Buffer");
-
- if(!HDATA(cmd->device->host)->dma_bounce_buffer)
- {
- HDATA(cmd->device->host)->dma_bounce_len = 0;
- return 1;
- }
-
- addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer);
- HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED;
- }
-
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy (HDATA(cmd->device->host)->dma_bounce_buffer,
- cmd->SCp.ptr, cmd->SCp.this_residual);
+ /* check if the address of the bounce buffer is OK */
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+
+ if (addr & wh->dma_xfer_mask) {
+ /* fall back to Chip RAM if address out of range */
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ kfree(wh->dma_bounce_buffer);
+ scsi_alloc_out_of_range = 1;
+ } else {
+ amiga_chip_free(wh->dma_bounce_buffer);
+ }
+
+ wh->dma_bounce_buffer =
+ amiga_chip_alloc(wh->dma_bounce_len,
+ "GVP II SCSI Bounce Buffer");
+
+ if (!wh->dma_bounce_buffer) {
+ wh->dma_bounce_len = 0;
+ return 1;
+ }
+
+ addr = virt_to_bus(wh->dma_bounce_buffer);
+ wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
+ }
+
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
+ cmd->SCp.this_residual);
+ }
}
- }
- /* setup dma direction */
- if (!dir_in)
- cntr |= GVP11_DMAC_DIR_WRITE;
+ /* setup dma direction */
+ if (!dir_in)
+ cntr |= GVP11_DMAC_DIR_WRITE;
- HDATA(cmd->device->host)->dma_dir = dir_in;
- DMA(cmd->device->host)->CNTR = cntr;
+ wh->dma_dir = dir_in;
+ regs->CNTR = cntr;
- /* setup DMA *physical* address */
- DMA(cmd->device->host)->ACR = addr;
+ /* setup DMA *physical* address */
+ regs->ACR = addr;
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
- if ((bank_mask = (~HDATA(cmd->device->host)->dma_xfer_mask >> 18) & 0x01c0))
- DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
+ bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
+ if (bank_mask)
+ regs->BANK = bank_mask & (addr >> 18);
- /* start DMA */
- DMA(cmd->device->host)->ST_DMA = 1;
+ /* start DMA */
+ regs->ST_DMA = 1;
- /* return success */
- return 0;
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
- /* stop DMA */
- DMA(instance)->SP_DMA = 1;
- /* remove write bit from CONTROL bits */
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
-
- /* copy from a bounce buffer, if necessary */
- if (status && HDATA(instance)->dma_bounce_buffer) {
- if (HDATA(instance)->dma_dir && SCpnt)
- memcpy (SCpnt->SCp.ptr,
- HDATA(instance)->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
-
- if (HDATA(instance)->dma_buffer_pool == BUF_SCSI_ALLOCED)
- kfree (HDATA(instance)->dma_bounce_buffer);
- else
- amiga_chip_free(HDATA(instance)->dma_bounce_buffer);
-
- HDATA(instance)->dma_bounce_buffer = NULL;
- HDATA(instance)->dma_bounce_len = 0;
- }
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+ struct WD33C93_hostdata *wh = &hdata->wh;
+ struct gvp11_scsiregs *regs = hdata->regs;
+
+ /* stop DMA */
+ regs->SP_DMA = 1;
+ /* remove write bit from CONTROL bits */
+ regs->CNTR = GVP11_DMAC_INT_ENABLE;
+
+ /* copy from a bounce buffer, if necessary */
+ if (status && wh->dma_bounce_buffer) {
+ if (wh->dma_dir && SCpnt)
+ memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
+ SCpnt->SCp.this_residual);
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ kfree(wh->dma_bounce_buffer);
+ else
+ amiga_chip_free(wh->dma_bounce_buffer);
+
+ wh->dma_bounce_buffer = NULL;
+ wh->dma_bounce_len = 0;
+ }
}
-#define CHECK_WD33C93
-
-int __init gvp11_detect(struct scsi_host_template *tpnt)
+static int gvp11_bus_reset(struct scsi_cmnd *cmd)
{
- static unsigned char called = 0;
- struct Scsi_Host *instance;
- unsigned long address;
- unsigned int epc;
- struct zorro_dev *z = NULL;
- unsigned int default_dma_xfer_mask;
- wd33c93_regs regs;
- int num_gvp11 = 0;
-#ifdef CHECK_WD33C93
- volatile unsigned char *sasr_3393, *scmd_3393;
- unsigned char save_sasr;
- unsigned char q, qq;
-#endif
-
- if (!MACH_IS_AMIGA || called)
- return 0;
- called = 1;
+ struct Scsi_Host *instance = cmd->device->host;
- tpnt->proc_name = "GVP11";
- tpnt->proc_info = &wd33c93_proc_info;
+ /* FIXME perform bus-specific reset */
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- /*
- * This should (hopefully) be the correct way to identify
- * all the different GVP SCSI controllers (except for the
- * SERIES I though).
- */
+ /* FIXME 2: shouldn't we no-op this function (return
+ FAILED), and fall back to host reset function,
+ wd33c93_host_reset ? */
- if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
- z->id == ZORRO_PROD_GVP_SERIES_II)
- default_dma_xfer_mask = ~0x00ffffff;
- else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
- z->id == ZORRO_PROD_GVP_A530_SCSI ||
- z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
- default_dma_xfer_mask = ~0x01ffffff;
- else if (z->id == ZORRO_PROD_GVP_A1291 ||
- z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
- default_dma_xfer_mask = ~0x07ffffff;
- else
- continue;
+ spin_lock_irq(instance->host_lock);
+ wd33c93_host_reset(cmd);
+ spin_unlock_irq(instance->host_lock);
- /*
- * Rumors state that some GVP ram boards use the same product
- * code as the SCSI controllers. Therefore if the board-size
- * is not 64KB we asume it is a ram board and bail out.
- */
- if (z->resource.end-z->resource.start != 0xffff)
- continue;
+ return SUCCESS;
+}
- address = z->resource.start;
- if (!request_mem_region(address, 256, "wd33c93"))
- continue;
+static struct scsi_host_template gvp11_scsi_template = {
+ .module = THIS_MODULE,
+ .name = "GVP Series II SCSI",
+ .proc_info = wd33c93_proc_info,
+ .proc_name = "GVP11",
+ .queuecommand = wd33c93_queuecommand,
+ .eh_abort_handler = wd33c93_abort,
+ .eh_bus_reset_handler = gvp11_bus_reset,
+ .eh_host_reset_handler = wd33c93_host_reset,
+ .can_queue = CAN_QUEUE,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = CMD_PER_LUN,
+ .use_clustering = DISABLE_CLUSTERING
+};
+static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
+{
#ifdef CHECK_WD33C93
+ volatile unsigned char *sasr_3393, *scmd_3393;
+ unsigned char save_sasr;
+ unsigned char q, qq;
/*
* These darn GVP boards are a problem - it can be tough to tell
@@ -231,37 +216,37 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
* ultimate Yet-Another-GVP-Detection-Hack in that it actually
* probes for a WD33c93 chip: If we find one, it's extremely
* likely that this card supports SCSI, regardless of Product_
- * Code, Board_Size, etc.
+ * Code, Board_Size, etc.
*/
- /* Get pointers to the presumed register locations and save contents */
+ /* Get pointers to the presumed register locations and save contents */
- sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
- scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
+ sasr_3393 = &regs->SASR;
+ scmd_3393 = &regs->SCMD;
save_sasr = *sasr_3393;
- /* First test the AuxStatus Reg */
+ /* First test the AuxStatus Reg */
- q = *sasr_3393; /* read it */
- if (q & 0x08) /* bit 3 should always be clear */
- goto release;
- *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
- if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
+ q = *sasr_3393; /* read it */
+ if (q & 0x08) /* bit 3 should always be clear */
+ return -ENODEV;
+ *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
+ if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
*sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
- }
+ return -ENODEV;
+ }
if (*sasr_3393 != q) { /* should still read the same */
*sasr_3393 = save_sasr; /* Oops - restore this byte */
- goto release;
- }
+ return -ENODEV;
+ }
if (*scmd_3393 != q) /* and so should the image at 0x1f */
- goto release;
-
+ return -ENODEV;
- /* Ok, we probably have a wd33c93, but let's check a few other places
- * for good measure. Make sure that this works for both 'A and 'B
- * chip versions.
- */
+ /*
+ * Ok, we probably have a wd33c93, but let's check a few other places
+ * for good measure. Make sure that this works for both 'A and 'B
+ * chip versions.
+ */
*sasr_3393 = WD_SCSI_STATUS;
q = *scmd_3393;
@@ -271,8 +256,8 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
qq = *scmd_3393;
*sasr_3393 = WD_SCSI_STATUS;
*scmd_3393 = q;
- if (qq != q) /* should be read only */
- goto release;
+ if (qq != q) /* should be read only */
+ return -ENODEV;
*sasr_3393 = 0x1e; /* this register is unimplemented */
q = *scmd_3393;
*sasr_3393 = 0x1e;
@@ -282,7 +267,7 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
*sasr_3393 = 0x1e;
*scmd_3393 = q;
if (qq != q || qq != 0xff) /* should be read only, all 1's */
- goto release;
+ return -ENODEV;
*sasr_3393 = WD_TIMEOUT_PERIOD;
q = *scmd_3393;
*sasr_3393 = WD_TIMEOUT_PERIOD;
@@ -291,110 +276,157 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
qq = *scmd_3393;
*sasr_3393 = WD_TIMEOUT_PERIOD;
*scmd_3393 = q;
- if (qq != (~q & 0xff)) /* should be read/write */
- goto release;
-#endif
-
- instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
- if(instance == NULL)
- goto release;
- instance->base = ZTWO_VADDR(address);
+ if (qq != (~q & 0xff)) /* should be read/write */
+ return -ENODEV;
+#endif /* CHECK_WD33C93 */
+
+ return 0;
+}
+
+static int __devinit gvp11_probe(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct Scsi_Host *instance;
+ unsigned long address;
+ int error;
+ unsigned int epc;
+ unsigned int default_dma_xfer_mask;
+ struct gvp11_hostdata *hdata;
+ struct gvp11_scsiregs *regs;
+ wd33c93_regs wdregs;
+
+ default_dma_xfer_mask = ent->driver_data;
+
+ /*
+ * Rumors state that some GVP ram boards use the same product
+ * code as the SCSI controllers. Therefore if the board-size
+ * is not 64KB we assume it is a ram board and bail out.
+ */
+ if (zorro_resource_len(z) != 0x10000)
+ return -ENODEV;
+
+ address = z->resource.start;
+ if (!request_mem_region(address, 256, "wd33c93"))
+ return -EBUSY;
+
+ regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
+
+ error = check_wd33c93(regs);
+ if (error)
+ goto fail_check_or_alloc;
+
+ instance = scsi_host_alloc(&gvp11_scsi_template,
+ sizeof(struct gvp11_hostdata));
+ if (!instance) {
+ error = -ENOMEM;
+ goto fail_check_or_alloc;
+ }
+
instance->irq = IRQ_AMIGA_PORTS;
instance->unique_id = z->slotaddr;
- if (gvp11_xfer_mask)
- HDATA(instance)->dma_xfer_mask = gvp11_xfer_mask;
- else
- HDATA(instance)->dma_xfer_mask = default_dma_xfer_mask;
-
+ regs->secret2 = 1;
+ regs->secret1 = 0;
+ regs->secret3 = 15;
+ while (regs->CNTR & GVP11_DMAC_BUSY)
+ ;
+ regs->CNTR = 0;
+ regs->BANK = 0;
- DMA(instance)->secret2 = 1;
- DMA(instance)->secret1 = 0;
- DMA(instance)->secret3 = 15;
- while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) ;
- DMA(instance)->CNTR = 0;
+ wdregs.SASR = &regs->SASR;
+ wdregs.SCMD = &regs->SCMD;
- DMA(instance)->BANK = 0;
+ hdata = shost_priv(instance);
+ if (gvp11_xfer_mask)
+ hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
+ else
+ hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
- epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
+ hdata->wh.no_sync = 0xff;
+ hdata->wh.fast = 0;
+ hdata->wh.dma_mode = CTRL_DMA;
+ hdata->regs = regs;
/*
* Check for 14MHz SCSI clock
*/
- regs.SASR = &(DMA(instance)->SASR);
- regs.SCMD = &(DMA(instance)->SCMD);
- HDATA(instance)->no_sync = 0xff;
- HDATA(instance)->fast = 0;
- HDATA(instance)->dma_mode = CTRL_DMA;
- wd33c93_init(instance, regs, dma_setup, dma_stop,
+ epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
+ wd33c93_init(instance, wdregs, dma_setup, dma_stop,
(epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
: WD33C93_FS_12_15);
- if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
- instance))
- goto unregister;
- DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
- num_gvp11++;
- continue;
-
-unregister:
- scsi_unregister(instance);
- wd33c93_release();
-release:
- release_mem_region(address, 256);
- }
+ error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
+ "GVP11 SCSI", instance);
+ if (error)
+ goto fail_irq;
- return num_gvp11;
-}
+ regs->CNTR = GVP11_DMAC_INT_ENABLE;
-static int gvp11_bus_reset(struct scsi_cmnd *cmd)
-{
- /* FIXME perform bus-specific reset */
-
- /* FIXME 2: shouldn't we no-op this function (return
- FAILED), and fall back to host reset function,
- wd33c93_host_reset ? */
+ error = scsi_add_host(instance, NULL);
+ if (error)
+ goto fail_host;
- spin_lock_irq(cmd->device->host->host_lock);
- wd33c93_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ zorro_set_drvdata(z, instance);
+ scsi_scan_host(instance);
+ return 0;
- return SUCCESS;
+fail_host:
+ free_irq(IRQ_AMIGA_PORTS, instance);
+fail_irq:
+ scsi_host_put(instance);
+fail_check_or_alloc:
+ release_mem_region(address, 256);
+ return error;
}
+static void __devexit gvp11_remove(struct zorro_dev *z)
+{
+ struct Scsi_Host *instance = zorro_get_drvdata(z);
+ struct gvp11_hostdata *hdata = shost_priv(instance);
+
+ hdata->regs->CNTR = 0;
+ scsi_remove_host(instance);
+ free_irq(IRQ_AMIGA_PORTS, instance);
+ scsi_host_put(instance);
+ release_mem_region(z->resource.start, 256);
+}
-#define HOSTS_C
-
-#include "gvp11.h"
+ /*
+ * This should (hopefully) be the correct way to identify
+ * all the different GVP SCSI controllers (except for the
+ * SERIES I though).
+ */
-static struct scsi_host_template driver_template = {
- .proc_name = "GVP11",
- .name = "GVP Series II SCSI",
- .detect = gvp11_detect,
- .release = gvp11_release,
- .queuecommand = wd33c93_queuecommand,
- .eh_abort_handler = wd33c93_abort,
- .eh_bus_reset_handler = gvp11_bus_reset,
- .eh_host_reset_handler = wd33c93_host_reset,
- .can_queue = CAN_QUEUE,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = CMD_PER_LUN,
- .use_clustering = DISABLE_CLUSTERING
+static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
+ { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
+ { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
+ { ZORRO_PROD_GVP_A1291, ~0x07ffffff },
+ { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
+ { 0 }
};
+MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
+static struct zorro_driver gvp11_driver = {
+ .name = "gvp11",
+ .id_table = gvp11_zorro_tbl,
+ .probe = gvp11_probe,
+ .remove = __devexit_p(gvp11_remove),
+};
-#include "scsi_module.c"
+static int __init gvp11_init(void)
+{
+ return zorro_register_driver(&gvp11_driver);
+}
+module_init(gvp11_init);
-int gvp11_release(struct Scsi_Host *instance)
+static void __exit gvp11_exit(void)
{
-#ifdef MODULE
- DMA(instance)->CNTR = 0;
- release_mem_region(ZTWO_PADDR(instance->base), 256);
- free_irq(IRQ_AMIGA_PORTS, instance);
- wd33c93_release();
-#endif
- return 1;
+ zorro_unregister_driver(&gvp11_driver);
}
+module_exit(gvp11_exit);
+MODULE_DESCRIPTION("GVP Series II SCSI");
MODULE_LICENSE("GPL");
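
Editor's note (not part of the patch): the core of the gvp11 conversion above is dropping the HDATA()/DMA() cast macros in favor of a real per-host structure allocated by scsi_host_alloc() and reached through shost_priv(). A minimal sketch of that allocation pattern, with purely hypothetical demo_* names:

#include <linux/errno.h>
#include <scsi/scsi_host.h>

struct demo_hostdata {			/* hypothetical per-host private state */
	void *regs;
	int dma_dir;
};

/*
 * scsi_host_alloc() reserves privsize bytes behind the Scsi_Host;
 * shost_priv() returns that area, replacing open-coded hostdata casts.
 */
static int demo_attach(struct scsi_host_template *tpnt, void *regs)
{
	struct Scsi_Host *instance;
	struct demo_hostdata *hdata;

	instance = scsi_host_alloc(tpnt, sizeof(struct demo_hostdata));
	if (!instance)
		return -ENOMEM;

	hdata = shost_priv(instance);
	hdata->regs = regs;
	hdata->dma_dir = 0;

	/* ... request the IRQ, then scsi_add_host() and scsi_scan_host() ... */
	return 0;
}
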
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index bf22859a503..852913cde5d 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -11,42 +11,37 @@
#include <linux/types.h>
-int gvp11_detect(struct scsi_host_template *);
-int gvp11_release(struct Scsi_Host *);
-
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
-#ifndef HOSTS_C
-
/*
* if the transfer address ANDed with this results in a non-zero
* result, then we can't use DMA.
*/
-#define GVP11_XFER_MASK (0xff000001)
-
-typedef struct {
- unsigned char pad1[64];
- volatile unsigned short CNTR;
- unsigned char pad2[31];
- volatile unsigned char SASR;
- unsigned char pad3;
- volatile unsigned char SCMD;
- unsigned char pad4[4];
- volatile unsigned short BANK;
- unsigned char pad5[6];
- volatile unsigned long ACR;
- volatile unsigned short secret1; /* store 0 here */
- volatile unsigned short ST_DMA;
- volatile unsigned short SP_DMA;
- volatile unsigned short secret2; /* store 1 here */
- volatile unsigned short secret3; /* store 15 here */
-} gvp11_scsiregs;
+#define GVP11_XFER_MASK (0xff000001)
+
+struct gvp11_scsiregs {
+ unsigned char pad1[64];
+ volatile unsigned short CNTR;
+ unsigned char pad2[31];
+ volatile unsigned char SASR;
+ unsigned char pad3;
+ volatile unsigned char SCMD;
+ unsigned char pad4[4];
+ volatile unsigned short BANK;
+ unsigned char pad5[6];
+ volatile unsigned long ACR;
+ volatile unsigned short secret1; /* store 0 here */
+ volatile unsigned short ST_DMA;
+ volatile unsigned short SP_DMA;
+ volatile unsigned short secret2; /* store 1 here */
+ volatile unsigned short secret3; /* store 15 here */
+};
/* bits in CNTR */
#define GVP11_DMAC_BUSY (1<<0)
@@ -54,6 +49,4 @@ typedef struct {
#define GVP11_DMAC_INT_ENABLE (1<<3)
#define GVP11_DMAC_DIR_WRITE (1<<4)
-#endif /* else def HOSTS_C */
-
#endif /* GVP11_H */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 183d3a43c28..c016426b31b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2708,14 +2708,6 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
break;
-
- case HPSA_READ_CAPACITY:
- c->Request.CDBLen = 10;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
case HPSA_CACHE_FLUSH:
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 56fb9827681..78de9b6d1e0 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -152,21 +152,6 @@ struct SenseSubsystem_info {
u8 reserved1[1108];
};
-#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
-struct ReadCapdata {
- u8 total_size[4]; /* Total size in blocks */
- u8 block_size[4]; /* Size of blocks in bytes */
-};
-
-#if 0
-/* 12 byte commands not implemented in firmware yet. */
-#define HPSA_READ 0xa8
-#define HPSA_WRITE 0xaa
-#endif
-
-#define HPSA_READ 0x28 /* Read(10) */
-#define HPSA_WRITE 0x2a /* Write(10) */
-
/* BMIC commands */
#define BMIC_READ 0x26
#define BMIC_WRITE 0x27
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c2eea711a5c..fef49521cbc 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1157,7 +1157,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
- struct device_node *of_node = vhost->dev->archdata.of_node;
+ struct device_node *of_node = vhost->dev->of_node;
const char *location;
memset(login_info, 0, sizeof(*login_info));
@@ -2245,7 +2245,7 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
- signed long timeout = init_timeout * HZ;
+ signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
ENTER;
do {
@@ -2919,6 +2919,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -2928,7 +2929,7 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3013,6 +3014,7 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3035,6 +3037,7 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
+ rmb();
} else
crq = NULL;
@@ -3083,12 +3086,14 @@ static void ibmvfc_tasklet(void *data)
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
}
vio_enable_interrupts(vdev);
@@ -3096,10 +3101,12 @@ static void ibmvfc_tasklet(void *data)
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
+ wmb();
} else
done = 1;
}
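
Editor's note (not part of the patch): the ibmvfc hunks add rmb() after the valid-bit test in the CRQ walkers and wmb() after each crq->valid = 0, so payload reads cannot be speculated ahead of the valid check and the clear is published before the queue is polled again or interrupts are re-enabled. A sketch of that consumer pattern over a hypothetical DMA-visible ring; the demo_* names and the 0x80 valid bit are illustrative.

#include <linux/types.h>
#include <asm/system.h>		/* rmb()/wmb() in kernels of this vintage */

struct demo_entry {			/* hypothetical DMA-visible queue element */
	volatile u8 valid;
	u8 data[15];
};

/* Drain every valid entry, mirroring the barrier placement added above. */
static void demo_drain(struct demo_entry *ring, unsigned int size,
		       unsigned int *cur)
{
	struct demo_entry *ent;

	while ((ent = &ring[*cur])->valid & 0x80) {
		if (++(*cur) == size)
			*cur = 0;
		rmb();		/* don't read the payload before the valid bit */

		/* ... process ent->data ... */

		ent->valid = 0;
		wmb();		/* publish the clear before moving to the next slot */
	}
}
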
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index d25106a958d..7e9742764e4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -38,6 +38,7 @@
#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
(IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
#define IBMVFC_INIT_TIMEOUT 120
+#define IBMVFC_ABORT_WAIT_TIMEOUT 40
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 88bad0e81bd..aad35cc41e4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -932,7 +932,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
struct viosrp_capabilities *req;
struct srp_event_struct *evt_struct;
unsigned long flags;
- struct device_node *of_node = hostdata->dev->archdata.of_node;
+ struct device_node *of_node = hostdata->dev->of_node;
const char *location;
evt_struct = get_event_struct(&hostdata->pool);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 520461b9bc0..82ea4a8226b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
ioarcb->ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
- if (ipr_cmd->ioa_cfg->sis64)
+ if (ipr_cmd->ioa_cfg->sis64) {
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
- else {
+ ioasa64->u.gata.status = 0;
+ } else {
ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ ioasa->u.gata.status = 0;
}
- ioasa->ioasc = 0;
- ioasa->residual_data_len = 0;
- ioasa->u.gata.status = 0;
-
+ ioasa->hdr.ioasc = 0;
+ ioasa->hdr.residual_data_len = 0;
ipr_cmd->scsi_cmd = NULL;
ipr_cmd->qc = NULL;
ipr_cmd->sense_buffer[0] = 0;
@@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
list_del(&ipr_cmd->queue);
- ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
- ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = ipr_scsi_eh_done;
@@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
proto = cfgtew->u.cfgte64->proto;
res->res_flags = cfgtew->u.cfgte64->res_flags;
res->qmodel = IPR_QUEUEING_MODEL64(res);
- res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+ res->type = cfgtew->u.cfgte64->res_type;
memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
sizeof(res->res_path));
@@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_del(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
if (ioa_cfg->sis64)
@@ -3120,6 +3121,7 @@ restart:
#ifdef CONFIG_SCSI_IPR_TRACE
/**
* ipr_read_trace - Dump the adapter trace
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3129,7 +3131,7 @@ restart:
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_trace(struct kobject *kobj,
+static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3764,6 +3766,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
#ifdef CONFIG_SCSI_IPR_DUMP
/**
* ipr_read_dump - Dump the adapter
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3773,7 +3776,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_dump(struct kobject *kobj,
+static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -3927,6 +3930,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
/**
* ipr_write_dump - Setup dump state of adapter
+ * @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
@@ -3936,7 +3940,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_write_dump(struct kobject *kobj,
+static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -4295,7 +4299,7 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
res = (struct ipr_resource_entry *) sdev->hostdata;
if (res) {
if (res->sata_port)
- ata_port_disable(res->sata_port->ap);
+ res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
sdev->hostdata = NULL;
res->sdev = NULL;
res->sata_port = NULL;
@@ -4506,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
}
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
- ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
- memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
+ if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ else
+ memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ }
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
@@ -4765,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
scsi_cmd->cmnd[0]);
ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
- ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/*
* If the abort task timed out and we sent a bus reset, we will get
@@ -4809,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
/**
* ipr_handle_other_interrupt - Handle "other" interrupts
* @ioa_cfg: ioa config struct
- * @int_reg: interrupt register
*
* Return value:
* IRQ_NONE / IRQ_HANDLED
**/
-static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
- volatile u32 int_reg)
+static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
{
irqreturn_t rc = IRQ_HANDLED;
+ volatile u32 int_reg, int_mask_reg;
+
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
+
+ /* If an interrupt on the adapter did not occur, ignore it.
+ * Or in the case of SIS 64, check for a stage change interrupt.
+ */
+ if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
+ if (ioa_cfg->sis64) {
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+ /* clear stage change */
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ list_del(&ioa_cfg->reset_cmd->queue);
+ del_timer(&ioa_cfg->reset_cmd->timer);
+ ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+ return IRQ_HANDLED;
+ }
+ }
+
+ return IRQ_NONE;
+ }
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Mask the interrupt */
@@ -4878,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
unsigned long lock_flags = 0;
- volatile u32 int_reg, int_mask_reg;
+ volatile u32 int_reg;
u32 ioasc;
u16 cmd_index;
int num_hrrq = 0;
@@ -4893,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
return IRQ_NONE;
}
- int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
-
- /* If an interrupt on the adapter did not occur, ignore it.
- * Or in the case of SIS 64, check for a stage change interrupt.
- */
- if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
- if (ioa_cfg->sis64) {
- int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
- if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
-
- /* clear stage change */
- writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
- list_del(&ioa_cfg->reset_cmd->queue);
- del_timer(&ioa_cfg->reset_cmd->timer);
- ipr_reset_ioa_job(ioa_cfg->reset_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
- }
- }
-
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_NONE;
- }
-
while (1) {
ipr_cmd = NULL;
@@ -4937,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
- ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
@@ -4959,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
/* Clear the PCI interrupt */
do {
writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
@@ -4974,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
}
if (unlikely(rc == IRQ_NONE))
- rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+ rc = ipr_handle_other_interrupt(ioa_cfg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return rc;
@@ -5011,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
ipr_cmd->dma_use_sg = nseg;
+ ioarcb->data_transfer_length = cpu_to_be32(length);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+
if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
ioadl_flags = IPR_IOADL_FLAGS_WRITE;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
@@ -5132,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
scsi_cmd->result |= (DID_ERROR << 16);
@@ -5163,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -5171,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
- ioasa->ioasc = 0;
- ioasa->residual_data_len = 0;
+ ioasa->hdr.ioasc = 0;
+ ioasa->hdr.residual_data_len = 0;
if (ipr_cmd->ioa_cfg->sis64)
ioarcb->u.sis64_addr_data.data_ioadl_addr =
@@ -5197,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
ipr_erp_done(ipr_cmd);
@@ -5274,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
int i;
u16 data_len;
u32 ioasc, fd_ioasc;
- struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
__be32 *ioasa_data = (__be32 *)ioasa;
int error_index;
- ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
- fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
+ ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
+ fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
if (0 == ioasc)
return;
@@ -5294,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
/* Don't log an error if the IOA already logged one */
- if (ioasa->ilid != 0)
+ if (ioasa->hdr.ilid != 0)
return;
if (!ipr_is_gscsi(res))
@@ -5306,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
- if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
+ data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
+ if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
+ data_len = sizeof(struct ipr_ioasa64);
+ else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
data_len = sizeof(struct ipr_ioasa);
- else
- data_len = be16_to_cpu(ioasa->ret_stat_len);
ipr_err("IOASA Dump:\n");
@@ -5335,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
u32 failing_lba;
u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
- struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
- u32 ioasc = be32_to_cpu(ioasa->ioasc);
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
@@ -5379,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
/* Illegal request */
if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
- (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
+ (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
sense_buf[7] = 10; /* additional length */
/* IOARCB was in error */
@@ -5390,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
sense_buf[16] =
((IPR_FIELD_POINTER_MASK &
- be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
+ be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
sense_buf[17] =
(IPR_FIELD_POINTER_MASK &
- be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
+ be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
} else {
if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
if (ipr_is_vset_device(res))
@@ -5425,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+ struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+ struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
- if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
+ if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
return 0;
- memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
- min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
- SCSI_SENSE_BUFFERSIZE));
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
+ min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
+ SCSI_SENSE_BUFFERSIZE));
+ else
+ memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
+ min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
+ SCSI_SENSE_BUFFERSIZE));
return 1;
}
@@ -5452,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
if (!res) {
@@ -5544,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
+ scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(ipr_cmd->scsi_cmd);
@@ -5751,13 +5768,13 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
rc = ipr_device_reset(ioa_cfg, res);
if (rc) {
- ata_port_disable(ap);
+ ap->link.device[0].class = ATA_DEV_NONE;
goto out_unlock;
}
ap->link.device[0].class = res->ata_class;
if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
- ata_port_disable(ap);
+ ap->link.device[0].class = ATA_DEV_NONE;
out_unlock:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5836,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_resource_entry *res = sata_port->res;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
- sizeof(struct ipr_ioasa_gata));
+ if (ipr_cmd->ioa_cfg->sis64)
+ memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ else
+ memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
- if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
+ if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
- qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+ qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
else
- qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+ qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
ata_qc_complete(qc);
}
@@ -6517,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dev_err(&ioa_cfg->pdev->dev,
"0x%02X failed with IOASC: 0x%08X\n",
@@ -6541,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
ipr_cmd->job_step = ipr_set_supported_devs;
@@ -6631,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
- u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
@@ -6703,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
list_move_tail(&res->queue, &old_res);
if (ioa_cfg->sis64)
- entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
+ entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
else
entries = ioa_cfg->u.cfg_table->hdr.num_entries;
@@ -6789,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
+ ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
@@ -7119,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
/* sanity check the stage_time value */
- if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
+ if (stage_time == 0)
+ stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
+ else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
@@ -7162,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
+ volatile u64 maskval;
ENTER;
ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
ioa_cfg->allow_interrupts = 1;
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
@@ -7180,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
/* Enable destructive diagnostics on IOA */
writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
- writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
- if (ioa_cfg->sis64)
- writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+ if (ioa_cfg->sis64) {
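+		/* SIS-64 takes one 64-bit write: IPL stage change in the upper word, operational interrupts in the lower */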
+ maskval = IPR_PCII_IPL_STAGE_CHANGE;
+ maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
+ writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
+ } else
+ writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
@@ -7329,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
rc = pci_restore_state(ioa_cfg->pdev);
if (rc != PCIBIOS_SUCCESSFUL) {
- ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
return IPR_RC_JOB_CONTINUE;
}
if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
- ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
return IPR_RC_JOB_CONTINUE;
}
@@ -7361,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
}
}
- ENTER;
+ LEAVE;
return IPR_RC_JOB_CONTINUE;
}
@@ -7403,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
if (rc != PCIBIOS_SUCCESSFUL) {
pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
- ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+ ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
rc = IPR_RC_JOB_CONTINUE;
} else {
ipr_cmd->job_step = ipr_reset_bist_done;
@@ -7662,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
do {
- ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (ioa_cfg->reset_cmd != ipr_cmd) {
/*
@@ -8045,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
- cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
} else {
ioarcb->write_ioadl_addr =
cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
ioarcb->ioasa_host_pci_addr =
- cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
}
ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
ipr_cmd->cmd_index = i;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4c267b5e0b9..9ecd2259eb3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -244,6 +244,7 @@
#define IPR_RUNTIME_RESET 0x40000000
#define IPR_IPL_INIT_MIN_STAGE_TIME 5
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
#define IPR_IPL_INIT_STAGE_MASK 0xff000000
@@ -613,7 +614,7 @@ struct ipr_auto_sense {
__be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)];
};
-struct ipr_ioasa {
+struct ipr_ioasa_hdr {
__be32 ioasc;
#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
@@ -645,6 +646,25 @@ struct ipr_ioasa {
#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
#define IPR_FIELD_POINTER_MASK 0x0000ffff
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa {
+ struct ipr_ioasa_hdr hdr;
+
+ union {
+ struct ipr_ioasa_vset vset;
+ struct ipr_ioasa_af_dasd dasd;
+ struct ipr_ioasa_gpdd gpdd;
+ struct ipr_ioasa_gata gata;
+ } u;
+
+ struct ipr_auto_sense auto_sense;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa64 {
+ struct ipr_ioasa_hdr hdr;
+ u8 fd_res_path[8];
+
union {
struct ipr_ioasa_vset vset;
struct ipr_ioasa_af_dasd dasd;
@@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced {
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_ff_error {
- __be32 ioa_data[502];
+ __be32 ioa_data[758];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_type_01_error {
@@ -1181,7 +1201,7 @@ struct ipr_resource_entry {
u8 flags;
__be16 res_flags;
- __be32 type;
+ u8 type;
u8 qmodel;
struct ipr_std_inq_data std_inq_data;
@@ -1464,7 +1484,10 @@ struct ipr_cmnd {
struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
struct ipr_ata64_ioadl ata_ioadl;
} i;
- struct ipr_ioasa ioasa;
+ union {
+ struct ipr_ioasa ioasa;
+ struct ipr_ioasa64 ioasa64;
+ } s;
struct list_head queue;
struct scsi_cmnd *scsi_cmd;
struct ata_queued_cmd *qc;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 02143af7c1a..fec47de7253 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -206,8 +206,10 @@ static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
}
static void
-iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct sock *sk = tcp_sw_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -555,7 +557,7 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
return;
sock_hold(sock->sk);
- iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
+ iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
spin_lock_bh(&session->lock);
@@ -599,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- if (sock->sk->sk_sleep) {
- sock->sk->sk_err = EIO;
- wake_up_interruptible(sock->sk->sk_sleep);
- }
+ sock->sk->sk_err = EIO;
+ wake_up_interruptible(sk_sleep(sock->sk));
iscsi_conn_stop(cls_conn, flag);
iscsi_sw_tcp_release_conn(conn);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ca6b7bc64de..94644bad0ed 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -36,7 +36,6 @@ struct iscsi_sw_tcp_send {
};
struct iscsi_sw_tcp_conn {
- struct iscsi_conn *iscsi_conn;
struct socket *sock;
struct iscsi_sw_tcp_send out;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1087a7f18e8..c7985da8809 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -132,7 +132,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
switch (fmt) {
case ELS_ADDR_FMT_PORT:
FC_DISC_DBG(disc, "Port address format for port "
- "(%6x)\n", ntoh24(pp->rscn_fid));
+ "(%6.6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
@@ -440,7 +440,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
ids.port_id = ntoh24(np->fp_fid);
ids.port_name = ntohll(np->fp_wwpn);
- if (ids.port_id != fc_host_port_id(lport->host) &&
+ if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
rdata = lport->tt.rport_create(lport, ids.port_id);
if (rdata) {
@@ -449,7 +449,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
} else {
printk(KERN_WARNING "libfc: Failed to allocate "
"memory for the newly discovered port "
- "(%6x)\n", ids.port_id);
+ "(%6.6x)\n", ids.port_id);
error = -ENOMEM;
}
}
@@ -607,7 +607,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
- "Port-id %x wwpn %llx\n",
+ "Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
lport->tt.rport_logoff(rdata);
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 53748724f2c..e9412b710fa 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -63,7 +63,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
return NULL;
}
- fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
+ fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e5df0d4db67..104e0fba7c4 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -488,7 +488,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
spin_lock_bh(&ep->ex_lock);
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
- if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+ if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
return error;
@@ -676,9 +676,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = smp_processor_id();
+ cpu = get_cpu();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
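+	/* the per-cpu pool lock is held now, so preemption can be re-enabled */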
+ put_cpu();
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
@@ -734,19 +735,14 @@ err:
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
-static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
- struct fc_frame *fp)
+static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
- struct fc_exch *ep;
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if (!ema->match || ema->match(fp)) {
- ep = fc_exch_em_alloc(lport, ema->mp);
- if (ep)
- return ep;
- }
- }
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (!ema->match || ema->match(fp))
+ return fc_exch_em_alloc(lport, ema->mp);
return NULL;
}
@@ -920,13 +916,9 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
* Find or create the sequence.
*/
if (fc_sof_is_init(fr_sof(fp))) {
- sp = fc_seq_start_next(&ep->seq);
- if (!sp) {
- reject = FC_RJT_SEQ_XS; /* exchange shortage */
- goto rel;
- }
- sp->id = fh->fh_seq_id;
+ sp = &ep->seq;
sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
} else {
sp = &ep->seq;
if (sp->id != fh->fh_seq_id) {
@@ -1250,9 +1242,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = NULL;
struct fc_exch *ep = NULL;
- enum fc_sof sof;
- enum fc_eof eof;
- u32 f_ctl;
enum fc_pf_rjt_reason reject;
/* We can have the wrong fc_lport at this point with NPIV, which is a
@@ -1269,9 +1258,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
if (reject == FC_RJT_NONE) {
sp = fr_seq(fp); /* sequence will be held */
ep = fc_seq_exch(sp);
- sof = fr_sof(fp);
- eof = fr_eof(fp);
- f_ctl = ntoh24(fh->fh_f_ctl);
fc_seq_send_ack(sp, fp);
/*
@@ -1336,17 +1322,15 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
goto rel;
}
sof = fr_sof(fp);
+ sp = &ep->seq;
if (fc_sof_is_init(sof)) {
- sp = fc_seq_start_next(&ep->seq);
- sp->id = fh->fh_seq_id;
sp->ssb_stat |= SSB_ST_RESP;
- } else {
- sp = &ep->seq;
- if (sp->id != fh->fh_seq_id) {
- atomic_inc(&mp->stats.seq_not_found);
- goto rel;
- }
+ sp->id = fh->fh_seq_id;
+ } else if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ goto rel;
}
+
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp;
if (f_ctl & FC_FC_SEQ_INIT)
@@ -1763,7 +1747,6 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
fc_exch_done(sp);
goto out;
}
- sp = fc_seq_start_next(sp);
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->reca_cmd = ELS_LS_ACC;
@@ -1944,7 +1927,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
did = ep->sid;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
- fc_host_port_id(lport->host), FC_TYPE_ELS,
+ lport->port_id, FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 17396c708b0..ec1f66c4a9d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -97,7 +97,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
-static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -121,7 +121,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
#define FC_DATA_UNDRUN 7
#define FC_ERROR 8
#define FC_HRD_ERROR 9
-#define FC_CMD_TIME_OUT 10
+#define FC_CMD_RECOVERY 10
/*
* Error recovery timeout values.
@@ -446,9 +446,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
len = fr_len(fp) - sizeof(*fh);
buf = fc_frame_payload_get(fp, 0);
- /* if this I/O is ddped, update xfer len */
- fc_fcp_ddp_done(fsp);
-
+ /*
+ * if this I/O is ddped then clear it
+ * and initiate recovery since data
+ * frames are expected to be placed
+ * directly in that case.
+ */
+ if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
+ fc_fcp_ddp_done(fsp);
+ goto err;
+ }
if (offset + len > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
@@ -456,8 +463,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
goto crc_err;
FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
"data_len %x\n", len, offset, fsp->data_len);
- fc_fcp_retry_cmd(fsp);
- return;
+ goto err;
}
if (offset != fsp->xfer_len)
fsp->state |= FC_SRB_DISCONTIG;
@@ -478,13 +484,14 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->ErrorFrames++;
- /* FIXME - per cpu count, not total count! */
+ /* per cpu count, not total count, but OK for limit */
if (stats->InvalidCRCCount++ < 5)
printk(KERN_WARNING "libfc: CRC error on data "
- "frame for port (%6x)\n",
- fc_host_port_id(lport->host));
+ "frame for port (%6.6x)\n",
+ lport->port_id);
+ put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -493,7 +500,7 @@ crc_err:
* Otherwise, ignore it.
*/
if (fsp->state & FC_SRB_DISCONTIG)
- fc_fcp_retry_cmd(fsp);
+ goto err;
return;
}
}
@@ -509,6 +516,9 @@ crc_err:
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
fc_fcp_complete_locked(fsp);
+ return;
+err:
+ fc_fcp_recovery(fsp);
}
/**
@@ -834,8 +844,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* exit here
*/
return;
- } else
- goto err;
+ }
}
if (flags & FCP_SNS_LEN_VAL) {
snsl = ntohl(rp_ex->fr_sns_len);
@@ -885,7 +894,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
return;
}
fsp->status_code = FC_DATA_OVRRUN;
- FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
"len %x, data len %x\n",
fsp->rport->port_id,
fsp->xfer_len, expected_len, fsp->data_len);
@@ -1100,7 +1109,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
rpriv = rport->dd_data;
fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
@@ -1341,7 +1350,7 @@ static void fc_fcp_timeout(unsigned long data)
else if (fsp->state & FC_SRB_RCV_STATUS)
fc_fcp_complete_locked(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
fc_fcp_unlock_pkt(fsp);
@@ -1373,7 +1382,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
fr_seq(fp) = fsp->seq_ptr;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS,
+ rpriv->local_port->port_id, FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
fc_fcp_rec_resp, fsp,
@@ -1385,7 +1394,7 @@ retry:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
}
/**
@@ -1454,7 +1463,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
fc_fcp_retry_cmd(fsp);
break;
}
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
} else if (opcode == ELS_LS_ACC) {
@@ -1553,7 +1562,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
break;
default:
- FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
/* fall through */
@@ -1563,13 +1572,13 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
+ FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
fsp->rport->port_id, error, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1578,12 +1587,12 @@ out:
}
/**
- * fc_timeout_error() - Handler for fcp_pkt timeouts
- * @fsp: The FCP packt that has timed out
+ * fc_fcp_recovery() - Handler for fcp_pkt recovery
+ * @fsp: The FCP pkt that needs to be aborted
*/
-static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
{
- fsp->status_code = FC_CMD_TIME_OUT;
+ fsp->status_code = FC_CMD_RECOVERY;
fsp->cdb_status = 0;
fsp->io_status = 0;
/*
@@ -1631,7 +1640,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
srr->srr_rel_off = htonl(offset);
fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
- fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
@@ -1689,7 +1698,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
break;
case ELS_LS_RJT:
default:
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
}
fc_fcp_unlock_pkt(fsp);
@@ -1715,7 +1724,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
- fc_timeout_error(fsp);
+ fc_fcp_recovery(fsp);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
/* fall through */
@@ -1810,7 +1819,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
/*
* setup the data direction
*/
- stats = fc_lport_get_stats(lport);
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
@@ -1823,6 +1832,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
fsp->req_flags = 0;
stats->ControlRequests++;
}
+ put_cpu();
fsp->tgt_flags = rpriv->flags;
@@ -1907,6 +1917,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
}
break;
case FC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_ERROR\n");
sc_cmd->result = DID_ERROR << 16;
break;
case FC_DATA_UNDRUN:
@@ -1915,12 +1927,19 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
* scsi status is good but transport level
* underrun.
*/
- sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ?
- DID_OK : DID_ERROR) << 16;
+ if (fsp->state & FC_SRB_RCV_STATUS) {
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
+ " due to FC_DATA_UNDRUN (trans)\n");
+ sc_cmd->result = DID_ERROR << 16;
+ }
} else {
/*
* scsi got underrun, this is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_UNDRUN (scsi)\n");
CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
@@ -1929,12 +1948,16 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
/*
* overrun is an error
*/
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_OVRRUN\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
break;
- case FC_CMD_TIME_OUT:
+ case FC_CMD_RECOVERY:
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
break;
case FC_CMD_RESET:
@@ -1944,6 +1967,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_NO_CONNECT << 16);
break;
default:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to unknown error\n");
sc_cmd->result = (DID_ERROR << 16);
break;
}
@@ -2028,7 +2053,7 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
if (lport->state != LPORT_ST_READY)
return rc;
- FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id);
+ FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
if (fsp == NULL) {
@@ -2076,12 +2101,12 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
if (fc_fcp_lport_queue_ready(lport)) {
shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
- "on port (%6x)\n", fc_host_port_id(lport->host));
+ "on port (%6.6x)\n", lport->port_id);
return SUCCESS;
} else {
shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
- "port (%6x) is not ready.\n",
- fc_host_port_id(lport->host));
+ "port (%6.6x) is not ready.\n",
+ lport->port_id);
return FAILED;
}
}
@@ -2166,7 +2191,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
if (!list_empty(&si->scsi_pkt_queue))
printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
- "port (%6x)\n", fc_host_port_id(lport->host));
+ "port (%6.6x)\n", lport->port_id);
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 741fd5c72e1..f5c0ca4b6ef 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -45,9 +45,9 @@ extern unsigned int fc_debug_logging;
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6x: " fmt, \
+ printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
(lport)->host->host_no, \
- fc_host_port_id((lport)->host), ##args))
+ (lport)->port_id, ##args))
#define FC_DISC_DBG(disc, fmt, args...) \
FC_CHECK_LOGGING(FC_DISC_LOGGING, \
@@ -57,7 +57,7 @@ extern unsigned int fc_debug_logging;
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6x: " fmt, \
+ printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
(lport)->host->host_no, \
(port_id), ##args))
@@ -66,7 +66,7 @@ extern unsigned int fc_debug_logging;
#define FC_FCP_DBG(pkt, fmt, args...) \
FC_CHECK_LOGGING(FC_FCP_LOGGING, \
- printk(KERN_INFO "host%u: fcp: %6x: " fmt, \
+ printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
pkt->rport->port_id, ##args))
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d126ecfff70..79c9e3ccd34 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -172,7 +172,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
- FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
rdata->ids.port_id);
mutex_lock(&lport->lp_mutex);
@@ -183,7 +183,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
- "on port (%6x) for the directory "
+ "on port (%6.6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"%d state", rdata->ids.port_id,
@@ -228,9 +228,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
u64 remote_wwnn)
{
mutex_lock(&lport->disc.disc_mutex);
- if (lport->ptp_rdata)
+ if (lport->ptp_rdata) {
lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ }
lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+ kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
@@ -241,17 +244,6 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
}
/**
- * fc_get_host_port_type() - Return the port type of the given Scsi_Host
- * @shost: The SCSI host whose port type is to be determined
- */
-void fc_get_host_port_type(struct Scsi_Host *shost)
-{
- /* TODO - currently just NPORT */
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
-}
-EXPORT_SYMBOL(fc_get_host_port_type);
-
-/**
* fc_get_host_port_state() - Return the port state of the given Scsi_Host
* @shost: The SCSI host whose port state is to be determined
*/
@@ -572,8 +564,8 @@ void __fc_linkup(struct fc_lport *lport)
*/
void fc_linkup(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkup(lport);
@@ -602,8 +594,8 @@ void __fc_linkdown(struct fc_lport *lport)
*/
void fc_linkdown(struct fc_lport *lport)
{
- printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkdown(lport);
@@ -704,8 +696,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
break;
case DISC_EV_FAILED:
printk(KERN_ERR "host%d: libfc: "
- "Discovery failed for port (%6x)\n",
- lport->host->host_no, fc_host_port_id(lport->host));
+ "Discovery failed for port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
@@ -750,10 +742,14 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
if (port_id)
- printk(KERN_INFO "host%d: Assigned Port ID %6x\n",
+ printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
+ lport->port_id = port_id;
+
+ /* Update the fc_host */
fc_host_port_id(lport->host) = port_id;
+
if (lport->tt.lport_set_port_id)
lport->tt.lport_set_port_id(lport, port_id, fp);
}
@@ -797,11 +793,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
- "with same WWPN %llx\n",
+ "with same WWPN %16.16llx\n",
lport->host->host_no, remote_wwpn);
goto out;
}
- FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
@@ -832,7 +828,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
*/
f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
FC_TYPE_ELS, f_ctl, 0);
lport->tt.seq_send(lport, sp, fp);
@@ -947,14 +943,18 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
if (lport->dns_rdata)
lport->tt.rport_logoff(lport->dns_rdata);
- lport->ptp_rdata = NULL;
+ if (lport->ptp_rdata) {
+ lport->tt.rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+ lport->ptp_rdata = NULL;
+ }
lport->tt.disc_stop(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
fc_host_fabric_name(lport->host) = 0;
- if (fc_host_port_id(lport->host))
+ if (lport->port_id)
fc_lport_set_port_id(lport, 0, NULL);
}
@@ -1492,7 +1492,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
lport->r_a_tov = 2 * e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
- "Port (%6x) entered "
+ "Port (%6.6x) entered "
"point-to-point mode\n",
lport->host->host_no, did);
fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
@@ -1699,7 +1699,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ELS_REQ;
hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+ hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_ELS;
hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT);
@@ -1759,7 +1759,7 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
hton24(fh->fh_d_id, did);
- hton24(fh->fh_s_id, fc_host_port_id(lport->host));
+ hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_CT;
hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
FC_FC_END_SEQ | FC_FC_SEQ_INIT);
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index c68f6c7341c..dd2b43bb1c7 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -69,12 +69,15 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
struct fc_lport *lport = NULL;
struct fc_lport *vn_port;
- if (fc_host_port_id(n_port->host) == port_id)
+ if (n_port->port_id == port_id)
return n_port;
+ if (port_id == FC_FID_FLOGI)
+ return n_port; /* for point-to-point */
+
mutex_lock(&n_port->lp_mutex);
list_for_each_entry(vn_port, &n_port->vports, list) {
- if (fc_host_port_id(vn_port->host) == port_id) {
+ if (vn_port->port_id == port_id) {
lport = vn_port;
break;
}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b37d0ff28b3..39e440f0f54 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1442,136 +1442,115 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_els_spp *spp; /* response spp */
unsigned int len;
unsigned int plen;
- enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
- enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
enum fc_els_spp_resp resp;
struct fc_seq_els_data rjt_data;
u32 f_ctl;
u32 fcp_parm;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
- rjt_data.fp = NULL;
+ rjt_data.fp = NULL;
fh = fc_frame_header_get(rx_fp);
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- case RPORT_ST_RTV:
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
- reason = ELS_RJT_NONE;
- break;
- default:
- fc_frame_free(rx_fp);
- return;
- break;
- }
len = fr_len(rx_fp) - sizeof(*fh);
pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
- if (pp == NULL) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else {
- plen = ntohs(pp->prli.prli_len);
- if ((plen % 4) != 0 || plen > len) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- } else if (plen < len) {
- len = plen;
- }
- plen = pp->prli.prli_spp_len;
- if ((plen % 4) != 0 || plen < sizeof(*spp) ||
- plen > len || len < sizeof(*pp)) {
- reason = ELS_RJT_PROT;
- explan = ELS_EXPL_INV_LEN;
- }
- rspp = &pp->spp;
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len || plen < 16)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp) || plen < 12)
+ goto reject_len;
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
}
- if (reason != ELS_RJT_NONE ||
- (fp = fc_frame_alloc(lport, len)) == NULL) {
- rjt_data.reason = reason;
- rjt_data.explan = explan;
- lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
- } else {
- sp = lport->tt.seq_start_next(sp);
- WARN_ON(!sp);
- pp = fc_frame_payload_get(fp, len);
- WARN_ON(!pp);
- memset(pp, 0, len);
- pp->prli.prli_cmd = ELS_LS_ACC;
- pp->prli.prli_spp_len = plen;
- pp->prli.prli_len = htons(len);
- len -= sizeof(struct fc_els_prli);
-
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
- /*
- * Go through all the service parameter pages and build
- * response. If plen indicates longer SPP than standard,
- * use that. The entire response has been pre-cleared above.
- */
- spp = &pp->spp;
- while (len >= plen) {
- spp->spp_type = rspp->spp_type;
- spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
- if (rspp->spp_flags & FC_SPP_RPA_VAL)
- resp = FC_SPP_RESP_NO_PA;
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params =
- htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
- }
- spp->spp_flags |= resp;
- len -= plen;
- rspp = (struct fc_els_spp *)((char *)rspp + plen);
- spp = (struct fc_els_spp *)((char *)spp + plen);
- }
+ sp = lport->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
- /*
- * Send LS_ACC. If this fails, the originator should retry.
- */
- f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
- f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
- ep = fc_seq_exch(sp);
- fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
- FC_TYPE_ELS, f_ctl, 0);
- lport->tt.seq_send(lport, sp, fp);
+ /* reinitialize remote port roles */
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * Get lock and re-check state.
- */
- switch (rdata->rp_state) {
- case RPORT_ST_PRLI:
- fc_rport_enter_ready(rdata);
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ while (len >= plen) {
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+ resp = FC_SPP_RESP_ACK;
+
+ switch (rspp->spp_type) {
+ case 0: /* common to all FC-4 types */
break;
- case RPORT_ST_READY:
- case RPORT_ST_ADISC:
+ case FC_TYPE_FCP:
+ fcp_parm = ntohl(rspp->spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rdata->ids.roles = roles;
+
+ spp->spp_params = htonl(lport->service_params);
break;
default:
+ resp = FC_SPP_RESP_INVL;
break;
}
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ fc_rport_enter_ready(rdata);
+ break;
+ default:
+ break;
}
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+drop:
fc_frame_free(rx_fp);
}
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5c92620292f..8eeb39ffa37 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -421,7 +421,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
struct hash_desc *rx_hash = NULL;
- if (conn->datadgst_en &
+ if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
rx_hash = tcp_conn->rx_hash;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 88f74467257..8c496b56556 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
- struct request_queue *q = qc->scsicmd->device->request_queue;
struct completion *waiting;
- unsigned long flags;
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
+
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 822835055ce..a7890c6d878 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -818,7 +818,7 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
- ata_port_disable(dev->sata_dev.ap);
+ dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE;
}
int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
@@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task)
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
- struct request_queue *q = sc->device->request_queue;
- unsigned long flags;
/* Escape for libsas internal commands */
if (!sc) {
@@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task)
if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
- return;
- }
+ } else {
+ struct request_queue *q = sc->device->request_queue;
+ unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_abort_request(sc->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_schedule_eh(sc->device->host);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_abort_request(sc->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ scsi_schedule_eh(sc->device->host);
+ }
}
int sas_slave_alloc(struct scsi_device *scsi_dev)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 565e16dd74f..e35a4c71eb9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -310,7 +310,9 @@ struct lpfc_vport {
#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -554,6 +556,7 @@ struct lpfc_hba {
struct lpfc_dmabuf slim2p;
MAILBOX_t *mbox;
+ uint32_t *mbox_ext;
uint32_t *inb_ha_copy;
uint32_t *inb_counter;
uint32_t inb_last_counter;
@@ -622,6 +625,7 @@ struct lpfc_hba {
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
uint32_t cfg_enable_bg;
+ uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_suppress_link_up;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1849e33e68f..bf33b315f93 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -869,6 +869,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
+ uint32_t max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -916,11 +917,17 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
if (axri)
*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
phba->sli4_hba.max_cfg_param.xri_used;
+
+ /* Account for differences with SLI-3. Get vpi count from
+ * mailbox data and subtract one for max vpi value.
+ */
+ max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+ (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
if (mvpi)
- *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ *mvpi = max_vpi;
if (avpi)
- *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.vpi_used;
+ *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -1925,13 +1932,12 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
-int lpfc_enable_npiv = 0;
+int lpfc_enable_npiv = 1;
module_param(lpfc_enable_npiv, int, 0);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
-lpfc_param_init(enable_npiv, 0, 0, 1);
-static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
- lpfc_enable_npiv_show, NULL);
+lpfc_param_init(enable_npiv, 1, 0, 1);
+static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
/*
# lpfc_suppress_link_up: Bring link up at initialization
@@ -2637,6 +2643,7 @@ static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
/**
* sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
* @kobj: Pointer to the kernel object
* @bin_attr: Attribute object
* @buff: Buffer pointer
@@ -2648,7 +2655,8 @@ static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
* applications.
**/
static ssize_t
-sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device,
@@ -3356,6 +3364,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
/**
* sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to the adapter IOREG space.
@@ -3373,7 +3382,8 @@ struct device_attribute *lpfc_vport_attrs[] = {
* value of count, buf contents written
**/
static ssize_t
-sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -3409,6 +3419,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: if successful contains the data from the adapter IOREG space.
@@ -3425,7 +3436,8 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
* value of count, buf contents read
**/
static ssize_t
-sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -3490,6 +3502,7 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
/**
* sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be written to sysfs mbox.
@@ -3510,7 +3523,8 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
* count number of bytes transferred
**/
static ssize_t
-sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -3565,6 +3579,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
* @bin_attr: kernel attributes passed to us.
* @buf: contains the data to be read from sysfs mbox.
@@ -3587,7 +3602,8 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
* count number of bytes transferred
**/
static ssize_t
-sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e46792..dcf088262b2 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
struct lpfc_bsg_mbox {
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *mb;
+ struct lpfc_dmabuf *rxbmp; /* for BIU diags */
+ struct lpfc_dmabufext *dmp; /* for BIU diags */
+ uint8_t *ext; /* extended mailbox data */
+ uint32_t mbOffset; /* from app */
+ uint32_t inExtWLen; /* from app */
+ uint32_t outExtWLen; /* from app */
/* job waiting for this mbox command to finish */
struct fc_bsg_job *set_job;
@@ -1708,21 +1714,26 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (dmabuf) {
dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
- INIT_LIST_HEAD(&dmabuf->list);
- bpl = (struct ulp_bde64 *) dmabuf->virt;
- memset(bpl, 0, sizeof(*bpl));
- ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
- bpl->addrHigh =
- le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
- bpl->addrLow =
- le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (dmabuf->virt) {
+ INIT_LIST_HEAD(&dmabuf->list);
+ bpl = (struct ulp_bde64 *) dmabuf->virt;
+ memset(bpl, 0, sizeof(*bpl));
+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(dmabuf->phys +
+ sizeof(*bpl)));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ }
}
if (cmdiocbq == NULL || rspiocbq == NULL ||
- dmabuf == NULL || bpl == NULL || ctreq == NULL) {
+ dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+ dmabuf->virt == NULL) {
ret_val = ENOMEM;
goto err_get_xri_exit;
}
@@ -1918,9 +1929,11 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
- INIT_LIST_HEAD(&rxbmp->list);
- rxbpl = (struct ulp_bde64 *) rxbmp->virt;
- rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ if (rxbmp->virt) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ }
}
if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
@@ -2174,14 +2187,16 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
if (txbmp) {
txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
- INIT_LIST_HEAD(&txbmp->list);
- txbpl = (struct ulp_bde64 *) txbmp->virt;
- if (txbpl)
+ if (txbmp->virt) {
+ INIT_LIST_HEAD(&txbmp->list);
+ txbpl = (struct ulp_bde64 *) txbmp->virt;
txbuffer = diag_cmd_data_alloc(phba,
txbpl, full_size, 0);
+ }
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
+ if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
+ !txbmp->virt) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
@@ -2377,35 +2392,90 @@ void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
struct fc_bsg_job *job;
uint32_t size;
unsigned long flags;
+ uint8_t *to;
+ uint8_t *from;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
+ /* job already timed out? */
if (!dd_data) {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
- pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
- mb = dd_data->context_un.mbox.mb;
+ /* build the outgoing buffer to do an sg copy
+ * the format is the response mailbox followed by any extended
+ * mailbox data
+ */
+ from = (uint8_t *)&pmboxq->u.mb;
+ to = (uint8_t *)dd_data->context_un.mbox.mb;
+ memcpy(to, from, sizeof(MAILBOX_t));
+ if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
+ /* copy the extended data if any, count is in words */
+ if (dd_data->context_un.mbox.outExtWLen) {
+ from = (uint8_t *)dd_data->context_un.mbox.ext;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.outExtWLen *
+ sizeof(uint32_t);
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
+ from = (uint8_t *)dd_data->context_un.mbox.
+ dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
+ from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
+ virt;
+ to += sizeof(MAILBOX_t);
+ size = pmboxq->u.mb.un.varWords[5];
+ memcpy(to, from, size);
+ } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
+ from = (uint8_t *)dd_data->context_un.
+ mbox.dmp->dma.virt;
+ to += sizeof(MAILBOX_t);
+ size = dd_data->context_un.mbox.dmp->size;
+ memcpy(to, from, size);
+ }
+ }
+
+ from = (uint8_t *)dd_data->context_un.mbox.mb;
job = dd_data->context_un.mbox.set_job;
- memcpy(mb, pmb, sizeof(*pmb));
- size = job->request_payload.payload_len;
+ size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
- mb, size);
+ from, size);
job->reply->result = 0;
+
dd_data->context_un.mbox.set_job = NULL;
job->dd_data = NULL;
job->job_done(job);
+ /* need to hold the lock until we call job done to hold off
+ * the timeout handler returning to the midlayer while
+	 * we are still processing the job
+ */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ kfree(dd_data->context_un.mbox.mb);
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
- kfree(mb);
+ kfree(dd_data->context_un.mbox.ext);
+ if (dd_data->context_un.mbox.dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dd_data->context_un.mbox.dmp->size,
+ dd_data->context_un.mbox.dmp->dma.virt,
+ dd_data->context_un.mbox.dmp->dma.phys);
+ kfree(dd_data->context_un.mbox.dmp);
+ }
+ if (dd_data->context_un.mbox.rxbmp) {
+ lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
+ dd_data->context_un.mbox.rxbmp->phys);
+ kfree(dd_data->context_un.mbox.rxbmp);
+ }
kfree(dd_data);
return;
}
@@ -2464,10 +2534,12 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_SET_DEBUG:
case MBX_WRITE_WWN:
case MBX_SLI4_CONFIG:
+ case MBX_READ_EVENT_LOG:
case MBX_READ_EVENT_LOG_STATUS:
case MBX_WRITE_EVENT_LOG:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
+ case MBX_RUN_BIU_DIAG64:
break;
case MBX_SET_VARIABLE:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,8 +2554,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
phba->fc_topology = TOPOLOGY_PT_PT;
}
break;
- case MBX_RUN_BIU_DIAG64:
- case MBX_READ_EVENT_LOG:
case MBX_READ_SPARM64:
case MBX_READ_LA:
case MBX_READ_LA64:
@@ -2518,97 +2588,365 @@ static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_vport *vport)
{
- LPFC_MBOXQ_t *pmboxq;
- MAILBOX_t *pmb;
- MAILBOX_t *mb;
- struct bsg_job_data *dd_data;
+ LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+ MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+ /* a 4k buffer to hold the mb and extended data from/to the bsg */
+ MAILBOX_t *mb = NULL;
+ struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
uint32_t size;
+ struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
+ struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
+ struct ulp_bde64 *rxbpl = NULL;
+ struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ uint8_t *ext = NULL;
int rc = 0;
+ uint8_t *from;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* check if requested extended data lengths are valid */
+ if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
+ (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2727 Failed allocation of dd_data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
- mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
if (!mb) {
- kfree(dd_data);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
- kfree(dd_data);
- kfree(mb);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_done;
}
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
size = job->request_payload.payload_len;
- job->reply->reply_payload_rcv_len =
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- mb, size);
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mb, size);
rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
- if (rc != 0) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- return rc; /* must be negative */
- }
+ if (rc != 0)
+ goto job_done; /* must be negative */
- memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
pmb = &pmboxq->u.mb;
memcpy(pmb, mb, sizeof(*pmb));
pmb->mbxOwner = OWN_HOST;
- pmboxq->context1 = NULL;
pmboxq->vport = vport;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
- rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- if (rc != MBX_TIMEOUT) {
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
+ /* If HBA encountered an error attention, allow only DUMP
+ * or RESTART mailbox commands until the HBA is restarted.
+ */
+ if (phba->pport->stopped &&
+ pmb->mbxCommand != MBX_DUMP_MEMORY &&
+ pmb->mbxCommand != MBX_RESTART &&
+ pmb->mbxCommand != MBX_WRITE_VPARMS &&
+ pmb->mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "2797 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ pmb->mbxCommand);
+
+ /* Don't allow mailbox commands to be sent when blocked
+ * or when in the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_done;
+ }
+
+ /* extended mailbox commands will need an extended buffer */
+ if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
+ if (!ext) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ /* any data for the device? */
+ if (mbox_req->inExtWLen) {
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)ext, from,
+ mbox_req->inExtWLen * sizeof(uint32_t));
+ }
+
+ pmboxq->context2 = ext;
+ pmboxq->in_ext_byte_len =
+ mbox_req->inExtWLen * sizeof(uint32_t);
+ pmboxq->out_ext_byte_len =
+ mbox_req->outExtWLen * sizeof(uint32_t);
+ pmboxq->mbox_offset_word = mbox_req->mbOffset;
+ }
+
+ /* biu diag will need a kernel buffer to transfer the data;
+ * allocate our own buffer and set up the mailbox command to
+ * use ours
+ */
+ if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+ uint32_t transmit_length = pmb->un.varWords[1];
+ uint32_t receive_length = pmb->un.varWords[4];
+ /* transmit length cannot be greater than receive length or
+ * mailbox extension size
+ */
+ if ((transmit_length > receive_length) ||
+ (transmit_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys);
+ pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys);
+
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+ putPaddrHigh(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+ pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+ putPaddrLow(dmp->dma.phys +
+ pmb->un.varBIUdiag.un.s2.
+ xmit_bde64.tus.f.bdeSize);
+
+ /* copy the transmit data found in the mailbox extension area */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
+ } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+ struct READ_EVENT_LOG_VAR *rdEventLog =
+ &pmb->un.varRdEventLog;
+ uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+ uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
+
+ /* receive length cannot be greater than mailbox
+ * extension size
+ */
+ if (receive_length > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ /* mode zero uses a bde like biu diags command */
+ if (mode == 0) {
+
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
}
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ if (rxbpl) {
+ INIT_LIST_HEAD(&rxbmp->list);
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ receive_length, 0);
+ }
+
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
}
+ } else if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
+ /* rebuild the command for sli4 using our own buffers
+ * like we do for biu diags
+ */
+ uint32_t receive_length = pmb->un.varWords[2];
+ /* receive length cannot be zero or greater than
+ * mailbox extension size
+ */
+ if ((receive_length == 0) ||
+ (receive_length > MAILBOX_EXT_SIZE)) {
+ rc = -ERANGE;
+ goto job_done;
+ }
- memcpy(mb, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
- mb, size);
- kfree(dd_data);
- kfree(mb);
- mempool_free(pmboxq, phba->mbox_mem_pool);
- /* not waiting mbox already done */
- return 0;
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
+ 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
+ pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
+ } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+ pmb->un.varUpdateCfg.co) {
+ struct ulp_bde64 *bde =
+ (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+ /* bde size cannot be greater than mailbox ext size */
+ if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
+ rc = -ERANGE;
+ goto job_done;
+ }
+
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!rxbmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ if (!rxbmp->virt) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ dmp = diag_cmd_data_alloc(phba, rxbpl,
+ bde->tus.f.bdeSize, 0);
+ if (!dmp) {
+ rc = -ENOMEM;
+ goto job_done;
+ }
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+ bde->addrHigh = putPaddrHigh(dmp->dma.phys);
+ bde->addrLow = putPaddrLow(dmp->dma.phys);
+
+ /* copy the transmit data found in the mailbox
+ * extension area
+ */
+ from = (uint8_t *)mb;
+ from += sizeof(MAILBOX_t);
+ memcpy((uint8_t *)dmp->dma.virt, from,
+ bde->tus.f.bdeSize);
+ }
}
+ dd_data->context_un.mbox.rxbmp = rxbmp;
+ dd_data->context_un.mbox.dmp = dmp;
+
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = mb;
dd_data->context_un.mbox.set_job = job;
+ dd_data->context_un.mbox.ext = ext;
+ dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+ dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+ dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
+
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ goto job_done;
+ }
+
+ /* job finished, copy the data */
+ memcpy(mb, pmb, sizeof(*pmb));
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ mb, size);
+ /* not waiting mbox already done */
+ rc = 0;
+ goto job_done;
+ }
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
- if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- kfree(dd_data);
- kfree(mb);
+ if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+ return 1; /* job started */
+
+job_done:
+ /* common exit for error or job completed inline */
+ kfree(mb);
+ if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
- return -EIO;
+ kfree(ext);
+ if (dmp) {
+ dma_free_coherent(&phba->pcidev->dev,
+ dmp->size, dmp->dma.virt,
+ dmp->dma.phys);
+ kfree(dmp);
}
+ if (rxbmp) {
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
+ }
+ kfree(dd_data);
- return 1;
+ return rc;
}
/**
@@ -2633,7 +2971,12 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
goto job_error;
}
- if (job->request_payload.payload_len != PAGE_SIZE) {
+ if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
rc = -EINVAL;
goto job_error;
}
@@ -3094,6 +3437,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
job->dd_data = NULL;
job->reply->reply_payload_rcv_len = 0;
job->reply->result = -EAGAIN;
+ /* the mbox completion handler can now be run */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 5bc630819b9..a2c33e7c915 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -91,11 +91,12 @@ struct get_mgmt_rev_reply {
struct MgmtRevInfo info;
};
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
struct dfc_mbox_req {
uint32_t command;
+ uint32_t mbOffset;
uint32_t inExtWLen;
uint32_t outExtWLen;
- uint8_t mbOffset;
};
/* Used for menlo command or menlo data. The xri is only used for menlo data */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 5087c4211b4..fbc9baeb604 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -65,6 +65,7 @@ void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2851d75ffc6..36257a68550 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -38,6 +38,7 @@ enum lpfc_work_type {
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
LPFC_EVT_FASTPATH_MGMT_EVT,
+ LPFC_EVT_RESET_HBA,
};
/* structure used to queue event to the discovery tasklet */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5fbdb22c189..c4c7f0ad746 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -584,6 +584,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
@@ -864,6 +865,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
/*
@@ -893,11 +895,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (!rc) {
/* Mark the FCF discovery process done */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
- "2769 FLOGI successful on FCF record: "
- "current_fcf_index:x%x, terminate FCF "
- "round robin failover process\n",
- phba->fcf.current_rec.fcf_indx);
+ if (phba->hba_flag & HBA_FIP_SUPPORT)
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+ LOG_ELS,
+ "2769 FLOGI successful on FCF "
+ "record: current_fcf_index:"
+ "x%x, terminate FCF round "
+ "robin failover process\n",
+ phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
@@ -5366,7 +5371,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
sizeof(struct lpfc_name));
pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
cmdiocbp->context2)->virt);
- lsrjt_event.command = *pcmd;
+ lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
@@ -6050,7 +6055,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT
+ && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_initial_flogi(vport);
else
lpfc_initial_fdisc(vport);
@@ -6286,6 +6292,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6310,11 +6317,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, np);
}
+ lpfc_cleanup_pending_mbox(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
if (phba->sli_rev == LPFC_SLI_REV4)
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ else
+ vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
spin_unlock_irq(shost->host_lock);
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e1466eec56b..1f87b4fb8b5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -475,6 +475,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_send_fastpath_evt(phba, evtp);
free_evt = 0;
break;
+ case LPFC_EVT_RESET_HBA:
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_reset_hba(phba);
+ break;
}
if (free_evt)
kfree(evtp);
@@ -1531,7 +1535,37 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
}
/**
- * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes a running random selection decision on the FCF record
+ * to use through a sequence of @fcf_cnt eligible FCF records with equal
+ * probability. To perform integer manipulation of random numbers with
+ * size uint32_t, the lower 16 bits of the 32-bit random number returned
+ * from random32() are taken as the generated random number.
+ *
+ * Returns true when the newly read FCF record should be chosen; otherwise,
+ * returns false to keep the previously chosen FCF record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+ uint32_t rand_num;
+
+ /* Get 16-bit uniform random number */
+ rand_num = (0xFFFF & random32());
+
+ /* Decision with probability 1/fcf_cnt */
+ if ((fcf_cnt * rand_num) < 0xFFFF)
+ return true;
+ else
+ return false;
+}
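The check above is reservoir sampling with a reservoir of one: the n-th eligible record replaces the current pick with probability roughly 1/n, so after the whole scan every eligible record is equally likely to be the one kept. A minimal user-space sketch of the same decision rule, offered only as an illustration (rand16() stands in for the low 16 bits of the kernel's random32(); none of these names are driver code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the low 16 bits of random32(). */
static uint32_t rand16(void)
{
	return (uint32_t)(rand() & 0xFFFF);
}

/* Same test as lpfc_sli4_new_fcf_random_select(): with rand16() uniform in
 * [0, 0xFFFF], (n * rand16()) < 0xFFFF holds with probability ~1/n. */
static int pick_new_record(uint32_t n)
{
	return (n * rand16()) < 0xFFFF;
}

int main(void)
{
	uint32_t n, picked = 0;

	srand(42);
	for (n = 1; n <= 8; n++)	/* scan 8 eligible records */
		if (pick_new_record(n))
			picked = n;	/* each record ends up kept ~1/8 of the time */
	printf("selected record %u\n", picked);
	return 0;
}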
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
* @next_fcf_index: pointer to holder of next fcf index.
@@ -1592,7 +1626,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
new_fcf_record = (struct fcf_record *)(virt_addr +
sizeof(struct lpfc_mbx_read_fcf_tbl));
lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
- sizeof(struct fcf_record));
+ offsetof(struct fcf_record, vlan_bitmap));
+ new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+ new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
return new_fcf_record;
}
@@ -1679,6 +1715,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
uint16_t vlan_id;
+ uint32_t seed;
+ bool select_new_fcf;
int rc;
/* If there is pending FCoE event restart FCF table scan */
@@ -1809,9 +1847,21 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* than the driver FCF record, use the new record.
*/
if (new_fcf_record->fip_priority < fcf_rec->priority) {
- /* Choose this FCF record */
+ /* Choose the new FCF record with lower priority */
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
+ /* Reset running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+ /* Update running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt++;
+ select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+ phba->fcf.eligible_fcf_cnt);
+ if (select_new_fcf)
+ /* Choose the new FCF by random selection */
+ __lpfc_update_fcf_record(phba, fcf_rec,
+ new_fcf_record,
+ addr_mode, vlan_id, 0);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -1825,6 +1875,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
addr_mode, vlan_id, (boot_flag ?
BOOT_ENABLE : 0));
phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ /* Setup initial running random FCF selection count */
+ phba->fcf.eligible_fcf_cnt = 1;
+ /* Seeding the random number generator for random selection */
+ seed = (uint32_t)(0xFFFFFFFF & jiffies);
+ srandom32(seed);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -2686,11 +2741,18 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
switch (mb->mbxStatus) {
case 0x0011:
case 0x0020:
- case 0x9700:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0911 cmpl_unreg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
break;
+ /* If VPI is busy, reset the HBA */
+ case 0x9700:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+ vport->vpi, mb->mbxStatus);
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_workq_post_event(phba, NULL, NULL,
+ LPFC_EVT_RESET_HBA);
}
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
@@ -2965,7 +3027,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
- lpfc_start_fdiscs(phba);
+ /* when the physical port receives a LOGO, do not
+ * start vport discovery */
+ if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ lpfc_start_fdiscs(phba);
+ else
+ vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -3177,7 +3244,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (new_state == NLP_STE_UNMAPPED_NODE) {
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
ndlp->nlp_type |= NLP_FC_NODE;
}
@@ -4935,6 +5001,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_cleanup_pending_mbox(vports[i]);
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 89ff7c09e29..e654d01dad2 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1565,95 +1565,83 @@ enum lpfc_protgrp_type {
};
/* PDE Descriptors */
-#define LPFC_PDE1_DESCRIPTOR 0x81
-#define LPFC_PDE2_DESCRIPTOR 0x82
-#define LPFC_PDE3_DESCRIPTOR 0x83
-
-/* BlockGuard Profiles */
-enum lpfc_bg_prof_codes {
- LPFC_PROF_INVALID,
- LPFC_PROF_A1 = 128, /* Full Protection */
- LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
- LPFC_PROF_A3,
- LPFC_PROF_A4,
- LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
- LPFC_PROF_B2,
- LPFC_PROF_B3,
- LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
- LPFC_PROF_C2,
- LPFC_PROF_C3,
- LPFC_PROF_D1, /* Full Protection */
- LPFC_PROF_D2, /* Partial Protection & Check Disabling */
- LPFC_PROF_D3,
- LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
- LPFC_PROF_E2,
- LPFC_PROF_E3,
- LPFC_PROF_E4,
- LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
- /* F1 Translation BDE */
- LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
- LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
- LPFC_PROF_ANT2,
- LPFC_PROF_AST2
+#define LPFC_PDE5_DESCRIPTOR 0x85
+#define LPFC_PDE6_DESCRIPTOR 0x86
+#define LPFC_PDE7_DESCRIPTOR 0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC 0x0
+#define BG_OP_IN_CRC_OUT_NODIF 0x1
+#define BG_OP_IN_NODIF_OUT_CSUM 0x2
+#define BG_OP_IN_CSUM_OUT_NODIF 0x3
+#define BG_OP_IN_CRC_OUT_CRC 0x4
+#define BG_OP_IN_CSUM_OUT_CSUM 0x5
+#define BG_OP_IN_CRC_OUT_CSUM 0x6
+#define BG_OP_IN_CSUM_OUT_CRC 0x7
+
+struct lpfc_pde5 {
+ uint32_t word0;
+#define pde5_type_SHIFT 24
+#define pde5_type_MASK 0x000000ff
+#define pde5_type_WORD word0
+#define pde5_rsvd0_SHIFT 0
+#define pde5_rsvd0_MASK 0x00ffffff
+#define pde5_rsvd0_WORD word0
+ uint32_t reftag; /* Reference Tag Value */
+ uint32_t reftagtr; /* Reference Tag Translation Value */
};
-/* BlockGuard error-control defines */
-#define BG_EC_STOP_ERR 0x00
-#define BG_EC_CONT_ERR 0x01
-#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
-#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
-
-/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
-#define PDE_DESC_TYPE_MASK 0xff000000
-#define PDE_DESC_TYPE_SHIFT 24
-#define PDE_BG_PROFILE_MASK 0x00ff0000
-#define PDE_BG_PROFILE_SHIFT 16
-#define PDE_BLOCK_LEN_MASK 0x0000fffc
-#define PDE_BLOCK_LEN_SHIFT 2
-#define PDE_ERR_CTRL_MASK 0x00000003
-#define PDE_ERR_CTRL_SHIFT 0
-/* PDE word 1 bit masks and shifts */
-#define PDE_APPTAG_MASK_MASK 0xffff0000
-#define PDE_APPTAG_MASK_SHIFT 16
-#define PDE_APPTAG_VAL_MASK 0x0000ffff
-#define PDE_APPTAG_VAL_SHIFT 0
-struct lpfc_pde {
- uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
- uint32_t apptag; /* bitfields of app tag maskand app tag value */
- uint32_t reftag; /* reference tag occupying all 32 bits */
+struct lpfc_pde6 {
+ uint32_t word0;
+#define pde6_type_SHIFT 24
+#define pde6_type_MASK 0x000000ff
+#define pde6_type_WORD word0
+#define pde6_rsvd0_SHIFT 0
+#define pde6_rsvd0_MASK 0x00ffffff
+#define pde6_rsvd0_WORD word0
+ uint32_t word1;
+#define pde6_rsvd1_SHIFT 26
+#define pde6_rsvd1_MASK 0x0000003f
+#define pde6_rsvd1_WORD word1
+#define pde6_na_SHIFT 25
+#define pde6_na_MASK 0x00000001
+#define pde6_na_WORD word1
+#define pde6_rsvd2_SHIFT 16
+#define pde6_rsvd2_MASK 0x000001FF
+#define pde6_rsvd2_WORD word1
+#define pde6_apptagtr_SHIFT 0
+#define pde6_apptagtr_MASK 0x0000ffff
+#define pde6_apptagtr_WORD word1
+ uint32_t word2;
+#define pde6_optx_SHIFT 28
+#define pde6_optx_MASK 0x0000000f
+#define pde6_optx_WORD word2
+#define pde6_oprx_SHIFT 24
+#define pde6_oprx_MASK 0x0000000f
+#define pde6_oprx_WORD word2
+#define pde6_nr_SHIFT 23
+#define pde6_nr_MASK 0x00000001
+#define pde6_nr_WORD word2
+#define pde6_ce_SHIFT 22
+#define pde6_ce_MASK 0x00000001
+#define pde6_ce_WORD word2
+#define pde6_re_SHIFT 21
+#define pde6_re_MASK 0x00000001
+#define pde6_re_WORD word2
+#define pde6_ae_SHIFT 20
+#define pde6_ae_MASK 0x00000001
+#define pde6_ae_WORD word2
+#define pde6_ai_SHIFT 19
+#define pde6_ai_MASK 0x00000001
+#define pde6_ai_WORD word2
+#define pde6_bs_SHIFT 16
+#define pde6_bs_MASK 0x00000007
+#define pde6_bs_WORD word2
+#define pde6_apptagval_SHIFT 0
+#define pde6_apptagval_MASK 0x0000ffff
+#define pde6_apptagval_WORD word2
};
-/* inline function to set fields in parms of PDE */
-static inline void
-lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
-{
- uint32_t *wp = &p->parms;
-
- /* spec indicates that adapter appends two 0's to length field */
- len = len >> 2;
-
- *wp &= 0;
- *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
- *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
- *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
- *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
- *wp = le32_to_cpu(*wp);
-}
-
-/* inline function to set apptag and reftag fields of PDE */
-static inline void
-lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
- u32 reftag)
-{
- uint32_t *wp = &p->apptag;
- *wp &= 0;
- *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
- *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
- *wp = le32_to_cpu(*wp);
- wp = &p->reftag;
- *wp = le32_to_cpu(reftag);
-}
-
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
@@ -1744,6 +1732,17 @@ typedef struct {
} un;
} BIU_DIAG_VAR;
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+ uint32_t word1;
+#define lpfc_event_log_SHIFT 29
+#define lpfc_event_log_MASK 0x00000001
+#define lpfc_event_log_WORD word1
+#define USE_MAILBOX_RESPONSE 1
+ uint32_t offset;
+ struct ulp_bde64 rcv_bde64;
+};
+
/* Structure for MB Command INIT_LINK (05) */
typedef struct {
@@ -2487,8 +2486,8 @@ typedef struct {
#define DMP_VPORT_REGION_SIZE 0x200
#define DMP_MBOX_OFFSET_WORD 0x5
-#define DMP_REGION_23 0x17 /* fcoe param and port state region */
-#define DMP_RGN23_SIZE 0x400
+#define DMP_REGION_23 0x17 /* fcoe param and port state region */
+#define DMP_RGN23_SIZE 0x400
#define WAKE_UP_PARMS_REGION_ID 4
#define WAKE_UP_PARMS_WORD_SIZE 15
@@ -2503,9 +2502,9 @@ struct vport_rec {
#define VPORT_INFO_REV 0x1
#define MAX_STATIC_VPORT_COUNT 16
struct static_vport_info {
- uint32_t signature;
+ uint32_t signature;
uint32_t rev;
- struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
+ struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
uint32_t resvd[66];
};
@@ -2934,6 +2933,12 @@ typedef struct {
/* Union of all Mailbox Command types */
#define MAILBOX_CMD_WSIZE 32
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE 512
+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET 0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
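The two comments above state a constraint (512 extension words, i.e. 2048 bytes, must fit within the page-sized transmit limit) that the defines themselves do not enforce. A compile-time guard is sketched below; it is not part of this patch, the helper name is hypothetical, and BUILD_BUG_ON is the stock kernel assert, which needs function scope:

/* Sketch only: fail the build if the mailbox extension area ever outgrows
 * the page-sized transmit limit used for sysfs mailbox I/O. */
static inline void lpfc_mbox_ext_size_check(void)
{
	BUILD_BUG_ON(MAILBOX_EXT_WSIZE * sizeof(uint32_t) >
		     MAILBOX_MAX_XMIT_SIZE);
}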
typedef union {
uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
@@ -2972,6 +2977,9 @@ typedef union {
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38
+ * (READ_EVENT_LOG)
+ */
struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
@@ -3652,7 +3660,8 @@ typedef struct _IOCB { /* IOCB structure */
/* Maximum IOCBs that will fit in SLI2 slim */
#define MAX_SLI2_IOCB 498
#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
- (sizeof(MAILBOX_t) + sizeof(PCB_t)))
+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
/* HBQ entries are 4 words each = 4k */
#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
@@ -3660,6 +3669,7 @@ typedef struct _IOCB { /* IOCB structure */
struct lpfc_sli2_slim {
MAILBOX_t mbx;
+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
PCB_t pcb;
IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
};
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 820015fbc4d..bbdcf96800f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,8 +41,14 @@
* Or clear that bit field:
* bf_set(example_bit_field, &t1, 0);
*/
+#define bf_get_le32(name, ptr) \
+ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_get(name, ptr) \
(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+ ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+ name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+ ~(name##_MASK << name##_SHIFT)))))
#define bf_set(name, ptr, value) \
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
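Like bf_get/bf_set, the new _le32 variants rely on the name##_SHIFT/_MASK/_WORD token-pasting convention; they only add a byte-swap so the backing word can stay little-endian in memory. A kernel-context sketch with a hypothetical field (struct demo_reg and demo_bit are illustrative names, not part of the driver):

/* Hypothetical register following the driver's SHIFT/MASK/WORD convention;
 * word0 is kept little-endian in memory. */
struct demo_reg {
	uint32_t word0;
};
#define demo_bit_SHIFT	4
#define demo_bit_MASK	0x0000000f
#define demo_bit_WORD	word0

static uint32_t demo_bitfield_usage(struct demo_reg *reg)
{
	/* Read-modify-write: swap word0 to CPU order, merge value 7 into
	 * bits 7:4, swap the result back to little-endian. */
	bf_set_le32(demo_bit, reg, 7);

	/* Read back in CPU order regardless of host endianness: returns 7. */
	return bf_get_le32(demo_bit, reg);
}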
@@ -781,6 +787,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1102,6 +1109,39 @@ struct lpfc_mbx_mq_create {
} u;
};
+struct lpfc_mbx_mq_create_ext {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
+ uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
+ struct mq_context context;
+ struct dma_address page[LPFC_MAX_MQ_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT 0
+#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD word0
+ } response;
+ } u;
+#define LPFC_ASYNC_EVENT_LINK_STATE 0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE 0x4
+#define LPFC_ASYNC_EVENT_GROUP5 0x20
+};
+
struct lpfc_mbx_mq_destroy {
struct mbox_header header;
union {
@@ -1428,8 +1468,8 @@ struct lpfc_mbx_reg_vfi {
#define lpfc_reg_vfi_fcfi_WORD word2
uint32_t wwn[2];
struct ulp_bde64 bde;
- uint32_t word8_rsvd;
- uint32_t word9_rsvd;
+ uint32_t e_d_tov;
+ uint32_t r_a_tov;
uint32_t word10;
#define lpfc_reg_vfi_nport_id_SHIFT 0
#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
@@ -1940,6 +1980,7 @@ struct lpfc_mbx_sli4_params {
#define rdma_MASK 0x00000001
#define rdma_WORD word3
uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
uint32_t word5;
#define if_page_sz_SHIFT 0
#define if_page_sz_MASK 0x0000ffff
@@ -2041,6 +2082,7 @@ struct lpfc_mqe {
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
+ struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_cq_create cq_create;
struct lpfc_mbx_wq_create wq_create;
@@ -2099,6 +2141,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_LINK 0x1
#define LPFC_TRAILER_CODE_FCOE 0x2
#define LPFC_TRAILER_CODE_DCBX 0x3
+#define LPFC_TRAILER_CODE_GRP5 0x5
};
struct lpfc_acqe_link {
@@ -2168,6 +2211,19 @@ struct lpfc_acqe_dcbx {
uint32_t trailer;
};
+struct lpfc_acqe_grp5 {
+ uint32_t word0;
+#define lpfc_acqe_grp5_pport_SHIFT 0
+#define lpfc_acqe_grp5_pport_MASK 0x000000FF
+#define lpfc_acqe_grp5_pport_WORD word0
+ uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT 16
+#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD word1
+ uint32_t event_tag;
+ uint32_t trailer;
+};
+
/*
* Define the bootstrap mailbox (bmbx) region used to communicate
* mailbox command between the host and port. The mailbox consists
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 774663e8e1f..cd9697edf86 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
- phba->sli4_hba.pc_sli4_params.sge_supp_len;
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
shost->sg_tablesize = phba->cfg_sg_seg_cnt;
}
@@ -2600,15 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
- }
- }
-
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -3236,12 +3227,26 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
if (!vport)
return NULL;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp)
- return NULL;
phba = vport->phba;
if (!phba)
return NULL;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Set the node type */
+ ndlp->nlp_type |= NLP_FABRIC;
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
+ }
if (phba->pport->port_state <= LPFC_FLOGI)
return NULL;
/* If virtual link is not yet instantiated ignore CVL */
@@ -3304,11 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2546 New FCF found/FCF parameter modified event: "
- "evt_tag:x%x, fcf_index:x%x\n",
- acqe_fcoe->event_tag, acqe_fcoe->index);
-
+ if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2546 New FCF found event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
+ else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+ LOG_DISCOVERY,
+ "2788 FCF parameter modified event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag,
+ acqe_fcoe->index);
spin_lock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3517,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async grp5 completion queue entry.
+ *
+ * This routine handles the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change. The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+ struct lpfc_acqe_grp5 *acqe_grp5)
+{
+ uint16_t prev_ll_spd;
+
+ phba->fc_eventTag = acqe_grp5->event_tag;
+ phba->fcoe_eventtag = acqe_grp5->event_tag;
+ prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+ phba->sli4_hba.link_state.logical_speed =
+ (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2789 GRP5 Async Event: Updating logical link speed "
+ "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+ (phba->sli4_hba.link_state.logical_speed*10));
+}
+
+/**
* lpfc_sli4_async_event_proc - Process all the pending asynchronous event
* @phba: pointer to lpfc hba data structure.
*
@@ -3552,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
lpfc_sli4_async_dcbx_evt(phba,
&cq_event->cqe.acqe_dcbx);
break;
+ case LPFC_TRAILER_CODE_GRP5:
+ lpfc_sli4_async_grp5_evt(phba,
+ &cq_event->cqe.acqe_grp5);
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"1804 Invalid asynchrous event code: "
@@ -3813,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+ phba->menlo_flag |= HBA_MENLO_SUPPORT;
+ /* check for menlo minimum sg count */
+ if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+ }
+
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4030,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (unlikely(rc))
goto out_free_bsmbx;
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto out_free_bsmbx;
+ }
+
+ /* Get the Supported Pages. It is always available. */
+ lpfc_supported_pages(mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_bsmbx;
+ }
+
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (rc) {
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -4090,43 +4178,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
- mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mboxq) {
- rc = -ENOMEM;
- goto out_free_fcp_eq_hdl;
- }
-
- /* Get the Supported Pages. It is always available. */
- lpfc_supported_pages(mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_fcp_eq_hdl;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
- }
- }
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_fcp_eq_hdl;
- }
return rc;
out_free_fcp_eq_hdl:
@@ -5050,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->mbox_ext = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
phba->IOCBs = (phba->slim2p.virt +
offsetof(struct lpfc_sli2_slim, IOCBs));
@@ -7753,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
* @phba: pointer to lpfc hba data structure.
*
* This routine is called to prepare the SLI3 device for PCI slot recover. It
- * aborts and stops all the on-going I/Os on the pci device.
+ * aborts all the outstanding SCSI I/Os to the pci device.
**/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2723 PCI channel I/O abort preparing for recovery\n");
- /* Prepare for bringing HBA offline */
- lpfc_offline_prep(phba);
- /* Clear sli active flag to prevent sysfs access to HBA */
- spin_lock_irq(&phba->hbalock);
- phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
- spin_unlock_irq(&phba->hbalock);
- /* Stop and flush all I/Os and bring HBA offline */
- lpfc_offline(phba);
+
+ /*
+ * There may be errored I/Os through the HBA; abort all I/Os on the
+ * txcmplq and let the SCSI mid-layer retry them to recover.
+ */
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
}
/**
@@ -7781,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
-
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2710 PCI channel disable preparing for reset\n");
+
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Disable interrupt and pci device */
lpfc_sli_disable_intr(phba);
pci_disable_device(phba->pcidev);
- /*
- * There may be I/Os dropped by the firmware.
- * Error iocb (I/O) on txcmplq and let the SCSI layer
- * retry it after re-establishing link.
- */
- pring = &psli->ring[psli->fcp_ring];
- lpfc_sli_abort_iocb_ring(phba, pring);
+ /* Flush all the driver's outstanding SCSI I/Os as we are about to reset */
+ lpfc_sli_flush_fcp_rings(phba);
}
/**
@@ -7811,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
+ /* stop all timers */
+ lpfc_stop_hba_timers(phba);
+
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7839,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
-
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7948,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- /* Bring the device online */
+ /* Bring device online; it will be a no-op for a non-fatal error resume */
lpfc_online(phba);
/* Clean up Advanced Error Reporting (AER) if needed */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 72e6adb0643..e84dc33ca20 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1216,7 +1216,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
pdma_addr = phba->slim2p.phys + offset;
phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -1272,28 +1272,41 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*
*/
- if (phba->sli_rev == 3) {
- phba->host_gp = &mb_slim->us.s3.host[0];
- phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
- } else {
- phba->host_gp = &mb_slim->us.s2.host[0];
+ if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+ phba->host_gp = &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
- }
+ offset = (uint8_t *)&phba->mbox->us.s2.host -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+ } else {
+ /* The Host Group Pointer is always in SLIM */
+ mb->un.varCfgPort.hps = 1;
- /* mask off BAR0's flag bits 0 - 3 */
- phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (void __iomem *)phba->host_gp -
- (void __iomem *)phba->MBslimaddr;
- if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
- phba->pcb->hgpAddrHigh = bar_high;
- else
- phba->pcb->hgpAddrHigh = 0;
- /* write HGP data to SLIM at the required longword offset */
- memset(&hgp, 0, sizeof(struct lpfc_hgp));
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
- for (i=0; i < phba->sli.num_rings; i++) {
- lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ /* mask off BAR0's flag bits 0 - 3 */
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ phba->pcb->hgpAddrHigh = bar_high;
+ else
+ phba->pcb->hgpAddrHigh = 0;
+ /* write HGP data to SLIM at the required longword offset */
+ memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+ for (i = 0; i < phba->sli.num_rings; i++) {
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
sizeof(*phba->host_gp));
+ }
}
/* Setup Port Group offset */
@@ -1598,7 +1611,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
for (sgentry = 0; sgentry < sgecount; sgentry++) {
lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
- dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
/* Free the sge address array memory */
@@ -1656,7 +1669,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
}
/* Setup for the none-embedded mbox command */
- pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+ pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
/* Allocate record for keeping SGE virtual addresses */
@@ -1671,24 +1684,24 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
/* The DMA memory is always allocated in the length of a
* page even though the last SGE might not fill up to a
- * page, this is used as a priori size of PAGE_SIZE for
+ * page, this is used as an a priori size of SLI4_PAGE_SIZE for
* the later DMA memory free.
*/
- viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+ viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
&phyaddr, GFP_KERNEL);
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
- memset(viraddr, 0, PAGE_SIZE);
+ memset(viraddr, 0, SLI4_PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
resid_len = length - alloc_len;
- if (resid_len > PAGE_SIZE) {
+ if (resid_len > SLI4_PAGE_SIZE) {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
- PAGE_SIZE);
- alloc_len += PAGE_SIZE;
+ SLI4_PAGE_SIZE);
+ alloc_len += SLI4_PAGE_SIZE;
} else {
lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
resid_len);
@@ -1886,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+ reg_vfi->e_d_tov = vport->phba->fc_edtov;
+ reg_vfi->r_a_tov = vport->phba->fc_ratov;
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e331204a4d5..b90820a699f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -493,6 +493,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport **vports;
+ int i, active_vlink_present = 0;
/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
@@ -505,15 +508,44 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if ((ndlp->nlp_DID == Fabric_DID) &&
- vport->port_type == LPFC_NPIV_PORT) {
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (vport->port_state <= LPFC_FDISC)
+ goto out;
lpfc_linkdown_port(vport);
- mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
+ vport->fc_flag |= FC_VPORT_LOGO_RCVD;
spin_unlock_irq(shost->host_lock);
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_LOGO_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
- ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_retry_pport_discovery(phba);
+ }
} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
((ndlp->nlp_type & NLP_FCP_TARGET) ||
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
@@ -526,6 +558,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}
+out:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
@@ -604,11 +637,55 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
+/**
+ * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
+ * @phba: Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi: RPI to be released.
+ *
+ * This function will send an unreg_login mailbox command to the firmware
+ * to release an RPI.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ uint16_t rpi)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!pmb)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ "2796 mailbox memory allocation failed \n");
+ else {
+ lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+}
static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI if reglogin completing */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ (evt == NLP_EVT_CMPL_REG_LOGIN) &&
+ (!pmb->u.mb.mbxStatus)) {
+ mb = &pmb->u.mb;
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
@@ -944,6 +1021,18 @@ static uint32_t
lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->u.mb;
+ uint16_t rpi;
+
+ phba = vport->phba;
+ /* Release the RPI */
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ !mb->mbxStatus) {
+ rpi = pmb->u.mb.un.varWords[0];
+ lpfc_release_rpi(phba, vport, rpi);
+ }
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index dccdb822328..f4a3b2e79ee 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1141,37 +1141,47 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
}
/*
- * Given a scsi cmnd, determine the BlockGuard profile to be used
- * with the cmd
+ * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+ * @sc: The SCSI command to examine
+ * @txopt: (out) BlockGuard operation for transmitted data
+ * @rxopt: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
*/
static int
-lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint8_t *txop, uint8_t *rxop)
{
uint8_t guard_type = scsi_host_get_guard(sc->device->host);
- uint8_t ret_prof = LPFC_PROF_INVALID;
+ uint8_t ret = 0;
if (guard_type == SHOST_DIX_GUARD_IP) {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
- ret_prof = LPFC_PROF_AST2;
+ *txop = BG_OP_IN_CSUM_OUT_NODIF;
+ *rxop = BG_OP_IN_NODIF_OUT_CSUM;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_AST1;
+ *txop = BG_OP_IN_CSUM_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CSUM;
break;
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
+ "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
@@ -1179,12 +1189,14 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
- ret_prof = LPFC_PROF_A1;
+ *txop = BG_OP_IN_NODIF_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_NODIF;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
- ret_prof = LPFC_PROF_C1;
+ *txop = BG_OP_IN_CRC_OUT_CRC;
+ *rxop = BG_OP_IN_CRC_OUT_CRC;
break;
case SCSI_PROT_READ_INSERT:
@@ -1194,6 +1206,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
scsi_get_prot_op(sc), guard_type);
+ ret = 1;
break;
}
} else {
@@ -1201,7 +1214,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
BUG();
}
- return ret_prof;
+ return ret;
}
struct scsi_dif_tuple {
@@ -1266,7 +1279,9 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
* The buffer list consists of just one protection group described
* below:
* +-------------------------+
- * start of prot group --> | PDE_1 |
+ * start of prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
* | Data BDE |
* +-------------------------+
@@ -1284,30 +1299,49 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct ulp_bde64 *bpl, int datasegcnt)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
dma_addr_t physaddr;
- int i = 0, num_bde = 0;
+ int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
- int prof = LPFC_PROF_INVALID;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
- pde1 = (struct lpfc_pde *) bpl;
- prof = lpfc_sc_to_sli_prof(phba, sc);
-
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command for pde*/
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
- /* setup PDE1 with what we have */
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ if (datadir == DMA_FROM_DEVICE) {
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ }
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
@@ -1342,15 +1376,17 @@ out:
* The buffer list for this type consists of one or more of the
* protection groups described below:
* +-------------------------+
- * start of first prot group --> | PDE_1 |
+ * start of first prot group --> | PDE_5 |
+ * +-------------------------+
+ * | PDE_6 |
* +-------------------------+
- * | PDE_3 (Prot BDE) |
+ * | PDE_7 (Prot BDE) |
* +-------------------------+
* | Data BDE |
* +-------------------------+
* |more Data BDE's ... (opt)|
* +-------------------------+
- * start of new prot group --> | PDE_1 |
+ * start of new prot group --> | PDE_5 |
* +-------------------------+
* | ... |
* +-------------------------+
@@ -1369,19 +1405,21 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
- struct lpfc_pde *pde1 = NULL;
+ struct lpfc_pde5 *pde5 = NULL;
+ struct lpfc_pde6 *pde6 = NULL;
struct ulp_bde64 *prot_bde = NULL;
dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0;
unsigned int split_offset, protgroup_len;
unsigned int protgrp_blks, protgrp_bytes;
unsigned int remainder, subtotal;
- int prof = LPFC_PROF_INVALID;
+ int status;
int datadir = sc->sc_data_direction;
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
uint32_t reftag;
uint16_t apptagmask, apptagval;
+ uint8_t txop, rxop;
int num_bde = 0;
sgpe = scsi_prot_sglist(sc);
@@ -1394,31 +1432,47 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
return 0;
}
- prof = lpfc_sc_to_sli_prof(phba, sc);
- if (prof == LPFC_PROF_INVALID)
+ status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+ if (status)
goto out;
- /* extract some info from the scsi command for PDE1*/
+ /* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
split_offset = 0;
do {
- /* setup the first PDE_1 */
- pde1 = (struct lpfc_pde *) bpl;
-
- lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
- BG_EC_STOP_ERR);
- lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+ /* setup PDE5 with what we have */
+ pde5 = (struct lpfc_pde5 *) bpl;
+ memset(pde5, 0, sizeof(struct lpfc_pde5));
+ bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+ pde5->reftag = reftag;
+ /* advance bpl and increment bde count */
+ num_bde++;
+ bpl++;
+ pde6 = (struct lpfc_pde6 *) bpl;
+
+ /* setup PDE6 with the rest of the info */
+ memset(pde6, 0, sizeof(struct lpfc_pde6));
+ bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+ bf_set(pde6_optx, pde6, txop);
+ bf_set(pde6_oprx, pde6, rxop);
+ bf_set(pde6_ce, pde6, 1);
+ bf_set(pde6_re, pde6, 1);
+ bf_set(pde6_ae, pde6, 1);
+ bf_set(pde6_ai, pde6, 1);
+ bf_set(pde6_apptagval, pde6, apptagval);
+
+ /* advance bpl and increment bde count */
num_bde++;
bpl++;
/* setup the first BDE that points to protection buffer */
prot_bde = (struct ulp_bde64 *) bpl;
protphysaddr = sg_dma_address(sgpe);
- prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
- prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
+ prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
protgroup_len = sg_dma_len(sgpe);
@@ -1429,10 +1483,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
protgrp_bytes = protgrp_blks * blksize;
prot_bde->tus.f.bdeSize = protgroup_len;
- if (datadir == DMA_TO_DEVICE)
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- else
- prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
curr_prot++;
@@ -1484,6 +1535,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* Move to the next s/g segment if possible */
sgde = sg_next(sgde);
+
}
/* are we done ? */
@@ -1506,7 +1558,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
out:
-
return num_bde;
}
/*
@@ -1828,8 +1879,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
@@ -1937,8 +1988,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* lpfc_hba struct.
*
* Return codes:
- * 1 - Error
- * 0 - Success
+ * 1 - Error
+ * 0 - Success
**/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 049fb9a17b3..7a61455140b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
- if (!bf_get(lpfc_eqe_valid, eqe))
+ if (!bf_get_le32(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
- bf_set(lpfc_eqe_valid, temp_eqe, 0);
+ bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
struct lpfc_cqe *cqe;
/* If the next CQE is not valid then we are done */
- if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+ if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
- bf_set(lpfc_cqe_valid, temp_qe, 0);
+ bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
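/*
 * Editorial sketch, not part of the patch: the switch to bf_get_le32()/
 * bf_set_le32() above reflects that EQE/CQE words sit in DMA memory in
 * little-endian byte order, so they must be converted to host order before
 * the shift/mask is applied.  Illustrative model (field names made up):
 */
#include <stdint.h>

#define VALID_SHIFT 31
#define VALID_MASK  0x1u

static inline uint32_t le32_to_host(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

/* host-endian accessor: only valid once the word is already in CPU order */
static inline uint32_t get_valid(uint32_t word)
{
	return (word >> VALID_SHIFT) & VALID_MASK;
}

/* little-endian accessor: byte-swap first, then extract the bit field */
static inline uint32_t get_valid_le32(uint32_t le_word)
{
	return (le32_to_host(le_word) >> VALID_SHIFT) & VALID_MASK;
}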
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_INIT_VPI:
case MBX_INIT_VFI:
case MBX_RESUME_RPI:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_READ_EVENT_LOG:
ret = mbxCommand;
break;
default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
- vpd_size = PAGE_SIZE;
+ vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
if (!vpd) {
rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
mb->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
- /* First copy command data to host SLIM area */
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = (uint8_t *)phba->mbox_ext
+ - (uint8_t *)phba->mbox;
+ }
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(pmbox->context2,
+ (uint8_t *)phba->mbox_ext,
+ pmbox->in_ext_byte_len);
+ }
+ /* Copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
+ /* Populate mbox extension offset word. */
+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ = MAILBOX_HBA_EXT_OFFSET;
+
+ /* Copy the mailbox extension data */
+ if (pmbox->in_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_to_slim(phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->context2, pmbox->in_ext_byte_len);
+
+ }
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+ pmbox->context2,
+ pmbox->out_ext_byte_len);
+ }
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
- pmbox->context2) {
- lpfc_memcpy_from_slim((void *)pmbox->context2,
- phba->MBslimaddr + DMP_RSP_OFFSET,
- mb->un.varDmp.word_cnt);
+ /* Copy the mailbox extension data */
+ if (pmbox->out_ext_byte_len && pmbox->context2) {
+ lpfc_memcpy_from_slim(pmbox->context2,
+ phba->MBslimaddr +
+ MAILBOX_HBA_EXT_OFFSET,
+ pmbox->out_ext_byte_len);
}
}
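/*
 * Editorial sketch, not part of the patch: the extension-copy logic above
 * depends on the new LPFC_MBOXQ_t fields added later in this diff
 * (in_ext_byte_len, out_ext_byte_len, mbox_offset_word, context2).  A
 * hypothetical caller-side setup, modelled with a stand-in struct rather
 * than the real mailbox type:
 */
#include <stdint.h>

struct ext_mbox {                   /* stand-in for the LPFC_MBOXQ_t fields */
	uint16_t in_ext_byte_len;   /* bytes copied into the extension area */
	uint16_t out_ext_byte_len;  /* bytes copied back after completion */
	uint8_t  mbox_offset_word;  /* mailbox word holding the ext. offset */
	void    *context2;          /* caller's buffer for extension data */
};

static void prepare_ext_mbox(struct ext_mbox *mb, void *buf,
			     uint16_t in_len, uint16_t out_len,
			     uint8_t offset_word)
{
	mb->context2 = buf;
	mb->in_ext_byte_len = in_len;
	mb->out_ext_byte_len = out_len;
	mb->mbox_offset_word = offset_word;
}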
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
- spin_unlock_irq(&phba->hbalock);
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- }
+ } else
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
+ if (pmb->out_ext_byte_len &&
+ pmb->context2)
+ lpfc_sli_pcimem_bcopy(
+ phba->mbox_ext,
+ pmb->context2,
+ pmb->out_ext_byte_len);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+ if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+ if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
- bf_get(lpfc_eqe_major_code, eqe),
- bf_get(lpfc_eqe_minor_code, eqe));
+ bf_get_le32(lpfc_eqe_major_code, eqe),
+ bf_get_le32(lpfc_eqe_minor_code, eqe));
return;
}
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
/* Get the reference to the corresponding CQ */
- cqid = bf_get(lpfc_eqe_resource_id, eqe);
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
struct lpfc_dmabuf *dmabuf;
int x, total_qe_count;
void *dma_pointer;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
if (!queue)
return NULL;
- queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+ queue->page_count = (ALIGN(entry_size * entry_count,
+ hw_page_size))/hw_page_size;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
- PAGE_SIZE, &dmabuf->phys,
+ hw_page_size, &dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
- memset(dmabuf->virt, 0, PAGE_SIZE);
+ memset(dmabuf->virt, 0, hw_page_size);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
dma_pointer = dmabuf->virt;
for (; total_qe_count < entry_count &&
- dma_pointer < (PAGE_SIZE + dmabuf->virt);
+ dma_pointer < (hw_page_size + dmabuf->virt);
total_qe_count++, dma_pointer += entry_size) {
queue->qe[total_qe_count].address = dma_pointer;
}
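/*
 * Editorial sketch, not part of the patch: the page-count arithmetic above
 * rounds the total queue footprint up to the hardware page size reported by
 * the port, falling back to a 4 KiB SLI4 page when the parameters are not
 * supported, then divides.  A self-contained model:
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

static uint32_t queue_page_count(uint32_t entry_size, uint32_t entry_count,
				 uint32_t hw_page_size)
{
	if (hw_page_size == 0)
		hw_page_size = 4096;	/* assumed SLI4 default page size */
	return ALIGN_UP(entry_size * entry_count, hw_page_size) / hw_page_size;
}

int main(void)
{
	/* e.g. 256 queue entries of 16 bytes each fit in one 4 KiB page */
	printf("%u\n", queue_page_count(16, 256, 0));
	return 0;
}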
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
break;
}
list_for_each_entry(dmabuf, &eq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9791,9 +9845,70 @@ out:
}
/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext command fails on older FW generations. Its purpose is
+ * otherwise identical to mq_create_ext.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+ LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+ struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_dmabuf *dmabuf;
+ int length;
+
+ length = (sizeof(struct lpfc_mbx_mq_create) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MQ_CREATE,
+ length, LPFC_SLI4_MBX_EMBED);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+ mq->page_count);
+ bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ switch (mq->entry_count) {
+ case 16:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_16);
+ break;
+ case 32:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_32);
+ break;
+ case 64:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_64);
+ break;
+ case 128:
+ bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ LPFC_MQ_CNT_128);
+ break;
+ }
+ list_for_each_entry(dmabuf, &mq->page_list, list) {
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+}
+
+/**
* lpfc_mq_create - Create a mailbox Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
*
* This function creates a mailbox queue, as detailed in @mq, on a port,
* described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
* memory this function will return ENOMEM. If the queue create mailbox command
* fails this function will return ENXIO.
**/
-uint32_t
+int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_mq_create *mq_create;
+ struct lpfc_mbx_mq_create_ext *mq_create_ext;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
- length = (sizeof(struct lpfc_mbx_mq_create) -
+ length = (sizeof(struct lpfc_mbx_mq_create_ext) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_MQ_CREATE,
+ LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
length, LPFC_SLI4_MBX_EMBED);
- mq_create = &mbox->u.mqe.un.mq_create;
- bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+ mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+ bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
mq->page_count);
- bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
- cq->queue_id);
- bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+ 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+ &mq_create_ext->u.request, 1);
+ bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
return -EINVAL;
/* otherwise default to smallest count (drop through) */
case 16:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_16);
break;
case 32:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_32);
break;
case 64:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_64);
break;
case 128:
- bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+ bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
LPFC_MQ_CNT_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
- mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+ memset(dmabuf->virt, 0, hw_page_size);
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
- mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+ mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create_ext->u.response);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2795 MQ_CREATE_EXT failed with "
+ "status x%x. Failback to MQ_CREATE.\n",
+ rc);
+ lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+ mq_create = &mbox->u.mqe.un.mq_create;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+ mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+ &mq_create->u.response);
+ }
+
/* The IOCTL status is embedded in the mailbox subheader. */
- shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
status = -ENXIO;
goto out;
}
- mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
if (mq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
list_for_each_entry(dmabuf, &wq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
if (hrq->entry_count != drq->entry_count)
return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
/* Calculate the requested length of the dma memory */
reqlen = cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
- if (reqlen > PAGE_SIZE) {
+ if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* This routine does not require any locks. Its usage is expected
* to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
* Returns
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
/* Reset FCF round robin index bmask for new scan */
- if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
memset(phba->fcf.fcf_rr_bmask, 0,
sizeof(*phba->fcf.fcf_rr_bmask));
+ phba->fcf.eligible_fcf_cnt = 0;
+ }
error = 0;
}
fail_fcf_scan:
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
(mb->u.mb.mbxCommand == MBX_REG_VPI))
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ ndlp = (struct lpfc_nodelist *) mb->context2;
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ mb->context2 = NULL;
+ }
+ /* Unregister the RPI when mailbox complete */
+ mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ }
}
spin_unlock_irq(&phba->hbalock);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index b4a639c4761..e3792151ca0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
struct lpfc_acqe_link acqe_link;
struct lpfc_acqe_fcoe acqe_fcoe;
struct lpfc_acqe_dcbx acqe_dcbx;
+ struct lpfc_acqe_grp5 acqe_grp5;
struct lpfc_rcqe rcqe_cmpl;
struct sli4_wcqe_xri_aborted wcqe_axri;
struct lpfc_wcqe_complete wcqe_cmpl;
@@ -110,6 +111,9 @@ typedef struct lpfcMboxq {
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
+ uint16_t in_ext_byte_len;
+ uint16_t out_ext_byte_len;
+ uint8_t mbox_offset_word;
struct lpfc_mcqe mcqe;
struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4a35e7b9bc5..58bb4c81b54 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -162,6 +162,7 @@ struct lpfc_fcf {
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
uint16_t fcf_rr_init_indx;
+ uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
struct timer_list redisc_wait;
@@ -492,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
-uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_queue *, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+ struct lpfc_queue *, uint32_t);
uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 013deec5dae..5294c3a515a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.10"
+#define LPFC_DRIVER_VERSION "8.3.12"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ffd575c379f..ab91359bde2 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -763,7 +763,9 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT,
+ if (!(port_iterator->load_flag & FC_UNLOADING))
+ lpfc_printf_vlog(port_iterator, KERN_ERR,
+ LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4bf7edca9e6..0b6e3228610 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -91,12 +91,15 @@ static struct proc_dir_entry *mega_proc_dir_entry;
/* For controller re-ordering */
static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
+static long
+megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
/*
* The File Operations structure for the serial/ioctl interface of the driver
*/
static const struct file_operations megadev_fops = {
.owner = THIS_MODULE,
- .ioctl = megadev_ioctl,
+ .unlocked_ioctl = megadev_unlocked_ioctl,
.open = megadev_open,
};
@@ -3302,8 +3305,7 @@ megadev_open (struct inode *inode, struct file *filep)
* controller.
*/
static int
-megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
adapter_t *adapter;
nitioctl_t uioc;
@@ -3694,6 +3696,18 @@ freemem_and_return:
return 0;
}
+static long
+megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = megadev_ioctl(filep, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
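/*
 * Editorial sketch, not part of the patch: the wrapper above keeps the big
 * kernel lock to preserve the old .ioctl serialization.  A driver with its
 * own locking would typically take a private mutex instead; the names below
 * are illustrative (not megaraid code) and assume <linux/fs.h> and
 * <linux/mutex.h>.
 */
static DEFINE_MUTEX(mydev_ioctl_mutex);

static long
mydev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&mydev_ioctl_mutex);
	ret = mydev_ioctl(filep, cmd, arg);	/* old handler, minus the inode */
	mutex_unlock(&mydev_ioctl_mutex);

	return ret;
}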
+
/**
* mega_m_to_n()
* @arg - user address
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index d310f49d077..2b4a048cadf 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -1013,8 +1013,7 @@ static void mega_8_to_40ld (mraid_inquiry *inquiry,
mega_inquiry3 *enquiry3, mega_product_info *);
static int megadev_open (struct inode *, struct file *);
-static int megadev_ioctl (struct inode *, struct file *, unsigned int,
- unsigned long);
+static int megadev_ioctl (struct file *, unsigned int, unsigned long);
static int mega_m_to_n(void __user *, nitioctl_t *);
static int mega_n_to_m(void __user *, megacmd_t *);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 36e0b7d05c1..41f82f76d88 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -22,7 +22,7 @@
// Entry points for char node driver
static int mraid_mm_open(struct inode *, struct file *);
-static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long);
+static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
// routines to convert to and from the old the format
@@ -70,7 +70,7 @@ static wait_queue_head_t wait_q;
static const struct file_operations lsi_fops = {
.open = mraid_mm_open,
- .ioctl = mraid_mm_ioctl,
+ .unlocked_ioctl = mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mraid_mm_compat_ioctl,
#endif
@@ -110,8 +110,7 @@ mraid_mm_open(struct inode *inode, struct file *filep)
* @arg : user ioctl packet
*/
static int
-mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
uioc_t *kioc;
char signature[EXT_IOCTL_SIGN_SZ] = {0};
@@ -218,6 +217,19 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
return rval;
}
+static long
+mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
+ lock_kernel();
+ err = mraid_mm_ioctl(filep, cmd, arg);
+ unlock_kernel();
+
+ return err;
+}
/**
* mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
@@ -1225,7 +1237,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
{
int err;
- err = mraid_mm_ioctl(NULL, filep, cmd, arg);
+ err = mraid_mm_ioctl(filep, cmd, arg);
return err;
}
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index ba8e128de23..bbb7e4bf30a 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
# Kernel configuration file for the MPT2SAS
#
# This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2009 LSI Corporation
+# Copyright (C) 2007-2010 LSI Corporation
# (mailto:DL-MPTFusionLinux@lsi.com)
# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 9958d847a88..dada0a13223 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cf0ac9f40c9..d4e9d6f8452 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index c4adf76b49d..bd6c92b5fae 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -2,7 +2,7 @@
Fusion-MPT MPI 2.0 Header File Change History
==============================
- Copyright (c) 2000-2009 LSI Corporation.
+ Copyright (c) 2000-2010 LSI Corporation.
---------------------------------------
Header Set Release Version: 02.00.14
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 6541945e97c..220bf65a921 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 754938422f6..f18f114922b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 73fcdbf9263..686b09b8121 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2009 LSI Corporation.
+ * Copyright (c) 2000-2010 LSI Corporation.
*
*
* Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 88e6eebc315..0ec1ed389c2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -58,6 +58,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -285,6 +286,9 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
return;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
switch (ioc_status) {
/****************************************************************************
@@ -517,8 +521,18 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
desc = "IR Operation Status";
break;
case MPI2_EVENT_SAS_DISCOVERY:
- desc = "Discovery";
- break;
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ printk("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ printk("\n");
+ return;
+ }
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
desc = "SAS Broadcast Primitive";
break;
@@ -1243,6 +1257,9 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
goto out_fail;
}
+ /* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
if (_base_config_dma_addressing(ioc, pdev) != 0) {
@@ -1253,7 +1270,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
}
for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
pio_chip = (u64)pci_resource_start(pdev, i);
@@ -1261,15 +1278,18 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
} else {
if (memap_sz)
continue;
- ioc->chip_phys = pci_resource_start(pdev, i);
- chip_phys = (u64)ioc->chip_phys;
- memap_sz = pci_resource_len(pdev, i);
- ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- printk(MPT2SAS_ERR_FMT "unable to map adapter "
- "memory!\n", ioc->name);
- r = -EINVAL;
- goto out_fail;
+ /* verify memory resource is valid before using */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ printk(MPT2SAS_ERR_FMT "unable to map "
+ "adapter memory!\n", ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
}
}
}
@@ -1295,6 +1315,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->chip_phys = 0;
ioc->pci_irq = -1;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return r;
}
@@ -1898,7 +1919,10 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- kfree(ioc->scsi_lookup);
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
}
@@ -2110,11 +2134,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
- sizeof(struct request_tracker), GFP_KERNEL);
+ sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
- printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
- ioc->name);
+ printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+ "sz(%d)\n", ioc->name, (int)sz);
goto out;
}
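/*
 * Editorial sketch, not part of the patch: the switch from kcalloc() to
 * __get_free_pages() above is paired with get_order(), which picks the
 * smallest power-of-two number of pages covering the request so that large
 * tracker arrays no longer have to fit a single slab allocation.  A
 * user-space model of that calculation:
 */
#include <stddef.h>

static unsigned int order_for(size_t size, size_t page_size)
{
	unsigned int order = 0;

	/* smallest "order" such that (page_size << order) >= size */
	while ((page_size << order) < size)
		order++;
	return order;
}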
@@ -3006,8 +3032,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
* since epoch ~ midnight January 1, 1970.
*/
do_gettimeofday(&current_time);
- mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
- (current_time.tv_usec >> 3);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
if (ioc->logging_level & MPT_DEBUG_INIT) {
u32 *mfp;
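/*
 * Editorial sketch, not part of the patch: the TimeStamp fix above matters
 * because "tv_usec >> 3" divides by 8 rather than 1000, and the field
 * carries a 64-bit millisecond count in little-endian byte order.  The
 * intended value, computed in user-space terms:
 */
#include <stdint.h>
#include <sys/time.h>

static uint64_t epoch_msec(const struct timeval *tv)
{
	/* milliseconds since the epoch */
	return (uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
}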
@@ -3179,7 +3205,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request->VP_ID = 0;
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
- le32_to_cpu(ioc->event_masks[i]);
+ cpu_to_le32(ioc->event_masks[i]);
mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
@@ -3516,7 +3542,9 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
__func__));
_base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
if (ioc->pci_irq) {
synchronize_irq(pdev->irq);
free_irq(ioc->pci_irq, ioc);
@@ -3527,6 +3555,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->pci_irq = -1;
ioc->chip_phys = 0;
pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return;
}
@@ -3560,8 +3589,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
- if (!ioc->pfacts)
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
goto out_free_resources;
+ }
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i, CAN_SLEEP);
@@ -3607,6 +3638,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->ctl_cmds.mutex);
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ init_completion(&ioc->shost_recovery_done);
+
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
ioc->event_masks[i] = -1;
@@ -3639,6 +3679,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
kfree(ioc->base_cmds.reply);
kfree(ioc->ctl_cmds.reply);
@@ -3646,6 +3687,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.reply = NULL;
ioc->base_cmds.reply = NULL;
ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
ioc->transport_cmds.reply = NULL;
ioc->config_cmds.reply = NULL;
ioc->pfacts = NULL;
@@ -3675,6 +3717,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
kfree(ioc->base_cmds.reply);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
kfree(ioc->config_cmds.reply);
}
@@ -3714,7 +3757,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
ioc->config_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
- ioc->config_cmds.smid = USHORT_MAX;
+ ioc->config_cmds.smid = USHRT_MAX;
complete(&ioc->config_cmds.done);
}
break;
@@ -3811,9 +3854,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
+ complete(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (!r)
- _base_reset_handler(ioc, MPT2_IOC_RUNNING);
return r;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index e18b0544c38..b4afe431ac1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
-#define MPT2SAS_MAJOR_VERSION 04
+#define MPT2SAS_DRIVER_VERSION "05.100.00.02"
+#define MPT2SAS_MAJOR_VERSION 05
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 01
-#define MPT2SAS_RELEASE_VERSION 00
+#define MPT2SAS_BUILD_VERSION 00
+#define MPT2SAS_RELEASE_VERSION 02
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
@@ -119,7 +119,6 @@
#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
-#define MPT2_IOC_RUNNING 4 /* shost running */
/*
* logging format
@@ -260,16 +259,6 @@ struct _internal_cmd {
u16 smid;
};
-/*
- * SAS Topology Structures
- */
-
-#define MPTSAS_STATE_TR_SEND 0x0001
-#define MPTSAS_STATE_TR_COMPLETE 0x0002
-#define MPTSAS_STATE_CNTRL_SEND 0x0004
-#define MPTSAS_STATE_CNTRL_COMPLETE 0x0008
-
-#define MPT2SAS_REQ_SAS_CNTRL 0x0010
/**
* struct _sas_device - attached device information
@@ -307,7 +296,6 @@ struct _sas_device {
u16 slot;
u8 hidden_raid_component;
u8 responding;
- u16 state;
};
/**
@@ -378,6 +366,7 @@ struct _sas_port {
* @phy_id: unique phy id
* @handle: device handle for this phy
* @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
*/
struct _sas_phy {
struct list_head port_siblings;
@@ -387,6 +376,7 @@ struct _sas_phy {
u8 phy_id;
u16 handle;
u16 attached_handle;
+ u8 phy_belongs_to_port;
};
/**
@@ -603,7 +593,6 @@ struct MPT2SAS_ADAPTER {
/* fw event handler */
char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
- u8 fw_events_off;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
@@ -611,6 +600,7 @@ struct MPT2SAS_ADAPTER {
int aen_event_read_flag;
u8 broadcast_aen_busy;
u8 shost_recovery;
+ struct completion shost_recovery_done;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
u8 ignore_loginfos;
@@ -688,7 +678,8 @@ struct MPT2SAS_ADAPTER {
dma_addr_t request_dma;
u32 request_dma_sz;
struct request_tracker *scsi_lookup;
- spinlock_t scsi_lookup_lock;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -700,7 +691,7 @@ struct MPT2SAS_ADAPTER {
u16 max_sges_in_chain_message;
u16 chains_needed_per_io;
u16 chain_offset_value_for_main_message;
- u16 chain_depth;
+ u32 chain_depth;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -814,8 +805,9 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
/* scsih shared API */
u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
-void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout);
+int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, struct scsi_cmnd *scmd);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index cf44b355bc9..c65442982d7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -258,7 +258,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
#endif
- ioc->config_cmds.smid = USHORT_MAX;
+ ioc->config_cmds.smid = USHRT_MAX;
complete(&ioc->config_cmds.done);
return 1;
}
@@ -1390,12 +1390,12 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
goto out;
for (i = 0; i < config_page->NumElements; i++) {
- if ((config_page->ConfigElement[i].ElementFlags &
+ if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
continue;
- if (config_page->ConfigElement[i].PhysDiskDevHandle ==
- pd_handle) {
+ if (le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle) == pd_handle) {
*volume_handle = le16_to_cpu(config_page->
ConfigElement[i].VolDevHandle);
r = 0;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index fa9bf83819d..d88e9756d8f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -533,7 +533,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
if (!found) {
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
- desc, tm_request->DevHandle, lun));
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -551,7 +551,8 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, tm_request->DevHandle, lun, tm_request->TaskMID));
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -647,9 +648,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
- if (!mpi_request->FunctionDependent1 ||
- mpi_request->FunctionDependent1 >
- cpu_to_le16(ioc->facts.MaxDevHandle)) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
ret = -EINVAL;
mpt2sas_base_free_smid(ioc, smid);
goto out;
@@ -743,8 +744,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_base_get_sense_buffer_dma(ioc, smid);
priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
break;
}
case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -752,6 +756,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
+ "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
tm_request->TaskType ==
@@ -762,7 +770,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
}
}
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
mpt2sas_base_put_smid_hi_priority(ioc, smid);
@@ -818,7 +825,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)mpi_request;
- mutex_unlock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
tm_request->DevHandle));
} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
@@ -897,14 +903,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
printk(MPT2SAS_INFO_FMT "issue target reset: handle "
"= (0x%04x)\n", ioc->name,
- mpi_request->FunctionDependent1);
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
mpt2sas_scsih_issue_tm(ioc,
- mpi_request->FunctionDependent1, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
+ NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
} else
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1373,7 +1378,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
"dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma, mpi_request->BufferLength));
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -2334,8 +2340,8 @@ _ctl_version_nvdata_persistent_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
_ctl_version_nvdata_persistent_show, NULL);
@@ -2354,8 +2360,8 @@ _ctl_version_nvdata_default_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%02xh\n",
- le16_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
_ctl_version_nvdata_default_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8a5eeb1a5c8..69916e46e04 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 5308a25cb30..3dcddfeb6f4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index be171ed682e..c5ff26a2a51 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/slab.h>
@@ -109,14 +110,16 @@ struct sense_info {
};
+#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @VF_ID: virtual function id
* @VP_ID: virtual port id
- * @host_reset_handling: handling events during host reset
* @ignore: flag meaning this event has been marked to ignore
* @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
* @event_data: reply event data payload follows
@@ -125,11 +128,11 @@ struct sense_info {
*/
struct fw_event_work {
struct list_head list;
- struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
u8 VF_ID;
u8 VP_ID;
- u8 host_reset_handling;
u8 ignore;
u16 event;
void *event_data;
@@ -482,27 +485,17 @@ struct _sas_device *
mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- /* check the sas_device_init_list */
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
- /* then check the sas_device_list */
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->sas_address != sas_address)
- continue;
- r = sas_device;
- goto out;
- }
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -517,28 +510,17 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
static struct _sas_device *
_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
- struct _sas_device *sas_device, *r;
+ struct _sas_device *sas_device;
- r = NULL;
- if (ioc->wait_for_port_enable_to_complete) {
- list_for_each_entry(sas_device, &ioc->sas_device_init_list,
- list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- } else {
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
- }
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
- out:
- return r;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
}
/**
@@ -555,10 +537,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
{
unsigned long flags;
+ if (!sas_device)
+ return;
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_del(&sas_device->list);
- memset(sas_device, 0, sizeof(struct _sas_device));
- kfree(sas_device);
+ if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device->sas_address)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -988,7 +975,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
u32 chain_offset;
u32 chain_length;
u32 chain_flags;
- u32 sges_left;
+ int sges_left;
u32 sges_in_segment;
u32 sgl_flags;
u32 sgl_flags_last_element;
@@ -1009,7 +996,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (!sges_left) {
+ if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
" failed: request for %d bytes!\n", scsi_bufflen(scmd));
return -ENOMEM;
@@ -1395,7 +1382,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
flags = le16_to_cpu(sas_device_pg0.Flags);
- device_info = le16_to_cpu(sas_device_pg0.DeviceInfo);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
sdev_printk(KERN_INFO, sdev,
"atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
@@ -1963,65 +1950,78 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
}
+
/**
* mpt2sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
- * Context: The calling function needs to acquire the tm_cmds.mutex
+ * Context: user
*
* A generic API for sending task management requests to firmware.
*
- * The ioc->tm_cmds.status flag should be MPT2_CMD_NOT_USED before calling
- * this API.
- *
* The callback index is set inside `ioc->tm_cb_idx`.
*
- * Return nothing.
+ * Return SUCCESS or FAILED.
*/
-void
-mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
- u8 type, u16 smid_task, ulong timeout)
+int
+mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ struct scsi_cmnd *scmd)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
unsigned long timeleft;
+ struct scsi_cmnd *scmd_lookup;
+ int rc;
+ mutex_lock(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
- if (ioc->shost_recovery) {
+ if (ioc->shost_recovery || ioc->remove_host) {
printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
__func__, ioc->name);
- return;
+ rc = FAILED;
+ goto err_out;
}
ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
"active!\n", ioc->name));
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt2sas_base_fault_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- goto issue_host_reset;
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ goto err_out;
}
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return;
+ rc = FAILED;
+ goto err_out;
}
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
@@ -2035,21 +2035,24 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
mpi_request->TaskMID = cpu_to_le16(smid_task);
- mpi_request->VP_ID = 0; /* TODO */
- mpi_request->VF_ID = 0;
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt2sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
mpt2sas_base_put_smid_hi_priority(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
- mpt2sas_scsih_clear_tm_flag(ioc, handle);
if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT2_CMD_RESET))
- goto issue_host_reset;
+ if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
+ mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = SUCCESS;
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
}
if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
@@ -2059,12 +2062,57 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
le32_to_cpu(mpi_reply->IOCLogInfo),
le32_to_cpu(mpi_reply->TerminationCount)));
- if (ioc->logging_level & MPT_DEBUG_TM)
+ if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
}
- return;
- issue_host_reset:
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+
+ /* sanity check:
+ * Check to see whether the commands were terminated.
+ * This is only needed for eh callbacks, hence the scmd check.
+ */
+ rc = FAILED;
+ if (scmd == NULL)
+ goto bypass_sanity_checks;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
+ if (scmd_lookup && (scmd_lookup->serial_number ==
+ scmd->serial_number))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ }
+
+ bypass_sanity_checks:
+
+ mpt2sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
}
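
Editor's aside: a minimal sketch of how an error-handling callback consumes the reworked API after this change; the handle lookup shown here is illustrative only and simply mirrors the eh callers updated in the hunks below.

	/* illustrative caller (sketch) - mpt2sas_scsih_issue_tm now takes the
	 * tm_cmds mutex itself, runs the post-TM sanity check against the scsi
	 * lookup table when an scmd is supplied, and returns SUCCESS or FAILED */
	static int example_eh_device_reset(struct scsi_cmnd *scmd)
	{
		struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
		struct MPT2SAS_DEVICE *priv = scmd->device->hostdata; /* assumed valid */
		u16 handle = priv->sas_target->handle;

		return mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
		    scmd->device->id, scmd->device->lun,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
	}
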
/**
@@ -2081,7 +2129,6 @@ _scsih_abort(struct scsi_cmnd *scmd)
u16 smid;
u16 handle;
int r;
- struct scsi_cmnd *scmd_lookup;
printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n",
ioc->name, scmd);
@@ -2116,19 +2163,10 @@ _scsih_abort(struct scsi_cmnd *scmd)
mpt2sas_halt_firmware(ioc);
- mutex_lock(&ioc->tm_cmds.mutex);
handle = sas_device_priv_data->sas_target->handle;
- mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
- /* sanity check - see whether command actually completed */
- scmd_lookup = _scsih_scsi_lookup_get(ioc, smid);
- if (scmd_lookup && (scmd_lookup->serial_number == scmd->serial_number))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n",
@@ -2185,22 +2223,9 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
- 30);
-
- /*
- * sanity check see whether all commands to this device been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
- scmd->device->lun, scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
@@ -2257,21 +2282,9 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
-
- /*
- * sanity check see whether all commands to this target been
- * completed
- */
- if (_scsih_scsi_lookup_find_by_target(ioc, scmd->device->id,
- scmd->device->channel))
- r = FAILED;
- else
- r = SUCCESS;
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, scmd);
out:
printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n",
@@ -2325,8 +2338,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
- INIT_WORK(&fw_event->work, _firmware_event_work);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
+ INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+ queue_delayed_work(ioc->firmware_event_thread,
+ &fw_event->delayed_work, 0);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2353,61 +2367,53 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
+
/**
- * _scsih_fw_event_add - requeue an event
+ * _scsih_queue_rescan - queue a topology rescan from user context
* @ioc: per adapter object
- * @fw_event: object describing the event
- * Context: This function will acquire ioc->fw_event_lock.
*
* Return nothing.
*/
static void
-_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
- *fw_event, unsigned long delay)
+_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
- if (ioc->firmware_event_thread == NULL)
- return;
+ struct fw_event_work *fw_event;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->wait_for_port_enable_to_complete)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
}
/**
- * _scsih_fw_event_off - turn flag off preventing event handling
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
* @ioc: per adapter object
*
- * Used to prevent handling of firmware events during adapter reset
- * driver unload.
+ * Walk the firmware event queue, either killing timers or waiting
+ * for outstanding events to complete.
*
* Return nothing.
*/
static void
-_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
+ struct fw_event_work *fw_event, *next;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 1;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-
-}
-
-/**
- * _scsih_fw_event_on - turn flag on allowing firmware event handling
- * @ioc: per adapter object
- *
- * Returns nothing.
- */
-static void
-_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
-{
- unsigned long flags;
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- ioc->fw_events_off = 0;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
}
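
Editor's aside: a compressed sketch of the producer/cleanup contract introduced by the switch to delayed_work, using the field names added in this patch; the consumer side appears later in _firmware_event_work.

	/* queueing side: every firmware event is now a delayed_work, queued
	 * with zero delay */
	INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
	queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, 0);

	/* cleanup side: work that has not started yet is cancelled and freed
	 * here; work already running is only flagged, and frees itself once
	 * the handler sees cancel_pending_work set */
	if (cancel_delayed_work(&fw_event->delayed_work))
		_scsih_fw_event_free(ioc, fw_event);
	else
		fw_event->cancel_pending_work = 1;
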
/**
@@ -2571,25 +2577,24 @@ static void
_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
- struct MPT2SAS_TARGET *sas_target_priv_data;
u16 smid;
struct _sas_device *sas_device;
unsigned long flags;
struct _tr_list *delayed_tr;
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
return;
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- /* skip is hidden raid component */
- if (sas_device && sas_device->hidden_raid_component)
+ if (sas_device && sas_device->hidden_raid_component) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
if (!smid) {
@@ -2598,36 +2603,16 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
return;
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
- delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL;
- list_add_tail(&delayed_tr->list,
- &ioc->delayed_tr_list);
- if (sas_device && sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "DELAYED:tr:handle(0x%04x), "
- "(open)\n", handle));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- }
- return;
- }
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_SEND;
- sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 1;
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr:handle(0x%04x), (open)\n",
- handle));
- }
- } else {
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr:handle(0x%04x), (open)\n", ioc->name, handle));
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -2657,35 +2642,15 @@ static u8
_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
u8 msix_index, u32 reply)
{
- unsigned long flags;
- u16 handle;
- struct _sas_device *sas_device;
Mpi2SasIoUnitControlReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
- handle = le16_to_cpu(mpi_reply->DevHandle);
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
- if (sas_device->starget)
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget,
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "sc_complete:handle(0x%04x), "
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- }
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
return 1;
}
@@ -2709,87 +2674,63 @@ static u8
_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply)
{
- unsigned long flags;
u16 handle;
- struct _sas_device *sas_device;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt2sas_base_get_reply_virt_addr(ioc, reply);
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
- struct MPT2SAS_TARGET *sas_target_priv_data;
struct _tr_list *delayed_tr;
- u8 rc;
- handle = le16_to_cpu(mpi_reply->DevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device) {
- sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
- if (sas_device->starget) {
- dewtprintk(ioc, starget_printk(KERN_INFO,
- sas_device->starget, "tr_complete:handle(0x%04x), "
- "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
- "completed(%d)\n", sas_device->handle,
- (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
- "open" : "active",
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- if (sas_device->starget->hostdata) {
- sas_target_priv_data =
- sas_device->starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- }
- }
- } else {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
- "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->shost_recovery || ioc->remove_host) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+ "progress!\n", __func__, ioc->name));
+ return 1;
}
- if (!list_empty(&ioc->delayed_tr_list)) {
- delayed_tr = list_entry(ioc->delayed_tr_list.next,
- struct _tr_list, list);
- mpt2sas_base_free_smid(ioc, smid);
- if (delayed_tr->state & MPT2SAS_REQ_SAS_CNTRL)
- _scsih_tm_tr_send(ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- rc = 0; /* tells base_interrupt not to free mf */
- } else
- rc = 1;
-
- if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
- return rc;
-
- if (ioc->shost_recovery) {
- printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
- return rc;
+ mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
+ "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
}
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- return rc;
+ return 1;
}
- if (sas_device)
- sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
-
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
+ "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = mpi_reply->DevHandle;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
- return rc;
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt2sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0; /* tells base_interrupt not to free mf */
+ }
+ return 1;
}
/**
@@ -3021,25 +2962,32 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data || sas_target_priv_data->handle ==
- MPT2SAS_INVALID_DEVICE_HANDLE || sas_target_priv_data->deleted) {
+ /* invalid device handle */
+ if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- /* see if we are busy with task managment stuff */
- if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
- return SCSI_MLQUEUE_DEVICE_BUSY;
- else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
return SCSI_MLQUEUE_HOST_BUSY;
+ /* device busy with task management */
+ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3110,8 +3058,11 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
}
}
- mpt2sas_base_put_smid_scsi_io(ioc, smid,
- sas_device_priv_data->sas_target->handle);
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+ mpt2sas_base_put_smid_scsi_io(ioc, smid,
+ sas_device_priv_data->sas_target->handle);
+ else
+ mpt2sas_base_put_smid_default(ioc, smid);
return 0;
out:
@@ -3301,8 +3252,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
- "[0x%02x,0x%02x,0x%02x]\n", ioc->name, data.skey,
- data.asc, data.ascq);
+ "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
@@ -3356,7 +3307,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
mpi_request.SlotStatus =
- MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
mpi_request.DevHandle = cpu_to_le16(handle);
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
@@ -4008,6 +3959,134 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
}
/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_flags: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
+ "handle(0x%04x)\n", ioc->name, desc,
+ (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+static void
+_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)"
+ " to (0x%04x)!!!\n", sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ printk(MPT2SAS_ERR_FMT "device is not present "
+ "handle(0x%04x), flags!!!\n", ioc->name, handle);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return;
+ _scsih_ublock_io_device(ioc, handle);
+
+}
+
+/**
* _scsih_add_device - creating sas device object
* @ioc: per adapter object
* @handle: sas device handle
@@ -4045,6 +4124,8 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
@@ -4055,15 +4136,10 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- /* check if there were any issus with discovery */
- if (sas_device_pg0.AccessStatus ==
- MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- printk(MPT2SAS_ERR_FMT "AccessStatus = 0x%02x\n",
- ioc->name, sas_device_pg0.AccessStatus);
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
return -1;
- }
/* check if this is end device */
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -4073,17 +4149,14 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
return -1;
}
- sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_address);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device) {
- _scsih_ublock_io_device(ioc, handle);
+ if (sas_device)
return 0;
- }
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
@@ -4126,67 +4199,38 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
}
/**
- * _scsih_remove_device - removing sas device object
+ * _scsih_remove_pd_device - removing sas device pd object
* @ioc: per adapter object
- * @sas_device: the sas_device object
+ * @sas_device: the sas_device object
*
+ * For hidden raid components, the driver/firmware handshake is done from
+ * the hotplug work threads.
* Return nothing.
*/
static void
-_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
- *sas_device)
+_scsih_remove_pd_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
+ sas_device)
{
- struct MPT2SAS_TARGET *sas_target_priv_data;
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
- u16 device_handle, handle;
-
- if (!sas_device)
- return;
+ u16 vol_handle, handle;
- handle = sas_device->handle;
+ handle = sas_device.handle;
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
" sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
- (unsigned long long) sas_device->sas_address));
-
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- }
-
- if (ioc->remove_host || ioc->shost_recovery || !handle)
- goto out;
+ (unsigned long long) sas_device.sas_address));
- if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "target_reset handle(0x%04x)\n", ioc->name,
- handle));
- goto skip_tr;
- }
-
- /* Target Reset to flush out all the outstanding IO */
- device_handle = (sas_device->hidden_raid_component) ?
- sas_device->volume_handle : handle;
- if (device_handle) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
- "handle(0x%04x)\n", ioc->name, device_handle));
- mutex_lock(&ioc->tm_cmds.mutex);
- mpt2sas_scsih_issue_tm(ioc, device_handle, 0,
- MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
- "done: handle(0x%04x)\n", ioc->name, device_handle));
- if (ioc->shost_recovery)
- goto out;
- }
- skip_tr:
-
- if ((sas_device->state & MPTSAS_STATE_CNTRL_COMPLETE)) {
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
- "sas_cntrl handle(0x%04x)\n", ioc->name, handle));
- goto out;
- }
+ vol_handle = sas_device.volume_handle;
+ if (!vol_handle)
+ return;
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
+ "handle(0x%04x)\n", ioc->name, vol_handle));
+ mpt2sas_scsih_issue_tm(ioc, vol_handle, 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, NULL);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
+ "done: handle(0x%04x)\n", ioc->name, vol_handle));
+ if (ioc->shost_recovery)
+ return;
/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
@@ -4194,34 +4238,68 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
- mpi_request.DevHandle = handle;
- mpi_request.VF_ID = 0; /* TODO */
- mpi_request.VP_ID = 0;
+ mpi_request.DevHandle = cpu_to_le16(handle);
if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
- &mpi_request)) != 0) {
+ &mpi_request)) != 0)
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- }
dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
"(0x%04x), loginfo(0x%08x)\n", ioc->name,
le16_to_cpu(mpi_reply.IOCStatus),
le32_to_cpu(mpi_reply.IOCLogInfo)));
- out:
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle(0x%04x),"
+ " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
+ (unsigned long long) sas_device.sas_address));
+}
- _scsih_ublock_io_device(ioc, handle);
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct _sas_device sas_device_backup;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
- mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
- sas_device->sas_address_parent);
+ if (!sas_device)
+ return;
- printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
- "(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) sas_device->sas_address);
+ memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
_scsih_sas_device_remove(ioc, sas_device);
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle"
- "(0x%04x)\n", ioc->name, __func__, handle));
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
+
+ if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
+ sas_target_priv_data = sas_device_backup.starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+
+ if (sas_device_backup.hidden_raid_component)
+ _scsih_remove_pd_device(ioc, sas_device_backup);
+
+ _scsih_ublock_io_device(ioc, sas_device_backup.handle);
+
+ mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address,
+ sas_device_backup.sas_address_parent);
+
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
+ "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
+ (unsigned long long) sas_device_backup.sas_address);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
+ "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
+ sas_device_backup.handle, (unsigned long long)
+ sas_device_backup.sas_address));
}
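
Editor's aside: the removal path now snapshots the device before freeing it; a minimal sketch of the idea, reduced to the fields the remaining teardown actually uses.

	/* copy the entry to the stack, then unlink and free the original;
	 * everything after this point (PD handshake, transport port removal,
	 * logging) works from the copy and cannot touch freed memory */
	struct _sas_device backup;

	memcpy(&backup, sas_device, sizeof(backup));
	_scsih_sas_device_remove(ioc, sas_device);	/* list_del + kfree */
	mpt2sas_transport_port_remove(ioc, backup.sas_address,
	    backup.sas_address_parent);
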
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4331,7 +4409,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
if (!ioc->sas_hba.num_phys)
@@ -4370,7 +4448,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
"expander event\n", ioc->name));
return;
}
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host)
return;
phy_number = event_data->StartPhyNum + i;
reason_code = event_data->PHY[i].PhyStatus &
@@ -4393,8 +4471,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_transport_update_links(ioc, sas_address,
handle, phy_number, link_rate);
- if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
- _scsih_ublock_io_device(ioc, handle);
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, handle);
break;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -4520,10 +4600,10 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
event_data);
#endif
- if (!(event_data->ReasonCode ==
+ if (event_data->ReasonCode !=
MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
- event_data->ReasonCode ==
- MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET))
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -4630,7 +4710,6 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
__func__));
- mutex_lock(&ioc->tm_cmds.mutex);
termination_count = 0;
query_count = 0;
mpi_reply = ioc->tm_cmds.reply;
@@ -4654,8 +4733,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
lun = sas_device_priv_data->lun;
query_count++;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
@@ -4666,13 +4745,11 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
continue;
- mpt2sas_scsih_issue_tm(ioc, handle, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
termination_count += le32_to_cpu(mpi_reply->TerminationCount);
}
ioc->broadcast_aen_busy = 0;
- mutex_unlock(&ioc->tm_cmds.mutex);
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - exit, query_count = %d termination_count = %d\n",
@@ -5442,6 +5519,26 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
}
/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
* @sas_address: sas address
@@ -5467,10 +5564,13 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
if (sas_device->sas_address == sas_address &&
sas_device->slot == slot && sas_device->starget) {
sas_device->responding = 1;
- sas_device->state = 0;
starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
starget_printk(KERN_INFO, sas_device->starget,
"handle(0x%04x), sas_addr(0x%016llx), enclosure "
"logical id(0x%016llx), slot(%d)\n", handle,
@@ -5483,7 +5583,8 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
sas_device->handle);
sas_device->handle = handle;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
@@ -5558,6 +5659,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
raid_device->responding = 1;
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
@@ -5567,9 +5674,8 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
- starget = raid_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
goto out;
}
}
@@ -5694,13 +5800,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove_unresponding_devices - removing unresponding devices
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander;
@@ -5722,8 +5828,6 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
(unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot);
- /* invalidate the device handle */
- sas_device->handle = 0;
_scsih_remove_device(ioc, sas_device);
}
@@ -5774,32 +5878,33 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
- _scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT2_CMD_RESET;
+ mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
- _scsih_fw_event_on(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
+ _scsih_queue_rescan(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_sas_host_refresh(ioc);
+ _scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
break;
- case MPT2_IOC_RUNNING:
- dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
- "MPT2_IOC_RUNNING\n", ioc->name, __func__));
- _scsih_remove_unresponding_devices(ioc);
- break;
}
}
@@ -5815,21 +5920,28 @@ static void
_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event = container_of(work,
- struct fw_event_work, work);
+ struct fw_event_work, delayed_work.work);
unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host || fw_event->cancel_pending_work) {
_scsih_fw_event_free(ioc, fw_event);
return;
}
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- if (ioc->shost_recovery) {
- _scsih_fw_event_requeue(ioc, fw_event, 1000);
+ if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
+ _scsih_fw_event_free(ioc, fw_event);
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery) {
+ init_completion(&ioc->shost_recovery_done);
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ wait_for_completion(&ioc->shost_recovery_done);
+ } else
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ _scsih_remove_unresponding_sas_devices(ioc);
return;
}
@@ -5891,16 +6003,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
{
struct fw_event_work *fw_event;
Mpi2EventNotificationReply_t *mpi_reply;
- unsigned long flags;
u16 event;
+ u16 sz;
/* events turned off due to host reset or driver unloading */
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (ioc->fw_events_off || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (ioc->remove_host)
return 1;
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
event = le16_to_cpu(mpi_reply->Event);
@@ -5947,8 +6055,8 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
ioc->name, __FILE__, __LINE__, __func__);
return 1;
}
- fw_event->event_data =
- kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
if (!fw_event->event_data) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5957,7 +6065,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
}
memcpy(fw_event->event_data, mpi_reply->EventData,
- mpi_reply->EventDataLength*4);
+ sz);
fw_event->ioc = ioc;
fw_event->VF_ID = mpi_reply->VF_ID;
fw_event->VP_ID = mpi_reply->VP_ID;
@@ -6158,6 +6266,18 @@ _scsih_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
_scsih_ir_shutdown(ioc);
mpt2sas_base_detach(ioc);
@@ -6184,7 +6304,7 @@ _scsih_remove(struct pci_dev *pdev)
unsigned long flags;
ioc->remove_host = 1;
- _scsih_fw_event_off(ioc);
+ _scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->firmware_event_thread;
@@ -6557,6 +6677,122 @@ _scsih_resume(struct pci_dev *pdev)
}
#endif /* CONFIG_PM */
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ scsi_block_requests(ioc->shost);
+ mpt2sas_base_stop_watchdog(ioc);
+ mpt2sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ _scsih_remove(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pdev = pdev;
+ rc = mpt2sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt2sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+
+ printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
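
Editor's aside: for context, a summary (as a comment, not driver code) of how the PCI AER core drives these callbacks for the two channel states handled above.

	/*
	 * pci_channel_io_frozen:
	 *   error_detected() - block I/O, stop the watchdog, free resources,
	 *                      return PCI_ERS_RESULT_NEED_RESET
	 *   slot_reset()     - remap resources, hard-reset the IOC,
	 *                      return RECOVERED or DISCONNECT
	 *   resume()         - clear AER status, restart the watchdog,
	 *                      unblock I/O
	 * pci_channel_io_perm_failure:
	 *   error_detected() - tear the host down via _scsih_remove() and
	 *                      return PCI_ERS_RESULT_DISCONNECT
	 */
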
static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
@@ -6564,6 +6800,7 @@ static struct pci_driver scsih_driver = {
.probe = _scsih_probe,
.remove = __devexit_p(_scsih_remove),
.shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
.suspend = _scsih_suspend,
.resume = _scsih_resume,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index bd7ca2b49f8..2727c3b6510 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2009 LSI Corporation
+ * Copyright (C) 2007-2010 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -465,6 +465,85 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
return rc;
}
+
+/**
+ * _transport_delete_duplicate_port - (see below description)
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ * @phy_num: phy number
+ *
+ * This function is called when attempting to add a new port that is claiming
+ * the same phy resources already in use by another port. If we don't release
+ * the claimed phy resources, the sas transport layer will hang from the BUG
+ * in sas_port_add_phy.
+ *
+ * The reason we would hit this issue is because someone is changing the
+ * sas address of a device on the fly, while controller firmware sends
+ * EVENTs out of order when removing the previous instance of the device.
+ */
+static void
+_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, u64 sas_address, int phy_num)
+{
+ struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate;
+ struct _sas_phy *mpt2sas_phy;
+
+ printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), "
+ "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address,
+ phy_num);
+
+ mpt2sas_port_duplicate = NULL;
+ list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) {
+ dev_printk(KERN_ERR, &mpt2sas_port->port->dev,
+ "existing device at sas_addr(0x%016llx), num_phys(%d)\n",
+ (unsigned long long)
+ mpt2sas_port->remote_identify.sas_address,
+ mpt2sas_port->num_phys);
+ list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
+ port_siblings) {
+ dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev,
+ "phy_number(%d)\n", mpt2sas_phy->phy_id);
+ if (mpt2sas_phy->phy_id == phy_num)
+ mpt2sas_port_duplicate = mpt2sas_port;
+ }
+ }
+
+ if (!mpt2sas_port_duplicate)
+ return;
+
+ dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev,
+ "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n",
+ (unsigned long long)
+ mpt2sas_port_duplicate->remote_identify.sas_address, phy_num);
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ mpt2sas_transport_port_remove(ioc,
+ mpt2sas_port_duplicate->remote_identify.sas_address,
+ sas_node->sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * See the explanation above from _transport_delete_duplicate_port
+ */
+static void
+_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++)
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ if (sas_node->phy[i].phy_belongs_to_port)
+ _transport_delete_duplicate_port(ioc, sas_node,
+ sas_address, i);
+}
+
/**
* mpt2sas_transport_port_add - insert port to the list
* @ioc: per adapter object
@@ -522,6 +601,9 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ _transport_sanity_check(ioc, sas_node,
+ mpt2sas_port->remote_identify.sas_address);
+
for (i = 0; i < sas_node->num_phys; i++) {
if (sas_node->phy[i].remote_identify.sas_address !=
mpt2sas_port->remote_identify.sas_address)
@@ -553,6 +635,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
sas_port_add_phy(port, mpt2sas_phy->phy);
+ mpt2sas_phy->phy_belongs_to_port = 1;
}
mpt2sas_port->port = port;
@@ -651,6 +734,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
(unsigned long long)
mpt2sas_port->remote_identify.sas_address,
mpt2sas_phy->phy_id);
+ mpt2sas_phy->phy_belongs_to_port = 0;
sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
list_del(&mpt2sas_phy->port_siblings);
}
@@ -1341,7 +1425,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
req->sense_len = sizeof(*mpi_reply);
req->resid_len = 0;
- rsp->resid_len -= mpi_reply->ResponseDataLength;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
} else {
dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
"%s - no reply\n", ioc->name, __func__));
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index d722235111a..c29d0dbb966 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -13,112 +13,118 @@
#include "wd33c93.h"
#include "mvme147.h"
-#include<linux/stat.h>
+#include <linux/stat.h>
-#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
-static struct Scsi_Host *mvme147_host = NULL;
-
-static irqreturn_t mvme147_intr (int irq, void *dummy)
+static irqreturn_t mvme147_intr(int irq, void *data)
{
- if (irq == MVME147_IRQ_SCSI_PORT)
- wd33c93_intr (mvme147_host);
- else
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return IRQ_HANDLED;
+ struct Scsi_Host *instance = data;
+
+ if (irq == MVME147_IRQ_SCSI_PORT)
+ wd33c93_intr(instance);
+ else
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ return IRQ_HANDLED;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
- unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-
- /* setup dma direction */
- if (!dir_in)
- flags |= 0x04;
-
- /* remember direction */
- HDATA(mvme147_host)->dma_dir = dir_in;
-
- if (dir_in)
- /* invalidate any cache */
- cache_clear (addr, cmd->SCp.this_residual);
- else
- /* push any dirty cache */
- cache_push (addr, cmd->SCp.this_residual);
-
- /* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1<<24);
- m147_pcc->dma_dadr = addr;
- m147_pcc->dma_cntrl = flags;
-
- /* return success */
- return 0;
+ struct Scsi_Host *instance = cmd->device->host;
+ struct WD33C93_hostdata *hdata = shost_priv(instance);
+ unsigned char flags = 0x01;
+ unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+
+ /* setup dma direction */
+ if (!dir_in)
+ flags |= 0x04;
+
+ /* remember direction */
+ hdata->dma_dir = dir_in;
+
+ if (dir_in) {
+ /* invalidate any cache */
+ cache_clear(addr, cmd->SCp.this_residual);
+ } else {
+ /* push any dirty cache */
+ cache_push(addr, cmd->SCp.this_residual);
+ }
+
+ /* start DMA */
+ m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_dadr = addr;
+ m147_pcc->dma_cntrl = flags;
+
+ /* return success */
+ return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
- int status)
+ int status)
{
- m147_pcc->dma_cntrl = 0;
+ m147_pcc->dma_cntrl = 0;
}
int mvme147_detect(struct scsi_host_template *tpnt)
{
- static unsigned char called = 0;
- wd33c93_regs regs;
-
- if (!MACH_IS_MVME147 || called)
- return 0;
- called++;
-
- tpnt->proc_name = "MVME147";
- tpnt->proc_info = &wd33c93_proc_info;
-
- mvme147_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata));
- if (!mvme147_host)
- goto err_out;
-
- mvme147_host->base = 0xfffe4000;
- mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
- regs.SASR = (volatile unsigned char *)0xfffe4000;
- regs.SCMD = (volatile unsigned char *)0xfffe4001;
- HDATA(mvme147_host)->no_sync = 0xff;
- HDATA(mvme147_host)->fast = 0;
- HDATA(mvme147_host)->dma_mode = CTRL_DMA;
- wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
-
- if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr))
- goto err_unregister;
- if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, "MVME147 SCSI DMA", mvme147_intr))
- goto err_free_irq;
+ static unsigned char called = 0;
+ struct Scsi_Host *instance;
+ wd33c93_regs regs;
+ struct WD33C93_hostdata *hdata;
+
+ if (!MACH_IS_MVME147 || called)
+ return 0;
+ called++;
+
+ tpnt->proc_name = "MVME147";
+ tpnt->proc_info = &wd33c93_proc_info;
+
+ instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
+ if (!instance)
+ goto err_out;
+
+ instance->base = 0xfffe4000;
+ instance->irq = MVME147_IRQ_SCSI_PORT;
+ regs.SASR = (volatile unsigned char *)0xfffe4000;
+ regs.SCMD = (volatile unsigned char *)0xfffe4001;
+ hdata = shost_priv(instance);
+ hdata->no_sync = 0xff;
+ hdata->fast = 0;
+ hdata->dma_mode = CTRL_DMA;
+ wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
+
+ if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
+ "MVME147 SCSI PORT", instance))
+ goto err_unregister;
+ if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
+ "MVME147 SCSI DMA", instance))
+ goto err_free_irq;
#if 0 /* Disabled; causes problems booting */
- m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
- udelay(100);
- m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
- udelay(2000);
- m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
+ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
+ udelay(100);
+ m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
+ udelay(2000);
+ m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
#endif
- m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
+ m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
- m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
- m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
+ m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
+ m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
- return 1;
+ return 1;
- err_free_irq:
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- err_unregister:
- wd33c93_release();
- scsi_unregister(mvme147_host);
- err_out:
- return 0;
+err_free_irq:
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+err_unregister:
+ scsi_unregister(instance);
+err_out:
+ return 0;
}
static int mvme147_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
- /* FIXME 2: kill this function, and let midlayer fallback to
+ /* FIXME 2: kill this function, and let midlayer fallback to
the same result, calling wd33c93_host_reset() */
spin_lock_irq(cmd->device->host->host_lock);
@@ -128,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd)
return SUCCESS;
}
-#define HOSTS_C
-
-#include "mvme147.h"
static struct scsi_host_template driver_template = {
.proc_name = "MVME147",
@@ -154,10 +157,9 @@ static struct scsi_host_template driver_template = {
int mvme147_release(struct Scsi_Host *instance)
{
#ifdef MODULE
- /* XXX Make sure DMA is stopped! */
- wd33c93_release();
- free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
- free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
+ /* XXX Make sure DMA is stopped! */
+ free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
+ free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
#endif
- return 1;
+ return 1;
}
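The mvme147 conversion above replaces the file-scope mvme147_host pointer and the HDATA() cast with per-instance data reached through shost_priv(), and passes the host pointer as the request_irq() cookie so the handler can recover it. A compact sketch of that pattern; example_hostdata, example_intr and example_attach are illustrative names rather than the driver's (the wd33c93 drivers keep their state in struct WD33C93_hostdata). With this shape a second controller instance needs no extra globals.

    #include <linux/interrupt.h>
    #include <scsi/scsi_host.h>

    /* Hypothetical per-host state for illustration only. */
    struct example_hostdata {
        int dma_dir;
    };

    static irqreturn_t example_intr(int irq, void *data)
    {
        struct Scsi_Host *instance = data;        /* cookie given to request_irq() */
        struct example_hostdata *hd = shost_priv(instance);

        hd->dma_dir = 0;                          /* per-instance state, no globals */
        return IRQ_HANDLED;
    }

    static int example_attach(struct Scsi_Host *instance, unsigned int irq)
    {
        /* Pass the host as dev_id so the handler can recover it. */
        return request_irq(irq, example_intr, 0, "example", instance);
    }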
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 32aee85434d..bfd4566ef05 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -14,11 +14,11 @@ int mvme147_detect(struct scsi_host_template *);
int mvme147_release(struct Scsi_Host *);
#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 2
+#define CMD_PER_LUN 2
#endif
#ifndef CAN_QUEUE
-#define CAN_QUEUE 16
+#define CAN_QUEUE 16
#endif
#endif /* MVME147_H */
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 10a5077b6ae..afc7f6f3a13 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -132,9 +132,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
- if (hard)
+ if (hard == 1)
tmp |= PHY_RST_HARD;
- else
+ else if (hard == 0)
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
@@ -144,6 +144,26 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
}
}
+void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ } else {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp & (1 << (reg_set % 32))) {
+ printk(KERN_DEBUG "register set 0x%x was stopped.\n",
+ reg_set);
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
+}
+
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
@@ -761,6 +781,7 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_64xx_command_active,
+ mvs_64xx_clear_srs_irq,
mvs_64xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 0940fae19d2..eed4c5c7201 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -616,6 +616,15 @@ void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
}
#endif
+/*
+ * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
+ * with 64xx fixes
+ */
+static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
+ u8 clear_all)
+{
+}
+
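The FIXME above keeps the shared dispatch table complete by giving the 94xx chip a no-op clear_srs_irq hook while only the 64xx variant does real work. A small stand-alone sketch of that dispatch-table convention; the chip and hook names here are invented for illustration.

    #include <stdio.h>

    /* Every chip family fills in every hook, even if some are deliberate no-ops. */
    struct chip_ops {
        void (*clear_srs_irq)(int reg_set, int clear_all);
    };

    static void chip_a_clear_srs_irq(int reg_set, int clear_all)
    {
        printf("chip A: clearing SRS irq, reg_set=%d clear_all=%d\n",
               reg_set, clear_all);
    }

    static void chip_b_clear_srs_irq(int reg_set, int clear_all)
    {
        /* nop: chip B has nothing to do, but the slot must not be NULL */
        (void)reg_set;
        (void)clear_all;
    }

    static const struct chip_ops chip_a = { .clear_srs_irq = chip_a_clear_srs_irq };
    static const struct chip_ops chip_b = { .clear_srs_irq = chip_b_clear_srs_irq };

    int main(void)
    {
        chip_a.clear_srs_irq(3, 0);
        chip_b.clear_srs_irq(3, 0);   /* safe: no NULL check needed at call sites */
        return 0;
    }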
const struct mvs_dispatch mvs_94xx_dispatch = {
"mv94xx",
mvs_94xx_init,
@@ -640,6 +649,7 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_write_port_irq_mask,
mvs_get_sas_addr,
mvs_94xx_command_active,
+ mvs_94xx_clear_srs_irq,
mvs_94xx_issue_stop,
mvs_start_delivery,
mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cae6b2cf492..19ad34f381a 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -37,6 +37,7 @@ static const struct mvs_chip_info mvs_chips[] = {
};
#define SOC_SAS_NUM 2
+#define SG_MX 64
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
@@ -53,10 +54,10 @@ static struct scsi_host_template mvs_sht = {
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SG_MX,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
@@ -65,19 +66,17 @@ static struct scsi_host_template mvs_sht = {
static struct sas_domain_function_template mvs_transport_ops = {
.lldd_dev_found = mvs_dev_found,
- .lldd_dev_gone = mvs_dev_gone,
-
+ .lldd_dev_gone = mvs_dev_gone,
.lldd_execute_task = mvs_queue_command,
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
.lldd_abort_task_set = mvs_abort_task_set,
.lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_clear_task_set = mvs_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
-
.lldd_port_formed = mvs_port_formed,
.lldd_port_deformed = mvs_port_deformed,
@@ -213,7 +212,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
- int i, slot_nr;
+ int i = 0, slot_nr;
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
@@ -232,6 +231,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
mvi->devices[i].dev_type = NO_DEVICE;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+ init_timer(&mvi->devices[i].timer);
}
/*
@@ -437,6 +437,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
+ sha->core.shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -574,6 +575,10 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
}
nhost++;
} while (nhost < chip->n_host);
+#ifdef MVS_USE_TASKLET
+ tasklet_init(&mv_tasklet, mvs_tasklet,
+ (unsigned long)SHOST_TO_SAS_HA(shost));
+#endif
mvs_post_sas_ha_init(shost, chip);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0d213864121..f5e32179190 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -259,8 +259,6 @@ static inline void mvs_free_reg_set(struct mvs_info *mvi,
mv_printk("device has been free.\n");
return;
}
- if (dev->runing_req != 0)
- return;
if (dev->taskfileset == MVS_ID_NOT_MAPPED)
return;
MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
@@ -762,8 +760,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
}
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
- else
- flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
hdr->tags = cpu_to_le32(tag);
hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -878,14 +874,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
struct mvs_slot_info *slot;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num, pass = 0;
- unsigned long flags = 0;
+ unsigned long flags = 0, flags_libsas = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
tsm->resp = SAS_TASK_UNDELIVERED;
tsm->stat = SAS_PHY_DOWN;
- t->task_done(t);
+ if (dev->dev_type != SATA_DEV)
+ t->task_done(t);
return 0;
}
@@ -910,12 +907,25 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
else
tei.port = &mvi->port[dev->port->id];
- if (!tei.port->port_attached) {
+ if (tei.port && !tei.port->port_attached) {
if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+
mv_dprintk("port %d does not"
"attached device.\n", dev->port->id);
- rc = SAS_PHY_DOWN;
- goto out_done;
+ ts->stat = SAS_PROTO_RESPONSE;
+ ts->stat = SAS_PHY_DOWN;
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+ flags_libsas);
+ spin_unlock_irqrestore(&mvi->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(dev->sata_dev.ap->lock,
+ flags_libsas);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
} else {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
@@ -973,8 +983,8 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
break;
default:
dev_printk(KERN_ERR, mvi->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n",
+ t->task_proto);
rc = -EINVAL;
break;
}
@@ -993,11 +1003,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
spin_unlock(&t->task_state_lock);
mvs_hba_memory_dump(mvi, tag, t->task_proto);
- mvi_dev->runing_req++;
+ mvi_dev->running_req++;
++pass;
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
if (n > 1)
t = list_entry(t->list.next, struct sas_task, list);
+ if (likely(pass))
+ MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
+ (MVS_CHIP_SLOT_SZ - 1));
+
} while (--n);
rc = 0;
goto out_done;
@@ -1012,10 +1026,6 @@ err_out:
dma_unmap_sg(mvi->dev, t->scatter, n_elem,
t->data_dir);
out_done:
- if (likely(pass)) {
- MVS_CHIP_DISP->start_delivery(mvi,
- (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
- }
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
}
@@ -1187,7 +1197,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
MVS_CHIP_DISP->phy_reset(mvi, i, 0);
goto out_done;
}
- } else if (phy->phy_type & PORT_TYPE_SAS
+ } else if (phy->phy_type & PORT_TYPE_SAS
|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
phy->phy_attached = 1;
phy->identify.device_type =
@@ -1256,7 +1266,20 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
- /*Nothing*/
+ struct domain_device *dev;
+ struct mvs_phy *phy = sas_phy->lldd_phy;
+ struct mvs_info *mvi = phy->mvi;
+ struct asd_sas_port *port = sas_phy->port;
+ int phy_no = 0;
+
+ while (phy != &mvi->phy[phy_no]) {
+ phy_no++;
+ if (phy_no >= MVS_MAX_PHYS)
+ return;
+ }
+ list_for_each_entry(dev, &port->dev_list, dev_list_node)
+ mvs_do_release_task(phy->mvi, phy_no, NULL);
+
}
@@ -1316,6 +1339,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
goto found_out;
}
dev->lldd_dev = mvi_device;
+ mvi_device->dev_status = MVS_DEV_NORMAL;
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
@@ -1351,18 +1375,18 @@ int mvs_dev_found(struct domain_device *dev)
return mvs_dev_found_notify(dev, 1);
}
-void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+void mvs_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- if (lock)
- spin_lock_irqsave(&mvi->lock, flags);
+ spin_lock_irqsave(&mvi->lock, flags);
if (mvi_dev) {
mv_dprintk("found dev[%d:%x] is gone.\n",
mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_release_task(mvi, dev);
mvs_free_reg_set(mvi, mvi_dev);
mvs_free_dev(mvi_dev);
} else {
@@ -1370,14 +1394,13 @@ void mvs_dev_gone_notify(struct domain_device *dev, int lock)
}
dev->lldd_dev = NULL;
- if (lock)
- spin_unlock_irqrestore(&mvi->lock, flags);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
void mvs_dev_gone(struct domain_device *dev)
{
- mvs_dev_gone_notify(dev, 1);
+ mvs_dev_gone_notify(dev);
}
static struct sas_task *mvs_alloc_task(void)
@@ -1540,7 +1563,7 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
}
/* If failed, fall-through I_T_Nexus reset */
@@ -1552,8 +1575,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
unsigned long flags;
- int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
- struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
if (mvi_dev->dev_status != MVS_DEV_EH)
@@ -1563,10 +1586,8 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
__func__, mvi_dev->device_id, rc);
/* housekeeper */
- num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
- for (i = 0; i < num; i++)
- mvs_release_task(mvi, phyno[i], dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
return rc;
@@ -1603,6 +1624,9 @@ int mvs_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
+ default:
+ rc = TMF_RESP_FUNC_COMPLETE;
+ break;
}
}
mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1621,8 +1645,11 @@ int mvs_abort_task(struct sas_task *task)
unsigned long flags;
u32 tag;
- if (mvi->exp_req)
- mvi->exp_req--;
+ if (!mvi_dev) {
+ mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
+ rc = TMF_RESP_FUNC_FAILED;
+ }
+
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1630,6 +1657,7 @@ int mvs_abort_task(struct sas_task *task)
goto out;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
@@ -1654,12 +1682,31 @@ int mvs_abort_task(struct sas_task *task)
if (task->lldd_task) {
slot = task->lldd_task;
slot_no = (u32) (slot - mvi->slot_info);
+ spin_lock_irqsave(&mvi->lock, flags);
mvs_slot_complete(mvi, slot_no, 1);
+ spin_unlock_irqrestore(&mvi->lock, flags);
}
}
+
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
/* to do free register_set */
+ if (SATA_DEV == dev->dev_type) {
+ struct mvs_slot_info *slot = task->lldd_task;
+ struct task_status_struct *tstat;
+ u32 slot_idx = (u32)(slot - mvi->slot_info);
+ tstat = &task->task_status;
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=0x%x\n",
+ mvi, task, slot, slot_idx);
+ tstat->stat = SAS_ABORTED_TASK;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto))
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ return -1;
+ }
} else {
/* SMP */
@@ -1717,8 +1764,13 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
sizeof(struct dev_to_host_fis));
tstat->buf_valid_size = sizeof(*resp);
- if (unlikely(err))
- stat = SAS_PROTO_RESPONSE;
+ if (unlikely(err)) {
+ if (unlikely(err & CMD_ISS_STPD))
+ stat = SAS_OPEN_REJECT;
+ else
+ stat = SAS_PROTO_RESPONSE;
+ }
+
return stat;
}
@@ -1753,9 +1805,7 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
mv_printk("find reserved error, why?\n");
task->ata_task.use_ncq = 0;
- stat = SAS_PROTO_RESPONSE;
- mvs_sata_done(mvi, task, slot_idx, 1);
-
+ mvs_sata_done(mvi, task, slot_idx, err_dw0);
}
break;
default:
@@ -1772,18 +1822,20 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
struct sas_task *task = slot->task;
struct mvs_device *mvi_dev = NULL;
struct task_status_struct *tstat;
+ struct domain_device *dev;
+ u32 aborted;
- bool aborted;
void *to;
enum exec_status sts;
if (mvi->exp_req)
mvi->exp_req--;
- if (unlikely(!task || !task->lldd_task))
+ if (unlikely(!task || !task->lldd_task || !task->dev))
return -1;
tstat = &task->task_status;
- mvi_dev = task->dev->lldd_dev;
+ dev = task->dev;
+ mvi_dev = dev->lldd_dev;
mvs_hba_cq_dump(mvi);
@@ -1800,8 +1852,8 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
if (unlikely(aborted)) {
tstat->stat = SAS_ABORTED_TASK;
- if (mvi_dev)
- mvi_dev->runing_req--;
+ if (mvi_dev && mvi_dev->running_req)
+ mvi_dev->running_req--;
if (sas_protocol_ata(task->task_proto))
mvs_free_reg_set(mvi, mvi_dev);
@@ -1809,24 +1861,17 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
return -1;
}
- if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
- mv_dprintk("port has not device.\n");
+ if (unlikely(!mvi_dev || flags)) {
+ if (!mvi_dev)
+ mv_dprintk("port has not device.\n");
tstat->stat = SAS_PHY_DOWN;
goto out;
}
- /*
- if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
- mv_dprintk("Find device[%016llx] RXQ_ERR %X,
- err info:%016llx\n",
- SAS_ADDR(task->dev->sas_addr),
- rx_desc, (u64)(*(u64 *) slot->response));
- }
- */
-
/* error info record present */
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+ tstat->resp = SAS_TASK_COMPLETE;
goto out;
}
@@ -1868,11 +1913,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
tstat->stat = SAM_CHECK_COND;
break;
}
+ if (!slot->port->port_attached) {
+ mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
+ tstat->stat = SAS_PHY_DOWN;
+ }
+
out:
- if (mvi_dev) {
- mvi_dev->runing_req--;
- if (sas_protocol_ata(task->task_proto))
+ if (mvi_dev && mvi_dev->running_req) {
+ mvi_dev->running_req--;
+ if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
mvs_free_reg_set(mvi, mvi_dev);
}
mvs_slot_task_free(mvi, task, slot, slot_idx);
@@ -1888,10 +1938,10 @@ out:
return sts;
}
-void mvs_release_task(struct mvs_info *mvi,
+void mvs_do_release_task(struct mvs_info *mvi,
int phy_no, struct domain_device *dev)
{
- int i = 0; u32 slot_idx;
+ u32 slot_idx;
struct mvs_phy *phy;
struct mvs_port *port;
struct mvs_slot_info *slot, *slot2;
@@ -1900,6 +1950,10 @@ void mvs_release_task(struct mvs_info *mvi,
port = phy->port;
if (!port)
return;
+ /* clean cmpl queue in case request is already finished */
+ mvs_int_rx(mvi, false);
+
+
list_for_each_entry_safe(slot, slot2, &port->list, entry) {
struct sas_task *task;
@@ -1911,18 +1965,22 @@ void mvs_release_task(struct mvs_info *mvi,
mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
slot_idx, slot->slot_tag, task);
-
- if (task->task_proto & SAS_PROTOCOL_SSP) {
- mv_printk("attached with SSP task CDB[");
- for (i = 0; i < 16; i++)
- mv_printk(" %02x", task->ssp_task.cdb[i]);
- mv_printk(" ]\n");
- }
+ MVS_CHIP_DISP->command_active(mvi, slot_idx);
mvs_slot_complete(mvi, slot_idx, 1);
}
}
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev)
+{
+ int i, phyno[WIDE_PORT_MAX_PHY], num;
+ /* housekeeper */
+ num = mvs_find_dev_phyno(dev, phyno);
+ for (i = 0; i < num; i++)
+ mvs_do_release_task(mvi, phyno[i], dev);
+}
+
static void mvs_phy_disconnected(struct mvs_phy *phy)
{
phy->phy_attached = 0;
@@ -2029,16 +2087,18 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
* we need check the interrupt status which belongs to per port.
*/
- if (phy->irq_status & PHYEV_DCDR_ERR)
+ if (phy->irq_status & PHYEV_DCDR_ERR) {
mv_dprintk("port %d STP decoding error.\n",
- phy_no+mvi->id*mvi->chip->n_phy);
+ phy_no + mvi->id*mvi->chip->n_phy);
+ }
if (phy->irq_status & PHYEV_POOF) {
if (!(phy->phy_event & PHY_PLUG_OUT)) {
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
int ready;
- mvs_release_task(mvi, phy_no, NULL);
+ mvs_do_release_task(mvi, phy_no, NULL);
phy->phy_event |= PHY_PLUG_OUT;
+ MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
mvs_handle_event(mvi,
(void *)(unsigned long)phy_no,
PHY_PLUG_EVENT);
@@ -2085,6 +2145,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
phy_no, tmp);
}
mvs_update_phyinfo(mvi, phy_no, 0);
+ if (phy->phy_type & PORT_TYPE_SAS) {
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
+ mdelay(10);
+ }
+
mvs_bytes_dmaed(mvi, phy_no);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 885858bcc40..77ddc7c1e5f 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <scsi/libsas.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/version.h>
@@ -49,7 +50,7 @@
#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
/* #define DISABLE_HOTPLUG_DMA_FIX */
-#define MAX_EXP_RUNNING_REQ 2
+/* #define MAX_EXP_RUNNING_REQ 2 */
#define WIDE_PORT_MAX_PHY 4
#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
@@ -129,6 +130,7 @@ struct mvs_dispatch {
void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+ void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
u32 tfs);
void (*start_delivery)(struct mvs_info *mvi, u32 tx);
@@ -236,9 +238,10 @@ struct mvs_device {
enum sas_dev_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
+ struct timer_list timer;
u32 attached_phy;
u32 device_id;
- u32 runing_req;
+ u32 running_req;
u8 taskfileset;
u8 dev_status;
u16 reserved;
@@ -397,7 +400,9 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun);
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
int mvs_I_T_nexus_reset(struct domain_device *dev);
int mvs_query_task(struct sas_task *task);
-void mvs_release_task(struct mvs_info *mvi, int phy_no,
+void mvs_release_task(struct mvs_info *mvi,
+ struct domain_device *dev);
+void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
struct domain_device *dev);
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index b219118f8bd..d64b7178fa0 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
if (i == (-ENOSPC)) {
transfer = STp->buffer->writing; /* FIXME -- check this logic */
if (transfer <= do_count) {
- filp->f_pos += do_count - transfer;
+ *ppos += do_count - transfer;
count -= do_count - transfer;
if (STps->drv_block >= 0) {
STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
goto out;
}
- filp->f_pos += do_count;
+ *ppos += do_count;
b_point += do_count;
count -= do_count;
if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
if (STps->drv_block >= 0) {
STps->drv_block += blks;
}
- filp->f_pos += count;
+ *ppos += count;
count = 0;
}
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo
}
STp->logical_blk_num += transfer / STp->block_size;
STps->drv_block += transfer / STp->block_size;
- filp->f_pos += transfer;
+ *ppos += transfer;
buf += transfer;
total += transfer;
}
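The osst hunks above stop advancing filp->f_pos directly and instead update the loff_t *ppos argument, which the VFS folds back into the file position after the call returns. A minimal read-handler sketch of that convention, against a hypothetical backing buffer rather than the tape driver's state:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    /* Hypothetical backing data; a real driver would read from its device. */
    static const char example_data[] = "hello from a character device\n";

    static ssize_t example_read(struct file *filp, char __user *buf,
                                size_t count, loff_t *ppos)
    {
        loff_t pos = *ppos;

        if (pos < 0 || pos >= sizeof(example_data))
            return 0;
        if (count > sizeof(example_data) - pos)
            count = sizeof(example_data) - pos;
        if (copy_to_user(buf, example_data + pos, count))
            return -EFAULT;

        *ppos = pos + count;           /* advance the position the VFS handed us */
        return count;
    }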
@@ -4932,7 +4932,7 @@ static int os_scsi_tape_close(struct inode * inode, struct file * filp)
/* The ioctl command */
-static int osst_ioctl(struct inode * inode,struct file * file,
+static long osst_ioctl(struct file * file,
unsigned int cmd_in, unsigned long arg)
{
int i, cmd_nr, cmd_type, blk, retval = 0;
@@ -4943,8 +4943,11 @@ static int osst_ioctl(struct inode * inode,struct file * file,
char * name = tape_name(STp);
void __user * p = (void __user *)arg;
- if (mutex_lock_interruptible(&STp->lock))
+ lock_kernel();
+ if (mutex_lock_interruptible(&STp->lock)) {
+ unlock_kernel();
return -ERESTARTSYS;
+ }
#if DEBUG
if (debugging && !STp->in_use) {
@@ -5256,12 +5259,15 @@ static int osst_ioctl(struct inode * inode,struct file * file,
mutex_unlock(&STp->lock);
- return scsi_ioctl(STp->device, cmd_in, p);
+ retval = scsi_ioctl(STp->device, cmd_in, p);
+ unlock_kernel();
+ return retval;
out:
if (SRpnt) osst_release_request(SRpnt);
mutex_unlock(&STp->lock);
+ unlock_kernel();
return retval;
}
@@ -5613,13 +5619,14 @@ static const struct file_operations osst_fops = {
.owner = THIS_MODULE,
.read = osst_read,
.write = osst_write,
- .ioctl = osst_ioctl,
+ .unlocked_ioctl = osst_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = osst_compat_ioctl,
#endif
.open = os_scsi_tape_open,
.flush = os_scsi_tape_flush,
.release = os_scsi_tape_close,
+ .llseek = noop_llseek,
};
static int osst_supports(struct scsi_device * SDp)
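osst_ioctl is switched from the old .ioctl slot, which ran under the big kernel lock, to .unlocked_ioctl, so the handler now takes the BKL explicitly with lock_kernel()/unlock_kernel() around its own mutex until the locking can be audited. A sketch of that wrapper shape for a hypothetical device of this kernel era (example_ioctl, example_lock and example_fops are illustrative names):

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/smp_lock.h>

    static DEFINE_MUTEX(example_lock);

    static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
        long ret = 0;

        lock_kernel();                  /* keep the old BKL coverage for now */
        if (mutex_lock_interruptible(&example_lock)) {
            unlock_kernel();
            return -ERESTARTSYS;
        }

        /* ... decode cmd and arg here ... */

        mutex_unlock(&example_lock);
        unlock_kernel();
        return ret;
    }

    static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = example_ioctl,
    };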
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 528733b4a39..9d70aef9922 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -80,7 +80,6 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
@@ -105,7 +104,6 @@ static int aha152x_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 0x20;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -160,8 +158,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -172,7 +169,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
memset(&s, 0, sizeof(s));
s.conf = "PCMCIA setup";
s.io_port = link->io.BasePort1;
- s.irq = link->irq.AssignedIRQ;
+ s.irq = link->irq;
s.scsiid = host_id;
s.reconnect = reconnect;
s.parity = parity;
@@ -187,8 +184,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
goto failed;
}
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
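The PCMCIA stubs above drop the per-driver irq.Attributes setup: with the reworked core of this kernel series the assigned interrupt is simply link->irq, and pcmcia_request_irq() takes the handler directly where a driver still wants the core to install it, as nsp_cs does below. A sketch of the new-style configuration path with an invented handler and driver name:

    #include <linux/interrupt.h>
    #include <linux/kernel.h>
    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    static irqreturn_t example_cs_irq(int irq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int example_cs_config(struct pcmcia_device *link)
    {
        int ret;

        /* The core has already assigned an interrupt; link->irq holds it. */
        if (!link->irq)
            return -ENODEV;

        /* Drivers that want the core to install the handler pass it directly. */
        ret = pcmcia_request_irq(link, example_cs_irq);
        if (ret)
            return ret;

        ret = pcmcia_request_configuration(link, &link->conf);
        if (ret)
            return ret;

        printk(KERN_INFO "example_cs: using irq %u\n", link->irq);
        return 0;
    }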
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 91404068407..21b141151df 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -63,7 +63,6 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
} scsi_info_t;
@@ -88,7 +87,6 @@ static int fdomain_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 0x10;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -133,8 +131,7 @@ static int fdomain_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -144,7 +141,7 @@ static int fdomain_config(struct pcmcia_device *link)
release_region(link->io.BasePort1, link->io.NumPorts1);
/* Set configuration options for the fdomain driver */
- sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ);
+ sprintf(str, "%d,%d", link->io.BasePort1, link->irq);
fdomain_setup(str);
host = __fdomain_16x0_detect(&fdomain_driver_template);
@@ -157,8 +154,6 @@ static int fdomain_config(struct pcmcia_device *link)
goto failed;
scsi_scan_host(host);
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 02124645487..0f0e112c3f8 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1563,13 +1563,6 @@ static int nsp_cs_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10; /* not used */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
-
- /* Interrupt handler */
- link->irq.Handler = &nspintr;
- link->irq.Attributes |= IRQF_SHARED;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -1646,8 +1639,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1720,10 +1712,8 @@ static int nsp_cs_config(struct pcmcia_device *link)
if (ret)
goto cs_failed;
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- if (pcmcia_request_irq(link, &link->irq))
- goto cs_failed;
- }
+ if (pcmcia_request_irq(link, nspintr))
+ goto cs_failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -1741,7 +1731,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
/* Set port and IRQ */
data->BaseAddress = link->io.BasePort1;
data->NumAddress = link->io.NumPorts1;
- data->IrqNumber = link->irq.AssignedIRQ;
+ data->IrqNumber = link->irq;
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
data->BaseAddress, data->NumAddress, data->IrqNumber);
@@ -1764,8 +1754,6 @@ static int nsp_cs_config(struct pcmcia_device *link)
scsi_scan_host(host);
- snprintf(info->node.dev_name, sizeof(info->node.dev_name), "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
/* Finally, report what we've done */
@@ -1775,7 +1763,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
}
if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
}
if (link->io.NumPorts1) {
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
@@ -1823,7 +1811,6 @@ static void nsp_cs_release(struct pcmcia_device *link)
if (info->host != NULL) {
scsi_remove_host(info->host);
}
- link->dev_node = NULL;
if (link->win) {
if (data != NULL) {
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 8c61a4fe1db..d68c9f267c5 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -224,7 +224,6 @@
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
struct Scsi_Host *host;
- dev_node_t node;
int stop;
} scsi_info_t;
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index f85f094870b..f0fc6baed9f 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -82,7 +82,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
typedef struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
} scsi_info_t;
@@ -161,7 +160,6 @@ static int qlogic_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -209,8 +207,7 @@ static int qlogic_config(struct pcmcia_device * link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -227,18 +224,16 @@ static int qlogic_config(struct pcmcia_device * link)
/* The KXL-810AN has a bigger IO port window */
if (link->io.NumPorts1 == 32)
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1 + 16, link->irq.AssignedIRQ);
+ link->io.BasePort1 + 16, link->irq);
else
host = qlogic_detect(&qlogicfas_driver_template, link,
- link->io.BasePort1, link->irq.AssignedIRQ);
+ link->io.BasePort1, link->irq);
if (!host) {
printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
goto failed;
}
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
return 0;
@@ -258,7 +253,7 @@ static void qlogic_release(struct pcmcia_device *link)
scsi_remove_host(info->host);
- free_irq(link->irq.AssignedIRQ, info->host);
+ free_irq(link->irq, info->host);
pcmcia_disable_device(link);
scsi_host_put(info->host);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index e7564d8f0cb..a5116417117 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -191,7 +191,6 @@
struct scsi_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct Scsi_Host *host;
unsigned short manf_id;
};
@@ -719,8 +718,7 @@ SYM53C500_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -752,7 +750,7 @@ SYM53C500_config(struct pcmcia_device *link)
* 0x320, 0x330, 0x340, 0x350
*/
port_base = link->io.BasePort1;
- irq_level = link->irq.AssignedIRQ;
+ irq_level = link->irq;
DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
port_base, irq_level, USE_FAST_PIO);)
@@ -793,8 +791,6 @@ SYM53C500_config(struct pcmcia_device *link)
*/
data->fast_pio = USE_FAST_PIO;
- sprintf(info->node.dev_name, "scsi%d", host->host_no);
- link->dev_node = &info->node;
info->host = host;
if (scsi_add_host(host, NULL))
@@ -866,7 +862,6 @@ SYM53C500_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 10;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 909c00ec044..5ff8261c5d6 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4390,7 +4390,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return -ENOMEM;
}
}
- memset(buffer, 0, fw_control->len);
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index bff4f5139b9..cd02ceaf67f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -885,11 +885,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
u32 tag;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- u32 device_id = pm8001_dev->device_id;
+
pm8001_ha = pm8001_find_ha_by_dev(dev);
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_tag_alloc(pm8001_ha, &tag);
if (pm8001_dev) {
+ u32 device_id = pm8001_dev->device_id;
+
PM8001_DISC_DBG(pm8001_ha,
pm8001_printk("found dev[%d:%x] is gone.\n",
pm8001_dev->device_id, pm8001_dev->dev_type));
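The pm8001 change above moves the read of device_id inside the if (pm8001_dev) test, so the pointer is no longer dereferenced before its NULL check. A tiny stand-alone illustration of the hazard and the fixed ordering; the types and names are invented:

    #include <stdio.h>

    struct dev_info {
        unsigned int device_id;
    };

    static void notify_gone(struct dev_info *d)
    {
        /* Wrong: unsigned int id = d->device_id;  -- crashes when d == NULL */
        if (d) {
            unsigned int id = d->device_id;   /* safe: read only after the check */
            printf("device %u is gone\n", id);
        } else {
            printf("device was never attached\n");
        }
    }

    int main(void)
    {
        struct dev_info d = { .device_id = 7 };

        notify_gone(&d);
        notify_gone(NULL);
        return 0;
    }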
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 53aefffbaea..c44e4ab4e93 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3751,12 +3751,6 @@ static int pmcraid_check_ioctl_buffer(
return -EINVAL;
}
- /* buffer length can't be negetive */
- if (hdr->buffer_length < 0) {
- pmcraid_err("ioctl: invalid buffer length specified\n");
- return -EINVAL;
- }
-
/* check for appropriate buffer access */
if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
access = VERIFY_WRITE;
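The dropped pmcraid check compared the ioctl header's buffer length against zero with a less-than test; assuming the field is unsigned, as the removal implies, that comparison can never be true and is dead code. A small illustration (the field width here is a stand-in, not taken from pmcraid's header):

    #include <stdint.h>
    #include <stdio.h>

    struct ioctl_header {
        uint32_t buffer_length;   /* unsigned, so "< 0" can never hold */
    };

    int main(void)
    {
        struct ioctl_header hdr = { .buffer_length = 0xffffffffu };

        /* Always false for an unsigned field: the "negative" test is dead code. */
        if (hdr.buffer_length < 0)
            printf("never printed\n");
        else
            printf("length = %u (validate against a real upper bound instead)\n",
                   hdr.buffer_length);
        return 0;
    }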
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c51fd1f8663..5df782f4a09 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,5 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
+ qla_nx.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55966f..1e4cafabba1 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,13 +12,11 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
-static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
-int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
-static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
-qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -34,7 +32,7 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
+qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -43,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int reading;
+ if (IS_QLA82XX(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Firmware dump not supported for ISP82xx\n"));
+ return count;
+ }
+
if (off != 0)
return (0);
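Throughout qla_attr.c the binary sysfs handlers gain a leading struct file * parameter, matching the bin_attribute read/write callback prototype of this kernel series. A minimal handler with the new signature, exporting an invented read-only blob (example_bin_attr is not a qla2xxx attribute):

    #include <linux/device.h>
    #include <linux/stat.h>
    #include <linux/string.h>
    #include <linux/sysfs.h>

    static const char example_blob[] = "example payload";

    static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *bin_attr,
                                    char *buf, loff_t off, size_t count)
    {
        /* sysfs hands us a kernel buffer; copy out the requested window. */
        if (off < 0 || off >= sizeof(example_blob))
            return 0;
        if (count > sizeof(example_blob) - off)
            count = sizeof(example_blob) - off;
        memcpy(buf, example_blob + off, count);
        return count;
    }

    static struct bin_attribute example_bin_attr = {
        .attr = { .name = "example_blob", .mode = S_IRUGO },
        .size = sizeof(example_blob),
        .read = example_bin_read,
    };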
@@ -88,7 +92,7 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
-qla2x00_sysfs_read_nvram(struct kobject *kobj,
+qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -107,7 +111,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_nvram(struct kobject *kobj,
+qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -173,7 +177,7 @@ static struct bin_attribute sysfs_nvram_attr = {
};
static ssize_t
-qla2x00_sysfs_read_optrom(struct kobject *kobj,
+qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -189,7 +193,7 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_optrom(struct kobject *kobj,
+qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -220,7 +224,7 @@ static struct bin_attribute sysfs_optrom_attr = {
};
static ssize_t
-qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
+qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -277,6 +281,12 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
return count;
}
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "HBA not online, failing NVRAM update.\n");
+ return -EAGAIN;
+ }
+
DEBUG2(qla_printk(KERN_INFO, ha,
"Reading flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
@@ -315,8 +325,8 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
- valid = 1;
+ else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
+ valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
"Invalid start region 0x%x/0x%x.\n", start, size);
@@ -377,7 +387,7 @@ static struct bin_attribute sysfs_optrom_ctl_attr = {
};
static ssize_t
-qla2x00_sysfs_read_vpd(struct kobject *kobj,
+qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -398,7 +408,7 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_write_vpd(struct kobject *kobj,
+qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -451,7 +461,7 @@ static struct bin_attribute sysfs_vpd_attr = {
};
static ssize_t
-qla2x00_sysfs_read_sfp(struct kobject *kobj,
+qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -512,13 +522,14 @@ static struct bin_attribute sysfs_sfp_attr = {
};
static ssize_t
-qla2x00_sysfs_write_reset(struct kobject *kobj,
+qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
if (off != 0)
@@ -553,6 +564,20 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
"MPI reset failed on (%ld).\n", vha->host_no);
scsi_unblock_requests(vha->host);
break;
+ case 0x2025e:
+ if (!IS_QLA82XX(ha) || vha != base_vha) {
+ qla_printk(KERN_INFO, ha,
+ "FCoE ctx reset not supported for host%ld.\n",
+ vha->host_no);
+ return count;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_fcoe_ctx_reset(vha);
+ break;
}
return count;
}
@@ -567,7 +592,7 @@ static struct bin_attribute sysfs_reset_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc(struct kobject *kobj,
+qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -625,7 +650,7 @@ static struct bin_attribute sysfs_edc_attr = {
};
static ssize_t
-qla2x00_sysfs_write_edc_status(struct kobject *kobj,
+qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -675,7 +700,7 @@ qla2x00_sysfs_write_edc_status(struct kobject *kobj,
}
static ssize_t
-qla2x00_sysfs_read_edc_status(struct kobject *kobj,
+qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -705,7 +730,7 @@ static struct bin_attribute sysfs_edc_status_attr = {
};
static ssize_t
-qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -757,7 +782,7 @@ static struct bin_attribute sysfs_xgmac_stats_attr = {
};
static ssize_t
-qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -838,7 +863,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+ if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -862,7 +887,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
- if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+ if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -968,7 +993,8 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
int len = 0;
if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
- atomic_read(&vha->loop_state) == LOOP_DEAD)
+ atomic_read(&vha->loop_state) == LOOP_DEAD ||
+ vha->device_flags & DFLG_NO_CABLE)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
@@ -1179,15 +1205,15 @@ qla24xx_84xx_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA84XX(ha) && ha->cs84xx) {
- if (ha->cs84xx->op_fw_version == 0) {
- rval = qla84xx_verify_chip(vha, status);
- }
+ if (!IS_QLA84XX(ha))
+ return snprintf(buf, PAGE_SIZE, "\n");
+
+ if (ha->cs84xx && ha->cs84xx->op_fw_version == 0)
+ rval = qla84xx_verify_chip(vha, status);
if ((rval == QLA_SUCCESS) && (status[0] == 0))
return snprintf(buf, PAGE_SIZE, "%u\n",
(uint32_t)ha->cs84xx->op_fw_version);
- }
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -1237,7 +1263,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1249,7 +1275,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1706,6 +1732,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
}
+ if (IS_QLA25XX(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ vha->flags.difdix_supported = 1;
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "Registering for DIF/DIX type 1 and 3"
+ " protection.\n"));
+ scsi_host_set_prot(vha->host,
+ SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+ scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
+ } else
+ vha->flags.difdix_supported = 0;
+ }
+
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
&ha->pdev->dev)) {
DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
@@ -1825,582 +1867,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-/* BSG support for ELS/CT pass through */
-inline srb_t *
-qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
-{
- srb_t *sp;
- struct qla_hw_data *ha = vha->hw;
- struct srb_bsg_ctx *ctx;
-
- sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
- if (!sp)
- goto done;
- ctx = kzalloc(size, GFP_KERNEL);
- if (!ctx) {
- mempool_free(sp, ha->srb_mempool);
- goto done;
- }
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->ctx = ctx;
-done:
- return sp;
-}
-
-static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
-{
- struct fc_rport *rport;
- fc_port_t *fcport;
- struct Scsi_Host *host;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- srb_t *sp;
- const char *type;
- int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
- uint16_t nextlid = 0;
- struct srb_bsg *els;
-
- /* Multiple SG's are not supported for ELS requests */
- if (bsg_job->request_payload.sg_cnt > 1 ||
- bsg_job->reply_payload.sg_cnt > 1) {
- DEBUG2(printk(KERN_INFO
- "multiple SG's are not supported for ELS requests"
- " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt,
- bsg_job->reply_payload.sg_cnt));
- rval = -EPERM;
- goto done;
- }
-
- /* ELS request for rport */
- if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
- rport = bsg_job->rport;
- fcport = *(fc_port_t **) rport->dd_data;
- host = rport_to_shost(rport);
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_RPT_ELS";
-
- /* make sure the rport is logged in,
- * if not perform fabric login
- */
- if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "failed to login port %06X for ELS passthru\n",
- fcport->d_id.b24));
- rval = -EIO;
- goto done;
- }
- } else {
- host = bsg_job->shost;
- vha = shost_priv(host);
- ha = vha->hw;
- type = "FC_BSG_HST_ELS_NOLOGIN";
-
- /* Allocate a dummy fcport structure, since functions
- * preparing the IOCB and mailbox command retrieves port
- * specific information from fcport structure. For Host based
- * ELS commands there will be no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport) {
- rval = -ENOMEM;
- goto done;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa =
- bsg_job->request->rqst_data.h_els.port_id[0];
- fcport->d_id.b.area =
- bsg_job->request->rqst_data.h_els.port_id[1];
- fcport->d_id.b.domain =
- bsg_job->request->rqst_data.h_els.port_id[2];
- fcport->loop_id =
- (fcport->d_id.b.al_pa == 0xFD) ?
- NPH_FABRIC_CONTROLLER : NPH_F_PORT;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- els = sp->ctx;
- els->ctx.type =
- (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
- SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
- els->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- bsg_job->request->rqst_data.h_els.command_code,
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_unmap_sg;
- }
- return rval;
-
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- goto done_free_fcport;
-
-done_free_fcport:
- if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
- kfree(fcport);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
-{
- srb_t *sp;
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
- int req_sg_cnt, rsp_sg_cnt;
- uint16_t loop_id;
- struct fc_port *fcport;
- char *type = "FC_BSG_HST_CT";
- struct srb_bsg *ct;
-
- /* pass through is supported only for ISP 4Gb or higher */
- if (!IS_FWI2_CAPABLE(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld):Firmware is not capable to support FC "
- "CT pass thru\n", vha->host_no));
- rval = -EPERM;
- goto done;
- }
-
- req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done_unmap_sg;
- }
-
- loop_id =
- (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
- >> 24;
- switch (loop_id) {
- case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
- break;
- case 0xFA:
- loop_id = vha->mgmt_svr_loop_id;
- break;
- default:
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unknown loop id: %x\n", loop_id));
- rval = -EINVAL;
- goto done_unmap_sg;
- }
-
- /* Allocate a dummy fcport structure, since functions preparing the
- * IOCB and mailbox command retrieves port specific information
- * from fcport structure. For Host based ELS commands there will be
- * no fcport structure allocated
- */
- fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
- if (!fcport)
- {
- rval = -ENOMEM;
- goto done_unmap_sg;
- }
-
- /* Initialize all required fields of fcport */
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
- fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
- fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
- fcport->loop_id = loop_id;
-
- /* Alloc SRB structure */
- sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
- if (!sp) {
- rval = -ENOMEM;
- goto done_free_fcport;
- }
-
- ct = sp->ctx;
- ct->ctx.type = SRB_CT_CMD;
- ct->bsg_job = bsg_job;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- rval = -EIO;
- goto done_free_fcport;
- }
- return rval;
-
-done_free_fcport:
- kfree(fcport);
-done_unmap_sg:
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-done:
- return rval;
-}
-
-static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
-{
- struct Scsi_Host *host = bsg_job->shost;
- scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
- int rval;
- uint8_t command_sent;
- uint32_t vendor_cmd;
- char *type;
- struct msg_echo_lb elreq;
- uint16_t response[MAILBOX_REGISTER_COUNT];
- uint8_t* fw_sts_ptr;
- uint8_t *req_data;
- dma_addr_t req_data_dma;
- uint32_t req_data_len;
- uint8_t *rsp_data;
- dma_addr_t rsp_data_dma;
- uint32_t rsp_data_len;
-
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- rval = -EBUSY;
- goto done;
- }
-
- if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
- rval = -EIO;
- goto done;
- }
-
- elreq.req_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!elreq.req_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
- elreq.rsp_sg_cnt =
- dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if (!elreq.rsp_sg_cnt) {
- rval = -ENOMEM;
- goto done;
- }
-
- if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
- (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
- {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
- rval = -EAGAIN;
- goto done_unmap_sg;
- }
- req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
- req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
- &req_data_dma, GFP_KERNEL);
-
- rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
- &rsp_data_dma, GFP_KERNEL);
-
- /* Copy the request buffer in req_data now */
- sg_copy_to_buffer(bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, req_data,
- req_data_len);
-
- elreq.send_dma = req_data_dma;
- elreq.rcv_dma = rsp_data_dma;
- elreq.transfer_size = req_data_len;
-
- /* Vendor cmd : loopback or ECHO diagnostic
- * Options:
- * Loopback : Either internal or external loopback
- * ECHO: ECHO ELS or Vendor specific FC4 link data
- */
- vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
- elreq.options =
- *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
- + 1);
-
- switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
- case QL_VND_LOOPBACK:
- if (ha->current_topology != ISP_CFG_F) {
- type = "FC_BSG_HST_VENDOR_LOOPBACK";
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_LOOPBACK_CMD;
- rval = qla2x00_loopback_test(vha, &elreq, response);
- if (IS_QLA81XX(ha)) {
- if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
- DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
- "ISP\n", __func__, vha->host_no));
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
- }
- } else {
- type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
- vha->host_no, type, vendor_cmd, elreq.options));
-
- command_sent = INT_DEF_LB_ECHO_CMD;
- rval = qla2x00_echo_test(vha, &elreq, response);
- }
- break;
- case QLA84_RESET:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_reset(vha, &elreq, bsg_job);
- break;
- case QLA84_MGMT_CMD:
- if (!IS_QLA84XX(vha->hw)) {
- rval = -EINVAL;
- DEBUG16(printk(
- "%s(%ld): 8xxx exiting.\n",
- __func__, vha->host_no));
- return rval;
- }
- rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
- break;
- default:
- rval = -ENOSYS;
- }
-
- if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- } else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
- rval = bsg_job->reply->result = 0;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, rsp_data,
- rsp_data_len);
- }
- bsg_job->job_done(bsg_job);
-
-done_unmap_sg:
-
- if(req_data)
- dma_free_coherent(&ha->pdev->dev, req_data_len,
- req_data, req_data_dma);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- dma_unmap_sg(&ha->pdev->dev,
- bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-done:
- return rval;
-}
-
-static int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
-{
- int ret = -EINVAL;
-
- switch (bsg_job->request->msgcode) {
- case FC_BSG_RPT_ELS:
- case FC_BSG_HST_ELS_NOLOGIN:
- ret = qla2x00_process_els(bsg_job);
- break;
- case FC_BSG_HST_CT:
- ret = qla2x00_process_ct(bsg_job);
- break;
- case FC_BSG_HST_VENDOR:
- ret = qla2x00_process_vendor_specific(bsg_job);
- break;
- case FC_BSG_HST_ADD_RPORT:
- case FC_BSG_HST_DEL_RPORT:
- case FC_BSG_RPT_CT:
- default:
- DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
- break;
- }
- return ret;
-}
-
-static int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
-{
- scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp;
- int cnt, que;
- unsigned long flags;
- struct req_que *req;
- struct srb_bsg *sp_bsg;
-
- /* find the bsg job from the active list of commands */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
-
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
- sp = req->outstanding_cmds[cnt];
-
- if (sp) {
- sp_bsg = (struct srb_bsg*)sp->ctx;
-
- if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
- (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
- || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
- (sp_bsg->bsg_job == bsg_job)) {
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command failed\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -EIO;
- } else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort_command success\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = 0;
- }
- goto done;
- }
- }
- }
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) SRB not found to abort\n", vha->host_no));
- bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
- return 0;
-
-done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (bsg_job->request->msgcode == FC_BSG_HST_CT)
- kfree(sp->fcport);
- kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
- return 0;
-}
-
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -2502,7 +1968,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -2516,125 +1982,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
-static int
-qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- int ret = 0;
- int cmd;
- uint16_t cmd_status;
-
- DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
-
- cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
- == A84_RESET_FLAG_ENABLE_DIAG_FW ?
- A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
- ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
- &cmd_status);
- return ret;
-}
-
-static int
-qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
-{
- struct access_chip_84xx *mn;
- dma_addr_t mn_dma, mgmt_dma;
- void *mgmt_b = NULL;
- int ret = 0;
- int rsp_hdr_len, len = 0;
- struct qla84_msg_mgmt *ql84_mgmt;
-
- ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
- ql84_mgmt->cmd =
- *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
- ql84_mgmt->mgmtp.u.mem.start_addr =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
- ql84_mgmt->len =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
- ql84_mgmt->mgmtp.u.config.id =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
- ql84_mgmt->mgmtp.u.config.param0 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
- ql84_mgmt->mgmtp.u.config.param1 =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
- ql84_mgmt->mgmtp.u.info.type =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
- ql84_mgmt->mgmtp.u.info.context =
- *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
-
- rsp_hdr_len = bsg_job->request_payload.payload_len;
-
- mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
- if (mn == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed%lu\n", __func__, ha->host_no));
- return -ENOMEM;
- }
-
- memset(mn, 0, sizeof (struct access_chip_84xx));
-
- mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
- mn->entry_count = 1;
-
- switch (ql84_mgmt->cmd) {
- case QLA84_MGMT_READ_MEM:
- mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_WRITE_MEM:
- mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
- break;
- case QLA84_MGMT_CHNG_CONFIG:
- mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
- mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
- break;
- case QLA84_MGMT_GET_INFO:
- mn->options = cpu_to_le16(ACO_REQUEST_INFO);
- mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
- mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
- break;
- default:
- ret = -EIO;
- goto exit_mgmt0;
- }
-
- if ((len == ql84_mgmt->len) &&
- ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
- mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
- &mgmt_dma, GFP_KERNEL);
- if (mgmt_b == NULL) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
- "failed%lu\n", __func__, ha->host_no));
- ret = -ENOMEM;
- goto exit_mgmt0;
- }
- mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
- mn->dseg_count = cpu_to_le16(1);
- mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
- mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
- mn->dseg_length = cpu_to_le32(len);
-
- if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
- memcpy(mgmt_b, ql84_mgmt->payload, len);
- }
- }
-
- ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
- if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
- || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
- if (ret != QLA_SUCCESS)
- DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
- __func__, ha->host_no));
- } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
- (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
- }
-
- if (mgmt_b)
- dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
-
-exit_mgmt0:
- dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
- return ret;
-}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 00000000000..b905dfe5ea6
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,1212 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+/* BSG support for ELS/CT pass through */
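+/*
+ * qla2x00_get_ctx_bsg_sp() hands back an srb from ha->srb_mempool, zeroed,
+ * with a kzalloc'ed context of the requested size attached.  There is no
+ * matching free helper: callers that fail after this allocation release the
+ * pair themselves with kfree(sp->ctx) and mempool_free(sp, ha->srb_mempool),
+ * as the error paths below do.
+ */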
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx;
+
+ sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ ctx = kzalloc(size, GFP_KERNEL);
+ if (!ctx) {
+ mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ goto done;
+ }
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->ctx = ctx;
+done:
+ return sp;
+}
+
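+/*
+ * A priority configuration buffer is considered valid when it starts with
+ * the "HQOS" signature; when flag is 1 it must additionally contain at
+ * least one entry with FCP_PRIO_ENTRY_TAG_VALID set.
+ */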
+int
+qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+{
+ int i, ret, num_valid;
+ uint8_t *bcode;
+ struct qla_fcp_prio_entry *pri_entry;
+
+ ret = 1;
+ num_valid = 0;
+ bcode = (uint8_t *)pri_cfg;
+
+ if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
+ bcode[0x3] != 'S') {
+ return 0;
+ }
+ if (flag != 1)
+ return ret;
+
+ pri_entry = &pri_cfg->entry[0];
+ for (i = 0; i < pri_cfg->num_entries; i++) {
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ num_valid++;
+ pri_entry++;
+ }
+
+ if (num_valid == 0)
+ ret = 0;
+
+ return ret;
+}
+
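+/*
+ * QL_VND_FCP_PRIO_CFG_CMD: vendor_cmd[1] selects the sub operation.
+ * Enable/disable toggle ha->flags.fcp_prio_enabled and push the change with
+ * qla24xx_update_all_fcp_prio(); get/set copy the configuration (at most
+ * FCP_PRIO_CFG_SIZE bytes) between ha->fcp_prio_cfg and the bsg payload,
+ * validating it on a set.
+ */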
+static int
+qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int ret = 0;
+ uint32_t len;
+ uint32_t oper;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ret = -EBUSY;
+ goto exit_fcp_prio_cfg;
+ }
+
+ /* Get the sub command */
+ oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Only set config is allowed if config memory is not allocated */
+ if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+ switch (oper) {
+ case QLFC_FCP_PRIO_DISABLE:
+ if (ha->flags.fcp_prio_enabled) {
+ ha->flags.fcp_prio_enabled = 0;
+ ha->fcp_prio_cfg->attributes &=
+ ~FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ break;
+
+ case QLFC_FCP_PRIO_ENABLE:
+ if (!ha->flags.fcp_prio_enabled) {
+ if (ha->fcp_prio_cfg) {
+ ha->flags.fcp_prio_enabled = 1;
+ ha->fcp_prio_cfg->attributes |=
+ FCP_PRIO_ATTR_ENABLE;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ } else {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+ }
+ break;
+
+ case QLFC_FCP_PRIO_GET_CONFIG:
+ len = bsg_job->reply_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ ret = -EINVAL;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ goto exit_fcp_prio_cfg;
+ }
+
+ bsg_job->reply->result = DID_OK;
+ bsg_job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
+ len);
+
+ break;
+
+ case QLFC_FCP_PRIO_SET_CONFIG:
+ len = bsg_job->request_payload.payload_len;
+ if (!len || len > FCP_PRIO_CFG_SIZE) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory "
+ "for fcp prio config data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -ENOMEM;
+ goto exit_fcp_prio_cfg;
+ }
+ }
+
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
+ FCP_PRIO_CFG_SIZE);
+
+ /* validate fcp priority data */
+ if (!qla24xx_fcp_prio_cfg_valid(
+ (struct qla_fcp_prio_cfg *)
+ ha->fcp_prio_cfg, 1)) {
+ bsg_job->reply->result = (DID_ERROR << 16);
+ ret = -EINVAL;
+ /* If the buffer was invalid, the
+ * fcp_prio_cfg data is of no use
+ */
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ goto exit_fcp_prio_cfg;
+ }
+
+ ha->flags.fcp_prio_enabled = 0;
+ if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
+ ha->flags.fcp_prio_enabled = 1;
+ qla24xx_update_all_fcp_prio(vha);
+ bsg_job->reply->result = DID_OK;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+exit_fcp_prio_cfg:
+ bsg_job->job_done(bsg_job);
+ return ret;
+}
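+
+/*
+ * ELS pass-through: FC_BSG_RPT_ELS goes through an existing rport (a fabric
+ * login is attempted if the port is not logged in), while
+ * FC_BSG_HST_ELS_NOLOGIN builds a temporary fcport from the port id carried
+ * in the request.  Only single entry SG lists are accepted for the request
+ * and reply payloads.
+ */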
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+ struct srb_ctx *els;
+
+ /* Multiple SG entries are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ DEBUG2(printk(KERN_INFO
+ "multiple SG's are not supported for ELS requests"
+ " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt));
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+
+ /* Make sure the rport is logged in;
+ * if not, perform a fabric login.
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "failed to login port %06X for ELS passthru\n",
+ fcport->d_id.b24));
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieve port
+ * specific information from the fcport structure. For host
+ * based ELS commands there will be no fcport structure
+ * allocated.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ els = sp->ctx;
+ els->type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ els->name =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ "bsg_els_rpt" : "bsg_els_hst");
+ els->u.bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ bsg_job->request->rqst_data.h_els.command_code,
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ goto done_free_fcport;
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+ kfree(fcport);
+done:
+ return rval;
+}
+
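+/*
+ * CT pass-through: the destination is taken from bits 31:24 of
+ * preamble_word1.  0xFC (the well known name server address) maps to
+ * NPH_SNS and 0xFA to vha->mgmt_svr_loop_id (the management server);
+ * any other value is rejected with -EINVAL.
+ */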
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+ struct srb_ctx *ct;
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld):Firmware is not capable to support FC "
+ "CT pass thru\n", vha->host_no));
+ rval = -EPERM;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = cpu_to_le16(NPH_SNS);
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Unknown loop id: %x\n", loop_id));
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieve port specific information
+ * from the fcport structure. For host based CT pass-through
+ * commands there will be no fcport structure allocated.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ ct = sp->ctx;
+ ct->type = SRB_CT_CMD;
+ ct->name = "bsg_ct";
+ ct->u.bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
+
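+/*
+ * QL_VND_LOOPBACK: on anything other than an F_port topology (ISP_CFG_F)
+ * the request is run as a loopback test (INT_DEF_LB_LOOPBACK_CMD); on a
+ * fabric it is run as an ECHO test (INT_DEF_LB_ECHO_CMD) instead.  The
+ * mailbox completion words and the command that was sent are copied into
+ * the reply buffer right after struct fc_bsg_reply so the application can
+ * inspect them.
+ */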
+static int
+qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent;
+ char *type;
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint8_t *fw_sts_ptr;
+ uint8_t *req_data = NULL;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data = NULL;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
+ return -EIO;
+ }
+
+ elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+ DMA_TO_DEVICE);
+
+ if (!elreq.req_sg_cnt)
+ return -ENOMEM;
+
+ elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+ DMA_FROM_DEVICE);
+
+ if (!elreq.rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+ if (!req_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+ if (!rsp_data) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_dma_req;
+ }
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ if (ha->current_topology != ISP_CFG_F) {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n",
+ vha->host_no, type));
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ if (IS_QLA81XX(ha)) {
+ if (response[0] == MBS_COMMAND_ERROR &&
+ response[1] == MBS_LB_RESET) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+ "ISP\n", __func__, vha->host_no));
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ } else {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ }
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s failed\n", vha->host_no, type));
+
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ rval = 0;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request %s completed\n", vha->host_no, type));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ bsg_job->reply->result = DID_OK;
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+ bsg_job->job_done(bsg_job);
+
+ dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+ rsp_data, rsp_data_dma);
+done_free_dma_req:
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ return rval;
+}
+
+static int
+qla84xx_reset(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ uint32_t flag;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset failed\n", vha->host_no));
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx reset completed\n", vha->host_no));
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
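+/*
+ * QL_VND_A84_UPDATE_FW: the firmware image is taken from the request
+ * payload and handed to the ISP84xx through a VERIFY_CHIP IOCB with
+ * VCO_FORCE_UPDATE | VCO_END_OF_DATA (plus VCO_DIAG_FW for a diagnostic
+ * image).  The firmware version is read from the third 32-bit word of the
+ * image, and the IOCB is issued with a 120 second timeout.
+ */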
+static int
+qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct verify_chip_entry_84xx *mn = NULL;
+ dma_addr_t mn_dma, fw_dma;
+ void *fw_buf = NULL;
+ int rval = 0;
+ uint32_t sg_cnt;
+ uint32_t data_len;
+ uint16_t options;
+ uint32_t flag;
+ uint32_t fw_ver;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!sg_cnt)
+ return -ENOMEM;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &fw_dma, GFP_KERNEL);
+ if (!fw_buf) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, fw_buf, data_len);
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_free_fw_buf;
+ }
+
+ flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
+ if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
+ options |= VCO_DIAG_FW;
+
+ mn->options = cpu_to_le16(options);
+ mn->fw_ver = cpu_to_le32(fw_ver);
+ mn->fw_size = cpu_to_le32(data_len);
+ mn->fw_seq_size = cpu_to_le32(data_len);
+ mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
+ mn->dseg_length = cpu_to_le32(data_len);
+ mn->data_seg_cnt = cpu_to_le16(1);
+
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx updatefw completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+done_free_fw_buf:
+ dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ return rval;
+}
+
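+/*
+ * QL_VND_A84_MGMT_CMD: the struct qla_bsg_a84_mgmt header follows
+ * struct fc_bsg_request in the bsg request buffer.  READ_MEM and GET_INFO
+ * map the reply payload (DMA_FROM_DEVICE) and copy the result back on
+ * success, WRITE_MEM maps the request payload (DMA_TO_DEVICE), and
+ * CHNG_CONFIG carries only parameters, so no data buffer is set up.
+ */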
+static int
+qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct access_chip_84xx *mn = NULL;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int rval = 0;
+ struct qla_bsg_a84_mgmt *ql84_mgmt;
+ uint32_t sg_cnt;
+ uint32_t data_len = 0;
+ uint32_t dma_direction = DMA_NONE;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_QLA84XX(ha)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
+ "exiting.\n", vha->host_no));
+ return -EINVAL;
+ }
+
+ ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!ql84_mgmt) {
+ DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (!mn) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed for host=%lu\n", __func__, vha->host_no));
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof(struct access_chip_84xx));
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ switch (ql84_mgmt->mgmt.cmd) {
+ case QLA84_MGMT_READ_MEM:
+ case QLA84_MGMT_GET_INFO:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_FROM_DEVICE;
+
+ if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->reply_payload.payload_len;
+
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+
+ } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
+
+ mn->parameter2 =
+ cpu_to_le32(
+ ql84_mgmt->mgmt.mgmtp.u.info.context);
+ }
+ break;
+
+ case QLA84_MGMT_WRITE_MEM:
+ sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (!sg_cnt) {
+ rval = -ENOMEM;
+ goto exit_mgmt;
+ }
+
+ dma_direction = DMA_TO_DEVICE;
+
+ if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x ",
+ bsg_job->request_payload.sg_cnt, sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ data_len = bsg_job->request_payload.payload_len;
+ mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+ &mgmt_dma, GFP_KERNEL);
+ if (!mgmt_b) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
+ "failed for host=%lu\n",
+ __func__, vha->host_no));
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
+
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+ break;
+
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
+
+ mn->parameter2 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
+
+ mn->parameter3 =
+ cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
+ break;
+
+ default:
+ rval = -EIO;
+ goto exit_mgmt;
+ }
+
+ if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+ }
+
+ rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
+
+ if (rval) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt failed\n", vha->host_no));
+
+ rval = bsg_job->reply->reply_payload_rcv_len = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
+ "request 84xx mgmt completed\n", vha->host_no));
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK;
+
+ if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
+ (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, mgmt_b,
+ data_len);
+ }
+ }
+
+ bsg_job->job_done(bsg_job);
+
+done_unmap_sg:
+ if (mgmt_b)
+ dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
+
+ if (dma_direction == DMA_TO_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ else if (dma_direction == DMA_FROM_DEVICE)
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+exit_mgmt:
+ dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+ return rval;
+}
+
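+/*
+ * QL_VND_IIDMA: struct qla_port_param (see qla_bsg.h) follows
+ * struct fc_bsg_request and names the target port by WWPN.  A non-zero
+ * mode sets the iiDMA speed; mode zero reads it back, in which case the
+ * updated qla_port_param is copied into the reply right after
+ * struct fc_bsg_reply.
+ */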
+static int
+qla24xx_iidma(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = 0;
+ struct qla_port_param *port_param = NULL;
+ fc_port_t *fcport = NULL;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+ uint8_t *rsp_ptr = NULL;
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ return -EBUSY;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
+ "supported\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ port_param = (struct qla_port_param *)((char *)bsg_job->request +
+ sizeof(struct fc_bsg_request));
+ if (!port_param) {
+ DEBUG2(printk("%s(%ld): port_param header not provided, "
+ "exiting.\n", __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
+ fcport->port_name, sizeof(fcport->port_name)))
+ continue;
+ break;
+ }
+
+ if (!fcport) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
+ __func__, vha->host_no));
+ return -EINVAL;
+ }
+
+ if (port_param->mode)
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
+ port_param->speed, mb);
+ else
+ rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
+ &port_param->speed, mb);
+
+ if (rval) {
+ DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
+ "%02x%02x%02x%02x%02x%02x%02x%02x -- "
+ "%04x %x %04x %04x.\n",
+ vha->host_no, fcport->port_name[0],
+ fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7], rval,
+ fcport->fp_speed, mb[0], mb[1]));
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+
+ } else {
+ if (!port_param->mode) {
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_port_param);
+
+ rsp_ptr = ((uint8_t *)bsg_job->reply) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(rsp_ptr, port_param,
+ sizeof(struct qla_port_param));
+ }
+
+ bsg_job->reply->result = DID_OK;
+ }
+
+ bsg_job->job_done(bsg_job);
+ return rval;
+}
+
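+/*
+ * Vendor specific requests are demultiplexed on vendor_cmd[0], which
+ * carries one of the QL_VND_* opcodes from qla_bsg.h; the following words
+ * are command specific (for example, vendor_cmd[1] holds the loopback
+ * options, the 84xx reset flag or the FCP priority sub command).  Unknown
+ * opcodes complete the job with DID_ERROR and return -ENOSYS.
+ */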
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QL_VND_LOOPBACK:
+ return qla2x00_process_loopback(bsg_job);
+
+ case QL_VND_A84_RESET:
+ return qla84xx_reset(bsg_job);
+
+ case QL_VND_A84_UPDATE_FW:
+ return qla84xx_updatefw(bsg_job);
+
+ case QL_VND_A84_MGMT_CMD:
+ return qla84xx_mgmt_cmd(bsg_job);
+
+ case QL_VND_IIDMA:
+ return qla24xx_iidma(bsg_job);
+
+ case QL_VND_FCP_PRIO_CFG_CMD:
+ return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+
+ default:
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->job_done(bsg_job);
+ return -ENOSYS;
+ }
+}
+
+int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ break;
+ }
+ return ret;
+}
+
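+/*
+ * Timeout handling: the active CT/ELS srb bound to this bsg_job is looked
+ * up in every request queue's outstanding_cmds[] under the hardware lock
+ * and aborted via isp_ops->abort_command().  For CT jobs the dummy fcport
+ * is freed along with the srb context.
+ */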
+int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+ struct srb_ctx *sp_bsg;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ sp_bsg = sp->ctx;
+
+ if (((sp_bsg->type == SRB_CT_CMD) ||
+ (sp_bsg->type == SRB_ELS_CMD_HST))
+ && (sp_bsg->u.bsg_job == bsg_job)) {
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command failed\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = -EIO;
+ } else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx "
+ "abort_command success\n",
+ vha->host_no));
+ bsg_job->req->errors =
+ bsg_job->reply->result = 0;
+ }
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 00000000000..76ed92dd2ef
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,135 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_BSG_H
+#define __QLA_BSG_H
+
+/* BSG Vendor specific commands */
+#define QL_VND_LOOPBACK 0x01
+#define QL_VND_A84_RESET 0x02
+#define QL_VND_A84_UPDATE_FW 0x03
+#define QL_VND_A84_MGMT_CMD 0x04
+#define QL_VND_IIDMA 0x05
+#define QL_VND_FCP_PRIO_CFG_CMD 0x06
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* BSG Vendor specific definitions */
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+*/
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+* context definitions for QLA84_MGMT_INFO_PORT_STAT
+*/
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+* context definitions for QLA84_MGMT_INFO_LIF_STAT
+*/
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
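+/* Passed immediately after struct fc_bsg_request in the bsg request buffer. */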
+struct qla_bsg_a84_mgmt {
+ struct qla84_msg_mgmt mgmt;
+} __attribute__ ((packed));
+
+struct qla_scsi_addr {
+ uint16_t bus;
+ uint16_t target;
+} __attribute__ ((packed));
+
+struct qla_ext_dest_addr {
+ union {
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
+ uint8_t id[4];
+ struct qla_scsi_addr scsi_addr;
+ } dest_addr;
+ uint16_t dest_type;
+#define EXT_DEF_TYPE_WWPN 2
+ uint16_t lun;
+ uint16_t padding[2];
+} __attribute__ ((packed));
+
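+/*
+ * Payload for QL_VND_IIDMA, placed immediately after struct fc_bsg_request;
+ * mode 0 reads the current iiDMA speed back, non-zero sets it.
+ */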
+struct qla_port_param {
+ struct qla_ext_dest_addr fc_scsi_addr;
+ uint16_t mode;
+ uint16_t speed;
+} __attribute__ ((packed));
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cb2eca4c26d..2afc8a362f2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -769,6 +769,9 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ if (IS_QLA82XX(ha))
+ return;
+
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -1660,4 +1663,62 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
printk("\n");
}
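+
+/*
+ * Like qla2x00_dump_buffer() but collapses runs of identical 16 byte
+ * lines, printing how many more times the previous pattern repeats
+ * instead of dumping each one.
+ */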
+void
+qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t c;
+ uint8_t last16[16], cur16[16];
+ uint32_t lc = 0, num_same16 = 0, j;
+
+ printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
+ "Ah Bh Ch Dh Eh Fh\n");
+ printk(KERN_DEBUG "----------------------------------------"
+ "----------------------\n");
+
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ cur16[lc++] = c;
+
+ cnt++;
+ if (cnt % 16)
+ continue;
+
+ /* We have 16 now */
+ lc = 0;
+ if (num_same16 == 0) {
+ memcpy(last16, cur16, 16);
+ num_same16++;
+ continue;
+ }
+ if (memcmp(cur16, last16, 16) == 0) {
+ num_same16++;
+ continue;
+ }
+ for (j = 0; j < 16; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
+ printk(KERN_DEBUG "\n");
+
+ if (num_same16 > 1)
+ printk(KERN_DEBUG "> prev pattern repeats (%u)"
+ "more times\n", num_same16-1);
+ memcpy(last16, cur16, 16);
+ num_same16 = 1;
+ }
+
+ if (num_same16) {
+ for (j = 0; j < 16; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
+ printk(KERN_DEBUG "\n");
+
+ if (num_same16 > 1)
+ printk(KERN_DEBUG "> prev pattern repeats (%u)"
+ "more times\n", num_same16-1);
+ }
+ if (lc) {
+ for (j = 0; j < lc; j++)
+ printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
+ printk(KERN_DEBUG "\n");
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index d6d9c86cb05..916c81f3f55 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -27,6 +27,9 @@
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
+/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
+
+/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
/*
* Macros use for debugging the driver.
@@ -139,6 +142,13 @@
#define DEBUG17(x) do {} while (0)
#endif
+#if defined(QL_DEBUG_LEVEL_18)
+#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
+#else
+#define DEBUG18(x) do {} while (0)
+#endif
+
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index afa95614aaf..83961090901 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -33,7 +33,10 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
-#define QLA2XXX_DRIVER_NAME "qla2xxx"
+#include "qla_bsg.h"
+#include "qla_nx.h"
+#define QLA2XXX_DRIVER_NAME "qla2xxx"
+#define QLA2XXX_APIDEV "ql2xapidev"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -186,6 +189,16 @@
struct req_que;
/*
+ * (sd.h is not exported, hence local inclusion)
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
* SCSI Request Block
*/
typedef struct srb {
@@ -205,40 +218,73 @@ typedef struct srb {
/*
* SRB flag definitions
*/
-#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */
+#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
+#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
+#define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */
+
+/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
+#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
/*
* SRB extensions.
*/
-struct srb_ctx {
-#define SRB_LOGIN_CMD 1
-#define SRB_LOGOUT_CMD 2
- uint16_t type;
- struct timer_list timer;
-
- void (*free)(srb_t *sp);
- void (*timeout)(srb_t *sp);
-};
-
-struct srb_logio {
- struct srb_ctx ctx;
-
+struct srb_iocb {
+ union {
+ struct {
+ uint16_t flags;
#define SRB_LOGIN_RETRIED BIT_0
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
- uint16_t flags;
+ uint16_t data[2];
+ } logio;
+ struct {
+ /*
+ * Values for flags field below are as
+ * defined in tsk_mgmt_entry struct
+ * for control_flags field in qla_fw.h.
+ */
+ uint32_t flags;
+ uint32_t lun;
+ uint32_t data;
+ } tmf;
+ struct {
+ /*
+ * values for modif field below are as
+ * defined in mrk_entry_24xx struct
+ * for the modifier field in qla_fw.h.
+ */
+ uint8_t modif;
+ uint16_t lun;
+ uint32_t data;
+ } marker;
+ } u;
+
+ struct timer_list timer;
+
+ void (*done)(srb_t *);
+ void (*free)(srb_t *);
+ void (*timeout)(srb_t *);
};
-struct srb_bsg_ctx {
+/* Values for srb_ctx type */
+#define SRB_LOGIN_CMD 1
+#define SRB_LOGOUT_CMD 2
#define SRB_ELS_CMD_RPT 3
#define SRB_ELS_CMD_HST 4
-#define SRB_CT_CMD 5
- uint16_t type;
-};
+#define SRB_CT_CMD 5
+#define SRB_ADISC_CMD 6
+#define SRB_TM_CMD 7
+#define SRB_MARKER_CMD 8
-struct srb_bsg {
- struct srb_bsg_ctx ctx;
- struct fc_bsg_job *bsg_job;
+struct srb_ctx {
+ uint16_t type;
+ char *name;
+ union {
+ struct srb_iocb *iocb_cmd;
+ struct fc_bsg_job *bsg_job;
+ } u;
};
struct msg_echo_lb {
@@ -416,6 +462,7 @@ typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
+ struct device_reg_82xx isp82;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
@@ -1299,6 +1346,66 @@ typedef struct {
uint32_t dseg_4_length; /* Data segment 4 length. */
} cont_a64_entry_t;
+#define PO_MODE_DIF_INSERT 0
+#define PO_MODE_DIF_REMOVE BIT_0
+#define PO_MODE_DIF_PASS BIT_1
+#define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1)
+#define PO_ENABLE_DIF_BUNDLING BIT_8
+#define PO_ENABLE_INCR_GUARD_SEED BIT_3
+#define PO_DISABLE_INCR_REF_TAG BIT_5
+#define PO_DISABLE_GUARD_CHECK BIT_4
+/*
+ * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
+ */
+struct crc_context {
+ uint32_t handle; /* System handle. */
+ uint32_t ref_tag;
+ uint16_t app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+ uint16_t guard_seed; /* Initial Guard Seed */
+ uint16_t prot_opts; /* Requested Data Protection Mode */
+ uint16_t blk_size; /* Data size in bytes */
+ uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ * only) */
+ uint32_t byte_count; /* Total byte count/ total data
+ * transfer count */
+ union {
+ struct {
+ uint32_t reserved_1;
+ uint16_t reserved_2;
+ uint16_t reserved_3;
+ uint32_t reserved_4;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t reserved_5[2];
+ uint32_t reserved_6;
+ } nobundling;
+ struct {
+ uint32_t dif_byte_count; /* Total DIF byte
+ * count */
+ uint16_t reserved_1;
+ uint16_t dseg_count; /* Data segment count */
+ uint32_t reserved_2;
+ uint32_t data_address[2];
+ uint32_t data_length;
+ uint32_t dif_address[2];
+ uint32_t dif_length; /* Data segment 0
+ * length */
+ } bundling;
+ } u;
+
+ struct fcp_cmnd fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+ /* List of DMA context transfers */
+ struct list_head dsd_list;
+
+ /* This structure should not exceed 512 bytes */
+};
+
+#define CRC_CONTEXT_LEN_FW (offsetof(struct crc_context, fcp_cmnd.lun))
+#define CRC_CONTEXT_FCPCMND_OFF (offsetof(struct crc_context, fcp_cmnd.lun))
+
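/*
 * Illustrative sketch (not part of this patch): the comment above requires
 * struct crc_context to stay within 512 bytes, which matches the
 * DSD_LIST_DMA_POOL_SIZE / FCP_CMND_DMA_POOL_SIZE pools of 512 bytes added
 * to qla_hw_data. A minimal compile-time guard along these lines would
 * catch accidental growth; the helper name is hypothetical.
 */
static inline void qla2x00_assert_crc_context_size(void)
{
	BUILD_BUG_ON(sizeof(struct crc_context) > 512);
}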
/*
* ISP queue - status entry structure definition.
*/
@@ -1359,6 +1466,7 @@ typedef struct {
#define CS_ABORTED 0x5 /* System aborted command. */
#define CS_TIMEOUT 0x6 /* Timeout error. */
#define CS_DATA_OVERRUN 0x7 /* Data overrun. */
+#define CS_DIF_ERROR 0xC /* DIF error detected */
#define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */
#define CS_QUEUE_FULL 0x1C /* Queue Full. */
@@ -1579,6 +1687,8 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ uint8_t fcp_prio;
+
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
@@ -1611,6 +1721,7 @@ typedef struct fc_port {
#define FCF_FABRIC_DEVICE BIT_0
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
+#define FCF_ASYNC_SENT BIT_3
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2109,6 +2220,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
+ int (*abort_isp) (struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
@@ -2143,6 +2255,8 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
+ QLA_EVT_ASYNC_ADISC,
+ QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
};
@@ -2295,6 +2409,7 @@ struct qla_hw_data {
uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
+ uint32_t fcp_prio_enabled :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2382,7 +2497,8 @@ struct qla_hw_data {
#define DT_ISP2532 BIT_11
#define DT_ISP8432 BIT_12
#define DT_ISP8001 BIT_13
-#define DT_ISP_LAST (DT_ISP8001 << 1)
+#define DT_ISP8021 BIT_14
+#define DT_ISP_LAST (DT_ISP8021 << 1)
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
@@ -2405,6 +2521,7 @@ struct qla_hw_data {
#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2415,8 +2532,10 @@ struct qla_hw_data {
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
+#define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
- IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
(ha)->flags.msix_enabled)
@@ -2496,6 +2615,9 @@ struct qla_hw_data {
dma_addr_t ex_init_cb_dma;
struct ex_init_cb_81xx *ex_init_cb;
+ void *async_pd;
+ dma_addr_t async_pd_dma;
+
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -2598,6 +2720,8 @@ struct qla_hw_data {
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
uint32_t flt_region_gold_fw;
+ uint32_t flt_region_fcp_prio;
+ uint32_t flt_region_bootload;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -2626,6 +2750,39 @@ struct qla_hw_data {
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
+
+ /* FCP_CMND priority support */
+ struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+ struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE 512
+
+ struct dma_pool *fcp_cmnd_dma_pool;
+ mempool_t *ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE 512
+
+ unsigned long nx_pcibase; /* Base I/O address */
+ uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
+ unsigned long nxdb_wr_ptr; /* Doorbell write pointer */
+
+ uint32_t crb_win;
+ uint32_t curr_window;
+ uint32_t ddr_mn_window;
+ unsigned long mn_win_crb;
+ unsigned long ms_win_crb;
+ int qdr_sn_window;
+ uint32_t nx_dev_init_timeout;
+ uint32_t nx_reset_timeout;
+ rwlock_t hw_lock;
+ uint16_t portnum; /* port number */
+ int link_width;
+ struct fw_blob *hablob;
+ struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+ uint16_t gbl_dsd_inuse;
+ uint16_t gbl_dsd_avail;
+ struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
};
/*
@@ -2650,6 +2807,7 @@ typedef struct scsi_qla_host {
uint32_t management_server_logged_in :1;
uint32_t process_response_queue :1;
+ uint32_t difdix_supported:1;
} flags;
atomic_t loop_state;
@@ -2678,10 +2836,13 @@ typedef struct scsi_qla_host {
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
#define UNLOADING 15
#define NPIV_CONFIG_NEEDED 16
+#define ISP_UNRECOVERABLE 17
+#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
#define DFLG_NO_CABLE BIT_1
+#define DFLG_DEV_FAILED BIT_5
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
@@ -2739,6 +2900,8 @@ typedef struct scsi_qla_host {
#define VP_ERR_ADAP_NORESOURCES 5
struct qla_hw_data *hw;
struct req_que *req;
+ int fw_heartbeat_counter;
+ int seconds_since_last_heartbeat;
} scsi_qla_host_t;
/*
@@ -2791,134 +2954,16 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_24XX 0x100000
#define OPTROM_SIZE_25XX 0x200000
#define OPTROM_SIZE_81XX 0x400000
+#define OPTROM_SIZE_82XX 0x800000
+
+#define OPTROM_BURST_SIZE 0x1000
+#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
+
+#define QLA_DSDS_PER_IOCB 37
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
-/*
- * BSG Vendor specific commands
- */
-
-#define QL_VND_LOOPBACK 0x01
-#define QLA84_RESET 0x02
-#define QLA84_UPDATE_FW 0x03
-#define QLA84_MGMT_CMD 0x04
-
-/* BSG definations for interpreting CommandSent field */
-#define INT_DEF_LB_LOOPBACK_CMD 0
-#define INT_DEF_LB_ECHO_CMD 1
-
-/* BSG Vendor specific definations */
-typedef struct _A84_RESET {
- uint16_t Flags;
- uint16_t Reserved;
-#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
-} __attribute__((packed)) A84_RESET, *PA84_RESET;
-
-#define A84_ISSUE_WRITE_TYPE_CMD 0
-#define A84_ISSUE_READ_TYPE_CMD 1
-#define A84_CLEANUP_CMD 2
-#define A84_ISSUE_RESET_OP_FW 3
-#define A84_ISSUE_RESET_DIAG_FW 4
-#define A84_ISSUE_UPDATE_OPFW_CMD 5
-#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
-
-struct qla84_mgmt_param {
- union {
- struct {
- uint32_t start_addr;
- } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
- struct {
- uint32_t id;
-#define QLA84_MGMT_CONFIG_ID_UIF 1
-#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
-#define QLA84_MGMT_CONFIG_ID_PAUSE 3
-#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
-
- uint32_t param0;
- uint32_t param1;
- } config; /* for QLA84_MGMT_CHNG_CONFIG */
-
- struct {
- uint32_t type;
-#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
-#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
-#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
-#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
-#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
-#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
-#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
-
- uint32_t context;
-/*
-* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
-*/
-#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
-#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
-#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
-#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
-#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
-#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
-#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
-#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
-#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
-
-/*
-* context definitions for QLA84_MGMT_INFO_PORT_STAT
-*/
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
-#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
-#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
-#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
-
-
-/*
-* context definitions for QLA84_MGMT_INFO_LIF_STAT
-*/
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
-#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
-#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
-#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
-
- } info; /* for QLA84_MGMT_GET_INFO */
- } u;
-};
-
-struct qla84_msg_mgmt {
- uint16_t cmd;
-#define QLA84_MGMT_READ_MEM 0x00
-#define QLA84_MGMT_WRITE_MEM 0x01
-#define QLA84_MGMT_CHNG_CONFIG 0x02
-#define QLA84_MGMT_GET_INFO 0x03
- uint16_t rsrvd;
- struct qla84_mgmt_param mgmtp;/* parameters for cmd */
- uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
-};
-
-struct msg_update_fw {
- /*
- * diag_fw = 0 operational fw
- * otherwise diagnostic fw
- * offset, len, fw_len are present to overcome the current limitation
- * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
- * specifies the byte "offset" where it fits in the fw buffer. The
- * number of bytes in each chunk is specified in "len". "fw_len"
- * is the total size of fw. The first chunk should start at offset = 0.
- * When offset+len == fw_len, the fw is written to the HBA.
- */
- uint32_t diag_fw;
- uint32_t offset;/* start offset */
- uint32_t len; /* num bytes in cur xfer */
- uint32_t fw_len; /* size of fw in bytes */
- uint8_t fw_bytes[0];
-};
-
#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42c5587cc50..93f83396014 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -400,6 +400,7 @@ struct cmd_type_6 {
struct scsi_lun lun; /* FCP LUN (BE). */
uint16_t control_flags; /* Control flags. */
+#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
@@ -466,6 +467,43 @@ struct cmd_type_7 {
uint32_t dseg_0_len; /* Data segment 0 length. */
};
+#define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6)
+ * (T10-DIF) */
+struct cmd_type_crc_2 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+
+ uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+
+ struct scsi_lun lun; /* FCP LUN (BE). */
+
+ uint16_t control_flags; /* Control flags. */
+
+ uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
+
+ uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t crc_context_address[2]; /* Data segment address. */
+ uint16_t crc_context_len; /* Data segment length. */
+ uint16_t reserved_1; /* MUST be set to 0. */
+};
+
+
/*
* ISP queue - status entry structure definition.
*/
@@ -496,10 +534,17 @@ struct sts_entry_24xx {
uint32_t sense_len; /* FCP SENSE length. */
uint32_t rsp_data_len; /* FCP response data length. */
-
uint8_t data[28]; /* FCP response/sense information. */
+ /*
+ * If DIF Error is set in comp_status, these additional fields are
+ * defined:
+ * &data[10] : uint8_t report_runt_bg[2]; - computed guard
+ * &data[12] : uint8_t actual_dif[8]; - DIF Data received
+ * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
+ */
};
+
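/*
 * Illustrative sketch (not part of this patch): per the layout documented
 * in the comment above, a status-entry handler could pull the DIF details
 * out of sts_entry_24xx::data when comp_status reports CS_DIF_ERROR.
 * The helper name is hypothetical; qla2x00_dump_buffer() is the existing
 * debug helper declared in qla_gbl.h.
 */
static inline void qla2x00_sketch_dump_dif_info(struct sts_entry_24xx *sts)
{
	uint8_t *computed_guard = &sts->data[10];	/* 2 bytes */
	uint8_t *actual_dif     = &sts->data[12];	/* 8 bytes, received */
	uint8_t *expected_dif   = &sts->data[20];	/* 8 bytes, computed */

	qla2x00_dump_buffer(computed_guard, 2);
	qla2x00_dump_buffer(actual_dif, 8);
	qla2x00_dump_buffer(expected_dif, 8);
}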
/*
* Status entry completion status
*/
@@ -841,6 +886,8 @@ struct device_reg_24xx {
#define FA_HW_EVENT_ENTRY_SIZE 4
#define FA_NPIV_CONF0_ADDR 0x5C000
#define FA_NPIV_CONF1_ADDR 0x5D000
+#define FA_FCP_PRIO0_ADDR 0x10000
+#define FA_FCP_PRIO1_ADDR 0x12000
/*
* Flash Error Log Event Codes.
@@ -1274,6 +1321,8 @@ struct qla_flt_header {
#define FLT_REG_NPIV_CONF_0 0x29
#define FLT_REG_NPIV_CONF_1 0x2a
#define FLT_REG_GOLD_FW 0x2f
+#define FLT_REG_FCP_PRIO_0 0x87
+#define FLT_REG_FCP_PRIO_1 0x88
struct qla_flt_region {
uint32_t code;
@@ -1750,6 +1799,61 @@ struct ex_init_cb_81xx {
#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE 0x0
+#define QLFC_FCP_PRIO_ENABLE 0x1
+#define QLFC_FCP_PRIO_GET_CONFIG 0x2
+#define QLFC_FCP_PRIO_SET_CONFIG 0x3
+
+struct qla_fcp_prio_entry {
+ uint16_t flags; /* Describes parameter(s) in FCP */
+ /* priority entry that are valid */
+#define FCP_PRIO_ENTRY_VALID 0x1
+#define FCP_PRIO_ENTRY_TAG_VALID 0x2
+#define FCP_PRIO_ENTRY_SPID_VALID 0x4
+#define FCP_PRIO_ENTRY_DPID_VALID 0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID 0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID 0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID 0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID 0x80
+ uint8_t tag; /* Priority value */
+ uint8_t reserved; /* Reserved for future use */
+ uint32_t src_pid; /* Src port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint32_t dst_pid; /* Dst port id. high order byte */
+ /* unused; -1 (wild card) */
+ uint16_t lun_beg; /* 1st lun num of lun range. */
+ /* -1 (wild card) */
+ uint16_t lun_end; /* 2nd lun num of lun range. */
+ /* -1 (wild card) */
+ uint8_t src_wwpn[8]; /* Source WWPN: -1 (wild card) */
+ uint8_t dst_wwpn[8]; /* Destination WWPN: -1 (wild card) */
+};
+
+struct qla_fcp_prio_cfg {
+ uint8_t signature[4]; /* "HQOS" signature of config data */
+ uint16_t version; /* 1: Initial version */
+ uint16_t length; /* config data size in num bytes */
+ uint16_t checksum; /* config data bytes checksum */
+ uint16_t num_entries; /* Number of entries */
+ uint16_t size_of_entry; /* Size of each entry in num bytes */
+ uint8_t attributes; /* enable/disable, persistence */
+#define FCP_PRIO_ATTR_DISABLE 0x0
+#define FCP_PRIO_ATTR_ENABLE 0x1
+#define FCP_PRIO_ATTR_PERSIST 0x2
+ uint8_t reserved; /* Reserved for future use */
+#define FCP_PRIO_CFG_HDR_SIZE 0x10
+ struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
+#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+};
+
+#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port */
+
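/*
 * Illustrative sketch (not part of this patch): a minimal plausibility
 * check for a qla_fcp_prio_cfg image read from flash, based only on the
 * fields defined above. The real validation sits behind
 * qla24xx_fcp_prio_cfg_valid() (declared in qla_gbl.h) and may differ;
 * the checksum algorithm is deliberately not asserted here.
 */
static inline int qla_fcp_prio_cfg_plausible(struct qla_fcp_prio_cfg *cfg)
{
	uint32_t need = FCP_PRIO_CFG_HDR_SIZE +
	    cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;

	if (memcmp(cfg->signature, "HQOS", 4) != 0)
		return 0;
	if (!cfg->num_entries || need > FCP_PRIO_CFG_SIZE)
		return 0;
	return 1;
}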
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25 0x3C000
+#define FA_FCP_PRIO1_ADDR_25 0x3E000
+
/* 81XX Flash locations -- occupies second 2MB region. */
#define FA_BOOT_CODE_ADDR_81 0x80000
#define FA_RISC_CODE_ADDR_81 0xA0000
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3a89bc514e2..8217c3bcbc2 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,7 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
@@ -55,10 +56,20 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
-extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_marker(fc_port_t *, uint16_t, uint8_t);
+extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
+ struct srb_iocb *);
+extern void qla2x00_async_marker_done(struct scsi_qla_host *, fc_port_t *,
+ struct srb_iocb *);
extern fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -79,6 +90,13 @@ extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xdontresethba;
+extern int ql2xasynctmfenable;
+extern int ql2xenabledif;
+extern int ql2xenablehba_err_chk;
+extern int ql2xtargetreset;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -93,6 +111,10 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@@ -135,6 +157,7 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
@@ -157,6 +180,10 @@ int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern void qla2x00_ctx_sp_free(srb_t *);
+extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
+extern int qla24xx_dif_start_scsi(srb_t *);
+
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -328,6 +355,9 @@ extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+ uint16_t *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -340,6 +370,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -384,6 +415,7 @@ extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
@@ -395,6 +427,7 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
+extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -430,7 +463,10 @@ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
-extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+ struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.
@@ -459,4 +495,88 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
+extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_pci_region_len(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern int qla82xx_nvram_config(struct scsi_qla_host *);
+extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
+extern int qla82xx_load_firmware(scsi_qla_host_t *);
+extern int qla82xx_reset_hw(scsi_qla_host_t *);
+extern int qla82xx_load_risc_blob(scsi_qla_host_t *, uint32_t *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern int qla82xx_crb_win_lock(struct qla_hw_data *);
+extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
+extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
+extern int qla82xx_load_fw(scsi_qla_host_t *);
+extern int qla82xx_rom_lock(struct qla_hw_data *);
+extern void qla82xx_rom_unlock(struct qla_hw_data *);
+extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
+extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
+extern unsigned long qla82xx_decode_crb_addr(unsigned long);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+ size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(srb_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_wait_for_pending_commands(scsi_qla_host_t *);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct fc_bsg_job *);
+extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+ dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+ uint16_t *, uint16_t *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4647015eba6..872c55f049a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(
FDMI_PORT_SPEED_10GB);
else if (IS_QLA25XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4229bb483c5..ab2cc71994c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -48,6 +48,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_ctx *ctx;
+ struct srb_iocb *iocb;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
struct req_que *req;
@@ -57,17 +58,21 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
req = ha->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
ctx = sp->ctx;
- ctx->timeout(sp);
+ iocb = ctx->u.iocb_cmd;
+ iocb->timeout(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ctx->free(sp);
+ iocb->free(sp);
}
void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ del_timer_sync(&iocb->timer);
+ kfree(iocb);
kfree(ctx);
mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
}
@@ -79,6 +84,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
+ struct srb_iocb *iocb;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
@@ -86,21 +92,30 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ goto done;
+ }
+ iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
+ if (!iocb) {
+ mempool_free(sp, ha->srb_mempool);
+ sp = NULL;
+ kfree(ctx);
goto done;
}
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
- ctx->free = qla2x00_ctx_sp_free;
+ ctx->u.iocb_cmd = iocb;
+ iocb->free = qla2x00_ctx_sp_free;
- init_timer(&ctx->timer);
+ init_timer(&iocb->timer);
if (!tmo)
goto done;
- ctx->timer.expires = jiffies + tmo * HZ;
- ctx->timer.data = (unsigned long)sp;
- ctx->timer.function = qla2x00_ctx_sp_timeout;
- add_timer(&ctx->timer);
+ iocb->timer.expires = jiffies + tmo * HZ;
+ iocb->timer.data = (unsigned long)sp;
+ iocb->timer.function = qla2x00_ctx_sp_timeout;
+ add_timer(&iocb->timer);
done:
return sp;
}
@@ -110,41 +125,56 @@ done:
#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
static void
-qla2x00_async_logio_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(srb_t *sp)
{
fc_port_t *fcport = sp->fcport;
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s timeout.\n",
- fcport->vha->host_no, sp->handle,
- lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
+ fcport->vha->host_no, sp->handle, ctx->name));
- if (lio->ctx.type == SRB_LOGIN_CMD)
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (ctx->type == SRB_LOGIN_CMD)
qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
}
+static void
+qla2x00_async_login_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_logio *lio;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
- lio = sp->ctx;
- lio->ctx.type = SRB_LOGIN_CMD;
- lio->ctx.timeout = qla2x00_async_logio_timeout;
- lio->flags |= SRB_LOGIN_COND_PLOGI;
+ ctx = sp->ctx;
+ ctx->type = SRB_LOGIN_CMD;
+ ctx->name = "login";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_login_ctx_done;
+ lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- lio->flags |= SRB_LOGIN_RETRIED;
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -157,29 +187,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- del_timer_sync(&lio->ctx.timer);
- lio->ctx.free(sp);
+ lio->free(sp);
done:
return rval;
}
+static void
+qla2x00_async_logout_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_logio *lio;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
ELS_TMO_2_RATOV(ha) + 2);
if (!sp)
goto done;
- lio = sp->ctx;
- lio->ctx.type = SRB_LOGOUT_CMD;
- lio->ctx.timeout = qla2x00_async_logio_timeout;
+ ctx = sp->ctx;
+ ctx->type = SRB_LOGOUT_CMD;
+ ctx->name = "logout";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_logout_ctx_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
@@ -191,30 +235,186 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- del_timer_sync(&lio->ctx.timer);
- lio->ctx.free(sp);
+ lio->free(sp);
done:
return rval;
}
+static void
+qla2x00_async_adisc_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+ qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ lio->free(sp);
+}
+
int
+qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+ ELS_TMO_2_RATOV(ha) + 2);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_ADISC_CMD;
+ ctx->name = "adisc";
+ lio = ctx->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ lio->done = qla2x00_async_adisc_ctx_done;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ lio->free(sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+ qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
+ iocb->free(sp);
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *tcf;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+ ELS_TMO_2_RATOV(ha) + 2);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_TM_CMD;
+ ctx->name = "tmf";
+ tcf = ctx->u.iocb_cmd;
+ tcf->u.tmf.flags = flags;
+ tcf->u.tmf.lun = lun;
+ tcf->u.tmf.data = tag;
+ tcf->timeout = qla2x00_async_iocb_timeout;
+ tcf->done = qla2x00_async_tm_cmd_ctx_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ tcf->free(sp);
+done:
+ return rval;
+}
+
+static void
+qla2x00_async_marker_ctx_done(srb_t *sp)
+{
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+ qla2x00_async_marker_done(sp->fcport->vha, sp->fcport, iocb);
+ iocb->free(sp);
+}
+
+int
+qla2x00_async_marker(fc_port_t *fcport, uint16_t lun, uint8_t modif)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ srb_t *sp;
+ struct srb_ctx *ctx;
+ struct srb_iocb *mrk;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 0);
+ if (!sp)
+ goto done;
+
+ ctx = sp->ctx;
+ ctx->type = SRB_MARKER_CMD;
+ ctx->name = "marker";
+ mrk = ctx->u.iocb_cmd;
+ mrk->u.marker.lun = lun;
+ mrk->u.marker.modif = modif;
+ mrk->timeout = qla2x00_async_iocb_timeout;
+ mrk->done = qla2x00_async_marker_ctx_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ DEBUG2(printk(KERN_DEBUG
+ "scsi(%ld:%x): Async-marker - loop-id=%x "
+ "portid=%02x%02x%02x.\n",
+ fcport->vha->host_no, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ return rval;
+
+done_free_sp:
+ mrk->free(sp);
+done:
+ return rval;
+}
+
+void
qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
int rval;
- uint8_t opts = 0;
switch (data[0]) {
case MBS_COMMAND_COMPLETE:
- if (fcport->flags & FCF_FCP2_DEVICE)
- opts |= BIT_1;
- rval = qla2x00_get_port_database(vha, fcport, opts);
- if (rval != QLA_SUCCESS)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- else
- qla2x00_update_fcport(vha, fcport);
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ fcport->flags |= FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
+ break;
+ }
+ qla2x00_update_fcport(vha, fcport);
break;
case MBS_COMMAND_ERROR:
+ fcport->flags &= ~FCF_ASYNC_SENT;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
@@ -228,21 +428,84 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->loop_id++;
rval = qla2x00_find_new_loop_id(vha, fcport);
if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
qla2x00_mark_device_lost(vha, fcport, 1, 0);
break;
}
qla2x00_post_async_login_work(vha, fcport, NULL);
break;
}
- return QLA_SUCCESS;
+ return;
}
-int
+void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
qla2x00_mark_device_lost(vha, fcport, 1, 0);
- return QLA_SUCCESS;
+ return;
+}
+
+void
+qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ if (data[0] == MBS_COMMAND_COMPLETE) {
+ qla2x00_update_fcport(vha, fcport);
+
+ return;
+ }
+
+ /* Retry login. */
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ else
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+ return;
+}
+
+void
+qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *iocb)
+{
+ int rval;
+ uint32_t flags;
+ uint16_t lun;
+
+ flags = iocb->u.tmf.flags;
+ lun = (uint16_t)iocb->u.tmf.lun;
+
+ /* Issue Marker IOCB */
+ rval = qla2x00_async_marker(fcport, lun,
+ flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+
+ if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): TM IOCB failed (%x).\n",
+ __func__, vha->host_no, rval));
+ }
+
+ return;
+}
+
+void
+qla2x00_async_marker_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *iocb)
+{
+ /*
+ * Currently we don't have any specific post-response processing
+ * for this IOCB; just log a warning below when the IOCB command
+ * failed (iocb->u.tmf.data is non-zero).
+ */
+ if (iocb->u.tmf.data) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): Marker IOCB failed (%x).\n",
+ __func__, vha->host_no, iocb->u.tmf.data));
+ }
+
+ return;
}
/****************************************************************************/
@@ -328,6 +591,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval)
return (rval);
}
+
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
@@ -340,7 +604,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
- /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
@@ -349,6 +613,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) {
+ if (qla24xx_read_fcp_prio_cfg(vha))
+ qla_printk(KERN_ERR, ha,
+ "Unable to read FCP priority data.\n");
+ }
+
return (rval);
}
@@ -955,6 +1225,9 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
rval = qla2x00_mbx_reg_test(vha);
@@ -1177,6 +1450,12 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
unsigned long flags;
uint16_t fw_major_version;
+ if (IS_QLA82XX(ha)) {
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
+ if (rval == QLA_SUCCESS)
+ goto enable_82xx_npiv;
+ }
+
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1202,6 +1481,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS) {
+enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
rval = qla2x00_get_fw_version(vha,
&ha->fw_major_version,
@@ -1226,8 +1506,10 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
- qla2x00_alloc_fw_dump(vha);
+ if (!fw_major_version && ql2xallocfwdump) {
+ if (!IS_QLA82XX(ha))
+ qla2x00_alloc_fw_dump(vha);
+ }
}
} else {
DEBUG2(printk(KERN_INFO
@@ -1384,6 +1666,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return;
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -1818,7 +2103,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
return(rval);
}
-static inline void
+inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
char *def)
{
@@ -1826,7 +2111,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
uint16_t index;
struct qla_hw_data *ha = vha->hw;
int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
- !IS_QLA81XX(ha);
+ !IS_QLA8XXX_TYPE(ha);
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -2017,6 +2302,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
+ nv->firmware_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@@ -2635,7 +2921,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
PORT_RETRY_TIME;
atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
PORT_RETRY_TIME);
- fcport->flags &= ~FCF_LOGIN_NEEDED;
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
qla2x00_iidma_fcport(vha, fcport);
@@ -2864,7 +3150,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
sw_info_t *swl;
int swl_idx;
int first_dev, last_dev;
- port_id_t wrap, nxt_d_id;
+ port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
struct scsi_qla_host *tvp;
@@ -3167,7 +3453,7 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
uint32_t rscn_entry;
uint8_t rscn_out_iter;
uint8_t format;
- port_id_t d_id;
+ port_id_t d_id = {};
rval = QLA_RSCNS_HANDLED;
@@ -3281,11 +3567,15 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
retry = 0;
if (IS_ALOGIO_CAPABLE(ha)) {
+ if (fcport->flags & FCF_ASYNC_SENT)
+ return rval;
+ fcport->flags |= FCF_ASYNC_SENT;
rval = qla2x00_post_async_login_work(vha, fcport, NULL);
if (!rval)
return rval;
}
+ fcport->flags &= ~FCF_ASYNC_SENT;
rval = qla2x00_fabric_login(vha, fcport, next_loopid);
if (rval == QLA_SUCCESS) {
/* Send an ADISC to FCP2 devices.*/
@@ -3546,6 +3836,45 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
qla2x00_rport_del(fcport);
}
+void
+qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *tvp;
+
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ ha->qla_stats.total_isp_aborts++;
+
+ qla_printk(KERN_INFO, ha,
+ "Performing ISP error recovery - ha= %p.\n", ha);
+
+ /* Chip reset does not apply to 82XX */
+ if (!IS_QLA82XX(ha))
+ ha->isp_ops->reset_chip(vha);
+
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
+ qla2x00_mark_all_devices_lost(vp, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
+ }
+
+ /* Make sure for ISP 82XX IO DMA is complete */
+ if (IS_QLA82XX(ha))
+ qla82xx_wait_for_pending_commands(vha);
+
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+}
+
/*
* qla2x00_abort_isp
* Resets ISP and aborts all outstanding commands.
@@ -3567,27 +3896,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
struct req_que *req = ha->req_q_map[0];
if (vha->flags.online) {
- vha->flags.online = 0;
- ha->flags.chip_reset_done = 0;
- clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
-
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
- ha->isp_ops->reset_chip(vha);
-
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
- atomic_set(&vha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(vha, 0);
- } else {
- if (!atomic_read(&vha->loop_down_timer))
- atomic_set(&vha->loop_down_timer,
- LOOP_DOWN_TIME);
- }
-
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ qla2x00_abort_isp_cleanup(vha);
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure)) {
@@ -3843,6 +4152,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return;
+
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -3906,6 +4218,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_QLA82XX(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -4769,7 +5083,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
* Setup driver NVRAM options.
*/
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
- "QLE81XX");
+ "QLE8XXX");
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@@ -4892,6 +5206,114 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
+int
+qla82xx_restart_isp(scsi_qla_host_t *vha)
+{
+ int status, rval;
+ uint32_t wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct scsi_qla_host *vp;
+ struct scsi_qla_host *tvp;
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ ha->flags.chip_reset_done = 1;
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
+ qla_printk(KERN_INFO, ha,
+ "%s(): Start configure loop, "
+ "status = %d\n", __func__, status);
+
+ /* Issue a marker after FW becomes ready. */
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+
+ vha->flags.online = 1;
+ /* Wait at most MAX_TARGET RSCNs for a stable link. */
+ wait_time = 256;
+ do {
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2x00_configure_loop(vha);
+ wait_time--;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
+ wait_time &&
+ (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
+ }
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+
+ qla_printk(KERN_INFO, ha,
+ "%s(): Configure loop done, status = 0x%x\n",
+ __func__, status);
+ }
+
+ if (!status) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ if (!atomic_read(&vha->loop_down_timer)) {
+ /*
+ * Issue marker command only when we are going
+ * to start the I/O.
+ */
+ vha->marker_needed = 1;
+ }
+
+ vha->flags.online = 1;
+
+ ha->isp_ops->enable_intrs(ha);
+
+ ha->isp_abort_cnt = 0;
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0,
+ fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+ &ha->fce_bufs);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize FCE "
+ "(%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
+ }
+
+ if (!status) {
+ DEBUG(printk(KERN_INFO
+ "qla82xx_restart_isp(%ld): succeeded.\n",
+ vha->host_no));
+ list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+ if (vp->vp_idx)
+ qla2x00_vp_abort_isp(vp);
+ }
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "qla82xx_restart_isp: **** FAILED ****\n");
+ }
+
+ return status;
+}
+
void
qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
@@ -4905,3 +5327,165 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_9;
qla2x00_set_fw_options(vha, ha->fw_options);
}
+
+/*
+ * qla24xx_get_fcp_prio
+ * Gets the fcp cmd priority value for the logged in port.
+ * Looks for a match of the port descriptors within
+ * each of the fcp prio config entries. If a match is found,
+ * the tag (priority) value is returned.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * non-zero (if found)
+ * 0 (if not found)
+ *
+ * Context:
+ * Kernel context
+ */
+uint8_t
+qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int i, entries;
+ uint8_t pid_match, wwn_match;
+ uint8_t priority;
+ uint32_t pid1, pid2;
+ uint64_t wwn1, wwn2;
+ struct qla_fcp_prio_entry *pri_entry;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
+ return 0;
+
+ priority = 0;
+ entries = ha->fcp_prio_cfg->num_entries;
+ pri_entry = &ha->fcp_prio_cfg->entry[0];
+
+ for (i = 0; i < entries; i++) {
+ pid_match = wwn_match = 0;
+
+ if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
+ pri_entry++;
+ continue;
+ }
+
+ /* check source pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
+ pid1 = pri_entry->src_pid & INVALID_PORT_ID;
+ pid2 = vha->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check destination pid for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
+ pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
+ pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
+ if (pid1 == INVALID_PORT_ID)
+ pid_match++;
+ else if (pid1 == pid2)
+ pid_match++;
+ }
+
+ /* check source WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
+ wwn1 = wwn_to_u64(vha->port_name);
+ wwn2 = wwn_to_u64(pri_entry->src_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ /* check destination WWN for a match */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
+ wwn1 = wwn_to_u64(fcport->port_name);
+ wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
+ if (wwn2 == (uint64_t)-1)
+ wwn_match++;
+ else if (wwn1 == wwn2)
+ wwn_match++;
+ }
+
+ if (pid_match == 2 || wwn_match == 2) {
+ /* Found a matching entry */
+ if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+ priority = pri_entry->tag;
+ break;
+ }
+
+ pri_entry++;
+ }
+
+ return priority;
+}
+
+/*
+ * qla24xx_update_fcport_fcp_prio
+ * Activates fcp priority for the logged in fc port
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * fcport = port structure pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
+{
+ int ret;
+ uint8_t priority;
+ uint16_t mb[5];
+
+ if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
+ fcport->port_type != FCT_TARGET ||
+ fcport->loop_id == FC_NO_LOOP_ID)
+ return QLA_FUNCTION_FAILED;
+
+ priority = qla24xx_get_fcp_prio(ha, fcport);
+ ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
+ if (ret == QLA_SUCCESS)
+ fcport->fcp_prio = priority;
+ else
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): Unable to activate fcp priority, "
+ " ret=0x%x\n", ha->host_no, ret));
+
+ return ret;
+}
+
+/*
+ * qla24xx_update_all_fcp_prio
+ * Activates fcp priority for all the logged in ports
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Return:
+ * QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
+{
+ int ret;
+ fc_port_t *fcport;
+
+ ret = QLA_FUNCTION_FAILED;
+ /* We need to set priority for all logged in ports */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5e0a7095c9f..84c2fea154d 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -37,7 +37,10 @@ qla2x00_poll(struct rsp_que *rsp)
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- ha->isp_ops->intr_handler(0, rsp);
+ if (IS_QLA82XX(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
local_irq_restore(flags);
}
@@ -64,3 +67,19 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}
+
+static inline void
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
+{
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &((struct crc_context *)sp->ctx)->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8299a9891bf..8ef94536541 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -145,7 +145,49 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
return (cont_pkt);
}
-/**
+static inline int
+qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
+{
+ uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
+
+ /* We only support T10 DIF right now */
+ if (guard != SHOST_DIX_GUARD_CRC) {
+ DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
+ return 0;
+ }
+
+ /* We always use DIF bundling for best performance */
+ *fw_prot_opts = 0;
+
+ /* Translate SCSI opcode to a protection opcode */
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ *fw_prot_opts |= PO_MODE_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ *fw_prot_opts |= PO_MODE_DIF_REMOVE;
+ break;
+ case SCSI_PROT_READ_PASS:
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ default: /* Normal Request */
+ *fw_prot_opts |= PO_MODE_DIF_PASS;
+ break;
+ }
+
+ return scsi_prot_sg_count(sp->cmd);
+}
+
+/*
* qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
* capable IOCB types.
*
@@ -506,7 +548,10 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
cnt = (uint16_t)
RD_REG_DWORD(&reg->isp25mq.req_q_out);
else {
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
+ &reg->isp82.req_q_out);
+ else if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
@@ -579,11 +624,29 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable) {
+ if (IS_QLA82XX(ha)) {
+ uint32_t dbval = 0x04 | (ha->portnum << 5);
+
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)
+ ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+ }
+ } else if (ha->mqenable) {
+ /* Set chip new ring index. */
WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
RD_REG_DWORD(&ioreg->hccr);
- }
- else {
+ } else {
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -604,7 +667,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
*
* Returns the number of IOCB entries needed to store @dsds.
*/
-static inline uint16_t
+inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
uint16_t iocbs;
@@ -615,6 +678,8 @@ qla24xx_calc_iocbs(uint16_t dsds)
if ((dsds - 1) % 5)
iocbs++;
}
+ DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
+ __func__, iocbs));
return iocbs;
}
@@ -626,7 +691,7 @@ qla24xx_calc_iocbs(uint16_t dsds)
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
*/
-static inline void
+inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint16_t tot_dsds)
{
@@ -695,6 +760,453 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
}
+struct fw_dif_context {
+ uint32_t ref_tag;
+ uint16_t app_tag;
+ uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
+ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
+};
+
+/*
+ * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
+ *
+ */
+static inline void
+qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+ unsigned int protcnt)
+{
+ struct sd_dif_tuple *spt;
+ unsigned char op = scsi_get_prot_op(cmd);
+
+ switch (scsi_get_prot_type(cmd)) {
+ /* For TYPE 0 protection: no checking */
+ case SCSI_PROT_DIF_TYPE0:
+ pkt->ref_tag_mask[0] = 0x00;
+ pkt->ref_tag_mask[1] = 0x00;
+ pkt->ref_tag_mask[2] = 0x00;
+ pkt->ref_tag_mask[3] = 0x00;
+ break;
+
+ /*
+ * For TYPE 2 protection: 16 bit GUARD tag and a 32 bit REF tag that
+ * must match the LBA in CDB + N
+ */
+ case SCSI_PROT_DIF_TYPE2:
+ break;
+
+ /* For Type 3 protection: 16 bit GUARD only */
+ case SCSI_PROT_DIF_TYPE3:
+ pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
+ pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
+ 0x00;
+ break;
+
+ /*
+ * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+ * 16 bit app tag.
+ */
+ case SCSI_PROT_DIF_TYPE1:
+ if (!ql2xenablehba_err_chk)
+ break;
+
+ if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+ op == SCSI_PROT_WRITE_PASS)) {
+ spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
+ scsi_prot_sglist(cmd)[0].offset;
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): LBA from user %p, lba = 0x%x\n",
+ __func__, spt, (int)spt->ref_tag));
+ pkt->ref_tag = swab32(spt->ref_tag);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+ } else {
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+ }
+ /* enable ALL bytes of the ref tag */
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
+ break;
+ }
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
+ " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
+ " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
+ (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
+}
+
+
+static int
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
+ uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg;
+ uint32_t *cur_dsd = dsd;
+ int i;
+ uint16_t used_dsds = tot_dsds;
+
+ uint8_t *cp;
+
+ scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+ DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
+ " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
+ MSD(sle_dma), sg_dma_len(sg)));
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ cp = page_address(sg_page(sg)) + sg->offset;
+ DEBUG18(printk("%s(): User Data buffer= %p:\n",
+ __func__ , cp));
+ }
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
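Each descriptor list allocated above is sized as (avail_dsds + 1) * 12 bytes: 12 bytes per data-segment descriptor (address low, address high, length) plus one spare slot used to chain to the next list or hold the null terminator. A hedged, stand-alone sketch of that bookkeeping follows; the per-list DSD limit is passed in because the value of QLA_DSDS_PER_IOCB is not visible in this hunk:

/* Illustrative userspace sketch of the DSD pool sizing; not driver code. */
#include <stdint.h>
#include <stdio.h>

static void dsd_pool_usage(uint16_t tot_dsds, uint16_t dsds_per_iocb)
{
	uint16_t used = tot_dsds;
	unsigned int lists = 0, bytes = 0;

	while (used) {
		uint16_t avail = used > dsds_per_iocb ? dsds_per_iocb : used;

		bytes += (avail + 1) * 12;	/* dsd_list_len for this list */
		used -= avail;
		lists++;
	}
	printf("%u segment(s) -> %u DSD list(s), %u bytes from the DMA pool\n",
	    (unsigned)tot_dsds, lists, bytes);
}

int main(void)
{
	dsd_pool_usage(100, 37);	/* 37 is only an example per-list limit */
	return 0;
}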
+static int
+qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd,
+ uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg;
+ int i;
+ struct scsi_cmnd *cmd;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+
+ uint8_t *cp;
+
+
+ cmd = sp->cmd;
+ scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Need to cleanup only this dsd_ptr, rest
+ * will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ sle_dma = sg_dma_address(sg);
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): %p, sg entry %d - addr =0x%x"
+ "0x%x, len =%d\n", __func__ , cur_dsd, i,
+ LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
+ }
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+
+ if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+ cp = page_address(sg_page(sg)) + sg->offset;
+ DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
+ __func__ , cp));
+ }
+ avail_dsds--;
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+
+/**
+ * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
+ * Type CRC_2 (T10 DIF capable) IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command Type CRC_2 IOCB
+ * @tot_dsds: Total number of data segments to transfer
+ * @tot_prot_dsds: Total number of protection segments to transfer
+ * @fw_prot_opts: Firmware protection options (PO_MODE_DIF_*)
+ */
+static inline int
+qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
+{
+ uint32_t *cur_dsd, *fcp_dl;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ int sgc;
+ uint32_t total_bytes;
+ uint32_t data_bytes;
+ uint32_t dif_bytes;
+ uint8_t bundling = 1;
+ uint16_t blk_size;
+ uint8_t *clr_ptr;
+ struct crc_context *crc_ctx_pkt = NULL;
+ struct qla_hw_data *ha;
+ uint8_t additional_fcpcdb_len;
+ uint16_t fcp_cmnd_len;
+ struct fcp_cmnd *fcp_cmnd;
+ dma_addr_t crc_ctx_dma;
+
+ cmd = sp->cmd;
+
+ sgc = 0;
+ /* Update entry type to indicate Command Type CRC_2 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+
+ /* No data transfer */
+ data_bytes = scsi_bufflen(cmd);
+ if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+ DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
+ __func__, data_bytes));
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return QLA_SUCCESS;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
+ vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ }
+
+ tot_prot_dsds = scsi_prot_sg_count(cmd);
+ if (!tot_prot_dsds)
+ bundling = 0;
+
+ /* Allocate CRC context from global pool */
+ crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &crc_ctx_dma);
+
+ if (!crc_ctx_pkt)
+ goto crc_queuing_error;
+
+ /* Zero out CTX area. */
+ clr_ptr = (uint8_t *)crc_ctx_pkt;
+ memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+ crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+
+ sp->flags |= SRB_CRC_CTX_DMA_VALID;
+
+ /* Set handle */
+ crc_ctx_pkt->handle = cmd_pkt->handle;
+
+ INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+ qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+ &crc_ctx_pkt->ref_tag, tot_prot_dsds);
+
+ cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+ cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+ cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+ /* Determine SCSI command length -- align to 4 byte boundary */
+ if (cmd->cmd_len > 16) {
+ DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
+ __func__));
+ additional_fcpcdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI cmd > 16 bytes must be multiple of 4 */
+ goto crc_queuing_error;
+ }
+ fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_fcpcdb_len = 0;
+ fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
+
+ fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->additional_cdb_len |= 2;
+
+ int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+ memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+ LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
+ MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+ fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_managment = 0;
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+
+ DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
+ "entries %d, data bytes %d, Protection entries %d\n",
+ __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
+ data_bytes, tot_prot_dsds));
+
+ /* Compute dif len and adjust data len to include protection */
+ total_bytes = data_bytes;
+ dif_bytes = 0;
+ blk_size = cmd->device->sector_size;
+ if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) {
+ dif_bytes = (data_bytes / blk_size) * 8;
+ total_bytes += dif_bytes;
+ }
+
+ if (!ql2xenablehba_err_chk)
+ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+
+ if (!bundling) {
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+ } else {
+ /*
+ * Configure Bundling if we need to fetch interleaved
+ * protection data with separate PCI accesses
+ */
+ fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+ crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+ crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
+ tot_prot_dsds);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+ }
+
+ /* Finish the common fields of CRC pkt */
+ crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
+ crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+ crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+ crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+ /* Fibre channel byte count */
+ cmd_pkt->byte_count = cpu_to_le32(total_bytes);
+ fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ additional_fcpcdb_len);
+ *fcp_dl = htonl(total_bytes);
+
+ DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
+ " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
+ vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
+ crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
+
+ /* Walks data segments */
+
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+ if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+ (tot_dsds - tot_prot_dsds)))
+ goto crc_queuing_error;
+
+ if (bundling && tot_prot_dsds) {
+ /* Walks dif segments */
+ cur_seg = scsi_prot_sglist(cmd);
+ cmd_pkt->control_flags |=
+ __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+ cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+ if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
+ tot_prot_dsds))
+ goto crc_queuing_error;
+ }
+ return QLA_SUCCESS;
+
+crc_queuing_error:
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "CMD sent FAILED crc_q error:sp = %p\n", sp));
+ /* Cleanup will be performed by the caller */
+
+ return QLA_FUNCTION_FAILED;
+}
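The fcp_dl value programmed above covers both the user data and, for Type 1 protection, an 8-byte DIF tuple per logical block. A minimal worked example of that arithmetic (the 64 KiB transfer and 512-byte sector size are illustrative only):

/* Illustrative userspace sketch of the DIF byte-count arithmetic; not driver code. */
#include <stdio.h>

int main(void)
{
	unsigned int data_bytes = 64 * 1024;	/* example 64 KiB transfer */
	unsigned int blk_size   = 512;		/* example sector size */
	unsigned int dif_bytes  = (data_bytes / blk_size) * 8;
	unsigned int total      = data_bytes + dif_bytes;

	/* 128 blocks * 8 bytes = 1024 DIF bytes, so fcp_dl = 66560 */
	printf("data=%u dif=%u fcp_dl=%u\n", data_bytes, dif_bytes, total);
	return 0;
}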
/**
* qla24xx_start_scsi() - Send a SCSI command to the ISP
@@ -848,6 +1360,191 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
+
+/**
+ * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_dif_start_scsi(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt = 0;
+ uint16_t tot_dsds;
+ uint16_t tot_prot_dsds;
+ uint16_t fw_prot_opts = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_crc_2 *cmd_pkt;
+ uint32_t status = 0;
+
+#define QDSS_GOT_Q_SPACE BIT_0
+
+ /* Only process protection in this routine */
+ if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL)
+ return qla24xx_start_scsi(sp);
+
+ /* Setup device pointers. */
+
+ qla25xx_set_que(sp, &rsp);
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Compute number of required data segments */
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_DMA_VALID;
+ } else
+ nseg = 0;
+
+ /* number of required data segments */
+ tot_dsds = nseg;
+
+ /* Compute number of required protection segments */
+ if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ else
+ sp->flags |= SRB_CRC_PROT_DMA_VALID;
+ } else {
+ nseg = 0;
+ }
+
+ req_cnt = 1;
+ /* Total Data and protection sg segment(s) */
+ tot_prot_dsds = nseg;
+ tot_dsds += nseg;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ status |= QDSS_GOT_Q_SPACE;
+
+ /* Build header part of command packet (excluding the OPCODE). */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ /* Fill-in common area */
+ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* Total Data and protection segment(s) */
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Build IOCB segments and adjust for data protection segments */
+ if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+ req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+ QLA_SUCCESS)
+ goto queuing_error;
+
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where completion should happen */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ cmd_pkt->timeout = __constant_cpu_to_le16(0);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (status & QDSS_GOT_Q_SPACE) {
+ req->outstanding_cmds[handle] = NULL;
+ req->cnt += req_cnt;
+ }
+ /* Cleanup will be performed by the caller (queuecommand) */
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
+ return QLA_FUNCTION_FAILED;
+}
+
+
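Free space on the request ring is recomputed from the hardware out pointer only when the cached count looks too small, and the command is queued only if at least req_cnt + 2 slots remain. A minimal sketch of the wrap-aware free-slot arithmetic used above (the ring size and index values below are made up for illustration):

/* Illustrative userspace sketch of the request-ring space check; not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint16_t ring_free(uint16_t length, uint16_t ring_index, uint16_t cnt)
{
	if (ring_index < cnt)			/* in pointer trails the out pointer */
		return cnt - ring_index;
	return length - (ring_index - cnt);	/* in pointer has wrapped past it */
}

int main(void)
{
	/* example: 2048-entry ring, in pointer at 2000, out pointer at 100 */
	uint16_t slots = ring_free(2048, 2000, 100);

	printf("free slots = %u, enough for 1 IOCB + 2 spare: %s\n",
	    (unsigned)slots, slots >= 1 + 2 ? "yes" : "no");
	return 0;
}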
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
struct scsi_cmnd *cmd = sp->cmd;
@@ -931,37 +1628,45 @@ qla2x00_start_iocbs(srb_t *sp)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(sp);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
- if (lio->flags & SRB_LOGIN_COND_PLOGI)
+ if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
- if (lio->flags & SRB_LOGIN_SKIP_PRLI)
+ if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -974,14 +1679,15 @@ static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct srb_logio *lio = sp->ctx;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *lio = ctx->u.iocb_cmd;
uint16_t opts;
mbx->entry_type = MBX_IOCB_TYPE;;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
- opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
- opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
+ opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
+ opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
if (HAS_EXTENDED_IDS(ha)) {
mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
mbx->mb10 = cpu_to_le16(opts);
@@ -1026,9 +1732,97 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
}
static void
+qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->vp_index = sp->fcport->vp_idx;
+}
+
+static void
+qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+ mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
+ if (HAS_EXTENDED_IDS(ha)) {
+ mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+ mbx->mb10 = cpu_to_le16(BIT_0);
+ } else {
+ mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
+ }
+ mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
+ mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
+ mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
+ mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
+ mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+}
+
+static void
+qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+{
+ uint32_t flags;
+ unsigned int lun;
+ struct fc_port *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct req_que *req = vha->req;
+
+ flags = iocb->u.tmf.flags;
+ lun = iocb->u.tmf.lun;
+
+ tsk->entry_type = TSK_MGMT_IOCB_TYPE;
+ tsk->entry_count = 1;
+ tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+ tsk->nport_handle = cpu_to_le16(fcport->loop_id);
+ tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+ tsk->control_flags = cpu_to_le32(flags);
+ tsk->port_id[0] = fcport->d_id.b.al_pa;
+ tsk->port_id[1] = fcport->d_id.b.area;
+ tsk->port_id[2] = fcport->d_id.b.domain;
+ tsk->vp_index = fcport->vp_idx;
+
+ if (flags == TCF_LUN_RESET) {
+ int_to_scsilun(lun, &tsk->lun);
+ host_to_fcp_swap((uint8_t *)&tsk->lun,
+ sizeof(tsk->lun));
+ }
+}
+
+static void
+qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+{
+ uint16_t lun;
+ uint8_t modif;
+ struct fc_port *fcport = sp->fcport;
+ scsi_qla_host_t *vha = fcport->vha;
+ struct srb_ctx *ctx = sp->ctx;
+ struct srb_iocb *iocb = ctx->u.iocb_cmd;
+ struct req_que *req = vha->req;
+
+ lun = iocb->u.marker.lun;
+ modif = iocb->u.marker.modif;
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = modif;
+ if (modif != MK_SYNC_ALL) {
+ mrk->nport_handle = cpu_to_le16(fcport->loop_id);
+ mrk->lun[1] = LSB(lun);
+ mrk->lun[2] = MSB(lun);
+ host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
+ mrk->vp_index = vha->vp_idx;
+ mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
+ }
+}
+
+static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -1041,8 +1835,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
- els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
- bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
+ els_iocb->opcode =
+ (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+ bsg_job->request->rqst_data.r_els.els_code :
+ bsg_job->request->rqst_data.h_els.command_code;
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -1076,7 +1872,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
- struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;
@@ -1157,12 +1953,12 @@ qla2x00_start_sp(srb_t *sp)
switch (ctx->type) {
case SRB_LOGIN_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_login_iocb(sp, pkt):
+ qla24xx_login_iocb(sp, pkt) :
qla2x00_login_iocb(sp, pkt);
break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_logout_iocb(sp, pkt):
+ qla24xx_logout_iocb(sp, pkt) :
qla2x00_logout_iocb(sp, pkt);
break;
case SRB_ELS_CMD_RPT:
@@ -1172,6 +1968,17 @@ qla2x00_start_sp(srb_t *sp)
case SRB_CT_CMD:
qla24xx_ct_iocb(sp, pkt);
break;
+ case SRB_ADISC_CMD:
+ IS_FWI2_CAPABLE(ha) ?
+ qla24xx_adisc_iocb(sp, pkt) :
+ qla2x00_adisc_iocb(sp, pkt);
+ break;
+ case SRB_TM_CMD:
+ qla24xx_tm_iocb(sp, pkt);
+ break;
+ case SRB_MARKER_CMD:
+ qla24xx_marker_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index db539b0c3da..be3d8bed2ec 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -326,7 +327,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
/* Setup to process RIO completion. */
handle_cnt = 0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
@@ -544,7 +545,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
"%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
else
@@ -845,7 +846,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
- " handle(%d)\n", vha->host_no, req->id, index));
+ " handle(0x%x)\n", vha->host_no, req->id, index));
qla_printk(KERN_WARNING, ha,
"Invalid ISP SCSI completion handle\n");
@@ -895,36 +896,26 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "MBX-IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
- struct srb_logio *lio;
- uint16_t data[2];
+ struct srb_iocb *lio;
+ struct srb_ctx *ctx;
+ uint16_t *data;
+ uint16_t status;
sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
if (!sp)
return;
- type = NULL;
- lio = sp->ctx;
- switch (lio->ctx.type) {
- case SRB_LOGIN_CMD:
- type = "login";
- break;
- case SRB_LOGOUT_CMD:
- type = "logout";
- break;
- default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- lio->ctx.type);
- return;
- }
-
- del_timer(&lio->ctx.timer);
+ ctx = sp->ctx;
+ lio = ctx->u.iocb_cmd;
+ type = ctx->name;
fcport = sp->fcport;
+ data = lio->u.logio.data;
- data[0] = data[1] = 0;
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x "
@@ -935,23 +926,28 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->status_flags)));
DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
- data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
- goto done_post_logio_done_work;
+ goto logio_done;
}
- if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
+ status = le16_to_cpu(mbx->status);
+ if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
+ le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
+ status = 0;
+ if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
DEBUG2(printk(KERN_DEBUG
"scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
fcport->vha->host_no, sp->handle, type,
le16_to_cpu(mbx->mb1)));
data[0] = MBS_COMMAND_COMPLETE;
- if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
- fcport->flags |= FCF_FCP2_DEVICE;
-
- goto done_post_logio_done_work;
+ if (ctx->type == SRB_LOGIN_CMD) {
+ fcport->port_type = FCT_TARGET;
+ if (le16_to_cpu(mbx->mb1) & BIT_0)
+ fcport->port_type = FCT_INITIATOR;
+ if (le16_to_cpu(mbx->mb1) & BIT_1)
+ fcport->flags |= FCF_FCP2_DEVICE;
+ }
+ goto logio_done;
}
data[0] = le16_to_cpu(mbx->mb0);
@@ -963,25 +959,19 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
default:
data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
"mb6=%x mb7=%x.\n",
- fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
+ fcport->vha->host_no, sp->handle, type, status,
le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7)));
-done_post_logio_done_work:
- lio->ctx.type == SRB_LOGIN_CMD ?
- qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
- qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
-
- lio->ctx.free(sp);
+logio_done:
+ lio->done(sp);
}
static void
@@ -992,7 +982,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
const char *type;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- struct srb_bsg *sp_bsg;
+ struct srb_ctx *sp_bsg;
struct fc_bsg_job *bsg_job;
uint16_t comp_status;
uint32_t fw_status[3];
@@ -1001,11 +991,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- sp_bsg = (struct srb_bsg*)sp->ctx;
- bsg_job = sp_bsg->bsg_job;
+ sp_bsg = sp->ctx;
+ bsg_job = sp_bsg->u.bsg_job;
type = NULL;
- switch (sp_bsg->ctx.type) {
+ switch (sp_bsg->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "els";
@@ -1016,7 +1006,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
default:
qla_printk(KERN_WARNING, ha,
"%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->ctx.type);
+ sp_bsg->type);
return;
}
@@ -1070,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
- (sp_bsg->ctx.type == SRB_CT_CMD))
+ if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
+ (sp_bsg->type == SRB_CT_CMD))
kfree(sp->fcport);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
@@ -1084,37 +1074,26 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "LOGIO-IOCB";
const char *type;
- struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
srb_t *sp;
- struct srb_logio *lio;
- uint16_t data[2];
+ struct srb_iocb *lio;
+ struct srb_ctx *ctx;
+ uint16_t *data;
uint32_t iop[2];
sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
if (!sp)
return;
- type = NULL;
- lio = sp->ctx;
- switch (lio->ctx.type) {
- case SRB_LOGIN_CMD:
- type = "login";
- break;
- case SRB_LOGOUT_CMD:
- type = "logout";
- break;
- default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- lio->ctx.type);
- return;
- }
-
- del_timer(&lio->ctx.timer);
+ ctx = sp->ctx;
+ lio = ctx->u.iocb_cmd;
+ type = ctx->name;
fcport = sp->fcport;
+ data = lio->u.logio.data;
- data[0] = data[1] = 0;
+ data[0] = MBS_COMMAND_ERROR;
+ data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+ QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
DEBUG2(printk(KERN_WARNING
"scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
@@ -1122,10 +1101,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
logio->entry_status));
DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
- data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
- goto done_post_logio_done_work;
+ goto logio_done;
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
@@ -1135,8 +1111,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[0])));
data[0] = MBS_COMMAND_COMPLETE;
- if (lio->ctx.type == SRB_LOGOUT_CMD)
- goto done_post_logio_done_work;
+ if (ctx->type != SRB_LOGIN_CMD)
+ goto logio_done;
iop[0] = le32_to_cpu(logio->io_parameter[0]);
if (iop[0] & BIT_4) {
@@ -1151,7 +1127,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (logio->io_parameter[9] || logio->io_parameter[10])
fcport->supported_classes |= FC_COS_CLASS3;
- goto done_post_logio_done_work;
+ goto logio_done;
}
iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1172,8 +1148,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
/* Fall through. */
default:
data[0] = MBS_COMMAND_ERROR;
- data[1] = lio->flags & SRB_LOGIN_RETRIED ?
- QLA_LOGIO_LOGIN_RETRIED: 0;
break;
}
@@ -1184,12 +1158,101 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[0]),
le32_to_cpu(logio->io_parameter[1])));
-done_post_logio_done_work:
- lio->ctx.type == SRB_LOGIN_CMD ?
- qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
- qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
+logio_done:
+ lio->done(sp);
+}
- lio->ctx.free(sp);
+static void
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct tsk_mgmt_entry *tsk)
+{
+ const char func[] = "TMF-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct srb_ctx *ctx;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ int error = 1;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+ if (!sp)
+ return;
+
+ ctx = sp->ctx;
+ iocb = ctx->u.iocb_cmd;
+ type = ctx->name;
+ fcport = sp->fcport;
+
+ if (sts->entry_status) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->entry_status));
+ } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->comp_status));
+ } else if (!(le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID)) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->scsi_status));
+ } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->rsp_data_len));
+ } else if (sts->data[3]) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error - response(%x).\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->data[3]));
+ } else {
+ error = 0;
+ }
+
+ if (error) {
+ iocb->u.tmf.data = error;
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
+ }
+
+ iocb->done(sp);
+}
+
+static void
+qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mrk_entry_24xx *mrk)
+{
+ const char func[] = "MRK-IOCB";
+ const char *type;
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct srb_ctx *ctx;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, mrk);
+ if (!sp)
+ return;
+
+ ctx = sp->ctx;
+ iocb = ctx->u.iocb_cmd;
+ type = ctx->name;
+ fcport = sp->fcport;
+
+ if (sts->entry_status) {
+ iocb->u.marker.data = 1;
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
+ fcport->vha->host_no, sp->handle, type,
+ sts->entry_status));
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts)));
+ }
+
+ iocb->done(sp);
}
/**
@@ -1256,6 +1319,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
case MBX_IOCB_TYPE:
qla2x00_mbx_iocb_entry(vha, rsp->req,
(struct mbx_entry *)pkt);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -1301,6 +1365,78 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}
+struct scsi_dif_tuple {
+ __be16 guard; /* Checksum */
+ __be16 app_tag; /* Application identifier */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA. In case of an error, we set the
+ * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
+ * to indicate to the kernel that the HBA detected the error.
+ */
+static inline void
+qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_dif_tuple *ep =
+ (struct scsi_dif_tuple *)&sts24->data[20];
+ struct scsi_dif_tuple *ap =
+ (struct scsi_dif_tuple *)&sts24->data[12];
+ uint32_t e_ref_tag, a_ref_tag;
+ uint16_t e_app_tag, a_app_tag;
+ uint16_t e_guard, a_guard;
+
+ e_ref_tag = be32_to_cpu(ep->ref_tag);
+ a_ref_tag = be32_to_cpu(ap->ref_tag);
+ e_app_tag = be16_to_cpu(ep->app_tag);
+ a_app_tag = be16_to_cpu(ap->app_tag);
+ e_guard = be16_to_cpu(ep->guard);
+ a_guard = be16_to_cpu(ap->guard);
+
+ DEBUG18(printk(KERN_DEBUG
+ "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
+
+ DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+ " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
+ " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
+ cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
+ a_app_tag, e_app_tag, a_guard, e_guard));
+
+
+ /* check guard */
+ if (e_guard != a_guard) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ set_host_byte(cmd, DID_ABORT);
+ cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+ return;
+ }
+}
+
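The handler above compares two 8-byte DIF tuples (guard, application tag, reference tag, all big endian) taken from the status IOCB and maps the first mismatch to an ASC/ASCQ pair. A hedged userspace sketch of that comparison, with ntohs()/ntohl() standing in for the kernel byte-swap helpers; it is not driver code:

/* Illustrative sketch of the expected-vs-actual DIF tuple check; not driver code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dif_tuple {		/* 8-byte T10 DIF tuple, big-endian on the wire */
	uint16_t guard;
	uint16_t app_tag;
	uint32_t ref_tag;
};

static const char *dif_mismatch(const void *expected, const void *actual)
{
	struct dif_tuple e, a;

	memcpy(&e, expected, sizeof(e));
	memcpy(&a, actual, sizeof(a));

	if (ntohs(e.guard) != ntohs(a.guard))
		return "guard";		/* ASC/ASCQ 0x10/0x1 in the handler */
	if (ntohs(e.app_tag) != ntohs(a.app_tag))
		return "app tag";	/* ASC/ASCQ 0x10/0x2 */
	if (ntohl(e.ref_tag) != ntohl(a.ref_tag))
		return "ref tag";	/* ASC/ASCQ 0x10/0x3 */
	return NULL;			/* tuples agree */
}

int main(void)
{
	uint8_t exp[8] = { 0x12, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10 };
	uint8_t act[8] = { 0x12, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10 };
	const char *field = dif_mismatch(exp, act);

	printf("mismatch: %s\n", field ? field : "none");
	return 0;
}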
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
* @ha: SCSI driver HA context
@@ -1316,6 +1452,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
struct sts_entry_24xx *sts24;
uint16_t comp_status;
uint16_t scsi_status;
+ uint16_t ox_id;
uint8_t lscsi_status;
int32_t resid;
uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
@@ -1324,6 +1461,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint32_t handle;
uint16_t que;
struct req_que *req;
+ int logit = 1;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -1337,6 +1475,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
handle = (uint32_t) LSW(sts->handle);
que = MSW(sts->handle);
req = ha->req_q_map[que];
+
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
qla2x00_process_completed_request(vha, req, handle);
@@ -1352,9 +1491,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
+ qla_printk(KERN_WARNING, ha,
+ "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
+ sts->handle);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1362,10 +1501,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- DEBUG2(printk("scsi(%ld): Command already returned back to OS "
- "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
qla_printk(KERN_WARNING, ha,
- "Command is NULL: already returned to OS (sp=%p)\n", sp);
+ "scsi(%ld): Command already returned (0x%x/%p).\n",
+ vha->host_no, sts->handle, sp);
return;
}
@@ -1374,6 +1512,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
fcport = sp->fcport;
+ ox_id = 0;
sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (scsi_status & SS_SENSE_LEN_VALID)
@@ -1387,6 +1526,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
rsp_info = sts24->data;
sense_data = sts24->data;
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
+ ox_id = le16_to_cpu(sts24->ox_id);
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -1403,17 +1543,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (IS_FWI2_CAPABLE(ha))
sense_data += rsp_info_len;
if (rsp_info_len > 3 && rsp_info[3]) {
- DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
- "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
- "retrying command\n", vha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, rsp_info_len, rsp_info[0],
- rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
- rsp_info[5], rsp_info[6], rsp_info[7]));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d): FCP I/O protocol failure "
+ "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
+ cp->device->lun, rsp_info_len, rsp_info[3]));
cp->result = DID_BUS_BUSY << 16;
- qla2x00_sp_compl(ha, sp);
- return;
+ goto out;
}
}
@@ -1440,12 +1576,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", vha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- scsi_bufflen(cp));
+ "scsi(%ld:%d:%d): Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ vha->host_no, cp->device->id,
+ cp->device->lun, resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1454,12 +1588,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", vha->host_no, comp_status,
- scsi_status));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
+ vha->host_no, cp->device->id, cp->device->lun));
break;
}
+ logit = 0;
if (lscsi_status != SS_CHECK_CONDITION)
break;
@@ -1471,23 +1605,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
case CS_DATA_UNDERRUN:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
- "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun, comp_status,
- scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
- cp->underflow));
-
/* Use F/W calculated residual length. */
resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- DEBUG2(printk(
- "scsi(%ld:%d:%d:%d) Dropped frame(s) "
- "detected (%x of %x bytes)...residual "
- "length mismatch...retrying command.\n",
- vha->host_no, cp->device->channel,
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n", vha->host_no,
cp->device->id, cp->device->lun, resid,
scsi_bufflen(cp)));
@@ -1499,21 +1624,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", vha->host_no,
- cp->device->channel, cp->device->id,
+ "scsi(%ld:%d:%d): Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
}
} else if (!lscsi_status) {
- DEBUG2(printk(
- "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
- "(%x of %x bytes)...firmware reported underrun..."
- "retrying command.\n", vha->host_no,
- cp->device->channel, cp->device->id,
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", vha->host_no, cp->device->id,
cp->device->lun, resid, scsi_bufflen(cp)));
cp->result = DID_ERROR << 16;
@@ -1521,6 +1643,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp->result = DID_OK << 16 | lscsi_status;
+ logit = 0;
/*
* Check to see if SCSI Status is non zero. If so report SCSI
@@ -1528,10 +1651,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", vha->host_no, comp_status,
- scsi_status));
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
+ vha->host_no, cp->device->id,
+ cp->device->lun));
+ logit = 1;
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -1545,109 +1669,60 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
break;
- case CS_DATA_OVERRUN:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun, comp_status,
- scsi_status));
- DEBUG2(printk(KERN_INFO
- "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
- cp->cmnd[4], cp->cmnd[5]));
- DEBUG2(printk(KERN_INFO
- "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
- "status!\n",
- cp->serial_number, scsi_bufflen(cp), resid_len));
-
- cp->result = DID_ERROR << 16;
- break;
-
case CS_PORT_LOGGED_OUT:
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- /*
- * If the port is in Target Down state, return all IOs for this
- * Target with DID_NO_CONNECT ELSE Queue the IOs in the
- * retry_queue.
- */
- DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
- "pid=%ld, compl status=0x%x, port state=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun,
- cp->serial_number, comp_status,
- atomic_read(&fcport->state)));
-
+ case CS_TIMEOUT:
/*
* We are going to have the fc class block the rport
* while we try to recover so instruct the mid layer
* to requeue until the class decides how to handle this.
*/
cp->result = DID_TRANSPORT_DISRUPTED << 16;
+
+ if (comp_status == CS_TIMEOUT) {
+ if (IS_FWI2_CAPABLE(ha))
+ break;
+ else if ((le16_to_cpu(sts->status_flags) &
+ SF_LOGOUT_SENT) == 0)
+ break;
+ }
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
+ vha->host_no, cp->device->id, cp->device->lun,
+ atomic_read(&fcport->state)));
+
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;
case CS_RESET:
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
- vha->host_no, comp_status, scsi_status));
-
- cp->result = DID_RESET << 16;
- break;
-
case CS_ABORTED:
- /*
- * hv2.19.12 - DID_ABORT does not retry the request if we
- * aborted this request then abort otherwise it must be a
- * reset.
- */
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
- vha->host_no, comp_status, scsi_status));
-
cp->result = DID_RESET << 16;
break;
- case CS_TIMEOUT:
- /*
- * We are going to have the fc class block the rport
- * while we try to recover so instruct the mid layer
- * to requeue until the class decides how to handle this.
- */
- cp->result = DID_TRANSPORT_DISRUPTED << 16;
-
- if (IS_FWI2_CAPABLE(ha)) {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
- "0x%x-0x%x\n", vha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, comp_status,
- scsi_status));
- break;
- }
- DEBUG2(printk(KERN_INFO
- "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
- "sflags=%x.\n", vha->host_no, cp->device->channel,
- cp->device->id, cp->device->lun, comp_status, scsi_status,
- le16_to_cpu(sts->status_flags)));
-
- /* Check to see if logout occurred. */
- if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ case CS_DIF_ERROR:
+ qla2x00_handle_dif_error(sp, sts24);
break;
-
default:
- DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
- "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
- qla_printk(KERN_INFO, ha,
- "Unknown status detected 0x%x-0x%x.\n",
- comp_status, scsi_status);
-
cp->result = DID_ERROR << 16;
break;
}
- /* Place command on done queue. */
+out:
+ if (logit)
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
+ "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
+ cp->device->id, cp->device->lun, comp_status, scsi_status,
+ cp->result, ox_id, cp->serial_number, cp->cmnd[0],
+ cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len));
+
if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
}
@@ -1806,6 +1881,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct rsp_que *rsp)
{
struct sts_entry_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
if (!vha->flags.online)
return;
@@ -1846,6 +1922,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
+ case TSK_MGMT_IOCB_TYPE:
+ qla24xx_tm_iocb_entry(vha, rsp->req,
+ (struct tsk_mgmt_entry *)pkt);
+ break;
+ case MARKER_TYPE:
+ qla24xx_marker_iocb_entry(vha, rsp->req,
+ (struct mrk_entry_24xx *)pkt);
+ break;
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
@@ -1866,7 +1950,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ if (IS_QLA82XX(ha)) {
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ } else
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
@@ -2169,6 +2257,11 @@ static struct qla_init_msix_entry msix_entries[3] = {
{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+ { "qla2xxx (default)", qla82xx_msix_default },
+ { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2195,7 +2288,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
struct qla_msix_entry *qentry;
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -2240,8 +2333,15 @@ msix_failed:
/* Enable MSI-X vectors for the base queue */
for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- ret = request_irq(qentry->vector, msix_entries[i].handler,
- 0, msix_entries[i].name, rsp);
+ if (IS_QLA82XX(ha)) {
+ ret = request_irq(qentry->vector,
+ qla82xx_msix_entries[i].handler,
+ 0, qla82xx_msix_entries[i].name, rsp);
+ } else {
+ ret = request_irq(qentry->vector,
+ msix_entries[i].handler,
+ 0, msix_entries[i].name, rsp);
+ }
if (ret) {
qla_printk(KERN_WARNING, ha,
"MSI-X: Unable to register handler -- %x/%d.\n",
@@ -2272,7 +2372,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
- !IS_QLA8432(ha) && !IS_QLA8001(ha))
+ !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2302,7 +2402,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
goto clear_risc_ints;
}
qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
+ "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2313,7 +2413,9 @@ skip_msix:
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
ha->flags.msi_enabled = 1;
- }
+ } else
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2331,7 +2433,7 @@ clear_risc_ints:
* FIXME: Noted that 8014s were being dropped during NK testing.
* Timing deltas during MSI-X/INTa transitions?
*/
- if (IS_QLA81XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
goto fail;
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 42eb7ffd594..f3650d0434c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -49,6 +49,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->pdev->error_state > pci_channel_io_frozen)
return QLA_FUNCTION_TIMEOUT;
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "timeout MBX Exiting.\n",
+ __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -85,7 +93,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha))
+ optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -133,7 +143,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -147,7 +168,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
base_vha->host_no, command));
- if (IS_FWI2_CAPABLE(ha))
+ if (IS_QLA82XX(ha)) {
+ if (RD_REG_DWORD(&reg->isp82.hint) &
+ HINT_MBX_INT_PENDING) {
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ DEBUG2_3_11(printk(KERN_INFO
+ "%s(%ld): Pending Mailbox timeout. "
+ "Exiting.\n", __func__, base_vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+ WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ } else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -264,7 +296,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
}
@@ -711,7 +743,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
* Context:
* Kernel context.
*/
-static int
+int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
@@ -952,7 +984,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
@@ -978,7 +1010,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
vha->fcoe_fcf_idx = mcp->mb[10];
vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1076,6 +1108,10 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
vha->host_no));
+ if (IS_QLA82XX(ha) && ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+ (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
+
if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
@@ -1408,7 +1444,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- if (IS_QLA81XX(vha->hw)) {
+ if (IS_QLA8XXX_TYPE(vha->hw)) {
/* Logout across all FCFs. */
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_1;
@@ -2428,12 +2464,22 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
int
qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+
return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
}
int
qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+ return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+
return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
}
@@ -2740,6 +2786,48 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
}
int
+qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ uint16_t *port_speed, uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = mcp->mb[3] = 0;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ if (port_speed)
+ *port_speed = mcp->mb[3];
+ }
+
+ return rval;
+}
+
+int
qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
uint16_t port_speed, uint16_t *mb)
{
@@ -2755,7 +2843,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
else
mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3544,7 +3632,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3582,7 +3670,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA81XX(vha->hw))
+ if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3643,7 +3731,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
}
int
-qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3678,7 +3767,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw))
+ if (IS_QLA8XXX_TYPE(vha->hw))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3690,9 +3779,11 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
if (rval != QLA_SUCCESS) {
DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ "(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
+ "mb[19]=0x%x.\n",
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]));
} else {
DEBUG2(printk(KERN_WARNING
"scsi(%ld): done.\n", vha->host_no));
@@ -3706,7 +3797,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
}
int
-qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+ uint16_t *mresp)
{
int rval;
mbx_cmd_t mc;
@@ -3718,9 +3810,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha)) {
mcp->mb[1] |= BIT_15;
- mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+ mcp->mb[2] = vha->fcoe_fcf_idx;
+ }
mcp->mb[16] = LSW(mreq->rcv_dma);
mcp->mb[17] = MSW(mreq->rcv_dma);
mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
@@ -3735,13 +3828,13 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->out_mb |= MBX_2;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_1;
- if (IS_QLA81XX(ha))
+ if (IS_QLA8XXX_TYPE(ha))
mcp->in_mb |= MBX_3;
mcp->tov = MBX_TOV_SECONDS;
@@ -3764,8 +3857,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
return rval;
}
int
-qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
- uint16_t *cmd_status)
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
{
int rval;
mbx_cmd_t mc;
@@ -3782,8 +3874,6 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
rval = qla2x00_mailbox_command(ha, mcp);
- /* Return mailbox statuses. */
- *cmd_status = mcp->mb[0];
if (rval != QLA_SUCCESS)
DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
rval));
@@ -3801,7 +3891,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t *mcp = &mc;
if (!IS_FWI2_CAPABLE(vha->hw))
- return QLA_FUNCTION_FAILED;
+ return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3836,7 +3926,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_DATA_RATE;
mcp->mb[1] = 0;
@@ -3857,3 +3948,122 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
return rval;
}
+
+int
+qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
+ uint16_t *mb)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ if (ha->flags.fcp_prio_enabled)
+ mcp->mb[2] = BIT_1;
+ else
+ mcp->mb[2] = BIT_2;
+ mcp->mb[4] = priority & 0xf;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (mb != NULL) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+ mb[4] = mcp->mb[4];
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_WARNING
+ "%s(%ld): failed=%x.\n", __func__,
+ vha->host_no, rval));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 1;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ memset(mcp, 0, sizeof(mbx_cmd_t));
+ mcp->mb[0] = MBC_TOGGLE_INTR;
+ mcp->mb[1] = 0;
+
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
+ "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(qla_printk(KERN_INFO, ha,
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 00000000000..ff562de0e8e
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,3636 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
+ ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M (0)
+#define QLA82XX_PCI_MS_2M (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+/* CRB window related */
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
+#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+ ((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+int qla82xx_crb_table_initialized;
+
+#define qla82xx_crb_addr_transform(name) \
+ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+static void qla82xx_crb_addr_transform_setup(void)
+{
+ qla82xx_crb_addr_transform(XDMA);
+ qla82xx_crb_addr_transform(TIMR);
+ qla82xx_crb_addr_transform(SRE);
+ qla82xx_crb_addr_transform(SQN3);
+ qla82xx_crb_addr_transform(SQN2);
+ qla82xx_crb_addr_transform(SQN1);
+ qla82xx_crb_addr_transform(SQN0);
+ qla82xx_crb_addr_transform(SQS3);
+ qla82xx_crb_addr_transform(SQS2);
+ qla82xx_crb_addr_transform(SQS1);
+ qla82xx_crb_addr_transform(SQS0);
+ qla82xx_crb_addr_transform(RPMX7);
+ qla82xx_crb_addr_transform(RPMX6);
+ qla82xx_crb_addr_transform(RPMX5);
+ qla82xx_crb_addr_transform(RPMX4);
+ qla82xx_crb_addr_transform(RPMX3);
+ qla82xx_crb_addr_transform(RPMX2);
+ qla82xx_crb_addr_transform(RPMX1);
+ qla82xx_crb_addr_transform(RPMX0);
+ qla82xx_crb_addr_transform(ROMUSB);
+ qla82xx_crb_addr_transform(SN);
+ qla82xx_crb_addr_transform(QMN);
+ qla82xx_crb_addr_transform(QMS);
+ qla82xx_crb_addr_transform(PGNI);
+ qla82xx_crb_addr_transform(PGND);
+ qla82xx_crb_addr_transform(PGN3);
+ qla82xx_crb_addr_transform(PGN2);
+ qla82xx_crb_addr_transform(PGN1);
+ qla82xx_crb_addr_transform(PGN0);
+ qla82xx_crb_addr_transform(PGSI);
+ qla82xx_crb_addr_transform(PGSD);
+ qla82xx_crb_addr_transform(PGS3);
+ qla82xx_crb_addr_transform(PGS2);
+ qla82xx_crb_addr_transform(PGS1);
+ qla82xx_crb_addr_transform(PGS0);
+ qla82xx_crb_addr_transform(PS);
+ qla82xx_crb_addr_transform(PH);
+ qla82xx_crb_addr_transform(NIU);
+ qla82xx_crb_addr_transform(I2Q);
+ qla82xx_crb_addr_transform(EG);
+ qla82xx_crb_addr_transform(MN);
+ qla82xx_crb_addr_transform(MS);
+ qla82xx_crb_addr_transform(CAS2);
+ qla82xx_crb_addr_transform(CAS1);
+ qla82xx_crb_addr_transform(CAS0);
+ qla82xx_crb_addr_transform(CAM);
+ qla82xx_crb_addr_transform(C2C1);
+ qla82xx_crb_addr_transform(C2C0);
+ qla82xx_crb_addr_transform(SMB);
+ qla82xx_crb_addr_transform(OCM0);
+ /*
+	 * Used only in P3; just define it for P2 also.
+ */
+ qla82xx_crb_addr_transform(I2C0);
+
+ qla82xx_crb_table_initialized = 1;
+}
+
+struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0100000, 0x0102000, 0x120000},
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } } ,
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
+ {{{1, 0x0800000, 0x0802000, 0x170000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{0, 0, 0, 0} } },
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
+ {{{0} } },
+ {{{1, 0x2100000, 0x2102000, 0x120000},
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
+ {{{0} } },
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
+ {{{0} } },
+ {{{0} } },
+ {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+unsigned qla82xx_crb_hub_agt[64] = {
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* Device states */
+char *qdev_state[] = {
+ "Unknown",
+ "Cold",
+ "Initializing",
+ "Ready",
+ "Need Reset",
+ "Need Quiescent",
+ "Failed",
+ "Quiescent",
+};
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+{
+ u32 win_read;
+
+ ha->crb_win = CRB_HI(*off);
+ writel(ha->crb_win,
+ (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != ha->crb_win) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
+ "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ }
+ *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
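
For reference, a minimal standalone sketch of the CRB window arithmetic used above (not part of the patch); the sample offset and hub/agent value are hypothetical, and it simply mirrors the CRB_BLK/CRB_SUBBLK/CRB_HI decomposition of a 128M CRB offset into a window value plus a 64K in-window offset:

    /* Illustrative sketch only -- not patch content. */
    #include <stdio.h>

    #define EX_CRB_BLK(off)    (((off) >> 20) & 0x3f)   /* like CRB_BLK()    */
    #define EX_CRB_SUBBLK(off) (((off) >> 16) & 0xf)    /* like CRB_SUBBLK() */

    int main(void)
    {
    	unsigned long off = 0x0130060;   /* hypothetical 128M CRB offset */
    	unsigned long agent = 0x1;       /* hypothetical hub/agent id    */
    	unsigned long win = (agent << 20) | (off & 0xf0000); /* like CRB_HI() */

    	printf("blk=%lu subblk=%lu window=0x%lx in-window off=0x%lx\n",
    	    EX_CRB_BLK(off), EX_CRB_SUBBLK(off), win, off & 0xffff);
    	return 0;
    }
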
+
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+ /* See if we are currently pointing to the region we want to use next */
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+		/* No need to change window. PCIX and PCIE regs are
+		 * in both windows.
+		 */
+ return off;
+ }
+
+ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+ /* We are in first CRB window */
+ if (ha->curr_window != 0)
+ WARN_ON(1);
+ return off;
+ }
+
+ if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+ /* We are in second CRB window */
+ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+ if (ha->curr_window != 1)
+ return off;
+
+ /* We are in the QM or direct access
+ * register region - do nothing
+ */
+ if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+ (off < QLA82XX_PCI_CAMQM_MAX))
+ return off;
+ }
+ /* strange address given */
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning: unm_nic_pci_set_crbwindow called with"
+ " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
+ return off;
+}
+
+int
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+{
+ unsigned long flags = 0;
+ int rv;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+
+ writel(data, (void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return 0;
+}
+
+int
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+{
+ unsigned long flags = 0;
+ int rv;
+ u32 data;
+
+ rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+
+ BUG_ON(rv == -1);
+
+ if (rv == 1) {
+ write_lock_irqsave(&ha->hw_lock, flags);
+ qla82xx_crb_win_lock(ha);
+ qla82xx_pci_set_crbwindow_2M(ha, &off);
+ }
+ data = RD_REG_DWORD((void __iomem *)off);
+
+ if (rv == 1) {
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ }
+ return data;
+}
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+int qla82xx_crb_win_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore3 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
+ return 0;
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+int qla82xx_idc_lock(struct qla_hw_data *ha)
+{
+ int i;
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore5 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= IDC_LOCK_TIMEOUT)
+ return -1;
+
+ timeout++;
+
+ /* Yield CPU */
+ if (!in_interrupt())
+ schedule();
+ else {
+ for (i = 0; i < 20; i++)
+ cpu_relax();
+ }
+ }
+
+ return 0;
+}
+
+void qla82xx_idc_unlock(struct qla_hw_data *ha)
+{
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+int
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+{
+ struct crb_128M_2M_sub_block_map *m;
+
+ if (*off >= QLA82XX_CRB_MAX)
+ return -1;
+
+ if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+ *off = (*off - QLA82XX_PCI_CAMQM) +
+ QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+ return 0;
+ }
+
+ if (*off < QLA82XX_PCI_CRBSPACE)
+ return -1;
+
+ *off -= QLA82XX_PCI_CRBSPACE;
+
+ /* Try direct map */
+ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+ if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+ *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+ return 0;
+ }
+ /* Not in direct map, use crb window */
+ return 1;
+}
+
+/* PCI Windowing for DDR regions. */
+#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+/*
+ * check memory access boundary.
+ * used by test agent. support ddr access only for now
+ */
+static unsigned long
+qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
+ unsigned long long addr, int size)
+{
+ if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX) ||
+ ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
+ return 0;
+ else
+ return 1;
+}
+
+int qla82xx_pci_set_window_warning_count;
+
+unsigned long
+qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
+{
+ int window;
+ u32 win_read;
+
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX)) {
+ /* DDR network side */
+ window = MN_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ if ((win_read << 17) != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX)) {
+ unsigned int temp1;
+ if ((addr & 0x00ff800) == 0xff800) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: QM access not handled.\n", __func__);
+ addr = -1UL;
+ }
+ window = OCM_WIN(addr);
+ ha->ddr_mn_window = window;
+ qla82xx_wr_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+ temp1 = ((window & 0x1FF) << 7) |
+ ((window & 0x0FFFE0000) >> 17);
+ if (win_read != temp1) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
+ __func__, temp1, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+ } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+ QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+ /* QDR network side */
+ window = MS_WIN(addr);
+ ha->qdr_sn_window = window;
+ qla82xx_wr_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
+ win_read = qla82xx_rd_32(ha,
+ ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+ if (win_read != window) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
+ __func__, window, win_read);
+ }
+ addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+ } else {
+ /*
+ * peg gdb frequently accesses memory that doesn't exist,
+		 * this limits the chatter so debugging isn't slowed down.
+ */
+ if ((qla82xx_pci_set_window_warning_count++ < 8) ||
+ (qla82xx_pci_set_window_warning_count%64 == 0)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Warning:%s Unknown address range!\n", __func__,
+ QLA2XXX_DRIVER_NAME);
+ }
+ addr = -1UL;
+ }
+ return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
+ unsigned long long addr)
+{
+ int window;
+ unsigned long long qdr_max;
+
+ qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+ /* DDR network side */
+ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+ QLA82XX_ADDR_DDR_NET_MAX))
+ BUG();
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+ QLA82XX_ADDR_OCM0_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+ QLA82XX_ADDR_OCM1_MAX))
+ return 1;
+ else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+ /* QDR network side */
+ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+ if (ha->qdr_sn_window == window)
+ return 1;
+ }
+ return 0;
+}
+
+static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL) {
+ *(u8 *)data = 0;
+ return -1;
+ }
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = readb(addr);
+ break;
+ case 2:
+ *(u16 *)data = readw(addr);
+ break;
+ case 4:
+ *(u32 *)data = readl(addr);
+ break;
+ case 8:
+ *(u64 *)data = readq(addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+static int
+qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ unsigned long flags;
+ void *addr = NULL;
+ int ret = 0;
+ u64 start;
+ uint8_t *mem_ptr = NULL;
+ unsigned long mem_base;
+ unsigned long mem_page;
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /*
+ * If attempting to access unknown address or straddle hw windows,
+ * do not access.
+ */
+ start = qla82xx_pci_set_window(ha, off);
+ if ((start == -1UL) ||
+ (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ qla_printk(KERN_ERR, ha,
+ "%s out of bound pci memory access. "
+ "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ return -1;
+ }
+
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ mem_base = pci_resource_start(ha->pdev, 0);
+ mem_page = start & PAGE_MASK;
+ /* Map two pages whenever user tries to access addresses in two
+ * consecutive pages.
+ */
+ if (mem_page != ((start + size - 1) & PAGE_MASK))
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+ else
+ mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+ if (mem_ptr == 0UL)
+ return -1;
+
+ addr = mem_ptr;
+ addr += start & (PAGE_SIZE - 1);
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ switch (size) {
+ case 1:
+ writeb(*(u8 *)data, addr);
+ break;
+ case 2:
+ writew(*(u16 *)data, addr);
+ break;
+ case 4:
+ writel(*(u32 *)data, addr);
+ break;
+ case 8:
+ writeq(*(u64 *)data, addr);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+int
+qla82xx_wrmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ u32 temp;
+ u64 off8, mem_crb, tmpw, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) {
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ } else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha, off,
+ data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+ loop = ((off0 + size - 1) >> 3) + 1;
+
+ if ((size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_rdmem(ha, off8 + (i << 3), &word[i], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((u8 *)data);
+ break;
+ case 2:
+ tmpw = *((u16 *)data);
+ break;
+ case 4:
+ tmpw = *((u32 *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((u64 *)data);
+ break;
+ }
+
+ word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[0] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Fail to write through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+int
+qla82xx_rdmem(struct qla_hw_data *ha, u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ u32 temp;
+ u64 off8, val, mem_crb, word[2] = {0, 0};
+#define MAX_CTL_CHECK 1000
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha, off,
+ data, size);
+ }
+
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ off0[1] = 0;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ sz[1] = size - sz[0];
+ loop = ((off0[0] + size - 1) >> 3) + 1;
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << 3);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Fail to read through agent\n",
+ QLA2XXX_DRIVER_NAME);
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((u64)temp << (32 * k));
+ }
+ }
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if (sz[0] == 8) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(u8 *)data = val;
+ break;
+ case 2:
+ *(u16 *)data = val;
+ break;
+ case 4:
+ *(u32 *)data = val;
+ break;
+ case 8:
+ *(u64 *)data = val;
+ break;
+ }
+ return 0;
+}
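
As an aside, the byte-lane mask expressions above (the "~(~0ULL << (sz * 8))" terms in qla82xx_rdmem()/qla82xx_wrmem()) can be hard to read; the following self-contained sketch, which is not part of the patch and uses made-up input values, shows the same extraction of an unaligned sub-word from a pair of 8-byte test-agent reads:

    /* Illustrative sketch only -- not patch content. */
    #include <stdio.h>
    #include <stdint.h>

    /* Extract 'size' bytes starting at byte offset 'off0' (0..7) from two
     * consecutive little-endian 64-bit words, as the agent read path does. */
    static uint64_t extract(uint64_t w0, uint64_t w1, int off0, int size)
    {
    	int sz0 = (size < (8 - off0)) ? size : (8 - off0);
    	int sz1 = size - sz0;
    	uint64_t val;

    	if (sz0 == 8)                     /* aligned full-word case */
    		return w0;
    	val = (w0 >> (off0 * 8)) & ~(~0ULL << (sz0 * 8));
    	if (sz1)                          /* value straddles both words */
    		val |= (w1 & ~(~0ULL << (sz1 * 8))) << (sz0 * 8);
    	return val;
    }

    int main(void)
    {
    	/* 4 bytes starting at byte 6: expect 0xff001122 for these inputs. */
    	printf("0x%llx\n", (unsigned long long)
    	    extract(0x1122334455667788ULL, 0x99aabbccddeeff00ULL, 6, 4));
    	return 0;
    }
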
+
+#define MTU_FUDGE_FACTOR 100
+unsigned long qla82xx_decode_crb_addr(unsigned long addr)
+{
+ int i;
+ unsigned long base_addr, offset, pci_base;
+
+ if (!qla82xx_crb_table_initialized)
+ qla82xx_crb_addr_transform_setup();
+
+ pci_base = ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == ADDR_ERROR)
+ return pci_base;
+ return pci_base + offset;
+}
+
+static long rom_max_timeout = 100;
+static long qla82xx_rom_lock_timeout = 100;
+
+int
+qla82xx_rom_lock(struct qla_hw_data *ha)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ /* acquire semaphore2 from PCI HW block */
+ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+ if (done == 1)
+ break;
+ if (timeout >= qla82xx_rom_lock_timeout)
+ return -1;
+ timeout++;
+ }
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ return 0;
+}
+
+int
+qla82xx_wait_rom_busy(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+	long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 4;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom busy",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_wait_rom_done(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+	long done = 0;
+
+ while (done == 0) {
+ done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+ done &= 2;
+ timeout++;
+ if (timeout >= rom_max_timeout) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: Timeout reached waiting for rom done",
+ QLA2XXX_DRIVER_NAME));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int
+qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: Error waiting for rom done\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ /* Reset abyte_cnt and dummy_byte_cnt */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ udelay(10);
+ cond_resched();
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+ int ret, loops = 0;
+
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ schedule();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_INFO, ha,
+ "%s: qla82xx_rom_lock failed\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+ ret = qla82xx_do_rom_fast_read(ha, addr, valp);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+int
+qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
+{
+ long timeout = 0;
+	uint32_t done = 1;
+ uint32_t val;
+ int ret = 0;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ while ((done != 0) && (ret == 0)) {
+ ret = qla82xx_read_status_reg(ha, &val);
+ done = val & 1;
+ timeout++;
+ udelay(10);
+ cond_resched();
+ if (timeout >= 50000) {
+ qla_printk(KERN_WARNING, ha,
+ "Timeout reached waiting for write finish");
+ return -1;
+ }
+ }
+ return ret;
+}
+
+int
+qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
+{
+ uint32_t val;
+ qla82xx_wait_rom_busy(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha))
+ return -1;
+ if (qla82xx_read_status_reg(ha, &val) != 0)
+ return -1;
+ if ((val & 2) != 2)
+ return -1;
+ return 0;
+}
+
+int
+qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
+{
+ if (qla82xx_flash_set_write_enable(ha))
+ return -1;
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return qla82xx_flash_wait_write_finish(ha);
+}
+
+int
+qla82xx_write_disable_flash(struct qla_hw_data *ha)
+{
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+ql82xx_rom_lock_d(struct qla_hw_data *ha)
+{
+ int loops = 0;
+ while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+ udelay(100);
+ cond_resched();
+ loops++;
+ }
+ if (loops >= 50000) {
+ qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ return -1;
+ }
+	return 0;
+}
+
+int
+qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+ uint32_t data)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ if (qla82xx_flash_set_write_enable(ha))
+ goto done_write;
+
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
+ qla82xx_wait_rom_busy(ha);
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done_write;
+ }
+
+ ret = qla82xx_flash_wait_write_finish(ha);
+
+done_write:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/* This routine runs the CRB initialization sequence
+ * to put the ISP into an operational state.
+ */
+int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
+{
+ int addr, val;
+	int i;
+ struct crb_addr_pair *buf;
+ unsigned long off;
+ unsigned offset, n;
+ struct qla_hw_data *ha = vha->hw;
+
+ struct crb_addr_pair {
+ long addr;
+ long data;
+ };
+
+	/* Halt all the individual PEGs and other blocks of the ISP */
+ qla82xx_rom_lock(ha);
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ /* don't reset CAM block on reset */
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+
+ /* Read the signature value from the flash.
+	 * Offset 0: Contains the signature (0xcafecafe)
+	 * Offset 4: Offset and number of addr/value pairs
+	 * present in the CRB initialization sequence
+ */
+ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+ qla82xx_rom_fast_read(ha, 4, &n) != 0) {
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Reading crb_init area: n: %08x\n", n);
+ return -1;
+ }
+
+	/* Offset in flash = lower 16 bits
+	 * Number of entries = upper 16 bits
+ */
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+	/* number of addr/value pairs should not exceed 1024 entries */
+ if (n >= 1024) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
+ QLA2XXX_DRIVER_NAME, __func__, n);
+ return -1;
+ }
+
+ qla_printk(KERN_INFO, ha,
+ "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
+
+ buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unable to malloc memory.\n",
+ QLA2XXX_DRIVER_NAME);
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+ qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -1;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+ /* Translate internal CRB initialization
+ * address to PCI bus address
+ */
+ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+ QLA82XX_PCI_CRBSPACE;
+		/* Not all CRB addr/value pairs are written;
+		 * some of them are skipped
+		 */
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLA82XX_CAM_RAM(0x1fc))
+ continue;
+
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+
+ /* skip core clock, so that firmware can increase the clock */
+ if (off == (ROMUSB_GLB + 0xc8))
+ continue;
+
+ /* skip the function enable register */
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+
+ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+ continue;
+
+ if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+ continue;
+
+ if (off == ADDR_ERROR) {
+ qla_printk(KERN_WARNING, ha,
+ "%s: [ERROR] Unknown addr: 0x%08lx\n",
+ QLA2XXX_DRIVER_NAME, buf[i].addr);
+ continue;
+ }
+
+ if (off == (QLA82XX_CRB_PEG_NET_1 + 0x18)) {
+ if (!QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision))
+ buf[i].data = 0x1020;
+ }
+
+ qla82xx_wr_32(ha, off, buf[i].data);
+
+		/* ISP requires a much longer delay to settle down,
+		 * else crb_window returns 0xffffffff
+		 */
+ if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+ msleep(1000);
+
+		/* ISP requires a millisecond delay between
+		 * successive CRB register updates
+		 */
+ msleep(1);
+ }
+
+ kfree(buf);
+
+ /* Resetting the data and instruction cache */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+ /* Clear all protocol processing engines */
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+ return 0;
+}
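
A minimal standalone sketch of the flash header decode performed above, not part of the patch and using a hypothetical header word: flash offset 0 is expected to hold the 0xcafecafe signature, and the word at offset 4 packs the addr/value table offset in its low 16 bits and the entry count in its high 16 bits:

    /* Illustrative sketch only -- not patch content. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t sig = 0xcafecafe;   /* value expected at flash offset 0    */
    	uint32_t hdr = 0x00400010;   /* hypothetical word at flash offset 4 */
    	uint32_t offset = hdr & 0xffffU;        /* table offset in flash    */
    	uint32_t count = (hdr >> 16) & 0xffffU; /* number of addr/value pairs */

    	if (sig != 0xcafecafeUL || count >= 1024)
    		return 1;            /* reject, as the driver does */
    	printf("table offset=0x%x, %u addr/value pairs\n", offset, count);
    	return 0;
    }
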
+
+int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
+ val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
+ if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
+ qla_printk(KERN_INFO, ha,
+ "Memory DIMM SPD not programmed. "
+ " Assumed valid.\n");
+ return 1;
+ } else if (val) {
+ qla_printk(KERN_INFO, ha,
+			"Memory DIMM type incorrect. Info:%08X.\n", val);
+ return 2;
+ }
+ return 0;
+}
+
+int
+qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
+{
+ int i;
+ long size = 0;
+ long flashaddr = BOOTLD_START, memaddr = BOOTLD_START;
+ u64 data;
+ u32 high, low;
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ for (i = 0; i < size; i++) {
+ if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+ (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
+ return -1;
+ }
+		data = ((u64)high << 32) | low;
+ qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+ flashaddr += 8;
+ memaddr += 8;
+
+ if (i % 0x1000 == 0)
+ msleep(1);
+ }
+ udelay(100);
+ read_lock(&ha->hw_lock);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else {
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ }
+ read_unlock(&ha->hw_lock);
+ return 0;
+}
+
+int
+qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j = 0, k, start, end, loop, sz[2], off0[2];
+ int shift_amount;
+ uint32_t temp;
+ uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_read_direct(ha,
+ off, data, size);
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ off0[0] = off & 0xf;
+ sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+ shift_amount = 4;
+ } else {
+ off8 = off & 0xfffffff8;
+ off0[0] = off & 0x7;
+ sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
+ shift_amount = 4;
+ }
+ loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+ off0[1] = 0;
+ sz[1] = size - sz[0];
+
+ /*
+	 * don't lock here - write_wx takes the lock itself each time
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+ temp = MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to read through agent\n");
+ break;
+ }
+
+ start = off0[i] >> 2;
+ end = (off0[i] + sz[i] - 1) >> 2;
+ for (k = start; k <= end; k++) {
+ temp = qla82xx_rd_32(ha,
+ mem_crb + MIU_TEST_AGT_RDDATA(k));
+ word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+ }
+ }
+
+ /*
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
+ * write_unlock_irqrestore(&adapter->adapter_lock, flags);
+ */
+
+ if (j >= MAX_CTL_CHECK)
+ return -1;
+
+ if ((off0[0] & 7) == 0) {
+ val = word[0];
+ } else {
+ val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+ ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+ }
+
+ switch (size) {
+ case 1:
+ *(uint8_t *)data = val;
+ break;
+ case 2:
+ *(uint16_t *)data = val;
+ break;
+ case 4:
+ *(uint32_t *)data = val;
+ break;
+ case 8:
+ *(uint64_t *)data = val;
+ break;
+ }
+ return 0;
+}
+
+int
+qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
+ u64 off, void *data, int size)
+{
+ int i, j, ret = 0, loop, sz[2], off0;
+ int scale, shift_amount, p3p, startword;
+ uint32_t temp;
+ uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+ /*
+ * If not MN, go check for MS or invalid.
+ */
+ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+ mem_crb = QLA82XX_CRB_QDR_NET;
+ else {
+ mem_crb = QLA82XX_CRB_DDR_NET;
+ if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+ return qla82xx_pci_mem_write_direct(ha,
+ off, data, size);
+ }
+
+ off0 = off & 0x7;
+ sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+ sz[1] = size - sz[0];
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ off8 = off & 0xfffffff0;
+ loop = (((off & 0xf) + size - 1) >> 4) + 1;
+ shift_amount = 4;
+ scale = 2;
+ p3p = 1;
+ startword = (off & 0xf)/8;
+ } else {
+ off8 = off & 0xfffffff8;
+ loop = ((off0 + size - 1) >> 3) + 1;
+ shift_amount = 3;
+ scale = 1;
+ p3p = 0;
+ startword = 0;
+ }
+
+ if (p3p || (size != 8) || (off0 != 0)) {
+ for (i = 0; i < loop; i++) {
+ if (qla82xx_pci_mem_read_2M(ha, off8 +
+ (i << shift_amount), &word[i * scale], 8))
+ return -1;
+ }
+ }
+
+ switch (size) {
+ case 1:
+ tmpw = *((uint8_t *)data);
+ break;
+ case 2:
+ tmpw = *((uint16_t *)data);
+ break;
+ case 4:
+ tmpw = *((uint32_t *)data);
+ break;
+ case 8:
+ default:
+ tmpw = *((uint64_t *)data);
+ break;
+ }
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ if (sz[0] == 8) {
+ word[startword] = tmpw;
+ } else {
+ word[startword] &=
+ ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+ }
+ if (sz[1] != 0) {
+ word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+ word[startword+1] |= tmpw >> (sz[0] * 8);
+ }
+ } else {
+ word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+ word[startword] |= tmpw << (off0 * 8);
+
+ if (loop == 2) {
+ word[1] &= ~(~0ULL << (sz[1] * 8));
+ word[1] |= tmpw >> (sz[0] * 8);
+ }
+ }
+
+ /*
+	 * don't lock here - write_wx takes the lock itself each time
+ * write_lock_irqsave(&adapter->adapter_lock, flags);
+ * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
+ */
+ for (i = 0; i < loop; i++) {
+ temp = off8 + (i << shift_amount);
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+ temp = 0;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+ temp = word[i * scale] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+ temp = (word[i * scale] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ temp = word[i*scale + 1] & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
+ temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+ qla82xx_wr_32(ha, mem_crb +
+ MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
+ }
+
+ temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+ temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+ qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+ if ((temp & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&ha->pdev->dev,
+ "failed to write through agent\n");
+ ret = -1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* PCI related functions */
+char *
+qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ int pcie_reg;
+ struct qla_hw_data *ha = vha->hw;
+ char lwstr[6];
+ uint16_t lnk;
+
+ pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ strcpy(str, "PCIe (");
+ strcat(str, "2.5Gb/s ");
+ snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
+ strcat(str, lwstr);
+ return str;
+}
+
+int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+
+ switch (region) {
+ case 0:
+ val = 0;
+ break;
+ case 1:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control + QLA82XX_MSIX_TBL_SPACE;
+ break;
+ }
+ return val;
+}
+
+int qla82xx_pci_region_len(struct pci_dev *pdev, int region)
+{
+ unsigned long val = 0;
+ u32 control;
+ switch (region) {
+ case 0:
+ pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+ val = control;
+ break;
+ case 1:
+ val = pci_resource_len(pdev, 0) -
+ qla82xx_pci_region_offset(pdev, 1);
+ break;
+ }
+ return val;
+}
+
+int
+qla82xx_iospace_config(struct qla_hw_data *ha)
+{
+ uint32_t len = 0;
+
+ if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
+ qla_printk(KERN_WARNING, ha,
+ "Failed to reserve selected regions (%s)\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ qla_printk(KERN_ERR, ha,
+ "region #0 not an MMIO resource (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ len = pci_resource_len(ha->pdev, 0);
+ ha->nx_pcibase =
+ (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+ if (!ha->nx_pcibase) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap pcibase MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer */
+ ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+
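+ /* Map the doorbell: a per-function window in PCI resource 4 by
+ * default, or the CAMRAM doorbell register when ql2xdbwr is set.
+ */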
+ if (!ql2xdbwr) {
+ ha->nxdb_wr_ptr =
+ (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+ (ha->pdev->devfn << 12)), 4);
+ if (!ha->nxdb_wr_ptr) {
+ qla_printk(KERN_ERR, ha,
+ "cannot remap MMIO (%s), aborting\n",
+ pci_name(ha->pdev));
+ pci_release_regions(ha->pdev);
+ goto iospace_error_exit;
+ }
+
+ /* Mapping of IO base pointer,
+ * doorbell read and write pointer
+ */
+ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+ (ha->pdev->devfn * 8);
+ } else {
+ ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+ QLA82XX_CAMRAM_DB1 :
+ QLA82XX_CAMRAM_DB2);
+ }
+
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = ha->max_rsp_queues + 1;
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+/* GS related functions */
+
+/* Initialization related functions */
+
+/**
+ * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int ret;
+
+ pci_set_master(ha->pdev);
+ ret = pci_set_mwi(ha->pdev);
+ ha->chip_revision = ha->pdev->revision;
+ return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Disable interrupts in preparation for chip reset.
+ * @vha: HA context
+ */
+void
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ ha->isp_ops->disable_intrs(ha);
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ struct init_cb_81xx *icb;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_81xx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ icb->version = 1;
+ icb->frame_payload_size = 2112;
+ icb->execution_throttle = 8;
+ icb->exchange_count = 128;
+ icb->login_retry_count = 8;
+
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
+ WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+}
+
+void qla82xx_reset_adapter(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ vha->flags.online = 0;
+ qla2x00_try_to_stop_firmware(vha);
+ ha->isp_ops->disable_intrs(ha);
+}
+
+int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ __le64 data;
+
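+ /* Copy the bootloader from the blob into adapter memory, 8 bytes
+ * at a time, starting at BOOTLD_START.
+ */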
+ size = (IMAGE_START - BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)&ha->hablob->fw->data[BOOTLD_START];
+ flashaddr = BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+ qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8);
+ flashaddr += 8;
+ }
+
+ size = *(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET];
+ size = (__force u32)cpu_to_le32(size) / 8;
+ ptr64 = (u64 *)&ha->hablob->fw->data[IMAGE_START];
+ flashaddr = FLASH_ADDR_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+ return -EIO;
+ flashaddr += 8;
+ }
+
+ /* Write a magic value to CAMRAM register
+ * at a specified offset to indicate
+ * that all data is written and
+ * ready for firmware to initialize.
+ */
+ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), 0x12345678);
+
+ if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
+ return 0;
+}
+
+int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
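+ /* Poll the command peg state every 500 ms, up to 60 times, waiting
+ * for firmware initialization to complete.
+ */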
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+ qla_printk(KERN_WARNING, ha,
+ "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Cmd Peg initialization failed: 0x%x.\n", val);
+
+ qla82xx_check_for_bad_spd(ha);
+ val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ do {
+ read_lock(&ha->hw_lock);
+ val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+ read_unlock(&ha->hw_lock);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return QLA_SUCCESS;
+ case PHAN_INITIALIZE_FAILED:
+ break;
+ default:
+ break;
+ }
+
+ qla_printk(KERN_WARNING, ha,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
+ val, retries);
+
+ msleep(500);
+
+ } while (--retries);
+
+ qla_printk(KERN_INFO, ha,
+ "Rcv Peg initialization failed: 0x%x.\n", val);
+ read_lock(&ha->hw_lock);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+ read_unlock(&ha->hw_lock);
+ return QLA_FUNCTION_FAILED;
+}
+
+/* ISR related functions */
+uint32_t qla82xx_isr_int_target_mask_enable[8] = {
+ ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
+ ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
+ ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
+ ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
+};
+
+uint32_t qla82xx_isr_int_target_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
+};
+
+static struct qla82xx_legacy_intr_set legacy_intr[] = \
+ QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+ uint16_t cnt;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+ wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out[0] = mb0;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ wptr++;
+ }
+
+ if (ha->mcp) {
+ DEBUG3_11(printk(KERN_INFO "%s(%ld): "
+ "Got mailbox completion. cmd=%x.\n",
+ __func__, vha->host_no, ha->mcp->mb[0]));
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld): MBX pointer ERROR!\n",
+ __func__, vha->host_no);
+ }
+}
+
+/*
+ * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0, status1 = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ if (!ha->flags.msi_enabled) {
+ status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ if (!(status & ha->nx_legacy_intr.int_vec_bit))
+ return IRQ_NONE;
+
+ status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+ if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+ return IRQ_NONE;
+ }
+
+ /* clear the interrupt */
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+ /* read twice to ensure write is flushed */
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+ qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!ha->flags.msi_enabled)
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ uint32_t stat;
+ uint16_t mb[4];
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ do {
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): "
+ " Unrecognized interrupt type (%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ } while (0);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+#ifdef QL_DEBUG_LEVEL_17
+ if (!irq && ha->flags.eeh_busy)
+ qla_printk(KERN_WARNING, ha,
+ "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+#endif
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->isp82;
+ spin_lock_irq(&ha->hardware_lock);
+ vha = pci_get_drvdata(ha->pdev);
+ qla24xx_process_response_queue(vha, rsp);
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irq(&ha->hardware_lock);
+ return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ uint32_t stat;
+ uint16_t mb[4];
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return;
+ }
+ ha = rsp->hw;
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
+ "(%d).\n",
+ vha->host_no, stat & 0xff));
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_enable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ qla82xx_mbx_intr_disable(vha);
+ spin_lock_irq(&ha->hardware_lock);
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ spin_unlock_irq(&ha->hardware_lock);
+ ha->interrupts_on = 0;
+}
+
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+ struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+ /* ISP 8021 initializations */
+ rwlock_init(&ha->hw_lock);
+ ha->qdr_sn_window = -1;
+ ha->ddr_mn_window = -1;
+ ha->curr_window = 255;
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
+ nx_legacy_intr = &legacy_intr[ha->portnum];
+ ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+ ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+ ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+ ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+static inline void
+qla82xx_set_drv_active(scsi_qla_host_t *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ /* If reset value is all FF's, initialize DRV_ACTIVE */
+ if (drv_active == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+ drv_active |= (1 << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline void
+qla82xx_clear_drv_active(struct qla_hw_data *ha)
+{
+ uint32_t drv_active;
+
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ drv_active &= ~(1 << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+static inline int
+qla82xx_need_reset(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ int rval;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ rval = drv_state & (1 << (ha->portnum * 4));
+ return rval;
+}
+
+static inline void
+qla82xx_set_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+
+ /* If reset value is all FF's, initialize DRV_STATE */
+ if (drv_state == 0xffffffff) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ }
+ drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla_printk(KERN_INFO, ha,
+ "%s(%ld):drv_state = 0x%x\n",
+ __func__, vha->host_no, drv_state);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_clear_rst_ready(struct qla_hw_data *ha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
+{
+ uint32_t qsnt_state;
+
+ qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+int qla82xx_load_fw(scsi_qla_host_t *vha)
+{
+ int rst;
+ struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Put both the PEG CMD and RCV PEG to default state
+ * of 0 before resetting the hardware
+ */
+ qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+ qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+
+ if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "%s: Error during CRB Initialization\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+ udelay(500);
+
+ /* Bring QM and CAMRAM out of reset */
+ rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+ rst &= ~((1 << 28) | (1 << 24));
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+ /*
+ * FW Load priority:
+ * 1) Operational firmware residing in flash.
+ * 2) Firmware via request-firmware interface (.bin file).
+ */
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from flash\n");
+
+ if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "Firmware loaded successfully from flash\n");
+ return QLA_SUCCESS;
+ }
+try_blob_fw:
+ qla_printk(KERN_INFO, ha,
+ "Attempting to load firmware from blob\n");
+
+ /* Load firmware blob. */
+ blob = ha->hablob = qla2x00_request_firmware(vha);
+ if (!blob) {
+ qla_printk(KERN_ERR, ha,
+ "Firmware image not present.\n");
+ goto fw_load_failed;
+ }
+
+ if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Firmware loaded successfully "
+ "from binary blob\n", __func__);
+ return QLA_SUCCESS;
+ } else {
+ qla_printk(KERN_ERR, ha,
+ "Firmware load failed from binary blob\n");
+ blob->fw = NULL;
+ blob = NULL;
+ goto fw_load_failed;
+ }
+ return QLA_SUCCESS;
+
+fw_load_failed:
+ return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_start_firmware(scsi_qla_host_t *vha)
+{
+ int pcie_cap;
+ uint16_t lnk;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* scrub dma mask expansion register */
+ qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+
+ /* Overwrite stale initialization register values */
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+ qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+ if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error trying to start fw!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Handshake with the card before we register the devices. */
+ if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha,
+ "%s: Error during card handshake!\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Negotiated Link width */
+ pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ ha->link_width = (lnk >> 4) & 0x3f;
+
+ /* Synchronize with Receive peg */
+ return qla82xx_check_rcvpeg_state(ha);
+}
+
+static inline int
+qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
+
+ cmd = sp->cmd;
+
+ /* Update entry type to indicate Command Type 6 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
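+ /* Split the scatter/gather list into chunks of QLA_DSDS_PER_IOCB
+ * entries. Each chunk goes into a pre-allocated DSD list taken from
+ * the global pool; the first list is referenced directly from the
+ * command packet and each subsequent list is chained from the tail
+ * of the previous one.
+ */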
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = dsd_list_len;
+ } else {
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg++;
+ avail_dsds--;
+ }
+ }
+
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
+}
+
+/*
+ * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of DSD lists needed to store @dsds.
+ */
+inline uint16_t
+qla82xx_calc_dsd_lists(uint16_t dsds)
+{
+ uint16_t dsd_lists = 0;
+
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
+}
+
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
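+ /* Doorbell value: command bit plus PCI function number; the request
+ * queue id and ring index are OR'ed in before ringing the doorbell.
+ */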
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
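+ /* Commands needing more data segments than ql2xshiftctondsd use a
+ * Command Type 6 IOCB with externally chained DSD lists; smaller
+ * commands use a Command Type 7 IOCB.
+ */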
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ goto queuing_error;
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ goto queuing_error;
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ DEBUG(printk(KERN_INFO
+ "%s(%ld): failed to allocate"
+ " ctx.\n", __func__, vha->host_no));
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ DEBUG2_3(printk("%s(%ld): failed to allocate"
+ " fcp_cmnd.\n", __func__, vha->host_no));
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI command bigger than 16 bytes must be
+ * multiple of 4
+ */
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+ uint32_t length)
+{
+ uint32_t i;
+ uint32_t val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Dword reads to flash. */
+ for (i = 0; i < length/4; i++, faddr += 4) {
+ if (qla82xx_rom_fast_read(ha, faddr, &val)) {
+ qla_printk(KERN_WARNING, ha,
+ "Do ROM fast read failed\n");
+ goto done_read;
+ }
+ dwptr[i] = __constant_cpu_to_le32(val);
+ }
+done_read:
+ return dwptr;
+}
+
+int
+qla82xx_unprotect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_unprotect;
+
+ val &= ~(0x7 << 2);
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0) {
+ val |= (0x7 << 2);
+ qla82xx_write_status_reg(ha, val);
+ }
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+
+done_unprotect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_protect_flash(struct qla_hw_data *ha)
+{
+ int ret;
+ uint32_t val;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ ret = qla82xx_read_status_reg(ha, &val);
+ if (ret < 0)
+ goto done_protect;
+
+ val |= (0x7 << 2);
+ /* LOCK all sectors */
+ ret = qla82xx_write_status_reg(ha, val);
+ if (ret < 0)
+ qla_printk(KERN_WARNING, ha, "Write status register failed\n");
+
+ if (qla82xx_write_disable_flash(ha) != 0)
+ qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+done_protect:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+int
+qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
+{
+ int ret = 0;
+
+ ret = ql82xx_rom_lock_d(ha);
+ if (ret < 0) {
+ qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ return ret;
+ }
+
+ qla82xx_flash_set_write_enable(ha);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+ qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
+
+ if (qla82xx_wait_rom_done(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "Error waiting for rom done\n");
+ ret = -1;
+ goto done;
+ }
+ ret = qla82xx_flash_wait_write_finish(ha);
+done:
+ qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+ return ret;
+}
+
+/*
+ * Address and length are byte addresses
+ */
+uint8_t *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+static int
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret;
+ uint32_t liter;
+ uint32_t sec_mask, rest_addr;
+ dma_addr_t optrom_dma;
+ void *optrom = NULL;
+ int page_mode = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret = -1;
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if (page_mode && !(faddr & 0xfff) &&
+ dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+ if (!optrom) {
+ qla_printk(KERN_DEBUG, ha,
+ "Unable to allocate memory for optrom "
+ "burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
+ }
+ }
+
+ rest_addr = ha->fdt_block_size - 1;
+ sec_mask = ~rest_addr;
+
+ ret = qla82xx_unprotect_flash(ha);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to unprotect flash for update.\n");
+ goto write_done;
+ }
+
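+ /* Program the flash one dword at a time, erasing each sector when
+ * its first dword is reached; if a burst write fails, fall back to
+ * slow single-dword writes.
+ */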
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ /* Are we at the beginning of a sector? */
+ if ((faddr & rest_addr) == 0) {
+
+ ret = qla82xx_erase_sector(ha, faddr);
+ if (ret) {
+ DEBUG9(qla_printk(KERN_ERR, ha,
+ "Unable to erase sector: "
+ "address=%x.\n", faddr));
+ break;
+ }
+ }
+
+ /* Go with burst-write. */
+ if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+ /* Copy data to DMA'ble buffer. */
+ memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+ ret = qla2x00_load_ram(vha, optrom_dma,
+ (ha->flash_data_off | faddr),
+ OPTROM_BURST_DWORDS);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to burst-write optrom segment "
+ "(%x/%x/%llx).\n", ret,
+ (ha->flash_data_off | faddr),
+ (unsigned long long)optrom_dma);
+ qla_printk(KERN_WARNING, ha,
+ "Reverting to slow-write.\n");
+
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ optrom = NULL;
+ } else {
+ liter += OPTROM_BURST_DWORDS - 1;
+ faddr += OPTROM_BURST_DWORDS - 1;
+ dwptr += OPTROM_BURST_DWORDS - 1;
+ continue;
+ }
+ }
+
+ ret = qla82xx_write_flash_dword(ha, faddr,
+ cpu_to_le32(*dwptr));
+ if (ret) {
+ DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
+ "flash address=%x data=%x.\n", __func__,
+ vha->host_no, faddr, *dwptr));
+ break;
+ }
+ }
+
+ ret = qla82xx_protect_flash(ha);
+ if (ret)
+ qla_printk(KERN_WARNING, ha,
+ "Unable to protect flash after update.\n");
+write_done:
+ if (optrom)
+ dma_free_coherent(&ha->pdev->dev,
+ OPTROM_BURST_SIZE, optrom, optrom_dma);
+ return ret;
+}
+
+int
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
+ length >> 2);
+ scsi_unblock_requests(vha->host);
+
+ /* Convert return ISP82xx to generic */
+ if (rval)
+ rval = QLA_FUNCTION_FAILED;
+ else
+ rval = QLA_SUCCESS;
+ return rval;
+}
+
+void
+qla82xx_start_iocbs(srb_t *sp)
+{
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ reg = &ha->iobase->isp82;
+ dbval = 0x04 | (ha->portnum << 5);
+
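+ /* Ring the doorbell and read it back until the written value is
+ * observed, so the hardware is guaranteed to see the new ring index.
+ */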
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+ wmb();
+ }
+}
+
+/*
+ * qla82xx_device_bootstrap
+ * Initialize device, set DEV_READY, start fw
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+static int
+qla82xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+ int rval, i, timeout;
+ uint32_t old_count, count;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla82xx_need_reset(ha))
+ goto dev_initialize;
+
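+ /* No reset requested: sample the peg-alive counter for up to two
+ * seconds; if it advances, firmware is already running and the
+ * device can be marked ready.
+ */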
+ old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ goto dev_ready;
+ }
+
+dev_initialize:
+ /* set to DEV_INITIALIZING */
+ qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
+
+ /* Driver that sets device state to initializing sets IDC version */
+ qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
+
+ qla82xx_idc_unlock(ha);
+ rval = qla82xx_start_firmware(vha);
+ qla82xx_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ qla82xx_clear_drv_active(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
+ return rval;
+ }
+
+dev_ready:
+ qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
+
+ return QLA_SUCCESS;
+}
+
+static void
+qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Disable the board */
+ qla_printk(KERN_INFO, ha, "Disabling the board\n");
+
+ /* Set DEV_FAILED flag to disable timer */
+ vha->device_flags |= DFLG_DEV_FAILED;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ vha->flags.online = 0;
+ vha->flags.init_done = 0;
+}
+
+/*
+ * qla82xx_need_reset_handler
+ * Code to start the reset sequence
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ */
+static void
+qla82xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state, drv_state, drv_active;
+ unsigned long reset_timeout;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (vha->flags.online) {
+ qla82xx_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla82xx_idc_lock(ha);
+ }
+
+ qla82xx_set_rst_ready(ha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ qla_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
+ break;
+ }
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+ drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+ }
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+ /* Force to DEV_COLD unless someone else is starting a reset */
+ if (dev_state != QLA82XX_DEV_INITIALIZING) {
+ qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ }
+}
+
+static void
+qla82xx_check_fw_alive(scsi_qla_host_t *vha)
+{
+ uint32_t fw_heartbeat_counter, halt_status;
+ struct qla_hw_data *ha = vha->hw;
+
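+ /* The firmware increments PEG_ALIVE_COUNTER while it is healthy; if
+ * the counter stalls for two consecutive checks, schedule an ISP
+ * abort, or mark the adapter unrecoverable if the halt status
+ * indicates so.
+ */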
+ fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+}
+
+/*
+ * qla82xx_device_state_handler
+ * Main state handler
+ *
+ * Note:
+ * IDC lock must be held upon entry
+ *
+ * Return:
+ * Success : 0
+ * Failed : 1
+ */
+int
+qla82xx_device_state_handler(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla82xx_idc_lock(ha);
+ if (!vha->flags.init_done)
+ qla82xx_set_drv_active(vha);
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+ while (1) {
+
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "%s: device init failed!\n",
+ QLA2XXX_DRIVER_NAME));
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla_printk(KERN_INFO, ha,
+ "2:Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+
+ switch (dev_state) {
+ case QLA82XX_DEV_READY:
+ goto exit;
+ case QLA82XX_DEV_COLD:
+ rval = qla82xx_device_bootstrap(vha);
+ goto exit;
+ case QLA82XX_DEV_INITIALIZING:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_NEED_RESET:
+ if (!ql2xdontresethba)
+ qla82xx_need_reset_handler(vha);
+ break;
+ case QLA82XX_DEV_NEED_QUIESCENT:
+ qla82xx_set_qsnt_ready(ha);
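+ /* fall through */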
+ case QLA82XX_DEV_QUIESCENT:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ break;
+ case QLA82XX_DEV_FAILED:
+ qla82xx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit;
+ default:
+ qla82xx_idc_unlock(ha);
+ msleep(1000);
+ qla82xx_idc_lock(ha);
+ }
+ }
+exit:
+ qla82xx_idc_unlock(ha);
+ return rval;
+}
+
+void qla82xx_watchdog(scsi_qla_host_t *vha)
+{
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ /* don't poll if reset is going on */
+ if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
+ if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(): Adapter reset needed!\n", __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ qla82xx_check_fw_alive(vha);
+ }
+ }
+}
+
+int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+ int rval;
+ rval = qla82xx_device_state_handler(vha);
+ return rval;
+}
+
+/*
+ * qla82xx_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * vha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qla82xx_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t dev_state;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ qla_printk(KERN_WARNING, ha,
+ "%s(%ld): Device in failed state, "
+ "Exiting.\n", __func__, vha->host_no);
+ return QLA_SUCCESS;
+ }
+
+ qla82xx_idc_lock(ha);
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ } else
+ qla_printk(KERN_INFO, ha, "HW State: %s\n",
+ dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
+ qla82xx_idc_unlock(ha);
+
+ rval = qla82xx_device_state_handler(vha);
+
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_rst_ready(ha);
+ qla82xx_idc_unlock(ha);
+
+ if (rval == QLA_SUCCESS)
+ qla82xx_restart_isp(vha);
+
+ if (rval) {
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ if (ha->isp_abort_cnt == 0) {
+ qla_printk(KERN_WARNING, ha,
+ "ISP error recovery failed - "
+ "board disabled\n");
+ /*
+ * The next call disables the board
+ * completely.
+ */
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_RETRY,
+ &vha->dpc_flags);
+ rval = QLA_SUCCESS;
+ } else { /* schedule another ISP abort */
+ ha->isp_abort_cnt--;
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "qla%ld: ISP abort - retry remaining %d\n",
+ vha->host_no, ha->isp_abort_cnt));
+ rval = QLA_FUNCTION_FAILED;
+ }
+ } else {
+ ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+ DEBUG(qla_printk(KERN_INFO, ha,
+ "(%ld): ISP error recovery - retrying (%d) "
+ "more times\n", vha->host_no, ha->isp_abort_cnt));
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+ return rval;
+}
+
+/*
+ * qla82xx_fcoe_ctx_reset
+ * Performs a quick reset and aborts all outstanding commands.
+ * This will only perform an FCoE context reset and avoids a full blown
+ * chip reset.
+ *
+ * Input:
+ * vha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (vha->flags.online) {
+ /* Abort all outstanding commands, so as to be requeued later */
+ qla2x00_abort_isp_cleanup(vha);
+ }
+
+ /* Stop currently executing firmware.
+ * This will destroy existing FCoE context at the F/W end.
+ */
+ qla2x00_try_to_stop_firmware(vha);
+
+ /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
+ rval = qla82xx_restart_isp(vha);
+
+ return rval;
+}
+
+/*
+ * qla2x00_wait_for_fcoe_ctx_reset
+ * Wait until the FCoE context is reset.
+ *
+ * Note:
+ * Does context switching here.
+ * Release SPIN_LOCK (if any) before calling this routine.
+ *
+ * Return:
+ * Success (fcoe_ctx reset is done) : 0
+ * Failed (fcoe_ctx reset not completed within max loop timeout) : 1
+ */
+int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+ int status = QLA_FUNCTION_FAILED;
+ unsigned long wait_reset;
+
+ wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ && time_before(jiffies, wait_reset)) {
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+ status = QLA_SUCCESS;
+ break;
+ }
+ }
+ DEBUG2(printk(KERN_INFO
+ "%s status=%d\n", __func__, status));
+
+ return status;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 00000000000..f8f99a5ea53
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,889 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2008 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
+
+#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
+#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
+#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+
+#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
+
+/* Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
+
+/* Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
+
+/* Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
+
+/* Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
+
+/* Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
+
+/* Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
+
+/* Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+/* */
+#define QLA82XX_HW_PX_MAP_CRB_PH 0
+#define QLA82XX_HW_PX_MAP_CRB_PS 1
+#define QLA82XX_HW_PX_MAP_CRB_MN 2
+#define QLA82XX_HW_PX_MAP_CRB_MS 3
+#define QLA82XX_HW_PX_MAP_CRB_SRE 5
+#define QLA82XX_HW_PX_MAP_CRB_NIU 6
+#define QLA82XX_HW_PX_MAP_CRB_QMN 7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
+#define QLA82XX_HW_PX_MAP_CRB_QMS 12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND 21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
+#define QLA82XX_HW_PX_MAP_CRB_SN 29
+#define QLA82XX_HW_PX_MAP_CRB_EG 31
+#define QLA82XX_HW_PX_MAP_CRB_PH2 32
+#define QLA82XX_HW_PX_MAP_CRB_PS2 33
+#define QLA82XX_HW_PX_MAP_CRB_CAM 34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
+#define QLA82XX_HW_PX_MAP_CRB_SMB 58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
+#define QLA82XX_HW_PX_MAP_CRB_LPC 61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
+
+/* This field defines CRB adr [31:20] of the agents */
+/* */
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SMB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_CAS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+ QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+
+#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) \
+ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
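+/*
+ * For example, agent 51 (QLA82XX_HW_PX_MAP_CRB_ROMUSB) maps to CRB
+ * offset QLA82XX_PCI_CRBSPACE + 51 * 0x00100000 = 0x09300000.
+ */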
+#define QLA82XX_CRB_C2C_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+
+#define QLA82XX_CRB_PCIX_HOST \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE \
+ QLA82XX_CRB_PCIX_MD
+
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+#define QLA82XX_CRB_PEG_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+ QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+#define QLA82XX_CRB_MAX \
+ QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+/* IMBUS address bit used to indicate a host address (host memory
+ * accessed via IMBUS). This bit is eliminated by the PCIe BAR and
+ * BAR select before presentation over PCIe. */
+#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
+
+#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_CONTROL (0x000)
+#define MIU_TAG (0x004)
+#define MIU_TEST_AGT_CTRL (0x090)
+#define MIU_TEST_AGT_ADDR_LO (0x094)
+#define MIU_TEST_AGT_ADDR_HI (0x098)
+#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
+#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
+#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/* MIU_TEST_AGT_CTRL flags; they work for the SIU as well */
+#define MIU_TA_CTL_START 1
+#define MIU_TA_CTL_ENABLE 2
+#define MIU_TA_CTL_WRITE 4
+#define MIU_TA_CTL_BUSY 8
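+/*
+ * A test-agent access typically sets ENABLE (and WRITE for writes)
+ * together with START, then polls the control register until the
+ * START/BUSY bits clear; MAX_CTL_CHECK below bounds that polling.
+ */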
+
+/* CAM RAM */
+# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
+#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
+#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
+
+#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8))
+#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc))
+
+#define HALT_STATUS_UNRECOVERABLE 0x80000000
+#define HALT_STATUS_RECOVERABLE 0x40000000
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
+#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
+
+/* Device states; every driver must use these values */
+#define QLA82XX_DEV_COLD 1
+#define QLA82XX_DEV_INITIALIZING 2
+#define QLA82XX_DEV_READY 3
+#define QLA82XX_DEV_NEED_RESET 4
+#define QLA82XX_DEV_NEED_QUIESCENT 5
+#define QLA82XX_DEV_FAILED 6
+#define QLA82XX_DEV_QUIESCENT 7
+#define MAX_STATES 8 /* Increment if new state added */
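+/*
+ * The current device state lives in QLA82XX_CRB_DEV_STATE (CAM RAM)
+ * and is shared by all PCI functions; it is only changed while
+ * holding the IDC lock (see the Driver Coexistence defines above).
+ */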
+
+#define QLA82XX_IDC_VERSION 1
+#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
+#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10
+
+#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
+
+#define PCIE_CHICKEN3 (0x120c8)
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/
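+/*
+ * Each SEMn LOCK/UNLOCK pair is a hardware semaphore: the driver is
+ * expected to read the LOCK register until it returns non-zero to
+ * acquire it, and to read the UNLOCK register to release it.
+ */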
+
+/* Driver states */
+#define QLA82XX_DRVST_NOT_RDY 0
+#define QLA82XX_DRVST_RST_RDY 1
+#define QLA82XX_DRVST_QSNT_RDY 2
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+
+#define QLA82XX_MSIX_TBL_SPACE 8192
+#define QLA82XX_PCI_REG_MSIX_TBL 0x44
+#define QLA82XX_PCI_MSIX_CONTROL 0x40
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+ long addr;
+ long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK 1000
+
+/***************************************************************************
+ * PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
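+/* e.g. PCIX_MSI_F(3) == 0x13000 + 3 * 4 == PCIX_MSI_F3 */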
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC (0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+ (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+ (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+ (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define ISR_MSI_INT_TRIGGER(FUNC) \
+ (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
+#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
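+/* Legacy interrupt state is encoded in bits [9:8]: 0 - idle, 2 - triggered. */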
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qla82xx_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define QLA82XX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
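+/*
+ * The table above is indexed by PCI function number (0-7); each entry
+ * groups that function's legacy interrupt vector bit with its target
+ * status register, target mask register and MSI trigger register.
+ */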
+
+#define BOOTLD_START 0x10000
+#define IMAGE_START 0x100000
+#define FLASH_ADDR_START 0x43000
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC 0x12345678
+#define FW_SIZE_OFFSET (0x3e840c)
+
+#define QLA82XX_IS_REVISION_P3PLUS(_rev_) ((_rev_) >= 0x50)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
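+/*
+ * Note: these fallbacks are built from two 32-bit MMIO accesses, so a
+ * 64-bit read or write is not atomic with respect to the device.
+ */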
+
+/* Request and response queue size */
+#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
+
+/*
+ * ISP 8021 I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+ uint32_t req_q_out[64]; /* Request Queue Out-Pointer (64 * 4). */
+ uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
+ uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+
+ uint16_t mailbox_in[32]; /* Mailbox In registers */
+ uint16_t unused_1[32];
+ uint32_t hint; /* Host interrupt register */
+#define HINT_MBX_INT_PENDING BIT_0
+ uint16_t unused_2[62];
+ uint16_t mailbox_out[32]; /* Mailbox Out registers */
+ uint32_t unused_3[48];
+
+ uint32_t host_status; /* host status */
+#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
+ uint32_t host_int; /* Interrupt status. */
+#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
+};
+
+struct fcp_cmnd {
+ struct scsi_lun lun;
+ uint8_t crn;
+ uint8_t task_attribute;
+ uint8_t task_managment;
+ uint8_t additional_cdb_len;
+ uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+};
+
+struct dsd_dma {
+ struct list_head list;
+ dma_addr_t dsd_list_dma;
+ void *dsd_addr;
+};
+
+#define QLA_DSDS_PER_IOCB 37
+#define QLA_DSD_SIZE 12
+struct ct6_dsd {
+ uint16_t fcp_cmnd_len;
+ dma_addr_t fcp_cmnd_dma;
+ struct fcp_cmnd *fcp_cmnd;
+ int dsd_use_cnt;
+ struct list_head dsd_list;
+};
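+/*
+ * Per-command context for Command Type 6 (CT6) IOCBs: holds the
+ * DMA-mapped FCP_CMND and the DSD lists taken from ha->gbl_dsd_list
+ * (allocated from the ctx mempool in qla_os.c).
+ */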
+
+#define MBC_TOGGLE_INTR 0x10
+
+/* Flash layout table (FLT) regions and flash addresses */
+#define FLT_REG_BOOTLOAD_82XX 0x72
+#define FLT_REG_BOOT_CODE_82XX 0x78
+#define FLT_REG_FW_82XX 0x74
+#define FLT_REG_GOLD_FW_82XX 0x75
+#define FLT_REG_VPD_82XX 0x81
+
+#define FA_VPD_SIZE_82XX 0x400
+
+#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48c37e38ed0..be1a8fcbb1f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,11 +24,18 @@
*/
char qla2x00_version_str[40];
+static int apidev_major;
+
/*
* SRB allocation cache
*/
static struct kmem_cache *srb_cachep;
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -65,13 +72,19 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging, "
"Default is 0 - no logging. 1 - log errors.");
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+ "Set to control shifting of command type processing "
+ "based on total number of SG elements.");
+
static void qla2x00_free_device(scsi_qla_host_t *);
int ql2xfdmienable=1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
- "Enables FDMI registratons "
- "Default is 0 - no FDMI. 1 - perfom FDMI.");
+ "Enables FDMI registrations. "
+ "0 - no FDMI. Default is 1 - perform FDMI.");
#define MAX_Q_DEPTH 32
static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -79,6 +92,19 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
+/* Do not change the value of this after module load */
+int ql2xenabledif = 1;
+module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenabledif,
+ " Enable T10-CRC-DIF "
+ " Default is 0 - No DIF Support. 1 - Enable it");
+
+int ql2xenablehba_err_chk;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+ " Enable T10-CRC-DIF Error isolation by HBA"
+ " Default is 0 - Error isolation disabled, 1 - Enable it");
+
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
@@ -114,6 +140,32 @@ MODULE_PARM_DESC(ql2xetsenable,
"Enables firmware ETS burst."
"Default is 0 - skip ETS enablement.");
+int ql2xdbwr;
+module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+ "Option to specify scheme for request queue posting\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+ "Option to specify reset behaviour\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xtargetreset,
+ "Enable target reset."
+ "Default is 1 - use hw defaults.");
+
+
+int ql2xasynctmfenable;
+module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+ "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
+ "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
/*
* SCSI host template entry points
*/
@@ -183,6 +235,10 @@ qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
+ /* Currently used for 82XX only. */
+ if (vha->device_flags & DFLG_DEV_FAILED)
+ return;
+
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -500,6 +556,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (fcport->drport)
goto qc24_target_busy;
+ if (!vha->flags.difdix_supported &&
+ scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
+ cmd->cmnd[0]));
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -618,6 +682,50 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
return (return_status);
}
+/*
+ * qla2x00_wait_for_reset_ready
+ * Wait until the HBA comes back online (after at most
+ * MAX_RETRIES_OF_ISP_ABORT ISP-abort retries), or until the HBA
+ * ends up disabled (i.e. marked offline) or flash operations
+ * are still in progress.
+ *
+ * Input:
+ * ha - pointer to host adapter structure
+ *
+ * Note:
+ * This routine may sleep (context switch); release any
+ * spinlock held before calling it.
+ *
+ * Return:
+ * Success (Adapter is online/no flash ops) : 0
+ * Failed (Adapter is offline/disabled/flash ops in progress) : 1
+ */
+int
+qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+{
+ int return_status;
+ unsigned long wait_online;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->optrom_state != QLA_SWAITING ||
+ ha->dpc_active) && time_before(jiffies, wait_online))
+ msleep(1000);
+
+ if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
+ return_status = QLA_SUCCESS;
+ else
+ return_status = QLA_FUNCTION_FAILED;
+
+ DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+
+ return return_status;
+}
+
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
@@ -739,7 +847,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp == NULL)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
+ !IS_PROT_IO(sp))
continue;
if (sp->cmd != cmd)
continue;
@@ -805,7 +914,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if (sp->ctx)
+ if ((sp->ctx) && !IS_PROT_IO(sp))
continue;
if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
@@ -834,6 +943,24 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
return status;
}
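+/*
+ * qla82xx_wait_for_pending_commands
+ * Walk the base request queue and wait for each outstanding command
+ * to complete; commands are not aborted here.
+ */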
+void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
+{
+ int cnt;
+ srb_t *sp;
+ struct req_que *req = vha->req;
+
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Waiting for pending commands\n"));
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ sp, WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, vha->hw,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
+
static char *reset_errors[] = {
"HBA not online",
"HBA not ready",
@@ -1004,7 +1131,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
/*
@@ -1020,11 +1147,19 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
+ if (IS_QLA82XX(vha->hw)) {
+ if (!qla82xx_fcoe_ctx_reset(vha)) {
+ /* Ctx reset success */
+ ret = SUCCESS;
+ goto eh_host_reset_lock;
+ }
+ /* fall thru if ctx reset failed */
+ }
if (ha->wq)
flush_workqueue(ha->wq);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
@@ -1064,7 +1199,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
- if (ha->flags.enable_target_reset) {
+ if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
@@ -1078,7 +1213,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
- if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
+ if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): failed: "
@@ -1125,23 +1260,28 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (!sp->ctx) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
+ IS_PROT_IO(sp)) {
sp->cmd->result = res;
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
- del_timer_sync(&ctx->timer);
- ctx->free(sp);
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD) {
+ ctx->u.iocb_cmd->free(sp);
} else {
- struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
- if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ struct fc_bsg_job *bsg_job =
+ ctx->u.bsg_job;
+ if (bsg_job->request->msgcode
+ == FC_BSG_HST_CT)
kfree(sp->fcport);
- sp_bsg->bsg_job->req->errors = 0;
- sp_bsg->bsg_job->reply->result = res;
- sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ bsg_job->req->errors = 0;
+ bsg_job->reply->result = res;
+ bsg_job->job_done(bsg_job);
kfree(sp->ctx);
- mempool_free(sp, ha->srb_mempool);
+ mempool_free(sp,
+ ha->srb_mempool);
}
}
}
@@ -1379,6 +1519,7 @@ static struct isp_operations qla2100_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1414,6 +1555,7 @@ static struct isp_operations qla2300_isp_ops = {
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1449,6 +1591,7 @@ static struct isp_operations qla24xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1483,7 +1626,8 @@ static struct isp_operations qla25xx_isp_ops = {
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
- .start_scsi = qla24xx_start_scsi,
+ .start_scsi = qla24xx_dif_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1519,6 +1663,43 @@ static struct isp_operations qla81xx_isp_ops = {
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
+ .abort_isp = qla2x00_abort_isp,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla82xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla82xx_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla82xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla82xx_abort_isp,
};
static inline void
@@ -1607,10 +1788,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8021:
+ ha->device_type |= DT_ISP8021;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
}
- /* Get adapter physical port no from interrupt pin register. */
- pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+ if (IS_QLA82XX(ha))
+ ha->port_no = !(ha->portnum & 1);
+ else
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+
if (ha->port_no & 1)
ha->flags.port0 = 1;
else
@@ -1624,6 +1817,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
uint16_t msix;
int cpus;
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
qla_printk(KERN_WARNING, ha,
@@ -1767,7 +1963,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
}
@@ -1897,6 +2094,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLA82XX(ha)) {
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_82XX;
+ ha->isp_ops = &qla82xx_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
mutex_init(&ha->vport_lock);
@@ -1969,6 +2179,7 @@ que_init:
" pointers\n");
goto probe_init_failed;
}
+
ha->rsp_q_map[0] = rsp;
ha->req_q_map[0] = req;
rsp->req = req;
@@ -1987,6 +2198,12 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
+ if (IS_QLA82XX(ha)) {
+ req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+ rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+ rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+ }
+
if (qla2x00_initialize_adapter(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");
@@ -1995,6 +2212,14 @@ que_init:
"Adapter flags %x.\n",
base_vha->host_no, base_vha->device_flags));
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ }
+
ret = -ENODEV;
goto probe_failed;
}
@@ -2033,6 +2258,24 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
+ if (IS_QLA25XX(ha) && ql2xenabledif) {
+ if (ha->fw_attributes & BIT_4) {
+ base_vha->flags.difdix_supported = 1;
+ DEBUG18(qla_printk(KERN_INFO, ha,
+ "Registering for DIF/DIX type 1 and 3"
+ " protection.\n"));
+ scsi_host_set_prot(host,
+ SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION);
+ scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+ } else
+ base_vha->flags.difdix_supported = 0;
+ }
+
+ ha->isp_ops->enable_intrs(ha);
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2040,8 +2283,6 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
- ha->isp_ops->enable_intrs(ha);
-
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2083,9 +2324,17 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
- if (ha->iobase)
- iounmap(ha->iobase);
-
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_drv_active(ha);
+ qla82xx_idc_unlock(ha);
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
ha = NULL;
@@ -2152,11 +2401,17 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
- if (ha->iobase)
- iounmap(ha->iobase);
+ if (IS_QLA82XX(ha)) {
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ if (!ql2xdbwr)
+ iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+ } else {
+ if (ha->iobase)
+ iounmap(ha->iobase);
- if (ha->mqiobase)
- iounmap(ha->mqiobase);
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+ }
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -2205,8 +2460,10 @@ qla2x00_free_device(scsi_qla_host_t *vha)
vha->flags.online = 0;
/* turn-off interrupts on the card */
- if (ha->interrupts_on)
+ if (ha->interrupts_on) {
+ vha->flags.init_done = 0;
ha->isp_ops->disable_intrs(ha);
+ }
qla2x00_free_irqs(vha);
@@ -2351,10 +2608,25 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
+ if (IS_QLA82XX(ha)) {
+ /* Allocate cache for CT6 Ctx. */
+ if (!ctx_cachep) {
+ ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+ sizeof(struct ct6_dsd), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ctx_cachep)
+ goto fail_free_gid_list;
+ }
+ ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ ctx_cachep);
+ if (!ha->ctx_mempool)
+ goto fail_free_srb_mempool;
+ }
+
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
- goto fail_free_srb_mempool;
+ goto fail_free_ctx_mempool;
snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
ha->pdev->device);
@@ -2363,6 +2635,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DSD_LIST_DMA_POOL_SIZE, 8, 0);
+ if (!ha->dl_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - dl_dma_pool\n");
+ goto fail_s_dma_pool;
+ }
+
+ ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ FCP_CMND_DMA_POOL_SIZE, 8, 0);
+ if (!ha->fcp_cmnd_dma_pool) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ goto fail_dl_dma_pool;
+ }
+ }
+
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
@@ -2429,16 +2719,28 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->npiv_info = NULL;
/* Get consistent memory allocated for EX-INIT-CB. */
- if (IS_QLA81XX(ha)) {
+ if (IS_QLA8XXX_TYPE(ha)) {
ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
}
+ INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+ /* Get consistent memory allocated for Async Port-Database. */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->async_pd_dma);
+ if (!ha->async_pd)
+ goto fail_async_pd;
+ }
+
INIT_LIST_HEAD(&ha->vp_list);
return 1;
+fail_async_pd:
+ dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
kfree(ha->npiv_info);
fail_npiv_info:
@@ -2465,11 +2767,24 @@ fail_free_ms_iocb:
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
fail_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ ha->fcp_cmnd_dma_pool = NULL;
+ }
+fail_dl_dma_pool:
+ if (IS_QLA82XX(ha) || ql2xenabledif) {
+ dma_pool_destroy(ha->dl_dma_pool);
+ ha->dl_dma_pool = NULL;
+ }
+fail_s_dma_pool:
dma_pool_destroy(ha->s_dma_pool);
ha->s_dma_pool = NULL;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
+fail_free_ctx_mempool:
+ mempool_destroy(ha->ctx_mempool);
+ ha->ctx_mempool = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
@@ -2538,7 +2853,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
if (ha->ex_init_cb)
- dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ dma_pool_free(ha->s_dma_pool,
+ ha->ex_init_cb, ha->ex_init_cb_dma);
+
+ if (ha->async_pd)
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
@@ -2547,14 +2866,39 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
+ if (IS_QLA82XX(ha)) {
+ if (!list_empty(&ha->gbl_dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr,
+ tdsd_ptr, &ha->gbl_dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool,
+ dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+ }
+
+ if (ha->dl_dma_pool)
+ dma_pool_destroy(ha->dl_dma_pool);
+
+ if (ha->fcp_cmnd_dma_pool)
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
- ha->init_cb, ha->init_cb_dma);
+ ha->init_cb, ha->init_cb_dma);
vfree(ha->optrom_buffer);
kfree(ha->nvram);
kfree(ha->npiv_info);
ha->srb_mempool = NULL;
+ ha->ctx_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@@ -2567,8 +2911,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->init_cb_dma = 0;
ha->ex_init_cb = NULL;
ha->ex_init_cb_dma = 0;
+ ha->async_pd = NULL;
+ ha->async_pd_dma = 0;
ha->s_dma_pool = NULL;
+ ha->dl_dma_pool = NULL;
+ ha->fcp_cmnd_dma_pool = NULL;
ha->gid_list = NULL;
ha->gid_list_dma = 0;
@@ -2691,6 +3039,8 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
+qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -2760,6 +3110,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_logout_done(vha, e->u.logio.fcport,
e->u.logio.data);
break;
+ case QLA_EVT_ASYNC_ADISC:
+ qla2x00_async_adisc(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_ASYNC_ADISC_DONE:
+ qla2x00_async_adisc_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
@@ -2785,9 +3143,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
* If the port is not ONLINE then try to login
* to it if we haven't run out of retries.
*/
- if (atomic_read(&fcport->state) !=
- FCS_ONLINE && fcport->login_retry) {
-
+ if (atomic_read(&fcport->state) != FCS_ONLINE &&
+ fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags & FCF_FCP2_DEVICE)
@@ -2798,6 +3155,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->d_id.b.al_pa);
if (IS_ALOGIO_CAPABLE(ha)) {
+ fcport->flags |= FCF_ASYNC_SENT;
data[0] = 0;
data[1] = QLA_LOGIO_LOGIN_RETRIED;
status = qla2x00_post_async_login_work(
@@ -2896,6 +3254,45 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
+ if (IS_QLA82XX(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ qla_printk(KERN_INFO, ha,
+ "HW State: FAILED\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
+
+ if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+ &base_vha->dpc_flags)) {
+
+ DEBUG(printk(KERN_INFO
+ "scsi(%ld): dpc: sched "
+ "qla82xx_fcoe_ctx_reset ha = %p\n",
+ base_vha->host_no, ha));
+ if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags))) {
+ if (qla82xx_fcoe_ctx_reset(base_vha)) {
+ /* FCoE-ctx reset failed.
+ * Escalate to chip-reset
+ */
+ set_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
+ }
+
+ DEBUG(printk("scsi(%ld): dpc:"
+ " qla82xx_fcoe_ctx_reset end\n",
+ base_vha->host_no));
+ }
+ }
+
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
@@ -2905,7 +3302,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
- if (qla2x00_abort_isp(base_vha)) {
+ if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags);
@@ -3038,11 +3435,31 @@ static void
qla2x00_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
+
+ if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+ dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+ scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+ sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+ /* List assured to be having elements */
+ qla2x00_clean_dsd_pool(ha, sp);
+ sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+ }
+
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+ dma_pool_free(ha->dl_dma_pool, sp->ctx,
+ ((struct crc_context *)sp->ctx)->crc_ctx_dma);
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+ }
+
CMD_SP(cmd) = NULL;
}
@@ -3053,8 +3470,18 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
qla2x00_sp_free_dma(sp);
- mempool_free(sp, ha->srb_mempool);
+ if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+ struct ct6_dsd *ctx = sp->ctx;
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
+ ctx->fcp_cmnd_dma);
+ list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
+ ha->gbl_dsd_avail += ctx->dsd_use_cnt;
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
@@ -3079,6 +3506,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+
/* Hardware read to raise pending EEH errors during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
@@ -3143,7 +3573,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
- if (sp->ctx)
+ if (sp->ctx && !IS_PROT_IO(sp))
continue;
sfcp = sp->fcport;
if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3193,6 +3623,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
qla2xxx_wake_dpc(vha);
@@ -3202,7 +3634,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Firmware interface routines. */
-#define FW_BLOBS 7
+#define FW_BLOBS 8
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
@@ -3210,6 +3642,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_ISP24XX 4
#define FW_ISP25XX 5
#define FW_ISP81XX 6
+#define FW_ISP82XX 7
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3218,6 +3651,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
#define FW_FILE_ISP24XX "ql2400_fw.bin"
#define FW_FILE_ISP25XX "ql2500_fw.bin"
#define FW_FILE_ISP81XX "ql8100_fw.bin"
+#define FW_FILE_ISP82XX "ql8200_fw.bin"
static DEFINE_MUTEX(qla_fw_lock);
@@ -3229,6 +3663,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP24XX, },
{ .name = FW_FILE_ISP25XX, },
{ .name = FW_FILE_ISP81XX, },
+ { .name = FW_FILE_ISP82XX, },
};
struct fw_blob *
@@ -3252,6 +3687,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
blob = &qla_fw_blobs[FW_ISP25XX];
} else if (IS_QLA81XX(ha)) {
blob = &qla_fw_blobs[FW_ISP81XX];
+ } else if (IS_QLA82XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP82XX];
}
mutex_lock(&qla_fw_lock);
@@ -3392,11 +3829,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
msleep(1000);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
+ if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- pci_cleanup_aer_uncorrect_error_status(pdev);
DEBUG17(qla_printk(KERN_WARNING, ha,
"slot_reset-return:ret=%x\n", ret));
@@ -3420,6 +3856,8 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"from slot/link_reset");
}
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
ha->flags.eeh_busy = 0;
}
@@ -3445,6 +3883,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3460,6 +3899,10 @@ static struct pci_driver qla2xxx_pci_driver = {
.err_handler = &qla2xxx_err_handler,
};
+static struct file_operations apidev_fops = {
+ .owner = THIS_MODULE,
+};
+
/**
* qla2x00_module_init - Module initialization.
**/
@@ -3488,6 +3931,13 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
return -ENODEV;
}
+
+ apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+ if (apidev_major < 0) {
+ printk(KERN_WARNING "qla2xxx: Unable to register char device "
+ "%s\n", QLA2XXX_APIDEV);
+ }
+
qla2xxx_transport_vport_template =
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
@@ -3513,9 +3963,12 @@ qla2x00_module_init(void)
static void __exit
qla2x00_module_exit(void)
{
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ if (ctx_cachep)
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 8b3de4e54c2..de92504d758 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -423,9 +423,6 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
/* Flash Manipulation Routines */
/*****************************************************************************/
-#define OPTROM_BURST_SIZE 0x1000
-#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
-
static inline uint32_t
flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
{
@@ -565,6 +562,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = FA_FLASH_LAYOUT_ADDR;
else if (IS_QLA81XX(ha))
*start = FA_FLASH_LAYOUT_ADDR_81;
+ else if (IS_QLA82XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_82;
+ goto end;
+ }
/* Begin with first PCI expansion ROM header. */
buf = (uint8_t *)req->ring;
dcode = (uint32_t *)req->ring;
@@ -648,6 +649,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
const uint32_t def_npiv_conf1[] =
{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
FA_NPIV_CONF1_ADDR_81 };
+ const uint32_t fcp_prio_cfg0[] =
+ { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
+ 0 };
+ const uint32_t fcp_prio_cfg1[] =
+ { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
+ 0 };
uint32_t def;
uint16_t *wptr;
uint16_t cnt, chksum;
@@ -703,10 +710,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
break;
case FLT_REG_VPD_0:
ha->flt_region_vpd_nvram = start;
+ if (IS_QLA82XX(ha))
+ break;
if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
+ if (IS_QLA82XX(ha))
+ break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
break;
@@ -732,6 +743,29 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_GOLD_FW:
ha->flt_region_gold_fw = start;
break;
+ case FLT_REG_FCP_PRIO_0:
+ if (ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_FCP_PRIO_1:
+ if (!ha->flags.port0)
+ ha->flt_region_fcp_prio = start;
+ break;
+ case FLT_REG_BOOT_CODE_82XX:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_FW_82XX:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_GOLD_FW_82XX:
+ ha->flt_region_gold_fw = start;
+ break;
+ case FLT_REG_BOOTLOAD_82XX:
+ ha->flt_region_bootload = start;
+ break;
+ case FLT_REG_VPD_82XX:
+ ha->flt_region_vpd = start;
+ break;
}
}
goto done;
@@ -750,12 +784,14 @@ no_flash_data:
ha->flt_region_boot = def_boot[def];
ha->flt_region_vpd_nvram = def_vpd_nvram[def];
ha->flt_region_vpd = ha->flags.port0 ?
- def_vpd0[def]: def_vpd1[def];
+ def_vpd0[def] : def_vpd1[def];
ha->flt_region_nvram = ha->flags.port0 ?
- def_nvram0[def]: def_nvram1[def];
+ def_nvram0[def] : def_nvram1[def];
ha->flt_region_fdt = def_fdt[def];
ha->flt_region_npiv_conf = ha->flags.port0 ?
- def_npiv_conf0[def]: def_npiv_conf1[def];
+ def_npiv_conf0[def] : def_npiv_conf1[def];
+ ha->flt_region_fcp_prio = ha->flags.port0 ?
+ fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
@@ -775,7 +811,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
- uint16_t mid, fid;
+ uint16_t mid = 0, fid = 0;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
@@ -816,6 +852,10 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
goto done;
no_flash_data:
loc = locations[0];
+ if (IS_QLA82XX(ha)) {
+ ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ goto done;
+ }
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
mid = man_id;
fid = flash_id;
@@ -853,6 +893,31 @@ done:
ha->fdt_block_size));
}
+static void
+qla2xxx_get_idc_param(scsi_qla_host_t *vha)
+{
+#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
+ uint32_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ if (!IS_QLA82XX(ha))
+ return;
+
+ wptr = (uint32_t *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+ QLA82XX_IDC_PARAM_ADDR, 8);
+
+ if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+ ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+ ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+ } else {
+ ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->nx_reset_timeout = le32_to_cpu(*wptr);
+ }
+ return;
+}
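The routine above reads two 32-bit IDC timeout parameters from flash and, when the region is still erased (a read of 0xffffffff), falls back to the ROM defaults. A minimal sketch of that fallback idiom, independent of the driver; the helper name flash_param_or_default() is illustrative and not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Treat an erased-flash (all ones) word as "unprogrammed" and use a default. */
static u32 flash_param_or_default(__le32 raw, u32 def)
{
	if (raw == cpu_to_le32(0xffffffff))
		return def;
	return le32_to_cpu(raw);
}

qla2xxx_get_idc_param() applies the same test once, on the first dword, and then either takes both QLA82XX_ROM_* defaults or converts both flash words with le32_to_cpu().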
+
int
qla2xxx_get_flash_info(scsi_qla_host_t *vha)
{
@@ -860,7 +925,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
uint32_t flt_addr;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return QLA_SUCCESS;
ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -869,6 +934,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
qla2xxx_get_flt_info(vha, flt_addr);
qla2xxx_get_fdt_info(vha);
+ qla2xxx_get_idc_param(vha);
return QLA_SUCCESS;
}
@@ -885,7 +951,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
struct qla_npiv_entry *entry;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
return;
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1178,6 +1244,9 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return buf;
+
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
@@ -1233,6 +1302,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
+ if (IS_QLA82XX(ha))
+ return ret;
+
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -1344,6 +1416,9 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ if (IS_QLA82XX(ha))
+ return;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Save the Original GPIOE. */
@@ -1525,6 +1600,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1567,6 +1645,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ if (IS_QLA82XX(ha))
+ return QLA_SUCCESS;
+
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
@@ -2576,6 +2657,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
int i;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA82XX(ha))
+ return ret;
+
if (!mbuf)
return QLA_FUNCTION_FAILED;
@@ -2722,3 +2806,50 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
return 0;
}
+
+int
+qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
+{
+ int len, max_len;
+ uint32_t fcp_prio_addr;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->fcp_prio_cfg) {
+ ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+ if (!ha->fcp_prio_cfg) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for fcp priority data "
+ "(%x).\n", FCP_PRIO_CFG_SIZE);
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+ memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+
+ fcp_prio_addr = ha->flt_region_fcp_prio;
+
+ /* first read the fcp priority data header from flash */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+ fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
+
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
+ goto fail;
+
+ /* read remaining FCP CMD config data from flash */
+ fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
+ len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+ fcp_prio_addr << 2, (len < max_len ? len : max_len));
+
+ /* revalidate the entire FCP priority config data, including entries */
+ if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
+ goto fail;
+
+ ha->flags.fcp_prio_enabled = 1;
+ return QLA_SUCCESS;
+fail:
+ vfree(ha->fcp_prio_cfg);
+ ha->fcp_prio_cfg = NULL;
+ return QLA_FUNCTION_FAILED;
+}
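qla24xx_read_fcp_prio_cfg() above follows a two-phase flow: read a fixed-size header, validate it, then read the variable-length entries while clamping the length derived from the on-flash entry count to the space left in the buffer, and finally revalidate the whole blob. A compact sketch of that shape under the same assumptions; read_region() (returning 0 on success), hdr_is_valid() and num_entries() are hypothetical stand-ins for isp_ops->read_optrom() and qla24xx_fcp_prio_cfg_valid(), and the dword address shifting done by the driver is omitted:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static void *read_bounded_blob(size_t total_size, size_t hdr_size, size_t entry_size,
			       int (*read_region)(void *dst, size_t off, size_t len),
			       bool (*hdr_is_valid)(const void *blob),
			       size_t (*num_entries)(const void *blob))
{
	void *blob = vmalloc(total_size);
	size_t len, max_len;

	if (!blob)
		return NULL;
	memset(blob, 0, total_size);

	/* Phase 1: header only, then sanity-check it. */
	if (read_region(blob, 0, hdr_size) || !hdr_is_valid(blob))
		goto fail;

	/* Phase 2: entries, never trusting the on-flash count past the buffer. */
	len = num_entries(blob) * entry_size;
	max_len = total_size - hdr_size;
	if (read_region(blob + hdr_size, hdr_size, min(len, max_len)))
		goto fail;

	return blob;
fail:
	vfree(blob);
	return NULL;
}

In the driver the clamp is the (len < max_len ? len : max_len) expression, and the second qla24xx_fcp_prio_cfg_valid() call rechecks the data once the entries are in place before fcp_prio_enabled is set.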
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 81b5f29254e..428802616e3 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -114,6 +114,7 @@
*/
#define MAC_ADDR_LEN 6 /* in bytes */
#define IP_ADDR_LEN 4 /* in bytes */
+#define IPv6_ADDR_LEN 16 /* IPv6 address size */
#define DRIVER_NAME "qla4xxx"
#define MAX_LINKED_CMDS_PER_LUN 3
@@ -147,6 +148,8 @@
#define MAX_RESET_HA_RETRIES 2
+#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+
/*
* SCSI Request Block structure (srb) that is placed
* on cmd->SCp location of every I/O [We have 22 bytes available]
@@ -169,7 +172,7 @@ struct srb {
struct scsi_cmnd *cmd; /* (4) SCSI command block */
dma_addr_t dma_handle; /* (4) for unmap of single transfers */
- atomic_t ref_count; /* reference count for this srb */
+ struct kref srb_ref; /* reference count for this srb */
uint32_t fw_ddb_index;
uint8_t err_id; /* error id */
#define SRB_ERR_PORT 1 /* Request failed because "port down" */
@@ -220,7 +223,7 @@ struct ddb_entry {
uint16_t os_target_id; /* Target ID */
uint16_t fw_ddb_index; /* DDB firmware index */
- uint8_t reserved[2];
+ uint16_t options;
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
uint32_t CmdSn;
@@ -245,10 +248,18 @@ struct ddb_entry {
uint16_t port;
uint32_t tpgt;
- uint8_t ip_addr[ISCSI_IPADDR_SIZE];
+ uint8_t ip_addr[IP_ADDR_LEN];
uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
uint8_t iscsi_alias[0x20];
uint8_t isid[6];
+ uint16_t iscsi_max_burst_len;
+ uint16_t iscsi_max_outsnd_r2t;
+ uint16_t iscsi_first_burst_len;
+ uint16_t iscsi_max_rcv_data_seg_len;
+ uint16_t iscsi_max_snd_data_seg_len;
+
+ struct in6_addr remote_ipv6_addr;
+ struct in6_addr link_local_ipv6_addr;
};
/*
@@ -301,6 +312,7 @@ struct scsi_qla_host {
#define DPC_ISNS_RESTART 7 /* 0x00000080 */
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_LINK_CHANGED 18 /* 0x00040000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -320,8 +332,7 @@ struct scsi_qla_host {
#define MIN_IOBASE_LEN 0x100
uint16_t req_q_count;
- uint8_t marker_needed;
- uint8_t rsvd1;
+ uint8_t rsvd1[2];
unsigned long host_no;
@@ -441,8 +452,35 @@ struct scsi_qla_host {
/* Saved srb for status continuation entry processing */
struct srb *status_srb;
+
+ /* IPv6 support info from InitFW */
+ uint8_t acb_version;
+ uint8_t ipv4_addr_state;
+ uint16_t ipv4_options;
+
+ uint32_t resvd2;
+ uint32_t ipv6_options;
+ uint32_t ipv6_addl_options;
+ uint8_t ipv6_link_local_state;
+ uint8_t ipv6_addr0_state;
+ uint8_t ipv6_addr1_state;
+ uint8_t ipv6_default_router_state;
+ struct in6_addr ipv6_link_local_addr;
+ struct in6_addr ipv6_addr0;
+ struct in6_addr ipv6_addr1;
+ struct in6_addr ipv6_default_router_addr;
};
+static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
+{
+ return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+}
+
static inline int is_qla4010(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 9cd7a608df3..855226e0866 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -215,6 +215,7 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_PING 0x000B
+#define MBOX_CMD_ABORT_TASK 0x0015
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_TARGET_WARM_RESET 0x0017
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
@@ -258,13 +259,15 @@ union external_hw_config_reg {
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
#define FW_STATE_CONFIG_WAIT 0x0001
-#define FW_STATE_WAIT_LOGIN 0x0002
+#define FW_STATE_WAIT_AUTOCONNECT 0x0002
#define FW_STATE_ERROR 0x0004
-#define FW_STATE_DHCP_IN_PROGRESS 0x0008
+#define FW_STATE_CONFIGURING_IP 0x0008
/* Mailbox 3 */
#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
-#define FW_ADDSTATE_DHCP_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002
+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004
+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
@@ -320,6 +323,8 @@ union external_hw_config_reg {
/* Host Adapter Initialization Control Block (from host) */
struct addr_ctrl_blk {
uint8_t version; /* 00 */
+#define IFCB_VER_MIN 0x01
+#define IFCB_VER_MAX 0x02
uint8_t control; /* 01 */
uint16_t fw_options; /* 02-03 */
@@ -351,11 +356,16 @@ struct addr_ctrl_blk {
uint16_t iscsi_opts; /* 30-31 */
uint16_t ipv4_tcp_opts; /* 32-33 */
uint16_t ipv4_ip_opts; /* 34-35 */
+#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000
uint16_t iscsi_max_pdu_size; /* 36-37 */
uint8_t ipv4_tos; /* 38 */
uint8_t ipv4_ttl; /* 39 */
uint8_t acb_version; /* 3A */
+#define ACB_NOT_SUPPORTED 0x00
+#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2
+ Features */
+
uint8_t res2; /* 3B */
uint16_t def_timeout; /* 3C-3D */
uint16_t iscsi_fburst_len; /* 3E-3F */
@@ -397,16 +407,35 @@ struct addr_ctrl_blk {
uint32_t cookie; /* 200-203 */
uint16_t ipv6_port; /* 204-205 */
uint16_t ipv6_opts; /* 206-207 */
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
+
uint16_t ipv6_addtl_opts; /* 208-209 */
+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
+ Only */
+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
+
uint16_t ipv6_tcp_opts; /* 20A-20B */
uint8_t ipv6_tcp_wsf; /* 20C */
uint16_t ipv6_flow_lbl; /* 20D-20F */
- uint8_t ipv6_gw_addr[16]; /* 210-21F */
+ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
uint16_t ipv6_vlan_tag; /* 220-221 */
uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
uint8_t ipv6_addr0_state; /* 223 */
uint8_t ipv6_addr1_state; /* 224 */
- uint8_t ipv6_gw_state; /* 225 */
+#define IP_ADDRSTATE_UNCONFIGURED 0
+#define IP_ADDRSTATE_INVALID 1
+#define IP_ADDRSTATE_ACQUIRING 2
+#define IP_ADDRSTATE_TENTATIVE 3
+#define IP_ADDRSTATE_DEPRICATED 4
+#define IP_ADDRSTATE_PREFERRED 5
+#define IP_ADDRSTATE_DISABLING 6
+
+ uint8_t ipv6_dflt_rtr_state; /* 225 */
+#define IPV6_RTRSTATE_UNKNOWN 0
+#define IPV6_RTRSTATE_MANUAL 1
+#define IPV6_RTRSTATE_ADVERTISED 3
+#define IPV6_RTRSTATE_STALE 4
+
uint8_t ipv6_traffic_class; /* 226 */
uint8_t ipv6_hop_limit; /* 227 */
uint8_t ipv6_if_id[8]; /* 228-22F */
@@ -424,7 +453,7 @@ struct addr_ctrl_blk {
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
- struct addr_ctrl_blk sec;
+/* struct addr_ctrl_blk sec;*/
};
/*************************************************************************/
@@ -433,6 +462,9 @@ struct dev_db_entry {
uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
+#define DDB_OPT_IPV6_DEVICE 0x100
+#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
uint16_t exec_throttle; /* 02-03 */
uint16_t exec_count; /* 04-05 */
@@ -468,7 +500,7 @@ struct dev_db_entry {
* pointer to a string so we
* don't have to reserve soooo
* much RAM */
- uint8_t ipv6_addr[0x10];/* 1A0-1AF */
+ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
uint16_t ddb_link; /* 1C0-1C1 */
uint16_t chap_tbl_idx; /* 1C2-1C3 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 96ebfb021f6..c4636f6cb3c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -25,6 +25,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
int qla4xxx_relogin_device(struct scsi_qla_host * ha,
struct ddb_entry * ddb_entry);
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun);
int qla4xxx_reset_target(struct scsi_qla_host * ha,
@@ -65,13 +66,14 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
int qla4xxx_init_rings(struct scsi_qla_host * ha);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index);
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
+void qla4xxx_srb_compl(struct kref *ref);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
-int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
- uint32_t fw_ddb_index, uint32_t state);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 92329a461c6..5510df8a7fa 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -189,6 +189,78 @@ static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
return qla4xxx_get_firmware_status(ha);
}
+static uint8_t
+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
+{
+ uint8_t ipv4_wait = 0;
+ uint8_t ipv6_wait = 0;
+ int8_t ip_address[IPv6_ADDR_LEN] = {0} ;
+
+ /* If both IPv4 & IPv6 are enabled, possibly only one
+ * IP address may be acquired, so check to see if we
+ * need to wait for another */
+ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
+ if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
+ ((ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
+ ipv4_wait = 1;
+ }
+ if (((ha->ipv6_addl_options &
+ IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+ ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+
+ ipv6_wait = 1;
+
+ if ((ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
+ (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "Preferred IP configured."
+ " Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if (memcmp(&ha->ipv6_default_router_addr, ip_address,
+ IPv6_ADDR_LEN) == 0) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "No Router configured. "
+ "Don't wait!\n", ha->host_no,
+ __func__));
+ ipv6_wait = 0;
+ }
+ if ((ha->ipv6_default_router_state ==
+ IPV6_RTRSTATE_MANUAL) &&
+ (ha->ipv6_link_local_state ==
+ IP_ADDRSTATE_TENTATIVE) &&
+ (memcmp(&ha->ipv6_link_local_addr,
+ &ha->ipv6_default_router_addr, 4) == 0)) {
+ DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
+ "IP configured. Don't wait!\n",
+ ha->host_no, __func__));
+ ipv6_wait = 0;
+ }
+ }
+ if (ipv4_wait || ipv6_wait) {
+ DEBUG2(printk("scsi%ld: %s: Wait for additional "
+ "IP(s) \"", ha->host_no, __func__));
+ if (ipv4_wait)
+ DEBUG2(printk("IPv4 "));
+ if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6LinkLocal "));
+ if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr0 "));
+ if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+ DEBUG2(printk("IPv6Addr1 "));
+ DEBUG2(printk("\"\n"));
+ }
+ }
+
+ return ipv4_wait|ipv6_wait;
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -226,38 +298,80 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
continue;
}
+ if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ "AUTOCONNECT in progress\n",
+ ha->host_no, __func__));
+ }
+
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+ " CONFIGURING IP\n",
+ ha->host_no, __func__));
+ /*
+ * Check for link state after 15 secs and if link is
+ * still DOWN then, cable is unplugged. Ignore "DHCP
+ * in Progress/CONFIGURING IP" bit to check if firmware
+ * is in ready state or not after 15 secs.
+ * This is applicable for both 2.x & 3.x firmware
+ */
+ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
+ if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
+ " LINK UP (Cable plugged)\n",
+ ha->host_no, __func__));
+ } else if (ha->firmware_state &
+ (FW_STATE_CONFIGURING_IP |
+ FW_STATE_READY)) {
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+ "LINK DOWN (Cable unplugged)\n",
+ ha->host_no, __func__));
+ ha->firmware_state = FW_STATE_READY;
+ }
+ }
+ }
+
if (ha->firmware_state == FW_STATE_READY) {
- DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n"));
- /* The firmware is ready to process SCSI commands. */
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: MEDIA TYPE - %s\n",
- ha->host_no,
- __func__, (ha->addl_fw_state &
- FW_ADDSTATE_OPTICAL_MEDIA)
- != 0 ? "OPTICAL" : "COPPER"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: DHCP STATE Enabled "
- "%s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_DHCP_ENABLED) != 0 ?
- "YES" : "NO"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: LINK %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_LINK_UP) != 0 ?
- "UP" : "DOWN"));
- DEBUG2(dev_info(&ha->pdev->dev,
- "scsi%ld: %s: iSNS Service "
- "Started %s\n",
- ha->host_no, __func__,
- (ha->addl_fw_state &
- FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
- "YES" : "NO"));
-
- ready = 1;
- break;
+ /* If DHCP IP Addr is available, retrieve it now. */
+ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
+ &ha->dpc_flags))
+ qla4xxx_get_dhcp_ip_address(ha);
+
+ if (!qla4xxx_wait_for_ip_config(ha) ||
+ timeout_count == 1) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "Firmware Ready..\n"));
+ /* The firmware is ready to process SCSI
+ commands. */
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: MEDIA TYPE"
+ " - %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_OPTICAL_MEDIA)
+ != 0 ? "OPTICAL" : "COPPER"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: DHCPv4 STATE"
+ " Enabled %s\n", ha->host_no,
+ __func__, (ha->addl_fw_state &
+ FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
+ "YES" : "NO"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: LINK %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_LINK_UP) != 0 ?
+ "UP" : "DOWN"));
+ DEBUG2(dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: iSNS Service "
+ "Started %s\n",
+ ha->host_no, __func__,
+ (ha->addl_fw_state &
+ FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+ "YES" : "NO"));
+
+ ready = 1;
+ break;
+ }
}
DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
"seconds expired= %d\n", ha->host_no, __func__,
@@ -272,15 +386,19 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
msleep(1000);
} /* end of for */
- if (timeout_count == 0)
+ if (timeout_count <= 0)
DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
ha->host_no, __func__));
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) {
- DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to"
- " grab an IP address from DHCP server\n",
- ha->host_no, __func__));
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
+ "it's waiting to configure an IP address\n",
+ ha->host_no, __func__));
ready = 1;
+ } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+ DEBUG2(printk("scsi%ld: %s: FW initialized, but "
+ "auto-discovery still in process\n",
+ ha->host_no, __func__));
}
return ready;
@@ -387,6 +505,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
int status = QLA_ERROR;
+ uint32_t conn_err;
if (ddb_entry == NULL) {
DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
@@ -407,7 +526,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
fw_ddb_entry_dma, NULL, NULL,
- &ddb_entry->fw_ddb_device_state, NULL,
+ &ddb_entry->fw_ddb_device_state, &conn_err,
&ddb_entry->tcp_source_port_num,
&ddb_entry->connection_id) ==
QLA_ERROR) {
@@ -419,6 +538,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
}
status = QLA_SUCCESS;
+ ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
ddb_entry->task_mgmt_timeout =
le16_to_cpu(fw_ddb_entry->def_timeout);
@@ -442,11 +562,44 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
- DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
- ha->host_no, __func__, fw_ddb_index,
- ddb_entry->fw_ddb_device_state, status));
-
- exit_update_ddb:
+ ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
+ ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
+ ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
+ ddb_entry->iscsi_max_rcv_data_seg_len =
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len;
+ ddb_entry->iscsi_max_snd_data_seg_len =
+ fw_ddb_entry->iscsi_max_snd_data_seg_len;
+
+ if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ memcpy(&ddb_entry->remote_ipv6_addr,
+ fw_ddb_entry->ip_addr,
+ min(sizeof(ddb_entry->remote_ipv6_addr),
+ sizeof(fw_ddb_entry->ip_addr)));
+ memcpy(&ddb_entry->link_local_ipv6_addr,
+ fw_ddb_entry->link_local_ipv6_addr,
+ min(sizeof(ddb_entry->link_local_ipv6_addr),
+ sizeof(fw_ddb_entry->link_local_ipv6_addr)));
+
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI6 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+ } else
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
+ "State %04x ConnErr %08x IP %pI4 "
+ ":%04d \"%s\"\n",
+ __func__, fw_ddb_index,
+ ddb_entry->os_target_id,
+ ddb_entry->fw_ddb_device_state,
+ conn_err, fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name));
+exit_update_ddb:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
@@ -492,6 +645,40 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
}
/**
+ * qla4_is_relogin_allowed - Are we allowed to login?
+ * @ha: Pointer to host adapter structure.
+ * @conn_err: Last connection error associated with the ddb
+ *
+ * This routine tests the given connection error to determine if
+ * we are allowed to login.
+ **/
+int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
+{
+ uint32_t err_code, login_rsp_sts_class;
+ int relogin = 1;
+
+ err_code = ((conn_err & 0x00ff0000) >> 16);
+ login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
+ if (err_code == 0x1c || err_code == 0x06) {
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, send target completed"
+ " or access denied failure\n", conn_err));
+ relogin = 0;
+ }
+ if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
+ /* Login Response PDU returned an error.
+ Login Response Status in Error Code Detail
+ indicates login should not be retried.*/
+ DEBUG2(dev_info(&ha->pdev->dev,
+ ": conn_err=0x%08x, do not retry relogin\n",
+ conn_err));
+ relogin = 0;
+ }
+
+ return relogin;
+}
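qla4_is_relogin_allowed() above treats conn_err as a packed word: bits 23:16 carry what the driver calls the error code (0x1c and 0x06 are the send-targets-completed / access-denied failures) and bits 15:8 the iSCSI Login Response status class (class 2 together with error code 0x08 means the login response said not to retry). A standalone sketch of the decode; the struct and function names are illustrative only, not driver code:

#include <linux/types.h>

struct conn_err_fields {
	u8 err_code;		/* bits 23:16 of the connection error word */
	u8 login_rsp_sts_class;	/* bits 15:8 */
};

static struct conn_err_fields decode_conn_err(u32 conn_err)
{
	struct conn_err_fields f = {
		.err_code            = (conn_err & 0x00ff0000) >> 16,
		.login_rsp_sts_class = (conn_err & 0x0000ff00) >> 8,
	};

	return f;
}

Relogin is suppressed exactly when one of those two conditions decodes true, which is the test reused from qla4_test_rdy() and qla4xxx_process_ddb_changed() later in this patch.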
+
+/**
* qla4xxx_configure_ddbs - builds driver ddb list
* @ha: Pointer to host adapter structure.
*
@@ -505,18 +692,30 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t fw_ddb_index = 0;
uint32_t next_fw_ddb_index = 0;
uint32_t ddb_state;
- uint32_t conn_err, err_code;
+ uint32_t conn_err;
struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ipv6_device;
uint32_t new_tgt;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
fw_ddb_index = next_fw_ddb_index) {
/* First, let's see if a device exists here */
- if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL,
- &next_fw_ddb_index, &ddb_state,
- &conn_err, NULL, NULL) ==
- QLA_ERROR) {
+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
+ 0, NULL, &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL) ==
+ QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
"fw_ddb_index %d failed", ha->host_no,
__func__, fw_ddb_index));
@@ -533,18 +732,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
/* Try and login to device */
DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
ha->host_no, __func__, fw_ddb_index));
- err_code = ((conn_err & 0x00ff0000) >> 16);
- if (err_code == 0x1c || err_code == 0x06) {
- DEBUG2(printk("scsi%ld: %s send target "
- "completed "
- "or access denied failure\n",
- ha->host_no, __func__));
- } else {
+ ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
+ DDB_OPT_IPV6_DEVICE;
+ if (qla4_is_relogin_allowed(ha, conn_err) &&
+ ((!ipv6_device &&
+ *((uint32_t *)fw_ddb_entry->ip_addr))
+ || ipv6_device)) {
qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
- NULL, 0, NULL, &next_fw_ddb_index,
- &ddb_state, &conn_err, NULL, NULL)
- == QLA_ERROR) {
+ NULL, 0, NULL,
+ &next_fw_ddb_index,
+ &ddb_state, &conn_err,
+ NULL, NULL)
+ == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s:"
"get_ddb_entry %d failed\n",
ha->host_no,
@@ -599,7 +799,6 @@ next_one:
struct qla4_relog_scan {
int halt_wait;
uint32_t conn_err;
- uint32_t err_code;
uint32_t fw_ddb_index;
uint32_t next_fw_ddb_index;
uint32_t fw_ddb_device_state;
@@ -609,18 +808,7 @@ static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
{
struct ddb_entry *ddb_entry;
- /*
- * Don't want to do a relogin if connection
- * error is 0x1c.
- */
- rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
- if (rs->err_code == 0x1c || rs->err_code == 0x06) {
- DEBUG2(printk(
- "scsi%ld: %s send target"
- " completed or "
- "access denied failure\n",
- ha->host_no, __func__));
- } else {
+ if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
/* We either have a device that is in
* the process of relogging in or a
* device that is waiting to be
@@ -908,7 +1096,7 @@ static void qla4x00_pci_config(struct scsi_qla_host *ha)
static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
- uint32_t max_wait_time;
+ unsigned long max_wait_time;
unsigned long flags;
uint32_t mbox_status;
@@ -940,7 +1128,10 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for firmware to come UP. */
- max_wait_time = FIRMWARE_UP_TOV * 4;
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
+ "boot firmware to complete...\n",
+ ha->host_no, __func__, FIRMWARE_UP_TOV));
+ max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
do {
uint32_t ctrl_status;
@@ -954,16 +1145,15 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
break;
- DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to "
- "complete... ctrl_sts=0x%x, remaining=%d\n",
- ha->host_no, __func__, ctrl_status,
- max_wait_time));
+ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
+ "firmware to complete... ctrl_sts=0x%x\n",
+ ha->host_no, __func__, ctrl_status));
- msleep(250);
- } while ((max_wait_time--));
+ msleep_interruptible(250);
+ } while (!time_after_eq(jiffies, max_wait_time));
if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
- DEBUG(printk("scsi%ld: %s: Firmware has started\n",
+ DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
ha->host_no, __func__));
spin_lock_irqsave(&ha->hardware_lock, flags);
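The hunk above converts the boot-firmware wait in qla4xxx_start_firmware_from_flash() from a decrementing loop counter to a jiffies deadline checked with time_after_eq(), which keeps the timeout tied to wall-clock time even if an iteration takes longer than the 250 ms sleep and is safe across jiffies wrap. A minimal sketch of the idiom; wait_with_deadline() and the poll_done() predicate are illustrative, not driver code:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int wait_with_deadline(unsigned int timeout_secs, bool (*poll_done)(void))
{
	unsigned long deadline = jiffies + timeout_secs * HZ;

	do {
		if (poll_done())
			return 0;
		msleep_interruptible(250);	/* sleep instead of busy-waiting */
	} while (!time_after_eq(jiffies, deadline));

	return -ETIMEDOUT;
}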
@@ -1141,6 +1331,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
int status = QLA_ERROR;
int8_t ip_address[IP_ADDR_LEN] = {0} ;
+ clear_bit(AF_ONLINE, &ha->flags);
ha->eeprom_cmd_data = 0;
qla4x00_pci_config(ha);
@@ -1166,7 +1357,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
* the ddb_list and wait for DHCP lease acquired aen to come in
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
- if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
+ if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
goto exit_init_online;
/* Skip device discovery if ip and subnet is zero */
@@ -1270,8 +1461,8 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
*
* This routine processes a Device Database Changed AEN Event.
**/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index, uint32_t state)
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
{
struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
@@ -1318,19 +1509,24 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* the device came back.
*/
} else {
- /* Device went away, try to relogin. */
- /* Mark device missing */
- if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
+ /* Device went away, mark device missing */
+ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
+ DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing "
+ "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
+ __func__, ddb_entry,
+ ddb_entry->sess, ddb_entry->conn));
qla4xxx_mark_device_missing(ha, ddb_entry);
+ }
+
/*
* Relogin if device state changed to a not active state.
- * However, do not relogin if this aen is a result of an IOCTL
- * logout (DF_NO_RELOGIN) or if this is a discovered device.
+ * However, do not relogin if a RELOGIN is in process, or
+ * we are not allowed to relogin to this DDB.
*/
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
!test_bit(DF_RELOGIN, &ddb_entry->flags) &&
!test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
- !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) {
+ qla4_is_relogin_allowed(ha, conn_err)) {
/*
* This triggers a relogin. After the relogin_timer
* expires, the relogin gets scheduled. We must wait a
@@ -1338,7 +1534,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
* with failed device_state or a logout response before
* we can issue another relogin.
*/
- /* Firmware padds this timeout: (time2wait +1).
+ /* Firmware pads this timeout: (time2wait +1).
* Driver retry to login should be longer than F/W.
* Otherwise F/W will fail
* set_ddb() mbx cmd with 0x4005 since it still
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e0c32159749..e66f3f263f4 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -299,7 +299,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
wmb();
- srb->cmd->host_scribble = (unsigned char *)srb;
+ srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
/* update counters */
srb->state = SRB_ACTIVE_STATE;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index c196d55eae3..596c3031483 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -97,7 +97,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
/* Place command on done queue. */
if (srb->req_sense_len == 0) {
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
ha->status_srb = NULL;
}
}
@@ -329,7 +329,7 @@ status_entry_exit:
/* complete the request, if not waiting for status_continuation pkt */
srb->cc_stat = sts_entry->completionStatus;
if (ha->status_srb == NULL)
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
@@ -393,7 +393,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
/* RETRY normally by sending it back with
* DID_BUS_BUSY */
srb->cmd->result = DID_BUS_BUSY << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
break;
case ET_CONTINUE:
@@ -498,15 +498,22 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
break;
case MBOX_ASTS_LINK_UP:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
- ha->host_no, mbox_status));
set_bit(AF_LINK_UP, &ha->flags);
+ if (test_bit(AF_INIT_DONE, &ha->flags))
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK UP\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_LINK_DOWN:
- DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
- ha->host_no, mbox_status));
clear_bit(AF_LINK_UP, &ha->flags);
+ set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+ DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
+ " LINK DOWN\n", ha->host_no,
+ mbox_status));
break;
case MBOX_ASTS_HEARTBEAT:
@@ -831,7 +838,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
qla4xxx_reinitialize_ddb_list(ha);
} else if (mbox_sts[1] == 1) { /* Specific device. */
qla4xxx_process_ddb_changed(ha, mbox_sts[2],
- mbox_sts[3]);
+ mbox_sts[3], mbox_sts[4]);
}
break;
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index caeb7d10ae0..75496fb0ae7 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -172,108 +172,207 @@ mbox_exit:
return status;
}
+uint8_t
+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+ mbox_cmd[1] = 0;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+ mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
+
+ if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_INITIALIZE_FIRMWARE"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+uint8_t
+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+ memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+ memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+ mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+ mbox_cmd[2] = LSDW(init_fw_cb_dma);
+ mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+ QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+ "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+ " failed w/ status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ }
+ return QLA_SUCCESS;
+}
+
+void
+qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+ struct addr_ctrl_blk *init_fw_cb)
+{
+ /* Save IPv4 Address Info */
+ memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+
+ if (is_ipv6_enabled(ha)) {
+ /* Save IPv6 Address */
+ ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
+ ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
+ ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
+ ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+ ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+ memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
+ init_fw_cb->ipv6_if_id,
+ min(sizeof(ha->ipv6_link_local_addr)/2,
+ sizeof(init_fw_cb->ipv6_if_id)));
+ memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
+ min(sizeof(ha->ipv6_addr0),
+ sizeof(init_fw_cb->ipv6_addr0)));
+ memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
+ min(sizeof(ha->ipv6_addr1),
+ sizeof(init_fw_cb->ipv6_addr1)));
+ memcpy(&ha->ipv6_default_router_addr,
+ init_fw_cb->ipv6_dflt_rtr_addr,
+ min(sizeof(ha->ipv6_default_router_addr),
+ sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+ }
+}
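qla4xxx_update_local_ip() above assembles the link-local address by writing the fe80 prefix into the first two bytes of the in6_addr and copying the 8-byte interface ID from the firmware control block into bytes 8-15 (the in6_u.u6_addr8 spelling used in the patch is the storage that s6_addr aliases). A self-contained sketch of that construction; make_link_local() is an illustrative name, not driver code:

#include <linux/in6.h>
#include <linux/string.h>
#include <linux/types.h>

static void make_link_local(struct in6_addr *addr, const u8 if_id[8])
{
	memset(addr, 0, sizeof(*addr));
	addr->s6_addr[0] = 0xfe;		/* fe80::/10 link-local prefix */
	addr->s6_addr[1] = 0x80;
	memcpy(&addr->s6_addr[8], if_id, 8);	/* interface identifier */
}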
+
+uint8_t
+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+ uint32_t *mbox_cmd,
+ uint32_t *mbox_sts,
+ struct addr_ctrl_blk *init_fw_cb,
+ dma_addr_t init_fw_cb_dma)
+{
+ if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+
+ /* Save some info in adapter structure. */
+ ha->acb_version = init_fw_cb->acb_version;
+ ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+ ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+ ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+ ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
+ ha->heartbeat_interval = init_fw_cb->hb_interval;
+ memcpy(ha->name_string, init_fw_cb->iscsi_name,
+ min(sizeof(ha->name_string),
+ sizeof(init_fw_cb->iscsi_name)));
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+
+ /* Save Command Line Parameter info */
+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
+ ha->discovery_wait = ql4xdiscoverywait;
+
+ if (ha->acb_version == ACB_SUPPORTED) {
+ ha->ipv6_options = init_fw_cb->ipv6_opts;
+ ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
+ }
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+
+ return QLA_SUCCESS;
+}
+
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
return 10;
}
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
- return status;
+ goto exit_init_fw_cb;
}
/* Initialize request and response queues. */
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
- init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
- init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
- init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
- init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
- init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
- init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
- init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
- init_fw_cb->pri.shdwreg_addr_lo =
- cpu_to_le32(LSDW(ha->shadow_regs_dma));
- init_fw_cb->pri.shdwreg_addr_hi =
- cpu_to_le32(MSDW(ha->shadow_regs_dma));
+ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+ init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
- init_fw_cb->pri.fw_options |=
+ init_fw_cb->fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
-
- /* Save some info in adapter structure. */
- ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
- ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
- ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
- memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
- min(sizeof(ha->name_string),
- sizeof(init_fw_cb->pri.iscsi_name)));
- /*memcpy(ha->alias, init_fw_cb->Alias,
- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
-
- /* Save Command Line Paramater info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
- ha->discovery_wait = ql4xdiscoverywait;
+ init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
- /* Send Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
- mbox_cmd[1] = 0;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+ if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+ != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
+ }
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
- QLA_SUCCESS)
- status = QLA_SUCCESS;
- else {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
- "failed w/ status %04X\n", ha->host_no, __func__,
- mbox_sts[0]));
+ if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+ init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+ DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+ ha->host_no, __func__));
+ goto exit_init_fw_cb;
}
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ status = QLA_SUCCESS;
+
+exit_init_fw_cb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return status;
}
@@ -284,13 +383,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
**/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
- struct init_fw_ctrl_blk *init_fw_cb;
+ struct addr_ctrl_blk *init_fw_cb;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
@@ -299,35 +398,21 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
-
- memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
- mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
- mbox_cmd[2] = LSDW(init_fw_cb_dma);
- mbox_cmd[3] = MSDW(init_fw_cb_dma);
- mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
-
- if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+ if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
dma_free_coherent(&ha->pdev->dev,
- sizeof(struct init_fw_ctrl_blk),
+ sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return QLA_ERROR;
}
/* Save IP Address. */
- memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
- memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
- memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
-
- dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
+ qla4xxx_update_local_ip(ha, init_fw_cb);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+ init_fw_cb, init_fw_cb_dma);
return QLA_SUCCESS;
}
@@ -409,6 +494,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t *connection_id)
{
int status = QLA_ERROR;
+ uint16_t options;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -441,14 +527,26 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
goto exit_get_fwddb;
}
if (fw_ddb_entry) {
- dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
- "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
- fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
- fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
- fw_ddb_entry->ip_addr[3],
- le16_to_cpu(fw_ddb_entry->port),
- fw_ddb_entry->iscsi_name);
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI6 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ } else {
+ dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
+ "Next %d State %04x ConnErr %08x %pI4 "
+ ":%04d \"%s\"\n", __func__, fw_ddb_index,
+ mbox_sts[0], mbox_sts[2], mbox_sts[3],
+ mbox_sts[4], mbox_sts[5],
+ fw_ddb_entry->ip_addr,
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
+ }
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
@@ -664,6 +762,59 @@ exit_get_event_log:
}
/**
+ * qla4xxx_abort_task - issues Abort Task
+ * @ha: Pointer to host adapter structure.
+ * @srb: Pointer to srb entry
+ *
+ * This routine issues an Abort Task mailbox command to the firmware
+ * for the outstanding command associated with the given srb.
+ **/
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct scsi_cmnd *cmd = srb->cmd;
+ int status = QLA_SUCCESS;
+ unsigned long flags = 0;
+ uint32_t index;
+
+ /*
+ * Send abort task command to ISP, so that the ISP will return
+ * request with ABORT status
+ */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ index = (unsigned long)(unsigned char *)cmd->host_scribble;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Firmware already posted completion on response queue */
+ if (index == MAX_SRBS)
+ return status;
+
+ mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+ mbox_cmd[1] = srb->fw_ddb_index;
+ mbox_cmd[2] = index;
+ /* Immediate Command Enable */
+ mbox_cmd[5] = 0x01;
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+ status = QLA_ERROR;
+
+ DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
+ "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+ ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+ mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+ }
+
+ return status;
+}
+
+/**
* qla4xxx_reset_lun - issues LUN Reset
* @ha: Pointer to host adapter structure.
* @db_entry: Pointer to device database entry
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2ccad36bee9..38b1d38afca 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -74,6 +74,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
*/
static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *));
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
@@ -88,6 +89,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
.eh_target_reset_handler = qla4xxx_eh_target_reset,
.eh_host_reset_handler = qla4xxx_eh_host_reset,
@@ -384,12 +386,12 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
if (!srb)
return srb;
- atomic_set(&srb->ref_count, 1);
+ kref_init(&srb->srb_ref);
srb->ha = ha;
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- cmd->SCp.ptr = (void *)srb;
+ CMD_SP(cmd) = (void *)srb;
cmd->scsi_done = done;
return srb;
@@ -403,12 +405,14 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- cmd->SCp.ptr = NULL;
+ CMD_SP(cmd) = NULL;
}
-void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb)
+void qla4xxx_srb_compl(struct kref *ref)
{
+ struct srb *srb = container_of(ref, struct srb, srb_ref);
struct scsi_cmnd *cmd = srb->cmd;
+ struct scsi_qla_host *ha = srb->ha;
qla4xxx_srb_free_dma(ha, srb);
@@ -685,6 +689,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+ test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) &&
ha->dpc_thread) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
@@ -886,11 +891,10 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
srb = qla4xxx_del_from_active_array(ha, i);
if (srb != NULL) {
srb->cmd->result = DID_RESET << 16;
- qla4xxx_srb_compl(ha, srb);
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
}
/**
@@ -1069,6 +1073,54 @@ static void qla4xxx_do_dpc(struct work_struct *work)
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- link change? --- */
+ if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ /* ---- link down? --- */
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_ONLINE)
+ qla4xxx_mark_device_missing(ha,
+ ddb_entry);
+ }
+ } else {
+ /* ---- link up? --- *
+ * F/W will auto login to all devices ONLY ONCE after
+ * link up during driver initialization and runtime
+ * fatal error recovery. Therefore, the driver must
+ * manually relogin to devices when recovering from
+ * connection failures, logouts, expired KATO, etc. */
+
+ list_for_each_entry_safe(ddb_entry, dtemp,
+ &ha->ddb_list, list) {
+ if ((atomic_read(&ddb_entry->state) ==
+ DDB_STATE_MISSING) ||
+ (atomic_read(&ddb_entry->state) ==
+ DDB_STATE_DEAD)) {
+ if (ddb_entry->fw_ddb_device_state ==
+ DDB_DS_SESSION_ACTIVE) {
+ atomic_set(&ddb_entry->state,
+ DDB_STATE_ONLINE);
+ dev_info(&ha->pdev->dev,
+ "scsi%ld: %s: ddb[%d]"
+ " os[%d] marked"
+ " ONLINE\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index,
+ ddb_entry->os_target_id);
+
+ iscsi_unblock_session(
+ ddb_entry->sess);
+ } else
+ qla4xxx_relogin_device(
+ ha, ddb_entry);
+ }
+
+ }
+ }
+ }
+
/* ---- relogin device? --- */
if (adapter_up(ha) &&
test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
@@ -1430,12 +1482,14 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
{
struct srb *srb = NULL;
- struct scsi_cmnd *cmd;
+ struct scsi_cmnd *cmd = NULL;
- if (!(cmd = scsi_host_find_tag(ha->host, index)))
+ cmd = scsi_host_find_tag(ha->host, index);
+ if (!cmd)
return srb;
- if (!(srb = (struct srb *)cmd->host_scribble))
+ srb = (struct srb *)CMD_SP(cmd);
+ if (!srb)
return srb;
/* update counters */
@@ -1443,14 +1497,15 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
ha->req_q_count += srb->iocb_cnt;
ha->iocb_cnt -= srb->iocb_cnt;
if (srb->cmd)
- srb->cmd->host_scribble = NULL;
+ srb->cmd->host_scribble =
+ (unsigned char *)(unsigned long) MAX_SRBS;
}
return srb;
}
/**
* qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
- * @ha: actual ha whose done queue will contain the comd returned by firmware.
+ * @ha: Pointer to host adapter structure.
* @cmd: Scsi Command to wait on.
*
* This routine waits for the command to be returned by the Firmware
@@ -1465,7 +1520,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) cmd->SCp.ptr;
+ rp = (struct srb *) CMD_SP(cmd);
if (rp == NULL) {
done++;
break;
@@ -1534,6 +1589,62 @@ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
}
/**
+ * qla4xxx_eh_abort - callback for abort task.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to abort the specified
+ * command.
+ **/
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+ unsigned int id = cmd->device->id;
+ unsigned int lun = cmd->device->lun;
+ unsigned long serial = cmd->serial_number;
+ struct srb *srb = NULL;
+ int ret = SUCCESS;
+ int wait = 0;
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
+ ha->host_no, id, lun, cmd, serial);
+
+ srb = (struct srb *) CMD_SP(cmd);
+
+ if (!srb)
+ return SUCCESS;
+
+ kref_get(&srb->srb_ref);
+
+ if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ } else {
+ DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
+ ha->host_no, id, lun));
+ wait = 1;
+ }
+
+ kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+
+ /* Wait for command to complete */
+ if (wait) {
+ if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+ DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
+ ha->host_no, id, lun));
+ ret = FAILED;
+ }
+ }
+
+ dev_info(&ha->pdev->dev,
+ "scsi%ld:%d:%d: Abort command - %s\n",
+ ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
+
+ return ret;
+}
+
+/**
* qla4xxx_eh_device_reset - callback for target reset.
* @cmd: Pointer to Linux's SCSI command structure
*
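The qla4xxx hunks above move srb lifetime onto kref reference counting: the abort handler takes an extra reference before talking to the firmware, and every completion path drops one through kref_put(). A minimal sketch of that pattern, using illustrative names (my_srb, my_srb_release) rather than the driver's own:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_srb {
        struct kref ref;        /* the last put frees/completes the command */
        /* ... per-command state ... */
};

static void my_srb_release(struct kref *kref)
{
        struct my_srb *srb = container_of(kref, struct my_srb, ref);

        /* the real driver would complete the SCSI command here */
        kfree(srb);
}

static struct my_srb *my_srb_alloc(void)
{
        struct my_srb *srb = kzalloc(sizeof(*srb), GFP_KERNEL);

        if (srb)
                kref_init(&srb->ref);   /* refcount starts at 1 */
        return srb;
}

static void my_abort(struct my_srb *srb)
{
        kref_get(&srb->ref);            /* keep the srb alive across the abort */
        /* ... issue the abort mailbox command ... */
        kref_put(&srb->ref, my_srb_release);
}

Because the release callback runs exactly once, whichever of the abort path or the normal completion path drops the final reference performs the completion; that is the race the driver change closes.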
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6980cb279c8..28a6c494a2e 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.01.00-k9"
-
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k1"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index aa406497eeb..ca5c15c779c 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -755,7 +755,7 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
struct of_device *op = qpti->op;
struct device_node *dp;
- dp = op->node;
+ dp = op->dev.of_node;
qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
if (qpti->scsi_id == -1)
@@ -776,8 +776,8 @@ static void qpti_get_bursts(struct qlogicpti *qpti)
struct of_device *op = qpti->op;
u8 bursts, bmask;
- bursts = of_getintprop_default(op->node, "burst-sizes", 0xff);
- bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff);
+ bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
+ bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
if (bmask != 0xff)
bursts &= bmask;
if (bursts == 0xff ||
@@ -1293,7 +1293,7 @@ static struct scsi_host_template qpti_template = {
static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
struct scsi_host_template *tpnt = match->data;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
struct qlogicpti *qpti;
static int nqptis;
@@ -1315,7 +1315,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic
qpti->qhost = host;
qpti->op = op;
qpti->qpti_id = nqptis;
- strcpy(qpti->prom_name, op->node->name);
+ strcpy(qpti->prom_name, op->dev.of_node->name);
qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
if (qpti_map_regs(qpti) < 0)
@@ -1456,8 +1456,11 @@ static const struct of_device_id qpti_match[] = {
MODULE_DEVICE_TABLE(of, qpti_match);
static struct of_platform_driver qpti_sbus_driver = {
- .name = "qpti",
- .match_table = qpti_match,
+ .driver = {
+ .name = "qpti",
+ .owner = THIS_MODULE,
+ .of_match_table = qpti_match,
+ },
.probe = qpti_sbus_probe,
.remove = __devexit_p(qpti_sbus_remove),
};
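The qlogicpti conversion (repeated for sun_esp further down) is part of the of_device rework: the firmware node is now reached via op->dev.of_node, and the match table moves into the embedded struct device_driver. A sketch of the resulting driver shape, with placeholder names (my_*) instead of the real driver's:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static int __devinit my_probe(struct of_device *op,
                              const struct of_device_id *match)
{
        struct device_node *dp = op->dev.of_node;       /* was op->node */

        return dp ? 0 : -ENODEV;
}

static int __devexit my_remove(struct of_device *op)
{
        return 0;
}

static const struct of_device_id my_match[] = {
        { .name = "my-device", },
        {},
};
MODULE_DEVICE_TABLE(of, my_match);

static struct of_platform_driver my_driver = {
        .driver = {
                .name           = "my-device",
                .owner          = THIS_MODULE,
                .of_match_table = my_match,
        },
        .probe  = my_probe,
        .remove = __devexit_p(my_remove),
};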
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c08f616465..ad0ed212db4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -67,6 +67,9 @@
#include "scsi_priv.h"
#include "scsi_logging.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/scsi.h>
+
static void scsi_done(struct scsi_cmnd *cmd);
/*
@@ -747,10 +750,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
cmd->result = (DID_NO_CONNECT << 16);
scsi_done(cmd);
} else {
+ trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(cmd, scsi_done);
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
+ trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
rtn = SCSI_MLQUEUE_HOST_BUSY;
@@ -781,6 +786,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
+ trace_scsi_dispatch_cmd_done(cmd);
blk_complete_request(cmd->request);
}
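scsi.c now calls the new dispatch/done tracepoints and, via CREATE_TRACE_POINTS, is the translation unit that instantiates them. For readers unfamiliar with the idiom, a minimal hypothetical trace header (not the real trace/events/scsi.h) looks roughly like this; exactly one .c file defines CREATE_TRACE_POINTS before including it, every other user just includes it and calls trace_my_cmd_start():

/* include/trace/events/mytrace.h -- hypothetical, minimal */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mytrace

#if !defined(_TRACE_MYTRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYTRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(my_cmd_start,

        TP_PROTO(unsigned int host_no, unsigned char opcode),

        TP_ARGS(host_no, opcode),

        TP_STRUCT__entry(
                __field(unsigned int,  host_no)
                __field(unsigned char, opcode)
        ),

        TP_fast_assign(
                __entry->host_no = host_no;
                __entry->opcode  = opcode;
        ),

        TP_printk("host_no=%u opcode=0x%x", __entry->host_no, __entry->opcode)
);

#endif /* _TRACE_MYTRACE_H */

/* This part must stay outside the include guard */
#include <trace/define_trace.h>

In the owning .c file the include is preceded by #define CREATE_TRACE_POINTS, which is what expands the header into real tracepoint definitions rather than just declarations.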
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3a5bfd10b2c..136329b4027 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -12,7 +12,7 @@
* SAS disks.
*
*
- * For documentation see http://www.torque.net/sg/sdebug26.html
+ * For documentation see http://sg.danny.cz/sg/sdebug26.html
*
* D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
* dpg: work for devfs large number of disks [20010809]
@@ -58,8 +58,8 @@
#include "sd.h"
#include "scsi_logging.h"
-#define SCSI_DEBUG_VERSION "1.81"
-static const char * scsi_debug_version_date = "20070104";
+#define SCSI_DEBUG_VERSION "1.82"
+static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
@@ -108,6 +108,7 @@ static const char * scsi_debug_version_date = "20070104";
#define DEF_ATO 1
#define DEF_PHYSBLK_EXP 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_OPT_BLKS 64
#define DEF_UNMAP_MAX_BLOCKS 0
#define DEF_UNMAP_MAX_DESC 0
#define DEF_UNMAP_GRANULARITY 0
@@ -147,12 +148,18 @@ static const char * scsi_debug_version_date = "20070104";
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
+/* Can queue up to this number of commands. Typically commands that
+ * have a non-zero delay are queued. */
+#define SCSI_DEBUG_CANQUEUE 255
+
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
+static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_uld = 0;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
@@ -169,6 +176,7 @@ static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
@@ -192,7 +200,6 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SDEBUG_SENSE_LEN 32
-#define SCSI_DEBUG_CANQUEUE 255
#define SCSI_DEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
@@ -699,9 +706,13 @@ static int inquiry_evpd_b0(unsigned char * arr)
unsigned int gran;
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+
+ /* Optimal transfer length granularity */
gran = 1 << scsi_debug_physblk_exp;
arr[2] = (gran >> 8) & 0xff;
arr[3] = gran & 0xff;
+
+ /* Maximum Transfer Length */
if (sdebug_store_sectors > 0x400) {
arr[4] = (sdebug_store_sectors >> 24) & 0xff;
arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -709,6 +720,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
arr[7] = sdebug_store_sectors & 0xff;
}
+ /* Optimal Transfer Length */
+ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+
if (scsi_debug_unmap_max_desc) {
unsigned int blocks;
@@ -717,15 +731,20 @@ static int inquiry_evpd_b0(unsigned char * arr)
else
blocks = 0xffffffff;
+ /* Maximum Unmap LBA Count */
put_unaligned_be32(blocks, &arr[16]);
+
+ /* Maximum Unmap Block Descriptor Count */
put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
}
+ /* Unmap Granularity Alignment */
if (scsi_debug_unmap_alignment) {
put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
arr[28] |= 0x80; /* UGAVALID */
}
+ /* Optimal Unmap Granularity */
if (scsi_debug_unmap_granularity) {
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
return 0x3c; /* Mandatory page length for thin provisioning */
@@ -2266,7 +2285,7 @@ static void timer_intr_handler(unsigned long indx)
struct sdebug_queued_cmd * sqcp;
unsigned long iflags;
- if (indx >= SCSI_DEBUG_CANQUEUE) {
+ if (indx >= scsi_debug_max_queue) {
printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
"large\n");
return;
@@ -2380,6 +2399,8 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
sdp->host->cmd_per_lun);
blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
+ if (scsi_debug_no_uld)
+ sdp->no_uld_attach = 1;
return 0;
}
@@ -2406,7 +2427,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2416,7 +2437,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
+ return (k < scsi_debug_max_queue) ? 1 : 0;
}
/* Deletes (stops) timers of all queued commands */
@@ -2427,7 +2448,7 @@ static void stop_all_queued(void)
struct sdebug_queued_cmd *sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (sqcp->in_use && sqcp->a_cmnd) {
del_timer_sync(&sqcp->cmnd_timer);
@@ -2533,7 +2554,7 @@ static void __init init_all_queued(void)
struct sdebug_queued_cmd * sqcp;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
init_timer(&sqcp->cmnd_timer);
sqcp->in_use = 0;
@@ -2625,12 +2646,12 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
struct sdebug_queued_cmd * sqcp = NULL;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ for (k = 0; k < scsi_debug_max_queue; ++k) {
sqcp = &queued_arr[k];
if (! sqcp->in_use)
break;
}
- if (k >= SCSI_DEBUG_CANQUEUE) {
+ if (k >= scsi_debug_max_queue) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
return 1; /* report busy to mid level */
@@ -2662,7 +2683,9 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
@@ -2677,6 +2700,7 @@ module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
@@ -2695,7 +2719,9 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
@@ -2705,6 +2731,7 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)")
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -2970,6 +2997,31 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
sdebug_max_luns_store);
+static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+}
+static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
+ const char * buf, size_t count)
+{
+ int n;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
+ (n <= SCSI_DEBUG_CANQUEUE)) {
+ scsi_debug_max_queue = n;
+ return count;
+ }
+ return -EINVAL;
+}
+DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
+ sdebug_max_queue_store);
+
+static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+}
+DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
+
static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
@@ -3107,7 +3159,9 @@ static int do_create_driverfs_files(void)
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
@@ -3139,7 +3193,9 @@ static void do_remove_driverfs_files(void)
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
@@ -3830,12 +3886,13 @@ static int sdebug_driver_probe(struct device * dev)
sdbg_host = to_sdebug_host(dev);
- hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
- if (NULL == hpnt) {
- printk(KERN_ERR "%s: scsi_register failed\n", __func__);
- error = -ENODEV;
+ sdebug_driver_template.can_queue = scsi_debug_max_queue;
+ hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
+ if (NULL == hpnt) {
+ printk(KERN_ERR "%s: scsi_register failed\n", __func__);
+ error = -ENODEV;
return error;
- }
+ }
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
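The scsi_debug hunks annotate the Block Limits VPD page (B0h) and advertise the new opt_blks value in bytes 8-11 as a big-endian count of logical blocks. A tiny sketch of that packing (fill_opt_xfer_len is illustrative, not a function in the driver):

#include <linux/kernel.h>
#include <asm/unaligned.h>

/* Write the "Optimal Transfer Length" field of the B0h VPD page. */
static void fill_opt_xfer_len(unsigned char *arr, u32 opt_blks)
{
        put_unaligned_be32(opt_blks, &arr[8]);  /* bytes 8..11, big-endian */
}

With the module's opt_blks parameter left alone this advertises 64 blocks, which is what the new DEF_OPT_BLKS constant encodes.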
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7ad53fa4276..a5d630f5f51 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -39,6 +39,8 @@
#include "scsi_logging.h"
#include "scsi_transport_api.h"
+#include <trace/events/scsi.h>
+
#define SENSE_TIMEOUT (10*HZ)
/*
@@ -52,6 +54,7 @@
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
if (shost->host_busy == shost->host_failed) {
+ trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5,
printk("Waking error handler thread\n"));
@@ -127,6 +130,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
struct scsi_cmnd *scmd = req->special;
enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
+ trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
if (scmd->device->host->transportt->eh_timed_out)
@@ -970,9 +974,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
"0x%p\n", current->comm,
scmd));
rtn = scsi_try_to_abort_cmd(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd)) {
scsi_eh_finish_cmd(scmd, done_q);
}
@@ -1099,8 +1104,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
" 0x%p\n", current->comm,
sdev));
rtn = scsi_try_bus_device_reset(bdr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
if (!scsi_device_online(sdev) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(bdr_scmd)) {
list_for_each_entry_safe(scmd, next,
work_q, eh_entry) {
@@ -1163,10 +1169,11 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
"to target %d\n",
current->comm, id));
rtn = scsi_try_target_reset(tgtr_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (id == scmd_id(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(tgtr_scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1222,10 +1229,11 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
" %d\n", current->comm,
channel));
rtn = scsi_try_bus_reset(chan_scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (channel == scmd_channel(scmd))
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd,
done_q);
@@ -1259,9 +1267,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
, current->comm));
rtn = scsi_try_host_reset(scmd);
- if (rtn == SUCCESS) {
+ if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!scsi_device_online(scmd->device) ||
+ rtn == FAST_IO_FAIL ||
(!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
!scsi_eh_tur(scmd))
scsi_eh_finish_cmd(scmd, done_q);
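Every scsi_error.c hunk above applies the same rule: a FAST_IO_FAIL verdict from the transport is accepted like SUCCESS for bookkeeping, but the follow-up test-unit-ready probe is skipped and the command is finished immediately. A condensed sketch of that decision, with my_tur() standing in for scsi_eh_tur():

#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int my_tur(struct scsi_cmnd *scmd)
{
        return 1;       /* placeholder for scsi_eh_tur() */
}

static void my_handle_reset_result(int rtn, struct scsi_cmnd *scmd,
                                   struct list_head *done_q)
{
        if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
                /* a fast-failed rport will never answer a TUR, so skip it */
                if (!scsi_device_online(scmd->device) ||
                    rtn == FAST_IO_FAIL || !my_tur(scmd))
                        scsi_eh_finish_cmd(scmd, done_q);
        }
}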
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 38518b08807..1c027a97d8b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -459,8 +459,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
found_target->reap_ref++;
spin_unlock_irqrestore(shost->host_lock, flags);
if (found_target->state != STARGET_DEL) {
- put_device(parent);
- kfree(starget);
+ put_device(dev);
return found_target;
}
/* Unfortunately, we found a dying target; need to
@@ -493,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
enum scsi_target_state state;
- int empty;
+ int empty = 0;
spin_lock_irqsave(shost->host_lock, flags);
state = starget->state;
- empty = --starget->reap_ref == 0 &&
- list_empty(&starget->devices) ? 1 : 0;
+ if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
+ empty = 1;
+ starget->state = STARGET_DEL;
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
if (!empty)
return;
BUG_ON(state == STARGET_DEL);
- starget->state = STARGET_DEL;
if (state == STARGET_CREATED)
scsi_target_destroy(starget);
else
@@ -1221,7 +1221,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
}
/**
- * scsilun_to_int: convert a scsi_lun to an int
+ * scsilun_to_int - convert a scsi_lun to an int
* @scsilun: struct scsi_lun to be converted.
*
* Description:
@@ -1253,7 +1253,7 @@ int scsilun_to_int(struct scsi_lun *scsilun)
EXPORT_SYMBOL(scsilun_to_int);
/**
- * int_to_scsilun: reverts an int into a scsi_lun
+ * int_to_scsilun - reverts an int into a scsi_lun
* @lun: integer to be reverted
* @scsilun: struct scsi_lun to be set.
*
@@ -1877,12 +1877,9 @@ void scsi_forget_host(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/*
- * Function: scsi_get_host_dev()
- *
- * Purpose: Create a scsi_device that points to the host adapter itself.
- *
- * Arguments: SHpnt - Host that needs a scsi_device
+/**
+ * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
+ * @shost: Host that needs a scsi_device
*
* Lock status: None assumed.
*
@@ -1895,7 +1892,7 @@ void scsi_forget_host(struct Scsi_Host *shost)
*
* Note - this device is not accessible from any high-level
* drivers (including generics), which is probably not
- * optimal. We can add hooks later to attach
+ * optimal. We can add hooks later to attach.
*/
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
@@ -1921,18 +1918,13 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_get_host_dev);
-/*
- * Function: scsi_free_host_dev()
- *
- * Purpose: Free a scsi_device that points to the host adapter itself.
- *
- * Arguments: SHpnt - Host that needs a scsi_device
+/**
+ * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
+ * @sdev: Host device to be freed
*
* Lock status: None assumed.
*
* Returns: Nothing
- *
- * Notes:
*/
void scsi_free_host_dev(struct scsi_device *sdev)
{
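The scsi_alloc_target()/scsi_target_reap() hunks close a race by both deciding to reap and publishing STARGET_DEL inside the same host_lock critical section, so a concurrent scan can no longer pick up a target that is about to be destroyed. The shape of that fix on a stand-in structure (my_target is not a real kernel type):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_target {
        spinlock_t lock;
        int reap_ref;
        int deleted;                    /* stand-in for STARGET_DEL */
        struct list_head devices;
};

/* Returns 1 when the caller should go ahead and destroy the target. */
static int my_target_reap_prepare(struct my_target *t)
{
        unsigned long flags;
        int empty = 0;

        spin_lock_irqsave(&t->lock, flags);
        if (--t->reap_ref == 0 && list_empty(&t->devices)) {
                empty = 1;
                t->deleted = 1;         /* state change published under the lock */
        }
        spin_unlock_irqrestore(&t->lock, flags);

        return empty;
}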
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 429c9b73e3e..c23ab978c3b 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -474,7 +474,7 @@ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
/*
- * sdev_rd_attr: create a function and attribute variable for a
+ * sdev_rw_attr: create a function and attribute variable for a
* read/write field.
*/
#define sdev_rw_attr(field, format_string) \
@@ -486,7 +486,7 @@ sdev_store_##field (struct device *dev, struct device_attribute *attr, \
{ \
struct scsi_device *sdev; \
sdev = to_scsi_device(dev); \
- snscanf (buf, 20, format_string, &sdev->field); \
+ sscanf (buf, format_string, &sdev->field); \
return count; \
} \
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
@@ -853,9 +853,6 @@ static int scsi_target_add(struct scsi_target *starget)
error = device_add(&starget->dev);
if (error) {
dev_err(&starget->dev, "target device_add failed, error %d\n", error);
- get_device(&starget->dev);
- scsi_target_reap(starget);
- put_device(&starget->dev);
return error;
}
transport_add_device(&starget->dev);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
new file mode 100644
index 00000000000..b587289cfac
--- /dev/null
+++ b/drivers/scsi/scsi_trace.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2010 FUJITSU LIMITED
+ * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/kernel.h>
+#include <linux/trace_seq.h>
+#include <trace/events/scsi.h>
+
+#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
+#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+
+static const char *
+scsi_trace_misc(struct trace_seq *, unsigned char *, int);
+
+static const char *
+scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((cdb[1] & 0x1F) << 16);
+ lba |= (cdb[2] << 8);
+ lba |= cdb[3];
+ txlen = cdb[4];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu",
+ (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[7] << 8);
+ txlen |= cdb[8];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= (cdb[2] << 24);
+ lba |= (cdb[3] << 16);
+ lba |= (cdb[4] << 8);
+ lba |= cdb[5];
+ txlen |= (cdb[6] << 24);
+ txlen |= (cdb[7] << 16);
+ txlen |= (cdb[8] << 8);
+ txlen |= cdb[9];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ sector_t lba = 0, txlen = 0;
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ txlen |= (cdb[10] << 24);
+ txlen |= (cdb[11] << 16);
+ txlen |= (cdb[12] << 8);
+ txlen |= cdb[13];
+
+ trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
+ (unsigned long long)lba, (unsigned long long)txlen,
+ cdb[1] >> 5);
+
+ if (cdb[0] == WRITE_SAME_16)
+ trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0, txlen = 0;
+ u32 ei_lbrt = 0;
+
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ cmd = "READ";
+ break;
+ case VERIFY_32:
+ cmd = "VERIFY";
+ break;
+ case WRITE_32:
+ cmd = "WRITE";
+ break;
+ case WRITE_SAME_32:
+ cmd = "WRITE_SAME";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[12] << 56);
+ lba |= ((u64)cdb[13] << 48);
+ lba |= ((u64)cdb[14] << 40);
+ lba |= ((u64)cdb[15] << 32);
+ lba |= (cdb[16] << 24);
+ lba |= (cdb[17] << 16);
+ lba |= (cdb[18] << 8);
+ lba |= cdb[19];
+ ei_lbrt |= (cdb[20] << 24);
+ ei_lbrt |= (cdb[21] << 16);
+ ei_lbrt |= (cdb[22] << 8);
+ ei_lbrt |= cdb[23];
+ txlen |= (cdb[28] << 24);
+ txlen |= (cdb[29] << 16);
+ txlen |= (cdb[30] << 8);
+ txlen |= cdb[31];
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
+ cmd, (unsigned long long)lba,
+ (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+
+ if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
+ trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+ unsigned int regions = cdb[7] << 8 | cdb[8];
+
+ trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len, *cmd;
+ sector_t lba = 0;
+ u32 alloc_len = 0;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case SAI_READ_CAPACITY_16:
+ cmd = "READ_CAPACITY_16";
+ break;
+ case SAI_GET_LBA_STATUS:
+ cmd = "GET_LBA_STATUS";
+ break;
+ default:
+ trace_seq_printf(p, "UNKNOWN");
+ goto out;
+ }
+
+ lba |= ((u64)cdb[2] << 56);
+ lba |= ((u64)cdb[3] << 48);
+ lba |= ((u64)cdb[4] << 40);
+ lba |= ((u64)cdb[5] << 32);
+ lba |= (cdb[6] << 24);
+ lba |= (cdb[7] << 16);
+ lba |= (cdb[8] << 8);
+ lba |= cdb[9];
+ alloc_len |= (cdb[10] << 24);
+ alloc_len |= (cdb[11] << 16);
+ alloc_len |= (cdb[12] << 8);
+ alloc_len |= cdb[13];
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
+ (unsigned long long)lba, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (SERVICE_ACTION32(cdb)) {
+ case READ_32:
+ case VERIFY_32:
+ case WRITE_32:
+ case WRITE_SAME_32:
+ return scsi_trace_rw32(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
+
+static const char *
+scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = p->buffer + p->len;
+
+ trace_seq_printf(p, "-");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
+scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ switch (cdb[0]) {
+ case READ_6:
+ case WRITE_6:
+ return scsi_trace_rw6(p, cdb, len);
+ case READ_10:
+ case VERIFY:
+ case WRITE_10:
+ case WRITE_SAME:
+ return scsi_trace_rw10(p, cdb, len);
+ case READ_12:
+ case VERIFY_12:
+ case WRITE_12:
+ return scsi_trace_rw12(p, cdb, len);
+ case READ_16:
+ case VERIFY_16:
+ case WRITE_16:
+ case WRITE_SAME_16:
+ return scsi_trace_rw16(p, cdb, len);
+ case UNMAP:
+ return scsi_trace_unmap(p, cdb, len);
+ case SERVICE_ACTION_IN:
+ return scsi_trace_service_action_in(p, cdb, len);
+ case VARIABLE_LENGTH_CMD:
+ return scsi_trace_varlen(p, cdb, len);
+ default:
+ return scsi_trace_misc(p, cdb, len);
+ }
+}
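The new scsi_trace.c decodes LBAs and transfer lengths by shifting individual CDB bytes; the same extraction can be written with the unaligned big-endian helpers, which is a handy way to double-check the byte offsets. A sketch for the 16-byte READ/WRITE layout (decode_rw16 is illustrative only, not part of the file):

#include <linux/kernel.h>
#include <asm/unaligned.h>

static void decode_rw16(const unsigned char *cdb, u64 *lba, u32 *txlen)
{
        *lba   = get_unaligned_be64(&cdb[2]);   /* bytes 2..9  */
        *txlen = get_unaligned_be32(&cdb[10]);  /* bytes 10..13 */
}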
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6cfffc88022..06813789145 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -834,7 +834,7 @@ static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int val;
+ unsigned long val;
struct fc_rport *rport = transport_class_to_rport(dev);
struct Scsi_Host *shost = rport_to_shost(rport);
struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -848,6 +848,12 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
return -EINVAL;
/*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ /*
* If fast_io_fail is off we have to cap
* dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
*/
@@ -2865,7 +2871,7 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
- int timeout = rport->dev_loss_tmo;
+ unsigned long timeout = rport->dev_loss_tmo;
unsigned long flags;
/*
@@ -3191,23 +3197,33 @@ fc_scsi_scan_rport(struct work_struct *work)
*
* This routine can be called from a FC LLD scsi_eh callback. It
* blocks the scsi_eh thread until the fc_rport leaves the
- * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh
- * failing recovery actions for blocked rports which would lead to
- * offlined SCSI devices.
+ * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
+ * necessary to avoid the scsi_eh failing recovery actions for blocked
+ * rports which would lead to offlined SCSI devices.
+ *
+ * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
+ * FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
+ * passed back to scsi_eh.
*/
-void fc_block_scsi_eh(struct scsi_cmnd *cmnd)
+int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ while (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
spin_unlock_irqrestore(shost->host_lock, flags);
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
}
spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+ return FAST_IO_FAIL;
+
+ return 0;
}
EXPORT_SYMBOL(fc_block_scsi_eh);
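fc_block_scsi_eh() now returns FAST_IO_FAIL when the fast_io_fail timer fired while the rport was blocked, and the updated kernel-doc says to pass that verdict straight back to scsi_eh. A hypothetical LLD device-reset handler would do so roughly like this (my_issue_lun_reset stands in for the driver's own reset path):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>

static int my_issue_lun_reset(struct scsi_cmnd *cmnd)
{
        return SUCCESS;         /* placeholder for the real reset mailbox */
}

static int my_eh_device_reset(struct scsi_cmnd *cmnd)
{
        int ret = fc_block_scsi_eh(cmnd);

        if (ret)                /* FAST_IO_FAIL: hand it back to scsi_eh */
                return ret;

        return my_issue_lun_reset(cmnd);
}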
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de6c60320f6..829cc37abc4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1434,6 +1434,8 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#error RC16_LEN must not be more than SD_BUF_SIZE
#endif
+#define READ_CAPACITY_RETRIES_ON_RESET 10
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -1441,7 +1443,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
unsigned int alignment;
unsigned long long lba;
unsigned sector_size;
@@ -1470,6 +1472,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
* Invalid Field in CDB, just retry
* silently with RC10 */
return -EINVAL;
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
}
retries--;
@@ -1528,7 +1537,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int the_result;
- int retries = 3;
+ int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
unsigned sector_size;
@@ -1544,8 +1553,16 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result)
+ if (the_result) {
sense_valid = scsi_sense_valid(&sshdr);
+ if (sense_valid &&
+ sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x29 && sshdr.ascq == 0x00)
+ /* Device reset might occur several times,
+ * give it one more chance */
+ if (--reset_retries > 0)
+ continue;
+ }
retries--;
} while (the_result && retries);
@@ -1574,6 +1591,8 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
static int sd_try_rc16_first(struct scsi_device *sdp)
{
+ if (sdp->host->max_cmd_len < 16)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
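Both read_capacity paths now retry on the 29h/00h additional sense pair ("power on, reset, or bus device reset occurred"), bounded by READ_CAPACITY_RETRIES_ON_RESET. The test itself, pulled into a helper purely for illustration (is_reset_unit_attention is not in sd.c):

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>

static bool is_reset_unit_attention(const struct scsi_sense_hdr *sshdr)
{
        return sshdr->sense_key == UNIT_ATTENTION &&
               sshdr->asc == 0x29 && sshdr->ascq == 0x00;
}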
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dee1c96288d..ef752b248c4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
static int
-sg_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd_in, unsigned long arg)
+sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
void __user *p = (void __user *)arg;
int __user *ip = p;
@@ -1078,6 +1077,18 @@ sg_ioctl(struct inode *inode, struct file *filp,
}
}
+static long
+sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = sg_ioctl(filp, cmd_in, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -1322,7 +1333,7 @@ static const struct file_operations sg_fops = {
.read = sg_read,
.write = sg_write,
.poll = sg_poll,
- .ioctl = sg_ioctl,
+ .unlocked_ioctl = sg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sg_compat_ioctl,
#endif
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a713ef2..24211d0efa6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops =
.open = st_open,
.flush = st_flush,
.release = st_release,
+ .llseek = noop_llseek,
};
static int st_probe(struct device *dev)
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index fc23d273fb1..386dd9d602b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -125,7 +125,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
struct of_device *op = esp->dev;
struct device_node *dp;
- dp = op->node;
+ dp = op->dev.of_node;
esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
if (esp->scsi_id != 0xff)
goto done;
@@ -134,7 +134,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
if (esp->scsi_id != 0xff)
goto done;
- esp->scsi_id = of_getintprop_default(espdma->node,
+ esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
"scsi-initiator-id", 7);
done:
@@ -147,7 +147,7 @@ static void __devinit esp_get_differential(struct esp *esp)
struct of_device *op = esp->dev;
struct device_node *dp;
- dp = op->node;
+ dp = op->dev.of_node;
if (of_find_property(dp, "differential", NULL))
esp->flags |= ESP_FLAG_DIFFERENTIAL;
else
@@ -160,7 +160,7 @@ static void __devinit esp_get_clock_params(struct esp *esp)
struct device_node *bus_dp, *dp;
int fmhz;
- dp = op->node;
+ dp = op->dev.of_node;
bus_dp = dp->parent;
fmhz = of_getintprop_default(dp, "clock-frequency", 0);
@@ -172,12 +172,12 @@ static void __devinit esp_get_clock_params(struct esp *esp)
static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
{
- struct device_node *dma_dp = dma_of->node;
+ struct device_node *dma_dp = dma_of->dev.of_node;
struct of_device *op = esp->dev;
struct device_node *dp;
u8 bursts, val;
- dp = op->node;
+ dp = op->dev.of_node;
bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
if (val != 0xff)
@@ -565,7 +565,7 @@ fail:
static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
struct device_node *dma_node = NULL;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct of_device *dma_of = NULL;
int hme = 0;
@@ -574,7 +574,7 @@ static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device
!strcmp(dp->parent->name, "dma")))
dma_node = dp->parent;
else if (!strcmp(dp->name, "SUNW,fas")) {
- dma_node = op->node;
+ dma_node = op->dev.of_node;
hme = 1;
}
if (dma_node)
@@ -633,8 +633,11 @@ static const struct of_device_id esp_match[] = {
MODULE_DEVICE_TABLE(of, esp_match);
static struct of_platform_driver esp_sbus_driver = {
- .name = "esp",
- .match_table = esp_match,
+ .driver = {
+ .name = "esp",
+ .owner = THIS_MODULE,
+ .of_match_table = esp_match,
+ },
.probe = esp_sbus_probe,
.remove = __devexit_p(esp_sbus_remove),
};
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 5fda881c247..b701bf2cc18 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2224,14 +2224,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
}
-void
-wd33c93_release(void)
-{
-}
-
EXPORT_SYMBOL(wd33c93_host_reset);
EXPORT_SYMBOL(wd33c93_init);
-EXPORT_SYMBOL(wd33c93_release);
EXPORT_SYMBOL(wd33c93_abort);
EXPORT_SYMBOL(wd33c93_queuecommand);
EXPORT_SYMBOL(wd33c93_intr);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 00123f2383d..1ed5f3bf388 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -348,6 +348,5 @@ int wd33c93_queuecommand (struct scsi_cmnd *cmd,
void wd33c93_intr (struct Scsi_Host *instance);
int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
int wd33c93_host_reset (struct scsi_cmnd *);
-void wd33c93_release(void);
#endif /* WD33C93_H */
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 105449c15fa..e17764d7147 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -69,6 +69,7 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
},
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
static int __devinit zorro7xx_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent)
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 78ed24bb6a3..30463862603 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg)
for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == n)
break;
- if (i < BAUD_TABLE_SIZE) {
+ if (i < ARRAY_SIZE(baud_table)) {
m68328_console_baud = n;
m68328_console_cbaud = 0;
if (i > 15) {
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2b1ea3d4c4f..891e1dd65f2 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1891,8 +1891,8 @@ static int serial8250_get_poll_char(struct uart_port *port)
struct uart_8250_port *up = (struct uart_8250_port *)port;
unsigned char lsr = serial_inp(up, UART_LSR);
- while (!(lsr & UART_LSR_DR))
- lsr = serial_inp(up, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return NO_POLL_CHAR;
return serial_inp(up, UART_RX);
}
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index f55c49475a8..8b23165bc5d 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -518,12 +518,13 @@ config SERIAL_S3C2412
Serial port support for the Samsung S3C2412 and S3C2413 SoC
config SERIAL_S3C2440
- tristate "Samsung S3C2440/S3C2442 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442)
+ tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
default y if CPU_S3C2440
default y if CPU_S3C2442
+ select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
help
- Serial port support for the Samsung S3C2440 and S3C2442 SoC
+ Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
config SERIAL_S3C24A0
tristate "Samsung S3C24A0 Serial port support"
@@ -533,21 +534,13 @@ config SERIAL_S3C24A0
Serial port support for the Samsung S3C24A0 SoC
config SERIAL_S3C6400
- tristate "Samsung S3C6400/S3C6410/S5P6440 Seria port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440)
+ tristate "Samsung S3C6400/S3C6410/S5P6440/S5PC100 Serial port support"
+ depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5PC100)
select SERIAL_SAMSUNG_UARTS_4
default y
help
- Serial port support for the Samsung S3C6400, S3C6410 and S5P6440
- SoCs
-
-config SERIAL_S5PC100
- tristate "Samsung S5PC100 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S5PC100
- select SERIAL_SAMSUNG_UARTS_4
- default y
- help
- Serial port support for the Samsung S5PC100 SoCs
+ Serial port support for the Samsung S3C6400, S3C6410, S5P6440
+ and S5PC100 SoCs
config SERIAL_S5PV210
tristate "Samsung S5PV210 Serial port support"
@@ -1430,8 +1423,8 @@ config SERIAL_SC26XX_CONSOLE
Support for Console on SC2681/SC2692 serial ports.
config SERIAL_BFIN_SPORT
- tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)"
- depends on BLACKFIN && EXPERIMENTAL
+ tristate "Blackfin SPORT emulate UART"
+ depends on BLACKFIN
select SERIAL_CORE
help
Enable SPORT emulate UART on Blackfin series.
@@ -1446,28 +1439,52 @@ config SERIAL_BFIN_SPORT_CONSOLE
config SERIAL_BFIN_SPORT0_UART
bool "Enable UART over SPORT0"
- depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF544)
help
Enable UART over SPORT0
+config SERIAL_BFIN_SPORT0_UART_CTSRTS
+ bool "Enable UART over SPORT0 hardware flow control"
+ depends on SERIAL_BFIN_SPORT0_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT1_UART
bool "Enable UART over SPORT1"
depends on SERIAL_BFIN_SPORT
help
Enable UART over SPORT1
+config SERIAL_BFIN_SPORT1_UART_CTSRTS
+ bool "Enable UART over SPORT1 hardware flow control"
+ depends on SERIAL_BFIN_SPORT1_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT2_UART
bool "Enable UART over SPORT2"
depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
help
Enable UART over SPORT2
+config SERIAL_BFIN_SPORT2_UART_CTSRTS
+ bool "Enable UART over SPORT2 hardware flow control"
+ depends on SERIAL_BFIN_SPORT2_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_BFIN_SPORT3_UART
bool "Enable UART over SPORT3"
depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
help
Enable UART over SPORT3
+config SERIAL_BFIN_SPORT3_UART_CTSRTS
+ bool "Enable UART over SPORT3 hardware flow control"
+ depends on SERIAL_BFIN_SPORT3_UART
+ help
+ Enable hardware flow control in the driver.
+
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
select SERIAL_CORE
@@ -1506,4 +1523,56 @@ config SERIAL_GRLIB_GAISLER_APBUART_CONSOLE
help
Support for running a console on the GRLIB APBUART
+config SERIAL_ALTERA_JTAGUART
+ tristate "Altera JTAG UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera JTAG UART port.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE
+ bool "Altera JTAG UART console support"
+ depends on SERIAL_ALTERA_JTAGUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an Altera JTAG UART port to be the system console.
+
+config SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS
+ bool "Bypass output when no connection"
+ depends on SERIAL_ALTERA_JTAGUART_CONSOLE
+ select SERIAL_CORE_CONSOLE
+ help
+ Bypass console output and keep going even if there is no
+ JTAG terminal connection with the host.
+
+config SERIAL_ALTERA_UART
+ tristate "Altera UART support"
+ select SERIAL_CORE
+ help
+ This driver supports the Altera softcore UART port.
+
+config SERIAL_ALTERA_UART_MAXPORTS
+ int "Maximum number of Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 4
+ help
+ This setting lets you define the maximum number of the Altera
+ UART ports. The usual default varies from board to board, and
+ this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_BAUDRATE
+ int "Default baudrate for Altera UART ports"
+ depends on SERIAL_ALTERA_UART
+ default 115200
+ help
+ This setting lets you define what the default baudrate is for the
+ Altera UART ports. The usual default varies from board to board,
+ and this setting is a way of catering for that.
+
+config SERIAL_ALTERA_UART_CONSOLE
+ bool "Altera UART console support"
+ depends on SERIAL_ALTERA_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable an Altera UART port to be the system console.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 6aa4723b74e..208a85572c3 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
-obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
@@ -83,3 +82,5 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
+obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
+obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
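The altera_jtaguart driver added below is configured through platform data: its probe and early-setup loops only read .mapbase and .irq from each altera_jtaguart_platform_uart entry and stop at a zero mapbase. A hypothetical board-file hookup could therefore look like the following sketch; the base address, IRQ number and platform-device name are assumptions, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/altera_jtaguart.h>

static struct altera_jtaguart_platform_uart my_jtaguart_platp[] = {
        { .mapbase = 0x80000000, .irq = 1 },    /* made-up base and IRQ */
        {},                                     /* zero mapbase terminates the array */
};

static struct platform_device my_jtaguart_device = {
        .name   = "altera_jtaguart",            /* assumed to match the driver name */
        .id     = -1,
        .dev    = { .platform_data = my_jtaguart_platp },
};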
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/serial/altera_jtaguart.c
new file mode 100644
index 00000000000..f9b49b5ff5e
--- /dev/null
+++ b/drivers/serial/altera_jtaguart.c
@@ -0,0 +1,504 @@
+/*
+ * altera_jtaguart.c -- Altera JTAG UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_jtaguart.h>
+
+#define DRV_NAME "altera_jtaguart"
+
+/*
+ * Altera JTAG UART register definitions according to the Altera JTAG UART
+ * datasheet: http://www.altera.com/literature/hb/nios2/n2cpu_nii51009.pdf
+ */
+
+#define ALTERA_JTAGUART_SIZE 8
+
+#define ALTERA_JTAGUART_DATA_REG 0
+
+#define ALTERA_JTAGUART_DATA_DATA_MSK 0x000000FF
+#define ALTERA_JTAGUART_DATA_RVALID_MSK 0x00008000
+#define ALTERA_JTAGUART_DATA_RAVAIL_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_DATA_RAVAIL_OFF 16
+
+#define ALTERA_JTAGUART_CONTROL_REG 4
+
+#define ALTERA_JTAGUART_CONTROL_RE_MSK 0x00000001
+#define ALTERA_JTAGUART_CONTROL_WE_MSK 0x00000002
+#define ALTERA_JTAGUART_CONTROL_RI_MSK 0x00000100
+#define ALTERA_JTAGUART_CONTROL_RI_OFF 8
+#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
+#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
+#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
+#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF 16
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_jtaguart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned long imr; /* Local IMR mirror */
+};
+
+static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
+{
+ return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void altera_jtaguart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+}
+
+static void altera_jtaguart_start_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_tx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_stop_rx(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
+{
+}
+
+static void altera_jtaguart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_jtaguart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ /* Just copy the old termios settings back */
+ if (old)
+ tty_termios_copy_hw(termios, old);
+}
+
+static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned long status;
+
+ while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
+ ALTERA_JTAGUART_DATA_RVALID_MSK) {
+ ch = status & ALTERA_JTAGUART_DATA_DATA_MSK;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, 0, 0, ch, flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int pending, count;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writel(port->x_char, port->membase + ALTERA_JTAGUART_DATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ pending = uart_circ_chars_pending(xmit);
+ if (pending > 0) {
+ count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
+ ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+ if (count > pending)
+ count = pending;
+ if (count > 0) {
+ pending -= count;
+ while (count--) {
+ writel(xmit->buf[xmit->tail],
+ port->membase + ALTERA_JTAGUART_DATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (pending < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ }
+ }
+
+ if (pending == 0) {
+ pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned int isr;
+
+ isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
+ ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr;
+
+ spin_lock(&port->lock);
+
+ if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
+ altera_jtaguart_rx_chars(pp);
+ if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
+ altera_jtaguart_tx_chars(pp);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_jtaguart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_JTAGUART;
+
+ /* Clear mask, so no surprise interrupts. */
+ writel(0, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+}
+
+static int altera_jtaguart_startup(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(port->irq, altera_jtaguart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera JTAG UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_jtaguart_shutdown(struct uart_port *port)
+{
+ struct altera_jtaguart *pp =
+ container_of(port, struct altera_jtaguart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ free_irq(port->irq, port);
+}
+
+static const char *altera_jtaguart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_JTAGUART) ? "Altera JTAG UART" : NULL;
+}
+
+static int altera_jtaguart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_jtaguart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_jtaguart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ALTERA_JTAGUART)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_jtaguart_ops = {
+ .tx_empty = altera_jtaguart_tx_empty,
+ .get_mctrl = altera_jtaguart_get_mctrl,
+ .set_mctrl = altera_jtaguart_set_mctrl,
+ .start_tx = altera_jtaguart_start_tx,
+ .stop_tx = altera_jtaguart_stop_tx,
+ .stop_rx = altera_jtaguart_stop_rx,
+ .enable_ms = altera_jtaguart_enable_ms,
+ .break_ctl = altera_jtaguart_break_ctl,
+ .startup = altera_jtaguart_startup,
+ .shutdown = altera_jtaguart_shutdown,
+ .set_termios = altera_jtaguart_set_termios,
+ .type = altera_jtaguart_type,
+ .request_port = altera_jtaguart_request_port,
+ .release_port = altera_jtaguart_release_port,
+ .config_port = altera_jtaguart_config_port,
+ .verify_port = altera_jtaguart_verify_port,
+};
+
+#define ALTERA_JTAGUART_MAXPORTS 1
+static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
+
+int __init early_altera_jtaguart_setup(struct altera_jtaguart_platform_uart
+ *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_jtaguart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_JTAGUART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+ port->ops = &altera_jtaguart_ops;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return; /* no connection activity */
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#else
+static void altera_jtaguart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_jtaguart_ports + co->index)->port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
+ ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ cpu_relax();
+ spin_lock_irqsave(&port->lock, flags);
+ }
+ writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif
+
+static void altera_jtaguart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ for (; count; count--, s++) {
+ altera_jtaguart_console_putc(co, *s);
+ if (*s == '\n')
+ altera_jtaguart_console_putc(co, '\r');
+ }
+}
+
+static int __init altera_jtaguart_console_setup(struct console *co,
+ char *options)
+{
+ struct uart_port *port;
+
+ if (co->index < 0 || co->index >= ALTERA_JTAGUART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_jtaguart_ports[co->index].port;
+ if (port->membase == 0)
+ return -ENODEV;
+ return 0;
+}
+
+static struct uart_driver altera_jtaguart_driver;
+
+static struct console altera_jtaguart_console = {
+ .name = "ttyJ",
+ .write = altera_jtaguart_console_write,
+ .device = uart_console_device,
+ .setup = altera_jtaguart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_jtaguart_driver,
+};
+
+static int __init altera_jtaguart_console_init(void)
+{
+ register_console(&altera_jtaguart_console);
+ return 0;
+}
+
+console_initcall(altera_jtaguart_console_init);
+
+#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console)
+
+#else
+
+#define ALTERA_JTAGUART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
+
+static struct uart_driver altera_jtaguart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "altera_jtaguart",
+ .dev_name = "ttyJ",
+ .major = ALTERA_JTAGUART_MAJOR,
+ .minor = ALTERA_JTAGUART_MINOR,
+ .nr = ALTERA_JTAGUART_MAXPORTS,
+ .cons = ALTERA_JTAGUART_CONSOLE,
+};
+
+static int __devinit altera_jtaguart_probe(struct platform_device *pdev)
+{
+ struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_jtaguart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_JTAGUART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_JTAGUART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->ops = &altera_jtaguart_ops;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+
+ uart_add_one_port(&altera_jtaguart_driver, port);
+ }
+
+ return 0;
+}
+
+static int __devexit altera_jtaguart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < ALTERA_JTAGUART_MAXPORTS; i++) {
+ port = &altera_jtaguart_ports[i].port;
+ if (port)
+ uart_remove_one_port(&altera_jtaguart_driver, port);
+ }
+
+ return 0;
+}
+
+static struct platform_driver altera_jtaguart_platform_driver = {
+ .probe = altera_jtaguart_probe,
+ .remove = __devexit_p(altera_jtaguart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init altera_jtaguart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_jtaguart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_jtaguart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_jtaguart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_jtaguart_exit(void)
+{
+ platform_driver_unregister(&altera_jtaguart_platform_driver);
+ uart_unregister_driver(&altera_jtaguart_driver);
+}
+
+module_init(altera_jtaguart_init);
+module_exit(altera_jtaguart_exit);
+
+MODULE_DESCRIPTION("Altera JTAG UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
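
A minimal board-code sketch, not part of this patch, of how a platform could hand the driver above its port description; the probe loop walks the platform data until it hits a zero mapbase, so the array is zero-terminated. The header path, device name ("altera_jtaguart", assumed to equal the driver's DRV_NAME), base address and IRQ number are illustrative assumptions only.

/* Hypothetical board-code sketch for the altera_jtaguart driver above.
 * Address, IRQ and header location are examples, not taken from the patch. */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/altera_jtaguart.h>	/* assumed home of the platform struct */

static struct altera_jtaguart_platform_uart board_jtaguart[] = {
	{
		.mapbase	= 0x80001000,	/* example CSR base address */
		.irq		= 3,		/* example interrupt line */
	},
	{ },	/* zero mapbase terminates the probe loop */
};

static struct platform_device board_jtaguart_device = {
	.name	= "altera_jtaguart",	/* must match the driver's DRV_NAME */
	.id	= -1,
	.dev	= {
		.platform_data	= board_jtaguart,
	},
};

static int __init board_add_jtaguart(void)
{
	return platform_device_register(&board_jtaguart_device);
}
device_initcall(board_add_jtaguart);
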
diff --git a/drivers/serial/altera_uart.c b/drivers/serial/altera_uart.c
new file mode 100644
index 00000000000..bcee156d2f2
--- /dev/null
+++ b/drivers/serial/altera_uart.c
@@ -0,0 +1,570 @@
+/*
+ * altera_uart.c -- Altera UART driver
+ *
+ * Based on mcf.c -- Freescale ColdFire UART driver
+ *
+ * (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
+ * (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
+ * (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/altera_uart.h>
+
+#define DRV_NAME "altera_uart"
+
+/*
+ * Altera UART register definitions according to the Nios UART datasheet:
+ * http://www.altera.com/literature/ds/ds_nios_uart.pdf
+ */
+
+#define ALTERA_UART_SIZE 32
+
+#define ALTERA_UART_RXDATA_REG 0
+#define ALTERA_UART_TXDATA_REG 4
+#define ALTERA_UART_STATUS_REG 8
+#define ALTERA_UART_CONTROL_REG 12
+#define ALTERA_UART_DIVISOR_REG 16
+#define ALTERA_UART_EOP_REG 20
+
+#define ALTERA_UART_STATUS_PE_MSK 0x0001 /* parity error */
+#define ALTERA_UART_STATUS_FE_MSK 0x0002 /* framing error */
+#define ALTERA_UART_STATUS_BRK_MSK 0x0004 /* break */
+#define ALTERA_UART_STATUS_ROE_MSK 0x0008 /* RX overrun error */
+#define ALTERA_UART_STATUS_TOE_MSK 0x0010 /* TX overrun error */
+#define ALTERA_UART_STATUS_TMT_MSK 0x0020 /* TX shift register state */
+#define ALTERA_UART_STATUS_TRDY_MSK 0x0040 /* TX ready */
+#define ALTERA_UART_STATUS_RRDY_MSK 0x0080 /* RX ready */
+#define ALTERA_UART_STATUS_E_MSK 0x0100 /* exception condition */
+#define ALTERA_UART_STATUS_DCTS_MSK 0x0400 /* CTS logic-level change */
+#define ALTERA_UART_STATUS_CTS_MSK 0x0800 /* CTS logic state */
+#define ALTERA_UART_STATUS_EOP_MSK 0x1000 /* EOP written/read */
+
+ /* Enable interrupt on... */
+#define ALTERA_UART_CONTROL_PE_MSK 0x0001 /* ...parity error */
+#define ALTERA_UART_CONTROL_FE_MSK 0x0002 /* ...framing error */
+#define ALTERA_UART_CONTROL_BRK_MSK 0x0004 /* ...break */
+#define ALTERA_UART_CONTROL_ROE_MSK 0x0008 /* ...RX overrun */
+#define ALTERA_UART_CONTROL_TOE_MSK 0x0010 /* ...TX overrun */
+#define ALTERA_UART_CONTROL_TMT_MSK 0x0020 /* ...TX shift register empty */
+#define ALTERA_UART_CONTROL_TRDY_MSK 0x0040 /* ...TX ready */
+#define ALTERA_UART_CONTROL_RRDY_MSK 0x0080 /* ...RX ready */
+#define ALTERA_UART_CONTROL_E_MSK 0x0100 /* ...exception */
+
+#define ALTERA_UART_CONTROL_TRBK_MSK 0x0200 /* TX break */
+#define ALTERA_UART_CONTROL_DCTS_MSK 0x0400 /* Interrupt on CTS change */
+#define ALTERA_UART_CONTROL_RTS_MSK 0x0800 /* RTS signal */
+#define ALTERA_UART_CONTROL_EOP_MSK 0x1000 /* Interrupt on EOP */
+
+/*
+ * Local per-uart structure.
+ */
+struct altera_uart {
+ struct uart_port port;
+ unsigned int sigs; /* Local copy of line sigs */
+ unsigned short imr; /* Local IMR mirror */
+};
+
+static unsigned int altera_uart_tx_empty(struct uart_port *port)
+{
+ return (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TMT_MSK) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int altera_uart_get_mctrl(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+ unsigned int sigs;
+
+ spin_lock_irqsave(&port->lock, flags);
+ sigs =
+ (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_CTS_MSK) ? TIOCM_CTS : 0;
+ sigs |= (pp->sigs & TIOCM_RTS);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return sigs;
+}
+
+static void altera_uart_set_mctrl(struct uart_port *port, unsigned int sigs)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->sigs = sigs;
+ if (sigs & TIOCM_RTS)
+ pp->imr |= ALTERA_UART_CONTROL_RTS_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_start_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_tx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_stop_rx(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (break_state == -1)
+ pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
+ else
+ pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void altera_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, baudclk;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ baudclk = port->uartclk / baud;
+
+ if (old)
+ tty_termios_copy_hw(termios, old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+ writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void altera_uart_rx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned short status;
+
+ while ((status = readl(port->membase + ALTERA_UART_STATUS_REG)) &
+ ALTERA_UART_STATUS_RRDY_MSK) {
+ ch = readl(port->membase + ALTERA_UART_RXDATA_REG);
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (status & ALTERA_UART_STATUS_E_MSK) {
+ writel(status, port->membase + ALTERA_UART_STATUS_REG);
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & ALTERA_UART_STATUS_PE_MSK) {
+ port->icount.parity++;
+ } else if (status & ALTERA_UART_STATUS_ROE_MSK) {
+ port->icount.overrun++;
+ } else if (status & ALTERA_UART_STATUS_FE_MSK) {
+ port->icount.frame++;
+ }
+
+ status &= port->read_status_mask;
+
+ if (status & ALTERA_UART_STATUS_BRK_MSK)
+ flag = TTY_BREAK;
+ else if (status & ALTERA_UART_STATUS_PE_MSK)
+ flag = TTY_PARITY;
+ else if (status & ALTERA_UART_STATUS_FE_MSK)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+ uart_insert_char(port, status, ALTERA_UART_STATUS_ROE_MSK, ch,
+ flag);
+ }
+
+ tty_flip_buffer_push(port->state->port.tty);
+}
+
+static void altera_uart_tx_chars(struct altera_uart *pp)
+{
+ struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ /* Send special char - probably flow control */
+ writel(port->x_char, port->membase + ALTERA_UART_TXDATA_REG);
+ port->x_char = 0;
+ port->icount.tx++;
+ return;
+ }
+
+ while (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK) {
+ if (xmit->head == xmit->tail)
+ break;
+ writel(xmit->buf[xmit->tail],
+ port->membase + ALTERA_UART_TXDATA_REG);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (xmit->head == xmit->tail) {
+ pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+ }
+}
+
+static irqreturn_t altera_uart_interrupt(int irq, void *data)
+{
+ struct uart_port *port = data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned int isr;
+
+ isr = readl(port->membase + ALTERA_UART_STATUS_REG) & pp->imr;
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+ altera_uart_rx_chars(pp);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+ altera_uart_tx_chars(pp);
+ return IRQ_RETVAL(isr);
+}
+
+static void altera_uart_config_port(struct uart_port *port, int flags)
+{
+ port->type = PORT_ALTERA_UART;
+
+ /* Clear mask, so no surprise interrupts. */
+ writel(0, port->membase + ALTERA_UART_CONTROL_REG);
+ /* Clear status register */
+ writel(0, port->membase + ALTERA_UART_STATUS_REG);
+}
+
+static int altera_uart_startup(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(port->irq, altera_uart_interrupt, IRQF_DISABLED,
+ DRV_NAME, port);
+ if (ret) {
+ pr_err(DRV_NAME ": unable to attach Altera UART %d "
+ "interrupt vector=%d\n", port->line, port->irq);
+ return ret;
+ }
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Enable RX interrupts now */
+ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void altera_uart_shutdown(struct uart_port *port)
+{
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable all interrupts now */
+ pp->imr = 0;
+ writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ free_irq(port->irq, port);
+}
+
+static const char *altera_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_ALTERA_UART) ? "Altera UART" : NULL;
+}
+
+static int altera_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void altera_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release... */
+}
+
+static int altera_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if ((ser->type != PORT_UNKNOWN) && (ser->type != PORT_ALTERA_UART))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Define the basic serial functions we support.
+ */
+static struct uart_ops altera_uart_ops = {
+ .tx_empty = altera_uart_tx_empty,
+ .get_mctrl = altera_uart_get_mctrl,
+ .set_mctrl = altera_uart_set_mctrl,
+ .start_tx = altera_uart_start_tx,
+ .stop_tx = altera_uart_stop_tx,
+ .stop_rx = altera_uart_stop_rx,
+ .enable_ms = altera_uart_enable_ms,
+ .break_ctl = altera_uart_break_ctl,
+ .startup = altera_uart_startup,
+ .shutdown = altera_uart_shutdown,
+ .set_termios = altera_uart_set_termios,
+ .type = altera_uart_type,
+ .request_port = altera_uart_request_port,
+ .release_port = altera_uart_release_port,
+ .config_port = altera_uart_config_port,
+ .verify_port = altera_uart_verify_port,
+};
+
+static struct altera_uart altera_uart_ports[CONFIG_SERIAL_ALTERA_UART_MAXPORTS];
+
+#if defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
+
+int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_uart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = platp[i].uartclk;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+ port->ops = &altera_uart_ops;
+ }
+
+ return 0;
+}
+
+static void altera_uart_console_putc(struct console *co, const char c)
+{
+ struct uart_port *port = &(altera_uart_ports + co->index)->port;
+ int i;
+
+ for (i = 0; i < 0x10000; i++) {
+ if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK)
+ break;
+ }
+ writel(c, port->membase + ALTERA_UART_TXDATA_REG);
+ for (i = 0; i < 0x10000; i++) {
+ if (readl(port->membase + ALTERA_UART_STATUS_REG) &
+ ALTERA_UART_STATUS_TRDY_MSK)
+ break;
+ }
+}
+
+static void altera_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ for (; count; count--, s++) {
+ altera_uart_console_putc(co, *s);
+ if (*s == '\n')
+ altera_uart_console_putc(co, '\r');
+ }
+}
+
+static int __init altera_uart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= CONFIG_SERIAL_ALTERA_UART_MAXPORTS)
+ return -EINVAL;
+ port = &altera_uart_ports[co->index].port;
+ if (port->membase == 0)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver altera_uart_driver;
+
+static struct console altera_uart_console = {
+ .name = "ttyS",
+ .write = altera_uart_console_write,
+ .device = uart_console_device,
+ .setup = altera_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &altera_uart_driver,
+};
+
+static int __init altera_uart_console_init(void)
+{
+ register_console(&altera_uart_console);
+ return 0;
+}
+
+console_initcall(altera_uart_console_init);
+
+#define ALTERA_UART_CONSOLE (&altera_uart_console)
+
+#else
+
+#define ALTERA_UART_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
+
+/*
+ * Define the altera_uart UART driver structure.
+ */
+static struct uart_driver altera_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRV_NAME,
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = CONFIG_SERIAL_ALTERA_UART_MAXPORTS,
+ .cons = ALTERA_UART_CONSOLE,
+};
+
+static int __devinit altera_uart_probe(struct platform_device *pdev)
+{
+ struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS && platp[i].mapbase; i++) {
+ port = &altera_uart_ports[i].port;
+
+ port->line = i;
+ port->type = PORT_ALTERA_UART;
+ port->mapbase = platp[i].mapbase;
+ port->membase = ioremap(port->mapbase, ALTERA_UART_SIZE);
+ port->iotype = SERIAL_IO_MEM;
+ port->irq = platp[i].irq;
+ port->uartclk = platp[i].uartclk;
+ port->ops = &altera_uart_ops;
+ port->flags = ASYNC_BOOT_AUTOCONF;
+
+ uart_add_one_port(&altera_uart_driver, port);
+ }
+
+ return 0;
+}
+
+static int __devexit altera_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port;
+ int i;
+
+ for (i = 0; i < CONFIG_SERIAL_ALTERA_UART_MAXPORTS; i++) {
+ port = &altera_uart_ports[i].port;
+ if (port)
+ uart_remove_one_port(&altera_uart_driver, port);
+ }
+
+ return 0;
+}
+
+static struct platform_driver altera_uart_platform_driver = {
+ .probe = altera_uart_probe,
+ .remove = __devexit_p(altera_uart_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+};
+
+static int __init altera_uart_init(void)
+{
+ int rc;
+
+ rc = uart_register_driver(&altera_uart_driver);
+ if (rc)
+ return rc;
+ rc = platform_driver_register(&altera_uart_platform_driver);
+ if (rc) {
+ uart_unregister_driver(&altera_uart_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit altera_uart_exit(void)
+{
+ platform_driver_unregister(&altera_uart_platform_driver);
+ uart_unregister_driver(&altera_uart_driver);
+}
+
+module_init(altera_uart_init);
+module_exit(altera_uart_exit);
+
+MODULE_DESCRIPTION("Altera UART driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
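
Unlike the JTAG UART, this core needs an input clock to program the baud divisor, so the platform data also carries uartclk. Below is a minimal early-console sketch, not part of this patch, of how board setup code might call early_altera_uart_setup() before the platform bus comes up; the header path, address, IRQ and clock rate are illustrative assumptions.

/* Hypothetical early-boot sketch for the altera_uart driver above.
 * Address, IRQ, clock rate and header location are examples only. */
#include <linux/init.h>
#include <linux/altera_uart.h>	/* assumed home of the platform struct */

static struct altera_uart_platform_uart board_uart[] = {
	{
		.mapbase	= 0x80002000,	/* example CSR base address */
		.irq		= 4,		/* example interrupt line */
		.uartclk	= 50000000,	/* example 50 MHz input clock */
	},
	{ },	/* zero mapbase terminates the setup loop */
};

static void __init board_setup_early_console(void)
{
	early_altera_uart_setup(board_uart);
}
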
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 743ebf5f16d..eb4cb480b93 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -342,9 +342,9 @@ static int pl010_get_poll_char(struct uart_port *port)
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int status;
- do {
- status = readw(uap->port.membase + UART01x_FR);
- } while (status & UART01x_FR_RXFE);
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
return readw(uap->port.membase + UART01x_DR);
}
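
With this hunk pl010_get_poll_char() no longer spins inside the driver when the RX FIFO is empty; it reports NO_POLL_CHAR and leaves the retry policy to the caller (the kgdb/kdb polling path). A minimal caller-side sketch, not part of this patch, assuming CONFIG_CONSOLE_POLL and using a made-up function name:

/* Hypothetical caller sketch: wait for a polled character now that the
 * driver returns NO_POLL_CHAR instead of blocking internally. */
#include <linux/serial_core.h>

static int poll_wait_for_char(struct uart_port *port)
{
	int c;

	do {
		c = port->ops->poll_get_char(port);
		/* NO_POLL_CHAR means the RX FIFO was empty; poll again */
	} while (c == NO_POLL_CHAR);

	return c;
}
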
diff --git a/drivers/serial/apbuart.c b/drivers/serial/apbuart.c
index fe91319b5f6..0099b8692b6 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/serial/apbuart.c
@@ -559,7 +559,7 @@ static int __devinit apbuart_probe(struct of_device *op,
i = 0;
for (i = 0; i < grlib_apbuart_port_nr; i++) {
- if (op->node == grlib_apbuart_nodes[i])
+ if (op->dev.of_node == grlib_apbuart_nodes[i])
break;
}
@@ -584,12 +584,12 @@ static struct of_device_id __initdata apbuart_match[] = {
};
static struct of_platform_driver grlib_apbuart_of_driver = {
- .match_table = apbuart_match,
.probe = apbuart_probe,
.driver = {
- .owner = THIS_MODULE,
- .name = "grlib-apbuart",
- },
+ .owner = THIS_MODULE,
+ .name = "grlib-apbuart",
+ .of_match_table = apbuart_match,
+ },
};
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 2c9bf9b6832..eed3c2d8dd1 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
@@ -59,6 +60,9 @@
#include <linux/serial_core.h>
+static void atmel_start_rx(struct uart_port *port);
+static void atmel_stop_rx(struct uart_port *port);
+
#ifdef CONFIG_SERIAL_ATMEL_TTYAT
/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
@@ -93,6 +97,7 @@
#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
+#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -147,6 +152,9 @@ struct atmel_uart_port {
unsigned int irq_status_prev;
struct circ_buf rx_ring;
+
+ struct serial_rs485 rs485; /* rs485 settings */
+ unsigned int tx_done_mask;
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -187,6 +195,46 @@ static bool atmel_use_dma_tx(struct uart_port *port)
}
#endif
+/* Enable or disable the rs485 support */
+void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int mode;
+
+ spin_lock(&port->lock);
+
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ mode = UART_GET_MR(port);
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ atmel_port->rs485 = *rs485conf;
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ UART_PUT_TTGR(port, rs485conf->delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ if (atmel_use_dma_tx(port))
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX |
+ ATMEL_US_TXBUFE;
+ else
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
+ UART_PUT_MR(port, mode);
+
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+
+ spin_unlock(&port->lock);
+}
+
/*
* Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
*/
@@ -202,6 +250,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
unsigned int control = 0;
unsigned int mode;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
#ifdef CONFIG_ARCH_AT91RM9200
if (cpu_is_at91rm9200()) {
@@ -236,6 +285,17 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
mode |= ATMEL_US_CHMODE_LOC_LOOP;
else
mode |= ATMEL_US_CHMODE_NORMAL;
+
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
UART_PUT_MR(port, mode);
}
@@ -268,12 +328,17 @@ static u_int atmel_get_mctrl(struct uart_port *port)
*/
static void atmel_stop_tx(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
if (atmel_use_dma_tx(port)) {
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
- UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
- } else
- UART_PUT_IDR(port, ATMEL_US_TXRDY);
+ }
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_start_rx(port);
}
/*
@@ -281,17 +346,39 @@ static void atmel_stop_tx(struct uart_port *port)
*/
static void atmel_start_tx(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
if (atmel_use_dma_tx(port)) {
if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this.*/
return;
- UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_stop_rx(port);
+
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
- } else
- UART_PUT_IER(port, ATMEL_US_TXRDY);
+ }
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+}
+
+/*
+ * start receiving - port is in process of being opened.
+ */
+static void atmel_start_rx(struct uart_port *port)
+{
+ UART_PUT_CR(port, ATMEL_US_RSTSTA); /* reset status and receiver */
+
+ if (atmel_use_dma_rx(port)) {
+ /* enable PDC controller */
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ } else {
+ UART_PUT_IER(port, ATMEL_US_RXRDY);
+ }
}
/*
@@ -302,9 +389,11 @@ static void atmel_stop_rx(struct uart_port *port)
if (atmel_use_dma_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
- UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
- } else
+ UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+ port->read_status_mask);
+ } else {
UART_PUT_IDR(port, ATMEL_US_RXRDY);
+ }
}
/*
@@ -428,8 +517,9 @@ static void atmel_rx_chars(struct uart_port *port)
static void atmel_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+ if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
@@ -437,7 +527,7 @@ static void atmel_tx_chars(struct uart_port *port)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
- while (UART_GET_CSR(port) & ATMEL_US_TXRDY) {
+ while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -449,7 +539,8 @@ static void atmel_tx_chars(struct uart_port *port)
uart_write_wakeup(port);
if (!uart_circ_empty(xmit))
- UART_PUT_IER(port, ATMEL_US_TXRDY);
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
@@ -501,18 +592,10 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_tx(port)) {
- /* PDC transmit */
- if (pending & (ATMEL_US_ENDTX | ATMEL_US_TXBUFE)) {
- UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
- tasklet_schedule(&atmel_port->tasklet);
- }
- } else {
- /* Interrupt transmit */
- if (pending & ATMEL_US_TXRDY) {
- UART_PUT_IDR(port, ATMEL_US_TXRDY);
- tasklet_schedule(&atmel_port->tasklet);
- }
+ if (pending & atmel_port->tx_done_mask) {
+ /* Either PDC or interrupt transmission */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+ tasklet_schedule(&atmel_port->tasklet);
}
}
@@ -590,9 +673,15 @@ static void atmel_tx_dma(struct uart_port *port)
UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
UART_PUT_TCR(port, count);
- /* re-enable PDC transmit and interrupts */
+ /* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
- UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE);
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+ } else {
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1017,6 +1106,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
@@ -1115,6 +1205,17 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ /* Resetting serial mode to RS232 (0x0) */
+ mode &= ~ATMEL_US_USMODE;
+
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_before_send);
+ mode |= ATMEL_US_USMODE_RS485;
+ } else {
+ dev_dbg(port->dev, "Setting UART to RS232\n");
+ }
+
/* set the parity, stop bits and data size */
UART_PUT_MR(port, mode);
@@ -1231,6 +1332,35 @@ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
}
#endif
+static int
+atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
+{
+ struct serial_rs485 rs485conf;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485conf, (struct serial_rs485 __user *) arg,
+ sizeof(rs485conf)))
+ return -EFAULT;
+
+ atmel_config_rs485(port, &rs485conf);
+ break;
+
+ case TIOCGRS485:
+ if (copy_to_user((struct serial_rs485 __user *) arg,
+ &(to_atmel_uart_port(port)->rs485),
+ sizeof(rs485conf)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -1250,6 +1380,7 @@ static struct uart_ops atmel_pops = {
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
+ .ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
@@ -1265,13 +1396,12 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *data = pdev->dev.platform_data;
- port->iotype = UPIO_MEM;
- port->flags = UPF_BOOT_AUTOCONF;
- port->ops = &atmel_pops;
- port->fifosize = 1;
- port->line = pdev->id;
- port->dev = &pdev->dev;
-
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &atmel_pops;
+ port->fifosize = 1;
+ port->line = pdev->id;
+ port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
@@ -1299,8 +1429,16 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_port->use_dma_rx = data->use_dma_rx;
atmel_port->use_dma_tx = data->use_dma_tx;
- if (atmel_use_dma_tx(port))
+ atmel_port->rs485 = data->rs485;
+ /* Use TXEMPTY as the TX-done interrupt in RS485 mode, else TXRDY or ENDTX|TXBUFE */
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED)
+ atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+ else if (atmel_use_dma_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
+ atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
+ } else {
+ atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ }
}
/*
@@ -1334,6 +1472,7 @@ static void atmel_console_putchar(struct uart_port *port, int ch)
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
struct uart_port *port = &atmel_ports[co->index].uart;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, imr;
unsigned int pdc_tx;
@@ -1341,7 +1480,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
* First, save IMR and then disable interrupts
*/
imr = UART_GET_IMR(port);
- UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY);
+ UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
/* Store PDC transmit status and disable it */
pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
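
The atmel_ioctl() hook added above makes the RS485 settings reachable from user space through TIOCSRS485 and TIOCGRS485, with the driver keeping its own copy in atmel_port->rs485. A minimal user-space sketch, not part of this patch; the device node and delay value are examples only:

/* Hypothetical user-space sketch: enable RS485 on an Atmel port via the
 * ioctls handled by atmel_ioctl() above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS1", O_RDWR);	/* example device node */

	if (fd < 0)
		return 1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED;
	rs485.delay_rts_before_send = 1;	/* example delay */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");

	/* Read the configuration back to see what the driver stored */
	if (ioctl(fd, TIOCGRS485, &rs485) == 0)
		printf("RS485 %sabled\n",
		       (rs485.flags & SER_RS485_ENABLED) ? "en" : "dis");

	close(fd);
	return 0;
}
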
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index c88f8ad3ff8..e57fb3d228e 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -34,32 +34,12 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
+#include <asm/bfin_sport.h>
#include <asm/delay.h>
#include <asm/portmux.h>
#include "bfin_sport_uart.h"
-#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
-unsigned short bfin_uart_pin_req_sport0[] =
- {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
- P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
-unsigned short bfin_uart_pin_req_sport1[] =
- {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
- P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
-unsigned short bfin_uart_pin_req_sport2[] =
- {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
- P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
-#endif
-#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
-unsigned short bfin_uart_pin_req_sport3[] =
- {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
- P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
-#endif
-
struct sport_uart_port {
struct uart_port port;
int err_irq;
@@ -69,9 +49,13 @@ struct sport_uart_port {
unsigned short txmask2;
unsigned char stopb;
/* unsigned char parib; */
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int cts_pin;
+ int rts_pin;
+#endif
};
-static void sport_uart_tx_chars(struct sport_uart_port *up);
+static int sport_uart_tx_chars(struct sport_uart_port *up);
static void sport_stop_tx(struct uart_port *port);
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
@@ -219,6 +203,59 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->cts_pin < 0)
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+
+ /* CTS pin is active-low. */
+ if (SPORT_UART_GET_CTS(up))
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ else
+ return TIOCM_DSR | TIOCM_CAR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ if (up->rts_pin < 0)
+ return;
+
+ /* RTS pin is active-low. */
+ if (mctrl & TIOCM_RTS)
+ SPORT_UART_ENABLE_RTS(up);
+ else
+ SPORT_UART_DISABLE_RTS(up);
+}
+
+/*
+ * Handle any change of modem status signal.
+ */
+static irqreturn_t sport_mctrl_cts_int(int irq, void *dev_id)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)dev_id;
+ unsigned int status;
+
+ status = sport_get_mctrl(&up->port);
+ uart_handle_cts_change(&up->port, status & TIOCM_CTS);
+
+ return IRQ_HANDLED;
+}
+#else
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+ pr_debug("%s enter\n", __func__);
+ return TIOCM_CTS | TIOCM_CD | TIOCM_DSR;
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ pr_debug("%s enter\n", __func__);
+}
+#endif
+
 /* Request IRQ, setup clock */
static int sport_startup(struct uart_port *port)
{
@@ -247,6 +284,21 @@ static int sport_startup(struct uart_port *port)
goto fail2;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0) {
+ if (request_irq(gpio_to_irq(up->cts_pin),
+ sport_mctrl_cts_int,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_DISABLED, "BFIN_SPORT_UART_CTS", up)) {
+ up->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART "
+ "over SPORT CTS interrupt, disabling it.\n");
+ }
+ }
+ if (up->rts_pin >= 0)
+ gpio_direction_output(up->rts_pin, 0);
+#endif
+
return 0;
fail2:
free_irq(up->port.irq+1, up);
@@ -256,23 +308,35 @@ static int sport_startup(struct uart_port *port)
return ret;
}
-static void sport_uart_tx_chars(struct sport_uart_port *up)
+/*
+ * sport_uart_tx_chars
+ *
+ * ret 1 means need to enable sport.
+ * ret 0 means do nothing.
+ */
+static int sport_uart_tx_chars(struct sport_uart_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
if (SPORT_GET_STAT(up) & TXF)
- return;
+ return 0;
if (up->port.x_char) {
tx_one_byte(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
- return;
+ return 1;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
- sport_stop_tx(&up->port);
- return;
+ /* The waiting loop to stop SPORT TX from TX interrupt is
+ * too long. This may block SPORT RX interrupts and cause
+ * RX FIFO overflow. So only stop SPORT TX after the last char
+ * in the TX FIFO has moved into the shift register.
+ */
+ if (SPORT_GET_STAT(up) & TXHRE)
+ sport_stop_tx(&up->port);
+ return 0;
}
while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
@@ -283,6 +347,8 @@ static void sport_uart_tx_chars(struct sport_uart_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
+
+ return 1;
}
static unsigned int sport_tx_empty(struct uart_port *port)
@@ -298,23 +364,15 @@ static unsigned int sport_tx_empty(struct uart_port *port)
return 0;
}
-static unsigned int sport_get_mctrl(struct uart_port *port)
-{
- pr_debug("%s enter\n", __func__);
- return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR);
-}
-
-static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- pr_debug("%s enter\n", __func__);
-}
-
static void sport_stop_tx(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
+ if (!(SPORT_GET_TCR1(up) & TSPEN))
+ return;
+
/* Although the hold register is empty, last byte is still in shift
* register and not sent out yet. So, put a dummy data into TX FIFO.
* Then, sport tx stops when last byte is shift out and the dummy
@@ -337,11 +395,12 @@ static void sport_start_tx(struct uart_port *port)
pr_debug("%s enter\n", __func__);
 /* Write data into SPORT FIFO before enabling SPORT to transmit */
- sport_uart_tx_chars(up);
+ if (sport_uart_tx_chars(up)) {
+ /* Enable transmit, then an interrupt will be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ }
- /* Enable transmit, then an interrupt will generated */
- SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
- SSYNC();
pr_debug("%s exit\n", __func__);
}
@@ -379,6 +438,10 @@ static void sport_shutdown(struct uart_port *port)
free_irq(up->port.irq, up);
free_irq(up->port.irq+1, up);
free_irq(up->err_irq, up);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (up->cts_pin >= 0)
+ free_irq(gpio_to_irq(up->cts_pin), up);
+#endif
}
static const char *sport_type(struct uart_port *port)
@@ -448,27 +511,14 @@ static void sport_set_termios(struct uart_port *port,
/* up->parib = 1; */
}
- port->read_status_mask = OE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= (FE | PE);
- if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= BI;
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ port->read_status_mask = 0;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= FE | PE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= BI;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= OE;
- }
/* RX extract mask */
up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
@@ -488,8 +538,6 @@ static void sport_set_termios(struct uart_port *port,
/* uart baud rate */
port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
- spin_lock_irqsave(&up->port.lock, flags);
-
/* Disable UART */
SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
@@ -542,6 +590,8 @@ struct uart_ops sport_uart_ops = {
static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+#define CLASS_BFIN_SPORT_CONSOLE "bfin-sport-console"
+
static int __init
sport_uart_console_setup(struct console *co, char *options)
{
@@ -549,7 +599,11 @@ sport_uart_console_setup(struct console *co, char *options)
int baud = 57600;
int bits = 8;
int parity = 'n';
+# ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ int flow = 'r';
+# else
int flow = 'n';
+# endif
/* Check whether an invalid uart number has been specified */
if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
@@ -690,11 +744,11 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
if (bfin_sport_uart_ports[pdev->id] == NULL) {
bfin_sport_uart_ports[pdev->id] =
- kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
sport = bfin_sport_uart_ports[pdev->id];
if (!sport) {
dev_err(&pdev->dev,
- "Fail to kmalloc sport_uart_port\n");
+ "Fail to malloc sport_uart_port\n");
return -ENOMEM;
}
@@ -720,13 +774,13 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
goto out_error_free_peripherals;
}
- sport->port.membase = ioremap(res->start,
- res->end - res->start);
+ sport->port.membase = ioremap(res->start, resource_size(res));
if (!sport->port.membase) {
dev_err(&pdev->dev, "Cannot map sport IO\n");
ret = -ENXIO;
goto out_error_free_peripherals;
}
+ sport->port.mapbase = res->start;
sport->port.irq = platform_get_irq(pdev, 0);
if (sport->port.irq < 0) {
@@ -741,6 +795,22 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
ret = -ENOENT;
goto out_error_unmap;
}
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ sport->cts_pin = -1;
+ else
+ sport->cts_pin = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (res == NULL)
+ sport->rts_pin = -1;
+ else
+ sport->rts_pin = res->start;
+
+ if (sport->rts_pin >= 0)
+ gpio_request(sport->rts_pin, DRV_NAME);
+#endif
}
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
@@ -779,6 +849,10 @@ static int __devexit sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+ if (sport->rts_pin >= 0)
+ gpio_free(sport->rts_pin);
+#endif
iounmap(sport->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
@@ -802,7 +876,7 @@ static struct platform_driver sport_uart_driver = {
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
static __initdata struct early_platform_driver early_sport_uart_driver = {
- .class_str = DRV_NAME,
+ .class_str = CLASS_BFIN_SPORT_CONSOLE,
.pdrv = &sport_uart_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
};
@@ -811,7 +885,8 @@ static int __init sport_uart_rs_console_init(void)
{
early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
- early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+ early_platform_driver_probe(CLASS_BFIN_SPORT_CONSOLE,
+ BFIN_SPORT_UART_MAX_PORTS, 0);
register_console(&sport_uart_console);
@@ -824,7 +899,7 @@ static int __init sport_uart_init(void)
{
int ret;
- pr_info("Serial: Blackfin uart over sport driver\n");
+ pr_info("Blackfin uart over sport driver\n");
ret = uart_register_driver(&sport_uart_reg);
if (ret) {
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index abe03614e4d..9ce253e381d 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -37,7 +37,21 @@
#define SPORT_GET_TFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
#define SPORT_GET_TX(sport) bfin_read16(((sport)->port.membase + OFFSET_TX))
#define SPORT_GET_RX(sport) bfin_read16(((sport)->port.membase + OFFSET_RX))
-#define SPORT_GET_RX32(sport) bfin_read32(((sport)->port.membase + OFFSET_RX))
+/*
+ * If another interrupt fires while doing a 32-bit read from RX FIFO,
+ * a fake RX underflow error will be generated. So disable interrupts
+ * to prevent interruption while reading the FIFO.
+ */
+#define SPORT_GET_RX32(sport) \
+({ \
+ unsigned int __ret; \
+ if (ANOMALY_05000473) \
+ local_irq_disable(); \
+ __ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
+ if (ANOMALY_05000473) \
+ local_irq_enable(); \
+ __ret; \
+})
#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
#define SPORT_GET_RCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR2))
#define SPORT_GET_RCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
@@ -58,4 +72,15 @@
#define SPORT_TX_FIFO_SIZE 8
+#define SPORT_UART_GET_CTS(x) gpio_get_value(x->cts_pin)
+#define SPORT_UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
+#define SPORT_UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT0_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT1_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT2_UART_CTSRTS) \
+ || defined(CONFIG_SERIAL_BFIN_SPORT3_UART_CTSRTS)
+# define CONFIG_SERIAL_BFIN_SPORT_CTSRTS
+#endif
+
#endif /* _BFIN_SPORT_UART_H */
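
The probe hunk above reads the CTS and RTS GPIO numbers from the platform device's first two IORESOURCE_IO entries, and the new header macros then drive them as plain GPIOs. A minimal sketch, not part of this patch, of the extra resources a board might add to its SPORT UART device for hardware flow control, in addition to the MMIO and IRQ resources the device already carries; the GPIO numbers are examples only:

/* Hypothetical board-resource sketch: CTS as IORESOURCE_IO index 0 and
 * RTS as index 1, matching what sport_uart_probe() above looks up. */
#include <linux/ioport.h>

static struct resource board_sport_uart_ctsrts[] = {
	{
		.start	= 20,	/* example CTS GPIO number */
		.end	= 20,
		.flags	= IORESOURCE_IO,
	},
	{
		.start	= 21,	/* example RTS GPIO number */
		.end	= 21,
		.flags	= IORESOURCE_IO,
	},
};
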
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 300cea768d7..9eb62a256e9 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -1342,7 +1342,7 @@ static int __devinit cpm_uart_probe(struct of_device *ofdev,
/* initialize the device pointer for the port */
pinfo->port.dev = &ofdev->dev;
- ret = cpm_uart_init_port(ofdev->node, pinfo);
+ ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo);
if (ret)
return ret;
@@ -1372,8 +1372,11 @@ static struct of_device_id cpm_uart_match[] = {
};
static struct of_platform_driver cpm_uart_driver = {
- .name = "cpm_uart",
- .match_table = cpm_uart_match,
+ .driver = {
+ .name = "cpm_uart",
+ .owner = THIS_MODULE,
+ .of_match_table = cpm_uart_match,
+ },
.probe = cpm_uart_probe,
.remove = cpm_uart_remove,
};
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index eadc1ab6bbc..a9a94ae7234 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -14,7 +14,9 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/kgdb.h>
+#include <linux/kdb.h>
#include <linux/tty.h>
+#include <linux/console.h>
#define MAX_CONFIG_LEN 40
@@ -32,6 +34,40 @@ static struct kparam_string kps = {
static struct tty_driver *kgdb_tty_driver;
static int kgdb_tty_line;
+#ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_register_kbd(char **cptr)
+{
+ if (strncmp(*cptr, "kbd", 3) == 0) {
+ if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+ kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+ kdb_poll_idx++;
+ if (cptr[0][3] == ',')
+ *cptr += 4;
+ else
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void kgdboc_unregister_kbd(void)
+{
+ int i;
+
+ for (i = 0; i < kdb_poll_idx; i++) {
+ if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+ kdb_poll_idx--;
+ kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+ kdb_poll_funcs[kdb_poll_idx] = NULL;
+ i--;
+ }
+ }
+}
+#else /* ! CONFIG_KDB_KEYBOARD */
+#define kgdboc_register_kbd(x) 0
+#define kgdboc_unregister_kbd()
+#endif /* ! CONFIG_KDB_KEYBOARD */
+
static int kgdboc_option_setup(char *opt)
{
if (strlen(opt) > MAX_CONFIG_LEN) {
@@ -45,25 +81,51 @@ static int kgdboc_option_setup(char *opt)
__setup("kgdboc=", kgdboc_option_setup);
+static void cleanup_kgdboc(void)
+{
+ kgdboc_unregister_kbd();
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
int err;
+ char *cptr = config;
+ struct console *cons;
err = kgdboc_option_setup(config);
if (err || !strlen(config) || isspace(config[0]))
goto noconfig;
err = -ENODEV;
+ kgdboc_io_ops.is_console = 0;
+ kgdb_tty_driver = NULL;
- p = tty_find_polling_driver(config, &tty_line);
+ if (kgdboc_register_kbd(&cptr))
+ goto do_register;
+
+ p = tty_find_polling_driver(cptr, &tty_line);
if (!p)
goto noconfig;
+ cons = console_drivers;
+ while (cons) {
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+ kgdboc_io_ops.is_console = 1;
+ break;
+ }
+ cons = cons->next;
+ }
+
kgdb_tty_driver = p;
kgdb_tty_line = tty_line;
+do_register:
err = kgdb_register_io_module(&kgdboc_io_ops);
if (err)
goto noconfig;
@@ -75,6 +137,7 @@ static int configure_kgdboc(void)
noconfig:
config[0] = 0;
configured = 0;
+ cleanup_kgdboc();
return err;
}
@@ -88,20 +151,18 @@ static int __init init_kgdboc(void)
return configure_kgdboc();
}
-static void cleanup_kgdboc(void)
-{
- if (configured == 1)
- kgdb_unregister_io_module(&kgdboc_io_ops);
-}
-
static int kgdboc_get_char(void)
{
+ if (!kgdb_tty_driver)
+ return -1;
return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
kgdb_tty_line);
}
static void kgdboc_put_char(u8 chr)
{
+ if (!kgdb_tty_driver)
+ return;
kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
kgdb_tty_line, chr);
}
@@ -162,6 +223,25 @@ static struct kgdb_io kgdboc_io_ops = {
.post_exception = kgdboc_post_exp_handler,
};
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is built in, for early debugging */
+int __init kgdboc_early_init(char *opt)
+{
+ /* save the first character of the config string because the
+ * init routine can destroy it.
+ */
+ char save_ch;
+
+ kgdboc_option_setup(opt);
+ save_ch = config[0];
+ init_kgdboc();
+ config[0] = save_ch;
+ return 0;
+}
+
+early_param("ekgdboc", kgdboc_early_init);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
module_init(init_kgdboc);
module_exit(cleanup_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 02469c31bf0..84a35f69901 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -397,34 +397,10 @@ static unsigned long mpc512x_getuartclk(void *p)
return mpc5xxx_get_bus_frequency(p);
}
-#define DEFAULT_FIFO_SIZE 16
-
-static unsigned int __init get_fifo_size(struct device_node *np,
- char *fifo_name)
-{
- const unsigned int *fp;
-
- fp = of_get_property(np, fifo_name, NULL);
- if (fp)
- return *fp;
-
- pr_warning("no %s property in %s node, defaulting to %d\n",
- fifo_name, np->full_name, DEFAULT_FIFO_SIZE);
-
- return DEFAULT_FIFO_SIZE;
-}
-
-#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
- ((u32)(_base) + sizeof(struct mpc52xx_psc)))
-
/* Init PSC FIFO Controller */
static int __init mpc512x_psc_fifoc_init(void)
{
struct device_node *np;
- void __iomem *psc;
- unsigned int tx_fifo_size;
- unsigned int rx_fifo_size;
- int fifobase = 0; /* current fifo address in 32 bit words */
np = of_find_compatible_node(NULL, NULL,
"fsl,mpc5121-psc-fifo");
@@ -447,51 +423,6 @@ static int __init mpc512x_psc_fifoc_init(void)
return -ENODEV;
}
- for_each_compatible_node(np, NULL, "fsl,mpc5121-psc-uart") {
- tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
- rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
-
- /* size in register is in 4 byte units */
- tx_fifo_size /= 4;
- rx_fifo_size /= 4;
- if (!tx_fifo_size)
- tx_fifo_size = 1;
- if (!rx_fifo_size)
- rx_fifo_size = 1;
-
- psc = of_iomap(np, 0);
- if (!psc) {
- pr_err("%s: Can't map %s device\n",
- __func__, np->full_name);
- continue;
- }
-
- /* FIFO space is 4KiB, check if requested size is available */
- if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
- pr_err("%s: no fifo space available for %s\n",
- __func__, np->full_name);
- iounmap(psc);
- /*
- * chances are that another device requests less
- * fifo space, so we continue.
- */
- continue;
- }
- /* set tx and rx fifo size registers */
- out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
- fifobase += tx_fifo_size;
- out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
- fifobase += rx_fifo_size;
-
- /* reset and enable the slices */
- out_be32(&FIFOC(psc)->txcmd, 0x80);
- out_be32(&FIFOC(psc)->txcmd, 0x01);
- out_be32(&FIFOC(psc)->rxcmd, 0x80);
- out_be32(&FIFOC(psc)->rxcmd, 0x01);
-
- iounmap(psc);
- }
-
return 0;
}
@@ -1295,14 +1226,14 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
/* Check validity & presence */
for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
- if (mpc52xx_uart_nodes[idx] == op->node)
+ if (mpc52xx_uart_nodes[idx] == op->dev.of_node)
break;
if (idx >= MPC52xx_PSC_MAXNUM)
return -EINVAL;
pr_debug("Found %s assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx]->full_name, idx);
- uartclk = psc_ops->getuartclk(op->node);
+ uartclk = psc_ops->getuartclk(op->dev.of_node);
if (uartclk == 0) {
dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
@@ -1322,7 +1253,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
port->dev = &op->dev;
/* Search for IRQ and mapbase */
- ret = of_address_to_resource(op->node, 0, &res);
+ ret = of_address_to_resource(op->dev.of_node, 0, &res);
if (ret)
return ret;
@@ -1332,7 +1263,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
return -EINVAL;
}
- psc_ops->get_irq(port, op->node);
+ psc_ops->get_irq(port, op->dev.of_node);
if (port->irq == NO_IRQ) {
dev_dbg(&op->dev, "Could not get irq\n");
return -EINVAL;
@@ -1431,15 +1362,16 @@ mpc52xx_uart_of_enumerate(void)
MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
static struct of_platform_driver mpc52xx_uart_of_driver = {
- .match_table = mpc52xx_uart_of_match,
.probe = mpc52xx_uart_of_probe,
.remove = mpc52xx_uart_of_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_uart_of_suspend,
.resume = mpc52xx_uart_of_resume,
#endif
- .driver = {
- .name = "mpc52xx-psc-uart",
+ .driver = {
+ .name = "mpc52xx-psc-uart",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_uart_of_match,
},
};
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 55e113a0be0..6a9c6605666 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -2071,6 +2071,7 @@ static int mpsc_drv_probe(struct platform_device *dev)
if (!(rc = mpsc_drv_map_regs(pi, dev))) {
mpsc_drv_get_platform_data(pi, dev, dev->id);
+ pi->port.dev = &dev->dev;
if (!(rc = mpsc_make_ready(pi))) {
spin_lock_init(&pi->tx_lock);
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c
index e1ab8ec0a4a..3c02fa96f28 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/serial/nwpserial.c
@@ -344,7 +344,7 @@ int nwpserial_register_port(struct uart_port *port)
mutex_lock(&nwpserial_mutex);
- dn = to_of_device(port->dev)->node;
+ dn = to_of_device(port->dev)->dev.of_node;
if (dn == NULL)
goto out;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 4abfebdb0fc..a48d9080f55 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -31,7 +31,7 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev,
int type, struct uart_port *port)
{
struct resource resource;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
const unsigned int *clk, *spd;
const u32 *prop;
int ret, prop_size;
@@ -88,7 +88,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
int port_type;
int ret;
- if (of_find_property(ofdev->node, "used-by-rtas", NULL))
+ if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
return -EBUSY;
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -175,11 +175,13 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
};
static struct of_platform_driver of_platform_serial_driver = {
- .owner = THIS_MODULE,
- .name = "of_serial",
+ .driver = {
+ .name = "of_serial",
+ .owner = THIS_MODULE,
+ .of_match_table = of_platform_serial_table,
+ },
.probe = of_platform_serial_probe,
.remove = of_platform_serial_remove,
- .match_table = of_platform_serial_table,
};
static int __init of_platform_serial_init(void)
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 700e10833bf..cabbdc7ba58 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1611,7 +1611,7 @@ static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
/* Iterate the pmz_ports array to find a matching entry
*/
for (i = 0; i < MAX_ZS_PORTS; i++)
- if (pmz_ports[i].node == mdev->ofdev.node) {
+ if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
struct uart_pmac_port *uap = &pmz_ports[i];
uap->dev = mdev;
diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c
index 8dc03837617..4a789e5361a 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/serial/s5pv210.c
@@ -119,7 +119,7 @@ static int s5p_serial_probe(struct platform_device *pdev)
return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
}
-static struct platform_driver s5p_serial_drv = {
+static struct platform_driver s5p_serial_driver = {
.probe = s5p_serial_probe,
.remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
@@ -130,19 +130,19 @@ static struct platform_driver s5p_serial_drv = {
static int __init s5pv210_serial_console_init(void)
{
- return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf);
+ return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf);
}
console_initcall(s5pv210_serial_console_init);
static int __init s5p_serial_init(void)
{
- return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf);
+ return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
}
static void __exit s5p_serial_exit(void)
{
- platform_driver_unregister(&s5p_serial_drv);
+ platform_driver_unregister(&s5p_serial_driver);
}
module_init(s5p_serial_init);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 8cfa5b12ea7..dadd686c980 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -89,7 +89,6 @@ struct serial_info {
int manfid;
int prodid;
int c950ctrl;
- dev_node_t node[4];
int line[4];
const struct serial_quirk *quirk;
};
@@ -289,8 +288,6 @@ static void serial_remove(struct pcmcia_device *link)
for (i = 0; i < info->ndev; i++)
serial8250_unregister_port(info->line[i]);
- info->p_dev->dev_node = NULL;
-
if (!info->slave)
pcmcia_disable_device(link);
}
@@ -343,7 +340,6 @@ static int serial_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,11 +407,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
}
info->line[info->ndev] = line;
- sprintf(info->node[info->ndev].dev_name, "ttyS%d", line);
- info->node[info->ndev].major = TTY_MAJOR;
- info->node[info->ndev].minor = 0x40 + line;
- if (info->ndev > 0)
- info->node[info->ndev - 1].next = &info->node[info->ndev];
info->ndev++;
return 0;
@@ -486,7 +477,7 @@ static int simple_config(struct pcmcia_device *link)
}
if (info->slave) {
return setup_serial(link, info, port,
- link->irq.AssignedIRQ);
+ link->irq);
}
}
@@ -507,10 +498,6 @@ static int simple_config(struct pcmcia_device *link)
return -1;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
- link->irq.AssignedIRQ = 0;
-
if (info->multi && (info->manfid == MANFID_3COM))
link->conf.ConfigIndex &= ~(0x08);
@@ -523,7 +510,7 @@ found_port:
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
return -1;
- return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ return setup_serial(link, info, link->io.BasePort1, link->irq);
}
static int multi_config_check(struct pcmcia_device *p_dev,
@@ -586,13 +573,9 @@ static int multi_config(struct pcmcia_device *link)
}
}
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- /* FIXME: comment does not fit, error handling does not fit */
- printk(KERN_NOTICE
- "serial_cs: no usable port range found, giving up\n");
- link->irq.AssignedIRQ = 0;
- }
+ if (!link->irq)
+ dev_warn(&link->dev,
+ "serial_cs: no usable IRQ found, continuing...\n");
/*
* Apply any configuration quirks.
@@ -615,11 +598,11 @@ static int multi_config(struct pcmcia_device *link)
if (link->conf.ConfigIndex == 1 ||
link->conf.ConfigIndex == 3) {
err = setup_serial(link, info, base2,
- link->irq.AssignedIRQ);
+ link->irq);
base2 = link->io.BasePort1;
} else {
err = setup_serial(link, info, link->io.BasePort1,
- link->irq.AssignedIRQ);
+ link->irq);
}
info->c950ctrl = base2;
@@ -633,10 +616,10 @@ static int multi_config(struct pcmcia_device *link)
return 0;
}
- setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
+ setup_serial(link, info, link->io.BasePort1, link->irq);
for (i = 0; i < info->multi - 1; i++)
setup_serial(link, info, base2 + (8 * i),
- link->irq.AssignedIRQ);
+ link->irq);
return 0;
}
@@ -720,7 +703,6 @@ static int serial_config(struct pcmcia_device * link)
if (info->quirk->post(link))
goto failed;
- link->dev_node = &info->node[0];
return 0;
failed:
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61..5f90fcd7d10 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
/* Interface clock */
struct clk *iclk;
- /* Data clock */
- struct clk *dclk;
+ /* Function clock */
+ struct clk *fclk;
struct list_head node;
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct device *dma_dev;
- enum sh_dmae_slave_chan_id slave_tx;
- enum sh_dmae_slave_chan_id slave_rx;
+ unsigned int slave_tx;
+ unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
struct work_struct work_tx;
struct work_struct work_rx;
struct timer_list rx_timer;
+ unsigned int rx_timeout;
#endif
};
@@ -150,7 +151,11 @@ static int sci_poll_get_char(struct uart_port *port)
handle_error(port);
continue;
}
- } while (!(status & SCxSR_RDxF(port)));
+ break;
+ } while (1);
+
+ if (!(status & SCxSR_RDxF(port)))
+ return NO_POLL_CHAR;
c = sci_in(port, SCxRDR);
@@ -674,22 +679,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
struct sci_port *s = to_sci_port(port);
if (s->chan_rx) {
- unsigned long tout;
u16 scr = sci_in(port, SCSCR);
u16 ssr = sci_in(port, SCxSR);
/* Disable future Rx interrupts */
- sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+ if (port->type == PORT_SCIFA) {
+ disable_irq_nosync(irq);
+ scr |= 0x4000;
+ } else {
+ scr &= ~SCI_CTRL_FLAGS_RIE;
+ }
+ sci_out(port, SCSCR, scr);
/* Clear current interrupt */
sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
- /* Calculate delay for 1.5 DMA buffers */
- tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
- port->fifosize / 2;
- dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
- tout * 1000 / HZ);
- if (tout < 2)
- tout = 2;
- mod_timer(&s->rx_timer, jiffies + tout);
+ dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+ jiffies, s->rx_timeout);
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
return IRQ_HANDLED;
}
@@ -799,7 +804,7 @@ static int sci_notifier(struct notifier_block *self,
(phase == CPUFREQ_RESUMECHANGE)) {
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -810,21 +815,17 @@ static void sci_clk_enable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- clk_enable(sci_port->dclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
- if (sci_port->iclk)
- clk_enable(sci_port->iclk);
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
}
static void sci_clk_disable(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
- if (sci_port->iclk)
- clk_disable(sci_port->iclk);
-
- clk_disable(sci_port->dclk);
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
}
static int sci_request_irq(struct sci_port *port)
@@ -913,22 +914,26 @@ static void sci_dma_tx_complete(void *arg)
spin_lock_irqsave(&port->lock, flags);
- xmit->tail += s->sg_tx.length;
+ xmit->tail += sg_dma_len(&s->sg_tx);
xmit->tail &= UART_XMIT_SIZE - 1;
- port->icount.tx += s->sg_tx.length;
+ port->icount.tx += sg_dma_len(&s->sg_tx);
async_tx_ack(s->desc_tx);
s->cookie_tx = -EINVAL;
s->desc_tx = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
-
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_chars_pending(xmit))
+ if (!uart_circ_empty(xmit)) {
schedule_work(&s->work_tx);
+ } else if (port->type == PORT_SCIFA) {
+ u16 ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
}
/* Locking: called with port lock held */
@@ -972,13 +977,13 @@ static void sci_dma_rx_complete(void *arg)
unsigned long flags;
int count;
- dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
spin_lock_irqsave(&port->lock, flags);
count = sci_dma_rx_push(s, tty, s->buf_len_rx);
- mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+ mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
spin_unlock_irqrestore(&port->lock, flags);
@@ -999,8 +1004,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
s->chan_rx = NULL;
s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
dma_release_channel(chan);
- dma_free_coherent(port->dev, s->buf_len_rx * 2,
- sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
+ if (sg_dma_address(&s->sg_rx[0]))
+ dma_free_coherent(port->dev, s->buf_len_rx * 2,
+ sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
if (enable_pio)
sci_start_rx(port);
}
@@ -1050,6 +1056,8 @@ static void sci_submit_rx(struct sci_port *s)
sci_rx_dma_release(s, true);
return;
}
+ dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+ s->cookie_rx[i], i);
}
s->active_rx = s->cookie_rx[0];
@@ -1084,7 +1092,7 @@ static void work_fn_rx(struct work_struct *work)
unsigned long flags;
int count;
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
sh_desc->partial, sh_desc->cookie);
@@ -1107,10 +1115,10 @@ static void work_fn_rx(struct work_struct *work)
return;
}
- dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
- s->cookie_rx[new], new);
-
s->active_rx = s->cookie_rx[!new];
+
+ dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+ s->cookie_rx[new], new, s->active_rx);
}
static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1139,13 @@ static void work_fn_tx(struct work_struct *work)
*/
spin_lock_irq(&port->lock);
sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
- sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+ sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
sg->offset;
- sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+ sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- sg->dma_length = sg->length;
spin_unlock_irq(&port->lock);
- BUG_ON(!sg->length);
+ BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1180,28 @@ static void work_fn_tx(struct work_struct *work)
static void sci_start_tx(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
unsigned short ctrl;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct sci_port *s = to_sci_port(port);
-
- if (s->chan_tx) {
- if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
- schedule_work(&s->work_tx);
-
- return;
+ if (port->type == PORT_SCIFA) {
+ u16 new, scr = sci_in(port, SCSCR);
+ if (s->chan_tx)
+ new = scr | 0x8000;
+ else
+ new = scr & ~0x8000;
+ if (new != scr)
+ sci_out(port, SCSCR, new);
}
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+ s->cookie_tx < 0)
+ schedule_work(&s->work_tx);
#endif
-
- /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = sci_in(port, SCSCR);
- ctrl |= SCI_CTRL_FLAGS_TIE;
- sci_out(port, SCSCR, ctrl);
+ if (!s->chan_tx || port->type == PORT_SCIFA) {
+ /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+ ctrl = sci_in(port, SCSCR);
+ sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+ }
}
static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1210,8 @@ static void sci_stop_tx(struct uart_port *port)
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x8000;
ctrl &= ~SCI_CTRL_FLAGS_TIE;
sci_out(port, SCSCR, ctrl);
}
@@ -1208,6 +1222,8 @@ static void sci_start_rx(struct uart_port *port)
/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl |= sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
sci_out(port, SCSCR, ctrl);
}
@@ -1217,6 +1233,8 @@ static void sci_stop_rx(struct uart_port *port)
/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
ctrl = sci_in(port, SCSCR);
+ if (port->type == PORT_SCIFA)
+ ctrl &= ~0x4000;
ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
sci_out(port, SCSCR, ctrl);
}
@@ -1251,8 +1269,12 @@ static void rx_timer_fn(unsigned long arg)
{
struct sci_port *s = (struct sci_port *)arg;
struct uart_port *port = &s->port;
-
u16 scr = sci_in(port, SCSCR);
+
+ if (port->type == PORT_SCIFA) {
+ scr &= ~0x4000;
+ enable_irq(s->irqs[1]);
+ }
sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
schedule_work(&s->work_rx);
@@ -1339,8 +1361,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
(int)buf[i] & ~PAGE_MASK);
- sg->dma_address = dma[i];
- sg->dma_length = sg->length;
+ sg_dma_address(sg) = dma[i];
}
INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1424,12 @@ static void sci_shutdown(struct uart_port *port)
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct sci_port *s = to_sci_port(port);
+#endif
unsigned int status, baud, smr_val, max_baud;
int t = -1;
+ u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1452,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
if (port->type != PORT_SCI)
- sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1483,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+ sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
sci_out(port, SCSCR, SCSCR_INIT(port));
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ /*
+ * Calculate delay for 1.5 DMA buffers: see
+ * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+ * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+ * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)".
+ * Below we then calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+ * sizes), but it has been found experimentally that this is not
+ * enough: the driver too often runs into a needless DMA timeout. A
+ * minimum of 20ms seems to work reliably.
+ */
+ if (s->chan_rx) {
+ s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+ port->fifosize / 2;
+ dev_dbg(port->dev,
+ "DMA Rx t-out %ums, tty t-out %u jiffies\n",
+ s->rx_timeout * 1000 / HZ, port->timeout);
+ if (s->rx_timeout < msecs_to_jiffies(20))
+ s->rx_timeout = msecs_to_jiffies(20);
+ }
+#endif
+
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
}
@@ -1553,10 +1600,10 @@ static struct uart_ops sci_uart_ops = {
#endif
};
-static void __devinit sci_init_single(struct platform_device *dev,
- struct sci_port *sci_port,
- unsigned int index,
- struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+ struct sci_port *sci_port,
+ unsigned int index,
+ struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
@@ -1577,8 +1624,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
}
if (dev) {
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+ if (IS_ERR(sci_port->iclk)) {
+ sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+ if (IS_ERR(sci_port->iclk)) {
+ dev_err(&dev->dev, "can't get iclk\n");
+ return PTR_ERR(sci_port->iclk);
+ }
+ }
+
+ /*
+ * The function clock is optional, ignore it if we can't
+ * find it.
+ */
+ sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+ if (IS_ERR(sci_port->fclk))
+ sci_port->fclk = NULL;
+
sci_port->enable = sci_clk_enable;
sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
@@ -1605,6 +1667,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
#endif
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+ return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1817,11 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
+ list_for_each_entry(p, &priv->ports, node) {
uart_remove_one_port(&sci_uart_driver, &p->port);
+ clk_put(p->iclk);
+ clk_put(p->fclk);
+ }
spin_unlock_irqrestore(&priv->lock, flags);
kfree(priv);
@@ -1781,7 +1847,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
return 0;
}
- sci_init_single(dev, sciport, index, p);
+ ret = sci_init_single(dev, sciport, index, p);
+ if (ret)
+ return ret;
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret)
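The comment added in sci_set_termios() above describes the DMA Rx timeout arithmetic in words; the standalone sketch below just evaluates the same expression with assumed numbers (HZ=250, 115200 baud with CS8, a 64-byte FIFO, and a hypothetical 128-byte DMA buffer chosen so the figures match the comment's example):

	/* Userspace sketch of the rx_timeout calculation; all values here
	 * are assumptions for illustration, not taken from the driver. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int hz = 250;		/* assumed tick rate */
		unsigned int timeout = 1 + 5;	/* jiffies: 1 for data + 5 "slop" */
		unsigned int buf_len_rx = 128;	/* hypothetical DMA buffer size */
		unsigned int fifosize = 64;	/* FIFO size from the comment */

		/* same expression as the driver: 1.5 DMA buffers == 3 FIFOs */
		unsigned int rx_timeout =
			(timeout - hz / 50) * buf_len_rx * 3 / fifosize / 2;

		printf("rx_timeout = %u jiffies (%u ms)\n",
		       rx_timeout, rx_timeout * 1000 / hz);
		/* prints 3 jiffies (12 ms); the driver then clamps this to a
		 * 20 ms minimum via msecs_to_jiffies(20) */
		return 0;
	}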
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index d14cca7fb88..890f9174296 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -565,7 +565,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
if (err)
goto out_free_con_read_page;
- sunserial_console_match(&sunhv_console, op->node,
+ sunserial_console_match(&sunhv_console, op->dev.of_node,
&sunhv_reg, port->line, false);
err = uart_add_one_port(&sunhv_reg, port);
@@ -630,8 +630,11 @@ static const struct of_device_id hv_match[] = {
MODULE_DEVICE_TABLE(of, hv_match);
static struct of_platform_driver hv_driver = {
- .name = "hv",
- .match_table = hv_match,
+ .driver = {
+ .name = "hv",
+ .owner = THIS_MODULE,
+ .of_match_table = hv_match,
+ },
.probe = hv_probe,
.remove = __devexit_p(hv_remove),
};
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d2e0321049e..5e81bc6b48b 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
printk("Console: ttyS%d (SAB82532)\n",
(sunsab_reg.minor - 64) + con->index);
- sunserial_console_termios(con, to_of_device(up->port.dev)->node);
+ sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
switch (con->cflag & CBAUD) {
case B150: baud = 150; break;
@@ -1026,11 +1026,11 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
if (err)
goto out1;
- sunserial_console_match(SUNSAB_CONSOLE(), op->node,
+ sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
&sunsab_reg, up[0].port.line,
false);
- sunserial_console_match(SUNSAB_CONSOLE(), op->node,
+ sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
&sunsab_reg, up[1].port.line,
false);
@@ -1093,8 +1093,11 @@ static const struct of_device_id sab_match[] = {
MODULE_DEVICE_TABLE(of, sab_match);
static struct of_platform_driver sab_driver = {
- .name = "sab",
- .match_table = sab_match,
+ .driver = {
+ .name = "sab",
+ .owner = THIS_MODULE,
+ .of_match_table = sab_match,
+ },
.probe = sab_probe,
.remove = __devexit_p(sab_remove),
};
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 01f7731e59b..234459c2f01 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1200,7 +1200,7 @@ static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up)
return -ENODEV;
printk("%s: %s port at %llx, irq %u\n",
- to_of_device(up->port.dev)->node->full_name,
+ to_of_device(up->port.dev)->dev.of_node->full_name,
(up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse",
(unsigned long long) up->port.mapbase,
up->port.irq);
@@ -1352,7 +1352,7 @@ static int __init sunsu_console_setup(struct console *co, char *options)
spin_lock_init(&port->lock);
/* Get firmware console settings. */
- sunserial_console_termios(co, to_of_device(port->dev)->node);
+ sunserial_console_termios(co, to_of_device(port->dev)->dev.of_node);
memset(&termios, 0, sizeof(struct ktermios));
termios.c_cflag = co->cflag;
@@ -1409,7 +1409,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
static int __devinit su_probe(struct of_device *op, const struct of_device_id *match)
{
static int inst;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct uart_sunsu_port *up;
struct resource *rp;
enum su_type type;
@@ -1539,8 +1539,11 @@ static const struct of_device_id su_match[] = {
MODULE_DEVICE_TABLE(of, su_match);
static struct of_platform_driver su_driver = {
- .name = "su",
- .match_table = su_match,
+ .driver = {
+ .name = "su",
+ .owner = THIS_MODULE,
+ .of_match_table = su_match,
+ },
.probe = su_probe,
.remove = __devexit_p(su_remove),
};
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 2c7a66af4f5..f9a24f4ebb3 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -102,6 +102,8 @@ struct uart_sunzilog_port {
#endif
};
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase))
#define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT))
@@ -996,6 +998,50 @@ static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *se
return -EINVAL;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+ unsigned char ch, r1;
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->control);
+ ZSDELAY();
+
+ /* This funny hack depends upon BRK_ABRT not interfering
+ * with the other bits we care about in R1.
+ */
+ if (ch & BRK_ABRT)
+ r1 |= BRK_ABRT;
+
+ if (!(ch & Rx_CH_AV))
+ return NO_POLL_CHAR;
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+ return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+ unsigned char ch)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+ sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static struct uart_ops sunzilog_pops = {
.tx_empty = sunzilog_tx_empty,
.set_mctrl = sunzilog_set_mctrl,
@@ -1013,6 +1059,10 @@ static struct uart_ops sunzilog_pops = {
.request_port = sunzilog_request_port,
.config_port = sunzilog_config_port,
.verify_port = sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sunzilog_get_poll_char,
+ .poll_put_char = sunzilog_put_poll_char,
+#endif
};
static int uart_chip_count;
@@ -1180,7 +1230,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
(sunzilog_reg.minor - 64) + con->index, con->index);
/* Get firmware console settings. */
- sunserial_console_termios(con, to_of_device(up->port.dev)->node);
+ sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
/* Firmware console speed is limited to 150-->38400 baud so
* this hackish cflag thing is OK.
@@ -1358,7 +1408,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
int keyboard_mouse = 0;
int err;
- if (of_find_property(op->node, "keyboard", NULL))
+ if (of_find_property(op->dev.of_node, "keyboard", NULL))
keyboard_mouse = 1;
/* uarts must come before keyboards/mice */
@@ -1415,7 +1465,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
sunzilog_init_hw(&up[1]);
if (!keyboard_mouse) {
- if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
&sunzilog_reg, up[0].port.line,
false))
up->flags |= SUNZILOG_FLAG_IS_CONS;
@@ -1425,7 +1475,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
rp, sizeof(struct zilog_layout));
return err;
}
- if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
&sunzilog_reg, up[1].port.line,
false))
up->flags |= SUNZILOG_FLAG_IS_CONS;
@@ -1491,8 +1541,11 @@ static const struct of_device_id zs_match[] = {
MODULE_DEVICE_TABLE(of, zs_match);
static struct of_platform_driver zs_driver = {
- .name = "zs",
- .match_table = zs_match,
+ .driver = {
+ .name = "zs",
+ .owner = THIS_MODULE,
+ .of_match_table = zs_match,
+ },
.probe = zs_probe,
.remove = __devexit_p(zs_remove),
};
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 786ba85c170..67ca642713b 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -68,12 +68,22 @@ static void timbuart_start_tx(struct uart_port *port)
tasklet_schedule(&uart->tasklet);
}
+static unsigned int timbuart_tx_empty(struct uart_port *port)
+{
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
+
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
+}
+
static void timbuart_flush_buffer(struct uart_port *port)
{
- u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
+ if (!timbuart_tx_empty(port)) {
+ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
+ TIMBUART_CTRL_FLSHTX;
- iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite32(TXBF, port->membase + TIMBUART_ISR);
+ iowrite8(ctl, port->membase + TIMBUART_CTRL);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
+ }
}
static void timbuart_rx_chars(struct uart_port *port)
@@ -195,13 +205,6 @@ void timbuart_tasklet(unsigned long arg)
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
}
-static unsigned int timbuart_tx_empty(struct uart_port *port)
-{
- u32 isr = ioread32(port->membase + TIMBUART_ISR);
-
- return (isr & TXBE) ? TIOCSER_TEMT : 0;
-}
-
static unsigned int timbuart_get_mctrl(struct uart_port *port)
{
u8 cts = ioread8(port->membase + TIMBUART_CTRL);
@@ -220,7 +223,7 @@ static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_RTS)
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
else
- iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
+ iowrite8(0, port->membase + TIMBUART_CTRL);
}
static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index f0a6c61b17f..8acccd56437 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -86,7 +86,7 @@ static int ulite_receive(struct uart_port *port, int stat)
/* stats */
if (stat & ULITE_STATUS_RXVALID) {
port->icount.rx++;
- ch = readb(port->membase + ULITE_RX);
+ ch = ioread32be(port->membase + ULITE_RX);
if (stat & ULITE_STATUS_PARITY)
port->icount.parity++;
@@ -131,7 +131,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
return 0;
if (port->x_char) {
- writeb(port->x_char, port->membase + ULITE_TX);
+ iowrite32be(port->x_char, port->membase + ULITE_TX);
port->x_char = 0;
port->icount.tx++;
return 1;
@@ -140,7 +140,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return 0;
- writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
port->icount.tx++;
@@ -157,7 +157,7 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
int busy, n = 0;
do {
- int stat = readb(port->membase + ULITE_STATUS);
+ int stat = ioread32be(port->membase + ULITE_STATUS);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
n++;
@@ -178,7 +178,7 @@ static unsigned int ulite_tx_empty(struct uart_port *port)
unsigned int ret;
spin_lock_irqsave(&port->lock, flags);
- ret = readb(port->membase + ULITE_STATUS);
+ ret = ioread32be(port->membase + ULITE_STATUS);
spin_unlock_irqrestore(&port->lock, flags);
return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
@@ -201,7 +201,7 @@ static void ulite_stop_tx(struct uart_port *port)
static void ulite_start_tx(struct uart_port *port)
{
- ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+ ulite_transmit(port, ioread32be(port->membase + ULITE_STATUS));
}
static void ulite_stop_rx(struct uart_port *port)
@@ -230,17 +230,17 @@ static int ulite_startup(struct uart_port *port)
if (ret)
return ret;
- writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+ iowrite32be(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
port->membase + ULITE_CONTROL);
- writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
return 0;
}
static void ulite_shutdown(struct uart_port *port)
{
- writeb(0, port->membase + ULITE_CONTROL);
- readb(port->membase + ULITE_CONTROL); /* dummy */
+ iowrite32be(0, port->membase + ULITE_CONTROL);
+ ioread32be(port->membase + ULITE_CONTROL); /* dummy */
free_irq(port->irq, port);
}
@@ -352,7 +352,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
/* Spin waiting for TX fifo to have space available */
for (i = 0; i < 100000; i++) {
- val = readb(port->membase + ULITE_STATUS);
+ val = ioread32be(port->membase + ULITE_STATUS);
if ((val & ULITE_STATUS_TXFULL) == 0)
break;
cpu_relax();
@@ -362,7 +362,7 @@ static void ulite_console_wait_tx(struct uart_port *port)
static void ulite_console_putchar(struct uart_port *port, int ch)
{
ulite_console_wait_tx(port);
- writeb(ch, port->membase + ULITE_TX);
+ iowrite32be(ch, port->membase + ULITE_TX);
}
static void ulite_console_write(struct console *co, const char *s,
@@ -379,8 +379,8 @@ static void ulite_console_write(struct console *co, const char *s,
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
- writeb(0, port->membase + ULITE_CONTROL);
+ ier = ioread32be(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+ iowrite32be(0, port->membase + ULITE_CONTROL);
uart_console_write(port, s, count, ulite_console_putchar);
@@ -388,7 +388,7 @@ static void ulite_console_write(struct console *co, const char *s,
/* restore interrupt state */
if (ier)
- writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+ iowrite32be(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -591,17 +591,17 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match);
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
- irq = irq_of_parse_and_map(op->node, 0);
+ irq = irq_of_parse_and_map(op->dev.of_node, 0);
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
- return ulite_assign(&op->dev, id ? *id : -1, res.start+3, irq);
+ return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
}
static int __devexit ulite_of_remove(struct of_device *op)
@@ -610,13 +610,12 @@ static int __devexit ulite_of_remove(struct of_device *op)
}
static struct of_platform_driver ulite_of_driver = {
- .owner = THIS_MODULE,
- .name = "uartlite",
- .match_table = ulite_of_match,
.probe = ulite_of_probe,
.remove = __devexit_p(ulite_of_remove),
.driver = {
.name = "uartlite",
+ .owner = THIS_MODULE,
+ .of_match_table = ulite_of_match,
},
};
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 074904912f6..907b06f5c44 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -1197,7 +1197,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
static int ucc_uart_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
const unsigned int *iprop; /* Integer OF properties */
const char *sprop; /* String OF properties */
struct uart_qe_port *qe_port = NULL;
@@ -1486,9 +1486,11 @@ static struct of_device_id ucc_uart_match[] = {
MODULE_DEVICE_TABLE(of, ucc_uart_match);
static struct of_platform_driver ucc_uart_of_driver = {
- .owner = THIS_MODULE,
- .name = "ucc_uart",
- .match_table = ucc_uart_match,
+ .driver = {
+ .name = "ucc_uart",
+ .owner = THIS_MODULE,
+ .of_match_table = ucc_uart_match,
+ },
.probe = ucc_uart_probe,
.remove = ucc_uart_remove,
};
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
index 34aba30eb84..f5b4ca58154 100644
--- a/drivers/sfi/sfi_acpi.c
+++ b/drivers/sfi/sfi_acpi.c
@@ -173,3 +173,44 @@ int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id,
sfi_acpi_put_table(table);
return ret;
}
+
+static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct sfi_table_attr *tbl_attr =
+ container_of(bin_attr, struct sfi_table_attr, attr);
+ struct acpi_table_header *th = NULL;
+ struct sfi_table_key key;
+ ssize_t cnt;
+
+ key.sig = tbl_attr->name;
+ key.oem_id = NULL;
+ key.oem_table_id = NULL;
+
+ th = sfi_acpi_get_table(&key);
+ if (!th)
+ return 0;
+
+ cnt = memory_read_from_buffer(buf, count, &offset,
+ th, th->length);
+ sfi_acpi_put_table(th);
+
+ return cnt;
+}
+
+
+void __init sfi_acpi_sysfs_init(void)
+{
+ u32 tbl_cnt, i;
+ struct sfi_table_attr *tbl_attr;
+
+ tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
+ for (i = 0; i < tbl_cnt; i++) {
+ tbl_attr =
+ sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]);
+ tbl_attr->attr.read = sfi_acpi_table_show;
+ }
+
+ return;
+}
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index b204a092913..00519595864 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -67,6 +67,7 @@
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/sfi.h>
+#include <linux/slab.h>
#include "sfi_core.h"
@@ -382,6 +383,102 @@ static __init int sfi_find_syst(void)
return -1;
}
+static struct kobject *sfi_kobj;
+static struct kobject *tables_kobj;
+
+static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct sfi_table_attr *tbl_attr =
+ container_of(bin_attr, struct sfi_table_attr, attr);
+ struct sfi_table_header *th = NULL;
+ struct sfi_table_key key;
+ ssize_t cnt;
+
+ key.sig = tbl_attr->name;
+ key.oem_id = NULL;
+ key.oem_table_id = NULL;
+
+ if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) {
+ th = sfi_get_table(&key);
+ if (!th)
+ return 0;
+
+ cnt = memory_read_from_buffer(buf, count, &offset,
+ th, th->len);
+ sfi_put_table(th);
+ } else
+ cnt = memory_read_from_buffer(buf, count, &offset,
+ syst_va, syst_va->header.len);
+
+ return cnt;
+}
+
+struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
+{
+ struct sfi_table_attr *tbl_attr;
+ struct sfi_table_header *th;
+ int ret;
+
+ tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL);
+ if (!tbl_attr)
+ return NULL;
+
+ th = sfi_map_table(pa);
+ if (!th || !th->sig[0]) {
+ kfree(tbl_attr);
+ return NULL;
+ }
+
+ sysfs_attr_init(&tbl_attr->attr.attr);
+ memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE);
+
+ tbl_attr->attr.size = 0;
+ tbl_attr->attr.read = sfi_table_show;
+ tbl_attr->attr.attr.name = tbl_attr->name;
+ tbl_attr->attr.attr.mode = 0400;
+
+ ret = sysfs_create_bin_file(tables_kobj,
+ &tbl_attr->attr);
+ if (ret)
+ kfree(tbl_attr);
+
+ sfi_unmap_table(th);
+ return tbl_attr;
+}
+
+static int __init sfi_sysfs_init(void)
+{
+ int tbl_cnt, i;
+
+ if (sfi_disabled)
+ return 0;
+
+ sfi_kobj = kobject_create_and_add("sfi", firmware_kobj);
+ if (!sfi_kobj)
+ return 0;
+
+ tables_kobj = kobject_create_and_add("tables", sfi_kobj);
+ if (!tables_kobj) {
+ kobject_put(sfi_kobj);
+ return 0;
+ }
+
+ sfi_sysfs_install_table(syst_pa);
+
+ tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
+
+ for (i = 0; i < tbl_cnt; i++)
+ sfi_sysfs_install_table(syst_va->pentry[i]);
+
+ sfi_acpi_sysfs_init();
+ kobject_uevent(sfi_kobj, KOBJ_ADD);
+ kobject_uevent(tables_kobj, KOBJ_ADD);
+ pr_info("SFI sysfs interfaces init success\n");
+ return 0;
+}
+
void __init sfi_init(void)
{
if (!acpi_disabled)
@@ -390,7 +487,7 @@ void __init sfi_init(void)
if (sfi_disabled)
return;
- pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n");
+ pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n");
if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
disable_sfi();
@@ -414,3 +511,9 @@ void __init sfi_init_late(void)
sfi_acpi_init();
}
+
+/*
+ * The reason we put it here is that we need to wait until /sys/firmware
+ * is set up; only then can our interface be registered in /sys/firmware/sfi.
+ */
+core_initcall(sfi_sysfs_init);
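The sysfs code above exposes every SFI table (and, via sfi_acpi_sysfs_init(), the SFI-wrapped ACPI tables) as a read-only binary file named after its 4-byte signature under /sys/firmware/sfi/tables/. A userspace sketch that dumps one of them, assuming the SYST signature as an example; given the 0400 mode set above, this needs root:

	/* Userspace sketch: read an SFI table exported by the code above.
	 * The path follows from the "sfi" and "tables" kobject names; the
	 * SYST signature is just an example. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/firmware/sfi/tables/SYST";
		unsigned char buf[4096];
		size_t n;
		FILE *f = fopen(path, "rb");

		if (!f) {
			perror(path);
			return 1;
		}
		n = fread(buf, 1, sizeof(buf), f);
		fclose(f);

		/* an SFI table starts with a 4-byte signature and a 32-bit length */
		if (n >= 8)
			printf("%.4s: read %zu bytes\n", (const char *)buf, n);
		return 0;
	}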
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
index da82d39e104..b7cf220d44e 100644
--- a/drivers/sfi/sfi_core.h
+++ b/drivers/sfi/sfi_core.h
@@ -61,6 +61,12 @@ struct sfi_table_key{
char *oem_table_id;
};
+/* sysfs interface */
+struct sfi_table_attr {
+ struct bin_attribute attr;
+ char name[8];
+};
+
#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL }
extern int __init sfi_acpi_init(void);
@@ -68,3 +74,5 @@ extern struct sfi_table_header *sfi_check_table(u64 paddr,
struct sfi_table_key *key);
struct sfi_table_header *sfi_get_table(struct sfi_table_key *key);
extern void sfi_put_table(struct sfi_table_header *table);
+extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa);
+extern void __init sfi_acpi_sysfs_init(void);
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 00000000000..a54de0b9b3d
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
+config INTC_USERIMASK
+ bool "Userspace interrupt masking support"
+ depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+ help
+ This enables support for hardware-assisted userspace hardirq
+ masking.
+
+ SH-4A and newer interrupt blocks all support a special shadowed
+ page with all non-masking registers obscured when mapped in to
+ userspace. This is primarily for use by userspace device
+ drivers that are using special priority levels.
+
+ If in doubt, say N.
+
+config INTC_BALANCING
+ bool "Hardware IRQ balancing support"
+ depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
+ help
+ This enables support for IRQ auto-distribution mode on SH-X3
+ SMP parts. All of the balancing and CPU wakeup decisions are
+ taken care of automatically by hardware for distributed
+ vectors.
+
+ If in doubt, say N.
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 4956bf1f213..78bb5127abd 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_MAPLE) += maple/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
+obj-$(CONFIG_SUPERH) += clk.o
+obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o
obj-y += intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
new file mode 100644
index 00000000000..f5c80ba9ab1
--- /dev/null
+++ b/drivers/sh/clk-cpg.c
@@ -0,0 +1,298 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+
+static int sh_clk_mstp32_enable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
+ clk->enable_reg);
+ return 0;
+}
+
+static void sh_clk_mstp32_disable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
+ clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_mstp32_clk_ops = {
+ .enable = sh_clk_mstp32_enable,
+ .disable = sh_clk_mstp32_disable,
+ .recalc = followparent_recalc,
+};
+
+int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ int ret = 0;
+ int k;
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+ clkp->ops = &sh_clk_mstp32_clk_ops;
+ ret |= clk_register(clkp);
+ }
+
+ return ret;
+}
+
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_table_round(clk, clk->freq_table, rate);
+}
+
+static int sh_clk_div6_divisors[64] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+
+static struct clk_div_mult_table sh_clk_div6_table = {
+ .divisors = sh_clk_div6_divisors,
+ .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
+
+static unsigned long sh_clk_div6_recalc(struct clk *clk)
+{
+ struct clk_div_mult_table *table = &sh_clk_div6_table;
+ unsigned int idx;
+
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, NULL);
+
+ idx = __raw_readl(clk->enable_reg) & 0x003f;
+
+ return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div6_set_rate(struct clk *clk,
+ unsigned long rate, int algo_id)
+{
+ unsigned long value;
+ int idx;
+
+ idx = clk_rate_table_find(clk, clk->freq_table, rate);
+ if (idx < 0)
+ return idx;
+
+ value = __raw_readl(clk->enable_reg);
+ value &= ~0x3f;
+ value |= idx;
+ __raw_writel(value, clk->enable_reg);
+ return 0;
+}
+
+static int sh_clk_div6_enable(struct clk *clk)
+{
+ unsigned long value;
+ int ret;
+
+ ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
+ if (ret == 0) {
+ value = __raw_readl(clk->enable_reg);
+ value &= ~0x100; /* clear stop bit to enable clock */
+ __raw_writel(value, clk->enable_reg);
+ }
+ return ret;
+}
+
+static void sh_clk_div6_disable(struct clk *clk)
+{
+ unsigned long value;
+
+ value = __raw_readl(clk->enable_reg);
+ value |= 0x100; /* stop clock */
+ value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
+ __raw_writel(value, clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div6_clk_ops = {
+ .recalc = sh_clk_div6_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div6_set_rate,
+ .enable = sh_clk_div6_enable,
+ .disable = sh_clk_div6_disable,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ void *freq_table;
+ int nr_divs = sh_clk_div6_table.nr_divisors;
+ int freq_table_size = sizeof(struct cpufreq_frequency_table);
+ int ret = 0;
+ int k;
+
+ freq_table_size *= (nr_divs + 1);
+ freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+ if (!freq_table) {
+ pr_err("sh_clk_div6_register: unable to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+
+ clkp->ops = &sh_clk_div6_clk_ops;
+ clkp->id = -1;
+ clkp->freq_table = freq_table + (k * freq_table_size);
+ clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+ ret = clk_register(clkp);
+ }
+
+ return ret;
+}
+
+static unsigned long sh_clk_div4_recalc(struct clk *clk)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
+ unsigned int idx;
+
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+
+ return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ struct clk_div_mult_table *table = d4t->div_mult_table;
+ u32 value;
+ int ret;
+
+ /* we really need a better way to determine parent index, but for
+ * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
+ * no CLK_ENABLE_ON_INIT means external clock...
+ */
+
+ if (parent->flags & CLK_ENABLE_ON_INIT)
+ value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+ else
+ value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ __raw_writel(value, clk->enable_reg);
+
+ /* Rebuild the frequency table */
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+ table, &clk->arch_flags);
+
+ return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+ struct clk_div4_table *d4t = clk->priv;
+ unsigned long value;
+ int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+ if (idx < 0)
+ return idx;
+
+ value = __raw_readl(clk->enable_reg);
+ value &= ~(0xf << clk->enable_bit);
+ value |= (idx << clk->enable_bit);
+ __raw_writel(value, clk->enable_reg);
+
+ if (d4t->kick)
+ d4t->kick(clk);
+
+ return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+ return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+ __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div4_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+};
+
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+ .recalc = sh_clk_div4_recalc,
+ .set_rate = sh_clk_div4_set_rate,
+ .round_rate = sh_clk_div_round_rate,
+ .enable = sh_clk_div4_enable,
+ .disable = sh_clk_div4_disable,
+ .set_parent = sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+ struct clk_div4_table *table, struct clk_ops *ops)
+{
+ struct clk *clkp;
+ void *freq_table;
+ int nr_divs = table->div_mult_table->nr_divisors;
+ int freq_table_size = sizeof(struct cpufreq_frequency_table);
+ int ret = 0;
+ int k;
+
+ freq_table_size *= (nr_divs + 1);
+ freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+ if (!freq_table) {
+ pr_err("sh_clk_div4_register: unable to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+
+ clkp->ops = ops;
+ clkp->id = -1;
+ clkp->priv = table;
+
+ clkp->freq_table = freq_table + (k * freq_table_size);
+ clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+ ret = clk_register(clkp);
+ }
+
+ return ret;
+}
+
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+ struct clk_div4_table *table)
+{
+ return sh_clk_div4_register_ops(clks, nr, table,
+ &sh_clk_div4_reparent_clk_ops);
+}
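The new clk-cpg.c provides the registration entry points (sh_clk_mstp32_register(), sh_clk_div6_register() and the sh_clk_div4_*_register() variants) that platform code calls with arrays of struct clk. A minimal sketch of such a caller, filling in only the fields the helpers above actually read; the register addresses, bit number, parent rate and function name are hypothetical, and the init call is assumed to be wired into the board's clock setup:

	/* Sketch only: how a platform might feed clocks to the helpers
	 * above. Addresses and bit numbers are placeholders. */
	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/sh_clk.h>

	static struct clk extal_clk = {			/* assumed root clock */
		.rate = 33333333,
	};

	static struct clk mstp_clks[] = {
		{
			.parent		= &extal_clk,
			.enable_reg	= (void __iomem *)0xa4150030, /* hypothetical MSTPCR */
			.enable_bit	= 7,			      /* hypothetical bit */
		},
	};

	static struct clk div6_clks[] = {
		{
			.parent		= &extal_clk,
			.enable_reg	= (void __iomem *)0xa415001c, /* hypothetical divider reg */
		},
	};

	static int __init board_clocks_init(void)
	{
		int ret = clk_register(&extal_clk);

		if (!ret)
			ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
		if (!ret)
			ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
		return ret;
	}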
diff --git a/drivers/sh/clk.c b/drivers/sh/clk.c
new file mode 100644
index 00000000000..5d84adac9ec
--- /dev/null
+++ b/drivers/sh/clk.c
@@ -0,0 +1,545 @@
+/*
+ * drivers/sh/clk.c - SuperH clock framework
+ *
+ * Copyright (C) 2005 - 2009 Paul Mundt
+ *
+ * This clock framework is derived from the OMAP version by:
+ *
+ * Copyright (C) 2004 - 2008 Nokia Corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/sysdev.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+
+static LIST_HEAD(clock_list);
+static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(clock_list_sem);
+
+void clk_rate_table_build(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ int nr_freqs,
+ struct clk_div_mult_table *src_table,
+ unsigned long *bitmap)
+{
+ unsigned long mult, div;
+ unsigned long freq;
+ int i;
+
+ for (i = 0; i < nr_freqs; i++) {
+ div = 1;
+ mult = 1;
+
+ if (src_table->divisors && i < src_table->nr_divisors)
+ div = src_table->divisors[i];
+
+ if (src_table->multipliers && i < src_table->nr_multipliers)
+ mult = src_table->multipliers[i];
+
+ if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
+ freq = CPUFREQ_ENTRY_INVALID;
+ else
+ freq = clk->parent->rate * mult / div;
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = freq;
+ }
+
+ /* Termination entry */
+ freq_table[i].index = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+long clk_rate_table_round(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate)
+{
+ unsigned long rate_error, rate_error_prev = ~0UL;
+ unsigned long rate_best_fit = rate;
+ unsigned long highest, lowest;
+ int i;
+
+ highest = lowest = 0;
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ if (freq > highest)
+ highest = freq;
+ if (freq < lowest)
+ lowest = freq;
+
+ rate_error = abs(freq - rate);
+ if (rate_error < rate_error_prev) {
+ rate_best_fit = freq;
+ rate_error_prev = rate_error;
+ }
+
+ if (rate_error == 0)
+ break;
+ }
+
+ if (rate >= highest)
+ rate_best_fit = highest;
+ if (rate <= lowest)
+ rate_best_fit = lowest;
+
+ return rate_best_fit;
+}
+
+int clk_rate_table_find(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate)
+{
+ int i;
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ if (freq == rate)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+/* Used for clocks that always have same value as the parent clock */
+unsigned long followparent_recalc(struct clk *clk)
+{
+ return clk->parent ? clk->parent->rate : 0;
+}
+
+int clk_reparent(struct clk *child, struct clk *parent)
+{
+ list_del_init(&child->sibling);
+ if (parent)
+ list_add(&child->sibling, &parent->children);
+ child->parent = parent;
+
+ /* now do the debugfs renaming to reattach the child
+ to the proper parent */
+
+ return 0;
+}
+
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &tclk->children, sibling) {
+ if (clkp->ops && clkp->ops->recalc)
+ clkp->rate = clkp->ops->recalc(clkp);
+
+ propagate_rate(clkp);
+ }
+}
+
+static void __clk_disable(struct clk *clk)
+{
+ if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
+ clk->name))
+ return;
+
+ if (!(--clk->usecount)) {
+ if (likely(clk->ops && clk->ops->disable))
+ clk->ops->disable(clk);
+ if (likely(clk->parent))
+ __clk_disable(clk->parent);
+ }
+}
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk)
+ return;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clock_lock, flags);
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+static int __clk_enable(struct clk *clk)
+{
+ int ret = 0;
+
+ if (clk->usecount++ == 0) {
+ if (clk->parent) {
+ ret = __clk_enable(clk->parent);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ if (clk->ops && clk->ops->enable) {
+ ret = clk->ops->enable(clk);
+ if (ret) {
+ if (clk->parent)
+ __clk_disable(clk->parent);
+ goto err;
+ }
+ }
+ }
+
+ return ret;
+err:
+ clk->usecount--;
+ return ret;
+}
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!clk)
+ return -EINVAL;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ ret = __clk_enable(clk);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static LIST_HEAD(root_clks);
+
+/**
+ * recalculate_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which, if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ * Called at init.
+ */
+void recalculate_root_clocks(void)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &root_clks, sibling) {
+ if (clkp->ops && clkp->ops->recalc)
+ clkp->rate = clkp->ops->recalc(clkp);
+ propagate_rate(clkp);
+ }
+}
+
+int clk_register(struct clk *clk)
+{
+ if (clk == NULL || IS_ERR(clk))
+ return -EINVAL;
+
+ /*
+ * trap out already registered clocks
+ */
+ if (clk->node.next || clk->node.prev)
+ return 0;
+
+ mutex_lock(&clock_list_sem);
+
+ INIT_LIST_HEAD(&clk->children);
+ clk->usecount = 0;
+
+ if (clk->parent)
+ list_add(&clk->sibling, &clk->parent->children);
+ else
+ list_add(&clk->sibling, &root_clks);
+
+ list_add(&clk->node, &clock_list);
+ if (clk->ops && clk->ops->init)
+ clk->ops->init(clk);
+ mutex_unlock(&clock_list_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ mutex_lock(&clock_list_sem);
+ list_del(&clk->sibling);
+ list_del(&clk->node);
+ mutex_unlock(&clock_list_sem);
+}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
+void clk_enable_init_clocks(void)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &clock_list, node)
+ if (clkp->flags & CLK_ENABLE_ON_INIT)
+ clk_enable(clkp);
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_set_rate_ex(clk, rate, 0);
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
+{
+ int ret = -EOPNOTSUPP;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_lock, flags);
+
+ if (likely(clk->ops && clk->ops->set_rate)) {
+ ret = clk->ops->set_rate(clk, rate, algo_id);
+ if (ret != 0)
+ goto out_unlock;
+ } else {
+ clk->rate = rate;
+ ret = 0;
+ }
+
+ if (clk->ops && clk->ops->recalc)
+ clk->rate = clk->ops->recalc(clk);
+
+ propagate_rate(clk);
+
+out_unlock:
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!parent || !clk)
+ return ret;
+ if (clk->parent == parent)
+ return 0;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ if (clk->usecount == 0) {
+ if (clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk, parent);
+ else
+ ret = clk_reparent(clk, parent);
+
+ if (ret == 0) {
+ pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
+ clk->name, clk->parent->name, clk->rate);
+ if (clk->ops->recalc)
+ clk->rate = clk->ops->recalc(clk);
+ propagate_rate(clk);
+ }
+ } else
+ ret = -EBUSY;
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return clk->parent;
+}
+EXPORT_SYMBOL_GPL(clk_get_parent);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (likely(clk->ops && clk->ops->round_rate)) {
+ unsigned long flags, rounded;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ rounded = clk->ops->round_rate(clk, rate);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return rounded;
+ }
+
+ return clk_get_rate(clk);
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+#ifdef CONFIG_PM
+static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+ static pm_message_t prev_state;
+ struct clk *clkp;
+
+ switch (state.event) {
+ case PM_EVENT_ON:
+ /* Resuming from hibernation */
+ if (prev_state.event != PM_EVENT_FREEZE)
+ break;
+
+ list_for_each_entry(clkp, &clock_list, node) {
+ if (likely(clkp->ops)) {
+ unsigned long rate = clkp->rate;
+
+ if (likely(clkp->ops->set_parent))
+ clkp->ops->set_parent(clkp,
+ clkp->parent);
+ if (likely(clkp->ops->set_rate))
+ clkp->ops->set_rate(clkp,
+ rate, NO_CHANGE);
+ else if (likely(clkp->ops->recalc))
+ clkp->rate = clkp->ops->recalc(clkp);
+ }
+ }
+ break;
+ case PM_EVENT_FREEZE:
+ break;
+ case PM_EVENT_SUSPEND:
+ break;
+ }
+
+ prev_state = state;
+ return 0;
+}
+
+static int clks_sysdev_resume(struct sys_device *dev)
+{
+ return clks_sysdev_suspend(dev, PMSG_ON);
+}
+
+static struct sysdev_class clks_sysdev_class = {
+ .name = "clks",
+};
+
+static struct sysdev_driver clks_sysdev_driver = {
+ .suspend = clks_sysdev_suspend,
+ .resume = clks_sysdev_resume,
+};
+
+static struct sys_device clks_sysdev_dev = {
+ .cls = &clks_sysdev_class,
+};
+
+static int __init clk_sysdev_init(void)
+{
+ sysdev_class_register(&clks_sysdev_class);
+ sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
+ sysdev_register(&clks_sysdev_dev);
+
+ return 0;
+}
+subsys_initcall(clk_sysdev_init);
+#endif
+
+/*
+ * debugfs support to trace clock tree hierarchy and attributes
+ */
+static struct dentry *clk_debugfs_root;
+
+static int clk_debugfs_register_one(struct clk *c)
+{
+ int err;
+ struct dentry *d, *child, *child_tmp;
+ struct clk *pa = c->parent;
+ char s[255];
+ char *p = s;
+
+ p += sprintf(p, "%s", c->name);
+ if (c->id >= 0)
+ sprintf(p, ":%d", c->id);
+ d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
+ if (!d)
+ return -ENOMEM;
+ c->dentry = d;
+
+ d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ d = c->dentry;
+ list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
+ debugfs_remove(child);
+ debugfs_remove(c->dentry);
+ return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+ int err;
+ struct clk *pa = c->parent;
+
+ if (pa && !pa->dentry) {
+ err = clk_debugfs_register(pa);
+ if (err)
+ return err;
+ }
+
+ if (!c->dentry && c->name) {
+ err = clk_debugfs_register_one(c);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+ struct clk *c;
+ struct dentry *d;
+ int err;
+
+ d = debugfs_create_dir("clock", NULL);
+ if (!d)
+ return -ENOMEM;
+ clk_debugfs_root = d;
+
+ list_for_each_entry(c, &clock_list, node) {
+ err = clk_debugfs_register(c);
+ if (err)
+ goto err_out;
+ }
+ return 0;
+err_out:
+ debugfs_remove_recursive(clk_debugfs_root);
+ return err;
+}
+late_initcall(clk_debugfs_init);
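
For illustration, board code might use the API above roughly as follows. This is a minimal sketch: the clock names and the oscillator rate are assumptions; only clk_register(), clk_enable_init_clocks(), recalculate_root_clocks() and the CLK_ENABLE_ON_INIT flag come from the code above.

/* Hypothetical board clock setup; all names and rates are illustrative. */
static struct clk extal_clk = {
	.name	= "extal",		/* root clock: no parent */
	.rate	= 33333333,		/* assumed fixed oscillator rate */
};

static struct clk periph_clk = {
	.name	= "periph",
	.parent	= &extal_clk,
	.flags	= CLK_ENABLE_ON_INIT,	/* enabled by clk_enable_init_clocks() */
};

static int __init board_clk_init(void)
{
	int ret;

	ret = clk_register(&extal_clk);		/* lands on root_clks */
	if (!ret)
		ret = clk_register(&periph_clk);	/* child of extal */
	if (ret)
		return ret;

	recalculate_root_clocks();	/* propagate rates down the tree */
	clk_enable_init_clocks();	/* honours CLK_ENABLE_ON_INIT */

	return 0;
}
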
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a0..c585574b9ae 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
+#include <asm/sizes.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
unsigned long handle;
};
+struct intc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
struct intc_desc_int {
struct list_head list;
struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
unsigned int nr_prio;
struct intc_handle_int *sense;
unsigned int nr_sense;
+ struct intc_window *window;
+ unsigned int nr_windows;
struct irq_chip chip;
};
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
#define SMP_NR(d, x) 1
#endif
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int intc_prio_level[NR_IRQS]; /* for now */
+static unsigned int default_prio_level = 2; /* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
+#ifdef CONFIG_INTC_BALANCING
+static unsigned long dist_handle[NR_IRQS];
+#endif
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
return container_of(chip, struct intc_desc_int, chip);
}
+static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
+ unsigned long address)
+{
+ struct intc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < d->nr_windows; k++) {
+ window = d->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ address -= window->phys;
+ address += (unsigned long)window->virt;
+
+ return address;
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return address;
+}
+
+static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
+{
+ unsigned int k;
+
+ address = intc_phys_to_virt(d, address);
+
+ for (k = 0; k < d->nr_reg; k++) {
+ if (d->reg[k] == address)
+ return k;
+ }
+
+ BUG();
+ return 0;
+}
+
static inline unsigned int set_field(unsigned int value,
unsigned int field_value,
unsigned int handle)
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
[MODE_PCLR_REG] = intc_mode_field,
};
+#ifdef CONFIG_INTC_BALANCING
+static inline void intc_balancing_enable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+ struct intc_desc_int *d = get_intc_desc(irq);
+ unsigned long handle = dist_handle[irq];
+ unsigned long addr;
+
+ if (irq_balancing_disabled(irq) || !handle)
+ return;
+
+ addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
+ intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
+}
+
+static unsigned int intc_dist_data(struct intc_desc *desc,
+ struct intc_desc_int *d,
+ intc_enum enum_id)
+{
+ struct intc_mask_reg *mr = desc->hw.mask_regs;
+ unsigned int i, j, fn, mode;
+ unsigned long reg_e, reg_d;
+
+ for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
+ mr = desc->hw.mask_regs + i;
+
+ /*
+ * Skip this entry if there's no auto-distribution
+ * register associated with it.
+ */
+ if (!mr->dist_reg)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
+ if (mr->enum_ids[j] != enum_id)
+ continue;
+
+ fn = REG_FN_MODIFY_BASE;
+ mode = MODE_ENABLE_REG;
+ reg_e = mr->dist_reg;
+ reg_d = mr->dist_reg;
+
+ fn += (mr->reg_width >> 3) - 1;
+ return _INTC_MK(fn, mode,
+ intc_get_reg(d, reg_e),
+ intc_get_reg(d, reg_d),
+ 1,
+ (mr->reg_width - 1) - j);
+ }
+ }
+
+ /*
+ * It's possible we've gotten here with no distribution options
+ * available for the IRQ in question, so we just skip over those.
+ */
+ return 0;
+}
+#else
+static inline void intc_balancing_enable(unsigned int irq)
+{
+}
+
+static inline void intc_balancing_disable(unsigned int irq)
+{
+}
+#endif
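
For reference, a mask-register description carrying the new auto-distribution register consumed by intc_dist_data() might look roughly like this; the addresses, width and enum IDs are made up, and only the field names come from the code above.

/* Hypothetical descriptor entry; addresses and enum IDs are illustrative. */
static struct intc_mask_reg board_mask_registers[] = {
	{
		.set_reg	= 0xfe410080,	/* mask set register */
		.clr_reg	= 0xfe410084,	/* mask clear register */
		.dist_reg	= 0xfe4100f0,	/* auto-distribution enable */
		.reg_width	= 8,
		.enum_ids	= { DEVA_IRQ, DEVB_IRQ },	/* assumed enum values */
	},
};
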
+
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
struct intc_desc_int *d = get_intc_desc(irq);
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
[_INTC_FN(handle)], irq);
}
+
+ intc_balancing_enable(irq);
}
static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
static void intc_disable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = (unsigned long) get_irq_chip_data(irq);
+ unsigned long handle = (unsigned long)get_irq_chip_data(irq);
unsigned long addr;
unsigned int cpu;
+ intc_balancing_disable(irq);
+
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
intc_disable(irq);
- /* read register and write zero only to the assocaited bit */
-
+ /* read register and write zero only to the associated bit */
if (handle) {
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
{
int i;
- /* this doesn't scale well, but...
+ /*
+ * this doesn't scale well, but...
*
 * this function should only be used for certain uncommon
* operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
* memory footprint down is to make sure the array is sorted
* and then perform a bisect to lookup the irq.
*/
-
for (i = 0; i < nr_hp; i++) {
if ((hp + i)->irq != irq)
continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
-
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(irq, ihp->handle);
}
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
return 0;
}
-static unsigned int __init intc_get_reg(struct intc_desc_int *d,
- unsigned long address)
-{
- unsigned int k;
-
- for (k = 0; k < d->nr_reg; k++) {
- if (d->reg[k] == address)
- return k;
- }
-
- BUG();
- return 0;
-}
-
static intc_enum __init intc_grp_id(struct intc_desc *desc,
intc_enum enum_id)
{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
*/
set_bit(irq, intc_irq_map);
- /* Prefer single interrupt source bitmap over other combinations:
+ /*
+ * Prefer single interrupt source bitmap over other combinations:
+ *
* 1. bitmap, single interrupt source
* 2. priority, single interrupt source
* 3. bitmap, multiple interrupt sources (groups)
* 4. priority, multiple interrupt sources (groups)
*/
-
data[0] = intc_mask_data(desc, d, enum_id, 0);
data[1] = intc_prio_data(desc, d, enum_id, 0);
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
handle_level_irq, "level");
set_irq_chip_data(irq, (void *)data[primary]);
- /* set priority level
+ /*
+ * set priority level
* - this needs to be at least 2 for 5-bit priorities on 7780
*/
- intc_prio_level[irq] = 2;
+ intc_prio_level[irq] = default_prio_level;
/* enable secondary masking method if present */
if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
* only secondary priority should access registers, so
* set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
*/
-
hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
}
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
if (desc->hw.ack_regs)
ack_handle[irq] = intc_ack_data(desc, d, enum_id);
+#ifdef CONFIG_INTC_BALANCING
+ if (desc->hw.mask_regs)
+ dist_handle[irq] = intc_dist_data(desc, d, enum_id);
+#endif
+
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
unsigned int smp)
{
if (value) {
+ value = intc_phys_to_virt(d, value);
+
d->reg[cnt] = value;
#ifdef CONFIG_SMP
d->smp[cnt] = smp;
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq((unsigned int)get_irq_data(irq));
}
-void __init register_intc_controller(struct intc_desc *desc)
+int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
struct intc_hw_desc *hw = &desc->hw;
struct intc_desc_int *d;
+ struct resource *res;
+
+ pr_info("intc: Registered controller '%s' with %u IRQs\n",
+ desc->name, hw->nr_vectors);
d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ if (!d)
+ goto err0;
INIT_LIST_HEAD(&d->list);
list_add(&d->list, &intc_list);
+ if (desc->num_resources) {
+ d->nr_windows = desc->num_resources;
+ d->window = kzalloc(d->nr_windows * sizeof(*d->window),
+ GFP_NOWAIT);
+ if (!d->window)
+ goto err1;
+
+ for (k = 0; k < d->nr_windows; k++) {
+ res = desc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ d->window[k].phys = res->start;
+ d->window[k].size = resource_size(res);
+ d->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!d->window[k].virt)
+ goto err2;
+ }
+ }
+
d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
+#ifdef CONFIG_INTC_BALANCING
+ if (d->nr_reg)
+ d->nr_reg += hw->nr_mask_regs;
+#endif
d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
+ if (!d->reg)
+ goto err2;
+
#ifdef CONFIG_SMP
d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
+ if (!d->smp)
+ goto err3;
#endif
k = 0;
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
smp = IS_SMP(hw->mask_regs[i]);
k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
+#ifdef CONFIG_INTC_BALANCING
+ k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
+#endif
}
}
if (hw->prio_regs) {
d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
GFP_NOWAIT);
+ if (!d->prio)
+ goto err4;
for (i = 0; i < hw->nr_prio_regs; i++) {
smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
if (hw->sense_regs) {
d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
GFP_NOWAIT);
+ if (!d->sense)
+ goto err5;
for (i = 0; i < hw->nr_sense_regs; i++)
k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq);
+ pr_err("can't get irq_desc for %d\n", irq);
continue;
}
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
*/
irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
if (unlikely(!irq_desc)) {
- pr_info("can't get irq_desc for %d\n", irq2);
+ pr_err("can't get irq_desc for %d\n", irq2);
continue;
}
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
/* enable bits matching force_enable after registering irqs */
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+
+ return 0;
+err5:
+ kfree(d->prio);
+err4:
+#ifdef CONFIG_SMP
+ kfree(d->smp);
+err3:
+#endif
+ kfree(d->reg);
+err2:
+ for (k = 0; k < d->nr_windows; k++)
+ if (d->window[k].virt)
+ iounmap(d->window[k].virt);
+
+ kfree(d->window);
+err1:
+ kfree(d);
+err0:
+ pr_err("unable to allocate INTC memory\n");
+
+ return -ENOMEM;
+}
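
To show how the new window handling is meant to be fed, here is a hedged sketch of board code attaching an iomem resource to its controller descriptor. The descriptor name and the address range are assumptions; only the num_resources/resource fields and SZ_4K come from the code above.

/* Hypothetical board code; "board_intc_desc" and the range are made up. */
static struct resource board_intc_resources[] = {
	{
		.start	= 0xe6900000,
		.end	= 0xe6900000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
};

void __init board_init_irq(void)
{
	board_intc_desc.num_resources = ARRAY_SIZE(board_intc_resources);
	board_intc_desc.resource = board_intc_resources;

	if (register_intc_controller(&board_intc_desc))
		pr_err("board: failed to register interrupt controller\n");
}

With a window registered, save_reg() stores the ioremapped addresses and intc_phys_to_virt() translates the physical register addresses given in the descriptor.
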
+
+#ifdef CONFIG_INTC_USERIMASK
+static void __iomem *uimask;
+
+int register_intc_userimask(unsigned long addr)
+{
+ if (unlikely(uimask))
+ return -EBUSY;
+
+ uimask = ioremap_nocache(addr, SZ_4K);
+ if (unlikely(!uimask))
+ return -ENOMEM;
+
+ pr_info("intc: userimask support registered for levels 0 -> %d\n",
+ default_prio_level - 1);
+
+ return 0;
+}
+
+static ssize_t
+show_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
+}
+
+static ssize_t
+store_intc_userimask(struct sysdev_class *cls,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long level;
+
+ level = simple_strtoul(buf, NULL, 10);
+
+ /*
+ * Minimal acceptable IRQ levels are in the 2 - 16 range, but
+ * these are chomped so as to not interfere with normal IRQs.
+ *
+ * Level 1 is a special case on some CPUs in that it's not
+ * directly settable, but given that USERIMASK cuts off below a
+ * certain level, we don't care about this limitation here.
+ * Level 0 on the other hand equates to user masking disabled.
+ *
+ * We use default_prio_level as a cut off so that only special
+ * case opt-in IRQs can be mangled.
+ */
+ if (level >= default_prio_level)
+ return -EINVAL;
+
+ __raw_writel(0xa5 << 24 | level << 4, uimask);
+
+ return count;
}
+static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
+#endif
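
A CPU's setup code might wire up the userimask interface added above roughly as follows; the register address is invented for illustration.

/* Hypothetical CPU setup; the address is illustrative only. */
void __init cpu_enable_userimask(void)
{
#ifdef CONFIG_INTC_USERIMASK
	if (register_intc_userimask(0xfe410000))
		pr_err("intc: failed to register userimask\n");
#endif
}

Once registered, writing a level lower than default_prio_level to the "userimask" class attribute programs the hardware cut-off; levels at or above default_prio_level are rejected with -EINVAL by store_intc_userimask().
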
+
+static ssize_t
+show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+{
+ struct intc_desc_int *d;
+
+ d = container_of(dev, struct intc_desc_int, sysdev);
+
+ return sprintf(buf, "%s\n", d->chip.name);
+}
+
+static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
struct intc_desc_int *d;
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
int id = 0;
error = sysdev_class_register(&intc_sysdev_class);
+#ifdef CONFIG_INTC_USERIMASK
+ if (!error && uimask)
+ error = sysdev_class_create_file(&intc_sysdev_class,
+ &attr_userimask);
+#endif
if (!error) {
list_for_each_entry(d, &intc_list, list) {
d->sysdev.id = id;
d->sysdev.cls = &intc_sysdev_class;
error = sysdev_register(&d->sysdev);
+ if (error == 0)
+ error = sysdev_create_file(&d->sysdev,
+ &attr_name);
if (error)
break;
+
id++;
}
}
if (error)
- pr_warning("intc: sysdev registration error\n");
+ pr_err("intc: sysdev registration error\n");
return error;
}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
desc = irq_to_desc_alloc_node(new, node);
if (unlikely(!desc)) {
- pr_info("can't get irq_desc for %d\n", new);
+ pr_err("can't get irq_desc for %d\n", new);
goto out_unlock;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a191fa2be7c..91c2f4f3af1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -117,6 +117,16 @@ config SPI_DAVINCI
help
SPI master controller for DaVinci and DA8xx SPI modules.
+config SPI_EP93XX
+ tristate "Cirrus Logic EP93xx SPI controller"
+ depends on ARCH_EP93XX
+ help
+ This enables using the Cirrus EP93xx SPI controller in master
+ mode.
+
+ To compile this driver as a module, choose M here. The module will be
+ called ep93xx_spi.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GENERIC_GPIO
@@ -165,6 +175,13 @@ config SPI_MPC52xx_PSC
This enables using the Freescale MPC52xx Programmable Serial
Controller in master SPI mode.
+config SPI_MPC512x_PSC
+ tristate "Freescale MPC512x PSC SPI controller"
+ depends on SPI_MASTER && PPC_MPC512x
+ help
+ This enables using the Freescale MPC5121 Programmable Serial
+ Controller in SPI master mode.
+
config SPI_MPC8xxx
tristate "Freescale MPC8xxx SPI controller"
depends on FSL_SOC
@@ -180,10 +197,10 @@ config SPI_OMAP_UWIRE
This hooks up to the MicroWire controller on OMAP1 chips.
config SPI_OMAP24XX
- tristate "McSPI driver for OMAP24xx/OMAP34xx"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ tristate "McSPI driver for OMAP"
+ depends on ARCH_OMAP2PLUS
help
- SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
+ SPI master controller for OMAP24XX and later Multichannel SPI
(McSPI) modules.
config SPI_OMAP_100K
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d7d0f89b797..e9cbd18217a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
+obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
obj-$(CONFIG_SPI_ORION) += orion_spi.o
obj-$(CONFIG_SPI_PL022) += amba-pl022.o
+obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index e9aeee16d92..f0a1418ce66 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -102,13 +102,21 @@
/*
* SSP Control Register 0 - SSP_CR0
*/
-#define SSP_CR0_MASK_DSS (0x1FUL << 0)
-#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
+#define SSP_CR0_MASK_DSS (0x0FUL << 0)
+#define SSP_CR0_MASK_FRF (0x3UL << 4)
#define SSP_CR0_MASK_SPO (0x1UL << 6)
#define SSP_CR0_MASK_SPH (0x1UL << 7)
#define SSP_CR0_MASK_SCR (0xFFUL << 8)
-#define SSP_CR0_MASK_CSS (0x1FUL << 16)
-#define SSP_CR0_MASK_FRF (0x3UL << 21)
+
+/*
+ * The ST version of this block moves some bits
+ * in SSP_CR0 and extends it to 32 bits
+ */
+#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
+#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
+#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
+#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
+
/*
* SSP Control Register 0 - SSP_CR1
@@ -117,16 +125,18 @@
#define SSP_CR1_MASK_SSE (0x1UL << 1)
#define SSP_CR1_MASK_MS (0x1UL << 2)
#define SSP_CR1_MASK_SOD (0x1UL << 3)
-#define SSP_CR1_MASK_RENDN (0x1UL << 4)
-#define SSP_CR1_MASK_TENDN (0x1UL << 5)
-#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
-#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
-#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
/*
- * SSP Data Register - SSP_DR
+ * The ST version of this block adds some bits
+ * in SSP_CR1
*/
-#define SSP_DR_MASK_DATA 0xFFFFFFFF
+#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
+#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
+#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
+#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
+#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
+/* This one is only in the PL023 variant */
+#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
/*
* SSP Status Register - SSP_SR
@@ -134,7 +144,7 @@
#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
-#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
/*
@@ -227,7 +237,7 @@
/*
* SSP Test Data Register - SSP_TDR
*/
-#define TDR_MASK_TESTDATA (0xFFFFFFFF)
+#define TDR_MASK_TESTDATA (0xFFFFFFFF)
/*
* Message State
@@ -235,33 +245,33 @@
* hold a single state value, that's why all this
* (void *) casting is done here.
*/
-#define STATE_START ((void *) 0)
-#define STATE_RUNNING ((void *) 1)
-#define STATE_DONE ((void *) 2)
-#define STATE_ERROR ((void *) -1)
+#define STATE_START ((void *) 0)
+#define STATE_RUNNING ((void *) 1)
+#define STATE_DONE ((void *) 2)
+#define STATE_ERROR ((void *) -1)
/*
* Queue State
*/
-#define QUEUE_RUNNING (0)
-#define QUEUE_STOPPED (1)
+#define QUEUE_RUNNING (0)
+#define QUEUE_STOPPED (1)
/*
* SSP State - Whether Enabled or Disabled
*/
-#define SSP_DISABLED (0)
-#define SSP_ENABLED (1)
+#define SSP_DISABLED (0)
+#define SSP_ENABLED (1)
/*
* SSP DMA State - Whether DMA Enabled or Disabled
*/
-#define SSP_DMA_DISABLED (0)
-#define SSP_DMA_ENABLED (1)
+#define SSP_DMA_DISABLED (0)
+#define SSP_DMA_ENABLED (1)
/*
* SSP Clock Defaults
*/
-#define NMDK_SSP_DEFAULT_CLKRATE 0x2
-#define NMDK_SSP_DEFAULT_PRESCALE 0x40
+#define SSP_DEFAULT_CLKRATE 0x2
+#define SSP_DEFAULT_PRESCALE 0x40
/*
* SSP Clock Parameter ranges
@@ -307,16 +317,22 @@ enum ssp_writing {
* @fifodepth: depth of FIFOs (both)
* @max_bpw: maximum number of bits per word
* @unidir: supports unidirection transfers
+ * @extended_cr: 32 bit wide control register 0 with extra
+ * feature bits in CR0 and CR1, as found in the ST variants
+ * @pl023: supports a subset of the ST extensions called "PL023"
*/
struct vendor_data {
int fifodepth;
int max_bpw;
bool unidir;
+ bool extended_cr;
+ bool pl023;
};
/**
* struct pl022 - This is the private SSP driver data structure
* @adev: AMBA device model hookup
+ * @vendor: Vendor data for the IP block
* @phybase: The physical memory where the SSP device resides
* @virtbase: The virtual memory where the SSP is mapped
* @master: SPI framework hookup
@@ -369,7 +385,8 @@ struct pl022 {
/**
* struct chip_data - To maintain runtime state of SSP for each client chip
- * @cr0: Value of control register CR0 of SSP
+ * @cr0: Value of control register CR0 of SSP - on later ST variants this
+ * register is 32 bits wide rather than just 16
* @cr1: Value of control register CR1 of SSP
* @dmacr: Value of DMA control Register of SSP
* @cpsr: Value of Clock prescale register
@@ -384,7 +401,7 @@ struct pl022 {
* This would be set according to the current message that would be served
*/
struct chip_data {
- u16 cr0;
+ u32 cr0;
u16 cr1;
u16 dmacr;
u16 cpsr;
@@ -517,7 +534,10 @@ static void restore_state(struct pl022 *pl022)
{
struct chip_data *chip = pl022->cur_chip;
- writew(chip->cr0, SSP_CR0(pl022->virtbase));
+ if (pl022->vendor->extended_cr)
+ writel(chip->cr0, SSP_CR0(pl022->virtbase));
+ else
+ writew(chip->cr0, SSP_CR0(pl022->virtbase));
writew(chip->cr1, SSP_CR1(pl022->virtbase));
writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
@@ -525,38 +545,70 @@ static void restore_state(struct pl022 *pl022)
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
-/**
- * load_ssp_default_config - Load default configuration for SSP
- * @pl022: SSP driver private data structure
- */
-
/*
* Default SSP Register Values
*/
#define DEFAULT_SSP_REG_CR0 ( \
GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
- GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
- GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
- GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
- GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
+)
+
+/* ST versions have slightly different bit layout */
+#define DEFAULT_SSP_REG_CR0_ST ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
+ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
+)
+
+/* The PL023 version is slightly different again */
+#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)
#define DEFAULT_SSP_REG_CR1 ( \
GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
+)
+
+/* ST versions extend this register to use all 16 bits */
+#define DEFAULT_SSP_REG_CR1_ST ( \
+ DEFAULT_SSP_REG_CR1 | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
+)
+
+/*
+ * The PL023 variant has further differences: no loopback mode, no microwire
+ * support, and a new clock feedback delay setting.
+ */
+#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
- GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
- GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
- GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
- GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
- GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
+ GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)
#define DEFAULT_SSP_REG_CPSR ( \
- GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
+ GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)
#define DEFAULT_SSP_REG_DMACR (\
@@ -564,11 +616,22 @@ static void restore_state(struct pl022 *pl022)
GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
-
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * @pl022: SSP driver private data structure
+ */
static void load_ssp_default_config(struct pl022 *pl022)
{
- writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
- writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+ if (pl022->vendor->pl023) {
+ writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
+ } else if (pl022->vendor->extended_cr) {
+ writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
+ } else {
+ writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+ }
writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
@@ -1008,7 +1071,7 @@ static void do_polling_transfer(void *data)
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
- dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
+ dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
 /* FIXME: insert a timeout so we don't hang here indefinitely */
while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
readwriter(pl022);
@@ -1148,7 +1211,6 @@ static int stop_queue(struct pl022 *pl022)
* A wait_queue on the pl022->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
- pl022->run = QUEUE_STOPPED;
while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
msleep(10);
@@ -1157,6 +1219,7 @@ static int stop_queue(struct pl022 *pl022)
if (!list_empty(&pl022->queue) || pl022->busy)
status = -EBUSY;
+ else
+ pl022->run = QUEUE_STOPPED;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1280,11 +1343,21 @@ static int verify_controller_parameters(struct pl022 *pl022,
"Wait State is configured incorrectly\n");
return -EINVAL;
}
- if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
- && (chip_info->duplex !=
- SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
- dev_err(chip_info->dev,
- "DUPLEX is configured incorrectly\n");
+ /* Half duplex is only available in the ST Micro version */
+ if (pl022->vendor->extended_cr) {
+ if ((chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ && (chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
+ dev_err(chip_info->dev,
+ "Microwire duplex mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ } else if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
+ dev_err(chip_info->dev,
+ "Microwire half duplex mode requested,"
+ " but this is only available in the"
+ " ST version of PL022\n");
return -EINVAL;
}
}
@@ -1581,22 +1654,49 @@ static int pl022_setup(struct spi_device *spi)
chip->cpsr = chip_info->clk_freq.cpsdvsr;
- SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
- SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
+ /* Special setup for the ST micro extended control registers */
+ if (pl022->vendor->extended_cr) {
+ if (pl022->vendor->pl023) {
+ /* These bits are only in the PL023 */
+ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
+ SSP_CR1_MASK_FBCLKDEL_ST, 13);
+ } else {
+ /* These bits are in the PL022 but not PL023 */
+ SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
+ SSP_CR0_MASK_HALFDUP_ST, 5);
+ SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
+ SSP_CR0_MASK_CSS_ST, 16);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF_ST, 21);
+ SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
+ SSP_CR1_MASK_MWAIT_ST, 6);
+ }
+ SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
+ SSP_CR0_MASK_DSS_ST, 0);
+ SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx,
+ SSP_CR1_MASK_RENDN_ST, 4);
+ SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx,
+ SSP_CR1_MASK_TENDN_ST, 5);
+ SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
+ SSP_CR1_MASK_RXIFLSEL_ST, 7);
+ SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
+ SSP_CR1_MASK_TXIFLSEL_ST, 10);
+ } else {
+ SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
+ SSP_CR0_MASK_DSS, 0);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF, 4);
+ }
+ /* Stuff that is common for all versions */
SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
- SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
- SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
- SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
+ /* Loopback is available on all versions except PL023 */
+ if (!pl022->vendor->pl023)
+ SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
- SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
- SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
- SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
- SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
- SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
/* Save controller_state */
spi_set_ctldata(spi, chip);
@@ -1809,6 +1909,8 @@ static struct vendor_data vendor_arm = {
.fifodepth = 8,
.max_bpw = 16,
.unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
};
@@ -1816,6 +1918,16 @@ static struct vendor_data vendor_st = {
.fifodepth = 32,
.max_bpw = 32,
.unidir = false,
+ .extended_cr = true,
+ .pl023 = false,
+};
+
+static struct vendor_data vendor_st_pl023 = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+ .extended_cr = true,
+ .pl023 = true,
};
static struct amba_id pl022_ids[] = {
@@ -1837,6 +1949,18 @@ static struct amba_id pl022_ids[] = {
.mask = 0xffffffff,
.data = &vendor_st,
},
+ {
+ /*
+ * ST-Ericsson derivative "PL023" (this is not
+ * an official ARM number), this is a PL022 SSP block
+ * stripped to SPI mode only, it has 32bit wide
+ * and 32 locations deep TX/RX FIFO but no extended
+ * CR0/CR1 register
+ */
+ .id = 0x00080023,
+ .mask = 0xffffffff,
+ .data = &vendor_st_pl023,
+ },
{ 0, 0 },
};
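
Because the setup path now branches on extended_cr and pl023, a sketch of per-chip platform data may help show which fields only matter on the ST variants. The field names are the ones dereferenced through chip_info above; the structure name pl022_config_chip and the chosen values are assumptions for illustration only.

/* Hypothetical chip_info for a device hanging off an ST (extended CR) SSP. */
static struct pl022_config_chip board_spi0_chip_info = {
	.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy	= SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.clk_freq	= {
		.cpsdvsr	= SSP_DEFAULT_PRESCALE,	/* illustrative */
		.scr		= SSP_DEFAULT_CLKRATE,
	},
	.data_size	= SSP_DATA_BITS_12,
	.clk_pol	= SSP_CLK_POL_IDLE_LOW,
	.clk_phase	= SSP_CLK_SECOND_EDGE,
	.lbm		= LOOPBACK_DISABLED,	/* not written on PL023 */
	/* the fields below are only written to CR0/CR1 on the ST variants */
	.duplex		= SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.ctrl_len	= SSP_BITS_8,
	.wait_state	= SSP_MWIRE_WAIT_ZERO,
	.endian_rx	= SSP_RX_MSB,
	.endian_tx	= SSP_TX_MSB,
	.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
};
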
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 95afb6b7739..b85090caf7c 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -301,7 +301,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
struct davinci_spi *davinci_spi;
struct davinci_spi_platform_data *pdata;
u8 bits_per_word = 0;
- u32 hz = 0, prescale;
+ u32 hz = 0, prescale = 0, clkspeed;
davinci_spi = spi_master_get_devdata(spi->master);
pdata = davinci_spi->pdata;
@@ -338,10 +338,16 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
spi->chip_select);
- prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;
+ clkspeed = clk_get_rate(davinci_spi->clk);
+ if (hz > clkspeed / 2)
+ prescale = 1 << 8;
+ if (hz < clkspeed / 256)
+ prescale = 255 << 8;
+ if (!prescale)
+ prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;
clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
- set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);
+ set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);
return 0;
}
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c
new file mode 100644
index 00000000000..0ba35df9a6d
--- /dev/null
+++ b/drivers/spi/ep93xx_spi.c
@@ -0,0 +1,938 @@
+/*
+ * Driver for Cirrus Logic EP93xx SPI controller.
+ *
+ * Copyright (c) 2010 Mika Westerberg
+ *
+ * Explicit FIFO handling code was inspired by amba-pl022 driver.
+ *
+ * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
+ *
+ * For more information about the SPI controller see documentation on Cirrus
+ * Logic web site:
+ * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+
+#include <mach/ep93xx_spi.h>
+
+#define SSPCR0 0x0000
+#define SSPCR0_MODE_SHIFT 6
+#define SSPCR0_SCR_SHIFT 8
+
+#define SSPCR1 0x0004
+#define SSPCR1_RIE BIT(0)
+#define SSPCR1_TIE BIT(1)
+#define SSPCR1_RORIE BIT(2)
+#define SSPCR1_LBM BIT(3)
+#define SSPCR1_SSE BIT(4)
+#define SSPCR1_MS BIT(5)
+#define SSPCR1_SOD BIT(6)
+
+#define SSPDR 0x0008
+
+#define SSPSR 0x000c
+#define SSPSR_TFE BIT(0)
+#define SSPSR_TNF BIT(1)
+#define SSPSR_RNE BIT(2)
+#define SSPSR_RFF BIT(3)
+#define SSPSR_BSY BIT(4)
+#define SSPCPSR 0x0010
+
+#define SSPIIR 0x0014
+#define SSPIIR_RIS BIT(0)
+#define SSPIIR_TIS BIT(1)
+#define SSPIIR_RORIS BIT(2)
+#define SSPICR SSPIIR
+
+/* timeout in milliseconds */
+#define SPI_TIMEOUT 5
+/* maximum depth of RX/TX FIFO */
+#define SPI_FIFO_SIZE 8
+
+/**
+ * struct ep93xx_spi - EP93xx SPI controller structure
+ * @lock: spinlock that protects concurrent accesses to fields @running,
+ * @current_msg and @msg_queue
+ * @pdev: pointer to platform device
+ * @clk: clock for the controller
+ * @regs_base: pointer to ioremap()'d registers
+ * @irq: IRQ number used by the driver
+ * @min_rate: minimum clock rate (in Hz) supported by the controller
+ * @max_rate: maximum clock rate (in Hz) supported by the controller
+ * @running: is the queue running
+ * @wq: workqueue used by the driver
+ * @msg_work: work that is queued for the driver
+ * @wait: wait here until given transfer is completed
+ * @msg_queue: queue for the messages
+ * @current_msg: message that is currently processed (or %NULL if none)
+ * @tx: current byte in transfer to transmit
+ * @rx: current byte in transfer to receive
+ * @fifo_level: how full the FIFO is (%0..%SPI_FIFO_SIZE - %1). Receiving one
+ * frame decreases this level and sending one frame increases it.
+ *
+ * This structure holds EP93xx SPI controller specific information. When
+ * @running is %true, the driver accepts transfer requests from protocol drivers.
+ * @current_msg is used to hold pointer to the message that is currently
+ * processed. If @current_msg is %NULL, it means that no processing is going
+ * on.
+ *
+ * Most of the fields are only written once and they can be accessed without
+ * taking the @lock. Fields that are accessed concurrently are: @current_msg,
+ * @running, and @msg_queue.
+ */
+struct ep93xx_spi {
+ spinlock_t lock;
+ const struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs_base;
+ int irq;
+ unsigned long min_rate;
+ unsigned long max_rate;
+ bool running;
+ struct workqueue_struct *wq;
+ struct work_struct msg_work;
+ struct completion wait;
+ struct list_head msg_queue;
+ struct spi_message *current_msg;
+ size_t tx;
+ size_t rx;
+ size_t fifo_level;
+};
+
+/**
+ * struct ep93xx_spi_chip - SPI device hardware settings
+ * @spi: back pointer to the SPI device
+ * @rate: max rate in hz this chip supports
+ * @div_cpsr: cpsr (pre-scaler) divider
+ * @div_scr: scr divider
+ * @dss: bits per word (4 - 16 bits)
+ * @ops: private chip operations
+ *
+ * This structure is used to store hardware register specific settings for each
+ * SPI device. Settings are written to hardware by function
+ * ep93xx_spi_chip_setup().
+ */
+struct ep93xx_spi_chip {
+ const struct spi_device *spi;
+ unsigned long rate;
+ u8 div_cpsr;
+ u8 div_scr;
+ u8 dss;
+ struct ep93xx_spi_chip_ops *ops;
+};
+
+/* converts bits per word to CR0.DSS value */
+#define bits_per_word_to_dss(bpw) ((bpw) - 1)
+
+static inline void
+ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
+{
+ __raw_writeb(value, espi->regs_base + reg);
+}
+
+static inline u8
+ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
+{
+ return __raw_readb(spi->regs_base + reg);
+}
+
+static inline void
+ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
+{
+ __raw_writew(value, espi->regs_base + reg);
+}
+
+static inline u16
+ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
+{
+ return __raw_readw(spi->regs_base + reg);
+}
+
+static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+ int err;
+
+ err = clk_enable(espi->clk);
+ if (err)
+ return err;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval |= SSPCR1_SSE;
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+ return 0;
+}
+
+static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval &= ~SSPCR1_SSE;
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+ clk_disable(espi->clk);
+}
+
+static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+/**
+ * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
+ * @espi: ep93xx SPI controller struct
+ * @chip: divisors are calculated for this chip
+ * @rate: desired SPI output clock rate
+ *
+ * Function calculates cpsr (clock pre-scaler) and scr divisors based on
+ * given @rate and places them in @chip->div_cpsr and @chip->div_scr. If,
+ * for some reason, the divisors cannot be calculated, nothing is stored and
+ * %-EINVAL is returned.
+ */
+static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
+ struct ep93xx_spi_chip *chip,
+ unsigned long rate)
+{
+ unsigned long spi_clk_rate = clk_get_rate(espi->clk);
+ int cpsr, scr;
+
+ /*
+ * Make sure that the rate is within the range supported by the
+ * controller. Note that the minimum value is already checked in
+ * ep93xx_spi_transfer().
+ */
+ rate = clamp(rate, espi->min_rate, espi->max_rate);
+
+ /*
+ * Calculate divisors so that we can get speed according to the
+ * following formula:
+ * rate = spi_clock_rate / (cpsr * (1 + scr))
+ *
+ * cpsr must be an even number starting from 2, scr can be any number
+ * between 0 and 255.
+ */
+ for (cpsr = 2; cpsr <= 254; cpsr += 2) {
+ for (scr = 0; scr <= 255; scr++) {
+ if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
+ chip->div_scr = (u8)scr;
+ chip->div_cpsr = (u8)cpsr;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
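
To make the search concrete, here is a tiny standalone program mirroring the loop above with made-up numbers: a 100 MHz input clock and a requested 10 MHz SPI clock yield cpsr = 2 and scr = 4, since 100 MHz / (2 * (4 + 1)) = 10 MHz.

/* Standalone illustration of the divisor search; all numbers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long spi_clk_rate = 100000000;	/* assumed input clock */
	unsigned long rate = 10000000;		/* requested SPI clock */
	int cpsr, scr;

	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if (spi_clk_rate / (cpsr * (scr + 1)) <= rate) {
				/* prints: cpsr=2 scr=4 -> 10000000 Hz */
				printf("cpsr=%d scr=%d -> %lu Hz\n", cpsr, scr,
				       spi_clk_rate / (cpsr * (scr + 1)));
				return 0;
			}
		}
	}
	return 1;
}
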
+
+static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
+{
+ struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
+ int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
+
+ if (chip->ops && chip->ops->cs_control)
+ chip->ops->cs_control(spi, value);
+}
+
+/**
+ * ep93xx_spi_setup() - setup an SPI device
+ * @spi: SPI device to setup
+ *
+ * This function sets up SPI device mode, speed etc. Can be called multiple
+ * times for a single device. Returns %0 in case of success, negative error in
+ * case of failure. When this function returns success, the device is
+ * deselected.
+ */
+static int ep93xx_spi_setup(struct spi_device *spi)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
+ struct ep93xx_spi_chip *chip;
+
+ if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
+ dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
+ spi->modalias);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->spi = spi;
+ chip->ops = spi->controller_data;
+
+ if (chip->ops && chip->ops->setup) {
+ int ret = chip->ops->setup(spi);
+ if (ret) {
+ kfree(chip);
+ return ret;
+ }
+ }
+
+ spi_set_ctldata(spi, chip);
+ }
+
+ if (spi->max_speed_hz != chip->rate) {
+ int err;
+
+ err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
+ if (err != 0) {
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+ return err;
+ }
+ chip->rate = spi->max_speed_hz;
+ }
+
+ chip->dss = bits_per_word_to_dss(spi->bits_per_word);
+
+ ep93xx_spi_cs_control(spi, false);
+ return 0;
+}
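
The chip_ops hooks used above (setup, cleanup and cs_control) are supplied by board code through spi_board_info.controller_data. Below is a hedged sketch assuming a GPIO-driven chipselect; the GPIO number, the modalias and the board wiring are made up, and the hook signatures are simply the ones the driver calls above.

/* Hypothetical board support; GPIO line and slave device are illustrative. */
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <mach/ep93xx_spi.h>

#define BOARD_SPI_CS_GPIO	9

static int board_spi_cs_setup(struct spi_device *spi)
{
	int err = gpio_request(BOARD_SPI_CS_GPIO, spi->modalias);

	if (!err)
		err = gpio_direction_output(BOARD_SPI_CS_GPIO, 1);
	return err;
}

static void board_spi_cs_cleanup(struct spi_device *spi)
{
	gpio_free(BOARD_SPI_CS_GPIO);
}

static void board_spi_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(BOARD_SPI_CS_GPIO, value);
}

static struct ep93xx_spi_chip_ops board_spi_cs_ops = {
	.setup		= board_spi_cs_setup,
	.cleanup	= board_spi_cs_cleanup,
	.cs_control	= board_spi_cs_control,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias		= "spidev",	/* illustrative slave */
		.controller_data	= &board_spi_cs_ops,
		.max_speed_hz		= 10 * 1000 * 1000,
		.bus_num		= 0,
		.chip_select		= 0,
		.mode			= SPI_MODE_0,
	},
};
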
+
+/**
+ * ep93xx_spi_transfer() - queue message to be transferred
+ * @spi: target SPI device
+ * @msg: message to be transferred
+ *
+ * This function is called by SPI device drivers when they are going to transfer
+ * a new message. It simply puts the message in the queue and schedules
+ * the workqueue to perform the actual transfer later on.
+ *
+ * Returns %0 on success and negative error in case of failure.
+ */
+static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long flags;
+
+ if (!msg || !msg->complete)
+ return -EINVAL;
+
+ /* first validate each transfer */
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ if (t->bits_per_word) {
+ if (t->bits_per_word < 4 || t->bits_per_word > 16)
+ return -EINVAL;
+ }
+ if (t->speed_hz && t->speed_hz < espi->min_rate)
+ return -EINVAL;
+ }
+
+ /*
+ * Now that we own the message, let's initialize it so that it is
+ * suitable for us. We use @msg->status to signal whether there was an
+ * error in the transfer and @msg->state to hold a pointer to the
+ * current transfer (or %NULL if there is no active transfer).
+ */
+ msg->state = NULL;
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ spin_lock_irqsave(&espi->lock, flags);
+ if (!espi->running) {
+ spin_unlock_irqrestore(&espi->lock, flags);
+ return -ESHUTDOWN;
+ }
+ list_add_tail(&msg->queue, &espi->msg_queue);
+ queue_work(espi->wq, &espi->msg_work);
+ spin_unlock_irqrestore(&espi->lock, flags);
+
+ return 0;
+}
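
For completeness, a hedged protocol-driver snippet showing what eventually arrives at this transfer() hook via the SPI core; the command byte and the per-transfer speed override are purely illustrative.

/* Hypothetical caller; reaches ep93xx_spi_transfer() through spi_sync(). */
static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	u8 cmd = 0x9f;			/* made-up command byte */
	struct spi_transfer t[] = {
		{ .tx_buf = &cmd, .len = 1 },
		{ .rx_buf = id, .len = len, .speed_hz = 1000000 },
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	/* on success, m.actual_length ends up as 1 + len */
	return spi_sync(spi, &m);
}
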
+
+/**
+ * ep93xx_spi_cleanup() - cleans up master controller specific state
+ * @spi: SPI device to cleanup
+ *
+ * This function releases master controller specific state for given @spi
+ * device.
+ */
+static void ep93xx_spi_cleanup(struct spi_device *spi)
+{
+ struct ep93xx_spi_chip *chip;
+
+ chip = spi_get_ctldata(spi);
+ if (chip) {
+ if (chip->ops && chip->ops->cleanup)
+ chip->ops->cleanup(spi);
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+ }
+}
+
+/**
+ * ep93xx_spi_chip_setup() - configures hardware according to given @chip
+ * @espi: ep93xx SPI controller struct
+ * @chip: chip specific settings
+ *
+ * This function sets up the actual hardware registers with settings given in
+ * @chip. Note that no validation is done so make sure that callers validate
+ * settings before calling this.
+ */
+static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
+ const struct ep93xx_spi_chip *chip)
+{
+ u16 cr0;
+
+ cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
+ cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
+ cr0 |= chip->dss;
+
+ dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
+ chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
+ dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
+
+ ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
+ ep93xx_spi_write_u16(espi, SSPCR0, cr0);
+}
+
+static inline int bits_per_word(const struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct spi_transfer *t = msg->state;
+
+ return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
+}
+
+static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
+{
+ if (bits_per_word(espi) > 8) {
+ u16 tx_val = 0;
+
+ if (t->tx_buf)
+ tx_val = ((u16 *)t->tx_buf)[espi->tx];
+ ep93xx_spi_write_u16(espi, SSPDR, tx_val);
+ espi->tx += sizeof(tx_val);
+ } else {
+ u8 tx_val = 0;
+
+ if (t->tx_buf)
+ tx_val = ((u8 *)t->tx_buf)[espi->tx];
+ ep93xx_spi_write_u8(espi, SSPDR, tx_val);
+ espi->tx += sizeof(tx_val);
+ }
+}
+
+static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
+{
+ if (bits_per_word(espi) > 8) {
+ u16 rx_val;
+
+ rx_val = ep93xx_spi_read_u16(espi, SSPDR);
+ if (t->rx_buf)
+ ((u16 *)t->rx_buf)[espi->rx] = rx_val;
+ espi->rx += sizeof(rx_val);
+ } else {
+ u8 rx_val;
+
+ rx_val = ep93xx_spi_read_u8(espi, SSPDR);
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[espi->rx] = rx_val;
+ espi->rx += sizeof(rx_val);
+ }
+}
+
+/**
+ * ep93xx_spi_read_write() - perform next RX/TX transfer
+ * @espi: ep93xx SPI controller struct
+ *
+ * This function transfers the next bytes (or half-words) to/from the RX/TX FIFOs. If
+ * called several times, the whole transfer will be completed. Returns
+ * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
+ *
+ * When this function is finished, RX FIFO should be empty and TX FIFO should be
+ * full.
+ */
+static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct spi_transfer *t = msg->state;
+
+ /* read as long as RX FIFO has frames in it */
+ while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
+ ep93xx_do_read(espi, t);
+ espi->fifo_level--;
+ }
+
+ /* write as long as TX FIFO has room */
+ while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
+ ep93xx_do_write(espi, t);
+ espi->fifo_level++;
+ }
+
+ if (espi->rx == t->len) {
+ msg->actual_length += t->len;
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+/**
+ * ep93xx_spi_process_transfer() - processes one SPI transfer
+ * @espi: ep93xx SPI controller struct
+ * @msg: current message
+ * @t: transfer to process
+ *
+ * This function processes one SPI transfer given in @t. Function waits until
+ * transfer is complete (may sleep) and updates @msg->status based on whether
+ * transfer was successfully processed or not.
+ */
+static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
+ struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
+
+ msg->state = t;
+
+ /*
+ * Handle any transfer specific settings if needed. We use
+ * temporary chip settings here and restore original later when
+ * the transfer is finished.
+ */
+ if (t->speed_hz || t->bits_per_word) {
+ struct ep93xx_spi_chip tmp_chip = *chip;
+
+ if (t->speed_hz) {
+ int err;
+
+ err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
+ t->speed_hz);
+ if (err) {
+ dev_err(&espi->pdev->dev,
+ "failed to adjust speed\n");
+ msg->status = err;
+ return;
+ }
+ }
+
+ if (t->bits_per_word)
+ tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);
+
+ /*
+ * Set up temporary new hw settings for this transfer.
+ */
+ ep93xx_spi_chip_setup(espi, &tmp_chip);
+ }
+
+ espi->rx = 0;
+ espi->tx = 0;
+
+ /*
+ * Now everything is set up for the current transfer. We prime the TX
+ * FIFO, enable interrupts, and wait for the transfer to complete.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ ep93xx_spi_enable_interrupts(espi);
+ wait_for_completion(&espi->wait);
+ }
+
+ /*
+ * In case of error during transmit, we bail out from processing
+ * the message.
+ */
+ if (msg->status)
+ return;
+
+ /*
+ * After this transfer is finished, perform any possible
+ * post-transfer actions requested by the protocol driver.
+ */
+ if (t->delay_usecs) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(t->delay_usecs));
+ }
+ if (t->cs_change) {
+ if (!list_is_last(&t->transfer_list, &msg->transfers)) {
+ /*
+ * In case protocol driver is asking us to drop the
+ * chipselect briefly, we let the scheduler to handle
+ * any "delay" here.
+ */
+ ep93xx_spi_cs_control(msg->spi, false);
+ cond_resched();
+ ep93xx_spi_cs_control(msg->spi, true);
+ }
+ }
+
+ if (t->speed_hz || t->bits_per_word)
+ ep93xx_spi_chip_setup(espi, chip);
+}
+
+/*
+ * ep93xx_spi_process_message() - process one SPI message
+ * @espi: ep93xx SPI controller struct
+ * @msg: message to process
+ *
+ * This function processes a single SPI message. We go through all transfers in
+ * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
+ * asserted during the whole message (unless per transfer cs_change is set).
+ *
+ * @msg->status contains %0 in case of success or negative error code in case of
+ * failure.
+ */
+static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
+ struct spi_message *msg)
+{
+ unsigned long timeout;
+ struct spi_transfer *t;
+ int err;
+
+ /*
+ * Enable the SPI controller and its clock.
+ */
+ err = ep93xx_spi_enable(espi);
+ if (err) {
+ dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
+ msg->status = err;
+ return;
+ }
+
+ /*
+ * Just to be sure: flush any data from RX FIFO.
+ */
+ timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
+ while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(&espi->pdev->dev,
+ "timeout while flushing RX FIFO\n");
+ msg->status = -ETIMEDOUT;
+ return;
+ }
+ ep93xx_spi_read_u16(espi, SSPDR);
+ }
+
+ /*
+ * We explicitly handle FIFO level. This way we don't have to check TX
+ * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
+ */
+ espi->fifo_level = 0;
+
+ /*
+ * Update SPI controller registers according to spi device and assert
+ * the chipselect.
+ */
+ ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
+ ep93xx_spi_cs_control(msg->spi, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ ep93xx_spi_process_transfer(espi, msg, t);
+ if (msg->status)
+ break;
+ }
+
+ /*
+ * Now the whole message is transferred (or failed for some reason). We
+ * deselect the device and disable the SPI controller.
+ */
+ ep93xx_spi_cs_control(msg->spi, false);
+ ep93xx_spi_disable(espi);
+}
+
+#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
+
+/**
+ * ep93xx_spi_work() - EP93xx SPI workqueue worker function
+ * @work: work struct
+ *
+ * Workqueue worker function. This function is called when there are new
+ * SPI messages to be processed. A message is taken from the queue and then
+ * passed to ep93xx_spi_process_message().
+ *
+ * After the message is transferred, the protocol driver is notified by calling
+ * @msg->complete(). In case of error, @msg->status is set to negative error
+ * number, otherwise it contains zero (and @msg->actual_length is updated).
+ */
+static void ep93xx_spi_work(struct work_struct *work)
+{
+ struct ep93xx_spi *espi = work_to_espi(work);
+ struct spi_message *msg;
+
+ spin_lock_irq(&espi->lock);
+ if (!espi->running || espi->current_msg ||
+ list_empty(&espi->msg_queue)) {
+ spin_unlock_irq(&espi->lock);
+ return;
+ }
+ msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
+ list_del_init(&msg->queue);
+ espi->current_msg = msg;
+ spin_unlock_irq(&espi->lock);
+
+ ep93xx_spi_process_message(espi, msg);
+
+ /*
+ * Update the current message and re-schedule ourselves if there are
+ * more messages in the queue.
+ */
+ spin_lock_irq(&espi->lock);
+ espi->current_msg = NULL;
+ if (espi->running && !list_empty(&espi->msg_queue))
+ queue_work(espi->wq, &espi->msg_work);
+ spin_unlock_irq(&espi->lock);
+
+ /* notify the protocol driver that we are done with this message */
+ msg->complete(msg->context);
+}
+
+static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct ep93xx_spi *espi = dev_id;
+ u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
+
+ /*
+ * If we got ROR (receive overrun) interrupt we know that something is
+ * wrong. Just abort the message.
+ */
+ if (unlikely(irq_status & SSPIIR_RORIS)) {
+ /* clear the overrun interrupt */
+ ep93xx_spi_write_u8(espi, SSPICR, 0);
+ dev_warn(&espi->pdev->dev,
+ "receive overrun, aborting the message\n");
+ espi->current_msg->status = -EIO;
+ } else {
+ /*
+ * Interrupt is either RX (RIS) or TX (TIS). For both cases we
+ * simply execute next data transfer.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ /*
+ * In the normal case, there is still some processing left
+ * for the current transfer. Let's wait for the next
+ * interrupt then.
+ */
+ return IRQ_HANDLED;
+ }
+ }
+
+ /*
+ * Current transfer is finished, either with error or with success. In
+ * any case we disable interrupts and notify the worker to handle
+ * any post-processing of the message.
+ */
+ ep93xx_spi_disable_interrupts(espi);
+ complete(&espi->wait);
+ return IRQ_HANDLED;
+}
+
+static int __init ep93xx_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ep93xx_spi_info *info;
+ struct ep93xx_spi *espi;
+ struct resource *res;
+ int error;
+
+ info = pdev->dev.platform_data;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*espi));
+ if (!master) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ return -ENOMEM;
+ }
+
+ master->setup = ep93xx_spi_setup;
+ master->transfer = ep93xx_spi_transfer;
+ master->cleanup = ep93xx_spi_cleanup;
+ master->bus_num = pdev->id;
+ master->num_chipselect = info->num_chipselect;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ platform_set_drvdata(pdev, master);
+
+ espi = spi_master_get_devdata(master);
+
+ espi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(espi->clk)) {
+ dev_err(&pdev->dev, "unable to get spi clock\n");
+ error = PTR_ERR(espi->clk);
+ goto fail_release_master;
+ }
+
+ spin_lock_init(&espi->lock);
+ init_completion(&espi->wait);
+
+ /*
+ * Calculate maximum and minimum supported clock rates
+ * for the controller.
+ */
+ espi->max_rate = clk_get_rate(espi->clk) / 2;
+ espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
+ espi->pdev = pdev;
+
+ espi->irq = platform_get_irq(pdev, 0);
+ if (espi->irq < 0) {
+ error = -EBUSY;
+ dev_err(&pdev->dev, "failed to get irq resources\n");
+ goto fail_put_clock;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get iomem resource\n");
+ error = -ENODEV;
+ goto fail_put_clock;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to request iomem resources\n");
+ error = -EBUSY;
+ goto fail_put_clock;
+ }
+
+ espi->regs_base = ioremap(res->start, resource_size(res));
+ if (!espi->regs_base) {
+ dev_err(&pdev->dev, "failed to map resources\n");
+ error = -ENODEV;
+ goto fail_free_mem;
+ }
+
+ error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
+ "ep93xx-spi", espi);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ goto fail_unmap_regs;
+ }
+
+ espi->wq = create_singlethread_workqueue("ep93xx_spid");
+ if (!espi->wq) {
+ dev_err(&pdev->dev, "unable to create workqueue\n");
+ error = -ENOMEM;
+ goto fail_free_irq;
+ }
+ INIT_WORK(&espi->msg_work, ep93xx_spi_work);
+ INIT_LIST_HEAD(&espi->msg_queue);
+ espi->running = true;
+
+ /* make sure that the hardware is disabled */
+ ep93xx_spi_write_u8(espi, SSPCR1, 0);
+
+ error = spi_register_master(master);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register SPI master\n");
+ goto fail_free_queue;
+ }
+
+ dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
+ (unsigned long)res->start, espi->irq);
+
+ return 0;
+
+fail_free_queue:
+ destroy_workqueue(espi->wq);
+fail_free_irq:
+ free_irq(espi->irq, espi);
+fail_unmap_regs:
+ iounmap(espi->regs_base);
+fail_free_mem:
+ release_mem_region(res->start, resource_size(res));
+fail_put_clock:
+ clk_put(espi->clk);
+fail_release_master:
+ spi_master_put(master);
+ platform_set_drvdata(pdev, NULL);
+
+ return error;
+}
+
+static int __exit ep93xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct resource *res;
+
+ spin_lock_irq(&espi->lock);
+ espi->running = false;
+ spin_unlock_irq(&espi->lock);
+
+ destroy_workqueue(espi->wq);
+
+ /*
+ * Complete remaining messages with %-ESHUTDOWN status.
+ */
+ spin_lock_irq(&espi->lock);
+ while (!list_empty(&espi->msg_queue)) {
+ struct spi_message *msg;
+
+ msg = list_first_entry(&espi->msg_queue,
+ struct spi_message, queue);
+ list_del_init(&msg->queue);
+ msg->status = -ESHUTDOWN;
+ spin_unlock_irq(&espi->lock);
+ msg->complete(msg->context);
+ spin_lock_irq(&espi->lock);
+ }
+ spin_unlock_irq(&espi->lock);
+
+ free_irq(espi->irq, espi);
+ iounmap(espi->regs_base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+ clk_put(espi->clk);
+ platform_set_drvdata(pdev, NULL);
+
+ spi_unregister_master(master);
+ return 0;
+}
+
+static struct platform_driver ep93xx_spi_driver = {
+ .driver = {
+ .name = "ep93xx-spi",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(ep93xx_spi_remove),
+};
+
+static int __init ep93xx_spi_init(void)
+{
+ return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
+}
+module_init(ep93xx_spi_init);
+
+static void __exit ep93xx_spi_exit(void)
+{
+ platform_driver_unregister(&ep93xx_spi_driver);
+}
+module_exit(ep93xx_spi_exit);
+
+MODULE_DESCRIPTION("EP93xx SPI Controller driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-spi");
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
new file mode 100644
index 00000000000..28a126d2742
--- /dev/null
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -0,0 +1,576 @@
+/*
+ * MPC512x PSC in SPI mode driver.
+ *
+ * Copyright (C) 2007,2008 Freescale Semiconductor Inc.
+ * Original port from 52xx driver:
+ * Hongjun Chen <hong-jun.chen@freescale.com>
+ *
+ * Fork of mpc52xx_psc_spi.c:
+ * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <asm/mpc52xx_psc.h>
+
+struct mpc512x_psc_spi {
+ void (*cs_control)(struct spi_device *spi, bool on);
+ u32 sysclk;
+
+ /* driver internal data */
+ struct mpc52xx_psc __iomem *psc;
+ struct mpc512x_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+ u8 busy;
+ u32 mclk;
+ u8 eofbyte;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ struct list_head queue;
+ spinlock_t lock; /* Message queue lock */
+
+ struct completion done;
+};
+
+/* controller state */
+struct mpc512x_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the defaults
+ */
+static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ u32 sicr;
+ u32 ccr;
+ u16 bclkdiv;
+
+ sicr = in_be32(&psc->sicr);
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(&psc->sicr, sicr);
+
+ ccr = in_be32(&psc->ccr);
+ ccr &= 0xFF000000;
+ if (cs->speed_hz)
+ bclkdiv = (mps->mclk / cs->speed_hz) - 1;
+ else
+ bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
+ out_be32(&psc->ccr, ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+}
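
As a quick sanity check of the CCR packing above, a worked example with assumed figures (the 128 MHz mclk and 4 MHz transfer speed are illustrative, not defaults of this driver):

/*
 * Assumed: mclk = 128 MHz, cs->speed_hz = 4 MHz
 *
 *   bclkdiv    = 128000000 / 4000000 - 1 = 31 = 0x001f
 *   CCR[23:16] = 0x1f   (low byte of the divider)
 *   CCR[15:8]  = 0x00   (high byte of the divider)
 *
 * so the resulting bit clock is mclk / (bclkdiv + 1) = 4 MHz.
 */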
+
+static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+}
+
+/* extract and scale size field in txsz or rxsz */
+#define MPC512x_PSC_FIFO_SZ(sz) (((sz) & 0x7ff) << 2)
+
+#define EOFBYTE 1
+
+static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ size_t len = t->len;
+ u8 *tx_buf = (u8 *)t->tx_buf;
+ u8 *rx_buf = (u8 *)t->rx_buf;
+
+ if (!tx_buf && !rx_buf && t->len)
+ return -EINVAL;
+
+ /* Zero MR2 */
+ in_8(&psc->mode);
+ out_8(&psc->mode, 0x0);
+
+ while (len) {
+ int count;
+ int i;
+ u8 data;
+ size_t fifosz;
+ int rxcount;
+
+ /*
+ * The number of bytes that can be sent at a time
+ * depends on the fifo size.
+ */
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
+ count = min(fifosz, len);
+
+ for (i = count; i > 0; i--) {
+ data = tx_buf ? *tx_buf++ : 0;
+ if (len == EOFBYTE)
+ setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ len--;
+ }
+
+ INIT_COMPLETION(mps->done);
+
+ /* interrupt on tx fifo empty */
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command,
+ MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
+ wait_for_completion(&mps->done);
+
+ mdelay(1);
+
+ /* rx fifo should have count bytes in it */
+ rxcount = in_be32(&fifo->rxcnt);
+ if (rxcount != count)
+ mdelay(1);
+
+ rxcount = in_be32(&fifo->rxcnt);
+ if (rxcount != count) {
+ dev_warn(&spi->dev, "expected %d bytes in rx fifo "
+ "but got %d\n", count, rxcount);
+ }
+
+ rxcount = min(rxcount, count);
+ for (i = rxcount; i > 0; i--) {
+ data = in_8(&fifo->rxdata_8);
+ if (rx_buf)
+ *rx_buf++ = data;
+ }
+ while (in_be32(&fifo->rxcnt)) {
+ in_8(&fifo->rxdata_8);
+ }
+
+ out_8(&psc->command,
+ MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ }
+ /* disable transmitter/receiver and fifo interrupt */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_be32(&fifo->tximr, 0);
+ return 0;
+}
+
+static void mpc512x_psc_spi_work(struct work_struct *work)
+{
+ struct mpc512x_psc_spi *mps = container_of(work,
+ struct mpc512x_psc_spi,
+ work);
+
+ spin_lock_irq(&mps->lock);
+ mps->busy = 1;
+ while (!list_empty(&mps->queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+
+ m = container_of(mps->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mps->lock);
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc512x_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc512x_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ m->complete(m->context);
+
+ if (status || !cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+
+ mpc512x_psc_spi_transfer_setup(spi, NULL);
+
+ spin_lock_irq(&mps->lock);
+ }
+ mps->busy = 0;
+ spin_unlock_irq(&mps->lock);
+}
+
+static int mpc512x_psc_spi_setup(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+ unsigned long flags;
+
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ cs->bits_per_word = spi->bits_per_word;
+ cs->speed_hz = spi->max_speed_hz;
+
+ spin_lock_irqsave(&mps->lock, flags);
+ if (!mps->busy)
+ mpc512x_psc_spi_deactivate_cs(spi);
+ spin_unlock_irqrestore(&mps->lock, flags);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_transfer(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&mps->lock, flags);
+ list_add_tail(&m->queue, &mps->queue);
+ queue_work(mps->workqueue, &mps->work);
+ spin_unlock_irqrestore(&mps->lock, flags);
+
+ return 0;
+}
+
+static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static int mpc512x_psc_spi_port_config(struct spi_master *master,
+ struct mpc512x_psc_spi *mps)
+{
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ struct clk *spiclk;
+ int ret = 0;
+ char name[32];
+ u32 sicr;
+ u32 ccr;
+ u16 bclkdiv;
+
+ sprintf(name, "psc%d_mclk", master->bus_num);
+ spiclk = clk_get(&master->dev, name);
+ clk_enable(spiclk);
+ mps->mclk = clk_get_rate(spiclk);
+ clk_put(spiclk);
+
+ /* Reset the PSC into a known state */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ /* Disable PSC interrupts; all useful interrupts are in the fifo */
+ out_be16(&psc->isr_imr.imr, 0);
+
+ /* Disable fifo interrupts, will be enabled later */
+ out_be32(&fifo->tximr, 0);
+ out_be32(&fifo->rximr, 0);
+
+ /* Setup fifo slice address and size */
+ /*out_be32(&fifo->txsz, 0x0fe00004);*/
+ /*out_be32(&fifo->rxsz, 0x0ff00004);*/
+
+ sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */
+ 0x00800000 | /* GenClk = 1 -- internal clk */
+ 0x00008000 | /* SPI = 1 */
+ 0x00004000 | /* MSTR = 1 -- SPI master */
+ 0x00000800; /* UseEOF = 1 -- SS low until EOF */
+
+ out_be32(&psc->sicr, sicr);
+
+ ccr = in_be32(&psc->ccr);
+ ccr &= 0xFF000000;
+ bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
+ out_be32(&psc->ccr, ccr);
+
+ /* Set 2ms DTL delay */
+ out_8(&psc->ctur, 0x00);
+ out_8(&psc->ctlr, 0x82);
+
+ /* we don't use the alarms */
+ out_be32(&fifo->rxalarm, 0xfff);
+ out_be32(&fifo->txalarm, 0);
+
+ /* Enable FIFO slices for Rx/Tx */
+ out_be32(&fifo->rxcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+ out_be32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+
+ mps->bits_per_word = 8;
+
+ return ret;
+}
+
+static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
+{
+ struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ /* clear interrupt and wake up the work queue */
+ if (in_be32(&fifo->txisr) &
+ in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, 0);
+ complete(&mps->done);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* bus_num is used only for the case dev->platform_data == NULL */
+static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq,
+ s16 bus_num)
+{
+ struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct mpc512x_psc_spi *mps;
+ struct spi_master *master;
+ int ret;
+ void *tempp;
+
+ master = spi_alloc_master(dev, sizeof *mps);
+ if (master == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+ mps = spi_master_get_devdata(master);
+ mps->irq = irq;
+
+ if (pdata == NULL) {
+ dev_err(dev, "probe called without platform data, no "
+ "cs_control function will be called\n");
+ mps->cs_control = NULL;
+ mps->sysclk = 0;
+ master->bus_num = bus_num;
+ master->num_chipselect = 255;
+ } else {
+ mps->cs_control = pdata->cs_control;
+ mps->sysclk = pdata->sysclk;
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+ }
+
+ master->setup = mpc512x_psc_spi_setup;
+ master->transfer = mpc512x_psc_spi_transfer;
+ master->cleanup = mpc512x_psc_spi_cleanup;
+
+ tempp = ioremap(regaddr, size);
+ if (!tempp) {
+ dev_err(dev, "could not ioremap I/O port range\n");
+ ret = -EFAULT;
+ goto free_master;
+ }
+ mps->psc = tempp;
+ mps->fifo =
+ (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
+
+ ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
+ if (ret)
+ goto free_master;
+
+ ret = mpc512x_psc_spi_port_config(master, mps);
+ if (ret < 0)
+ goto free_irq;
+
+ spin_lock_init(&mps->lock);
+ init_completion(&mps->done);
+ INIT_WORK(&mps->work, mpc512x_psc_spi_work);
+ INIT_LIST_HEAD(&mps->queue);
+
+ mps->workqueue =
+ create_singlethread_workqueue(dev_name(master->dev.parent));
+ if (mps->workqueue == NULL) {
+ ret = -EBUSY;
+ goto free_irq;
+ }
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto unreg_master;
+
+ return ret;
+
+unreg_master:
+ destroy_workqueue(mps->workqueue);
+free_irq:
+ free_irq(mps->irq, mps);
+free_master:
+ if (mps->psc)
+ iounmap(mps->psc);
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int __exit mpc512x_psc_spi_do_remove(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+
+ flush_workqueue(mps->workqueue);
+ destroy_workqueue(mps->workqueue);
+ spi_unregister_master(master);
+ free_irq(mps->irq, mps);
+ if (mps->psc)
+ iounmap(mps->psc);
+
+ return 0;
+}
+
+static int __init mpc512x_psc_spi_of_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ s16 id = -1;
+
+ regaddr_p = of_get_address(op->node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ dev_err(&op->dev, "Invalid PSC address\n");
+ return -EINVAL;
+ }
+ regaddr64 = of_translate_address(op->node, regaddr_p);
+
+ /* get PSC id (0..11, used by port_config) */
+ if (op->dev.platform_data == NULL) {
+ const u32 *psc_nump;
+
+ psc_nump = of_get_property(op->node, "cell-index", NULL);
+ if (!psc_nump || *psc_nump > 11) {
+ dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
+ "has invalid cell-index property\n",
+ op->node->full_name);
+ return -EINVAL;
+ }
+ id = *psc_nump;
+ }
+
+ return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
+ irq_of_parse_and_map(op->node, 0), id);
+}
+
+static int __exit mpc512x_psc_spi_of_remove(struct of_device *op)
+{
+ return mpc512x_psc_spi_do_remove(&op->dev);
+}
+
+static struct of_device_id mpc512x_psc_spi_of_match[] = {
+ { .compatible = "fsl,mpc5121-psc-spi", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
+
+static struct of_platform_driver mpc512x_psc_spi_of_driver = {
+ .match_table = mpc512x_psc_spi_of_match,
+ .probe = mpc512x_psc_spi_of_probe,
+ .remove = __exit_p(mpc512x_psc_spi_of_remove),
+ .driver = {
+ .name = "mpc512x-psc-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc512x_psc_spi_init(void)
+{
+ return of_register_platform_driver(&mpc512x_psc_spi_of_driver);
+}
+module_init(mpc512x_psc_spi_init);
+
+static void __exit mpc512x_psc_spi_exit(void)
+{
+ of_unregister_platform_driver(&mpc512x_psc_spi_of_driver);
+}
+module_exit(mpc512x_psc_spi_exit);
+
+MODULE_AUTHOR("John Rigby");
+MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 77d4cc88ede..7104cb739da 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -472,18 +472,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
s16 id = -1;
int rc;
- regaddr_p = of_get_address(op->node, 0, &size64, NULL);
+ regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
dev_err(&op->dev, "Invalid PSC address\n");
return -EINVAL;
}
- regaddr64 = of_translate_address(op->node, regaddr_p);
+ regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
/* get PSC id (1..6, used by port_config) */
if (op->dev.platform_data == NULL) {
const u32 *psc_nump;
- psc_nump = of_get_property(op->node, "cell-index", NULL);
+ psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
if (!psc_nump || *psc_nump > 5) {
dev_err(&op->dev, "Invalid cell-index property\n");
return -EINVAL;
@@ -492,9 +492,10 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
}
rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
- irq_of_parse_and_map(op->node, 0), id);
+ irq_of_parse_and_map(op->dev.of_node, 0), id);
if (rc == 0)
- of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
+ of_register_spi_devices(dev_get_drvdata(&op->dev),
+ op->dev.of_node);
return rc;
}
@@ -513,14 +514,12 @@ static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
- .owner = THIS_MODULE,
- .name = "mpc52xx-psc-spi",
- .match_table = mpc52xx_psc_spi_of_match,
.probe = mpc52xx_psc_spi_of_probe,
.remove = __exit_p(mpc52xx_psc_spi_of_remove),
.driver = {
.name = "mpc52xx-psc-spi",
.owner = THIS_MODULE,
+ .of_match_table = mpc52xx_psc_spi_of_match,
},
};
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index cd68f1ce5cc..b1a76bff775 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -403,7 +403,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
- regs = of_iomap(op->node, 0);
+ regs = of_iomap(op->dev.of_node, 0);
if (!regs)
return -ENODEV;
@@ -445,11 +445,11 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
ms = spi_master_get_devdata(master);
ms->master = master;
ms->regs = regs;
- ms->irq0 = irq_of_parse_and_map(op->node, 0);
- ms->irq1 = irq_of_parse_and_map(op->node, 1);
+ ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
+ ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
- ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
- ms->gpio_cs_count = of_gpio_count(op->node);
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
@@ -460,7 +460,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
}
for (i = 0; i < ms->gpio_cs_count; i++) {
- gpio_cs = of_get_gpio(op->node, i);
+ gpio_cs = of_get_gpio(op->dev.of_node, i);
if (gpio_cs < 0) {
dev_err(&op->dev,
"could not parse the gpio field "
@@ -512,7 +512,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
if (rc)
goto err_register;
- of_register_spi_devices(master, op->node);
+ of_register_spi_devices(master, op->dev.of_node);
dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
return rc;
@@ -558,9 +558,11 @@ static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
static struct of_platform_driver mpc52xx_spi_of_driver = {
- .owner = THIS_MODULE,
- .name = "mpc52xx-spi",
- .match_table = mpc52xx_spi_match,
+ .driver = {
+ .name = "mpc52xx-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_spi_match,
+ },
.probe = mpc52xx_spi_probe,
.remove = __exit_p(mpc52xx_spi_remove),
};
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index e0de0d0eede..b3a94ca0a75 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -38,7 +38,7 @@
#include <plat/dma.h>
#include <plat/clock.h>
-
+#include <plat/mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
@@ -113,7 +113,7 @@ struct omap2_mcspi_dma {
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
-#define DMA_MIN_BYTES 8
+#define DMA_MIN_BYTES 160
struct omap2_mcspi {
@@ -229,6 +229,8 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+ /* Flush posted writes */
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
@@ -303,11 +305,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
unsigned int count, c;
unsigned long base, tx_reg, rx_reg;
int word_len, data_type, element_count;
+ int elements;
+ u32 l;
u8 * rx;
const u8 * tx;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ l = mcspi_cached_chconf0(spi);
count = xfer->len;
c = count;
@@ -346,8 +351,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
}
if (rx != NULL) {
+ elements = element_count - 1;
+ if (l & OMAP2_MCSPI_CHCONF_TURBO)
+ elements--;
+
omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
- data_type, element_count - 1, 1,
+ data_type, elements, 1,
OMAP_DMA_SYNC_ELEMENT,
mcspi_dma->dma_rx_sync_dev, 1);
@@ -379,17 +388,42 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
wait_for_completion(&mcspi_dma->dma_rx_completion);
dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
omap2_mcspi_set_enable(spi, 0);
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+
+ if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+ & OMAP2_MCSPI_CHSTAT_RXS)) {
+ u32 w;
+
+ w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+ if (word_len <= 8)
+ ((u8 *)xfer->rx_buf)[elements++] = w;
+ else if (word_len <= 16)
+ ((u16 *)xfer->rx_buf)[elements++] = w;
+ else /* word_len <= 32 */
+ ((u32 *)xfer->rx_buf)[elements++] = w;
+ } else {
+ dev_err(&spi->dev,
+ "DMA RX penultimate word empty");
+ count -= (word_len <= 8) ? 2 :
+ (word_len <= 16) ? 4 :
+ /* word_len <= 32 */ 8;
+ omap2_mcspi_set_enable(spi, 1);
+ return count;
+ }
+ }
+
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
u32 w;
w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
if (word_len <= 8)
- ((u8 *)xfer->rx_buf)[element_count - 1] = w;
+ ((u8 *)xfer->rx_buf)[elements] = w;
else if (word_len <= 16)
- ((u16 *)xfer->rx_buf)[element_count - 1] = w;
+ ((u16 *)xfer->rx_buf)[elements] = w;
else /* word_len <= 32 */
- ((u32 *)xfer->rx_buf)[element_count - 1] = w;
+ ((u32 *)xfer->rx_buf)[elements] = w;
} else {
dev_err(&spi->dev, "DMA RX last word empty");
count -= (word_len <= 8) ? 1 :
@@ -433,7 +467,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);
- l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
/* We store the pre-calculated register addresses on stack to speed
* up the transfer loop. */
@@ -468,11 +501,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
- /* prevent last RX_ONLY read from triggering
- * more word i/o: switch to rx+tx
- */
- if (c == 0 && tx == NULL)
- mcspi_write_chconf0(spi, l);
+
+ if (c == 1 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+#endif
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %02x\n",
@@ -506,11 +554,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
- /* prevent last RX_ONLY read from triggering
- * more word i/o: switch to rx+tx
- */
- if (c == 0 && tx == NULL)
- mcspi_write_chconf0(spi, l);
+
+ if (c == 2 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+#endif
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %04x\n",
@@ -544,11 +607,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "RXS timed out\n");
goto out;
}
- /* prevent last RX_ONLY read from triggering
- * more word i/o: switch to rx+tx
- */
- if (c == 0 && tx == NULL)
- mcspi_write_chconf0(spi, l);
+
+ if (c == 4 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %08x\n",
+ word_len, *(rx - 1));
+#endif
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
*rx++ = __raw_readl(rx_reg);
#ifdef VERBOSE
dev_dbg(&spi->dev, "read-%d %08x\n",
@@ -568,6 +646,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
dev_err(&spi->dev, "EOT timed out\n");
}
out:
+ omap2_mcspi_set_enable(spi, 1);
return count - c;
}
@@ -755,7 +834,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
struct omap2_mcspi_cs *cs;
mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (spi->controller_state) {
/* Unlink controller state from context save list */
@@ -765,13 +843,17 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(spi->controller_state);
}
- if (mcspi_dma->dma_rx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
- }
- if (mcspi_dma->dma_tx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_tx_channel);
- mcspi_dma->dma_tx_channel = -1;
+ if (spi->chip_select < spi->master->num_chipselect) {
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi_dma->dma_rx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_rx_channel);
+ mcspi_dma->dma_rx_channel = -1;
+ }
+ if (mcspi_dma->dma_tx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_tx_channel);
+ mcspi_dma->dma_tx_channel = -1;
+ }
}
}
@@ -797,6 +879,7 @@ static void omap2_mcspi_work(struct work_struct *work)
struct spi_transfer *t = NULL;
int cs_active = 0;
struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_device_config *cd;
int par_override = 0;
int status = 0;
u32 chconf;
@@ -809,6 +892,7 @@ static void omap2_mcspi_work(struct work_struct *work)
spi = m->spi;
cs = spi->controller_state;
+ cd = spi->controller_data;
omap2_mcspi_set_enable(spi, 1);
list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -832,10 +916,19 @@ static void omap2_mcspi_work(struct work_struct *work)
chconf = mcspi_cached_chconf0(spi);
chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+
if (t->tx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
else if (t->rx_buf == NULL)
chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
+
mcspi_write_chconf0(spi, chconf);
if (t->len) {
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 36828358a4d..e76b1afafe0 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -36,8 +36,7 @@
#include <asm/delay.h>
#include <mach/dma.h>
-#include <mach/regs-ssp.h>
-#include <mach/ssp.h>
+#include <plat/ssp.h>
#include <mach/pxa2xx_spi.h>
MODULE_AUTHOR("Stephen Street");
@@ -1318,14 +1317,14 @@ static int setup(struct spi_device *spi)
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
if (drv_data->ssp_type != PXA25x_SSP)
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk)
- / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
+ clk_get_rate(ssp->clk)
+ / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
else
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
- clk_get_rate(ssp->clk) / 2
- / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
+ clk_get_rate(ssp->clk) / 2
+ / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
@@ -1466,7 +1465,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
platform_info = dev->platform_data;
- ssp = ssp_request(pdev->id, pdev->name);
+ ssp = pxa_ssp_request(pdev->id, pdev->name);
if (ssp == NULL) {
dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id);
return -ENODEV;
@@ -1476,7 +1475,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
if (!master) {
dev_err(&pdev->dev, "cannot alloc spi_master\n");
- ssp_free(ssp);
+ pxa_ssp_free(ssp);
return -ENOMEM;
}
drv_data = spi_master_get_devdata(master);
@@ -1558,7 +1557,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT),
drv_data->ioaddr);
- write_SSCR0(SSCR0_SerClkDiv(2)
+ write_SSCR0(SSCR0_SCR(2)
| SSCR0_Motorola
| SSCR0_DataSize(8),
drv_data->ioaddr);
@@ -1605,7 +1604,7 @@ out_error_irq_alloc:
out_error_master_alloc:
spi_master_put(master);
- ssp_free(ssp);
+ pxa_ssp_free(ssp);
return status;
}
@@ -1649,7 +1648,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
free_irq(ssp->irq, drv_data);
/* Release SSP */
- ssp_free(ssp);
+ pxa_ssp_free(ssp);
/* Disconnect from the SPI framework */
spi_unregister_master(drv_data->master);
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h
new file mode 100644
index 00000000000..fc033bbf918
--- /dev/null
+++ b/drivers/spi/spi_bitbang_txrx.h
@@ -0,0 +1,93 @@
+/*
+ * Mix this utility code with some glue code to get one of several types of
+ * simple SPI master driver. Two do polled word-at-a-time I/O:
+ *
+ * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](),
+ * expanding the per-word routines from the inline templates below.
+ *
+ * - Drivers for controllers resembling bare shift registers. Provide
+ * chipselect() and txrx_word[](), with custom setup()/cleanup() methods
+ * that use your controller's clock and chipselect registers.
+ *
+ * Some hardware works well with requests at spi_transfer scope:
+ *
+ * - Drivers leveraging smarter hardware, with fifos or DMA; or for half
+ * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
+ * and custom setup()/cleanup() methods.
+ */
+
+/*
+ * The code that knows what GPIO pins do what should have declared four
+ * functions, ideally as inlines, before including this header:
+ *
+ * void setsck(struct spi_device *, int is_on);
+ * void setmosi(struct spi_device *, int is_on);
+ * int getmiso(struct spi_device *);
+ * void spidelay(unsigned);
+ *
+ * setsck()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * setmosi()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * getmiso() is required to return 0 or 1 only. Any other value is invalid
+ * and will result in improper operation.
+ *
+ * A non-inlined routine would call bitbang_txrx_*() routines. The
+ * main loop could easily compile down to a handful of instructions,
+ * especially if the delay is a NOP (to run at peak speed).
+ *
+ * Since this is software, the timings may not be exactly what your board's
+ * chips need ... there may be several reasons you'd need to tweak timings
+ * in these routines, not just to make it faster or slower to match a
+ * particular CPU clock rate.
+ */
+
+static inline u32
+bitbang_txrx_be_cpha0(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on trailing edge */
+ setmosi(spi, word & (1 << 31));
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on leading edge */
+ word <<= 1;
+ word |= getmiso(spi);
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_be_cpha1(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ setmosi(spi, word & (1 << 31));
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on trailing edge */
+ word <<= 1;
+ word |= getmiso(spi);
+ }
+ return word;
+}
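
A minimal sketch of the glue code described in the header comment above; the GPIO numbers and helper names are hypothetical, and only the required setsck()/setmosi()/getmiso()/spidelay() contract and the bitbang_txrx_be_cpha0() call mirror this file:

#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>

#define EXAMPLE_GPIO_SCK	10	/* hypothetical pin numbers */
#define EXAMPLE_GPIO_MOSI	11
#define EXAMPLE_GPIO_MISO	12

static inline void setsck(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_SCK, is_on);
}

static inline void setmosi(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_MOSI, is_on);
}

static inline int getmiso(struct spi_device *spi)
{
	return !!gpio_get_value(EXAMPLE_GPIO_MISO);	/* must return 0 or 1 */
}

#define spidelay(nsecs)	ndelay(nsecs)

#include "spi_bitbang_txrx.h"

/* SPI_MODE_0 word transfer built from the inline template above */
static u32 example_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
				   u32 word, u8 bits)
{
	return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
}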
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index c2184866fa9..8b528128111 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -149,8 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
#define spidelay(X) do{}while(0)
//#define spidelay ndelay
-#define EXPAND_BITBANG_TXRX
-#include <linux/spi/spi_bitbang.h>
+#include "spi_bitbang_txrx.h"
static u32
butterfly_txrx_word_mode0(struct spi_device *spi,
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index 26bd03e6185..7edbd5807e0 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -127,8 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
*/
#define spidelay(nsecs) do {} while (0)
-#define EXPAND_BITBANG_TXRX
-#include <linux/spi/spi_bitbang.h>
+#include "spi_bitbang_txrx.h"
/*
* These functions can leverage inline expansion of GPIO calls to shrink
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
index 568c781ad91..86fb7b5993d 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi_lm70llp.c
@@ -174,8 +174,7 @@ static inline int getmiso(struct spi_device *s)
}
/*--------------------------------------------------------------------*/
-#define EXPAND_BITBANG_TXRX 1
-#include <linux/spi/spi_bitbang.h>
+#include "spi_bitbang_txrx.h"
static void lm70_chipselect(struct spi_device *spi, int value)
{
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 14d05231650..ffa111a7e9d 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -241,7 +241,6 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
/* Turn off SPI unit prior changing mode */
mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
- mpc8xxx_spi_write_reg(mode, cs->hw_mode);
/* When in CPM mode, we need to reinit tx and rx. */
if (mspi->flags & SPI_CPM_MODE) {
@@ -258,7 +257,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
}
}
}
-
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode);
local_irq_restore(flags);
}
@@ -287,36 +286,12 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
}
}
-static
-int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int
+mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ struct spi_device *spi,
+ struct mpc8xxx_spi *mpc8xxx_spi,
+ int bits_per_word)
{
- struct mpc8xxx_spi *mpc8xxx_spi;
- u8 bits_per_word, pm;
- u32 hz;
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
-
- if (t) {
- bits_per_word = t->bits_per_word;
- hz = t->speed_hz;
- } else {
- bits_per_word = 0;
- hz = 0;
- }
-
- /* spi_transfer level calls that work per-word */
- if (!bits_per_word)
- bits_per_word = spi->bits_per_word;
-
- /* Make sure its a bit width we support [4..16, 32] */
- if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32)))
- return -EINVAL;
-
- if (!hz)
- hz = spi->max_speed_hz;
-
cs->rx_shift = 0;
cs->tx_shift = 0;
if (bits_per_word <= 8) {
@@ -340,19 +315,82 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
return -EINVAL;
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
- spi->mode & SPI_LSB_FIRST) {
+ spi->mode & SPI_LSB_FIRST) {
cs->tx_shift = 0;
if (bits_per_word <= 8)
cs->rx_shift = 8;
else
cs->rx_shift = 0;
}
-
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
+ return bits_per_word;
+}
+
+static int
+mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ struct spi_device *spi,
+ int bits_per_word)
+{
+ /* QE uses little endian for words > 8,
+ * so transform all words > 8 into 8 bits.
+ * Unfortunately that doesn't work for LSB-first,
+ * so reject these for now. */
+ /* Note: for 32-bit words, LSB-first works iff
+ * tfcr/rfcr is set to CPMFCR_GBL */
+ if (spi->mode & SPI_LSB_FIRST &&
+ bits_per_word > 8)
+ return -EINVAL;
+ if (bits_per_word > 8)
+ return 8; /* pretend it's 8 bits */
+ return bits_per_word;
+}
+
+static
+int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ int bits_per_word;
+ u8 pm;
+ u32 hz;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bits_per_word = 0;
+ hz = 0;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ /* Make sure it's a bit width we support [4..16, 32] */
+ if ((bits_per_word < 4)
+ || ((bits_per_word > 16) && (bits_per_word != 32)))
+ return -EINVAL;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
+ bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ mpc8xxx_spi,
+ bits_per_word);
+ else if (mpc8xxx_spi->flags & SPI_QE)
+ bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+ bits_per_word);
+
+ if (bits_per_word < 0)
+ return bits_per_word;
+
if (bits_per_word == 32)
bits_per_word = 0;
else
@@ -438,7 +476,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
dev_err(dev, "unable to map tx dma\n");
return -ENOMEM;
}
- } else {
+ } else if (t->tx_buf) {
mspi->tx_dma = t->tx_dma;
}
@@ -449,7 +487,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
dev_err(dev, "unable to map rx dma\n");
goto err_rx_dma;
}
- } else {
+ } else if (t->rx_buf) {
mspi->rx_dma = t->rx_dma;
}
@@ -477,7 +515,7 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
if (mspi->map_tx_dma)
dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
- if (mspi->map_tx_dma)
+ if (mspi->map_rx_dma)
dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
mspi->xfer_in_progress = NULL;
}
@@ -640,7 +678,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
}
mpc8xxx_spi = spi_master_get_devdata(spi->master);
- hw_mode = cs->hw_mode; /* Save orginal settings */
+ hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
/* mask out bits we are going to set */
cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
@@ -797,7 +835,7 @@ static void mpc8xxx_spi_free_dummy_rx(void)
static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
unsigned long spi_base_ofs;
@@ -851,7 +889,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
unsigned long pram_ofs;
@@ -1123,7 +1161,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
{
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
struct fsl_spi_platform_data *pdata = dev->platform_data;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
unsigned int ngpios;
@@ -1224,7 +1262,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct mpc8xxx_spi_probe_info *pinfo;
struct fsl_spi_platform_data *pdata;
struct spi_master *master;
@@ -1312,8 +1350,11 @@ static const struct of_device_id of_mpc8xxx_spi_match[] = {
MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);
static struct of_platform_driver of_mpc8xxx_spi_driver = {
- .name = "mpc8xxx_spi",
- .match_table = of_mpc8xxx_spi_match,
+ .driver = {
+ .name = "mpc8xxx_spi",
+ .owner = THIS_MODULE,
+ .of_match_table = of_mpc8xxx_spi_match,
+ },
.probe = of_mpc8xxx_spi_probe,
.remove = __devexit_p(of_mpc8xxx_spi_remove),
};
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 7cb5ff37f6e..19c0b3b34fc 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -587,12 +587,12 @@ static const struct of_device_id spi_ppc4xx_of_match[] = {
MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
static struct of_platform_driver spi_ppc4xx_of_driver = {
- .match_table = spi_ppc4xx_of_match,
.probe = spi_ppc4xx_of_probe,
.remove = __exit_p(spi_ppc4xx_of_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = spi_ppc4xx_of_match,
},
};
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index bbf9371cd28..8979a75dbd7 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -58,8 +58,7 @@ static inline u32 getmiso(struct spi_device *dev)
#define spidelay(x) ndelay(x)
-#define EXPAND_BITBANG_TXRX
-#include <linux/spi/spi_bitbang.h>
+#include "spi_bitbang_txrx.h"
static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index a65c12ffa73..a511be7961a 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -78,8 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
#define spidelay(x) ndelay(x)
-#define EXPAND_BITBANG_TXRX
-#include <linux/spi/spi_bitbang.h>
+#include "spi_bitbang_txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 748d33a76d2..4654805b08d 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -48,13 +48,13 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
const u32 *prop;
int len;
- rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+ rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
if (rc) {
dev_warn(&ofdev->dev, "invalid address\n");
return rc;
}
- rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+ rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
if (rc == NO_IRQ) {
dev_warn(&ofdev->dev, "no IRQ found\n");
return -ENODEV;
@@ -67,7 +67,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
return -ENOMEM;
/* number of slave select bits is required */
- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+ prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len);
if (!prop || len < sizeof(*prop)) {
dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
return -EINVAL;
@@ -81,7 +81,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, master);
/* Add any subnodes on the SPI bus */
- of_register_spi_devices(master, ofdev->node);
+ of_register_spi_devices(master, ofdev->dev.of_node);
return 0;
}
@@ -109,12 +109,12 @@ static const struct of_device_id xilinx_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
static struct of_platform_driver xilinx_spi_of_driver = {
- .match_table = xilinx_spi_of_match,
.probe = xilinx_spi_of_probe,
.remove = __exit_p(xilinx_spi_of_remove),
.driver = {
.name = "xilinx-xps-spi",
.owner = THIS_MODULE,
+ .of_match_table = xilinx_spi_of_match,
},
};
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 9681536163c..59ae76bace1 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -233,6 +233,8 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
+ if (cc->dev->id.revision >= 11)
+ cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_pmu_init(cc);
chipco_powercontrol_init(cc);
ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
@@ -370,6 +372,7 @@ u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
}
+EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 80ff7d9e60d..51275aac5b3 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -490,7 +490,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
break;
case SSB_BUSTYPE_PCMCIA:
#ifdef CONFIG_SSB_PCMCIAHOST
- sdev->irq = bus->host_pcmcia->irq.AssignedIRQ;
+ sdev->irq = bus->host_pcmcia->irq;
dev->parent = &bus->host_pcmcia->dev;
#endif
break;
@@ -834,6 +834,9 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus,
if (!err) {
ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
"PCI device %s\n", dev_name(&host_pci->dev));
+ } else {
+ ssb_printk(KERN_ERR PFX "Failed to register PCI version"
+ " of SSB with error %d\n", err);
}
return err;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index a8dbb06623c..989e2752cc3 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -168,7 +168,7 @@ err_pci:
}
/* Get the word-offset for a SSB_SPROM_XXX define. */
-#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16))
+#define SPOFF(offset) ((offset) / sizeof(u16))
/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */
#define SPEX16(_outvar, _offset, _mask, _shift) \
out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift))
@@ -254,7 +254,7 @@ static int sprom_do_read(struct ssb_bus *bus, u16 *sprom)
int i;
for (i = 0; i < bus->sprom_size; i++)
- sprom[i] = ioread16(bus->mmio + SSB_SPROM_BASE + (i * 2));
+ sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2));
return 0;
}
@@ -285,7 +285,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
ssb_printk("75%%");
else if (i % 2)
ssb_printk(".");
- writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2));
+ writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
}
@@ -621,6 +621,14 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
int err = -ENOMEM;
u16 *buf;
+ if (!ssb_is_sprom_available(bus)) {
+ ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ return -ENODEV;
+ }
+
+ bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+ SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
goto out;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index f2f920fef10..007bc3a0348 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -176,3 +176,17 @@ const struct ssb_sprom *ssb_get_fallback_sprom(void)
{
return fallback_sprom;
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
+bool ssb_is_sprom_available(struct ssb_bus *bus)
+{
+ /* the status register only exists on chipcommon rev >= 11 and we need to
+ check for >= 31 only */
+ /* this routine differs from specs as we do not access SPROM directly
+ on PCMCIA */
+ if (bus->bustype == SSB_BUSTYPE_PCI &&
+ bus->chipco.dev->id.revision >= 31)
+ return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
+
+ return true;
+}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7696a664f8a..b5c3b301303 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -49,6 +49,8 @@ source "drivers/staging/go7007/Kconfig"
source "drivers/staging/cx25821/Kconfig"
+source "drivers/staging/tm6000/Kconfig"
+
source "drivers/staging/usbip/Kconfig"
source "drivers/staging/winbond/Kconfig"
@@ -57,8 +59,6 @@ source "drivers/staging/wlan-ng/Kconfig"
source "drivers/staging/echo/Kconfig"
-source "drivers/staging/poch/Kconfig"
-
source "drivers/staging/otus/Kconfig"
source "drivers/staging/rt2860/Kconfig"
@@ -111,6 +111,8 @@ source "drivers/staging/vme/Kconfig"
source "drivers/staging/rar_register/Kconfig"
+source "drivers/staging/memrar/Kconfig"
+
source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
@@ -125,19 +127,19 @@ source "drivers/staging/batman-adv/Kconfig"
source "drivers/staging/samsung-laptop/Kconfig"
-source "drivers/staging/strip/Kconfig"
+source "drivers/staging/sm7xx/Kconfig"
-source "drivers/staging/arlan/Kconfig"
+source "drivers/staging/dt3155/Kconfig"
-source "drivers/staging/wavelan/Kconfig"
+source "drivers/staging/dt3155v4l/Kconfig"
-source "drivers/staging/netwave/Kconfig"
+source "drivers/staging/crystalhd/Kconfig"
-source "drivers/staging/sm7xx/Kconfig"
+source "drivers/staging/cxt1e1/Kconfig"
-source "drivers/staging/dt3155/Kconfig"
+source "drivers/staging/ti-st/Kconfig"
-source "drivers/staging/crystalhd/Kconfig"
+source "drivers/staging/adis16255/Kconfig"
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ea2e70e2fed..e330dd5e843 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -7,11 +7,11 @@ obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_CX25821) += cx25821/
+obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_USB_IP_COMMON) += usbip/
obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_POCH) += poch/
obj-$(CONFIG_OTUS) += otus/
obj-$(CONFIG_RT2860) += rt2860/
obj-$(CONFIG_RT2870) += rt2870/
@@ -36,6 +36,7 @@ obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_RAR_REGISTER) += rar_register/
+obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_RAMZSWAP) += ramzswap/
@@ -43,11 +44,10 @@ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/
-obj-$(CONFIG_STRIP) += strip/
-obj-$(CONFIG_ARLAN) += arlan/
-obj-$(CONFIG_WAVELAN) += wavelan/
-obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
-obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
obj-$(CONFIG_DT3155) += dt3155/
+obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
+obj-$(CONFIG_CXT1E1) += cxt1e1/
+obj-$(CONFIG_TI_ST) += ti-st/
+obj-$(CONFIG_ADIS16255) += adis16255/
diff --git a/drivers/staging/adis16255/Kconfig b/drivers/staging/adis16255/Kconfig
new file mode 100644
index 00000000000..a642be66ade
--- /dev/null
+++ b/drivers/staging/adis16255/Kconfig
@@ -0,0 +1,11 @@
+config ADIS16255
+ tristate "Ananlog Devices ADIS16250/16255"
+ depends on SPI && SYSFS
+ ---help---
+ If you say yes here you get support for the Analog Devices
+ ADIS16250/16255 Low Power Gyroscope. The driver exposes the
+ orientation, gyroscope value and sample rate via sysfs.
+
+ This driver can also be built as a module. If so, the module
+ will be called adis16255.
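Once the driver described above is bound, the attributes the help text mentions appear in the SPI device's sysfs directory. The following is a minimal userspace sketch, not part of the patch, that reads them; the spi0.0 device path is an assumption and depends on the board's bus number and chip select.

/* Hypothetical sysfs reader; real paths depend on bus/chip-select. */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/spi/devices/spi0.0";	/* assumed */
	const char *attr[] = { "sample_rate", "direction", "data" };
	char path[128], buf[32];
	unsigned int i;

	for (i = 0; i < sizeof(attr) / sizeof(attr[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, attr[i]);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attr[i], buf);
		fclose(f);
	}
	return 0;
}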
diff --git a/drivers/staging/adis16255/Makefile b/drivers/staging/adis16255/Makefile
new file mode 100644
index 00000000000..8c3908106bf
--- /dev/null
+++ b/drivers/staging/adis16255/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ADIS16255) += adis16255.o
diff --git a/drivers/staging/adis16255/adis16255.c b/drivers/staging/adis16255/adis16255.c
new file mode 100644
index 00000000000..1ba11f00b2e
--- /dev/null
+++ b/drivers/staging/adis16255/adis16255.c
@@ -0,0 +1,466 @@
+/*
+ * Analog Devices ADIS16250/ADIS16255 Low Power Gyroscope
+ *
+ * Written by: Matthias Brugger <m_brugger@web.de>
+ *
+ * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * The driver just has a bare sysfs interface: sample rate in Hz,
+ * orientation (x, y, z) and gyroscope data in °/sec.
+ *
+ * It should be moved to the IIO subsystem once it has left staging.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/interrupt.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+
+#include <linux/gpio.h>
+
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include "adis16255.h"
+
+#define ADIS_STATUS 0x3d
+#define ADIS_SMPL_PRD_MSB 0x37
+#define ADIS_SMPL_PRD_LSB 0x36
+#define ADIS_MSC_CTRL_MSB 0x35
+#define ADIS_MSC_CTRL_LSB 0x34
+#define ADIS_GPIO_CTRL 0x33
+#define ADIS_ALM_SMPL1 0x25
+#define ADIS_ALM_MAG1 0x21
+#define ADIS_GYRO_SCALE 0x17
+#define ADIS_GYRO_OUT 0x05
+#define ADIS_SUPPLY_OUT 0x03
+#define ADIS_ENDURANCE 0x01
+
+/*
+ * data structure for every sensor
+ *
+ * @dev: Driver model representation of the device.
+ * @spi: Pointer to the spi device which will manage i/o to spi bus.
+ * @data: Last read data from device.
+ * @irq: IRQ line managed by the kernel, mapped from the
+ * IRQ GPIO given in the platform data
+ * @negative: indicates if the sensor is upside down (negative == 1)
+ * @direction: indicates the axis (x, y, z) the sensor is measuring
+ */
+struct spi_adis16255_data {
+ struct device dev;
+ struct spi_device *spi;
+ s16 data;
+ int irq;
+ u8 negative;
+ char direction;
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int spi_adis16255_read_data(struct spi_adis16255_data *spiadis,
+ u8 adr,
+ u8 *rbuf)
+{
+ struct spi_device *spi = spiadis->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer1, xfer2;
+ u8 *buf, *rx;
+ int ret;
+
+ buf = kzalloc(4, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ rx = kzalloc(4, GFP_KERNEL);
+ if (rx == NULL) {
+ ret = -ENOMEM;
+ goto err_buf;
+ }
+
+ buf[0] = adr;
+
+ spi_message_init(&msg);
+ memset(&xfer1, 0, sizeof(xfer1));
+ memset(&xfer2, 0, sizeof(xfer2));
+
+ xfer1.tx_buf = buf;
+ xfer1.rx_buf = buf + 2;
+ xfer1.len = 2;
+ xfer1.delay_usecs = 9;
+
+ xfer2.tx_buf = rx + 2;
+ xfer2.rx_buf = rx;
+ xfer2.len = 2;
+
+ spi_message_add_tail(&xfer1, &msg);
+ spi_message_add_tail(&xfer2, &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret == 0) {
+ rbuf[0] = rx[0];
+ rbuf[1] = rx[1];
+ }
+
+ kfree(rx);
+err_buf:
+ kfree(buf);
+
+ return ret;
+}
+
+static int spi_adis16255_write_data(struct spi_adis16255_data *spiadis,
+ u8 adr1,
+ u8 adr2,
+ u8 *wbuf)
+{
+ struct spi_device *spi = spiadis->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer1, xfer2;
+ u8 *buf, *rx;
+ int ret;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ rx = kzalloc(4, GFP_KERNEL);
+ if (rx == NULL) {
+ ret = -ENOMEM;
+ goto err_buf;
+ }
+
+ spi_message_init(&msg);
+ memset(&xfer1, 0, sizeof(xfer1));
+ memset(&xfer2, 0, sizeof(xfer2));
+
+ buf[0] = adr1 | 0x80;
+ buf[1] = *wbuf;
+
+ buf[2] = adr2 | 0x80;
+ buf[3] = *(wbuf + 1);
+
+ xfer1.tx_buf = buf;
+ xfer1.rx_buf = rx;
+ xfer1.len = 2;
+ xfer1.delay_usecs = 9;
+
+ xfer2.tx_buf = buf+2;
+ xfer2.rx_buf = rx+2;
+ xfer2.len = 2;
+
+ spi_message_add_tail(&xfer1, &msg);
+ spi_message_add_tail(&xfer2, &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret != 0)
+ dev_warn(&spi->dev, "write data to %#x %#x failed\n",
+ buf[0], buf[2]);
+
+ kfree(rx);
+err_buf:
+ kfree(buf);
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t adis_irq_thread(int irq, void *dev_id)
+{
+ struct spi_adis16255_data *spiadis = dev_id;
+ int status;
+ u16 value = 0;
+
+ status = spi_adis16255_read_data(spiadis, ADIS_GYRO_OUT, (u8 *)&value);
+ if (status != 0) {
+ dev_warn(&spiadis->spi->dev, "SPI FAILED\n");
+ goto exit;
+ }
+
+ /* perform on new data only... */
+ if (value & 0x8000) {
+ /* delete error and new data bit */
+ value = value & 0x3fff;
+ /* set negative value */
+ if (value & 0x2000)
+ value = value | 0xe000;
+
+ if (likely(spiadis->negative))
+ value = -value;
+
+ spiadis->data = (s16) value;
+ }
+
+exit:
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+ssize_t adis16255_show_data(struct device *device,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", spiadis->data);
+}
+DEVICE_ATTR(data, S_IRUGO, adis16255_show_data, NULL);
+
+ssize_t adis16255_show_direction(struct device *device,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
+ return snprintf(buf, PAGE_SIZE, "%c\n", spiadis->direction);
+}
+DEVICE_ATTR(direction, S_IRUGO, adis16255_show_direction, NULL);
+
+ssize_t adis16255_show_sample_rate(struct device *device,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
+ int status = 0;
+ u16 value = 0;
+ int ts = 0;
+
+ status = spi_adis16255_read_data(spiadis, ADIS_SMPL_PRD_MSB,
+ (u8 *)&value);
+ if (status != 0)
+ return -EINVAL;
+
+ if (value & 0x80) {
+ /* timebase = 60.54 ms */
+ ts = 60540 * ((0x7f & value) + 1);
+ } else {
+ /* timebase = 1.953 ms */
+ ts = 1953 * ((0x7f & value) + 1);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", (1000*1000)/ts);
+}
+DEVICE_ATTR(sample_rate, S_IRUGO, adis16255_show_sample_rate, NULL);
+
+static struct attribute *adis16255_attributes[] = {
+ &dev_attr_data.attr,
+ &dev_attr_direction.attr,
+ &dev_attr_sample_rate.attr,
+ NULL
+};
+
+static const struct attribute_group adis16255_attr_group = {
+ .attrs = adis16255_attributes,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int spi_adis16255_shutdown(struct spi_adis16255_data *spiadis)
+{
+ u16 value = 0;
+ /* turn sensor off */
+ spi_adis16255_write_data(spiadis,
+ ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
+ (u8 *)&value);
+ spi_adis16255_write_data(spiadis,
+ ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
+ (u8 *)&value);
+ return 0;
+}
+
+static int spi_adis16255_bringup(struct spi_adis16255_data *spiadis)
+{
+ int status = 0;
+ u16 value = 0;
+
+ status = spi_adis16255_read_data(spiadis, ADIS_GYRO_SCALE,
+ (u8 *)&value);
+ if (status != 0)
+ goto err;
+ if (value != 0x0800) {
+ dev_warn(&spiadis->spi->dev, "Scale factor is none default"
+ "value (%.4x)\n", value);
+ }
+
+ /* timebase = 1.953 ms, Ns = 0 -> 512 Hz sample rate */
+ value = 0x0001;
+ status = spi_adis16255_write_data(spiadis,
+ ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
+ (u8 *)&value);
+ if (status != 0)
+ goto err;
+
+ /* start internal self-test */
+ value = 0x0400;
+ status = spi_adis16255_write_data(spiadis,
+ ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
+ (u8 *)&value);
+ if (status != 0)
+ goto err;
+
+ /* wait 35 ms to finish self-test */
+ msleep(35);
+
+ value = 0x0000;
+ status = spi_adis16255_read_data(spiadis, ADIS_STATUS,
+ (u8 *)&value);
+ if (status != 0)
+ goto err;
+
+ if (value & 0x23) {
+ if (value & 0x20) {
+ dev_warn(&spiadis->spi->dev, "self-test error\n");
+ status = -ENODEV;
+ goto err;
+ } else if (value & 0x3) {
+ dev_warn(&spiadis->spi->dev, "Sensor voltage"
+ "out of range.\n");
+ status = -ENODEV;
+ goto err;
+ }
+ }
+
+ /* set interrupt to active high on DIO0 when data ready */
+ value = 0x0006;
+ status = spi_adis16255_write_data(spiadis,
+ ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
+ (u8 *)&value);
+ if (status != 0)
+ goto err;
+ return status;
+
+err:
+ spi_adis16255_shutdown(spiadis);
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int spi_adis16255_probe(struct spi_device *spi)
+{
+
+ struct adis16255_init_data *init_data = spi->dev.platform_data;
+ struct spi_adis16255_data *spiadis;
+ int status = 0;
+
+ spiadis = kzalloc(sizeof(*spiadis), GFP_KERNEL);
+ if (!spiadis)
+ return -ENOMEM;
+
+ spiadis->spi = spi;
+ spiadis->direction = init_data->direction;
+
+ if (init_data->negative)
+ spiadis->negative = 1;
+
+ status = gpio_request(init_data->irq, "adis16255");
+ if (status != 0)
+ goto err;
+
+ status = gpio_direction_input(init_data->irq);
+ if (status != 0)
+ goto gpio_err;
+
+ spiadis->irq = gpio_to_irq(init_data->irq);
+
+ status = request_threaded_irq(spiadis->irq,
+ NULL, adis_irq_thread,
+ IRQF_DISABLED, "adis-driver", spiadis);
+
+ if (status != 0) {
+ dev_err(&spi->dev, "IRQ request failed\n");
+ goto gpio_err;
+ }
+
+ dev_dbg(&spi->dev, "GPIO %d IRQ %d\n", init_data->irq, spiadis->irq);
+
+ dev_set_drvdata(&spi->dev, spiadis);
+ status = sysfs_create_group(&spi->dev.kobj, &adis16255_attr_group);
+ if (status != 0)
+ goto irq_err;
+
+ status = spi_adis16255_bringup(spiadis);
+ if (status != 0)
+ goto irq_err;
+
+ dev_info(&spi->dev, "spi_adis16255 driver added!\n");
+
+ return status;
+
+irq_err:
+ free_irq(spiadis->irq, spiadis);
+gpio_err:
+ gpio_free(init_data->irq);
+err:
+ kfree(spiadis);
+ return status;
+}
+
+static int spi_adis16255_remove(struct spi_device *spi)
+{
+ struct spi_adis16255_data *spiadis = dev_get_drvdata(&spi->dev);
+
+ spi_adis16255_shutdown(spiadis);
+
+ free_irq(spiadis->irq, spiadis);
+ gpio_free(irq_to_gpio(spiadis->irq));
+
+ sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
+
+ kfree(spiadis);
+
+ dev_info(&spi->dev, "spi_adis16255 driver removed!\n");
+ return 0;
+}
+
+static struct spi_driver spi_adis16255_drv = {
+ .driver = {
+ .name = "spi_adis16255",
+ .owner = THIS_MODULE,
+ },
+ .probe = spi_adis16255_probe,
+ .remove = __devexit_p(spi_adis16255_remove),
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init spi_adis16255_init(void)
+{
+ return spi_register_driver(&spi_adis16255_drv);
+}
+module_init(spi_adis16255_init);
+
+static void __exit spi_adis16255_exit(void)
+{
+ spi_unregister_driver(&spi_adis16255_drv);
+}
+module_exit(spi_adis16255_exit);
+
+MODULE_AUTHOR("Matthias Brugger");
+MODULE_DESCRIPTION("SPI device driver for ADIS16255 sensor");
+MODULE_LICENSE("GPL");
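The sample-rate handling above packs two things into the SMPL_PRD value: bit 7 selects the timebase (60.54 ms when set, 1.953 ms when clear) and the low seven bits are a period multiplier N, so the period is timebase * (N + 1) and the reported rate is 1000000 / period_us Hz. The following standalone sketch, not part of the patch, mirrors the arithmetic in adis16255_show_sample_rate(); the sample register values are only illustrations.

/*
 * Sketch of the sample-rate arithmetic used by the driver above:
 * bit 7 of the SMPL_PRD value selects the timebase, the low seven
 * bits are a period multiplier.
 */
#include <stdio.h>

static int adis_sample_rate_hz(unsigned char smpl_prd)
{
	/* timebase in microseconds: 60.54 ms if bit 7 set, else 1.953 ms */
	int tb_us = (smpl_prd & 0x80) ? 60540 : 1953;
	int ts_us = tb_us * ((smpl_prd & 0x7f) + 1);

	return (1000 * 1000) / ts_us;
}

int main(void)
{
	/* Ns = 0 with the short timebase -> 512 Hz, as in the bring-up comment */
	printf("%d Hz\n", adis_sample_rate_hz(0x00));
	/* Ns = 1 doubles the period, halving the rate to 256 Hz */
	printf("%d Hz\n", adis_sample_rate_hz(0x01));
	return 0;
}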
diff --git a/drivers/staging/adis16255/adis16255.h b/drivers/staging/adis16255/adis16255.h
new file mode 100644
index 00000000000..03e07001bab
--- /dev/null
+++ b/drivers/staging/adis16255/adis16255.h
@@ -0,0 +1,12 @@
+#ifndef ADIS16255_H
+#define ADIS16255_H
+
+#include <linux/types.h>
+
+struct adis16255_init_data {
+ char direction;
+ u8 negative;
+ int irq;
+};
+
+#endif
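The struct declared in the header above is what spi_adis16255_probe() expects to find in spi->dev.platform_data, so board code has to supply it when registering the SPI slave. A hypothetical board-file sketch follows; the bus number, chip select, clock rate, IRQ GPIO, axis and orientation are all assumptions for illustration, not values taken from the patch.

/*
 * Hypothetical board-file snippet handing adis16255_init_data to the
 * driver as platform_data.  All numeric values below are assumptions.
 */
#include <linux/spi/spi.h>
#include "adis16255.h"

static struct adis16255_init_data adis_pdata = {
	.direction = 'z',	/* axis the sensor measures on this board */
	.negative  = 0,		/* sensor mounted right side up */
	.irq       = 42,	/* GPIO wired to the sensor's data-ready line */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "spi_adis16255", /* matches the driver name */
		.platform_data	= &adis_pdata,
		.max_speed_hz	= 1000000,
		.bus_num	= 1,
		.chip_select	= 0,
	},
};

/* in the board init code:
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */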
diff --git a/drivers/staging/arlan/Kconfig b/drivers/staging/arlan/Kconfig
deleted file mode 100644
index 5e42b81f97b..00000000000
--- a/drivers/staging/arlan/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-config ARLAN
- tristate "Aironet Arlan 655 & IC2200 DS support"
- depends on ISA && !64BIT && WLAN
- select WIRELESS_EXT
- ---help---
- Aironet makes Arlan, a class of wireless LAN adapters. These use the
- www.Telxon.com chip, which is also used on several similar cards.
- This driver is tested on the 655 and IC2200 series cards. Look at
- <http://www.ylenurme.ee/~elmer/655/> for the latest information.
-
- The driver is built as two modules, arlan and arlan-proc. The latter
- is the /proc interface and is not needed most of time.
-
- On some computers the card ends up in non-valid state after some
- time. Use a ping-reset script to clear it.
diff --git a/drivers/staging/arlan/Makefile b/drivers/staging/arlan/Makefile
deleted file mode 100644
index 5a84d4402f2..00000000000
--- a/drivers/staging/arlan/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_ARLAN) += arlan.o
-
-arlan-objs := arlan-main.o arlan-proc.o
diff --git a/drivers/staging/arlan/TODO b/drivers/staging/arlan/TODO
deleted file mode 100644
index 9bd15a2f6d9..00000000000
--- a/drivers/staging/arlan/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - step up and maintain this driver to ensure that it continues
- to work. Having the hardware for this is pretty much a
- requirement. If this does not happen, the will be removed in
- the 2.6.35 kernel release.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
deleted file mode 100644
index 88fdd53cf5d..00000000000
--- a/drivers/staging/arlan/arlan-main.c
+++ /dev/null
@@ -1,1882 +0,0 @@
-/*
- * Copyright (C) 1997 Cullen Jennings
- * Copyright (C) 1998 Elmer Joandiu, elmer@ylenurme.ee
- * GNU General Public License applies
- * This module provides support for the Arlan 655 card made by Aironet
- */
-
-#include "arlan.h"
-
-#if BITS_PER_LONG != 32
-# error FIXME: this driver requires a 32-bit platform
-#endif
-
-static const char *arlan_version = "C.Jennigs 97 & Elmer.Joandi@ut.ee Oct'98, http://www.ylenurme.ee/~elmer/655/";
-
-struct net_device *arlan_device[MAX_ARLANS];
-
-static int SID = SIDUNKNOWN;
-static int radioNodeId = radioNodeIdUNKNOWN;
-static char encryptionKey[12] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};
-int arlan_debug = debugUNKNOWN;
-static int spreadingCode = spreadingCodeUNKNOWN;
-static int channelNumber = channelNumberUNKNOWN;
-static int channelSet = channelSetUNKNOWN;
-static int systemId = systemIdUNKNOWN;
-static int registrationMode = registrationModeUNKNOWN;
-static int keyStart;
-static int tx_delay_ms;
-static int retries = 5;
-static int tx_queue_len = 1;
-static int arlan_EEPROM_bad;
-
-#ifdef ARLAN_DEBUGGING
-
-static int testMemory = testMemoryUNKNOWN;
-static int irq = irqUNKNOWN;
-static int txScrambled = 1;
-static int mdebug;
-
-module_param(irq, int, 0);
-module_param(mdebug, int, 0);
-module_param(testMemory, int, 0);
-module_param(txScrambled, int, 0);
-MODULE_PARM_DESC(irq, "(unused)");
-MODULE_PARM_DESC(testMemory, "(unused)");
-MODULE_PARM_DESC(mdebug, "Arlan multicast debugging (0-1)");
-#endif
-
-module_param_named(debug, arlan_debug, int, 0);
-module_param(spreadingCode, int, 0);
-module_param(channelNumber, int, 0);
-module_param(channelSet, int, 0);
-module_param(systemId, int, 0);
-module_param(registrationMode, int, 0);
-module_param(radioNodeId, int, 0);
-module_param(SID, int, 0);
-module_param(keyStart, int, 0);
-module_param(tx_delay_ms, int, 0);
-module_param(retries, int, 0);
-module_param(tx_queue_len, int, 0);
-module_param_named(EEPROM_bad, arlan_EEPROM_bad, int, 0);
-MODULE_PARM_DESC(debug, "Arlan debug enable (0-1)");
-MODULE_PARM_DESC(retries, "Arlan maximum packet retransmisions");
-#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
-static int arlan_entry_debug;
-static int arlan_exit_debug;
-static int arlan_entry_and_exit_debug;
-module_param_named(entry_debug, arlan_entry_debug, int, 0);
-module_param_named(exit_debug, arlan_exit_debug, int, 0);
-module_param_named(entry_and_exit_debug, arlan_entry_and_exit_debug, int, 0);
-MODULE_PARM_DESC(entry_debug, "Arlan driver function entry debugging");
-MODULE_PARM_DESC(exit_debug, "Arlan driver function exit debugging");
-MODULE_PARM_DESC(entry_and_exit_debug, "Arlan driver function entry and exit debugging");
-#endif
-
-struct arlan_conf_stru arlan_conf[MAX_ARLANS];
-static int arlans_found;
-
-static int arlan_open(struct net_device *dev);
-static netdev_tx_t arlan_tx(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t arlan_interrupt(int irq, void *dev_id);
-static int arlan_close(struct net_device *dev);
-static struct net_device_stats *
- arlan_statistics (struct net_device *dev);
-static void arlan_set_multicast (struct net_device *dev);
-static int arlan_hw_tx (struct net_device* dev, char *buf, int length );
-static int arlan_hw_config (struct net_device * dev);
-static void arlan_tx_done_interrupt (struct net_device * dev, int status);
-static void arlan_rx_interrupt (struct net_device * dev, u_char rxStatus, u_short, u_short);
-static void arlan_process_interrupt (struct net_device * dev);
-static void arlan_tx_timeout (struct net_device *dev);
-
-static inline long us2ticks(int us)
-{
- return us * (1000000 / HZ);
-}
-
-
-#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
-#define ARLAN_DEBUG_ENTRY(name) \
- {\
- struct timeval timev;\
- do_gettimeofday(&timev);\
- if (arlan_entry_debug || arlan_entry_and_exit_debug)\
- printk("--->>>" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec));\
- }
-#define ARLAN_DEBUG_EXIT(name) \
- {\
- struct timeval timev;\
- do_gettimeofday(&timev);\
- if (arlan_exit_debug || arlan_entry_and_exit_debug)\
- printk("<<<---" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec) );\
- }
-#else
-#define ARLAN_DEBUG_ENTRY(name)
-#define ARLAN_DEBUG_EXIT(name)
-#endif
-
-
-#define arlan_interrupt_ack(dev)\
- clearClearInterrupt(dev);\
- setClearInterrupt(dev);
-
-static inline int arlan_drop_tx(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- dev->stats.tx_errors++;
- if (priv->Conf->tx_delay_ms)
- {
- priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1;
- }
- else
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
- TXHEAD(dev).offset = 0;
- TXTAIL(dev).offset = 0;
- priv->txLast = 0;
- priv->bad = 0;
- if (!priv->under_reset && !priv->under_config)
- netif_wake_queue (dev);
- }
- return 1;
-}
-
-
-int arlan_command(struct net_device *dev, int command_p)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
- int udelayed = 0;
- int i = 0;
- unsigned long flags;
-
- ARLAN_DEBUG_ENTRY("arlan_command");
-
- if (priv->card_polling_interval)
- priv->card_polling_interval = 1;
-
- if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_DEBUG "arlan_command, %lx commandByte %x waiting %lx incoming %x \n",
- jiffies, READSHMB(arlan->commandByte),
- priv->waiting_command_mask, command_p);
-
- priv->waiting_command_mask |= command_p;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- if (time_after(jiffies, priv->lastReset + 5 * HZ))
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ACK)
- {
- arlan_interrupt_ack(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ACK;
- }
- if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ENABLE)
- {
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ENABLE;
- }
-
- /* Card access serializing lock */
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Check cards status and waiting */
-
- if (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
- {
- while (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
- {
- if (READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) /* ||
- (readControlRegister(dev) & ARLAN_ACCESS))
- */
- udelay(40);
- else
- priv->waiting_command_mask &= ~(ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW);
-
- udelayed++;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_LONG_WAIT_NOW)
- {
- if (udelayed * 40 > 1000000)
- {
- printk(KERN_ERR "%s long wait too long \n", dev->name);
- priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
- break;
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_WAIT_NOW)
- {
- if (udelayed * 40 > 1000)
- {
- printk(KERN_ERR "%s short wait too long \n", dev->name);
- goto bad_end;
- }
- }
- }
- }
- else
- {
- i = 0;
- while ((READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) &&
- conf->pre_Command_Wait > (i++) * 10)
- udelay(10);
-
-
- if ((READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) &&
- !(priv->waiting_command_mask & ARLAN_COMMAND_RESET))
- {
- goto card_busy_end;
- }
- }
- if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- priv->under_reset = 1;
- if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
- priv->under_config = 1;
-
- /* Issuing command */
- arlan_lock_card_access(dev);
- if (priv->waiting_command_mask & ARLAN_COMMAND_POWERUP)
- {
- // if (readControlRegister(dev) & (ARLAN_ACCESS && ARLAN_POWER))
- setPowerOn(dev);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERUP;
- priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
- priv->card_polling_interval = HZ / 10;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_ACTIVATE)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_ACTIVATE);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_ACTIVATE;
- priv->card_polling_interval = HZ / 10;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RX_ABORT)
- {
- if (priv->rx_command_given)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_RX_ABORT);
- arlan_interrupt_lancpu(dev);
- priv->rx_command_given = 0;
- }
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RX_ABORT;
- priv->card_polling_interval = 1;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TX_ABORT)
- {
- if (priv->tx_command_given)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ABORT);
- arlan_interrupt_lancpu(dev);
- priv->tx_command_given = 0;
- }
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX_ABORT;
- priv->card_polling_interval = 1;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- {
- priv->under_reset=1;
- netif_stop_queue (dev);
-
- arlan_drop_tx(dev);
- if (priv->tx_command_given || priv->rx_command_given)
- {
- printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
- }
- netif_stop_queue (dev);
- if (arlan_debug & ARLAN_DEBUG_RESET)
- printk(KERN_ERR "%s: Doing chip reset\n", dev->name);
- priv->lastReset = jiffies;
- WRITESHM(arlan->commandByte, 0, u_char);
- /* hold card in reset state */
- setHardwareReset(dev);
- /* set reset flag and then release reset */
- WRITESHM(arlan->resetFlag, 0xff, u_char);
- clearChannelAttention(dev);
- clearHardwareReset(dev);
- priv->card_polling_interval = HZ / 4;
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
- priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RENABLE;
-// priv->waiting_command_mask |= ARLAN_COMMAND_RX;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RACK)
- {
- clearHardwareReset(dev);
- clearClearInterrupt(dev);
- setClearInterrupt(dev);
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RACK;
- priv->waiting_command_mask |= ARLAN_COMMAND_CONF;
- priv->under_config = 1;
- priv->under_reset = 0;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RENABLE)
- {
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RENABLE;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
- {
- if (priv->tx_command_given || priv->rx_command_given)
- {
- printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
- }
- arlan_drop_tx(dev);
- setInterruptEnable(dev);
- arlan_hw_config(dev);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF;
- priv->card_polling_interval = HZ / 10;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_ENABLE;
- priv->waiting_command_mask |= ARLAN_COMMAND_CONF_WAIT;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF_WAIT)
- {
- if (READSHMB(arlan->configuredStatusFlag) != 0 &&
- READSHMB(arlan->diagnosticInfo) == 0xff)
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF_WAIT;
- priv->waiting_command_mask |= ARLAN_COMMAND_RX;
- priv->waiting_command_mask |= ARLAN_COMMAND_TBUSY_CLEAR;
- priv->card_polling_interval = HZ / 10;
- priv->tx_command_given = 0;
- priv->under_config = 0;
- }
- else
- {
- priv->card_polling_interval = 1;
- if (arlan_debug & ARLAN_DEBUG_TIMING)
- printk(KERN_ERR "configure delayed \n");
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RX)
- {
- if (!registrationBad(dev))
- {
- setInterruptEnable(dev);
- memset_io(arlan->commandParameter, 0, 0xf);
- WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_RX_ENABLE);
- WRITESHMB(arlan->commandParameter[0], conf->rxParameter);
- arlan_interrupt_lancpu(dev);
- priv->rx_command_given = 0; // mnjah, bad
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RX;
- priv->card_polling_interval = 1;
- }
- else
- priv->card_polling_interval = 2;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TBUSY_CLEAR)
- {
- if ( !registrationBad(dev) &&
- (netif_queue_stopped(dev) || !netif_running(dev)) )
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TBUSY_CLEAR;
- netif_wake_queue (dev);
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TX)
- {
- if (!test_and_set_bit(0, (void *) &priv->tx_command_given))
- {
- if (time_after(jiffies,
- priv->tx_last_sent + us2ticks(conf->rx_tweak1))
- || time_before(jiffies,
- priv->last_rx_int_ack_time + us2ticks(conf->rx_tweak2)))
- {
- setInterruptEnable(dev);
- memset_io(arlan->commandParameter, 0, 0xf);
- WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ENABLE | ARLAN_COM_INT);
- memcpy_toio(arlan->commandParameter, &TXLAST(dev), 14);
-// for ( i=1 ; i < 15 ; i++) printk("%02x:",READSHMB(arlan->commandParameter[i]));
- priv->tx_last_sent = jiffies;
- arlan_interrupt_lancpu(dev);
- priv->tx_command_given = 1;
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
- priv->card_polling_interval = 1;
- }
- else
- {
- priv->tx_command_given = 0;
- priv->card_polling_interval = 1;
- }
- }
- else if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_ERR "tx command when tx chain locked \n");
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOPINT)
- {
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_NOP | ARLAN_COM_INT);
- }
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOPINT;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOP)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_NOP);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOP;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_SLOW_POLL)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_GOTO_SLOW_POLL);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_SLOW_POLL;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_POWERDOWN)
- {
- setPowerOff(dev);
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_WARNING "%s: Arlan Going Standby\n", dev->name);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERDOWN;
- priv->card_polling_interval = 3 * HZ;
- }
- arlan_unlock_card_access(dev);
- for (i = 0; READSHMB(arlan->commandByte) && i < 20; i++)
- udelay(10);
- if (READSHMB(arlan->commandByte))
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_ERR "card busy leaving command %lx\n", priv->waiting_command_mask);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
- priv->last_command_buff_free_time = jiffies;
- return 0;
-
-card_busy_end:
- if (time_after(jiffies, priv->last_command_buff_free_time + HZ))
- priv->waiting_command_mask |= ARLAN_COMMAND_CLEAN_AND_RESET;
-
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_ERR "%s arlan_command card busy end \n", dev->name);
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
- return 1;
-
-bad_end:
- printk(KERN_ERR "%s arlan_command bad end \n", dev->name);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
-
- return -1;
-}
-
-static inline void arlan_command_process(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- int times = 0;
- while (priv->waiting_command_mask && times < 8)
- {
- if (priv->waiting_command_mask)
- {
- if (arlan_command(dev, 0))
- break;
- times++;
- }
- /* if long command, we won't repeat trying */ ;
- if (priv->card_polling_interval > 1)
- break;
- times++;
- }
-}
-
-
-static inline void arlan_retransmit_now(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
-
- ARLAN_DEBUG_ENTRY("arlan_retransmit_now");
- if (TXLAST(dev).offset == 0)
- {
- if (TXHEAD(dev).offset)
- {
- priv->txLast = 0;
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to head \n");
-
- }
- else if (TXTAIL(dev).offset)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to tail \n");
- priv->txLast = 1;
- }
- else
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "ReTransmit buff empty");
- netif_wake_queue (dev);
- return;
-
- }
- arlan_command(dev, ARLAN_COMMAND_TX);
-
- priv->Conf->driverRetransmissions++;
- priv->retransmissions++;
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("Retransmit %d bytes \n", TXLAST(dev).length);
-
- ARLAN_DEBUG_EXIT("arlan_retransmit_now");
-}
-
-
-
-static void arlan_registration_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct arlan_private *priv = netdev_priv(dev);
- int bh_mark_needed = 0;
- int next_tick = 1;
- long lostTime = ((long)jiffies - (long)priv->registrationLastSeen)
- * (1000/HZ);
-
- if (registrationBad(dev))
- {
- priv->registrationLostCount++;
- if (lostTime > 7000 && lostTime < 7200)
- {
- printk(KERN_NOTICE "%s registration Lost \n", dev->name);
- }
- if (lostTime / priv->reRegisterExp > 2000)
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
- if (lostTime / (priv->reRegisterExp) > 3500)
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- if (priv->reRegisterExp < 400)
- priv->reRegisterExp += 2;
- if (lostTime > 7200)
- {
- next_tick = HZ;
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- }
- }
- else
- {
- if (priv->Conf->registrationMode && lostTime > 10000 &&
- priv->registrationLostCount)
- {
- printk(KERN_NOTICE "%s registration is back after %ld milliseconds\n",
- dev->name, lostTime);
- }
- priv->registrationLastSeen = jiffies;
- priv->registrationLostCount = 0;
- priv->reRegisterExp = 1;
- if (!netif_running(dev) )
- netif_wake_queue(dev);
- if (time_after(priv->tx_last_sent,priv->tx_last_cleared) &&
- time_after(jiffies, priv->tx_last_sent * 5*HZ) ){
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- priv->tx_last_cleared = jiffies;
- }
- }
-
-
- if (!registrationBad(dev) && priv->ReTransmitRequested)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "Retransmit from timer \n");
- priv->ReTransmitRequested = 0;
- arlan_retransmit_now(dev);
- }
- if (!registrationBad(dev) &&
- time_after(jiffies, priv->tx_done_delayed) &&
- priv->tx_done_delayed != 0)
- {
- TXLAST(dev).offset = 0;
- if (priv->txLast)
- priv->txLast = 0;
- else if (TXTAIL(dev).offset)
- priv->txLast = 1;
- if (TXLAST(dev).offset)
- {
- arlan_retransmit_now(dev);
- dev->trans_start = jiffies;
- }
- if (!(TXHEAD(dev).offset && TXTAIL(dev).offset))
- {
- netif_wake_queue (dev);
- }
- priv->tx_done_delayed = 0;
- bh_mark_needed = 1;
- }
- if (bh_mark_needed)
- {
- netif_wake_queue (dev);
- }
- arlan_process_interrupt(dev);
-
- if (next_tick < priv->card_polling_interval)
- next_tick = priv->card_polling_interval;
-
- priv->timer.expires = jiffies + next_tick;
-
- add_timer(&priv->timer);
-}
-
-
-#ifdef ARLAN_DEBUGGING
-
-static void arlan_print_registers(struct net_device *dev, int line)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem *arlan = priv->card;
-
- u_char hostcpuLock, lancpuLock, controlRegister, cntrlRegImage,
- txStatus, rxStatus, interruptInProgress, commandByte;
-
-
- ARLAN_DEBUG_ENTRY("arlan_print_registers");
- READSHM(interruptInProgress, arlan->interruptInProgress, u_char);
- READSHM(hostcpuLock, arlan->hostcpuLock, u_char);
- READSHM(lancpuLock, arlan->lancpuLock, u_char);
- READSHM(controlRegister, arlan->controlRegister, u_char);
- READSHM(cntrlRegImage, arlan->cntrlRegImage, u_char);
- READSHM(txStatus, arlan->txStatus, u_char);
- READSHM(rxStatus, arlan->rxStatus, u_char);
- READSHM(commandByte, arlan->commandByte, u_char);
-
- printk(KERN_WARNING "line %04d IP %02x HL %02x LL %02x CB %02x CR %02x CRI %02x TX %02x RX %02x\n",
- line, interruptInProgress, hostcpuLock, lancpuLock, commandByte,
- controlRegister, cntrlRegImage, txStatus, rxStatus);
-
- ARLAN_DEBUG_EXIT("arlan_print_registers");
-}
-#endif
-
-
-static int arlan_hw_tx(struct net_device *dev, char *buf, int length)
-{
- int i;
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- int tailStarts = 0x800;
- int headEnds = 0x0;
-
-
- ARLAN_DEBUG_ENTRY("arlan_hw_tx");
- if (TXHEAD(dev).offset)
- headEnds = (((TXHEAD(dev).offset + TXHEAD(dev).length - offsetof(struct arlan_shmem, txBuffer)) / 64) + 1) * 64;
- if (TXTAIL(dev).offset)
- tailStarts = 0x800 - (((TXTAIL(dev).offset - offsetof(struct arlan_shmem, txBuffer)) / 64) + 2) * 64;
-
-
- if (!TXHEAD(dev).offset && length < tailStarts)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TXHEAD insert, tailStart %d\n", tailStarts);
-
- TXHEAD(dev).offset =
- offsetof(struct arlan_shmem, txBuffer);
- TXHEAD(dev).length = length - ARLAN_FAKE_HDR_LEN;
- for (i = 0; i < 6; i++)
- TXHEAD(dev).dest[i] = buf[i];
- TXHEAD(dev).clear = conf->txClear;
- TXHEAD(dev).retries = conf->txRetries; /* 0 is use default */
- TXHEAD(dev).routing = conf->txRouting;
- TXHEAD(dev).scrambled = conf->txScrambled;
- memcpy_toio((char __iomem *)arlan + TXHEAD(dev).offset, buf + ARLAN_FAKE_HDR_LEN, TXHEAD(dev).length);
- }
- else if (!TXTAIL(dev).offset && length < (0x800 - headEnds))
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TXTAIL insert, headEnd %d\n", headEnds);
-
- TXTAIL(dev).offset =
- offsetof(struct arlan_shmem, txBuffer) + 0x800 - (length / 64 + 2) * 64;
- TXTAIL(dev).length = length - ARLAN_FAKE_HDR_LEN;
- for (i = 0; i < 6; i++)
- TXTAIL(dev).dest[i] = buf[i];
- TXTAIL(dev).clear = conf->txClear;
- TXTAIL(dev).retries = conf->txRetries;
- TXTAIL(dev).routing = conf->txRouting;
- TXTAIL(dev).scrambled = conf->txScrambled;
- memcpy_toio(((char __iomem *)arlan + TXTAIL(dev).offset), buf + ARLAN_FAKE_HDR_LEN, TXTAIL(dev).length);
- }
- else
- {
- netif_stop_queue (dev);
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TX TAIL & HEAD full, return, tailStart %d headEnd %d\n", tailStarts, headEnds);
- return -1;
- }
- priv->out_bytes += length;
- priv->out_bytes10 += length;
- if (conf->measure_rate < 1)
- conf->measure_rate = 1;
- if (time_after(jiffies, priv->out_time + conf->measure_rate * HZ))
- {
- conf->out_speed = priv->out_bytes / conf->measure_rate;
- priv->out_bytes = 0;
- priv->out_time = jiffies;
- }
- if (time_after(jiffies, priv->out_time10 + conf->measure_rate * 10*HZ))
- {
- conf->out_speed10 = priv->out_bytes10 / (10 * conf->measure_rate);
- priv->out_bytes10 = 0;
- priv->out_time10 = jiffies;
- }
- if (TXHEAD(dev).offset && TXTAIL(dev).offset)
- {
- netif_stop_queue (dev);
- return 0;
- }
- else
- netif_start_queue (dev);
-
-
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_WARNING "%s Transmit t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name,
- (unsigned char) buf[0], (unsigned char) buf[1], (unsigned char) buf[2], (unsigned char) buf[3],
- (unsigned char) buf[4], (unsigned char) buf[5], (unsigned char) buf[6], (unsigned char) buf[7],
- (unsigned char) buf[8], (unsigned char) buf[9], (unsigned char) buf[10], (unsigned char) buf[11]);
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "TX command prepare for buffer %d\n", priv->txLast);
-
- arlan_command(dev, ARLAN_COMMAND_TX);
-
- priv->tx_last_sent = jiffies;
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("%s TX Qued %d bytes \n", dev->name, length);
-
- ARLAN_DEBUG_EXIT("arlan_hw_tx");
-
- return 0;
-}
-
-
-static int arlan_hw_config(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_hw_config");
-
- printk(KERN_NOTICE "%s arlan configure called \n", dev->name);
- if (arlan_EEPROM_bad)
- printk(KERN_NOTICE "arlan configure with eeprom bad option \n");
-
-
- WRITESHM(arlan->spreadingCode, conf->spreadingCode, u_char);
- WRITESHM(arlan->channelSet, conf->channelSet, u_char);
-
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->defaultChannelSet, conf->channelSet, u_char);
-
- WRITESHM(arlan->channelNumber, conf->channelNumber, u_char);
-
- WRITESHM(arlan->scramblingDisable, conf->scramblingDisable, u_char);
- WRITESHM(arlan->txAttenuation, conf->txAttenuation, u_char);
-
- WRITESHM(arlan->systemId, conf->systemId, u_int);
-
- WRITESHM(arlan->maxRetries, conf->maxRetries, u_char);
- WRITESHM(arlan->receiveMode, conf->receiveMode, u_char);
- WRITESHM(arlan->priority, conf->priority, u_char);
- WRITESHM(arlan->rootOrRepeater, conf->rootOrRepeater, u_char);
- WRITESHM(arlan->SID, conf->SID, u_int);
-
- WRITESHM(arlan->registrationMode, conf->registrationMode, u_char);
-
- WRITESHM(arlan->registrationFill, conf->registrationFill, u_char);
- WRITESHM(arlan->localTalkAddress, conf->localTalkAddress, u_char);
- WRITESHM(arlan->codeFormat, conf->codeFormat, u_char);
- WRITESHM(arlan->numChannels, conf->numChannels, u_char);
- WRITESHM(arlan->channel1, conf->channel1, u_char);
- WRITESHM(arlan->channel2, conf->channel2, u_char);
- WRITESHM(arlan->channel3, conf->channel3, u_char);
- WRITESHM(arlan->channel4, conf->channel4, u_char);
- WRITESHM(arlan->radioNodeId, conf->radioNodeId, u_short);
- WRITESHM(arlan->SID, conf->SID, u_int);
- WRITESHM(arlan->waitTime, conf->waitTime, u_short);
- WRITESHM(arlan->lParameter, conf->lParameter, u_short);
- memcpy_toio(&(arlan->_15), &(conf->_15), 3);
- WRITESHM(arlan->_15, conf->_15, u_short);
- WRITESHM(arlan->headerSize, conf->headerSize, u_short);
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->hardwareType, conf->hardwareType, u_char);
- WRITESHM(arlan->radioType, conf->radioType, u_char);
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->radioModule, conf->radioType, u_char);
-
- memcpy_toio(arlan->encryptionKey + keyStart, encryptionKey, 8);
- memcpy_toio(arlan->name, conf->siteName, 16);
-
- WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_CONF); /* do configure */
- memset_io(arlan->commandParameter, 0, 0xf); /* 0xf */
- memset_io(arlan->commandParameter + 1, 0, 2);
- if (conf->writeEEPROM)
- {
- memset_io(arlan->commandParameter, conf->writeEEPROM, 1);
-// conf->writeEEPROM=0;
- }
- if (conf->registrationMode && conf->registrationInterrupts)
- memset_io(arlan->commandParameter + 3, 1, 1);
- else
- memset_io(arlan->commandParameter + 3, 0, 1);
-
- priv->irq_test_done = 0;
-
- if (conf->tx_queue_len)
- dev->tx_queue_len = conf->tx_queue_len;
- udelay(100);
-
- ARLAN_DEBUG_EXIT("arlan_hw_config");
- return 0;
-}
-
-
-static int arlan_read_card_configuration(struct net_device *dev)
-{
- u_char tlx415;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_read_card_configuration");
-
- if (radioNodeId == radioNodeIdUNKNOWN)
- {
- READSHM(conf->radioNodeId, arlan->radioNodeId, u_short);
- }
- else
- conf->radioNodeId = radioNodeId;
-
- if (SID == SIDUNKNOWN)
- {
- READSHM(conf->SID, arlan->SID, u_int);
- }
- else conf->SID = SID;
-
- if (spreadingCode == spreadingCodeUNKNOWN)
- {
- READSHM(conf->spreadingCode, arlan->spreadingCode, u_char);
- }
- else
- conf->spreadingCode = spreadingCode;
-
- if (channelSet == channelSetUNKNOWN)
- {
- READSHM(conf->channelSet, arlan->channelSet, u_char);
- }
- else conf->channelSet = channelSet;
-
- if (channelNumber == channelNumberUNKNOWN)
- {
- READSHM(conf->channelNumber, arlan->channelNumber, u_char);
- }
- else conf->channelNumber = channelNumber;
-
- READSHM(conf->scramblingDisable, arlan->scramblingDisable, u_char);
- READSHM(conf->txAttenuation, arlan->txAttenuation, u_char);
-
- if (systemId == systemIdUNKNOWN)
- {
- READSHM(conf->systemId, arlan->systemId, u_int);
- }
- else conf->systemId = systemId;
-
- READSHM(conf->maxDatagramSize, arlan->maxDatagramSize, u_short);
- READSHM(conf->maxFrameSize, arlan->maxFrameSize, u_short);
- READSHM(conf->maxRetries, arlan->maxRetries, u_char);
- READSHM(conf->receiveMode, arlan->receiveMode, u_char);
- READSHM(conf->priority, arlan->priority, u_char);
- READSHM(conf->rootOrRepeater, arlan->rootOrRepeater, u_char);
-
- if (SID == SIDUNKNOWN)
- {
- READSHM(conf->SID, arlan->SID, u_int);
- }
- else conf->SID = SID;
-
- if (registrationMode == registrationModeUNKNOWN)
- {
- READSHM(conf->registrationMode, arlan->registrationMode, u_char);
- }
- else conf->registrationMode = registrationMode;
-
- READSHM(conf->registrationFill, arlan->registrationFill, u_char);
- READSHM(conf->localTalkAddress, arlan->localTalkAddress, u_char);
- READSHM(conf->codeFormat, arlan->codeFormat, u_char);
- READSHM(conf->numChannels, arlan->numChannels, u_char);
- READSHM(conf->channel1, arlan->channel1, u_char);
- READSHM(conf->channel2, arlan->channel2, u_char);
- READSHM(conf->channel3, arlan->channel3, u_char);
- READSHM(conf->channel4, arlan->channel4, u_char);
- READSHM(conf->waitTime, arlan->waitTime, u_short);
- READSHM(conf->lParameter, arlan->lParameter, u_short);
- READSHM(conf->_15, arlan->_15, u_short);
- READSHM(conf->headerSize, arlan->headerSize, u_short);
- READSHM(conf->hardwareType, arlan->hardwareType, u_char);
- READSHM(conf->radioType, arlan->radioModule, u_char);
-
- if (conf->radioType == 0)
- conf->radioType = 0xc;
-
- WRITESHM(arlan->configStatus, 0xA5, u_char);
- READSHM(tlx415, arlan->configStatus, u_char);
-
- if (tlx415 != 0xA5)
- printk(KERN_INFO "%s tlx415 chip \n", dev->name);
-
- conf->txClear = 0;
- conf->txRetries = 1;
- conf->txRouting = 1;
- conf->txScrambled = 0;
- conf->rxParameter = 1;
- conf->txTimeoutMs = 4000;
- conf->waitCardTimeout = 100000;
- conf->receiveMode = ARLAN_RCV_CLEAN;
- memcpy_fromio(conf->siteName, arlan->name, 16);
- conf->siteName[16] = '\0';
- conf->retries = retries;
- conf->tx_delay_ms = tx_delay_ms;
- conf->ReTransmitPacketMaxSize = 200;
- conf->waitReTransmitPacketMaxSize = 200;
- conf->txAckTimeoutMs = 900;
- conf->fastReTransCount = 3;
-
- ARLAN_DEBUG_EXIT("arlan_read_card_configuration");
-
- return 0;
-}
-
-
-static int lastFoundAt = 0xbe000;
-
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-#define ARLAN_SHMEM_SIZE 0x2000
-static int __init arlan_check_fingerprint(unsigned long memaddr)
-{
- static const char probeText[] = "TELESYSTEM SLW INC. ARLAN \0";
- volatile struct arlan_shmem __iomem *arlan = (struct arlan_shmem *) memaddr;
- unsigned long paddr = virt_to_phys((void *) memaddr);
- char tempBuf[49];
-
- ARLAN_DEBUG_ENTRY("arlan_check_fingerprint");
-
- if (!request_mem_region(paddr, ARLAN_SHMEM_SIZE, "arlan")) {
- // printk(KERN_WARNING "arlan: memory region %lx excluded from probing \n",paddr);
- return -ENODEV;
- }
-
- memcpy_fromio(tempBuf, arlan->textRegion, 29);
- tempBuf[30] = 0;
-
- /* check for card at this address */
- if (0 != strncmp(tempBuf, probeText, 29)){
- release_mem_region(paddr, ARLAN_SHMEM_SIZE);
- return -ENODEV;
- }
-
-// printk(KERN_INFO "arlan found at 0x%x \n",memaddr);
- ARLAN_DEBUG_EXIT("arlan_check_fingerprint");
-
- return 0;
-}
-
-static int arlan_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct arlan_private *priv = netdev_priv(dev);
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_change_mtu");
- if (new_mtu > 2032)
- return -EINVAL;
- dev->mtu = new_mtu;
- if (new_mtu < 256)
- new_mtu = 256; /* cards book suggests 1600 */
- conf->maxDatagramSize = new_mtu;
- conf->maxFrameSize = new_mtu + 48;
-
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
- printk(KERN_NOTICE "%s mtu changed to %d \n", dev->name, new_mtu);
-
- ARLAN_DEBUG_EXIT("arlan_change_mtu");
-
- return 0;
-}
-
-static int arlan_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
-
- ARLAN_DEBUG_ENTRY("arlan_mac_addr");
- return -EINVAL;
-
- if (netif_running(dev))
- return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-
- ARLAN_DEBUG_EXIT("arlan_mac_addr");
- return 0;
-}
-
-static const struct net_device_ops arlan_netdev_ops = {
- .ndo_open = arlan_open,
- .ndo_stop = arlan_close,
- .ndo_start_xmit = arlan_tx,
- .ndo_get_stats = arlan_statistics,
- .ndo_set_multicast_list = arlan_set_multicast,
- .ndo_change_mtu = arlan_change_mtu,
- .ndo_set_mac_address = arlan_mac_addr,
- .ndo_tx_timeout = arlan_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init arlan_setup_device(struct net_device *dev, int num)
-{
- struct arlan_private *ap = netdev_priv(dev);
- int err;
-
- ARLAN_DEBUG_ENTRY("arlan_setup_device");
-
- ap->conf = (struct arlan_shmem *)(ap+1);
-
- dev->tx_queue_len = tx_queue_len;
- dev->netdev_ops = &arlan_netdev_ops;
- dev->watchdog_timeo = 3*HZ;
-
- ap->irq_test_done = 0;
- ap->Conf = &arlan_conf[num];
-
- ap->Conf->pre_Command_Wait = 40;
- ap->Conf->rx_tweak1 = 30;
- ap->Conf->rx_tweak2 = 0;
-
-
- err = register_netdev(dev);
- if (err) {
- release_mem_region(virt_to_phys((void *) dev->mem_start),
- ARLAN_SHMEM_SIZE);
- free_netdev(dev);
- return err;
- }
- arlan_device[num] = dev;
- ARLAN_DEBUG_EXIT("arlan_setup_device");
- return 0;
-}
-
-static int __init arlan_probe_here(struct net_device *dev,
- unsigned long memaddr)
-{
- struct arlan_private *ap = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_probe_here");
-
- if (arlan_check_fingerprint(memaddr))
- return -ENODEV;
-
- printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name,
- (u64) virt_to_phys((void*)memaddr));
-
- ap->card = (void *) memaddr;
- dev->mem_start = memaddr;
- dev->mem_end = memaddr + ARLAN_SHMEM_SIZE-1;
-
- if (dev->irq < 2)
- {
- READSHM(dev->irq, ap->card->irqLevel, u_char);
- } else if (dev->irq == 2)
- dev->irq = 9;
-
- arlan_read_card_configuration(dev);
-
- ARLAN_DEBUG_EXIT("arlan_probe_here");
- return 0;
-}
-
-
-static int arlan_open(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- int ret = 0;
-
- ARLAN_DEBUG_ENTRY("arlan_open");
-
- ret = request_irq(dev->irq, &arlan_interrupt, 0, dev->name, dev);
- if (ret)
- {
- printk(KERN_ERR "%s: unable to get IRQ %d .\n",
- dev->name, dev->irq);
- return ret;
- }
-
-
- priv->bad = 0;
- priv->lastReset = 0;
- priv->reset = 0;
- memcpy_fromio(dev->dev_addr, arlan->lanCardNodeId, 6);
- memset(dev->broadcast, 0xff, 6);
- dev->tx_queue_len = tx_queue_len;
- priv->interrupt_processing_active = 0;
- spin_lock_init(&priv->lock);
-
- netif_start_queue (dev);
-
- priv->registrationLostCount = 0;
- priv->registrationLastSeen = jiffies;
- priv->txLast = 0;
- priv->tx_command_given = 0;
- priv->rx_command_given = 0;
-
- priv->reRegisterExp = 1;
- priv->tx_last_sent = jiffies - 1;
- priv->tx_last_cleared = jiffies;
- priv->Conf->writeEEPROM = 0;
- priv->Conf->registrationInterrupts = 1;
-
- init_timer(&priv->timer);
- priv->timer.expires = jiffies + HZ / 10;
- priv->timer.data = (unsigned long) dev;
- priv->timer.function = &arlan_registration_timer; /* timer handler */
-
- arlan_command(dev, ARLAN_COMMAND_POWERUP | ARLAN_COMMAND_LONG_WAIT_NOW);
- mdelay(200);
- add_timer(&priv->timer);
-
- ARLAN_DEBUG_EXIT("arlan_open");
- return 0;
-}
-
-
-static void arlan_tx_timeout (struct net_device *dev)
-{
- printk(KERN_ERR "%s: arlan transmit timed out, kernel decided\n", dev->name);
- /* Try to restart the adaptor. */
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- // dev->trans_start = jiffies;
- // netif_start_queue (dev);
-}
-
-
-static netdev_tx_t arlan_tx(struct sk_buff *skb, struct net_device *dev)
-{
- short length;
- unsigned char *buf;
-
- ARLAN_DEBUG_ENTRY("arlan_tx");
-
- length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- buf = skb->data;
-
- if (length + 0x12 > 0x800) {
- printk(KERN_ERR "TX RING overflow \n");
- netif_stop_queue (dev);
- }
-
- if (arlan_hw_tx(dev, buf, length) == -1)
- goto bad_end;
-
- dev->trans_start = jiffies;
-
- dev_kfree_skb(skb);
-
- arlan_process_interrupt(dev);
- ARLAN_DEBUG_EXIT("arlan_tx");
- return NETDEV_TX_OK;
-
-bad_end:
- arlan_process_interrupt(dev);
- netif_stop_queue (dev);
- ARLAN_DEBUG_EXIT("arlan_tx");
- return NETDEV_TX_BUSY;
-}
-
-
-static inline int DoNotReTransmitCrap(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- if (TXLAST(dev).length < priv->Conf->ReTransmitPacketMaxSize)
- return 1;
- return 0;
-
-}
-
-static inline int DoNotWaitReTransmitCrap(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- if (TXLAST(dev).length < priv->Conf->waitReTransmitPacketMaxSize)
- return 1;
- return 0;
-}
-
-static inline void arlan_queue_retransmit(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_queue_retransmit");
-
- if (DoNotWaitReTransmitCrap(dev))
- {
- arlan_drop_tx(dev);
- } else
- priv->ReTransmitRequested++;
-
- ARLAN_DEBUG_EXIT("arlan_queue_retransmit");
-}
-
-static inline void RetryOrFail(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("RetryOrFail");
-
- if (priv->retransmissions > priv->Conf->retries ||
- DoNotReTransmitCrap(dev))
- {
- arlan_drop_tx(dev);
- }
- else if (priv->bad <= priv->Conf->fastReTransCount)
- {
- arlan_retransmit_now(dev);
- }
- else arlan_queue_retransmit(dev);
-
- ARLAN_DEBUG_EXIT("RetryOrFail");
-}
-
-
-static void arlan_tx_done_interrupt(struct net_device *dev, int status)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_tx_done_interrupt");
-
- priv->tx_last_cleared = jiffies;
- priv->tx_command_given = 0;
- switch (status)
- {
- case 1:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit OK\n");
- dev->stats.tx_packets++;
- priv->bad = 0;
- priv->reset = 0;
- priv->retransmissions = 0;
- if (priv->Conf->tx_delay_ms)
- {
- priv->tx_done_delayed = jiffies + (priv->Conf->tx_delay_ms * HZ) / 1000 + 1;
- }
- else
- {
- TXLAST(dev).offset = 0;
- if (priv->txLast)
- priv->txLast = 0;
- else if (TXTAIL(dev).offset)
- priv->txLast = 1;
- if (TXLAST(dev).offset)
- {
- arlan_retransmit_now(dev);
- dev->trans_start = jiffies;
- }
- if (!TXHEAD(dev).offset || !TXTAIL(dev).offset)
- {
- netif_wake_queue (dev);
- }
- }
- }
- break;
-
- case 2:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit timed out\n");
- priv->bad += 1;
- //arlan_queue_retransmit(dev);
- RetryOrFail(dev);
- }
- break;
-
- case 3:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit max retries\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_queue_retransmit(dev);
- RetryOrFail(dev);
- }
- break;
-
- case 4:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit aborted\n");
- priv->bad += 1;
- arlan_queue_retransmit(dev);
- //RetryOrFail(dev);
- }
- break;
-
- case 5:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit not registered\n");
- priv->bad += 1;
- //debug=101;
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 6:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit destination full\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 7:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit unknown ack\n");
- priv->bad += 1;
- priv->reset = 0;
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 8:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit dest mail box full\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 9:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit root dest not reg.\n");
- priv->bad += 1;
- priv->reset = 1;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- default:
- {
- printk(KERN_ERR "arlan intr: transmit status unknown\n");
- priv->bad += 1;
- priv->reset = 1;
- arlan_drop_tx(dev);
- }
- }
-
- ARLAN_DEBUG_EXIT("arlan_tx_done_interrupt");
-}
-
-
-static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short rxOffset, u_short pkt_len)
-{
- char *skbtmp;
- int i = 0;
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
-
- ARLAN_DEBUG_ENTRY("arlan_rx_interrupt");
- // by spec, not WRITESHMB(arlan->rxStatus,0x00);
- // prohibited here arlan_command(dev, ARLAN_COMMAND_RX);
-
- if (pkt_len < 10 || pkt_len > 2048)
- {
- printk(KERN_WARNING "%s: got too short or long packet, len %d \n", dev->name, pkt_len);
- return;
- }
- if (rxOffset + pkt_len > 0x2000)
- {
- printk("%s: got too long packet, len %d offset %x\n", dev->name, pkt_len, rxOffset);
- return;
- }
- priv->in_bytes += pkt_len;
- priv->in_bytes10 += pkt_len;
- if (conf->measure_rate < 1)
- conf->measure_rate = 1;
- if (time_after(jiffies, priv->in_time + conf->measure_rate * HZ))
- {
- conf->in_speed = priv->in_bytes / conf->measure_rate;
- priv->in_bytes = 0;
- priv->in_time = jiffies;
- }
- if (time_after(jiffies, priv->in_time10 + conf->measure_rate * 10*HZ))
- {
- conf->in_speed10 = priv->in_bytes10 / (10 * conf->measure_rate);
- priv->in_bytes10 = 0;
- priv->in_time10 = jiffies;
- }
- DEBUGSHM(1, "arlan rcv pkt rxStatus= %d ", arlan->rxStatus, u_char);
- switch (rxStatus)
- {
- case 1:
- case 2:
- case 3:
- {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- DEBUGSHM(50, "arlan recv pkt offs=%d\n", arlan->rxOffset, u_short);
- DEBUGSHM(1, "arlan rxFrmType = %d \n", arlan->rxFrmType, u_char);
- DEBUGSHM(1, KERN_INFO "arlan rx scrambled = %d \n", arlan->scrambled, u_char);
-
- /* here we do multicast filtering to avoid slow 8-bit memcopy */
-#ifdef ARLAN_MULTICAST
- if (!(dev->flags & IFF_ALLMULTI) &&
- !(dev->flags & IFF_PROMISC) &&
- !netdev_mc_empty(dev))
- {
- char hw_dst_addr[6];
- struct dev_mc_list *dmi;
- int i;
-
- memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
- if (hw_dst_addr[0] == 0x01)
- {
- if (mdebug)
- if (hw_dst_addr[1] == 0x00)
- printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
- else if (hw_dst_addr[1] == 0x40)
- printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- netdev_for_each_mc_entry(dmi, dev) {
- if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
- for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
- break;
- if (i == 6)
- break;
- }
- /* we reach here if multicast filtering is on and packet
- * is multicast and not for receive */
- goto end_of_interrupt;
- }
- }
-#endif // ARLAN_MULTICAST
- /* multicast filtering ends here */
- pkt_len += ARLAN_FAKE_HDR_LEN;
-
- skb = dev_alloc_skb(pkt_len + 4);
- if (skb == NULL)
- {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2);
- skbtmp = skb_put(skb, pkt_len);
-
- memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN);
- memcpy_fromio(skbtmp, arlan->ultimateDestAddress, 6);
- memcpy_fromio(skbtmp + 6, arlan->rxSrc, 6);
- WRITESHMB(arlan->rxStatus, 0x00);
- arlan_command(dev, ARLAN_COMMAND_RX);
-
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- {
- char immedDestAddress[6];
- char immedSrcAddress[6];
- memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6);
- memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6);
-
- printk(KERN_WARNING "%s t %pM f %pM imd %pM ims %pM\n",
- dev->name, skbtmp,
- &skbtmp[6],
- immedDestAddress,
- immedSrcAddress);
- }
- skb->protocol = eth_type_trans(skb, dev);
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- if (skb->protocol != 0x608 && skb->protocol != 0x8)
- {
- for (i = 0; i <= 22; i++)
- printk("%02x:", (u_char) skbtmp[i + 12]);
- printk(KERN_ERR "\n");
- printk(KERN_WARNING "arlan kernel pkt type trans %x \n", skb->protocol);
- }
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- break;
-
- default:
- printk(KERN_ERR "arlan intr: received unknown status\n");
- dev->stats.rx_crc_errors++;
- break;
- }
- ARLAN_DEBUG_EXIT("arlan_rx_interrupt");
-}
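
For reference, the receive handler above follows the classic legacy-netdev receive pattern: allocate an skb with two bytes of headroom so the IP header ends up aligned, copy the frame out of card shared memory, derive the protocol with eth_type_trans() and queue the buffer with netif_rx(). A minimal sketch of that pattern, with a hypothetical helper name and arguments that are not part of the driver:

	/* Copy one received frame from card memory into a fresh skb and
	 * hand it to the network stack (illustrative helper only). */
	static void example_rx_deliver(struct net_device *dev,
				       const void __iomem *frame, int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + 2);

		if (!skb) {			/* memory squeeze */
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);		/* align the IP header */
		memcpy_fromio(skb_put(skb, len), frame, len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}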
-
-static void arlan_process_interrupt(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char rxStatus = READSHMB(arlan->rxStatus);
- u_char txStatus = READSHMB(arlan->txStatus);
- u_short rxOffset = READSHMS(arlan->rxOffset);
- u_short pkt_len = READSHMS(arlan->rxLength);
- int interrupt_count = 0;
-
- ARLAN_DEBUG_ENTRY("arlan_process_interrupt");
-
- if (test_and_set_bit(0, (void *) &priv->interrupt_processing_active))
- {
- if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_ERR "interrupt chain reentering \n");
- goto end_int_process;
- }
- while ((rxStatus || txStatus || priv->interrupt_ack_requested)
- && (interrupt_count < 5))
- {
- if (rxStatus)
- priv->last_rx_int_ack_time = jiffies;
-
- arlan_command(dev, ARLAN_COMMAND_INT_ACK);
- arlan_command(dev, ARLAN_COMMAND_INT_ENABLE);
-
- IFDEBUG(ARLAN_DEBUG_INTERRUPT)
- printk(KERN_ERR "%s: got IRQ rx %x tx %x comm %x rxOff %x rxLen %x \n",
- dev->name, rxStatus, txStatus, READSHMB(arlan->commandByte),
- rxOffset, pkt_len);
-
- if (rxStatus == 0 && txStatus == 0)
- {
- if (priv->irq_test_done)
- {
- if (!registrationBad(dev))
- IFDEBUG(ARLAN_DEBUG_INTERRUPT) printk(KERN_ERR "%s unknown interrupt(nop? regLost ?) reason tx %d rx %d ",
- dev->name, txStatus, rxStatus);
- } else {
- IFDEBUG(ARLAN_DEBUG_INTERRUPT)
-				printk(KERN_INFO "%s irq %d test OK\n", dev->name, dev->irq);
-
- }
- priv->interrupt_ack_requested = 0;
- goto ends;
- }
- if (txStatus != 0)
- {
- WRITESHMB(arlan->txStatus, 0x00);
- arlan_tx_done_interrupt(dev, txStatus);
- goto ends;
- }
- if (rxStatus == 1 || rxStatus == 2)
- { /* a packet waiting */
- arlan_rx_interrupt(dev, rxStatus, rxOffset, pkt_len);
- goto ends;
- }
- if (rxStatus > 2 && rxStatus < 0xff)
- {
- WRITESHMB(arlan->rxStatus, 0x00);
- printk(KERN_ERR "%s unknown rxStatus reason tx %d rx %d ",
- dev->name, txStatus, rxStatus);
- goto ends;
- }
- if (rxStatus == 0xff)
- {
- WRITESHMB(arlan->rxStatus, 0x00);
- arlan_command(dev, ARLAN_COMMAND_RX);
- if (registrationBad(dev))
- netif_device_detach(dev);
- if (!registrationBad(dev))
- {
- priv->registrationLastSeen = jiffies;
- if (!netif_queue_stopped(dev) && !priv->under_reset && !priv->under_config)
- netif_wake_queue (dev);
- }
- goto ends;
- }
-ends:
-
- arlan_command_process(dev);
-
- rxStatus = READSHMB(arlan->rxStatus);
- txStatus = READSHMB(arlan->txStatus);
- rxOffset = READSHMS(arlan->rxOffset);
- pkt_len = READSHMS(arlan->rxLength);
-
-
- priv->irq_test_done = 1;
-
- interrupt_count++;
- }
- priv->interrupt_processing_active = 0;
-
-end_int_process:
- arlan_command_process(dev);
-
- ARLAN_DEBUG_EXIT("arlan_process_interrupt");
- return;
-}
-
-static irqreturn_t arlan_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char rxStatus = READSHMB(arlan->rxStatus);
- u_char txStatus = READSHMB(arlan->txStatus);
-
- ARLAN_DEBUG_ENTRY("arlan_interrupt");
-
-
- if (!rxStatus && !txStatus)
- priv->interrupt_ack_requested++;
-
- arlan_process_interrupt(dev);
-
- priv->irq_test_done = 1;
-
- ARLAN_DEBUG_EXIT("arlan_interrupt");
- return IRQ_HANDLED;
-
-}
-
-
-static int arlan_close(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_close");
-
- del_timer_sync(&priv->timer);
-
- arlan_command(dev, ARLAN_COMMAND_POWERDOWN);
-
- IFDEBUG(ARLAN_DEBUG_STARTUP)
- printk(KERN_NOTICE "%s: Closing device\n", dev->name);
-
- netif_stop_queue(dev);
- free_irq(dev->irq, dev);
-
- ARLAN_DEBUG_EXIT("arlan_close");
- return 0;
-}
-
-#ifdef ARLAN_DEBUGGING
-static long alignLong(volatile u_char * ptr)
-{
- long ret;
- memcpy_fromio(&ret, (void *) ptr, 4);
- return ret;
-}
-#endif
-
-/*
- * Get the current statistics.
- * This may be called with the card open or closed.
- */
-
-static struct net_device_stats *arlan_statistics(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
-
- ARLAN_DEBUG_ENTRY("arlan_statistics");
-
- /* Update the statistics from the device registers. */
-
- READSHM(dev->stats.collisions, arlan->numReTransmissions, u_int);
- READSHM(dev->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
- READSHM(dev->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
- READSHM(dev->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
- READSHM(dev->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
- READSHM(dev->stats.rx_over_errors, arlan->numRXOverruns, u_int);
- READSHM(dev->stats.rx_packets, arlan->numDatagramsReceived, u_int);
- READSHM(dev->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
- READSHM(dev->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
- READSHM(dev->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
- READSHM(dev->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
- READSHM(dev->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
- READSHM(dev->stats.tx_window_errors, arlan->numHoldOffs, u_int);
-
- ARLAN_DEBUG_EXIT("arlan_statistics");
-
- return &dev->stats;
-}
-
-
-static void arlan_set_multicast(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
- int board_conf_needed = 0;
-
-
- ARLAN_DEBUG_ENTRY("arlan_set_multicast");
-
- if (dev->flags & IFF_PROMISC)
- {
- unsigned char recMode;
- READSHM(recMode, arlan->receiveMode, u_char);
- conf->receiveMode = (ARLAN_RCV_PROMISC | ARLAN_RCV_CONTROL);
- if (conf->receiveMode != recMode)
- board_conf_needed = 1;
- }
- else
- {
- /* turn off promiscuous mode */
- unsigned char recMode;
- READSHM(recMode, arlan->receiveMode, u_char);
- conf->receiveMode = ARLAN_RCV_CLEAN | ARLAN_RCV_CONTROL;
- if (conf->receiveMode != recMode)
- board_conf_needed = 1;
- }
- if (board_conf_needed)
- arlan_command(dev, ARLAN_COMMAND_CONF);
-
- ARLAN_DEBUG_EXIT("arlan_set_multicast");
-}
-
-
-struct net_device * __init arlan_probe(int unit)
-{
- struct net_device *dev;
- int err;
- int m;
-
- ARLAN_DEBUG_ENTRY("arlan_probe");
-
- if (arlans_found == MAX_ARLANS)
- return ERR_PTR(-ENODEV);
-
- /*
- * Reserve space for local data and a copy of the shared memory
- * that is used by the /proc interface.
- */
- dev = alloc_etherdev(sizeof(struct arlan_private)
- + sizeof(struct arlan_shmem));
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- if (dev->mem_start) {
- if (arlan_probe_here(dev, dev->mem_start) == 0)
- goto found;
- goto not_found;
- }
-
- }
-
-
- for (m = (int)phys_to_virt(lastFoundAt) + ARLAN_SHMEM_SIZE;
- m <= (int)phys_to_virt(0xDE000);
- m += ARLAN_SHMEM_SIZE)
- {
- if (arlan_probe_here(dev, m) == 0)
- {
- lastFoundAt = (int)virt_to_phys((void*)m);
- goto found;
- }
- }
-
- if (lastFoundAt == 0xbe000)
- printk(KERN_ERR "arlan: No Arlan devices found \n");
-
- not_found:
- free_netdev(dev);
- return ERR_PTR(-ENODEV);
-
- found:
- err = arlan_setup_device(dev, arlans_found);
- if (err)
- dev = ERR_PTR(err);
- else if (!arlans_found++)
- printk(KERN_INFO "Arlan driver %s\n", arlan_version);
-
- return dev;
-}
-
-#ifdef MODULE
-int __init init_module(void)
-{
- int i = 0;
-
- ARLAN_DEBUG_ENTRY("init_module");
-
- if (channelSet != channelSetUNKNOWN || channelNumber != channelNumberUNKNOWN || systemId != systemIdUNKNOWN)
- return -EINVAL;
-
- for (i = 0; i < MAX_ARLANS; i++) {
- struct net_device *dev = arlan_probe(i);
-
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- }
- init_arlan_proc();
- printk(KERN_INFO "Arlan driver %s\n", arlan_version);
- ARLAN_DEBUG_EXIT("init_module");
- return 0;
-}
-
-
-void __exit cleanup_module(void)
-{
- int i = 0;
- struct net_device *dev;
-
- ARLAN_DEBUG_ENTRY("cleanup_module");
-
- IFDEBUG(ARLAN_DEBUG_SHUTDOWN)
- printk(KERN_INFO "arlan: unloading module\n");
-
- cleanup_arlan_proc();
-
- for (i = 0; i < MAX_ARLANS; i++)
- {
- dev = arlan_device[i];
- if (dev) {
- arlan_command(dev, ARLAN_COMMAND_POWERDOWN );
-
- unregister_netdev(dev);
- release_mem_region(virt_to_phys((void *) dev->mem_start),
- ARLAN_SHMEM_SIZE);
- free_netdev(dev);
- arlan_device[i] = NULL;
- }
- }
-
- ARLAN_DEBUG_EXIT("cleanup_module");
-}
-
-
-#endif
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/arlan/arlan-proc.c b/drivers/staging/arlan/arlan-proc.c
deleted file mode 100644
index b22983e6c0c..00000000000
--- a/drivers/staging/arlan/arlan-proc.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-#include "arlan.h"
-
-#include <linux/sysctl.h>
-
-#ifdef CONFIG_PROC_FS
-
-/* void enableReceive(struct net_device* dev);
-*/
-
-
-
-#define ARLAN_STR_SIZE 0x2ff0
-#define DEV_ARLAN_INFO 1
-#define DEV_ARLAN 1
-#define SARLG(type,var) {\
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n", #var, READSHMB(priva->card->var)); \
- }
-
-#define SARLBN(type,var,nn) {\
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x",#var);\
- for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
- pos += sprintf(arlan_drive_info+pos, "\n"); \
- }
-
-#define SARLBNpln(type,var,nn) {\
- for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
- }
-
-#define SARLSTR(var,nn) {\
- char tmpStr[400];\
- int tmpLn = nn;\
- if (nn > 399 ) tmpLn = 399; \
- memcpy(tmpStr,(char *) priva->conf->var,tmpLn);\
- tmpStr[tmpLn] = 0; \
-	pos += sprintf(arlan_drive_info+pos, "%s\t=\t%s \n",#var,tmpStr);\
- }
-
-#define SARLUC(var) SARLG(u_char, var)
-#define SARLUCN(var,nn) SARLBN(u_char,var, nn)
-#define SARLUS(var) SARLG(u_short, var)
-#define SARLUSN(var,nn) SARLBN(u_short,var, nn)
-#define SARLUI(var) SARLG(u_int, var)
-
-#define SARLUSA(var) {\
- u_short tmpVar;\
- memcpy(&tmpVar, (short *) priva->conf->var,2); \
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
-}
-
-#define SARLUIA(var) {\
- u_int tmpVar;\
- memcpy(&tmpVar, (int* )priva->conf->var,4); \
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
-}
-
-
-static const char *arlan_diagnostic_info_string(struct net_device *dev)
-{
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char diagnosticInfo;
-
- READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
-
- switch (diagnosticInfo)
- {
- case 0xFF:
- return "Diagnostic info is OK";
- case 0xFE:
- return "ERROR EPROM Checksum error ";
- case 0xFD:
- return "ERROR Local Ram Test Failed ";
- case 0xFC:
- return "ERROR SCC failure ";
- case 0xFB:
- return "ERROR BackBone failure ";
- case 0xFA:
- return "ERROR transceiver not found ";
- case 0xF9:
- return "ERROR no more address space ";
- case 0xF8:
- return "ERROR Checksum error ";
- case 0xF7:
- return "ERROR Missing SS Code";
- case 0xF6:
- return "ERROR Invalid config format";
- case 0xF5:
- return "ERROR Reserved errorcode F5";
- case 0xF4:
- return "ERROR Invalid spreading code/channel number";
- case 0xF3:
- return "ERROR Load Code Error";
- case 0xF2:
-		return "ERROR Reserved errorcode F2 ";
- case 0xF1:
-		return "ERROR Invalid command received by LAN card ";
- case 0xF0:
- return "ERROR Invalid parameter found in command ";
- case 0xEF:
- return "ERROR On-chip timer failure ";
- case 0xEE:
- return "ERROR T410 timer failure ";
- case 0xED:
- return "ERROR Too Many TxEnable commands ";
- case 0xEC:
- return "ERROR EEPROM error on radio module ";
- default:
- return "ERROR unknown Diagnostic info reply code ";
- }
-}
-
-static const char *arlan_hardware_type_string(struct net_device *dev)
-{
- u_char hardwareType;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
- READSHM(hardwareType, arlan->hardwareType, u_char);
- switch (hardwareType)
- {
- case 0x00:
- return "type A450";
- case 0x01:
- return "type A650 ";
- case 0x04:
- return "type TMA coproc";
- case 0x0D:
- return "type A650E ";
- case 0x18:
- return "type TMA coproc Australian";
- case 0x19:
- return "type A650A ";
- case 0x26:
- return "type TMA coproc European";
- case 0x2E:
- return "type A655 ";
- case 0x2F:
- return "type A655A ";
- case 0x30:
- return "type A655E ";
- case 0x0B:
- return "type A670 ";
- case 0x0C:
- return "type A670E ";
- case 0x2D:
- return "type A670A ";
- case 0x0F:
- return "type A411T";
- case 0x16:
- return "type A411TA";
- case 0x1B:
- return "type A440T";
- case 0x1C:
- return "type A412T";
- case 0x1E:
- return "type A412TA";
- case 0x22:
- return "type A411TE";
- case 0x24:
- return "type A412TE";
- case 0x27:
- return "type A671T ";
- case 0x29:
- return "type A671TA ";
- case 0x2B:
- return "type A671TE ";
- case 0x31:
- return "type A415T ";
- case 0x33:
- return "type A415TA ";
- case 0x35:
- return "type A415TE ";
- case 0x37:
- return "type A672";
- case 0x39:
- return "type A672A ";
- case 0x3B:
- return "type A672T";
- case 0x6B:
- return "type IC2200";
- default:
-		return "type unknown";
- }
-}
-#ifdef ARLAN_DEBUGGING
-static void arlan_print_diagnostic_info(struct net_device *dev)
-{
- int i;
- u_char diagnosticInfo;
- u_short diagnosticOffset;
- u_char hardwareType;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
- // ARLAN_DEBUG_ENTRY("arlan_print_diagnostic_info");
-
- if (READSHMB(arlan->configuredStatusFlag) == 0)
- printk("Arlan: Card NOT configured\n");
- else
- printk("Arlan: Card is configured\n");
-
- READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
- READSHM(diagnosticOffset, arlan->diagnosticOffset, u_short);
-
- printk(KERN_INFO "%s\n", arlan_diagnostic_info_string(dev));
-
- if (diagnosticInfo != 0xff)
- printk("%s arlan: Diagnostic Offset %d \n", dev->name, diagnosticOffset);
-
- printk("arlan: LAN CODE ID = ");
- for (i = 0; i < 6; i++)
- DEBUGSHM(1, "%03d:", arlan->lanCardNodeId[i], u_char);
- printk("\n");
-
- printk("arlan: Arlan BroadCast address = ");
- for (i = 0; i < 6; i++)
- DEBUGSHM(1, "%03d:", arlan->broadcastAddress[i], u_char);
- printk("\n");
-
- READSHM(hardwareType, arlan->hardwareType, u_char);
- printk(KERN_INFO "%s\n", arlan_hardware_type_string(dev));
-
-
- DEBUGSHM(1, "arlan: channelNumber=%d\n", arlan->channelNumber, u_char);
- DEBUGSHM(1, "arlan: channelSet=%d\n", arlan->channelSet, u_char);
- DEBUGSHM(1, "arlan: spreadingCode=%d\n", arlan->spreadingCode, u_char);
- DEBUGSHM(1, "arlan: radioNodeId=%d\n", arlan->radioNodeId, u_short);
- DEBUGSHM(1, "arlan: SID =%d\n", arlan->SID, u_short);
- DEBUGSHM(1, "arlan: rxOffset=%d\n", arlan->rxOffset, u_short);
-
- DEBUGSHM(1, "arlan: registration mode is %d\n", arlan->registrationMode, u_char);
-
- printk("arlan: name= ");
- IFDEBUG(1)
-
- for (i = 0; i < 16; i++)
- {
- char c;
- READSHM(c, arlan->name[i], char);
- if (c)
- printk("%c", c);
- }
- printk("\n");
-
-// ARLAN_DEBUG_EXIT("arlan_print_diagnostic_info");
-
-}
-
-
-/****************************** TEST MEMORY **************/
-
-static int arlan_hw_test_memory(struct net_device *dev)
-{
- u_char *ptr;
- int i;
- int memlen = sizeof(struct arlan_shmem) - 0xF; /* avoid control register */
- volatile char *arlan_mem = (char *) (dev->mem_start);
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- char pattern;
-
- ptr = NULL;
-
- /* hold card in reset state */
- setHardwareReset(dev);
-
- /* test memory */
- pattern = 0;
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], ((u_char) pattern++), u_char);
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- {
- char res;
- READSHM(res, arlan_mem[i], char);
- if (res != pattern++)
- {
- printk(KERN_ERR "Arlan driver memory test 1 failed \n");
- return -1;
- }
- }
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], ~(pattern++), char);
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- {
- char res;
- READSHM(res, arlan_mem[i], char);
- if (res != ~(pattern++))
- {
- printk(KERN_ERR "Arlan driver memory test 2 failed \n");
- return -1;
- }
- }
-
- /* zero memory */
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], 0x00, char);
-
- IFDEBUG(1) printk(KERN_INFO "Arlan: memory tests ok\n");
-
- /* set reset flag and then release reset */
- WRITESHM(arlan->resetFlag, 0xff, u_char);
-
- clearChannelAttention(dev);
- clearHardwareReset(dev);
-
- /* wait for reset flag to become zero, we'll wait for two seconds */
- if (arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW))
- {
- printk(KERN_ERR "%s arlan: failed to come back from memory test\n", dev->name);
- return -1;
- }
- return 0;
-}
-
-static int arlan_setup_card_by_book(struct net_device *dev)
-{
- u_char irqLevel, configuredStatusFlag;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
-// ARLAN_DEBUG_ENTRY("arlan_setup_card");
-
- READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
-
- IFDEBUG(10)
- if (configuredStatusFlag != 0)
- IFDEBUG(10) printk("arlan: CARD IS CONFIGURED\n");
- else
- IFDEBUG(10) printk("arlan: card is NOT configured\n");
-
- if (testMemory || (READSHMB(arlan->diagnosticInfo) != 0xff))
- if (arlan_hw_test_memory(dev))
- return -1;
-
- DEBUGSHM(4, "arlan configuredStatus = %d \n", arlan->configuredStatusFlag, u_char);
- DEBUGSHM(4, "arlan driver diagnostic: 0x%2x\n", arlan->diagnosticInfo, u_char);
-
- /* issue nop command - no interrupt */
- arlan_command(dev, ARLAN_COMMAND_NOOP);
- if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
- return -1;
-
- IFDEBUG(50) printk("1st Noop successfully executed !!\n");
-
- /* try to turn on the arlan interrupts */
- clearClearInterrupt(dev);
- setClearInterrupt(dev);
- setInterruptEnable(dev);
-
- /* issue nop command - with interrupt */
-
- arlan_command(dev, ARLAN_COMMAND_NOOPINT);
- if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
- return -1;
-
-
- IFDEBUG(50) printk("2nd Noop successfully executed !!\n");
-
- READSHM(irqLevel, arlan->irqLevel, u_char)
-
- if (irqLevel != dev->irq)
- {
- IFDEBUG(1) printk(KERN_WARNING "arlan dip switches set irq to %d\n", irqLevel);
- printk(KERN_WARNING "device driver irq set to %d - does not match\n", dev->irq);
- dev->irq = irqLevel;
- }
- else
- IFDEBUG(2) printk("irq level is OK\n");
-
-
- IFDEBUG(3) arlan_print_diagnostic_info(dev);
-
- arlan_command(dev, ARLAN_COMMAND_CONF);
-
- READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
- if (configuredStatusFlag == 0)
- {
- printk(KERN_WARNING "arlan configure failed\n");
- return -1;
- }
- arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
- arlan_command(dev, ARLAN_COMMAND_RX);
- arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
- printk(KERN_NOTICE "%s: arlan driver version %s loaded\n",
- dev->name, arlan_version);
-
-// ARLAN_DEBUG_EXIT("arlan_setup_card");
-
- return 0; /* no errors */
-}
-#endif
-
-#ifdef ARLAN_PROC_INTERFACE
-#ifdef ARLAN_PROC_SHM_DUMP
-
-static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
-
-static int arlan_sysctl_info(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
- struct net_device *dev;
- pos = 0;
- if (write)
- {
-		printk("write: ");
- for (i = 0; i < 100; i++)
- printk("adi %x \n", arlan_drive_info[i]);
- }
- if (ctl->procname == NULL || arlan_drive_info == NULL)
- {
-		printk(KERN_WARNING "arlan: procname in sysctl_table or arlan_drive_info is NULL\n");
- return -1;
- }
- devnum = ctl->procname[5] - '0';
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
-		printk(KERN_WARNING "invalid device number in procfs parse\n");
- return -1;
- }
- else if (arlan_device[devnum] == NULL)
- {
- if (ctl->procname)
- pos += sprintf(arlan_drive_info + pos, "\t%s\n\n", ctl->procname);
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
-
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- dev = arlan_device[devnum];
-
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
-
- pos = sprintf(arlan_drive_info, "Arlan info \n");
- /* Header Signature */
- SARLSTR(textRegion, 48);
- SARLUC(resetFlag);
- pos += sprintf(arlan_drive_info + pos, "diagnosticInfo\t=\t%s \n", arlan_diagnostic_info_string(dev));
- SARLUC(diagnosticInfo);
- SARLUS(diagnosticOffset);
- SARLUCN(_1, 12);
- SARLUCN(lanCardNodeId, 6);
- SARLUCN(broadcastAddress, 6);
- pos += sprintf(arlan_drive_info + pos, "hardwareType =\t %s \n", arlan_hardware_type_string(dev));
- SARLUC(hardwareType);
- SARLUC(majorHardwareVersion);
- SARLUC(minorHardwareVersion);
- SARLUC(radioModule);
- SARLUC(defaultChannelSet);
- SARLUCN(_2, 47);
-
- /* Control/Status Block - 0x0080 */
- SARLUC(interruptInProgress);
- SARLUC(cntrlRegImage);
-
- SARLUCN(_3, 14);
- SARLUC(commandByte);
- SARLUCN(commandParameter, 15);
-
- /* Receive Status - 0x00a0 */
- SARLUC(rxStatus);
- SARLUC(rxFrmType);
- SARLUS(rxOffset);
- SARLUS(rxLength);
- SARLUCN(rxSrc, 6);
- SARLUC(rxBroadcastFlag);
- SARLUC(rxQuality);
- SARLUC(scrambled);
- SARLUCN(_4, 1);
-
- /* Transmit Status - 0x00b0 */
- SARLUC(txStatus);
- SARLUC(txAckQuality);
- SARLUC(numRetries);
- SARLUCN(_5, 14);
- SARLUCN(registeredRouter, 6);
- SARLUCN(backboneRouter, 6);
- SARLUC(registrationStatus);
- SARLUC(configuredStatusFlag);
- SARLUCN(_6, 1);
- SARLUCN(ultimateDestAddress, 6);
- SARLUCN(immedDestAddress, 6);
- SARLUCN(immedSrcAddress, 6);
- SARLUS(rxSequenceNumber);
- SARLUC(assignedLocaltalkAddress);
- SARLUCN(_7, 27);
-
- /* System Parameter Block */
-
- /* - Driver Parameters (Novell Specific) */
-
- SARLUS(txTimeout);
- SARLUS(transportTime);
- SARLUCN(_8, 4);
-
- /* - Configuration Parameters */
- SARLUC(irqLevel);
- SARLUC(spreadingCode);
- SARLUC(channelSet);
- SARLUC(channelNumber);
- SARLUS(radioNodeId);
- SARLUCN(_9, 2);
- SARLUC(scramblingDisable);
- SARLUC(radioType);
- SARLUS(routerId);
- SARLUCN(_10, 9);
- SARLUC(txAttenuation);
- SARLUIA(systemId);
- SARLUS(globalChecksum);
- SARLUCN(_11, 4);
- SARLUS(maxDatagramSize);
- SARLUS(maxFrameSize);
- SARLUC(maxRetries);
- SARLUC(receiveMode);
- SARLUC(priority);
- SARLUC(rootOrRepeater);
- SARLUCN(specifiedRouter, 6);
- SARLUS(fastPollPeriod);
- SARLUC(pollDecay);
- SARLUSA(fastPollDelay);
- SARLUC(arlThreshold);
- SARLUC(arlDecay);
- SARLUCN(_12, 1);
- SARLUS(specRouterTimeout);
- SARLUCN(_13, 5);
-
- /* Scrambled Area */
- SARLUIA(SID);
- SARLUCN(encryptionKey, 12);
- SARLUIA(_14);
- SARLUSA(waitTime);
- SARLUSA(lParameter);
- SARLUCN(_15, 3);
- SARLUS(headerSize);
- SARLUS(sectionChecksum);
-
- SARLUC(registrationMode);
- SARLUC(registrationFill);
- SARLUS(pollPeriod);
- SARLUS(refreshPeriod);
- SARLSTR(name, 16);
- SARLUCN(NID, 6);
- SARLUC(localTalkAddress);
- SARLUC(codeFormat);
- SARLUC(numChannels);
- SARLUC(channel1);
- SARLUC(channel2);
- SARLUC(channel3);
- SARLUC(channel4);
- SARLUCN(SSCode, 59);
-
-/* SARLUCN( _16, 0x140);
- */
- /* Statistics Block - 0x0300 */
- SARLUC(hostcpuLock);
- SARLUC(lancpuLock);
- SARLUCN(resetTime, 18);
- SARLUIA(numDatagramsTransmitted);
- SARLUIA(numReTransmissions);
- SARLUIA(numFramesDiscarded);
- SARLUIA(numDatagramsReceived);
- SARLUIA(numDuplicateReceivedFrames);
- SARLUIA(numDatagramsDiscarded);
- SARLUS(maxNumReTransmitDatagram);
- SARLUS(maxNumReTransmitFrames);
- SARLUS(maxNumConsecutiveDuplicateFrames);
- /* misaligned here so we have to go to characters */
- SARLUIA(numBytesTransmitted);
- SARLUIA(numBytesReceived);
- SARLUIA(numCRCErrors);
- SARLUIA(numLengthErrors);
- SARLUIA(numAbortErrors);
- SARLUIA(numTXUnderruns);
- SARLUIA(numRXOverruns);
- SARLUIA(numHoldOffs);
- SARLUIA(numFramesTransmitted);
- SARLUIA(numFramesReceived);
- SARLUIA(numReceiveFramesLost);
- SARLUIA(numRXBufferOverflows);
- SARLUIA(numFramesDiscardedAddrMismatch);
- SARLUIA(numFramesDiscardedSIDMismatch);
- SARLUIA(numPollsTransmistted);
- SARLUIA(numPollAcknowledges);
- SARLUIA(numStatusTimeouts);
- SARLUIA(numNACKReceived);
- SARLUS(auxCmd);
- SARLUCN(dumpPtr, 4);
- SARLUC(dumpVal);
- SARLUC(wireTest);
-
- /* next 4 seems too long for procfs, over single page ?
- SARLUCN( _17, 0x86);
- SARLUCN( txBuffer, 0x800);
- SARLUCN( rxBuffer, 0x800);
- SARLUCN( _18, 0x0bff);
- */
-
- pos += sprintf(arlan_drive_info + pos, "rxRing\t=\t0x");
- for (i = 0; i < 0x50; i++)
- pos += sprintf(arlan_drive_info + pos, "%02x", ((char *) priva->conf)[priva->conf->rxOffset + i]);
- pos += sprintf(arlan_drive_info + pos, "\n");
-
- SARLUC(configStatus);
- SARLUC(_22);
- SARLUC(progIOCtrl);
- SARLUC(shareMBase);
- SARLUC(controlRegister);
-
- pos += sprintf(arlan_drive_info + pos, " total %d chars\n", pos);
- if (ctl)
- if (ctl->procname)
- pos += sprintf(arlan_drive_info + pos, " driver name : %s\n", ctl->procname);
-final:
- *lenp = pos;
-
- if (!write)
- retv = proc_dostring(ctl, write, buffer, lenp, ppos);
- else
- {
- *lenp = 0;
- return -1;
- }
- return retv;
-}
-
-
-static int arlan_sysctl_info161719(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLUCN(_16, 0xC0);
- SARLUCN(_17, 0x6A);
- SARLUCN(_18, 14);
- SARLUCN(_19, 0x86);
- SARLUCN(_21, 0x3fd);
-
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_infotxRing(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, txBuffer, 0x800);
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_inforxRing(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- } else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, rxBuffer, 0x800);
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_info18(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, _18, 0x800);
-
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, buffer, lenp, ppos);
- return retv;
-}
-
-
-#endif /* #ifdef ARLAN_PROC_SHM_DUMP */
-
-
-static char conf_reset_result[200];
-
-static int arlan_configure(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int pos = 0;
- int devnum = ctl->procname[6] - '0';
- struct arlan_private *priv;
-
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
-		printk(KERN_WARNING "invalid device number in procfs parse\n");
- return -1;
- }
- else if (arlan_device[devnum] != NULL)
- {
- priv = netdev_priv(arlan_device[devnum]);
-
- arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_CONF);
- }
- else
- return -1;
-
- *lenp = pos;
- return proc_dostring(ctl, write, buffer, lenp, ppos);
-}
-
-static int arlan_sysctl_reset(ctl_table * ctl, int write,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int pos = 0;
- int devnum = ctl->procname[5] - '0';
- struct arlan_private *priv;
-
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
-		printk(KERN_WARNING "invalid device number in procfs parse\n");
- return -1;
- }
- else if (arlan_device[devnum] != NULL)
- {
- priv = netdev_priv(arlan_device[devnum]);
- arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_RESET);
-
- } else
- return -1;
- *lenp = pos + 3;
- return proc_dostring(ctl, write, buffer, lenp, ppos);
-}
-
-
-/* Place files in /proc/sys/dev/arlan */
-#define CTBLN(card,nam) \
- { .procname = #nam,\
- .data = &(arlan_conf[card].nam),\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec}
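
To make the table layout concrete, an entry such as CTBLN(0, channelNumber) expands to roughly the following (a sketch of the macro expansion above, not an extra table member):

	{ .procname	= "channelNumber",
	  .data		= &(arlan_conf[0].channelNumber),
	  .maxlen	= sizeof(int),
	  .mode		= 0600,
	  .proc_handler	= proc_dointvec }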
-#ifdef ARLAN_DEBUGGING
-
-#define ARLAN_PROC_DEBUG_ENTRIES \
- { .procname = "entry_exit_debug",\
- .data = &arlan_entry_and_exit_debug,\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec},\
- { .procname = "debug", .data = &arlan_debug,\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec},
-#else
-#define ARLAN_PROC_DEBUG_ENTRIES
-#endif
-
-#define ARLAN_SYSCTL_TABLE_TOTAL(cardNo)\
- CTBLN(cardNo,spreadingCode),\
- CTBLN(cardNo, channelNumber),\
- CTBLN(cardNo, scramblingDisable),\
- CTBLN(cardNo, txAttenuation),\
- CTBLN(cardNo, systemId), \
- CTBLN(cardNo, maxDatagramSize),\
- CTBLN(cardNo, maxFrameSize),\
- CTBLN(cardNo, maxRetries),\
- CTBLN(cardNo, receiveMode),\
- CTBLN(cardNo, priority),\
- CTBLN(cardNo, rootOrRepeater),\
- CTBLN(cardNo, SID),\
- CTBLN(cardNo, registrationMode),\
- CTBLN(cardNo, registrationFill),\
- CTBLN(cardNo, localTalkAddress),\
- CTBLN(cardNo, codeFormat),\
- CTBLN(cardNo, numChannels),\
- CTBLN(cardNo, channel1),\
- CTBLN(cardNo, channel2),\
- CTBLN(cardNo, channel3),\
- CTBLN(cardNo, channel4),\
- CTBLN(cardNo, txClear),\
- CTBLN(cardNo, txRetries),\
- CTBLN(cardNo, txRouting),\
- CTBLN(cardNo, txScrambled),\
- CTBLN(cardNo, rxParameter),\
- CTBLN(cardNo, txTimeoutMs),\
- CTBLN(cardNo, waitCardTimeout),\
- CTBLN(cardNo, channelSet), \
- { .procname = "name",\
- .data = arlan_conf[cardNo].siteName,\
- .maxlen = 16, .mode = 0600, .proc_handler = proc_dostring},\
- CTBLN(cardNo,waitTime),\
- CTBLN(cardNo,lParameter),\
- CTBLN(cardNo,_15),\
- CTBLN(cardNo,headerSize),\
- CTBLN(cardNo,tx_delay_ms),\
- CTBLN(cardNo,retries),\
- CTBLN(cardNo,ReTransmitPacketMaxSize),\
- CTBLN(cardNo,waitReTransmitPacketMaxSize),\
- CTBLN(cardNo,fastReTransCount),\
- CTBLN(cardNo,driverRetransmissions),\
- CTBLN(cardNo,txAckTimeoutMs),\
- CTBLN(cardNo,registrationInterrupts),\
- CTBLN(cardNo,hardwareType),\
- CTBLN(cardNo,radioType),\
- CTBLN(cardNo,writeEEPROM),\
- CTBLN(cardNo,writeRadioType),\
- ARLAN_PROC_DEBUG_ENTRIES\
- CTBLN(cardNo,in_speed),\
- CTBLN(cardNo,out_speed),\
- CTBLN(cardNo,in_speed10),\
- CTBLN(cardNo,out_speed10),\
- CTBLN(cardNo,in_speed_max),\
- CTBLN(cardNo,out_speed_max),\
- CTBLN(cardNo,measure_rate),\
- CTBLN(cardNo,pre_Command_Wait),\
- CTBLN(cardNo,rx_tweak1),\
- CTBLN(cardNo,rx_tweak2),\
- CTBLN(cardNo,tx_queue_len),\
-
-
-
-static ctl_table arlan_conf_table0[] =
-{
- ARLAN_SYSCTL_TABLE_TOTAL(0)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .procname = "arlan0-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_infotxRing,
- },
- {
- .procname = "arlan0-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_inforxRing,
- },
- {
- .procname = "arlan0-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info18,
- },
- {
- .procname = "arlan0-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info161719,
- },
- {
- .procname = "arlan0-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info,
- },
-#endif
- {
- .procname = "config0",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_configure
- },
- {
- .procname = "reset0",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_sysctl_reset,
- },
- { }
-};
-
-static ctl_table arlan_conf_table1[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(1)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .procname = "arlan1-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_infotxRing,
- },
- {
- .procname = "arlan1-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_inforxRing,
- },
- {
- .procname = "arlan1-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info18,
- },
- {
- .procname = "arlan1-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info161719,
- },
- {
- .procname = "arlan1-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info,
- },
-#endif
- {
- .procname = "config1",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_configure,
- },
- {
- .procname = "reset1",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_sysctl_reset,
- },
- { }
-};
-
-static ctl_table arlan_conf_table2[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(2)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .procname = "arlan2-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_infotxRing,
- },
- {
- .procname = "arlan2-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_inforxRing,
- },
- {
- .procname = "arlan2-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info18,
- },
- {
- .procname = "arlan2-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info161719,
- },
- {
- .procname = "arlan2-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info,
- },
-#endif
- {
- .procname = "config2",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_configure,
- },
- {
- .procname = "reset2",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_sysctl_reset,
- },
- { }
-};
-
-static ctl_table arlan_conf_table3[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(3)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .procname = "arlan3-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_infotxRing,
- },
- {
- .procname = "arlan3-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_inforxRing,
- },
- {
- .procname = "arlan3-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info18,
- },
- {
- .procname = "arlan3-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info161719,
- },
- {
- .procname = "arlan3-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = arlan_sysctl_info,
- },
-#endif
- {
- .procname = "config3",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_configure,
- },
- {
- .procname = "reset3",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = arlan_sysctl_reset,
- },
- { }
-};
-
-
-
-static ctl_table arlan_table[] =
-{
- {
- .procname = "arlan0",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table0,
- },
- {
- .procname = "arlan1",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table1,
- },
- {
- .procname = "arlan2",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table2,
- },
- {
- .procname = "arlan3",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table3,
- },
- { }
-};
-
-#else
-
-static ctl_table arlan_table[] =
-{
- { }
-};
-#endif
-
-
-// static int mmtu = 1234;
-
-static ctl_table arlan_root_table[] =
-{
- {
- .procname = "arlan",
- .maxlen = 0,
- .mode = 0555,
- .child = arlan_table,
- },
- { }
-};
-
-
-static struct ctl_table_header *arlan_device_sysctl_header;
-
-int __init init_arlan_proc(void)
-{
-
- int i = 0;
- if (arlan_device_sysctl_header)
- return 0;
- arlan_device_sysctl_header = register_sysctl_table(arlan_root_table);
- if (!arlan_device_sysctl_header)
- return -1;
-
- return 0;
-
-}
-
-void __exit cleanup_arlan_proc(void)
-{
- unregister_sysctl_table(arlan_device_sysctl_header);
- arlan_device_sysctl_header = NULL;
-
-}
-#endif
diff --git a/drivers/staging/arlan/arlan.h b/drivers/staging/arlan/arlan.h
deleted file mode 100644
index ffcd3ea048a..00000000000
--- a/drivers/staging/arlan/arlan.h
+++ /dev/null
@@ -1,535 +0,0 @@
-/*
- * Copyright (C) 1997 Cullen Jennings
- * Copyright (C) 1998 Elmer.Joandi@ut.ee, +37-255-13500
- * GNU General Public License applies
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h> /* For the statistics structure. */
-#include <linux/if_arp.h> /* For ARPHRD_ETHER */
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-
-/* #define ARLAN_DEBUGGING 1 */
-
-#define ARLAN_PROC_INTERFACE
-#define MAX_ARLANS 4 /* not more than 4 ! */
-#define ARLAN_PROC_SHM_DUMP /* shows all card registers, makes driver way larger */
-
-#define ARLAN_MAX_MULTICAST_ADDRS 16
-#define ARLAN_RCV_CLEAN 0
-#define ARLAN_RCV_PROMISC 1
-#define ARLAN_RCV_CONTROL 2
-
-#ifdef CONFIG_PROC_FS
-extern int init_arlan_proc(void);
-extern void cleanup_arlan_proc(void);
-#else
-#define init_arlan_proc() ({ 0; })
-#define cleanup_arlan_proc() do { } while (0)
-#endif
-
-extern struct net_device *arlan_device[MAX_ARLANS];
-extern int arlan_debug;
-extern int arlan_entry_debug;
-extern int arlan_exit_debug;
-extern int testMemory;
-extern int arlan_command(struct net_device *dev, int command);
-
-#define SIDUNKNOWN -1
-#define radioNodeIdUNKNOWN -1
-#define irqUNKNOWN 0
-#define debugUNKNOWN 0
-#define testMemoryUNKNOWN 1
-#define spreadingCodeUNKNOWN 0
-#define channelNumberUNKNOWN 0
-#define channelSetUNKNOWN 0
-#define systemIdUNKNOWN -1
-#define registrationModeUNKNOWN -1
-
-
-#define IFDEBUG(L) if ((L) & arlan_debug)
-#define ARLAN_FAKE_HDR_LEN 12
-
-#ifdef ARLAN_DEBUGGING
- #define DEBUG 1
- #define ARLAN_ENTRY_EXIT_DEBUGGING 1
- #define ARLAN_DEBUG(a, b) printk(KERN_DEBUG a, b)
-#else
- #define ARLAN_DEBUG(a, b)
-#endif
-
-#define ARLAN_SHMEM_SIZE 0x2000
-
-struct arlan_shmem {
- /* Header Signature */
- volatile char textRegion[48];
- volatile u_char resetFlag;
- volatile u_char diagnosticInfo;
- volatile u_short diagnosticOffset;
- volatile u_char _1[12];
- volatile u_char lanCardNodeId[6];
- volatile u_char broadcastAddress[6];
- volatile u_char hardwareType;
- volatile u_char majorHardwareVersion;
- volatile u_char minorHardwareVersion;
- volatile u_char radioModule;/* shows EEPROM, can be overridden at 0x111 */
-	volatile u_char defaultChannelSet;	/* shows EEPROM, can be overridden at 0x10A */
- volatile u_char _2[47];
-
- /* Control/Status Block - 0x0080 */
- volatile u_char interruptInProgress; /* not used by lancpu */
- volatile u_char cntrlRegImage; /* not used by lancpu */
- volatile u_char _3[13];
- volatile u_char dumpByte;
- volatile u_char commandByte; /* non-zero = active */
- volatile u_char commandParameter[15];
-
- /* Receive Status - 0x00a0 */
- volatile u_char rxStatus; /* 1- data, 2-control, 0xff - registr change */
- volatile u_char rxFrmType;
- volatile u_short rxOffset;
- volatile u_short rxLength;
- volatile u_char rxSrc[6];
- volatile u_char rxBroadcastFlag;
- volatile u_char rxQuality;
- volatile u_char scrambled;
- volatile u_char _4[1];
-
- /* Transmit Status - 0x00b0 */
- volatile u_char txStatus;
- volatile u_char txAckQuality;
- volatile u_char numRetries;
- volatile u_char _5[14];
- volatile u_char registeredRouter[6];
- volatile u_char backboneRouter[6];
- volatile u_char registrationStatus;
- volatile u_char configuredStatusFlag;
- volatile u_char _6[1];
- volatile u_char ultimateDestAddress[6];
- volatile u_char immedDestAddress[6];
- volatile u_char immedSrcAddress[6];
- volatile u_short rxSequenceNumber;
- volatile u_char assignedLocaltalkAddress;
- volatile u_char _7[27];
-
- /* System Parameter Block */
-
- /* - Driver Parameters (Novell Specific) */
-
- volatile u_short txTimeout;
- volatile u_short transportTime;
- volatile u_char _8[4];
-
- /* - Configuration Parameters */
- volatile u_char irqLevel;
- volatile u_char spreadingCode;
- volatile u_char channelSet;
- volatile u_char channelNumber;
- volatile u_short radioNodeId;
- volatile u_char _9[2];
- volatile u_char scramblingDisable;
- volatile u_char radioType;
- volatile u_short routerId;
- volatile u_char _10[9];
- volatile u_char txAttenuation;
- volatile u_char systemId[4];
- volatile u_short globalChecksum;
- volatile u_char _11[4];
- volatile u_short maxDatagramSize;
- volatile u_short maxFrameSize;
- volatile u_char maxRetries;
- volatile u_char receiveMode;
- volatile u_char priority;
- volatile u_char rootOrRepeater;
- volatile u_char specifiedRouter[6];
- volatile u_short fastPollPeriod;
- volatile u_char pollDecay;
- volatile u_char fastPollDelay[2];
- volatile u_char arlThreshold;
- volatile u_char arlDecay;
- volatile u_char _12[1];
- volatile u_short specRouterTimeout;
- volatile u_char _13[5];
-
- /* Scrambled Area */
- volatile u_char SID[4];
- volatile u_char encryptionKey[12];
- volatile u_char _14[2];
- volatile u_char waitTime[2];
- volatile u_char lParameter[2];
- volatile u_char _15[3];
- volatile u_short headerSize;
- volatile u_short sectionChecksum;
-
- volatile u_char registrationMode;
- volatile u_char registrationFill;
- volatile u_short pollPeriod;
- volatile u_short refreshPeriod;
- volatile u_char name[16];
- volatile u_char NID[6];
- volatile u_char localTalkAddress;
- volatile u_char codeFormat;
- volatile u_char numChannels;
- volatile u_char channel1;
- volatile u_char channel2;
- volatile u_char channel3;
- volatile u_char channel4;
- volatile u_char SSCode[59];
-
- volatile u_char _16[0xC0];
- volatile u_short auxCmd;
- volatile u_char dumpPtr[4];
- volatile u_char dumpVal;
- volatile u_char _17[0x6A];
- volatile u_char wireTest;
- volatile u_char _18[14];
-
- /* Statistics Block - 0x0300 */
- volatile u_char hostcpuLock;
- volatile u_char lancpuLock;
- volatile u_char resetTime[18];
-
- volatile u_char numDatagramsTransmitted[4];
- volatile u_char numReTransmissions[4];
- volatile u_char numFramesDiscarded[4];
- volatile u_char numDatagramsReceived[4];
- volatile u_char numDuplicateReceivedFrames[4];
- volatile u_char numDatagramsDiscarded[4];
-
- volatile u_short maxNumReTransmitDatagram;
- volatile u_short maxNumReTransmitFrames;
- volatile u_short maxNumConsecutiveDuplicateFrames;
- /* misaligned here so we have to go to characters */
-
- volatile u_char numBytesTransmitted[4];
- volatile u_char numBytesReceived[4];
- volatile u_char numCRCErrors[4];
- volatile u_char numLengthErrors[4];
- volatile u_char numAbortErrors[4];
- volatile u_char numTXUnderruns[4];
- volatile u_char numRXOverruns[4];
- volatile u_char numHoldOffs[4];
- volatile u_char numFramesTransmitted[4];
- volatile u_char numFramesReceived[4];
- volatile u_char numReceiveFramesLost[4];
- volatile u_char numRXBufferOverflows[4];
- volatile u_char numFramesDiscardedAddrMismatch[4];
- volatile u_char numFramesDiscardedSIDMismatch[4];
- volatile u_char numPollsTransmistted[4];
- volatile u_char numPollAcknowledges[4];
- volatile u_char numStatusTimeouts[4];
- volatile u_char numNACKReceived[4];
-
- volatile u_char _19[0x86];
-
- volatile u_char txBuffer[0x800];
- volatile u_char rxBuffer[0x800];
-
- volatile u_char _20[0x800];
- volatile u_char _21[0x3fb];
- volatile u_char configStatus;
- volatile u_char _22;
- volatile u_char progIOCtrl;
- volatile u_char shareMBase;
- volatile u_char controlRegister;
-};
-
-struct arlan_conf_stru {
- int spreadingCode;
- int channelSet;
- int channelNumber;
- int scramblingDisable;
- int txAttenuation;
- int systemId;
- int maxDatagramSize;
- int maxFrameSize;
- int maxRetries;
- int receiveMode;
- int priority;
- int rootOrRepeater;
- int SID;
- int radioNodeId;
- int registrationMode;
- int registrationFill;
- int localTalkAddress;
- int codeFormat;
- int numChannels;
- int channel1;
- int channel2;
- int channel3;
- int channel4;
- int txClear;
- int txRetries;
- int txRouting;
- int txScrambled;
- int rxParameter;
- int txTimeoutMs;
- int txAckTimeoutMs;
- int waitCardTimeout;
- int waitTime;
- int lParameter;
- int _15;
- int headerSize;
- int retries;
- int tx_delay_ms;
- int waitReTransmitPacketMaxSize;
- int ReTransmitPacketMaxSize;
- int fastReTransCount;
- int driverRetransmissions;
- int registrationInterrupts;
- int hardwareType;
- int radioType;
- int writeRadioType;
- int writeEEPROM;
- char siteName[17];
- int measure_rate;
- int in_speed;
- int out_speed;
- int in_speed10;
- int out_speed10;
- int in_speed_max;
- int out_speed_max;
- int pre_Command_Wait;
- int rx_tweak1;
- int rx_tweak2;
- int tx_queue_len;
-};
-
-extern struct arlan_conf_stru arlan_conf[MAX_ARLANS];
-
-struct TxParam {
- volatile short offset;
- volatile short length;
- volatile u_char dest[6];
- volatile unsigned char clear;
- volatile unsigned char retries;
- volatile unsigned char routing;
- volatile unsigned char scrambled;
-};
-
-#define TX_RING_SIZE 2
-/* Information that needs to be kept for each board. */
-struct arlan_private {
- struct arlan_shmem __iomem *card;
- struct arlan_shmem *conf;
-
- struct arlan_conf_stru *Conf;
- int bad;
- int reset;
- unsigned long lastReset;
- struct timer_list timer;
- struct timer_list tx_delay_timer;
- struct timer_list tx_retry_timer;
- struct timer_list rx_check_timer;
-
- int registrationLostCount;
- int reRegisterExp;
- int irq_test_done;
-
- struct TxParam txRing[TX_RING_SIZE];
- char reTransmitBuff[0x800];
- int txLast;
- unsigned ReTransmitRequested;
- unsigned long tx_done_delayed;
- unsigned long registrationLastSeen;
-
- unsigned long tx_last_sent;
- unsigned long tx_last_cleared;
- unsigned long retransmissions;
- unsigned long interrupt_ack_requested;
- spinlock_t lock;
- unsigned long waiting_command_mask;
- unsigned long card_polling_interval;
- unsigned long last_command_buff_free_time;
-
- int under_reset;
- int under_config;
- int rx_command_given;
- int tx_command_given;
- unsigned long interrupt_processing_active;
- unsigned long last_rx_int_ack_time;
- unsigned long in_bytes;
- unsigned long out_bytes;
- unsigned long in_time;
- unsigned long out_time;
- unsigned long in_time10;
- unsigned long out_time10;
- unsigned long in_bytes10;
- unsigned long out_bytes10;
- int init_etherdev_alloc;
-};
-
-
-
-#define ARLAN_CLEAR 0x00
-#define ARLAN_RESET 0x01
-#define ARLAN_CHANNEL_ATTENTION 0x02
-#define ARLAN_INTERRUPT_ENABLE 0x04
-#define ARLAN_CLEAR_INTERRUPT 0x08
-#define ARLAN_POWER 0x40
-#define ARLAN_ACCESS 0x80
-
-#define ARLAN_COM_CONF 0x01
-#define ARLAN_COM_RX_ENABLE 0x03
-#define ARLAN_COM_RX_ABORT 0x04
-#define ARLAN_COM_TX_ENABLE 0x05
-#define ARLAN_COM_TX_ABORT 0x06
-#define ARLAN_COM_NOP 0x07
-#define ARLAN_COM_STANDBY 0x08
-#define ARLAN_COM_ACTIVATE 0x09
-#define ARLAN_COM_GOTO_SLOW_POLL 0x0a
-#define ARLAN_COM_INT 0x80
-
-
-#define TXLAST(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[((struct arlan_private *)netdev_priv(dev))->txLast])
-#define TXHEAD(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[0])
-#define TXTAIL(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[1])
-
-#define TXBuffStart(dev) offsetof(struct arlan_shmem, txBuffer)
-#define TXBuffEnd(dev) offsetof(struct arlan_shmem, rxBuffer)
-
-#define READSHM(to, from, atype) {\
- atype tmp;\
- memcpy_fromio(&(tmp), &(from), sizeof(atype));\
- to = tmp;\
- }
-
-#define READSHMEM(from, atype)\
- atype from; \
- READSHM(from, arlan->from, atype);
-
-#define WRITESHM(to, from, atype) \
- { atype tmpSHM = from;\
- memcpy_toio(&(to), &tmpSHM, sizeof(atype));\
- }
-
-#define DEBUGSHM(levelSHM, stringSHM, stuff, atype) \
- { atype tmpSHM; \
- memcpy_fromio(&tmpSHM, &(stuff), sizeof(atype));\
- IFDEBUG(levelSHM) printk(stringSHM, tmpSHM);\
- }
-
-#define WRITESHMB(to, val) \
- writeb(val, &(to))
-#define READSHMB(to) \
- readb(&(to))
-#define WRITESHMS(to, val) \
- writew(val, &(to))
-#define READSHMS(to) \
- readw(&(to))
-#define WRITESHMI(to, val) \
- writel(val, &(to))
-#define READSHMI(to) \
- readl(&(to))
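
As a usage sketch of the accessors above (local variable names are illustrative, with arlan pointing at the card's shared memory as elsewhere in the driver): READSHM copies a whole typed field out of card memory via memcpy_fromio(), while the SHMB/SHMS/SHMI variants wrap single readb()/writeb(), readw()/writew() and readl()/writel() accesses.

	u_short rx_len;

	READSHM(rx_len, arlan->rxLength, u_short);	/* typed copy from card memory */
	if (READSHMB(arlan->rxStatus))			/* single byte read */
		WRITESHMB(arlan->rxStatus, 0x00);	/* acknowledge the frame */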
-
-
-
-
-
-#define registrationBad(dev)\
- (( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationMode) > 0) && \
- ( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationStatus) == 0))
-
-
-#define readControlRegister(dev)\
- READSHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage)
-
-#define writeControlRegister(dev, v) {\
- WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage, ((v) & 0xF));\
- WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->controlRegister, (v)); }
-
-
-#define arlan_interrupt_lancpu(dev) {\
- int cr; \
- \
- cr = readControlRegister(dev);\
- if (cr & ARLAN_CHANNEL_ATTENTION) { \
- writeControlRegister(dev, (cr & ~ARLAN_CHANNEL_ATTENTION));\
- } else \
- writeControlRegister(dev, (cr | ARLAN_CHANNEL_ATTENTION));\
-}
-
-#define clearChannelAttention(dev) { \
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_CHANNEL_ATTENTION); }
-#define setHardwareReset(dev) {\
- writeControlRegister(dev, readControlRegister(dev) | ARLAN_RESET); }
-#define clearHardwareReset(dev) {\
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_RESET); }
-#define setInterruptEnable(dev) {\
- writeControlRegister(dev, readControlRegister(dev) | ARLAN_INTERRUPT_ENABLE) ; }
-#define clearInterruptEnable(dev) {\
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_INTERRUPT_ENABLE) ; }
-#define setClearInterrupt(dev) {\
- writeControlRegister(dev, readControlRegister(dev) | ARLAN_CLEAR_INTERRUPT) ; }
-#define clearClearInterrupt(dev) {\
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_CLEAR_INTERRUPT); }
-#define setPowerOff(dev) {\
-	writeControlRegister(dev, readControlRegister(dev) | (ARLAN_POWER | ARLAN_ACCESS));\
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_ACCESS); }
-#define setPowerOn(dev) {\
- writeControlRegister(dev, readControlRegister(dev) & ~(ARLAN_POWER)); }
-#define arlan_lock_card_access(dev) {\
- writeControlRegister(dev, readControlRegister(dev) & ~ARLAN_ACCESS); }
-#define arlan_unlock_card_access(dev) {\
- writeControlRegister(dev, readControlRegister(dev) | ARLAN_ACCESS); }
-
-
-
-
-#define ARLAN_COMMAND_RX 0x000001
-#define ARLAN_COMMAND_NOOP 0x000002
-#define ARLAN_COMMAND_NOOPINT 0x000004
-#define ARLAN_COMMAND_TX 0x000008
-#define ARLAN_COMMAND_CONF 0x000010
-#define ARLAN_COMMAND_RESET 0x000020
-#define ARLAN_COMMAND_TX_ABORT 0x000040
-#define ARLAN_COMMAND_RX_ABORT 0x000080
-#define ARLAN_COMMAND_POWERDOWN 0x000100
-#define ARLAN_COMMAND_POWERUP 0x000200
-#define ARLAN_COMMAND_SLOW_POLL 0x000400
-#define ARLAN_COMMAND_ACTIVATE 0x000800
-#define ARLAN_COMMAND_INT_ACK 0x001000
-#define ARLAN_COMMAND_INT_ENABLE 0x002000
-#define ARLAN_COMMAND_WAIT_NOW 0x004000
-#define ARLAN_COMMAND_LONG_WAIT_NOW 0x008000
-#define ARLAN_COMMAND_STANDBY 0x010000
-#define ARLAN_COMMAND_INT_RACK 0x020000
-#define ARLAN_COMMAND_INT_RENABLE 0x040000
-#define ARLAN_COMMAND_CONF_WAIT 0x080000
-#define ARLAN_COMMAND_TBUSY_CLEAR 0x100000
-#define ARLAN_COMMAND_CLEAN_AND_CONF (ARLAN_COMMAND_TX_ABORT\
- | ARLAN_COMMAND_RX_ABORT\
- | ARLAN_COMMAND_CONF)
-#define ARLAN_COMMAND_CLEAN_AND_RESET (ARLAN_COMMAND_TX_ABORT\
- | ARLAN_COMMAND_RX_ABORT\
- | ARLAN_COMMAND_RESET)
-
-
-#define ARLAN_DEBUG_CHAIN_LOCKS 0x00001
-#define ARLAN_DEBUG_RESET 0x00002
-#define ARLAN_DEBUG_TIMING 0x00004
-#define ARLAN_DEBUG_CARD_STATE 0x00008
-#define ARLAN_DEBUG_TX_CHAIN 0x00010
-#define ARLAN_DEBUG_MULTICAST 0x00020
-#define ARLAN_DEBUG_HEADER_DUMP 0x00040
-#define ARLAN_DEBUG_INTERRUPT 0x00080
-#define ARLAN_DEBUG_STARTUP 0x00100
-#define ARLAN_DEBUG_SHUTDOWN 0x00200
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 7ebecc92c61..5b279fb30f3 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -771,7 +771,7 @@ static struct usb_driver oled_driver = {
};
static CLASS_ATTR_STRING(version, S_IRUGO,
- ASUS_OLED_UNDERSCORE_NAME " " ASUS_OLED_VERSION);
+ ASUS_OLED_UNDERSCORE_NAME " " ASUS_OLED_VERSION);
static int __init asus_oled_init(void)
{
diff --git a/drivers/staging/batman-adv/CHANGELOG b/drivers/staging/batman-adv/CHANGELOG
index 8a181639cea..c8f9d9e06bb 100644
--- a/drivers/staging/batman-adv/CHANGELOG
+++ b/drivers/staging/batman-adv/CHANGELOG
@@ -1,3 +1,17 @@
+batman-adv 0.2.1:
+
+* support latest kernels (2.6.20 - 2.6.33)
+* receive packets directly using skbs, remove old sockets and threads
+* fix various regressions in the vis server
+* don't disable interrupts while sending
+* replace internal logging mechanism with standard kernel logging
+* move vis formats into userland, one general format remains in the kernel
+* allow MAC address to be set, correctly initialize them
+* code refactoring and cleaning for coding style
+* many bugs (null pointers, locking, hash iterators) squashed
+
+ -- Sun, 21 Mar 2010 20:46:47 +0100
+
batman-adv 0.2:
* support latest kernels (2.6.20 - 2.6.31)
diff --git a/drivers/staging/batman-adv/Makefile b/drivers/staging/batman-adv/Makefile
index 42b4e637026..f25068c0fae 100644
--- a/drivers/staging/batman-adv/Makefile
+++ b/drivers/staging/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
@@ -19,4 +19,4 @@
#
obj-m += batman-adv.o
-batman-adv-objs := main.o proc.o send.o routing.o soft-interface.o device.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o
+batman-adv-objs := main.o send.o routing.o soft-interface.o device.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o bat_sysfs.o
diff --git a/drivers/staging/batman-adv/README b/drivers/staging/batman-adv/README
index 7d666ad0435..14244a2c4e4 100644
--- a/drivers/staging/batman-adv/README
+++ b/drivers/staging/batman-adv/README
@@ -1,149 +1,240 @@
-[state: 06-01-2010]
+[state: 03-05-2010]
BATMAN-ADV
----------
-Batman-advanced is a new approach to wireless networking which does no longer
-operate on the IP basis. Unlike B.A.T.M.A.N, which exchanges information
-using UDP packets and sets routing tables, batman-advanced operates on ISO/OSI
-Layer 2 only and uses and routes (or better: bridges) Ethernet Frames. It
-emulates a virtual network switch of all nodes participating. Therefore all
-nodes appear to be link local, thus all higher operating protocols won't be
-affected by any changes within the network. You can run almost any protocol
-above B.A.T.M.A.N. Advanced, prominent examples are: IPv4, IPv6, DHCP, IPX.
+Batman advanced is a new approach to wireless networking which
+no longer operates on an IP basis. Unlike the batman daemon,
+which exchanges information using UDP packets and sets routing
+tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
+and routes (or better: bridges) Ethernet Frames. It emulates a
+virtual network switch of all nodes participating. Therefore all
+nodes appear to be link local, thus all higher operating proto-
+cols won't be affected by any changes within the network. You can
+run almost any protocol above batman advanced, prominent examples
+are: IPv4, IPv6, DHCP, IPX.
-This is batman-advanced implemented as Linux kernel driver. It does not depend
-on any network (other) driver, and can be used on wifi as well as ethernet,
-vpn, etc ... (anything with ethernet-style layer 2).
+Batman advanced was implemented as a Linux kernel driver to re-
+duce the overhead to a minimum. It does not depend on any (other)
+network driver, and can be used on wifi as well as ethernet lan,
+vpn, etc ... (anything with ethernet-style layer 2).
-USAGE
------
+CONFIGURATION
+-------------
-insmod the batman-adv.ko in your kernel:
+Load the batman-adv module into your kernel:
# insmod batman-adv.ko
-the module is now waiting for activation. You must add some interfaces
-on which batman can operate. Each interface must be added separately:
+The module is now waiting for activation. You must add some in-
+terfaces on which batman can operate. After loading the module
+batman advanced will scan your system's interfaces to search for
+compatible interfaces. Once found, it will create subfolders in
+the /sys directories of each supported interface, e.g.
+
+# ls /sys/class/net/eth0/batman_adv/
+# iface_status mesh_iface
+
+If an interface does not have the "batman_adv" subfolder, it is
+probably not supported. Unsupported interfaces are: loopback,
+non-ethernet and batman's own interfaces.
+
+Note: After the module has been loaded it will continuously watch
+for new interfaces to verify their compatibility. There is no
+need to reload the module if you plug your USB wifi adapter into
+your machine after batman advanced was initially loaded.
+
+To activate a given interface simply write "bat0" into its
+"mesh_iface" file inside the batman_adv subfolder:
+
+# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
+
+Repeat this step for all interfaces you wish to add. Now batman
+starts using and broadcasting on these interfaces.
-# echo wlan0 > /proc/net/batman-adv/interfaces
+By reading the "iface_status" file you can check its status:
-( # echo wlan1 > /proc/net/batman-adv/interfaces )
-( # echo eth0 > /proc/net/batman-adv/interfaces )
-( ... )
+# cat /sys/class/net/eth0/batman_adv/iface_status
+# active
-Now batman starts broadcasting on this interface.
-You can now view the table of originators (mesh participants) with:
+To deactivate an interface you have to write "none" into its
+"mesh_iface" file:
-# cat /proc/net/batman-adv/originators
+# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
-The module will create a new interface "bat0", which can be used as a
-regular interface:
-# ifconfig bat0 inet 192.168.0.1 up
-# ping 192.168.0.2
-...
+All mesh-wide settings can be found in batman's own interface
+folder:
-If you want topology visualization, your meshnode must be configured
-as VIS-server:
+# ls /sys/class/net/bat0/mesh/
+# aggregate_ogm originators transtable_global vis_mode
+# orig_interval transtable_local vis_data
-# echo "server" > /proc/net/batman-adv/vis
-Each node is either configured as "server" or as "client" (default:
-"client"). Clients send their topology data to the server next to them,
-and server synchronize with other servers. If there is no server
-configured (default) within the mesh, no topology information will be
-transmitted. With these "synchronizing servers", there can be 1 or
-more vis servers sharing the same (or at least very similar) data.
+Some of the files contain all sorts of status information regard-
+ing the mesh network. For example, you can view the table of
+originators (mesh participants) with:
-When configured as server, you can get a topology snapshot of your mesh:
+# cat /sys/class/net/bat0/mesh/originators
-# cat /proc/net/batman-adv/vis
+Other files allow you to change batman's behaviour to better fit
+your requirements. For instance, you can check the current orig-
+inator interval (the value in milliseconds which determines how
+often batman sends its broadcast packets):
-The output is in a generic raw format. Use the batctl tool (See below)
-to convert this to other formats more suitable for graphing, eg
-graphviz dot, or JSON data-interchange format.
+# cat /sys/class/net/bat0/mesh/orig_interval
+# status: 1000
+
+and also change its value:
+
+# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
In very mobile scenarios, you might want to adjust the originator
-interval to a lower value. This will make the mesh more responsive to
-topology changes, but will also increase the overhead. Please make sure
-that all nodes in your mesh use the same interval. The default value
-is 1000 ms (1 second).
+interval to a lower value. This will make the mesh more respon-
+sive to topology changes, but will also increase the overhead.
+
+
+USAGE
+-----
+
+To make use of your newly created mesh, batman advanced provides
+a new interface "bat0" which you should use from this point on.
+The interfaces added to batman advanced are no longer relevant
+because batman handles them for you. Basically, one "hands
+over" the data by using the batman interface and batman will make
+sure it reaches its destination.
-# echo 1000 > /proc/net/batman-adv/orig_interval
+The "bat0" interface can be used like any other regular inter-
+face. It needs an IP address which can be configured either
+statically or dynamically (by using DHCP or similar services):
-To deactivate batman, do:
+# NodeA: ifconfig bat0 192.168.0.1
+# NodeB: ifconfig bat0 192.168.0.2
+# NodeB: ping 192.168.0.1
+
+Note: In order to avoid problems, remove all IP addresses previ-
+ously assigned to interfaces now used by batman advanced, e.g.
+
+# ifconfig eth0 0.0.0.0
+
+
+VISUALIZATION
+-------------
+
+If you want topology visualization, at least one mesh node must
+be configured as VIS-server:
+
+# echo "server" > /sys/class/net/bat0/mesh/vis_mode
+
+Each node is either configured as "server" or as "client" (de-
+fault: "client"). Clients send their topology data to the server
+next to them, and servers synchronize with other servers. If there
+is no server configured (default) within the mesh, no topology
+information will be transmitted. With these "synchronizing
+servers", there can be 1 or more vis servers sharing the same (or
+at least very similar) data.
+
+When configured as server, you can get a topology snapshot of
+your mesh:
+
+# cat /sys/class/net/bat0/mesh/vis_data
+
+This raw output is intended to be easily parsable and convert-
+ible with other tools. Have a look at the batctl README if you
+want a vis output in dot or json format, for instance, and to
+see how those outputs can then be visualised as an image.
+
+The raw format consists of comma separated values per entry,
+where each entry gives information about a certain source inter-
+face. Each entry can contain the following values:
+-> "mac" - mac address of an originator's source interface
+ (each line begins with it)
+-> "TQ mac value" - src mac's link quality towards mac address
+ of a neighbor originator's interface which
+ is being used for routing
+-> "HNA mac" - HNA announced by source mac
+-> "PRIMARY" - this is a primary interface
+-> "SEC mac" - secondary mac address of source
+ (requires preceding PRIMARY)
+
+The TQ value has a range from 4 to 255 with 255 being the best.
+The HNA entries show which hosts are connected to the mesh via
+bat0 or are being bridged into the mesh network. The PRIMARY/SEC
+values are only applied on primary interfaces.
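
To make the field list above concrete, a single (made-up) entry could
look roughly like this; the exact values, and which of the optional
fields appear, depend entirely on your mesh, so treat it purely as an
illustration of the comma-separated layout:

  fe:fe:00:00:01:01,TQ fe:fe:00:00:02:01 245,HNA 00:11:22:33:44:55,PRIMARY

Read left to right: fe:fe:00:00:01:01 is the originator's source
interface, the TQ field reports a link quality of 245 towards the
neighbor interface fe:fe:00:00:02:01, the HNA field announces the
non-mesh host 00:11:22:33:44:55, and PRIMARY marks this as a primary
interface.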
-# echo "" > /proc/net/batman-adv/interfaces
LOGGING/DEBUGGING
-----------------
-All error messages, warnings and information messages are sent to the
-kernel log. Depending on your operating system distribution this can be
-read in one of a number of ways. Try using the commands: dmesg,
-logread, or looking in the files /var/log/kern.log or
-/var/log/syslog. All batman-adv messages are prefixed with
+All error messages, warnings and information messages are sent to
+the kernel log. Depending on your operating system distribution
+this can be read in one of a number of ways. Try using the com-
+mands dmesg or logread, or look in the files /var/log/kern.log
+or /var/log/syslog. All batman-adv messages are prefixed with
"batman-adv:" So to see just these messages try
-dmesg | grep batman-adv
+# dmesg | grep batman-adv
-When investigating problems with your mesh network it is sometimes
-necessary to see more detail debug messages. This must be enabled when
-compiling the batman-adv module. Use "make menuconfig" and enable the
+When investigating problems with your mesh network it is some-
+times necessary to see more detailed debug messages. This must
+be enabled when compiling the batman-adv module. When building
+batman-adv as part of the kernel, use "make menuconfig" and enable the
option "B.A.T.M.A.N. debugging".
-The additional debug output is by default disabled. It can be enabled
-either at kernel module load time or during run time. To enable debug
-output at module load time, add the module parameter debug=<value>.
-<value> can take one of four values.
+The additional debug output is by default disabled. It can be en-
+abled either at kernel module load time or during run time. To
+enable debug output at module load time, add the module parameter
+debug=<value>. <value> can take one of four values.
-0 - All debug output disabled
+0 - All debug output disabled
1 - Enable messages related to routing / flooding / broadcasting
2 - Enable route or hna added / changed / deleted
3 - Enable all messages
e.g.
-modprobe batman-adv debug=2
+# modprobe batman-adv debug=2
-will load the module and enable debug messages for when routes or HNAs
-change.
+will load the module and enable debug messages for when routes or
+HNAs change.
-The debug output can also be changed at runtime using the file
+The debug output can also be changed at runtime using the file
/sys/module/batman-adv/parameters/debug. e.g.
-echo 2 > /sys/module/batman-adv/parameters/debug
+# echo 2 > /sys/module/batman-adv/parameters/debug
enables debug messages for when routes or HNAs change.
-The debug output is sent to the kernel logs. So try dmesg, logread etc
-to see the debug messages.
+The debug output is sent to the kernel logs. So try dmesg,
+logread, etc. to see the debug messages.
+
BATCTL
------
-B.A.T.M.A.N. advanced operates on layer 2 and thus all hosts
-participating in the virtual switch are completely transparent for all
-protocols above layer 2. Therefore the common diagnosis tools do not
-work as expected. To overcome these problems batctl was created. At
-the moment the batctl contains ping, traceroute, tcpdump and
+As batman advanced operates on layer 2, all hosts participating
+in the virtual switch are completely transparent for all proto-
+cols above layer 2. Therefore the common diagnosis tools do not
+work as expected. To overcome these problems batctl was created.
+At the moment batctl contains ping, traceroute, tcpdump and
interfaces to the kernel module settings.
For more information, please see the manpage (man batctl).
-batctl is available on http://www.open-mesh.net/
+batctl is available on http://www.open-mesh.org/
+
CONTACT
-------
Please send us comments, experiences, questions, anything :)
-IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@open-mesh.net
-(subscription at https://list.open-mesh.net/mm/listinfo/b.a.t.m.a.n )
+IRC: #batman on irc.freenode.org
+Mailing-list: b.a.t.m.a.n@open-mesh.net (optional subscription
+ at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:
-Marek Lindner <lindner_marek@yahoo.de>
-Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Marek Lindner <lindner_marek@yahoo.de>
+Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+
diff --git a/drivers/staging/batman-adv/TODO b/drivers/staging/batman-adv/TODO
index 2f15136b18e..518db7fd41b 100644
--- a/drivers/staging/batman-adv/TODO
+++ b/drivers/staging/batman-adv/TODO
@@ -1,23 +1,6 @@
-=> proc interface
-* implement new interface to add/delete interfaces and setting options
-* /proc/sys/net/batman-adv/ as main folder
-* in interfaces/ list every available interface of the host
-* each interfaces/$iface/ contains the following files:
--> enable (def: 0) [add/remove this interface to batman-adv]
--> ogm_interval (def: 1000) [ogm interval of that interface]
--> context (def: bat0) [later we want to support multiple mesh instances via
--> bat0/bat1/bat2/..]
--> status (read-only) [outputs the interface status from batman's
--> perspective]
-* in mesh/batX/ list every available mesh subnet
--> vis_server (def: 0) [enable/disable vis server for that mesh]
--> vis_data (read-only) [outputs the vis data in a raw format]
--> aggregate_ogm (def: 1) [enable/disable ogm aggregation for that mesh]
--> originators (read-only) [outputs the originator table]
--> transtable_global (read-only) [outputs the global translation table]
--> transtable_local (read-only) [outputs the local translation table]
-
-=> fix checkpatch.pl errors
+Request a review.
+Process the comments from the review.
+Move into mainline proper.
Please send all patches to:
Marek Lindner <lindner_marek@yahoo.de>
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
index 7917322a7e2..ce8b8a6e5ae 100644
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -52,6 +52,8 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
*/
if (time_before(send_time, forw_packet->send_time) &&
+ time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
+ forw_packet->send_time) &&
(aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
/**
@@ -79,14 +81,21 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
* interface only - we still can aggregate */
if ((directlink) &&
(new_batman_packet->ttl == 1) &&
- (forw_packet->if_incoming == if_incoming))
- return true;
+ (forw_packet->if_incoming == if_incoming) &&
+ /* packets from direct neighbors or
+ * own secondary interface packets
+ * (= secondary interface packets in general) */
+ (batman_packet->flags & DIRECTLINK ||
+ (forw_packet->own &&
+ forw_packet->if_incoming->if_num != 0)))
+ return true;
}
return false;
}
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
/* create a new aggregated packet and add this packet to it */
static void new_aggregated_packet(unsigned char *packet_buff,
int packet_len,
@@ -98,13 +107,26 @@ static void new_aggregated_packet(unsigned char *packet_buff,
struct forw_packet *forw_packet_aggr;
unsigned long flags;
+ /* own packet should always be scheduled */
+ if (!own_packet) {
+ if (!atomic_dec_not_zero(&batman_queue_left)) {
+ bat_dbg(DBG_BATMAN, "batman packet queue full\n");
+ return;
+ }
+ }
+
forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
- if (!forw_packet_aggr)
+ if (!forw_packet_aggr) {
+ if (!own_packet)
+ atomic_inc(&batman_queue_left);
return;
+ }
forw_packet_aggr->packet_buff = kmalloc(MAX_AGGREGATION_BYTES,
GFP_ATOMIC);
if (!forw_packet_aggr->packet_buff) {
+ if (!own_packet)
+ atomic_inc(&batman_queue_left);
kfree(forw_packet_aggr);
return;
}
@@ -157,7 +179,8 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
(1 << forw_packet_aggr->num_packets);
}
-void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
+void add_bat_packet_to_list(struct bat_priv *bat_priv,
+ unsigned char *packet_buff, int packet_len,
struct batman_if *if_incoming, char own_packet,
unsigned long send_time)
{
@@ -175,7 +198,7 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
/* find position for the packet in the forward queue */
spin_lock_irqsave(&forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
- if ((atomic_read(&aggregation_enabled)) && (!own_packet)) {
+ if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
list) {
if (can_aggregate_with(batman_packet,
@@ -195,6 +218,16 @@ void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+
+ /**
+ * if we could not aggregate this packet with one of the others
+ * we hold it back for a while, so that it might be aggregated
+ * later on
+ */
+ if ((!own_packet) &&
+ (atomic_read(&bat_priv->aggregation_enabled)))
+ send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
+
new_aggregated_packet(packet_buff, packet_len,
send_time, direct_link,
if_incoming, own_packet);
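
Two small kernel idioms carry most of the weight in the aggregation
changes above: the wrap-safe jiffies comparisons time_before() and
time_after_eq() from <linux/jiffies.h>, used to keep a packet only
within the MAX_AGGREGATION_MS hold-back window, and a "take a slot
unless none are left" counter built from atomic_add_unless(). The
following is only a stand-alone sketch of the counter pattern, with
made-up names and an arbitrary limit, not the driver's code:

#include <linux/atomic.h>	/* <asm/atomic.h> on older kernels */
#include <linux/slab.h>

/* illustrative limit; batman-adv keeps its real counter elsewhere */
static atomic_t queue_left = ATOMIC_INIT(32);

/* atomic_add_unless() returns 0 when the counter already holds the
 * "unless" value (here 0), so this decrements only while slots remain */
#define queue_dec_not_zero(v) atomic_add_unless((v), -1, 0)

static void *queue_alloc_slot(size_t size)
{
	void *slot;

	if (!queue_dec_not_zero(&queue_left))
		return NULL;			/* queue full: drop */

	slot = kmalloc(size, GFP_ATOMIC);
	if (!slot)
		atomic_inc(&queue_left);	/* hand the slot back */

	return slot;
}

static void queue_free_slot(void *slot)
{
	kfree(slot);
	atomic_inc(&queue_left);		/* slot available again */
}

The point of the macro is that the emptiness check and the decrement
happen in one atomic step, so concurrent senders cannot overshoot the
limit; any later failure has to give its slot back explicitly, exactly
as new_aggregated_packet() does on its error paths.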
diff --git a/drivers/staging/batman-adv/aggregation.h b/drivers/staging/batman-adv/aggregation.h
index 6da8df9f99b..84401ca24c3 100644
--- a/drivers/staging/batman-adv/aggregation.h
+++ b/drivers/staging/batman-adv/aggregation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -30,8 +30,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
(next_buff_pos <= MAX_AGGREGATION_BYTES);
}
-void add_bat_packet_to_list(unsigned char *packet_buff, int packet_len,
- struct batman_if *if_outgoing, char own_packet,
+void add_bat_packet_to_list(struct bat_priv *bat_priv,
+ unsigned char *packet_buff, int packet_len,
+ struct batman_if *if_incoming, char own_packet,
unsigned long send_time);
void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
int packet_len, struct batman_if *if_incoming);
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
new file mode 100644
index 00000000000..e2c000b80ca
--- /dev/null
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "bat_sysfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "vis.h"
+
+#define to_dev(obj) container_of(obj, struct device, kobj)
+
+struct bat_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ char *buf, size_t count);
+};
+
+struct hardif_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ char *buf, size_t count);
+};
+
+#define BAT_ATTR(_name, _mode, _show, _store) \
+struct bat_attribute bat_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define BAT_BIN_ATTR(_name, _mode, _read, _write) \
+struct bin_attribute bat_attr_##_name = { \
+ .attr = { .name = __stringify(_name), \
+ .mode = _mode, }, \
+ .read = _read, \
+ .write = _write, \
+};
+
+#define HARDIF_ATTR(_name, _mode, _show, _store) \
+struct hardif_attribute hardif_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+static ssize_t show_aggr_ogm(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+ int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
+
+ return sprintf(buff, "status: %s\ncommands: enable, disable, 0, 1\n",
+ aggr_status == 0 ? "disabled" : "enabled");
+}
+
+static ssize_t store_aggr_ogm(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ int aggr_tmp = -1;
+
+ if (((count == 2) && (buff[0] == '1')) ||
+ (strncmp(buff, "enable", 6) == 0))
+ aggr_tmp = 1;
+
+ if (((count == 2) && (buff[0] == '0')) ||
+ (strncmp(buff, "disable", 7) == 0))
+ aggr_tmp = 0;
+
+ if (aggr_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ printk(KERN_INFO "batman-adv:Invalid parameter for 'aggregate OGM' setting on mesh %s received: %s\n",
+ net_dev->name, buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
+ return count;
+
+ printk(KERN_INFO "batman-adv:Changing aggregation from: %s to: %s on mesh: %s\n",
+ atomic_read(&bat_priv->aggregation_enabled) == 1 ?
+ "enabled" : "disabled", aggr_tmp == 1 ? "enabled" : "disabled",
+ net_dev->name);
+
+ atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
+ return count;
+}
+
+static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+ int vis_mode = atomic_read(&bat_priv->vis_mode);
+
+ return sprintf(buff, "status: %s\ncommands: client, server, %d, %d\n",
+ vis_mode == VIS_TYPE_CLIENT_UPDATE ?
+ "client" : "server",
+ VIS_TYPE_SERVER_SYNC, VIS_TYPE_CLIENT_UPDATE);
+}
+
+static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ unsigned long val;
+ int ret, vis_mode_tmp = -1;
+
+ ret = strict_strtoul(buff, 10, &val);
+
+ if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
+ (strncmp(buff, "client", 6) == 0))
+ vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
+
+ if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
+ (strncmp(buff, "server", 6) == 0))
+ vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
+
+ if (vis_mode_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ printk(KERN_INFO "batman-adv:Invalid parameter for 'vis mode' setting on mesh %s received: %s\n",
+ net_dev->name, buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
+ return count;
+
+ printk(KERN_INFO "batman-adv:Changing vis mode from: %s to: %s on mesh: %s\n",
+ atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
+ "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
+ "client" : "server", net_dev->name);
+
+ atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
+ return count;
+}
+
+static ssize_t show_orig_interval(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buff, "status: %i\n",
+ atomic_read(&bat_priv->orig_interval));
+}
+
+static ssize_t store_orig_interval(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ unsigned long orig_interval_tmp;
+ int ret;
+
+ ret = strict_strtoul(buff, 10, &orig_interval_tmp);
+ if (ret) {
+ printk(KERN_INFO "batman-adv:Invalid parameter for 'orig_interval' setting on mesh %s received: %s\n",
+ net_dev->name, buff);
+ return -EINVAL;
+ }
+
+ if (orig_interval_tmp <= JITTER * 2) {
+ printk(KERN_INFO "batman-adv:New originator interval too small: %li (min: %i)\n",
+ orig_interval_tmp, JITTER * 2);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->orig_interval) == orig_interval_tmp)
+ return count;
+
+ printk(KERN_INFO "batman-adv:Changing originator interval from: %i to: %li on mesh: %s\n",
+ atomic_read(&bat_priv->orig_interval),
+ orig_interval_tmp, net_dev->name);
+
+ atomic_set(&bat_priv->orig_interval, orig_interval_tmp);
+ return count;
+}
+
+static BAT_ATTR(aggregate_ogm, S_IRUGO | S_IWUSR,
+ show_aggr_ogm, store_aggr_ogm);
+static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
+static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
+ show_orig_interval, store_orig_interval);
+
+static struct bat_attribute *mesh_attrs[] = {
+ &bat_attr_aggregate_ogm,
+ &bat_attr_vis_mode,
+ &bat_attr_orig_interval,
+ NULL,
+};
+
+static ssize_t transtable_local_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buff, loff_t off, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+
+ return hna_local_fill_buffer_text(net_dev, buff, count, off);
+}
+
+static ssize_t transtable_global_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buff, loff_t off, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+
+ return hna_global_fill_buffer_text(net_dev, buff, count, off);
+}
+
+static ssize_t originators_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buff, loff_t off, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+
+ return orig_fill_buffer_text(net_dev, buff, count, off);
+}
+
+static ssize_t vis_data_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buff, loff_t off, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+
+ return vis_fill_buffer_text(net_dev, buff, count, off);
+}
+
+static BAT_BIN_ATTR(transtable_local, S_IRUGO, transtable_local_read, NULL);
+static BAT_BIN_ATTR(transtable_global, S_IRUGO, transtable_global_read, NULL);
+static BAT_BIN_ATTR(originators, S_IRUGO, originators_read, NULL);
+static BAT_BIN_ATTR(vis_data, S_IRUGO, vis_data_read, NULL);
+
+static struct bin_attribute *mesh_bin_attrs[] = {
+ &bat_attr_transtable_local,
+ &bat_attr_transtable_global,
+ &bat_attr_originators,
+ &bat_attr_vis_data,
+ NULL,
+};
+
+int sysfs_add_meshif(struct net_device *dev)
+{
+ struct kobject *batif_kobject = &dev->dev.kobj;
+ struct bat_priv *bat_priv = netdev_priv(dev);
+ struct bat_attribute **bat_attr;
+ struct bin_attribute **bin_attr;
+ int err;
+
+ /* FIXME: should be done in the general mesh setup
+ routine as soon as we have it */
+ atomic_set(&bat_priv->aggregation_enabled, 1);
+ atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
+ atomic_set(&bat_priv->orig_interval, 1000);
+ bat_priv->primary_if = NULL;
+ bat_priv->num_ifaces = 0;
+
+ bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
+ batif_kobject);
+ if (!bat_priv->mesh_obj) {
+ printk(KERN_ERR "batman-adv:Can't add sysfs directory: %s/%s\n",
+ dev->name, SYSFS_IF_MESH_SUBDIR);
+ goto out;
+ }
+
+ for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(bat_priv->mesh_obj,
+ &((*bat_attr)->attr));
+ if (err) {
+ printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
+ dev->name, SYSFS_IF_MESH_SUBDIR,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr) {
+ err = sysfs_create_bin_file(bat_priv->mesh_obj, (*bin_attr));
+ if (err) {
+ printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
+ dev->name, SYSFS_IF_MESH_SUBDIR,
+ ((*bin_attr)->attr).name);
+ goto rem_bin_attr;
+ }
+ }
+
+ return 0;
+
+rem_bin_attr:
+ for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr)
+ sysfs_remove_bin_file(bat_priv->mesh_obj, (*bin_attr));
+rem_attr:
+ for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+out:
+ return -ENOMEM;
+}
+
+void sysfs_del_meshif(struct net_device *dev)
+{
+ struct bat_priv *bat_priv = netdev_priv(dev);
+ struct bat_attribute **bat_attr;
+ struct bin_attribute **bin_attr;
+
+ for (bin_attr = mesh_bin_attrs; *bin_attr; ++bin_attr)
+ sysfs_remove_bin_file(bat_priv->mesh_obj, (*bin_attr));
+
+ for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+}
+
+static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+
+ if (!batman_if)
+ return 0;
+
+ return sprintf(buff, "status: %s\ncommands: none, bat0\n",
+ batman_if->if_status == IF_NOT_IN_USE ?
+ "none" : "bat0");
+}
+
+static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+ int status_tmp = -1;
+
+ if (!batman_if)
+ return count;
+
+ if (strncmp(buff, "none", 4) == 0)
+ status_tmp = IF_NOT_IN_USE;
+
+ if (strncmp(buff, "bat0", 4) == 0)
+ status_tmp = IF_I_WANT_YOU;
+
+ if (status_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ printk(KERN_ERR "batman-adv:Invalid parameter for 'mesh_iface' setting received: %s\n",
+ buff);
+ return -EINVAL;
+ }
+
+ if ((batman_if->if_status == status_tmp) ||
+ ((status_tmp == IF_I_WANT_YOU) &&
+ (batman_if->if_status != IF_NOT_IN_USE)))
+ return count;
+
+ if (status_tmp == IF_I_WANT_YOU)
+ status_tmp = hardif_enable_interface(batman_if);
+ else
+ hardif_disable_interface(batman_if);
+
+ return (status_tmp < 0 ? status_tmp : count);
+}
+
+static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+
+ if (!batman_if)
+ return 0;
+
+ switch (batman_if->if_status) {
+ case IF_TO_BE_REMOVED:
+ return sprintf(buff, "disabling\n");
+ case IF_INACTIVE:
+ return sprintf(buff, "inactive\n");
+ case IF_ACTIVE:
+ return sprintf(buff, "active\n");
+ case IF_TO_BE_ACTIVATED:
+ return sprintf(buff, "enabling\n");
+ case IF_NOT_IN_USE:
+ default:
+ return sprintf(buff, "not in use\n");
+ }
+}
+
+static HARDIF_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
+ show_mesh_iface, store_mesh_iface);
+static HARDIF_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
+
+static struct hardif_attribute *batman_attrs[] = {
+ &hardif_attr_mesh_iface,
+ &hardif_attr_iface_status,
+ NULL,
+};
+
+int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
+{
+ struct kobject *hardif_kobject = &dev->dev.kobj;
+ struct hardif_attribute **hardif_attr;
+ int err;
+
+ *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
+ hardif_kobject);
+
+ if (!*hardif_obj) {
+ printk(KERN_ERR "batman-adv:Can't add sysfs directory: %s/%s\n",
+ dev->name, SYSFS_IF_BAT_SUBDIR);
+ goto out;
+ }
+
+ for (hardif_attr = batman_attrs; *hardif_attr; ++hardif_attr) {
+ err = sysfs_create_file(*hardif_obj, &((*hardif_attr)->attr));
+ if (err) {
+ printk(KERN_ERR "batman-adv:Can't add sysfs file: %s/%s/%s\n",
+ dev->name, SYSFS_IF_BAT_SUBDIR,
+ ((*hardif_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
+ for (hardif_attr = batman_attrs; *hardif_attr; ++hardif_attr)
+ sysfs_remove_file(*hardif_obj, &((*hardif_attr)->attr));
+out:
+ return -ENOMEM;
+}
+
+void sysfs_del_hardif(struct kobject **hardif_obj)
+{
+ kobject_put(*hardif_obj);
+ *hardif_obj = NULL;
+}
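
bat_sysfs.c above follows the stock kernel sysfs pattern: create a
child kobject below the net_device's kobject with
kobject_create_and_add(), register plain and binary attributes inside
it, and unwind in reverse order on error and on removal. Reduced to
that skeleton, and using the standard struct kobj_attribute helper
instead of the driver's hand-rolled bat_attribute wrapper, a minimal
sketch (all names illustrative) might be:

#include <linux/kobject.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "status: example\n");
}

/* read-only file named "example" */
static struct kobj_attribute example_attr =
	__ATTR(example, S_IRUGO, example_show, NULL);

/* creates e.g. /sys/class/net/<dev>/demo/example */
int demo_sysfs_add(struct net_device *dev, struct kobject **obj)
{
	*obj = kobject_create_and_add("demo", &dev->dev.kobj);
	if (!*obj)
		return -ENOMEM;

	return sysfs_create_file(*obj, &example_attr.attr);
}

void demo_sysfs_del(struct kobject **obj)
{
	sysfs_remove_file(*obj, &example_attr.attr);
	kobject_put(*obj);
	*obj = NULL;
}

A write to such a file lands in the attribute's store() callback,
which is where bat_sysfs.c parses strings like "server", "none" or a
plain number and updates the matching atomic in bat_priv.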
diff --git a/drivers/staging/batman-adv/bat_sysfs.h b/drivers/staging/batman-adv/bat_sysfs.h
new file mode 100644
index 00000000000..e1893411871
--- /dev/null
+++ b/drivers/staging/batman-adv/bat_sysfs.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+
+#define SYSFS_IF_MESH_SUBDIR "mesh"
+#define SYSFS_IF_BAT_SUBDIR "batman_adv"
+
+int sysfs_add_meshif(struct net_device *dev);
+void sysfs_del_meshif(struct net_device *dev);
+int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
+void sysfs_del_hardif(struct kobject **hardif_obj);
diff --git a/drivers/staging/batman-adv/bitarray.c b/drivers/staging/batman-adv/bitarray.c
index 212eef93afe..2fef6e35f8c 100644
--- a/drivers/staging/batman-adv/bitarray.c
+++ b/drivers/staging/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -68,7 +68,7 @@ void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
int32_t word_offset, word_num;
int32_t i;
- if (n <= 0)
+ if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
return;
word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
@@ -111,48 +111,76 @@ void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
seq_bits[i] = 0;
}
+static void bit_reset_window(TYPE_OF_WORD *seq_bits)
+{
+ int i;
+ for (i = 0; i < NUM_WORDS; i++)
+ seq_bits[i] = 0;
+}
+
-/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old */
+/* receive and process one packet within the sequence number window.
+ *
+ * returns:
+ * 1 if the window was moved (either new or very old)
+ * 0 if the window was not moved/shifted.
+ */
char bit_get_packet(TYPE_OF_WORD *seq_bits, int16_t seq_num_diff,
int8_t set_mark)
{
- int i;
+ /* sequence number is slightly older. We already got a sequence number
+ * higher than this one, so we just mark it. */
- /* we already got a sequence number higher than this one, so we just
- * mark it. this should wrap around the integer just fine */
- if ((seq_num_diff < 0) && (seq_num_diff >= -TQ_LOCAL_WINDOW_SIZE)) {
+ if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
if (set_mark)
bit_mark(seq_bits, -seq_num_diff);
return 0;
}
- /* it seems we missed a lot of packets or the other host restarted */
- if ((seq_num_diff > TQ_LOCAL_WINDOW_SIZE) ||
- (seq_num_diff < -TQ_LOCAL_WINDOW_SIZE)) {
+ /* sequence number is slightly newer, so we shift the window and
+ * set the mark if required */
- if (seq_num_diff > TQ_LOCAL_WINDOW_SIZE)
- bat_dbg(DBG_BATMAN,
- "We missed a lot of packets (%i) !\n",
- seq_num_diff-1);
+ if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
+ bit_shift(seq_bits, seq_num_diff);
- if (-seq_num_diff > TQ_LOCAL_WINDOW_SIZE)
- bat_dbg(DBG_BATMAN,
- "Other host probably restarted !\n");
+ if (set_mark)
+ bit_mark(seq_bits, 0);
+ return 1;
+ }
- for (i = 0; i < NUM_WORDS; i++)
- seq_bits[i] = 0;
+ /* sequence number is much newer, probably missed a lot of packets */
+ if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
+ && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+ bat_dbg(DBG_BATMAN,
+ "We missed a lot of packets (%i) !\n",
+ seq_num_diff - 1);
+ bit_reset_window(seq_bits);
if (set_mark)
- seq_bits[0] = 1; /* we only have the latest packet */
- } else {
- bit_shift(seq_bits, seq_num_diff);
+ bit_mark(seq_bits, 0);
+ return 1;
+ }
+
+ /* received a much older packet. The other host either restarted
+ * or the old packet got delayed somewhere in the network. The
+ * packet should be dropped without calling this function if the
+ * seqno window is protected. */
+
+ if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
+ || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
+ bat_dbg(DBG_BATMAN,
+ "Other host probably restarted!\n");
+
+ bit_reset_window(seq_bits);
if (set_mark)
bit_mark(seq_bits, 0);
+
+ return 1;
}
- return 1;
+ /* never reached */
+ return 0;
}
/* count the hamming weight, how many good packets did we receive? just count
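
The rewritten bit_get_packet() distinguishes four cases relative to
the newest known sequence number: slightly older (mark only), slightly
newer (shift the window), much newer (many packets probably lost) and
much older (the peer probably restarted), where the last two both
reset the window. The following is a compressed restatement of that
decision structure only, using a single 64-bit mask instead of the
driver's multi-word array and collapsing the two reset cases, so
EXPECTED_SEQNO_RANGE does not appear:

#include <stdint.h>

#define WINDOW_SIZE 64	/* plays the role of TQ_LOCAL_WINDOW_SIZE */

/* bits: bit 0 = newest seqno, bit n = a packet seen n seqnos ago.
 * diff: received seqno minus newest known seqno.
 * Returns 1 if the window was moved, 0 otherwise. */
static int window_update(uint64_t *bits, int32_t diff, int mark)
{
	if (diff <= 0 && diff > -WINDOW_SIZE) {
		/* slightly older: still inside the window, just mark it */
		if (mark)
			*bits |= 1ULL << -diff;
		return 0;
	}

	if (diff > 0 && diff < WINDOW_SIZE) {
		/* slightly newer: slide the window forward by diff */
		*bits <<= diff;
		if (mark)
			*bits |= 1ULL;
		return 1;
	}

	/* much newer (lost many packets) or much older (peer restart):
	 * the driver tells these two apart for logging and for seqno
	 * window protection; either way the window starts over */
	*bits = mark ? 1ULL : 0ULL;
	return 1;
}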
diff --git a/drivers/staging/batman-adv/bitarray.h b/drivers/staging/batman-adv/bitarray.h
index ec72dd78436..76ad24c9f3d 100644
--- a/drivers/staging/batman-adv/bitarray.h
+++ b/drivers/staging/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/drivers/staging/batman-adv/device.c b/drivers/staging/batman-adv/device.c
index 2f61500186f..ad82ec4a485 100644
--- a/drivers/staging/batman-adv/device.c
+++ b/drivers/staging/batman-adv/device.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -44,10 +44,7 @@ static struct device_client *device_client_hash[256];
void bat_device_init(void)
{
- int i;
-
- for (i = 0; i < 256; i++)
- device_client_hash[i] = NULL;
+ memset(device_client_hash, 0, sizeof(device_client_hash));
}
int bat_device_setup(void)
@@ -60,7 +57,8 @@ int bat_device_setup(void)
/* register our device - kernel assigns a free major number */
tmp_major = register_chrdev(0, DRIVER_DEVICE, &fops);
if (tmp_major < 0) {
- printk(KERN_ERR "batman-adv:Registering the character device failed with %d\n",
+ printk(KERN_ERR "batman-adv:"
+ "Registering the character device failed with %d\n",
tmp_major);
return 0;
}
@@ -68,7 +66,8 @@ int bat_device_setup(void)
batman_class = class_create(THIS_MODULE, "batman-adv");
if (IS_ERR(batman_class)) {
- printk(KERN_ERR "batman-adv:Could not register class 'batman-adv' \n");
+ printk(KERN_ERR "batman-adv:"
+ "Could not register class 'batman-adv'\n");
return 0;
}
@@ -103,15 +102,17 @@ int bat_device_open(struct inode *inode, struct file *file)
if (!device_client)
return -ENOMEM;
- for (i = 0; i < 256; i++) {
+ for (i = 0; i < ARRAY_SIZE(device_client_hash); i++) {
if (!device_client_hash[i]) {
device_client_hash[i] = device_client;
break;
}
}
- if (device_client_hash[i] != device_client) {
- printk(KERN_ERR "batman-adv:Error - can't add another packet client: maximum number of clients reached \n");
+ if (i == ARRAY_SIZE(device_client_hash)) {
+ printk(KERN_ERR "batman-adv:"
+ "Error - can't add another packet client: "
+ "maximum number of clients reached\n");
kfree(device_client);
return -EXFULL;
}
@@ -212,7 +213,9 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
unsigned long flags;
if (len < sizeof(struct icmp_packet)) {
- bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: invalid packet size\n");
+ bat_dbg(DBG_BATMAN, "batman-adv:"
+ "Error - can't send packet from char device: "
+ "invalid packet size\n");
return -EINVAL;
}
@@ -223,12 +226,16 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
return -EFAULT;
if (icmp_packet.packet_type != BAT_ICMP) {
- bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
+ bat_dbg(DBG_BATMAN, "batman-adv:"
+ "Error - can't send packet from char device: "
+ "got bogus packet type (expected: BAT_ICMP)\n");
return -EINVAL;
}
if (icmp_packet.msg_type != ECHO_REQUEST) {
- bat_dbg(DBG_BATMAN, "batman-adv:Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+ bat_dbg(DBG_BATMAN, "batman-adv:"
+ "Error - can't send packet from char device: "
+ "got bogus message type (expected: ECHO_REQUEST)\n");
return -EINVAL;
}
@@ -253,7 +260,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
if (!orig_node->router)
goto unlock;
- batman_if = orig_node->batman_if;
+ batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
@@ -261,7 +268,7 @@ ssize_t bat_device_write(struct file *file, const char __user *buff,
if (!batman_if)
goto dst_unreach;
- if (batman_if->if_active != IF_ACTIVE)
+ if (batman_if->if_status != IF_ACTIVE)
goto dst_unreach;
memcpy(icmp_packet.orig,
diff --git a/drivers/staging/batman-adv/device.h b/drivers/staging/batman-adv/device.h
index 46c0f449652..eb14b371cea 100644
--- a/drivers/staging/batman-adv/device.h
+++ b/drivers/staging/batman-adv/device.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index befd4883951..7a582e80de1 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -25,22 +25,21 @@
#include "send.h"
#include "translation-table.h"
#include "routing.h"
+#include "bat_sysfs.h"
+#include "originator.h"
#include "hash.h"
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
-static char avail_ifs;
-static char active_ifs;
+#include <linux/if_arp.h>
-static void hardif_free_interface(struct rcu_head *rcu);
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
-static struct batman_if *get_batman_if_by_name(char *name)
+struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
struct batman_if *batman_if;
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (strncmp(batman_if->dev, name, IFNAMSIZ) == 0)
+ if (batman_if->net_dev == net_dev)
goto out;
}
@@ -51,23 +50,90 @@ out:
return batman_if;
}
-int hardif_min_mtu(void)
+static int is_valid_iface(struct net_device *net_dev)
+{
+ if (net_dev->flags & IFF_LOOPBACK)
+ return 0;
+
+ if (net_dev->type != ARPHRD_ETHER)
+ return 0;
+
+ if (net_dev->addr_len != ETH_ALEN)
+ return 0;
+
+ /* no batman over batman */
+#ifdef HAVE_NET_DEVICE_OPS
+ if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+ return 0;
+#else
+ if (net_dev->hard_start_xmit == interface_tx)
+ return 0;
+#endif
+
+ /* Device is being bridged */
+ /* if (net_dev->br_port != NULL)
+ return 0; */
+
+ return 1;
+}
+
+static struct batman_if *get_active_batman_if(void)
{
struct batman_if *batman_if;
- /* allow big frames if all devices are capable to do so
- * (have MTU > 1500 + BAT_HEADER_LEN) */
- int min_mtu = ETH_DATA_LEN;
+ /* TODO: should check interfaces belonging to bat_priv */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->if_active == IF_ACTIVE) ||
- (batman_if->if_active == IF_TO_BE_ACTIVATED))
- min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
- min_mtu);
+ if (batman_if->if_status == IF_ACTIVE)
+ goto out;
}
+
+ batman_if = NULL;
+
+out:
rcu_read_unlock();
+ return batman_if;
+}
- return min_mtu;
+static void set_primary_if(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
+{
+ struct batman_packet *batman_packet;
+
+ bat_priv->primary_if = batman_if;
+
+ if (!bat_priv->primary_if)
+ return;
+
+ set_main_if_addr(batman_if->net_dev->dev_addr);
+
+ batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+ batman_packet->flags = 0;
+ batman_packet->ttl = TTL;
+
+ /***
+ * hacky trick to make sure that we send the HNA information via
+ * our new primary interface
+ */
+ atomic_set(&hna_local_changed, 1);
+}
+
+static bool hardif_is_iface_up(struct batman_if *batman_if)
+{
+ if (batman_if->net_dev->flags & IFF_UP)
+ return true;
+
+ return false;
+}
+
+static void update_mac_addresses(struct batman_if *batman_if)
+{
+ addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
+
+ memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
+ batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
+ batman_if->net_dev->dev_addr, ETH_ALEN);
}
static void check_known_mac_addr(uint8_t *addr)
@@ -76,18 +142,40 @@ static void check_known_mac_addr(uint8_t *addr)
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->if_active != IF_ACTIVE) &&
- (batman_if->if_active != IF_TO_BE_ACTIVATED))
+ if ((batman_if->if_status != IF_ACTIVE) &&
+ (batman_if->if_status != IF_TO_BE_ACTIVATED))
continue;
if (!compare_orig(batman_if->net_dev->dev_addr, addr))
continue;
- printk(KERN_WARNING "batman-adv:The newly added mac address (%pM) already exists on: %s\n",
- addr, batman_if->dev);
- printk(KERN_WARNING "batman-adv:It is strongly recommended to keep mac addresses unique to avoid problems!\n");
+ printk(KERN_WARNING "batman-adv:"
+ "The newly added mac address (%pM) already exists on: %s\n",
+ addr, batman_if->dev);
+ printk(KERN_WARNING "batman-adv:"
+ "It is strongly recommended to keep mac addresses unique"
+ "to avoid problems!\n");
+ }
+ rcu_read_unlock();
+}
+
+int hardif_min_mtu(void)
+{
+ struct batman_if *batman_if;
+ /* allow big frames if all devices are capable to do so
+ * (have MTU > 1500 + BAT_HEADER_LEN) */
+ int min_mtu = ETH_DATA_LEN;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if ((batman_if->if_status == IF_ACTIVE) ||
+ (batman_if->if_status == IF_TO_BE_ACTIVATED))
+ min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
+ min_mtu);
}
rcu_read_unlock();
+
+ return min_mtu;
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
@@ -100,322 +188,250 @@ void update_min_mtu(void)
soft_device->mtu = min_mtu;
}
-/* checks if the interface is up. (returns 1 if it is) */
-static int hardif_is_interface_up(char *dev)
+static void hardif_activate_interface(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
{
- struct net_device *net_dev;
+ if (batman_if->if_status != IF_INACTIVE)
+ return;
+
+ dev_hold(batman_if->net_dev);
+
+ update_mac_addresses(batman_if);
+ batman_if->if_status = IF_TO_BE_ACTIVATED;
/**
- * if we already have an interface in our interface list and
- * the current interface is not the primary interface and
- * the primary interface is not up and
- * the primary interface has never been up - don't activate any
- * secondary interface !
+ * the first active interface becomes our primary interface or
+ * the next active interface after the old primary interface was removed
*/
+ if (!bat_priv->primary_if)
+ set_primary_if(bat_priv, batman_if);
- rcu_read_lock();
- if ((!list_empty(&if_list)) &&
- strncmp(((struct batman_if *)if_list.next)->dev, dev, IFNAMSIZ) &&
- !(((struct batman_if *)if_list.next)->if_active == IF_ACTIVE) &&
- !(((struct batman_if *)if_list.next)->if_active == IF_TO_BE_ACTIVATED) &&
- (!main_if_was_up())) {
- rcu_read_unlock();
- goto end;
- }
- rcu_read_unlock();
-
-#ifdef __NET_NET_NAMESPACE_H
- net_dev = dev_get_by_name(&init_net, dev);
-#else
- net_dev = dev_get_by_name(dev);
-#endif
- if (!net_dev)
- goto end;
+ printk(KERN_INFO "batman-adv:Interface activated: %s\n",
+ batman_if->dev);
- if (!(net_dev->flags & IFF_UP))
- goto failure;
+ if (atomic_read(&module_state) == MODULE_INACTIVE)
+ activate_module();
- dev_put(net_dev);
- return 1;
-
-failure:
- dev_put(net_dev);
-end:
- return 0;
+ update_min_mtu();
+ return;
}
-/* deactivates the interface. */
-void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct batman_if *batman_if)
{
- if (batman_if->if_active != IF_ACTIVE)
+ if ((batman_if->if_status != IF_ACTIVE) &&
+ (batman_if->if_status != IF_TO_BE_ACTIVATED))
return;
- /**
- * batman_if->net_dev has been acquired by dev_get_by_name() in
- * proc_interfaces_write() and has to be unreferenced.
- */
-
- if (batman_if->net_dev)
- dev_put(batman_if->net_dev);
+ dev_put(batman_if->net_dev);
- batman_if->if_active = IF_INACTIVE;
- active_ifs--;
+ batman_if->if_status = IF_INACTIVE;
printk(KERN_INFO "batman-adv:Interface deactivated: %s\n",
- batman_if->dev);
+ batman_if->dev);
+
+ update_min_mtu();
}
-/* (re)activate given interface. */
-static void hardif_activate_interface(struct batman_if *batman_if)
+int hardif_enable_interface(struct batman_if *batman_if)
{
- if (batman_if->if_active != IF_INACTIVE)
- return;
-
-#ifdef __NET_NET_NAMESPACE_H
- batman_if->net_dev = dev_get_by_name(&init_net, batman_if->dev);
-#else
- batman_if->net_dev = dev_get_by_name(batman_if->dev);
-#endif
- if (!batman_if->net_dev)
- goto dev_err;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct batman_packet *batman_packet;
- check_known_mac_addr(batman_if->net_dev->dev_addr);
+ if (batman_if->if_status != IF_NOT_IN_USE)
+ goto out;
- addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
+ batman_if->packet_len = BAT_PACKET_LEN;
+ batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
- memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
- batman_if->net_dev->dev_addr, ETH_ALEN);
- memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
- batman_if->net_dev->dev_addr, ETH_ALEN);
+ if (!batman_if->packet_buff) {
+ printk(KERN_ERR "batman-adv:"
+ "Can't add interface packet (%s): out of memory\n",
+ batman_if->dev);
+ goto err;
+ }
- batman_if->if_active = IF_TO_BE_ACTIVATED;
- active_ifs++;
+ batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+ batman_packet->packet_type = BAT_PACKET;
+ batman_packet->version = COMPAT_VERSION;
+ batman_packet->flags = 0;
+ batman_packet->ttl = 2;
+ batman_packet->tq = TQ_MAX_VALUE;
+ batman_packet->num_hna = 0;
- /* save the mac address if it is our primary interface */
- if (batman_if->if_num == 0)
- set_main_if_addr(batman_if->net_dev->dev_addr);
+ batman_if->if_num = bat_priv->num_ifaces;
+ bat_priv->num_ifaces++;
+ batman_if->if_status = IF_INACTIVE;
+ orig_hash_add_if(batman_if, bat_priv->num_ifaces);
- printk(KERN_INFO "batman-adv:Interface activated: %s\n",
- batman_if->dev);
+ atomic_set(&batman_if->seqno, 1);
+ printk(KERN_INFO "batman-adv:Adding interface: %s\n", batman_if->dev);
- return;
+ if (hardif_is_iface_up(batman_if))
+ hardif_activate_interface(bat_priv, batman_if);
+ else
+ printk(KERN_ERR "batman-adv:"
+ "Not using interface %s "
+ "(retrying later): interface not active\n",
+ batman_if->dev);
-dev_err:
- batman_if->net_dev = NULL;
-}
+ /* begin scheduling originator messages on that interface */
+ schedule_own_packet(batman_if);
-static void hardif_free_interface(struct rcu_head *rcu)
-{
- struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
+out:
+ return 0;
- kfree(batman_if->packet_buff);
- kfree(batman_if->dev);
- kfree(batman_if);
+err:
+ return -ENOMEM;
}
-/**
- * called by
- * - echo '' > /proc/.../interfaces
- * - modprobe -r batman-adv-core
- */
-/* removes and frees all interfaces */
-void hardif_remove_interfaces(void)
+void hardif_disable_interface(struct batman_if *batman_if)
{
- struct batman_if *batman_if = NULL;
-
- avail_ifs = 0;
-
- /* no lock needed - we don't delete somewhere else */
- list_for_each_entry(batman_if, &if_list, list) {
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
- list_del_rcu(&batman_if->list);
-
- /* first deactivate interface */
- if (batman_if->if_active != IF_INACTIVE)
- hardif_deactivate_interface(batman_if);
-
- call_rcu(&batman_if->rcu, hardif_free_interface);
- }
-}
-
-static int resize_orig(struct orig_node *orig_node, int if_num)
-{
- void *data_ptr;
+ if (batman_if->if_status == IF_ACTIVE)
+ hardif_deactivate_interface(batman_if);
- data_ptr = kmalloc((if_num + 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS,
- GFP_ATOMIC);
- if (!data_ptr) {
- printk(KERN_ERR "batman-adv:Can't resize orig: out of memory\n");
- return -1;
- }
+ if (batman_if->if_status != IF_INACTIVE)
+ return;
- memcpy(data_ptr, orig_node->bcast_own,
- if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS);
- kfree(orig_node->bcast_own);
- orig_node->bcast_own = data_ptr;
+ printk(KERN_INFO "batman-adv:Removing interface: %s\n", batman_if->dev);
+ bat_priv->num_ifaces--;
+ orig_hash_del_if(batman_if, bat_priv->num_ifaces);
- data_ptr = kmalloc((if_num + 1) * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- printk(KERN_ERR "batman-adv:Can't resize orig: out of memory\n");
- return -1;
- }
+ if (batman_if == bat_priv->primary_if)
+ set_primary_if(bat_priv, get_active_batman_if());
- memcpy(data_ptr, orig_node->bcast_own_sum, if_num * sizeof(uint8_t));
- kfree(orig_node->bcast_own_sum);
- orig_node->bcast_own_sum = data_ptr;
+ kfree(batman_if->packet_buff);
+ batman_if->packet_buff = NULL;
+ batman_if->if_status = IF_NOT_IN_USE;
- return 0;
+ if ((atomic_read(&module_state) == MODULE_ACTIVE) &&
+ (bat_priv->num_ifaces == 0))
+ deactivate_module();
}
-
-/* adds an interface the interface list and activate it, if possible */
-int hardif_add_interface(char *dev, int if_num)
+static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
struct batman_if *batman_if;
- struct batman_packet *batman_packet;
- struct orig_node *orig_node;
- unsigned long flags;
- HASHIT(hashit);
+ int ret;
- batman_if = kmalloc(sizeof(struct batman_if), GFP_KERNEL);
+ ret = is_valid_iface(net_dev);
+ if (ret != 1)
+ goto out;
+ batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
if (!batman_if) {
- printk(KERN_ERR "batman-adv:Can't add interface (%s): out of memory\n", dev);
- return -1;
- }
-
- batman_if->net_dev = NULL;
-
- if ((if_num == 0) && (num_hna > 0))
- batman_if->packet_len = BAT_PACKET_LEN + num_hna * ETH_ALEN;
- else
- batman_if->packet_len = BAT_PACKET_LEN;
-
- batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_KERNEL);
-
- if (!batman_if->packet_buff) {
- printk(KERN_ERR "batman-adv:Can't add interface packet (%s): out of memory\n", dev);
+ printk(KERN_ERR "batman-adv:"
+ "Can't add interface (%s): out of memory\n",
+ net_dev->name);
goto out;
}
- batman_if->if_num = if_num;
- batman_if->dev = dev;
- batman_if->if_active = IF_INACTIVE;
- INIT_RCU_HEAD(&batman_if->rcu);
+ batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
+ if (!batman_if->dev)
+ goto free_if;
- printk(KERN_INFO "batman-adv:Adding interface: %s\n", dev);
- avail_ifs++;
+ ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+ if (ret)
+ goto free_dev;
+ batman_if->if_num = -1;
+ batman_if->net_dev = net_dev;
+ batman_if->if_status = IF_NOT_IN_USE;
INIT_LIST_HEAD(&batman_if->list);
- batman_packet = (struct batman_packet *)(batman_if->packet_buff);
- batman_packet->packet_type = BAT_PACKET;
- batman_packet->version = COMPAT_VERSION;
- batman_packet->flags = 0x00;
- batman_packet->ttl = (batman_if->if_num > 0 ? 2 : TTL);
- batman_packet->flags = 0;
- batman_packet->tq = TQ_MAX_VALUE;
- batman_packet->num_hna = 0;
-
- if (batman_if->packet_len != BAT_PACKET_LEN) {
- unsigned char *hna_buff;
- int hna_len;
-
- hna_buff = batman_if->packet_buff + BAT_PACKET_LEN;
- hna_len = batman_if->packet_len - BAT_PACKET_LEN;
- batman_packet->num_hna = hna_local_fill_buffer(hna_buff,
- hna_len);
- }
-
- atomic_set(&batman_if->seqno, 1);
+ check_known_mac_addr(batman_if->net_dev->dev_addr);
+ list_add_tail_rcu(&batman_if->list, &if_list);
+ return batman_if;
- /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
- spin_lock_irqsave(&orig_hash_lock, flags);
+free_dev:
+ kfree(batman_if->dev);
+free_if:
+ kfree(batman_if);
+out:
+ return NULL;
+}
- while (hash_iterate(orig_hash, &hashit)) {
- orig_node = hashit.bucket->data;
- if (resize_orig(orig_node, if_num) == -1) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- goto out;
- }
- }
+static void hardif_free_interface(struct rcu_head *rcu)
+{
+ struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ /* delete all references to this batman_if */
+ purge_orig(NULL);
+ purge_outstanding_packets(batman_if);
- if (!hardif_is_interface_up(batman_if->dev))
- printk(KERN_ERR "batman-adv:Not using interface %s (retrying later): interface not active\n", batman_if->dev);
- else
- hardif_activate_interface(batman_if);
+ kfree(batman_if->dev);
+ kfree(batman_if);
+}
- list_add_tail_rcu(&batman_if->list, &if_list);
+static void hardif_remove_interface(struct batman_if *batman_if)
+{
+ /* first deactivate interface */
+ if (batman_if->if_status != IF_NOT_IN_USE)
+ hardif_disable_interface(batman_if);
- /* begin sending originator messages on that interface */
- schedule_own_packet(batman_if);
- return 1;
+ if (batman_if->if_status != IF_NOT_IN_USE)
+ return;
-out:
- kfree(batman_if->packet_buff);
- kfree(batman_if);
- kfree(dev);
- return -1;
+ batman_if->if_status = IF_TO_BE_REMOVED;
+ list_del_rcu(&batman_if->list);
+ sysfs_del_hardif(&batman_if->hardif_obj);
+ call_rcu(&batman_if->rcu, hardif_free_interface);
}
-char hardif_get_active_if_num(void)
+void hardif_remove_interfaces(void)
{
- return active_ifs;
+ struct batman_if *batman_if, *batman_if_tmp;
+
+ list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list)
+ hardif_remove_interface(batman_if);
}
static int hard_if_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr)
{
- struct net_device *dev = (struct net_device *)ptr;
- struct batman_if *batman_if = get_batman_if_by_name(dev->name);
+ struct net_device *net_dev = (struct net_device *)ptr;
+ struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
+
+ if (!batman_if)
+ batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
+ case NETDEV_REGISTER:
+ break;
+ case NETDEV_UP:
+ hardif_activate_interface(bat_priv, batman_if);
+ break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
- case NETDEV_UNREGISTER:
hardif_deactivate_interface(batman_if);
break;
- case NETDEV_UP:
- hardif_activate_interface(batman_if);
- if ((atomic_read(&module_state) == MODULE_INACTIVE) &&
- (hardif_get_active_if_num() > 0)) {
- activate_module();
- }
+ case NETDEV_UNREGISTER:
+ hardif_remove_interface(batman_if);
+ break;
+ case NETDEV_CHANGENAME:
+ break;
+ case NETDEV_CHANGEADDR:
+ check_known_mac_addr(batman_if->net_dev->dev_addr);
+ update_mac_addresses(batman_if);
+ if (batman_if == bat_priv->primary_if)
+ set_primary_if(bat_priv, batman_if);
break;
- /* NETDEV_CHANGEADDR - mac address change - what are we doing here ? */
default:
break;
};
- update_min_mtu();
-
out:
return NOTIFY_DONE;
}
-/* find batman interface by netdev. assumes rcu_read_lock on */
-static struct batman_if *find_batman_if(struct net_device *dev)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->net_dev == dev) {
- rcu_read_unlock();
- return batman_if;
- }
- }
- rcu_read_unlock();
- return NULL;
-}
-
-
/* receive a packet with the batman ethertype coming on a hard
* interface */
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -444,12 +460,12 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
|| !skb_mac_header(skb)))
goto err_free;
- batman_if = find_batman_if(skb->dev);
+ batman_if = get_batman_if_by_netdev(skb->dev);
if (!batman_if)
goto err_free;
/* discard frames on not active interfaces */
- if (batman_if->if_active != IF_ACTIVE)
+ if (batman_if->if_status != IF_ACTIVE)
goto err_free;
stats = (struct net_device_stats *)dev_get_stats(skb->dev);
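The interface handling above hinges on the kernel's netdevice notifier chain: batman-adv registers one callback and reacts to NETDEV_UP, NETDEV_DOWN, NETDEV_UNREGISTER and NETDEV_CHANGEADDR events for the hard interfaces it tracks. A minimal, self-contained sketch of that pattern follows; it is illustrative only, the example_* names are hypothetical and the real handler is hard_if_event() above.

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int example_netdev_event(struct notifier_block *this,
                                    unsigned long event, void *ptr)
    {
            /* on kernels of this era the notifier payload is the net_device */
            struct net_device *net_dev = (struct net_device *)ptr;

            switch (event) {
            case NETDEV_UP:
                    printk(KERN_INFO "example: %s is up\n", net_dev->name);
                    break;
            case NETDEV_DOWN:
            case NETDEV_UNREGISTER:
                    printk(KERN_INFO "example: %s going away\n", net_dev->name);
                    break;
            default:
                    break;
            }

            return NOTIFY_DONE;
    }

    static struct notifier_block example_notifier = {
            .notifier_call = example_netdev_event,
    };

    static int __init example_init(void)
    {
            return register_netdevice_notifier(&example_notifier);
    }

    static void __exit example_exit(void)
    {
            unregister_netdevice_notifier(&example_notifier);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

batman-adv wires up its own notifier_block (hard_if_notifier) the same way in init_module() later in this patch.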
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
index 97c6ecb9e08..1e5fc3e720f 100644
--- a/drivers/staging/batman-adv/hard-interface.h
+++ b/drivers/staging/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -19,19 +19,19 @@
*
*/
-#define IF_INACTIVE 0
-#define IF_ACTIVE 1
-/* #define IF_TO_BE_DEACTIVATED 2 - not needed anymore */
-#define IF_TO_BE_ACTIVATED 3
+#define IF_NOT_IN_USE 0
+#define IF_TO_BE_REMOVED 1
+#define IF_INACTIVE 2
+#define IF_ACTIVE 3
+#define IF_TO_BE_ACTIVATED 4
+#define IF_I_WANT_YOU 5
extern struct notifier_block hard_if_notifier;
+struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
+int hardif_enable_interface(struct batman_if *batman_if);
+void hardif_disable_interface(struct batman_if *batman_if);
void hardif_remove_interfaces(void);
-int hardif_add_interface(char *dev, int if_num);
-void hardif_deactivate_interface(struct batman_if *batman_if);
-char hardif_get_active_if_num(void);
-void hardif_check_interfaces_status(void);
-void hardif_check_interfaces_status_wq(struct work_struct *work);
int batman_skb_recv(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *ptype,
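The IF_* codes above replace the old two-state active flag with a richer lifecycle: not in use, requested, waiting for activation, active, inactive, scheduled for removal. A small illustrative helper, not part of the patch, maps the codes to readable labels; the wording for IF_I_WANT_YOU is a guess, since nothing in this hunk switches on it:

    /* illustrative only; assumes the IF_* defines from hard-interface.h above */
    static const char *if_status_str(int status)
    {
            switch (status) {
            case IF_NOT_IN_USE:
                    return "not in use";
            case IF_TO_BE_REMOVED:
                    return "to be removed";
            case IF_INACTIVE:
                    return "inactive";
            case IF_ACTIVE:
                    return "active";
            case IF_TO_BE_ACTIVATED:
                    return "to be activated";
            case IF_I_WANT_YOU:
                    return "requested";
            default:
                    return "unknown";
            }
    }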
diff --git a/drivers/staging/batman-adv/hash.c b/drivers/staging/batman-adv/hash.c
index 5a2018de3ff..d4a4adc5704 100644
--- a/drivers/staging/batman-adv/hash.c
+++ b/drivers/staging/batman-adv/hash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/drivers/staging/batman-adv/hash.h b/drivers/staging/batman-adv/hash.h
index a70d6d6e1c7..ea6d21e0125 100644
--- a/drivers/staging/batman-adv/hash.h
+++ b/drivers/staging/batman-adv/hash.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 2e0b482e710..9d13979c2d8 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -20,7 +20,7 @@
*/
#include "main.h"
-#include "proc.h"
+#include "bat_sysfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
@@ -41,12 +41,11 @@ DEFINE_SPINLOCK(orig_hash_lock);
DEFINE_SPINLOCK(forw_bat_list_lock);
DEFINE_SPINLOCK(forw_bcast_list_lock);
-atomic_t originator_interval;
atomic_t vis_interval;
-atomic_t vis_mode;
-atomic_t aggregation_enabled;
+atomic_t bcast_queue_left;
+atomic_t batman_queue_left;
+
int16_t num_hna;
-int16_t num_ifs;
struct net_device *soft_device;
@@ -81,11 +80,10 @@ int init_module(void)
atomic_set(&module_state, MODULE_INACTIVE);
- atomic_set(&originator_interval, 1000);
atomic_set(&vis_interval, 1000);/* TODO: raise this later, this is only
* for debugging now. */
- atomic_set(&vis_mode, VIS_TYPE_CLIENT_UPDATE);
- atomic_set(&aggregation_enabled, 1);
+ atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
+ atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
@@ -94,10 +92,6 @@ int init_module(void)
if (!bat_event_workqueue)
return -ENOMEM;
- retval = setup_procfs();
- if (retval < 0)
- return retval;
-
bat_device_init();
/* initialize layer 2 interface */
@@ -105,25 +99,35 @@ int init_module(void)
interface_setup);
if (!soft_device) {
- printk(KERN_ERR "batman-adv:Unable to allocate the batman interface\n");
+ printk(KERN_ERR "batman-adv:"
+ "Unable to allocate the batman interface\n");
goto end;
}
retval = register_netdev(soft_device);
if (retval < 0) {
- printk(KERN_ERR "batman-adv:Unable to register the batman interface: %i\n", retval);
+ printk(KERN_ERR "batman-adv:"
+ "Unable to register the batman interface: %i\n", retval);
goto free_soft_device;
}
+ retval = sysfs_add_meshif(soft_device);
+
+ if (retval < 0)
+ goto unreg_soft_device;
+
register_netdevice_notifier(&hard_if_notifier);
dev_add_pack(&batman_adv_packet_type);
- printk(KERN_INFO "batman-adv:B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded \n",
- SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION);
+ printk(KERN_INFO "batman-adv:"
+ "B.A.T.M.A.N. advanced %s%s (compatibility version %i) loaded\n",
+ SOURCE_VERSION, REVISION_VERSION_STR, COMPAT_VERSION);
return 0;
+unreg_soft_device:
+ unregister_netdevice(soft_device);
free_soft_device:
free_netdev(soft_device);
soft_device = NULL;
@@ -133,18 +137,19 @@ end:
void cleanup_module(void)
{
- shutdown_module();
+ deactivate_module();
+
+ unregister_netdevice_notifier(&hard_if_notifier);
+ hardif_remove_interfaces();
if (soft_device) {
+ sysfs_del_meshif(soft_device);
unregister_netdev(soft_device);
soft_device = NULL;
}
dev_remove_pack(&batman_adv_packet_type);
- unregister_netdevice_notifier(&hard_if_notifier);
- cleanup_procfs();
-
destroy_workqueue(bat_event_workqueue);
bat_event_workqueue = NULL;
}
@@ -174,18 +179,20 @@ void activate_module(void)
goto end;
err:
- printk(KERN_ERR "batman-adv:Unable to allocate memory for mesh information structures: out of mem ?\n");
- shutdown_module();
+ printk(KERN_ERR "batman-adv:"
+ "Unable to allocate memory for mesh information structures: "
+ "out of mem ?\n");
+ deactivate_module();
end:
return;
}
/* shuts down the whole module.*/
-void shutdown_module(void)
+void deactivate_module(void)
{
atomic_set(&module_state, MODULE_DEACTIVATING);
- purge_outstanding_packets();
+ purge_outstanding_packets(NULL);
flush_workqueue(bat_event_workqueue);
vis_quit();
@@ -200,7 +207,6 @@ void shutdown_module(void)
synchronize_net();
bat_device_destroy();
- hardif_remove_interfaces();
synchronize_rcu();
atomic_set(&module_state, MODULE_INACTIVE);
}
@@ -217,7 +223,7 @@ void dec_module_count(void)
int addr_to_string(char *buff, uint8_t *addr)
{
- return sprintf(buff, "%02x:%02x:%02x:%02x:%02x:%02x",
+ return sprintf(buff, MAC_FMT,
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}
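For reference, the rewritten addr_to_string() above still needs the six explicit byte arguments because MAC_FMT expands to six "%02x" conversions. Where the kernel's extended pointer formats are available, the same address prints from a single pointer, as the bat_dbg() calls elsewhere in this patch already do with %pM; a minimal example, not part of the patch:

    /* %pM consumes a pointer to a 6-byte MAC address */
    printk(KERN_INFO "batman-adv: mac: %pM\n", addr);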
diff --git a/drivers/staging/batman-adv/main.h b/drivers/staging/batman-adv/main.h
index 2e9bb891a5d..5f8343d360f 100644
--- a/drivers/staging/batman-adv/main.h
+++ b/drivers/staging/batman-adv/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,11 +22,12 @@
/* Kernel Programming */
#define LINUX
-#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
+ "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
#define DRIVER_DEVICE "batman-adv"
-#define SOURCE_VERSION "0.2.1-beta"
+#define SOURCE_VERSION "0.2.2-beta"
/* B.A.T.M.A.N. parameters */
@@ -34,8 +35,6 @@
#define TQ_MAX_VALUE 255
#define JITTER 20
#define TTL 50 /* Time To Live of broadcast messages */
-#define MAX_ADDR 16 /* number of interfaces which can be added to
- * batman. */
#define PURGE_TIMEOUT 200000 /* purge originators after time in ms if no
* valid packet comes in -> TODO: check
@@ -63,10 +62,16 @@
* forw_packet->direct_link_flags */
#define MAX_AGGREGATION_MS 100
+#define RESET_PROTECTION_MS 30000
+/* don't reset again within 30 seconds */
+#define EXPECTED_SEQNO_RANGE 4096
+
#define MODULE_INACTIVE 0
#define MODULE_ACTIVE 1
#define MODULE_DEACTIVATING 2
+#define BCAST_QUEUE_LEN 256
+#define BATMAN_QUEUE_LEN 256
/*
* Debug Messages
@@ -129,12 +134,10 @@ extern spinlock_t orig_hash_lock;
extern spinlock_t forw_bat_list_lock;
extern spinlock_t forw_bcast_list_lock;
-extern atomic_t originator_interval;
extern atomic_t vis_interval;
-extern atomic_t vis_mode;
-extern atomic_t aggregation_enabled;
+extern atomic_t bcast_queue_left;
+extern atomic_t batman_queue_left;
extern int16_t num_hna;
-extern int16_t num_ifs;
extern struct net_device *soft_device;
@@ -143,7 +146,7 @@ extern atomic_t module_state;
extern struct workqueue_struct *bat_event_workqueue;
void activate_module(void);
-void shutdown_module(void);
+void deactivate_module(void);
void inc_module_count(void);
void dec_module_count(void);
int addr_to_string(char *buff, uint8_t *addr);
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 29c241119a3..568aef8371b 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -26,6 +26,7 @@
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
+#include "hard-interface.h"
static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
@@ -117,6 +118,8 @@ void free_orig_node(void *data)
 * address if it does not exist */
struct orig_node *get_orig_node(uint8_t *addr)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct hashtable_t *swaphash;
int size;
@@ -126,7 +129,7 @@ struct orig_node *get_orig_node(uint8_t *addr)
if (orig_node != NULL)
return orig_node;
- bat_dbg(DBG_BATMAN, "Creating new originator: %pM \n", addr);
+ bat_dbg(DBG_BATMAN, "Creating new originator: %pM\n", addr);
orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
if (!orig_node)
@@ -136,16 +139,19 @@ struct orig_node *get_orig_node(uint8_t *addr)
memcpy(orig_node->orig, addr, ETH_ALEN);
orig_node->router = NULL;
- orig_node->batman_if = NULL;
orig_node->hna_buff = NULL;
+ orig_node->bcast_seqno_reset = jiffies - 1
+ - msecs_to_jiffies(RESET_PROTECTION_MS);
+ orig_node->batman_seqno_reset = jiffies - 1
+ - msecs_to_jiffies(RESET_PROTECTION_MS);
- size = num_ifs * sizeof(TYPE_OF_WORD) * NUM_WORDS;
+ size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS;
orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bcast_own)
goto free_orig_node;
- size = num_ifs * sizeof(uint8_t);
+ size = bat_priv->num_ifaces * sizeof(uint8_t);
orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
@@ -158,7 +164,7 @@ struct orig_node *get_orig_node(uint8_t *addr)
if (swaphash == NULL)
printk(KERN_ERR
- "batman-adv:Couldn't resize orig hash table \n");
+ "batman-adv:Couldn't resize orig hash table\n");
else
orig_hash = swaphash;
}
@@ -182,16 +188,29 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
*best_neigh_node = NULL;
-
/* for all neighbors towards this originator ... */
list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
neigh_node = list_entry(list_pos, struct neigh_node, list);
- if (time_after(jiffies,
+ if ((time_after(jiffies,
(neigh_node->last_valid +
- ((PURGE_TIMEOUT * HZ) / 1000)))) {
-
- bat_dbg(DBG_BATMAN, "neighbor timeout: originator %pM, neighbor: %pM, last_valid %lu\n", orig_node->orig, neigh_node->addr, (neigh_node->last_valid / HZ));
+ ((PURGE_TIMEOUT * HZ) / 1000)))) ||
+ (neigh_node->if_incoming->if_status ==
+ IF_TO_BE_REMOVED)) {
+
+ if (neigh_node->if_incoming->if_status ==
+ IF_TO_BE_REMOVED)
+ bat_dbg(DBG_BATMAN,
+ "neighbor purge: originator %pM, "
+ "neighbor: %pM, iface: %s\n",
+ orig_node->orig, neigh_node->addr,
+ neigh_node->if_incoming->dev);
+ else
+ bat_dbg(DBG_BATMAN,
+ "neighbor timeout: originator %pM, "
+ "neighbor: %pM, last_valid: %lu\n",
+ orig_node->orig, neigh_node->addr,
+ (neigh_node->last_valid / HZ));
neigh_purged = true;
list_del(list_pos);
@@ -205,7 +224,6 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
return neigh_purged;
}
-
static bool purge_orig_node(struct orig_node *orig_node)
{
struct neigh_node *best_neigh_node;
@@ -224,6 +242,7 @@ static bool purge_orig_node(struct orig_node *orig_node)
orig_node->hna_buff,
orig_node->hna_buff_len);
}
+
return false;
}
@@ -246,7 +265,257 @@ void purge_orig(struct work_struct *work)
spin_unlock_irqrestore(&orig_hash_lock, flags);
- start_purge_timer();
+ /* if work == NULL we were not called by the timer
+ * and thus do not need to re-arm the timer */
+ if (work)
+ start_purge_timer();
+}
+
+ssize_t orig_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off)
+{
+ HASHIT(hashit);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ struct orig_node *orig_node;
+ struct neigh_node *neigh_node;
+ size_t hdr_len, tmp_len;
+ int batman_count = 0, bytes_written = 0;
+ unsigned long flags;
+ char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
+
+ if (!bat_priv->primary_if) {
+ if (off == 0)
+ return sprintf(buff,
+ "BATMAN mesh %s disabled - "
+ "please specify interfaces to enable it\n",
+ net_dev->name);
+
+ return 0;
+ }
+
+ if (bat_priv->primary_if->if_status != IF_ACTIVE && off == 0)
+ return sprintf(buff,
+ "BATMAN mesh %s "
+ "disabled - primary interface not active\n",
+ net_dev->name);
+ else if (bat_priv->primary_if->if_status != IF_ACTIVE)
+ return 0;
+
+ rcu_read_lock();
+ hdr_len = sprintf(buff,
+ " %-14s (%s/%i) %17s [%10s]: %20s "
+ "... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
+ "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
+ "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
+ bat_priv->primary_if->dev, bat_priv->primary_if->addr_str,
+ net_dev->name);
+ rcu_read_unlock();
+
+ if (off < hdr_len)
+ bytes_written = hdr_len;
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
+
+ while (hash_iterate(orig_hash, &hashit)) {
+
+ orig_node = hashit.bucket->data;
+
+ if (!orig_node->router)
+ continue;
+
+ if (orig_node->router->tq_avg == 0)
+ continue;
+
+ /* estimated line length */
+ if (count < bytes_written + 200)
+ break;
+
+ addr_to_string(orig_str, orig_node->orig);
+ addr_to_string(router_str, orig_node->router->addr);
+
+ tmp_len = sprintf(buff + bytes_written,
+ "%-17s (%3i) %17s [%10s]:",
+ orig_str, orig_node->router->tq_avg,
+ router_str,
+ orig_node->router->if_incoming->dev);
+
+ list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
+ addr_to_string(orig_str, neigh_node->addr);
+ tmp_len += sprintf(buff + bytes_written + tmp_len,
+ " %17s (%3i)", orig_str,
+ neigh_node->tq_avg);
+ }
+
+ tmp_len += sprintf(buff + bytes_written + tmp_len, "\n");
+
+ batman_count++;
+ hdr_len += tmp_len;
+
+ if (off >= hdr_len)
+ continue;
+
+ bytes_written += tmp_len;
+ }
+
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ if ((batman_count == 0) && (off == 0))
+ bytes_written += sprintf(buff + bytes_written,
+ "No batman nodes in range ...\n");
+
+ return bytes_written;
+}
+
+static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
+{
+ void *data_ptr;
+
+ data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
+ GFP_ATOMIC);
+ if (!data_ptr) {
+ printk(KERN_ERR
+ "batman-adv:Can't resize orig: out of memory\n");
+ return -1;
+ }
+
+ memcpy(data_ptr, orig_node->bcast_own,
+ (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS);
+ kfree(orig_node->bcast_own);
+ orig_node->bcast_own = data_ptr;
+
+ data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+ if (!data_ptr) {
+ printk(KERN_ERR
+ "batman-adv:Can't resize orig: out of memory\n");
+ return -1;
+ }
+
+ memcpy(data_ptr, orig_node->bcast_own_sum,
+ (max_if_num - 1) * sizeof(uint8_t));
+ kfree(orig_node->bcast_own_sum);
+ orig_node->bcast_own_sum = data_ptr;
+
+ return 0;
+}
+
+int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
+{
+ struct orig_node *orig_node;
+ HASHIT(hashit);
+
+ /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
+ * if_num */
+ spin_lock(&orig_hash_lock);
+
+ while (hash_iterate(orig_hash, &hashit)) {
+ orig_node = hashit.bucket->data;
+
+ if (orig_node_add_if(orig_node, max_if_num) == -1)
+ goto err;
+ }
+
+ spin_unlock(&orig_hash_lock);
+ return 0;
+
+err:
+ spin_unlock(&orig_hash_lock);
+ return -ENOMEM;
+}
+
+static int orig_node_del_if(struct orig_node *orig_node,
+ int max_if_num, int del_if_num)
+{
+ void *data_ptr = NULL;
+ int chunk_size;
+
+ /* last interface was removed */
+ if (max_if_num == 0)
+ goto free_bcast_own;
+
+ chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
+ data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+ if (!data_ptr) {
+ printk(KERN_ERR
+ "batman-adv:Can't resize orig: out of memory\n");
+ return -1;
+ }
+
+ /* copy first part */
+ memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
+
+ /* copy second part: the slots behind the removed one, shifted down */
+ memcpy((char *)data_ptr + del_if_num * chunk_size,
+ orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
+ (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+ kfree(orig_node->bcast_own);
+ orig_node->bcast_own = data_ptr;
+
+ if (max_if_num == 0)
+ goto free_own_sum;
+
+ data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+ if (!data_ptr) {
+ printk(KERN_ERR
+ "batman-adv:Can't resize orig: out of memory\n");
+ return -1;
+ }
+
+ memcpy(data_ptr, orig_node->bcast_own_sum,
+ del_if_num * sizeof(uint8_t));
+
+ memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+ orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
+ (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+ kfree(orig_node->bcast_own_sum);
+ orig_node->bcast_own_sum = data_ptr;
+
+ return 0;
}
+int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
+{
+ struct batman_if *batman_if_tmp;
+ struct orig_node *orig_node;
+ HASHIT(hashit);
+ int ret;
+
+ /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
+ * if_num */
+ spin_lock(&orig_hash_lock);
+
+ while (hash_iterate(orig_hash, &hashit)) {
+ orig_node = hashit.bucket->data;
+
+ ret = orig_node_del_if(orig_node, max_if_num,
+ batman_if->if_num);
+
+ if (ret == -1)
+ goto err;
+ }
+
+ /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
+ rcu_read_lock();
+ list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
+ if (batman_if_tmp->if_status == IF_NOT_IN_USE)
+ continue;
+
+ if (batman_if == batman_if_tmp)
+ continue;
+ if (batman_if_tmp->if_num > batman_if->if_num)
+ batman_if_tmp->if_num--;
+ }
+ rcu_read_unlock();
+
+ batman_if->if_num = -1;
+ spin_unlock(&orig_hash_lock);
+ return 0;
+
+err:
+ spin_unlock(&orig_hash_lock);
+ return -ENOMEM;
+}
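orig_node_add_if() and orig_node_del_if() above grow and shrink flat arrays that hold one slot per hard interface; deleting interface del_if_num means copying the slots before it and the slots after it into a smaller buffer. The standalone sketch below shows the same pattern in plain C with hypothetical names; it is not the driver code. Note the destination offset on the second memcpy: the tail has to land right after the head or it silently overwrites it.

    #include <stdlib.h>
    #include <string.h>

    /* Shrink an array that currently holds (new_count + 1) slots of
     * elem_size bytes by dropping slot del_idx. Returns the new buffer,
     * NULL when nothing is left, or the old buffer on allocation failure. */
    static void *shrink_array(void *old, size_t new_count, size_t del_idx,
                              size_t elem_size)
    {
            char *buf;

            if (new_count == 0) {
                    free(old);
                    return NULL;
            }

            buf = malloc(new_count * elem_size);
            if (!buf)
                    return old;

            /* slots in front of the deleted one ... */
            memcpy(buf, old, del_idx * elem_size);

            /* ... and the slots behind it, shifted down by one position */
            memcpy(buf + del_idx * elem_size,
                   (char *)old + (del_idx + 1) * elem_size,
                   (new_count - del_idx) * elem_size);

            free(old);
            return buf;
    }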
diff --git a/drivers/staging/batman-adv/originator.h b/drivers/staging/batman-adv/originator.h
index 6ef7a054a0a..afbc7c0e8aa 100644
--- a/drivers/staging/batman-adv/originator.h
+++ b/drivers/staging/batman-adv/originator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -28,4 +28,7 @@ struct orig_node *get_orig_node(uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming);
-
+ssize_t orig_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off);
+int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
+int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
index ad006ce8b13..152f57b1c6c 100644
--- a/drivers/staging/batman-adv/packet.h
+++ b/drivers/staging/batman-adv/packet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c
deleted file mode 100644
index 7de60e84bc9..00000000000
--- a/drivers/staging/batman-adv/proc.c
+++ /dev/null
@@ -1,670 +0,0 @@
-/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "proc.h"
-#include "routing.h"
-#include "translation-table.h"
-#include "hard-interface.h"
-#include "types.h"
-#include "hash.h"
-#include "vis.h"
-
-static struct proc_dir_entry *proc_batman_dir, *proc_interface_file;
-static struct proc_dir_entry *proc_orig_interval_file, *proc_originators_file;
-static struct proc_dir_entry *proc_transt_local_file;
-static struct proc_dir_entry *proc_transt_global_file;
-static struct proc_dir_entry *proc_vis_srv_file, *proc_vis_data_file;
-static struct proc_dir_entry *proc_aggr_file;
-
-static int proc_interfaces_read(struct seq_file *seq, void *offset)
-{
- struct batman_if *batman_if;
-
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- seq_printf(seq, "[%8s] %s %s \n",
- (batman_if->if_active == IF_ACTIVE ?
- "active" : "inactive"),
- batman_if->dev,
- (batman_if->if_active == IF_ACTIVE ?
- batman_if->addr_str : " "));
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static int proc_interfaces_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_interfaces_read, NULL);
-}
-
-static ssize_t proc_interfaces_write(struct file *instance,
- const char __user *userbuffer,
- size_t count, loff_t *data)
-{
- char *if_string, *colon_ptr = NULL, *cr_ptr = NULL;
- int not_copied = 0, if_num = 0, add_success;
- struct batman_if *batman_if = NULL;
-
- if_string = kmalloc(count, GFP_KERNEL);
-
- if (!if_string)
- return -ENOMEM;
-
- if (count > IFNAMSIZ - 1) {
- printk(KERN_WARNING "batman-adv:Can't add interface: device name is too long\n");
- goto end;
- }
-
- not_copied = copy_from_user(if_string, userbuffer, count);
- if_string[count - not_copied - 1] = 0;
-
- colon_ptr = strchr(if_string, ':');
- if (colon_ptr)
- *colon_ptr = 0;
-
- if (!colon_ptr) {
- cr_ptr = strchr(if_string, '\n');
- if (cr_ptr)
- *cr_ptr = 0;
- }
-
- if (strlen(if_string) == 0) {
- shutdown_module();
- num_ifs = 0;
- goto end;
- }
-
- /* add interface */
- rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (strncmp(batman_if->dev, if_string, count) == 0) {
- printk(KERN_ERR "batman-adv:Given interface is already active: %s\n", if_string);
- rcu_read_unlock();
- goto end;
-
- }
-
- if_num++;
- }
- rcu_read_unlock();
-
- add_success = hardif_add_interface(if_string, if_num);
- if (add_success < 0)
- goto end;
-
- num_ifs = if_num + 1;
-
- if ((atomic_read(&module_state) == MODULE_INACTIVE) &&
- (hardif_get_active_if_num() > 0))
- activate_module();
-
- return count;
-end:
- kfree(if_string);
- return count;
-}
-
-static int proc_orig_interval_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%i\n", atomic_read(&originator_interval));
-
- return 0;
-}
-
-static ssize_t proc_orig_interval_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- char *interval_string;
- int not_copied = 0;
- unsigned long originator_interval_tmp;
- int retval;
-
- interval_string = kmalloc(count, GFP_KERNEL);
-
- if (!interval_string)
- return -ENOMEM;
-
- not_copied = copy_from_user(interval_string, buffer, count);
- interval_string[count - not_copied - 1] = 0;
-
- retval = strict_strtoul(interval_string, 10, &originator_interval_tmp);
- if (retval) {
- printk(KERN_ERR "batman-adv:New originator interval invalid\n");
- goto end;
- }
-
- if (originator_interval_tmp <= JITTER * 2) {
- printk(KERN_WARNING "batman-adv:New originator interval too small: %li (min: %i)\n",
- originator_interval_tmp, JITTER * 2);
- goto end;
- }
-
- printk(KERN_INFO "batman-adv:Changing originator interval from: %i to: %li\n",
- atomic_read(&originator_interval), originator_interval_tmp);
-
- atomic_set(&originator_interval, originator_interval_tmp);
-
-end:
- kfree(interval_string);
- return count;
-}
-
-static int proc_orig_interval_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_orig_interval_read, NULL);
-}
-
-static int proc_originators_read(struct seq_file *seq, void *offset)
-{
- HASHIT(hashit);
- struct orig_node *orig_node;
- struct neigh_node *neigh_node;
- int batman_count = 0;
- char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
- unsigned long flags;
-
- rcu_read_lock();
- if (list_empty(&if_list)) {
- rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
- goto end;
- }
-
- if (((struct batman_if *)if_list.next)->if_active != IF_ACTIVE) {
- rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - primary interface not active \n");
- goto end;
- }
-
- seq_printf(seq,
- " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s] \n",
- "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
- "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
- ((struct batman_if *)if_list.next)->dev,
- ((struct batman_if *)if_list.next)->addr_str);
-
- rcu_read_unlock();
- spin_lock_irqsave(&orig_hash_lock, flags);
-
- while (hash_iterate(orig_hash, &hashit)) {
-
- orig_node = hashit.bucket->data;
-
- if (!orig_node->router)
- continue;
-
- if (orig_node->router->tq_avg == 0)
- continue;
-
- batman_count++;
-
- addr_to_string(orig_str, orig_node->orig);
- addr_to_string(router_str, orig_node->router->addr);
-
- seq_printf(seq, "%-17s (%3i) %17s [%10s]:",
- orig_str, orig_node->router->tq_avg,
- router_str, orig_node->router->if_incoming->dev);
-
- list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
- addr_to_string(orig_str, neigh_node->addr);
- seq_printf(seq, " %17s (%3i)",
- orig_str, neigh_node->tq_avg);
- }
-
- seq_printf(seq, "\n");
-
- }
-
- spin_unlock_irqrestore(&orig_hash_lock, flags);
-
- if (batman_count == 0)
- seq_printf(seq, "No batman nodes in range ... \n");
-
-end:
- return 0;
-}
-
-static int proc_originators_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_originators_read, NULL);
-}
-
-static int proc_transt_local_read(struct seq_file *seq, void *offset)
-{
- char *buf;
-
- buf = kmalloc(4096, GFP_KERNEL);
- if (!buf)
- return 0;
-
- rcu_read_lock();
- if (list_empty(&if_list)) {
- rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
- goto end;
- }
-
- rcu_read_unlock();
-
- seq_printf(seq, "Locally retrieved addresses (from %s) announced via HNA:\n", soft_device->name);
-
- hna_local_fill_buffer_text(buf, 4096);
- seq_printf(seq, "%s", buf);
-
-end:
- kfree(buf);
- return 0;
-}
-
-static int proc_transt_local_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_transt_local_read, NULL);
-}
-
-static int proc_transt_global_read(struct seq_file *seq, void *offset)
-{
- char *buf;
-
- buf = kmalloc(4096, GFP_KERNEL);
- if (!buf)
- return 0;
-
- rcu_read_lock();
- if (list_empty(&if_list)) {
- rcu_read_unlock();
- seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
- goto end;
- }
- rcu_read_unlock();
-
-
- seq_printf(seq, "Globally announced HNAs received via the mesh (translation table):\n");
-
- hna_global_fill_buffer_text(buf, 4096);
- seq_printf(seq, "%s", buf);
-
-end:
- kfree(buf);
- return 0;
-}
-
-static int proc_transt_global_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_transt_global_read, NULL);
-}
-
-/* setting the mode of the vis server by the user */
-static ssize_t proc_vis_srv_write(struct file *file, const char __user * buffer,
- size_t count, loff_t *ppos)
-{
- char *vis_mode_string;
- int not_copied = 0;
-
- vis_mode_string = kmalloc(count, GFP_KERNEL);
-
- if (!vis_mode_string)
- return -ENOMEM;
-
- not_copied = copy_from_user(vis_mode_string, buffer, count);
- vis_mode_string[count - not_copied - 1] = 0;
-
- if ((strcmp(vis_mode_string, "client") == 0) ||
- (strcmp(vis_mode_string, "disabled") == 0)) {
- printk(KERN_INFO "batman-adv:Setting VIS mode to client (disabling vis server)\n");
- atomic_set(&vis_mode, VIS_TYPE_CLIENT_UPDATE);
- } else if ((strcmp(vis_mode_string, "server") == 0) ||
- (strcmp(vis_mode_string, "enabled") == 0)) {
- printk(KERN_INFO "batman-adv:Setting VIS mode to server (enabling vis server)\n");
- atomic_set(&vis_mode, VIS_TYPE_SERVER_SYNC);
- } else
- printk(KERN_ERR "batman-adv:Unknown VIS mode: %s\n",
- vis_mode_string);
-
- kfree(vis_mode_string);
- return count;
-}
-
-static int proc_vis_srv_read(struct seq_file *seq, void *offset)
-{
- int vis_server = atomic_read(&vis_mode);
-
- seq_printf(seq, "[%c] client mode (server disabled) \n",
- (vis_server == VIS_TYPE_CLIENT_UPDATE) ? 'x' : ' ');
- seq_printf(seq, "[%c] server mode (server enabled) \n",
- (vis_server == VIS_TYPE_SERVER_SYNC) ? 'x' : ' ');
-
- return 0;
-}
-
-static int proc_vis_srv_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_vis_srv_read, NULL);
-}
-
-static int proc_vis_data_read(struct seq_file *seq, void *offset)
-{
- HASHIT(hashit);
- struct vis_info *info;
- struct vis_info_entry *entries;
- HLIST_HEAD(vis_if_list);
- int i;
- char tmp_addr_str[ETH_STR_LEN];
- unsigned long flags;
- int vis_server = atomic_read(&vis_mode);
-
- rcu_read_lock();
- if (list_empty(&if_list) || (vis_server == VIS_TYPE_CLIENT_UPDATE)) {
- rcu_read_unlock();
- goto end;
- }
-
- rcu_read_unlock();
-
- spin_lock_irqsave(&vis_hash_lock, flags);
- while (hash_iterate(vis_hash, &hashit)) {
- info = hashit.bucket->data;
- entries = (struct vis_info_entry *)
- ((char *)info + sizeof(struct vis_info));
- addr_to_string(tmp_addr_str, info->packet.vis_orig);
- seq_printf(seq, "%s,", tmp_addr_str);
-
- for (i = 0; i < info->packet.entries; i++) {
- proc_vis_read_entry(seq, &entries[i], &vis_if_list,
- info->packet.vis_orig);
- }
-
- /* add primary/secondary records */
- proc_vis_read_prim_sec(seq, &vis_if_list);
- seq_printf(seq, "\n");
- }
- spin_unlock_irqrestore(&vis_hash_lock, flags);
-
-end:
- return 0;
-}
-
-static int proc_vis_data_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_vis_data_read, NULL);
-}
-
-static int proc_aggr_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%i\n", atomic_read(&aggregation_enabled));
-
- return 0;
-}
-
-static ssize_t proc_aggr_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- char *aggr_string;
- int not_copied = 0;
- unsigned long aggregation_enabled_tmp;
- int retval;
-
- aggr_string = kmalloc(count, GFP_KERNEL);
-
- if (!aggr_string)
- return -ENOMEM;
-
- not_copied = copy_from_user(aggr_string, buffer, count);
- aggr_string[count - not_copied - 1] = 0;
-
- retval = strict_strtoul(aggr_string, 10, &aggregation_enabled_tmp);
-
- if (retval || aggregation_enabled_tmp > 1) {
- printk(KERN_ERR "batman-adv:Aggregation can only be enabled (1) or disabled (0), given value: %li\n", aggregation_enabled_tmp);
- } else {
- printk(KERN_INFO "batman-adv:Changing aggregation from: %s (%i) to: %s (%li)\n",
- (atomic_read(&aggregation_enabled) == 1 ?
- "enabled" : "disabled"),
- atomic_read(&aggregation_enabled),
- (aggregation_enabled_tmp == 1 ? "enabled" : "disabled"),
- aggregation_enabled_tmp);
- atomic_set(&aggregation_enabled,
- (unsigned)aggregation_enabled_tmp);
- }
-
- kfree(aggr_string);
- return count;
-}
-
-static int proc_aggr_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_aggr_read, NULL);
-}
-
-/* satisfying different prototypes ... */
-static ssize_t proc_dummy_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- return count;
-}
-
-static const struct file_operations proc_aggr_fops = {
- .owner = THIS_MODULE,
- .open = proc_aggr_open,
- .read = seq_read,
- .write = proc_aggr_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_vis_srv_fops = {
- .owner = THIS_MODULE,
- .open = proc_vis_srv_open,
- .read = seq_read,
- .write = proc_vis_srv_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_vis_data_fops = {
- .owner = THIS_MODULE,
- .open = proc_vis_data_open,
- .read = seq_read,
- .write = proc_dummy_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_originators_fops = {
- .owner = THIS_MODULE,
- .open = proc_originators_open,
- .read = seq_read,
- .write = proc_dummy_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_transt_local_fops = {
- .owner = THIS_MODULE,
- .open = proc_transt_local_open,
- .read = seq_read,
- .write = proc_dummy_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_transt_global_fops = {
- .owner = THIS_MODULE,
- .open = proc_transt_global_open,
- .read = seq_read,
- .write = proc_dummy_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_interfaces_fops = {
- .owner = THIS_MODULE,
- .open = proc_interfaces_open,
- .read = seq_read,
- .write = proc_interfaces_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations proc_orig_interval_fops = {
- .owner = THIS_MODULE,
- .open = proc_orig_interval_open,
- .read = seq_read,
- .write = proc_orig_interval_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void cleanup_procfs(void)
-{
- if (proc_transt_global_file)
- remove_proc_entry(PROC_FILE_TRANST_GLOBAL, proc_batman_dir);
-
- if (proc_transt_local_file)
- remove_proc_entry(PROC_FILE_TRANST_LOCAL, proc_batman_dir);
-
- if (proc_originators_file)
- remove_proc_entry(PROC_FILE_ORIGINATORS, proc_batman_dir);
-
- if (proc_orig_interval_file)
- remove_proc_entry(PROC_FILE_ORIG_INTERVAL, proc_batman_dir);
-
- if (proc_interface_file)
- remove_proc_entry(PROC_FILE_INTERFACES, proc_batman_dir);
-
- if (proc_vis_data_file)
- remove_proc_entry(PROC_FILE_VIS_DATA, proc_batman_dir);
-
- if (proc_vis_srv_file)
- remove_proc_entry(PROC_FILE_VIS_SRV, proc_batman_dir);
-
- if (proc_aggr_file)
- remove_proc_entry(PROC_FILE_AGGR, proc_batman_dir);
-
- if (proc_batman_dir)
-#ifdef __NET_NET_NAMESPACE_H
- remove_proc_entry(PROC_ROOT_DIR, init_net.proc_net);
-#else
- remove_proc_entry(PROC_ROOT_DIR, proc_net);
-#endif
-}
-
-int setup_procfs(void)
-{
-#ifdef __NET_NET_NAMESPACE_H
- proc_batman_dir = proc_mkdir(PROC_ROOT_DIR, init_net.proc_net);
-#else
- proc_batman_dir = proc_mkdir(PROC_ROOT_DIR, proc_net);
-#endif
-
- if (!proc_batman_dir) {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s' folder failed\n", PROC_ROOT_DIR);
- return -EFAULT;
- }
-
- proc_interface_file = create_proc_entry(PROC_FILE_INTERFACES,
- S_IWUSR | S_IRUGO,
- proc_batman_dir);
- if (proc_interface_file) {
- proc_interface_file->proc_fops = &proc_interfaces_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_INTERFACES);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_orig_interval_file = create_proc_entry(PROC_FILE_ORIG_INTERVAL,
- S_IWUSR | S_IRUGO,
- proc_batman_dir);
- if (proc_orig_interval_file) {
- proc_orig_interval_file->proc_fops = &proc_orig_interval_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_ORIG_INTERVAL);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_originators_file = create_proc_entry(PROC_FILE_ORIGINATORS,
- S_IRUGO, proc_batman_dir);
- if (proc_originators_file) {
- proc_originators_file->proc_fops = &proc_originators_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_ORIGINATORS);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_transt_local_file = create_proc_entry(PROC_FILE_TRANST_LOCAL,
- S_IRUGO, proc_batman_dir);
- if (proc_transt_local_file) {
- proc_transt_local_file->proc_fops = &proc_transt_local_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_TRANST_LOCAL);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_transt_global_file = create_proc_entry(PROC_FILE_TRANST_GLOBAL,
- S_IRUGO, proc_batman_dir);
- if (proc_transt_global_file) {
- proc_transt_global_file->proc_fops = &proc_transt_global_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_TRANST_GLOBAL);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_vis_srv_file = create_proc_entry(PROC_FILE_VIS_SRV,
- S_IWUSR | S_IRUGO,
- proc_batman_dir);
- if (proc_vis_srv_file) {
- proc_vis_srv_file->proc_fops = &proc_vis_srv_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_VIS_SRV);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_vis_data_file = create_proc_entry(PROC_FILE_VIS_DATA, S_IRUGO,
- proc_batman_dir);
- if (proc_vis_data_file) {
- proc_vis_data_file->proc_fops = &proc_vis_data_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_VIS_DATA);
- cleanup_procfs();
- return -EFAULT;
- }
-
- proc_aggr_file = create_proc_entry(PROC_FILE_AGGR, S_IWUSR | S_IRUGO,
- proc_batman_dir);
- if (proc_aggr_file) {
- proc_aggr_file->proc_fops = &proc_aggr_fops;
- } else {
- printk(KERN_ERR "batman-adv: Registering the '/proc/net/%s/%s' file failed\n", PROC_ROOT_DIR, PROC_FILE_AGGR);
- cleanup_procfs();
- return -EFAULT;
- }
-
- return 0;
-}
diff --git a/drivers/staging/batman-adv/proc.h b/drivers/staging/batman-adv/proc.h
deleted file mode 100644
index cd690e0f3e4..00000000000
--- a/drivers/staging/batman-adv/proc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#define PROC_ROOT_DIR "batman-adv"
-#define PROC_FILE_INTERFACES "interfaces"
-#define PROC_FILE_ORIG_INTERVAL "orig_interval"
-#define PROC_FILE_ORIGINATORS "originators"
-#define PROC_FILE_GATEWAYS "gateways"
-#define PROC_FILE_LOG "log"
-#define PROC_FILE_LOG_LEVEL "log_level"
-#define PROC_FILE_TRANST_LOCAL "transtable_local"
-#define PROC_FILE_TRANST_GLOBAL "transtable_global"
-#define PROC_FILE_VIS_SRV "vis_server"
-#define PROC_FILE_VIS_DATA "vis_data"
-#define PROC_FILE_AGGR "aggregate_ogm"
-
-void cleanup_procfs(void);
-int setup_procfs(void);
-
diff --git a/drivers/staging/batman-adv/ring_buffer.c b/drivers/staging/batman-adv/ring_buffer.c
index 751c899f54c..defd37c9be1 100644
--- a/drivers/staging/batman-adv/ring_buffer.c
+++ b/drivers/staging/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/drivers/staging/batman-adv/ring_buffer.h b/drivers/staging/batman-adv/ring_buffer.h
index 6839ba97eeb..b8c9456558b 100644
--- a/drivers/staging/batman-adv/ring_buffer.h
+++ b/drivers/staging/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index d89048beebe..066dc8b3881 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -94,14 +94,13 @@ static void update_route(struct orig_node *orig_node,
/* route changed */
} else {
- bat_dbg(DBG_ROUTES, "Changing route towards: %pM (now via %pM - was via %pM)\n", orig_node->orig, neigh_node->addr, orig_node->router->addr);
+ bat_dbg(DBG_ROUTES,
+ "Changing route towards: %pM "
+ "(now via %pM - was via %pM)\n",
+ orig_node->orig, neigh_node->addr,
+ orig_node->router->addr);
}
- if (neigh_node != NULL)
- orig_node->batman_if = neigh_node->if_incoming;
- else
- orig_node->batman_if = NULL;
-
orig_node->router = neigh_node;
}
@@ -210,9 +209,13 @@ static int isBidirectionalNeigh(struct orig_node *orig_node,
batman_packet->tq = ((batman_packet->tq *
orig_neigh_node->tq_own *
orig_neigh_node->tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
+ (TQ_MAX_VALUE * TQ_MAX_VALUE));
- bat_dbg(DBG_BATMAN, "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i \n",
+ bat_dbg(DBG_BATMAN,
+ "bidirectional: "
+ "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
+ "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
+ "total tq: %3i\n",
orig_node->orig, orig_neigh_node->orig, total_count,
neigh_node->real_packet_count, orig_neigh_node->tq_own,
orig_neigh_node->tq_asym_penalty, batman_packet->tq);
@@ -234,7 +237,8 @@ static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
int tmp_hna_buff_len;
- bat_dbg(DBG_BATMAN, "update_originator(): Searching and updating originator entry of received packet \n");
+ bat_dbg(DBG_BATMAN, "update_originator(): "
+ "Searching and updating originator entry of received packet\n");
list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
@@ -309,6 +313,38 @@ update_hna:
update_routes(orig_node, orig_node->router, hna_buff, tmp_hna_buff_len);
}
+/* checks whether the host restarted and is in the protection time.
+ * returns:
+ * 0 if the packet is to be accepted
+ * 1 if the packet is to be ignored.
+ */
+static int window_protected(int16_t seq_num_diff,
+ unsigned long *last_reset)
+{
+ if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
+ || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
+ if (time_after(jiffies, *last_reset +
+ msecs_to_jiffies(RESET_PROTECTION_MS))) {
+
+ *last_reset = jiffies;
+ bat_dbg(DBG_BATMAN,
+ "old packet received, start protection\n");
+
+ return 0;
+ } else
+ return 1;
+ }
+ return 0;
+}
+
+/* processes a batman packet for all interfaces, adjusts the sequence number and
+ * finds out whether it is a duplicate.
+ * returns:
+ * 1 the packet is a duplicate
+ * 0 the packet has not yet been received
+ * -1 the packet is old and has been received while the seqno window
+ * was protected. Caller should drop it.
+ */
static char count_real_packets(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct batman_if *if_incoming)
@@ -316,32 +352,42 @@ static char count_real_packets(struct ethhdr *ethhdr,
struct orig_node *orig_node;
struct neigh_node *tmp_neigh_node;
char is_duplicate = 0;
- uint16_t seq_diff;
+ int16_t seq_diff;
+ int need_update = 0;
+ int set_mark;
orig_node = get_orig_node(batman_packet->orig);
if (orig_node == NULL)
return 0;
+ seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
+
+ /* signal to the caller that the packet is to be dropped. */
+ if (window_protected(seq_diff, &orig_node->batman_seqno_reset))
+ return -1;
+
list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
- if (!is_duplicate)
- is_duplicate =
- get_bit_status(tmp_neigh_node->real_bits,
+ is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
orig_node->last_real_seqno,
batman_packet->seqno);
- seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
+
if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
(tmp_neigh_node->if_incoming == if_incoming))
- bit_get_packet(tmp_neigh_node->real_bits, seq_diff, 1);
+ set_mark = 1;
else
- bit_get_packet(tmp_neigh_node->real_bits, seq_diff, 0);
+ set_mark = 0;
+
+ /* if the window moved, set the update flag. */
+ need_update |= bit_get_packet(tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
bit_packet_count(tmp_neigh_node->real_bits);
}
- if (!is_duplicate) {
- bat_dbg(DBG_BATMAN, "updating last_seqno: old %d, new %d \n",
+ if (need_update) {
+ bat_dbg(DBG_BATMAN, "updating last_seqno: old %d, new %d\n",
orig_node->last_real_seqno, batman_packet->seqno);
orig_node->last_real_seqno = batman_packet->seqno;
}
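The window_protected() check and the reworked count_real_packets() above maintain a sliding sequence-number window per originator: packets far behind the window, or absurdly far ahead of it, are treated as a sign that the sender rebooted and are dropped until the reset-protection period expires. The toy program below reproduces only the out-of-window arithmetic (no bitmaps, no jiffies); WINDOW_SIZE stands in for TQ_LOCAL_WINDOW_SIZE and the 4096 matches EXPECTED_SEQNO_RANGE from main.h.

    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW_SIZE     64      /* stand-in for TQ_LOCAL_WINDOW_SIZE */
    #define EXPECTED_RANGE  4096    /* matches EXPECTED_SEQNO_RANGE above */

    /* 1 if the difference only makes sense after a restart of the sender */
    static int seqno_out_of_window(int16_t seq_diff)
    {
            return (seq_diff <= -WINDOW_SIZE) || (seq_diff >= EXPECTED_RANGE);
    }

    int main(void)
    {
            uint16_t last_seqno = 100;
            uint16_t received[] = { 101, 130, 90, 20, 60000 };
            size_t i;

            for (i = 0; i < sizeof(received) / sizeof(received[0]); i++) {
                    /* same narrowing to int16_t as count_real_packets();
                     * wraps as expected on two's-complement targets */
                    int16_t diff = (int16_t)(received[i] - last_seqno);

                    printf("seqno %5u: diff %6d -> %s\n",
                           (unsigned int)received[i], diff,
                           seqno_out_of_window(diff) ?
                           "suspicious, protection applies" : "inside window");
            }

            return 0;
    }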
@@ -385,14 +431,16 @@ void receive_bat_packet(struct ethhdr *ethhdr,
is_single_hop_neigh = (compare_orig(ethhdr->h_source,
batman_packet->orig) ? 1 : 0);
- bat_dbg(DBG_BATMAN, "Received BATMAN packet via NB: %pM, IF: %s [%s] (from OG: %pM, via prev OG: %pM, seqno %d, tq %d, TTL %d, V %d, IDF %d) \n",
+ bat_dbg(DBG_BATMAN, "Received BATMAN packet via NB: %pM, IF: %s [%s] "
+ "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
+ "TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->dev, if_incoming->addr_str,
batman_packet->orig, batman_packet->prev_sender,
batman_packet->seqno, batman_packet->tq, batman_packet->ttl,
batman_packet->version, has_directlink_flag);
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if (batman_if->if_active != IF_ACTIVE)
+ if (batman_if->if_status != IF_ACTIVE)
continue;
if (compare_orig(ethhdr->h_source,
@@ -420,13 +468,16 @@ void receive_bat_packet(struct ethhdr *ethhdr,
if (is_my_addr) {
bat_dbg(DBG_BATMAN,
- "Drop packet: received my own broadcast (sender: %pM)\n",
+ "Drop packet: received my own broadcast (sender: %pM"
+ ")\n",
ethhdr->h_source);
return;
}
if (is_broadcast) {
- bat_dbg(DBG_BATMAN, "Drop packet: ignoring all packets with broadcast source addr (sender: %pM) \n", ethhdr->h_source);
+ bat_dbg(DBG_BATMAN, "Drop packet: "
+ "ignoring all packets with broadcast source addr (sender: %pM"
+ ")\n", ethhdr->h_source);
return;
}
@@ -454,27 +505,36 @@ void receive_bat_packet(struct ethhdr *ethhdr,
bit_packet_count(word);
}
- bat_dbg(DBG_BATMAN, "Drop packet: originator packet from myself (via neighbor) \n");
+ bat_dbg(DBG_BATMAN, "Drop packet: "
+ "originator packet from myself (via neighbor)\n");
return;
}
- if (batman_packet->tq == 0) {
- count_real_packets(ethhdr, batman_packet, if_incoming);
-
- bat_dbg(DBG_BATMAN, "Drop packet: originator packet with tq equal 0 \n");
+ if (is_my_oldorig) {
+ bat_dbg(DBG_BATMAN,
+ "Drop packet: ignoring all rebroadcast echos (sender: "
+ "%pM)\n", ethhdr->h_source);
return;
}
- if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, "Drop packet: ignoring all rebroadcast echos (sender: %pM) \n", ethhdr->h_source);
+ orig_node = get_orig_node(batman_packet->orig);
+ if (orig_node == NULL)
return;
- }
is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
- orig_node = get_orig_node(batman_packet->orig);
- if (orig_node == NULL)
+ if (is_duplicate == -1) {
+ bat_dbg(DBG_BATMAN,
+ "Drop packet: packet within seqno protection time "
+ "(sender: %pM)\n", ethhdr->h_source);
+ return;
+ }
+
+ if (batman_packet->tq == 0) {
+ bat_dbg(DBG_BATMAN,
+ "Drop packet: originator packet with tq equal 0\n");
return;
+ }
/* avoid temporary routing loops */
if ((orig_node->router) &&
@@ -484,7 +544,9 @@ void receive_bat_packet(struct ethhdr *ethhdr,
!(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
(compare_orig(orig_node->router->addr,
orig_node->router->orig_node->router->addr))) {
- bat_dbg(DBG_BATMAN, "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM) \n", ethhdr->h_source);
+ bat_dbg(DBG_BATMAN,
+ "Drop packet: ignoring all rebroadcast packets that "
+ "may make me loop (sender: %pM)\n", ethhdr->h_source);
return;
}
@@ -522,7 +584,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
schedule_forward_packet(orig_node, ethhdr, batman_packet,
1, hna_buff_len, if_incoming);
- bat_dbg(DBG_BATMAN, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
+ bat_dbg(DBG_BATMAN, "Forwarding packet: "
+ "rebroadcast neighbor packet with direct link flag\n");
return;
}
@@ -549,6 +612,7 @@ int recv_bat_packet(struct sk_buff *skb,
{
struct ethhdr *ethhdr;
unsigned long flags;
+ struct sk_buff *skb_old;
/* drop packet if it has not necessary minimum size */
if (skb_headlen(skb) < sizeof(struct batman_packet))
@@ -564,12 +628,20 @@ int recv_bat_packet(struct sk_buff *skb,
if (is_bcast(ethhdr->h_source))
return NET_RX_DROP;
- spin_lock_irqsave(&orig_hash_lock, flags);
/* TODO: we use headlen instead of "length", because
* only this data is paged in. */
- /* TODO: is another skb_copy needed here? there will be
- * written on the data, but nobody (?) should further use
- * this data */
+
+ /* create a copy of the skb, if needed, to modify it. */
+ if (!skb_clone_writable(skb, skb_headlen(skb))) {
+ skb_old = skb;
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+ kfree_skb(skb_old);
+ }
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
receive_aggr_bat_packet(ethhdr,
skb->data,
skb_headlen(skb),
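The copy-on-demand logic added above is a common pattern for receive paths that want to modify packet data which may still be shared with other clones: only duplicate the skb when skb_clone_writable() says the buffer is not exclusively ours. A condensed sketch (kernel context assumed, simplified error handling, not the driver code):

    #include <linux/skbuff.h>

    /* Return a private, writable skb, or NULL if the copy failed.
     * On failure the caller still owns the original skb. */
    static struct sk_buff *make_private_copy(struct sk_buff *skb)
    {
            struct sk_buff *copy;

            if (skb_clone_writable(skb, skb_headlen(skb)))
                    return skb;

            copy = skb_copy(skb, GFP_ATOMIC);
            if (!copy)
                    return NULL;

            kfree_skb(skb);         /* release the shared original */
            return copy;
    }

Note that after such a copy any cached pointers into the old buffer go stale, which is why the hunk re-derives ethhdr from skb_mac_header() right after skb_copy().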
@@ -591,8 +663,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
unsigned long flags;
uint8_t dstaddr[ETH_ALEN];
- icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *) skb_mac_header(skb);
+ icmp_packet = (struct icmp_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -608,12 +680,11 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
ret = NET_RX_DROP;
if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
- batman_if = orig_node->batman_if;
+ batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
@@ -624,7 +695,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb)
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *) skb->data;
+
+ icmp_packet = (struct icmp_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
@@ -658,8 +731,10 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
- printk(KERN_WARNING "batman-adv:Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
- icmp_packet->orig, icmp_packet->dst);
+ printk(KERN_WARNING "batman-adv:"
+ "Warning - can't forward icmp packet from %pM to %pM: "
+ "ttl exceeded\n",
+ icmp_packet->orig, icmp_packet->dst);
return NET_RX_DROP;
}
@@ -670,12 +745,11 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
ret = NET_RX_DROP;
if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
- batman_if = orig_node->batman_if;
+ batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
@@ -686,6 +760,7 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb)
if (!skb)
return NET_RX_DROP;
icmp_packet = (struct icmp_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
@@ -734,7 +809,7 @@ int recv_icmp_packet(struct sk_buff *skb)
if (!is_my_mac(ethhdr->h_dest))
return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *) skb->data;
+ icmp_packet = (struct icmp_packet *)skb->data;
/* packet for me */
if (is_my_mac(icmp_packet->dst))
@@ -752,12 +827,11 @@ int recv_icmp_packet(struct sk_buff *skb)
hash_find(orig_hash, icmp_packet->dst));
if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
- batman_if = orig_node->batman_if;
+ batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
@@ -767,7 +841,8 @@ int recv_icmp_packet(struct sk_buff *skb)
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *) skb->data;
+ icmp_packet = (struct icmp_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
@@ -824,7 +899,9 @@ int recv_unicast_packet(struct sk_buff *skb)
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
- printk(KERN_WARNING "batman-adv:Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
+ printk(KERN_WARNING "batman-adv:Warning - "
+ "can't forward unicast packet from %pM to %pM: "
+ "ttl exceeded\n",
ethhdr->h_source, unicast_packet->dest);
return NET_RX_DROP;
}
@@ -836,12 +913,11 @@ int recv_unicast_packet(struct sk_buff *skb)
hash_find(orig_hash, unicast_packet->dest));
if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
(orig_node->router != NULL)) {
/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
- batman_if = orig_node->batman_if;
+ batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
@@ -851,7 +927,8 @@ int recv_unicast_packet(struct sk_buff *skb)
skb = skb_copy(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *) skb->data;
+ unicast_packet = (struct unicast_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
kfree_skb(skb_old);
}
/* decrement ttl */
@@ -867,13 +944,13 @@ int recv_unicast_packet(struct sk_buff *skb)
return ret;
}
-
int recv_bcast_packet(struct sk_buff *skb)
{
struct orig_node *orig_node;
struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(struct bcast_packet);
+ int16_t seq_diff;
unsigned long flags;
/* drop packet if it has not necessary minimum size */
@@ -894,7 +971,7 @@ int recv_bcast_packet(struct sk_buff *skb)
if (is_my_mac(ethhdr->h_source))
return NET_RX_DROP;
- bcast_packet = (struct bcast_packet *) skb->data;
+ bcast_packet = (struct bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
if (is_my_mac(bcast_packet->orig))
@@ -909,7 +986,7 @@ int recv_bcast_packet(struct sk_buff *skb)
return NET_RX_DROP;
}
- /* check flood history */
+ /* check whether the packet is a duplicate */
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
ntohs(bcast_packet->seqno))) {
@@ -917,14 +994,20 @@ int recv_bcast_packet(struct sk_buff *skb)
return NET_RX_DROP;
}
- /* mark broadcast in flood history */
- if (bit_get_packet(orig_node->bcast_bits,
- ntohs(bcast_packet->seqno) -
- orig_node->last_bcast_seqno, 1))
+ seq_diff = ntohs(bcast_packet->seqno) - orig_node->last_bcast_seqno;
+
+ /* check whether the packet is old and the host just restarted. */
+ if (window_protected(seq_diff, &orig_node->bcast_seqno_reset)) {
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+ return NET_RX_DROP;
+ }
+
+ /* mark broadcast in flood history, update window position
+ * if required. */
+ if (bit_get_packet(orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohs(bcast_packet->seqno);
spin_unlock_irqrestore(&orig_hash_lock, flags);
-
/* rebroadcast packet */
add_bcast_packet_to_list(skb);
@@ -938,6 +1021,7 @@ int recv_vis_packet(struct sk_buff *skb)
{
struct vis_packet *vis_packet;
struct ethhdr *ethhdr;
+ struct bat_priv *bat_priv;
int hdr_size = sizeof(struct vis_packet);
if (skb_headlen(skb) < hdr_size)
@@ -957,15 +1041,20 @@ int recv_vis_packet(struct sk_buff *skb)
if (is_my_mac(vis_packet->sender_orig))
return NET_RX_DROP;
+ /* FIXME: each batman_if will be attached to a softif */
+ bat_priv = netdev_priv(soft_device);
+
switch (vis_packet->vis_type) {
case VIS_TYPE_SERVER_SYNC:
/* TODO: handle fragmented skbs properly */
- receive_server_sync_packet(vis_packet, skb_headlen(skb));
+ receive_server_sync_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
case VIS_TYPE_CLIENT_UPDATE:
/* TODO: handle fragmented skbs properly */
- receive_client_update_packet(vis_packet, skb_headlen(skb));
+ receive_client_update_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
default: /* ignore unknown packet */
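
[Editor's note — illustrative sketch, not part of the patch.] The recv_bcast_packet() changes above drop duplicates and guard against a restarted sender: the sequence-number difference is computed in a signed 16-bit type (seq_diff), checked against a protection window (window_protected()), and only then recorded in the flood history (bit_get_packet()). The minimal userspace sketch below shows only the wraparound-safe delta idea; the window size and the outside_window() helper are assumptions, and the real batman-adv helpers differ in detail (they also track a reset timestamp).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_WINDOW 64	/* assumed window size, for the demo only */

/* signed delta between two 16-bit seqnos; relies on the usual
 * two's-complement conversion, as on Linux targets */
static int16_t seq_delta(uint16_t newer, uint16_t older)
{
	return (int16_t)(uint16_t)(newer - older);
}

/* a delta far outside the expected window means "very old packet"
 * or "the sender restarted its sequence numbers" */
static bool outside_window(int16_t diff)
{
	return diff < -SEQ_WINDOW || diff > SEQ_WINDOW;
}

int main(void)
{
	/* 65535 -> 2 wraps around, but the signed delta is just +3 */
	printf("delta(2, 65535) = %d, outside window: %d\n",
	       seq_delta(2, 65535), outside_window(seq_delta(2, 65535)));

	/* a host that rebooted and restarted at 0 looks far out of window */
	printf("delta(0, 30000) = %d, outside window: %d\n",
	       seq_delta(0, 30000), outside_window(seq_delta(0, 30000)));
	return 0;
}
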
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 939b8d4f733..8288decea37 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 2a9fac8c240..d8536e277a2 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -36,25 +36,17 @@ static uint8_t hop_penalty(const uint8_t tq)
}
/* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(void)
+static unsigned long own_send_time(struct bat_priv *bat_priv)
{
return jiffies +
- (((atomic_read(&originator_interval) - JITTER +
+ (((atomic_read(&bat_priv->orig_interval) - JITTER +
(random32() % 2*JITTER)) * HZ) / 1000);
}
/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(void)
+static unsigned long forward_send_time(struct bat_priv *bat_priv)
{
- unsigned long send_time = jiffies; /* Starting now plus... */
-
- if (atomic_read(&aggregation_enabled))
- send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
- (random32() % JITTER)) * HZ) / 1000);
- else
- send_time += (((random32() % (JITTER/2)) * HZ) / 1000);
-
- return send_time;
+ return jiffies + (((random32() % (JITTER/2)) * HZ) / 1000);
}
/* send out an already prepared packet to the given address via the
@@ -65,7 +57,7 @@ int send_skb_packet(struct sk_buff *skb,
{
struct ethhdr *ethhdr;
- if (batman_if->if_active != IF_ACTIVE)
+ if (batman_if->if_status != IF_ACTIVE)
goto send_skb_err;
if (unlikely(!batman_if->net_dev))
@@ -73,7 +65,8 @@ int send_skb_packet(struct sk_buff *skb,
if (!(batman_if->net_dev->flags & IFF_UP)) {
printk(KERN_WARNING
- "batman-adv:Interface %s is not up - can't send packet via that interface!\n",
+ "batman-adv:Interface %s "
+ "is not up - can't send packet via that interface!\n",
batman_if->dev);
goto send_skb_err;
}
@@ -131,10 +124,11 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
int16_t buff_pos;
struct batman_packet *batman_packet;
- if (batman_if->if_active != IF_ACTIVE)
+ if (batman_if->if_status != IF_ACTIVE)
return;
- packet_num = buff_pos = 0;
+ packet_num = 0;
+ buff_pos = 0;
batman_packet = (struct batman_packet *)
(forw_packet->packet_buff);
@@ -155,9 +149,9 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
"Sending own" :
"Forwarding"));
bat_dbg(DBG_BATMAN,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
- fwd_str,
- (packet_num > 0 ? "aggregated " : ""),
+ "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
+ " IDF %s) on interface %s [%s]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_packet->orig, ntohs(batman_packet->seqno),
batman_packet->tq, batman_packet->ttl,
(batman_packet->flags & DIRECTLINK ?
@@ -185,11 +179,12 @@ static void send_packet(struct forw_packet *forw_packet)
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
- printk(KERN_ERR "batman-adv: Error - can't forward packet: incoming iface not specified\n");
+ printk(KERN_ERR "batman-adv: Error - can't forward packet: "
+ "incoming iface not specified\n");
return;
}
- if (forw_packet->if_incoming->if_active != IF_ACTIVE)
+ if (forw_packet->if_incoming->if_status != IF_ACTIVE)
return;
/* multihomed peer assumed */
@@ -199,7 +194,8 @@ static void send_packet(struct forw_packet *forw_packet)
/* FIXME: what about aggregated packets ? */
bat_dbg(DBG_BATMAN,
- "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%s]\n",
+ "%s packet (originator %pM, seqno %d, TTL %d) "
+ "on interface %s [%s]\n",
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_packet->orig, ntohs(batman_packet->seqno),
batman_packet->ttl, forw_packet->if_incoming->dev,
@@ -246,9 +242,17 @@ static void rebuild_batman_packet(struct batman_if *batman_if)
void schedule_own_packet(struct batman_if *batman_if)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
unsigned long send_time;
struct batman_packet *batman_packet;
- int vis_server = atomic_read(&vis_mode);
+ int vis_server;
+
+ if ((batman_if->if_status == IF_NOT_IN_USE) ||
+ (batman_if->if_status == IF_TO_BE_REMOVED))
+ return;
+
+ vis_server = atomic_read(&bat_priv->vis_mode);
/**
* the interface gets activated here to avoid race conditions between
@@ -257,11 +261,12 @@ void schedule_own_packet(struct batman_if *batman_if)
* outdated packets (especially uninitialized mac addresses) in the
* packet queue
*/
- if (batman_if->if_active == IF_TO_BE_ACTIVATED)
- batman_if->if_active = IF_ACTIVE;
+ if (batman_if->if_status == IF_TO_BE_ACTIVATED)
+ batman_if->if_status = IF_ACTIVE;
/* if local hna has changed and interface is a primary interface */
- if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
+ if ((atomic_read(&hna_local_changed)) &&
+ (batman_if == bat_priv->primary_if))
rebuild_batman_packet(batman_if);
/**
@@ -276,15 +281,17 @@ void schedule_own_packet(struct batman_if *batman_if)
if (vis_server == VIS_TYPE_SERVER_SYNC)
batman_packet->flags = VIS_SERVER;
else
- batman_packet->flags = 0;
+ batman_packet->flags &= ~VIS_SERVER;
/* could be read by receive_bat_packet() */
atomic_inc(&batman_if->seqno);
slide_own_bcast_window(batman_if);
- send_time = own_send_time();
- add_bat_packet_to_list(batman_if->packet_buff,
- batman_if->packet_len, batman_if, 1, send_time);
+ send_time = own_send_time(bat_priv);
+ add_bat_packet_to_list(bat_priv,
+ batman_if->packet_buff,
+ batman_if->packet_len,
+ batman_if, 1, send_time);
}
void schedule_forward_packet(struct orig_node *orig_node,
@@ -293,11 +300,13 @@ void schedule_forward_packet(struct orig_node *orig_node,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_incoming)
{
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
unsigned char in_tq, in_ttl, tq_avg = 0;
unsigned long send_time;
if (batman_packet->ttl <= 1) {
- bat_dbg(DBG_BATMAN, "ttl exceeded \n");
+ bat_dbg(DBG_BATMAN, "ttl exceeded\n");
return;
}
@@ -316,7 +325,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
batman_packet->tq = orig_node->router->tq_avg;
if (orig_node->router->last_ttl)
- batman_packet->ttl = orig_node->router->last_ttl - 1;
+ batman_packet->ttl = orig_node->router->last_ttl
+ - 1;
}
tq_avg = orig_node->router->tq_avg;
@@ -325,7 +335,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
/* apply hop penalty */
batman_packet->tq = hop_penalty(batman_packet->tq);
- bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i \n",
+ bat_dbg(DBG_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, "
+ "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
batman_packet->ttl);
@@ -336,8 +347,9 @@ void schedule_forward_packet(struct orig_node *orig_node,
else
batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time();
- add_bat_packet_to_list((unsigned char *)batman_packet,
+ send_time = forward_send_time(bat_priv);
+ add_bat_packet_to_list(bat_priv,
+ (unsigned char *)batman_packet,
sizeof(struct batman_packet) + hna_buff_len,
if_incoming, 0, send_time);
}
@@ -368,19 +380,32 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
send_time);
}
-void add_bcast_packet_to_list(struct sk_buff *skb)
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+/* add a broadcast packet to the queue and set up timers. broadcast packets
+ * are sent multiple times to increase the probability of being received.
+ *
+ * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
+ * errors.
+ *
+ * The skb is not consumed, so the caller should make sure that the
+ * skb is freed. */
+int add_bcast_packet_to_list(struct sk_buff *skb)
{
struct forw_packet *forw_packet;
+ if (!atomic_dec_not_zero(&bcast_queue_left)) {
+ bat_dbg(DBG_BATMAN, "bcast packet queue full\n");
+ goto out;
+ }
+
forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+
if (!forw_packet)
- return;
+ goto out_and_inc;
skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb) {
- kfree(forw_packet);
- return;
- }
+ if (!skb)
+ goto packet_free;
skb_reset_mac_header(skb);
@@ -391,6 +416,14 @@ void add_bcast_packet_to_list(struct sk_buff *skb)
forw_packet->num_packets = 0;
_add_bcast_packet_to_list(forw_packet, 1);
+ return NETDEV_TX_OK;
+
+packet_free:
+ kfree(forw_packet);
+out_and_inc:
+ atomic_inc(&bcast_queue_left);
+out:
+ return NETDEV_TX_BUSY;
}
void send_outstanding_bcast_packet(struct work_struct *work)
@@ -425,8 +458,10 @@ void send_outstanding_bcast_packet(struct work_struct *work)
if ((forw_packet->num_packets < 3) &&
(atomic_read(&module_state) != MODULE_DEACTIVATING))
_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
- else
+ else {
forw_packet_free(forw_packet);
+ atomic_inc(&bcast_queue_left);
+ }
}
void send_outstanding_bat_packet(struct work_struct *work)
@@ -452,22 +487,38 @@ void send_outstanding_bat_packet(struct work_struct *work)
(atomic_read(&module_state) != MODULE_DEACTIVATING))
schedule_own_packet(forw_packet->if_incoming);
+ /* don't count own packet */
+ if (!forw_packet->own)
+ atomic_inc(&batman_queue_left);
+
forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(void)
+void purge_outstanding_packets(struct batman_if *batman_if)
{
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
unsigned long flags;
- bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
+ if (batman_if)
+ bat_dbg(DBG_BATMAN, "purge_outstanding_packets(): %s\n",
+ batman_if->dev);
+ else
+ bat_dbg(DBG_BATMAN, "purge_outstanding_packets()\n");
/* free bcast list */
spin_lock_irqsave(&forw_bcast_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bcast_list, list) {
+ /**
+ * if purge_outstanding_packets() was called with an argument
+ * we delete only packets belonging to the given interface
+ */
+ if ((batman_if) &&
+ (forw_packet->if_incoming != batman_if))
+ continue;
+
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
/**
@@ -484,6 +535,14 @@ void purge_outstanding_packets(void)
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&forw_bat_list, list) {
+ /**
+ * if purge_outstanding_packets() was called with an argument
+ * we delete only packets belonging to the given interface
+ */
+ if ((batman_if) &&
+ (forw_packet->if_incoming != batman_if))
+ continue;
+
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
/**
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
index 5fc6f3417cb..feaa2fc7f9a 100644
--- a/drivers/staging/batman-adv/send.h
+++ b/drivers/staging/batman-adv/send.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -33,7 +33,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
-void add_bcast_packet_to_list(struct sk_buff *skb);
+int add_bcast_packet_to_list(struct sk_buff *skb);
void send_outstanding_bcast_packet(struct work_struct *work);
void send_outstanding_bat_packet(struct work_struct *work);
-void purge_outstanding_packets(void);
+void purge_outstanding_packets(struct batman_if *batman_if);
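
[Editor's note — illustrative sketch, not part of the patch.] The reworked add_bcast_packet_to_list() above bounds the broadcast queue: a slot is reserved by atomically decrementing bcast_queue_left only while it is non-zero (the atomic_dec_not_zero() macro), every error path after a successful reservation increments the counter back, and the caller learns via NETDEV_TX_OK/NETDEV_TX_BUSY whether the skb was queued. The sketch below mirrors that reserve-and-roll-back pattern with C11 atomics instead of the kernel API; the queue depth and function names are invented for the demo.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int queue_left = 8;	/* assumed queue depth */

/* decrement unless already zero; returns true if a slot was claimed */
static bool dec_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
		/* 'old' was reloaded by the failed CAS, retry */
	}
	return false;
}

static int enqueue_broadcast(bool simulate_alloc_failure)
{
	if (!dec_not_zero(&queue_left))
		return -1;	/* queue full, caller keeps its buffer */

	if (simulate_alloc_failure) {
		atomic_fetch_add(&queue_left, 1);	/* roll the claim back */
		return -1;
	}

	return 0;		/* slot stays consumed until the packet is sent */
}

int main(void)
{
	printf("enqueue ok:   %d (slots left %d)\n",
	       enqueue_broadcast(false), atomic_load(&queue_left));
	printf("enqueue fail: %d (slots left %d)\n",
	       enqueue_broadcast(true), atomic_load(&queue_left));
	return 0;
}
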
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 0e2307f3cb9..51c40b77c8d 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -152,9 +152,13 @@ int interface_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- hna_local_remove(dev->dev_addr, "mac address changed");
+ /* only modify hna-table if it has been initialised before */
+ if (atomic_read(&module_state) == MODULE_ACTIVE) {
+ hna_local_remove(dev->dev_addr, "mac address changed");
+ hna_local_add(addr->sa_data);
+ }
+
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- hna_local_add(dev->dev_addr);
return 0;
}
@@ -178,6 +182,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct bat_priv *priv = netdev_priv(dev);
struct batman_if *batman_if;
+ struct bat_priv *bat_priv;
uint8_t dstaddr[6];
int data_len = skb->len;
unsigned long flags;
@@ -185,6 +190,9 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
if (atomic_read(&module_state) != MODULE_ACTIVE)
goto dropped;
+ /* FIXME: each batman_if will be attached to a softif */
+ bat_priv = netdev_priv(soft_device);
+
dev->trans_start = jiffies;
/* TODO: check this for locks */
hna_local_add(ethhdr->h_source);
@@ -208,10 +216,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
/* set broadcast sequence number */
bcast_packet->seqno = htons(bcast_seqno);
- bcast_seqno++;
+ /* broadcast packet. on success, increase seqno. */
+ if (add_bcast_packet_to_list(skb) == NETDEV_TX_OK)
+ bcast_seqno++;
- /* broadcast packet */
- add_bcast_packet_to_list(skb);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
kfree_skb(skb);
@@ -228,8 +236,9 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
orig_node = transtable_search(ethhdr->h_dest);
if ((orig_node) &&
- (orig_node->batman_if) &&
(orig_node->router)) {
+ struct neigh_node *router = orig_node->router;
+
if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
goto unlock;
@@ -244,14 +253,14 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* net_dev won't be available when not active */
- if (orig_node->batman_if->if_active != IF_ACTIVE)
+ if (router->if_incoming->if_status != IF_ACTIVE)
goto unlock;
/* don't lock while sending the packets ... we therefore
* copy the required data before sending */
- batman_if = orig_node->batman_if;
- memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ batman_if = router->if_incoming;
+ memcpy(dstaddr, router->addr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
send_skb_packet(skb, batman_if, dstaddr);
@@ -268,6 +277,7 @@ unlock:
spin_unlock_irqrestore(&orig_hash_lock, flags);
dropped:
priv->stats.tx_dropped++;
+ kfree_skb(skb);
end:
return NETDEV_TX_OK;
}
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
index c0cad8134b2..e7f59af7df3 100644
--- a/drivers/staging/batman-adv/soft-interface.h
+++ b/drivers/staging/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/drivers/staging/batman-adv/translation-table.c b/drivers/staging/batman-adv/translation-table.c
index d56f6654de0..e01ff2151f7 100644
--- a/drivers/staging/batman-adv/translation-table.c
+++ b/drivers/staging/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -77,11 +77,14 @@ void hna_local_add(uint8_t *addr)
MAC-flooding. */
if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
(num_hna + 1 > 255)) {
- bat_dbg(DBG_ROUTES, "Can't add new local hna entry (%pM): number of local hna entries exceeds packet size \n", addr);
+ bat_dbg(DBG_ROUTES,
+ "Can't add new local hna entry (%pM): "
+ "number of local hna entries exceeds packet size\n",
+ addr);
return;
}
- bat_dbg(DBG_ROUTES, "Creating new local hna entry: %pM \n",
+ bat_dbg(DBG_ROUTES, "Creating new local hna entry: %pM\n",
addr);
hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
@@ -108,7 +111,8 @@ void hna_local_add(uint8_t *addr)
hna_local_hash->size * 2);
if (swaphash == NULL)
- printk(KERN_ERR "batman-adv:Couldn't resize local hna hash table \n");
+ printk(KERN_ERR "batman-adv:"
+ "Couldn't resize local hna hash table\n");
else
hna_local_hash = swaphash;
}
@@ -156,24 +160,49 @@ int hna_local_fill_buffer(unsigned char *buff, int buff_len)
return i;
}
-int hna_local_fill_buffer_text(unsigned char *buff, int buff_len)
+int hna_local_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off)
{
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
int bytes_written = 0;
unsigned long flags;
+ size_t hdr_len;
+
+ if (!bat_priv->primary_if) {
+ if (off == 0)
+ return sprintf(buff,
+ "BATMAN mesh %s disabled - "
+ "please specify interfaces to enable it\n",
+ net_dev->name);
+
+ return 0;
+ }
+
+ hdr_len = sprintf(buff,
+ "Locally retrieved addresses (from %s) "
+ "announced via HNA:\n",
+ net_dev->name);
+
+ if (off < hdr_len)
+ bytes_written = hdr_len;
spin_lock_irqsave(&hna_local_hash_lock, flags);
while (hash_iterate(hna_local_hash, &hashit)) {
+ hdr_len += 21;
- if (buff_len < bytes_written + ETH_STR_LEN + 4)
+ if (count < bytes_written + 22)
break;
+ if (off >= hdr_len)
+ continue;
+
hna_local_entry = hashit.bucket->data;
- bytes_written += snprintf(buff + bytes_written, ETH_STR_LEN + 4,
- " * %02x:%02x:%02x:%02x:%02x:%02x\n",
+ bytes_written += snprintf(buff + bytes_written, 22,
+ " * " MAC_FMT "\n",
hna_local_entry->addr[0],
hna_local_entry->addr[1],
hna_local_entry->addr[2],
@@ -183,7 +212,6 @@ int hna_local_fill_buffer_text(unsigned char *buff, int buff_len)
}
spin_unlock_irqrestore(&hna_local_hash_lock, flags);
-
return bytes_written;
}
@@ -197,7 +225,7 @@ static void _hna_local_del(void *data)
static void hna_local_del(struct hna_local_entry *hna_local_entry,
char *message)
{
- bat_dbg(DBG_ROUTES, "Deleting local hna entry (%pM): %s \n",
+ bat_dbg(DBG_ROUTES, "Deleting local hna entry (%pM): %s\n",
hna_local_entry->addr, message);
hash_remove(hna_local_hash, hna_local_entry->addr);
@@ -295,7 +323,8 @@ void hna_global_add_orig(struct orig_node *orig_node,
memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
bat_dbg(DBG_ROUTES,
- "Creating new global hna entry: %pM (via %pM)\n",
+ "Creating new global hna entry: "
+ "%pM (via %pM)\n",
hna_global_entry->addr, orig_node->orig);
spin_lock_irqsave(&hna_global_hash_lock, flags);
@@ -340,7 +369,8 @@ void hna_global_add_orig(struct orig_node *orig_node,
hna_global_hash->size * 2);
if (swaphash == NULL)
- printk(KERN_ERR "batman-adv:Couldn't resize global hna hash table \n");
+ printk(KERN_ERR "batman-adv:"
+ "Couldn't resize global hna hash table\n");
else
hna_global_hash = swaphash;
}
@@ -348,24 +378,49 @@ void hna_global_add_orig(struct orig_node *orig_node,
spin_unlock_irqrestore(&hna_global_hash_lock, flags);
}
-int hna_global_fill_buffer_text(unsigned char *buff, int buff_len)
+int hna_global_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off)
{
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
struct hna_global_entry *hna_global_entry;
HASHIT(hashit);
int bytes_written = 0;
unsigned long flags;
+ size_t hdr_len;
+
+ if (!bat_priv->primary_if) {
+ if (off == 0)
+ return sprintf(buff,
+ "BATMAN mesh %s disabled - "
+ "please specify interfaces to enable it\n",
+ net_dev->name);
+
+ return 0;
+ }
+
+ hdr_len = sprintf(buff,
+ "Globally announced HNAs received via the mesh %s "
+ "(translation table):\n",
+ net_dev->name);
+
+ if (off < hdr_len)
+ bytes_written = hdr_len;
spin_lock_irqsave(&hna_global_hash_lock, flags);
while (hash_iterate(hna_global_hash, &hashit)) {
- if (buff_len < bytes_written + (2 * ETH_STR_LEN) + 10)
+ hdr_len += 43;
+
+ if (count < bytes_written + 44)
break;
+ if (off >= hdr_len)
+ continue;
+
hna_global_entry = hashit.bucket->data;
- bytes_written += snprintf(buff + bytes_written,
- (2 * ETH_STR_LEN) + 10,
- " * %02x:%02x:%02x:%02x:%02x:%02x via %02x:%02x:%02x:%02x:%02x:%02x \n",
+ bytes_written += snprintf(buff + bytes_written, 44,
+ " * " MAC_FMT " via " MAC_FMT "\n",
hna_global_entry->addr[0],
hna_global_entry->addr[1],
hna_global_entry->addr[2],
@@ -381,14 +436,13 @@ int hna_global_fill_buffer_text(unsigned char *buff, int buff_len)
}
spin_unlock_irqrestore(&hna_global_hash_lock, flags);
-
return bytes_written;
}
void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
char *message)
{
- bat_dbg(DBG_ROUTES, "Deleting global hna entry %pM (via %pM): %s \n",
+ bat_dbg(DBG_ROUTES, "Deleting global hna entry %pM (via %pM): %s\n",
hna_global_entry->addr, hna_global_entry->orig_node->orig,
message);
diff --git a/drivers/staging/batman-adv/translation-table.h b/drivers/staging/batman-adv/translation-table.h
index 281125b729f..8f412fca87f 100644
--- a/drivers/staging/batman-adv/translation-table.h
+++ b/drivers/staging/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -25,13 +25,15 @@ int hna_local_init(void);
void hna_local_add(uint8_t *addr);
void hna_local_remove(uint8_t *addr, char *message);
int hna_local_fill_buffer(unsigned char *buff, int buff_len);
-int hna_local_fill_buffer_text(unsigned char *buff, int buff_len);
+int hna_local_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off);
void hna_local_purge(struct work_struct *work);
void hna_local_free(void);
int hna_global_init(void);
void hna_global_add_orig(struct orig_node *orig_node, unsigned char *hna_buff,
int hna_buff_len);
-int hna_global_fill_buffer_text(unsigned char *buff, int buff_len);
+int hna_global_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off);
void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
char *orig_str);
void hna_global_del_orig(struct orig_node *orig_node, char *message);
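
[Editor's note — illustrative sketch, not part of the patch.] The hna_local/global_fill_buffer_text() rework above replaces the flat (buff, buff_len) interface with an offset-based reader: a running hdr_len counts how many bytes of the virtual listing exist so far, entries that end before the requested offset are skipped, and output stops once count would be exceeded. The userspace sketch below follows that shape; the 22-byte line width matches the patch, everything else (entry table, function name) is invented for illustration.

#include <stddef.h>
#include <stdio.h>

#define LINE_LEN 22	/* " * xx:xx:xx:xx:xx:xx\n" plus terminator, as in the patch */

static const char *entries[] = {
	"02:00:00:00:00:01", "02:00:00:00:00:02", "02:00:00:00:00:03",
};

/* render at most 'count' bytes of the listing, starting at byte 'off' */
static size_t fill_buffer_text(char *buff, size_t count, size_t off)
{
	size_t hdr_len = 0, written = 0, i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		hdr_len += LINE_LEN - 1;	/* bytes the full listing has so far */

		if (count < written + LINE_LEN)	/* caller's buffer is full */
			break;
		if (off >= hdr_len)		/* already delivered earlier */
			continue;

		written += snprintf(buff + written, LINE_LEN,
				    " * %s\n", entries[i]);
	}
	return written;
}

int main(void)
{
	char buf[128];
	size_t n = fill_buffer_text(buf, sizeof(buf), 0);

	fwrite(buf, 1, n, stdout);

	/* second "read" continuing after the first line of the listing */
	n = fill_buffer_text(buf, sizeof(buf), 21);
	fwrite(buf, 1, n, stdout);
	return 0;
}
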
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index dec1b54031b..86007c7eb44 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -29,43 +29,61 @@
#include "packet.h"
#include "bitarray.h"
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? sizeof(struct unicast_packet) : sizeof(struct bcast_packet))))
+#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
+ ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
+ sizeof(struct unicast_packet) : \
+ sizeof(struct bcast_packet))))
struct batman_if {
struct list_head list;
int16_t if_num;
char *dev;
- char if_active;
+ char if_status;
char addr_str[ETH_STR_LEN];
struct net_device *net_dev;
atomic_t seqno;
unsigned char *packet_buff;
int packet_len;
+ struct kobject *hardif_obj;
struct rcu_head rcu;
};
-struct orig_node { /* structure for orig_list maintaining nodes of mesh */
+/**
+ * orig_node - structure for orig_list maintaining nodes of mesh
+ * @last_valid: when last packet from this node was received
+ * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @batman_seqno_reset: time when the batman seqno window was reset
+ * @flags: for now only VIS_SERVER flag
+ * @last_real_seqno: last and best known sequence number
+ * @last_ttl: ttl of last received packet
+ * @last_bcast_seqno: last broadcast sequence number received by this host
+ */
+struct orig_node {
uint8_t orig[ETH_ALEN];
struct neigh_node *router;
- struct batman_if *batman_if;
TYPE_OF_WORD *bcast_own;
uint8_t *bcast_own_sum;
uint8_t tq_own;
int tq_asym_penalty;
- unsigned long last_valid; /* when last packet from this node was received */
-/* uint8_t gwflags; * flags related to gateway functions: gateway class */
- uint8_t flags; /* for now only VIS_SERVER flag. */
+ unsigned long last_valid;
+ unsigned long bcast_seqno_reset;
+ unsigned long batman_seqno_reset;
+ uint8_t flags;
unsigned char *hna_buff;
int16_t hna_buff_len;
- uint16_t last_real_seqno; /* last and best known squence number */
- uint8_t last_ttl; /* ttl of last received packet */
+ uint16_t last_real_seqno;
+ uint8_t last_ttl;
TYPE_OF_WORD bcast_bits[NUM_WORDS];
- uint16_t last_bcast_seqno; /* last broadcast sequence number received by this host */
+ uint16_t last_bcast_seqno;
struct list_head neigh_list;
};
+/**
+ * neigh_node
+ * @last_valid: when last packet via this neighbor was received
+ */
struct neigh_node {
struct list_head list;
uint8_t addr[ETH_ALEN];
@@ -74,7 +92,7 @@ struct neigh_node {
uint8_t tq_index;
uint8_t tq_avg;
uint8_t last_ttl;
- unsigned long last_valid; /* when last packet via this neighbor was received */
+ unsigned long last_valid;
TYPE_OF_WORD real_bits[NUM_WORDS];
struct orig_node *orig_node;
struct batman_if *if_incoming;
@@ -82,6 +100,12 @@ struct neigh_node {
struct bat_priv {
struct net_device_stats stats;
+ atomic_t aggregation_enabled;
+ atomic_t vis_mode;
+ atomic_t orig_interval;
+ char num_ifaces;
+ struct batman_if *primary_if;
+ struct kobject *mesh_obj;
};
struct device_client {
@@ -108,7 +132,11 @@ struct hna_global_entry {
struct orig_node *orig_node;
};
-struct forw_packet { /* structure for forw_list maintaining packets to be send/forwarded */
+/**
+ * forw_packet - structure for forw_list maintaining packets to be
+ * send/forwarded
+ */
+struct forw_packet {
struct hlist_node list;
unsigned long send_time;
uint8_t own;
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
index fedec1bb309..1d3d954847f 100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -27,24 +27,44 @@
#include "hard-interface.h"
#include "hash.h"
+/* Returns the smallest signed integer in two's complement with the sizeof x */
+#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/* Checks if a sequence number x is a predecessor/successor of y.
+ they handle overflows/underflows and can correctly check for a
+ predecessor/successor unless the variable sequence number has grown by
+ more than 2**(bitwidth(x)-1)-1.
+ This means that for a uint8_t with the maximum value 255, it would think:
+ * when adding nothing - it is neither a predecessor nor a successor
+ * before adding more than 127 to the starting value - it is a predecessor,
+ * when adding 128 - it is neither a predecessor nor a successor,
+ * after adding more than 127 to the starting value - it is a successor */
+#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
+ _dummy > smallest_signed_int(_dummy); })
+#define seq_after(x, y) seq_before(y, x)
+
struct hashtable_t *vis_hash;
DEFINE_SPINLOCK(vis_hash_lock);
+static DEFINE_SPINLOCK(recv_list_lock);
static struct vis_info *my_vis_info;
static struct list_head send_list; /* always locked with vis_hash_lock */
static void start_vis_timer(void);
/* free the info */
-static void free_info(void *data)
+static void free_info(struct kref *ref)
{
- struct vis_info *info = data;
+ struct vis_info *info = container_of(ref, struct vis_info, refcount);
struct recvlist_node *entry, *tmp;
+ unsigned long flags;
list_del_init(&info->send_list);
+ spin_lock_irqsave(&recv_list_lock, flags);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
+ spin_unlock_irqrestore(&recv_list_lock, flags);
kfree(info);
}
@@ -82,7 +102,7 @@ static int vis_info_choose(void *data, int size)
/* insert interface to the list of interfaces of one originator, if it
* does not already exist in the list */
-static void proc_vis_insert_interface(const uint8_t *interface,
+static void vis_data_insert_interface(const uint8_t *interface,
struct hlist_head *if_list,
bool primary)
{
@@ -103,42 +123,135 @@ static void proc_vis_insert_interface(const uint8_t *interface,
hlist_add_head(&entry->list, if_list);
}
-void proc_vis_read_prim_sec(struct seq_file *seq,
- struct hlist_head *if_list)
+static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
struct if_list_entry *entry;
- struct hlist_node *pos, *n;
+ struct hlist_node *pos;
char tmp_addr_str[ETH_STR_LEN];
+ size_t len = 0;
- hlist_for_each_entry_safe(entry, pos, n, if_list, list) {
- if (entry->primary) {
- seq_printf(seq, "PRIMARY, ");
- } else {
+ hlist_for_each_entry(entry, pos, if_list, list) {
+ if (entry->primary)
+ len += sprintf(buff + len, "PRIMARY, ");
+ else {
addr_to_string(tmp_addr_str, entry->addr);
- seq_printf(seq, "SEC %s, ", tmp_addr_str);
+ len += sprintf(buff + len, "SEC %s, ", tmp_addr_str);
}
-
- hlist_del(&entry->list);
- kfree(entry);
}
+
+ return len;
}
/* read an entry */
-void proc_vis_read_entry(struct seq_file *seq,
- struct vis_info_entry *entry,
- struct hlist_head *if_list,
- uint8_t *vis_orig)
+static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
+ uint8_t *src, bool primary)
{
char to[40];
addr_to_string(to, entry->dest);
- if (entry->quality == 0) {
- proc_vis_insert_interface(vis_orig, if_list, true);
- seq_printf(seq, "HNA %s, ", to);
- } else {
- proc_vis_insert_interface(entry->src, if_list,
- compare_orig(entry->src, vis_orig));
- seq_printf(seq, "TQ %s %d, ", to, entry->quality);
+ if (primary && entry->quality == 0)
+ return sprintf(buff, "HNA %s, ", to);
+ else if (compare_orig(entry->src, src))
+ return sprintf(buff, "TQ %s %d, ", to, entry->quality);
+
+ return 0;
+}
+
+ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off)
+{
+ HASHIT(hashit);
+ struct vis_info *info;
+ struct vis_info_entry *entries;
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ HLIST_HEAD(vis_if_list);
+ struct if_list_entry *entry;
+ struct hlist_node *pos, *n;
+ size_t hdr_len, tmp_len;
+ int i, bytes_written = 0;
+ char tmp_addr_str[ETH_STR_LEN];
+ unsigned long flags;
+ int vis_server = atomic_read(&bat_priv->vis_mode);
+
+ if ((!bat_priv->primary_if) ||
+ (vis_server == VIS_TYPE_CLIENT_UPDATE))
+ return 0;
+
+ hdr_len = 0;
+
+ spin_lock_irqsave(&vis_hash_lock, flags);
+ while (hash_iterate(vis_hash, &hashit)) {
+ info = hashit.bucket->data;
+ entries = (struct vis_info_entry *)
+ ((char *)info + sizeof(struct vis_info));
+
+ /* estimated line length */
+ if (count < bytes_written + 200)
+ break;
+
+ for (i = 0; i < info->packet.entries; i++) {
+ if (entries[i].quality == 0)
+ continue;
+ vis_data_insert_interface(entries[i].src, &vis_if_list,
+ compare_orig(entries[i].src,
+ info->packet.vis_orig));
+ }
+
+ hlist_for_each_entry(entry, pos, &vis_if_list, list) {
+ addr_to_string(tmp_addr_str, entry->addr);
+ tmp_len = sprintf(buff + bytes_written,
+ "%s,", tmp_addr_str);
+
+ for (i = 0; i < info->packet.entries; i++)
+ tmp_len += vis_data_read_entry(
+ buff + bytes_written + tmp_len,
+ &entries[i], entry->addr,
+ entry->primary);
+
+ /* add primary/secondary records */
+ if (compare_orig(entry->addr, info->packet.vis_orig))
+ tmp_len += vis_data_read_prim_sec(
+ buff + bytes_written + tmp_len,
+ &vis_if_list);
+
+ tmp_len += sprintf(buff + bytes_written + tmp_len,
+ "\n");
+
+ hdr_len += tmp_len;
+
+ if (off >= hdr_len)
+ continue;
+
+ bytes_written += tmp_len;
+ }
+
+ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_del(&entry->list);
+ kfree(entry);
+ }
+ }
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
+
+ return bytes_written;
+}
+
+/* add the info packet to the send list, if it was not
+ * already linked in. */
+static void send_list_add(struct vis_info *info)
+{
+ if (list_empty(&info->send_list)) {
+ kref_get(&info->refcount);
+ list_add_tail(&info->send_list, &send_list);
+ }
+}
+
+/* delete the info packet from the send list, if it was
+ * linked in. */
+static void send_list_del(struct vis_info *info)
+{
+ if (!list_empty(&info->send_list)) {
+ list_del_init(&info->send_list);
+ kref_put(&info->refcount, free_info);
}
}
@@ -146,32 +259,41 @@ void proc_vis_read_entry(struct seq_file *seq,
static void recv_list_add(struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
+ unsigned long flags;
+
entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
if (!entry)
return;
memcpy(entry->mac, mac, ETH_ALEN);
+ spin_lock_irqsave(&recv_list_lock, flags);
list_add_tail(&entry->list, recv_list);
+ spin_unlock_irqrestore(&recv_list_lock, flags);
}
/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
+ unsigned long flags;
+ spin_lock_irqsave(&recv_list_lock, flags);
list_for_each_entry(entry, recv_list, list) {
- if (memcmp(entry->mac, mac, ETH_ALEN) == 0)
+ if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
+ spin_unlock_irqrestore(&recv_list_lock, flags);
return 1;
+ }
}
-
+ spin_unlock_irqrestore(&recv_list_lock, flags);
return 0;
}
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
- * broken.. ). vis hash must be locked outside. is_new is set when the packet
+ * broken.. ). vis hash must be locked outside. is_new is set when the packet
* is newer than old entries in the hash. */
static struct vis_info *add_packet(struct vis_packet *vis_packet,
- int vis_info_len, int *is_new)
+ int vis_info_len, int *is_new,
+ int make_broadcast)
{
struct vis_info *info, *old_info;
struct vis_info search_elem;
@@ -186,7 +308,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
old_info = hash_find(vis_hash, &search_elem);
if (old_info != NULL) {
- if (vis_packet->seqno - old_info->packet.seqno <= 0) {
+ if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) {
if (old_info->packet.seqno == vis_packet->seqno) {
recv_list_add(&old_info->recv_list,
vis_packet->sender_orig);
@@ -198,13 +320,15 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
}
/* remove old entry */
hash_remove(vis_hash, old_info);
- free_info(old_info);
+ send_list_del(old_info);
+ kref_put(&old_info->refcount, free_info);
}
info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
if (info == NULL)
return NULL;
+ kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
@@ -214,16 +338,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
/* initialize and add new packet. */
*is_new = 1;
+ /* Make it a broadcast packet, if required */
+ if (make_broadcast)
+ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
+
/* repair if entries is longer than packet. */
if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
- info->packet.entries = vis_info_len / sizeof(struct vis_info_entry);
+ info->packet.entries = vis_info_len /
+ sizeof(struct vis_info_entry);
recv_list_add(&info->recv_list, info->packet.sender_orig);
/* try to add it */
if (hash_add(vis_hash, info) < 0) {
/* did not work (for some reason) */
- free_info(info);
+ kref_put(&old_info->refcount, free_info);
info = NULL;
}
@@ -231,62 +360,65 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
}
/* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
+void receive_server_sync_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
+ int vis_info_len)
{
struct vis_info *info;
- int is_new;
+ int is_new, make_broadcast;
unsigned long flags;
- int vis_server = atomic_read(&vis_mode);
+ int vis_server = atomic_read(&bat_priv->vis_mode);
+
+ make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new);
+ info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
if (info == NULL)
goto end;
/* only if we are server ourselves and packet is newer than the one in
* hash.*/
- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) {
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
- }
+ if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
+ send_list_add(info);
end:
spin_unlock_irqrestore(&vis_hash_lock, flags);
}
/* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct vis_packet *vis_packet,
+void receive_client_update_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
int vis_info_len)
{
struct vis_info *info;
int is_new;
unsigned long flags;
- int vis_server = atomic_read(&vis_mode);
+ int vis_server = atomic_read(&bat_priv->vis_mode);
+ int are_target = 0;
/* clients shall not broadcast. */
if (is_bcast(vis_packet->target_orig))
return;
+ /* Are we the target for this VIS packet? */
+ if (vis_server == VIS_TYPE_SERVER_SYNC &&
+ is_my_mac(vis_packet->target_orig))
+ are_target = 1;
+
spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new);
+ info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
if (info == NULL)
goto end;
/* note that outdated packets will be dropped at this point. */
/* send only if we're the target server or ... */
- if (vis_server == VIS_TYPE_SERVER_SYNC &&
- is_my_mac(info->packet.target_orig) &&
- is_new) {
+ if (are_target && is_new) {
info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
+ send_list_add(info);
/* ... we're not the recipient (and thus need to forward). */
} else if (!is_my_mac(info->packet.target_orig)) {
- if (list_empty(&info->send_list))
- list_add_tail(&info->send_list, &send_list);
+ send_list_add(info);
}
end:
spin_unlock_irqrestore(&vis_hash_lock, flags);
@@ -327,7 +459,7 @@ static bool vis_packet_full(struct vis_info *info)
/* generates a packet of own vis data,
* returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(void)
+static int generate_vis_packet(struct bat_priv *bat_priv)
{
HASHIT(hashit_local);
HASHIT(hashit_global);
@@ -339,7 +471,7 @@ static int generate_vis_packet(void)
unsigned long flags;
info->first_seen = jiffies;
- info->packet.vis_type = atomic_read(&vis_mode);
+ info->packet.vis_type = atomic_read(&bat_priv->vis_mode);
spin_lock_irqsave(&orig_hash_lock, flags);
memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
@@ -361,14 +493,17 @@ static int generate_vis_packet(void)
while (hash_iterate(orig_hash, &hashit_global)) {
orig_node = hashit_global.bucket->data;
if (orig_node->router != NULL
- && compare_orig(orig_node->router->addr, orig_node->orig)
- && orig_node->batman_if
- && (orig_node->batman_if->if_active == IF_ACTIVE)
+ && compare_orig(orig_node->router->addr,
+ orig_node->orig)
+ && (orig_node->router->if_incoming->if_status ==
+ IF_ACTIVE)
&& orig_node->router->tq_avg > 0) {
/* fill one entry into buffer. */
entry = &entry_array[info->packet.entries];
- memcpy(entry->src, orig_node->batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(entry->src,
+ orig_node->router->if_incoming->net_dev->dev_addr,
+ ETH_ALEN);
memcpy(entry->dest, orig_node->orig, ETH_ALEN);
entry->quality = orig_node->router->tq_avg;
info->packet.entries++;
@@ -400,6 +535,8 @@ static int generate_vis_packet(void)
return 0;
}
+/* free old vis packets. Must be called with the vis_hash_lock
+ * held */
static void purge_vis_packets(void)
{
HASHIT(hashit);
@@ -412,7 +549,8 @@ static void purge_vis_packets(void)
if (time_after(jiffies,
info->first_seen + (VIS_TIMEOUT*HZ)/1000)) {
hash_remove_bucket(vis_hash, &hashit);
- free_info(info);
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
}
}
}
@@ -422,6 +560,8 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
HASHIT(hashit);
struct orig_node *orig_node;
unsigned long flags;
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];
spin_lock_irqsave(&orig_hash_lock, flags);
@@ -430,45 +570,55 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
orig_node = hashit.bucket->data;
/* if it's a vis server and reachable, send it. */
- if (orig_node &&
- (orig_node->flags & VIS_SERVER) &&
- orig_node->batman_if &&
- orig_node->router) {
+ if ((!orig_node) || (!orig_node->router))
+ continue;
+ if (!(orig_node->flags & VIS_SERVER))
+ continue;
+ /* don't send it if we already received the packet from
+ * this node. */
+ if (recv_list_is_in(&info->recv_list, orig_node->orig))
+ continue;
- /* don't send it if we already received the packet from
- * this node. */
- if (recv_list_is_in(&info->recv_list, orig_node->orig))
- continue;
+ memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
- memcpy(info->packet.target_orig,
- orig_node->orig, ETH_ALEN);
+ send_raw_packet((unsigned char *)&info->packet,
+ packet_length, batman_if, dstaddr);
+
+ spin_lock_irqsave(&orig_hash_lock, flags);
- send_raw_packet((unsigned char *) &info->packet,
- packet_length,
- orig_node->batman_if,
- orig_node->router->addr);
- }
}
- memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
spin_unlock_irqrestore(&orig_hash_lock, flags);
+ memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
}
static void unicast_vis_packet(struct vis_info *info, int packet_length)
{
struct orig_node *orig_node;
unsigned long flags;
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
hash_find(orig_hash, info->packet.target_orig));
- if ((orig_node != NULL) &&
- (orig_node->batman_if != NULL) &&
- (orig_node->router != NULL)) {
- send_raw_packet((unsigned char *) &info->packet, packet_length,
- orig_node->batman_if,
- orig_node->router->addr);
- }
+ if ((!orig_node) || (!orig_node->router))
+ goto out;
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+ batman_if = orig_node->router->if_incoming;
+ memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+ spin_unlock_irqrestore(&orig_hash_lock, flags);
+
+ send_raw_packet((unsigned char *)&info->packet,
+ packet_length, batman_if, dstaddr);
+ return;
+
+out:
spin_unlock_irqrestore(&orig_hash_lock, flags);
}
@@ -500,17 +650,28 @@ static void send_vis_packets(struct work_struct *work)
{
struct vis_info *info, *temp;
unsigned long flags;
+ /* FIXME: each batman_if will be attached to a softif */
+ struct bat_priv *bat_priv = netdev_priv(soft_device);
spin_lock_irqsave(&vis_hash_lock, flags);
+
purge_vis_packets();
- if (generate_vis_packet() == 0)
+ if (generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- list_add_tail(&my_vis_info->send_list, &send_list);
+ send_list_add(my_vis_info);
+ }
list_for_each_entry_safe(info, temp, &send_list, send_list) {
- list_del_init(&info->send_list);
+
+ kref_get(&info->refcount);
+ spin_unlock_irqrestore(&vis_hash_lock, flags);
+
send_vis_packet(info);
+
+ spin_lock_irqsave(&vis_hash_lock, flags);
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
}
spin_unlock_irqrestore(&vis_hash_lock, flags);
start_vis_timer();
@@ -543,6 +704,7 @@ int vis_init(void)
my_vis_info->first_seen = jiffies - atomic_read(&vis_interval);
INIT_LIST_HEAD(&my_vis_info->recv_list);
INIT_LIST_HEAD(&my_vis_info->send_list);
+ kref_init(&my_vis_info->refcount);
my_vis_info->packet.version = COMPAT_VERSION;
my_vis_info->packet.packet_type = BAT_VIS;
my_vis_info->packet.ttl = TTL;
@@ -556,9 +718,9 @@ int vis_init(void)
if (hash_add(vis_hash, my_vis_info) < 0) {
printk(KERN_ERR
- "batman-adv:Can't add own vis packet into hash\n");
- free_info(my_vis_info); /* not in hash, need to remove it
- * manually. */
+ "batman-adv:Can't add own vis packet into hash\n");
+ /* not in hash, need to remove it manually. */
+ kref_put(&my_vis_info->refcount, free_info);
goto err;
}
@@ -572,6 +734,15 @@ err:
return 0;
}
+/* Decrease the reference count on a hash item info */
+static void free_info_ref(void *data)
+{
+ struct vis_info *info = data;
+
+ send_list_del(info);
+ kref_put(&info->refcount, free_info);
+}
+
/* shutdown vis-server */
void vis_quit(void)
{
@@ -583,7 +754,7 @@ void vis_quit(void)
spin_lock_irqsave(&vis_hash_lock, flags);
/* properly remove, kill timers ... */
- hash_delete(vis_hash, free_info);
+ hash_delete(vis_hash, free_info_ref);
vis_hash = NULL;
my_vis_info = NULL;
spin_unlock_irqrestore(&vis_hash_lock, flags);
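
[Editor's note — illustrative sketch, not part of the patch.] The vis.c changes above move vis_info to kref-based lifetime management: free_info() becomes the release callback and recovers the object with container_of(), send_list_add()/send_list_del() take and drop references for the send list, and send_vis_packets() grabs a temporary reference so the hash lock can be dropped while transmitting. The sketch below mirrors that get/put/release-on-zero shape in userspace with a plain atomic counter; it is not the kernel kref implementation, and all names are made up for the demo.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref {
	atomic_int count;
};

struct vis_info_demo {
	int seqno;
	struct ref refcount;	/* embedded, like struct kref in vis_info */
};

static void ref_get(struct ref *r)
{
	atomic_fetch_add(&r->count, 1);
}

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);	/* last reference gone */
}

static void free_info(struct ref *r)
{
	struct vis_info_demo *info =
		container_of(r, struct vis_info_demo, refcount);

	printf("freeing info with seqno %d\n", info->seqno);
	free(info);
}

int main(void)
{
	struct vis_info_demo *info = malloc(sizeof(*info));

	if (!info)
		return 1;

	info->seqno = 42;
	atomic_init(&info->refcount.count, 1);	/* reference held by the "hash" */

	ref_get(&info->refcount);		/* reference held by the "send list" */
	ref_put(&info->refcount, free_info);	/* send list done with it */
	ref_put(&info->refcount, free_info);	/* removed from the hash: freed now */
	return 0;
}
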
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
index 0cdafde0ec3..9c1fd771cba 100644
--- a/drivers/staging/batman-adv/vis.h
+++ b/drivers/staging/batman-adv/vis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2009 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -20,8 +20,6 @@
*/
#define VIS_TIMEOUT 200000
-#define VIS_FORMAT_DD_NAME "dot_draw"
-#define VIS_FORMAT_JSON_NAME "json"
struct vis_info {
unsigned long first_seen;
@@ -29,6 +27,7 @@ struct vis_info {
/* list of server-neighbors we received a vis-packet
* from. we should not reply to them. */
struct list_head send_list;
+ struct kref refcount;
/* this packet might be part of the vis send queue. */
struct vis_packet packet;
/* vis_info may follow here*/
@@ -48,15 +47,13 @@ struct recvlist_node {
extern struct hashtable_t *vis_hash;
extern spinlock_t vis_hash_lock;
-void proc_vis_read_entry(struct seq_file *seq,
- struct vis_info_entry *entry,
- struct hlist_head *if_list,
- uint8_t *vis_orig);
-void proc_vis_read_prim_sec(struct seq_file *seq,
- struct hlist_head *if_list);
-void receive_server_sync_packet(struct vis_packet *vis_packet,
+ssize_t vis_fill_buffer_text(struct net_device *net_dev, char *buff,
+ size_t count, loff_t off);
+void receive_server_sync_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
int vis_info_len);
-void receive_client_update_packet(struct vis_packet *vis_packet,
+void receive_client_update_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
int vis_info_len);
int vis_init(void);
void vis_quit(void);
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index d63c889ce55..8ce307e64b5 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,7 +1,7 @@
config COMEDI
tristate "Data acquisition support (comedi)"
default N
- depends on m && (PCI || PCMCIA || PCCARD || USB)
+ depends on m
---help---
Enable support a wide range of data acquisition devices
for Linux.
@@ -9,27 +9,1295 @@ config COMEDI
config COMEDI_DEBUG
bool "Comedi debugging"
depends on COMEDI != n
- help
+ ---help---
This is an option for use by developers; most people should
say N here. This enables comedi core and driver debugging.
-config COMEDI_PCI_DRIVERS
+menuconfig COMEDI_MISC_DRIVERS
+ tristate "Comedi misc drivers"
+ depends on COMEDI
+ default N
+ ---help---
+ Enable comedi misc drivers to be built
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about misc non-hardware comedi drivers.
+
+if COMEDI_MISC_DRIVERS
+
+config COMEDI_KCOMEDILIB
+ tristate "Comedi kcomedilib"
+ ---help---
+ Build the kcomedilib
+
+config COMEDI_BOND
+ tristate "Device bonding support"
+ depends on COMEDI_KCOMEDILIB
+ default N
+ ---help---
+ Enable support for a driver to 'bond' (merge) multiple subdevices
+ from multiple devices together as one.
+
+ To compile this driver as a module, choose M here: the module will be
+ called comedi_bond.
+
+config COMEDI_TEST
+ tristate "Fake waveform generator support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for the fake waveform generator.
+ This driver is mainly for testing purposes, but can also be used to
+ generate sample waveforms on systems that don't have data acquisition
+ hardware.
+
+ To compile this driver as a module, choose M here: the module will be
+ called comedi_test.
+
+config COMEDI_PARPORT
+ tristate "Parallel port support"
+ default N
+ ---help---
+ Enable support for the standard parallel port.
+ A cheap and easy way to get a few more digital I/O lines. Steal
+ additional parallel ports from old computers or your neighbors'
+ computers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called comedi_parport.
+
+config COMEDI_SERIAL2002
+ tristate "Driver for serial connected hardware"
+ default N
+ ---help---
+ Enable support for serial connected hardware
+
+ To compile this driver as a module, choose M here: the module will be
+ called serial2002.
+
+config COMEDI_SKEL
+ tristate "Comedi skeleton driver"
+ default N
+ ---help---
+ Build the Skeleton driver, an example for driver writers
+
+ To compile this driver as a module, choose M here: the module will be
+ called skel.
+
+endif # COMEDI_MISC_DRIVERS
+
+menuconfig COMEDI_ISA_DRIVERS
+ tristate "Comedi ISA and PC/104 drivers"
+ depends on COMEDI && ISA
+ default N
+ ---help---
+ Enable comedi ISA and PC/104 drivers to be built
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ISA and PC/104 comedi drivers.
+
+if COMEDI_ISA_DRIVERS && ISA
+
+config COMEDI_8255
+ tristate "Generic 8255 support"
+ default N
+ ---help---
+ Enable generic 8255 support.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 8255.
+
+config COMEDI_ACL7225B
+ tristate "ADlink NuDAQ ACL-7225b and compatibles support"
+ default N
+ ---help---
+ Enable support for ADlink NuDAQ ACL-7225b and compatibles,
+ ADlink ACL-7225b (acl7225b), ICP P16R16DIO (p16r16dio)
+
+ To compile this driver as a module, choose M here: the module will be
+ called acl7225b.
+
+config COMEDI_PCL711
+ tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl711.
+
+config COMEDI_PCL724
+ tristate "Advantech PCL-722/724/731 and ADlink ACL-7122/7124/PET-48DIO"
+ default N
+ ---help---
+ Enable support for Advantech PCL-724, PCL-722, PCL-731 and
+ ADlink ACL-7122, ACL-7124, PET-48DIO ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl724.
+
+config COMEDI_PCL725
+ tristate "Advantech PCL-725 and compatible ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-725 and compatible ISA cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl725.
+
+config COMEDI_PCL726
+ tristate "Advantech PCL-726 and compatible ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-726 and compatible ISA cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl726.
+
+config COMEDI_PCL730
+ tristate "Advantech PCL-730 and ADlink ACL-7130 ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-730, ICP ISO-730 and ADlink
+ ACL-7130 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl730.
+
+config COMEDI_PCL812
+ tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+ default N
+ ---help---
+ Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
+ ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
+ A-822PGH/PGL, A-823PGH/PGL, A-826PG and ICP DAS ISO-813 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl812.
+
+config COMEDI_PCL816
+ tristate "Advantech PCL-814 and PCL-816 ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-814 and PCL-816 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl816.
+
+config COMEDI_PCL818
+ tristate "Advantech PCL-718 and PCL-818 ISA card support"
+ default N
+ ---help---
+ Enable support for Advantech PCL-818 ISA cards
+ PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcl818.
+
+config COMEDI_PCM3724
+ tristate "Advantech PCM-3724 PC/104 card support"
+ default N
+ ---help---
+ Enable support for Advantech PCM-3724 PC/104 cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcm3724.
+
+config COMEDI_PCM3730
+ tristate "Advantech PCM-3730 and clone PC/104 board support"
+ default N
+ ---help---
+ Enable support for Advantech PCM-3730 and clone PC/104 boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcm3730.
+
+config COMEDI_RTI800
+ tristate "Analog Devices RTI-800/815 ISA card support"
+ default N
+ ---help---
+ Enable support for Analog Devices RTI-800/815 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called rti800.
+
+config COMEDI_RTI802
+ tristate "Analog Devices RTI-802 ISA card support"
+ default N
+ ---help---
+ Enable support for Analog Devices RTI-802 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called rti802.
+
+config COMEDI_DAS08
+ tristate "DAS-08 compatible ISA, PC/104 and PCMCIA card support"
+ default N
+ ---help---
+ Enable support for Keithley Metrabyte/ComputerBoards DAS08
+ and compatible ISA and PC/104 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called das08.
+
+config COMEDI_DAS16M1
+ tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called das16m1.
+
+config COMEDI_DAS16
+ tristate "DAS-16 compatible ISA and PC/104 card support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Keithley Metrabyte/ComputerBoards DAS16
+ and compatible ISA and PC/104 cards:
+ Keithley Metrabyte DAS-16, DAS-16G, DAS-16F, DAS-1201, DAS-1202,
+ DAS-1401, DAS-1402, DAS-1601, DAS-1602 and
+ ComputerBoards/MeasurementComputing PC104-DAS16/JR,
+ PC104-DAS16JR/16, CIO-DAS16JR/16, CIO-DAS16/JR, CIO-DAS1401/12,
+ CIO-DAS1402/12, CIO-DAS1402/16, CIO-DAS1601/12, CIO-DAS1602/12,
+ CIO-DAS1602/16, CIO-DAS16/330
+
+ To compile this driver as a module, choose M here: the module will be
+ called das16.
+
+config COMEDI_DAS800
+ tristate "DAS800 and compatible ISA card support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
+ Keithley Metrabyte DAS-800, DAS-801, DAS-802
+ Measurement Computing CIO-DAS800, CIO-DAS801, CIO-DAS802 and
+ CIO-DAS802/16
+
+ To compile this driver as a module, choose M here: the module will be
+ called das800.
+
+config COMEDI_DAS1800
+ tristate "DAS1800 and compatible ISA card support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for DAS1800 and compatible ISA cards
+ Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
+ DAS-1702ST, DAS-1702ST-DA, DAS-1702HR, DAS-1702HR-DA, DAS-1702/AO,
+ DAS-1801ST, DAS-1801ST-DA, DAS-1801HC, DAS-1801AO, DAS-1802ST,
+ DAS-1802ST-DA, DAS-1802HR, DAS-1802HR-DA, DAS-1802HC and
+ DAS-1802AO
+
+ To compile this driver as a module, choose M here: the module will be
+ called das1800.
+
+config COMEDI_DAS6402
+ tristate "DAS6402 and compatible ISA card support"
+ default N
+ ---help---
+ Enable support for DAS6402 and compatible ISA cards
+ Computerboards, Keithley Metrabyte DAS6402 and compatibles
+
+ To compile this driver as a module, choose M here: the module will be
+ called das6402.
+
+config COMEDI_DT2801
+ tristate "Data Translation DT2801 ISA card support"
+ default N
+ ---help---
+ Enable support for Data Translation DT2801 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt2801.
+
+config COMEDI_DT2811
+ tristate "Data Translation DT2811 ISA card support"
+ default N
+ ---help---
+ Enable support for Data Translation DT2811 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt2811.
+
+config COMEDI_DT2814
+ tristate "Data Translation DT2814 ISA card support"
+ default N
+ ---help---
+ Enable support for Data Translation DT2814 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt2814.
+
+config COMEDI_DT2815
+ tristate "Data Translation DT2815 ISA card support"
+ default N
+ ---help---
+ Enable support for Data Translation DT2815 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt2815.
+
+config COMEDI_DT2817
+ tristate "Data Translation DT2817 ISA card support"
+ default N
+ ---help---
+ Enable support for Data Translation DT2817 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt2817.
+
+config COMEDI_DT282X
+ tristate "Data Translation DT2821 series and DT-EZ ISA card support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Data Translation DT2821 series including DT-EZ
+ DT2821, DT2821-F-16SE, DT2821-F-8DI, DT2821-G-16SE, DT2821-G-8DI,
+ DT2823 (dt2823), DT2824-PGH, DT2824-PGL, DT2825, DT2827, DT2828,
+ DT21-EZ, DT23-EZ, DT24-EZ and DT24-EZ-PGL
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt282x.
+
+config COMEDI_DMM32AT
+ tristate "Diamond Systems MM-32-AT PC/104 board support"
+ default N
+ ---help---
+ Enable support for Diamond Systems MM-32-AT PC/104 boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called dmm32at.
+
+config COMEDI_FL512
+ tristate "FL512 ISA card support"
+ default N
+ ---help---
+ Enable support for FL512 ISA card
+
+ To compile this driver as a module, choose M here: the module will be
+ called fl512.
+
+config COMEDI_AIO_AIO12_8
+ tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
+ default N
+ ---help---
+ Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
+
+ To compile this driver as a module, choose M here: the module will be
+ called aio_aio12_8.
+
+config COMEDI_AIO_IIRO_16
+ tristate "I/O Products PC/104 IIRO16 Board support"
+ default N
+ ---help---
+ Enable support for I/O Products PC/104 IIRO16 Relay And Isolated
+ Input Board
+
+ To compile this driver as a module, choose M here: the module will be
+ called aio_iiro_16.
+
+config COMEDI_C6XDIGIO
+ tristate "Mechatronic Systems Inc. C6x_DIGIO DSP daughter card support"
+ default N
+ ---help---
+ Enable support for Mechatronic Systems Inc. C6x_DIGIO DSP daughter
+ card
+
+ To compile this driver as a module, choose M here: the module will be
+ called c6xdigio.
+
+config COMEDI_MPC624
+ tristate "Micro/sys MPC-624 PC/104 board support"
+ default N
+ ---help---
+ Enable support for Micro/sys MPC-624 PC/104 board
+
+ To compile this driver as a module, choose M here: the module will be
+ called mpc624.
+
+config COMEDI_ADQ12B
+ tristate "MicroAxial ADQ12-B data acquisition and control card support"
+ default N
+ ---help---
+ Enable support for the MicroAxial ADQ12-B data acquisition and
+ control card.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adq12b.
+
+config COMEDI_NI_AT_A2150
+ tristate "NI AT-A2150 ISA card support"
+ depends on COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments AT-A2150 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_at_a2150.
+
+config COMEDI_NI_AT_AO
+ tristate "NI AT-AO-6/10 EISA card support"
+ depends on COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments AT-AO-6/10 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_at_ao.
+
+config COMEDI_NI_ATMIO
+ tristate "NI AT-MIO E series ISA-PNP card support"
+ depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments AT-MIO E series cards
+ National Instruments AT-MIO-16E-1 (ni_atmio),
+ AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
+ AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_atmio.
+
+config COMEDI_NI_ATMIO16D
+ tristate "NI AT-MIO16/AT-MIO16D series ISA-PNP card support"
+ depends on ISAPNP && COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments AT-MIO16/AT-MIO16D cards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_atmio16d.
+
+config COMEDI_PCMAD
+ tristate "Winsystems PCM-A/D12 and PCM-A/D16 PC/104 board support"
+ default N
+ ---help---
+ Enable support for Winsystems PCM-A/D12 and PCM-A/D16 PC/104 boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcmad.
+
+config COMEDI_PCMDA12
+ tristate "Winsystems PCM-D/A-12 8-channel AO PC/104 board support"
+ default N
+ ---help---
+ Enable support for Winsystems PCM-D/A-12 8-channel AO PC/104 boards.
+ Note that the board is not ISA-PNP capable and thus needs the I/O
+ port comedi_config parameter.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcmda12.
+
+config COMEDI_PCMMIO
+ tristate "Winsystems PCM-MIO PC/104 board support"
+ default N
+ ---help---
+ Enable support for Winsystems PCM-MIO multifunction PC/104 boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcmmio.
+
+config COMEDI_PCMUIO
+ tristate "Winsystems PCM-UIO48A and PCM-UIO96A PC/104 board support"
+ default N
+ ---help---
+ Enable support for PCM-UIO48A and PCM-UIO96A PC/104 boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called pcmuio.
+
+config COMEDI_MULTIQ3
+ tristate "Quanser Consulting MultiQ-3 ISA card support"
+ default N
+ ---help---
+ Enable support for Quanser Consulting MultiQ-3 ISA cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called multiq3.
+
+config COMEDI_POC
+ tristate "Generic driver for very simple devices"
+ default N
+ ---help---
+ Enable generic support for very simple / POC (Piece of Crap) boards,
+ Keithley Metrabyte DAC-02 (dac02), Advantech PCL-733 (pcl733) and
+ PCL-734 (pcl734)
+
+ To compile this driver as a module, choose M here: the module will be
+ called poc.
+
+endif # COMEDI_ISA_DRIVERS
+
+menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
depends on COMEDI && PCI
default N
---help---
- Enable lots of comedi PCI drivers to be built
+ Enable comedi PCI drivers to be built
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about PCI comedi drivers.
+
+if COMEDI_PCI_DRIVERS && PCI
+
+config COMEDI_ADDI_APCI_035
+ tristate "ADDI-DATA APCI_035 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_035 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_035.
+
+config COMEDI_ADDI_APCI_1032
+ tristate "ADDI-DATA APCI_1032 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_1032 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_1032.
+
+config COMEDI_ADDI_APCI_1500
+ tristate "ADDI-DATA APCI_1500 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_1500 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_1500.
+
+config COMEDI_ADDI_APCI_1516
+ tristate "ADDI-DATA APCI_1516 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_1516 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_1516.
+
+config COMEDI_ADDI_APCI_1564
+ tristate "ADDI-DATA APCI_1564 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_1564 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_1564.
+
+config COMEDI_ADDI_APCI_16XX
+ tristate "ADDI-DATA APCI_16xx support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_16xx cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_16xx.
+
+config COMEDI_ADDI_APCI_2016
+ tristate "ADDI-DATA APCI_2016 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_2016 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_2016.
+
+config COMEDI_ADDI_APCI_2032
+ tristate "ADDI-DATA APCI_2032 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_2032 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_2032.
+
+config COMEDI_ADDI_APCI_2200
+ tristate "ADDI-DATA APCI_2200 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_2200 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_2200.
+
+config COMEDI_ADDI_APCI_3001
+ tristate "ADDI-DATA APCI_3001 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_3001 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_3001.
+
+config COMEDI_ADDI_APCI_3120
+ tristate "ADDI-DATA APCI_3520 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_3520 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_3120.
+
+config COMEDI_ADDI_APCI_3501
+ tristate "ADDI-DATA APCI_3501 support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_3501 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_3501.
+
+config COMEDI_ADDI_APCI_3XXX
+ tristate "ADDI-DATA APCI_3xxx support"
+ default N
+ ---help---
+ Enable support for ADDI-DATA APCI_3xxx cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called addi_apci_3xxx.
+
+config COMEDI_ADL_PCI6208
+ tristate "ADLink PCI-6208A support"
+ default N
+ ---help---
+ Enable support for ADLink PCI-6208A cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci6208.
+
+config COMEDI_ADL_PCI7230
+ tristate "ADLink PCI-7230 digital io board support"
+ default N
+ ---help---
+ Enable support for the ADlink PCI-7230 digital I/O board
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7230.
+
+config COMEDI_ADL_PCI7296
+ tristate "ADLink PCI-7296 96 ch. digital io board support"
+ default N
+ ---help---
+ Enable support for the ADlink PCI-7296 96-channel digital I/O board
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7296.
+
+config COMEDI_ADL_PCI7432
+ tristate "ADLink PCI-7432 64 ch. isolated digital io board support"
+ default N
+ ---help---
+ Enable support for the ADlink PCI-7432 64-channel isolated digital
+ I/O board
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7432.
+
+config COMEDI_ADL_PCI8164
+ tristate "ADLink PCI-8164 4 Axes Motion Control board support"
+ default N
+ ---help---
+ Enable support for ADlink PCI-8164 4 Axes Motion Control board
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci8164.
+
+config COMEDI_ADL_PCI9111
+ tristate "ADLink PCI-9111HR support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ADlink PCI9111 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci9111.
+
+config COMEDI_ADL_PCI9118
+ tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci9118.
+
+config COMEDI_ADV_PCI1710
+ tristate "Advantech PCI-171x, PCI-1720 and PCI-1731 support"
+ default N
+ ---help---
+ Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
+ PCI-1713, PCI-1720 and PCI-1731
+
+ To compile this driver as a module, choose M here: the module will be
+ called adv_pci1710.
+
+config COMEDI_ADV_PCI1723
+ tristate "Advantech PCI-1723 support"
+ default N
+ ---help---
+ Enable support for Advantech PCI-1723 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called adv_pci1723.
+
+config COMEDI_ADV_PCI_DIO
+ tristate "Advantech PCI DIO card support"
+ default N
+ ---help---
+ Enable support for Advantech PCI DIO cards
+ PCI-1730, PCI-1733, PCI-1734, PCI-1736UP, PCI-1750, PCI-1751,
+ PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and PCI-1762
+
+ To compile this driver as a module, choose M here: the module will be
+ called adv_pci_dio.
+
+config COMEDI_AMPLC_DIO200
+ tristate "Amplicon PC272E and PCI272 DIO board support"
+ default N
+ ---help---
+ Enable support for Amplicon PC272E and PCI272 DIO boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called amplc_dio200.
+
+config COMEDI_AMPLC_PC236
+ tristate "Amplicon PC36AT and PCI236 DIO board support"
+ default N
+ ---help---
+ Enable support for Amplicon PC36AT and PCI236 DIO boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called amplc_pc236.
+
+config COMEDI_AMPLC_PC263
+ tristate "Amplicon PC263 and PCI263 relay board support"
+ default N
+ ---help---
+ Enable support for Amplicon PC263 and PCI263 relay boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called amplc_pc263.
+
+config COMEDI_AMPLC_PCI224
+ tristate "Amplicon PCI224 and PCI234 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Amplicon PCI224 and PCI234 AO boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called amplc_pci224.
+
+config COMEDI_AMPLC_PCI230
+ tristate "Amplicon PCI230 and PCI260 support"
+ default N
+ ---help---
+ Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
+ boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called amplc_pci230.
+
+config COMEDI_CONTEC_PCI_DIO
+ tristate "Contec PIO1616L digital I/O board support"
+ default N
+ ---help---
+ Enable support for the Contec PIO1616L digital I/O board
+
+ To compile this driver as a module, choose M here: the module will be
+ called contec_pci_dio.
+
+config COMEDI_DT3000
+ tristate "Data Translation DT3000 series support"
+ default N
+ ---help---
+ Enable support for Data Translation DT3000 series
+ DT3001, DT3001-PGL, DT3002, DT3003, DT3003-PGL, DT3004, DT3005 and
+ DT3004-200
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt3000.
+
+config COMEDI_UNIOXX5
+ tristate "Fastwel UNIOxx-5 analog and digital io board support"
+ default N
+ ---help---
+ Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called unioxx5.
+
+config COMEDI_GSC_HPDI
+ tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for the General Standards Corporation PCI-HPDI32 and
+ PMC-HPDI32 high-speed parallel digital interface RS485 boards.
+ Only receive mode works; transmit is not supported.
+
+ To compile this driver as a module, choose M here: the module will be
+ called gsc_hpdi.
+
+config COMEDI_ICP_MULTI
+ tristate "Inova ICP_MULTI support"
+ default N
+ ---help---
+ Enable support for Inova ICP_MULTI card
+
+ To compile this driver as a module, choose M here: the module will be
+ called icp_multi.
+
+config COMEDI_II_PCI20KC
+ tristate "Intelligent Instruments PCI-20001C carrier support"
+ default N
+ ---help---
+ Enable support for Intelligent Instruments PCI-20001C carrier
+ PCI-20001, PCI-20006 and PCI-20341
+
+ To compile this driver as a module, choose M here: the module will be
+ called ii_pci20kc.
+
+config COMEDI_DAQBOARD2000
+ tristate "IOtech DAQboard/2000 support"
+ default N
+ ---help---
+ Enable support for the IOtech DAQboard/2000
+
+ To compile this driver as a module, choose M here: the module will be
+ called daqboard2000.
+
+config COMEDI_JR3_PCI
+ tristate "JR3/PCI force sensor board support"
+ default N
+ ---help---
+ Enable support for JR3/PCI force sensor boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called jr3_pci.
+
+config COMEDI_KE_COUNTER
+ tristate "Kolter-Electronic PCI Counter 1 card support"
+ default N
+ ---help---
+ Enable support for Kolter-Electronic PCI Counter 1 cards
+
+ To compile this driver as a module, choose M here: the module will be
+ called ke_counter.
+
+config COMEDI_CB_PCIDAS64
+ tristate "MeasurementComputing PCI-DAS 64xx, 60xx, and 4020 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCI-DAS 64xx,
+ 60xx, and 4020 series with the PLX 9080 PCI controller
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcidas64.
+
+config COMEDI_CB_PCIDAS
+ tristate "MeasurementComputing PCI-DAS support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
+ AMCC S5933 PCI controller: PCI-DAS1602/16, PCI-DAS1602/16jr,
+ PCI-DAS1602/12, PCI-DAS1200, PCI-DAS1200jr, PCI-DAS1000, PCI-DAS1001
+ and PCI-DAS1002.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcidas.
+
+config COMEDI_CB_PCIDDA
+ tristate "MeasurementComputing PCI-DDA series support"
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCI-DDA
+ series: PCI-DDA08/12, PCI-DDA04/12, PCI-DDA02/12, PCI-DDA08/16,
+ PCI-DDA04/16 and PCI-DDA02/16
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcidda.
+
+config COMEDI_CB_PCIDIO
+ tristate "MeasurementComputing PCI-DIO series support"
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCI-DIO series
+ PCI-DIO24, PCI-DIO24H and PCI-DIO48H
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcidio.
+
+config COMEDI_CB_PCIMDAS
+ tristate "MeasurementComputing PCIM-DAS1602/16 support"
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCI Migration
+ series PCIM-DAS1602/16
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcimdas.
-config COMEDI_PCMCIA_DRIVERS
+config COMEDI_CB_PCIMDDA
+ tristate "MeasurementComputing PCIM-DDA06-16 support"
+ default N
+ ---help---
+ Enable support for ComputerBoards/MeasurementComputing PCIM-DDA06-16
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_pcimdda.
+
+config COMEDI_ME4000
+ tristate "Meilhaus ME-4000 support"
+ default N
+ ---help---
+ Enable support for Meilhaus PCI data acquisition cards
+ ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
+
+ To compile this driver as a module, choose M here: the module will be
+ called me4000.
+
+config COMEDI_ME_DAQ
+ tristate "Meilhaus ME-2000i, ME-2600i, ME-3000vm1 support"
+ default N
+ ---help---
+ Enable support for Meilhaus PCI data acquisition cards
+ ME-2000i, ME-2600i and ME-3000vm1
+
+ To compile this driver as a module, choose M here: the module will be
+ called me_daq.
+
+config COMEDI_NI_6527
+ tristate "NI 6527 support"
+ depends on COMEDI_MITE
+ default N
+ ---help---
+ Enable support for the National Instruments 6527 PCI card
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_6527.
+
+config COMEDI_NI_65XX
+ tristate "NI 65xx static dio PCI card support"
+ depends on COMEDI_MITE
+ default N
+ ---help---
+ Enable support for National Instruments 65xx static dio boards.
+ Supported devices: National Instruments PCI-6509 (ni_65xx),
+ PXI-6509, PCI-6510, PCI-6511, PXI-6511, PCI-6512, PXI-6512, PCI-6513,
+ PXI-6513, PCI-6514, PXI-6514, PCI-6515, PXI-6515, PCI-6516, PCI-6517,
+ PCI-6518, PCI-6519, PCI-6520, PCI-6521, PXI-6521, PCI-6528, PXI-6528
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_65xx.
+
+config COMEDI_NI_660X
+ tristate "NI 660x counter/timer PCI card support"
+ depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
+ PXI-6602 and PXI-6608.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_660x.
+
+config COMEDI_NI_670X
+ tristate "NI 670x PCI card support"
+ depends on COMEDI_MITE
+ default N
+ ---help---
+ Enable support for National Instruments PCI-6703 and PCI-6704
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_670x.
+
+config COMEDI_NI_PCIDIO
+ tristate "NI PCI-DIO32HS, PCI-DIO96, PCI-6533, PCI-6503 support"
+ depends on COMEDI_MITE
+ default N
+ ---help---
+ Enable support for National Instruments PCI-DIO-32HS, PXI-6533,
+ PCI-DIO-96, PCI-DIO-96B, PXI-6508, PCI-6503, PCI-6503B, PCI-6503X,
+ PXI-6503, PCI-6533 and PCI-6534
+ The DIO-96 appears as four 8255 subdevices. See the 8255
+ driver notes for details.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_pcidio.
+
+config COMEDI_NI_PCIMIO
+ tristate "NI PCI-MIO-E series and M series support"
+ depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for National Instruments PCI-MIO-E series and M series
+ (all boards): PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1,
+ PCI-MIO-16E-4, PCI-6014, PCI-6040E, PXI-6040E, PCI-6030E, PCI-6031E,
+ PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E,
+ PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E, PCI-6110, PCI-6111,
+ PCI-6220, PCI-6221, PCI-6224, PXI-6224, PCI-6225, PXI-6225, PCI-6229,
+ PCI-6250, PCI-6251, PCIe-6251, PCI-6254, PCI-6259, PCIe-6259,
+ PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289, PCI-6711, PXI-6711,
+ PCI-6713, PXI-6713, PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E,
+ PCI-6036E, PCI-6731, PCI-6733, PXI-6733, PCI-6143, PXI-6143
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_pcimio.
+
+config COMEDI_RTD520
+ tristate "Real Time Devices PCI4520/DM7520 support"
+ default N
+ ---help---
+ Enable support for Real Time Devices PCI4520/DM7520
+
+ To compile this driver as a module, choose M here: the module will be
+ called rtd520.
+
+config COMEDI_S526
+ tristate "Sensoray s526 support"
+ default N
+ ---help---
+ Enable support for Sensoray s526
+
+ To compile this driver as a module, choose M here: the module will be
+ called s526.
+
+config COMEDI_S626
+ tristate "Sensoray 626 support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for Sensoray 626
+
+ To compile this driver as a module, choose M here: the module will be
+ called s626.
+
+config COMEDI_SSV_DNP
+ tristate "SSV Embedded Systems DIL/Net-PC support"
+ default N
+ ---help---
+ Enable support for SSV Embedded Systems DIL/Net-PC
+
+ To compile this driver as a module, choose M here: the module will be
+ called ssv_dnp.
+
+endif # COMEDI_PCI_DRIVERS
+
+menuconfig COMEDI_PCMCIA_DRIVERS
tristate "Comedi PCMCIA drivers"
depends on COMEDI && PCMCIA && PCCARD
default N
---help---
- Enable lots of comedi PCMCIA and PCCARD drivers to be built
+ Enable comedi PCMCIA and PCCARD drivers to be built
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about PCMCIA comedi drivers.
+
+if COMEDI_PCMCIA_DRIVERS && PCMCIA
+
+config COMEDI_CB_DAS16_CS
+ tristate "CB DAS16 series PCMCIA support"
+ default N
+ ---help---
+ Enable support for the ComputerBoards/MeasurementComputing PCMCIA
+ cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
+
+ To compile this driver as a module, choose M here: the module will be
+ called cb_das16_cs.
+
+config COMEDI_DAS08_CS
+ tristate "CB DAS08 PCMCIA support"
+ select COMEDI_DAS08
+ default N
+ ---help---
+ Enable support for the ComputerBoards/MeasurementComputing DAS-08
+ PCMCIA card
+
+ To compile this driver as a module, choose M here: the module will be
+ called das08_cs.
-config COMEDI_USB_DRIVERS
+config COMEDI_NI_DAQ_700_CS
+ tristate "NI DAQCard-700 PCMCIA support"
+ depends on COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for the National Instruments PCMCIA DAQCard-700 DIO
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_daq_700.
+
+config COMEDI_NI_DAQ_DIO24_CS
+ tristate "NI DAQ-Card DIO-24 PCMCIA support"
+ depends on COMEDI_NI_COMMON
+ default N
+ ---help---
+ Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_daq_dio24.
+
+config COMEDI_NI_LABPC_CS
+ tristate "NI DAQCard-1200 PCMCIA support"
+ depends on COMEDI_NI_LABPC
+ default N
+ ---help---
+ Enable support for the National Instruments PCMCIA DAQCard-1200
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_labpc_cs.
+
+config COMEDI_NI_MIO_CS
+ tristate "NI DAQCard E series PCMCIA support"
+ depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+ default N
+ select COMEDI_FC
+ ---help---
+ Enable support for the National Instruments PCMCIA DAQCard E series
+ DAQCard-ai-16xe-50, DAQCard-ai-16e-4, DAQCard-6062E, DAQCard-6024E
+ and DAQCard-6036E
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_mio_cs.
+
+config COMEDI_QUATECH_DAQP_CS
+ tristate "Quatech DAQP PCMCIA data capture card support"
+ default N
+ ---help---
+ Enable support for the Quatech DAQP PCMCIA data capture cards
+ DAQP-208 and DAQP-308
+
+ To compile this driver as a module, choose M here: the module will be
+ called quatech_daqp_cs.
+
+endif # COMEDI_PCMCIA_DRIVERS
+
+menuconfig COMEDI_USB_DRIVERS
tristate "Comedi USB drivers"
depends on COMEDI && USB
default N
---help---
- Enable lots of comedi USB drivers to be built
+ Enable comedi USB drivers to be built
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about USB comedi drivers.
+
+if COMEDI_USB_DRIVERS && USB
+
+config COMEDI_DT9812
+ tristate "DataTranslation DT9812 USB module support"
+ default N
+ ---help---
+ Enable support for the Data Translation DT9812 USB module
+
+ To compile this driver as a module, choose M here: the module will be
+ called dt9812.
+
+config COMEDI_USBDUX
+ tristate "ITL USBDUX support"
+ default N
+ ---help---
+ Enable support for the USBDUX USB data acquisition board from the
+ University of Stirling and INCITE Technology Limited
+
+ To compile this driver as a module, choose M here: the module will be
+ called usbdux.
+
+config COMEDI_USBDUXFAST
+ tristate "ITL USB-DUXfast support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for the USB-DUXfast USB data acquisition board from
+ the University of Stirling and INCITE Technology Limited
+
+ To compile this driver as a module, choose M here: the module will be
+ called usbduxfast.
+
+config COMEDI_VMK80XX
+ tristate "Velleman VM110/VM140 USB Board support"
+ default N
+ ---help---
+ Build the Velleman USB Board Low-Level Driver supporting the
+ K8055/K8061 aka VM110/VM140 devices
+
+ To compile this driver as a module, choose M here: the module will be
+ called vmk80xx.
+
+endif # COMEDI_USB_DRIVERS
+
+menuconfig COMEDI_NI_COMMON
+ tristate "Comedi National Instruments card support"
+ depends on COMEDI
+ default N
+ ---help---
+ Enable comedi support for National Instruments cards.
+ Modules in this section are used by many comedi NI drivers.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about National Instruments cards.
+
+if COMEDI_NI_COMMON
+
+config COMEDI_MITE
+ tristate "NI Mite PCI interface chip support"
+ depends on PCI
+ default N
+ ---help---
+ Enable support for National Instruments Mite PCI interface chip
+
+ To compile this driver as a module, choose M here: the module will be
+ called mite.
+
+config COMEDI_NI_TIO
+ tristate "NI general purpose counter support"
+ select COMEDI_MITE
+ default N
+ ---help---
+ Enable support for National Instruments general purpose counters.
+ This module is not used directly by end-users. Rather, it
+ is used by other drivers (for example ni_660x and ni_pcimio)
+ to provide support for NI's general purpose counters.
+
+ To compile this driver as modules, choose M here: two modules will
+ be built: ni_tio and ni_tiocmd.
+
+config COMEDI_NI_LABPC
+ tristate "NI Lab-PC and compatibles ISA and PCI support"
+ select COMEDI_FC
+ default N
+ ---help---
+ Enable support for National Instruments Lab-PC and compatibles
+ Lab-PC-1200, Lab-PC-1200AI, Lab-PC+ and PCI-1200.
+ Kernel-level ISA plug-and-play support for the lab-pc-1200 boards has
+ not yet been added to the driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni_labpc.
+
+endif # COMEDI_NI_COMMON
+
+config COMEDI_FC
+ tristate "Comedi shared functions for low-level driver support"
+ default N
+ ---help---
+ Enable support for shared functions for low-level drivers.
+ This module is not used directly by end-users. Rather, it
+ is used by many other comedi drivers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called comedi_fc.
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index 05811f79d85..20afea301c8 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -9,4 +9,3 @@ comedi-objs := \
range.o \
drivers.o \
comedi_compat32.o \
- comedi_ksyms.o \
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index b559a9c2f85..6c900e2756f 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -46,8 +46,10 @@
#define COMEDI_DEVCONF_AUX_DATA2_LENGTH 26
#define COMEDI_DEVCONF_AUX_DATA1_LENGTH 27
#define COMEDI_DEVCONF_AUX_DATA0_LENGTH 28
-#define COMEDI_DEVCONF_AUX_DATA_HI 29 /* most significant 32 bits of pointer address (if needed) */
-#define COMEDI_DEVCONF_AUX_DATA_LO 30 /* least significant 32 bits of pointer address */
+/* most significant 32 bits of pointer address (if needed) */
+#define COMEDI_DEVCONF_AUX_DATA_HI 29
+/* least significant 32 bits of pointer address */
+#define COMEDI_DEVCONF_AUX_DATA_LO 30
#define COMEDI_DEVCONF_AUX_DATA_LENGTH 31 /* total data length */
/* max length of device and driver names */
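Editor's note: the HI/LO pair above exists so that a 64-bit pointer can be carried through the 32-bit devconfig option array. A minimal sketch of the split, assuming a userspace options[] array indexed by these COMEDI_DEVCONF_* constants (illustrative only, not part of the patch):

	#include <stdint.h>
	/* plus this header for the COMEDI_DEVCONF_* indices */

	/* Illustrative sketch: hand a user buffer to COMEDI_DEVCONFIG by
	 * splitting its address across the LO/HI option slots. */
	static void pack_aux_data(unsigned int options[], void *buf, unsigned int len)
	{
		uint64_t addr = (uint64_t)(uintptr_t)buf;

		options[COMEDI_DEVCONF_AUX_DATA_LO] = (unsigned int)(addr & 0xffffffff);
		options[COMEDI_DEVCONF_AUX_DATA_HI] = (unsigned int)(addr >> 32); /* 0 on 32-bit */
		options[COMEDI_DEVCONF_AUX_DATA_LENGTH] = len;
	}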
@@ -55,8 +57,10 @@
/* packs and unpacks a channel/range number */
-#define CR_PACK(chan, rng, aref) ((((aref)&0x3)<<24) | (((rng)&0xff)<<16) | (chan))
-#define CR_PACK_FLAGS(chan, range, aref, flags) (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
+#define CR_PACK(chan, rng, aref) \
+ ((((aref)&0x3)<<24) | (((rng)&0xff)<<16) | (chan))
+#define CR_PACK_FLAGS(chan, range, aref, flags) \
+ (CR_PACK(chan, range, aref) | ((flags) & CR_FLAGS_MASK))
#define CR_CHAN(a) ((a)&0xffff)
#define CR_RANGE(a) (((a)>>16)&0xff)
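Editor's note: a minimal sketch of the pack/unpack round trip these macros provide (illustrative only; aref 0 is used for brevity):

	/* Illustrative sketch, not part of the patch: pack channel 5, range 2,
	 * aref 0 into a chanspec and recover the fields with the macros above. */
	static int chanspec_roundtrip_ok(void)
	{
		unsigned int chanspec = CR_PACK(5, 2, 0);

		return CR_CHAN(chanspec) == 5 && CR_RANGE(chanspec) == 2;
	}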
@@ -125,7 +129,8 @@
/* command flags */
/* These flags are used in comedi_cmd structures */
-#define CMDF_PRIORITY 0x00000008 /* try to use a real-time interrupt while performing command */
+/* try to use a real-time interrupt while performing command */
+#define CMDF_PRIORITY 0x00000008
#define TRIG_RT CMDF_PRIORITY /* compatibility definition */
@@ -151,15 +156,15 @@
#define TRIG_ANY 0xffffffff
#define TRIG_INVALID 0x00000000
-#define TRIG_NONE 0x00000001 /* never trigger */
-#define TRIG_NOW 0x00000002 /* trigger now + N ns */
-#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
-#define TRIG_TIME 0x00000008 /* trigger at time N ns */
-#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
-#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
-#define TRIG_EXT 0x00000040 /* trigger on external signal N */
-#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
-#define TRIG_OTHER 0x00000100 /* driver defined */
+#define TRIG_NONE 0x00000001 /* never trigger */
+#define TRIG_NOW 0x00000002 /* trigger now + N ns */
+#define TRIG_FOLLOW 0x00000004 /* trigger on next lower level trig */
+#define TRIG_TIME 0x00000008 /* trigger at time N ns */
+#define TRIG_TIMER 0x00000010 /* trigger at rate N ns */
+#define TRIG_COUNT 0x00000020 /* trigger when count reaches N */
+#define TRIG_EXT 0x00000040 /* trigger on external signal N */
+#define TRIG_INT 0x00000080 /* trigger on comedi-internal signal N */
+#define TRIG_OTHER 0x00000100 /* driver defined */
/* subdevice flags */
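Editor's note, before the subdevice-flag hunk: each TRIG_* value above is consumed by one of the *_src members of struct comedi_cmd, with the companion *_arg carrying N. A minimal sketch, assuming the usual comedi_cmd field names (not all of them appear in this hunk):

	/* Illustrative sketch: a periodic analog-input command built from the
	 * trigger sources defined above (not part of this patch). */
	struct comedi_cmd cmd = {
		.subdev		= 0,
		.start_src	= TRIG_NOW,	/* start immediately */
		.start_arg	= 0,
		.scan_begin_src	= TRIG_TIMER,	/* one scan every N ns */
		.scan_begin_arg	= 1000000,	/* 1 ms */
		.convert_src	= TRIG_TIMER,
		.convert_arg	= 10000,	/* 10 us between channels */
		.scan_end_src	= TRIG_COUNT,
		.scan_end_arg	= 4,		/* channels per scan */
		.stop_src	= TRIG_COUNT,
		.stop_arg	= 1000,		/* stop after 1000 scans */
	};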
@@ -176,14 +181,17 @@
#define SDF_MODE3 0x0400 /* can do mode 3 */
#define SDF_MODE4 0x0800 /* can do mode 4 */
#define SDF_CMD 0x1000 /* can do commands (deprecated) */
-#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
-#define SDF_CMD_WRITE 0x4000 /* can do output commands */
-#define SDF_CMD_READ 0x8000 /* can do input commands */
-
-#define SDF_READABLE 0x00010000 /* subdevice can be read (e.g. analog input) */
-#define SDF_WRITABLE 0x00020000 /* subdevice can be written (e.g. analog output) */
+#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
+#define SDF_CMD_WRITE 0x4000 /* can do output commands */
+#define SDF_CMD_READ 0x8000 /* can do input commands */
+
+/* subdevice can be read (e.g. analog input) */
+#define SDF_READABLE 0x00010000
+/* subdevice can be written (e.g. analog output) */
+#define SDF_WRITABLE 0x00020000
#define SDF_WRITEABLE SDF_WRITABLE /* spelling error in API */
-#define SDF_INTERNAL 0x00040000 /* subdevice does not have externally visible lines */
+/* subdevice does not have externally visible lines */
+#define SDF_INTERNAL 0x00040000
#define SDF_GROUND 0x00100000 /* can do aref=ground */
#define SDF_COMMON 0x00200000 /* can do aref=common */
#define SDF_DIFF 0x00400000 /* can do aref=diff */
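Editor's note: a minimal sketch of how these subdevice flags are usually tested, assuming the flag word was obtained elsewhere (for example via COMEDI_SUBDINFO); illustrative only:

	/* Illustrative sketch: decide whether streaming input is possible on a
	 * subdevice, given its flag word. */
	static int can_stream_input(unsigned int subd_flags)
	{
		if (!(subd_flags & SDF_READABLE))
			return 0;				/* cannot be read at all */
		return (subd_flags & SDF_CMD_READ) != 0;	/* supports input commands */
	}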
@@ -242,22 +250,25 @@
INSN_CONFIG_DISARM = 32,
INSN_CONFIG_GET_COUNTER_STATUS = 33,
INSN_CONFIG_RESET = 34,
- INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001, /* Use CTR as single pulsegenerator */
- INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002, /* Use CTR as pulsetraingenerator */
- INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003, /* Use the counter as encoder */
+ /* Use CTR as single pulse generator */
+ INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
+ /* Use CTR as pulse train generator */
+ INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
+ /* Use the counter as encoder */
+ INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
INSN_CONFIG_SET_GATE_SRC = 2001, /* Set gate source */
INSN_CONFIG_GET_GATE_SRC = 2002, /* Get gate source */
- INSN_CONFIG_SET_CLOCK_SRC = 2003, /* Set master clock source */
- INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */
- INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */
- /* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */
- INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006, /* Get size in bytes of
- subdevice's on-board
- fifos used during
- streaming
- input/output */
+ /* Set master clock source */
+ INSN_CONFIG_SET_CLOCK_SRC = 2003,
+ INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */
+ INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */
+ /* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */
+ /* Get size in bytes of subdevice's on-board fifos used during
+ * streaming input/output */
+ INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
INSN_CONFIG_SET_COUNTER_MODE = 4097,
- INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE, /* deprecated */
+ /* INSN_CONFIG_8254_SET_MODE is deprecated */
+ INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
INSN_CONFIG_8254_READ_STATUS = 4098,
INSN_CONFIG_SET_ROUTING = 4099,
INSN_CONFIG_GET_ROUTING = 4109,
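Editor's note: these identifiers travel in data[0] of an INSN_CONFIG instruction, with any further arguments in the following data words. A hedged sketch using INSN_CONFIG_DIO_INPUT (defined earlier in this enum); the subdevice and channel numbers are hypothetical:

	#include <string.h>
	/* plus this header for struct comedi_insn and the constants */

	/* Illustrative sketch: configure DIO channel 3 of a hypothetical
	 * subdevice 2 as an input with a single INSN_CONFIG instruction. */
	static void set_dio_channel_to_input(struct comedi_insn *insn, unsigned int *data)
	{
		memset(insn, 0, sizeof(*insn));
		data[0] = INSN_CONFIG_DIO_INPUT;	/* identifier goes in data[0] */
		insn->insn = INSN_CONFIG;
		insn->n = 1;
		insn->data = data;
		insn->subdev = 2;			/* hypothetical DIO subdevice */
		insn->chanspec = CR_PACK(3, 0, 0);	/* channel 3 */
	}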
@@ -265,8 +276,11 @@
INSN_CONFIG_PWM_SET_PERIOD = 5000, /* sets frequency */
INSN_CONFIG_PWM_GET_PERIOD = 5001, /* gets frequency */
INSN_CONFIG_GET_PWM_STATUS = 5002, /* is it running? */
- INSN_CONFIG_PWM_SET_H_BRIDGE = 5003, /* sets H bridge: duty cycle and sign bit for a relay at the same time */
- INSN_CONFIG_PWM_GET_H_BRIDGE = 5004 /* gets H bridge data: duty cycle and the sign bit */
+ /* sets H bridge: duty cycle and sign bit for a relay at the
+ * same time */
+ INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
+ /* gets H bridge data: duty cycle and the sign bit */
+ INSN_CONFIG_PWM_GET_H_BRIDGE = 5004
};
enum comedi_io_direction {
@@ -321,7 +335,7 @@
struct comedi_insn {
unsigned int insn;
unsigned int n;
- unsigned int *data;
+ unsigned int __user *data;
unsigned int subdev;
unsigned int chanspec;
unsigned int unused[3];
@@ -329,7 +343,7 @@
struct comedi_insnlist {
unsigned int n_insns;
- struct comedi_insn *insns;
+ struct comedi_insn __user *insns;
};
struct comedi_cmd {
@@ -351,24 +365,24 @@
unsigned int stop_src;
unsigned int stop_arg;
- unsigned int *chanlist; /* channel/range list */
+ unsigned int __user *chanlist; /* channel/range list */
unsigned int chanlist_len;
- short *data; /* data list, size depends on subd flags */
+ short __user *data; /* data list, size depends on subd flags */
unsigned int data_len;
};
struct comedi_chaninfo {
unsigned int subdev;
- unsigned int *maxdata_list;
- unsigned int *flaglist;
- unsigned int *rangelist;
+ unsigned int __user *maxdata_list;
+ unsigned int __user *flaglist;
+ unsigned int __user *rangelist;
unsigned int unused[4];
};
struct comedi_rangeinfo {
unsigned int range_type;
- void *range_ptr;
+ void __user *range_ptr;
};
struct comedi_krange {
@@ -387,7 +401,8 @@
unsigned int flags; /* channel flags */
unsigned int range_type; /* lookup in kernel */
unsigned int settling_time_0;
- unsigned insn_bits_support; /* see support_level enum for values */
+ /* see support_level enum for values */
+ unsigned insn_bits_support;
unsigned int unused[8];
};
@@ -451,7 +466,8 @@
#define COMEDI_CB_EOS 1 /* end of scan */
#define COMEDI_CB_EOA 2 /* end of acquisition */
-#define COMEDI_CB_BLOCK 4 /* data has arrived: wakes up read() / write() */
+#define COMEDI_CB_BLOCK 4 /* data has arrived:
+ * wakes up read() / write() */
#define COMEDI_CB_EOBUF 8 /* DEPRECATED: end of buffer */
#define COMEDI_CB_ERROR 16 /* card error during acquisition */
#define COMEDI_CB_OVERFLOW 32 /* buffer overflow/underflow */
@@ -485,12 +501,15 @@
I8254_MODE2 = (2 << 1), /* Rate generator */
I8254_MODE3 = (3 << 1), /* Square wave mode */
I8254_MODE4 = (4 << 1), /* Software triggered strobe */
- I8254_MODE5 = (5 << 1), /* Hardware triggered strobe (retriggerable) */
- I8254_BCD = 1, /* use binary-coded decimal instead of binary (pretty useless) */
+ I8254_MODE5 = (5 << 1), /* Hardware triggered strobe
+ * (retriggerable) */
+ I8254_BCD = 1, /* use binary-coded decimal instead of binary
+ * (pretty useless) */
I8254_BINARY = 0
};
- static inline unsigned NI_USUAL_PFI_SELECT(unsigned pfi_channel) {
+ static inline unsigned NI_USUAL_PFI_SELECT(unsigned pfi_channel)
+ {
if (pfi_channel < 10)
return 0x1 + pfi_channel;
else
@@ -580,24 +599,30 @@
NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS = 0x3,
NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS = 0x4,
NI_GPCT_NEXT_TC_CLOCK_SRC_BITS = 0x5,
- NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6, /* NI 660x-specific */
+ /* NI 660x-specific */
+ NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS = 0x6,
NI_GPCT_PXI10_CLOCK_SRC_BITS = 0x7,
NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS = 0x8,
NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS = 0x9,
NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK = 0x30000000,
NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS = 0x0,
- NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000, /* divide source by 2 */
- NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000, /* divide source by 8 */
+ /* divide source by 2 */
+ NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS = 0x10000000,
+ /* divide source by 8 */
+ NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS = 0x20000000,
NI_GPCT_INVERT_CLOCK_SRC_BIT = 0x80000000
};
- static inline unsigned NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(unsigned n) {
+ static inline unsigned NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(unsigned n)
+ {
/* NI 660x-specific */
return 0x10 + n;
}
- static inline unsigned NI_GPCT_RTSI_CLOCK_SRC_BITS(unsigned n) {
+ static inline unsigned NI_GPCT_RTSI_CLOCK_SRC_BITS(unsigned n)
+ {
return 0x18 + n;
}
- static inline unsigned NI_GPCT_PFI_CLOCK_SRC_BITS(unsigned n) {
+ static inline unsigned NI_GPCT_PFI_CLOCK_SRC_BITS(unsigned n)
+ {
/* no pfi on NI 660x */
return 0x20 + n;
}
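Editor's note: the clock-source bit groups above are OR-ed into a single value that is later handed to INSN_CONFIG_SET_CLOCK_SRC. A minimal sketch of composing one such value (usage convention assumed, not defined by this hunk):

	/* Illustrative sketch: PFI line 3 as the counter clock, prescaled by 2. */
	static unsigned int gpct_clock_src_example(void)
	{
		return NI_GPCT_PFI_CLOCK_SRC_BITS(3) |
		       NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
	}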
@@ -622,19 +647,24 @@ May be bitwise-or'd with CR_EDGE or CR_INVERT. */
NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT = 0x201,
NI_GPCT_SELECTED_GATE_GATE_SELECT = 0x21e,
/* m-series "second gate" sources are unknown,
- we should add them here with an offset of 0x300 when known. */
+ * we should add them here with an offset of 0x300 when
+ * known. */
NI_GPCT_DISABLED_GATE_SELECT = 0x8000,
};
- static inline unsigned NI_GPCT_GATE_PIN_GATE_SELECT(unsigned n) {
+ static inline unsigned NI_GPCT_GATE_PIN_GATE_SELECT(unsigned n)
+ {
return 0x102 + n;
}
- static inline unsigned NI_GPCT_RTSI_GATE_SELECT(unsigned n) {
+ static inline unsigned NI_GPCT_RTSI_GATE_SELECT(unsigned n)
+ {
return NI_USUAL_RTSI_SELECT(n);
}
- static inline unsigned NI_GPCT_PFI_GATE_SELECT(unsigned n) {
+ static inline unsigned NI_GPCT_PFI_GATE_SELECT(unsigned n)
+ {
return NI_USUAL_PFI_SELECT(n);
}
- static inline unsigned NI_GPCT_UP_DOWN_PIN_GATE_SELECT(unsigned n) {
+ static inline unsigned NI_GPCT_UP_DOWN_PIN_GATE_SELECT(unsigned n)
+ {
return 0x202 + n;
}
@@ -650,7 +680,8 @@ INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters. */
/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
NI_GPCT_DISABLED_OTHER_SELECT = 0x8000,
};
- static inline unsigned NI_GPCT_PFI_OTHER_SELECT(unsigned n) {
+ static inline unsigned NI_GPCT_PFI_OTHER_SELECT(unsigned n)
+ {
return NI_USUAL_PFI_SELECT(n);
}
@@ -658,14 +689,14 @@ INSN_CONFIG_SET_OTHER_SRC when using NI general-purpose counters. */
INSN_CONFIG_ARM */
enum ni_gpct_arm_source {
NI_GPCT_ARM_IMMEDIATE = 0x0,
- NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, /* Start both the counter and
- the adjacent paired counter
- simultaneously */
- /* NI doesn't document bits for selecting hardware arm triggers. If
- * the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
- * significant bits (3 bits for 660x or 5 bits for m-series) through to
- * the hardware. This will at least allow someone to figure out what
- * the bits do later. */
+ NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1, /* Start both the counter
+ * and the adjacent paired
+ * counter simultaneously */
+ /* NI doesn't document bits for selecting hardware arm triggers.
+ * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
+ * significant bits (3 bits for 660x or 5 bits for m-series)
+ * through to the hardware. This will at least allow someone to
+ * figure out what the bits do later. */
NI_GPCT_ARM_UNKNOWN = 0x1000,
};
@@ -699,7 +730,8 @@ INSN_CONFIG_ARM */
NI_MIO_PLL_PXI10_CLOCK = 3,
NI_MIO_PLL_RTSI0_CLOCK = 4
};
- static inline unsigned NI_MIO_PLL_RTSI_CLOCK(unsigned rtsi_channel) {
+ static inline unsigned NI_MIO_PLL_RTSI_CLOCK(unsigned rtsi_channel)
+ {
return NI_MIO_PLL_RTSI0_CLOCK + rtsi_channel;
}
@@ -716,10 +748,11 @@ INSN_CONFIG_ARM */
NI_RTSI_OUTPUT_G_GATE0 = 6,
NI_RTSI_OUTPUT_RGOUT0 = 7,
NI_RTSI_OUTPUT_RTSI_BRD_0 = 8,
- NI_RTSI_OUTPUT_RTSI_OSC = 12 /* pre-m-series always have RTSI clock
- on line 7 */
+ NI_RTSI_OUTPUT_RTSI_OSC = 12 /* pre-m-series always have RTSI
+ * clock on line 7 */
};
- static inline unsigned NI_RTSI_OUTPUT_RTSI_BRD(unsigned n) {
+ static inline unsigned NI_RTSI_OUTPUT_RTSI_BRD(unsigned n)
+ {
return NI_RTSI_OUTPUT_RTSI_BRD_0 + n;
}
@@ -754,7 +787,8 @@ INSN_CONFIG_ARM */
NI_PFI_OUTPUT_CDI_SAMPLE = 29,
NI_PFI_OUTPUT_CDO_UPDATE = 30
};
- static inline unsigned NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel) {
+ static inline unsigned NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel)
+ {
return NI_PFI_OUTPUT_RTSI0 + rtsi_channel;
}
@@ -772,10 +806,12 @@ INSN_CONFIG_ARM */
/* NI External Trigger lines. These values are not arbitrary, but are related
* to the bits required to program the board (offset by 1 for historical
* reasons). */
- static inline unsigned NI_EXT_PFI(unsigned pfi_channel) {
+ static inline unsigned NI_EXT_PFI(unsigned pfi_channel)
+ {
return NI_USUAL_PFI_SELECT(pfi_channel) - 1;
}
- static inline unsigned NI_EXT_RTSI(unsigned rtsi_channel) {
+ static inline unsigned NI_EXT_RTSI(unsigned rtsi_channel)
+ {
return NI_USUAL_RTSI_SELECT(rtsi_channel) - 1;
}
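Editor's note: as the hunk header says, these external-trigger helpers may be bitwise-or'd with CR_EDGE or CR_INVERT and used as TRIG_EXT arguments. A minimal sketch, assuming the usual struct comedi_cmd field names:

	/* Illustrative sketch: start an acquisition on a rising edge of PFI0
	 * (not part of this patch). */
	static void start_on_pfi0_example(struct comedi_cmd *cmd)
	{
		cmd->start_src = TRIG_EXT;
		cmd->start_arg = CR_EDGE | NI_EXT_PFI(0);	/* rising edge of PFI0 */
	}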
@@ -801,21 +837,25 @@ INSN_CONFIG_ARM */
NI_CDIO_SCAN_BEGIN_SRC_FREQ_OUT = 32,
NI_CDIO_SCAN_BEGIN_SRC_DIO_CHANGE_DETECT_IRQ = 33
};
- static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel) {
+ static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
+ {
return NI_USUAL_PFI_SELECT(pfi_channel);
}
- static inline unsigned NI_CDIO_SCAN_BEGIN_SRC_RTSI(unsigned
- rtsi_channel) {
+ static inline unsigned
+ NI_CDIO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
+ {
return NI_USUAL_RTSI_SELECT(rtsi_channel);
}
/* scan_begin_src for scan_begin_arg==TRIG_EXT with analog output command on NI
* boards. These scan begin sources can also be bitwise-or'd with CR_INVERT to
* change polarity. */
- static inline unsigned NI_AO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel) {
+ static inline unsigned NI_AO_SCAN_BEGIN_SRC_PFI(unsigned pfi_channel)
+ {
return NI_USUAL_PFI_SELECT(pfi_channel);
}
- static inline unsigned NI_AO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel) {
+ static inline unsigned NI_AO_SCAN_BEGIN_SRC_RTSI(unsigned rtsi_channel)
+ {
return NI_USUAL_RTSI_SELECT(rtsi_channel);
}
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 581aa5fee2e..41a7a62ba49 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -25,9 +25,8 @@
*/
#define __NO_VERSION__
-#include "comedi.h"
#include <linux/uaccess.h>
-
+#include "comedi.h"
#include "comedi_compat32.h"
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index aca96747e5e..aced00e5cd1 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -49,7 +49,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-/* #include "kvmem.h" */
+#include "internal.h"
MODULE_AUTHOR("http://www.comedi.org");
MODULE_DESCRIPTION("Comedi core module");
@@ -57,13 +57,14 @@ MODULE_LICENSE("GPL");
#ifdef CONFIG_COMEDI_DEBUG
int comedi_debug;
+EXPORT_SYMBOL(comedi_debug);
module_param(comedi_debug, int, 0644);
#endif
int comedi_autoconfig = 1;
module_param(comedi_autoconfig, bool, 0444);
-int comedi_num_legacy_minors;
+static int comedi_num_legacy_minors;
module_param(comedi_num_legacy_minors, int, 0444);
static DEFINE_SPINLOCK(comedi_file_info_table_lock);
@@ -71,25 +72,32 @@ static struct comedi_device_file_info
*comedi_file_info_table[COMEDI_NUM_MINORS];
static int do_devconfig_ioctl(struct comedi_device *dev,
- struct comedi_devconfig *arg);
-static int do_bufconfig_ioctl(struct comedi_device *dev, void *arg);
+ struct comedi_devconfig __user *arg);
+static int do_bufconfig_ioctl(struct comedi_device *dev,
+ struct comedi_bufconfig __user *arg);
static int do_devinfo_ioctl(struct comedi_device *dev,
- struct comedi_devinfo *arg, struct file *file);
+ struct comedi_devinfo __user *arg,
+ struct file *file);
static int do_subdinfo_ioctl(struct comedi_device *dev,
- struct comedi_subdinfo *arg, void *file);
+ struct comedi_subdinfo __user *arg, void *file);
static int do_chaninfo_ioctl(struct comedi_device *dev,
- struct comedi_chaninfo *arg);
-static int do_bufinfo_ioctl(struct comedi_device *dev, void *arg);
-static int do_cmd_ioctl(struct comedi_device *dev, void *arg, void *file);
+ struct comedi_chaninfo __user *arg);
+static int do_bufinfo_ioctl(struct comedi_device *dev,
+ struct comedi_bufinfo __user *arg);
+static int do_cmd_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *arg, void *file);
static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg,
void *file);
static int do_unlock_ioctl(struct comedi_device *dev, unsigned int arg,
void *file);
static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
void *file);
-static int do_cmdtest_ioctl(struct comedi_device *dev, void *arg, void *file);
-static int do_insnlist_ioctl(struct comedi_device *dev, void *arg, void *file);
-static int do_insn_ioctl(struct comedi_device *dev, void *arg, void *file);
+static int do_cmdtest_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *arg, void *file);
+static int do_insnlist_ioctl(struct comedi_device *dev,
+ struct comedi_insnlist __user *arg, void *file);
+static int do_insn_ioctl(struct comedi_device *dev,
+ struct comedi_insn __user *arg, void *file);
static int do_poll_ioctl(struct comedi_device *dev, unsigned int subd,
void *file);
@@ -128,7 +136,8 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
/* Device config is special, because it must work on
* an unconfigured device. */
if (cmd == COMEDI_DEVCONFIG) {
- rc = do_devconfig_ioctl(dev, (void *)arg);
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
goto done;
}
@@ -140,22 +149,27 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case COMEDI_BUFCONFIG:
- rc = do_bufconfig_ioctl(dev, (void *)arg);
+ rc = do_bufconfig_ioctl(dev,
+ (struct comedi_bufconfig __user *)arg);
break;
case COMEDI_DEVINFO:
- rc = do_devinfo_ioctl(dev, (void *)arg, file);
+ rc = do_devinfo_ioctl(dev, (struct comedi_devinfo __user *)arg,
+ file);
break;
case COMEDI_SUBDINFO:
- rc = do_subdinfo_ioctl(dev, (void *)arg, file);
+ rc = do_subdinfo_ioctl(dev,
+ (struct comedi_subdinfo __user *)arg,
+ file);
break;
case COMEDI_CHANINFO:
- rc = do_chaninfo_ioctl(dev, (void *)arg);
+ rc = do_chaninfo_ioctl(dev, (void __user *)arg);
break;
case COMEDI_RANGEINFO:
- rc = do_rangeinfo_ioctl(dev, (void *)arg);
+ rc = do_rangeinfo_ioctl(dev, (void __user *)arg);
break;
case COMEDI_BUFINFO:
- rc = do_bufinfo_ioctl(dev, (void *)arg);
+ rc = do_bufinfo_ioctl(dev,
+ (struct comedi_bufinfo __user *)arg);
break;
case COMEDI_LOCK:
rc = do_lock_ioctl(dev, arg, file);
@@ -167,16 +181,20 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = do_cancel_ioctl(dev, arg, file);
break;
case COMEDI_CMD:
- rc = do_cmd_ioctl(dev, (void *)arg, file);
+ rc = do_cmd_ioctl(dev, (struct comedi_cmd __user *)arg, file);
break;
case COMEDI_CMDTEST:
- rc = do_cmdtest_ioctl(dev, (void *)arg, file);
+ rc = do_cmdtest_ioctl(dev, (struct comedi_cmd __user *)arg,
+ file);
break;
case COMEDI_INSNLIST:
- rc = do_insnlist_ioctl(dev, (void *)arg, file);
+ rc = do_insnlist_ioctl(dev,
+ (struct comedi_insnlist __user *)arg,
+ file);
break;
case COMEDI_INSN:
- rc = do_insn_ioctl(dev, (void *)arg, file);
+ rc = do_insn_ioctl(dev, (struct comedi_insn __user *)arg,
+ file);
break;
case COMEDI_POLL:
rc = do_poll_ioctl(dev, arg, file);
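Editor's note: all of the casts above follow one pattern: the raw ioctl argument is reinterpreted as a typed __user pointer and is then only accessed through copy_from_user()/copy_to_user(), never dereferenced directly. A minimal sketch of that shape (a hypothetical handler, not one from this file):

	/* Illustrative sketch of the __user convention used above: a hypothetical
	 * handler that copies a comedi_bufconfig in and back out. Assumes the
	 * includes already present in this file (<linux/uaccess.h>, comedi headers). */
	static int example_bufconfig_handler(struct comedi_device *dev,
					     struct comedi_bufconfig __user *arg)
	{
		struct comedi_bufconfig bc;

		if (copy_from_user(&bc, arg, sizeof(bc)))
			return -EFAULT;		/* never dereference arg directly */

		/* ... operate on the kernel copy 'bc' ... */

		if (copy_to_user(arg, &bc, sizeof(bc)))
			return -EFAULT;
		return 0;
	}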
@@ -205,7 +223,7 @@ done:
none
*/
static int do_devconfig_ioctl(struct comedi_device *dev,
- struct comedi_devconfig *arg)
+ struct comedi_devconfig __user *arg)
{
struct comedi_devconfig it;
int ret;
@@ -285,7 +303,8 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
modified bufconfig at arg
*/
-static int do_bufconfig_ioctl(struct comedi_device *dev, void *arg)
+static int do_bufconfig_ioctl(struct comedi_device *dev,
+ struct comedi_bufconfig __user *arg)
{
struct comedi_bufconfig bc;
struct comedi_async *async;
@@ -346,7 +365,8 @@ copyback:
*/
static int do_devinfo_ioctl(struct comedi_device *dev,
- struct comedi_devinfo *arg, struct file *file)
+ struct comedi_devinfo __user *arg,
+ struct file *file)
{
struct comedi_devinfo devinfo;
const unsigned minor = iminor(file->f_dentry->d_inode);
@@ -396,7 +416,7 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
*/
static int do_subdinfo_ioctl(struct comedi_device *dev,
- struct comedi_subdinfo *arg, void *file)
+ struct comedi_subdinfo __user *arg, void *file)
{
int ret, i;
struct comedi_subdinfo *tmp, *us;
@@ -478,7 +498,7 @@ static int do_subdinfo_ioctl(struct comedi_device *dev,
*/
static int do_chaninfo_ioctl(struct comedi_device *dev,
- struct comedi_chaninfo *arg)
+ struct comedi_chaninfo __user *arg)
{
struct comedi_subdevice *s;
struct comedi_chaninfo it;
@@ -542,7 +562,8 @@ static int do_chaninfo_ioctl(struct comedi_device *dev,
modified bufinfo at arg
*/
-static int do_bufinfo_ioctl(struct comedi_device *dev, void *arg)
+static int do_bufinfo_ioctl(struct comedi_device *dev,
+ struct comedi_bufinfo __user *arg)
{
struct comedi_bufinfo bi;
struct comedi_subdevice *s;
@@ -598,23 +619,24 @@ copyback:
static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
unsigned int *data, void *file);
/*
- * COMEDI_INSNLIST
- * synchronous instructions
+ * COMEDI_INSNLIST
+ * synchronous instructions
*
- * arg:
- * pointer to sync cmd structure
+ * arg:
+ * pointer to sync cmd structure
*
- * reads:
- * sync cmd struct at arg
- * instruction list
- * data (for writes)
+ * reads:
+ * sync cmd struct at arg
+ * instruction list
+ * data (for writes)
*
- * writes:
- * data (for reads)
+ * writes:
+ * data (for reads)
*/
/* arbitrary limits */
#define MAX_SAMPLES 256
-static int do_insnlist_ioctl(struct comedi_device *dev, void *arg, void *file)
+static int do_insnlist_ioctl(struct comedi_device *dev,
+ struct comedi_insnlist __user *arg, void *file)
{
struct comedi_insnlist insnlist;
struct comedi_insn *insns = NULL;
@@ -736,7 +758,8 @@ static int check_insn_config_length(struct comedi_insn *insn,
/* by default we allow the insn since we don't have checks for
* all possible cases yet */
default:
- printk("comedi: no check for data length of config insn id "
+ printk(KERN_WARNING
+ "comedi: no check for data length of config insn id "
"%i is implemented.\n"
" Add a check to %s in %s.\n"
" Assuming n=%i is correct.\n", data[0], __func__,
@@ -837,7 +860,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
goto out;
}
- ret = check_chanlist(s, 1, &insn->chanspec);
+ ret = comedi_check_chanlist(s, 1, &insn->chanspec);
if (ret < 0) {
ret = -EINVAL;
DPRINTK("bad chanspec\n");
@@ -894,20 +917,21 @@ out:
}
/*
- * COMEDI_INSN
- * synchronous instructions
+ * COMEDI_INSN
+ * synchronous instructions
*
- * arg:
- * pointer to insn
+ * arg:
+ * pointer to insn
*
- * reads:
- * struct comedi_insn struct at arg
- * data (for writes)
+ * reads:
+ * struct comedi_insn struct at arg
+ * data (for writes)
*
- * writes:
- * data (for reads)
+ * writes:
+ * data (for reads)
*/
-static int do_insn_ioctl(struct comedi_device *dev, void *arg, void *file)
+static int do_insn_ioctl(struct comedi_device *dev,
+ struct comedi_insn __user *arg, void *file)
{
struct comedi_insn insn;
unsigned int *data = NULL;
@@ -928,8 +952,9 @@ static int do_insn_ioctl(struct comedi_device *dev, void *arg, void *file)
if (insn.n > MAX_SAMPLES)
insn.n = MAX_SAMPLES;
if (insn.insn & INSN_MASK_WRITE) {
- if (copy_from_user
- (data, insn.data, insn.n * sizeof(unsigned int))) {
+ if (copy_from_user(data,
+ insn.data,
+ insn.n * sizeof(unsigned int))) {
ret = -EFAULT;
goto error;
}
@@ -938,8 +963,9 @@ static int do_insn_ioctl(struct comedi_device *dev, void *arg, void *file)
if (ret < 0)
goto error;
if (insn.insn & INSN_MASK_READ) {
- if (copy_to_user
- (insn.data, data, insn.n * sizeof(unsigned int))) {
+ if (copy_to_user(insn.data,
+ data,
+ insn.n * sizeof(unsigned int))) {
ret = -EFAULT;
goto error;
}
@@ -952,30 +978,27 @@ error:
return ret;
}
-/*
- COMEDI_CMD
- command ioctl
-
- arg:
- pointer to cmd structure
-
- reads:
- cmd structure at arg
- channel/range list
+static void comedi_set_subdevice_runflags(struct comedi_subdevice *s,
+ unsigned mask, unsigned bits)
+{
+ unsigned long flags;
- writes:
- modified cmd structure at arg
+ spin_lock_irqsave(&s->spin_lock, flags);
+ s->runflags &= ~mask;
+ s->runflags |= (bits & mask);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+}
-*/
-static int do_cmd_ioctl(struct comedi_device *dev, void *arg, void *file)
+static int do_cmd_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *cmd, void *file)
{
struct comedi_cmd user_cmd;
struct comedi_subdevice *s;
struct comedi_async *async;
int ret = 0;
- unsigned int *chanlist_saver = NULL;
+ unsigned int __user *chanlist_saver = NULL;
- if (copy_from_user(&user_cmd, arg, sizeof(struct comedi_cmd))) {
+ if (copy_from_user(&user_cmd, cmd, sizeof(struct comedi_cmd))) {
DPRINTK("bad cmd address\n");
return -EFAULT;
}
@@ -1050,7 +1073,9 @@ static int do_cmd_ioctl(struct comedi_device *dev, void *arg, void *file)
}
/* make sure each element in channel/gain list is valid */
- ret = check_chanlist(s, async->cmd.chanlist_len, async->cmd.chanlist);
+ ret = comedi_check_chanlist(s,
+ async->cmd.chanlist_len,
+ async->cmd.chanlist);
if (ret < 0) {
DPRINTK("bad chanlist\n");
goto cleanup;
@@ -1064,7 +1089,7 @@ static int do_cmd_ioctl(struct comedi_device *dev, void *arg, void *file)
/* restore chanlist pointer before copying back */
user_cmd.chanlist = chanlist_saver;
user_cmd.data = NULL;
- if (copy_to_user(arg, &user_cmd, sizeof(struct comedi_cmd))) {
+ if (copy_to_user(cmd, &user_cmd, sizeof(struct comedi_cmd))) {
DPRINTK("fault writing cmd\n");
ret = -EFAULT;
goto cleanup;
@@ -1114,13 +1139,14 @@ cleanup:
modified cmd structure at arg
*/
-static int do_cmdtest_ioctl(struct comedi_device *dev, void *arg, void *file)
+static int do_cmdtest_ioctl(struct comedi_device *dev,
+ struct comedi_cmd __user *arg, void *file)
{
struct comedi_cmd user_cmd;
struct comedi_subdevice *s;
int ret = 0;
unsigned int *chanlist = NULL;
- unsigned int *chanlist_saver = NULL;
+ unsigned int __user *chanlist_saver = NULL;
if (copy_from_user(&user_cmd, arg, sizeof(struct comedi_cmd))) {
DPRINTK("bad cmd address\n");
@@ -1172,7 +1198,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev, void *arg, void *file)
}
/* make sure each element in channel/gain list is valid */
- ret = check_chanlist(s, user_cmd.chanlist_len, chanlist);
+ ret = comedi_check_chanlist(s, user_cmd.chanlist_len, chanlist);
if (ret < 0) {
DPRINTK("bad chanlist\n");
goto cleanup;
@@ -1371,7 +1397,7 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-void comedi_unmap(struct vm_area_struct *area)
+static void comedi_unmap(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1509,8 +1535,8 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
return mask;
}
-static ssize_t comedi_write(struct file *file, const char *buf, size_t nbytes,
- loff_t *offset)
+static ssize_t comedi_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *offset)
{
struct comedi_subdevice *s;
struct comedi_async *async;
@@ -1611,7 +1637,7 @@ done:
return count ? count : retval;
}
-static ssize_t comedi_read(struct file *file, char *buf, size_t nbytes,
+static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *offset)
{
struct comedi_subdevice *s;
@@ -1925,7 +1951,7 @@ static int __init comedi_init(void)
}
comedi_class = class_create(THIS_MODULE, "comedi");
if (IS_ERR(comedi_class)) {
- printk("comedi: failed to create class");
+ printk(KERN_ERR "comedi: failed to create class");
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);
@@ -1971,8 +1997,10 @@ module_exit(comedi_cleanup);
void comedi_error(const struct comedi_device *dev, const char *s)
{
- printk("comedi%d: %s: %s\n", dev->minor, dev->driver->driver_name, s);
+ printk(KERN_ERR "comedi%d: %s: %s\n", dev->minor,
+ dev->driver->driver_name, s);
}
+EXPORT_SYMBOL(comedi_error);
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
{
@@ -2015,17 +2043,7 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
}
s->async->events = 0;
}
-
-void comedi_set_subdevice_runflags(struct comedi_subdevice *s, unsigned mask,
- unsigned bits)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&s->spin_lock, flags);
- s->runflags &= ~mask;
- s->runflags |= (bits & mask);
- spin_unlock_irqrestore(&s->spin_lock, flags);
-}
+EXPORT_SYMBOL(comedi_event);
unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
@@ -2037,6 +2055,7 @@ unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
spin_unlock_irqrestore(&s->spin_lock, flags);
return runflags;
}
+EXPORT_SYMBOL(comedi_get_subdevice_runflags);
static int is_device_busy(struct comedi_device *dev)
{
@@ -2057,7 +2076,7 @@ static int is_device_busy(struct comedi_device *dev)
return 0;
}
-void comedi_device_init(struct comedi_device *dev)
+static void comedi_device_init(struct comedi_device *dev)
{
memset(dev, 0, sizeof(struct comedi_device));
spin_lock_init(&dev->spinlock);
@@ -2065,7 +2084,7 @@ void comedi_device_init(struct comedi_device *dev)
dev->minor = -1;
}
-void comedi_device_cleanup(struct comedi_device *dev)
+static void comedi_device_cleanup(struct comedi_device *dev)
{
if (dev == NULL)
return;
@@ -2105,7 +2124,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
kfree(info->device);
kfree(info);
printk(KERN_ERR
- "comedi: error: ran out of minor numbers for board device files.\n");
+ "comedi: error: "
+ "ran out of minor numbers for board device files.\n");
return -EBUSY;
}
info->device->minor = i;
@@ -2118,7 +2138,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
retval = device_create_file(csdev, &dev_attr_max_read_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_max_read_buffer_kb.attr.name);
comedi_free_board_minor(i);
return retval;
@@ -2126,7 +2147,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
retval = device_create_file(csdev, &dev_attr_read_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_read_buffer_kb.attr.name);
comedi_free_board_minor(i);
return retval;
@@ -2134,7 +2156,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
retval = device_create_file(csdev, &dev_attr_max_write_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_max_write_buffer_kb.attr.name);
comedi_free_board_minor(i);
return retval;
@@ -2142,7 +2165,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
retval = device_create_file(csdev, &dev_attr_write_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_write_buffer_kb.attr.name);
comedi_free_board_minor(i);
return retval;
@@ -2201,7 +2225,8 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
if (i == COMEDI_NUM_MINORS) {
kfree(info);
printk(KERN_ERR
- "comedi: error: ran out of minor numbers for board device files.\n");
+ "comedi: error: "
+ "ran out of minor numbers for board device files.\n");
return -EBUSY;
}
s->minor = i;
@@ -2215,7 +2240,8 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
retval = device_create_file(csdev, &dev_attr_max_read_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_max_read_buffer_kb.attr.name);
comedi_free_subdevice_minor(s);
return retval;
@@ -2223,7 +2249,8 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
retval = device_create_file(csdev, &dev_attr_read_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_read_buffer_kb.attr.name);
comedi_free_subdevice_minor(s);
return retval;
@@ -2231,7 +2258,8 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
retval = device_create_file(csdev, &dev_attr_max_write_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_max_write_buffer_kb.attr.name);
comedi_free_subdevice_minor(s);
return retval;
@@ -2239,7 +2267,8 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
retval = device_create_file(csdev, &dev_attr_write_buffer_kb);
if (retval) {
printk(KERN_ERR
- "comedi: failed to create sysfs attribute file \"%s\".\n",
+ "comedi: "
+ "failed to create sysfs attribute file \"%s\".\n",
dev_attr_write_buffer_kb.attr.name);
comedi_free_subdevice_minor(s);
return retval;
@@ -2283,6 +2312,7 @@ struct comedi_device_file_info *comedi_get_device_file_info(unsigned minor)
spin_unlock_irqrestore(&comedi_file_info_table_lock, flags);
return info;
}
+EXPORT_SYMBOL_GPL(comedi_get_device_file_info);
static int resize_async_buffer(struct comedi_device *dev,
struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/comedi_fops.h b/drivers/staging/comedi/comedi_fops.h
index cb503c88c7f..da4b4f5553f 100644
--- a/drivers/staging/comedi/comedi_fops.h
+++ b/drivers/staging/comedi/comedi_fops.h
@@ -5,5 +5,6 @@
extern struct class *comedi_class;
extern const struct file_operations comedi_fops;
extern int comedi_autoconfig;
+extern struct comedi_driver *comedi_drivers;
#endif /* _COMEDI_FOPS_H */
diff --git a/drivers/staging/comedi/comedi_ksyms.c b/drivers/staging/comedi/comedi_ksyms.c
deleted file mode 100644
index 87803e69a14..00000000000
--- a/drivers/staging/comedi/comedi_ksyms.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- module/exp_ioctl.c
- exported comedi functions
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#define __NO_VERSION__
-
-#include "comedidev.h"
-
-/* for drivers */
-EXPORT_SYMBOL(comedi_driver_register);
-EXPORT_SYMBOL(comedi_driver_unregister);
-/* EXPORT_SYMBOL(comedi_bufcheck); */
-/* EXPORT_SYMBOL(comedi_done); */
-/* EXPORT_SYMBOL(comedi_error_done); */
-EXPORT_SYMBOL(comedi_error);
-/* EXPORT_SYMBOL(comedi_eobuf); */
-/* EXPORT_SYMBOL(comedi_eos); */
-EXPORT_SYMBOL(comedi_event);
-EXPORT_SYMBOL(comedi_get_subdevice_runflags);
-EXPORT_SYMBOL(comedi_set_subdevice_runflags);
-EXPORT_SYMBOL(range_bipolar10);
-EXPORT_SYMBOL(range_bipolar5);
-EXPORT_SYMBOL(range_bipolar2_5);
-EXPORT_SYMBOL(range_unipolar10);
-EXPORT_SYMBOL(range_unipolar5);
-EXPORT_SYMBOL(range_unknown);
-#ifdef CONFIG_COMEDI_DEBUG
-EXPORT_SYMBOL(comedi_debug);
-#endif
-EXPORT_SYMBOL_GPL(comedi_alloc_board_minor);
-EXPORT_SYMBOL_GPL(comedi_free_board_minor);
-EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
-EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
-EXPORT_SYMBOL_GPL(comedi_usb_auto_config);
-EXPORT_SYMBOL_GPL(comedi_usb_auto_unconfig);
-
-/* for kcomedilib */
-EXPORT_SYMBOL(check_chanlist);
-EXPORT_SYMBOL_GPL(comedi_get_device_file_info);
-
-EXPORT_SYMBOL(comedi_buf_put);
-EXPORT_SYMBOL(comedi_buf_get);
-EXPORT_SYMBOL(comedi_buf_read_n_available);
-EXPORT_SYMBOL(comedi_buf_write_free);
-EXPORT_SYMBOL(comedi_buf_write_alloc);
-EXPORT_SYMBOL(comedi_buf_read_free);
-EXPORT_SYMBOL(comedi_buf_read_alloc);
-EXPORT_SYMBOL(comedi_buf_memcpy_to);
-EXPORT_SYMBOL(comedi_buf_memcpy_from);
-EXPORT_SYMBOL(comedi_reset_async_buf);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index ebdccfdf220..4eb2b77f56d 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -57,7 +57,7 @@
static int __init x ## _init_module(void) \
{return comedi_driver_register(&(x)); } \
static void __exit x ## _cleanup_module(void) \
- {comedi_driver_unregister(&(x)); } \
+ {comedi_driver_unregister(&(x)); } \
module_init(x ## _init_module); \
module_exit(x ## _cleanup_module);
@@ -109,17 +109,9 @@
COMEDI_MODULE_MACROS \
COMEDI_PCI_INITCLEANUP_NOMODULE(comedi_driver, pci_id_table)
-#define PCI_VENDOR_ID_INOVA 0x104c
-#define PCI_VENDOR_ID_NATINST 0x1093
-#define PCI_VENDOR_ID_DATX 0x1116
-#define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307
-#define PCI_VENDOR_ID_ADVANTECH 0x13fe
-#define PCI_VENDOR_ID_RTD 0x1435
-#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_ICP 0x104c
#define PCI_VENDOR_ID_CONTEC 0x1221
-#define PCI_VENDOR_ID_MEILHAUS 0x1402
#define COMEDI_NUM_MINORS 0x100
#define COMEDI_NUM_BOARD_MINORS 0x30
@@ -132,7 +124,7 @@ struct comedi_subdevice {
struct comedi_device *device;
int type;
int n_chan;
- volatile int subdev_flags;
+ int subdev_flags;
int len_chanlist; /* maximum length of channel/gain list */
void *private;
@@ -359,9 +351,6 @@ void cleanup_polling(void);
void start_polling(struct comedi_device *);
void stop_polling(struct comedi_device *);
-int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned long new_size);
-
#ifdef CONFIG_PROC_FS
void comedi_proc_init(void);
void comedi_proc_cleanup(void);
@@ -385,24 +374,17 @@ enum subdevice_runflags {
SRF_RUNNING = 0x08000000
};
-/*
- various internal comedi functions
- */
-
-int do_rangeinfo_ioctl(struct comedi_device *dev, struct comedi_rangeinfo *arg);
-int check_chanlist(struct comedi_subdevice *s, int n, unsigned int *chanlist);
-void comedi_set_subdevice_runflags(struct comedi_subdevice *s, unsigned mask,
- unsigned bits);
+int comedi_check_chanlist(struct comedi_subdevice *s,
+ int n,
+ unsigned int *chanlist);
unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s);
-int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
/* range stuff */
#define RANGE(a, b) {(a)*1e6, (b)*1e6, 0}
#define RANGE_ext(a, b) {(a)*1e6, (b)*1e6, RF_EXTERNAL}
#define RANGE_mA(a, b) {(a)*1e6, (b)*1e6, UNIT_mA}
-#define RANGE_unitless(a, b) {(a)*1e6, (b)*1e6, 0} /* XXX */
+#define RANGE_unitless(a, b) {(a)*1e6, (b)*1e6, 0}
#define BIP_RANGE(a) {-(a)*1e6, (a)*1e6, 0}
#define UNI_RANGE(a) {0, (a)*1e6, 0}
@@ -505,8 +487,6 @@ static inline unsigned comedi_buf_read_n_allocated(struct comedi_async *async)
return async->buf_read_alloc_count - async->buf_read_count;
}
-void comedi_reset_async_buf(struct comedi_async *async);
-
static inline void *comedi_aux_data(int options[], int n)
{
unsigned long address;
@@ -532,8 +512,6 @@ static inline void *comedi_aux_data(int options[], int n)
return (void *)address;
}
-int comedi_alloc_board_minor(struct device *hardware_device);
-void comedi_free_board_minor(unsigned minor);
int comedi_alloc_subdevice_minor(struct comedi_device *dev,
struct comedi_subdevice *s);
void comedi_free_subdevice_minor(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
index 3918d53b304..ca92c43fdb3 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/drivers/staging/comedi/comedilib.h
@@ -24,170 +24,14 @@
#ifndef _LINUX_COMEDILIB_H
#define _LINUX_COMEDILIB_H
-#include "comedi.h"
-
-/* Kernel internal stuff. Needed by real-time modules and such. */
-
-#ifndef __KERNEL__
-#error linux/comedilib.h should not be included by non-kernel-space code
-#endif
-
-/* exported functions */
-
-#ifndef KCOMEDILIB_DEPRECATED
-
-/* these functions may not be called at real-time priority */
-
-void *comedi_open(const char *path);
-int comedi_close(void *dev);
-
-/* these functions may be called at any priority, but may fail at
- real-time priority */
-
-int comedi_lock(void *dev, unsigned int subdev);
-int comedi_unlock(void *dev, unsigned int subdev);
-
-/* these functions may be called at any priority, but you must hold
- the lock for the subdevice */
-
-int comedi_loglevel(int loglevel);
-void comedi_perror(const char *s);
-char *comedi_strerror(int errnum);
-int comedi_errno(void);
-int comedi_fileno(void *dev);
-
-int comedi_cancel(void *dev, unsigned int subdev);
-int comedi_register_callback(void *dev, unsigned int subdev,
- unsigned int mask, int (*cb) (unsigned int,
- void *), void *arg);
-
-int comedi_command(void *dev, struct comedi_cmd *cmd);
-int comedi_command_test(void *dev, struct comedi_cmd *cmd);
-int comedi_trigger(void *dev, unsigned int subdev, struct comedi_trig *it);
-int __comedi_trigger(void *dev, unsigned int subdev, struct comedi_trig *it);
-int comedi_data_write(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int data);
-int comedi_data_read(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int *data);
-int comedi_data_read_hint(void *dev, unsigned int subdev,
- unsigned int chan, unsigned int range,
- unsigned int aref);
-int comedi_data_read_delayed(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref,
- unsigned int *data, unsigned int nano_sec);
-int comedi_dio_config(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int io);
-int comedi_dio_read(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int *val);
-int comedi_dio_write(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int val);
-int comedi_dio_bitfield(void *dev, unsigned int subdev, unsigned int mask,
- unsigned int *bits);
-int comedi_get_n_subdevices(void *dev);
-int comedi_get_version_code(void *dev);
-const char *comedi_get_driver_name(void *dev);
-const char *comedi_get_board_name(void *dev);
-int comedi_get_subdevice_type(void *dev, unsigned int subdevice);
-int comedi_find_subdevice_by_type(void *dev, int type, unsigned int subd);
-int comedi_get_n_channels(void *dev, unsigned int subdevice);
-unsigned int comedi_get_maxdata(void *dev, unsigned int subdevice, unsigned
- int chan);
-int comedi_get_n_ranges(void *dev, unsigned int subdevice, unsigned int chan);
-int comedi_do_insn(void *dev, struct comedi_insn *insn);
-int comedi_poll(void *dev, unsigned int subdev);
-
-/* DEPRECATED functions */
-int comedi_get_rangetype(void *dev, unsigned int subdevice, unsigned int chan);
-
-/* ALPHA functions */
-unsigned int comedi_get_subdevice_flags(void *dev, unsigned int subdevice);
-int comedi_get_len_chanlist(void *dev, unsigned int subdevice);
-int comedi_get_krange(void *dev, unsigned int subdevice, unsigned int
- chan, unsigned int range, struct comedi_krange *krange);
-unsigned int comedi_get_buf_head_pos(void *dev, unsigned int subdevice);
-int comedi_set_user_int_count(void *dev, unsigned int subdevice,
- unsigned int buf_user_count);
-int comedi_map(void *dev, unsigned int subdev, void *ptr);
-int comedi_unmap(void *dev, unsigned int subdev);
-int comedi_get_buffer_size(void *dev, unsigned int subdev);
-int comedi_mark_buffer_read(void *dev, unsigned int subdevice,
- unsigned int num_bytes);
-int comedi_mark_buffer_written(void *d, unsigned int subdevice,
- unsigned int num_bytes);
-int comedi_get_buffer_contents(void *dev, unsigned int subdevice);
-int comedi_get_buffer_offset(void *dev, unsigned int subdevice);
-
-#else
-
-/* these functions may not be called at real-time priority */
-
-int comedi_open(unsigned int minor);
-void comedi_close(unsigned int minor);
-
-/* these functions may be called at any priority, but may fail at
- real-time priority */
-
-int comedi_lock(unsigned int minor, unsigned int subdev);
-int comedi_unlock(unsigned int minor, unsigned int subdev);
-
-/* these functions may be called at any priority, but you must hold
- the lock for the subdevice */
-
-int comedi_cancel(unsigned int minor, unsigned int subdev);
-int comedi_register_callback(unsigned int minor, unsigned int subdev,
- unsigned int mask, int (*cb) (unsigned int,
- void *), void *arg);
-
-int comedi_command(unsigned int minor, struct comedi_cmd *cmd);
-int comedi_command_test(unsigned int minor, struct comedi_cmd *cmd);
-int comedi_trigger(unsigned int minor, unsigned int subdev,
- struct comedi_trig *it);
-int __comedi_trigger(unsigned int minor, unsigned int subdev,
- struct comedi_trig *it);
-int comedi_data_write(unsigned int dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int data);
-int comedi_data_read(unsigned int dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int *data);
-int comedi_dio_config(unsigned int dev, unsigned int subdev, unsigned int chan,
- unsigned int io);
-int comedi_dio_read(unsigned int dev, unsigned int subdev, unsigned int chan,
- unsigned int *val);
-int comedi_dio_write(unsigned int dev, unsigned int subdev, unsigned int chan,
- unsigned int val);
-int comedi_dio_bitfield(unsigned int dev, unsigned int subdev,
+struct comedi_device *comedi_open(const char *path);
+int comedi_close(struct comedi_device *dev);
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io);
+int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
unsigned int mask, unsigned int *bits);
-int comedi_get_n_subdevices(unsigned int dev);
-int comedi_get_version_code(unsigned int dev);
-char *comedi_get_driver_name(unsigned int dev);
-char *comedi_get_board_name(unsigned int minor);
-int comedi_get_subdevice_type(unsigned int minor, unsigned int subdevice);
-int comedi_find_subdevice_by_type(unsigned int minor, int type,
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd);
-int comedi_get_n_channels(unsigned int minor, unsigned int subdevice);
-unsigned int comedi_get_maxdata(unsigned int minor, unsigned int subdevice, unsigned
- int chan);
-int comedi_get_n_ranges(unsigned int minor, unsigned int subdevice, unsigned int
- chan);
-int comedi_do_insn(unsigned int minor, struct comedi_insn *insn);
-int comedi_poll(unsigned int minor, unsigned int subdev);
-
-/* DEPRECATED functions */
-int comedi_get_rangetype(unsigned int minor, unsigned int subdevice,
- unsigned int chan);
-
-/* ALPHA functions */
-unsigned int comedi_get_subdevice_flags(unsigned int minor, unsigned int
- subdevice);
-int comedi_get_len_chanlist(unsigned int minor, unsigned int subdevice);
-int comedi_get_krange(unsigned int minor, unsigned int subdevice, unsigned int
- chan, unsigned int range, struct comedi_krange *krange);
-unsigned int comedi_get_buf_head_pos(unsigned int minor, unsigned int
- subdevice);
-int comedi_set_user_int_count(unsigned int minor, unsigned int subdevice,
- unsigned int buf_user_count);
-int comedi_map(unsigned int minor, unsigned int subdev, void **ptr);
-int comedi_unmap(unsigned int minor, unsigned int subdev);
-
-#endif
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
#endif
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 44d6b62c230..4a29ed737e3 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -37,16 +37,16 @@
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include "comedidev.h"
-#include "wrapper.h"
#include <linux/highmem.h> /* for SuSE brokenness */
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
-
#include <linux/io.h>
#include <asm/system.h>
+#include "comedidev.h"
+#include "internal.h"
+
static int postconfig(struct comedi_device *dev);
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -54,16 +54,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
static void *comedi_recognize(struct comedi_driver *driv, const char *name);
static void comedi_report_boards(struct comedi_driver *driv);
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s);
-int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
- unsigned long new_size);
struct comedi_driver *comedi_drivers;
-int comedi_modprobe(int minor)
-{
- return -EINVAL;
-}
-
static void cleanup_device(struct comedi_device *dev)
{
int i;
@@ -84,7 +77,7 @@ static void cleanup_device(struct comedi_device *dev)
}
kfree(dev->private);
dev->private = NULL;
- dev->driver = 0;
+ dev->driver = NULL;
dev->board_name = NULL;
dev->board_ptr = NULL;
dev->iobase = 0;
@@ -102,7 +95,8 @@ static void __comedi_device_detach(struct comedi_device *dev)
if (dev->driver)
dev->driver->detach(dev);
else
- printk("BUG: dev->driver=NULL in comedi_device_detach()\n");
+ printk(KERN_WARNING
+ "BUG: dev->driver=NULL in comedi_device_detach()\n");
cleanup_device(dev);
}
@@ -124,7 +118,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module)) {
printk
- ("comedi: failed to increment module count, skipping\n");
+ (KERN_INFO "comedi: failed to increment module count, skipping\n");
continue;
}
if (driv->num_names) {
@@ -139,7 +133,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
continue;
}
}
- /* initialize dev->driver here so comedi_error() can be called from attach */
+ /* initialize dev->driver here so
+ * comedi_error() can be called from attach */
dev->driver = driv;
ret = driv->attach(dev, it);
if (ret < 0) {
@@ -154,7 +149,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* report valid board names before returning error */
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module)) {
- printk("comedi: failed to increment module count\n");
+ printk(KERN_INFO
+ "comedi: failed to increment module count\n");
continue;
}
comedi_report_boards(driv);
@@ -172,7 +168,8 @@ attached:
}
if (!dev->board_name) {
- printk("BUG: dev->board_name=<%p>\n", dev->board_name);
+ printk(KERN_WARNING "BUG: dev->board_name=<%p>\n",
+ dev->board_name);
dev->board_name = "BUG";
}
smp_wmb();
@@ -188,6 +185,7 @@ int comedi_driver_register(struct comedi_driver *driver)
return 0;
}
+EXPORT_SYMBOL(comedi_driver_register);
int comedi_driver_unregister(struct comedi_driver *driver)
{
@@ -208,7 +206,7 @@ int comedi_driver_unregister(struct comedi_driver *driver)
if (dev->attached && dev->driver == driver) {
if (dev->use_count)
printk
- ("BUG! detaching device with use_count=%d\n",
+ (KERN_WARNING "BUG! detaching device with use_count=%d\n",
dev->use_count);
comedi_device_detach(dev);
}
@@ -228,6 +226,7 @@ int comedi_driver_unregister(struct comedi_driver *driver)
}
return -EINVAL;
}
+EXPORT_SYMBOL(comedi_driver_unregister);
static int postconfig(struct comedi_device *dev)
{
@@ -253,7 +252,8 @@ static int postconfig(struct comedi_device *dev)
async =
kzalloc(sizeof(struct comedi_async), GFP_KERNEL);
if (async == NULL) {
- printk("failed to allocate async struct\n");
+ printk(KERN_INFO
+ "failed to allocate async struct\n");
return -ENOMEM;
}
init_waitqueue_head(&async->wait_head);
@@ -268,7 +268,7 @@ static int postconfig(struct comedi_device *dev)
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
if (comedi_buf_alloc(dev, s, DEFAULT_BUF_SIZE) < 0) {
- printk("Buffer allocation failed\n");
+ printk(KERN_INFO "Buffer allocation failed\n");
return -ENOMEM;
}
if (s->buf_change) {
@@ -303,8 +303,9 @@ static int postconfig(struct comedi_device *dev)
return 0;
}
-/* generic recognize function for drivers that register their supported board names */
-void *comedi_recognize(struct comedi_driver *driv, const char *name)
+/* generic recognize function for drivers
+ * that register their supported board names */
+static void *comedi_recognize(struct comedi_driver *driv, const char *name)
{
unsigned i;
const char *const *name_ptr = driv->board_name;
@@ -319,22 +320,22 @@ void *comedi_recognize(struct comedi_driver *driv, const char *name)
return NULL;
}
-void comedi_report_boards(struct comedi_driver *driv)
+static void comedi_report_boards(struct comedi_driver *driv)
{
unsigned int i;
const char *const *name_ptr;
- printk("comedi: valid board names for %s driver are:\n",
+ printk(KERN_INFO "comedi: valid board names for %s driver are:\n",
driv->driver_name);
name_ptr = driv->board_name;
for (i = 0; i < driv->num_names; i++) {
- printk(" %s\n", *name_ptr);
+ printk(KERN_INFO " %s\n", *name_ptr);
name_ptr = (const char **)((char *)name_ptr + driv->offset);
}
if (driv->num_names == 0)
- printk(" %s\n", driv->driver_name);
+ printk(KERN_INFO " %s\n", driv->driver_name);
}
static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -371,8 +372,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel)) : 0; /* bits */
+ new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
+ new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
+ : 0; /* bits */
}
ret = s->insn_bits(dev, s, &new_insn, new_data);
@@ -440,9 +442,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned i;
for (i = 0; i < async->n_buf_pages; ++i) {
if (async->buf_page_list[i].virt_addr) {
- mem_map_unreserve(virt_to_page
- (async->buf_page_list[i].
- virt_addr));
+ clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
@@ -495,12 +495,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
if (async->buf_page_list[i].virt_addr == NULL)
break;
- mem_map_reserve(virt_to_page
- (async->buf_page_list[i].
- virt_addr));
- pages[i] =
- virt_to_page(async->
- buf_page_list[i].virt_addr);
+ set_bit(PG_reserved,
+ &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
+ pages[i] = virt_to_page(async->buf_page_list[i].virt_addr);
}
}
if (i == n_pages) {
@@ -517,9 +514,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
NULL) {
break;
}
- mem_map_unreserve(virt_to_page
- (async->buf_page_list
- [i].virt_addr));
+ clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
@@ -549,8 +544,8 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
/* munging is applied to data by core as it passes between user
* and kernel space */
-unsigned int comedi_buf_munge(struct comedi_async *async,
- unsigned int num_bytes)
+static unsigned int comedi_buf_munge(struct comedi_async *async,
+ unsigned int num_bytes)
{
struct comedi_subdevice *s = async->subdevice;
unsigned int count = 0;
@@ -568,7 +563,8 @@ unsigned int comedi_buf_munge(struct comedi_async *async,
block_size = num_bytes - count;
if (block_size < 0) {
- printk("%s: %s: bug! block_size is negative\n",
+ printk(KERN_WARNING
+ "%s: %s: bug! block_size is negative\n",
__FILE__, __func__);
break;
}
@@ -579,7 +575,8 @@ unsigned int comedi_buf_munge(struct comedi_async *async,
s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
block_size, async->munge_chan);
- smp_wmb(); /* barrier insures data is munged in buffer before munge_count is incremented */
+ smp_wmb(); /* barrier insures data is munged in buffer
+ * before munge_count is incremented */
async->munge_chan += block_size / num_sample_bytes;
async->munge_chan %= async->cmd.chanlist_len;
@@ -626,6 +623,7 @@ unsigned int comedi_buf_write_alloc(struct comedi_async *async,
smp_mb();
return nbytes;
}
+EXPORT_SYMBOL(comedi_buf_write_alloc);
/* allocates nothing unless it can completely fulfill the request */
unsigned int comedi_buf_write_alloc_strict(struct comedi_async *async,
@@ -649,7 +647,7 @@ unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
if ((int)(async->buf_write_count + nbytes -
async->buf_write_alloc_count) > 0) {
printk
- ("comedi: attempted to write-free more bytes than have been write-allocated.\n");
+ (KERN_INFO "comedi: attempted to write-free more bytes than have been write-allocated.\n");
nbytes = async->buf_write_alloc_count - async->buf_write_count;
}
async->buf_write_count += nbytes;
@@ -660,6 +658,7 @@ unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
return nbytes;
}
+EXPORT_SYMBOL(comedi_buf_write_free);
/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes)
@@ -674,16 +673,18 @@ unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes)
smp_rmb();
return nbytes;
}
+EXPORT_SYMBOL(comedi_buf_read_alloc);
/* transfers control of a chunk from reader to free buffer space */
unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes)
{
- /* barrier insures data has been read out of buffer before read count is incremented */
+ /* barrier insures data has been read out of
+ * buffer before read count is incremented */
smp_mb();
if ((int)(async->buf_read_count + nbytes -
async->buf_read_alloc_count) > 0) {
- printk
- ("comedi: attempted to read-free more bytes than have been read-allocated.\n");
+ printk(KERN_INFO
+ "comedi: attempted to read-free more bytes than have been read-allocated.\n");
nbytes = async->buf_read_alloc_count - async->buf_read_count;
}
async->buf_read_count += nbytes;
@@ -691,6 +692,7 @@ unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes)
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
+EXPORT_SYMBOL(comedi_buf_read_free);
void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
const void *data, unsigned int num_bytes)
@@ -716,6 +718,7 @@ void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
write_ptr = 0;
}
}
+EXPORT_SYMBOL(comedi_buf_memcpy_to);
void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
void *dest, unsigned int nbytes)
@@ -742,6 +745,7 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
read_ptr = 0;
}
}
+EXPORT_SYMBOL(comedi_buf_memcpy_from);
unsigned int comedi_buf_read_n_available(struct comedi_async *async)
{
@@ -757,6 +761,7 @@ unsigned int comedi_buf_read_n_available(struct comedi_async *async)
smp_rmb();
return num_bytes;
}
+EXPORT_SYMBOL(comedi_buf_read_n_available);
int comedi_buf_get(struct comedi_async *async, short *x)
{
@@ -769,6 +774,7 @@ int comedi_buf_get(struct comedi_async *async, short *x)
comedi_buf_read_free(async, sizeof(short));
return 1;
}
+EXPORT_SYMBOL(comedi_buf_get);
int comedi_buf_put(struct comedi_async *async, short x)
{
@@ -782,6 +788,7 @@ int comedi_buf_put(struct comedi_async *async, short x)
comedi_buf_write_free(async, sizeof(short));
return 1;
}
+EXPORT_SYMBOL(comedi_buf_put);
void comedi_reset_async_buf(struct comedi_async *async)
{
@@ -802,8 +809,9 @@ void comedi_reset_async_buf(struct comedi_async *async)
async->events = 0;
}
-int comedi_auto_config(struct device *hardware_device, const char *board_name,
- const int *options, unsigned num_options)
+static int comedi_auto_config(struct device *hardware_device,
+ const char *board_name, const int *options,
+ unsigned num_options)
{
struct comedi_devconfig it;
int minor;
@@ -848,7 +856,7 @@ cleanup:
return retval;
}
-void comedi_auto_unconfig(struct device *hardware_device)
+static void comedi_auto_unconfig(struct device *hardware_device)
{
unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device);
if (minor == NULL)
@@ -873,20 +881,24 @@ int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name)
return comedi_auto_config(&pcidev->dev, board_name,
options, ARRAY_SIZE(options));
}
+EXPORT_SYMBOL_GPL(comedi_pci_auto_config);
void comedi_pci_auto_unconfig(struct pci_dev *pcidev)
{
comedi_auto_unconfig(&pcidev->dev);
}
+EXPORT_SYMBOL_GPL(comedi_pci_auto_unconfig);
int comedi_usb_auto_config(struct usb_device *usbdev, const char *board_name)
{
BUG_ON(usbdev == NULL);
return comedi_auto_config(&usbdev->dev, board_name, NULL, 0);
}
+EXPORT_SYMBOL_GPL(comedi_usb_auto_config);
void comedi_usb_auto_unconfig(struct usb_device *usbdev)
{
BUG_ON(usbdev == NULL);
comedi_auto_unconfig(&usbdev->dev);
}
+EXPORT_SYMBOL_GPL(comedi_usb_auto_unconfig);
diff --git a/drivers/staging/comedi/drivers/8253.h b/drivers/staging/comedi/drivers/8253.h
index 0bb35db4ea3..3eb45d46e05 100644
--- a/drivers/staging/comedi/drivers/8253.h
+++ b/drivers/staging/comedi/drivers/8253.h
@@ -214,7 +214,8 @@ static inline void i8253_cascade_ns_to_timer_2div(int i8253_osc_base,
#ifndef CMDTEST
/* i8254_load programs 8254 counter chip. It should also work for the 8253.
- * base_address is the lowest io address for the chip (the address of counter 0).
+ * base_address is the lowest io address
+ * for the chip (the address of counter 0).
* counter_number is the counter you want to load (0,1 or 2)
* count is the number to load into the counter.
*
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index 2d54993ffb1..fe63830bd85 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -82,6 +82,7 @@ I/O port base address can be found in the output of 'lspci -v'.
#include <linux/ioport.h>
#include <linux/slab.h>
+#include "8255.h"
#define _8255_SIZE 4
@@ -395,8 +396,6 @@ static int dev_8255_attach(struct comedi_device *dev,
unsigned long iobase;
int i;
- printk("comedi%d: 8255:", dev->minor);
-
dev->board_name = "8255";
for (i = 0; i < COMEDI_NDEVCONFOPTS; i++) {
@@ -405,13 +404,20 @@ static int dev_8255_attach(struct comedi_device *dev,
break;
}
if (i == 0) {
- printk(" no devices specified\n");
+ printk(KERN_WARNING
+ "comedi%d: 8255: no devices specified\n", dev->minor);
return -EINVAL;
}
ret = alloc_subdevices(dev, i);
- if (ret < 0)
+ if (ret < 0) {
+ /* FIXME this printk call should give a proper message, the
+ * below line just maintains previous functionality */
+ printk("comedi%d: 8255:", dev->minor);
return ret;
+ }
+
+ printk(KERN_INFO "comedi%d: 8255:", dev->minor);
for (i = 0; i < dev->n_subdevices; i++) {
iobase = it->options[i];
@@ -438,7 +444,7 @@ static int dev_8255_detach(struct comedi_device *dev)
unsigned long iobase;
struct comedi_subdevice *s;
- printk("comedi%d: 8255: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: 8255: remove\n", dev->minor);
for (i = 0; i < dev->n_subdevices; i++) {
s = dev->subdevices + i;
diff --git a/drivers/staging/comedi/drivers/8255.h b/drivers/staging/comedi/drivers/8255.h
index 02c5a361b1a..b6314c9b7ea 100644
--- a/drivers/staging/comedi/drivers/8255.h
+++ b/drivers/staging/comedi/drivers/8255.h
@@ -26,8 +26,6 @@
#include "../comedidev.h"
-#if defined(CONFIG_COMEDI_8255) || defined(CONFIG_COMEDI_8255_MODULE)
-
int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
int (*cb) (int, int, int, unsigned long),
unsigned long arg);
@@ -38,24 +36,4 @@ void subdev_8255_cleanup(struct comedi_device *dev, struct comedi_subdevice *s);
void subdev_8255_interrupt(struct comedi_device *dev,
struct comedi_subdevice *s);
-#else
-
-static inline int subdev_8255_init(struct comedi_device *dev,
- struct comedi_subdevice *s, void *x,
- unsigned long y)
-{
- printk("8255 support not configured -- disabling subdevice\n");
-
- s->type = COMEDI_SUBD_UNUSED;
-
- return 0;
-}
-
-static inline void subdev_8255_cleanup(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
-}
-
-#endif
-
#endif
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index df2854d543c..5ccf246e252 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -2,132 +2,137 @@
#
# Comedi "helper" modules
-obj-$(CONFIG_COMEDI) += comedi_fc.o
-obj-$(CONFIG_COMEDI) += comedi_bond.o
-obj-$(CONFIG_COMEDI) += comedi_test.o
-obj-$(CONFIG_COMEDI) += comedi_parport.o
obj-$(CONFIG_COMEDI) += pcm_common.o
+# Comedi misc drivers
+obj-$(CONFIG_COMEDI_BOND) += comedi_bond.o
+obj-$(CONFIG_COMEDI_TEST) += comedi_test.o
+obj-$(CONFIG_COMEDI_PARPORT) += comedi_parport.o
+obj-$(CONFIG_COMEDI_SERIAL2002) += serial2002.o
+obj-$(CONFIG_COMEDI_SKEL) += skel.o
+
+# Comedi ISA drivers
+obj-$(CONFIG_COMEDI_8255) += 8255.o
+obj-$(CONFIG_COMEDI_ACL7225B) += acl7225b.o
+obj-$(CONFIG_COMEDI_PCL711) += pcl711.o
+obj-$(CONFIG_COMEDI_PCL724) += pcl724.o
+obj-$(CONFIG_COMEDI_PCL725) += pcl725.o
+obj-$(CONFIG_COMEDI_PCL726) += pcl726.o
+obj-$(CONFIG_COMEDI_PCL730) += pcl730.o
+obj-$(CONFIG_COMEDI_PCL812) += pcl812.o
+obj-$(CONFIG_COMEDI_PCL816) += pcl816.o
+obj-$(CONFIG_COMEDI_PCL818) += pcl818.o
+obj-$(CONFIG_COMEDI_PCM3724) += pcm3724.o
+obj-$(CONFIG_COMEDI_PCM3730) += pcm3730.o
+obj-$(CONFIG_COMEDI_RTI800) += rti800.o
+obj-$(CONFIG_COMEDI_RTI802) += rti802.o
+obj-$(CONFIG_COMEDI_DAS08) += das08.o
+obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
+obj-$(CONFIG_COMEDI_DAS16) += das16.o
+obj-$(CONFIG_COMEDI_DAS800) += das800.o
+obj-$(CONFIG_COMEDI_DAS1800) += das1800.o
+obj-$(CONFIG_COMEDI_DAS6402) += das6402.o
+obj-$(CONFIG_COMEDI_DT2801) += dt2801.o
+obj-$(CONFIG_COMEDI_DT2811) += dt2811.o
+obj-$(CONFIG_COMEDI_DT2814) += dt2814.o
+obj-$(CONFIG_COMEDI_DT2815) += dt2815.o
+obj-$(CONFIG_COMEDI_DT2817) += dt2817.o
+obj-$(CONFIG_COMEDI_DT282X) += dt282x.o
+obj-$(CONFIG_COMEDI_DMM32AT) += dmm32at.o
+obj-$(CONFIG_COMEDI_FL512) += fl512.o
+obj-$(CONFIG_COMEDI_AIO_AIO12_8) += aio_aio12_8.o
+obj-$(CONFIG_COMEDI_AIO_IIRO_16) += aio_iiro_16.o
+obj-$(CONFIG_COMEDI_C6XDIGIO) += c6xdigio.o
+obj-$(CONFIG_COMEDI_MPC624) += mpc624.o
+obj-$(CONFIG_COMEDI_ADQ12B) += adq12b.o
+obj-$(CONFIG_COMEDI_NI_AT_A2150) += ni_at_a2150.o
+obj-$(CONFIG_COMEDI_NI_AT_AO) += ni_at_ao.o
+obj-$(CONFIG_COMEDI_NI_ATMIO) += ni_atmio.o
+obj-$(CONFIG_COMEDI_NI_ATMIO16D) += ni_atmio16d.o
+obj-$(CONFIG_COMEDI_PCMAD) += pcmad.o
+obj-$(CONFIG_COMEDI_PCMDA12) += pcmda12.o
+obj-$(CONFIG_COMEDI_PCMMIO) += pcmmio.o
+obj-$(CONFIG_COMEDI_PCMUIO) += pcmuio.o
+obj-$(CONFIG_COMEDI_MULTIQ3) += multiq3.o
+obj-$(CONFIG_COMEDI_POC) += poc.o
+
# Comedi PCI drivers
obj-$(CONFIG_COMEDI_PCI_DRIVERS) += 8255.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += acl7225b.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_035.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_1032.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_1500.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_1516.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_1564.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_16xx.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_2016.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_2032.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_2200.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_3001.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_3120.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_3501.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += addi_apci_3xxx.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci6208.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci7296.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci7432.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci8164.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci9111.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adl_pci9118.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adq12b.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adv_pci1710.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adv_pci1723.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += adv_pci_dio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += aio_aio12_8.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += aio_iiro_16.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += amplc_dio200.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += amplc_pc236.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += amplc_pc263.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += amplc_pci224.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += amplc_pci230.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += c6xdigio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcidas64.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcidas.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcidda.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcidio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcimdas.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += cb_pcimdda.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_bond.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_parport.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += comedi_test.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += contec_pci_dio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += daqboard2000.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das08.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das16m1.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das16.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das1800.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das6402.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += das800.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dmm32at.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt2801.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt2811.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt2814.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt2815.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt2817.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt282x.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += dt3000.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += fl512.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += gsc_hpdi.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += icp_multi.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ii_pci20kc.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += jr3_pci.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ke_counter.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += me4000.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += me_daq.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += mite.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += mpc624.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += multiq3.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_6527.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_65xx.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_660x.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_670x.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_at_a2150.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_at_ao.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_atmio16d.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_atmio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_labpc.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_pcidio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_pcimio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_tiocmd.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ni_tio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl711.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl724.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl725.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl726.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl730.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl812.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl816.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcl818.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcm3724.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcm3730.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcmad.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcmda12.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcmmio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += pcmuio.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += poc.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += rtd520.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += rti800.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += rti802.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += s526.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += s626.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += serial2002.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += skel.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += ssv_dnp.o
-obj-$(CONFIG_COMEDI_PCI_DRIVERS) += unioxx5.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_035) += addi_apci_035.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_1032) += addi_apci_1032.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_1500) += addi_apci_1500.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_1516) += addi_apci_1516.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_1564) += addi_apci_1564.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_16XX) += addi_apci_16xx.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_2016) += addi_apci_2016.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_2032) += addi_apci_2032.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_2200) += addi_apci_2200.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_3001) += addi_apci_3001.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
+obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
+obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7230) += adl_pci7230.o
+obj-$(CONFIG_COMEDI_ADL_PCI7296) += adl_pci7296.o
+obj-$(CONFIG_COMEDI_ADL_PCI7432) += adl_pci7432.o
+obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
+obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
+obj-$(CONFIG_COMEDI_ADL_PCI9118) += adl_pci9118.o
+obj-$(CONFIG_COMEDI_ADV_PCI1710) += adv_pci1710.o
+obj-$(CONFIG_COMEDI_ADV_PCI1723) += adv_pci1723.o
+obj-$(CONFIG_COMEDI_ADV_PCI_DIO) += adv_pci_dio.o
+obj-$(CONFIG_COMEDI_AMPLC_DIO200) += amplc_dio200.o
+obj-$(CONFIG_COMEDI_AMPLC_PC236) += amplc_pc236.o
+obj-$(CONFIG_COMEDI_AMPLC_PC263) += amplc_pc263.o
+obj-$(CONFIG_COMEDI_AMPLC_PCI224) += amplc_pci224.o
+obj-$(CONFIG_COMEDI_AMPLC_PCI230) += amplc_pci230.o
+obj-$(CONFIG_COMEDI_CONTEC_PCI_DIO) += contec_pci_dio.o
+obj-$(CONFIG_COMEDI_DT3000) += dt3000.o
+obj-$(CONFIG_COMEDI_UNIOXX5) += unioxx5.o
+obj-$(CONFIG_COMEDI_GSC_HPDI) += gsc_hpdi.o
+obj-$(CONFIG_COMEDI_ICP_MULTI) += icp_multi.o
+obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
+obj-$(CONFIG_COMEDI_DAQBOARD2000) += daqboard2000.o
+obj-$(CONFIG_COMEDI_JR3_PCI) += jr3_pci.o
+obj-$(CONFIG_COMEDI_KE_COUNTER) += ke_counter.o
+obj-$(CONFIG_COMEDI_CB_PCIDAS64) += cb_pcidas64.o
+obj-$(CONFIG_COMEDI_CB_PCIDAS) += cb_pcidas.o
+obj-$(CONFIG_COMEDI_CB_PCIDDA) += cb_pcidda.o
+obj-$(CONFIG_COMEDI_CB_PCIDIO) += cb_pcidio.o
+obj-$(CONFIG_COMEDI_CB_PCIMDAS) += cb_pcimdas.o
+obj-$(CONFIG_COMEDI_CB_PCIMDDA) += cb_pcimdda.o
+obj-$(CONFIG_COMEDI_ME4000) += me4000.o
+obj-$(CONFIG_COMEDI_ME_DAQ) += me_daq.o
+obj-$(CONFIG_COMEDI_NI_6527) += ni_6527.o
+obj-$(CONFIG_COMEDI_NI_65XX) += ni_65xx.o
+obj-$(CONFIG_COMEDI_NI_660X) += ni_660x.o
+obj-$(CONFIG_COMEDI_NI_670X) += ni_670x.o
+obj-$(CONFIG_COMEDI_NI_PCIDIO) += ni_pcidio.o
+obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o
+obj-$(CONFIG_COMEDI_RTD520) += rtd520.o
+obj-$(CONFIG_COMEDI_S526) += s526.o
+obj-$(CONFIG_COMEDI_S626) += s626.o
+obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o
# Comedi PCMCIA drivers
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += cb_das16_cs.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += das08_cs.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += ni_daq_700.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += ni_daq_dio24.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += ni_labpc_cs.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += ni_mio_cs.o
-obj-$(CONFIG_COMEDI_PCMCIA_DRIVERS) += quatech_daqp_cs.o
+obj-$(CONFIG_COMEDI_CB_DAS16_CS) += cb_das16_cs.o
+obj-$(CONFIG_COMEDI_DAS08_CS) += das08_cs.o
+obj-$(CONFIG_COMEDI_NI_DAQ_700_CS) += ni_daq_700.o
+obj-$(CONFIG_COMEDI_NI_DAQ_DIO24_CS) += ni_daq_dio24.o
+obj-$(CONFIG_COMEDI_NI_LABPC_CS) += ni_labpc_cs.o
+obj-$(CONFIG_COMEDI_NI_MIO_CS) += ni_mio_cs.o
+obj-$(CONFIG_COMEDI_QUATECH_DAQP_CS) += quatech_daqp_cs.o
# Comedi USB drivers
-obj-$(CONFIG_COMEDI_USB_DRIVERS) += dt9812.o
-obj-$(CONFIG_COMEDI_USB_DRIVERS) += usbdux.o
-obj-$(CONFIG_COMEDI_USB_DRIVERS) += usbduxfast.o
-obj-$(CONFIG_COMEDI_USB_DRIVERS) += vmk80xx.o
+obj-$(CONFIG_COMEDI_DT9812) += dt9812.o
+obj-$(CONFIG_COMEDI_USBDUX) += usbdux.o
+obj-$(CONFIG_COMEDI_USBDUXFAST) += usbduxfast.o
+obj-$(CONFIG_COMEDI_VMK80XX) += vmk80xx.o
+
+# Comedi NI drivers
+obj-$(CONFIG_COMEDI_MITE) += mite.o
+obj-$(CONFIG_COMEDI_NI_TIO) += ni_tio.o
+obj-$(CONFIG_COMEDI_NI_TIO) += ni_tiocmd.o
+obj-$(CONFIG_COMEDI_NI_LABPC) += ni_labpc.o
+obj-$(CONFIG_COMEDI_FC) += comedi_fc.o
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h b/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
index f96b1289cdf..c3284eb0f0a 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h
@@ -212,7 +212,7 @@ struct pcilst_struct {
};
/* ptr to root list of all amcc devices */
-struct pcilst_struct *amcc_devices;
+static struct pcilst_struct *amcc_devices;
static const int i_ADDIDATADeviceID[] = { 0x15B8, 0x10E8 };
@@ -260,12 +260,10 @@ void v_pci_card_list_init(unsigned short pci_vendor, char display)
for (i_Count = 0; i_Count < 2; i_Count++) {
pci_vendor = i_ADDIDATADeviceID[i_Count];
if (pcidev->vendor == pci_vendor) {
- amcc = kmalloc(sizeof(*amcc), GFP_KERNEL);
+ amcc = kzalloc(sizeof(*amcc), GFP_KERNEL);
if (amcc == NULL)
continue;
- memset(amcc, 0, sizeof(*amcc));
-
amcc->pcidev = pcidev;
if (last)
last->next = amcc;
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 6625fdc8e90..2c986413a81 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -293,8 +293,8 @@ static const struct addi_board boardtypes[] = {
0,
0,
0,
- 0,
- 0,
+ NULL,
+ NULL,
32,
0,
0,
@@ -2527,7 +2527,7 @@ static const struct addi_board boardtypes[] = {
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct addi_board))
-struct comedi_driver driver_addi = {
+static struct comedi_driver driver_addi = {
.driver_name = "addi_common",
.module = THIS_MODULE,
.attach = i_ADDI_Attach,
@@ -2639,9 +2639,8 @@ static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->ps_BoardInfo = this_board;
devpriv->i_IobaseReserved = (int) io_addr[3];
printk("\nioremap begin");
- devpriv->dw_AiBase =
- (unsigned long) ioremap(io_addr[3],
- this_board->i_IorangeBase3);
+ devpriv->dw_AiBase = ioremap(io_addr[3],
+ this_board->i_IorangeBase3);
printk("\nioremap end");
}
@@ -2952,7 +2951,7 @@ static int i_ADDI_Detach(struct comedi_device *dev)
devpriv->ui_DmaBufferPages[1]);
}
} else {
- iounmap((void *)devpriv->dw_AiBase);
+ iounmap(devpriv->dw_AiBase);
if (devpriv->allocated) {
i_pci_card_free(devpriv->amcc);
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
index caeb6fd2d9b..1a2816920ff 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.h
@@ -351,7 +351,7 @@ struct addi_private {
int i_IobaseAmcc; /* base+size for AMCC chip */
int i_IobaseAddon; /* addon base address */
int i_IobaseReserved;
- unsigned long dw_AiBase;
+ void __iomem *dw_AiBase;
struct pcilst_struct *amcc; /* ptr too AMCC data */
unsigned char allocated; /* we have blocked card */
unsigned char b_ValidDriver; /* driver is ok */
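Typing dw_AiBase as void __iomem * (together with the matching ioremap()/iounmap()/readl()/writel() cleanups elsewhere in this series) removes the open-coded casts and lets sparse check the address-space annotation. A minimal sketch of the pattern, with hypothetical names and made-up register offsets, not the driver's actual attach code:

#include <linux/io.h>
#include <linux/types.h>

struct demo_priv {
	void __iomem *mmio;	/* previously: unsigned long plus casts */
};

static int demo_map_and_poke(struct demo_priv *p, resource_size_t start,
			     resource_size_t len)
{
	p->mmio = ioremap(start, len);
	if (!p->mmio)
		return -ENOMEM;

	writel(0, p->mmio + 8);			/* no (void *) cast needed */
	return (int)readl(p->mmio + 16);	/* sparse checks __iomem use */
}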
diff --git a/drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h b/drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h
index 49141b3558e..349e93c23e9 100644
--- a/drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h
+++ b/drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h
@@ -253,12 +253,10 @@ void v_pci_card_list_init(unsigned short pci_vendor, char display)
pci_for_each_dev(pcidev) {
if (pcidev->vendor == pci_vendor) {
- amcc = kmalloc(sizeof(*amcc), GFP_KERNEL);
+ amcc = kzalloc(sizeof(*amcc), GFP_KERNEL);
if (amcc == NULL)
continue;
- memset(amcc, 0, sizeof(*amcc));
-
amcc->pcidev = pcidev;
if (last) {
last->next = amcc;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
index 791297266fc..1369e22b7ee 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
@@ -52,9 +52,9 @@ You should also find the complete GPL in the COPYING file accompanying this sour
+----------------------------------------------------------------------------+
*/
#include "hwdrv_apci035.h"
-int i_WatchdogNbr = 0;
-int i_Temp = 0;
-int i_Flag = 1;
+static int i_WatchdogNbr = 0;
+static int i_Temp = 0;
+static int i_Flag = 1;
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI035_ConfigTimerWatchdog |
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h
index e0023c8cb62..68db9c10c99 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h
@@ -19,22 +19,8 @@
#define APCI035_BOARD_VENDOR_ID 0x15B8
#define APCI035_ADDRESS_RANGE 255
-int i_TW_Number;
-struct {
- int i_Gain;
- int i_Polarity;
- int i_OffsetRange;
- int i_Coupling;
- int i_SingleDiff;
- int i_AutoCalibration;
- unsigned int ui_ReloadValue;
- unsigned int ui_TimeUnitReloadVal;
- int i_Interrupt;
- int i_ModuleSelection;
-} Config_Parameters_Main;
-
/* ANALOG INPUT RANGE */
-struct comedi_lrange range_apci035_ai = { 8, {
+static struct comedi_lrange range_apci035_ai = { 8, {
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2),
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c
index fe06789699f..faea003e16c 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c
@@ -53,8 +53,8 @@ You should also find the complete GPL in the COPYING file accompanying this sour
*/
#include "hwdrv_apci1032.h"
#include <linux/delay.h>
-/* Global variables */
-unsigned int ui_InterruptStatus;
+
+static unsigned int ui_InterruptStatus;
/*
+----------------------------------------------------------------------------+
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index d5e06ad6acc..b3b921853e6 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -47,16 +47,16 @@ You should also find the complete GPL in the COPYING file accompanying this sour
*/
#include "hwdrv_apci1500.h"
-int i_TimerCounter1Init = 0;
-int i_TimerCounter2Init = 0;
-int i_WatchdogCounter3Init = 0;
-int i_Event1Status = 0, i_Event2Status = 0;
-int i_TimerCounterWatchdogInterrupt = 0;
-int i_Logic = 0, i_CounterLogic = 0;
-int i_InterruptMask = 0;
-int i_InputChannel = 0;
-int i_TimerCounter1Enabled = 0, i_TimerCounter2Enabled =
- 0, i_WatchdogCounter3Enabled = 0;
+static int i_TimerCounter1Init = 0;
+static int i_TimerCounter2Init = 0;
+static int i_WatchdogCounter3Init = 0;
+static int i_Event1Status = 0, i_Event2Status = 0;
+static int i_TimerCounterWatchdogInterrupt = 0;
+static int i_Logic = 0, i_CounterLogic = 0;
+static int i_InterruptMask = 0;
+static int i_InputChannel = 0;
+static int i_TimerCounter1Enabled = 0, i_TimerCounter2Enabled = 0,
+ i_WatchdogCounter3Enabled = 0;
/*
+----------------------------------------------------------------------------+
@@ -136,9 +136,10 @@ int i_TimerCounter1Enabled = 0, i_TimerCounter2Enabled =
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_PatternPolarity = 0, i_PatternTransition = 0, i_PatternMask = 0;
int i_MaxChannel = 0, i_Count = 0, i_EventMask = 0;
@@ -519,8 +520,10 @@ int i_APCI1500_ConfigDigitalInputEvent(struct comedi_device *dev,
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_StartStopInputEvent(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_StartStopInputEvent(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_Event1InterruptStatus = 0, i_Event2InterruptStatus =
0, i_RegValue;
@@ -784,8 +787,10 @@ int i_APCI1500_StartStopInputEvent(struct comedi_device *dev, struct comedi_subd
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_Initialisation(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_Initialisation(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_DummyRead = 0;
/******************/
@@ -956,9 +961,10 @@ int i_APCI1500_Initialisation(struct comedi_device *dev, struct comedi_subdevice
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_ReadMoreDigitalInput(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ReadMoreDigitalInput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int ui_PortValue = data[1];
unsigned int ui_Mask = 0;
@@ -1040,8 +1046,10 @@ int i_APCI1500_ReadMoreDigitalInput(struct comedi_device *dev, struct comedi_sub
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
devpriv->b_OutputMemoryStatus = data[0];
return insn->n;
@@ -1066,9 +1074,10 @@ int i_APCI1500_ConfigDigitalOutputErrorInterrupt(struct comedi_device *dev,
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
static unsigned int ui_Temp = 0;
unsigned int ui_Temp1;
@@ -1260,9 +1269,10 @@ int i_APCI1500_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subde
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_TimerCounterMode, i_MasterConfiguration;
@@ -1860,8 +1870,10 @@ int i_APCI1500_ConfigCounterTimerWatchdog(struct comedi_device *dev,
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_CommandAndStatusValue;
@@ -2181,9 +2193,10 @@ int i_APCI1500_StartStopTriggerTimerCounterWatchdog(struct comedi_device *dev,
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_CommandAndStatusValue;
switch (data[0]) {
@@ -2370,8 +2383,10 @@ int i_APCI1500_ReadCounterTimerWatchdog(struct comedi_device *dev,
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_ReadInterruptMask(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ReadInterruptMask(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[0] = i_InterruptMask;
data[1] = i_InputChannel;
@@ -2401,8 +2416,10 @@ int i_APCI1500_ReadInterruptMask(struct comedi_device *dev, struct comedi_subdev
| |
+----------------------------------------------------------------------------+
*/
-int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int i_APCI1500_ConfigureInterrupt(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int ui_Status;
int i_RegValue;
@@ -2821,8 +2838,7 @@ static void v_APCI1500_Interrupt(int irq, void *d)
| |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI1500_Reset(struct comedi_device *dev)
+static int i_APCI1500_Reset(struct comedi_device *dev)
{
int i_DummyRead = 0;
i_TimerCounter1Init = 0;
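The pattern throughout this file is the same: file-scope variables and helpers used only inside hwdrv_apci1500.c gain the static keyword, so they no longer pollute the kernel's global symbol namespace or collide with identically named symbols in the other addi-data hwdrv_* files. A compressed before/after sketch with hypothetical names, not taken from the patch:

/* Before, these were global and visible to every other object file:
 *	int demo_timer_init;
 *	int demo_reset(void);
 * After, they are confined to this translation unit:
 */
static int demo_timer_init;

static int demo_reset(void)
{
	demo_timer_init = 0;
	return 0;
}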
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index 4413279c880..4299ff5214d 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -56,8 +56,8 @@ You should also find the complete GPL in the COPYING file accompanying this sour
#include "hwdrv_apci1564.h"
/* Global variables */
-unsigned int ui_InterruptStatus_1564 = 0;
-unsigned int ui_InterruptData, ui_Type;
+static unsigned int ui_InterruptStatus_1564 = 0;
+static unsigned int ui_InterruptData, ui_Type;
/*
+----------------------------------------------------------------------------+
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c
index 2d325163c16..d7d22236778 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c
@@ -53,7 +53,7 @@ You should also find the complete GPL in the COPYING file accompanying this sour
*/
#include "hwdrv_apci2032.h"
-unsigned int ui_InterruptData, ui_Type;
+static unsigned int ui_InterruptData, ui_Type;
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2032_ConfigDigitalOutput |
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h
index c456d75674b..743523e804a 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h
@@ -32,7 +32,7 @@
#define MODE0 0
#define MODE1 1
/* ANALOG OUTPUT RANGE */
-struct comedi_lrange range_apci3501_ao = { 2, {
+static struct comedi_lrange range_apci3501_ao = { 2, {
BIP_RANGE(10),
UNI_RANGE(10)
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c
index 3692326d474..2e20bc7cdcd 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c
@@ -67,10 +67,9 @@ You should also find the complete GPL in the COPYING file accompanying this sour
| 1 : Conversion started |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_TestConversionStarted(struct comedi_device *dev)
+static int i_APCI3XXX_TestConversionStarted(struct comedi_device *dev)
{
- if ((readl((void *)(devpriv->dw_AiBase + 8)) & 0x80000UL) == 0x80000UL)
+ if ((readl(devpriv->dw_AiBase + 8) & 0x80000UL) == 0x80000UL)
return 1;
else
return 0;
@@ -104,9 +103,10 @@ int i_APCI3XXX_TestConversionStarted(struct comedi_device *dev)
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_TimeBase = 0;
@@ -204,19 +204,14 @@ int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
/* Set the convert timing unit */
/*******************************/
- writel((unsigned int)
- b_TimeBase,
- (void *)
- (devpriv->
- dw_AiBase
- +
- 36));
+ writel((unsigned int)b_TimeBase,
+ devpriv->dw_AiBase + 36);
/**************************/
/* Set the convert timing */
/*************************/
- writel(dw_ReloadValue, (void *)(devpriv->dw_AiBase + 32));
+ writel(dw_ReloadValue, devpriv->dw_AiBase + 32);
} else {
/**************************/
/* Any conversion started */
@@ -294,9 +289,10 @@ int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnConfigAnalogInput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
@@ -354,9 +350,10 @@ int i_APCI3XXX_InsnConfigAnalogInput(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Configuration = (unsigned char) CR_RANGE(insn->chanspec);
@@ -422,26 +419,20 @@ int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
/* Clear the FIFO */
/******************/
- writel(0x10000UL,
- (void *)(devpriv->dw_AiBase +
- 12));
+ writel(0x10000UL, devpriv->dw_AiBase + 12);
/*******************************/
/* Get and save the delay mode */
/*******************************/
- dw_Temp =
- readl((void *)(devpriv->
- dw_AiBase + 4));
+ dw_Temp = readl(devpriv->dw_AiBase + 4);
dw_Temp = dw_Temp & 0xFFFFFEF0UL;
/***********************************/
/* Channel configuration selection */
/***********************************/
- writel(dw_Temp,
- (void *)(devpriv->dw_AiBase +
- 4));
+ writel(dw_Temp, devpriv->dw_AiBase + 4);
/**************************/
/* Make the configuration */
@@ -458,35 +449,28 @@ int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
/***************************/
writel(dw_Configuration,
- (void *)(devpriv->dw_AiBase +
- 0));
+ devpriv->dw_AiBase + 0);
/*********************/
/* Channel selection */
/*********************/
writel(dw_Temp | 0x100UL,
- (void *)(devpriv->dw_AiBase +
- 4));
+ devpriv->dw_AiBase + 4);
writel((unsigned int) b_Channel,
- (void *)(devpriv->dw_AiBase +
- 0));
+ devpriv->dw_AiBase + 0);
/***********************/
/* Restaure delay mode */
/***********************/
- writel(dw_Temp,
- (void *)(devpriv->dw_AiBase +
- 4));
+ writel(dw_Temp, devpriv->dw_AiBase + 4);
/***********************************/
/* Set the number of sequence to 1 */
/***********************************/
- writel(1,
- (void *)(devpriv->dw_AiBase +
- 48));
+ writel(1, devpriv->dw_AiBase + 48);
/***************************/
/* Save the interrupt flag */
@@ -514,50 +498,29 @@ int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
/* Start the conversion */
/************************/
- writel(0x80000UL,
- (void *)
- (devpriv->
- dw_AiBase
- + 8));
+ writel(0x80000UL, devpriv->dw_AiBase + 8);
/****************/
/* Wait the EOS */
/****************/
do {
- dw_Temp =
- readl(
- (void *)
- (devpriv->
- dw_AiBase
- +
- 20));
- dw_Temp =
- dw_Temp
- & 1;
+ dw_Temp = readl(devpriv->dw_AiBase + 20);
+ dw_Temp = dw_Temp & 1;
} while (dw_Temp != 1);
/*************************/
/* Read the analog value */
/*************************/
- data[dw_AcquisitionCpt]
- =
- (unsigned int)
- readl((void
- *)
- (devpriv->
- dw_AiBase
- + 28));
+ data[dw_AcquisitionCpt] = (unsigned int)readl(devpriv->dw_AiBase + 28);
}
} else {
/************************/
/* Start the conversion */
/************************/
- writel(0x180000UL,
- (void *)(devpriv->
- dw_AiBase + 8));
+ writel(0x180000UL, devpriv->dw_AiBase + 8);
}
} else {
/**************************/
@@ -603,7 +566,7 @@ int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
+----------------------------------------------------------------------------+
*/
-void v_APCI3XXX_Interrupt(int irq, void *d)
+static void v_APCI3XXX_Interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
unsigned char b_CopyCpt = 0;
@@ -613,13 +576,13 @@ void v_APCI3XXX_Interrupt(int irq, void *d)
/* Test if interrupt occur */
/***************************/
- dw_Status = readl((void *)(devpriv->dw_AiBase + 16));
+ dw_Status = readl(devpriv->dw_AiBase + 16);
if ( (dw_Status & 0x2UL) == 0x2UL) {
/***********************/
/* Reset the interrupt */
/***********************/
- writel(dw_Status, (void *)(devpriv->dw_AiBase + 16));
+ writel(dw_Status, devpriv->dw_AiBase + 16);
/*****************************/
/* Test if interrupt enabled */
@@ -634,8 +597,7 @@ void v_APCI3XXX_Interrupt(int irq, void *d)
b_CopyCpt < devpriv->ui_AiNbrofChannels;
b_CopyCpt++) {
devpriv->ui_AiReadData[b_CopyCpt] =
- (unsigned int) readl((void *)(devpriv->
- dw_AiBase + 28));
+ (unsigned int)readl(devpriv->dw_AiBase + 28);
}
/**************************/
@@ -682,9 +644,10 @@ void v_APCI3XXX_Interrupt(int irq, void *d)
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned char b_Range = (unsigned char) CR_RANGE(insn->chanspec);
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
@@ -710,24 +673,21 @@ int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
/* Set the range selection */
/***************************/
- writel(b_Range,
- (void *)(devpriv->dw_AiBase + 96));
+ writel(b_Range, devpriv->dw_AiBase + 96);
/**************************************************/
/* Write the analog value to the selected channel */
/**************************************************/
writel((data[0] << 8) | b_Channel,
- (void *)(devpriv->dw_AiBase + 100));
+ devpriv->dw_AiBase + 100);
/****************************/
/* Wait the end of transfer */
/****************************/
do {
- dw_Status =
- readl((void *)(devpriv->
- dw_AiBase + 96));
+ dw_Status = readl(devpriv->dw_AiBase + 96);
} while ((dw_Status & 0x100) != 0x100);
} else {
/***************************/
@@ -788,9 +748,10 @@ int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnConfigInitTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnConfigInitTTLIO(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Command = 0;
@@ -916,9 +877,10 @@ int i_APCI3XXX_InsnConfigInitTTLIO(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnBitsTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnBitsTTLIO(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_ChannelCpt = 0;
@@ -1071,9 +1033,10 @@ int i_APCI3XXX_InsnBitsTTLIO(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnReadTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnReadTTLIO(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
int i_ReturnValue = insn->n;
@@ -1184,9 +1147,10 @@ int i_APCI3XXX_InsnReadTTLIO(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-
-int i_APCI3XXX_InsnWriteTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnWriteTTLIO(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
@@ -1296,8 +1260,10 @@ int i_APCI3XXX_InsnWriteTTLIO(struct comedi_device *dev,
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_InsnReadDigitalInput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnReadDigitalInput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
@@ -1354,8 +1320,10 @@ int i_APCI3XXX_InsnReadDigitalInput(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_InsnBitsDigitalInput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnBitsDigitalInput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned int dw_Temp = 0;
@@ -1407,8 +1375,10 @@ int i_APCI3XXX_InsnBitsDigitalInput(struct comedi_device *dev,
| -101 : Data size error |
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_InsnBitsDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnBitsDigitalOutput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_ChannelCpt = 0;
@@ -1503,8 +1473,10 @@ int i_APCI3XXX_InsnBitsDigitalOutput(struct comedi_device *dev,
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_InsnWriteDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnWriteDigitalOutput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = CR_CHAN(insn->chanspec);
@@ -1578,8 +1550,10 @@ int i_APCI3XXX_InsnWriteDigitalOutput(struct comedi_device *dev,
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_InsnReadDigitalOutput(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
+static int i_APCI3XXX_InsnReadDigitalOutput(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
int i_ReturnValue = insn->n;
unsigned char b_Channel = CR_CHAN(insn->chanspec);
@@ -1636,7 +1610,7 @@ int i_APCI3XXX_InsnReadDigitalOutput(struct comedi_device *dev,
+----------------------------------------------------------------------------+
*/
-int i_APCI3XXX_Reset(struct comedi_device *dev)
+static int i_APCI3XXX_Reset(struct comedi_device *dev)
{
unsigned char b_Cpt = 0;
@@ -1656,27 +1630,26 @@ int i_APCI3XXX_Reset(struct comedi_device *dev)
/* Clear the start command */
/***************************/
- writel(0, (void *)(devpriv->dw_AiBase + 8));
+ writel(0, devpriv->dw_AiBase + 8);
/*****************************/
/* Reset the interrupt flags */
/*****************************/
- writel(readl((void *)(devpriv->dw_AiBase + 16)),
- (void *)(devpriv->dw_AiBase + 16));
+ writel(readl(devpriv->dw_AiBase + 16), devpriv->dw_AiBase + 16);
/*****************/
/* clear the EOS */
/*****************/
- readl((void *)(devpriv->dw_AiBase + 20));
+ readl(devpriv->dw_AiBase + 20);
/******************/
/* Clear the FIFO */
/******************/
for (b_Cpt = 0; b_Cpt < 16; b_Cpt++) {
- readl((void *)(devpriv->dw_AiBase + 28));
+ readl(devpriv->dw_AiBase + 28);
}
/************************/
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 6925faaf529..712b9e0788b 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -54,7 +54,7 @@ References:
#include "../comedidev.h"
#include "comedi_pci.h"
-#define PCI6208_DRIVER_NAME "adl_pci6208"
+#define PCI6208_DRIVER_NAME "adl_pci6208"
/* Board descriptions */
struct pci6208_board {
@@ -134,10 +134,10 @@ static int pci6208_ao_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
/* static int pci6208_dio_insn_bits (struct comedi_device *dev,
- * struct comedi_subdevice *s, */
+ * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data); */
/* static int pci6208_dio_insn_config(struct comedi_device *dev,
- * struct comedi_subdevice *s, */
+ * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data); */
/*
@@ -268,7 +268,7 @@ static int pci6208_ao_rinsn(struct comedi_device *dev,
* This allows packed reading/writing of the DIO channels. The
* comedi core can convert between insn_bits and insn_read/write */
/* static int pci6208_dio_insn_bits(struct comedi_device *dev,
- * struct comedi_subdevice *s, */
+ * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data) */
/* { */
/* if(insn->n!=2)return -EINVAL; */
@@ -293,7 +293,7 @@ static int pci6208_ao_rinsn(struct comedi_device *dev,
/* } */
/* static int pci6208_dio_insn_config(struct comedi_device *dev,
- * struct comedi_subdevice *s, */
+ * struct comedi_subdevice *s, */
/* struct comedi_insn *insn,unsigned int *data) */
/* { */
/* int chan=CR_CHAN(insn->chanspec); */
diff --git a/drivers/staging/comedi/drivers/adl_pci7230.c b/drivers/staging/comedi/drivers/adl_pci7230.c
new file mode 100644
index 00000000000..24a82eb9d15
--- /dev/null
+++ b/drivers/staging/comedi/drivers/adl_pci7230.c
@@ -0,0 +1,206 @@
+/*
+ comedi/drivers/adl_pci7230.c
+
+ Hardware comedi driver for PCI7230 Adlink card
+ Copyright (C) 2010 David Fernandez <dfcastelao@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+/*
+Driver: adl_pci7230
+Description: Driver for the Adlink PCI-7230 32 ch. isolated digital io board
+Devices: [ADLink] PCI-7230 (adl_pci7230)
+Author: David Fernandez <dfcastelao@gmail.com>
+Status: experimental
+Updated: Mon, 14 Apr 2008 15:08:14 +0100
+
+Configuration Options:
+ [0] - PCI bus of device (optional)
+ [1] - PCI slot of device (optional)
+ If bus/slot is not specified, the first supported
+ PCI device found will be used.
+*/
+
+#include "../comedidev.h"
+#include <linux/kernel.h>
+#include "comedi_pci.h"
+
+#define PCI7230_DI 0x00
+#define PCI7230_DO 0x00
+
+#define PCI_DEVICE_ID_PCI7230 0x7230
+
+static DEFINE_PCI_DEVICE_TABLE(adl_pci7230_pci_table) = {
+ {
+ PCI_VENDOR_ID_ADLINK,
+ PCI_DEVICE_ID_PCI7230,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ 0
+ },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, adl_pci7230_pci_table);
+
+struct adl_pci7230_private {
+ int data;
+ struct pci_dev *pci_dev;
+};
+
+#define devpriv ((struct adl_pci7230_private *)dev->private)
+
+static int adl_pci7230_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it);
+static int adl_pci7230_detach(struct comedi_device *dev);
+static struct comedi_driver driver_adl_pci7230 = {
+ .driver_name = "adl_pci7230",
+ .module = THIS_MODULE,
+ .attach = adl_pci7230_attach,
+ .detach = adl_pci7230_detach,
+};
+
+/* Digital IO */
+
+static int adl_pci7230_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+static int adl_pci7230_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+static int adl_pci7230_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct pci_dev *pcidev;
+ struct comedi_subdevice *s;
+ int bus, slot;
+
+ printk(KERN_INFO "comedi%d: adl_pci7230\n", dev->minor);
+
+ dev->board_name = "pci7230";
+ bus = it->options[0];
+ slot = it->options[1];
+
+ if (alloc_private(dev, sizeof(struct adl_pci7230_private)) < 0)
+ return -ENOMEM;
+
+ if (alloc_subdevices(dev, 2) < 0)
+ return -ENOMEM;
+
+ for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
+ pcidev != NULL;
+ pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+
+ if (pcidev->vendor == PCI_VENDOR_ID_ADLINK &&
+ pcidev->device == PCI_DEVICE_ID_PCI7230) {
+ if (bus || slot) {
+ /* requested particular bus/slot */
+ if (pcidev->bus->number != bus ||
+ PCI_SLOT(pcidev->devfn) != slot) {
+ continue;
+ }
+ }
+ devpriv->pci_dev = pcidev;
+ break;
+ }
+ }
+ if (pcidev == NULL) {
+ printk(KERN_ERR "comedi%d: no supported board found! (req. bus/slot : %d/%d)\n",
+ dev->minor, bus, slot);
+ return -EIO;
+ }
+ if (comedi_pci_enable(pcidev, "adl_pci7230") < 0) {
+ printk(KERN_ERR "comedi%d: Failed to enable PCI device and request regions\n",
+ dev->minor);
+ return -EIO;
+ }
+ dev->iobase = pci_resource_start(pcidev, 2);
+ printk(KERN_DEBUG "comedi: base addr %4lx\n", dev->iobase);
+
+ s = dev->subdevices + 0;
+ /* Isolated do */
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7230_do_insn_bits;
+
+ s = dev->subdevices + 1;
+ /* Isolated di */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7230_di_insn_bits;
+
+ printk(KERN_DEBUG "comedi: attached\n");
+
+ return 1;
+}
+
+static int adl_pci7230_detach(struct comedi_device *dev)
+{
+ printk(KERN_DEBUG "comedi%d: pci7230: remove\n", dev->minor);
+
+ if (devpriv && devpriv->pci_dev) {
+ if (dev->iobase)
+ comedi_pci_disable(devpriv->pci_dev);
+ pci_dev_put(devpriv->pci_dev);
+ }
+
+ return 0;
+}
+
+static int adl_pci7230_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (insn->n != 2)
+ return -EINVAL;
+
+ if (data[0]) {
+ s->state &= ~data[0];
+ s->state |= (data[0] & data[1]);
+
+ outl((s->state << 16) & 0xffffffff, dev->iobase + PCI7230_DO);
+ }
+
+ return 2;
+}
+
+static int adl_pci7230_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ if (insn->n != 2)
+ return -EINVAL;
+
+ data[1] = inl(dev->iobase + PCI7230_DI) & 0xffffffff;
+
+ return 2;
+}
+
+COMEDI_PCI_INITCLEANUP(driver_adl_pci7230, adl_pci7230_pci_table);
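For readers new to comedi: adl_pci7230_do_insn_bits() above follows the usual insn_bits convention, where data[0] is a mask of the channels being written and data[1] holds their new values. A stand-alone sketch of just that mask/value update (illustrative only; a plain variable stands in for s->state):

/* Returns the updated output state; the driver then shifts this into
 * the card's register layout (s->state << 16 in adl_pci7230). */
static unsigned int apply_insn_bits(unsigned int state,
				    unsigned int mask,
				    unsigned int bits)
{
	state &= ~mask;		/* clear every channel named in the mask */
	state |= mask & bits;	/* set those channels from the new values */
	return state;
}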
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index da172a553d1..36a254cd441 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -358,8 +358,8 @@ struct pci9111_private_data {
int ao_readback; /* Last written analog output data */
- int timer_divisor_1; /* Divisor values for the 8254 timer pacer */
- int timer_divisor_2;
+ unsigned int timer_divisor_1; /* Divisor values for the 8254 timer pacer */
+ unsigned int timer_divisor_2;
int is_valid; /* Is device valid */
@@ -585,19 +585,17 @@ pci9111_ai_do_cmd_test(struct comedi_device *dev,
(cmd->scan_begin_src != TRIG_EXT))
error++;
- if ((cmd->convert_src != TRIG_TIMER) && (cmd->convert_src != TRIG_EXT)) {
+ if ((cmd->convert_src != TRIG_TIMER) && (cmd->convert_src != TRIG_EXT))
error++;
- }
if ((cmd->convert_src == TRIG_TIMER) &&
!((cmd->scan_begin_src == TRIG_TIMER) ||
- (cmd->scan_begin_src == TRIG_FOLLOW))) {
+ (cmd->scan_begin_src == TRIG_FOLLOW)))
error++;
- }
if ((cmd->convert_src == TRIG_EXT) &&
!((cmd->scan_begin_src == TRIG_EXT) ||
- (cmd->scan_begin_src == TRIG_FOLLOW))) {
+ (cmd->scan_begin_src == TRIG_FOLLOW)))
error++;
- }
+
if (cmd->scan_end_src != TRIG_COUNT)
error++;
@@ -1067,9 +1065,8 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
pci9111_ai_channel_set(CR_CHAN((&insn->chanspec)[0]));
- if ((pci9111_ai_range_get()) != CR_RANGE((&insn->chanspec)[0])) {
+ if ((pci9111_ai_range_get()) != CR_RANGE((&insn->chanspec)[0]))
pci9111_ai_range_set(CR_RANGE((&insn->chanspec)[0]));
- }
pci9111_fifo_reset();
@@ -1090,11 +1087,10 @@ static int pci9111_ai_insn_read(struct comedi_device *dev,
conversion_done:
- if (resolution == PCI9111_HR_AI_RESOLUTION) {
+ if (resolution == PCI9111_HR_AI_RESOLUTION)
data[i] = pci9111_hr_ai_get_data();
- } else {
+ else
data[i] = pci9111_ai_get_data();
- }
}
#ifdef AI_INSN_DEBUG
@@ -1131,9 +1127,8 @@ static int pci9111_ao_insn_read(struct comedi_device *dev,
{
int i;
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = dev_private->ao_readback & PCI9111_AO_RESOLUTION_MASK;
- }
return i;
}
@@ -1222,9 +1217,8 @@ static int pci9111_attach(struct comedi_device *dev,
int error, i;
const struct pci9111_board *board;
- if (alloc_private(dev, sizeof(struct pci9111_private_data)) < 0) {
+ if (alloc_private(dev, sizeof(struct pci9111_private_data)) < 0)
return -ENOMEM;
- }
/* Probe the device to determine what device in the series it is. */
printk("comedi%d: " PCI9111_DRIVER_NAME " driver\n", dev->minor);
@@ -1275,9 +1269,6 @@ found:
/* TODO: Warn about non-tested boards. */
- switch (board->device_id) {
- };
-
/* Read local configuration register base address [PCI_BASE_ADDRESS #1]. */
lcr_io_base = pci_resource_start(pci_device, 1);
@@ -1387,21 +1378,19 @@ static int pci9111_detach(struct comedi_device *dev)
{
/* Reset device */
- if (dev->private != 0) {
+ if (dev->private != NULL) {
if (dev_private->is_valid)
pci9111_reset(dev);
}
/* Release previously allocated irq */
- if (dev->irq != 0) {
+ if (dev->irq != 0)
free_irq(dev->irq, dev);
- }
- if (dev_private != 0 && dev_private->pci_device != 0) {
- if (dev->iobase) {
+ if (dev_private != NULL && dev_private->pci_device != NULL) {
+ if (dev->iobase)
comedi_pci_disable(dev_private->pci_device);
- }
pci_dev_put(dev_private->pci_device);
}
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 944f20ae5a6..ccef549778e 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -44,26 +44,25 @@ c) If isn't used DMA then you can use only mode where
Configuration options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
- If bus/slot is not specified, then first available PCI
- card will be used.
+ If bus/slot is not specified, then first available PCI
+ card will be used.
[2] - 0= standard 8 DIFF/16 SE channels configuration
- n= external multiplexer connected, 1<=n<=256
+ n = external multiplexer connected, 1 <= n <= 256
[3] - 0=autoselect DMA or EOC interrupts operation
- 1=disable DMA mode
- 3=disable DMA and INT, only insn interface will work
+ 1 = disable DMA mode
+ 3 = disable DMA and INT, only insn interface will work
[4] - sample&hold signal - card can generate signal for external S&H board
- 0=use SSHO (pin 45) signal is generated in onboard hardware S&H logic
- 0!=use ADCHN7 (pin 23) signal is generated from driver, number
- say how long delay is requested in ns and sign polarity of the hold
- (in this case external multiplexor can serve only 128 channels)
+ 0 = use SSHO(pin 45) signal is generated in onboard hardware S&H logic
+ 0 != use ADCHN7(pin 23) signal is generated from driver, number say how
+ long delay is requested in ns and sign polarity of the hold
+ (in this case external multiplexor can serve only 128 channels)
[5] - 0=stop measure on all hardware errors
- 2|=ignore ADOR - A/D Overrun status
+ 2 | = ignore ADOR - A/D Overrun status
8|=ignore Bover - A/D Burst Mode Overrun status
256|=ignore nFull - A/D FIFO Full status
*/
#include "../comedidev.h"
-#include "../pci_ids.h"
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -74,10 +73,18 @@ Configuration options:
#include "comedi_pci.h"
#include "comedi_fc.h"
+#define PCI_VENDOR_ID_AMCC 0x10e8
+
/* paranoid checks are broken */
-#undef PCI9118_PARANOIDCHECK /* if defined, then is used code which control correct channel number on every 12 bit sample */
+#undef PCI9118_PARANOIDCHECK /*
+ * if defined, then is used code which control
+ * correct channel number on every 12 bit sample
+ */
-#undef PCI9118_EXTDEBUG /* if defined then driver prints a lot of messages */
+#undef PCI9118_EXTDEBUG /*
+ * if defined then driver prints
+ * a lot of messages
+ */
#undef DPRINTK
#ifdef PCI9118_EXTDEBUG
@@ -87,7 +94,10 @@ Configuration options:
#endif
#define IORANGE_9118 64 /* I hope */
-#define PCI9118_CHANLEN 255 /* len of chanlist, some source say 256, but reality looks like 255 :-( */
+#define PCI9118_CHANLEN 255 /*
+ * len of chanlist, some source say 256,
+ * but reality looks like 255 :-(
+ */
#define PCI9118_CNT0 0x00 /* R/W: 8254 counter 0 */
#define PCI9118_CNT1 0x04 /* R/W: 8254 counter 0 */
@@ -113,20 +123,47 @@ Configuration options:
#define AdControl_UniP 0x80 /* 1=bipolar, 0=unipolar */
#define AdControl_Diff 0x40 /* 1=differential, 0= single end inputs */
#define AdControl_SoftG 0x20 /* 1=8254 counter works, 0=counter stops */
-#define AdControl_ExtG 0x10 /* 1=8254 countrol controlled by TGIN(pin 46), 0=controled by SoftG */
-#define AdControl_ExtM 0x08 /* 1=external hardware trigger (pin 44), 0=internal trigger */
-#define AdControl_TmrTr 0x04 /* 1=8254 is iternal trigger source, 0=software trigger is source (register PCI9118_SOFTTRG) */
+#define AdControl_ExtG 0x10 /*
+ * 1=8254 countrol controlled by TGIN(pin 46),
+ * 0=controlled by SoftG
+ */
+#define AdControl_ExtM 0x08 /*
+ * 1=external hardware trigger (pin 44),
+ * 0=internal trigger
+ */
+#define AdControl_TmrTr 0x04 /*
+ * 1=8254 is internal trigger source,
+ * 0=software trigger is source
+ * (register PCI9118_SOFTTRG)
+ */
#define AdControl_Int 0x02 /* 1=enable INT, 0=disable */
#define AdControl_Dma 0x01 /* 1=enable DMA, 0=disable */
/* bits from A/D function register (PCI9118_ADFUNC) */
-#define AdFunction_PDTrg 0x80 /* 1=positive, 0=negative digital trigger (only positive is correct) */
-#define AdFunction_PETrg 0x40 /* 1=positive, 0=negative external trigger (only positive is correct) */
+#define AdFunction_PDTrg 0x80 /*
+ * 1=positive,
+ * 0=negative digital trigger
+ * (only positive is correct)
+ */
+#define AdFunction_PETrg 0x40 /*
+ * 1=positive,
+ * 0=negative external trigger
+ * (only positive is correct)
+ */
#define AdFunction_BSSH 0x20 /* 1=with sample&hold, 0=without */
#define AdFunction_BM 0x10 /* 1=burst mode, 0=normal mode */
-#define AdFunction_BS 0x08 /* 1=burst mode start, 0=burst mode stop */
-#define AdFunction_PM 0x04 /* 1=post trigger mode, 0=not post trigger */
-#define AdFunction_AM 0x02 /* 1=about trigger mode, 0=not about trigger */
+#define AdFunction_BS 0x08 /*
+ * 1=burst mode start,
+ * 0=burst mode stop
+ */
+#define AdFunction_PM 0x04 /*
+ * 1=post trigger mode,
+ * 0=not post trigger
+ */
+#define AdFunction_AM 0x02 /*
+ * 1=about trigger mode,
+ * 0=not about trigger
+ */
#define AdFunction_Start 0x01 /* 1=trigger start, 0=trigger stop */
/* bits from A/D status register (PCI9118_ADSTAT) */
@@ -178,30 +215,39 @@ static const struct comedi_lrange range_pci9118hg = { 8, {
}
};
-#define PCI9118_BIPOLAR_RANGES 4 /* used for test on mixture of BIP/UNI ranges */
+#define PCI9118_BIPOLAR_RANGES 4 /*
+ * used for test on mixture
+ * of BIP/UNI ranges
+ */
static int pci9118_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pci9118_detach(struct comedi_device *dev);
struct boardtype {
- const char *name; /* board name */
- int vendor_id; /* PCI vendor a device ID of card */
+ const char *name; /* board name */
+ int vendor_id; /* PCI vendor a device ID of card */
int device_id;
- int iorange_amcc; /* iorange for own S5933 region */
- int iorange_9118; /* pass thru card region size */
- int n_aichan; /* num of A/D chans */
- int n_aichand; /* num of A/D chans in diff mode */
- int mux_aichan; /* num of A/D chans with external multiplexor */
- int n_aichanlist; /* len of chanlist */
- int n_aochan; /* num of D/A chans */
- int ai_maxdata; /* resolution of A/D */
- int ao_maxdata; /* resolution of D/A */
- const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
- const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
- unsigned int ai_ns_min; /* max sample speed of card v ns */
- unsigned int ai_pacer_min; /* minimal pacer value (c1*c2 or c1 in burst) */
- int half_fifo_size; /* size of FIFO/2 */
+ int iorange_amcc; /* iorange for own S5933 region */
+ int iorange_9118; /* pass thru card region size */
+ int n_aichan; /* num of A/D chans */
+ int n_aichand; /* num of A/D chans in diff mode */
+ int mux_aichan; /*
+ * num of A/D chans with
+ * external multiplexor
+ */
+ int n_aichanlist; /* len of chanlist */
+ int n_aochan; /* num of D/A chans */
+ int ai_maxdata; /* resolution of A/D */
+ int ao_maxdata; /* resolution of D/A */
+ const struct comedi_lrange *rangelist_ai; /* rangelist for A/D */
+ const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
+ unsigned int ai_ns_min; /* max sample speed of card v ns */
+ unsigned int ai_pacer_min; /*
+ * minimal pacer value
+ * (c1*c2 or c1 in burst)
+ */
+ int half_fifo_size; /* size of FIFO/2 */
};
@@ -246,63 +292,113 @@ static struct comedi_driver driver_pci9118 = {
COMEDI_PCI_INITCLEANUP(driver_pci9118, pci9118_pci_table);
struct pci9118_private {
- unsigned long iobase_a; /* base+size for AMCC chip */
- unsigned int master; /* master capable */
- struct pci_dev *pcidev; /* ptr to actual pcidev */
- unsigned int usemux; /* we want to use external multiplexor! */
+ unsigned long iobase_a; /* base+size for AMCC chip */
+ unsigned int master; /* master capable */
+ struct pci_dev *pcidev; /* ptr to actual pcidev */
+ unsigned int usemux; /* we want to use external multiplexor! */
#ifdef PCI9118_PARANOIDCHECK
- unsigned short chanlist[PCI9118_CHANLEN + 1]; /* list of scaned channel */
- unsigned char chanlistlen; /* number of scanlist */
+ unsigned short chanlist[PCI9118_CHANLEN + 1]; /*
+ * list of
+ * scanned channel
+ */
+ unsigned char chanlistlen; /* number of scanlist */
#endif
- unsigned char AdControlReg; /* A/D control register */
- unsigned char IntControlReg; /* Interrupt control register */
- unsigned char AdFunctionReg; /* A/D function register */
- char valid; /* driver is ok */
- char ai_neverending; /* we do unlimited AI */
- unsigned int i8254_osc_base; /* frequence of onboard oscilator */
- unsigned int ai_do; /* what do AI? 0=nothing, 1 to 4 mode */
- unsigned int ai_act_scan; /* how many scans we finished */
- unsigned int ai_buf_ptr; /* data buffer ptr in samples */
- unsigned int ai_n_chan; /* how many channels is measured */
- unsigned int ai_n_scanlen; /* len of actual scanlist */
- unsigned int ai_n_realscanlen; /* what we must transfer for one outgoing scan include front/back adds */
- unsigned int ai_act_dmapos; /* position in actual real stream */
- unsigned int ai_add_front; /* how many channels we must add before scan to satisfy S&H? */
- unsigned int ai_add_back; /* how many channels we must add before scan to satisfy DMA? */
- unsigned int *ai_chanlist; /* actaul chanlist */
+ unsigned char AdControlReg; /* A/D control register */
+ unsigned char IntControlReg; /* Interrupt control register */
+ unsigned char AdFunctionReg; /* A/D function register */
+ char valid; /* driver is ok */
+ char ai_neverending; /* we do unlimited AI */
+ unsigned int i8254_osc_base; /* frequency of onboard oscillator */
+ unsigned int ai_do; /* what do AI? 0=nothing, 1 to 4 mode */
+ unsigned int ai_act_scan; /* how many scans we finished */
+ unsigned int ai_buf_ptr; /* data buffer ptr in samples */
+ unsigned int ai_n_chan; /* how many channels is measured */
+ unsigned int ai_n_scanlen; /* len of actual scanlist */
+ unsigned int ai_n_realscanlen; /*
+ * what we must transfer for one
+ * outgoing scan include front/back adds
+ */
+ unsigned int ai_act_dmapos; /* position in actual real stream */
+ unsigned int ai_add_front; /*
+ * how many channels we must add
+ * before scan to satisfy S&H?
+ */
+ unsigned int ai_add_back; /*
+ * how many channels we must add
+ * before scan to satisfy DMA?
+ */
+ unsigned int *ai_chanlist; /* actual chanlist */
unsigned int ai_timer1;
unsigned int ai_timer2;
unsigned int ai_flags;
- char ai12_startstop; /* measure can start/stop on external trigger */
- unsigned int ai_divisor1, ai_divisor2; /* divisors for start of measure on external start */
+ char ai12_startstop; /*
+ * measure can start/stop
+ * on external trigger
+ */
+ unsigned int ai_divisor1, ai_divisor2; /*
+ * divisors for start of measure
+ * on external start
+ */
unsigned int ai_data_len;
short *ai_data;
- short ao_data[2]; /* data output buffer */
- unsigned int ai_scans; /* number of scans to do */
- char dma_doublebuf; /* we can use double buffring */
- unsigned int dma_actbuf; /* which buffer is used now */
- short *dmabuf_virt[2]; /* pointers to begin of DMA buffer */
- unsigned long dmabuf_hw[2]; /* hw address of DMA buff */
- unsigned int dmabuf_size[2]; /* size of dma buffer in bytes */
- unsigned int dmabuf_use_size[2]; /* which size we may now used for transfer */
- unsigned int dmabuf_used_size[2]; /* which size was trully used */
+ short ao_data[2]; /* data output buffer */
+ unsigned int ai_scans; /* number of scans to do */
+ char dma_doublebuf; /* we can use double buffering */
+ unsigned int dma_actbuf; /* which buffer is used now */
+ short *dmabuf_virt[2]; /*
+ * pointers to begin of
+ * DMA buffer
+ */
+ unsigned long dmabuf_hw[2]; /* hw address of DMA buff */
+ unsigned int dmabuf_size[2]; /*
+ * size of dma buffer in bytes
+ */
+ unsigned int dmabuf_use_size[2]; /*
+ * which size we may now use
+ * for transfer
+ */
+ unsigned int dmabuf_used_size[2]; /* which size was truly used */
unsigned int dmabuf_panic_size[2];
- unsigned int dmabuf_samples[2]; /* size in samples */
- int dmabuf_pages[2]; /* number of pages in buffer */
- unsigned char cnt0_users; /* bit field of 8254 CNT0 users (0-unused, 1-AO, 2-DI, 3-DO) */
- unsigned char exttrg_users; /* bit field of external trigger users (0-AI, 1-AO, 2-DI, 3-DO) */
- unsigned int cnt0_divisor; /* actual CNT0 divisor */
- void (*int_ai_func) (struct comedi_device *, struct comedi_subdevice *, unsigned short, unsigned int, unsigned short); /* ptr to actual interrupt AI function */
- unsigned char ai16bits; /* =1 16 bit card */
- unsigned char usedma; /* =1 use DMA transfer and not INT */
- unsigned char useeoshandle; /* =1 change WAKE_EOS DMA transfer to fit on every second */
- unsigned char usessh; /* =1 turn on S&H support */
- int softsshdelay; /* >0 use software S&H, numer is requested delay in ns */
- unsigned char softsshsample; /* polarity of S&H signal in sample state */
- unsigned char softsshhold; /* polarity of S&H signal in hold state */
- unsigned int ai_maskerr; /* which warning was printed */
- unsigned int ai_maskharderr; /* on which error bits stops */
- unsigned int ai_inttrig_start; /* TRIG_INT for start */
+ unsigned int dmabuf_samples[2]; /* size in samples */
+ int dmabuf_pages[2]; /* number of pages in buffer */
+ unsigned char cnt0_users; /*
+ * bit field of 8254 CNT0 users
+ * (0-unused, 1-AO, 2-DI, 3-DO)
+ */
+ unsigned char exttrg_users; /*
+ * bit field of external trigger
+ * users(0-AI, 1-AO, 2-DI, 3-DO)
+ */
+ unsigned int cnt0_divisor; /* actual CNT0 divisor */
+ void (*int_ai_func) (struct comedi_device *, struct comedi_subdevice *,
+ unsigned short,
+ unsigned int,
+ unsigned short); /*
+ * ptr to actual interrupt
+ * AI function
+ */
+ unsigned char ai16bits; /* =1 16 bit card */
+ unsigned char usedma; /* =1 use DMA transfer and not INT */
+ unsigned char useeoshandle; /*
+ * =1 change WAKE_EOS DMA transfer
+ * to fit on every second
+ */
+ unsigned char usessh; /* =1 turn on S&H support */
+ int softsshdelay; /*
+ * >0 use software S&H,
+ * numer is requested delay in ns
+ */
+ unsigned char softsshsample; /*
+ * polarity of S&H signal
+ * in sample state
+ */
+ unsigned char softsshhold; /*
+ * polarity of S&H signal
+ * in hold state
+ */
+ unsigned int ai_maskerr; /* which warning was printed */
+ unsigned int ai_maskharderr; /* on which error bits stops */
+ unsigned int ai_inttrig_start; /* TRIG_INT for start */
};
#define devpriv ((struct pci9118_private *)dev->private)
@@ -346,12 +442,19 @@ static int pci9118_insn_read_ai(struct comedi_device *dev,
devpriv->AdControlReg = AdControl_Int & 0xff;
devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
- outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); /* positive triggers, no S&H, no burst, burst stop, no post trigger, no about trigger, trigger stop */
+ outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
+ /*
+ * positive triggers, no S&H,
+ * no burst, burst stop,
+ * no post trigger,
+ * no about trigger,
+ * trigger stop
+ */
if (!setup_channel_list(dev, s, 1, &insn->chanspec, 0, 0, 0, 0, 0))
return -EINVAL;
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
for (n = 0; n < insn->n; n++) {
outw(0, dev->iobase + PCI9118_SOFTTRG); /* start conversion */
@@ -365,7 +468,7 @@ static int pci9118_insn_read_ai(struct comedi_device *dev,
comedi_error(dev, "A/D insn timeout");
data[n] = 0;
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
return -ETIME;
conv_finish:
@@ -379,7 +482,7 @@ conv_finish:
}
}
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
return n;
}
@@ -394,11 +497,11 @@ static int pci9118_insn_write_ao(struct comedi_device *dev,
int n, chanreg, ch;
ch = CR_CHAN(insn->chanspec);
- if (ch) {
+ if (ch)
chanreg = PCI9118_DA2;
- } else {
+ else
chanreg = PCI9118_DA1;
- }
+
for (n = 0; n < insn->n; n++) {
outl(data[n], dev->iobase + chanreg);
@@ -561,11 +664,11 @@ static void pci9118_ai_munge(struct comedi_device *dev,
for (i = 0; i < num_samples; i++) {
if (devpriv->usedma)
array[i] = be16_to_cpu(array[i]);
- if (devpriv->ai16bits) {
+ if (devpriv->ai16bits)
array[i] ^= 0x8000;
- } else {
+ else
array[i] = (array[i] >> 4) & 0x0fff;
- }
+
}
}
@@ -590,11 +693,13 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
#ifdef PCI9118_PARANOIDCHECK
if (devpriv->ai16bits == 0) {
- if ((sampl & 0x000f) != devpriv->chanlist[s->async->cur_chan]) { /* data dropout! */
+ if ((sampl & 0x000f) != devpriv->chanlist[s->async->cur_chan]) {
+ /* data dropout! */
printk
- ("comedi: A/D SAMPL - data dropout: received channel %d, expected %d!\n",
- sampl & 0x000f,
- devpriv->chanlist[s->async->cur_chan]);
+ ("comedi: A/D SAMPL - data dropout: "
+ "received channel %d, expected %d!\n",
+ sampl & 0x000f,
+ devpriv->chanlist[s->async->cur_chan]);
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
pci9118_ai_cancel(dev, s);
comedi_event(dev, s);
@@ -604,11 +709,13 @@ static void interrupt_pci9118_ai_onesample(struct comedi_device *dev,
#endif
cfc_write_to_buffer(s, sampl);
s->async->cur_chan++;
- if (s->async->cur_chan >= devpriv->ai_n_scanlen) { /* one scan done */
+ if (s->async->cur_chan >= devpriv->ai_n_scanlen) {
+ /* one scan done */
s->async->cur_chan %= devpriv->ai_n_scanlen;
devpriv->ai_act_scan++;
if (!(devpriv->ai_neverending))
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
+ /* all data sampled */
pci9118_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
}
@@ -644,16 +751,19 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
comedi_event(dev, s);
return;
}
-
if (int_adstat & devpriv->ai_maskerr)
-/* if (int_adstat & 0x106) */
+ /* if (int_adstat & 0x106) */
if (pci9118_decode_error_status(dev, s, int_adstat))
return;
- samplesinbuf = devpriv->dmabuf_use_size[devpriv->dma_actbuf] >> 1; /* number of received real samples */
+ samplesinbuf = devpriv->dmabuf_use_size[devpriv->dma_actbuf] >> 1;
+ /* number of received real samples */
/* DPRINTK("dma_actbuf=%d\n",devpriv->dma_actbuf); */
- if (devpriv->dma_doublebuf) { /* switch DMA buffers if is used double buffering */
+ if (devpriv->dma_doublebuf) { /*
+ * switch DMA buffers if
+ * double buffering is used
+ */
next_dma_buf = 1 - devpriv->dma_actbuf;
outl(devpriv->dmabuf_hw[next_dma_buf],
devpriv->iobase_a + AMCC_OP_REG_MWAR);
@@ -666,25 +776,32 @@ static void interrupt_pci9118_ai_dma(struct comedi_device *dev,
}
if (samplesinbuf) {
- m = devpriv->ai_data_len >> 1; /* how many samples is to end of buffer */
-/* DPRINTK("samps=%d m=%d %d %d\n",samplesinbuf,m,s->async->buf_int_count,s->async->buf_int_ptr); */
+ m = devpriv->ai_data_len >> 1; /*
+ * how many samples to the
+ * end of the buffer
+ */
+/*
+ * DPRINTK("samps=%d m=%d %d %d\n",
+ * samplesinbuf,m,s->async->buf_int_count,s->async->buf_int_ptr);
+ */
sampls = m;
move_block_from_dma(dev, s,
devpriv->dmabuf_virt[devpriv->dma_actbuf],
samplesinbuf);
- m = m - sampls; /* m= how many samples was transfered */
+ m = m - sampls; /* m = how many samples were transferred */
}
/* DPRINTK("YYY\n"); */
if (!devpriv->ai_neverending)
- if (devpriv->ai_act_scan >= devpriv->ai_scans) { /* all data sampled */
+ if (devpriv->ai_act_scan >= devpriv->ai_scans) {
+ /* all data sampled */
pci9118_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
}
- if (devpriv->dma_doublebuf) { /* switch dma buffers */
+ if (devpriv->dma_doublebuf) { /* switch dma buffers */
devpriv->dma_actbuf = 1 - devpriv->dma_actbuf;
- } else { /* restart DMA if is not used double buffering */
+ } else { /* restart DMA if double buffering is not used */
outl(devpriv->dmabuf_hw[0],
devpriv->iobase_a + AMCC_OP_REG_MWAR);
outl(devpriv->dmabuf_use_size[0],
@@ -705,39 +822,62 @@ static irqreturn_t interrupt_pci9118(int irq, void *d)
unsigned int int_daq = 0, int_amcc, int_adstat;
if (!dev->attached)
- return IRQ_NONE; /* not fully initialized */
+ return IRQ_NONE; /* not fully initialized */
- int_daq = inl(dev->iobase + PCI9118_INTSRC) & 0xf; /* get IRQ reasons from card */
- int_amcc = inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* get INT register from AMCC chip */
+ int_daq = inl(dev->iobase + PCI9118_INTSRC) & 0xf;
+ /* get IRQ reasons from card */
+ int_amcc = inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* get INT register from AMCC chip */
-/* DPRINTK("INT daq=0x%01x amcc=0x%08x MWAR=0x%08x MWTC=0x%08x ADSTAT=0x%02x ai_do=%d\n", int_daq, int_amcc, inl(devpriv->iobase_a+AMCC_OP_REG_MWAR), inl(devpriv->iobase_a+AMCC_OP_REG_MWTC), inw(dev->iobase+PCI9118_ADSTAT)&0x1ff,devpriv->ai_do); */
+/*
+ * DPRINTK("INT daq=0x%01x amcc=0x%08x MWAR=0x%08x
+ * MWTC=0x%08x ADSTAT=0x%02x ai_do=%d\n",
+ * int_daq, int_amcc, inl(devpriv->iobase_a+AMCC_OP_REG_MWAR),
+ * inl(devpriv->iobase_a+AMCC_OP_REG_MWTC),
+ * inw(dev->iobase+PCI9118_ADSTAT)&0x1ff,devpriv->ai_do);
+ */
if ((!int_daq) && (!(int_amcc & ANY_S593X_INT)))
- return IRQ_NONE; /* interrupt from other source */
+ return IRQ_NONE; /* interrupt from other source */
- outl(int_amcc | 0x00ff0000, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* shutdown IRQ reasons in AMCC */
+ outl(int_amcc | 0x00ff0000, devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* shutdown IRQ reasons in AMCC */
- int_adstat = inw(dev->iobase + PCI9118_ADSTAT) & 0x1ff; /* get STATUS register */
+ int_adstat = inw(dev->iobase + PCI9118_ADSTAT) & 0x1ff;
+ /* get STATUS register */
if (devpriv->ai_do) {
if (devpriv->ai12_startstop)
- if ((int_adstat & AdStatus_DTH) && (int_daq & Int_DTrg)) { /* start stop of measure */
+ if ((int_adstat & AdStatus_DTH) &&
+ (int_daq & Int_DTrg)) {
+ /* start/stop of measurement */
if (devpriv->ai12_startstop & START_AI_EXT) {
devpriv->ai12_startstop &=
~START_AI_EXT;
if (!(devpriv->ai12_startstop &
- STOP_AI_EXT))
- pci9118_exttrg_del(dev, EXTTRG_AI); /* deactivate EXT trigger */
- start_pacer(dev, devpriv->ai_do, devpriv->ai_divisor1, devpriv->ai_divisor2); /* start pacer */
+ STOP_AI_EXT))
+ pci9118_exttrg_del
+ (dev, EXTTRG_AI);
+ /* deactivate EXT trigger */
+ start_pacer(dev, devpriv->ai_do,
+ devpriv->ai_divisor1,
+ devpriv->ai_divisor2);
+ /* start pacer */
outl(devpriv->AdControlReg,
- dev->iobase + PCI9118_ADCNTRL);
+ dev->iobase + PCI9118_ADCNTRL);
} else {
if (devpriv->ai12_startstop &
- STOP_AI_EXT) {
+ STOP_AI_EXT) {
devpriv->ai12_startstop &=
- ~STOP_AI_EXT;
- pci9118_exttrg_del(dev, EXTTRG_AI); /* deactivate EXT trigger */
- devpriv->ai_neverending = 0; /* well, on next interrupt from DMA/EOC measure will stop */
+ ~STOP_AI_EXT;
+ pci9118_exttrg_del
+ (dev, EXTTRG_AI);
+ /* deactivate EXT trigger */
+ devpriv->ai_neverending = 0;
+ /*
+ * well, the measurement will stop on the
+ * next interrupt from DMA/EOC
+ */
}
}
}
@@ -781,7 +921,8 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
+ unsigned int divisor1 = 0, divisor2 = 0;
/* step 1: make sure trigger sources are trivially valid */
@@ -791,20 +932,20 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
err++;
tmp = cmd->scan_begin_src;
- if (devpriv->master) {
+ if (devpriv->master)
cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW;
- } else {
+ else
cmd->scan_begin_src &= TRIG_FOLLOW;
- }
+
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
tmp = cmd->convert_src;
- if (devpriv->master) {
+ if (devpriv->master)
cmd->convert_src &= TRIG_TIMER | TRIG_EXT | TRIG_NOW;
- } else {
+ else
cmd->convert_src &= TRIG_TIMER | TRIG_EXT;
- }
+
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
@@ -821,7 +962,11 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /*
+ * step 2:
+ * make sure trigger sources are
+ * unique and mutually compatible
+ */
if (cmd->start_src != TRIG_NOW &&
cmd->start_src != TRIG_INT && cmd->start_src != TRIG_EXT) {
@@ -1026,7 +1171,7 @@ static int pci9118_ai_cmdtest(struct comedi_device *dev,
if (cmd->chanlist)
if (!check_channel_list(dev, s, cmd->chanlist_len,
cmd->chanlist, 0, 0))
- return 5; /* incorrect channels list */
+ return 5; /* incorrect channel list */
return 0;
}
@@ -1043,88 +1188,101 @@ static int Compute_and_setup_dma(struct comedi_device *dev)
dmalen1 = devpriv->dmabuf_size[1];
DPRINTK("1 dmalen0=%d dmalen1=%d ai_data_len=%d\n", dmalen0, dmalen1,
devpriv->ai_data_len);
- /* isn't output buff smaller that our DMA buff? */
+ /* isn't the output buffer smaller than our DMA buffer? */
if (dmalen0 > (devpriv->ai_data_len)) {
- dmalen0 = devpriv->ai_data_len & ~3L; /* allign to 32bit down */
+ dmalen0 = devpriv->ai_data_len & ~3L; /*
+ * align to 32bit down
+ */
}
if (dmalen1 > (devpriv->ai_data_len)) {
- dmalen1 = devpriv->ai_data_len & ~3L; /* allign to 32bit down */
+ dmalen1 = devpriv->ai_data_len & ~3L; /*
+ * align to 32bit down
+ */
}
DPRINTK("2 dmalen0=%d dmalen1=%d \n", dmalen0, dmalen1);
- /* we want wake up every scan? */
+ /* do we want to wake up on every scan? */
if (devpriv->ai_flags & TRIG_WAKE_EOS) {
if (dmalen0 < (devpriv->ai_n_realscanlen << 1)) {
- /* uff, too short DMA buffer, disable EOS support! */
+ /* uff, too short DMA buffer, disable EOS support! */
devpriv->ai_flags &= (~TRIG_WAKE_EOS);
printk
- ("comedi%d: WAR: DMA0 buf too short, cann't support TRIG_WAKE_EOS (%d<%d)\n",
+ ("comedi%d: WAR: DMA0 buf too short, can't "
+ "support TRIG_WAKE_EOS (%d<%d)\n",
dev->minor, dmalen0,
devpriv->ai_n_realscanlen << 1);
} else {
- /* short first DMA buffer to one scan */
+ /* shorten the first DMA buffer to one scan */
dmalen0 = devpriv->ai_n_realscanlen << 1;
DPRINTK
- ("21 dmalen0=%d ai_n_realscanlen=%d useeoshandle=%d\n",
- dmalen0, devpriv->ai_n_realscanlen,
- devpriv->useeoshandle);
+ ("21 dmalen0=%d ai_n_realscanlen=%d "
+ "useeoshandle=%d\n",
+ dmalen0, devpriv->ai_n_realscanlen,
+ devpriv->useeoshandle);
if (devpriv->useeoshandle)
dmalen0 += 2;
if (dmalen0 < 4) {
printk
- ("comedi%d: ERR: DMA0 buf len bug? (%d<4)\n",
- dev->minor, dmalen0);
+ ("comedi%d: ERR: DMA0 buf len bug? "
+ "(%d<4)\n",
+ dev->minor, dmalen0);
dmalen0 = 4;
}
}
}
if (devpriv->ai_flags & TRIG_WAKE_EOS) {
if (dmalen1 < (devpriv->ai_n_realscanlen << 1)) {
- /* uff, too short DMA buffer, disable EOS support! */
+ /* uff, too short DMA buffer, disable EOS support! */
devpriv->ai_flags &= (~TRIG_WAKE_EOS);
printk
- ("comedi%d: WAR: DMA1 buf too short, cann't support TRIG_WAKE_EOS (%d<%d)\n",
+ ("comedi%d: WAR: DMA1 buf too short, "
+ "can't support TRIG_WAKE_EOS (%d<%d)\n",
dev->minor, dmalen1,
devpriv->ai_n_realscanlen << 1);
} else {
- /* short second DMA buffer to one scan */
+ /* shorten the second DMA buffer to one scan */
dmalen1 = devpriv->ai_n_realscanlen << 1;
DPRINTK
- ("22 dmalen1=%d ai_n_realscanlen=%d useeoshandle=%d\n",
+ ("22 dmalen1=%d ai_n_realscanlen=%d "
+ "useeoshandle=%d\n",
dmalen1, devpriv->ai_n_realscanlen,
devpriv->useeoshandle);
if (devpriv->useeoshandle)
dmalen1 -= 2;
if (dmalen1 < 4) {
printk
- ("comedi%d: ERR: DMA1 buf len bug? (%d<4)\n",
- dev->minor, dmalen1);
+ ("comedi%d: ERR: DMA1 buf len bug? "
+ "(%d<4)\n",
+ dev->minor, dmalen1);
dmalen1 = 4;
}
}
}
DPRINTK("3 dmalen0=%d dmalen1=%d \n", dmalen0, dmalen1);
- /* transfer without TRIG_WAKE_EOS */
+ /* transfer without TRIG_WAKE_EOS */
if (!(devpriv->ai_flags & TRIG_WAKE_EOS)) {
- /* if it's possible then allign DMA buffers to length of scan */
+ /* if possible, align DMA buffers to the scan length */
i = dmalen0;
dmalen0 =
(dmalen0 / (devpriv->ai_n_realscanlen << 1)) *
(devpriv->ai_n_realscanlen << 1);
dmalen0 &= ~3L;
if (!dmalen0)
- dmalen0 = i; /* uff. very long scan? */
+ dmalen0 = i; /* uff. very long scan? */
i = dmalen1;
dmalen1 =
(dmalen1 / (devpriv->ai_n_realscanlen << 1)) *
(devpriv->ai_n_realscanlen << 1);
dmalen1 &= ~3L;
if (!dmalen1)
- dmalen1 = i; /* uff. very long scan? */
- /* if measure isn't neverending then test, if it whole fits into one or two DMA buffers */
+ dmalen1 = i; /* uff. very long scan? */
+ /*
+ * if the measurement isn't neverending, test whether it fits
+ * whole into one or two DMA buffers
+ */
if (!devpriv->ai_neverending) {
- /* fits whole measure into one DMA buffer? */
+ /* does the whole measurement fit into one DMA buffer? */
if (dmalen0 >
((devpriv->ai_n_realscanlen << 1) *
devpriv->ai_scans)) {
@@ -1138,7 +1296,10 @@ static int Compute_and_setup_dma(struct comedi_device *dev)
DPRINTK("3.1 dmalen0=%d dmalen1=%d \n", dmalen0,
dmalen1);
dmalen0 &= ~3L;
- } else { /* fits whole measure into two DMA buffer? */
+ } else { /*
+ * does the whole measurement
+ * fit into two DMA buffers?
+ */
if (dmalen1 >
((devpriv->ai_n_realscanlen << 1) *
devpriv->ai_scans - dmalen0))
@@ -1154,7 +1315,7 @@ static int Compute_and_setup_dma(struct comedi_device *dev)
DPRINTK("4 dmalen0=%d dmalen1=%d \n", dmalen0, dmalen1);
- /* these DMA buffer size we'll be used */
+ /* these DMA buffer sizes will be used */
devpriv->dma_actbuf = 0;
devpriv->dmabuf_use_size[0] = dmalen0;
devpriv->dmabuf_use_size[1] = dmalen1;
@@ -1176,10 +1337,11 @@ static int Compute_and_setup_dma(struct comedi_device *dev)
}
#endif
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) & (~EN_A2P_TRANSFERS), devpriv->iobase_a + AMCC_OP_REG_MCSR); /* stop DMA */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) & (~EN_A2P_TRANSFERS),
+ devpriv->iobase_a + AMCC_OP_REG_MCSR); /* stop DMA */
outl(devpriv->dmabuf_hw[0], devpriv->iobase_a + AMCC_OP_REG_MWAR);
outl(devpriv->dmabuf_use_size[0], devpriv->iobase_a + AMCC_OP_REG_MWTC);
- /* init DMA transfer */
+ /* init DMA transfer */
outl(0x00000000 | AINT_WRITE_COMPL,
devpriv->iobase_a + AMCC_OP_REG_INTCSR);
/* outl(0x02000000|AINT_WRITE_COMPL, devpriv->iobase_a+AMCC_OP_REG_INTCSR); */
@@ -1187,7 +1349,9 @@ static int Compute_and_setup_dma(struct comedi_device *dev)
outl(inl(devpriv->iobase_a +
AMCC_OP_REG_MCSR) | RESET_A2P_FLAGS | A2P_HI_PRIORITY |
EN_A2P_TRANSFERS, devpriv->iobase_a + AMCC_OP_REG_MCSR);
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | EN_A2P_TRANSFERS, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow bus mastering */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | EN_A2P_TRANSFERS,
+ devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* allow bus mastering */
DPRINTK("adl_pci9118 EDBG: END: Compute_and_setup_dma()\n");
return 0;
@@ -1220,17 +1384,21 @@ static int pci9118_ai_docmd_sampl(struct comedi_device *dev,
return -EIO;
};
- devpriv->int_ai_func = interrupt_pci9118_ai_onesample; /* transfer function */
+ devpriv->int_ai_func = interrupt_pci9118_ai_onesample;
+ /* transfer function */
if (devpriv->ai12_startstop)
- pci9118_exttrg_add(dev, EXTTRG_AI); /* activate EXT trigger */
+ pci9118_exttrg_add(dev, EXTTRG_AI);
+ /* activate EXT trigger */
if ((devpriv->ai_do == 1) || (devpriv->ai_do == 2))
devpriv->IntControlReg |= Int_Timer;
devpriv->AdControlReg |= AdControl_Int;
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow INT in AMCC */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00,
+ devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* allow INT in AMCC */
if (!(devpriv->ai12_startstop & (START_AI_EXT | START_AI_INT))) {
outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
@@ -1296,10 +1464,12 @@ static int pci9118_ai_docmd_dma(struct comedi_device *dev,
};
if (devpriv->ai12_startstop) {
- pci9118_exttrg_add(dev, EXTTRG_AI); /* activate EXT trigger */
+ pci9118_exttrg_add(dev, EXTTRG_AI);
+ /* activate EXT trigger */
}
- devpriv->int_ai_func = interrupt_pci9118_ai_dma; /* transfer function */
+ devpriv->int_ai_func = interrupt_pci9118_ai_dma;
+ /* transfer function */
outl(0x02000000 | AINT_WRITE_COMPL,
devpriv->iobase_a + AMCC_OP_REG_INTCSR);
@@ -1342,7 +1512,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_add_back = 0;
devpriv->ai_maskerr = 0x10e;
- /* prepare for start/stop conditions */
+ /* prepare for start/stop conditions */
if (cmd->start_src == TRIG_EXT)
devpriv->ai12_startstop |= START_AI_EXT;
if (cmd->stop_src == TRIG_EXT) {
@@ -1369,10 +1539,10 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_scans = 0;
}
- /* use sample&hold signal? */
+ /* use sample&hold signal? */
if (cmd->convert_src == TRIG_NOW) {
devpriv->usessh = 1;
- } /* yes */
+ } /* yes */
else {
devpriv->usessh = 0;
} /* no */
@@ -1381,7 +1551,10 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_neverending, devpriv->ai_scans, devpriv->usessh,
devpriv->ai12_startstop);
- /* use additional sample at end of every scan to satisty DMA 32 bit transfer? */
+ /*
+ * use an additional sample at the end of every scan
+ * to satisfy the DMA 32 bit transfer?
+ */
devpriv->ai_add_front = 0;
devpriv->ai_add_back = 0;
devpriv->useeoshandle = 0;
@@ -1393,27 +1566,44 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_add_back = 1;
}
if (cmd->convert_src == TRIG_TIMER) {
- devpriv->usedma = 0; /* use INT transfer if scanlist have only one channel */
+ devpriv->usedma = 0;
+ /*
+ * use INT transfer if scanlist
+ * have only one channel
+ */
}
}
if ((cmd->flags & TRIG_WAKE_EOS) &&
(devpriv->ai_n_scanlen & 1) &&
(devpriv->ai_n_scanlen > 1)) {
if (cmd->scan_begin_src == TRIG_FOLLOW) {
- /* vpriv->useeoshandle=1; // change DMA transfer block to fit EOS on every second call */
- devpriv->usedma = 0; /* XXX maybe can be corrected to use 16 bit DMA */
- } else { /* well, we must insert one sample to end of EOS to meet 32 bit transfer */
+ /*
+ * vpriv->useeoshandle=1; // change DMA transfer
+ * block to fit EOS on every second call
+ */
+ devpriv->usedma = 0;
+ /*
+ * XXX maybe can be corrected to use 16 bit DMA
+ */
+ } else { /*
+ * well, we must insert one sample
+ * to end of EOS to meet 32 bit transfer
+ */
devpriv->ai_add_back = 1;
}
}
- } else { /* interrupt transfer don't need any correction */
+ } else { /* interrupt transfer doesn't need any correction */
devpriv->usedma = 0;
}
- /* we need software S&H signal? It add two samples before every scan as minimum */
+ /*
+ * do we need a software S&H signal?
+ * It adds at least two samples before every scan
+ */
if (devpriv->usessh && devpriv->softsshdelay) {
devpriv->ai_add_front = 2;
- if ((devpriv->usedma == 1) && (devpriv->ai_add_back == 1)) { /* move it to front */
+ if ((devpriv->usedma == 1) && (devpriv->ai_add_back == 1)) {
+ /* move it to front */
devpriv->ai_add_front++;
devpriv->ai_add_back = 0;
}
@@ -1422,17 +1612,22 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
addchans = devpriv->softsshdelay / cmd->convert_arg;
if (devpriv->softsshdelay % cmd->convert_arg)
addchans++;
- if (addchans > (devpriv->ai_add_front - 1)) { /* uff, still short :-( */
+ if (addchans > (devpriv->ai_add_front - 1)) {
+ /* uff, still short */
devpriv->ai_add_front = addchans + 1;
if (devpriv->usedma == 1)
if ((devpriv->ai_add_front +
devpriv->ai_n_chan +
devpriv->ai_add_back) & 1)
- devpriv->ai_add_front++; /* round up to 32 bit */
+ devpriv->ai_add_front++;
+ /* round up to 32 bit */
}
}
- /* well, we now know what must be all added */
- devpriv->ai_n_realscanlen = /* what we must take from card in real to have ai_n_scanlen on output? */
+ /* well, we now know what must all be added */
+ devpriv->ai_n_realscanlen = /*
+ * what we really must take from the card
+ * to have ai_n_scanlen on output?
+ */
(devpriv->ai_add_front + devpriv->ai_n_chan +
devpriv->ai_add_back) * (devpriv->ai_n_scanlen /
devpriv->ai_n_chan);
@@ -1443,7 +1638,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_n_chan, devpriv->ai_add_back,
devpriv->ai_n_scanlen);
- /* check and setup channel list */
+ /* check and setup channel list */
if (!check_channel_list(dev, s, devpriv->ai_n_chan,
devpriv->ai_chanlist, devpriv->ai_add_front,
devpriv->ai_add_back))
@@ -1454,9 +1649,16 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->useeoshandle))
return -EINVAL;
- /* compute timers settings */
- /* simplest way, fr=4Mhz/(tim1*tim2), channel manipulation without timers effect */
- if (((cmd->scan_begin_src == TRIG_FOLLOW) || (cmd->scan_begin_src == TRIG_EXT) || (cmd->scan_begin_src == TRIG_INT)) && (cmd->convert_src == TRIG_TIMER)) { /* both timer is used for one time */
+ /* compute timers settings */
+ /*
+ * simplest way, fr=4Mhz/(tim1*tim2),
+ * channel manipulation without timers effect
+ */
+ if (((cmd->scan_begin_src == TRIG_FOLLOW) ||
+ (cmd->scan_begin_src == TRIG_EXT) ||
+ (cmd->scan_begin_src == TRIG_INT)) &&
+ (cmd->convert_src == TRIG_TIMER)) {
+ /* both timers are used for one timing */
if (cmd->scan_begin_src == TRIG_EXT) {
devpriv->ai_do = 4;
} else {
@@ -1472,10 +1674,14 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_timer2 = cmd->convert_arg;
}
- if ((cmd->scan_begin_src == TRIG_TIMER) && ((cmd->convert_src == TRIG_TIMER) || (cmd->convert_src == TRIG_NOW))) { /* double timed action */
+ if ((cmd->scan_begin_src == TRIG_TIMER) &&
+ ((cmd->convert_src == TRIG_TIMER) ||
+ (cmd->convert_src == TRIG_NOW))) {
+ /* double timed action */
if (!devpriv->usedma) {
comedi_error(dev,
- "cmd->scan_begin_src=TRIG_TIMER works only with bus mastering!");
+ "cmd->scan_begin_src=TRIG_TIMER works "
+ "only with bus mastering!");
return -EIO;
}
@@ -1496,15 +1702,27 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_do = 3;
}
- start_pacer(dev, -1, 0, 0); /* stop pacer */
+ start_pacer(dev, -1, 0, 0); /* stop pacer */
- devpriv->AdControlReg = 0; /* bipolar, S.E., use 8254, stop 8354, internal trigger, soft trigger, disable DMA */
+ devpriv->AdControlReg = 0; /*
+ * bipolar, S.E., use 8254, stop 8354,
+ * internal trigger, soft trigger,
+ * disable DMA
+ */
outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
- devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg; /* positive triggers, no S&H, no burst, burst stop, no post trigger, no about trigger, trigger stop */
+ devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
+ /*
+ * positive triggers, no S&H, no burst,
+ * burst stop, no post trigger,
+ * no about trigger, trigger stop
+ */
outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
udelay(1);
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
- inl(dev->iobase + PCI9118_ADSTAT); /* flush A/D and INT status register */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ inl(dev->iobase + PCI9118_ADSTAT); /*
+ * flush A/D and INT
+ * status register
+ */
inl(dev->iobase + PCI9118_INTSRC);
devpriv->ai_act_scan = 0;
@@ -1537,33 +1755,37 @@ static int check_channel_list(struct comedi_device *dev,
}
if ((frontadd + n_chan + backadd) > s->len_chanlist) {
printk
- ("comedi%d: range/channel list is too long for actual configuration (%d>%d)!",
+ ("comedi%d: range/channel list is too long for "
+ "actual configuration (%d>%d)!",
dev->minor, n_chan, s->len_chanlist - frontadd - backadd);
return 0;
}
if (CR_AREF(chanlist[0]) == AREF_DIFF)
- differencial = 1; /* all input must be diff */
+ differencial = 1; /* all inputs must be diff */
if (CR_RANGE(chanlist[0]) < PCI9118_BIPOLAR_RANGES)
- bipolar = 1; /* all input must be bipolar */
+ bipolar = 1; /* all inputs must be bipolar */
if (n_chan > 1)
- for (i = 1; i < n_chan; i++) { /* check S.E/diff */
+ for (i = 1; i < n_chan; i++) { /* check S.E/diff */
if ((CR_AREF(chanlist[i]) == AREF_DIFF) !=
(differencial)) {
comedi_error(dev,
- "Differencial and single ended inputs cann't be mixtured!");
+ "Differencial and single ended "
+ "inputs can't be mixtured!");
return 0;
}
if ((CR_RANGE(chanlist[i]) < PCI9118_BIPOLAR_RANGES) !=
(bipolar)) {
comedi_error(dev,
- "Bipolar and unipolar ranges cann't be mixtured!");
+ "Bipolar and unipolar ranges "
+ "can't be mixtured!");
return 0;
}
if ((!devpriv->usemux) & (differencial) &
(CR_CHAN(chanlist[i]) >= this_board->n_aichand)) {
comedi_error(dev,
- "If AREF_DIFF is used then is available only first 8 channels!");
+ "If AREF_DIFF is used then is "
+ "available only first 8 channels!");
return 0;
}
}
@@ -1583,7 +1805,8 @@ static int setup_channel_list(struct comedi_device *dev,
unsigned int scanquad, gain, ssh = 0x00;
DPRINTK
- ("adl_pci9118 EDBG: BGN: setup_channel_list(%d,.,%d,.,%d,%d,%d,%d)\n",
+ ("adl_pci9118 EDBG: BGN: setup_channel_list"
+ "(%d,.,%d,.,%d,%d,%d,%d)\n",
dev->minor, n_chan, rot, frontadd, backadd, usedma);
if (usedma == 1) {
@@ -1592,27 +1815,33 @@ static int setup_channel_list(struct comedi_device *dev,
}
if (CR_AREF(chanlist[0]) == AREF_DIFF)
- differencial = 1; /* all input must be diff */
+ differencial = 1; /* all inputs must be diff */
if (CR_RANGE(chanlist[0]) < PCI9118_BIPOLAR_RANGES)
- bipolar = 1; /* all input must be bipolar */
+ bipolar = 1; /* all inputs must be bipolar */
- /* All is ok, so we can setup channel/range list */
+ /* All is ok, so we can setup channel/range list */
if (!bipolar) {
- devpriv->AdControlReg |= AdControl_UniP; /* set unibipolar */
+ devpriv->AdControlReg |= AdControl_UniP;
+ /* set unibipolar */
} else {
- devpriv->AdControlReg &= ((~AdControl_UniP) & 0xff); /* enable bipolar */
+ devpriv->AdControlReg &= ((~AdControl_UniP) & 0xff);
+ /* enable bipolar */
}
if (differencial) {
- devpriv->AdControlReg |= AdControl_Diff; /* enable diff inputs */
+ devpriv->AdControlReg |= AdControl_Diff;
+ /* enable diff inputs */
} else {
- devpriv->AdControlReg &= ((~AdControl_Diff) & 0xff); /* set single ended inputs */
+ devpriv->AdControlReg &= ((~AdControl_Diff) & 0xff);
+ /* set single ended inputs */
}
- outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* setup mode */
+ outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
+ /* setup mode */
- outl(2, dev->iobase + PCI9118_SCANMOD); /* gods know why this sequence! */
+ outl(2, dev->iobase + PCI9118_SCANMOD);
+ /* gods know why this sequence! */
outl(0, dev->iobase + PCI9118_SCANMOD);
outl(1, dev->iobase + PCI9118_SCANMOD);
@@ -1622,12 +1851,15 @@ static int setup_channel_list(struct comedi_device *dev,
devpriv->chanlist[i] = 0x55aa;
#endif
- if (frontadd) { /* insert channels for S&H */
+ if (frontadd) { /* insert channels for S&H */
ssh = devpriv->softsshsample;
DPRINTK("FA: %04x: ", ssh);
- for (i = 0; i < frontadd; i++) { /* store range list to card */
- scanquad = CR_CHAN(chanlist[0]); /* get channel number; */
- gain = CR_RANGE(chanlist[0]); /* get gain number */
+ for (i = 0; i < frontadd; i++) {
+ /* store range list to card */
+ scanquad = CR_CHAN(chanlist[0]);
+ /* get channel number */
+ gain = CR_RANGE(chanlist[0]);
+ /* get gain number */
scanquad |= ((gain & 0x03) << 8);
outl(scanquad | ssh, dev->iobase + PCI9118_GAIN);
DPRINTK("%02x ", scanquad | ssh);
@@ -1637,23 +1869,24 @@ static int setup_channel_list(struct comedi_device *dev,
}
DPRINTK("SL: ", ssh);
- for (i = 0; i < n_chan; i++) { /* store range list to card */
- scanquad = CR_CHAN(chanlist[i]); /* get channel number; */
+ for (i = 0; i < n_chan; i++) { /* store range list to card */
+ scanquad = CR_CHAN(chanlist[i]); /* get channel number */
#ifdef PCI9118_PARANOIDCHECK
devpriv->chanlist[i ^ usedma] = (scanquad & 0xf) << rot;
#endif
- gain = CR_RANGE(chanlist[i]); /* get gain number */
+ gain = CR_RANGE(chanlist[i]); /* get gain number */
scanquad |= ((gain & 0x03) << 8);
outl(scanquad | ssh, dev->iobase + PCI9118_GAIN);
DPRINTK("%02x ", scanquad | ssh);
}
DPRINTK("\n ");
- if (backadd) { /* insert channels for fit onto 32bit DMA */
+ if (backadd) { /* insert channels for fit onto 32bit DMA */
DPRINTK("BA: %04x: ", ssh);
- for (i = 0; i < backadd; i++) { /* store range list to card */
- scanquad = CR_CHAN(chanlist[0]); /* get channel number; */
- gain = CR_RANGE(chanlist[0]); /* get gain number */
+ for (i = 0; i < backadd; i++) { /* store range list to card */
+ scanquad = CR_CHAN(chanlist[0]);
+ /* get channel number */
+ gain = CR_RANGE(chanlist[0]); /* get gain number */
scanquad |= ((gain & 0x03) << 8);
outl(scanquad | ssh, dev->iobase + PCI9118_GAIN);
DPRINTK("%02x ", scanquad | ssh);
@@ -1661,30 +1894,33 @@ static int setup_channel_list(struct comedi_device *dev,
DPRINTK("\n ");
}
#ifdef PCI9118_PARANOIDCHECK
- devpriv->chanlist[n_chan ^ usedma] = devpriv->chanlist[0 ^ usedma]; /* for 32bit oerations */
+ devpriv->chanlist[n_chan ^ usedma] = devpriv->chanlist[0 ^ usedma];
+ /* for 32bit operations */
if (useeos) {
- for (i = 1; i < n_chan; i++) { /* store range list to card */
+ for (i = 1; i < n_chan; i++) { /* store range list to card */
devpriv->chanlist[(n_chan + i) ^ usedma] =
(CR_CHAN(chanlist[i]) & 0xf) << rot;
}
- devpriv->chanlist[(2 * n_chan) ^ usedma] = devpriv->chanlist[0 ^ usedma]; /* for 32bit oerations */
+ devpriv->chanlist[(2 * n_chan) ^ usedma] =
+ devpriv->chanlist[0 ^ usedma];
+ /* for 32bit operations */
useeos = 2;
} else {
useeos = 1;
}
#ifdef PCI9118_EXTDEBUG
DPRINTK("CHL: ");
- for (i = 0; i <= (useeos * n_chan); i++) {
+ for (i = 0; i <= (useeos * n_chan); i++)
DPRINTK("%04x ", devpriv->chanlist[i]);
- }
+
DPRINTK("\n ");
#endif
#endif
- outl(0, dev->iobase + PCI9118_SCANMOD); /* close scan queue */
-/* udelay(100); important delay, or first sample will be cripled */
+ outl(0, dev->iobase + PCI9118_SCANMOD); /* close scan queue */
+ /* udelay(100); important delay, or first sample will be crippled */
DPRINTK("adl_pci9118 EDBG: END: setup_channel_list()\n");
- return 1; /* we can serve this with scan logic */
+ return 1; /* we can serve this with scan logic */
}
/*
@@ -1699,7 +1935,8 @@ static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
char usessh, unsigned int chnsshfront)
{
DPRINTK
- ("adl_pci9118 EDBG: BGN: pci9118_calc_divisors(%d,%d,.,%u,%u,%u,%d,.,.,,%u,%u)\n",
+ ("adl_pci9118 EDBG: BGN: pci9118_calc_divisors"
+ "(%d,%d,.,%u,%u,%u,%d,.,.,,%u,%u)\n",
mode, dev->minor, *tim1, *tim2, flags, chans, usessh, chnsshfront);
switch (mode) {
case 1:
@@ -1716,17 +1953,18 @@ static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
*tim2 = this_board->ai_ns_min;
DPRINTK("1 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
- *div1 = *tim2 / devpriv->i8254_osc_base; /* convert timer (burst) */
+ *div1 = *tim2 / devpriv->i8254_osc_base;
+ /* convert timer (burst) */
DPRINTK("2 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
if (*div1 < this_board->ai_pacer_min)
*div1 = this_board->ai_pacer_min;
DPRINTK("3 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
- *div2 = *tim1 / devpriv->i8254_osc_base; /* scan timer */
+ *div2 = *tim1 / devpriv->i8254_osc_base; /* scan timer */
DPRINTK("4 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
- *div2 = *div2 / *div1; /* major timer is c1*c2 */
+ *div2 = *div2 / *div1; /* major timer is c1*c2 */
DPRINTK("5 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
if (*div2 < chans)
@@ -1734,9 +1972,10 @@ static void pci9118_calc_divisors(char mode, struct comedi_device *dev,
DPRINTK("6 div1=%u div2=%u timer1=%u timer2=%u\n", *div1, *div2,
*tim1, *tim2);
- *tim2 = *div1 * devpriv->i8254_osc_base; /* real convert timer */
+ *tim2 = *div1 * devpriv->i8254_osc_base;
+ /* real convert timer */
- if (usessh & (chnsshfront == 0)) /* use BSSH signal */
+ if (usessh & (chnsshfront == 0)) /* use BSSH signal */
if (*div2 < (chans + 2))
*div2 = chans + 2;
@@ -1776,11 +2015,13 @@ static void start_pacer(struct comedi_device *dev, int mode,
static int pci9118_exttrg_add(struct comedi_device *dev, unsigned char source)
{
if (source > 3)
- return -1; /* incorrect source */
+ return -1; /* incorrect source */
devpriv->exttrg_users |= (1 << source);
devpriv->IntControlReg |= Int_DTrg;
outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow INT in AMCC */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00,
+ devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* allow INT in AMCC */
return 0;
}
@@ -1790,12 +2031,15 @@ static int pci9118_exttrg_add(struct comedi_device *dev, unsigned char source)
static int pci9118_exttrg_del(struct comedi_device *dev, unsigned char source)
{
if (source > 3)
- return -1; /* incorrect source */
+ return -1; /* incorrect source */
devpriv->exttrg_users &= ~(1 << source);
- if (!devpriv->exttrg_users) { /* shutdown ext trg intterrupts */
+ if (!devpriv->exttrg_users) { /* shutdown ext trg interrupts */
devpriv->IntControlReg &= ~Int_DTrg;
- if (!devpriv->IntControlReg) /* all IRQ disabled */
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) & (~0x00001f00), devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* disable int in AMCC */
+ if (!devpriv->IntControlReg) /* all IRQ disabled */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) &
+ (~0x00001f00),
+ devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* disable int in AMCC */
outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
}
return 0;
@@ -1808,17 +2052,29 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
if (devpriv->usedma)
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) & (~EN_A2P_TRANSFERS), devpriv->iobase_a + AMCC_OP_REG_MCSR); /* stop DMA */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_MCSR) &
+ (~EN_A2P_TRANSFERS),
+ devpriv->iobase_a + AMCC_OP_REG_MCSR); /* stop DMA */
pci9118_exttrg_del(dev, EXTTRG_AI);
- start_pacer(dev, 0, 0, 0); /* stop 8254 counters */
+ start_pacer(dev, 0, 0, 0); /* stop 8254 counters */
devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
- outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); /* positive triggers, no S&H, no burst, burst stop, no post trigger, no about trigger, trigger stop */
+ outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
+ /*
+ * positive triggers, no S&H, no burst,
+ * burst stop, no post trigger,
+ * no about trigger, trigger stop
+ */
devpriv->AdControlReg = 0x00;
- outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* bipolar, S.E., use 8254, stop 8354, internal trigger, soft trigger, disable INT and DMA */
+ outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
+ /*
+ * bipolar, S.E., use 8254, stop 8354,
+ * internal trigger, soft trigger,
+ * disable INT and DMA
+ */
outl(0, dev->iobase + PCI9118_BURST);
outl(1, dev->iobase + PCI9118_SCANMOD);
- outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
devpriv->ai_do = 0;
devpriv->usedma = 0;
@@ -1832,7 +2088,9 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
devpriv->dma_actbuf = 0;
if (!devpriv->IntControlReg)
- outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00, devpriv->iobase_a + AMCC_OP_REG_INTCSR); /* allow INT in AMCC */
+ outl(inl(devpriv->iobase_a + AMCC_OP_REG_INTCSR) | 0x1f00,
+ devpriv->iobase_a + AMCC_OP_REG_INTCSR);
+ /* allow INT in AMCC */
return 0;
}
@@ -1845,31 +2103,52 @@ static int pci9118_reset(struct comedi_device *dev)
devpriv->IntControlReg = 0;
devpriv->exttrg_users = 0;
inl(dev->iobase + PCI9118_INTCTRL);
- outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL); /* disable interrupts source */
+ outl(devpriv->IntControlReg, dev->iobase + PCI9118_INTCTRL);
+ /* disable interrupts source */
outl(0x30, dev->iobase + PCI9118_CNTCTRL);
/* outl(0xb4, dev->iobase + PCI9118_CNTCTRL); */
- start_pacer(dev, 0, 0, 0); /* stop 8254 counters */
+ start_pacer(dev, 0, 0, 0); /* stop 8254 counters */
devpriv->AdControlReg = 0;
- outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* bipolar, S.E., use 8254, stop 8354, internal trigger, soft trigger, disable INT and DMA */
+ outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
+ /*
+ * bipolar, S.E., use 8254,
+ * stop 8354, internal trigger,
+ * soft trigger,
+ * disable INT and DMA
+ */
outl(0, dev->iobase + PCI9118_BURST);
outl(1, dev->iobase + PCI9118_SCANMOD);
- outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */
+ outl(2, dev->iobase + PCI9118_SCANMOD); /* reset scan queue */
devpriv->AdFunctionReg = AdFunction_PDTrg | AdFunction_PETrg;
- outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC); /* positive triggers, no S&H, no burst, burst stop, no post trigger, no about trigger, trigger stop */
+ outl(devpriv->AdFunctionReg, dev->iobase + PCI9118_ADFUNC);
+ /*
+ * positive triggers, no S&H,
+ * no burst, burst stop,
+ * no post trigger,
+ * no about trigger,
+ * trigger stop
+ */
devpriv->ao_data[0] = 2047;
devpriv->ao_data[1] = 2047;
- outl(devpriv->ao_data[0], dev->iobase + PCI9118_DA1); /* reset A/D outs to 0V */
+ outl(devpriv->ao_data[0], dev->iobase + PCI9118_DA1);
+ /* reset D/A outputs to 0V */
outl(devpriv->ao_data[1], dev->iobase + PCI9118_DA2);
- outl(0, dev->iobase + PCI9118_DO); /* reset digi outs to L */
+ outl(0, dev->iobase + PCI9118_DO); /* reset digi outs to L */
udelay(10);
inl(dev->iobase + PCI9118_AD_DATA);
- outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
- outl(0, dev->iobase + PCI9118_INTSRC); /* remove INT requests */
- inl(dev->iobase + PCI9118_ADSTAT); /* flush A/D status register */
- inl(dev->iobase + PCI9118_INTSRC); /* flush INT requests */
+ outl(0, dev->iobase + PCI9118_DELFIFO); /* flush FIFO */
+ outl(0, dev->iobase + PCI9118_INTSRC); /* remove INT requests */
+ inl(dev->iobase + PCI9118_ADSTAT); /* flush A/D status register */
+ inl(dev->iobase + PCI9118_INTSRC); /* flush INT requests */
devpriv->AdControlReg = 0;
- outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL); /* bipolar, S.E., use 8254, stop 8354, internal trigger, soft trigger, disable INT and DMA */
+ outl(devpriv->AdControlReg, dev->iobase + PCI9118_ADCNTRL);
+ /*
+ * bipolar, S.E., use 8254,
+ * stop 8354, internal trigger,
+ * soft trigger,
+ * disable INT and DMA
+ */
devpriv->cnt0_users = 0;
devpriv->exttrg_users = 0;
@@ -1899,7 +2178,7 @@ static int pci9118_attach(struct comedi_device *dev,
opt_bus = it->options[0];
opt_slot = it->options[1];
if (it->options[3] & 1) {
- master = 0; /* user don't want use bus master */
+ master = 0; /* user doesn't want to use bus master */
} else {
master = 1;
}
@@ -1937,17 +2216,17 @@ static int pci9118_attach(struct comedi_device *dev,
if (!pcidev) {
if (opt_bus || opt_slot) {
- printk(" - Card at b:s %d:%d %s\n",
+ printk(KERN_ERR " - Card at b:s %d:%d %s\n",
opt_bus, opt_slot, errstr);
} else {
- printk(" - Card %s\n", errstr);
+ printk(KERN_ERR " - Card %s\n", errstr);
}
return -EIO;
}
- if (master) {
+ if (master)
pci_set_master(pcidev);
- }
+
pci_bus = pcidev->bus->number;
pci_slot = PCI_SLOT(pcidev->devfn);
@@ -1956,8 +2235,8 @@ static int pci9118_attach(struct comedi_device *dev,
iobase_a = pci_resource_start(pcidev, 0);
iobase_9 = pci_resource_start(pcidev, 2);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx, 0x%4lx", pci_bus, pci_slot,
- pci_func, iobase_9, iobase_a);
+ printk(KERN_ERR ", b:s:f=%d:%d:%d, io=0x%4lx, 0x%4lx", pci_bus,
+ pci_slot, pci_func, iobase_9, iobase_a);
dev->iobase = iobase_9;
dev->board_name = this_board->name;
@@ -1968,7 +2247,7 @@ static int pci9118_attach(struct comedi_device *dev,
pci9118_reset(dev);
if (it->options[3] & 2)
+ irq = 0; /* user doesn't want to use IRQ */
+ irq = 0; /* user don't want use IRQ */
if (irq > 0) {
if (request_irq(irq, interrupt_pci9118, IRQF_SHARED,
"ADLink PCI-9118", dev)) {
@@ -1984,7 +2263,7 @@ static int pci9118_attach(struct comedi_device *dev,
dev->irq = irq;
- if (master) { /* alloc DMA buffers */
+ if (master) { /* alloc DMA buffers */
devpriv->dma_doublebuf = 0;
for (i = 0; i < 2; i++) {
for (pages = 4; pages >= 0; pages--) {
@@ -2024,16 +2303,18 @@ static int pci9118_attach(struct comedi_device *dev,
if (it->options[2] > 0) {
devpriv->usemux = it->options[2];
if (devpriv->usemux > 256)
- devpriv->usemux = 256; /* max 256 channels! */
+ devpriv->usemux = 256; /* max 256 channels! */
if (it->options[4] > 0)
if (devpriv->usemux > 128) {
- devpriv->usemux = 128; /* max 128 channels with softare S&H! */
+ devpriv->usemux = 128;
+ /* max 128 channels with software S&H! */
}
printk(", ext. mux %d channels", devpriv->usemux);
}
devpriv->softsshdelay = it->options[4];
- if (devpriv->softsshdelay < 0) { /* select sample&hold signal polarity */
+ if (devpriv->softsshdelay < 0) {
+ /* select sample&hold signal polarity */
devpriv->softsshdelay = -devpriv->softsshdelay;
devpriv->softsshsample = 0x80;
devpriv->softsshhold = 0x00;
@@ -2045,7 +2326,8 @@ static int pci9118_attach(struct comedi_device *dev,
printk(".\n");
pci_read_config_word(devpriv->pcidev, PCI_COMMAND, &u16w);
- pci_write_config_word(devpriv->pcidev, PCI_COMMAND, u16w | 64); /* Enable parity check for parity error */
+ pci_write_config_word(devpriv->pcidev, PCI_COMMAND, u16w | 64);
+ /* Enable parity check for parity error */
ret = alloc_subdevices(dev, 4);
if (ret < 0)
@@ -2055,11 +2337,11 @@ static int pci9118_attach(struct comedi_device *dev,
dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
- if (devpriv->usemux) {
+ if (devpriv->usemux)
s->n_chan = devpriv->usemux;
- } else {
+ else
s->n_chan = this_board->n_aichan;
- }
+
s->maxdata = this_board->ai_maxdata;
s->len_chanlist = this_board->n_aichanlist;
s->range_table = this_board->rangelist_ai;
@@ -2103,9 +2385,10 @@ static int pci9118_attach(struct comedi_device *dev,
s->insn_bits = pci9118_insn_bits_do;
devpriv->valid = 1;
- devpriv->i8254_osc_base = 250; /* 250ns=4MHz */
- devpriv->ai_maskharderr = 0x10a; /* default measure crash condition */
- if (it->options[5]) /* disable some requested */
+ devpriv->i8254_osc_base = 250; /* 250ns=4MHz */
+ devpriv->ai_maskharderr = 0x10a;
+ /* default measure crash condition */
+ if (it->options[5]) /* disable some requested */
devpriv->ai_maskharderr &= ~it->options[5];
switch (this_board->ai_maxdata) {
@@ -2130,9 +2413,9 @@ static int pci9118_detach(struct comedi_device *dev)
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv->pcidev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pcidev);
- }
+
pci_dev_put(devpriv->pcidev);
}
if (devpriv->dmabuf_virt[0])
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 394d2ea19c2..67c4f11a36a 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -63,6 +63,8 @@ Configuration options:
#define DPRINTK(fmt, args...)
#endif
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe
+
/* hardware types of the cards */
#define TYPE_PCI171X 0
#define TYPE_PCI1713 2
@@ -657,9 +659,9 @@ static void interrupt_pci1710_every_sample(void *d)
#endif
++s->async->cur_chan;
- if (s->async->cur_chan >= devpriv->ai_n_chan) {
+ if (s->async->cur_chan >= devpriv->ai_n_chan)
s->async->cur_chan = 0;
- }
+
if (s->async->cur_chan == 0) { /* one scan done */
devpriv->ai_act_scan++;
@@ -863,12 +865,12 @@ static int pci171x_ai_docmd_and_mode(int mode, struct comedi_device *dev,
devpriv->ai_eos = 0;
}
- if ((devpriv->ai_scans == 0) || (devpriv->ai_scans == -1)) {
+ if ((devpriv->ai_scans == 0) || (devpriv->ai_scans == -1))
devpriv->neverending_ai = 1;
- } /* well, user want neverending */
- else {
+ /* well, user wants neverending */
+ else
devpriv->neverending_ai = 0;
- }
+
switch (mode) {
case 1:
case 2:
@@ -935,7 +937,8 @@ static int pci171x_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
- int tmp, divisor1 = 0, divisor2 = 0;
+ int tmp;
+ unsigned int divisor1 = 0, divisor2 = 0;
DPRINTK("adv_pci1710 EDBG: BGN: pci171x_ai_cmdtest(...)\n");
#ifdef PCI171X_EXTDEBUG
@@ -1109,11 +1112,11 @@ static int pci171x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
- if (cmd->stop_src == TRIG_COUNT) {
+ if (cmd->stop_src == TRIG_COUNT)
devpriv->ai_scans = cmd->stop_arg;
- } else {
+ else
devpriv->ai_scans = 0;
- }
+
if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 2, 3 */
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 and 2 */
@@ -1593,9 +1596,9 @@ static int pci1710_detach(struct comedi_device *dev)
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv->pcidev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pcidev);
- }
+
pci_dev_put(devpriv->pcidev);
}
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 6b0b7eda3be..9fe8fcc7f1d 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -52,7 +52,7 @@ TODO:
#include "comedi_pci.h"
-#define ADVANTECH_VENDOR 0x13fe /* Advantech PCI vendor ID */
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe /* Advantech PCI vendor ID */
/* hardware types of the cards */
#define TYPE_PCI1723 0
@@ -60,35 +60,57 @@ TODO:
#define IORANGE_1723 0x2A
/* all the registers for the pci1723 board */
-#define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */
-
-#define PCI1723_SYN_SET 0x12 /*synchronized set register */
-#define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12 /*synchronized status register */
-
-#define PCI1723_RANGE_CALIBRATION_MODE 0x14 /* range and calibration mode */
-#define PCI1723_RANGE_CALIBRATION_STATUS 0x14 /* range and calibration status */
-
-#define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16 /* SADC control command for calibration function */
-#define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16 /* SADC control status for calibration function */
-
-#define PCI1723_CALIBRATION_PARA_STROBE 0x18 /* Calibration parameter strobe */
+#define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */
+
+#define PCI1723_SYN_SET 0x12 /* synchronized set register */
+#define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12
+ /* synchronized status register */
+
+#define PCI1723_RANGE_CALIBRATION_MODE 0x14
+ /* range and calibration mode */
+#define PCI1723_RANGE_CALIBRATION_STATUS 0x14
+ /* range and calibration status */
+
+#define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16
+ /*
+ * SADC control command for
+ * calibration function
+ */
+#define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16
+ /*
+ * SADC control status for
+ * calibration function
+ */
+
+#define PCI1723_CALIBRATION_PARA_STROBE 0x18
+ /* Calibration parameter strobe */
#define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */
#define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */
-#define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C /* Write digital output command */
+#define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C
+ /* Write digital output command */
#define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */
-#define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */
-#define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */
+#define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */
+#define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */
-#define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */
+#define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */
-#define PCI1723_RESET_ALL_CHN_STROBE 0x22 /* Reset all D/A channels strobe */
+#define PCI1723_RESET_ALL_CHN_STROBE 0x22
+ /* Reset all D/A channels strobe */
-#define PCI1723_RESET_CAL_CONTROL_STROBE 0x24 /* Reset the calibration controller strobe */
+#define PCI1723_RESET_CAL_CONTROL_STROBE 0x24
+ /*
+ * Reset the calibration
+ * controller strobe
+ */
-#define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26 /* Change D/A channels output type strobe */
+#define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26
+ /*
+ * Change D/A channels output
+ * type strobe
+ */
#define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */
@@ -104,20 +126,20 @@ static const struct comedi_lrange range_pci1723 = { 1, {
*/
struct pci1723_board {
const char *name;
- int vendor_id; /* PCI vendor a device ID of card */
+ int vendor_id; /* PCI vendor and device ID of card */
int device_id;
int iorange;
char cardtype;
- int n_aochan; /* num of D/A chans */
- int n_diochan; /* num of DIO chans */
- int ao_maxdata; /* resolution of D/A */
- const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
+ int n_aochan; /* num of D/A chans */
+ int n_diochan; /* num of DIO chans */
+ int ao_maxdata; /* resolution of D/A */
+ const struct comedi_lrange *rangelist_ao; /* rangelist for D/A */
};
static const struct pci1723_board boardtypes[] = {
{
.name = "pci1723",
- .vendor_id = ADVANTECH_VENDOR,
+ .vendor_id = PCI_VENDOR_ID_ADVANTECH,
.device_id = 0x1723,
.iorange = IORANGE_1723,
.cardtype = TYPE_PCI1723,
@@ -128,8 +150,10 @@ static const struct pci1723_board boardtypes[] = {
},
};
-/* This is used by modprobe to translate PCI IDs to drivers. Should
- * only be used for PCI and ISA-PnP devices */
+/*
+ * This is used by modprobe to translate PCI IDs to drivers.
+ * Should only be used for PCI and ISA-PnP devices
+ */
static DEFINE_PCI_DEVICE_TABLE(pci1723_pci_table) = {
{
PCI_VENDOR_ID_ADVANTECH, 0x1723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
@@ -157,47 +181,47 @@ static struct comedi_driver driver_pci1723 = {
.detach = pci1723_detach,
};
-/* this structure is for data unique to this hardware driver. */
+/* This structure is for data unique to this hardware driver. */
struct pci1723_private {
int valid; /* card is usable; */
struct pci_dev *pcidev;
- unsigned char da_range[8]; /* D/A output range for each channel */
+ unsigned char da_range[8]; /* D/A output range for each channel */
- short ao_data[8]; /* data output buffer */
+ short ao_data[8]; /* data output buffer */
};
-/*the following macro to make it easy to
-* access the private structure.
-*/
+/* The following macro makes it easy to access the private structure. */
#define devpriv ((struct pci1723_private *)dev->private)
#define this_board boardtypes
/*
- * the pci1723 card reset;
+ * The pci1723 card reset;
*/
static int pci1723_reset(struct comedi_device *dev)
{
int i;
DPRINTK("adv_pci1723 EDBG: BGN: pci1723_reset(...)\n");
- outw(0x01, dev->iobase + PCI1723_SYN_SET); /* set synchronous output mode */
+ outw(0x01, dev->iobase + PCI1723_SYN_SET);
+ /* set synchronous output mode */
for (i = 0; i < 8; i++) {
- /* set all outputs to 0V */
+ /* set all outputs to 0V */
devpriv->ao_data[i] = 0x8000;
outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i));
- /* set all ranges to +/- 10V */
+ /* set all ranges to +/- 10V */
devpriv->da_range[i] = 0;
outw(((devpriv->da_range[i] << 4) | i),
PCI1723_RANGE_CALIBRATION_MODE);
}
- outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE); /* update ranges */
- outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */
+ outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE);
+ /* update ranges */
+ outw(0, dev->iobase + PCI1723_SYN_STROBE); /* update outputs */
- /* set asynchronous output mode */
+ /* set asynchronous output mode */
outw(0, dev->iobase + PCI1723_SYN_SET);
DPRINTK("adv_pci1723 EDBG: END: pci1723_reset(...)\n");
@@ -251,11 +275,11 @@ static int pci1723_dio_insn_config(struct comedi_device *dev,
unsigned short dio_mode;
mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x00FF) {
+ if (mask & 0x00FF)
bits = 0x00FF;
- } else {
+ else
bits = 0xFF00;
- }
+
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
s->io_bits &= ~bits;
@@ -270,12 +294,12 @@ static int pci1723_dio_insn_config(struct comedi_device *dev,
return -EINVAL;
}
- /* update hardware DIO mode */
- dio_mode = 0x0000; /* low byte output, high byte output */
+ /* update hardware DIO mode */
+ dio_mode = 0x0000; /* low byte output, high byte output */
if ((s->io_bits & 0x00FF) == 0)
- dio_mode |= 0x0001; /* low byte input */
+ dio_mode |= 0x0001; /* low byte input */
if ((s->io_bits & 0xFF00) == 0)
- dio_mode |= 0x0002; /* high byte input */
+ dio_mode |= 0x0002; /* high byte input */
outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
return 1;
}
@@ -311,7 +335,8 @@ static int pci1723_attach(struct comedi_device *dev,
int opt_bus, opt_slot;
const char *errstr;
- printk("comedi%d: adv_pci1723: board=%s", dev->minor, this_board->name);
+ printk(KERN_ERR "comedi%d: adv_pci1723: board=%s",
+ dev->minor, this_board->name);
opt_bus = it->options[0];
opt_slot = it->options[1];
@@ -349,10 +374,10 @@ static int pci1723_attach(struct comedi_device *dev,
if (!pcidev) {
if (opt_bus || opt_slot) {
- printk(" - Card at b:s %d:%d %s\n",
- opt_bus, opt_slot, errstr);
+ printk(KERN_ERR " - Card at b:s %d:%d %s\n",
+ opt_bus, opt_slot, errstr);
} else {
- printk(" - Card %s\n", errstr);
+ printk(KERN_ERR " - Card %s\n", errstr);
}
return -EIO;
}
@@ -362,8 +387,8 @@ static int pci1723_attach(struct comedi_device *dev,
pci_func = PCI_FUNC(pcidev->devfn);
iobase = pci_resource_start(pcidev, 2);
- printk(", b:s:f=%d:%d:%d, io=0x%4x", pci_bus, pci_slot, pci_func,
- iobase);
+ printk(KERN_ERR ", b:s:f=%d:%d:%d, io=0x%4x",
+ pci_bus, pci_slot, pci_func, iobase);
dev->iobase = iobase;
@@ -398,22 +423,23 @@ static int pci1723_attach(struct comedi_device *dev,
s->insn_write = pci1723_ao_write_winsn;
s->insn_read = pci1723_insn_read_ao;
- /* read DIO config */
- switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE) & 0x03) {
- case 0x00: /* low byte output, high byte output */
+ /* read DIO config */
+ switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE)
+ & 0x03) {
+ case 0x00: /* low byte output, high byte output */
s->io_bits = 0xFFFF;
break;
- case 0x01: /* low byte input, high byte output */
+ case 0x01: /* low byte input, high byte output */
s->io_bits = 0xFF00;
break;
- case 0x02: /* low byte output, high byte input */
+ case 0x02: /* low byte output, high byte input */
s->io_bits = 0x00FF;
break;
- case 0x03: /* low byte input, high byte input */
+ case 0x03: /* low byte input, high byte input */
s->io_bits = 0x0000;
break;
}
- /* read DIO port state */
+ /* read DIO port state */
s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
subdev++;
@@ -450,16 +476,15 @@ static int pci1723_attach(struct comedi_device *dev,
*/
static int pci1723_detach(struct comedi_device *dev)
{
- printk("comedi%d: pci1723: remove\n", dev->minor);
+ printk(KERN_ERR "comedi%d: pci1723: remove\n", dev->minor);
if (dev->private) {
if (devpriv->valid)
pci1723_reset(dev);
if (devpriv->pcidev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pcidev);
- }
pci_dev_put(devpriv->pcidev);
}
}
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 61d35fe6435..40eeecf5347 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -45,6 +45,8 @@ Configuration options:
#define DPRINTK(fmt, args...)
#endif
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe
+
/* hardware types of the cards */
enum hw_cards_id {
TYPE_PCI1730, TYPE_PCI1733, TYPE_PCI1734, TYPE_PCI1736,
@@ -367,9 +369,9 @@ static int pci_dio_insn_bits_di_b(struct comedi_device *dev,
int i;
data[1] = 0;
- for (i = 0; i < d->regs; i++) {
+ for (i = 0; i < d->regs; i++)
data[1] |= inb(dev->iobase + d->addr + i) << (8 * i);
- }
+
return 2;
}
@@ -882,9 +884,9 @@ static int CheckAndAllocCard(struct comedi_device *dev,
struct pci_dio_private *pr, *prev;
for (pr = pci_priv, prev = NULL; pr != NULL; prev = pr, pr = pr->next) {
- if (pr->pcidev == pcidev) {
+ if (pr->pcidev == pcidev)
return 0; /* this card is used, look for another */
- }
+
}
if (prev) {
@@ -1040,22 +1042,22 @@ static int pci_dio_detach(struct comedi_device *dev)
int subdev;
if (dev->private) {
- if (devpriv->valid) {
+ if (devpriv->valid)
pci_dio_reset(dev);
- }
+
/* This shows the silliness of using this kind of
* scheme for numbering subdevices. Don't do it. --ds */
subdev = 0;
for (i = 0; i < MAX_DI_SUBDEVS; i++) {
- if (this_board->sdi[i].chans) {
+ if (this_board->sdi[i].chans)
subdev++;
- }
+
}
for (i = 0; i < MAX_DO_SUBDEVS; i++) {
- if (this_board->sdo[i].chans) {
+ if (this_board->sdo[i].chans)
subdev++;
- }
+
}
for (i = 0; i < MAX_DIO_SUBDEVG; i++) {
for (j = 0; j < this_board->sdio[i].regs; j++) {
@@ -1071,20 +1073,20 @@ static int pci_dio_detach(struct comedi_device *dev)
}
if (devpriv->pcidev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pcidev);
- }
+
pci_dev_put(devpriv->pcidev);
}
- if (devpriv->prev) {
+ if (devpriv->prev)
devpriv->prev->next = devpriv->next;
- } else {
+ else
pci_priv = devpriv->next;
- }
- if (devpriv->next) {
+
+ if (devpriv->next)
devpriv->next->prev = devpriv->prev;
- }
+
}
return 0;
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index c4cac66db12..7a1c636df5b 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -110,7 +110,7 @@ static int aio_aio12_8_ai_read(struct comedi_device *dev,
while (timeout &&
!(inb(dev->iobase + AIO12_8_STATUS) & STATUS_ADC_EOC)) {
timeout--;
- printk("timeout %d\n", timeout);
+ printk(KERN_ERR "timeout %d\n", timeout);
udelay(1);
}
if (timeout == 0) {
@@ -172,7 +172,7 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
iobase = it->options[0];
if (!request_region(iobase, 24, "aio_aio12_8")) {
- printk("I/O port conflict");
+ printk(KERN_ERR "I/O port conflict");
return -EIO;
}
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 92bcc205dd4..8eb67651486 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -218,7 +218,7 @@ order they appear in the channel list.
#define DIO200_DRIVER_NAME "amplc_dio200"
/* PCI IDs */
-/* #define PCI_VENDOR_ID_AMPLICON 0x14dc */
+#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_DEVICE_ID_AMPLICON_PCI272 0x000a
#define PCI_DEVICE_ID_AMPLICON_PCI215 0x000b
#define PCI_DEVICE_ID_INVALID 0xffff
@@ -661,7 +661,7 @@ dio200_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
subpriv = s->private;
spin_lock_irqsave(&subpriv->spinlock, flags);
- s->async->inttrig = 0;
+ s->async->inttrig = NULL;
if (subpriv->active)
event = dio200_start_intr(dev, s);
@@ -1364,7 +1364,7 @@ static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
break;
case sd_8255:
/* digital i/o subdevice (8255) */
- ret = subdev_8255_init(dev, s, 0,
+ ret = subdev_8255_init(dev, s, NULL,
iobase + layout->sdinfo[n]);
if (ret < 0)
return ret;
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index c54cca8b256..c486a878e18 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -118,7 +118,7 @@ Caveats:
/*
* PCI IDs.
*/
-/* #define PCI_VENDOR_ID_AMPLICON 0x14dc */
+#define PCI_VENDOR_ID_AMPLICON 0x14dc
#define PCI_DEVICE_ID_AMPLICON_PCI224 0x0007
#define PCI_DEVICE_ID_AMPLICON_PCI234 0x0008
#define PCI_DEVICE_ID_INVALID 0xffff
@@ -496,9 +496,9 @@ pci224_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s,
/* Writing a list of values to an AO channel is probably not
* very useful, but that's how the interface is defined. */
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
pci224_ao_set_data(dev, chan, range, data[i]);
- }
+
return i;
}
@@ -519,9 +519,9 @@ pci224_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s,
chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
+
return i;
}
@@ -544,9 +544,9 @@ static void pci224_ao_stop(struct comedi_device *dev,
{
unsigned long flags;
- if (!test_and_clear_bit(AO_CMD_STARTED, &devpriv->state)) {
+ if (!test_and_clear_bit(AO_CMD_STARTED, &devpriv->state))
return;
- }
+
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
/* Kill the interrupts. */
@@ -597,11 +597,11 @@ static void pci224_ao_start(struct comedi_device *dev,
} else {
/* Enable interrupts. */
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
- if (cmd->stop_src == TRIG_EXT) {
+ if (cmd->stop_src == TRIG_EXT)
devpriv->intsce = PCI224_INTR_EXT | PCI224_INTR_DAC;
- } else {
+ else
devpriv->intsce = PCI224_INTR_DAC;
- }
+
outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE);
spin_unlock_irqrestore(&devpriv->ao_spinlock, flags);
}
@@ -630,9 +630,9 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
num_scans = comedi_buf_read_n_available(s->async) / bytes_per_scan;
if (!devpriv->ao_stop_continuous) {
/* Fixed number of scans. */
- if (num_scans > devpriv->ao_stop_count) {
+ if (num_scans > devpriv->ao_stop_count)
num_scans = devpriv->ao_stop_count;
- }
+
}
/* Determine how much room is in the FIFO (in samples). */
@@ -669,13 +669,13 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
}
}
/* Determine how many new scans can be put in the FIFO. */
- if (cmd->chanlist_len) {
+ if (cmd->chanlist_len)
room /= cmd->chanlist_len;
- }
+
/* Determine how many scans to process. */
- if (num_scans > room) {
+ if (num_scans > room)
num_scans = room;
- }
+
/* Process scans. */
for (n = 0; n < num_scans; n++) {
cfc_read_array_from_buffer(s, &devpriv->ao_scan_vals[0],
@@ -718,19 +718,19 @@ static void pci224_ao_handle_fifo(struct comedi_device *dev,
trig = PCI224_DACCON_TRIG_Z2CT0;
} else {
/* cmd->scan_begin_src == TRIG_EXT */
- if (cmd->scan_begin_arg & CR_INVERT) {
+ if (cmd->scan_begin_arg & CR_INVERT)
trig = PCI224_DACCON_TRIG_EXTN;
- } else {
+ else
trig = PCI224_DACCON_TRIG_EXTP;
- }
+
}
devpriv->daccon = COMBINE(devpriv->daccon, trig,
PCI224_DACCON_TRIG_MASK);
outw(devpriv->daccon, dev->iobase + PCI224_DACCON);
}
- if (s->async->events) {
+ if (s->async->events)
comedi_event(dev, s);
- }
+
}
/*
@@ -855,9 +855,9 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
err++;
}
tmp = cmd->chanlist_len * CONVERT_PERIOD;
- if (tmp < MIN_SCAN_PERIOD) {
+ if (tmp < MIN_SCAN_PERIOD)
tmp = MIN_SCAN_PERIOD;
- }
+
if (cmd->scan_begin_arg < tmp) {
cmd->scan_begin_arg = tmp;
err++;
@@ -966,9 +966,9 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
devpriv->cached_div1 = div1;
devpriv->cached_div2 = div2;
}
- if (tmp != cmd->scan_begin_arg) {
+ if (tmp != cmd->scan_begin_arg)
err++;
- }
+
}
if (err)
@@ -994,13 +994,13 @@ pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
tmp = 0;
for (n = 0; n < cmd->chanlist_len; n++) {
ch = CR_CHAN(cmd->chanlist[n]);
- if (tmp & (1U << ch)) {
+ if (tmp & (1U << ch))
errors |= dupchan_err;
- }
+
tmp |= (1U << ch);
- if (CR_RANGE(cmd->chanlist[n]) != range) {
+ if (CR_RANGE(cmd->chanlist[n]) != range)
errors |= range_err;
- }
+
}
if (errors) {
if (errors & dupchan_err) {
@@ -1038,9 +1038,9 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned long flags;
/* Cannot handle null/empty chanlist. */
- if (cmd->chanlist == NULL || cmd->chanlist_len == 0) {
+ if (cmd->chanlist == NULL || cmd->chanlist_len == 0)
return -EINVAL;
- }
+
/* Determine which channels are enabled and their load order. */
devpriv->ao_enab = 0;
@@ -1050,9 +1050,9 @@ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_enab |= 1U << ch;
rank = 0;
for (j = 0; j < cmd->chanlist_len; j++) {
- if (CR_CHAN(cmd->chanlist[j]) < ch) {
+ if (CR_CHAN(cmd->chanlist[j]) < ch)
rank++;
- }
+
}
devpriv->ao_scan_order[rank] = i;
}
@@ -1221,9 +1221,9 @@ pci224_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s,
offset = 32768;
}
/* Munge the data. */
- for (i = 0; i < length; i++) {
+ for (i = 0; i < length; i++)
array[i] = (array[i] << shift) - offset;
- }
+
}
/*
@@ -1254,15 +1254,15 @@ static irqreturn_t pci224_interrupt(int irq, void *d)
cmd = &s->async->cmd;
if (valid_intstat & PCI224_INTR_EXT) {
devpriv->intsce &= ~PCI224_INTR_EXT;
- if (cmd->start_src == TRIG_EXT) {
+ if (cmd->start_src == TRIG_EXT)
pci224_ao_start(dev, s);
- } else if (cmd->stop_src == TRIG_EXT) {
+ else if (cmd->stop_src == TRIG_EXT)
pci224_ao_stop(dev, s);
- }
+
}
- if (valid_intstat & PCI224_INTR_DAC) {
+ if (valid_intstat & PCI224_INTR_DAC)
pci224_ao_handle_fifo(dev, s);
- }
+
}
/* Reenable interrupt sources. */
spin_lock_irqsave(&devpriv->ao_spinlock, flags);
@@ -1381,23 +1381,23 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Allocate readback buffer for AO channels. */
devpriv->ao_readback = kmalloc(sizeof(devpriv->ao_readback[0]) *
thisboard->ao_chans, GFP_KERNEL);
- if (!devpriv->ao_readback) {
+ if (!devpriv->ao_readback)
return -ENOMEM;
- }
+
/* Allocate buffer to hold values for AO channel scan. */
devpriv->ao_scan_vals = kmalloc(sizeof(devpriv->ao_scan_vals[0]) *
thisboard->ao_chans, GFP_KERNEL);
- if (!devpriv->ao_scan_vals) {
+ if (!devpriv->ao_scan_vals)
return -ENOMEM;
- }
+
/* Allocate buffer to hold AO channel scan order. */
devpriv->ao_scan_order = kmalloc(sizeof(devpriv->ao_scan_order[0]) *
thisboard->ao_chans, GFP_KERNEL);
- if (!devpriv->ao_scan_order) {
+ if (!devpriv->ao_scan_order)
return -ENOMEM;
- }
+
/* Disable interrupt sources. */
devpriv->intsce = 0;
@@ -1445,9 +1445,9 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->range_table_list = range_table_list =
kmalloc(sizeof(struct comedi_lrange *) * s->n_chan,
GFP_KERNEL);
- if (!s->range_table_list) {
+ if (!s->range_table_list)
return -ENOMEM;
- }
+
for (n = 2; n < 3 + s->n_chan; n++) {
if (it->options[n] < 0 || it->options[n] > 1) {
printk(KERN_WARNING "comedi%d: %s: warning! "
@@ -1459,11 +1459,11 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
for (n = 0; n < s->n_chan; n++) {
if (n < COMEDI_NDEVCONFOPTS - 3 &&
it->options[3 + n] == 1) {
- if (it->options[2] == 1) {
+ if (it->options[2] == 1)
range_table_list[n] = &range_pci234_ext;
- } else {
+ else
range_table_list[n] = &range_bipolar5;
- }
+
} else {
if (it->options[2] == 1) {
range_table_list[n] =
@@ -1506,11 +1506,11 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
printk(KERN_INFO "comedi%d: %s ", dev->minor, dev->board_name);
printk("(pci %s) ", pci_name(pci_dev));
- if (irq) {
+ if (irq)
printk("(irq %u%s) ", irq, (dev->irq ? "" : " UNAVAILABLE"));
- } else {
+ else
printk("(no irq) ");
- }
+
printk("attached\n");
@@ -1529,9 +1529,9 @@ static int pci224_detach(struct comedi_device *dev)
{
printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, DRIVER_NAME);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (dev->subdevices) {
struct comedi_subdevice *s;
@@ -1544,9 +1544,9 @@ static int pci224_detach(struct comedi_device *dev)
kfree(devpriv->ao_scan_vals);
kfree(devpriv->ao_scan_order);
if (devpriv->pci_dev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
- }
+
pci_dev_put(devpriv->pci_dev);
}
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 091a1a5822a..7fffd967d47 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -669,9 +669,9 @@ static short pci230_ai_read(struct comedi_device *dev)
/* If a bipolar range was specified, mangle it (twos
* complement->straight binary). */
- if (devpriv->ai_bipolar) {
+ if (devpriv->ai_bipolar)
data ^= 1 << (thisboard->ai_bits - 1);
- }
+
return data;
}
@@ -680,9 +680,9 @@ static inline unsigned short pci230_ao_mangle_datum(struct comedi_device *dev,
{
/* If a bipolar range was specified, mangle it (straight binary->twos
* complement). */
- if (devpriv->ao_bipolar) {
+ if (devpriv->ao_bipolar)
datum ^= 1 << (thisboard->ao_bits - 1);
- }
+
/* PCI230 is 12 bit - stored in upper bits of 16 bit register (lower
* four bits reserved for expansion). */
@@ -734,9 +734,9 @@ static int pci230_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* Allocate the private structure area using alloc_private().
* Macro defined in comedidev.h - memsets struct fields to 0. */
- if ((alloc_private(dev, sizeof(struct pci230_private))) < 0) {
+ if ((alloc_private(dev, sizeof(struct pci230_private))) < 0)
return -ENOMEM;
- }
+
spin_lock_init(&devpriv->isr_spinlock);
spin_lock_init(&devpriv->res_spinlock);
spin_lock_init(&devpriv->ai_stop_spinlock);
@@ -991,9 +991,9 @@ static int pci230_detach(struct comedi_device *dev)
if (devpriv) {
if (devpriv->pci_dev) {
- if (dev->iobase) {
+ if (dev->iobase)
comedi_pci_disable(devpriv->pci_dev);
- }
+
pci_dev_put(devpriv->pci_dev);
}
}
@@ -1055,9 +1055,9 @@ static void put_resources(struct comedi_device *dev, unsigned int res_mask,
&& (res_mask != 0); b <<= 1, i++) {
if ((res_mask & b) != 0) {
res_mask &= ~b;
- if (devpriv->res_owner[i] == owner) {
+ if (devpriv->res_owner[i] == owner)
devpriv->res_owner[i] = OWNER_NONE;
- }
+
}
}
spin_unlock_irqrestore(&devpriv->res_spinlock, irqflags);
@@ -1132,11 +1132,11 @@ static int pci230_ai_rinsn(struct comedi_device *dev,
}
devpriv->adcg = (devpriv->adcg & ~(3 << gainshift))
| (pci230_ai_gain[range] << gainshift);
- if (devpriv->ai_bipolar) {
+ if (devpriv->ai_bipolar)
adccon |= PCI230_ADC_IR_BIP;
- } else {
+ else
adccon |= PCI230_ADC_IR_UNI;
- }
+
/* Enable only this channel in the scan list - otherwise by default
* we'll get one sample from each channel. */
@@ -1408,13 +1408,13 @@ static int pci230_ao_cmdtest(struct comedi_device *dev,
chan = CR_CHAN(cmd->chanlist[n]);
range = CR_RANGE(cmd->chanlist[n]);
/* Channel numbers must strictly increase. */
- if (chan < prev_chan) {
+ if (chan < prev_chan)
errors |= seq_err;
- }
+
/* Ranges must be the same. */
- if (range != first_range) {
+ if (range != first_range)
errors |= range_err;
- }
+
prev_chan = chan;
}
if (errors != 0) {
@@ -1583,9 +1583,9 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (cmd->scan_begin_src == TRIG_TIMER) {
/* Claim Z2-CT1. */
- if (!get_one_resource(dev, RES_Z2CT1, OWNER_AOCMD)) {
+ if (!get_one_resource(dev, RES_Z2CT1, OWNER_AOCMD))
return -EBUSY;
- }
+
}
/* Get number of scans required. */
@@ -1609,9 +1609,9 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned int i;
dacen = 0;
- for (i = 0; i < cmd->chanlist_len; i++) {
+ for (i = 0; i < cmd->chanlist_len; i++)
dacen |= 1 << CR_CHAN(cmd->chanlist[i]);
- }
+
/* Set channel scan list. */
outw(dacen, dev->iobase + PCI230P2_DACEN);
/*
@@ -1656,9 +1656,9 @@ static int pci230_ai_check_scan_period(struct comedi_cmd *cmd)
int err = 0;
chanlist_len = cmd->chanlist_len;
- if (cmd->chanlist_len == 0) {
+ if (cmd->chanlist_len == 0)
chanlist_len = 1;
- }
+
min_scan_period = chanlist_len * cmd->convert_arg;
if ((min_scan_period < chanlist_len)
|| (min_scan_period < cmd->convert_arg)) {
@@ -1777,11 +1777,11 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
* single-ended or pseudo-differential. */
if (cmd->chanlist && (cmd->chanlist_len > 0)) {
/* Peek analogue reference of first channel. */
- if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF) {
+ if (CR_AREF(cmd->chanlist[0]) == AREF_DIFF)
max_speed_ai = MAX_SPEED_AI_DIFF;
- } else {
+ else
max_speed_ai = MAX_SPEED_AI_SE;
- }
+
} else {
/* No channel list. Assume single-ended. */
max_speed_ai = MAX_SPEED_AI_SE;
@@ -1871,9 +1871,9 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
}
} else if (cmd->scan_begin_src == TRIG_TIMER) {
/* N.B. cmd->convert_arg is also TRIG_TIMER */
- if (!pci230_ai_check_scan_period(cmd)) {
+ if (!pci230_ai_check_scan_period(cmd))
err++;
- }
+
} else {
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
@@ -1961,13 +1961,13 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
errors |= seq_err;
}
/* Channels must have same AREF. */
- if (aref != prev_aref) {
+ if (aref != prev_aref)
errors |= aref_err;
- }
+
/* Channel ranges must have same polarity. */
- if (polarity != prev_polarity) {
+ if (polarity != prev_polarity)
errors |= polarity_err;
- }
+
/* Single-ended channel pairs must have same
* range. */
if ((aref != AREF_DIFF)
@@ -1987,9 +1987,9 @@ static int pci230_ai_cmdtest(struct comedi_device *dev,
}
/* If channel list is a repeating subsequence, need a whole
* number of repeats. */
- if ((n % subseq_len) != 0) {
+ if ((n % subseq_len) != 0)
errors |= seq_err;
- }
+
if ((devpriv->hwver > 0) && (devpriv->hwver < 4)) {
/*
* Buggy PCI230+ or PCI260+ requires channel 0 to be
@@ -2228,9 +2228,9 @@ static void pci230_ai_start(struct comedi_device *dev,
devpriv->adccon = (devpriv->adccon & ~PCI230_ADC_TRIG_MASK)
| conv;
outw(devpriv->adccon, dev->iobase + PCI230_ADCCON);
- if (cmd->convert_src == TRIG_INT) {
+ if (cmd->convert_src == TRIG_INT)
async->inttrig = pci230_ai_inttrig_convert;
- }
+
/* Update FIFO interrupt trigger level, which is currently
* set to "full". */
pci230_ai_update_fifo_trigger_level(dev, s);
@@ -2345,9 +2345,9 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
}
/* Claim resources. */
- if (!get_resources(dev, res_mask, OWNER_AICMD)) {
+ if (!get_resources(dev, res_mask, OWNER_AICMD))
return -EBUSY;
- }
+
/* Get number of scans required. */
if (cmd->stop_src == TRIG_COUNT) {
@@ -2392,11 +2392,11 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
range = CR_RANGE(cmd->chanlist[0]);
devpriv->ai_bipolar = pci230_ai_bipolar[range];
- if (devpriv->ai_bipolar) {
+ if (devpriv->ai_bipolar)
adccon |= PCI230_ADC_IR_BIP;
- } else {
+ else
adccon |= PCI230_ADC_IR_UNI;
- }
+
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int gainshift;
@@ -2543,9 +2543,9 @@ static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
for (clk_src = CLK_10MHZ;; clk_src++) {
cnt = divide_ns(ns, pci230_timebase[clk_src], round_mode);
- if ((cnt <= 65536) || (clk_src == CLK_1KHZ)) {
+ if ((cnt <= 65536) || (clk_src == CLK_1KHZ))
break;
- }
+
}
*count = cnt;
return clk_src;
@@ -2575,9 +2575,9 @@ static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
/* Program clock source. */
outb(CLK_CONFIG(ct, clk_src), devpriv->iobase1 + PCI230_ZCLK_SCE);
/* Set initial count. */
- if (count >= 65536) {
+ if (count >= 65536)
count = 0;
- }
+
i8254_write(devpriv->iobase1 + PCI230_Z2_CT_BASE, 0, ct, count);
}
@@ -2599,9 +2599,9 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
/* Read interrupt status/enable register. */
status_int = inb(devpriv->iobase1 + PCI230_INT_STAT);
- if (status_int == PCI230_INT_DISABLE) {
+ if (status_int == PCI230_INT_DISABLE)
return IRQ_NONE;
- }
+
spin_lock_irqsave(&devpriv->isr_spinlock, irqflags);
valid_status_int = devpriv->int_en & status_int;
@@ -2660,9 +2660,9 @@ static void pci230_handle_ao_nofifo(struct comedi_device *dev,
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- if (!devpriv->ao_continuous && (devpriv->ao_scan_count == 0)) {
+ if (!devpriv->ao_continuous && (devpriv->ao_scan_count == 0))
return;
- }
+
for (i = 0; i < cmd->chanlist_len; i++) {
/* Read sample from Comedi's circular buffer. */
@@ -2711,9 +2711,9 @@ static int pci230_handle_ao_fifo(struct comedi_device *dev,
num_scans = comedi_buf_read_n_available(async) / bytes_per_scan;
if (!devpriv->ao_continuous) {
/* Fixed number of scans. */
- if (num_scans > devpriv->ao_scan_count) {
+ if (num_scans > devpriv->ao_scan_count)
num_scans = devpriv->ao_scan_count;
- }
+
if (devpriv->ao_scan_count == 0) {
/* End of acquisition. */
events |= COMEDI_CB_EOA;
@@ -2736,21 +2736,21 @@ static int pci230_handle_ao_fifo(struct comedi_device *dev,
}
if (events == 0) {
/* Determine how much room is in the FIFO (in samples). */
- if ((dacstat & PCI230P2_DAC_FIFO_FULL) != 0) {
+ if ((dacstat & PCI230P2_DAC_FIFO_FULL) != 0)
room = PCI230P2_DAC_FIFOROOM_FULL;
- } else if ((dacstat & PCI230P2_DAC_FIFO_HALF) != 0) {
+ else if ((dacstat & PCI230P2_DAC_FIFO_HALF) != 0)
room = PCI230P2_DAC_FIFOROOM_HALFTOFULL;
- } else if ((dacstat & PCI230P2_DAC_FIFO_EMPTY) != 0) {
+ else if ((dacstat & PCI230P2_DAC_FIFO_EMPTY) != 0)
room = PCI230P2_DAC_FIFOROOM_EMPTY;
- } else {
+ else
room = PCI230P2_DAC_FIFOROOM_ONETOHALF;
- }
+
/* Convert room to number of scans that can be added. */
room /= cmd->chanlist_len;
/* Determine number of scans to process. */
- if (num_scans > room) {
+ if (num_scans > room)
num_scans = room;
- }
+
/* Process scans. */
for (n = 0; n < num_scans; n++) {
for (i = 0; i < cmd->chanlist_len; i++) {
@@ -2817,14 +2817,14 @@ static void pci230_handle_ai(struct comedi_device *dev,
} else {
todo = (devpriv->ai_scan_count * scanlen)
- devpriv->ai_scan_pos;
- if (todo > PCI230_ADC_FIFOLEVEL_HALFFULL) {
+ if (todo > PCI230_ADC_FIFOLEVEL_HALFFULL)
todo = PCI230_ADC_FIFOLEVEL_HALFFULL;
- }
+
}
- if (todo == 0) {
+ if (todo == 0)
return;
- }
+
fifoamount = 0;
for (i = 0; i < todo; i++) {
@@ -2906,9 +2906,9 @@ static void pci230_ao_stop(struct comedi_device *dev,
spin_lock_irqsave(&devpriv->ao_stop_spinlock, irqflags);
started = test_and_clear_bit(AO_CMD_STARTED, &devpriv->state);
spin_unlock_irqrestore(&devpriv->ao_stop_spinlock, irqflags);
- if (!started) {
+ if (!started)
return;
- }
+
cmd = &s->async->cmd;
if (cmd->scan_begin_src == TRIG_TIMER) {
@@ -2968,9 +2968,9 @@ static void pci230_ai_stop(struct comedi_device *dev,
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
started = test_and_clear_bit(AI_CMD_STARTED, &devpriv->state);
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
- if (!started) {
+ if (!started)
return;
- }
+
cmd = &s->async->cmd;
if (cmd->convert_src == TRIG_TIMER) {
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 5632991760a..cfeb11f443e 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -175,17 +175,18 @@ static int das16cs_attach(struct comedi_device *dev,
printk("I/O base=0x%04lx ", dev->iobase);
printk("fingerprint:\n");
- for (i = 0; i < 48; i += 2) {
+ for (i = 0; i < 48; i += 2)
printk("%04x ", inw(dev->iobase + i));
- }
+
printk("\n");
- ret = request_irq(link->irq.AssignedIRQ, das16cs_interrupt,
+ ret = request_irq(link->irq, das16cs_interrupt,
IRQF_SHARED, "cb_das16_cs", dev);
- if (ret < 0) {
+ if (ret < 0)
return ret;
- }
- dev->irq = link->irq.AssignedIRQ;
+
+ dev->irq = link->irq;
+
printk("irq=%u ", dev->irq);
dev->board_ptr = das16cs_probe(dev, link);
@@ -262,9 +263,9 @@ static int das16cs_detach(struct comedi_device *dev)
{
printk("comedi%d: das16cs: remove\n", dev->minor);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
return 0;
}
@@ -671,7 +672,6 @@ static dev_info_t dev_info = "cb_das16_cs";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -702,10 +702,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
link->priv = local;
/* Initialize the pcmcia_device structure */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -720,10 +716,8 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "das16cs_pcmcia_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- das16cs_pcmcia_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ das16cs_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
kfree(link->priv);
@@ -740,8 +734,7 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -EINVAL;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -769,7 +762,6 @@ static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void das16cs_pcmcia_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "das16cs_pcmcia_config\n");
@@ -780,16 +772,9 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
+
/*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
@@ -799,19 +784,10 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "cb_das16_cs");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -859,6 +835,9 @@ static struct pcmcia_device_id das16cs_id_table[] = {
};
MODULE_DEVICE_TABLE(pcmcia, das16cs_id_table);
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
+MODULE_DESCRIPTION("Comedi driver for Computer Boards PC-CARD DAS16/16");
+MODULE_LICENSE("GPL");
struct pcmcia_driver das16cs_driver = {
.probe = das16cs_pcmcia_attach,
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 82295e0f07f..79aa286e9bb 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -107,6 +107,8 @@ TODO:
#define PRESCALED_TIMER_BASE 10000 /* 100kHz 'prescaled' clock for slow aquisition, maybe I'll support this someday */
#define DMA_BUFFER_SIZE 0x1000
+#define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307
+
/* maximum value that can be loaded into board's 24-bit counters*/
static const int max_counter_value = 0xffffff;
@@ -1099,9 +1101,9 @@ struct pcidas64_private {
resource_size_t main_phys_iobase;
resource_size_t dio_counter_phys_iobase;
/* base addresses (ioremapped) */
- void *plx9080_iobase;
- void *main_iobase;
- void *dio_counter_iobase;
+ void __iomem *plx9080_iobase;
+ void __iomem *main_iobase;
+ void __iomem *dio_counter_iobase;
/* local address (used by dma controller) */
uint32_t local0_iobase;
uint32_t local1_iobase;
@@ -1314,7 +1316,7 @@ static inline int ao_cmd_is_supported(const struct pcidas64_board *board)
static void init_plx9080(struct comedi_device *dev)
{
uint32_t bits;
- void *plx_iobase = priv(dev)->plx9080_iobase;
+ void __iomem *plx_iobase = priv(dev)->plx9080_iobase;
priv(dev)->plx_control_bits =
readl(priv(dev)->plx9080_iobase + PLX_CONTROL_REG);
@@ -1404,7 +1406,7 @@ static void init_plx9080(struct comedi_device *dev)
static int setup_subdevices(struct comedi_device *dev)
{
struct comedi_subdevice *s;
- void *dio_8255_iobase;
+ void __iomem *dio_8255_iobase;
int i;
if (alloc_subdevices(dev, 10) < 0)
@@ -1430,7 +1432,6 @@ static int setup_subdevices(struct comedi_device *dev)
s->do_cmdtest = ai_cmdtest;
s->cancel = ai_cancel;
if (board(dev)->layout == LAYOUT_4020) {
- unsigned int i;
uint8_t data;
/* set adc to read from inputs (not internal calibration sources) */
priv(dev)->i2c_cal_range_bits = adc_src_4020_bits(4);
@@ -1612,7 +1613,7 @@ static void init_stc_registers(struct comedi_device *dev)
disable_ai_pacing(dev);
};
-int alloc_and_init_dma_members(struct comedi_device *dev)
+static int alloc_and_init_dma_members(struct comedi_device *dev)
{
int i;
@@ -1621,9 +1622,9 @@ int alloc_and_init_dma_members(struct comedi_device *dev)
priv(dev)->ai_buffer[i] =
pci_alloc_consistent(priv(dev)->hw_dev, DMA_BUFFER_SIZE,
&priv(dev)->ai_buffer_bus_addr[i]);
- if (priv(dev)->ai_buffer[i] == NULL) {
+ if (priv(dev)->ai_buffer[i] == NULL)
return -ENOMEM;
- }
+
}
for (i = 0; i < AO_DMA_RING_COUNT; i++) {
if (ao_cmd_is_supported(board(dev))) {
@@ -1632,9 +1633,9 @@ int alloc_and_init_dma_members(struct comedi_device *dev)
DMA_BUFFER_SIZE,
&priv(dev)->
ao_buffer_bus_addr[i]);
- if (priv(dev)->ao_buffer[i] == NULL) {
+ if (priv(dev)->ao_buffer[i] == NULL)
return -ENOMEM;
- }
+
}
}
/* allocate dma descriptors */
@@ -1643,9 +1644,9 @@ int alloc_and_init_dma_members(struct comedi_device *dev)
sizeof(struct plx_dma_desc) *
ai_dma_ring_count(board(dev)),
&priv(dev)->ai_dma_desc_bus_addr);
- if (priv(dev)->ai_dma_desc == NULL) {
+ if (priv(dev)->ai_dma_desc == NULL)
return -ENOMEM;
- }
+
DEBUG_PRINT("ai dma descriptors start at bus addr 0x%x\n",
priv(dev)->ai_dma_desc_bus_addr);
if (ao_cmd_is_supported(board(dev))) {
@@ -1654,9 +1655,9 @@ int alloc_and_init_dma_members(struct comedi_device *dev)
sizeof(struct plx_dma_desc) *
AO_DMA_RING_COUNT,
&priv(dev)->ao_dma_desc_bus_addr);
- if (priv(dev)->ao_dma_desc == NULL) {
+ if (priv(dev)->ao_dma_desc == NULL)
return -ENOMEM;
- }
+
DEBUG_PRINT("ao dma descriptors start at bus addr 0x%x\n",
priv(dev)->ao_dma_desc_bus_addr);
}
@@ -1848,9 +1849,9 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
printk(" irq %u\n", dev->irq);
retval = setup_subdevices(dev);
- if (retval < 0) {
+ if (retval < 0)
return retval;
- }
+
return 0;
}
@@ -1875,12 +1876,12 @@ static int detach(struct comedi_device *dev)
if (priv(dev)->hw_dev) {
if (priv(dev)->plx9080_iobase) {
disable_plx_interrupts(dev);
- iounmap((void *)priv(dev)->plx9080_iobase);
+ iounmap(priv(dev)->plx9080_iobase);
}
if (priv(dev)->main_iobase)
- iounmap((void *)priv(dev)->main_iobase);
+ iounmap(priv(dev)->main_iobase);
if (priv(dev)->dio_counter_iobase)
- iounmap((void *)priv(dev)->dio_counter_iobase);
+ iounmap(priv(dev)->dio_counter_iobase);
/* free pci dma buffers */
for (i = 0; i < ai_dma_ring_count(board(dev)); i++) {
if (priv(dev)->ai_buffer[i])
@@ -1919,9 +1920,9 @@ static int detach(struct comedi_device *dev)
priv(dev)->ao_dma_desc,
priv(dev)->
ao_dma_desc_bus_addr);
- if (priv(dev)->main_phys_iobase) {
+ if (priv(dev)->main_phys_iobase)
comedi_pci_disable(priv(dev)->hw_dev);
- }
+
pci_dev_put(priv(dev)->hw_dev);
}
}
@@ -2902,9 +2903,9 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
if (cmd->stop_src == TRIG_COUNT) {
if (priv(dev)->ai_count == 0)
break;
- if (num_samples > priv(dev)->ai_count) {
+ if (num_samples > priv(dev)->ai_count)
num_samples = priv(dev)->ai_count;
- }
+
priv(dev)->ai_count -= num_samples;
}
@@ -2943,9 +2944,9 @@ static void pio_drain_ai_fifo_32(struct comedi_device *dev)
readw(priv(dev)->main_iobase + ADC_READ_PNTR_REG) & 0x7fff;
if (cmd->stop_src == TRIG_COUNT) {
- if (max_transfer > priv(dev)->ai_count) {
+ if (max_transfer > priv(dev)->ai_count)
max_transfer = priv(dev)->ai_count;
- }
+
}
for (i = 0; read_code != write_code && i < max_transfer;) {
fifo_data = readl(priv(dev)->dio_counter_iobase + ADC_FIFO_REG);
@@ -2964,9 +2965,9 @@ static void pio_drain_ai_fifo_32(struct comedi_device *dev)
/* empty fifo */
static void pio_drain_ai_fifo(struct comedi_device *dev)
{
- if (board(dev)->layout == LAYOUT_4020) {
+ if (board(dev)->layout == LAYOUT_4020)
pio_drain_ai_fifo_32(dev);
- } else
+ else
pio_drain_ai_fifo_16(dev);
}
@@ -2976,7 +2977,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
uint32_t next_transfer_addr;
int j;
int num_samples = 0;
- void *pci_addr_reg;
+ void __iomem *pci_addr_reg;
if (channel)
pci_addr_reg =
@@ -3016,8 +3017,9 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
* unused buffer) */
}
-void handle_ai_interrupt(struct comedi_device *dev, unsigned short status,
- unsigned int plx_status)
+static void handle_ai_interrupt(struct comedi_device *dev,
+ unsigned short status,
+ unsigned int plx_status)
{
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
@@ -3038,9 +3040,9 @@ void handle_ai_interrupt(struct comedi_device *dev, unsigned short status,
priv(dev)->plx9080_iobase + PLX_DMA1_CS_REG);
DEBUG_PRINT("dma1 status 0x%x\n", dma1_status);
- if (dma1_status & PLX_DMA_EN_BIT) {
+ if (dma1_status & PLX_DMA_EN_BIT)
drain_dma_buffers(dev, 1);
- }
+
DEBUG_PRINT(" cleared dma ch1 interrupt\n");
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -3227,7 +3229,7 @@ static irqreturn_t handle_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-void abort_dma(struct comedi_device *dev, unsigned int channel)
+static void abort_dma(struct comedi_device *dev, unsigned int channel)
{
unsigned long flags;
@@ -3422,7 +3424,7 @@ static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
{
unsigned int num_bytes;
unsigned int next_transfer_addr;
- void *pci_addr_reg =
+ void __iomem *pci_addr_reg =
priv(dev)->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
unsigned int buffer_index;
@@ -3656,24 +3658,26 @@ static int ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-static int dio_callback(int dir, int port, int data, unsigned long iobase)
+static int dio_callback(int dir, int port, int data, unsigned long arg)
{
+ void __iomem *iobase = (void __iomem *)arg;
if (dir) {
- writeb(data, (void *)(iobase + port));
+ writeb(data, iobase + port);
DEBUG_PRINT("wrote 0x%x to port %i\n", data, port);
return 0;
} else {
- return readb((void *)(iobase + port));
+ return readb(iobase + port);
}
}
-static int dio_callback_4020(int dir, int port, int data, unsigned long iobase)
+static int dio_callback_4020(int dir, int port, int data, unsigned long arg)
{
+ void __iomem *iobase = (void __iomem *)arg;
if (dir) {
- writew(data, (void *)(iobase + 2 * port));
+ writew(data, iobase + 2 * port);
return 0;
} else {
- return readw((void *)(iobase + 2 * port));
+ return readw(iobase + 2 * port);
}
}
@@ -3860,7 +3864,7 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
static const int read_command = 0x6;
unsigned int bitstream = (read_command << 8) | address;
unsigned int bit;
- void *const plx_control_addr =
+ void __iomem * const plx_control_addr =
priv(dev)->plx9080_iobase + PLX_CONTROL_REG;
uint16_t value;
static const int value_length = 16;
@@ -4183,7 +4187,8 @@ static const int i2c_low_udelay = 10;
static void i2c_set_sda(struct comedi_device *dev, int state)
{
static const int data_bit = CTL_EE_W;
- void *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG;
+ void __iomem *plx_control_addr = priv(dev)->plx9080_iobase +
+ PLX_CONTROL_REG;
if (state) {
/* set data line high */
@@ -4202,7 +4207,8 @@ static void i2c_set_sda(struct comedi_device *dev, int state)
static void i2c_set_scl(struct comedi_device *dev, int state)
{
static const int clock_bit = CTL_USERO;
- void *plx_control_addr = priv(dev)->plx9080_iobase + PLX_CONTROL_REG;
+ void __iomem *plx_control_addr = priv(dev)->plx9080_iobase +
+ PLX_CONTROL_REG;
if (state) {
/* set clock line high */
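The cb_pcidas64.c hunks tag every ioremapped register pointer as void __iomem * and drop the (void *) casts around readl()/writel()/iounmap(), which lets sparse verify that MMIO pointers are only dereferenced through the accessor functions. A minimal sketch of the pattern (kernel context assumed; the register offset and mapping size are invented for illustration):

#include <linux/io.h>

#define DEMO_CTRL_REG	0x6c		/* hypothetical register offset */

static u32 demo_read_ctrl(unsigned long phys_base)
{
	void __iomem *regs;
	u32 val;

	regs = ioremap(phys_base, 0x100);	/* hypothetical window size */
	if (!regs)
		return 0;

	val = readl(regs + DEMO_CTRL_REG);	/* no (void *) cast needed */
	iounmap(regs);
	return val;
}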
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 2e61727fc9a..49dccbbd713 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -52,6 +52,8 @@ See http://www.measurementcomputing.com/PDFManuals/pcim-das1602_16.pdf for more
/* #define CBPCIMDAS_DEBUG */
#undef CBPCIMDAS_DEBUG
+#define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307
+
/* Registers for the PCIM-DAS1602/16 */
/* sizes of io regions (bytes) */
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index e32a31763d5..f404ec7723e 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -91,7 +91,8 @@ Configuration Options:
#include "8255.h"
/* device ids of the cards we support -- currently only 1 card supported */
-#define PCI_ID_PCIM_DDA06_16 0x0053
+#define PCI_VENDOR_ID_COMPUTERBOARDS 0x1307
+#define PCI_ID_PCIM_DDA06_16 0x0053
/*
* This is straight from skel.c -- I did this in case this source file
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 41311d99473..701622280ff 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -87,18 +87,17 @@ Configuration Options:
* options that are used with comedi_config.
*/
-#include "../comedilib.h"
-#include "../comedidev.h"
#include <linux/string.h>
#include <linux/slab.h>
+#include "../comedi.h"
+#include "../comedilib.h"
+#include "../comedidev.h"
/* The maxiumum number of channels per subdevice. */
#define MAX_CHANS 256
#define MODULE_NAME "comedi_bond"
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
#ifndef STR
# define STR1(x) #x
# define STR(x) STR1(x)
@@ -143,7 +142,7 @@ static const struct BondingBoard bondingBoards[] = {
#define thisboard ((const struct BondingBoard *)dev->board_ptr)
struct BondedDevice {
- void *dev;
+ struct comedi_device *dev;
unsigned minor;
unsigned subdev;
unsigned subdev_type;
@@ -405,7 +404,7 @@ static void *Realloc(const void *oldmem, size_t newlen, size_t oldlen)
static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
{
int i;
- void *devs_opened[COMEDI_NUM_BOARD_MINORS];
+ struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS];
memset(devs_opened, 0, sizeof(devs_opened));
devpriv->name[0] = 0;;
@@ -414,7 +413,7 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
char file[] = "/dev/comediXXXXXX";
int minor = it->options[i];
- void *d;
+ struct comedi_device *d;
int sdev = -1, nchans, tmp;
struct BondedDevice *bdev = NULL;
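The comedi_bond.c hunks change the bonded-device handles from void * to struct comedi_device *, so the compiler, rather than a cast at every use, enforces the pointer type. A small standalone illustration with hypothetical types:

struct demo_device {
	unsigned int minor;
};

static unsigned int demo_minor(struct demo_device *dev)
{
	/* With a void * handle this would need a cast, and passing the
	 * wrong pointer type would still compile silently. */
	return dev->minor;
}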
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index 043afe4439c..fcd7721c553 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -309,18 +309,18 @@ static int parport_attach(struct comedi_device *dev,
iobase = it->options[0];
printk(KERN_INFO "comedi%d: parport: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, PARPORT_SIZE, "parport (comedi)")) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
irq = it->options[1];
if (irq) {
- printk(" irq=%u", irq);
+ printk(KERN_INFO " irq=%u", irq);
ret = request_irq(irq, parport_interrupt, 0, "comedi_parport",
dev);
if (ret < 0) {
- printk(" irq not available\n");
+ printk(KERN_ERR " irq not available\n");
return -EINVAL;
}
dev->irq = irq;
@@ -380,13 +380,13 @@ static int parport_attach(struct comedi_device *dev,
devpriv->c_data = 0;
outb(devpriv->c_data, dev->iobase + PARPORT_C);
- printk("\n");
+ printk(KERN_INFO "\n");
return 1;
}
static int parport_detach(struct comedi_device *dev)
{
- printk("comedi%d: parport: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: parport: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, PARPORT_SIZE);
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index f4258334532..9cb144f7e70 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -1,55 +1,55 @@
/*
- comedi/drivers/das08.c
- DAS08 driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
- Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*****************************************************************
+ * comedi/drivers/das08.c
+ * DAS08 driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2001,2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Copyright (C) 2004 Salvador E. Tropea <set@users.sf.net> <set@ieee.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************
+ */
-*/
/*
-Driver: das08
-Description: DAS-08 compatible boards
-Author: Warren Jasper, ds, Frank Hess
-Devices: [Keithley Metrabyte] DAS08 (isa-das08), [ComputerBoards] DAS08 (isa-das08),
- DAS08-PGM (das08-pgm),
- DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
- DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
- DAS08/JR-16-AO (das08jr-16-ao), PCI-DAS08 (das08),
- PC104-DAS08 (pc104-das08), DAS08/JR/16 (das08jr/16)
-Status: works
-
-This is a rewrite of the das08 and das08jr drivers.
-
-Options (for ISA cards):
- [0] - base io address
-
-Options (for pci-das08):
- [0] - bus (optional)
- [1] = slot (optional)
-
-The das08 driver doesn't support asynchronous commands, since
-the cheap das08 hardware doesn't really support them. The
-comedi_rt_timer driver can be used to emulate commands for this
-driver.
-*/
+ * Driver: das08
+ * Description: DAS-08 compatible boards
+ * Author: Warren Jasper, ds, Frank Hess
+ * Devices: [Keithley Metrabyte] DAS08 (isa-das08),
+ * [ComputerBoards] DAS08 (isa-das08), DAS08-PGM (das08-pgm),
+ * DAS08-PGH (das08-pgh), DAS08-PGL (das08-pgl), DAS08-AOH (das08-aoh),
+ * DAS08-AOL (das08-aol), DAS08-AOM (das08-aom), DAS08/JR-AO (das08/jr-ao),
+ * DAS08/JR-16-AO (das08jr-16-ao), PCI-DAS08 (das08),
+ * PC104-DAS08 (pc104-das08), DAS08/JR/16 (das08jr/16)
+ * Status: works
+ *
+ * This is a rewrite of the das08 and das08jr drivers.
+ *
+ * Options (for ISA cards):
+ * [0] - base io address
+ *
+ * Options (for pci-das08):
+ * [0] - bus (optional)
+ * [1] = slot (optional)
+ *
+ * The das08 driver doesn't support asynchronous commands, since
+ * the cheap das08 hardware doesn't really support them. The
+ * comedi_rt_timer driver can be used to emulate commands for this
+ * driver.
+ */
#include "../comedidev.h"
@@ -122,8 +122,8 @@ driver.
*/
#define DAS08JR_DIO 3
-#define DAS08JR_AO_LSB(x) ((x)?6:4)
-#define DAS08JR_AO_MSB(x) ((x)?7:5)
+#define DAS08JR_AO_LSB(x) ((x) ? 6 : 4)
+#define DAS08JR_AO_MSB(x) ((x) ? 7 : 5)
/*
cio-das08_aox.pdf
@@ -148,8 +148,8 @@ driver.
#define DAS08AO_GAIN_CONTROL 3
#define DAS08AO_GAIN_STATUS 3
-#define DAS08AO_AO_LSB(x) ((x)?0xa:8)
-#define DAS08AO_AO_MSB(x) ((x)?0xb:9)
+#define DAS08AO_AO_LSB(x) ((x) ? 0xa : 8)
+#define DAS08AO_AO_MSB(x) ((x) ? 0xb : 9)
#define DAS08AO_AO_UPDATE 8
/* gainlist same as _pgx_ below */
@@ -239,8 +239,9 @@ static const struct comedi_lrange *const das08_ai_lranges[] = {
&range_das08_pgm,
};
-static const int das08_pgh_gainlist[] =
- { 8, 0, 10, 2, 12, 4, 14, 6, 1, 3, 5, 7 };
+static const int das08_pgh_gainlist[] = {
+ 8, 0, 10, 2, 12, 4, 14, 6, 1, 3, 5, 7
+};
static const int das08_pgl_gainlist[] = { 8, 0, 2, 4, 6, 1, 3, 5, 7 };
static const int das08_pgm_gainlist[] = { 8, 0, 10, 12, 14, 9, 11, 13, 15 };
@@ -535,7 +536,8 @@ static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
inb(dev->iobase + DAS08_MSB);
/* set multiplexer */
- spin_lock(&dev->spinlock); /* lock to prevent race with digital output */
+ /* lock to prevent race with digital output */
+ spin_lock(&dev->spinlock);
devpriv->do_mux_bits &= ~DAS08_MUX_MASK;
devpriv->do_mux_bits |= DAS08_MUX(chan);
outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL);
@@ -552,7 +554,7 @@ static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
/* clear over-range bits for 16-bit boards */
if (thisboard->ai_nbits == 16)
if (inb(dev->iobase + DAS08_MSB) & 0x80)
- printk("das08: over-range\n");
+ printk(KERN_INFO "das08: over-range\n");
/* trigger conversion */
outb_p(0, dev->iobase + DAS08_TRIG_12BIT);
@@ -562,7 +564,7 @@ static int das08_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
break;
}
if (i == TIMEOUT) {
- printk("das08: timeout\n");
+ printk(KERN_ERR "das08: timeout\n");
return -ETIME;
}
msb = inb(dev->iobase + DAS08_MSB);
@@ -607,7 +609,8 @@ static int das08_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
/* set new bit values */
wbits |= data[0] & data[1];
/* remember digital output bits */
- spin_lock(&dev->spinlock); /* prevent race with setting of analog input mux */
+ /* prevent race with setting of analog input mux */
+ spin_lock(&dev->spinlock);
devpriv->do_mux_bits &= ~DAS08_DO_MASK;
devpriv->do_mux_bits |= DAS08_OP(wbits);
outb(devpriv->do_mux_bits, dev->iobase + DAS08_CONTROL);
@@ -860,9 +863,9 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
/* allocate ioports for non-pcmcia, non-pci boards */
if ((thisboard->bustype != pcmcia) && (thisboard->bustype != pci)) {
- printk(" iobase 0x%lx\n", iobase);
+ printk(KERN_INFO " iobase 0x%lx\n", iobase);
if (!request_region(iobase, thisboard->iosize, DRV_NAME)) {
- printk(" I/O port conflict\n");
+ printk(KERN_ERR " I/O port conflict\n");
return -EIO;
}
}
@@ -878,8 +881,11 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
/* ai */
if (thisboard->ai) {
s->type = COMEDI_SUBD_AI;
- /* XXX some boards actually have differential inputs instead of single ended.
- * The driver does nothing with arefs though, so it's no big deal. */
+ /* XXX some boards actually have differential
+ * inputs instead of single ended.
+ * The driver does nothing with arefs though,
+ * so it's no big deal.
+ */
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 8;
s->maxdata = (1 << thisboard->ai_nbits) - 1;
@@ -966,6 +972,7 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
return 0;
}
+EXPORT_SYMBOL_GPL(das08_common_attach);
static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
@@ -980,7 +987,7 @@ static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
- printk("comedi%d: das08: ", dev->minor);
+ printk(KERN_INFO "comedi%d: das08: ", dev->minor);
/* deal with a pci board */
if (thisboard->bustype == pci) {
#ifdef CONFIG_COMEDI_PCI
@@ -1007,20 +1014,21 @@ static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
}
if (!pdev) {
- printk("No pci das08 cards found\n");
+ printk(KERN_ERR "No pci das08 cards found\n");
return -EIO;
}
devpriv->pdev = pdev;
/* enable PCI device and reserve I/O spaces */
if (comedi_pci_enable(pdev, DRV_NAME)) {
- printk
- (" Error enabling PCI device and requesting regions\n");
+ printk(KERN_ERR " Error enabling PCI device and "
+ "requesting regions\n");
return -EIO;
}
/* read base addresses */
pci_iobase = pci_resource_start(pdev, 1);
iobase = pci_resource_start(pdev, 2);
- printk("pcibase 0x%lx iobase 0x%lx\n", pci_iobase, iobase);
+ printk(KERN_INFO "pcibase 0x%lx iobase 0x%lx\n",
+ pci_iobase, iobase);
devpriv->pci_iobase = pci_iobase;
#if 0
/* We could enable to pci-das08's interrupt here to make it possible
@@ -1034,17 +1042,18 @@ static int das08_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outw(INTR1_ENABLE | PCI_INTR_ENABLE, pci_iobase + INTCSR);
#endif
#else /* CONFIG_COMEDI_PCI */
- printk("this driver has not been built with PCI support.\n");
+ printk(KERN_ERR "this driver has not been built with PCI support.\n");
return -EINVAL;
#endif /* CONFIG_COMEDI_PCI */
} else {
iobase = it->options[0];
}
- printk("\n");
+ printk(KERN_INFO "\n");
return das08_common_attach(dev, iobase);
}
+
int das08_common_detach(struct comedi_device *dev)
{
printk(KERN_INFO "comedi%d: das08: remove\n", dev->minor);
@@ -1060,9 +1069,9 @@ int das08_common_detach(struct comedi_device *dev)
#ifdef CONFIG_COMEDI_PCI
if (devpriv) {
if (devpriv->pdev) {
- if (devpriv->pci_iobase) {
+ if (devpriv->pci_iobase)
comedi_pci_disable(devpriv->pdev);
- }
+
pci_dev_put(devpriv->pdev);
}
}
@@ -1070,6 +1079,7 @@ int das08_common_detach(struct comedi_device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(das08_common_detach);
#ifdef CONFIG_COMEDI_PCI
COMEDI_PCI_INITCLEANUP(driver_das08, das08_pci_table);
@@ -1077,8 +1087,6 @@ COMEDI_PCI_INITCLEANUP(driver_das08, das08_pci_table);
COMEDI_INITCLEANUP(driver_das08);
#endif
-EXPORT_SYMBOL_GPL(das08_common_attach);
-EXPORT_SYMBOL_GPL(das08_common_detach);
#ifdef CONFIG_COMEDI_PCMCIA
EXPORT_SYMBOL_GPL(das08_cs_boards);
#endif
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
index 35d2660cf93..2a30d764ddf 100644
--- a/drivers/staging/comedi/drivers/das08.h
+++ b/drivers/staging/comedi/drivers/das08.h
@@ -62,7 +62,7 @@ struct i8254_struct {
#define I8254_CTRL 3
struct das08_private_struct {
- unsigned int do_mux_bits; /* bits for do/mux register on boards without seperate do register */
+ unsigned int do_mux_bits; /* bits for do/mux register on boards without separate do register */
unsigned int do_bits; /* bits for do register on boards with register dedicated to digital out only */
const unsigned int *pg_gainlist;
struct pci_dev *pdev; /* struct for pci-das08 */
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9164ce158dc..8761a6d285d 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -142,7 +142,6 @@ static const dev_info_t dev_info = "pcm-das08";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -172,10 +171,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -207,10 +202,8 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "das08_pcmcia_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- das08_pcmcia_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ das08_pcmcia_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
@@ -229,8 +222,7 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -ENODEV;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -266,7 +258,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void das08_pcmcia_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "das08_pcmcia_config\n");
@@ -277,11 +268,8 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
goto failed;
}
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -292,19 +280,10 @@ static void das08_pcmcia_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "pcm-das08");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -371,6 +350,10 @@ static struct pcmcia_device_id das08_cs_id_table[] = {
};
MODULE_DEVICE_TABLE(pcmcia, das08_cs_id_table);
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>, "
+ "Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_DESCRIPTION("Comedi driver for ComputerBoards DAS-08 PCMCIA boards");
+MODULE_LICENSE("GPL");
struct pcmcia_driver das08_cs_driver = {
.probe = das08_pcmcia_attach,
@@ -413,6 +396,5 @@ static void __exit das08_cs_exit_module(void)
comedi_driver_unregister(&driver_das08_cs);
}
-MODULE_LICENSE("GPL");
module_init(das08_cs_init_module);
module_exit(das08_cs_exit_module);
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index f2aadda9b24..ccee4f1802d 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -74,7 +74,8 @@ Keithley Manuals:
4922.PDF (das-1400)
4923.PDF (das1200, 1400, 1600)
-Computer boards manuals also available from their website www.measurementcomputing.com
+Computer boards manuals also available from their website
+www.measurementcomputing.com
*/
@@ -92,7 +93,8 @@ Computer boards manuals also available from their website www.measurementcomputi
/* #define DEBUG */
#ifdef DEBUG
-#define DEBUG_PRINT(format, args...) printk("das16: " format, ## args)
+#define DEBUG_PRINT(format, args...) \
+ printk(KERN_DEBUG "das16: " format, ## args)
#else
#define DEBUG_PRINT(format, args...)
#endif
@@ -186,15 +188,16 @@ Computer boards manuals also available from their website www.measurementcomputi
*/
-static const int sample_size = 2; /* size in bytes of a sample from board */
+/* size in bytes of a sample from board */
+static const int sample_size = 2;
#define DAS16_TRIG 0
#define DAS16_AI_LSB 0
#define DAS16_AI_MSB 1
#define DAS16_MUX 2
#define DAS16_DIO 3
-#define DAS16_AO_LSB(x) ((x)?6:4)
-#define DAS16_AO_MSB(x) ((x)?7:5)
+#define DAS16_AO_LSB(x) ((x) ? 6 : 4)
+#define DAS16_AO_MSB(x) ((x) ? 7 : 5)
#define DAS16_STATUS 8
#define BUSY (1<<7)
#define UNIPOLAR (1<<6)
@@ -271,7 +274,7 @@ static const struct comedi_lrange range_das1x02_unip = { 4, {
};
static const struct comedi_lrange range_das16jr = { 9, {
- /* also used by 16/330 */
+ /* also used by 16/330 */
BIP_RANGE(10),
BIP_RANGE(5),
BIP_RANGE(2.5),
@@ -547,7 +550,8 @@ static const struct das16_board das16_boards[] = {
.id = 0x20,
},
{
- .name = "das-1401", /* 4919.pdf and 4922.pdf (keithley user's manual) */
+ /* 4919.pdf and 4922.pdf (keithley user's manual) */
+ .name = "das-1401",
.ai = das16_ai_rinsn,
.ai_nbits = 12,
.ai_speed = 10000,
@@ -558,10 +562,11 @@ static const struct das16_board das16_boards[] = {
.i8255_offset = 0x0,
.i8254_offset = 0x0c,
.size = 0x408,
- .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */
+ .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */
},
{
- .name = "das-1402", /* 4919.pdf and 4922.pdf (keithley user's manual) */
+ /* 4919.pdf and 4922.pdf (keithley user's manual) */
+ .name = "das-1402",
.ai = das16_ai_rinsn,
.ai_nbits = 12,
.ai_speed = 10000,
@@ -572,7 +577,7 @@ static const struct das16_board das16_boards[] = {
.i8255_offset = 0x0,
.i8254_offset = 0x0c,
.size = 0x408,
- .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */
+ .id = 0xc0 /* 4919.pdf says id bits are 0xe0, 4922.pdf says 0xc0 */
},
{
.name = "das-1601", /* 4919.pdf */
@@ -704,7 +709,8 @@ static const struct das16_board das16_boards[] = {
.name = "das16/jr/ctr5", /* ? */
},
{
- .name = "cio-das16/m1/16", /* cio-das16_m1_16.pdf, this board is a bit quirky, no dma */
+ /* cio-das16_m1_16.pdf, this board is a bit quirky, no dma */
+ .name = "cio-das16/m1/16",
},
#endif
};
@@ -736,14 +742,19 @@ struct das16_private_struct {
unsigned int clockbase; /* master clock speed in ns */
volatile unsigned int control_state; /* dma, interrupt and trigger control bits */
volatile unsigned long adc_byte_count; /* number of bytes remaining */
- unsigned int divisor1; /* divisor dividing master clock to get conversion frequency */
- unsigned int divisor2; /* divisor dividing master clock to get conversion frequency */
+ /* divisor dividing master clock to get conversion frequency */
+ unsigned int divisor1;
+ /* divisor dividing master clock to get conversion frequency */
+ unsigned int divisor2;
unsigned int dma_chan; /* dma channel */
uint16_t *dma_buffer[2];
dma_addr_t dma_buffer_addr[2];
unsigned int current_buffer;
volatile unsigned int dma_transfer_size; /* target number of bytes to transfer per dma shot */
- /* user-defined analog input and output ranges defined from config options */
+ /**
+ * user-defined analog input and output ranges
+ * defined from config options
+ */
struct comedi_lrange *user_ai_range_table;
struct comedi_lrange *user_ao_range_table;
@@ -798,7 +809,10 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /**
+ * step 2: make sure trigger sources are unique and
+ * mutually compatible
+ */
if (cmd->scan_begin_src != TRIG_TIMER &&
cmd->scan_begin_src != TRIG_EXT &&
cmd->scan_begin_src != TRIG_FOLLOW)
@@ -893,12 +907,15 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
if (CR_CHAN(cmd->chanlist[i]) !=
(start_chan + i) % s->n_chan) {
comedi_error(dev,
- "entries in chanlist must be consecutive channels, counting upwards\n");
+ "entries in chanlist must be "
+ "consecutive channels, "
+ "counting upwards\n");
err++;
}
if (CR_RANGE(cmd->chanlist[i]) != gain) {
comedi_error(dev,
- "entries in chanlist must all have the same gain\n");
+ "entries in chanlist must all "
+ "have the same gain\n");
err++;
}
}
@@ -920,12 +937,13 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->dma_chan == 0 || (dev->irq == 0
&& devpriv->timer_mode == 0)) {
comedi_error(dev,
- "irq (or use of 'timer mode') dma required to execute comedi_cmd");
+ "irq (or use of 'timer mode') dma required to "
+ "execute comedi_cmd");
return -1;
}
if (cmd->flags & TRIG_RT) {
- comedi_error(dev,
- "isa dma transfers cannot be performed with TRIG_RT, aborting");
+ comedi_error(dev, "isa dma transfers cannot be performed with "
+ "TRIG_RT, aborting");
return -1;
}
@@ -933,16 +951,17 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
cmd->stop_arg * cmd->chanlist_len * sizeof(uint16_t);
/* disable conversions for das1600 mode */
- if (thisboard->size > 0x400) {
+ if (thisboard->size > 0x400)
outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV);
- }
+
/* set scan limits */
byte = CR_CHAN(cmd->chanlist[0]);
byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4;
outb(byte, dev->iobase + DAS16_MUX);
/* set gain (this is also burst rate register but according to
- * computer boards manual, burst rate does nothing, even on keithley cards) */
+ * computer boards manual, burst rate does nothing, even on
+ * keithley cards) */
if (thisboard->ai_pg != das16_pg_none) {
range = CR_RANGE(cmd->chanlist[0]);
outb((das16_gainlists[thisboard->ai_pg])[range],
@@ -1005,9 +1024,9 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
/* Enable conversions if using das1600 mode */
- if (thisboard->size > 0x400) {
+ if (thisboard->size > 0x400)
outb(0, dev->iobase + DAS1600_CONV);
- }
+
return 0;
}
@@ -1030,9 +1049,9 @@ static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
}
/* disable burst mode */
- if (thisboard->size > 0x400) {
+ if (thisboard->size > 0x400)
outb(0, dev->iobase + DAS1600_BURST);
- }
+
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -1085,11 +1104,11 @@ static int das16_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
}
msb = inb(dev->iobase + DAS16_AI_MSB);
lsb = inb(dev->iobase + DAS16_AI_LSB);
- if (thisboard->ai_nbits == 12) {
+ if (thisboard->ai_nbits == 12)
data[n] = ((lsb >> 4) & 0xf) | (msb << 4);
- } else {
+ else
data[n] = lsb | (msb << 8);
- }
+
}
return n;
@@ -1207,8 +1226,8 @@ static int disable_dma_on_even(struct comedi_device *dev)
residue = get_dma_residue(devpriv->dma_chan);
}
if (i == disable_limit) {
- comedi_error(dev,
- "failed to get an even dma transfer, could be trouble.");
+ comedi_error(dev, "failed to get an even dma transfer, "
+ "could be trouble.");
}
return residue;
}
@@ -1254,7 +1273,8 @@ static void das16_interrupt(struct comedi_device *dev)
} else
num_bytes = devpriv->dma_transfer_size - residue;
- if (cmd->stop_src == TRIG_COUNT && num_bytes >= devpriv->adc_byte_count) {
+ if (cmd->stop_src == TRIG_COUNT &&
+ num_bytes >= devpriv->adc_byte_count) {
num_bytes = devpriv->adc_byte_count;
async->events |= COMEDI_CB_EOA;
}
@@ -1275,9 +1295,9 @@ static void das16_interrupt(struct comedi_device *dev)
set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
enable_dma(devpriv->dma_chan);
/* reenable conversions for das1600 mode, (stupid hardware) */
- if (thisboard->size > 0x400 && devpriv->timer_mode == 0) {
+ if (thisboard->size > 0x400 && devpriv->timer_mode == 0)
outb(0x00, dev->iobase + DAS1600_CONV);
- }
+
}
release_dma_lock(dma_flags);
@@ -1330,25 +1350,25 @@ static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it)
status = inb(dev->iobase + DAS16_STATUS);
- if ((status & UNIPOLAR)) {
+ if ((status & UNIPOLAR))
devpriv->ai_unipolar = 1;
- } else {
+ else
devpriv->ai_unipolar = 0;
- }
- if ((status & DAS16_MUXBIT)) {
+
+ if ((status & DAS16_MUXBIT))
devpriv->ai_singleended = 1;
- } else {
+ else
devpriv->ai_singleended = 0;
- }
+
/* diobits indicates boards */
diobits = inb(dev->iobase + DAS16_DIO) & 0xf0;
- printk(" id bits are 0x%02x\n", diobits);
+ printk(KERN_INFO " id bits are 0x%02x\n", diobits);
if (thisboard->id != diobits) {
- printk(" requested board's id bits are 0x%x (ignore)\n",
+ printk(KERN_INFO " requested board's id bits are 0x%x (ignore)\n",
thisboard->id);
}
@@ -1363,10 +1383,10 @@ static int das1600_mode_detect(struct comedi_device *dev)
if (status & DAS1600_CLK_10MHZ) {
devpriv->clockbase = 100;
- printk(" 10MHz pacer clock\n");
+ printk(KERN_INFO " 10MHz pacer clock\n");
} else {
devpriv->clockbase = 1000;
- printk(" 1MHz pacer clock\n");
+ printk(KERN_INFO " 1MHz pacer clock\n");
}
reg_dump(dev);
@@ -1406,14 +1426,15 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (timer_mode)
irq = 0;
- printk("comedi%d: das16:", dev->minor);
+ printk(KERN_INFO "comedi%d: das16:", dev->minor);
/* check that clock setting is valid */
if (it->options[3]) {
if (it->options[3] != 0 &&
it->options[3] != 1 && it->options[3] != 10) {
printk
- ("\n Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
+ ("\n Invalid option. Master clock must be set "
+ "to 1 or 10 (MHz)\n");
return -EINVAL;
}
}
@@ -1425,23 +1446,23 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (thisboard->size < 0x400) {
printk(" 0x%04lx-0x%04lx\n", iobase, iobase + thisboard->size);
if (!request_region(iobase, thisboard->size, "das16")) {
- printk(" I/O port conflict\n");
+ printk(KERN_ERR " I/O port conflict\n");
return -EIO;
}
} else {
- printk(" 0x%04lx-0x%04lx 0x%04lx-0x%04lx\n",
+ printk(KERN_INFO " 0x%04lx-0x%04lx 0x%04lx-0x%04lx\n",
iobase, iobase + 0x0f,
iobase + 0x400,
iobase + 0x400 + (thisboard->size & 0x3ff));
if (!request_region(iobase, 0x10, "das16")) {
- printk(" I/O port conflict: 0x%04lx-0x%04lx\n",
+ printk(KERN_ERR " I/O port conflict: 0x%04lx-0x%04lx\n",
iobase, iobase + 0x0f);
return -EIO;
}
if (!request_region(iobase + 0x400, thisboard->size & 0x3ff,
"das16")) {
release_region(iobase, 0x10);
- printk(" I/O port conflict: 0x%04lx-0x%04lx\n",
+ printk(KERN_ERR " I/O port conflict: 0x%04lx-0x%04lx\n",
iobase + 0x400,
iobase + 0x400 + (thisboard->size & 0x3ff));
return -EIO;
@@ -1452,7 +1473,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* probe id bits to make sure they are consistent */
if (das16_probe(dev, it)) {
- printk(" id bits do not match selected board, aborting\n");
+ printk(KERN_ERR " id bits do not match selected board, aborting\n");
return -EINVAL;
}
dev->board_name = thisboard->name;
@@ -1474,7 +1495,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
dev->irq = irq;
- printk(" ( irq = %u )", irq);
+ printk(KERN_INFO " ( irq = %u )", irq);
} else if (irq == 0) {
printk(" ( no irq )");
} else {
@@ -1488,16 +1509,15 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* allocate dma buffers */
int i;
for (i = 0; i < 2; i++) {
- devpriv->dma_buffer[i] = pci_alloc_consistent(NULL,
- DAS16_DMA_SIZE,
- &devpriv->
- dma_buffer_addr
- [i]);
+ devpriv->dma_buffer[i] = pci_alloc_consistent(
+ NULL, DAS16_DMA_SIZE,
+ &devpriv->dma_buffer_addr[i]);
+
if (devpriv->dma_buffer[i] == NULL)
return -ENOMEM;
}
if (request_dma(dma_chan, "das16")) {
- printk(" failed to allocate dma channel %i\n",
+ printk(KERN_ERR " failed to allocate dma channel %i\n",
dma_chan);
return -EINVAL;
}
@@ -1506,11 +1526,11 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
disable_dma(devpriv->dma_chan);
set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
release_dma_lock(flags);
- printk(" ( dma = %u)\n", dma_chan);
+ printk(KERN_INFO " ( dma = %u)\n", dma_chan);
} else if (dma_chan == 0) {
- printk(" ( no dma )\n");
+ printk(KERN_INFO " ( no dma )\n");
} else {
- printk(" invalid dma channel\n");
+ printk(KERN_ERR " invalid dma channel\n");
return -EINVAL;
}
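The printk() changes in this hunk only add explicit log levels. As a hedged aside, newer kernel code often reaches for the pr_*() helpers, which fold the level into the call; a minimal sketch is shown below (the message text is illustrative, not the driver's):

	/* Sketch only: the same kind of messages via pr_info()/pr_err(). */
	pr_info("comedi%d: das16: ( dma = %u)\n", dev->minor, dma_chan);
	pr_err("comedi%d: das16: invalid dma channel\n", dev->minor);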
@@ -1569,7 +1589,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->subdev_flags |= SDF_DIFF;
}
s->maxdata = (1 << thisboard->ai_nbits) - 1;
- if (devpriv->user_ai_range_table) { /* user defined ai range */
+ if (devpriv->user_ai_range_table) { /* user defined ai range */
s->range_table = devpriv->user_ai_range_table;
} else if (devpriv->ai_unipolar) {
s->range_table = das16_ai_uni_lranges[thisboard->ai_pg];
@@ -1592,11 +1612,12 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 2;
s->maxdata = (1 << thisboard->ao_nbits) - 1;
- if (devpriv->user_ao_range_table) { /* user defined ao range */
+ /* user defined ao range */
+ if (devpriv->user_ao_range_table)
s->range_table = devpriv->user_ao_range_table;
- } else {
+ else
s->range_table = &range_unknown;
- }
+
s->insn_write = thisboard->ao;
} else {
s->type = COMEDI_SUBD_UNUSED;
@@ -1656,7 +1677,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int das16_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: das16: remove\n", dev->minor);
das16_reset(dev);
@@ -1750,8 +1771,8 @@ static void das16_ai_munge(struct comedi_device *dev,
for (i = 0; i < num_samples; i++) {
data[i] = le16_to_cpu(data[i]);
- if (thisboard->ai_nbits == 12) {
+ if (thisboard->ai_nbits == 12)
data[i] = (data[i] >> 4) & 0xfff;
- }
+
}
}
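A worked instance of the 12-bit case in das16_ai_munge() above, using a made-up raw word:

	/* Illustration: a 12-bit code of 0x7ff arrives left-justified as 0x7ff0. */
	unsigned short raw = 0x7ff0;			/* after le16_to_cpu() */
	unsigned short sample = (raw >> 4) & 0xfff;	/* == 0x7ff */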
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 3c3e0455c7c..de5e82fec87 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -797,10 +797,8 @@ static int das1800_detach(struct comedi_device *dev)
free_dma(devpriv->dma0);
if (devpriv->dma1)
free_dma(devpriv->dma1);
- if (devpriv->ai_buf0)
- kfree(devpriv->ai_buf0);
- if (devpriv->ai_buf1)
- kfree(devpriv->ai_buf1);
+ kfree(devpriv->ai_buf0);
+ kfree(devpriv->ai_buf1);
}
printk("comedi%d: %s: remove\n", dev->minor,
@@ -1639,7 +1637,8 @@ static int das1800_ai_rinsn(struct comedi_device *dev,
}
if (i == timeout) {
comedi_error(dev, "timeout");
- return -ETIME;
+ n = -ETIME;
+ goto exit;
}
dpnt = inw(dev->iobase + DAS1800_FIFO);
/* shift data to offset binary for bipolar ranges */
@@ -1647,6 +1646,7 @@ static int das1800_ai_rinsn(struct comedi_device *dev,
dpnt += 1 << (thisboard->resolution - 1);
data[n] = dpnt;
}
+exit:
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
return n;
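The das1800_ai_rinsn() change above routes the timeout through a label so that spin_unlock_irqrestore() also runs on the error path. For reference, a minimal sketch of that single-exit pattern; the example_dev type and example_fifo_*() helpers are hypothetical, not part of the driver.

	struct example_dev {
		spinlock_t lock;
	};

	bool example_fifo_ready(struct example_dev *ed);		/* hypothetical */
	unsigned int example_fifo_read(struct example_dev *ed);	/* hypothetical */

	static int example_read_sample(struct example_dev *ed, unsigned int *val)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&ed->lock, flags);
		if (!example_fifo_ready(ed)) {
			ret = -ETIME;
			goto unlock;	/* still releases the lock below */
		}
		*val = example_fifo_read(ed);
	unlock:
		spin_unlock_irqrestore(&ed->lock, flags);
		return ret;
	}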
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 3f365aee482..83fb6e56c3e 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -472,7 +472,7 @@ static const struct comedi_lrange *dac_range_table[] = {
static const struct comedi_lrange *dac_range_lkup(int opt)
{
- if (opt < 0 || opt > 5)
+ if (opt < 0 || opt >= 5)
return &range_unknown;
return dac_range_table[opt];
}
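The bounds check above is tightened from "opt > 5" to "opt >= 5". A common way to keep such a check in sync with the table is ARRAY_SIZE(); this is only a sketch and assumes dac_range_table covers exactly the selectable options:

	/* Sketch: derive the limit from the table itself rather than a literal. */
	static const struct comedi_lrange *dac_range_lkup(int opt)
	{
		if (opt < 0 || opt >= ARRAY_SIZE(dac_range_table))
			return &range_unknown;
		return dac_range_table[opt];
	}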
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 51ef695698a..ea9bfb7fd88 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -34,13 +34,13 @@ Configuration options:
[0] - I/O port base address
[1] - IRQ, although this is currently unused
[2] - A/D reference
- 0 = signle-ended
- 1 = differential
+ 0 = single-ended
+ 1 = differential
2 = pseudo-differential (common reference)
[3] - A/D range
- 0 = [-5,5]
- 1 = [-2.5,2.5]
- 2 = [0,5]
+ 0 = [-5, 5]
+ 1 = [-2.5, 2.5]
+ 2 = [0, 5]
[4] - D/A 0 range (same choices)
[4] - D/A 1 range (same choices)
*/
@@ -52,96 +52,58 @@ Configuration options:
static const char *driver_name = "dt2811";
-static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = { 4, {
- RANGE
- (0, 5),
- RANGE
- (0,
- 2.5),
- RANGE
- (0,
- 1.25),
- RANGE
- (0,
- 0.625)
- }
+static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
+ 4, {
+ RANGE(0, 5),
+ RANGE(0, 2.5),
+ RANGE(0, 1.25),
+ RANGE(0, 0.625)
+ }
};
-static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = { 4, {
- RANGE
- (-2.5,
- 2.5),
- RANGE
- (-1.25,
- 1.25),
- RANGE
- (-0.625,
- 0.625),
- RANGE
- (-0.3125,
- 0.3125)
- }
+static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
+ 4, {
+ RANGE(-2.5, 2.5),
+ RANGE(-1.25, 1.25),
+ RANGE(-0.625, 0.625),
+ RANGE(-0.3125, 0.3125)
+ }
};
-static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = { 4, {
- RANGE
- (-5, 5),
- RANGE
- (-2.5,
- 2.5),
- RANGE
- (-1.25,
- 1.25),
- RANGE
- (-0.625,
- 0.625)
- }
+static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
+ 4, {
+ RANGE(-5, 5),
+ RANGE(-2.5, 2.5),
+ RANGE(-1.25, 1.25),
+ RANGE(-0.625, 0.625)
+ }
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = { 4, {
- RANGE
- (0, 5),
- RANGE
- (0,
- 0.5),
- RANGE
- (0,
- 0.05),
- RANGE
- (0,
- 0.01)
- }
+static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
+ 4, {
+ RANGE(0, 5),
+ RANGE(0, 0.5),
+ RANGE(0, 0.05),
+ RANGE(0, 0.01)
+ }
};
-static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = { 4, {
- RANGE
- (-2.5,
- 2.5),
- RANGE
- (-0.25,
- 0.25),
- RANGE
- (-0.025,
- 0.025),
- RANGE
- (-0.005,
- 0.005)
- }
+static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
+ 4, {
+ RANGE(-2.5, 2.5),
+ RANGE(-0.25, 0.25),
+ RANGE(-0.025, 0.025),
+ RANGE(-0.005, 0.005)
+ }
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = { 4, {
- RANGE
- (-5, 5),
- RANGE
- (-0.5,
- 0.5),
- RANGE
- (-0.05,
- 0.05),
- RANGE
- (-0.01,
- 0.01)
- }
+static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
+ 4, {
+ RANGE(-5, 5),
+ RANGE(-0.5, 0.5),
+ RANGE(-0.05, 0.05),
+ RANGE(-0.01, 0.01)
+ }
};
/*
@@ -348,21 +310,21 @@ static irqreturn_t dt2811_interrupt(int irq, void *d)
options[0] Board base address
options[1] IRQ
options[2] Input configuration
- 0 == single-ended
- 1 == differential
- 2 == pseudo-differential
+ 0 == single-ended
+ 1 == differential
+ 2 == pseudo-differential
options[3] Analog input range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
+ 0 == bipolar 5 (-5V -- +5V)
+ 1 == bipolar 2.5V (-2.5V -- +2.5V)
+ 2 == unipolar 5V (0V -- +5V)
options[4] Analog output 0 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
+ 0 == bipolar 5 (-5V -- +5V)
+ 1 == bipolar 2.5V (-2.5V -- +2.5V)
+ 2 == unipolar 5V (0V -- +5V)
options[5] Analog output 1 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
+ 0 == bipolar 5 (-5V -- +5V)
+ 1 == bipolar 2.5V (-2.5V -- +2.5V)
+ 2 == unipolar 5V (0V -- +5V)
*/
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -377,10 +339,10 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = it->options[0];
- printk("comedi%d: dt2811: base=0x%04lx\n", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: dt2811:base=0x%04lx\n", dev->minor, iobase);
if (!request_region(iobase, DT2811_SIZE, driver_name)) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "I/O port conflict\n");
return -EIO;
}
@@ -410,25 +372,25 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = probe_irq_off(irqs);
restore_flags(flags);
- /*outb(DT2811_CLRERROR|DT2811_INTENB,dev->iobase+DT2811_ADCSR); */
+ /*outb(DT2811_CLRERROR|DT2811_INTENB,
+ dev->iobase+DT2811_ADCSR);*/
- if (inb(dev->iobase + DT2811_ADCSR) & DT2811_ADERROR) {
- printk("error probing irq (bad) \n");
- }
+ if (inb(dev->iobase + DT2811_ADCSR) & DT2811_ADERROR)
+ printk(KERN_ERR "error probing irq (bad)\n");
dev->irq = 0;
if (irq > 0) {
i = inb(dev->iobase + DT2811_ADDATLO);
i = inb(dev->iobase + DT2811_ADDATHI);
- printk("(irq = %d)\n", irq);
+ printk(KERN_INFO "(irq = %d)\n", irq);
ret = request_irq(irq, dt2811_interrupt, 0,
driver_name, dev);
if (ret < 0)
return -EIO;
dev->irq = irq;
} else if (irq == 0) {
- printk("(no irq)\n");
+ printk(KERN_INFO "(no irq)\n");
} else {
- printk("( multiple irq's -- this is bad! )\n");
+ printk(KERN_ERR "( multiple irq's -- this is bad! )\n");
}
}
#endif
@@ -540,14 +502,12 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dt2811_detach(struct comedi_device *dev)
{
- printk("comedi%d: dt2811: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: dt2811: remove\n", dev->minor);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
- if (dev->iobase) {
+ if (dev->iobase)
release_region(dev->iobase, DT2811_SIZE);
- }
return 0;
}
@@ -579,7 +539,7 @@ static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
#if 0
/* Wow. This is code from the Comedi stone age. But it hasn't been
* replaced, so I'll let it stay. */
-int dt2811_adtrig(kdev_t minor, comedi_adtrig * adtrig)
+int dt2811_adtrig(kdev_t minor, comedi_adtrig *adtrig)
{
struct comedi_device *dev = comedi_devices + minor;
@@ -589,8 +549,10 @@ int dt2811_adtrig(kdev_t minor, comedi_adtrig * adtrig)
switch (dev->i_admode) {
case COMEDI_MDEMAND:
dev->ntrig = adtrig->n - 1;
+ /* not necessary */
/*printk("dt2811: AD soft trigger\n"); */
- /*outb(DT2811_CLRERROR|DT2811_INTENB,dev->iobase+DT2811_ADCSR); *//* not neccessary */
+ /*outb(DT2811_CLRERROR|DT2811_INTENB,
+ dev->iobase+DT2811_ADCSR); */
outb(dev->curadchan, dev->iobase + DT2811_ADGCR);
do_gettimeofday(&trigtime);
break;
@@ -630,9 +592,8 @@ static int dt2811_ao_insn_read(struct comedi_device *dev,
chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index e1b73752f60..16fde066d26 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -99,13 +99,13 @@ static int dt2814_ai_insn_read(struct comedi_device *dev,
outb(chan, dev->iobase + DT2814_CSR);
for (i = 0; i < DT2814_TIMEOUT; i++) {
status = inb(dev->iobase + DT2814_CSR);
- printk("dt2814: status: %02x\n", status);
+ printk(KERN_INFO "dt2814: status: %02x\n", status);
udelay(10);
if (status & DT2814_FINISH)
break;
}
if (i >= DT2814_TIMEOUT) {
- printk("dt2814: status: %02x\n", status);
+ printk(KERN_INFO "dt2814: status: %02x\n", status);
return -ETIMEDOUT;
}
@@ -173,7 +173,8 @@ static int dt2814_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /* step 2: make sure trigger sources are
+ * unique and mutually compatible */
/* note that mutual compatibility is not an issue here */
if (cmd->stop_src != TRIG_TIMER && cmd->stop_src != TRIG_EXT)
@@ -256,9 +257,9 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long iobase;
iobase = it->options[0];
- printk("comedi%d: dt2814: 0x%04lx ", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: dt2814: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, DT2814_SIZE, "dt2814")) {
- printk("I/O port conflict\n");
+ printk(KERN_ERR "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -267,7 +268,7 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outb(0, dev->iobase + DT2814_CSR);
udelay(100);
if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
- printk("reset error (fatal)\n");
+ printk(KERN_ERR "reset error (fatal)\n");
return -EIO;
}
i = inb(dev->iobase + DT2814_DATA);
@@ -286,9 +287,9 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = probe_irq_off(irqs);
restore_flags(flags);
- if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
- printk("error probing irq (bad) \n");
- }
+ if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR)
+ printk(KERN_DEBUG "error probing irq (bad)\n");
+
i = inb(dev->iobase + DT2814_DATA);
i = inb(dev->iobase + DT2814_DATA);
@@ -297,18 +298,18 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = 0;
if (irq > 0) {
if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) {
- printk("(irq %d unavailable)\n", irq);
+ printk(KERN_WARNING "(irq %d unavailable)\n", irq);
} else {
- printk("( irq = %d )\n", irq);
+ printk(KERN_INFO "( irq = %d )\n", irq);
dev->irq = irq;
}
} else if (irq == 0) {
- printk("(no irq)\n");
+ printk(KERN_WARNING "(no irq)\n");
} else {
#if 0
- printk("(probe returned multiple irqs--bad)\n");
+ printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n");
#else
- printk("(irq probe not implemented)\n");
+ printk(KERN_WARNING "(irq probe not implemented)\n");
#endif
}
@@ -337,14 +338,13 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dt2814_detach(struct comedi_device *dev)
{
- printk("comedi%d: dt2814: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: dt2814: remove\n", dev->minor);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
- if (dev->iobase) {
+
+ if (dev->iobase)
release_region(dev->iobase, DT2814_SIZE);
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index e548763cf2f..fd8728c8366 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -45,9 +45,9 @@ Configuration options:
[7] - AO 1 jumpered for 0=straight binary, 1=2's complement
[8] - AI jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5]
[9] - AO 0 jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5],
- 4=[-2.5,2.5]
+ 4=[-2.5,2.5]
[10]- A0 1 jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5],
- 4=[-2.5,2.5]
+ 4=[-2.5,2.5]
Notes:
- AO commands might be broken.
@@ -155,79 +155,58 @@ Notes:
#define DT2821_XCLK 0x0002 /* (R/W) external clock enable */
#define DT2821_BDINIT 0x0001 /* (W) initialize board */
-static const struct comedi_lrange range_dt282x_ai_lo_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-5,
- 5),
- RANGE(-2.5,
- 2.5),
- RANGE
- (-1.25,
- 1.25)
- }
+static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
+ 4, {
+ RANGE(-10, 10),
+ RANGE(-5, 5),
+ RANGE(-2.5, 2.5),
+ RANGE(-1.25, 1.25)
+ }
};
-static const struct comedi_lrange range_dt282x_ai_lo_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 5),
- RANGE(0,
- 2.5),
- RANGE(0,
- 1.25)
- }
+static const struct comedi_lrange range_dt282x_ai_lo_unipolar = {
+ 4, {
+ RANGE(0, 10),
+ RANGE(0, 5),
+ RANGE(0, 2.5),
+ RANGE(0, 1.25)
+ }
};
-static const struct comedi_lrange range_dt282x_ai_5_bipolar = { 4, {
- RANGE(-5,
- 5),
- RANGE(-2.5,
- 2.5),
- RANGE(-1.25,
- 1.25),
- RANGE
- (-0.625,
- 0.625),
- }
+static const struct comedi_lrange range_dt282x_ai_5_bipolar = {
+ 4, {
+ RANGE(-5, 5),
+ RANGE(-2.5, 2.5),
+ RANGE(-1.25, 1.25),
+ RANGE(-0.625, 0.625)
+ }
};
-static const struct comedi_lrange range_dt282x_ai_5_unipolar = { 4, {
- RANGE(0,
- 5),
- RANGE(0,
- 2.5),
- RANGE(0,
- 1.25),
- RANGE(0,
- 0.625),
- }
+static const struct comedi_lrange range_dt282x_ai_5_unipolar = {
+ 4, {
+ RANGE(0, 5),
+ RANGE(0, 2.5),
+ RANGE(0, 1.25),
+ RANGE(0, 0.625),
+ }
};
-static const struct comedi_lrange range_dt282x_ai_hi_bipolar = { 4, {
- RANGE(-10,
- 10),
- RANGE(-1,
- 1),
- RANGE(-0.1,
- 0.1),
- RANGE
- (-0.02,
- 0.02)
- }
+static const struct comedi_lrange range_dt282x_ai_hi_bipolar = {
+ 4, {
+ RANGE(-10, 10),
+ RANGE(-1, 1),
+ RANGE(-0.1, 0.1),
+ RANGE(-0.02, 0.02)
+ }
};
-static const struct comedi_lrange range_dt282x_ai_hi_unipolar = { 4, {
- RANGE(0,
- 10),
- RANGE(0,
- 1),
- RANGE(0,
- 0.1),
- RANGE(0,
- 0.02)
- }
+static const struct comedi_lrange range_dt282x_ai_hi_unipolar = {
+ 4, {
+ RANGE(0, 10),
+ RANGE(0, 1),
+ RANGE(0, 0.1),
+ RANGE(0, 0.02)
+ }
};
struct dt282x_board {
@@ -370,7 +349,7 @@ static const struct dt282x_board boardtypes[] = {
},
};
-#define n_boardtypes sizeof(boardtypes)/sizeof(struct dt282x_board)
+#define n_boardtypes (sizeof(boardtypes)/sizeof(struct dt282x_board))
#define this_board ((const struct dt282x_board *)dev->board_ptr)
struct dt282x_private {
@@ -411,21 +390,25 @@ struct dt282x_private {
#define update_adcsr(a) outw(devpriv->adcsr|(a), dev->iobase+DT2821_ADCSR)
#define mux_busy() (inw(dev->iobase+DT2821_ADCSR)&DT2821_MUXBUSY)
#define ad_done() (inw(dev->iobase+DT2821_ADCSR)&DT2821_ADDONE)
-#define update_supcsr(a) outw(devpriv->supcsr|(a), dev->iobase+DT2821_SUPCSR)
+#define update_supcsr(a) outw(devpriv->supcsr|(a), dev->iobase+DT2821_SUPCSR)
/*
* danger! macro abuse... a is the expression to wait on, and b is
* the statement(s) to execute if it doesn't happen.
*/
-#define wait_for(a, b) \
- do{ \
- int _i; \
- for (_i=0;_i<DT2821_TIMEOUT;_i++){ \
- if (a){_i=0;break;} \
- udelay(5); \
- } \
- if (_i){b} \
- }while (0)
+#define wait_for(a, b) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < DT2821_TIMEOUT; _i++) { \
+ if (a) { \
+ _i = 0; \
+ break; \
+ } \
+ udelay(5); \
+ } \
+ if (_i) \
+ b \
+ } while (0)
static int dt282x_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
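A hedged usage sketch for the wait_for() macro reformatted above: the first argument is polled for up to DT2821_TIMEOUT iterations, and the second runs only if the wait times out. The message text below is illustrative.

	/* Illustrative call: poll the mux, report a timeout if it never settles. */
	wait_for(!mux_busy(),
		 comedi_error(dev, "example: mux timeout"););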
@@ -462,18 +445,16 @@ static void dt282x_munge(struct comedi_device *dev, short *buf,
unsigned short sign = 1 << (boardtype.adbits - 1);
int n;
- if (devpriv->ad_2scomp) {
+ if (devpriv->ad_2scomp)
sign = 1 << (boardtype.adbits - 1);
- } else {
+ else
sign = 0;
- }
if (nbytes % 2)
comedi_error(dev, "bug! odd number of bytes from dma xfer");
n = nbytes / 2;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < n; i++)
buf[i] = (buf[i] & mask) ^ sign;
- }
}
static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
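dt282x_munge() above maps two's-complement samples to offset binary by XOR-ing the sign bit. A worked sketch for a hypothetical 12-bit board (adbits == 12):

	/* Illustration: with adbits == 12, sign == 0x800 and mask == 0xfff. */
	unsigned short sign = 1 << (12 - 1);
	unsigned short mask = (1 << 12) - 1;
	unsigned short raw  = 0xfff;			/* -1 in two's complement */
	unsigned short out  = (raw & mask) ^ sign;	/* 0x7ff, just below mid-scale */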
@@ -486,7 +467,7 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
update_supcsr(DT2821_CLRDMADNE);
if (!s->async->prealloc_buf) {
- printk("async->data disappeared. dang!\n");
+ printk(KERN_ERR "async->data disappeared. dang!\n");
return;
}
@@ -499,7 +480,7 @@ static void dt282x_ao_dma_interrupt(struct comedi_device *dev)
size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize);
if (size == 0) {
- printk("dt282x: AO underrun\n");
+ printk(KERN_ERR "dt282x: AO underrun\n");
dt282x_ao_cancel(dev, s);
s->async->events |= COMEDI_CB_OVERFLOW;
return;
@@ -519,7 +500,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
update_supcsr(DT2821_CLRDMADNE);
if (!s->async->prealloc_buf) {
- printk("async->data disappeared. dang!\n");
+ printk(KERN_ERR "async->data disappeared. dang!\n");
return;
}
@@ -540,7 +521,7 @@ static void dt282x_ai_dma_interrupt(struct comedi_device *dev)
devpriv->nread -= size / 2;
if (devpriv->nread < 0) {
- printk("dt282x: off by one\n");
+ printk(KERN_INFO "dt282x: off by one\n");
devpriv->nread = 0;
}
if (!devpriv->nread) {
@@ -651,7 +632,7 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
static int warn = 5;
if (--warn <= 0) {
disable_irq(dev->irq);
- printk("disabling irq\n");
+ printk(KERN_INFO "disabling irq\n");
}
#endif
comedi_error(dev, "D/A error");
@@ -666,13 +647,13 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
data = (short)inw(dev->iobase + DT2821_ADDAT);
data &= (1 << boardtype.adbits) - 1;
- if (devpriv->ad_2scomp) {
+
+ if (devpriv->ad_2scomp)
data ^= 1 << (boardtype.adbits - 1);
- }
ret = comedi_buf_put(s->async, data);
- if (ret == 0) {
+
+ if (ret == 0)
s->async->events |= COMEDI_CB_OVERFLOW;
- }
devpriv->nread--;
if (!devpriv->nread) {
@@ -685,7 +666,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#endif
comedi_event(dev, s);
- /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n", adcsr, dacsr, supcsr); */
+ /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n",
+ adcsr, dacsr, supcsr); */
return IRQ_RETVAL(handled);
}
@@ -776,7 +758,10 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /*
+ * step 2: make sure trigger sources are unique
+ * and mutually compatible
+ */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_FOLLOW &&
@@ -859,7 +844,8 @@ static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->usedma == 0) {
comedi_error(dev,
- "driver requires 2 dma channels to execute command");
+ "driver requires 2 dma channels"
+ " to execute command");
return -EIO;
}
@@ -1049,7 +1035,10 @@ static int dt282x_ao_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /*
+ * step 2: make sure trigger sources are unique
+ * and mutually compatible
+ */
/* note that mutual compatibility is not an issue here */
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
@@ -1064,7 +1053,7 @@ static int dt282x_ao_cmdtest(struct comedi_device *dev,
cmd->start_arg = 0;
err++;
}
- if (cmd->scan_begin_arg < 5000 /* XXX unknown */ ) {
+ if (cmd->scan_begin_arg < 5000 /* XXX unknown */) {
cmd->scan_begin_arg = 5000;
err++;
}
@@ -1115,7 +1104,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk("dt282x: AO underrun\n");
+ printk(KERN_ERR "dt282x: AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 0, size);
@@ -1123,7 +1112,7 @@ static int dt282x_ao_inttrig(struct comedi_device *dev,
size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf,
devpriv->dma_maxsize);
if (size == 0) {
- printk("dt282x: AO underrun\n");
+ printk(KERN_ERR "dt282x: AO underrun\n");
return -EPIPE;
}
prep_ao_dma(dev, 1, size);
@@ -1141,7 +1130,8 @@ static int dt282x_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->usedma == 0) {
comedi_error(dev,
- "driver requires 2 dma channels to execute command");
+ "driver requires 2 dma channels"
+ " to execute command");
return -EIO;
}
@@ -1262,7 +1252,8 @@ static const struct comedi_lrange *opt_ao_range_lkup(int x)
return ao_range_table[x];
}
-enum { opt_iobase = 0, opt_irq, opt_dma1, opt_dma2, /* i/o base, irq, dma channels */
+enum { /* i/o base, irq, dma channels */
+ opt_iobase = 0, opt_irq, opt_dma1, opt_dma2,
opt_diff, /* differential */
opt_ai_twos, opt_ao0_twos, opt_ao1_twos, /* twos comp */
opt_ai_range, opt_ao0_range, opt_ao1_range, /* range */
@@ -1295,9 +1286,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!iobase)
iobase = 0x240;
- printk("comedi%d: dt282x: 0x%04lx", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: dt282x: 0x%04lx", dev->minor, iobase);
if (!request_region(iobase, DT2821_SIZE, "dt282x")) {
- printk(" I/O port conflict\n");
+ printk(KERN_INFO " I/O port conflict\n");
return -EBUSY;
}
dev->iobase = iobase;
@@ -1305,7 +1296,7 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
outw(DT2821_BDINIT, dev->iobase + DT2821_SUPCSR);
i = inw(dev->iobase + DT2821_ADCSR);
#ifdef DEBUG
- printk(" fingerprint=%x,%x,%x,%x,%x",
+ printk(KERN_DEBUG " fingerprint=%x,%x,%x,%x,%x",
inw(dev->iobase + DT2821_ADCSR),
inw(dev->iobase + DT2821_CHANCSR),
inw(dev->iobase + DT2821_DACSR),
@@ -1323,7 +1314,7 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
!= DT2821_SUPCSR_VAL) ||
((inw(dev->iobase + DT2821_TMRCTR) & DT2821_TMRCTR_MASK)
!= DT2821_TMRCTR_VAL)) {
- printk(" board not found");
+ printk(KERN_ERR " board not found");
return -EIO;
}
/* should do board test */
@@ -1344,26 +1335,25 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = probe_irq_off(irqs);
restore_flags(flags);
- if (0 /* error */ ) {
- printk(" error probing irq (bad)");
- }
+ if (0 /* error */)
+ printk(KERN_ERR " error probing irq (bad)");
}
#endif
if (irq > 0) {
- printk(" ( irq = %d )", irq);
+ printk(KERN_INFO " ( irq = %d )", irq);
ret = request_irq(irq, dt282x_interrupt, 0, "dt282x", dev);
if (ret < 0) {
- printk(" failed to get irq\n");
+ printk(KERN_ERR " failed to get irq\n");
return -EIO;
}
dev->irq = irq;
} else if (irq == 0) {
- printk(" (no irq)");
+ printk(KERN_INFO " (no irq)");
} else {
#if 0
- printk(" (probe returned multiple irqs--bad)");
+ printk(KERN_INFO " (probe returned multiple irqs--bad)");
#else
- printk(" (irq probe not implemented)");
+ printk(KERN_INFO " (irq probe not implemented)");
#endif
}
@@ -1435,16 +1425,15 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->maxdata = 1;
s->range_table = &range_digital;
- printk("\n");
+ printk(KERN_INFO "\n");
return 0;
}
static void free_resources(struct comedi_device *dev)
{
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
if (dev->iobase)
release_region(dev->iobase, DT2821_SIZE);
if (dev->private) {
@@ -1461,7 +1450,7 @@ static void free_resources(struct comedi_device *dev)
static int dt282x_detach(struct comedi_device *dev)
{
- printk("comedi%d: dt282x: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: dt282x: remove\n", dev->minor);
free_resources(dev);
@@ -1475,7 +1464,7 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->usedma = 0;
if (!dma1 && !dma2) {
- printk(" (no dma)");
+ printk(KERN_ERR " (no dma)");
return 0;
}
@@ -1503,11 +1492,11 @@ static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) {
- printk(" can't get DMA memory");
+ printk(KERN_ERR " can't get DMA memory");
return -ENOMEM;
}
- printk(" (dma=%d,%d)", dma1, dma2);
+ printk(KERN_INFO " (dma=%d,%d)", dma1, dma2);
devpriv->usedma = 1;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index bbbef790c8f..ca687890fc1 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -314,9 +314,8 @@ static int dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
break;
udelay(1);
}
- if ((status & DT3000_COMPLETION_MASK) == DT3000_NOERROR) {
+ if ((status & DT3000_COMPLETION_MASK) == DT3000_NOERROR)
return 0;
- }
printk("dt3k_send_cmd() timeout/error status=0x%04x\n", status);
@@ -359,9 +358,8 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
struct comedi_subdevice *s;
unsigned int status;
- if (!dev->attached) {
+ if (!dev->attached)
return IRQ_NONE;
- }
s = dev->subdevices + 0;
status = readw(devpriv->io_addr + DPR_Intr_Flag);
@@ -374,9 +372,8 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
s->async->events |= COMEDI_CB_BLOCK;
}
- if (status & (DT3000_ADSWERR | DT3000_ADHWERR)) {
+ if (status & (DT3000_ADSWERR | DT3000_ADHWERR))
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- }
debug_n_ints++;
if (debug_n_ints >= 10) {
@@ -399,9 +396,8 @@ static void debug_intr_flags(unsigned int flags)
int i;
printk("dt3k: intr_flags:");
for (i = 0; i < 8; i++) {
- if (flags & (1 << i)) {
+ if (flags & (1 << i))
printk(" %s", intr_flags[i]);
- }
}
printk("\n");
}
@@ -690,9 +686,8 @@ static int dt3k_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
/* XXX docs don't explain how to select aref */
aref = CR_AREF(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = dt3k_readsingle(dev, SUBS_AI, chan, gain);
- }
return i;
}
@@ -720,9 +715,8 @@ static int dt3k_ao_insn_read(struct comedi_device *dev,
unsigned int chan;
chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
@@ -911,9 +905,8 @@ static int dt3000_detach(struct comedi_device *dev)
if (devpriv) {
if (devpriv->pci_dev) {
- if (devpriv->phys_addr) {
+ if (devpriv->phys_addr)
comedi_pci_disable(devpriv->pci_dev);
- }
pci_dev_put(devpriv->pci_dev);
}
if (devpriv->io_addr)
diff --git a/drivers/staging/comedi/drivers/icp_multi.h b/drivers/staging/comedi/drivers/icp_multi.h
index 8caadc630ed..2bb96b1d21e 100644
--- a/drivers/staging/comedi/drivers/icp_multi.h
+++ b/drivers/staging/comedi/drivers/icp_multi.h
@@ -73,14 +73,13 @@ static void pci_card_list_init(unsigned short pci_vendor, char display)
pcidev != NULL;
pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
if (pcidev->vendor == pci_vendor) {
- inova = kmalloc(sizeof(*inova), GFP_KERNEL);
+ inova = kzalloc(sizeof(*inova), GFP_KERNEL);
if (!inova) {
printk
("icp_multi: pci_card_list_init: allocation failed\n");
pci_dev_put(pcidev);
break;
}
- memset(inova, 0, sizeof(*inova));
inova->pcidev = pci_dev_get(pcidev);
if (last) {
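The icp_multi.h hunk replaces a kmalloc()+memset() pair with kzalloc(), which allocates zeroed memory in one call; a minimal before/after sketch:

	/* Before: allocate, then clear by hand. */
	inova = kmalloc(sizeof(*inova), GFP_KERNEL);
	if (inova)
		memset(inova, 0, sizeof(*inova));

	/* After: kzalloc() returns memory that is already zeroed. */
	inova = kzalloc(sizeof(*inova), GFP_KERNEL);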
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 80e192d2e77..c8484aec657 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -60,6 +60,7 @@ from http://www.comedi.org
#define ME_DRIVER_NAME "me_daq"
+#define PCI_VENDOR_ID_MEILHAUS 0x1402
#define ME2000_DEVICE_ID 0x2000
#define ME2600_DEVICE_ID 0x2600
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 188f5804274..99d9985c5b3 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -76,7 +76,7 @@ void mite_init(void)
for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
pcidev != NULL;
pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
- if (pcidev->vendor == PCI_VENDOR_ID_NATINST) {
+ if (pcidev->vendor == PCI_VENDOR_ID_NI) {
unsigned i;
mite = kzalloc(sizeof(*mite), GFP_KERNEL);
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 9d5049f8fa8..999551f54c2 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -27,8 +27,6 @@
#include <linux/pci.h>
#include "../comedidev.h"
-#define PCI_VENDOR_ID_NATINST 0x1093
-
/* #define DEBUG_MITE */
#define PCIMIO_COMPAT
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 12e72c82815..9874ac3749c 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -40,20 +40,20 @@ Status: working
Configuration Options:
[0] - I/O base address
[1] - convertion rate
- Convertion rate RMS noise Effective Number Of Bits
- 0 3.52kHz 23uV 17
- 1 1.76kHz 3.5uV 20
- 2 880Hz 2uV 21.3
- 3 440Hz 1.4uV 21.8
- 4 220Hz 1uV 22.4
- 5 110Hz 750uV 22.9
- 6 55Hz 510nV 23.4
- 7 27.5Hz 375nV 24
- 8 13.75Hz 250nV 24.4
- 9 6.875Hz 200nV 24.6
- [2] - voltage range
- 0 -1.01V .. +1.01V
- 1 -10.1V .. +10.1V
+ Conversion rate RMS noise Effective Number Of Bits
+ 0 3.52kHz 23uV 17
+ 1 1.76kHz 3.5uV 20
+ 2 880Hz 2uV 21.3
+ 3 440Hz 1.4uV 21.8
+ 4 220Hz 1uV 22.4
+ 5 110Hz 750uV 22.9
+ 6 55Hz 510nV 23.4
+ 7 27.5Hz 375nV 24
+ 8 13.75Hz 250nV 24.4
+ 9 6.875Hz 200nV 24.6
+ [2] - voltage range
+ 0 -1.01V .. +1.01V
+ 1 -10.1V .. +10.1V
*/
#include "../comedidev.h"
@@ -65,13 +65,13 @@ Configuration Options:
#define MPC624_SIZE 16
/* Offsets of different ports */
-#define MPC624_MASTER_CONTROL 0 /* not used */
-#define MPC624_GNMUXCH 1 /* Gain, Mux, Channel of ADC */
-#define MPC624_ADC 2 /* read/write to/from ADC */
-#define MPC624_EE 3 /* read/write to/from serial EEPROM via I2C */
-#define MPC624_LEDS 4 /* write to LEDs */
-#define MPC624_DIO 5 /* read/write to/from digital I/O ports */
-#define MPC624_IRQ_MASK 6 /* IRQ masking enable/disable */
+#define MPC624_MASTER_CONTROL 0 /* not used */
+#define MPC624_GNMUXCH 1 /* Gain, Mux, Channel of ADC */
+#define MPC624_ADC 2 /* read/write to/from ADC */
+#define MPC624_EE 3 /* read/write to/from serial EEPROM via I2C */
+#define MPC624_LEDS 4 /* write to LEDs */
+#define MPC624_DIO 5 /* read/write to/from digital I/O ports */
+#define MPC624_IRQ_MASK 6 /* IRQ masking enable/disable */
/* Register bits' names */
#define MPC624_ADBUSY (1<<5)
@@ -109,24 +109,27 @@ Configuration Options:
* ^ - Effective Number Of Bits
*/
-#define MPC624_SPEED_3_52_kHz (MPC624_OSR4 | MPC624_OSR0)
-#define MPC624_SPEED_1_76_kHz (MPC624_OSR4 | MPC624_OSR1)
-#define MPC624_SPEED_880_Hz (MPC624_OSR4 | MPC624_OSR1 | MPC624_OSR0)
-#define MPC624_SPEED_440_Hz (MPC624_OSR4 | MPC624_OSR2)
-#define MPC624_SPEED_220_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR0)
-#define MPC624_SPEED_110_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1)
-#define MPC624_SPEED_55_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
-#define MPC624_SPEED_27_5_Hz (MPC624_OSR4 | MPC624_OSR3)
-#define MPC624_SPEED_13_75_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR0)
-#define MPC624_SPEED_6_875_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
-/* ---------------------------------------------------------------------------- */
+#define MPC624_SPEED_3_52_kHz (MPC624_OSR4 | MPC624_OSR0)
+#define MPC624_SPEED_1_76_kHz (MPC624_OSR4 | MPC624_OSR1)
+#define MPC624_SPEED_880_Hz (MPC624_OSR4 | MPC624_OSR1 | MPC624_OSR0)
+#define MPC624_SPEED_440_Hz (MPC624_OSR4 | MPC624_OSR2)
+#define MPC624_SPEED_220_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR0)
+#define MPC624_SPEED_110_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1)
+#define MPC624_SPEED_55_Hz \
+ (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
+#define MPC624_SPEED_27_5_Hz (MPC624_OSR4 | MPC624_OSR3)
+#define MPC624_SPEED_13_75_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR0)
+#define MPC624_SPEED_6_875_Hz \
+ (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0)
+/* -------------------------------------------------------------------------- */
struct skel_private {
- unsigned long int ulConvertionRate; /* set by mpc624_attach() from driver's parameters */
+ /* set by mpc624_attach() from driver's parameters */
+ unsigned long int ulConvertionRate;
};
#define devpriv ((struct skel_private *)dev->private)
-/* ---------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
static const struct comedi_lrange range_mpc624_bipolar1 = {
1,
{
@@ -145,11 +148,11 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
}
};
-/* ---------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
static int mpc624_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int mpc624_detach(struct comedi_device *dev);
-/* ---------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
static struct comedi_driver driver_mpc624 = {
.driver_name = "mpc624",
.module = THIS_MODULE,
@@ -157,20 +160,20 @@ static struct comedi_driver driver_mpc624 = {
.detach = mpc624_detach
};
-/* ---------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
static int mpc624_ai_rinsn(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data);
-/* ---------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
unsigned long iobase;
iobase = it->options[0];
- printk("comedi%d: mpc624 [0x%04lx, ", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: mpc624 [0x%04lx, ", dev->minor, iobase);
if (request_region(iobase, MPC624_SIZE, "mpc624") == NULL) {
- printk("I/O port(s) in use\n");
+ printk(KERN_ERR "I/O port(s) in use\n");
return -EIO;
}
@@ -184,47 +187,48 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
switch (it->options[1]) {
case 0:
devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz;
- printk("3.52 kHz, ");
+ printk(KERN_INFO "3.52 kHz, ");
break;
case 1:
devpriv->ulConvertionRate = MPC624_SPEED_1_76_kHz;
- printk("1.76 kHz, ");
+ printk(KERN_INFO "1.76 kHz, ");
break;
case 2:
devpriv->ulConvertionRate = MPC624_SPEED_880_Hz;
- printk("880 Hz, ");
+ printk(KERN_INFO "880 Hz, ");
break;
case 3:
devpriv->ulConvertionRate = MPC624_SPEED_440_Hz;
- printk("440 Hz, ");
+ printk(KERN_INFO "440 Hz, ");
break;
case 4:
devpriv->ulConvertionRate = MPC624_SPEED_220_Hz;
- printk("220 Hz, ");
+ printk(KERN_INFO "220 Hz, ");
break;
case 5:
devpriv->ulConvertionRate = MPC624_SPEED_110_Hz;
- printk("110 Hz, ");
+ printk(KERN_INFO "110 Hz, ");
break;
case 6:
devpriv->ulConvertionRate = MPC624_SPEED_55_Hz;
- printk("55 Hz, ");
+ printk(KERN_INFO "55 Hz, ");
break;
case 7:
devpriv->ulConvertionRate = MPC624_SPEED_27_5_Hz;
- printk("27.5 Hz, ");
+ printk(KERN_INFO "27.5 Hz, ");
break;
case 8:
devpriv->ulConvertionRate = MPC624_SPEED_13_75_Hz;
- printk("13.75 Hz, ");
+ printk(KERN_INFO "13.75 Hz, ");
break;
case 9:
devpriv->ulConvertionRate = MPC624_SPEED_6_875_Hz;
- printk("6.875 Hz, ");
+ printk(KERN_INFO "6.875 Hz, ");
break;
default:
printk
- ("illegal convertion rate setting! Valid numbers are 0..9. Using 9 => 6.875 Hz, ");
+ (KERN_ERR "illegal convertion rate setting!"
+ " Valid numbers are 0..9. Using 9 => 6.875 Hz, ");
devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz;
}
@@ -239,29 +243,29 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
switch (it->options[1]) {
default:
s->maxdata = 0x3FFFFFFF;
- printk("30 bit, ");
+ printk(KERN_INFO "30 bit, ");
}
switch (it->options[1]) {
case 0:
s->range_table = &range_mpc624_bipolar1;
- printk("1.01V]: ");
+ printk(KERN_INFO "1.01V]: ");
break;
default:
s->range_table = &range_mpc624_bipolar10;
- printk("10.1V]: ");
+ printk(KERN_INFO "10.1V]: ");
}
s->len_chanlist = 1;
s->insn_read = mpc624_ai_rinsn;
- printk("attached\n");
+ printk(KERN_INFO "attached\n");
return 1;
}
static int mpc624_detach(struct comedi_device *dev)
{
- printk("comedi%d: mpc624: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: mpc624: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, MPC624_SIZE);
@@ -280,11 +284,14 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
unsigned long int data_in, data_out;
unsigned char ucPort;
- /* WARNING: We always write 0 to GNSWA bit, so the channel range is +-/10.1Vdc */
+ /*
+ * WARNING:
+ * We always write 0 to GNSWA bit, so the channel range is +/-10.1Vdc
+ */
outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH);
-/* printk("Channel %d: \n", insn->chanspec); */
+/* printk("Channel %d:\n", insn->chanspec); */
if (!insn->n) {
- printk("MPC624: Warning, no data to acquire\n");
+ printk(KERN_INFO "MPC624: Warning, no data to acquire\n");
return 0;
}
@@ -306,7 +313,7 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
break;
}
if (i == TIMEOUT) {
- printk("MPC624: timeout (%dms)\n", TIMEOUT);
+ printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT);
data[n] = 0;
return -ETIMEDOUT;
}
@@ -319,7 +326,7 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
outb(0, dev->iobase + MPC624_ADC);
udelay(1);
- if (data_out & (1 << 31)) { /* the next bit is a 1 */
+ if (data_out & (1 << 31)) { /* the next bit is a 1 */
/* Set the ADSDI line (send to MPC624) */
outb(MPC624_ADSDI, dev->iobase + MPC624_ADC);
udelay(1);
@@ -344,31 +351,47 @@ static int mpc624_ai_rinsn(struct comedi_device *dev,
data_out <<= 1;
}
- /* Received 32-bit long value consist of: */
- /* 31: EOC (End Of Transmission) bit - should be 0 */
- /* 30: DMY (Dummy) bit - should be 0 */
- /* 29: SIG (Sign) bit - 1 if the voltage is positive, 0 if negative */
- /* 28: MSB (Most Significant Bit) - the first bit of convertion result */
- /* .... */
- /* 05: LSB (Least Significant Bit) - the last bit of convertion result */
- /* 04: sub-LSB - sub-LSBs are basically noise, but when */
- /* 03: sub-LSB averaged properly, they can increase convertion */
- /* 02: sub-LSB precision up to 29 bits; they can be discarded */
- /* 01: sub-LSB without loss of resolution. */
- /* 00: sub-LSB */
+ /*
+ * Received 32-bit long value consists of:
+ * 31: EOC -
+ * (End Of Transmission) bit - should be 0
+ * 30: DMY
+ * (Dummy) bit - should be 0
+ * 29: SIG
+ * (Sign) bit - 1 if the voltage is positive,
+ * 0 if negative
+ * 28: MSB
+ * (Most Significant Bit) - the first bit of
+ * the conversion result
+ * ....
+ * 05: LSB
+ * (Least Significant Bit) - the last bit of the
+ * conversion result
+ * 04-00: sub-LSB
+ * - sub-LSBs are basically noise, but when
+ * averaged properly, they can increase conversion
+ * precision up to 29 bits; they can be discarded
+ * without loss of resolution.
+ */
if (data_in & MPC624_EOC_BIT)
- printk("MPC624: EOC bit is set (data_in=%lu)!",
+ printk(KERN_INFO "MPC624:EOC bit is set (data_in=%lu)!",
data_in);
if (data_in & MPC624_DMY_BIT)
- printk("MPC624: DMY bit is set (data_in=%lu)!",
+ printk(KERN_INFO "MPC624:DMY bit is set (data_in=%lu)!",
data_in);
- if (data_in & MPC624_SGN_BIT) { /* check the sign bit *//* The voltage is positive */
- data_in &= 0x3FFFFFFF; /* EOC and DMY should be 0, but we will mask them out just to be sure */
- data[n] = data_in; /* comedi operates on unsigned numbers, so we don't clear the SGN bit */
- /* SGN bit is still set! It's correct, since we're converting to unsigned. */
- } else { /* The voltage is negative */
- /* data_in contains a number in 30-bit two's complement code and we must deal with it */
+ if (data_in & MPC624_SGN_BIT) { /* Voltage is positive */
+ /*
+ * comedi operates on unsigned numbers, so mask off EOC
+ * and DMY and don't clear the SGN bit
+ */
+ data_in &= 0x3FFFFFFF;
+ data[n] = data_in;
+ } else { /* The voltage is negative */
+ /*
+ * data_in contains a number in 30-bit two's complement
+ * code and we must deal with it
+ */
data_in |= MPC624_SGN_BIT;
data_in = ~data_in;
data_in += 1;
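The comment block above documents the 32-bit word clocked out of the ADC. A hedged sketch of a status check based on those bit positions follows; the EX_* names and the helper are examples, not the driver's macros.

	#define EX_EOC_BIT (1UL << 31)	/* End Of Conversion flag, expected 0 */
	#define EX_DMY_BIT (1UL << 30)	/* Dummy bit, expected 0 */
	#define EX_SGN_BIT (1UL << 29)	/* 1 = positive voltage */

	/* Illustrative: a well-formed sample has both EOC and DMY clear. */
	static int example_sample_ok(unsigned long data_in)
	{
		return (data_in & (EX_EOC_BIT | EX_DMY_BIT)) == 0;
	}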
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 653b4c8700a..1fc76cc6a28 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -107,10 +107,9 @@ static const struct ni6527_board ni6527_boards[] = {
#define this_board ((const struct ni6527_board *)dev->board_ptr)
static DEFINE_PCI_DEVICE_TABLE(ni6527_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x2b10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b10)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b20)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni6527_pci_table);
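Several device tables in this patch switch to the PCI_DEVICE() helper from <linux/pci.h>. Roughly, it fills the vendor and device fields and wildcards the subsystem IDs, so an entry like the ones above is equivalent to the open-coded initializer below (shown only for illustration):

	static const struct pci_device_id example_ni6527_entry = {
		.vendor    = PCI_VENDOR_ID_NI,
		.device    = 0x2b10,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	};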
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 9a4fffe5655..d793f5a4ac9 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -266,30 +266,29 @@ static inline unsigned ni_65xx_total_num_ports(const struct ni_65xx_board
}
static DEFINE_PCI_DEVICE_TABLE(ni_65xx_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x1710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7085, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7086, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7087, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7088, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70cc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70d3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7126, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7127, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x7128, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x718b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x718c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x71c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1710)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7085)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7086)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7087)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7088)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70a9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70c3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70c8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70c9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70cc)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70CD)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70d1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70d2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70d3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7124)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7125)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7126)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7127)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x7128)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x718b)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x718c)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71c5)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni_65xx_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 017630fb242..6a6fae53ea0 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -420,12 +420,11 @@ static const struct ni_660x_board ni_660x_boards[] = {
#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
static DEFINE_PCI_DEVICE_TABLE(ni_660x_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x2c60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2cc0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c60)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1310)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1360)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2cc0)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 68221bfba5d..44ae8368454 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -47,8 +47,6 @@ Commands are not supported.
#include "mite.h"
-#define PCI_VENDOR_ID_NATINST 0x1093
-
#define AO_VALUE_OFFSET 0x00
#define AO_CHAN_OFFSET 0x0c
#define AO_STATUS_OFFSET 0x10
@@ -91,12 +89,9 @@ static const struct ni_670x_board ni_670x_boards[] = {
};
static DEFINE_PCI_DEVICE_TABLE(ni_670x_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x2c90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- /*{ PCI_VENDOR_ID_NATINST, 0x0000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },*/
- {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 3778565c1f6..ce60224bb7b 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -226,7 +226,7 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = 0x1c0;
ao_unipolar = it->options[3];
- printk("comedi%d: ni_at_ao: 0x%04lx", dev->minor, iobase);
+ printk(KERN_INFO "comedi%d: ni_at_ao: 0x%04lx", dev->minor, iobase);
if (!request_region(iobase, ATAO_SIZE, "ni_at_ao")) {
printk(" I/O port conflict\n");
@@ -283,14 +283,14 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
atao_reset(dev);
- printk("\n");
+ printk(KERN_INFO "\n");
return 0;
}
static int atao_detach(struct comedi_device *dev)
{
- printk("comedi%d: atao: remove\n", dev->minor);
+ printk(KERN_INFO "comedi%d: atao: remove\n", dev->minor);
if (dev->iobase)
release_region(dev->iobase, ATAO_SIZE);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 7ea64538e05..6ec77bf88c6 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -145,6 +145,7 @@ void subdev_700_interrupt(struct comedi_device *dev, struct comedi_subdevice *s)
comedi_event(dev, s);
}
+EXPORT_SYMBOL(subdev_700_interrupt);
static int subdev_700_cb(int dir, int port, int data, unsigned long arg)
{
@@ -326,6 +327,7 @@ int subdev_700_init(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
+EXPORT_SYMBOL(subdev_700_init);
int subdev_700_init_irq(struct comedi_device *dev, struct comedi_subdevice *s,
int (*cb) (int, int, int, unsigned long),
@@ -345,6 +347,7 @@ int subdev_700_init_irq(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
+EXPORT_SYMBOL(subdev_700_init_irq);
void subdev_700_cleanup(struct comedi_device *dev, struct comedi_subdevice *s)
{
@@ -353,11 +356,7 @@ void subdev_700_cleanup(struct comedi_device *dev, struct comedi_subdevice *s)
kfree(s->private);
}
-
-EXPORT_SYMBOL(subdev_700_init);
-EXPORT_SYMBOL(subdev_700_init_irq);
EXPORT_SYMBOL(subdev_700_cleanup);
-EXPORT_SYMBOL(subdev_700_interrupt);
static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
@@ -380,7 +379,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
iobase = link->io.BasePort1;
#ifdef incomplete
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
#endif
break;
default:
@@ -470,7 +469,6 @@ static const dev_info_t dev_info = "ni_daq_700";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -502,10 +500,6 @@ static int dio700_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -539,10 +533,8 @@ static void dio700_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "dio700_cs_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- dio700_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ dio700_release(link);
/* This points to the parent struct local_info_t struct */
if (link->priv)
@@ -577,8 +569,7 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -625,7 +616,6 @@ static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void dio700_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
win_req_t req;
int ret;
@@ -639,16 +629,8 @@ static void dio700_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -659,19 +641,10 @@ static void dio700_config(struct pcmcia_device *link)
if (ret != 0)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "ni_daq_700");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -735,8 +708,12 @@ static struct pcmcia_device_id dio700_cs_ids[] = {
PCMCIA_DEVICE_NULL
};
-MODULE_LICENSE("GPL");
+
MODULE_DEVICE_TABLE(pcmcia, dio700_cs_ids);
+MODULE_AUTHOR("Fred Brooks <nsaspook@nsaspook.com>");
+MODULE_DESCRIPTION("Comedi driver for National Instruments "
+ "PCMCIA DAQCard-700 DIO");
+MODULE_LICENSE("GPL");
struct pcmcia_driver dio700_cs_driver = {
.probe = dio700_cs_attach,
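
The ni_daq_700 portion follows the PCMCIA core rework visible throughout this series: struct pcmcia_device no longer carries a dev_node_t or an irq_req_t, the core hands the driver an already-assigned interrupt as the plain integer link->irq, and per-device naming is left to the driver core (hence dev_info(&link->dev, ...)). The EXPORT_SYMBOL() lines also move to sit directly under the functions they export, which is the usual kernel style. A rough sketch of a config routine written against that newer API; the function name and messages are illustrative, not taken from the patch:

#include <linux/device.h>
#include <linux/errno.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

/* Illustrative config path for the post-dev_node_t PCMCIA API. */
static int example_cs_config(struct pcmcia_device *link)
{
	int ret;

	/* The core has already picked an IRQ if one could be allocated. */
	if (!link->irq)
		return -ENODEV;

	/* Program the socket from link->conf (CONF_ENABLE_IRQ and friends). */
	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		return ret;

	dev_info(&link->dev, "index 0x%02x, irq %u\n",
		 link->conf.ConfigIndex, link->irq);
	return 0;
}

Checking link->irq instead of calling pcmcia_request_irq() works for these dio drivers because they never registered an interrupt handler of their own at config time.
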
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index ddc312b5d20..e4865b1c231 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -131,7 +131,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
iobase = link->io.BasePort1;
#ifdef incomplete
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
#endif
break;
default:
@@ -221,7 +221,6 @@ static const dev_info_t dev_info = "ni_daq_dio24";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -253,10 +252,6 @@ static int dio24_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -290,10 +285,8 @@ static void dio24_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "dio24_cs_detach\n");
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- dio24_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ dio24_release(link);
/* This points to the parent local_info_t struct */
if (link->priv)
@@ -328,8 +321,7 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -376,7 +368,6 @@ static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void dio24_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
win_req_t req;
@@ -390,16 +381,8 @@ static void dio24_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -410,19 +393,10 @@ static void dio24_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "ni_daq_dio24");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -487,6 +461,10 @@ static struct pcmcia_device_id dio24_cs_ids[] = {
};
MODULE_DEVICE_TABLE(pcmcia, dio24_cs_ids);
+MODULE_AUTHOR("Daniel Vecino Castel <dvecino@able.es>");
+MODULE_DESCRIPTION("Comedi driver for National Instruments "
+ "PCMCIA DAQ-Card DIO-24");
+MODULE_LICENSE("GPL");
struct pcmcia_driver dio24_cs_driver = {
.probe = dio24_cs_attach,
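
As in ni_daq_700.c, the dio24 hunk gathers the module metadata macros next to MODULE_DEVICE_TABLE() so that author, description and license live in one place. For reference, the group is just a set of module macros; the details below are placeholders, not the real driver's metadata:

#include <linux/module.h>

MODULE_AUTHOR("Jane Example <jane@example.invalid>");
MODULE_DESCRIPTION("Example of keeping the module metadata block together");
MODULE_LICENSE("GPL");
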
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 558e525fed3..67c8a538802 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -499,9 +499,8 @@ static struct comedi_driver driver_labpc = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x161)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, labpc_pci_table);
@@ -536,7 +535,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
printk("\n");
if (iobase == 0) {
- printk("io base address is zero!\n");
+ printk(KERN_ERR "io base address is zero!\n");
return -EINVAL;
}
/* request io regions for isa boards */
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 8ad1055a5cc..163245ebb31 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -144,7 +144,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!link)
return -EIO;
iobase = link->io.BasePort1;
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
break;
default:
printk("bug! couldn't determine board type\n");
@@ -199,7 +199,6 @@ static const dev_info_t dev_info = "daqcard-1200";
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
struct bus_operations *bus;
};
@@ -229,10 +228,6 @@ static int labpc_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -269,10 +264,8 @@ static void labpc_cs_detach(struct pcmcia_device *link)
the release() function is called, that will trigger a proper
detach().
*/
- if (link->dev_node) {
- ((struct local_info_t *)link->priv)->stop = 1;
- labpc_release(link);
- }
+ ((struct local_info_t *)link->priv)->stop = 1;
+ labpc_release(link);
/* This points to the parent local_info_t struct (may be null) */
kfree(link->priv);
@@ -306,8 +299,7 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
}
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -355,7 +347,6 @@ static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void labpc_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
win_req_t req;
@@ -367,16 +358,8 @@ static void labpc_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -387,19 +370,10 @@ static void labpc_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "daqcard-1200");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -463,6 +437,9 @@ static struct pcmcia_device_id labpc_cs_ids[] = {
};
MODULE_DEVICE_TABLE(pcmcia, labpc_cs_ids);
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_DESCRIPTION("Comedi driver for National Instruments Lab-PC");
+MODULE_LICENSE("GPL");
struct pcmcia_driver labpc_cs_driver = {
.probe = labpc_cs_attach,
@@ -504,6 +481,5 @@ void __exit labpc_exit_module(void)
comedi_driver_unregister(&driver_labpc_cs);
}
-MODULE_LICENSE("GPL");
module_init(labpc_init_module);
module_exit(labpc_exit_module);
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index dc4849a40c9..cedb02d40f9 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -262,17 +262,11 @@ static void cs_detach(struct pcmcia_device *);
static struct pcmcia_device *cur_dev = NULL;
static const dev_info_t dev_info = "ni_mio_cs";
-static dev_node_t dev_node = {
- "ni_mio_cs",
- COMEDI_MAJOR, 0,
- NULL
-};
static int cs_attach(struct pcmcia_device *link)
{
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
link->io.NumPorts1 = 16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -292,8 +286,7 @@ static void cs_detach(struct pcmcia_device *link)
{
DPRINTK("cs_detach(link=%p)\n", link);
- if (link->dev_node)
- cs_release(link);
+ cs_release(link);
}
static int mio_cs_suspend(struct pcmcia_device *link)
@@ -344,14 +337,10 @@ static void mio_cs_config(struct pcmcia_device *link)
return;
}
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret) {
- printk("pcmcia_request_irq() returned error: %i\n", ret);
- }
+ if (!link->irq)
+ dev_info(&link->dev, "no IRQ available\n");
ret = pcmcia_request_configuration(link, &link->conf);
-
- link->dev_node = &dev_node;
}
static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -369,7 +358,7 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->driver = &driver_ni_mio_cs;
dev->iobase = link->io.BasePort1;
- irq = link->irq.AssignedIRQ;
+ irq = link->irq;
printk("comedi%d: %s: DAQCard: io 0x%04lx, irq %u, ",
dev->minor, dev->driver->driver_name, dev->iobase, irq);
@@ -439,8 +428,6 @@ static int ni_getboardtype(struct comedi_device *dev,
#ifdef MODULE
-MODULE_LICENSE("GPL");
-
static struct pcmcia_device_id ni_mio_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010d), /* DAQCard-ai-16xe-50 */
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x010c), /* DAQCard-ai-16e-4 */
@@ -451,6 +438,9 @@ static struct pcmcia_device_id ni_mio_cs_ids[] = {
};
MODULE_DEVICE_TABLE(pcmcia, ni_mio_cs_ids);
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
+MODULE_DESCRIPTION("Comedi driver for National Instruments DAQCard E series");
+MODULE_LICENSE("GPL");
struct pcmcia_driver ni_mio_cs_driver = {
.probe = &cs_attach,
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 9d337516409..b126638d33b 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -83,8 +83,6 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define DPRINTK(format, args...)
#endif
-#define PCI_VENDOR_ID_NATINST 0x1093
-
#define PCI_DIO_SIZE 4096
#define PCI_MITE_SIZE 4096
@@ -379,18 +377,17 @@ static const struct nidio_board nidio_boards[] = {
#define this_board ((const struct nidio_board *)dev->board_ptr)
static DEFINE_PCI_DEVICE_TABLE(ni_pcidio_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x1150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x12b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1630, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x13c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x0400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x17d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1150)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1320)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x12b0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x0160)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1630)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x13c0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x0400)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1250)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x17d0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1800)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni_pcidio_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 24c8b8ed5b4..577fda84190 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -130,60 +130,59 @@ Bugs:
/* The following two tables must be in the same order */
static DEFINE_PCI_DEVICE_TABLE(ni_pci_table) = {
- {
- PCI_VENDOR_ID_NATINST, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1190, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x11b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x11c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x11d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x14e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x14f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x15b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x1870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x18b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x18c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x28c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2a60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2a70, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2a80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2ab0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2b80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2b90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x2ca0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70aa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70ab, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70ac, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70af, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70b4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70b6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70b7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70bd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x716d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x0162)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1170)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1180)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1190)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x11b0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x11c0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x11d0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1270)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1330)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1340)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1350)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x14e0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x14f0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1580)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x15b0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1880)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1870)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x18b0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x18c0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2410)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2420)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2430)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2890)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x28c0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2a60)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2a70)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2a80)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2ab0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b80)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2b90)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c80)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2ca0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70aa)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70ab)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70ac)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70af)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70b0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70b4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70b6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70b7)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70b8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70bc)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70bd)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70bf)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70c0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x70f2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x710d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x716c)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x716d)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717f)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x71bc)},
+ {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x717d)},
+ {0}
};
MODULE_DEVICE_TABLE(pci, ni_pci_table);
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 53bcdb7b5c5..485d63f9929 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -380,9 +380,9 @@ enum bigend_bits {
#define MBX_ADDR_SPACE_360 0x80 /* wanXL100s/200/400 */
#define MBX_ADDR_MASK_360 (MBX_ADDR_SPACE_360-1)
-static inline int plx9080_abort_dma(void *iobase, unsigned int channel)
+static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
{
- void *dma_cs_addr;
+ void __iomem *dma_cs_addr;
uint8_t dma_status;
const int timeout = 10000;
unsigned int i;
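
The plx9080.h hunk annotates the MMIO pointers with __iomem. The annotation is a no-op for the compiler but lets sparse warn when I/O memory is dereferenced directly instead of going through the read*/write* accessors. A small self-contained illustration of the convention; the register offset and status bit below are invented for the example:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_STATUS_REG	0x10	/* made-up register offset */
#define EXAMPLE_BUSY		0x01	/* made-up status bit */

/* Touch I/O memory only through accessors that take the __iomem cookie. */
static inline bool example_is_busy(void __iomem *base)
{
	u8 status = readb(base + EXAMPLE_STATUS_REG);

	return status & EXAMPLE_BUSY;
}

Plain void * pointers into MMIO space defeat that checking, which is why plx9080_abort_dma's iobase and dma_cs_addr gain the annotation here.
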
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 3325f24448b..a91db6c4202 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -48,6 +48,7 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
#include "../comedidev.h"
+#include <linux/semaphore.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
@@ -55,19 +56,20 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
+#include <linux/completion.h>
+
/* Maximum number of separate DAQP devices we'll allow */
#define MAX_DEV 4
struct local_info_t {
struct pcmcia_device *link;
- dev_node_t node;
int stop;
int table_index;
char board_name[32];
enum { semaphore, buffer } interrupt_mode;
- struct semaphore eos;
+ struct completion eos;
struct comedi_device *dev;
struct comedi_subdevice *s;
@@ -238,14 +240,13 @@ static int daqp_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
/* Interrupt handler
*
* Operates in one of two modes. If local->interrupt_mode is
- * 'semaphore', just signal the local->eos semaphore and return
+ * 'semaphore', just signal the local->eos completion and return
* (one-shot mode). Otherwise (continuous mode), read data in from
* the card, transfer it to the buffer provided by the higher-level
* comedi kernel module, and signal various comedi callback routines,
* which run pretty quick.
*/
-
-static void daqp_interrupt(int irq, void *dev_id)
+static enum irqreturn daqp_interrupt(int irq, void *dev_id)
{
struct local_info_t *local = (struct local_info_t *)dev_id;
struct comedi_device *dev;
@@ -256,39 +257,39 @@ static void daqp_interrupt(int irq, void *dev_id)
if (local == NULL) {
printk(KERN_WARNING
"daqp_interrupt(): irq %d for unknown device.\n", irq);
- return;
+ return IRQ_NONE;
}
dev = local->dev;
if (dev == NULL) {
printk(KERN_WARNING "daqp_interrupt(): NULL comedi_device.\n");
- return;
+ return IRQ_NONE;
}
if (!dev->attached) {
printk(KERN_WARNING
"daqp_interrupt(): struct comedi_device not yet attached.\n");
- return;
+ return IRQ_NONE;
}
s = local->s;
if (s == NULL) {
printk(KERN_WARNING
"daqp_interrupt(): NULL comedi_subdevice.\n");
- return;
+ return IRQ_NONE;
}
if ((struct local_info_t *)s->private != local) {
printk(KERN_WARNING
"daqp_interrupt(): invalid comedi_subdevice.\n");
- return;
+ return IRQ_NONE;
}
switch (local->interrupt_mode) {
case semaphore:
- up(&local->eos);
+ complete(&local->eos);
break;
case buffer:
@@ -340,6 +341,7 @@ static void daqp_interrupt(int irq, void *dev_id)
comedi_event(dev, s);
}
+ return IRQ_HANDLED;
}
/* One-shot analog data acquisition routine */
@@ -401,8 +403,7 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
return -1;
}
- /* Make sure semaphore is blocked */
- sema_init(&local->eos, 0);
+ init_completion(&local->eos);
local->interrupt_mode = semaphore;
local->dev = dev;
local->s = s;
@@ -413,9 +414,9 @@ static int daqp_ai_insn_read(struct comedi_device *dev,
outb(DAQP_COMMAND_ARM | DAQP_COMMAND_FIFO_DATA,
dev->iobase + DAQP_COMMAND);
- /* Wait for interrupt service routine to unblock semaphore */
+ /* Wait for interrupt service routine to unblock completion */
/* Maybe could use a timeout here, but it's interruptible */
- if (down_interruptible(&local->eos))
+ if (wait_for_completion_interruptible(&local->eos))
return -EINTR;
data[i] = inb(dev->iobase + DAQP_FIFO);
@@ -580,7 +581,7 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct local_info_t *local = (struct local_info_t *)s->private;
struct comedi_cmd *cmd = &s->async->cmd;
- int counter = 100;
+ int counter;
int scanlist_start_on_every_entry;
int threshold;
@@ -613,14 +614,14 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*/
if (cmd->convert_src == TRIG_TIMER) {
- int counter = daqp_ns_to_timer(&cmd->convert_arg,
+ counter = daqp_ns_to_timer(&cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
outb(counter & 0xff, dev->iobase + DAQP_PACER_LOW);
outb((counter >> 8) & 0xff, dev->iobase + DAQP_PACER_MID);
outb((counter >> 16) & 0xff, dev->iobase + DAQP_PACER_HIGH);
scanlist_start_on_every_entry = 1;
} else {
- int counter = daqp_ns_to_timer(&cmd->scan_begin_arg,
+ counter = daqp_ns_to_timer(&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
outb(counter & 0xff, dev->iobase + DAQP_PACER_LOW);
outb((counter >> 8) & 0xff, dev->iobase + DAQP_PACER_MID);
@@ -755,7 +756,7 @@ static int daqp_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* Reset any pending interrupts (my card has a tendancy to require
* require multiple reads on the status register to achieve this)
*/
-
+ counter = 100;
while (--counter
&& (inb(dev->iobase + DAQP_STATUS) & DAQP_STATUS_EVENTS)) ;
if (!counter) {
@@ -1040,10 +1041,6 @@ static int daqp_cs_attach(struct pcmcia_device *link)
local->link = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = daqp_interrupt;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -1074,10 +1071,8 @@ static void daqp_cs_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "daqp_cs_detach\n");
- if (link->dev_node) {
- dev->stop = 1;
- daqp_cs_release(link);
- }
+ dev->stop = 1;
+ daqp_cs_release(link);
/* Unlink device structure, and free it */
dev_table[dev->table_index] = NULL;
@@ -1105,8 +1100,7 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
return -ENODEV;
/* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -1133,7 +1127,6 @@ static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
static void daqp_cs_config(struct pcmcia_device *link)
{
- struct local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "daqp_cs_config\n");
@@ -1144,16 +1137,9 @@ static void daqp_cs_config(struct pcmcia_device *link)
goto failed;
}
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ ret = pcmcia_request_irq(link, daqp_interrupt);
+ if (ret)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -1164,23 +1150,10 @@ static void daqp_cs_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- /* Comedi's PCMCIA script uses this device name (extracted
- * from /var/lib/pcmcia/stab) to pass to comedi_config
- */
- /* sprintf(dev->node.dev_name, "daqp%d", dev->table_index); */
- sprintf(dev->node.dev_name, "quatech_daqp_cs");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %u", link->irq.AssignedIRQ);
+ printk(", irq %u", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1 + link->io.NumPorts1 - 1);
@@ -1243,8 +1216,11 @@ static struct pcmcia_device_id daqp_cs_id_table[] = {
};
MODULE_DEVICE_TABLE(pcmcia, daqp_cs_id_table);
+MODULE_AUTHOR("Brent Baccala <baccala@freesoft.org>");
+MODULE_DESCRIPTION("Comedi driver for Quatech DAQP PCMCIA data capture cards");
+MODULE_LICENSE("GPL");
-struct pcmcia_driver daqp_cs_driver = {
+static struct pcmcia_driver daqp_cs_driver = {
.probe = daqp_cs_attach,
.remove = daqp_cs_detach,
.suspend = daqp_cs_suspend,
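
The quatech_daqp_cs conversion swaps the zero-initialised semaphore for a struct completion, which states the one-shot "wait until the interrupt fired" intent directly, and gives the handler the irqreturn_t signature that pcmcia_request_irq(link, handler) expects now that the handler is registered at config time. A minimal sketch of that wait/complete pairing; the structure, names and the elided "start conversion" step are placeholders:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct example_dev {
	struct completion eos;	/* signalled from the interrupt handler */
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	complete(&ed->eos);		/* one-shot: wake the sleeping reader */
	return IRQ_HANDLED;
}

static int example_read_one_sample(struct example_dev *ed)
{
	init_completion(&ed->eos);	/* re-arm before kicking off the I/O */
	/* ... arm the hardware and start the conversion here ... */
	if (wait_for_completion_interruptible(&ed->eos))
		return -EINTR;		/* interrupted by a signal */
	return 0;
}

Returning IRQ_NONE from the sanity-check branches, as the patch does, also lets the kernel's spurious-interrupt detection disable a misbehaving shared line.
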
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index aba57d93dd3..490753b3d90 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -131,7 +131,8 @@ MODULE_DEVICE_TABLE(pci, skel_pci_table);
/* this structure is for data unique to this hardware driver. If
several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
+ feel free to suggest moving the variable to the struct comedi_device struct.
+ */
struct skel_private {
int data;
@@ -211,7 +212,7 @@ static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
- printk("comedi%d: skel: ", dev->minor);
+ pr_info("comedi%d: skel: ", dev->minor);
/*
* If you can probe the device to determine what device in a series
@@ -282,7 +283,7 @@ static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s->type = COMEDI_SUBD_UNUSED;
}
- printk("attached\n");
+ pr_info("attached\n");
return 0;
}
@@ -297,7 +298,7 @@ static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
static int skel_detach(struct comedi_device *dev)
{
- printk("comedi%d: skel: remove\n", dev->minor);
+ pr_info("comedi%d: skel: remove\n", dev->minor);
return 0;
}
@@ -336,7 +337,7 @@ static int skel_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
if (i == TIMEOUT) {
/* printk() should be used instead of printk()
* whenever the code can be called from real-time. */
- printk("timeout\n");
+ pr_info("timeout\n");
return -ETIMEDOUT;
}
@@ -397,7 +398,8 @@ static int skel_ai_cmdtest(struct comedi_device *dev,
if (err)
return 1;
- /* step 2: make sure trigger sources are unique and mutually compatible */
+ /* step 2: make sure trigger sources are unique and mutually compatible
+ */
/* note that mutual compatibility is not an issue here */
if (cmd->scan_begin_src != TRIG_TIMER &&
@@ -529,7 +531,7 @@ static int skel_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
int i;
int chan = CR_CHAN(insn->chanspec);
- printk("skel_ao_winsn\n");
+ pr_info("skel_ao_winsn\n");
/* Writing a list of values to an AO channel is probably not
* very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; i++) {
@@ -623,6 +625,7 @@ static int skel_dio_insn_config(struct comedi_device *dev,
* as necessary.
*/
COMEDI_INITCLEANUP(driver_skel);
-/* If you are writing a PCI driver you should use COMEDI_PCI_INITCLEANUP instead.
-*/
+/* If you are writing a PCI driver you should use COMEDI_PCI_INITCLEANUP
+ * instead.
+ */
/* COMEDI_PCI_INITCLEANUP(driver_skel, skel_pci_table) */
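
The skel.c hunks switch bare printk() calls to pr_info(), which is shorthand for printk(KERN_INFO pr_fmt(...)) and therefore always carries an explicit log level. A small sketch of the usual pattern, including the optional pr_fmt() prefix; the "example:" prefix and function are illustrative only:

/* Define pr_fmt before any include so every pr_*() call picks up the prefix. */
#define pr_fmt(fmt) "example: " fmt

#include <linux/kernel.h>

static void example_report(int minor)
{
	pr_info("comedi%d: attached\n", minor);		/* KERN_INFO + prefix */
	pr_err("comedi%d: attach timed out\n", minor);	/* KERN_ERR + prefix */
}

The KERN_INFO and KERN_ERR annotations added in the ni_at_ao and ni_labpc hunks earlier in this patch reach the same goal without the pr_*() wrappers.
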
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index 17c92a57b0d..18b0a83c4bb 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -41,14 +41,14 @@ Status: unknown
/* 0..3 remain unchanged! For details about Port C Mode Register see */
/* the remarks in dnp_insn_config() below. */
-#define CSCIR 0x22 /* Chip Setup and Control Index Register */
-#define CSCDR 0x23 /* Chip Setup and Control Data Register */
-#define PAMR 0xa5 /* Port A Mode Register */
-#define PADR 0xa9 /* Port A Data Register */
-#define PBMR 0xa4 /* Port B Mode Register */
-#define PBDR 0xa8 /* Port B Data Register */
-#define PCMR 0xa3 /* Port C Mode Register */
-#define PCDR 0xa7 /* Port C Data Register */
+#define CSCIR 0x22 /* Chip Setup and Control Index Register */
+#define CSCDR 0x23 /* Chip Setup and Control Data Register */
+#define PAMR 0xa5 /* Port A Mode Register */
+#define PADR 0xa9 /* Port A Data Register */
+#define PBMR 0xa4 /* Port B Mode Register */
+#define PBDR 0xa8 /* Port B Data Register */
+#define PCMR 0xa3 /* Port C Mode Register */
+#define PCDR 0xa7 /* Port C Data Register */
/* This data structure holds information about the supported boards -------- */
@@ -59,8 +59,9 @@ struct dnp_board {
int have_dio;
};
-static const struct dnp_board dnp_boards[] = { /* we only support one DNP 'board' */
- { /* variant at the moment */
+/* We only support one DNP 'board' variant at the moment */
+static const struct dnp_board dnp_boards[] = {
+{
.name = "dnp-1486",
.ai_chans = 16,
.ai_bits = 12,
@@ -80,9 +81,9 @@ struct dnp_private_data {
#define devpriv ((dnp_private *)dev->private)
/* ------------------------------------------------------------------------- */
-/* The struct comedi_driver structure tells the Comedi core module which functions */
-/* to call to configure/deconfigure (attach/detach) the board, and also */
-/* about the kernel module that contains the device code. */
+/* The struct comedi_driver structure tells the Comedi core module which */
+/* functions to call to configure/deconfigure (attach/detach) the board, and */
+/* also about the kernel module that contains the device code. */
/* */
/* In the following section we define the API of this driver. */
/* ------------------------------------------------------------------------- */
@@ -97,7 +98,7 @@ static struct comedi_driver driver_dnp = {
.detach = dnp_detach,
.board_name = &dnp_boards[0].name,
/* only necessary for non-PnP devs */
- .offset = sizeof(struct dnp_board), /* like ISA-PnP, PCI or PCMCIA. */
+ .offset = sizeof(struct dnp_board), /* like ISA-PnP, PCI or PCMCIA */
.num_names = ARRAY_SIZE(dnp_boards),
};
@@ -122,28 +123,30 @@ static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct comedi_subdevice *s;
- printk("comedi%d: dnp: ", dev->minor);
+ printk(KERN_INFO "comedi%d: dnp: ", dev->minor);
- /* Autoprobing: this should find out which board we have. Currently only */
- /* the 1486 board is supported and autoprobing is not implemented :-) */
+ /* Autoprobing: this should find out which board we have. Currently */
+ /* only the 1486 board is supported and autoprobing is not */
+ /* implemented :-) */
/* dev->board_ptr = dnp_probe(dev); */
- /* Initialize the name of the board. We can use the "thisboard" macro now. */
+ /* Initialize the name of the board. */
+ /* We can use the "thisboard" macro now. */
dev->board_name = thisboard->name;
- /* Allocate the private structure area. alloc_private() is a convenient */
- /* macro defined in comedidev.h. */
+ /* Allocate the private structure area. alloc_private() is a */
+ /* convenient macro defined in comedidev.h. */
if (alloc_private(dev, sizeof(struct dnp_private_data)) < 0)
return -ENOMEM;
- /* Allocate the subdevice structures. alloc_subdevice() is a convenient */
- /* macro defined in comedidev.h. */
+ /* Allocate the subdevice structures. alloc_subdevice() is a */
+ /* convenient macro defined in comedidev.h. */
if (alloc_subdevices(dev, 1) < 0)
return -ENOMEM;
s = dev->subdevices + 0;
- /* digital i/o subdevice */
+ /* digital i/o subdevice */
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 20;
@@ -158,7 +161,7 @@ static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* allocated for the primary 8259, so we don't need to allocate them
* ourselves. */
- /* configure all ports as input (default) */
+ /* configure all ports as input (default) */
outb(PAMR, CSCIR);
outb(0x00, CSCDR);
outb(PBMR, CSCIR);
@@ -181,7 +184,7 @@ static int dnp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dnp_detach(struct comedi_device *dev)
{
- /* configure all ports as input (default) */
+ /* configure all ports as input (default) */
outb(PAMR, CSCIR);
outb(0x00, CSCDR);
outb(PBMR, CSCIR);
@@ -189,8 +192,8 @@ static int dnp_detach(struct comedi_device *dev)
outb(PCMR, CSCIR);
outb((inb(CSCDR) & 0xAA), CSCDR);
- /* announce that we are finished */
- printk("comedi%d: dnp: remove\n", dev->minor);
+ /* announce that we are finished */
+ printk(KERN_INFO "comedi%d: dnp: remove\n", dev->minor);
return 0;
@@ -210,12 +213,12 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
if (insn->n != 2)
return -EINVAL; /* insn uses data[0] and data[1] */
- /* The insn data is a mask in data[0] and the new data in data[1], each */
- /* channel cooresponding to a bit. */
+ /* The insn data is a mask in data[0] and the new data in data[1], */
+ /* each channel cooresponding to a bit. */
- /* Ports A and B are straight forward: each bit corresponds to an output */
- /* pin with the same order. Port C is different: bits 0...3 correspond to */
- /* bits 4...7 of the output register (PCDR). */
+ /* Ports A and B are straight forward: each bit corresponds to an */
+ /* output pin with the same order. Port C is different: bits 0...3 */
+ /* correspond to bits 4...7 of the output register (PCDR). */
if (data[0]) {
@@ -235,7 +238,7 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
| (u8) ((data[1] & 0x0F0000) >> 12), CSCDR);
}
- /* on return, data[1] contains the value of the digital input lines. */
+ /* on return, data[1] contains the value of the digital input lines. */
outb(PADR, CSCIR);
data[0] = inb(CSCDR);
outb(PBDR, CSCIR);
@@ -260,7 +263,8 @@ static int dnp_dio_insn_config(struct comedi_device *dev,
u8 register_buffer;
- int chan = CR_CHAN(insn->chanspec); /* reduces chanspec to lower 16 bits */
+ /* reduces chanspec to lower 16 bits */
+ int chan = CR_CHAN(insn->chanspec);
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
@@ -275,11 +279,11 @@ static int dnp_dio_insn_config(struct comedi_device *dev,
return -EINVAL;
break;
}
- /* Test: which port does the channel belong to? */
+ /* Test: which port does the channel belong to? */
- /* We have to pay attention with port C: this is the meaning of PCMR: */
- /* Bit in PCMR: 7 6 5 4 3 2 1 0 */
- /* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch */
+ /* We have to pay attention with port C: this is the meaning of PCMR: */
+ /* Bit in PCMR: 7 6 5 4 3 2 1 0 */
+ /* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch */
if ((chan >= 0) && (chan <= 7)) {
/* this is port A */
@@ -289,8 +293,8 @@ static int dnp_dio_insn_config(struct comedi_device *dev,
chan -= 8;
outb(PBMR, CSCIR);
} else if ((chan >= 16) && (chan <= 19)) {
- /* this is port C; multiplication with 2 brings bits into correct */
- /* position for PCMR! */
+ /* this is port C; multiplication with 2 brings bits into */
+ /* correct position for PCMR! */
chan -= 16;
chan *= 2;
outb(PCMR, CSCIR);
@@ -298,7 +302,7 @@ static int dnp_dio_insn_config(struct comedi_device *dev,
return -EINVAL;
}
- /* read 'old' direction of the port and set bits (out=1, in=0) */
+ /* read 'old' direction of the port and set bits (out=1, in=0) */
register_buffer = inb(CSCDR);
if (data[0] == COMEDI_OUTPUT)
register_buffer |= (1 << chan);
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index be1d83df0de..16d4c9f6916 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -285,7 +285,7 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
return -EIO;
}
- usp = (struct unioxx5_subd_priv *)kzalloc(sizeof(*usp), GFP_KERNEL);
+ usp = kzalloc(sizeof(*usp), GFP_KERNEL);
if (usp == NULL) {
printk(KERN_ERR "comedi%d: erorr! --> out of memory!\n", minor);
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 8942ae45708..86f035d0067 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -793,7 +793,7 @@ static int usbduxsub_stop(struct usbduxsub *usbduxsub)
}
static int usbduxsub_upload(struct usbduxsub *usbduxsub,
- uint8_t * local_transfer_buffer,
+ uint8_t *local_transfer_buffer,
unsigned int startAddr, unsigned int len)
{
int errcode;
@@ -825,7 +825,7 @@ static int usbduxsub_upload(struct usbduxsub *usbduxsub,
#define FIRMWARE_MAX_LEN 0x2000
static int firmwareUpload(struct usbduxsub *usbduxsub,
- const u8 * firmwareBinary, int sizeFirmware)
+ const u8 *firmwareBinary, int sizeFirmware)
{
int ret;
uint8_t *fwBuf;
@@ -835,18 +835,17 @@ static int firmwareUpload(struct usbduxsub *usbduxsub,
if (sizeFirmware > FIRMWARE_MAX_LEN) {
dev_err(&usbduxsub->interface->dev,
- "comedi_: usbdux firmware binary it too large for FX2.\n");
+ "usbdux firmware binary it too large for FX2.\n");
return -ENOMEM;
}
/* we generate a local buffer for the firmware */
- fwBuf = kzalloc(sizeFirmware, GFP_KERNEL);
+ fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
if (!fwBuf) {
dev_err(&usbduxsub->interface->dev,
"comedi_: mem alloc for firmware failed\n");
return -ENOMEM;
}
- memcpy(fwBuf, firmwareBinary, sizeFirmware);
ret = usbduxsub_stop(usbduxsub);
if (ret < 0) {
@@ -1264,8 +1263,8 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
(this_usbduxsub->ai_interval) * 2;
}
this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
- (this_usbduxsub->
- ai_interval));
+ (this_usbduxsub->
+ ai_interval));
} else {
/* interval always 1ms */
this_usbduxsub->ai_interval = 1;
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index e89b8181253..29c3c016b93 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -177,8 +177,8 @@ struct usbduxfastsub_s {
int16_t *insnBuffer; /* input buffer for single insn */
int ifnum; /* interface number */
struct usb_interface *interface; /* interface structure */
- struct comedi_device *comedidev; /* comedi device for the interrupt
- context */
+ /* comedi device for the interrupt context */
+ struct comedi_device *comedidev;
short int ai_cmd_running; /* asynchronous command is running */
short int ai_continous; /* continous aquisition */
long int ai_sample_count; /* number of samples to acquire */
@@ -271,7 +271,8 @@ static int usbduxfast_ai_stop(struct usbduxfastsub_s *udfs, int do_unlink)
udfs->ai_cmd_running = 0;
if (do_unlink)
- ret = usbduxfastsub_unlink_InURBs(udfs); /* stop aquistion */
+ /* stop aquistion */
+ ret = usbduxfastsub_unlink_InURBs(udfs);
return ret;
}
@@ -451,13 +452,15 @@ static int usbduxfastsub_start(struct usbduxfastsub_s *udfs)
/* 7f92 to zero */
local_transfer_buffer[0] = 0;
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, /* bRequest, "Firmware" */
- VENDOR_DIR_OUT, /* bmRequestType */
- USBDUXFASTSUB_CPUCS, /* Value */
- 0x0000, /* Index */
- local_transfer_buffer, /* address of the transfer buffer */
- 1, /* Length */
- EZTIMEOUT); /* Timeout */
+ /* bRequest, "Firmware" */
+ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE,
+ VENDOR_DIR_OUT, /* bmRequestType */
+ USBDUXFASTSUB_CPUCS, /* Value */
+ 0x0000, /* Index */
+ /* address of the transfer buffer */
+ local_transfer_buffer,
+ 1, /* Length */
+ EZTIMEOUT); /* Timeout */
if (ret < 0) {
printk("comedi_: usbduxfast_: control msg failed (start)\n");
return ret;
@@ -473,7 +476,8 @@ static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs)
/* 7f92 to one */
local_transfer_buffer[0] = 1;
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, /* bRequest, "Firmware" */
+ /* bRequest, "Firmware" */
+ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE,
VENDOR_DIR_OUT, /* bmRequestType */
USBDUXFASTSUB_CPUCS, /* Value */
0x0000, /* Index */
@@ -499,13 +503,15 @@ static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs,
printk(KERN_DEBUG " to addr %d, first byte=%d.\n",
startAddr, local_transfer_buffer[0]);
#endif
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, /* brequest, firmware */
- VENDOR_DIR_OUT, /* bmRequestType */
- startAddr, /* value */
- 0x0000, /* index */
- local_transfer_buffer, /* our local safe buffer */
- len, /* length */
- EZTIMEOUT); /* timeout */
+ /* brequest, firmware */
+ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE,
+ VENDOR_DIR_OUT, /* bmRequestType */
+ startAddr, /* value */
+ 0x0000, /* index */
+ /* our local safe buffer */
+ local_transfer_buffer,
+ len, /* length */
+ EZTIMEOUT); /* timeout */
#ifdef CONFIG_COMEDI_DEBUG
printk(KERN_DEBUG "comedi_: usbduxfast: result=%d\n", ret);
@@ -519,7 +525,7 @@ static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs,
return 0;
}
-int usbduxfastsub_submit_InURBs(struct usbduxfastsub_s *udfs)
+static int usbduxfastsub_submit_InURBs(struct usbduxfastsub_s *udfs)
{
int ret;
@@ -1347,7 +1353,7 @@ static int usbduxfast_ai_insn_read(struct comedi_device *dev,
#define FIRMWARE_MAX_LEN 0x2000
static int firmwareUpload(struct usbduxfastsub_s *usbduxfastsub,
- const u8 * firmwareBinary, int sizeFirmware)
+ const u8 *firmwareBinary, int sizeFirmware)
{
int ret;
uint8_t *fwBuf;
@@ -1362,13 +1368,12 @@ static int firmwareUpload(struct usbduxfastsub_s *usbduxfastsub,
}
/* we generate a local buffer for the firmware */
- fwBuf = kzalloc(sizeFirmware, GFP_KERNEL);
+ fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
if (!fwBuf) {
dev_err(&usbduxfastsub->interface->dev,
"comedi_: mem alloc for firmware failed\n");
return -ENOMEM;
}
- memcpy(fwBuf, firmwareBinary, sizeFirmware);
ret = usbduxfastsub_stop(usbduxfastsub);
if (ret < 0) {
diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h
new file mode 100644
index 00000000000..434ce343336
--- /dev/null
+++ b/drivers/staging/comedi/internal.h
@@ -0,0 +1,12 @@
+/*
+ * various internal comedi functions
+ */
+int do_rangeinfo_ioctl(struct comedi_device *dev,
+ struct comedi_rangeinfo __user *arg);
+int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+int comedi_alloc_board_minor(struct device *hardware_device);
+void comedi_free_board_minor(unsigned minor);
+void comedi_reset_async_buf(struct comedi_async *async);
+int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long new_size);
diff --git a/drivers/staging/comedi/kcomedilib/Makefile b/drivers/staging/comedi/kcomedilib/Makefile
index ffcc9ad32ad..18ee99bdde0 100644
--- a/drivers/staging/comedi/kcomedilib/Makefile
+++ b/drivers/staging/comedi/kcomedilib/Makefile
@@ -1,8 +1,3 @@
-obj-$(CONFIG_COMEDI) += kcomedilib.o
+obj-$(CONFIG_COMEDI_KCOMEDILIB) += kcomedilib.o
-kcomedilib-objs := \
- data.o \
- ksyms.o \
- dio.o \
- kcomedilib_main.o \
- get.o
+kcomedilib-objs := kcomedilib_main.o
diff --git a/drivers/staging/comedi/kcomedilib/data.c b/drivers/staging/comedi/kcomedilib/data.c
deleted file mode 100644
index aefc41ab7c4..00000000000
--- a/drivers/staging/comedi/kcomedilib/data.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- kcomedilib/data.c
- implements comedi_data_*() functions
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
-
-#include <linux/string.h>
-#include <linux/delay.h>
-
-int comedi_data_write(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int data)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_WRITE;
- insn.n = 1;
- insn.data = &data;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, range, aref);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_data_read(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int range, unsigned int aref, unsigned int *data)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_READ;
- insn.n = 1;
- insn.data = data;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, range, aref);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_data_read_hint(void *dev, unsigned int subdev,
- unsigned int chan, unsigned int range,
- unsigned int aref)
-{
- struct comedi_insn insn;
- unsigned int dummy_data;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_READ;
- insn.n = 0;
- insn.data = &dummy_data;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, range, aref);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_data_read_delayed(void *dev, unsigned int subdev,
- unsigned int chan, unsigned int range,
- unsigned int aref, unsigned int *data,
- unsigned int nano_sec)
-{
- int retval;
-
- retval = comedi_data_read_hint(dev, subdev, chan, range, aref);
- if (retval < 0)
- return retval;
-
- udelay((nano_sec + 999) / 1000);
-
- return comedi_data_read(dev, subdev, chan, range, aref, data);
-}
diff --git a/drivers/staging/comedi/kcomedilib/dio.c b/drivers/staging/comedi/kcomedilib/dio.c
deleted file mode 100644
index 30192f3c465..00000000000
--- a/drivers/staging/comedi/kcomedilib/dio.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- kcomedilib/dio.c
- implements comedi_dio_*() functions
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include "../comedi.h"
-#include "../comedilib.h"
-
-#include <linux/string.h>
-
-int comedi_dio_config(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int io)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_CONFIG;
- insn.n = 1;
- insn.data = &io;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, 0, 0);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_dio_read(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int *val)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_READ;
- insn.n = 1;
- insn.data = val;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, 0, 0);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_dio_write(void *dev, unsigned int subdev, unsigned int chan,
- unsigned int val)
-{
- struct comedi_insn insn;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_WRITE;
- insn.n = 1;
- insn.data = &val;
- insn.subdev = subdev;
- insn.chanspec = CR_PACK(chan, 0, 0);
-
- return comedi_do_insn(dev, &insn);
-}
-
-int comedi_dio_bitfield(void *dev, unsigned int subdev, unsigned int mask,
- unsigned int *bits)
-{
- struct comedi_insn insn;
- unsigned int data[2];
- int ret;
-
- memset(&insn, 0, sizeof(insn));
- insn.insn = INSN_BITS;
- insn.n = 2;
- insn.data = data;
- insn.subdev = subdev;
-
- data[0] = mask;
- data[1] = *bits;
-
- ret = comedi_do_insn(dev, &insn);
-
- *bits = data[1];
-
- return ret;
-}
diff --git a/drivers/staging/comedi/kcomedilib/get.c b/drivers/staging/comedi/kcomedilib/get.c
deleted file mode 100644
index 6d841871525..00000000000
--- a/drivers/staging/comedi/kcomedilib/get.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- kcomedilib/get.c
- a comedlib interface for kernel modules
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#define __NO_VERSION__
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
-
-int comedi_get_n_subdevices(void *d)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- return dev->n_subdevices;
-}
-
-int comedi_get_version_code(void *d)
-{
- return COMEDI_VERSION_CODE;
-}
-
-const char *comedi_get_driver_name(void *d)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- return dev->driver->driver_name;
-}
-
-const char *comedi_get_board_name(void *d)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- return dev->board_name;
-}
-
-int comedi_get_subdevice_type(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
-
- return s->type;
-}
-
-unsigned int comedi_get_subdevice_flags(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
-
- return s->subdev_flags;
-}
-
-int comedi_find_subdevice_by_type(void *d, int type, unsigned int subd)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- if (subd > dev->n_subdevices)
- return -ENODEV;
-
- for (; subd < dev->n_subdevices; subd++) {
- if (dev->subdevices[subd].type == type)
- return subd;
- }
- return -1;
-}
-
-int comedi_get_n_channels(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
-
- return s->n_chan;
-}
-
-int comedi_get_len_chanlist(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
-
- return s->len_chanlist;
-}
-
-unsigned int comedi_get_maxdata(void *d, unsigned int subdevice,
- unsigned int chan)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
-
- if (s->maxdata_list)
- return s->maxdata_list[chan];
-
- return s->maxdata;
-}
-
-#ifdef KCOMEDILIB_DEPRECATED
-int comedi_get_rangetype(void *d, unsigned int subdevice, unsigned int chan)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- int ret;
-
- if (s->range_table_list) {
- ret = s->range_table_list[chan]->length;
- } else {
- ret = s->range_table->length;
- }
-
- ret = ret | (dev->minor << 28) | (subdevice << 24) | (chan << 16);
-
- return ret;
-}
-#endif
-
-int comedi_get_n_ranges(void *d, unsigned int subdevice, unsigned int chan)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- int ret;
-
- if (s->range_table_list) {
- ret = s->range_table_list[chan]->length;
- } else {
- ret = s->range_table->length;
- }
-
- return ret;
-}
-
-/*
- * ALPHA (non-portable)
-*/
-int comedi_get_krange(void *d, unsigned int subdevice, unsigned int chan,
- unsigned int range, struct comedi_krange *krange)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- const struct comedi_lrange *lr;
-
- if (s->range_table_list) {
- lr = s->range_table_list[chan];
- } else {
- lr = s->range_table;
- }
- if (range >= lr->length)
- return -EINVAL;
-
- memcpy(krange, lr->range + range, sizeof(struct comedi_krange));
-
- return 0;
-}
-
-/*
- * ALPHA (may be renamed)
-*/
-unsigned int comedi_get_buf_head_pos(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
-
- async = s->async;
- if (async == NULL)
- return 0;
-
- return async->buf_write_count;
-}
-
-int comedi_get_buffer_contents(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
- unsigned int num_bytes;
-
- if (subdevice >= dev->n_subdevices)
- return -1;
- async = s->async;
- if (async == NULL)
- return 0;
- num_bytes = comedi_buf_read_n_available(s->async);
- return num_bytes;
-}
-
-/*
- * ALPHA
-*/
-int comedi_set_user_int_count(void *d, unsigned int subdevice,
- unsigned int buf_user_count)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
- int num_bytes;
-
- async = s->async;
- if (async == NULL)
- return -1;
-
- num_bytes = buf_user_count - async->buf_read_count;
- if (num_bytes < 0)
- return -1;
- comedi_buf_read_alloc(async, num_bytes);
- comedi_buf_read_free(async, num_bytes);
-
- return 0;
-}
-
-int comedi_mark_buffer_read(void *d, unsigned int subdevice,
- unsigned int num_bytes)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
-
- if (subdevice >= dev->n_subdevices)
- return -1;
- async = s->async;
- if (async == NULL)
- return -1;
-
- comedi_buf_read_alloc(async, num_bytes);
- comedi_buf_read_free(async, num_bytes);
-
- return 0;
-}
-
-int comedi_mark_buffer_written(void *d, unsigned int subdevice,
- unsigned int num_bytes)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
- int bytes_written;
-
- if (subdevice >= dev->n_subdevices)
- return -1;
- async = s->async;
- if (async == NULL)
- return -1;
- bytes_written = comedi_buf_write_alloc(async, num_bytes);
- comedi_buf_write_free(async, bytes_written);
- if (bytes_written != num_bytes)
- return -1;
- return 0;
-}
-
-int comedi_get_buffer_size(void *d, unsigned int subdev)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdev;
- struct comedi_async *async;
-
- if (subdev >= dev->n_subdevices)
- return -1;
- async = s->async;
- if (async == NULL)
- return 0;
-
- return async->prealloc_bufsz;
-}
-
-int comedi_get_buffer_offset(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices + subdevice;
- struct comedi_async *async;
-
- if (subdevice >= dev->n_subdevices)
- return -1;
- async = s->async;
- if (async == NULL)
- return 0;
-
- return async->buf_read_ptr;
-}
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 288fef4fcbc..863aae40ede 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -31,7 +31,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "../comedi.h"
#include "../comedilib.h"
@@ -41,7 +41,7 @@ MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
MODULE_LICENSE("GPL");
-void *comedi_open(const char *filename)
+struct comedi_device *comedi_open(const char *filename)
{
struct comedi_device_file_info *dev_file_info;
struct comedi_device *dev;
@@ -66,29 +66,11 @@ void *comedi_open(const char *filename)
if (!try_module_get(dev->driver->module))
return NULL;
- return (void *)dev;
+ return dev;
}
+EXPORT_SYMBOL(comedi_open);
-void *comedi_open_old(unsigned int minor)
-{
- struct comedi_device_file_info *dev_file_info;
- struct comedi_device *dev;
-
- if (minor >= COMEDI_NUM_MINORS)
- return NULL;
-
- dev_file_info = comedi_get_device_file_info(minor);
- if (dev_file_info == NULL)
- return NULL;
- dev = dev_file_info->device;
-
- if (dev == NULL || !dev->attached)
- return NULL;
-
- return (void *)dev;
-}
-
-int comedi_close(void *d)
+int comedi_close(struct comedi_device *d)
{
struct comedi_device *dev = (struct comedi_device *)d;
@@ -96,465 +78,118 @@ int comedi_close(void *d)
return 0;
}
+EXPORT_SYMBOL(comedi_close);
-int comedi_loglevel(int newlevel)
-{
- return 0;
-}
-
-void comedi_perror(const char *message)
-{
- printk("%s: unknown error\n", message);
-}
-
-char *comedi_strerror(int err)
-{
- return "unknown error";
-}
-
-int comedi_fileno(void *d)
+static int comedi_do_insn(struct comedi_device *dev, struct comedi_insn *insn)
{
- struct comedi_device *dev = (struct comedi_device *)d;
-
- /* return something random */
- return dev->minor;
-}
-
-int comedi_command(void *d, struct comedi_cmd *cmd)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- unsigned runflags;
-
- if (cmd->subdev >= dev->n_subdevices)
- return -ENODEV;
-
- s = dev->subdevices + cmd->subdev;
- if (s->type == COMEDI_SUBD_UNUSED)
- return -EIO;
-
- async = s->async;
- if (async == NULL)
- return -ENODEV;
-
- if (s->busy)
- return -EBUSY;
- s->busy = d;
-
- if (async->cb_mask & COMEDI_CB_EOS)
- cmd->flags |= TRIG_WAKE_EOS;
-
- async->cmd = *cmd;
-
- runflags = SRF_RUNNING;
-
- comedi_set_subdevice_runflags(s, ~0, runflags);
-
- comedi_reset_async_buf(async);
-
- return s->do_cmd(dev, s);
-}
-
-int comedi_command_test(void *d, struct comedi_cmd *cmd)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
-
- if (cmd->subdev >= dev->n_subdevices)
- return -ENODEV;
-
- s = dev->subdevices + cmd->subdev;
- if (s->type == COMEDI_SUBD_UNUSED)
- return -EIO;
-
- if (s->async == NULL)
- return -ENODEV;
-
- return s->do_cmdtest(dev, s, cmd);
-}
-
-/*
- * COMEDI_INSN
- * perform an instruction
- */
-int comedi_do_insn(void *d, struct comedi_insn *insn)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
struct comedi_subdevice *s;
int ret = 0;
- if (insn->insn & INSN_MASK_SPECIAL) {
- switch (insn->insn) {
- case INSN_GTOD:
- {
- struct timeval tv;
-
- do_gettimeofday(&tv);
- insn->data[0] = tv.tv_sec;
- insn->data[1] = tv.tv_usec;
- ret = 2;
-
- break;
- }
- case INSN_WAIT:
- /* XXX isn't the value supposed to be nanosecs? */
- if (insn->n != 1 || insn->data[0] >= 100) {
- ret = -EINVAL;
- break;
- }
- udelay(insn->data[0]);
- ret = 1;
- break;
- case INSN_INTTRIG:
- if (insn->n != 1) {
- ret = -EINVAL;
- break;
- }
- if (insn->subdev >= dev->n_subdevices) {
- printk("%d not usable subdevice\n",
- insn->subdev);
- ret = -EINVAL;
- break;
- }
- s = dev->subdevices + insn->subdev;
- if (!s->async) {
- printk("no async\n");
- ret = -EINVAL;
- break;
- }
- if (!s->async->inttrig) {
- printk("no inttrig\n");
- ret = -EAGAIN;
- break;
- }
- ret = s->async->inttrig(dev, s, insn->data[0]);
- if (ret >= 0)
- ret = 1;
- break;
- default:
- ret = -EINVAL;
- }
- } else {
- /* a subdevice instruction */
- if (insn->subdev >= dev->n_subdevices) {
- ret = -EINVAL;
- goto error;
- }
- s = dev->subdevices + insn->subdev;
-
- if (s->type == COMEDI_SUBD_UNUSED) {
- printk("%d not useable subdevice\n", insn->subdev);
- ret = -EIO;
- goto error;
- }
-
- /* XXX check lock */
-
- ret = check_chanlist(s, 1, &insn->chanspec);
- if (ret < 0) {
- printk("bad chanspec\n");
- ret = -EINVAL;
- goto error;
- }
-
- if (s->busy) {
- ret = -EBUSY;
- goto error;
- }
- s->busy = d;
-
- switch (insn->insn) {
- case INSN_READ:
- ret = s->insn_read(dev, s, insn, insn->data);
- break;
- case INSN_WRITE:
- ret = s->insn_write(dev, s, insn, insn->data);
- break;
- case INSN_BITS:
- ret = s->insn_bits(dev, s, insn, insn->data);
- break;
- case INSN_CONFIG:
- /* XXX should check instruction length */
- ret = s->insn_config(dev, s, insn, insn->data);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- s->busy = NULL;
- }
- if (ret < 0)
- goto error;
-#if 0
- /* XXX do we want this? -- abbotti #if'ed it out for now. */
- if (ret != insn->n) {
- printk("BUG: result of insn != insn.n\n");
+ /* a subdevice instruction */
+ if (insn->subdev >= dev->n_subdevices) {
ret = -EINVAL;
goto error;
}
-#endif
-error:
-
- return ret;
-}
-
-/*
- COMEDI_LOCK
- lock subdevice
-
- arg:
- subdevice number
-
- reads:
- none
-
- writes:
- none
-
- necessary locking:
- - ioctl/rt lock (this type)
- - lock while subdevice busy
- - lock while subdevice being programmed
+ s = dev->subdevices + insn->subdev;
-*/
-int comedi_lock(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
- unsigned long flags;
- int ret = 0;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- spin_lock_irqsave(&s->spin_lock, flags);
-
- if (s->busy) {
- ret = -EBUSY;
- } else {
- if (s->lock) {
- ret = -EBUSY;
- } else {
- s->lock = d;
- }
+ if (s->type == COMEDI_SUBD_UNUSED) {
+ printk("%d not useable subdevice\n", insn->subdev);
+ ret = -EIO;
+ goto error;
}
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- return ret;
-}
+ /* XXX check lock */
-/*
- COMEDI_UNLOCK
- unlock subdevice
-
- arg:
- subdevice number
-
- reads:
- none
-
- writes:
- none
-
-*/
-int comedi_unlock(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
- unsigned long flags;
- struct comedi_async *async;
- int ret;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- async = s->async;
-
- spin_lock_irqsave(&s->spin_lock, flags);
+ ret = comedi_check_chanlist(s, 1, &insn->chanspec);
+ if (ret < 0) {
+ printk("bad chanspec\n");
+ ret = -EINVAL;
+ goto error;
+ }
if (s->busy) {
ret = -EBUSY;
- } else if (s->lock && s->lock != (void *)d) {
- ret = -EACCES;
- } else {
- s->lock = NULL;
-
- if (async) {
- async->cb_mask = 0;
- async->cb_func = NULL;
- async->cb_arg = NULL;
- }
-
- ret = 0;
+ goto error;
+ }
+ s->busy = dev;
+
+ switch (insn->insn) {
+ case INSN_BITS:
+ ret = s->insn_bits(dev, s, insn, insn->data);
+ break;
+ case INSN_CONFIG:
+ /* XXX should check instruction length */
+ ret = s->insn_config(dev, s, insn, insn->data);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- spin_unlock_irqrestore(&s->spin_lock, flags);
-
- return ret;
-}
-
-/*
- COMEDI_CANCEL
- cancel acquisition ioctl
-
- arg:
- subdevice number
-
- reads:
- nothing
-
- writes:
- nothing
-
-*/
-int comedi_cancel(void *d, unsigned int subdevice)
-{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
- int ret = 0;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- if (s->lock && s->lock != d)
- return -EACCES;
-
-#if 0
- if (!s->busy)
- return 0;
-
- if (s->busy != d)
- return -EBUSY;
-#endif
-
- if (!s->cancel || !s->async)
- return -EINVAL;
-
- ret = s->cancel(dev, s);
-
- if (ret)
- return ret;
-
- comedi_set_subdevice_runflags(s, SRF_RUNNING | SRF_RT, 0);
- s->async->inttrig = NULL;
s->busy = NULL;
+error:
- return 0;
+ return ret;
}
-/*
- registration of callback functions
- */
-int comedi_register_callback(void *d, unsigned int subdevice,
- unsigned int mask, int (*cb) (unsigned int,
- void *), void *arg)
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io)
{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
- struct comedi_async *async;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- async = s->async;
- if (s->type == COMEDI_SUBD_UNUSED || !async)
- return -EIO;
+ struct comedi_insn insn;
- /* are we locked? (ioctl lock) */
- if (s->lock && s->lock != d)
- return -EACCES;
+ memset(&insn, 0, sizeof(insn));
+ insn.insn = INSN_CONFIG;
+ insn.n = 1;
+ insn.data = &io;
+ insn.subdev = subdev;
+ insn.chanspec = CR_PACK(chan, 0, 0);
- /* are we busy? */
- if (s->busy)
- return -EBUSY;
-
- if (!mask) {
- async->cb_mask = 0;
- async->cb_func = NULL;
- async->cb_arg = NULL;
- } else {
- async->cb_mask = mask;
- async->cb_func = cb;
- async->cb_arg = arg;
- }
-
- return 0;
+ return comedi_do_insn(dev, &insn);
}
+EXPORT_SYMBOL(comedi_dio_config);
-int comedi_poll(void *d, unsigned int subdevice)
+int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits)
{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s = dev->subdevices;
- struct comedi_async *async;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
+ struct comedi_insn insn;
+ unsigned int data[2];
+ int ret;
- s = dev->subdevices + subdevice;
+ memset(&insn, 0, sizeof(insn));
+ insn.insn = INSN_BITS;
+ insn.n = 2;
+ insn.data = data;
+ insn.subdev = subdev;
- async = s->async;
- if (s->type == COMEDI_SUBD_UNUSED || !async)
- return -EIO;
+ data[0] = mask;
+ data[1] = *bits;
- /* are we locked? (ioctl lock) */
- if (s->lock && s->lock != d)
- return -EACCES;
+ ret = comedi_do_insn(dev, &insn);
- /* are we running? XXX wrong? */
- if (!s->busy)
- return -EIO;
+ *bits = data[1];
- return s->poll(dev, s);
+ return ret;
}
+EXPORT_SYMBOL(comedi_dio_bitfield);
-/* WARNING: not portable */
-int comedi_map(void *d, unsigned int subdevice, void *ptr)
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
+ unsigned int subd)
{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
-
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- if (!s->async)
- return -EINVAL;
-
- if (ptr)
- *((void **)ptr) = s->async->prealloc_buf;
-
- /* XXX no reference counting */
+ if (subd > dev->n_subdevices)
+ return -ENODEV;
- return 0;
+ for (; subd < dev->n_subdevices; subd++) {
+ if (dev->subdevices[subd].type == type)
+ return subd;
+ }
+ return -1;
}
+EXPORT_SYMBOL(comedi_find_subdevice_by_type);
-/* WARNING: not portable */
-int comedi_unmap(void *d, unsigned int subdevice)
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
- struct comedi_device *dev = (struct comedi_device *)d;
- struct comedi_subdevice *s;
+ struct comedi_subdevice *s = dev->subdevices + subdevice;
- if (subdevice >= dev->n_subdevices)
- return -EINVAL;
-
- s = dev->subdevices + subdevice;
-
- if (!s->async)
- return -EINVAL;
-
- /* XXX no reference counting */
-
- return 0;
+ return s->n_chan;
}
+EXPORT_SYMBOL(comedi_get_n_channels);
diff --git a/drivers/staging/comedi/kcomedilib/ksyms.c b/drivers/staging/comedi/kcomedilib/ksyms.c
deleted file mode 100644
index 8bf4471ce6c..00000000000
--- a/drivers/staging/comedi/kcomedilib/ksyms.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- comedi/kcomedilib/ksyms.c
- a comedlib interface for kernel modules
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-
-/* functions specific to kcomedilib */
-
-EXPORT_SYMBOL(comedi_register_callback);
-EXPORT_SYMBOL(comedi_get_krange);
-EXPORT_SYMBOL(comedi_get_buf_head_pos);
-EXPORT_SYMBOL(comedi_set_user_int_count);
-EXPORT_SYMBOL(comedi_map);
-EXPORT_SYMBOL(comedi_unmap);
-
-/* This list comes from user-space comedilib, to show which
- * functions are not ported yet. */
-
-EXPORT_SYMBOL(comedi_open);
-EXPORT_SYMBOL(comedi_close);
-
-/* logging */
-EXPORT_SYMBOL(comedi_loglevel);
-EXPORT_SYMBOL(comedi_perror);
-EXPORT_SYMBOL(comedi_strerror);
-/* EXPORT_SYMBOL(comedi_errno); */
-EXPORT_SYMBOL(comedi_fileno);
-
-/* device queries */
-EXPORT_SYMBOL(comedi_get_n_subdevices);
-EXPORT_SYMBOL(comedi_get_version_code);
-EXPORT_SYMBOL(comedi_get_driver_name);
-EXPORT_SYMBOL(comedi_get_board_name);
-
-/* subdevice queries */
-EXPORT_SYMBOL(comedi_get_subdevice_type);
-EXPORT_SYMBOL(comedi_find_subdevice_by_type);
-EXPORT_SYMBOL(comedi_get_subdevice_flags);
-EXPORT_SYMBOL(comedi_get_n_channels);
-/*
-* EXPORT_SYMBOL(comedi_range_is_chan_specific);
-* EXPORT_SYMBOL(comedi_maxdata_is_chan_specific);
-*/
-
-/* channel queries */
-EXPORT_SYMBOL(comedi_get_maxdata);
-#ifdef KCOMEDILIB_DEPRECATED
-EXPORT_SYMBOL(comedi_get_rangetype);
-#endif
-EXPORT_SYMBOL(comedi_get_n_ranges);
-/* EXPORT_SYMBOL(comedi_find_range); */
-
-/* buffer queries */
-EXPORT_SYMBOL(comedi_get_buffer_size);
-/*
-* EXPORT_SYMBOL(comedi_get_max_buffer_size);
-* EXPORT_SYMBOL(comedi_set_buffer_size);
-*/
-EXPORT_SYMBOL(comedi_get_buffer_contents);
-EXPORT_SYMBOL(comedi_get_buffer_offset);
-
-/* low-level stuff */
-/*
-* EXPORT_SYMBOL(comedi_trigger); EXPORT_SYMBOL(comedi_do_insnlist);
-*/
-EXPORT_SYMBOL(comedi_do_insn);
-EXPORT_SYMBOL(comedi_lock);
-EXPORT_SYMBOL(comedi_unlock);
-
-/* physical units */
-/*
-* EXPORT_SYMBOL(comedi_to_phys); EXPORT_SYMBOL(comedi_from_phys);
-*/
-
-/* synchronous stuff */
-EXPORT_SYMBOL(comedi_data_read);
-EXPORT_SYMBOL(comedi_data_read_hint);
-EXPORT_SYMBOL(comedi_data_read_delayed);
-EXPORT_SYMBOL(comedi_data_write);
-EXPORT_SYMBOL(comedi_dio_config);
-EXPORT_SYMBOL(comedi_dio_read);
-EXPORT_SYMBOL(comedi_dio_write);
-EXPORT_SYMBOL(comedi_dio_bitfield);
-
-/* slowly varying stuff */
-/*
-* EXPORT_SYMBOL(comedi_sv_init); EXPORT_SYMBOL(comedi_sv_update);
-* EXPORT_SYMBOL(comedi_sv_measure);
-*/
-
-/* commands */
-/*
-* EXPORT_SYMBOL(comedi_get_cmd_src_mask);
-* EXPORT_SYMBOL(comedi_get_cmd_generic_timed);
-*/
-EXPORT_SYMBOL(comedi_cancel);
-EXPORT_SYMBOL(comedi_command);
-EXPORT_SYMBOL(comedi_command_test);
-EXPORT_SYMBOL(comedi_poll);
-
-/* buffer configuration */
-EXPORT_SYMBOL(comedi_mark_buffer_read);
-EXPORT_SYMBOL(comedi_mark_buffer_written);
-
-/* EXPORT_SYMBOL(comedi_get_range); */
-EXPORT_SYMBOL(comedi_get_len_chanlist);
-
-/* deprecated */
-/*
-* EXPORT_SYMBOL(comedi_get_timer);
-* EXPORT_SYMBOL(comedi_timed_1chan);
-*/
-
-/* alpha */
-/* EXPORT_SYMBOL(comedi_set_global_oor_behavior); */
diff --git a/drivers/staging/comedi/pci_ids.h b/drivers/staging/comedi/pci_ids.h
deleted file mode 100644
index d979aa8e396..00000000000
--- a/drivers/staging/comedi/pci_ids.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/***************************************************************************
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- ***************************************************************************/
-
-#ifndef __COMPAT_LINUX_PCI_IDS_H
-#define __COMPAT_LINUX_PCI_IDS_H
-
-#include <linux/pci_ids.h>
-
-#ifndef PCI_VENDOR_ID_AMCC
-#define PCI_VENDOR_ID_AMCC 0x10e8
-#endif
-
-#ifndef PCI_VENDOR_ID_CBOARDS
-#define PCI_VENDOR_ID_CBOARDS 0x1307
-#endif
-
-#ifndef PCI_VENDOR_ID_QUANCOM
-#define PCI_VENDOR_ID_QUANCOM 0x8008
-#endif
-
-#ifndef PCI_DEVICE_ID_QUANCOM_GPIB
-#define PCI_DEVICE_ID_QUANCOM_GPIB 0x3302
-#endif
-
-#endif /* __COMPAT_LINUX_PCI_IDS_H */
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 5a22fe62c40..2aa487b6018 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -30,16 +30,13 @@
#define __NO_VERSION__
#include "comedidev.h"
+#include "comedi_fops.h"
#include <linux/proc_fs.h>
-/* #include <linux/string.h> */
+#include <linux/string.h>
-int comedi_read_procmem(char *buf, char **start, off_t offset, int len,
- int *eof, void *data);
-
-extern struct comedi_driver *comedi_drivers;
-
-int comedi_read_procmem(char *buf, char **start, off_t offset, int len,
- int *eof, void *data)
+#ifdef CONFIG_PROC_FS
+static int comedi_read(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
{
int i;
int devices_q = 0;
@@ -49,7 +46,8 @@ int comedi_read_procmem(char *buf, char **start, off_t offset, int len,
l += sprintf(buf + l,
"comedi version " COMEDI_RELEASE "\n"
"format string: %s\n",
- "\"%2d: %-20s %-20s %4d\",i,driver_name,board_name,n_subdevices");
+ "\"%2d: %-20s %-20s %4d\", i, "
+ "driver_name, board_name, n_subdevices");
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device_file_info *dev_file_info =
@@ -85,18 +83,17 @@ int comedi_read_procmem(char *buf, char **start, off_t offset, int len,
return l;
}
-#ifdef CONFIG_PROC_FS
void comedi_proc_init(void)
{
struct proc_dir_entry *comedi_proc;
- comedi_proc = create_proc_entry("comedi", S_IFREG | S_IRUGO, 0);
+ comedi_proc = create_proc_entry("comedi", S_IFREG | S_IRUGO, NULL);
if (comedi_proc)
- comedi_proc->read_proc = comedi_read_procmem;
+ comedi_proc->read_proc = comedi_read;
}
void comedi_proc_cleanup(void)
{
- remove_proc_entry("comedi", 0);
+ remove_proc_entry("comedi", NULL);
}
#endif
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 8313dfcb673..148ec6fd6fd 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -21,15 +21,22 @@
*/
+#include <linux/uaccess.h>
#include "comedidev.h"
-#include <asm/uaccess.h>
+#include "internal.h"
const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} };
+EXPORT_SYMBOL(range_bipolar10);
const struct comedi_lrange range_bipolar5 = { 1, {BIP_RANGE(5)} };
+EXPORT_SYMBOL(range_bipolar5);
const struct comedi_lrange range_bipolar2_5 = { 1, {BIP_RANGE(2.5)} };
+EXPORT_SYMBOL(range_bipolar2_5);
const struct comedi_lrange range_unipolar10 = { 1, {UNI_RANGE(10)} };
+EXPORT_SYMBOL(range_unipolar10);
const struct comedi_lrange range_unipolar5 = { 1, {UNI_RANGE(5)} };
-const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none}} };
+EXPORT_SYMBOL(range_unipolar5);
+const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none} } };
+EXPORT_SYMBOL(range_unknown);
/*
COMEDI_RANGEINFO
@@ -44,7 +51,8 @@ const struct comedi_lrange range_unknown = { 1, {{0, 1000000, UNIT_none}} };
writes:
n struct comedi_krange structures to rangeinfo->range_ptr
*/
-int do_rangeinfo_ioctl(struct comedi_device *dev, struct comedi_rangeinfo *arg)
+int do_rangeinfo_ioctl(struct comedi_device *dev,
+ struct comedi_rangeinfo __user *arg)
{
struct comedi_rangeinfo it;
int subd, chan;
@@ -120,7 +128,8 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
This function checks each element in a channel/gain list to make
make sure it is valid.
*/
-int check_chanlist(struct comedi_subdevice *s, int n, unsigned int *chanlist)
+int comedi_check_chanlist(struct comedi_subdevice *s, int n,
+ unsigned int *chanlist)
{
int i;
int chan;
@@ -130,14 +139,10 @@ int check_chanlist(struct comedi_subdevice *s, int n, unsigned int *chanlist)
if (CR_CHAN(chanlist[i]) >= s->n_chan ||
CR_RANGE(chanlist[i]) >= s->range_table->length
|| aref_invalid(s, chanlist[i])) {
- printk
- ("bad chanlist[%d]=0x%08x n_chan=%d range length=%d\n",
- i, chanlist[i], s->n_chan,
- s->range_table->length);
-#if 0
- for (i = 0; i < n; i++)
- printk("[%d]=0x%08x\n", i, chanlist[i]);
-#endif
+ printk(KERN_ERR "bad chanlist[%d]=0x%08x "
+ "in_chan=%d range length=%d\n", i,
+ chanlist[i], s->n_chan,
+ s->range_table->length);
return -EINVAL;
}
} else if (s->range_table_list) {
@@ -147,14 +152,15 @@ int check_chanlist(struct comedi_subdevice *s, int n, unsigned int *chanlist)
CR_RANGE(chanlist[i]) >=
s->range_table_list[chan]->length
|| aref_invalid(s, chanlist[i])) {
- printk("bad chanlist[%d]=0x%08x\n", i,
- chanlist[i]);
+ printk(KERN_ERR "bad chanlist[%d]=0x%08x\n",
+ i, chanlist[i]);
return -EINVAL;
}
}
} else {
- printk("comedi: (bug) no range type list!\n");
+ printk(KERN_ERR "comedi: (bug) no range type list!\n");
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL(comedi_check_chanlist);
diff --git a/drivers/staging/comedi/wrapper.h b/drivers/staging/comedi/wrapper.h
deleted file mode 100644
index 77fc673900e..00000000000
--- a/drivers/staging/comedi/wrapper.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- linux/wrapper.h compatibility header
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __COMPAT_LINUX_WRAPPER_H_
-#define __COMPAT_LINUX_WRAPPER_H_
-
-#define mem_map_reserve(p) set_bit(PG_reserved, &((p)->flags))
-#define mem_map_unreserve(p) clear_bit(PG_reserved, &((p)->flags))
-
-#endif /* __COMPAT_LINUX_WRAPPER_H_ */
diff --git a/drivers/staging/crystalhd/TODO b/drivers/staging/crystalhd/TODO
index 69be5d0cb80..daca2d4d2a2 100644
--- a/drivers/staging/crystalhd/TODO
+++ b/drivers/staging/crystalhd/TODO
@@ -1,7 +1,6 @@
- Testing
- Cleanup return codes
- Cleanup typedefs
-- Cleanup all WIN* references
- Allocate an Accelerator device class specific Major number,
since we don't have any other open sourced accelerators, it is the only
one in that category for now.
diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
index c34cc07127b..778e76af052 100644
--- a/drivers/staging/crystalhd/bc_dts_defs.h
+++ b/drivers/staging/crystalhd/bc_dts_defs.h
@@ -26,12 +26,10 @@
#ifndef _BC_DTS_DEFS_H_
#define _BC_DTS_DEFS_H_
-#include "bc_dts_types.h"
-
/* BIT Mask */
#define BC_BIT(_x) (1 << (_x))
-typedef enum _BC_STATUS {
+enum BC_STATUS {
BC_STS_SUCCESS = 0,
BC_STS_INV_ARG = 1,
BC_STS_BUSY = 2,
@@ -62,7 +60,7 @@ typedef enum _BC_STATUS {
/* Must be the last one.*/
BC_STS_ERROR = -1
-} BC_STATUS;
+};
/*------------------------------------------------------*
* Registry Key Definitions *
@@ -81,14 +79,14 @@ typedef enum _BC_STATUS {
*
*/
-typedef enum _BC_SW_OPTIONS {
+enum BC_SW_OPTIONS {
BC_OPT_DOSER_OUT_ENCRYPT = BC_BIT(3),
BC_OPT_LINK_OUT_ENCRYPT = BC_BIT(29),
-} BC_SW_OPTIONS;
+};
-typedef struct _BC_REG_CONFIG{
+struct BC_REG_CONFIG{
uint32_t DbgOptions;
-} BC_REG_CONFIG;
+};
#if defined(__KERNEL__) || defined(__LINUX_USER__)
#else
@@ -108,7 +106,7 @@ typedef struct _BC_REG_CONFIG{
*/
/* To allow multiple apps to open the device. */
-enum _DtsDeviceOpenMode {
+enum DtsDeviceOpenMode {
DTS_PLAYBACK_MODE = 0,
DTS_DIAG_MODE,
DTS_MONITOR_MODE,
@@ -116,7 +114,7 @@ enum _DtsDeviceOpenMode {
};
/* To enable the filter to selectively enable/disable fixes or erratas */
-enum _DtsDeviceFixMode {
+enum DtsDeviceFixMode {
DTS_LOAD_NEW_FW = BC_BIT(8),
DTS_LOAD_FILE_PLAY_FW = BC_BIT(9),
DTS_DISK_FMT_BD = BC_BIT(10),
@@ -133,7 +131,7 @@ enum _DtsDeviceFixMode {
#define DTS_DFLT_CLOCK(x) (x<<19)
/* F/W File Version corresponding to S/W Releases */
-enum _FW_FILE_VER {
+enum FW_FILE_VER {
/* S/W release: 02.04.02 F/W release 2.12.2.0 */
BC_FW_VER_020402 = ((12<<16) | (2<<8) | (0))
};
@@ -141,7 +139,7 @@ enum _FW_FILE_VER {
/*------------------------------------------------------*
* Stream Types for DtsOpenDecoder() *
*------------------------------------------------------*/
-enum _DtsOpenDecStreamTypes {
+enum DtsOpenDecStreamTypes {
BC_STREAM_TYPE_ES = 0,
BC_STREAM_TYPE_PES = 1,
BC_STREAM_TYPE_TS = 2,
@@ -151,7 +149,7 @@ enum _DtsOpenDecStreamTypes {
/*------------------------------------------------------*
* Video Algorithms for DtsSetVideoParams() *
*------------------------------------------------------*/
-enum _DtsSetVideoParamsAlgo {
+enum DtsSetVideoParamsAlgo {
BC_VID_ALGO_H264 = 0,
BC_VID_ALGO_MPEG2 = 1,
BC_VID_ALGO_VC1 = 4,
@@ -163,7 +161,7 @@ enum _DtsSetVideoParamsAlgo {
*------------------------------------------------------*/
#define BC_MPEG_VALID_PANSCAN (1)
-typedef struct _BC_PIB_EXT_MPEG {
+struct BC_PIB_EXT_MPEG {
uint32_t valid;
/* Always valid, defaults to picture size if no
* sequence display extension in the stream. */
@@ -175,8 +173,7 @@ typedef struct _BC_PIB_EXT_MPEG {
uint32_t offset_count;
int32_t horizontal_offset[3];
int32_t vertical_offset[3];
-
-} BC_PIB_EXT_MPEG;
+};
/*------------------------------------------------------*
* H.264 Extension to the PPB *
@@ -186,7 +183,7 @@ typedef struct _BC_PIB_EXT_MPEG {
#define H264_VALID_SPS_CROP (2)
#define H264_VALID_VUI (4)
-typedef struct _BC_PIB_EXT_H264 {
+struct BC_PIB_EXT_H264 {
/* 'valid' specifies which fields (or sets of
* fields) below are valid. If the corresponding
* bit in 'valid' is NOT set then that field(s)
@@ -209,15 +206,14 @@ typedef struct _BC_PIB_EXT_H264 {
/* H264_VALID_VUI */
uint32_t chroma_top;
uint32_t chroma_bottom;
-
-} BC_PIB_EXT_H264;
+};
/*------------------------------------------------------*
* VC1 Extension to the PPB *
*------------------------------------------------------*/
#define VC1_VALID_PANSCAN (1)
-typedef struct _BC_PIB_EXT_VC1 {
+struct BC_PIB_EXT_VC1 {
uint32_t valid;
/* Always valid, defaults to picture size if no
@@ -231,14 +227,12 @@ typedef struct _BC_PIB_EXT_VC1 {
int32_t ps_vert_offset[4];
int32_t ps_width[4];
int32_t ps_height[4];
-
-} BC_PIB_EXT_VC1;
-
+};
/*------------------------------------------------------*
* Picture Information Block *
*------------------------------------------------------*/
-#if defined(_WIN32) || defined(_WIN64) || defined(__LINUX_USER__)
+#if defined(__LINUX_USER__)
/* Values for 'pulldown' field. '0' means no pulldown information
* was present for this picture. */
enum {
@@ -358,7 +352,7 @@ enum {
#define VDEC_FLAG_PICTURE_META_DATA_PRESENT (0x40000)
-#endif /* _WIN32 || _WIN64 */
+#endif /* __LINUX_USER__ */
enum _BC_OUTPUT_FORMAT {
MODE420 = 0x0,
@@ -366,7 +360,7 @@ enum _BC_OUTPUT_FORMAT {
MODE422_UYVY = 0x2,
};
-typedef struct _BC_PIC_INFO_BLOCK {
+struct BC_PIC_INFO_BLOCK {
/* Common fields. */
uint64_t timeStamp; /* Timestamp */
uint32_t picture_number; /* Ordinal display number */
@@ -386,18 +380,18 @@ typedef struct _BC_PIC_INFO_BLOCK {
/* Protocol-specific extensions. */
union {
- BC_PIB_EXT_H264 h264;
- BC_PIB_EXT_MPEG mpeg;
- BC_PIB_EXT_VC1 vc1;
+ struct BC_PIB_EXT_H264 h264;
+ struct BC_PIB_EXT_MPEG mpeg;
+ struct BC_PIB_EXT_VC1 vc1;
} other;
-} BC_PIC_INFO_BLOCK, *PBC_PIC_INFO_BLOCK;
+};
/*------------------------------------------------------*
* ProcOut Info *
*------------------------------------------------------*/
/* Optional flags for ProcOut Interface.*/
-enum _POUT_OPTIONAL_IN_FLAGS_{
+enum POUT_OPTIONAL_IN_FLAGS_{
/* Flags from App to Device */
BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
@@ -412,17 +406,13 @@ enum _POUT_OPTIONAL_IN_FLAGS_{
BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */
};
-#if defined(__KERNEL__) || defined(__LINUX_USER__)
-typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
-#else
-typedef BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, struct _BC_DTS_PROC_OUT *pOut);
-#endif
+typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
/* Line 21 Closed Caption */
/* User Data */
#define MAX_UD_SIZE 1792 /* 1920 - 128 */
-typedef struct _BC_DTS_PROC_OUT {
+struct BC_DTS_PROC_OUT {
uint8_t *Ybuff; /* Caller Supplied buffer for Y data */
uint32_t YbuffSz; /* Caller Supplied Y buffer size */
uint32_t YBuffDoneSz; /* Transferred Y datasize */
@@ -436,7 +426,7 @@ typedef struct _BC_DTS_PROC_OUT {
uint32_t discCnt; /* Picture discontinuity count */
- BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
+ struct BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
/* Line 21 Closed Caption */
/* User Data */
@@ -450,9 +440,9 @@ typedef struct _BC_DTS_PROC_OUT {
uint8_t bPibEnc; /* PIB encrypted */
uint8_t bRevertScramble;
-} BC_DTS_PROC_OUT;
+};
-typedef struct _BC_DTS_STATUS {
+struct BC_DTS_STATUS {
uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */
uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */
uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */
@@ -479,7 +469,7 @@ typedef struct _BC_DTS_STATUS {
* back from the driver */
uint8_t reserved__[16];
-} BC_DTS_STATUS;
+};
#define BC_SWAP32(_v) \
((((_v) & 0xFF000000)>>24)| \
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index b3125e3e037..80b7a73a9d4 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -40,7 +40,7 @@
#include <sys/time.h>
#include <time.h>
#include <arpa/inet.h>
-#include <asm/param.h>
+#include <linux/param.h>
#include <linux/ioctl.h>
#include <sys/select.h>
@@ -58,7 +58,7 @@
* These are SW stack tunable parameters shared
* between the driver and the application.
*/
-enum _BC_DTS_GLOBALS {
+enum BC_DTS_GLOBALS {
BC_MAX_FW_CMD_BUFF_SZ = 0x40, /* FW passthrough cmd/rsp buffer size */
PCI_CFG_SIZE = 256, /* PCI config size buffer */
BC_IOCTL_DATA_POOL_SIZE = 8, /* BC_IOCTL_DATA Pool size */
@@ -70,62 +70,62 @@ enum _BC_DTS_GLOBALS {
BC_INFIFO_THRESHOLD = 0x10000,
};
-typedef struct _BC_CMD_REG_ACC {
+struct BC_CMD_REG_ACC {
uint32_t Offset;
uint32_t Value;
-} BC_CMD_REG_ACC;
+};
-typedef struct _BC_CMD_DEV_MEM {
+struct BC_CMD_DEV_MEM {
uint32_t StartOff;
uint32_t NumDwords;
uint32_t Rsrd;
-} BC_CMD_DEV_MEM;
+};
/* FW Passthrough command structure */
-enum _bc_fw_cmd_flags {
+enum bc_fw_cmd_flags {
BC_FW_CMD_FLAGS_NONE = 0,
BC_FW_CMD_PIB_QS = 0x01,
};
-typedef struct _BC_FW_CMD {
+struct BC_FW_CMD {
uint32_t cmd[BC_MAX_FW_CMD_BUFF_SZ];
uint32_t rsp[BC_MAX_FW_CMD_BUFF_SZ];
uint32_t flags;
uint32_t add_data;
-} BC_FW_CMD, *PBC_FW_CMD;
+};
-typedef struct _BC_HW_TYPE {
+struct BC_HW_TYPE {
uint16_t PciDevId;
uint16_t PciVenId;
uint8_t HwRev;
uint8_t Align[3];
-} BC_HW_TYPE;
+};
-typedef struct _BC_PCI_CFG {
+struct BC_PCI_CFG {
uint32_t Size;
uint32_t Offset;
uint8_t pci_cfg_space[PCI_CFG_SIZE];
-} BC_PCI_CFG;
+};
-typedef struct _BC_VERSION_INFO_ {
+struct BC_VERSION_INFO {
uint8_t DriverMajor;
uint8_t DriverMinor;
uint16_t DriverRevision;
-} BC_VERSION_INFO;
+};
-typedef struct _BC_START_RX_CAP_ {
+struct BC_START_RX_CAP {
uint32_t Rsrd;
uint32_t StartDeliveryThsh;
uint32_t PauseThsh;
uint32_t ResumeThsh;
-} BC_START_RX_CAP;
+};
-typedef struct _BC_FLUSH_RX_CAP_ {
+struct BC_FLUSH_RX_CAP {
uint32_t Rsrd;
uint32_t bDiscardOnly;
-} BC_FLUSH_RX_CAP;
+};
-typedef struct _BC_DTS_STATS {
+struct BC_DTS_STATS {
uint8_t drvRLL;
uint8_t drvFLL;
uint8_t eosDetected;
@@ -154,18 +154,18 @@ typedef struct _BC_DTS_STATS {
uint32_t DrvRepeatedFrms;
uint32_t res1[13];
-} BC_DTS_STATS;
+};
-typedef struct _BC_PROC_INPUT_ {
+struct BC_PROC_INPUT {
uint8_t *pDmaBuff;
uint32_t BuffSz;
uint8_t Mapped;
uint8_t Encrypted;
uint8_t Rsrd[2];
uint32_t DramOffset; /* For debug use only */
-} BC_PROC_INPUT, *PBC_PROC_INPUT;
+};
-typedef struct _BC_DEC_YUV_BUFFS {
+struct BC_DEC_YUV_BUFFS {
uint32_t b422Mode;
uint8_t *YuvBuff;
uint32_t YuvBuffSz;
@@ -173,9 +173,9 @@ typedef struct _BC_DEC_YUV_BUFFS {
uint32_t YBuffDoneSz;
uint32_t UVBuffDoneSz;
uint32_t RefCnt;
-} BC_DEC_YUV_BUFFS;
+};
-enum _DECOUT_COMPLETION_FLAGS{
+enum DECOUT_COMPLETION_FLAGS{
COMP_FLAG_NO_INFO = 0x00,
COMP_FLAG_FMT_CHANGE = 0x01,
COMP_FLAG_PIB_VALID = 0x02,
@@ -184,47 +184,47 @@ enum _DECOUT_COMPLETION_FLAGS{
COMP_FLAG_DATA_BOT = 0x10,
};
-typedef struct _BC_DEC_OUT_BUFF{
- BC_DEC_YUV_BUFFS OutPutBuffs;
- BC_PIC_INFO_BLOCK PibInfo;
+struct BC_DEC_OUT_BUFF{
+ struct BC_DEC_YUV_BUFFS OutPutBuffs;
+ struct BC_PIC_INFO_BLOCK PibInfo;
uint32_t Flags;
uint32_t BadFrCnt;
-} BC_DEC_OUT_BUFF;
+};
-typedef struct _BC_NOTIFY_MODE {
+struct BC_NOTIFY_MODE {
uint32_t Mode;
uint32_t Rsvr[3];
-} BC_NOTIFY_MODE;
+};
-typedef struct _BC_CLOCK {
+struct BC_CLOCK {
uint32_t clk;
uint32_t Rsvr[3];
-} BC_CLOCK;
+};
-typedef struct _BC_IOCTL_DATA {
- BC_STATUS RetSts;
+struct BC_IOCTL_DATA {
+ enum BC_STATUS RetSts;
uint32_t IoctlDataSz;
uint32_t Timeout;
union {
- BC_CMD_REG_ACC regAcc;
- BC_CMD_DEV_MEM devMem;
- BC_FW_CMD fwCmd;
- BC_HW_TYPE hwType;
- BC_PCI_CFG pciCfg;
- BC_VERSION_INFO VerInfo;
- BC_PROC_INPUT ProcInput;
- BC_DEC_YUV_BUFFS RxBuffs;
- BC_DEC_OUT_BUFF DecOutData;
- BC_START_RX_CAP RxCap;
- BC_FLUSH_RX_CAP FlushRxCap;
- BC_DTS_STATS drvStat;
- BC_NOTIFY_MODE NotifyMode;
- BC_CLOCK clockValue;
+ struct BC_CMD_REG_ACC regAcc;
+ struct BC_CMD_DEV_MEM devMem;
+ struct BC_FW_CMD fwCmd;
+ struct BC_HW_TYPE hwType;
+ struct BC_PCI_CFG pciCfg;
+ struct BC_VERSION_INFO VerInfo;
+ struct BC_PROC_INPUT ProcInput;
+ struct BC_DEC_YUV_BUFFS RxBuffs;
+ struct BC_DEC_OUT_BUFF DecOutData;
+ struct BC_START_RX_CAP RxCap;
+ struct BC_FLUSH_RX_CAP FlushRxCap;
+ struct BC_DTS_STATS drvStat;
+ struct BC_NOTIFY_MODE NotifyMode;
+ struct BC_CLOCK clockValue;
} u;
struct _BC_IOCTL_DATA *next;
-} BC_IOCTL_DATA;
+};
-typedef enum _BC_DRV_CMD{
+enum BC_DRV_CMD {
DRV_CMD_VERSION = 0, /* Get SW version */
DRV_CMD_GET_HWTYPE, /* Get HW version and type Dozer/Tank */
DRV_CMD_REG_RD, /* Read Device Register */
@@ -249,12 +249,12 @@ typedef enum _BC_DRV_CMD{
/* MUST be the last one.. */
DRV_CMD_END, /* End of the List.. */
-} BC_DRV_CMD;
+};
#define BC_IOC_BASE 'b'
#define BC_IOC_VOID _IOC_NONE
#define BC_IOC_IOWR(nr, type) _IOWR(BC_IOC_BASE, nr, type)
-#define BC_IOCTL_MB BC_IOCTL_DATA
+#define BC_IOCTL_MB struct BC_IOCTL_DATA
#define BCM_IOC_GET_VERSION BC_IOC_IOWR(DRV_CMD_VERSION, BC_IOCTL_MB)
#define BCM_IOC_GET_HWTYPE BC_IOC_IOWR(DRV_CMD_GET_HWTYPE, BC_IOCTL_MB)
@@ -280,17 +280,16 @@ typedef enum _BC_DRV_CMD{
#define BCM_IOC_END BC_IOC_VOID
/* Wrapper for main IOCTL data */
-typedef struct _crystalhd_ioctl_data {
- BC_IOCTL_DATA udata; /* IOCTL from App..*/
+struct crystalhd_ioctl_data {
+ struct BC_IOCTL_DATA udata; /* IOCTL from App..*/
uint32_t u_id; /* Driver specific user ID */
uint32_t cmd; /* Cmd ID for driver's use. */
void *add_cdata; /* Additional command specific data..*/
uint32_t add_cdata_sz; /* Additional command specific data size */
- struct _crystalhd_ioctl_data *next; /* List/Fifo management */
-} crystalhd_ioctl_data;
-
+ struct crystalhd_ioctl_data *next; /* List/Fifo management */
+};
-enum _crystalhd_kmod_ver{
+enum crystalhd_kmod_ver{
crystalhd_kmod_major = 0,
crystalhd_kmod_minor = 9,
crystalhd_kmod_rev = 27,
diff --git a/drivers/staging/crystalhd/bc_dts_types.h b/drivers/staging/crystalhd/bc_dts_types.h
index ac0c8171738..6fd8089415d 100644
--- a/drivers/staging/crystalhd/bc_dts_types.h
+++ b/drivers/staging/crystalhd/bc_dts_types.h
@@ -25,19 +25,10 @@
#ifndef _BC_DTS_TYPES_H_
#define _BC_DTS_TYPES_H_
-#ifdef __LINUX_USER__ // Don't include these for KERNEL..
+#ifdef __LINUX_USER__ /* Don't include these for KERNEL.. */
#include <stdint.h>
#endif
-#if defined(_WIN64) || defined(_WIN32)
-typedef uint32_t U32;
-typedef int32_t S32;
-typedef uint16_t U16;
-typedef int16_t S16;
-typedef unsigned char U8;
-typedef char S8;
-#endif
-
#ifndef PVOID
typedef void *PVOID;
#endif
@@ -46,20 +37,6 @@ typedef void *PVOID;
typedef int BOOL;
#endif
-#ifdef WIN32
- typedef unsigned __int64 U64;
-#elif defined(_WIN64)
- typedef uint64_t U64;
-#endif
-
-#ifdef _WIN64
-#if !(defined(POINTER_32))
-#define POINTER_32 __ptr32
-#endif
-#else /* _WIN32 */
-#define POINTER_32
-#endif
-
#if defined(__KERNEL__) || defined(__LINUX_USER__)
#ifdef __LINUX_USER__ /* Don't include these for KERNEL */
diff --git a/drivers/staging/crystalhd/bcm_70012_regs.h b/drivers/staging/crystalhd/bcm_70012_regs.h
index 6922f54e432..f3ab3146cd9 100644
--- a/drivers/staging/crystalhd/bcm_70012_regs.h
+++ b/drivers/staging/crystalhd/bcm_70012_regs.h
@@ -25,22 +25,22 @@
* m = memory, c = core, r = register, f = field, d = data.
*/
#if !defined(GET_FIELD) && !defined(SET_FIELD)
-#define BRCM_ALIGN(c,r,f) c##_##r##_##f##_ALIGN
-#define BRCM_BITS(c,r,f) c##_##r##_##f##_BITS
-#define BRCM_MASK(c,r,f) c##_##r##_##f##_MASK
-#define BRCM_SHIFT(c,r,f) c##_##r##_##f##_SHIFT
+#define BRCM_ALIGN(c, r, f) c##_##r##_##f##_ALIGN
+#define BRCM_BITS(c, r, f) c##_##r##_##f##_BITS
+#define BRCM_MASK(c, r, f) c##_##r##_##f##_MASK
+#define BRCM_SHIFT(c, r, f) c##_##r##_##f##_SHIFT
-#define GET_FIELD(m,c,r,f) \
- ((((m) & BRCM_MASK(c,r,f)) >> BRCM_SHIFT(c,r,f)) << BRCM_ALIGN(c,r,f))
+#define GET_FIELD(m, c, r, f) \
+ ((((m) & BRCM_MASK(c, r, f)) >> BRCM_SHIFT(c, r, f)) << BRCM_ALIGN(c, r, f))
-#define SET_FIELD(m,c,r,f,d) \
- ((m) = (((m) & ~BRCM_MASK(c,r,f)) | ((((d) >> BRCM_ALIGN(c,r,f)) << \
- BRCM_SHIFT(c,r,f)) & BRCM_MASK(c,r,f))) \
+#define SET_FIELD(m, c, r, f, d) \
+ ((m) = (((m) & ~BRCM_MASK(c, r, f)) | ((((d) >> BRCM_ALIGN(c, r, f)) << \
+ BRCM_SHIFT(c, r, f)) & BRCM_MASK(c, r, f))) \
)
-#define SET_TYPE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##d)
-#define SET_NAME_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,c##_##r##_##f##_##d)
-#define SET_VALUE_FIELD(m,c,r,f,d) SET_FIELD(m,c,r,f,d)
+#define SET_TYPE_FIELD(m, c, r, f, d) SET_FIELD(m, c, r, f, c##_##d)
+#define SET_NAME_FIELD(m, c, r, f, d) SET_FIELD(m, c, r, f, c##_##r##_##f##_##d)
+#define SET_VALUE_FIELD(m, c, r, f, d) SET_FIELD(m, c, r, f, d)
#endif /* GET & SET */
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 26145a8d0f7..1429608544d 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -69,8 +69,8 @@ static void bc_cproc_mark_pwr_state(struct crystalhd_cmd *ctx)
}
}
-static BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
int rc = 0, i = 0;
@@ -88,7 +88,7 @@ static BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
if (ctx->state != BC_LINK_INVALID) {
- BCMLOG_ERR("Link invalid state %d \n", ctx->state);
+ BCMLOG_ERR("Link invalid state %d\n", ctx->state);
return BC_STS_ERR_USAGE;
}
/* Check for duplicate playback sessions..*/
@@ -111,8 +111,8 @@ static BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
return crystalhd_hw_setup_dma_rings(&ctx->hw_ctx);
}
-static BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata) {
@@ -126,7 +126,8 @@ static BC_STATUS bc_cproc_get_version(struct crystalhd_cmd *ctx,
}
-static BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -143,8 +144,8 @@ static BC_STATUS bc_cproc_get_hwtype(struct crystalhd_cmd *ctx, crystalhd_ioctl_
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata)
return BC_STS_INV_ARG;
@@ -153,8 +154,8 @@ static BC_STATUS bc_cproc_reg_rd(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata)
return BC_STS_INV_ARG;
@@ -165,8 +166,8 @@ static BC_STATUS bc_cproc_reg_wr(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata)
return BC_STS_INV_ARG;
@@ -176,8 +177,8 @@ static BC_STATUS bc_cproc_link_reg_rd(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
if (!ctx || !idata)
return BC_STS_INV_ARG;
@@ -188,10 +189,10 @@ static BC_STATUS bc_cproc_link_reg_wr(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata || !idata->add_cdata)
return BC_STS_INV_ARG;
@@ -207,10 +208,10 @@ static BC_STATUS bc_cproc_mem_rd(struct crystalhd_cmd *ctx,
}
-static BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata || !idata->add_cdata)
return BC_STS_INV_ARG;
@@ -226,11 +227,11 @@ static BC_STATUS bc_cproc_mem_wr(struct crystalhd_cmd *ctx,
return sts;
}
-static BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
uint32_t ix, cnt, off, len;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
uint32_t *temp;
if (!ctx || !idata)
@@ -258,11 +259,11 @@ static BC_STATUS bc_cproc_cfg_rd(struct crystalhd_cmd *ctx,
return sts;
}
-static BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
uint32_t ix, cnt, off, len;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
uint32_t *temp;
if (!ctx || !idata)
@@ -290,10 +291,10 @@ static BC_STATUS bc_cproc_cfg_wr(struct crystalhd_cmd *ctx,
return sts;
}
-static BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata || !idata->add_cdata || !idata->add_cdata_sz) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -301,7 +302,7 @@ static BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx,
}
if (ctx->state != BC_LINK_INVALID) {
- BCMLOG_ERR("Link invalid state %d \n", ctx->state);
+ BCMLOG_ERR("Link invalid state %d\n", ctx->state);
return BC_STS_ERR_USAGE;
}
@@ -329,13 +330,14 @@ static BC_STATUS bc_cproc_download_fw(struct crystalhd_cmd *ctx,
* Abort pending input transfers and issue decoder flush command.
*
*/
-static BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_STATUS sts;
+ enum BC_STATUS sts;
uint32_t *cmd;
if (!(ctx->state & BC_LINK_INIT)) {
- BCMLOG_ERR("Link invalid state %d \n", ctx->state);
+ BCMLOG_ERR("Link invalid state %d\n", ctx->state);
return BC_STS_ERR_USAGE;
}
@@ -371,22 +373,22 @@ static BC_STATUS bc_cproc_do_fw_cmd(struct crystalhd_cmd *ctx, crystalhd_ioctl_d
return sts;
}
-static void bc_proc_in_completion(crystalhd_dio_req *dio_hnd,
- wait_queue_head_t *event, BC_STATUS sts)
+static void bc_proc_in_completion(struct crystalhd_dio_req *dio_hnd,
+ wait_queue_head_t *event, enum BC_STATUS sts)
{
if (!dio_hnd || !event) {
BCMLOG_ERR("Invalid Arg!!\n");
return;
}
if (sts == BC_STS_IO_USER_ABORT)
- return;
+ return;
dio_hnd->uinfo.comp_sts = sts;
dio_hnd->uinfo.ev_sts = 1;
crystalhd_set_event(event);
}
-static BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx)
+static enum BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx)
{
wait_queue_head_t sleep_ev;
int rc = 0;
@@ -406,12 +408,12 @@ static BC_STATUS bc_cproc_codein_sleep(struct crystalhd_cmd *ctx)
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata,
- crystalhd_dio_req *dio)
+static enum BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata,
+ struct crystalhd_dio_req *dio)
{
uint32_t tx_listid = 0;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
wait_queue_head_t event;
int rc = 0;
@@ -452,7 +454,7 @@ static BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
if (!rc) {
return dio->uinfo.comp_sts;
} else if (rc == -EBUSY) {
- BCMLOG(BCMLOG_DBG, "_tx_post() T/O \n");
+ BCMLOG(BCMLOG_DBG, "_tx_post() T/O\n");
sts = BC_STS_TIMEOUT;
} else if (rc == -EINTR) {
BCMLOG(BCMLOG_DBG, "Tx Wait Signal int.\n");
@@ -471,7 +473,7 @@ static BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
}
/* Helper function to check on user buffers */
-static BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
+static enum BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
uint32_t uv_off, bool en_422)
{
if (!ubuff || !ub_sz) {
@@ -482,7 +484,7 @@ static BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
/* Check for alignment */
if (((uintptr_t)ubuff) & 0x03) {
- BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p \n",
+ BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p\n",
((pin) ? "TX" : "RX"), ubuff);
return BC_STS_NOT_IMPL;
}
@@ -502,12 +504,13 @@ static BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
void *ubuff;
uint32_t ub_sz;
- crystalhd_dio_req *dio_hnd = NULL;
- BC_STATUS sts = BC_STS_SUCCESS;
+ struct crystalhd_dio_req *dio_hnd = NULL;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -523,7 +526,7 @@ static BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, crystalhd_ioctl_
sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, 0, 0, 1, &dio_hnd);
if (sts != BC_STS_SUCCESS) {
- BCMLOG_ERR("dio map - %d \n", sts);
+ BCMLOG_ERR("dio map - %d\n", sts);
return sts;
}
@@ -537,14 +540,14 @@ static BC_STATUS bc_cproc_proc_input(struct crystalhd_cmd *ctx, crystalhd_ioctl_
return sts;
}
-static BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
void *ubuff;
uint32_t ub_sz, uv_off;
bool en_422;
- crystalhd_dio_req *dio_hnd = NULL;
- BC_STATUS sts = BC_STS_SUCCESS;
+ struct crystalhd_dio_req *dio_hnd = NULL;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -563,7 +566,7 @@ static BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
sts = crystalhd_map_dio(ctx->adp, ubuff, ub_sz, uv_off,
en_422, 0, &dio_hnd);
if (sts != BC_STS_SUCCESS) {
- BCMLOG_ERR("dio map - %d \n", sts);
+ BCMLOG_ERR("dio map - %d\n", sts);
return sts;
}
@@ -579,10 +582,10 @@ static BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_fmt_change(struct crystalhd_cmd *ctx,
- crystalhd_dio_req *dio)
+static enum BC_STATUS bc_cproc_fmt_change(struct crystalhd_cmd *ctx,
+ struct crystalhd_dio_req *dio)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio, 0);
if (sts != BC_STS_SUCCESS)
@@ -595,12 +598,12 @@ static BC_STATUS bc_cproc_fmt_change(struct crystalhd_cmd *ctx,
return sts;
}
-static BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- crystalhd_dio_req *dio = NULL;
- BC_STATUS sts = BC_STS_SUCCESS;
- BC_DEC_OUT_BUFF *frame;
+ struct crystalhd_dio_req *dio = NULL;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
+ struct BC_DEC_OUT_BUFF *frame;
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -636,8 +639,8 @@ static BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
ctx->state |= BC_LINK_CAP_EN;
if (ctx->state == BC_LINK_READY)
@@ -646,12 +649,12 @@ static BC_STATUS bc_cproc_start_capture(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- crystalhd_dio_req *dio = NULL;
- BC_STATUS sts = BC_STS_SUCCESS;
- BC_DEC_OUT_BUFF *frame;
+ struct crystalhd_dio_req *dio = NULL;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
+ struct BC_DEC_OUT_BUFF *frame;
uint32_t count;
if (!ctx || !idata) {
@@ -681,10 +684,10 @@ static BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx,
return crystalhd_hw_stop_capture(&ctx->hw_ctx);
}
-static BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_DTS_STATS *stats;
+ struct BC_DTS_STATS *stats;
struct crystalhd_hw_stats hw_stats;
if (!ctx || !idata) {
@@ -713,20 +716,20 @@ static BC_STATUS bc_cproc_get_stats(struct crystalhd_cmd *ctx,
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_reset_stats(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_reset_stats(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
crystalhd_hw_stats(&ctx->hw_ctx, NULL);
return BC_STS_SUCCESS;
}
-static BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx,
- crystalhd_ioctl_data *idata)
+static enum BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_CLOCK *clock;
+ struct BC_CLOCK *clock;
uint32_t oldClk;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -749,7 +752,7 @@ static BC_STATUS bc_cproc_chg_clk(struct crystalhd_cmd *ctx,
}
/*=============== Cmd Proc Table.. ======================================*/
-static const crystalhd_cmd_tbl_t g_crystalhd_cproc_tbl[] = {
+static const struct crystalhd_cmd_tbl g_crystalhd_cproc_tbl[] = {
{ BCM_IOC_GET_VERSION, bc_cproc_get_version, 0},
{ BCM_IOC_GET_HWTYPE, bc_cproc_get_hwtype, 0},
{ BCM_IOC_REG_RD, bc_cproc_reg_rd, 0},
@@ -796,9 +799,10 @@ static const crystalhd_cmd_tbl_t g_crystalhd_cproc_tbl[] = {
 * we pass on the power management notification to our plug-in by completing
* all outstanding requests with BC_STS_IO_USER_ABORT return code.
*/
-BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata)
+enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!ctx || !idata) {
BCMLOG_ERR("Invalid Parameters\n");
@@ -854,7 +858,7 @@ BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *ida
* start a new playback session from the pre-suspend clip position.
*
*/
-BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx)
+enum BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx)
{
BCMLOG(BCMLOG_DBG, "crystalhd_resume Success %x\n", ctx->state);
@@ -875,7 +879,7 @@ BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx)
* application specific resources. HW layer initialization
* is done for the first open request.
*/
-BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
+enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
struct crystalhd_user **user_ctx)
{
struct crystalhd_user *uc;
@@ -913,7 +917,7 @@ BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
 * Close application handle and release app specific
* resources.
*/
-BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc)
+enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc)
{
uint32_t mode = uc->mode;
@@ -948,7 +952,7 @@ BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user
*
* Called at the time of driver load.
*/
-BC_STATUS __devinit crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx,
+enum BC_STATUS __devinit crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx,
struct crystalhd_adp *adp)
{
int i = 0;
@@ -983,7 +987,7 @@ BC_STATUS __devinit crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx,
*
* Called at the time of driver un-load.
*/
-BC_STATUS __devexit crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx)
+enum BC_STATUS __devexit crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx)
{
BCMLOG(BCMLOG_DBG, "Deleting Command context..\n");
@@ -1021,12 +1025,12 @@ crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cm
return NULL;
}
- tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(crystalhd_cmd_tbl_t);
+ tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(struct crystalhd_cmd_tbl);
for (i = 0; i < tbl_sz; i++) {
if (g_crystalhd_cproc_tbl[i].cmd_id == cmd) {
if ((uc->mode == DTS_MONITOR_MODE) &&
(g_crystalhd_cproc_tbl[i].block_mon)) {
- BCMLOG(BCMLOG_INFO, "Blocking cmd %d \n", cmd);
+ BCMLOG(BCMLOG_INFO, "Blocking cmd %d\n", cmd);
break;
}
cproc = g_crystalhd_cproc_tbl[i].cmd_proc;
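The lookup above sizes g_crystalhd_cproc_tbl by dividing the array size by sizeof(struct crystalhd_cmd_tbl). As a minimal sketch of the same walk, the kernel's ARRAY_SIZE() helper (linux/kernel.h) avoids repeating the element type; the helper name below is hypothetical and the DTS_MONITOR_MODE blocking check is omitted:

        /* Sketch only: same table walk sized with ARRAY_SIZE(); helper name
         * is hypothetical and the monitor-mode blocking check is left out. */
        static crystalhd_cmd_proc lookup_cmd_proc(uint32_t cmd)
        {
                uint32_t i;

                for (i = 0; i < ARRAY_SIZE(g_crystalhd_cproc_tbl); i++) {
                        if (g_crystalhd_cproc_tbl[i].cmd_id == cmd)
                                return g_crystalhd_cproc_tbl[i].cmd_proc;
                }
                return NULL;
        }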
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index 6b290aed8e0..10130296601 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -36,7 +36,7 @@
#include "crystalhd_misc.h"
#include "crystalhd_hw.h"
-enum _crystalhd_state{
+enum crystalhd_state{
BC_LINK_INVALID = 0x00,
BC_LINK_INIT = 0x01,
BC_LINK_CAP_EN = 0x02,
@@ -66,23 +66,22 @@ struct crystalhd_cmd {
struct crystalhd_hw hw_ctx;
};
-typedef BC_STATUS (*crystalhd_cmd_proc)(struct crystalhd_cmd *, crystalhd_ioctl_data *);
+typedef enum BC_STATUS(*crystalhd_cmd_proc)(struct crystalhd_cmd *, struct crystalhd_ioctl_data *);
-typedef struct _crystalhd_cmd_tbl {
+struct crystalhd_cmd_tbl {
uint32_t cmd_id;
const crystalhd_cmd_proc cmd_proc;
uint32_t block_mon;
-} crystalhd_cmd_tbl_t;
-
+};
-BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, crystalhd_ioctl_data *idata);
-BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx);
+enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata);
+enum BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx);
crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd,
struct crystalhd_user *uc);
-BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx);
-BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc);
-BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp);
-BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx);
+enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx);
+enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc);
+enum BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp);
+enum BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx);
bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx);
#endif
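Since the header now exposes the un-typedef'd names, a handler and a table entry would be spelled as below; bc_cproc_example and example_tbl are hypothetical, everything else is declared above:

        /* Sketch only: hypothetical handler and table entry using the
         * struct/enum spellings declared in this header. */
        static enum BC_STATUS bc_cproc_example(struct crystalhd_cmd *ctx,
                                               struct crystalhd_ioctl_data *idata)
        {
                return BC_STS_SUCCESS;
        }

        static const struct crystalhd_cmd_tbl example_tbl[] = {
                { BCM_IOC_GET_VERSION, bc_cproc_example, 0 },
        };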
diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
index 261cd19a0ee..77560d4b680 100644
--- a/drivers/staging/crystalhd/crystalhd_fw_if.h
+++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
@@ -29,21 +29,17 @@
/* TBD: Pull in only required defs into this file.. */
-
-
/* User Data Header */
-typedef struct user_data {
+struct user_data {
struct user_data *next;
uint32_t type;
uint32_t size;
-} UD_HDR;
-
-
+};
/*------------------------------------------------------*
* MPEG Extension to the PPB *
*------------------------------------------------------*/
-typedef struct {
+struct ppb_mpeg {
uint32_t to_be_defined;
uint32_t valid;
@@ -61,15 +57,15 @@ typedef struct {
/* MPEG_VALID_USERDATA
User data is in the form of a linked list. */
int32_t userDataSize;
- UD_HDR *userData;
+ struct user_data *userData;
-} PPB_MPEG;
+};
/*------------------------------------------------------*
* VC1 Extension to the PPB *
*------------------------------------------------------*/
-typedef struct {
+struct ppb_vc1 {
uint32_t to_be_defined;
uint32_t valid;
@@ -88,9 +84,9 @@ typedef struct {
/* VC1_VALID_USERDATA
User data is in the form of a linked list. */
int32_t userDataSize;
- UD_HDR *userData;
+ struct user_data *userData;
-} PPB_VC1;
+};
/*------------------------------------------------------*
* H.264 Extension to the PPB *
@@ -108,8 +104,8 @@ typedef struct {
/* maximum number of intervals(as many as 256 intervals?) */
#define MAX_FGT_VALUE_INTERVAL (256)
-typedef struct FGT_SEI {
- struct FGT_SEI *next;
+struct fgt_sei {
+ struct fgt_sei *next;
unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL];
unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL];
@@ -134,9 +130,9 @@ typedef struct FGT_SEI {
unsigned char num_model_values[3]; /* Number of model values. */
uint16_t repetition_period; /* Repetition period (0-16384) */
-} FGT_SEI;
+};
-typedef struct {
+struct ppb_h264 {
/* 'valid' specifies which fields (or sets of
* fields) below are valid. If the corresponding
* bit in 'valid' is NOT set then that field(s)
@@ -170,14 +166,14 @@ typedef struct {
/* H264_VALID_USER */
uint32_t user_data_size;
- UD_HDR *user_data;
+ struct user_data *user_data;
/* H264 VALID FGT */
- FGT_SEI *pfgt;
+ struct fgt_sei *pfgt;
-} PPB_H264;
+};
-typedef struct {
+struct ppb {
/* Common fields. */
uint32_t picture_number; /* Ordinal display number */
uint32_t video_buffer; /* Video (picbuf) number */
@@ -215,14 +211,14 @@ typedef struct {
/* Protocol-specific extensions. */
union {
- PPB_H264 h264;
- PPB_MPEG mpeg;
- PPB_VC1 vc1;
+ struct ppb_h264 h264;
+ struct ppb_mpeg mpeg;
+ struct ppb_vc1 vc1;
} other;
-} PPB;
+};
-typedef struct {
+struct c011_pib {
uint32_t bFormatChange;
uint32_t resolution;
uint32_t channelId;
@@ -231,13 +227,11 @@ typedef struct {
uint32_t zeroPanscanValid;
uint32_t dramOutBufAddr;
uint32_t yComponent;
- PPB ppb;
-
-} C011_PIB;
-
+ struct ppb ppb;
+};
-typedef struct {
+struct dec_rsp_channel_start_video {
uint32_t command;
uint32_t sequence;
uint32_t status;
@@ -251,12 +245,12 @@ typedef struct {
uint32_t transportStreamCaptureAddr;
uint32_t asyncEventQ;
-} DecRspChannelStartVideo;
+};
#define eCMD_C011_CMD_BASE (0x73763000)
/* host commands */
-typedef enum {
+enum c011_ts_cmd {
eCMD_TS_GET_NEXT_PIC = 0x7376F100, /* debug get next picture */
eCMD_TS_GET_LAST_PIC = 0x7376F102, /* debug get last pic status */
eCMD_TS_READ_WRITE_MEM = 0x7376F104, /* debug read write memory */
@@ -364,6 +358,6 @@ typedef enum {
eNOTIFY_C011_ENC_CHAN_EVENT = eCMD_C011_CMD_BASE + 0x210,
-} eC011_TS_CMD;
+};
#endif
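With the firmware-interface structures carrying proper tags, callers name the protocol-specific extension through the union inside struct ppb; the accessor below is a hypothetical sketch using only fields declared above:

        /* Sketch only: hypothetical accessor pulling the H.264 extension
         * out of the renamed picture-parameter block. */
        static uint32_t ppb_h264_user_data_size(const struct ppb *p)
        {
                return p->other.h264.user_data_size;
        }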
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index c438c489aa9..f63185790c4 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -61,8 +61,8 @@ static void crystalhd_start_dram(struct crystalhd_adp *adp)
static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
{
- link_misc_perst_deco_ctrl rst_deco_cntrl;
- link_misc_perst_clk_ctrl rst_clk_cntrl;
+ union link_misc_perst_deco_ctrl rst_deco_cntrl;
+ union link_misc_perst_clk_ctrl rst_clk_cntrl;
uint32_t temp;
/*
@@ -122,8 +122,8 @@ static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
{
- link_misc_perst_deco_ctrl rst_deco_cntrl;
- link_misc_perst_clk_ctrl rst_clk_cntrl;
+ union link_misc_perst_deco_ctrl rst_deco_cntrl;
+ union link_misc_perst_clk_ctrl rst_clk_cntrl;
uint32_t temp;
/*
@@ -178,7 +178,7 @@ static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
{
- intr_mask_reg intr_mask;
+ union intr_mask_reg intr_mask;
intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
intr_mask.mask_pcie_err = 1;
intr_mask.mask_pcie_rbusmast_err = 1;
@@ -194,7 +194,7 @@ static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
{
- intr_mask_reg intr_mask;
+ union intr_mask_reg intr_mask;
intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
intr_mask.mask_pcie_err = 1;
intr_mask.mask_pcie_rbusmast_err = 1;
@@ -348,10 +348,10 @@ static bool crystalhd_stop_device(struct crystalhd_adp *adp)
return true;
}
-static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
+static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
{
unsigned long flags = 0;
- crystalhd_rx_dma_pkt *temp = NULL;
+ struct crystalhd_rx_dma_pkt *temp = NULL;
if (!hw)
return NULL;
@@ -370,7 +370,7 @@ static crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
}
static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
- crystalhd_rx_dma_pkt *pkt)
+ struct crystalhd_rx_dma_pkt *pkt)
{
unsigned long flags = 0;
@@ -406,7 +406,7 @@ static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
{
struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
- crystalhd_rx_dma_pkt *pkt = (crystalhd_rx_dma_pkt *)data;
+ struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;
if (!pkt || !hw) {
BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
@@ -432,7 +432,7 @@ static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
if (!hw)
return;
- BCMLOG(BCMLOG_DBG, "Deleting IOQs \n");
+ BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
@@ -453,9 +453,9 @@ do { \
* TX - Active & Free
* RX - Active, Ready and Free.
*/
-static BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
+static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw *hw)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!hw) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -523,10 +523,10 @@ static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz
return false;
}
-static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
- uint32_t list_id, BC_STATUS cs)
+static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
+ uint32_t list_id, enum BC_STATUS cs)
{
- tx_dma_pkt *tx_req;
+ struct tx_dma_pkt *tx_req;
if (!hw || !list_id) {
BCMLOG_ERR("Invalid Arg..\n");
@@ -535,7 +535,7 @@ static BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
hw->pwr_lock--;
- tx_req = (tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
+ tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
if (!tx_req) {
if (cs != BC_STS_IO_USER_ABORT)
BCMLOG_ERR("Find and Fetch Did not find req\n");
@@ -570,7 +570,7 @@ static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts
if (!(err_sts & err_mask))
return false;
- BCMLOG_ERR("Error on Tx-L0 %x \n", err_sts);
+ BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);
tmp = err_mask;
@@ -602,7 +602,7 @@ static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts
if (!(err_sts & err_mask))
return false;
- BCMLOG_ERR("Error on Tx-L1 %x \n", err_sts);
+ BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);
tmp = err_mask;
@@ -635,9 +635,9 @@ static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
BC_STS_SUCCESS);
if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
- INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
- /* No error mask set.. */
- return;
+ INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
+ /* No error mask set.. */
+ return;
}
/* Handle Tx errors. */
@@ -654,7 +654,7 @@ static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
hw->stats.tx_errors++;
}
-static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
+static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
uint32_t ul_desc_index, uint32_t cnt)
{
uint32_t ix, ll = 0;
@@ -682,15 +682,15 @@ static void crystalhd_hw_dump_desc(pdma_descriptor p_dma_desc,
}
-static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
- dma_descriptor *desc,
+static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
+ struct dma_descriptor *desc,
dma_addr_t desc_paddr_base,
uint32_t sg_cnt, uint32_t sg_st_ix,
uint32_t sg_st_off, uint32_t xfr_sz)
{
uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
dma_addr_t desc_phy_addr = desc_paddr_base;
- addr_64 addr_temp;
+ union addr_64 addr_temp;
if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
(!sg_cnt && !ioreq->uinfo.dir_tx)) {
@@ -721,7 +721,7 @@ static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
desc[ix].dma_dir = ioreq->uinfo.dir_tx;
/* Chain DMA descriptor. */
- addr_temp.full_addr = desc_phy_addr + sizeof(dma_descriptor);
+ addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
desc[ix].next_desc_addr_low = addr_temp.low_part;
desc[ix].next_desc_addr_high = addr_temp.high_part;
@@ -740,7 +740,7 @@ static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
crystalhd_hw_dump_desc(desc, ix, 1);
count += len;
- desc_phy_addr += sizeof(dma_descriptor);
+ desc_phy_addr += sizeof(struct dma_descriptor);
}
last_desc_ix = ix - 1;
@@ -773,15 +773,15 @@ static BC_STATUS crystalhd_hw_fill_desc(crystalhd_dio_req *ioreq,
return BC_STS_SUCCESS;
}
-static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
- pdma_desc_mem pdesc_mem,
+static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
+ struct dma_desc_mem *pdesc_mem,
uint32_t *uv_desc_index)
{
- dma_descriptor *desc = NULL;
+ struct dma_descriptor *desc = NULL;
dma_addr_t desc_paddr_base = 0;
uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
uint32_t xfr_sz = 0;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
/* Check params.. */
if (!ioreq || !pdesc_mem || !uv_desc_index) {
@@ -821,7 +821,7 @@ static BC_STATUS crystalhd_xlat_sgl_to_dma_desc(crystalhd_dio_req *ioreq,
/* Prepare for UV mapping.. */
desc = &pdesc_mem->pdma_desc_start[sg_cnt];
desc_paddr_base = pdesc_mem->phy_addr +
- (sg_cnt * sizeof(dma_descriptor));
+ (sg_cnt * sizeof(struct dma_descriptor));
/* Done with desc addr.. now update sg stuff.*/
sg_cnt = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
@@ -858,7 +858,7 @@ static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
* Verify if the Stop generates a completion interrupt or not.
 * If it does not generate an interrupt, then add polling here.
*/
-static BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
+static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
{
uint32_t dma_cntrl, cnt = 30;
uint32_t l1 = 1, l2 = 1;
@@ -1021,7 +1021,7 @@ static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_t
return true;
}
-static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
+static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
{
if (!src_pib || !dst_pib) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1046,10 +1046,10 @@ static void cpy_pib_to_app(C011_PIB *src_pib, BC_PIC_INFO_BLOCK *dst_pib)
static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
{
unsigned int cnt;
- C011_PIB src_pib;
+ struct c011_pib src_pib;
uint32_t pib_addr, pib_cnt;
- BC_PIC_INFO_BLOCK *AppPib;
- crystalhd_rx_dma_pkt *rx_pkt = NULL;
+ struct BC_PIC_INFO_BLOCK *AppPib;
+ struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
pib_cnt = crystalhd_get_pib_avail_cnt(hw);
@@ -1059,11 +1059,11 @@ static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
for (cnt = 0; cnt < pib_cnt; cnt++) {
pib_addr = crystalhd_get_addr_from_pib_Q(hw);
- crystalhd_mem_rd(hw->adp, pib_addr, sizeof(C011_PIB) / 4,
+ crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
(uint32_t *)&src_pib);
if (src_pib.bFormatChange) {
- rx_pkt = (crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
+ rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
if (!rx_pkt)
return;
rx_pkt->flags = 0;
@@ -1134,33 +1134,29 @@ static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
if (l0y) {
l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
l0y &= DMA_START_BIT;
- if (!l0y) {
+ if (!l0y)
hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
- }
}
if (l1y) {
l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
l1y &= DMA_START_BIT;
- if (!l1y) {
+ if (!l1y)
hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
- }
}
if (l0uv) {
l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
l0uv &= DMA_START_BIT;
- if (!l0uv) {
+ if (!l0uv)
hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
- }
}
if (l1uv) {
l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
l1uv &= DMA_START_BIT;
- if (!l1uv) {
+ if (!l1uv)
hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
- }
}
msleep_interruptible(100);
count--;
@@ -1172,11 +1168,11 @@ static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}
-static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_dma_pkt *rx_pkt)
+static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
{
uint32_t y_low_addr_reg, y_high_addr_reg;
uint32_t uv_low_addr_reg, uv_high_addr_reg;
- addr_64 desc_addr;
+ union addr_64 desc_addr;
unsigned long flags;
if (!hw || !rx_pkt) {
@@ -1232,10 +1228,10 @@ static BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, crystalhd_rx_d
return BC_STS_SUCCESS;
}
-static BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
- crystalhd_rx_dma_pkt *rx_pkt)
+static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
+ struct crystalhd_rx_dma_pkt *rx_pkt)
{
- BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
+ enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
if (sts == BC_STS_BUSY)
crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
@@ -1291,12 +1287,12 @@ static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}
-static BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
- BC_STATUS comp_sts)
+static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
+ enum BC_STATUS comp_sts)
{
- crystalhd_rx_dma_pkt *rx_pkt = NULL;
+ struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
uint32_t y_dw_dnsz, uv_dw_dnsz;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
if (!hw || list_index >= DMA_ENGINE_CNT) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1332,7 +1328,7 @@ static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts
uint32_t y_err_sts, uint32_t uv_err_sts)
{
uint32_t tmp;
- list_sts tmp_lsts;
+ enum list_sts tmp_lsts;
if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
return false;
@@ -1400,7 +1396,7 @@ static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts
uint32_t y_err_sts, uint32_t uv_err_sts)
{
uint32_t tmp;
- list_sts tmp_lsts;
+ enum list_sts tmp_lsts;
if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
return false;
@@ -1432,9 +1428,8 @@ static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts
/* UV1 - DMA */
tmp = uv_err_sts & GET_UV1_ERR_MSK;
- if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK) {
+ if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
- }
if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
@@ -1472,7 +1467,7 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
{
unsigned long flags;
uint32_t i, list_avail = 0;
- BC_STATUS comp_sts = BC_STS_NO_DATA;
+ enum BC_STATUS comp_sts = BC_STS_NO_DATA;
uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
bool ret = 0;
@@ -1540,15 +1535,15 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
}
}
-static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
- BC_FW_CMD *fw_cmd)
+static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
+ struct BC_FW_CMD *fw_cmd)
{
- BC_STATUS sts = BC_STS_SUCCESS;
- DecRspChannelStartVideo *st_rsp = NULL;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
+ struct dec_rsp_channel_start_video *st_rsp = NULL;
switch (fw_cmd->cmd[0]) {
case eCMD_C011_DEC_CHAN_START_VIDEO:
- st_rsp = (DecRspChannelStartVideo *)fw_cmd->rsp;
+ st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
@@ -1566,10 +1561,10 @@ static BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
return sts;
}
-static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
+static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
{
uint32_t reg;
- link_misc_perst_decoder_ctrl rst_cntrl_reg;
+ union link_misc_perst_decoder_ctrl rst_cntrl_reg;
/* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
@@ -1627,7 +1622,7 @@ static BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
**
*************************************************/
-BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
+enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
{
uint32_t reg_data, cnt, *temp_buff;
uint32_t fw_sig_len = 36;
@@ -1719,13 +1714,14 @@ BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_
return BC_STS_SUCCESS;;
}
-BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
+enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
+ struct BC_FW_CMD *fw_cmd)
{
uint32_t cnt = 0, cmd_res_addr;
uint32_t *cmd_buff, *res_buff;
wait_queue_head_t fw_cmd_event;
int rc = 0;
- BC_STATUS sts;
+ enum BC_STATUS sts;
crystalhd_create_event(&fw_cmd_event);
@@ -1740,7 +1736,7 @@ BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd)
res_buff = fw_cmd->rsp;
if (!cmd_buff || !res_buff) {
- BCMLOG_ERR("Invalid Parameters for F/W Command \n");
+ BCMLOG_ERR("Invalid Parameters for F/W Command\n");
return BC_STS_INV_ARG;
}
@@ -1859,7 +1855,7 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
return rc;
}
-BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
+enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
{
if (!hw || !adp) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1891,7 +1887,7 @@ BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
{
if (!hw) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1908,14 +1904,14 @@ BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
{
unsigned int i;
void *mem;
size_t mem_len;
dma_addr_t phy_addr;
- BC_STATUS sts = BC_STS_SUCCESS;
- crystalhd_rx_dma_pkt *rpkt;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
+ struct crystalhd_rx_dma_pkt *rpkt;
if (!hw || !hw->adp) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1928,7 +1924,7 @@ BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
return sts;
}
- mem_len = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
+ mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
for (i = 0; i < BC_TX_LIST_CNT; i++) {
mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
@@ -1943,7 +1939,7 @@ BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
- sizeof(dma_descriptor);
+ sizeof(struct dma_descriptor);
hw->tx_pkt_pool[i].list_tag = 0;
/* Add TX dma requests to Free Queue..*/
@@ -1973,7 +1969,7 @@ BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
}
rpkt->desc_mem.pdma_desc_start = mem;
rpkt->desc_mem.phy_addr = phy_addr;
- rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(dma_descriptor);
+ rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
crystalhd_hw_free_rx_pkt(hw, rpkt);
}
@@ -1981,10 +1977,10 @@ BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
{
unsigned int i;
- crystalhd_rx_dma_pkt *rpkt = NULL;
+ struct crystalhd_rx_dma_pkt *rpkt = NULL;
if (!hw || !hw->adp) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2019,16 +2015,16 @@ BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
+enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
hw_comp_callback call_back,
wait_queue_head_t *cb_event, uint32_t *list_id,
uint8_t data_flags)
{
- tx_dma_pkt *tx_dma_packet = NULL;
+ struct tx_dma_pkt *tx_dma_packet = NULL;
uint32_t first_desc_u_addr, first_desc_l_addr;
uint32_t low_addr, high_addr;
- addr_64 desc_addr;
- BC_STATUS sts, add_sts;
+ union addr_64 desc_addr;
+ enum BC_STATUS sts, add_sts;
uint32_t dummy_index = 0;
unsigned long flags;
bool rc;
@@ -2053,7 +2049,7 @@ BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq
}
/* Get a list from TxFreeQ */
- tx_dma_packet = (tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
+ tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
if (!tx_dma_packet) {
BCMLOG_ERR("No empty elements..\n");
return BC_STS_ERR_USAGE;
@@ -2126,7 +2122,7 @@ BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq
*
 * FIXME: The actual condition has not been tested.
*/
-BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
+enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
{
if (!hw || !list_id) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2139,12 +2135,12 @@ BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
- crystalhd_dio_req *ioreq, bool en_post)
+enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
+ struct crystalhd_dio_req *ioreq, bool en_post)
{
- crystalhd_rx_dma_pkt *rpkt;
+ struct crystalhd_rx_dma_pkt *rpkt;
uint32_t tag, uv_desc_ix = 0;
- BC_STATUS sts;
+ enum BC_STATUS sts;
if (!hw || !ioreq) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2169,7 +2165,7 @@ BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
/* Store the address of UV in the rx packet for post*/
if (uv_desc_ix)
rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
- (sizeof(dma_descriptor) * (uv_desc_ix + 1));
+ (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
if (en_post)
sts = crystalhd_hw_post_cap_buff(hw, rpkt);
@@ -2179,11 +2175,11 @@ BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
return sts;
}
-BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
- BC_PIC_INFO_BLOCK *pib,
- crystalhd_dio_req **ioreq)
+enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
+ struct BC_PIC_INFO_BLOCK *pib,
+ struct crystalhd_dio_req **ioreq)
{
- crystalhd_rx_dma_pkt *rpkt;
+ struct crystalhd_rx_dma_pkt *rpkt;
uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
uint32_t sig_pending = 0;
@@ -2215,10 +2211,10 @@ BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
{
- crystalhd_rx_dma_pkt *rx_pkt;
- BC_STATUS sts;
+ struct crystalhd_rx_dma_pkt *rx_pkt;
+ enum BC_STATUS sts;
uint32_t i;
if (!hw) {
@@ -2240,7 +2236,7 @@ BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
{
void *temp = NULL;
@@ -2260,7 +2256,7 @@ BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
{
hw->stats.pause_cnt++;
hw->stop_pending = 1;
@@ -2272,9 +2268,9 @@ BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
{
- BC_STATUS sts;
+ enum BC_STATUS sts;
uint32_t aspm;
hw->stop_pending = 0;
@@ -2288,9 +2284,9 @@ BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
return sts;
}
-BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
{
- BC_STATUS sts;
+ enum BC_STATUS sts;
if (!hw) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2329,7 +2325,7 @@ void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stat
memcpy(stats, &hw->stats, sizeof(*stats));
}
-BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
+enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
{
uint32_t reg, n, i;
uint32_t vco_mg, refresh_reg;
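Several of the hunks above split a descriptor's bus address into the two 32-bit halves the DMA registers take by going through union addr_64. A minimal sketch of that pattern, with a hypothetical helper name and the next_desc_addr_low/high fields used in crystalhd_hw_fill_desc():

        /* Sketch only: chaining one descriptor to the next by splitting the
         * 64-bit bus address through union addr_64 (helper name hypothetical). */
        static void chain_next_desc(struct dma_descriptor *desc,
                                    dma_addr_t next_paddr)
        {
                union addr_64 a;

                a.full_addr = next_paddr;
                desc->next_desc_addr_low = a.low_part;
                desc->next_desc_addr_high = a.high_part;
        }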
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 1c6318e912a..3efbf9d4ff5 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -109,7 +109,7 @@
#define DecHt_HostSwReset 0x340000
#define BC_DRAM_FW_CFG_ADDR 0x001c2000
-typedef union _addr_64_ {
+union addr_64 {
struct {
uint32_t low_part;
uint32_t high_part;
@@ -117,9 +117,9 @@ typedef union _addr_64_ {
uint64_t full_addr;
-} addr_64;
+};
-typedef union _intr_mask_reg_ {
+union intr_mask_reg {
struct {
uint32_t mask_tx_done:1;
uint32_t mask_tx_err:1;
@@ -133,9 +133,9 @@ typedef union _intr_mask_reg_ {
uint32_t whole_reg;
-} intr_mask_reg;
+};
-typedef union _link_misc_perst_deco_ctrl_ {
+union link_misc_perst_deco_ctrl {
struct {
uint32_t bcm7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
uint32_t reserved0:3; /* Reserved.No Effect*/
@@ -145,9 +145,9 @@ typedef union _link_misc_perst_deco_ctrl_ {
uint32_t whole_reg;
-} link_misc_perst_deco_ctrl;
+};
-typedef union _link_misc_perst_clk_ctrl_ {
+union link_misc_perst_clk_ctrl {
struct {
uint32_t sel_alt_clk:1; /* When set, selects a 6.75MHz clock as the source of core_clk */
uint32_t stop_core_clk:1; /* When set, stops the branch of core_clk that is not needed for low power operation */
@@ -161,10 +161,9 @@ typedef union _link_misc_perst_clk_ctrl_ {
uint32_t whole_reg;
-} link_misc_perst_clk_ctrl;
-
+};
-typedef union _link_misc_perst_decoder_ctrl_ {
+union link_misc_perst_decoder_ctrl {
struct {
uint32_t bcm_7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
uint32_t res0:3; /* Reserved.No Effect*/
@@ -174,10 +173,9 @@ typedef union _link_misc_perst_decoder_ctrl_ {
uint32_t whole_reg;
-} link_misc_perst_decoder_ctrl;
-
+};
-typedef union _desc_low_addr_reg_ {
+union desc_low_addr_reg {
struct {
uint32_t list_valid:1;
uint32_t reserved:4;
@@ -186,9 +184,9 @@ typedef union _desc_low_addr_reg_ {
uint32_t whole_reg;
-} desc_low_addr_reg;
+};
-typedef struct _dma_descriptor_ { /* 8 32-bit values */
+struct dma_descriptor { /* 8 32-bit values */
/* 0th u32 */
uint32_t sdram_buff_addr:28; /* bits 0-27: SDRAM Address */
uint32_t res0:4; /* bits 28-31: Reserved */
@@ -220,24 +218,22 @@ typedef struct _dma_descriptor_ { /* 8 32-bit values */
/* 7th u32 */
uint32_t res8; /* Last 32bits reserved */
-} dma_descriptor, *pdma_descriptor;
+};
/*
* We will allocate the memory in 4K pages
* the linked list will be a list of 32 byte descriptors.
* The virtual address will determine what should be freed.
*/
-typedef struct _dma_desc_mem_ {
- pdma_descriptor pdma_desc_start; /* 32-bytes for dma descriptor. should be first element */
+struct dma_desc_mem {
+ struct dma_descriptor *pdma_desc_start; /* 32-bytes for dma descriptor. should be first element */
dma_addr_t phy_addr; /* physical address of each DMA desc */
uint32_t sz;
struct _dma_desc_mem_ *Next; /* points to Next Descriptor in chain */
-} dma_desc_mem, *pdma_desc_mem;
-
-
+};
-typedef enum _list_sts_ {
+enum list_sts {
sts_free = 0,
/* RX-Y Bits 0:7 */
@@ -253,30 +249,27 @@ typedef enum _list_sts_ {
rx_y_mask = 0x000000FF,
rx_uv_mask = 0x0000FF00,
+};
-} list_sts;
-
-typedef struct _tx_dma_pkt_ {
- dma_desc_mem desc_mem;
+struct tx_dma_pkt {
+ struct dma_desc_mem desc_mem;
hw_comp_callback call_back;
- crystalhd_dio_req *dio_req;
+ struct crystalhd_dio_req *dio_req;
wait_queue_head_t *cb_event;
uint32_t list_tag;
+};
-} tx_dma_pkt;
-
-typedef struct _crystalhd_rx_dma_pkt {
- dma_desc_mem desc_mem;
- crystalhd_dio_req *dio_req;
+struct crystalhd_rx_dma_pkt {
+ struct dma_desc_mem desc_mem;
+ struct crystalhd_dio_req *dio_req;
uint32_t pkt_tag;
uint32_t flags;
- BC_PIC_INFO_BLOCK pib;
+ struct BC_PIC_INFO_BLOCK pib;
dma_addr_t uv_phy_addr;
- struct _crystalhd_rx_dma_pkt *next;
-
-} crystalhd_rx_dma_pkt;
+ struct crystalhd_rx_dma_pkt *next;
+};
-struct crystalhd_hw_stats{
+struct crystalhd_hw_stats {
uint32_t rx_errors;
uint32_t tx_errors;
uint32_t freeq_count;
@@ -288,13 +281,13 @@ struct crystalhd_hw_stats{
};
struct crystalhd_hw {
- tx_dma_pkt tx_pkt_pool[DMA_ENGINE_CNT];
+ struct tx_dma_pkt tx_pkt_pool[DMA_ENGINE_CNT];
spinlock_t lock;
uint32_t tx_ioq_tag_seed;
uint32_t tx_list_post_index;
- crystalhd_rx_dma_pkt *rx_pkt_pool_head;
+ struct crystalhd_rx_dma_pkt *rx_pkt_pool_head;
uint32_t rx_pkt_tag_seed;
bool dev_started;
@@ -306,16 +299,16 @@ struct crystalhd_hw {
uint32_t pib_del_Q_addr;
uint32_t pib_rel_Q_addr;
- crystalhd_dioq_t *tx_freeq;
- crystalhd_dioq_t *tx_actq;
+ struct crystalhd_dioq *tx_freeq;
+ struct crystalhd_dioq *tx_actq;
/* Rx DMA Engine Specific Locks */
spinlock_t rx_lock;
uint32_t rx_list_post_index;
- list_sts rx_list_sts[DMA_ENGINE_CNT];
- crystalhd_dioq_t *rx_rdyq;
- crystalhd_dioq_t *rx_freeq;
- crystalhd_dioq_t *rx_actq;
+ enum list_sts rx_list_sts[DMA_ENGINE_CNT];
+ struct crystalhd_dioq *rx_rdyq;
+ struct crystalhd_dioq *rx_freeq;
+ struct crystalhd_dioq *rx_actq;
uint32_t stop_pending;
/* HW counters.. */
@@ -364,35 +357,35 @@ struct crystalhd_hw {
/**** API Exposed to the other layers ****/
-BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp,
+enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp,
void *buffer, uint32_t sz);
-BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, BC_FW_CMD *fw_cmd);
+enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, struct BC_FW_CMD *fw_cmd);
bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw);
-BC_STATUS crystalhd_hw_open(struct crystalhd_hw *, struct crystalhd_adp *);
-BC_STATUS crystalhd_hw_close(struct crystalhd_hw *);
-BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *);
-BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *);
+enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *, struct crystalhd_adp *);
+enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *);
+enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *);
+enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *);
-BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, crystalhd_dio_req *ioreq,
+enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
hw_comp_callback call_back,
wait_queue_head_t *cb_event,
uint32_t *list_id, uint8_t data_flags);
-BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw);
-BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw);
-BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw);
-BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id);
-BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
- crystalhd_dio_req *ioreq, bool en_post);
-BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
- BC_PIC_INFO_BLOCK *pib,
- crystalhd_dio_req **ioreq);
-BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw);
-BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id);
+enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
+ struct crystalhd_dio_req *ioreq, bool en_post);
+enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
+ struct BC_PIC_INFO_BLOCK *pib,
+ struct crystalhd_dio_req **ioreq);
+enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw);
void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats);
/* API to program the core clock on the decoder */
-BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *);
+enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *);
#endif
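Descriptor list memory is sized as BC_LINK_MAX_SGLS elements of struct dma_descriptor, as crystalhd_hw_setup_dma_rings() does above. A minimal sketch of allocating and recording one such list in struct dma_desc_mem; the helper name is hypothetical and bc_kern_dma_alloc() is used as in crystalhd_hw.c:

        /* Sketch only: allocate one descriptor list and record it in the
         * renamed struct dma_desc_mem (helper name hypothetical). */
        static enum BC_STATUS alloc_desc_list(struct crystalhd_hw *hw,
                                              struct dma_desc_mem *dm)
        {
                size_t len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
                dma_addr_t phy;
                void *mem;

                mem = bc_kern_dma_alloc(hw->adp, len, &phy);
                if (!mem)
                        return BC_STS_ERROR;

                dm->pdma_desc_start = mem;
                dm->phy_addr = phy;
                dm->sz = len;
                return BC_STS_SUCCESS;
        }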
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 54bad652c0c..a4ec891328c 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -15,7 +15,7 @@
along with this driver. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************/
-#include <linux/version.h>
+#include <linux/smp_lock.h>
#include <linux/slab.h>
#include "crystalhd_lnx.h"
@@ -51,7 +51,7 @@ static int chd_dec_enable_int(struct crystalhd_adp *adp)
rc = request_irq(adp->pdev->irq, chd_dec_isr, IRQF_SHARED,
adp->name, (void *)adp);
if (rc) {
- BCMLOG_ERR("Interrupt request failed.. \n");
+ BCMLOG_ERR("Interrupt request failed..\n");
pci_disable_msi(adp->pdev);
}
@@ -73,10 +73,10 @@ static int chd_dec_disable_int(struct crystalhd_adp *adp)
return 0;
}
-crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
+struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
{
unsigned long flags = 0;
- crystalhd_ioctl_data *temp;
+ struct crystalhd_ioctl_data *temp;
if (!adp)
return NULL;
@@ -93,7 +93,7 @@ crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
return temp;
}
-void chd_dec_free_iodata(struct crystalhd_adp *adp, crystalhd_ioctl_data *iodata,
+void chd_dec_free_iodata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *iodata,
bool isr)
{
unsigned long flags = 0;
@@ -112,7 +112,7 @@ static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int
int rc;
if (!ud || !dr) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return -EINVAL;
}
@@ -122,14 +122,14 @@ static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int
rc = copy_from_user(dr, (void *)ud, size);
if (rc) {
- BCMLOG_ERR("Invalid args for command \n");
+ BCMLOG_ERR("Invalid args for command\n");
rc = -EFAULT;
}
return rc;
}
-static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, crystalhd_ioctl_data *io,
+static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *io,
uint32_t m_sz, unsigned long ua)
{
unsigned long ua_off;
@@ -163,7 +163,7 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, crystalhd_ioctl_data *
}
static int chd_dec_release_cdata(struct crystalhd_adp *adp,
- crystalhd_ioctl_data *io, unsigned long ua)
+ struct crystalhd_ioctl_data *io, unsigned long ua)
{
unsigned long ua_off;
int rc;
@@ -193,7 +193,7 @@ static int chd_dec_release_cdata(struct crystalhd_adp *adp,
}
static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
- crystalhd_ioctl_data *io,
+ struct crystalhd_ioctl_data *io,
unsigned long ua, int set)
{
int rc;
@@ -206,7 +206,7 @@ static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set);
if (rc) {
- BCMLOG_ERR("failed to %s iodata \n", (set ? "set" : "get"));
+ BCMLOG_ERR("failed to %s iodata\n", (set ? "set" : "get"));
return rc;
}
@@ -231,8 +231,8 @@ static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua,
uint32_t uid, uint32_t cmd, crystalhd_cmd_proc func)
{
int rc;
- crystalhd_ioctl_data *temp;
- BC_STATUS sts = BC_STS_SUCCESS;
+ struct crystalhd_ioctl_data *temp;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
temp = chd_dec_alloc_iodata(adp, 0);
if (!temp) {
@@ -261,12 +261,12 @@ static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua,
}
/* API interfaces */
-static int chd_dec_ioctl(struct inode *in, struct file *fd,
- unsigned int cmd, unsigned long ua)
+static long chd_dec_ioctl(struct file *fd, unsigned int cmd, unsigned long ua)
{
struct crystalhd_adp *adp = chd_get_adp();
crystalhd_cmd_proc cproc;
struct crystalhd_user *uc;
+ int ret;
if (!adp || !fd) {
BCMLOG_ERR("Invalid adp\n");
@@ -279,20 +279,24 @@ static int chd_dec_ioctl(struct inode *in, struct file *fd,
return -ENODATA;
}
+ lock_kernel();
cproc = crystalhd_get_cmd_proc(&adp->cmds, cmd, uc);
if (!cproc) {
BCMLOG_ERR("Unhandled command: %d\n", cmd);
+ unlock_kernel();
return -EINVAL;
}
- return chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc);
+ ret = chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc);
+ unlock_kernel();
+ return ret;
}
static int chd_dec_open(struct inode *in, struct file *fd)
{
struct crystalhd_adp *adp = chd_get_adp();
int rc = 0;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
struct crystalhd_user *uc = NULL;
BCMLOG_ENTER;
@@ -308,7 +312,7 @@ static int chd_dec_open(struct inode *in, struct file *fd)
sts = crystalhd_user_open(&adp->cmds, &uc);
if (sts != BC_STS_SUCCESS) {
- BCMLOG_ERR("cmd_user_open - %d \n", sts);
+ BCMLOG_ERR("cmd_user_open - %d\n", sts);
rc = -EBUSY;
}
@@ -326,7 +330,7 @@ static int chd_dec_close(struct inode *in, struct file *fd)
BCMLOG_ENTER;
if (!adp) {
- BCMLOG_ERR("Invalid adp \n");
+ BCMLOG_ERR("Invalid adp\n");
return -EINVAL;
}
@@ -345,14 +349,14 @@ static int chd_dec_close(struct inode *in, struct file *fd)
static const struct file_operations chd_dec_fops = {
.owner = THIS_MODULE,
- .ioctl = chd_dec_ioctl,
+ .unlocked_ioctl = chd_dec_ioctl,
.open = chd_dec_open,
.release = chd_dec_close,
};
static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp)
{
- crystalhd_ioctl_data *temp;
+ struct crystalhd_ioctl_data *temp;
struct device *dev;
int rc = -ENODEV, i = 0;
@@ -376,7 +380,7 @@ static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp)
dev = device_create(crystalhd_class, NULL, MKDEV(adp->chd_dec_major, 0),
NULL, "crystalhd");
- if (!dev) {
+ if (IS_ERR(dev)) {
BCMLOG_ERR("failed to create device\n");
goto device_create_fail;
}
@@ -390,7 +394,7 @@ static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp)
/* Allocate general purpose ioctl pool. */
for (i = 0; i < CHD_IODATA_POOL_SZ; i++) {
/* FIXME: jarod: why atomic? */
- temp = kzalloc(sizeof(crystalhd_ioctl_data), GFP_ATOMIC);
+ temp = kzalloc(sizeof(struct crystalhd_ioctl_data), GFP_ATOMIC);
if (!temp) {
BCMLOG_ERR("ioctl data pool kzalloc failed\n");
rc = -ENOMEM;
@@ -414,7 +418,7 @@ fail:
static void __devexit chd_dec_release_chdev(struct crystalhd_adp *adp)
{
- crystalhd_ioctl_data *temp = NULL;
+ struct crystalhd_ioctl_data *temp = NULL;
if (!adp)
return;
@@ -509,7 +513,7 @@ static void __devexit chd_pci_release_mem(struct crystalhd_adp *pinfo)
static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
{
struct crystalhd_adp *pinfo;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
BCMLOG_ENTER;
@@ -521,7 +525,7 @@ static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
sts = crystalhd_delete_cmd_context(&pinfo->cmds);
if (sts != BC_STS_SUCCESS)
- BCMLOG_ERR("cmd delete :%d \n", sts);
+ BCMLOG_ERR("cmd delete :%d\n", sts);
chd_dec_release_chdev(pinfo);
@@ -539,7 +543,7 @@ static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
{
struct crystalhd_adp *pinfo;
int rc;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
"s_vendor:0x%04x s_device: 0x%04x\n",
@@ -581,7 +585,7 @@ static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
chd_dec_init_chdev(pinfo);
rc = chd_dec_enable_int(pinfo);
if (rc) {
- BCMLOG_ERR("_enable_int err:%d \n", rc);
+ BCMLOG_ERR("_enable_int err:%d\n", rc);
pci_disable_device(pdev);
return -ENODEV;
}
@@ -601,7 +605,7 @@ static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
if (sts != BC_STS_SUCCESS) {
- BCMLOG_ERR("cmd setup :%d \n", sts);
+ BCMLOG_ERR("cmd setup :%d\n", sts);
pci_disable_device(pdev);
return -ENODEV;
}
@@ -619,8 +623,8 @@ static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct crystalhd_adp *adp;
- crystalhd_ioctl_data *temp;
- BC_STATUS sts = BC_STS_SUCCESS;
+ struct crystalhd_ioctl_data *temp;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
if (!adp) {
@@ -653,7 +657,7 @@ int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int chd_dec_pci_resume(struct pci_dev *pdev)
{
struct crystalhd_adp *adp;
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
int rc;
adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
@@ -675,7 +679,7 @@ int chd_dec_pci_resume(struct pci_dev *pdev)
rc = chd_dec_enable_int(adp);
if (rc) {
- BCMLOG_ERR("_enable_int err:%d \n", rc);
+ BCMLOG_ERR("_enable_int err:%d\n", rc);
pci_disable_device(pdev);
return -ENODEV;
}
@@ -738,13 +742,13 @@ static int __init chd_dec_module_init(void)
int rc;
chd_set_log_level(NULL, "debug");
- BCMLOG(BCMLOG_DATA, "Loading crystalhd %d.%d.%d \n",
+ BCMLOG(BCMLOG_DATA, "Loading crystalhd %d.%d.%d\n",
crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
rc = pci_register_driver(&bc_chd_70012_driver);
if (rc < 0)
- BCMLOG_ERR("Could not find any devices. err:%d \n", rc);
+ BCMLOG_ERR("Could not find any devices. err:%d\n", rc);
return rc;
}
@@ -752,7 +756,7 @@ module_init(chd_dec_module_init);
static void __exit chd_dec_module_cleanup(void)
{
- BCMLOG(BCMLOG_DATA, "unloading crystalhd %d.%d.%d \n",
+ BCMLOG(BCMLOG_DATA, "unloading crystalhd %d.%d.%d\n",
crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
pci_unregister_driver(&bc_chd_70012_driver);
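The switch from .ioctl to .unlocked_ioctl above keeps the old BKL serialization by taking lock_kernel()/unlock_kernel() explicitly around the command dispatch. A minimal sketch of that shape, with hypothetical example_* names and the dispatch reduced to a placeholder:

        /* Sketch only: .unlocked_ioctl handler holding the BKL explicitly,
         * as the old .ioctl entry point did (names hypothetical). */
        static long example_unlocked_ioctl(struct file *fd, unsigned int cmd,
                                           unsigned long arg)
        {
                long ret;

                lock_kernel();
                ret = -EINVAL;  /* the real handler dispatches cmd here */
                unlock_kernel();
                return ret;
        }

        static const struct file_operations example_fops = {
                .owner          = THIS_MODULE,
                .unlocked_ioctl = example_unlocked_ioctl,
        };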
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index d338ae97a4c..c951e43cbb3 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -42,11 +42,11 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "crystalhd_cmds.h"
@@ -79,12 +79,12 @@ struct crystalhd_adp {
unsigned int chd_dec_major;
unsigned int cfg_users;
- crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */
- crystalhd_elem_t *elem_pool_head; /* Queue element pool */
+ struct crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */
+ struct crystalhd_elem *elem_pool_head; /* Queue element pool */
struct crystalhd_cmd cmds;
- crystalhd_dio_req *ua_map_free_head;
+ struct crystalhd_dio_req *ua_map_free_head;
struct pci_pool *fill_byte_pool;
};
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index 73593b078b3..2c5138e4e1b 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -43,15 +43,15 @@ static inline void crystalhd_dram_wr(struct crystalhd_adp *adp, uint32_t mem_off
bc_dec_reg_wr(adp, (0x00380000 | (mem_off & 0x0007FFFF)), val);
}
-static inline BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp, uint32_t start_off, uint32_t cnt)
+static inline enum BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp, uint32_t start_off, uint32_t cnt)
{
return BC_STS_SUCCESS;
}
-static crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
+static struct crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
{
unsigned long flags = 0;
- crystalhd_dio_req *temp = NULL;
+ struct crystalhd_dio_req *temp = NULL;
if (!adp) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -67,7 +67,7 @@ static crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
return temp;
}
-static void crystalhd_free_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
+static void crystalhd_free_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
unsigned long flags = 0;
@@ -83,10 +83,10 @@ static void crystalhd_free_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio
spin_unlock_irqrestore(&adp->lock, flags);
}
-static crystalhd_elem_t *crystalhd_alloc_elem(struct crystalhd_adp *adp)
+static struct crystalhd_elem *crystalhd_alloc_elem(struct crystalhd_adp *adp)
{
unsigned long flags = 0;
- crystalhd_elem_t *temp = NULL;
+ struct crystalhd_elem *temp = NULL;
if (!adp)
return temp;
@@ -100,7 +100,7 @@ static crystalhd_elem_t *crystalhd_alloc_elem(struct crystalhd_adp *adp)
return temp;
}
-static void crystalhd_free_elem(struct crystalhd_adp *adp, crystalhd_elem_t *elem)
+static void crystalhd_free_elem(struct crystalhd_adp *adp, struct crystalhd_elem *elem)
{
unsigned long flags = 0;
@@ -230,14 +230,14 @@ void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
*
* 7412's Dram read routine.
*/
-BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off,
+enum BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off,
uint32_t dw_cnt, uint32_t *rd_buff)
{
uint32_t ix = 0;
if (!adp || !rd_buff ||
(bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
for (ix = 0; ix < dw_cnt; ix++)
@@ -258,14 +258,14 @@ BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *adp, uint32_t start_off,
*
* 7412's Dram write routine.
*/
-BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off,
+enum BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off,
uint32_t dw_cnt, uint32_t *wr_buff)
{
uint32_t ix = 0;
if (!adp || !wr_buff ||
(bc_chk_dram_range(adp, start_off, dw_cnt) != BC_STS_SUCCESS)) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
@@ -286,14 +286,14 @@ BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *adp, uint32_t start_off,
*
* Get value from Link's PCIe config space.
*/
-BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off,
+enum BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off,
uint32_t len, uint32_t *val)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
int rc = 0;
if (!adp || !val) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
@@ -331,14 +331,14 @@ BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *adp, uint32_t off,
*
* Set value to Link's PCIe config space.
*/
-BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *adp, uint32_t off,
+enum BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *adp, uint32_t off,
uint32_t len, uint32_t val)
{
- BC_STATUS sts = BC_STS_SUCCESS;
+ enum BC_STATUS sts = BC_STS_SUCCESS;
int rc = 0;
if (!adp || !val) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
@@ -429,11 +429,11 @@ void bc_kern_dma_free(struct crystalhd_adp *adp, uint32_t sz, void *ka,
* Initialize Generic DIO queue to hold any data. Callback
* will be used to free elements while deleting the queue.
*/
-BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
- crystalhd_dioq_t **dioq_hnd,
+enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
+ struct crystalhd_dioq **dioq_hnd,
crystalhd_data_free_cb cb, void *cbctx)
{
- crystalhd_dioq_t *dioq = NULL;
+ struct crystalhd_dioq *dioq = NULL;
if (!adp || !dioq_hnd) {
BCMLOG_ERR("Invalid arg!!\n");
@@ -446,8 +446,8 @@ BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
spin_lock_init(&dioq->lock);
dioq->sig = BC_LINK_DIOQ_SIG;
- dioq->head = (crystalhd_elem_t *)&dioq->head;
- dioq->tail = (crystalhd_elem_t *)&dioq->head;
+ dioq->head = (struct crystalhd_elem *)&dioq->head;
+ dioq->tail = (struct crystalhd_elem *)&dioq->head;
crystalhd_create_event(&dioq->event);
dioq->adp = adp;
dioq->data_rel_cb = cb;
@@ -470,7 +470,7 @@ BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
* by calling the call back provided during creation.
*
*/
-void crystalhd_delete_dioq(struct crystalhd_adp *adp, crystalhd_dioq_t *dioq)
+void crystalhd_delete_dioq(struct crystalhd_adp *adp, struct crystalhd_dioq *dioq)
{
void *temp;
@@ -498,11 +498,11 @@ void crystalhd_delete_dioq(struct crystalhd_adp *adp, crystalhd_dioq_t *dioq)
*
* Insert new element to Q tail.
*/
-BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data,
+enum BC_STATUS crystalhd_dioq_add(struct crystalhd_dioq *ioq, void *data,
bool wake, uint32_t tag)
{
unsigned long flags = 0;
- crystalhd_elem_t *tmp;
+ struct crystalhd_elem *tmp;
if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG) || !data) {
BCMLOG_ERR("Invalid arg!!\n");
@@ -518,7 +518,7 @@ BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data,
tmp->data = data;
tmp->tag = tag;
spin_lock_irqsave(&ioq->lock, flags);
- tmp->flink = (crystalhd_elem_t *)&ioq->head;
+ tmp->flink = (struct crystalhd_elem *)&ioq->head;
tmp->blink = ioq->tail;
tmp->flink->blink = tmp;
tmp->blink->flink = tmp;
@@ -540,11 +540,11 @@ BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data,
*
* Remove an element from Queue.
*/
-void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq)
+void *crystalhd_dioq_fetch(struct crystalhd_dioq *ioq)
{
unsigned long flags = 0;
- crystalhd_elem_t *tmp;
- crystalhd_elem_t *ret = NULL;
+ struct crystalhd_elem *tmp;
+ struct crystalhd_elem *ret = NULL;
void *data = NULL;
if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
@@ -554,7 +554,7 @@ void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq)
spin_lock_irqsave(&ioq->lock, flags);
tmp = ioq->head;
- if (tmp != (crystalhd_elem_t *)&ioq->head) {
+ if (tmp != (struct crystalhd_elem *)&ioq->head) {
ret = tmp;
tmp->flink->blink = tmp->blink;
tmp->blink->flink = tmp->flink;
@@ -578,11 +578,11 @@ void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq)
*
* Search TAG and remove the element.
*/
-void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag)
+void *crystalhd_dioq_find_and_fetch(struct crystalhd_dioq *ioq, uint32_t tag)
{
unsigned long flags = 0;
- crystalhd_elem_t *tmp;
- crystalhd_elem_t *ret = NULL;
+ struct crystalhd_elem *tmp;
+ struct crystalhd_elem *ret = NULL;
void *data = NULL;
if (!ioq || (ioq->sig != BC_LINK_DIOQ_SIG)) {
@@ -592,7 +592,7 @@ void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag)
spin_lock_irqsave(&ioq->lock, flags);
tmp = ioq->head;
- while (tmp != (crystalhd_elem_t *)&ioq->head) {
+ while (tmp != (struct crystalhd_elem *)&ioq->head) {
if (tmp->tag == tag) {
ret = tmp;
tmp->flink->blink = tmp->blink;
@@ -623,7 +623,7 @@ void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag)
* Return element from head if Q is not empty. Wait for new element
* if Q is empty for Timeout seconds.
*/
-void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs,
+void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq, uint32_t to_secs,
uint32_t *sig_pend)
{
unsigned long flags = 0;
@@ -673,19 +673,19 @@ out:
* This routine maps user address and lock pages for DMA.
*
*/
-BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
+enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
uint32_t ubuff_sz, uint32_t uv_offset,
bool en_422mode, bool dir_tx,
- crystalhd_dio_req **dio_hnd)
+ struct crystalhd_dio_req **dio_hnd)
{
- crystalhd_dio_req *dio;
+ struct crystalhd_dio_req *dio;
/* FIXME: jarod: should some of these unsigned longs be uint32_t or uintptr_t? */
unsigned long start = 0, end = 0, uaddr = 0, count = 0;
unsigned long spsz = 0, uv_start = 0;
int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
if (!adp || !ubuff || !ubuff_sz || !dio_hnd) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
/* Compute pages */
@@ -791,7 +791,7 @@ BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
dio->sg_cnt = pci_map_sg(adp->pdev, dio->sg,
dio->page_cnt, dio->direction);
if (dio->sg_cnt <= 0) {
- BCMLOG_ERR("sg map %d-%d \n", dio->sg_cnt, dio->page_cnt);
+ BCMLOG_ERR("sg map %d-%d\n", dio->sg_cnt, dio->page_cnt);
crystalhd_unmap_dio(adp, dio);
return BC_STS_ERROR;
}
@@ -820,13 +820,13 @@ BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
*
* This routine is to unmap the user buffer pages.
*/
-BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, crystalhd_dio_req *dio)
+enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
struct page *page = NULL;
int j = 0;
if (!adp || !dio) {
- BCMLOG_ERR("Invalid arg \n");
+ BCMLOG_ERR("Invalid arg\n");
return BC_STS_INV_ARG;
}
@@ -864,7 +864,7 @@ int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
{
uint32_t asz = 0, i = 0;
uint8_t *temp;
- crystalhd_dio_req *dio;
+ struct crystalhd_dio_req *dio;
if (!adp || !max_pages) {
BCMLOG_ERR("Invalid Arg!!\n");
@@ -887,13 +887,13 @@ int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
BC_LINK_SG_POOL_SZ, max_pages, asz, adp->fill_byte_pool);
for (i = 0; i < BC_LINK_SG_POOL_SZ; i++) {
- temp = (uint8_t *)kzalloc(asz, GFP_KERNEL);
+ temp = kzalloc(asz, GFP_KERNEL);
if ((temp) == NULL) {
BCMLOG_ERR("Failed to alloc %d mem\n", asz);
return -ENOMEM;
}
- dio = (crystalhd_dio_req *)temp;
+ dio = (struct crystalhd_dio_req *)temp;
temp += sizeof(*dio);
dio->pages = (struct page **)temp;
temp += (sizeof(*dio->pages) * max_pages);
@@ -923,7 +923,7 @@ int crystalhd_create_dio_pool(struct crystalhd_adp *adp, uint32_t max_pages)
*/
void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
{
- crystalhd_dio_req *dio;
+ struct crystalhd_dio_req *dio;
int count = 0;
if (!adp) {
@@ -947,7 +947,7 @@ void crystalhd_destroy_dio_pool(struct crystalhd_adp *adp)
adp->fill_byte_pool = NULL;
}
- BCMLOG(BCMLOG_DBG, "Released dio pool %d \n", count);
+ BCMLOG(BCMLOG_DBG, "Released dio pool %d\n", count);
}
/**
@@ -965,7 +965,7 @@ int __devinit crystalhd_create_elem_pool(struct crystalhd_adp *adp,
uint32_t pool_size)
{
uint32_t i;
- crystalhd_elem_t *temp;
+ struct crystalhd_elem *temp;
if (!adp || !pool_size)
return -EINVAL;
@@ -973,7 +973,7 @@ int __devinit crystalhd_create_elem_pool(struct crystalhd_adp *adp,
for (i = 0; i < pool_size; i++) {
temp = kzalloc(sizeof(*temp), GFP_KERNEL);
if (!temp) {
- BCMLOG_ERR("kalloc failed \n");
+ BCMLOG_ERR("kalloc failed\n");
return -ENOMEM;
}
crystalhd_free_elem(adp, temp);
@@ -993,7 +993,7 @@ int __devinit crystalhd_create_elem_pool(struct crystalhd_adp *adp,
*/
void crystalhd_delete_elem_pool(struct crystalhd_adp *adp)
{
- crystalhd_elem_t *temp;
+ struct crystalhd_elem *temp;
int dbg_cnt = 0;
if (!adp)
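
The BC_STATUS conversions above all follow one mechanical rule: once the typedef is dropped, the enumeration has to be named by its tag, so every declaration and definition gains the "enum" keyword while the call sites and return values stay untouched. Below is a minimal userspace sketch of the same pattern; the names (status, cfg_read) are invented for illustration and are not the driver's own.

/* Illustrative sketch only: hypothetical names, not the driver's API.
 * Shows the pattern the hunks above apply everywhere: the enum is used
 * by its tag instead of being hidden behind a typedef, and argument
 * checks return an explicit error value early. */
#include <stdio.h>

enum status {			/* tag-only enum, no typedef */
	STS_SUCCESS = 0,
	STS_INV_ARG,
};

static enum status cfg_read(const unsigned *dev, unsigned off, unsigned *val)
{
	if (!dev || !val)	/* early argument check, as in the driver */
		return STS_INV_ARG;
	*val = dev[off];
	return STS_SUCCESS;
}

int main(void)
{
	unsigned regs[4] = { 0x10, 0x20, 0x30, 0x40 };
	unsigned v;

	if (cfg_read(regs, 2, &v) == STS_SUCCESS)
		printf("reg[2] = 0x%x\n", v);
	return 0;
}
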
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index a2aa6ad7fc8..382078eafa0 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -34,7 +34,6 @@
#include <linux/string.h>
#include <linux/ioctl.h>
#include <linux/dma-mapping.h>
-#include <linux/version.h>
#include <linux/sched.h>
#include <asm/system.h>
#include "bc_dts_glob_lnx.h"
@@ -55,7 +54,7 @@ extern uint32_t g_linklog_level;
/* Scatter Gather memory pool size for Tx and Rx */
#define BC_LINK_SG_POOL_SZ (BC_TX_LIST_CNT + BC_RX_LIST_CNT)
-enum _crystalhd_dio_sig {
+enum crystalhd_dio_sig {
crystalhd_dio_inv = 0,
crystalhd_dio_locked,
crystalhd_dio_sg_mapped,
@@ -77,7 +76,7 @@ struct crystalhd_dio_user_info {
bool b422mode;
};
-typedef struct _crystalhd_dio_req {
+struct crystalhd_dio_req {
uint32_t sig;
uint32_t max_pages;
struct page **pages;
@@ -89,34 +88,34 @@ typedef struct _crystalhd_dio_req {
void *fb_va;
uint32_t fb_size;
dma_addr_t fb_pa;
- struct _crystalhd_dio_req *next;
-} crystalhd_dio_req;
+ struct crystalhd_dio_req *next;
+};
#define BC_LINK_DIOQ_SIG (0x09223280)
-typedef struct _crystalhd_elem_s {
- struct _crystalhd_elem_s *flink;
- struct _crystalhd_elem_s *blink;
+struct crystalhd_elem {
+ struct crystalhd_elem *flink;
+ struct crystalhd_elem *blink;
void *data;
uint32_t tag;
-} crystalhd_elem_t;
+};
typedef void (*crystalhd_data_free_cb)(void *context, void *data);
-typedef struct _crystalhd_dioq_s {
+struct crystalhd_dioq {
uint32_t sig;
struct crystalhd_adp *adp;
- crystalhd_elem_t *head;
- crystalhd_elem_t *tail;
+ struct crystalhd_elem *head;
+ struct crystalhd_elem *tail;
uint32_t count;
spinlock_t lock;
wait_queue_head_t event;
crystalhd_data_free_cb data_rel_cb;
void *cb_context;
-} crystalhd_dioq_t;
+};
-typedef void (*hw_comp_callback)(crystalhd_dio_req *,
- wait_queue_head_t *event, BC_STATUS sts);
+typedef void (*hw_comp_callback)(struct crystalhd_dio_req *,
+ wait_queue_head_t *event, enum BC_STATUS sts);
/*========= Decoder (7412) register access routines.================= */
uint32_t bc_dec_reg_rd(struct crystalhd_adp *, uint32_t);
@@ -127,12 +126,12 @@ uint32_t crystalhd_reg_rd(struct crystalhd_adp *, uint32_t);
void crystalhd_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t);
/*========= Decoder (7412) memory access routines..=================*/
-BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
-BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
/*==========Link (70012) PCIe Config access routines.================*/
-BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
-BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t);
+enum BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t);
/*========= Linux Kernel Interface routines. ======================= */
void *bc_kern_dma_alloc(struct crystalhd_adp *, uint32_t, dma_addr_t *);
@@ -168,20 +167,20 @@ do { \
/*================ Direct IO mapping routines ==================*/
extern int crystalhd_create_dio_pool(struct crystalhd_adp *, uint32_t);
extern void crystalhd_destroy_dio_pool(struct crystalhd_adp *);
-extern BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *, uint32_t,
- uint32_t, bool, bool, crystalhd_dio_req**);
+extern enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *, uint32_t,
+ uint32_t, bool, bool, struct crystalhd_dio_req**);
-extern BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *, crystalhd_dio_req*);
+extern enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *, struct crystalhd_dio_req*);
#define crystalhd_get_sgle_paddr(_dio, _ix) (cpu_to_le64(sg_dma_address(&_dio->sg[_ix])))
#define crystalhd_get_sgle_len(_dio, _ix) (cpu_to_le32(sg_dma_len(&_dio->sg[_ix])))
/*================ General Purpose Queues ==================*/
-extern BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *, crystalhd_dioq_t **, crystalhd_data_free_cb , void *);
-extern void crystalhd_delete_dioq(struct crystalhd_adp *, crystalhd_dioq_t *);
-extern BC_STATUS crystalhd_dioq_add(crystalhd_dioq_t *ioq, void *data, bool wake, uint32_t tag);
-extern void *crystalhd_dioq_fetch(crystalhd_dioq_t *ioq);
-extern void *crystalhd_dioq_find_and_fetch(crystalhd_dioq_t *ioq, uint32_t tag);
-extern void *crystalhd_dioq_fetch_wait(crystalhd_dioq_t *ioq, uint32_t to_secs, uint32_t *sig_pend);
+extern enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *, struct crystalhd_dioq **, crystalhd_data_free_cb , void *);
+extern void crystalhd_delete_dioq(struct crystalhd_adp *, struct crystalhd_dioq *);
+extern enum BC_STATUS crystalhd_dioq_add(struct crystalhd_dioq *ioq, void *data, bool wake, uint32_t tag);
+extern void *crystalhd_dioq_fetch(struct crystalhd_dioq *ioq);
+extern void *crystalhd_dioq_find_and_fetch(struct crystalhd_dioq *ioq, uint32_t tag);
+extern void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq, uint32_t to_secs, uint32_t *sig_pend);
#define crystalhd_dioq_count(_ioq) ((_ioq) ? _ioq->count : 0)
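
The header changes above replace the _crystalhd_dio_req, _crystalhd_elem_s and _crystalhd_dioq_s typedefs with plain struct tags, the usual kernel coding-style preference, which is what forces the matching churn in the .c hunks. The casts of &dioq->head to an element pointer work only because head and tail are the first two members of the queue, mirroring flink and blink in an element, so the queue itself doubles as the list sentinel and no NULL checks are needed while walking. A self-contained sketch of that sentinel arrangement, using made-up types (elem, queue) rather than the driver's:

/* Minimal, self-contained sketch (hypothetical types) of the sentinel
 * trick the queue code relies on: because "head" and "tail" are the
 * first two members of the queue and "flink"/"blink" are the first two
 * members of an element, &q->head can be cast to an element pointer
 * and used as the list terminator. */
#include <stdio.h>
#include <stdlib.h>

struct elem {
	struct elem *flink;	/* forward link */
	struct elem *blink;	/* backward link */
	int data;
};

struct queue {
	struct elem *head;	/* must stay the first member */
	struct elem *tail;	/* must stay the second member */
};

static void queue_init(struct queue *q)
{
	q->head = (struct elem *)&q->head;	/* empty list points at the sentinel */
	q->tail = (struct elem *)&q->head;
}

static void queue_add_tail(struct queue *q, struct elem *e)
{
	e->flink = (struct elem *)&q->head;	/* new tail links forward to the sentinel */
	e->blink = q->tail;
	e->flink->blink = e;	/* i.e. q->tail = e */
	e->blink->flink = e;	/* old tail (or q->head when empty) now points at e */
}

int main(void)
{
	struct queue q;
	struct elem *e, *it;

	queue_init(&q);
	for (int i = 0; i < 3; i++) {
		e = calloc(1, sizeof(*e));
		e->data = i;
		queue_add_tail(&q, e);
	}
	for (it = q.head; it != (struct elem *)&q.head; it = it->flink)
		printf("%d ", it->data);
	printf("\n");

	while (q.head != (struct elem *)&q.head) {	/* free everything */
		it = q.head;
		q.head = it->flink;
		free(it);
	}
	return 0;
}
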
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index 061add30ba8..1798975a69b 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -29,7 +29,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,10 +42,10 @@
#define AUDIO_SRAM_CHANNEL SRAM_CH08
-#define dprintk(level,fmt, arg...) if (debug >= level) \
+#define dprintk(level, fmt, arg...) if (debug >= level) \
printk(KERN_INFO "%s/1: " fmt, chip->dev->name , ## arg)
-#define dprintk_core(level,fmt, arg...) if (debug >= level) \
+#define dprintk_core(level, fmt, arg...) if (debug >= level) \
printk(KERN_DEBUG "%s/1: " fmt, chip->dev->name , ## arg)
/****************************************************************************
@@ -81,7 +81,6 @@ struct cx25821_audio_dev {
struct snd_pcm_substream *substream;
};
-typedef struct cx25821_audio_dev snd_cx25821_card_t;
/****************************************************************************
@@ -90,7 +89,7 @@ typedef struct cx25821_audio_dev snd_cx25821_card_t;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = { 1,[1 ... (SNDRV_CARDS - 1)] = 1 };
+static int enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
@@ -105,7 +104,7 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s).");
MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards");
MODULE_AUTHOR("Hiep Huynh");
MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); //"{{Conexant,23881},"
+MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */
static unsigned int debug;
module_param(debug, int, 0644);
@@ -135,7 +134,7 @@ MODULE_PARM_DESC(debug, "enable debug messages");
* BOARD Specific: Sets audio DMA
*/
-static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
+static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_buffer *buf = chip->buf;
struct cx25821_dev *dev = chip->dev;
@@ -143,7 +142,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
&cx25821_sram_channels[AUDIO_SRAM_CHANNEL];
u32 tmp = 0;
- // enable output on the GPIO 0 for the MCLK ADC (Audio)
+ /* enable output on the GPIO 0 for the MCLK ADC (Audio) */
cx25821_set_gpiopin_direction(chip->dev, 0, 0);
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
@@ -158,18 +157,23 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
cx_write(AUD_A_LNGTH, buf->bpl);
/* reset counter */
- cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET); //GP_COUNT_CONTROL_RESET = 0x3
+ /* GP_COUNT_CONTROL_RESET = 0x3 */
+ cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
- //Set the input mode to 16-bit
+ /* Set the input mode to 16-bit */
tmp = cx_read(AUD_A_CFG);
cx_write(AUD_A_CFG,
tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE |
FLD_AUD_CLK_ENABLE);
- //printk(KERN_INFO "DEBUG: Start audio DMA, %d B/line, cmds_start(0x%x)= %d lines/FIFO, %d periods, %d "
- // "byte buffer\n", buf->bpl, audio_ch->cmds_start, cx_read(audio_ch->cmds_start + 12)>>1,
- // chip->num_periods, buf->bpl * chip->num_periods);
+ /* printk(KERN_INFO "DEBUG: Start audio DMA, %d B/line,"
+ "cmds_start(0x%x)= %d lines/FIFO, %d periods, "
+ "%d byte buffer\n", buf->bpl,
+ audio_ch->cmds_start,
+ cx_read(audio_ch->cmds_start + 12)>>1,
+ chip->num_periods, buf->bpl *chip->num_periods);
+ */
/* Enables corresponding bits at AUD_INT_STAT */
cx_write(AUD_A_INT_MSK,
@@ -182,7 +186,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
/* enable audio irqs */
cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT);
- // Turn on audio downstream fifo and risc enable 0x101
+ /* Turn on audio downstream fifo and risc enable 0x101 */
tmp = cx_read(AUD_INT_DMA_CTL);
cx_set(AUD_INT_DMA_CTL,
tmp | (FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN));
@@ -194,7 +198,7 @@ static int _cx25821_start_audio_dma(snd_cx25821_card_t * chip)
/*
* BOARD Specific: Resets audio DMA
*/
-static int _cx25821_stop_audio_dma(snd_cx25821_card_t * chip)
+static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip)
{
struct cx25821_dev *dev = chip->dev;
@@ -232,13 +236,13 @@ static char *cx25821_aud_irqs[32] = {
/*
* BOARD Specific: Threats IRQ audio specific calls
*/
-static void cx25821_aud_irq(snd_cx25821_card_t * chip, u32 status, u32 mask)
+static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status,
+ u32 mask)
{
struct cx25821_dev *dev = chip->dev;
- if (0 == (status & mask)) {
+ if (0 == (status & mask))
return;
- }
cx_write(AUD_A_INT_STAT, status);
if (debug > 1 || (status & mask & ~0xff))
@@ -277,7 +281,7 @@ static void cx25821_aud_irq(snd_cx25821_card_t * chip, u32 status, u32 mask)
*/
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
- snd_cx25821_card_t *chip = dev_id;
+ struct cx25821_audio_dev *chip = dev_id;
struct cx25821_dev *dev = chip->dev;
u32 status, pci_status;
u32 audint_status, audint_mask;
@@ -318,11 +322,11 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
if (handled)
cx_write(PCI_INT_STAT, pci_status);
- out:
+out:
return IRQ_RETVAL(handled);
}
-static int dsp_buffer_free(snd_cx25821_card_t * chip)
+static int dsp_buffer_free(struct cx25821_audio_dev *chip)
{
BUG_ON(!chip->dma_size);
@@ -363,7 +367,8 @@ static struct snd_pcm_hardware snd_cx25821_digital_hw = {
.period_bytes_max = DEFAULT_FIFO_SIZE / 3,
.periods_min = 1,
.periods_max = AUDIO_LINE_SIZE,
- .buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE), //128*128 = 16384 = 1024 * 16
+ /* 128 * 128 = 16384 = 1024 * 16 */
+ .buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE),
};
/*
@@ -371,7 +376,7 @@ static struct snd_pcm_hardware snd_cx25821_digital_hw = {
*/
static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
unsigned int bpl = 0;
@@ -393,18 +398,19 @@ static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream)
if (cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size !=
DEFAULT_FIFO_SIZE) {
- bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3; //since there are 3 audio Clusters
+ /* since there are 3 audio Clusters */
+ bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3;
bpl &= ~7; /* must be multiple of 8 */
- if (bpl > AUDIO_LINE_SIZE) {
+ if (bpl > AUDIO_LINE_SIZE)
bpl = AUDIO_LINE_SIZE;
- }
+
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
}
return 0;
- _error:
+_error:
dprintk(1, "Error opening PCM!\n");
return err;
}
@@ -423,7 +429,7 @@ static int snd_cx25821_close(struct snd_pcm_substream *substream)
static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct videobuf_dmabuf *dma;
struct cx25821_buffer *buf;
@@ -445,9 +451,8 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
if (NULL == buf)
return -ENOMEM;
- if (chip->period_size > AUDIO_LINE_SIZE) {
+ if (chip->period_size > AUDIO_LINE_SIZE)
chip->period_size = AUDIO_LINE_SIZE;
- }
buf->vb.memory = V4L2_MEMORY_MMAP;
buf->vb.field = V4L2_FIELD_NONE;
@@ -474,7 +479,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
buf->vb.width, buf->vb.height, 1);
if (ret < 0) {
printk(KERN_INFO
- "DEBUG: ERROR after cx25821_risc_databuffer_audio() \n");
+ "DEBUG: ERROR after cx25821_risc_databuffer_audio()\n");
goto error;
}
@@ -494,7 +499,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
return 0;
- error:
+error:
kfree(buf);
return ret;
}
@@ -504,7 +509,7 @@ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream,
*/
static int snd_cx25821_hw_free(struct snd_pcm_substream *substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
@@ -528,7 +533,7 @@ static int snd_cx25821_prepare(struct snd_pcm_substream *substream)
static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream,
int cmd)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
int err = 0;
/* Local interrupts are already disabled by ALSA */
@@ -557,7 +562,7 @@ static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream,
static snd_pcm_uframes_t snd_cx25821_pointer(struct snd_pcm_substream
*substream)
{
- snd_cx25821_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
@@ -593,10 +598,11 @@ static struct snd_pcm_ops snd_cx25821_pcm_ops = {
};
/*
- * ALSA create a PCM device: Called when initializing the board. Sets up the name and hooks up
- * the callbacks
+ * ALSA create a PCM device: Called when initializing the board.
+ * Sets up the name and hooks up the callbacks
*/
-static int snd_cx25821_pcm(snd_cx25821_card_t * chip, int device, char *name)
+static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
+ char *name)
{
struct snd_pcm *pcm;
int err;
@@ -636,7 +642,7 @@ MODULE_DEVICE_TABLE(pci, cx25821_audio_pci_tbl);
* from the file.
*/
/*
-static int snd_cx25821_free(snd_cx25821_card_t *chip)
+static int snd_cx25821_free(struct cx25821_audio_dev *chip)
{
if (chip->irq >= 0)
free_irq(chip->irq, chip);
@@ -653,9 +659,9 @@ static int snd_cx25821_free(snd_cx25821_card_t *chip)
*/
static void snd_cx25821_dev_free(struct snd_card *card)
{
- snd_cx25821_card_t *chip = card->private_data;
+ struct cx25821_audio_dev *chip = card->private_data;
- //snd_cx25821_free(chip);
+ /* snd_cx25821_free(chip); */
snd_card_free(chip->card);
}
@@ -665,23 +671,23 @@ static void snd_cx25821_dev_free(struct snd_card *card)
static int cx25821_audio_initdev(struct cx25821_dev *dev)
{
struct snd_card *card;
- snd_cx25821_card_t *chip;
+ struct cx25821_audio_dev *chip;
int err;
if (devno >= SNDRV_CARDS) {
printk(KERN_INFO "DEBUG ERROR: devno >= SNDRV_CARDS %s\n",
__func__);
- return (-ENODEV);
+ return -ENODEV;
}
if (!enable[devno]) {
++devno;
printk(KERN_INFO "DEBUG ERROR: !enable[devno] %s\n", __func__);
- return (-ENOENT);
+ return -ENOENT;
}
err = snd_card_create(index[devno], id[devno], THIS_MODULE,
- sizeof(snd_cx25821_card_t), &card);
+ sizeof(struct cx25821_audio_dev), &card);
if (err < 0) {
printk(KERN_INFO
"DEBUG ERROR: cannot create snd_card_new in %s\n",
@@ -693,7 +699,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
/* Card "creation" */
card->private_free = snd_cx25821_dev_free;
- chip = (snd_cx25821_card_t *) card->private_data;
+ chip = (struct cx25821_audio_dev *) card->private_data;
spin_lock_init(&chip->reg_lock);
chip->dev = dev;
@@ -712,7 +718,8 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
goto error;
}
- if ((err = snd_cx25821_pcm(chip, 0, "cx25821 Digital")) < 0) {
+ err = snd_cx25821_pcm(chip, 0, "cx25821 Digital");
+ if (err < 0) {
printk(KERN_INFO
"DEBUG ERROR: cannot create snd_cx25821_pcm %s\n",
__func__);
@@ -741,7 +748,7 @@ static int cx25821_audio_initdev(struct cx25821_dev *dev)
devno++;
return 0;
- error:
+error:
snd_card_free(card);
return err;
}
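
Two checkpatch-style cleanups recur through this ALSA file: assignments are lifted out of if conditions, and goto labels move to the first column while redundant parentheses around return values are dropped. A small hypothetical sketch of the resulting error-path shape (make_buffer and setup are invented for illustration, not driver functions):

/* Hedged sketch of the cleanup pattern, not the driver's code. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int make_buffer(char **buf, size_t sz)
{
	*buf = malloc(sz);
	return *buf ? 0 : -ENOMEM;
}

static int setup(void)
{
	char *buf;
	int err;

	err = make_buffer(&buf, 4096);	/* was: if ((err = make_buffer(...)) < 0) */
	if (err < 0) {
		fprintf(stderr, "buffer allocation failed\n");
		goto error;
	}

	/* ... further init steps would go here ... */

	free(buf);
	return 0;

error:				/* label at column 0, as in the hunks above */
	return err;		/* plain return value, no wrapping parentheses */
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
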
diff --git a/drivers/staging/cx25821/cx25821-audio-upstream.c b/drivers/staging/cx25821/cx25821-audio-upstream.c
index 11c56bdb0ce..eb39d13f7d7 100644
--- a/drivers/staging/cx25821/cx25821-audio-upstream.c
+++ b/drivers/staging/cx25821/cx25821-audio-upstream.c
@@ -33,7 +33,7 @@
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
@@ -62,9 +62,8 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 3) {
+ if (lines > 3)
lines = 3;
- }
BUG_ON(lines < 2);
@@ -84,7 +83,7 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
- //IQ size
+ /* IQ size */
cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW);
for (i = 24; i < 80; i += 4)
@@ -100,7 +99,7 @@ int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev,
}
static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
- __le32 * rp,
+ __le32 *rp,
dma_addr_t databuf_phys_addr,
unsigned int bpl,
int fifo_enable)
@@ -116,8 +115,10 @@ static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(databuf_phys_addr + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
- // Check if we need to enable the FIFO after the first 3 lines
- // For the upstream audio channel, the risc engine will enable the FIFO.
+ /* Check if we need to enable the FIFO
+ * after the first 3 lines.
+ * For the upstream audio channel,
+ * the risc engine will enable the FIFO */
if (fifo_enable && line == 2) {
*(rp++) = RISC_WRITECR;
*(rp++) = sram_ch->dma_ctl;
@@ -160,7 +161,7 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
risc_flag = RISC_CNT_INC;
}
- //Calculate physical jump address
+ /* Calculate physical jump address */
if ((frame + 1) == NUM_AUDIO_FRAMES) {
risc_phys_jump_addr =
dev->_risc_phys_start_addr +
@@ -179,17 +180,17 @@ int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev,
fifo_enable);
if (USE_RISC_NOOP_AUDIO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
- // Loop to (Nth)FrameRISC or to Start of Risc program & generate IRQ
+ /* Loop to (Nth)FrameRISC or to Start of Risc program &
+ * generate IRQ */
*(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
- //Recalculate virtual address based on frame index
+ /* Recalculate virtual address based on frame index */
rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 +
(AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4);
}
@@ -220,19 +221,19 @@ void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
u32 tmp = 0;
if (!dev->_audio_is_running) {
- printk
- ("cx25821: No audio file is currently running so return!\n");
+ printk(KERN_DEBUG
+ "cx25821: No audio file is currently running so return!\n");
return;
}
- //Disable RISC interrupts
+ /* Disable RISC interrupts */
cx_write(sram_ch->int_msk, 0);
- //Turn OFF risc and fifo enable in AUD_DMA_CNTRL
+ /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */
tmp = cx_read(sram_ch->dma_ctl);
cx_write(sram_ch->dma_ctl,
tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en));
- //Clear data buffer memory
+ /* Clear data buffer memory */
if (dev->_audiodata_buf_virt_addr)
memset(dev->_audiodata_buf_virt_addr, 0,
dev->_audiodata_buf_size);
@@ -253,9 +254,8 @@ void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev)
{
- if (dev->_audio_is_running) {
+ if (dev->_audio_is_running)
cx25821_stop_upstream_audio(dev);
- }
cx25821_free_memory_audio(dev);
}
@@ -282,7 +282,7 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_audiofilename, open_errno);
return PTR_ERR(myfile);
} else {
@@ -294,7 +294,7 @@ int cx25821_get_audio_data(struct cx25821_dev *dev,
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered! \n",
+ printk("%s: File has no READ operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -347,7 +347,7 @@ static void cx25821_audioups_handler(struct work_struct *work)
container_of(work, struct cx25821_dev, _audio_work_entry);
if (!dev) {
- printk("ERROR %s(): since container_of(work_struct) FAILED! \n",
+ printk(KERN_ERR "ERROR %s(): since container_of(work_struct) FAILED!\n",
__func__);
return;
}
@@ -373,19 +373,19 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_audiofilename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered! \n",
+ printk("%s: File has no file operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered! \n",
+ printk("%s: File has no READ operations registered!\n",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -421,13 +421,11 @@ int cx25821_openfile_audio(struct cx25821_dev *dev,
}
}
- if (i > 0) {
+ if (i > 0)
dev->_audioframe_count++;
- }
- if (vfs_read_retval < line_size) {
+ if (vfs_read_retval < line_size)
break;
- }
}
dev->_audiofile_status =
@@ -460,14 +458,14 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_audiorisc_size = dev->audio_upstream_riscbuf_size;
if (!dev->_risc_virt_addr) {
- printk
- ("cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning.\n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning.\n");
return -ENOMEM;
}
- //Clear out memory at address
+ /* Clear out memory at address */
memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size);
- //For Audio Data buffer allocation
+ /* For Audio Data buffer allocation */
dev->_audiodata_buf_virt_addr =
pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size,
&data_dma_addr);
@@ -475,30 +473,30 @@ static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_audiodata_buf_size = dev->audio_upstream_databuf_size;
if (!dev->_audiodata_buf_virt_addr) {
- printk
- ("cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning. \n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! Returning.\n");
return -ENOMEM;
}
- //Clear out memory at address
+ /* Clear out memory at address */
memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size);
ret = cx25821_openfile_audio(dev, sram_ch);
if (ret < 0)
return ret;
- //Creating RISC programs
+ /* Creating RISC programs */
ret =
cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl,
dev->_audio_lines_count);
if (ret < 0) {
printk(KERN_DEBUG
- "cx25821 ERROR creating audio upstream RISC programs! \n");
+ "cx25821 ERROR creating audio upstream RISC programs!\n");
goto error;
}
return 0;
- error:
+error:
return ret;
}
@@ -512,22 +510,22 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
__le32 *rp;
if (status & FLD_AUD_SRC_RISCI1) {
- //Get interrupt_index of the program that interrupted
+ /* Get interrupt_index of the program that interrupted */
u32 prog_cnt = cx_read(channel->gpcnt);
- //Since we've identified our IRQ, clear our bits from the interrupt mask and interrupt status registers
+ /* Since we've identified our IRQ, clear our bits from the
+ * interrupt mask and interrupt status registers */
cx_write(channel->int_msk, 0);
cx_write(channel->int_stat, cx_read(channel->int_stat));
spin_lock(&dev->slock);
while (prog_cnt != dev->_last_index_irq) {
- //Update _last_index_irq
- if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1)) {
+ /* Update _last_index_irq */
+ if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1))
dev->_last_index_irq++;
- } else {
+ else
dev->_last_index_irq = 0;
- }
dev->_audioframe_index = dev->_last_index_irq;
@@ -559,7 +557,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
cpu_to_le32(RISC_NOOP);
}
}
- // Jump to 2nd Audio Frame
+ /* Jump to 2nd Audio Frame */
*(rp++) =
cpu_to_le32(RISC_JUMP | RISC_IRQ1 |
RISC_CNT_RESET);
@@ -582,7 +580,8 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
printk("%s: Audio Received OpCode Error Interrupt!\n",
__func__);
- // Read and write back the interrupt status register to clear our bits
+ /* Read and write back the interrupt status register to clear
+ * our bits */
cx_write(channel->int_stat, cx_read(channel->int_stat));
}
@@ -591,7 +590,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
dev->_audioframe_count);
return -1;
}
- //ElSE, set the interrupt mask register, re-enable irq.
+ /* ElSE, set the interrupt mask register, re-enable irq. */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
@@ -613,7 +612,7 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
msk_stat = cx_read(sram_ch->int_mstat);
audio_status = cx_read(sram_ch->int_stat);
- // Only deal with our interrupt
+ /* Only deal with our interrupt */
if (audio_status) {
handled =
cx25821_audio_upstream_irq(dev,
@@ -622,11 +621,10 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
audio_status);
}
- if (handled < 0) {
+ if (handled < 0)
cx25821_stop_upstream_audio(dev);
- } else {
+ else
handled += handled;
- }
return IRQ_RETVAL(handled);
}
@@ -638,13 +636,14 @@ static void cx25821_wait_fifo_enable(struct cx25821_dev *dev,
u32 tmp;
do {
- //Wait 10 microsecond before checking to see if the FIFO is turned ON.
+ /* Wait 10 microsecond before checking to see if the FIFO is
+ * turned ON. */
udelay(10);
tmp = cx_read(sram_ch->dma_ctl);
- if (count++ > 1000) //10 millisecond timeout
- {
+ /* 10 millisecond timeout */
+ if (count++ > 1000) {
printk
("cx25821 ERROR: %s() fifo is NOT turned on. Timeout!\n",
__func__);
@@ -661,31 +660,34 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
u32 tmp = 0;
int err = 0;
- // Set the physical start address of the RISC program in the initial program counter(IPC) member of the CMDS.
+ /* Set the physical start address of the RISC program in the initial
+ * program counter(IPC) member of the CMDS. */
cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr);
- cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
+ /* Risc IPC High 64 bits 63-32 */
+ cx_write(sram_ch->cmds_start + 4, 0);
/* reset counter */
cx_write(sram_ch->gpcnt_ctl, 3);
- //Set the line length (It looks like we do not need to set the line length)
+ /* Set the line length (It looks like we do not need to set the
+ * line length) */
cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH);
- //Set the input mode to 16-bit
+ /* Set the input mode to 16-bit */
tmp = cx_read(sram_ch->aud_cfg);
tmp |=
FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE |
FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE;
cx_write(sram_ch->aud_cfg, tmp);
- // Read and write back the interrupt status register to clear it
+ /* Read and write back the interrupt status register to clear it */
tmp = cx_read(sram_ch->int_stat);
cx_write(sram_ch->int_stat, tmp);
- // Clear our bits from the interrupt status register.
+ /* Clear our bits from the interrupt status register. */
cx_write(sram_ch->int_stat, _intr_msk);
- //Set the interrupt mask register, enable irq.
+ /* Set the interrupt mask register, enable irq. */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
@@ -699,19 +701,19 @@ int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev,
goto fail_irq;
}
- // Start the DMA engine
+ /* Start the DMA engine */
tmp = cx_read(sram_ch->dma_ctl);
cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en);
dev->_audio_is_running = 1;
dev->_is_first_audio_frame = 1;
- // The fifo_en bit turns on by the first Risc program
+ /* The fifo_en bit turns on by the first Risc program */
cx25821_wait_fifo_enable(dev, sram_ch);
return 0;
- fail_irq:
+fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -731,14 +733,14 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->_audio_upstream_channel_select = channel_select;
sram_ch = &dev->sram_channels[channel_select];
- //Work queue
+ /* Work queue */
INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
dev->_irq_audio_queues =
create_singlethread_workqueue("cx25821_audioworkqueue");
if (!dev->_irq_audio_queues) {
- printk
- ("cx25821 ERROR: create_singlethread_workqueue() for Audio FAILED!\n");
+ printk(KERN_DEBUG
+ "cx25821 ERROR: create_singlethread_workqueue() for Audio FAILED!\n");
return -ENOMEM;
}
@@ -751,8 +753,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
if (dev->input_audiofilename) {
str_length = strlen(dev->input_audiofilename);
- dev->_audiofilename =
- (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_audiofilename)
goto error;
@@ -760,14 +761,13 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
memcpy(dev->_audiofilename, dev->input_audiofilename,
str_length + 1);
- //Default if filename is empty string
+ /* Default if filename is empty string */
if (strcmp(dev->input_audiofilename, "") == 0) {
dev->_audiofilename = "/root/audioGOOD.wav";
}
} else {
str_length = strlen(_defaultAudioName);
- dev->_audiofilename =
- (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_audiofilename = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_audiofilename)
goto error;
@@ -784,7 +784,7 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
RISC_SYNC_INSTRUCTION_SIZE;
dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS;
- //Allocating buffers and prepare RISC program
+ /* Allocating buffers and prepare RISC program */
retval =
cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size);
if (retval < 0) {
@@ -793,12 +793,12 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
dev->name);
goto error;
}
- //Start RISC engine
+ /* Start RISC engine */
cx25821_start_audio_dma_upstream(dev, sram_ch);
return 0;
- error:
+error:
cx25821_dev_unregister(dev);
return err;
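
The upstream-audio hunks mostly convert C++ style comments to block comments and give every bare printk an explicit severity such as KERN_ERR or KERN_DEBUG, so the messages can be filtered by log level instead of landing at the kernel's default. A rough userspace analogue of that filtering, with an invented log_msg helper standing in for printk:

/* Userspace sketch only (log_msg is hypothetical, not printk): without
 * an explicit severity a message cannot be filtered meaningfully. */
#include <stdarg.h>
#include <stdio.h>

enum log_level { LOG_ERR, LOG_INFO, LOG_DEBUG };

static enum log_level threshold = LOG_INFO;

static void log_msg(enum log_level lvl, const char *fmt, ...)
{
	va_list ap;

	if (lvl > threshold)	/* messages above the threshold are dropped */
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	log_msg(LOG_ERR, "opening file failed with errno = %d\n", 2);
	log_msg(LOG_DEBUG, "this one is filtered out at LOG_INFO\n");
	return 0;
}
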
diff --git a/drivers/staging/cx25821/cx25821-audups11.c b/drivers/staging/cx25821/cx25821-audups11.c
index e76451c309f..e49ead982f3 100644
--- a/drivers/staging/cx25821/cx25821-audups11.c
+++ b/drivers/staging/cx25821/cx25821-audups11.c
@@ -88,10 +88,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -145,7 +145,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO11))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO11))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -163,7 +163,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO11)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO11)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -191,19 +191,19 @@ static int video_release(struct file *file)
//cx_write(channel11->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO11)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO11)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO11);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO11);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -224,7 +224,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO11)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO11)))) {
return -EBUSY;
}
@@ -242,11 +242,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO11);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO11);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -258,13 +258,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -350,7 +350,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
if (fh) {
dev = fh->dev;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -364,50 +364,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream11,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
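
This file's changes are almost entirely renames: helpers shared across the driver's compilation units (buffer_setup, res_check, the vidioc handlers and so on) gain a cx25821_ prefix, presumably so the externally visible symbols cannot collide with identically named functions elsewhere. A toy sketch of the same wiring with invented names, not the V4L2 API:

/* Hypothetical ops table showing the prefix convention only. */
#include <stdio.h>

struct demo_ops {
	int (*open)(void);
	int (*release)(void);
};

/* prefixed, externally visible helpers */
int demo_drv_open(void)    { printf("open\n");    return 0; }
int demo_drv_release(void) { printf("release\n"); return 0; }

static const struct demo_ops ops = {
	.open    = demo_drv_open,	/* was: a generic, collision-prone name */
	.release = demo_drv_release,
};

int main(void)
{
	ops.open();
	ops.release();
	return 0;
}
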
diff --git a/drivers/staging/cx25821/cx25821-cards.c b/drivers/staging/cx25821/cx25821-cards.c
index 4d0b9eac3e4..da0f56d50e2 100644
--- a/drivers/staging/cx25821/cx25821-cards.c
+++ b/drivers/staging/cx25821/cx25821-cards.c
@@ -30,12 +30,12 @@
#include "cx25821.h"
#include "tuner-xc2028.h"
-// board config info
+/* board config info */
struct cx25821_board cx25821_boards[] = {
[UNKNOWN_BOARD] = {
.name = "UNKNOWN/GENERIC",
- // Ensure safe default for unknown boards
+ /* Ensure safe default for unknown boards */
.clk_freq = 0,
},
diff --git a/drivers/staging/cx25821/cx25821-core.c b/drivers/staging/cx25821/cx25821-core.c
index 9e9b8c3c931..d90abb383fc 100644
--- a/drivers/staging/cx25821/cx25821-core.c
+++ b/drivers/staging/cx25821/cx25821-core.c
@@ -32,6 +32,7 @@ MODULE_AUTHOR("Shu Lin - Hiep Huynh");
MODULE_LICENSE("GPL");
struct list_head cx25821_devlist;
+EXPORT_SYMBOL(cx25821_devlist);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -313,6 +314,7 @@ struct sram_channel cx25821_sram_channels[] = {
.irq_bit = 11,
},
};
+EXPORT_SYMBOL(cx25821_sram_channels);
struct sram_channel *channel0 = &cx25821_sram_channels[SRAM_CH00];
struct sram_channel *channel1 = &cx25821_sram_channels[SRAM_CH01];
@@ -388,70 +390,74 @@ static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
- // enable RUN_RISC in Pecos
+ /* enable RUN_RISC in Pecos */
cx_write(DEV_CNTRL2, 0x20);
- // Set the master PCI interrupt masks to enable video, audio, MBIF, and GPIO interrupts
- // I2C interrupt masking is handled by the I2C objects themselves.
+ /* Set the master PCI interrupt masks to enable video, audio, MBIF,
+ * and GPIO interrupts
+ * I2C interrupt masking is handled by the I2C objects themselves. */
cx_write(PCI_INT_MSK, 0x2001FFFF);
tmp = cx_read(RDR_TLCTL0);
- tmp &= ~FLD_CFG_RCB_CK_EN; // Clear the RCB_CK_EN bit
+ tmp &= ~FLD_CFG_RCB_CK_EN; /* Clear the RCB_CK_EN bit */
cx_write(RDR_TLCTL0, tmp);
- // PLL-A setting for the Audio Master Clock
+ /* PLL-A setting for the Audio Master Clock */
cx_write(PLL_A_INT_FRAC, 0x9807A58B);
- // PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1
+ /* PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1 */
cx_write(PLL_A_POST_STAT_BIST, 0x8000019C);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_A_INT_FRAC);
cx_write(PLL_A_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-B setting for Mobilygen Host Bus Interface
+ /* PLL-B setting for Mobilygen Host Bus Interface */
cx_write(PLL_B_INT_FRAC, 0x9883A86F);
- // PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0
+ /* PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0 */
cx_write(PLL_B_POST_STAT_BIST, 0x8000018D);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_B_INT_FRAC);
cx_write(PLL_B_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-C setting for video upstream channel
+ /* PLL-C setting for video upstream channel */
cx_write(PLL_C_INT_FRAC, 0x96A0EA3F);
- // PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0
+ /* PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0 */
cx_write(PLL_C_POST_STAT_BIST, 0x80000103);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_C_INT_FRAC);
cx_write(PLL_C_INT_FRAC, tmp & 0x7FFFFFFF);
- // PLL-D setting for audio upstream channel
+ /* PLL-D setting for audio upstream channel */
cx_write(PLL_D_INT_FRAC, 0x98757F5B);
- // PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0
+ /* PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0 */
cx_write(PLL_D_POST_STAT_BIST, 0x80000113);
- // clear reset bit [31]
+ /* clear reset bit [31] */
tmp = cx_read(PLL_D_INT_FRAC);
cx_write(PLL_D_INT_FRAC, tmp & 0x7FFFFFFF);
- // This selects the PLL C clock source for the video upstream channel I and J
+ /* This selects the PLL C clock source for the video upstream channel
+ * I and J */
tmp = cx_read(VID_CH_CLK_SEL);
cx_write(VID_CH_CLK_SEL, (tmp & 0x00FFFFFF) | 0x24000000);
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
- //select 656/VIP DST for downstream Channel A - C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ * select 656/VIP DST for downstream Channel A - C */
tmp = cx_read(VID_CH_MODE_SEL);
- //cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF);
+ /* cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF); */
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
- // enables 656 port I and J as output
+ /* enables 656 port I and J as output */
tmp = cx_read(CLK_RST);
- tmp |= FLD_USE_ALT_PLL_REF; // use external ALT_PLL_REF pin as its reference clock instead
+ /* use external ALT_PLL_REF pin as its reference clock instead */
+ tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
mdelay(100);
@@ -476,9 +482,8 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 4) {
+ if (lines > 4)
lines = 4;
- }
BUG_ON(lines < 2);
@@ -494,16 +499,15 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
cx_write(cdt + 16 * i + 12, 0);
}
- //init the first cdt buffer
+ /* init the first cdt buffer */
for (i = 0; i < 128; i++)
cx_write(ch->fifo_start + 4 * i, i);
/* write CMDS */
- if (ch->jumponly) {
+ if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
- } else {
+ else
cx_write(ch->cmds_start + 0, risc);
- }
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
@@ -526,6 +530,7 @@ int cx25821_sram_channel_setup(struct cx25821_dev *dev,
return 0;
}
+EXPORT_SYMBOL(cx25821_sram_channel_setup);
int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
@@ -546,9 +551,8 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 3) {
- lines = 3; //for AUDIO
- }
+ if (lines > 3)
+ lines = 3; /* for AUDIO */
BUG_ON(lines < 2);
@@ -565,25 +569,23 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
}
/* write CMDS */
- if (ch->jumponly) {
+ if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
- } else {
+ else
cx_write(ch->cmds_start + 0, risc);
- }
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
- //IQ size
- if (ch->jumponly) {
+ /* IQ size */
+ if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
- } else {
+ else
cx_write(ch->cmds_start + 20, 64 >> 2);
- }
- //zero out
+ /* zero out */
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
@@ -595,6 +597,7 @@ int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
return 0;
}
+EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
void cx25821_sram_channel_dump(struct cx25821_dev *dev, struct sram_channel *ch)
{
@@ -658,6 +661,7 @@ void cx25821_sram_channel_dump(struct cx25821_dev *dev, struct sram_channel *ch)
printk(KERN_WARNING " : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
}
+EXPORT_SYMBOL(cx25821_sram_channel_dump);
void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
struct sram_channel *ch)
@@ -731,7 +735,7 @@ void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
printk(KERN_WARNING "instruction %d = 0x%x\n", i, risc);
}
- //read data from the first cdt buffer
+ /* read data from the first cdt buffer */
risc = cx_read(AUD_A_CDT);
printk(KERN_WARNING "\nread cdt loc=0x%x\n", risc);
for (i = 0; i < 8; i++) {
@@ -741,31 +745,32 @@ void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
printk(KERN_WARNING "\n\n");
value = cx_read(CLK_RST);
- CX25821_INFO(" CLK_RST = 0x%x \n\n", value);
+ CX25821_INFO(" CLK_RST = 0x%x\n\n", value);
value = cx_read(PLL_A_POST_STAT_BIST);
- CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_A_INT_FRAC);
- CX25821_INFO(" PLL_A_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_A_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_B_POST_STAT_BIST);
- CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_B_INT_FRAC);
- CX25821_INFO(" PLL_B_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_B_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_C_POST_STAT_BIST);
- CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_C_INT_FRAC);
- CX25821_INFO(" PLL_C_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_C_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_D_POST_STAT_BIST);
- CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_D_INT_FRAC);
- CX25821_INFO(" PLL_D_INT_FRAC = 0x%x \n\n", value);
+ CX25821_INFO(" PLL_D_INT_FRAC = 0x%x\n\n", value);
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
- CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x \n\n", value);
+ CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x\n\n", value);
}
+EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
static void cx25821_shutdown(struct cx25821_dev *dev)
{
@@ -835,8 +840,8 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx_write(AUD_E_INT_STAT, 0xffffffff);
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
- cx_write(PAD_CTRL, 0x12); //for I2C
- cx25821_registers_init(dev); //init Pecos registers
+ cx_write(PAD_CTRL, 0x12); /* for I2C */
+ cx25821_registers_init(dev); /* init Pecos registers */
mdelay(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
@@ -847,7 +852,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
dev->use_cif_resolution[i] = FALSE;
}
- //Probably only affect Downstream
+ /* Probably only affect Downstream */
for (i = VID_UPSTREAM_SRAM_CHANNEL_I; i <= VID_UPSTREAM_SRAM_CHANNEL_J;
i++) {
cx25821_set_vip_mode(dev, &dev->sram_channels[i]);
@@ -859,7 +864,7 @@ static void cx25821_initialize(struct cx25821_dev *dev)
cx25821_gpio_init(dev);
}
-static int get_resources(struct cx25821_dev *dev)
+static int cx25821_get_resources(struct cx25821_dev *dev)
{
if (request_mem_region
(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0),
@@ -944,12 +949,11 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
dev->clk_freq = 28000000;
dev->sram_channels = cx25821_sram_channels;
- if (dev->nr > 1) {
+ if (dev->nr > 1)
CX25821_INFO("dev->nr > 1!");
- }
/* board config */
- dev->board = 1; //card[dev->nr];
+ dev->board = 1; /* card[dev->nr]; */
dev->_max_num_decoders = MAX_DECODERS;
dev->pci_bus = dev->pci->bus->number;
@@ -967,7 +971,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
dev->i2c_bus[0].i2c_period = (0x07 << 24); /* 1.95MHz */
- if (get_resources(dev) < 0) {
+ if (cx25821_get_resources(dev) < 0) {
printk(KERN_ERR "%s No more PCIe resources for "
"subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
@@ -1007,8 +1011,8 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
cx25821_initialize(dev);
cx25821_i2c_register(&dev->i2c_bus[0]);
-// cx25821_i2c_register(&dev->i2c_bus[1]);
-// cx25821_i2c_register(&dev->i2c_bus[2]);
+/* cx25821_i2c_register(&dev->i2c_bus[1]);
+ * cx25821_i2c_register(&dev->i2c_bus[2]); */
CX25821_INFO("i2c register! bus->i2c_rc = %d\n",
dev->i2c_bus[0].i2c_rc);
@@ -1026,7 +1030,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= AUDIO_UPSTREAM_SRAM_CHANNEL_B; i++) {
- //Since we don't have template8 for Audio Downstream
+ /* Since we don't have template8 for Audio Downstream */
if (cx25821_video_register(dev, i, video_template[i - 1]) < 0) {
printk(KERN_ERR
"%s() Failed to register analog video adapters for Upstream channel %d.\n",
@@ -1034,7 +1038,7 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
}
}
- // register IOCTL device
+ /* register IOCTL device */
dev->ioctl_dev =
cx25821_vdev_init(dev, dev->pci, video_template[VIDEO_IOCTL_CH],
"video");
@@ -1112,6 +1116,7 @@ void cx25821_dev_unregister(struct cx25821_dev *dev)
cx25821_i2c_unregister(&dev->i2c_bus[0]);
cx25821_iounmap(dev);
}
+EXPORT_SYMBOL(cx25821_dev_unregister);
static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
@@ -1122,9 +1127,8 @@ static __le32 *cx25821_risc_field(__le32 * rp, struct scatterlist *sglist,
unsigned int line, todo;
/* sync instruction */
- if (sync_line != NO_SYNC_LINE) {
+ if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
- }
/* scan lines */
sg = sglist;
@@ -1299,7 +1303,8 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
- if ((rc = btcx_riscmem_alloc(pci, risc, instructions * 12)) < 0)
+ rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
+ if (rc < 0)
return rc;
/* write risc instructions */
@@ -1312,6 +1317,7 @@ int cx25821_risc_databuffer_audio(struct pci_dev *pci,
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
+EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value)
@@ -1375,7 +1381,7 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
}
}
- out:
+out:
return IRQ_RETVAL(handled);
}
@@ -1399,12 +1405,14 @@ void cx25821_print_irqbits(char *name, char *tag, char **strings,
}
printk("\n");
}
+EXPORT_SYMBOL(cx25821_print_irqbits);
struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
{
struct cx25821_dev *dev = pci_get_drvdata(pci);
return dev;
}
+EXPORT_SYMBOL(cx25821_dev_get);
static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
@@ -1430,7 +1438,7 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
goto fail_unregister_device;
}
- printk(KERN_INFO "cx25821 Athena pci enable ! \n");
+ printk(KERN_INFO "cx25821 Athena pci enable !\n");
if (cx25821_dev_setup(dev) < 0) {
err = -EINVAL;
@@ -1464,14 +1472,14 @@ static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
return 0;
- fail_irq:
- printk(KERN_INFO "cx25821 cx25821_initdev() can't get IRQ ! \n");
+fail_irq:
+ printk(KERN_INFO "cx25821 cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
- fail_unregister_device:
+fail_unregister_device:
v4l2_device_unregister(&dev->v4l2_dev);
- fail_free:
+fail_free:
kfree(dev);
return err;
}
@@ -1536,16 +1544,6 @@ static void __exit cx25821_fini(void)
pci_unregister_driver(&cx25821_pci_driver);
}
-EXPORT_SYMBOL(cx25821_devlist);
-EXPORT_SYMBOL(cx25821_sram_channels);
-EXPORT_SYMBOL(cx25821_print_irqbits);
-EXPORT_SYMBOL(cx25821_dev_get);
-EXPORT_SYMBOL(cx25821_dev_unregister);
-EXPORT_SYMBOL(cx25821_sram_channel_setup);
-EXPORT_SYMBOL(cx25821_sram_channel_dump);
-EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
-EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
-EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
EXPORT_SYMBOL(cx25821_set_gpiopin_direction);
module_init(cx25821_init);
diff --git a/drivers/staging/cx25821/cx25821-gpio.c b/drivers/staging/cx25821/cx25821-gpio.c
index e8a37b47e43..2f154b3658a 100644
--- a/drivers/staging/cx25821/cx25821-gpio.c
+++ b/drivers/staging/cx25821/cx25821-gpio.c
@@ -31,7 +31,7 @@ void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
u32 gpio_register = 0;
u32 value = 0;
- // Check for valid pinNumber
+ /* Check for valid pinNumber */
if (pin_number >= 47)
return;
@@ -39,14 +39,14 @@ void cx25821_set_gpiopin_direction(struct cx25821_dev *dev,
bit = pin_number - 31;
gpio_oe_reg = GPIO_HI_OE;
}
- // Here we will make sure that the GPIOs 0 and 1 are output. keep the rest as is
+ /* Here we will make sure that GPIOs 0 and 1 are output. Keep the
+ * rest as is. */
gpio_register = cx_read(gpio_oe_reg);
- if (pin_logic_value == 1) {
+ if (pin_logic_value == 1)
value = gpio_register | Set_GPIO_Bit(bit);
- } else {
+ else
value = gpio_register & Clear_GPIO_Bit(bit);
- }
cx_write(gpio_oe_reg, value);
}
@@ -58,11 +58,12 @@ static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
u32 gpio_reg = GPIO_LO;
u32 value = 0;
- // Check for valid pinNumber
+ /* Check for valid pinNumber */
if (pin_number >= 47)
return;
- cx25821_set_gpiopin_direction(dev, pin_number, 0); // change to output direction
+ /* change to output direction */
+ cx25821_set_gpiopin_direction(dev, pin_number, 0);
if (pin_number > 31) {
bit = pin_number - 31;
@@ -71,25 +72,23 @@ static void cx25821_set_gpiopin_logicvalue(struct cx25821_dev *dev,
value = cx_read(gpio_reg);
- if (pin_logic_value == 0) {
+ if (pin_logic_value == 0)
value &= Clear_GPIO_Bit(bit);
- } else {
+ else
value |= Set_GPIO_Bit(bit);
- }
cx_write(gpio_reg, value);
}
void cx25821_gpio_init(struct cx25821_dev *dev)
{
- if (dev == NULL) {
+ if (dev == NULL)
return;
- }
switch (dev->board) {
case CX25821_BOARD_CONEXANT_ATHENA10:
default:
- //set GPIO 5 to select the path for Medusa/Athena
+ /* set GPIO 5 to select the path for Medusa/Athena */
cx25821_set_gpiopin_logicvalue(dev, 5, 1);
mdelay(20);
break;
diff --git a/drivers/staging/cx25821/cx25821-i2c.c b/drivers/staging/cx25821/cx25821-i2c.c
index f4f2681d8f1..08f45b52df6 100644
--- a/drivers/staging/cx25821/cx25821-i2c.c
+++ b/drivers/staging/cx25821/cx25821-i2c.c
@@ -28,7 +28,7 @@ static unsigned int i2c_debug;
module_param(i2c_debug, int, 0644);
MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
-static unsigned int i2c_scan = 0;
+static unsigned int i2c_scan;
module_param(i2c_scan, int, 0444);
MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
@@ -159,9 +159,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
return msg->len;
- eio:
+eio:
retval = -EIO;
- err:
+err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
@@ -223,9 +223,9 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
}
return msg->len;
- eio:
+eio:
retval = -EIO;
- err:
+err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
@@ -266,7 +266,7 @@ static int i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
}
return num;
- err:
+err:
return retval;
}
@@ -319,7 +319,7 @@ int cx25821_i2c_register(struct cx25821_i2c *bus)
bus->i2c_client.adapter = &bus->i2c_adap;
- //set up the I2c
+ /* set up the I2C */
bus->i2c_client.addr = (0x88 >> 1);
return bus->i2c_rc;
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/staging/cx25821/cx25821-medusa-video.c
index d6016200d69..34616dc507f 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/staging/cx25821/cx25821-medusa-video.c
@@ -24,11 +24,12 @@
#include "cx25821-medusa-video.h"
#include "cx25821-biffuncs.h"
-/////////////////////////////////////////////////////////////////////////////////////////
-//medusa_enable_bluefield_output()
-//
-// Enable the generation of blue filed output if no video
-//
+/*
+ * medusa_enable_bluefield_output()
+ *
+ * Enable the generation of blue field output if no video
+ *
+ */
static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
int enable)
{
@@ -73,15 +74,15 @@ static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
}
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl, &tmp);
- value &= 0xFFFFFF7F; // clear BLUE_FIELD_EN
+ value &= 0xFFFFFF7F; /* clear BLUE_FIELD_EN */
if (enable)
- value |= 0x00000080; // set BLUE_FIELD_EN
+ value |= 0x00000080; /* set BLUE_FIELD_EN */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp);
value &= 0xFFFFFF7F;
if (enable)
- value |= 0x00000080; // set BLUE_FIELD_EN
+ value |= 0x00000080; /* set BLUE_FIELD_EN */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
}
@@ -95,17 +96,18 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
mutex_lock(&dev->lock);
for (i = 0; i < MAX_DECODERS; i++) {
- // set video format NTSC-M
+ /* set video format NTSC-M */
value =
cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
&tmp);
value &= 0xFFFFFFF0;
- value |= 0x10001; // enable the fast locking mode bit[16]
+ /* enable the fast locking mode bit[16] */
+ value |= 0x10001;
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
value);
- // resolution NTSC 720x480
+ /* resolution NTSC 720x480 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
@@ -119,17 +121,17 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
- value |= 0x1C1E001A; // vblank_cnt + 2 to get camera ID
+ value |= 0x1C1E001A; /* vblank_cnt + 2 to get camera ID */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
- // chroma subcarrier step size
+ /* chroma subcarrier step size */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x43E00000);
- // enable VIP optional active
+ /* enable VIP optional active */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
@@ -139,7 +141,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
- // enable VIP optional active (VIP_OPT_AL) for direct output.
+ /* enable VIP optional active (VIP_OPT_AL) for direct output. */
value =
cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
&tmp);
@@ -149,19 +151,21 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
value);
- // clear VPRES_VERT_EN bit, fixes the chroma run away problem
- // when the input switching rate < 16 fields
- //
+ /*
+ * clear VPRES_VERT_EN bit, fixes the chroma run away problem
+ * when the input switching rate < 16 fields
+ */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
- value = setBitAtPos(value, 14); // disable special play detection
+ /* disable special play detection */
+ value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
- // set vbi_gate_en to 0
+ /* set vbi_gate_en to 0 */
value =
cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
&tmp);
@@ -170,12 +174,12 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
value);
- // Enable the generation of blue field output if no video
+ /* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
- // NTSC hclock
+ /* NTSC hclock */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
@@ -185,7 +189,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
- // burst begin and burst end
+ /* burst begin and burst end */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
@@ -204,7 +208,7 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
- // set NTSC vblank, no phase alternation, 7.5 IRE pedestal
+ /* set NTSC vblank, no phase alternation, 7.5 IRE pedestal */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
@@ -227,17 +231,19 @@ static int medusa_initialize_ntsc(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x009A89C1);
- // Subcarrier Increment
+ /* Subcarrier Increment */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x21F07C1F);
}
- //set picture resolutions
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); //0 - 720
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); //0 - 480
+ /* set picture resolutions */
+ /* 0 - 720 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
+ /* 0 - 480 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
- // set Bypass input format to NTSC 525 lines
+ /* set Bypass input format to NTSC 525 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value |= 0x00080200;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
@@ -252,7 +258,7 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
int ret_val = -1;
u32 value = 0, tmp = 0;
- // Setup for 2D threshold
+ /* Setup for 2D threshold */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_HFS_CFG + (0x200 * dec),
0x20002861);
@@ -263,7 +269,7 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_LF_CFG + (0x200 * dec),
0x200A1023);
- // Setup flat chroma and luma thresholds
+ /* Setup flat chroma and luma thresholds */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), &tmp);
@@ -272,12 +278,12 @@ static int medusa_PALCombInit(struct cx25821_dev *dev, int dec)
cx25821_i2c_write(&dev->i2c_bus[0],
COMB_FLAT_THRESH_CTRL + (0x200 * dec), value);
- // set comb 2D blend
+ /* set comb 2D blend */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_2D_BLEND + (0x200 * dec),
0x210F0F0F);
- // COMB MISC CONTROL
+ /* COMB MISC CONTROL */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], COMB_MISC_CTRL + (0x200 * dec),
0x41120A7F);
@@ -295,17 +301,18 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
mutex_lock(&dev->lock);
for (i = 0; i < MAX_DECODERS; i++) {
- // set video format PAL-BDGHI
+ /* set video format PAL-BDGHI */
value =
cx25821_i2c_read(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
&tmp);
value &= 0xFFFFFFF0;
- value |= 0x10004; // enable the fast locking mode bit[16]
+ /* enable the fast locking mode bit[16] */
+ value |= 0x10004;
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0], MODE_CTRL + (0x200 * i),
value);
- // resolution PAL 720x576
+ /* resolution PAL 720x576 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), &tmp);
@@ -315,22 +322,22 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
HORIZ_TIM_CTRL + (0x200 * i), value);
- // vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24
+ /* vblank656_cnt=x26, vactive_cnt=240h, vblank_cnt=x24 */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), &tmp);
value &= 0x00C00C00;
- value |= 0x28240026; // vblank_cnt + 2 to get camera ID
+ value |= 0x28240026; /* vblank_cnt + 2 to get camera ID */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
VERT_TIM_CTRL + (0x200 * i), value);
- // chroma subcarrier step size
+ /* chroma subcarrier step size */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
SC_STEP_SIZE + (0x200 * i), 0x5411E2D0);
- // enable VIP optional active
+ /* enable VIP optional active */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), &tmp);
@@ -340,7 +347,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
OUT_CTRL_NS + (0x200 * i), value);
- // enable VIP optional active (VIP_OPT_AL) for direct output.
+ /* enable VIP optional active (VIP_OPT_AL) for direct output. */
value =
cx25821_i2c_read(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
&tmp);
@@ -350,18 +357,21 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0], OUT_CTRL1 + (0x200 * i),
value);
- // clear VPRES_VERT_EN bit, fixes the chroma run away problem
- // when the input switching rate < 16 fields
+ /*
+ * clear VPRES_VERT_EN bit, fixes the chroma run away problem
+ * when the input switching rate < 16 fields
+ */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), &tmp);
- value = setBitAtPos(value, 14); // disable special play detection
+ /* disable special play detection */
+ value = setBitAtPos(value, 14);
value = clearBitAtPos(value, 15);
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
MISC_TIM_CTRL + (0x200 * i), value);
- // set vbi_gate_en to 0
+ /* set vbi_gate_en to 0 */
value =
cx25821_i2c_read(&dev->i2c_bus[0], DFE_CTRL1 + (0x200 * i),
&tmp);
@@ -372,12 +382,12 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
medusa_PALCombInit(dev, i);
- // Enable the generation of blue field output if no video
+ /* Enable the generation of blue field output if no video */
medusa_enable_bluefield_output(dev, i, 1);
}
for (i = 0; i < MAX_ENCODERS; i++) {
- // PAL hclock
+ /* PAL hclock */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), &tmp);
@@ -387,7 +397,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_1 + (0x100 * i), value);
- // burst begin and burst end
+ /* burst begin and burst end */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), &tmp);
@@ -397,7 +407,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_2 + (0x100 * i), value);
- // hblank and vactive
+ /* hblank and vactive */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), &tmp);
@@ -407,7 +417,7 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_3 + (0x100 * i), value);
- // set PAL vblank, phase alternation, 0 IRE pedestal
+ /* set PAL vblank, phase alternation, 0 IRE pedestal */
value =
cx25821_i2c_read(&dev->i2c_bus[0],
DENC_A_REG_4 + (0x100 * i), &tmp);
@@ -430,17 +440,19 @@ static int medusa_initialize_pal(struct cx25821_dev *dev)
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_6 + (0x100 * i), 0x00A493CF);
- // Subcarrier Increment
+ /* Subcarrier Increment */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
DENC_A_REG_7 + (0x100 * i), 0x2A098ACB);
}
- //set picture resolutions
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0); //0 - 720
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0); //0 - 576
+ /* set picture resolutions */
+ /* 0 - 720 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], HSCALE_CTRL, 0x0);
+ /* 0 - 576 */
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], VSCALE_CTRL, 0x0);
- // set Bypass input format to PAL 625 lines
+ /* set Bypass input format to PAL 625 lines */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
value &= 0xFFF7FDFF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
@@ -455,18 +467,17 @@ int medusa_set_videostandard(struct cx25821_dev *dev)
int status = STATUS_SUCCESS;
u32 value = 0, tmp = 0;
- if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK) {
+ if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK)
status = medusa_initialize_pal(dev);
- } else {
+ else
status = medusa_initialize_ntsc(dev);
- }
- // Enable DENC_A output
+ /* Enable DENC_A output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_A_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_A_REG_4, value);
- // Enable DENC_B output
+ /* Enable DENC_B output */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_B_REG_4, &tmp);
value = setBitAtPos(value, 4);
status = cx25821_i2c_write(&dev->i2c_bus[0], DENC_B_REG_4, value);
@@ -486,10 +497,10 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
mutex_lock(&dev->lock);
- // validate the width - cannot be negative
+ /* validate the width - cannot be negative */
if (width > MAX_WIDTH) {
printk
- ("cx25821 %s() : width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH \n",
+ ("cx25821 %s() : width %d > MAX_WIDTH %d ! resetting to MAX_WIDTH\n",
__func__, width, MAX_WIDTH);
width = MAX_WIDTH;
}
@@ -523,14 +534,14 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
vscale = 0x1E00;
break;
- default: //720
+ default: /* 720 */
hscale = 0x0;
vscale = 0x0;
break;
}
for (; decoder < decoder_count; decoder++) {
- // write scaling values for each decoder
+ /* write scaling values for each decoder */
ret_val =
cx25821_i2c_write(&dev->i2c_bus[0],
HSCALE_CTRL + (0x200 * decoder), hscale);
@@ -552,7 +563,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
mutex_lock(&dev->lock);
- // no support
+ /* no support */
if (decoder < VDEC_A && decoder > VDEC_H) {
mutex_unlock(&dev->lock);
return;
@@ -577,11 +588,10 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
_display_field_cnt[decoder] = duration;
- // update hardware
+ /* update hardware */
fld_cnt = cx25821_i2c_read(&dev->i2c_bus[0], disp_cnt_reg, &tmp);
- if (!(decoder % 2)) // EVEN decoder
- {
+ if (!(decoder % 2)) { /* EVEN decoder */
fld_cnt &= 0xFFFF0000;
fld_cnt |= duration;
} else {
@@ -594,8 +604,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
mutex_unlock(&dev->lock);
}
-/////////////////////////////////////////////////////////////////////////////////////////
-// Map to Medusa register setting
+/* Map to Medusa register setting */
static int mapM(int srcMin,
int srcMax, int srcVal, int dstMin, int dstMax, int *dstVal)
{
@@ -603,20 +612,21 @@ static int mapM(int srcMin,
int denominator;
int quotient;
- if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax)) {
+ if ((srcMin == srcMax) || (srcVal < srcMin) || (srcVal > srcMax))
return -1;
- }
- // This is the overall expression used:
- // *dstVal = (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin;
- // but we need to account for rounding so below we use the modulus
- // operator to find the remainder and increment if necessary.
+ /*
+ * This is the overall expression used:
+ * *dstVal =
+ * (srcVal - srcMin)*(dstMax - dstMin) / (srcMax - srcMin) + dstMin;
+ * but we need to account for rounding so below we use the modulus
+ * operator to find the remainder and increment if necessary.
+ */
numerator = (srcVal - srcMin) * (dstMax - dstMin);
denominator = srcMax - srcMin;
quotient = numerator / denominator;
- if (2 * (numerator % denominator) >= denominator) {
+ if (2 * (numerator % denominator) >= denominator)
quotient++;
- }
*dstVal = quotient + dstMin;
@@ -636,7 +646,6 @@ static unsigned long convert_to_twos(long numeric, unsigned long bits_len)
}
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
{
int ret_val = 0;
@@ -665,7 +674,6 @@ int medusa_set_brightness(struct cx25821_dev *dev, int brightness, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
{
int ret_val = 0;
@@ -695,7 +703,6 @@ int medusa_set_contrast(struct cx25821_dev *dev, int contrast, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
{
int ret_val = 0;
@@ -727,7 +734,6 @@ int medusa_set_hue(struct cx25821_dev *dev, int hue, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
{
int ret_val = 0;
@@ -768,98 +774,90 @@ int medusa_set_saturation(struct cx25821_dev *dev, int saturation, int decoder)
return ret_val;
}
-/////////////////////////////////////////////////////////////////////////////////////////
-// Program the display sequence and monitor output.
-//
+/* Program the display sequence and monitor output. */
+
int medusa_video_init(struct cx25821_dev *dev)
{
- u32 value = 0, tmp = 0;
- int ret_val = 0;
- int i = 0;
+ u32 value, tmp = 0;
+ int ret_val;
+ int i;
mutex_lock(&dev->lock);
_num_decoders = dev->_max_num_decoders;
- // disable Auto source selection on all video decoders
+ /* disable Auto source selection on all video decoders */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFF0FF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
+ if (ret_val < 0)
+ goto error;
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
- // Turn off Master source switch enable
+ /* Turn off Master source switch enable */
value = cx25821_i2c_read(&dev->i2c_bus[0], MON_A_CTRL, &tmp);
value &= 0xFFFFFFDF;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], MON_A_CTRL, value);
-
if (ret_val < 0)
- return -EINVAL;
+ goto error;
mutex_unlock(&dev->lock);
- for (i = 0; i < _num_decoders; i++) {
+ for (i = 0; i < _num_decoders; i++)
medusa_set_decoderduration(dev, i, _display_field_cnt[i]);
- }
mutex_lock(&dev->lock);
- // Select monitor as DENC A input, power up the DAC
+ /* Select monitor as DENC A input, power up the DAC */
value = cx25821_i2c_read(&dev->i2c_bus[0], DENC_AB_CTRL, &tmp);
value &= 0xFF70FF70;
- value |= 0x00090008; // set en_active
+ value |= 0x00090008; /* set en_active */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], DENC_AB_CTRL, value);
+ if (ret_val < 0)
+ goto error;
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
- // enable input is VIP/656
+ /* enable input is VIP/656 */
value = cx25821_i2c_read(&dev->i2c_bus[0], BYP_AB_CTRL, &tmp);
- value |= 0x00040100; // enable VIP
+ value |= 0x00040100; /* enable VIP */
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], BYP_AB_CTRL, value);
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
- // select AFE clock to output mode
+ if (ret_val < 0)
+ goto error;
+
+ /* select AFE clock to output mode */
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
value &= 0x83FFFFFF;
- ret_val =
- cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
- value | 0x10000000);
+ ret_val = cx25821_i2c_write(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL,
+ value | 0x10000000);
+ if (ret_val < 0)
+ goto error;
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
- // Turn on all of the data out and control output pins.
+ /* Turn on all of the data out and control output pins. */
value = cx25821_i2c_read(&dev->i2c_bus[0], PIN_OE_CTRL, &tmp);
value &= 0xFEF0FE00;
if (_num_decoders == MAX_DECODERS) {
- // Note: The octal board does not support control pins(bit16-19).
- // These bits are ignored in the octal board.
- value |= 0x010001F8; // disable VDEC A-C port, default to Mobilygen Interface
+ /*
+ * Note: The octal board does not support control pins (bits 16-19).
+ * These bits are ignored in the octal board.
+ *
+ * disable VDEC A-C port, default to Mobilygen Interface
+ */
+ value |= 0x010001F8;
} else {
- value |= 0x010F0108; // disable VDEC A-C port, default to Mobilygen Interface
+ /* disable VDEC A-C port, default to Mobilygen Interface */
+ value |= 0x010F0108;
}
value |= 7;
ret_val = cx25821_i2c_write(&dev->i2c_bus[0], PIN_OE_CTRL, value);
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
+ if (ret_val < 0)
+ goto error;
mutex_unlock(&dev->lock);
ret_val = medusa_set_videostandard(dev);
+ return ret_val;
- if (ret_val < 0)
- return -EINVAL;
-
- return 1;
+error:
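+ /* common failure path: drop the device lock and propagate the error code */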
+ mutex_unlock(&dev->lock);
+ return ret_val;
}
diff --git a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
index cc51618cffa..343df6619fe 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream-ch2.c
@@ -769,8 +769,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
if (dev->input_filename_ch2) {
str_length = strlen(dev->input_filename_ch2);
- dev->_filename_ch2 =
- (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename_ch2 = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_filename_ch2)
goto error;
@@ -779,8 +778,7 @@ int cx25821_vidupstream_init_ch2(struct cx25821_dev *dev, int channel_select,
str_length + 1);
} else {
str_length = strlen(dev->_defaultname_ch2);
- dev->_filename_ch2 =
- (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename_ch2 = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_filename_ch2)
goto error;
diff --git a/drivers/staging/cx25821/cx25821-video-upstream.c b/drivers/staging/cx25821/cx25821-video-upstream.c
index 6d48a1e26d1..7a3dad91eba 100644
--- a/drivers/staging/cx25821/cx25821-video-upstream.c
+++ b/drivers/staging/cx25821/cx25821-video-upstream.c
@@ -32,7 +32,7 @@
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards");
MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>");
@@ -60,9 +60,8 @@ int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
- if (lines > 4) {
+ if (lines > 4)
lines = 4;
- }
BUG_ON(lines < 2);
@@ -97,7 +96,7 @@ int cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev,
}
static __le32 *cx25821_update_riscprogram(struct cx25821_dev *dev,
- __le32 * rp, unsigned int offset,
+ __le32 *rp, unsigned int offset,
unsigned int bpl, u32 sync_line,
unsigned int lines, int fifo_enable,
int field_type)
@@ -108,9 +107,8 @@ static __le32 *cx25821_update_riscprogram(struct cx25821_dev *dev,
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -140,14 +138,12 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
int dist_betwn_starts = bpl * 2;
/* sync instruction */
- if (sync_line != NO_SYNC_LINE) {
+ if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
- }
if (USE_RISC_NOOP_VIDEO) {
- for (i = 0; i < NUM_NO_OPS; i++) {
+ for (i = 0; i < NUM_NO_OPS; i++)
*(rp++) = cpu_to_le32(RISC_NOOP);
- }
}
/* scan lines */
@@ -157,12 +153,13 @@ static __le32 *cx25821_risc_field_upstream(struct cx25821_dev *dev, __le32 * rp,
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
if ((lines <= NTSC_FIELD_HEIGHT)
- || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC)) {
- offset += dist_betwn_starts; //to skip the other field line
- }
+ || (line < (NTSC_FIELD_HEIGHT - 1)) || !(dev->_isNTSC))
+ /* to skip the other field line */
+ offset += dist_betwn_starts;
- // check if we need to enable the FIFO after the first 4 lines
- // For the upstream video channel, the risc engine will enable the FIFO.
+ /* Check if we need to enable the FIFO after the first 4 lines.
+ * For the upstream video channel, the risc engine will enable
+ * the FIFO. */
if (fifo_enable && line == 3) {
*(rp++) = RISC_WRITECR;
*(rp++) = sram_ch->dma_ctl;
@@ -181,7 +178,8 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
{
__le32 *rp;
int fifo_enable = 0;
- int singlefield_lines = lines >> 1; //get line count for single field
+ /* get line count for single field */
+ int singlefield_lines = lines >> 1;
int odd_num_lines = singlefield_lines;
int frame = 0;
int frame_size = 0;
@@ -225,7 +223,7 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
fifo_enable = FIFO_DISABLE;
- //Even Field
+ /* Even Field */
rp = cx25821_risc_field_upstream(dev, rp,
dev->_data_buf_phys_addr +
databuf_offset, bottom_offset,
@@ -241,7 +239,9 @@ int cx25821_risc_buffer_upstream(struct cx25821_dev *dev,
risc_flag = RISC_CNT_INC;
}
- // Loop to 2ndFrameRISC or to Start of Risc program & generate IRQ
+ /* Loop to 2ndFrameRISC or to Start of Risc
+ * program & generate IRQ
+ */
*(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -258,18 +258,18 @@ void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev)
if (!dev->_is_running) {
printk
- ("cx25821: No video file is currently running so return!\n");
+ (KERN_INFO "cx25821: No video file is currently running so return!\n");
return;
}
- //Disable RISC interrupts
+ /* Disable RISC interrupts */
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp & ~_intr_msk);
- //Turn OFF risc and fifo enable
+ /* Turn OFF risc and fifo enable */
tmp = cx_read(sram_ch->dma_ctl);
cx_write(sram_ch->dma_ctl, tmp & ~(FLD_VID_FIFO_EN | FLD_VID_RISC_EN));
- //Clear data buffer memory
+ /* Clear data buffer memory */
if (dev->_data_buf_virt_addr)
memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size);
@@ -292,9 +292,8 @@ void cx25821_stop_upstream_video_ch1(struct cx25821_dev *dev)
void cx25821_free_mem_upstream_ch1(struct cx25821_dev *dev)
{
- if (dev->_is_running) {
+ if (dev->_is_running)
cx25821_stop_upstream_video_ch1(dev);
- }
if (dev->_dma_virt_addr) {
pci_free_consistent(dev->pci, dev->_risc_size,
@@ -347,19 +346,19 @@ int cx25821_get_frame(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_filename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered!",
+ printk(KERN_ERR "%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
}
if (!myfile->f_op->read) {
- printk("%s: File has no READ operations registered!",
+ printk(KERN_ERR "%s: File has no READ operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -412,7 +411,7 @@ static void cx25821_vidups_handler(struct work_struct *work)
container_of(work, struct cx25821_dev, _irq_work_entry);
if (!dev) {
- printk("ERROR %s(): since container_of(work_struct) FAILED! \n",
+ printk(KERN_ERR "ERROR %s(): since container_of(work_struct) FAILED!\n",
__func__);
return;
}
@@ -438,12 +437,12 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (IS_ERR(myfile)) {
const int open_errno = -PTR_ERR(myfile);
- printk("%s(): ERROR opening file(%s) with errno = %d! \n",
+ printk(KERN_ERR "%s(): ERROR opening file(%s) with errno = %d!\n",
__func__, dev->_filename, open_errno);
return PTR_ERR(myfile);
} else {
if (!(myfile->f_op)) {
- printk("%s: File has no file operations registered!",
+ printk(KERN_ERR "%s: File has no file operations registered!",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -451,7 +450,7 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (!myfile->f_op->read) {
printk
- ("%s: File has no READ operations registered! Returning.",
+ (KERN_ERR "%s: File has no READ operations registered! Returning.",
__func__);
filp_close(myfile, NULL);
return -EIO;
@@ -490,9 +489,8 @@ int cx25821_openfile(struct cx25821_dev *dev, struct sram_channel *sram_ch)
if (i > 0)
dev->_frame_count++;
- if (vfs_read_retval < line_size) {
+ if (vfs_read_retval < line_size)
break;
- }
}
dev->_file_status =
@@ -528,11 +526,11 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
if (!dev->_dma_virt_addr) {
printk
- ("cx25821: FAILED to allocate memory for Risc buffer! Returning.\n");
+ (KERN_ERR "cx25821: FAILED to allocate memory for Risc buffer! Returning.\n");
return -ENOMEM;
}
- //Clear memory at address
+ /* Clear memory at address */
memset(dev->_dma_virt_addr, 0, dev->_risc_size);
if (dev->_data_buf_virt_addr != NULL) {
@@ -540,7 +538,7 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
dev->_data_buf_virt_addr,
dev->_data_buf_phys_addr);
}
- //For Video Data buffer allocation
+ /* For Video Data buffer allocation */
dev->_data_buf_virt_addr =
pci_alloc_consistent(dev->pci, dev->upstream_databuf_size,
&data_dma_addr);
@@ -549,30 +547,30 @@ int cx25821_upstream_buffer_prepare(struct cx25821_dev *dev,
if (!dev->_data_buf_virt_addr) {
printk
- ("cx25821: FAILED to allocate memory for data buffer! Returning.\n");
+ (KERN_ERR "cx25821: FAILED to allocate memory for data buffer! Returning.\n");
return -ENOMEM;
}
- //Clear memory at address
+ /* Clear memory at address */
memset(dev->_data_buf_virt_addr, 0, dev->_data_buf_size);
ret = cx25821_openfile(dev, sram_ch);
if (ret < 0)
return ret;
- //Create RISC programs
+ /* Create RISC programs */
ret =
cx25821_risc_buffer_upstream(dev, dev->pci, 0, bpl,
dev->_lines_count);
if (ret < 0) {
printk(KERN_INFO
- "cx25821: Failed creating Video Upstream Risc programs! \n");
+ "cx25821: Failed creating Video Upstream Risc programs!\n");
goto error;
}
return 0;
- error:
+error:
return ret;
}
@@ -588,10 +586,11 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
__le32 *rp;
if (status & FLD_VID_SRC_RISC1) {
- // We should only process one program per call
+ /* We should only process one program per call */
u32 prog_cnt = cx_read(channel->gpcnt);
- //Since we've identified our IRQ, clear our bits from the interrupt mask and interrupt status registers
+ /* Since we've identified our IRQ, clear our bits from the
+ * interrupt mask and interrupt status registers */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp & ~_intr_msk);
cx_write(channel->int_stat, _intr_msk);
@@ -632,7 +631,7 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
FIFO_DISABLE,
ODD_FIELD);
- // Jump to Even Risc program of 1st Frame
+ /* Jump to Even Risc program of 1st Frame */
*(rp++) = cpu_to_le32(RISC_JUMP);
*(rp++) = cpu_to_le32(risc_phys_jump_addr);
*(rp++) = cpu_to_le32(0);
@@ -643,24 +642,24 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
} else {
if (status & FLD_VID_SRC_UF)
printk
- ("%s: Video Received Underflow Error Interrupt!\n",
+ (KERN_ERR "%s: Video Received Underflow Error Interrupt!\n",
__func__);
if (status & FLD_VID_SRC_SYNC)
- printk("%s: Video Received Sync Error Interrupt!\n",
+ printk(KERN_ERR "%s: Video Received Sync Error Interrupt!\n",
__func__);
if (status & FLD_VID_SRC_OPC_ERR)
- printk("%s: Video Received OpCode Error Interrupt!\n",
+ printk(KERN_ERR "%s: Video Received OpCode Error Interrupt!\n",
__func__);
}
if (dev->_file_status == END_OF_FILE) {
- printk("cx25821: EOF Channel 1 Framecount = %d\n",
+ printk(KERN_ERR "cx25821: EOF Channel 1 Framecount = %d\n",
dev->_frame_count);
return -1;
}
- //ElSE, set the interrupt mask register, re-enable irq.
+ /* Else, set the interrupt mask register, re-enable irq. */
int_msk_tmp = cx_read(channel->int_msk);
cx_write(channel->int_msk, int_msk_tmp |= _intr_msk);
@@ -685,17 +684,16 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
- // Only deal with our interrupt
+ /* Only deal with our interrupt */
if (vid_status) {
handled =
cx25821_video_upstream_irq(dev, channel_num, vid_status);
}
- if (handled < 0) {
+ if (handled < 0)
cx25821_stop_upstream_video_ch1(dev);
- } else {
+ else
handled += handled;
- }
return IRQ_RETVAL(handled);
}
@@ -714,19 +712,19 @@ void cx25821_set_pixelengine(struct cx25821_dev *dev, struct sram_channel *ch,
value |= dev->_isNTSC ? 0 : 0x10;
cx_write(ch->vid_fmt_ctl, value);
- // set number of active pixels in each line. Default is 720 pixels in both NTSC and PAL format
+ /* set number of active pixels in each line.
+ * Default is 720 pixels in both NTSC and PAL format */
cx_write(ch->vid_active_ctl1, width);
num_lines = (height / 2) & 0x3FF;
odd_num_lines = num_lines;
- if (dev->_isNTSC) {
+ if (dev->_isNTSC)
odd_num_lines += 1;
- }
value = (num_lines << 16) | odd_num_lines;
- // set number of active lines in field 0 (top) and field 1 (bottom)
+ /* set number of active lines in field 0 (top) and field 1 (bottom) */
cx_write(ch->vid_active_ctl2, value);
cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3);
@@ -738,21 +736,26 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
u32 tmp = 0;
int err = 0;
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
- // Set the physical start address of the RISC program in the initial program counter(IPC) member of the cmds.
+ /* Set the physical start address of the RISC program in the initial
+ * program counter(IPC) member of the cmds.
+ */
cx_write(sram_ch->cmds_start + 0, dev->_dma_phys_addr);
- cx_write(sram_ch->cmds_start + 4, 0); /* Risc IPC High 64 bits 63-32 */
+ /* Risc IPC High 64 bits 63-32 */
+ cx_write(sram_ch->cmds_start + 4, 0);
/* reset counter */
cx_write(sram_ch->gpcnt_ctl, 3);
- // Clear our bits from the interrupt status register.
+ /* Clear our bits from the interrupt status register. */
cx_write(sram_ch->int_stat, _intr_msk);
- //Set the interrupt mask register, enable irq.
+ /* Set the interrupt mask register, enable irq. */
cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit));
tmp = cx_read(sram_ch->int_msk);
cx_write(sram_ch->int_msk, tmp |= _intr_msk);
@@ -766,7 +769,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
goto fail_irq;
}
- // Start the DMA engine
+ /* Start the DMA engine */
tmp = cx_read(sram_ch->dma_ctl);
cx_set(sram_ch->dma_ctl, tmp | FLD_VID_RISC_EN);
@@ -775,7 +778,7 @@ int cx25821_start_video_dma_upstream(struct cx25821_dev *dev,
return 0;
- fail_irq:
+fail_irq:
cx25821_dev_unregister(dev);
return err;
}
@@ -792,7 +795,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
int str_length = 0;
if (dev->_is_running) {
- printk("Video Channel is still running so return!\n");
+ printk(KERN_INFO "Video Channel is still running so return!\n");
return 0;
}
@@ -804,10 +807,12 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
if (!dev->_irq_queues) {
printk
- ("cx25821: create_singlethread_workqueue() for Video FAILED!\n");
+ (KERN_ERR "cx25821: create_singlethread_workqueue() for Video FAILED!\n");
return -ENOMEM;
}
- // 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for channel A-C
+ /* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
+ * channel A-C
+ */
tmp = cx_read(VID_CH_MODE_SEL);
cx_write(VID_CH_MODE_SEL, tmp | 0x1B0001FF);
@@ -825,7 +830,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
if (dev->input_filename) {
str_length = strlen(dev->input_filename);
- dev->_filename = (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_filename)
goto error;
@@ -833,7 +838,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
memcpy(dev->_filename, dev->input_filename, str_length + 1);
} else {
str_length = strlen(dev->_defaultname);
- dev->_filename = (char *)kmalloc(str_length + 1, GFP_KERNEL);
+ dev->_filename = kmalloc(str_length + 1, GFP_KERNEL);
if (!dev->_filename)
goto error;
@@ -841,7 +846,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
memcpy(dev->_filename, dev->_defaultname, str_length + 1);
}
- //Default if filename is empty string
+ /* Default if filename is empty string */
if (strcmp(dev->input_filename, "") == 0) {
if (dev->_isNTSC) {
dev->_filename =
@@ -875,7 +880,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
dev->upstream_riscbuf_size = risc_buffer_size * 2;
dev->upstream_databuf_size = data_frame_size * 2;
- //Allocating buffers and prepare RISC program
+ /* Allocating buffers and prepare RISC program */
retval = cx25821_upstream_buffer_prepare(dev, sram_ch, dev->_line_size);
if (retval < 0) {
printk(KERN_ERR
@@ -888,7 +893,7 @@ int cx25821_vidupstream_init_ch1(struct cx25821_dev *dev, int channel_select,
return 0;
- error:
+error:
cx25821_dev_unregister(dev);
return err;
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 8cd3986d2e5..791212c1a66 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -54,34 +54,34 @@ static void init_controls(struct cx25821_dev *dev, int chan_num);
struct cx25821_fmt formats[] = {
{
- .name = "8 bpp, gray",
- .fourcc = V4L2_PIX_FMT_GREY,
- .depth = 8,
- .flags = FORMAT_FLAGS_PACKED,
+ .name = "8 bpp, gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
+ .flags = FORMAT_FLAGS_PACKED,
}, {
- .name = "4:1:1, packed, Y41P",
- .fourcc = V4L2_PIX_FMT_Y41P,
- .depth = 12,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:0, YUV",
- .fourcc = V4L2_PIX_FMT_YUV420,
- .depth = 12,
- .flags = FORMAT_FLAGS_PACKED,
- },
+ .name = "4:1:1, packed, Y41P",
+ .fourcc = V4L2_PIX_FMT_Y41P,
+ .depth = 12,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .flags = FORMAT_FLAGS_PACKED,
+ }, {
+ .name = "4:2:0, YUV",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .flags = FORMAT_FLAGS_PACKED,
+ },
};
-int get_format_size(void)
+int cx25821_get_format_size(void)
{
return ARRAY_SIZE(formats);
}
@@ -102,7 +102,7 @@ struct cx25821_fmt *format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void dump_video_queue(struct cx25821_dev *dev, struct cx25821_dmaqueue *q)
+void cx25821_dump_video_queue(struct cx25821_dev *dev, struct cx25821_dmaqueue *q)
{
struct cx25821_buffer *buf;
struct list_head *item;
@@ -212,7 +212,7 @@ static int cx25821_ctrl_query(struct v4l2_queryctrl *qctrl)
*/
// resource management
-int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
+int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
{
dprintk(1, "%s()\n", __func__);
if (fh->resources & bit)
@@ -234,17 +234,17 @@ int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bit)
return 1;
}
-int res_check(struct cx25821_fh *fh, unsigned int bit)
+int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit)
{
return fh->resources & bit;
}
-int res_locked(struct cx25821_dev *dev, unsigned int bit)
+int cx25821_res_locked(struct cx25821_dev *dev, unsigned int bit)
{
return dev->resources & bit;
}
-void res_free(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bits)
+void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh, unsigned int bits)
{
BUG_ON((fh->resources & bits) != bits);
dprintk(1, "%s()\n", __func__);
@@ -506,7 +506,7 @@ int cx25821_video_register(struct cx25821_dev *dev, int chan_num,
return err;
}
-int buffer_setup(struct videobuf_queue *q, unsigned int *count,
+int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
unsigned int *size)
{
struct cx25821_fh *fh = q->priv_data;
@@ -516,13 +516,13 @@ int buffer_setup(struct videobuf_queue *q, unsigned int *count,
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
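+ /* clamp the buffer count so the total allocation stays within vid_limit megabytes */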
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
-int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
+int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx25821_fh *fh = q->priv_data;
@@ -648,7 +648,7 @@ int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return rc;
}
-void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+void cx25821_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
@@ -667,7 +667,7 @@ struct videobuf_queue *get_queue(struct cx25821_fh *fh)
}
}
-int get_resource(struct cx25821_fh *fh, int resource)
+int cx25821_get_resource(struct cx25821_fh *fh, int resource)
{
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -678,7 +678,7 @@ int get_resource(struct cx25821_fh *fh, int resource)
}
}
-int video_mmap(struct file *file, struct vm_area_struct *vma)
+int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct cx25821_fh *fh = file->private_data;
@@ -686,7 +686,7 @@ int video_mmap(struct file *file, struct vm_area_struct *vma)
}
/* VIDEO IOCTLS */
-int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct cx25821_fh *fh = priv;
@@ -700,7 +700,7 @@ int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
+int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct cx25821_fmt *fmt;
enum v4l2_field field;
@@ -746,7 +746,7 @@ int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
return 0;
}
-int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+int cx25821_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -761,7 +761,7 @@ int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
return 0;
}
-int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
@@ -774,7 +774,7 @@ int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
-int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
+int cx25821_vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
struct cx25821_fh *fh = priv;
struct videobuf_queue *q;
@@ -801,25 +801,25 @@ int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
}
#endif
-int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p)
+int cx25821_vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
struct cx25821_fh *fh = priv;
return videobuf_reqbufs(get_queue(fh), p);
}
-int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+int cx25821_vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx25821_fh *fh = priv;
return videobuf_querybuf(get_queue(fh), p);
}
-int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
struct cx25821_fh *fh = priv;
return videobuf_qbuf(get_queue(fh), p);
}
-int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
+int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
@@ -828,7 +828,7 @@ int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p)
return 0;
}
-int vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
+int cx25821_vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
{
struct cx25821_fh *fh = f;
struct cx25821_dev *dev = ((struct cx25821_fh *)f)->dev;
@@ -837,7 +837,7 @@ int vidioc_s_priority(struct file *file, void *f, enum v4l2_priority prio)
}
#ifdef TUNER_FLAG
-int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
+int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -846,7 +846,7 @@ int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms)
dprintk(1, "%s()\n", __func__);
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -891,14 +891,14 @@ int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i)
return 0;
}
-int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+int cx25821_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
dprintk(1, "%s()\n", __func__);
return cx25821_enum_input(dev, i);
}
-int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -907,7 +907,7 @@ int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -916,7 +916,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned int i)
dprintk(1, "%s(%d)\n", __func__, i);
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -933,7 +933,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned int i)
}
#ifdef TUNER_FLAG
-int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev = fh->dev;
@@ -960,15 +960,15 @@ int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f)
return 0;
}
-int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
+int cx25821_vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
{
struct cx25821_fh *fh = priv;
struct cx25821_dev *dev;
int err;
if (fh) {
dev = fh->dev;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -978,7 +977,7 @@ int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f)
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
-int vidioc_g_register(struct file *file, void *fh,
+int cx25821_vidioc_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)fh)->dev;
@@ -991,7 +990,7 @@ int vidioc_g_register(struct file *file, void *fh,
return 0;
}
-int vidioc_s_register(struct file *file, void *fh,
+int cx25821_vidioc_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)fh)->dev;
@@ -1007,7 +1006,7 @@ int vidioc_s_register(struct file *file, void *fh,
#endif
#ifdef TUNER_FLAG
-int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+int cx25821_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1025,14 +1024,14 @@ int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
return 0;
}
-int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
struct cx25821_fh *fh = priv;
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -1108,7 +1107,7 @@ static int cx25821_ctrl_query(struct v4l2_queryctrl *qctrl)
return 0;
}
-int vidioc_queryctrl(struct file *file, void *priv,
+int cx25821_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl)
{
return cx25821_ctrl_query(qctrl);
@@ -1127,7 +1126,7 @@ static const struct v4l2_queryctrl *ctrl_by_id(unsigned int id)
return NULL;
}
-int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl)
+int cx25821_vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctl)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1216,7 +1215,7 @@ static void init_controls(struct cx25821_dev *dev, int chan_num)
}
}
-int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
+int cx25821_vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
@@ -1233,28 +1232,28 @@ int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap)
return 0;
}
-int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev;
struct cx25821_fh *fh = priv;
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
- // vidioc_s_crop not supported
+ /* cx25821_vidioc_s_crop not supported */
return -EINVAL;
}
-int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
- // vidioc_g_crop not supported
+ /* cx25821_vidioc_g_crop not supported */
return -EINVAL;
}
-int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
+int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id *norm)
{
// medusa does not support video standard sensing of current input
*norm = CX25821_NORMS;
@@ -1262,7 +1261,7 @@ int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm)
return 0;
}
-int is_valid_width(u32 width, v4l2_std_id tvnorm)
+int cx25821_is_valid_width(u32 width, v4l2_std_id tvnorm)
{
if (tvnorm == V4L2_STD_PAL_BG) {
if (width == 352 || width == 720)
@@ -1280,7 +1279,7 @@ int is_valid_width(u32 width, v4l2_std_id tvnorm)
return 0;
}
-int is_valid_height(u32 height, v4l2_std_id tvnorm)
+int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm)
{
if (tvnorm == V4L2_STD_PAL_BG) {
if (height == 576 || height == 288)
diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/staging/cx25821/cx25821-video.h
index 4417ff5d90d..0bddc02be57 100644
--- a/drivers/staging/cx25821/cx25821-video.h
+++ b/drivers/staging/cx25821/cx25821-video.h
@@ -101,7 +101,7 @@ extern struct cx25821_fmt formats[];
extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
-extern void dump_video_queue(struct cx25821_dev *dev,
+extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q);
extern void cx25821_video_wakeup(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q, u32 count);
@@ -110,11 +110,11 @@ extern void cx25821_video_wakeup(struct cx25821_dev *dev,
extern int cx25821_set_tvnorm(struct cx25821_dev *dev, v4l2_std_id norm);
#endif
-extern int res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
+extern int cx25821_res_get(struct cx25821_dev *dev, struct cx25821_fh *fh,
unsigned int bit);
-extern int res_check(struct cx25821_fh *fh, unsigned int bit);
-extern int res_locked(struct cx25821_dev *dev, unsigned int bit);
-extern void res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
+extern int cx25821_res_check(struct cx25821_fh *fh, unsigned int bit);
+extern int cx25821_res_locked(struct cx25821_dev *dev, unsigned int bit);
+extern void cx25821_res_free(struct cx25821_dev *dev, struct cx25821_fh *fh,
unsigned int bits);
extern int cx25821_video_mux(struct cx25821_dev *dev, unsigned int input);
extern int cx25821_start_video_dma(struct cx25821_dev *dev,
@@ -128,67 +128,67 @@ extern int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status);
extern void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num);
extern int cx25821_video_register(struct cx25821_dev *dev, int chan_num,
struct video_device *video_template);
-extern int get_format_size(void);
+extern int cx25821_get_format_size(void);
-extern int buffer_setup(struct videobuf_queue *q, unsigned int *count,
+extern int cx25821_buffer_setup(struct videobuf_queue *q, unsigned int *count,
unsigned int *size);
-extern int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
+extern int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
enum v4l2_field field);
-extern void buffer_release(struct videobuf_queue *q,
+extern void cx25821_buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb);
extern struct videobuf_queue *get_queue(struct cx25821_fh *fh);
-extern int get_resource(struct cx25821_fh *fh, int resource);
-extern int video_mmap(struct file *file, struct vm_area_struct *vma);
-extern int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_get_resource(struct cx25821_fh *fh, int resource);
+extern int cx25821_video_mmap(struct file *file, struct vm_area_struct *vma);
+extern int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f);
-extern int vidioc_querycap(struct file *file, void *priv,
+extern int cx25821_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap);
-extern int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
-extern int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf);
-extern int vidioc_reqbufs(struct file *file, void *priv,
+extern int cx25821_vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf);
+extern int cx25821_vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p);
-extern int vidioc_querybuf(struct file *file, void *priv,
+extern int cx25821_vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p);
-extern int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
-extern int vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms);
+extern int cx25821_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+extern int cx25821_vidioc_s_std(struct file *file, void *priv, v4l2_std_id * tvnorms);
extern int cx25821_enum_input(struct cx25821_dev *dev, struct v4l2_input *i);
-extern int vidioc_enum_input(struct file *file, void *priv,
+extern int cx25821_vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i);
-extern int vidioc_g_input(struct file *file, void *priv, unsigned int *i);
-extern int vidioc_s_input(struct file *file, void *priv, unsigned int i);
-extern int vidioc_g_ctrl(struct file *file, void *priv,
+extern int cx25821_vidioc_g_input(struct file *file, void *priv, unsigned int *i);
+extern int cx25821_vidioc_s_input(struct file *file, void *priv, unsigned int i);
+extern int cx25821_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl);
-extern int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+extern int cx25821_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f);
-extern int vidioc_g_frequency(struct file *file, void *priv,
+extern int cx25821_vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f);
extern int cx25821_set_freq(struct cx25821_dev *dev, struct v4l2_frequency *f);
-extern int vidioc_s_frequency(struct file *file, void *priv,
+extern int cx25821_vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f);
-extern int vidioc_g_register(struct file *file, void *fh,
+extern int cx25821_vidioc_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg);
-extern int vidioc_s_register(struct file *file, void *fh,
+extern int cx25821_vidioc_s_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg);
-extern int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
-extern int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+extern int cx25821_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
+extern int cx25821_vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t);
-extern int is_valid_width(u32 width, v4l2_std_id tvnorm);
-extern int is_valid_height(u32 height, v4l2_std_id tvnorm);
+extern int cx25821_is_valid_width(u32 width, v4l2_std_id tvnorm);
+extern int cx25821_is_valid_height(u32 height, v4l2_std_id tvnorm);
-extern int vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p);
-extern int vidioc_s_priority(struct file *file, void *f,
+extern int cx25821_vidioc_g_priority(struct file *file, void *f, enum v4l2_priority *p);
+extern int cx25821_vidioc_s_priority(struct file *file, void *f,
enum v4l2_priority prio);
-extern int vidioc_queryctrl(struct file *file, void *priv,
+extern int cx25821_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qctrl);
extern int cx25821_set_control(struct cx25821_dev *dev,
struct v4l2_control *ctrl, int chan_num);
-extern int vidioc_cropcap(struct file *file, void *fh,
+extern int cx25821_vidioc_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cropcap);
-extern int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-extern int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop);
+extern int cx25821_vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop);
+extern int cx25821_vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop);
-extern int vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm);
+extern int cx25821_vidioc_querystd(struct file *file, void *priv, v4l2_std_id * norm);
#endif
diff --git a/drivers/staging/cx25821/cx25821-video0.c b/drivers/staging/cx25821/cx25821-video0.c
index ad7a6912911..0be2cc15d85 100644
--- a/drivers/staging/cx25821/cx25821-video0.c
+++ b/drivers/staging/cx25821/cx25821-video0.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO0))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO0))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO0)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,19 +207,19 @@ static int video_release(struct file *file)
cx_write(channel0->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO0)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO0)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO0);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO0);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO0)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO0)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO0);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO0);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,13 +274,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = PIXEL_FRMT_422;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -363,7 +363,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video1.c b/drivers/staging/cx25821/cx25821-video1.c
index e3f3c4ac790..b0bae627bfb 100644
--- a/drivers/staging/cx25821/cx25821-video1.c
+++ b/drivers/staging/cx25821/cx25821-video1.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO1))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO1))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO1)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO1)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,19 +207,19 @@ static int video_release(struct file *file)
cx_write(channel1->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO1)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO1)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO1);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO1);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO1)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO1)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO1);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO1);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,13 +274,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -363,7 +363,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video2.c b/drivers/staging/cx25821/cx25821-video2.c
index 36fb855a497..400cdb80674 100644
--- a/drivers/staging/cx25821/cx25821-video2.c
+++ b/drivers/staging/cx25821/cx25821-video2.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO2))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO2))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO2)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO2)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,19 +207,19 @@ static int video_release(struct file *file)
cx_write(channel2->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO2)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO2)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO2);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO2);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO2)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO2)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO2);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO2);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,13 +274,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -365,7 +365,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -380,50 +380,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video3.c b/drivers/staging/cx25821/cx25821-video3.c
index 1e0f10abdbc..3b216ed0906 100644
--- a/drivers/staging/cx25821/cx25821-video3.c
+++ b/drivers/staging/cx25821/cx25821-video3.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO3))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO3))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO3)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO3)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,19 +207,19 @@ static int video_release(struct file *file)
cx_write(channel3->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO3)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO3)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO3);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO3);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO3)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO3)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO3);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO3);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,13 +274,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -364,7 +364,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -379,50 +379,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video4.c b/drivers/staging/cx25821/cx25821-video4.c
index 0cbe7a79d8c..f7b08c51868 100644
--- a/drivers/staging/cx25821/cx25821-video4.c
+++ b/drivers/staging/cx25821/cx25821-video4.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -146,7 +146,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO4))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO4))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -164,7 +164,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO4)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO4)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -206,19 +206,19 @@ static int video_release(struct file *file)
cx_write(channel4->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO4)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO4)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO4);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO4);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO4)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO4)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO4);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO4);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,12 +274,12 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
// check priority
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -363,7 +363,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video5.c b/drivers/staging/cx25821/cx25821-video5.c
index 5dc08adc12e..59370337b07 100644
--- a/drivers/staging/cx25821/cx25821-video5.c
+++ b/drivers/staging/cx25821/cx25821-video5.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO5))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO5))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO5)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO5)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,19 +207,19 @@ static int video_release(struct file *file)
cx_write(channel5->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO5)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO5)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO5);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO5);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -239,7 +239,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO5)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO5)))) {
return -EBUSY;
}
@@ -257,11 +257,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO5);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO5);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -274,13 +274,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -289,11 +289,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -363,7 +363,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video6.c b/drivers/staging/cx25821/cx25821-video6.c
index 2938ad3ad3c..4db2eb83d35 100644
--- a/drivers/staging/cx25821/cx25821-video6.c
+++ b/drivers/staging/cx25821/cx25821-video6.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -147,7 +147,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO6))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO6))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -165,7 +165,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO6)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO6)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -207,18 +207,18 @@ static int video_release(struct file *file)
cx_write(channel6->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO6)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO6)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO6);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO6);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO6)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO6)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO6);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO6);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -273,13 +273,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -363,7 +363,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -378,50 +378,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-video7.c b/drivers/staging/cx25821/cx25821-video7.c
index 458e525d72a..5e4a769bada 100644
--- a/drivers/staging/cx25821/cx25821-video7.c
+++ b/drivers/staging/cx25821/cx25821-video7.c
@@ -85,10 +85,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -146,7 +146,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO7))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO7))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -164,7 +164,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO7)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO7)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -206,19 +206,19 @@ static int video_release(struct file *file)
cx_write(channel7->dma_ctl, 0); /* FIFO and RISC disable */
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO7)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO7)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO7);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO7);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -238,7 +238,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO7)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO7)))) {
return -EBUSY;
}
@@ -256,11 +256,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO7);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO7);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -273,13 +273,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int pix_format = 0;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -288,11 +288,11 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->vidq.field = f->fmt.pix.field;
// check if width and height is valid based on set standard
- if (is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
+ if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) {
fh->width = f->fmt.pix.width;
}
- if (is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
+ if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) {
fh->height = f->fmt.pix.height;
}
@@ -362,7 +362,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -377,50 +377,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
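Two changes repeat across every cx25821 channel file in this series: the per-file
static helpers (buffer_setup(), res_check(), get_resource() and friends) give way
to shared cx25821_-prefixed symbols from the common video code, and the v4l2
priority helpers are now called with the file handle's priority by value rather
than through a pointer. A minimal sketch of the by-value calls, assuming the
driver's own cx25821.h types; my_release_prio() is a hypothetical wrapper, not
code from this patch:

	#include <media/v4l2-common.h>
	#include "cx25821.h"

	static int my_release_prio(struct cx25821_dev *dev, struct cx25821_fh *fh)
	{
		int err;

		/* fh->prio is passed as a value now, no longer as &fh->prio */
		err = v4l2_prio_check(&dev->prio, fh->prio);
		if (err)
			return err;
		v4l2_prio_close(&dev->prio, fh->prio);
		return 0;
	}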
diff --git a/drivers/staging/cx25821/cx25821-videoioctl.c b/drivers/staging/cx25821/cx25821-videoioctl.c
index 1da52b54a45..d16807d88be 100644
--- a/drivers/staging/cx25821/cx25821-videoioctl.c
+++ b/drivers/staging/cx25821/cx25821-videoioctl.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -145,7 +145,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO_IOCTL))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO_IOCTL))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -163,7 +163,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO_IOCTL)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO_IOCTL)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,19 +189,19 @@ static int video_release(struct file *file)
struct cx25821_dev *dev = fh->dev;
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO_IOCTL)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO_IOCTL)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO_IOCTL);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO_IOCTL);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO_IOCTL)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO_IOCTL)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO_IOCTL);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO_IOCTL);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -256,13 +256,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -409,7 +409,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -424,50 +424,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_set,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-vidups10.c b/drivers/staging/cx25821/cx25821-vidups10.c
index b76d9f62c3d..c746a17ccbd 100644
--- a/drivers/staging/cx25821/cx25821-vidups10.c
+++ b/drivers/staging/cx25821/cx25821-vidups10.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -143,7 +143,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO10))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO10))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -161,7 +161,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO10)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO10)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,19 +189,19 @@ static int video_release(struct file *file)
//cx_write(channel10->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO10)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO10)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO10);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO10);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO10)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO10)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO10);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO10);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -299,13 +299,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -347,7 +347,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -362,50 +362,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream10,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cx25821/cx25821-vidups9.c b/drivers/staging/cx25821/cx25821-vidups9.c
index 1580da3b29a..466e0f34ae3 100644
--- a/drivers/staging/cx25821/cx25821-vidups9.c
+++ b/drivers/staging/cx25821/cx25821-vidups9.c
@@ -86,10 +86,10 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
}
static struct videobuf_queue_ops cx25821_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
+ .buf_setup = cx25821_buffer_setup,
+ .buf_prepare = cx25821_buffer_prepare,
.buf_queue = buffer_queue,
- .buf_release = buffer_release,
+ .buf_release = cx25821_buffer_release,
};
static int video_open(struct file *file)
@@ -143,7 +143,7 @@ static ssize_t video_read(struct file *file, char __user * data, size_t count,
switch (fh->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO9))
+ if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO9))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
@@ -161,7 +161,7 @@ static unsigned int video_poll(struct file *file,
struct cx25821_fh *fh = file->private_data;
struct cx25821_buffer *buf;
- if (res_check(fh, RESOURCE_VIDEO9)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO9)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
return POLLERR;
@@ -189,19 +189,19 @@ static int video_release(struct file *file)
//cx_write(channel9->dma_ctl, 0);
/* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO9)) {
+ if (cx25821_res_check(fh, RESOURCE_VIDEO9)) {
videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO9);
+ cx25821_res_free(dev, fh, RESOURCE_VIDEO9);
}
if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
+ cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
videobuf_mmap_free(&fh->vidq);
- v4l2_prio_close(&dev->prio, &fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -222,7 +222,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
}
- if (unlikely(!res_get(dev, fh, get_resource(fh, RESOURCE_VIDEO9)))) {
+ if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO9)))) {
return -EBUSY;
}
@@ -240,11 +240,11 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
if (i != fh->type)
return -EINVAL;
- res = get_resource(fh, RESOURCE_VIDEO9);
+ res = cx25821_get_resource(fh, RESOURCE_VIDEO9);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
- res_free(dev, fh, res);
+ cx25821_res_free(dev, fh, res);
return 0;
}
@@ -299,13 +299,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
dprintk(2, "%s()\n", __func__);
- err = vidioc_try_fmt_vid_cap(file, priv, f);
+ err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f);
if (0 != err)
return err;
@@ -345,7 +345,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
struct cx25821_fh *fh = priv;
int err;
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -360,50 +360,50 @@ static const struct v4l2_file_operations video_fops = {
.release = video_release,
.read = video_read,
.poll = video_poll,
- .mmap = video_mmap,
+ .mmap = cx25821_video_mmap,
.ioctl = video_ioctl_upstream9,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_querycap = cx25821_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_reqbufs = cx25821_vidioc_reqbufs,
+ .vidioc_querybuf = cx25821_vidioc_querybuf,
+ .vidioc_qbuf = cx25821_vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
#ifdef TUNER_FLAG
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
+ .vidioc_s_std = cx25821_vidioc_s_std,
+ .vidioc_querystd = cx25821_vidioc_querystd,
#endif
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_cropcap = cx25821_vidioc_cropcap,
+ .vidioc_s_crop = cx25821_vidioc_s_crop,
+ .vidioc_g_crop = cx25821_vidioc_g_crop,
+ .vidioc_enum_input = cx25821_vidioc_enum_input,
+ .vidioc_g_input = cx25821_vidioc_g_input,
+ .vidioc_s_input = cx25821_vidioc_s_input,
+ .vidioc_g_ctrl = cx25821_vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_queryctrl = cx25821_vidioc_queryctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_g_priority = vidioc_g_priority,
- .vidioc_s_priority = vidioc_s_priority,
+ .vidioc_g_priority = cx25821_vidioc_g_priority,
+ .vidioc_s_priority = cx25821_vidioc_s_priority,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
- .vidiocgmbuf = vidiocgmbuf,
+ .vidiocgmbuf = cx25821_vidiocgmbuf,
#endif
#ifdef TUNER_FLAG
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_tuner = cx25821_vidioc_g_tuner,
+ .vidioc_s_tuner = cx25821_vidioc_s_tuner,
+ .vidioc_g_frequency = cx25821_vidioc_g_frequency,
+ .vidioc_s_frequency = cx25821_vidioc_s_frequency,
#endif
#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
+ .vidioc_g_register = cx25821_vidioc_g_register,
+ .vidioc_s_register = cx25821_vidioc_s_register,
#endif
};
diff --git a/drivers/staging/cxt1e1/Kconfig b/drivers/staging/cxt1e1/Kconfig
new file mode 100644
index 00000000000..68e9b6d973f
--- /dev/null
+++ b/drivers/staging/cxt1e1/Kconfig
@@ -0,0 +1,22 @@
+config CXT1E1
+ tristate "SBE wanPMC-C[421]E1T1 hardware support"
+ depends on HDLC && PCI
+ ---help---
+ This driver supports the SBE wanPMC-CxT1E1 1, 2 and 4 port T1/E1
+ channelized stream WAN adapter card which contains an HDLC/Transparent
+ mode controller.
+
+ If you want to compile this driver as a module
+ say M here and read <file:Documentation/modules.txt>.
+ The module will be called 'cxt1e1'.
+
+ If unsure, say N.
+
+config SBE_PMCC4_NCOMM
+ bool "SBE PMCC4 NCOMM support"
+ depends on CXT1E1
+ ---help---
+ SBE supplies optional support for NCOMM products.
+
+ If you have purchased this optional support you must say Y or M
+ here to allow the driver to operate with the NCOMM product.
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
new file mode 100644
index 00000000000..10020d7b79a
--- /dev/null
+++ b/drivers/staging/cxt1e1/Makefile
@@ -0,0 +1,19 @@
+obj-$(CONFIG_CXT1E1) += cxt1e1.o
+
+EXTRA_CFLAGS += -DSBE_PMCC4_ENABLE
+EXTRA_CFLAGS += -DSBE_ISR_TASKLET
+EXTRA_CFLAGS += -DSBE_INCLUDE_SYMBOLS
+
+cxt1e1-objs += \
+ ossiRelease.o \
+ musycc.o \
+ pmcc4_drv.o \
+ comet.o \
+ linux.o \
+ functions.o \
+ hwprobe.o \
+ sbeproc.o \
+ pmc93x6_eeprom.o \
+ sbecrc.o \
+ comet_tables.o \
+ sbeid.o
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
new file mode 100644
index 00000000000..dcbe6b62845
--- /dev/null
+++ b/drivers/staging/cxt1e1/comet.c
@@ -0,0 +1,568 @@
+/* Copyright (C) 2003-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/io.h>
+#include <linux/hdlc.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4.h"
+#include "comet.h"
+#include "comet_tables.h"
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+
+extern int log_level;
+
+#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
+#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
+
+/* forward references */
+STATIC void SetPwrLevel (comet_t * comet);
+STATIC void WrtRcvEqualizerTbl (ci_t * ci, comet_t * comet, u_int32_t *table);
+STATIC void WrtXmtWaveformTbl (ci_t * ci, comet_t * comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+
+
+void *TWV_table[12] = {
+ TWVLongHaul0DB, TWVLongHaul7_5DB, TWVLongHaul15DB, TWVLongHaul22_5DB,
+ TWVShortHaul0, TWVShortHaul1, TWVShortHaul2, TWVShortHaul3, TWVShortHaul4,
+ TWVShortHaul5,
+ TWV_E1_75Ohm, /** PORT POINT - 75 Ohm not supported **/
+ TWV_E1_120Ohm
+};
+
+
+static int
+lbo_tbl_lkup (int t1, int lbo)
+{
+ if ((lbo < CFG_LBO_LH0) || (lbo > CFG_LBO_E120)) /* error switches to
+ * default */
+ {
+ if (t1)
+ lbo = CFG_LBO_LH0; /* default T1 waveform table */
+ else
+ lbo = CFG_LBO_E120; /* default E1 waveform table */
+ }
+ return (lbo - 1); /* make index ZERO relative */
+}
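For illustration only (not part of the patch): the zero-relative index returned
above is meant to select straight out of TWV_table[], assuming the CFG_LBO_LH0
through CFG_LBO_E120 codes number the twelve table entries 1..12:

	/* hypothetical helper: pick a transmit waveform, falling back to the
	 * per-mode default whenever the requested LBO code is out of range */
	static void *pick_twv(int t1, int lbo)
	{
		return TWV_table[lbo_tbl_lkup(t1, lbo)];
	}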
+
+
+void
+init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
+ u_int8_t moreParams)
+{
+ u_int8_t isT1mode;
+ u_int8_t tix = CFG_LBO_LH0; /* T1 default */
+
+ isT1mode = IS_FRAME_ANY_T1 (port_mode);
+ /* T1 or E1 */
+ if (isT1mode)
+ {
+ pci_write_32 ((u_int32_t *) &comet->gbl_cfg, 0xa0); /* Select T1 Mode & PIO
+ * output enabled */
+ tix = lbo_tbl_lkup (isT1mode, CFG_LBO_LH0); /* default T1 waveform
+ * table */
+ } else
+ {
+ pci_write_32 ((u_int32_t *) &comet->gbl_cfg, 0x81); /* Select E1 Mode & PIO
+ * output enabled */
+ tix = lbo_tbl_lkup (isT1mode, CFG_LBO_E120); /* default E1 waveform
+ * table */
+ }
+
+ if (moreParams & CFG_LBO_MASK)
+ tix = lbo_tbl_lkup (isT1mode, moreParams & CFG_LBO_MASK); /* dial-in requested
+ * waveform table */
+
+ /* Tx line Intfc cfg ** Set for analog & no special patterns */
+ pci_write_32 ((u_int32_t *) &comet->tx_line_cfg, 0x00); /* Transmit Line
+ * Interface Config. */
+
+ /* master test ** Ignore Test settings for now */
+ pci_write_32 ((u_int32_t *) &comet->mtest, 0x00); /* making sure it's
+ * Default value */
+
+ /* Turn on Center (CENT) and everything else off */
+ pci_write_32 ((u_int32_t *) &comet->rjat_cfg, 0x10); /* RJAT cfg */
+ /* Set Jitter Attenuation to recommended T1 values */
+ if (isT1mode)
+ {
+ pci_write_32 ((u_int32_t *) &comet->rjat_n1clk, 0x2F); /* RJAT Divider N1
+ * Control */
+ pci_write_32 ((u_int32_t *) &comet->rjat_n2clk, 0x2F); /* RJAT Divider N2
+ * Control */
+ } else
+ {
+ pci_write_32 ((u_int32_t *) &comet->rjat_n1clk, 0xFF); /* RJAT Divider N1
+ * Control */
+ pci_write_32 ((u_int32_t *) &comet->rjat_n2clk, 0xFF); /* RJAT Divider N2
+ * Control */
+ }
+
+ /* Turn on Center (CENT) and everything else off */
+ pci_write_32 ((u_int32_t *) &comet->tjat_cfg, 0x10); /* TJAT Config. */
+
+ /* Do not bypass jitter attenuation and bypass elastic store */
+ pci_write_32 ((u_int32_t *) &comet->rx_opt, 0x00); /* rx opts */
+
+ /* TJAT ctrl & TJAT divider ctrl */
+ /* Set Jitter Attenuation to recommended T1 values */
+ if (isT1mode)
+ {
+ pci_write_32 ((u_int32_t *) &comet->tjat_n1clk, 0x2F); /* TJAT Divider N1
+ * Control */
+ pci_write_32 ((u_int32_t *) &comet->tjat_n2clk, 0x2F); /* TJAT Divider N2
+ * Control */
+ } else
+ {
+ pci_write_32 ((u_int32_t *) &comet->tjat_n1clk, 0xFF); /* TJAT Divider N1
+ * Control */
+ pci_write_32 ((u_int32_t *) &comet->tjat_n2clk, 0xFF); /* TJAT Divider N2
+ * Control */
+ }
+
+ /* 1c: rx ELST cfg 20: tx ELST cfg 28&38: rx&tx data link ctrl */
+ if (isT1mode)
+ { /* Select 193-bit frame format */
+ pci_write_32 ((u_int32_t *) &comet->rx_elst_cfg, 0x00);
+ pci_write_32 ((u_int32_t *) &comet->tx_elst_cfg, 0x00);
+ } else
+ { /* Select 256-bit frame format */
+ pci_write_32 ((u_int32_t *) &comet->rx_elst_cfg, 0x03);
+ pci_write_32 ((u_int32_t *) &comet->tx_elst_cfg, 0x03);
+ pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x00); /* disable T1 data link
+ * receive */
+ pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x00); /* disable T1 data link
+ * transmit */
+ }
+
+ /* the following is a default value */
+ /* Enable 8 out of 10 validation */
+ pci_write_32 ((u_int32_t *) &comet->t1_rboc_ena, 0x00); /* t1RBOC
+ * enable(BOC:BitOriented
+ * Code) */
+ if (isT1mode)
+ {
+
+ /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
+ pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x04); /* 6 bit down, 5 bit up
+ * (assert) */
+ pci_write_32 ((u_int32_t *) &comet->ibcd_act, 0x08); /* line loopback
+ * activate pattern */
+ pci_write_32 ((u_int32_t *) &comet->ibcd_deact, 0x24); /* deactivate code
+ * pattern (i.e.001) */
+ }
+ /* 10: CDRC cfg 28&38: rx&tx data link 1 ctrl 48: t1 frmr cfg */
+ /* 50: SIGX cfg, COSS (change of signaling state) 54: XBAS cfg */
+ /* 60: t1 ALMI cfg */
+ /* Configure Line Coding */
+
+ switch (port_mode)
+ {
+ case CFG_FRAME_SF: /* 1 - T1 B8ZS */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x20); /* 5:B8ZS */
+ pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0);
+ break;
+ case CFG_FRAME_ESF: /* 2 - T1 B8ZS */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x20); /* Bit 5: T1 DataLink
+ * Enable */
+ pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x20); /* 5: T1 DataLink Enable */
+ pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0x30); /* 4:ESF 5:ESFFA */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0x04); /* 2:ESF */
+ pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x30); /* 4:ESF 5:B8ZS */
+ pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0x10); /* 4:ESF */
+ break;
+ case CFG_FRAME_E1PLAIN: /* 3 - HDB3 */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
+ break;
+ case CFG_FRAME_E1CAS: /* 4 - HDB3 */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x60);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0);
+ break;
+ case CFG_FRAME_E1CRC: /* 5 - HDB3 */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x10);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
+ break;
+ case CFG_FRAME_E1CRC_CAS: /* 6 - HDB3 */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x70);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
+ break;
+ case CFG_FRAME_SF_AMI: /* 7 - T1 AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ break;
+ case CFG_FRAME_ESF_AMI: /* 8 - T1 AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x20); /* 5: T1 DataLink Enable */
+ pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x20); /* 5: T1 DataLink Enable */
+ pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0x30); /* Bit 4:ESF 5:ESFFA */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0x04); /* 2:ESF */
+ pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x10); /* 4:ESF */
+ pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0x10); /* 4:ESF */
+ break;
+ case CFG_FRAME_E1PLAIN_AMI: /* 9 - AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x80);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
+ break;
+ case CFG_FRAME_E1CAS_AMI: /* 10 - AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0xe0);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0);
+ break;
+ case CFG_FRAME_E1CRC_AMI: /* 11 - AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x90);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
+ break;
+ case CFG_FRAME_E1CRC_CAS_AMI: /* 12 - AMI */
+ pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
+ * Decoding */
+ pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0xf0);
+ pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
+ break;
+ } /* end switch */
+
+ /***
+ * Set Full Frame mode (NXDSO[1] = 0, NXDSO[0] = 0)
+ * CMODE=1: Clock slave mode with BRCLK as an input,
+ * DE=0: Use falling edge of BRCLK for data,
+ * FE=0: Use falling edge of BRCLK for frame,
+ * CMS=0: Use backplane freq,
+ * RATE[1:0]=0,0: T1
+ ***/
+
+
+ /* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
+ /* note "rate bits can only be set once after reset" */
+ if (clockmaster)
+ { /* CMODE == clockMode, 0=clock master (so
+ * all 3 others should be slave) */
+ if (isT1mode) /* rate = 1.544 Mb/s */
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x00); /* Comet 0 Master
+ * Mode(CMODE=0) */
+ else /* rate = 2.048 Mb/s */
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x01); /* Comet 0 Master
+ * Mode(CMODE=0) */
+
+ /* 31: BRIF frame pulse cfg 06: tx timing options */
+ pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x00); /* Master Mode
+ * i.e.FPMODE=0 (@0x20) */
+ if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
+ {
+ if (log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockmaster internal clock\n", __func__);
+ pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* internal oscillator */
+ } else /* external clock source */
+ {
+ if (log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockmaster external clock\n", __func__);
+ pci_write_32 ((u_int32_t *) &comet->tx_time, 0x09); /* loop timing
+ * (external) */
+ }
+
+ } else /* slave */
+ {
+ if (isT1mode)
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x20); /* Slave Mode(CMODE=1,
+ * see above) */
+ else
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x21); /* Slave Mode (CMODE=1) */
+ pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x20); /* Slave Mode i.e.
+ * FPMODE=1 (@0x20) */
+ if (log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockslave internal clock\n", __func__);
+ pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* oscillator timing */
+ }
+
+ /* 32: BRIF parity F-bit cfg */
+ /* Totem-pole operation */
+ pci_write_32 ((u_int32_t *) &comet->brif_pfcfg, 0x01); /* Receive Backplane
+ * Parity/F-bit */
+
+ /* dc: RLPS equalizer V ref */
+ /* Configuration */
+ if (isT1mode)
+ pci_write_32 ((u_int32_t *) &comet->rlps_eqvr, 0x2c); /* RLPS Equalizer
+ * Voltage */
+ else
+ pci_write_32 ((u_int32_t *) &comet->rlps_eqvr, 0x34); /* RLPS Equalizer
+ * Voltage */
+
+ /* Reserved bit set and SQUELCH enabled */
+ /* f8: RLPS cfg & status f9: RLPS ALOS detect/clear threshold */
+ pci_write_32 ((u_int32_t *) &comet->rlps_cfgsts, 0x11); /* RLPS Configuration
+ * Status */
+ if (isT1mode)
+ pci_write_32 ((u_int32_t *) &comet->rlps_alos_thresh, 0x55); /* ? */
+ else
+ pci_write_32 ((u_int32_t *) &comet->rlps_alos_thresh, 0x22); /* ? */
+
+
+ /* Set Full Frame mode (NXDSO[1] = 0, NXDSO[0] = 0) */
+ /* CMODE=0: Clock slave mode with BTCLK as an input, DE=1: Use rising */
+ /* edge of BTCLK for data, FE=1: Use rising edge of BTCLK for frame, */
+ /* CMS=0: Use backplane freq, RATE[1:0]=0,0: T1 */
+/*** Transmit side is always an Input, Slave Clock*/
+ /* 40: BTIF cfg 41: BTIF frame pulse cfg */
+ if (isT1mode)
+ pci_write_32 ((u_int32_t *) &comet->btif_cfg, 0x38); /* BTIF Configuration
+ * Reg. */
+ else
+ pci_write_32 ((u_int32_t *) &comet->btif_cfg, 0x39); /* BTIF Configuration
+ * Reg. */
+
+ pci_write_32 ((u_int32_t *) &comet->btif_fpcfg, 0x01); /* BTIF Frame Pulse
+ * Config. */
+
+ /* 0a: master diag 06: tx timing options */
+ /* if set Comet to loop back */
+
+ /* Comets set to normal */
+ pci_write_32 ((u_int32_t *) &comet->mdiag, 0x00);
+
+ /* BTCLK driven by TCLKI internally (crystal driven) and Xmt Elastic */
+ /* Store is enabled. */
+
+ WrtXmtWaveformTbl (ci, comet, TWV_table[tix]);
+ if (isT1mode)
+ WrtRcvEqualizerTbl ((ci_t *) ci, comet, &T1_Equalizer[0]);
+ else
+ WrtRcvEqualizerTbl ((ci_t *) ci, comet, &E1_Equalizer[0]);
+ SetPwrLevel (comet);
+}
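A hypothetical call site, only to show the parameter roles (the constants here
are placeholders; real values come from the port's user configuration, and the
LBO and clock-source selections are assumed to occupy the disjoint CFG_LBO_MASK
and CFG_CLK_PORT_MASK fields that the function tests):

	/* port 0 as clock master, T1 ESF, default long-haul build-out,
	 * timed from the internal oscillator */
	init_comet(ci, comet0, CFG_FRAME_ESF, 1 /* clockmaster */,
		   CFG_LBO_LH0 | CFG_CLK_PORT_INTERNAL);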
+
+/*
+** Name: WrtXmtWaveform
+** Description: Formulate the Data for the Pulse Waveform Storage
+** Write register, (F2), from the sample and unit inputs.
+** Write the data to the Pulse Waveform Storage Data register.
+** Returns: Nothing
+*/
+STATIC void
+WrtXmtWaveform (ci_t * ci, comet_t * comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+{
+ u_int8_t WaveformAddr;
+
+ WaveformAddr = (sample << 3) + (unit & 7);
+ pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_addr, WaveformAddr);
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+ pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_data, 0x7F & data);
+}
+
+/*
+** Name: WrtXmtWaveformTbl
+** Description: Fill in the Transmit Waveform Values
+** for driving the transmitter DAC.
+** Returns: Nothing
+*/
+STATIC void
+WrtXmtWaveformTbl (ci_t * ci, comet_t * comet,
+ u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
+{
+ u_int32_t sample, unit;
+
+ for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
+ {
+ for (unit = 0; unit < COMET_NUM_UNITS; unit++)
+ WrtXmtWaveform (ci, comet, sample, unit, table[sample][unit]);
+ }
+
+ /* Enable transmitter and set output amplitude */
+ pci_write_32 ((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
+}
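Two details of the table write-out, spelled out from the code above rather than
added to it: the indirect address packs the sample number into bits 7..3 and the
unit number into bits 2..0, and the final xlpg_cfg write pulls the output
amplitude from one row past COMET_NUM_SAMPLES, so the tables in comet_tables.c
are expected to carry 25 rows:

	/* e.g. sample 5, unit 2:
	 *     WaveformAddr = (5 << 3) + (2 & 7) = 0x2A
	 * row 24 (== COMET_NUM_SAMPLES), column 0 supplies the XLPG amplitude */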
+
+
+/*
+** Name: WrtRcvEqualizerTbl
+** Description: Fill in the Receive Equalizer RAM from the desired
+** table.
+** Returns: Nothing
+**
+** Remarks: Per PM4351 Device Errata, Receive Equalizer RAM Initialization
+** is coded with early setup of indirect address.
+*/
+
+STATIC void
+WrtRcvEqualizerTbl (ci_t * ci, comet_t * comet, u_int32_t *table)
+{
+ u_int32_t ramaddr;
+ volatile u_int32_t value;
+
+ for (ramaddr = 0; ramaddr < 256; ramaddr++)
+ {
+ /*** the following lines are per Errata 7, 2.5 ***/
+ {
+ pci_write_32 ((u_int32_t *) &comet->rlps_eq_rwsel, 0x80); /* Set up for a read
+ * operation */
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+ pci_write_32 ((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); /* write the addr,
+ * initiate a read */
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+ /*
+ * wait 3 line rate clock cycles to ensure address bits are
+ * captured by T1/E1 clock
+ */
+ OS_uwait (4, "wret"); /* 683ns * 3 = 2049 ns, approx 2us (but
+ * use 4us) */
+ }
+
+ value = *table++;
+ pci_write_32 ((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
+ pci_write_32 ((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
+ pci_write_32 ((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
+ pci_write_32 ((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+
+ /* Storing RAM address, causes RAM to be updated */
+
+ pci_write_32 ((u_int32_t *) &comet->rlps_eq_rwsel, 0); /* Set up for a write
+ * operation */
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+ pci_write_32 ((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); /* write the addr,
+ * initiate a read */
+ pci_flush_write (ci); /* for write order preservation when
+ * Optimizing driver */
+ /*
+ * wait 3 line rate clock cycles to ensure address bits are captured
+ * by T1/E1 clock
+ */
+ OS_uwait (4, "wret"); /* 683ns * 3 = 2049 ns, approx 2us (but
+ * use 4us) */
+ }
+
+ pci_write_32 ((u_int32_t *) &comet->rlps_eq_cfg, 0xCB); /* Enable Equalizer &
+ * set it to use 256
+ * periods */
+}
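Condensed, the per-entry sequence required by the PM4351 errata and implemented
above is:

	/*
	 * 1. rlps_eq_rwsel  <- 0x80     select an indirect read
	 * 2. rlps_eq_iaddr  <- ramaddr  latch the address (starts a dummy read)
	 * 3. wait >= 3 line-rate clocks
	 * 4. rlps_idata3..0 <- entry    table value, most significant byte first
	 * 5. rlps_eq_rwsel  <- 0x00     select an indirect write
	 * 6. rlps_eq_iaddr  <- ramaddr  commit the entry to equalizer RAM
	 * 7. wait >= 3 line-rate clocks
	 * After all 256 entries are loaded, rlps_eq_cfg <- 0xCB enables the
	 * equalizer and sets it to use 256 periods.
	 */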
+
+
+/*
+** Name: SetPwrLevel
+** Description: Implement power level setting algorithm described below
+** Returns: Nothing
+*/
+
+STATIC void
+SetPwrLevel (comet_t * comet)
+{
+ volatile u_int32_t temp;
+
+/*
+** Algorithm to Balance the Power Distribution of Ttip Tring
+**
+** Zero register F6
+** Write 0x01 to register F4
+** Write another 0x01 to register F4
+** Read register F4
+** Remove the 0x01 bit by ANDing register F4 with 0xFE
+** Write the resultant value to register F4
+** Repeat these steps for register F5
+** Write 0x01 to register F6
+*/
+ pci_write_32 ((u_int32_t *) &comet->xlpg_fdata_sel, 0x00); /* XLPG Fuse Data Select */
+
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, 0x01); /* XLPG Analog Test
+ * Positive control */
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, 0x01);
+
+ temp = pci_read_32 ((u_int32_t *) &comet->xlpg_atest_pctl) & 0xfe;
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, temp);
+
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, 0x01); /* XLPG Analog Test
+ * Negative control */
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, 0x01);
+
+ temp = pci_read_32 ((u_int32_t *) &comet->xlpg_atest_nctl) & 0xfe;
+ pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, temp);
+ pci_write_32 ((u_int32_t *) &comet->xlpg_fdata_sel, 0x01); /* XLPG */
+}
+
+
+/*
+** Name: SetCometOps
+** Description: Set up the selected Comet's clock edge drive for both
+** the transmit out the analog side and receive to the
+** backplane side.
+** Returns: Nothing
+*/
+#if 0
+STATIC void
+SetCometOps (comet_t * comet)
+{
+ volatile u_int8_t rd_value;
+
+ if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
+ {
+ rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_cfg); /* read the BRIF
+ * Configuration */
+ rd_value &= ~0x20;
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+
+ rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_fpcfg); /* read the BRIF Frame
+ * Pulse Configuration */
+ rd_value &= ~0x20;
+ pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ } else
+ {
+ rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_cfg); /* read the BRIF
+ * Configuration */
+ rd_value |= 0x20;
+ pci_write_32 ((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+
+ rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_fpcfg); /* read the BRIF Frame
+ * Pulse Configuration */
+ rd_value |= 0x20;
+ pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ }
+}
+#endif
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
new file mode 100644
index 00000000000..5cb3afda011
--- /dev/null
+++ b/drivers/staging/cxt1e1/comet.h
@@ -0,0 +1,366 @@
+/*
+ * $Id: comet.h,v 1.3 2005/09/28 00:10:07 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_COMET_H_
+#define _INC_COMET_H_
+
+/*-----------------------------------------------------------------------------
+ * comet.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.3 $
+ * Last changed on $Date: 2005/09/28 00:10:07 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: comet.h,v $
+ * Revision 1.3 2005/09/28 00:10:07 rickd
+ * Add RCS header. Switch to structure usage.
+ *
+ * Revision 1.2 2005/04/28 23:43:03 rickd
+ * Add RCS tracking heading.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#if defined(__FreeBSD__) || defined (__NetBSD__)
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+
+
+#define VINT32 volatile u_int32_t
+
+struct s_comet_reg
+{
+ VINT32 gbl_cfg; /* 00 Global Cfg */
+ VINT32 clkmon; /* 01 Clk Monitor */
+ VINT32 rx_opt; /* 02 RX Options */
+ VINT32 rx_line_cfg; /* 03 RX Line Interface Cfg */
+ VINT32 tx_line_cfg; /* 04 TX Line Interface Cfg */
+ VINT32 tx_frpass; /* 05 TX Framing & Bypass Options */
+ VINT32 tx_time; /* 06 TX Timing Options */
+ VINT32 intr_1; /* 07 Intr Source #1 */
+ VINT32 intr_2; /* 08 Intr Source #2 */
+ VINT32 intr_3; /* 09 Intr Source #3 */
+ VINT32 mdiag; /* 0A Master Diagnostics */
+ VINT32 mtest; /* 0B Master Test */
+ VINT32 adiag; /* 0C Analog Diagnostics */
+ VINT32 rev_id; /* 0D Rev/Chip Id/Global PMON Update */
+#define pmon rev_id
+ VINT32 reset; /* 0E Reset */
+ VINT32 prgd_phctl; /* 0F PRGD Positioning/Ctl & HDLC Ctl */
+ VINT32 cdrc_cfg; /* 10 CDRC Cfg */
+ VINT32 cdrc_ien; /* 11 CDRC Intr Enable */
+ VINT32 cdrc_ists; /* 12 CDRC Intr Sts */
+ VINT32 cdrc_alos; /* 13 CDRC Alternate Loss of Signal */
+
+ VINT32 rjat_ists; /* 14 RJAT Intr Sts */
+ VINT32 rjat_n1clk; /* 15 RJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 rjat_n2clk; /* 16 RJAT Output Clk Divisor (N2) Ctl */
+ VINT32 rjat_cfg; /* 17 RJAT Cfg */
+
+ VINT32 tjat_ists; /* 18 TJAT Intr Sts */
+ VINT32 tjat_n1clk; /* 19 TJAT Reference Clk Divisor (N1) Ctl */
+ VINT32 tjat_n2clk; /* 1A TJAT Output Clk Divisor (N2) Ctl */
+ VINT32 tjat_cfg; /* 1B TJAT Cfg */
+
+ VINT32 rx_elst_cfg; /* 1C RX-ELST Cfg */
+ VINT32 rx_elst_ists; /* 1D RX-ELST Intr Sts */
+ VINT32 rx_elst_idle; /* 1E RX-ELST Idle Code */
+ VINT32 _rx_elst_res1f; /* 1F RX-ELST Reserved */
+
+ VINT32 tx_elst_cfg; /* 20 TX-ELST Cfg */
+ VINT32 tx_elst_ists; /* 21 TX-ELST Intr Sts */
+ VINT32 _tx_elst_res22; /* 22 TX-ELST Reserved */
+ VINT32 _tx_elst_res23; /* 23 TX-ELST Reserved */
+ VINT32 __res24; /* 24 Reserved */
+ VINT32 __res25; /* 25 Reserved */
+ VINT32 __res26; /* 26 Reserved */
+ VINT32 __res27; /* 27 Reserved */
+
+ VINT32 rxce1_ctl; /* 28 RXCE RX Data Link 1 Ctl */
+ VINT32 rxce1_bits; /* 29 RXCE RX Data Link 1 Bit Select */
+ VINT32 rxce2_ctl; /* 2A RXCE RX Data Link 2 Ctl */
+ VINT32 rxce2_bits; /* 2B RXCE RX Data Link 2 Bit Select */
+ VINT32 rxce3_ctl; /* 2C RXCE RX Data Link 3 Ctl */
+ VINT32 rxce3_bits; /* 2D RXCE RX Data Link 3 Bit Select */
+ VINT32 _rxce_res2E; /* 2E RXCE Reserved */
+ VINT32 _rxce_res2F; /* 2F RXCE Reserved */
+
+ VINT32 brif_cfg; /* 30 BRIF RX Backplane Cfg */
+ VINT32 brif_fpcfg; /* 31 BRIF RX Backplane Frame Pulse Cfg */
+ VINT32 brif_pfcfg; /* 32 BRIF RX Backplane Parity/F-Bit Cfg */
+ VINT32 brif_tsoff; /* 33 BRIF RX Backplane Time Slot Offset */
+ VINT32 brif_boff; /* 34 BRIF RX Backplane Bit Offset */
+ VINT32 _brif_res35; /* 35 BRIF RX Backplane Reserved */
+ VINT32 _brif_res36; /* 36 BRIF RX Backplane Reserved */
+ VINT32 _brif_res37; /* 37 BRIF RX Backplane Reserved */
+
+ VINT32 txci1_ctl; /* 38 TXCI TX Data Link 1 Ctl */
+ VINT32 txci1_bits; /* 39 TXCI TX Data Link 1 Bit Select */
+ VINT32 txci2_ctl; /* 3A TXCI TX Data Link 2 Ctl */
+ VINT32 txci2_bits; /* 3B TXCI TX Data Link 2 Bit Select */
+ VINT32 txci3_ctl; /* 3C TXCI TX Data Link 3 Ctl */
+ VINT32 txci3_bits; /* 3D TXCI TX Data Link 3 Bit Select */
+ VINT32 _txci_res3E; /* 3E TXCI Reserved */
+ VINT32 _txci_res3F; /* 3F TXCI Reserved */
+
+ VINT32 btif_cfg; /* 40 BTIF TX Backplane Cfg */
+ VINT32 btif_fpcfg; /* 41 BTIF TX Backplane Frame Pulse Cfg */
+ VINT32 btif_pcfgsts; /* 42 BTIF TX Backplane Parity Cfg & Sts */
+ VINT32 btif_tsoff; /* 43 BTIF TX Backplane Time Slot Offset */
+ VINT32 btif_boff; /* 44 BTIF TX Backplane Bit Offset */
+ VINT32 _btif_res45; /* 45 BTIF TX Backplane Reserved */
+ VINT32 _btif_res46; /* 46 BTIF TX Backplane Reserved */
+ VINT32 _btif_res47; /* 47 BTIF TX Backplane Reserved */
+ VINT32 t1_frmr_cfg; /* 48 T1 FRMR Cfg */
+ VINT32 t1_frmr_ien; /* 49 T1 FRMR Intr Enable */
+ VINT32 t1_frmr_ists; /* 4A T1 FRMR Intr Sts */
+ VINT32 __res_4B; /* 4B Reserved */
+ VINT32 ibcd_cfg; /* 4C IBCD Cfg */
+ VINT32 ibcd_ies; /* 4D IBCD Intr Enable/Sts */
+ VINT32 ibcd_act; /* 4E IBCD Activate Code */
+ VINT32 ibcd_deact; /* 4F IBCD Deactivate Code */
+
+ VINT32 sigx_cfg; /* 50 SIGX Cfg/Change of Signaling State */
+ VINT32 sigx_acc_cos; /* 51 SIGX uP Access Sts/Change of Signaling State */
+ VINT32 sigx_iac_cos; /* 52 SIGX Channel Indirect
+ * Addr/Ctl/Change of Signaling State */
+ VINT32 sigx_idb_cos; /* 53 SIGX Channel Indirect Data
+ * Buffer/Change of Signaling State */
+
+ VINT32 t1_xbas_cfg; /* 54 T1 XBAS Cfg */
+ VINT32 t1_xbas_altx; /* 55 T1 XBAS Alarm TX */
+ VINT32 t1_xibc_ctl; /* 56 T1 XIBC Ctl */
+ VINT32 t1_xibc_lbcode; /* 57 T1 XIBC Loopback Code */
+
+ VINT32 pmon_ies; /* 58 PMON Intr Enable/Sts */
+ VINT32 pmon_fberr; /* 59 PMON Framing Bit Err Cnt */
+ VINT32 pmon_feb_lsb; /* 5A PMON OFF/COFA/Far End Block Err Cnt (LSB) */
+ VINT32 pmon_feb_msb; /* 5B PMON OFF/COFA/Far End Block Err Cnt (MSB) */
+ VINT32 pmon_bed_lsb; /* 5C PMON Bit/Err/CRCE Cnt (LSB) */
+ VINT32 pmon_bed_msb; /* 5D PMON Bit/Err/CRCE Cnt (MSB) */
+ VINT32 pmon_lvc_lsb; /* 5E PMON LVC Cnt (LSB) */
+ VINT32 pmon_lvc_msb; /* 5F PMON LVC Cnt (MSB) */
+
+ VINT32 t1_almi_cfg; /* 60 T1 ALMI Cfg */
+ VINT32 t1_almi_ien; /* 61 T1 ALMI Intr Enable */
+ VINT32 t1_almi_ists; /* 62 T1 ALMI Intr Sts */
+ VINT32 t1_almi_detsts; /* 63 T1 ALMI Alarm Detection Sts */
+
+ VINT32 _t1_pdvd_res64; /* 64 T1 PDVD Reserved */
+ VINT32 t1_pdvd_ies; /* 65 T1 PDVD Intr Enable/Sts */
+ VINT32 _t1_xboc_res66; /* 66 T1 XBOC Reserved */
+ VINT32 t1_xboc_code; /* 67 T1 XBOC Code */
+ VINT32 _t1_xpde_res68; /* 68 T1 XPDE Reserved */
+ VINT32 t1_xpde_ies; /* 69 T1 XPDE Intr Enable/Sts */
+
+ VINT32 t1_rboc_ena; /* 6A T1 RBOC Enable */
+ VINT32 t1_rboc_sts; /* 6B T1 RBOC Code Sts */
+
+ VINT32 t1_tpsc_cfg; /* 6C TPSC Cfg */
+ VINT32 t1_tpsc_sts; /* 6D TPSC uP Access Sts */
+ VINT32 t1_tpsc_ciaddr; /* 6E TPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_tpsc_cidata; /* 6F TPSC Channel Indirect Data
+ * Buffer */
+ VINT32 t1_rpsc_cfg; /* 70 RPSC Cfg */
+ VINT32 t1_rpsc_sts; /* 71 RPSC uP Access Sts */
+ VINT32 t1_rpsc_ciaddr; /* 72 RPSC Channel Indirect
+ * Addr/Ctl */
+ VINT32 t1_rpsc_cidata; /* 73 RPSC Channel Indirect Data
+ * Buffer */
+ VINT32 __res74; /* 74 Reserved */
+ VINT32 __res75; /* 75 Reserved */
+ VINT32 __res76; /* 76 Reserved */
+ VINT32 __res77; /* 77 Reserved */
+
+ VINT32 t1_aprm_cfg; /* 78 T1 APRM Cfg/Ctl */
+ VINT32 t1_aprm_load; /* 79 T1 APRM Manual Load */
+ VINT32 t1_aprm_ists; /* 7A T1 APRM Intr Sts */
+ VINT32 t1_aprm_1sec_2; /* 7B T1 APRM One Second Content Octet 2 */
+ VINT32 t1_aprm_1sec_3; /* 7C T1 APRM One Second Content Octet 3 */
+ VINT32 t1_aprm_1sec_4; /* 7D T1 APRM One Second Content Octet 4 */
+ VINT32 t1_aprm_1sec_5; /* 7E T1 APRM One Second Content MSB (Octet 5) */
+ VINT32 t1_aprm_1sec_6; /* 7F T1 APRM One Second Content MSB (Octet 6) */
+
+ VINT32 e1_tran_cfg; /* 80 E1 TRAN Cfg */
+ VINT32 e1_tran_txalarm; /* 81 E1 TRAN TX Alarm/Diagnostic Ctl */
+ VINT32 e1_tran_intctl; /* 82 E1 TRAN International Ctl */
+ VINT32 e1_tran_extrab; /* 83 E1 TRAN Extra Bits Ctl */
+ VINT32 e1_tran_ien; /* 84 E1 TRAN Intr Enable */
+ VINT32 e1_tran_ists; /* 85 E1 TRAN Intr Sts */
+ VINT32 e1_tran_nats; /* 86 E1 TRAN National Bit Codeword
+ * Select */
+ VINT32 e1_tran_nat; /* 87 E1 TRAN National Bit Codeword */
+ VINT32 __res88; /* 88 Reserved */
+ VINT32 __res89; /* 89 Reserved */
+ VINT32 __res8A; /* 8A Reserved */
+ VINT32 __res8B; /* 8B Reserved */
+
+ VINT32 _t1_frmr_res8C; /* 8C T1 FRMR Reserved */
+ VINT32 _t1_frmr_res8D; /* 8D T1 FRMR Reserved */
+ VINT32 __res8E; /* 8E Reserved */
+ VINT32 __res8F; /* 8F Reserved */
+
+ VINT32 e1_frmr_aopts; /* 90 E1 FRMR Frame Alignment Options */
+ VINT32 e1_frmr_mopts; /* 91 E1 FRMR Maintenance Mode Options */
+ VINT32 e1_frmr_ien; /* 92 E1 FRMR Framing Sts Intr Enable */
+ VINT32 e1_frmr_mien; /* 93 E1 FRMR Maintenance/Alarm Sts Intr Enable */
+ VINT32 e1_frmr_ists; /* 94 E1 FRMR Framing Sts Intr Indication */
+ VINT32 e1_frmr_mists; /* 95 E1 FRMR Maintenance/Alarm Sts Indication Enable */
+ VINT32 e1_frmr_sts; /* 96 E1 FRMR Framing Sts */
+ VINT32 e1_frmr_masts; /* 97 E1 FRMR Maintenance/Alarm Sts */
+ VINT32 e1_frmr_nat_bits; /* 98 E1 FRMR International/National Bits */
+ VINT32 e1_frmr_crc_lsb; /* 99 E1 FRMR CRC Err Cnt - LSB */
+ VINT32 e1_frmr_crc_msb; /* 9A E1 FRMR CRC Err Cnt - MSB */
+ VINT32 e1_frmr_nat_ien; /* 9B E1 FRMR National Bit Codeword Intr Enables */
+ VINT32 e1_frmr_nat_ists; /* 9C E1 FRMR National Bit Codeword Intr/Sts */
+ VINT32 e1_frmr_nat; /* 9D E1 FRMR National Bit Codewords */
+ VINT32 e1_frmr_fp_ien; /* 9E E1 FRMR Frame Pulse/Alarm Intr Enables */
+ VINT32 e1_frmr_fp_ists; /* 9F E1 FRMR Frame Pulse/Alarm Intr/Sts */
+
+ VINT32 __resA0; /* A0 Reserved */
+ VINT32 __resA1; /* A1 Reserved */
+ VINT32 __resA2; /* A2 Reserved */
+ VINT32 __resA3; /* A3 Reserved */
+ VINT32 __resA4; /* A4 Reserved */
+ VINT32 __resA5; /* A5 Reserved */
+ VINT32 __resA6; /* A6 Reserved */
+ VINT32 __resA7; /* A7 Reserved */
+
+ VINT32 tdpr1_cfg; /* A8 TDPR #1 Cfg */
+ VINT32 tdpr1_utl; /* A9 TDPR #1 Upper TX Threshold */
+ VINT32 tdpr1_ltl; /* AA TDPR #1 Lower TX Threshold */
+ VINT32 tdpr1_ien; /* AB TDPR #1 Intr Enable */
+ VINT32 tdpr1_ists; /* AC TDPR #1 Intr Sts/UDR Clear */
+ VINT32 tdpr1_data; /* AD TDPR #1 TX Data */
+ VINT32 __resAE; /* AE Reserved */
+ VINT32 __resAF; /* AF Reserved */
+ VINT32 tdpr2_cfg; /* B0 TDPR #2 Cfg */
+ VINT32 tdpr2_utl; /* B1 TDPR #2 Upper TX Threshold */
+ VINT32 tdpr2_ltl; /* B2 TDPR #2 Lower TX Threshold */
+ VINT32 tdpr2_ien; /* B3 TDPR #2 Intr Enable */
+ VINT32 tdpr2_ists; /* B4 TDPR #2 Intr Sts/UDR Clear */
+ VINT32 tdpr2_data; /* B5 TDPR #2 TX Data */
+ VINT32 __resB6; /* B6 Reserved */
+ VINT32 __resB7; /* B7 Reserved */
+ VINT32 tdpr3_cfg; /* B8 TDPR #3 Cfg */
+ VINT32 tdpr3_utl; /* B9 TDPR #3 Upper TX Threshold */
+ VINT32 tdpr3_ltl; /* BA TDPR #3 Lower TX Threshold */
+ VINT32 tdpr3_ien; /* BB TDPR #3 Intr Enable */
+ VINT32 tdpr3_ists; /* BC TDPR #3 Intr Sts/UDR Clear */
+ VINT32 tdpr3_data; /* BD TDPR #3 TX Data */
+ VINT32 __resBE; /* BE Reserved */
+ VINT32 __resBF; /* BF Reserved */
+
+ VINT32 rdlc1_cfg; /* C0 RDLC #1 Cfg */
+ VINT32 rdlc1_intctl; /* C1 RDLC #1 Intr Ctl */
+ VINT32 rdlc1_sts; /* C2 RDLC #1 Sts */
+ VINT32 rdlc1_data; /* C3 RDLC #1 Data */
+ VINT32 rdlc1_paddr; /* C4 RDLC #1 Primary Addr Match */
+ VINT32 rdlc1_saddr; /* C5 RDLC #1 Secondary Addr Match */
+ VINT32 __resC6; /* C6 Reserved */
+ VINT32 __resC7; /* C7 Reserved */
+ VINT32 rdlc2_cfg; /* C8 RDLC #2 Cfg */
+ VINT32 rdlc2_intctl; /* C9 RDLC #2 Intr Ctl */
+ VINT32 rdlc2_sts; /* CA RDLC #2 Sts */
+ VINT32 rdlc2_data; /* CB RDLC #2 Data */
+ VINT32 rdlc2_paddr; /* CC RDLC #2 Primary Addr Match */
+ VINT32 rdlc2_saddr; /* CD RDLC #2 Secondary Addr Match */
+ VINT32 __resCE; /* CE Reserved */
+ VINT32 __resCF; /* CF Reserved */
+ VINT32 rdlc3_cfg; /* D0 RDLC #3 Cfg */
+ VINT32 rdlc3_intctl; /* D1 RDLC #3 Intr Ctl */
+ VINT32 rdlc3_sts; /* D2 RDLC #3 Sts */
+ VINT32 rdlc3_data; /* D3 RDLC #3 Data */
+ VINT32 rdlc3_paddr; /* D4 RDLC #3 Primary Addr Match */
+ VINT32 rdlc3_saddr; /* D5 RDLC #3 Secondary Addr Match */
+
+ VINT32 csu_cfg; /* D6 CSU Cfg */
+ VINT32 _csu_resD7; /* D7 CSU Reserved */
+
+ VINT32 rlps_idata3; /* D8 RLPS Indirect Data, 24-31 */
+ VINT32 rlps_idata2; /* D9 RLPS Indirect Data, 16-23 */
+ VINT32 rlps_idata1; /* DA RLPS Indirect Data, 8-15 */
+ VINT32 rlps_idata0; /* DB RLPS Indirect Data, 0-7 */
+ VINT32 rlps_eqvr; /* DC RLPS Equalizer Voltage Reference
+ * (E1 missing) */
+ VINT32 _rlps_resDD; /* DD RLPS Reserved */
+ VINT32 _rlps_resDE; /* DE RLPS Reserved */
+ VINT32 _rlps_resDF; /* DF RLPS Reserved */
+
+ VINT32 prgd_ctl; /* E0 PRGD Ctl */
+ VINT32 prgd_ies; /* E1 PRGD Intr Enable/Sts */
+ VINT32 prgd_shift_len; /* E2 PRGD Shift Length */
+ VINT32 prgd_tap; /* E3 PRGD Tap */
+ VINT32 prgd_errin; /* E4 PRGD Err Insertion */
+ VINT32 _prgd_resE5; /* E5 PRGD Reserved */
+ VINT32 _prgd_resE6; /* E6 PRGD Reserved */
+ VINT32 _prgd_resE7; /* E7 PRGD Reserved */
+ VINT32 prgd_patin1; /* E8 PRGD Pattern Insertion #1 */
+ VINT32 prgd_patin2; /* E9 PRGD Pattern Insertion #2 */
+ VINT32 prgd_patin3; /* EA PRGD Pattern Insertion #3 */
+ VINT32 prgd_patin4; /* EB PRGD Pattern Insertion #4 */
+ VINT32 prgd_patdet1; /* EC PRGD Pattern Detector #1 */
+ VINT32 prgd_patdet2; /* ED PRGD Pattern Detector #2 */
+ VINT32 prgd_patdet3; /* EE PRGD Pattern Detector #3 */
+ VINT32 prgd_patdet4; /* EF PRGD Pattern Detector #4 */
+
+ VINT32 xlpg_cfg; /* F0 XLPG Line Driver Cfg */
+ VINT32 xlpg_ctlsts; /* F1 XLPG Ctl/Sts */
+ VINT32 xlpg_pwave_addr; /* F2 XLPG Pulse Waveform Storage Write Addr */
+ VINT32 xlpg_pwave_data; /* F3 XLPG Pulse Waveform Storage Data */
+ VINT32 xlpg_atest_pctl; /* F4 XLPG Analog Test Positive Ctl */
+ VINT32 xlpg_atest_nctl; /* F5 XLPG Analog Test Negative Ctl */
+ VINT32 xlpg_fdata_sel; /* F6 XLPG Fuse Data Select */
+ VINT32 _xlpg_resF7; /* F7 XLPG Reserved */
+
+ VINT32 rlps_cfgsts; /* F8 RLPS Cfg & Sts */
+ VINT32 rlps_alos_thresh; /* F9 RLPS ALOS Detection/Clearance Threshold */
+ VINT32 rlps_alos_dper; /* FA RLPS ALOS Detection Period */
+ VINT32 rlps_alos_cper; /* FB RLPS ALOS Clearance Period */
+ VINT32 rlps_eq_iaddr; /* FC RLPS Equalization Indirect Addr */
+ VINT32 rlps_eq_rwsel; /* FD RLPS Equalization Read/WriteB Select */
+ VINT32 rlps_eq_ctlsts; /* FE RLPS Equalizer Loop Sts & Ctl */
+ VINT32 rlps_eq_cfg; /* FF RLPS Equalizer Cfg */
+};
+
+typedef struct s_comet_reg comet_t;
+
+/* 00AH: MDIAG Register bit definitions */
+#define COMET_MDIAG_ID5 0x40
+#define COMET_MDIAG_LBMASK 0x3F
+#define COMET_MDIAG_PAYLB 0x20
+#define COMET_MDIAG_LINELB 0x10
+#define COMET_MDIAG_RAIS 0x08
+#define COMET_MDIAG_DDLB 0x04
+#define COMET_MDIAG_TXMFP 0x02
+#define COMET_MDIAG_TXLOS 0x01
+#define COMET_MDIAG_LBOFF 0x00
+
+#undef VINT32
+
+#ifdef __KERNEL__
+extern void
+init_comet (void *, comet_t *, u_int32_t, int, u_int8_t);
+#endif
+
+#endif /* _INC_COMET_H_ */
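
The MDIAG bit definitions above select the COMET's diagnostic loopback modes (COMET_MDIAG_LBMASK covers the loopback/diagnostic bits, COMET_MDIAG_LBOFF clears them). A minimal sketch of how a driver routine might drive them is shown below; it assumes the struct's 0x0A register member is named mdiag (declared earlier in this header, outside this excerpt), uses the pci_read_32()/pci_write_32() accessors from functions.c later in this patch, and the helper name comet_set_loopback() is purely illustrative, not driver code.

/* Illustrative helper: select a COMET diagnostic/loopback mode.
 * 'mode' is one of COMET_MDIAG_PAYLB, COMET_MDIAG_LINELB,
 * COMET_MDIAG_DDLB, or COMET_MDIAG_LBOFF (loopback off).
 */
static void
comet_set_loopback (comet_t *comet, u_int8_t mode)
{
    u_int32_t mdiag;

    mdiag = pci_read_32 ((u_int32_t *) &comet->mdiag);
    mdiag &= ~COMET_MDIAG_LBMASK;           /* clear current diagnostic bits */
    mdiag |= (mode & COMET_MDIAG_LBMASK);   /* select requested mode */
    pci_write_32 ((u_int32_t *) &comet->mdiag, mdiag);
}
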
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
new file mode 100644
index 00000000000..db1293c71a6
--- /dev/null
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -0,0 +1,561 @@
+/*
+ * $Id: comet_tables.c,v 1.2 2005/10/17 23:55:27 rickd PMCC4_3_1B $
+ */
+
+/*-----------------------------------------------------------------------------
+ * comet_tables.c - waveform tables for the PM4351 'COMET'
+ *
+ * Copyright (C) 2003-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.2 $
+ * Last changed on $Date: 2005/10/17 23:55:27 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: comet_tables.c,v $
+ * Revision 1.2 2005/10/17 23:55:27 rickd
+ * Note that 75 Ohm transmit waveform is not supported on PMCC4.
+ *
+ * Revision 1.1 2005/09/28 00:10:05 rickd
+ * Cosmetic alignment of tables for readability.
+ *
+ * Revision 1.0 2005/05/10 22:47:53 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+char SBEid_pmcc4_comet_tblc[] =
+ "@(#)comet_tables.c - $Revision: 1.2 $ (c) Copyright 2004-2005 SBE, Inc.";
+
+
+#include <linux/types.h>
+
+/*****************************************************************************
+*
+* Array names:
+*
+* TWVLongHaul0DB
+* TWVLongHaul7_5DB
+* TWVLongHaul15DB
+* TWVLongHaul22_5DB
+* TWVShortHaul0
+* TWVShortHaul1
+* TWVShortHaul2
+* TWVShortHaul3
+* TWVShortHaul4
+* TWVShortHaul5
+* TWV_E1_120Ohm
+* TWV_E1_75Ohm <not supported>
+* T1_Equalizer
+* E1_Equalizer
+*
+*****************************************************************************/
+
+u_int8_t TWVLongHaul0DB[25][5] =/* T1 Long Haul 0 DB */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x32, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3E, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3D, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x4C, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x0C} /* PMC's suggested value */
+/* {0x14} Output Amplitude */
+};
+
+u_int8_t TWVLongHaul7_5DB[25][5] = /* T1 Long Haul 7.5 DB */
+{
+ {0x00, 0x10, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x01, 0x0E, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x02, 0x0C, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x04, 0x0A, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x08, 0x08, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x0C, 0x06, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x10, 0x04, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x16, 0x02, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x1A, 0x01, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x1E, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x22, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x20, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x18, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x12, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x07} /* PMC's suggested value */
+/* { 0x0A } Output Amplitude */
+};
+
+u_int8_t TWVLongHaul15DB[25][5] = /* T1 Long Haul 15 DB */
+{
+ {0x00, 0x2A, 0x09, 0x01, 0x00}, /* Sample 0 */
+ {0x00, 0x28, 0x08, 0x01, 0x00}, /* Sample 1 */
+ {0x00, 0x26, 0x08, 0x01, 0x00}, /* Sample 2 */
+ {0x00, 0x24, 0x07, 0x01, 0x00}, /* Sample 3 */
+ {0x01, 0x22, 0x07, 0x01, 0x00}, /* Sample 4 */
+ {0x02, 0x20, 0x06, 0x01, 0x00}, /* Sample 5 */
+ {0x04, 0x1E, 0x06, 0x01, 0x00}, /* Sample 6 */
+ {0x07, 0x1C, 0x05, 0x00, 0x00}, /* Sample 7 */
+ {0x0A, 0x1B, 0x05, 0x00, 0x00}, /* Sample 8 */
+ {0x0D, 0x19, 0x05, 0x00, 0x00}, /* Sample 9 */
+ {0x10, 0x18, 0x04, 0x00, 0x00}, /* Sample 10 */
+ {0x14, 0x16, 0x04, 0x00, 0x00}, /* Sample 11 */
+ {0x18, 0x15, 0x04, 0x00, 0x00}, /* Sample 12 */
+ {0x1B, 0x13, 0x03, 0x00, 0x00}, /* Sample 13 */
+ {0x1E, 0x12, 0x03, 0x00, 0x00}, /* Sample 14 */
+ {0x21, 0x10, 0x03, 0x00, 0x00}, /* Sample 15 */
+ {0x24, 0x0F, 0x03, 0x00, 0x00}, /* Sample 16 */
+ {0x27, 0x0D, 0x03, 0x00, 0x00}, /* Sample 17 */
+ {0x2A, 0x0D, 0x02, 0x00, 0x00}, /* Sample 18 */
+ {0x2D, 0x0B, 0x02, 0x00, 0x00}, /* Sample 19 */
+ {0x30, 0x0B, 0x02, 0x00, 0x00}, /* Sample 20 */
+ {0x30, 0x0A, 0x02, 0x00, 0x00}, /* Sample 21 */
+ {0x2E, 0x0A, 0x02, 0x00, 0x00}, /* Sample 22 */
+ {0x2C, 0x09, 0x02, 0x00, 0x00}, /* Sample 23 */
+ {0x03} /* Output Amplitude */
+};
+
+u_int8_t TWVLongHaul22_5DB[25][5] = /* T1 Long Haul 22.5 DB */
+{
+ {0x00, 0x1F, 0x16, 0x06, 0x01}, /* Sample 0 */
+ {0x00, 0x20, 0x15, 0x05, 0x01}, /* Sample 1 */
+ {0x00, 0x21, 0x15, 0x05, 0x01}, /* Sample 2 */
+ {0x00, 0x22, 0x14, 0x05, 0x01}, /* Sample 3 */
+ {0x00, 0x22, 0x13, 0x04, 0x00}, /* Sample 4 */
+ {0x00, 0x23, 0x12, 0x04, 0x00}, /* Sample 5 */
+ {0x01, 0x23, 0x12, 0x04, 0x00}, /* Sample 6 */
+ {0x01, 0x24, 0x11, 0x03, 0x00}, /* Sample 7 */
+ {0x01, 0x23, 0x10, 0x03, 0x00}, /* Sample 8 */
+ {0x02, 0x23, 0x10, 0x03, 0x00}, /* Sample 9 */
+ {0x03, 0x22, 0x0F, 0x03, 0x00}, /* Sample 10 */
+ {0x05, 0x22, 0x0E, 0x03, 0x00}, /* Sample 11 */
+ {0x07, 0x21, 0x0E, 0x02, 0x00}, /* Sample 12 */
+ {0x09, 0x20, 0x0D, 0x02, 0x00}, /* Sample 13 */
+ {0x0B, 0x1E, 0x0C, 0x02, 0x00}, /* Sample 14 */
+ {0x0E, 0x1D, 0x0C, 0x02, 0x00}, /* Sample 15 */
+ {0x10, 0x1B, 0x0B, 0x02, 0x00}, /* Sample 16 */
+ {0x13, 0x1B, 0x0A, 0x02, 0x00}, /* Sample 17 */
+ {0x15, 0x1A, 0x0A, 0x02, 0x00}, /* Sample 18 */
+ {0x17, 0x19, 0x09, 0x01, 0x00}, /* Sample 19 */
+ {0x19, 0x19, 0x08, 0x01, 0x00}, /* Sample 20 */
+ {0x1B, 0x18, 0x08, 0x01, 0x00}, /* Sample 21 */
+ {0x1D, 0x17, 0x07, 0x01, 0x00}, /* Sample 22 */
+ {0x1E, 0x17, 0x06, 0x01, 0x00}, /* Sample 23 */
+ {0x02} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul0[25][5] = /* T1 Short Haul 0 - 110 ft */
+{
+ {0x00, 0x45, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x20, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x3C, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x3B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x37, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x34, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x59, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x55, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x4D, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x48, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x0C} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul1[25][5] = /* T1 Short Haul 110 - 220 ft */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x36, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x34, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x68, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x54, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x10} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul2[25][5] = /* T1 Short Haul 220 - 330 ft */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3A, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3A, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x38, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x2E, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x23, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x6C, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x11} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul3[25][5] = /* T1 Short Haul 330 - 440 ft */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x2F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x2E, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x2C, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x2B, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x19, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x60, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x12} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul4[25][5] = /* T1 Short Haul 440 - 550 ft */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x2B, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x4F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x14} /* Output Amplitude */
+};
+
+u_int8_t TWVShortHaul5[25][5] = /* T1 Short Haul 550 - 660 ft */
+{
+ {0x00, 0x44, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x0A, 0x44, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x43, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3F, 0x42, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x3F, 0x41, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x30, 0x41, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x2A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x29, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x27, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x26, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x25, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x24, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x4A, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x7F, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x5F, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x50, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x49, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x47, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x46, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x15} /* Output Amplitude */
+};
+
+u_int8_t TWV_E1_120Ohm[25][5] = /* E1 120 Ohm */
+{
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x3F, 0x00, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3F, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x39, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x38, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x36, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x35, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x2D, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+ {0x0C} /* PMC's suggested value */
+/* { 0x10 } Output Amplitude */
+};
+
+
+
+u_int8_t TWV_E1_75Ohm[25][5] = /* E1 75 Ohm */
+{
+#ifdef PMCC4_DOES_NOT_SUPPORT
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 0 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 1 */
+ {0x0A, 0x00, 0x00, 0x00, 0x00}, /* Sample 2 */
+ {0x28, 0x00, 0x00, 0x00, 0x00}, /* Sample 3 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 4 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 5 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 6 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 7 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 8 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 9 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 10 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 11 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 12 */
+ {0x3A, 0x00, 0x00, 0x00, 0x00}, /* Sample 13 */
+ {0x32, 0x00, 0x00, 0x00, 0x00}, /* Sample 14 */
+ {0x14, 0x00, 0x00, 0x00, 0x00}, /* Sample 15 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 16 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 17 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 18 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 19 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 20 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 21 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 22 */
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* Sample 23 */
+#endif
+ {0x0C} /* Output Amplitude */
+};
+
+
+u_int32_t T1_Equalizer[256] = /* T1 Receiver Equalizer */
+{
+ 0x03FE1840, 0x03F61840, 0x03EE1840, 0x03E61840, /* 000 - 003 */
+ 0x03DE1840, 0x03D61840, 0x03D61840, 0x03D61840, /* 004 - 007 */
+ 0x03CE1840, 0x03CE1840, 0x03CE1840, 0x03CE1840, /* 008 - 011 */
+ 0x03C61840, 0x03C61840, 0x03C61840, 0x0BBE1840, /* 012 - 015 */
+ 0x0BBE1840, 0x0BBE1840, 0x0BBE1840, 0x0BB61840, /* 016 - 019 */
+ 0x0BB61840, 0x0BB61840, 0x0BB61840, 0x13AE1838, /* 020 - 023 */
+ 0x13AE183C, 0x13AE1840, 0x13AE1840, 0x13AE1840, /* 024 - 027 */
+ 0x13AE1840, 0x1BB618B8, 0x1BAE18B8, 0x1BAE18BC, /* 028 - 031 */
+ 0x1BAE18C0, 0x1BAE18C0, 0x23A618C0, 0x23A618C0, /* 032 - 035 */
+ 0x23A618C0, 0x23A618C0, 0x23A618C0, 0x239E18C0, /* 036 - 039 */
+ 0x239E18C0, 0x239E18C0, 0x239E18C0, 0x239E18C0, /* 040 - 043 */
+ 0x2B9618C0, 0x2B9618C0, 0x2B9618C0, 0x33961940, /* 044 - 047 */
+ 0x37961940, 0x37961940, 0x37961940, 0x3F9E19C0, /* 048 - 051 */
+ 0x3F9E19C0, 0x3F9E19C0, 0x3FA61A40, 0x3FA61A40, /* 052 - 055 */
+ 0x3FA61A40, 0x3FA61A40, 0x3F9619C0, 0x3F9619C0, /* 056 - 059 */
+ 0x3F9619C0, 0x3F9619C0, 0x479E1A40, 0x479E1A40, /* 060 - 063 */
+ 0x479E1A40, 0x47961A40, 0x47961A40, 0x47961A40, /* 064 - 067 */
+ 0x47961A40, 0x4F8E1A40, 0x4F8E1A40, 0x4F8E1A40, /* 068 - 071 */
+ 0x4F8E1A40, 0x4F8E1A40, 0x57861A40, 0x57861A40, /* 072 - 075 */
+ 0x57861A40, 0x57861A40, 0x57861A40, 0x5F861AC0, /* 076 - 079 */
+ 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, 0x5F861AC0, /* 080 - 083 */
+ 0x5F861AC0, 0x5F7E1AC0, 0x5F7E1AC0, 0x5F7E1AC0, /* 084 - 087 */
+ 0x5F7E1AC0, 0x5F7E1AC0, 0x677E2AC0, 0x677E2AC0, /* 088 - 091 */
+ 0x677E2AC0, 0x677E2AC0, 0x67762AC0, 0x67762AC0, /* 092 - 095 */
+ 0x67762AC0, 0x67762AC0, 0x67762AC0, 0x6F6E2AC0, /* 096 - 099 */
+ 0x6F6E2AC0, 0x6F6E2AC0, 0x6F6E2AC0, 0x776E3AC0, /* 100 - 103 */
+ 0x776E3AC0, 0x776E3AC0, 0x776E3AC0, 0x7F663AC0, /* 104 - 107 */
+ 0x7F663AC0, 0x7F664AC0, 0x7F664AC0, 0x7F664AC0, /* 108 - 111 */
+ 0x7F664AC0, 0x87665AC0, 0x87665AC0, 0x87665AC0, /* 112 - 115 */
+ 0x87665AC0, 0x87665AC0, 0x875E5AC0, 0x875E5AC0, /* 116 - 119 */
+ 0x875E5AC0, 0x875E5AC0, 0x875E5AC0, 0x8F5E6AC0, /* 120 - 123 */
+ 0x8F5E6AC0, 0x8F5E6AC0, 0x8F5E6AC0, 0x975E7AC0, /* 124 - 127 */
+ 0x975E7AC0, 0x975E7AC0, 0x975E7AC0, 0x9F5E8AC0, /* 128 - 131 */
+ 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, 0x9F5E8AC0, /* 132 - 135 */
+ 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, 0xA7569AC0, /* 136 - 139 */
+ 0xA756AAC0, 0xA756AAC0, 0xA756AAC0, 0xAF4EAAC0, /* 140 - 143 */
+ 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, 0xAF4EAAC0, /* 144 - 147 */
+ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, /* 148 - 151 */
+ 0xB746AAC0, 0xB746AAC0, 0xB746AAC0, 0xB746BAC0, /* 152 - 155 */
+ 0xB746BAC0, 0xB746BAC0, 0xBF4EBB40, 0xBF4EBB40, /* 156 - 159 */
+ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, /* 160 - 163 */
+ 0xBF4EBB40, 0xBF4EBB40, 0xBF4EBB40, 0xBE46CB40, /* 164 - 167 */
+ 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, 0xBE46CB40, /* 168 - 171 */
+ 0xBE46CB40, 0xBE46DB40, 0xBE46DB40, 0xBE46DB40, /* 172 - 175 */
+ 0xC63ECB40, 0xC63ECB40, 0xC63EDB40, 0xC63EDB40, /* 176 - 179 */
+ 0xC63EDB40, 0xC644DB40, 0xC644DB40, 0xC644DB40, /* 180 - 183 */
+ 0xC644DB40, 0xC63CDB40, 0xC63CDB40, 0xC63CDB40, /* 184 - 187 */
+ 0xC63CDB40, 0xD634DB40, 0xD634DB40, 0xD634DB40, /* 188 - 191 */
+ 0xD634DB40, 0xD634DB40, 0xDE2CDB3C, 0xDE2CDB3C, /* 192 - 195 */
+ 0xDE2CDB3C, 0xE62CDB40, 0xE62CDB40, 0xE62CDB40, /* 196 - 199 */
+ 0xE62CDB40, 0xE62CDB40, 0xE62CEB40, 0xE62CEB40, /* 200 - 203 */
+ 0xE62CEB40, 0xEE2CFB40, 0xEE2CFB40, 0xEE2CFB40, /* 204 - 207 */
+ 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, 0xEE2D0B40, /* 208 - 211 */
+ 0xEE2D0B40, 0xF5250B38, 0xF5250B3C, 0xF5250B40, /* 212 - 215 */
+ 0xF5251B40, 0xF5251B40, 0xF5251B40, 0xF5251B40, /* 216 - 219 */
+ 0xF5251B40, 0xFD252B40, 0xFD252B40, 0xFD252B40, /* 220 - 223 */
+ 0xFD252B40, 0xFD252740, 0xFD252740, 0xFD252740, /* 224 - 227 */
+ 0xFD252340, 0xFD252340, 0xFD252340, 0xFD253340, /* 228 - 231 */
+ 0xFD253340, 0xFD253340, 0xFD253340, 0xFD253340, /* 232 - 235 */
+ 0xFD253340, 0xFD253340, 0xFD253340, 0xFC254340, /* 236 - 239 */
+ 0xFD254340, 0xFD254340, 0xFD254344, 0xFC254348, /* 240 - 243 */
+ 0xFC25434C, 0xFD2543BC, 0xFD2543C0, 0xFC2543C0, /* 244 - 247 */
+ 0xFC2343C0, 0xFC2343C0, 0xFD2343C0, 0xFC2143C0, /* 248 - 251 */
+ 0xFC2143C0, 0xFC2153C0, 0xFD2153C0, 0xFC2153C0 /* 252 - 255 */
+};
+
+
+u_int32_t E1_Equalizer[256] = /* E1 Receiver Equalizer */
+{
+ 0x07DE182C, 0x07DE182C, 0x07D6182C, 0x07D6182C, /* 000 - 003 */
+ 0x07D6182C, 0x07CE182C, 0x07CE182C, 0x07CE182C, /* 004 - 007 */
+ 0x07C6182C, 0x07C6182C, 0x07C6182C, 0x07BE182C, /* 008 - 011 */
+ 0x07BE182C, 0x07BE182C, 0x07BE182C, 0x07BE182C, /* 012 - 015 */
+ 0x07B6182C, 0x07B6182C, 0x07B6182C, 0x07B6182C, /* 016 - 019 */
+ 0x07B6182C, 0x07AE182C, 0x07AE182C, 0x07AE182C, /* 020 - 023 */
+ 0x07AE182C, 0x07AE182C, 0x07B618AC, 0x07AE18AC, /* 024 - 027 */
+ 0x07AE18AC, 0x07AE18AC, 0x07AE18AC, 0x07A618AC, /* 028 - 031 */
+ 0x07A618AC, 0x07A618AC, 0x07A618AC, 0x079E18AC, /* 032 - 035 */
+ 0x07A6192C, 0x07A6192C, 0x07A6192C, 0x0FA6192C, /* 036 - 039 */
+ 0x0FA6192C, 0x0F9E192C, 0x0F9E192C, 0x0F9E192C, /* 040 - 043 */
+ 0x179E192C, 0x17A619AC, 0x179E19AC, 0x179E19AC, /* 044 - 047 */
+ 0x179619AC, 0x1F9619AC, 0x1F9619AC, 0x1F8E19AC, /* 048 - 051 */
+ 0x1F8E19AC, 0x1F8E19AC, 0x278E19AC, 0x278E1A2C, /* 052 - 055 */
+ 0x278E1A2C, 0x278E1A2C, 0x278E1A2C, 0x2F861A2C, /* 056 - 059 */
+ 0x2F861A2C, 0x2F861A2C, 0x2F7E1A2C, 0x2F7E1A2C, /* 060 - 063 */
+ 0x2F7E1A2C, 0x377E1A2C, 0x377E1AAC, 0x377E1AAC, /* 064 - 067 */
+ 0x377E1AAC, 0x377E1AAC, 0x3F7E2AAC, 0x3F7E2AAC, /* 068 - 071 */
+ 0x3F762AAC, 0x3F862B2C, 0x3F7E2B2C, 0x477E2B2C, /* 072 - 075 */
+ 0x477E2F2C, 0x477E2F2C, 0x477E2F2C, 0x47762F2C, /* 076 - 079 */
+ 0x4F762F2C, 0x4F762F2C, 0x4F6E2F2C, 0x4F6E2F2C, /* 080 - 083 */
+ 0x4F6E2F2C, 0x576E2F2C, 0x576E2F2C, 0x576E3F2C, /* 084 - 087 */
+ 0x576E3F2C, 0x576E3F2C, 0x5F6E3F2C, 0x5F6E4F2C, /* 088 - 091 */
+ 0x5F6E4F2C, 0x5F6E4F2C, 0x5F664F2C, 0x67664F2C, /* 092 - 095 */
+ 0x67664F2C, 0x675E4F2C, 0x675E4F2C, 0x67664F2C, /* 096 - 099 */
+ 0x67664F2C, 0x67665F2C, 0x6F6E5F2C, 0x6F6E6F2C, /* 100 - 103 */
+ 0x6F6E6F2C, 0x6F6E7F2C, 0x6F6E7F2C, 0x6F6E7F2C, /* 104 - 107 */
+ 0x77667F2C, 0x77667F2C, 0x775E6F2C, 0x775E7F2C, /* 108 - 111 */
+ 0x775E7F2C, 0x7F5E7F2C, 0x7F5E8F2C, 0x7F5E8F2C, /* 112 - 115 */
+ 0x7F5E8F2C, 0x87568F2C, 0x87568F2C, 0x87568F2C, /* 116 - 119 */
+ 0x874E8F2C, 0x874E8F2C, 0x874E8F2C, 0x8F4E9F2C, /* 120 - 123 */
+ 0x8F4E9F2C, 0x8F4EAF2C, 0x8F4EAF2C, 0x8F4EAF2C, /* 124 - 127 */
+ 0x974EAF2C, 0x974EAF2C, 0x974EAB2C, 0x974EAB2C, /* 128 - 131 */
+ 0x974EAB2C, 0x9F4EAB2C, 0x9F4EBB2C, 0x9F4EBB2C, /* 132 - 135 */
+ 0x9F4EBB2C, 0x9F4ECB2C, 0xA74ECB2C, 0xA74ECB2C, /* 136 - 139 */
+ 0xA746CB2C, 0xA746CB2C, 0xA746CB2C, 0xA746DB2C, /* 140 - 143 */
+ 0xAF46DB2C, 0xAF46EB2C, 0xAF46EB2C, 0xAF4EEB2C, /* 144 - 147 */
+ 0xAE4EEB2C, 0xAE4EEB2C, 0xB546FB2C, 0xB554FB2C, /* 148 - 151 */
+ 0xB54CEB2C, 0xB554FB2C, 0xB554FB2C, 0xBD54FB2C, /* 152 - 155 */
+ 0xBD4CFB2C, 0xBD4CFB2C, 0xBD4CFB2C, 0xBD44EB2C, /* 156 - 159 */
+ 0xC544FB2C, 0xC544FB2C, 0xC544FB2C, 0xC5450B2C, /* 160 - 163 */
+ 0xC5450B2C, 0xC5450B2C, 0xCD450B2C, 0xCD450B2C, /* 164 - 167 */
+ 0xCD3D0B2C, 0xCD3D0B2C, 0xCD3D0B2C, 0xD53D0B2C, /* 168 - 171 */
+ 0xD53D0B2C, 0xD53D1B2C, 0xD53D1B2C, 0xD53D1B2C, /* 172 - 175 */
+ 0xDD3D1B2C, 0xDD3D1B2C, 0xDD351B2C, 0xDD351B2C, /* 176 - 179 */
+ 0xDD351B2C, 0xE5351B2C, 0xE5351B2C, 0xE52D1B2C, /* 180 - 183 */
+ 0xE52D1B2C, 0xE52D3B2C, 0xED2D4B2C, 0xED2D1BA8, /* 184 - 187 */
+ 0xED2D1BAC, 0xED2D17AC, 0xED2D17AC, 0xED2D27AC, /* 188 - 191 */
+ 0xF52D27AC, 0xF52D27AC, 0xF52D2BAC, 0xF52D2BAC, /* 192 - 195 */
+ 0xF52D2BAC, 0xFD2D2BAC, 0xFD2B2BAC, 0xFD2B2BAC, /* 196 - 199 */
+ 0xFD2B2BAC, 0xFD2B2BAC, 0xFD232BAC, 0xFD232BAC, /* 200 - 203 */
+ 0xFD232BAC, 0xFD212BAC, 0xFD212BAC, 0xFD292BAC, /* 204 - 207 */
+ 0xFD292BAC, 0xFD2927AC, 0xFD2937AC, 0xFD2923AC, /* 208 - 211 */
+ 0xFD2923AC, 0xFD2923AC, 0xFD2923AC, 0xFD2123AC, /* 212 - 215 */
+ 0xFD2123AC, 0xFD2123AC, 0xFD2133AC, 0xFD2133AC, /* 216 - 219 */
+ 0xFD2133AC, 0xFD2143AC, 0xFD2143AC, 0xFD2143AC, /* 220 - 223 */
+ 0xFC2143AC, 0xFC2143AC, 0xFC1943AC, 0xFC1943AC, /* 224 - 227 */
+ 0xFC1943AC, 0xFC1943AC, 0xFC1953AC, 0xFC1953AC, /* 228 - 231 */
+ 0xFC1953AC, 0xFC1953AC, 0xFC1963AC, 0xFC1963AC, /* 232 - 235 */
+ 0xFC1963AC, 0xFC1973AC, 0xFC1973AC, 0xFC1973AC, /* 236 - 239 */
+ 0xFC1973AC, 0xFC1973AC, 0xFC1983AC, 0xFC1983AC, /* 240 - 243 */
+ 0xFC1983AC, 0xFC1983AC, 0xFC1983AC, 0xFC1993AC, /* 244 - 247 */
+ 0xFC1993AC, 0xFC1993AC, 0xFC19A3AC, 0xFC19A3AC, /* 248 - 251 */
+ 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC, 0xFC19B3AC /* 252 - 255 */
+};
+
+/*** End-of-Files ***/
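
Each transmit waveform table above holds 24 rows of 5 unit-interval sample bytes, followed by a final row whose first byte is the suggested output amplitude. The sketch below shows one plausible way such a table could be pushed into the COMET's XLPG pulse waveform storage through the xlpg_pwave_addr/xlpg_pwave_data registers declared in comet.h; the (sample << 3) | ui address packing and the helper name are illustrative assumptions, not taken from this patch.

/* Illustrative sketch only: write one 24x5 transmit waveform table into
 * the COMET XLPG pulse waveform storage and hand back the amplitude.
 * The (sample << 3) | ui address packing is assumed for illustration.
 */
static u_int8_t
comet_load_waveform (comet_t *comet, u_int8_t table[25][5])
{
    int sample, ui;

    for (sample = 0; sample < 24; sample++)
        for (ui = 0; ui < 5; ui++)
        {
            pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_addr,
                          (sample << 3) | ui);
            pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_data,
                          table[sample][ui]);
        }
    /* row 24, byte 0 carries the suggested output amplitude; the caller
     * applies it to the appropriate XLPG register */
    return table[24][0];
}
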
diff --git a/drivers/staging/cxt1e1/comet_tables.h b/drivers/staging/cxt1e1/comet_tables.h
new file mode 100644
index 00000000000..80424a26a16
--- /dev/null
+++ b/drivers/staging/cxt1e1/comet_tables.h
@@ -0,0 +1,85 @@
+/*
+ * $Id: comet_tables.h,v 1.5 2006/01/02 22:37:31 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_COMET_TBLS_H_
+#define _INC_COMET_TBLS_H_
+
+/*-----------------------------------------------------------------------------
+ * comet_tables.h - Waveform Tables for the PM4351 'COMET'
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.5 $
+ * Last changed on $Date: 2006/01/02 22:37:31 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: comet_tables.h,v $
+ * Revision 1.5 2006/01/02 22:37:31 rickd
+ * Double indexed arrays need sizings to avoid CC errors under
+ * gcc 4.0.0
+ *
+ * Revision 1.4 2005/10/17 23:55:28 rickd
+ * The 75 Ohm transmit waveform is not supported on PMCC4.
+ *
+ * Revision 1.3 2005/09/28 00:10:08 rickd
+ * Add GNU License info. Structures moved to -C- file.
+ *
+ * Revision 1.2 2005/04/28 23:43:04 rickd
+ * Add RCS tracking heading.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+/*****************************************************************************
+*
+* Array names:
+*
+* TWVLongHaul0DB
+* TWVLongHaul7_5DB
+* TWVLongHaul15DB
+* TWVLongHaul22_5DB
+* TWVShortHaul0
+* TWVShortHaul1
+* TWVShortHaul2
+* TWVShortHaul3
+* TWVShortHaul4
+* TWVShortHaul5
+* TWV_E1_120Ohm
+* TWV_E1_75Ohm <not supported>
+* T1_Equalizer
+* E1_Equalizer
+*
+*****************************************************************************/
+
+extern u_int8_t TWVLongHaul0DB[25][5]; /* T1 Long Haul 0 DB */
+extern u_int8_t TWVLongHaul7_5DB[25][5]; /* T1 Long Haul 7.5 DB */
+extern u_int8_t TWVLongHaul15DB[25][5]; /* T1 Long Haul 15 DB */
+extern u_int8_t TWVLongHaul22_5DB[25][5]; /* T1 Long Haul 22.5 DB */
+extern u_int8_t TWVShortHaul0[25][5]; /* T1 Short Haul 0-110 ft */
+extern u_int8_t TWVShortHaul1[25][5]; /* T1 Short Haul 110-220 ft */
+extern u_int8_t TWVShortHaul2[25][5]; /* T1 Short Haul 220-330 ft */
+extern u_int8_t TWVShortHaul3[25][5]; /* T1 Short Haul 330-440 ft */
+extern u_int8_t TWVShortHaul4[25][5]; /* T1 Short Haul 440-550 ft */
+extern u_int8_t TWVShortHaul5[25][5]; /* T1 Short Haul 550-660 ft */
+extern u_int8_t TWV_E1_75Ohm[25][5]; /* E1 75 Ohm */
+extern u_int8_t TWV_E1_120Ohm[25][5]; /* E1 120 Ohm */
+extern u_int32_t T1_Equalizer[256]; /* T1 Receiver Equalizer */
+extern u_int32_t E1_Equalizer[256]; /* E1 Receiver Equalizer */
+
+#endif /* _INC_COMET_TBLS_H_ */

diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
new file mode 100644
index 00000000000..86b49809026
--- /dev/null
+++ b/drivers/staging/cxt1e1/functions.c
@@ -0,0 +1,368 @@
+/* Copyright (C) 2003-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/hdlc.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4.h"
+
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
+ defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+#define _v7_hdlc_ 1
+#else
+#define _v7_hdlc_ 0
+#endif
+
+#if _v7_hdlc_
+#define V7(x) (x ## _v7)
+extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7 (hdlc_device *);
+extern int unregister_hdlc_device_v7 (hdlc_device *);
+
+#else
+#define V7(x) x
+#endif
+
+
+#ifndef USE_MAX_INT_DELAY
+static int dummy = 0;
+
+#endif
+
+extern int log_level;
+extern int drvr_state;
+
+
+#if 1
+u_int32_t
+pci_read_32 (u_int32_t *p)
+{
+#ifdef FLOW_DEBUG
+ u_int32_t v;
+
+ FLUSH_PCI_READ ();
+ v = le32_to_cpu (*p);
+ if (log_level >= LOG_DEBUG)
+ pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
+ return v;
+#else
+ FLUSH_PCI_READ (); /* */
+ return le32_to_cpu (*p);
+#endif
+}
+
+void
+pci_write_32 (u_int32_t *p, u_int32_t v)
+{
+#ifdef FLOW_DEBUG
+ if (log_level >= LOG_DEBUG)
+ pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
+#endif
+ *p = cpu_to_le32 (v);
+    FLUSH_PCI_WRITE ();         /* This routine is called from routines
+                                 * which do multiple register writes,
+                                 * which themselves need flushing between
+                                 * writes in order to guarantee write
+                                 * ordering.  It is less cumbersome to
+                                 * flush here than to investigate and
+                                 * code the many other register-writing
+                                 * routines. */
+}
+#endif
+
+
+void
+pci_flush_write (ci_t * ci)
+{
+ volatile u_int32_t v;
+
+ /* issue a PCI read to flush PCI write thru bridge */
+ v = *(u_int32_t *) &ci->reg->glcd; /* any address would do */
+
+ /*
+ * return nothing, this just reads PCI bridge interface to flush
+ * previously written data
+ */
+}
+
+
+STATIC void
+watchdog_func (unsigned long arg)
+{
+ struct watchdog *wd = (void *) arg;
+
+ if (drvr_state != SBE_DRVR_AVAILABLE)
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
+ return;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ /* Initialize the tq entry only the first time */
+ if (wd->init_tq)
+ {
+ wd->init_tq = 0;
+ wd->tq.routine = wd->func;
+ wd->tq.sync = 0;
+ wd->tq.data = wd->softc;
+ }
+ schedule_task (&wd->tq);
+#else
+ schedule_work (&wd->work);
+#endif
+ mod_timer (&wd->h, jiffies + wd->ticks);
+}
+
+int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *), void *c, int usec)
+{
+ wdp->func = f;
+ wdp->softc = c;
+ wdp->ticks = (HZ) * (usec / 1000) / 1000;
+ INIT_WORK(&wdp->work, (void *)f);
+ init_timer (&wdp->h);
+ {
+ ci_t *ci = (ci_t *) c;
+
+ wdp->h.data = (unsigned long) &ci->wd;
+ }
+ wdp->h.function = watchdog_func;
+ return 0;
+}
+
+void
+OS_uwait (int usec, char *description)
+{
+ int tmp;
+
+ if (usec >= 1000)
+ {
+ mdelay (usec / 1000);
+ /* now delay residual */
+ tmp = (usec / 1000) * 1000; /* round */
+ tmp = usec - tmp; /* residual */
+ if (tmp)
+ { /* wait on residual */
+ udelay (tmp);
+ }
+ } else
+ {
+ udelay (usec);
+ }
+}
+
+/* dummy short delay routine called as a subroutine so that compiler
+ * does not optimize/remove its intent (a short delay)
+ */
+
+void
+OS_uwait_dummy (void)
+{
+#ifndef USE_MAX_INT_DELAY
+ dummy++;
+#else
+ udelay (1);
+#endif
+}
+
+
+void
+OS_sem_init (void *sem, int state)
+{
+ switch (state)
+ {
+ case SEM_TAKEN:
+ init_MUTEX_LOCKED ((struct semaphore *) sem);
+ break;
+ case SEM_AVAILABLE:
+ init_MUTEX ((struct semaphore *) sem);
+ break;
+ default: /* otherwise, set sem.count to state's
+ * value */
+ sema_init (sem, state);
+ break;
+ }
+}
+
+
+int
+sd_line_is_ok (void *user)
+{
+ struct net_device *ndev = (struct net_device *) user;
+
+ return (netif_carrier_ok (ndev));
+}
+
+void
+sd_line_is_up (void *user)
+{
+ struct net_device *ndev = (struct net_device *) user;
+
+ netif_carrier_on (ndev);
+ return;
+}
+
+void
+sd_line_is_down (void *user)
+{
+ struct net_device *ndev = (struct net_device *) user;
+
+ netif_carrier_off (ndev);
+ return;
+}
+
+void
+sd_disable_xmit (void *user)
+{
+ struct net_device *dev = (struct net_device *) user;
+
+ netif_stop_queue (dev);
+ return;
+}
+
+void
+sd_enable_xmit (void *user)
+{
+ struct net_device *dev = (struct net_device *) user;
+
+ netif_wake_queue (dev);
+ return;
+}
+
+int
+sd_queue_stopped (void *user)
+{
+ struct net_device *ndev = (struct net_device *) user;
+
+ return (netif_queue_stopped (ndev));
+}
+
+void sd_recv_consume(void *token, size_t len, void *user)
+{
+ struct net_device *ndev = user;
+ struct sk_buff *skb = token;
+
+ skb->dev = ndev;
+ skb_put (skb, len);
+ skb->protocol = hdlc_type_trans(skb, ndev);
+ netif_rx(skb);
+}
+
+
+/**
+ ** Read a reserved location within the COMET chip as a usable
+ ** VMETRO trigger point or other trace-marking event.
+ **/
+
+#include "comet.h"
+
+extern ci_t *CI; /* dummy pointer to board ZERO's data */
+void
+VMETRO_TRACE (void *x)
+{
+ u_int32_t y = (u_int32_t) x;
+
+ pci_write_32 ((u_int32_t *) &CI->cpldbase->leds, y);
+}
+
+
+void
+VMETRO_TRIGGER (ci_t * ci, int x)
+{
+ comet_t *comet;
+ volatile u_int32_t data;
+
+ comet = ci->port[0].cometbase; /* default to COMET # 0 */
+
+ switch (x)
+ {
+ default:
+ case 0:
+ data = pci_read_32 ((u_int32_t *) &comet->__res24); /* 0x90 */
+ break;
+ case 1:
+ data = pci_read_32 ((u_int32_t *) &comet->__res25); /* 0x94 */
+ break;
+ case 2:
+ data = pci_read_32 ((u_int32_t *) &comet->__res26); /* 0x98 */
+ break;
+ case 3:
+ data = pci_read_32 ((u_int32_t *) &comet->__res27); /* 0x9C */
+ break;
+ case 4:
+ data = pci_read_32 ((u_int32_t *) &comet->__res88); /* 0x220 */
+ break;
+ case 5:
+ data = pci_read_32 ((u_int32_t *) &comet->__res89); /* 0x224 */
+ break;
+ case 6:
+ data = pci_read_32 ((u_int32_t *) &comet->__res8A); /* 0x228 */
+ break;
+ case 7:
+ data = pci_read_32 ((u_int32_t *) &comet->__res8B); /* 0x22C */
+ break;
+ case 8:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA0); /* 0x280 */
+ break;
+ case 9:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA1); /* 0x284 */
+ break;
+ case 10:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA2); /* 0x288 */
+ break;
+ case 11:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA3); /* 0x28C */
+ break;
+ case 12:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA4); /* 0x290 */
+ break;
+ case 13:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA5); /* 0x294 */
+ break;
+ case 14:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA6); /* 0x298 */
+ break;
+ case 15:
+ data = pci_read_32 ((u_int32_t *) &comet->__resA7); /* 0x29C */
+ break;
+ case 16:
+ data = pci_read_32 ((u_int32_t *) &comet->__res74); /* 0x1D0 */
+ break;
+ case 17:
+ data = pci_read_32 ((u_int32_t *) &comet->__res75); /* 0x1D4 */
+ break;
+ case 18:
+ data = pci_read_32 ((u_int32_t *) &comet->__res76); /* 0x1D8 */
+ break;
+ case 19:
+ data = pci_read_32 ((u_int32_t *) &comet->__res77); /* 0x1DC */
+ break;
+ }
+}
+
+
+/*** End-of-File ***/
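
OS_init_watchdog() above converts the caller's period from microseconds to jiffies and installs watchdog_func() as the timer handler, which queues the caller's work function and re-arms the timer on every expiry. A hedged usage sketch follows; the callback name and the explicit first mod_timer() arming are illustrative assumptions, since the code that actually starts the watchdog is not part of this file.

/* Illustrative only: set up a 10 ms per-card monitor watchdog.
 * c4_watchdog_cb and the explicit initial arming are assumptions;
 * the real start-up path lives elsewhere in the driver.
 */
static void c4_watchdog_cb (void *softc)
{
    ci_t *ci = softc;

    /* periodic housekeeping for this card would go here */
    (void) ci;
}

static void
start_card_watchdog (ci_t *ci)
{
    OS_init_watchdog (&ci->wd, c4_watchdog_cb, ci, 10000 /* usec */);
    /* arm the first expiry; watchdog_func() re-arms itself each run */
    mod_timer (&ci->wd.h, jiffies + ci->wd.ticks);
}
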
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
new file mode 100644
index 00000000000..4c8610293fc
--- /dev/null
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -0,0 +1,402 @@
+/* Copyright (C) 2007 One Stop Systems
+ * Copyright (C) 2003-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4_private.h"
+#include "pmcc4.h"
+#include "pmcc4_ioctls.h"
+#include "pmc93x6_eeprom.h"
+#ifdef CONFIG_PROC_FS
+#include "sbeproc.h"
+#endif
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+extern int log_level;
+extern int error_flag;
+extern int drvr_state;
+
+/* forward references */
+void c4_stopwd (ci_t *);
+struct net_device * __init c4_add_dev (hdw_info_t *, int, unsigned long, unsigned long, int, int);
+
+
+struct s_hdw_info hdw_info[MAX_BOARDS];
+
+
+void __init
+show_two (hdw_info_t * hi, int brdno)
+{
+ ci_t *ci;
+ struct pci_dev *pdev;
+ char *bid;
+ char *bp, banner[80];
+ char sn[6];
+
+ bp = banner;
+ memset (banner, 0, 80); /* clear print buffer */
+
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ bid = sbeid_get_bdname (ci);
+ switch (hi->promfmt)
+ {
+ case PROM_FORMAT_TYPE1:
+ memcpy (sn, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy (sn, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ break;
+ default:
+ memset (sn, 0, 6);
+ break;
+ }
+
+ sprintf (banner, "%s: %s S/N %06X, MUSYCC Rev %02X",
+ hi->devname, bid,
+ ((sn[3] << 16) & 0xff0000) |
+ ((sn[4] << 8) & 0x00ff00) |
+ (sn[5] & 0x0000ff),
+ (u_int8_t) hi->revid[0]);
+
+ pr_info("%s\n", banner);
+
+ pdev = hi->pdev[0];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "MUSYCC",
+ (unsigned long) hi->addr_mapped[0], hi->addr[0],
+ hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
+ (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
+
+ pdev = hi->pdev[1];
+ pr_info("%s: %s at v/p=%lx/%lx (%02x:%02x.%x) irq %d\n",
+ hi->devname, "EBUS ",
+ (unsigned long) hi->addr_mapped[1], hi->addr[1],
+ hi->pci_busno, (u_int8_t) PCI_SLOT (pdev->devfn),
+ (u_int8_t) PCI_FUNC (pdev->devfn), pdev->irq);
+}
+
+
+void __init
+hdw_sn_get (hdw_info_t * hi, int brdno)
+{
+ /* obtain hardware EEPROM information */
+ long addr;
+
+ addr = (long) hi->addr_mapped[1] + EEPROM_OFFSET;
+
+ /* read EEPROM with largest known format size... */
+ pmc_eeprom_read_buffer (addr, 0, (char *) hi->mfg_info.data, sizeof (FLD_TYPE2));
+
+#if 0
+ {
+ unsigned char *ucp = (unsigned char *) &hi->mfg_info.data;
+
+ pr_info("eeprom[00]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 0), *(ucp + 1), *(ucp + 2), *(ucp + 3), *(ucp + 4), *(ucp + 5), *(ucp + 6), *(ucp + 7));
+ pr_info("eeprom[08]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 8), *(ucp + 9), *(ucp + 10), *(ucp + 11), *(ucp + 12), *(ucp + 13), *(ucp + 14), *(ucp + 15));
+ pr_info("eeprom[16]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 16), *(ucp + 17), *(ucp + 18), *(ucp + 19), *(ucp + 20), *(ucp + 21), *(ucp + 22), *(ucp + 23));
+ pr_info("eeprom[24]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 24), *(ucp + 25), *(ucp + 26), *(ucp + 27), *(ucp + 28), *(ucp + 29), *(ucp + 30), *(ucp + 31));
+ pr_info("eeprom[32]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 32), *(ucp + 33), *(ucp + 34), *(ucp + 35), *(ucp + 36), *(ucp + 37), *(ucp + 38), *(ucp + 39));
+ pr_info("eeprom[40]: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ *(ucp + 40), *(ucp + 41), *(ucp + 42), *(ucp + 43), *(ucp + 44), *(ucp + 45), *(ucp + 46), *(ucp + 47));
+ }
+#endif
+#if 0
+ pr_info("sn: %x %x %x %x %x %x\n",
+ hi->mfg_info.Serial[0],
+ hi->mfg_info.Serial[1],
+ hi->mfg_info.Serial[2],
+ hi->mfg_info.Serial[3],
+ hi->mfg_info.Serial[4],
+ hi->mfg_info.Serial[5]);
+#endif
+
+ if ((hi->promfmt = pmc_verify_cksum (&hi->mfg_info.data)) == PROM_FORMAT_Unk)
+ {
+ /* bad crc, data is suspect */
+ if (log_level >= LOG_WARN)
+ pr_info("%s: EEPROM cksum error\n", hi->devname);
+ hi->mfg_info_sts = EEPROM_CRCERR;
+ } else
+ hi->mfg_info_sts = EEPROM_OK;
+}
+
+
+void __init
+prep_hdw_info (void)
+{
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ hi->pci_busno = 0xff;
+ hi->pci_slot = 0xff;
+ hi->pci_pin[0] = 0;
+ hi->pci_pin[1] = 0;
+ hi->ndev = 0;
+ hi->addr[0] = 0L;
+ hi->addr[1] = 0L;
+ hi->addr_mapped[0] = 0L;
+ hi->addr_mapped[1] = 0L;
+ }
+}
+
+void
+cleanup_ioremap (void)
+{
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (hi->addr_mapped[0])
+ {
+ iounmap ((void *) (hi->addr_mapped[0]));
+ release_mem_region ((long) hi->addr[0], hi->len[0]);
+ hi->addr_mapped[0] = 0;
+ }
+ if (hi->addr_mapped[1])
+ {
+ iounmap ((void *) (hi->addr_mapped[1]));
+ release_mem_region ((long) hi->addr[1], hi->len[1]);
+ hi->addr_mapped[1] = 0;
+ }
+ }
+}
+
+
+void
+cleanup_devs (void)
+{
+ hdw_info_t *hi;
+ int i;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->pci_slot == 0xff || !hi->ndev)
+ break;
+ c4_stopwd(netdev_priv(hi->ndev));
+#ifdef CONFIG_PROC_FS
+ sbecom_proc_brd_cleanup(netdev_priv(hi->ndev));
+#endif
+ unregister_netdev (hi->ndev);
+ free_irq (hi->pdev[0]->irq, hi->ndev);
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+ free_irq (hi->pdev[1]->irq, hi->ndev);
+#endif
+ OS_kfree (hi->ndev);
+ }
+}
+
+
+STATIC int __init
+c4_hdw_init (struct pci_dev * pdev, int found)
+{
+ hdw_info_t *hi;
+ int i;
+ int fun, slot;
+ unsigned char busno = 0xff;
+
+ /* our MUSYCC chip supports two functions, 0 & 1 */
+ if ((fun = PCI_FUNC (pdev->devfn)) > 1)
+ {
+ pr_warning("unexpected devfun: 0x%x\n", pdev->devfn);
+ return 0;
+ }
+ if (pdev->bus) /* obtain bus number */
+ busno = pdev->bus->number;
+ else
+ busno = 0; /* default for system PCI inconsistency */
+ slot = pdev->devfn & ~0x07;
+
+ /*
+ * Functions 0 & 1 for a given board (identified by same bus(busno) and
+ * slot(slot)) are placed into the same 'hardware' structure. The first
+ * part of the board's functionality will be placed into an unpopulated
+ * element, identified by "slot==(0xff)". The second part of a board's
+ * functionality will match the previously loaded slot/busno.
+ */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ /*
+ * match with board's first found interface, otherwise this is first
+ * found
+ */
+ if ((hi->pci_slot == 0xff) || /* new board */
+ ((hi->pci_slot == slot) && (hi->bus == pdev->bus)))
+ break; /* found for-loop exit */
+ }
+ if (i == MAX_BOARDS) /* no match in above loop means MAX
+ * exceeded */
+ {
+ pr_warning("exceeded number of allowed devices (>%d)?\n", MAX_BOARDS);
+ return 0;
+ }
+ if (pdev->bus)
+ hi->pci_busno = pdev->bus->number;
+ else
+ hi->pci_busno = 0; /* default for system PCI inconsistency */
+ hi->pci_slot = slot;
+ pci_read_config_byte (pdev, PCI_INTERRUPT_PIN, &hi->pci_pin[fun]);
+ pci_read_config_byte (pdev, PCI_REVISION_ID, &hi->revid[fun]);
+ hi->bus = pdev->bus;
+ hi->addr[fun] = pci_resource_start (pdev, 0);
+ hi->len[fun] = pci_resource_end (pdev, 0) - hi->addr[fun] + 1;
+ hi->pdev[fun] = pdev;
+
+ {
+ /*
+ * create device name from module name, plus add the appropriate
+ * board number
+ */
+ char *cp = hi->devname;
+
+ strcpy (cp, KBUILD_MODNAME);
+ cp += strlen (cp); /* reposition */
+ *cp++ = '-';
+ *cp++ = '0' + (found / 2); /* there are two found interfaces per
+ * board */
+ *cp = 0; /* termination */
+ }
+
+ return 1;
+}
+
+
+status_t __init
+c4hw_attach_all (void)
+{
+ hdw_info_t *hi;
+ struct pci_dev *pdev = NULL;
+ int found = 0, i, j;
+
+ error_flag = 0;
+ prep_hdw_info ();
+ /*** scan PCI bus for all possible boards */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ while ((pdev = pci_get_device (PCI_VENDOR_ID_CONEXANT,
+ PCI_DEVICE_ID_CN8474,
+ pdev)))
+#else
+ while ((pdev = pci_find_device (PCI_VENDOR_ID_CONEXANT,
+ PCI_DEVICE_ID_CN8474,
+ pdev)))
+#endif
+ {
+ if (c4_hdw_init (pdev, found))
+ found++;
+ }
+ if (!found)
+ {
+ pr_warning("No boards found\n");
+ return ENODEV;
+ }
+    /* sanity check: ensure consistent hardware was found */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->pci_slot != 0xff && (!hi->addr[0] || !hi->addr[1]))
+ {
+ pr_warning("%s: something very wrong with pci_get_device\n",
+ hi->devname);
+ return EIO;
+ }
+ }
+ /* bring board's memory regions on/line */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->pci_slot == 0xff)
+ break;
+ for (j = 0; j < 2; j++)
+ {
+ if (request_mem_region (hi->addr[j], hi->len[j], hi->devname) == 0)
+ {
+ pr_warning("%s: memory in use, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap ();
+ return ENOMEM;
+ }
+ hi->addr_mapped[j] = (unsigned long) ioremap (hi->addr[j], hi->len[j]);
+ if (!hi->addr_mapped[j])
+ {
+ pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
+ hi->devname, hi->addr[j], hi->len[j]);
+ cleanup_ioremap ();
+ return ENOMEM;
+ }
+#ifdef SBE_MAP_DEBUG
+ pr_warning("%s: io remapped from phys %x to virt %x\n",
+ hi->devname, (u_int32_t) hi->addr[j], (u_int32_t) hi->addr_mapped[j]);
+#endif
+ }
+ }
+
+ drvr_state = SBE_DRVR_AVAILABLE;
+
+    /* All boards are now memory mapped; allow each board access to the system */
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->pci_slot == 0xff)
+ break;
+ if (pci_enable_device (hi->pdev[0]) ||
+ pci_enable_device (hi->pdev[1]))
+ {
+ drvr_state = SBE_DRVR_DOWN;
+ pr_warning("%s: failed to enable card %d slot %d\n",
+ hi->devname, i, hi->pci_slot);
+ cleanup_devs ();
+ cleanup_ioremap ();
+ return EIO;
+ }
+ pci_set_master (hi->pdev[0]);
+ pci_set_master (hi->pdev[1]);
+ if (!(hi->ndev = c4_add_dev (hi, i, (long) hi->addr_mapped[0],
+ (long) hi->addr_mapped[1],
+ hi->pdev[0]->irq,
+ hi->pdev[1]->irq)))
+ {
+ drvr_state = SBE_DRVR_DOWN;
+ cleanup_ioremap ();
+ /* NOTE: c4_add_dev() does its own device cleanup */
+#if 0
+ cleanup_devs ();
+#endif
+ return error_flag; /* error_flag set w/in add_dev() */
+ }
+ show_two (hi, i); /* displays found information */
+ }
+ return 0;
+}
+
+/*** End-of-File ***/
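
c4_hdw_init() above pairs the two PCI functions of each CN8474 board into a single hdw_info_t by matching the bus pointer and a slot key formed from devfn with the function bits masked off. The short sketch below merely restates that keying to make the pairing explicit; it is illustrative, not driver code.

/* Illustrative: both functions of one board share the same bus and the
 * same devfn with the low three (function) bits cleared, so they land
 * in the same hdw_info[] entry.
 */
static int
same_board (const struct pci_dev *a, const struct pci_dev *b)
{
    return (a->bus == b->bus) &&
           ((a->devfn & ~0x07) == (b->devfn & ~0x07));
}
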
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
new file mode 100644
index 00000000000..5c99646cd10
--- /dev/null
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -0,0 +1,581 @@
+/*
+ * $Id: libsbew.h,v 2.1 2005/10/27 18:54:19 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_LIBSBEW_H_
+#define _INC_LIBSBEW_H_
+
+/*-----------------------------------------------------------------------------
+ * libsbew.h - common library elements, shared across multiple boards
+ *
+ * This file contains common ioctl structures and content definitions.
+ *
+ * Copyright (C) 2004-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 2.1 $
+ * Last changed on $Date: 2005/10/27 18:54:19 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: libsbew.h,v $
+ * Revision 2.1 2005/10/27 18:54:19 rickd
+ * Add E1PLAIN support.
+ *
+ * Revision 2.0 2005/09/28 00:10:08 rickd
+ * Customized for PMCC4 comet-per-port design.
+ *
+ * Revision 1.15 2005/03/29 00:51:31 rickd
+ * File imported from C1T3 port, Revision 1.15
+ *-----------------------------------------------------------------------------
+ */
+
+#ifndef __KERNEL__
+#include <sys/types.h>
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/********************************/
+/** set driver logging level **/
+/********************************/
+
+/* routine/ioctl: wancfg_set_loglevel() - SBE_IOC_SET_LOGLEVEL */
+
+#define LOG_NONE 0
+#define LOG_ERROR 1
+#define LOG_SBEBUG3 3 /* hidden, for development/debug usage */
+#define LOG_LSCHANGE 5 /* line state change logging */
+#define LOG_LSIMMEDIATE     6       /* line state change logging w/o hysteresis */
+#define LOG_WARN 8
+#define LOG_MONITOR 10
+#define LOG_SBEBUG12 12 /* hidden, for development/debug usage */
+#define LOG_MONITOR2 14 /* hidden, for development/debug usage */
+#define LOG_DEBUG 16
+
+ /* TEMPORARY DEFINES *//* RLD DEBUG */
+#define c4_LOG_NONE LOG_NONE
+#define c4_LOG_ERROR LOG_ERROR
+#define c4_LOG_WARN LOG_WARN
+#define c4_LOG_sTrace LOG_MONITOR /* do some trace logging into
+ * functions */
+#define c4_LOG_DEBUG LOG_DEBUG
+#define c4_LOG_MAX LOG_DEBUG
+
+
+
+/******************************/
+/** get driver information **/
+/******************************/
+
+/* routine/ioctl: wancfg_get_drvinfo() - SBE_IOC_GET_DRVINFO */
+
+#define REL_STRLEN 80
+ struct sbe_drv_info
+ {
+ int rel_strlen;
+ char release[REL_STRLEN];
+ };
+
+
+/*****************************/
+/** get board information **/
+/*****************************/
+
+/* routine/ioctl: wancfg_get_brdinfo() - SBE_IOC_GET_BRDINFO */
+
+#define CHNM_STRLEN 16
+ struct sbe_brd_info
+ {
+ u_int32_t brd_id; /* SBE's unique PCI VENDOR/DEVID */
+ u_int32_t brd_sn;
+ int brd_chan_cnt; /* number of channels being used */
+ int brd_port_cnt; /* number of ports being used */
+ unsigned char brdno; /* our board number */
+ unsigned char brd_pci_speed; /* PCI speed, 33/66Mhz */
+ u_int8_t brd_mac_addr[6];
+ char first_iname[CHNM_STRLEN]; /* first assigned channel's
+ * interface name */
+ char last_iname[CHNM_STRLEN]; /* last assigned channel's
+ * interface name */
+        u_int8_t brd_hdw_id;      /* on-board unique hdw ID */
+ u_int8_t reserved8[3]; /* alignment preservation */
+ u_int32_t reserved32[3]; /* size preservation */
+ };
+
+/* These IDs are sometimes available thru pci_ids.h, but not currently. */
+
+#define PCI_VENDOR_ID_SBE 0x1176
+#define PCI_DEVICE_ID_WANPMC_C4T1E1 0x0701 /* BID 0x0X, BTYP 0x0X */
+#define PCI_DEVICE_ID_WANPTMC_C4T1E1 0x0702 /* BID 0x41 */
+#define PCI_DEVICE_ID_WANADAPT_HC4T1E1 0x0703 /* BID 0x44 */
+#define PCI_DEVICE_ID_WANPTMC_256T3_T1 0x0704 /* BID 0x42 (T1 Version) */
+#define PCI_DEVICE_ID_WANPCI_C4T1E1 0x0705 /* BID 0x1X, BTYP 0x0X */
+#define PCI_DEVICE_ID_WANPMC_C1T3 0x0706 /* BID 0x45 */
+#define PCI_DEVICE_ID_WANPCI_C2T1E1 0x0707 /* BID 0x1X, BTYP 0x2X */
+#define PCI_DEVICE_ID_WANPCI_C1T1E1 0x0708 /* BID 0x1X, BTYP 0x1X */
+#define PCI_DEVICE_ID_WANPMC_C2T1E1 0x0709 /* BID 0x0X, BTYP 0x2X */
+#define PCI_DEVICE_ID_WANPMC_C1T1E1 0x070A /* BID 0x0X, BTYP 0x1X */
+#define PCI_DEVICE_ID_WANPTMC_256T3_E1 0x070B /* BID 0x46 (E1 Version) */
+#define PCI_DEVICE_ID_WANPTMC_C24TE1 0x070C /* BID 0x47 */
+#define PCI_DEVICE_ID_WANPMC_C4T1E1_L 0x070D /* BID 0x2X, BTYPE 0x0X w/FP
+ * LEDs */
+#define PCI_DEVICE_ID_WANPMC_C2T1E1_L 0x070E /* BID 0x2X, BTYPE 0x2X w/FP
+ * LEDs */
+#define PCI_DEVICE_ID_WANPMC_C1T1E1_L 0x070F /* BID 0x2X, BTYPE 0x1X w/FP
+ * LEDs */
+#define PCI_DEVICE_ID_WANPMC_2SSI 0x0801
+#define PCI_DEVICE_ID_WANPCI_4SSI 0x0802
+#define PCI_DEVICE_ID_WANPMC_2T3E3 0x0900 /* BID 0x43 */
+#define SBE_BOARD_ID(v,id) ((v<<16) | id)
+
+#define BINFO_PCI_SPEED_unk 0
+#define BINFO_PCI_SPEED_33 1
+#define BINFO_PCI_SPEED_66 2
+
+/***************************/
+/** obtain interface ID **/
+/***************************/
+
+/* routine/ioctl: wancfg_get_iid() - SBE_IOC_IID_GET */
+
+ struct sbe_iid_info
+ {
+ u_int32_t channum; /* channel requested */
+ char iname[CHNM_STRLEN]; /* channel's interface name */
+ };
+
+/**************************************/
+/** get board address information **/
+/**************************************/
+
+/* routine/ioctl: wancfg_get_brdaddr() - SBE_IOC_BRDADDR_GET */
+
+ struct sbe_brd_addr
+ {
+ unsigned char func; /* select PCI address space function */
+ unsigned char brdno; /* returns brdno requested */
+ unsigned char irq;
+ unsigned char size; /* returns size of address */
+#define BRDADDR_SIZE_64 1
+#define BRDADDR_SIZE_32 2
+ int reserved1; /* mod64 align, reserved for future use */
+
+ union
+ {
+ unsigned long virt64; /* virtual/mapped address */
+ u_int32_t virt32[2];
+ } v;
+ union
+ {
+ unsigned long phys64; /* physical bus address */
+ u_int32_t phys32[2];
+ } p;
+ int reserved2[4]; /* reserved for future use */
+ };
+
+/**********************************/
+/** read/write board registers **/
+/**********************************/
+
+/* routine/ioctl: wancfg_read_vec() - SBE_IOC_READ_VEC */
+/* routine/ioctl: wancfg_write_vec() - SBE_IOC_WRITE_VEC */
+
+ struct sbecom_wrt_vec
+ {
+ u_int32_t reg;
+ u_int32_t data;
+ };
+
+#define C1T3_CHIP_MSCC_32 0x01000000
+#define C1T3_CHIP_TECT3_8 0x02000000
+#define C1T3_CHIP_CPLD_8 0x03000000
+#define C1T3_CHIP_EEPROM_8 0x04000000
+
+#define W256T3_CHIP_MUSYCC_32 0x02000000
+#define W256T3_CHIP_TEMUX_8 0x10000000
+#define W256T3_CHIP_T8110_8 0x20000000
+#define W256T3_CHIP_T8110_32 0x22000000
+#define W256T3_CHIP_CPLD_8 0x30000000
+#define W256T3_CHIP_EEPROM_8 0x40000000
+
+
+/**********************************/
+/** read write port parameters **/
+/**********************************/
+
+/* routine/ioctl: wancfg_getset_port_param() - SBE_IOC_PORT_GET */
+/* routine/ioctl: wancfg_set_port_param() - SBE_IOC_PORT_SET */
+
+/* NOTE: this structure supports hardware which provides individual per-port control */
+
+struct sbecom_port_param
+{
+ u_int8_t portnum;
+ u_int8_t port_mode; /* variations of T1 or E1 mode */
+ u_int8_t portStatus;
+ u_int8_t portP; /* more port parameters (clock source - 0x80,
+ * LBO - 0xf; bits 0x70 are reserved
+ * for future use) */
+#ifdef SBE_PMCC4_ENABLE
+ u_int32_t hypersize; /* RLD DEBUG - add this in until I learn how to make this entry obsolete */
+#endif
+ int reserved[3-1]; /* reserved for future use */
+ int _res[4];
+};
+
+#define CFG_CLK_PORT_MASK 0x80 /* Loop timing */
+#define CFG_CLK_PORT_INTERNAL 0x80 /* Loop timing */
+#define CFG_CLK_PORT_EXTERNAL 0x00 /* Loop timing */
+
+#define CFG_LBO_MASK 0x0F
+#define CFG_LBO_unk 0 /* <not defined> */
+#define CFG_LBO_LH0 1 /* T1 Long Haul (default) */
+#define CFG_LBO_LH7_5 2 /* T1 Long Haul */
+#define CFG_LBO_LH15 3 /* T1 Long Haul */
+#define CFG_LBO_LH22_5 4 /* T1 Long Haul */
+#define CFG_LBO_SH110 5 /* T1 Short Haul */
+#define CFG_LBO_SH220 6 /* T1 Short Haul */
+#define CFG_LBO_SH330 7 /* T1 Short Haul */
+#define CFG_LBO_SH440 8 /* T1 Short Haul */
+#define CFG_LBO_SH550 9 /* T1 Short Haul */
+#define CFG_LBO_SH660 10 /* T1 Short Haul */
+#define CFG_LBO_E75 11 /* E1 75 Ohm */
+#define CFG_LBO_E120 12 /* E1 120 Ohm (default) */
+
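+/* Illustrative <portP> encoding only, using a hypothetical
+ * struct sbecom_port_param pp: the clock-source bit is OR'ed together with
+ * the 4-bit LBO code defined above, e.g.
+ *
+ *     pp.portP = CFG_CLK_PORT_INTERNAL | CFG_LBO_E120;
+ */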
+
+/*************************************/
+/** read write channel parameters **/
+/*************************************/
+
+/* routine/ioctl: wancfg_getset_chan_param() - SBE_IOC_CHAN_GET */
+/* routine/ioctl: wancfg_set_chan_param() - SBE_IOC_CHAN_SET */
+
+/* NOTE: this structure supports hardware which provides individual per-channel control */
+
+ struct sbecom_chan_param
+ {
+ u_int32_t channum; /* 0: */
+#ifdef SBE_PMCC4_ENABLE
+ u_int32_t card; /* RLD DEBUG - add this in until I learn how to make this entry obsolete */
+ u_int32_t port; /* RLD DEBUG - add this in until I learn how to make this entry obsolete */
+ u_int8_t bitmask[32];
+#endif
+ u_int32_t intr_mask; /* 4: interrupt mask, specify ored
+ * (SS7_)INTR_* to disable */
+ u_int8_t status; /* 8: channel transceiver status (TX_ENABLED,
+ * RX_ENABLED) */
+ u_int8_t chan_mode; /* 9: protocol mode */
+ u_int8_t idlecode; /* A: idle code, in (FLAG_7E, FLAG_FF,
+ * FLAG_00) */
+ u_int8_t pad_fill_count; /* B: pad fill count (1-127), 0 - pad
+ * fill disabled */
+ u_int8_t data_inv; /* C: channel data inversion selection */
+ u_int8_t mode_56k; /* D: 56kbps mode */
+ u_int8_t reserved[2 + 8]; /* E: */
+ };
+
+/* SS7 interrupt signals <intr_mask> */
+#define SS7_INTR_SFILT 0x00000020
+#define SS7_INTR_SDEC 0x00000040
+#define SS7_INTR_SINC 0x00000080
+#define SS7_INTR_SUERR 0x00000100
+/* Other interrupts that can be masked */
+#define INTR_BUFF 0x00000002
+#define INTR_EOM 0x00000004
+#define INTR_MSG 0x00000008
+#define INTR_IDLE 0x00000010
+
+/* transceiver status flags <status> */
+#define TX_ENABLED 0x01
+#define RX_ENABLED 0x02
+
+/* Protocol modes <chan_mode> */
+#define CFG_CH_PROTO_TRANS 0
+#define CFG_CH_PROTO_SS7 1
+#define CFG_CH_PROTO_HDLC_FCS16 2
+#define CFG_CH_PROTO_HDLC_FCS32 3
+#define CFG_CH_PROTO_ISLP_MODE 4
+
+/* Possible idle code assignments <idlecode> */
+#define CFG_CH_FLAG_7E 0
+#define CFG_CH_FLAG_FF 1
+#define CFG_CH_FLAG_00 2
+
+/* data inversion selection <data_inv> */
+#define CFG_CH_DINV_NONE 0x00
+#define CFG_CH_DINV_RX 0x01
+#define CFG_CH_DINV_TX 0x02
+
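+/* One illustrative combination of the codes above, filling a hypothetical
+ * struct sbecom_chan_param cp (not a recommended configuration):
+ *
+ *     cp.status    = TX_ENABLED | RX_ENABLED;
+ *     cp.chan_mode = CFG_CH_PROTO_HDLC_FCS16;
+ *     cp.idlecode  = CFG_CH_FLAG_7E;
+ *     cp.data_inv  = CFG_CH_DINV_NONE;
+ */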
+
+/* Possible resettable chipsets/functions */
+#define RESET_DEV_TEMUX 1
+#define RESET_DEV_TECT3 RESET_DEV_TEMUX
+#define RESET_DEV_PLL 2
+
+
+/*********************************************/
+/** read reset channel thruput statistics **/
+/*********************************************/
+
+/* routine/ioctl: wancfg_get_chan_stats() - SBE_IOC_CHAN_GET_STAT */
+/* routine/ioctl: wancfg_del_chan_stats() - SBE_IOC_CHAN_DEL_STAT */
+/* routine/ioctl: wancfg_get_card_chan_stats() - SBE_IOC_CARD_CHAN_STAT */
+
+ struct sbecom_chan_stats
+ {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors;/* bad packets received */
+ unsigned long tx_errors;/* packet transmit problems */
+ unsigned long rx_dropped; /* no space in linux buffers */
+ unsigned long tx_dropped; /* no space available in linux */
+
+ /* detailed rx_errors: */
+ unsigned long rx_length_errors;
+ unsigned long rx_over_errors; /* receiver ring buff overflow */
+ unsigned long rx_crc_errors; /* recved pkt with crc error */
+ unsigned long rx_frame_errors; /* recv'd frame alignment error */
+ unsigned long rx_fifo_errors; /* recv'r fifo overrun */
+ unsigned long rx_missed_errors; /* receiver missed packet */
+
+ /* detailed tx_errors */
+ unsigned long tx_aborted_errors;
+ unsigned long tx_fifo_errors;
+ unsigned long tx_pending;
+ };
+
+
+/****************************************/
+/** read write card level parameters **/
+/****************************************/
+
+ /* NOTE: this structure supports hardware which provides per-card control */
+
+ struct sbecom_card_param
+ {
+ u_int8_t framing_type; /* 0: CBP or M13 */
+ u_int8_t loopback; /* 1: one of LOOPBACK_* */
+ u_int8_t line_build_out; /* 2: boolean */
+ u_int8_t receive_eq; /* 3: boolean */
+ u_int8_t transmit_ones; /* 4: boolean */
+ u_int8_t clock; /* 5: 0 - internal, i>0 - external (recovered
+ * from framer i) */
+ u_int8_t h110enable; /* 6: */
+ u_int8_t disable_leds; /* 7: */
+ u_int8_t reserved1; /* 8: available - old 256t3 hypersized, but
+ * never used */
+ u_int8_t rear_io; /* 9: rear I/O off/on */
+ u_int8_t disable_tx; /* A: disable TX off/on */
+ u_int8_t mute_los; /* B: mute LOS off/on */
+ u_int8_t los_threshold; /* C: LOS threshold norm/low
+ * (default: norm) */
+ u_int8_t ds1_mode; /* D: DS1 mode T1/E1 (default: T1) */
+ u_int8_t ds3_unchan; /* E: DS3 unchannelized mode off/on */
+ u_int8_t reserved[1 + 16]; /* reserved for expansion - must be
+ * ZERO filled */
+ };
+
+/* framing types <framing_type> */
+#define FRAMING_M13 0
+#define FRAMING_CBP 1
+
+/* card level loopback options <loopback> */
+#define CFG_CARD_LOOPBACK_NONE 0x00
+#define CFG_CARD_LOOPBACK_DIAG 0x01
+#define CFG_CARD_LOOPBACK_LINE 0x02
+#define CFG_CARD_LOOPBACK_PAYLOAD 0x03
+
+/* line level loopback options <loopback> */
+#define CFG_LIU_LOOPBACK_NONE 0x00
+#define CFG_LIU_LOOPBACK_ANALOG 0x10
+#define CFG_LIU_LOOPBACK_DIGITAL 0x11
+#define CFG_LIU_LOOPBACK_REMOTE 0x12
+
+/* card level clock options <clock> */
+#define CFG_CLK_INTERNAL 0x00
+#define CFG_CLK_EXTERNAL 0x01
+
+/* legacy 256T3 loopback values */
+#define LOOPBACK_NONE 0
+#define LOOPBACK_LIU_ANALOG 1
+#define LOOPBACK_LIU_DIGITAL 2
+#define LOOPBACK_FRAMER_DS3 3
+#define LOOPBACK_FRAMER_T1 4
+#define LOOPBACK_LIU_REMOTE 5
+
+/* DS1 mode <ds1_mode> */
+#define CFG_DS1_MODE_MASK 0x0f
+#define CFG_DS1_MODE_T1 0x00
+#define CFG_DS1_MODE_E1 0x01
+#define CFG_DS1_MODE_CHANGE 0x80
+
+/* DS3 unchannelized values <ds3_unchan> */
+#define CFG_DS3_UNCHAN_MASK 0x01
+#define CFG_DS3_UNCHAN_OFF 0x00
+#define CFG_DS3_UNCHAN_ON 0x01
+
+
+/************************************/
+/** read write framer parameters **/
+/************************************/
+
+/* routine/ioctl: wancfg_get_framer() - SBE_IOC_FRAMER_GET */
+/* routine/ioctl: wancfg_set_framer() - SBE_IOC_FRAMER_SET */
+
+ struct sbecom_framer_param
+ {
+ u_int8_t framer_num;
+ u_int8_t frame_type; /* SF, ESF, E1PLAIN, E1CAS, E1CRC, E1CRC+CAS */
+ u_int8_t loopback_type; /* DIGITAL, LINE, PAYLOAD */
+ u_int8_t auto_alarms;/* auto alarms */
+ u_int8_t reserved[12]; /* reserved for expansion - must be
+ * ZERO filled */
+ };
+
+/* frame types <frame_type> */
+#define CFG_FRAME_NONE 0
+#define CFG_FRAME_SF 1 /* T1 B8ZS */
+#define CFG_FRAME_ESF 2 /* T1 B8ZS */
+#define CFG_FRAME_E1PLAIN 3 /* HDB3 w/o CAS,CRC */
+#define CFG_FRAME_E1CAS 4 /* HDB3 */
+#define CFG_FRAME_E1CRC 5 /* HDB3 */
+#define CFG_FRAME_E1CRC_CAS 6 /* HDB3 */
+#define CFG_FRAME_SF_AMI 7 /* T1 AMI */
+#define CFG_FRAME_ESF_AMI 8 /* T1 AMI */
+#define CFG_FRAME_E1PLAIN_AMI 9 /* E1 AMI w/o CAS,CRC */
+#define CFG_FRAME_E1CAS_AMI 10 /* E1 AMI */
+#define CFG_FRAME_E1CRC_AMI 11 /* E1 AMI */
+#define CFG_FRAME_E1CRC_CAS_AMI 12 /* E1 AMI */
+
+#define IS_FRAME_ANY_T1(field) \
+ (((field) == CFG_FRAME_NONE) || \
+ ((field) == CFG_FRAME_SF) || \
+ ((field) == CFG_FRAME_ESF) || \
+ ((field) == CFG_FRAME_SF_AMI) || \
+ ((field) == CFG_FRAME_ESF_AMI))
+
+#define IS_FRAME_ANY_T1ESF(field) \
+ (((field) == CFG_FRAME_ESF) || \
+ ((field) == CFG_FRAME_ESF_AMI))
+
+#define IS_FRAME_ANY_E1(field) \
+ (((field) == CFG_FRAME_E1PLAIN) || \
+ ((field) == CFG_FRAME_E1CAS) || \
+ ((field) == CFG_FRAME_E1CRC) || \
+ ((field) == CFG_FRAME_E1CRC_CAS) || \
+ ((field) == CFG_FRAME_E1PLAIN_AMI) || \
+ ((field) == CFG_FRAME_E1CAS_AMI) || \
+ ((field) == CFG_FRAME_E1CRC_AMI) || \
+ ((field) == CFG_FRAME_E1CRC_CAS_AMI))
+
+#define IS_FRAME_ANY_AMI(field) \
+ (((field) == CFG_FRAME_SF_AMI) || \
+ ((field) == CFG_FRAME_ESF_AMI) || \
+ ((field) == CFG_FRAME_E1PLAIN_AMI) || \
+ ((field) == CFG_FRAME_E1CAS_AMI) || \
+ ((field) == CFG_FRAME_E1CRC_AMI) || \
+ ((field) == CFG_FRAME_E1CRC_CAS_AMI))
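+
+/* Illustrative use of the classification helpers above against a
+ * hypothetical struct sbecom_framer_param fp:
+ *
+ *     if (IS_FRAME_ANY_E1 (fp.frame_type))
+ *             ... E1-specific handling ...
+ */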
+
+/* frame level loopback options <loopback_type> */
+#define CFG_FRMR_LOOPBACK_NONE 0
+#define CFG_FRMR_LOOPBACK_DIAG 1
+#define CFG_FRMR_LOOPBACK_LINE 2
+#define CFG_FRMR_LOOPBACK_PAYLOAD 3
+
+
+/****************************************/
+/** read reset card error statistics **/
+/****************************************/
+
+/* routine/ioctl: wancfg_get_card_stats() - SBE_IOC_CARD_GET_STAT */
+/* routine/ioctl: wancfg_del_card_stats() - SBE_IOC_CARD_DEL_STAT */
+
+ struct temux_card_stats
+ {
+ struct temux_stats
+ {
+ /* TEMUX DS3 PMON counters */
+ u_int32_t lcv;
+ u_int32_t err_framing;
+ u_int32_t febe;
+ u_int32_t err_cpbit;
+ u_int32_t err_parity;
+ /* TEMUX DS3 FRMR status */
+ u_int8_t los;
+ u_int8_t oof;
+ u_int8_t red;
+ u_int8_t yellow;
+ u_int8_t idle;
+ u_int8_t ais;
+ u_int8_t cbit;
+ /* TEMUX DS3 FEAC receiver */
+ u_int8_t feac;
+ u_int8_t feac_last;
+ } t;
+ u_int32_t tx_pending; /* total */
+ };
+
+/**************************************************************/
+
+ struct wancfg
+ {
+ int cs, ds;
+ char *p;
+ };
+ typedef struct wancfg wcfg_t;
+
+ extern wcfg_t *wancfg_init (char *, char *);
+ extern int wancfg_card_blink (wcfg_t *, int);
+ extern int wancfg_ctl (wcfg_t *, int, void *, int, void *, int);
+ extern int wancfg_del_card_stats (wcfg_t *);
+ extern int wancfg_del_chan_stats (wcfg_t *, int);
+ extern int wancfg_enable_ports (wcfg_t *, int);
+ extern int wancfg_free (wcfg_t *);
+ extern int wancfg_get_brdaddr (wcfg_t *, struct sbe_brd_addr *);
+ extern int wancfg_get_brdinfo (wcfg_t *, struct sbe_brd_info *);
+ extern int wancfg_get_card (wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_get_card_chan_stats (wcfg_t *, struct sbecom_chan_stats *);
+ extern int wancfg_get_card_sn (wcfg_t *);
+ extern int wancfg_get_card_stats (wcfg_t *, struct temux_card_stats *);
+ extern int wancfg_get_chan (wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_get_chan_stats (wcfg_t *, int, struct sbecom_chan_stats *);
+ extern int wancfg_get_drvinfo (wcfg_t *, int, struct sbe_drv_info *);
+ extern int wancfg_get_framer (wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_get_iid (wcfg_t *, int, struct sbe_iid_info *);
+ extern int wancfg_get_sn (wcfg_t *, unsigned int *);
+ extern int wancfg_read (wcfg_t *, int, struct sbecom_wrt_vec *);
+ extern int wancfg_reset_device (wcfg_t *, int);
+ extern int wancfg_set_card (wcfg_t *, struct sbecom_card_param *);
+ extern int wancfg_set_chan (wcfg_t *, int, struct sbecom_chan_param *);
+ extern int wancfg_set_framer (wcfg_t *, int, struct sbecom_framer_param *);
+ extern int wancfg_set_loglevel (wcfg_t *, uint);
+ extern int wancfg_write (wcfg_t *, int, struct sbecom_wrt_vec *);
+
+#ifdef NOT_YET_COMMON
+ extern int wancfg_get_tsioc (wcfg_t *, struct wanc1t3_ts_hdr *, struct wanc1t3_ts_param *);
+ extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
new file mode 100644
index 00000000000..134e7568024
--- /dev/null
+++ b/drivers/staging/cxt1e1/linux.c
@@ -0,0 +1,1356 @@
+/* Copyright (C) 2007-2008 One Stop Systems
+ * Copyright (C) 2003-2006 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4.h"
+#include "pmcc4_ioctls.h"
+#include "pmcc4_private.h"
+#include "sbeproc.h"
+
+/*****************************************************************************************
+ * Error out early if we have compiler trouble.
+ *
+ * (This section is included from the kernel's init/main.c as a friendly
+ * spiderman recommendation...)
+ *
+ * Versions of gcc older than that listed below may actually compile and link
+ * okay, but the end product can have subtle run time bugs. To avoid associated
+ * bogus bug reports, we flatly refuse to compile with a gcc that is known to be
+ * too old from the very beginning.
+ */
+#if (__GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 2)
+#error Sorry, your GCC is too old. It builds incorrect kernels.
+#endif
+
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
+#warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended.
+#endif
+
+/*****************************************************************************************/
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#define CHANNAME "hdlc"
+
+/*******************************************************************/
+/* forward references */
+status_t c4_chan_work_init (mpi_t *, mch_t *);
+void musycc_wq_chan_restart (void *);
+status_t __init c4_init (ci_t *, u_char *, u_char *);
+status_t __init c4_init2 (ci_t *);
+ci_t *__init c4_new (void *);
+int __init c4hw_attach_all (void);
+void __init hdw_sn_get (hdw_info_t *, int);
+
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+irqreturn_t c4_ebus_intr_th_handler (void *);
+
+#endif
+int c4_frame_rw (ci_t *, struct sbecom_port_param *);
+status_t c4_get_port (ci_t *, int);
+int c4_loop_port (ci_t *, int, u_int8_t);
+int c4_musycc_rw (ci_t *, struct c4_musycc_param *);
+int c4_new_chan (ci_t *, int, int, void *);
+status_t c4_set_port (ci_t *, int);
+int c4_pld_rw (ci_t *, struct sbecom_port_param *);
+void cleanup_devs (void);
+void cleanup_ioremap (void);
+status_t musycc_chan_down (ci_t *, int);
+irqreturn_t musycc_intr_th_handler (void *);
+int musycc_start_xmit (ci_t *, int, void *);
+
+extern char pmcc4_OSSI_release[];
+extern ci_t *CI;
+extern struct s_hdw_info hdw_info[];
+
+#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
+ defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
+#define _v7_hdlc_ 1
+#else
+#define _v7_hdlc_ 0
+#endif
+
+#if _v7_hdlc_
+#define V7(x) (x ## _v7)
+extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *);
+extern int register_hdlc_device_v7 (hdlc_device *);
+extern int unregister_hdlc_device_v7 (hdlc_device *);
+
+#else
+#define V7(x) x
+#endif
+
+int error_flag; /* module load error reporting */
+int log_level = LOG_ERROR;
+int log_level_default = LOG_ERROR;
+module_param(log_level, int, 0444);
+
+int max_mru = MUSYCC_MRU;
+int max_mru_default = MUSYCC_MRU;
+module_param(max_mru, int, 0444);
+
+int max_mtu = MUSYCC_MTU;
+int max_mtu_default = MUSYCC_MTU;
+module_param(max_mtu, int, 0444);
+
+int max_txdesc_used = MUSYCC_TXDESC_MIN;
+int max_txdesc_default = MUSYCC_TXDESC_MIN;
+module_param(max_txdesc_used, int, 0444);
+
+int max_rxdesc_used = MUSYCC_RXDESC_MIN;
+int max_rxdesc_default = MUSYCC_RXDESC_MIN;
+module_param(max_rxdesc_used, int, 0444);
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+void *
+getuserbychan (int channum)
+{
+ mch_t *ch;
+
+ ch = c4_find_chan (channum);
+ return ch ? ch->user : 0;
+}
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#define DEV_TO_PRIV(dev) ( * (struct c4_priv **) ((hdlc_device*)(dev)+1))
+#else
+
+char *
+get_hdlc_name (hdlc_device * hdlc)
+{
+ struct c4_priv *priv = hdlc->priv;
+ struct net_device *dev = getuserbychan (priv->channum);
+
+ return dev->name;
+}
+#endif
+
+
+static status_t
+mkret (int bsd)
+{
+ if (bsd > 0)
+ return -bsd;
+ else
+ return bsd;
+}
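+
+/*
+ * For illustration: mkret() maps the driver's positive BSD-style error codes
+ * onto negative Linux errno values, e.g. mkret (ENOENT) == -ENOENT, while
+ * zero and already-negative values pass through unchanged.
+ */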
+
+/***************************************************************************/
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+#include <linux/workqueue.h>
+
+/***
+ * One workqueue (wq) per port (since musycc allows simultaneous group
+ * commands), with individual data for each channel:
+ *
+ * mpi_t -> struct workqueue_struct *wq_port; (dynamically allocated using
+ * create_workqueue())
+ *
+ * With work structure (work) statically allocated for each channel:
+ *
+ * mch_t -> struct work_struct ch_work; (statically allocated using ???)
+ *
+ ***/
+
+
+/*
+ * Called by the start transmit routine when a channel TX_ENABLE is to be
+ * issued. This queues the transmission start request among other channels
+ * within a port's group.
+ */
+void
+c4_wk_chan_restart (mch_t * ch)
+{
+ mpi_t *pi = ch->up;
+
+#ifdef RLD_RESTART_DEBUG
+ pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n",
+ __func__, pi->portnum, ch->channum, ch);
+#endif
+
+ /* create new entry w/in workqueue for this channel and let'er rip */
+
+ /** queue_work (struct workqueue_struct *queue,
+ ** struct work_struct *work);
+ **/
+ queue_work (pi->wq_port, &ch->ch_work);
+}
+
+status_t
+c4_wk_chan_init (mpi_t * pi, mch_t * ch)
+{
+ /*
+ * this will be used to restart a stopped channel
+ */
+
+ /** INIT_WORK (struct work_struct *work,
+ ** void (*function)(void *),
+ ** void *data);
+ **/
+ INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart);
+ return 0; /* success */
+}
+
+status_t
+c4_wq_port_init (mpi_t * pi)
+{
+
+ char name[16], *np; /* NOTE: name of the queue limited by system
+ * to 10 characters */
+
+ if (pi->wq_port)
+ return 0; /* already initialized */
+
+ np = name;
+ memset (name, 0, 16);
+ sprintf (np, "%s%d", pi->up->devname, pi->portnum); /* e.g. "pmcc4-01" */
+
+#ifdef RLD_RESTART_DEBUG
+ pr_info(">> %s: creating workqueue <%s> for Port %d.\n",
+ __func__, name, pi->portnum); /* RLD DEBUG */
+#endif
+ if (!(pi->wq_port = create_singlethread_workqueue (name)))
+ return ENOMEM;
+ return 0; /* success */
+}
+
+void
+c4_wq_port_cleanup (mpi_t * pi)
+{
+ /*
+ * PORT POINT: cannot call this if WQ is statically allocated w/in
+ * structure since it calls kfree(wq);
+ */
+ if (pi->wq_port)
+ {
+ destroy_workqueue (pi->wq_port); /* this also calls
+ * flush_workqueue() */
+ pi->wq_port = 0;
+ }
+}
+#endif
+
+/***************************************************************************/
+
+irqreturn_t
+c4_linux_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *ndev = dev_instance;
+
+ return musycc_intr_th_handler(netdev_priv(ndev));
+}
+
+
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+irqreturn_t
+c4_ebus_interrupt (int irq, void *dev_instance)
+{
+ struct net_device *ndev = dev_instance;
+
+ return c4_ebus_intr_th_handler(netdev_priv(ndev));
+}
+#endif
+
+
+static int
+void_open (struct net_device * ndev)
+{
+ pr_info("%s: trying to open master device !\n", ndev->name);
+ return -1;
+}
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
+
+/** Linux 2.4.18-19 **/
+STATIC int
+chan_open (hdlc_device * hdlc)
+{
+ status_t ret;
+
+ if ((ret = c4_chan_up (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum)))
+ return -ret;
+ MOD_INC_USE_COUNT;
+ netif_start_queue (hdlc_to_dev (hdlc));
+ return 0; /* no error = success */
+}
+
+#else
+
+/** Linux 2.4.20 and higher **/
+STATIC int
+chan_open (struct net_device * ndev)
+{
+ hdlc_device *hdlc = dev_to_hdlc (ndev);
+ status_t ret;
+
+ hdlc->proto = IF_PROTO_HDLC;
+ if ((ret = hdlc_open (hdlc)))
+ {
+ pr_info("hdlc_open failure, err %d.\n", ret);
+ return ret;
+ }
+ if ((ret = c4_chan_up (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum)))
+ return -ret;
+ MOD_INC_USE_COUNT;
+ netif_start_queue (hdlc_to_dev (hdlc));
+ return 0; /* no error = success */
+}
+#endif
+
+#else
+
+/** Linux 2.6 **/
+STATIC int
+chan_open (struct net_device * ndev)
+{
+ hdlc_device *hdlc = dev_to_hdlc (ndev);
+ const struct c4_priv *priv = hdlc->priv;
+ int ret;
+
+ if ((ret = hdlc_open (ndev)))
+ {
+ pr_info("hdlc_open failure, err %d.\n", ret);
+ return ret;
+ }
+ if ((ret = c4_chan_up (priv->ci, priv->channum)))
+ return -ret;
+ try_module_get (THIS_MODULE);
+ netif_start_queue (ndev);
+ return 0; /* no error = success */
+}
+#endif
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
+
+/** Linux 2.4.18-19 **/
+STATIC void
+chan_close (hdlc_device * hdlc)
+{
+ netif_stop_queue (hdlc_to_dev (hdlc));
+ musycc_chan_down ((ci_t *) 0, DEV_TO_PRIV (hdlc)->channum);
+ MOD_DEC_USE_COUNT;
+}
+#else
+
+/** Linux 2.4.20 and higher **/
+STATIC int
+chan_close (struct net_device * ndev)
+{
+ hdlc_device *hdlc = dev_to_hdlc (ndev);
+
+ netif_stop_queue (hdlc_to_dev (hdlc));
+ musycc_chan_down ((ci_t *) 0, DEV_TO_PRIV (hdlc)->channum);
+ hdlc_close (hdlc);
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+#endif
+
+#else
+
+/** Linux 2.6 **/
+STATIC int
+chan_close (struct net_device * ndev)
+{
+ hdlc_device *hdlc = dev_to_hdlc (ndev);
+ const struct c4_priv *priv = hdlc->priv;
+
+ netif_stop_queue (ndev);
+ musycc_chan_down ((ci_t *) 0, priv->channum);
+ hdlc_close (ndev);
+ module_put (THIS_MODULE);
+ return 0;
+}
+#endif
+
+
+#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
+
+/** Linux 2.4.18-19 **/
+STATIC int
+chan_ioctl (hdlc_device * hdlc, struct ifreq * ifr, int cmd)
+{
+ if (cmd == HDLCSCLOCK)
+ {
+ ifr->ifr_ifru.ifru_ivalue = LINE_DEFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+#endif
+
+
+#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
+STATIC int
+chan_dev_ioctl (struct net_device * hdlc, struct ifreq * ifr, int cmd)
+{
+ if (cmd == HDLCSCLOCK)
+ {
+ ifr->ifr_ifru.ifru_ivalue = LINE_DEFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+#else
+STATIC int
+chan_dev_ioctl (struct net_device * dev, struct ifreq * ifr, int cmd)
+{
+ return hdlc_ioctl (dev, ifr, cmd);
+}
+
+
+STATIC int
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+chan_attach_noop (hdlc_device * hdlc, unsigned short foo_1, unsigned short foo_2)
+#else
+chan_attach_noop (struct net_device * ndev, unsigned short foo_1, unsigned short foo_2)
+#endif
+{
+ return 0; /* our driver has nothing to do here, show's
+ * over, go home */
+}
+#endif
+
+
+STATIC struct net_device_stats *
+chan_get_stats (struct net_device * ndev)
+{
+ mch_t *ch;
+ struct net_device_stats *nstats;
+ struct sbecom_chan_stats *stats;
+ int channum;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ channum = DEV_TO_PRIV (ndev)->channum;
+#else
+ {
+ struct c4_priv *priv;
+
+ priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
+ channum = priv->channum;
+ }
+#endif
+
+ ch = c4_find_chan (channum);
+ if (ch == NULL)
+ return NULL;
+
+ nstats = &ndev->stats;
+ stats = &ch->s;
+
+ memset (nstats, 0, sizeof (struct net_device_stats));
+ nstats->rx_packets = stats->rx_packets;
+ nstats->tx_packets = stats->tx_packets;
+ nstats->rx_bytes = stats->rx_bytes;
+ nstats->tx_bytes = stats->tx_bytes;
+ nstats->rx_errors = stats->rx_length_errors +
+ stats->rx_over_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors +
+ stats->rx_fifo_errors +
+ stats->rx_missed_errors;
+ nstats->tx_errors = stats->tx_dropped +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors;
+ nstats->rx_dropped = stats->rx_dropped;
+ nstats->tx_dropped = stats->tx_dropped;
+
+ nstats->rx_length_errors = stats->rx_length_errors;
+ nstats->rx_over_errors = stats->rx_over_errors;
+ nstats->rx_crc_errors = stats->rx_crc_errors;
+ nstats->rx_frame_errors = stats->rx_frame_errors;
+ nstats->rx_fifo_errors = stats->rx_fifo_errors;
+ nstats->rx_missed_errors = stats->rx_missed_errors;
+
+ nstats->tx_aborted_errors = stats->tx_aborted_errors;
+ nstats->tx_fifo_errors = stats->tx_fifo_errors;
+
+ return nstats;
+}
+
+
+static ci_t *
+get_ci_by_dev (struct net_device * ndev)
+{
+ return (ci_t *)(netdev_priv(ndev));
+}
+
+
+#if !defined(GENERIC_HDLC_VERSION) || (GENERIC_HDLC_VERSION < 4)
+STATIC int
+c4_linux_xmit (hdlc_device * hdlc, struct sk_buff * skb)
+{
+ int rval;
+
+ rval = musycc_start_xmit (DEV_TO_PRIV (hdlc)->ci, DEV_TO_PRIV (hdlc)->channum, skb);
+ return -rval;
+}
+#else /* new */
+STATIC int
+c4_linux_xmit (struct sk_buff * skb, struct net_device * ndev)
+{
+ const struct c4_priv *priv;
+ int rval;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ priv = DEV_TO_PRIV (ndev);
+#else
+ hdlc_device *hdlc = dev_to_hdlc (ndev);
+
+ priv = hdlc->priv;
+#endif
+
+ rval = musycc_start_xmit (priv->ci, priv->channum, skb);
+ return -rval;
+}
+#endif /* GENERIC_HDLC_VERSION */
+
+static const struct net_device_ops chan_ops = {
+ .ndo_open = chan_open,
+ .ndo_stop = chan_close,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = chan_dev_ioctl,
+ .ndo_get_stats = chan_get_stats,
+};
+
+STATIC struct net_device *
+create_chan (struct net_device * ndev, ci_t * ci,
+ struct sbecom_chan_param * cp)
+{
+ hdlc_device *hdlc;
+ struct net_device *dev;
+ hdw_info_t *hi;
+ int ret;
+
+ if (c4_find_chan (cp->channum))
+ return 0; /* channel already exists */
+
+ {
+ struct c4_priv *priv;
+
+ /* allocate then fill in private data structure */
+ priv = OS_kmalloc (sizeof (struct c4_priv));
+ if (!priv)
+ {
+ pr_warning("%s: no memory for net_device !\n", ci->devname);
+ return 0;
+ }
+ dev = alloc_hdlcdev (priv);
+ if (!dev)
+ {
+ pr_warning("%s: no memory for hdlc_device !\n", ci->devname);
+ OS_kfree (priv);
+ return 0;
+ }
+ priv->ci = ci;
+ priv->channum = cp->channum;
+ }
+
+ hdlc = dev_to_hdlc (dev);
+
+ dev->base_addr = 0; /* not I/O mapped */
+ dev->irq = ndev->irq;
+ dev->type = ARPHRD_RAWHDLC;
+ *dev->name = 0; /* default ifconfig name = "hdlc" */
+
+ hi = (hdw_info_t *) ci->hdw_info;
+ if (hi->mfg_info_sts == EEPROM_OK)
+ {
+ switch (hi->promfmt)
+ {
+ case PROM_FORMAT_TYPE1:
+ memcpy (dev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy (dev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ break;
+ default:
+ memset (dev->dev_addr, 0, 6);
+ break;
+ }
+ } else
+ {
+ memset (dev->dev_addr, 0, 6);
+ }
+
+ hdlc->xmit = c4_linux_xmit;
+
+ dev->netdev_ops = &chan_ops;
+ /*
+ * The native hdlc stack calls this 'attach' routine during
+ * hdlc_raw_ioctl(), passing parameters for line encoding and parity.
+ * Since the hdlc_raw_ioctl() path does not check whether an 'attach'
+ * routine is actually registered, we supply a dummy routine which
+ * does nothing (encoding and parity are set up for our driver via a
+ * special configuration application).
+ */
+
+ hdlc->attach = chan_attach_noop;
+
+ rtnl_unlock (); /* needed due to Ioctl calling sequence */
+ ret = register_hdlc_device (dev);
+ /* NOTE: <stats> setting must occur AFTER registration in order to "take" */
+ dev->tx_queue_len = MAX_DEFAULT_IFQLEN;
+
+ rtnl_lock (); /* needed due to Ioctl calling sequence */
+ if (ret)
+ {
+ if (log_level >= LOG_WARN)
+ pr_info("%s: create_chan[%d] registration error = %d.\n",
+ ci->devname, cp->channum, ret);
+ free_netdev (dev); /* cleanup */
+ return 0; /* failed to register */
+ }
+ return dev;
+}
+
+
+/* get port information and pass it back to the caller (via the user data pointer) */
+STATIC status_t
+do_get_port (struct net_device * ndev, void *data)
+{
+ int ret;
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ ret = mkret (c4_get_port (ci, pp.portnum));
+ if (ret)
+ return ret;
+ if (copy_to_user (data, &ci->port[pp.portnum].p,
+ sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
+}
+
+/* this function copies the user data and then calls the real action function */
+STATIC status_t
+do_set_port (struct net_device * ndev, void *data)
+{
+ ci_t *ci; /* ci stands for card information */
+ struct sbecom_port_param pp;/* copy data to kernel land */
+
+ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ if (pp.portnum >= MUSYCC_NPORTS)
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL; /* get card info */
+
+ if (pp.portnum >= ci->max_port) /* sanity check */
+ return ENXIO;
+
+ memcpy (&ci->port[pp.portnum].p, &pp, sizeof (struct sbecom_port_param));
+ return mkret (c4_set_port (ci, pp.portnum));
+}
+
+/* set the port loopback mode as directed */
+STATIC status_t
+do_port_loop (struct net_device * ndev, void *data)
+{
+ struct sbecom_port_param pp;
+ ci_t *ci;
+
+ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ return mkret (c4_loop_port (ci, pp.portnum, pp.port_mode));
+}
+
+/* set the specified register with the given value / or just read it */
+STATIC status_t
+do_framer_rw (struct net_device * ndev, void *data)
+{
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = mkret (c4_frame_rw (ci, &pp));
+ if (ret)
+ return ret;
+ if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
+}
+
+/* set the specified register with the given value / or just read it */
+STATIC status_t
+do_pld_rw (struct net_device * ndev, void *data)
+{
+ struct sbecom_port_param pp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = mkret (c4_pld_rw (ci, &pp));
+ if (ret)
+ return ret;
+ if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param)))
+ return -EFAULT;
+ return 0;
+}
+
+/* set the specified register with the given value / or just read it */
+STATIC status_t
+do_musycc_rw (struct net_device * ndev, void *data)
+{
+ struct c4_musycc_param mp;
+ ci_t *ci;
+ int ret;
+
+ if (copy_from_user (&mp, data, sizeof (struct c4_musycc_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ ret = mkret (c4_musycc_rw (ci, &mp));
+ if (ret)
+ return ret;
+ if (copy_to_user (data, &mp, sizeof (struct c4_musycc_param)))
+ return -EFAULT;
+ return 0;
+}
+
+STATIC status_t
+do_get_chan (struct net_device * ndev, void *data)
+{
+ struct sbecom_chan_param cp;
+ int ret;
+
+ if (copy_from_user (&cp, data,
+ sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+
+ if ((ret = mkret (c4_get_chan (cp.channum, &cp))))
+ return ret;
+
+ if (copy_to_user (data, &cp, sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+ return 0;
+}
+
+STATIC status_t
+do_set_chan (struct net_device * ndev, void *data)
+{
+ struct sbecom_chan_param cp;
+ int ret;
+ ci_t *ci;
+
+ if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ switch (ret = mkret (c4_set_chan (cp.channum, &cp)))
+ {
+ case 0:
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+STATIC status_t
+do_create_chan (struct net_device * ndev, void *data)
+{
+ ci_t *ci;
+ struct net_device *dev;
+ struct sbecom_chan_param cp;
+ int ret;
+
+ if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+ ci = get_ci_by_dev (ndev);
+ if (!ci)
+ return -EINVAL;
+ dev = create_chan (ndev, ci, &cp);
+ if (!dev)
+ return -EBUSY;
+ ret = mkret (c4_new_chan (ci, cp.port, cp.channum, dev));
+ if (ret)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ rtnl_unlock (); /* needed due to Ioctl calling sequence */
+ V7 (unregister_hdlc_device) (dev_to_hdlc (dev));
+ rtnl_lock (); /* needed due to Ioctl calling sequence */
+ OS_kfree (DEV_TO_PRIV (dev));
+ OS_kfree (dev);
+#else
+ rtnl_unlock (); /* needed due to Ioctl calling sequence */
+ unregister_hdlc_device (dev);
+ rtnl_lock (); /* needed due to Ioctl calling sequence */
+ free_netdev (dev);
+#endif
+ }
+ return ret;
+}
+
+STATIC status_t
+do_get_chan_stats (struct net_device * ndev, void *data)
+{
+ struct c4_chan_stats_wrap ccs;
+ int ret;
+
+ if (copy_from_user (&ccs, data,
+ sizeof (struct c4_chan_stats_wrap)))
+ return -EFAULT;
+ switch (ret = mkret (c4_get_chan_stats (ccs.channum, &ccs.stats)))
+ {
+ case 0:
+ break;
+ default:
+ return ret;
+ }
+ if (copy_to_user (data, &ccs,
+ sizeof (struct c4_chan_stats_wrap)))
+ return -EFAULT;
+ return 0;
+}
+STATIC status_t
+do_set_loglevel (struct net_device * ndev, void *data)
+{
+ unsigned int log_level;
+
+ if (copy_from_user (&log_level, data, sizeof (int)))
+ return -EFAULT;
+ sbecom_set_loglevel (log_level);
+ return 0;
+}
+
+STATIC status_t
+do_deluser (struct net_device * ndev, int lockit)
+{
+ if (ndev->flags & IFF_UP)
+ return -EBUSY;
+
+ {
+ ci_t *ci;
+ mch_t *ch;
+ const struct c4_priv *priv;
+ int channum;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ priv = DEV_TO_PRIV (ndev);
+#else
+ priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv;
+#endif
+ ci = priv->ci;
+ channum = priv->channum;
+
+ ch = c4_find_chan (channum);
+ if (ch == NULL)
+ return -ENOENT;
+ ch->user = 0; /* will be freed, below */
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ if (lockit)
+ rtnl_unlock (); /* needed if Ioctl calling sequence */
+ V7 (unregister_hdlc_device) (dev_to_hdlc (ndev));
+ if (lockit)
+ rtnl_lock (); /* needed if Ioctl calling sequence */
+ OS_kfree (DEV_TO_PRIV (ndev));
+ OS_kfree (ndev);
+#else
+ if (lockit)
+ rtnl_unlock (); /* needed if Ioctl calling sequence */
+ unregister_hdlc_device (ndev);
+ if (lockit)
+ rtnl_lock (); /* needed if Ioctl calling sequence */
+ free_netdev (ndev);
+#endif
+ return 0;
+}
+
+int
+do_del_chan (struct net_device * musycc_dev, void *data)
+{
+ struct sbecom_chan_param cp;
+ char buf[sizeof (CHANNAME) + 3];
+ struct net_device *dev;
+ int ret;
+
+ if (copy_from_user (&cp, data,
+ sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+ sprintf (buf, CHANNAME "%d", cp.channum);
+ if (!(dev = dev_get_by_name (&init_net, buf)))
+ return -ENOENT;
+ dev_put (dev);
+ ret = do_deluser (dev, 1);
+ if (ret)
+ return ret;
+ return c4_del_chan (cp.channum);
+}
+int c4_reset_board (void *);
+
+int
+do_reset (struct net_device * musycc_dev, void *data)
+{
+ const struct c4_priv *priv;
+ int i;
+
+ for (i = 0; i < 128; i++)
+ {
+ struct net_device *ndev;
+ char buf[sizeof (CHANNAME) + 3];
+
+ sprintf (buf, CHANNAME "%d", i);
+ if (!(ndev = dev_get_by_name(&init_net, buf)))
+ continue;
+ priv = dev_to_hdlc (ndev)->priv;
+
+ if ((unsigned long) (priv->ci) ==
+ (unsigned long) (netdev_priv(musycc_dev)))
+ {
+ ndev->flags &= ~IFF_UP;
+ dev_put (ndev);
+ netif_stop_queue (ndev);
+ do_deluser (ndev, 1);
+ } else
+ dev_put (ndev);
+ }
+ return 0;
+}
+
+int
+do_reset_chan_stats (struct net_device * musycc_dev, void *data)
+{
+ struct sbecom_chan_param cp;
+
+ if (copy_from_user (&cp, data,
+ sizeof (struct sbecom_chan_param)))
+ return -EFAULT;
+ return mkret (c4_del_chan_stats (cp.channum));
+}
+
+STATIC status_t
+c4_ioctl (struct net_device * ndev, struct ifreq * ifr, int cmd)
+{
+ ci_t *ci;
+ void *data;
+ int iocmd, iolen;
+ status_t ret;
+ static struct data
+ {
+ union
+ {
+ u_int8_t c;
+ u_int32_t i;
+ struct sbe_brd_info bip;
+ struct sbe_drv_info dip;
+ struct sbe_iid_info iip;
+ struct sbe_brd_addr bap;
+ struct sbecom_chan_stats stats;
+ struct sbecom_chan_param param;
+ struct temux_card_stats cards;
+ struct sbecom_card_param cardp;
+ struct sbecom_framer_param frp;
+ } u;
+ } arg;
+
+
+ if (!capable (CAP_SYS_ADMIN))
+ return -EPERM;
+ if (cmd != SIOCDEVPRIVATE + 15)
+ return -EINVAL;
+ if (!(ci = get_ci_by_dev (ndev)))
+ return -EINVAL;
+ if (ci->state != C_RUNNING)
+ return -ENODEV;
+ if (copy_from_user (&iocmd, ifr->ifr_data, sizeof (iocmd)))
+ return -EFAULT;
+#if 0
+ if (copy_from_user (&len, ifr->ifr_data + sizeof (iocmd), sizeof (len)))
+ return -EFAULT;
+#endif
+
+#if 0
+ pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd,
+ _IOC_DIR (iocmd), _IOC_TYPE (iocmd), _IOC_NR (iocmd),
+ _IOC_SIZE (iocmd));
+#endif
+ iolen = _IOC_SIZE (iocmd);
+ data = ifr->ifr_data + sizeof (iocmd);
+ if (copy_from_user (&arg, data, iolen))
+ return -EFAULT;
+
+ ret = 0;
+ switch (iocmd)
+ {
+ case SBE_IOC_PORT_GET:
+ //pr_info(">> SBE_IOC_PORT_GET Ioctl...\n");
+ ret = do_get_port (ndev, data);
+ break;
+ case SBE_IOC_PORT_SET:
+ //pr_info(">> SBE_IOC_PORT_SET Ioctl...\n");
+ ret = do_set_port (ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET:
+ //pr_info(">> SBE_IOC_CHAN_GET Ioctl...\n");
+ ret = do_get_chan (ndev, data);
+ break;
+ case SBE_IOC_CHAN_SET:
+ //pr_info(">> SBE_IOC_CHAN_SET Ioctl...\n");
+ ret = do_set_chan (ndev, data);
+ break;
+ case C4_DEL_CHAN:
+ //pr_info(">> C4_DEL_CHAN Ioctl...\n");
+ ret = do_del_chan (ndev, data);
+ break;
+ case SBE_IOC_CHAN_NEW:
+ ret = do_create_chan (ndev, data);
+ break;
+ case SBE_IOC_CHAN_GET_STAT:
+ ret = do_get_chan_stats (ndev, data);
+ break;
+ case SBE_IOC_LOGLEVEL:
+ ret = do_set_loglevel (ndev, data);
+ break;
+ case SBE_IOC_RESET_DEV:
+ ret = do_reset (ndev, data);
+ break;
+ case SBE_IOC_CHAN_DEL_STAT:
+ ret = do_reset_chan_stats (ndev, data);
+ break;
+ case C4_LOOP_PORT:
+ ret = do_port_loop (ndev, data);
+ break;
+ case C4_RW_FRMR:
+ ret = do_framer_rw (ndev, data);
+ break;
+ case C4_RW_MSYC:
+ ret = do_musycc_rw (ndev, data);
+ break;
+ case C4_RW_PLD:
+ ret = do_pld_rw (ndev, data);
+ break;
+ case SBE_IOC_IID_GET:
+ ret = (iolen == sizeof (struct sbe_iid_info)) ? c4_get_iidinfo (ci, &arg.u.iip) : -EFAULT;
+ if (ret == 0) /* no error, copy data */
+ if (copy_to_user (data, &arg, iolen))
+ return -EFAULT;
+ break;
+ default:
+ //pr_info(">> c4_ioctl: EINVAL - unknown iocmd <%x>\n", iocmd);
+ ret = -EINVAL;
+ break;
+ }
+ return mkret (ret);
+}
+
+static const struct net_device_ops c4_ops = {
+ .ndo_open = void_open,
+ .ndo_start_xmit = c4_linux_xmit,
+ .ndo_do_ioctl = c4_ioctl,
+};
+
+static void c4_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_VOID;
+ dev->netdev_ops = &c4_ops;
+}
+
+struct net_device *__init
+c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
+ int irq0, int irq1)
+{
+ struct net_device *ndev;
+ ci_t *ci;
+
+ ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
+ if (!ndev)
+ {
+ pr_warning("%s: no memory for struct net_device !\n", hi->devname);
+ error_flag = ENOMEM;
+ return 0;
+ }
+ ci = (ci_t *)(netdev_priv(ndev));
+ ndev->irq = irq0;
+
+ ci->hdw_info = hi;
+ ci->state = C_INIT; /* mark as hardware not available */
+ ci->next = c4_list;
+ c4_list = ci;
+ ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
+
+ if (CI == 0)
+ CI = ci; /* DEBUG, only board 0 usage */
+
+ strcpy (ci->devname, hi->devname);
+ ci->release = &pmcc4_OSSI_release[0];
+
+ /* tasklet */
+#if defined(SBE_ISR_TASKLET)
+ tasklet_init (&ci->ci_musycc_isr_tasklet,
+ (void (*) (unsigned long)) musycc_intr_bh_tasklet,
+ (unsigned long) ci);
+
+ if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
+ tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
+#elif defined(SBE_ISR_IMMEDIATE)
+ ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
+ ci->ci_musycc_isr_tq.data = ci;
+#endif
+
+
+ if (register_netdev (ndev) ||
+ (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
+ {
+ OS_kfree (netdev_priv(ndev));
+ OS_kfree (ndev);
+ error_flag = ENODEV;
+ return 0;
+ }
+ /*************************************************************
+ * int request_irq(unsigned int irq,
+ * void (*handler)(int, void *, struct pt_regs *),
+ * unsigned long flags, const char *dev_name, void *dev_id);
+ * wherein:
+ * irq -> The interrupt number that is being requested.
+ * handler -> Pointer to handling function being installed.
+ * flags -> A bit mask of options related to interrupt management.
+ * dev_name -> String used in /proc/interrupts to show owner of interrupt.
+ * dev_id -> Pointer (for shared interrupt lines) to point to its own
+ * private data area (to identify which device is interrupting).
+ *
+ * extern void free_irq(unsigned int irq, void *dev_id);
+ **************************************************************/
+
+ if (request_irq (irq0, &c4_linux_interrupt,
+#if defined(SBE_ISR_TASKLET)
+ IRQF_DISABLED | IRQF_SHARED,
+#elif defined(SBE_ISR_IMMEDIATE)
+ IRQF_DISABLED | IRQF_SHARED,
+#elif defined(SBE_ISR_INLINE)
+ IRQF_SHARED,
+#endif
+ ndev->name, ndev))
+ {
+ pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
+ unregister_netdev (ndev);
+ OS_kfree (netdev_priv(ndev));
+ OS_kfree (ndev);
+ error_flag = EIO;
+ return 0;
+ }
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+ if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
+ {
+ pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
+ unregister_netdev (ndev);
+ free_irq (irq0, ndev);
+ OS_kfree (netdev_priv(ndev));
+ OS_kfree (ndev);
+ error_flag = EIO;
+ return 0;
+ }
+#endif
+
+ /* setup board identification information */
+
+ {
+ u_int32_t tmp;
+
+ hdw_sn_get (hi, brdno); /* also sets PROM format type (promfmt)
+ * for later usage */
+
+ switch (hi->promfmt)
+ {
+ case PROM_FORMAT_TYPE1:
+ memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
+ memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4); /* unaligned data
+ * acquisition */
+ ci->brd_id = cpu_to_be32 (tmp);
+ break;
+ case PROM_FORMAT_TYPE2:
+ memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
+ memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4); /* unaligned data
+ * acquisition */
+ ci->brd_id = cpu_to_be32 (tmp);
+ break;
+ default:
+ ci->brd_id = 0;
+ memset (ndev->dev_addr, 0, 6);
+ break;
+ }
+
+#if 1
+ sbeid_set_hdwbid (ci); /* requires bid to be preset */
+#else
+ sbeid_set_bdtype (ci); /* requires hdw_bid to be preset */
+#endif
+
+ }
+
+#ifdef CONFIG_PROC_FS
+ sbecom_proc_brd_init (ci);
+#endif
+#if defined(SBE_ISR_TASKLET)
+ tasklet_enable (&ci->ci_musycc_isr_tasklet);
+#endif
+
+
+ if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
+ {
+#ifdef CONFIG_PROC_FS
+ sbecom_proc_brd_cleanup (ci);
+#endif
+ unregister_netdev (ndev);
+ free_irq (irq1, ndev);
+ free_irq (irq0, ndev);
+ OS_kfree (netdev_priv(ndev));
+ OS_kfree (ndev);
+ return 0; /* failure, error_flag is set */
+ }
+ return ndev;
+}
+
+STATIC int __init
+c4_mod_init (void)
+{
+ int rtn;
+
+ pr_warning("%s\n", pmcc4_OSSI_release);
+ if ((rtn = c4hw_attach_all ()))
+ return -rtn; /* installation failure - see system log */
+
+ /* housekeeping notifications */
+ if (log_level != log_level_default)
+ pr_info("NOTE: driver parameter <log_level> changed from default %d to %d.\n",
+ log_level_default, log_level);
+ if (max_mru != max_mru_default)
+ pr_info("NOTE: driver parameter <max_mru> changed from default %d to %d.\n",
+ max_mru_default, max_mru);
+ if (max_mtu != max_mtu_default)
+ pr_info("NOTE: driver parameter <max_mtu> changed from default %d to %d.\n",
+ max_mtu_default, max_mtu);
+ if (max_rxdesc_used != max_rxdesc_default)
+ {
+ if (max_rxdesc_used > 2000)
+ max_rxdesc_used = 2000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n",
+ max_rxdesc_default, max_rxdesc_used);
+ }
+ if (max_txdesc_used != max_txdesc_default)
+ {
+ if (max_txdesc_used > 1000)
+ max_txdesc_used = 1000; /* out-of-bounds reset */
+ pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n",
+ max_txdesc_default, max_txdesc_used);
+ }
+ return 0; /* installation success */
+}
+
+
+ /*
+ * find any still allocated hdlc registrations and unregister via call to
+ * do_deluser()
+ */
+
+STATIC void __exit
+cleanup_hdlc (void)
+{
+ hdw_info_t *hi;
+ ci_t *ci;
+ struct net_device *ndev;
+ int i, j, k;
+
+ for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++)
+ {
+ if (hi->ndev) /* a board has been attached */
+ {
+ ci = (ci_t *)(netdev_priv(hi->ndev));
+ for (j = 0; j < ci->max_port; j++)
+ for (k = 0; k < MUSYCC_NCHANS; k++)
+ if ((ndev = ci->port[j].chan[k]->user))
+ {
+ do_deluser (ndev, 0);
+ }
+ }
+ }
+}
+
+
+STATIC void __exit
+c4_mod_remove (void)
+{
+ cleanup_hdlc (); /* delete any missed channels */
+ cleanup_devs ();
+ c4_cleanup ();
+ cleanup_ioremap ();
+ pr_info("SBE - driver removed.\n");
+}
+
+module_init (c4_mod_init);
+module_exit (c4_mod_remove);
+
+#ifndef SBE_INCLUDE_SYMBOLS
+#ifndef CONFIG_SBE_WANC24_NCOMM
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+EXPORT_NO_SYMBOLS;
+#endif
+#endif
+#endif
+
+MODULE_AUTHOR ("SBE Technical Services <support@sbei.com>");
+MODULE_DESCRIPTION ("wanPCI-CxT1E1 Generic HDLC WAN Driver module");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE ("GPL");
+#endif
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
new file mode 100644
index 00000000000..d3f5a5b52dc
--- /dev/null
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -0,0 +1,2185 @@
+/*
+ * $Id: musycc.c,v 2.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
+ */
+
+unsigned int max_intcnt = 0;
+unsigned int max_bh = 0;
+
+/*-----------------------------------------------------------------------------
+ * musycc.c -
+ *
+ * Copyright (C) 2007 One Stop Systems, Inc.
+ * Copyright (C) 2003-2006 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@onestopsystems.com
+ * One Stop Systems, Inc. Escondido, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 2.1 $
+ * Last changed on $Date: 2007/08/15 23:32:17 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: musycc.c,v $
+ * Revision 2.1 2007/08/15 23:32:17 rickd
+ * Use 'if 0' instead of GNU comment delimiter to avoid line wrap induced compiler errors.
+ *
+ * Revision 2.0 2007/08/15 22:13:20 rickd
+ * Update to printf pointer %p usage and correct some UINT to ULONG for
+ * 64bit compatibility.
+ *
+ * Revision 1.7 2006/04/21 00:56:40 rickd
+ * workqueue files now prefixed with <sbecom> prefix.
+ *
+ * Revision 1.6 2005/10/27 18:54:19 rickd
+ * Clean out old code. Default to HDLC_FCS16, not TRANS.
+ *
+ * Revision 1.5 2005/10/17 23:55:28 rickd
+ * Initial port of NCOMM support patches from original work found
+ * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
+ *
+ * Revision 1.4 2005/10/13 20:35:25 rickd
+ * Cleanup warning for unused <flags> variable.
+ *
+ * Revision 1.3 2005/10/13 19:19:22 rickd
+ * Disable redundant driver removal cleanup code.
+ *
+ * Revision 1.2 2005/10/11 18:36:16 rickd
+ * Clean up warning messages caused by de-implemented some <flags> associated
+ * with spin_lock() removals.
+ *
+ * Revision 1.1 2005/10/05 00:45:28 rickd
+ * Re-enable xmit on flow-controlled and full channel to fix restart hang.
+ * Add some temp spin-lock debug code (rld_spin_owner).
+ *
+ * Revision 1.0 2005/09/28 00:10:06 rickd
+ * Initial release for C4T1E1 support. Lots of transparent
+ * mode updates.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+char SBEid_pmcc4_musyccc[] =
+"@(#)musycc.c - $Revision: 2.1 $ (c) Copyright 2004-2006 SBE, Inc.";
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include "pmcc4_sysdep.h"
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4_private.h"
+#include "pmcc4.h"
+#include "musycc.h"
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+#define sd_find_chan(ci,ch) c4_find_chan(ch)
+
+
+/*******************************************************************/
+/* global driver variables */
+extern ci_t *c4_list;
+extern int drvr_state;
+extern int log_level;
+
+extern int max_mru;
+extern int max_mtu;
+extern int max_rxdesc_used;
+extern int max_txdesc_used;
+extern ci_t *CI; /* dummy pointer to board ZERO's data - DEBUG
+ * USAGE */
+
+
+/*******************************************************************/
+/* forward references */
+void c4_fifo_free (mpi_t *, int);
+void c4_wk_chan_restart (mch_t *);
+void musycc_bh_tx_eom (mpi_t *, int);
+int musycc_chan_up (ci_t *, int);
+status_t __init musycc_init (ci_t *);
+STATIC void __init musycc_init_port (mpi_t *);
+void musycc_intr_bh_tasklet (ci_t *);
+void musycc_serv_req (mpi_t *, u_int32_t);
+void musycc_update_timeslots (mpi_t *);
+
+/*******************************************************************/
+
+#if 1
+STATIC int
+musycc_dump_rxbuffer_ring (mch_t * ch, int lockit)
+{
+ struct mdesc *m;
+ unsigned long flags = 0;
+
+ u_int32_t status;
+ int n;
+
+ if (lockit)
+ {
+ spin_lock_irqsave (&ch->ch_rxlock, flags);
+ }
+ if (ch->rxd_num == 0)
+ {
+ pr_info(" ZERO receive buffers allocated for this channel.");
+ } else
+ {
+ FLUSH_MEM_READ ();
+ m = &ch->mdr[ch->rxix_irq_srv];
+ for (n = ch->rxd_num; n; n--)
+ {
+ status = le32_to_cpu (m->status);
+ {
+ pr_info("%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
+ (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
+ (unsigned long) m, n,
+ status,
+ m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
+ status & POLL_DISABLED ? 'P' : '-',
+ status & EOBIRQ_ENABLE ? 'b' : '-',
+ status & EOMIRQ_ENABLE ? 'm' : '-',
+ status & LENGTH_MASK,
+ le32_to_cpu (m->data), le32_to_cpu (m->next));
+#ifdef RLD_DUMP_BUFDATA
+ {
+ u_int32_t *dp;
+ int len = status & LENGTH_MASK;
+
+#if 1
+ if (m->data && (status & HOST_RX_OWNED))
+#else
+ if (m->data) /* always dump regardless of valid RX
+ * data */
+#endif
+ {
+ dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
+ if (len >= 0x10)
+ pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
+ *dp, *(dp + 1), *(dp + 2), *(dp + 3));
+ else if (len >= 0x08)
+ pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
+ *dp, *(dp + 1));
+ else
+ pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
+ }
+ }
+#endif
+ }
+ m = m->snext;
+ }
+ } /* -for- */
+ pr_info("\n");
+
+ if (lockit)
+ {
+ spin_unlock_irqrestore (&ch->ch_rxlock, flags);
+ }
+ return 0;
+}
+#endif
+
+#if 1
+STATIC int
+musycc_dump_txbuffer_ring (mch_t * ch, int lockit)
+{
+ struct mdesc *m;
+ unsigned long flags = 0;
+ u_int32_t status;
+ int n;
+
+ if (lockit)
+ {
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+ }
+ if (ch->txd_num == 0)
+ {
+ pr_info(" ZERO transmit buffers allocated for this channel.");
+ } else
+ {
+ FLUSH_MEM_READ ();
+ m = ch->txd_irq_srv;
+ for (n = ch->txd_num; n; n--)
+ {
+ status = le32_to_cpu (m->status);
+ {
+ pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
+ (m == ch->txd_usr_add) ? 'F' : ' ',
+ (m == ch->txd_irq_srv) ? 'L' : ' ',
+ (unsigned long) m, n,
+ status,
+ m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
+ status & POLL_DISABLED ? 'P' : '-',
+ status & EOBIRQ_ENABLE ? 'b' : '-',
+ status & EOMIRQ_ENABLE ? 'm' : '-',
+ status & LENGTH_MASK,
+ le32_to_cpu (m->data), le32_to_cpu (m->next));
+#ifdef RLD_DUMP_BUFDATA
+ {
+ u_int32_t *dp;
+ int len = status & LENGTH_MASK;
+
+ if (m->data)
+ {
+ dp = (u_int32_t *) OS_phystov ((void *) (le32_to_cpu (m->data)));
+ if (len >= 0x10)
+ pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
+ *dp, *(dp + 1), *(dp + 2), *(dp + 3));
+ else if (len >= 0x08)
+ pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
+ *dp, *(dp + 1));
+ else
+ pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
+ }
+ }
+#endif
+ }
+ m = m->snext;
+ }
+ } /* -for- */
+ pr_info("\n");
+
+ if (lockit)
+ {
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+ }
+ return 0;
+}
+#endif
+
+
+/*
+ * The following supports a backdoor debug facility which can be used to
+ * display the state of a board's channel.
+ */
+
+status_t
+musycc_dump_ring (ci_t * ci, unsigned int chan)
+{
+ mch_t *ch;
+
+ if (chan >= MAX_CHANS_USED)
+ {
+ return SBE_DRVR_FAIL; /* E2BIG */
+ }
+ {
+ int bh;
+
+ bh = atomic_read (&ci->bh_pending);
+ pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n",
+ bh, max_bh, ci->iqp_headx, ci->iqp_tailx, max_intcnt,
+ ci->intlog.drvr_intr_thcount,
+ ci->intlog.drvr_intr_bhcount,
+ ci->wdcount, ci->wd_notify);
+ max_bh = 0; /* reset counter */
+ max_intcnt = 0; /* reset counter */
+ }
+
+ if (!(ch = sd_find_chan (dummy, chan)))
+ {
+ pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
+ return ENOENT;
+ }
+ pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state,
+ ch->status, ch->p.status);
+ pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
+ chan, ch->txd_num,
+ (u_int32_t) atomic_read (&ci->tx_pending), (u_int32_t) atomic_read (&ch->tx_pending), ch->txd_required, ch->s.tx_packets);
+ pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
+ ch->user, ch->txd_irq_srv, ch->txd_usr_add,
+ sd_queue_stopped (ch->user),
+ ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
+ musycc_dump_txbuffer_ring (ch, 1);
+ pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
+ chan, ch->rxd_num, ch->rxix_irq_srv,
+ &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
+ musycc_dump_rxbuffer_ring (ch, 1);
+
+ return SBE_DRVR_SUCCESS;
+}
+
+
+status_t
+musycc_dump_rings (ci_t * ci, unsigned int start_chan)
+{
+ unsigned int chan;
+
+ for (chan = start_chan; chan < (start_chan + 5); chan++)
+ musycc_dump_ring (ci, chan);
+ return SBE_DRVR_SUCCESS;
+}
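+
+/*
+ * Illustrative invocation of the backdoor dump (assumes the DEBUG board
+ * pointer CI declared above is valid): musycc_dump_rings (CI, 0) walks
+ * channels 0 through 4, printing both the TX and RX descriptor rings of
+ * each via musycc_dump_ring().
+ */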
+
+
+/*
+ * NOTE on musycc_init_mdt(): These MUSYCC writes are only operational after
+ * a MUSYCC GROUP_INIT command has been issued.
+ */
+
+void
+musycc_init_mdt (mpi_t * pi)
+{
+ u_int32_t *addr, cfg;
+ int i;
+
+ /*
+ * This Idle Code insertion takes effect prior to channel's first
+ * transmitted message. After that, each message contains its own Idle
+ * Code information which is to be issued after the message is
+ * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
+ * Descriptor).
+ */
+
+ addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
+ cfg = CFG_CH_FLAG_7E << IDLE_CODE;
+
+ for (i = 0; i < 32; addr++, i++)
+ {
+ pci_write_32 (addr, cfg);
+ }
+}
+
+
+/* Set TX thp to the next unprocessed md */
+
+void
+musycc_update_tx_thp (mch_t * ch)
+{
+ struct mdesc *md;
+ unsigned long flags;
+
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+ while (1)
+ {
+ md = ch->txd_irq_srv;
+ FLUSH_MEM_READ ();
+ if (!md->data)
+ {
+ /* No MDs with buffers to process */
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+ return;
+ }
+ if ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED)
+ {
+ /* this is the MD to restart TX with */
+ break;
+ }
+ /*
+ * Otherwise, we have a valid, host-owned message descriptor which
+ * has been successfully transmitted and whose buffer can be freed,
+ * so... process this MD, it's owned by the host. (This might give
+ * us a new, updated txd_irq_srv.)
+ */
+ musycc_bh_tx_eom (ch->up, ch->gchan);
+ }
+ md = ch->txd_irq_srv;
+ ch->up->regram->thp[ch->gchan] = cpu_to_le32 (OS_vtophys (md));
+ FLUSH_MEM_WRITE ();
+
+ if (ch->tx_full)
+ {
+ ch->tx_full = 0;
+ ch->txd_required = 0;
+ sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
+ * channel */
+ }
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n", ch->channum, md, md->status);
+#endif
+}
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+/*
+ * This is the workq task executed by the OS when our queue_work() is
+ * scheduled and run. It can fire off either RX or TX ACTIVATION depending
+ * upon the channel's ch_start_tx and ch_start_rx variables. This routine
+ * is implemented as a work queue so that the call to the service request is
+ * able to sleep, awaiting an interrupt acknowledgment response (SACK) from
+ * the hardware.
+ */
+
+void
+musycc_wq_chan_restart (void *arg) /* channel private structure */
+{
+ mch_t *ch;
+ mpi_t *pi;
+ struct mdesc *md;
+#if 0
+ unsigned long flags;
+#endif
+
+ ch = container_of(arg, struct c4_chan_info, ch_work);
+ pi = ch->up;
+
+#ifdef RLD_TRANS_DEBUG
+ pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
+ ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);
+
+#endif
+
+ /**********************************/
+ /** check for RX restart request **/
+ /**********************************/
+
+ if ((ch->ch_start_rx) && (ch->status & RX_ENABLED))
+ {
+
+ ch->ch_start_rx = 0;
+#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
+ {
+ static int hereb4 = 7;
+
+ if (hereb4) /* RLD DEBUG */
+ {
+ hereb4--;
+#ifdef RLD_TRANS_DEBUG
+ md = &ch->mdr[ch->rxix_irq_srv];
+ pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
+ ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
+ ch->s.rx_packets);
+#elif defined(RLD_RXACT_DEBUG)
+ md = &ch->mdr[ch->rxix_irq_srv];
+ pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
+ ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status),
+ ch->s.rx_packets);
+ musycc_dump_rxbuffer_ring (ch, 1); /* RLD DEBUG */
+#endif
+ }
+ }
+#endif
+ musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan);
+ }
+ /**********************************/
+ /** check for TX restart request **/
+ /**********************************/
+
+ if ((ch->ch_start_tx) && (ch->status & TX_ENABLED))
+ {
+ /* find next unprocessed message, then set TX thp to it */
+ musycc_update_tx_thp (ch);
+
+#if 0
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+#endif
+ md = ch->txd_irq_srv;
+ if (!md)
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum);
+#endif
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+#endif
+ } else if (md->data && ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED))
+ {
+ ch->ch_start_tx = 0;
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow interrupts for service request */
+#endif
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
+ ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets);
+#endif
+ musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan);
+ }
+#ifdef RLD_RESTART_DEBUG
+ else
+ {
+ /* retain request to start until retried and we have data to xmit */
+ pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
+ ch->channum, md,
+ le32_to_cpu (md->status),
+ le32_to_cpu (md->data), ch->ch_start_tx);
+ musycc_dump_txbuffer_ring (ch, 0);
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow interrupts for service request */
+#endif
+ }
+#endif
+ }
+}
+#endif
+
+
+ /*
+ * Channel restart either fires off a workqueue request (2.6) or lodges a
+ * watchdog activation sequence (2.4).
+ */
+
+void
+musycc_chan_restart (mch_t * ch)
+{
+#ifdef RLD_RESTART_DEBUG
+ pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
+ ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ /* 2.6 - find next unprocessed message, then set TX thp to it */
+#ifdef RLD_RESTART_DEBUG
+ pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
+#endif
+ c4_wk_chan_restart (ch); /* work queue mechanism fires off: Ref:
+ * musycc_wq_chan_restart () */
+
+#else
+
+
+ /* 2.4 - find next unprocessed message, then set TX thp to it */
+#ifdef RLD_RESTART_DEBUG
+ pr_info(">> musycc_chan_restart: scheduling Chan %x start_tx %x\n", ch->channum, ch->ch_start_tx);
+#endif
+ /* restart transmission from background loop */
+ ch->up->up->wd_notify = WD_NOTIFY_1TX;
+#endif
+}
+
+
+#if 0
+void
+musycc_cleanup (ci_t * ci)
+{
+ mpi_t *pi;
+ int i, j;
+
+ /* free up driver resources */
+ ci->state = C_INIT; /* mark as hardware not available */
+
+ for (i = 0; i < ci->max_port; i++)
+ {
+ pi = &ci->port[i];
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ c4_wq_port_cleanup (pi);
+#endif
+ for (j = 0; j < MUSYCC_NCHANS; j++)
+ {
+ if (pi->chan[j])
+ OS_kfree (pi->chan[j]); /* free mch_t struct */
+ }
+ OS_kfree (pi->regram_saved);
+ }
+#if 0
+ /* obsolete - watchdog is now static w/in ci_t */
+ OS_free_watchdog (ci->wd);
+#endif
+ OS_kfree (ci->iqd_p_saved);
+ OS_kfree (ci);
+}
+#endif
+
+void
+rld_put_led (mpi_t * pi, u_int32_t ledval)
+{
+ static u_int32_t led = 0;
+
+ if (ledval == 0)
+ led = 0;
+ else
+ led |= ledval;
+
+ pci_write_32 ((u_int32_t *) &pi->up->cpldbase->leds, led); /* RLD DEBUG TRANHANG */
+}
+
+
+#define MUSYCC_SR_RETRY_CNT 9
+
+void
+musycc_serv_req (mpi_t * pi, u_int32_t req)
+{
+ volatile u_int32_t r;
+ int rcnt;
+
+ /*
+ * PORT NOTE: The semaphore-protected service loop guarantees only a single
+ * operation at a time. Per MUSYCC Manual - "Issuing service requests to
+ * the same channel group without first receiving ACK from each request
+ * may cause the host to lose track of which service request has been
+ * acknowledged."
+ */
+
+ SD_SEM_TAKE (&pi->sr_sem_busy, "serv"); /* only 1 thru here, per
+ * group */
+
+ if (pi->sr_last == req)
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
+#endif
+
+ /*
+ * The most likely repeated request is the channel activation command
+ * which follows the occurrence of a Transparent mode TX ONR or a
+ * BUFF error. If the previous command was a CHANNEL ACTIVATE,
+ * precede it with a NOOP command in order maintain coherent control
+ * of this current (re)ACTIVATE.
+ */
+
+ r = (pi->sr_last & ~SR_GCHANNEL_MASK);
+ if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
+ (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION)))
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
+#endif
+ SD_SEM_GIVE (&pi->sr_sem_busy); /* allow this next request */
+ musycc_serv_req (pi, SR_NOOP);
+ SD_SEM_TAKE (&pi->sr_sem_busy, "serv"); /* relock & continue w/
+ * original req */
+ } else if (req == SR_NOOP)
+ {
+ /* no need to issue back-to-back SR_NOOP commands at this time */
+#ifdef RLD_TRANS_DEBUG
+ pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum);
+#endif
+ SD_SEM_GIVE (&pi->sr_sem_busy); /* allow this next request */
+ return;
+ }
+ }
+ rcnt = 0;
+ pi->sr_last = req;
+rewrite:
+ pci_write_32 ((u_int32_t *) &pi->reg->srd, req);
+ FLUSH_MEM_WRITE ();
+
+ /*
+ * Per MUSYCC Manual, Section 6.1.2 - "When writing an SCR service
+ * request, the host must ensure at least one PCI bus clock cycle has
+ * elapsed before writing another service request. To meet this minimum
+ * elapsed service request write timing interval, it is recommended that
+ * the host follow any SCR write with another operation which reads from
+ * the same address."
+ */
+ r = pci_read_32 ((u_int32_t *) &pi->reg->srd); /* adhere to write
+ * timing imposition */
+
+
+ if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT))
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
+ pi->up->devname, rcnt, req, pi->sr_last, r,
+ (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
+ OS_uwait_dummy (); /* this delay helps reduce reissue counts
+ * (reason not yet researched) */
+ goto rewrite;
+ }
+ if (rcnt > MUSYCC_SR_RETRY_CNT)
+ {
+ pr_warning("%s: failed service request (#%d)= %x, group %d.\n",
+ pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum);
+ SD_SEM_GIVE (&pi->sr_sem_busy); /* allow any next request */
+ return;
+ }
+ if (req == SR_CHIP_RESET)
+ {
+ /*
+ * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus
+ * the upcoming delay is used. Though the MUSYCC documentation
+ * suggests a read-after-write would supply the required delay, it's
+ * unclear what CPU/BUS clock speeds might have been assumed when
+ * suggesting this 'lack of ACK' workaround. Thus the use of uwait.
+ */
+ OS_uwait (100000, "icard"); /* 100ms */
+ } else
+ {
+ FLUSH_MEM_READ ();
+ SD_SEM_TAKE (&pi->sr_sem_wait, "sakack"); /* sleep until SACK
+ * interrupt occurs */
+ }
+ SD_SEM_GIVE (&pi->sr_sem_busy); /* allow any next request */
+}
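+
+#if 0
+/*
+ * Usage sketch (illustrative only, not compiled): a caller activates a
+ * channel by issuing one service request per direction, exactly as done
+ * from musycc_wq_chan_restart().  Each call blocks on sr_sem_wait until
+ * the corresponding SACK interrupt arrives.  The helper name below is
+ * hypothetical.
+ */
+static void
+example_activate_channel (mpi_t * pi, int gchan)
+{
+    musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | gchan);
+    musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | gchan);
+}
+#endif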
+
+
+#ifdef SBE_PMCC4_ENABLE
+void
+musycc_update_timeslots (mpi_t * pi)
+{
+ int i, ch;
+ char e1mode = IS_FRAME_ANY_E1 (pi->p.port_mode);
+
+ for (i = 0; i < 32; i++)
+ {
+ int usedby = 0, last = 0, ts, j, bits[8];
+
+ u_int8_t lastval = 0;
+
+ if (((i == 0) && e1mode) || /* disable if E1 mode */
+ ((i == 16) && ((pi->p.port_mode == CFG_FRAME_E1CRC_CAS) || (pi->p.port_mode == CFG_FRAME_E1CRC_CAS_AMI)))
+ || ((i > 23) && (!e1mode))) /* disable if T1 mode */
+ {
+ pi->tsm[i] = 0xff; /* make tslot unavailable for this mode */
+ } else
+ {
+ pi->tsm[i] = 0x00; /* make tslot available for assignment */
+ }
+ for (j = 0; j < 8; j++)
+ bits[j] = -1;
+ for (ch = 0; ch < MUSYCC_NCHANS; ch++)
+ {
+ if ((pi->chan[ch]->state == UP) && (pi->chan[ch]->p.bitmask[i]))
+ {
+ usedby++;
+ last = ch;
+ lastval = pi->chan[ch]->p.bitmask[i];
+ for (j = 0; j < 8; j++)
+ if (lastval & (1 << j))
+ bits[j] = ch;
+ pi->tsm[i] |= lastval;
+ }
+ }
+ if (!usedby)
+ ts = 0;
+ else if ((usedby == 1) && (lastval == 0xff))
+ ts = (4 << 5) | last;
+ else if ((usedby == 1) && (lastval == 0x7f))
+ ts = (5 << 5) | last;
+ else
+ {
+ int idx;
+
+ if (bits[0] < 0)
+ ts = (6 << 5) | (idx = last);
+ else
+ ts = (7 << 5) | (idx = bits[0]);
+ for (j = 1; j < 8; j++)
+ {
+ pi->regram->rscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
+ pi->regram->tscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]);
+ }
+ }
+ pi->regram->rtsm[i] = ts;
+ pi->regram->ttsm[i] = ts;
+ }
+ FLUSH_MEM_WRITE ();
+
+ musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
+ musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
+ musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_RX_DIRECTION);
+ musycc_serv_req (pi, SR_SUBCHANNEL_MAP | SR_TX_DIRECTION);
+}
+#endif
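+
+/*
+ * Worked example (illustrative): in the SBE_PMCC4_ENABLE mapping above, a
+ * channel owning all eight bits of a timeslot (bitmask 0xff) yields an
+ * rtsm[]/ttsm[] entry of (4 << 5) | channel, i.e. MUSYCC_TSD_MODE_64KBPS
+ * in the top three bits; a 56kbps owner (bitmask 0x7f) selects mode 5
+ * instead, and a timeslot shared by several channels falls back to the
+ * subchannel modes 6/7 with the per-bit owners written into rscm[]/tscm[].
+ */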
+
+
+#ifdef SBE_WAN256T3_ENABLE
+void
+musycc_update_timeslots (mpi_t * pi)
+{
+ mch_t *ch;
+
+ u_int8_t ts, hmask, tsen;
+ int gchan;
+ int i;
+
+#ifdef SBE_PMCC4_ENABLE
+ hmask = (0x1f << pi->up->p.hypersize) & 0x1f;
+#endif
+#ifdef SBE_WAN256T3_ENABLE
+ hmask = (0x1f << hyperdummy) & 0x1f;
+#endif
+ for (i = 0; i < 128; i++)
+ {
+ gchan = ((pi->portnum * MUSYCC_NCHANS) + (i & hmask)) % MUSYCC_NCHANS;
+ ch = pi->chan[gchan];
+ if (ch->p.mode_56k)
+ tsen = MODE_56KBPS;
+ else
+ tsen = MODE_64KBPS; /* also the default */
+ ts = ((pi->portnum % 4) == (i / 32)) ? (tsen << 5) | (i & hmask) : 0;
+ pi->regram->rtsm[i] = ts;
+ pi->regram->ttsm[i] = ts;
+ }
+ FLUSH_MEM_WRITE ();
+ musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION);
+ musycc_serv_req (pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION);
+}
+#endif
+
+
+ /*
+ * This routine converts a generic library channel configuration parameter
+ * into a hardware specific register value (IE. MUSYCC CCD Register).
+ */
+u_int32_t
+musycc_chan_proto (int proto)
+{
+ int reg;
+
+ switch (proto)
+ {
+ case CFG_CH_PROTO_TRANS: /* 0 */
+ reg = MUSYCC_CCD_TRANS;
+ break;
+ case CFG_CH_PROTO_SS7: /* 1 */
+ reg = MUSYCC_CCD_SS7;
+ break;
+ default:
+ case CFG_CH_PROTO_ISLP_MODE: /* 4 */
+ case CFG_CH_PROTO_HDLC_FCS16: /* 2 */
+ reg = MUSYCC_CCD_HDLC_FCS16;
+ break;
+ case CFG_CH_PROTO_HDLC_FCS32: /* 3 */
+ reg = MUSYCC_CCD_HDLC_FCS32;
+ break;
+ }
+
+ return reg;
+}
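+
+#if 0
+/*
+ * Usage sketch (illustrative only, not compiled): the value returned above
+ * belongs in the protocol field of the Channel Configuration Descriptor,
+ * bits 12-14 per MUSYCC_CCD_PROTO_SHIFT (musycc.h).  The helper below is
+ * hypothetical and only shows the intended merge.
+ */
+static u_int32_t
+example_ccd_proto_field (int chan_mode)
+{
+    return musycc_chan_proto (chan_mode) << MUSYCC_CCD_PROTO_SHIFT;
+}
+#endif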
+
+#ifdef SBE_WAN256T3_ENABLE
+STATIC void __init
+musycc_init_port (mpi_t * pi)
+{
+ pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram));
+
+ pi->regram->grcd =
+ __constant_cpu_to_le32 (MUSYCC_GRCD_RX_ENABLE |
+ MUSYCC_GRCD_TX_ENABLE |
+ MUSYCC_GRCD_SF_ALIGN |
+ MUSYCC_GRCD_SUBCHAN_DISABLE |
+ MUSYCC_GRCD_OOFMP_DISABLE |
+ MUSYCC_GRCD_COFAIRQ_DISABLE |
+ MUSYCC_GRCD_MC_ENABLE |
+ (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT));
+
+ pi->regram->pcd =
+ __constant_cpu_to_le32 (MUSYCC_PCD_E1X4_MODE |
+ MUSYCC_PCD_TXDATA_RISING |
+ MUSYCC_PCD_TX_DRIVEN);
+
+ /* Message length descriptor */
+ pi->regram->mld = __constant_cpu_to_le32 (max_mru | (max_mru << 16));
+ FLUSH_MEM_WRITE ();
+
+ musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION);
+ musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION);
+
+ musycc_init_mdt (pi);
+
+ musycc_update_timeslots (pi);
+}
+#endif
+
+
+status_t __init
+musycc_init (ci_t * ci)
+{
+ char *regaddr; /* temp for address boundary calculations */
+ int i, gchan;
+
+ OS_sem_init (&ci->sem_wdbusy, SEM_AVAILABLE); /* watchdog exclusion */
+
+ /*
+ * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword
+ * aligned memory segment for interrupt queue pointers."
+ */
+
+#define INT_QUEUE_BOUNDARY 4
+
+ regaddr = OS_kmalloc ((INT_QUEUE_SIZE + 1) * sizeof (u_int32_t));
+ if (regaddr == 0)
+ return ENOMEM;
+ ci->iqd_p_saved = regaddr; /* save orig value for free's usage */
+ ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) &
+ (~(INT_QUEUE_BOUNDARY - 1))); /* this calculates
+ * closest boundary */
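+
+    /*
+     * Alignment sketch (illustrative values): with INT_QUEUE_BOUNDARY == 4,
+     * a returned address of 0x1002 maps to (0x1002 + 3) & ~3 == 0x1004,
+     * the next dword boundary, while an already aligned 0x1004 maps to
+     * itself; the extra u_int32_t allocated above absorbs the worst-case
+     * 3-byte shift.
+     */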
+
+ for (i = 0; i < INT_QUEUE_SIZE; i++)
+ {
+ ci->iqd_p[i] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
+ }
+
+ for (i = 0; i < ci->max_port; i++)
+ {
+ mpi_t *pi = &ci->port[i];
+
+ /*
+ * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB
+ * bound memory segment for Channel Group 0."
+ */
+
+#define GROUP_BOUNDARY 0x800
+
+ regaddr = OS_kmalloc (sizeof (struct musycc_groupr) + GROUP_BOUNDARY);
+ if (regaddr == 0)
+ {
+ for (gchan = 0; gchan < i; gchan++)
+ {
+ pi = &ci->port[gchan];
+ OS_kfree (pi->reg);
+ pi->reg = 0;
+ }
+ return ENOMEM;
+ }
+ pi->regram_saved = regaddr; /* save orig value for free's usage */
+ pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) &
+ (~(GROUP_BOUNDARY - 1))); /* this calculates
+ * closest boundary */
+ }
+
+ /* any board centric MUSYCC commands will use group ZERO as its "home" */
+ ci->regram = ci->port[0].regram;
+ musycc_serv_req (&ci->port[0], SR_CHIP_RESET);
+
+ pci_write_32 ((u_int32_t *) &ci->reg->gbp, OS_vtophys (ci->regram));
+ pci_flush_write (ci);
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+ ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC);
+#else
+ /* standard driver POLLS for INTB via CPLD register */
+ ci->regram->__glcd = __constant_cpu_to_le32 (GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
+#endif
+
+ ci->regram->__iqp = cpu_to_le32 (OS_vtophys (&ci->iqd_p[0]));
+ ci->regram->__iql = __constant_cpu_to_le32 (INT_QUEUE_SIZE - 1);
+ pci_write_32 ((u_int32_t *) &ci->reg->dacbp, 0);
+ FLUSH_MEM_WRITE ();
+
+ ci->state = C_RUNNING; /* mark as full interrupt processing
+ * available */
+
+ musycc_serv_req (&ci->port[0], SR_GLOBAL_INIT); /* FIRST INTERRUPT ! */
+
+ /* sanity check settable parameters */
+
+ if (max_mru > 0xffe)
+ {
+ pr_warning("Maximum allowed MRU exceeded, resetting %d to %d.\n",
+ max_mru, 0xffe);
+ max_mru = 0xffe;
+ }
+ if (max_mtu > 0xffe)
+ {
+ pr_warning("Maximum allowed MTU exceeded, resetting %d to %d.\n",
+ max_mtu, 0xffe);
+ max_mtu = 0xffe;
+ }
+#ifdef SBE_WAN256T3_ENABLE
+ for (i = 0; i < MUSYCC_NPORTS; i++)
+ musycc_init_port (&ci->port[i]);
+#endif
+
+ return SBE_DRVR_SUCCESS; /* no error */
+}
+
+
+void
+musycc_bh_tx_eom (mpi_t * pi, int gchan)
+{
+ mch_t *ch;
+ struct mdesc *md;
+
+#if 0
+#ifndef SBE_ISR_INLINE
+ unsigned long flags;
+
+#endif
+#endif
+ volatile u_int32_t status;
+
+ ch = pi->chan[gchan];
+ if (ch == 0 || ch->state != UP)
+ {
+ if (log_level >= LOG_ERROR)
+ pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
+ pi->up->devname, gchan);
+ }
+ if (ch == 0 || ch->mdt == 0)
+ return; /* note: mdt==0 implies a malloc()
+ * failure w/in chan_up() routine */
+
+#if 0
+#ifdef SBE_ISR_INLINE
+ spin_lock_irq (&ch->ch_txlock);
+#else
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+#endif
+#endif
+ do
+ {
+ FLUSH_MEM_READ ();
+ md = ch->txd_irq_srv;
+ status = le32_to_cpu (md->status);
+
+ /*
+ * Note: Per MUSYCC Ref 6.4.9, the host does not poll a host-owned
+ * Transmit Buffer Descriptor during Transparent Mode.
+ */
+ if (status & MUSYCC_TX_OWNED)
+ {
+ int readCount, loopCount;
+
+ /***********************************************************/
+ /* HW Bug Fix */
+ /* ---------- */
+ /* Under certain PCI Bus loading conditions, the data */
+ /* associated with an update of Shared Memory is delayed */
+ /* relative to its PCI Interrupt. This is caught when */
+ /* the host determines it does not yet OWN the descriptor. */
+ /***********************************************************/
+
+ readCount = 0;
+ while (status & MUSYCC_TX_OWNED)
+ {
+ for (loopCount = 0; loopCount < 0x30; loopCount++)
+ OS_uwait_dummy (); /* use call to avoid optimization
+ * removal of dummy delay */
+ FLUSH_MEM_READ ();
+ status = le32_to_cpu (md->status);
+ if (readCount++ > 40)
+ break; /* don't wait any longer */
+ }
+ if (status & MUSYCC_TX_OWNED)
+ {
+ if (log_level >= LOG_MONITOR)
+ {
+ pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
+ pi->up->devname, pi->portnum, ch->channum,
+ md, status);
+ pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
+ ch->user, ch->txd_irq_srv, ch->txd_usr_add,
+ sd_queue_stopped (ch->user),
+ ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
+ musycc_dump_txbuffer_ring (ch, 0);
+ }
+ break; /* Not our mdesc, done */
+ } else
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
+ pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
+ }
+ }
+ ch->txd_irq_srv = md->snext;
+
+ md->data = 0;
+ if (md->mem_token != 0)
+ {
+ /* upcount channel */
+ atomic_sub (OS_mem_token_tlen (md->mem_token), &ch->tx_pending);
+ /* upcount card */
+ atomic_sub (OS_mem_token_tlen (md->mem_token), &pi->up->tx_pending);
+#ifdef SBE_WAN256T3_ENABLE
+ if (!atomic_read (&pi->up->tx_pending))
+ wan256t3_led (pi->up, LED_TX, 0);
+#endif
+
+#ifdef CONFIG_SBE_WAN256T3_NCOMM
+ /* callback that our packet was sent */
+ {
+ int hdlcnum = (pi->portnum * 32 + gchan);
+
+ if (hdlcnum >= 228)
+ {
+ if (nciProcess_TX_complete)
+ (*nciProcess_TX_complete) (hdlcnum,
+ getuserbychan (gchan));
+ }
+ }
+#endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/
+
+ OS_mem_token_free_irq (md->mem_token);
+ md->mem_token = 0;
+ }
+ md->status = 0;
+#ifdef RLD_TXFULL_DEBUG
+ if (log_level >= LOG_MONITOR2)
+ pr_info("~~ tx_eom: tx_full %x txd_free %d -> %d\n",
+ ch->tx_full, ch->txd_free, ch->txd_free + 1);
+#endif
+ ++ch->txd_free;
+ FLUSH_MEM_WRITE ();
+
+ if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE))
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
+ pi->up->devname, ch->p.chan_mode, status);
+ if ((status & EOMIRQ_ENABLE) == 0)
+ break;
+ }
+ }
+ while ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && ((status & EOMIRQ_ENABLE) == 0));
+ /*
+ * NOTE: (The above 'while' is coupled w/ previous 'do', way above.) Each
+ * Transparent data buffer has the EOB bit, and NOT the EOM bit, set and
+ * will furthermore have a separate IQD associated with each message's
+ * buffer.
+ */
+
+ FLUSH_MEM_READ ();
+ /*
+ * Smooth flow control hysteresis by maintaining task stoppage until half
+ * the available write buffers are available.
+ */
+ if (ch->tx_full && (ch->txd_free >= (ch->txd_num / 2)))
+ {
+ /*
+ * Then, only relieve task stoppage if we actually have enough
+ * buffers to service the last requested packet. It may require MORE
+ * than half the available!
+ */
+ if (ch->txd_free >= ch->txd_required)
+ {
+
+#ifdef RLD_TXFULL_DEBUG
+ if (log_level >= LOG_MONITOR2)
+ pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
+ ch->channum,
+ ch->txd_free, ch->txd_num / 2);
+#endif
+ ch->tx_full = 0;
+ ch->txd_required = 0;
+ sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
+ * channel */
+ }
+ }
+#ifdef RLD_TXFULL_DEBUG
+ else if (ch->tx_full)
+ {
+ if (log_level >= LOG_MONITOR2)
+ pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
+ ch->channum,
+ ch->txd_free, ch->txd_num / 2);
+ }
+#endif
+
+ FLUSH_MEM_WRITE ();
+#if 0
+#ifdef SBE_ISR_INLINE
+ spin_unlock_irq (&ch->ch_txlock);
+#else
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+#endif
+#endif
+}
+
+
+STATIC void
+musycc_bh_rx_eom (mpi_t * pi, int gchan)
+{
+ mch_t *ch;
+ void *m, *m2;
+ struct mdesc *md;
+ volatile u_int32_t status;
+ u_int32_t error;
+
+ ch = pi->chan[gchan];
+ if (ch == 0 || ch->state != UP)
+ {
+ if (log_level > LOG_ERROR)
+ pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
+ pi->up->devname, gchan);
+ return;
+ }
+ if (ch->mdr == 0)
+ return; /* can this happen ? */
+
+ for (;;)
+ {
+ FLUSH_MEM_READ ();
+ md = &ch->mdr[ch->rxix_irq_srv];
+ status = le32_to_cpu (md->status);
+ if (!(status & HOST_RX_OWNED))
+ break; /* Not our mdesc, done */
+ m = md->mem_token;
+ error = (status >> 16) & 0xf;
+ if (error == 0)
+ {
+#ifdef CONFIG_SBE_WAN256T3_NCOMM
+ int hdlcnum = (pi->portnum * 32 + gchan);
+
+ /*
+ * if the packet number belongs to NCOMM, then send it to the TMS
+ * driver
+ */
+ if (hdlcnum >= 228)
+ {
+ if (nciProcess_RX_packet)
+ (*nciProcess_RX_packet) (hdlcnum, status & 0x3fff, m, ch->user);
+ } else
+#endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/
+
+ {
+ if ((m2 = OS_mem_token_alloc (max_mru)))
+ {
+ /* substitute the mbuf+cluster */
+ md->mem_token = m2;
+ md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));
+
+ /* pass the received mbuf upward */
+ sd_recv_consume (m, status & LENGTH_MASK, ch->user);
+ ch->s.rx_packets++;
+ ch->s.rx_bytes += status & LENGTH_MASK;
+ } else
+ {
+ ch->s.rx_dropped++;
+ }
+ }
+ } else if (error == ERR_FCS)
+ {
+ ch->s.rx_crc_errors++;
+ } else if (error == ERR_ALIGN)
+ {
+ ch->s.rx_missed_errors++;
+ } else if (error == ERR_ABT)
+ {
+ ch->s.rx_missed_errors++;
+ } else if (error == ERR_LNG)
+ {
+ ch->s.rx_length_errors++;
+ } else if (error == ERR_SHT)
+ {
+ ch->s.rx_length_errors++;
+ }
+ FLUSH_MEM_WRITE ();
+ status = max_mru;
+ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ status |= EOBIRQ_ENABLE;
+ md->status = cpu_to_le32 (status);
+
+ /* Check next mdesc in the ring */
+ if (++ch->rxix_irq_srv >= ch->rxd_num)
+ ch->rxix_irq_srv = 0;
+ FLUSH_MEM_WRITE ();
+ }
+}
+
+
+irqreturn_t
+musycc_intr_th_handler (void *devp)
+{
+ ci_t *ci = (ci_t *) devp;
+ volatile u_int32_t status, currInt = 0;
+ u_int32_t nextInt, intCnt;
+
+ /*
+ * Hardware not available, potential interrupt hang. But since interrupt
+ * might be shared, just return.
+ */
+ if (ci->state == C_INIT)
+ {
+ return IRQ_NONE;
+ }
+ /*
+ * Marked as hardware available. Don't service interrupts, just clear the
+ * event.
+ */
+
+ if (ci->state == C_IDLE)
+ {
+ status = pci_read_32 ((u_int32_t *) &ci->reg->isd);
+
+ /* clear the interrupt but process nothing else */
+ pci_write_32 ((u_int32_t *) &ci->reg->isd, status);
+ return IRQ_HANDLED;
+ }
+ FLUSH_PCI_READ ();
+ FLUSH_MEM_READ ();
+
+ status = pci_read_32 ((u_int32_t *) &ci->reg->isd);
+ nextInt = INTRPTS_NEXTINT (status);
+ intCnt = INTRPTS_INTCNT (status);
+ ci->intlog.drvr_intr_thcount++;
+
+ /*********************************************************/
+ /* HW Bug Fix */
+ /* ---------- */
+ /* Under certain PCI Bus loading conditions, the */
+ /* MUSYCC loses the data associated with an update */
+ /* of its ISD and erroneously returns the immediately */
+ /* preceding 'nextInt' value. However, the 'intCnt' */
+ /* value appears to be correct. Not starting service */
+ /* where the 'missing' 'nextInt' SHOULD point causes */
+ /* the IQD not to be serviced - the 'not serviced' */
+ /* entries then remain and continue to increase as more */
+ /* incorrect ISD's are encountered. */
+ /*********************************************************/
+
+ if (nextInt != INTRPTS_NEXTINT (ci->intlog.this_status_new))
+ {
+ if (log_level >= LOG_MONITOR)
+ {
+ pr_info("%s: note - updated ISD from %08x to %08x\n",
+ ci->devname, status,
+ (status & (~INTRPTS_NEXTINT_M)) | ci->intlog.this_status_new);
+ }
+ /*
+ * Replace bogus status with software corrected value.
+ *
+ * It's not known whether, during this problem occurrence, if the
+ * INTFULL bit is correctly reported or not.
+ */
+ status = (status & (~INTRPTS_NEXTINT_M)) | (ci->intlog.this_status_new);
+ nextInt = INTRPTS_NEXTINT (status);
+ }
+ /**********************************************/
+ /* Cn847x Bug Fix */
+ /* -------------- */
+ /* Fix for inability to write back same index */
+ /* as read for a full interrupt queue. */
+ /**********************************************/
+
+ if (intCnt == INT_QUEUE_SIZE)
+ {
+ currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1);
+ } else
+ /************************************************/
+ /* Interrupt Write Location Issues */
+ /* ------------------------------- */
+ /* When the interrupt status descriptor is */
+ /* written, the interrupt line is de-asserted */
+ /* by the Cn847x. In the case of MIPS */
+ /* microprocessors, this must occur at the */
+ /* beginning of the interrupt handler so that */
+ /* the interrupt handler is not re-entered due */
+ /* to interrupt dis-assertion latency. */
+ /* In the case of all other processors, this */
+ /* action should occur at the end of the */
+ /* interrupt handler to avoid overwriting the */
+ /* interrupt queue. */
+ /************************************************/
+
+ if (intCnt)
+ {
+ currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1);
+ } else
+ {
+ /*
+ * NOTE: Servicing an interrupt whose ISD contains a count of ZERO
+ * can be indicative of a Shared Interrupt chain. Our driver can be
+ * called from the system's interrupt handler as a matter of the OS
+ * walking the chain. As the chain is walked, the interrupt will
+ * eventually be serviced by the correct driver/handler.
+ */
+#if 0
+ /* chained interrupt = not ours */
+ pr_info(">> %s: intCnt NULL, sts %x, possibly a chained interrupt!\n",
+ ci->devname, status);
+#endif
+ return IRQ_NONE;
+ }
+
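+    /*
+     * Worked example (illustrative): the masking above assumes
+     * INT_QUEUE_SIZE is a power of two, so the write-back index simply
+     * wraps; e.g. nextInt == INT_QUEUE_SIZE - 2 with intCnt == 4 yields
+     * currInt == 2.
+     */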
+ ci->iqp_tailx = currInt;
+
+ currInt <<= INTRPTS_NEXTINT_S;
+ ci->intlog.last_status_new = ci->intlog.this_status_new;
+ ci->intlog.this_status_new = currInt;
+
+ if ((log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
+ {
+ pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
+ }
+ if (log_level >= LOG_DEBUG)
+ pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
+ ci->devname, &ci->reg->isd,
+ status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));
+
+ FLUSH_MEM_WRITE ();
+#if defined(SBE_ISR_TASKLET)
+ pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
+ atomic_inc (&ci->bh_pending);
+ tasklet_schedule (&ci->ci_musycc_isr_tasklet);
+#elif defined(SBE_ISR_IMMEDIATE)
+ pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
+ atomic_inc (&ci->bh_pending);
+ queue_task (&ci->ci_musycc_isr_tq, &tq_immediate);
+ mark_bh (IMMEDIATE_BH);
+#elif defined(SBE_ISR_INLINE)
+ (void) musycc_intr_bh_tasklet (ci);
+ pci_write_32 ((u_int32_t *) &ci->reg->isd, currInt);
+#endif
+ return IRQ_HANDLED;
+}
+
+
+#if defined(SBE_ISR_IMMEDIATE)
+unsigned long
+#else
+void
+#endif
+musycc_intr_bh_tasklet (ci_t * ci)
+{
+ mpi_t *pi;
+ mch_t *ch;
+ unsigned int intCnt;
+ volatile u_int32_t currInt = 0;
+ volatile unsigned int headx, tailx;
+ int readCount, loopCount;
+ int group, gchan, event, err, tx;
+ u_int32_t badInt = INT_EMPTY_ENTRY;
+ u_int32_t badInt2 = INT_EMPTY_ENTRY2;
+
+ /*
+ * Hardware not available, potential interrupt hang. But since interrupt
+ * might be shared, just return.
+ */
+ if ((drvr_state != SBE_DRVR_AVAILABLE) || (ci->state == C_INIT))
+ {
+#if defined(SBE_ISR_IMMEDIATE)
+ return 0L;
+#else
+ return;
+#endif
+ }
+#if defined(SBE_ISR_TASKLET) || defined(SBE_ISR_IMMEDIATE)
+ if (drvr_state != SBE_DRVR_AVAILABLE)
+ {
+#if defined(SBE_ISR_TASKLET)
+ return;
+#elif defined(SBE_ISR_IMMEDIATE)
+ return 0L;
+#endif
+ }
+#elif defined(SBE_ISR_INLINE)
+ /* no semaphore taken, no double checks */
+#endif
+
+ ci->intlog.drvr_intr_bhcount++;
+ FLUSH_MEM_READ ();
+ {
+ unsigned int bh = atomic_read (&ci->bh_pending);
+
+ max_bh = max (bh, max_bh);
+ }
+ atomic_set (&ci->bh_pending, 0);/* if here, no longer pending */
+ while ((headx = ci->iqp_headx) != (tailx = ci->iqp_tailx))
+ {
+ intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE);
+ currInt = le32_to_cpu (ci->iqd_p[headx]);
+
+ max_intcnt = max (intCnt, max_intcnt); /* RLD DEBUG */
+
+ /**************************************************/
+ /* HW Bug Fix */
+ /* ---------- */
+ /* The following code checks for the condition */
+ /* of interrupt assertion before interrupt */
+ /* queue update. This is a problem on several */
+ /* PCI-Local bridge chips found on some products. */
+ /**************************************************/
+
+ readCount = 0;
+ if ((currInt == badInt) || (currInt == badInt2))
+ ci->intlog.drvr_int_failure++;
+
+ while ((currInt == badInt) || (currInt == badInt2))
+ {
+ for (loopCount = 0; loopCount < 0x30; loopCount++)
+ OS_uwait_dummy (); /* use call to avoid optimization removal
+ * of dummy delay */
+ FLUSH_MEM_READ ();
+ currInt = le32_to_cpu (ci->iqd_p[headx]);
+ if (readCount++ > 20)
+ break;
+ }
+
+ if ((currInt == badInt) || (currInt == badInt2)) /* catch failure of Bug
+ * Fix checking */
+ {
+ if (log_level >= LOG_WARN)
+ pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n",
+ ci->devname, &ci->iqd_p[headx], headx);
+
+ /*
+ * If the descriptor has not recovered, then leaving the EMPTY
+ * entry set will not signal to the MUSYCC that this descriptor
+ * has been serviced. The Interrupt Queue can then start losing
+ * available descriptors and MUSYCC eventually encounters and
+ * reports the INTFULL condition. Per manual, changing any bit
+ * marks descriptor as available, thus the use of different
+ * EMPTY_ENTRY values.
+ */
+
+ if (currInt == badInt)
+ {
+ ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY2);
+ } else
+ {
+ ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
+ }
+ ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* ensure wrap-around */
+ FLUSH_MEM_WRITE ();
+ FLUSH_MEM_READ ();
+ continue;
+ }
+ group = INTRPT_GRP (currInt);
+ gchan = INTRPT_CH (currInt);
+ event = INTRPT_EVENT (currInt);
+ err = INTRPT_ERROR (currInt);
+ tx = currInt & INTRPT_DIR_M;
+
+ ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
+ FLUSH_MEM_WRITE ();
+
+ if (log_level >= LOG_DEBUG)
+ {
+ if (err != 0)
+ pr_info(" %08x -> err: %2d,", currInt, err);
+
+ pr_info("+ interrupt event: %d, grp: %d, chan: %2d, side: %cX\n",
+ event, group, gchan, tx ? 'T' : 'R');
+ }
+ pi = &ci->port[group]; /* notice that here we assume 1-1 group -
+ * port mapping */
+ ch = pi->chan[gchan];
+ switch (event)
+ {
+ case EVE_SACK: /* Service Request Acknowledge */
+ if (log_level >= LOG_DEBUG)
+ {
+ volatile u_int32_t r;
+
+ r = pci_read_32 ((u_int32_t *) &pi->reg->srd);
+ pr_info("- SACK cmd: %08x (hdw= %08x)\n", pi->sr_last, r);
+ }
+ SD_SEM_GIVE (&pi->sr_sem_wait); /* wake up waiting process */
+ break;
+ case EVE_CHABT: /* Change To Abort Code (0x7e -> 0xff) */
+ case EVE_CHIC: /* Change To Idle Code (0xff -> 0x7e) */
+ break;
+ case EVE_EOM: /* End Of Message */
+ case EVE_EOB: /* End Of Buffer (Transparent mode) */
+ if (tx)
+ {
+ musycc_bh_tx_eom (pi, gchan);
+ } else
+ {
+ musycc_bh_rx_eom (pi, gchan);
+ }
+#if 0
+ break;
+#else
+ /*
+ * MUSYCC Interrupt Descriptor section states that EOB and EOM
+ * can be combined with the NONE error (as well as others). So
+ * drop thru to catch this...
+ */
+#endif
+ case EVE_NONE:
+ if (err == ERR_SHT)
+ {
+ ch->s.rx_length_errors++;
+ }
+ break;
+ default:
+ if (log_level >= LOG_WARN)
+ pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
+ event, headx, currInt, group);
+ break;
+ } /* switch on event */
+
+
+ /*
+ * Per MUSYCC Manual, Section 6.4.8.3 [Transmit Errors], TX errors
+ * are service-affecting and require action to resume normal
+ * bit-level processing.
+ */
+
+ switch (err)
+ {
+ case ERR_ONR:
+ /*
+ * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors], this
+ * error requires Transmit channel reactivation.
+ *
+ * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], this error
+ * requires Receive channel reactivation.
+ */
+ if (tx)
+ {
+
+ /*
+ * TX ONR Error only occurs when channel is configured for
+ * Transparent Mode. However, this code will catch and
+ * re-activate on ANY TX ONR error.
+ */
+
+ /*
+ * Set flag to re-enable on any next transmit attempt.
+ */
+ ch->ch_start_tx = CH_START_TX_ONR;
+
+ {
+#ifdef RLD_TRANS_DEBUG
+ if (1 || log_level >= LOG_MONITOR)
+#else
+ if (log_level >= LOG_MONITOR)
+#endif
+ {
+ pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
+ ci->devname, ch->channum, ch->p.chan_mode, sd_queue_stopped (ch->user), ch->txd_free);
+#ifdef RLD_DEBUG
+ if (ch->p.chan_mode == 2) /* problem = ONR on HDLC
+ * mode */
+ {
+ pr_info("++ Failed Last %x Next %x QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
+ (u_int32_t) ch->txd_irq_srv, (u_int32_t) ch->txd_usr_add,
+ sd_queue_stopped (ch->user),
+ ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
+ musycc_dump_txbuffer_ring (ch, 0);
+ }
+#endif
+ }
+ }
+ } else /* RX buffer overrun */
+ {
+ /*
+ * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors],
+ * channel recovery for this RX ONR error IS required. It is
+ * also suggested to increase the number of receive buffers
+ * for this channel. Receive channel reactivation IS
+ * required, and data has been lost.
+ */
+ ch->s.rx_over_errors++;
+ ch->ch_start_rx = CH_START_RX_ONR;
+
+ if (log_level >= LOG_WARN)
+ {
+ pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
+ ci->devname, ch->channum, ch->p.chan_mode);
+ //musycc_dump_rxbuffer_ring (ch, 0); /* RLD DEBUG */
+ }
+ }
+ musycc_chan_restart (ch);
+ break;
+ case ERR_BUF:
+ if (tx)
+ {
+ ch->s.tx_fifo_errors++;
+ ch->ch_start_tx = CH_START_TX_BUF;
+ /*
+ * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors],
+ * this BUFF error requires Transmit channel reactivation.
+ */
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
+ ci->devname, ch->channum, ch->p.chan_mode);
+ } else /* RX buffer overrun */
+ {
+ ch->s.rx_over_errors++;
+ /*
+ * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], in
+ * HDLC mode NO recovery is required for this RX BUFF
+ * error. It is suggested to increase the FIFO buffer
+ * space for this channel. Receive channel reactivation is
+ * not required, but data has been lost.
+ */
+ if (log_level >= LOG_WARN)
+ pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
+ ci->devname, ch->channum, ch->p.chan_mode);
+ /*
+ * Per MUSYCC manual, Section 6.4.9.4 [Receive Errors],
+ * Transparent mode DOES require recovery for the RX BUFF
+ * error. It is suggested to increase the FIFO buffer space
+ * for this channel. Receive channel reactivation IS
+ * required and data has been lost.
+ */
+ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ ch->ch_start_rx = CH_START_RX_BUF;
+ }
+
+ if (tx || (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
+ musycc_chan_restart (ch);
+ break;
+ default:
+ break;
+ } /* switch on err */
+
+ /* Check for interrupt lost condition */
+ if ((currInt & INTRPT_ILOST_M) && (log_level >= LOG_ERROR))
+ {
+ pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
+ ci->devname);
+ }
+ ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* ensure wrap-around */
+ FLUSH_MEM_WRITE ();
+ FLUSH_MEM_READ ();
+ } /* while */
+ if ((log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
+ {
+ int bh;
+
+ bh = atomic_read (&ci->bh_pending);
+ pr_info("_bh_: late arrivals, head %d != tail %d, pending %d\n",
+ ci->iqp_headx, ci->iqp_tailx, bh);
+ }
+#if defined(SBE_ISR_IMMEDIATE)
+ return 0L;
+#endif
+ /* else, nothing returned */
+}
+
+#if 0
+int __init
+musycc_new_chan (ci_t * ci, int channum, void *user)
+{
+ mch_t *ch;
+
+ ch = ci->port[channum / MUSYCC_NCHANS].chan[channum % MUSYCC_NCHANS];
+
+ if (ch->state != UNASSIGNED)
+ return EEXIST;
+ /* NOTE: mch_t already cleared during OS_kmalloc() */
+ ch->state = DOWN;
+ ch->user = user;
+#if 0
+ ch->status = 0;
+ ch->p.status = 0;
+ ch->p.intr_mask = 0;
+#endif
+ ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16;
+ ch->p.idlecode = CFG_CH_FLAG_7E;
+ ch->p.pad_fill_count = 2;
+ spin_lock_init (&ch->ch_rxlock);
+ spin_lock_init (&ch->ch_txlock);
+
+ return 0;
+}
+#endif
+
+
+#ifdef SBE_PMCC4_ENABLE
+status_t
+musycc_chan_down (ci_t * dummy, int channum)
+{
+ mpi_t *pi;
+ mch_t *ch;
+ int i, gchan;
+
+ if (!(ch = sd_find_chan (dummy, channum)))
+ return EINVAL;
+ pi = ch->up;
+ gchan = ch->gchan;
+
+ /* Deactivate the channel */
+ musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
+ ch->ch_start_rx = 0;
+ musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
+ ch->ch_start_tx = 0;
+
+ if (ch->state == DOWN)
+ return 0;
+ ch->state = DOWN;
+
+ pi->regram->thp[gchan] = 0;
+ pi->regram->tmp[gchan] = 0;
+ pi->regram->rhp[gchan] = 0;
+ pi->regram->rmp[gchan] = 0;
+ FLUSH_MEM_WRITE ();
+ for (i = 0; i < ch->txd_num; i++)
+ {
+ if (ch->mdt[i].mem_token != 0)
+ OS_mem_token_free (ch->mdt[i].mem_token);
+ }
+
+ for (i = 0; i < ch->rxd_num; i++)
+ {
+ if (ch->mdr[i].mem_token != 0)
+ OS_mem_token_free (ch->mdr[i].mem_token);
+ }
+
+ OS_kfree (ch->mdr);
+ ch->mdr = 0;
+ ch->rxd_num = 0;
+ OS_kfree (ch->mdt);
+ ch->mdt = 0;
+ ch->txd_num = 0;
+
+ musycc_update_timeslots (pi);
+ c4_fifo_free (pi, ch->gchan);
+
+ pi->openchans--;
+ return 0;
+}
+#endif
+
+
+int
+musycc_del_chan (ci_t * ci, int channum)
+{
+ mch_t *ch;
+
+ if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))) /* sanity chk param */
+ return ECHRNG;
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+ if (ch->state == UP)
+ musycc_chan_down (ci, channum);
+ ch->state = UNASSIGNED;
+ return 0;
+}
+
+
+int
+musycc_del_chan_stats (ci_t * ci, int channum)
+{
+ mch_t *ch;
+
+ if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
+ return ECHRNG;
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+
+ memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
+ return 0;
+}
+
+
+int
+musycc_start_xmit (ci_t * ci, int channum, void *mem_token)
+{
+ mch_t *ch;
+ struct mdesc *md;
+ void *m2;
+#if 0
+ unsigned long flags;
+#endif
+ int txd_need_cnt;
+ u_int32_t len;
+
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+
+ if (ci->state != C_RUNNING) /* full interrupt processing available */
+ return EINVAL;
+ if (ch->state != UP)
+ return EINVAL;
+
+ if (!(ch->status & TX_ENABLED))
+ return EROFS; /* how else to flag unwritable state ? */
+
+#ifdef RLD_TRANS_DEBUGx
+ if (1 || log_level >= LOG_MONITOR2)
+#else
+ if (log_level >= LOG_MONITOR2)
+#endif
+ {
+ pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
+ channum, ch->state, ch->ch_start_tx, ch->tx_full,
+ ch->txd_free, ch->txd_required, sd_queue_stopped (ch->user));
+ }
+ /***********************************************/
+ /** Determine total amount of data to be sent **/
+ /***********************************************/
+ m2 = mem_token;
+ txd_need_cnt = 0;
+ for (len = OS_mem_token_tlen (m2); len > 0;
+ m2 = (void *) OS_mem_token_next (m2))
+ {
+ if (!OS_mem_token_len (m2))
+ continue;
+ txd_need_cnt++;
+ len -= OS_mem_token_len (m2);
+ }
+
+ if (txd_need_cnt == 0)
+ {
+ if (log_level >= LOG_MONITOR2)
+ pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
+ OS_mem_token_free (mem_token);
+ return 0; /* no data to send */
+ }
+ /*************************************************/
+ /** Are there sufficient descriptors available? **/
+ /*************************************************/
+ if (txd_need_cnt > ch->txd_num) /* never enough descriptors for this
+ * large a buffer */
+ {
+ if (log_level >= LOG_DEBUG)
+ {
+ pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
+ ch->txd_num, txd_need_cnt + 1);
+ }
+ ch->s.tx_dropped++;
+ OS_mem_token_free (mem_token);
+ return 0;
+ }
+#if 0
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+#endif
+ /************************************************************/
+ /** flow control the line if not enough descriptors remain **/
+ /************************************************************/
+ if (txd_need_cnt > ch->txd_free)
+ {
+ if (log_level >= LOG_MONITOR2)
+ {
+ pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
+ channum, ch->txd_free, ch->txd_num, txd_need_cnt);
+ }
+ ch->tx_full = 1;
+ ch->txd_required = txd_need_cnt;
+ sd_disable_xmit (ch->user);
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+#endif
+ return EBUSY; /* tell user to try again later */
+ }
+ /**************************************************/
+ /** Put the user data into MUSYCC data buffer(s) **/
+ /**************************************************/
+ m2 = mem_token;
+ md = ch->txd_usr_add; /* get current available descriptor */
+
+ for (len = OS_mem_token_tlen (m2); len > 0; m2 = OS_mem_token_next (m2))
+ {
+ int u = OS_mem_token_len (m2);
+
+ if (!u)
+ continue;
+ len -= u;
+
+ /*
+ * Enable following chunks, yet wait to enable the FIRST chunk until
+ * after ALL subsequent chunks are setup.
+ */
+ if (md != ch->txd_usr_add) /* not first chunk */
+ u |= MUSYCC_TX_OWNED; /* transfer ownership from HOST to MUSYCC */
+
+ if (len) /* not last chunk */
+ u |= EOBIRQ_ENABLE;
+ else if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ {
+ /*
+ * Per MUSYCC Ref 6.4.9 for Transparent Mode, the host must
+ * always clear EOMIRQ_ENABLE in every Transmit Buffer Descriptor
+ * (IE. don't set herein).
+ */
+ u |= EOBIRQ_ENABLE;
+ } else
+ u |= EOMIRQ_ENABLE; /* EOM, last HDLC chunk */
+
+
+ /* last chunk in hdlc mode */
+ u |= (ch->p.idlecode << IDLE_CODE);
+ if (ch->p.pad_fill_count)
+ {
+#if 0
+ /* NOOP NOTE: u_int8_t cannot be > 0xFF */
+ /* sanitize pad_fill_count for maximums allowed by hardware */
+ if (ch->p.pad_fill_count > EXTRA_FLAGS_MASK)
+ ch->p.pad_fill_count = EXTRA_FLAGS_MASK;
+#endif
+ u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS));
+ }
+ md->mem_token = len ? 0 : mem_token; /* Fill in mds on last
+ * segment, others set ZERO
+ * so that entire token is
+ * removed ONLY when ALL
+ * segments have been
+ * transmitted. */
+
+ md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m2)));
+ FLUSH_MEM_WRITE ();
+ md->status = cpu_to_le32 (u);
+ --ch->txd_free;
+ md = md->snext;
+ }
+ FLUSH_MEM_WRITE ();
+
+
+ /*
+ * Now transfer ownership of first chunk from HOST to MUSYCC in order to
+ * fire-off this XMIT.
+ */
+ ch->txd_usr_add->status |= __constant_cpu_to_le32 (MUSYCC_TX_OWNED);
+ FLUSH_MEM_WRITE ();
+ ch->txd_usr_add = md;
+
+ len = OS_mem_token_tlen (mem_token);
+ atomic_add (len, &ch->tx_pending);
+ atomic_add (len, &ci->tx_pending);
+ ch->s.tx_packets++;
+ ch->s.tx_bytes += len;
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow pending
+ * interrupt to sneak
+ * thru */
+#endif
+
+ /*
+ * If an ONR was seen, then channel requires poking to restart
+ * transmission.
+ */
+ if (ch->ch_start_tx)
+ {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41)
+ SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
+ * board */
+ if ((ch->ch_start_tx == CH_START_TX_ONR) && (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
+ {
+ /* ONR restart transmission from background loop */
+ ci->wd_notify = WD_NOTIFY_ONR; /* enabled global watchdog
+ * scan-thru */
+ } else
+ {
+ /* start first transmission from background loop */
+ ci->wd_notify = WD_NOTIFY_1TX; /* enabled global watchdog
+ * scan-thru */
+ }
+ musycc_chan_restart (ch);
+ SD_SEM_GIVE (&ci->sem_wdbusy);
+#else
+ musycc_chan_restart (ch);
+#endif
+ }
+#ifdef SBE_WAN256T3_ENABLE
+ wan256t3_led (ci, LED_TX, LEDV_G);
+#endif
+ return 0;
+}
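+
+#if 0
+/*
+ * Caller sketch (illustrative only, not compiled): an OS-facing transmit
+ * hook would typically treat EBUSY from musycc_start_xmit() as flow
+ * control.  The token is NOT freed on that path, so the caller re-queues
+ * it and retries after TX EOM processing frees descriptors and
+ * sd_enable_xmit() reopens the channel.
+ */
+    if (musycc_start_xmit (ci, channum, mem_token) == EBUSY)
+    {
+        /* keep mem_token and retry this transmit later */
+    }
+#endif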
+
+
+#if 0
+int
+musycc_set_chan (ci_t * ci, int channum, struct sbecom_chan_param * p)
+{
+ mch_t *ch;
+ int rok = 0;
+ int n = 0;
+
+ if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
+ return ECHRNG;
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+ if (ch->channum != p->channum)
+ return EINVAL;
+ if (sd_line_is_ok (ch->user))
+ {
+ rok = 1;
+ sd_line_is_down (ch->user);
+ }
+ if (ch->state == UP && /* bring down in current configuration */
+ (ch->p.status != p->status ||
+ ch->p.chan_mode != p->chan_mode ||
+ ch->p.intr_mask != p->intr_mask ||
+ ch->txd_free < ch->txd_num))
+ {
+ if ((n = musycc_chan_down (ci, channum)))
+ return n;
+ if (ch->p.mode_56k != p->mode_56k)
+ {
+ ch->p = *p; /* copy in new parameters */
+ musycc_update_timeslots (&ci->port[ch->channum / MUSYCC_NCHANS]);
+ } else
+ ch->p = *p; /* copy in new parameters */
+ if ((n = musycc_chan_up (ci, channum)))
+ return n;
+ sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
+ * channel */
+ } else
+ {
+ if (ch->p.mode_56k != p->mode_56k)
+ {
+ ch->p = *p; /* copy in new parameters */
+ musycc_update_timeslots (&ci->port[ch->channum / MUSYCC_NCHANS]);
+ } else
+ ch->p = *p; /* copy in new parameters */
+ }
+
+ if (rok)
+ sd_line_is_up (ch->user);
+ return 0;
+}
+#endif
+
+
+int
+musycc_get_chan (ci_t * ci, int channum, struct sbecom_chan_param * p)
+{
+ mch_t *ch;
+
+#if 0
+ if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
+ return ECHRNG;
+#endif
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+ *p = ch->p;
+ return 0;
+}
+
+
+int
+musycc_get_chan_stats (ci_t * ci, int channum, struct sbecom_chan_stats * p)
+{
+ mch_t *ch;
+
+ if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */
+ return ECHRNG;
+ if (!(ch = sd_find_chan (ci, channum)))
+ return ENOENT;
+ *p = ch->s;
+ p->tx_pending = atomic_read (&ch->tx_pending);
+ return 0;
+}
+
+
+
+#ifdef SBE_WAN256T3_ENABLE
+int
+musycc_chan_down (ci_t * ci, int channum)
+{
+ mch_t *ch;
+ mpi_t *pi;
+ int i, gchan;
+
+ if (!(ch = sd_find_chan (ci, channum)))
+ return EINVAL;
+ pi = ch->up;
+ gchan = ch->gchan;
+
+ /* Deactivate the channel */
+ musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan);
+ ch->ch_start_rx = 0;
+ musycc_serv_req (pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan);
+ ch->ch_start_tx = 0;
+
+ if (ch->state == DOWN)
+ return 0;
+ ch->state = DOWN;
+
+ pi->regram->thp[gchan] = 0;
+ pi->regram->tmp[gchan] = 0;
+ pi->regram->rhp[gchan] = 0;
+ pi->regram->rmp[gchan] = 0;
+ FLUSH_MEM_WRITE ();
+ for (i = 0; i < ch->txd_num; i++)
+ {
+ if (ch->mdt[i].mem_token != 0)
+ OS_mem_token_free (ch->mdt[i].mem_token);
+ }
+
+ for (i = 0; i < ch->rxd_num; i++)
+ {
+ if (ch->mdr[i].mem_token != 0)
+ OS_mem_token_free (ch->mdr[i].mem_token);
+ }
+
+ OS_kfree (ch->mdt);
+ ch->mdt = 0;
+ OS_kfree (ch->mdr);
+ ch->mdr = 0;
+
+ return 0;
+}
+#endif
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/musycc.h b/drivers/staging/cxt1e1/musycc.h
new file mode 100644
index 00000000000..d2c91ef686d
--- /dev/null
+++ b/drivers/staging/cxt1e1/musycc.h
@@ -0,0 +1,460 @@
+/*
+ * $Id: musycc.h,v 1.3 2005/09/28 00:10:08 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_MUSYCC_H_
+#define _INC_MUSYCC_H_
+
+/*-----------------------------------------------------------------------------
+ * musycc.h - Multichannel Synchronous Communications Controller
+ * CN8778/8474A/8472A/8471A
+ *
+ * Copyright (C) 2002-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.3 $
+ * Last changed on $Date: 2005/09/28 00:10:08 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: musycc.h,v $
+ * Revision 1.3 2005/09/28 00:10:08 rickd
+ * Add GNU license info. Add PMCC4 PCI/DevIDs. Implement new
+ * musycc reg&bits namings. Use PORTMAP_0 GCD grouping.
+ *
+ * Revision 1.2 2005/04/28 23:43:04 rickd
+ * Add RCS tracking heading.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#if defined (__FreeBSD__) || defined (__NetBSD__)
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+
+#define VINT8 volatile u_int8_t
+#define VINT32 volatile u_int32_t
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "pmcc4_defs.h"
+
+
+/*------------------------------------------------------------------------
+// Vendor, Board Identification definitions
+//------------------------------------------------------------------------
+*/
+
+#define PCI_VENDOR_ID_CONEXANT 0x14f1
+#define PCI_DEVICE_ID_CN8471 0x8471
+#define PCI_DEVICE_ID_CN8472 0x8472
+#define PCI_DEVICE_ID_CN8474 0x8474
+#define PCI_DEVICE_ID_CN8478 0x8478
+#define PCI_DEVICE_ID_CN8500 0x8500
+#define PCI_DEVICE_ID_CN8501 0x8501
+#define PCI_DEVICE_ID_CN8502 0x8502
+#define PCI_DEVICE_ID_CN8503 0x8503
+
+#define INT_QUEUE_SIZE MUSYCC_NIQD
+
+/* RAM image of MUSYCC registers laid out as a C structure */
+ struct musycc_groupr
+ {
+ VINT32 thp[32]; /* Transmit Head Pointer [5-29] */
+ VINT32 tmp[32]; /* Transmit Message Pointer [5-30] */
+ VINT32 rhp[32]; /* Receive Head Pointer [5-29] */
+ VINT32 rmp[32]; /* Receive Message Pointer [5-30] */
+ VINT8 ttsm[128]; /* Time Slot Map [5-22] */
+ VINT8 tscm[256]; /* Subchannel Map [5-24] */
+ VINT32 tcct[32]; /* Channel Configuration [5-26] */
+ VINT8 rtsm[128]; /* Time Slot Map [5-22] */
+ VINT8 rscm[256]; /* Subchannel Map [5-24] */
+ VINT32 rcct[32]; /* Channel Configuration [5-26] */
+ VINT32 __glcd; /* Global Configuration Descriptor [5-10] */
+ VINT32 __iqp; /* Interrupt Queue Pointer [5-36] */
+ VINT32 __iql; /* Interrupt Queue Length [5-36] */
+ VINT32 grcd; /* Group Configuration Descriptor [5-16] */
+ VINT32 mpd; /* Memory Protection Descriptor [5-18] */
+ VINT32 mld; /* Message Length Descriptor [5-20] */
+ VINT32 pcd; /* Port Configuration Descriptor [5-19] */
+ };
+
+/* hardware MUSYCC registers laid out as a C structure */
+ struct musycc_globalr
+ {
+ VINT32 gbp; /* Group Base Pointer */
+ VINT32 dacbp; /* Dual Address Cycle Base Pointer */
+ VINT32 srd; /* Service Request Descriptor */
+ VINT32 isd; /* Interrupt Service Descriptor */
+ /*
+ * adjust __thp due to above 4 registers, which are not contained
+ * within musycc_groupr[]. All __XXX[] are just place holders,
+ * anyhow.
+ */
+ VINT32 __thp[32 - 4]; /* Transmit Head Pointer [5-29] */
+ VINT32 __tmp[32]; /* Transmit Message Pointer [5-30] */
+ VINT32 __rhp[32]; /* Receive Head Pointer [5-29] */
+ VINT32 __rmp[32]; /* Receive Message Pointer [5-30] */
+ VINT8 ttsm[128]; /* Time Slot Map [5-22] */
+ VINT8 tscm[256]; /* Subchannel Map [5-24] */
+ VINT32 tcct[32]; /* Channel Configuration [5-26] */
+ VINT8 rtsm[128]; /* Time Slot Map [5-22] */
+ VINT8 rscm[256]; /* Subchannel Map [5-24] */
+ VINT32 rcct[32]; /* Channel Configuration [5-26] */
+ VINT32 glcd; /* Global Configuration Descriptor [5-10] */
+ VINT32 iqp; /* Interrupt Queue Pointer [5-36] */
+ VINT32 iql; /* Interrupt Queue Length [5-36] */
+ VINT32 grcd; /* Group Configuration Descriptor [5-16] */
+ VINT32 mpd; /* Memory Protection Descriptor [5-18] */
+ VINT32 mld; /* Message Length Descriptor [5-20] */
+ VINT32 pcd; /* Port Configuration Descriptor [5-19] */
+ VINT32 rbist; /* Receive BIST status [5-4] */
+ VINT32 tbist; /* Transmit BIST status [5-4] */
+ };
+
+/* Global Config Descriptor bit macros */
+#define MUSYCC_GCD_ECLK_ENABLE 0x00000800 /* EBUS clock enable */
+#define MUSYCC_GCD_INTEL_SELECT 0x00000400 /* MPU type select */
+#define MUSYCC_GCD_INTA_DISABLE 0x00000008 /* PCI INTA disable */
+#define MUSYCC_GCD_INTB_DISABLE 0x00000004 /* PCI INTB disable */
+#define MUSYCC_GCD_BLAPSE 12 /* Position index for BLAPSE bit
+ * field */
+#define MUSYCC_GCD_ALAPSE 8 /* Position index for ALAPSE bit
+ * field */
+#define MUSYCC_GCD_ELAPSE 4 /* Position index for ELAPSE bit
+ * field */
+#define MUSYCC_GCD_PORTMAP_3 3 /* Reserved */
+#define MUSYCC_GCD_PORTMAP_2 2 /* Port 0=>Grp 0,1,2,3; Port 1=>Grp
+ * 4,5,6,7 */
+#define MUSYCC_GCD_PORTMAP_1 1 /* Port 0=>Grp 0,1; Port 1=>Grp 2,3,
+ * etc... */
+#define MUSYCC_GCD_PORTMAP_0 0 /* Port 0=>Grp 0; Port 1=>Grp 2,
+ * etc... */
+
+/* and board specific assignments... */
+#ifdef SBE_WAN256T3_ENABLE
+#define BLAPSE_VAL 0
+#define ALAPSE_VAL 0
+#define ELAPSE_VAL 7
+#define PORTMAP_VAL MUSYCC_GCD_PORTMAP_2
+#endif
+
+#ifdef SBE_PMCC4_ENABLE
+#define BLAPSE_VAL 7
+#define ALAPSE_VAL 3
+#define ELAPSE_VAL 7
+#define PORTMAP_VAL MUSYCC_GCD_PORTMAP_0
+#endif
+
+#define GCD_MAGIC (((BLAPSE_VAL)<<(MUSYCC_GCD_BLAPSE)) | \
+ ((ALAPSE_VAL)<<(MUSYCC_GCD_ALAPSE)) | \
+ ((ELAPSE_VAL)<<(MUSYCC_GCD_ELAPSE)) | \
+ (MUSYCC_GCD_ECLK_ENABLE) | PORTMAP_VAL)
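+
+/*
+ * For reference (illustrative): with the SBE_PMCC4_ENABLE values above
+ * (BLAPSE 7, ALAPSE 3, ELAPSE 7, PORTMAP_0), GCD_MAGIC evaluates to
+ * (7 << 12) | (3 << 8) | (7 << 4) | MUSYCC_GCD_ECLK_ENABLE == 0x00007b70.
+ */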
+
+/* Group Config Descriptor bit macros */
+#define MUSYCC_GRCD_RX_ENABLE 0x00000001 /* Enable receive processing */
+#define MUSYCC_GRCD_TX_ENABLE 0x00000002 /* Enable transmit processing */
+#define MUSYCC_GRCD_SUBCHAN_DISABLE 0x00000004 /* Master disable for
+ * subchanneling */
+#define MUSYCC_GRCD_OOFMP_DISABLE 0x00000008 /* Out of Frame message
+ * processing disabled all
+ * channels */
+#define MUSYCC_GRCD_OOFIRQ_DISABLE 0x00000010 /* Out of Frame/In Frame irqs
+ * disabled */
+#define MUSYCC_GRCD_COFAIRQ_DISABLE 0x00000020 /* Change of Frame Alignment
+ * irq disabled */
+#define MUSYCC_GRCD_INHRBSD 0x00000100 /* Receive Buffer Status
+ * overwrite disabled */
+#define MUSYCC_GRCD_INHTBSD 0x00000200 /* Transmit Buffer Status
+ * overwrite disabled */
+#define MUSYCC_GRCD_SF_ALIGN 0x00008000 /* External frame sync */
+#define MUSYCC_GRCD_MC_ENABLE 0x00000040 /* Message configuration bits
+ * copy enable. Conexant sez
+ * turn this on */
+#define MUSYCC_GRCD_POLLTH_16 0x00000001 /* Poll every 16th frame */
+#define MUSYCC_GRCD_POLLTH_32 0x00000002 /* Poll every 32nd frame */
+#define MUSYCC_GRCD_POLLTH_64 0x00000003 /* Poll every 64th frame */
+#define MUSYCC_GRCD_POLLTH_SHIFT 10 /* Position index for poll throttle
+ * bit field */
+#define MUSYCC_GRCD_SUERM_THRESH_SHIFT 16 /* Position index for SUERM
+ * count threshold */
+
+/* Port Config Descriptor bit macros */
+#define MUSYCC_PCD_E1X2_MODE 2 /* Port mode in bits 0-2. T1 and E1 */
+#define MUSYCC_PCD_E1X4_MODE 3 /* are defined in cn847x.h */
+#define MUSYCC_PCD_NX64_MODE 4
+#define MUSYCC_PCD_TXDATA_RISING 0x00000010 /* Sample Tx data on TCLK
+ * rising edge */
+#define MUSYCC_PCD_TXSYNC_RISING 0x00000020 /* Sample Tx frame sync on
+ * TCLK rising edge */
+#define MUSYCC_PCD_RXDATA_RISING 0x00000040 /* Sample Rx data on RCLK
+ * rising edge */
+#define MUSYCC_PCD_RXSYNC_RISING 0x00000080 /* Sample Rx frame sync on
+ * RCLK rising edge */
+#define MUSYCC_PCD_ROOF_RISING 0x00000100 /* Sample Rx Out Of Frame
+ * signal on RCLK rising edge */
+#define MUSYCC_PCD_TX_DRIVEN 0x00000200 /* When no timeslot is mapped,
+ * output drives logic 1,
+ * else tristate */
+#define MUSYCC_PCD_PORTMODE_MASK 0xfffffff8 /* For changing the port mode
+ * between E1 and T1 */
+
+/* Time Slot Descriptor bit macros */
+#define MUSYCC_TSD_MODE_64KBPS 4
+#define MUSYCC_TSD_MODE_56KBPS 5
+#define MUSYCC_TSD_SUBCHANNEL_WO_FIRST 6
+#define MUSYCC_TSD_SUBCHANNEL_WITH_FIRST 7
+
+/* Message Descriptor bit macros */
+#define MUSYCC_MDT_BASE03_ADDR 0x00006000
+
+/* Channel Config Descriptor bit macros */
+#define MUSYCC_CCD_BUFIRQ_DISABLE 0x00000002 /* BUFF and ONR irqs disabled */
+#define MUSYCC_CCD_EOMIRQ_DISABLE 0x00000004 /* EOM irq disabled */
+#define MUSYCC_CCD_MSGIRQ_DISABLE 0x00000008 /* LNG, FCS, ALIGN, and ABT
+ * irqs disabled */
+#define MUSYCC_CCD_IDLEIRQ_DISABLE 0x00000010 /* CHABT, CHIC, and SHT irqs
+ * disabled */
+#define MUSYCC_CCD_FILTIRQ_DISABLE 0x00000020 /* SFILT irq disabled */
+#define MUSYCC_CCD_SDECIRQ_DISABLE 0x00000040 /* SDEC irq disabled */
+#define MUSYCC_CCD_SINCIRQ_DISABLE 0x00000080 /* SINC irq disabled */
+#define MUSYCC_CCD_SUERIRQ_DISABLE 0x00000100 /* SUERR irq disabled */
+#define MUSYCC_CCD_FCS_XFER 0x00000200 /* Propagate FCS along with
+ * received data */
+#define MUSYCC_CCD_PROTO_SHIFT 12 /* Position index for protocol bit
+ * field */
+#define MUSYCC_CCD_TRANS 0 /* Protocol mode in bits 12-14 */
+#define MUSYCC_CCD_SS7 1
+#define MUSYCC_CCD_HDLC_FCS16 2
+#define MUSYCC_CCD_HDLC_FCS32 3
+#define MUSYCC_CCD_EOPIRQ_DISABLE 0x00008000 /* EOP irq disabled */
+#define MUSYCC_CCD_INVERT_DATA 0x00800000 /* Invert data */
+#define MUSYCC_CCD_MAX_LENGTH 10 /* Position index for max length bit
+ * field */
+#define MUSYCC_CCD_BUFFER_LENGTH 16 /* Position index for internal data
+ * buffer length */
+#define MUSYCC_CCD_BUFFER_LOC 24 /* Position index for internal data
+ * buffer starting location */
+
+/****************************************************************************
+ * Interrupt Descriptor Information */
+
+#define INT_EMPTY_ENTRY 0xfeedface
+#define INT_EMPTY_ENTRY2 0xdeadface
+
+/****************************************************************************
+ * Interrupt Status Descriptor
+ *
+ * NOTE: One must first fetch the value of the interrupt status descriptor
+ * into a local variable, then pass that value into the read macros. This
+ * is required to avoid race conditions.
+ ***/
+
+#define INTRPTS_NEXTINT_M 0x7FFF0000
+#define INTRPTS_NEXTINT_S 16
+#define INTRPTS_NEXTINT(x) ((x & INTRPTS_NEXTINT_M) >> INTRPTS_NEXTINT_S)
+
+#define INTRPTS_INTFULL_M 0x00008000
+#define INTRPTS_INTFULL_S 15
+#define INTRPTS_INTFULL(x) ((x & INTRPTS_INTFULL_M) >> INTRPTS_INTFULL_S)
+
+#define INTRPTS_INTCNT_M 0x00007FFF
+#define INTRPTS_INTCNT_S 0
+#define INTRPTS_INTCNT(x) ((x & INTRPTS_INTCNT_M) >> INTRPTS_INTCNT_S)
+
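+/*
+ * Illustrative sketch only, not part of the original driver (kept under
+ * "#if 0" so nothing is compiled): per the note above, the interrupt
+ * status descriptor is latched once into a local copy and the read macros
+ * are applied to that copy. The function name is hypothetical.
+ */
+#if 0
+static void
+musycc_example_decode_istatus (u_int32_t istatus) /* latched copy */
+{
+    u_int32_t nextint = INTRPTS_NEXTINT (istatus); /* next queue index */
+    u_int32_t full = INTRPTS_INTFULL (istatus); /* queue-full flag */
+    u_int32_t count = INTRPTS_INTCNT (istatus); /* pending entries */
+
+    (void) nextint; (void) full; (void) count;
+}
+#endif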
+
+/****************************************************************************
+ * Interrupt Descriptor
+ ***/
+
+#define INTRPT_DIR_M 0x80000000
+#define INTRPT_DIR_S 31
+#define INTRPT_DIR(x) ((x & INTRPT_DIR_M) >> INTRPT_DIR_S)
+
+#define INTRPT_GRP_M 0x60000000
+#define INTRPT_GRP_MSB_M 0x00004000
+#define INTRPT_GRP_S 29
+#define INTRPT_GRP_MSB_S 12
+#define INTRPT_GRP(x) (((x & INTRPT_GRP_M) >> INTRPT_GRP_S) | \
+ ((x & INTRPT_GRP_MSB_M) >> INTRPT_GRP_MSB_S))
+
+#define INTRPT_CH_M 0x1F000000
+#define INTRPT_CH_S 24
+#define INTRPT_CH(x) ((x & INTRPT_CH_M) >> INTRPT_CH_S)
+
+#define INTRPT_EVENT_M 0x00F00000
+#define INTRPT_EVENT_S 20
+#define INTRPT_EVENT(x) ((x & INTRPT_EVENT_M) >> INTRPT_EVENT_S)
+
+#define INTRPT_ERROR_M 0x000F0000
+#define INTRPT_ERROR_S 16
+#define INTRPT_ERROR(x) ((x & INTRPT_ERROR_M) >> INTRPT_ERROR_S)
+
+#define INTRPT_ILOST_M 0x00008000
+#define INTRPT_ILOST_S 15
+#define INTRPT_ILOST(x) ((x & INTRPT_ILOST_M) >> INTRPT_ILOST_S)
+
+#define INTRPT_PERR_M 0x00004000
+#define INTRPT_PERR_S 14
+#define INTRPT_PERR(x) ((x & INTRPT_PERR_M) >> INTRPT_PERR_S)
+
+#define INTRPT_BLEN_M 0x00003FFF
+#define INTRPT_BLEN_S 0
+#define INTRPT_BLEN(x) ((x & INTRPT_BLEN_M) >> INTRPT_BLEN_S)
+
+
+/* Buffer Descriptor bit macros */
+#define OWNER_BIT 0x80000000 /* Set for MUSYCC owner on xmit, host
+ * owner on receive */
+#define HOST_TX_OWNED 0x00000000 /* Host owns descriptor */
+#define MUSYCC_TX_OWNED 0x80000000 /* MUSYCC owns descriptor */
+#define HOST_RX_OWNED 0x80000000 /* Host owns descriptor */
+#define MUSYCC_RX_OWNED 0x00000000 /* MUSYCC owns descriptor */
+
+#define POLL_DISABLED 0x40000000 /* MUSYCC not allowed to poll buffer
+ * for ownership */
+#define EOMIRQ_ENABLE 0x20000000 /* This buffer contains the end of
+ * the message */
+#define EOBIRQ_ENABLE 0x10000000 /* EOB irq enabled */
+#define PADFILL_ENABLE 0x01000000 /* Enable padfill */
+#define REPEAT_BIT 0x00008000 /* Bit on for FISU descriptor */
+#define LENGTH_MASK 0x3fff /* Length field of the status
+ * descriptor */
+#define IDLE_CODE 25 /* Position index for idle code (2
+ * bits) */
+#define EXTRA_FLAGS 16 /* Position index for minimum flags
+ * between messages (8 bits) */
+#define IDLE_CODE_MASK 0x03 /* Gets rid of garbage before the
+ * pattern is OR'd in */
+#define EXTRA_FLAGS_MASK 0xff /* Gets rid of garbage before the
+ * pattern is OR'd in */
+#define PCI_PERMUTED_OWNER_BIT 0x00000080 /* For flipping the bit on
+ * the polled mode descriptor */
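+/*
+ * Illustrative sketch only, not part of the original driver (kept under
+ * "#if 0"): composing a transmit buffer descriptor status word from the
+ * macros above. The function name and its arguments are hypothetical.
+ */
+#if 0
+static u_int32_t
+musycc_example_tx_status (u_int32_t len, u_int32_t idle, u_int32_t minflags)
+{
+    u_int32_t status = (len & LENGTH_MASK); /* message length */
+
+    status |= ((idle & IDLE_CODE_MASK) << IDLE_CODE); /* idle code */
+    status |= ((minflags & EXTRA_FLAGS_MASK) << EXTRA_FLAGS); /* min flags */
+    status |= EOMIRQ_ENABLE; /* last buffer of the message */
+    status |= MUSYCC_TX_OWNED; /* hand the descriptor to the MUSYCC */
+    return status;
+}
+#endif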
+
+/* Service Request Descriptor bit macros */
+#define SREQ 8 /* Position index for service request bit
+ * field */
+#define SR_NOOP (0<<(SREQ)) /* No Operation. Generates SACK */
+#define SR_CHIP_RESET (1<<(SREQ)) /* Soft chip reset */
+#define SR_GROUP_RESET (2<<(SREQ)) /* Group reset */
+#define SR_GLOBAL_INIT (4<<(SREQ)) /* Global init: read global
+ * config desc and interrupt
+ * queue desc */
+#define SR_GROUP_INIT (5<<(SREQ)) /* Group init: read Timeslot
+ * and Subchannel maps, Channel
+ * Config, Group Config, Memory
+ * Protect, Message Length, and
+ * Port Config Descriptors */
+#define SR_CHANNEL_ACTIVATE (8<<(SREQ)) /* Init channel, read Head
+ * Pointer, process first
+ * Message Descriptor */
+#define SR_GCHANNEL_MASK 0x001F /* channel portion (gchan) */
+#define SR_CHANNEL_DEACTIVATE (9<<(SREQ)) /* Stop channel processing */
+#define SR_JUMP (10<<(SREQ)) /* a: Process new Message
+ * List */
+#define SR_CHANNEL_CONFIG (11<<(SREQ)) /* b: Read channel
+ * Configuration Descriptor */
+#define SR_GLOBAL_CONFIG (16<<(SREQ)) /* 10: Read Global
+ * Configuration Descriptor */
+#define SR_INTERRUPT_Q (17<<(SREQ)) /* 11: Read Interrupt Queue
+ * Descriptor */
+#define SR_GROUP_CONFIG (18<<(SREQ)) /* 12: Read Group
+ * Configuration Descriptor */
+#define SR_MEMORY_PROTECT (19<<(SREQ)) /* 13: Read Memory Protection
+ * Descriptor */
+#define SR_MESSAGE_LENGTH (20<<(SREQ)) /* 14: Read Message Length
+ * Descriptor */
+#define SR_PORT_CONFIG (21<<(SREQ)) /* 15: Read Port
+ * Configuration Descriptor */
+#define SR_TIMESLOT_MAP (24<<(SREQ)) /* 18: Read Timeslot Map */
+#define SR_SUBCHANNEL_MAP (25<<(SREQ)) /* 19: Read Subchannel Map */
+#define SR_CHAN_CONFIG_TABLE (26<<(SREQ)) /* 20: Read Channel
+ * Configuration Table for
+ * the group */
+#define SR_TX_DIRECTION 0x00000020 /* Transmit direction bit.
+ * Bit off indicates receive
+ * direction */
+#define SR_RX_DIRECTION 0x00000000
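+/*
+ * Illustrative sketch only, not part of the original driver (kept under
+ * "#if 0"): a service request word is one SR_xxx command OR'd with a
+ * direction bit and, for per-channel requests, the channel number masked
+ * by SR_GCHANNEL_MASK. The function name is hypothetical.
+ */
+#if 0
+static u_int32_t
+musycc_example_activate_rx_req (u_int32_t gchan)
+{
+    return (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION |
+            (gchan & SR_GCHANNEL_MASK));
+}
+#endif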
+
+/* Interrupt Descriptor bit macros */
+#define GROUP10 29 /* Position index for the 2 LS group
+ * bits */
+#define CHANNEL 24 /* Position index for channel bits */
+#define INT_IQD_TX 0x80000000
+#define INT_IQD_GRP 0x60000000
+#define INT_IQD_CHAN 0x1f000000
+#define INT_IQD_EVENT 0x00f00000
+#define INT_IQD_ERROR 0x000f0000
+#define INT_IQD_ILOST 0x00008000
+#define INT_IQD_PERR 0x00004000
+#define INT_IQD_BLEN 0x00003fff
+
+/* Interrupt Descriptor Events */
+#define EVE_EVENT 20 /* Position index for event bits */
+#define EVE_NONE 0 /* No event to report in this
+ * interrupt */
+#define EVE_SACK 1 /* Service Request acknowledge */
+#define EVE_EOB 2 /* End of Buffer */
+#define EVE_EOM 3 /* End of Message */
+#define EVE_EOP 4 /* End of Padfill */
+#define EVE_CHABT 5 /* Change to Abort Code */
+#define EVE_CHIC 6 /* Change to Idle Code */
+#define EVE_FREC 7 /* Frame Recovery */
+#define EVE_SINC 8 /* MTP2 SUERM Increment */
+#define EVE_SDEC 9 /* MTP2 SUERM Decrement */
+#define EVE_SFILT 10 /* MTP2 SUERM Filtered Message */
+/* Interrupt Descriptor Errors */
+#define ERR_ERRORS 16 /* Position index for error bits */
+#define ERR_BUF 1 /* Buffer Error */
+#define ERR_COFA 2 /* Change of Frame Alignment Error */
+#define ERR_ONR 3 /* Owner Bit Error */
+#define ERR_PROT 4 /* Memory Protection Error */
+#define ERR_OOF 8 /* Out of Frame Error */
+#define ERR_FCS 9 /* FCS Error */
+#define ERR_ALIGN 10 /* Octet Alignment Error */
+#define ERR_ABT 11 /* Abort Termination */
+#define ERR_LNG 12 /* Long Message Error */
+#define ERR_SHT 13 /* Short Message Error */
+#define ERR_SUERR 14 /* SUERM threshold exceeded */
+#define ERR_PERR 15 /* PCI Parity Error */
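+/*
+ * Illustrative sketch only, not part of the original driver (kept under
+ * "#if 0"): decoding one interrupt queue entry with the INTRPT_xxx macros
+ * defined earlier. The function name is hypothetical.
+ */
+#if 0
+static void
+musycc_example_decode_iqd (u_int32_t iqd)
+{
+    int event = INTRPT_EVENT (iqd); /* EVE_SACK, EVE_EOB, EVE_EOM, ... */
+    int error = INTRPT_ERROR (iqd); /* ERR_BUF, ERR_ONR, ERR_FCS, ... */
+    int chan = INTRPT_CH (iqd); /* channel within the group */
+    int tx = INTRPT_DIR (iqd); /* 1 = transmit, 0 = receive */
+
+    (void) event; (void) error; (void) chan; (void) tx;
+}
+#endif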
+/* Other Stuff */
+#define TRANSMIT_DIRECTION 0x80000000 /* Transmit direction bit. Bit off
+ * indicates receive direction */
+#define ILOST 0x00008000 /* Interrupt Lost */
+#define GROUPMSB 0x00004000 /* Group number MSB */
+#define SACK_IMAGE 0x00100000 /* Used in IRQ for semaphore test */
+#define INITIAL_STATUS 0x10000 /* IRQ status should be this after
+ * reset */
+
+/* This must be defined on an entire channel group (Port) basis */
+#define SUERM_THRESHOLD 0x1f
+
+#ifdef __cplusplus
+}
+#endif
+
+#undef VINT32
+#undef VINT8
+
+#endif /*** _INC_MUSYCC_H_ ***/
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
new file mode 100644
index 00000000000..a56029866c2
--- /dev/null
+++ b/drivers/staging/cxt1e1/ossiRelease.c
@@ -0,0 +1,39 @@
+/*
+ * $Id: ossiRelease.c,v 1.2 2008/05/08 20:14:03 rdobbs PMCC4_3_1B $
+ */
+
+/*-----------------------------------------------------------------------------
+ * ossiRelease.c -
+ *
+ * This string will be embedded into the executable and will track the
+ * release. The embedded string may be displayed using the following:
+ *
+ * strings <filename> | grep \$Rel
+ *
+ * Copyright (C) 2002-2008 One Stop Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@onestopsystems.com
+ * One Stop Systems, Inc. Escondido, California U.S.A.
+ *
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.2 $
+ * Last changed on $Date: 2008/05/08 20:14:03 $
+ * Changed by $Author: rdobbs $
+ *-----------------------------------------------------------------------------
+ */
+
+
+char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
new file mode 100644
index 00000000000..1c8dfb80e7d
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -0,0 +1,561 @@
+/* pmc93x6_eeprom.c - PMC's 93LC46 EEPROM Device
+ *
+ * The 93LC46 is a low-power, serial Electrically Erasable and
+ * Programmable Read Only Memory organized as 128 8-bit bytes.
+ *
+ * Accesses to the 93LC46 are done in a bit serial stream, organized
+ * in a 3 wire format. Writes are internally timed by the device
+ * (the In data bit is pulled low until the write is complete and
+ * then is pulled high) and take about 6 milliseconds.
+ *
+ * Copyright (C) 2003-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "pmcc4.h"
+#include "sbe_promformat.h"
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+
+/*------------------------------------------------------------------------
+ * EEPROM address definitions
+ *------------------------------------------------------------------------
+ *
+ * The offset in the definitions below allows the test to skip over
+ * areas of the EEPROM that other programs (such as VxWorks) are
+ * using.
+ */
+
+#define EE_MFG (long)0 /* Index to manufacturing record */
+#define EE_FIRST 0x28 /* Index to start testing at */
+#define EE_LIMIT 128 /* Index to end testing at */
+
+
+/* Bit Ordering for Instructions
+**
+** A0, A1, A2, A3, A4, A5, A6, OP0, OP1, SB (lsb, or 1st bit out)
+**
+*/
+
+#define EPROM_EWEN 0x0019 /* Erase/Write enable (reversed) */
+#define EPROM_EWDS 0x0001 /* Erase/Write disable (reversed) */
+#define EPROM_READ 0x0003 /* Read (reversed) */
+#define EPROM_WRITE 0x0005 /* Write (reversed) */
+#define EPROM_ERASE 0x0007 /* Erase (reversed) */
+#define EPROM_ERAL 0x0009 /* Erase All (reversed) */
+#define EPROM_WRAL 0x0011 /* Write All (reversed) */
+
+#define EPROM_ADR_SZ 7 /* Number of bits in offset address */
+#define EPROM_OP_SZ 3 /* Number of bits in command */
+#define SIZE_ADDR_OP (EPROM_ADR_SZ + EPROM_OP_SZ)
+#define LC46A_MAX_OPS 10 /* Number of bits in Instruction */
+#define NUM_OF_BITS 8 /* Number of bits in data */
+
+
+/* EEPROM signal bits */
+#define EPROM_ACTIVE_OUT_BIT 0x0001 /* Out data bit */
+#define EPROM_ACTIVE_IN_BIT 0x0002 /* In data bit */
+#define ACTIVE_IN_BIT_SHIFT 0x0001 /* Shift In data bit to LSB */
+#define EPROM_ENCS 0x0004 /* Set EEPROM CS during operation */
+
+
+/*------------------------------------------------------------------------
+ * The ByteReverse table is used to reverse the 8 bits within a byte
+ *------------------------------------------------------------------------
+ */
+
+static unsigned char ByteReverse[256];
+static int ByteReverseBuilt = FALSE;
+
+
+/*------------------------------------------------------------------------
+ * mfg_template - initial serial EEPROM data structure
+ *------------------------------------------------------------------------
+ */
+
+short mfg_template[sizeof (FLD_TYPE2)] =
+{
+ PROM_FORMAT_TYPE2, /* type; */
+ 0x00, 0x1A, /* length[2]; */
+ 0x00, 0x00, 0x00, 0x00, /* Crc32[4]; */
+ 0x11, 0x76, /* Id[2]; */
+ 0x07, 0x05, /* SubId[2] E1; */
+ 0x00, 0xA0, 0xD6, 0x00, 0x00, 0x00, /* Serial[6]; */
+ 0x00, 0x00, 0x00, 0x00, /* CreateTime[4]; */
+ 0x00, 0x00, 0x00, 0x00, /* HeatRunTime[4]; */
+ 0x00, 0x00, 0x00, 0x00, /* HeatRunIterations[4]; */
+ 0x00, 0x00, 0x00, 0x00, /* HeatRunErrors[4]; */
+};
+
+
+/*------------------------------------------------------------------------
+ * BuildByteReverse - build the 8-bit reverse table
+ *------------------------------------------------------------------------
+ *
+ * The 'ByteReverse' table reverses the 8 bits within a byte
+ * (the MSB becomes the LSB etc.).
+ */
+
+STATIC void
+BuildByteReverse (void)
+{
+ long half; /* Used to build by powers of 2 */
+ int i;
+
+ ByteReverse[0] = 0;
+
+ for (half = 1; half < sizeof (ByteReverse); half <<= 1)
+ for (i = 0; i < half; i++)
+ ByteReverse[half + i] = (char) (ByteReverse[i] | (0x80 / half));
+
+ ByteReverseBuilt = TRUE;
+}
+
+
+/*------------------------------------------------------------------------
+ * eeprom_delay - small delay for EEPROM timing
+ *------------------------------------------------------------------------
+ */
+
+STATIC void
+eeprom_delay (void)
+{
+ int timeout;
+
+ for (timeout = 20; timeout; --timeout)
+ {
+ OS_uwait_dummy ();
+ }
+}
+
+
+/*------------------------------------------------------------------------
+ * eeprom_put_byte - Send a byte to the EEPROM serially
+ *------------------------------------------------------------------------
+ *
+ * Given the PCI address and the data, this routine serially sends
+ * the data to the EEPROM.
+ */
+
+void
+eeprom_put_byte (long addr, long data, int count)
+{
+ u_int32_t output;
+
+ while (--count >= 0)
+ {
+ output = (data & EPROM_ACTIVE_OUT_BIT) ? 1 : 0; /* Get next data bit */
+ output |= EPROM_ENCS; /* Add Chip Select */
+ data >>= 1;
+
+ eeprom_delay ();
+ pci_write_32 ((u_int32_t *) addr, output); /* Output it */
+ }
+}
+
+
+/*------------------------------------------------------------------------
+ * eeprom_get_byte - Receive a byte from the EEPROM serially
+ *------------------------------------------------------------------------
+ *
+ * Given the PCI address, this routine serially fetches the data
+ * from the EEPROM.
+ */
+
+u_int32_t
+eeprom_get_byte (long addr)
+{
+ u_int32_t input;
+ u_int32_t data;
+ int count;
+
+/* Start the Reading of DATA
+**
+** The first read is a dummy as the data is latched in the
+** EPLD and read on the next read access to the EEPROM.
+*/
+
+ input = pci_read_32 ((u_int32_t *) addr);
+
+ data = 0;
+ count = NUM_OF_BITS;
+ while (--count >= 0)
+ {
+ eeprom_delay ();
+ input = pci_read_32 ((u_int32_t *) addr);
+
+ data <<= 1; /* Shift data over */
+ data |= (input & EPROM_ACTIVE_IN_BIT) ? 1 : 0;
+
+ }
+
+ return data;
+}
+
+
+/*------------------------------------------------------------------------
+ * disable_pmc_eeprom - Disable writes to the EEPROM
+ *------------------------------------------------------------------------
+ *
+ * Issue the EEPROM command to disable writes.
+ */
+
+STATIC void
+disable_pmc_eeprom (long addr)
+{
+ eeprom_put_byte (addr, EPROM_EWDS, SIZE_ADDR_OP);
+
+ pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select
+ * from EEPROM */
+}
+
+
+/*------------------------------------------------------------------------
+ * enable_pmc_eeprom - Enable writes to the EEPROM
+ *------------------------------------------------------------------------
+ *
+ * Issue the EEPROM command to enable writes.
+ */
+
+STATIC void
+enable_pmc_eeprom (long addr)
+{
+ eeprom_put_byte (addr, EPROM_EWEN, SIZE_ADDR_OP);
+
+ pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select
+ * from EEPROM */
+}
+
+
+/*------------------------------------------------------------------------
+ * pmc_eeprom_read - EEPROM location read
+ *------------------------------------------------------------------------
+ *
+ * Given an EEPROM PCI address and location offset, this routine returns
+ * the contents of the specified location to the calling routine.
+ */
+
+u_int32_t
+pmc_eeprom_read (long addr, long mem_offset)
+{
+ u_int32_t data; /* Data from chip */
+
+ if (!ByteReverseBuilt)
+ BuildByteReverse ();
+
+ mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */
+ /*
+ * NOTE: The max offset address is 128 or half the reversal table. So the
+ * LSB is always zero and counts as a built in shift of one bit. So even
+ * though we need to shift 3 bits to make room for the command, we only
+ * need to shift twice more because of the built in shift.
+ */
+ mem_offset <<= 2; /* Shift for command */
+ mem_offset |= EPROM_READ; /* Add command */
+
+ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */
+
+ data = eeprom_get_byte (addr); /* Read chip data */
+
+ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from
+ * EEPROM */
+
+ return (data & 0x000000FF);
+}
+
+
+/*------------------------------------------------------------------------
+ * pmc_eeprom_write - EEPROM location write
+ *------------------------------------------------------------------------
+ *
+ * Given an EEPROM PCI address, location offset and value, this
+ * routine writes the value to the specified location.
+ *
+ * Note: it is up to the caller to determine if the write
+ * operation succeeded.
+ */
+
+int
+pmc_eeprom_write (long addr, long mem_offset, u_int32_t data)
+{
+ volatile u_int32_t temp;
+ int count;
+
+ if (!ByteReverseBuilt)
+ BuildByteReverse ();
+
+ mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */
+ /*
+ * NOTE: The max offset address is 128 or half the reversal table. So the
+ * LSB is always zero and counts as a built in shift of one bit. So even
+ * though we need to shift 3 bits to make room for the command, we only
+ * need to shift twice more because of the built in shift.
+ */
+ mem_offset <<= 2; /* Shift for command */
+ mem_offset |= EPROM_WRITE; /* Add command */
+
+ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */
+
+ data = ByteReverse[0xFF & data];/* Reverse data */
+ eeprom_put_byte (addr, data, NUM_OF_BITS); /* Output chip data */
+
+ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from
+ * EEPROM */
+
+/*
+** Must see Data In at a low state before completing this transaction.
+**
+** Afterwards, the data bit will return to a high state, ~6 ms, terminating
+** the operation.
+*/
+ pci_write_32 ((u_int32_t *) addr, EPROM_ENCS); /* Re-enable Chip Select */
+ temp = pci_read_32 ((u_int32_t *) addr); /* discard first read */
+ temp = pci_read_32 ((u_int32_t *) addr);
+ if (temp & EPROM_ACTIVE_IN_BIT)
+ {
+ temp = pci_read_32 ((u_int32_t *) addr);
+ if (temp & EPROM_ACTIVE_IN_BIT)
+ {
+ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select
+ * from EEPROM */
+ return (1);
+ }
+ }
+ count = 1000;
+ while (count--)
+ {
+ for (temp = 0; temp < 0x10; temp++)
+ OS_uwait_dummy ();
+
+ if (pci_read_32 ((u_int32_t *) addr) & EPROM_ACTIVE_IN_BIT)
+ break;
+ }
+
+ if (count == -1)
+ return (2);
+
+ return (0);
+}
+
+
+/*------------------------------------------------------------------------
+ * pmcGetBuffValue - read the specified value from buffer
+ *------------------------------------------------------------------------
+ */
+
+long
+pmcGetBuffValue (char *ptr, int size)
+{
+ long value = 0;
+ int index;
+
+ for (index = 0; index < size; ++index)
+ {
+ value <<= 8;
+ value |= ptr[index] & 0xFF;
+ }
+
+ return value;
+}
+
+
+/*------------------------------------------------------------------------
+ * pmcSetBuffValue - save the specified value to buffer
+ *------------------------------------------------------------------------
+ */
+
+void
+pmcSetBuffValue (char *ptr, long value, int size)
+{
+ int index = size;
+
+ while (--index >= 0)
+ {
+ ptr[index] = (char) (value & 0xFF);
+ value >>= 8;
+ }
+}
+
+
+/*------------------------------------------------------------------------
+ * pmc_eeprom_read_buffer - read EEPROM data into specified buffer
+ *------------------------------------------------------------------------
+ */
+
+void
+pmc_eeprom_read_buffer (long addr, long mem_offset, char *dest_ptr, int size)
+{
+ while (--size >= 0)
+ *dest_ptr++ = (char) pmc_eeprom_read (addr, mem_offset++);
+}
+
+
+/*------------------------------------------------------------------------
+ * pmc_eeprom_write_buffer - write EEPROM data from specified buffer
+ *------------------------------------------------------------------------
+ */
+
+void
+pmc_eeprom_write_buffer (long addr, long mem_offset, char *dest_ptr, int size)
+{
+ enable_pmc_eeprom (addr);
+
+ while (--size >= 0)
+ pmc_eeprom_write (addr, mem_offset++, *dest_ptr++);
+
+ disable_pmc_eeprom (addr);
+}
+
+
+/*------------------------------------------------------------------------
+ * pmcCalcCrc - calculate the CRC for the serial EEPROM structure
+ *------------------------------------------------------------------------
+ */
+
+u_int32_t
+pmcCalcCrc_T01 (void *bufp)
+{
+ FLD_TYPE2 *buf = bufp;
+ u_int32_t crc; /* CRC of the structure */
+
+ /* Calc CRC for type and length fields */
+ sbeCrc (
+ (u_int8_t *) &buf->type,
+ (u_int32_t) STRUCT_OFFSET (FLD_TYPE1, Crc32),
+ (u_int32_t) 0,
+ (u_int32_t *) &crc);
+
+#ifdef EEPROM_TYPE_DEBUG
+ pr_info("sbeCrc: crc 1 calculated as %08x\n", crc); /* RLD DEBUG */
+#endif
+ return ~crc;
+}
+
+u_int32_t
+pmcCalcCrc_T02 (void *bufp)
+{
+ FLD_TYPE2 *buf = bufp;
+ u_int32_t crc; /* CRC of the structure */
+
+ /* Calc CRC for type and length fields */
+ sbeCrc (
+ (u_int8_t *) &buf->type,
+ (u_int32_t) STRUCT_OFFSET (FLD_TYPE2, Crc32),
+ (u_int32_t) 0,
+ (u_int32_t *) &crc);
+
+ /* Calc CRC for remaining fields */
+ sbeCrc (
+ (u_int8_t *) &buf->Id[0],
+ (u_int32_t) (sizeof (FLD_TYPE2) - STRUCT_OFFSET (FLD_TYPE2, Id)),
+ (u_int32_t) crc,
+ (u_int32_t *) &crc);
+
+#ifdef EEPROM_TYPE_DEBUG
+ pr_info("sbeCrc: crc 2 calculated as %08x\n", crc); /* RLD DEBUG */
+#endif
+ return crc;
+}
+
+
+/*------------------------------------------------------------------------
+ * pmc_init_seeprom - initialize the serial EEPROM structure
+ *------------------------------------------------------------------------
+ *
+ * At the front of the serial EEPROM there is a record that contains
+ * manufacturing information. If the info does not already exist, it
+ * is created. The only field modifiable by the operator is the
+ * serial number field.
+ */
+
+void
+pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum)
+{
+ PROMFORMAT buffer; /* Memory image of structure */
+ u_int32_t crc; /* CRC of structure */
+ time_t createTime;
+ int i;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ createTime = CURRENT_TIME;
+#else
+ createTime = get_seconds ();
+#endif
+
+ /* use template data */
+ for (i = 0; i < sizeof (FLD_TYPE2); ++i)
+ buffer.bytes[i] = mfg_template[i];
+
+ /* Update serial number field in buffer */
+ pmcSetBuffValue (&buffer.fldType2.Serial[3], serialNum, 3);
+
+ /* Update create time field in buffer */
+ pmcSetBuffValue (&buffer.fldType2.CreateTime[0], createTime, 4);
+
+ /* Update CRC field in buffer */
+ crc = pmcCalcCrc_T02 (&buffer);
+ pmcSetBuffValue (&buffer.fldType2.Crc32[0], crc, 4);
+
+#ifdef DEBUG
+ for (i = 0; i < sizeof (FLD_TYPE2); ++i)
+ pr_info("[%02X] = %02X\n", i, buffer.bytes[i] & 0xFF);
+#endif
+
+ /* Write structure to serial EEPROM */
+ pmc_eeprom_write_buffer (addr, EE_MFG, (char *) &buffer, sizeof (FLD_TYPE2));
+}
+
+
+char
+pmc_verify_cksum (void *bufp)
+{
+ FLD_TYPE1 *buf1 = bufp;
+ FLD_TYPE2 *buf2 = bufp;
+ u_int32_t crc1, crc2; /* CRC read from EEPROM */
+
+ /* Retrieve contents of CRC field */
+ crc1 = pmcGetBuffValue (&buf1->Crc32[0], sizeof (buf1->Crc32));
+#ifdef EEPROM_TYPE_DEBUG
+ pr_info("EEPROM: chksum 1 reads as %08x\n", crc1); /* RLD DEBUG */
+#endif
+ if ((buf1->type == PROM_FORMAT_TYPE1) &&
+ (pmcCalcCrc_T01 ((void *) buf1) == crc1))
+ return PROM_FORMAT_TYPE1; /* checksum type 1 verified */
+
+ crc2 = pmcGetBuffValue (&buf2->Crc32[0], sizeof (buf2->Crc32));
+#ifdef EEPROM_TYPE_DEBUG
+ pr_info("EEPROM: chksum 2 reads as %08x\n", crc2); /* RLD DEBUG */
+#endif
+ if ((buf2->type == PROM_FORMAT_TYPE2) &&
+ (pmcCalcCrc_T02 ((void *) buf2) == crc2))
+ return PROM_FORMAT_TYPE2; /* checksum type 2 verified */
+
+ return PROM_FORMAT_Unk; /* failed to validate */
+}
+
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.h b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
new file mode 100644
index 00000000000..c3ada87efd2
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
@@ -0,0 +1,60 @@
+/*
+ * $Id: pmc93x6_eeprom.h,v 1.1 2005/09/28 00:10:08 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_PMC93X6_EEPROM_H_
+#define _INC_PMC93X6_EEPROM_H_
+
+/*-----------------------------------------------------------------------------
+ * pmc93x6_eeprom.h -
+ *
+ * Copyright (C) 2002-2004 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ *-----------------------------------------------------------------------------
+ * $Log: pmc93x6_eeprom.h,v $
+ * Revision 1.1 2005/09/28 00:10:08 rickd
+ * pmc_verify_cksum return value is char.
+ *
+ * Revision 1.0 2005/05/04 17:20:51 rickd
+ * Initial revision
+ *
+ * Revision 1.0 2005/04/22 23:48:48 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#if defined (__FreeBSD__) || defined (__NetBSD__)
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+
+#ifdef __KERNEL__
+
+#include "pmcc4_private.h"
+
+void pmc_eeprom_read_buffer (long, long, char *, int);
+void pmc_eeprom_write_buffer (long, long, char *, int);
+void pmc_init_seeprom (u_int32_t, u_int32_t);
+char pmc_verify_cksum (void *);
+
+#endif /*** __KERNEL__ ***/
+
+#endif
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
new file mode 100644
index 00000000000..26c1f0ea72e
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -0,0 +1,155 @@
+/*
+ * $Id: pmcc4.h,v 1.4 2005/11/01 19:24:48 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_PMCC4_H_
+#define _INC_PMCC4_H_
+
+/*-----------------------------------------------------------------------------
+ * pmcc4.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.4 $
+ * Last changed on $Date: 2005/11/01 19:24:48 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: pmcc4.h,v $
+ * Revision 1.4 2005/11/01 19:24:48 rickd
+ * Remove de-implement function prototypes. Several <int> to
+ * <status_t> changes for consistant usage of same.
+ *
+ * Revision 1.3 2005/09/28 00:10:08 rickd
+ * Add GNU license info. Use config params from libsbew.h
+ *
+ * Revision 1.2 2005/04/28 23:43:03 rickd
+ * Add RCS tracking heading.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <sys/types.h>
+#else
+#ifndef __KERNEL__
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+#endif
+
+
+
+typedef int status_t;
+
+#define SBE_DRVR_FAIL 0
+#define SBE_DRVR_SUCCESS 1
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/********************/
+/* PMCC4 memory Map */
+/********************/
+
+#define COMET_OFFSET(x) (0x80000+(x)*0x10000)
+#define EEPROM_OFFSET 0xC0000
+#define CPLD_OFFSET 0xD0000
+
+ struct pmcc4_timeslot_param
+ {
+ u_int8_t card; /* the card number */
+ u_int8_t port; /* the port number */
+ u_int8_t _reserved1;
+ u_int8_t _reserved2;
+
+ /*
+ * Each byte in the bitmask below represents one timeslot (bitmask[0]
+ * is for timeslot 0 and so on); each bit in the byte selects timeslot
+ * bits for this channel (0xff - whole timeslot, 0x7f - 56kbps mode).
+ */
+ u_int8_t bitmask[32];
+ };
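+/*
+ * Illustrative sketch only, not part of the original driver (kept under
+ * "#if 0"): filling in a timeslot request per the bitmask convention
+ * above, for a hypothetical channel on port 0 that uses timeslots 1-3,
+ * with timeslot 3 restricted to 56 kbps. The function name is
+ * hypothetical.
+ */
+#if 0
+ static void
+ pmcc4_example_fill_timeslots (struct pmcc4_timeslot_param * p)
+ {
+ int i;
+
+ for (i = 0; i < 32; i++)
+ p->bitmask[i] = 0; /* start with no timeslots mapped */
+ p->card = 0; /* first card */
+ p->port = 0; /* first port */
+ p->bitmask[1] = 0xff; /* full 64 kbps timeslot */
+ p->bitmask[2] = 0xff; /* full 64 kbps timeslot */
+ p->bitmask[3] = 0x7f; /* 56 kbps mode */
+ }
+#endif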
+
+ struct c4_musycc_param
+ {
+ u_int8_t RWportnum;
+ u_int16_t offset;
+ u_int32_t value;
+ };
+
+/*Alarm values */
+#define sbeE1RMAI 0x100
+#define sbeYelAlm 0x04
+#define sbeRedAlm 0x02
+#define sbeAISAlm 0x01
+
+#define sbeE1errSMF 0x02
+#define sbeE1CRC 0x01
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * Device Driver interface, routines are for internal use only.
+ */
+
+#include "pmcc4_private.h"
+
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
+char *get_hdlc_name (hdlc_device *);
+
+#endif
+
+
+/*
+ * external interface
+ */
+
+void c4_cleanup (void);
+status_t c4_chan_up (ci_t *, int channum);
+status_t c4_del_chan_stats (int channum);
+status_t c4_del_chan (int channum);
+status_t c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip);
+int c4_is_chan_up (int channum);
+
+void *getuserbychan (int channum);
+void pci_flush_write (ci_t * ci);
+void sbecom_set_loglevel (int debuglevel);
+char *sbeid_get_bdname (ci_t * ci);
+void sbeid_set_bdtype (ci_t * ci);
+void sbeid_set_hdwbid (ci_t * ci);
+u_int32_t sbeCrc (u_int8_t *, u_int32_t, u_int32_t, u_int32_t *);
+
+void VMETRO_TRACE (void *); /* put data into 8 LEDs */
+void VMETRO_TRIGGER (ci_t *, int); /* Note: int = 0(default)
+ * thru 15 */
+
+#if defined (SBE_ISR_TASKLET)
+void musycc_intr_bh_tasklet (ci_t *);
+
+#endif
+
+#endif /*** __KERNEL __ ***/
+#endif /* _INC_PMCC4_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_cpld.h b/drivers/staging/cxt1e1/pmcc4_cpld.h
new file mode 100644
index 00000000000..6d8f0337aa3
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_cpld.h
@@ -0,0 +1,124 @@
+/*
+ * $Id: pmcc4_cpld.h,v 1.0 2005/09/28 00:10:08 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_PMCC4_CPLD_H_
+#define _INC_PMCC4_CPLD_H_
+
+/*-----------------------------------------------------------------------------
+ * pmcc4_cpld.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.0 $
+ * Last changed on $Date: 2005/09/28 00:10:08 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: pmcc4_cpld.h,v $
+ * Revision 1.0 2005/09/28 00:10:08 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+#include <sys/types.h>
+#else
+#ifndef __KERNEL__
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/********************************/
+/* iSPLD control chip registers */
+/********************************/
+
+#if 0
+#define CPLD_MCSR 0x0
+#define CPLD_MCLK 0x1
+#define CPLD_LEDS 0x2
+#define CPLD_INTR 0x3
+#endif
+
+ struct c4_cpld
+ {
+ volatile u_int32_t mcsr;/* r/w: Master Clock Source Register */
+ volatile u_int32_t mclk;/* r/w: Master Clock Register */
+ volatile u_int32_t leds;/* r/w: LED Register */
+ volatile u_int32_t intr;/* r: Interrupt Register */
+ };
+
+ typedef struct c4_cpld c4cpld_t;
+
+/* mcsr note: sourcing COMET must be initialized to Master Mode */
+#define PMCC4_CPLD_MCSR_IND 0 /* ports use individual BP Clk as
+ * source, no slaves */
+#define PMCC4_CPLD_MCSR_CMT_1 1 /* COMET 1 BP Clk is source, 2,3,4
+ * are Clk slaves */
+#define PMCC4_CPLD_MCSR_CMT_2 2 /* COMET 2 BP Clk is source, 1,3,4
+ * are Clk slaves */
+#define PMCC4_CPLD_MCSR_CMT_3 3 /* COMET 3 BP Clk is source, 1,2,4
+ * are Clk slaves */
+#define PMCC4_CPLD_MCSR_CMT_4 4 /* COMET 4 BP Clk is source, 1,2,3
+ * are Clk slaves */
+
+#define PMCC4_CPLD_MCLK_MASK 0x0f
+#define PMCC4_CPLD_MCLK_P1 0x1
+#define PMCC4_CPLD_MCLK_P2 0x2
+#define PMCC4_CPLD_MCLK_P3 0x4
+#define PMCC4_CPLD_MCLK_P4 0x8
+#define PMCC4_CPLD_MCLK_T1 0x00
+#define PMCC4_CPLD_MCLK_P1_E1 0x01
+#define PMCC4_CPLD_MCLK_P2_E1 0x02
+#define PMCC4_CPLD_MCLK_P3_E1 0x04
+#define PMCC4_CPLD_MCLK_P4_E1 0x08
+
+#define PMCC4_CPLD_LED_OFF 0
+#define PMCC4_CPLD_LED_ON 1
+#define PMCC4_CPLD_LED_GP0 0x01 /* port 0, green */
+#define PMCC4_CPLD_LED_YP0 0x02 /* port 0, yellow */
+#define PMCC4_CPLD_LED_GP1 0x04 /* port 1, green */
+#define PMCC4_CPLD_LED_YP1 0x08 /* port 1, yellow */
+#define PMCC4_CPLD_LED_GP2 0x10 /* port 2, green */
+#define PMCC4_CPLD_LED_YP2 0x20 /* port 2, yellow */
+#define PMCC4_CPLD_LED_GP3 0x40 /* port 3, green */
+#define PMCC4_CPLD_LED_YP3 0x80 /* port 3, yellow */
+#define PMCC4_CPLD_LED_GREEN (PMCC4_CPLD_LED_GP0 | PMCC4_CPLD_LED_GP1 | \
+ PMCC4_CPLD_LED_GP2 | PMCC4_CPLD_LED_GP3 )
+#define PMCC4_CPLD_LED_YELLOW (PMCC4_CPLD_LED_YP0 | PMCC4_CPLD_LED_YP1 | \
+ PMCC4_CPLD_LED_YP2 | PMCC4_CPLD_LED_YP3)
+
+#define PMCC4_CPLD_INTR_MASK 0x0f
+#define PMCC4_CPLD_INTR_CMT_1 0x01
+#define PMCC4_CPLD_INTR_CMT_2 0x02
+#define PMCC4_CPLD_INTR_CMT_3 0x04
+#define PMCC4_CPLD_INTR_CMT_4 0x08
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _INC_PMCC4_CPLD_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_defs.h b/drivers/staging/cxt1e1/pmcc4_defs.h
new file mode 100644
index 00000000000..186347b8d56
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_defs.h
@@ -0,0 +1,82 @@
+/*
+ * $Id: pmcc4_defs.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_PMCC4_DEFS_H_
+#define _INC_PMCC4_DEFS_H_
+
+/*-----------------------------------------------------------------------------
+ * c4_defs.h -
+ *
+ * Implementation elements of the wanPMC-C4T1E1 device driver
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.0 $
+ * Last changed on $Date: 2005/09/28 00:10:09 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: pmcc4_defs.h,v $
+ * Revision 1.0 2005/09/28 00:10:09 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+#define MAX_BOARDS 8
+#define MAX_CHANS_USED 128
+
+#ifdef SBE_PMCC4_ENABLE
+#define MUSYCC_NPORTS 4 /* CN8474 */
+#endif
+#ifdef SBE_WAN256T3_ENABLE
+#define MUSYCC_NPORTS 8 /* CN8478 */
+#endif
+#define MUSYCC_NCHANS 32 /* actually, chans per port */
+
+#define MUSYCC_NIQD 0x1000 /* power of 2 */
+#define MUSYCC_MRU 2048 /* default */
+#define MUSYCC_MTU 2048 /* default */
+#define MUSYCC_TXDESC_MIN 10 /* HDLC mode default */
+#define MUSYCC_RXDESC_MIN 18 /* HDLC mode default */
+#define MUSYCC_TXDESC_TRANS 4 /* Transparent mode minimum # of TX descriptors */
+#define MUSYCC_RXDESC_TRANS 12 /* Transparent mode minimum # of RX descriptors */
+
+#define MAX_DEFAULT_IFQLEN 32 /* network qlen */
+
+
+#define SBE_IFACETMPL "pmcc4-%d"
+#ifdef IFNAMSIZ
+#define SBE_IFACETMPL_SIZE IFNAMSIZ
+#else
+#define SBE_IFACETMPL_SIZE 16
+#endif
+
+/* we want the PMCC4 watchdog to fire off every 250ms */
+#define WATCHDOG_TIMEOUT 250000
+
+/* if we restart the watchdog every 250ms, then we'll time out
+ * an additional 300ms later */
+#define WATCHDOG_UTIMEOUT (WATCHDOG_TIMEOUT+300000)
+
+#if !defined(SBE_ISR_TASKLET) && !defined(SBE_ISR_IMMEDIATE) && !defined(SBE_ISR_INLINE)
+#define SBE_ISR_TASKLET
+#endif
+
+#endif /*** _INC_PMCC4_DEFS_H_ ***/
+
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
new file mode 100644
index 00000000000..333cf2687dd
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -0,0 +1,1860 @@
+/*
+ * $Id: pmcc4_drv.c,v 3.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
+ */
+
+
+/*-----------------------------------------------------------------------------
+ * pmcc4_drv.c -
+ *
+ * Copyright (C) 2007 One Stop Systems, Inc.
+ * Copyright (C) 2002-2006 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@onestopsystems.com
+ * One Stop Systems, Inc. Escondido, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 3.1 $
+ * Last changed on $Date: 2007/08/15 23:32:17 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: pmcc4_drv.c,v $
+ * Revision 3.1 2007/08/15 23:32:17 rickd
+ * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
+ *
+ * Revision 3.0 2007/08/15 22:19:55 rickd
+ * Correct sizeof() castings and pi->regram to support 64bit compatibility.
+ *
+ * Revision 2.10 2006/04/21 00:56:40 rickd
+ * workqueue files now prefixed with <sbecom> prefix.
+ *
+ * Revision 2.9 2005/11/01 19:22:49 rickd
+ * Add sanity checks against max_port for ioctl functions.
+ *
+ * Revision 2.8 2005/10/27 18:59:25 rickd
+ * Code cleanup. Default channel config to HDLC_FCS16.
+ *
+ * Revision 2.7 2005/10/18 18:16:30 rickd
+ * Further NCOMM code repairs - (1) interrupt matrix usage inconsistant
+ * for indexing into nciInterrupt[][], code missing double parameters.
+ * (2) check input of ncomm interrupt registration cardID for correct
+ * boundary values.
+ *
+ * Revision 2.6 2005/10/17 23:55:28 rickd
+ * Initial port of NCOMM support patches from original work found
+ * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
+ * Corrected NCOMMs wanpmcC4T1E1_getBaseAddress() to correctly handle
+ * multiple boards.
+ *
+ * Revision 2.5 2005/10/13 23:01:28 rickd
+ * Correct panic for illegal address reference w/in get_brdinfo on
+ * first_if/last_if name acquistion under Linux 2.6
+ *
+ * Revision 2.4 2005/10/13 21:20:19 rickd
+ * Correction of c4_cleanup() wherein next should be acquired before
+ * ci_t structure is free'd.
+ *
+ * Revision 2.3 2005/10/13 19:20:10 rickd
+ * Correct driver removal cleanup code for multiple boards.
+ *
+ * Revision 2.2 2005/10/11 18:34:04 rickd
+ * New routine added to determine number of ports (comets) on board.
+ *
+ * Revision 2.1 2005/10/05 00:48:13 rickd
+ * Add some RX activation trace code.
+ *
+ * Revision 2.0 2005/09/28 00:10:06 rickd
+ * Implement 2.6 workqueue for TX/RX restart. Correction to
+ * hardware register boundary checks allows expanded access of MUSYCC.
+ * Implement new musycc reg&bits namings.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+char OSSIid_pmcc4_drvc[] =
+"@(#)pmcc4_drv.c - $Revision: 3.1 $ (c) Copyright 2002-2007 One Stop Systems, Inc.";
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#if defined (__FreeBSD__) || defined (__NetBSD__)
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#else
+#include <linux/types.h>
+#include "pmcc4_sysdep.h"
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h> /* include for timer */
+#include <linux/timer.h> /* include for timer */
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#endif
+
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4_private.h"
+#include "pmcc4.h"
+#include "pmcc4_ioctls.h"
+#include "musycc.h"
+#include "comet.h"
+#include "sbe_bid.h"
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+
+#define KERN_WARN KERN_WARNING
+
+/* forward references */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+status_t c4_wk_chan_init (mpi_t *, mch_t *);
+void c4_wq_port_cleanup (mpi_t *);
+status_t c4_wq_port_init (mpi_t *);
+
+#endif
+int c4_loop_port (ci_t *, int, u_int8_t);
+status_t c4_set_port (ci_t *, int);
+status_t musycc_chan_down (ci_t *, int);
+
+u_int32_t musycc_chan_proto (int);
+status_t musycc_dump_ring (ci_t *, unsigned int);
+status_t __init musycc_init (ci_t *);
+void musycc_init_mdt (mpi_t *);
+void musycc_serv_req (mpi_t *, u_int32_t);
+void musycc_update_timeslots (mpi_t *);
+
+extern void musycc_update_tx_thp (mch_t *);
+extern int log_level;
+extern int max_mru;
+extern int max_mtu;
+extern int max_rxdesc_used, max_rxdesc_default;
+extern int max_txdesc_used, max_txdesc_default;
+
+#if defined (__powerpc__)
+extern void *memset (void *s, int c, size_t n);
+
+#endif
+
+int drvr_state = SBE_DRVR_INIT;
+ci_t *c4_list = 0;
+ci_t *CI; /* dummy pointer to board ZERO's data -
+ * DEBUG USAGE */
+
+
+void
+sbecom_set_loglevel (int d)
+{
+ /*
+ * The code within the following -if- clause is a backdoor debug facility
+ * which can be used to display the state of a board's channel.
+ */
+ if (d > LOG_DEBUG)
+ {
+ unsigned int channum = d - (LOG_DEBUG + 1); /* convert to ZERO
+ * relativity */
+
+ (void) musycc_dump_ring ((ci_t *) CI, channum); /* CI implies support
+ * for card 0 only */
+ } else
+ {
+ if (log_level != d)
+ {
+ pr_info("log level changed from %d to %d\n", log_level, d);
+ log_level = d; /* set new */
+ } else
+ pr_info("log level is %d\n", log_level);
+ }
+}
+
+
+mch_t *
+c4_find_chan (int channum)
+{
+ ci_t *ci;
+ mch_t *ch;
+ int portnum, gchan;
+
+ for (ci = c4_list; ci; ci = ci->next)
+ for (portnum = 0; portnum < ci->max_port; portnum++)
+ for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
+ {
+ if ((ch = ci->port[portnum].chan[gchan]))
+ {
+ if ((ch->state != UNASSIGNED) &&
+ (ch->channum == channum))
+ return (ch);
+ }
+ }
+ return 0;
+}
+
+
+ci_t *__init
+c4_new (void *hi)
+{
+ ci_t *ci;
+
+#ifdef SBE_MAP_DEBUG
+ pr_warning("c4_new() entered, ci needs %u.\n",
+ (unsigned int) sizeof (ci_t));
+#endif
+
+ ci = (ci_t *) OS_kmalloc (sizeof (ci_t));
+ if (ci)
+ {
+ ci->hdw_info = hi;
+ ci->state = C_INIT; /* mark as hardware not available */
+ ci->next = c4_list;
+ c4_list = ci;
+ ci->brdno = ci->next ? ci->next->brdno + 1 : 0;
+ } else
+ pr_warning("failed CI malloc, size %u.\n",
+ (unsigned int) sizeof (ci_t));
+
+ if (CI == 0)
+ CI = ci; /* DEBUG, only board 0 usage */
+ return ci;
+}
+
+
+/***
+ * Check port state and set LED states using watchdog or ioctl...
+ * also check for in-band SF loopback commands (& cause results if they are there)
+ *
+ * Alarm function depends on comet bits indicating change in
+ * link status (linkMask) to keep the link status indication straight.
+ *
+ * Indications are only LED and system log -- except when ioctl is invoked.
+ *
+ * "alarmed" record (a.k.a. copyVal, in some cases below) decodes as:
+ *
+ * RMAI (E1 only) 0x100
+ * alarm LED on 0x80
+ * link LED on 0x40
+ * link returned 0x20 (link was down, now it's back and 'port get' hasn't run)
+ * change in LED 0x10 (update LED register because value has changed)
+ * link is down 0x08
+ * YelAlm(RAI) 0x04
+ * RedAlm 0x02
+ * AIS(blue)Alm 0x01
+ *
+ * note "link has returned" indication is reset on read
+ * (e.g. by use of the c4_control port get command)
+ */
+
+#define sbeLinkMask 0x41 /* change in signal status (lost/recovered) +
+ * state */
+#define sbeLinkChange 0x40
+#define sbeLinkDown 0x01
+#define sbeAlarmsMask 0x07 /* red / yellow / blue alarm conditions */
+#define sbeE1AlarmsMask 0x107 /* alarm conditions */
+
+#define COMET_LBCMD_READ 0x80 /* read only (do not set, return read value) */
+
+void
+checkPorts (ci_t * ci)
+{
+#ifndef CONFIG_SBE_PMCC4_NCOMM
+ /*
+ * PORT POINT - NCOMM needs to avoid this code since the polling of
+ * alarms conflicts with NCOMM's interrupt servicing implementation.
+ */
+
+ comet_t *comet;
+ volatile u_int32_t value;
+ u_int32_t copyVal, LEDval;
+
+ u_int8_t portnum;
+
+ LEDval = 0;
+ for (portnum = 0; portnum < ci->max_port; portnum++)
+ {
+ copyVal = 0x12f & (ci->alarmed[portnum]); /* port's alarm record */
+ comet = ci->port[portnum].cometbase;
+ value = pci_read_32 ((u_int32_t *) &comet->cdrc_ists) & sbeLinkMask; /* link loss reg */
+
+ if (value & sbeLinkChange) /* is there a change in the link stuff */
+ {
+ /* if there's been a change (above) and yet it's the same (below) */
+ if (!(((copyVal >> 3) & sbeLinkDown) ^ (value & sbeLinkDown)))
+ {
+ if (value & sbeLinkDown)
+ pr_warning("%s: Port %d momentarily recovered.\n",
+ ci->devname, portnum);
+ else
+ pr_warning("%s: Warning: Port %d link was briefly down.\n",
+ ci->devname, portnum);
+ } else if (value & sbeLinkDown)
+ pr_warning("%s: Warning: Port %d link is down.\n",
+ ci->devname, portnum);
+ else
+ {
+ pr_warning("%s: Port %d link has recovered.\n",
+ ci->devname, portnum);
+ copyVal |= 0x20; /* record link transition to up */
+ }
+ copyVal |= 0x10; /* change (link) --> update LEDs */
+ }
+ copyVal &= 0x137; /* clear LED & link old history bits &
+ * save others */
+ if (value & sbeLinkDown)
+ copyVal |= 0x08; /* record link status (now) */
+ else
+ { /* if link is up, do this */
+ copyVal |= 0x40; /* LED indicate link is up */
+ /* Alarm things & the like ... first if E1, then if T1 */
+ if (IS_FRAME_ANY_E1 (ci->port[portnum].p.port_mode))
+ {
+ /*
+ * first check Codeword (SaX) changes & CRC and
+ * sub-multi-frame errors
+ */
+ /*
+ * note these errors are printed every time they are detected
+ * vs. alarms
+ */
+ value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_nat_ists); /* codeword */
+ if (value & 0x1f)
+ { /* if errors (crc or smf only) */
+ if (value & 0x10)
+ pr_warning("%s: E1 Port %d Codeword Sa4 change detected.\n",
+ ci->devname, portnum);
+ if (value & 0x08)
+ pr_warning("%s: E1 Port %d Codeword Sa5 change detected.\n",
+ ci->devname, portnum);
+ if (value & 0x04)
+ pr_warning("%s: E1 Port %d Codeword Sa6 change detected.\n",
+ ci->devname, portnum);
+ if (value & 0x02)
+ pr_warning("%s: E1 Port %d Codeword Sa7 change detected.\n",
+ ci->devname, portnum);
+ if (value & 0x01)
+ pr_warning("%s: E1 Port %d Codeword Sa8 change detected.\n",
+ ci->devname, portnum);
+ }
+ value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_mists); /* crc & smf */
+ if (value & 0x3)
+ { /* if errors (crc or smf only) */
+ if (value & sbeE1CRC)
+ pr_warning("%s: E1 Port %d CRC-4 error(s) detected.\n",
+ ci->devname, portnum);
+ if (value & sbeE1errSMF) /* error in sub-multiframe */
+ pr_warning("%s: E1 Port %d received errored SMF.\n",
+ ci->devname, portnum);
+ }
+ value = pci_read_32 ((u_int32_t *) &comet->e1_frmr_masts) & 0xcc; /* alarms */
+ /*
+ * pack alarms together (bitmiser), and construct similar to
+ * T1
+ */
+ /* RAI,RMAI,.,.,LOF,AIS,.,. ==> RMAI,.,.,.,.,.,RAI,LOF,AIS */
+ /* see 0x97 */
+ value = (value >> 2);
+ if (value & 0x30)
+ {
+ if (value & 0x20)
+ value |= 0x40; /* RAI */
+ if (value & 0x10)
+ value |= 0x100; /* RMAI */
+ value &= ~0x30;
+ } /* finished packing alarm in handy order */
+ if (value != (copyVal & sbeE1AlarmsMask))
+ { /* if alarms changed */
+ copyVal |= 0x10;/* change LED status */
+ if ((copyVal & sbeRedAlm) && !(value & sbeRedAlm))
+ {
+ copyVal &= ~sbeRedAlm;
+ pr_warning("%s: E1 Port %d LOF alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeRedAlm) && (value & sbeRedAlm))
+ {
+ copyVal |= sbeRedAlm;
+ pr_warning("%s: E1 Warning: Port %d LOF alarm.\n",
+ ci->devname, portnum);
+ } else if ((copyVal & sbeYelAlm) && !(value & sbeYelAlm))
+ {
+ copyVal &= ~sbeYelAlm;
+ pr_warning("%s: E1 Port %d RAI alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeYelAlm) && (value & sbeYelAlm))
+ {
+ copyVal |= sbeYelAlm;
+ pr_warning("%s: E1 Warning: Port %d RAI alarm.\n",
+ ci->devname, portnum);
+ } else if ((copyVal & sbeE1RMAI) && !(value & sbeE1RMAI))
+ {
+ copyVal &= ~sbeE1RMAI;
+ pr_warning("%s: E1 Port %d RMAI alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeE1RMAI) && (value & sbeE1RMAI))
+ {
+ copyVal |= sbeE1RMAI;
+ pr_warning("%s: E1 Warning: Port %d RMAI alarm.\n",
+ ci->devname, portnum);
+ } else if ((copyVal & sbeAISAlm) && !(value & sbeAISAlm))
+ {
+ copyVal &= ~sbeAISAlm;
+ pr_warning("%s: E1 Port %d AIS alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeAISAlm) && (value & sbeAISAlm))
+ {
+ copyVal |= sbeAISAlm;
+ pr_warning("%s: E1 Warning: Port %d AIS alarm.\n",
+ ci->devname, portnum);
+ }
+ }
+ /* end of E1 alarm code */
+ } else
+ { /* if a T1 mode */
+ value = pci_read_32 ((u_int32_t *) &comet->t1_almi_ists); /* alarms */
+ value &= sbeAlarmsMask;
+ if (value != (copyVal & sbeAlarmsMask))
+ { /* if alarms changed */
+ copyVal |= 0x10;/* change LED status */
+ if ((copyVal & sbeRedAlm) && !(value & sbeRedAlm))
+ {
+ copyVal &= ~sbeRedAlm;
+ pr_warning("%s: Port %d red alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeRedAlm) && (value & sbeRedAlm))
+ {
+ copyVal |= sbeRedAlm;
+ pr_warning("%s: Warning: Port %d red alarm.\n",
+ ci->devname, portnum);
+ } else if ((copyVal & sbeYelAlm) && !(value & sbeYelAlm))
+ {
+ copyVal &= ~sbeYelAlm;
+ pr_warning("%s: Port %d yellow (RAI) alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeYelAlm) && (value & sbeYelAlm))
+ {
+ copyVal |= sbeYelAlm;
+ pr_warning("%s: Warning: Port %d yellow (RAI) alarm.\n",
+ ci->devname, portnum);
+ } else if ((copyVal & sbeAISAlm) && !(value & sbeAISAlm))
+ {
+ copyVal &= ~sbeAISAlm;
+ pr_warning("%s: Port %d blue (AIS) alarm ended.\n",
+ ci->devname, portnum);
+ } else if (!(copyVal & sbeAISAlm) && (value & sbeAISAlm))
+ {
+ copyVal |= sbeAISAlm;
+ pr_warning("%s: Warning: Port %d blue (AIS) alarm.\n",
+ ci->devname, portnum);
+ }
+ }
+ } /* end T1 mode alarm checks */
+ }
+ if (copyVal & sbeAlarmsMask)
+ copyVal |= 0x80; /* if alarm turn yel LED on */
+ if (copyVal & 0x10)
+ LEDval |= 0x100; /* tag if LED values have changed */
+ LEDval |= ((copyVal & 0xc0) >> (6 - (portnum * 2)));
+
+ ci->alarmed[portnum] &= 0xfffff000; /* out with the old (it's fff
+ * ... foo) */
+ ci->alarmed[portnum] |= (copyVal); /* in with the new */
+
+ /*
+ * enough with the alarms and LED's, now let's check for loopback
+ * requests
+ */
+
+ if (IS_FRAME_ANY_T1 (ci->port[portnum].p.port_mode))
+ { /* if a T1 mode */
+ /*
+ * begin in-band (SF) loopback code detection -- start by reading
+ * command
+ */
+ value = pci_read_32 ((u_int32_t *) &comet->ibcd_ies); /* detect reg. */
+ value &= 0x3; /* trim to handy bits */
+ if (value & 0x2)
+ { /* activate loopback (sets for deactivate
+ * code length) */
+ copyVal = c4_loop_port (ci, portnum, COMET_LBCMD_READ); /* read line loopback
+ * mode */
+ if (copyVal != COMET_MDIAG_LINELB) /* don't do it again if
+ * already in that mode */
+ c4_loop_port (ci, portnum, COMET_MDIAG_LINELB); /* put port in line
+ * loopback mode */
+ }
+ if (value & 0x1)
+ { /* deactivate loopback (sets for activate
+ * code length) */
+ copyVal = c4_loop_port (ci, portnum, COMET_LBCMD_READ); /* read line loopback
+ * mode */
+ if (copyVal != COMET_MDIAG_LBOFF) /* don't do it again if
+ * already in that mode */
+ c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF); /* take port out of any
+ * loopback mode */
+ }
+ }
+ if (IS_FRAME_ANY_T1ESF (ci->port[portnum].p.port_mode))
+ { /* if a T1 ESF mode */
+ /* begin ESF loopback code */
+ value = pci_read_32 ((u_int32_t *) &comet->t1_rboc_sts) & 0x3f; /* read command */
+ if (value == 0x07)
+ c4_loop_port (ci, portnum, COMET_MDIAG_LINELB); /* put port in line
+ * loopback mode */
+ if (value == 0x0a)
+ c4_loop_port (ci, portnum, COMET_MDIAG_PAYLB); /* put port in payload
+ * loopbk mode */
+ if ((value == 0x1c) || (value == 0x19) || (value == 0x12))
+ c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF); /* take port out of any
+ * loopbk mode */
+ if (log_level >= LOG_DEBUG)
+ if (value != 0x3f)
+ pr_warning("%s: BOC value = %x on Port %d\n",
+ ci->devname, value, portnum);
+ /* end ESF loopback code */
+ }
+ }
+
+    /* if something is new, update the LEDs */
+ if (LEDval & 0x100)
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, LEDval & 0xff);
+#endif /*** CONFIG_SBE_PMCC4_NCOMM ***/
+}
+
+
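+/*
+ * c4_watchdog: periodic housekeeping.  Bumps the poll counter, runs the
+ * per-port alarm/LED/loopback scan in checkPorts(), and (on pre-workqueue
+ * kernels) restarts any UP channels flagged for RX/TX service recovery.
+ */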
+STATIC void
+c4_watchdog (ci_t * ci)
+{
+#if 0
+ //unsigned long flags;
+#endif
+
+ if (drvr_state != SBE_DRVR_AVAILABLE)
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("drvr not available (%x)\n", drvr_state);
+ return;
+ }
+#if 0
+ SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
+ * board */
+#endif
+
+ ci->wdcount++;
+ checkPorts (ci);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41)
+ if (ci->wd_notify)
+ { /* is there a state change to search for */
+ int port, gchan;
+
+ ci->wd_notify = 0; /* reset notification */
+ for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
+ {
+ for (port = 0; port < ci->max_port; port++)
+ {
+ mch_t *ch = ci->port[port].chan[gchan];
+
+ if (!ch || ci->state != C_RUNNING) /* state changed while
+ * acquiring semaphore */
+ break;
+ if (ch->state == UP)/* channel must be set up */
+ {
+#if 0
+#ifdef RLD_TRANS_DEBUG
+ if (1 || log_level >= LOG_MONITOR)
+#else
+ if (log_level >= LOG_MONITOR)
+#endif
+ pr_info("%s: watchdog reviving Port %d Channel %d [%d] sts %x/%x, start_TX %x free %x start_RX %x\n",
+ ci->devname, ch->channum, port, gchan, ch->channum,
+ ch->p.status, ch->status,
+ ch->ch_start_tx, ch->txd_free, ch->ch_start_rx);
+#endif
+
+ /**********************************/
+ /** check for RX restart request **/
+ /**********************************/
+
+ if (ch->ch_start_rx &&
+ (ch->status & RX_ENABLED)) /* requires start on
+ * enabled RX */
+ {
+ ch->ch_start_rx = 0; /* we are restarting RX... */
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ c4_watchdog() CHAN RX ACTIVATE: chan %d\n",
+ ch->channum);
+#endif
+#ifdef RLD_RXACT_DEBUG
+ {
+ struct mdesc *md;
+ static int hereb4 = 7;
+
+ if (hereb4)
+ {
+ hereb4--;
+ md = &ch->mdr[ch->rxix_irq_srv];
+ pr_info("++ c4_watchdog[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
+ ch->channum, ch->rxix_irq_srv, md, le32_to_cpu (md->status), ch->s.rx_packets);
+ musycc_dump_rxbuffer_ring (ch, 1); /* RLD DEBUG */
+ }
+ }
+#endif
+ musycc_serv_req (ch->up, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | gchan);
+ }
+ /**********************************/
+ /** check for TX restart request **/
+ /**********************************/
+
+ if (ch->ch_start_tx &&
+ (ch->status & TX_ENABLED)) /* requires start on
+ * enabled TX */
+ {
+ struct mdesc *md;
+
+ /*
+ * find next unprocessed message, then set TX thp to
+ * it
+ */
+ musycc_update_tx_thp (ch);
+
+#if 0
+ spin_lock_irqsave (&ch->ch_txlock, flags);
+#endif
+ md = ch->txd_irq_srv;
+ if (!md)
+ {
+ pr_info("-- c4_watchdog[%d]: WARNING, starting NULL md\n",
+ ch->channum);
+ pr_info("-- chan %d txd_irq_srv %p sts %x usr_add %p sts %x, txpkt %lu\n",
+                               ch->channum, ch->txd_irq_srv, le32_to_cpu (ch->txd_irq_srv->status),
+                               ch->txd_usr_add, le32_to_cpu (ch->txd_usr_add->status),
+ ch->s.tx_packets);
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags);
+#endif
+ } else if (md->data && ((le32_to_cpu (md->status)) & MUSYCC_TX_OWNED))
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ c4_watchdog[%d] CHAN TX ACTIVATE: start_tx %x\n",
+ ch->channum, ch->ch_start_tx);
+#endif
+ ch->ch_start_tx = 0; /* we are restarting
+ * TX... */
+#if 0
+ spin_unlock_irqrestore (&ch->ch_txlock, flags); /* allow interrupts for
+ * service request */
+#endif
+ musycc_serv_req (ch->up, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | gchan);
+#ifdef RLD_TRANS_DEBUG
+ if (1 || log_level >= LOG_MONITOR)
+#else
+ if (log_level >= LOG_MONITOR)
+#endif
+ pr_info("++ SACK[P%d/C%d] ack'd, continuing...\n",
+ ch->up->portnum, ch->channum);
+ }
+ }
+ }
+ }
+ }
+ }
+#else
+ ci->wd_notify = 0;
+#endif
+#if 0
+ SD_SEM_GIVE (&ci->sem_wdbusy);/* release per-board hold */
+#endif
+}
+
+
+void
+c4_cleanup (void)
+{
+ ci_t *ci, *next;
+ mpi_t *pi;
+ int portnum, j;
+
+ ci = c4_list;
+ while (ci)
+ {
+ next = ci->next; /* protect <next> from upcoming <free> */
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, PMCC4_CPLD_LED_OFF);
+ for (portnum = 0; portnum < ci->max_port; portnum++)
+ {
+ pi = &ci->port[portnum];
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ c4_wq_port_cleanup (pi);
+#endif
+ for (j = 0; j < MUSYCC_NCHANS; j++)
+ {
+ if (pi->chan[j])
+ OS_kfree (pi->chan[j]); /* free mch_t struct */
+ }
+ OS_kfree (pi->regram_saved);
+ }
+#if 0
+ /* obsolete - watchdog is now static w/in ci_t */
+ OS_free_watchdog (ci->wd);
+#endif
+ OS_kfree (ci->iqd_p_saved);
+ OS_kfree (ci);
+ ci = next; /* cleanup next board, if any */
+ }
+}
+
+
+/*
+ * This function issues a write to every comet chip and expects the same data
+ * to be returned by the subsequent read.  The result determines whether the
+ * board is a 1-port, 2-port, or 4-port build.  The value returned is a
+ * bit-mask of the ports found.  Only certain configurations are considered
+ * VALID or LEGAL builds.
+ */
+
+int
+c4_get_portcfg (ci_t * ci)
+{
+ comet_t *comet;
+ int portnum, mask;
+ u_int32_t wdata, rdata;
+
+ wdata = COMET_MDIAG_LBOFF; /* take port out of any loopback mode */
+
+ mask = 0;
+ for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++)
+ {
+ comet = ci->port[portnum].cometbase;
+ pci_write_32 ((u_int32_t *) &comet->mdiag, wdata);
+ rdata = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK;
+ if (wdata == rdata)
+ mask |= 1 << portnum;
+ }
+ return mask;
+}
+
+
+/* nothing herein should generate interrupts */
+
+status_t __init
+c4_init (ci_t * ci, u_char *func0, u_char *func1)
+{
+ mpi_t *pi;
+ mch_t *ch;
+ static u_int32_t count = 0;
+ int portnum, j;
+
+ ci->state = C_INIT;
+ ci->brdno = count++;
+ ci->intlog.this_status_new = 0;
+ atomic_set (&ci->bh_pending, 0);
+
+ ci->reg = (struct musycc_globalr *) func0;
+ ci->eeprombase = (u_int32_t *) (func1 + EEPROM_OFFSET);
+ ci->cpldbase = (c4cpld_t *) ((u_int32_t *) (func1 + ISPLD_OFFSET));
+
+ /*** PORT POINT - the following is the first access of any type to the hardware ***/
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+ /* NCOMM driver uses INTB interrupt to monitor CPLD register */
+ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC);
+#else
+ /* standard driver POLLS for INTB via CPLD register */
+ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
+#endif
+
+ {
+ int pmsk;
+
+ /* need comet addresses available for determination of hardware build */
+ for (portnum = 0; portnum < MUSYCC_NPORTS; portnum++)
+ {
+ pi = &ci->port[portnum];
+ pi->cometbase = (comet_t *) ((u_int32_t *) (func1 + COMET_OFFSET (portnum)));
+ pi->reg = (struct musycc_globalr *) ((u_char *) ci->reg + (portnum * 0x800));
+ pi->portnum = portnum;
+ pi->p.portnum = portnum;
+ pi->openchans = 0;
+#ifdef SBE_MAP_DEBUG
+ pr_info("Comet-%d: addr = %p\n", portnum, pi->cometbase);
+#endif
+ }
+ pmsk = c4_get_portcfg (ci);
+ switch (pmsk)
+ {
+ case 0x1:
+ ci->max_port = 1;
+ break;
+ case 0x3:
+ ci->max_port = 2;
+ break;
+#if 0
+ case 0x7: /* not built, but could be... */
+ ci->max_port = 3;
+ break;
+#endif
+ case 0xf:
+ ci->max_port = 4;
+ break;
+ default:
+ ci->max_port = 0;
+ pr_warning("%s: illegal port configuration (%x)\n",
+ ci->devname, pmsk);
+ return SBE_DRVR_FAIL;
+ }
+#ifdef SBE_MAP_DEBUG
+ pr_info(">> %s: c4_get_build - pmsk %x max_port %x\n",
+ ci->devname, pmsk, ci->max_port);
+#endif
+ }
+
+ for (portnum = 0; portnum < ci->max_port; portnum++)
+ {
+ pi = &ci->port[portnum];
+ pi->up = ci;
+ pi->sr_last = 0xffffffff;
+ pi->p.port_mode = CFG_FRAME_SF; /* T1 B8ZS, the default */
+ pi->p.portP = (CFG_CLK_PORT_EXTERNAL | CFG_LBO_LH0); /* T1 defaults */
+
+ OS_sem_init (&pi->sr_sem_busy, SEM_AVAILABLE);
+ OS_sem_init (&pi->sr_sem_wait, SEM_TAKEN);
+
+ for (j = 0; j < 32; j++)
+ {
+ pi->fifomap[j] = -1;
+ pi->tsm[j] = 0; /* no assignments, all available */
+ }
+
+ /* allocate channel structures for this port */
+ for (j = 0; j < MUSYCC_NCHANS; j++)
+ {
+ ch = OS_kmalloc (sizeof (mch_t));
+ if (ch)
+ {
+ pi->chan[j] = ch;
+ ch->state = UNASSIGNED;
+ ch->up = pi;
+ ch->gchan = (-1); /* channel assignment not yet known */
+ ch->channum = (-1); /* channel assignment not yet known */
+ ch->p.card = ci->brdno;
+ ch->p.port = portnum;
+ ch->p.channum = (-1); /* channel assignment not yet known */
+ ch->p.mode_56k = 0; /* default is 64kbps mode */
+ } else
+ {
+ pr_warning("failed mch_t malloc, port %d channel %d size %u.\n",
+ portnum, j, (unsigned int) sizeof (mch_t));
+ break;
+ }
+ }
+ }
+
+
+ {
+ /*
+         * Run the LEDs through their paces to supply visual proof that they
+         * are functional and not burnt out or broken.
+ *
+ * YELLOW + GREEN -> OFF.
+ */
+
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->leds,
+ PMCC4_CPLD_LED_GREEN | PMCC4_CPLD_LED_YELLOW);
+ OS_uwait (750000, "leds");
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->leds, PMCC4_CPLD_LED_OFF);
+ }
+
+ OS_init_watchdog (&ci->wd, (void (*) (void *)) c4_watchdog, ci, WATCHDOG_TIMEOUT);
+ return SBE_DRVR_SUCCESS;
+}
+
+
+/* better be fully setup to handle interrupts when you call this */
+
+status_t __init
+c4_init2 (ci_t * ci)
+{
+ status_t ret;
+
+ /* PORT POINT: this routine generates first interrupt */
+ if ((ret = musycc_init (ci)) != SBE_DRVR_SUCCESS)
+ return ret;
+
+#if 0
+ ci->p.framing_type = FRAMING_CBP;
+ ci->p.h110enable = 1;
+#if 0
+ ci->p.hypersize = 0;
+#else
+ hyperdummy = 0;
+#endif
+ ci->p.clock = 0; /* Use internal clocking until set to
+ * external */
+ c4_card_set_params (ci, &ci->p);
+#endif
+ OS_start_watchdog (&ci->wd);
+ return SBE_DRVR_SUCCESS;
+}
+
+
+/* This function sets the loopback mode (or clears it, as the case may be). */
+
+int
+c4_loop_port (ci_t * ci, int portnum, u_int8_t cmd)
+{
+ comet_t *comet;
+ volatile u_int32_t loopValue;
+
+ comet = ci->port[portnum].cometbase;
+ loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK;
+
+ if (cmd & COMET_LBCMD_READ)
+ return loopValue; /* return the read value */
+
+ if (loopValue != cmd)
+ {
+ switch (cmd)
+ {
+ case COMET_MDIAG_LINELB:
+            /* set (SF) loopback down (turn off) code length to 6 bits */
+ pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x05);
+ break;
+ case COMET_MDIAG_LBOFF:
+ /* set (SF) loopback up (turn on) code length to 5 bits */
+ pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x00);
+ break;
+ }
+
+ pci_write_32 ((u_int32_t *) &comet->mdiag, cmd);
+ if (log_level >= LOG_WARN)
+ pr_info("%s: loopback mode changed to %2x from %2x on Port %d\n",
+ ci->devname, cmd, loopValue, portnum);
+ loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK;
+ if (loopValue != cmd)
+ {
+ if (log_level >= LOG_ERROR)
+ pr_info("%s: write to loop register failed, unknown state for Port %d\n",
+ ci->devname, portnum);
+ }
+ } else
+ {
+ if (log_level >= LOG_WARN)
+ pr_info("%s: loopback already in that mode (%2x)\n",
+ ci->devname, loopValue);
+ }
+ return 0;
+}
+
+
+/* c4_frame_rw: read or write the comet register specified
+ * (modifies use of port_param to non-standard use of struct)
+ * Specifically:
+ * pp.portnum (one guess)
+ * pp.port_mode offset of register
+ * pp.portP write (or not, i.e. read)
+ * pp.portStatus write value
+ * BTW:
+ * pp.portStatus also used to return read value
+ * pp.portP also used during write, to return old reg value
+ */
+
+status_t
+c4_frame_rw (ci_t * ci, struct sbecom_port_param * pp)
+{
+ comet_t *comet;
+ volatile u_int32_t data;
+
+ if (pp->portnum >= ci->max_port)/* sanity check */
+ return ENXIO;
+
+ comet = ci->port[pp->portnum].cometbase;
+ data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff;
+
+ if (pp->portP)
+ { /* control says this is a register
+ * _write_ */
+ if (pp->portStatus == data)
+ pr_info("%s: Port %d already that value! Writing again anyhow.\n",
+ ci->devname, pp->portnum);
+ pp->portP = (u_int8_t) data;
+ pci_write_32 ((u_int32_t *) comet + pp->port_mode,
+ pp->portStatus);
+ data = pci_read_32 ((u_int32_t *) comet + pp->port_mode) & 0xff;
+ }
+ pp->portStatus = (u_int8_t) data;
+ return 0;
+}
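+
+/*
+ * Editor's sketch (illustrative only, not part of the driver): one way a
+ * caller might fill in the overloaded sbecom_port_param fields documented
+ * above to read and then write a single COMET framer register through
+ * c4_frame_rw().  The wrapper name, register offset and write value below
+ * are hypothetical.
+ */
+#if 0                           /* example only */
+static status_t
+example_frame_reg_rw (ci_t * ci)
+{
+    struct sbecom_port_param pp;
+    status_t    ret;
+
+    pp.portnum = 0;             /* first port */
+    pp.port_mode = 0x20;        /* hypothetical register offset (in 32-bit
+                                 * words, per the pointer math above) */
+    pp.portP = 0;               /* zero -> read only */
+    if ((ret = c4_frame_rw (ci, &pp)))
+        return ret;             /* pp.portStatus now holds the value read */
+
+    pp.portP = 1;               /* non-zero -> write */
+    pp.portStatus = 0x5a;       /* hypothetical value to write */
+    ret = c4_frame_rw (ci, &pp);/* on return, pp.portP holds the old value
+                                 * and pp.portStatus the re-read register */
+    return ret;
+}
+#endif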
+
+
+/* c4_pld_rw: read or write the pld register specified
+ * (modifies use of port_param to non-standard use of struct)
+ * Specifically:
+ * pp.port_mode offset of register
+ * pp.portP write (or not, i.e. read)
+ * pp.portStatus write value
+ * BTW:
+ * pp.portStatus also used to return read value
+ * pp.portP also used during write, to return old reg value
+ */
+
+status_t
+c4_pld_rw (ci_t * ci, struct sbecom_port_param * pp)
+{
+ volatile u_int32_t *regaddr;
+ volatile u_int32_t data;
+ int regnum = pp->port_mode;
+
+ regaddr = (u_int32_t *) ci->cpldbase + regnum;
+ data = pci_read_32 ((u_int32_t *) regaddr) & 0xff;
+
+ if (pp->portP)
+ { /* control says this is a register
+ * _write_ */
+ pp->portP = (u_int8_t) data;
+ pci_write_32 ((u_int32_t *) regaddr, pp->portStatus);
+ data = pci_read_32 ((u_int32_t *) regaddr) & 0xff;
+ }
+ pp->portStatus = (u_int8_t) data;
+ return 0;
+}
+
+/* c4_musycc_rw: read or write the musycc register specified
+ * (modifies use of port_param to non-standard use of struct)
+ * Specifically:
+ * mcp.RWportnum port number and write indication bit (0x80)
+ * mcp.offset offset of register
+ * mcp.value write value going in and read value returning
+ */
+
+/* PORT POINT: TX Subchannel Map registers are write-only
+ * areas within the MUSYCC and always return FF */
+/* PORT POINT: regram and reg structures are slightly different and <offset> ioctl
+ * settings are aligned with the <reg> struct musycc_globalr{} usage.
+ * Also, regram is separately allocated shared memory, allocated for each port.
+ * PORT POINT: access offsets of 0x6000 for Msg Cfg Desc Tbl are for 4-port MUSYCC
+ * only. (An 8-port MUSYCC has 0x16000 offsets for accessing its upper 4 tables.)
+ */
+
+status_t
+c4_musycc_rw (ci_t * ci, struct c4_musycc_param * mcp)
+{
+ mpi_t *pi;
+ volatile u_int32_t *dph; /* hardware implemented register */
+ u_int32_t *dpr = 0; /* RAM image of registers for group command
+ * usage */
+ int offset = mcp->offset % 0x800; /* group relative address
+ * offset, mcp->portnum is
+ * not used */
+ int portnum, ramread = 0;
+ volatile u_int32_t data;
+
+ /*
+ * Sanity check hardware accessibility. The 0x6000 portion handles port
+ * numbers associated with Msg Descr Tbl decoding.
+ */
+ portnum = (mcp->offset % 0x6000) / 0x800;
+ if (portnum >= ci->max_port)
+ return ENXIO;
+ pi = &ci->port[portnum];
+ if (mcp->offset >= 0x6000)
+ offset += 0x6000; /* put back in MsgCfgDesc address offset */
+ dph = (u_int32_t *) ((u_long) pi->reg + offset);
+
+ /* read of TX are from RAM image, since hardware returns FF */
+ dpr = (u_int32_t *) ((u_long) pi->regram + offset);
+ if (mcp->offset < 0x6000) /* non MsgDesc Tbl accesses might require
+ * RAM access */
+ {
+ if (offset >= 0x200 && offset < 0x380)
+ ramread = 1;
+ if (offset >= 0x10 && offset < 0x200)
+ ramread = 1;
+ }
+ /* read register from RAM or hardware, depending... */
+ if (ramread)
+ {
+ data = *dpr;
+ //pr_info("c4_musycc_rw: RAM addr %p read data %x (portno %x offset %x RAM ramread %x)\n", dpr, data, portnum, offset, ramread); /* RLD DEBUG */
+ } else
+ {
+ data = pci_read_32 ((u_int32_t *) dph);
+ //pr_info("c4_musycc_rw: REG addr %p read data %x (portno %x offset %x RAM ramread %x)\n", dph, data, portnum, offset, ramread); /* RLD DEBUG */
+ }
+
+
+ if (mcp->RWportnum & 0x80)
+ { /* control says this is a register
+ * _write_ */
+ if (mcp->value == data)
+ pr_info("%s: musycc grp%d already that value! writing again anyhow.\n",
+ ci->devname, (mcp->RWportnum & 0x7));
+ /* write register RAM */
+ if (ramread)
+ *dpr = mcp->value;
+ /* write hardware register */
+ pci_write_32 ((u_int32_t *) dph, mcp->value);
+ }
+    mcp->value = data;          /* return the read value (or the 'old
+                                 * value', if this was a write) */
+ return 0;
+}
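+
+/*
+ * Editor's sketch (illustrative only, not part of the driver): filling in
+ * the c4_musycc_param fields documented above to read a group register and
+ * write it straight back, using the 0x800-per-group offset encoding and the
+ * 0x80 write bit in RWportnum.  The offset is hypothetical and the example
+ * assumes a build with at least two ports.
+ */
+#if 0                           /* example only */
+static status_t
+example_musycc_reg_rw (ci_t * ci)
+{
+    struct c4_musycc_param mcp;
+    status_t    ret;
+
+    mcp.offset = (1 * 0x800) + 0x10;    /* group 1, hypothetical offset */
+    mcp.RWportnum = 1;                  /* write bit (0x80) clear -> read */
+    if ((ret = c4_musycc_rw (ci, &mcp)))
+        return ret;             /* mcp.value now holds the register/RAM data */
+
+    mcp.RWportnum = 0x80 | 1;   /* write bit set -> write mcp.value back (a
+                                 * no-op round trip; the driver logs that the
+                                 * value is unchanged) */
+    return c4_musycc_rw (ci, &mcp);
+}
+#endif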
+
+status_t
+c4_get_port (ci_t * ci, int portnum)
+{
+ if (portnum >= ci->max_port) /* sanity check */
+ return ENXIO;
+
+ SD_SEM_TAKE (&ci->sem_wdbusy, "_wd_"); /* only 1 thru here, per
+ * board */
+ checkPorts (ci);
+ ci->port[portnum].p.portStatus = (u_int8_t) ci->alarmed[portnum];
+ ci->alarmed[portnum] &= 0xdf;
+ SD_SEM_GIVE (&ci->sem_wdbusy); /* release per-board hold */
+ return 0;
+}
+
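+/*
+ * c4_set_port: one-time group/port setup, performed before the first channel
+ * of a group is opened.  Programs the COMET framer and the CPLD clocking for
+ * T1 vs E1, points the MUSYCC group at its register RAM image, fills in the
+ * group/port configuration descriptors and the timeslot availability map,
+ * then issues GROUP_INIT service requests and initializes the message
+ * descriptor tables.
+ */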
+status_t
+c4_set_port (ci_t * ci, int portnum)
+{
+ mpi_t *pi;
+ struct sbecom_port_param *pp;
+ int e1mode;
+ u_int8_t clck;
+ int i;
+
+ if (portnum >= ci->max_port) /* sanity check */
+ return ENXIO;
+
+ pi = &ci->port[portnum];
+ pp = &ci->port[portnum].p;
+ e1mode = IS_FRAME_ANY_E1 (pp->port_mode);
+ if (log_level >= LOG_MONITOR2)
+ {
+ pr_info("%s: c4_set_port[%d]: entered, e1mode = %x, openchans %d.\n",
+ ci->devname,
+ portnum, e1mode, pi->openchans);
+ }
+ if (pi->openchans)
+ return EBUSY; /* group needs initialization only for
+ * first channel of a group */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ {
+ status_t ret;
+
+ if ((ret = c4_wq_port_init (pi))) /* create/init
+ * workqueue_struct */
+ return (ret);
+ }
+#endif
+
+ init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP);
+ clck = pci_read_32 ((u_int32_t *) &ci->cpldbase->mclk) & PMCC4_CPLD_MCLK_MASK;
+ if (e1mode)
+ clck |= 1 << portnum;
+ else
+ clck &= 0xf ^ (1 << portnum);
+
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->mclk, clck);
+ pci_write_32 ((u_int32_t *) &ci->cpldbase->mcsr, PMCC4_CPLD_MCSR_IND);
+ pci_write_32 ((u_int32_t *) &pi->reg->gbp, OS_vtophys (pi->regram));
+
+ /*********************************************************************/
+ /* ERRATA: If transparent mode is used, do not set OOFMP_DISABLE bit */
+ /*********************************************************************/
+
+ pi->regram->grcd =
+ __constant_cpu_to_le32 (MUSYCC_GRCD_RX_ENABLE |
+ MUSYCC_GRCD_TX_ENABLE |
+ MUSYCC_GRCD_OOFMP_DISABLE |
+ MUSYCC_GRCD_SF_ALIGN | /* per MUSYCC ERRATA,
+ * for T1 * fix */
+ MUSYCC_GRCD_COFAIRQ_DISABLE |
+ MUSYCC_GRCD_MC_ENABLE |
+ (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT));
+
+ pi->regram->pcd =
+ __constant_cpu_to_le32 ((e1mode ? 1 : 0) |
+ MUSYCC_PCD_TXSYNC_RISING |
+ MUSYCC_PCD_RXSYNC_RISING |
+ MUSYCC_PCD_RXDATA_RISING);
+
+ /* Message length descriptor */
+ pi->regram->mld = __constant_cpu_to_le32 (max_mru | (max_mru << 16));
+
+ /* tsm algorithm */
+ for (i = 0; i < 32; i++)
+ {
+
+ /*** ASSIGNMENT NOTES: ***/
+ /*** Group's channel ZERO unavailable if E1. ***/
+ /*** Group's channel 16 unavailable if E1 CAS. ***/
+ /*** Group's channels 24-31 unavailable if T1. ***/
+
+ if (((i == 0) && e1mode) ||
+ ((i == 16) && ((pp->port_mode == CFG_FRAME_E1CRC_CAS) || (pp->port_mode == CFG_FRAME_E1CRC_CAS_AMI)))
+ || ((i > 23) && (!e1mode)))
+ {
+ pi->tsm[i] = 0xff; /* make tslot unavailable for this mode */
+ } else
+ {
+ pi->tsm[i] = 0x00; /* make tslot available for assignment */
+ }
+ }
+ for (i = 0; i < MUSYCC_NCHANS; i++)
+ {
+ pi->regram->ttsm[i] = 0;
+ pi->regram->rtsm[i] = 0;
+ }
+ FLUSH_MEM_WRITE ();
+ musycc_serv_req (pi, SR_GROUP_INIT | SR_RX_DIRECTION);
+ musycc_serv_req (pi, SR_GROUP_INIT | SR_TX_DIRECTION);
+
+ musycc_init_mdt (pi);
+
+ pi->group_is_set = 1;
+ pi->p = *pp;
+ return 0;
+}
+
+
+unsigned int max_int = 0;
+
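+/*
+ * c4_new_chan: bind a new channel number to the first UNASSIGNED slot of the
+ * given port and record the caller's interface pointer.  Defaults the channel
+ * to HDLC/FCS16 with 0x7E idle fill, and tracks the first/last interface
+ * registered against this board.
+ */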
+status_t
+c4_new_chan (ci_t * ci, int portnum, int channum, void *user)
+{
+ mpi_t *pi;
+ mch_t *ch;
+ int gchan;
+
+ if (c4_find_chan (channum)) /* a new channel shouldn't already exist */
+ return EEXIST;
+
+ if (portnum >= ci->max_port) /* sanity check */
+ return ENXIO;
+
+ pi = &(ci->port[portnum]);
+ /* find any available channel within this port */
+ for (gchan = 0; gchan < MUSYCC_NCHANS; gchan++)
+ {
+ ch = pi->chan[gchan];
+ if (ch && ch->state == UNASSIGNED) /* no assignment is good! */
+ break;
+ }
+ if (gchan == MUSYCC_NCHANS) /* exhausted table, all were assigned */
+ return ENFILE;
+
+ ch->up = pi;
+
+ /* NOTE: mch_t already cleared during OS_kmalloc() */
+ ch->state = DOWN;
+ ch->user = user;
+ ch->gchan = gchan;
+ ch->channum = channum; /* mark our channel assignment */
+ ch->p.channum = channum;
+#if 1
+ ch->p.card = ci->brdno;
+ ch->p.port = portnum;
+#endif
+ ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16;
+ ch->p.idlecode = CFG_CH_FLAG_7E;
+ ch->p.pad_fill_count = 2;
+ spin_lock_init (&ch->ch_rxlock);
+ spin_lock_init (&ch->ch_txlock);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
+ {
+ status_t ret;
+
+ if ((ret = c4_wk_chan_init (pi, ch)))
+ return ret;
+ }
+#endif
+
+ /* save off interface assignments which bound a board */
+ if (ci->first_if == 0) /* first channel registered is assumed to
+ * be the lowest channel */
+ {
+ ci->first_if = ci->last_if = user;
+ ci->first_channum = ci->last_channum = channum;
+ } else
+ {
+ ci->last_if = user;
+ if (ci->last_channum < channum) /* higher number channel found */
+ ci->last_channum = channum;
+ }
+ return 0;
+}
+
+status_t
+c4_del_chan (int channum)
+{
+ mch_t *ch;
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+ if (ch->state == UP)
+ musycc_chan_down ((ci_t *) 0, channum);
+ ch->state = UNASSIGNED;
+ ch->gchan = (-1);
+ ch->channum = (-1);
+ ch->p.channum = (-1);
+ return 0;
+}
+
+status_t
+c4_del_chan_stats (int channum)
+{
+ mch_t *ch;
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+
+ memset (&ch->s, 0, sizeof (struct sbecom_chan_stats));
+ return 0;
+}
+
+
+status_t
+c4_set_chan (int channum, struct sbecom_chan_param * p)
+{
+ mch_t *ch;
+ int i, x = 0;
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+
+#if 1
+ if (ch->p.card != p->card ||
+ ch->p.port != p->port ||
+ ch->p.channum != p->channum)
+ return EINVAL;
+#endif
+
+ if (!(ch->up->group_is_set))
+ {
+ return EIO; /* out of order, SET_PORT command
+ * required prior to first group's
+ * SET_CHAN command */
+ }
+ /*
+ * Check for change of parameter settings in order to invoke closing of
+ * channel prior to hardware poking.
+ */
+
+ if (ch->p.status != p->status || ch->p.chan_mode != p->chan_mode ||
+ ch->p.data_inv != p->data_inv || ch->p.intr_mask != p->intr_mask ||
+ ch->txd_free < ch->txd_num) /* to clear out queued messages */
+ x = 1; /* we have a change requested */
+ for (i = 0; i < 32; i++) /* check for timeslot mapping changes */
+ if (ch->p.bitmask[i] != p->bitmask[i])
+ x = 1; /* we have a change requested */
+ ch->p = *p;
+ if (x && (ch->state == UP)) /* if change request and channel is
+ * open... */
+ {
+ status_t ret;
+
+ if ((ret = musycc_chan_down ((ci_t *) 0, channum)))
+ return ret;
+ if ((ret = c4_chan_up (ch->up->up, channum)))
+ return ret;
+ sd_enable_xmit (ch->user); /* re-enable to catch flow controlled
+ * channel */
+ }
+ return 0;
+}
+
+
+status_t
+c4_get_chan (int channum, struct sbecom_chan_param * p)
+{
+ mch_t *ch;
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+ *p = ch->p;
+ return 0;
+}
+
+status_t
+c4_get_chan_stats (int channum, struct sbecom_chan_stats * p)
+{
+ mch_t *ch;
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+ *p = ch->s;
+ p->tx_pending = atomic_read (&ch->tx_pending);
+ return 0;
+}
+
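+/*
+ * c4_fifo_alloc: find the largest free run (up to *len entries) in this
+ * port's FIFO map, mark it as owned by <chan>, shrink *len if the request
+ * could not be met in full, and return the starting index of the run.
+ */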
+STATIC int
+c4_fifo_alloc (mpi_t * pi, int chan, int *len)
+{
+ int i, l = 0, start = 0, max = 0, maxstart = 0;
+
+ for (i = 0; i < 32; i++)
+ {
+ if (pi->fifomap[i] != -1)
+ {
+ l = 0;
+ start = i + 1;
+ continue;
+ }
+ ++l;
+ if (l > max)
+ {
+ max = l;
+ maxstart = start;
+ }
+ if (max == *len)
+ break;
+ }
+ if (max != *len)
+ {
+ if (log_level >= LOG_WARN)
+ pr_info("%s: wanted to allocate %d fifo space, but got only %d\n",
+ pi->up->devname, *len, max);
+ *len = max;
+ }
+ if (log_level >= LOG_DEBUG)
+        pr_info("%s: allocated %d fifo at %d for channel %d/%d\n",
+                pi->up->devname, max, maxstart, chan, pi->p.portnum);
+    for (i = maxstart; i < (maxstart + max); i++)
+        pi->fifomap[i] = chan;
+    return maxstart;            /* start of the FIFO run marked above */
+}
+
+void
+c4_fifo_free (mpi_t * pi, int chan)
+{
+ int i;
+
+ if (log_level >= LOG_DEBUG)
+ pr_info("%s: deallocated fifo for channel %d/%d\n",
+ pi->up->devname, chan, pi->p.portnum);
+ for (i = 0; i < 32; i++)
+ if (pi->fifomap[i] == chan)
+ pi->fifomap[i] = -1;
+}
+
+
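+/*
+ * c4_chan_up: bring a configured channel into service.  Validates the
+ * requested timeslots against the port's availability map, allocates FIFO
+ * space and the RX/TX descriptor rings, programs the channel configuration
+ * descriptors, then activates RX; TX activation is deferred until the first
+ * packet is handed down for transmit.
+ */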
+status_t
+c4_chan_up (ci_t * ci, int channum)
+{
+ mpi_t *pi;
+ mch_t *ch;
+ struct mbuf *m;
+ struct mdesc *md;
+ int nts, nbuf, txnum, rxnum;
+ int addr, i, j, gchan;
+ u_int32_t tmp; /* for optimizing conversion across BE
+ * platform */
+
+ if (!(ch = c4_find_chan (channum)))
+ return ENOENT;
+ if (ch->state == UP)
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: channel already UP, graceful early exit\n",
+ ci->devname);
+ return 0;
+ }
+ pi = ch->up;
+ gchan = ch->gchan;
+ /* find nts ('number of timeslots') */
+ nts = 0;
+ for (i = 0; i < 32; i++)
+ {
+ if (ch->p.bitmask[i] & pi->tsm[i])
+ {
+ if (1 || log_level >= LOG_WARN)
+ {
+ pr_info("%s: c4_chan_up[%d] EINVAL (attempt to cfg in-use or unavailable TimeSlot[%d])\n",
+ ci->devname, channum, i);
+ pr_info("+ ask4 %x, currently %x\n",
+ ch->p.bitmask[i], pi->tsm[i]);
+ }
+ return EINVAL;
+ }
+ for (j = 0; j < 8; j++)
+ if (ch->p.bitmask[i] & (1 << j))
+ nts++;
+ }
+
+ nbuf = nts / 8 ? nts / 8 : 1;
+ if (!nbuf)
+ {
+ /* if( log_level >= LOG_WARN) */
+ pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n",
+ ci->devname, channum);
+ return ENOBUFS; /* this should not happen */
+ }
+ addr = c4_fifo_alloc (pi, gchan, &nbuf);
+ ch->state = UP;
+
+ /* Setup the Time Slot Map */
+ musycc_update_timeslots (pi);
+
+ /* ch->tx_limit = nts; */
+ ch->s.tx_pending = 0;
+
+ /* Set Channel Configuration Descriptors */
+ {
+ u_int32_t ccd;
+
+ ccd = musycc_chan_proto (ch->p.chan_mode) << MUSYCC_CCD_PROTO_SHIFT;
+ if ((ch->p.chan_mode == CFG_CH_PROTO_ISLP_MODE) ||
+ (ch->p.chan_mode == CFG_CH_PROTO_TRANS))
+ {
+            ccd |= MUSYCC_CCD_FCS_XFER; /* Non-FCS mode */
+ }
+ ccd |= 2 << MUSYCC_CCD_MAX_LENGTH; /* Select second MTU */
+ ccd |= ch->p.intr_mask;
+ ccd |= addr << MUSYCC_CCD_BUFFER_LOC;
+ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ ccd |= (nbuf) << MUSYCC_CCD_BUFFER_LENGTH;
+ else
+ ccd |= (nbuf - 1) << MUSYCC_CCD_BUFFER_LENGTH;
+
+ if (ch->p.data_inv & CFG_CH_DINV_TX)
+ ccd |= MUSYCC_CCD_INVERT_DATA; /* Invert data */
+ pi->regram->tcct[gchan] = cpu_to_le32 (ccd);
+
+ if (ch->p.data_inv & CFG_CH_DINV_RX)
+ ccd |= MUSYCC_CCD_INVERT_DATA; /* Invert data */
+ else
+ ccd &= ~MUSYCC_CCD_INVERT_DATA; /* take away data inversion */
+ pi->regram->rcct[gchan] = cpu_to_le32 (ccd);
+ FLUSH_MEM_WRITE ();
+ }
+
+ /* Reread the Channel Configuration Descriptor for this channel */
+ musycc_serv_req (pi, SR_CHANNEL_CONFIG | SR_RX_DIRECTION | gchan);
+ musycc_serv_req (pi, SR_CHANNEL_CONFIG | SR_TX_DIRECTION | gchan);
+
+ /*
+ * Figure out how many buffers we want. If the customer has changed from
+ * the defaults, then use the changed values. Otherwise, use Transparent
+ * mode's specific minimum default settings.
+ */
+ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ {
+ if (max_rxdesc_used == max_rxdesc_default) /* use default setting */
+ max_rxdesc_used = MUSYCC_RXDESC_TRANS;
+ if (max_txdesc_used == max_txdesc_default) /* use default setting */
+ max_txdesc_used = MUSYCC_TXDESC_TRANS;
+ }
+ /*
+ * Increase counts when hyperchanneling, since this implies an increase
+ * in throughput per channel
+ */
+ rxnum = max_rxdesc_used + (nts / 4);
+ txnum = max_txdesc_used + (nts / 4);
+
+#if 0
+ /* DEBUG INFO */
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: mode %x rxnum %d (rxused %d def %d) txnum %d (txused %d def %d)\n",
+ ci->devname, ch->p.chan_mode,
+ rxnum, max_rxdesc_used, max_rxdesc_default,
+ txnum, max_txdesc_used, max_txdesc_default);
+#endif
+
+ ch->rxd_num = rxnum;
+ ch->txd_num = txnum;
+ ch->rxix_irq_srv = 0;
+
+ ch->mdr = OS_kmalloc (sizeof (struct mdesc) * rxnum);
+ ch->mdt = OS_kmalloc (sizeof (struct mdesc) * txnum);
+ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS)
+ tmp = __constant_cpu_to_le32 (max_mru | EOBIRQ_ENABLE);
+ else
+ tmp = __constant_cpu_to_le32 (max_mru);
+
+ for (i = 0, md = ch->mdr; i < rxnum; i++, md++)
+ {
+ if (i == (rxnum - 1))
+ {
+            md->snext = &ch->mdr[0];    /* wrap to ring start */
+ } else
+ {
+ md->snext = &ch->mdr[i + 1];
+ }
+ md->next = cpu_to_le32 (OS_vtophys (md->snext));
+
+ if (!(m = OS_mem_token_alloc (max_mru)))
+ {
+ if (log_level >= LOG_MONITOR)
+ pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
+ ci->devname, channum, max_mru);
+ goto errfree;
+ }
+ md->mem_token = m;
+ md->data = cpu_to_le32 (OS_vtophys (OS_mem_token_data (m)));
+ md->status = tmp | MUSYCC_RX_OWNED; /* MUSYCC owns RX descriptor **
+ * CODING NOTE:
+ * MUSYCC_RX_OWNED = 0 so no
+ * need to byteSwap */
+ }
+
+ for (i = 0, md = ch->mdt; i < txnum; i++, md++)
+ {
+ md->status = HOST_TX_OWNED; /* Host owns TX descriptor ** CODING
+ * NOTE: HOST_TX_OWNED = 0 so no need to
+ * byteSwap */
+ md->mem_token = 0;
+ md->data = 0;
+ if (i == (txnum - 1))
+ {
+            md->snext = &ch->mdt[0];    /* wrap to ring start */
+ } else
+ {
+ md->snext = &ch->mdt[i + 1];
+ }
+ md->next = cpu_to_le32 (OS_vtophys (md->snext));
+ }
+ ch->txd_irq_srv = ch->txd_usr_add = &ch->mdt[0];
+ ch->txd_free = txnum;
+ ch->tx_full = 0;
+ ch->txd_required = 0;
+
+ /* Configure it into the chip */
+ tmp = cpu_to_le32 (OS_vtophys (&ch->mdt[0]));
+ pi->regram->thp[gchan] = tmp;
+ pi->regram->tmp[gchan] = tmp;
+
+ tmp = cpu_to_le32 (OS_vtophys (&ch->mdr[0]));
+ pi->regram->rhp[gchan] = tmp;
+ pi->regram->rmp[gchan] = tmp;
+
+ /* Activate the Channel */
+ FLUSH_MEM_WRITE ();
+ if (ch->p.status & RX_ENABLED)
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ c4_chan_up() CHAN RX ACTIVATE: chan %d\n", ch->channum);
+#endif
+ ch->ch_start_rx = 0; /* we are restarting RX... */
+ musycc_serv_req (pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | gchan);
+ }
+ if (ch->p.status & TX_ENABLED)
+ {
+#ifdef RLD_TRANS_DEBUG
+ pr_info("++ c4_chan_up() CHAN TX ACTIVATE: chan %d <delayed>\n", ch->channum);
+#endif
+ ch->ch_start_tx = CH_START_TX_1ST; /* we are delaying start
+ * until receipt from user of
+ * first packet to transmit. */
+ }
+ ch->status = ch->p.status;
+ pi->openchans++;
+ return 0;
+
+errfree:
+ while (i > 0)
+ {
+ /* Don't leak all the previously allocated mbufs in this loop */
+ i--;
+ OS_mem_token_free (ch->mdr[i].mem_token);
+ }
+ OS_kfree (ch->mdt);
+ ch->mdt = 0;
+ ch->txd_num = 0;
+ OS_kfree (ch->mdr);
+ ch->mdr = 0;
+ ch->rxd_num = 0;
+ ch->state = DOWN;
+ return ENOBUFS;
+}
+
+/* stop the hardware from servicing & interrupting */
+
+void
+c4_stopwd (ci_t * ci)
+{
+ OS_stop_watchdog (&ci->wd);
+ SD_SEM_TAKE (&ci->sem_wdbusy, "_stop_"); /* ensure WD not running */
+ SD_SEM_GIVE (&ci->sem_wdbusy);
+}
+
+
+void
+sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn)
+{
+ char *np;
+ u_int32_t sn = 0;
+ int i;
+
+ bip->brdno = ci->brdno; /* our board number */
+ bip->brd_id = ci->brd_id;
+ bip->brd_hdw_id = ci->hdw_bid;
+ bip->brd_chan_cnt = MUSYCC_NCHANS * ci->max_port; /* number of channels
+ * being used */
+ bip->brd_port_cnt = ci->max_port; /* number of ports being used */
+ bip->brd_pci_speed = BINFO_PCI_SPEED_unk; /* PCI speed not yet
+ * determinable */
+
+ if (ci->first_if)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ np = (char *) hdlc_to_name (ci->first_if);
+#else
+ {
+ struct net_device *dev;
+
+ dev = (struct net_device *) ci->first_if;
+ np = (char *) dev->name;
+ }
+#endif
+ strncpy (bip->first_iname, np, CHNM_STRLEN - 1);
+ } else
+ strcpy (bip->first_iname, "<NULL>");
+ if (ci->last_if)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ np = (char *) hdlc_to_name (ci->last_if);
+#else
+ {
+ struct net_device *dev;
+
+ dev = (struct net_device *) ci->last_if;
+ np = (char *) dev->name;
+ }
+#endif
+ strncpy (bip->last_iname, np, CHNM_STRLEN - 1);
+ } else
+ strcpy (bip->last_iname, "<NULL>");
+
+ if (bsn)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ bip->brd_mac_addr[i] = *bsn++;
+ }
+ for (; i < 6; i++)
+ {
+ bip->brd_mac_addr[i] = *bsn;
+ sn = (sn << 8) | *bsn++;
+ }
+ } else
+ {
+ for (i = 0; i < 6; i++)
+ bip->brd_mac_addr[i] = 0;
+ }
+ bip->brd_sn = sn;
+}
+
+
+status_t
+c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip)
+{
+ struct net_device *dev;
+ char *np;
+
+ if (!(dev = getuserbychan (iip->channum)))
+ return ENOENT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ np = (char *) hdlc_to_name (dev_to_hdlc (dev));
+#else
+ np = dev->name;
+#endif
+ strncpy (iip->iname, np, CHNM_STRLEN - 1);
+ return 0;
+}
+
+
+#ifdef CONFIG_SBE_PMCC4_NCOMM
+void (*nciInterrupt[MAX_BOARDS][4]) (void);
+extern void wanpmcC4T1E1_hookInterrupt (int cardID, int deviceID, void *handler);
+
+void
+wanpmcC4T1E1_hookInterrupt (int cardID, int deviceID, void *handler)
+{
+ if (cardID < MAX_BOARDS) /* sanity check */
+ nciInterrupt[cardID][deviceID] = handler;
+}
+
+irqreturn_t
+c4_ebus_intr_th_handler (void *devp)
+{
+ ci_t *ci = (ci_t *) devp;
+ volatile u_int32_t ists;
+ int handled = 0;
+ int brdno;
+
+ /* which COMET caused the interrupt */
+ brdno = ci->brdno;
+ ists = pci_read_32 ((u_int32_t *) &ci->cpldbase->intr);
+ if (ists & PMCC4_CPLD_INTR_CMT_1)
+ {
+ handled = 0x1;
+ if (nciInterrupt[brdno][0] != NULL)
+ (*nciInterrupt[brdno][0]) ();
+ }
+ if (ists & PMCC4_CPLD_INTR_CMT_2)
+ {
+ handled |= 0x2;
+ if (nciInterrupt[brdno][1] != NULL)
+ (*nciInterrupt[brdno][1]) ();
+ }
+ if (ists & PMCC4_CPLD_INTR_CMT_3)
+ {
+ handled |= 0x4;
+ if (nciInterrupt[brdno][2] != NULL)
+ (*nciInterrupt[brdno][2]) ();
+ }
+ if (ists & PMCC4_CPLD_INTR_CMT_4)
+ {
+ handled |= 0x8;
+ if (nciInterrupt[brdno][3] != NULL)
+ (*nciInterrupt[brdno][3]) ();
+ }
+#if 0
+ /*** Test code just de-implements the asserted interrupt. Alternate
+ vendor will supply COMET interrupt handling code herein or such.
+ ***/
+ pci_write_32 ((u_int32_t *) &ci->reg->glcd, GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE);
+#endif
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,20)
+ return;
+#else
+ return IRQ_RETVAL (handled);
+#endif
+}
+
+
+unsigned long
+wanpmcC4T1E1_getBaseAddress (int cardID, int deviceID)
+{
+ ci_t *ci;
+ unsigned long base = 0;
+
+ ci = c4_list;
+ while (ci)
+ {
+ if (ci->brdno == cardID) /* found valid device */
+ {
+ if (deviceID < ci->max_port) /* comet is supported */
+ base = ((unsigned long) ci->port[deviceID].cometbase);
+ break;
+ }
+ ci = ci->next; /* next board, if any */
+ }
+ return (base);
+}
+
+#endif /*** CONFIG_SBE_PMCC4_NCOMM ***/
+
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmcc4_ioctls.h b/drivers/staging/cxt1e1/pmcc4_ioctls.h
new file mode 100644
index 00000000000..6b8d65673c7
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_ioctls.h
@@ -0,0 +1,81 @@
+/* RCSid: $Header: /home/rickd/projects/pmcc4/include/pmcc4_ioctls.h,v 2.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_PMCC4_IOCTLS_H_
+#define _INC_PMCC4_IOCTLS_H_
+
+/*-----------------------------------------------------------------------------
+ * pmcc4_ioctls.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 2.0 $
+ * Last changed on $Date: 2005/09/28 00:10:09 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: pmcc4_ioctls.h,v $
+ * Revision 2.0 2005/09/28 00:10:09 rickd
+ * Add GNU license info. Switch Ioctls to sbe_ioc.h usage.
+ *
+ * Revision 1.2 2005/04/28 23:43:03 rickd
+ * Add RCS tracking heading.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#include "sbew_ioc.h"
+
+enum
+{
+ // C4_GET_PORT = 0,
+ // C4_SET_PORT,
+ // C4_GET_CHAN,
+ // C4_SET_CHAN,
+ C4_DEL_CHAN = 0,
+ // C4_CREATE_CHAN,
+ // C4_GET_CHAN_STATS,
+ // C4_RESET,
+ // C4_DEBUG,
+ C4_RESET_STATS,
+ C4_LOOP_PORT,
+ C4_RW_FRMR,
+ C4_RW_MSYC,
+ C4_RW_PLD
+};
+
+#define C4_GET_PORT SBE_IOC_PORT_GET
+#define C4_SET_PORT SBE_IOC_PORT_SET
+#define C4_GET_CHAN SBE_IOC_CHAN_GET
+#define C4_SET_CHAN SBE_IOC_CHAN_SET
+// #define C4_DEL_CHAN XXX
+#define C4_CREATE_CHAN SBE_IOC_CHAN_NEW
+#define C4_GET_CHAN_STATS SBE_IOC_CHAN_GET_STAT
+#define C4_RESET SBE_IOC_RESET_DEV
+#define C4_DEBUG SBE_IOC_LOGLEVEL
+// #define C4_RESET_STATS XXX
+// #define C4_LOOP_PORT XXX
+// #define C4_RW_FRMR XXX
+// #define C4_RW_MSYC XXX
+// #define C4_RW_PLD XXX
+
+struct c4_chan_stats_wrap
+{
+ int channum;
+ struct sbecom_chan_stats stats;
+};
+
+#endif /* _INC_PMCC4_IOCTLS_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_private.h b/drivers/staging/cxt1e1/pmcc4_private.h
new file mode 100644
index 00000000000..b2b6e370263
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_private.h
@@ -0,0 +1,296 @@
+#ifndef _INC_PMCC4_PRIVATE_H_
+#define _INC_PMCC4_PRIVATE_H_
+
+/*-----------------------------------------------------------------------------
+ * pmcc4_private.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h> /* support for tasklets */
+#include <linux/timer.h> /* support for timer */
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+
+#include "libsbew.h"
+#include "pmcc4_defs.h"
+#include "pmcc4_cpld.h"
+#include "musycc.h"
+#include "sbe_promformat.h"
+#include "comet.h"
+
+
+/* driver state */
+#define SBE_DRVR_INIT 0x0
+#define SBE_DRVR_AVAILABLE 0x69734F4E
+#define SBE_DRVR_DOWN 0x1
+
+/******************************************************************************
+ * MUSYCC Message Descriptor - coupled to hardware implementation, the first
+ * three u_int32 must not be reordered.
+ */
+
+struct mdesc
+{
+ volatile u_int32_t status; /* Buffer Descriptor */
+ u_int32_t data; /* Data Pointer */
+ u_int32_t next; /* MUSYCC view of Next Pointer */
+ void *mem_token; /* Data */
+ struct mdesc *snext;
+};
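+
+/*
+ * Editor's sketch (illustrative only): the hardware-consumed layout noted
+ * above could be guarded with compile-time checks dropped into any function
+ * (for example, early in an init path), assuming the kernel's
+ * BUILD_BUG_ON()/offsetof() helpers are acceptable here.
+ */
+#if 0                           /* example only */
+static inline void
+mdesc_layout_check (void)
+{
+    BUILD_BUG_ON (offsetof (struct mdesc, status) != 0);
+    BUILD_BUG_ON (offsetof (struct mdesc, data) != 4);
+    BUILD_BUG_ON (offsetof (struct mdesc, next) != 8);
+}
+#endif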
+
+
+/*************************************************************************
+ * Private driver data structures, internal use only.
+ */
+
+struct c4_chan_info
+{
+ int gchan; /* channel number within group/port 0-31 */
+ int channum; /* absolute channel number 0-128 */
+ u_int8_t status;
+#define TX_RECOVERY_MASK 0x0f
+#define TX_ONR_RECOVERY 0x01
+#define TX_BUFF_RECOVERY 0x02
+#define RX_RECOVERY_MASK 0xf0
+#define RX_ONR_RECOVERY 0x10
+
+ unsigned char ch_start_rx;
+#define CH_START_RX_NOW 1
+#define CH_START_RX_ONR 2
+#define CH_START_RX_BUF 3
+
+ unsigned char ch_start_tx;
+#define CH_START_TX_1ST 1
+#define CH_START_TX_ONR 2
+#define CH_START_TX_BUF 3
+
+ char tx_full; /* boolean */
+ short txd_free; /* count of TX Desc available */
+ short txd_required; /* count of TX Desc needed by mesg */
+ unsigned short rxd_num; /* must support range up to 2000 */
+ unsigned short txd_num; /* must support range up to 1000 */
+ int rxix_irq_srv;
+
+ enum
+ {
+ UNASSIGNED, /* AVAILABLE, NOTINUSE */
+ DOWN, /* ASSIGNED, NOTINUSE */
+ UP /* ASSIGNED and INUSE */
+ } state;
+
+ struct c4_port_info *up;
+ void *user;
+
+ struct work_struct ch_work;
+ struct mdesc *mdt;
+ struct mdesc *mdr;
+ struct mdesc *txd_irq_srv;
+ struct mdesc *txd_usr_add;
+
+#if 0
+ /*
+ * FUTURE CODE MIGHT SEPARATE TIMESLOT MAP SETUPS INTO SINGLE IOCTL and
+ * REMOVE MAPS FROM CHANNEL PARAMETER STRUCTURE
+ */
+ /*
+ * each byte in bitmask below represents one timeslot (bitmask[0] is for
+ * timeslot 0 and so on), each bit in the byte selects timeslot bits for
+ * this channel (0xff - whole timeslot, 0x7f - 56kbps mode)
+ */
+
+ u_int8_t ts_bitmask[32];
+#endif
+ spinlock_t ch_rxlock;
+ spinlock_t ch_txlock;
+ atomic_t tx_pending;
+
+ struct sbecom_chan_stats s;
+ struct sbecom_chan_param p;
+};
+typedef struct c4_chan_info mch_t;
+
+struct c4_port_info
+{
+
+ struct musycc_globalr *reg;
+ struct musycc_groupr *regram;
+ void *regram_saved; /* Original malloc value may have non-2KB
+ * boundary. Need to save for use when
+ * freeing. */
+ comet_t *cometbase;
+ struct sbe_card_info *up;
+
+ /*
+ * The workqueue is used for TX restart of ONR'd channels when in
+ * Transparent mode.
+ */
+
+ struct workqueue_struct *wq_port; /* chan restart work queue */
+ struct semaphore sr_sem_busy; /* service request exclusion
+ * semaphore */
+ struct semaphore sr_sem_wait; /* service request handshake
+ * semaphore */
+ u_int32_t sr_last;
+ short openchans;
+ char portnum;
+ char group_is_set; /* GROUP_INIT command issued to MUSYCC,
+ * otherwise SET_CHAN Ioctl fails */
+
+ mch_t *chan[MUSYCC_NCHANS];
+ struct sbecom_port_param p;
+
+ /*
+ * The MUSYCC timeslot mappings are maintained within the driver and are
+ * modified and reloaded as each of a group's channels are configured.
+ */
+ u_int8_t tsm[32]; /* tsm (time slot map) */
+ int fifomap[32];
+};
+typedef struct c4_port_info mpi_t;
+
+
+#define COMET_OFFSET(x) (0x80000+(x)*0x10000)
+#define EEPROM_OFFSET 0xC0000
+#define ISPLD_OFFSET 0xD0000
+
+/* iSPLD control chip registers */
+#define ISPLD_MCSR 0x0
+#define ISPLD_MCLK 0x1
+#define ISPLD_LEDS 0x2
+#define ISPLD_INTR 0x3
+#define ISPLD_MAX 0x3
+
+struct sbe_card_info
+{
+ struct musycc_globalr *reg;
+ struct musycc_groupr *regram;
+ u_int32_t *iqd_p; /* pointer to dword aligned interrupt queue
+ * descriptors */
+ void *iqd_p_saved; /* Original malloc value may have non-dword
+ * aligned boundary. Need to save for use
+ * when freeing. */
+ unsigned int iqp_headx, iqp_tailx;
+
+ struct semaphore sem_wdbusy;/* watchdog exclusion semaphore */
+ struct watchdog wd; /* statically allocated watchdog structure */
+ atomic_t bh_pending; /* bh queued, but not yet running */
+ u_int32_t brd_id; /* unique PCI ID */
+    u_int16_t   hdw_bid;        /* on-board hardware ID */
+ unsigned short wdcount;
+ unsigned char max_port;
+ unsigned char brdno; /* our board number */
+ unsigned char wd_notify;
+#define WD_NOTIFY_1TX 1
+#define WD_NOTIFY_BUF 2
+#define WD_NOTIFY_ONR 4
+ enum /* state as regards interrupt processing */
+ {
+        C_INIT,                 /* off-board addresses not configured, or in
+                                 * the process of being removed; don't access
+                                 * hardware */
+ C_IDLE, /* off-board-addresses are configured, but
+ * don't service interrupts, just clear them
+ * from hardware */
+ C_RUNNING /* life is good, service away */
+ } state;
+
+ struct sbe_card_info *next;
+ u_int32_t *eeprombase; /* mapped address of board's EEPROM */
+ c4cpld_t *cpldbase; /* mapped address of board's CPLD hardware */
+ char *release; /* SBE ID string w/in sbeRelease.c */
+ void *hdw_info;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *dir_dev;
+#endif
+
+ /* saved off interface assignments which bound a board */
+ hdlc_device *first_if;
+ hdlc_device *last_if;
+ short first_channum, last_channum;
+
+ struct intlog
+ {
+ u_int32_t this_status_new;
+ u_int32_t last_status_new;
+ u_int32_t drvr_intr_thcount;
+ u_int32_t drvr_intr_bhcount;
+ u_int32_t drvr_int_failure;
+ } intlog;
+
+ mpi_t port[MUSYCC_NPORTS];
+ char devname[SBE_IFACETMPL_SIZE + 1];
+ atomic_t tx_pending;
+ u_int32_t alarmed[4]; /* dpm211 */
+
+#if defined(SBE_ISR_TASKLET)
+ struct tasklet_struct ci_musycc_isr_tasklet;
+#elif defined(SBE_ISR_IMMEDIATE)
+ struct tq_struct ci_musycc_isr_tq;
+#endif
+};
+typedef struct sbe_card_info ci_t;
+
+struct s_hdw_info
+{
+ u_int8_t pci_busno;
+ u_int8_t pci_slot;
+ u_int8_t pci_pin[2];
+ u_int8_t revid[2];
+ u_int8_t mfg_info_sts;
+#define EEPROM_OK 0x00
+#define EEPROM_CRCERR 0x01
+ char promfmt; /* prom type, from sbe_promformat.h */
+
+ char devname[SBE_IFACETMPL_SIZE];
+ struct pci_bus *bus;
+ struct net_device *ndev;
+ struct pci_dev *pdev[2];
+
+ unsigned long addr[2];
+ unsigned long addr_mapped[2];
+ unsigned long len[2];
+
+ union
+ {
+ char data[128];
+ FLD_TYPE1 pft1; /* prom field, type #1 */
+ FLD_TYPE2 pft2; /* prom field, type #2 */
+ } mfg_info;
+};
+typedef struct s_hdw_info hdw_info_t;
+
+/*****************************************************************/
+
+struct c4_priv
+{
+ int channum;
+ struct sbe_card_info *ci;
+};
+
+
+/*****************************************************************/
+
+extern ci_t *c4_list;
+
+mch_t *c4_find_chan (int);
+int c4_set_chan (int channum, struct sbecom_chan_param *);
+int c4_get_chan (int channum, struct sbecom_chan_param *);
+int c4_get_chan_stats (int channum, struct sbecom_chan_stats *);
+
+#endif /* _INC_PMCC4_PRIVATE_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_sysdep.h b/drivers/staging/cxt1e1/pmcc4_sysdep.h
new file mode 100644
index 00000000000..697f1943670
--- /dev/null
+++ b/drivers/staging/cxt1e1/pmcc4_sysdep.h
@@ -0,0 +1,62 @@
+#ifndef _INC_PMCC4_SYSDEP_H_
+#define _INC_PMCC4_SYSDEP_H_
+
+/*-----------------------------------------------------------------------------
+ * pmcc4_sysdep.h -
+ *
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* reduce multiple autoconf entries to a single definition */
+
+#ifdef CONFIG_SBE_PMCC4_HDLC_V7_MODULE
+#undef CONFIG_SBE_PMCC4_HDLC_V7
+#define CONFIG_SBE_PMCC4_HDLC_V7 1
+#endif
+
+#ifdef CONFIG_SBE_PMCC4_NCOMM_MODULE
+#undef CONFIG_SBE_PMCC4_NCOMM
+#define CONFIG_SBE_PMCC4_NCOMM 1
+#endif
+
+
+/* FLUSH MACROS - if using ioremap_nocache(), then these can be NOOPS,
+ * otherwise a memory barrier needs to be inserted.
+ */
+
+#define FLUSH_PCI_READ() rmb()
+#define FLUSH_PCI_WRITE() wmb()
+#define FLUSH_MEM_READ() rmb()
+#define FLUSH_MEM_WRITE() wmb()
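+
+/*
+ * Editor's note (illustrative): per the comment above, a build that maps the
+ * device with ioremap_nocache() could instead define these as no-ops, e.g.
+ *
+ *   #define FLUSH_PCI_READ()   do { } while (0)
+ *   #define FLUSH_PCI_WRITE()  do { } while (0)
+ *   #define FLUSH_MEM_READ()   do { } while (0)
+ *   #define FLUSH_MEM_WRITE()  do { } while (0)
+ */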
+
+
+/*
+ * System dependent callbacks routines, not inlined...
+ * For inlined system dependent routines, see include/sbecom_inline_linux.h
+ */
+
+/*
+ * passes received memory token back to the system, <user> is parameter from
+ * sd_new_chan() used to create the channel which the data arrived on
+ */
+
+void sd_recv_consume(void *token, size_t len, void *user);
+
+void sd_disable_xmit (void *user);
+void sd_enable_xmit (void *user);
+int sd_line_is_ok (void *user);
+void sd_line_is_up (void *user);
+void sd_line_is_down (void *user);
+int sd_queue_stopped (void *user);
+
+#endif /*** _INC_PMCC4_SYSDEP_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbe_bid.h b/drivers/staging/cxt1e1/sbe_bid.h
new file mode 100644
index 00000000000..1f49b4061fb
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbe_bid.h
@@ -0,0 +1,61 @@
+/*
+ * $Id: sbe_bid.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_SBEBID_H_
+#define _INC_SBEBID_H_
+
+/*-----------------------------------------------------------------------------
+ * sbe_bid.h -
+ *
+ * Copyright (C) 2004-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.0 $
+ * Last changed on $Date: 2005/09/28 00:10:09 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: sbe_bid.h,v $
+ * Revision 1.0 2005/09/28 00:10:09 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#define SBE_BID_REG 0x00000000 /* Board ID Register */
+
+#define SBE_BID_256T3_E1 0x46 /* SBE wanPTMC-256T3 (E1 Version) */
+#define SBE_BID_256T3_T1 0x42 /* SBE wanPTMC-256T3 (T1 Version) */
+#define SBE_BID_2T3E3 0x43 /* SBE wanPMC-2T3E3 */
+#define SBE_BID_C1T3 0x45 /* SBE wanPMC-C1T3 */
+#define SBE_BID_C24TE1 0x47 /* SBE wanPTMC-C24TE1 */
+#define SBE_BID_C24TE1_RTM_24 0x48 /* C24TE1 RTM (24 Port) */
+#define SBE_BID_C24TE1_RTM_12 0x49 /* C24TE1 RTM (12 Port) */
+#define SBE_BID_C24TE1_RTM_12DSU 0x4A /* C24TE1 RTM (12 Port/DSU) */
+#define SBE_BID_C24TE1_RTM_T3 0x4B /* C24TE1 RTM (T3) */
+#define SBE_BID_C4T1E1 0x41 /* SBE wanPTMC-C4T1E1 */
+#define SBE_BID_HC4T1E1 0x44 /* SBE wanADAPT-HC4T1E1 */
+
+/* bogus temporary usage values */
+#define SBE_BID_PMC_C4T1E1 0xC4 /* SBE wanPMC-C4T1E1 (4 Port) */
+#define SBE_BID_PMC_C2T1E1 0xC2 /* SBE wanPMC-C2T1E1 (2 Port) */
+#define SBE_BID_PMC_C1T1E1 0xC1 /* SBE wanPMC-C1T1E1 (1 Port) */
+#define SBE_BID_PCI_C4T1E1 0x04 /* SBE wanPCI-C4T1E1 (4 Port) */
+#define SBE_BID_PCI_C2T1E1 0x02 /* SBE wanPCI-C2T1E1 (2 Port) */
+#define SBE_BID_PCI_C1T1E1 0x01 /* SBE wanPCI-C1T1E1 (1 Port) */
+
+#endif /*** _INC_SBEBID_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbe_promformat.h b/drivers/staging/cxt1e1/sbe_promformat.h
new file mode 100644
index 00000000000..746f81b15c7
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbe_promformat.h
@@ -0,0 +1,157 @@
+/*
+ * $Id: sbe_promformat.h,v 2.2 2005/09/28 00:10:09 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_SBE_PROMFORMAT_H_
+#define _INC_SBE_PROMFORMAT_H_
+
+/*-----------------------------------------------------------------------------
+ * sbe_promformat.h - Contents of seeprom used by dvt and manufacturing tests
+ *
+ * Copyright (C) 2002-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 2.2 $
+ * Last changed on $Date: 2005/09/28 00:10:09 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: sbe_promformat.h,v $
+ * Revision 2.2 2005/09/28 00:10:09 rickd
+ * Add EEPROM sample from C4T1E1 board.
+ *
+ * Revision 2.1 2005/05/04 17:18:24 rickd
+ * Initial CI.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+/***
+ * PMCC4 SAMPLE EEPROM IMAGE
+ *
+ * eeprom[00]: 01 11 76 07 01 00 a0 d6
+ * eeprom[08]: 22 34 56 3e 5b c1 1c 3e
+ * eeprom[16]: 5b e1 b6 00 00 00 01 00
+ * eeprom[24]: 00 08 46 d3 7b 5e a8 fb
+ * eeprom[32]: f7 ef df bf 7f 55 00 01
+ * eeprom[40]: 02 04 08 10 20 40 80 ff
+ * eeprom[48]: fe fd fb f7 ef df bf 7f
+ *
+ ***/
+
+
+/*------------------------------------------------------------------------
+ * Type 1 Format
+ * byte:
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
+ * -------------------------------------------------------------------------
+ * 01 11 76 SS SS 00 0A D6 <SERIAL NUM> <Create TIME> <Heatrun TIME>
+ * SBE SUB SERIAL # (BCD) (time_t) (time_t)
+ * ID VENDOR (format) (format)
+ *
+ * 19 20 21 22 23 24 25 26
+ * Heat Run Heat Run
+ * Iterations Errors
+ *------------------------------------------------------------------------
+ *
+ *
+ *
+ * Type 2 Format - Added length, CRC in fixed position
+ * byte:
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ * -------------------------------------------------------------------------
+ * 02 00 1A CC CC CC CC 11 76 07 03 00 0A D6 <SERIAL NUM>
+ * Payload SBE Crc32 SUB System System SERIAL/MAC
+ * Length VENDOR ID ID
+ *
+ *    17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
+ * --------------------------------------------------------------------------
+ * <Create TIME> <Heatrun TIME> Heat Run Heat Run
+ * (time_t) (time_t) Iterations Errors
+ *
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+#define STRUCT_OFFSET(type, symbol) ((long)&(((type *)0)->symbol))
+
+/*------------------------------------------------------------------------
+ * Historically different Prom format types.
+ *
+ * For diagnostic and failure purposes, do not create a type 0x00 or a
+ * type 0xff
+ *------------------------------------------------------------------------
+ */
+#define PROM_FORMAT_Unk (-1)
+#define PROM_FORMAT_TYPE1 1
+#define PROM_FORMAT_TYPE2 2
+
+
+/****** bit fields for a type 1 formatted seeprom **************************/
+ typedef struct
+ {
+ char type; /* 0x00 */
+ char Id[2]; /* 0x01-0x02 */
+ char SubId[2]; /* 0x03-0x04 */
+ char Serial[6]; /* 0x05-0x0a */
+ char CreateTime[4]; /* 0x0b-0x0e */
+ char HeatRunTime[4]; /* 0x0f-0x12 */
+ char HeatRunIterations[4]; /* 0x13-0x16 */
+ char HeatRunErrors[4]; /* 0x17-0x1a */
+ char Crc32[4]; /* 0x1b-0x1e */
+ } FLD_TYPE1;
+
+
+/****** bit fields for a type 2 formatted seeprom **************************/
+ typedef struct
+ {
+ char type; /* 0x00 */
+ char length[2]; /* 0x01-0x02 */
+ char Crc32[4]; /* 0x03-0x06 */
+ char Id[2]; /* 0x07-0x08 */
+ char SubId[2]; /* 0x09-0x0a */
+ char Serial[6]; /* 0x0b-0x10 */
+ char CreateTime[4]; /* 0x11-0x14 */
+ char HeatRunTime[4]; /* 0x15-0x18 */
+ char HeatRunIterations[4]; /* 0x19-0x1c */
+ char HeatRunErrors[4]; /* 0x1d-0x20 */
+ } FLD_TYPE2;
+
+
+
+/***** this union allows us to access the seeprom as an array of bytes ***/
+/***** or as individual fields ***/
+
+#define SBE_EEPROM_SIZE 128
+#define SBE_MFG_INFO_SIZE sizeof(FLD_TYPE2)
+
+ typedef union
+ {
+ char bytes[128];
+ FLD_TYPE1 fldType1;
+ FLD_TYPE2 fldType2;
+ } PROMFORMAT;
+
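+/*
+ * Editor's sketch (illustrative only): picking the field view of the union
+ * above from the leading format-type byte.  The helper name is hypothetical;
+ * a real caller would also validate the length and CRC32 fields.
+ */
+#if 0                           /* example only */
+ static int
+ prom_format_of (PROMFORMAT * pf)
+ {
+     switch (pf->bytes[0])
+     {
+     case PROM_FORMAT_TYPE1:
+         return PROM_FORMAT_TYPE1;      /* parse via pf->fldType1 */
+     case PROM_FORMAT_TYPE2:
+         return PROM_FORMAT_TYPE2;      /* parse via pf->fldType2 */
+     default:
+         return PROM_FORMAT_Unk;
+     }
+ }
+#endif
+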
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*** _INC_SBE_PROMFORMAT_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
new file mode 100644
index 00000000000..c65172db2ad
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -0,0 +1,310 @@
+/*
+ * $Id: sbecom_inline_linux.h,v 1.2 2007/08/15 22:51:35 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_SBECOM_INLNX_H_
+#define _INC_SBECOM_INLNX_H_
+
+/*-----------------------------------------------------------------------------
+ * sbecom_inline_linux.h - SBE common Linux inlined routines
+ *
+ * Copyright (C) 2007 One Stop Systems, Inc.
+ * Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@onestopsystems.com
+ * One Stop Systems, Inc. Escondido, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.2 $
+ * Last changed on $Date: 2007/08/15 22:51:35 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: sbecom_inline_linux.h,v $
+ * Revision 1.2 2007/08/15 22:51:35 rickd
+ * Remove duplicate version.h entry.
+ *
+ * Revision 1.1 2007/08/15 22:50:29 rickd
+ * Update linux/config for 2.6.18 and later.
+ *
+ * Revision 1.0 2005/09/28 00:10:09 rickd
+ * Initial revision
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+#if defined (__FreeBSD__) || defined (__NetBSD__)
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#include <linux/config.h>
+#endif
+#if defined(CONFIG_SMP) && ! defined(__SMP__)
+#define __SMP__
+#endif
+#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#ifdef MODULE
+#ifdef MODVERSIONS
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+#include <linux/modversions.h>
+#else
+#include <config/modversions.h>
+#endif
+#endif
+#include <linux/module.h>
+#endif
+#endif
+
+#include <linux/kernel.h> /* resolves kmalloc references */
+#include <linux/skbuff.h> /* resolves skb references */
+#include <linux/netdevice.h>    /* resolves dev_kfree_skb_any */
+#include <asm/byteorder.h> /* resolves cpu_to_le32 */
+
+#if 0
+
+/*** PORT POINT WARNING
+ ***
+ *** Under Linux 2.6 it has been found that the compiler re-orders
+ *** in-lined pci_write_32() calls, to the detriment of correct
+ *** hardware setup. Therefore, inlining of PCI accesses has been
+ *** dropped in favor of plain subroutine calls.
+ ***/
+
+static inline u_int32_t
+pci_read_32 (u_int32_t *p)
+{
+#ifdef FLOW_DEBUG
+ u_int32_t v;
+
+ FLUSH_PCI_READ ();
+ v = le32_to_cpu (*p);
+ if (log_level >= LOG_DEBUG)
+ pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
+ return v;
+#else
+ FLUSH_PCI_READ (); /* */
+ return le32_to_cpu (*p);
+#endif
+}
+
+static inline void
+pci_write_32 (u_int32_t *p, u_int32_t v)
+{
+#ifdef FLOW_DEBUG
+ if (log_level >= LOG_DEBUG)
+ pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
+#endif
+ *p = cpu_to_le32 (v);
+    FLUSH_PCI_WRITE ();     /* This routine is called from routines
+                             * which do multiple register writes, each
+                             * of which needs flushing between writes
+                             * in order to guarantee write ordering.
+                             * It is less cumbersome to flush here
+                             * than to investigate and recode the many
+                             * other register-writing routines. */
+}
+#else
+/* forward reference */
+u_int32_t pci_read_32 (u_int32_t *p);
+void pci_write_32 (u_int32_t *p, u_int32_t v);
+
+#endif
+
+
+/*
+ * system dependent callbacks
+ */
+
+/**********/
+/* malloc */
+/**********/
+
+static inline void *
+OS_kmalloc (size_t size)
+{
+ char *ptr = kmalloc (size, GFP_KERNEL | GFP_DMA);
+
+ if (ptr)
+ memset (ptr, 0, size);
+ return ptr;
+}
+
+static inline void
+OS_kfree (void *x)
+{
+ kfree (x);
+}
+
+
+/****************/
+/* memory token */
+/****************/
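+
+/*
+ * A "memory token" here is simply an opaque handle around a struct sk_buff:
+ * allocation returns the skb pointer itself, and the data/len accessors
+ * below merely unwrap it.
+ */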
+
+static inline void *
+OS_mem_token_alloc (size_t size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb (size);
+ if (!skb)
+ {
+ //pr_warning("no mem in OS_mem_token_alloc !\n");
+ return 0;
+ }
+ return skb;
+}
+
+
+static inline void
+OS_mem_token_free (void *token)
+{
+ dev_kfree_skb_any (token);
+}
+
+
+static inline void
+OS_mem_token_free_irq (void *token)
+{
+ dev_kfree_skb_irq (token);
+}
+
+
+static inline void *
+OS_mem_token_data (void *token)
+{
+ return ((struct sk_buff *) token)->data;
+}
+
+
+static inline void *
+OS_mem_token_next (void *token)
+{
+ return 0;
+}
+
+
+static inline int
+OS_mem_token_len (void *token)
+{
+ return ((struct sk_buff *) token)->len;
+}
+
+
+static inline int
+OS_mem_token_tlen (void *token)
+{
+ return ((struct sk_buff *) token)->len;
+}
+
+
+/***************************************/
+/* virtual to physical addr conversion */
+/***************************************/
+
+static inline u_long
+OS_phystov (void *addr)
+{
+ return (u_long) __va (addr);
+}
+
+
+static inline u_long
+OS_vtophys (void *addr)
+{
+ return __pa (addr);
+}
+
+
+/**********/
+/* semops */
+/**********/
+
+void OS_sem_init (void *, int);
+
+
+static inline void
+OS_sem_free (void *sem)
+{
+ /*
+     * NOOP - semaphore structures are now predeclared within their parent
+     * structures, so they are no longer malloc'd and need not be freed here.
+ */
+}
+
+#define SD_SEM_TAKE(sem,desc) down(sem)
+#define SD_SEM_GIVE(sem) up(sem)
+#define SEM_AVAILABLE 1
+#define SEM_TAKEN 0
+
+
+/**********************/
+/* watchdog functions */
+/**********************/
+
+struct watchdog
+{
+ struct timer_list h;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+ struct tq_struct tq;
+#else
+ struct work_struct work;
+#endif
+ void *softc;
+ void (*func) (void *softc);
+ int ticks;
+ int init_tq;
+};
+
+
+static inline int
+OS_start_watchdog (struct watchdog * wd)
+{
+ wd->h.expires = jiffies + wd->ticks;
+ add_timer (&wd->h);
+ return 0;
+}
+
+
+static inline int
+OS_stop_watchdog (struct watchdog * wd)
+{
+ del_timer_sync (&wd->h);
+ return 0;
+}
+
+
+static inline int
+OS_free_watchdog (struct watchdog * wd)
+{
+ OS_stop_watchdog (wd);
+ OS_kfree (wd);
+ return 0;
+}
+
+
+/* sleep in microseconds */
+void OS_uwait (int usec, char *description);
+void OS_uwait_dummy (void);
+
+
+/* watchdog functions */
+int OS_init_watchdog(struct watchdog *wdp, void (*f) (void *), void *ci, int usec);
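+
+/*
+ * Illustrative call sequence only (a sketch, not code from this driver;
+ * my_timeout_handler and my_softc are hypothetical names, and the interval
+ * value is arbitrary):
+ *
+ *     struct watchdog *wd = OS_kmalloc (sizeof (struct watchdog));
+ *     OS_init_watchdog (wd, my_timeout_handler, my_softc, 250000);
+ *     OS_start_watchdog (wd);
+ *     ...
+ *     OS_free_watchdog (wd);    stops the timer, then releases wd
+ */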
+
+
+#endif /*** _INC_SBECOM_INLNX_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbecrc.c b/drivers/staging/cxt1e1/sbecrc.c
new file mode 100644
index 00000000000..51232948091
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbecrc.c
@@ -0,0 +1,137 @@
+/* Based on "File Verification Using CRC" by Mark R. Nelson in Dr. Dobb's
+ * Journal, May 1992, pp. 64-67. This algorithm generates the same CRC
+ * values as ZMODEM and PKZIP.
+ *
+ * Copyright (C) 2002-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "sbe_promformat.h"
+
+/* defines */
+#define CRC32_POLYNOMIAL 0xEDB88320L
+#define CRC_TABLE_ENTRIES 256
+
+
+
+static u_int32_t crcTableInit;
+
+#ifdef STATIC_CRC_TABLE
+static u_int32_t CRCTable[CRC_TABLE_ENTRIES];
+
+#endif
+
+
+/***************************************************************************
+*
+* genCrcTable - fills in CRCTable, as used by sbeCrc()
+*
+* RETURNS: N/A
+*
+* ERRNO: N/A
+***************************************************************************/
+
+static void
+genCrcTable (u_int32_t *CRCTable)
+{
+ int ii, jj;
+ u_int32_t crc;
+
+ for (ii = 0; ii < CRC_TABLE_ENTRIES; ii++)
+ {
+ crc = ii;
+ for (jj = 8; jj > 0; jj--)
+ {
+ if (crc & 1)
+ crc = (crc >> 1) ^ CRC32_POLYNOMIAL;
+ else
+ crc >>= 1;
+ }
+ CRCTable[ii] = crc;
+ }
+
+ crcTableInit++;
+}
+
+
+/***************************************************************************
+*
+* sbeCrc - generates a CRC on a given buffer, and initial CRC
+*
+* This routine calculates the CRC for a buffer of data using the
+* table lookup method. It accepts an original value for the crc,
+* and returns the updated value. This permits "catenation" of
+* discontiguous buffers. An original value of 0 for the "first"
+* buffer is the norm.
+*
+* Based on "File Verification Using CRC" by Mark R. Nelson in Dr. Dobb's
+* Journal, May 1992, pp. 64-67. This algorithm generates the same CRC
+* values as ZMODEM and PKZIP.
+*
+* RETURNS: calculated crc of block
+*
+*/
+
+void
+sbeCrc (u_int8_t *buffer, /* data buffer to crc */
+ u_int32_t count, /* length of block in bytes */
+ u_int32_t initialCrc, /* starting CRC */
+ u_int32_t *result)
+{
+ u_int32_t *tbl = 0;
+ u_int32_t temp1, temp2, crc;
+
+ /*
+     * If the table has not yet been created, do so.  Don't worry about the
+     * "extra" time spent checking this every time sbeCrc() is called, since
+     * CRC calculations are already time-consuming.
+ */
+#ifdef STATIC_CRC_TABLE
+    tbl = CRCTable;             /* always point at the static table */
+    if (!crcTableInit)
+        genCrcTable (tbl);
+#else
+    if (!crcTableInit)
+    {
+        tbl = (u_int32_t *) OS_kmalloc (CRC_TABLE_ENTRIES * sizeof (u_int32_t));
+        if (tbl == 0)
+        {
+            *result = 0;    /* dummy up return value due to malloc
+                             * failure */
+            return;
+        }
+        genCrcTable (tbl);
+    }
+#endif
+ /* inverting bits makes ZMODEM & PKZIP compatible */
+ crc = initialCrc ^ 0xFFFFFFFFL;
+
+ while (count-- != 0)
+ {
+ temp1 = (crc >> 8) & 0x00FFFFFFL;
+ temp2 = tbl[((int) crc ^ *buffer++) & 0xff];
+ crc = temp1 ^ temp2;
+ }
+
+ crc ^= 0xFFFFFFFFL;
+
+ *result = crc;
+
+#ifndef STATIC_CRC_TABLE
+ crcTableInit = 0;
+ OS_kfree (tbl);
+#endif
+}
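+
+/*
+ * Illustrative use only (not code from this driver; buf1/buf2/len1/len2 are
+ * hypothetical): the CRC of two discontiguous buffers can be "catenated" by
+ * feeding the result of the first call in as the initial CRC of the second:
+ *
+ *     u_int32_t crc = 0;
+ *     sbeCrc (buf1, len1, 0, &crc);
+ *     sbeCrc (buf2, len2, crc, &crc);
+ */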
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
new file mode 100644
index 00000000000..a2243b10ef0
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -0,0 +1,217 @@
+/* Copyright (C) 2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "libsbew.h"
+#include "pmcc4_private.h"
+#include "pmcc4.h"
+#include "sbe_bid.h"
+
+#ifdef SBE_INCLUDE_SYMBOLS
+#define STATIC
+#else
+#define STATIC static
+#endif
+
+
+char *
+sbeid_get_bdname (ci_t * ci)
+{
+ char *np = 0;
+
+ switch (ci->brd_id)
+ {
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ np = "wanPTMC-256T3 <E1>";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ np = "wanPTMC-256T3 <T1>";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ np = "wanPMC-C4T1E1";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ np = "wanPMC-C2T1E1";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ np = "wanPMC-C1T1E1";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ np = "wanPCI-C4T1E1";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ np = "wanPCI-C2T1E1";
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ np = "wanPCI-C1T1E1";
+ break;
+ default:
+ /*** np = "<unknown>"; ***/
+ np = "wanPCI-CxT1E1";
+ break;
+ }
+
+ return np;
+}
+
+
+/* given the presetting of brd_id, set the corresponding hdw_bid */
+
+void
+sbeid_set_hdwbid (ci_t * ci)
+{
+ /*
+     * set SBE's unique hardware identification (legacy boards might not
+     * have this register implemented)
+ */
+
+ switch (ci->brd_id)
+ {
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ ci->hdw_bid = SBE_BID_256T3_E1; /* 0x46 - SBE wanPTMC-256T3 (E1
+ * Version) */
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ ci->hdw_bid = SBE_BID_256T3_T1; /* 0x42 - SBE wanPTMC-256T3 (T1
+ * Version) */
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ /*
+ * This Board ID is a generic identification. Use the found number
+ * of ports to further define this hardware.
+ */
+ switch (ci->max_port)
+ {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0xC4 - SBE wanPMC-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C2T1E1; /* 0xC2 - SBE wanPMC-C2T1E1 */
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ ci->hdw_bid = SBE_BID_PMC_C1T1E1; /* 0xC1 - SBE wanPMC-C1T1E1 */
+ break;
+#ifdef SBE_PMCC4_ENABLE
+ /*
+ * This case is entered as a result of the inability to obtain the
+ * <bid> from the board's EEPROM. Assume a PCI board and set
+     * <hdw_bid> according to the number of found ports.
+ */
+ case 0:
+        /* start by assuming a 4-port board for the zero case */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ /* drop thru to set hdw_bid and alternate PCI CxT1E1 settings */
+#endif
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ /*
+ * This Board ID is a generic identification. Use the number of
+ * found ports to further define this hardware.
+ */
+ switch (ci->max_port)
+ {
+ default: /* shouldn't need a default, but have one
+ * anyway */
+ case 4:
+ ci->hdw_bid = SBE_BID_PCI_C4T1E1; /* 0x04 - SBE wanPCI-C4T1E1 */
+ break;
+ case 2:
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case 1:
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
+ }
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C2T1E1; /* 0x02 - SBE wanPCI-C2T1E1 */
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ ci->hdw_bid = SBE_BID_PCI_C1T1E1; /* 0x01 - SBE wanPCI-C1T1E1 */
+ break;
+ default:
+ /*** bid = "<unknown>"; ***/
+ ci->hdw_bid = SBE_BID_PMC_C4T1E1; /* 0x41 - SBE wanPTMC-C4T1E1 */
+ break;
+ }
+}
+
+/* given the presetting of hdw_bid, set the corresponding brd_id */
+
+void
+sbeid_set_bdtype (ci_t * ci)
+{
+ /* set SBE's unique PCI VENDOR/DEVID */
+ switch (ci->hdw_bid)
+ {
+ case SBE_BID_C1T3: /* SBE wanPMC-C1T3 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3);
+ break;
+ case SBE_BID_C24TE1: /* SBE wanPTMC-C24TE1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1);
+ break;
+ case SBE_BID_256T3_E1: /* SBE wanPTMC-256T3 E1 Version */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1);
+ break;
+ case SBE_BID_256T3_T1: /* SBE wanPTMC-256T3 T1 Version */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1);
+ break;
+ case SBE_BID_PMC_C4T1E1: /* 0xC4 - SBE wanPMC-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1);
+ break;
+ case SBE_BID_PMC_C2T1E1: /* 0xC2 - SBE wanPMC-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1);
+ break;
+ case SBE_BID_PMC_C1T1E1: /* 0xC1 - SBE wanPMC-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1);
+ break;
+ case SBE_BID_PCI_C4T1E1: /* 0x04 - SBE wanPCI-C4T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ case SBE_BID_PCI_C2T1E1: /* 0x02 - SBE wanPCI-C2T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1);
+ break;
+ case SBE_BID_PCI_C1T1E1: /* 0x01 - SBE wanPCI-C1T1E1 */
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1);
+ break;
+
+ default:
+ /*** hdw_bid = "<unknown>"; ***/
+ ci->brd_id = SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1);
+ break;
+ }
+}
+
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/sbeproc.c b/drivers/staging/cxt1e1/sbeproc.c
new file mode 100644
index 00000000000..4f4dcd36bf4
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbeproc.c
@@ -0,0 +1,358 @@
+/* Copyright (C) 2004-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include "pmcc4_sysdep.h"
+#include "sbecom_inline_linux.h"
+#include "pmcc4_private.h"
+#include "sbeproc.h"
+
+/* forwards */
+void sbecom_get_brdinfo (ci_t *, struct sbe_brd_info *, u_int8_t *);
+extern struct s_hdw_info hdw_info[MAX_BOARDS];
+
+#ifdef CONFIG_PROC_FS
+
+/********************************************************************/
+/* procfs stuff */
+/********************************************************************/
+
+
+void
+sbecom_proc_brd_cleanup (ci_t * ci)
+{
+ if (ci->dir_dev)
+ {
+ char dir[7 + SBE_IFACETMPL_SIZE + 1];
+ snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
+ remove_proc_entry("info", ci->dir_dev);
+ remove_proc_entry(dir, NULL);
+ ci->dir_dev = NULL;
+ }
+}
+
+
+static int
+sbecom_proc_get_sbe_info (char *buffer, char **start, off_t offset,
+ int length, int *eof, void *priv)
+{
+ ci_t *ci = (ci_t *) priv;
+ int len = 0;
+ char *spd;
+ struct sbe_brd_info *bip;
+
+ if (!(bip = OS_kmalloc (sizeof (struct sbe_brd_info))))
+ {
+ return -ENOMEM;
+ }
+#if 0
+ /** RLD DEBUG **/
+ pr_info(">> sbecom_proc_get_sbe_info: entered, offset %d. length %d.\n",
+ (int) offset, (int) length);
+#endif
+
+ {
+ hdw_info_t *hi = &hdw_info[ci->brdno];
+
+ u_int8_t *bsn = 0;
+
+ switch (hi->promfmt)
+ {
+ case PROM_FORMAT_TYPE1:
+ bsn = (u_int8_t *) hi->mfg_info.pft1.Serial;
+ break;
+ case PROM_FORMAT_TYPE2:
+ bsn = (u_int8_t *) hi->mfg_info.pft2.Serial;
+ break;
+ }
+
+ sbecom_get_brdinfo (ci, bip, bsn);
+ }
+
+#if 0
+ /** RLD DEBUG **/
+ pr_info(">> sbecom_get_brdinfo: returned, first_if %p <%s> last_if %p <%s>\n",
+ (char *) &bip->first_iname, (char *) &bip->first_iname,
+ (char *) &bip->last_iname, (char *) &bip->last_iname);
+#endif
+ len += sprintf (buffer + len, "Board Type: ");
+ switch (bip->brd_id)
+ {
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T3):
+ len += sprintf (buffer + len, "wanPMC-C1T3");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_E1):
+ len += sprintf (buffer + len, "wanPTMC-256T3 <E1>");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_256T3_T1):
+ len += sprintf (buffer + len, "wanPTMC-256T3 <T1>");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPTMC_C24TE1):
+ len += sprintf (buffer + len, "wanPTMC-C24TE1");
+ break;
+
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C4T1E1_L):
+ len += sprintf (buffer + len, "wanPMC-C4T1E1");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C2T1E1_L):
+ len += sprintf (buffer + len, "wanPMC-C2T1E1");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1):
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPMC_C1T1E1_L):
+ len += sprintf (buffer + len, "wanPMC-C1T1E1");
+ break;
+
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C4T1E1):
+ len += sprintf (buffer + len, "wanPCI-C4T1E1");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C2T1E1):
+ len += sprintf (buffer + len, "wanPCI-C2T1E1");
+ break;
+ case SBE_BOARD_ID (PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_WANPCI_C1T1E1):
+ len += sprintf (buffer + len, "wanPCI-C1T1E1");
+ break;
+
+ default:
+ len += sprintf (buffer + len, "unknown");
+ break;
+ }
+ len += sprintf (buffer + len, " [%08X]\n", bip->brd_id);
+
+ len += sprintf (buffer + len, "Board Number: %d\n", bip->brdno);
+ len += sprintf (buffer + len, "Hardware ID: 0x%02X\n", ci->hdw_bid);
+ len += sprintf (buffer + len, "Board SN: %06X\n", bip->brd_sn);
+ len += sprintf (buffer + len, "Board MAC: %02X-%02X-%02X-%02X-%02X-%02X\n",
+ bip->brd_mac_addr[0], bip->brd_mac_addr[1], bip->brd_mac_addr[2],
+ bip->brd_mac_addr[3], bip->brd_mac_addr[4], bip->brd_mac_addr[5]);
+ len += sprintf (buffer + len, "Ports: %d\n", ci->max_port);
+ len += sprintf (buffer + len, "Channels: %d\n", bip->brd_chan_cnt);
+#if 1
+ len += sprintf (buffer + len, "Interface: %s -> %s\n",
+ (char *) &bip->first_iname, (char *) &bip->last_iname);
+#else
+ len += sprintf (buffer + len, "Interface: <not available> 1st %p lst %p\n",
+ (char *) &bip->first_iname, (char *) &bip->last_iname);
+#endif
+
+ switch (bip->brd_pci_speed)
+ {
+ case BINFO_PCI_SPEED_33:
+          spd = "33MHz";
+          break;
+      case BINFO_PCI_SPEED_66:
+          spd = "66MHz";
+ break;
+ default:
+ spd = "<not available>";
+ break;
+ }
+ len += sprintf (buffer + len, "PCI Bus Speed: %s\n", spd);
+ len += sprintf (buffer + len, "Release: %s\n", ci->release);
+
+#ifdef SBE_PMCC4_ENABLE
+ {
+ extern int max_mru;
+#if 0
+ extern int max_chans_used;
+ extern int max_mtu;
+#endif
+ extern int max_rxdesc_used, max_txdesc_used;
+
+ len += sprintf (buffer + len, "\nmax_mru: %d\n", max_mru);
+#if 0
+ len += sprintf (buffer + len, "\nmax_chans_used: %d\n", max_chans_used);
+ len += sprintf (buffer + len, "max_mtu: %d\n", max_mtu);
+#endif
+ len += sprintf (buffer + len, "max_rxdesc_used: %d\n", max_rxdesc_used);
+ len += sprintf (buffer + len, "max_txdesc_used: %d\n", max_txdesc_used);
+ }
+#endif
+
+ OS_kfree (bip); /* cleanup */
+
+ /***
+ * How to be a proc read function
+ * ------------------------------
+ * Prototype:
+ * int f(char *buffer, char **start, off_t offset,
+ * int count, int *peof, void *dat)
+ *
+ * Assume that the buffer is "count" bytes in size.
+ *
+ * If you know you have supplied all the data you
+ * have, set *peof.
+ *
+ * You have three ways to return data:
+ * 0) Leave *start = NULL. (This is the default.)
+ * Put the data of the requested offset at that
+ * offset within the buffer. Return the number (n)
+ * of bytes there are from the beginning of the
+ * buffer up to the last byte of data. If the
+ * number of supplied bytes (= n - offset) is
+ * greater than zero and you didn't signal eof
+ * and the reader is prepared to take more data
+ * you will be called again with the requested
+ * offset advanced by the number of bytes
+ * absorbed. This interface is useful for files
+ * no larger than the buffer.
+ * 1) Set *start = an unsigned long value less than
+ * the buffer address but greater than zero.
+ * Put the data of the requested offset at the
+ * beginning of the buffer. Return the number of
+ * bytes of data placed there. If this number is
+ * greater than zero and you didn't signal eof
+ * and the reader is prepared to take more data
+ * you will be called again with the requested
+ * offset advanced by *start. This interface is
+ * useful when you have a large file consisting
+ * of a series of blocks which you want to count
+ * and return as wholes.
+ * (Hack by Paul.Russell@rustcorp.com.au)
+ * 2) Set *start = an address within the buffer.
+ * Put the data of the requested offset at *start.
+ * Return the number of bytes of data placed there.
+ * If this number is greater than zero and you
+ * didn't signal eof and the reader is prepared to
+ * take more data you will be called again with the
+ * requested offset advanced by the number of bytes
+ * absorbed.
+ */
+
+#if 1
+    /* #4 - interpretation of above = set EOF, return len */
+ *eof = 1;
+#endif
+
+#if 0
+ /*
+     * #1 - from net/wireless/atmel.c.  RLD NOTE - there's something wrong
+     * with this plagiarized code which results in this routine being called
+     * TWICE.
+ * The second call returns ZERO, resulting in hidden failure, but at
+ * least only a single message set is being displayed.
+ */
+ if (len <= offset + length)
+ *eof = 1;
+ *start = buffer + offset;
+ len -= offset;
+ if (len > length)
+ len = length;
+ if (len < 0)
+ len = 0;
+#endif
+
+#if 0 /* #2 from net/tokenring/olympic.c +
+ * lanstreamer.c */
+ {
+ off_t begin = 0;
+ int size = 0;
+ off_t pos = 0;
+
+ size = len;
+ pos = begin + size;
+ if (pos < offset)
+ {
+ len = 0;
+ begin = pos;
+ }
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if (len > length)
+ len = length; /* Ending slop */
+ }
+#endif
+
+#if 0 /* #3 from
+ * char/ftape/lowlevel/ftape-proc.c */
+ len = strlen (buffer);
+ *start = NULL;
+ if (offset + length >= len)
+ *eof = 1;
+ else
+ *eof = 0;
+#endif
+
+#if 0
+ pr_info(">> proc_fs: returned len = %d., start %p\n", len, start); /* RLD DEBUG */
+#endif
+
+/***
+ using NONE: returns = 314.314.314.
+ using #1 : returns = 314, 0.
+ using #2 : returns = 314, 0, 0.
+ using #3 : returns = 314, 314.
+ using #4 : returns = 314, 314.
+***/
+
+ return len;
+}
+
+/* initialize the /proc subsystem for the specific SBE driver */
+
+int __init
+sbecom_proc_brd_init (ci_t * ci)
+{
+ struct proc_dir_entry *e;
+ char dir[7 + SBE_IFACETMPL_SIZE + 1];
+
+ /* create a directory in the root procfs */
+ snprintf(dir, sizeof(dir), "driver/%s", ci->devname);
+ ci->dir_dev = proc_mkdir(dir, NULL);
+ if (!ci->dir_dev)
+ {
+ pr_err("Unable to create directory /proc/driver/%s\n", ci->devname);
+ goto fail;
+ }
+ e = create_proc_read_entry ("info", S_IFREG | S_IRUGO,
+ ci->dir_dev, sbecom_proc_get_sbe_info, ci);
+ if (!e)
+ {
+ pr_err("Unable to create entry /proc/driver/%s/info\n", ci->devname);
+ goto fail;
+ }
+ return 0;
+
+fail:
+ sbecom_proc_brd_cleanup (ci);
+ return 1;
+}
+
+#else /*** ! CONFIG_PROC_FS ***/
+
+/* stubbed off dummy routines */
+
+void
+sbecom_proc_brd_cleanup (ci_t * ci)
+{
+}
+
+int __init
+sbecom_proc_brd_init (ci_t * ci)
+{
+ return 0;
+}
+
+#endif /*** CONFIG_PROC_FS ***/
+
+
+/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
new file mode 100644
index 00000000000..4aa53f44ec0
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -0,0 +1,52 @@
+/*
+ * $Id: sbeproc.h,v 1.2 2005/10/17 23:55:28 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_SBEPROC_H_
+#define _INC_SBEPROC_H_
+
+/*-----------------------------------------------------------------------------
+ * sbeproc.h -
+ *
+ * Copyright (C) 2004-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.2 $
+ * Last changed on $Date: 2005/10/17 23:55:28 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: sbeproc.h,v $
+ * Revision 1.2 2005/10/17 23:55:28 rickd
+ * sbecom_proc_brd_init() is declared an __init function.
+ *
+ * Revision 1.1 2005/09/28 00:10:09 rickd
+ * Remove unneeded inclusion of c4_private.h.
+ *
+ * Revision 1.0 2005/05/10 22:21:46 rickd
+ * Initial check-in.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+
+#ifdef CONFIG_PROC_FS
+#ifdef __KERNEL__
+void sbecom_proc_brd_cleanup (ci_t *);
+int __init sbecom_proc_brd_init (ci_t *);
+
+#endif /*** __KERNEL__ ***/
+#endif /*** CONFIG_PROC_FS ***/
+#endif /*** _INC_SBEPROC_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbew_ioc.h b/drivers/staging/cxt1e1/sbew_ioc.h
new file mode 100644
index 00000000000..14d371904d1
--- /dev/null
+++ b/drivers/staging/cxt1e1/sbew_ioc.h
@@ -0,0 +1,136 @@
+/*
+ * $Id: sbew_ioc.h,v 1.0 2005/09/28 00:10:10 rickd PMCC4_3_1B $
+ */
+
+#ifndef _INC_SBEWIOC_H_
+#define _INC_SBEWIOC_H_
+
+/*-----------------------------------------------------------------------------
+ * sbew_ioc.h -
+ *
+ * Copyright (C) 2002-2005 SBE, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For further information, contact via email: support@sbei.com
+ * SBE, Inc. San Ramon, California U.S.A.
+ *
+ *-----------------------------------------------------------------------------
+ * RCS info:
+ * RCS revision: $Revision: 1.0 $
+ * Last changed on $Date: 2005/09/28 00:10:10 $
+ * Changed by $Author: rickd $
+ *-----------------------------------------------------------------------------
+ * $Log: sbew_ioc.h,v $
+ * Revision 1.0 2005/09/28 00:10:10 rickd
+ * Initial revision
+ *
+ * Revision 1.6 2005/01/11 18:41:01 rickd
+ * Add BRDADDR_GET Ioctl.
+ *
+ * Revision 1.5 2004/09/16 18:55:59 rickd
+ * Start setting up for generic framer configuration Ioctl by switch
+ * from tect3_framer_param[] to sbecom_framer_param[].
+ *
+ * Revision 1.4 2004/06/28 17:58:15 rickd
+ * Rename IOC_TSMAP_[GS] to IOC_TSIOC_[GS] to support need for
+ * multiple formats of data when setting up TimeSlots.
+ *
+ * Revision 1.3 2004/06/22 21:18:13 rickd
+ * read_vec() now ONLY handles a single common wrt_vec array.
+ *
+ * Revision 1.1 2004/06/10 18:11:34 rickd
+ * Add IID_GET Ioctl reference.
+ *
+ * Revision 1.0 2004/06/08 22:59:38 rickd
+ * Initial revision
+ *
+ * Revision 2.0 2004/06/07 17:49:47 rickd
+ * Initial library release following merge of wanc1t3/wan256 into
+ * common elements for lib.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+#ifndef __KERNEL__
+#include <sys/types.h>
+#endif
+#ifdef SunOS
+#include <sys/ioccom.h>
+#else
+#include <linux/ioctl.h>
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define SBE_LOCKFILE "/tmp/.sbewan.LCK"
+
+#define SBE_IOC_COOKIE 0x19780926
+#define SBE_IOC_MAGIC ('s')
+
+/* IOW write - data has to go into driver from application */
+/* IOR read - data has to be returned to application from driver */
+
+/*
+ * Note: for an IOWR Ioctl, the read and write data do not have to
+ * be the same size, but the entity declared within the IOC must be
+ * the larger of the two.
+ */
+
+#define SBE_IOC_LOGLEVEL _IOW(SBE_IOC_MAGIC, 0x00, int)
+#define SBE_IOC_CHAN_NEW _IOW(SBE_IOC_MAGIC, 0x01,int) /* unused */
+#define SBE_IOC_CHAN_UP _IOW(SBE_IOC_MAGIC, 0x02,int) /* unused */
+#define SBE_IOC_CHAN_DOWN _IOW(SBE_IOC_MAGIC, 0x03,int) /* unused */
+#define SBE_IOC_CHAN_GET _IOWR(SBE_IOC_MAGIC,0x04, struct sbecom_chan_param)
+#define SBE_IOC_CHAN_SET _IOW(SBE_IOC_MAGIC, 0x05, struct sbecom_chan_param)
+#define SBE_IOC_CHAN_GET_STAT _IOWR(SBE_IOC_MAGIC,0x06, struct sbecom_chan_stats)
+#define SBE_IOC_CHAN_DEL_STAT _IOW(SBE_IOC_MAGIC, 0x07, int)
+#define SBE_IOC_PORTS_ENABLE _IOW(SBE_IOC_MAGIC, 0x0A, int)
+#define SBE_IOC_PORT_GET _IOWR(SBE_IOC_MAGIC,0x0C, struct sbecom_port_param)
+#define SBE_IOC_PORT_SET _IOW(SBE_IOC_MAGIC, 0x0D, struct sbecom_port_param)
+#define SBE_IOC_READ_VEC _IOWR(SBE_IOC_MAGIC,0x10, struct sbecom_wrt_vec)
+#define SBE_IOC_WRITE_VEC _IOWR(SBE_IOC_MAGIC,0x11, struct sbecom_wrt_vec)
+#define SBE_IOC_GET_SN _IOR(SBE_IOC_MAGIC, 0x12, u_int32_t)
+#define SBE_IOC_RESET_DEV _IOW(SBE_IOC_MAGIC, 0x13, int)
+#define SBE_IOC_FRAMER_GET _IOWR(SBE_IOC_MAGIC,0x14, struct sbecom_framer_param)
+#define SBE_IOC_FRAMER_SET _IOW(SBE_IOC_MAGIC, 0x15, struct sbecom_framer_param)
+#define SBE_IOC_CARD_GET _IOR(SBE_IOC_MAGIC, 0x20, struct sbecom_card_param)
+#define SBE_IOC_CARD_SET _IOW(SBE_IOC_MAGIC, 0x21, struct sbecom_card_param)
+#define SBE_IOC_CARD_GET_STAT _IOR(SBE_IOC_MAGIC, 0x22, struct temux_card_stats)
+#define SBE_IOC_CARD_DEL_STAT _IO(SBE_IOC_MAGIC, 0x23)
+#define SBE_IOC_CARD_CHAN_STAT _IOR(SBE_IOC_MAGIC, 0x24, struct sbecom_chan_stats)
+#define SBE_IOC_CARD_BLINK _IOW(SBE_IOC_MAGIC, 0x30, int)
+#define SBE_IOC_DRVINFO_GET _IOWR(SBE_IOC_MAGIC,0x31, struct sbe_drv_info)
+#define SBE_IOC_BRDINFO_GET _IOR(SBE_IOC_MAGIC, 0x32, struct sbe_brd_info)
+#define SBE_IOC_IID_GET _IOWR(SBE_IOC_MAGIC,0x33, struct sbe_iid_info)
+#define SBE_IOC_BRDADDR_GET _IOWR(SBE_IOC_MAGIC, 0x34, struct sbe_brd_addr)
+
+#ifdef NOT_YET_COMMON
+#define SBE_IOC_TSIOC_GET _IOWR(SBE_IOC_MAGIC,0x16, struct wanc1t3_ts_param)
+#define SBE_IOC_TSIOC_SET _IOW(SBE_IOC_MAGIC, 0x17, struct wanc1t3_ts_param)
+#endif
+
+/*
+ * Restrict SBE_IOC_WRITE_VEC & READ_VEC to a single parameter pair; the
+ * application must then issue multiple Ioctls for large blocks of contiguous
+ * data.
+ */
+
+#define SBE_IOC_MAXVEC 1
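+
+/*
+ * Illustrative user-space invocation only (a sketch; "fd" is assumed to be
+ * an open descriptor on the driver's device node):
+ *
+ *     u_int32_t sn;
+ *     if (ioctl (fd, SBE_IOC_GET_SN, &sn) == 0)
+ *         printf ("board serial: %06X\n", sn);
+ */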
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*** _INC_SBEWIOC_H_ ***/
diff --git a/drivers/staging/dream/Kconfig b/drivers/staging/dream/Kconfig
index 4afa081c870..0c30b19a5a7 100644
--- a/drivers/staging/dream/Kconfig
+++ b/drivers/staging/dream/Kconfig
@@ -1,16 +1,13 @@
config DREAM
- tristate "HTC Dream support"
- depends on BROKEN
+ tristate "HTC Dream support"
+ depends on MACH_TROUT
-source "drivers/staging/dream/smd/Kconfig"
+if DREAM
source "drivers/staging/dream/camera/Kconfig"
-
config INPUT_GPIO
tristate "GPIO driver support"
help
Say Y here if you want to support gpio based keys, wheels etc...
-
-
-
+endif
diff --git a/drivers/staging/dream/Makefile b/drivers/staging/dream/Makefile
index 2b791519707..fbea0abcc86 100644
--- a/drivers/staging/dream/Makefile
+++ b/drivers/staging/dream/Makefile
@@ -1,4 +1,5 @@
-obj-$(CONFIG_MSM_ADSP) += qdsp5/ smd/
+EXTRA_CFLAGS=-Idrivers/staging/dream/include
+obj-$(CONFIG_MSM_ADSP) += qdsp5/
obj-$(CONFIG_MSM_CAMERA) += camera/
obj-$(CONFIG_INPUT_GPIO) += gpio_axis.o gpio_event.o gpio_input.o gpio_matrix.o gpio_output.o
diff --git a/drivers/staging/dream/TODO b/drivers/staging/dream/TODO
index c07c8803f07..dcd3ba80865 100644
--- a/drivers/staging/dream/TODO
+++ b/drivers/staging/dream/TODO
@@ -1,4 +1,3 @@
-* remove support for wakelocks since those are not in mainline
* camera driver uses old V4L API
diff --git a/drivers/staging/dream/camera/msm_vfe8x_proc.c b/drivers/staging/dream/camera/msm_vfe8x_proc.c
index 10aef0e59ba..f80ef967ba8 100644
--- a/drivers/staging/dream/camera/msm_vfe8x_proc.c
+++ b/drivers/staging/dream/camera/msm_vfe8x_proc.c
@@ -3828,7 +3828,7 @@ void vfe_camif_config(struct vfe_cmd_camif_config *in)
ctrl->vfeImaskLocal.camifEpoch2Irq = 1;
}
- /* save the content to program CAMIF_CONFIG seperately. */
+ /* save the content to program CAMIF_CONFIG separately. */
ctrl->vfeCamifConfigLocal.camifCfgFromCmd = in->camifConfig;
/* EFS_Config */
diff --git a/drivers/staging/dream/pmem.c b/drivers/staging/dream/pmem.c
index 6edfdd4ef80..6387365a833 100644
--- a/drivers/staging/dream/pmem.c
+++ b/drivers/staging/dream/pmem.c
@@ -38,17 +38,17 @@
* the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
-#define PMEM_FLAGS_CONNECTED ( 0x1 << 1 )
+#define PMEM_FLAGS_CONNECTED (0x1 << 1)
/* indicates this is a master and not a sub allocation and that it is mmaped */
-#define PMEM_FLAGS_MASTERMAP ( 0x1 << 2 )
+#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
/* submap and unsubmap flags indicate:
* 00: subregion has never been mmaped
* 10: subregion has been mmaped, reference to the mm was taken
* 11: subretion has ben released, refernece to the mm still held
* 01: subretion has been released, reference to the mm has been released
*/
-#define PMEM_FLAGS_SUBMAP ( 0x1 << 3 )
-#define PMEM_FLAGS_UNSUBMAP ( 0x1 << 4 )
+#define PMEM_FLAGS_SUBMAP (0x1 << 3)
+#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)
struct pmem_data {
@@ -153,7 +153,7 @@ struct pmem_info {
static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
-#define PMEM_IS_FREE(id, index) ( !(pmem[id].bitmap[index].allocated) )
+#define PMEM_IS_FREE(id, index) (!(pmem[id].bitmap[index].allocated))
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
@@ -845,8 +845,8 @@ static int pmem_connect(unsigned long connect, struct file *file)
src_data = (struct pmem_data *)src_file->private_data;
if (has_allocation(file) && (data->index != src_data->index)) {
- printk(KERN_INFO "pmem: file is already mapped but doesn't match this"
- " src_file!\n");
+ printk(KERN_INFO "pmem: file is already mapped but doesn't "
+ "match this src_file!\n");
ret = -EINVAL;
goto err_bad_file;
}
@@ -935,8 +935,8 @@ int pmem_remap(struct pmem_region *region, struct file *file,
if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
!PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
- printk(KERN_DEBUG "pmem: request for unaligned pmem suballocation "
- "%lx %lx\n", region->offset, region->len);
+ printk(KERN_DEBUG "pmem: request for unaligned pmem "
+ "suballocation %lx %lx\n", region->offset, region->len);
#endif
return -EINVAL;
}
@@ -1086,8 +1086,8 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
region.offset = pmem_start_addr(id, data);
region.len = pmem_len(id, data);
}
- printk(KERN_INFO "pmem: request for physical address of pmem region "
- "from process %d.\n", current->pid);
+ printk(KERN_INFO "pmem: request for physical address "
+ "of pmem region from process %d.\n", current->pid);
if (copy_to_user((void __user *)arg, &region,
sizeof(struct pmem_region)))
return -EFAULT;
@@ -1245,14 +1245,11 @@ int pmem_setup(struct android_pmem_platform_data *pdata,
}
pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
- pmem[id].bitmap = kmalloc(pmem[id].num_entries *
+ pmem[id].bitmap = kcalloc(pmem[id].num_entries,
sizeof(struct pmem_bits), GFP_KERNEL);
if (!pmem[id].bitmap)
goto err_no_mem_for_metadata;
- memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
- pmem[id].num_entries);
-
for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
if ((pmem[id].num_entries) & 1<<i) {
PMEM_ORDER(id, index) = i;
diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
index fe7809dd440..76d7fa5667d 100644
--- a/drivers/staging/dream/qdsp5/audio_out.c
+++ b/drivers/staging/dream/qdsp5/audio_out.c
@@ -182,9 +182,6 @@ struct audio {
int stopped; /* set when stopped, cleared on flush */
unsigned volume;
- struct wake_lock wakelock;
- struct wake_lock idlelock;
-
int adrc_enable;
struct adrc_filter adrc;
@@ -198,14 +195,10 @@ struct audio {
static void audio_prevent_sleep(struct audio *audio)
{
printk(KERN_INFO "++++++++++++++++++++++++++++++\n");
- wake_lock(&audio->wakelock);
- wake_lock(&audio->idlelock);
}
static void audio_allow_sleep(struct audio *audio)
{
- wake_unlock(&audio->wakelock);
- wake_unlock(&audio->idlelock);
printk(KERN_INFO "------------------------------\n");
}
@@ -840,8 +833,6 @@ static int __init audio_init(void)
mutex_init(&the_audio.write_lock);
spin_lock_init(&the_audio.dsp_lock);
init_waitqueue_head(&the_audio.wait);
- wake_lock_init(&the_audio.wakelock, WAKE_LOCK_SUSPEND, "audio_pcm");
- wake_lock_init(&the_audio.idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle");
return (misc_register(&audio_misc) || misc_register(&audpp_misc));
}
diff --git a/drivers/staging/dream/smd/Kconfig b/drivers/staging/dream/smd/Kconfig
deleted file mode 100644
index 17b8bdc7b9b..00000000000
--- a/drivers/staging/dream/smd/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-config MSM_SMD
- depends on ARCH_MSM
- default y
- bool "MSM Shared Memory Driver (SMD)"
- help
- Support for the shared memory interface between the apps
- processor and the baseband processor. Provides access to
- the "shared heap", as well as virtual serial channels
- used to communicate with various services on the baseband
- processor.
-
-config MSM_ONCRPCROUTER
- depends on MSM_SMD
- default y
- bool "MSM ONCRPC router support"
- help
- Support for the MSM ONCRPC router for communication between
- the ARM9 and ARM11
-
-config MSM_RPCSERVERS
- depends on MSM_ONCRPCROUTER
- default y
- bool "Kernel side RPC server bundle"
- help
- none
-
diff --git a/drivers/staging/dream/smd/Makefile b/drivers/staging/dream/smd/Makefile
deleted file mode 100644
index 1c87618366a..00000000000
--- a/drivers/staging/dream/smd/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_CFLAGS=-Idrivers/staging/dream/include
-obj-$(CONFIG_MSM_SMD) += smd.o smd_tty.o smd_qmi.o
-obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter.o
-obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_device.o
-obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_servers.o
-obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_dog_keepalive.o
-obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_time_remote.o
diff --git a/drivers/staging/dream/smd/rpc_server_dog_keepalive.c b/drivers/staging/dream/smd/rpc_server_dog_keepalive.c
deleted file mode 100644
index b23fccfa87e..00000000000
--- a/drivers/staging/dream/smd/rpc_server_dog_keepalive.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/* arch/arm/mach-msm/rpc_server_dog_keepalive.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Iliyan Malchev <ibm@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <mach/msm_rpcrouter.h>
-
-/* dog_keepalive server definitions */
-
-#define DOG_KEEPALIVE_PROG 0x30000015
-#if CONFIG_MSM_AMSS_VERSION==6210
-#define DOG_KEEPALIVE_VERS 0
-#define RPC_DOG_KEEPALIVE_BEACON 1
-#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225)
-#define DOG_KEEPALIVE_VERS 0x731fa727
-#define RPC_DOG_KEEPALIVE_BEACON 2
-#elif CONFIG_MSM_AMSS_VERSION==6350
-#define DOG_KEEPALIVE_VERS 0x00010000
-#define RPC_DOG_KEEPALIVE_BEACON 2
-#else
-#error "Unsupported AMSS version"
-#endif
-#define RPC_DOG_KEEPALIVE_NULL 0
-
-
-/* TODO: Remove server registration with _VERS when modem is upated with _COMP*/
-
-static int handle_rpc_call(struct msm_rpc_server *server,
- struct rpc_request_hdr *req, unsigned len)
-{
- switch (req->procedure) {
- case RPC_DOG_KEEPALIVE_NULL:
- return 0;
- case RPC_DOG_KEEPALIVE_BEACON:
- printk(KERN_INFO "DOG KEEPALIVE PING\n");
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static struct msm_rpc_server rpc_server = {
- .prog = DOG_KEEPALIVE_PROG,
- .vers = DOG_KEEPALIVE_VERS,
- .rpc_call = handle_rpc_call,
-};
-
-static int __init rpc_server_init(void)
-{
- /* Dual server registration to support backwards compatibility vers */
- return msm_rpc_create_server(&rpc_server);
-}
-
-
-module_init(rpc_server_init);
diff --git a/drivers/staging/dream/smd/rpc_server_time_remote.c b/drivers/staging/dream/smd/rpc_server_time_remote.c
deleted file mode 100644
index 2f90fc88c38..00000000000
--- a/drivers/staging/dream/smd/rpc_server_time_remote.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/* arch/arm/mach-msm/rpc_server_time_remote.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Iliyan Malchev <ibm@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <mach/msm_rpcrouter.h>
-
-/* time_remote_mtoa server definitions. */
-
-#define TIME_REMOTE_MTOA_PROG 0x3000005d
-#if CONFIG_MSM_AMSS_VERSION==6210
-#define TIME_REMOTE_MTOA_VERS 0
-#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225)
-#define TIME_REMOTE_MTOA_VERS 0x9202a8e4
-#elif CONFIG_MSM_AMSS_VERSION==6350
-#define TIME_REMOTE_MTOA_VERS 0x00010000
-#else
-#error "Unknown AMSS version"
-#endif
-#define RPC_TIME_REMOTE_MTOA_NULL 0
-#define RPC_TIME_TOD_SET_APPS_BASES 2
-
-struct rpc_time_tod_set_apps_bases_args {
- uint32_t tick;
- uint64_t stamp;
-};
-
-static int handle_rpc_call(struct msm_rpc_server *server,
- struct rpc_request_hdr *req, unsigned len)
-{
- switch (req->procedure) {
- case RPC_TIME_REMOTE_MTOA_NULL:
- return 0;
-
- case RPC_TIME_TOD_SET_APPS_BASES: {
- struct rpc_time_tod_set_apps_bases_args *args;
- args = (struct rpc_time_tod_set_apps_bases_args *)(req + 1);
- args->tick = be32_to_cpu(args->tick);
- args->stamp = be64_to_cpu(args->stamp);
- printk(KERN_INFO "RPC_TIME_TOD_SET_APPS_BASES:\n"
- "\ttick = %d\n"
- "\tstamp = %lld\n",
- args->tick, args->stamp);
- return 0;
- }
- default:
- return -ENODEV;
- }
-}
-
-static struct msm_rpc_server rpc_server = {
- .prog = TIME_REMOTE_MTOA_PROG,
- .vers = TIME_REMOTE_MTOA_VERS,
- .rpc_call = handle_rpc_call,
-};
-
-static int __init rpc_server_init(void)
-{
- /* Dual server registration to support backwards compatibility vers */
- return msm_rpc_create_server(&rpc_server);
-}
-
-
-module_init(rpc_server_init);
diff --git a/drivers/staging/dream/smd/smd.c b/drivers/staging/dream/smd/smd.c
deleted file mode 100644
index 8f35be7193f..00000000000
--- a/drivers/staging/dream/smd/smd.c
+++ /dev/null
@@ -1,1330 +0,0 @@
-/* arch/arm/mach-msm/smd.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include <mach/msm_smd.h>
-#include <mach/msm_iomap.h>
-#include <mach/system.h>
-
-#include "smd_private.h"
-#include "../../../../arch/arm/mach-msm/proc_comm.h"
-
-void (*msm_hw_reset_hook)(void);
-
-#define MODULE_NAME "msm_smd"
-
-enum {
- MSM_SMD_DEBUG = 1U << 0,
- MSM_SMSM_DEBUG = 1U << 0,
-};
-
-static int msm_smd_debug_mask;
-
-module_param_named(debug_mask, msm_smd_debug_mask,
- int, S_IRUGO | S_IWUSR | S_IWGRP);
-
-void *smem_find(unsigned id, unsigned size);
-static void smd_diag(void);
-
-static unsigned last_heap_free = 0xffffffff;
-
-#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
-
-static inline void notify_other_smsm(void)
-{
- writel(1, MSM_A2M_INT(5));
-}
-
-static inline void notify_other_smd(void)
-{
- writel(1, MSM_A2M_INT(0));
-}
-
-static void smd_diag(void)
-{
- char *x;
-
- x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
- if (x != 0) {
- x[SZ_DIAG_ERR_MSG - 1] = 0;
- pr_info("smem: DIAG '%s'\n", x);
- }
-}
-
-/* call when SMSM_RESET flag is set in the A9's smsm_state */
-static void handle_modem_crash(void)
-{
- pr_err("ARM9 has CRASHED\n");
- smd_diag();
-
- /* hard reboot if possible */
- if (msm_hw_reset_hook)
- msm_hw_reset_hook();
-
- /* in this case the modem or watchdog should reboot us */
- for (;;)
- ;
-}
-
-extern int (*msm_check_for_modem_crash)(void);
-
-static int check_for_modem_crash(void)
-{
- struct smsm_shared *smsm;
-
- smsm = smem_find(ID_SHARED_STATE, 2 * sizeof(struct smsm_shared));
-
- /* if the modem's not ready yet, we have to hope for the best */
- if (!smsm)
- return 0;
-
- if (smsm[1].state & SMSM_RESET) {
- handle_modem_crash();
- return -1;
- } else {
- return 0;
- }
-}
-
-#define SMD_SS_CLOSED 0x00000000
-#define SMD_SS_OPENING 0x00000001
-#define SMD_SS_OPENED 0x00000002
-#define SMD_SS_FLUSHING 0x00000003
-#define SMD_SS_CLOSING 0x00000004
-#define SMD_SS_RESET 0x00000005
-#define SMD_SS_RESET_OPENING 0x00000006
-
-#define SMD_BUF_SIZE 8192
-#define SMD_CHANNELS 64
-
-#define SMD_HEADER_SIZE 20
-
-
-/* the spinlock is used to synchronize between the
-** irq handler and code that mutates the channel
-** list or fiddles with channel state
-*/
-static DEFINE_SPINLOCK(smd_lock);
-static DEFINE_SPINLOCK(smem_lock);
-
-/* the mutex is used during open() and close()
-** operations to avoid races while creating or
-** destroying smd_channel structures
-*/
-static DEFINE_MUTEX(smd_creation_mutex);
-
-static int smd_initialized;
-
-struct smd_alloc_elm {
- char name[20];
- uint32_t cid;
- uint32_t ctype;
- uint32_t ref_count;
-};
-
-struct smd_half_channel {
- unsigned state;
- unsigned char fDSR;
- unsigned char fCTS;
- unsigned char fCD;
- unsigned char fRI;
- unsigned char fHEAD;
- unsigned char fTAIL;
- unsigned char fSTATE;
- unsigned char fUNUSED;
- unsigned tail;
- unsigned head;
- unsigned char data[SMD_BUF_SIZE];
-};
-
-struct smd_shared {
- struct smd_half_channel ch0;
- struct smd_half_channel ch1;
-};
-
-struct smd_channel {
- volatile struct smd_half_channel *send;
- volatile struct smd_half_channel *recv;
- struct list_head ch_list;
-
- unsigned current_packet;
- unsigned n;
- void *priv;
- void (*notify)(void *priv, unsigned flags);
-
- int (*read)(smd_channel_t *ch, void *data, int len);
- int (*write)(smd_channel_t *ch, const void *data, int len);
- int (*read_avail)(smd_channel_t *ch);
- int (*write_avail)(smd_channel_t *ch);
-
- void (*update_state)(smd_channel_t *ch);
- unsigned last_state;
-
- char name[32];
- struct platform_device pdev;
-};
-
-static LIST_HEAD(smd_ch_closed_list);
-static LIST_HEAD(smd_ch_list);
-
-static unsigned char smd_ch_allocated[64];
-static struct work_struct probe_work;
-
-static void smd_alloc_channel(const char *name, uint32_t cid, uint32_t type);
-
-static void smd_channel_probe_worker(struct work_struct *work)
-{
- struct smd_alloc_elm *shared;
- unsigned n;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
-
- for (n = 0; n < 64; n++) {
- if (smd_ch_allocated[n])
- continue;
- if (!shared[n].ref_count)
- continue;
- if (!shared[n].name[0])
- continue;
- smd_alloc_channel(shared[n].name,
- shared[n].cid,
- shared[n].ctype);
- smd_ch_allocated[n] = 1;
- }
-}
-
-static char *chstate(unsigned n)
-{
- switch (n) {
- case SMD_SS_CLOSED:
- return "CLOSED";
- case SMD_SS_OPENING:
- return "OPENING";
- case SMD_SS_OPENED:
- return "OPENED";
- case SMD_SS_FLUSHING:
- return "FLUSHING";
- case SMD_SS_CLOSING:
- return "CLOSING";
- case SMD_SS_RESET:
- return "RESET";
- case SMD_SS_RESET_OPENING:
- return "ROPENING";
- default:
- return "UNKNOWN";
- }
-}
-
-/* how many bytes are available for reading */
-static int smd_stream_read_avail(struct smd_channel *ch)
-{
- return (ch->recv->head - ch->recv->tail) & (SMD_BUF_SIZE - 1);
-}
-
-/* how many bytes we are free to write */
-static int smd_stream_write_avail(struct smd_channel *ch)
-{
- return (SMD_BUF_SIZE - 1) -
- ((ch->send->head - ch->send->tail) & (SMD_BUF_SIZE - 1));
-}
-
-static int smd_packet_read_avail(struct smd_channel *ch)
-{
- if (ch->current_packet) {
- int n = smd_stream_read_avail(ch);
- if (n > ch->current_packet)
- n = ch->current_packet;
- return n;
- } else {
- return 0;
- }
-}
-
-static int smd_packet_write_avail(struct smd_channel *ch)
-{
- int n = smd_stream_write_avail(ch);
- return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
-}
-
-static int ch_is_open(struct smd_channel *ch)
-{
- return (ch->recv->state == SMD_SS_OPENED) &&
- (ch->send->state == SMD_SS_OPENED);
-}
-
-/* provide a pointer and length to readable data in the fifo */
-static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
-{
- unsigned head = ch->recv->head;
- unsigned tail = ch->recv->tail;
- *ptr = (void *) (ch->recv->data + tail);
-
- if (tail <= head)
- return head - tail;
- else
- return SMD_BUF_SIZE - tail;
-}
-
-/* advance the fifo read pointer after data from ch_read_buffer is consumed */
-static void ch_read_done(struct smd_channel *ch, unsigned count)
-{
- BUG_ON(count > smd_stream_read_avail(ch));
- ch->recv->tail = (ch->recv->tail + count) & (SMD_BUF_SIZE - 1);
- ch->recv->fTAIL = 1;
-}
-
-/* basic read interface to ch_read_{buffer,done} used
-** by smd_*_read() and update_packet_state()
-** will read-and-discard if the _data pointer is null
-*/
-static int ch_read(struct smd_channel *ch, void *_data, int len)
-{
- void *ptr;
- unsigned n;
- unsigned char *data = _data;
- int orig_len = len;
-
- while (len > 0) {
- n = ch_read_buffer(ch, &ptr);
- if (n == 0)
- break;
-
- if (n > len)
- n = len;
- if (_data)
- memcpy(data, ptr, n);
-
- data += n;
- len -= n;
- ch_read_done(ch, n);
- }
-
- return orig_len - len;
-}
-
-static void update_stream_state(struct smd_channel *ch)
-{
- /* streams have no special state requiring updating */
-}
-
-static void update_packet_state(struct smd_channel *ch)
-{
- unsigned hdr[5];
- int r;
-
- /* can't do anything if we're in the middle of a packet */
- if (ch->current_packet != 0)
- return;
-
- /* don't bother unless we can get the full header */
- if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
- return;
-
- r = ch_read(ch, hdr, SMD_HEADER_SIZE);
- BUG_ON(r != SMD_HEADER_SIZE);
-
- ch->current_packet = hdr[0];
-}
-
-/* provide a pointer and length to next free space in the fifo */
-static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
-{
- unsigned head = ch->send->head;
- unsigned tail = ch->send->tail;
- *ptr = (void *) (ch->send->data + head);
-
- if (head < tail) {
- return tail - head - 1;
- } else {
- if (tail == 0)
- return SMD_BUF_SIZE - head - 1;
- else
- return SMD_BUF_SIZE - head;
- }
-}
-
-/* advace the fifo write pointer after freespace
- * from ch_write_buffer is filled
- */
-static void ch_write_done(struct smd_channel *ch, unsigned count)
-{
- BUG_ON(count > smd_stream_write_avail(ch));
- ch->send->head = (ch->send->head + count) & (SMD_BUF_SIZE - 1);
- ch->send->fHEAD = 1;
-}
-
-static void hc_set_state(volatile struct smd_half_channel *hc, unsigned n)
-{
- if (n == SMD_SS_OPENED) {
- hc->fDSR = 1;
- hc->fCTS = 1;
- hc->fCD = 1;
- } else {
- hc->fDSR = 0;
- hc->fCTS = 0;
- hc->fCD = 0;
- }
- hc->state = n;
- hc->fSTATE = 1;
- notify_other_smd();
-}
-
-static void do_smd_probe(void)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- if (shared->heap_info.free_offset != last_heap_free) {
- last_heap_free = shared->heap_info.free_offset;
- schedule_work(&probe_work);
- }
-}
-
-static void smd_state_change(struct smd_channel *ch,
- unsigned last, unsigned next)
-{
- ch->last_state = next;
-
- pr_info("SMD: ch %d %s -> %s\n", ch->n,
- chstate(last), chstate(next));
-
- switch (next) {
- case SMD_SS_OPENING:
- ch->recv->tail = 0;
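- /* fall through: an OPENING peer is driven on to OPENED below */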
- case SMD_SS_OPENED:
- if (ch->send->state != SMD_SS_OPENED)
- hc_set_state(ch->send, SMD_SS_OPENED);
- ch->notify(ch->priv, SMD_EVENT_OPEN);
- break;
- case SMD_SS_FLUSHING:
- case SMD_SS_RESET:
- /* should we force them to close? */
- default:
- ch->notify(ch->priv, SMD_EVENT_CLOSE);
- }
-}
-
-static irqreturn_t smd_irq_handler(int irq, void *data)
-{
- unsigned long flags;
- struct smd_channel *ch;
- int do_notify = 0;
- unsigned ch_flags;
- unsigned tmp;
-
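- /* ch_flags bits: 1 = fHEAD seen, 2 = fTAIL seen, 4 = fSTATE seen on the receive half */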
- spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list, ch_list) {
- ch_flags = 0;
- if (ch_is_open(ch)) {
- if (ch->recv->fHEAD) {
- ch->recv->fHEAD = 0;
- ch_flags |= 1;
- do_notify |= 1;
- }
- if (ch->recv->fTAIL) {
- ch->recv->fTAIL = 0;
- ch_flags |= 2;
- do_notify |= 1;
- }
- if (ch->recv->fSTATE) {
- ch->recv->fSTATE = 0;
- ch_flags |= 4;
- do_notify |= 1;
- }
- }
- tmp = ch->recv->state;
- if (tmp != ch->last_state)
- smd_state_change(ch, ch->last_state, tmp);
- if (ch_flags) {
- ch->update_state(ch);
- ch->notify(ch->priv, SMD_EVENT_DATA);
- }
- }
- if (do_notify)
- notify_other_smd();
- spin_unlock_irqrestore(&smd_lock, flags);
- do_smd_probe();
- return IRQ_HANDLED;
-}
-
-static void smd_fake_irq_handler(unsigned long arg)
-{
- smd_irq_handler(0, NULL);
-}
-
-static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
-
-void smd_sleep_exit(void)
-{
- unsigned long flags;
- struct smd_channel *ch;
- unsigned tmp;
- int need_int = 0;
-
- spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list, ch_list) {
- if (ch_is_open(ch)) {
- if (ch->recv->fHEAD) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fHEAD "
- "%x %x %x\n",
- ch->n, ch->recv->fHEAD,
- ch->recv->head, ch->recv->tail);
- need_int = 1;
- break;
- }
- if (ch->recv->fTAIL) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fTAIL "
- "%x %x %x\n",
- ch->n, ch->recv->fTAIL,
- ch->send->head, ch->send->tail);
- need_int = 1;
- break;
- }
- if (ch->recv->fSTATE) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fSTATE %x"
- "\n", ch->n, ch->recv->fSTATE);
- need_int = 1;
- break;
- }
- tmp = ch->recv->state;
- if (tmp != ch->last_state) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d "
- "state %x != %x\n",
- ch->n, tmp, ch->last_state);
- need_int = 1;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&smd_lock, flags);
- do_smd_probe();
- if (need_int) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit need interrupt\n");
- tasklet_schedule(&smd_fake_irq_tasklet);
- }
-}
-
-
-void smd_kick(smd_channel_t *ch)
-{
- unsigned long flags;
- unsigned tmp;
-
- spin_lock_irqsave(&smd_lock, flags);
- ch->update_state(ch);
- tmp = ch->recv->state;
- if (tmp != ch->last_state) {
- ch->last_state = tmp;
- if (tmp == SMD_SS_OPENED)
- ch->notify(ch->priv, SMD_EVENT_OPEN);
- else
- ch->notify(ch->priv, SMD_EVENT_CLOSE);
- }
- ch->notify(ch->priv, SMD_EVENT_DATA);
- notify_other_smd();
- spin_unlock_irqrestore(&smd_lock, flags);
-}
-
-static int smd_is_packet(int chn)
-{
- if ((chn > 4) || (chn == 1))
- return 1;
- else
- return 0;
-}
-
-static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
-{
- void *ptr;
- const unsigned char *buf = _data;
- unsigned xfer;
- int orig_len = len;
-
- if (len < 0)
- return -EINVAL;
-
- while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
- if (!ch_is_open(ch))
- break;
- if (xfer > len)
- xfer = len;
- memcpy(ptr, buf, xfer);
- ch_write_done(ch, xfer);
- len -= xfer;
- buf += xfer;
- if (len == 0)
- break;
- }
-
- notify_other_smd();
-
- return orig_len - len;
-}
-
-static int smd_packet_write(smd_channel_t *ch, const void *_data, int len)
-{
- unsigned hdr[5];
-
- if (len < 0)
- return -EINVAL;
-
- if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
- return -ENOMEM;
-
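- /* packet framing: a 5-word header, word 0 = payload length, precedes the payload on the stream */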
- hdr[0] = len;
- hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
-
- smd_stream_write(ch, hdr, sizeof(hdr));
- smd_stream_write(ch, _data, len);
-
- return len;
-}
-
-static int smd_stream_read(smd_channel_t *ch, void *data, int len)
-{
- int r;
-
- if (len < 0)
- return -EINVAL;
-
- r = ch_read(ch, data, len);
- if (r > 0)
- notify_other_smd();
-
- return r;
-}
-
-static int smd_packet_read(smd_channel_t *ch, void *data, int len)
-{
- unsigned long flags;
- int r;
-
- if (len < 0)
- return -EINVAL;
-
- if (len > ch->current_packet)
- len = ch->current_packet;
-
- r = ch_read(ch, data, len);
- if (r > 0)
- notify_other_smd();
-
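- /* update_packet_state() pulls in the next packet header once this one is fully consumed */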
- spin_lock_irqsave(&smd_lock, flags);
- ch->current_packet -= r;
- update_packet_state(ch);
- spin_unlock_irqrestore(&smd_lock, flags);
-
- return r;
-}
-
-static void smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
-{
- struct smd_channel *ch;
- struct smd_shared *shared;
-
- shared = smem_alloc(ID_SMD_CHANNELS + cid, sizeof(*shared));
- if (!shared) {
- pr_err("smd_alloc_channel() cid %d does not exist\n", cid);
- return;
- }
-
- ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
- if (ch == 0) {
- pr_err("smd_alloc_channel() out of memory\n");
- return;
- }
-
- ch->send = &shared->ch0;
- ch->recv = &shared->ch1;
- ch->n = cid;
-
- if (smd_is_packet(cid)) {
- ch->read = smd_packet_read;
- ch->write = smd_packet_write;
- ch->read_avail = smd_packet_read_avail;
- ch->write_avail = smd_packet_write_avail;
- ch->update_state = update_packet_state;
- } else {
- ch->read = smd_stream_read;
- ch->write = smd_stream_write;
- ch->read_avail = smd_stream_read_avail;
- ch->write_avail = smd_stream_write_avail;
- ch->update_state = update_stream_state;
- }
-
- memcpy(ch->name, "SMD_", 4);
- memcpy(ch->name + 4, name, 20);
- ch->name[23] = 0;
- ch->pdev.name = ch->name;
- ch->pdev.id = -1;
-
- pr_info("smd_alloc_channel() '%s' cid=%d, shared=%p\n",
- ch->name, ch->n, shared);
-
- mutex_lock(&smd_creation_mutex);
- list_add(&ch->ch_list, &smd_ch_closed_list);
- mutex_unlock(&smd_creation_mutex);
-
- platform_device_register(&ch->pdev);
-}
-
-static void do_nothing_notify(void *priv, unsigned flags)
-{
-}
-
-struct smd_channel *smd_get_channel(const char *name)
-{
- struct smd_channel *ch;
-
- mutex_lock(&smd_creation_mutex);
- list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
- if (!strcmp(name, ch->name)) {
- list_del(&ch->ch_list);
- mutex_unlock(&smd_creation_mutex);
- return ch;
- }
- }
- mutex_unlock(&smd_creation_mutex);
-
- return NULL;
-}
-
-int smd_open(const char *name, smd_channel_t **_ch,
- void *priv, void (*notify)(void *, unsigned))
-{
- struct smd_channel *ch;
- unsigned long flags;
-
- if (smd_initialized == 0) {
- pr_info("smd_open() before smd_init()\n");
- return -ENODEV;
- }
-
- ch = smd_get_channel(name);
- if (!ch)
- return -ENODEV;
-
- if (notify == 0)
- notify = do_nothing_notify;
-
- ch->notify = notify;
- ch->current_packet = 0;
- ch->last_state = SMD_SS_CLOSED;
- ch->priv = priv;
-
- *_ch = ch;
-
- spin_lock_irqsave(&smd_lock, flags);
- list_add(&ch->ch_list, &smd_ch_list);
-
- /* If the remote side is CLOSING, we need to get it to
- * move to OPENING (which we'll do by moving from CLOSED to
- * OPENING) and then get it to move from OPENING to
- * OPENED (by doing the same state change ourselves).
- *
- * Otherwise, it should be OPENING and we can move directly
- * to OPENED so that it will follow.
- */
- if (ch->recv->state == SMD_SS_CLOSING) {
- ch->send->head = 0;
- hc_set_state(ch->send, SMD_SS_OPENING);
- } else {
- hc_set_state(ch->send, SMD_SS_OPENED);
- }
- spin_unlock_irqrestore(&smd_lock, flags);
- smd_kick(ch);
-
- return 0;
-}
-
-int smd_close(smd_channel_t *ch)
-{
- unsigned long flags;
-
- pr_info("smd_close(%p)\n", ch);
-
- if (ch == 0)
- return -1;
-
- spin_lock_irqsave(&smd_lock, flags);
- ch->notify = do_nothing_notify;
- list_del(&ch->ch_list);
- hc_set_state(ch->send, SMD_SS_CLOSED);
- spin_unlock_irqrestore(&smd_lock, flags);
-
- mutex_lock(&smd_creation_mutex);
- list_add(&ch->ch_list, &smd_ch_closed_list);
- mutex_unlock(&smd_creation_mutex);
-
- return 0;
-}
-
-int smd_read(smd_channel_t *ch, void *data, int len)
-{
- return ch->read(ch, data, len);
-}
-
-int smd_write(smd_channel_t *ch, const void *data, int len)
-{
- return ch->write(ch, data, len);
-}
-
-int smd_read_avail(smd_channel_t *ch)
-{
- return ch->read_avail(ch);
-}
-
-int smd_write_avail(smd_channel_t *ch)
-{
- return ch->write_avail(ch);
-}
-
-int smd_wait_until_readable(smd_channel_t *ch, int bytes)
-{
- return -1;
-}
-
-int smd_wait_until_writable(smd_channel_t *ch, int bytes)
-{
- return -1;
-}
-
-int smd_cur_packet_size(smd_channel_t *ch)
-{
- return ch->current_packet;
-}
-
-
-/* ------------------------------------------------------------------------- */
-
-void *smem_alloc(unsigned id, unsigned size)
-{
- return smem_find(id, size);
-}
-
-static void *_smem_find(unsigned id, unsigned *size)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- struct smem_heap_entry *toc = shared->heap_toc;
-
- if (id >= SMEM_NUM_ITEMS)
- return 0;
-
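- /* item offsets in the table of contents are relative to the shared RAM base */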
- if (toc[id].allocated) {
- *size = toc[id].size;
- return (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
- }
-
- return 0;
-}
-
-void *smem_find(unsigned id, unsigned size_in)
-{
- unsigned size;
- void *ptr;
-
- ptr = _smem_find(id, &size);
- if (!ptr)
- return 0;
-
- size_in = ALIGN(size_in, 8);
- if (size_in != size) {
- pr_err("smem_find(%d, %d): wrong size %d\n",
- id, size_in, size);
- return 0;
- }
-
- return ptr;
-}
-
-static irqreturn_t smsm_irq_handler(int irq, void *data)
-{
- unsigned long flags;
- struct smsm_shared *smsm;
-
- spin_lock_irqsave(&smem_lock, flags);
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- if (smsm == 0) {
- pr_info("<SM NO STATE>\n");
- } else {
- unsigned apps = smsm[0].state;
- unsigned modm = smsm[1].state;
-
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("<SM %08x %08x>\n", apps, modm);
- if (modm & SMSM_RESET) {
- handle_modem_crash();
- } else {
- apps |= SMSM_INIT;
- if (modm & SMSM_SMDINIT)
- apps |= SMSM_SMDINIT;
- if (modm & SMSM_RPCINIT)
- apps |= SMSM_RPCINIT;
- }
-
- if (smsm[0].state != apps) {
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("<SM %08x NOTIFY>\n", apps);
- smsm[0].state = apps;
- do_smd_probe();
- notify_other_smsm();
- }
- }
- spin_unlock_irqrestore(&smem_lock, flags);
- return IRQ_HANDLED;
-}
-
-int smsm_change_state(uint32_t clear_mask, uint32_t set_mask)
-{
- unsigned long flags;
- struct smsm_shared *smsm;
-
- spin_lock_irqsave(&smem_lock, flags);
-
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- if (smsm) {
- if (smsm[1].state & SMSM_RESET)
- handle_modem_crash();
- smsm[0].state = (smsm[0].state & ~clear_mask) | set_mask;
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_change_state %x\n",
- smsm[0].state);
- notify_other_smsm();
- }
-
- spin_unlock_irqrestore(&smem_lock, flags);
-
- if (smsm == NULL) {
- pr_err("smsm_change_state <SM NO STATE>\n");
- return -EIO;
- }
- return 0;
-}
-
-uint32_t smsm_get_state(void)
-{
- unsigned long flags;
- struct smsm_shared *smsm;
- uint32_t rv;
-
- spin_lock_irqsave(&smem_lock, flags);
-
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- if (smsm)
- rv = smsm[1].state;
- else
- rv = 0;
-
- if (rv & SMSM_RESET)
- handle_modem_crash();
-
- spin_unlock_irqrestore(&smem_lock, flags);
-
- if (smsm == NULL)
- pr_err("smsm_get_state <SM NO STATE>\n");
- return rv;
-}
-
-int smsm_set_sleep_duration(uint32_t delay)
-{
- uint32_t *ptr;
-
- ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
- if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO SLEEP_DELAY>\n");
- return -EIO;
- }
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_set_sleep_duration %d -> %d\n",
- *ptr, delay);
- *ptr = delay;
- return 0;
-}
-
-int smsm_set_interrupt_info(struct smsm_interrupt_info *info)
-{
- struct smsm_interrupt_info *ptr;
-
- ptr = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*ptr));
- if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO INT_INFO>\n");
- return -EIO;
- }
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_set_interrupt_info %x %x -> %x %x\n",
- ptr->aArm_en_mask, ptr->aArm_interrupts_pending,
- info->aArm_en_mask, info->aArm_interrupts_pending);
- *ptr = *info;
- return 0;
-}
-
-#define MAX_NUM_SLEEP_CLIENTS 64
-#define MAX_SLEEP_NAME_LEN 8
-
-#define NUM_GPIO_INT_REGISTERS 6
-#define GPIO_SMEM_NUM_GROUPS 2
-#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
-
-struct tramp_gpio_save {
- unsigned int enable;
- unsigned int detect;
- unsigned int polarity;
-};
-
-struct tramp_gpio_smem {
- uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
- uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
- uint32_t enabled[NUM_GPIO_INT_REGISTERS];
- uint32_t detection[NUM_GPIO_INT_REGISTERS];
- uint32_t polarity[NUM_GPIO_INT_REGISTERS];
-};
-
-
-void smsm_print_sleep_info(void)
-{
- unsigned long flags;
- uint32_t *ptr;
- struct tramp_gpio_smem *gpio;
- struct smsm_interrupt_info *int_info;
-
-
- spin_lock_irqsave(&smem_lock, flags);
-
- ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
-
- int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
- if (int_info)
- pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
- int_info->aArm_en_mask,
- int_info->aArm_interrupts_pending,
- int_info->aArm_wakeup_reason);
-
- gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
- if (gpio) {
- int i;
- for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
- pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
- i, gpio->enabled[i], gpio->detection[i],
- gpio->polarity[i]);
-
- for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
- pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
- i, gpio->num_fired[i], gpio->fired[i][0],
- gpio->fired[i][1]);
- }
-
- spin_unlock_irqrestore(&smem_lock, flags);
-}
-
-int smd_core_init(void)
-{
- int r;
- pr_info("smd_core_init()\n");
-
- r = request_irq(INT_A9_M2A_0, smd_irq_handler,
- IRQF_TRIGGER_RISING, "smd_dev", 0);
- if (r < 0)
- return r;
- r = enable_irq_wake(INT_A9_M2A_0);
- if (r < 0)
- pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_0\n");
-
- r = request_irq(INT_A9_M2A_5, smsm_irq_handler,
- IRQF_TRIGGER_RISING, "smsm_dev", 0);
- if (r < 0) {
- free_irq(INT_A9_M2A_0, 0);
- return r;
- }
- r = enable_irq_wake(INT_A9_M2A_5);
- if (r < 0)
- pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_5\n");
-
- /* we may have missed a signal while booting -- fake
- * an interrupt to make sure we process any existing
- * state
- */
- smsm_irq_handler(0, 0);
-
- pr_info("smd_core_init() done\n");
-
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static int dump_ch(char *buf, int max, int n,
- struct smd_half_channel *s,
- struct smd_half_channel *r)
-{
- return scnprintf(
- buf, max,
- "ch%02d:"
- " %8s(%04d/%04d) %c%c%c%c%c%c%c <->"
- " %8s(%04d/%04d) %c%c%c%c%c%c%c\n", n,
- chstate(s->state), s->tail, s->head,
- s->fDSR ? 'D' : 'd',
- s->fCTS ? 'C' : 'c',
- s->fCD ? 'C' : 'c',
- s->fRI ? 'I' : 'i',
- s->fHEAD ? 'W' : 'w',
- s->fTAIL ? 'R' : 'r',
- s->fSTATE ? 'S' : 's',
- chstate(r->state), r->tail, r->head,
- r->fDSR ? 'D' : 'd',
- r->fCTS ? 'R' : 'r',
- r->fCD ? 'C' : 'c',
- r->fRI ? 'I' : 'i',
- r->fHEAD ? 'W' : 'w',
- r->fTAIL ? 'R' : 'r',
- r->fSTATE ? 'S' : 's'
- );
-}
-
-static int debug_read_stat(char *buf, int max)
-{
- struct smsm_shared *smsm;
- char *msg;
- int i = 0;
-
- smsm = smem_find(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
-
- if (smsm) {
- if (smsm[1].state & SMSM_RESET)
- i += scnprintf(buf + i, max - i,
- "smsm: ARM9 HAS CRASHED\n");
- i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
- smsm[0].state, smsm[1].state);
- } else {
- i += scnprintf(buf + i, max - i, "smsm: cannot find\n");
- }
- if (msg) {
- msg[SZ_DIAG_ERR_MSG - 1] = 0;
- i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
- }
- return i;
-}
-
-static int debug_read_mem(char *buf, int max)
-{
- unsigned n;
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- struct smem_heap_entry *toc = shared->heap_toc;
- int i = 0;
-
- i += scnprintf(buf + i, max - i,
- "heap: init=%d free=%d remain=%d\n",
- shared->heap_info.initialized,
- shared->heap_info.free_offset,
- shared->heap_info.heap_remaining);
-
- for (n = 0; n < SMEM_NUM_ITEMS; n++) {
- if (toc[n].allocated == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%04d: offsed %08x size %08x\n",
- n, toc[n].offset, toc[n].size);
- }
- return i;
-}
-
-static int debug_read_ch(char *buf, int max)
-{
- struct smd_shared *shared;
- int n, i = 0;
-
- for (n = 0; n < SMD_CHANNELS; n++) {
- shared = smem_find(ID_SMD_CHANNELS + n,
- sizeof(struct smd_shared));
- if (shared == 0)
- continue;
- i += dump_ch(buf + i, max - i, n, &shared->ch0, &shared->ch1);
- }
-
- return i;
-}
-
-static int debug_read_version(char *buf, int max)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- unsigned version = shared->version[VERSION_MODEM];
- return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
-}
-
-static int debug_read_build_id(char *buf, int max)
-{
- unsigned size;
- void *data;
-
- data = _smem_find(SMEM_HW_SW_BUILD_ID, &size);
- if (!data)
- return 0;
-
- if (size >= max)
- size = max;
- memcpy(buf, data, size);
-
- return size;
-}
-
-static int debug_read_alloc_tbl(char *buf, int max)
-{
- struct smd_alloc_elm *shared;
- int n, i = 0;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
- if (!shared)
- return i;
-
- for (n = 0; n < 64; n++) {
- if (shared[n].ref_count == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%03d: %20s cid=%02d ctype=%d ref_count=%d\n",
- n, shared[n].name, shared[n].cid,
- shared[n].ctype, shared[n].ref_count);
- }
-
- return i;
-}
-
-static int debug_boom(char *buf, int max)
-{
- unsigned ms = 5000;
- msm_proc_comm(PCOM_RESET_MODEM, &ms, 0);
- return 0;
-}
-
-#define DEBUG_BUFMAX 4096
-static char debug_buffer[DEBUG_BUFMAX];
-
-static ssize_t debug_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int (*fill)(char *buf, int max) = file->private_data;
- int bsize = fill(debug_buffer, DEBUG_BUFMAX);
- return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
-}
-
-static int debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static const struct file_operations debug_ops = {
- .read = debug_read,
- .open = debug_open,
-};
-
-static void debug_create(const char *name, mode_t mode,
- struct dentry *dent,
- int (*fill)(char *buf, int max))
-{
- debugfs_create_file(name, mode, dent, fill, &debug_ops);
-}
-
-static void smd_debugfs_init(void)
-{
- struct dentry *dent;
-
- dent = debugfs_create_dir("smd", 0);
- if (IS_ERR(dent))
- return;
-
- debug_create("ch", 0444, dent, debug_read_ch);
- debug_create("stat", 0444, dent, debug_read_stat);
- debug_create("mem", 0444, dent, debug_read_mem);
- debug_create("version", 0444, dent, debug_read_version);
- debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
- debug_create("build", 0444, dent, debug_read_build_id);
- debug_create("boom", 0444, dent, debug_boom);
-}
-#else
-static void smd_debugfs_init(void) {}
-#endif
-
-static int __init msm_smd_probe(struct platform_device *pdev)
-{
- pr_info("smd_init()\n");
-
- INIT_WORK(&probe_work, smd_channel_probe_worker);
-
- if (smd_core_init()) {
- pr_err("smd_core_init() failed\n");
- return -1;
- }
-
- do_smd_probe();
-
- msm_check_for_modem_crash = check_for_modem_crash;
-
- smd_debugfs_init();
- smd_initialized = 1;
-
- return 0;
-}
-
-static struct platform_driver msm_smd_driver = {
- .probe = msm_smd_probe,
- .driver = {
- .name = MODULE_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init msm_smd_init(void)
-{
- return platform_driver_register(&msm_smd_driver);
-}
-
-module_init(msm_smd_init);
-
-MODULE_DESCRIPTION("MSM Shared Memory Core");
-MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dream/smd/smd_private.h b/drivers/staging/dream/smd/smd_private.h
deleted file mode 100644
index c0eb3de1be5..00000000000
--- a/drivers/staging/dream/smd/smd_private.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* arch/arm/mach-msm/smd_private.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
-#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
-
-struct smem_heap_info
-{
- unsigned initialized;
- unsigned free_offset;
- unsigned heap_remaining;
- unsigned reserved;
-};
-
-struct smem_heap_entry
-{
- unsigned allocated;
- unsigned offset;
- unsigned size;
- unsigned reserved;
-};
-
-struct smem_proc_comm
-{
- unsigned command;
- unsigned status;
- unsigned data1;
- unsigned data2;
-};
-
-#define PC_APPS 0
-#define PC_MODEM 1
-
-#define VERSION_QDSP6 4
-#define VERSION_APPS_SBL 6
-#define VERSION_MODEM_SBL 7
-#define VERSION_APPS 8
-#define VERSION_MODEM 9
-
-struct smem_shared
-{
- struct smem_proc_comm proc_comm[4];
- unsigned version[32];
- struct smem_heap_info heap_info;
- struct smem_heap_entry heap_toc[128];
-};
-
-struct smsm_shared
-{
- unsigned host;
- unsigned state;
-};
-
-struct smsm_interrupt_info
-{
- uint32_t aArm_en_mask;
- uint32_t aArm_interrupts_pending;
- uint32_t aArm_wakeup_reason;
-};
-
-#define SZ_DIAG_ERR_MSG 0xC8
-#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
-#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
-#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
-#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
-
-#define SMSM_INIT 0x000001
-#define SMSM_SMDINIT 0x000008
-#define SMSM_RPCINIT 0x000020
-#define SMSM_RESET 0x000040
-#define SMSM_RSA 0x0080
-#define SMSM_RUN 0x000100
-#define SMSM_PWRC 0x0200
-#define SMSM_TIMEWAIT 0x0400
-#define SMSM_TIMEINIT 0x0800
-#define SMSM_PWRC_EARLY_EXIT 0x1000
-#define SMSM_WFPI 0x2000
-#define SMSM_SLEEP 0x4000
-#define SMSM_SLEEPEXIT 0x8000
-#define SMSM_OEMSBL_RELEASE 0x10000
-#define SMSM_PWRC_SUSPEND 0x200000
-
-#define SMSM_WKUP_REASON_RPC 0x00000001
-#define SMSM_WKUP_REASON_INT 0x00000002
-#define SMSM_WKUP_REASON_GPIO 0x00000004
-#define SMSM_WKUP_REASON_TIMER 0x00000008
-#define SMSM_WKUP_REASON_ALARM 0x00000010
-#define SMSM_WKUP_REASON_RESET 0x00000020
-
-void *smem_alloc(unsigned id, unsigned size);
-int smsm_change_state(uint32_t clear_mask, uint32_t set_mask);
-uint32_t smsm_get_state(void);
-int smsm_set_sleep_duration(uint32_t delay);
-int smsm_set_interrupt_info(struct smsm_interrupt_info *info);
-void smsm_print_sleep_info(void);
-
-#define SMEM_NUM_SMD_CHANNELS 64
-
-typedef enum
-{
- /* fixed items */
- SMEM_PROC_COMM = 0,
- SMEM_HEAP_INFO,
- SMEM_ALLOCATION_TABLE,
- SMEM_VERSION_INFO,
- SMEM_HW_RESET_DETECT,
- SMEM_AARM_WARM_BOOT,
- SMEM_DIAG_ERR_MESSAGE,
- SMEM_SPINLOCK_ARRAY,
- SMEM_MEMORY_BARRIER_LOCATION,
-
- /* dynamic items */
- SMEM_AARM_PARTITION_TABLE,
- SMEM_AARM_BAD_BLOCK_TABLE,
- SMEM_RESERVE_BAD_BLOCKS,
- SMEM_WM_UUID,
- SMEM_CHANNEL_ALLOC_TBL,
- SMEM_SMD_BASE_ID,
- SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_CHANNELS,
- SMEM_SMEM_LOG_EVENTS,
- SMEM_SMEM_STATIC_LOG_IDX,
- SMEM_SMEM_STATIC_LOG_EVENTS,
- SMEM_SMEM_SLOW_CLOCK_SYNC,
- SMEM_SMEM_SLOW_CLOCK_VALUE,
- SMEM_BIO_LED_BUF,
- SMEM_SMSM_SHARED_STATE,
- SMEM_SMSM_INT_INFO,
- SMEM_SMSM_SLEEP_DELAY,
- SMEM_SMSM_LIMIT_SLEEP,
- SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
- SMEM_KEYPAD_KEYS_PRESSED,
- SMEM_KEYPAD_STATE_UPDATED,
- SMEM_KEYPAD_STATE_IDX,
- SMEM_GPIO_INT,
- SMEM_MDDI_LCD_IDX,
- SMEM_MDDI_HOST_DRIVER_STATE,
- SMEM_MDDI_LCD_DISP_STATE,
- SMEM_LCD_CUR_PANEL,
- SMEM_MARM_BOOT_SEGMENT_INFO,
- SMEM_AARM_BOOT_SEGMENT_INFO,
- SMEM_SLEEP_STATIC,
- SMEM_SCORPION_FREQUENCY,
- SMEM_SMD_PROFILES,
- SMEM_TSSC_BUSY,
- SMEM_HS_SUSPEND_FILTER_INFO,
- SMEM_BATT_INFO,
- SMEM_APPS_BOOT_MODE,
- SMEM_VERSION_FIRST,
- SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
- SMEM_OSS_RRCASN1_BUF1,
- SMEM_OSS_RRCASN1_BUF2,
- SMEM_ID_VENDOR0,
- SMEM_ID_VENDOR1,
- SMEM_ID_VENDOR2,
- SMEM_HW_SW_BUILD_ID,
- SMEM_NUM_ITEMS,
-} smem_mem_type;
-
-#endif
diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
deleted file mode 100644
index 687db142904..00000000000
--- a/drivers/staging/dream/smd/smd_qmi.c
+++ /dev/null
@@ -1,855 +0,0 @@
-/* arch/arm/mach-msm/smd_qmi.c
- *
- * QMI Control Driver -- Manages network data connections.
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/miscdevice.h>
-#include <linux/workqueue.h>
-
-#include <asm/uaccess.h>
-#include <mach/msm_smd.h>
-
-#define QMI_CTL 0x00
-#define QMI_WDS 0x01
-#define QMI_DMS 0x02
-#define QMI_NAS 0x03
-
-#define QMI_RESULT_SUCCESS 0x0000
-#define QMI_RESULT_FAILURE 0x0001
-
-struct qmi_msg {
- unsigned char service;
- unsigned char client_id;
- unsigned short txn_id;
- unsigned short type;
- unsigned short size;
- unsigned char *tlv;
-};
-
-#define qmi_ctl_client_id 0
-
-#define STATE_OFFLINE 0
-#define STATE_QUERYING 1
-#define STATE_ONLINE 2
-
-struct qmi_ctxt {
- struct miscdevice misc;
-
- struct mutex lock;
-
- unsigned char ctl_txn_id;
- unsigned char wds_client_id;
- unsigned short wds_txn_id;
-
- unsigned wds_busy;
- unsigned wds_handle;
- unsigned state_dirty;
- unsigned state;
-
- unsigned char addr[4];
- unsigned char mask[4];
- unsigned char gateway[4];
- unsigned char dns1[4];
- unsigned char dns2[4];
-
- smd_channel_t *ch;
- const char *ch_name;
-
- struct work_struct open_work;
- struct work_struct read_work;
-};
-
-static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n);
-
-static void qmi_read_work(struct work_struct *ws);
-static void qmi_open_work(struct work_struct *work);
-
-void qmi_ctxt_init(struct qmi_ctxt *ctxt, unsigned n)
-{
- mutex_init(&ctxt->lock);
- INIT_WORK(&ctxt->read_work, qmi_read_work);
- INIT_WORK(&ctxt->open_work, qmi_open_work);
- ctxt->ctl_txn_id = 1;
- ctxt->wds_txn_id = 1;
- ctxt->wds_busy = 1;
- ctxt->state = STATE_OFFLINE;
-
-}
-
-static struct workqueue_struct *qmi_wq;
-
-static int verbose = 0;
-
-/* anyone waiting for a state change waits here */
-static DECLARE_WAIT_QUEUE_HEAD(qmi_wait_queue);
-
-
-static void qmi_dump_msg(struct qmi_msg *msg, const char *prefix)
-{
- unsigned sz, n;
- unsigned char *x;
-
- if (!verbose)
- return;
-
- printk(KERN_INFO
- "qmi: %s: svc=%02x cid=%02x tid=%04x type=%04x size=%04x\n",
- prefix, msg->service, msg->client_id,
- msg->txn_id, msg->type, msg->size);
-
- x = msg->tlv;
- sz = msg->size;
-
- while (sz >= 3) {
- sz -= 3;
-
- n = x[1] | (x[2] << 8);
- if (n > sz)
- break;
-
- printk(KERN_INFO "qmi: %s: tlv: %02x %04x { ",
- prefix, x[0], n);
- x += 3;
- sz -= n;
- while (n-- > 0)
- printk("%02x ", *x++);
- printk("}\n");
- }
-}
-
-int qmi_add_tlv(struct qmi_msg *msg,
- unsigned type, unsigned size, const void *data)
-{
- unsigned char *x = msg->tlv + msg->size;
-
- x[0] = type;
- x[1] = size;
- x[2] = size >> 8;
-
- memcpy(x + 3, data, size);
-
- msg->size += (size + 3);
-
- return 0;
-}
-
-/* Extract a tagged item from a qmi message buffer,
-** taking care not to overrun the buffer.
-*/
-static int qmi_get_tlv(struct qmi_msg *msg,
- unsigned type, unsigned size, void *data)
-{
- unsigned char *x = msg->tlv;
- unsigned len = msg->size;
- unsigned n;
-
- while (len >= 3) {
- len -= 3;
-
- /* size of this item */
- n = x[1] | (x[2] << 8);
- if (n > len)
- break;
-
- if (x[0] == type) {
- if (n != size)
- return -1;
- memcpy(data, x + 3, size);
- return 0;
- }
-
- x += (n + 3);
- len -= n;
- }
-
- return -1;
-}
-
-static unsigned qmi_get_status(struct qmi_msg *msg, unsigned *error)
-{
- unsigned short status[2];
- if (qmi_get_tlv(msg, 0x02, sizeof(status), status)) {
- *error = 0;
- return QMI_RESULT_FAILURE;
- } else {
- *error = status[1];
- return status[0];
- }
-}
-
-/* 0x01 <qmux-header> <payload> */
-#define QMUX_HEADER 13
-
-/* should be >= HEADER + FOOTER */
-#define QMUX_OVERHEAD 16
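-
-/* wire format: 0x01 ifc selector, 2-byte qmux length, flags, service, client id, qmi header, then tlvs */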
-
-static int qmi_send(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
-{
- unsigned char *data;
- unsigned hlen;
- unsigned len;
- int r;
-
- qmi_dump_msg(msg, "send");
-
- if (msg->service == QMI_CTL) {
- hlen = QMUX_HEADER - 1;
- } else {
- hlen = QMUX_HEADER;
- }
-
- /* QMUX length is total header + total payload - IFC selector */
- len = hlen + msg->size - 1;
- if (len > 0xffff)
- return -1;
-
- data = msg->tlv - hlen;
-
- /* prepend encap and qmux header */
- *data++ = 0x01; /* ifc selector */
-
- /* qmux header */
- *data++ = len;
- *data++ = len >> 8;
- *data++ = 0x00; /* flags: client */
- *data++ = msg->service;
- *data++ = msg->client_id;
-
- /* qmi header */
- *data++ = 0x00; /* flags: send */
- *data++ = msg->txn_id;
- if (msg->service != QMI_CTL)
- *data++ = msg->txn_id >> 8;
-
- *data++ = msg->type;
- *data++ = msg->type >> 8;
- *data++ = msg->size;
- *data++ = msg->size >> 8;
-
- /* len + 1 takes the interface selector into account */
- r = smd_write(ctxt->ch, msg->tlv - hlen, len + 1);
-
- if (r != len + 1) {
- return -1;
- } else {
- return 0;
- }
-}
-
-static void qmi_process_ctl_msg(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
-{
- unsigned err;
- if (msg->type == 0x0022) {
- unsigned char n[2];
- if (qmi_get_status(msg, &err))
- return;
- if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
- return;
- if (n[0] == QMI_WDS) {
- printk(KERN_INFO
- "qmi: ctl: wds use client_id 0x%02x\n", n[1]);
- ctxt->wds_client_id = n[1];
- ctxt->wds_busy = 0;
- }
- }
-}
-
-static int qmi_network_get_profile(struct qmi_ctxt *ctxt);
-
-static void swapaddr(unsigned char *src, unsigned char *dst)
-{
- dst[0] = src[3];
- dst[1] = src[2];
- dst[2] = src[1];
- dst[3] = src[0];
-}
-
-static unsigned char zero[4];
-static void qmi_read_runtime_profile(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
-{
- unsigned char tmp[4];
- unsigned r;
-
- r = qmi_get_tlv(msg, 0x1e, 4, tmp);
- swapaddr(r ? zero : tmp, ctxt->addr);
- r = qmi_get_tlv(msg, 0x21, 4, tmp);
- swapaddr(r ? zero : tmp, ctxt->mask);
- r = qmi_get_tlv(msg, 0x20, 4, tmp);
- swapaddr(r ? zero : tmp, ctxt->gateway);
- r = qmi_get_tlv(msg, 0x15, 4, tmp);
- swapaddr(r ? zero : tmp, ctxt->dns1);
- r = qmi_get_tlv(msg, 0x16, 4, tmp);
- swapaddr(r ? zero : tmp, ctxt->dns2);
-}
-
-static void qmi_process_unicast_wds_msg(struct qmi_ctxt *ctxt,
- struct qmi_msg *msg)
-{
- unsigned err;
- switch (msg->type) {
- case 0x0021:
- if (qmi_get_status(msg, &err)) {
- printk(KERN_ERR
- "qmi: wds: network stop failed (%04x)\n", err);
- } else {
- printk(KERN_INFO
- "qmi: wds: network stopped\n");
- ctxt->state = STATE_OFFLINE;
- ctxt->state_dirty = 1;
- }
- break;
- case 0x0020:
- if (qmi_get_status(msg, &err)) {
- printk(KERN_ERR
- "qmi: wds: network start failed (%04x)\n", err);
- } else if (qmi_get_tlv(msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle)) {
- printk(KERN_INFO
- "qmi: wds no handle?\n");
- } else {
- printk(KERN_INFO
- "qmi: wds: got handle 0x%08x\n",
- ctxt->wds_handle);
- }
- break;
- case 0x002D:
- printk("qmi: got network profile\n");
- if (ctxt->state == STATE_QUERYING) {
- qmi_read_runtime_profile(ctxt, msg);
- ctxt->state = STATE_ONLINE;
- ctxt->state_dirty = 1;
- }
- break;
- default:
- printk(KERN_ERR "qmi: unknown msg type 0x%04x\n", msg->type);
- }
- ctxt->wds_busy = 0;
-}
-
-static void qmi_process_broadcast_wds_msg(struct qmi_ctxt *ctxt,
- struct qmi_msg *msg)
-{
- if (msg->type == 0x0022) {
- unsigned char n[2];
- if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
- return;
- switch (n[0]) {
- case 1:
- printk(KERN_INFO "qmi: wds: DISCONNECTED\n");
- ctxt->state = STATE_OFFLINE;
- ctxt->state_dirty = 1;
- break;
- case 2:
- printk(KERN_INFO "qmi: wds: CONNECTED\n");
- ctxt->state = STATE_QUERYING;
- ctxt->state_dirty = 1;
- qmi_network_get_profile(ctxt);
- break;
- case 3:
- printk(KERN_INFO "qmi: wds: SUSPENDED\n");
- ctxt->state = STATE_OFFLINE;
- ctxt->state_dirty = 1;
- }
- } else {
- printk(KERN_ERR "qmi: unknown bcast msg type 0x%04x\n", msg->type);
- }
-}
-
-static void qmi_process_wds_msg(struct qmi_ctxt *ctxt,
- struct qmi_msg *msg)
-{
- printk("wds: %04x @ %02x\n", msg->type, msg->client_id);
- if (msg->client_id == ctxt->wds_client_id) {
- qmi_process_unicast_wds_msg(ctxt, msg);
- } else if (msg->client_id == 0xff) {
- qmi_process_broadcast_wds_msg(ctxt, msg);
- } else {
- printk(KERN_ERR
- "qmi_process_wds_msg client id 0x%02x unknown\n",
- msg->client_id);
- }
-}
-
-static void qmi_process_qmux(struct qmi_ctxt *ctxt,
- unsigned char *buf, unsigned sz)
-{
- struct qmi_msg msg;
-
- /* require a full header */
- if (sz < 5)
- return;
-
- /* require a size that matches the buffer size */
- if (sz != (buf[0] | (buf[1] << 8)))
- return;
-
- /* only messages from a service (bit7=1) are allowed */
- if (buf[2] != 0x80)
- return;
-
- msg.service = buf[3];
- msg.client_id = buf[4];
-
- /* annoyingly, CTL messages have a shorter TID */
- if (buf[3] == 0) {
- if (sz < 7)
- return;
- msg.txn_id = buf[6];
- buf += 7;
- sz -= 7;
- } else {
- if (sz < 8)
- return;
- msg.txn_id = buf[6] | (buf[7] << 8);
- buf += 8;
- sz -= 8;
- }
-
- /* no type and size!? */
- if (sz < 4)
- return;
- sz -= 4;
-
- msg.type = buf[0] | (buf[1] << 8);
- msg.size = buf[2] | (buf[3] << 8);
- msg.tlv = buf + 4;
-
- if (sz != msg.size)
- return;
-
- qmi_dump_msg(&msg, "recv");
-
- mutex_lock(&ctxt->lock);
- switch (msg.service) {
- case QMI_CTL:
- qmi_process_ctl_msg(ctxt, &msg);
- break;
- case QMI_WDS:
- qmi_process_wds_msg(ctxt, &msg);
- break;
- default:
- printk(KERN_ERR "qmi: msg from unknown svc 0x%02x\n",
- msg.service);
- break;
- }
- mutex_unlock(&ctxt->lock);
- wake_up(&qmi_wait_queue);
-}
-
-#define QMI_MAX_PACKET (256 + QMUX_OVERHEAD)
-
-static void qmi_read_work(struct work_struct *ws)
-{
- struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work);
- struct smd_channel *ch = ctxt->ch;
- unsigned char buf[QMI_MAX_PACKET];
- int sz;
-
- for (;;) {
- sz = smd_cur_packet_size(ch);
- if (sz == 0)
- break;
- if (sz > smd_read_avail(ch))
- break;
- if (sz > QMI_MAX_PACKET) {
- smd_read(ch, 0, sz);
- continue;
- }
- if (smd_read(ch, buf, sz) != sz) {
- printk(KERN_ERR "qmi: not enough data?!\n");
- continue;
- }
-
- /* interface selector must be 1 */
- if (buf[0] != 0x01)
- continue;
-
- qmi_process_qmux(ctxt, buf + 1, sz - 1);
- }
-}
-
-static int qmi_request_wds_cid(struct qmi_ctxt *ctxt);
-
-static void qmi_open_work(struct work_struct *ws)
-{
- struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, open_work);
- mutex_lock(&ctxt->lock);
- qmi_request_wds_cid(ctxt);
- mutex_unlock(&ctxt->lock);
-}
-
-static void qmi_notify(void *priv, unsigned event)
-{
- struct qmi_ctxt *ctxt = priv;
-
- switch (event) {
- case SMD_EVENT_DATA: {
- int sz;
- sz = smd_cur_packet_size(ctxt->ch);
- if ((sz > 0) && (sz <= smd_read_avail(ctxt->ch))) {
- queue_work(qmi_wq, &ctxt->read_work);
- }
- break;
- }
- case SMD_EVENT_OPEN:
- printk(KERN_INFO "qmi: smd opened\n");
- queue_work(qmi_wq, &ctxt->open_work);
- break;
- case SMD_EVENT_CLOSE:
- printk(KERN_INFO "qmi: smd closed\n");
- break;
- }
-}
-
-static int qmi_request_wds_cid(struct qmi_ctxt *ctxt)
-{
- unsigned char data[64 + QMUX_OVERHEAD];
- struct qmi_msg msg;
- unsigned char n;
-
- msg.service = QMI_CTL;
- msg.client_id = qmi_ctl_client_id;
- msg.txn_id = ctxt->ctl_txn_id;
- msg.type = 0x0022;
- msg.size = 0;
- msg.tlv = data + QMUX_HEADER;
-
- ctxt->ctl_txn_id += 2;
-
- n = QMI_WDS;
- qmi_add_tlv(&msg, 0x01, 0x01, &n);
-
- return qmi_send(ctxt, &msg);
-}
-
-static int qmi_network_get_profile(struct qmi_ctxt *ctxt)
-{
- unsigned char data[96 + QMUX_OVERHEAD];
- struct qmi_msg msg;
-
- msg.service = QMI_WDS;
- msg.client_id = ctxt->wds_client_id;
- msg.txn_id = ctxt->wds_txn_id;
- msg.type = 0x002D;
- msg.size = 0;
- msg.tlv = data + QMUX_HEADER;
-
- ctxt->wds_txn_id += 2;
-
- return qmi_send(ctxt, &msg);
-}
-
-static int qmi_network_up(struct qmi_ctxt *ctxt, char *apn)
-{
- unsigned char data[96 + QMUX_OVERHEAD];
- struct qmi_msg msg;
- char *auth_type;
- char *user;
- char *pass;
-
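- /* the argument is "<apn> [<user> [<password> [<auth-type>]]]", split in place on spaces */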
- for (user = apn; *user; user++) {
- if (*user == ' ') {
- *user++ = 0;
- break;
- }
- }
- for (pass = user; *pass; pass++) {
- if (*pass == ' ') {
- *pass++ = 0;
- break;
- }
- }
-
- for (auth_type = pass; *auth_type; auth_type++) {
- if (*auth_type == ' ') {
- *auth_type++ = 0;
- break;
- }
- }
-
- msg.service = QMI_WDS;
- msg.client_id = ctxt->wds_client_id;
- msg.txn_id = ctxt->wds_txn_id;
- msg.type = 0x0020;
- msg.size = 0;
- msg.tlv = data + QMUX_HEADER;
-
- ctxt->wds_txn_id += 2;
-
- qmi_add_tlv(&msg, 0x14, strlen(apn), apn);
- if (*auth_type)
- qmi_add_tlv(&msg, 0x16, strlen(auth_type), auth_type);
- if (*user) {
- if (!*auth_type) {
- unsigned char x;
- x = 3;
- qmi_add_tlv(&msg, 0x16, 1, &x);
- }
- qmi_add_tlv(&msg, 0x17, strlen(user), user);
- if (*pass)
- qmi_add_tlv(&msg, 0x18, strlen(pass), pass);
- }
- return qmi_send(ctxt, &msg);
-}
-
-static int qmi_network_down(struct qmi_ctxt *ctxt)
-{
- unsigned char data[16 + QMUX_OVERHEAD];
- struct qmi_msg msg;
-
- msg.service = QMI_WDS;
- msg.client_id = ctxt->wds_client_id;
- msg.txn_id = ctxt->wds_txn_id;
- msg.type = 0x0021;
- msg.size = 0;
- msg.tlv = data + QMUX_HEADER;
-
- ctxt->wds_txn_id += 2;
-
- qmi_add_tlv(&msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle);
-
- return qmi_send(ctxt, &msg);
-}
-
-static int qmi_print_state(struct qmi_ctxt *ctxt, char *buf, int max)
-{
- int i;
- char *statename;
-
- if (ctxt->state == STATE_ONLINE) {
- statename = "up";
- } else if (ctxt->state == STATE_OFFLINE) {
- statename = "down";
- } else {
- statename = "busy";
- }
-
- i = scnprintf(buf, max, "STATE=%s\n", statename);
- i += scnprintf(buf + i, max - i, "CID=%d\n", ctxt->wds_client_id);
-
- if (ctxt->state != STATE_ONLINE) {
- return i;
- }
-
- i += scnprintf(buf + i, max - i, "ADDR=%d.%d.%d.%d\n",
- ctxt->addr[0], ctxt->addr[1], ctxt->addr[2], ctxt->addr[3]);
- i += scnprintf(buf + i, max - i, "MASK=%d.%d.%d.%d\n",
- ctxt->mask[0], ctxt->mask[1], ctxt->mask[2], ctxt->mask[3]);
- i += scnprintf(buf + i, max - i, "GATEWAY=%d.%d.%d.%d\n",
- ctxt->gateway[0], ctxt->gateway[1], ctxt->gateway[2],
- ctxt->gateway[3]);
- i += scnprintf(buf + i, max - i, "DNS1=%d.%d.%d.%d\n",
- ctxt->dns1[0], ctxt->dns1[1], ctxt->dns1[2], ctxt->dns1[3]);
- i += scnprintf(buf + i, max - i, "DNS2=%d.%d.%d.%d\n",
- ctxt->dns2[0], ctxt->dns2[1], ctxt->dns2[2], ctxt->dns2[3]);
-
- return i;
-}
-
-static ssize_t qmi_read(struct file *fp, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct qmi_ctxt *ctxt = fp->private_data;
- char msg[256];
- int len;
- int r;
-
- mutex_lock(&ctxt->lock);
- for (;;) {
- if (ctxt->state_dirty) {
- ctxt->state_dirty = 0;
- len = qmi_print_state(ctxt, msg, 256);
- break;
- }
- mutex_unlock(&ctxt->lock);
- r = wait_event_interruptible(qmi_wait_queue, ctxt->state_dirty);
- if (r < 0)
- return r;
- mutex_lock(&ctxt->lock);
- }
- mutex_unlock(&ctxt->lock);
-
- if (len > count)
- len = count;
-
- if (copy_to_user(buf, msg, len))
- return -EFAULT;
-
- return len;
-}
-
-
-static ssize_t qmi_write(struct file *fp, const char __user *buf,
- size_t count, loff_t *pos)
-{
- struct qmi_ctxt *ctxt = fp->private_data;
- unsigned char cmd[64];
- int len;
- int r;
-
- if (count < 1)
- return 0;
-
- len = count > 63 ? 63 : count;
-
- if (copy_from_user(cmd, buf, len))
- return -EFAULT;
-
- cmd[len] = 0;
-
- /* lazy */
- if (cmd[len-1] == '\n') {
- cmd[len-1] = 0;
- len--;
- }
-
- if (!strncmp(cmd, "verbose", 7)) {
- verbose = 1;
- } else if (!strncmp(cmd, "terse", 5)) {
- verbose = 0;
- } else if (!strncmp(cmd, "poll", 4)) {
- ctxt->state_dirty = 1;
- wake_up(&qmi_wait_queue);
- } else if (!strncmp(cmd, "down", 4)) {
-retry_down:
- mutex_lock(&ctxt->lock);
- if (ctxt->wds_busy) {
- mutex_unlock(&ctxt->lock);
- r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
- if (r < 0)
- return r;
- goto retry_down;
- }
- ctxt->wds_busy = 1;
- qmi_network_down(ctxt);
- mutex_unlock(&ctxt->lock);
- } else if (!strncmp(cmd, "up:", 3)) {
-retry_up:
- mutex_lock(&ctxt->lock);
- if (ctxt->wds_busy) {
- mutex_unlock(&ctxt->lock);
- r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
- if (r < 0)
- return r;
- goto retry_up;
- }
- ctxt->wds_busy = 1;
- qmi_network_up(ctxt, cmd+3);
- mutex_unlock(&ctxt->lock);
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-static int qmi_open(struct inode *ip, struct file *fp)
-{
- struct qmi_ctxt *ctxt = qmi_minor_to_ctxt(MINOR(ip->i_rdev));
- int r = 0;
-
- if (!ctxt) {
- printk(KERN_ERR "unknown qmi misc %d\n", MINOR(ip->i_rdev));
- return -ENODEV;
- }
-
- fp->private_data = ctxt;
-
- mutex_lock(&ctxt->lock);
- if (ctxt->ch == 0)
- r = smd_open(ctxt->ch_name, &ctxt->ch, ctxt, qmi_notify);
- if (r == 0)
- wake_up(&qmi_wait_queue);
- mutex_unlock(&ctxt->lock);
-
- return r;
-}
-
-static int qmi_release(struct inode *ip, struct file *fp)
-{
- return 0;
-}
-
-static struct file_operations qmi_fops = {
- .owner = THIS_MODULE,
- .read = qmi_read,
- .write = qmi_write,
- .open = qmi_open,
- .release = qmi_release,
-};
-
-static struct qmi_ctxt qmi_device0 = {
- .ch_name = "SMD_DATA5_CNTL",
- .misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "qmi0",
- .fops = &qmi_fops,
- }
-};
-static struct qmi_ctxt qmi_device1 = {
- .ch_name = "SMD_DATA6_CNTL",
- .misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "qmi1",
- .fops = &qmi_fops,
- }
-};
-static struct qmi_ctxt qmi_device2 = {
- .ch_name = "SMD_DATA7_CNTL",
- .misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "qmi2",
- .fops = &qmi_fops,
- }
-};
-
-static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n)
-{
- if (n == qmi_device0.misc.minor)
- return &qmi_device0;
- if (n == qmi_device1.misc.minor)
- return &qmi_device1;
- if (n == qmi_device2.misc.minor)
- return &qmi_device2;
- return 0;
-}
-
-static int __init qmi_init(void)
-{
- int ret;
-
- qmi_wq = create_singlethread_workqueue("qmi");
- if (qmi_wq == 0)
- return -ENOMEM;
-
- qmi_ctxt_init(&qmi_device0, 0);
- qmi_ctxt_init(&qmi_device1, 1);
- qmi_ctxt_init(&qmi_device2, 2);
-
- ret = misc_register(&qmi_device0.misc);
- if (ret == 0)
- ret = misc_register(&qmi_device1.misc);
- if (ret == 0)
- ret = misc_register(&qmi_device2.misc);
- return ret;
-}
-
-module_init(qmi_init);
diff --git a/drivers/staging/dream/smd/smd_rpcrouter.c b/drivers/staging/dream/smd/smd_rpcrouter.c
deleted file mode 100644
index 8744a6e499c..00000000000
--- a/drivers/staging/dream/smd/smd_rpcrouter.c
+++ /dev/null
@@ -1,1261 +0,0 @@
-/* arch/arm/mach-msm/smd_rpcrouter.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2009 QUALCOMM Incorporated.
- * Author: San Mehat <san@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-/* TODO: handle cases where smd_write() will tempfail due to full fifo */
-/* TODO: thread priority? schedule a work to bump it? */
-/* TODO: maybe make server_list_lock a mutex */
-/* TODO: pool fragments to avoid kmalloc/kfree churn */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/cdev.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-
-#include <mach/msm_smd.h>
-#include "smd_rpcrouter.h"
-
-#define TRACE_R2R_MSG 0
-#define TRACE_R2R_RAW 0
-#define TRACE_RPC_MSG 0
-#define TRACE_NOTIFY_MSG 0
-
-#define MSM_RPCROUTER_DEBUG 0
-#define MSM_RPCROUTER_DEBUG_PKT 0
-#define MSM_RPCROUTER_R2R_DEBUG 0
-#define DUMP_ALL_RECEIVED_HEADERS 0
-
-#define DIAG(x...) printk("[RR] ERROR " x)
-
-#if MSM_RPCROUTER_DEBUG
-#define D(x...) printk(x)
-#else
-#define D(x...) do {} while (0)
-#endif
-
-#if TRACE_R2R_MSG
-#define RR(x...) printk("[RR] "x)
-#else
-#define RR(x...) do {} while (0)
-#endif
-
-#if TRACE_RPC_MSG
-#define IO(x...) printk("[RPC] "x)
-#else
-#define IO(x...) do {} while (0)
-#endif
-
-#if TRACE_NOTIFY_MSG
-#define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x)
-#else
-#define NTFY(x...) do {} while (0)
-#endif
-
-static LIST_HEAD(local_endpoints);
-static LIST_HEAD(remote_endpoints);
-
-static LIST_HEAD(server_list);
-
-static smd_channel_t *smd_channel;
-static int initialized;
-static wait_queue_head_t newserver_wait;
-static wait_queue_head_t smd_wait;
-
-static DEFINE_SPINLOCK(local_endpoints_lock);
-static DEFINE_SPINLOCK(remote_endpoints_lock);
-static DEFINE_SPINLOCK(server_list_lock);
-static DEFINE_SPINLOCK(smd_lock);
-
-static struct workqueue_struct *rpcrouter_workqueue;
-static int rpcrouter_need_len;
-
-static atomic_t next_xid = ATOMIC_INIT(1);
-static uint8_t next_pacmarkid;
-
-static void do_read_data(struct work_struct *work);
-static void do_create_pdevs(struct work_struct *work);
-static void do_create_rpcrouter_pdev(struct work_struct *work);
-
-static DECLARE_WORK(work_read_data, do_read_data);
-static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
-static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
-
-#define RR_STATE_IDLE 0
-#define RR_STATE_HEADER 1
-#define RR_STATE_BODY 2
-#define RR_STATE_ERROR 3
-
-struct rr_context {
- struct rr_packet *pkt;
- uint8_t *ptr;
- uint32_t state; /* current assembly state */
- uint32_t count; /* bytes needed in this state */
-};
-
-static struct rr_context the_rr_context;
-
-static struct platform_device rpcrouter_pdev = {
- .name = "oncrpc_router",
- .id = -1,
-};
-
-
-static int rpcrouter_send_control_msg(union rr_control_msg *msg)
-{
- struct rr_header hdr;
- unsigned long flags;
- int need;
-
- if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
- printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
- "router not initialized\n");
- return -EINVAL;
- }
-
- hdr.version = RPCROUTER_VERSION;
- hdr.type = msg->cmd;
- hdr.src_pid = RPCROUTER_PID_LOCAL;
- hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
- hdr.confirm_rx = 0;
- hdr.size = sizeof(*msg);
- hdr.dst_pid = 0;
- hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
-
- /* TODO: what if channel is full? */
-
- need = sizeof(hdr) + hdr.size;
- spin_lock_irqsave(&smd_lock, flags);
- while (smd_write_avail(smd_channel) < need) {
- spin_unlock_irqrestore(&smd_lock, flags);
- msleep(250);
- spin_lock_irqsave(&smd_lock, flags);
- }
- smd_write(smd_channel, &hdr, sizeof(hdr));
- smd_write(smd_channel, msg, hdr.size);
- spin_unlock_irqrestore(&smd_lock, flags);
- return 0;
-}
-
-static struct rr_server *rpcrouter_create_server(uint32_t pid,
- uint32_t cid,
- uint32_t prog,
- uint32_t ver)
-{
- struct rr_server *server;
- unsigned long flags;
- int rc;
-
- server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
- if (!server)
- return ERR_PTR(-ENOMEM);
-
- memset(server, 0, sizeof(struct rr_server));
- server->pid = pid;
- server->cid = cid;
- server->prog = prog;
- server->vers = ver;
-
- spin_lock_irqsave(&server_list_lock, flags);
- list_add_tail(&server->list, &server_list);
- spin_unlock_irqrestore(&server_list_lock, flags);
-
- if (pid == RPCROUTER_PID_REMOTE) {
- rc = msm_rpcrouter_create_server_cdev(server);
- if (rc < 0)
- goto out_fail;
- }
- return server;
-out_fail:
- spin_lock_irqsave(&server_list_lock, flags);
- list_del(&server->list);
- spin_unlock_irqrestore(&server_list_lock, flags);
- kfree(server);
- return ERR_PTR(rc);
-}
-
-static void rpcrouter_destroy_server(struct rr_server *server)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&server_list_lock, flags);
- list_del(&server->list);
- spin_unlock_irqrestore(&server_list_lock, flags);
- device_destroy(msm_rpcrouter_class, server->device_number);
- kfree(server);
-}
-
-static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
-{
- struct rr_server *server;
- unsigned long flags;
-
- spin_lock_irqsave(&server_list_lock, flags);
- list_for_each_entry(server, &server_list, list) {
- if (server->prog == prog
- && server->vers == ver) {
- spin_unlock_irqrestore(&server_list_lock, flags);
- return server;
- }
- }
- spin_unlock_irqrestore(&server_list_lock, flags);
- return NULL;
-}
-
-static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
-{
- struct rr_server *server;
- unsigned long flags;
-
- spin_lock_irqsave(&server_list_lock, flags);
- list_for_each_entry(server, &server_list, list) {
- if (server->device_number == dev) {
- spin_unlock_irqrestore(&server_list_lock, flags);
- return server;
- }
- }
- spin_unlock_irqrestore(&server_list_lock, flags);
- return NULL;
-}
-
-struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
-{
- struct msm_rpc_endpoint *ept;
- unsigned long flags;
-
- ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
- if (!ept)
- return NULL;
- memset(ept, 0, sizeof(struct msm_rpc_endpoint));
-
- /* mark no reply outstanding */
- ept->reply_pid = 0xffffffff;
-
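- /* the local cid is simply the endpoint's kernel address, truncated to 32 bits */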
- ept->cid = (uint32_t) ept;
- ept->pid = RPCROUTER_PID_LOCAL;
- ept->dev = dev;
-
- if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
- struct rr_server *srv;
- /*
- * This is a userspace client which opened
- * a program/ver device node. Bind the client
- * to that destination.
- */
- srv = rpcrouter_lookup_server_by_dev(dev);
- /* TODO: bug? really? */
- BUG_ON(!srv);
-
- ept->dst_pid = srv->pid;
- ept->dst_cid = srv->cid;
- ept->dst_prog = cpu_to_be32(srv->prog);
- ept->dst_vers = cpu_to_be32(srv->vers);
-
- D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers);
- } else {
- /* mark not connected */
- ept->dst_pid = 0xffffffff;
- D("Creating a master local ept %p\n", ept);
- }
-
- init_waitqueue_head(&ept->wait_q);
- INIT_LIST_HEAD(&ept->read_q);
- spin_lock_init(&ept->read_q_lock);
- INIT_LIST_HEAD(&ept->incomplete);
-
- spin_lock_irqsave(&local_endpoints_lock, flags);
- list_add_tail(&ept->list, &local_endpoints);
- spin_unlock_irqrestore(&local_endpoints_lock, flags);
- return ept;
-}
-
-int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
-{
- int rc;
- union rr_control_msg msg;
-
- msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
- msg.cli.pid = ept->pid;
- msg.cli.cid = ept->cid;
-
- RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
- rc = rpcrouter_send_control_msg(&msg);
- if (rc < 0)
- return rc;
-
- list_del(&ept->list);
- kfree(ept);
- return 0;
-}
-
-static int rpcrouter_create_remote_endpoint(uint32_t cid)
-{
- struct rr_remote_endpoint *new_c;
- unsigned long flags;
-
- new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
- if (!new_c)
- return -ENOMEM;
- memset(new_c, 0, sizeof(struct rr_remote_endpoint));
-
- new_c->cid = cid;
- new_c->pid = RPCROUTER_PID_REMOTE;
- init_waitqueue_head(&new_c->quota_wait);
- spin_lock_init(&new_c->quota_lock);
-
- spin_lock_irqsave(&remote_endpoints_lock, flags);
- list_add_tail(&new_c->list, &remote_endpoints);
- spin_unlock_irqrestore(&remote_endpoints_lock, flags);
- return 0;
-}
-
-static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
-{
- struct msm_rpc_endpoint *ept;
- unsigned long flags;
-
- spin_lock_irqsave(&local_endpoints_lock, flags);
- list_for_each_entry(ept, &local_endpoints, list) {
- if (ept->cid == cid) {
- spin_unlock_irqrestore(&local_endpoints_lock, flags);
- return ept;
- }
- }
- spin_unlock_irqrestore(&local_endpoints_lock, flags);
- return NULL;
-}
-
-static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
-{
- struct rr_remote_endpoint *ept;
- unsigned long flags;
-
- spin_lock_irqsave(&remote_endpoints_lock, flags);
- list_for_each_entry(ept, &remote_endpoints, list) {
- if (ept->cid == cid) {
- spin_unlock_irqrestore(&remote_endpoints_lock, flags);
- return ept;
- }
- }
- spin_unlock_irqrestore(&remote_endpoints_lock, flags);
- return NULL;
-}
-
-static int process_control_msg(union rr_control_msg *msg, int len)
-{
- union rr_control_msg ctl;
- struct rr_server *server;
- struct rr_remote_endpoint *r_ept;
- int rc = 0;
- unsigned long flags;
-
- if (len != sizeof(*msg)) {
- printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
- len, sizeof(*msg));
- return -EINVAL;
- }
-
- switch (msg->cmd) {
- case RPCROUTER_CTRL_CMD_HELLO:
- RR("o HELLO\n");
-
- RR("x HELLO\n");
- memset(&ctl, 0, sizeof(ctl));
- ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
- rpcrouter_send_control_msg(&ctl);
-
- initialized = 1;
-
- /* Send list of servers one at a time */
- ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
-
- /* TODO: long time to hold a spinlock... */
- spin_lock_irqsave(&server_list_lock, flags);
- list_for_each_entry(server, &server_list, list) {
- ctl.srv.pid = server->pid;
- ctl.srv.cid = server->cid;
- ctl.srv.prog = server->prog;
- ctl.srv.vers = server->vers;
-
- RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
- server->pid, server->cid,
- server->prog, server->vers);
-
- rpcrouter_send_control_msg(&ctl);
- }
- spin_unlock_irqrestore(&server_list_lock, flags);
-
- queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev);
- break;
-
- case RPCROUTER_CTRL_CMD_RESUME_TX:
- RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
-
- r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
- if (!r_ept) {
- printk(KERN_ERR
- "rpcrouter: Unable to resume client\n");
- break;
- }
- spin_lock_irqsave(&r_ept->quota_lock, flags);
- r_ept->tx_quota_cntr = 0;
- spin_unlock_irqrestore(&r_ept->quota_lock, flags);
- wake_up(&r_ept->quota_wait);
- break;
-
- case RPCROUTER_CTRL_CMD_NEW_SERVER:
- RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
- msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
-
- server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
-
- if (!server) {
- server = rpcrouter_create_server(
- msg->srv.pid, msg->srv.cid,
- msg->srv.prog, msg->srv.vers);
- if (!server)
- return -ENOMEM;
- /*
- * XXX: Verify that it's okay to add the
- * client to our remote client list
- * if we get a NEW_SERVER notification
- */
- if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
- rc = rpcrouter_create_remote_endpoint(
- msg->srv.cid);
- if (rc < 0)
- printk(KERN_ERR
- "rpcrouter:Client create"
- "error (%d)\n", rc);
- }
- schedule_work(&work_create_pdevs);
- wake_up(&newserver_wait);
- } else {
- if ((server->pid == msg->srv.pid) &&
- (server->cid == msg->srv.cid)) {
- printk(KERN_ERR "rpcrouter: Duplicate svr\n");
- } else {
- server->pid = msg->srv.pid;
- server->cid = msg->srv.cid;
- }
- }
- break;
-
- case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
- RR("o REMOVE_SERVER prog=%08x:%d\n",
- msg->srv.prog, msg->srv.vers);
- server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
- if (server)
- rpcrouter_destroy_server(server);
- break;
-
- case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
- RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
- if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
- printk(KERN_ERR
- "rpcrouter: Denying remote removal of "
- "local client\n");
- break;
- }
- r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
- if (r_ept) {
- spin_lock_irqsave(&remote_endpoints_lock, flags);
- list_del(&r_ept->list);
- spin_unlock_irqrestore(&remote_endpoints_lock, flags);
- kfree(r_ept);
- }
-
- /* Notify local clients of this event */
-		printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMPLEMENTED\n");
- rc = -ENOSYS;
-
- break;
- default:
- RR("o UNKNOWN(%08x)\n", msg->cmd);
- rc = -ENOSYS;
- }
-
- return rc;
-}
-
-static void do_create_rpcrouter_pdev(struct work_struct *work)
-{
- platform_device_register(&rpcrouter_pdev);
-}
-
-static void do_create_pdevs(struct work_struct *work)
-{
- unsigned long flags;
- struct rr_server *server;
-
- /* TODO: race if destroyed while being registered */
- spin_lock_irqsave(&server_list_lock, flags);
- list_for_each_entry(server, &server_list, list) {
- if (server->pid == RPCROUTER_PID_REMOTE) {
- if (server->pdev_name[0] == 0) {
- spin_unlock_irqrestore(&server_list_lock,
- flags);
- msm_rpcrouter_create_server_pdev(server);
- schedule_work(&work_create_pdevs);
- return;
- }
- }
- }
- spin_unlock_irqrestore(&server_list_lock, flags);
-}
-
-static void rpcrouter_smdnotify(void *_dev, unsigned event)
-{
- if (event != SMD_EVENT_DATA)
- return;
-
- wake_up(&smd_wait);
-}
-
-static void *rr_malloc(unsigned sz)
-{
- void *ptr = kmalloc(sz, GFP_KERNEL);
- if (ptr)
- return ptr;
-
- printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
- do {
- ptr = kmalloc(sz, GFP_KERNEL);
- } while (!ptr);
-
- return ptr;
-}
-
-/* TODO: deal with channel teardown / restore */
-static int rr_read(void *data, int len)
-{
- int rc;
- unsigned long flags;
-// printk("rr_read() %d\n", len);
- for(;;) {
- spin_lock_irqsave(&smd_lock, flags);
- if (smd_read_avail(smd_channel) >= len) {
- rc = smd_read(smd_channel, data, len);
- spin_unlock_irqrestore(&smd_lock, flags);
- if (rc == len)
- return 0;
- else
- return -EIO;
- }
- rpcrouter_need_len = len;
- spin_unlock_irqrestore(&smd_lock, flags);
-
-// printk("rr_read: waiting (%d)\n", len);
- wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
- }
- return 0;
-}
-
-static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];
-
-static void do_read_data(struct work_struct *work)
-{
- struct rr_header hdr;
- struct rr_packet *pkt;
- struct rr_fragment *frag;
- struct msm_rpc_endpoint *ept;
- uint32_t pm, mid;
- unsigned long flags;
-
- if (rr_read(&hdr, sizeof(hdr)))
- goto fail_io;
-
-#if TRACE_R2R_RAW
- RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
- hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
- hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
-#endif
-
- if (hdr.version != RPCROUTER_VERSION) {
- DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
- goto fail_data;
- }
- if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
- DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
- goto fail_data;
- }
-
- if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
- if (rr_read(r2r_buf, hdr.size))
- goto fail_io;
- process_control_msg((void*) r2r_buf, hdr.size);
- goto done;
- }
-
- if (hdr.size < sizeof(pm)) {
- DIAG("runt packet (no pacmark)\n");
- goto fail_data;
- }
- if (rr_read(&pm, sizeof(pm)))
- goto fail_io;
-
- hdr.size -= sizeof(pm);
-
- frag = rr_malloc(hdr.size + sizeof(*frag));
- frag->next = NULL;
- frag->length = hdr.size;
- if (rr_read(frag->data, hdr.size))
- goto fail_io;
-
- ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
- if (!ept) {
- DIAG("no local ept for cid %08x\n", hdr.dst_cid);
- kfree(frag);
- goto done;
- }
-
- /* See if there is already a partial packet that matches our mid
- * and if so, append this fragment to that packet.
- */
- mid = PACMARK_MID(pm);
- list_for_each_entry(pkt, &ept->incomplete, list) {
- if (pkt->mid == mid) {
- pkt->last->next = frag;
- pkt->last = frag;
- pkt->length += frag->length;
- if (PACMARK_LAST(pm)) {
- list_del(&pkt->list);
- goto packet_complete;
- }
- goto done;
- }
- }
- /* This mid is new -- create a packet for it, and put it on
-	 * the incomplete list if this fragment is not the last fragment,
- * otherwise put it on the read queue.
- */
- pkt = rr_malloc(sizeof(struct rr_packet));
- pkt->first = frag;
- pkt->last = frag;
- memcpy(&pkt->hdr, &hdr, sizeof(hdr));
- pkt->mid = mid;
- pkt->length = frag->length;
- if (!PACMARK_LAST(pm)) {
- list_add_tail(&pkt->list, &ept->incomplete);
- goto done;
- }
-
-packet_complete:
- spin_lock_irqsave(&ept->read_q_lock, flags);
- list_add_tail(&pkt->list, &ept->read_q);
- wake_up(&ept->wait_q);
- spin_unlock_irqrestore(&ept->read_q_lock, flags);
-done:
-
- if (hdr.confirm_rx) {
- union rr_control_msg msg;
-
- msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
- msg.cli.pid = hdr.dst_pid;
- msg.cli.cid = hdr.dst_cid;
-
- RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
- rpcrouter_send_control_msg(&msg);
- }
-
- queue_work(rpcrouter_workqueue, &work_read_data);
- return;
-
-fail_io:
-fail_data:
- printk(KERN_ERR "rpc_router has died\n");
-}
-
-void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
- uint32_t vers, uint32_t proc)
-{
- memset(hdr, 0, sizeof(struct rpc_request_hdr));
- hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
- hdr->rpc_vers = cpu_to_be32(2);
- hdr->prog = cpu_to_be32(prog);
- hdr->vers = cpu_to_be32(vers);
- hdr->procedure = cpu_to_be32(proc);
-}
-
-struct msm_rpc_endpoint *msm_rpc_open(void)
-{
- struct msm_rpc_endpoint *ept;
-
- ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
- if (ept == NULL)
- return ERR_PTR(-ENOMEM);
-
- return ept;
-}
-
-int msm_rpc_close(struct msm_rpc_endpoint *ept)
-{
- return msm_rpcrouter_destroy_local_endpoint(ept);
-}
-EXPORT_SYMBOL(msm_rpc_close);
-
-int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
-{
- struct rr_header hdr;
- uint32_t pacmark;
- struct rpc_request_hdr *rq = buffer;
- struct rr_remote_endpoint *r_ept;
- unsigned long flags;
- int needed;
- DEFINE_WAIT(__wait);
-
- /* TODO: fragmentation for large outbound packets */
- if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
- return -EINVAL;
-
- /* snoop the RPC packet and enforce permissions */
-
- /* has to have at least the xid and type fields */
- if (count < (sizeof(uint32_t) * 2)) {
- printk(KERN_ERR "rr_write: rejecting runt packet\n");
- return -EINVAL;
- }
-
- if (rq->type == 0) {
- /* RPC CALL */
- if (count < (sizeof(uint32_t) * 6)) {
- printk(KERN_ERR
- "rr_write: rejecting runt call packet\n");
- return -EINVAL;
- }
- if (ept->dst_pid == 0xffffffff) {
- printk(KERN_ERR "rr_write: not connected\n");
- return -ENOTCONN;
- }
-
-#if CONFIG_MSM_AMSS_VERSION >= 6350
- if ((ept->dst_prog != rq->prog) ||
- !msm_rpc_is_compatible_version(
- be32_to_cpu(ept->dst_vers),
- be32_to_cpu(rq->vers))) {
-#else
- if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) {
-#endif
- printk(KERN_ERR
- "rr_write: cannot write to %08x:%d "
- "(bound to %08x:%d)\n",
- be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
- be32_to_cpu(ept->dst_prog),
- be32_to_cpu(ept->dst_vers));
- return -EINVAL;
- }
- hdr.dst_pid = ept->dst_pid;
- hdr.dst_cid = ept->dst_cid;
- IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n",
- ept,
- be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
- ept->dst_pid, ept->dst_cid, count,
- be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure));
- } else {
- /* RPC REPLY */
- /* TODO: locking */
- if (ept->reply_pid == 0xffffffff) {
- printk(KERN_ERR
- "rr_write: rejecting unexpected reply\n");
- return -EINVAL;
- }
- if (ept->reply_xid != rq->xid) {
- printk(KERN_ERR
- "rr_write: rejecting packet w/ bad xid\n");
- return -EINVAL;
- }
-
- hdr.dst_pid = ept->reply_pid;
- hdr.dst_cid = ept->reply_cid;
-
- /* consume this reply */
- ept->reply_pid = 0xffffffff;
-
- IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
- ept,
- be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
- }
-
- r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);
-
- if (!r_ept) {
- printk(KERN_ERR
- "msm_rpc_write(): No route to ept "
- "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
- return -EHOSTUNREACH;
- }
-
- /* Create routing header */
- hdr.type = RPCROUTER_CTRL_CMD_DATA;
- hdr.version = RPCROUTER_VERSION;
- hdr.src_pid = ept->pid;
- hdr.src_cid = ept->cid;
- hdr.confirm_rx = 0;
- hdr.size = count + sizeof(uint32_t);
-
- for (;;) {
- prepare_to_wait(&r_ept->quota_wait, &__wait,
- TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&r_ept->quota_lock, flags);
- if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
- break;
- if (signal_pending(current) &&
- (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
- break;
- spin_unlock_irqrestore(&r_ept->quota_lock, flags);
- schedule();
- }
- finish_wait(&r_ept->quota_wait, &__wait);
-
- if (signal_pending(current) &&
- (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
- spin_unlock_irqrestore(&r_ept->quota_lock, flags);
- return -ERESTARTSYS;
- }
- r_ept->tx_quota_cntr++;
- if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
- hdr.confirm_rx = 1;
-
- /* bump pacmark while interrupts disabled to avoid race
- * probably should be atomic op instead
- */
- pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);
-
- spin_unlock_irqrestore(&r_ept->quota_lock, flags);
-
- spin_lock_irqsave(&smd_lock, flags);
-
- needed = sizeof(hdr) + hdr.size;
- while (smd_write_avail(smd_channel) < needed) {
- spin_unlock_irqrestore(&smd_lock, flags);
- msleep(250);
- spin_lock_irqsave(&smd_lock, flags);
- }
-
- /* TODO: deal with full fifo */
- smd_write(smd_channel, &hdr, sizeof(hdr));
- smd_write(smd_channel, &pacmark, sizeof(pacmark));
- smd_write(smd_channel, buffer, count);
-
- spin_unlock_irqrestore(&smd_lock, flags);
-
- return count;
-}
-EXPORT_SYMBOL(msm_rpc_write);
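For reference, msm_rpc_write() above frames each outbound message on the SMD
channel as a routing header, a single pacmark word, and then the payload. A
minimal sketch of the resulting layout, derived from struct rr_header and the
PACMARK() helper in smd_rpcrouter.h (the byte counts are illustrative):

/* Wire framing produced by msm_rpc_write() -- illustrative sketch only:
 *
 *   struct rr_header hdr;     32 bytes, hdr.size = count + sizeof(uint32_t)
 *   uint32_t pacmark;          4 bytes, PACMARK(count, mid, 0, 1)
 *   uint8_t  payload[count];  the caller's RPC call or reply
 *
 * A 100-byte call therefore needs sizeof(hdr) + hdr.size = 136 bytes of SMD
 * fifo space, which is exactly the 'needed' value checked before smd_write().
 */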
-
-/*
- * NOTE: It is the responsibility of the caller to kfree buffer
- */
-int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
- unsigned user_len, long timeout)
-{
- struct rr_fragment *frag, *next;
- char *buf;
- int rc;
-
- rc = __msm_rpc_read(ept, &frag, user_len, timeout);
- if (rc <= 0)
- return rc;
-
- /* single-fragment messages conveniently can be
- * returned as-is (the buffer is at the front)
- */
- if (frag->next == 0) {
- *buffer = (void*) frag;
- return rc;
- }
-
- /* multi-fragment messages, we have to do it the
- * hard way, which is rather disgusting right now
- */
- buf = rr_malloc(rc);
- *buffer = buf;
-
- while (frag != NULL) {
- memcpy(buf, frag->data, frag->length);
- next = frag->next;
- buf += frag->length;
- kfree(frag);
- frag = next;
- }
-
- return rc;
-}
-
-int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
- void *_request, int request_size,
- long timeout)
-{
- return msm_rpc_call_reply(ept, proc,
- _request, request_size,
- NULL, 0, timeout);
-}
-EXPORT_SYMBOL(msm_rpc_call);
-
-int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
- void *_request, int request_size,
- void *_reply, int reply_size,
- long timeout)
-{
- struct rpc_request_hdr *req = _request;
- struct rpc_reply_hdr *reply;
- int rc;
-
- if (request_size < sizeof(*req))
- return -ETOOSMALL;
-
- if (ept->dst_pid == 0xffffffff)
- return -ENOTCONN;
-
- /* We can't use msm_rpc_setup_req() here, because dst_prog and
- * dst_vers here are already in BE.
- */
- memset(req, 0, sizeof(*req));
- req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
- req->rpc_vers = cpu_to_be32(2);
- req->prog = ept->dst_prog;
- req->vers = ept->dst_vers;
- req->procedure = cpu_to_be32(proc);
-
- rc = msm_rpc_write(ept, req, request_size);
- if (rc < 0)
- return rc;
-
- for (;;) {
- rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
- if (rc < 0)
- return rc;
- if (rc < (3 * sizeof(uint32_t))) {
- rc = -EIO;
- break;
- }
- /* we should not get CALL packets -- ignore them */
- if (reply->type == 0) {
- kfree(reply);
- continue;
- }
- /* If an earlier call timed out, we could get the (no
- * longer wanted) reply for it. Ignore replies that
- * we don't expect.
- */
- if (reply->xid != req->xid) {
- kfree(reply);
- continue;
- }
- if (reply->reply_stat != 0) {
- rc = -EPERM;
- break;
- }
- if (reply->data.acc_hdr.accept_stat != 0) {
- rc = -EINVAL;
- break;
- }
- if (_reply == NULL) {
- rc = 0;
- break;
- }
- if (rc > reply_size) {
- rc = -ENOMEM;
- } else {
- memcpy(_reply, reply, rc);
- }
- break;
- }
- kfree(reply);
- return rc;
-}
-EXPORT_SYMBOL(msm_rpc_call_reply);
-
-
-static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
-{
- unsigned long flags;
- int ret;
- spin_lock_irqsave(&ept->read_q_lock, flags);
- ret = !list_empty(&ept->read_q);
- spin_unlock_irqrestore(&ept->read_q_lock, flags);
- return ret;
-}
-
-int __msm_rpc_read(struct msm_rpc_endpoint *ept,
- struct rr_fragment **frag_ret,
- unsigned len, long timeout)
-{
- struct rr_packet *pkt;
- struct rpc_request_hdr *rq;
- DEFINE_WAIT(__wait);
- unsigned long flags;
- int rc;
-
- IO("READ on ept %p\n", ept);
-
- if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
- if (timeout < 0) {
- wait_event(ept->wait_q, ept_packet_available(ept));
- } else {
- rc = wait_event_timeout(
- ept->wait_q, ept_packet_available(ept),
- timeout);
- if (rc == 0)
- return -ETIMEDOUT;
- }
- } else {
- if (timeout < 0) {
- rc = wait_event_interruptible(
- ept->wait_q, ept_packet_available(ept));
- if (rc < 0)
- return rc;
- } else {
- rc = wait_event_interruptible_timeout(
- ept->wait_q, ept_packet_available(ept),
- timeout);
- if (rc == 0)
- return -ETIMEDOUT;
- }
- }
-
- spin_lock_irqsave(&ept->read_q_lock, flags);
- if (list_empty(&ept->read_q)) {
- spin_unlock_irqrestore(&ept->read_q_lock, flags);
- return -EAGAIN;
- }
- pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
- if (pkt->length > len) {
- spin_unlock_irqrestore(&ept->read_q_lock, flags);
- return -ETOOSMALL;
- }
- list_del(&pkt->list);
- spin_unlock_irqrestore(&ept->read_q_lock, flags);
-
- rc = pkt->length;
-
- *frag_ret = pkt->first;
- rq = (void*) pkt->first->data;
- if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
- IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
- ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
- be32_to_cpu(rq->procedure),
- be32_to_cpu(rq->xid));
- /* RPC CALL */
- if (ept->reply_pid != 0xffffffff) {
- printk(KERN_WARNING
- "rr_read: lost previous reply xid...\n");
- }
- /* TODO: locking? */
- ept->reply_pid = pkt->hdr.src_pid;
- ept->reply_cid = pkt->hdr.src_cid;
- ept->reply_xid = rq->xid;
- }
-#if TRACE_RPC_MSG
- else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))
- IO("READ on ept %p is a REPLY\n", ept);
- else IO("READ on ept %p (%d bytes)\n", ept, rc);
-#endif
-
- kfree(pkt);
- return rc;
-}
-
-#if CONFIG_MSM_AMSS_VERSION >= 6350
-int msm_rpc_is_compatible_version(uint32_t server_version,
- uint32_t client_version)
-{
- if ((server_version & RPC_VERSION_MODE_MASK) !=
- (client_version & RPC_VERSION_MODE_MASK))
- return 0;
-
- if (server_version & RPC_VERSION_MODE_MASK)
- return server_version == client_version;
-
- return ((server_version & RPC_VERSION_MAJOR_MASK) ==
- (client_version & RPC_VERSION_MAJOR_MASK)) &&
- ((server_version & RPC_VERSION_MINOR_MASK) >=
- (client_version & RPC_VERSION_MINOR_MASK));
-}
-EXPORT_SYMBOL(msm_rpc_is_compatible_version);
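In words, the check above says: versions whose mode bit differs are never
compatible, hashkey-mode versions must match exactly, and backwards-compatible
versions match when the major parts are equal and the server's minor part is
at least the client's. A hedged worked example, assuming the usual mask layout
(mode in the top bit, major in the upper half, minor in the lower half); the
real mask values live in mach/msm_rpcrouter.h:

/* Assumed masks, for illustration only:
 *   RPC_VERSION_MODE_MASK  0x80000000
 *   RPC_VERSION_MAJOR_MASK 0x7fff0000
 *   RPC_VERSION_MINOR_MASK 0x0000ffff
 *
 * msm_rpc_is_compatible_version(0x00030005, 0x00030002) -> compatible
 *     (same major 3, server minor 5 >= client minor 2)
 * msm_rpc_is_compatible_version(0x00030005, 0x00040001) -> not compatible
 *     (major 3 != major 4)
 * msm_rpc_is_compatible_version(0x80030005, 0x80030005) -> compatible
 *     (hashkey mode: exact match required)
 * msm_rpc_is_compatible_version(0x80030005, 0x00030005) -> not compatible
 *     (mode bit differs)
 */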
-
-static int msm_rpc_get_compatible_server(uint32_t prog,
- uint32_t ver,
- uint32_t *found_vers)
-{
- struct rr_server *server;
- unsigned long flags;
- if (found_vers == NULL)
- return 0;
-
- spin_lock_irqsave(&server_list_lock, flags);
- list_for_each_entry(server, &server_list, list) {
- if ((server->prog == prog) &&
- msm_rpc_is_compatible_version(server->vers, ver)) {
- *found_vers = server->vers;
- spin_unlock_irqrestore(&server_list_lock, flags);
- return 0;
- }
- }
- spin_unlock_irqrestore(&server_list_lock, flags);
- return -1;
-}
-#endif
-
-struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
-{
- struct msm_rpc_endpoint *ept;
- struct rr_server *server;
-
-#if CONFIG_MSM_AMSS_VERSION >= 6350
- if (!(vers & RPC_VERSION_MODE_MASK)) {
- uint32_t found_vers;
- if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0)
- return ERR_PTR(-EHOSTUNREACH);
- if (found_vers != vers) {
- D("RPC using new version %08x:{%08x --> %08x}\n",
- prog, vers, found_vers);
- vers = found_vers;
- }
- }
-#endif
-
- server = rpcrouter_lookup_server(prog, vers);
- if (!server)
- return ERR_PTR(-EHOSTUNREACH);
-
- ept = msm_rpc_open();
- if (IS_ERR(ept))
- return ept;
-
- ept->flags = flags;
- ept->dst_pid = server->pid;
- ept->dst_cid = server->cid;
- ept->dst_prog = cpu_to_be32(prog);
- ept->dst_vers = cpu_to_be32(vers);
-
- return ept;
-}
-EXPORT_SYMBOL(msm_rpc_connect);
-
-uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept)
-{
- return be32_to_cpu(ept->dst_vers);
-}
-EXPORT_SYMBOL(msm_rpc_get_vers);
-
-/* TODO: permission check? */
-int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
- uint32_t prog, uint32_t vers)
-{
- int rc;
- union rr_control_msg msg;
- struct rr_server *server;
-
- server = rpcrouter_create_server(ept->pid, ept->cid,
- prog, vers);
- if (!server)
- return -ENODEV;
-
- msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
- msg.srv.pid = ept->pid;
- msg.srv.cid = ept->cid;
- msg.srv.prog = prog;
- msg.srv.vers = vers;
-
- RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
- ept->pid, ept->cid, prog, vers);
-
- rc = rpcrouter_send_control_msg(&msg);
- if (rc < 0)
- return rc;
-
- return 0;
-}
-
-/* TODO: permission check -- disallow unreg of somebody else's server */
-int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
- uint32_t prog, uint32_t vers)
-{
- struct rr_server *server;
- server = rpcrouter_lookup_server(prog, vers);
-
- if (!server)
- return -ENOENT;
- rpcrouter_destroy_server(server);
- return 0;
-}
-
-static int msm_rpcrouter_probe(struct platform_device *pdev)
-{
- int rc;
-
- /* Initialize what we need to start processing */
- INIT_LIST_HEAD(&local_endpoints);
- INIT_LIST_HEAD(&remote_endpoints);
-
- init_waitqueue_head(&newserver_wait);
- init_waitqueue_head(&smd_wait);
-
- rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
- if (!rpcrouter_workqueue)
- return -ENOMEM;
-
- rc = msm_rpcrouter_init_devices();
- if (rc < 0)
- goto fail_destroy_workqueue;
-
- /* Open up SMD channel 2 */
- initialized = 0;
- rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
- if (rc < 0)
- goto fail_remove_devices;
-
- queue_work(rpcrouter_workqueue, &work_read_data);
- return 0;
-
- fail_remove_devices:
- msm_rpcrouter_exit_devices();
- fail_destroy_workqueue:
- destroy_workqueue(rpcrouter_workqueue);
- return rc;
-}
-
-static struct platform_driver msm_smd_channel2_driver = {
- .probe = msm_rpcrouter_probe,
- .driver = {
- .name = "SMD_RPCCALL",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init rpcrouter_init(void)
-{
- return platform_driver_register(&msm_smd_channel2_driver);
-}
-
-module_init(rpcrouter_init);
-MODULE_DESCRIPTION("MSM RPC Router");
-MODULE_AUTHOR("San Mehat <san@android.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dream/smd/smd_rpcrouter.h b/drivers/staging/dream/smd/smd_rpcrouter.h
deleted file mode 100644
index 86ab997b1b7..00000000000
--- a/drivers/staging/dream/smd/smd_rpcrouter.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/** arch/arm/mach-msm/smd_rpcrouter.h
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2008 QUALCOMM Incorporated.
- * Author: San Mehat <san@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H
-#define _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/cdev.h>
-#include <linux/platform_device.h>
-
-#include <mach/msm_smd.h>
-#include <mach/msm_rpcrouter.h>
-
-/* definitions for the R2R wire protocol */
-
-#define RPCROUTER_VERSION 1
-#define RPCROUTER_PROCESSORS_MAX 4
-#define RPCROUTER_MSGSIZE_MAX 512
-
-#define RPCROUTER_CLIENT_BCAST_ID 0xffffffff
-#define RPCROUTER_ROUTER_ADDRESS 0xfffffffe
-
-#define RPCROUTER_PID_LOCAL 1
-#define RPCROUTER_PID_REMOTE 0
-
-#define RPCROUTER_CTRL_CMD_DATA 1
-#define RPCROUTER_CTRL_CMD_HELLO 2
-#define RPCROUTER_CTRL_CMD_BYE 3
-#define RPCROUTER_CTRL_CMD_NEW_SERVER 4
-#define RPCROUTER_CTRL_CMD_REMOVE_SERVER 5
-#define RPCROUTER_CTRL_CMD_REMOVE_CLIENT 6
-#define RPCROUTER_CTRL_CMD_RESUME_TX 7
-#define RPCROUTER_CTRL_CMD_EXIT 8
-
-#define RPCROUTER_DEFAULT_RX_QUOTA 5
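The RX quota above drives a simple credit-based flow control; a hedged summary
of how the router code in smd_rpcrouter.c uses it:

/* Credit-based flow control, as implemented by the surrounding code:
 *   - msm_rpc_write() bumps r_ept->tx_quota_cntr for each outbound message,
 *     sets hdr.confirm_rx on the message that brings the counter up to the
 *     quota (5), and blocks on quota_wait while the counter sits at the quota.
 *   - do_read_data() on the receiving side answers any header that has
 *     confirm_rx set by sending a RESUME_TX control message back.
 *   - process_control_msg() resets tx_quota_cntr to 0 on RESUME_TX and wakes
 *     the writers sleeping on quota_wait.
 */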
-
-union rr_control_msg {
- uint32_t cmd;
- struct {
- uint32_t cmd;
- uint32_t prog;
- uint32_t vers;
- uint32_t pid;
- uint32_t cid;
- } srv;
- struct {
- uint32_t cmd;
- uint32_t pid;
- uint32_t cid;
- } cli;
-};
-
-struct rr_header {
- uint32_t version;
- uint32_t type;
- uint32_t src_pid;
- uint32_t src_cid;
- uint32_t confirm_rx;
- uint32_t size;
- uint32_t dst_pid;
- uint32_t dst_cid;
-};
-
-/* internals */
-
-#define RPCROUTER_MAX_REMOTE_SERVERS 100
-
-struct rr_fragment {
- unsigned char data[RPCROUTER_MSGSIZE_MAX];
- uint32_t length;
- struct rr_fragment *next;
-};
-
-struct rr_packet {
- struct list_head list;
- struct rr_fragment *first;
- struct rr_fragment *last;
- struct rr_header hdr;
- uint32_t mid;
- uint32_t length;
-};
-
-#define PACMARK_LAST(n) ((n) & 0x80000000)
-#define PACMARK_MID(n) (((n) >> 16) & 0xFF)
-#define PACMARK_LEN(n) ((n) & 0xFFFF)
-
-static inline uint32_t PACMARK(uint32_t len, uint32_t mid, uint32_t first,
- uint32_t last)
-{
- return (len & 0xFFFF) |
- ((mid & 0xFF) << 16) |
- ((!!first) << 30) |
- ((!!last) << 31);
-}
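A minimal round-trip sketch of the pacmark word using the macros above, with
the same not-first/last flags that msm_rpc_write() uses for an unfragmented
message (the values are arbitrary examples):

/* Illustrative only: encode a 100-byte single-fragment message, mid 7. */
uint32_t pm   = PACMARK(100, 7, 0, 1);  /* len=100, mid=7, not first, last */
uint32_t len  = PACMARK_LEN(pm);        /* == 100 */
uint32_t mid  = PACMARK_MID(pm);        /* == 7   */
int      last = !!PACMARK_LAST(pm);     /* == 1   */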
-
-struct rr_server {
- struct list_head list;
-
- uint32_t pid;
- uint32_t cid;
- uint32_t prog;
- uint32_t vers;
-
- dev_t device_number;
- struct cdev cdev;
- struct device *device;
- struct rpcsvr_platform_device p_device;
- char pdev_name[32];
-};
-
-struct rr_remote_endpoint {
- uint32_t pid;
- uint32_t cid;
-
- int tx_quota_cntr;
- spinlock_t quota_lock;
- wait_queue_head_t quota_wait;
-
- struct list_head list;
-};
-
-struct msm_rpc_endpoint {
- struct list_head list;
-
- /* incomplete packets waiting for assembly */
- struct list_head incomplete;
-
- /* complete packets waiting to be read */
- struct list_head read_q;
- spinlock_t read_q_lock;
- wait_queue_head_t wait_q;
- unsigned flags;
-
- /* endpoint address */
- uint32_t pid;
- uint32_t cid;
-
- /* bound remote address
- * if not connected (dst_pid == 0xffffffff) RPC_CALL writes fail
- * RPC_CALLs must be to the prog/vers below or they will fail
- */
- uint32_t dst_pid;
- uint32_t dst_cid;
- uint32_t dst_prog; /* be32 */
- uint32_t dst_vers; /* be32 */
-
- /* reply remote address
- * if reply_pid == 0xffffffff, none available
- * RPC_REPLY writes may only go to the pid/cid/xid of the
- * last RPC_CALL we received.
- */
- uint32_t reply_pid;
- uint32_t reply_cid;
- uint32_t reply_xid; /* be32 */
- uint32_t next_pm; /* Pacmark sequence */
-
- /* device node if this endpoint is accessed via userspace */
- dev_t dev;
-};
-
-/* shared between smd_rpcrouter*.c */
-
-int __msm_rpc_read(struct msm_rpc_endpoint *ept,
- struct rr_fragment **frag,
- unsigned len, long timeout);
-
-struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev);
-int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept);
-
-int msm_rpcrouter_create_server_cdev(struct rr_server *server);
-int msm_rpcrouter_create_server_pdev(struct rr_server *server);
-
-int msm_rpcrouter_init_devices(void);
-void msm_rpcrouter_exit_devices(void);
-
-extern dev_t msm_rpcrouter_devno;
-extern struct class *msm_rpcrouter_class;
-#endif
diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
deleted file mode 100644
index e9c28eddce3..00000000000
--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/* arch/arm/mach-msm/smd_rpcrouter_device.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2009 QUALCOMM Incorporated.
- * Author: San Mehat <san@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/cdev.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/platform_device.h>
-#include <linux/msm_rpcrouter.h>
-#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-
-#include "smd_rpcrouter.h"
-
-#define SAFETY_MEM_SIZE 65536
-
-/* Next minor # available for a remote server */
-static int next_minor = 1;
-
-struct class *msm_rpcrouter_class;
-dev_t msm_rpcrouter_devno;
-
-static struct cdev rpcrouter_cdev;
-static struct device *rpcrouter_device;
-
-static int rpcrouter_open(struct inode *inode, struct file *filp)
-{
- int rc;
- struct msm_rpc_endpoint *ept;
-
- rc = nonseekable_open(inode, filp);
- if (rc < 0)
- return rc;
-
- ept = msm_rpcrouter_create_local_endpoint(inode->i_rdev);
- if (!ept)
- return -ENOMEM;
-
- filp->private_data = ept;
- return 0;
-}
-
-static int rpcrouter_release(struct inode *inode, struct file *filp)
-{
- struct msm_rpc_endpoint *ept;
- ept = (struct msm_rpc_endpoint *) filp->private_data;
-
- return msm_rpcrouter_destroy_local_endpoint(ept);
-}
-
-static ssize_t rpcrouter_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct msm_rpc_endpoint *ept;
- struct rr_fragment *frag, *next;
- int rc;
-
- ept = (struct msm_rpc_endpoint *) filp->private_data;
-
- rc = __msm_rpc_read(ept, &frag, count, -1);
- if (rc < 0)
- return rc;
-
- count = rc;
-
- while (frag != NULL) {
- if (copy_to_user(buf, frag->data, frag->length)) {
- printk(KERN_ERR
- "rpcrouter: could not copy all read data to user!\n");
- rc = -EFAULT;
- }
- buf += frag->length;
- next = frag->next;
- kfree(frag);
- frag = next;
- }
-
- return rc;
-}
-
-static ssize_t rpcrouter_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct msm_rpc_endpoint *ept;
- int rc = 0;
- void *k_buffer;
-
- ept = (struct msm_rpc_endpoint *) filp->private_data;
-
- /* A check for safety, this seems non-standard */
- if (count > SAFETY_MEM_SIZE)
- return -EINVAL;
-
- k_buffer = kmalloc(count, GFP_KERNEL);
- if (!k_buffer)
- return -ENOMEM;
-
- if (copy_from_user(k_buffer, buf, count)) {
- rc = -EFAULT;
- goto write_out_free;
- }
-
- rc = msm_rpc_write(ept, k_buffer, count);
- if (rc < 0)
- goto write_out_free;
-
- rc = count;
-write_out_free:
- kfree(k_buffer);
- return rc;
-}
-
-static unsigned int rpcrouter_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- struct msm_rpc_endpoint *ept;
- unsigned mask = 0;
- ept = (struct msm_rpc_endpoint *) filp->private_data;
-
- /* If there's data already in the read queue, return POLLIN.
-	 * Else, register on the wait queue via poll_wait() and check again.
- */
-
- if (!list_empty(&ept->read_q))
- mask |= POLLIN;
-
- if (!mask) {
- poll_wait(filp, &ept->wait_q, wait);
- if (!list_empty(&ept->read_q))
- mask |= POLLIN;
- }
-
- return mask;
-}
-
-static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct msm_rpc_endpoint *ept;
- struct rpcrouter_ioctl_server_args server_args;
- int rc = 0;
- uint32_t n;
-
- ept = (struct msm_rpc_endpoint *) filp->private_data;
- switch (cmd) {
-
- case RPC_ROUTER_IOCTL_GET_VERSION:
- n = RPC_ROUTER_VERSION_V1;
- rc = put_user(n, (unsigned int *) arg);
- break;
-
- case RPC_ROUTER_IOCTL_GET_MTU:
- /* the pacmark word reduces the actual payload
- * possible per message
- */
- n = RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t);
- rc = put_user(n, (unsigned int *) arg);
- break;
-
- case RPC_ROUTER_IOCTL_REGISTER_SERVER:
- rc = copy_from_user(&server_args, (void *) arg,
- sizeof(server_args));
- if (rc < 0)
- break;
- msm_rpc_register_server(ept,
- server_args.prog,
- server_args.vers);
- break;
-
- case RPC_ROUTER_IOCTL_UNREGISTER_SERVER:
- rc = copy_from_user(&server_args, (void *) arg,
- sizeof(server_args));
- if (rc < 0)
- break;
-
- msm_rpc_unregister_server(ept,
- server_args.prog,
- server_args.vers);
- break;
-
- case RPC_ROUTER_IOCTL_GET_MINOR_VERSION:
- n = MSM_RPC_GET_MINOR(msm_rpc_get_vers(ept));
- rc = put_user(n, (unsigned int *)arg);
- break;
-
- default:
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static struct file_operations rpcrouter_server_fops = {
- .owner = THIS_MODULE,
- .open = rpcrouter_open,
- .release = rpcrouter_release,
- .read = rpcrouter_read,
- .write = rpcrouter_write,
- .poll = rpcrouter_poll,
- .unlocked_ioctl = rpcrouter_ioctl,
-};
-
-static struct file_operations rpcrouter_router_fops = {
- .owner = THIS_MODULE,
- .open = rpcrouter_open,
- .release = rpcrouter_release,
- .read = rpcrouter_read,
- .write = rpcrouter_write,
- .poll = rpcrouter_poll,
- .unlocked_ioctl = rpcrouter_ioctl,
-};
-
-int msm_rpcrouter_create_server_cdev(struct rr_server *server)
-{
- int rc;
- uint32_t dev_vers;
-
- if (next_minor == RPCROUTER_MAX_REMOTE_SERVERS) {
- printk(KERN_ERR
- "rpcrouter: Minor numbers exhausted - Increase "
- "RPCROUTER_MAX_REMOTE_SERVERS\n");
- return -ENOBUFS;
- }
-
-#if CONFIG_MSM_AMSS_VERSION >= 6350
- /* Servers with bit 31 set are remote msm servers with hashkey version.
- * Servers with bit 31 not set are remote msm servers with
- * backwards compatible version type in which case the minor number
- * (lower 16 bits) is set to zero.
- *
- */
- if ((server->vers & RPC_VERSION_MODE_MASK))
- dev_vers = server->vers;
- else
- dev_vers = server->vers & RPC_VERSION_MAJOR_MASK;
-#else
- dev_vers = server->vers;
-#endif
-
- server->device_number =
- MKDEV(MAJOR(msm_rpcrouter_devno), next_minor++);
-
- server->device =
- device_create(msm_rpcrouter_class, rpcrouter_device,
- server->device_number, NULL, "%.8x:%.8x",
- server->prog, dev_vers);
- if (IS_ERR(server->device)) {
- printk(KERN_ERR
- "rpcrouter: Unable to create device (%ld)\n",
- PTR_ERR(server->device));
-		return PTR_ERR(server->device);
- }
-
- cdev_init(&server->cdev, &rpcrouter_server_fops);
- server->cdev.owner = THIS_MODULE;
-
- rc = cdev_add(&server->cdev, server->device_number, 1);
- if (rc < 0) {
- printk(KERN_ERR
- "rpcrouter: Unable to add chrdev (%d)\n", rc);
- device_destroy(msm_rpcrouter_class, server->device_number);
- return rc;
- }
- return 0;
-}
-
-/* for backward compatible version type (31st bit cleared)
- * clearing minor number (lower 16 bits) in device name
- * is necessary for driver binding
- */
-int msm_rpcrouter_create_server_pdev(struct rr_server *server)
-{
- sprintf(server->pdev_name, "rs%.8x:%.8x",
- server->prog,
-#if CONFIG_MSM_AMSS_VERSION >= 6350
- (server->vers & RPC_VERSION_MODE_MASK) ? server->vers :
- (server->vers & RPC_VERSION_MAJOR_MASK));
-#else
- server->vers);
-#endif
-
- server->p_device.base.id = -1;
- server->p_device.base.name = server->pdev_name;
-
- server->p_device.prog = server->prog;
- server->p_device.vers = server->vers;
-
- platform_device_register(&server->p_device.base);
- return 0;
-}
-
-int msm_rpcrouter_init_devices(void)
-{
- int rc;
- int major;
-
- /* Create the device nodes */
- msm_rpcrouter_class = class_create(THIS_MODULE, "oncrpc");
- if (IS_ERR(msm_rpcrouter_class)) {
- rc = -ENOMEM;
- printk(KERN_ERR
- "rpcrouter: failed to create oncrpc class\n");
- goto fail;
- }
-
- rc = alloc_chrdev_region(&msm_rpcrouter_devno, 0,
- RPCROUTER_MAX_REMOTE_SERVERS + 1,
- "oncrpc");
- if (rc < 0) {
- printk(KERN_ERR
- "rpcrouter: Failed to alloc chardev region (%d)\n", rc);
- goto fail_destroy_class;
- }
-
- major = MAJOR(msm_rpcrouter_devno);
- rpcrouter_device = device_create(msm_rpcrouter_class, NULL,
- msm_rpcrouter_devno, NULL, "%.8x:%d",
- 0, 0);
- if (IS_ERR(rpcrouter_device)) {
- rc = -ENOMEM;
- goto fail_unregister_cdev_region;
- }
-
- cdev_init(&rpcrouter_cdev, &rpcrouter_router_fops);
- rpcrouter_cdev.owner = THIS_MODULE;
-
- rc = cdev_add(&rpcrouter_cdev, msm_rpcrouter_devno, 1);
- if (rc < 0)
- goto fail_destroy_device;
-
- return 0;
-
-fail_destroy_device:
- device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
-fail_unregister_cdev_region:
- unregister_chrdev_region(msm_rpcrouter_devno,
- RPCROUTER_MAX_REMOTE_SERVERS + 1);
-fail_destroy_class:
- class_destroy(msm_rpcrouter_class);
-fail:
- return rc;
-}
-
-void msm_rpcrouter_exit_devices(void)
-{
- cdev_del(&rpcrouter_cdev);
- device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
- unregister_chrdev_region(msm_rpcrouter_devno,
- RPCROUTER_MAX_REMOTE_SERVERS + 1);
- class_destroy(msm_rpcrouter_class);
-}
-
diff --git a/drivers/staging/dream/smd/smd_rpcrouter_servers.c b/drivers/staging/dream/smd/smd_rpcrouter_servers.c
deleted file mode 100644
index 1b152abb278..00000000000
--- a/drivers/staging/dream/smd/smd_rpcrouter_servers.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/* arch/arm/mach-msm/rpc_servers.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Iliyan Malchev <ibm@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/cdev.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/wakelock.h>
-#include <linux/slab.h>
-
-#include <linux/msm_rpcrouter.h>
-#include <linux/uaccess.h>
-
-#include <mach/msm_rpcrouter.h>
-#include "smd_rpcrouter.h"
-
-static struct msm_rpc_endpoint *endpoint;
-
-#define FLAG_REGISTERED 0x0001
-
-static LIST_HEAD(rpc_server_list);
-static DEFINE_MUTEX(rpc_server_list_lock);
-static int rpc_servers_active;
-static struct wake_lock rpc_servers_wake_lock;
-
-static void rpc_server_register(struct msm_rpc_server *server)
-{
- int rc;
- rc = msm_rpc_register_server(endpoint, server->prog, server->vers);
- if (rc < 0)
- printk(KERN_ERR "[rpcserver] error registering %p @ %08x:%d\n",
- server, server->prog, server->vers);
-}
-
-static struct msm_rpc_server *rpc_server_find(uint32_t prog, uint32_t vers)
-{
- struct msm_rpc_server *server;
-
- mutex_lock(&rpc_server_list_lock);
- list_for_each_entry(server, &rpc_server_list, list) {
- if ((server->prog == prog) &&
-#if CONFIG_MSM_AMSS_VERSION >= 6350
- msm_rpc_is_compatible_version(server->vers, vers)) {
-#else
- server->vers == vers) {
-#endif
- mutex_unlock(&rpc_server_list_lock);
- return server;
- }
- }
- mutex_unlock(&rpc_server_list_lock);
- return NULL;
-}
-
-static void rpc_server_register_all(void)
-{
- struct msm_rpc_server *server;
-
- mutex_lock(&rpc_server_list_lock);
- list_for_each_entry(server, &rpc_server_list, list) {
- if (!(server->flags & FLAG_REGISTERED)) {
- rpc_server_register(server);
- server->flags |= FLAG_REGISTERED;
- }
- }
- mutex_unlock(&rpc_server_list_lock);
-}
-
-int msm_rpc_create_server(struct msm_rpc_server *server)
-{
- /* make sure we're in a sane state first */
- server->flags = 0;
- INIT_LIST_HEAD(&server->list);
-
- mutex_lock(&rpc_server_list_lock);
- list_add(&server->list, &rpc_server_list);
- if (rpc_servers_active) {
- rpc_server_register(server);
- server->flags |= FLAG_REGISTERED;
- }
- mutex_unlock(&rpc_server_list_lock);
-
- return 0;
-}
-
-static int rpc_send_accepted_void_reply(struct msm_rpc_endpoint *client,
- uint32_t xid, uint32_t accept_status)
-{
- int rc = 0;
- uint8_t reply_buf[sizeof(struct rpc_reply_hdr)];
- struct rpc_reply_hdr *reply = (struct rpc_reply_hdr *)reply_buf;
-
- reply->xid = cpu_to_be32(xid);
- reply->type = cpu_to_be32(1); /* reply */
- reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED);
-
- reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status);
- reply->data.acc_hdr.verf_flavor = 0;
- reply->data.acc_hdr.verf_length = 0;
-
- rc = msm_rpc_write(client, reply_buf, sizeof(reply_buf));
- if (rc < 0)
- printk(KERN_ERR
- "%s: could not write response: %d\n",
- __FUNCTION__, rc);
-
- return rc;
-}
-
-static int rpc_servers_thread(void *data)
-{
- void *buffer;
- struct rpc_request_hdr *req;
- struct msm_rpc_server *server;
- int rc;
-
- for (;;) {
- wake_unlock(&rpc_servers_wake_lock);
- rc = wait_event_interruptible(endpoint->wait_q,
- !list_empty(&endpoint->read_q));
- wake_lock(&rpc_servers_wake_lock);
- rc = msm_rpc_read(endpoint, &buffer, -1, -1);
- if (rc < 0) {
- printk(KERN_ERR "%s: could not read: %d\n",
- __FUNCTION__, rc);
- break;
- }
- req = (struct rpc_request_hdr *)buffer;
-
- req->type = be32_to_cpu(req->type);
- req->xid = be32_to_cpu(req->xid);
- req->rpc_vers = be32_to_cpu(req->rpc_vers);
- req->prog = be32_to_cpu(req->prog);
- req->vers = be32_to_cpu(req->vers);
- req->procedure = be32_to_cpu(req->procedure);
-
- server = rpc_server_find(req->prog, req->vers);
-
- if (req->rpc_vers != 2)
- continue;
- if (req->type != 0)
- continue;
- if (!server) {
- rpc_send_accepted_void_reply(
- endpoint, req->xid,
- RPC_ACCEPTSTAT_PROG_UNAVAIL);
- continue;
- }
-
- rc = server->rpc_call(server, req, rc);
-
- switch (rc) {
- case 0:
- rpc_send_accepted_void_reply(
- endpoint, req->xid,
- RPC_ACCEPTSTAT_SUCCESS);
- break;
- default:
- rpc_send_accepted_void_reply(
- endpoint, req->xid,
- RPC_ACCEPTSTAT_PROG_UNAVAIL);
- break;
- }
-
- kfree(buffer);
- }
-
- do_exit(0);
-}
-
-static int rpcservers_probe(struct platform_device *pdev)
-{
- struct task_struct *server_thread;
-
- endpoint = msm_rpc_open();
- if (IS_ERR(endpoint))
- return PTR_ERR(endpoint);
-
- /* we're online -- register any servers installed beforehand */
- rpc_servers_active = 1;
- rpc_server_register_all();
-
- /* start the kernel thread */
- server_thread = kthread_run(rpc_servers_thread, NULL, "krpcserversd");
- if (IS_ERR(server_thread))
- return PTR_ERR(server_thread);
-
- return 0;
-}
-
-static struct platform_driver rpcservers_driver = {
- .probe = rpcservers_probe,
- .driver = {
- .name = "oncrpc_router",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init rpc_servers_init(void)
-{
- wake_lock_init(&rpc_servers_wake_lock, WAKE_LOCK_SUSPEND, "rpc_server");
- return platform_driver_register(&rpcservers_driver);
-}
-
-module_init(rpc_servers_init);
-
-MODULE_DESCRIPTION("MSM RPC Servers");
-MODULE_AUTHOR("Iliyan Malchev <ibm@android.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dream/smd/smd_tty.c b/drivers/staging/dream/smd/smd_tty.c
deleted file mode 100644
index f40944958d4..00000000000
--- a/drivers/staging/dream/smd/smd_tty.c
+++ /dev/null
@@ -1,208 +0,0 @@
-/* arch/arm/mach-msm/smd_tty.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-
-#include <mach/msm_smd.h>
-
-#define MAX_SMD_TTYS 32
-
-static DEFINE_MUTEX(smd_tty_lock);
-
-struct smd_tty_info {
- smd_channel_t *ch;
- struct tty_struct *tty;
- int open_count;
-};
-
-static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
-
-
-static void smd_tty_notify(void *priv, unsigned event)
-{
- unsigned char *ptr;
- int avail;
- struct smd_tty_info *info = priv;
- struct tty_struct *tty = info->tty;
-
- if (!tty)
- return;
-
- if (event != SMD_EVENT_DATA)
- return;
-
- for (;;) {
- if (test_bit(TTY_THROTTLED, &tty->flags)) break;
- avail = smd_read_avail(info->ch);
- if (avail == 0) break;
-
- avail = tty_prepare_flip_string(tty, &ptr, avail);
-
- if (smd_read(info->ch, ptr, avail) != avail) {
- /* shouldn't be possible since we're in interrupt
- ** context here and nobody else could 'steal' our
- ** characters.
- */
-			printk(KERN_ERR "OOPS - smd_tty_buffer mismatch?!\n");
- }
-
- tty_flip_buffer_push(tty);
- }
-
- /* XXX only when writable and necessary */
- tty_wakeup(tty);
-}
-
-static int smd_tty_open(struct tty_struct *tty, struct file *f)
-{
- int res = 0;
- int n = tty->index;
- struct smd_tty_info *info;
- const char *name;
-
- if (n == 0) {
- name = "SMD_DS";
- } else if (n == 27) {
- name = "SMD_GPSNMEA";
- } else {
- return -ENODEV;
- }
-
- info = smd_tty + n;
-
- mutex_lock(&smd_tty_lock);
- tty->driver_data = info;
-
- if (info->open_count++ == 0) {
- info->tty = tty;
- if (info->ch) {
- smd_kick(info->ch);
- } else {
- res = smd_open(name, &info->ch, info, smd_tty_notify);
- }
- }
- mutex_unlock(&smd_tty_lock);
-
- return res;
-}
-
-static void smd_tty_close(struct tty_struct *tty, struct file *f)
-{
- struct smd_tty_info *info = tty->driver_data;
-
- if (info == 0)
- return;
-
- mutex_lock(&smd_tty_lock);
- if (--info->open_count == 0) {
- info->tty = 0;
- tty->driver_data = 0;
- if (info->ch) {
- smd_close(info->ch);
- info->ch = 0;
- }
- }
- mutex_unlock(&smd_tty_lock);
-}
-
-static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
-{
- struct smd_tty_info *info = tty->driver_data;
- int avail;
-
- /* if we're writing to a packet channel we will
- ** never be able to write more data than there
- ** is currently space for
- */
- avail = smd_write_avail(info->ch);
- if (len > avail)
- len = avail;
-
- return smd_write(info->ch, buf, len);
-}
-
-static int smd_tty_write_room(struct tty_struct *tty)
-{
- struct smd_tty_info *info = tty->driver_data;
- return smd_write_avail(info->ch);
-}
-
-static int smd_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct smd_tty_info *info = tty->driver_data;
- return smd_read_avail(info->ch);
-}
-
-static void smd_tty_unthrottle(struct tty_struct *tty)
-{
- struct smd_tty_info *info = tty->driver_data;
- smd_kick(info->ch);
-}
-
-static struct tty_operations smd_tty_ops = {
- .open = smd_tty_open,
- .close = smd_tty_close,
- .write = smd_tty_write,
- .write_room = smd_tty_write_room,
- .chars_in_buffer = smd_tty_chars_in_buffer,
- .unthrottle = smd_tty_unthrottle,
-};
-
-static struct tty_driver *smd_tty_driver;
-
-static int __init smd_tty_init(void)
-{
- int ret;
-
- smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
- if (smd_tty_driver == 0)
- return -ENOMEM;
-
- smd_tty_driver->owner = THIS_MODULE;
- smd_tty_driver->driver_name = "smd_tty_driver";
- smd_tty_driver->name = "smd";
- smd_tty_driver->major = 0;
- smd_tty_driver->minor_start = 0;
- smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- smd_tty_driver->init_termios = tty_std_termios;
- smd_tty_driver->init_termios.c_iflag = 0;
- smd_tty_driver->init_termios.c_oflag = 0;
- smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
- smd_tty_driver->init_termios.c_lflag = 0;
- smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
- TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(smd_tty_driver, &smd_tty_ops);
-
- ret = tty_register_driver(smd_tty_driver);
- if (ret) return ret;
-
- /* this should be dynamic */
- tty_register_device(smd_tty_driver, 0, 0);
- tty_register_device(smd_tty_driver, 27, 0);
-
- return 0;
-}
-
-module_init(smd_tty_init);
diff --git a/drivers/staging/dream/synaptics_i2c_rmi.c b/drivers/staging/dream/synaptics_i2c_rmi.c
index d2ca116a1c2..1f020dad623 100644
--- a/drivers/staging/dream/synaptics_i2c_rmi.c
+++ b/drivers/staging/dream/synaptics_i2c_rmi.c
@@ -109,9 +109,7 @@ static void decode_report(struct synaptics_ts_data *ts, u8 *buf)
int f, a;
int base = 2;
int z = buf[1];
- int w = buf[0] >> 4;
int finger = buf[0] & 7;
- int finger2_pressed;
for (f = 0; f < 2; f++) {
u32 flip_flag = SYNAPTICS_FLIP_X;
@@ -151,14 +149,7 @@ static void decode_report(struct synaptics_ts_data *ts, u8 *buf)
input_report_abs(ts->input_dev, ABS_Y, pos[0][1]);
}
input_report_abs(ts->input_dev, ABS_PRESSURE, z);
- input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w);
input_report_key(ts->input_dev, BTN_TOUCH, finger);
- finger2_pressed = finger > 1 && finger != 7;
- input_report_key(ts->input_dev, BTN_2, finger2_pressed);
- if (finger2_pressed) {
- input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]);
- input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]);
- }
input_sync(ts->input_dev);
}
@@ -208,8 +199,6 @@ static void synaptics_ts_work_func(struct work_struct *work)
decode_report(ts, buf);
}
- if (ts->use_irq)
- enable_irq(ts->client->irq);
}
static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer)
@@ -227,8 +216,7 @@ static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id)
{
struct synaptics_ts_data *ts = dev_id;
- disable_irq_nosync(ts->client->irq);
- queue_work(synaptics_wq, &ts->work);
+ synaptics_ts_work_func(&ts->work);
return IRQ_HANDLED;
}
@@ -347,11 +335,6 @@ static void compute_areas(struct synaptics_ts_data *ts,
-inactive_area_top, max_y + inactive_area_bottom,
fuzz_y, 0);
input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0);
- input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0);
- input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left,
- max_x + inactive_area_right, fuzz_x, 0);
- input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top,
- max_y + inactive_area_bottom, fuzz_y, 0);
}
static struct synaptics_i2c_rmi_platform_data fake_pdata;
@@ -487,7 +470,6 @@ static int __devinit synaptics_ts_probe(
__set_bit(EV_SYN, ts->input_dev->evbit);
__set_bit(EV_KEY, ts->input_dev->evbit);
__set_bit(BTN_TOUCH, ts->input_dev->keybit);
- __set_bit(BTN_2, ts->input_dev->keybit);
__set_bit(EV_ABS, ts->input_dev->evbit);
compute_areas(ts, pdata, max_x, max_y);
@@ -500,8 +482,10 @@ static int __devinit synaptics_ts_probe(
goto err_input_register_device_failed;
}
if (client->irq) {
- ret = request_irq(client->irq, synaptics_ts_irq_handler,
- 0, client->name, ts);
+ ret = request_threaded_irq(client->irq, NULL,
+ synaptics_ts_irq_handler,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ client->name, ts);
if (ret == 0) {
ret = i2c_set(ts, 0xf1, 0x01, "enable abs int");
if (ret)
@@ -535,6 +519,7 @@ err_input_register_device_failed:
err_input_dev_alloc_failed:
err_detect_failed:
err_power_failed:
+ i2c_set_clientdata(client, NULL);
kfree(ts);
err_alloc_data_failed:
err_check_functionality_failed:
@@ -552,6 +537,7 @@ static int synaptics_ts_remove(struct i2c_client *client)
else
hrtimer_cancel(&ts->timer);
input_unregister_device(ts->input_dev);
+ i2c_set_clientdata(client, NULL);
kfree(ts);
return 0;
}
diff --git a/drivers/staging/dt3155/allocator.c b/drivers/staging/dt3155/allocator.c
index db382ef9021..bd5adbc2a23 100644
--- a/drivers/staging/dt3155/allocator.c
+++ b/drivers/staging/dt3155/allocator.c
@@ -45,7 +45,6 @@
# define MODULE
#endif
-#include <linux/version.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -59,6 +58,8 @@
#include <asm/page.h>
+#include "allocator.h"
+
/*#define ALL_DEBUG*/
#define ALL_MSG "allocator: "
@@ -84,9 +85,9 @@
/*#define PDEBUGG(fmt, args...) printk( KERN_DEBUG ALL_MSG fmt, ## args)*/
-int allocator_himem = 1; /* 0 = probe, pos. = megs, neg. = disable */
-int allocator_step = 1; /* This is the step size in MB */
-int allocator_probe = 1; /* This is a flag -- 1=probe, 0=don't probe */
+static int allocator_himem = 1; /* 0 = probe, pos. = megs, neg. = disable */
+static int allocator_step = 1; /* This is the step size in MB */
+static int allocator_probe = 1; /* This is a flag -- 1=probe, 0=don't probe */
static unsigned long allocator_buffer; /* physical address */
static unsigned long allocator_buffer_size; /* kilobytes */
@@ -102,8 +103,7 @@ struct allocator_struct {
struct allocator_struct *next;
};
-struct allocator_struct *allocator_list;
-
+static struct allocator_struct *allocator_list;
#ifdef ALL_DEBUG
static int dump_list(void)
@@ -125,7 +125,7 @@ static int dump_list(void)
* be used straight ahead for DMA, but needs remapping for program use).
*/
-unsigned long allocator_allocate_dma(unsigned long kilobytes, int prio)
+unsigned long allocator_allocate_dma(unsigned long kilobytes, gfp_t flags)
{
struct allocator_struct *ptr = allocator_list, *newptr;
unsigned long bytes = kilobytes << 10;
@@ -148,7 +148,7 @@ unsigned long allocator_allocate_dma(unsigned long kilobytes, int prio)
PDEBUG("alloc failed\n");
return 0; /* end of list */
}
- newptr = kmalloc(sizeof(struct allocator_struct), prio);
+ newptr = kmalloc(sizeof(struct allocator_struct), flags);
if (!newptr)
return 0;
diff --git a/drivers/staging/dt3155/allocator.h b/drivers/staging/dt3155/allocator.h
index bdf3268ca52..425b70fcd50 100644
--- a/drivers/staging/dt3155/allocator.h
+++ b/drivers/staging/dt3155/allocator.h
@@ -22,7 +22,7 @@
*
*/
-void allocator_free_dma(unsigned long address);
-unsigned long allocator_allocate_dma(unsigned long kilobytes, int priority);
+int allocator_free_dma(unsigned long address);
+unsigned long allocator_allocate_dma(unsigned long kilobytes, gfp_t flags);
int allocator_init(u32 *);
void allocator_cleanup(void);
diff --git a/drivers/staging/dt3155/dt3155.h b/drivers/staging/dt3155/dt3155.h
index 1bf786364ee..793e2fcf446 100644
--- a/drivers/staging/dt3155/dt3155.h
+++ b/drivers/staging/dt3155/dt3155.h
@@ -34,19 +34,9 @@ MA 02111-1307 USA
#ifndef _DT3155_INC
#define _DT3155_INC
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/time.h> /* struct timeval */
-#else
-#include <sys/ioctl.h>
-#include <sys/param.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#define TRUE 1
-#define FALSE 0
/* Uncomment this for 50Hz CCIR */
#define CCIR 1
@@ -62,15 +52,15 @@ MA 02111-1307 USA
#ifdef CCIR
#define DT3155_MAX_ROWS 576
#define DT3155_MAX_COLS 768
-#define FORMAT50HZ TRUE
+#define FORMAT50HZ 1
#else
#define DT3155_MAX_ROWS 480
#define DT3155_MAX_COLS 640
-#define FORMAT50HZ FALSE
+#define FORMAT50HZ 0
#endif
/* Configuration structure */
-struct dt3155_config_s {
+struct dt3155_config {
u32 acq_mode;
u32 cols, rows;
u32 continuous;
@@ -78,20 +68,20 @@ struct dt3155_config_s {
/* hold data for each frame */
-typedef struct {
+struct frame_info {
u32 addr; /* address of the buffer with the frame */
u32 tag; /* unique number for the frame */
struct timeval time; /* time that capture took place */
-} frame_info_t;
+};
/*
* Structure for interrupt and buffer handling.
* This is the setup for 1 card
*/
-struct dt3155_fbuffer_s {
+struct dt3155_fbuffer {
int nbuffers;
- frame_info_t frame_info[BOARD_MAX_BUFFS];
+ struct frame_info frame_info[BOARD_MAX_BUFFS];
int empty_buffers[BOARD_MAX_BUFFS]; /* indexes empty frames */
int empty_len; /* Number of empty buffers */
@@ -120,20 +110,20 @@ struct dt3155_fbuffer_s {
#define DT3155_ACQ 2
/* There is one status structure for each card. */
-typedef struct dt3155_status_s {
+struct dt3155_status {
int fixed_mode; /* if 1, we are in fixed frame mode */
u32 reg_addr; /* Register address for a single card */
u32 mem_addr; /* Buffer start addr for this card */
u32 mem_size; /* This is the amount of mem available */
u32 irq; /* this card's irq */
- struct dt3155_config_s config; /* configuration struct */
- struct dt3155_fbuffer_s fbuffer; /* frame buffer state struct */
+ struct dt3155_config config; /* configuration struct */
+ struct dt3155_fbuffer fbuffer; /* frame buffer state struct */
u32 state; /* this card's state */
u32 device_installed; /* Flag if installed. 1=installed */
-} dt3155_status_t;
+};
/* Reference to global status structure */
-extern struct dt3155_status_s dt3155_status[MAXBOARDS];
+extern struct dt3155_status dt3155_status[MAXBOARDS];
#define DT3155_STATE_IDLE 0x00
#define DT3155_STATE_FRAME 0x01
@@ -144,8 +134,8 @@ extern struct dt3155_status_s dt3155_status[MAXBOARDS];
#define DT3155_IOC_MAGIC '!'
-#define DT3155_SET_CONFIG _IOW(DT3155_IOC_MAGIC, 1, struct dt3155_config_s)
-#define DT3155_GET_CONFIG _IOR(DT3155_IOC_MAGIC, 2, struct dt3155_status_s)
+#define DT3155_SET_CONFIG _IOW(DT3155_IOC_MAGIC, 1, struct dt3155_config)
+#define DT3155_GET_CONFIG _IOR(DT3155_IOC_MAGIC, 2, struct dt3155_status)
#define DT3155_STOP _IO(DT3155_IOC_MAGIC, 3)
#define DT3155_START _IO(DT3155_IOC_MAGIC, 4)
#define DT3155_FLUSH _IO(DT3155_IOC_MAGIC, 5)
@@ -160,12 +150,12 @@ extern struct dt3155_status_s dt3155_status[MAXBOARDS];
#define DT_ERR_MASK 0xff0000/* not used but it might be one day */
/* User code will probably want to declare one of these for each card */
-typedef struct dt3155_read_s {
+struct dt3155_read {
u32 offset;
u32 frame_seq;
u32 state;
- frame_info_t frame_info;
-} dt3155_read_t;
+ struct frame_info frame_info;
+};
#endif /* _DT3155_inc */
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c
index 7ac2c6d8e9a..40ef97f3feb 100644
--- a/drivers/staging/dt3155/dt3155_drv.c
+++ b/drivers/staging/dt3155/dt3155_drv.c
@@ -63,6 +63,7 @@ extern void printques(int);
#include <linux/types.h>
#include <linux/poll.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -94,7 +95,7 @@ int dt3155_errno = 0;
#endif
/* wait queue for interrupts */
-wait_queue_head_t dt3155_read_wait_queue[ MAXBOARDS ];
+wait_queue_head_t dt3155_read_wait_queue[MAXBOARDS];
#define DT_3155_SUCCESS 0
#define DT_3155_FAILURE -EIO
@@ -111,10 +112,10 @@ int dt3155_major = 0;
/* Global structures and variables */
/* Status of each device */
-struct dt3155_status_s dt3155_status[ MAXBOARDS ];
+struct dt3155_status dt3155_status[MAXBOARDS];
/* kernel logical address of the board */
-u8 *dt3155_lbase[ MAXBOARDS ] = { NULL
+u8 *dt3155_lbase[MAXBOARDS] = { NULL
#if MAXBOARDS == 2
, NULL
#endif
@@ -122,7 +123,7 @@ u8 *dt3155_lbase[ MAXBOARDS ] = { NULL
/* DT3155 registers */
u8 *dt3155_bbase = NULL; /* kernel logical address of the *
* buffer region */
-u32 dt3155_dev_open[ MAXBOARDS ] = {0
+u32 dt3155_dev_open[MAXBOARDS] = {0
#if MAXBOARDS == 2
, 0
#endif
@@ -141,17 +142,17 @@ static void quick_stop (int minor)
{
// TODO: scott was here
#if 1
- ReadMReg((dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg);
+ ReadMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
/* disable interrupts */
int_csr_r.fld.FLD_END_EVE_EN = 0;
int_csr_r.fld.FLD_END_ODD_EN = 0;
- WriteMReg((dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg );
+ WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
- dt3155_status[ minor ].state &= ~(DT3155_STATE_STOP|0xff);
+ dt3155_status[minor].state &= ~(DT3155_STATE_STOP|0xff);
/* mark the system stopped: */
- dt3155_status[ minor ].state |= DT3155_STATE_IDLE;
- dt3155_fbuffer[ minor ]->stop_acquire = 0;
- dt3155_fbuffer[ minor ]->even_stopped = 0;
+ dt3155_status[minor].state |= DT3155_STATE_IDLE;
+ dt3155_fbuffer[minor]->stop_acquire = 0;
+ dt3155_fbuffer[minor]->even_stopped = 0;
#else
dt3155_status[minor].state |= DT3155_STATE_STOP;
dt3155_status[minor].fbuffer.stop_acquire = 1;
@@ -167,7 +168,7 @@ static void quick_stop (int minor)
* - Assumes irq's are disabled, via SA_INTERRUPT flag
* being set in request_irq() call from init_module()
*****************************************************/
-static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
+static void dt3155_isr(int irq, void *dev_id, struct pt_regs *regs)
{
int minor = -1;
int index;
@@ -175,8 +176,8 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
u32 buffer_addr;
/* find out who issued the interrupt */
- for ( index = 0; index < ndevices; index++ ) {
- if( dev_id == (void*) &dt3155_status[ index ])
+ for (index = 0; index < ndevices; index++) {
+ if(dev_id == (void*) &dt3155_status[index])
{
minor = index;
break;
@@ -184,15 +185,15 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
}
/* hopefully we should not get here */
- if ( minor < 0 || minor >= MAXBOARDS ) {
+ if (minor < 0 || minor >= MAXBOARDS) {
printk(KERN_ERR "dt3155_isr called with invalid dev_id\n");
return;
}
/* Check for corruption and set a flag if so */
- ReadMReg( (dt3155_lbase[ minor ] + CSR1), csr1_r.reg );
+ ReadMReg((dt3155_lbase[minor] + CSR1), csr1_r.reg);
- if ( (csr1_r.fld.FLD_CRPT_EVE) || (csr1_r.fld.FLD_CRPT_ODD) )
+ if ((csr1_r.fld.FLD_CRPT_EVE) || (csr1_r.fld.FLD_CRPT_ODD))
{
/* TODO: this should probably stop acquisition */
/* and set some flags so that dt3155_read */
@@ -202,27 +203,27 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
return;
}
- ReadMReg((dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg);
+ ReadMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
/* Handle the even field ... */
if (int_csr_r.fld.FLD_END_EVE)
{
- if ( (dt3155_status[ minor ].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD )
+ if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
+ DT3155_STATE_FLD)
{
- dt3155_fbuffer[ minor ]->frame_count++;
+ dt3155_fbuffer[minor]->frame_count++;
}
- ReadI2C(dt3155_lbase[ minor ], EVEN_CSR, &i2c_even_csr.reg);
+ ReadI2C(dt3155_lbase[minor], EVEN_CSR, &i2c_even_csr.reg);
/* Clear the interrupt? */
int_csr_r.fld.FLD_END_EVE = 1;
/* disable the interrupt if last field */
- if (dt3155_fbuffer[ minor ]->stop_acquire)
+ if (dt3155_fbuffer[minor]->stop_acquire)
{
printk("dt3155: even stopped.\n");
- dt3155_fbuffer[ minor ]->even_stopped = 1;
+ dt3155_fbuffer[minor]->even_stopped = 1;
if (i2c_even_csr.fld.SNGL_EVE)
{
int_csr_r.fld.FLD_END_EVE_EN = 0;
@@ -233,75 +234,75 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
}
}
- WriteMReg( (dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg );
+ WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
/* Set up next DMA if we are doing FIELDS */
- if ( (dt3155_status[ minor ].state & DT3155_STATE_MODE ) ==
+ if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
DT3155_STATE_FLD)
{
/* GCS (Aug 2, 2002) -- In field mode, dma the odd field
into the lower half of the buffer */
- const u32 stride = dt3155_status[ minor ].config.cols;
- buffer_addr = dt3155_fbuffer[ minor ]->
- frame_info[ dt3155_fbuffer[ minor ]->active_buf ].addr
+ const u32 stride = dt3155_status[minor].config.cols;
+ buffer_addr = dt3155_fbuffer[minor]->
+ frame_info[dt3155_fbuffer[minor]->active_buf].addr
+ (DT3155_MAX_ROWS / 2) * stride;
local_save_flags(flags);
local_irq_disable();
- wake_up_interruptible( &dt3155_read_wait_queue[ minor ] );
+ wake_up_interruptible(&dt3155_read_wait_queue[minor]);
/* Set up the DMA address for the next field */
local_irq_restore(flags);
- WriteMReg((dt3155_lbase[ minor ] + ODD_DMA_START), buffer_addr);
+ WriteMReg((dt3155_lbase[minor] + ODD_DMA_START), buffer_addr);
}
/* Check for errors. */
i2c_even_csr.fld.DONE_EVE = 1;
- if ( i2c_even_csr.fld.ERROR_EVE )
+ if (i2c_even_csr.fld.ERROR_EVE)
dt3155_errno = DT_ERR_OVERRUN;
- WriteI2C( dt3155_lbase[ minor ], EVEN_CSR, i2c_even_csr.reg );
+ WriteI2C(dt3155_lbase[minor], EVEN_CSR, i2c_even_csr.reg);
/* Note that we actually saw an even field meaning */
/* that subsequent odd field complete the frame */
- dt3155_fbuffer[ minor ]->even_happened = 1;
+ dt3155_fbuffer[minor]->even_happened = 1;
/* recording the time that the even field finished, this should be */
/* about time in the middle of the frame */
- do_gettimeofday( &(dt3155_fbuffer[ minor ]->
- frame_info[ dt3155_fbuffer[ minor ]->
- active_buf ].time) );
+ do_gettimeofday(&(dt3155_fbuffer[minor]->
+ frame_info[dt3155_fbuffer[minor]->
+ active_buf].time));
return;
}
/* ... now handle the odd field */
- if ( int_csr_r.fld.FLD_END_ODD )
+ if (int_csr_r.fld.FLD_END_ODD)
{
- ReadI2C( dt3155_lbase[ minor ], ODD_CSR, &i2c_odd_csr.reg );
+ ReadI2C(dt3155_lbase[minor], ODD_CSR, &i2c_odd_csr.reg);
/* Clear the interrupt? */
int_csr_r.fld.FLD_END_ODD = 1;
- if (dt3155_fbuffer[ minor ]->even_happened ||
- (dt3155_status[ minor ].state & DT3155_STATE_MODE) ==
+ if (dt3155_fbuffer[minor]->even_happened ||
+ (dt3155_status[minor].state & DT3155_STATE_MODE) ==
DT3155_STATE_FLD)
{
- dt3155_fbuffer[ minor ]->frame_count++;
+ dt3155_fbuffer[minor]->frame_count++;
}
- if ( dt3155_fbuffer[ minor ]->stop_acquire &&
- dt3155_fbuffer[ minor ]->even_stopped )
+ if (dt3155_fbuffer[minor]->stop_acquire &&
+ dt3155_fbuffer[minor]->even_stopped)
{
printk(KERN_DEBUG "dt3155: stopping odd..\n");
- if ( i2c_odd_csr.fld.SNGL_ODD )
+ if (i2c_odd_csr.fld.SNGL_ODD)
{
/* disable interrupts */
int_csr_r.fld.FLD_END_ODD_EN = 0;
- dt3155_status[ minor ].state &= ~(DT3155_STATE_STOP|0xff);
+ dt3155_status[minor].state &= ~(DT3155_STATE_STOP|0xff);
/* mark the system stopped: */
- dt3155_status[ minor ].state |= DT3155_STATE_IDLE;
- dt3155_fbuffer[ minor ]->stop_acquire = 0;
- dt3155_fbuffer[ minor ]->even_stopped = 0;
+ dt3155_status[minor].state |= DT3155_STATE_IDLE;
+ dt3155_fbuffer[minor]->stop_acquire = 0;
+ dt3155_fbuffer[minor]->even_stopped = 0;
printk(KERN_DEBUG "dt3155: state is now %x\n",
dt3155_status[minor].state);
@@ -312,104 +313,104 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
}
}
- WriteMReg( (dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg );
+ WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
/* if the odd field has been acquired, then */
/* change the next dma location for both fields */
/* and wake up the process if sleeping */
- if ( dt3155_fbuffer[ minor ]->even_happened ||
- (dt3155_status[ minor ].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD )
+ if (dt3155_fbuffer[minor]->even_happened ||
+ (dt3155_status[minor].state & DT3155_STATE_MODE) ==
+ DT3155_STATE_FLD)
{
local_save_flags(flags);
local_irq_disable();
#ifdef DEBUG_QUES_B
- printques( minor );
+ printques(minor);
#endif
- if ( dt3155_fbuffer[ minor ]->nbuffers > 2 )
+ if (dt3155_fbuffer[minor]->nbuffers > 2)
{
- if ( !are_empty_buffers( minor ) )
+ if (!are_empty_buffers(minor))
{
/* The number of active + locked buffers is
* at most 2, and since there are none empty, there
* must be at least nbuffers-2 ready buffers.
* This is where we 'drop frames', oldest first. */
- push_empty( pop_ready( minor ), minor );
+ push_empty(pop_ready(minor), minor);
}
/* The ready_que can't be full, since we know
* there is one active buffer right now, so it's safe
* to push the active buf on the ready_que. */
- push_ready( minor, dt3155_fbuffer[ minor ]->active_buf );
+ push_ready(minor, dt3155_fbuffer[minor]->active_buf);
/* There's at least 1 empty -- make it active */
- dt3155_fbuffer[ minor ]->active_buf = pop_empty( minor );
- dt3155_fbuffer[ minor ]->
- frame_info[ dt3155_fbuffer[ minor ]->
- active_buf ].tag = ++unique_tag;
+ dt3155_fbuffer[minor]->active_buf = pop_empty(minor);
+ dt3155_fbuffer[minor]->
+ frame_info[dt3155_fbuffer[minor]->
+ active_buf].tag = ++unique_tag;
}
else /* nbuffers == 2, special case */
{ /* There is 1 active buffer.
* If there is a locked buffer, keep the active buffer
* the same -- that means we drop a frame.
*/
- if ( dt3155_fbuffer[ minor ]->locked_buf < 0 )
+ if (dt3155_fbuffer[minor]->locked_buf < 0)
{
- push_ready( minor,
- dt3155_fbuffer[ minor ]->active_buf );
- if (are_empty_buffers( minor ) )
+ push_ready(minor,
+ dt3155_fbuffer[minor]->active_buf);
+ if (are_empty_buffers(minor))
{
- dt3155_fbuffer[ minor ]->active_buf =
- pop_empty( minor );
+ dt3155_fbuffer[minor]->active_buf =
+ pop_empty(minor);
}
else
{ /* no empty or locked buffers, so use a readybuf */
- dt3155_fbuffer[ minor ]->active_buf =
- pop_ready( minor );
+ dt3155_fbuffer[minor]->active_buf =
+ pop_ready(minor);
}
}
}
#ifdef DEBUG_QUES_B
- printques( minor );
+ printques(minor);
#endif
- dt3155_fbuffer[ minor ]->even_happened = 0;
+ dt3155_fbuffer[minor]->even_happened = 0;
- wake_up_interruptible( &dt3155_read_wait_queue[ minor ] );
+ wake_up_interruptible(&dt3155_read_wait_queue[minor]);
local_irq_restore(flags);
}
/* Set up the DMA address for the next frame/field */
- buffer_addr = dt3155_fbuffer[ minor ]->
- frame_info[ dt3155_fbuffer[ minor ]->active_buf ].addr;
- if ( (dt3155_status[ minor ].state & DT3155_STATE_MODE) ==
- DT3155_STATE_FLD )
+ buffer_addr = dt3155_fbuffer[minor]->
+ frame_info[dt3155_fbuffer[minor]->active_buf].addr;
+ if ((dt3155_status[minor].state & DT3155_STATE_MODE) ==
+ DT3155_STATE_FLD)
{
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_START), buffer_addr);
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START), buffer_addr);
}
else
{
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_START), buffer_addr);
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START), buffer_addr);
- WriteMReg((dt3155_lbase[ minor ] + ODD_DMA_START), buffer_addr
- + dt3155_status[ minor ].config.cols);
+ WriteMReg((dt3155_lbase[minor] + ODD_DMA_START), buffer_addr
+ + dt3155_status[minor].config.cols);
}
/* Do error checking */
i2c_odd_csr.fld.DONE_ODD = 1;
- if ( i2c_odd_csr.fld.ERROR_ODD )
+ if (i2c_odd_csr.fld.ERROR_ODD)
dt3155_errno = DT_ERR_OVERRUN;
- WriteI2C(dt3155_lbase[ minor ], ODD_CSR, i2c_odd_csr.reg );
+ WriteI2C(dt3155_lbase[minor], ODD_CSR, i2c_odd_csr.reg);
return;
}
/* If we get here, the Odd Field wasn't it either... */
- printk( "neither even nor odd. shared perhaps?\n");
+ printk("neither even nor odd. shared perhaps?\n");
}
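
Taken together with the queue helpers further down in dt3155_isr.c, the rotation above amounts to a simple life cycle: a buffer sits on the empty list until the ISR makes it the active DMA target, moves to the ready queue when its frame completes, is locked while read() hands it to user space, and is pushed back to empty when released; only when no empty buffer exists does the ISR recycle the oldest ready buffer, which is the single point where frames are dropped.
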
/*****************************************************
@@ -420,22 +421,22 @@ static inline void dt3155_isr( int irq, void *dev_id, struct pt_regs *regs )
*****************************************************/
static void dt3155_init_isr(int minor)
{
- const u32 stride = dt3155_status[ minor ].config.cols;
+ const u32 stride = dt3155_status[minor].config.cols;
- switch (dt3155_status[ minor ].state & DT3155_STATE_MODE)
+ switch (dt3155_status[minor].state & DT3155_STATE_MODE)
{
case DT3155_STATE_FLD:
{
- even_dma_start_r = dt3155_status[ minor ].
- fbuffer.frame_info[ dt3155_status[ minor ].fbuffer.active_buf ].addr;
+ even_dma_start_r = dt3155_status[minor].
+ fbuffer.frame_info[dt3155_status[minor].fbuffer.active_buf].addr;
even_dma_stride_r = 0;
odd_dma_stride_r = 0;
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_START),
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START),
even_dma_start_r);
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_STRIDE),
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_STRIDE),
even_dma_stride_r);
- WriteMReg((dt3155_lbase[ minor ] + ODD_DMA_STRIDE),
+ WriteMReg((dt3155_lbase[minor] + ODD_DMA_STRIDE),
odd_dma_stride_r);
break;
}
@@ -443,19 +444,19 @@ static void dt3155_init_isr(int minor)
case DT3155_STATE_FRAME:
default:
{
- even_dma_start_r = dt3155_status[ minor ].
- fbuffer.frame_info[ dt3155_status[ minor ].fbuffer.active_buf ].addr;
+ even_dma_start_r = dt3155_status[minor].
+ fbuffer.frame_info[dt3155_status[minor].fbuffer.active_buf].addr;
odd_dma_start_r = even_dma_start_r + stride;
even_dma_stride_r = stride;
odd_dma_stride_r = stride;
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_START),
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_START),
even_dma_start_r);
- WriteMReg((dt3155_lbase[ minor ] + ODD_DMA_START),
+ WriteMReg((dt3155_lbase[minor] + ODD_DMA_START),
odd_dma_start_r);
- WriteMReg((dt3155_lbase[ minor ] + EVEN_DMA_STRIDE),
+ WriteMReg((dt3155_lbase[minor] + EVEN_DMA_STRIDE),
even_dma_stride_r);
- WriteMReg((dt3155_lbase[ minor ] + ODD_DMA_STRIDE),
+ WriteMReg((dt3155_lbase[minor] + ODD_DMA_STRIDE),
odd_dma_stride_r);
break;
}
@@ -464,9 +465,9 @@ static void dt3155_init_isr(int minor)
/* 50/60 Hz should be set before this point but let's make sure it is */
/* right anyway */
- ReadI2C(dt3155_lbase[ minor ], CSR2, &i2c_csr2.reg);
+ ReadI2C(dt3155_lbase[minor], CSR2, &i2c_csr2.reg);
i2c_csr2.fld.HZ50 = FORMAT50HZ;
- WriteI2C(dt3155_lbase[ minor ], CSR2, i2c_csr2.reg);
+ WriteI2C(dt3155_lbase[minor], CSR2, i2c_csr2.reg);
/* enable busmaster chip, clear flags */
@@ -486,7 +487,7 @@ static void dt3155_init_isr(int minor)
csr1_r.fld.FLD_CRPT_EVE = 1; /* writing a 1 clears flags */
csr1_r.fld.FLD_CRPT_ODD = 1;
- WriteMReg((dt3155_lbase[ minor ] + CSR1),csr1_r.reg);
+ WriteMReg((dt3155_lbase[minor] + CSR1),csr1_r.reg);
/* Enable interrupts at the end of each field */
@@ -495,14 +496,14 @@ static void dt3155_init_isr(int minor)
int_csr_r.fld.FLD_END_ODD_EN = 1;
int_csr_r.fld.FLD_START_EN = 0;
- WriteMReg((dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg);
+ WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
/* start internal BUSY bits */
- ReadI2C(dt3155_lbase[ minor ], CSR2, &i2c_csr2.reg);
+ ReadI2C(dt3155_lbase[minor], CSR2, &i2c_csr2.reg);
i2c_csr2.fld.BUSY_ODD = 1;
i2c_csr2.fld.BUSY_EVE = 1;
- WriteI2C(dt3155_lbase[ minor ], CSR2, i2c_csr2.reg);
+ WriteI2C(dt3155_lbase[minor], CSR2, i2c_csr2.reg);
/* Now its up to the interrupt routine!! */
@@ -521,7 +522,7 @@ static int dt3155_ioctl(struct inode *inode,
{
int minor = MINOR(inode->i_rdev); /* What device are we ioctl()'ing? */
- if ( minor >= MAXBOARDS || minor < 0 )
+ if (minor >= MAXBOARDS || minor < 0)
return -ENODEV;
/* make sure it is valid command */
@@ -545,7 +546,7 @@ static int dt3155_ioctl(struct inode *inode,
return -EBUSY;
{
- struct dt3155_config_s tmp;
+ struct dt3155_config tmp;
if (copy_from_user((void *)&tmp, (void *) arg, sizeof(tmp)))
return -EFAULT;
/* check for valid settings */
@@ -565,7 +566,7 @@ static int dt3155_ioctl(struct inode *inode,
case DT3155_GET_CONFIG:
{
if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
- sizeof(dt3155_status_t) ))
+ sizeof(struct dt3155_status)))
return -EFAULT;
return 0;
}
@@ -586,7 +587,7 @@ static int dt3155_ioctl(struct inode *inode,
quick_stop(minor);
if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
- sizeof(dt3155_status_t)))
+ sizeof(struct dt3155_status)))
return -EFAULT;
return 0;
}
@@ -609,8 +610,8 @@ static int dt3155_ioctl(struct inode *inode,
}
dt3155_init_isr(minor);
- if (copy_to_user( (void *) arg, (void *) &dt3155_status[minor],
- sizeof(dt3155_status_t)))
+ if (copy_to_user((void *) arg, (void *) &dt3155_status[minor],
+ sizeof(struct dt3155_status)))
return -EFAULT;
return 0;
}
@@ -681,36 +682,36 @@ static int dt3155_mmap (struct file * file, struct vm_area_struct * vma)
* MOD_INC_USE_COUNT makes sure that the driver memory is not freed
* while the device is in use.
*****************************************************/
-static int dt3155_open( struct inode* inode, struct file* filep)
+static int dt3155_open(struct inode* inode, struct file* filep)
{
int minor = MINOR(inode->i_rdev); /* what device are we opening? */
- if (dt3155_dev_open[ minor ]) {
+ if (dt3155_dev_open[minor]) {
printk ("DT3155: Already opened by another process.\n");
return -EBUSY;
}
- if (dt3155_status[ minor ].device_installed==0)
+ if (dt3155_status[minor].device_installed==0)
{
printk("DT3155 Open Error: No such device dt3155 minor number %d\n",
minor);
return -EIO;
}
- if (dt3155_status[ minor ].state != DT3155_STATE_IDLE) {
+ if (dt3155_status[minor].state != DT3155_STATE_IDLE) {
printk ("DT3155: Not in idle state (state = %x)\n",
- dt3155_status[ minor ].state);
+ dt3155_status[minor].state);
return -EBUSY;
}
printk("DT3155: Device opened.\n");
- dt3155_dev_open[ minor ] = 1 ;
+ dt3155_dev_open[minor] = 1 ;
- dt3155_flush( minor );
+ dt3155_flush(minor);
/* Disable ALL interrupts */
int_csr_r.reg = 0;
- WriteMReg( (dt3155_lbase[ minor ] + INT_CSR), int_csr_r.reg );
+ WriteMReg((dt3155_lbase[minor] + INT_CSR), int_csr_r.reg);
init_waitqueue_head(&(dt3155_read_wait_queue[minor]));
@@ -724,20 +725,20 @@ static int dt3155_open( struct inode* inode, struct file* filep)
* Now decrement the use count.
*
*****************************************************/
-static int dt3155_close( struct inode *inode, struct file *filep)
+static int dt3155_close(struct inode *inode, struct file *filep)
{
int minor;
minor = MINOR(inode->i_rdev); /* which device are we closing */
- if (!dt3155_dev_open[ minor ])
+ if (!dt3155_dev_open[minor])
{
printk("DT3155: attempt to CLOSE a not OPEN device\n");
}
else
{
- dt3155_dev_open[ minor ] = 0;
+ dt3155_dev_open[minor] = 0;
- if (dt3155_status[ minor ].state != DT3155_STATE_IDLE)
+ if (dt3155_status[minor].state != DT3155_STATE_IDLE)
{
quick_stop(minor);
}
@@ -756,11 +757,11 @@ static ssize_t dt3155_read(struct file *filep, char __user *buf,
int minor = MINOR(filep->f_dentry->d_inode->i_rdev);
u32 offset;
int frame_index;
- frame_info_t *frame_info_p;
+ struct frame_info *frame_info;
/* TODO: this should check the error flag and */
/* return an error on hardware failures */
- if (count != sizeof(dt3155_read_t))
+ if (count != sizeof(struct dt3155_read))
{
printk("DT3155 ERROR (NJC): count is not right\n");
return -EINVAL;
@@ -781,7 +782,7 @@ static ssize_t dt3155_read(struct file *filep, char __user *buf,
if (filep->f_flags & O_NDELAY)
{
if ((frame_index = dt3155_get_ready_buffer(minor)) < 0) {
- /*printk( "dt3155: no buffers available (?)\n");*/
+ /*printk("dt3155: no buffers available (?)\n");*/
/* printques(minor); */
return -EAGAIN;
}
@@ -806,21 +807,21 @@ static ssize_t dt3155_read(struct file *filep, char __user *buf,
}
}
- frame_info_p = &dt3155_status[minor].fbuffer.frame_info[frame_index];
+ frame_info = &dt3155_status[minor].fbuffer.frame_info[frame_index];
/* make this an offset */
- offset = frame_info_p->addr - dt3155_status[minor].mem_addr;
+ offset = frame_info->addr - dt3155_status[minor].mem_addr;
put_user(offset, (unsigned int *) buf);
buf += sizeof(u32);
- put_user( dt3155_status[minor].fbuffer.frame_count, (unsigned int *) buf);
+ put_user(dt3155_status[minor].fbuffer.frame_count, (unsigned int *) buf);
buf += sizeof(u32);
put_user(dt3155_status[minor].state, (unsigned int *) buf);
buf += sizeof(u32);
- if (copy_to_user(buf, frame_info_p, sizeof(frame_info_t)))
+ if (copy_to_user(buf, frame_info, sizeof(*frame_info)))
return -EFAULT;
- return sizeof(dt3155_read_t);
+ return sizeof(struct dt3155_read);
}
static unsigned int dt3155_poll (struct file * filp, poll_table *wait)
@@ -835,6 +836,17 @@ static unsigned int dt3155_poll (struct file * filp, poll_table *wait)
return 0;
}
+static long
+dt3155_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = dt3155_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
/*****************************************************
* file operations supported by DT3155 driver
@@ -842,12 +854,12 @@ static unsigned int dt3155_poll (struct file * filp, poll_table *wait)
* register_chrdev
*****************************************************/
static struct file_operations dt3155_fops = {
- read: dt3155_read,
- ioctl: dt3155_ioctl,
- mmap: dt3155_mmap,
- poll: dt3155_poll,
- open: dt3155_open,
- release: dt3155_close
+ .read = dt3155_read,
+ .unlocked_ioctl = dt3155_unlocked_ioctl,
+ .mmap = dt3155_mmap,
+ .poll = dt3155_poll,
+ .open = dt3155_open,
+ .release = dt3155_close
};
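
Two related cleanups meet in this hunk: the GNU-style "field:" initializers are replaced by C99 designated initializers, and .ioctl gives way to the .unlocked_ioctl wrapper defined above, which now takes the Big Kernel Lock explicitly (hence the new <linux/smp_lock.h> include earlier in the patch). A minimal sketch of the initializer form, using hypothetical handlers purely for illustration:

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,	/* fields may appear in any order;  */
	.read		= foo_read,	/* omitted fields are zero-filled   */
	.unlocked_ioctl	= foo_unlocked_ioctl,
	.open		= foo_open,
	.release	= foo_release,
};
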
@@ -889,7 +901,7 @@ static int find_PCI (void)
/* Now, just go out and make sure that this/these device(s) is/are
actually mapped into the kernel address space */
- if ((error = pci_read_config_dword( pci_dev, PCI_BASE_ADDRESS_0,
+ if ((error = pci_read_config_dword(pci_dev, PCI_BASE_ADDRESS_0,
(u32 *) &base)))
{
printk("DT3155: Was not able to find device \n");
@@ -901,26 +913,26 @@ static int find_PCI (void)
/* Remap the base address to a logical address through which we
* can access it. */
- dt3155_lbase[ pci_index - 1 ] = ioremap(base,PCI_PAGE_SIZE);
- dt3155_status[ pci_index - 1 ].reg_addr = base;
+ dt3155_lbase[pci_index - 1] = ioremap(base,PCI_PAGE_SIZE);
+ dt3155_status[pci_index - 1].reg_addr = base;
DT_3155_DEBUG_MSG("DT3155: New logical address is %p \n",
dt3155_lbase[pci_index-1]);
- if ( !dt3155_lbase[pci_index-1] )
+ if (!dt3155_lbase[pci_index-1])
{
printk("DT3155: Unable to remap control registers\n");
goto err;
}
- if ( (error = pci_read_config_byte( pci_dev, PCI_INTERRUPT_LINE, &irq)) )
+ if ((error = pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &irq)))
{
printk("DT3155: Was not able to find device \n");
goto err;
}
DT_3155_DEBUG_MSG("DT3155: IRQ is %d \n",irq);
- dt3155_status[ pci_index-1 ].irq = irq;
+ dt3155_status[pci_index-1].irq = irq;
/* Set flag: kth device found! */
- dt3155_status[ pci_index-1 ].device_installed = 1;
+ dt3155_status[pci_index-1].device_installed = 1;
printk("DT3155: Installing device %d w/irq %d and address %p\n",
pci_index,
dt3155_status[pci_index-1].irq,
@@ -945,89 +957,89 @@ int init_module(void)
{
int index;
int rcode = 0;
- char *devname[ MAXBOARDS ];
+ char *devname[MAXBOARDS];
- devname[ 0 ] = "dt3155a";
+ devname[0] = "dt3155a";
#if MAXBOARDS == 2
- devname[ 1 ] = "dt3155b";
+ devname[1] = "dt3155b";
#endif
printk("DT3155: Loading module...\n");
/* Register the device driver */
- rcode = register_chrdev( dt3155_major, "dt3155", &dt3155_fops );
- if( rcode < 0 )
+ rcode = register_chrdev(dt3155_major, "dt3155", &dt3155_fops);
+ if(rcode < 0)
{
- printk( KERN_INFO "DT3155: register_chrdev failed \n");
+ printk(KERN_INFO "DT3155: register_chrdev failed \n");
return rcode;
}
- if( dt3155_major == 0 )
+ if(dt3155_major == 0)
dt3155_major = rcode; /* dynamic */
/* init the status variables. */
/* DMA memory is taken care of in setup_buffers() */
- for ( index = 0; index < MAXBOARDS; index++ )
+ for (index = 0; index < MAXBOARDS; index++)
{
- dt3155_status[ index ].config.acq_mode = DT3155_MODE_FRAME;
- dt3155_status[ index ].config.continuous = DT3155_ACQ;
- dt3155_status[ index ].config.cols = DT3155_MAX_COLS;
- dt3155_status[ index ].config.rows = DT3155_MAX_ROWS;
- dt3155_status[ index ].state = DT3155_STATE_IDLE;
+ dt3155_status[index].config.acq_mode = DT3155_MODE_FRAME;
+ dt3155_status[index].config.continuous = DT3155_ACQ;
+ dt3155_status[index].config.cols = DT3155_MAX_COLS;
+ dt3155_status[index].config.rows = DT3155_MAX_ROWS;
+ dt3155_status[index].state = DT3155_STATE_IDLE;
/* find_PCI() will check if devices are installed; */
/* first assume they're not: */
- dt3155_status[ index ].mem_addr = 0;
- dt3155_status[ index ].mem_size = 0;
- dt3155_status[ index ].state = DT3155_STATE_IDLE;
- dt3155_status[ index ].device_installed = 0;
+ dt3155_status[index].mem_addr = 0;
+ dt3155_status[index].mem_size = 0;
+ dt3155_status[index].state = DT3155_STATE_IDLE;
+ dt3155_status[index].device_installed = 0;
}
/* Now let's find the hardware. find_PCI() will set ndevices to the
* number of cards found in this machine. */
{
- if ( (rcode = find_PCI()) != DT_3155_SUCCESS )
+ if ((rcode = find_PCI()) != DT_3155_SUCCESS)
{
printk("DT3155 error: find_PCI() failed to find dt3155 board(s)\n");
- unregister_chrdev( dt3155_major, "dt3155" );
+ unregister_chrdev(dt3155_major, "dt3155");
return rcode;
}
}
/* Ok, time to setup the frame buffers */
- if( (rcode = dt3155_setup_buffers(&allocatorAddr)) < 0 )
+ if((rcode = dt3155_setup_buffers(&allocatorAddr)) < 0)
{
printk("DT3155: Error: setting up buffer not large enough.");
- unregister_chrdev( dt3155_major, "dt3155" );
+ unregister_chrdev(dt3155_major, "dt3155");
return rcode;
}
/* If we are this far, then there is enough RAM */
/* for the buffers: Print the configuration. */
- for( index = 0; index < ndevices; index++ )
+ for( index = 0; index < ndevices; index++)
{
printk("DT3155: Device = %d; acq_mode = %d; "
"continuous = %d; cols = %d; rows = %d;\n",
index ,
- dt3155_status[ index ].config.acq_mode,
- dt3155_status[ index ].config.continuous,
- dt3155_status[ index ].config.cols,
- dt3155_status[ index ].config.rows);
+ dt3155_status[index].config.acq_mode,
+ dt3155_status[index].config.continuous,
+ dt3155_status[index].config.cols,
+ dt3155_status[index].config.rows);
printk("DT3155: m_addr = 0x%x; m_size = %ld; "
"state = %d; device_installed = %d\n",
- dt3155_status[ index ].mem_addr,
- (long int)dt3155_status[ index ].mem_size,
- dt3155_status[ index ].state,
- dt3155_status[ index ].device_installed);
+ dt3155_status[index].mem_addr,
+ (long int)dt3155_status[index].mem_size,
+ dt3155_status[index].state,
+ dt3155_status[index].device_installed);
}
/* Disable ALL interrupts */
int_csr_r.reg = 0;
- for( index = 0; index < ndevices; index++ )
+ for( index = 0; index < ndevices; index++)
{
- WriteMReg( (dt3155_lbase[ index ] + INT_CSR), int_csr_r.reg );
- if( dt3155_status[ index ].device_installed )
+ WriteMReg((dt3155_lbase[index] + INT_CSR), int_csr_r.reg);
+ if(dt3155_status[index].device_installed)
{
/*
* This driver *looks* like it can handle sharing interrupts,
@@ -1036,14 +1048,14 @@ int init_module(void)
* as a reminder in case any problems arise. (SS)
*/
/* in older kernels flags are: SA_SHIRQ | SA_INTERRUPT */
- rcode = request_irq( dt3155_status[ index ].irq, (void *)dt3155_isr,
- IRQF_SHARED | IRQF_DISABLED, devname[ index ],
+ rcode = request_irq(dt3155_status[index].irq, (void *)dt3155_isr,
+ IRQF_SHARED | IRQF_DISABLED, devname[index],
(void*) &dt3155_status[index]);
- if( rcode < 0 )
+ if(rcode < 0)
{
printk("DT3155: minor %d request_irq failed for IRQ %d\n",
index, dt3155_status[index].irq);
- unregister_chrdev( dt3155_major, "dt3155" );
+ unregister_chrdev(dt3155_major, "dt3155");
return rcode;
}
}
@@ -1072,15 +1084,15 @@ void cleanup_module(void)
allocator_cleanup();
#endif
- unregister_chrdev( dt3155_major, "dt3155" );
+ unregister_chrdev(dt3155_major, "dt3155");
- for( index = 0; index < ndevices; index++ )
+ for(index = 0; index < ndevices; index++)
{
- if( dt3155_status[ index ].device_installed == 1 )
+ if(dt3155_status[index].device_installed == 1)
{
- printk( "DT3155: Freeing irq %d for device %d\n",
- dt3155_status[ index ].irq, index );
- free_irq( dt3155_status[ index ].irq, (void*)&dt3155_status[index] );
+ printk("DT3155: Freeing irq %d for device %d\n",
+ dt3155_status[index].irq, index);
+ free_irq(dt3155_status[index].irq, (void*)&dt3155_status[index]);
}
}
}
diff --git a/drivers/staging/dt3155/dt3155_io.c b/drivers/staging/dt3155/dt3155_io.c
index 6b9c68501a6..7792e712d16 100644
--- a/drivers/staging/dt3155/dt3155_io.c
+++ b/drivers/staging/dt3155/dt3155_io.c
@@ -74,23 +74,22 @@ u8 i2c_pm_lut_data;
* wait_ibsyclr()
*
* This function handles read/write timing and r/w timeout error
- *
- * Returns TRUE if NEW_CYCLE clears
- * Returns FALSE if NEW_CYCLE doesn't clear in roughly 3 msecs, otherwise
- * returns 0
*/
static int wait_ibsyclr(u8 *lpReg)
{
/* wait 100 microseconds */
udelay(100L);
/* __delay(loops_per_sec/10000); */
+
+ ReadMReg(lpReg + IIC_CSR2, iic_csr2_r.reg);
if (iic_csr2_r.fld.NEW_CYCLE) {
/* if NEW_CYCLE didn't clear */
/* TIMEOUT ERROR */
dt3155_errno = DT_ERR_I2C_TIMEOUT;
- return FALSE;
- } else
- return TRUE; /* no error */
+ return -ETIMEDOUT;
+ }
+
+ return 0; /* no error */
}
/*
@@ -101,14 +100,9 @@ static int wait_ibsyclr(u8 *lpReg)
* 1st parameter is pointer to 32-bit register base address
* 2nd parameter is reg. index;
* 3rd is value to be written
- *
- * Returns TRUE - Successful completion
- * FALSE - Timeout error - cycle did not complete!
*/
int WriteI2C(u8 *lpReg, u_short wIregIndex, u8 byVal)
{
- int writestat; /* status for return */
-
/* read 32 bit IIC_CSR2 register data into union */
ReadMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
@@ -126,8 +120,7 @@ int WriteI2C(u8 *lpReg, u_short wIregIndex, u8 byVal)
WriteMReg((lpReg + IIC_CSR2), iic_csr2_r.reg);
/* wait for IIC cycle to finish */
- writestat = wait_ibsyclr(lpReg);
- return writestat;
+ return wait_ibsyclr(lpReg);
}
/*
@@ -138,9 +131,6 @@ int WriteI2C(u8 *lpReg, u_short wIregIndex, u8 byVal)
* 1st parameter is pointer to 32-bit register base address
* 2nd parameter is reg. index;
* 3rd is adrs of value to be read
- *
- * Returns TRUE - Successful completion
- * FALSE - Timeout error - cycle did not complete!
*/
int ReadI2C(u8 *lpReg, u_short wIregIndex, u8 *byVal)
{
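
The dt3155_io.c hunks above change the I2C helpers over to kernel-style error codes: wait_ibsyclr() now re-reads IIC_CSR2 before testing NEW_CYCLE and returns 0 on success or -ETIMEDOUT, and WriteI2C() simply propagates that value (ReadI2C() presumably does the same; its body is not shown here). A minimal sketch of how a caller would check the new convention (illustrative only; the existing callers in dt3155_drv.c still ignore the return value):

	int err;

	err = WriteI2C(dt3155_lbase[minor], CSR2, i2c_csr2.reg);
	if (err)	/* 0 on success, -ETIMEDOUT if NEW_CYCLE never cleared */
		return err;
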
diff --git a/drivers/staging/dt3155/dt3155_isr.c b/drivers/staging/dt3155/dt3155_isr.c
index 09d7d9b8272..33ddc9c057f 100644
--- a/drivers/staging/dt3155/dt3155_isr.c
+++ b/drivers/staging/dt3155/dt3155_isr.c
@@ -1,7 +1,7 @@
/*
Copyright 1996,2002,2005 Gregory D. Hager, Alfred A. Rizzi, Noah J. Cowan,
- Jason Lapenta, Scott Smedley, Greg Sharp
+ Jason Lapenta, Scott Smedley, Greg Sharp
This file is part of the DT3155 Device Driver.
@@ -22,7 +22,7 @@ MA 02111-1307 USA
File: dt3155_isr.c
Purpose: Buffer management routines, and other routines for the ISR
- (the actual isr is in dt3155_drv.c)
+ (the actual isr is in dt3155_drv.c)
-- Changes --
@@ -30,16 +30,16 @@ Purpose: Buffer management routines, and other routines for the ISR
-------------------------------------------------------------------
03-Jul-2000 JML n/a
02-Apr-2002 SS Mods to make work with separate allocator
- module; Merged John Roll's mods to make work with
- multiple boards.
+ module; Merged John Roll's mods to make work with
+ multiple boards.
10-Jul-2002 GCS Complete rewrite of setup_buffers to disallow
- buffers which span a 4MB boundary.
+ buffers which span a 4MB boundary.
24-Jul-2002 SS GPL licence.
30-Jul-2002 NJC Added support for buffer loop.
31-Jul-2002 NJC Complete rewrite of buffer management
02-Aug-2002 NJC Including slab.h instead of malloc.h (no warning).
- Also, allocator_init() now returns allocator_max
- so cleaned up allocate_buffers() accordingly.
+ Also, allocator_init() now returns allocator_max
+ so cleaned up allocate_buffers() accordingly.
08-Aug-2005 SS port to 2.6 kernel.
*/
@@ -60,7 +60,7 @@ Purpose: Buffer management routines, and other routines for the ISR
/* Pointer into global structure for handling buffers */
-struct dt3155_fbuffer_s *dt3155_fbuffer[MAXBOARDS] = {NULL
+struct dt3155_fbuffer *dt3155_fbuffer[MAXBOARDS] = {NULL
#if MAXBOARDS == 2
, NULL
#endif
@@ -77,9 +77,9 @@ struct dt3155_fbuffer_s *dt3155_fbuffer[MAXBOARDS] = {NULL
* are_empty_buffers
* m is minor # of device
***************************/
-inline bool are_empty_buffers( int m )
+bool are_empty_buffers(int m)
{
- return ( dt3155_fbuffer[ m ]->empty_len );
+ return dt3155_fbuffer[m]->empty_len;
}
/**************************
@@ -92,56 +92,56 @@ inline bool are_empty_buffers( int m )
* given by dt3155_fbuffer[m]->empty_buffers[0].
* empty_buffers should never fill up, though this is not checked.
**************************/
-inline void push_empty( int index, int m )
+void push_empty(int index, int m)
{
- dt3155_fbuffer[m]->empty_buffers[ dt3155_fbuffer[m]->empty_len ] = index;
+ dt3155_fbuffer[m]->empty_buffers[dt3155_fbuffer[m]->empty_len] = index;
dt3155_fbuffer[m]->empty_len++;
}
/**************************
- * pop_empty( m )
+ * pop_empty(m)
* m is minor # of device
**************************/
-inline int pop_empty( int m )
+int pop_empty(int m)
{
dt3155_fbuffer[m]->empty_len--;
- return dt3155_fbuffer[m]->empty_buffers[ dt3155_fbuffer[m]->empty_len ];
+ return dt3155_fbuffer[m]->empty_buffers[dt3155_fbuffer[m]->empty_len];
}
/*************************
- * is_ready_buf_empty( m )
+ * is_ready_buf_empty(m)
* m is minor # of device
*************************/
-inline bool is_ready_buf_empty( int m )
+bool is_ready_buf_empty(int m)
{
- return ((dt3155_fbuffer[ m ]->ready_len) == 0);
+ return ((dt3155_fbuffer[m]->ready_len) == 0);
}
/*************************
- * is_ready_buf_full( m )
+ * is_ready_buf_full(m)
* m is minor # of device
* this should *never* be true if there are any active, locked or empty
* buffers, since it corresponds to nbuffers ready buffers!!
* 7/31/02: total rewrite. --NJC
*************************/
-inline bool is_ready_buf_full( int m )
+bool is_ready_buf_full(int m)
{
- return ( dt3155_fbuffer[ m ]->ready_len == dt3155_fbuffer[ m ]->nbuffers );
+ return dt3155_fbuffer[m]->ready_len == dt3155_fbuffer[m]->nbuffers;
}
/*****************************************************
- * push_ready( m, buffer )
+ * push_ready(m, buffer)
* m is minor # of device
*
*****************************************************/
-inline void push_ready( int m, int index )
+void push_ready(int m, int index)
{
int head = dt3155_fbuffer[m]->ready_head;
- dt3155_fbuffer[ m ]->ready_que[ head ] = index;
- dt3155_fbuffer[ m ]->ready_head = ( (head + 1) %
- (dt3155_fbuffer[ m ]->nbuffers) );
- dt3155_fbuffer[ m ]->ready_len++;
+ dt3155_fbuffer[m]->ready_que[head] = index;
+ dt3155_fbuffer[m]->ready_head = ((head + 1) %
+ (dt3155_fbuffer[m]->nbuffers));
+ dt3155_fbuffer[m]->ready_len++;
}
@@ -151,12 +151,12 @@ inline void push_ready( int m, int index )
*
* Simply computes the tail given the head and the length.
*****************************************************/
-static inline int get_tail( int m )
+static int get_tail(int m)
{
- return ((dt3155_fbuffer[ m ]->ready_head -
- dt3155_fbuffer[ m ]->ready_len +
- dt3155_fbuffer[ m ]->nbuffers)%
- (dt3155_fbuffer[ m ]->nbuffers));
+ return (dt3155_fbuffer[m]->ready_head -
+ dt3155_fbuffer[m]->ready_len +
+ dt3155_fbuffer[m]->nbuffers)%
+ (dt3155_fbuffer[m]->nbuffers);
}
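
In other words the ready queue is a ring buffer described by ready_head (the next free slot) and ready_len (the occupancy), with the tail derived on demand. For example, with nbuffers = 4, ready_head = 1 and ready_len = 3, the tail is (1 - 3 + 4) % 4 = 2, so the oldest entry sits in ready_que[2] and that is what pop_ready() below will return.
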
@@ -168,12 +168,12 @@ static inline int get_tail( int m )
* This assumes that there is a ready buffer available... this should
* be checked (e.g. with is_ready_buf_empty()) prior to the call.
*****************************************************/
-inline int pop_ready( int m )
+int pop_ready(int m)
{
int tail;
tail = get_tail(m);
- dt3155_fbuffer[ m ]->ready_len--;
- return dt3155_fbuffer[ m ]->ready_que[ tail ];
+ dt3155_fbuffer[m]->ready_len--;
+ return dt3155_fbuffer[m]->ready_que[tail];
}
@@ -181,35 +181,33 @@ inline int pop_ready( int m )
* printques
* m is minor # of device
*****************************************************/
-inline void printques( int m )
+void printques(int m)
{
- int head = dt3155_fbuffer[ m ]->ready_head;
+ int head = dt3155_fbuffer[m]->ready_head;
int tail;
- int num = dt3155_fbuffer[ m ]->nbuffers;
+ int num = dt3155_fbuffer[m]->nbuffers;
int frame_index;
int index;
tail = get_tail(m);
printk("\n R:");
- for ( index = tail; index != head; index++, index = index % (num) )
- {
- frame_index = dt3155_fbuffer[ m ]->ready_que[ index ];
- printk(" %d ", frame_index );
+ for (index = tail; index != head; index++, index = index % (num)) {
+ frame_index = dt3155_fbuffer[m]->ready_que[index];
+ printk(" %d ", frame_index);
}
printk("\n E:");
- for ( index = 0; index < dt3155_fbuffer[ m ]->empty_len; index++ )
- {
- frame_index = dt3155_fbuffer[ m ]->empty_buffers[ index ];
- printk(" %d ", frame_index );
+ for (index = 0; index < dt3155_fbuffer[m]->empty_len; index++) {
+ frame_index = dt3155_fbuffer[m]->empty_buffers[index];
+ printk(" %d ", frame_index);
}
- frame_index = dt3155_fbuffer[ m ]->active_buf;
+ frame_index = dt3155_fbuffer[m]->active_buf;
printk("\n A: %d", frame_index);
- frame_index = dt3155_fbuffer[ m ]->locked_buf;
- printk("\n L: %d \n", frame_index );
+ frame_index = dt3155_fbuffer[m]->locked_buf;
+ printk("\n L: %d\n", frame_index);
}
@@ -220,11 +218,12 @@ inline void printques( int m )
* the start address up to the beginning of the
* next 4MB chunk (assuming bufsize < 4MB).
*****************************************************/
-u32 adjust_4MB (u32 buf_addr, u32 bufsize) {
- if (((buf_addr+bufsize) & UPPER_10_BITS) != (buf_addr & UPPER_10_BITS))
- return (buf_addr+bufsize) & UPPER_10_BITS;
- else
- return buf_addr;
+u32 adjust_4MB(u32 buf_addr, u32 bufsize)
+{
+ if (((buf_addr+bufsize) & UPPER_10_BITS) != (buf_addr & UPPER_10_BITS))
+ return (buf_addr+bufsize) & UPPER_10_BITS;
+ else
+ return buf_addr;
}
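
Assuming UPPER_10_BITS masks the top ten bits of a 32-bit address (0xFFC00000, i.e. the 4 MB region an address falls in), adjust_4MB() bumps a buffer's start to the next 4 MB boundary whenever the start and end would land in different regions. For example, buf_addr = 0x003F8000 with bufsize = 0x10000 ends at 0x00408000, which is in the next region, so the function returns 0x00400000; a buffer that fits entirely inside one region is returned unchanged.
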
@@ -235,7 +234,7 @@ u32 adjust_4MB (u32 buf_addr, u32 bufsize) {
* buffers. If there is not enough free space
* try for less memory.
*****************************************************/
-void allocate_buffers (u32 *buf_addr, u32* total_size_kbs,
+void allocate_buffers(u32 *buf_addr, u32* total_size_kbs,
u32 bufsize)
{
/* Compute the minimum amount of memory guaranteed to hold all
@@ -268,15 +267,15 @@ void allocate_buffers (u32 *buf_addr, u32* total_size_kbs,
printk("DT3155: ...but need at least: %d KB\n", min_size_kbs);
printk("DT3155: ...the allocator has: %d KB\n", allocator_max);
size_kbs = (full_size_kbs <= allocator_max ? full_size_kbs : allocator_max);
- if (size_kbs > min_size_kbs) {
- if ((*buf_addr = allocator_allocate_dma (size_kbs, GFP_KERNEL)) != 0) {
- printk("DT3155: Managed to allocate: %d KB\n", size_kbs);
- *total_size_kbs = size_kbs;
- return;
+ if (size_kbs > min_size_kbs) {
+ if ((*buf_addr = allocator_allocate_dma(size_kbs, GFP_KERNEL)) != 0) {
+ printk("DT3155: Managed to allocate: %d KB\n", size_kbs);
+ *total_size_kbs = size_kbs;
+ return;
+ }
}
- }
/* If we got here, the allocation failed */
- printk ("DT3155: Allocator failed!\n");
+ printk("DT3155: Allocator failed!\n");
*buf_addr = 0;
*total_size_kbs = 0;
return;
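
As a worked example with hypothetical numbers: if full_size_kbs came out to 3456 KB but allocator_max reported only 2048 KB, size_kbs is clamped to 2048 KB; as long as that still exceeds min_size_kbs the reduced allocation is attempted, and only if it fails (or the clamp drops below the two-buffer minimum) does control reach the failure path above, which zeroes *buf_addr and *total_size_kbs.
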
@@ -312,28 +311,26 @@ u32 dt3155_setup_buffers(u32 *allocatorAddr)
int m; /* minor # of device, looped for all devs */
/* zero the fbuffer status and address structure */
- for ( m = 0; m < ndevices; m++)
- {
- dt3155_fbuffer[ m ] = &(dt3155_status[ m ].fbuffer);
+ for (m = 0; m < ndevices; m++) {
+ dt3155_fbuffer[m] = &(dt3155_status[m].fbuffer);
/* Make sure the buffering variables are consistent */
{
- u8 *ptr = (u8 *) dt3155_fbuffer[ m ];
- for( index = 0; index < sizeof(struct dt3155_fbuffer_s); index++)
- *(ptr++)=0;
+ u8 *ptr = (u8 *) dt3155_fbuffer[m];
+ for (index = 0; index < sizeof(struct dt3155_fbuffer); index++)
+ *(ptr++) = 0;
}
}
/* allocate a large contiguous chunk of RAM */
- allocate_buffers (&rambuff_addr, &rambuff_size, bufsize);
+ allocate_buffers(&rambuff_addr, &rambuff_size, bufsize);
printk("DT3155: mem info\n");
- printk(" - rambuf_addr = 0x%x \n", rambuff_addr);
- printk(" - length (kb) = %u \n", rambuff_size);
- if( rambuff_addr == 0 )
- {
- printk( KERN_INFO
- "DT3155: Error setup_buffers() allocator dma failed \n" );
- return -ENOMEM;
+ printk(" - rambuf_addr = 0x%x\n", rambuff_addr);
+ printk(" - length (kb) = %u\n", rambuff_size);
+ if (rambuff_addr == 0) {
+ printk(KERN_INFO
+ "DT3155: Error setup_buffers() allocator dma failed\n");
+ return -ENOMEM;
}
*allocatorAddr = rambuff_addr;
rambuff_end = rambuff_addr + 1024 * rambuff_size;
@@ -341,70 +338,68 @@ u32 dt3155_setup_buffers(u32 *allocatorAddr)
/* after allocation, we need to count how many useful buffers there
are so we can give an equal number to each device */
rambuff_acm = rambuff_addr;
- for ( index = 0; index < MAXBUFFERS; index++) {
- rambuff_acm = adjust_4MB (rambuff_acm, bufsize);/*avoid spanning 4MB bdry*/
- if (rambuff_acm + bufsize > rambuff_end)
- break;
- rambuff_acm += bufsize;
- }
+ for (index = 0; index < MAXBUFFERS; index++) {
+ rambuff_acm = adjust_4MB(rambuff_acm, bufsize);/*avoid spanning 4MB bdry*/
+ if (rambuff_acm + bufsize > rambuff_end)
+ break;
+ rambuff_acm += bufsize;
+ }
/* Following line is OK, will waste buffers if index
* not evenly divisible by ndevices -NJC*/
numbufs = index / ndevices;
printk(" - numbufs = %u\n", numbufs);
- if (numbufs < 2) {
- printk( KERN_INFO
- "DT3155: Error setup_buffers() couldn't allocate 2 bufs/board\n" );
- return -ENOMEM;
- }
+ if (numbufs < 2) {
+ printk(KERN_INFO
+ "DT3155: Error setup_buffers() couldn't allocate 2 bufs/board\n");
+ return -ENOMEM;
+ }
/* now that we have board memory we spit it up */
/* between the boards and the buffers */
- rambuff_acm = rambuff_addr;
- for ( m = 0; m < ndevices; m ++)
- {
- rambuff_acm = adjust_4MB (rambuff_acm, bufsize);
-
- /* Save the start of this boards buffer space (for mmap). */
- dt3155_status[ m ].mem_addr = rambuff_acm;
-
- for (index = 0; index < numbufs; index++)
- {
- rambuff_acm = adjust_4MB (rambuff_acm, bufsize);
- if (rambuff_acm + bufsize > rambuff_end) {
- /* Should never happen */
- printk ("DT3155 PROGRAM ERROR (GCS)\n"
- "Error distributing allocated buffers\n");
- return -ENOMEM;
- }
-
- dt3155_fbuffer[ m ]->frame_info[ index ].addr = rambuff_acm;
- push_empty( index, m );
- /* printk(" - Buffer : %lx\n",
- * dt3155_fbuffer[ m ]->frame_info[ index ].addr );
- */
- dt3155_fbuffer[ m ]->nbuffers += 1;
- rambuff_acm += bufsize;
+ rambuff_acm = rambuff_addr;
+ for (m = 0; m < ndevices; m++) {
+ rambuff_acm = adjust_4MB(rambuff_acm, bufsize);
+
+ /* Save the start of this boards buffer space (for mmap). */
+ dt3155_status[m].mem_addr = rambuff_acm;
+
+ for (index = 0; index < numbufs; index++) {
+ rambuff_acm = adjust_4MB(rambuff_acm, bufsize);
+ if (rambuff_acm + bufsize > rambuff_end) {
+ /* Should never happen */
+ printk("DT3155 PROGRAM ERROR (GCS)\n"
+ "Error distributing allocated buffers\n");
+ return -ENOMEM;
+ }
+
+ dt3155_fbuffer[m]->frame_info[index].addr = rambuff_acm;
+ push_empty(index, m);
+ /* printk(" - Buffer : %lx\n",
+ * dt3155_fbuffer[m]->frame_info[index].addr);
+ */
+ dt3155_fbuffer[m]->nbuffers += 1;
+ rambuff_acm += bufsize;
}
- /* Make sure there is an active buffer there. */
- dt3155_fbuffer[ m ]->active_buf = pop_empty( m );
- dt3155_fbuffer[ m ]->even_happened = 0;
- dt3155_fbuffer[ m ]->even_stopped = 0;
+ /* Make sure there is an active buffer there. */
+ dt3155_fbuffer[m]->active_buf = pop_empty(m);
+ dt3155_fbuffer[m]->even_happened = 0;
+ dt3155_fbuffer[m]->even_stopped = 0;
- /* make sure there is no locked_buf JML 2/28/00 */
- dt3155_fbuffer[ m ]->locked_buf = -1;
+ /* make sure there is no locked_buf JML 2/28/00 */
+ dt3155_fbuffer[m]->locked_buf = -1;
- dt3155_status[ m ].mem_size =
- rambuff_acm - dt3155_status[ m ].mem_addr;
+ dt3155_status[m].mem_size =
+ rambuff_acm - dt3155_status[m].mem_addr;
- /* setup the ready queue */
- dt3155_fbuffer[ m ]->ready_head = 0;
- dt3155_fbuffer[ m ]->ready_len = 0;
- printk("Available buffers for device %d: %d\n",
- m, dt3155_fbuffer[ m ]->nbuffers);
+ /* setup the ready queue */
+ dt3155_fbuffer[m]->ready_head = 0;
+ dt3155_fbuffer[m]->ready_len = 0;
+ printk("Available buffers for device %d: %d\n",
+ m, dt3155_fbuffer[m]->nbuffers);
}
- return 1;
+ return 1;
}
/*****************************************************
@@ -415,13 +410,12 @@ u32 dt3155_setup_buffers(u32 *allocatorAddr)
*
* m is minor number of device
*****************************************************/
-static inline void internal_release_locked_buffer( int m )
+static void internal_release_locked_buffer(int m)
{
/* Pointer into global structure for handling buffers */
- if ( dt3155_fbuffer[ m ]->locked_buf >= 0 )
- {
- push_empty( dt3155_fbuffer[ m ]->locked_buf, m );
- dt3155_fbuffer[ m ]->locked_buf = -1;
+ if (dt3155_fbuffer[m]->locked_buf >= 0) {
+ push_empty(dt3155_fbuffer[m]->locked_buf, m);
+ dt3155_fbuffer[m]->locked_buf = -1;
}
}
@@ -433,7 +427,7 @@ static inline void internal_release_locked_buffer( int m )
* The user function of the above.
*
*****************************************************/
-inline void dt3155_release_locked_buffer( int m )
+void dt3155_release_locked_buffer(int m)
{
unsigned long int flags;
local_save_flags(flags);
@@ -448,28 +442,28 @@ inline void dt3155_release_locked_buffer( int m )
* m is minor # of device
*
*****************************************************/
-inline int dt3155_flush( int m )
+int dt3155_flush(int m)
{
int index;
unsigned long int flags;
local_save_flags(flags);
local_irq_disable();
- internal_release_locked_buffer( m );
- dt3155_fbuffer[ m ]->empty_len = 0;
+ internal_release_locked_buffer(m);
+ dt3155_fbuffer[m]->empty_len = 0;
- for ( index = 0; index < dt3155_fbuffer[ m ]->nbuffers; index++ )
- push_empty( index, m );
+ for (index = 0; index < dt3155_fbuffer[m]->nbuffers; index++)
+ push_empty(index, m);
/* Make sure there is an active buffer there. */
- dt3155_fbuffer[ m ]->active_buf = pop_empty( m );
+ dt3155_fbuffer[m]->active_buf = pop_empty(m);
- dt3155_fbuffer[ m ]->even_happened = 0;
- dt3155_fbuffer[ m ]->even_stopped = 0;
+ dt3155_fbuffer[m]->even_happened = 0;
+ dt3155_fbuffer[m]->even_stopped = 0;
/* setup the ready queue */
- dt3155_fbuffer[ m ]->ready_head = 0;
- dt3155_fbuffer[ m ]->ready_len = 0;
+ dt3155_fbuffer[m]->ready_head = 0;
+ dt3155_fbuffer[m]->ready_len = 0;
local_irq_restore(flags);
@@ -485,7 +479,7 @@ inline int dt3155_flush( int m )
* If the user has a buffer locked it will unlock
* that buffer before returning the new one.
*****************************************************/
-inline int dt3155_get_ready_buffer( int m )
+int dt3155_get_ready_buffer(int m)
{
int frame_index;
unsigned long int flags;
@@ -493,21 +487,20 @@ inline int dt3155_get_ready_buffer( int m )
local_irq_disable();
#ifdef DEBUG_QUES_A
- printques( m );
+ printques(m);
#endif
- internal_release_locked_buffer( m );
+ internal_release_locked_buffer(m);
- if (is_ready_buf_empty( m ))
- frame_index = -1;
- else
- {
- frame_index = pop_ready( m );
- dt3155_fbuffer[ m ]->locked_buf = frame_index;
+ if (is_ready_buf_empty(m))
+ frame_index = -1;
+ else {
+ frame_index = pop_ready(m);
+ dt3155_fbuffer[m]->locked_buf = frame_index;
}
#ifdef DEBUG_QUES_B
- printques( m );
+ printques(m);
#endif
local_irq_restore(flags);
diff --git a/drivers/staging/dt3155/dt3155_isr.h b/drivers/staging/dt3155/dt3155_isr.h
index 7595cb16c98..7d474cf743d 100644
--- a/drivers/staging/dt3155/dt3155_isr.h
+++ b/drivers/staging/dt3155/dt3155_isr.h
@@ -36,7 +36,7 @@ MA 02111-1307 USA
#ifndef DT3155_ISR_H
#define DT3155_ISR_H
-extern struct dt3155_fbuffer_s *dt3155_fbuffer[MAXBOARDS];
+extern struct dt3155_fbuffer *dt3155_fbuffer[MAXBOARDS];
/* User functions for buffering */
/* Initialize the buffering system. This should */
diff --git a/drivers/staging/dt3155v4l/Kconfig b/drivers/staging/dt3155v4l/Kconfig
new file mode 100644
index 00000000000..5cd5a575b64
--- /dev/null
+++ b/drivers/staging/dt3155v4l/Kconfig
@@ -0,0 +1,20 @@
+config VIDEO_DT3155
+ tristate "DT3155 frame grabber, Video4Linux interface"
+ depends on PCI && VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF_DMA_CONTIG
+ default n
+ ---help---
+ Enables the dt3155 device driver for the Data Translation DT3155 frame grabber.
+ Say Y here if you have this hardware.
+ If in doubt, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dt3155v4l.
+
+config DT3155_CCIR
+ bool "Selects CCIR/50Hz vertical refresh"
+ depends on VIDEO_DT3155
+ default y
+ ---help---
+ Select it for CCIR/50Hz (European region),
+ or leave it unselected for RS-170/60Hz (North America).
diff --git a/drivers/staging/dt3155v4l/Makefile b/drivers/staging/dt3155v4l/Makefile
new file mode 100644
index 00000000000..ce7a3ec2faf
--- /dev/null
+++ b/drivers/staging/dt3155v4l/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l.o
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
new file mode 100644
index 00000000000..6dc3af62284
--- /dev/null
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -0,0 +1,1200 @@
+/***************************************************************************
+ * Copyright (C) 2006-2010 by Marin Mitov *
+ * mitov@issp.bas.bg *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "dt3155v4l.h"
+
+#define DT3155_VENDOR_ID 0x8086
+#define DT3155_DEVICE_ID 0x1223
+
+/* DT3155_CHUNK_SIZE is 4MB (2^22), enough room for 8 full-size buffers */
+#define DT3155_CHUNK_SIZE (1U << 22)
+
+#define DT3155_COH_FLAGS (GFP_KERNEL | GFP_DMA32 | __GFP_COLD | __GFP_NOWARN)
+
+#define DT3155_BUF_SIZE (768 * 576)
+
+/* global initializers (for all boards) */
+#ifdef CONFIG_DT3155_CCIR
+static const u8 csr2_init = VT_50HZ;
+#define DT3155_CURRENT_NORM V4L2_STD_625_50
+static const unsigned int img_width = 768;
+static const unsigned int img_height = 576;
+static const unsigned int frames_per_sec = 25;
+static const struct v4l2_fmtdesc frame_std[] = {
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = 0,
+ .description = "CCIR/50Hz 8 bits gray",
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ },
+};
+#else
+static const u8 csr2_init = VT_60HZ;
+#define DT3155_CURRENT_NORM V4L2_STD_525_60
+static const unsigned int img_width = 640;
+static const unsigned int img_height = 480;
+static const unsigned int frames_per_sec = 30;
+static const struct v4l2_fmtdesc frame_std[] = {
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = 0,
+ .description = "RS-170/60Hz 8 bits gray",
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ },
+};
+#endif
+
+#define NUM_OF_FORMATS ARRAY_SIZE(frame_std)
+
+static u8 config_init = ACQ_MODE_EVEN;
+
+/**
+ * read_i2c_reg - reads an internal i2c register
+ *
+ * @addr: dt3155 mmio base address
+ * @index: index (internal address) of register to read
+ * @data: pointer to byte the read data will be placed in
+ *
+ * returns: zero on success or error code
+ *
+ * This function starts reading the specified (by index) register
+ * and busy waits for the process to finish. The result is placed
+ * in a byte pointed by data.
+ */
+static int
+read_i2c_reg(void __iomem *addr, u8 index, u8 *data)
+{
+ u32 tmp = index;
+
+ iowrite32((tmp<<17) | IIC_READ, addr + IIC_CSR2);
+ mmiowb();
+ udelay(45); /* wait at least 43 usec for NEW_CYCLE to clear */
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
+ /* error: NEW_CYCLE not cleared */
+ printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
+ return -EIO;
+ }
+ tmp = ioread32(addr + IIC_CSR1);
+ if (tmp & DIRECT_ABORT) {
+ /* error: DIRECT_ABORT set */
+ printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
+ /* reset DIRECT_ABORT bit */
+ iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
+ return -EIO;
+ }
+ *data = tmp>>24;
+ return 0;
+}
+
+/**
+ * write_i2c_reg - writes to an internal i2c register
+ *
+ * @addr: dt3155 mmio base address
+ * @index: index (internal address) of register to read
+ * @data: data to be written
+ *
+ * returns: zero on success or error code
+ *
+ * This function starts writing the specified (by index) register
+ * and busy waits for the process to finish.
+ */
+static int
+write_i2c_reg(void __iomem *addr, u8 index, u8 data)
+{
+ u32 tmp = index;
+
+ iowrite32((tmp<<17) | IIC_WRITE | data, addr + IIC_CSR2);
+ mmiowb();
+ udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
+ /* error: NEW_CYCLE not cleared */
+ printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
+ return -EIO;
+ }
+ if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
+ /* error: DIRECT_ABORT set */
+ printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
+ /* reset DIRECT_ABORT bit */
+ iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * write_i2c_reg_nowait - writes to an internal i2c register
+ *
+ * @addr: dt3155 mmio base address
+ * @index: index (internal address) of register to read
+ * @data: data to be written
+ *
+ * This function starts writing the specified (by index) register
+ * and then returns.
+ */
+static void write_i2c_reg_nowait(void __iomem *addr, u8 index, u8 data)
+{
+ u32 tmp = index;
+
+ iowrite32((tmp<<17) | IIC_WRITE | data, addr + IIC_CSR2);
+ mmiowb();
+}
+
+/**
+ * wait_i2c_reg - waits the read/write to finish
+ *
+ * @addr: dt3155 mmio base address
+ *
+ * returns: zero on success or error code
+ *
+ * This function waits for reading/writing to finish.
+ */
+static int wait_i2c_reg(void __iomem *addr)
+{
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE)
+ udelay(65); /* wait at least 63 usec for NEW_CYCLE to clear */
+ if (ioread32(addr + IIC_CSR2) & NEW_CYCLE) {
+ /* error: NEW_CYCLE not cleared */
+ printk(KERN_ERR "dt3155: NEW_CYCLE not cleared\n");
+ return -EIO;
+ }
+ if (ioread32(addr + IIC_CSR1) & DIRECT_ABORT) {
+ /* error: DIRECT_ABORT set */
+ printk(KERN_ERR "dt3155: DIRECT_ABORT set\n");
+ /* reset DIRECT_ABORT bit */
+ iowrite32(DIRECT_ABORT, addr + IIC_CSR1);
+ return -EIO;
+ }
+ return 0;
+}
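
These helpers reproduce the busy-waited I2C cycle of the old driver, but over ioremap()ed registers and with proper error codes. A minimal sketch of how they combine (illustrative only, not part of the patch; it mirrors the sequence used in dt3155_start_acq() further down, and the local variables are assumptions):

	u8 cfg;
	int err;

	err = wait_i2c_reg(pd->regs);		/* let any cycle in flight finish */
	if (!err)
		err = write_i2c_reg(pd->regs, CONFIG, pd->config);
	if (!err)
		err = read_i2c_reg(pd->regs, CONFIG, &cfg);	/* read it back */
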
+
+static int
+dt3155_start_acq(struct dt3155_priv *pd)
+{
+ struct videobuf_buffer *vb = pd->curr_buf;
+ dma_addr_t dma_addr;
+
+ dma_addr = videobuf_to_dma_contig(vb);
+ iowrite32(dma_addr, pd->regs + EVEN_DMA_START);
+ iowrite32(dma_addr + vb->width, pd->regs + ODD_DMA_START);
+ iowrite32(vb->width, pd->regs + EVEN_DMA_STRIDE);
+ iowrite32(vb->width, pd->regs + ODD_DMA_STRIDE);
+ /* enable interrupts, clear all irq flags */
+ iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
+ FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
+ iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
+ FLD_DN_ODD | FLD_DN_EVEN | CAP_CONT_EVEN | CAP_CONT_ODD,
+ pd->regs + CSR1);
+ wait_i2c_reg(pd->regs);
+ write_i2c_reg(pd->regs, CONFIG, pd->config);
+ write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE);
+ write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_DONE);
+
+ /* start the board */
+ write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | BUSY_ODD);
+ return 0; /* success */
+}
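
The start/stride programming above is what produces an interlaced capture into one contiguous buffer: the even field starts at the buffer base, the odd field one line (vb->width bytes) further in, and both stride registers are loaded with one line width, so that (assuming the stride registers count the bytes skipped after each line, which the one-line offset between the two start addresses implies) the two fields end up woven line by line into a single width x height frame. With the CCIR defaults above that is a 768 x 576, 8-bit grey image of exactly DT3155_BUF_SIZE bytes.
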
+
+static int
+dt3155_stop_acq(struct dt3155_priv *pd)
+{
+ int tmp;
+
+ /* stop the board */
+ wait_i2c_reg(pd->regs);
+ write_i2c_reg(pd->regs, CSR2, pd->csr2);
+
+ /* disable all irqs, clear all irq flags */
+ iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
+ write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE);
+ write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_DONE);
+ tmp = ioread32(pd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
+ if (tmp)
+ printk(KERN_ERR "dt3155: corrupted field %u\n", tmp);
+ iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
+ FLD_DN_ODD | FLD_DN_EVEN | CAP_CONT_EVEN | CAP_CONT_ODD,
+ pd->regs + CSR1);
+ return 0;
+}
+
+/* Locking: Caller holds q->vb_lock */
+static int
+dt3155_buf_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ *size = img_width * img_height;
+ return 0;
+}
+
+/* Locking: Caller holds q->vb_lock */
+static int
+dt3155_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ int ret = 0;
+
+ vb->width = img_width;
+ vb->height = img_height;
+ vb->size = img_width * img_height;
+ vb->field = field;
+ if (vb->state == VIDEOBUF_NEEDS_INIT)
+ ret = videobuf_iolock(q, vb, NULL);
+ if (ret) {
+ vb->state = VIDEOBUF_ERROR;
+ printk(KERN_ERR "ERROR: videobuf_iolock() failed\n");
+ videobuf_dma_contig_free(q, vb); /* FIXME: needed? */
+ } else
+ vb->state = VIDEOBUF_PREPARED;
+ return ret;
+}
+
+/* Locking: Caller holds q->vb_lock & q->irqlock */
+static void
+dt3155_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+{
+ struct dt3155_priv *pd = q->priv_data;
+
+ if (vb->state != VIDEOBUF_NEEDS_INIT) {
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &pd->dmaq);
+ wake_up_interruptible_sync(&pd->do_dma);
+ } else
+ vb->state = VIDEOBUF_ERROR;
+}
+
+/* Locking: Caller holds q->vb_lock */
+static void
+dt3155_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+{
+ if (vb->state == VIDEOBUF_ACTIVE)
+ videobuf_waiton(vb, 0, 0); /* FIXME: cannot be interrupted */
+ videobuf_dma_contig_free(q, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops vbq_ops = {
+ .buf_setup = dt3155_buf_setup,
+ .buf_prepare = dt3155_buf_prepare,
+ .buf_queue = dt3155_buf_queue,
+ .buf_release = dt3155_buf_release,
+};
+
+static irqreturn_t
+dt3155_irq_handler_even(int irq, void *dev_id)
+{
+ struct dt3155_priv *ipd = dev_id;
+ struct videobuf_buffer *ivb;
+ dma_addr_t dma_addr;
+ u32 tmp;
+
+ tmp = ioread32(ipd->regs + INT_CSR) & (FLD_START | FLD_END_ODD);
+ if (!tmp)
+ return IRQ_NONE; /* not our irq */
+ if ((tmp & FLD_START) && !(tmp & FLD_END_ODD)) {
+ iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START,
+ ipd->regs + INT_CSR);
+ ipd->field_count++;
+ return IRQ_HANDLED; /* start of field irq */
+ }
+ if ((tmp & FLD_START) && (tmp & FLD_END_ODD)) {
+ if (!ipd->stats.start_before_end++)
+ printk(KERN_ERR "dt3155: irq: START before END\n");
+ }
+ /* check for corrupted fields */
+/* write_i2c_reg(ipd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE); */
+/* write_i2c_reg(ipd->regs, ODD_CSR, CSR_ERROR | CSR_DONE); */
+ tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
+ if (tmp) {
+ if (!ipd->stats.corrupted_fields++)
+ printk(KERN_ERR "dt3155: corrupted field %u\n", tmp);
+ iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
+ FLD_DN_ODD | FLD_DN_EVEN |
+ CAP_CONT_EVEN | CAP_CONT_ODD,
+ ipd->regs + CSR1);
+ mmiowb();
+ }
+
+ spin_lock(&ipd->lock);
+ if (ipd->curr_buf && ipd->curr_buf->state == VIDEOBUF_ACTIVE) {
+ if (waitqueue_active(&ipd->curr_buf->done)) {
+ do_gettimeofday(&ipd->curr_buf->ts);
+ ipd->curr_buf->field_count = ipd->field_count;
+ ipd->curr_buf->state = VIDEOBUF_DONE;
+ wake_up(&ipd->curr_buf->done);
+ } else {
+ ivb = ipd->curr_buf;
+ goto load_dma;
+ }
+ } else
+ goto stop_dma;
+ if (list_empty(&ipd->dmaq))
+ goto stop_dma;
+ ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), queue);
+ list_del(&ivb->queue);
+ if (ivb->state == VIDEOBUF_QUEUED) {
+ ivb->state = VIDEOBUF_ACTIVE;
+ ipd->curr_buf = ivb;
+ } else
+ goto stop_dma;
+load_dma:
+ dma_addr = videobuf_to_dma_contig(ivb);
+ iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
+ iowrite32(dma_addr + ivb->width, ipd->regs + ODD_DMA_START);
+ iowrite32(ivb->width, ipd->regs + EVEN_DMA_STRIDE);
+ iowrite32(ivb->width, ipd->regs + ODD_DMA_STRIDE);
+ mmiowb();
+ /* enable interrupts, clear all irq flags */
+ iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
+ FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
+ spin_unlock(&ipd->lock);
+ return IRQ_HANDLED;
+
+stop_dma:
+ ipd->curr_buf = NULL;
+ /* stop the board */
+ write_i2c_reg_nowait(ipd->regs, CSR2, ipd->csr2);
+ /* disable interrupts, clear all irq flags */
+ iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
+ spin_unlock(&ipd->lock);
+ return IRQ_HANDLED;
+}
+
+static int
+dt3155_threadfn(void *arg)
+{
+ struct dt3155_priv *pd = arg;
+ struct videobuf_buffer *vb;
+ unsigned long flags;
+
+ while (1) {
+ wait_event_interruptible(pd->do_dma,
+ kthread_should_stop() || !list_empty(&pd->dmaq));
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&pd->lock, flags);
+ if (pd->curr_buf) /* dma is active */
+ goto done;
+ if (list_empty(&pd->dmaq)) /* no empty buffers */
+ goto done;
+ vb = list_first_entry(&pd->dmaq, typeof(*vb), queue);
+ list_del(&vb->queue);
+ if (vb->state == VIDEOBUF_QUEUED) {
+ vb->state = VIDEOBUF_ACTIVE;
+ pd->curr_buf = vb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ /* start dma */
+ dt3155_start_acq(pd);
+ continue;
+ } else
+ printk(KERN_DEBUG "%s(): This is a BUG\n", __func__);
+done:
+ spin_unlock_irqrestore(&pd->lock, flags);
+ }
+ return 0;
+}
+
+static int
+dt3155_open(struct file *filp)
+{
+ int ret = 0;
+ struct dt3155_priv *pd = video_drvdata(filp);
+
+ printk(KERN_INFO "dt3155: open(): minor: %i\n", pd->vdev->minor);
+
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return -ERESTARTSYS;
+ if (!pd->users) {
+ pd->vidq = kzalloc(sizeof(*pd->vidq), GFP_KERNEL);
+ if (!pd->vidq) {
+ printk(KERN_ERR "dt3155: error: alloc queue\n");
+ ret = -ENOMEM;
+ goto err_alloc_queue;
+ }
+ videobuf_queue_dma_contig_init(pd->vidq, &vbq_ops,
+ &pd->pdev->dev, &pd->lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), pd);
+ /* disable all irqs, clear all irq flags */
+ iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
+ pd->regs + INT_CSR);
+ pd->irq_handler = dt3155_irq_handler_even;
+ ret = request_irq(pd->pdev->irq, pd->irq_handler,
+ IRQF_SHARED, DT3155_NAME, pd);
+ if (ret) {
+ printk(KERN_ERR "dt3155: error: request_irq\n");
+ goto err_request_irq;
+ }
+ pd->curr_buf = NULL;
+ pd->thread = kthread_run(dt3155_threadfn, pd,
+ "dt3155_thread_%i", pd->vdev->minor);
+ if (IS_ERR(pd->thread)) {
+ printk(KERN_ERR "dt3155: kthread_run() failed\n");
+ ret = PTR_ERR(pd->thread);
+ goto err_thread;
+ }
+ pd->field_count = 0;
+ }
+ pd->users++;
+ goto done;
+err_thread:
+ free_irq(pd->pdev->irq, pd);
+err_request_irq:
+ kfree(pd->vidq);
+ pd->vidq = NULL;
+err_alloc_queue:
+done:
+ mutex_unlock(&pd->mux);
+ return ret;
+}
+
+static int
+dt3155_release(struct file *filp)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_buffer *tmp;
+ unsigned long flags;
+ int ret = 0;
+
+ printk(KERN_INFO "dt3155: release(): minor: %i\n", pd->vdev->minor);
+
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return -ERESTARTSYS;
+ pd->users--;
+ BUG_ON(pd->users < 0);
+ if (pd->acq_fp == filp) {
+ spin_lock_irqsave(&pd->lock, flags);
+ INIT_LIST_HEAD(&pd->dmaq); /* queue is emptied */
+ tmp = pd->curr_buf;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ if (tmp)
+ videobuf_waiton(tmp, 0, 1); /* block, interruptible */
+ dt3155_stop_acq(pd);
+ videobuf_stop(pd->vidq);
+ pd->acq_fp = NULL;
+ pd->streaming = 0;
+ }
+ if (!pd->users) {
+ kthread_stop(pd->thread);
+ free_irq(pd->pdev->irq, pd);
+ kfree(pd->vidq);
+ pd->vidq = NULL;
+ }
+ mutex_unlock(&pd->mux);
+ return ret;
+}
+
+static ssize_t
+dt3155_read(struct file *filp, char __user *user, size_t size, loff_t *loff)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ int ret;
+
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return -ERESTARTSYS;
+ if (!pd->acq_fp) {
+ pd->acq_fp = filp;
+ pd->streaming = 0;
+ } else if (pd->acq_fp != filp) {
+ ret = -EBUSY;
+ goto done;
+ } else if (pd->streaming == 1) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = videobuf_read_stream(pd->vidq, user, size, loff, 0,
+ filp->f_flags & O_NONBLOCK);
+done:
+ mutex_unlock(&pd->mux);
+ return ret;
+}
+
+static unsigned int
+dt3155_poll(struct file *filp, struct poll_table_struct *polltbl)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+
+ return videobuf_poll_stream(filp, pd->vidq, polltbl);
+}
+
+static int
+dt3155_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+
+ return videobuf_mmap_mapper(pd->vidq, vma);
+}
+
+static const struct v4l2_file_operations dt3155_fops = {
+ .owner = THIS_MODULE,
+ .open = dt3155_open,
+ .release = dt3155_release,
+ .read = dt3155_read,
+ .poll = dt3155_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = dt3155_mmap,
+};
+
+static int
+dt3155_ioc_streamon(struct file *filp, void *p, enum v4l2_buf_type type)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ int ret = -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return ret;
+ if (!pd->acq_fp) {
+ ret = videobuf_streamon(pd->vidq);
+ if (ret)
+ goto unlock;
+ pd->acq_fp = filp;
+ pd->streaming = 1;
+ wake_up_interruptible_sync(&pd->do_dma);
+ } else if (pd->acq_fp == filp) {
+ pd->streaming = 1;
+ ret = videobuf_streamon(pd->vidq);
+ if (!ret)
+ wake_up_interruptible_sync(&pd->do_dma);
+ } else
+ ret = -EBUSY;
+unlock:
+ mutex_unlock(&pd->mux);
+ return ret;
+}
+
+static int
+dt3155_ioc_streamoff(struct file *filp, void *p, enum v4l2_buf_type type)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_buffer *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = videobuf_streamoff(pd->vidq);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&pd->lock, flags);
+ tmp = pd->curr_buf;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ if (tmp)
+ videobuf_waiton(tmp, 0, 1); /* block, interruptible */
+ return ret;
+}
+
+static int
+dt3155_ioc_querycap(struct file *filp, void *p, struct v4l2_capability *cap)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+
+ strcpy(cap->driver, DT3155_NAME);
+ strcpy(cap->card, DT3155_NAME " frame grabber");
+ sprintf(cap->bus_info, "PCI:%s", pci_name(pd->pdev));
+ cap->version =
+ KERNEL_VERSION(DT3155_VER_MAJ, DT3155_VER_MIN, DT3155_VER_EXT);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int
+dt3155_ioc_enum_fmt_vid_cap(struct file *filp, void *p, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= NUM_OF_FORMATS)
+ return -EINVAL;
+ *f = frame_std[f->index];
+ return 0;
+}
+
+static int
+dt3155_ioc_g_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ f->fmt.pix.width = img_width;
+ f->fmt.pix.height = img_height;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height;
+ f->fmt.pix.colorspace = 0;
+ f->fmt.pix.priv = 0;
+ return 0;
+}
+
+static int
+dt3155_ioc_try_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (f->fmt.pix.width == img_width &&
+ f->fmt.pix.height == img_height &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_GREY &&
+ f->fmt.pix.field == V4L2_FIELD_NONE &&
+ f->fmt.pix.bytesperline == f->fmt.pix.width &&
+ f->fmt.pix.sizeimage == f->fmt.pix.width * f->fmt.pix.height)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int
+dt3155_ioc_s_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ int ret = -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return ret;
+ if (!pd->acq_fp) {
+ pd->acq_fp = filp;
+ pd->streaming = 0;
+ } else if (pd->acq_fp != filp) {
+ ret = -EBUSY;
+ goto done;
+ }
+/* FIXME: we don't change the format for now
+ if (pd->vidq->streaming || pd->vidq->reading || pd->curr_buff) {
+ ret = -EBUSY;
+ goto done;
+ }
+*/
+ ret = dt3155_ioc_g_fmt_vid_cap(filp, p, f);
+done:
+ mutex_unlock(&pd->mux);
+ return ret;
+}
+
+static int
+dt3155_ioc_reqbufs(struct file *filp, void *p, struct v4l2_requestbuffers *b)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_queue *q = pd->vidq;
+ int ret = -ERESTARTSYS;
+
+ if (b->memory != V4L2_MEMORY_MMAP)
+ return -EINVAL;
+ if (mutex_lock_interruptible(&pd->mux) == -EINTR)
+ return ret;
+ if (!pd->acq_fp)
+ pd->acq_fp = filp;
+ else if (pd->acq_fp != filp) {
+ ret = -EBUSY;
+ goto done;
+ }
+ pd->streaming = 1;
+ ret = 0;
+done:
+ mutex_unlock(&pd->mux);
+ if (ret)
+ return ret;
+ if (b->count)
+ ret = videobuf_reqbufs(q, b);
+ else { /* FIXME: is it necessary? */
+ printk(KERN_DEBUG "dt3155: request to free buffers\n");
+ /* ret = videobuf_mmap_free(q); */
+ ret = dt3155_ioc_streamoff(filp, p,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ }
+ return ret;
+}
+
+static int
+dt3155_ioc_querybuf(struct file *filp, void *p, struct v4l2_buffer *b)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_queue *q = pd->vidq;
+
+ return videobuf_querybuf(q, b);
+}
+
+static int
+dt3155_ioc_qbuf(struct file *filp, void *p, struct v4l2_buffer *b)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_queue *q = pd->vidq;
+ int ret;
+
+ ret = videobuf_qbuf(q, b);
+ if (ret)
+ return ret;
+ return videobuf_querybuf(q, b);
+}
+
+static int
+dt3155_ioc_dqbuf(struct file *filp, void *p, struct v4l2_buffer *b)
+{
+ struct dt3155_priv *pd = video_drvdata(filp);
+ struct videobuf_queue *q = pd->vidq;
+
+ return videobuf_dqbuf(q, b, filp->f_flags & O_NONBLOCK);
+}
+
+static int
+dt3155_ioc_querystd(struct file *filp, void *p, v4l2_std_id *norm)
+{
+ *norm = DT3155_CURRENT_NORM;
+ return 0;
+}
+
+static int
+dt3155_ioc_g_std(struct file *filp, void *p, v4l2_std_id *norm)
+{
+ *norm = DT3155_CURRENT_NORM;
+ return 0;
+}
+
+static int
+dt3155_ioc_s_std(struct file *filp, void *p, v4l2_std_id *norm)
+{
+ if (*norm & DT3155_CURRENT_NORM)
+ return 0;
+ return -EINVAL;
+}
+
+static int
+dt3155_ioc_enum_input(struct file *filp, void *p, struct v4l2_input *input)
+{
+ if (input->index)
+ return -EINVAL;
+ strcpy(input->name, "Coax in");
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ /*
+ * FIXME: input->std = 0 according to v4l2 API
+ * VIDIOC_G_STD, VIDIOC_S_STD, VIDIOC_QUERYSTD and VIDIOC_ENUMSTD
+ * should return -EINVAL
+ */
+ input->std = DT3155_CURRENT_NORM;
+ input->status = 0;/* FIXME: add sync detection & V4L2_IN_ST_NO_H_LOCK */
+ return 0;
+}
+
+static int
+dt3155_ioc_g_input(struct file *filp, void *p, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int
+dt3155_ioc_s_input(struct file *filp, void *p, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+ return 0;
+}
+
+static int
+dt3155_ioc_g_parm(struct file *filp, void *p, struct v4l2_streamparm *parms)
+{
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ parms->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parms->parm.capture.capturemode = 0;
+ parms->parm.capture.timeperframe.numerator = 1001;
+ parms->parm.capture.timeperframe.denominator = frames_per_sec * 1000;
+ parms->parm.capture.extendedmode = 0;
+ parms->parm.capture.readbuffers = 1; /* FIXME: 2 buffers? */
+ return 0;
+}
+
+static int
+dt3155_ioc_s_parm(struct file *filp, void *p, struct v4l2_streamparm *parms)
+{
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ parms->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parms->parm.capture.capturemode = 0;
+ parms->parm.capture.timeperframe.numerator = 1001;
+ parms->parm.capture.timeperframe.denominator = frames_per_sec * 1000;
+ parms->parm.capture.extendedmode = 0;
+ parms->parm.capture.readbuffers = 1; /* FIXME: 2 buffers? */
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops dt3155_ioctl_ops = {
+ .vidioc_streamon = dt3155_ioc_streamon,
+ .vidioc_streamoff = dt3155_ioc_streamoff,
+ .vidioc_querycap = dt3155_ioc_querycap,
+/*
+ .vidioc_g_priority = dt3155_ioc_g_priority,
+ .vidioc_s_priority = dt3155_ioc_s_priority,
+*/
+ .vidioc_enum_fmt_vid_cap = dt3155_ioc_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = dt3155_ioc_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = dt3155_ioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = dt3155_ioc_s_fmt_vid_cap,
+ .vidioc_reqbufs = dt3155_ioc_reqbufs,
+ .vidioc_querybuf = dt3155_ioc_querybuf,
+ .vidioc_qbuf = dt3155_ioc_qbuf,
+ .vidioc_dqbuf = dt3155_ioc_dqbuf,
+ .vidioc_querystd = dt3155_ioc_querystd,
+ .vidioc_g_std = dt3155_ioc_g_std,
+ .vidioc_s_std = dt3155_ioc_s_std,
+ .vidioc_enum_input = dt3155_ioc_enum_input,
+ .vidioc_g_input = dt3155_ioc_g_input,
+ .vidioc_s_input = dt3155_ioc_s_input,
+/*
+ .vidioc_queryctrl = dt3155_ioc_queryctrl,
+ .vidioc_g_ctrl = dt3155_ioc_g_ctrl,
+ .vidioc_s_ctrl = dt3155_ioc_s_ctrl,
+ .vidioc_querymenu = dt3155_ioc_querymenu,
+ .vidioc_g_ext_ctrls = dt3155_ioc_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = dt3155_ioc_s_ext_ctrls,
+*/
+ .vidioc_g_parm = dt3155_ioc_g_parm,
+ .vidioc_s_parm = dt3155_ioc_s_parm,
+/*
+ .vidioc_cropcap = dt3155_ioc_cropcap,
+ .vidioc_g_crop = dt3155_ioc_g_crop,
+ .vidioc_s_crop = dt3155_ioc_s_crop,
+ .vidioc_enum_framesizes = dt3155_ioc_enum_framesizes,
+ .vidioc_enum_frameintervals = dt3155_ioc_enum_frameintervals,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = iocgmbuf,
+#endif
+*/
+};
+
+static int __devinit
+dt3155_init_board(struct pci_dev *dev)
+{
+ struct dt3155_priv *pd = pci_get_drvdata(dev);
+ void *buf_cpu;
+ dma_addr_t buf_dma;
+ int i;
+ u8 tmp;
+
+ pci_set_master(dev); /* dt3155 needs it */
+
+ /* resetting the adapter */
+ iowrite32(FLD_CRPT_ODD | FLD_CRPT_EVEN | FLD_DN_ODD | FLD_DN_EVEN,
+ pd->regs + CSR1);
+ mmiowb();
+ msleep(10);
+
+ /* initializing adapter registers */
+ iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
+ mmiowb();
+ iowrite32(0xEEEEEE01, pd->regs + EVEN_PIXEL_FMT);
+ iowrite32(0xEEEEEE01, pd->regs + ODD_PIXEL_FMT);
+ iowrite32(0x00000020, pd->regs + FIFO_TRIGER);
+ iowrite32(0x00000103, pd->regs + XFER_MODE);
+ iowrite32(0, pd->regs + RETRY_WAIT_CNT);
+ iowrite32(0, pd->regs + INT_CSR);
+ iowrite32(1, pd->regs + EVEN_FLD_MASK);
+ iowrite32(1, pd->regs + ODD_FLD_MASK);
+ iowrite32(0, pd->regs + MASK_LENGTH);
+ iowrite32(0x0005007C, pd->regs + FIFO_FLAG_CNT);
+ iowrite32(0x01010101, pd->regs + IIC_CLK_DUR);
+ mmiowb();
+
+ /* verifying that we have a DT3155 board (not just a SAA7116 chip) */
+ read_i2c_reg(pd->regs, DT_ID, &tmp);
+ if (tmp != DT3155_ID)
+ return -ENODEV;
+
+ /* initialize AD LUT */
+ write_i2c_reg(pd->regs, AD_ADDR, 0);
+ for (i = 0; i < 256; i++)
+ write_i2c_reg(pd->regs, AD_LUT, i);
+
+ /* initialize ADC references */
+ /* FIXME: pos_ref & neg_ref depend on VT_50HZ */
+ write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
+ write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
+ write_i2c_reg(pd->regs, AD_ADDR, AD_POS_REF);
+ write_i2c_reg(pd->regs, AD_CMD, 34);
+ write_i2c_reg(pd->regs, AD_ADDR, AD_NEG_REF);
+ write_i2c_reg(pd->regs, AD_CMD, 0);
+
+ /* initialize PM LUT */
+ write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM);
+ for (i = 0; i < 256; i++) {
+ write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
+ write_i2c_reg(pd->regs, PM_LUT_DATA, i);
+ }
+ write_i2c_reg(pd->regs, CONFIG, pd->config | PM_LUT_PGM | PM_LUT_SEL);
+ for (i = 0; i < 256; i++) {
+ write_i2c_reg(pd->regs, PM_LUT_ADDR, i);
+ write_i2c_reg(pd->regs, PM_LUT_DATA, i);
+ }
+ write_i2c_reg(pd->regs, CONFIG, pd->config); /* ACQ_MODE_EVEN */
+
+ /* select channel 1 for input and set sync level */
+ write_i2c_reg(pd->regs, AD_ADDR, AD_CMD_REG);
+ write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
+
+ /* allocate memory, and initialize the DMA machine */
+ buf_cpu = dma_alloc_coherent(&dev->dev, DT3155_BUF_SIZE, &buf_dma,
+ GFP_KERNEL);
+ if (!buf_cpu) {
+ printk(KERN_ERR "dt3155: dma_alloc_coherent "
+ "(in dt3155_init_board) failed\n");
+ return -ENOMEM;
+ }
+ iowrite32(buf_dma, pd->regs + EVEN_DMA_START);
+ iowrite32(buf_dma, pd->regs + ODD_DMA_START);
+ iowrite32(0, pd->regs + EVEN_DMA_STRIDE);
+ iowrite32(0, pd->regs + ODD_DMA_STRIDE);
+
+ /* Perform a pseudo even field acquire */
+ iowrite32(FIFO_EN | SRST | CAP_CONT_ODD, pd->regs + CSR1);
+ write_i2c_reg(pd->regs, CSR2, pd->csr2 | SYNC_SNTL);
+ write_i2c_reg(pd->regs, CONFIG, pd->config);
+ write_i2c_reg(pd->regs, EVEN_CSR, CSR_SNGL);
+ write_i2c_reg(pd->regs, CSR2, pd->csr2 | BUSY_EVEN | SYNC_SNTL);
+ msleep(100);
+ read_i2c_reg(pd->regs, CSR2, &tmp);
+ write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE);
+ write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_SNGL | CSR_DONE);
+ write_i2c_reg(pd->regs, CSR2, pd->csr2);
+ iowrite32(FIFO_EN | SRST | FLD_DN_EVEN | FLD_DN_ODD, pd->regs + CSR1);
+
+ /* deallocate memory */
+ dma_free_coherent(&dev->dev, DT3155_BUF_SIZE, buf_cpu, buf_dma);
+ if (tmp & BUSY_EVEN) {
+ printk(KERN_ERR "dt3155: BUSY_EVEN not cleared\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static struct video_device dt3155_vdev = {
+ .name = DT3155_NAME,
+ .fops = &dt3155_fops,
+ .ioctl_ops = &dt3155_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .tvnorms = DT3155_CURRENT_NORM,
+ .current_norm = DT3155_CURRENT_NORM,
+};
+
+/* same as in drivers/base/dma-coherent.c */
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+static int __devinit
+dt3155_alloc_coherent(struct device *dev, size_t size, int flags)
+{
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ if ((flags & DMA_MEMORY_MAP) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto err_bitmap;
+
+ dev->dma_mem->virt_base = dma_alloc_coherent(dev, size,
+ &dev->dma_mem->device_base, DT3155_COH_FLAGS);
+ if (!dev->dma_mem->virt_base)
+ goto err_coherent;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+ return DMA_MEMORY_MAP;
+
+err_coherent:
+ kfree(dev->dma_mem->bitmap);
+err_bitmap:
+ kfree(dev->dma_mem);
+out:
+ return 0;
+}
+
+static void __devexit
+dt3155_free_coherent(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if (!mem)
+ return;
+ dev->dma_mem = NULL;
+ dma_free_coherent(dev, mem->size << PAGE_SHIFT,
+ mem->virt_base, mem->device_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+
+static int __devinit
+dt3155_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int err;
+ struct dt3155_priv *pd;
+
+ printk(KERN_INFO "dt3155: probe()\n");
+ err = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ printk(KERN_ERR "dt3155: cannot set dma_mask\n");
+ return -ENODEV;
+ }
+ err = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ printk(KERN_ERR "dt3155: cannot set dma_coherent_mask\n");
+ return -ENODEV;
+ }
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ printk(KERN_ERR "dt3155: cannot allocate dt3155_priv\n");
+ return -ENOMEM;
+ }
+ pd->vdev = video_device_alloc();
+ if (!pd->vdev) {
+ printk(KERN_ERR "dt3155: cannot allocate vdev structure\n");
+ goto err_video_device_alloc;
+ }
+ *pd->vdev = dt3155_vdev;
+ pci_set_drvdata(dev, pd); /* for use in dt3155_remove() */
+ video_set_drvdata(pd->vdev, pd); /* for use in video_fops */
+ pd->users = 0;
+ pd->acq_fp = NULL;
+ pd->pdev = dev;
+ INIT_LIST_HEAD(&pd->dmaq);
+ init_waitqueue_head(&pd->do_dma);
+ mutex_init(&pd->mux);
+ pd->csr2 = csr2_init;
+ pd->config = config_init;
+ err = pci_enable_device(pd->pdev);
+ if (err) {
+ printk(KERN_ERR "dt3155: pci_dev not enabled\n");
+ goto err_enable_dev;
+ }
+ err = pci_request_region(pd->pdev, 0, pci_name(pd->pdev));
+ if (err)
+ goto err_req_region;
+ pd->regs = pci_iomap(pd->pdev, 0, pci_resource_len(pd->pdev, 0));
+ if (!pd->regs) {
+ err = -ENOMEM;
+ printk(KERN_ERR "dt3155: pci_iomap failed\n");
+ goto err_pci_iomap;
+ }
+ err = dt3155_init_board(pd->pdev);
+ if (err) {
+ printk(KERN_ERR "dt3155: dt3155_init_board failed\n");
+ goto err_init_board;
+ }
+ err = video_register_device(pd->vdev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ printk(KERN_ERR "dt3155: Cannot register video device\n");
+ goto err_init_board;
+ }
+ err = dt3155_alloc_coherent(&dev->dev, DT3155_CHUNK_SIZE,
+ DMA_MEMORY_MAP);
+ if (err)
+ printk(KERN_INFO "dt3155: preallocated 8 buffers\n");
+ printk(KERN_INFO "dt3155: /dev/video%i is ready\n", pd->vdev->minor);
+ return 0; /* success */
+
+err_init_board:
+ pci_iounmap(pd->pdev, pd->regs);
+err_pci_iomap:
+ pci_release_region(pd->pdev, 0);
+err_req_region:
+ pci_disable_device(pd->pdev);
+err_enable_dev:
+ video_device_release(pd->vdev);
+err_video_device_alloc:
+ kfree(pd);
+ return err;
+}
+
+static void __devexit
+dt3155_remove(struct pci_dev *dev)
+{
+ struct dt3155_priv *pd = pci_get_drvdata(dev);
+
+ printk(KERN_INFO "dt3155: remove()\n");
+ dt3155_free_coherent(&dev->dev);
+ video_unregister_device(pd->vdev);
+ pci_iounmap(dev, pd->regs);
+ pci_release_region(pd->pdev, 0);
+ pci_disable_device(pd->pdev);
+ /*
+ * video_device_release() is invoked automatically
+ * see: struct video_device dt3155_vdev
+ */
+ kfree(pd);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
+ { PCI_DEVICE(DT3155_VENDOR_ID, DT3155_DEVICE_ID) },
+ { 0, /* zero marks the end */ },
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver pci_driver = {
+ .name = DT3155_NAME,
+ .id_table = pci_ids,
+ .probe = dt3155_probe,
+ .remove = __devexit_p(dt3155_remove),
+};
+
+static int __init
+dt3155_init_module(void)
+{
+ int err;
+
+ printk(KERN_INFO "dt3155: ==================\n");
+ printk(KERN_INFO "dt3155: init()\n");
+ err = pci_register_driver(&pci_driver);
+ if (err) {
+ printk(KERN_ERR "dt3155: cannot register pci_driver\n");
+ return err;
+ }
+ return 0; /* success */
+}
+
+static void __exit
+dt3155_exit_module(void)
+{
+ pci_unregister_driver(&pci_driver);
+ printk(KERN_INFO "dt3155: exit()\n");
+ printk(KERN_INFO "dt3155: ==================\n");
+}
+
+module_init(dt3155_init_module);
+module_exit(dt3155_exit_module);
+
+MODULE_DESCRIPTION("video4linux pci-driver for dt3155 frame grabber");
+MODULE_AUTHOR("Marin Mitov <mitov@issp.bas.bg>");
+MODULE_VERSION(DT3155_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.h b/drivers/staging/dt3155v4l/dt3155v4l.h
new file mode 100644
index 00000000000..aa68a6f38aa
--- /dev/null
+++ b/drivers/staging/dt3155v4l/dt3155v4l.h
@@ -0,0 +1,224 @@
+/***************************************************************************
+ * Copyright (C) 2006-2010 by Marin Mitov *
+ * mitov@issp.bas.bg *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+/* DT3155 header file */
+#ifndef _DT3155_H_
+#define _DT3155_H_
+
+#ifdef __KERNEL__
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#define DT3155_NAME "dt3155"
+#define DT3155_VER_MAJ 1
+#define DT3155_VER_MIN 1
+#define DT3155_VER_EXT 0
+#define DT3155_VERSION __stringify(DT3155_VER_MAJ) "." \
+ __stringify(DT3155_VER_MIN) "." \
+ __stringify(DT3155_VER_EXT)
+
+/* DT3155 Base Register offsets (memory mapped) */
+#define EVEN_DMA_START 0x00
+#define ODD_DMA_START 0x0C
+#define EVEN_DMA_STRIDE 0x18
+#define ODD_DMA_STRIDE 0x24
+#define EVEN_PIXEL_FMT 0x30
+#define ODD_PIXEL_FMT 0x34
+#define FIFO_TRIGER 0x38
+#define XFER_MODE 0x3C
+#define CSR1 0x40
+#define RETRY_WAIT_CNT 0x44
+#define INT_CSR 0x48
+#define EVEN_FLD_MASK 0x4C
+#define ODD_FLD_MASK 0x50
+#define MASK_LENGTH 0x54
+#define FIFO_FLAG_CNT 0x58
+#define IIC_CLK_DUR 0x5C
+#define IIC_CSR1 0x60
+#define IIC_CSR2 0x64
+
+/* DT3155 Internal Registers indexes (i2c/IIC mapped) */
+#define CSR2 0x10
+#define EVEN_CSR 0x11
+#define ODD_CSR 0x12
+#define CONFIG 0x13
+#define DT_ID 0x1F
+#define X_CLIP_START 0x20
+#define Y_CLIP_START 0x22
+#define X_CLIP_END 0x24
+#define Y_CLIP_END 0x26
+#define AD_ADDR 0x30
+#define AD_LUT 0x31
+#define AD_CMD 0x32
+#define DIG_OUT 0x40
+#define PM_LUT_ADDR 0x50
+#define PM_LUT_DATA 0x51
+
+/* AD command register values */
+#define AD_CMD_REG 0x00
+#define AD_POS_REF 0x01
+#define AD_NEG_REF 0x02
+
+/* CSR1 bit masks */
+#define CRPT_DIS 0x00004000
+#define FLD_CRPT_ODD 0x00000200
+#define FLD_CRPT_EVEN 0x00000100
+#define FIFO_EN 0x00000080
+#define SRST 0x00000040
+#define FLD_DN_ODD 0x00000020
+#define FLD_DN_EVEN 0x00000010
+/* These should not be used.
+ * Use CAP_CONT_ODD/EVEN instead
+#define CAP_SNGL_ODD 0x00000008
+#define CAP_SNGL_EVEN 0x00000004
+*/
+#define CAP_CONT_ODD 0x00000002
+#define CAP_CONT_EVEN 0x00000001
+
+/* INT_CSR bit masks */
+#define FLD_START_EN 0x00000400
+#define FLD_END_ODD_EN 0x00000200
+#define FLD_END_EVEN_EN 0x00000100
+#define FLD_START 0x00000004
+#define FLD_END_ODD 0x00000002
+#define FLD_END_EVEN 0x00000001
+
+/* IIC_CSR1 bit masks */
+#define DIRECT_ABORT 0x00000200
+
+/* IIC_CSR2 bit masks */
+#define NEW_CYCLE 0x01000000
+#define DIR_RD 0x00010000
+#define IIC_READ 0x01010000
+#define IIC_WRITE 0x01000000
+
+/* CSR2 bit masks */
+#define DISP_PASS 0x40
+#define BUSY_ODD 0x20
+#define BUSY_EVEN 0x10
+#define SYNC_PRESENT 0x08
+#define VT_50HZ 0x04
+#define SYNC_SNTL 0x02
+#define CHROM_FILT 0x01
+#define VT_60HZ 0x00
+
+/* CSR_EVEN/ODD bit masks */
+#define CSR_ERROR 0x04
+#define CSR_SNGL 0x02
+#define CSR_DONE 0x01
+
+/* CONFIG bit masks */
+#define PM_LUT_PGM 0x80
+#define PM_LUT_SEL 0x40
+#define CLIP_EN 0x20
+#define HSCALE_EN 0x10
+#define EXT_TRIG_UP 0x0C
+#define EXT_TRIG_DOWN 0x04
+#define ACQ_MODE_NEXT 0x02
+#define ACQ_MODE_ODD 0x01
+#define ACQ_MODE_EVEN 0x00
+
+/* AD_CMD bit masks */
+#define VIDEO_CNL_1 0x00
+#define VIDEO_CNL_2 0x40
+#define VIDEO_CNL_3 0x80
+#define VIDEO_CNL_4 0xC0
+#define SYNC_CNL_1 0x00
+#define SYNC_CNL_2 0x10
+#define SYNC_CNL_3 0x20
+#define SYNC_CNL_4 0x30
+#define SYNC_LVL_1 0x00
+#define SYNC_LVL_2 0x04
+#define SYNC_LVL_3 0x08
+#define SYNC_LVL_4 0x0C
+
+/* DT3155 identifier */
+#define DT3155_ID 0x20
+
+#ifdef CONFIG_DT3155_CCIR
+#define DMA_STRIDE 768
+#else
+#define DMA_STRIDE 640
+#endif
+
+/**
+ * struct dt3155_stats - statistics structure
+ *
+ * @free_bufs_empty: no free image buffers
+ * @corrupted_fields: corrupted fields
+ * @dma_map_failed: dma mapping failed
+ * @start_before_end: new field started before the previous one ended
+ */
+struct dt3155_stats {
+ int free_bufs_empty;
+ int corrupted_fields;
+ int dma_map_failed;
+ int start_before_end;
+};
+
+/* per board private data structure */
+/**
+ * struct dt3155_priv - private data structure
+ *
+ * @vdev: pointer to video_device structure
+ * @acq_fp: pointer to filp that starts acquisition
+ * @streaming: streaming is negotiated
+ * @pdev: pointer to pci_dev structure
+ * @vidq: pointer to videobuf_queue structure
+ * @curr_buf: pointer to current buffer
+ * @thread: pointer to worker thread
+ * @irq_handler: irq handler for the driver
+ * @qt_ops: local copy of dma-contig qtype_ops
+ * @dmaq: queue for dma buffers
+ * @do_dma: wait queue of the kernel thread
+ * @mux: mutex to protect the instance
+ * @lock: spinlock for videobuf queues
+ * @field_count: fields counter
+ * @stats: statistics structure
+ * @users: open count
+ * @regs: local copy of mmio base register
+ * @csr2: local copy of csr2 register
+ * @config: local copy of config register
+ */
+struct dt3155_priv {
+ struct video_device *vdev;
+ struct file *acq_fp;
+ int streaming;
+ struct pci_dev *pdev;
+ struct videobuf_queue *vidq;
+ struct videobuf_buffer *curr_buf;
+ struct task_struct *thread;
+ irq_handler_t irq_handler;
+ struct videobuf_qtype_ops qt_ops;
+ struct list_head dmaq;
+ wait_queue_head_t do_dma;
+ struct mutex mux;
+ spinlock_t lock;
+ unsigned int field_count;
+ struct dt3155_stats stats;
+ void *regs;
+ int users;
+ u8 csr2, config;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _DT3155_H_ */
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 4cc4f2eb7eb..58c4e907e44 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -603,7 +603,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
}
EXPORT_SYMBOL_GPL(oslec_update);
-/* This function is seperated from the echo canceller is it is usually called
+/* This function is separated from the echo canceller is it is usually called
as part of the tx process. See rx HP (DC blocking) filter above, it's
the same design.
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
index ea746ba41fa..e6c8cb3828e 100644
--- a/drivers/staging/et131x/et1310_address_map.h
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -117,7 +117,7 @@
/*
* Software reset reg at address 0x0028
- * 0: txdma_sw_reset
+ * 0: txdma_sw_reset
* 1: rxdma_sw_reset
* 2: txmac_sw_reset
* 3: rxmac_sw_reset
@@ -1052,7 +1052,7 @@ typedef struct _RXMAC_t { /* Location: */
* 4-0: register
*/
-#define MII_ADDR(phy,reg) ((phy) << 8 | (reg))
+#define MII_ADDR(phy, reg) ((phy) << 8 | (reg))
/*
* structure for MII Management Control reg in mac address map.
@@ -1249,8 +1249,7 @@ typedef struct _MAC_t { /* Location: */
/*
* MAC STATS Module of JAGCore Address Mapping
*/
-struct macstat_regs
-{ /* Location: */
+struct macstat_regs { /* Location: */
u32 pad[32]; /* 0x6000 - 607C */
/* Tx/Rx 0-64 Byte Frame Counter */
diff --git a/drivers/staging/et131x/et1310_eeprom.c b/drivers/staging/et131x/et1310_eeprom.c
index e4d095b0b52..5a8e6b913da 100644
--- a/drivers/staging/et131x/et1310_eeprom.c
+++ b/drivers/staging/et131x/et1310_eeprom.c
@@ -302,7 +302,7 @@ static int eeprom_read(struct et131x_adapter *etdev, u32 addr, u8 *pdata)
err = eeprom_wait_ready(pdev, NULL);
if (err)
return err;
- /*
+ /*
* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
* and bits 1:0 both =0. Bit 5 should be set according to the type
* of EEPROM being accessed (1=two byte addressing, 0=one byte
@@ -383,9 +383,9 @@ int et131x_init_eeprom(struct et131x_adapter *etdev)
/* This error could mean that there was an error
* reading the eeprom or that the eeprom doesn't exist.
- * We will treat each case the same and not try to gather
- * additional information that normally would come from the
- * eeprom, like MAC Address
+ * We will treat each case the same and not try to
+ * gather additional information that normally would
+ * come from the eeprom, like MAC Address
*/
etdev->has_eeprom = 0;
return -EIO;
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 34cd5d1b586..a6d9f29ff49 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -344,7 +344,7 @@ static void ET1310_PhyDuplexMode(struct et131x_adapter *etdev, u16 duplex)
static void ET1310_PhySpeedSelect(struct et131x_adapter *etdev, u16 speed)
{
u16 data;
- static const u16 bits[3]={0x0000, 0x2000, 0x0040};
+ static const u16 bits[3] = {0x0000, 0x2000, 0x0040};
/* Read the PHY control register */
MiRead(etdev, PHY_CONTROL, &data);
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index 54686e2ace6..8e04bdd8f6b 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -344,7 +344,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
"Cannot alloc memory for Packet Status Ring\n");
return -ENOMEM;
}
- printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
+ printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
/*
* NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
@@ -363,7 +363,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
return -ENOMEM;
}
rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
- printk("PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
+ printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
/* Recv
* pci_pool_create initializes a lookaside list. After successful
@@ -445,10 +445,10 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
- + 0xfff;
+ + 0xfff;
pci_free_consistent(adapter->pdev, bufsize,
- rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
+ rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
rx_ring->pFbr1RingVa = NULL;
}
@@ -478,7 +478,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
- + 0xfff;
+ + 0xfff;
pci_free_consistent(adapter->pdev,
bufsize,
@@ -504,7 +504,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
pci_free_consistent(adapter->pdev,
sizeof(struct rx_status_block),
rx_ring->rx_status_block, rx_ring->rx_status_bus);
- rx_ring->rx_status_block = NULL;
+ rx_ring->rx_status_block = NULL;
}
/* Free receive buffer pool */
@@ -547,7 +547,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
/* Setup each RFD */
for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
- rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
+ rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
GFP_ATOMIC | GFP_DMA);
if (!rfd) {
@@ -713,7 +713,7 @@ void SetRxDmaTimer(struct et131x_adapter *etdev)
*/
void et131x_rx_dma_disable(struct et131x_adapter *etdev)
{
- u32 csr;
+ u32 csr;
/* Setup the receive dma configuration register */
writel(0x00002001, &etdev->regs->rxdma.csr);
csr = readl(&etdev->regs->rxdma.csr);
@@ -743,9 +743,9 @@ void et131x_rx_dma_enable(struct et131x_adapter *etdev)
else if (etdev->rx_ring.Fbr1BufferSize == 16384)
csr |= 0x1800;
#ifdef USE_FBR0
- csr |= 0x0400; /* FBR0 enable */
+ csr |= 0x0400; /* FBR0 enable */
if (etdev->rx_ring.Fbr0BufferSize == 256)
- csr |= 0x0100;
+ csr |= 0x0100;
else if (etdev->rx_ring.Fbr0BufferSize == 512)
csr |= 0x0200;
else if (etdev->rx_ring.Fbr0BufferSize == 1024)
@@ -757,7 +757,7 @@ void et131x_rx_dma_enable(struct et131x_adapter *etdev)
if ((csr & 0x00020000) != 0) {
udelay(5);
csr = readl(&etdev->regs->rxdma.csr);
- if ((csr & 0x00020000) != 0) {
+ if ((csr & 0x00020000) != 0) {
dev_err(&etdev->pdev->dev,
"RX Dma failed to exit halt state. CSR 0x%08x\n",
csr);
@@ -841,8 +841,7 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
(rindex == 1 &&
bindex > rx_local->Fbr1NumEntries - 1))
#else
- if (rindex != 1 ||
- bindex > rx_local->Fbr1NumEntries - 1)
+ if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
#endif
{
/* Illegal buffer or ring index cannot be used by S/W*/
@@ -1063,20 +1062,20 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
static inline u32 bump_fbr(u32 *fbr, u32 limit)
{
- u32 v = *fbr;
- v++;
- /* This works for all cases where limit < 1024. The 1023 case
- works because 1023++ is 1024 which means the if condition is not
- taken but the carry of the bit into the wrap bit toggles the wrap
- value correctly */
- if ((v & ET_DMA10_MASK) > limit) {
- v &= ~ET_DMA10_MASK;
- v ^= ET_DMA10_WRAP;
- }
- /* For the 1023 case */
- v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
- *fbr = v;
- return v;
+ u32 v = *fbr;
+ v++;
+ /* This works for all cases where limit < 1024. The 1023 case
+ works because 1023++ is 1024 which means the if condition is not
+ taken but the carry of the bit into the wrap bit toggles the wrap
+ value correctly */
+ if ((v & ET_DMA10_MASK) > limit) {
+ v &= ~ET_DMA10_MASK;
+ v ^= ET_DMA10_WRAP;
+ }
+ /* For the 1023 case */
+ v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ *fbr = v;
+ return v;
}
/**
@@ -1105,7 +1104,7 @@ void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
if (ri == 1) {
struct fbr_desc *next =
(struct fbr_desc *) (rx_local->pFbr1RingVa) +
- INDEX10(rx_local->local_Fbr1_full);
+ INDEX10(rx_local->local_Fbr1_full);
/* Handle the Free Buffer Ring advancement here. Write
* the PA / Buffer Index for the returned buffer into
diff --git a/drivers/staging/et131x/et1310_rx.h b/drivers/staging/et131x/et1310_rx.h
index ca84a9146d6..e8c653d37a7 100644
--- a/drivers/staging/et131x/et1310_rx.h
+++ b/drivers/staging/et131x/et1310_rx.h
@@ -91,8 +91,7 @@
#define ALCATEL_BROADCAST_PKT 0x02000000
/* typedefs for Free Buffer Descriptors */
-struct fbr_desc
-{
+struct fbr_desc {
u32 addr_lo;
u32 addr_hi;
u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
@@ -117,7 +116,7 @@ struct fbr_desc
* 9: jp Jumbo Packet
* 10: vp VLAN Packet
* 11-15: unused
- * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
+ * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
* 17: asw_RX_DV_event short receive event detected
* 18: asw_false_carrier_event bad carrier since last good packet
* 19: asw_code_err one or more nibbles signalled as errors
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index b6ff20f47de..0f3473d758e 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -112,7 +112,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
struct tx_ring *tx_ring = &adapter->tx_ring;
/* Allocate memory for the TCB's (Transmit Control Block) */
- adapter->tx_ring.tcb_ring = (struct tcb *)
+ adapter->tx_ring.tcb_ring =
kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
if (!adapter->tx_ring.tcb_ring) {
dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 1dd5fa5b888..47baab3e6ea 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -113,7 +113,13 @@
static u32 et131x_speed_set;
module_param(et131x_speed_set, uint, 0);
MODULE_PARM_DESC(et131x_speed_set,
- "Set Link speed and dublex manually (0-5) [0] \n 1 : 10Mb Half-Duplex \n 2 : 10Mb Full-Duplex \n 3 : 100Mb Half-Duplex \n 4 : 100Mb Full-Duplex \n 5 : 1000Mb Full-Duplex \n 0 : Auto Speed Auto Dublex");
+ "Set Link speed and dublex manually (0-5) [0]\n \
+ 1 : 10Mb Half-Duplex\n \
+ 2 : 10Mb Full-Duplex\n \
+ 3 : 100Mb Half-Duplex\n \
+ 4 : 100Mb Full-Duplex\n \
+ 5 : 1000Mb Full-Duplex\n \
+ 0 : Auto Speed Auto Dublex");
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
@@ -558,7 +564,7 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
/* Parse configuration parameters into the private adapter struct */
if (et131x_speed_set)
dev_info(&etdev->pdev->dev,
- "Speed set manually to : %d \n", et131x_speed_set);
+ "Speed set manually to : %d\n", et131x_speed_set);
etdev->SpeedDuplex = et131x_speed_set;
etdev->RegistryJumboPacket = 1514; /* 1514-9216 */
@@ -820,7 +826,7 @@ static int __init et131x_init_module(void)
if (et131x_speed_set < PARM_SPEED_DUPLEX_MIN ||
et131x_speed_set > PARM_SPEED_DUPLEX_MAX) {
printk(KERN_WARNING "et131x: invalid speed setting ignored.\n");
- et131x_speed_set = 0;
+ et131x_speed_set = 0;
}
return pci_register_driver(&et131x_driver);
}
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c
index cb7f6775ce0..36f68fe3e8c 100644
--- a/drivers/staging/et131x/et131x_isr.c
+++ b/drivers/staging/et131x/et131x_isr.c
@@ -253,14 +253,12 @@ void et131x_isr_handler(struct work_struct *work)
* exit.
*/
/* Handle all the completed Transmit interrupts */
- if (status & ET_INTR_TXDMA_ISR) {
+ if (status & ET_INTR_TXDMA_ISR)
et131x_handle_send_interrupt(etdev);
- }
/* Handle all the completed Receives interrupts */
- if (status & ET_INTR_RXDMA_XFR_DONE) {
+ if (status & ET_INTR_RXDMA_XFR_DONE)
et131x_handle_recv_interrupt(etdev);
- }
status &= 0xffffffd7;
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index ab047f2ff72..106d548982c 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -404,7 +404,7 @@ void et131x_multicast(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
uint32_t PacketFilter = 0;
unsigned long flags;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
spin_lock_irqsave(&adapter->Lock, flags);
@@ -426,33 +426,29 @@ void et131x_multicast(struct net_device *netdev)
* accordingly
*/
- if (netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC)
adapter->PacketFilter |= ET131X_PACKET_TYPE_PROMISCUOUS;
- } else {
+ else
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
- }
- if (netdev->flags & IFF_ALLMULTI) {
+ if (netdev->flags & IFF_ALLMULTI)
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
- }
- if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) {
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
- }
if (netdev_mc_count(netdev) < 1) {
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
- } else {
+ } else
adapter->PacketFilter |= ET131X_PACKET_TYPE_MULTICAST;
- }
/* Set values in the private adapter struct */
i = 0;
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == NIC_MAX_MCAST_LIST)
break;
- memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN);
+ memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
}
adapter->MCAddressCount = i;
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index a50a21518a8..4e52105e607 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -134,7 +134,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval,
/* Structure to hold all of our device specific stuff */
struct usb_alphatrack {
- struct semaphore sem; /* locks this structure */
+ struct mutex mtx; /* locks this structure */
struct usb_interface *intf; /* save off the usb interface pointer */
int open_count; /* number of times this port has been opened */
@@ -238,7 +238,7 @@ static void usb_alphatrack_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != INPUT_CMD_SIZE) {
dev_warn(&dev->intf->dev,
"Urb length was %d bytes!!"
- "Do something intelligent \n", urb->actual_length);
+ "Do something intelligent\n", urb->actual_length);
} else {
alphatrack_ocmd_info(&dev->intf->dev,
&(*dev->ring_buffer)[dev->ring_tail].cmd,
@@ -347,7 +347,7 @@ static int usb_alphatrack_open(struct inode *inode, struct file *file)
}
/* lock this device */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto unlock_disconnect_exit;
}
@@ -390,7 +390,7 @@ static int usb_alphatrack_open(struct inode *inode, struct file *file)
file->private_data = dev;
unlock_exit:
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
unlock_disconnect_exit:
mutex_unlock(&disconnect_mutex);
@@ -413,7 +413,7 @@ static int usb_alphatrack_release(struct inode *inode, struct file *file)
goto exit;
}
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -425,7 +425,7 @@ static int usb_alphatrack_release(struct inode *inode, struct file *file)
if (dev->intf == NULL) {
/* the device was unplugged before the file was released */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
/* unlock here as usb_alphatrack_delete frees dev */
usb_alphatrack_delete(dev);
retval = -ENODEV;
@@ -441,7 +441,7 @@ static int usb_alphatrack_release(struct inode *inode, struct file *file)
dev->open_count = 0;
unlock_exit:
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -486,7 +486,7 @@ static ssize_t usb_alphatrack_read(struct file *file, char __user *buffer,
goto exit;
/* lock this object */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -532,7 +532,7 @@ static ssize_t usb_alphatrack_read(struct file *file, char __user *buffer,
unlock_exit:
/* unlock the device */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -556,7 +556,7 @@ static ssize_t usb_alphatrack_write(struct file *file,
goto exit;
/* lock this object */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -599,7 +599,7 @@ static ssize_t usb_alphatrack_write(struct file *file,
}
if (dev->interrupt_out_endpoint == NULL) {
- err("Endpoint should not be be null! \n");
+ err("Endpoint should not be be null!\n");
goto unlock_exit;
}
@@ -627,7 +627,7 @@ static ssize_t usb_alphatrack_write(struct file *file,
unlock_exit:
/* unlock the device */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -678,7 +678,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
dev_err(&intf->dev, "Out of memory\n");
goto exit;
}
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mtx);
dev->intf = intf;
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
@@ -771,7 +771,7 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
kmalloc(sizeof(struct alphatrack_ocmd) * true_size, GFP_KERNEL);
if (!dev->write_buffer) {
- dev_err(&intf->dev, "Couldn't allocate write_buffer \n");
+ dev_err(&intf->dev, "Couldn't allocate write_buffer\n");
goto error;
}
@@ -835,7 +835,7 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- down(&dev->sem);
+ mutex_lock(&dev->mtx);
minor = intf->minor;
@@ -844,11 +844,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
usb_alphatrack_delete(dev);
} else {
dev->intf = NULL;
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
}
atomic_set(&dev->writes_pending, 0);
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 2f03f43f3a2..eed74f0fe0b 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -123,7 +123,7 @@ struct tranzport_cmd {
/* Structure to hold all of our device specific stuff */
struct usb_tranzport {
- struct semaphore sem; /* locks this structure */
+ struct mutex mtx; /* locks this structure */
struct usb_interface *intf; /* save off the usb interface pointer */
int open_count; /* number of times this port opened */
struct tranzport_cmd (*ring_buffer)[RING_BUFFER_SIZE];
@@ -198,7 +198,9 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
- int temp = simple_strtoul(buf, NULL, 10); \
+ unsigned long temp; \
+ if (strict_strtoul(buf, 10, &temp)) \
+ return -EINVAL; \
t->value = temp; \
return count; \
} \
@@ -255,7 +257,7 @@ static void usb_tranzport_interrupt_in_callback(struct urb *urb)
if (urb->actual_length != 8) {
dev_warn(&dev->intf->dev,
"Urb length was %d bytes!!"
- "Do something intelligent \n",
+ "Do something intelligent\n",
urb->actual_length);
} else {
dbg_info(&dev->intf->dev,
@@ -365,7 +367,7 @@ static int usb_tranzport_open(struct inode *inode, struct file *file)
}
/* lock this device */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto unlock_disconnect_exit;
}
@@ -409,7 +411,7 @@ static int usb_tranzport_open(struct inode *inode, struct file *file)
file->private_data = dev;
unlock_exit:
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
unlock_disconnect_exit:
mutex_unlock(&disconnect_mutex);
@@ -432,7 +434,7 @@ static int usb_tranzport_release(struct inode *inode, struct file *file)
goto exit;
}
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -444,7 +446,7 @@ static int usb_tranzport_release(struct inode *inode, struct file *file)
if (dev->intf == NULL) {
/* the device was unplugged before the file was released */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
/* unlock here as usb_tranzport_delete frees dev */
usb_tranzport_delete(dev);
retval = -ENODEV;
@@ -460,7 +462,7 @@ static int usb_tranzport_release(struct inode *inode, struct file *file)
dev->open_count = 0;
unlock_exit:
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -510,7 +512,7 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
goto exit;
/* lock this object */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -658,7 +660,7 @@ retval = 8;
unlock_exit:
/* unlock the device */
-up(&dev->sem);
+mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -682,7 +684,7 @@ static ssize_t usb_tranzport_write(struct file *file,
goto exit;
/* lock this object */
- if (down_interruptible(&dev->sem)) {
+ if (mutex_lock_interruptible(&dev->mtx)) {
retval = -ERESTARTSYS;
goto exit;
}
@@ -724,7 +726,7 @@ static ssize_t usb_tranzport_write(struct file *file,
}
if (dev->interrupt_out_endpoint == NULL) {
- err("Endpoint should not be be null! \n");
+ err("Endpoint should not be be null!\n");
goto unlock_exit;
}
@@ -751,7 +753,7 @@ static ssize_t usb_tranzport_write(struct file *file,
unlock_exit:
/* unlock the device */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit:
return retval;
@@ -800,7 +802,7 @@ static int usb_tranzport_probe(struct usb_interface *intf,
dev_err(&intf->dev, "Out of memory\n");
goto exit;
}
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mtx);
dev->intf = intf;
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
@@ -940,18 +942,18 @@ static void usb_tranzport_disconnect(struct usb_interface *intf)
mutex_lock(&disconnect_mutex);
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- down(&dev->sem);
+ mutex_lock(&dev->mtx);
minor = intf->minor;
/* give back our minor */
usb_deregister_dev(intf, &usb_tranzport_class);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
usb_tranzport_delete(dev);
} else {
dev->intf = NULL;
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
}
mutex_unlock(&disconnect_mutex);
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/go7007/go7007-fw.c
index ee622ff1707..c9a6409edfe 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/go7007/go7007-fw.c
@@ -380,13 +380,12 @@ static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space)
unsigned int addr = 0x19;
int size = 0, i, off = 0, chunk;
- buf = kmalloc(4096, GFP_KERNEL);
+ buf = kzalloc(4096, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "go7007: unable to allocate 4096 bytes for "
"firmware construction\n");
return -1;
}
- memset(buf, 0, 4096);
for (i = 1; i < 32; ++i) {
mjpeg_frame_header(go, buf + size, i);
@@ -651,13 +650,12 @@ static int gen_mpeg1hdr_to_package(struct go7007 *go,
unsigned int addr = 0x19;
int i, off = 0, chunk;
- buf = kmalloc(5120, GFP_KERNEL);
+ buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
"firmware construction\n");
return -1;
}
- memset(buf, 0, 5120);
framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME);
if (go->interlace_coding)
framelen[0] += mpeg1_frame_header(go, buf + framelen[0] / 8,
@@ -839,13 +837,12 @@ static int gen_mpeg4hdr_to_package(struct go7007 *go,
unsigned int addr = 0x19;
int i, off = 0, chunk;
- buf = kmalloc(5120, GFP_KERNEL);
+ buf = kzalloc(5120, GFP_KERNEL);
if (buf == NULL) {
printk(KERN_ERR "go7007: unable to allocate 5120 bytes for "
"firmware construction\n");
return -1;
}
- memset(buf, 0, 5120);
framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME);
i = 368;
framelen[1] = mpeg4_frame_header(go, buf + i, 0, BFRAME_PRE);
@@ -1585,13 +1582,12 @@ int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen)
go->board_info->firmware);
return -1;
}
- code = kmalloc(codespace * 2, GFP_KERNEL);
+ code = kzalloc(codespace * 2, GFP_KERNEL);
if (code == NULL) {
printk(KERN_ERR "go7007: unable to allocate %d bytes for "
"firmware construction\n", codespace * 2);
goto fw_failed;
}
- memset(code, 0, codespace * 2);
src = (__le16 *)fw_entry->data;
srclen = fw_entry->size / 2;
while (srclen >= 2) {
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index ee278f64a16..20ed930b588 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -670,10 +670,9 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
"go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
#endif
- tbuf = kmalloc(8, GFP_KERNEL);
+ tbuf = kzalloc(8, GFP_KERNEL);
if (tbuf == NULL)
return -ENOMEM;
- memset(tbuf, 0, 8);
tbuf[0] = data & 0xff;
tbuf[1] = data >> 8;
tbuf[2] = addr & 0xff;
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
index 723c1a64d87..46b4b9f6855 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/go7007/go7007-v4l2.c
@@ -720,7 +720,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
if (count > 32)
count = 32;
- gofh->bufs = kmalloc(count * sizeof(struct go7007_buffer),
+ gofh->bufs = kcalloc(count, sizeof(struct go7007_buffer),
GFP_KERNEL);
if (!gofh->bufs) {
@@ -728,8 +728,6 @@ static int vidioc_reqbufs(struct file *file, void *priv,
goto unlock_and_return;
}
- memset(gofh->bufs, 0, count * sizeof(struct go7007_buffer));
-
for (i = 0; i < count; ++i) {
gofh->bufs[i].go = go;
gofh->bufs[i].index = i;
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index b25d7d2090e..cf7c34a9945 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
(status >> 16) & 0x0f);
if (status & 0x100000) {
- dma_sync_single(&dev->pci->dev,
- saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->pci->dev,
+ saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
} else {
- dma_sync_single(&dev->pci->dev,
- saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->pci->dev,
+ saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
}
@@ -440,10 +440,9 @@ static int saa7134_go7007_init(struct saa7134_dev *dev)
printk(KERN_DEBUG "saa7134-go7007: probing new SAA713X board\n");
- saa = kmalloc(sizeof(struct saa7134_go7007), GFP_KERNEL);
+ saa = kzalloc(sizeof(struct saa7134_go7007), GFP_KERNEL);
if (saa == NULL)
return -ENOMEM;
- memset(saa, 0, sizeof(struct saa7134_go7007));
/* Allocate a couple pages for receiving the compressed stream */
saa->top = (u8 *)get_zeroed_page(GFP_KERNEL);
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index 5c12b4d3845..bd925457f8b 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -289,6 +289,7 @@ static int wis_saa7113_probe(struct i2c_client *client,
if (write_regs(client, initial_registers) < 0) {
printk(KERN_ERR
"wis-saa7113: error initializing SAA7113\n");
+ i2c_set_clientdata(client, NULL);
kfree(dec);
return -ENODEV;
}
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index 73f2283a880..b2eb804c195 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -422,6 +422,7 @@ static int wis_saa7115_probe(struct i2c_client *client,
if (write_regs(client, initial_registers) < 0) {
printk(KERN_ERR
"wis-saa7115: error initializing SAA7115\n");
+ i2c_set_clientdata(client, NULL);
kfree(dec);
return -ENODEV;
}
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index 3ac6f785c4a..2afea09091b 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -294,6 +294,7 @@ static int wis_tw9903_probe(struct i2c_client *client,
if (write_regs(client, initial_registers) < 0) {
printk(KERN_ERR "wis-tw9903: error initializing TW9903\n");
+ i2c_set_clientdata(client, NULL);
kfree(dec);
return -ENODEV;
}
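
The go7007 changes above replace open-coded kmalloc()+memset() pairs with kzalloc() and kcalloc(). As a minimal sketch of the idiom (generic code, not from go7007; "struct foo" is a placeholder type):

#include <linux/slab.h>

struct foo {
	int index;
};

static struct foo *alloc_foo_array(size_t count)
{
	/*
	 * kcalloc() zeroes the memory and also rejects a count * size
	 * multiplication overflow, which a bare kmalloc() would not.
	 */
	struct foo *arr = kcalloc(count, sizeof(*arr), GFP_KERNEL);

	return arr;	/* NULL on failure; the caller maps that to -ENOMEM */
}

For a single zeroed object, kzalloc(sizeof(*obj), GFP_KERNEL) is the equivalent one-object form.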
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 40447020a79..97480f5c659 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -17,7 +17,7 @@ config HYPERV_STORAGE
config HYPERV_BLOCK
tristate "Microsoft Hyper-V virtual block driver"
- depends on BLOCK && SCSI
+ depends on BLOCK && SCSI && LBDAF
default HYPERV
help
Select this option to enable the Hyper-V virtual block driver.
@@ -29,4 +29,10 @@ config HYPERV_NET
help
Select this option to enable the Hyper-V virtual network driver.
+config HYPERV_UTILS
+ tristate "Microsoft Hyper-V Utilities driver"
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V Utilities.
+
endif
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 27ebae8a918..1866f80a45d 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -2,10 +2,11 @@ obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_HYPERV_BLOCK) += hv_blkvsc.o
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
+obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
hv_vmbus-objs := vmbus_drv.o osd.o \
- Vmbus.o Hv.o Connection.o Channel.o \
- ChannelMgmt.o ChannelInterface.o RingBuffer.o
-hv_storvsc-objs := storvsc_drv.o StorVsc.o
-hv_blkvsc-objs := blkvsc_drv.o BlkVsc.o
-hv_netvsc-objs := netvsc_drv.o NetVsc.o RndisFilter.o
+ vmbus.o hv.o connection.o channel.o \
+ channel_mgmt.o channel_interface.o ring_buffer.o
+hv_storvsc-objs := storvsc_drv.o storvsc.o
+hv_blkvsc-objs := blkvsc_drv.o blkvsc.o
+hv_netvsc-objs := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index dbfbde937a6..66a89c809dd 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,7 +1,5 @@
TODO:
- fix remaining checkpatch warnings and errors
- - use of /** when it is not a kerneldoc header
- - remove RingBuffer.c to us in-kernel ringbuffer functions instead.
- audit the vmbus to verify it is working properly with the
driver model
- convert vmbus driver interface function pointer tables
@@ -9,7 +7,6 @@ TODO:
- see if the vmbus can be merged with the other virtual busses
in the kernel
- audit the network driver
- - use existing net_device_stats struct in network device
- checking for carrier inside open is wrong, network device API
confusion??
- audit the block driver
diff --git a/drivers/staging/hv/BlkVsc.c b/drivers/staging/hv/blkvsc.c
index a48ee3a1264..0daebc472e6 100644
--- a/drivers/staging/hv/BlkVsc.c
+++ b/drivers/staging/hv/blkvsc.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include "osd.h"
-#include "StorVsc.c"
+#include "storvsc.c"
static const char *gBlkDriverName = "blkvsc";
@@ -78,7 +78,7 @@ int BlkVscInitialize(struct hv_driver *Driver)
storDriver = (struct storvsc_driver_object *)Driver;
/* Make sure we are at least 2 pages since 1 page is used for control */
- ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1));
+ /* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
Driver->name = gBlkDriverName;
memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(struct hv_guid));
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 8f1fda3256a..61bd0be5fb1 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -32,9 +32,9 @@
#include <scsi/scsi_dbg.h>
#include "osd.h"
#include "logging.h"
-#include "VersionInfo.h"
+#include "version_info.h"
#include "vmbus.h"
-#include "StorVscApi.h"
+#include "storvsc_api.h"
#define BLKVSC_MINORS 64
@@ -149,13 +149,14 @@ static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
-
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
+module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");
/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;
-static struct block_device_operations block_ops = {
+static const struct block_device_operations block_ops = {
.owner = THIS_MODULE,
.open = blkvsc_open,
.release = blkvsc_release,
@@ -165,7 +166,7 @@ static struct block_device_operations block_ops = {
.ioctl = blkvsc_ioctl,
};
-/**
+/*
* blkvsc_drv_init - BlkVsc driver initialization.
*/
static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
@@ -245,7 +246,7 @@ static void blkvsc_drv_exit(void)
return;
}
-/**
+/*
* blkvsc_probe - Add a new device for this driver
*/
static int blkvsc_probe(struct device *device)
@@ -288,8 +289,8 @@ static int blkvsc_probe(struct device *device)
/* Initialize what we can here */
spin_lock_init(&blkdev->lock);
- ASSERT(sizeof(struct blkvsc_request_group) <=
- sizeof(struct blkvsc_request));
+ /* ASSERT(sizeof(struct blkvsc_request_group) <= */
+ /* sizeof(struct blkvsc_request)); */
blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
sizeof(struct blkvsc_request) +
@@ -555,7 +556,7 @@ static int blkvsc_do_inquiry(struct block_device_context *blkdev)
blkdev->device_type = UNKNOWN_DEV_TYPE;
}
- DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type);
+ DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);
blkdev->device_id_len = buf[7];
if (blkdev->device_id_len > 64)
@@ -733,7 +734,7 @@ static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
return 0;
}
-/**
+/*
* blkvsc_remove() - Callback when our device is removed
*/
static int blkvsc_remove(struct device *device)
@@ -808,8 +809,8 @@ static int blkvsc_remove(struct device *device)
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
- ASSERT(blkvsc_req->req);
- ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));
+ /* ASSERT(blkvsc_req->req); */
+ /* ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); */
blkvsc_req->cmd_len = 16;
@@ -940,7 +941,7 @@ static int blkvsc_do_request(struct block_device_context *blkdev,
int pending = 0;
struct blkvsc_request_group *group = NULL;
- DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu \n", blkdev, req,
+ DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
(unsigned long)blk_rq_pos(req));
/* Create a group to tie req to list of blkvsc_reqs */
@@ -1116,7 +1117,7 @@ static void blkvsc_request_completion(struct hv_storvsc_request *request)
unsigned long flags;
struct blkvsc_request *comp_req, *tmp;
- ASSERT(blkvsc_req->group);
+ /* ASSERT(blkvsc_req->group); */
DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
"sect_start %lu sect_count %ld len %d group outstd %d "
@@ -1144,7 +1145,7 @@ static void blkvsc_request_completion(struct hv_storvsc_request *request)
&blkvsc_req->group->blkvsc_req_list,
req_entry) {
DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
- "sect_start %lu sect_count %ld \n",
+ "sect_start %lu sect_count %ld\n",
comp_req,
(unsigned long)comp_req->sector_start,
comp_req->sector_count);
@@ -1198,7 +1199,7 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
&pend_req->group->blkvsc_req_list,
req_entry) {
DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
- "sect_start %lu sect_count %ld \n",
+ "sect_start %lu sect_count %ld\n",
comp_req,
(unsigned long) comp_req->sector_start,
comp_req->sector_count);
@@ -1213,7 +1214,10 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
(!comp_req->request.Status ? 0 : -EIO),
comp_req->sector_count *
blkdev->sector_size);
- ASSERT(ret != 0);
+
+ /* FIXME: shouldn't this do more than return? */
+ if (ret)
+ goto out;
}
kmem_cache_free(blkdev->request_pool, comp_req);
@@ -1245,6 +1249,7 @@ static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
kmem_cache_free(blkdev->request_pool, pend_req);
}
+out:
return ret;
}
@@ -1276,7 +1281,7 @@ static void blkvsc_request(struct request_queue *queue)
struct request *req;
int ret = 0;
- DPRINT_DBG(BLKVSC_DRV, "- enter \n");
+ DPRINT_DBG(BLKVSC_DRV, "- enter\n");
while ((req = blk_peek_request(queue)) != NULL) {
DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
@@ -1485,7 +1490,7 @@ static int __init blkvsc_init(void)
{
int ret;
- ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */
+ BUILD_BUG_ON(sizeof(sector_t) != 8);
DPRINT_ENTER(BLKVSC_DRV);
@@ -1507,6 +1512,6 @@ static void __exit blkvsc_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
-module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);
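
The hunk above turns the ring-buffer size into a read-only module parameter. A generic sketch of the convention (illustrative names, not blkvsc code): MODULE_PARM_DESC() must name the same identifier that module_param() registers, otherwise the description is attached to a parameter that does not exist.

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_ring_size = 64 * 1024;	/* hypothetical default */

/* S_IRUGO: readable under /sys/module/<module>/parameters/, not writable */
module_param(example_ring_size, int, S_IRUGO);
MODULE_PARM_DESC(example_ring_size, "Ring buffer size (in bytes)");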
diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/channel.c
index e69e9ee704a..f047c5a7f64 100644
--- a/drivers/staging/hv/Channel.c
+++ b/drivers/staging/hv/channel.c
@@ -21,9 +21,10 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "osd.h"
#include "logging.h"
-#include "VmbusPrivate.h"
+#include "vmbus_private.h"
/* Internal routines */
static int VmbusChannelCreateGpadlHeader(
@@ -65,8 +66,9 @@ static void DumpMonitorPage(struct hv_monitor_page *MonitorPage)
}
#endif
-/**
- * VmbusChannelSetEvent - Trigger an event notification on the specified channel.
+/*
+ * VmbusChannelSetEvent - Trigger an event notification on the specified
+ * channel.
*/
static void VmbusChannelSetEvent(struct vmbus_channel *Channel)
{
@@ -120,7 +122,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel)
}
#endif
-/**
+/*
* VmbusChannelGetDebugInfo -Retrieve various channel debug info
*/
void VmbusChannelGetDebugInfo(struct vmbus_channel *Channel,
@@ -165,7 +167,7 @@ void VmbusChannelGetDebugInfo(struct vmbus_channel *Channel,
RingBufferGetDebugInfo(&Channel->Outbound, &DebugInfo->Outbound);
}
-/**
+/*
* VmbusChannelOpen - Open the specified channel.
*/
int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
@@ -173,16 +175,16 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
void (*OnChannelCallback)(void *context), void *Context)
{
struct vmbus_channel_open_channel *openMsg;
- struct vmbus_channel_msginfo *openInfo;
+ struct vmbus_channel_msginfo *openInfo = NULL;
void *in, *out;
unsigned long flags;
- int ret;
+ int ret, err = 0;
DPRINT_ENTER(VMBUS);
/* Aligned to page size */
- ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1)));
- ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1)));
+ /* ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1))); */
+ /* ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1))); */
NewChannel->OnChannelCallback = OnChannelCallback;
NewChannel->ChannelCallbackContext = Context;
@@ -190,8 +192,10 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
/* Allocate the ring buffer */
out = osd_PageAlloc((SendRingBufferSize + RecvRingBufferSize)
>> PAGE_SHIFT);
- ASSERT(out);
- ASSERT(((unsigned long)out & (PAGE_SIZE-1)) == 0);
+ if (!out)
+ return -ENOMEM;
+
+ /* ASSERT(((unsigned long)out & (PAGE_SIZE-1)) == 0); */
in = (void *)((unsigned long)out + SendRingBufferSize);
@@ -199,9 +203,18 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
NewChannel->RingBufferPageCount = (SendRingBufferSize +
RecvRingBufferSize) >> PAGE_SHIFT;
- RingBufferInit(&NewChannel->Outbound, out, SendRingBufferSize);
+ ret = RingBufferInit(&NewChannel->Outbound, out, SendRingBufferSize);
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
+
+ ret = RingBufferInit(&NewChannel->Inbound, in, RecvRingBufferSize);
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
- RingBufferInit(&NewChannel->Inbound, in, RecvRingBufferSize);
/* Establish the gpadl for the ring buffer */
DPRINT_DBG(VMBUS, "Establishing ring buffer's gpadl for channel %p...",
@@ -215,6 +228,11 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
RecvRingBufferSize,
&NewChannel->RingBufferGpadlHandle);
+ if (ret != 0) {
+ err = ret;
+ goto errorout;
+ }
+
DPRINT_DBG(VMBUS, "channel %p <relid %d gpadl 0x%x send ring %p "
"size %d recv ring %p size %d, downstreamoffset %d>",
NewChannel, NewChannel->OfferMsg.ChildRelId,
@@ -229,21 +247,31 @@ int VmbusChannelOpen(struct vmbus_channel *NewChannel, u32 SendRingBufferSize,
openInfo = kmalloc(sizeof(*openInfo) +
sizeof(struct vmbus_channel_open_channel),
GFP_KERNEL);
- ASSERT(openInfo != NULL);
+ if (!openInfo) {
+ err = -ENOMEM;
+ goto errorout;
+ }
openInfo->WaitEvent = osd_WaitEventCreate();
+ if (!openInfo->WaitEvent) {
+ err = -ENOMEM;
+ goto errorout;
+ }
openMsg = (struct vmbus_channel_open_channel *)openInfo->Msg;
openMsg->Header.MessageType = ChannelMessageOpenChannel;
openMsg->OpenId = NewChannel->OfferMsg.ChildRelId; /* FIXME */
openMsg->ChildRelId = NewChannel->OfferMsg.ChildRelId;
openMsg->RingBufferGpadlHandle = NewChannel->RingBufferGpadlHandle;
- ASSERT(openMsg->RingBufferGpadlHandle);
openMsg->DownstreamRingBufferPageOffset = SendRingBufferSize >>
PAGE_SHIFT;
openMsg->ServerContextAreaGpadlHandle = 0; /* TODO */
- ASSERT(UserDataLen <= MAX_USER_DEFINED_BYTES);
+ if (UserDataLen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+ goto errorout;
+ }
+
if (UserDataLen)
memcpy(openMsg->UserData, UserData, UserDataLen);
@@ -281,10 +309,19 @@ Cleanup:
DPRINT_EXIT(VMBUS);
return 0;
+
+errorout:
+ RingBufferCleanup(&NewChannel->Outbound);
+ RingBufferCleanup(&NewChannel->Inbound);
+ osd_PageFree(out, (SendRingBufferSize + RecvRingBufferSize)
+ >> PAGE_SHIFT);
+ kfree(openInfo);
+ return err;
}
-/**
- * DumpGpadlBody - Dump the gpadl body message to the console for debugging purposes.
+/*
+ * DumpGpadlBody - Dump the gpadl body message to the console for
+ * debugging purposes.
*/
static void DumpGpadlBody(struct vmbus_channel_gpadl_body *Gpadl, u32 Len)
{
@@ -300,8 +337,9 @@ static void DumpGpadlBody(struct vmbus_channel_gpadl_body *Gpadl, u32 Len)
i, Gpadl->Pfn[i]);
}
-/**
- * DumpGpadlHeader - Dump the gpadl header message to the console for debugging purposes.
+/*
+ * DumpGpadlHeader - Dump the gpadl header message to the console for
+ * debugging purposes.
*/
static void DumpGpadlHeader(struct vmbus_channel_gpadl_header *Gpadl)
{
@@ -325,7 +363,7 @@ static void DumpGpadlHeader(struct vmbus_channel_gpadl_header *Gpadl)
}
}
-/**
+/*
* VmbusChannelCreateGpadlHeader - Creates a gpadl for the specified buffer
*/
static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
@@ -338,13 +376,13 @@ static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
struct vmbus_channel_gpadl_header *gpaHeader;
struct vmbus_channel_gpadl_body *gpadlBody;
struct vmbus_channel_msginfo *msgHeader;
- struct vmbus_channel_msginfo *msgBody;
+ struct vmbus_channel_msginfo *msgBody = NULL;
u32 msgSize;
int pfnSum, pfnCount, pfnLeft, pfnCurr, pfnSize;
/* ASSERT((kbuffer & (PAGE_SIZE-1)) == 0); */
- ASSERT((Size & (PAGE_SIZE-1)) == 0);
+ /* ASSERT((Size & (PAGE_SIZE-1)) == 0); */
pageCount = Size >> PAGE_SHIFT;
pfn = virt_to_phys(Kbuffer) >> PAGE_SHIFT;
@@ -362,6 +400,8 @@ static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
sizeof(struct vmbus_channel_gpadl_header) +
sizeof(struct gpa_range) + pfnCount * sizeof(u64);
msgHeader = kzalloc(msgSize, GFP_KERNEL);
+ if (!msgHeader)
+ goto nomem;
INIT_LIST_HEAD(&msgHeader->SubMsgList);
msgHeader->MessageSize = msgSize;
@@ -396,7 +436,9 @@ static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
sizeof(struct vmbus_channel_gpadl_body) +
pfnCurr * sizeof(u64);
msgBody = kzalloc(msgSize, GFP_KERNEL);
- ASSERT(msgBody);
+ /* FIXME: we probably need to do more if this fails */
+ if (!msgBody)
+ goto nomem;
msgBody->MessageSize = msgSize;
(*MessageCount)++;
gpadlBody =
@@ -439,9 +481,13 @@ static int VmbusChannelCreateGpadlHeader(void *Kbuffer, u32 Size,
}
return 0;
+nomem:
+ kfree(msgHeader);
+ kfree(msgBody);
+ return -ENOMEM;
}
-/**
+/*
* VmbusChannelEstablishGpadl - Estabish a GPADL for the specified buffer
*
* @Channel: a channel
@@ -455,24 +501,29 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
struct vmbus_channel_gpadl_header *gpadlMsg;
struct vmbus_channel_gpadl_body *gpadlBody;
/* struct vmbus_channel_gpadl_created *gpadlCreated; */
- struct vmbus_channel_msginfo *msgInfo;
+ struct vmbus_channel_msginfo *msgInfo = NULL;
struct vmbus_channel_msginfo *subMsgInfo;
u32 msgCount;
struct list_head *curr;
u32 nextGpadlHandle;
unsigned long flags;
- int ret;
+ int ret = 0;
DPRINT_ENTER(VMBUS);
nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
atomic_inc(&gVmbusConnection.NextGpadlHandle);
- VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
- ASSERT(msgInfo != NULL);
- ASSERT(msgCount > 0);
+ ret = VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
+ if (ret)
+ return ret;
msgInfo->WaitEvent = osd_WaitEventCreate();
+ if (!msgInfo->WaitEvent) {
+ ret = -ENOMEM;
+ goto Cleanup;
+ }
+
gpadlMsg = (struct vmbus_channel_gpadl_header *)msgInfo->Msg;
gpadlMsg->Header.MessageType = ChannelMessageGpadlHeader;
gpadlMsg->ChildRelId = Channel->OfferMsg.ChildRelId;
@@ -518,7 +569,9 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
ret = VmbusPostMessage(gpadlBody,
subMsgInfo->MessageSize -
sizeof(*subMsgInfo));
- ASSERT(ret == 0);
+ if (ret != 0)
+ goto Cleanup;
+
}
}
osd_WaitEventWait(msgInfo->WaitEvent);
@@ -545,7 +598,7 @@ Cleanup:
return ret;
}
-/**
+/*
* VmbusChannelTeardownGpadl -Teardown the specified GPADL handle
*/
int VmbusChannelTeardownGpadl(struct vmbus_channel *Channel, u32 GpadlHandle)
@@ -557,13 +610,18 @@ int VmbusChannelTeardownGpadl(struct vmbus_channel *Channel, u32 GpadlHandle)
DPRINT_ENTER(VMBUS);
- ASSERT(GpadlHandle != 0);
+ /* ASSERT(GpadlHandle != 0); */
info = kmalloc(sizeof(*info) +
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
- ASSERT(info != NULL);
+ if (!info)
+ return -ENOMEM;
info->WaitEvent = osd_WaitEventCreate();
+ if (!info->WaitEvent) {
+ kfree(info);
+ return -ENOMEM;
+ }
msg = (struct vmbus_channel_gpadl_teardown *)info->Msg;
@@ -598,7 +656,7 @@ int VmbusChannelTeardownGpadl(struct vmbus_channel *Channel, u32 GpadlHandle)
return ret;
}
-/**
+/*
* VmbusChannelClose - Close the specified channel
*/
void VmbusChannelClose(struct vmbus_channel *Channel)
@@ -617,7 +675,10 @@ void VmbusChannelClose(struct vmbus_channel *Channel)
/* Send a closing message */
info = kmalloc(sizeof(*info) +
sizeof(struct vmbus_channel_close_channel), GFP_KERNEL);
- ASSERT(info != NULL);
+ /* FIXME: can't do anything other than return here because the
+ * function is void */
+ if (!info)
+ return;
/* info->waitEvent = osd_WaitEventCreate(); */
@@ -664,7 +725,18 @@ void VmbusChannelClose(struct vmbus_channel *Channel)
}
/**
- * VmbusChannelSendPacket - Send the specified buffer on the given channel
+ * VmbusChannelSendPacket() - Send the specified buffer on the given channel
+ * @Channel: Pointer to vmbus_channel structure.
+ * @Buffer: Pointer to the buffer containing the data to send.
+ * @BufferLen: Length of the data in the buffer
+ * @RequestId: Identifier of the request
+ * @Type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ *
+ * Sends data in @Buffer directly to hyper-v via the vmbus.
+ * This will send the data unparsed to hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
*/
int VmbusChannelSendPacket(struct vmbus_channel *Channel, const void *Buffer,
u32 BufferLen, u64 RequestId,
@@ -683,7 +755,7 @@ int VmbusChannelSendPacket(struct vmbus_channel *Channel, const void *Buffer,
DumpVmbusChannel(Channel);
- ASSERT((packetLenAligned - packetLen) < sizeof(u64));
+ /* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */
/* Setup the descriptor */
desc.Type = Type; /* VmbusPacketTypeDataInBand; */
@@ -708,9 +780,11 @@ int VmbusChannelSendPacket(struct vmbus_channel *Channel, const void *Buffer,
return ret;
}
+EXPORT_SYMBOL(VmbusChannelSendPacket);
-/**
- * VmbusChannelSendPacketPageBuffer - Send a range of single-page buffer packets using a GPADL Direct packet type.
+/*
+ * VmbusChannelSendPacketPageBuffer - Send a range of single-page buffer
+ * packets using a GPADL Direct packet type.
*/
int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
struct hv_page_buffer PageBuffers[],
@@ -728,7 +802,8 @@ int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
DPRINT_ENTER(VMBUS);
- ASSERT(PageCount <= MAX_PAGE_BUFFER_COUNT);
+ if (PageCount > MAX_PAGE_BUFFER_COUNT)
+ return -EINVAL;
DumpVmbusChannel(Channel);
@@ -742,7 +817,7 @@ int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
packetLen = descSize + BufferLen;
packetLenAligned = ALIGN_UP(packetLen, sizeof(u64));
- ASSERT((packetLenAligned - packetLen) < sizeof(u64));
+ /* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */
/* Setup the descriptor */
desc.Type = VmbusPacketTypeDataUsingGpaDirect;
@@ -774,8 +849,9 @@ int VmbusChannelSendPacketPageBuffer(struct vmbus_channel *Channel,
return ret;
}
-/**
- * VmbusChannelSendPacketMultiPageBuffer - Send a multi-page buffer packet using a GPADL Direct packet type.
+/*
+ * VmbusChannelSendPacketMultiPageBuffer - Send a multi-page buffer packet
+ * using a GPADL Direct packet type.
*/
int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
struct hv_multipage_buffer *MultiPageBuffer,
@@ -798,8 +874,8 @@ int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
DPRINT_DBG(VMBUS, "data buffer - offset %u len %u pfn count %u",
MultiPageBuffer->Offset, MultiPageBuffer->Length, PfnCount);
- ASSERT(PfnCount > 0);
- ASSERT(PfnCount <= MAX_MULTIPAGE_BUFFER_COUNT);
+ if ((PfnCount == 0) || (PfnCount > MAX_MULTIPAGE_BUFFER_COUNT))
+ return -EINVAL;
/*
* Adjust the size down since VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER is
@@ -811,7 +887,7 @@ int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
packetLen = descSize + BufferLen;
packetLenAligned = ALIGN_UP(packetLen, sizeof(u64));
- ASSERT((packetLenAligned - packetLen) < sizeof(u64));
+ /* ASSERT((packetLenAligned - packetLen) < sizeof(u64)); */
/* Setup the descriptor */
desc.Type = VmbusPacketTypeDataUsingGpaDirect;
@@ -843,10 +919,20 @@ int VmbusChannelSendPacketMultiPageBuffer(struct vmbus_channel *Channel,
return ret;
}
+
/**
- * VmbusChannelRecvPacket - Retrieve the user packet on the specified channel
+ * VmbusChannelRecvPacket() - Retrieve the user packet on the specified channel
+ * @Channel: Pointer to vmbus_channel structure.
+ * @Buffer: Pointer to the buffer you want to receive the data into.
+ * @BufferLen: Maximum size of what the buffer will hold
+ * @BufferActualLen: The actual size of the data after it was received
+ * @RequestId: Identifier of the request
+ *
+ * Receives directly from the hyper-v vmbus and puts the data it received
+ * into Buffer. This will receive the data unparsed from hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
*/
-/* TODO: Do we ever receive a gpa direct packet other than the ones we send ? */
int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
u32 BufferLen, u32 *BufferActualLen, u64 *RequestId)
{
@@ -908,8 +994,9 @@ int VmbusChannelRecvPacket(struct vmbus_channel *Channel, void *Buffer,
return 0;
}
+EXPORT_SYMBOL(VmbusChannelRecvPacket);
-/**
+/*
* VmbusChannelRecvPacketRaw - Retrieve the raw packet on the specified channel
*/
int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
@@ -972,20 +1059,20 @@ int VmbusChannelRecvPacketRaw(struct vmbus_channel *Channel, void *Buffer,
return 0;
}
-/**
+/*
* VmbusChannelOnChannelEvent - Channel event callback
*/
void VmbusChannelOnChannelEvent(struct vmbus_channel *Channel)
{
DumpVmbusChannel(Channel);
- ASSERT(Channel->OnChannelCallback);
+ /* ASSERT(Channel->OnChannelCallback); */
Channel->OnChannelCallback(Channel->ChannelCallbackContext);
mod_timer(&Channel->poll_timer, jiffies + usecs_to_jiffies(100));
}
-/**
+/*
* VmbusChannelOnTimer - Timer event callback
*/
void VmbusChannelOnTimer(unsigned long data)
@@ -996,7 +1083,7 @@ void VmbusChannelOnTimer(unsigned long data)
channel->OnChannelCallback(channel->ChannelCallbackContext);
}
-/**
+/*
* DumpVmbusChannel - Dump vmbus channel info to the console
*/
static void DumpVmbusChannel(struct vmbus_channel *Channel)
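
VmbusChannelSendPacket() and VmbusChannelRecvPacket() are exported by this patch so that other Hyper-V modules can use them. A rough usage sketch, modelled on the negotiate callbacks elsewhere in this series (it assumes this driver's channel headers are included; the buffer size and error handling are illustrative, not taken from any in-tree caller):

/* Sketch of a channel callback that echoes an in-band packet. */
static void example_channel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen = 0;
	u64 requestid = 0;
	u8 *buf;

	buf = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf)
		return;

	/* Pull the next unparsed packet from the host, if any. */
	if (VmbusChannelRecvPacket(channel, buf, PAGE_SIZE,
				   &recvlen, &requestid) == 0 && recvlen > 0)
		/* Reply in-band, reusing the host's request id. */
		VmbusChannelSendPacket(channel, buf, recvlen, requestid,
				       VmbusPacketTypeDataInBand, 0);

	kfree(buf);
}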
diff --git a/drivers/staging/hv/Channel.h b/drivers/staging/hv/channel.h
index 6b283edcae6..acb2c556369 100644
--- a/drivers/staging/hv/Channel.h
+++ b/drivers/staging/hv/channel.h
@@ -25,7 +25,7 @@
#ifndef _CHANNEL_H_
#define _CHANNEL_H_
-#include "ChannelMgmt.h"
+#include "channel_mgmt.h"
/* The format must be the same as struct vmdata_gpa_direct */
struct VMBUS_CHANNEL_PACKET_PAGE_BUFFER {
diff --git a/drivers/staging/hv/ChannelInterface.c b/drivers/staging/hv/channel_interface.c
index 019b064f7cb..d9f51ac75ea 100644
--- a/drivers/staging/hv/ChannelInterface.c
+++ b/drivers/staging/hv/channel_interface.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include "osd.h"
-#include "VmbusPrivate.h"
+#include "vmbus_private.h"
static int IVmbusChannelOpen(struct hv_device *device, u32 SendBufferSize,
u32 RecvRingBufferSize, void *UserData,
diff --git a/drivers/staging/hv/ChannelInterface.h b/drivers/staging/hv/channel_interface.h
index 27b7a253b71..6acaf6ce2c4 100644
--- a/drivers/staging/hv/ChannelInterface.h
+++ b/drivers/staging/hv/channel_interface.h
@@ -25,7 +25,7 @@
#ifndef _CHANNEL_INTERFACE_H_
#define _CHANNEL_INTERFACE_H_
-#include "VmbusApi.h"
+#include "vmbus_api.h"
void GetChannelInterface(struct vmbus_channel_interface *ChannelInterface);
diff --git a/drivers/staging/hv/ChannelMgmt.c b/drivers/staging/hv/channel_mgmt.c
index 5f92c2102ab..3f53b4d1e4c 100644
--- a/drivers/staging/hv/ChannelMgmt.c
+++ b/drivers/staging/hv/channel_mgmt.c
@@ -22,18 +22,22 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/module.h>
#include "osd.h"
#include "logging.h"
-#include "VmbusPrivate.h"
+#include "vmbus_private.h"
+#include "utils.h"
struct vmbus_channel_message_table_entry {
enum vmbus_channel_message_type messageType;
void (*messageHandler)(struct vmbus_channel_message_header *msg);
};
-#define MAX_NUM_DEVICE_CLASSES_SUPPORTED 4
+#define MAX_MSG_TYPES 3
+#define MAX_NUM_DEVICE_CLASSES_SUPPORTED 7
+
static const struct hv_guid
- gSupportedDeviceClasses[MAX_NUM_DEVICE_CLASSES_SUPPORTED] = {
+ gSupportedDeviceClasses[MAX_NUM_DEVICE_CLASSES_SUPPORTED] = {
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
/* Storage - SCSI */
{
@@ -69,9 +73,167 @@ static const struct hv_guid
0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
}
},
+ /* 0E0B6031-5213-4934-818B-38D90CED39DB */
+ /* Shutdown */
+ {
+ .data = {
+ 0x31, 0x60, 0x0B, 0x0E, 0x13, 0x52, 0x34, 0x49,
+ 0x81, 0x8B, 0x38, 0xD9, 0x0C, 0xED, 0x39, 0xDB
+ }
+ },
+ /* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */
+ /* TimeSync */
+ {
+ .data = {
+ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
+ 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf
+ }
+ },
+ /* {57164f39-9115-4e78-ab55-382f3bd5422d} */
+ /* Heartbeat */
+ {
+ .data = {
+ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
+ 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d
+ }
+ },
};
+
/**
+ * prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
+ * @icmsghdrp: Pointer to msg header structure
+ * @negop: Pointer to negotiate message structure
+ * @buf: Raw buffer channel data
+ *
+ * @icmsghdrp is of type &struct icmsg_hdr.
+ * @negop is of type &struct icmsg_negotiate.
+ * Set up and fill in default negotiate response message. This response can
+ * come from both the vmbus driver and the hv_utils driver. The current api
+ * will respond properly to both Windows 2008 and Windows 2008-R2 operating
+ * systems.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+void prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
+ struct icmsg_negotiate *negop,
+ u8 *buf)
+{
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ icmsghdrp->icmsgsize = 0x10;
+
+ negop = (struct icmsg_negotiate *)&buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ if (negop->icframe_vercnt == 2 &&
+ negop->icversion_data[1].major == 3) {
+ negop->icversion_data[0].major = 3;
+ negop->icversion_data[0].minor = 0;
+ negop->icversion_data[1].major = 3;
+ negop->icversion_data[1].minor = 0;
+ } else {
+ negop->icversion_data[0].major = 1;
+ negop->icversion_data[0].minor = 0;
+ negop->icversion_data[1].major = 1;
+ negop->icversion_data[1].minor = 0;
+ }
+
+ negop->icframe_vercnt = 1;
+ negop->icmsg_vercnt = 1;
+ }
+}
+EXPORT_SYMBOL(prep_negotiate_resp);
+
+/**
+ * chn_cb_negotiate() - Default handler for non IDE/SCSI/NETWORK
+ * Hyper-V requests
+ * @context: Pointer to argument structure.
+ *
+ * Set up the default handler for non device driver specific requests
+ * from Hyper-V. This stub responds to the default negotiate messages
+ * that come in for every non IDE/SCSI/Network request.
+ * This behavior is normally overridden in the hv_utils driver. That
+ * driver handles requests such as graceful shutdown, heartbeats, etc.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+void chn_cb_negotiate(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u8 *buf;
+ u32 buflen, recvlen;
+ u64 requestid;
+
+ struct icmsg_hdr *icmsghdrp;
+ struct icmsg_negotiate *negop = NULL;
+
+ buflen = PAGE_SIZE;
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf)
+ return;
+
+ VmbusChannelRecvPacket(channel, buf, buflen, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ prep_negotiate_resp(icmsghdrp, negop, buf);
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ VmbusChannelSendPacket(channel, buf,
+ recvlen, requestid,
+ VmbusPacketTypeDataInBand, 0);
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(chn_cb_negotiate);
+
+/*
+ * Function table used for message responses for non IDE/SCSI/Network type
+ * messages. (Such as KVP/Shutdown etc)
+ */
+struct hyperv_service_callback hv_cb_utils[MAX_MSG_TYPES] = {
+ /* 0E0B6031-5213-4934-818B-38D90CED39DB */
+ /* Shutdown */
+ {
+ .msg_type = HV_SHUTDOWN_MSG,
+ .data = {
+ 0x31, 0x60, 0x0B, 0x0E, 0x13, 0x52, 0x34, 0x49,
+ 0x81, 0x8B, 0x38, 0xD9, 0x0C, 0xED, 0x39, 0xDB
+ },
+ .callback = chn_cb_negotiate,
+ .log_msg = "Shutdown channel functionality initialized"
+ },
+
+ /* {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */
+ /* TimeSync */
+ {
+ .msg_type = HV_TIMESYNC_MSG,
+ .data = {
+ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
+ 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf
+ },
+ .callback = chn_cb_negotiate,
+ .log_msg = "Timesync channel functionality initialized"
+ },
+ /* {57164f39-9115-4e78-ab55-382f3bd5422d} */
+ /* Heartbeat */
+ {
+ .msg_type = HV_HEARTBEAT_MSG,
+ .data = {
+ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
+ 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d
+ },
+ .callback = chn_cb_negotiate,
+ .log_msg = "Heartbeat channel functionality initialized"
+ },
+};
+EXPORT_SYMBOL(hv_cb_utils);
+
+/*
* AllocVmbusChannel - Allocate and initialize a vmbus channel object
*/
struct vmbus_channel *AllocVmbusChannel(void)
@@ -97,7 +259,7 @@ struct vmbus_channel *AllocVmbusChannel(void)
return channel;
}
-/**
+/*
* ReleaseVmbusChannel - Release the vmbus channel object itself
*/
static inline void ReleaseVmbusChannel(void *context)
@@ -115,7 +277,7 @@ static inline void ReleaseVmbusChannel(void *context)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* FreeVmbusChannel - Release the resources used by the vmbus channel object
*/
void FreeVmbusChannel(struct vmbus_channel *Channel)
@@ -131,8 +293,9 @@ void FreeVmbusChannel(struct vmbus_channel *Channel)
Channel);
}
-/**
- * VmbusChannelProcessOffer - Process the offer by creating a channel/device associated with this offer
+/*
+ * VmbusChannelProcessOffer - Process the offer by creating a channel/device
+ * associated with this offer
*/
static void VmbusChannelProcessOffer(void *context)
{
@@ -140,6 +303,7 @@ static void VmbusChannelProcessOffer(void *context)
struct vmbus_channel *channel;
bool fNew = true;
int ret;
+ int cnt;
unsigned long flags;
DPRINT_ENTER(VMBUS);
@@ -209,11 +373,28 @@ static void VmbusChannelProcessOffer(void *context)
* can cleanup properly
*/
newChannel->State = CHANNEL_OPEN_STATE;
+ cnt = 0;
+
+ while (cnt != MAX_MSG_TYPES) {
+ if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType,
+ &hv_cb_utils[cnt].data,
+ sizeof(struct hv_guid)) == 0) {
+ DPRINT_INFO(VMBUS, "%s",
+ hv_cb_utils[cnt].log_msg);
+
+ if (VmbusChannelOpen(newChannel, 2 * PAGE_SIZE,
+ 2 * PAGE_SIZE, NULL, 0,
+ hv_cb_utils[cnt].callback,
+ newChannel) == 0)
+ hv_cb_utils[cnt].channel = newChannel;
+ }
+ cnt++;
+ }
}
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelProcessRescindOffer - Rescind the offer by initiating a device removal
*/
static void VmbusChannelProcessRescindOffer(void *context)
@@ -225,7 +406,7 @@ static void VmbusChannelProcessRescindOffer(void *context)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnOffer - Handler for channel offers from vmbus in parent partition.
*
* We ignore all offers except network and storage offers. For each network and
@@ -308,7 +489,7 @@ static void VmbusChannelOnOffer(struct vmbus_channel_message_header *hdr)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnOfferRescind - Rescind offer handler.
*
* We queue a work item to process this offer synchronously
@@ -335,7 +516,7 @@ static void VmbusChannelOnOfferRescind(struct vmbus_channel_message_header *hdr)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnOffersDelivered - This is invoked when all offers have been delivered.
*
* Nothing to do here.
@@ -347,7 +528,7 @@ static void VmbusChannelOnOffersDelivered(
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnOpenResult - Open result handler.
*
* This is invoked when we received a response to our channel open request.
@@ -395,7 +576,7 @@ static void VmbusChannelOnOpenResult(struct vmbus_channel_message_header *hdr)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnGpadlCreated - GPADL created handler.
*
* This is invoked when we received a response to our gpadl create request.
@@ -447,7 +628,7 @@ static void VmbusChannelOnGpadlCreated(struct vmbus_channel_message_header *hdr)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnGpadlTorndown - GPADL torndown handler.
*
* This is invoked when we received a response to our gpadl teardown request.
@@ -495,7 +676,7 @@ static void VmbusChannelOnGpadlTorndown(
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelOnVersionResponse - Version response handler
*
* This is invoked when we received a response to our initiate contact request.
@@ -558,7 +739,7 @@ static struct vmbus_channel_message_table_entry
{ChannelMessageUnload, NULL},
};
-/**
+/*
* VmbusOnChannelMessage - Handler for channel protocol messages.
*
* This is invoked in the vmbus worker thread context.
@@ -597,7 +778,7 @@ void VmbusOnChannelMessage(void *Context)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusChannelRequestOffers - Send a request to get all our pending offers.
*/
int VmbusChannelRequestOffers(void)
@@ -611,9 +792,15 @@ int VmbusChannelRequestOffers(void)
msgInfo = kmalloc(sizeof(*msgInfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
- ASSERT(msgInfo != NULL);
+ if (!msgInfo)
+ return -ENOMEM;
msgInfo->WaitEvent = osd_WaitEventCreate();
+ if (!msgInfo->WaitEvent) {
+ kfree(msgInfo);
+ return -ENOMEM;
+ }
+
msg = (struct vmbus_channel_message_header *)msgInfo->Msg;
msg->MessageType = ChannelMessageRequestOffers;
@@ -651,8 +838,9 @@ Cleanup:
return ret;
}
-/**
- * VmbusChannelReleaseUnattachedChannels - Release channels that are unattached/unconnected ie (no drivers associated)
+/*
+ * VmbusChannelReleaseUnattachedChannels - Release channels that are
+ * unattached/unconnected ie (no drivers associated)
*/
void VmbusChannelReleaseUnattachedChannels(void)
{
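
The GUID tables added above pair each textual GUID with its on-the-wire bytes: the first three groups are stored little-endian, the remaining bytes in string order. A sketch of the mapping for the Shutdown class shown in this hunk:

/*
 * {0E0B6031-5213-4934-818B-38D90CED39DB}
 *   0E0B6031          -> 0x31, 0x60, 0x0B, 0x0E   (32-bit group, byte-swapped)
 *   5213              -> 0x13, 0x52               (16-bit group, byte-swapped)
 *   4934              -> 0x34, 0x49               (16-bit group, byte-swapped)
 *   818B-38D90CED39DB -> 0x81, 0x8B, 0x38, 0xD9, 0x0C, 0xED, 0x39, 0xDB
 */
static const struct hv_guid shutdown_guid = {
	.data = {
		0x31, 0x60, 0x0B, 0x0E, 0x13, 0x52, 0x34, 0x49,
		0x81, 0x8B, 0x38, 0xD9, 0x0C, 0xED, 0x39, 0xDB
	}
};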
diff --git a/drivers/staging/hv/ChannelMgmt.h b/drivers/staging/hv/channel_mgmt.h
index fa973d86b62..5908b81d3e9 100644
--- a/drivers/staging/hv/ChannelMgmt.h
+++ b/drivers/staging/hv/channel_mgmt.h
@@ -27,9 +27,9 @@
#include <linux/list.h>
#include <linux/timer.h>
-#include "RingBuffer.h"
-#include "VmbusChannelInterface.h"
-#include "VmbusPacketFormat.h"
+#include "ring_buffer.h"
+#include "vmbus_channel_interface.h"
+#include "vmbus_packet_format.h"
/* Version 1 messages */
enum vmbus_channel_message_type {
diff --git a/drivers/staging/hv/Connection.c b/drivers/staging/hv/connection.c
index e0ea9cf90f0..e8824dadffc 100644
--- a/drivers/staging/hv/Connection.c
+++ b/drivers/staging/hv/connection.c
@@ -26,7 +26,7 @@
#include <linux/vmalloc.h>
#include "osd.h"
#include "logging.h"
-#include "VmbusPrivate.h"
+#include "vmbus_private.h"
struct VMBUS_CONNECTION gVmbusConnection = {
@@ -34,7 +34,7 @@ struct VMBUS_CONNECTION gVmbusConnection = {
.NextGpadlHandle = ATOMIC_INIT(0xE1E10),
};
-/**
+/*
* VmbusConnect - Sends a connect request on the partition service connection
*/
int VmbusConnect(void)
@@ -93,11 +93,16 @@ int VmbusConnect(void)
sizeof(struct vmbus_channel_initiate_contact),
GFP_KERNEL);
if (msgInfo == NULL) {
- ret = -1;
+ ret = -ENOMEM;
goto Cleanup;
}
msgInfo->WaitEvent = osd_WaitEventCreate();
+ if (!msgInfo->WaitEvent) {
+ ret = -ENOMEM;
+ goto Cleanup;
+ }
+
msg = (struct vmbus_channel_initiate_contact *)msgInfo->Msg;
msg->Header.MessageType = ChannelMessageInitiateContact;
@@ -180,7 +185,7 @@ Cleanup:
return ret;
}
-/**
+/*
* VmbusDisconnect - Sends a disconnect request on the partition service connection
*/
int VmbusDisconnect(void)
@@ -195,6 +200,8 @@ int VmbusDisconnect(void)
return -1;
msg = kzalloc(sizeof(struct vmbus_channel_message_header), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->MessageType = ChannelMessageUnload;
@@ -218,7 +225,7 @@ Cleanup:
return ret;
}
-/**
+/*
* GetChannelFromRelId - Get the channel object given its child relative id (ie channel id)
*/
struct vmbus_channel *GetChannelFromRelId(u32 relId)
@@ -239,7 +246,7 @@ struct vmbus_channel *GetChannelFromRelId(u32 relId)
return foundChannel;
}
-/**
+/*
* VmbusProcessChannelEvent - Process a channel event notification
*/
static void VmbusProcessChannelEvent(void *context)
@@ -247,7 +254,7 @@ static void VmbusProcessChannelEvent(void *context)
struct vmbus_channel *channel;
u32 relId = (u32)(unsigned long)context;
- ASSERT(relId > 0);
+ /* ASSERT(relId > 0); */
/*
* Find the channel based on this relid and invokes the
@@ -259,15 +266,15 @@ static void VmbusProcessChannelEvent(void *context)
VmbusChannelOnChannelEvent(channel);
/*
* WorkQueueQueueWorkItem(channel->dataWorkQueue,
- * VmbusChannelOnChannelEvent,
- * (void*)channel);
+ * VmbusChannelOnChannelEvent,
+ * (void*)channel);
*/
} else {
DPRINT_ERR(VMBUS, "channel not found for relid - %d.", relId);
}
}
-/**
+/*
* VmbusOnEvents - Handler for events
*/
void VmbusOnEvents(void)
@@ -308,7 +315,7 @@ void VmbusOnEvents(void)
return;
}
-/**
+/*
* VmbusPostMessage - Send a msg on the vmbus's message connection
*/
int VmbusPostMessage(void *buffer, size_t bufferLen)
@@ -320,7 +327,7 @@ int VmbusPostMessage(void *buffer, size_t bufferLen)
return HvPostMessage(connId, 1, buffer, bufferLen);
}
-/**
+/*
* VmbusSetEvent - Send an event notification to the parent
*/
int VmbusSetEvent(u32 childRelId)
diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/hv.c
index 3a1112d29ae..6c77e64027f 100644
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/hv.c
@@ -25,7 +25,7 @@
#include <linux/vmalloc.h>
#include "osd.h"
#include "logging.h"
-#include "VmbusPrivate.h"
+#include "vmbus_private.h"
/* The one and only */
struct hv_context gHvContext = {
@@ -35,7 +35,7 @@ struct hv_context gHvContext = {
.SignalEventBuffer = NULL,
};
-/**
+/*
* HvQueryHypervisorPresence - Query the cpuid for presense of windows hypervisor
*/
static int HvQueryHypervisorPresence(void)
@@ -56,7 +56,7 @@ static int HvQueryHypervisorPresence(void)
return ecx & HV_PRESENT_BIT;
}
-/**
+/*
* HvQueryHypervisorInfo - Get version info of the windows hypervisor
*/
static int HvQueryHypervisorInfo(void)
@@ -125,7 +125,7 @@ static int HvQueryHypervisorInfo(void)
return maxLeaf;
}
-/**
+/*
* HvDoHypercall - Invoke the specified hypercall
*/
static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
@@ -180,7 +180,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
#endif /* !x86_64 */
}
-/**
+/*
* HvInit - Main initialization routine.
*
* This routine must be called before any other routines in here are called
@@ -294,7 +294,7 @@ Cleanup:
return ret;
}
-/**
+/*
* HvCleanup - Cleanup routine.
*
* This routine is called normally during driver unloading or exiting.
@@ -305,11 +305,9 @@ void HvCleanup(void)
DPRINT_ENTER(VMBUS);
- if (gHvContext.SignalEventBuffer) {
- kfree(gHvContext.SignalEventBuffer);
- gHvContext.SignalEventBuffer = NULL;
- gHvContext.SignalEventParam = NULL;
- }
+ kfree(gHvContext.SignalEventBuffer);
+ gHvContext.SignalEventBuffer = NULL;
+ gHvContext.SignalEventParam = NULL;
if (gHvContext.HypercallPage) {
hypercallMsr.AsUINT64 = 0;
@@ -321,7 +319,7 @@ void HvCleanup(void)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* HvPostMessage - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
@@ -362,7 +360,7 @@ u16 HvPostMessage(union hv_connection_id connectionId,
}
-/**
+/*
* HvSignalEvent - Signal an event on the specified connection using the hypervisor event IPC.
*
* This involves a hypercall.
@@ -376,7 +374,7 @@ u16 HvSignalEvent(void)
return status;
}
-/**
+/*
* HvSynicInit - Initialize the Synthethic Interrupt Controller.
*
* If it is already initialized by another entity (ie x2v shim), we need to
@@ -482,7 +480,7 @@ Cleanup:
return;
}
-/**
+/*
* HvSynicCleanup - Cleanup routine for HvSynicInit().
*/
void HvSynicCleanup(void *arg)
diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/hv.h
index 41f5ebb86e1..41f5ebb86e1 100644
--- a/drivers/staging/hv/Hv.h
+++ b/drivers/staging/hv/hv.h
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
new file mode 100644
index 00000000000..8a49aafea37
--- /dev/null
+++ b/drivers/staging/hv/hv_utils.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2010, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/reboot.h>
+
+#include "logging.h"
+#include "osd.h"
+#include "vmbus.h"
+#include "vmbus_packet_format.h"
+#include "vmbus_channel_interface.h"
+#include "version_info.h"
+#include "channel.h"
+#include "vmbus_private.h"
+#include "vmbus_api.h"
+#include "utils.h"
+
+
+static void shutdown_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u8 *buf;
+ u32 buflen, recvlen;
+ u64 requestid;
+ u8 execute_shutdown = false;
+
+ struct shutdown_msg_data *shutdown_msg;
+
+ struct icmsg_hdr *icmsghdrp;
+ struct icmsg_negotiate *negop = NULL;
+
+ DPRINT_ENTER(VMBUS);
+
+ buflen = PAGE_SIZE;
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf) {
+ DPRINT_EXIT(VMBUS);
+ return;
+ }
+
+ VmbusChannelRecvPacket(channel, buf, buflen, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ DPRINT_DBG(VMBUS, "shutdown packet: len=%d, requestid=%lld",
+ recvlen, requestid);
+
+ icmsghdrp = (struct icmsg_hdr *)&buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ prep_negotiate_resp(icmsghdrp, negop, buf);
+ } else {
+ shutdown_msg = (struct shutdown_msg_data *)&buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ switch (shutdown_msg->flags) {
+ case 0:
+ case 1:
+ icmsghdrp->status = HV_S_OK;
+ execute_shutdown = true;
+
+ DPRINT_INFO(VMBUS, "Shutdown request received -"
+ " gracefull shutdown initiated");
+ break;
+ default:
+ icmsghdrp->status = HV_E_FAIL;
+ execute_shutdown = false;
+
+ DPRINT_INFO(VMBUS, "Shutdown request received -"
+ " Invalid request");
+ break;
+ }
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ VmbusChannelSendPacket(channel, buf,
+ recvlen, requestid,
+ VmbusPacketTypeDataInBand, 0);
+ }
+
+ kfree(buf);
+
+ DPRINT_EXIT(VMBUS);
+
+ if (execute_shutdown == true)
+ orderly_poweroff(false);
+}
+
+/*
+ * Set guest time to host UTC time.
+ */
+static inline void do_adj_guesttime(u64 hosttime)
+{
+ s64 host_tns;
+ struct timespec host_ts;
+
+ host_tns = (hosttime - WLTIMEDELTA) * 100;
+ host_ts = ns_to_timespec(host_tns);
+
+ do_settimeofday(&host_ts);
+}
+
+/*
+ * Synchronize time with host after reboot, restore, etc.
+ *
+ * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
+ * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
+ * message after the timesync channel is opened. Since the hv_utils module is
+ * loaded after hv_vmbus, the first message is usually missed. In addition,
+ * the system time is automatically set to the emulated hardware clock, which
+ * may not be UTC time or in the same time zone. So, to override these
+ * effects, we use the first 50 time samples for the initial system time
+ * setting.
+ */
+static inline void adj_guesttime(u64 hosttime, u8 flags)
+{
+ static s32 scnt = 50;
+
+ if ((flags & ICTIMESYNCFLAG_SYNC) != 0) {
+ do_adj_guesttime(hosttime);
+ return;
+ }
+
+ if ((flags & ICTIMESYNCFLAG_SAMPLE) != 0 && scnt > 0) {
+ scnt--;
+ do_adj_guesttime(hosttime);
+ }
+}
+
+/*
+ * Time Sync Channel message handler.
+ */
+static void timesync_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u8 *buf;
+ u32 buflen, recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct ictimesync_data *timedatap;
+
+ DPRINT_ENTER(VMBUS);
+
+ buflen = PAGE_SIZE;
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf) {
+ DPRINT_EXIT(VMBUS);
+ return;
+ }
+
+ VmbusChannelRecvPacket(channel, buf, buflen, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ DPRINT_DBG(VMBUS, "timesync packet: recvlen=%d, requestid=%lld",
+ recvlen, requestid);
+
+ icmsghdrp = (struct icmsg_hdr *)&buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ prep_negotiate_resp(icmsghdrp, NULL, buf);
+ } else {
+ timedatap = (struct ictimesync_data *)&buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+ adj_guesttime(timedatap->parenttime, timedatap->flags);
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ VmbusChannelSendPacket(channel, buf,
+ recvlen, requestid,
+ VmbusPacketTypeDataInBand, 0);
+ }
+
+ kfree(buf);
+
+ DPRINT_EXIT(VMBUS);
+}
+
+/*
+ * Heartbeat functionality.
+ * Every two seconds, Hyper-V sends us a heartbeat request message.
+ * We respond to this message, and Hyper-V knows we are alive.
+ */
+static void heartbeat_onchannelcallback(void *context)
+{
+ struct vmbus_channel *channel = context;
+ u8 *buf;
+ u32 buflen, recvlen;
+ u64 requestid;
+ struct icmsg_hdr *icmsghdrp;
+ struct heartbeat_msg_data *heartbeat_msg;
+
+ DPRINT_ENTER(VMBUS);
+
+ buflen = PAGE_SIZE;
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf) {
+ DPRINT_EXIT(VMBUS);
+ return;
+ }
+
+ VmbusChannelRecvPacket(channel, buf, buflen, &recvlen, &requestid);
+
+ if (recvlen > 0) {
+ DPRINT_DBG(VMBUS, "heartbeat packet: len=%d, requestid=%lld",
+ recvlen, requestid);
+
+ icmsghdrp = (struct icmsg_hdr *)&buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+ if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ prep_negotiate_resp(icmsghdrp, NULL, buf);
+ } else {
+ heartbeat_msg = (struct heartbeat_msg_data *)&buf[
+ sizeof(struct vmbuspipe_hdr) +
+ sizeof(struct icmsg_hdr)];
+
+ DPRINT_DBG(VMBUS, "heartbeat seq = %lld",
+ heartbeat_msg->seq_num);
+
+ heartbeat_msg->seq_num += 1;
+ }
+
+ icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
+ | ICMSGHDRFLAG_RESPONSE;
+
+ VmbusChannelSendPacket(channel, buf,
+ recvlen, requestid,
+ VmbusPacketTypeDataInBand, 0);
+ }
+
+ kfree(buf);
+
+ DPRINT_EXIT(VMBUS);
+}
+
+static int __init init_hyperv_utils(void)
+{
+ printk(KERN_INFO "Registering HyperV Utility Driver\n");
+
+ hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+ &shutdown_onchannelcallback;
+ hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback;
+
+ hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+ &timesync_onchannelcallback;
+ hv_cb_utils[HV_TIMESYNC_MSG].callback = &timesync_onchannelcallback;
+
+ hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+ &heartbeat_onchannelcallback;
+ hv_cb_utils[HV_HEARTBEAT_MSG].callback = &heartbeat_onchannelcallback;
+
+ return 0;
+}
+
+static void __exit exit_hyperv_utils(void)
+{
+ printk(KERN_INFO "De-Registered HyperV Utility Driver\n");
+
+ hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+ &chn_cb_negotiate;
+ hv_cb_utils[HV_SHUTDOWN_MSG].callback = &chn_cb_negotiate;
+
+ hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+ &chn_cb_negotiate;
+ hv_cb_utils[HV_TIMESYNC_MSG].callback = &chn_cb_negotiate;
+
+ hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+ &chn_cb_negotiate;
+ hv_cb_utils[HV_HEARTBEAT_MSG].callback = &chn_cb_negotiate;
+}
+
+module_init(init_hyperv_utils);
+module_exit(exit_hyperv_utils);
+
+MODULE_DESCRIPTION("Hyper-V Utilities");
+MODULE_VERSION(HV_DRV_VERSION);
+MODULE_LICENSE("GPL");
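
do_adj_guesttime() above converts the host timestamp into a Unix-epoch timespec. Hyper-V reports time in 100 ns units; WLTIMEDELTA is defined in the driver's headers (not shown in this diff) and is assumed here to be the 1601-to-1970 epoch offset in those same units. A self-contained sketch of the arithmetic, with an assumed constant:

#include <linux/time.h>

/* Assumed: seconds between 1601-01-01 and 1970-01-01, expressed in 100 ns units. */
#define EXAMPLE_WLTIMEDELTA 116444736000000000LL

static struct timespec host_to_unix_time(u64 hosttime)
{
	/* 100 ns ticks since 1601 -> nanoseconds since the Unix epoch */
	s64 host_tns = (hosttime - EXAMPLE_WLTIMEDELTA) * 100;

	return ns_to_timespec(host_tns);
}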
diff --git a/drivers/staging/hv/logging.h b/drivers/staging/hv/logging.h
index 9e55617bd67..ad4cfcfb7b1 100644
--- a/drivers/staging/hv/logging.h
+++ b/drivers/staging/hv/logging.h
@@ -61,13 +61,6 @@
extern unsigned int vmbus_loglevel;
-#define ASSERT(expr) \
- if (!(expr)) { \
- printk(KERN_CRIT "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- __asm__ __volatile__("int3"); \
- }
-
#define DPRINT(mod, lvl, fmt, args...) do {\
if ((mod & (HIWORD(vmbus_loglevel))) && \
(lvl <= LOWORD(vmbus_loglevel))) \
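
With the ASSERT() macro removed from logging.h, the remaining checks in these drivers use explicit error returns or WARN_ON() (as in netvsc.c below). A minimal sketch of the substitution, using a hypothetical helper:

#include <linux/kernel.h>

/* Before (removed): ASSERT(ptr != NULL); which forced an int3 trap. */
static int example_check(void *ptr)
{
	if (WARN_ON(!ptr))	/* logs a warning with a stack trace, then continues */
		return -EINVAL;
	return 0;
}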
diff --git a/drivers/staging/hv/NetVsc.c b/drivers/staging/hv/netvsc.c
index e4bf8229750..ba15059c45b 100644
--- a/drivers/staging/hv/NetVsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -25,8 +25,8 @@
#include <linux/slab.h>
#include "osd.h"
#include "logging.h"
-#include "NetVsc.h"
-#include "RndisFilter.h"
+#include "netvsc.h"
+#include "rndis_filter.h"
/* Globals */
@@ -92,7 +92,7 @@ static struct netvsc_device *AllocNetDevice(struct hv_device *Device)
static void FreeNetDevice(struct netvsc_device *Device)
{
- ASSERT(atomic_read(&Device->RefCount) == 0);
+ WARN_ON(atomic_read(&Device->RefCount) == 0);
Device->Device->Extension = NULL;
kfree(Device);
}
@@ -131,7 +131,7 @@ static void PutNetDevice(struct hv_device *Device)
struct netvsc_device *netDevice;
netDevice = Device->Extension;
- ASSERT(netDevice);
+ /* ASSERT(netDevice); */
atomic_dec(&netDevice->RefCount);
}
@@ -167,7 +167,7 @@ static struct netvsc_device *ReleaseInboundNetDevice(struct hv_device *Device)
return netDevice;
}
-/**
+/*
* NetVscInitialize - Main entry point
*/
int NetVscInitialize(struct hv_driver *drv)
@@ -184,14 +184,15 @@ int NetVscInitialize(struct hv_driver *drv)
sizeof(struct vmtransfer_page_packet_header));
/* Make sure we are at least 2 pages since 1 page is used for control */
- ASSERT(driver->RingBufferSize >= (PAGE_SIZE << 1));
+ /* ASSERT(driver->RingBufferSize >= (PAGE_SIZE << 1)); */
drv->name = gDriverName;
memcpy(&drv->deviceType, &gNetVscDeviceType, sizeof(struct hv_guid));
/* Make sure it is set by the caller */
- ASSERT(driver->OnReceiveCallback);
- ASSERT(driver->OnLinkStatusChanged);
+ /* FIXME: These probably should still be tested in some way */
+ /* ASSERT(driver->OnReceiveCallback); */
+ /* ASSERT(driver->OnLinkStatusChanged); */
/* Setup the dispatch table */
driver->Base.OnDeviceAdd = NetVscOnDeviceAdd;
@@ -222,9 +223,9 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
DPRINT_EXIT(NETVSC);
return -1;
}
- ASSERT(netDevice->ReceiveBufferSize > 0);
+ /* ASSERT(netDevice->ReceiveBufferSize > 0); */
/* page-size grandularity */
- ASSERT((netDevice->ReceiveBufferSize & (PAGE_SIZE - 1)) == 0);
+ /* ASSERT((netDevice->ReceiveBufferSize & (PAGE_SIZE - 1)) == 0); */
netDevice->ReceiveBuffer =
osd_PageAlloc(netDevice->ReceiveBufferSize >> PAGE_SHIFT);
@@ -236,8 +237,8 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
goto Cleanup;
}
/* page-aligned buffer */
- ASSERT(((unsigned long)netDevice->ReceiveBuffer & (PAGE_SIZE - 1)) ==
- 0);
+ /* ASSERT(((unsigned long)netDevice->ReceiveBuffer & (PAGE_SIZE - 1)) == */
+ /* 0); */
DPRINT_INFO(NETVSC, "Establishing receive buffer's GPADL...");
@@ -294,8 +295,8 @@ static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
}
/* Parse the response */
- ASSERT(netDevice->ReceiveSectionCount == 0);
- ASSERT(netDevice->ReceiveSections == NULL);
+ /* ASSERT(netDevice->ReceiveSectionCount == 0); */
+ /* ASSERT(netDevice->ReceiveSections == NULL); */
netDevice->ReceiveSectionCount = initPacket->Messages.Version1Messages.SendReceiveBufferComplete.NumSections;
@@ -353,9 +354,13 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
DPRINT_EXIT(NETVSC);
return -1;
}
- ASSERT(netDevice->SendBufferSize > 0);
+ if (netDevice->SendBufferSize <= 0) {
+ ret = -EINVAL;
+ goto Cleanup;
+ }
+
/* page-size grandularity */
- ASSERT((netDevice->SendBufferSize & (PAGE_SIZE - 1)) == 0);
+ /* ASSERT((netDevice->SendBufferSize & (PAGE_SIZE - 1)) == 0); */
netDevice->SendBuffer =
osd_PageAlloc(netDevice->SendBufferSize >> PAGE_SHIFT);
@@ -366,7 +371,7 @@ static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
goto Cleanup;
}
/* page-aligned buffer */
- ASSERT(((unsigned long)netDevice->SendBuffer & (PAGE_SIZE - 1)) == 0);
+ /* ASSERT(((unsigned long)netDevice->SendBuffer & (PAGE_SIZE - 1)) == 0); */
DPRINT_INFO(NETVSC, "Establishing send buffer's GPADL...");
@@ -705,7 +710,7 @@ static void NetVscDisconnectFromVsp(struct netvsc_device *NetDevice)
DPRINT_EXIT(NETVSC);
}
-/**
+/*
* NetVscOnDeviceAdd - Callback when the device belonging to this driver is added
*/
static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
@@ -749,6 +754,10 @@ static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
&netDevice->ReceivePacketList);
}
netDevice->ChannelInitEvent = osd_WaitEventCreate();
+ if (!netDevice->ChannelInitEvent) {
+ ret = -ENOMEM;
+ goto Cleanup;
+ }
/* Open the channel */
ret = Device->Driver->VmbusChannelInterface.Open(Device,
@@ -807,7 +816,7 @@ Cleanup:
return ret;
}
-/**
+/*
* NetVscOnDeviceRemove - Callback when the root bus device is removed
*/
static int NetVscOnDeviceRemove(struct hv_device *Device)
@@ -864,7 +873,7 @@ static int NetVscOnDeviceRemove(struct hv_device *Device)
return 0;
}
-/**
+/*
* NetVscOnCleanup - Perform any cleanup when the driver is removed
*/
static void NetVscOnCleanup(struct hv_driver *drv)
@@ -908,7 +917,7 @@ static void NetVscOnSendCompletion(struct hv_device *Device,
NvspMessage1TypeSendRNDISPacketComplete) {
/* Get the send context */
nvscPacket = (struct hv_netvsc_packet *)(unsigned long)Packet->TransactionId;
- ASSERT(nvscPacket);
+ /* ASSERT(nvscPacket); */
/* Notify the layer above us */
nvscPacket->Completion.Send.OnSendCompletion(nvscPacket->Completion.Send.SendCompletionContext);
@@ -1087,13 +1096,13 @@ static void NetVscOnReceive(struct hv_device *Device,
}
/* Remove the 1st packet to represent the xfer page packet itself */
- xferpagePacket = (struct xferpage_packet*)listHead.next;
+ xferpagePacket = (struct xferpage_packet *)listHead.next;
list_del(&xferpagePacket->ListEntry);
/* This is how much we can satisfy */
xferpagePacket->Count = count - 1;
- ASSERT(xferpagePacket->Count > 0 && xferpagePacket->Count <=
- vmxferpagePacket->RangeCount);
+ /* ASSERT(xferpagePacket->Count > 0 && xferpagePacket->Count <= */
+ /* vmxferpagePacket->RangeCount); */
if (xferpagePacket->Count != vmxferpagePacket->RangeCount) {
DPRINT_INFO(NETVSC, "Needed %d netvsc pkts to satisy this xfer "
@@ -1103,7 +1112,7 @@ static void NetVscOnReceive(struct hv_device *Device,
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < (count - 1); i++) {
- netvscPacket = (struct hv_netvsc_packet*)listHead.next;
+ netvscPacket = (struct hv_netvsc_packet *)listHead.next;
list_del(&netvscPacket->ListEntry);
/* Initialize the netvsc packet */
@@ -1121,9 +1130,9 @@ static void NetVscOnReceive(struct hv_device *Device,
vmxferpagePacket->Ranges[i].ByteCount;
netvscPacket->PageBufferCount = 1;
- ASSERT(vmxferpagePacket->Ranges[i].ByteOffset +
- vmxferpagePacket->Ranges[i].ByteCount <
- netDevice->ReceiveBufferSize);
+ /* ASSERT(vmxferpagePacket->Ranges[i].ByteOffset + */
+ /* vmxferpagePacket->Ranges[i].ByteCount < */
+ /* netDevice->ReceiveBufferSize); */
netvscPacket->PageBuffers[0].Length =
vmxferpagePacket->Ranges[i].ByteCount;
@@ -1161,7 +1170,7 @@ static void NetVscOnReceive(struct hv_device *Device,
if (bytesRemain == 0)
break;
}
- ASSERT(bytesRemain == 0);
+ /* ASSERT(bytesRemain == 0); */
}
DPRINT_DBG(NETVSC, "[%d] - (abs offset %u len %u) => "
"(pfn %llx, offset %u, len %u)", i,
@@ -1177,7 +1186,7 @@ static void NetVscOnReceive(struct hv_device *Device,
NetVscOnReceiveCompletion(netvscPacket->Completion.Recv.ReceiveCompletionContext);
}
- ASSERT(list_empty(&listHead));
+ /* ASSERT(list_empty(&listHead)); */
PutNetDevice(Device);
DPRINT_EXIT(NETVSC);
@@ -1241,7 +1250,7 @@ static void NetVscOnReceiveCompletion(void *Context)
DPRINT_ENTER(NETVSC);
- ASSERT(packet->XferPagePacket);
+ /* ASSERT(packet->XferPagePacket); */
/*
* Even though it seems logical to do a GetOutboundNetDevice() here to
@@ -1259,7 +1268,7 @@ static void NetVscOnReceiveCompletion(void *Context)
/* Overloading use of the lock. */
spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
- ASSERT(packet->XferPagePacket->Count > 0);
+ /* ASSERT(packet->XferPagePacket->Count > 0); */
packet->XferPagePacket->Count--;
/*
@@ -1286,30 +1295,35 @@ static void NetVscOnReceiveCompletion(void *Context)
DPRINT_EXIT(NETVSC);
}
-void NetVscOnChannelCallback(void *Context)
+static void NetVscOnChannelCallback(void *Context)
{
- const int netPacketSize = 2048;
int ret;
struct hv_device *device = Context;
struct netvsc_device *netDevice;
u32 bytesRecvd;
u64 requestId;
- unsigned char packet[netPacketSize];
+ unsigned char *packet;
struct vmpacket_descriptor *desc;
- unsigned char *buffer = packet;
- int bufferlen = netPacketSize;
+ unsigned char *buffer;
+ int bufferlen = NETVSC_PACKET_SIZE;
DPRINT_ENTER(NETVSC);
- ASSERT(device);
+ /* ASSERT(device); */
+
+ packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
+ GFP_KERNEL);
+ if (!packet)
+ return;
+ buffer = packet;
netDevice = GetInboundNetDevice(device);
if (!netDevice) {
DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
"ignoring inbound packets", netDevice);
DPRINT_EXIT(NETVSC);
- return;
+ goto out;
}
do {
@@ -1341,17 +1355,17 @@ void NetVscOnChannelCallback(void *Context)
}
/* reset */
- if (bufferlen > netPacketSize) {
+ if (bufferlen > NETVSC_PACKET_SIZE) {
kfree(buffer);
buffer = packet;
- bufferlen = netPacketSize;
+ bufferlen = NETVSC_PACKET_SIZE;
}
} else {
/* reset */
- if (bufferlen > netPacketSize) {
+ if (bufferlen > NETVSC_PACKET_SIZE) {
kfree(buffer);
buffer = packet;
- bufferlen = netPacketSize;
+ bufferlen = NETVSC_PACKET_SIZE;
}
break;
@@ -1368,12 +1382,12 @@ void NetVscOnChannelCallback(void *Context)
}
bufferlen = bytesRecvd;
- } else {
- ASSERT(0);
}
} while (1);
PutNetDevice(device);
DPRINT_EXIT(NETVSC);
+out:
+ kfree(buffer);
return;
}
diff --git a/drivers/staging/hv/NetVsc.h b/drivers/staging/hv/netvsc.h
index 6e0e0349412..c71dce5b3f7 100644
--- a/drivers/staging/hv/NetVsc.h
+++ b/drivers/staging/hv/netvsc.h
@@ -26,9 +26,9 @@
#define _NETVSC_H_
#include <linux/list.h>
-#include "VmbusPacketFormat.h"
-#include "VmbusChannelInterface.h"
-#include "NetVscApi.h"
+#include "vmbus_packet_format.h"
+#include "vmbus_channel_interface.h"
+#include "netvsc_api.h"
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
@@ -289,6 +289,7 @@ struct nvsp_message {
/* Preallocated receive packets */
#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
+#define NETVSC_PACKET_SIZE 2048
/* Per netvsc channel-specific */
struct netvsc_device {
diff --git a/drivers/staging/hv/NetVscApi.h b/drivers/staging/hv/netvsc_api.h
index 95d7a32b12f..4b5b3ac458c 100644
--- a/drivers/staging/hv/NetVscApi.h
+++ b/drivers/staging/hv/netvsc_api.h
@@ -25,11 +25,7 @@
#ifndef _NETVSC_API_H_
#define _NETVSC_API_H_
-#include "VmbusApi.h"
-
-/* Defines */
-#define NETVSC_DEVICE_RING_BUFFER_SIZE (64*PAGE_SIZE)
-#define HW_MACADDR_LEN 6
+#include "vmbus_api.h"
/* Fwd declaration */
struct hv_netvsc_packet;
@@ -93,9 +89,6 @@ struct netvsc_driver {
u32 RingBufferSize;
u32 RequestExtSize;
- /* Additional num of page buffers to allocate */
- u32 AdditionalRequestPageBufferCount;
-
/*
* This is set by the caller to allow us to callback when we
* receive a packet from the "wire"
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index ab27d9a4446..55b993298ff 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -30,20 +30,22 @@
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include "osd.h"
#include "logging.h"
-#include "VersionInfo.h"
+#include "version_info.h"
#include "vmbus.h"
-#include "NetVscApi.h"
+#include "netvsc_api.h"
struct net_device_context {
/* point back to our device context */
struct vm_device *device_ctx;
- struct net_device_stats stats;
+ unsigned long avail;
};
struct netvsc_driver_context {
@@ -53,18 +55,17 @@ struct netvsc_driver_context {
struct netvsc_driver drv_obj;
};
-static int netvsc_ringbuffer_size = NETVSC_DEVICE_RING_BUFFER_SIZE;
+#define PACKET_PAGES_LOWATER 8
+/* Need this many pages to handle worst case fragmented packet */
+#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
+
+static int ring_size = roundup_pow_of_two(2*MAX_SKB_FRAGS+1);
+module_param(ring_size, int, S_IRUGO);
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
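For clarity, here is a minimal sketch of the page-accounting flow control the hunks below introduce; the helper name is hypothetical, and the real logic lives inline in netvsc_start_xmit() and netvsc_xmit_completion():

#include <linux/netdevice.h>

/* Hypothetical helper mirroring the inline accounting added by this patch. */
static bool netvsc_reserve_pages(struct net_device *net,
				 struct net_device_context *ctx,
				 unsigned int num_pages)
{
	if (num_pages > ctx->avail)
		return false;		/* caller reports NETDEV_TX_BUSY */
	ctx->avail -= num_pages;
	if (ctx->avail < PACKET_PAGES_LOWATER)
		netif_stop_queue(net);	/* throttle until completions arrive */
	return true;
}

The completion path does the inverse: it credits avail by the packet's page count and calls netif_wake_queue() once at least PACKET_PAGES_HIWATER pages are available again.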
/* The one and only one */
static struct netvsc_driver_context g_netvsc_drv;
-static struct net_device_stats *netvsc_get_stats(struct net_device *net)
-{
- struct net_device_context *net_device_ctx = netdev_priv(net);
-
- return &net_device_ctx->stats;
-}
-
static void netvsc_set_multicast_list(struct net_device *net)
{
}
@@ -78,9 +79,6 @@ static int netvsc_open(struct net_device *net)
DPRINT_ENTER(NETVSC_DRV);
if (netif_carrier_ok(net)) {
- memset(&net_device_ctx->stats, 0,
- sizeof(struct net_device_stats));
-
/* Open up the device */
ret = RndisFilterOnOpen(device_obj);
if (ret != 0) {
@@ -122,22 +120,20 @@ static void netvsc_xmit_completion(void *context)
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
(unsigned long)packet->Completion.Send.SendCompletionTid;
- struct net_device *net;
DPRINT_ENTER(NETVSC_DRV);
kfree(packet);
if (skb) {
- net = skb->dev;
- dev_kfree_skb_any(skb);
+ struct net_device *net = skb->dev;
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
- if (netif_queue_stopped(net)) {
- DPRINT_INFO(NETVSC_DRV, "net device (%p) waking up...",
- net);
+ dev_kfree_skb_any(skb);
- netif_wake_queue(net);
- }
+ if ((net_device_ctx->avail += num_pages) >= PACKET_PAGES_HIWATER)
+ netif_wake_queue(net);
}
DPRINT_EXIT(NETVSC_DRV);
@@ -152,65 +148,58 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
(struct netvsc_driver_context *)driver_ctx;
struct netvsc_driver *net_drv_obj = &net_drv_ctx->drv_obj;
struct hv_netvsc_packet *packet;
- int i;
int ret;
- int num_frags;
- int retries = 0;
+ unsigned int i, num_pages;
DPRINT_ENTER(NETVSC_DRV);
- /* Support only 1 chain of frags */
- ASSERT(skb_shinfo(skb)->frag_list == NULL);
- ASSERT(skb->dev == net);
-
DPRINT_DBG(NETVSC_DRV, "xmit packet - len %d data_len %d",
skb->len, skb->data_len);
- /* Add 1 for skb->data and any additional ones requested */
- num_frags = skb_shinfo(skb)->nr_frags + 1 +
- net_drv_obj->AdditionalRequestPageBufferCount;
+ /* Add 1 for skb->data and additional one for RNDIS */
+ num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
+ if (num_pages > net_device_ctx->avail)
+ return NETDEV_TX_BUSY;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
- (num_frags * sizeof(struct hv_page_buffer)) +
+ (num_pages * sizeof(struct hv_page_buffer)) +
net_drv_obj->RequestExtSize, GFP_ATOMIC);
if (!packet) {
+ /* out of memory, silently drop packet */
DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");
- return -1;
+
+ dev_kfree_skb(skb);
+ net->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
packet->Extension = (void *)(unsigned long)packet +
sizeof(struct hv_netvsc_packet) +
- (num_frags * sizeof(struct hv_page_buffer));
+ (num_pages * sizeof(struct hv_page_buffer));
/* Setup the rndis header */
- packet->PageBufferCount = num_frags;
+ packet->PageBufferCount = num_pages;
/* TODO: Flush all write buffers/ memory fence ??? */
/* wmb(); */
/* Initialize it from the skb */
- ASSERT(skb->data);
packet->TotalDataBufferLength = skb->len;
- /*
- * Start filling in the page buffers starting at
- * AdditionalRequestPageBufferCount offset
- */
- packet->PageBuffers[net_drv_obj->AdditionalRequestPageBufferCount].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
- packet->PageBuffers[net_drv_obj->AdditionalRequestPageBufferCount].Offset = (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->PageBuffers[net_drv_obj->AdditionalRequestPageBufferCount].Length = skb->len - skb->data_len;
-
- ASSERT((skb->len - skb->data_len) <= PAGE_SIZE);
-
- for (i = net_drv_obj->AdditionalRequestPageBufferCount + 1;
- i < num_frags; i++) {
- packet->PageBuffers[i].Pfn =
- page_to_pfn(skb_shinfo(skb)->frags[i-(net_drv_obj->AdditionalRequestPageBufferCount+1)].page);
- packet->PageBuffers[i].Offset =
- skb_shinfo(skb)->frags[i-(net_drv_obj->AdditionalRequestPageBufferCount+1)].page_offset;
- packet->PageBuffers[i].Length =
- skb_shinfo(skb)->frags[i-(net_drv_obj->AdditionalRequestPageBufferCount+1)].size;
+ /* Start filling in the page buffers starting after RNDIS buffer. */
+ packet->PageBuffers[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
+ packet->PageBuffers[1].Offset
+ = (unsigned long)skb->data & (PAGE_SIZE - 1);
+ packet->PageBuffers[1].Length = skb_headlen(skb);
+
+ /* Additional fragments are after SKB data */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ packet->PageBuffers[i+2].Pfn = page_to_pfn(f->page);
+ packet->PageBuffers[i+2].Offset = f->page_offset;
+ packet->PageBuffers[i+2].Length = f->size;
}
/* Set the completion routine */
@@ -218,55 +207,29 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->Completion.Send.SendCompletionContext = packet;
packet->Completion.Send.SendCompletionTid = (unsigned long)skb;
-retry_send:
ret = net_drv_obj->OnSend(&net_device_ctx->device_ctx->device_obj,
packet);
-
if (ret == 0) {
- ret = NETDEV_TX_OK;
- net_device_ctx->stats.tx_bytes += skb->len;
- net_device_ctx->stats.tx_packets++;
- } else {
- retries++;
- if (retries < 4) {
- DPRINT_ERR(NETVSC_DRV, "unable to send..."
- "retrying %d...", retries);
- udelay(100);
- goto retry_send;
- }
-
- /* no more room or we are shutting down */
- DPRINT_ERR(NETVSC_DRV, "unable to send (%d)..."
- "marking net device (%p) busy", ret, net);
- DPRINT_INFO(NETVSC_DRV, "net device (%p) stopping", net);
+ net->stats.tx_bytes += skb->len;
+ net->stats.tx_packets++;
- ret = NETDEV_TX_BUSY;
- net_device_ctx->stats.tx_dropped++;
+ DPRINT_DBG(NETVSC_DRV, "# of xmits %lu total size %lu",
+ net->stats.tx_packets,
+ net->stats.tx_bytes);
- netif_stop_queue(net);
-
- /*
- * Null it since the caller will free it instead of the
- * completion routine
- */
- packet->Completion.Send.SendCompletionTid = 0;
-
- /*
- * Release the resources since we will not get any send
- * completion
- */
- netvsc_xmit_completion((void *)packet);
+ if ((net_device_ctx->avail -= num_pages) < PACKET_PAGES_LOWATER)
+ netif_stop_queue(net);
+ } else {
+ /* we are shutting down or bus overloaded, just drop packet */
+ net->stats.tx_dropped++;
+ netvsc_xmit_completion(packet);
}
- DPRINT_DBG(NETVSC_DRV, "# of xmits %lu total size %lu",
- net_device_ctx->stats.tx_packets,
- net_device_ctx->stats.tx_bytes);
-
DPRINT_EXIT(NETVSC_DRV);
- return ret;
+ return NETDEV_TX_OK;
}
-/**
+/*
* netvsc_linkstatus_callback - Link up/down notification
*/
static void netvsc_linkstatus_callback(struct hv_device *device_obj,
@@ -293,18 +256,17 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
DPRINT_EXIT(NETVSC_DRV);
}
-/**
- * netvsc_recv_callback - Callback when we receive a packet from the "wire" on the specified device.
+/*
+ * netvsc_recv_callback - Callback when we receive a packet from the
+ * "wire" on the specified device.
*/
static int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet)
{
struct vm_device *device_ctx = to_vm_device(device_obj);
struct net_device *net = dev_get_drvdata(&device_ctx->device);
- struct net_device_context *net_device_ctx;
struct sk_buff *skb;
void *data;
- int ret;
int i;
unsigned long flags;
@@ -316,14 +278,12 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
- net_device_ctx = netdev_priv(net);
-
- /* Allocate a skb - TODO preallocate this */
- /* Pad 2-bytes to align IP header to 16 bytes */
- skb = dev_alloc_skb(packet->TotalDataBufferLength + 2);
- ASSERT(skb);
- skb_reserve(skb, 2);
- skb->dev = net;
+ /* Allocate a skb - TODO direct I/O to pages? */
+ skb = netdev_alloc_skb_ip_align(net, packet->TotalDataBufferLength);
+ if (unlikely(!skb)) {
+ ++net->stats.rx_dropped;
+ return 0;
+ }
/* for kmap_atomic */
local_irq_save(flags);
@@ -348,39 +308,45 @@ static int netvsc_recv_callback(struct hv_device *device_obj,
local_irq_restore(flags);
skb->protocol = eth_type_trans(skb, net);
-
skb->ip_summed = CHECKSUM_NONE;
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += skb->len;
+
/*
* Pass the skb back up. Network stack will deallocate the skb when it
- * is done
+ * is done.
+ * TODO - use NAPI?
*/
- ret = netif_rx(skb);
-
- switch (ret) {
- case NET_RX_DROP:
- net_device_ctx->stats.rx_dropped++;
- break;
- default:
- net_device_ctx->stats.rx_packets++;
- net_device_ctx->stats.rx_bytes += skb->len;
- break;
+ netif_rx(skb);
- }
DPRINT_DBG(NETVSC_DRV, "# of recvs %lu total size %lu",
- net_device_ctx->stats.rx_packets,
- net_device_ctx->stats.rx_bytes);
+ net->stats.rx_packets, net->stats.rx_bytes);
DPRINT_EXIT(NETVSC_DRV);
return 0;
}
+static void netvsc_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "hv_netvsc");
+ strcpy(info->version, HV_DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+}
+
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = netvsc_get_drvinfo,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+};
+
static const struct net_device_ops device_ops = {
.ndo_open = netvsc_open,
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
- .ndo_get_stats = netvsc_get_stats,
.ndo_set_multicast_list = netvsc_set_multicast_list,
};
@@ -413,6 +379,7 @@ static int netvsc_probe(struct device *device)
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = device_ctx;
+ net_device_ctx->avail = ring_size;
dev_set_drvdata(device, net);
/* Notify the netvsc driver of the new device */
@@ -442,6 +409,10 @@ static int netvsc_probe(struct device *device)
net->netdev_ops = &device_ops;
+ /* TODO: Add GSO and Checksum offload */
+ net->features = NETIF_F_SG;
+
+ SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, device);
ret = register_netdev(net);
@@ -559,7 +530,7 @@ static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
vmbus_get_interface(&net_drv_obj->Base.VmbusChannelInterface);
- net_drv_obj->RingBufferSize = netvsc_ringbuffer_size;
+ net_drv_obj->RingBufferSize = ring_size * PAGE_SIZE;
net_drv_obj->OnReceiveCallback = netvsc_recv_callback;
net_drv_obj->OnLinkStatusChanged = netvsc_linkstatus_callback;
@@ -581,6 +552,20 @@ static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
return ret;
}
+static const struct dmi_system_id __initconst
+hv_netvsc_dmi_table[] __maybe_unused = {
+ {
+ .ident = "Hyper-V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
+ },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
+
static int __init netvsc_init(void)
{
int ret;
@@ -588,6 +573,9 @@ static int __init netvsc_init(void)
DPRINT_ENTER(NETVSC_DRV);
DPRINT_INFO(NETVSC_DRV, "Netvsc initializing....");
+ if (!dmi_check_system(hv_netvsc_dmi_table))
+ return -ENODEV;
+
ret = netvsc_drv_init(NetVscInitialize);
DPRINT_EXIT(NETVSC_DRV);
@@ -602,9 +590,16 @@ static void __exit netvsc_exit(void)
DPRINT_EXIT(NETVSC_DRV);
}
+static const struct pci_device_id __initconst
+hv_netvsc_pci_table[] __maybe_unused = {
+ { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);
+
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
-module_param(netvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
module_init(netvsc_init);
module_exit(netvsc_exit);
diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c
index 9aea3106729..8c3eb278a81 100644
--- a/drivers/staging/hv/osd.c
+++ b/drivers/staging/hv/osd.c
@@ -59,6 +59,15 @@ void *osd_VirtualAllocExec(unsigned int size)
#endif
}
+/**
+ * osd_PageAlloc() - Allocate pages
+ * @count: Total number of kernel pages to allocate
+ *
+ * Tries to allocate @count consecutive free kernel pages. If successful,
+ * the pages are zeroed and a pointer to the first page is returned.
+ * Mainly used by Hyper-V drivers.
+ */
void *osd_PageAlloc(unsigned int count)
{
void *p;
@@ -78,6 +87,14 @@ void *osd_PageAlloc(unsigned int count)
}
EXPORT_SYMBOL_GPL(osd_PageAlloc);
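As a usage note, a hypothetical caller of the two page helpers might look like this (illustration only, not part of the patch):

#include <linux/errno.h>
#include "osd.h"

static int example_use_pages(void)
{
	void *buf = osd_PageAlloc(2);	/* two zeroed, contiguous pages */

	if (!buf)
		return -ENOMEM;
	/* ... use the 2 * PAGE_SIZE buffer ... */
	osd_PageFree(buf, 2);
	return 0;
}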
+/**
+ * osd_PageFree() - Free pages
+ * @page: Pointer to the first page to be freed
+ * @count: Total number of Kernel pages you free
+ *
+ * Frees the pages allocated by osd_PageAlloc()
+ * Mainly used by Hyper-V drivers.
+ */
void osd_PageFree(void *page, unsigned int count)
{
free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
@@ -86,6 +103,17 @@ void osd_PageFree(void *page, unsigned int count)
}
EXPORT_SYMBOL_GPL(osd_PageFree);
+/**
+ * osd_WaitEventCreate() - Create the event queue
+ *
+ * Allocates a &struct osd_waitevent and calls init_waitqueue_head() to
+ * set up the wait queue for the event. The structure is usually embedded
+ * in another structure belonging to the actual Hyper-V device driver.
+ *
+ * Returns a pointer to the new &struct osd_waitevent.
+ * Mainly used by Hyper-V drivers.
+ */
struct osd_waitevent *osd_WaitEventCreate(void)
{
struct osd_waitevent *wait = kmalloc(sizeof(struct osd_waitevent),
@@ -99,6 +127,19 @@ struct osd_waitevent *osd_WaitEventCreate(void)
}
EXPORT_SYMBOL_GPL(osd_WaitEventCreate);
+
+/**
+ * osd_WaitEventSet() - Wake up the process
+ * @waitEvent: Pointer to the &struct osd_waitevent to signal
+ *
+ * Sets the condition indicator in the &struct osd_waitevent and wakes up
+ * the process sleeping on it so that it can continue its work.
+ *
+ * Only used by Network and Storage Hyper-V drivers.
+ */
void osd_WaitEventSet(struct osd_waitevent *waitEvent)
{
waitEvent->condition = 1;
@@ -106,6 +147,20 @@ void osd_WaitEventSet(struct osd_waitevent *waitEvent)
}
EXPORT_SYMBOL_GPL(osd_WaitEventSet);
+/**
+ * osd_WaitEventWait() - Wait until the event condition is true
+ * @waitEvent: Pointer to the &struct osd_waitevent to wait on
+ *
+ * Puts the calling process to sleep until @waitEvent->condition becomes
+ * true, updating the condition indicator in &struct osd_waitevent to
+ * reflect the process's sleeping state.
+ *
+ * Returns the value returned by wait_event_interruptible().
+ *
+ * Mainly used by Hyper-V drivers.
+ */
int osd_WaitEventWait(struct osd_waitevent *waitEvent)
{
int ret = 0;
@@ -117,6 +172,21 @@ int osd_WaitEventWait(struct osd_waitevent *waitEvent)
}
EXPORT_SYMBOL_GPL(osd_WaitEventWait);
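The wait-event helpers above are typically used in pairs, as in the request/response paths elsewhere in this patch; a hypothetical sketch (names are illustrative only):

#include <linux/errno.h>
#include "osd.h"

static struct osd_waitevent *example_event;

static int example_wait_for_reply(void)
{
	example_event = osd_WaitEventCreate();
	if (!example_event)
		return -ENOMEM;
	/* ... send a request to the host ... */
	return osd_WaitEventWait(example_event);	/* sleep until signaled */
}

static void example_reply_handler(void)
{
	osd_WaitEventSet(example_event);		/* wake the waiter */
}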
+/**
+ * osd_WaitEventWaitEx() - Wait until the event condition is true or a timeout expires
+ * @waitEvent: Pointer to the &struct osd_waitevent to wait on
+ * @TimeoutInMs: Maximum time to wait, in milliseconds
+ *
+ * Puts the calling process to sleep until @waitEvent->condition becomes
+ * true or @TimeoutInMs milliseconds have elapsed, updating the condition
+ * indicator in &struct osd_waitevent to reflect the sleeping state.
+ *
+ * Returns the value returned by wait_event_interruptible_timeout().
+ *
+ * Mainly used by Hyper-V drivers.
+ */
int osd_WaitEventWaitEx(struct osd_waitevent *waitEvent, u32 TimeoutInMs)
{
int ret = 0;
diff --git a/drivers/staging/hv/RingBuffer.c b/drivers/staging/hv/ring_buffer.c
index 80b8a2c7784..ae2a10e24d9 100644
--- a/drivers/staging/hv/RingBuffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -25,14 +25,14 @@
#include <linux/mm.h>
#include "osd.h"
#include "logging.h"
-#include "RingBuffer.h"
+#include "ring_buffer.h"
/* #defines */
/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) ((w) >= (r))?((z) - ((w) - (r))):((r) - (w))
+#define BYTES_AVAIL_TO_WRITE(r, w, z) ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
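A quick worked example of the macro's arithmetic, with illustrative values only:

/*
 * For a data area of z = 4096 bytes with read index r = 100 and write
 * index w = 300 (w >= r), the writer has z - (w - r) = 4096 - 200 = 3896
 * bytes of room; with r = 300 and w = 100 (w < r), only r - w = 200 bytes
 * remain before the write index would catch up with the read index.
 */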
/*++
@@ -72,7 +72,7 @@ GetNextWriteLocation(RING_BUFFER_INFO *RingInfo)
{
u32 next = RingInfo->RingBuffer->WriteIndex;
- ASSERT(next < RingInfo->RingDataSize);
+ /* ASSERT(next < RingInfo->RingDataSize); */
return next;
}
@@ -106,7 +106,7 @@ GetNextReadLocation(RING_BUFFER_INFO *RingInfo)
{
u32 next = RingInfo->RingBuffer->ReadIndex;
- ASSERT(next < RingInfo->RingDataSize);
+ /* ASSERT(next < RingInfo->RingDataSize); */
return next;
}
@@ -126,7 +126,7 @@ GetNextReadLocationWithOffset(RING_BUFFER_INFO *RingInfo, u32 Offset)
{
u32 next = RingInfo->RingBuffer->ReadIndex;
- ASSERT(next < RingInfo->RingDataSize);
+ /* ASSERT(next < RingInfo->RingDataSize); */
next += Offset;
next %= RingInfo->RingDataSize;
@@ -301,7 +301,8 @@ Description:
--*/
int RingBufferInit(RING_BUFFER_INFO *RingInfo, void *Buffer, u32 BufferLen)
{
- ASSERT(sizeof(RING_BUFFER) == PAGE_SIZE);
+ if (sizeof(RING_BUFFER) != PAGE_SIZE)
+ return -EINVAL;
memset(RingInfo, 0, sizeof(RING_BUFFER_INFO));
@@ -489,7 +490,8 @@ int RingBufferRead(RING_BUFFER_INFO *InRingInfo, void *Buffer,
u64 prevIndices = 0;
unsigned long flags;
- ASSERT(BufferLen > 0);
+ if (BufferLen <= 0)
+ return -EINVAL;
spin_lock_irqsave(&InRingInfo->ring_lock, flags);
diff --git a/drivers/staging/hv/RingBuffer.h b/drivers/staging/hv/ring_buffer.h
index 6202157e145..6202157e145 100644
--- a/drivers/staging/hv/RingBuffer.h
+++ b/drivers/staging/hv/ring_buffer.h
diff --git a/drivers/staging/hv/rndis.h b/drivers/staging/hv/rndis.h
index 7c73277c1f9..723e1f15b90 100644
--- a/drivers/staging/hv/rndis.h
+++ b/drivers/staging/hv/rndis.h
@@ -622,7 +622,7 @@ struct rndis_message {
/* get the size of an RNDIS message. Pass in the message type, */
/* struct rndis_set_request, struct rndis_packet for example */
#define RNDIS_MESSAGE_SIZE(Message) \
- (sizeof(Message) + (sizeof(struct rndis_message) - \
+ (sizeof(Message) + (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container)))
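For illustration, this is how the macro is applied elsewhere in this patch, e.g. when RndisFilterSetPacketFilter() builds a set request carrying a single u32 filter value:

/* Total request size: rndis_set_request plus its 4-byte info buffer. */
u32 msg_size = RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32);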
/* get pointer to info buffer with message pointer */
diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/rndis_filter.c
index 6704f64c93f..5edf0853c6a 100644
--- a/drivers/staging/hv/RndisFilter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -22,10 +22,12 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/if_ether.h>
+
#include "osd.h"
#include "logging.h"
-#include "NetVscApi.h"
-#include "RndisFilter.h"
+#include "netvsc_api.h"
+#include "rndis_filter.h"
/* Data types */
struct rndis_filter_driver_object {
@@ -50,7 +52,7 @@ struct rndis_device {
spinlock_t request_lock;
struct list_head RequestList;
- unsigned char HwMacAddr[HW_MACADDR_LEN];
+ unsigned char HwMacAddr[ETH_ALEN];
};
struct rndis_request {
@@ -354,8 +356,8 @@ static void RndisFilterReceiveData(struct rndis_device *Device,
DPRINT_ENTER(NETVSC);
/* empty ethernet frame ?? */
- ASSERT(Packet->PageBuffers[0].Length >
- RNDIS_MESSAGE_SIZE(struct rndis_packet));
+ /* ASSERT(Packet->PageBuffers[0].Length > */
+ /* RNDIS_MESSAGE_SIZE(struct rndis_packet)); */
rndisPacket = &Message->Message.Packet;
@@ -389,7 +391,9 @@ static int RndisFilterOnReceive(struct hv_device *Device,
DPRINT_ENTER(NETVSC);
- ASSERT(netDevice);
+ if (!netDevice)
+ return -EINVAL;
+
/* Make sure the rndis device state is initialized */
if (!netDevice->Extension) {
DPRINT_ERR(NETVSC, "got rndis message but no rndis device..."
@@ -490,7 +494,8 @@ static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
DPRINT_ENTER(NETVSC);
- ASSERT(Result);
+ if (!Result)
+ return -EINVAL;
*ResultSize = 0;
request = GetRndisRequest(Device, REMOTE_NDIS_QUERY_MSG,
@@ -538,7 +543,7 @@ Cleanup:
static int RndisFilterQueryDeviceMac(struct rndis_device *Device)
{
- u32 size = HW_MACADDR_LEN;
+ u32 size = ETH_ALEN;
return RndisFilterQueryDevice(Device,
RNDIS_OID_802_3_PERMANENT_ADDRESS,
@@ -565,8 +570,8 @@ static int RndisFilterSetPacketFilter(struct rndis_device *Device,
DPRINT_ENTER(NETVSC);
- ASSERT(RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32) <=
- sizeof(struct rndis_message));
+ /* ASSERT(RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32) <= */
+ /* sizeof(struct rndis_message)); */
request = GetRndisRequest(Device, REMOTE_NDIS_SET_MSG,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -622,7 +627,6 @@ int RndisFilterInit(struct netvsc_driver *Driver)
sizeof(struct rndis_filter_packet));
Driver->RequestExtSize = sizeof(struct rndis_filter_packet);
- Driver->AdditionalRequestPageBufferCount = 1; /* For rndis header */
/* Driver->Context = rndisDriver; */
@@ -639,8 +643,8 @@ int RndisFilterInit(struct netvsc_driver *Driver)
Driver->Base.OnDeviceRemove;
gRndisFilter.InnerDriver.Base.OnCleanup = Driver->Base.OnCleanup;
- ASSERT(Driver->OnSend);
- ASSERT(Driver->OnReceiveCallback);
+ /* ASSERT(Driver->OnSend); */
+ /* ASSERT(Driver->OnReceiveCallback); */
gRndisFilter.InnerDriver.OnSend = Driver->OnSend;
gRndisFilter.InnerDriver.OnReceiveCallback = Driver->OnReceiveCallback;
gRndisFilter.InnerDriver.OnLinkStatusChanged =
@@ -811,8 +815,8 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
/* Initialize the rndis device */
netDevice = Device->Extension;
- ASSERT(netDevice);
- ASSERT(netDevice->Device);
+ /* ASSERT(netDevice); */
+ /* ASSERT(netDevice->Device); */
netDevice->Extension = rndisDevice;
rndisDevice->NetDevice = netDevice;
@@ -834,16 +838,10 @@ static int RndisFilterOnDeviceAdd(struct hv_device *Device,
*/
}
- DPRINT_INFO(NETVSC, "Device 0x%p mac addr %02x%02x%02x%02x%02x%02x",
- rndisDevice,
- rndisDevice->HwMacAddr[0],
- rndisDevice->HwMacAddr[1],
- rndisDevice->HwMacAddr[2],
- rndisDevice->HwMacAddr[3],
- rndisDevice->HwMacAddr[4],
- rndisDevice->HwMacAddr[5]);
+ DPRINT_INFO(NETVSC, "Device 0x%p mac addr %pM",
+ rndisDevice, rndisDevice->HwMacAddr);
- memcpy(deviceInfo->MacAddr, rndisDevice->HwMacAddr, HW_MACADDR_LEN);
+ memcpy(deviceInfo->MacAddr, rndisDevice->HwMacAddr, ETH_ALEN);
RndisFilterQueryDeviceLinkStatus(rndisDevice);
@@ -891,7 +889,9 @@ int RndisFilterOnOpen(struct hv_device *Device)
DPRINT_ENTER(NETVSC);
- ASSERT(netDevice);
+ if (!netDevice)
+ return -EINVAL;
+
ret = RndisFilterOpenDevice(netDevice->Extension);
DPRINT_EXIT(NETVSC);
@@ -906,7 +906,9 @@ int RndisFilterOnClose(struct hv_device *Device)
DPRINT_ENTER(NETVSC);
- ASSERT(netDevice);
+ if (!netDevice)
+ return -EINVAL;
+
ret = RndisFilterCloseDevice(netDevice->Extension);
DPRINT_EXIT(NETVSC);
@@ -927,7 +929,7 @@ static int RndisFilterOnSend(struct hv_device *Device,
/* Add the rndis header */
filterPacket = (struct rndis_filter_packet *)Packet->Extension;
- ASSERT(filterPacket);
+ /* ASSERT(filterPacket); */
memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
diff --git a/drivers/staging/hv/RndisFilter.h b/drivers/staging/hv/rndis_filter.h
index fa7dd79ddeb..764b9bf3e5d 100644
--- a/drivers/staging/hv/RndisFilter.h
+++ b/drivers/staging/hv/rndis_filter.h
@@ -27,7 +27,7 @@
#define __struct_bcount(x)
-#include "NetVsc.h"
+#include "netvsc.h"
#include "rndis.h"
diff --git a/drivers/staging/hv/StorVsc.c b/drivers/staging/hv/storvsc.c
index e426a23ca53..27a276e08ee 100644
--- a/drivers/staging/hv/StorVsc.c
+++ b/drivers/staging/hv/storvsc.c
@@ -25,8 +25,8 @@
#include <linux/delay.h>
#include "osd.h"
#include "logging.h"
-#include "StorVscApi.h"
-#include "VmbusPacketFormat.h"
+#include "storvsc_api.h"
+#include "vmbus_packet_format.h"
#include "vstorage.h"
@@ -100,7 +100,7 @@ static inline struct storvsc_device *AllocStorDevice(struct hv_device *Device)
static inline void FreeStorDevice(struct storvsc_device *Device)
{
- ASSERT(atomic_read(&Device->RefCount) == 0);
+ /* ASSERT(atomic_read(&Device->RefCount) == 0); */
kfree(Device);
}
@@ -137,10 +137,10 @@ static inline void PutStorDevice(struct hv_device *Device)
struct storvsc_device *storDevice;
storDevice = (struct storvsc_device *)Device->Extension;
- ASSERT(storDevice);
+ /* ASSERT(storDevice); */
atomic_dec(&storDevice->RefCount);
- ASSERT(atomic_read(&storDevice->RefCount));
+ /* ASSERT(atomic_read(&storDevice->RefCount)); */
}
/* Drop ref count to 1 to effectively disable GetStorDevice() */
@@ -149,7 +149,7 @@ static inline struct storvsc_device *ReleaseStorDevice(struct hv_device *Device)
struct storvsc_device *storDevice;
storDevice = (struct storvsc_device *)Device->Extension;
- ASSERT(storDevice);
+ /* ASSERT(storDevice); */
/* Busy wait until the ref drop to 2, then set it to 1 */
while (atomic_cmpxchg(&storDevice->RefCount, 2, 1) != 2)
@@ -165,7 +165,7 @@ static inline struct storvsc_device *FinalReleaseStorDevice(
struct storvsc_device *storDevice;
storDevice = (struct storvsc_device *)Device->Extension;
- ASSERT(storDevice);
+ /* ASSERT(storDevice); */
/* Busy wait until the ref drop to 1, then set it to 0 */
while (atomic_cmpxchg(&storDevice->RefCount, 1, 0) != 1)
@@ -199,6 +199,10 @@ static int StorVscChannelInit(struct hv_device *Device)
*/
memset(request, 0, sizeof(struct storvsc_request_extension));
request->WaitEvent = osd_WaitEventCreate();
+ if (!request->WaitEvent) {
+ ret = -ENOMEM;
+ goto nomem;
+ }
vstorPacket->Operation = VStorOperationBeginInitialization;
vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
@@ -338,7 +342,7 @@ static int StorVscChannelInit(struct hv_device *Device)
Cleanup:
kfree(request->WaitEvent);
request->WaitEvent = NULL;
-
+nomem:
PutStorDevice(Device);
DPRINT_EXIT(STORVSC);
@@ -366,12 +370,12 @@ static void StorVscOnIOCompletion(struct hv_device *Device,
"completed bytes xfer %u", RequestExt,
VStorPacket->VmSrb.DataTransferLength);
- ASSERT(RequestExt != NULL);
- ASSERT(RequestExt->Request != NULL);
+ /* ASSERT(RequestExt != NULL); */
+ /* ASSERT(RequestExt->Request != NULL); */
request = RequestExt->Request;
- ASSERT(request->OnIOCompletion != NULL);
+ /* ASSERT(request->OnIOCompletion != NULL); */
/* Copy over the status...etc */
request->Status = VStorPacket->VmSrb.ScsiStatus;
@@ -391,8 +395,8 @@ static void StorVscOnIOCompletion(struct hv_device *Device,
"valid - len %d\n", RequestExt,
VStorPacket->VmSrb.SenseInfoLength);
- ASSERT(VStorPacket->VmSrb.SenseInfoLength <=
- request->SenseBufferSize);
+ /* ASSERT(VStorPacket->VmSrb.SenseInfoLength <= */
+ /* request->SenseBufferSize); */
memcpy(request->SenseBuffer,
VStorPacket->VmSrb.SenseData,
VStorPacket->VmSrb.SenseInfoLength);
@@ -447,7 +451,7 @@ static void StorVscOnChannelCallback(void *context)
DPRINT_ENTER(STORVSC);
- ASSERT(device);
+ /* ASSERT(device); */
storDevice = MustGetStorDevice(device);
if (!storDevice) {
@@ -470,7 +474,7 @@ static void StorVscOnChannelCallback(void *context)
request = (struct storvsc_request_extension *)
(unsigned long)requestId;
- ASSERT(request);
+ /* ASSERT(request); */
/* if (vstorPacket.Flags & SYNTHETIC_FLAG) */
if ((request == &storDevice->InitRequest) ||
@@ -533,7 +537,7 @@ static int StorVscConnectToVsp(struct hv_device *Device)
return ret;
}
-/**
+/*
* StorVscOnDeviceAdd - Callback when the device belonging to this driver is added
*/
static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
@@ -554,7 +558,7 @@ static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
/* Save the channel properties to our storvsc channel */
/* props = (struct vmstorage_channel_properties *)
- * channel->offerMsg.Offer.u.Standard.UserDefined; */
+ * channel->offerMsg.Offer.u.Standard.UserDefined; */
/* FIXME: */
/*
@@ -585,7 +589,7 @@ Cleanup:
return ret;
}
-/**
+/*
* StorVscOnDeviceRemove - Callback when our device is being removed
*/
static int StorVscOnDeviceRemove(struct hv_device *Device)
@@ -649,6 +653,10 @@ int StorVscOnHostReset(struct hv_device *Device)
vstorPacket = &request->VStorPacket;
request->WaitEvent = osd_WaitEventCreate();
+ if (!request->WaitEvent) {
+ ret = -ENOMEM;
+ goto Cleanup;
+ }
vstorPacket->Operation = VStorOperationResetBus;
vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
@@ -683,7 +691,7 @@ Cleanup:
return ret;
}
-/**
+/*
* StorVscOnIORequest - Callback to initiate an I/O request
*/
static int StorVscOnIORequest(struct hv_device *Device,
@@ -717,7 +725,7 @@ static int StorVscOnIORequest(struct hv_device *Device,
}
/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, Request->Cdb,
- * Request->CdbLen); */
+ * Request->CdbLen); */
requestExtension->Request = Request;
requestExtension->Device = Device;
@@ -783,7 +791,7 @@ static int StorVscOnIORequest(struct hv_device *Device,
return ret;
}
-/**
+/*
* StorVscOnCleanup - Perform any cleanup when the driver is removed
*/
static void StorVscOnCleanup(struct hv_driver *Driver)
@@ -792,7 +800,7 @@ static void StorVscOnCleanup(struct hv_driver *Driver)
DPRINT_EXIT(STORVSC);
}
-/**
+/*
* StorVscInitialize - Main entry point
*/
int StorVscInitialize(struct hv_driver *Driver)
@@ -813,7 +821,7 @@ int StorVscInitialize(struct hv_driver *Driver)
sizeof(struct vmscsi_request));
/* Make sure we are at least 2 pages since 1 page is used for control */
- ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1));
+ /* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
Driver->name = gDriverName;
memcpy(&Driver->deviceType, &gStorVscDeviceType,
diff --git a/drivers/staging/hv/StorVscApi.h b/drivers/staging/hv/storvsc_api.h
index 126a8588edb..0063bde9a4b 100644
--- a/drivers/staging/hv/StorVscApi.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -25,7 +25,7 @@
#ifndef _STORVSC_API_H_
#define _STORVSC_API_H_
-#include "VmbusApi.h"
+#include "vmbus_api.h"
/* Defines */
#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 8a58272b803..d22e35f598b 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -33,9 +33,9 @@
#include <scsi/scsi_dbg.h>
#include "osd.h"
#include "logging.h"
-#include "VersionInfo.h"
+#include "version_info.h"
#include "vmbus.h"
-#include "StorVscApi.h"
+#include "storvsc_api.h"
struct host_device_context {
@@ -97,6 +97,8 @@ static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
+module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
/* The one and only one */
static struct storvsc_driver_context g_storvsc_drv;
@@ -112,7 +114,7 @@ static struct scsi_host_template scsi_driver = {
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 1,
/* 64 max_queue * 1 target */
- .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
+ .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
.this_id = -1,
/* no use setting to 0 since ll_blk_rw reset it to 1 */
/* currently 32 */
@@ -130,7 +132,7 @@ static struct scsi_host_template scsi_driver = {
};
-/**
+/*
* storvsc_drv_init - StorVsc driver initialization.
*/
static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
@@ -223,7 +225,7 @@ static void storvsc_drv_exit(void)
return;
}
-/**
+/*
* storvsc_probe - Add a new device for this driver
*/
static int storvsc_probe(struct device *device)
@@ -319,7 +321,7 @@ static int storvsc_probe(struct device *device)
return ret;
}
-/**
+/*
* storvsc_remove - Callback when our device is removed
*/
static int storvsc_remove(struct device *device)
@@ -372,7 +374,7 @@ static int storvsc_remove(struct device *device)
return ret;
}
-/**
+/*
* storvsc_commmand_completion - Command completion processing
*/
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
@@ -385,11 +387,11 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
- ASSERT(request == &cmd_request->request);
- ASSERT((unsigned long)scmnd->host_scribble ==
- (unsigned long)cmd_request);
- ASSERT(scmnd);
- ASSERT(scmnd->scsi_done);
+ /* ASSERT(request == &cmd_request->request); */
+ /* ASSERT(scmnd); */
+ /* ASSERT((unsigned long)scmnd->host_scribble == */
+ /* (unsigned long)cmd_request); */
+ /* ASSERT(scmnd->scsi_done); */
DPRINT_ENTER(STORVSC_DRV);
@@ -413,7 +415,7 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
- ASSERT(request->BytesXfer <= request->DataBuffer.Length);
+ /* ASSERT(request->BytesXfer <= request->DataBuffer.Length); */
scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
scsi_done_fn = scmnd->scsi_done;
@@ -522,7 +524,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
src = src_addr;
srclen = orig_sgl[i].length;
- ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
+ /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
if (j == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
@@ -583,7 +585,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
KM_IRQ0) + orig_sgl[i].offset;
dest = dest_addr;
destlen = orig_sgl[i].length;
- ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
+ /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
if (j == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
@@ -623,7 +625,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
return total_copied;
}
-/**
+/*
* storvsc_queuecommand - Initiate command processing
*/
static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
@@ -655,7 +657,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
/* If retrying, no need to prep the cmd */
if (scmnd->host_scribble) {
- ASSERT(scmnd->scsi_done != NULL);
+ /* ASSERT(scmnd->scsi_done != NULL); */
cmd_request =
(struct storvsc_cmd_request *)scmnd->host_scribble;
@@ -665,8 +667,8 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
goto retry_request;
}
- ASSERT(scmnd->scsi_done == NULL);
- ASSERT(scmnd->host_scribble == NULL);
+ /* ASSERT(scmnd->scsi_done == NULL); */
+ /* ASSERT(scmnd->host_scribble == NULL); */
scmnd->scsi_done = done;
@@ -717,7 +719,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
request->TargetId = scmnd->device->id;
request->LunId = scmnd->device->lun;
- ASSERT(scmnd->cmd_len <= 16);
+ /* ASSERT(scmnd->cmd_len <= 16); */
request->CdbLen = scmnd->cmd_len;
request->Cdb = scmnd->cmnd;
@@ -767,19 +769,17 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
request->DataBuffer.Offset = sgl[0].offset;
for (i = 0; i < scsi_sg_count(scmnd); i++) {
- DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
+ DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
page_to_pfn(sg_page((&sgl[i])));
}
} else if (scsi_sglist(scmnd)) {
- ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE);
+ /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
request->DataBuffer.Offset =
virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
request->DataBuffer.PfnArray[0] =
virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
- } else {
- ASSERT(scsi_bufflen(scmnd) == 0);
}
retry_request:
@@ -824,7 +824,7 @@ static int storvsc_merge_bvec(struct request_queue *q,
return bvec->bv_len;
}
-/**
+/*
* storvsc_device_configure - Configure the specified scsi device
*/
static int storvsc_device_alloc(struct scsi_device *sdevice)
@@ -863,7 +863,7 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
return 0;
}
-/**
+/*
* storvsc_host_reset_handler - Reset the scsi HBA
*/
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
@@ -993,6 +993,6 @@ static void __exit storvsc_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
-module_param(storvsc_ringbuffer_size, int, S_IRUGO);
+MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_init);
module_exit(storvsc_exit);
diff --git a/drivers/staging/hv/utils.h b/drivers/staging/hv/utils.h
new file mode 100644
index 00000000000..7c0749999a6
--- /dev/null
+++ b/drivers/staging/hv/utils.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2009, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ */
+#ifndef __HV_UTILS_H_
+#define __HV_UTILS_H_
+
+/*
+ * Common header for Hyper-V ICs
+ */
+#define ICMSGTYPE_NEGOTIATE 0
+#define ICMSGTYPE_HEARTBEAT 1
+#define ICMSGTYPE_KVPEXCHANGE 2
+#define ICMSGTYPE_SHUTDOWN 3
+#define ICMSGTYPE_TIMESYNC 4
+#define ICMSGTYPE_VSS 5
+
+#define ICMSGHDRFLAG_TRANSACTION 1
+#define ICMSGHDRFLAG_REQUEST 2
+#define ICMSGHDRFLAG_RESPONSE 4
+
+#define HV_S_OK 0x00000000
+#define HV_E_FAIL 0x80004005
+#define HV_ERROR_NOT_SUPPORTED 0x80070032
+#define HV_ERROR_MACHINE_LOCKED 0x800704F7
+
+struct vmbuspipe_hdr {
+ u32 flags;
+ u32 msgsize;
+} __attribute__((packed));
+
+struct ic_version {
+ u16 major;
+ u16 minor;
+} __attribute__((packed));
+
+struct icmsg_hdr {
+ struct ic_version icverframe;
+ u16 icmsgtype;
+ struct ic_version icvermsg;
+ u16 icmsgsize;
+ u32 status;
+ u8 ictransaction_id;
+ u8 icflags;
+ u8 reserved[2];
+} __attribute__((packed));
+
+struct icmsg_negotiate {
+ u16 icframe_vercnt;
+ u16 icmsg_vercnt;
+ u32 reserved;
+ struct ic_version icversion_data[1]; /* any size array */
+} __attribute__((packed));
+
+struct shutdown_msg_data {
+ u32 reason_code;
+ u32 timeout_seconds;
+ u32 flags;
+ u8 display_message[2048];
+} __attribute__((packed));
+
+struct heartbeat_msg_data {
+ u64 seq_num;
+ u32 reserved[8];
+} __attribute__((packed));
+
+/* Time Sync IC defs */
+#define ICTIMESYNCFLAG_PROBE 0
+#define ICTIMESYNCFLAG_SYNC 1
+#define ICTIMESYNCFLAG_SAMPLE 2
+
+#ifdef __x86_64__
+#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
+#else
+#define WLTIMEDELTA 116444736000000000LL
+#endif
+
+struct ictimesync_data {
+ u64 parenttime;
+ u64 childtime;
+ u64 roundtriptime;
+ u8 flags;
+} __attribute__((packed));
+
+/* Index for each IC struct in array hv_cb_utils[] */
+#define HV_SHUTDOWN_MSG 0
+#define HV_TIMESYNC_MSG 1
+#define HV_HEARTBEAT_MSG 2
+
+struct hyperv_service_callback {
+ u8 msg_type;
+ char *log_msg;
+ unsigned char data[16];
+ struct vmbus_channel *channel;
+ void (*callback) (void *context);
+};
+
+extern void prep_negotiate_resp(struct icmsg_hdr *,
+ struct icmsg_negotiate *, u8 *);
+extern void chn_cb_negotiate(void *);
+extern struct hyperv_service_callback hv_cb_utils[];
+
+#endif /* __HV_UTILS_H_ */
diff --git a/drivers/staging/hv/VersionInfo.h b/drivers/staging/hv/version_info.h
index 10d7b19a485..35178f2c796 100644
--- a/drivers/staging/hv/VersionInfo.h
+++ b/drivers/staging/hv/version_info.h
@@ -29,19 +29,20 @@
*
* Definition of versioning is as follows;
*
- * Major Number Changes for these scenarios;
+ * Major Number Changes for these scenarios;
* 1. When a new version of Windows Hyper-V
* is released.
* 2. A Major change has occurred in the
- * Linux IC's.
+ * Linux IC's.
* (For example the merge for the first time
* into the kernel) Every time the Major Number
* changes, the Revision number is reset to 0.
* Minor Number Changes when new functionality is added
* to the Linux IC's that is not a bug fix.
*
+ * 3.1 - Added the completed hv_utils driver (Shutdown/Heartbeat/Timesync)
*/
-#define HV_DRV_VERSION "3.0"
+#define HV_DRV_VERSION "3.1"
#endif
diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/vmbus.c
index 2f84bf7c0a9..007543bdb41 100644
--- a/drivers/staging/hv/Vmbus.c
+++ b/drivers/staging/hv/vmbus.c
@@ -24,8 +24,8 @@
#include <linux/slab.h>
#include "osd.h"
#include "logging.h"
-#include "VersionInfo.h"
-#include "VmbusPrivate.h"
+#include "version_info.h"
+#include "vmbus_private.h"
static const char *gDriverName = "vmbus";
@@ -52,7 +52,7 @@ static const struct hv_guid gVmbusDeviceId = {
static struct hv_driver *gDriver; /* vmbus driver object */
static struct hv_device *gDevice; /* vmbus root device */
-/**
+/*
* VmbusGetChannelOffers - Retrieve the channel offers from the parent partition
*/
static void VmbusGetChannelOffers(void)
@@ -62,7 +62,7 @@ static void VmbusGetChannelOffers(void)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusGetChannelInterface - Get the channel interface
*/
static void VmbusGetChannelInterface(struct vmbus_channel_interface *Interface)
@@ -70,7 +70,7 @@ static void VmbusGetChannelInterface(struct vmbus_channel_interface *Interface)
GetChannelInterface(Interface);
}
-/**
+/*
* VmbusGetChannelInfo - Get the device info for the specified device object
*/
static void VmbusGetChannelInfo(struct hv_device *DeviceObject,
@@ -79,7 +79,7 @@ static void VmbusGetChannelInfo(struct hv_device *DeviceObject,
GetChannelInfo(DeviceObject, DeviceInfo);
}
-/**
+/*
* VmbusCreateChildDevice - Creates the child device on the bus that represents the channel offer
*/
struct hv_device *VmbusChildDeviceCreate(struct hv_guid *DeviceType,
@@ -92,7 +92,7 @@ struct hv_device *VmbusChildDeviceCreate(struct hv_guid *DeviceType,
Context);
}
-/**
+/*
* VmbusChildDeviceAdd - Registers the child device with the vmbus
*/
int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
@@ -102,7 +102,7 @@ int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
return vmbusDriver->OnChildDeviceAdd(gDevice, ChildDevice);
}
-/**
+/*
* VmbusChildDeviceRemove Unregisters the child device from the vmbus
*/
void VmbusChildDeviceRemove(struct hv_device *ChildDevice)
@@ -112,7 +112,7 @@ void VmbusChildDeviceRemove(struct hv_device *ChildDevice)
vmbusDriver->OnChildDeviceRemove(ChildDevice);
}
-/**
+/*
* VmbusOnDeviceAdd - Callback when the root bus device is added
*/
static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
@@ -141,7 +141,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
return ret;
}
-/**
+/*
* VmbusOnDeviceRemove - Callback when the root bus device is removed
*/
static int VmbusOnDeviceRemove(struct hv_device *dev)
@@ -157,7 +157,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
return ret;
}
-/**
+/*
* VmbusOnCleanup - Perform any cleanup when the driver is removed
*/
static void VmbusOnCleanup(struct hv_driver *drv)
@@ -169,7 +169,7 @@ static void VmbusOnCleanup(struct hv_driver *drv)
DPRINT_EXIT(VMBUS);
}
-/**
+/*
* VmbusOnMsgDPC - DPC routine to handle messages from the hypervisor
*/
static void VmbusOnMsgDPC(struct hv_driver *drv)
@@ -185,11 +185,10 @@ static void VmbusOnMsgDPC(struct hv_driver *drv)
/* no msg */
break;
} else {
- copied = kmalloc(sizeof(*copied), GFP_ATOMIC);
+ copied = kmemdup(msg, sizeof(*copied), GFP_ATOMIC);
if (copied == NULL)
continue;
- memcpy(copied, msg, sizeof(*copied));
osd_schedule_callback(gVmbusConnection.WorkQueue,
VmbusOnChannelMessage,
(void *)copied);
@@ -217,7 +216,7 @@ static void VmbusOnMsgDPC(struct hv_driver *drv)
}
}
-/**
+/*
* VmbusOnEventDPC - DPC routine to handle events from the hypervisor
*/
static void VmbusOnEventDPC(struct hv_driver *drv)
@@ -226,7 +225,7 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
VmbusOnEvents();
}
-/**
+/*
* VmbusOnISR - ISR routine
*/
static int VmbusOnISR(struct hv_driver *drv)
@@ -264,7 +263,7 @@ static int VmbusOnISR(struct hv_driver *drv)
return ret;
}
-/**
+/*
* VmbusInitialize - Main entry point
*/
int VmbusInitialize(struct hv_driver *drv)
diff --git a/drivers/staging/hv/vmbus.h b/drivers/staging/hv/vmbus.h
index 6404b8424be..0c6ee0f487f 100644
--- a/drivers/staging/hv/vmbus.h
+++ b/drivers/staging/hv/vmbus.h
@@ -26,7 +26,7 @@
#define _VMBUS_H_
#include <linux/device.h>
-#include "VmbusApi.h"
+#include "vmbus_api.h"
struct driver_context {
struct hv_guid class_id;
diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/vmbus_api.h
index d089bb193e7..4275be3292c 100644
--- a/drivers/staging/hv/VmbusApi.h
+++ b/drivers/staging/hv/vmbus_api.h
@@ -84,6 +84,24 @@ struct hv_device_info {
struct hv_dev_port_info Outbound;
};
+/**
+ * struct vmbus_channel_interface - Contains member functions for vmbus channel
+ * @Open: Open the channel
+ * @Close: Close the channel
+ * @SendPacket: Send a packet over the channel
+ * @SendPacketPageBuffer: Send a single page buffer over the channel
+ * @SendPacketMultiPageBuffer: Send multiple page buffers over the channel
+ * @RecvPacket: Receive a packet
+ * @RecvPacketRaw: Receive a raw packet
+ * @EstablishGpadl: Set up GPADL for ringbuffer
+ * @TeardownGpadl: Teardown GPADL for ringbuffer
+ * @GetInfo: Get info about the channel
+ *
+ * This structure contains function pointer to control vmbus channel
+ * behavior. None of these functions is externally callable, but they
+ * are used for normal vmbus channel internal behavior.
+ * Only used by Hyper-V drivers.
+ */
struct vmbus_channel_interface {
int (*Open)(struct hv_device *Device, u32 SendBufferSize,
u32 RecvRingBufferSize, void *UserData, u32 UserDataLen,
diff --git a/drivers/staging/hv/VmbusChannelInterface.h b/drivers/staging/hv/vmbus_channel_interface.h
index 26742823748..26742823748 100644
--- a/drivers/staging/hv/VmbusChannelInterface.h
+++ b/drivers/staging/hv/vmbus_channel_interface.h
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index 3397ef08e0a..c21731a12ca 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/slab.h>
-#include "VersionInfo.h"
+#include "version_info.h"
#include "osd.h"
#include "logging.h"
#include "vmbus.h"
@@ -129,7 +129,7 @@ static struct vmbus_driver_context g_vmbus_drv = {
.bus.dev_attrs = vmbus_device_attrs,
};
-/**
+/*
* vmbus_show_device_attr - Show the device attribute in sysfs.
*
* This is invoked when user does a
@@ -233,17 +233,17 @@ static ssize_t vmbus_show_device_attr(struct device *dev,
}
}
-/**
+/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
* Here, we
- * - initialize the vmbus driver context
- * - setup various driver entry points
- * - invoke the vmbus hv main init routine
- * - get the irq resource
- * - invoke the vmbus to add the vmbus root device
- * - setup the vmbus root device
- * - retrieve the channel offers
+ * - initialize the vmbus driver context
+ * - setup various driver entry points
+ * - invoke the vmbus hv main init routine
+ * - get the irq resource
+ * - invoke the vmbus to add the vmbus root device
+ * - setup the vmbus root device
+ * - retrieve the channel offers
*/
static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
{
@@ -362,7 +362,7 @@ cleanup:
return ret;
}
-/**
+/*
* vmbus_bus_exit - Terminate the vmbus driver.
*
* This routine is opposite of vmbus_bus_init()
@@ -398,8 +398,18 @@ static void vmbus_bus_exit(void)
return;
}
+
/**
- * vmbus_child_driver_register - Register a vmbus's child driver
+ * vmbus_child_driver_register() - Register a vmbus child driver
+ * @driver_ctx: Pointer to the driver structure to register
+ *
+ * @driver_ctx is of type &struct driver_context
+ *
+ * Registers the given driver with Linux through the driver_register()
+ * call and sets up the Hyper-V vmbus handling for this driver.
+ * Returns the value returned by driver_register().
+ *
+ * Mainly used by Hyper-V drivers.
*/
int vmbus_child_driver_register(struct driver_context *driver_ctx)
{
@@ -425,7 +435,15 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx)
EXPORT_SYMBOL(vmbus_child_driver_register);
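A hypothetical child-driver skeleton, for illustration only; real callers (netvsc_drv.c and storvsc_drv.c in this patch) fill in more of &struct driver_context than the class_id shown here:

#include <linux/module.h>
#include "vmbus.h"

static struct driver_context example_drv_ctx = {
	/* .class_id = <GUID of the device class this driver handles>, */
};

static int __init example_init(void)
{
	return vmbus_child_driver_register(&example_drv_ctx);
}

static void __exit example_exit(void)
{
	vmbus_child_driver_unregister(&example_drv_ctx);
}

module_init(example_init);
module_exit(example_exit);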
/**
- * vmbus_child_driver_unregister Unregister a vmbus's child driver
+ * vmbus_child_driver_unregister() - Unregister a vmbus child driver
+ * @driver_ctx: Pointer to the driver structure to unregister
+ *
+ * @driver_ctx is of type &struct driver_context
+ *
+ * Unregisters the given driver with Linux through the driver_unregister()
+ * call and removes the driver from the Hyper-V vmbus handling.
+ *
+ * Mainly used by Hyper-V drivers.
*/
void vmbus_child_driver_unregister(struct driver_context *driver_ctx)
{
@@ -443,9 +461,15 @@ void vmbus_child_driver_unregister(struct driver_context *driver_ctx)
EXPORT_SYMBOL(vmbus_child_driver_unregister);
/**
- * vmbus_get_interface - Get the vmbus channel interface.
+ * vmbus_get_interface() - Get the vmbus channel interface.
+ * @interface: Pointer to channel interface structure
+ *
+ * Fills in @interface with the vmbus channel interface used by the driver.
+ *
+ * @interface is of type &struct vmbus_channel_interface
+ * This is invoked by a child/client driver that sits above vmbus.
*
- * This is invoked by child/client driver that sits above vmbus
+ * Mainly used by Hyper-V drivers.
*/
void vmbus_get_interface(struct vmbus_channel_interface *interface)
{
@@ -455,7 +479,7 @@ void vmbus_get_interface(struct vmbus_channel_interface *interface)
}
EXPORT_SYMBOL(vmbus_get_interface);
-/**
+/*
* vmbus_child_device_get_info - Get the vmbus child device info.
*
* This is invoked to display various device attributes in sysfs.
@@ -468,8 +492,9 @@ static void vmbus_child_device_get_info(struct hv_device *device_obj,
vmbus_drv_obj->GetChannelInfo(device_obj, device_info);
}
-/**
- * vmbus_child_device_create - Creates and registers a new child device on the vmbus.
+/*
+ * vmbus_child_device_create - Creates and registers a new child device
+ * on the vmbus.
*/
static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
struct hv_guid *instance,
@@ -523,7 +548,7 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
return child_device_obj;
}
-/**
+/*
* vmbus_child_device_register - Register the child device on the specified bus
*/
static int vmbus_child_device_register(struct hv_device *root_device_obj,
@@ -571,8 +596,9 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
return ret;
}
-/**
- * vmbus_child_device_unregister - Remove the specified child device from the vmbus.
+/*
+ * vmbus_child_device_unregister - Remove the specified child device
+ * from the vmbus.
*/
static void vmbus_child_device_unregister(struct hv_device *device_obj)
{
@@ -595,7 +621,7 @@ static void vmbus_child_device_unregister(struct hv_device *device_obj)
DPRINT_EXIT(VMBUS_DRV);
}
-/**
+/*
* vmbus_child_device_destroy - Destroy the specified child device on the vmbus.
*/
static void vmbus_child_device_destroy(struct hv_device *device_obj)
@@ -605,7 +631,7 @@ static void vmbus_child_device_destroy(struct hv_device *device_obj)
DPRINT_EXIT(VMBUS_DRV);
}
-/**
+/*
* vmbus_uevent - add uevent for our device
*
* This routine is invoked when a device is added or removed on the vmbus to
@@ -684,7 +710,7 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
return 0;
}
-/**
+/*
* vmbus_match - Attempt to match the specified device to the specified driver
*/
static int vmbus_match(struct device *device, struct device_driver *driver)
@@ -719,7 +745,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
return match;
}
-/**
+/*
* vmbus_probe_failed_cb - Callback when a driver probe failed in vmbus_probe()
*
* We need a callback because we cannot invoked device_unregister() inside
@@ -742,7 +768,7 @@ static void vmbus_probe_failed_cb(struct work_struct *context)
DPRINT_EXIT(VMBUS_DRV);
}
-/**
+/*
* vmbus_probe - Add the new vmbus's child device
*/
static int vmbus_probe(struct device *child_device)
@@ -778,7 +804,7 @@ static int vmbus_probe(struct device *child_device)
return ret;
}
-/**
+/*
* vmbus_remove - Remove a vmbus device
*/
static int vmbus_remove(struct device *child_device)
@@ -820,7 +846,7 @@ static int vmbus_remove(struct device *child_device)
return 0;
}
-/**
+/*
* vmbus_shutdown - Shutdown a vmbus device
*/
static void vmbus_shutdown(struct device *child_device)
@@ -856,7 +882,7 @@ static void vmbus_shutdown(struct device *child_device)
return;
}
-/**
+/*
* vmbus_bus_release - Final callback release of the vmbus root device
*/
static void vmbus_bus_release(struct device *device)
@@ -870,7 +896,7 @@ static void vmbus_bus_release(struct device *device)
DPRINT_EXIT(VMBUS_DRV);
}
-/**
+/*
* vmbus_device_release - Final callback release of the vmbus child device
*/
static void vmbus_device_release(struct device *device)
@@ -888,7 +914,7 @@ static void vmbus_device_release(struct device *device)
return;
}
-/**
+/*
* vmbus_msg_dpc - Tasklet routine to handle hypervisor messages
*/
static void vmbus_msg_dpc(unsigned long data)
@@ -897,7 +923,7 @@ static void vmbus_msg_dpc(unsigned long data)
DPRINT_ENTER(VMBUS_DRV);
- ASSERT(vmbus_drv_obj->OnMsgDpc != NULL);
+ /* ASSERT(vmbus_drv_obj->OnMsgDpc != NULL); */
/* Call to bus driver to handle interrupt */
vmbus_drv_obj->OnMsgDpc(&vmbus_drv_obj->Base);
@@ -905,7 +931,7 @@ static void vmbus_msg_dpc(unsigned long data)
DPRINT_EXIT(VMBUS_DRV);
}
-/**
+/*
* vmbus_msg_dpc - Tasklet routine to handle hypervisor events
*/
static void vmbus_event_dpc(unsigned long data)
@@ -914,7 +940,7 @@ static void vmbus_event_dpc(unsigned long data)
DPRINT_ENTER(VMBUS_DRV);
- ASSERT(vmbus_drv_obj->OnEventDpc != NULL);
+ /* ASSERT(vmbus_drv_obj->OnEventDpc != NULL); */
/* Call to bus driver to handle interrupt */
vmbus_drv_obj->OnEventDpc(&vmbus_drv_obj->Base);
@@ -929,7 +955,7 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
DPRINT_ENTER(VMBUS_DRV);
- ASSERT(vmbus_driver_obj->OnIsr != NULL);
+ /* ASSERT(vmbus_driver_obj->OnIsr != NULL); */
/* Call to bus driver to handle interrupt */
ret = vmbus_driver_obj->OnIsr(&vmbus_driver_obj->Base);
diff --git a/drivers/staging/hv/VmbusPacketFormat.h b/drivers/staging/hv/vmbus_packet_format.h
index 79120bc742d..f9f6b4bf6fb 100644
--- a/drivers/staging/hv/VmbusPacketFormat.h
+++ b/drivers/staging/hv/vmbus_packet_format.h
@@ -22,6 +22,7 @@
*/
#ifndef _VMBUSPACKETFORMAT_H_
+#define _VMBUSPACKETFORMAT_H_
struct vmpacket_descriptor {
u16 Type;
diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/vmbus_private.h
index 05ad2c9380d..588c667a7f6 100644
--- a/drivers/staging/hv/VmbusPrivate.h
+++ b/drivers/staging/hv/vmbus_private.h
@@ -25,12 +25,12 @@
#ifndef _VMBUS_PRIVATE_H_
#define _VMBUS_PRIVATE_H_
-#include "Hv.h"
-#include "VmbusApi.h"
-#include "Channel.h"
-#include "ChannelMgmt.h"
-#include "ChannelInterface.h"
-#include "RingBuffer.h"
+#include "hv.h"
+#include "vmbus_api.h"
+#include "channel.h"
+#include "channel_mgmt.h"
+#include "channel_interface.h"
+#include "ring_buffer.h"
#include <linux/list.h>
diff --git a/drivers/staging/hv/vstorage.h b/drivers/staging/hv/vstorage.h
index 6d160a53914..4ea597d7a7d 100644
--- a/drivers/staging/hv/vstorage.h
+++ b/drivers/staging/hv/vstorage.h
@@ -28,7 +28,7 @@
#define REVISION_STRING(REVISION_) #REVISION_
#define FILL_VMSTOR_REVISION(RESULT_LVALUE_) \
{ \
- char *revisionString = REVISION_STRING($Revision: 6 $) + 11; \
+ char *revisionString = REVISION_STRING($Revision : 6 $) + 11; \
RESULT_LVALUE_ = 0; \
while (*revisionString >= '0' && *revisionString <= '9') { \
RESULT_LVALUE_ *= 10; \
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 74d31247337..a4555e6f133 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -7,111 +7,173 @@
* the Free Software Foundation.
*/
+/* Made up value to limit allocation sizes */
+#include <string.h>
+#include <stdlib.h>
+
+#define IIO_MAX_NAME_LENGTH 30
+
#define IIO_EVENT_CODE_RING_50_FULL 200
#define IIO_EVENT_CODE_RING_75_FULL 201
#define IIO_EVENT_CODE_RING_100_FULL 202
+const char *iio_dir = "/sys/bus/iio/devices/";
+
struct iio_event_data {
int id;
__s64 timestamp;
};
-
-inline char *find_ring_subelement(const char *directory, const char *subelement)
-{
- DIR *dp;
- const struct dirent *ent;
- int pos;
- char temp[100];
- char *returnstring;
- dp = opendir(directory);
- if (dp == NULL) {
- printf("could not directory: %s\n", directory);
- return NULL;
- }
- while (ent = readdir(dp), ent != NULL) {
- if (strcmp(ent->d_name, ".") != 0 &&
- strcmp(ent->d_name, "..") != 0) {
- if (strncmp(ent->d_name, subelement, strlen(subelement)) == 0) {
- int length = sprintf(temp, "%s%s%s", directory, ent->d_name, "/");
- returnstring = malloc(length+1);
- strncpy(returnstring, temp, length+1);
- return returnstring;
-
- }
- }
- }
- return 0;
-}
-
-
-char *find_type_by_name(const char *name, const char *type)
+/**
+ * find_type_by_name() - function to match top level types by name
+ * @name: top level type instance name
+ * @type: the type of top level instance being sort
+ *
+ * Typical types this is used for are device and trigger.
+ **/
+inline int find_type_by_name(const char *name, const char *type)
{
- const char *iio_dir = "/sys/class/iio/";
const struct dirent *ent;
- int cnt, pos, pos2;
+ int number, numstrlen;
FILE *nameFile;
DIR *dp;
- char thisname[100];
- char temp[100];
-
- char *returnstring = NULL;
+ char thisname[IIO_MAX_NAME_LENGTH];
+ char *filename;
struct stat Stat;
- pos = sprintf(temp, "%s", iio_dir);
+
dp = opendir(iio_dir);
if (dp == NULL) {
printf("No industrialio devices available");
- return NULL;
+ return -ENODEV;
}
+
while (ent = readdir(dp), ent != NULL) {
- cnt++;
- /*reject . and .. */
if (strcmp(ent->d_name, ".") != 0 &&
- strcmp(ent->d_name, "..") != 0) {
- /*make sure it isn't a trigger!*/
- if (strncmp(ent->d_name, type, strlen(type)) == 0) {
- /* build full path to new file */
- pos2 = pos + sprintf(temp + pos, "%s/", ent->d_name);
- sprintf(temp + pos2, "name");
- printf("search location %s\n", temp);
- nameFile = fopen(temp, "r");
- if (!nameFile) {
- sprintf(temp + pos2, "modalias", ent->d_name);
- nameFile = fopen(temp, "r");
- if (!nameFile) {
- printf("Failed to find a name for device\n");
- return NULL;
- }
- }
+ strcmp(ent->d_name, "..") != 0 &&
+ strlen(ent->d_name) > strlen(type) &&
+ strncmp(ent->d_name, type, strlen(type)) == 0) {
+ numstrlen = sscanf(ent->d_name + strlen(type),
+ "%d",
+ &number);
+ /* verify the next character is not a colon */
+ if (strncmp(ent->d_name + strlen(type) + numstrlen,
+ ":",
+ 1) != 0) {
+ filename = malloc(strlen(iio_dir)
+ + strlen(type)
+ + 1
+ + numstrlen
+ + 1);
+ if (filename == NULL)
+ return -ENOMEM;
+ sprintf(filename, "%s%s%d/name",
+ iio_dir,
+ type,
+ number);
+ nameFile = fopen(filename, "r");
+ if (!nameFile)
+ continue;
+ free(filename);
fscanf(nameFile, "%s", thisname);
- if (strcmp(name, thisname) == 0) {
- returnstring = malloc(strlen(temp) + 1);
- sprintf(temp + pos2, "");
- strcpy(returnstring, temp);
- return returnstring;
- }
+ if (strcmp(name, thisname) == 0)
+ return number;
fclose(nameFile);
-
}
}
}
+ return -ENODEV;
}
-int write_sysfs_int(char *filename, char *basedir, int val)
+inline int _write_sysfs_int(char *filename, char *basedir, int val, int verify)
{
int ret;
- FILE *sysfsfp;
- char temp[100];
- sprintf(temp, "%s%s", basedir, filename);
+ FILE *sysfsfp;
+ int test;
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL)
+ return -ENOMEM;
+ sprintf(temp, "%s/%s", basedir, filename);
sysfsfp = fopen(temp, "w");
- if (sysfsfp == NULL)
- return -1;
+ if (sysfsfp == NULL) {
+ printf("failed to open %s\n", temp);
+ ret = -errno;
+ goto error_free;
+ }
fprintf(sysfsfp, "%d", val);
fclose(sysfsfp);
- return 0;
+	ret = 0;
+	if (verify) {
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL) {
+ printf("failed to open %s\n", temp);
+ ret = -errno;
+ goto error_free;
+ }
+ fscanf(sysfsfp, "%d", &test);
+ if (test != val) {
+ printf("Possible failure in int write %d to %s%s\n",
+ val,
+ basedir,
+ filename);
+ ret = -1;
+ }
+ }
+error_free:
+ free(temp);
+ return ret;
}
+int write_sysfs_int(char *filename, char *basedir, int val)
+{
+ return _write_sysfs_int(filename, basedir, val, 0);
+}
+
+int write_sysfs_int_and_verify(char *filename, char *basedir, int val)
+{
+ return _write_sysfs_int(filename, basedir, val, 1);
+}
+
+int _write_sysfs_string(char *filename, char *basedir, char *val, int verify)
+{
+	int ret = 0;
+ FILE *sysfsfp;
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL) {
+ printf("Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ sprintf(temp, "%s/%s", basedir, filename);
+ sysfsfp = fopen(temp, "w");
+ if (sysfsfp == NULL) {
+ printf("Could not open %s\n", temp);
+ ret = -errno;
+ goto error_free;
+ }
+ fprintf(sysfsfp, "%s", val);
+ fclose(sysfsfp);
+ if (verify) {
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL) {
+ ret = -errno;
+ goto error_free;
+ }
+ fscanf(sysfsfp, "%s", temp);
+ if (strcmp(temp, val) != 0) {
+ printf("Possible failure in string write of %s "
+ "Should be %s "
+		       "written to %s/%s\n",
+ temp,
+ val,
+ basedir,
+ filename);
+ ret = -1;
+ }
+ }
+error_free:
+ free(temp);
+
+ return ret;
+}
/**
* write_sysfs_string_and_verify() - string write, readback and verify
* @filename: name of file to write to
@@ -120,40 +182,54 @@ int write_sysfs_int(char *filename, char *basedir, int val)
**/
int write_sysfs_string_and_verify(char *filename, char *basedir, char *val)
{
- int ret;
- FILE *sysfsfp;
- char temp[100];
- sprintf(temp, "%s%s", basedir, filename);
- sysfsfp = fopen(temp, "w");
- if (sysfsfp == NULL)
- return -1;
- fprintf(sysfsfp, "%s", val);
- fclose(sysfsfp);
+ return _write_sysfs_string(filename, basedir, val, 1);
+}
- sysfsfp = fopen(temp, "r");
- if (sysfsfp == NULL)
- return -1;
- fscanf(sysfsfp, "%s", temp);
- if (strcmp(temp, val) != 0) {
- printf("Possible failure in string write %s to %s%s \n",
- val,
- basedir,
- filename);
- return -1;
- }
- return 0;
+int write_sysfs_string(char *filename, char *basedir, char *val)
+{
+ return _write_sysfs_string(filename, basedir, val, 0);
}
int read_sysfs_posint(char *filename, char *basedir)
{
int ret;
FILE *sysfsfp;
- char temp[100];
- sprintf(temp, "%s%s", basedir, filename);
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL) {
+		printf("Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ sprintf(temp, "%s/%s", basedir, filename);
sysfsfp = fopen(temp, "r");
- if (sysfsfp == NULL)
- return -1;
+ if (sysfsfp == NULL) {
+ ret = -errno;
+ goto error_free;
+ }
fscanf(sysfsfp, "%d\n", &ret);
fclose(sysfsfp);
+error_free:
+ free(temp);
+ return ret;
+}
+
+int read_sysfs_float(char *filename, char *basedir, float *val)
+{
+	int ret = 0;
+ FILE *sysfsfp;
+ char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
+ if (temp == NULL) {
+		printf("Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ sprintf(temp, "%s/%s", basedir, filename);
+ sysfsfp = fopen(temp, "r");
+ if (sysfsfp == NULL) {
+ ret = -errno;
+ goto error_free;
+ }
+ fscanf(sysfsfp, "%f\n", val);
+ fclose(sysfsfp);
+error_free:
+ free(temp);
return ret;
}
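/*
 * A short sketch (not part of the patch) of how the helpers above combine,
 * modelled on the example program that follows. The "lis3l02dq" device name
 * and the buffer "length" attribute are taken from that example; the function
 * name set_and_check_buffer_length is made up and error handling is trimmed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <dirent.h>
#include <sys/stat.h>
#include "iio_utils.h"

static int set_and_check_buffer_length(int len)
{
	char *buf_dir;
	int dev_num = find_type_by_name("lis3l02dq", "device");

	if (dev_num < 0)
		return dev_num;
	buf_dir = malloc(strlen(iio_dir) + 32);
	if (buf_dir == NULL)
		return -ENOMEM;
	/* e.g. /sys/bus/iio/devices/device0:buffer0 */
	sprintf(buf_dir, "%sdevice%d:buffer0", iio_dir, dev_num);
	write_sysfs_int("length", buf_dir, len);
	printf("buffer length now %d\n", read_sysfs_posint("length", buf_dir));
	free(buf_dir);
	return 0;
}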
diff --git a/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c b/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c
index 2b5cfc58d78..3a580284020 100644
--- a/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c
+++ b/drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c
@@ -1,4 +1,4 @@
-/* Industrialio test ring buffer with a lis3l02dq acceleromter
+/* Industrialio ring buffer with a lis3l02dq accelerometer
*
* Copyright (c) 2008 Jonathan Cameron
*
@@ -6,126 +6,181 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
- * Assumes suitable udev rules are used to create the dev nodes as named here.
+ * This program is primarily intended as an example application.
*/
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
-#include <stdint.h>
-#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dir.h>
-
#include <linux/types.h>
-#include <dirent.h>
-#include "iio_util.h"
+#include "iio_utils.h"
-static const char *ring_access = "/dev/iio/lis3l02dq_ring_access";
-static const char *ring_event = "/dev/iio/lis3l02dq_ring_event";
-static const char *device_name = "lis3l02dq";
-static const char *trigger_name = "lis3l02dq-dev0";
-static int NumVals = 3;
-static int scan_ts = 1;
-static int RingLength = 128;
+const char *device_name = "lis3l02dq";
+const char *trigger_name_base = "lis3l02dq-dev";
+const int num_vals = 3;
+const int scan_ts = 1;
+const int buf_len = 128;
+const int num_loops = 10;
/*
* Could get this from ring bps, but only after starting the ring
- * which is a bit late for it to be useful
+ * which is a bit late for it to be useful.
+ *
+ * Todo: replace with much more generic version based on scan_elements
+ * directory.
*/
-int size_from_scanmode(int numVals, int timestamp)
+int size_from_scanmode(int num_vals, int timestamp)
{
- if (numVals && timestamp)
+ if (num_vals && timestamp)
return 16;
else if (timestamp)
return 8;
else
- return numVals*2;
+ return num_vals*2;
}
int main(int argc, char **argv)
{
+ int ret;
int i, j, k, toread;
FILE *fp_ev;
int fp;
+
+ char *trigger_name, *dev_dir_name, *buf_dir_name;
char *data;
size_t read_size;
struct iio_event_data dat;
+ int dev_num, trig_num;
- char *BaseDirectoryName,
- *TriggerDirectoryName,
- *RingBufferDirectoryName;
+ char *buffer_access, *buffer_event;
+ const char *iio_dir = "/sys/bus/iio/devices/";
+ int scan_size;
+ float gain = 1;
- BaseDirectoryName = find_type_by_name(device_name, "device");
- if (BaseDirectoryName == NULL) {
- printf("Failed to find the %s \n", device_name);
- return -1;
+
+ /* Find out which iio device is the accelerometer. */
+ dev_num = find_type_by_name(device_name, "device");
+ if (dev_num < 0) {
+ printf("Failed to find the %s\n", device_name);
+ ret = -ENODEV;
+ goto error_ret;
+ }
+ printf("iio device number being used is %d\n", dev_num);
+ asprintf(&dev_dir_name, "%sdevice%d", iio_dir, dev_num);
+
+ /*
+ * Build the trigger name.
+ * In this case we want the lis3l02dq's data ready trigger
+ * for this lis3l02dq. The naming is lis3l02dq-dev[n], where
+ * n matches the device number found above.
+ */
+ ret = asprintf(&trigger_name, "%s%d", trigger_name_base, dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_free_dev_dir_name;
}
- TriggerDirectoryName = find_type_by_name(trigger_name, "trigger");
- if (TriggerDirectoryName == NULL) {
+
+ /*
+ * Find the trigger by name.
+ * This is technically unnecessary here as we only need to
+ * refer to the trigger by name and that name is already
+ * known.
+ */
+ trig_num = find_type_by_name(trigger_name, "trigger");
+ if (trig_num < 0) {
printf("Failed to find the %s\n", trigger_name);
- return -1;
+ ret = -ENODEV;
+ goto error_free_triggername;
}
- RingBufferDirectoryName = find_ring_subelement(BaseDirectoryName,
- "ring_buffer");
- if (RingBufferDirectoryName == NULL) {
- printf("Failed to find ring buffer\n");
- return -1;
+ printf("iio trigger number being used is %d\n", trig_num);
+
+ /*
+ * Read in the scale value - in a more generic case, first
+ * check for accel_scale, then the individual channel scales
+ */
+ ret = read_sysfs_float("accel_scale", dev_dir_name, &gain);
+ if (ret)
+		goto error_free_triggername;
+
+ /*
+ * Construct the directory name for the associated buffer.
+ * As we know that the lis3l02dq has only one buffer this may
+ * be built rather than found.
+ */
+ ret = asprintf(&buf_dir_name, "%sdevice%d:buffer0", iio_dir, dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_free_triggername;
}
-
- if (write_sysfs_string_and_verify("trigger/current_trigger",
- BaseDirectoryName,
- (char *)trigger_name) < 0) {
- printf("Failed to write current_trigger file \n");
- return -1;
+ /* Set the device trigger to be the data rdy trigger found above */
+ ret = write_sysfs_string_and_verify("trigger/current_trigger",
+ dev_dir_name,
+ trigger_name);
+ if (ret < 0) {
+ printf("Failed to write current_trigger file\n");
+ goto error_free_buf_dir_name;
}
/* Setup ring buffer parameters */
- if (write_sysfs_int("length", RingBufferDirectoryName,
- RingLength) < 0) {
- printf("Failed to open the ring buffer length file \n");
- return -1;
- }
+ ret = write_sysfs_int("length", buf_dir_name, buf_len);
+ if (ret < 0)
+ goto error_free_buf_dir_name;
- /* Enable the ring buffer */
- if (write_sysfs_int("ring_enable", RingBufferDirectoryName, 1) < 0) {
- printf("Failed to open the ring buffer control file \n");
- return -1;
- };
+ /* Enable the buffer */
+ ret = write_sysfs_int("ring_enable", buf_dir_name, 1);
+ if (ret < 0)
+ goto error_free_buf_dir_name;
- data = malloc(size_from_scanmode(NumVals, scan_ts)*RingLength);
+ data = malloc(size_from_scanmode(num_vals, scan_ts)*buf_len);
if (!data) {
- printf("Could not allocate space for usespace data store\n");
- return -1;
+ ret = -ENOMEM;
+ goto error_free_buf_dir_name;
+ }
+
+ ret = asprintf(&buffer_access,
+ "/dev/device%d:buffer0:access0",
+ dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_free_data;
}
+ ret = asprintf(&buffer_event, "/dev/device%d:buffer0:event0", dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_free_data;
+ }
/* Attempt to open non blocking the access dev */
- fp = open(ring_access, O_RDONLY | O_NONBLOCK);
+ fp = open(buffer_access, O_RDONLY | O_NONBLOCK);
if (fp == -1) { /*If it isn't there make the node */
- printf("Failed to open %s\n", ring_access);
- return -1;
+ printf("Failed to open %s\n", buffer_access);
+ ret = -errno;
+ goto error_free_buffer_event;
}
/* Attempt to open the event access dev (blocking this time) */
- fp_ev = fopen(ring_event, "rb");
+ fp_ev = fopen(buffer_event, "rb");
if (fp_ev == NULL) {
- printf("Failed to open %s\n", ring_event);
- return -1;
+ printf("Failed to open %s\n", buffer_event);
+ ret = -errno;
+ goto error_close_buffer_access;
}
/* Wait for events 10 times */
- for (j = 0; j < 10; j++) {
+ for (j = 0; j < num_loops; j++) {
read_size = fread(&dat, 1, sizeof(struct iio_event_data),
fp_ev);
switch (dat.id) {
case IIO_EVENT_CODE_RING_100_FULL:
- toread = RingLength;
+ toread = buf_len;
break;
case IIO_EVENT_CODE_RING_75_FULL:
- toread = RingLength*3/4;
+ toread = buf_len*3/4;
break;
case IIO_EVENT_CODE_RING_50_FULL:
- toread = RingLength/2;
+ toread = buf_len/2;
break;
default:
printf("Unexpecteded event code\n");
@@ -133,39 +188,51 @@ int main(int argc, char **argv)
}
read_size = read(fp,
data,
- toread*size_from_scanmode(NumVals, scan_ts));
+ toread*size_from_scanmode(num_vals, scan_ts));
if (read_size == -EAGAIN) {
- printf("nothing available \n");
+ printf("nothing available\n");
continue;
}
-
- for (i = 0;
- i < read_size/size_from_scanmode(NumVals, scan_ts);
- i++) {
- for (k = 0; k < NumVals; k++) {
- __s16 val = *(__s16 *)(&data[i*size_from_scanmode(NumVals, scan_ts)
+ scan_size = size_from_scanmode(num_vals, scan_ts);
+ for (i = 0; i < read_size/scan_size; i++) {
+ for (k = 0; k < num_vals; k++) {
+ __s16 val = *(__s16 *)(&data[i*scan_size
+ (k)*2]);
- printf("%05d ", val);
+ printf("%05f ", (float)val*gain);
}
printf(" %lld\n",
- *(__s64 *)(&data[(i+1)*size_from_scanmode(NumVals, scan_ts)
+ *(__s64 *)(&data[(i + 1)
+ *size_from_scanmode(num_vals,
+ scan_ts)
- sizeof(__s64)]));
}
}
/* Stop the ring buffer */
- if (write_sysfs_int("ring_enable", RingBufferDirectoryName, 0) < 0) {
- printf("Failed to open the ring buffer control file \n");
- return -1;
- };
-
- /* Disconnect from the trigger - writing something that doesn't exist.*/
- write_sysfs_string_and_verify("trigger/current_trigger",
- BaseDirectoryName, "NULL");
- free(BaseDirectoryName);
- free(TriggerDirectoryName);
- free(RingBufferDirectoryName);
+ ret = write_sysfs_int("ring_enable", buf_dir_name, 0);
+ if (ret < 0)
+ goto error_close_buffer_event;
+
+ /* Disconnect from the trigger - just write a dummy name.*/
+ write_sysfs_string("trigger/current_trigger",
+ dev_dir_name, "NULL");
+
+error_close_buffer_event:
+ fclose(fp_ev);
+error_close_buffer_access:
+ close(fp);
+error_free_data:
free(data);
-
- return 0;
+error_free_buffer_access:
+ free(buffer_access);
+error_free_buffer_event:
+ free(buffer_event);
+error_free_buf_dir_name:
+ free(buf_dir_name);
+error_free_triggername:
+ free(trigger_name);
+error_free_dev_dir_name:
+ free(dev_dir_name);
+error_ret:
+ return ret;
}
diff --git a/drivers/staging/iio/Documentation/sysfs-class-iio b/drivers/staging/iio/Documentation/sysfs-class-iio
new file mode 100644
index 00000000000..714b4c57c82
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-class-iio
@@ -0,0 +1,294 @@
+
+What: /sys/bus/iio/devices/device[n]
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Hardware chip or device accessed by one communication port.
+ Corresponds to a grouping of sensor channels.
+
+What: /sys/bus/iio/devices/trigger[n]
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		An event-driven driver of data capture to an in-kernel buffer.
+ May be provided by a device driver that also has an IIO device
+ based on hardware generated events (e.g. data ready) or
+ provided by a separate driver for other hardware (e.g.
+ periodic timer, gpio or high resolution timer).
+ Contains trigger type specific elements. These do not
+ generalize well and hence are not documented in this file.
+
+What: /sys/bus/iio/devices/device[n]:buffer
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Link to /sys/class/iio/device[n]/device[n]:buffer. n indicates the
+		device with which this buffer is associated.
+
+What: /sys/.../device[n]/name
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Description of the physical chip / device. Typically a part
+ number.
+
+What: /sys/.../device[n]/sampling_frequency
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Some devices have internal clocks. This parameter sets the
+ resulting sampling frequency. In many devices this
+ parameter has an effect on input filters etc rather than
+ simply controlling when the input is sampled. As this
+		affects data ready triggers, hardware buffers and the sysfs
+		direct access interfaces, it may be found in any of the
+		relevant directories. If it affects all of the above
+ then it is to be found in the base device directory as here.
+
+What: /sys/.../device[n]/sampling_frequency_available
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ When the internal sampling clock can only take a small
+		discrete set of values, this file lists those available.
+
+What: /sys/.../device[n]/in[_name][m]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Raw (unscaled, no bias removal, etc.) voltage measurement from
+ channel m. name is used in special cases where this does
+ not correspond to externally available input (e.g. supply
+ voltage monitoring in which case the file is in_supply_raw).
+
+What: /sys/.../device[n]/in[_name][m]_offset
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ If known for a device, offset to be added to in[m]_raw prior
+ to scaling by in[_name][m]_scale in order to obtain voltage in
+ millivolts. Not present if the offset is always 0 or unknown.
+ If m is not present, then voltage offset applies to all in
+ channels. May be writable if a variable offset is controlled
+ by the device. Note that this is different to calibbias which
+ is for devices that apply offsets to compensate for variation
+ between different instances of the part, typically adjusted by
+ using some hardware supported calibration procedure.
+
+What: /sys/.../device[n]/in[_name][m]_offset_available
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ If a small number of discrete offset values are available, this
+		will be a space separated list. If these are independent (but
+ options the same) for individual offsets then m should not be
+ present.
+
+What: /sys/.../device[n]/in[_name][m]_offset_[min|max]
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		If a more or less continuous range of voltage offsets is supported
+ then these specify the minimum and maximum. If shared by all
+ in channels then m is not present.
+
+What: /sys/.../device[n]/in[_name][m]_calibbias
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Hardware applied calibration offset. (assumed to fix production
+ inaccuracies)
+
+What:		/sys/.../device[n]/in[_name][m]_calibscale
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Hardware applied calibration scale factor. (assumed to fix production
+ inaccuracies)
+
+What: /sys/.../device[n]/in[_name][m]_scale
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		If known for a device, scale to be applied to in[_name][m]_raw post
+ addition of in[_name][m]_offset in order to obtain the measured
+ voltage in millivolts. If shared across all in channels then m is not present.
+
+What: /sys/.../device[n]/in[m]-in[o]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw (unscaled) differential voltage measurement equivalent to
+		channel m - channel o, where these channel numbers apply to the
+		physically equivalent inputs when non-differential readings are
+		separately available. In differential-only parts, all that is
+		required is a consistent labelling.
+
+What: /sys/.../device[n]/accel[_x|_y|_z][m]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Acceleration in direction x, y or z (may be arbitrarily assigned
+ but should match other such assignments on device)
+ channel m (not present if only one accelerometer channel at
+ this orientation). Has all of the equivalent parameters as per in[m].
+ Units after application of scale and offset are m/s^2.
+
+What: /sys/.../device[n]/gyro[_x|_y|_z][m]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Angular velocity about axis x, y or z (may be arbitrarily assigned)
+ channel m (not present if only one gyroscope at this orientation).
+ Data converted by application of offset then scale to
+ radians per second. Has all the equivalent parameters as per in[m].
+
+What: /sys/.../device[n]/incli[_x|_y|_z][m]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Inclination raw reading about axis x, y or z (may be arbitrarily
+ assigned) channel m (not present if only one inclinometer at
+ this orientation). Data converted by application of offset
+		and scale to degrees.
+
+What: /sys/.../device[n]/magn[_x|_y|_z][m]_raw
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Magnetic field along axis x, y or z (may be arbitrarily assigned)
+ channel m (not present if only one magnetometer at this orientation).
+ Data converted by application of offset then scale to Gauss
+ Has all the equivalent modifiers as per in[m].
+
+What: /sys/.../device[n]/device[n]:event[m]
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configuration of which hardware generated events are passed up to
+ userspace. Some of these are a bit complex to generalize so this
+ section is a work in progress.
+
+What: /sys/.../device[n]:event[m]/dev
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ major:minor character device numbers for the event line.
+
+Taking accel_x0 as an example
+
+What: /sys/.../device[n]:event[m]/accel_x0_thresh[_high|_low]_en
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Event generated when accel_x0 passes a threshold in the specified
+		direction (or stays beyond one). If no direction is specified,
+		either direction triggers it.
+		Note the driver will assume the last p events requested are enabled,
+		where p is however many it supports. So if you want to be sure you
+		have set what you think you have, check the contents of these.
+		Drivers may have to buffer any parameters so that they are consistent
+		when a given event type is enabled at a future point (and not those
+		for whatever alarm was previously enabled).
+
+What: /sys/.../device[n]:event[m]/accel_x0_roc[_high|_low]_en
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Same as above but based on the first differential of the value.
+
+
+What: /sys/.../device[n]:event[m]/accel_x0[_thresh|_roc][_high|_low]_period
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ A period of time (microsecs) for which the condition must be broken
+ before an interrupt is triggered. Applies to all alarms if type is not
+ specified.
+
+What: /sys/.../device[n]:event[m]/accel_x0[_thresh|_roc][_high|_low]_value
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ The actual value of the threshold in raw device units obtained by
+		reverse application of scale and offset to the acceleration in m/s^2.
+
+What: /sys/.../device[n]/scan_elements
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Directory containing interfaces for elements that will be captured
+ for a single triggered sample set in the buffer.
+
+What: /sys/.../device[n]/scan_elements/[m]_accel_x0_en
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Scan element control for triggered data capture. m implies the
+ ordering within the buffer. Next the type is specified with
+ modifier and channel number as per the sysfs single channel
+ access above.
+
+What: /sys/.../device[n]/scan_elements/accel[_x0]_precision
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Scan element precision within the buffer. Note that the
+		data alignment restrictions must be read from within the
+		buffer to work out the full data alignment for data read
+		via the buffer_access chrdev. _x0 is dropped if shared across all
+		acceleration channels.
+
+What: /sys/.../device[n]/scan_elements/accel[_x0]_shift
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ A bit shift (to right) that must be applied prior to
+ extracting the bits specified by accel[_x0]_precision.
+
+What: /sys/.../device[n]/device[n]:buffer:event/dev
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Buffer for device n event character device major:minor numbers.
+
+What: /sys/.../device[n]/device[n]:buffer:access/dev
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Buffer for device n access character device major:minor numbers.
+
+What: /sys/.../device[n]:buffer/trigger
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ The name of the trigger source being used, as per string given
+ in /sys/class/iio/trigger[n]/name.
+
+What: /sys/.../device[n]:buffer/length
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of scans contained by the buffer.
+
+What: /sys/.../device[n]:buffer/bps
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Bytes per scan. Due to alignment fun, the scan may be larger
+ than implied directly by the scan_element parameters.
+
+What: /sys/.../device[n]:buffer/enable
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+		Actually starts the buffer capture. Will also start the trigger
+		if this is the first device using it and that is appropriate.
+
+What: /sys/.../device[n]:buffer/alignment
+KernelVersion: 2.6.35
+Contact: linux-iio@vger.kernel.org
+Description:
+ Minimum data alignment. Scan elements larger than this are aligned
+ to the nearest power of 2 times this. (may not be true in weird
+ hardware buffers that pack data well)
+
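
/*
 * A minimal sketch (not part of the patch) of the conversion the attribute
 * descriptions above define: (raw + offset) * scale gives the value in base
 * units (m/s^2 for accel channels, millivolts for in channels). The helper
 * name iio_raw_to_units is made up for illustration.
 */
static float iio_raw_to_units(int raw, float offset, float scale)
{
	/* Per the descriptions above, a missing offset file means an offset of 0 */
	return ((float)raw + offset) * scale;
}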
diff --git a/drivers/staging/iio/Documentation/userspace.txt b/drivers/staging/iio/Documentation/userspace.txt
index 661015a0b86..4838818f65e 100644
--- a/drivers/staging/iio/Documentation/userspace.txt
+++ b/drivers/staging/iio/Documentation/userspace.txt
@@ -56,5 +56,5 @@ KERNEL="ring_event_line*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_
KERNEL="event_line*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_event"
KERNEL="ring_access*", ID="spi1.0", DRIVER="lis3l02dq", NAME="iio/lis3l02dq_ring_access"
-The files, lis3l02dqbuffersimple.c and iio_util.h in this directory provide an example
+The files, lis3l02dqbuffersimple.c and iio_utils.h in this directory provide an example
of how to use the ring buffer and event interfaces.
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index ace99f6d116..b0e62449c62 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -41,6 +41,8 @@ config IIO_TRIGGER
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
+source "drivers/staging/iio/gyro/Kconfig"
+source "drivers/staging/iio/imu/Kconfig"
source "drivers/staging/iio/light/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 7ec021810a7..3502b39f084 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_IIO_SW_RING) += ring_sw.o
obj-y += accel/
obj-y += adc/
+obj-y += gyro/
+obj-y += imu/
obj-y += light/
obj-y += trigger/
\ No newline at end of file
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 3d3c3339dbc..b4e57d1bc87 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -3,6 +3,31 @@
#
comment "Accelerometers"
+config ADIS16209
+ tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ and accelerometer.
+
+config ADIS16220
+ tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices adis16220 programmable
+ digital vibration sensor.
+
+config ADIS16240
+ tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16240 programmable
+	  impact sensor and recorder.
+
config KXSD9
tristate "Kionix KXSD9 Accelerometer Driver"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index d5335f9094a..c34b13634c2 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -1,6 +1,17 @@
#
# Makefile for industrial I/O accelerometer drivers
#
+adis16209-y := adis16209_core.o
+adis16209-$(CONFIG_IIO_RING_BUFFER) += adis16209_ring.o adis16209_trigger.o
+obj-$(CONFIG_ADIS16209) += adis16209.o
+
+adis16220-y := adis16220_core.o
+obj-$(CONFIG_ADIS16220) += adis16220.o
+
+adis16240-y := adis16240_core.o
+adis16240-$(CONFIG_IIO_RING_BUFFER) += adis16240_ring.o adis16240_trigger.o
+obj-$(CONFIG_ADIS16240) += adis16240.o
+
obj-$(CONFIG_KXSD9) += kxsd9.o
lis3l02dq-y := lis3l02dq_core.o
diff --git a/drivers/staging/iio/accel/accel.h b/drivers/staging/iio/accel/accel.h
index d7fc7f98348..1b6e37f7620 100644
--- a/drivers/staging/iio/accel/accel.h
+++ b/drivers/staging/iio/accel/accel.h
@@ -2,6 +2,8 @@
#include "../sysfs.h"
/* Accelerometer types of attribute */
+#define IIO_DEV_ATTR_ACCEL_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(accel_offset, _mode, _show, _store, _addr)
#define IIO_DEV_ATTR_ACCEL_X_OFFSET(_mode, _show, _store, _addr) \
IIO_DEVICE_ATTR(accel_x_offset, _mode, _show, _store, _addr)
@@ -21,14 +23,17 @@
#define IIO_DEV_ATTR_ACCEL_Z_GAIN(_mode, _show, _store, _addr) \
IIO_DEVICE_ATTR(accel_z_gain, _mode, _show, _store, _addr)
+#define IIO_DEV_ATTR_ACCEL(_show, _addr) \
+ IIO_DEVICE_ATTR(accel_raw, S_IRUGO, _show, NULL, _addr)
+
#define IIO_DEV_ATTR_ACCEL_X(_show, _addr) \
- IIO_DEVICE_ATTR(accel_x, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(accel_x_raw, S_IRUGO, _show, NULL, _addr)
#define IIO_DEV_ATTR_ACCEL_Y(_show, _addr) \
- IIO_DEVICE_ATTR(accel_y, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(accel_y_raw, S_IRUGO, _show, NULL, _addr)
#define IIO_DEV_ATTR_ACCEL_Z(_show, _addr) \
- IIO_DEVICE_ATTR(accel_z, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(accel_z_raw, S_IRUGO, _show, NULL, _addr)
/* Thresholds are somewhat chip dependent - may need quite a few defs here */
/* For unified thresholds (shared across all directions */
@@ -61,7 +66,6 @@
#define IIO_DEV_ATTR_ACCEL_THRESH_Z(_mode, _show, _store, _addr) \
IIO_DEVICE_ATTR(thresh_accel_z, _mode, _show, _store, _addr)
-
/**
* IIO_EVENT_ATTR_ACCEL_X_HIGH: threshold event, x acceleration
* @_show: read x acceleration high threshold
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
new file mode 100644
index 00000000000..877fd2a4838
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -0,0 +1,193 @@
+#ifndef SPI_ADIS16209_H_
+#define SPI_ADIS16209_H_
+
+#define ADIS16209_STARTUP_DELAY 220 /* ms */
+
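+/* Register access helpers: a write command sets bit 7 of the register address */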
+#define ADIS16209_READ_REG(a) a
+#define ADIS16209_WRITE_REG(a) ((a) | 0x80)
+
+/* Flash memory write count */
+#define ADIS16209_FLASH_CNT 0x00
+/* Output, power supply */
+#define ADIS16209_SUPPLY_OUT 0x02
+/* Output, x-axis accelerometer */
+#define ADIS16209_XACCL_OUT 0x04
+/* Output, y-axis accelerometer */
+#define ADIS16209_YACCL_OUT 0x06
+/* Output, auxiliary ADC input */
+#define ADIS16209_AUX_ADC 0x08
+/* Output, temperature */
+#define ADIS16209_TEMP_OUT 0x0A
+/* Output, x-axis inclination */
+#define ADIS16209_XINCL_OUT 0x0C
+/* Output, y-axis inclination */
+#define ADIS16209_YINCL_OUT 0x0E
+/* Output, +/-180 vertical rotational position */
+#define ADIS16209_ROT_OUT 0x10
+/* Calibration, x-axis acceleration offset null */
+#define ADIS16209_XACCL_NULL 0x12
+/* Calibration, y-axis acceleration offset null */
+#define ADIS16209_YACCL_NULL 0x14
+/* Calibration, x-axis inclination offset null */
+#define ADIS16209_XINCL_NULL 0x16
+/* Calibration, y-axis inclination offset null */
+#define ADIS16209_YINCL_NULL 0x18
+/* Calibration, vertical rotation offset null */
+#define ADIS16209_ROT_NULL 0x1A
+/* Alarm 1 amplitude threshold */
+#define ADIS16209_ALM_MAG1 0x20
+/* Alarm 2 amplitude threshold */
+#define ADIS16209_ALM_MAG2 0x22
+/* Alarm 1, sample period */
+#define ADIS16209_ALM_SMPL1 0x24
+/* Alarm 2, sample period */
+#define ADIS16209_ALM_SMPL2 0x26
+/* Alarm control */
+#define ADIS16209_ALM_CTRL 0x28
+/* Auxiliary DAC data */
+#define ADIS16209_AUX_DAC 0x30
+/* General-purpose digital input/output control */
+#define ADIS16209_GPIO_CTRL 0x32
+/* Miscellaneous control */
+#define ADIS16209_MSC_CTRL 0x34
+/* Internal sample period (rate) control */
+#define ADIS16209_SMPL_PRD 0x36
+/* Operation, filter configuration */
+#define ADIS16209_AVG_CNT 0x38
+/* Operation, sleep mode control */
+#define ADIS16209_SLP_CNT 0x3A
+/* Diagnostics, system status register */
+#define ADIS16209_DIAG_STAT 0x3C
+/* Operation, system command register */
+#define ADIS16209_GLOB_CMD 0x3E
+
+#define ADIS16209_OUTPUTS 8
+
+/* MSC_CTRL */
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST (1 << 10)
+/* Self-test enable */
+#define ADIS16209_MSC_CTRL_SELF_TEST_EN (1 << 8)
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16209_MSC_CTRL_DATA_RDY_EN (1 << 2)
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16209_MSC_CTRL_ACTIVE_HIGH (1 << 1)
+/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 (1 << 0)
+
+/* DIAG_STAT */
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16209_DIAG_STAT_ALARM2 (1<<9)
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16209_DIAG_STAT_ALARM1 (1<<8)
+/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
+#define ADIS16209_DIAG_STAT_SELFTEST_FAIL (1<<5)
+/* SPI communications failure */
+#define ADIS16209_DIAG_STAT_SPI_FAIL (1<<3)
+/* Flash update failure */
+#define ADIS16209_DIAG_STAT_FLASH_UPT (1<<2)
+/* Power supply above 3.625 V */
+#define ADIS16209_DIAG_STAT_POWER_HIGH (1<<1)
+/* Power supply below 3.15 V */
+#define ADIS16209_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16209_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16209_GLOB_CMD_CLEAR_STAT (1<<4)
+#define ADIS16209_GLOB_CMD_FACTORY_CAL (1<<1)
+
+#define ADIS16209_MAX_TX 24
+#define ADIS16209_MAX_RX 24
+
+#define ADIS16209_ERROR_ACTIVE (1<<14)
+
+/**
+ * struct adis16209_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: work container for threshold event handling
+ * @last_timestamp: passing timestamp from the top half to the bottom half
+ * of the interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16209_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16209_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16209_scan {
+ ADIS16209_SCAN_SUPPLY,
+ ADIS16209_SCAN_ACC_X,
+ ADIS16209_SCAN_ACC_Y,
+ ADIS16209_SCAN_AUX_ADC,
+ ADIS16209_SCAN_TEMP,
+ ADIS16209_SCAN_INCLI_X,
+ ADIS16209_SCAN_INCLI_Y,
+ ADIS16209_SCAN_ROT,
+};
+
+void adis16209_remove_trigger(struct iio_dev *indio_dev);
+int adis16209_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16209_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+int adis16209_configure_ring(struct iio_dev *indio_dev);
+void adis16209_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16209_initialize_ring(struct iio_ring_buffer *ring);
+void adis16209_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16209_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16209_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16209_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16209_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16209_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16209_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16209_H_ */
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
new file mode 100644
index 00000000000..ac375c50f56
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -0,0 +1,615 @@
+/*
+ * ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16209.h"
+
+#define DRIVER_NAME "adis16209"
+
+static int adis16209_check_status(struct device *dev);
+
+/**
+ * adis16209_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16209_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16209_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16209_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: the value to be written
+ **/
+static int adis16209_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16209_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16209_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16209_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second register
+ * is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16209_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16209_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16209_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16209_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16209_ERROR_ACTIVE)
+ adis16209_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16209_read_14bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16209_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16209_ERROR_ACTIVE)
+ adis16209_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x3FFF);
+}
+
+static ssize_t adis16209_read_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u16 val;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16209_spi_read_reg_16(dev, ADIS16209_TEMP_OUT, (u16 *)&val);
+ if (ret)
+ goto error_ret;
+
+ if (val & ADIS16209_ERROR_ACTIVE)
+ adis16209_check_status(dev);
+
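+	/* Mask down to the 12-bit temperature field */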
+ val &= 0xFFF;
+ ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static ssize_t adis16209_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s16 val = 0;
+ ssize_t ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = adis16209_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (!ret) {
+ if (val & ADIS16209_ERROR_ACTIVE)
+ adis16209_check_status(dev);
+
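+		/* Sign extend the 14-bit two's complement value */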
+ val = ((s16)(val << 2) >> 2);
+ ret = sprintf(buf, "%d\n", val);
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16209_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16209_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16209_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16209_spi_write_reg_8(dev,
+ ADIS16209_GLOB_CMD,
+ ADIS16209_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16209_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16209_reset(dev);
+ }
+ return -EINVAL;
+}
+
+int adis16209_set_irq(struct device *dev, bool enable)
+{
+ int ret = 0;
+ u16 msc;
+
+ ret = adis16209_spi_read_reg_16(dev, ADIS16209_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
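+	/* Data ready interrupt on DIO1, active high */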
+ msc |= ADIS16209_MSC_CTRL_ACTIVE_HIGH;
+ msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS16209_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16209_spi_write_reg_16(dev, ADIS16209_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+static int adis16209_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16209_spi_read_reg_16(dev, ADIS16209_DIAG_STAT, &status);
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0x1F;
+
+ if (status & ADIS16209_DIAG_STAT_SELFTEST_FAIL)
+ dev_err(dev, "Self test failure\n");
+ if (status & ADIS16209_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16209_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16209_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 3.625V\n");
+ if (status & ADIS16209_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 3.15V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16209_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16209_spi_write_reg_16(dev,
+ ADIS16209_MSC_CTRL,
+ ADIS16209_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16209_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16209_initial_setup(struct adis16209_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16209_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16209_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16209_check_status(dev);
+ if (ret) {
+ adis16209_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16209_STARTUP_DELAY);
+ ret = adis16209_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16209_read_14bit_unsigned,
+ ADIS16209_SUPPLY_OUT);
+static IIO_CONST_ATTR(in_supply_scale, "0.30518");
+static IIO_DEV_ATTR_IN_RAW(0, adis16209_read_12bit_unsigned,
+ ADIS16209_AUX_ADC);
+static IIO_CONST_ATTR(in0_scale, "0.6105");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16209_read_14bit_signed,
+ ADIS16209_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16209_read_14bit_signed,
+ ADIS16209_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16209_read_14bit_signed,
+ adis16209_write_16bit,
+ ADIS16209_XACCL_NULL);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16209_read_14bit_signed,
+ adis16209_write_16bit,
+ ADIS16209_YACCL_NULL);
+static IIO_CONST_ATTR(accel_scale, "0.24414");
+
+static IIO_DEV_ATTR_INCLI_X(adis16209_read_14bit_signed,
+ ADIS16209_XINCL_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16209_read_14bit_signed,
+ ADIS16209_YINCL_OUT);
+static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16209_read_14bit_signed,
+ adis16209_write_16bit,
+				   ADIS16209_XINCL_NULL);
+static IIO_DEV_ATTR_INCLI_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16209_read_14bit_signed,
+ adis16209_write_16bit,
+				   ADIS16209_YINCL_NULL);
+static IIO_CONST_ATTR(incli_scale, "0.025");
+
+static IIO_DEVICE_ATTR(rot_raw, S_IRUGO, adis16209_read_14bit_signed,
+ NULL, ADIS16209_ROT_OUT);
+
+static IIO_DEV_ATTR_TEMP(adis16209_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16209_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16209");
+
+static struct attribute *adis16209_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16209_event_attribute_group = {
+ .attrs = adis16209_event_attributes,
+};
+
+static struct attribute *adis16209_attributes[] = {
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_temp.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_const_attr_in0_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_incli_x_raw.dev_attr.attr,
+ &iio_dev_attr_incli_y_raw.dev_attr.attr,
+ &iio_dev_attr_incli_x_offset.dev_attr.attr,
+ &iio_dev_attr_incli_y_offset.dev_attr.attr,
+ &iio_const_attr_incli_scale.dev_attr.attr,
+ &iio_dev_attr_rot_raw.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16209_attribute_group = {
+ .attrs = adis16209_attributes,
+};
+
+static int __devinit adis16209_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16209_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16209_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16209_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16209_event_attribute_group;
+ st->indio_dev->attrs = &adis16209_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16209_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16209_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16209");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16209_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16209_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16209_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16209_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16209_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16209_remove(struct spi_device *spi)
+{
+ struct adis16209_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16209_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16209_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16209_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16209_driver = {
+ .driver = {
+ .name = "adis16209",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16209_probe,
+ .remove = __devexit_p(adis16209_remove),
+};
+
+static __init int adis16209_init(void)
+{
+ return spi_register_driver(&adis16209_driver);
+}
+module_init(adis16209_init);
+
+static __exit void adis16209_exit(void)
+{
+ spi_unregister_driver(&adis16209_driver);
+}
+module_exit(adis16209_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
new file mode 100644
index 00000000000..533e2857491
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -0,0 +1,266 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16209.h"
+
+/**
+ * combine_8_to_16() utility function to combine two u8s into one u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
+
+static IIO_SCAN_EL_C(supply, ADIS16209_SCAN_SUPPLY, IIO_UNSIGNED(14),
+ ADIS16209_SUPPLY_OUT, NULL);
+static IIO_SCAN_EL_C(accel_x, ADIS16209_SCAN_ACC_X, IIO_SIGNED(14),
+ ADIS16209_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16209_SCAN_ACC_Y, IIO_SIGNED(14),
+ ADIS16209_YACCL_OUT, NULL);
+static IIO_SCAN_EL_C(aux_adc, ADIS16209_SCAN_AUX_ADC, IIO_UNSIGNED(12),
+ ADIS16209_AUX_ADC, NULL);
+static IIO_SCAN_EL_C(temp, ADIS16209_SCAN_TEMP, IIO_UNSIGNED(12),
+ ADIS16209_TEMP_OUT, NULL);
+static IIO_SCAN_EL_C(incli_x, ADIS16209_SCAN_INCLI_X, IIO_SIGNED(14),
+ ADIS16209_XINCL_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16209_SCAN_INCLI_Y, IIO_SIGNED(14),
+ ADIS16209_YINCL_OUT, NULL);
+static IIO_SCAN_EL_C(rot, ADIS16209_SCAN_ROT, IIO_SIGNED(14),
+ ADIS16209_ROT_OUT, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(8);
+
+static struct attribute *adis16209_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_aux_adc.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_scan_el_incli_x.dev_attr.attr,
+ &iio_scan_el_incli_y.dev_attr.attr,
+ &iio_scan_el_rot.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16209_scan_el_group = {
+ .attrs = adis16209_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16209_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16209_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16209_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16209_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16209_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
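+ /* Each transfer clocks out the address of the next register while the
+ * previous register's value is clocked in, so rx lags tx by one
+ * transfer; hence ADIS16209_OUTPUTS + 1 transfers, with rx_buf only
+ * set from the second transfer onwards. */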
+ for (i = 0; i <= ADIS16209_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 20;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ st->tx[2 * i]
+ = ADIS16209_READ_REG(ADIS16209_SUPPLY_OUT + 2 * i);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16209_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16209_state *st
+ = container_of(work_s, struct adis16209_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16209_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* The s64 timestamp must be 8 byte aligned: round the s16 sample count up to a multiple of 4 */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+/* In these circumstances, is it better to go with unaligned packing and
+ * deal with the cost? */
+static int adis16209_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count)
+ /* Timestamp (aligned to s64) and data */
+ size = (((indio_dev->scan_count * sizeof(s16))
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+static int adis16209_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16209_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16209_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16209_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16209_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_rot.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_aux_adc.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_incli_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_incli_y.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16209_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16209_data_rdy_ring_preenable;
+ ring->postenable = &adis16209_data_rdy_ring_postenable;
+ ring->predisable = &adis16209_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16209_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16209_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16209_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16209_trigger.c b/drivers/staging/iio/accel/adis16209_trigger.c
new file mode 100644
index 00000000000..4a0507c9a13
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16209_trigger.c
@@ -0,0 +1,124 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16209.h"
+
+/**
+ * adis16209_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16209_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16209_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16209_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16209_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16209_trigger_attr_group = {
+ .attrs = adis16209_trigger_attrs,
+};
+
+/**
+ * adis16209_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16209_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16209_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16209_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16209_trig_try_reen() try re-enabling irq for data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16209_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16209_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16209_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16209_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16209-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16209_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16209_trig_try_reen;
+ st->trig->control_attrs = &adis16209_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16209_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16209_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
new file mode 100644
index 00000000000..2abf4850b37
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16220.h
@@ -0,0 +1,147 @@
+#ifndef SPI_ADIS16220_H_
+#define SPI_ADIS16220_H_
+
+#define ADIS16220_STARTUP_DELAY 220 /* ms */
+
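+/* Register access: reads send the register address as is, writes set bit 7 of the address byte */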
+#define ADIS16220_READ_REG(a) a
+#define ADIS16220_WRITE_REG(a) ((a) | 0x80)
+
+/* Flash memory write count */
+#define ADIS16220_FLASH_CNT 0x00
+/* Control, acceleration offset adjustment control */
+#define ADIS16220_ACCL_NULL 0x02
+/* Control, AIN1 offset adjustment control */
+#define ADIS16220_AIN1_NULL 0x04
+/* Control, AIN2 offset adjustment control */
+#define ADIS16220_AIN2_NULL 0x06
+/* Output, power supply during capture */
+#define ADIS16220_CAPT_SUPPLY 0x0A
+/* Output, temperature during capture */
+#define ADIS16220_CAPT_TEMP 0x0C
+/* Output, peak acceleration during capture */
+#define ADIS16220_CAPT_PEAKA 0x0E
+/* Output, peak AIN1 level during capture */
+#define ADIS16220_CAPT_PEAK1 0x10
+/* Output, peak AIN2 level during capture */
+#define ADIS16220_CAPT_PEAK2 0x12
+/* Output, capture buffer for acceleration */
+#define ADIS16220_CAPT_BUFA 0x14
+/* Output, capture buffer for AIN1 */
+#define ADIS16220_CAPT_BUF1 0x16
+/* Output, capture buffer for AIN2 */
+#define ADIS16220_CAPT_BUF2 0x18
+/* Control, capture buffer address pointer */
+#define ADIS16220_CAPT_PNTR 0x1A
+/* Control, capture control register */
+#define ADIS16220_CAPT_CTRL 0x1C
+/* Control, capture period (automatic mode) */
+#define ADIS16220_CAPT_PRD 0x1E
+/* Control, Alarm A, acceleration peak threshold */
+#define ADIS16220_ALM_MAGA 0x20
+/* Control, Alarm 1, AIN1 peak threshold */
+#define ADIS16220_ALM_MAG1 0x22
+/* Control, Alarm 2, AIN2 peak threshold */
+#define ADIS16220_ALM_MAG2 0x24
+/* Control, Alarm S, peak threshold */
+#define ADIS16220_ALM_MAGS 0x26
+/* Control, alarm configuration register */
+#define ADIS16220_ALM_CTRL 0x28
+/* Control, general I/O configuration */
+#define ADIS16220_GPIO_CTRL 0x32
+/* Control, self-test control, AIN configuration */
+#define ADIS16220_MSC_CTRL 0x34
+/* Control, digital I/O configuration */
+#define ADIS16220_DIO_CTRL 0x36
+/* Control, filter configuration */
+#define ADIS16220_AVG_CNT 0x38
+/* Status, system status */
+#define ADIS16220_DIAG_STAT 0x3C
+/* Control, system commands */
+#define ADIS16220_GLOB_CMD 0x3E
+/* Status, self-test response */
+#define ADIS16220_ST_DELTA 0x40
+/* Lot Identification Code 1 */
+#define ADIS16220_LOT_ID1 0x52
+/* Lot Identification Code 2 */
+#define ADIS16220_LOT_ID2 0x54
+/* Product identifier; convert to decimal = 16220 */
+#define ADIS16220_PROD_ID 0x56
+/* Serial number */
+#define ADIS16220_SERIAL_NUM 0x58
+
+#define ADIS16220_CAPTURE_SIZE 2048
+
+/* MSC_CTRL */
+#define ADIS16220_MSC_CTRL_SELF_TEST_EN (1 << 8)
+#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 (1 << 1)
+#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 (1 << 0)
+
+/* DIO_CTRL */
+#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (3<<4)
+#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (3<<2)
+#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH (1<<1)
+#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH (1<<0)
+
+/* DIAG_STAT */
+/* AIN2 sample > ALM_MAG2 */
+#define ADIS16220_DIAG_STAT_ALM_MAG2 (1<<14)
+/* AIN1 sample > ALM_MAG1 */
+#define ADIS16220_DIAG_STAT_ALM_MAG1 (1<<13)
+/* Acceleration sample > ALM_MAGA */
+#define ADIS16220_DIAG_STAT_ALM_MAGA (1<<12)
+/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
+#define ADIS16220_DIAG_STAT_ALM_MAGS (1<<11)
+/* |Peak value in AIN2 data capture| > ALM_MAG2 */
+#define ADIS16220_DIAG_STAT_PEAK_AIN2 (1<<10)
+/* |Peak value in AIN1 data capture| > ALM_MAG1 */
+#define ADIS16220_DIAG_STAT_PEAK_AIN1 (1<<9)
+/* |Peak value in acceleration data capture| > ALM_MAGA */
+#define ADIS16220_DIAG_STAT_PEAK_ACCEL (1<<8)
+/* Data ready, capture complete */
+#define ADIS16220_DIAG_STAT_DATA_RDY (1<<7)
+#define ADIS16220_DIAG_STAT_FLASH_CHK (1<<6)
+#define ADIS16220_DIAG_STAT_SELF_TEST (1<<5)
+/* Capture period violation/interruption */
+#define ADIS16220_DIAG_STAT_VIOLATION (1<<4)
+/* SPI communications failure */
+#define ADIS16220_DIAG_STAT_SPI_FAIL (1<<3)
+/* Flash update failure */
+#define ADIS16220_DIAG_STAT_FLASH_UPT (1<<2)
+/* Power supply above 3.625 V */
+#define ADIS16220_DIAG_STAT_POWER_HIGH (1<<1)
+/* Power supply below 3.15 V */
+#define ADIS16220_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16220_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16220_GLOB_CMD_SELF_TEST (1<<2)
+#define ADIS16220_GLOB_CMD_PWR_DOWN (1<<1)
+
+#define ADIS16220_MAX_TX 2048
+#define ADIS16220_MAX_RX 2048
+
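+/* SPI clock rates in Hz used for burst capture reads and normal register access */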
+#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16220_state - device instance specific data
+ * @us: actual spi_device
+ * @indio_dev: industrial I/O device structure
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16220_state {
+ struct spi_device *us;
+ struct iio_dev *indio_dev;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
new file mode 100644
index 00000000000..6de439fd167
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -0,0 +1,670 @@
+/*
+ * ADIS16220 Programmable Digital Vibration Sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "../adc/adc.h"
+
+#include "adis16220.h"
+
+#define DRIVER_NAME "adis16220"
+
+/**
+ * adis16220_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16220_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16220_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16220_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16220_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16220_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16220_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16220_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second register
+ * is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16220_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16220_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16220_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16220_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
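+ /* Sign extend the bits-wide value: shift it up to bit 15, then arithmetic-shift back down */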
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16220_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16220_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16220_read_16bit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16220_spi_read_signed(dev, attr, buf, 16);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16220_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16220_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16220_capture(struct device *dev)
+{
+ int ret;
+ ret = adis16220_spi_write_reg_16(dev,
+ ADIS16220_GLOB_CMD,
+ 0xBF08); /* initiates a manual data capture */
+ if (ret)
+ dev_err(dev, "problem beginning capture");
+
+ msleep(10); /* delay for capture to finish */
+
+ return ret;
+}
+
+static int adis16220_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16220_spi_write_reg_8(dev,
+ ADIS16220_GLOB_CMD,
+ ADIS16220_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16220_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16220_reset(dev) == 0 ? len : -EIO;
+ }
+ return -EINVAL;
+}
+
+static ssize_t adis16220_write_capture(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16220_capture(dev) == 0 ? len : -EIO;
+ }
+ return -EINVAL;
+}
+
+static int adis16220_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16220_spi_read_reg_16(dev, ADIS16220_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0x7F;
+
+ if (status & ADIS16220_DIAG_STAT_VIOLATION)
+ dev_err(dev, "Capture period violation/interruption\n");
+ if (status & ADIS16220_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16220_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16220_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16220_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16220_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16220_spi_write_reg_16(dev,
+ ADIS16220_MSC_CTRL,
+ ADIS16220_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16220_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16220_initial_setup(struct adis16220_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Do self test */
+ ret = adis16220_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16220_check_status(dev);
+ if (ret) {
+ adis16220_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16220_STARTUP_DELAY);
+ ret = adis16220_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static ssize_t adis16220_capture_buffer_read(struct adis16220_state *st,
+ char *buf,
+ loff_t off,
+ size_t count,
+ int addr)
+{
+ struct spi_message msg;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .tx_buf = st->tx,
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+ int ret;
+ int i;
+
+ if (unlikely(!count))
+ return count;
+
+ if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
+ return -EINVAL;
+
+ if (off + count > ADIS16220_CAPTURE_SIZE)
+ count = ADIS16220_CAPTURE_SIZE - off;
+
+ /* Write the start position into the capture buffer address pointer;
+ * the pointer indexes 16-bit samples, so convert the byte offset. */
+ ret = adis16220_spi_write_reg_16(&st->indio_dev->dev,
+ ADIS16220_CAPT_PNTR,
+ off >> 1);
+ if (ret)
+ return -EIO;
+
+ /* read count/2 values from capture buffer */
+ mutex_lock(&st->buf_lock);
+
+ for (i = 0; i < count; i += 2) {
+ st->tx[i] = ADIS16220_READ_REG(addr);
+ st->tx[i + 1] = 0;
+ }
+ xfers[1].len = count;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ mutex_unlock(&st->buf_lock);
+ return -EIO;
+ }
+
+ memcpy(buf, st->rx, count);
+
+ mutex_unlock(&st->buf_lock);
+ return count;
+}
+
+static ssize_t adis16220_accel_bin_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf,
+ loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+
+ return adis16220_capture_buffer_read(st, buf,
+ off, count,
+ ADIS16220_CAPT_BUFA);
+}
+
+static struct bin_attribute accel_bin = {
+ .attr = {
+ .name = "accel_bin",
+ .mode = S_IRUGO,
+ },
+ .read = adis16220_accel_bin_read,
+ .size = ADIS16220_CAPTURE_SIZE,
+};
+
+static ssize_t adis16220_adc1_bin_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+
+ return adis16220_capture_buffer_read(st, buf,
+ off, count,
+ ADIS16220_CAPT_BUF1);
+}
+
+static struct bin_attribute adc1_bin = {
+ .attr = {
+ .name = "in0_bin",
+ .mode = S_IRUGO,
+ },
+ .read = adis16220_adc1_bin_read,
+ .size = ADIS16220_CAPTURE_SIZE,
+};
+
+static ssize_t adis16220_adc2_bin_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+
+ return adis16220_capture_buffer_read(st, buf,
+ off, count,
+ ADIS16220_CAPT_BUF2);
+}
+
+static struct bin_attribute adc2_bin = {
+ .attr = {
+ .name = "in1_bin",
+ .mode = S_IRUGO,
+ },
+ .read = adis16220_adc2_bin_read,
+ .size = ADIS16220_CAPTURE_SIZE,
+};
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16220_read_12bit_unsigned,
+ ADIS16220_CAPT_SUPPLY);
+static IIO_CONST_ATTR(in_supply_scale, "0.0012207");
+static IIO_DEV_ATTR_ACCEL(adis16220_read_16bit, ADIS16220_CAPT_BUFA);
+static IIO_DEVICE_ATTR(accel_peak_raw, S_IRUGO, adis16220_read_16bit,
+ NULL, ADIS16220_CAPT_PEAKA);
+static IIO_DEV_ATTR_ACCEL_OFFSET(S_IWUSR | S_IRUGO,
+ adis16220_read_16bit,
+ adis16220_write_16bit,
+ ADIS16220_ACCL_NULL);
+static IIO_DEV_ATTR_TEMP_RAW(adis16220_read_12bit_unsigned);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEV_ATTR_IN_RAW(0, adis16220_read_16bit, ADIS16220_CAPT_BUF1);
+static IIO_DEV_ATTR_IN_RAW(1, adis16220_read_16bit, ADIS16220_CAPT_BUF2);
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
+ adis16220_write_reset, 0);
+
+#define IIO_DEV_ATTR_CAPTURE(_store) \
+ IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
+
+static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
+
+#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
+
+static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
+ adis16220_read_16bit,
+ adis16220_write_16bit,
+ ADIS16220_CAPT_PNTR);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("100200");
+
+static IIO_CONST_ATTR(name, "adis16220");
+
+static struct attribute *adis16220_attributes[] = {
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_accel_raw.dev_attr.attr,
+ &iio_dev_attr_accel_offset.dev_attr.attr,
+ &iio_dev_attr_accel_peak_raw.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_dev_attr_capture.dev_attr.attr,
+ &iio_dev_attr_capture_count.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16220_attribute_group = {
+ .attrs = adis16220_attributes,
+};
+
+static int __devinit adis16220_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16220_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16220_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16220_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->attrs = &adis16220_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+ regdone = 1;
+
+ ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+ if (ret)
+ goto error_free_dev;
+
+ ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
+ if (ret)
+ goto error_rm_accel_bin;
+
+ ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
+ if (ret)
+ goto error_rm_adc1_bin;
+
+ /* Get the device into a sane initial state */
+ ret = adis16220_initial_setup(st);
+ if (ret)
+ goto error_rm_adc2_bin;
+ return 0;
+
+error_rm_adc2_bin:
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
+error_rm_adc1_bin:
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
+error_rm_accel_bin:
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16220_remove(struct spi_device *spi)
+{
+ struct adis16220_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
+ sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16220_driver = {
+ .driver = {
+ .name = "adis16220",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16220_probe,
+ .remove = __devexit_p(adis16220_remove),
+};
+
+static __init int adis16220_init(void)
+{
+ return spi_register_driver(&adis16220_driver);
+}
+module_init(adis16220_init);
+
+static __exit void adis16220_exit(void)
+{
+ spi_unregister_driver(&adis16220_driver);
+}
+module_exit(adis16220_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
new file mode 100644
index 00000000000..dcff43c7523
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -0,0 +1,218 @@
+#ifndef SPI_ADIS16240_H_
+#define SPI_ADIS16240_H_
+
+#define ADIS16240_STARTUP_DELAY 220 /* ms */
+
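+/* Register access: reads send the register address as is, writes set bit 7 of the address byte */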
+#define ADIS16240_READ_REG(a) a
+#define ADIS16240_WRITE_REG(a) ((a) | 0x80)
+
+/* Flash memory write count */
+#define ADIS16240_FLASH_CNT 0x00
+/* Output, power supply */
+#define ADIS16240_SUPPLY_OUT 0x02
+/* Output, x-axis accelerometer */
+#define ADIS16240_XACCL_OUT 0x04
+/* Output, y-axis accelerometer */
+#define ADIS16240_YACCL_OUT 0x06
+/* Output, z-axis accelerometer */
+#define ADIS16240_ZACCL_OUT 0x08
+/* Output, auxiliary ADC input */
+#define ADIS16240_AUX_ADC 0x0A
+/* Output, temperature */
+#define ADIS16240_TEMP_OUT 0x0C
+/* Output, x-axis acceleration peak */
+#define ADIS16240_XPEAK_OUT 0x0E
+/* Output, y-axis acceleration peak */
+#define ADIS16240_YPEAK_OUT 0x10
+/* Output, z-axis acceleration peak */
+#define ADIS16240_ZPEAK_OUT 0x12
+/* Output, sum-of-squares acceleration peak */
+#define ADIS16240_XYZPEAK_OUT 0x14
+/* Output, Capture Buffer 1, X and Y acceleration */
+#define ADIS16240_CAPT_BUF1 0x16
+/* Output, Capture Buffer 2, Z acceleration */
+#define ADIS16240_CAPT_BUF2 0x18
+/* Diagnostic, error flags */
+#define ADIS16240_DIAG_STAT 0x1A
+/* Diagnostic, event counter */
+#define ADIS16240_EVNT_CNTR 0x1C
+/* Diagnostic, check sum value from firmware test */
+#define ADIS16240_CHK_SUM 0x1E
+/* Calibration, x-axis acceleration offset adjustment */
+#define ADIS16240_XACCL_OFF 0x20
+/* Calibration, y-axis acceleration offset adjustment */
+#define ADIS16240_YACCL_OFF 0x22
+/* Calibration, z-axis acceleration offset adjustment */
+#define ADIS16240_ZACCL_OFF 0x24
+/* Clock, hour and minute */
+#define ADIS16240_CLK_TIME 0x2E
+/* Clock, month and day */
+#define ADIS16240_CLK_DATE 0x30
+/* Clock, year */
+#define ADIS16240_CLK_YEAR 0x32
+/* Wake-up setting, hour and minute */
+#define ADIS16240_WAKE_TIME 0x34
+/* Wake-up setting, month and day */
+#define ADIS16240_WAKE_DATE 0x36
+/* Alarm 1 amplitude threshold */
+#define ADIS16240_ALM_MAG1 0x38
+/* Alarm 2 amplitude threshold */
+#define ADIS16240_ALM_MAG2 0x3A
+/* Alarm control */
+#define ADIS16240_ALM_CTRL 0x3C
+/* Capture, external trigger control */
+#define ADIS16240_XTRIG_CTRL 0x3E
+/* Capture, address pointer */
+#define ADIS16240_CAPT_PNTR 0x40
+/* Capture, configuration and control */
+#define ADIS16240_CAPT_CTRL 0x42
+/* General-purpose digital input/output control */
+#define ADIS16240_GPIO_CTRL 0x44
+/* Miscellaneous control */
+#define ADIS16240_MSC_CTRL 0x46
+/* Internal sample period (rate) control */
+#define ADIS16240_SMPL_PRD 0x48
+/* System command */
+#define ADIS16240_GLOB_CMD 0x4A
+
+#define ADIS16240_OUTPUTS 6
+
+/* MSC_CTRL */
+/* Enables sum-of-squares output (XYZPEAK_OUT) */
+#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN (1 << 15)
+/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
+#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN (1 << 14)
+/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
+#define ADIS16240_MSC_CTRL_SELF_TEST_EN (1 << 8)
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16240_MSC_CTRL_DATA_RDY_EN (1 << 2)
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16240_MSC_CTRL_ACTIVE_HIGH (1 << 1)
+/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 (1 << 0)
+
+/* DIAG_STAT */
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16240_DIAG_STAT_ALARM2 (1<<9)
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16240_DIAG_STAT_ALARM1 (1<<8)
+/* Capture buffer full: 1 = capture buffer is full */
+#define ADIS16240_DIAG_STAT_CPT_BUF_FUL (1<<7)
+/* Flash test, checksum flag: 1 = mismatch, 0 = match */
+#define ADIS16240_DIAG_STAT_CHKSUM (1<<6)
+/* Power-on, self-test flag: 1 = failure, 0 = pass */
+#define ADIS16240_DIAG_STAT_PWRON_FAIL (1<<5)
+/* Power-on self-test: 1 = in-progress, 0 = complete */
+#define ADIS16240_DIAG_STAT_PWRON_BUSY (1<<4)
+/* SPI communications failure */
+#define ADIS16240_DIAG_STAT_SPI_FAIL (1<<3)
+/* Flash update failure */
+#define ADIS16240_DIAG_STAT_FLASH_UPT (1<<2)
+/* Power supply above 3.625 V */
+#define ADIS16240_DIAG_STAT_POWER_HIGH (1<<1)
+/* Power supply below 3.15 V */
+#define ADIS16240_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16240_GLOB_CMD_RESUME (1<<8)
+#define ADIS16240_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16240_GLOB_CMD_STANDBY (1<<2)
+
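+/* Flag set in output register reads when an error/alarm condition is active; prompts a DIAG_STAT check */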
+#define ADIS16240_ERROR_ACTIVE (1<<14)
+
+#define ADIS16240_MAX_TX 24
+#define ADIS16240_MAX_RX 24
+
+/**
+ * struct adis16240_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: continuous work for threshold event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16240_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16240_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16240_scan {
+ ADIS16240_SCAN_SUPPLY,
+ ADIS16240_SCAN_ACC_X,
+ ADIS16240_SCAN_ACC_Y,
+ ADIS16240_SCAN_ACC_Z,
+ ADIS16240_SCAN_AUX_ADC,
+ ADIS16240_SCAN_TEMP,
+};
+
+void adis16240_remove_trigger(struct iio_dev *indio_dev);
+int adis16240_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16240_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16240_configure_ring(struct iio_dev *indio_dev);
+void adis16240_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16240_initialize_ring(struct iio_ring_buffer *ring);
+void adis16240_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16240_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16240_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16240_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16240_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16240_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16240_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16240_H_ */
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
new file mode 100644
index 00000000000..54fd6d77412
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -0,0 +1,599 @@
+/*
+ * ADIS16240 Programmable Impact Sensor and Recorder driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "../adc/adc.h"
+
+#include "adis16240.h"
+
+#define DRIVER_NAME "adis16240"
+
+static int adis16240_check_status(struct device *dev);
+
+/**
+ * adis16240_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16240_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16240_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16240_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16240_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16240_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16240_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16240_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second register
+ * is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16240_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16240_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16240_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16240_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16240_ERROR_ACTIVE)
+ adis16240_check_status(dev);
+
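+ /* Sign extend the bits-wide value: shift it up to bit 15, then arithmetic-shift back down */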
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16240_read_10bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16240_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16240_ERROR_ACTIVE)
+ adis16240_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x03FF);
+}
+
+static ssize_t adis16240_read_10bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16240_spi_read_signed(dev, attr, buf, 10);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16240_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16240_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16240_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16240_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static int adis16240_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16240_spi_write_reg_8(dev,
+ ADIS16240_GLOB_CMD,
+ ADIS16240_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16240_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16240_reset(dev) == 0 ? len : -EIO;
+ }
+ return -EINVAL;
+}
+
+int adis16240_set_irq(struct device *dev, bool enable)
+{
+ int ret = 0;
+ u16 msc;
+
+ ret = adis16240_spi_read_reg_16(dev, ADIS16240_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
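+ /* Data ready routed to DIO1, active high; only the enable bit is toggled */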
+ msc |= ADIS16240_MSC_CTRL_ACTIVE_HIGH;
+ msc &= ~ADIS16240_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS16240_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16240_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16240_spi_write_reg_16(dev, ADIS16240_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+static int adis16240_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16240_spi_write_reg_16(dev,
+ ADIS16240_MSC_CTRL,
+ ADIS16240_MSC_CTRL_SELF_TEST_EN);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ msleep(ADIS16240_STARTUP_DELAY);
+
+ adis16240_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16240_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16240_spi_read_reg_16(dev, ADIS16240_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+
+ ret = status & 0x2F;
+ if (status & ADIS16240_DIAG_STAT_PWRON_FAIL)
+ dev_err(dev, "Power-on, self-test fail\n");
+ if (status & ADIS16240_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16240_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16240_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 3.625V\n");
+ if (status & ADIS16240_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 2.225V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16240_initial_setup(struct adis16240_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16240_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16240_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16240_check_status(dev);
+ if (ret) {
+ adis16240_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16240_STARTUP_DELAY);
+ ret = adis16240_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16240_read_10bit_unsigned,
+ ADIS16240_SUPPLY_OUT);
+static IIO_DEV_ATTR_IN_RAW(0, adis16240_read_10bit_signed,
+ ADIS16240_AUX_ADC);
+static IIO_CONST_ATTR(in_supply_scale, "0.00488");
+static IIO_DEV_ATTR_ACCEL_X(adis16240_read_10bit_signed,
+ ADIS16240_XACCL_OUT);
+static IIO_DEVICE_ATTR(accel_x_peak_raw, S_IRUGO,
+ adis16240_read_10bit_signed, NULL,
+ ADIS16240_XPEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16240_read_10bit_signed,
+ ADIS16240_YACCL_OUT);
+static IIO_DEVICE_ATTR(accel_y_peak_raw, S_IRUGO,
+ adis16240_read_10bit_signed, NULL,
+ ADIS16240_YPEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_Z(adis16240_read_10bit_signed,
+ ADIS16240_ZACCL_OUT);
+static IIO_DEVICE_ATTR(accel_z_peak_raw, S_IRUGO,
+ adis16240_read_10bit_signed, NULL,
+ ADIS16240_ZPEAK_OUT);
+
+static IIO_DEVICE_ATTR(accel_xyz_squared_peak_raw, S_IRUGO,
+ adis16240_read_12bit_signed, NULL,
+ ADIS16240_XYZPEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16240_read_10bit_signed,
+ adis16240_write_16bit,
+ ADIS16240_XACCL_OFF);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16240_read_10bit_signed,
+ adis16240_write_16bit,
+ ADIS16240_YACCL_OFF);
+static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
+ adis16240_read_10bit_signed,
+ adis16240_write_16bit,
+ ADIS16240_ZACCL_OFF);
+static IIO_DEV_ATTR_TEMP_RAW(adis16240_read_10bit_unsigned);
+static IIO_CONST_ATTR(temp_scale, "0.244");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16240_write_reset, 0);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("4096");
+
+static IIO_CONST_ATTR(name, "adis16240");
+
+static struct attribute *adis16240_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16240_event_attribute_group = {
+ .attrs = adis16240_event_attributes,
+};
+
+static struct attribute *adis16240_attributes[] = {
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_x_peak_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_peak_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_accel_z_peak_raw.dev_attr.attr,
+ &iio_dev_attr_accel_xyz_squared_peak_raw.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16240_attribute_group = {
+ .attrs = adis16240_attributes,
+};
+
+static int __devinit adis16240_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16240_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16240_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16240_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16240_event_attribute_group;
+ st->indio_dev->attrs = &adis16240_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16240_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16240_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16240");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16240_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16240_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16240_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16240_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16240_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16240_remove(struct spi_device *spi)
+{
+ struct adis16240_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ flush_scheduled_work();
+
+ adis16240_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16240_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16240_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+}
+
+static struct spi_driver adis16240_driver = {
+ .driver = {
+ .name = "adis16240",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16240_probe,
+ .remove = __devexit_p(adis16240_remove),
+};
+
+static __init int adis16240_init(void)
+{
+ return spi_register_driver(&adis16240_driver);
+}
+module_init(adis16240_init);
+
+static __exit void adis16240_exit(void)
+{
+ spi_unregister_driver(&adis16240_driver);
+}
+module_exit(adis16240_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
new file mode 100644
index 00000000000..26b677bd84c
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -0,0 +1,254 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16240.h"
+
+/**
+ * combine_8_to_16() utility function to munge two u8s into a u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
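
Editorial note: the scan declarations below mark the acceleration channels as IIO_SIGNED(10), so consumers of the combined word still need to sign-extend the 10 data bits themselves. A minimal sketch of that step, assuming the sample sits in the low 10 bits of the returned register (an assumption taken from those declarations, not stated in this patch):

    /* Illustrative only, not part of the patch: sign-extend a 10-bit field
     * held in the low bits of the combined word (s16/u16 from <linux/types.h>). */
    static inline s16 sign_extend_10bit(u16 raw)
    {
            raw &= 0x3ff;                           /* keep the 10 data bits */
            return (raw & 0x200) ? (s16)(raw | 0xfc00) : (s16)raw;
    }
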
+
+static IIO_SCAN_EL_C(supply, ADIS16240_SCAN_SUPPLY, IIO_UNSIGNED(10),
+ ADIS16240_SUPPLY_OUT, NULL);
+static IIO_SCAN_EL_C(accel_x, ADIS16240_SCAN_ACC_X, IIO_SIGNED(10),
+ ADIS16240_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16240_SCAN_ACC_Y, IIO_SIGNED(10),
+ ADIS16240_YACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_z, ADIS16240_SCAN_ACC_Z, IIO_SIGNED(10),
+ ADIS16240_ZACCL_OUT, NULL);
+static IIO_SCAN_EL_C(aux_adc, ADIS16240_SCAN_AUX_ADC, IIO_UNSIGNED(10),
+ ADIS16240_AUX_ADC, NULL);
+static IIO_SCAN_EL_C(temp, ADIS16240_SCAN_TEMP, IIO_UNSIGNED(10),
+ ADIS16240_TEMP_OUT, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(6);
+
+static struct attribute *adis16240_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_scan_el_aux_adc.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16240_scan_el_group = {
+ .attrs = adis16240_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16240_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16240_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16240_read_ring_data() read the data registers which will be placed into the ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16240_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16240_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
+ for (i = 0; i <= ADIS16240_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 30;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ st->tx[2 * i]
+ = ADIS16240_READ_REG(ADIS16240_SUPPLY_OUT + 2 * i);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
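
Editorial note: the loop above pipelines the burst read, so the response to the command sent in transfer i arrives during transfer i + 1; that is why rx_buf is only attached from the second transfer onward and ADIS16240_OUTPUTS + 1 transfers are queued for ADIS16240_OUTPUTS result registers. A sketch of the resulting layout, derived from the loop rather than stated anywhere in the patch:

    /*
     * xfer 0: tx = READ(SUPPLY_OUT)         rx = (not captured)
     * xfer 1: tx = READ(SUPPLY_OUT + 2)     rx = value of SUPPLY_OUT
     * xfer 2: tx = READ(SUPPLY_OUT + 4)     rx = value of SUPPLY_OUT + 2
     * ...
     * xfer N: tx = READ(SUPPLY_OUT + 2N)    rx = value requested in xfer N-1
     */
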
+
+
+static void adis16240_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16240_state *st
+ = container_of(work_s, struct adis16240_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16240_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* Guaranteed to be aligned to an 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+static int adis16240_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail */
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count)
+ /* Timestamp (aligned to sizeof(s64)) and data */
+ size = (((indio_dev->scan_count * sizeof(s16))
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
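
Editorial note: this mirrors the (i + 3)/4 * 4 offset used in adis16240_trigger_bh_to_ring(), padding the s16 samples up to an 8-byte boundary before appending the s64 timestamp. A worked example as a hypothetical helper, with the value chosen purely for illustration:

    /* Illustrative only: bytes per datum for scan_count s16 samples plus a
     * timestamp; scan_count = 6 gives 12 -> 16 (padded) + 8 = 24 bytes.   */
    static size_t example_bpd(int scan_count)
    {
            size_t payload = scan_count * sizeof(s16);
            size_t aligned = (payload + sizeof(s64) - 1) & ~(sizeof(s64) - 1);

            return aligned + sizeof(s64);
    }
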
+
+static int adis16240_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16240_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16240_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16240_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16240_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16240_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_aux_adc.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16240_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16240_data_rdy_ring_preenable;
+ ring->postenable = &adis16240_data_rdy_ring_postenable;
+ ring->predisable = &adis16240_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16240_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16240_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16240_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16240_trigger.c b/drivers/staging/iio/accel/adis16240_trigger.c
new file mode 100644
index 00000000000..df1312e17f4
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16240_trigger.c
@@ -0,0 +1,124 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16240.h"
+
+/**
+ * adis16240_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16240_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16240_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16240_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16240_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16240_trigger_attr_group = {
+ .attrs = adis16240_trigger_attrs,
+};
+
+/**
+ * adis16240_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16240_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16240_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16240_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16240_trig_try_reen() try re-enabling irq for data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16240_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16240_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16240_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16240_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16240-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16240_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16240_trig_try_reen;
+ st->trig->control_attrs = &adis16240_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16240_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16240_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/inclinometer.h b/drivers/staging/iio/accel/inclinometer.h
new file mode 100644
index 00000000000..5b49f835eac
--- /dev/null
+++ b/drivers/staging/iio/accel/inclinometer.h
@@ -0,0 +1,23 @@
+/*
+ * Inclinometer related attributes
+ */
+#include "../sysfs.h"
+
+#define IIO_DEV_ATTR_INCLI_X(_show, _addr) \
+ IIO_DEVICE_ATTR(incli_x_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_INCLI_Y(_show, _addr) \
+ IIO_DEVICE_ATTR(incli_y_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_INCLI_Z(_show, _addr) \
+ IIO_DEVICE_ATTR(incli_z_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_INCLI_X_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(incli_x_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_INCLI_Y_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(incli_y_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_INCLI_Z_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(incli_z_offset, _mode, _show, _store, _addr)
+
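Editorial note: these macros only standardise the inclinometer attribute names; a driver supplies its own show/store callbacks and register address. A hypothetical use, modelled on the adis16240 accel attributes earlier in this series (the example_* callbacks and EXAMPLE_* registers are placeholders, not part of any driver):

    static IIO_DEV_ATTR_INCLI_X(example_read_incli, EXAMPLE_REG_INCLI_X);
    static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
                                       example_read_incli_offset,
                                       example_write_incli_offset,
                                       EXAMPLE_REG_INCLI_X_OFF);
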
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index db2dd537ffb..ae7ffe114fc 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -26,6 +26,7 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include "../iio.h"
#include "../sysfs.h"
@@ -51,8 +52,10 @@
#define KXSD9_READ(a) (0x80 | (a))
#define KXSD9_WRITE(a) (a)
-#define IIO_DEV_ATTR_ACCEL_SET_RANGE(_mode, _show, _store) \
- IIO_DEVICE_ATTR(accel_range, _mode, _show, _store, 0)
+#define KXSD9_SCALE_2G "0.011978"
+#define KXSD9_SCALE_4G "0.023927"
+#define KXSD9_SCALE_6G "0.035934"
+#define KXSD9_SCALE_8G "0.047853"
#define KXSD9_STATE_RX_SIZE 2
#define KXSD9_STATE_TX_SIZE 4
@@ -73,9 +76,9 @@ struct kxsd9_state {
};
/* This may want to move to mili g to allow for non integer ranges */
-static ssize_t kxsd9_read_accel_range(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t kxsd9_read_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int ret;
ssize_t len = 0;
@@ -101,16 +104,16 @@ static ssize_t kxsd9_read_accel_range(struct device *dev,
switch (st->rx[1] & KXSD9_FS_MASK) {
case KXSD9_FS_8:
- len += sprintf(buf, "8\n");
+ len += sprintf(buf, "%s\n", KXSD9_SCALE_8G);
break;
case KXSD9_FS_6:
- len += sprintf(buf, "6\n");
+ len += sprintf(buf, "%s\n", KXSD9_SCALE_6G);
break;
case KXSD9_FS_4:
- len += sprintf(buf, "4\n");
+ len += sprintf(buf, "%s\n", KXSD9_SCALE_4G);
break;
case KXSD9_FS_2:
- len += sprintf(buf, "2\n");
+ len += sprintf(buf, "%s\n", KXSD9_SCALE_2G);
break;
}
@@ -119,12 +122,12 @@ error_ret:
return ret ? ret : len;
}
-static ssize_t kxsd9_write_accel_range(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+static ssize_t kxsd9_write_scale(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- long readin;
+
struct spi_message msg;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -145,25 +148,25 @@ static ssize_t kxsd9_write_accel_range(struct device *dev,
},
};
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
- switch (readin) {
- case 8:
+ if (!strncmp(buf, KXSD9_SCALE_8G,
+ strlen(buf) < strlen(KXSD9_SCALE_8G)
+ ? strlen(buf) : strlen(KXSD9_SCALE_8G)))
val = KXSD9_FS_8;
- break;
- case 6:
+ else if (!strncmp(buf, KXSD9_SCALE_6G,
+ strlen(buf) < strlen(KXSD9_SCALE_6G)
+ ? strlen(buf) : strlen(KXSD9_SCALE_6G)))
val = KXSD9_FS_6;
- break;
- case 4:
+ else if (!strncmp(buf, KXSD9_SCALE_4G,
+ strlen(buf) < strlen(KXSD9_SCALE_4G)
+ ? strlen(buf) : strlen(KXSD9_SCALE_4G)))
val = KXSD9_FS_4;
- break;
- case 2:
+ else if (!strncmp(buf, KXSD9_SCALE_2G,
+ strlen(buf) < strlen(KXSD9_SCALE_2G)
+ ? strlen(buf) : strlen(KXSD9_SCALE_2G)))
val = KXSD9_FS_2;
- break;
- default:
+ else
return -EINVAL;
- }
+
mutex_lock(&st->buf_lock);
st->tx[0] = KXSD9_READ(KXSD9_REG_CTRL_C);
st->tx[1] = 0;
@@ -182,6 +185,7 @@ error_ret:
mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
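
Editorial note: kxsd9_write_scale() compares the written string against each scale constant over the shorter of the two lengths, so "0.011978" and "0.011978\n" (sysfs writes usually carry a trailing newline) both select the same range. The repeated pattern can be read as the following helper, shown only as an illustration and assuming <linux/kernel.h> and <linux/string.h>:

    static bool kxsd9_scale_matches(const char *buf, const char *scale)
    {
            size_t n = min(strlen(buf), strlen(scale));

            return n && strncmp(buf, scale, n) == 0;
    }
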
+
static ssize_t kxsd9_read_accel(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -227,17 +231,27 @@ error_ret:
static IIO_DEV_ATTR_ACCEL_X(kxsd9_read_accel, KXSD9_REG_X);
static IIO_DEV_ATTR_ACCEL_Y(kxsd9_read_accel, KXSD9_REG_Y);
static IIO_DEV_ATTR_ACCEL_Z(kxsd9_read_accel, KXSD9_REG_Z);
-static IIO_DEV_ATTR_ADC(0, kxsd9_read_accel, KXSD9_REG_AUX);
-static IIO_DEV_ATTR_ACCEL_SET_RANGE(S_IRUGO | S_IWUSR,
- kxsd9_read_accel_range,
- kxsd9_write_accel_range);
+static IIO_DEV_ATTR_IN_RAW(0, kxsd9_read_accel, KXSD9_REG_AUX);
+
+static IIO_DEVICE_ATTR(accel_scale,
+ S_IRUGO | S_IWUSR,
+ kxsd9_read_scale,
+ kxsd9_write_scale,
+ 0);
+
+static IIO_CONST_ATTR(accel_scale_available,
+ KXSD9_SCALE_2G " "
+ KXSD9_SCALE_4G " "
+ KXSD9_SCALE_6G " "
+ KXSD9_SCALE_8G);
static struct attribute *kxsd9_attributes[] = {
- &iio_dev_attr_accel_x.dev_attr.attr,
- &iio_dev_attr_accel_y.dev_attr.attr,
- &iio_dev_attr_accel_z.dev_attr.attr,
- &iio_dev_attr_adc_0.dev_attr.attr,
- &iio_dev_attr_accel_range.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_accel_scale.dev_attr.attr,
+ &iio_const_attr_accel_scale_available.dev_attr.attr,
NULL,
};
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 91a5375408c..e76a97937a3 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -179,10 +179,6 @@ int lis3l02dq_spi_read_reg_8(struct device *dev,
int lis3l02dq_spi_write_reg_8(struct device *dev,
u8 reg_address,
u8 *val);
-#define LIS3L02DQ_SCAN_ACC_X 0
-#define LIS3L02DQ_SCAN_ACC_Y 1
-#define LIS3L02DQ_SCAN_ACC_Z 2
-
#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 82e43588e8a..6b5577d7d8d 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -458,41 +458,39 @@ err_ret:
return ret;
}
-static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
- lis3l02dq_read_signed,
- lis3l02dq_write_signed,
- LIS3L02DQ_REG_OFFSET_X_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
- lis3l02dq_read_signed,
- lis3l02dq_write_signed,
- LIS3L02DQ_REG_OFFSET_Y_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
- lis3l02dq_read_signed,
- lis3l02dq_write_signed,
- LIS3L02DQ_REG_OFFSET_Z_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_X_GAIN(S_IWUSR | S_IRUGO,
- lis3l02dq_read_unsigned,
- lis3l02dq_write_unsigned,
- LIS3L02DQ_REG_GAIN_X_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_Y_GAIN(S_IWUSR | S_IRUGO,
- lis3l02dq_read_unsigned,
- lis3l02dq_write_unsigned,
- LIS3L02DQ_REG_GAIN_Y_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_Z_GAIN(S_IWUSR | S_IRUGO,
- lis3l02dq_read_unsigned,
- lis3l02dq_write_unsigned,
- LIS3L02DQ_REG_GAIN_Z_ADDR);
-
-static IIO_DEV_ATTR_ACCEL_THRESH(S_IWUSR | S_IRUGO,
- lis3l02dq_read_16bit_signed,
- lis3l02dq_write_16bit_signed,
- LIS3L02DQ_REG_THS_L_ADDR);
-
+#define LIS3L02DQ_SIGNED_ATTR(name, reg) \
+ IIO_DEVICE_ATTR(name, \
+ S_IWUSR | S_IRUGO, \
+ lis3l02dq_read_signed, \
+ lis3l02dq_write_signed, \
+ reg);
+
+#define LIS3L02DQ_UNSIGNED_ATTR(name, reg) \
+ IIO_DEVICE_ATTR(name, \
+ S_IWUSR | S_IRUGO, \
+ lis3l02dq_read_unsigned, \
+ lis3l02dq_write_unsigned, \
+ reg);
+
+static LIS3L02DQ_SIGNED_ATTR(accel_x_calibbias,
+ LIS3L02DQ_REG_OFFSET_X_ADDR);
+static LIS3L02DQ_SIGNED_ATTR(accel_y_calibbias,
+ LIS3L02DQ_REG_OFFSET_Y_ADDR);
+static LIS3L02DQ_SIGNED_ATTR(accel_z_calibbias,
+ LIS3L02DQ_REG_OFFSET_Z_ADDR);
+
+static LIS3L02DQ_UNSIGNED_ATTR(accel_x_calibscale,
+ LIS3L02DQ_REG_GAIN_X_ADDR);
+static LIS3L02DQ_UNSIGNED_ATTR(accel_y_calibscale,
+ LIS3L02DQ_REG_GAIN_Y_ADDR);
+static LIS3L02DQ_UNSIGNED_ATTR(accel_z_calibscale,
+ LIS3L02DQ_REG_GAIN_Z_ADDR);
+
+static IIO_DEVICE_ATTR(accel_mag_either_rising_value,
+ S_IWUSR | S_IRUGO,
+ lis3l02dq_read_16bit_signed,
+ lis3l02dq_write_16bit_signed,
+ LIS3L02DQ_REG_THS_L_ADDR);
/* RFC The reading method for these will change depending on whether
* ring buffer capture is in use. Is it worth making these take two
* functions and let the core handle which to call, or leave as in this
@@ -512,7 +510,7 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
lis3l02dq_read_frequency,
lis3l02dq_write_frequency);
-static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("280 560 1120 4480");
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("280 560 1120 4480");
static ssize_t lis3l02dq_read_interrupt_config(struct device *dev,
struct device_attribute *attr,
@@ -522,7 +520,7 @@ static ssize_t lis3l02dq_read_interrupt_config(struct device *dev,
s8 val;
struct iio_event_attr *this_attr = to_iio_event_attr(attr);
- ret = lis3l02dq_spi_read_reg_8(dev,
+ ret = lis3l02dq_spi_read_reg_8(dev->parent,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
(u8 *)&val);
@@ -545,7 +543,7 @@ static ssize_t lis3l02dq_write_interrupt_config(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* read current value */
- ret = lis3l02dq_spi_read_reg_8(dev,
+ ret = lis3l02dq_spi_read_reg_8(dev->parent,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
&valold);
if (ret)
@@ -668,43 +666,51 @@ static void lis3l02dq_thresh_handler_bh_no_check(struct work_struct *work_s)
/* A shared handler for a number of threshold types */
IIO_EVENT_SH(threshold, &lis3l02dq_thresh_handler_th);
-IIO_EVENT_ATTR_ACCEL_X_HIGH_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH);
-
-IIO_EVENT_ATTR_ACCEL_Y_HIGH_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH);
-
-IIO_EVENT_ATTR_ACCEL_Z_HIGH_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH);
-
-IIO_EVENT_ATTR_ACCEL_X_LOW_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW);
-
-IIO_EVENT_ATTR_ACCEL_Y_LOW_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW);
+IIO_EVENT_ATTR_SH(accel_x_mag_pos_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH);
+
+IIO_EVENT_ATTR_SH(accel_y_mag_pos_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH);
+
+IIO_EVENT_ATTR_SH(accel_z_mag_pos_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH);
+
+IIO_EVENT_ATTR_SH(accel_x_mag_neg_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW);
+
+IIO_EVENT_ATTR_SH(accel_y_mag_neg_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW);
+
+IIO_EVENT_ATTR_SH(accel_z_mag_neg_rising_en,
+ iio_event_threshold,
+ lis3l02dq_read_interrupt_config,
+ lis3l02dq_write_interrupt_config,
+ LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW);
-IIO_EVENT_ATTR_ACCEL_Z_LOW_SH(iio_event_threshold,
- lis3l02dq_read_interrupt_config,
- lis3l02dq_write_interrupt_config,
- LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW);
static struct attribute *lis3l02dq_event_attributes[] = {
- &iio_event_attr_accel_x_high.dev_attr.attr,
- &iio_event_attr_accel_y_high.dev_attr.attr,
- &iio_event_attr_accel_z_high.dev_attr.attr,
- &iio_event_attr_accel_x_low.dev_attr.attr,
- &iio_event_attr_accel_y_low.dev_attr.attr,
- &iio_event_attr_accel_z_low.dev_attr.attr,
+ &iio_event_attr_accel_x_mag_pos_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_y_mag_pos_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_z_mag_pos_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_x_mag_neg_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_y_mag_neg_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_z_mag_neg_rising_en.dev_attr.attr,
+ &iio_dev_attr_accel_mag_either_rising_value.dev_attr.attr,
NULL
};
@@ -713,20 +719,21 @@ static struct attribute_group lis3l02dq_event_attribute_group = {
};
static IIO_CONST_ATTR(name, "lis3l02dq");
+static IIO_CONST_ATTR(accel_scale, "0.00958");
static struct attribute *lis3l02dq_attributes[] = {
- &iio_dev_attr_accel_x_offset.dev_attr.attr,
- &iio_dev_attr_accel_y_offset.dev_attr.attr,
- &iio_dev_attr_accel_z_offset.dev_attr.attr,
- &iio_dev_attr_accel_x_gain.dev_attr.attr,
- &iio_dev_attr_accel_y_gain.dev_attr.attr,
- &iio_dev_attr_accel_z_gain.dev_attr.attr,
- &iio_dev_attr_thresh.dev_attr.attr,
- &iio_dev_attr_accel_x.dev_attr.attr,
- &iio_dev_attr_accel_y.dev_attr.attr,
- &iio_dev_attr_accel_z.dev_attr.attr,
+ &iio_dev_attr_accel_x_calibbias.dev_attr.attr,
+ &iio_dev_attr_accel_y_calibbias.dev_attr.attr,
+ &iio_dev_attr_accel_z_calibbias.dev_attr.attr,
+ &iio_dev_attr_accel_x_calibscale.dev_attr.attr,
+ &iio_dev_attr_accel_y_calibscale.dev_attr.attr,
+ &iio_dev_attr_accel_z_calibscale.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_const_attr_name.dev_attr.attr,
NULL
};
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index a4d97ea0df3..e4e202e6cb3 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -75,16 +75,16 @@ error_ret:
return ret;
}
-static IIO_SCAN_EL_C(accel_x, LIS3L02DQ_SCAN_ACC_X, IIO_SIGNED(16),
+static IIO_SCAN_EL_C(accel_x, 0, IIO_SIGNED(16),
LIS3L02DQ_REG_OUT_X_L_ADDR,
&lis3l02dq_scan_el_set_state);
-static IIO_SCAN_EL_C(accel_y, LIS3L02DQ_SCAN_ACC_Y, IIO_SIGNED(16),
+static IIO_SCAN_EL_C(accel_y, 1, IIO_SIGNED(16),
LIS3L02DQ_REG_OUT_Y_L_ADDR,
&lis3l02dq_scan_el_set_state);
-static IIO_SCAN_EL_C(accel_z, LIS3L02DQ_SCAN_ACC_Z, IIO_SIGNED(16),
+static IIO_SCAN_EL_C(accel_z, 2, IIO_SIGNED(16),
LIS3L02DQ_REG_OUT_Z_L_ADDR,
&lis3l02dq_scan_el_set_state);
-static IIO_SCAN_EL_TIMESTAMP;
+static IIO_SCAN_EL_TIMESTAMP(3);
static struct attribute *lis3l02dq_scan_el_attrs[] = {
&iio_scan_el_accel_x.dev_attr.attr,
@@ -192,8 +192,7 @@ error_ret:
}
-static const u8 read_all_tx_array[] =
-{
+static const u8 read_all_tx_array[] = {
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
@@ -208,7 +207,7 @@ static const u8 read_all_tx_array[] =
* @rx_array: (dma capable) recieve array, must be at least
* 4*number of channels
**/
-int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
+static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
{
struct spi_transfer *xfers;
struct spi_message msg;
@@ -358,10 +357,10 @@ static int lis3l02dq_data_rdy_ring_predisable(struct iio_dev *indio_dev)
/* Caller responsible for locking as necessary. */
-static int __lis3l02dq_write_data_ready_config(struct device *dev,
- struct
- iio_event_handler_list *list,
- bool state)
+static int
+__lis3l02dq_write_data_ready_config(struct device *dev,
+ struct iio_event_handler_list *list,
+ bool state)
{
int ret;
u8 valold;
@@ -584,7 +583,7 @@ error_iio_sw_rb_free:
int lis3l02dq_initialize_ring(struct iio_ring_buffer *ring)
{
- return iio_ring_buffer_register(ring);
+ return iio_ring_buffer_register(ring, 0);
}
void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
@@ -592,7 +591,6 @@ void lis3l02dq_uninitialize_ring(struct iio_ring_buffer *ring)
iio_ring_buffer_unregister(ring);
}
-
int lis3l02dq_set_ring_length(struct iio_dev *indio_dev, int length)
{
/* Set sensible defaults for the ring buffer */
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index da7d3cb5ae7..e5321999b26 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -187,6 +187,7 @@ struct sca3000_state {
/**
* struct sca3000_chip_info - model dependant parameters
* @name: model identification
+ * @scale: string containing floating point scale factor
* @temp_output: some devices have temperature sensors.
* @measurement_mode_freq: normal mode sampling frequency
* @option_mode_1: first optional mode. Not all models have one
@@ -199,6 +200,7 @@ struct sca3000_state {
**/
struct sca3000_chip_info {
const char *name;
+ const char *scale;
bool temp_output;
int measurement_mode_freq;
int option_mode_1;
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 1c229869a22..d4f82c39f33 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -27,11 +27,9 @@
enum sca3000_variant {
d01,
- d03,
e02,
e04,
e05,
- l01,
};
/* Note where option modes are not defined, the chip simply does not
@@ -44,21 +42,20 @@ enum sca3000_variant {
static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
{
.name = "sca3000-d01",
+ .scale = " 0.0073575",
.temp_output = true,
.measurement_mode_freq = 250,
.option_mode_1 = SCA3000_OP_MODE_BYPASS,
.option_mode_1_freq = 250,
}, {
- /* No data sheet available - may be the same as the 3100-d03?*/
- .name = "sca3000-d03",
- .temp_output = true,
- }, {
.name = "sca3000-e02",
+ .scale = "0.00981",
.measurement_mode_freq = 125,
.option_mode_1 = SCA3000_OP_MODE_NARROW,
.option_mode_1_freq = 63,
}, {
.name = "sca3000-e04",
+ .scale = "0.01962",
.measurement_mode_freq = 100,
.option_mode_1 = SCA3000_OP_MODE_NARROW,
.option_mode_1_freq = 50,
@@ -66,18 +63,12 @@ static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
.option_mode_2_freq = 400,
}, {
.name = "sca3000-e05",
+ .scale = "0.0613125",
.measurement_mode_freq = 200,
.option_mode_1 = SCA3000_OP_MODE_NARROW,
.option_mode_1_freq = 50,
.option_mode_2 = SCA3000_OP_MODE_WIDE,
.option_mode_2_freq = 400,
- }, {
- /* No data sheet available.
- * Frequencies are unknown.
- */
- .name = "sca3000-l01",
- .temp_output = true,
- .option_mode_1 = SCA3000_OP_MODE_BYPASS,
},
};
@@ -286,7 +277,7 @@ static int sca3000_check_status(struct device *dev)
if (ret < 0)
goto error_ret;
if (rx[1] & SCA3000_EEPROM_CS_ERROR)
- dev_err(dev, "eeprom error \n");
+ dev_err(dev, "eeprom error\n");
if (rx[1] & SCA3000_SPI_FRAME_ERROR)
dev_err(dev, "Previous SPI Frame was corrupt\n");
kfree(rx);
@@ -327,6 +318,14 @@ error_ret:
return ret ? ret : len;
}
+static ssize_t sca3000_show_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct sca3000_state *st = dev_info->dev_data;
+ return sprintf(buf, "%s\n", st->info->scale);
+}
static ssize_t sca3000_show_name(struct device *dev,
struct device_attribute *attr,
@@ -395,7 +394,7 @@ sca3000_show_available_measurement_modes(struct device *dev,
break;
}
/* always supported */
- len += sprintf(buf + len, " 3 - motion detection \n");
+ len += sprintf(buf + len, " 3 - motion detection\n");
return len;
}
@@ -495,7 +494,7 @@ error_ret:
/* Not even vaguely standard attributes so defined here rather than
* in the relevant IIO core headers
*/
-static IIO_DEVICE_ATTR(available_measurement_modes, S_IRUGO,
+static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
sca3000_show_available_measurement_modes,
NULL, 0);
@@ -508,6 +507,8 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
static IIO_DEV_ATTR_NAME(sca3000_show_name);
static IIO_DEV_ATTR_REV(sca3000_show_rev);
+static IIO_DEVICE_ATTR(accel_scale, S_IRUGO, sca3000_show_scale,
+ NULL, 0);
static IIO_DEV_ATTR_ACCEL_X(sca3000_read_13bit_signed,
SCA3000_REG_ADDR_X_MSB);
@@ -683,7 +684,7 @@ error_free_lock:
/* Should only really be registered if ring buffer support is compiled in.
* Does no harm however and doing it right would add a fair bit of complexity
*/
-static IIO_DEV_ATTR_AVAIL_SAMP_FREQ(sca3000_read_av_freq);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
sca3000_read_frequency,
@@ -718,7 +719,10 @@ static ssize_t sca3000_read_temp(struct device *dev,
error_ret:
return ret;
}
-static IIO_DEV_ATTR_TEMP(sca3000_read_temp);
+static IIO_DEV_ATTR_TEMP_RAW(sca3000_read_temp);
+
+static IIO_CONST_ATTR(temp_scale, "0.555556");
+static IIO_CONST_ATTR(temp_offset, "-214.6");
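
Editorial note: under the IIO sysfs convention these constants are applied as (raw + offset) * scale, converting the raw temperature count to degrees Celsius. A userspace-style sketch of that arithmetic, illustrative only:

    static double sca3000_temp_degc(int raw)
    {
            const double offset = -214.6;    /* temp_offset */
            const double scale  = 0.555556;  /* temp_scale  */

            return (raw + offset) * scale;
    }
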
/**
* sca3000_show_thresh() sysfs query of a threshold
@@ -770,31 +774,34 @@ static ssize_t sca3000_write_thresh(struct device *dev,
return ret ? ret : len;
}
-static IIO_DEV_ATTR_ACCEL_THRESH_X(S_IRUGO | S_IWUSR,
- sca3000_show_thresh,
- sca3000_write_thresh,
- SCA3000_REG_CTRL_SEL_MD_X_TH);
-static IIO_DEV_ATTR_ACCEL_THRESH_Y(S_IRUGO | S_IWUSR,
- sca3000_show_thresh,
- sca3000_write_thresh,
- SCA3000_REG_CTRL_SEL_MD_Y_TH);
-static IIO_DEV_ATTR_ACCEL_THRESH_Z(S_IRUGO | S_IWUSR,
- sca3000_show_thresh,
- sca3000_write_thresh,
- SCA3000_REG_CTRL_SEL_MD_Z_TH);
+static IIO_DEVICE_ATTR(accel_x_mag_either_rising_value,
+ S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_X_TH);
+
+static IIO_DEVICE_ATTR(accel_y_mag_either_rising_value,
+ S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_Y_TH);
+
+static IIO_DEVICE_ATTR(accel_z_mag_either_rising_value,
+ S_IRUGO | S_IWUSR,
+ sca3000_show_thresh,
+ sca3000_write_thresh,
+ SCA3000_REG_CTRL_SEL_MD_Z_TH);
static struct attribute *sca3000_attributes[] = {
&iio_dev_attr_name.dev_attr.attr,
&iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_accel_x.dev_attr.attr,
- &iio_dev_attr_accel_y.dev_attr.attr,
- &iio_dev_attr_accel_z.dev_attr.attr,
- &iio_dev_attr_thresh_accel_x.dev_attr.attr,
- &iio_dev_attr_thresh_accel_y.dev_attr.attr,
- &iio_dev_attr_thresh_accel_z.dev_attr.attr,
- &iio_dev_attr_available_measurement_modes.dev_attr.attr,
+ &iio_dev_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_dev_attr_measurement_mode_available.dev_attr.attr,
&iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
NULL,
};
@@ -802,18 +809,18 @@ static struct attribute *sca3000_attributes[] = {
static struct attribute *sca3000_attributes_with_temp[] = {
&iio_dev_attr_name.dev_attr.attr,
&iio_dev_attr_revision.dev_attr.attr,
- &iio_dev_attr_accel_x.dev_attr.attr,
- &iio_dev_attr_accel_y.dev_attr.attr,
- &iio_dev_attr_accel_z.dev_attr.attr,
- &iio_dev_attr_thresh_accel_x.dev_attr.attr,
- &iio_dev_attr_thresh_accel_y.dev_attr.attr,
- &iio_dev_attr_thresh_accel_z.dev_attr.attr,
- &iio_dev_attr_available_measurement_modes.dev_attr.attr,
+ &iio_dev_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_dev_attr_measurement_mode_available.dev_attr.attr,
&iio_dev_attr_measurement_mode.dev_attr.attr,
- &iio_dev_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
/* Only present if temp sensor is */
- &iio_dev_attr_temp.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
NULL,
};
@@ -910,7 +917,7 @@ static ssize_t sca3000_query_mo_det(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev->parent);
struct sca3000_state *st = indio_dev->dev_data;
struct iio_event_attr *this_attr = to_iio_event_attr(attr);
int ret, len = 0;
@@ -975,7 +982,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
struct iio_event_attr *this_attr = to_iio_event_attr(attr);
int ret, len;
u8 *rx;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev->parent);
struct sca3000_state *st = indio_dev->dev_data;
mutex_lock(&st->lock);
ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1);
@@ -995,7 +1002,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev->parent);
struct sca3000_state *st = indio_dev->dev_data;
struct iio_event_attr *this_attr = to_iio_event_attr(attr);
@@ -1085,7 +1092,7 @@ static ssize_t sca3000_set_mo_det(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev->parent);
struct sca3000_state *st = indio_dev->dev_data;
struct iio_event_attr *this_attr = to_iio_event_attr(attr);
long val;
@@ -1155,20 +1162,23 @@ IIO_EVENT_ATTR_FREE_FALL_DETECT_SH(iio_event_all,
0)
/* Motion detector related event attributes */
-IIO_EVENT_ATTR_ACCEL_X_HIGH_SH(iio_event_all,
- sca3000_query_mo_det,
- sca3000_set_mo_det,
- SCA3000_MD_CTRL_OR_X);
-
-IIO_EVENT_ATTR_ACCEL_Y_HIGH_SH(iio_event_all,
- sca3000_query_mo_det,
- sca3000_set_mo_det,
- SCA3000_MD_CTRL_OR_Y);
-
-IIO_EVENT_ATTR_ACCEL_Z_HIGH_SH(iio_event_all,
- sca3000_query_mo_det,
- sca3000_set_mo_det,
- SCA3000_MD_CTRL_OR_Z);
+IIO_EVENT_ATTR_SH(accel_x_mag_either_rising_en,
+ iio_event_all,
+ sca3000_query_mo_det,
+ sca3000_set_mo_det,
+ SCA3000_MD_CTRL_OR_X);
+
+IIO_EVENT_ATTR_SH(accel_y_mag_either_rising_en,
+ iio_event_all,
+ sca3000_query_mo_det,
+ sca3000_set_mo_det,
+ SCA3000_MD_CTRL_OR_Y);
+
+IIO_EVENT_ATTR_SH(accel_z_mag_either_rising_en,
+ iio_event_all,
+ sca3000_query_mo_det,
+ sca3000_set_mo_det,
+ SCA3000_MD_CTRL_OR_Z);
/* Hardware ring buffer related event attributes */
IIO_EVENT_ATTR_RING_50_FULL_SH(iio_event_all,
@@ -1183,11 +1193,14 @@ IIO_EVENT_ATTR_RING_75_FULL_SH(iio_event_all,
static struct attribute *sca3000_event_attributes[] = {
&iio_event_attr_free_fall.dev_attr.attr,
- &iio_event_attr_accel_x_high.dev_attr.attr,
- &iio_event_attr_accel_y_high.dev_attr.attr,
- &iio_event_attr_accel_z_high.dev_attr.attr,
+ &iio_event_attr_accel_x_mag_either_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_y_mag_either_rising_en.dev_attr.attr,
+ &iio_event_attr_accel_z_mag_either_rising_en.dev_attr.attr,
&iio_event_attr_ring_50_full.dev_attr.attr,
&iio_event_attr_ring_75_full.dev_attr.attr,
+ &iio_dev_attr_accel_x_mag_either_rising_value.dev_attr.attr,
+ &iio_dev_attr_accel_y_mag_either_rising_value.dev_attr.attr,
+ &iio_dev_attr_accel_z_mag_either_rising_value.dev_attr.attr,
NULL,
};
@@ -1325,7 +1338,7 @@ static int __devinit __sca3000_probe(struct spi_device *spi,
if (ret < 0)
goto error_free_dev;
regdone = 1;
- ret = iio_ring_buffer_register(st->indio_dev->ring);
+ ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
if (ret < 0)
goto error_unregister_dev;
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
@@ -1344,9 +1357,10 @@ static int __devinit __sca3000_probe(struct spi_device *spi,
* is overkill. At very least a simpler registration method
* might be worthwhile.
*/
- iio_add_event_to_list(iio_event_attr_accel_z_high.listel,
- &st->indio_dev
- ->interrupts[0]->ev_list);
+ iio_add_event_to_list(
+ iio_event_attr_accel_z_mag_either_rising_en.listel,
+ &st->indio_dev
+ ->interrupts[0]->ev_list);
}
sca3000_register_ring_funcs(st->indio_dev);
ret = sca3000_clean_setup(st);
@@ -1437,9 +1451,6 @@ static int sca3000_remove(struct spi_device *spi)
SCA3000_VARIANT_PROBE(d01);
static SCA3000_VARIANT_SPI_DRIVER(d01);
-SCA3000_VARIANT_PROBE(d03);
-static SCA3000_VARIANT_SPI_DRIVER(d03);
-
SCA3000_VARIANT_PROBE(e02);
static SCA3000_VARIANT_SPI_DRIVER(e02);
@@ -1449,9 +1460,6 @@ static SCA3000_VARIANT_SPI_DRIVER(e04);
SCA3000_VARIANT_PROBE(e05);
static SCA3000_VARIANT_SPI_DRIVER(e05);
-SCA3000_VARIANT_PROBE(l01);
-static SCA3000_VARIANT_SPI_DRIVER(l01);
-
static __init int sca3000_init(void)
{
int ret;
@@ -1459,32 +1467,22 @@ static __init int sca3000_init(void)
ret = spi_register_driver(&sca3000_d01_driver);
if (ret)
goto error_ret;
- ret = spi_register_driver(&sca3000_d03_driver);
- if (ret)
- goto error_unreg_d01;
ret = spi_register_driver(&sca3000_e02_driver);
if (ret)
- goto error_unreg_d03;
+ goto error_unreg_d01;
ret = spi_register_driver(&sca3000_e04_driver);
if (ret)
goto error_unreg_e02;
ret = spi_register_driver(&sca3000_e05_driver);
if (ret)
goto error_unreg_e04;
- ret = spi_register_driver(&sca3000_l01_driver);
- if (ret)
- goto error_unreg_e05;
return 0;
-error_unreg_e05:
- spi_unregister_driver(&sca3000_e05_driver);
error_unreg_e04:
spi_unregister_driver(&sca3000_e04_driver);
error_unreg_e02:
spi_unregister_driver(&sca3000_e02_driver);
-error_unreg_d03:
- spi_unregister_driver(&sca3000_d03_driver);
error_unreg_d01:
spi_unregister_driver(&sca3000_d01_driver);
error_ret:
@@ -1494,11 +1492,9 @@ error_ret:
static __exit void sca3000_exit(void)
{
- spi_unregister_driver(&sca3000_l01_driver);
spi_unregister_driver(&sca3000_e05_driver);
spi_unregister_driver(&sca3000_e04_driver);
spi_unregister_driver(&sca3000_e02_driver);
- spi_unregister_driver(&sca3000_d03_driver);
spi_unregister_driver(&sca3000_d01_driver);
}
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 40cbab2a659..8e8c068d401 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -186,11 +186,29 @@ static ssize_t sca3000_store_ring_bpse(struct device *dev,
return ret ? ret : len;
}
-static IIO_CONST_ATTR(bpse_available, "8 11");
+static IIO_SCAN_EL_C(accel_x, 0, 0, 0, NULL);
+static IIO_SCAN_EL_C(accel_y, 1, 0, 0, NULL);
+static IIO_SCAN_EL_C(accel_z, 2, 0, 0, NULL);
+static IIO_CONST_ATTR(accel_precision_available, "8 11");
+static IIO_DEVICE_ATTR(accel_precision,
+ S_IRUGO | S_IWUSR,
+ sca3000_show_ring_bpse,
+ sca3000_store_ring_bpse,
+ 0);
+
+static struct attribute *sca3000_scan_el_attrs[] = {
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_const_attr_accel_precision_available.dev_attr.attr,
+ &iio_dev_attr_accel_precision.dev_attr.attr,
+ NULL
+};
-static IIO_DEV_ATTR_BPSE(S_IRUGO | S_IWUSR,
- sca3000_show_ring_bpse,
- sca3000_store_ring_bpse);
+static struct attribute_group sca3000_scan_el_group = {
+ .attrs = sca3000_scan_el_attrs,
+ .name = "scan_elements",
+};
/*
* Ring buffer attributes
@@ -198,17 +216,15 @@ static IIO_DEV_ATTR_BPSE(S_IRUGO | S_IWUSR,
* only apply to the ring buffer. At all times full rate and accuracy
* is available via direct reading from registers.
*/
-static struct attribute *iio_ring_attributes[] = {
+static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_length.attr,
&dev_attr_bps.attr,
&dev_attr_ring_enable.attr,
- &iio_dev_attr_bpse.dev_attr.attr,
- &iio_const_attr_bpse_available.dev_attr.attr,
NULL,
};
static struct attribute_group sca3000_ring_attr = {
- .attrs = iio_ring_attributes,
+ .attrs = sca3000_ring_attributes,
};
static const struct attribute_group *sca3000_ring_attr_groups[] = {
@@ -228,7 +244,7 @@ static struct iio_ring_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
ring = kzalloc(sizeof *ring, GFP_KERNEL);
if (!ring)
- return 0;
+ return NULL;
ring->private = indio_dev;
buf = &ring->buf;
iio_ring_buffer_init(buf, indio_dev);
@@ -248,6 +264,7 @@ static inline void sca3000_rb_free(struct iio_ring_buffer *r)
int sca3000_configure_ring(struct iio_dev *indio_dev)
{
+ indio_dev->scan_el_attrs = &sca3000_scan_el_group;
indio_dev->ring = sca3000_rb_allocate(indio_dev);
if (indio_dev->ring == NULL)
return -ENOMEM;
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 3989c0ca0e0..0835fbc86c2 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -7,11 +7,16 @@ config MAX1363
tristate "MAXIM max1363 ADC driver"
depends on I2C
select IIO_TRIGGER if IIO_RING_BUFFER
+ select MAX1363_RING_BUFFER
help
Say yes here to build support for many MAXIM i2c analog to digital
- convertors (ADC). (max1361, max1362, max1363, max1364, max1136,
- max1136, max1137, max1138, max1139, max1236, max1237, max11238,
- max1239) Provides direct access via sysfs.
+ converters (ADC). (max1361, max1362, max1363, max1364, max1036,
+ max1037, max1038, max1039, max1136, max1137, max1138,
+ max1139, max1236, max1237, max1238, max1239, max11600, max11601,
+ max11602, max11603, max11604, max11605, max11606, max11607,
+ max11608, max11609, max11610, max11611, max11612, max11613,
+ max11614, max11615, max11616, max11617) Provides direct access
+ via sysfs.
config MAX1363_RING_BUFFER
bool "MAXIM max1363: use ring buffer"
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 08cee5c22b9..18c9376ecbb 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -3,6 +3,6 @@
#
max1363-y := max1363_core.o
-max1363-$(CONFIG_MAX1363_RING_BUFFER) += max1363_ring.o
+max1363-y += max1363_ring.o
obj-$(CONFIG_MAX1363) += max1363.o
diff --git a/drivers/staging/iio/adc/adc.h b/drivers/staging/iio/adc/adc.h
index d925b2c5e38..04eb16fd0a9 100644
--- a/drivers/staging/iio/adc/adc.h
+++ b/drivers/staging/iio/adc/adc.h
@@ -9,5 +9,20 @@
*
*/
+/* Deprecated */
#define IIO_DEV_ATTR_ADC(_num, _show, _addr) \
IIO_DEVICE_ATTR(adc_##_num, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_IN_RAW(_num, _show, _addr) \
+ IIO_DEVICE_ATTR(in##_num##_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_IN_NAMED_RAW(_name, _show, _addr) \
+ IIO_DEVICE_ATTR(in_##_name##_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_IN_DIFF_RAW(_nump, _numn, _show, _addr) \
+ IIO_DEVICE_ATTR_NAMED(in##_nump##min##_numn##_raw, \
+ in##_nump-in##_numn##_raw, \
+ S_IRUGO, \
+ _show, \
+ NULL, \
+ _addr)
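
Editorial note: IIO_DEV_ATTR_IN_RAW pastes the channel number into the attribute name (in0_raw, in1_raw, ...), and the differential variant generates names such as in0-in1_raw while keeping a sanitised C identifier (in0min1_raw). A hypothetical use, in the style of the kxsd9 change earlier in this diff (the example_* callbacks and EXAMPLE_* addresses are placeholders):

    static IIO_DEV_ATTR_IN_RAW(0, example_read_single, EXAMPLE_ADDR_IN0);
    static IIO_DEV_ATTR_IN_DIFF_RAW(0, 1, example_read_diff, EXAMPLE_ADDR_IN0_MIN_IN1);
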
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index c112fbef270..72cf3670936 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -72,77 +72,54 @@
* @numvals: The number of values returned by a single scan
*/
struct max1363_mode {
- const char *name;
int8_t conf;
- int numvals;
+ long modemask;
};
-#define MAX1363_MODE_SINGLE(_num) { \
- .name = #_num, \
- .conf = MAX1363_CHANNEL_SEL(_num) \
+#define MAX1363_MODE_SINGLE(_num, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_SINGLE_1 \
| MAX1363_CONFIG_SE, \
- .numvals = 1, \
+ .modemask = _mask, \
}
-#define MAX1363_MODE_SINGLE_TIMES_8(_num) { \
- .name = #_num"x8", \
- .conf = MAX1363_CHANNEL_SEL(_num) \
- | MAX1363_CONFIG_SCAN_SINGLE_8 \
- | MAX1363_CONFIG_SE, \
- .numvals = 8, \
- }
-
-#define MAX1363_MODE_SCAN_TO_CHANNEL(_num) { \
- .name = "0..."#_num, \
- .conf = MAX1363_CHANNEL_SEL(_num) \
+#define MAX1363_MODE_SCAN_TO_CHANNEL(_num, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_TO_CS \
| MAX1363_CONFIG_SE, \
- .numvals = _num + 1, \
+ .modemask = _mask, \
}
/* note not available for max1363 hence naming */
-#define MAX1236_MODE_SCAN_MID_TO_CHANNEL(_mid, _num) { \
- .name = #_mid"..."#_num, \
- .conf = MAX1363_CHANNEL_SEL(_num) \
+#define MAX1236_MODE_SCAN_MID_TO_CHANNEL(_mid, _num, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1236_SCAN_MID_TO_CHANNEL \
| MAX1363_CONFIG_SE, \
- .numvals = _num - _mid + 1 \
+ .modemask = _mask \
}
-#define MAX1363_MODE_DIFF_SINGLE(_nump, _numm) { \
- .name = #_nump"-"#_numm, \
- .conf = MAX1363_CHANNEL_SEL(_nump) \
+#define MAX1363_MODE_DIFF_SINGLE(_nump, _numm, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_nump) \
| MAX1363_CONFIG_SCAN_SINGLE_1 \
| MAX1363_CONFIG_DE, \
- .numvals = 1, \
- }
-
-#define MAX1363_MODE_DIFF_SINGLE_TIMES_8(_nump, _numm) { \
- .name = #_nump"-"#_numm, \
- .conf = MAX1363_CHANNEL_SEL(_nump) \
- | MAX1363_CONFIG_SCAN_SINGLE_8 \
- | MAX1363_CONFIG_DE, \
- .numvals = 1, \
+ .modemask = _mask \
}
/* Can't think how to automate naming so specify for now */
-#define MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(_name, _num, _numvals) { \
- .name = #_name, \
- .conf = MAX1363_CHANNEL_SEL(_num) \
+#define MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(_num, _numvals, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1363_CONFIG_SCAN_TO_CS \
| MAX1363_CONFIG_DE, \
- .numvals = _numvals, \
+ .modemask = _mask \
}
/* note only available for max1363 hence naming */
-#define MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(_name, _num, _numvals) { \
- .name = #_name, \
- .conf = MAX1363_CHANNEL_SEL(_num) \
+#define MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(_num, _numvals, _mask) { \
+ .conf = MAX1363_CHANNEL_SEL(_num) \
| MAX1236_SCAN_MID_TO_CHANNEL \
| MAX1363_CONFIG_SE, \
- .numvals = _numvals, \
+ .modemask = _mask \
}
/* Not currently handled */
@@ -158,35 +135,43 @@ struct max1363_mode {
* clear what all the various options actually do. Alternative suggestions
* that don't require user to have intimate knowledge of the chip welcomed.
*/
+enum max1363_channels {
+ max1363_in0, max1363_in1, max1363_in2, max1363_in3,
+ max1363_in4, max1363_in5, max1363_in6, max1363_in7,
+ max1363_in8, max1363_in9, max1363_in10, max1363_in11,
+
+ max1363_in0min1, max1363_in2min3,
+ max1363_in4min5, max1363_in6min7,
+ max1363_in8min9, max1363_in10min11,
+
+ max1363_in1min0, max1363_in3min2,
+ max1363_in5min4, max1363_in7min6,
+ max1363_in9min8, max1363_in11min10,
+ };
/* This must be maintained along side the max1363_mode_table in max1363_core */
enum max1363_modes {
/* Single read of a single channel */
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
- /* Eight reads of a single channel */
- se0, se1, se2, se3, se4, se5, se6, se7, se8, se9, se10, se11,
- /* Scan to channel */
- s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
- s0to7, s0to8, s0to9, s0to10, s0to11,
/* Differential single read */
d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
- /* Differential single read 8 times */
- de0m1, de2m3, de4m5, de6m7, de8m9, de10m11,
- de1m0, de3m2, de5m4, de7m6, de9m8, de11m10,
- /* Differential scan to channel */
- d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
- d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
- /* Scan mid to channel max123{6-9} only */
- s2to3, s6to7, s6to8, s6to9, s6to10, s6to11,
- /* Differential scan mid to channel */
- s6m7to8m9, s6m7to10m11, s7m6to9m8, s7m6to11m10,
+ /* Scan to channel and mid to channel where overlapping */
+ s0to1, s0to2, s2to3, s0to3, s0to4, s0to5, s0to6,
+ s6to7, s0to7, s6to8, s0to8, s6to9,
+ s0to9, s6to10, s0to10, s6to11, s0to11,
+ /* Differential scan to channel and mid to channel where overlapping */
+ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
+ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
+ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
+ d7m6to11m10, d1m0to11m10,
};
/**
* struct max1363_chip_info - chip specifc information
* @name: indentification string for chip
* @num_inputs: number of physical inputs on chip
+ * @bits: accuracy of the adc in bits
* @int_vref_mv: the internal reference voltage
* @monitor_mode: whether the chip supports monitor interrupts
* @mode_list: array of available scan modes
@@ -196,11 +181,14 @@ enum max1363_modes {
struct max1363_chip_info {
const char *name;
u8 num_inputs;
+ u8 bits;
u16 int_vref_mv;
bool monitor_mode;
const enum max1363_modes *mode_list;
int num_modes;
enum max1363_modes default_mode;
+ struct attribute_group *dev_attrs;
+ struct attribute_group *scan_attrs;
};
@@ -212,6 +200,7 @@ struct max1363_chip_info {
* @configbyte: cache of current device config byte
* @chip_info: chip model specific constants, available modes etc
* @current_mode: the scan mode of this chip
+ * @requestedmask: a valid requested set of channels
* @poll_work: bottom half of polling interrupt handler
* @protect_ring: used to ensure only one polling bh running at a time
* @reg: supply regulator
@@ -223,16 +212,21 @@ struct max1363_state {
char configbyte;
const struct max1363_chip_info *chip_info;
const struct max1363_mode *current_mode;
+ u32 requestedmask;
struct work_struct poll_work;
atomic_t protect_ring;
struct iio_trigger *trig;
struct regulator *reg;
};
+
+const struct max1363_mode
+*max1363_match_mode(u32 mask, const struct max1363_chip_info *ci);
+
+int max1363_set_scan_mode(struct max1363_state *st);
+
#ifdef CONFIG_MAX1363_RING_BUFFER
-ssize_t max1363_scan_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
+int max1363_single_channel_from_ring(long mask, struct max1363_state *st);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
@@ -250,14 +244,12 @@ static inline int max1363_initialize_ring(struct iio_ring_buffer *ring)
return 0;
};
-
-static inline ssize_t max1363_scan_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static inline int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
{
- return 0;
+ return -EINVAL;
};
+
static inline int
max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 773f1d1d9c6..20e267448d1 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -1,27 +1,26 @@
/*
- * linux/drivers/industrialio/adc/max1363.c
- * Copyright (C) 2008 Jonathan Cameron
- *
- * based on linux/drivers/i2c/chips/max123x
- * Copyright (C) 2002-2004 Stefan Eletzhofer
- *
- * based on linux/drivers/acron/char/pcf8583.c
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * max1363.c
- *
- * Partial support for max1363 and similar chips.
- *
- * Not currently implemented.
- *
- * - Monitor interrrupt generation.
- * - Control of internal reference.
- * - Sysfs scan interface currently assumes unipolar mode.
- */
+ * iio/adc/max1363.c
+ * Copyright (C) 2008-2010 Jonathan Cameron
+ *
+ * based on linux/drivers/i2c/chips/max123x
+ * Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ * based on linux/drivers/acorn/char/pcf8583.c
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * max1363.c
+ *
+ * Partial support for max1363 and similar chips.
+ *
+ * Not currently implemented.
+ *
+ * - Monitor interrupt generation.
+ * - Control of internal reference.
+ */
#include <linux/interrupt.h>
#include <linux/gpio.h>
@@ -38,118 +37,328 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "adc.h"
#include "max1363.h"
-/* Available scan modes.
- * Awkwardly the associated enum is in the header so it is available to
- * the ring buffer code.
- */
-static const struct max1363_mode max1363_mode_table[] = {
- MAX1363_MODE_SINGLE(0),
- MAX1363_MODE_SINGLE(1),
- MAX1363_MODE_SINGLE(2),
- MAX1363_MODE_SINGLE(3),
- MAX1363_MODE_SINGLE(4),
- MAX1363_MODE_SINGLE(5),
- MAX1363_MODE_SINGLE(6),
- MAX1363_MODE_SINGLE(7),
- MAX1363_MODE_SINGLE(8),
- MAX1363_MODE_SINGLE(9),
- MAX1363_MODE_SINGLE(10),
- MAX1363_MODE_SINGLE(11),
-
- MAX1363_MODE_SINGLE_TIMES_8(0),
- MAX1363_MODE_SINGLE_TIMES_8(1),
- MAX1363_MODE_SINGLE_TIMES_8(2),
- MAX1363_MODE_SINGLE_TIMES_8(3),
- MAX1363_MODE_SINGLE_TIMES_8(4),
- MAX1363_MODE_SINGLE_TIMES_8(5),
- MAX1363_MODE_SINGLE_TIMES_8(6),
- MAX1363_MODE_SINGLE_TIMES_8(7),
- MAX1363_MODE_SINGLE_TIMES_8(8),
- MAX1363_MODE_SINGLE_TIMES_8(9),
- MAX1363_MODE_SINGLE_TIMES_8(10),
- MAX1363_MODE_SINGLE_TIMES_8(11),
-
- MAX1363_MODE_SCAN_TO_CHANNEL(1),
- MAX1363_MODE_SCAN_TO_CHANNEL(2),
- MAX1363_MODE_SCAN_TO_CHANNEL(3),
- MAX1363_MODE_SCAN_TO_CHANNEL(4),
- MAX1363_MODE_SCAN_TO_CHANNEL(5),
- MAX1363_MODE_SCAN_TO_CHANNEL(6),
- MAX1363_MODE_SCAN_TO_CHANNEL(7),
- MAX1363_MODE_SCAN_TO_CHANNEL(8),
- MAX1363_MODE_SCAN_TO_CHANNEL(9),
- MAX1363_MODE_SCAN_TO_CHANNEL(10),
- MAX1363_MODE_SCAN_TO_CHANNEL(11),
-
- MAX1363_MODE_DIFF_SINGLE(0, 1),
- MAX1363_MODE_DIFF_SINGLE(2, 3),
- MAX1363_MODE_DIFF_SINGLE(4, 5),
- MAX1363_MODE_DIFF_SINGLE(6, 7),
- MAX1363_MODE_DIFF_SINGLE(8, 9),
- MAX1363_MODE_DIFF_SINGLE(10, 11),
- MAX1363_MODE_DIFF_SINGLE(1, 0),
- MAX1363_MODE_DIFF_SINGLE(3, 2),
- MAX1363_MODE_DIFF_SINGLE(5, 4),
- MAX1363_MODE_DIFF_SINGLE(7, 6),
- MAX1363_MODE_DIFF_SINGLE(9, 8),
- MAX1363_MODE_DIFF_SINGLE(11, 10),
-
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(0, 1),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(2, 3),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(4, 5),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(6, 7),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(8, 9),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(10, 11),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(1, 0),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(3, 2),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(5, 4),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(7, 6),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(9, 8),
- MAX1363_MODE_DIFF_SINGLE_TIMES_8(11, 10),
-
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...2-3, 2, 2),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...4-5, 4, 3),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...6-7, 6, 4),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...8-9, 8, 5),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(0-1...10-11, 10, 6),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...3-2, 3, 2),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...5-4, 5, 3),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...7-6, 7, 4),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...9-8, 9, 5),
- MAX1363_MODE_DIFF_SCAN_TO_CHANNEL_NAMED(1-0...11-10, 11, 6),
-
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(2, 3),
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 7),
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 8),
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 9),
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 10),
- MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 11),
-
- MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(6-7...8-9, 8, 2),
- MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(6-7...10-11, 10, 3),
- MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(7-6...9-8, 9, 2),
- MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL_NAMED(7-6...11-10, 11, 3),
+/* Here we claim all are 16 bits. This currently does no harm and saves
+ * us a lot of scan element listings */
+
+#define MAX1363_SCAN_EL(number) \
+ IIO_SCAN_EL_C(in##number, number, IIO_UNSIGNED(16), 0, NULL);
+#define MAX1363_SCAN_EL_D(p, n, number) \
+ IIO_SCAN_NAMED_EL_C(in##p##m##in##n, in##p-in##n, \
+ number, IIO_SIGNED(16), 0, NULL);
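+/* Differential pairs are exported as signed scan elements: the C identifier
+ * is inPminN, the sysfs name inP-inN */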
+
+static MAX1363_SCAN_EL(0);
+static MAX1363_SCAN_EL(1);
+static MAX1363_SCAN_EL(2);
+static MAX1363_SCAN_EL(3);
+static MAX1363_SCAN_EL(4);
+static MAX1363_SCAN_EL(5);
+static MAX1363_SCAN_EL(6);
+static MAX1363_SCAN_EL(7);
+static MAX1363_SCAN_EL(8);
+static MAX1363_SCAN_EL(9);
+static MAX1363_SCAN_EL(10);
+static MAX1363_SCAN_EL(11);
+static MAX1363_SCAN_EL_D(0, 1, 12);
+static MAX1363_SCAN_EL_D(2, 3, 13);
+static MAX1363_SCAN_EL_D(4, 5, 14);
+static MAX1363_SCAN_EL_D(6, 7, 15);
+static MAX1363_SCAN_EL_D(8, 9, 16);
+static MAX1363_SCAN_EL_D(10, 11, 17);
+static MAX1363_SCAN_EL_D(1, 0, 18);
+static MAX1363_SCAN_EL_D(3, 2, 19);
+static MAX1363_SCAN_EL_D(5, 4, 20);
+static MAX1363_SCAN_EL_D(7, 6, 21);
+static MAX1363_SCAN_EL_D(9, 8, 22);
+static MAX1363_SCAN_EL_D(11, 10, 23);
+
+static const struct max1363_mode max1363_mode_table[] = {
+ /* All of the single channel options first */
+ MAX1363_MODE_SINGLE(0, 1 << 0),
+ MAX1363_MODE_SINGLE(1, 1 << 1),
+ MAX1363_MODE_SINGLE(2, 1 << 2),
+ MAX1363_MODE_SINGLE(3, 1 << 3),
+ MAX1363_MODE_SINGLE(4, 1 << 4),
+ MAX1363_MODE_SINGLE(5, 1 << 5),
+ MAX1363_MODE_SINGLE(6, 1 << 6),
+ MAX1363_MODE_SINGLE(7, 1 << 7),
+ MAX1363_MODE_SINGLE(8, 1 << 8),
+ MAX1363_MODE_SINGLE(9, 1 << 9),
+ MAX1363_MODE_SINGLE(10, 1 << 10),
+ MAX1363_MODE_SINGLE(11, 1 << 11),
+
+ MAX1363_MODE_DIFF_SINGLE(0, 1, 1 << 12),
+ MAX1363_MODE_DIFF_SINGLE(2, 3, 1 << 13),
+ MAX1363_MODE_DIFF_SINGLE(4, 5, 1 << 14),
+ MAX1363_MODE_DIFF_SINGLE(6, 7, 1 << 15),
+ MAX1363_MODE_DIFF_SINGLE(8, 9, 1 << 16),
+ MAX1363_MODE_DIFF_SINGLE(10, 11, 1 << 17),
+ MAX1363_MODE_DIFF_SINGLE(1, 0, 1 << 18),
+ MAX1363_MODE_DIFF_SINGLE(3, 2, 1 << 19),
+ MAX1363_MODE_DIFF_SINGLE(5, 4, 1 << 20),
+ MAX1363_MODE_DIFF_SINGLE(7, 6, 1 << 21),
+ MAX1363_MODE_DIFF_SINGLE(9, 8, 1 << 22),
+ MAX1363_MODE_DIFF_SINGLE(11, 10, 1 << 23),
+
+ /* The multichannel scans next */
+ MAX1363_MODE_SCAN_TO_CHANNEL(1, 0x003),
+ MAX1363_MODE_SCAN_TO_CHANNEL(2, 0x007),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(2, 3, 0x00C),
+ MAX1363_MODE_SCAN_TO_CHANNEL(3, 0x00F),
+ MAX1363_MODE_SCAN_TO_CHANNEL(4, 0x01F),
+ MAX1363_MODE_SCAN_TO_CHANNEL(5, 0x03F),
+ MAX1363_MODE_SCAN_TO_CHANNEL(6, 0x07F),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 7, 0x0C0),
+ MAX1363_MODE_SCAN_TO_CHANNEL(7, 0x0FF),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 8, 0x1C0),
+ MAX1363_MODE_SCAN_TO_CHANNEL(8, 0x1FF),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 9, 0x3C0),
+ MAX1363_MODE_SCAN_TO_CHANNEL(9, 0x3FF),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 10, 0x7C0),
+ MAX1363_MODE_SCAN_TO_CHANNEL(10, 0x7FF),
+ MAX1236_MODE_SCAN_MID_TO_CHANNEL(6, 11, 0xFC0),
+ MAX1363_MODE_SCAN_TO_CHANNEL(11, 0xFFF),
+
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(2, 2, 0x003000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(4, 3, 0x007000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(6, 4, 0x00F000),
+ MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(8, 2, 0x018000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(8, 5, 0x01F000),
+ MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(10, 3, 0x038000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(10, 6, 0x3F000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(3, 2, 0x0C0000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(5, 3, 0x1C0000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(7, 4, 0x3C0000),
+ MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(9, 2, 0x600000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(9, 5, 0x7C0000),
+ MAX1236_MODE_DIFF_SCAN_MID_TO_CHANNEL(11, 3, 0xE00000),
+ MAX1363_MODE_DIFF_SCAN_TO_CHANNEL(11, 6, 0xFC0000),
};
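+/* Return the first mode in the chip's mode list whose modemask covers every
+ * channel requested in mask (i.e. mask is a subset of modemask), or NULL if
+ * no listed mode can satisfy the request */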
+const struct max1363_mode
+*max1363_match_mode(u32 mask, const struct max1363_chip_info *ci)
+{
+ int i;
+ if (mask)
+ for (i = 0; i < ci->num_modes; i++)
+ if (!((~max1363_mode_table[ci->mode_list[i]].modemask) &
+ mask))
+ return &max1363_mode_table[ci->mode_list[i]];
+ return NULL;
+};
+
+static ssize_t max1363_show_precision(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ return sprintf(buf, "%d\n", st->chip_info->bits);
+}
+
+static IIO_DEVICE_ATTR(in_precision, S_IRUGO, max1363_show_precision,
+ NULL, 0);
+
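+/* Both device bytes (setup then configuration) go out back to back in a
+ * single two byte I2C write */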
+static int max1363_write_basic_config(struct i2c_client *client,
+ unsigned char d1,
+ unsigned char d2)
+{
+ int ret;
+ u8 *tx_buf = kmalloc(2, GFP_KERNEL);
+
+ if (!tx_buf)
+ return -ENOMEM;
+ tx_buf[0] = d1;
+ tx_buf[1] = d2;
+
+ ret = i2c_master_send(client, tx_buf, 2);
+ kfree(tx_buf);
+
+ return (ret > 0) ? 0 : ret;
+}
+
+int max1363_set_scan_mode(struct max1363_state *st)
+{
+ st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
+ | MAX1363_SCAN_MASK
+ | MAX1363_SE_DE_MASK);
+ st->configbyte |= st->current_mode->conf;
+
+ return max1363_write_basic_config(st->client,
+ st->setupbyte,
+ st->configbyte);
+}
+
+static ssize_t max1363_read_single_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ struct i2c_client *client = st->client;
+ int ret = 0, len = 0;
+ s32 data;
+ char rxbuf[2];
+ long mask;
+
+ mutex_lock(&dev_info->mlock);
+ /* If ring buffer capture is occurring, query the buffer */
+ if (iio_ring_enabled(dev_info)) {
+ mask = max1363_mode_table[this_attr->address].modemask;
+ data = max1363_single_channel_from_ring(mask, st);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
+ }
+ } else {
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode !=
+ &max1363_mode_table[this_attr->address]) {
+ /* Update scan mode if needed */
+ st->current_mode
+ = &max1363_mode_table[this_attr->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret)
+ goto error_ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 2);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
+ }
+
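+ /* data arrives MSB first; the low nibble of the first
+ * byte carries the top bits of the result */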
+ data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
+ } else {
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 1);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
+ }
+ data = rxbuf[0];
+ }
+ }
+ /* Pretty print the result */
+ len = sprintf(buf, "%u\n", data);
+
+error_ret:
+ mutex_unlock(&dev_info->mlock);
+ return ret ? ret : len;
+}
+
+/* Direct read attributes */
+static IIO_DEV_ATTR_IN_RAW(0, max1363_read_single_channel, _s0);
+static IIO_DEV_ATTR_IN_RAW(1, max1363_read_single_channel, _s1);
+static IIO_DEV_ATTR_IN_RAW(2, max1363_read_single_channel, _s2);
+static IIO_DEV_ATTR_IN_RAW(3, max1363_read_single_channel, _s3);
+static IIO_DEV_ATTR_IN_RAW(4, max1363_read_single_channel, _s4);
+static IIO_DEV_ATTR_IN_RAW(5, max1363_read_single_channel, _s5);
+static IIO_DEV_ATTR_IN_RAW(6, max1363_read_single_channel, _s6);
+static IIO_DEV_ATTR_IN_RAW(7, max1363_read_single_channel, _s7);
+static IIO_DEV_ATTR_IN_RAW(8, max1363_read_single_channel, _s8);
+static IIO_DEV_ATTR_IN_RAW(9, max1363_read_single_channel, _s9);
+static IIO_DEV_ATTR_IN_RAW(10, max1363_read_single_channel, _s10);
+static IIO_DEV_ATTR_IN_RAW(11, max1363_read_single_channel, _s11);
+
+static IIO_DEV_ATTR_IN_DIFF_RAW(0, 1, max1363_read_single_channel, d0m1);
+static IIO_DEV_ATTR_IN_DIFF_RAW(2, 3, max1363_read_single_channel, d2m3);
+static IIO_DEV_ATTR_IN_DIFF_RAW(4, 5, max1363_read_single_channel, d4m5);
+static IIO_DEV_ATTR_IN_DIFF_RAW(6, 7, max1363_read_single_channel, d6m7);
+static IIO_DEV_ATTR_IN_DIFF_RAW(8, 9, max1363_read_single_channel, d8m9);
+static IIO_DEV_ATTR_IN_DIFF_RAW(10, 11, max1363_read_single_channel, d10m11);
+static IIO_DEV_ATTR_IN_DIFF_RAW(1, 0, max1363_read_single_channel, d1m0);
+static IIO_DEV_ATTR_IN_DIFF_RAW(3, 2, max1363_read_single_channel, d3m2);
+static IIO_DEV_ATTR_IN_DIFF_RAW(5, 4, max1363_read_single_channel, d5m4);
+static IIO_DEV_ATTR_IN_DIFF_RAW(7, 6, max1363_read_single_channel, d7m6);
+static IIO_DEV_ATTR_IN_DIFF_RAW(9, 8, max1363_read_single_channel, d9m8);
+static IIO_DEV_ATTR_IN_DIFF_RAW(11, 10, max1363_read_single_channel, d11m10);
+
+
+static ssize_t max1363_show_scale(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /* Driver currently only support internal vref */
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ /* Corresponds to Vref / 2^(bits) */
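+ /* e.g. a 10 bit device with a 4096 mV reference reports
+ * 4096 >> 10 = 4 (mV per LSB); any combination that would come
+ * out below 2 mV per LSB is reported as the fixed value 0.5 */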
+
+ if ((1 << (st->chip_info->bits + 1))
+ > st->chip_info->int_vref_mv)
+ return sprintf(buf, "0.5\n");
+ else
+ return sprintf(buf, "%d\n",
+ st->chip_info->int_vref_mv >> st->chip_info->bits);
+}
+
+static IIO_DEVICE_ATTR(in_scale, S_IRUGO, max1363_show_scale, NULL, 0);
+
+static ssize_t max1363_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct max1363_state *st = iio_dev_get_devdata(dev_info);
+ return sprintf(buf, "%s\n", st->chip_info->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, max1363_show_name, NULL, 0);
+
/* Applies to max1363 */
static const enum max1363_modes max1363_mode_list[] = {
_s0, _s1, _s2, _s3,
- se0, se1, se2, se3,
s0to1, s0to2, s0to3,
d0m1, d2m3, d1m0, d3m2,
- de0m1, de2m3, de1m0, de3m2,
d0m1to2m3, d1m0to3m2,
};
+static struct attribute *max1363_device_attrs[] = {
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_dev_attr_in2_raw.dev_attr.attr,
+ &iio_dev_attr_in3_raw.dev_attr.attr,
+ &iio_dev_attr_in0min1_raw.dev_attr.attr,
+ &iio_dev_attr_in2min3_raw.dev_attr.attr,
+ &iio_dev_attr_in1min0_raw.dev_attr.attr,
+ &iio_dev_attr_in3min2_raw.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_in_scale.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group max1363_dev_attr_group = {
+ .attrs = max1363_device_attrs,
+};
+
+static struct attribute *max1363_scan_el_attrs[] = {
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_scan_el_in1.dev_attr.attr,
+ &iio_scan_el_in2.dev_attr.attr,
+ &iio_scan_el_in3.dev_attr.attr,
+ &iio_scan_el_in0min1.dev_attr.attr,
+ &iio_scan_el_in2min3.dev_attr.attr,
+ &iio_scan_el_in1min0.dev_attr.attr,
+ &iio_scan_el_in3min2.dev_attr.attr,
+ &iio_dev_attr_in_precision.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group max1363_scan_el_group = {
+ .name = "scan_elements",
+ .attrs = max1363_scan_el_attrs,
+};
+
/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
_s0, _s1, _s2, _s3,
- se0, se1, se2, se3,
s0to1, s0to2, s0to3,
d0m1, d2m3, d1m0, d3m2,
- de0m1, de2m3, de1m0, de3m2,
d0m1to2m3, d1m0to3m2,
s2to3,
};
@@ -157,24 +366,162 @@ static const enum max1363_modes max1236_mode_list[] = {
/* Applies to max1238, max1239 */
static const enum max1363_modes max1238_mode_list[] = {
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
- se0, se1, se2, se3, se4, se5, se6, se7, se8, se9, se10, se11,
s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
s0to7, s0to8, s0to9, s0to10, s0to11,
d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
- de0m1, de2m3, de4m5, de6m7, de8m9, de10m11,
- de1m0, de3m2, de5m4, de7m6, de9m8, de11m10,
d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
s6to7, s6to8, s6to9, s6to10, s6to11,
- s6m7to8m9, s6m7to10m11, s7m6to9m8, s7m6to11m10,
+ d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
+};
+
+static struct attribute *max1238_device_attrs[] = {
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_dev_attr_in2_raw.dev_attr.attr,
+ &iio_dev_attr_in3_raw.dev_attr.attr,
+ &iio_dev_attr_in4_raw.dev_attr.attr,
+ &iio_dev_attr_in5_raw.dev_attr.attr,
+ &iio_dev_attr_in6_raw.dev_attr.attr,
+ &iio_dev_attr_in7_raw.dev_attr.attr,
+ &iio_dev_attr_in8_raw.dev_attr.attr,
+ &iio_dev_attr_in9_raw.dev_attr.attr,
+ &iio_dev_attr_in10_raw.dev_attr.attr,
+ &iio_dev_attr_in11_raw.dev_attr.attr,
+ &iio_dev_attr_in0min1_raw.dev_attr.attr,
+ &iio_dev_attr_in2min3_raw.dev_attr.attr,
+ &iio_dev_attr_in4min5_raw.dev_attr.attr,
+ &iio_dev_attr_in6min7_raw.dev_attr.attr,
+ &iio_dev_attr_in8min9_raw.dev_attr.attr,
+ &iio_dev_attr_in10min11_raw.dev_attr.attr,
+ &iio_dev_attr_in1min0_raw.dev_attr.attr,
+ &iio_dev_attr_in3min2_raw.dev_attr.attr,
+ &iio_dev_attr_in5min4_raw.dev_attr.attr,
+ &iio_dev_attr_in7min6_raw.dev_attr.attr,
+ &iio_dev_attr_in9min8_raw.dev_attr.attr,
+ &iio_dev_attr_in11min10_raw.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_in_scale.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group max1238_dev_attr_group = {
+ .attrs = max1238_device_attrs,
+};
+
+static struct attribute *max1238_scan_el_attrs[] = {
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_scan_el_in1.dev_attr.attr,
+ &iio_scan_el_in2.dev_attr.attr,
+ &iio_scan_el_in3.dev_attr.attr,
+ &iio_scan_el_in4.dev_attr.attr,
+ &iio_scan_el_in5.dev_attr.attr,
+ &iio_scan_el_in6.dev_attr.attr,
+ &iio_scan_el_in7.dev_attr.attr,
+ &iio_scan_el_in8.dev_attr.attr,
+ &iio_scan_el_in9.dev_attr.attr,
+ &iio_scan_el_in10.dev_attr.attr,
+ &iio_scan_el_in11.dev_attr.attr,
+ &iio_scan_el_in0min1.dev_attr.attr,
+ &iio_scan_el_in2min3.dev_attr.attr,
+ &iio_scan_el_in4min5.dev_attr.attr,
+ &iio_scan_el_in6min7.dev_attr.attr,
+ &iio_scan_el_in8min9.dev_attr.attr,
+ &iio_scan_el_in10min11.dev_attr.attr,
+ &iio_scan_el_in1min0.dev_attr.attr,
+ &iio_scan_el_in3min2.dev_attr.attr,
+ &iio_scan_el_in5min4.dev_attr.attr,
+ &iio_scan_el_in7min6.dev_attr.attr,
+ &iio_scan_el_in9min8.dev_attr.attr,
+ &iio_scan_el_in11min10.dev_attr.attr,
+ &iio_dev_attr_in_precision.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group max1238_scan_el_group = {
+ .name = "scan_elements",
+ .attrs = max1238_scan_el_attrs,
};
+static const enum max1363_modes max11607_mode_list[] = {
+ _s0, _s1, _s2, _s3,
+ s0to1, s0to2, s0to3,
+ s2to3,
+ d0m1, d2m3, d1m0, d3m2,
+ d0m1to2m3, d1m0to3m2,
+};
+
+static const enum max1363_modes max11608_mode_list[] = {
+ _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
+ s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
+ s6to7,
+ d0m1, d2m3, d4m5, d6m7,
+ d1m0, d3m2, d5m4, d7m6,
+ d0m1to2m3, d0m1to4m5, d0m1to6m7,
+ d1m0to3m2, d1m0to5m4, d1m0to7m6,
+};
+
+static struct attribute *max11608_device_attrs[] = {
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_dev_attr_in1_raw.dev_attr.attr,
+ &iio_dev_attr_in2_raw.dev_attr.attr,
+ &iio_dev_attr_in3_raw.dev_attr.attr,
+ &iio_dev_attr_in4_raw.dev_attr.attr,
+ &iio_dev_attr_in5_raw.dev_attr.attr,
+ &iio_dev_attr_in6_raw.dev_attr.attr,
+ &iio_dev_attr_in7_raw.dev_attr.attr,
+ &iio_dev_attr_in0min1_raw.dev_attr.attr,
+ &iio_dev_attr_in2min3_raw.dev_attr.attr,
+ &iio_dev_attr_in4min5_raw.dev_attr.attr,
+ &iio_dev_attr_in6min7_raw.dev_attr.attr,
+ &iio_dev_attr_in1min0_raw.dev_attr.attr,
+ &iio_dev_attr_in3min2_raw.dev_attr.attr,
+ &iio_dev_attr_in5min4_raw.dev_attr.attr,
+ &iio_dev_attr_in7min6_raw.dev_attr.attr,
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_in_scale.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group max11608_dev_attr_group = {
+ .attrs = max11608_device_attrs,
+};
+
+static struct attribute *max11608_scan_el_attrs[] = {
+ &iio_scan_el_in0.dev_attr.attr,
+ &iio_scan_el_in1.dev_attr.attr,
+ &iio_scan_el_in2.dev_attr.attr,
+ &iio_scan_el_in3.dev_attr.attr,
+ &iio_scan_el_in4.dev_attr.attr,
+ &iio_scan_el_in5.dev_attr.attr,
+ &iio_scan_el_in6.dev_attr.attr,
+ &iio_scan_el_in7.dev_attr.attr,
+ &iio_scan_el_in0min1.dev_attr.attr,
+ &iio_scan_el_in2min3.dev_attr.attr,
+ &iio_scan_el_in4min5.dev_attr.attr,
+ &iio_scan_el_in6min7.dev_attr.attr,
+ &iio_scan_el_in1min0.dev_attr.attr,
+ &iio_scan_el_in3min2.dev_attr.attr,
+ &iio_scan_el_in5min4.dev_attr.attr,
+ &iio_scan_el_in7min6.dev_attr.attr,
+ &iio_dev_attr_in_precision.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group max11608_scan_el_group = {
+ .name = "scan_elements",
+ .attrs = max11608_scan_el_attrs,
+};
+
enum { max1361,
max1362,
max1363,
max1364,
+ max1036,
+ max1037,
+ max1038,
+ max1039,
max1136,
max1137,
max1138,
@@ -183,6 +530,24 @@ enum { max1361,
max1237,
max1238,
max1239,
+ max11600,
+ max11601,
+ max11602,
+ max11603,
+ max11604,
+ max11605,
+ max11606,
+ max11607,
+ max11608,
+ max11609,
+ max11610,
+ max11611,
+ max11612,
+ max11613,
+ max11614,
+ max11615,
+ max11616,
+ max11617,
};
/* max1363 and max1368 tested - rest from data sheet */
@@ -190,118 +555,350 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
{
.name = "max1361",
.num_inputs = 4,
+ .bits = 10,
+ .int_vref_mv = 2048,
.monitor_mode = 1,
.mode_list = max1363_mode_list,
.num_modes = ARRAY_SIZE(max1363_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1362",
.num_inputs = 4,
+ .bits = 10,
+ .int_vref_mv = 4096,
.monitor_mode = 1,
.mode_list = max1363_mode_list,
.num_modes = ARRAY_SIZE(max1363_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1363",
.num_inputs = 4,
+ .bits = 12,
+ .int_vref_mv = 2048,
.monitor_mode = 1,
.mode_list = max1363_mode_list,
.num_modes = ARRAY_SIZE(max1363_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1364",
.num_inputs = 4,
+ .bits = 12,
+ .int_vref_mv = 4096,
.monitor_mode = 1,
.mode_list = max1363_mode_list,
.num_modes = ARRAY_SIZE(max1363_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max1036",
+ .num_inputs = 4,
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max1037",
+ .num_inputs = 4,
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1236_mode_list,
+ .num_modes = ARRAY_SIZE(max1236_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max1038",
+ .num_inputs = 12,
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max1039",
+ .num_inputs = 12,
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
}, {
.name = "max1136",
.num_inputs = 4,
+ .bits = 10,
.int_vref_mv = 4096,
.mode_list = max1236_mode_list,
.num_modes = ARRAY_SIZE(max1236_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1137",
.num_inputs = 4,
+ .bits = 10,
.int_vref_mv = 2048,
.mode_list = max1236_mode_list,
.num_modes = ARRAY_SIZE(max1236_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1138",
.num_inputs = 12,
+ .bits = 10,
.int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
}, {
.name = "max1139",
.num_inputs = 12,
+ .bits = 10,
.int_vref_mv = 2048,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
}, {
.name = "max1236",
.num_inputs = 4,
+ .bits = 12,
.int_vref_mv = 4096,
.mode_list = max1236_mode_list,
.num_modes = ARRAY_SIZE(max1236_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1237",
.num_inputs = 4,
+ .bits = 12,
.int_vref_mv = 2048,
.mode_list = max1236_mode_list,
.num_modes = ARRAY_SIZE(max1236_mode_list),
.default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
}, {
.name = "max1238",
.num_inputs = 12,
+ .bits = 12,
.int_vref_mv = 4096,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
}, {
.name = "max1239",
.num_inputs = 12,
+ .bits = 12,
.int_vref_mv = 2048,
.mode_list = max1238_mode_list,
.num_modes = ARRAY_SIZE(max1238_mode_list),
.default_mode = s0to11,
- },
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11600",
+ .num_inputs = 4,
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11601",
+ .num_inputs = 4,
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11602",
+ .num_inputs = 8,
+ .bits = 8,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11603",
+ .num_inputs = 8,
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11604",
+ .num_inputs = 12,
+ .bits = 8,
+ .int_vref_mv = 4098,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11605",
+ .num_inputs = 12,
+ .bits = 8,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11606",
+ .num_inputs = 4,
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11607",
+ .num_inputs = 4,
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11608",
+ .num_inputs = 8,
+ .bits = 10,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11609",
+ .num_inputs = 8,
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11610",
+ .num_inputs = 12,
+ .bits = 10,
+ .int_vref_mv = 4098,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11611",
+ .num_inputs = 12,
+ .bits = 10,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11612",
+ .num_inputs = 4,
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11613",
+ .num_inputs = 4,
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max11607_mode_list,
+ .num_modes = ARRAY_SIZE(max11607_mode_list),
+ .default_mode = s0to3,
+ .dev_attrs = &max1363_dev_attr_group,
+ .scan_attrs = &max1363_scan_el_group,
+ }, {
+ .name = "max11614",
+ .num_inputs = 8,
+ .bits = 12,
+ .int_vref_mv = 4096,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11615",
+ .num_inputs = 8,
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max11608_mode_list,
+ .num_modes = ARRAY_SIZE(max11608_mode_list),
+ .default_mode = s0to7,
+ .dev_attrs = &max11608_dev_attr_group,
+ .scan_attrs = &max11608_scan_el_group,
+ }, {
+ .name = "max11616",
+ .num_inputs = 12,
+ .bits = 12,
+ .int_vref_mv = 4098,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }, {
+ .name = "max11617",
+ .num_inputs = 12,
+ .bits = 12,
+ .int_vref_mv = 2048,
+ .mode_list = max1238_mode_list,
+ .num_modes = ARRAY_SIZE(max1238_mode_list),
+ .default_mode = s0to11,
+ .dev_attrs = &max1238_dev_attr_group,
+ .scan_attrs = &max1238_scan_el_group,
+ }
};
-static int max1363_write_basic_config(struct i2c_client *client,
- unsigned char d1,
- unsigned char d2)
-{
- int ret;
- u8 *tx_buf = kmalloc(2 , GFP_KERNEL);
- if (!tx_buf)
- return -ENOMEM;
- tx_buf[0] = d1;
- tx_buf[1] = d2;
-
- ret = i2c_master_send(client, tx_buf, 2);
- kfree(tx_buf);
- return (ret > 0) ? 0 : ret;
-}
-
-static int max1363_set_scan_mode(struct max1363_state *st)
-{
- st->configbyte &= ~(MAX1363_CHANNEL_SEL_MASK
- | MAX1363_SCAN_MASK
- | MAX1363_SE_DE_MASK);
- st->configbyte |= st->current_mode->conf;
-
- return max1363_write_basic_config(st->client,
- st->setupbyte,
- st->configbyte);
-}
-
static int max1363_initial_setup(struct max1363_state *st)
{
st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
@@ -318,167 +915,6 @@ static int max1363_initial_setup(struct max1363_state *st)
return max1363_set_scan_mode(st);
}
-static ssize_t max1363_show_av_scan_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *st = dev_info->dev_data;
- int i, len = 0;
-
- for (i = 0; i < st->chip_info->num_modes; i++)
- len += sprintf(buf + len, "%s ",
- max1363_mode_table[st->chip_info
- ->mode_list[i]].name);
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-
-/* The dev here is the sysfs related one, not the underlying i2c one */
-static ssize_t max1363_scan_direct(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *st = dev_info->dev_data;
- int len = 0, ret, i;
- struct i2c_client *client = st->client;
- char *rxbuf;
-
- if (st->current_mode->numvals == 0)
- return 0;
- rxbuf = kmalloc(st->current_mode->numvals*2, GFP_KERNEL);
- if (rxbuf == NULL)
- return -ENOMEM;
-
- /* Interpretation depends on whether these are signed or not!*/
- /* Assume not for now */
- ret = i2c_master_recv(client, rxbuf, st->current_mode->numvals*2);
-
- if (ret < 0)
- return ret;
- for (i = 0; i < st->current_mode->numvals; i++)
- len += sprintf(buf+len, "%d ",
- ((int)(rxbuf[i*2+0]&0x0F) << 8)
- + ((int)(rxbuf[i*2+1])));
- kfree(rxbuf);
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static ssize_t max1363_scan(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&dev_info->mlock);
- if (dev_info->currentmode == INDIO_RING_TRIGGERED)
- ret = max1363_scan_from_ring(dev, attr, buf);
- else
- ret = max1363_scan_direct(dev, attr, buf);
- mutex_unlock(&dev_info->mlock);
-
- return ret;
-}
-
-/* Cannot query the device, so use local copy of state */
-static ssize_t max1363_show_scan_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *st = dev_info->dev_data;
-
- return sprintf(buf, "%s\n", st->current_mode->name);
-}
-
-static const struct max1363_mode
-*__max1363_find_mode_in_ci(const struct max1363_chip_info *info,
- const char *buf)
-{
- int i;
- for (i = 0; i < info->num_modes; i++)
- if (strcmp(max1363_mode_table[info->mode_list[i]].name, buf)
- == 0)
- return &max1363_mode_table[info->mode_list[i]];
- return NULL;
-}
-
-static ssize_t max1363_store_scan_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *st = dev_info->dev_data;
- const struct max1363_mode *new_mode;
- int ret;
-
- mutex_lock(&dev_info->mlock);
- new_mode = NULL;
- /* Avoid state changes if a ring buffer is enabled */
- if (!iio_ring_enabled(dev_info)) {
- new_mode
- = __max1363_find_mode_in_ci(st->chip_info, buf);
- if (!new_mode) {
- ret = -EINVAL;
- goto error_ret;
- }
- st->current_mode = new_mode;
- ret = max1363_set_scan_mode(st);
- if (ret)
- goto error_ret;
- } else {
- ret = -EBUSY;
- goto error_ret;
- }
- mutex_unlock(&dev_info->mlock);
-
- return len;
-
-error_ret:
- mutex_unlock(&dev_info->mlock);
-
- return ret;
-}
-
-IIO_DEV_ATTR_AVAIL_SCAN_MODES(max1363_show_av_scan_modes);
-IIO_DEV_ATTR_SCAN_MODE(S_IRUGO | S_IWUSR,
- max1363_show_scan_mode,
- max1363_store_scan_mode);
-
-IIO_DEV_ATTR_SCAN(max1363_scan);
-
-static ssize_t max1363_show_name(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *st = dev_info->dev_data;
- return sprintf(buf, "%s\n", st->chip_info->name);
-}
-
-IIO_DEVICE_ATTR(name, S_IRUGO, max1363_show_name, NULL, 0);
-
-/*name export */
-
-static struct attribute *max1363_attributes[] = {
- &iio_dev_attr_available_scan_modes.dev_attr.attr,
- &iio_dev_attr_scan_mode.dev_attr.attr,
- &iio_dev_attr_scan.dev_attr.attr,
- &iio_dev_attr_name.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group max1363_attribute_group = {
- .attrs = max1363_attributes,
-};
-
static int __devinit max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -506,6 +942,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
ret = -ENODEV;
goto error_free_st;
}
+
st->reg = regulator_get(&client->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
@@ -520,20 +957,35 @@ static int __devinit max1363_probe(struct i2c_client *client,
goto error_disable_reg;
}
+ st->indio_dev->available_scan_masks
+ = kzalloc(sizeof(*st->indio_dev->available_scan_masks)*
+ (st->chip_info->num_modes + 1), GFP_KERNEL);
+ if (!st->indio_dev->available_scan_masks) {
+ ret = -ENOMEM;
+ goto error_free_device;
+ }
+
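+ /* Advertise one scan mask per supported mode; the kzalloc above
+ * leaves a zero entry to terminate the list */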
+ for (i = 0; i < st->chip_info->num_modes; i++)
+ st->indio_dev->available_scan_masks[i] =
+ max1363_mode_table[st->chip_info->mode_list[i]]
+ .modemask;
/* Establish that the iio_dev is a child of the i2c device */
st->indio_dev->dev.parent = &client->dev;
- st->indio_dev->attrs = &max1363_attribute_group;
+ st->indio_dev->attrs = st->chip_info->dev_attrs;
+
+ /* Todo: this shouldn't be here. */
+ st->indio_dev->scan_el_attrs = st->chip_info->scan_attrs;
st->indio_dev->dev_data = (void *)(st);
st->indio_dev->driver_module = THIS_MODULE;
st->indio_dev->modes = INDIO_DIRECT_MODE;
ret = max1363_initial_setup(st);
if (ret)
- goto error_free_device;
+ goto error_free_available_scan_masks;
ret = max1363_register_ring_funcs_and_init(st->indio_dev);
if (ret)
- goto error_free_device;
+ goto error_free_available_scan_masks;
ret = iio_device_register(st->indio_dev);
if (ret)
@@ -545,6 +997,8 @@ static int __devinit max1363_probe(struct i2c_client *client,
return 0;
error_cleanup_ring:
max1363_ring_cleanup(st->indio_dev);
+error_free_available_scan_masks:
+ kfree(st->indio_dev->available_scan_masks);
error_free_device:
if (!regdone)
iio_free_device(st->indio_dev);
@@ -570,6 +1024,7 @@ static int max1363_remove(struct i2c_client *client)
struct iio_dev *indio_dev = st->indio_dev;
max1363_uninitialize_ring(indio_dev->ring);
max1363_ring_cleanup(indio_dev);
+ kfree(st->indio_dev->available_scan_masks);
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
@@ -586,6 +1041,10 @@ static const struct i2c_device_id max1363_id[] = {
{ "max1362", max1362 },
{ "max1363", max1363 },
{ "max1364", max1364 },
+ { "max1036", max1036 },
+ { "max1037", max1037 },
+ { "max1038", max1038 },
+ { "max1039", max1039 },
{ "max1136", max1136 },
{ "max1137", max1137 },
{ "max1138", max1138 },
@@ -594,6 +1053,24 @@ static const struct i2c_device_id max1363_id[] = {
{ "max1237", max1237 },
{ "max1238", max1238 },
{ "max1239", max1239 },
+ { "max11600", max11600 },
+ { "max11601", max11601 },
+ { "max11602", max11602 },
+ { "max11603", max11603 },
+ { "max11604", max11604 },
+ { "max11605", max11605 },
+ { "max11606", max11606 },
+ { "max11607", max11607 },
+ { "max11608", max11608 },
+ { "max11609", max11609 },
+ { "max11610", max11610 },
+ { "max11611", max11611 },
+ { "max11612", max11612 },
+ { "max11613", max11613 },
+ { "max11614", max11614 },
+ { "max11615", max11615 },
+ { "max11616", max11616 },
+ { "max11617", max11617 },
{}
};
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index f94fe2d38a9..56688dc9c92 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -17,6 +17,7 @@
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include "../iio.h"
#include "../ring_generic.h"
@@ -26,32 +27,39 @@
#include "max1363.h"
-ssize_t max1363_scan_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/* Todo: test this */
+int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max1363_state *info = dev_info->dev_data;
- int i, ret, len = 0;
- char *ring_data;
+ unsigned long numvals;
+ int count = 0, ret;
+ u8 *ring_data;
+ if (!(st->current_mode->modemask & mask)) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ numvals = hweight_long(st->current_mode->modemask);
- ring_data = kmalloc(info->current_mode->numvals*2, GFP_KERNEL);
+ ring_data = kmalloc(numvals*2, GFP_KERNEL);
if (ring_data == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- ret = dev_info->ring->access.read_last(dev_info->ring, ring_data);
+ ret = st->indio_dev->ring->access.read_last(st->indio_dev->ring,
+ ring_data);
if (ret)
goto error_free_ring_data;
- len += sprintf(buf+len, "ring ");
- for (i = 0; i < info->current_mode->numvals; i++)
- len += sprintf(buf + len, "%d ",
- ((int)(ring_data[i*2 + 0] & 0x0F) << 8)
- + ((int)(ring_data[i*2 + 1])));
- len += sprintf(buf + len, "\n");
- kfree(ring_data);
-
- return len;
+ /* Need a count of channels prior to this one */
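+ /* e.g. with modemask 0b1010 and mask 0b1000 a single enabled
+ * channel precedes the requested one, so count ends up as 1 */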
+ mask >>= 1;
+ while (mask) {
+ if (mask & st->current_mode->modemask)
+ count++;
+ mask >>= 1;
+ }
+ if (st->chip_info->bits != 8)
+ ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8)
+ + (int)(ring_data[count*2 + 1]);
+ else
+ ret = ring_data[count];
error_free_ring_data:
kfree(ring_data);
@@ -70,9 +78,25 @@ static int max1363_ring_preenable(struct iio_dev *indio_dev)
{
struct max1363_state *st = indio_dev->dev_data;
size_t d_size;
+ unsigned long numvals;
+ /*
+ * Need to figure out the current mode based upon the requested
+ * scan mask in iio_dev
+ */
+ st->current_mode = max1363_match_mode(st->indio_dev->scan_mask,
+ st->chip_info);
+ if (!st->current_mode)
+ return -EINVAL;
+
+ max1363_set_scan_mode(st);
+
+ numvals = hweight_long(st->current_mode->modemask);
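+ /* Bytes per datum: one byte per channel on 8 bit parts, two
+ * otherwise, padded so the s64 timestamp is 8 byte aligned */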
if (indio_dev->ring->access.set_bpd) {
- d_size = st->current_mode->numvals*2 + sizeof(s64);
+ if (st->chip_info->bits != 8)
+ d_size = numvals*2 + sizeof(s64);
+ else
+ d_size = numvals + sizeof(s64);
if (d_size % 8)
d_size += 8 - (d_size % 8);
indio_dev->ring->access.set_bpd(indio_dev->ring, d_size);
@@ -118,7 +142,7 @@ static int max1363_ring_predisable(struct iio_dev *indio_dev)
* then. Some triggers will generate their own time stamp. Currently
* there is no way of notifying them when no one cares.
**/
-void max1363_poll_func_th(struct iio_dev *indio_dev)
+static void max1363_poll_func_th(struct iio_dev *indio_dev)
{
struct max1363_state *st = indio_dev->dev_data;
@@ -145,9 +169,13 @@ static void max1363_poll_bh_to_ring(struct work_struct *work_s)
__u8 *rxbuf;
int b_sent;
size_t d_size;
+ unsigned long numvals = hweight_long(st->current_mode->modemask);
/* Ensure the timestamp is 8 byte aligned */
- d_size = st->current_mode->numvals*2 + sizeof(s64);
+ if (st->chip_info->bits != 8)
+ d_size = numvals*2 + sizeof(s64);
+ else
+ d_size = numvals + sizeof(s64);
if (d_size % sizeof(s64))
d_size += sizeof(s64) - (d_size % sizeof(s64));
@@ -159,16 +187,16 @@ static void max1363_poll_bh_to_ring(struct work_struct *work_s)
* might as well have this test in here in the meantime as it does
* no harm.
*/
- if (st->current_mode->numvals == 0)
+ if (numvals == 0)
return;
rxbuf = kmalloc(d_size, GFP_KERNEL);
if (rxbuf == NULL)
return;
-
- b_sent = i2c_master_recv(st->client,
- rxbuf,
- st->current_mode->numvals*2);
+ if (st->chip_info->bits != 8)
+ b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
+ else
+ b_sent = i2c_master_recv(st->client, rxbuf, numvals);
if (b_sent < 0)
goto done;
@@ -238,5 +266,5 @@ void max1363_uninitialize_ring(struct iio_ring_buffer *ring)
int max1363_initialize_ring(struct iio_ring_buffer *ring)
{
- return iio_ring_buffer_register(ring);
+ return iio_ring_buffer_register(ring, 0);
};
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
index f42bafb3a89..3f96f8696a4 100644
--- a/drivers/staging/iio/chrdev.h
+++ b/drivers/staging/iio/chrdev.h
@@ -94,7 +94,7 @@ struct iio_event_interface {
struct iio_chrdev_minor_attr attr;
struct module *owner;
void *private;
- char _name[20];
+ char _name[35];
char _attrname[20];
};
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
new file mode 100644
index 00000000000..c4043610c0d
--- /dev/null
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -0,0 +1,13 @@
+#
+# IIO Digital Gyroscope Sensor drivers configuration
+#
+comment "Digital gyroscope sensors"
+
+config ADIS16260
+ tristate "Analog Devices ADIS16260/5 Digital Gyroscope Sensor SPI driver"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16260/5
+ programmable digital gyroscope sensor.
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
new file mode 100644
index 00000000000..6d2c547686c
--- /dev/null
+++ b/drivers/staging/iio/gyro/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for digital gyroscope sensor drivers
+#
+
+adis16260-y := adis16260_core.o
+adis16260-$(CONFIG_IIO_RING_BUFFER) += adis16260_ring.o adis16260_trigger.o
+obj-$(CONFIG_ADIS16260) += adis16260.o
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
new file mode 100644
index 00000000000..f19efb4c91c
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -0,0 +1,175 @@
+#ifndef SPI_ADIS16260_H_
+#define SPI_ADIS16260_H_
+
+#define ADIS16260_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16260_READ_REG(a) a
+#define ADIS16260_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16260_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16260_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16260_GYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16260_AUX_ADC 0x0A /* analog input channel measurement */
+#define ADIS16260_TEMP_OUT 0x0C /* internal temperature measurement */
+#define ADIS16260_ANGL_OUT 0x0E /* angle displacement */
+#define ADIS16260_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
+#define ADIS16260_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
+#define ADIS16260_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
+#define ADIS16260_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
+#define ADIS16260_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
+#define ADIS16260_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
+#define ADIS16260_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16260_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16260_GPIO_CTRL 0x32 /* Control, digital I/O line */
+#define ADIS16260_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
+#define ADIS16260_SMPL_PRD 0x36 /* Control, internal sample rate */
+#define ADIS16260_SENS_AVG 0x38 /* Control, dynamic range, filtering */
+#define ADIS16260_SLP_CNT 0x3A /* Control, sleep mode initiation */
+#define ADIS16260_DIAG_STAT 0x3C /* Diagnostic, error flags */
+#define ADIS16260_GLOB_CMD 0x3E /* Control, global commands */
+#define ADIS16260_LOT_ID1 0x52 /* Lot Identification Code 1 */
+#define ADIS16260_LOT_ID2 0x54 /* Lot Identification Code 2 */
+#define ADIS16260_PROD_ID 0x56 /* Product identifier;
+ * convert to decimal = 16,265/16,260 */
+#define ADIS16260_SERIAL_NUM 0x58 /* Serial number */
+
+#define ADIS16260_OUTPUTS 5
+
+#define ADIS16260_ERROR_ACTIVE (1<<14)
+#define ADIS16260_NEW_DATA (1<<15)
+
+/* MSC_CTRL */
+#define ADIS16260_MSC_CTRL_MEM_TEST (1<<11)
+/* Internal self-test enable */
+#define ADIS16260_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16260_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16260_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16260_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16260_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+/* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
+#define ADIS16260_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F
+
+/* SLP_CNT */
+#define ADIS16260_SLP_CNT_POWER_OFF 0x80
+
+/* DIAG_STAT */
+#define ADIS16260_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16260_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16260_DIAG_STAT_FLASH_CHK (1<<6)
+#define ADIS16260_DIAG_STAT_SELF_TEST (1<<5)
+#define ADIS16260_DIAG_STAT_OVERFLOW (1<<4)
+#define ADIS16260_DIAG_STAT_SPI_FAIL (1<<3)
+#define ADIS16260_DIAG_STAT_FLASH_UPT (1<<2)
+#define ADIS16260_DIAG_STAT_POWER_HIGH (1<<1)
+#define ADIS16260_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16260_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16260_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16260_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16260_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16260_GLOB_CMD_AUTO_NULL (1<<0)
+
+#define ADIS16260_MAX_TX 24
+#define ADIS16260_MAX_RX 24
+
+#define ADIS16260_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16260_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16260_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16260_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: CLEAN
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: recieve buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16260_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16260_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16260_scan {
+ ADIS16260_SCAN_SUPPLY,
+ ADIS16260_SCAN_GYRO,
+ ADIS16260_SCAN_AUX_ADC,
+ ADIS16260_SCAN_TEMP,
+ ADIS16260_SCAN_ANGL,
+};
+
+void adis16260_remove_trigger(struct iio_dev *indio_dev);
+int adis16260_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16260_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16260_configure_ring(struct iio_dev *indio_dev);
+void adis16260_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16260_initialize_ring(struct iio_ring_buffer *ring);
+void adis16260_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16260_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16260_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16260_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16260_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16260_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16260_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16260_H_ */
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
new file mode 100644
index 00000000000..c93f4d580fc
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -0,0 +1,661 @@
+/*
+ * ADIS16260 Programmable Digital Gyroscope Sensor Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../adc/adc.h"
+#include "gyro.h"
+
+#include "adis16260.h"
+
+#define DRIVER_NAME "adis16260"
+
+static int adis16260_check_status(struct device *dev);
+
+/**
+ * adis16260_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16260_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16260_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16260_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16260_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 20,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16260_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16260_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16260_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16260_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 30,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 30,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16260_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16260_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16260_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16260_ERROR_ACTIVE)
+ adis16260_check_status(dev);
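+ /* Sign extend from 'bits' wide to 16 bits: shift the sign bit up
+ * to bit 15, then arithmetic shift back down */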
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16260_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16260_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16260_ERROR_ACTIVE)
+ adis16260_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16260_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16260_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16260_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16260_spi_read_signed(dev, attr, buf, 14);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16260_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16260_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t adis16260_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = adis16260_spi_read_reg_16(dev,
+ ADIS16260_SMPL_PRD,
+ &t);
+ if (ret)
+ return ret;
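+ /* Sample rate is the base rate (2048 or 66 depending on the time
+ * base bit) divided by (divider + 1), e.g. a divider of 1 on the
+ * fast base gives 1024 SPS */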
+ sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 66 : 2048;
+ sps /= (t & ADIS16260_SMPL_PRD_DIV_MASK) + 1;
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t adis16260_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
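+ /* Convert the requested rate into a divider for the fast time
+ * base, e.g. asking for 256 SPS gives t = 2048/256 - 1 = 7 */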
+ t = (2048 / val);
+ if (t > 0)
+ t--;
+ t &= ADIS16260_SMPL_PRD_DIV_MASK;
+ if ((t & ADIS16260_SMPL_PRD_DIV_MASK) >= 0x0A)
+ st->us->max_speed_hz = ADIS16260_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADIS16260_SPI_FAST;
+
+ ret = adis16260_spi_write_reg_8(dev,
+ ADIS16260_SMPL_PRD,
+ t);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static int adis16260_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16260_spi_write_reg_8(dev,
+ ADIS16260_GLOB_CMD,
+ ADIS16260_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16260_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16260_reset(dev);
+ }
+ return -EINVAL;
+}
+
+int adis16260_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 msc;
+ ret = adis16260_spi_read_reg_16(dev, ADIS16260_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH;
+ if (enable)
+ msc |= ADIS16260_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16260_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16260_spi_write_reg_16(dev, ADIS16260_MSC_CTRL, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+static int adis16260_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+
+ ret = adis16260_spi_write_reg_16(dev, ADIS16260_SLP_CNT, val);
+ if (ret)
+ dev_err(dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16260_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16260_spi_write_reg_16(dev,
+ ADIS16260_MSC_CTRL,
+ ADIS16260_MSC_CTRL_MEM_TEST);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16260_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16260_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16260_spi_read_reg_16(dev, ADIS16260_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status & 0x7F;
+ if (status & ADIS16260_DIAG_STAT_FLASH_CHK)
+ dev_err(dev, "Flash checksum error\n");
+ if (status & ADIS16260_DIAG_STAT_SELF_TEST)
+ dev_err(dev, "Self test error\n");
+ if (status & ADIS16260_DIAG_STAT_OVERFLOW)
+ dev_err(dev, "Sensor overrange\n");
+ if (status & ADIS16260_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16260_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16260_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16260_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16260_initial_setup(struct adis16260_state *st)
+{
+ int ret;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* Disable IRQ */
+ ret = adis16260_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16260_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16260_check_status(dev);
+ if (ret) {
+ adis16260_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16260_STARTUP_DELAY);
+ ret = adis16260_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply,
+ adis16260_read_12bit_unsigned,
+ ADIS16260_SUPPLY_OUT);
+static IIO_CONST_ATTR(in_supply_scale, "0.0018315");
+
+static IIO_DEV_ATTR_GYRO(adis16260_read_14bit_signed,
+ ADIS16260_GYRO_OUT);
+static IIO_DEV_ATTR_GYRO_SCALE(S_IWUSR | S_IRUGO,
+ adis16260_read_14bit_signed,
+ adis16260_write_16bit,
+ ADIS16260_GYRO_SCALE);
+static IIO_DEV_ATTR_GYRO_OFFSET(S_IWUSR | S_IRUGO,
+ adis16260_read_12bit_signed,
+ adis16260_write_16bit,
+ ADIS16260_GYRO_OFF);
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16260_read_12bit_unsigned);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "0.1453");
+
+static IIO_DEV_ATTR_IN_RAW(0, adis16260_read_12bit_unsigned,
+ ADIS16260_AUX_ADC);
+static IIO_CONST_ATTR(in0_scale, "0.0006105");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16260_read_frequency,
+ adis16260_write_frequency);
+static IIO_DEV_ATTR_ANGL(adis16260_read_14bit_signed,
+ ADIS16260_ANGL_OUT);
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16260_write_reset, 0);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("256 2048");
+
+static IIO_CONST_ATTR(name, "adis16260");
+
+static struct attribute *adis16260_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16260_event_attribute_group = {
+ .attrs = adis16260_event_attributes,
+};
+
+static struct attribute *adis16260_attributes[] = {
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_offset.dev_attr.attr,
+ &iio_dev_attr_angl_raw.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_const_attr_in0_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16260_attribute_group = {
+ .attrs = adis16260_attributes,
+};
+
+static int __devinit adis16260_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16260_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16260_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16260_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16260_event_attribute_group;
+ st->indio_dev->attrs = &adis16260_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16260_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16260_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16260");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16260_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16260_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16260_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16260_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16260_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16260_remove(struct spi_device *spi)
+{
+ int ret;
+ struct adis16260_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = adis16260_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ adis16260_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16260_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16260_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver adis16260_driver = {
+ .driver = {
+ .name = "adis16260",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16260_probe,
+ .remove = __devexit_p(adis16260_remove),
+};
+
+static __init int adis16260_init(void)
+{
+ return spi_register_driver(&adis16260_driver);
+}
+module_init(adis16260_init);
+
+static __exit void adis16260_exit(void)
+{
+ spi_unregister_driver(&adis16260_driver);
+}
+module_exit(adis16260_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
new file mode 100644
index 00000000000..4c4390ca6d7
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -0,0 +1,256 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "adis16260.h"
+
+/**
+ * combine_8_to_16() - utility function to munge two u8s into a u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
+
+static IIO_SCAN_EL_C(supply, ADIS16260_SCAN_SUPPLY, IIO_UNSIGNED(12),
+ ADIS16260_SUPPLY_OUT, NULL);
+static IIO_SCAN_EL_C(gyro, ADIS16260_SCAN_GYRO, IIO_SIGNED(14),
+ ADIS16260_GYRO_OUT, NULL);
+static IIO_SCAN_EL_C(aux_adc, ADIS16260_SCAN_AUX_ADC, IIO_SIGNED(14),
+ ADIS16260_AUX_ADC, NULL);
+static IIO_SCAN_EL_C(temp, ADIS16260_SCAN_TEMP, IIO_UNSIGNED(12),
+ ADIS16260_TEMP_OUT, NULL);
+static IIO_SCAN_EL_C(angl, ADIS16260_SCAN_ANGL, IIO_UNSIGNED(12),
+ ADIS16260_ANGL_OUT, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(5);
+
+static struct attribute *adis16260_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_gyro.dev_attr.attr,
+ &iio_scan_el_aux_adc.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_scan_el_angl.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16260_scan_el_group = {
+ .attrs = adis16260_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16260_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16260_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16260_read_ring_data() - read data registers which will be placed into the ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16260_read_ring_data(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[ADIS16260_OUTPUTS + 1];
+ int ret;
+ int i;
+
+ mutex_lock(&st->buf_lock);
+
+ spi_message_init(&msg);
+
+ memset(xfers, 0, sizeof(xfers));
+ for (i = 0; i <= ADIS16260_OUTPUTS; i++) {
+ xfers[i].bits_per_word = 8;
+ xfers[i].cs_change = 1;
+ xfers[i].len = 2;
+ xfers[i].delay_usecs = 30;
+ xfers[i].tx_buf = st->tx + 2 * i;
+ if (i < 2) /* SUPPLY_OUT:0x02 GYRO_OUT:0x04 */
+ st->tx[2 * i]
+ = ADIS16260_READ_REG(ADIS16260_SUPPLY_OUT
+ + 2 * i);
+ else /* 0x06 to 0x09 is reserved */
+ st->tx[2 * i]
+ = ADIS16260_READ_REG(ADIS16260_SUPPLY_OUT
+ + 2 * i + 4);
+ st->tx[2 * i + 1] = 0;
+ if (i >= 1)
+ xfers[i].rx_buf = rx + 2 * (i - 1);
+ spi_message_add_tail(&xfers[i], &msg);
+ }
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+
+static void adis16260_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16260_state *st
+ = container_of(work_s, struct adis16260_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16260_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
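+/*
+ * Sizing sketch (not from the original, just the arithmetic spelled out):
+ * with all five 16-bit channels enabled the data takes 10 bytes, which is
+ * rounded up to 16 so the s64 timestamp stays 8-byte aligned, giving a
+ * 24 byte scan in total.
+ */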
+static int adis16260_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count)
+ /* Timestamp (aligned s64) and data */
+ size = (((indio_dev->scan_count * sizeof(s16))
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+static int adis16260_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16260_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16260_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16260_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16260_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16260_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_aux_adc.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_angl.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16260_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16260_data_rdy_ring_preenable;
+ ring->postenable = &adis16260_data_rdy_ring_postenable;
+ ring->predisable = &adis16260_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16260_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16260_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16260_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/gyro/adis16260_trigger.c b/drivers/staging/iio/gyro/adis16260_trigger.c
new file mode 100644
index 00000000000..b3c565942b8
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260_trigger.c
@@ -0,0 +1,124 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16260.h"
+
+/**
+ * adis16260_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16260_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16260_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16260_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16260_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16260_trigger_attr_group = {
+ .attrs = adis16260_trigger_attrs,
+};
+
+/**
+ * adis16260_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16260_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16260_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16260_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16260_trig_try_reen() - try re-enabling irq for data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16260_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16260_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ return 0;
+}
+
+int adis16260_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16260_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ if (st->trig == NULL)
+ return -ENOMEM;
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16260-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16260_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16260_trig_try_reen;
+ st->trig->control_attrs = &adis16260_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16260_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16260_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/gyro/gyro.h b/drivers/staging/iio/gyro/gyro.h
new file mode 100644
index 00000000000..f68edab8f30
--- /dev/null
+++ b/drivers/staging/iio/gyro/gyro.h
@@ -0,0 +1,43 @@
+
+#include "../sysfs.h"
+
+/* Gyroscope types of attribute */
+
+#define IIO_DEV_ATTR_GYRO_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_X_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_x_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Y_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_y_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Z_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_z_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_X_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_x_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Y_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_y_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Z_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_z_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO_SCALE(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(gyro_scale, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_GYRO(_show, _addr) \
+ IIO_DEVICE_ATTR(gyro_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_GYRO_X(_show, _addr) \
+ IIO_DEVICE_ATTR(gyro_x_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Y(_show, _addr) \
+ IIO_DEVICE_ATTR(gyro_y_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_GYRO_Z(_show, _addr) \
+ IIO_DEVICE_ATTR(gyro_z_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL(_show, _addr) \
+ IIO_DEVICE_ATTR(angl_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index 71dbfe12b57..fcee47cbe89 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -96,6 +96,7 @@ void iio_remove_event_from_list(struct iio_event_handler_list *el,
* control method is used
* @scan_count: [INTERN] the number of elements in the current scan mode
* @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @scan_timestamp: [INTERN] does the scan mode include a timestamp
* @trig: [INTERN] current device trigger (ring buffer modes)
* @pollfunc: [DRIVER] function run on trigger being recieved
@@ -122,7 +123,8 @@ struct iio_dev {
struct attribute_group *scan_el_attrs;
int scan_count;
- u16 scan_mask;
+ u32 scan_mask;
+ u32 *available_scan_masks;
bool scan_timestamp;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -132,22 +134,57 @@ struct iio_dev {
* These are mainly provided to allow for a change of implementation if a device
* has a large number of scan elements
*/
-#define IIO_MAX_SCAN_LENGTH 15
+#define IIO_MAX_SCAN_LENGTH 31
+
+/* Note: 0 is used as the error indicator, since a mask of zero makes no sense. */
+static inline u32 iio_scan_mask_match(u32 *av_masks, u32 mask)
+{
+ while (*av_masks) {
+ if (!(~*av_masks & mask))
+ return *av_masks;
+ av_masks++;
+ }
+ return 0;
+}
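+
+/*
+ * Example (illustrative only): with available masks { 0x03, 0x0f, 0 } a
+ * requested mask of 0x05 matches 0x0f (the first entry containing every
+ * requested bit), while 0x11 matches nothing and 0 is returned as the error
+ * value.
+ */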
static inline int iio_scan_mask_query(struct iio_dev *dev_info, int bit)
{
+ u32 mask;
+
if (bit > IIO_MAX_SCAN_LENGTH)
return -EINVAL;
+
+ if (!dev_info->scan_mask)
+ return 0;
+
+ if (dev_info->available_scan_masks)
+ mask = iio_scan_mask_match(dev_info->available_scan_masks,
+ dev_info->scan_mask);
else
- return !!(dev_info->scan_mask & (1 << bit));
+ mask = dev_info->scan_mask;
+
+ if (!mask)
+ return -EINVAL;
+
+ return !!(mask & (1 << bit));
};
static inline int iio_scan_mask_set(struct iio_dev *dev_info, int bit)
{
+ u32 mask;
+ u32 trialmask = dev_info->scan_mask | (1 << bit);
+
if (bit > IIO_MAX_SCAN_LENGTH)
return -EINVAL;
- dev_info->scan_mask |= (1 << bit);
+ if (dev_info->available_scan_masks) {
+ mask = iio_scan_mask_match(dev_info->available_scan_masks,
+ trialmask);
+ if (!mask)
+ return -EINVAL;
+ }
+ dev_info->scan_mask = trialmask;
dev_info->scan_count++;
+
return 0;
};
@@ -340,7 +377,7 @@ void iio_deallocate_chrdev(struct iio_handler *handler);
#define IIO_UNSIGNED(a) (a)
extern dev_t iio_devt;
-extern struct class iio_class;
+extern struct bus_type iio_bus_type;
/**
* iio_put_device() - reference counted deallocation of struct device
diff --git a/drivers/staging/iio/imu/Kconfig b/drivers/staging/iio/imu/Kconfig
new file mode 100644
index 00000000000..6308d6faad5
--- /dev/null
+++ b/drivers/staging/iio/imu/Kconfig
@@ -0,0 +1,33 @@
+#
+# IIO imu drivers configuration
+#
+comment "Inertial measurement units"
+
+config ADIS16300
+ tristate "Analog Devices ADIS16300 IMU SPI driver"
+ depends on SPI
+ select IIO_SW_RING
+ select IIO_RING_BUFFER
+ select IIO_TRIGGER
+ help
+ Say yes here to build support for Analog Devices adis16300 four degrees
+ of freedom inertial sensor.
+
+config ADIS16350
+ tristate "Analog Devices ADIS16350/54/55/60/62/64/65 IMU SPI driver"
+ depends on SPI
+ select IIO_TRIGGER if IIO_RING_BUFFER
+ select IIO_SW_RING if IIO_RING_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16350/54/55/60/62/64/65
+ high precision tri-axis inertial sensor.
+
+config ADIS16400
+ tristate "Analog Devices ADIS16400/5 IMU SPI driver"
+ depends on SPI
+ select IIO_SW_RING
+ select IIO_RING_BUFFER
+ select IIO_TRIGGER
+ help
+ Say yes here to build support for Analog Devices adis16400/5 triaxial
+ inertial sensor with Magnetometer.
\ No newline at end of file
diff --git a/drivers/staging/iio/imu/Makefile b/drivers/staging/iio/imu/Makefile
new file mode 100644
index 00000000000..31df7359e20
--- /dev/null
+++ b/drivers/staging/iio/imu/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for Inertial Measurement Units
+#
+adis16300-y := adis16300_core.o
+adis16300-$(CONFIG_IIO_RING_BUFFER) += adis16300_ring.o adis16300_trigger.o
+obj-$(CONFIG_ADIS16300) += adis16300.o
+
+adis16350-y := adis16350_core.o
+adis16350-$(CONFIG_IIO_RING_BUFFER) += adis16350_ring.o adis16350_trigger.o
+obj-$(CONFIG_ADIS16350) += adis16350.o
+
+adis16400-y := adis16400_core.o
+adis16400-$(CONFIG_IIO_RING_BUFFER) += adis16400_ring.o adis16400_trigger.o
+obj-$(CONFIG_ADIS16400) += adis16400.o
\ No newline at end of file
diff --git a/drivers/staging/iio/imu/adis16300.h b/drivers/staging/iio/imu/adis16300.h
new file mode 100644
index 00000000000..1c7ea5c840e
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16300.h
@@ -0,0 +1,194 @@
+#ifndef SPI_ADIS16300_H_
+#define SPI_ADIS16300_H_
+
+#define ADIS16300_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16300_READ_REG(a) a
+#define ADIS16300_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16300_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16300_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16300_XGYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16300_XACCL_OUT 0x0A /* X-axis accelerometer output */
+#define ADIS16300_YACCL_OUT 0x0C /* Y-axis accelerometer output */
+#define ADIS16300_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
+#define ADIS16300_TEMP_OUT 0x10 /* Temperature output */
+#define ADIS16300_XINCLI_OUT 0x12 /* X-axis inclinometer output measurement */
+#define ADIS16300_YINCLI_OUT 0x14 /* Y-axis inclinometer output measurement */
+#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+
+/* Calibration parameters */
+#define ADIS16300_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
+#define ADIS16300_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
+#define ADIS16300_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
+#define ADIS16300_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
+
+#define ADIS16300_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
+#define ADIS16300_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16300_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16300_SENS_AVG 0x38 /* Dynamic range and digital filter control */
+#define ADIS16300_SLP_CNT 0x3A /* Sleep mode control */
+#define ADIS16300_DIAG_STAT 0x3C /* System status */
+
+/* Alarm functions */
+#define ADIS16300_GLOB_CMD 0x3E /* System command */
+#define ADIS16300_ALM_MAG1 0x26 /* Alarm 1 amplitude threshold */
+#define ADIS16300_ALM_MAG2 0x28 /* Alarm 2 amplitude threshold */
+#define ADIS16300_ALM_SMPL1 0x2A /* Alarm 1 sample size */
+#define ADIS16300_ALM_SMPL2 0x2C /* Alarm 2 sample size */
+#define ADIS16300_ALM_CTRL 0x2E /* Alarm control */
+#define ADIS16300_AUX_DAC 0x30 /* Auxiliary DAC data */
+
+#define ADIS16300_ERROR_ACTIVE (1<<14)
+#define ADIS16300_NEW_DATA (1<<15)
+
+/* MSC_CTRL */
+#define ADIS16300_MSC_CTRL_MEM_TEST (1<<11)
+#define ADIS16300_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16300_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16300_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16300_MSC_CTRL_GYRO_BIAS (1<<7)
+#define ADIS16300_MSC_CTRL_ACCL_ALIGN (1<<6)
+#define ADIS16300_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16300_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16300_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16300_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16300_SMPL_PRD_DIV_MASK 0x7F
+
+/* DIAG_STAT */
+#define ADIS16300_DIAG_STAT_ZACCL_FAIL (1<<15)
+#define ADIS16300_DIAG_STAT_YACCL_FAIL (1<<14)
+#define ADIS16300_DIAG_STAT_XACCL_FAIL (1<<13)
+#define ADIS16300_DIAG_STAT_XGYRO_FAIL (1<<10)
+#define ADIS16300_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16300_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16300_DIAG_STAT_FLASH_CHK (1<<6)
+#define ADIS16300_DIAG_STAT_SELF_TEST (1<<5)
+#define ADIS16300_DIAG_STAT_OVERFLOW (1<<4)
+#define ADIS16300_DIAG_STAT_SPI_FAIL (1<<3)
+#define ADIS16300_DIAG_STAT_FLASH_UPT (1<<2)
+#define ADIS16300_DIAG_STAT_POWER_HIGH (1<<1)
+#define ADIS16300_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16300_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16300_GLOB_CMD_P_AUTO_NULL (1<<4)
+#define ADIS16300_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16300_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16300_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16300_GLOB_CMD_AUTO_NULL (1<<0)
+
+/* SLP_CNT */
+#define ADIS16300_SLP_CNT_POWER_OFF (1<<8)
+
+#define ADIS16300_MAX_TX 18
+#define ADIS16300_MAX_RX 18
+
+#define ADIS16300_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16300_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16300_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16300_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: CLEAN
+ * @inter: used to check if new interrupt has been triggered
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16300_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16300_spi_read_burst(struct device *dev, u8 *rx);
+
+int adis16300_set_irq(struct device *dev, bool enable);
+
+int adis16300_reset(struct device *dev);
+
+int adis16300_check_status(struct device *dev);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16300_scan {
+ ADIS16300_SCAN_SUPPLY,
+ ADIS16300_SCAN_GYRO_X,
+ ADIS16300_SCAN_ACC_X,
+ ADIS16300_SCAN_ACC_Y,
+ ADIS16300_SCAN_ACC_Z,
+ ADIS16300_SCAN_TEMP,
+ ADIS16300_SCAN_ADC_0,
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+};
+
+void adis16300_remove_trigger(struct iio_dev *indio_dev);
+int adis16300_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16300_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16300_configure_ring(struct iio_dev *indio_dev);
+void adis16300_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16300_initialize_ring(struct iio_ring_buffer *ring);
+void adis16300_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16300_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16300_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16300_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static int adis16300_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16300_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16300_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16300_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16300_H_ */
diff --git a/drivers/staging/iio/imu/adis16300_core.c b/drivers/staging/iio/imu/adis16300_core.c
new file mode 100644
index 00000000000..5a7e5ef9bc5
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16300_core.c
@@ -0,0 +1,768 @@
+/*
+ * ADIS16300 Four Degrees of Freedom Inertial Sensor Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../accel/accel.h"
+#include "../accel/inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16300.h"
+
+#define DRIVER_NAME "adis16300"
+
+/* At the moment the spi framework doesn't allow global setting of cs_change.
+ * This is noted in the "likely to be added" comment at the top of spi.h.
+ * As a result, spi_write() etc. cannot be used here.
+ */
+
+/**
+ * adis16300_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16300_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16300_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16300_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16300_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16300_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16300_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16300_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second register
+ * is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16300_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16300_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+/**
+ * adis16300_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the values read (min size is 18 bytes)
+ **/
+int adis16300_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 18,
+ .cs_change = 0,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16300_READ_REG(ADIS16300_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = min(ADIS16300_SPI_BURST, old_speed_hz);
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+static ssize_t adis16300_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16300_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16300_ERROR_ACTIVE)
+ adis16300_check_status(dev);
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16300_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16300_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16300_ERROR_ACTIVE)
+ adis16300_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16300_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16300_spi_read_signed(dev, attr, buf, 14);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16300_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16300_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16300_read_13bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16300_spi_read_signed(dev, attr, buf, 13);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16300_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16300_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
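+/*
+ * As in the adis16260 driver, the rate below is derived from SMPL_PRD
+ * (sketch only): a base of roughly 53 SPS or 1638 SPS divided by
+ * (divider + 1), so a divider of 3 on the fast base gives 1638 / 4 = 409 SPS.
+ */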
+static ssize_t adis16300_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = adis16300_spi_read_reg_16(dev,
+ ADIS16300_SMPL_PRD,
+ &t);
+ if (ret)
+ return ret;
+ sps = (t & ADIS16300_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16300_SMPL_PRD_DIV_MASK) + 1;
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t adis16300_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+
+ t = (1638 / val);
+ if (t > 0)
+ t--;
+ t &= ADIS16300_SMPL_PRD_DIV_MASK;
+ if ((t & ADIS16300_SMPL_PRD_DIV_MASK) >= 0x0A)
+ st->us->max_speed_hz = ADIS16300_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADIS16300_SPI_FAST;
+
+ ret = adis16300_spi_write_reg_8(dev,
+ ADIS16300_SMPL_PRD,
+ t);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t adis16300_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16300_reset(dev);
+ }
+ return -EINVAL;
+}
+
+
+
+int adis16300_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 msc;
+ ret = adis16300_spi_read_reg_16(dev, ADIS16300_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16300_MSC_CTRL_DATA_RDY_POL_HIGH;
+ msc &= ~ADIS16300_MSC_CTRL_DATA_RDY_DIO2;
+ if (enable)
+ msc |= ADIS16300_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16300_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16300_spi_write_reg_16(dev, ADIS16300_MSC_CTRL, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+int adis16300_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16300_spi_write_reg_8(dev,
+ ADIS16300_GLOB_CMD,
+ ADIS16300_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+/* Power down the device */
+static int adis16300_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val = ADIS16300_SLP_CNT_POWER_OFF;
+
+ ret = adis16300_spi_write_reg_16(dev, ADIS16300_SLP_CNT, val);
+ if (ret)
+ dev_err(dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+int adis16300_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16300_spi_read_reg_16(dev, ADIS16300_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status;
+ if (status & ADIS16300_DIAG_STAT_ZACCL_FAIL)
+ dev_err(dev, "Z-axis accelerometer self-test failure\n");
+ if (status & ADIS16300_DIAG_STAT_YACCL_FAIL)
+ dev_err(dev, "Y-axis accelerometer self-test failure\n");
+ if (status & ADIS16300_DIAG_STAT_XACCL_FAIL)
+ dev_err(dev, "X-axis accelerometer self-test failure\n");
+ if (status & ADIS16300_DIAG_STAT_XGYRO_FAIL)
+ dev_err(dev, "X-axis gyroscope self-test failure\n");
+ if (status & ADIS16300_DIAG_STAT_ALARM2)
+ dev_err(dev, "Alarm 2 active\n");
+ if (status & ADIS16300_DIAG_STAT_ALARM1)
+ dev_err(dev, "Alarm 1 active\n");
+ if (status & ADIS16300_DIAG_STAT_FLASH_CHK)
+ dev_err(dev, "Flash checksum error\n");
+ if (status & ADIS16300_DIAG_STAT_SELF_TEST)
+ dev_err(dev, "Self test error\n");
+ if (status & ADIS16300_DIAG_STAT_OVERFLOW)
+ dev_err(dev, "Sensor overrange\n");
+ if (status & ADIS16300_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16300_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16300_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16300_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16300_initial_setup(struct adis16300_state *st)
+{
+ int ret;
+ u16 smp_prd;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use low spi speed for init */
+ st->us->max_speed_hz = ADIS16300_SPI_SLOW;
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = adis16300_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+
+ /* Read status register to check the result */
+ ret = adis16300_check_status(dev);
+ if (ret) {
+ adis16300_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16300_STARTUP_DELAY);
+ ret = adis16300_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+ /* use high spi speed if possible */
+ ret = adis16300_spi_read_reg_16(dev, ADIS16300_SMPL_PRD, &smp_prd);
+ if (!ret && (smp_prd & ADIS16300_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->us->max_speed_hz = ADIS16300_SPI_FAST;
+ spi_setup(st->us);
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16300_read_12bit_signed,
+ adis16300_write_16bit,
+ ADIS16300_XACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16300_read_12bit_signed,
+ adis16300_write_16bit,
+ ADIS16300_YACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
+ adis16300_read_12bit_signed,
+ adis16300_write_16bit,
+ ADIS16300_ZACCL_OFF);
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16300_read_14bit_signed,
+ ADIS16300_SUPPLY_OUT);
+static IIO_CONST_ATTR(in_supply_scale, "0.00242");
+
+static IIO_DEV_ATTR_GYRO_X(adis16300_read_14bit_signed,
+ ADIS16300_XGYRO_OUT);
+static IIO_CONST_ATTR(gyro_scale, "0.05 deg/s");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16300_read_14bit_signed,
+ ADIS16300_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16300_read_14bit_signed,
+ ADIS16300_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Z(adis16300_read_14bit_signed,
+ ADIS16300_ZACCL_OUT);
+static IIO_CONST_ATTR(accel_scale, "0.0006 g");
+
+static IIO_DEV_ATTR_INCLI_X(adis16300_read_13bit_signed,
+ ADIS16300_XINCLI_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16300_read_13bit_signed,
+ ADIS16300_YINCLI_OUT);
+static IIO_CONST_ATTR(incli_scale, "0.044 d");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16300_read_12bit_signed);
+static IIO_CONST_ATTR(temp_offset, "198.16 K");
+static IIO_CONST_ATTR(temp_scale, "0.14 K");
+
+static IIO_DEV_ATTR_IN_RAW(0, adis16300_read_12bit_unsigned,
+ ADIS16300_AUX_ADC);
+static IIO_CONST_ATTR(in0_scale, "0.000806");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16300_read_frequency,
+ adis16300_write_frequency);
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16300_write_reset, 0);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("409 546 819 1638");
+
+static IIO_CONST_ATTR(name, "adis16300");
+
+static struct attribute *adis16300_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16300_event_attribute_group = {
+ .attrs = adis16300_event_attributes,
+};
+
+static struct attribute *adis16300_attributes[] = {
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_x_raw.dev_attr.attr,
+ &iio_const_attr_gyro_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_incli_x_raw.dev_attr.attr,
+ &iio_dev_attr_incli_y_raw.dev_attr.attr,
+ &iio_const_attr_incli_scale.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_const_attr_in0_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16300_attribute_group = {
+ .attrs = adis16300_attributes,
+};
+
+static int __devinit adis16300_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16300_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16300_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16300_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16300_event_attribute_group;
+ st->indio_dev->attrs = &adis16300_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16300_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16300_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+#if 0 /* fixme: here we should support */
+ iio_init_work_cont(&st->work_cont_thresh,
+ NULL,
+ adis16300_thresh_handler_bh_no_check,
+ 0,
+ 0,
+ st);
+#endif
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16300");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16300_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16300_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ adis16300_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16300_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16300_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16300_remove(struct spi_device *spi)
+{
+ int ret;
+ struct adis16300_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = adis16300_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ adis16300_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16300_uninitialize_ring(indio_dev->ring);
+ adis16300_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver adis16300_driver = {
+ .driver = {
+ .name = "adis16300",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16300_probe,
+ .remove = __devexit_p(adis16300_remove),
+};
+
+static __init int adis16300_init(void)
+{
+ return spi_register_driver(&adis16300_driver);
+}
+module_init(adis16300_init);
+
+static __exit void adis16300_exit(void)
+{
+ spi_unregister_driver(&adis16300_driver);
+}
+module_exit(adis16300_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16300 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/imu/adis16300_ring.c b/drivers/staging/iio/imu/adis16300_ring.c
new file mode 100644
index 00000000000..76cf8a6f3c3
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16300_ring.c
@@ -0,0 +1,233 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "adis16300.h"
+
+/**
+ * combine_8_to_16() - utility function to munge two u8s into a u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
+
+static IIO_SCAN_EL_C(supply, ADIS16300_SCAN_SUPPLY, IIO_SIGNED(14),
+ ADIS16300_SUPPLY_OUT, NULL);
+
+static IIO_SCAN_EL_C(gyro_x, ADIS16300_SCAN_GYRO_X, IIO_SIGNED(14),
+ ADIS16300_XGYRO_OUT, NULL);
+
+static IIO_SCAN_EL_C(accel_x, ADIS16300_SCAN_ACC_X, IIO_SIGNED(14),
+ ADIS16300_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16300_SCAN_ACC_Y, IIO_SIGNED(14),
+ ADIS16300_YACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_z, ADIS16300_SCAN_ACC_Z, IIO_SIGNED(14),
+ ADIS16300_ZACCL_OUT, NULL);
+
+static IIO_SCAN_EL_C(temp, ADIS16300_SCAN_TEMP, IIO_SIGNED(12),
+ ADIS16300_TEMP_OUT, NULL);
+static IIO_SCAN_EL_C(adc_0, ADIS16300_SCAN_ADC_0, IIO_SIGNED(12),
+ ADIS16300_AUX_ADC, NULL);
+
+static IIO_SCAN_EL_C(incli_x, ADIS16300_SCAN_INCLI_X, IIO_SIGNED(12),
+ ADIS16300_XINCLI_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16300_SCAN_INCLI_Y, IIO_SIGNED(12),
+ ADIS16300_YINCLI_OUT, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(9);
+
+static struct attribute *adis16300_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_gyro_x.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_scan_el_incli_x.dev_attr.attr,
+ &iio_scan_el_incli_y.dev_attr.attr,
+ &iio_scan_el_adc_0.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16300_scan_el_group = {
+ .attrs = adis16300_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16300_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16300_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16300_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+ /* Indicate that this interrupt is being handled */
+
+ /* Technically this is trigger related, but without this
+ * handler running there is currently no way for the interrupt
+ * to clear.
+ */
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16300_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16300_state *st
+ = container_of(work_s, struct adis16300_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16300_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* Guaranteed to be aligned with 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+/* In these circumstances would it be better to go with unaligned packing and
+ * deal with the cost? */
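+/*
+ * The 4*sizeof(s64) below is presumably a worst-case figure: nine 16-bit
+ * channels take 18 bytes, padded to 24 for timestamp alignment, plus the
+ * 8 byte timestamp itself gives 32 bytes.
+ */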
+static int adis16300_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count) /* Timestamp and data */
+ size = 4*sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+static int adis16300_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16300_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16300_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16300_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16300_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16300_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_adc_0.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_incli_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_incli_y.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16300_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16300_data_rdy_ring_preenable;
+ ring->postenable = &adis16300_data_rdy_ring_postenable;
+ ring->predisable = &adis16300_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16300_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16300_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16300_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/imu/adis16300_trigger.c b/drivers/staging/iio/imu/adis16300_trigger.c
new file mode 100644
index 00000000000..54edb20bf11
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16300_trigger.c
@@ -0,0 +1,127 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16300.h"
+
+/**
+ * adis16300_data_rdy_trig_poll() - event handler for the data ready trigger
+ **/
+static int adis16300_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16300_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16300_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16300_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16300_trigger_attr_group = {
+ .attrs = adis16300_trigger_attrs,
+};
+
+/**
+ * adis16300_data_rdy_trigger_set_state() - set the data ready interrupt state
+ **/
+static int adis16300_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16300_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16300_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ /* possible quirk with handler currently worked around
+ by ensuring the work queue is empty */
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16300_trig_try_reen() - try re-enabling the irq for the data ready trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16300_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16300_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ /* irq reenabled so success! */
+ return 0;
+}
+
+int adis16300_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16300_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16300-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16300_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16300_trig_try_reen;
+ st->trig->control_attrs = &adis16300_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16300_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16300_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/imu/adis16350.h b/drivers/staging/iio/imu/adis16350.h
new file mode 100644
index 00000000000..334b18ace38
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16350.h
@@ -0,0 +1,193 @@
+#ifndef SPI_ADIS16350_H_
+#define SPI_ADIS16350_H_
+
+#define ADIS16350_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16350_READ_REG(a) a
+#define ADIS16350_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16350_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16350_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16350_XGYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16350_YGYRO_OUT 0x06 /* Y-axis gyroscope output */
+#define ADIS16350_ZGYRO_OUT 0x08 /* Z-axis gyroscope output */
+#define ADIS16350_XACCL_OUT 0x0A /* X-axis accelerometer output */
+#define ADIS16350_YACCL_OUT 0x0C /* Y-axis accelerometer output */
+#define ADIS16350_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
+#define ADIS16350_XTEMP_OUT 0x10 /* X-axis gyroscope temperature measurement */
+#define ADIS16350_YTEMP_OUT 0x12 /* Y-axis gyroscope temperature measurement */
+#define ADIS16350_ZTEMP_OUT 0x14 /* Z-axis gyroscope temperature measurement */
+#define ADIS16350_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+
+/* Calibration parameters */
+#define ADIS16350_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
+#define ADIS16350_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
+#define ADIS16350_ZGYRO_OFF 0x1E /* Z-axis gyroscope bias offset factor */
+#define ADIS16350_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
+#define ADIS16350_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
+#define ADIS16350_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
+
+#define ADIS16350_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
+#define ADIS16350_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16350_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16350_SENS_AVG 0x38 /* Dynamic range and digital filter control */
+#define ADIS16350_SLP_CNT 0x3A /* Sleep mode control */
+#define ADIS16350_DIAG_STAT 0x3C /* System status */
+
+/* Alarm functions */
+#define ADIS16350_GLOB_CMD 0x3E /* System command */
+#define ADIS16350_ALM_MAG1 0x26 /* Alarm 1 amplitude threshold */
+#define ADIS16350_ALM_MAG2 0x28 /* Alarm 2 amplitude threshold */
+#define ADIS16350_ALM_SMPL1 0x2A /* Alarm 1 sample size */
+#define ADIS16350_ALM_SMPL2 0x2C /* Alarm 2 sample size */
+#define ADIS16350_ALM_CTRL 0x2E /* Alarm control */
+#define ADIS16350_AUX_DAC 0x30 /* Auxiliary DAC data */
+
+#define ADIS16350_ERROR_ACTIVE (1<<14)
+#define ADIS16350_NEW_DATA (1<<15)
+
+/* MSC_CTRL */
+#define ADIS16350_MSC_CTRL_MEM_TEST (1<<11)
+#define ADIS16350_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16350_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16350_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16350_MSC_CTRL_GYRO_BIAS (1<<7)
+#define ADIS16350_MSC_CTRL_ACCL_ALIGN (1<<6)
+#define ADIS16350_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16350_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16350_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16350_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16350_SMPL_PRD_DIV_MASK 0x7F
+
+/* DIAG_STAT */
+#define ADIS16350_DIAG_STAT_ZACCL_FAIL (1<<15)
+#define ADIS16350_DIAG_STAT_YACCL_FAIL (1<<14)
+#define ADIS16350_DIAG_STAT_XACCL_FAIL (1<<13)
+#define ADIS16350_DIAG_STAT_XGYRO_FAIL (1<<12)
+#define ADIS16350_DIAG_STAT_YGYRO_FAIL (1<<11)
+#define ADIS16350_DIAG_STAT_ZGYRO_FAIL (1<<10)
+#define ADIS16350_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16350_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16350_DIAG_STAT_FLASH_CHK (1<<6)
+#define ADIS16350_DIAG_STAT_SELF_TEST (1<<5)
+#define ADIS16350_DIAG_STAT_OVERFLOW (1<<4)
+#define ADIS16350_DIAG_STAT_SPI_FAIL (1<<3)
+#define ADIS16350_DIAG_STAT_FLASH_UPT (1<<2)
+#define ADIS16350_DIAG_STAT_POWER_HIGH (1<<1)
+#define ADIS16350_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16350_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16350_GLOB_CMD_P_AUTO_NULL (1<<4)
+#define ADIS16350_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16350_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16350_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16350_GLOB_CMD_AUTO_NULL (1<<0)
+
+/* SLP_CNT */
+#define ADIS16350_SLP_CNT_POWER_OFF (1<<8)
+
+#define ADIS16350_MAX_TX 24
+#define ADIS16350_MAX_RX 24
+
+#define ADIS16350_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16350_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16350_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16350_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_data_rdy: work continuation for data ready event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16350_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_data_rdy;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16350_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+
+enum adis16350_scan {
+ ADIS16350_SCAN_SUPPLY,
+ ADIS16350_SCAN_GYRO_X,
+ ADIS16350_SCAN_GYRO_Y,
+ ADIS16350_SCAN_GYRO_Z,
+ ADIS16350_SCAN_ACC_X,
+ ADIS16350_SCAN_ACC_Y,
+ ADIS16350_SCAN_ACC_Z,
+ ADIS16350_SCAN_TEMP_X,
+ ADIS16350_SCAN_TEMP_Y,
+ ADIS16350_SCAN_TEMP_Z,
+ ADIS16350_SCAN_ADC_0
+};
+
+void adis16350_remove_trigger(struct iio_dev *indio_dev);
+int adis16350_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16350_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16350_configure_ring(struct iio_dev *indio_dev);
+void adis16350_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16350_initialize_ring(struct iio_ring_buffer *ring);
+void adis16350_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16350_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16350_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16350_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16350_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16350_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16350_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16350_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16350_H_ */
diff --git a/drivers/staging/iio/imu/adis16350_core.c b/drivers/staging/iio/imu/adis16350_core.c
new file mode 100644
index 00000000000..0edde73ce5c
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16350_core.c
@@ -0,0 +1,736 @@
+/*
+ * ADIS16350/54/55/60/62/64/65 high precision tri-axis inertial sensor
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../accel/accel.h"
+#include "../adc/adc.h"
+#include "../gyro/gyro.h"
+
+#include "adis16350.h"
+
+#define DRIVER_NAME "adis16350"
+
+static int adis16350_check_status(struct device *dev);
+
+/**
+ * adis16350_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16350_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16350_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16350_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16350_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16350_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16350_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16350_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16350_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ .delay_usecs = 25,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16350_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+
+static ssize_t adis16350_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16350_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16350_ERROR_ACTIVE)
+ adis16350_check_status(dev);
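+ /* Shift the bits-wide field up to the top of the s16 and back down so
+ * that its sign bit is extended into the upper bits. */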
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16350_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16350_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16350_ERROR_ACTIVE)
+ adis16350_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16350_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16350_spi_read_signed(dev, attr, buf, 14);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16350_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16350_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16350_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16350_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t adis16350_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = adis16350_spi_read_reg_16(dev,
+ ADIS16350_SMPL_PRD,
+ &t);
+ if (ret)
+ return ret;
+ sps = (t & ADIS16350_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16350_SMPL_PRD_DIV_MASK) + 1;
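+ /* Worked example: with the time base bit clear, SMPL_PRD = 0x01 gives
+ * 1638 / (1 + 1) = 819 SPS; with the bit set the base rate is 53 SPS.
+ */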
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t adis16350_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ t = (1638 / val);
+ if (t > 0)
+ t--;
+ t &= ADIS16350_SMPL_PRD_DIV_MASK;
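+ /* A divider of 0x0A or more presumably puts the part in its low sample
+ * rate mode, where only the slow SPI clock is within spec, hence the
+ * max_speed_hz switch below. */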
+ if ((t & ADIS16350_SMPL_PRD_DIV_MASK) >= 0x0A)
+ st->us->max_speed_hz = ADIS16350_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADIS16350_SPI_FAST;
+
+ ret = adis16350_spi_write_reg_8(dev,
+ ADIS16350_SMPL_PRD,
+ t);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static int adis16350_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16350_spi_write_reg_8(dev,
+ ADIS16350_GLOB_CMD,
+ ADIS16350_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+static ssize_t adis16350_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -1;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16350_reset(dev);
+ }
+ return -1;
+}
+
+int adis16350_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 msc;
+ ret = adis16350_spi_read_reg_16(dev, ADIS16350_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16350_MSC_CTRL_DATA_RDY_POL_HIGH;
+ msc &= ~ADIS16350_MSC_CTRL_DATA_RDY_DIO2;
+
+ if (enable)
+ msc |= ADIS16350_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16350_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16350_spi_write_reg_16(dev, ADIS16350_MSC_CTRL, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+}
+
+/* Power down the device */
+static int adis16350_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val = ADIS16350_SLP_CNT_POWER_OFF;
+
+ ret = adis16350_spi_write_reg_16(dev, ADIS16350_SLP_CNT, val);
+ if (ret)
+ dev_err(dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16350_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16350_spi_write_reg_16(dev,
+ ADIS16350_MSC_CTRL,
+ ADIS16350_MSC_CTRL_MEM_TEST);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16350_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+static int adis16350_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16350_spi_read_reg_16(dev, ADIS16350_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status;
+ if (status & ADIS16350_DIAG_STAT_ZACCL_FAIL)
+ dev_err(dev, "Z-axis accelerometer self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_YACCL_FAIL)
+ dev_err(dev, "Y-axis accelerometer self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_XACCL_FAIL)
+ dev_err(dev, "X-axis accelerometer self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_XGYRO_FAIL)
+ dev_err(dev, "X-axis gyroscope self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_YGYRO_FAIL)
+ dev_err(dev, "Y-axis gyroscope self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_ZGYRO_FAIL)
+ dev_err(dev, "Z-axis gyroscope self-test failure\n");
+ if (status & ADIS16350_DIAG_STAT_ALARM2)
+ dev_err(dev, "Alarm 2 active\n");
+ if (status & ADIS16350_DIAG_STAT_ALARM1)
+ dev_err(dev, "Alarm 1 active\n");
+ if (status & ADIS16350_DIAG_STAT_FLASH_CHK)
+ dev_err(dev, "Flash checksum error\n");
+ if (status & ADIS16350_DIAG_STAT_SELF_TEST)
+ dev_err(dev, "Self test error\n");
+ if (status & ADIS16350_DIAG_STAT_OVERFLOW)
+ dev_err(dev, "Sensor overrange\n");
+ if (status & ADIS16350_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16350_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16350_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16350_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16350_initial_setup(struct adis16350_state *st)
+{
+ int ret;
+ u16 smp_prd;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use low spi speed for init */
+ st->us->max_speed_hz = ADIS16350_SPI_SLOW;
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = adis16350_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16350_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16350_check_status(dev);
+ if (ret) {
+ adis16350_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16350_STARTUP_DELAY);
+ ret = adis16350_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+ st->us->chip_select, st->us->irq);
+
+ /* use high spi speed if possible */
+ ret = adis16350_spi_read_reg_16(dev, ADIS16350_SMPL_PRD, &smp_prd);
+ if (!ret && (smp_prd & ADIS16350_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->us->max_speed_hz = ADIS16350_SPI_FAST;
+ spi_setup(st->us);
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16350_read_12bit_signed,
+ adis16350_write_16bit,
+ ADIS16350_XACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16350_read_12bit_signed,
+ adis16350_write_16bit,
+ ADIS16350_YACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
+ adis16350_read_12bit_signed,
+ adis16350_write_16bit,
+ ADIS16350_ZACCL_OFF);
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16350_read_12bit_unsigned,
+ ADIS16350_SUPPLY_OUT);
+static IIO_CONST_ATTR(in_supply_scale, "0.002418");
+
+static IIO_DEV_ATTR_GYRO_X(adis16350_read_14bit_signed,
+ ADIS16350_XGYRO_OUT);
+static IIO_DEV_ATTR_GYRO_Y(adis16350_read_14bit_signed,
+ ADIS16350_YGYRO_OUT);
+static IIO_DEV_ATTR_GYRO_Z(adis16350_read_14bit_signed,
+ ADIS16350_ZGYRO_OUT);
+static IIO_CONST_ATTR(gyro_scale, "0.05");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16350_read_14bit_signed,
+ ADIS16350_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16350_read_14bit_signed,
+ ADIS16350_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Z(adis16350_read_14bit_signed,
+ ADIS16350_ZACCL_OUT);
+static IIO_CONST_ATTR(accel_scale, "0.00333");
+
+static IIO_DEVICE_ATTR(temp_x_raw, S_IRUGO, adis16350_read_12bit_signed,
+ NULL, ADIS16350_XTEMP_OUT);
+static IIO_DEVICE_ATTR(temp_y_raw, S_IRUGO, adis16350_read_12bit_signed,
+ NULL, ADIS16350_YTEMP_OUT);
+static IIO_DEVICE_ATTR(temp_z_raw, S_IRUGO, adis16350_read_12bit_signed,
+ NULL, ADIS16350_ZTEMP_OUT);
+static IIO_CONST_ATTR(temp_scale, "0.0005");
+
+static IIO_DEV_ATTR_IN_RAW(0, adis16350_read_12bit_unsigned,
+ ADIS16350_AUX_ADC);
+static IIO_CONST_ATTR(in0_scale, "0.000806");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16350_read_frequency,
+ adis16350_write_frequency);
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
+ adis16350_write_reset, 0);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("409 546 819 1638");
+
+static IIO_CONST_ATTR(name, "adis16350");
+
+static struct attribute *adis16350_attributes[] = {
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_x_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_y_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_z_raw.dev_attr.attr,
+ &iio_const_attr_gyro_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_temp_x_raw.dev_attr.attr,
+ &iio_dev_attr_temp_y_raw.dev_attr.attr,
+ &iio_dev_attr_temp_z_raw.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_const_attr_in0_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16350_attribute_group = {
+ .attrs = adis16350_attributes,
+};
+
+static struct attribute *adis16350_event_attributes[] = {
+ NULL,
+};
+
+static struct attribute_group adis16350_event_attribute_group = {
+ .attrs = adis16350_event_attributes,
+};
+
+static int __devinit adis16350_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16350_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16350_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16350_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16350_event_attribute_group;
+ st->indio_dev->attrs = &adis16350_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16350_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
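+ /* regdone selects between iio_device_unregister() and iio_free_device()
+ * on the error paths below. */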
+ regdone = 1;
+
+ ret = adis16350_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq) {
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16350");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16350_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16350_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ adis16350_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (spi->irq)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16350_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16350_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+static int adis16350_remove(struct spi_device *spi)
+{
+ int ret;
+ struct adis16350_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = adis16350_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ adis16350_remove_trigger(indio_dev);
+ if (spi->irq)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16350_uninitialize_ring(indio_dev->ring);
+ iio_device_unregister(indio_dev);
+ adis16350_unconfigure_ring(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static const struct spi_device_id adis16350_id[] = {
+ {"adis16350", 0},
+ {"adis16354", 0},
+ {"adis16355", 0},
+ {"adis16360", 0},
+ {"adis16362", 0},
+ {"adis16364", 0},
+ {"adis16365", 0},
+ {}
+};
+
+static struct spi_driver adis16350_driver = {
+ .driver = {
+ .name = "adis16350",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16350_probe,
+ .remove = __devexit_p(adis16350_remove),
+ .id_table = adis16350_id,
+};
+
+static __init int adis16350_init(void)
+{
+ return spi_register_driver(&adis16350_driver);
+}
+module_init(adis16350_init);
+
+static __exit void adis16350_exit(void)
+{
+ spi_unregister_driver(&adis16350_driver);
+}
+module_exit(adis16350_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16350/54/55/60/62/64/65 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/imu/adis16350_ring.c b/drivers/staging/iio/imu/adis16350_ring.c
new file mode 100644
index 00000000000..5e9716ea7c7
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16350_ring.c
@@ -0,0 +1,286 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "adis16350.h"
+
+/**
+ * combine_8_to_16() - utility function to combine two u8s into a u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
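+/* The device clocks each register out MSB first, so callers pass the second
+ * byte of a pair as @lower and the first as @upper (see the ring bh below).
+ */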
+
+static IIO_SCAN_EL_C(supply, ADIS16350_SCAN_SUPPLY, IIO_UNSIGNED(12),
+ ADIS16350_SUPPLY_OUT, NULL);
+
+static IIO_SCAN_EL_C(gyro_x, ADIS16350_SCAN_GYRO_X, IIO_SIGNED(14),
+ ADIS16350_XGYRO_OUT, NULL);
+static IIO_SCAN_EL_C(gyro_y, ADIS16350_SCAN_GYRO_Y, IIO_SIGNED(14),
+ ADIS16350_YGYRO_OUT, NULL);
+static IIO_SCAN_EL_C(gyro_z, ADIS16350_SCAN_GYRO_Z, IIO_SIGNED(14),
+ ADIS16350_ZGYRO_OUT, NULL);
+
+static IIO_SCAN_EL_C(accel_x, ADIS16350_SCAN_ACC_X, IIO_SIGNED(14),
+ ADIS16350_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16350_SCAN_ACC_Y, IIO_SIGNED(14),
+ ADIS16350_YACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_z, ADIS16350_SCAN_ACC_Z, IIO_SIGNED(14),
+ ADIS16350_ZACCL_OUT, NULL);
+
+static IIO_SCAN_EL_C(temp_x, ADIS16350_SCAN_TEMP_X, IIO_SIGNED(12),
+ ADIS16350_XTEMP_OUT, NULL);
+static IIO_SCAN_EL_C(temp_y, ADIS16350_SCAN_TEMP_Y, IIO_SIGNED(12),
+ ADIS16350_YTEMP_OUT, NULL);
+static IIO_SCAN_EL_C(temp_z, ADIS16350_SCAN_TEMP_Z, IIO_SIGNED(12),
+ ADIS16350_ZTEMP_OUT, NULL);
+
+static IIO_SCAN_EL_C(adc_0, ADIS16350_SCAN_ADC_0, IIO_UNSIGNED(12),
+ ADIS16350_AUX_ADC, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(11);
+
+static struct attribute *adis16350_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_gyro_x.dev_attr.attr,
+ &iio_scan_el_gyro_y.dev_attr.attr,
+ &iio_scan_el_gyro_z.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_scan_el_temp_x.dev_attr.attr,
+ &iio_scan_el_temp_y.dev_attr.attr,
+ &iio_scan_el_temp_z.dev_attr.attr,
+ &iio_scan_el_adc_0.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16350_scan_el_group = {
+ .attrs = adis16350_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16350_poll_func_th() - top half interrupt handler called by the trigger
+ * @private_data: iio_dev
+ **/
+static void adis16350_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16350_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+static int adis16350_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16350_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 22,
+ .cs_change = 0,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16350_READ_REG(ADIS16350_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = ADIS16350_SPI_BURST;
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
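+/* A burst read appears to start at SUPPLY_OUT and clock the eleven output
+ * registers (SUPPLY_OUT through AUX_ADC) out back to back, hence the 22 byte
+ * receive transfer above.
+ */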
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16350_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16350_state *st
+ = container_of(work_s, struct adis16350_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16350_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* The timestamp is written at the next 8 byte aligned offset after the data */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+
+static int adis16350_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
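+ /* With all eleven scan elements and the timestamp enabled, the
+ * computation below works out as 22 bytes of data rounded up to 24,
+ * plus sizeof(s64), i.e. 32 bytes per scan.
+ */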
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count)
+ /* Timestamp (aligned to sizeof(s64)) and data */
+ size = (((indio_dev->scan_count * sizeof(s16))
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+static int adis16350_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16350_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16350_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16350_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16350_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16350_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_adc_0.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16350_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16350_data_rdy_ring_preenable;
+ ring->postenable = &adis16350_data_rdy_ring_postenable;
+ ring->predisable = &adis16350_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16350_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16350_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16350_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/imu/adis16350_trigger.c b/drivers/staging/iio/imu/adis16350_trigger.c
new file mode 100644
index 00000000000..1ffa75d05fa
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16350_trigger.c
@@ -0,0 +1,127 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16350.h"
+
+/**
+ * adis16350_data_rdy_trig_poll() - event handler for the data ready trigger
+ **/
+static int adis16350_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16350_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16350_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16350_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16350_trigger_attr_group = {
+ .attrs = adis16350_trigger_attrs,
+};
+
+/**
+ * adis16350_data_rdy_trigger_set_state() - set the data ready interrupt state
+ **/
+static int adis16350_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16350_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16350_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ /* possible quirk with handler currently worked around
+ by ensuring the work queue is empty */
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16350_trig_try_reen() - try re-enabling the irq for the data ready trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16350_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16350_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ /* irq reenabled so success! */
+ return 0;
+}
+
+int adis16350_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16350_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16350-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16350_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16350_trig_try_reen;
+ st->trig->control_attrs = &adis16350_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_trig_name;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16350_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16350_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
new file mode 100644
index 00000000000..5a69a7ab91c
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -0,0 +1,229 @@
+/*
+ * adis16400.h support for the Analog Devices ADIS16400
+ * 3d 18g accelerometers,
+ * 3d gyroscopes,
+ * 3d 2.5gauss magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * Loosely based upon lis3l02dq.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_ADIS16400_H_
+#define SPI_ADIS16400_H_
+
+#define ADIS16400_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16400_READ_REG(a) a
+#define ADIS16400_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16400_YGYRO_OUT 0x06 /* Y-axis gyroscope output */
+#define ADIS16400_ZGYRO_OUT 0x08 /* Z-axis gyroscope output */
+#define ADIS16400_XACCL_OUT 0x0A /* X-axis accelerometer output */
+#define ADIS16400_YACCL_OUT 0x0C /* Y-axis accelerometer output */
+#define ADIS16400_ZACCL_OUT 0x0E /* Z-axis accelerometer output */
+#define ADIS16400_XMAGN_OUT 0x10 /* X-axis magnetometer measurement */
+#define ADIS16400_YMAGN_OUT 0x12 /* Y-axis magnetometer measurement */
+#define ADIS16400_ZMAGN_OUT 0x14 /* Z-axis magnetometer measurement */
+#define ADIS16400_TEMP_OUT 0x16 /* Temperature output */
+#define ADIS16400_AUX_ADC 0x18 /* Auxiliary ADC measurement */
+
+/* Calibration parameters */
+#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
+#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
+#define ADIS16400_ZGYRO_OFF 0x1E /* Z-axis gyroscope bias offset factor */
+#define ADIS16400_XACCL_OFF 0x20 /* X-axis acceleration bias offset factor */
+#define ADIS16400_YACCL_OFF 0x22 /* Y-axis acceleration bias offset factor */
+#define ADIS16400_ZACCL_OFF 0x24 /* Z-axis acceleration bias offset factor */
+#define ADIS16400_XMAGN_HIF 0x26 /* X-axis magnetometer, hard-iron factor */
+#define ADIS16400_YMAGN_HIF 0x28 /* Y-axis magnetometer, hard-iron factor */
+#define ADIS16400_ZMAGN_HIF 0x2A /* Z-axis magnetometer, hard-iron factor */
+#define ADIS16400_XMAGN_SIF 0x2C /* X-axis magnetometer, soft-iron factor */
+#define ADIS16400_YMAGN_SIF 0x2E /* Y-axis magnetometer, soft-iron factor */
+#define ADIS16400_ZMAGN_SIF 0x30 /* Z-axis magnetometer, soft-iron factor */
+
+#define ADIS16400_GPIO_CTRL 0x32 /* Auxiliary digital input/output control */
+#define ADIS16400_MSC_CTRL 0x34 /* Miscellaneous control */
+#define ADIS16400_SMPL_PRD 0x36 /* Internal sample period (rate) control */
+#define ADIS16400_SENS_AVG 0x38 /* Dynamic range and digital filter control */
+#define ADIS16400_SLP_CNT 0x3A /* Sleep mode control */
+#define ADIS16400_DIAG_STAT 0x3C /* System status */
+
+/* Alarm functions */
+#define ADIS16400_GLOB_CMD 0x3E /* System command */
+#define ADIS16400_ALM_MAG1 0x40 /* Alarm 1 amplitude threshold */
+#define ADIS16400_ALM_MAG2 0x42 /* Alarm 2 amplitude threshold */
+#define ADIS16400_ALM_SMPL1 0x44 /* Alarm 1 sample size */
+#define ADIS16400_ALM_SMPL2 0x46 /* Alarm 2 sample size */
+#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
+#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
+
+#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
+#define ADIS16400_PRODUCT_ID_DEFAULT 0x4015 /* Datasheet says 0x4105, I get 0x4015 */
+
+#define ADIS16400_ERROR_ACTIVE (1<<14)
+#define ADIS16400_NEW_DATA (1<<14)
+
+/* MSC_CTRL */
+#define ADIS16400_MSC_CTRL_MEM_TEST (1<<11)
+#define ADIS16400_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16400_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16400_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16400_MSC_CTRL_GYRO_BIAS (1<<7)
+#define ADIS16400_MSC_CTRL_ACCL_ALIGN (1<<6)
+#define ADIS16400_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16400_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+#define ADIS16400_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
+
+/* DIAG_STAT */
+#define ADIS16400_DIAG_STAT_ZACCL_FAIL (1<<15)
+#define ADIS16400_DIAG_STAT_YACCL_FAIL (1<<14)
+#define ADIS16400_DIAG_STAT_XACCL_FAIL (1<<13)
+#define ADIS16400_DIAG_STAT_XGYRO_FAIL (1<<12)
+#define ADIS16400_DIAG_STAT_YGYRO_FAIL (1<<11)
+#define ADIS16400_DIAG_STAT_ZGYRO_FAIL (1<<10)
+#define ADIS16400_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16400_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16400_DIAG_STAT_FLASH_CHK (1<<6)
+#define ADIS16400_DIAG_STAT_SELF_TEST (1<<5)
+#define ADIS16400_DIAG_STAT_OVERFLOW (1<<4)
+#define ADIS16400_DIAG_STAT_SPI_FAIL (1<<3)
+#define ADIS16400_DIAG_STAT_FLASH_UPT (1<<2)
+#define ADIS16400_DIAG_STAT_POWER_HIGH (1<<1)
+#define ADIS16400_DIAG_STAT_POWER_LOW (1<<0)
+
+/* GLOB_CMD */
+#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16400_GLOB_CMD_P_AUTO_NULL (1<<4)
+#define ADIS16400_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16400_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16400_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16400_GLOB_CMD_AUTO_NULL (1<<0)
+
+/* SLP_CNT */
+#define ADIS16400_SLP_CNT_POWER_OFF (1<<8)
+
+#define ADIS16400_MAX_TX 24
+#define ADIS16400_MAX_RX 24
+
+#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct adis16400_state - device instance specific data
+ * @us: actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @work_cont_thresh: work continuation for event handling
+ * @last_timestamp: passing timestamp from th to bh of interrupt handler
+ * @indio_dev: industrial I/O device structure
+ * @trig: data ready trigger registered with iio
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct adis16400_state {
+ struct spi_device *us;
+ struct work_struct work_trigger_to_ring;
+ struct iio_work_cont work_cont_thresh;
+ s64 last_timestamp;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+};
+
+int adis16400_spi_read_burst(struct device *dev, u8 *rx);
+
+int adis16400_set_irq(struct device *dev, bool enable);
+
+int adis16400_reset(struct device *dev);
+
+int adis16400_check_status(struct device *dev);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16400_scan {
+ ADIS16400_SCAN_SUPPLY,
+ ADIS16400_SCAN_GYRO_X,
+ ADIS16400_SCAN_GYRO_Y,
+ ADIS16400_SCAN_GYRO_Z,
+ ADIS16400_SCAN_ACC_X,
+ ADIS16400_SCAN_ACC_Y,
+ ADIS16400_SCAN_ACC_Z,
+ ADIS16400_SCAN_MAGN_X,
+ ADIS16400_SCAN_MAGN_Y,
+ ADIS16400_SCAN_MAGN_Z,
+ ADIS16400_SCAN_TEMP,
+ ADIS16400_SCAN_ADC_0
+};
+
+void adis16400_remove_trigger(struct iio_dev *indio_dev);
+int adis16400_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16400_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+
+int adis16400_configure_ring(struct iio_dev *indio_dev);
+void adis16400_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16400_initialize_ring(struct iio_ring_buffer *ring);
+void adis16400_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16400_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16400_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline ssize_t
+adis16400_read_data_from_ring(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static inline int adis16400_configure_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16400_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return 0;
+}
+
+static inline void adis16400_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
new file mode 100644
index 00000000000..e69e2ce47da
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -0,0 +1,800 @@
+/*
+ * adis16400.c support for the Analog Devices ADIS16400/5
+ * 3d 2g Linear Accelerometers,
+ * 3d Gyroscopes,
+ * 3d Magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@cam.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../accel/accel.h"
+#include "../adc/adc.h"
+#include "../gyro/gyro.h"
+#include "../magnetometer/magnet.h"
+
+#include "adis16400.h"
+
+#define DRIVER_NAME "adis16400"
+
+/* At the moment the spi framework doesn't allow a global setting of cs_change.
+ * It is listed in the "likely to be added" comment at the top of spi.h.
+ * This means spi_write() etc. cannot be used here.
+ */
+
+/**
+ * adis16400_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16400_spi_write_reg_8(struct device *dev,
+ u8 reg_address,
+ u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16400_WRITE_REG(reg_address);
+ st->tx[1] = val;
+
+ ret = spi_write(st->us, st->tx, 2);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16400_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16400_spi_write_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 value)
+{
+ int ret;
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .tx_buf = st->tx + 2,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16400_WRITE_REG(lower_reg_address);
+ st->tx[1] = value & 0xFF;
+ st->tx[2] = ADIS16400_WRITE_REG(lower_reg_address + 1);
+ st->tx[3] = (value >> 8) & 0xFF;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ mutex_unlock(&st->buf_lock);
+
+ return ret;
+}
+
+/**
+ * adis16400_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ * register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16400_spi_read_reg_16(struct device *dev,
+ u8 lower_reg_address,
+ u16 *val)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .rx_buf = st->rx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16400_READ_REG(lower_reg_address);
+ st->tx[1] = 0;
+ st->tx[2] = 0;
+ st->tx[3] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
+ if (ret) {
+ dev_err(&st->us->dev,
+ "problem when reading 16 bit register 0x%02X",
+ lower_reg_address);
+ goto error_ret;
+ }
+ *val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
+
+/**
+ * adis16400_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+int adis16400_spi_read_burst(struct device *dev, u8 *rx)
+{
+ struct spi_message msg;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ u32 old_speed_hz = st->us->max_speed_hz;
+ int ret;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = st->tx,
+ .bits_per_word = 8,
+ .len = 2,
+ .cs_change = 0,
+ }, {
+ .rx_buf = rx,
+ .bits_per_word = 8,
+ .len = 24,
+ .cs_change = 1,
+ },
+ };
+
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = ADIS16400_READ_REG(ADIS16400_GLOB_CMD);
+ st->tx[1] = 0;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+
+ st->us->max_speed_hz = min(ADIS16400_SPI_BURST, old_speed_hz);
+ spi_setup(st->us);
+
+ ret = spi_sync(st->us, &msg);
+ if (ret)
+ dev_err(&st->us->dev, "problem when burst reading");
+
+ st->us->max_speed_hz = old_speed_hz;
+ spi_setup(st->us);
+ mutex_unlock(&st->buf_lock);
+ return ret;
+}
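+/* Unlike the adis16350 variant above, the burst speed here is capped at
+ * min(ADIS16400_SPI_BURST, the currently configured speed) rather than being
+ * forced to the burst rate. The 24 byte transfer presumably covers the twelve
+ * output registers from SUPPLY_OUT through AUX_ADC.
+ */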
+
+static ssize_t adis16400_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned bits)
+{
+ int ret;
+ s16 val = 0;
+ unsigned shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16400_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16400_ERROR_ACTIVE)
+ adis16400_check_status(dev);
+ val = ((s16)(val << shift) >> shift);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16400_read_12bit_unsigned(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u16 val = 0;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis16400_spi_read_reg_16(dev, this_attr->address, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16400_ERROR_ACTIVE)
+ adis16400_check_status(dev);
+
+ return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16400_read_14bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16400_spi_read_signed(dev, attr, buf, 14);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static ssize_t adis16400_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ /* Take the iio_dev status lock */
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16400_spi_read_signed(dev, attr, buf, 12);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+
+static ssize_t adis16400_write_16bit(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int ret;
+ long val;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ goto error_ret;
+ ret = adis16400_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+ return ret ? ret : len;
+}
+
+static ssize_t adis16400_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret, len = 0;
+ u16 t;
+ int sps;
+ ret = adis16400_spi_read_reg_16(dev,
+ ADIS16400_SMPL_PRD,
+ &t);
+ if (ret)
+ return ret;
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+ len = sprintf(buf, "%d SPS\n", sps);
+ return len;
+}
+
+static ssize_t adis16400_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ long val;
+ int ret;
+ u8 t;
+
+ ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+
+ t = (1638 / val);
+ if (t > 0)
+ t--;
+ t &= ADIS16400_SMPL_PRD_DIV_MASK;
+ if ((t & ADIS16400_SMPL_PRD_DIV_MASK) >= 0x0A)
+ st->us->max_speed_hz = ADIS16400_SPI_SLOW;
+ else
+ st->us->max_speed_hz = ADIS16400_SPI_FAST;
+
+ ret = adis16400_spi_write_reg_8(dev,
+ ADIS16400_SMPL_PRD,
+ t);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t adis16400_write_reset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ if (len < 1)
+ return -EINVAL;
+ switch (buf[0]) {
+ case '1':
+ case 'y':
+ case 'Y':
+ return adis16400_reset(dev);
+ }
+ return -EINVAL;
+}
+
+
+
+int adis16400_set_irq(struct device *dev, bool enable)
+{
+ int ret;
+ u16 msc;
+ ret = adis16400_spi_read_reg_16(dev, ADIS16400_MSC_CTRL, &msc);
+ if (ret)
+ goto error_ret;
+
+ msc |= ADIS16400_MSC_CTRL_DATA_RDY_POL_HIGH;
+ if (enable)
+ msc |= ADIS16400_MSC_CTRL_DATA_RDY_EN;
+ else
+ msc &= ~ADIS16400_MSC_CTRL_DATA_RDY_EN;
+
+ ret = adis16400_spi_write_reg_16(dev, ADIS16400_MSC_CTRL, msc);
+
+error_ret:
+ return ret;
+}
+
+int adis16400_reset(struct device *dev)
+{
+ int ret;
+ ret = adis16400_spi_write_reg_8(dev,
+ ADIS16400_GLOB_CMD,
+ ADIS16400_GLOB_CMD_SW_RESET);
+ if (ret)
+ dev_err(dev, "problem resetting device");
+
+ return ret;
+}
+
+/* Power down the device */
+static int adis16400_stop_device(struct device *dev)
+{
+ int ret;
+ u16 val = ADIS16400_SLP_CNT_POWER_OFF;
+
+ ret = adis16400_spi_write_reg_16(dev, ADIS16400_SLP_CNT, val);
+ if (ret)
+ dev_err(dev, "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16400_self_test(struct device *dev)
+{
+ int ret;
+ ret = adis16400_spi_write_reg_16(dev,
+ ADIS16400_MSC_CTRL,
+ ADIS16400_MSC_CTRL_MEM_TEST);
+ if (ret) {
+ dev_err(dev, "problem starting self test");
+ goto err_ret;
+ }
+
+ adis16400_check_status(dev);
+
+err_ret:
+ return ret;
+}
+
+int adis16400_check_status(struct device *dev)
+{
+ u16 status;
+ int ret;
+
+ ret = adis16400_spi_read_reg_16(dev, ADIS16400_DIAG_STAT, &status);
+
+ if (ret < 0) {
+ dev_err(dev, "Reading status failed\n");
+ goto error_ret;
+ }
+ ret = status;
+ if (status & ADIS16400_DIAG_STAT_ZACCL_FAIL)
+ dev_err(dev, "Z-axis accelerometer self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_YACCL_FAIL)
+ dev_err(dev, "Y-axis accelerometer self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_XACCL_FAIL)
+ dev_err(dev, "X-axis accelerometer self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_XGYRO_FAIL)
+ dev_err(dev, "X-axis gyroscope self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_YGYRO_FAIL)
+ dev_err(dev, "Y-axis gyroscope self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_ZGYRO_FAIL)
+ dev_err(dev, "Z-axis gyroscope self-test failure\n");
+ if (status & ADIS16400_DIAG_STAT_ALARM2)
+ dev_err(dev, "Alarm 2 active\n");
+ if (status & ADIS16400_DIAG_STAT_ALARM1)
+ dev_err(dev, "Alarm 1 active\n");
+ if (status & ADIS16400_DIAG_STAT_FLASH_CHK)
+ dev_err(dev, "Flash checksum error\n");
+ if (status & ADIS16400_DIAG_STAT_SELF_TEST)
+ dev_err(dev, "Self test error\n");
+ if (status & ADIS16400_DIAG_STAT_OVERFLOW)
+ dev_err(dev, "Sensor overrange\n");
+ if (status & ADIS16400_DIAG_STAT_SPI_FAIL)
+ dev_err(dev, "SPI failure\n");
+ if (status & ADIS16400_DIAG_STAT_FLASH_UPT)
+ dev_err(dev, "Flash update failed\n");
+ if (status & ADIS16400_DIAG_STAT_POWER_HIGH)
+ dev_err(dev, "Power supply above 5.25V\n");
+ if (status & ADIS16400_DIAG_STAT_POWER_LOW)
+ dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+ return ret;
+}
+
+static int adis16400_initial_setup(struct adis16400_state *st)
+{
+ int ret;
+ u16 prod_id, smp_prd;
+ struct device *dev = &st->indio_dev->dev;
+
+ /* use low spi speed for init */
+ st->us->max_speed_hz = ADIS16400_SPI_SLOW;
+ st->us->mode = SPI_MODE_3;
+ spi_setup(st->us);
+
+ /* Disable IRQ */
+ ret = adis16400_set_irq(dev, false);
+ if (ret) {
+ dev_err(dev, "disable irq failed");
+ goto err_ret;
+ }
+
+ /* Do self test */
+ ret = adis16400_self_test(dev);
+ if (ret) {
+ dev_err(dev, "self test failure");
+ goto err_ret;
+ }
+
+ /* Read status register to check the result */
+ ret = adis16400_check_status(dev);
+ if (ret) {
+ adis16400_reset(dev);
+ dev_err(dev, "device not playing ball -> reset");
+ msleep(ADIS16400_STARTUP_DELAY);
+ ret = adis16400_check_status(dev);
+ if (ret) {
+ dev_err(dev, "giving up");
+ goto err_ret;
+ }
+ }
+
+ ret = adis16400_spi_read_reg_16(dev, ADIS16400_PRODUCT_ID, &prod_id);
+ if (ret)
+ goto err_ret;
+
+ if (prod_id != ADIS16400_PRODUCT_ID_DEFAULT)
+ dev_warn(dev, "unknown product id");
+
+ printk(KERN_INFO DRIVER_NAME ": prod_id 0x%04x at CS%d (irq %d)\n",
+ prod_id, st->us->chip_select, st->us->irq);
+
+ /* use high spi speed if possible */
+ ret = adis16400_spi_read_reg_16(dev, ADIS16400_SMPL_PRD, &smp_prd);
+ if (!ret && (smp_prd & ADIS16400_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->us->max_speed_hz = ADIS16400_SPI_FAST;
+ spi_setup(st->us);
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+ adis16400_read_12bit_signed,
+ adis16400_write_16bit,
+ ADIS16400_XACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+ adis16400_read_12bit_signed,
+ adis16400_write_16bit,
+ ADIS16400_YACCL_OFF);
+
+static IIO_DEV_ATTR_ACCEL_Z_OFFSET(S_IWUSR | S_IRUGO,
+ adis16400_read_12bit_signed,
+ adis16400_write_16bit,
+ ADIS16400_ZACCL_OFF);
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(supply, adis16400_read_14bit_signed,
+ ADIS16400_SUPPLY_OUT);
+static IIO_CONST_ATTR(in_supply_scale, "0.002418");
+
+static IIO_DEV_ATTR_GYRO_X(adis16400_read_14bit_signed,
+ ADIS16400_XGYRO_OUT);
+static IIO_DEV_ATTR_GYRO_Y(adis16400_read_14bit_signed,
+ ADIS16400_YGYRO_OUT);
+static IIO_DEV_ATTR_GYRO_Z(adis16400_read_14bit_signed,
+ ADIS16400_ZGYRO_OUT);
+static IIO_CONST_ATTR(gyro_scale, "0.05 deg/s");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16400_read_14bit_signed,
+ ADIS16400_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16400_read_14bit_signed,
+ ADIS16400_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Z(adis16400_read_14bit_signed,
+ ADIS16400_ZACCL_OUT);
+static IIO_CONST_ATTR(accel_scale, "0.00333 g");
+
+static IIO_DEV_ATTR_MAGN_X(adis16400_read_14bit_signed,
+ ADIS16400_XMAGN_OUT);
+static IIO_DEV_ATTR_MAGN_Y(adis16400_read_14bit_signed,
+ ADIS16400_YMAGN_OUT);
+static IIO_DEV_ATTR_MAGN_Z(adis16400_read_14bit_signed,
+ ADIS16400_ZMAGN_OUT);
+static IIO_CONST_ATTR(magn_scale, "0.0005 Gs");
+
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16400_read_12bit_signed);
+static IIO_CONST_ATTR(temp_offset, "198.16 K");
+static IIO_CONST_ATTR(temp_scale, "0.14 K");
+
+static IIO_DEV_ATTR_IN_RAW(0, adis16400_read_12bit_unsigned,
+ ADIS16400_AUX_ADC);
+static IIO_CONST_ATTR(in0_scale, "0.000806");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16400_read_frequency,
+ adis16400_write_frequency);
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16400_write_reset, 0);
+
+static IIO_CONST_ATTR_AVAIL_SAMP_FREQ("409 546 819 1638");
+
+static IIO_CONST_ATTR(name, "adis16400");
+
+static struct attribute *adis16400_event_attributes[] = {
+ NULL
+};
+
+static struct attribute_group adis16400_event_attribute_group = {
+ .attrs = adis16400_event_attributes,
+};
+
+static struct attribute *adis16400_attributes[] = {
+ &iio_dev_attr_accel_x_offset.dev_attr.attr,
+ &iio_dev_attr_accel_y_offset.dev_attr.attr,
+ &iio_dev_attr_accel_z_offset.dev_attr.attr,
+ &iio_dev_attr_in_supply_raw.dev_attr.attr,
+ &iio_const_attr_in_supply_scale.dev_attr.attr,
+ &iio_dev_attr_gyro_x_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_y_raw.dev_attr.attr,
+ &iio_dev_attr_gyro_z_raw.dev_attr.attr,
+ &iio_const_attr_gyro_scale.dev_attr.attr,
+ &iio_dev_attr_accel_x_raw.dev_attr.attr,
+ &iio_dev_attr_accel_y_raw.dev_attr.attr,
+ &iio_dev_attr_accel_z_raw.dev_attr.attr,
+ &iio_const_attr_accel_scale.dev_attr.attr,
+ &iio_dev_attr_magn_x_raw.dev_attr.attr,
+ &iio_dev_attr_magn_y_raw.dev_attr.attr,
+ &iio_dev_attr_magn_z_raw.dev_attr.attr,
+ &iio_const_attr_magn_scale.dev_attr.attr,
+ &iio_dev_attr_temp_raw.dev_attr.attr,
+ &iio_const_attr_temp_offset.dev_attr.attr,
+ &iio_const_attr_temp_scale.dev_attr.attr,
+ &iio_dev_attr_in0_raw.dev_attr.attr,
+ &iio_const_attr_in0_scale.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_available_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_reset.dev_attr.attr,
+ &iio_const_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16400_attribute_group = {
+ .attrs = adis16400_attributes,
+};
+
+static int __devinit adis16400_probe(struct spi_device *spi)
+{
+ int ret, regdone = 0;
+ struct adis16400_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+ if (!st) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, st);
+
+ /* Allocate the comms buffers */
+ st->rx = kzalloc(sizeof(*st->rx)*ADIS16400_MAX_RX, GFP_KERNEL);
+ if (st->rx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_st;
+ }
+ st->tx = kzalloc(sizeof(*st->tx)*ADIS16400_MAX_TX, GFP_KERNEL);
+ if (st->tx == NULL) {
+ ret = -ENOMEM;
+ goto error_free_rx;
+ }
+ st->us = spi;
+ mutex_init(&st->buf_lock);
+ /* setup the industrialio driver allocated elements */
+ st->indio_dev = iio_allocate_device();
+ if (st->indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_free_tx;
+ }
+
+ st->indio_dev->dev.parent = &spi->dev;
+ st->indio_dev->num_interrupt_lines = 1;
+ st->indio_dev->event_attrs = &adis16400_event_attribute_group;
+ st->indio_dev->attrs = &adis16400_attribute_group;
+ st->indio_dev->dev_data = (void *)(st);
+ st->indio_dev->driver_module = THIS_MODULE;
+ st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16400_configure_ring(st->indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ ret = iio_device_register(st->indio_dev);
+ if (ret)
+ goto error_unreg_ring_funcs;
+ regdone = 1;
+
+ ret = adis16400_initialize_ring(st->indio_dev->ring);
+ if (ret) {
+ printk(KERN_ERR "failed to initialize the ring\n");
+ goto error_unreg_ring_funcs;
+ }
+
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+#if 0 /* fixme: here we should support */
+ iio_init_work_cont(&st->work_cont_thresh,
+ NULL,
+ adis16400_thresh_handler_bh_no_check,
+ 0,
+ 0,
+ st);
+#endif
+ ret = iio_register_interrupt_line(spi->irq,
+ st->indio_dev,
+ 0,
+ IRQF_TRIGGER_RISING,
+ "adis16400");
+ if (ret)
+ goto error_uninitialize_ring;
+
+ ret = adis16400_probe_trigger(st->indio_dev);
+ if (ret)
+ goto error_unregister_line;
+ }
+
+ /* Get the device into a sane initial state */
+ ret = adis16400_initial_setup(st);
+ if (ret)
+ goto error_remove_trigger;
+ return 0;
+
+error_remove_trigger:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ adis16400_remove_trigger(st->indio_dev);
+error_unregister_line:
+ if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+ iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+ adis16400_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+ adis16400_unconfigure_ring(st->indio_dev);
+error_free_dev:
+ if (regdone)
+ iio_device_unregister(st->indio_dev);
+ else
+ iio_free_device(st->indio_dev);
+error_free_tx:
+ kfree(st->tx);
+error_free_rx:
+ kfree(st->rx);
+error_free_st:
+ kfree(st);
+error_ret:
+ return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16400_remove(struct spi_device *spi)
+{
+ int ret;
+ struct adis16400_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = st->indio_dev;
+
+ ret = adis16400_stop_device(&(indio_dev->dev));
+ if (ret)
+ goto err_ret;
+
+ flush_scheduled_work();
+
+ adis16400_remove_trigger(indio_dev);
+ if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+ iio_unregister_interrupt_line(indio_dev, 0);
+
+ adis16400_uninitialize_ring(indio_dev->ring);
+ adis16400_unconfigure_ring(indio_dev);
+ iio_device_unregister(indio_dev);
+ kfree(st->tx);
+ kfree(st->rx);
+ kfree(st);
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static struct spi_driver adis16400_driver = {
+ .driver = {
+ .name = "adis16400",
+ .owner = THIS_MODULE,
+ },
+ .probe = adis16400_probe,
+ .remove = __devexit_p(adis16400_remove),
+};
+
+static __init int adis16400_init(void)
+{
+ return spi_register_driver(&adis16400_driver);
+}
+module_init(adis16400_init);
+
+static __exit void adis16400_exit(void)
+{
+ spi_unregister_driver(&adis16400_driver);
+}
+module_exit(adis16400_exit);
+
+MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
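
For reference, a minimal user-space sketch of the SMPL_PRD arithmetic implemented by adis16400_read_frequency() and adis16400_write_frequency() above, assuming the internal 1638 SPS time base and a 7-bit divider field (the real ADIS16400_SMPL_PRD_DIV_MASK is defined in adis16400.h, which is not shown here). This is an illustration only, not part of the patch.

#include <stdio.h>

/* Assumed value of ADIS16400_SMPL_PRD_DIV_MASK, for this sketch only. */
#define SMPL_PRD_DIV_MASK 0x7f

/* Mirror of adis16400_write_frequency(): requested rate -> register value */
static unsigned int hz_to_smpl_prd(unsigned int hz)
{
	unsigned int t = 1638 / hz;

	if (t > 0)
		t--;
	return t & SMPL_PRD_DIV_MASK;
}

/* Mirror of adis16400_read_frequency() for the internal time base */
static unsigned int smpl_prd_to_hz(unsigned int t)
{
	return 1638 / ((t & SMPL_PRD_DIV_MASK) + 1);
}

int main(void)
{
	unsigned int req[] = { 1638, 819, 546, 409 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		unsigned int t = hz_to_smpl_prd(req[i]);

		/* prints: 1638 -> 0x00 -> 1638, 819 -> 0x01 -> 819, ... */
		printf("%u SPS -> SMPL_PRD 0x%02x -> %u SPS\n",
		       req[i], t, smpl_prd_to_hz(t));
	}
	return 0;
}

The round trip reproduces the advertised "409 546 819 1638" rates exactly, which is why the driver exposes those four values.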
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
new file mode 100644
index 00000000000..5529b32bd2e
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -0,0 +1,245 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "adis16400.h"
+
+/**
+ * combine_8_to_16() utility function to munge two u8s into a u16
+ **/
+static inline u16 combine_8_to_16(u8 lower, u8 upper)
+{
+ u16 _lower = lower;
+ u16 _upper = upper;
+ return _lower | (_upper << 8);
+}
+
+static IIO_SCAN_EL_C(supply, ADIS16400_SCAN_SUPPLY, IIO_SIGNED(14),
+ ADIS16400_SUPPLY_OUT, NULL);
+
+static IIO_SCAN_EL_C(gyro_x, ADIS16400_SCAN_GYRO_X, IIO_SIGNED(14),
+ ADIS16400_XGYRO_OUT, NULL);
+static IIO_SCAN_EL_C(gyro_y, ADIS16400_SCAN_GYRO_Y, IIO_SIGNED(14),
+ ADIS16400_YGYRO_OUT, NULL);
+static IIO_SCAN_EL_C(gyro_z, ADIS16400_SCAN_GYRO_Z, IIO_SIGNED(14),
+ ADIS16400_ZGYRO_OUT, NULL);
+
+static IIO_SCAN_EL_C(accel_x, ADIS16400_SCAN_ACC_X, IIO_SIGNED(14),
+ ADIS16400_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16400_SCAN_ACC_Y, IIO_SIGNED(14),
+ ADIS16400_YACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_z, ADIS16400_SCAN_ACC_Z, IIO_SIGNED(14),
+ ADIS16400_ZACCL_OUT, NULL);
+
+static IIO_SCAN_EL_C(magn_x, ADIS16400_SCAN_MAGN_X, IIO_SIGNED(14),
+ ADIS16400_XMAGN_OUT, NULL);
+static IIO_SCAN_EL_C(magn_y, ADIS16400_SCAN_MAGN_Y, IIO_SIGNED(14),
+ ADIS16400_YMAGN_OUT, NULL);
+static IIO_SCAN_EL_C(magn_z, ADIS16400_SCAN_MAGN_Z, IIO_SIGNED(14),
+ ADIS16400_ZMAGN_OUT, NULL);
+
+static IIO_SCAN_EL_C(temp, ADIS16400_SCAN_TEMP, IIO_SIGNED(12),
+ ADIS16400_TEMP_OUT, NULL);
+static IIO_SCAN_EL_C(adc_0, ADIS16400_SCAN_ADC_0, IIO_SIGNED(12),
+ ADIS16400_AUX_ADC, NULL);
+
+static IIO_SCAN_EL_TIMESTAMP(12);
+
+static struct attribute *adis16400_scan_el_attrs[] = {
+ &iio_scan_el_supply.dev_attr.attr,
+ &iio_scan_el_gyro_x.dev_attr.attr,
+ &iio_scan_el_gyro_y.dev_attr.attr,
+ &iio_scan_el_gyro_z.dev_attr.attr,
+ &iio_scan_el_accel_x.dev_attr.attr,
+ &iio_scan_el_accel_y.dev_attr.attr,
+ &iio_scan_el_accel_z.dev_attr.attr,
+ &iio_scan_el_magn_x.dev_attr.attr,
+ &iio_scan_el_magn_y.dev_attr.attr,
+ &iio_scan_el_magn_z.dev_attr.attr,
+ &iio_scan_el_temp.dev_attr.attr,
+ &iio_scan_el_adc_0.dev_attr.attr,
+ &iio_scan_el_timestamp.dev_attr.attr,
+ NULL,
+};
+
+static struct attribute_group adis16400_scan_el_group = {
+ .attrs = adis16400_scan_el_attrs,
+ .name = "scan_elements",
+};
+
+/**
+ * adis16400_poll_func_th() top half interrupt handler called by trigger
+ * @private_data: iio_dev
+ **/
+static void adis16400_poll_func_th(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_dev_get_devdata(indio_dev);
+ st->last_timestamp = indio_dev->trig->timestamp;
+ schedule_work(&st->work_trigger_to_ring);
+ /* Indicate that this interrupt is being handled */
+
+ /* Technically this is trigger related, but without this
+ * handler running there is currently no way for the interrupt
+ * to clear.
+ */
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
+ * device specific to be rolled into the core.
+ */
+static void adis16400_trigger_bh_to_ring(struct work_struct *work_s)
+{
+ struct adis16400_state *st
+ = container_of(work_s, struct adis16400_state,
+ work_trigger_to_ring);
+
+ int i = 0;
+ s16 *data;
+ size_t datasize = st->indio_dev
+ ->ring->access.get_bpd(st->indio_dev->ring);
+
+ data = kmalloc(datasize, GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&st->us->dev, "memory alloc failed in ring bh");
+ return;
+ }
+
+ if (st->indio_dev->scan_count)
+ if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
+ for (; i < st->indio_dev->scan_count; i++) {
+ data[i] = combine_8_to_16(st->rx[i*2+1],
+ st->rx[i*2]);
+ }
+
+ /* Guaranteed to be aligned to an 8 byte boundary */
+ if (st->indio_dev->scan_timestamp)
+ *((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+ st->indio_dev->ring->access.store_to(st->indio_dev->ring,
+ (u8 *)data,
+ st->last_timestamp);
+
+ iio_trigger_notify_done(st->indio_dev->trig);
+ kfree(data);
+
+ return;
+}
+/* In these circumstances is it better to go with unaligned packing and
+ * deal with the cost? */
+static int adis16400_data_rdy_ring_preenable(struct iio_dev *indio_dev)
+{
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled; if not, fail */
+ if (!(indio_dev->scan_count || indio_dev->scan_timestamp))
+ return -EINVAL;
+
+ if (indio_dev->ring->access.set_bpd) {
+ if (indio_dev->scan_timestamp)
+ if (indio_dev->scan_count) /* Timestamp and data */
+ size = 6*sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = indio_dev->scan_count*sizeof(s16);
+ indio_dev->ring->access.set_bpd(indio_dev->ring, size);
+ }
+
+ return 0;
+}
+
+static int adis16400_data_rdy_ring_postenable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+static int adis16400_data_rdy_ring_predisable(struct iio_dev *indio_dev)
+{
+ return indio_dev->trig
+ ? iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc)
+ : 0;
+}
+
+void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16400_configure_ring(struct iio_dev *indio_dev)
+{
+ int ret = 0;
+ struct adis16400_state *st = indio_dev->dev_data;
+ struct iio_ring_buffer *ring;
+ INIT_WORK(&st->work_trigger_to_ring, adis16400_trigger_bh_to_ring);
+ /* Set default scan mode */
+
+ iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_gyro_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_accel_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_magn_x.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_magn_y.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_magn_z.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
+ iio_scan_mask_set(indio_dev, iio_scan_el_adc_0.number);
+ indio_dev->scan_timestamp = true;
+
+ indio_dev->scan_el_attrs = &adis16400_scan_el_group;
+
+ ring = iio_sw_rb_allocate(indio_dev);
+ if (!ring) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ indio_dev->ring = ring;
+ /* Effectively select the ring buffer implementation */
+ iio_ring_sw_register_funcs(&ring->access);
+ ring->preenable = &adis16400_data_rdy_ring_preenable;
+ ring->postenable = &adis16400_data_rdy_ring_postenable;
+ ring->predisable = &adis16400_data_rdy_ring_predisable;
+ ring->owner = THIS_MODULE;
+
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_iio_sw_rb_free;
+ }
+ indio_dev->pollfunc->poll_func_main = &adis16400_poll_func_th;
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_iio_sw_rb_free:
+ iio_sw_rb_free(indio_dev->ring);
+ return ret;
+}
+
+int adis16400_initialize_ring(struct iio_ring_buffer *ring)
+{
+ return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16400_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+ iio_ring_buffer_unregister(ring);
+}
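
As a companion to adis16400_trigger_bh_to_ring() above, here is a minimal sketch of how the 24-byte burst buffer decodes into signed samples. It assumes the burst order matches the scan-element declarations at the top of this file (ten 14-bit channels followed by the 12-bit temperature and auxiliary ADC); it is illustrative only and not part of the patch.

#include <stdint.h>

/* Sign-extend a right-aligned two's-complement field, using the same
 * shift-up/shift-down trick as the sysfs read path in the core file. */
static int16_t sign_extend(uint16_t raw, unsigned int bits)
{
	unsigned int shift = 16 - bits;

	return (int16_t)((uint16_t)(raw << shift)) >> shift;
}

/* rx: 24-byte burst buffer as filled by adis16400_spi_read_burst(),
 * out: 12 decoded samples in scan-element order. */
static void decode_burst(const uint8_t *rx, int16_t *out)
{
	unsigned int i;

	for (i = 0; i < 12; i++) {
		/* The device sends the MSB first, so rx[2*i] is the high
		 * byte, matching combine_8_to_16(rx[2*i + 1], rx[2*i]). */
		uint16_t raw = ((uint16_t)rx[2 * i] << 8) | rx[2 * i + 1];
		/* First ten channels are declared IIO_SIGNED(14) above,
		 * temp and the aux ADC IIO_SIGNED(12). */
		unsigned int bits = (i < 10) ? 14 : 12;

		out[i] = sign_extend(raw, bits);
	}
}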
diff --git a/drivers/staging/iio/imu/adis16400_trigger.c b/drivers/staging/iio/imu/adis16400_trigger.c
new file mode 100644
index 00000000000..3b3250ac768
--- /dev/null
+++ b/drivers/staging/iio/imu/adis16400_trigger.c
@@ -0,0 +1,127 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16400.h"
+
+/**
+ * adis16400_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16400_data_rdy_trig_poll(struct iio_dev *dev_info,
+ int index,
+ s64 timestamp,
+ int no_test)
+{
+ struct adis16400_state *st = iio_dev_get_devdata(dev_info);
+ struct iio_trigger *trig = st->trig;
+
+ trig->timestamp = timestamp;
+ iio_trigger_poll(trig);
+
+ return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16400_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16400_trigger_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group adis16400_trigger_attr_group = {
+ .attrs = adis16400_trigger_attrs,
+};
+
+/**
+ * adis16400_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16400_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct adis16400_state *st = trig->private_data;
+ struct iio_dev *indio_dev = st->indio_dev;
+ int ret = 0;
+
+ dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+ ret = adis16400_set_irq(&st->indio_dev->dev, state);
+ if (state == false) {
+ iio_remove_event_from_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]
+ ->ev_list);
+ /* possible quirk with handler currently worked around
+ by ensuring the work queue is empty */
+ flush_scheduled_work();
+ } else {
+ iio_add_event_to_list(&iio_event_data_rdy_trig,
+ &indio_dev->interrupts[0]->ev_list);
+ }
+ return ret;
+}
+
+/**
+ * adis16400_trig_try_reen() try re-enabling the irq for the data rdy trigger
+ * @trig: the datardy trigger
+ **/
+static int adis16400_trig_try_reen(struct iio_trigger *trig)
+{
+ struct adis16400_state *st = trig->private_data;
+ enable_irq(st->us->irq);
+ /* irq reenabled so success! */
+ return 0;
+}
+
+int adis16400_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct adis16400_state *st = indio_dev->dev_data;
+
+ st->trig = iio_allocate_trigger();
+ if (st->trig == NULL)
+ return -ENOMEM;
+ st->trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
+ if (!st->trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig;
+ }
+ snprintf((char *)st->trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "adis16400-dev%d", indio_dev->id);
+ st->trig->dev.parent = &st->us->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = st;
+ st->trig->set_trigger_state = &adis16400_data_rdy_trigger_set_state;
+ st->trig->try_reenable = &adis16400_trig_try_reen;
+ st->trig->control_attrs = &adis16400_trigger_attr_group;
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_trig_name;
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+
+ return 0;
+
+error_free_trig_name:
+ kfree(st->trig->name);
+error_free_trig:
+ iio_free_trigger(st->trig);
+
+ return ret;
+}
+
+void adis16400_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *state = indio_dev->dev_data;
+
+ iio_trigger_unregister(state->trig);
+ kfree(state->trig->name);
+ iio_free_trigger(state->trig);
+}
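
A minimal user-space sketch of how the trigger registered above can be attached to its device through the current_trigger attribute touched in industrialio-trigger.c later in this patch. The sysfs path and index numbers are assumptions (they depend on probe order and the exact attribute-group layout), so treat this as an illustration rather than a tested recipe; it is not part of the patch.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path - device/trigger numbering depends on the
	 * probe order of the target system. */
	const char *path =
		"/sys/bus/iio/devices/device0/trigger/current_trigger";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("current_trigger");
		return 1;
	}
	/* iio_trigger_find_by_name() now strips a trailing newline, so an
	 * echo-style write with "\n" works. */
	fprintf(f, "adis16400-dev0\n");
	return fclose(f) ? 1 : 0;
}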
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 1d77082c853..01030684ef2 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -42,16 +42,10 @@ dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);
#define IIO_DEV_MAX 256
-static char *iio_devnode(struct device *dev, mode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "iio/%s", dev_name(dev));
-}
-
-struct class iio_class = {
+struct bus_type iio_bus_type = {
.name = "iio",
- .devnode = iio_devnode,
};
-EXPORT_SYMBOL(iio_class);
+EXPORT_SYMBOL(iio_bus_type);
void __iio_change_event(struct iio_detected_event_list *ev,
int ev_code,
@@ -405,7 +399,7 @@ int iio_setup_ev_int(struct iio_event_interface *ev_int,
{
int ret, minor;
- ev_int->dev.class = &iio_class;
+ ev_int->dev.bus = &iio_bus_type;
ev_int->dev.parent = dev;
ev_int->dev.type = &iio_event_type;
device_initialize(&ev_int->dev);
@@ -478,23 +472,23 @@ static int __init iio_init(void)
{
int ret;
- /* Create sysfs class */
- ret = class_register(&iio_class);
+ /* Register sysfs bus */
+ ret = bus_register(&iio_bus_type);
if (ret < 0) {
printk(KERN_ERR
- "%s could not create sysfs class\n",
+ "%s could not register bus type\n",
__FILE__);
goto error_nothing;
}
ret = iio_dev_init();
if (ret < 0)
- goto error_unregister_class;
+ goto error_unregister_bus_type;
return 0;
-error_unregister_class:
- class_unregister(&iio_class);
+error_unregister_bus_type:
+ bus_unregister(&iio_bus_type);
error_nothing:
return ret;
}
@@ -502,7 +496,7 @@ error_nothing:
static void __exit iio_exit(void)
{
iio_dev_exit();
- class_unregister(&iio_class);
+ bus_unregister(&iio_bus_type);
}
static int iio_device_register_sysfs(struct iio_dev *dev_info)
@@ -667,8 +661,9 @@ static int iio_device_register_eventset(struct iio_dev *dev_info)
dev_info->event_interfaces[i].id = ret;
snprintf(dev_info->event_interfaces[i]._name, 20,
- "event_line%d",
- dev_info->event_interfaces[i].id);
+ "%s:event%d",
+ dev_name(&dev_info->dev),
+ dev_info->event_interfaces[i].id);
ret = iio_setup_ev_int(&dev_info->event_interfaces[i],
(const char *)(dev_info
@@ -683,16 +678,14 @@ static int iio_device_register_eventset(struct iio_dev *dev_info)
dev_info->event_interfaces[i].id);
goto error_free_setup_ev_ints;
}
- }
- for (i = 0; i < dev_info->num_interrupt_lines; i++) {
- snprintf(dev_info->event_interfaces[i]._attrname, 20,
- "event_line%d_sources", i);
- dev_info->event_attrs[i].name
- = (const char *)
- (dev_info->event_interfaces[i]._attrname);
- ret = sysfs_create_group(&dev_info->dev.kobj,
- &dev_info->event_attrs[i]);
+ dev_set_drvdata(&dev_info->event_interfaces[i].dev,
+ (void *)dev_info);
+ ret = sysfs_create_group(&dev_info
+ ->event_interfaces[i]
+ .dev.kobj,
+ &dev_info->event_attrs[i]);
+
if (ret) {
dev_err(&dev_info->dev,
"Failed to register sysfs for event attrs");
@@ -714,13 +707,13 @@ error_unregister_config_attrs:
i = dev_info->num_interrupt_lines - 1;
error_remove_sysfs_interfaces:
for (j = 0; j < i; j++)
- sysfs_remove_group(&dev_info->dev.kobj,
+ sysfs_remove_group(&dev_info
+ ->event_interfaces[j].dev.kobj,
&dev_info->event_attrs[j]);
- i = dev_info->num_interrupt_lines - 1;
error_free_setup_ev_ints:
for (j = 0; j < i; j++) {
iio_free_idr_val(&iio_event_idr,
- dev_info->event_interfaces[i].id);
+ dev_info->event_interfaces[j].id);
iio_free_ev_int(&dev_info->event_interfaces[j]);
}
kfree(dev_info->interrupts);
@@ -738,7 +731,8 @@ static void iio_device_unregister_eventset(struct iio_dev *dev_info)
if (dev_info->num_interrupt_lines == 0)
return;
for (i = 0; i < dev_info->num_interrupt_lines; i++)
- sysfs_remove_group(&dev_info->dev.kobj,
+ sysfs_remove_group(&dev_info
+ ->event_interfaces[i].dev.kobj,
&dev_info->event_attrs[i]);
for (i = 0; i < dev_info->num_interrupt_lines; i++) {
@@ -769,7 +763,7 @@ struct iio_dev *iio_allocate_device(void)
if (dev) {
dev->dev.type = &iio_dev_type;
- dev->dev.class = &iio_class;
+ dev->dev.bus = &iio_bus_type;
device_initialize(&dev->dev);
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
@@ -810,7 +804,7 @@ int iio_device_register(struct iio_dev *dev_info)
ret = iio_device_register_eventset(dev_info);
if (ret) {
dev_err(dev_info->dev.parent,
- "Failed to register event set \n");
+ "Failed to register event set\n");
goto error_free_sysfs;
}
if (dev_info->modes & INDIO_RING_TRIGGERED)
diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c
index e53e214bfeb..ada159bbb1f 100644
--- a/drivers/staging/iio/industrialio-ring.c
+++ b/drivers/staging/iio/industrialio-ring.c
@@ -20,19 +20,11 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/cdev.h>
-#include <linux/idr.h>
#include <linux/slab.h>
#include "iio.h"
#include "ring_generic.h"
-/* IDR for ring buffer identifier */
-static DEFINE_IDR(iio_ring_idr);
-/* IDR for ring event identifier */
-static DEFINE_IDR(iio_ring_event_idr);
-/* IDR for ring access identifier */
-static DEFINE_IDR(iio_ring_access_idr);
-
int iio_push_ring_event(struct iio_ring_buffer *ring_buf,
int event_code,
s64 timestamp)
@@ -66,7 +58,7 @@ EXPORT_SYMBOL(iio_push_or_escallate_ring_event);
* This function relies on all ring buffer implementations having an
* iio_ring_buffer as their first element.
**/
-int iio_ring_open(struct inode *inode, struct file *filp)
+static int iio_ring_open(struct inode *inode, struct file *filp)
{
struct iio_handler *hand
= container_of(inode->i_cdev, struct iio_handler, chrdev);
@@ -85,7 +77,7 @@ int iio_ring_open(struct inode *inode, struct file *filp)
* This function relies on all ring buffer implementations having an
* iio_ring_buffer as their first element.
**/
-int iio_ring_release(struct inode *inode, struct file *filp)
+static int iio_ring_release(struct inode *inode, struct file *filp)
{
struct cdev *cd = inode->i_cdev;
struct iio_handler *hand = iio_cdev_to_handler(cd);
@@ -104,10 +96,8 @@ int iio_ring_release(struct inode *inode, struct file *filp)
* This function relies on all ring buffer implementations having an
* iio_ring _bufer as their first element.
**/
-ssize_t iio_ring_rip_outer(struct file *filp,
- char *buf,
- size_t count,
- loff_t *f_ps)
+static ssize_t iio_ring_rip_outer(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_ps)
{
struct iio_ring_buffer *rb = filp->private_data;
int ret, dead_offset, copied;
@@ -158,25 +148,21 @@ __iio_request_ring_buffer_event_chrdev(struct iio_ring_buffer *buf,
struct device *dev)
{
int ret;
- ret = iio_get_new_idr_val(&iio_ring_event_idr);
- if (ret < 0)
- goto error_ret;
- else
- buf->ev_int.id = ret;
- snprintf(buf->ev_int._name, 20,
- "ring_event_line%d",
+ buf->ev_int.id = id;
+
+ snprintf(buf->ev_int._name, sizeof(buf->ev_int._name),
+ "%s:event%d",
+ dev_name(&buf->dev),
buf->ev_int.id);
ret = iio_setup_ev_int(&(buf->ev_int),
buf->ev_int._name,
owner,
dev);
if (ret)
- goto error_free_id;
+ goto error_ret;
return 0;
-error_free_id:
- iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
error_ret:
return ret;
}
@@ -185,7 +171,6 @@ static inline void
__iio_free_ring_buffer_event_chrdev(struct iio_ring_buffer *buf)
{
iio_free_ev_int(&(buf->ev_int));
- iio_free_idr_val(&iio_ring_event_idr, buf->ev_int.id);
}
static void iio_ring_access_release(struct device *dev)
@@ -210,7 +195,7 @@ __iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
buf->access_handler.flags = 0;
buf->access_dev.parent = &buf->dev;
- buf->access_dev.class = &iio_class;
+ buf->access_dev.bus = &iio_bus_type;
buf->access_dev.type = &iio_ring_access_type;
device_initialize(&buf->access_dev);
@@ -221,16 +206,16 @@ __iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
}
buf->access_dev.devt = MKDEV(MAJOR(iio_devt), minor);
- ret = iio_get_new_idr_val(&iio_ring_access_idr);
- if (ret < 0)
- goto error_device_put;
- else
- buf->access_id = ret;
- dev_set_name(&buf->access_dev, "ring_access%d", buf->access_id);
+
+ buf->access_id = id;
+
+ dev_set_name(&buf->access_dev, "%s:access%d",
+ dev_name(&buf->dev),
+ buf->access_id);
ret = device_add(&buf->access_dev);
if (ret < 0) {
printk(KERN_ERR "failed to add the ring access dev\n");
- goto error_free_idr;
+ goto error_device_put;
}
cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
@@ -242,10 +227,9 @@ __iio_request_ring_buffer_access_chrdev(struct iio_ring_buffer *buf,
goto error_device_unregister;
}
return 0;
+
error_device_unregister:
device_unregister(&buf->access_dev);
-error_free_idr:
- iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
error_device_put:
put_device(&buf->access_dev);
@@ -254,7 +238,6 @@ error_device_put:
static void __iio_free_ring_buffer_access_chrdev(struct iio_ring_buffer *buf)
{
- iio_free_idr_val(&iio_ring_access_idr, buf->access_id);
device_unregister(&buf->access_dev);
}
@@ -266,22 +249,23 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
ring->indio_dev = dev_info;
ring->ev_int.private = ring;
ring->access_handler.private = ring;
+ ring->shared_ev_pointer.ev_p = NULL;
+ spin_lock_init(&ring->shared_ev_pointer.lock);
}
EXPORT_SYMBOL(iio_ring_buffer_init);
-int iio_ring_buffer_register(struct iio_ring_buffer *ring)
+int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
int ret;
- ret = iio_get_new_idr_val(&iio_ring_idr);
- if (ret < 0)
- goto error_ret;
- else
- ring->id = ret;
- dev_set_name(&ring->dev, "ring_buffer%d", ring->id);
+ ring->id = id;
+
+ dev_set_name(&ring->dev, "%s:buffer%d",
+ dev_name(ring->dev.parent),
+ ring->id);
ret = device_add(&ring->dev);
if (ret)
- goto error_free_id;
+ goto error_ret;
ret = __iio_request_ring_buffer_event_chrdev(ring,
0,
@@ -302,8 +286,6 @@ error_free_ring_buffer_event_chrdev:
__iio_free_ring_buffer_event_chrdev(ring);
error_remove_device:
device_del(&ring->dev);
-error_free_id:
- iio_free_idr_val(&iio_ring_idr, ring->id);
error_ret:
return ret;
}
@@ -314,7 +296,6 @@ void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
__iio_free_ring_buffer_access_chrdev(ring);
__iio_free_ring_buffer_event_chrdev(ring);
device_del(&ring->dev);
- iio_free_idr_val(&iio_ring_idr, ring->id);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 35ec80ba444..5682e61600f 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -18,6 +18,7 @@
#include "iio.h"
#include "trigger.h"
+#include "trigger_consumer.h"
/* RFC - Question of approach
* Make the common case (single sensor single trigger)
@@ -92,9 +93,9 @@ idr_again:
**/
static void iio_trigger_unregister_id(struct iio_trigger *trig_info)
{
- spin_lock(&iio_trigger_idr_lock);
- idr_remove(&iio_trigger_idr, trig_info->id);
- spin_unlock(&iio_trigger_idr_lock);
+ spin_lock(&iio_trigger_idr_lock);
+ idr_remove(&iio_trigger_idr, trig_info->id);
+ spin_unlock(&iio_trigger_idr_lock);
}
int iio_trigger_register(struct iio_trigger *trig_info)
@@ -156,6 +157,9 @@ struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len)
struct iio_trigger *trig;
bool found = false;
+ if (len && name[len - 1] == '\n')
+ len--;
+
mutex_lock(&iio_trigger_list_lock);
list_for_each_entry(trig, &iio_trigger_list, list) {
if (strncmp(trig->name, name, len) == 0) {
@@ -166,7 +170,7 @@ struct iio_trigger *iio_trigger_find_by_name(const char *name, size_t len)
mutex_unlock(&iio_trigger_list_lock);
return found ? trig : NULL;
-};
+}
EXPORT_SYMBOL(iio_trigger_find_by_name);
void iio_trigger_poll(struct iio_trigger *trig)
@@ -331,9 +335,9 @@ static ssize_t iio_trigger_write_current(struct device *dev,
return len;
}
-DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
- iio_trigger_read_current,
- iio_trigger_write_current);
+static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
+ iio_trigger_read_current,
+ iio_trigger_write_current);
static struct attribute *iio_trigger_consumer_attrs[] = {
&dev_attr_current_trigger.attr,
@@ -362,7 +366,7 @@ struct iio_trigger *iio_allocate_trigger(void)
trig = kzalloc(sizeof *trig, GFP_KERNEL);
if (trig) {
trig->dev.type = &iio_trig_type;
- trig->dev.class = &iio_class;
+ trig->dev.bus = &iio_bus_type;
device_initialize(&trig->dev);
dev_set_drvdata(&trig->dev, (void *)trig);
spin_lock_init(&trig->pollfunc_list_lock);
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 8770a00e365..43aaacff4e7 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -592,18 +592,30 @@ static ssize_t tsl2563_calib1_store(struct device *dev,
* once I understand what they mean */
static DEVICE_ATTR(adc0, S_IRUGO, tsl2563_adc0_show, NULL);
static DEVICE_ATTR(adc1, S_IRUGO, tsl2563_adc1_show, NULL);
-static DEVICE_ATTR(lux, S_IRUGO, tsl2563_lux_show, NULL);
+static DEVICE_ATTR(illuminance0_input, S_IRUGO, tsl2563_lux_show, NULL);
static DEVICE_ATTR(calib0, S_IRUGO | S_IWUSR,
tsl2563_calib0_show, tsl2563_calib0_store);
static DEVICE_ATTR(calib1, S_IRUGO | S_IWUSR,
tsl2563_calib1_show, tsl2563_calib1_store);
+static ssize_t tsl2563_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tsl2563_chip *chip = indio_dev->dev_data;
+ return sprintf(buf, "%s\n", chip->client->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, tsl2563_show_name, NULL);
+
static struct attribute *tsl2563_attributes[] = {
&dev_attr_adc0.attr,
&dev_attr_adc1.attr,
- &dev_attr_lux.attr,
+ &dev_attr_illuminance0_input.attr,
&dev_attr_calib0.attr,
&dev_attr_calib1.attr,
+ &dev_attr_name.attr,
NULL
};
@@ -634,7 +646,7 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
err = tsl2563_detect(chip);
if (err) {
- dev_err(&client->dev, "device not found, error %d \n", -err);
+ dev_err(&client->dev, "device not found, error %d\n", -err);
goto fail1;
}
diff --git a/drivers/staging/iio/magnetometer/magnet.h b/drivers/staging/iio/magnetometer/magnet.h
new file mode 100644
index 00000000000..64338301f8d
--- /dev/null
+++ b/drivers/staging/iio/magnetometer/magnet.h
@@ -0,0 +1,31 @@
+
+#include "../sysfs.h"
+
+/* Magnetometer types of attribute */
+
+#define IIO_DEV_ATTR_MAGN_X_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_x_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Y_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_y_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Z_OFFSET(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_z_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_X_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_x_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Y_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_y_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Z_GAIN(_mode, _show, _store, _addr) \
+ IIO_DEVICE_ATTR(magn_z_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_MAGN_X(_show, _addr) \
+ IIO_DEVICE_ATTR(magn_x_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Y(_show, _addr) \
+ IIO_DEVICE_ATTR(magn_y_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_MAGN_Z(_show, _addr) \
+ IIO_DEVICE_ATTR(magn_z_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
index 09044adf732..0e443757b02 100644
--- a/drivers/staging/iio/ring_generic.h
+++ b/drivers/staging/iio/ring_generic.h
@@ -134,19 +134,17 @@ void iio_ring_buffer_init(struct iio_ring_buffer *ring,
struct iio_dev *dev_info);
/**
- * __iio_init_ring_buffer() - initialize common elements of ring buffers
+ * __iio_update_ring_buffer() - update common elements of ring buffers
* @ring: ring buffer that is the event source
* @bytes_per_datum: size of individual datum including timestamp
* @length: number of datums in ring
**/
-static inline void __iio_init_ring_buffer(struct iio_ring_buffer *ring,
- int bytes_per_datum, int length)
+static inline void __iio_update_ring_buffer(struct iio_ring_buffer *ring,
+ int bytes_per_datum, int length)
{
ring->bpd = bytes_per_datum;
ring->length = length;
ring->loopcount = 0;
- ring->shared_ev_pointer.ev_p = 0;
- spin_lock_init(&ring->shared_ev_pointer.lock);
}
/**
@@ -198,25 +196,6 @@ ssize_t iio_scan_el_store(struct device *dev, struct device_attribute *attr,
**/
ssize_t iio_scan_el_show(struct device *dev, struct device_attribute *attr,
char *buf);
-/**
- * IIO_SCAN_EL - declare and initialize a scan element without control func
- * @_name: identifying name. Resulting struct is iio_scan_el_##_name,
- * sysfs element, scan_en_##_name.
- * @_number: unique id number for the scan element.
- * @_bits: number of bits in the scan element result (used in mixed bit
- * length devices).
- * @_label: indentification variable used by drivers. Often a reg address.
- **/
-#define IIO_SCAN_EL(_name, _number, _bits, _label) \
- struct iio_scan_el iio_scan_el_##_name = { \
- .dev_attr = __ATTR(scan_en_##_name, \
- S_IRUGO | S_IWUSR, \
- iio_scan_el_show, \
- iio_scan_el_store), \
- .mask = (1 << _number), \
- .bit_count = _bits, \
- .label = _label, \
- }
ssize_t iio_scan_el_ts_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len);
@@ -227,7 +206,7 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
* IIO_SCAN_EL_C - declare and initialize a scan element with a control func
*
* @_name: identifying name. Resulting struct is iio_scan_el_##_name,
- * sysfs element, scan_en_##_name.
+ * sysfs element, _name##_en.
* @_number: unique id number for the scan element.
* @_bits: number of bits in the scan element result (used in mixed bit
* length devices).
@@ -236,7 +215,7 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
**/
#define IIO_SCAN_EL_C(_name, _number, _bits, _label, _controlfunc) \
struct iio_scan_el iio_scan_el_##_name = { \
- .dev_attr = __ATTR(scan_en_##_name, \
+ .dev_attr = __ATTR(_number##_##_name##_en, \
S_IRUGO | S_IWUSR, \
iio_scan_el_show, \
iio_scan_el_store), \
@@ -245,14 +224,27 @@ ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr,
.label = _label, \
.set_state = _controlfunc, \
}
+
+#define IIO_SCAN_NAMED_EL_C(_name, _string, _number, _bits, _label, _cf) \
+ struct iio_scan_el iio_scan_el_##_name = { \
+ .dev_attr = __ATTR(_number##_##_string##_en, \
+ S_IRUGO | S_IWUSR, \
+ iio_scan_el_show, \
+ iio_scan_el_store), \
+ .number = _number, \
+ .bit_count = _bits, \
+ .label = _label, \
+ .set_state = _cf, \
+ }
+
/**
* IIO_SCAN_EL_TIMESTAMP - declare a special scan element for timestamps
*
* Odd one out. Handled slightly differently from other scan elements.
**/
-#define IIO_SCAN_EL_TIMESTAMP \
+#define IIO_SCAN_EL_TIMESTAMP(number) \
struct iio_scan_el iio_scan_el_timestamp = { \
- .dev_attr = __ATTR(scan_en_timestamp, \
+ .dev_attr = __ATTR(number##_timestamp_en, \
S_IRUGO | S_IWUSR, \
iio_scan_el_ts_show, \
iio_scan_el_ts_store), \
@@ -267,7 +259,7 @@ static inline void iio_put_ring_buffer(struct iio_ring_buffer *ring)
container_of(d, struct iio_ring_buffer, dev)
#define access_dev_to_iio_ring_buffer(d) \
container_of(d, struct iio_ring_buffer, access_dev)
-int iio_ring_buffer_register(struct iio_ring_buffer *ring);
+int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id);
void iio_ring_buffer_unregister(struct iio_ring_buffer *ring);
ssize_t iio_read_ring_length(struct device *dev,
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index cf22c091668..1f14cd4770e 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -14,22 +14,25 @@
#include <linux/workqueue.h>
#include "ring_sw.h"
-static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
- int bytes_per_datum, int length)
+static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
+ int bytes_per_datum, int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
-
- __iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
- spin_lock_init(&ring->use_lock);
+ __iio_update_ring_buffer(&ring->buf, bytes_per_datum, length);
ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
- ring->read_p = 0;
- ring->write_p = 0;
- ring->last_written_p = 0;
- ring->half_p = 0;
+ ring->read_p = NULL;
+ ring->write_p = NULL;
+ ring->last_written_p = NULL;
+ ring->half_p = NULL;
return ring->data ? 0 : -ENOMEM;
}
+static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
+{
+ spin_lock_init(&ring->use_lock);
+}
+
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
@@ -59,16 +62,15 @@ EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
* in the device driver */
/* Lock always held if their is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
-int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data,
- s64 timestamp)
+static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
+ unsigned char *data, s64 timestamp)
{
int ret = 0;
int code;
unsigned char *temp_ptr, *change_test_ptr;
/* initial store */
- if (unlikely(ring->write_p == 0)) {
+ if (unlikely(ring->write_p == NULL)) {
ring->write_p = ring->data;
/* Doesn't actually matter if this is out of the set
* as long as the read pointer is valid before this
@@ -99,7 +101,7 @@ int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
*/
ring->write_p = temp_ptr;
- if (ring->read_p == 0)
+ if (ring->read_p == NULL)
ring->read_p = ring->data;
/* Buffer full - move the read pointer and create / escalate
* ring event */
@@ -123,8 +125,7 @@ int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
spin_lock(&ring->buf.shared_ev_pointer.lock);
ret = iio_push_or_escallate_ring_event(&ring->buf,
- IIO_EVENT_CODE_RING_100_FULL,
- timestamp);
+ IIO_EVENT_CODE_RING_100_FULL, timestamp);
spin_unlock(&ring->buf.shared_ev_pointer.lock);
if (ret)
goto error_ret;
@@ -180,7 +181,7 @@ int iio_rip_sw_rb(struct iio_ring_buffer *r,
/* build local copy */
initial_read_p = ring->read_p;
- if (unlikely(initial_read_p == 0)) { /* No data here as yet */
+ if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
ret = 0;
goto error_free_data_cpy;
}
@@ -278,8 +279,8 @@ int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
}
EXPORT_SYMBOL(iio_store_to_sw_rb);
-int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data)
+static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
+ unsigned char *data)
{
unsigned char *last_written_p_copy;
@@ -289,7 +290,7 @@ again:
last_written_p_copy = ring->last_written_p;
barrier(); /*unnessecary? */
/* Check there is anything here */
- if (last_written_p_copy == 0)
+ if (last_written_p_copy == NULL)
return -EAGAIN;
memcpy(data, last_written_p_copy, ring->buf.bpd);
@@ -320,7 +321,8 @@ int iio_request_update_sw_rb(struct iio_ring_buffer *r)
goto error_ret;
}
__iio_free_sw_ring_buffer(ring);
- ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
+ ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
+ ring->buf.length);
error_ret:
spin_unlock(&ring->use_lock);
return ret;
@@ -409,14 +411,14 @@ struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
ring = kzalloc(sizeof *ring, GFP_KERNEL);
if (!ring)
- return 0;
+ return NULL;
buf = &ring->buf;
-
iio_ring_buffer_init(buf, indio_dev);
+ __iio_init_sw_ring_buffer(ring);
buf->dev.type = &iio_sw_ring_type;
device_initialize(&buf->dev);
buf->dev.parent = &indio_dev->dev;
- buf->dev.class = &iio_class;
+ buf->dev.bus = &iio_bus_type;
dev_set_drvdata(&buf->dev, (void *)buf);
return buf;
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index e501e1338e1..afcf5ab85f4 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -98,6 +98,9 @@ struct iio_const_attr {
struct iio_dev_attr iio_dev_attr_##_name \
= IIO_ATTR(_name, _mode, _show, _store, _addr)
+#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_vname \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
#define IIO_DEVICE_ATTR_2(_name, _mode, _show, _store, _addr, _val2) \
struct iio_dev_attr iio_dev_attr_##_name \
@@ -141,18 +144,25 @@ struct iio_const_attr {
*
* May be mode dependent on some devices
**/
+/* Deprecated */
#define IIO_DEV_ATTR_AVAIL_SAMP_FREQ(_show) \
IIO_DEVICE_ATTR(available_sampling_frequency, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \
+ IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0)
/**
* IIO_CONST_ATTR_AVAIL_SAMP_FREQ - list available sampling frequencies
* @_string: frequency string for the attribute
*
* Constant version
**/
-#define IIO_CONST_ATTR_AVAIL_SAMP_FREQ(_string) \
+/* Deprecated */
+#define IIO_CONST_ATTR_AVAIL_SAMP_FREQ(_string) \
IIO_CONST_ATTR(available_sampling_frequency, _string)
+#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
+ IIO_CONST_ATTR(sampling_frequency_available, _string)
+
/**
* IIO_DEV_ATTR_SCAN_MODE - select a scan mode
* @_mode: sysfs file mode/permissions
@@ -231,6 +241,9 @@ struct iio_const_attr {
#define IIO_DEV_ATTR_TEMP(_show) \
IIO_DEVICE_ATTR(temp, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_TEMP_RAW(_show) \
+ IIO_DEVICE_ATTR(temp_raw, S_IRUGO, _show, NULL, 0)
+
/**
* IIO_EVENT_SH - generic shared event handler
* @_name: event name
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index 0c3bad3187f..1da285d2863 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -13,7 +13,6 @@
* TODO:
*
* Add board config elements to allow specification of startup settings.
- * Add configuration settings (irq type etc)
*/
#include <linux/kernel.h>
@@ -26,12 +25,12 @@
#include "../iio.h"
#include "../trigger.h"
-LIST_HEAD(iio_gpio_trigger_list);
-DEFINE_MUTEX(iio_gpio_trigger_list_lock);
+static LIST_HEAD(iio_gpio_trigger_list);
+static DEFINE_MUTEX(iio_gpio_trigger_list_lock);
struct iio_gpio_trigger_info {
struct mutex in_use;
- int gpio;
+ unsigned int irq;
};
/*
* Need to reference count these triggers and only enable gpio interrupts
@@ -58,78 +57,77 @@ static const struct attribute_group iio_gpio_trigger_attr_group = {
.attrs = iio_gpio_trigger_attrs,
};
-static int iio_gpio_trigger_probe(struct platform_device *dev)
+static int iio_gpio_trigger_probe(struct platform_device *pdev)
{
- int *pdata = dev->dev.platform_data;
struct iio_gpio_trigger_info *trig_info;
struct iio_trigger *trig, *trig2;
- int i, irq, ret = 0;
- if (!pdata) {
- printk(KERN_ERR "No IIO gpio trigger platform data found\n");
- goto error_ret;
- }
- for (i = 0;; i++) {
- if (!gpio_is_valid(pdata[i]))
- break;
- trig = iio_allocate_trigger();
- if (!trig) {
- ret = -ENOMEM;
- goto error_free_completed_registrations;
- }
+ unsigned long irqflags;
+ struct resource *irq_res;
+ int irq, ret = 0, irq_res_cnt = 0;
- trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
- if (!trig_info) {
- ret = -ENOMEM;
- goto error_put_trigger;
- }
- trig->control_attrs = &iio_gpio_trigger_attr_group;
- trig->private_data = trig_info;
- trig_info->gpio = pdata[i];
- trig->owner = THIS_MODULE;
- trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH, GFP_KERNEL);
- if (!trig->name) {
- ret = -ENOMEM;
- goto error_free_trig_info;
+ do {
+ irq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, irq_res_cnt);
+
+ if (irq_res == NULL) {
+ if (irq_res_cnt == 0)
+ dev_err(&pdev->dev, "No GPIO IRQs specified");
+ break;
}
- snprintf((char *)trig->name,
- IIO_TRIGGER_NAME_LENGTH,
- "gpiotrig%d",
- pdata[i]);
- ret = gpio_request(trig_info->gpio, trig->name);
- if (ret)
- goto error_free_name;
-
- ret = gpio_direction_input(trig_info->gpio);
- if (ret)
- goto error_release_gpio;
-
- irq = gpio_to_irq(trig_info->gpio);
- if (irq < 0) {
- ret = irq;
- goto error_release_gpio;
+ irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
+
+ for (irq = irq_res->start; irq <= irq_res->end; irq++) {
+
+ trig = iio_allocate_trigger();
+ if (!trig) {
+ ret = -ENOMEM;
+ goto error_free_completed_registrations;
+ }
+
+ trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ if (!trig_info) {
+ ret = -ENOMEM;
+ goto error_put_trigger;
+ }
+ trig->control_attrs = &iio_gpio_trigger_attr_group;
+ trig->private_data = trig_info;
+ trig_info->irq = irq;
+ trig->owner = THIS_MODULE;
+ trig->name = kmalloc(IIO_TRIGGER_NAME_LENGTH,
+ GFP_KERNEL);
+ if (!trig->name) {
+ ret = -ENOMEM;
+ goto error_free_trig_info;
+ }
+ snprintf((char *)trig->name,
+ IIO_TRIGGER_NAME_LENGTH,
+ "irqtrig%d", irq);
+
+ ret = request_irq(irq, iio_gpio_trigger_poll,
+ irqflags, trig->name, trig);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "request IRQ-%d failed", irq);
+ goto error_free_name;
+ }
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto error_release_irq;
+
+ list_add_tail(&trig->alloc_list,
+ &iio_gpio_trigger_list);
}
- ret = request_irq(irq, iio_gpio_trigger_poll,
- IRQF_TRIGGER_RISING,
- trig->name,
- trig);
- if (ret)
- goto error_release_gpio;
+ irq_res_cnt++;
+ } while (irq_res != NULL);
- ret = iio_trigger_register(trig);
- if (ret)
- goto error_release_irq;
- list_add_tail(&trig->alloc_list, &iio_gpio_trigger_list);
-
- }
return 0;
/* First clean up the partly allocated trigger */
error_release_irq:
free_irq(irq, trig);
-error_release_gpio:
- gpio_free(trig_info->gpio);
error_free_name:
kfree(trig->name);
error_free_trig_info:
@@ -143,18 +141,16 @@ error_free_completed_registrations:
&iio_gpio_trigger_list,
alloc_list) {
trig_info = trig->private_data;
- free_irq(gpio_to_irq(trig_info->gpio), trig);
- gpio_free(trig_info->gpio);
+ free_irq(gpio_to_irq(trig_info->irq), trig);
kfree(trig->name);
kfree(trig_info);
iio_trigger_unregister(trig);
}
-error_ret:
return ret;
}
-static int iio_gpio_trigger_remove(struct platform_device *dev)
+static int iio_gpio_trigger_remove(struct platform_device *pdev)
{
struct iio_trigger *trig, *trig2;
struct iio_gpio_trigger_info *trig_info;
@@ -166,8 +162,7 @@ static int iio_gpio_trigger_remove(struct platform_device *dev)
alloc_list) {
trig_info = trig->private_data;
iio_trigger_unregister(trig);
- free_irq(gpio_to_irq(trig_info->gpio), trig);
- gpio_free(trig_info->gpio);
+ free_irq(trig_info->irq, trig);
kfree(trig->name);
kfree(trig_info);
iio_put_trigger(trig);
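
A minimal board-file sketch of how IRQs could be handed to the reworked trigger probe above, which now walks IORESOURCE_IRQ resources (carrying IRQF_TRIGGER_* flags) instead of a platform-data GPIO list. The platform device name and IRQ numbers are made-up placeholders, since neither appears in this hunk; this is an illustration, not part of the patch.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>

static struct resource board_iio_trig_resources[] = {
	{
		/* two consecutive IRQs -> two triggers ("irqtrig42"/"irqtrig43") */
		.start = 42,
		.end   = 43,
		.flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
	},
};

static struct platform_device board_iio_gpio_trig = {
	.name		= "iio_gpio_trigger",	/* assumed driver name */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(board_iio_trig_resources),
	.resource	= board_iio_trig_resources,
};

/* registered from board init with platform_device_register(&board_iio_gpio_trig) */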
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 4295bbc7b50..4ee3ae1ef89 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -19,8 +19,8 @@
#include "../iio.h"
#include "../trigger.h"
-LIST_HEAD(iio_prtc_trigger_list);
-DEFINE_MUTEX(iio_prtc_trigger_list_lock);
+static LIST_HEAD(iio_prtc_trigger_list);
+static DEFINE_MUTEX(iio_prtc_trigger_list_lock);
struct iio_prtc_trigger_info {
struct rtc_device *rtc;
diff --git a/drivers/staging/line6/control.h b/drivers/staging/line6/control.h
index 2f19665d95a..47e18ab6d5b 100644
--- a/drivers/staging/line6/control.h
+++ b/drivers/staging/line6/control.h
@@ -22,24 +22,44 @@
enum {
POD_tweak = 1,
POD_wah_position = 4,
- POD_compression_gain = 5, /* device: LINE6_BITS_PODXTALL */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_compression_gain = 5,
+
POD_vol_pedal_position = 7,
POD_compression_threshold = 9,
POD_pan = 10,
POD_amp_model_setup = 11,
- POD_amp_model = 12, /* firmware: 2.0 */
+ POD_amp_model = 12, /* firmware: 2.0 */
POD_drive = 13,
POD_bass = 14,
- POD_mid = 15, /* device: LINE6_BITS_PODXTALL */
- POD_lowmid = 15, /* device: LINE6_BITS_BASSPODXTALL */
- POD_treble = 16, /* device: LINE6_BITS_PODXTALL */
- POD_highmid = 16, /* device: LINE6_BITS_BASSPODXTALL */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_mid = 15,
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_lowmid = 15,
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_treble = 16,
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_highmid = 16,
+
POD_chan_vol = 17,
- POD_reverb_mix = 18, /* device: LINE6_BITS_PODXTALL */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_reverb_mix = 18,
+
POD_effect_setup = 19,
POD_band_1_frequency = 20, /* firmware: 2.0 */
- POD_presence = 21, /* device: LINE6_BITS_PODXTALL */
- POD_treble__bass = 21, /* device: LINE6_BITS_BASSPODXTALL */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_presence = 21,
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_treble__bass = 21,
+
POD_noise_gate_enable = 22,
POD_gate_threshold = 23,
POD_gate_decay_time = 24,
@@ -50,78 +70,137 @@ enum {
POD_mod_param_1 = 29,
POD_delay_param_1 = 30,
POD_delay_param_1_note_value = 31,
- POD_band_2_frequency__bass = 32, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_2_frequency__bass = 32, /* firmware: 2.0 */
+
POD_delay_param_2 = 33,
POD_delay_volume_mix = 34,
POD_delay_param_3 = 35,
- POD_reverb_enable = 36, /* device: LINE6_BITS_PODXTALL */
- POD_reverb_type = 37, /* device: LINE6_BITS_PODXTALL */
- POD_reverb_decay = 38, /* device: LINE6_BITS_PODXTALL */
- POD_reverb_tone = 39, /* device: LINE6_BITS_PODXTALL */
- POD_reverb_pre_delay = 40, /* device: LINE6_BITS_PODXTALL */
- POD_reverb_pre_post = 41, /* device: LINE6_BITS_PODXTALL */
- POD_band_2_frequency = 42, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_3_frequency__bass = 42, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_reverb_enable = 36,
+ POD_reverb_type = 37,
+ POD_reverb_decay = 38,
+ POD_reverb_tone = 39,
+ POD_reverb_pre_delay = 40,
+ POD_reverb_pre_post = 41,
+ POD_band_2_frequency = 42,
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_3_frequency__bass = 42, /* firmware: 2.0 */
+
POD_wah_enable = 43,
- POD_modulation_lo_cut = 44, /* device: LINE6_BITS_BASSPODXTALL */
- POD_delay_reverb_lo_cut = 45, /* device: LINE6_BITS_BASSPODXTALL */
- POD_volume_pedal_minimum = 46, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_eq_pre_post = 46, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_modulation_lo_cut = 44,
+ POD_delay_reverb_lo_cut = 45,
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_volume_pedal_minimum = 46, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_eq_pre_post = 46, /* firmware: 2.0 */
+
POD_volume_pre_post = 47,
- POD_di_model = 48, /* device: LINE6_BITS_BASSPODXTALL */
- POD_di_delay = 49, /* device: LINE6_BITS_BASSPODXTALL */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_di_model = 48,
+ POD_di_delay = 49,
+
POD_mod_enable = 50,
POD_mod_param_1_note_value = 51,
POD_mod_param_2 = 52,
POD_mod_param_3 = 53,
POD_mod_param_4 = 54,
- POD_mod_param_5 = 55, /* device: LINE6_BITS_BASSPODXTALL */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_mod_param_5 = 55,
+
POD_mod_volume_mix = 56,
POD_mod_pre_post = 57,
POD_modulation_model = 58,
- POD_band_3_frequency = 60, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_4_frequency__bass = 60, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_band_3_frequency = 60, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_4_frequency__bass = 60, /* firmware: 2.0 */
+
POD_mod_param_1_double_precision = 61,
POD_delay_param_1_double_precision = 62,
POD_eq_enable = 63, /* firmware: 2.0 */
POD_tap = 64,
POD_volume_tweak_pedal_assign = 65,
- POD_band_5_frequency = 68, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_5_frequency = 68, /* firmware: 2.0 */
+
POD_tuner = 69,
POD_mic_selection = 70,
POD_cabinet_model = 71,
POD_stomp_model = 75,
POD_roomlevel = 76,
- POD_band_4_frequency = 77, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_6_frequency = 77, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_band_4_frequency = 77, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_6_frequency = 77, /* firmware: 2.0 */
+
POD_stomp_param_1_note_value = 78,
POD_stomp_param_2 = 79,
POD_stomp_param_3 = 80,
POD_stomp_param_4 = 81,
POD_stomp_param_5 = 82,
POD_stomp_param_6 = 83,
- POD_amp_switch_select = 84, /* device: LINE6_BITS_LIVE */
+
+ /* device: LINE6_BITS_LIVE */
+ POD_amp_switch_select = 84,
+
POD_delay_param_4 = 85,
POD_delay_param_5 = 86,
POD_delay_pre_post = 87,
- POD_delay_model = 88, /* device: LINE6_BITS_PODXTALL */
- POD_delay_verb_model = 88, /* device: LINE6_BITS_BASSPODXTALL */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_delay_model = 88,
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_delay_verb_model = 88,
+
POD_tempo_msb = 89,
POD_tempo_lsb = 90,
POD_wah_model = 91, /* firmware: 3.0 */
POD_bypass_volume = 105, /* firmware: 2.14 */
- POD_fx_loop_on_off = 107, /* device: LINE6_BITS_PRO */
+
+ /* device: LINE6_BITS_PRO */
+ POD_fx_loop_on_off = 107,
+
POD_tweak_param_select = 108,
POD_amp1_engage = 111,
POD_band_1_gain = 114, /* firmware: 2.0 */
- POD_band_2_gain__bass = 115, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
- POD_band_2_gain = 116, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_3_gain__bass = 116, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
- POD_band_3_gain = 117, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_4_gain__bass = 117, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
- POD_band_5_gain__bass = 118, /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
- POD_band_4_gain = 119, /* device: LINE6_BITS_PODXTALL */ /* firmware: 2.0 */
- POD_band_6_gain__bass = 119 /* device: LINE6_BITS_BASSPODXTALL */ /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_2_gain__bass = 115, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_band_2_gain = 116, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_3_gain__bass = 116, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_band_3_gain = 117, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_4_gain__bass = 117, /* firmware: 2.0 */
+ POD_band_5_gain__bass = 118, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_PODXTALL */
+ POD_band_4_gain = 119, /* firmware: 2.0 */
+
+ /* device: LINE6_BITS_BASSPODXTALL */
+ POD_band_6_gain__bass = 119 /* firmware: 2.0 */
};
/**
@@ -139,7 +218,8 @@ enum {
VARIAX_pickup2_position = 23, /* type: 24 bit float */
VARIAX_pickup2_angle = 26, /* type: 24 bit float */
VARIAX_pickup2_level = 29, /* type: 24 bit float */
- VARIAX_pickup_phase = 32, /* 0: in phase, 1: out of phase */
+ VARIAX_pickup_phase = 32, /* 0: in phase,
+ 1: out of phase */
VARIAX_capacitance = 33, /* type: 24 bit float */
VARIAX_tone_resistance = 36, /* type: 24 bit float */
VARIAX_volume_resistance = 39, /* type: 24 bit float */
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 258555417bc..1d5a4730276 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -399,7 +399,7 @@ static void line6_data_received(struct urb *urb)
static int line6_send(struct usb_line6 *line6, unsigned char *buf, size_t len)
{
int retval;
- unsigned int partial;
+ int partial;
#if DO_DUMP_URB_SEND
line6_write_hexdump(line6, 'S', buf, len);
@@ -684,11 +684,11 @@ static int line6_probe(struct usb_interface *interface, const struct usb_device_
/* check vendor and product id */
for (devtype = ARRAY_SIZE(line6_id_table) - 1; devtype--;) {
- u16 vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- u16 product = le16_to_cpu(usbdev->descriptor.idProduct);
+ u16 idVendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
- if (vendor == line6_id_table[devtype].idVendor
- && product == line6_id_table[devtype].idProduct)
+ if (idVendor == line6_id_table[devtype].idVendor
+ && idProduct == line6_id_table[devtype].idProduct)
break;
}
diff --git a/drivers/staging/line6/dumprequest.c b/drivers/staging/line6/dumprequest.c
index bb8c9da5803..cd468c39da5 100644
--- a/drivers/staging/line6/dumprequest.c
+++ b/drivers/staging/line6/dumprequest.c
@@ -105,10 +105,9 @@ int line6_wait_dump(struct line6_dump_request *l6dr, int nonblock)
int line6_dumpreq_initbuf(struct line6_dump_request *l6dr, const void *buf,
size_t len, int num)
{
- l6dr->reqbufs[num].buffer = kmalloc(len, GFP_KERNEL);
+ l6dr->reqbufs[num].buffer = kmemdup(buf, len, GFP_KERNEL);
if (l6dr->reqbufs[num].buffer == NULL)
return -ENOMEM;
- memcpy(l6dr->reqbufs[num].buffer, buf, len);
l6dr->reqbufs[num].length = len;
return 0;
}
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index 4983f2b51cf..28f514611ab 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -1074,7 +1074,8 @@ int pod_init(struct usb_interface *interface, struct usb_line6_pod *pod)
return -ENOMEM;
}
- pod->buffer_versionreq = kmalloc(sizeof(pod_request_version),
+ pod->buffer_versionreq = kmemdup(pod_request_version,
+ sizeof(pod_request_version),
GFP_KERNEL);
if (pod->buffer_versionreq == NULL) {
@@ -1083,9 +1084,6 @@ int pod_init(struct usb_interface *interface, struct usb_line6_pod *pod)
return -ENOMEM;
}
- memcpy(pod->buffer_versionreq, pod_request_version,
- sizeof(pod_request_version));
-
/* create sysfs entries: */
err = pod_create_files2(&interface->dev);
if (err < 0) {
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 28eb89983f3..58ddbe6393f 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -486,7 +486,8 @@ int variax_init(struct usb_interface *interface,
return err;
}
- variax->buffer_activate = kmalloc(sizeof(variax_activate), GFP_KERNEL);
+ variax->buffer_activate = kmemdup(variax_activate,
+ sizeof(variax_activate), GFP_KERNEL);
if (variax->buffer_activate == NULL) {
dev_err(&interface->dev, "Out of memory\n");
@@ -494,8 +495,6 @@ int variax_init(struct usb_interface *interface,
return -ENOMEM;
}
- memcpy(variax->buffer_activate, variax_activate,
- sizeof(variax_activate));
init_timer(&variax->activate_timer);
/* create sysfs entries: */
diff --git a/drivers/staging/memrar/Kconfig b/drivers/staging/memrar/Kconfig
new file mode 100644
index 00000000000..cbeebc55090
--- /dev/null
+++ b/drivers/staging/memrar/Kconfig
@@ -0,0 +1,15 @@
+config MRST_RAR_HANDLER
+ tristate "RAR handler driver for Intel Moorestown platform"
+ depends on RAR_REGISTER
+ ---help---
+ This driver provides a memory management interface to
+ restricted access regions (RAR) available on the Intel
+ Moorestown platform.
+
+ Once locked down, restricted access regions are only
+ accessible by specific hardware on the platform. The x86
+	  CPU is typically not one of those devices. As such, this
+ driver does not access RAR, and only provides a buffer
+ allocation/bookkeeping mechanism.
+
+ If unsure, say N.
diff --git a/drivers/staging/memrar/Makefile b/drivers/staging/memrar/Makefile
new file mode 100644
index 00000000000..a3336c00cc5
--- /dev/null
+++ b/drivers/staging/memrar/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
+memrar-y := memrar_allocator.o memrar_handler.o
diff --git a/drivers/staging/memrar/TODO b/drivers/staging/memrar/TODO
new file mode 100644
index 00000000000..0087447d503
--- /dev/null
+++ b/drivers/staging/memrar/TODO
@@ -0,0 +1,43 @@
+RAR Handler (memrar) Driver TODO Items
+======================================
+
+Maintainer: Ossama Othman <ossama.othman@intel.com>
+
+memrar.h
+--------
+1. This header exposes the driver's user space and kernel space
+ interfaces. It should be moved to <linux/rar/memrar.h>, or
+ something along those lines, when this memrar driver is moved out
+ of `staging'.
+ a. It would be ideal if staging/rar_register/rar_register.h was
+ moved to the same directory.
+
+memrar_allocator.[ch]
+---------------------
+1. Address potential fragmentation issues with the memrar_allocator.
+
+2. Hide struct memrar_allocator details/fields. They need not be
+ exposed to the user.
+ a. Forward declare struct memrar_allocator.
+ b. Move all three struct definitions to `memrar_allocator.c'
+ source file.
+ c. Add a memrar_allocator_largest_free_area() function, or
+ something like that to get access to the value of the struct
+ memrar_allocator "largest_free_area" field. This allows the
+ struct memrar_allocator fields to be completely hidden from
+ the user. The memrar_handler code really only needs this for
+ statistic gathering on-demand.
+ d. Do the same for the "capacity" field as the
+ "largest_free_area" field.
+
+3. Move memrar_allocator.* to kernel `lib' directory since it is HW
+ neutral.
+ a. Alternatively, use lib/genalloc.c instead.
+ b. A kernel port of Doug Lea's malloc() implementation may also
+ be an option.
+
+memrar_handler.c
+----------------
+1. Split user space interface (ioctl code) from core/kernel code,
+ e.g.:
+ memrar_handler.c -> memrar_core.c, memrar_user.c
diff --git a/drivers/staging/memrar/memrar-abi b/drivers/staging/memrar/memrar-abi
new file mode 100644
index 00000000000..98a6bb158ba
--- /dev/null
+++ b/drivers/staging/memrar/memrar-abi
@@ -0,0 +1,89 @@
+What: /dev/memrar
+Date: March 2010
+KernelVersion: Kernel version this feature first showed up in.
+Contact: Ossama Othman <ossama.othman@intel.com>
+Description: The Intel Moorestown Restricted Access Region (RAR)
+ Handler driver exposes an ioctl() based interface that
+ allows a user to reserve and release blocks of RAR
+ memory.
+
+		Note:	A sysfs-based interface was not appropriate
+			for the RAR handler's usage model.
+
+ =========================================================
+ ioctl() Requests
+ =========================================================
+ RAR_HANDLER_RESERVE
+ -------------------
+ Description: Reserve RAR block.
+ Type: struct RAR_block_info
+ Direction: in/out
+ Errors: EINVAL (invalid RAR type or size)
+ ENOMEM (not enough RAR memory)
+
+ RAR_HANDLER_STAT
+ ----------------
+ Description: Get RAR statistics.
+ Type: struct RAR_stat
+ Direction: in/out
+ Errors: EINVAL (invalid RAR type)
+
+ RAR_HANDLER_RELEASE
+ -------------------
+ Description: Release previously reserved RAR block.
+ Type: 32 bit unsigned integer
+			(e.g. uint32_t), i.e. the RAR "handle".
+ Direction: in
+ Errors: EINVAL (invalid RAR handle)
+
+
+ =========================================================
+ ioctl() Request Parameter Types
+ =========================================================
+ The structures referred to above are defined as
+ follows:
+
+ /**
+ * struct RAR_block_info - user space struct that
+ * describes RAR buffer
+ * @type: Type of RAR memory (e.g.,
+ * RAR_TYPE_VIDEO or RAR_TYPE_AUDIO) [in]
+ * @size: Requested size of a block in bytes to
+ * be reserved in RAR. [in]
+ * @handle: Handle that can be used to refer to
+ * reserved block. [out]
+ *
+ * This is the basic structure exposed to the user
+	 * space that describes a given RAR buffer. It is used
+ * as the parameter for the RAR_HANDLER_RESERVE ioctl.
+ * The buffer's underlying bus address is not exposed
+ * to the user. User space code refers to the buffer
+ * entirely by "handle".
+ */
+ struct RAR_block_info {
+ __u32 type;
+ __u32 size;
+ __u32 handle;
+ };
+
+ /**
+ * struct RAR_stat - RAR statistics structure
+ * @type: Type of RAR memory (e.g.,
+ * RAR_TYPE_VIDEO or
+ * RAR_TYPE_AUDIO) [in]
+ * @capacity: Total size of RAR memory
+ * region. [out]
+ * @largest_block_size: Size of the largest reservable
+ * block. [out]
+ *
+ * This structure is used for RAR_HANDLER_STAT ioctl.
+ */
+ struct RAR_stat {
+ __u32 type;
+ __u32 capacity;
+ __u32 largest_block_size;
+ };
+
+ Lastly, the RAR_HANDLER_RELEASE ioctl expects a
+ "handle" to the RAR block of memory. It is a 32 bit
+ unsigned integer.
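A minimal user-space sketch of the interface documented above; this is hypothetical usage, assuming the memrar.h header introduced below is available to user code and that a valid RAR type constant (e.g. RAR_TYPE_VIDEO) is provided by the platform headers. Error handling is trimmed.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "memrar.h"	/* header introduced below */

int main(void)
{
	struct RAR_block_info req = {
		.type = 0,		/* stands in for e.g. RAR_TYPE_VIDEO */
		.size = 64 * 1024,	/* request 64 KiB */
	};
	struct RAR_stat st = { .type = 0 };
	int fd = open("/dev/memrar", O_RDWR);

	if (fd < 0)
		return 1;

	/* Reserve a block; the handle comes back in req.handle. */
	if (ioctl(fd, RAR_HANDLER_RESERVE, &req) == 0)
		printf("reserved handle 0x%x\n", (unsigned)req.handle);

	/* Query capacity and largest reservable block for this type. */
	if (ioctl(fd, RAR_HANDLER_STAT, &st) == 0)
		printf("capacity %u, largest block %u\n",
		       (unsigned)st.capacity,
		       (unsigned)st.largest_block_size);

	/* Release the block by handle. */
	ioctl(fd, RAR_HANDLER_RELEASE, &req.handle);

	close(fd);
	return 0;
}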
diff --git a/drivers/staging/memrar/memrar.h b/drivers/staging/memrar/memrar.h
new file mode 100644
index 00000000000..0b735b827c0
--- /dev/null
+++ b/drivers/staging/memrar/memrar.h
@@ -0,0 +1,155 @@
+/*
+ * RAR Handler (/dev/memrar) internal driver API.
+ * Copyright (C) 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ */
+
+
+#ifndef _MEMRAR_H
+#define _MEMRAR_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+
+/**
+ * struct RAR_stat - RAR statistics structure
+ * @type: Type of RAR memory (e.g., audio vs. video)
+ * @capacity: Total size of RAR memory region.
+ * @largest_block_size: Size of the largest reservable block.
+ *
+ * This structure is used for RAR_HANDLER_STAT ioctl and for the
+ * RAR_get_stat() user space wrapper function.
+ */
+struct RAR_stat {
+ __u32 type;
+ __u32 capacity;
+ __u32 largest_block_size;
+};
+
+
+/**
+ * struct RAR_block_info - user space struct that describes RAR buffer
+ * @type: Type of RAR memory (e.g., audio vs. video)
+ * @size: Requested size of a block to be reserved in RAR.
+ * @handle: Handle that can be used to refer to reserved block.
+ *
+ * This is the basic structure exposed to the user space that
+ * describes a given RAR buffer. The buffer's underlying bus address
+ * is not exposed to the user. User space code refers to the buffer
+ * entirely by "handle".
+ */
+struct RAR_block_info {
+ __u32 type;
+ __u32 size;
+ __u32 handle;
+};
+
+
+#define RAR_IOCTL_BASE 0xE0
+
+/* Reserve RAR block. */
+#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
+
+/* Release previously reserved RAR block. */
+#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
+
+/* Get RAR stats. */
+#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
+
+
+#ifdef __KERNEL__
+
+/* -------------------------------------------------------------- */
+/* Kernel Side RAR Handler Interface */
+/* -------------------------------------------------------------- */
+
+/**
+ * struct RAR_buffer - kernel space struct that describes RAR buffer
+ * @info: structure containing base RAR buffer information
+ * @bus_address: buffer bus address
+ *
+ * Structure that contains all information related to a given block of
+ * memory in RAR. It is generally only used when retrieving RAR
+ * related bus addresses.
+ *
+ * Note: This structure is used only by RAR-enabled drivers, and is
+ * not intended to be exposed to the user space.
+ */
+struct RAR_buffer {
+ struct RAR_block_info info;
+ dma_addr_t bus_address;
+};
+
+/**
+ * rar_reserve() - reserve RAR buffers
+ * @buffers: array of RAR_buffers where type and size of buffers to
+ * reserve are passed in, handle and bus address are
+ * passed out
+ * @count: number of RAR_buffers in the "buffers" array
+ *
+ * This function will reserve buffers in the restricted access regions
+ * of given types.
+ *
+ * It returns the number of successfully reserved buffers. Successful
+ * buffer reservations will have the corresponding bus_address field
+ * set to a non-zero value in the given buffers vector.
+ */
+extern size_t rar_reserve(struct RAR_buffer *buffers,
+ size_t count);
+
+/**
+ * rar_release() - release RAR buffers
+ * @buffers: array of RAR_buffers where handles to buffers to be
+ * released are passed in
+ * @count: number of RAR_buffers in the "buffers" array
+ *
+ * This function will release RAR buffers that were retrieved through
+ * a call to rar_reserve() or rar_handle_to_bus() by decrementing the
+ * reference count. The RAR buffer will be reclaimed when the
+ * reference count drops to zero.
+ *
+ * It returns the number of successfully released buffers. Successful
+ * releases will have their handle field set to zero in the given
+ * buffers vector.
+ */
+extern size_t rar_release(struct RAR_buffer *buffers,
+ size_t count);
+
+/**
+ * rar_handle_to_bus() - convert a vector of RAR handles to bus addresses
+ * @buffers: array of RAR_buffers containing handles to be
+ * converted to bus_addresses
+ * @count: number of RAR_buffers in the "buffers" array
+ *
+ * This function will retrieve the RAR buffer bus addresses, type and
+ * size corresponding to the RAR handles provided in the buffers
+ * vector.
+ *
+ * It returns the number of successfully converted buffers. The bus
+ * address will be set to 0 for unrecognized handles.
+ *
+ * The reference count for each corresponding buffer in RAR will be
+ * incremented. Call rar_release() when done with the buffers.
+ */
+extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
+ size_t count);
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _MEMRAR_H */
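For a kernel-side client the intended flow is reserve, translate to a bus address for the authorized hardware, then release. A rough sketch under that assumption; the type and size are placeholders and this is not taken from any in-tree user.

#include <linux/errno.h>
#include <linux/kernel.h>
#include "memrar.h"

static int example_rar_client(void)
{
	struct RAR_buffer buf = {
		.info = {
			.type = 0,	/* placeholder for e.g. RAR_TYPE_AUDIO */
			.size = 4096,
		},
	};

	/* On success the handle and bus address are filled in. */
	if (rar_reserve(&buf, 1) != 1)
		return -ENOMEM;

	/*
	 * buf.bus_address would be programmed into the authorized
	 * hardware here. Given only a handle, rar_handle_to_bus()
	 * performs the reverse lookup and takes a reference.
	 */

	/* Drop the reference; the block is reclaimed at refcount zero. */
	if (rar_release(&buf, 1) != 1)
		return -EINVAL;

	return 0;
}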
diff --git a/drivers/staging/memrar/memrar_allocator.c b/drivers/staging/memrar/memrar_allocator.c
new file mode 100644
index 00000000000..a4f8c5846a0
--- /dev/null
+++ b/drivers/staging/memrar/memrar_allocator.c
@@ -0,0 +1,432 @@
+/*
+ * memrar_allocator 1.0: An allocator for Intel RAR.
+ *
+ * Copyright (C) 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ *
+ * ------------------------------------------------------------------
+ *
+ * This simple allocator implementation provides a
+ * malloc()/free()-like interface for reserving space within a
+ * previously reserved block of memory. It is not specific to
+ * any hardware, nor is it coupled with the lower level paging
+ * mechanism.
+ *
+ * The primary goal of this implementation is to provide a means
+ * to partition an arbitrary block of memory without actually
+ * accessing the memory or incurring any hardware side-effects
+ * (e.g. paging). It is, in effect, a bookkeeping mechanism for
+ * buffers.
+ */
+
+
+#include "memrar_allocator.h"
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+
+struct memrar_allocator *memrar_create_allocator(unsigned long base,
+ size_t capacity,
+ size_t block_size)
+{
+ struct memrar_allocator *allocator = NULL;
+ struct memrar_address_ranges *first_node = NULL;
+
+ /*
+ * Make sure the base address is aligned on a block_size
+ * boundary.
+ *
+ * @todo Is this necessary?
+ */
+ /* base = ALIGN(base, block_size); */
+
+ /* Validate parameters.
+ *
+ * Make sure we can allocate the entire memory space. Zero
+ * capacity or block size are obviously invalid.
+ */
+ if (base == 0
+ || capacity == 0
+ || block_size == 0
+ || ULONG_MAX - capacity < base
+ || capacity < block_size)
+ return allocator;
+
+ /*
+ * There isn't much point in creating a memory allocator that
+ * is only capable of holding one block but we'll allow it,
+ * and issue a diagnostic.
+ */
+ WARN(capacity < block_size * 2,
+ "memrar: Only one block available to allocator.\n");
+
+ allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
+
+ if (allocator == NULL)
+ return allocator;
+
+ mutex_init(&allocator->lock);
+ allocator->base = base;
+
+ /* Round the capacity down to a multiple of block_size. */
+ allocator->capacity = (capacity / block_size) * block_size;
+
+ allocator->block_size = block_size;
+
+ allocator->largest_free_area = allocator->capacity;
+
+ /* Initialize the handle and free lists. */
+ INIT_LIST_HEAD(&allocator->allocated_list.list);
+ INIT_LIST_HEAD(&allocator->free_list.list);
+
+ first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
+ if (first_node == NULL) {
+ kfree(allocator);
+ allocator = NULL;
+ } else {
+ /* Full range of blocks is available. */
+ first_node->range.begin = base;
+ first_node->range.end = base + allocator->capacity;
+ list_add(&first_node->list,
+ &allocator->free_list.list);
+ }
+
+ return allocator;
+}
+
+void memrar_destroy_allocator(struct memrar_allocator *allocator)
+{
+ /*
+ * Assume that the memory allocator lock isn't held at this
+ * point in time. Caller must ensure that.
+ */
+
+ struct memrar_address_ranges *pos = NULL;
+ struct memrar_address_ranges *n = NULL;
+
+ if (allocator == NULL)
+ return;
+
+ mutex_lock(&allocator->lock);
+
+ /* Reclaim free list resources. */
+ list_for_each_entry_safe(pos,
+ n,
+ &allocator->free_list.list,
+ list) {
+ list_del(&pos->list);
+ kfree(pos);
+ }
+
+ mutex_unlock(&allocator->lock);
+
+ kfree(allocator);
+}
+
+unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
+ size_t size)
+{
+ struct memrar_address_ranges *pos = NULL;
+
+ size_t num_blocks;
+ unsigned long reserved_bytes;
+
+ /*
+ * Address of allocated buffer. We assume that zero is not a
+ * valid address.
+ */
+ unsigned long addr = 0;
+
+ if (allocator == NULL || size == 0)
+ return addr;
+
+ /* Reserve enough blocks to hold the amount of bytes requested. */
+ num_blocks = DIV_ROUND_UP(size, allocator->block_size);
+
+ reserved_bytes = num_blocks * allocator->block_size;
+
+ mutex_lock(&allocator->lock);
+
+ if (reserved_bytes > allocator->largest_free_area) {
+ mutex_unlock(&allocator->lock);
+ return addr;
+ }
+
+ /*
+ * Iterate through the free list to find a suitably sized
+ * range of free contiguous memory blocks.
+ *
+ * We also take the opportunity to reset the size of the
+ * largest free area size statistic.
+ */
+ list_for_each_entry(pos, &allocator->free_list.list, list) {
+ struct memrar_address_range * const fr = &pos->range;
+ size_t const curr_size = fr->end - fr->begin;
+
+ if (curr_size >= reserved_bytes && addr == 0) {
+ struct memrar_address_range *range = NULL;
+ struct memrar_address_ranges * const new_node =
+ kmalloc(sizeof(*new_node), GFP_KERNEL);
+
+ if (new_node == NULL)
+ break;
+
+ list_add(&new_node->list,
+ &allocator->allocated_list.list);
+
+ /*
+ * Carve out area of memory from end of free
+ * range.
+ */
+ range = &new_node->range;
+ range->end = fr->end;
+ fr->end -= reserved_bytes;
+ range->begin = fr->end;
+ addr = range->begin;
+
+ /*
+ * Check if largest area has decreased in
+ * size. We'll need to continue scanning for
+ * the next largest area if it has.
+ */
+ if (curr_size == allocator->largest_free_area)
+ allocator->largest_free_area -=
+ reserved_bytes;
+ else
+ break;
+ }
+
+ /*
+ * Reset largest free area size statistic as needed,
+ * but only if we've actually allocated memory.
+ */
+ if (addr != 0
+ && curr_size > allocator->largest_free_area) {
+ allocator->largest_free_area = curr_size;
+ break;
+ }
+ }
+
+ mutex_unlock(&allocator->lock);
+
+ return addr;
+}
+
+long memrar_allocator_free(struct memrar_allocator *allocator,
+ unsigned long addr)
+{
+ struct list_head *pos = NULL;
+ struct list_head *tmp = NULL;
+ struct list_head *dst = NULL;
+
+ struct memrar_address_ranges *allocated = NULL;
+ struct memrar_address_range const *handle = NULL;
+
+ unsigned long old_end = 0;
+ unsigned long new_chunk_size = 0;
+
+ if (allocator == NULL)
+ return -EINVAL;
+
+ if (addr == 0)
+ return 0; /* Ignore "free(0)". */
+
+ mutex_lock(&allocator->lock);
+
+ /* Find the corresponding handle. */
+ list_for_each_entry(allocated,
+ &allocator->allocated_list.list,
+ list) {
+ if (allocated->range.begin == addr) {
+ handle = &allocated->range;
+ break;
+ }
+ }
+
+ /* No such buffer created by this allocator. */
+ if (handle == NULL) {
+ mutex_unlock(&allocator->lock);
+ return -EFAULT;
+ }
+
+ /*
+ * Coalesce adjacent chunks of memory if possible.
+ *
+ * @note This isn't full blown coalescing since we're only
+ * coalescing at most three chunks of memory.
+ */
+ list_for_each_safe(pos, tmp, &allocator->free_list.list) {
+ /* @todo O(n) performance. Optimize. */
+
+ struct memrar_address_range * const chunk =
+ &list_entry(pos,
+ struct memrar_address_ranges,
+ list)->range;
+
+ /* Extend size of existing free adjacent chunk. */
+ if (chunk->end == handle->begin) {
+ /*
+			 * Chunk "less than" the one we're
+ * freeing is adjacent.
+ *
+ * Before:
+ *
+ * +-----+------+
+ * |chunk|handle|
+ * +-----+------+
+ *
+ * After:
+ *
+ * +------------+
+ * | chunk |
+ * +------------+
+ */
+
+ struct memrar_address_ranges const * const next =
+ list_entry(pos->next,
+ struct memrar_address_ranges,
+ list);
+
+ chunk->end = handle->end;
+
+ /*
+ * Now check if next free chunk is adjacent to
+ * the current extended free chunk.
+ *
+ * Before:
+ *
+ * +------------+----+
+ * | chunk |next|
+ * +------------+----+
+ *
+ * After:
+ *
+ * +-----------------+
+ * | chunk |
+ * +-----------------+
+ */
+ if (!list_is_singular(pos)
+ && chunk->end == next->range.begin) {
+ chunk->end = next->range.end;
+ list_del(pos->next);
+ kfree(next);
+ }
+
+ list_del(&allocated->list);
+
+ new_chunk_size = chunk->end - chunk->begin;
+
+ goto exit_memrar_free;
+
+ } else if (handle->end == chunk->begin) {
+ /*
+			 * Chunk "greater than" the one we're
+ * freeing is adjacent.
+ *
+ * +------+-----+
+ * |handle|chunk|
+ * +------+-----+
+ *
+ * After:
+ *
+ * +------------+
+ * | chunk |
+ * +------------+
+ */
+
+ struct memrar_address_ranges const * const prev =
+ list_entry(pos->prev,
+ struct memrar_address_ranges,
+ list);
+
+ chunk->begin = handle->begin;
+
+ /*
+ * Now check if previous free chunk is
+ * adjacent to the current extended free
+ * chunk.
+ *
+ *
+ * Before:
+ *
+ * +----+------------+
+ * |prev| chunk |
+ * +----+------------+
+ *
+ * After:
+ *
+ * +-----------------+
+ * | chunk |
+ * +-----------------+
+ */
+ if (!list_is_singular(pos)
+ && prev->range.end == chunk->begin) {
+ chunk->begin = prev->range.begin;
+ list_del(pos->prev);
+ kfree(prev);
+ }
+
+ list_del(&allocated->list);
+
+ new_chunk_size = chunk->end - chunk->begin;
+
+ goto exit_memrar_free;
+
+ } else if (chunk->end < handle->begin
+ && chunk->end > old_end) {
+ /* Keep track of where the entry could be
+ * potentially moved from the "allocated" list
+ * to the "free" list if coalescing doesn't
+ * occur, making sure the "free" list remains
+ * sorted.
+ */
+ old_end = chunk->end;
+ dst = pos;
+ }
+ }
+
+ /*
+ * Nothing to coalesce.
+ *
+ * Move the entry from the "allocated" list to the "free"
+ * list.
+ */
+ list_move(&allocated->list, dst);
+ new_chunk_size = handle->end - handle->begin;
+ allocated = NULL;
+
+exit_memrar_free:
+
+ if (new_chunk_size > allocator->largest_free_area)
+ allocator->largest_free_area = new_chunk_size;
+
+ mutex_unlock(&allocator->lock);
+
+ kfree(allocated);
+
+ return 0;
+}
+
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
diff --git a/drivers/staging/memrar/memrar_allocator.h b/drivers/staging/memrar/memrar_allocator.h
new file mode 100644
index 00000000000..0b80dead710
--- /dev/null
+++ b/drivers/staging/memrar/memrar_allocator.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ */
+
+#ifndef MEMRAR_ALLOCATOR_H
+#define MEMRAR_ALLOCATOR_H
+
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+
+/**
+ * struct memrar_address_range - struct that describes a memory range
+ * @begin: Beginning of available address range.
+ * @end: End of available address range, one past the end,
+ * i.e. [begin, end).
+ */
+struct memrar_address_range {
+/* private: internal use only */
+ unsigned long begin;
+ unsigned long end;
+};
+
+/**
+ * struct memrar_address_ranges - list of areas of memory.
+ * @list: Linked list of address ranges.
+ * @range: Memory address range corresponding to given list node.
+ */
+struct memrar_address_ranges {
+/* private: internal use only */
+ struct list_head list;
+ struct memrar_address_range range;
+};
+
+/**
+ * struct memrar_allocator - encapsulation of the memory allocator state
+ * @lock: Lock used to synchronize access to the memory
+ * allocator state.
+ * @base: Base (start) address of the allocator memory
+ * space.
+ * @capacity: Size of the allocator memory space in bytes.
+ * @block_size: The size in bytes of individual blocks within
+ * the allocator memory space.
+ * @largest_free_area: Largest free area of memory in the allocator
+ * in bytes.
+ * @allocated_list: List of allocated memory block address
+ * ranges.
+ * @free_list: List of free address ranges.
+ *
+ * This structure contains all memory allocator state, including the
+ * base address, capacity, free list, lock, etc.
+ */
+struct memrar_allocator {
+/* private: internal use only */
+ struct mutex lock;
+ unsigned long base;
+ size_t capacity;
+ size_t block_size;
+ size_t largest_free_area;
+ struct memrar_address_ranges allocated_list;
+ struct memrar_address_ranges free_list;
+};
+
+/**
+ * memrar_create_allocator() - create a memory allocator
+ * @base: Address at which the memory allocator begins.
+ * @capacity: Desired size of the memory allocator. This value must
+ * be larger than the block_size, ideally more than twice
+ * as large since there wouldn't be much point in using a
+ * memory allocator otherwise.
+ * @block_size: The size of individual blocks within the memory
+ * allocator. This value must be smaller than the
+ * capacity.
+ *
+ * Create a memory allocator with the given capacity and block size.
+ * The capacity will be reduced to be a multiple of the block size, if
+ * necessary.
+ *
+ * Returns an instance of the memory allocator, if creation succeeds,
+ * otherwise zero if creation fails. Failure may occur if not enough
+ * kernel memory exists to create the memrar_allocator instance
+ * itself, or if the capacity and block_size arguments are not
+ * compatible or do not make sense.
+ */
+struct memrar_allocator *memrar_create_allocator(unsigned long base,
+ size_t capacity,
+ size_t block_size);
+
+/**
+ * memrar_destroy_allocator() - destroy allocator
+ * @allocator: The allocator being destroyed.
+ *
+ * Reclaim resources held by the memory allocator. The caller must
+ * explicitly free all memory reserved by memrar_allocator_alloc()
+ * prior to calling this function. Otherwise leaks will occur.
+ */
+void memrar_destroy_allocator(struct memrar_allocator *allocator);
+
+/**
+ * memrar_allocator_alloc() - reserve an area of memory of given size
+ * @allocator: The allocator instance being used to reserve buffer.
+ * @size: The size in bytes of the buffer to allocate.
+ *
+ * This function reserves an area of memory managed by the given
+ * allocator. It returns zero if allocation was not possible.
+ * Failure may occur if the allocator no longer has space available.
+ */
+unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
+ size_t size);
+
+/**
+ * memrar_allocator_free() - release buffer starting at given address
+ * @allocator: The allocator instance being used to release the buffer.
+ * @address: The address of the buffer being released.
+ *
+ * Release an area of memory starting at the given address. Failure
+ * could occur if the given address is not in the address space
+ * managed by the allocator. Returns zero on success or an errno
+ * (negative value) on failure.
+ */
+long memrar_allocator_free(struct memrar_allocator *allocator,
+ unsigned long address);
+
+#endif /* MEMRAR_ALLOCATOR_H */
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
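A quick illustration of the bookkeeping-only model this header describes; the base address, capacity and block size below are made up, and nothing at the base address is ever touched.

#include <linux/errno.h>
#include <linux/kernel.h>
#include "memrar_allocator.h"

static int example_allocator_use(void)
{
	/* Manage a 1 MiB window in 4 KiB blocks at an arbitrary base. */
	struct memrar_allocator *a =
		memrar_create_allocator(0x80000000UL, 1024 * 1024, 4096);
	unsigned long addr;

	if (a == NULL)
		return -ENOMEM;

	/* 6000 bytes round up to two 4 KiB blocks internally. */
	addr = memrar_allocator_alloc(a, 6000);
	if (addr == 0) {
		memrar_destroy_allocator(a);
		return -ENOMEM;
	}

	/* Return the block, then tear the allocator down. */
	memrar_allocator_free(a, addr);
	memrar_destroy_allocator(a);
	return 0;
}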
diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c
new file mode 100644
index 00000000000..efa7fd62d39
--- /dev/null
+++ b/drivers/staging/memrar/memrar_handler.c
@@ -0,0 +1,996 @@
+/*
+ * memrar_handler 1.0: An Intel restricted access region handler device
+ *
+ * Copyright (C) 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ * -------------------------------------------------------------------
+ *
+ * Moorestown restricted access regions (RAR) provide isolated
+ * areas of main memory that are only accessible by authorized
+ * devices.
+ *
+ * The Intel Moorestown RAR handler module exposes a kernel space
+ * RAR memory management mechanism. It is essentially a
+ * RAR-specific allocator.
+ *
+ * Besides providing RAR buffer management, the RAR handler also
+ * behaves in many ways like an OS virtual memory manager. For
+ * example, the RAR "handles" created by the RAR handler are
+ * analogous to user space virtual addresses.
+ *
+ * RAR memory itself is never accessed directly by the RAR
+ * handler.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+#include "../rar_register/rar_register.h"
+
+#include "memrar.h"
+#include "memrar_allocator.h"
+
+
+#define MEMRAR_VER "1.0"
+
+/*
+ * Moorestown supports three restricted access regions.
+ *
+ * We only care about the first two, video and audio. The third,
+ * reserved for Chaabi and the P-unit, will be handled by their
+ * respective drivers.
+ */
+#define MRST_NUM_RAR 2
+
+/* ---------------- -------------------- ------------------- */
+
+/**
+ * struct memrar_buffer_info - struct that keeps track of all RAR buffers
+ * @list: Linked list of memrar_buffer_info objects.
+ * @buffer: Core RAR buffer information.
+ * @refcount: Reference count.
+ * @owner: File handle corresponding to process that reserved the
+ * block of memory in RAR. This will be zero for buffers
+ * allocated by other drivers instead of by a user space
+ * process.
+ *
+ * This structure encapsulates a link list of RAR buffers, as well as
+ * other characteristics specific to a given list node, such as the
+ * reference count on the corresponding RAR buffer.
+ */
+struct memrar_buffer_info {
+ struct list_head list;
+ struct RAR_buffer buffer;
+ struct kref refcount;
+ struct file *owner;
+};
+
+/**
+ * struct memrar_rar_info - characteristics of a given RAR
+ * @base: Base bus address of the RAR.
+ * @length: Length of the RAR.
+ * @iobase: Virtual address of RAR mapped into kernel.
+ * @allocator: Allocator associated with the RAR. Note the allocator
+ * "capacity" may be smaller than the RAR length if the
+ * length is not a multiple of the configured allocator
+ * block size.
+ * @buffers: Table that keeps track of all reserved RAR buffers.
+ * @lock: Lock used to synchronize access to RAR-specific data
+ * structures.
+ *
+ * Each RAR has an associated memrar_rar_info structure that describes
+ * where in memory the RAR is located, how large it is, and a list of
+ * reserved RAR buffers inside that RAR. Each RAR also has a mutex
+ * associated with it to reduce lock contention when operations on
+ * multiple RARs are performed in parallel.
+ */
+struct memrar_rar_info {
+ dma_addr_t base;
+ unsigned long length;
+ void __iomem *iobase;
+ struct memrar_allocator *allocator;
+ struct memrar_buffer_info buffers;
+ struct mutex lock;
+ int allocated; /* True if we own this RAR */
+};
+
+/*
+ * Array of RAR characteristics.
+ */
+static struct memrar_rar_info memrars[MRST_NUM_RAR];
+
+/* ---------------- -------------------- ------------------- */
+
+/* Validate RAR type. */
+static inline int memrar_is_valid_rar_type(u32 type)
+{
+ return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
+}
+
+/* Check if an address/handle falls within the given RAR memory range. */
+static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ unsigned long const iobase = (unsigned long) (rar->iobase);
+ return (vaddr >= iobase && vaddr < iobase + rar->length);
+}
+
+/* Retrieve RAR information associated with the given handle. */
+static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
+{
+ int i;
+ for (i = 0; i < MRST_NUM_RAR; ++i) {
+ struct memrar_rar_info * const rar = &memrars[i];
+ if (memrar_handle_in_range(rar, vaddr))
+ return rar;
+ }
+
+ return NULL;
+}
+
+/**
+ * memrar_get_bus_address - handle to bus address
+ *
+ * Retrieve bus address from given handle.
+ *
+ * Returns address corresponding to given handle. Zero if handle is
+ * invalid.
+ */
+static dma_addr_t memrar_get_bus_address(
+ struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ unsigned long const iobase = (unsigned long) (rar->iobase);
+
+ if (!memrar_handle_in_range(rar, vaddr))
+ return 0;
+
+ /*
+ * An assumption is made that the virtual address offset is
+ * the same as the bus address offset, at least based on the
+ * way this driver is implemented. For example, vaddr + 2 ==
+ * baddr + 2.
+ *
+ * @todo Is that a valid assumption?
+ */
+ return rar->base + (vaddr - iobase);
+}
+
+/**
+ * memrar_get_physical_address - handle to physical address
+ *
+ * Retrieve physical address from given handle.
+ *
+ * Returns address corresponding to given handle. Zero if handle is
+ * invalid.
+ */
+static dma_addr_t memrar_get_physical_address(
+ struct memrar_rar_info *rar,
+ u32 vaddr)
+{
+ /*
+ * @todo This assumes that the bus address and physical
+ * address are the same. That is true for Moorestown
+ * but not necessarily on other platforms. This
+ * deficiency should be addressed at some point.
+ */
+ return memrar_get_bus_address(rar, vaddr);
+}
+
+/**
+ * memrar_release_block_i - release a block to the pool
+ * @ref: kref of the block being released
+ *
+ * Core block release code. A node has hit zero references so can
+ * be released and the lists must be updated.
+ *
+ * Note: This code removes the node from a list. Make sure any list
+ * iteration is performed using list_for_each_safe().
+ */
+static void memrar_release_block_i(struct kref *ref)
+{
+ /*
+ * Last reference is being released. Remove from the table,
+ * and reclaim resources.
+ */
+
+ struct memrar_buffer_info * const node =
+ container_of(ref, struct memrar_buffer_info, refcount);
+
+ struct RAR_block_info * const user_info =
+ &node->buffer.info;
+
+ struct memrar_allocator * const allocator =
+ memrars[user_info->type].allocator;
+
+ list_del(&node->list);
+
+ memrar_allocator_free(allocator, user_info->handle);
+
+ kfree(node);
+}
+
+/**
+ * memrar_init_rar_resources - configure a RAR
+ * @rarnum: rar that has been allocated
+ * @devname: name of our device
+ *
+ * Initialize RAR parameters, such as bus addresses, etc and make
+ * the resource accessible.
+ */
+static int memrar_init_rar_resources(int rarnum, char const *devname)
+{
+ /* ---- Sanity Checks ----
+ * 1. RAR bus addresses in both Lincroft and Langwell RAR
+ * registers should be the same.
+ * a. There's no way we can do this through IA.
+ *
+ * 2. Secure device ID in Langwell RAR registers should be set
+ * appropriately, e.g. only LPE DMA for the audio RAR, and
+ * security for the other Langwell based RAR registers.
+ * a. There's no way we can do this through IA.
+ *
+ * 3. Audio and video RAR registers and RAR access should be
+ * locked down. If not, enable RAR access control. Except
+ * for debugging purposes, there is no reason for them to
+ * be unlocked.
+ * a. We can only do this for the Lincroft (IA) side.
+ *
+ * @todo Should the RAR handler driver even be aware of audio
+ * and video RAR settings?
+ */
+
+ /*
+ * RAR buffer block size.
+ *
+ * We choose it to be the size of a page to simplify the
+ * /dev/memrar mmap() implementation and usage. Otherwise
+ * paging is not involved once an RAR is locked down.
+ */
+ static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
+
+ dma_addr_t low, high;
+ struct memrar_rar_info * const rar = &memrars[rarnum];
+
+ BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
+ BUG_ON(!memrar_is_valid_rar_type(rarnum));
+ BUG_ON(rar->allocated);
+
+ mutex_init(&rar->lock);
+
+ /*
+ * Initialize the process table before we reach any
+	 * code that exits on failure, since the finalization
+ * code requires an initialized list.
+ */
+ INIT_LIST_HEAD(&rar->buffers.list);
+
+ if (rar_get_address(rarnum, &low, &high) != 0)
+ /* No RAR is available. */
+ return -ENODEV;
+
+ if (low == 0 || high == 0) {
+ rar->base = 0;
+ rar->length = 0;
+ rar->iobase = NULL;
+ rar->allocator = NULL;
+ return -ENOSPC;
+ }
+
+ /*
+	 * @todo Verify that the LNC and LNW RAR register contents
+	 *       (addresses, security, etc.) are compatible and
+	 *       consistent.
+ */
+
+ rar->length = high - low + 1;
+
+ /* Claim RAR memory as our own. */
+ if (request_mem_region(low, rar->length, devname) == NULL) {
+ rar->length = 0;
+ pr_err("%s: Unable to claim RAR[%d] memory.\n", devname, rarnum);
+ pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
+ return -EBUSY;
+ }
+
+ rar->base = low;
+
+ /*
+ * Now map it into the kernel address space.
+ *
+ * Note that the RAR memory may only be accessed by IA
+ * when debugging. Otherwise attempts to access the
+ * RAR memory when it is locked down will result in
+ * behavior similar to writing to /dev/null and
+ * reading from /dev/zero. This behavior is enforced
+ * by the hardware. Even if we don't access the
+ * memory, mapping it into the kernel provides us with
+ * a convenient RAR handle to bus address mapping.
+ */
+ rar->iobase = ioremap_nocache(rar->base, rar->length);
+ if (rar->iobase == NULL) {
+ pr_err("%s: Unable to map RAR memory.\n", devname);
+ release_mem_region(low, rar->length);
+ return -ENOMEM;
+ }
+
+ /* Initialize corresponding memory allocator. */
+ rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
+ rar->length, RAR_BLOCK_SIZE);
+ if (rar->allocator == NULL) {
+ iounmap(rar->iobase);
+ release_mem_region(low, rar->length);
+ return -ENOMEM;
+ }
+
+ pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
+ devname, rarnum, (unsigned long) low, (unsigned long) high);
+
+ pr_info("%s: BRAR[%d] size = %zu KiB\n",
+ devname, rarnum, rar->allocator->capacity / 1024);
+
+ rar->allocated = 1;
+ return 0;
+}
+
+/**
+ * memrar_fini_rar_resources - free up RAR resources
+ *
+ * Finalize RAR resources. Free up the resource tables, hand the memory
+ * back to the kernel, unmap the device and release the address space.
+ */
+static void memrar_fini_rar_resources(void)
+{
+ int z;
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+
+ /*
+ * @todo Do we need to hold a lock at this point in time?
+ * (module initialization failure or exit?)
+ */
+
+ for (z = MRST_NUM_RAR; z-- != 0; ) {
+ struct memrar_rar_info * const rar = &memrars[z];
+
+ if (!rar->allocated)
+ continue;
+
+ /* Clean up remaining resources. */
+
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ kref_put(&pos->refcount, memrar_release_block_i);
+ }
+
+ memrar_destroy_allocator(rar->allocator);
+ rar->allocator = NULL;
+
+ iounmap(rar->iobase);
+ release_mem_region(rar->base, rar->length);
+
+ rar->iobase = NULL;
+ rar->base = 0;
+ rar->length = 0;
+
+ unregister_rar(z);
+ }
+}
+
+/**
+ * memrar_reserve_block - handle an allocation request
+ * @request: block being requested
+ * @filp: owner it is tied to
+ *
+ * Allocate a block of the requested RAR. If successful return the
+ * request object filled in and zero, if not report an error code
+ */
+
+static long memrar_reserve_block(struct RAR_buffer *request,
+ struct file *filp)
+{
+ struct RAR_block_info * const rinfo = &request->info;
+ struct RAR_buffer *buffer;
+ struct memrar_buffer_info *buffer_info;
+ u32 handle;
+ struct memrar_rar_info *rar = NULL;
+
+ /* Prevent array overflow. */
+ if (!memrar_is_valid_rar_type(rinfo->type))
+ return -EINVAL;
+
+ rar = &memrars[rinfo->type];
+ if (!rar->allocated)
+ return -ENODEV;
+
+ /* Reserve memory in RAR. */
+ handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
+ if (handle == 0)
+ return -ENOMEM;
+
+ buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
+
+ if (buffer_info == NULL) {
+ memrar_allocator_free(rar->allocator, handle);
+ return -ENOMEM;
+ }
+
+ buffer = &buffer_info->buffer;
+ buffer->info.type = rinfo->type;
+ buffer->info.size = rinfo->size;
+
+ /* Memory handle corresponding to the bus address. */
+ buffer->info.handle = handle;
+ buffer->bus_address = memrar_get_bus_address(rar, handle);
+
+ /*
+ * Keep track of owner so that we can later cleanup if
+ * necessary.
+ */
+ buffer_info->owner = filp;
+
+ kref_init(&buffer_info->refcount);
+
+ mutex_lock(&rar->lock);
+ list_add(&buffer_info->list, &rar->buffers.list);
+ mutex_unlock(&rar->lock);
+
+ rinfo->handle = buffer->info.handle;
+ request->bus_address = buffer->bus_address;
+
+ return 0;
+}
+
+/**
+ * memrar_release_block - release a RAR block
+ * @addr: address in RAR space
+ *
+ * Release a previously allocated block. Releases act on complete
+ * blocks, partially freeing a block is not supported
+ */
+
+static long memrar_release_block(u32 addr)
+{
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+ struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
+ long result = -EINVAL;
+
+ if (rar == NULL)
+ return -ENOENT;
+
+ mutex_lock(&rar->lock);
+
+ /*
+ * Iterate through the buffer list to find the corresponding
+ * buffer to be released.
+ */
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ struct RAR_block_info * const info =
+ &pos->buffer.info;
+
+ /*
+ * Take into account handle offsets that may have been
+ * added to the base handle, such as in the following
+ * scenario:
+ *
+ * u32 handle = base + offset;
+ * rar_handle_to_bus(handle);
+ * rar_release(handle);
+ */
+ if (addr >= info->handle
+ && addr < (info->handle + info->size)
+ && memrar_is_valid_rar_type(info->type)) {
+ kref_put(&pos->refcount, memrar_release_block_i);
+ result = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&rar->lock);
+
+ return result;
+}
+
+/**
+ * memrar_get_stat - read statistics for a RAR
+ * @r: statistics to be filled in
+ *
+ * Returns the statistics data for the RAR, or an error code if
+ * the request cannot be completed
+ */
+static long memrar_get_stat(struct RAR_stat *r)
+{
+ struct memrar_allocator *allocator;
+
+ if (!memrar_is_valid_rar_type(r->type))
+ return -EINVAL;
+
+ if (!memrars[r->type].allocated)
+ return -ENODEV;
+
+ allocator = memrars[r->type].allocator;
+
+ BUG_ON(allocator == NULL);
+
+ /*
+ * Allocator capacity doesn't change over time. No
+ * need to synchronize.
+ */
+ r->capacity = allocator->capacity;
+
+ mutex_lock(&allocator->lock);
+ r->largest_block_size = allocator->largest_free_area;
+ mutex_unlock(&allocator->lock);
+ return 0;
+}
+
+/**
+ * memrar_ioctl - ioctl callback
+ * @filp: file issuing the request
+ * @cmd: command
+ * @arg: pointer to control information
+ *
+ * Perform one of the ioctls supported by the memrar device
+ */
+
+static long memrar_ioctl(struct file *filp,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ long result = 0;
+
+ struct RAR_buffer buffer;
+ struct RAR_block_info * const request = &buffer.info;
+ struct RAR_stat rar_info;
+ u32 rar_handle;
+
+ switch (cmd) {
+ case RAR_HANDLER_RESERVE:
+ if (copy_from_user(request,
+ argp,
+ sizeof(*request)))
+ return -EFAULT;
+
+ result = memrar_reserve_block(&buffer, filp);
+ if (result != 0)
+ return result;
+
+		return copy_to_user(argp, request,
+				    sizeof(*request)) ? -EFAULT : 0;
+
+ case RAR_HANDLER_RELEASE:
+ if (copy_from_user(&rar_handle,
+ argp,
+ sizeof(rar_handle)))
+ return -EFAULT;
+
+ return memrar_release_block(rar_handle);
+
+ case RAR_HANDLER_STAT:
+ if (copy_from_user(&rar_info,
+ argp,
+ sizeof(rar_info)))
+ return -EFAULT;
+
+ /*
+ * Populate the RAR_stat structure based on the RAR
+ * type given by the user
+ */
+ if (memrar_get_stat(&rar_info) != 0)
+ return -EINVAL;
+
+ /*
+ * @todo Do we need to verify destination pointer
+ * "argp" is non-zero? Is that already done by
+ * copy_to_user()?
+ */
+ return copy_to_user(argp,
+ &rar_info,
+ sizeof(rar_info)) ? -EFAULT : 0;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+/**
+ * memrar_mmap - mmap helper for debugging
+ * @filp: handle doing the mapping
+ * @vma: memory area
+ *
+ * Support the mmap operation on the RAR space for debugging systems
+ * when the memory is not locked down.
+ */
+
+static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ /*
+ * This mmap() implementation is predominantly useful for
+ * debugging since the CPU will be prevented from accessing
+ * RAR memory by the hardware when RAR is properly locked
+ * down.
+ *
+ * In order for this implementation to be useful RAR memory
+	 * must not be locked down. However, we only want to do
+ * that when debugging. DO NOT leave RAR memory unlocked in a
+ * deployed device that utilizes RAR.
+ */
+
+ size_t const size = vma->vm_end - vma->vm_start;
+
+ /* Users pass the RAR handle as the mmap() offset parameter. */
+ unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
+
+ struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
+ unsigned long pfn;
+
+	/* Only allow privileged apps to go poking around this way */
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /* Invalid RAR handle or size passed to mmap(). */
+ if (rar == NULL
+ || handle == 0
+ || size > (handle - (unsigned long) rar->iobase))
+ return -EINVAL;
+
+ /*
+ * Retrieve physical address corresponding to the RAR handle,
+ * and convert it to a page frame.
+ */
+ pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
+
+
+ pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
+ handle,
+ handle + size);
+
+ /*
+ * Map RAR memory into user space. This is really only useful
+ * for debugging purposes since the memory won't be
+ * accessible, i.e. reads return zero and writes are ignored,
+ * when RAR access control is enabled.
+ */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ /* vma->vm_ops = &memrar_mem_ops; */
+
+ return 0;
+}
+
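Because the handle doubles as the mmap() offset, a debugging tool holding CAP_SYS_RAWIO can map a reserved block directly, as in the sketch below. It continues the user-space example above, assumes the handle returned by RAR_HANDLER_RESERVE is page aligned (mmap() requires an aligned offset), and is only meaningful while RAR access control is disabled.

#include <sys/mman.h>

/*
 * Debug-only helper: map a reserved block into user space for inspection.
 * 'fd' is the open /dev/memrar descriptor and 'info' the block filled in
 * by RAR_HANDLER_RESERVE.  This only shows real data while RAR is not
 * locked down, and only for CAP_SYS_RAWIO callers.
 */
static void *map_rar_block(int fd, const struct RAR_block_info *info)
{
	return mmap(NULL, info->size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, (off_t)info->handle);
}
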
+/**
+ * memrar_open - device open method
+ * @inode: inode to open
+ * @filp: file handle
+ *
+ * As we support multiple arbitrary opens, there is no real work to
+ * be done here.
+ */
+
+static int memrar_open(struct inode *inode, struct file *filp)
+{
+ nonseekable_open(inode, filp);
+ return 0;
+}
+
+/**
+ * memrar_release - close method for miscdev
+ * @inode: inode of device
+ * @filp: handle that is going away
+ *
+ * Free up all the regions that belong to this file handle. We use
+ * the handle as a natural Linux style 'lifetime' indicator and to
+ * ensure resources are not leaked when their owner explodes in an
+ * unplanned fashion.
+ */
+
+static int memrar_release(struct inode *inode, struct file *filp)
+{
+ /* Free all regions associated with the given file handle. */
+
+ struct memrar_buffer_info *pos;
+ struct memrar_buffer_info *tmp;
+ int z;
+
+ for (z = 0; z != MRST_NUM_RAR; ++z) {
+ struct memrar_rar_info * const rar = &memrars[z];
+
+ mutex_lock(&rar->lock);
+
+ list_for_each_entry_safe(pos,
+ tmp,
+ &rar->buffers.list,
+ list) {
+ if (filp == pos->owner)
+ kref_put(&pos->refcount,
+ memrar_release_block_i);
+ }
+
+ mutex_unlock(&rar->lock);
+ }
+
+ return 0;
+}
+
+/**
+ * rar_reserve - reserve RAR memory
+ * @buffers: buffers to reserve
+ * @count: number wanted
+ *
+ * Reserve a series of buffers in the RAR space. Returns the number of
+ * buffers successfully allocated
+ */
+
+size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+
+ size_t reserve_count = 0;
+
+ for (i = buffers; i != end; ++i) {
+ if (memrar_reserve_block(i, NULL) == 0)
+ ++reserve_count;
+ else
+ i->bus_address = 0;
+ }
+
+ return reserve_count;
+}
+EXPORT_SYMBOL(rar_reserve);
+
+/**
+ * rar_release - return RAR buffers
+ * @buffers: buffers to release
+ * @count: number of buffers to release
+ *
+ * Return a set of buffers to the RAR pool
+ */
+
+size_t rar_release(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+
+ size_t release_count = 0;
+
+ for (i = buffers; i != end; ++i) {
+ u32 * const handle = &i->info.handle;
+ if (memrar_release_block(*handle) == 0) {
+ /*
+ * @todo We assume we should do this each time
+ * the ref count is decremented. Should
+ * we instead only do this when the ref
+ * count has dropped to zero, and the
+ * buffer has been completely
+ * released/unmapped?
+ */
+ *handle = 0;
+ ++release_count;
+ }
+ }
+
+ return release_count;
+}
+EXPORT_SYMBOL(rar_release);
+
+/**
+ * rar_handle_to_bus - RAR to bus address
+ * @buffers: RAR buffer structure
+ * @count: number of buffers to convert
+ *
+ * Turn a list of RAR handle mappings into actual bus addresses. Note
+ * that when the device is locked down the bus addresses in question
+ * are not CPU accessible.
+ */
+
+size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
+{
+ struct RAR_buffer * const end =
+ (buffers == NULL ? buffers : buffers + count);
+ struct RAR_buffer *i;
+ struct memrar_buffer_info *pos;
+
+ size_t conversion_count = 0;
+
+ /*
+ * Find all bus addresses corresponding to the given handles.
+ *
+ * @todo Not liking this nested loop. Optimize.
+ */
+ for (i = buffers; i != end; ++i) {
+ struct memrar_rar_info * const rar =
+ memrar_get_rar_info(i->info.handle);
+
+ /*
+ * Check if we have a bogus handle, and then continue
+ * with remaining buffers.
+ */
+ if (rar == NULL) {
+ i->bus_address = 0;
+ continue;
+ }
+
+ mutex_lock(&rar->lock);
+
+ list_for_each_entry(pos, &rar->buffers.list, list) {
+ struct RAR_block_info * const user_info =
+ &pos->buffer.info;
+
+ /*
+ * Take into account handle offsets that may
+ * have been added to the base handle, such as
+ * in the following scenario:
+ *
+ * u32 handle = base + offset;
+ * rar_handle_to_bus(handle);
+ */
+
+ if (i->info.handle >= user_info->handle
+ && i->info.handle < (user_info->handle
+ + user_info->size)) {
+ u32 const offset =
+ i->info.handle - user_info->handle;
+
+ i->info.type = user_info->type;
+ i->info.size = user_info->size - offset;
+ i->bus_address =
+ pos->buffer.bus_address
+ + offset;
+
+ /* Increment the reference count. */
+ kref_get(&pos->refcount);
+
+ ++conversion_count;
+ break;
+ } else {
+ i->bus_address = 0;
+ }
+ }
+
+ mutex_unlock(&rar->lock);
+ }
+
+ return conversion_count;
+}
+EXPORT_SYMBOL(rar_handle_to_bus);
+
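For in-kernel users these three exported helpers are the whole client API: reserve buffers, translate their handles to bus addresses for the hardware, and release them when done. The sketch below shows a hypothetical client under those assumptions; the RAR type index, buffer size, and include path are placeholders, and whether a balancing release is needed for the reference taken by rar_handle_to_bus() is left open by the driver's own todo comments.

#include <linux/errno.h>
#include <linux/rar/memrar.h>	/* assumed path; declares struct RAR_buffer and the rar_* helpers */

static int rar_client_example(void)
{
	struct RAR_buffer buf = {
		.info = {
			.type = 0,	/* RAR type index; real code uses the header's constants */
			.size = 4096,	/* bytes requested */
		},
	};

	/* Reserve one buffer; rar_reserve() returns how many succeeded. */
	if (rar_reserve(&buf, 1) != 1)
		return -ENOMEM;

	/*
	 * Resolve the handle to a bus address for the hardware.  The
	 * address is not CPU accessible once RAR is locked down.
	 */
	if (rar_handle_to_bus(&buf, 1) != 1) {
		rar_release(&buf, 1);
		return -EINVAL;
	}

	/* ... program buf.bus_address into the device ... */

	rar_release(&buf, 1);
	return 0;
}
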
+static const struct file_operations memrar_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = memrar_ioctl,
+ .mmap = memrar_mmap,
+ .open = memrar_open,
+ .release = memrar_release,
+};
+
+static struct miscdevice memrar_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
+ .name = "memrar", /* /dev/memrar */
+ .fops = &memrar_fops
+};
+
+static char const banner[] __initdata =
+ KERN_INFO
+ "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
+
+/**
+ * memrar_registration_callback - RAR obtained
+ * @rar: RAR number
+ *
+ * We have been granted ownership of the RAR. Add it to our memory
+ * management tables
+ */
+
+static int memrar_registration_callback(unsigned long rar)
+{
+ /*
+ * We initialize the RAR parameters early on so that we can
+ * discontinue memrar device initialization and registration
+ * if suitably configured RARs are not available.
+ */
+ return memrar_init_rar_resources(rar, memrar_miscdev.name);
+}
+
+/**
+ * memrar_init - initialise RAR support
+ *
+ * Initialise support for RAR handlers. This may get loaded before
+ * the RAR support is activated, but the callbacks on the registration
+ * will handle that situation for us anyway.
+ */
+
+static int __init memrar_init(void)
+{
+ int err;
+
+ printk(banner);
+
+ err = misc_register(&memrar_miscdev);
+ if (err)
+ return err;
+
+ /* Now claim the two RARs we want */
+ err = register_rar(0, memrar_registration_callback, 0);
+ if (err)
+ goto fail;
+
+ err = register_rar(1, memrar_registration_callback, 1);
+ if (err == 0)
+ return 0;
+
+ /* It is possible that RAR 0 registered and allocated resources and
+ then RAR 1 failed, so do a full resource free */
+ memrar_fini_rar_resources();
+fail:
+ misc_deregister(&memrar_miscdev);
+ return err;
+}
+
+/**
+ * memrar_exit - unregister and unload
+ *
+ * Unregister the device and then unload any mappings and release
+ * the RAR resources
+ */
+
+static void __exit memrar_exit(void)
+{
+ misc_deregister(&memrar_miscdev);
+ memrar_fini_rar_resources();
+}
+
+
+module_init(memrar_init);
+module_exit(memrar_exit);
+
+
+MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
+MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEMRAR_VER);
+
+
+
+/*
+ Local Variables:
+ c-file-style: "linux"
+ End:
+*/
diff --git a/drivers/staging/netwave/Kconfig b/drivers/staging/netwave/Kconfig
deleted file mode 100644
index 8033e8171f9..00000000000
--- a/drivers/staging/netwave/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config PCMCIA_NETWAVE
- tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
- depends on PCMCIA && WLAN
- select WIRELESS_EXT
- select WEXT_PRIV
- help
- Say Y here if you intend to attach this type of PCMCIA (PC-card)
- wireless Ethernet networking card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called netwave_cs. If unsure, say N.
diff --git a/drivers/staging/netwave/Makefile b/drivers/staging/netwave/Makefile
deleted file mode 100644
index 2ab89de59b9..00000000000
--- a/drivers/staging/netwave/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
diff --git a/drivers/staging/netwave/TODO b/drivers/staging/netwave/TODO
deleted file mode 100644
index 9bd15a2f6d9..00000000000
--- a/drivers/staging/netwave/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - step up and maintain this driver to ensure that it continues
- to work. Having the hardware for this is pretty much a
- requirement. If this does not happen, the will be removed in
- the 2.6.35 kernel release.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
deleted file mode 100644
index 3875a722d12..00000000000
--- a/drivers/staging/netwave/netwave_cs.c
+++ /dev/null
@@ -1,1369 +0,0 @@
-/*********************************************************************
- *
- * Filename: netwave_cs.c
- * Version: 0.4.1
- * Description: Netwave AirSurfer Wireless LAN PC Card driver
- * Status: Experimental.
- * Authors: John Markus Bjørndalen <johnm@cs.uit.no>
- * Dag Brattli <dagb@cs.uit.no>
- * David Hinds <dahinds@users.sourceforge.net>
- * Created at: A long time ago!
- * Modified at: Mon Nov 10 11:54:37 1997
- * Modified by: Dag Brattli <dagb@cs.uit.no>
- *
- * Copyright (c) 1997 University of Tromsø, Norway
- *
- * Revision History:
- *
- * 08-Nov-97 15:14:47 John Markus Bjørndalen <johnm@cs.uit.no>
- * - Fixed some bugs in netwave_rx and cleaned it up a bit.
- * (One of the bugs would have destroyed packets when receiving
- * multiple packets per interrupt).
- * - Cleaned up parts of newave_hw_xmit.
- * - A few general cleanups.
- * 24-Oct-97 13:17:36 Dag Brattli <dagb@cs.uit.no>
- * - Fixed netwave_rx receive function (got updated docs)
- * Others:
- * - Changed name from xircnw to netwave, take a look at
- * http://www.netwave-wireless.com
- * - Some reorganizing of the code
- * - Removed possible race condition between interrupt handler and transmit
- * function
- * - Started to add wireless extensions, but still needs some coding
- * - Added watchdog for better handling of transmission timeouts
- * (hopefully this works better)
- ********************************************************************/
-
-/* To have statistics (just packets sent) define this */
-#undef NETWAVE_STATS
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define NETWAVE_REGOFF 0x8000
-/* The Netwave IO registers, offsets to iobase */
-#define NETWAVE_REG_COR 0x0
-#define NETWAVE_REG_CCSR 0x2
-#define NETWAVE_REG_ASR 0x4
-#define NETWAVE_REG_IMR 0xa
-#define NETWAVE_REG_PMR 0xc
-#define NETWAVE_REG_IOLOW 0x6
-#define NETWAVE_REG_IOHI 0x7
-#define NETWAVE_REG_IOCONTROL 0x8
-#define NETWAVE_REG_DATA 0xf
-/* The Netwave Extended IO registers, offsets to RamBase */
-#define NETWAVE_EREG_ASCC 0x114
-#define NETWAVE_EREG_RSER 0x120
-#define NETWAVE_EREG_RSERW 0x124
-#define NETWAVE_EREG_TSER 0x130
-#define NETWAVE_EREG_TSERW 0x134
-#define NETWAVE_EREG_CB 0x100
-#define NETWAVE_EREG_SPCQ 0x154
-#define NETWAVE_EREG_SPU 0x155
-#define NETWAVE_EREG_LIF 0x14e
-#define NETWAVE_EREG_ISPLQ 0x156
-#define NETWAVE_EREG_HHC 0x158
-#define NETWAVE_EREG_NI 0x16e
-#define NETWAVE_EREG_MHS 0x16b
-#define NETWAVE_EREG_TDP 0x140
-#define NETWAVE_EREG_RDP 0x150
-#define NETWAVE_EREG_PA 0x160
-#define NETWAVE_EREG_EC 0x180
-#define NETWAVE_EREG_CRBP 0x17a
-#define NETWAVE_EREG_ARW 0x166
-
-/*
- * Commands used in the extended command buffer
- * NETWAVE_EREG_CB (0x100-0x10F)
- */
-#define NETWAVE_CMD_NOP 0x00
-#define NETWAVE_CMD_SRC 0x01
-#define NETWAVE_CMD_STC 0x02
-#define NETWAVE_CMD_AMA 0x03
-#define NETWAVE_CMD_DMA 0x04
-#define NETWAVE_CMD_SAMA 0x05
-#define NETWAVE_CMD_ER 0x06
-#define NETWAVE_CMD_DR 0x07
-#define NETWAVE_CMD_TL 0x08
-#define NETWAVE_CMD_SRP 0x09
-#define NETWAVE_CMD_SSK 0x0a
-#define NETWAVE_CMD_SMD 0x0b
-#define NETWAVE_CMD_SAPD 0x0c
-#define NETWAVE_CMD_SSS 0x11
-/* End of Command marker */
-#define NETWAVE_CMD_EOC 0x00
-
-/* ASR register bits */
-#define NETWAVE_ASR_RXRDY 0x80
-#define NETWAVE_ASR_TXBA 0x01
-
-#define TX_TIMEOUT ((32*HZ)/100)
-
-static const unsigned int imrConfRFU1 = 0x10; /* RFU interrupt mask, keep high */
-static const unsigned int imrConfIENA = 0x02; /* Interrupt enable */
-
-static const unsigned int corConfIENA = 0x01; /* Interrupt enable */
-static const unsigned int corConfLVLREQ = 0x40; /* Keep high */
-
-static const unsigned int rxConfRxEna = 0x80; /* Receive Enable */
-static const unsigned int rxConfMAC = 0x20; /* MAC host receive mode*/
-static const unsigned int rxConfPro = 0x10; /* Promiscuous */
-static const unsigned int rxConfAMP = 0x08; /* Accept Multicast Packets */
-static const unsigned int rxConfBcast = 0x04; /* Accept Broadcast Packets */
-
-static const unsigned int txConfTxEna = 0x80; /* Transmit Enable */
-static const unsigned int txConfMAC = 0x20; /* Host sends MAC mode */
-static const unsigned int txConfEUD = 0x10; /* Enable Uni-Data packets */
-static const unsigned int txConfKey = 0x02; /* Scramble data packets */
-static const unsigned int txConfLoop = 0x01; /* Loopback mode */
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-/* Choose the domain, default is 0x100 */
-static u_int domain = 0x100;
-
-/* Scramble key, range from 0x0 to 0xffff.
- * 0x0 is no scrambling.
- */
-static u_int scramble_key = 0x0;
-
-/* Shared memory speed, in ns. The documentation states that
- * the card should not be read faster than every 400ns.
- * This timing should be provided by the HBA. If it becomes a
- * problem, try setting mem_speed to 400.
- */
-static int mem_speed;
-
-module_param(domain, int, 0);
-module_param(scramble_key, int, 0);
-module_param(mem_speed, int, 0);
-
-/*====================================================================*/
-
-/* PCMCIA (Card Services) related functions */
-static void netwave_release(struct pcmcia_device *link); /* Card removal */
-static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card
- insertion */
-static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */
-
-/* Hardware configuration */
-static void netwave_doreset(unsigned int iobase, u_char __iomem *ramBase);
-static void netwave_reset(struct net_device *dev);
-
-/* Misc device stuff */
-static int netwave_open(struct net_device *dev); /* Open the device */
-static int netwave_close(struct net_device *dev); /* Close the device */
-
-/* Packet transmission and Packet reception */
-static netdev_tx_t netwave_start_xmit( struct sk_buff *skb,
- struct net_device *dev);
-static int netwave_rx( struct net_device *dev);
-
-/* Interrupt routines */
-static irqreturn_t netwave_interrupt(int irq, void *dev_id);
-static void netwave_watchdog(struct net_device *);
-
-/* Wireless extensions */
-static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
-
-static void set_multicast_list(struct net_device *dev);
-
-/*
- A struct pcmcia_device structure has fields for most things that are needed
- to keep track of a socket, but there will usually be some device
- specific information that also needs to be kept track of. The
- 'priv' pointer in a struct pcmcia_device structure can be used to point to
- a device-specific private data structure, like this.
-
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-static const struct iw_handler_def netwave_handler_def;
-
-#define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */
-
-#define MAX_ESA 10
-
-typedef struct net_addr {
- u_char addr48[6];
-} net_addr;
-
-struct site_survey {
- u_short length;
- u_char struct_revision;
- u_char roaming_state;
-
- u_char sp_existsFlag;
- u_char sp_link_quality;
- u_char sp_max_link_quality;
- u_char linkQualityGoodFairBoundary;
- u_char linkQualityFairPoorBoundary;
- u_char sp_utilization;
- u_char sp_goodness;
- u_char sp_hotheadcount;
- u_char roaming_condition;
-
- net_addr sp;
- u_char numAPs;
- net_addr nearByAccessPoints[MAX_ESA];
-};
-
-typedef struct netwave_private {
- struct pcmcia_device *p_dev;
- spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
- dev_node_t node;
- u_char __iomem *ramBase;
- int timeoutCounter;
- int lastExec;
- struct timer_list watchdog; /* To avoid blocking state */
- struct site_survey nss;
- struct iw_statistics iw_stats; /* Wireless stats */
-} netwave_private;
-
-/*
- * The Netwave card is little-endian, so won't work for big endian
- * systems.
- */
-static inline unsigned short get_uint16(u_char __iomem *staddr)
-{
- return readw(staddr); /* Return only 16 bits */
-}
-
-static inline short get_int16(u_char __iomem * staddr)
-{
- return readw(staddr);
-}
-
-/*
- * Wait until the WOC (Write Operation Complete) bit in the
- * ASR (Adapter Status Register) is asserted.
- * This should have aborted if it takes too long time.
- */
-static inline void wait_WOC(unsigned int iobase)
-{
- /* Spin lock */
- while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ;
-}
-
-static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
- unsigned int iobase) {
- u_short resultBuffer;
-
- /* if time since last snapshot is > 1 sec. (100 jiffies?) then take
- * new snapshot, else return cached data. This is the recommended rate.
- */
- if ( jiffies - priv->lastExec > 100) {
- /* Take site survey snapshot */
- /*printk( KERN_DEBUG "Taking new snapshot. %ld\n", jiffies -
- priv->lastExec); */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SSS, ramBase + NETWAVE_EREG_CB + 0);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
- wait_WOC(iobase);
-
- /* Get result and copy to cach */
- resultBuffer = readw(ramBase + NETWAVE_EREG_CRBP);
- copy_from_pc( &priv->nss, ramBase+resultBuffer,
- sizeof(struct site_survey));
- }
-}
-
-/*
- * Function netwave_get_wireless_stats (dev)
- *
- * Wireless extensions statistics
- *
- */
-static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
-{
- unsigned long flags;
- unsigned int iobase = dev->base_addr;
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
- struct iw_statistics* wstats;
-
- wstats = &priv->iw_stats;
-
- spin_lock_irqsave(&priv->spinlock, flags);
-
- netwave_snapshot( priv, ramBase, iobase);
-
- wstats->status = priv->nss.roaming_state;
- wstats->qual.qual = readb( ramBase + NETWAVE_EREG_SPCQ);
- wstats->qual.level = readb( ramBase + NETWAVE_EREG_ISPLQ);
- wstats->qual.noise = readb( ramBase + NETWAVE_EREG_SPU) & 0x3f;
- wstats->discard.nwid = 0L;
- wstats->discard.code = 0L;
- wstats->discard.misc = 0L;
-
- spin_unlock_irqrestore(&priv->spinlock, flags);
-
- return &priv->iw_stats;
-}
-
-static const struct net_device_ops netwave_netdev_ops = {
- .ndo_open = netwave_open,
- .ndo_stop = netwave_close,
- .ndo_start_xmit = netwave_start_xmit,
- .ndo_set_multicast_list = set_multicast_list,
- .ndo_tx_timeout = netwave_watchdog,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * Function netwave_attach (void)
- *
- * Creates an "instance" of the driver, allocating local data
- * structures for one device. The device is registered with Card
- * Services.
- *
- * The dev_link structure is initialized, but we don't actually
- * configure the card at this point -- we wait until we receive a
- * card insertion event.
- */
-static int netwave_probe(struct pcmcia_device *link)
-{
- struct net_device *dev;
- netwave_private *priv;
-
- dev_dbg(&link->dev, "netwave_attach()\n");
-
- /* Initialize the struct pcmcia_device structure */
- dev = alloc_etherdev(sizeof(netwave_private));
- if (!dev)
- return -ENOMEM;
- priv = netdev_priv(dev);
- priv->p_dev = link;
- link->priv = dev;
-
- /* The io structure describes IO port mapping */
- link->io.NumPorts1 = 16;
- link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- /* link->io.NumPorts2 = 16;
- link->io.Attributes2 = IO_DATA_PATH_WIDTH_16; */
- link->io.IOAddrLines = 5;
-
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &netwave_interrupt;
-
- /* General socket configuration */
- link->conf.Attributes = CONF_ENABLE_IRQ;
- link->conf.IntType = INT_MEMORY_AND_IO;
- link->conf.ConfigIndex = 1;
-
- /* Netwave private struct init. link/dev/node already taken care of,
- * other stuff zero'd - Jean II */
- spin_lock_init(&priv->spinlock);
-
- /* Netwave specific entries in the device structure */
- dev->netdev_ops = &netwave_netdev_ops;
- /* wireless extensions */
- dev->wireless_handlers = &netwave_handler_def;
-
- dev->watchdog_timeo = TX_TIMEOUT;
-
- return netwave_pcmcia_config( link);
-} /* netwave_attach */
-
-/*
- * Function netwave_detach (link)
- *
- * This deletes a driver "instance". The device is de-registered
- * with Card Services. If it has been released, all local data
- * structures are freed. Otherwise, the structures will be freed
- * when the device is released.
- */
-static void netwave_detach(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- dev_dbg(&link->dev, "netwave_detach\n");
-
- netwave_release(link);
-
- if (link->dev_node)
- unregister_netdev(dev);
-
- free_netdev(dev);
-} /* netwave_detach */
-
-/*
- * Wireless Handler : get protocol name
- */
-static int netwave_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- strcpy(wrqu->name, "Netwave");
- return 0;
-}
-
-/*
- * Wireless Handler : set Network ID
- */
-static int netwave_set_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long flags;
- unsigned int iobase = dev->base_addr;
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&priv->spinlock, flags);
-
- if(!wrqu->nwid.disabled) {
- domain = wrqu->nwid.value;
- printk( KERN_DEBUG "Setting domain to 0x%x%02x\n",
- (domain >> 8) & 0x01, domain & 0xff);
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SMD, ramBase + NETWAVE_EREG_CB + 0);
- writeb( domain & 0xff, ramBase + NETWAVE_EREG_CB + 1);
- writeb((domain >>8 ) & 0x01,ramBase + NETWAVE_EREG_CB+2);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
- }
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&priv->spinlock, flags);
-
- return 0;
-}
-
-/*
- * Wireless Handler : get Network ID
- */
-static int netwave_get_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- wrqu->nwid.value = domain;
- wrqu->nwid.disabled = 0;
- wrqu->nwid.fixed = 1;
- return 0;
-}
-
-/*
- * Wireless Handler : set scramble key
- */
-static int netwave_set_scramble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *key)
-{
- unsigned long flags;
- unsigned int iobase = dev->base_addr;
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&priv->spinlock, flags);
-
- scramble_key = (key[0] << 8) | key[1];
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SSK, ramBase + NETWAVE_EREG_CB + 0);
- writeb(scramble_key & 0xff, ramBase + NETWAVE_EREG_CB + 1);
- writeb((scramble_key>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&priv->spinlock, flags);
-
- return 0;
-}
-
-/*
- * Wireless Handler : get scramble key
- */
-static int netwave_get_scramble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *key)
-{
- key[1] = scramble_key & 0xff;
- key[0] = (scramble_key>>8) & 0xff;
- wrqu->encoding.flags = IW_ENCODE_ENABLED;
- wrqu->encoding.length = 2;
- return 0;
-}
-
-/*
- * Wireless Handler : get mode
- */
-static int netwave_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- if(domain & 0x100)
- wrqu->mode = IW_MODE_INFRA;
- else
- wrqu->mode = IW_MODE_ADHOC;
-
- return 0;
-}
-
-/*
- * Wireless Handler : get range info
- */
-static int netwave_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_range *range = (struct iw_range *) extra;
- int ret = 0;
-
- /* Set the length (very important for backward compatibility) */
- wrqu->data.length = sizeof(struct iw_range);
-
- /* Set all the info we don't care or don't know about to zero */
- memset(range, 0, sizeof(struct iw_range));
-
- /* Set the Wireless Extension versions */
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 9; /* Nothing for us in v10 and v11 */
-
- /* Set information in the range struct */
- range->throughput = 450 * 1000; /* don't argue on this ! */
- range->min_nwid = 0x0000;
- range->max_nwid = 0x01FF;
-
- range->num_channels = range->num_frequency = 0;
-
- range->sensitivity = 0x3F;
- range->max_qual.qual = 255;
- range->max_qual.level = 255;
- range->max_qual.noise = 0;
-
- range->num_bitrates = 1;
- range->bitrate[0] = 1000000; /* 1 Mb/s */
-
- range->encoding_size[0] = 2; /* 16 bits scrambling */
- range->num_encoding_sizes = 1;
- range->max_encoding_tokens = 1; /* Only one key possible */
-
- return ret;
-}
-
-/*
- * Wireless Private Handler : get snapshot
- */
-static int netwave_get_snap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long flags;
- unsigned int iobase = dev->base_addr;
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&priv->spinlock, flags);
-
- /* Take snapshot of environment */
- netwave_snapshot( priv, ramBase, iobase);
- wrqu->data.length = priv->nss.length;
- memcpy(extra, (u_char *) &priv->nss, sizeof( struct site_survey));
-
- priv->lastExec = jiffies;
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&priv->spinlock, flags);
-
- return(0);
-}
-
-/*
- * Structures to export the Wireless Handlers
- * This is the stuff that are treated the wireless extensions (iwconfig)
- */
-
-static const struct iw_priv_args netwave_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
- { SIOCGIPSNAP, 0,
- IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof(struct site_survey),
- "getsitesurvey" },
-};
-
-static const iw_handler netwave_handler[] =
-{
- NULL, /* SIOCSIWNAME */
- netwave_get_name, /* SIOCGIWNAME */
- netwave_set_nwid, /* SIOCSIWNWID */
- netwave_get_nwid, /* SIOCGIWNWID */
- NULL, /* SIOCSIWFREQ */
- NULL, /* SIOCGIWFREQ */
- NULL, /* SIOCSIWMODE */
- netwave_get_mode, /* SIOCGIWMODE */
- NULL, /* SIOCSIWSENS */
- NULL, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- netwave_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- NULL, /* SIOCSIWSPY */
- NULL, /* SIOCGIWSPY */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWAP */
- NULL, /* SIOCGIWAP */
- NULL, /* -- hole -- */
- NULL, /* SIOCGIWAPLIST */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWESSID */
- NULL, /* SIOCGIWESSID */
- NULL, /* SIOCSIWNICKN */
- NULL, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWRATE */
- NULL, /* SIOCGIWRATE */
- NULL, /* SIOCSIWRTS */
- NULL, /* SIOCGIWRTS */
- NULL, /* SIOCSIWFRAG */
- NULL, /* SIOCGIWFRAG */
- NULL, /* SIOCSIWTXPOW */
- NULL, /* SIOCGIWTXPOW */
- NULL, /* SIOCSIWRETRY */
- NULL, /* SIOCGIWRETRY */
- netwave_set_scramble, /* SIOCSIWENCODE */
- netwave_get_scramble, /* SIOCGIWENCODE */
-};
-
-static const iw_handler netwave_private_handler[] =
-{
- NULL, /* SIOCIWFIRSTPRIV */
- netwave_get_snap, /* SIOCIWFIRSTPRIV + 1 */
-};
-
-static const struct iw_handler_def netwave_handler_def =
-{
- .num_standard = ARRAY_SIZE(netwave_handler),
- .num_private = ARRAY_SIZE(netwave_private_handler),
- .num_private_args = ARRAY_SIZE(netwave_private_args),
- .standard = (iw_handler *) netwave_handler,
- .private = (iw_handler *) netwave_private_handler,
- .private_args = (struct iw_priv_args *) netwave_private_args,
- .get_wireless_stats = netwave_get_wireless_stats,
-};
-
-/*
- * Function netwave_pcmcia_config (link)
- *
- * netwave_pcmcia_config() is scheduled to run after a CARD_INSERTION
- * event is received, to configure the PCMCIA socket, and to make the
- * device available to the system.
- *
- */
-
-static int netwave_pcmcia_config(struct pcmcia_device *link) {
- struct net_device *dev = link->priv;
- netwave_private *priv = netdev_priv(dev);
- int i, j, ret;
- win_req_t req;
- memreq_t mem;
- u_char __iomem *ramBase = NULL;
-
- dev_dbg(&link->dev, "netwave_pcmcia_config\n");
-
- /*
- * Try allocating IO ports. This tries a few fixed addresses.
- * If you want, you can also read the card's config table to
- * pick addresses -- see the serial driver for an example.
- */
- for (i = j = 0x0; j < 0x400; j += 0x20) {
- link->io.BasePort1 = j ^ 0x300;
- i = pcmcia_request_io(link, &link->io);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- /*
- * Now allocate an interrupt line. Note that this does not
- * actually assign a handler to the interrupt.
- */
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
-
- /*
- * This actually configures the PCMCIA socket -- setting up
- * the I/O windows and the interrupt mapping.
- */
- ret = pcmcia_request_configuration(link, &link->conf);
- if (ret)
- goto failed;
-
- /*
- * Allocate a 32K memory window. Note that the struct pcmcia_device
- * structure provides space for one window handle -- if your
- * device needs several windows, you'll need to keep track of
- * the handles in your private data structure, dev->priv.
- */
- dev_dbg(&link->dev, "Setting mem speed of %d\n", mem_speed);
-
- req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
- req.Base = 0; req.Size = 0x8000;
- req.AccessSpeed = mem_speed;
- ret = pcmcia_request_window(link, &req, &link->win);
- if (ret)
- goto failed;
- mem.CardOffset = 0x20000; mem.Page = 0;
- ret = pcmcia_map_mem_page(link, link->win, &mem);
- if (ret)
- goto failed;
-
- /* Store base address of the common window frame */
- ramBase = ioremap(req.Base, 0x8000);
- priv->ramBase = ramBase;
-
- dev->irq = link->irq.AssignedIRQ;
- dev->base_addr = link->io.BasePort1;
- SET_NETDEV_DEV(dev, &link->dev);
-
- if (register_netdev(dev) != 0) {
- printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
- goto failed;
- }
-
- strcpy(priv->node.dev_name, dev->name);
- link->dev_node = &priv->node;
-
- /* Reset card before reading physical address */
- netwave_doreset(dev->base_addr, ramBase);
-
- /* Read the ethernet address and fill in the Netwave registers. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i);
-
- printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx, "
- "id %c%c, hw_addr %pM\n",
- dev->name, dev->base_addr, dev->irq,
- (u_long) ramBase,
- (int) readb(ramBase+NETWAVE_EREG_NI),
- (int) readb(ramBase+NETWAVE_EREG_NI+1),
- dev->dev_addr);
-
- /* get revision words */
- printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n",
- get_uint16(ramBase + NETWAVE_EREG_ARW),
- get_uint16(ramBase + NETWAVE_EREG_ARW+2));
- return 0;
-
-failed:
- netwave_release(link);
- return -ENODEV;
-} /* netwave_pcmcia_config */
-
-/*
- * Function netwave_release (arg)
- *
- * After a card is removed, netwave_release() will unregister the net
- * device, and release the PCMCIA configuration. If the device is
- * still open, this will be postponed until it is closed.
- */
-static void netwave_release(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
- netwave_private *priv = netdev_priv(dev);
-
- dev_dbg(&link->dev, "netwave_release\n");
-
- pcmcia_disable_device(link);
- if (link->win)
- iounmap(priv->ramBase);
-}
-
-static int netwave_suspend(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (link->open)
- netif_device_detach(dev);
-
- return 0;
-}
-
-static int netwave_resume(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (link->open) {
- netwave_reset(dev);
- netif_device_attach(dev);
- }
-
- return 0;
-}
-
-
-/*
- * Function netwave_doreset (ioBase, ramBase)
- *
- * Proper hardware reset of the card.
- */
-static void netwave_doreset(unsigned int ioBase, u_char __iomem *ramBase)
-{
- /* Reset card */
- wait_WOC(ioBase);
- outb(0x80, ioBase + NETWAVE_REG_PMR);
- writeb(0x08, ramBase + NETWAVE_EREG_ASCC); /* Bit 3 is WOC */
- outb(0x0, ioBase + NETWAVE_REG_PMR); /* release reset */
-}
-
-/*
- * Function netwave_reset (dev)
- *
- * Reset and restore all of the netwave registers
- */
-static void netwave_reset(struct net_device *dev) {
- /* u_char state; */
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
- unsigned int iobase = dev->base_addr;
-
- pr_debug("netwave_reset: Done with hardware reset\n");
-
- priv->timeoutCounter = 0;
-
- /* Reset card */
- netwave_doreset(iobase, ramBase);
- printk(KERN_DEBUG "netwave_reset: Done with hardware reset\n");
-
- /* Write a NOP to check the card */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_NOP, ramBase + NETWAVE_EREG_CB + 0);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
-
- /* Set receive conf */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SRC, ramBase + NETWAVE_EREG_CB + 0);
- writeb(rxConfRxEna + rxConfBcast, ramBase + NETWAVE_EREG_CB + 1);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
-
- /* Set transmit conf */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_STC, ramBase + NETWAVE_EREG_CB + 0);
- writeb(txConfTxEna, ramBase + NETWAVE_EREG_CB + 1);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
-
- /* Now set the MU Domain */
- printk(KERN_DEBUG "Setting domain to 0x%x%02x\n", (domain >> 8) & 0x01, domain & 0xff);
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SMD, ramBase + NETWAVE_EREG_CB + 0);
- writeb(domain & 0xff, ramBase + NETWAVE_EREG_CB + 1);
- writeb((domain>>8) & 0x01, ramBase + NETWAVE_EREG_CB + 2);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
-
- /* Set scramble key */
- printk(KERN_DEBUG "Setting scramble key to 0x%x\n", scramble_key);
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SSK, ramBase + NETWAVE_EREG_CB + 0);
- writeb(scramble_key & 0xff, ramBase + NETWAVE_EREG_CB + 1);
- writeb((scramble_key>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
-
- /* Enable interrupts, bit 4 high to keep unused
- * source from interrupting us, bit 2 high to
- * set interrupt enable, 567 to enable TxDN,
- * RxErr and RxRdy
- */
- wait_WOC(iobase);
- outb(imrConfIENA+imrConfRFU1, iobase + NETWAVE_REG_IMR);
-
- /* Hent 4 bytes fra 0x170. Skal vaere 0a,29,88,36
- * waitWOC
- * skriv 80 til d000:3688
- * sjekk om det ble 80
- */
-
- /* Enable Receiver */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_ER, ramBase + NETWAVE_EREG_CB + 0);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
-
- /* Set the IENA bit in COR */
- wait_WOC(iobase);
- outb(corConfIENA + corConfLVLREQ, iobase + NETWAVE_REG_COR);
-}
-
-/*
- * Function netwave_hw_xmit (data, len, dev)
- */
-static int netwave_hw_xmit(unsigned char* data, int len,
- struct net_device* dev) {
- unsigned long flags;
- unsigned int TxFreeList,
- curBuff,
- MaxData,
- DataOffset;
- int tmpcount;
-
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem * ramBase = priv->ramBase;
- unsigned int iobase = dev->base_addr;
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&priv->spinlock, flags);
-
- /* Check if there are transmit buffers available */
- wait_WOC(iobase);
- if ((inb(iobase+NETWAVE_REG_ASR) & NETWAVE_ASR_TXBA) == 0) {
- /* No buffers available */
- printk(KERN_DEBUG "netwave_hw_xmit: %s - no xmit buffers available.\n",
- dev->name);
- spin_unlock_irqrestore(&priv->spinlock, flags);
- return 1;
- }
-
- dev->stats.tx_bytes += len;
-
- pr_debug("Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n",
- readb(ramBase + NETWAVE_EREG_SPCQ),
- readb(ramBase + NETWAVE_EREG_SPU),
- readb(ramBase + NETWAVE_EREG_LIF),
- readb(ramBase + NETWAVE_EREG_ISPLQ));
-
- /* Now try to insert it into the adapters free memory */
- wait_WOC(iobase);
- TxFreeList = get_uint16(ramBase + NETWAVE_EREG_TDP);
- MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2);
- DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4);
-
- pr_debug("TxFreeList %x, MaxData %x, DataOffset %x\n",
- TxFreeList, MaxData, DataOffset);
-
- /* Copy packet to the adapter fragment buffers */
- curBuff = TxFreeList;
- tmpcount = 0;
- while (tmpcount < len) {
- int tmplen = len - tmpcount;
- copy_to_pc(ramBase + curBuff + DataOffset, data + tmpcount,
- (tmplen < MaxData) ? tmplen : MaxData);
- tmpcount += MaxData;
-
- /* Advance to next buffer */
- curBuff = get_uint16(ramBase + curBuff);
- }
-
- /* Now issue transmit list */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_TL, ramBase + NETWAVE_EREG_CB + 0);
- writeb(len & 0xff, ramBase + NETWAVE_EREG_CB + 1);
- writeb((len>>8) & 0xff, ramBase + NETWAVE_EREG_CB + 2);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 3);
-
- spin_unlock_irqrestore(&priv->spinlock, flags);
- return 0;
-}
-
-static netdev_tx_t netwave_start_xmit(struct sk_buff *skb,
- struct net_device *dev) {
- /* This flag indicate that the hardware can't perform a transmission.
- * Theoritically, NET3 check it before sending a packet to the driver,
- * but in fact it never do that and pool continuously.
- * As the watchdog will abort too long transmissions, we are quite safe...
- */
-
- netif_stop_queue(dev);
-
- {
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char* buf = skb->data;
-
- if (netwave_hw_xmit( buf, length, dev) == 1) {
- /* Some error, let's make them call us another time? */
- netif_start_queue(dev);
- }
- dev->trans_start = jiffies;
- }
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-} /* netwave_start_xmit */
-
-/*
- * Function netwave_interrupt (irq, dev_id)
- *
- * This function is the interrupt handler for the Netwave card. This
- * routine will be called whenever:
- * 1. A packet is received.
- * 2. A packet has successfully been transferred and the unit is
- * ready to transmit another packet.
- * 3. A command has completed execution.
- */
-static irqreturn_t netwave_interrupt(int irq, void* dev_id)
-{
- unsigned int iobase;
- u_char __iomem *ramBase;
- struct net_device *dev = (struct net_device *)dev_id;
- struct netwave_private *priv = netdev_priv(dev);
- struct pcmcia_device *link = priv->p_dev;
- int i;
-
- if (!netif_device_present(dev))
- return IRQ_NONE;
-
- iobase = dev->base_addr;
- ramBase = priv->ramBase;
-
- /* Now find what caused the interrupt, check while interrupts ready */
- for (i = 0; i < 10; i++) {
- u_char status;
-
- wait_WOC(iobase);
- if (!(inb(iobase+NETWAVE_REG_CCSR) & 0x02))
- break; /* None of the interrupt sources asserted (normal exit) */
-
- status = inb(iobase + NETWAVE_REG_ASR);
-
- if (!pcmcia_dev_present(link)) {
- pr_debug("netwave_interrupt: Interrupt with status 0x%x "
- "from removed or suspended card!\n", status);
- break;
- }
-
- /* RxRdy */
- if (status & 0x80) {
- netwave_rx(dev);
- /* wait_WOC(iobase); */
- /* RxRdy cannot be reset directly by the host */
- }
- /* RxErr */
- if (status & 0x40) {
- u_char rser;
-
- rser = readb(ramBase + NETWAVE_EREG_RSER);
-
- if (rser & 0x04) {
- ++dev->stats.rx_dropped;
- ++dev->stats.rx_crc_errors;
- }
- if (rser & 0x02)
- ++dev->stats.rx_frame_errors;
-
- /* Clear the RxErr bit in RSER. RSER+4 is the
- * write part. Also clear the RxCRC (0x04) and
- * RxBig (0x02) bits if present */
- wait_WOC(iobase);
- writeb(0x40 | (rser & 0x06), ramBase + NETWAVE_EREG_RSER + 4);
-
- /* Write bit 6 high to ASCC to clear RxErr in ASR,
- * WOC must be set first!
- */
- wait_WOC(iobase);
- writeb(0x40, ramBase + NETWAVE_EREG_ASCC);
-
- /* Remember to count up dev->stats on error packets */
- ++dev->stats.rx_errors;
- }
- /* TxDN */
- if (status & 0x20) {
- int txStatus;
-
- txStatus = readb(ramBase + NETWAVE_EREG_TSER);
- pr_debug("Transmit done. TSER = %x id %x\n",
- txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1));
-
- if (txStatus & 0x20) {
- /* Transmitting was okay, clear bits */
- wait_WOC(iobase);
- writeb(0x2f, ramBase + NETWAVE_EREG_TSER + 4);
- ++dev->stats.tx_packets;
- }
-
- if (txStatus & 0xd0) {
- if (txStatus & 0x80) {
- ++dev->stats.collisions; /* Because of /proc/net/dev*/
- /* ++dev->stats.tx_aborted_errors; */
- /* printk("Collision. %ld\n", jiffies - dev->trans_start); */
- }
- if (txStatus & 0x40)
- ++dev->stats.tx_carrier_errors;
- /* 0x80 TxGU Transmit giveup - nine times and no luck
- * 0x40 TxNOAP No access point. Discarded packet.
- * 0x10 TxErr Transmit error. Always set when
- * TxGU and TxNOAP is set. (Those are the only ones
- * to set TxErr).
- */
- pr_debug("netwave_interrupt: TxDN with error status %x\n",
- txStatus);
-
- /* Clear out TxGU, TxNOAP, TxErr and TxTrys */
- wait_WOC(iobase);
- writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4);
- ++dev->stats.tx_errors;
- }
- pr_debug("New status is TSER %x ASR %x\n",
- readb(ramBase + NETWAVE_EREG_TSER),
- inb(iobase + NETWAVE_REG_ASR));
-
- netif_wake_queue(dev);
- }
- /* TxBA, this would trigger on all error packets received */
- /* if (status & 0x01) {
- pr_debug("Transmit buffers available, %x\n", status);
- }
- */
- }
- /* Handled if we looped at least one time - Jean II */
- return IRQ_RETVAL(i);
-} /* netwave_interrupt */
-
-/*
- * Function netwave_watchdog (a)
- *
- * Watchdog : when we start a transmission, we set a timer in the
- * kernel. If the transmission complete, this timer is disabled. If
- * it expire, we reset the card.
- *
- */
-static void netwave_watchdog(struct net_device *dev) {
-
- pr_debug("%s: netwave_watchdog: watchdog timer expired\n", dev->name);
- netwave_reset(dev);
- dev->trans_start = jiffies;
- netif_wake_queue(dev);
-} /* netwave_watchdog */
-
-static int netwave_rx(struct net_device *dev)
-{
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem *ramBase = priv->ramBase;
- unsigned int iobase = dev->base_addr;
- u_char rxStatus;
- struct sk_buff *skb = NULL;
- unsigned int curBuffer,
- rcvList;
- int rcvLen;
- int tmpcount = 0;
- int dataCount, dataOffset;
- int i;
- u_char *ptr;
-
- pr_debug("xinw_rx: Receiving ... \n");
-
- /* Receive max 10 packets for now. */
- for (i = 0; i < 10; i++) {
- /* Any packets? */
- wait_WOC(iobase);
- rxStatus = readb(ramBase + NETWAVE_EREG_RSER);
- if ( !( rxStatus & 0x80)) /* No more packets */
- break;
-
- /* Check if multicast/broadcast or other */
- /* multicast = (rxStatus & 0x20); */
-
- /* The receive list pointer and length of the packet */
- wait_WOC(iobase);
- rcvLen = get_int16( ramBase + NETWAVE_EREG_RDP);
- rcvList = get_uint16( ramBase + NETWAVE_EREG_RDP + 2);
-
- if (rcvLen < 0) {
- printk(KERN_DEBUG "netwave_rx: Receive packet with len %d\n",
- rcvLen);
- return 0;
- }
-
- skb = dev_alloc_skb(rcvLen+5);
- if (skb == NULL) {
- pr_debug("netwave_rx: Could not allocate an sk_buff of "
- "length %d\n", rcvLen);
- ++dev->stats.rx_dropped;
- /* Tell the adapter to skip the packet */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
- return 0;
- }
-
- skb_reserve( skb, 2); /* Align IP on 16 byte */
- skb_put( skb, rcvLen);
-
- /* Copy packet fragments to the skb data area */
- ptr = (u_char*) skb->data;
- curBuffer = rcvList;
- tmpcount = 0;
- while ( tmpcount < rcvLen) {
- /* Get length and offset of current buffer */
- dataCount = get_uint16( ramBase+curBuffer+2);
- dataOffset = get_uint16( ramBase+curBuffer+4);
-
- copy_from_pc( ptr + tmpcount,
- ramBase+curBuffer+dataOffset, dataCount);
-
- tmpcount += dataCount;
-
- /* Point to next buffer */
- curBuffer = get_uint16(ramBase + curBuffer);
- }
-
- skb->protocol = eth_type_trans(skb,dev);
- /* Queue packet for network layer */
- netif_rx(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rcvLen;
-
- /* Got the packet, tell the adapter to skip it */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
- pr_debug("Packet reception ok\n");
- }
- return 0;
-}
-
-static int netwave_open(struct net_device *dev) {
- netwave_private *priv = netdev_priv(dev);
- struct pcmcia_device *link = priv->p_dev;
-
- dev_dbg(&link->dev, "netwave_open: starting.\n");
-
- if (!pcmcia_dev_present(link))
- return -ENODEV;
-
- link->open++;
-
- netif_start_queue(dev);
- netwave_reset(dev);
-
- return 0;
-}
-
-static int netwave_close(struct net_device *dev) {
- netwave_private *priv = netdev_priv(dev);
- struct pcmcia_device *link = priv->p_dev;
-
- dev_dbg(&link->dev, "netwave_close: finishing.\n");
-
- link->open--;
- netif_stop_queue(dev);
-
- return 0;
-}
-
-static struct pcmcia_device_id netwave_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("Xircom", "CreditCard Netwave", 0x2e3ee845, 0x54e28a28),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, netwave_ids);
-
-static struct pcmcia_driver netwave_driver = {
- .owner = THIS_MODULE,
- .drv = {
- .name = "netwave_cs",
- },
- .probe = netwave_probe,
- .remove = netwave_detach,
- .id_table = netwave_ids,
- .suspend = netwave_suspend,
- .resume = netwave_resume,
-};
-
-static int __init init_netwave_cs(void)
-{
- return pcmcia_register_driver(&netwave_driver);
-}
-
-static void __exit exit_netwave_cs(void)
-{
- pcmcia_unregister_driver(&netwave_driver);
-}
-
-module_init(init_netwave_cs);
-module_exit(exit_netwave_cs);
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
- */
-static void set_multicast_list(struct net_device *dev)
-{
- unsigned int iobase = dev->base_addr;
- netwave_private *priv = netdev_priv(dev);
- u_char __iomem * ramBase = priv->ramBase;
- u_char rcvMode = 0;
-
-#ifdef PCMCIA_DEBUG
- {
- xstatic int old;
- if (old != netdev_mc_count(dev)) {
- old = netdev_mc_count(dev);
- pr_debug("%s: setting Rx mode to %d addresses.\n",
- dev->name, netdev_mc_count(dev));
- }
- }
-#endif
-
- if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
- /* Multicast Mode */
- rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
- } else if (dev->flags & IFF_PROMISC) {
- /* Promiscous mode */
- rcvMode = rxConfRxEna + rxConfPro + rxConfAMP + rxConfBcast;
- } else {
- /* Normal mode */
- rcvMode = rxConfRxEna + rxConfBcast;
- }
-
- /* printk("netwave set_multicast_list: rcvMode to %x\n", rcvMode);*/
- /* Now set receive mode */
- wait_WOC(iobase);
- writeb(NETWAVE_CMD_SRC, ramBase + NETWAVE_EREG_CB + 0);
- writeb(rcvMode, ramBase + NETWAVE_EREG_CB + 1);
- writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 2);
-}
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/otus/80211core/cagg.c b/drivers/staging/otus/80211core/cagg.c
index f9514c06c14..c3cef1a02aa 100644
--- a/drivers/staging/otus/80211core/cagg.c
+++ b/drivers/staging/otus/80211core/cagg.c
@@ -2485,7 +2485,7 @@ void zfAggTxRetransmit(zdev_t* dev, struct bufInfo *buf_info, struct aggControl
BAW->insert(dev, buf_info->buf, tid_tx->bar_ssn >> 4, aggControl->tid_baw, buf_info->baw_retransmit, &header_r);
}*/
- if ((err = zfHpSend(dev,
+ err = zfHpSend(dev,
buf_info->baw_header->header,
buf_info->baw_header->headerLen,
buf_info->baw_header->snap,
@@ -2496,7 +2496,8 @@ void zfAggTxRetransmit(zdev_t* dev, struct bufInfo *buf_info, struct aggControl
buf_info->baw_header->removeLen,
ZM_EXTERNAL_ALLOC_BUF,
(u8_t)tid_tx->ac,
- buf_info->baw_header->keyIdx)) != ZM_SUCCESS)
+ buf_info->baw_header->keyIdx);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -2797,9 +2798,10 @@ u16_t zfAggTxSendEth(zdev_t* dev, zbuf_t* buf, u16_t port, u16_t bufType, u8_t f
BAW->insert(dev, buf, tid_tx->bar_ssn >> 4, aggControl->tid_baw, 0, &header_r);
}*/
- if ((err = zfHpSend(dev, header, headerLen, snap, snapLen,
+ err = zfHpSend(dev, header, headerLen, snap, snapLen,
mic, micLen, frag.buf[i], removeLen,
- frag.bufType[i], zcUpToAc[up&0x7], keyIdx)) != ZM_SUCCESS)
+ frag.bufType[i], zcUpToAc[up&0x7], keyIdx);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -2849,7 +2851,8 @@ u16_t zfAggSendAddbaRequest(zdev_t* dev, u16_t *dst, u16_t ac, u16_t up)
/*
* TBD : Maximum size of management frame
*/
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return ZM_SUCCESS;
@@ -2892,8 +2895,9 @@ u16_t zfAggSendAddbaRequest(zdev_t* dev, u16_t *dst, u16_t ac, u16_t up)
//zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data);
#if 0
- if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -3290,7 +3294,8 @@ u16_t zfAggSendAddbaResponse(zdev_t* dev, struct aggBaFrameParameter *bf)
/*
* TBD : Maximum size of management frame
*/
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return ZM_SUCCESS;
@@ -3337,8 +3342,9 @@ u16_t zfAggSendAddbaResponse(zdev_t* dev, struct aggBaFrameParameter *bf)
//zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data);
#if 0
- if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -3443,7 +3449,8 @@ u16_t zfAggSendBar(zdev_t* dev, TID_TX tid_tx, struct aggBarControl *aggBarCon
/*
* TBD : Maximum size of management frame
*/
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return ZM_SUCCESS;
@@ -3486,8 +3493,9 @@ u16_t zfAggSendBar(zdev_t* dev, TID_TX tid_tx, struct aggBarControl *aggBarCon
//zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data);
#if 0
- if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
diff --git a/drivers/staging/otus/80211core/ccmd.c b/drivers/staging/otus/80211core/ccmd.c
index 3e3d9b500f6..ab300df0201 100644
--- a/drivers/staging/otus/80211core/ccmd.c
+++ b/drivers/staging/otus/80211core/ccmd.c
@@ -1436,7 +1436,8 @@ u16_t zfiWlanDeauth(zdev_t *dev, u16_t *macAddr, u16_t reason)
*/
/*
- if ((id = zfApFindSta(dev, macAddr)) != 0xffff)
+ id = zfApFindSta(dev, macAddr);
+ if (id != 0xffff)
{
u32_t key[8];
u16_t nullAddr[3] = { 0x0, 0x0, 0x0 };
diff --git a/drivers/staging/otus/80211core/cfunc.c b/drivers/staging/otus/80211core/cfunc.c
index e0a9f383c75..3b9341b13c0 100644
--- a/drivers/staging/otus/80211core/cfunc.c
+++ b/drivers/staging/otus/80211core/cfunc.c
@@ -1072,7 +1072,8 @@ u16_t zfFindCleanFrequency(zdev_t* dev, u32_t adhocMode)
zmw_get_wlan_dev(dev);
- if ((pBssInfo = wd->sta.bssList.head) == NULL)
+ pBssInfo = wd->sta.bssList.head;
+ if (pBssInfo == NULL)
{
if( adhocMode == ZM_ADHOCBAND_B || adhocMode == ZM_ADHOCBAND_G ||
adhocMode == ZM_ADHOCBAND_BG || adhocMode == ZM_ADHOCBAND_ABG )
diff --git a/drivers/staging/otus/80211core/cic.c b/drivers/staging/otus/80211core/cic.c
index c84f079e3d8..53c09a0935f 100644
--- a/drivers/staging/otus/80211core/cic.c
+++ b/drivers/staging/otus/80211core/cic.c
@@ -329,7 +329,8 @@ void zfCoreEvent(zdev_t* dev, u16_t event, u8_t* rsp)
if (wd->wlanMode == ZM_MODE_AP)
{
zmw_enter_critical_section(dev);
- if ((i=zfApFindSta(dev, (u16_t*)rsp)) != 0xffff)
+ i = zfApFindSta(dev, (u16_t*)rsp);
+ if (i != 0xffff)
{
zfRateCtrlTxFailEvent(dev, &wd->ap.staTable[i].rcCell, 0,(u32_t)zfPhyCtrlToRate(retryRate));
}
@@ -357,7 +358,8 @@ void zfCoreEvent(zdev_t* dev, u16_t event, u8_t* rsp)
if (wd->wlanMode == ZM_MODE_AP)
{
zmw_enter_critical_section(dev);
- if ((i=zfApFindSta(dev, (u16_t*)rsp)) != 0xffff)
+ i = zfApFindSta(dev, (u16_t*)rsp);
+ if (i != 0xffff)
{
zfRateCtrlTxFailEvent(dev, &wd->ap.staTable[i].rcCell, 0,(u32_t)zfPhyCtrlToRate(retryRate));
}
@@ -387,7 +389,8 @@ void zfCoreEvent(zdev_t* dev, u16_t event, u8_t* rsp)
if (wd->wlanMode == ZM_MODE_AP)
{
zmw_enter_critical_section(dev);
- if ((i=zfApFindSta(dev, (u16_t*)rsp)) != 0xffff)
+ i = zfApFindSta(dev, (u16_t*)rsp);
+ if (i != 0xffff)
{
zfRateCtrlTxSuccessEvent(dev, &wd->ap.staTable[i].rcCell, zfPhyCtrlToRate(retryRate));
}
diff --git a/drivers/staging/otus/80211core/cinit.c b/drivers/staging/otus/80211core/cinit.c
index 5f853ce7930..11823311e9c 100644
--- a/drivers/staging/otus/80211core/cinit.c
+++ b/drivers/staging/otus/80211core/cinit.c
@@ -622,7 +622,8 @@ u16_t zfTxGenWlanHeader(zdev_t* dev, zbuf_t* buf, u16_t* header, u16_t seq,
phyCtrl = 0xc0001; //PHY control L
/* WDS port checking */
- if ((wdsPort = (port - 0x20)) >= ZM_MAX_WDS_SUPPORT)
+ wdsPort = port - 0x20;
+ if (wdsPort >= ZM_MAX_WDS_SUPPORT)
{
wdsPort = 0;
}
diff --git a/drivers/staging/otus/80211core/cmm.c b/drivers/staging/otus/80211core/cmm.c
index 484e753df35..007ef3b606a 100644
--- a/drivers/staging/otus/80211core/cmm.c
+++ b/drivers/staging/otus/80211core/cmm.c
@@ -83,7 +83,8 @@ u16_t zfFindElement(zdev_t* dev, zbuf_t* buf, u8_t eid)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -107,10 +108,12 @@ u16_t zfFindElement(zdev_t* dev, zbuf_t* buf, u8_t eid)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == eid)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == eid)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -151,7 +154,8 @@ u16_t zfFindElement(zdev_t* dev, zbuf_t* buf, u8_t eid)
#if 1
elen = zmw_rx_buf_readb(dev, buf, offset+1);
#else
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -194,7 +198,8 @@ u16_t zfFindWifiElement(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -207,10 +212,12 @@ u16_t zfFindWifiElement(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_WIFI_IE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_WIFI_IE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -229,7 +236,8 @@ u16_t zfFindWifiElement(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
{
if ( subtype != 0xff )
{
- if ( (tmp = zmw_rx_buf_readb(dev, buf, offset+6)) == subtype )
+ tmp = zmw_rx_buf_readb(dev, buf, offset+6);
+ if (tmp == subtype)
{
return offset;
}
@@ -241,7 +249,8 @@ u16_t zfFindWifiElement(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
}
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -348,7 +357,8 @@ u16_t zfFindSuperGElement(zdev_t* dev, zbuf_t* buf, u8_t type)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -361,10 +371,12 @@ u16_t zfFindSuperGElement(zdev_t* dev, zbuf_t* buf, u8_t type)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_VENDOR_PRIVATE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_VENDOR_PRIVATE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -389,7 +401,8 @@ u16_t zfFindSuperGElement(zdev_t* dev, zbuf_t* buf, u8_t type)
#if 1
elen = zmw_rx_buf_readb(dev, buf, offset+1);
#else
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -411,7 +424,8 @@ u16_t zfFindXRElement(zdev_t* dev, zbuf_t* buf, u8_t type)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -424,10 +438,12 @@ u16_t zfFindXRElement(zdev_t* dev, zbuf_t* buf, u8_t type)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_VENDOR_PRIVATE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_VENDOR_PRIVATE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -447,7 +463,8 @@ u16_t zfFindXRElement(zdev_t* dev, zbuf_t* buf, u8_t type)
#if 1
elen = zmw_rx_buf_readb(dev, buf, offset+1);
#else
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -868,7 +885,8 @@ void zfSendMmFrame(zdev_t* dev, u8_t frameType, u16_t* dst,
zm_msg2_mm(ZM_LV_2, "Send mm frame, type=", frameType);
/* TBD : Maximum size of management frame */
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return;
@@ -1257,7 +1275,8 @@ void zfSendMmFrame(zdev_t* dev, u8_t frameType, u16_t* dst,
{
vap = (u16_t) p3;
- if ((aid = zfApFindSta(dev, dst)) != 0xffff)
+ aid = zfApFindSta(dev, dst);
+ if (aid != 0xffff)
{
zmw_enter_critical_section(dev);
/* Clear STA table */
@@ -1303,8 +1322,9 @@ void zfSendMmFrame(zdev_t* dev, u8_t frameType, u16_t* dst,
//zm_msg2_mm(ZM_LV_2, "buf->data=", buf->data);
#if 0
- if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -1366,7 +1386,8 @@ void zfProcessManagement(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* AddInf
if ((ra[0] & 0x1) != 1)
{
/* AP : Find virtual AP */
- if ((index = zfApFindSta(dev, ta)) != 0xffff)
+ index = zfApFindSta(dev, ta);
+ if (index != 0xffff)
{
vap = wd->ap.staTable[index].vap;
}
@@ -1534,7 +1555,8 @@ void zfProcessProbeReq(zdev_t* dev, zbuf_t* buf, u16_t* src)
}
/* check SSID */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID)) == 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID);
+ if (offset == 0xffff)
{
zm_msg0_mm(ZM_LV_3, "probe req SSID not found");
return;
@@ -1561,8 +1583,8 @@ void zfProcessProbeReq(zdev_t* dev, zbuf_t* buf, u16_t* src)
{
for (j=0; j<len; j++)
{
- if ((ch = zmw_rx_buf_readb(dev, buf, offset+2+j))
- != wd->ap.ssid[i][j])
+ ch = zmw_rx_buf_readb(dev, buf, offset+2+j);
+ if (ch != wd->ap.ssid[i][j])
{
break;
}
@@ -1814,7 +1836,8 @@ u16_t zfFindATHExtCap(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -1828,10 +1851,12 @@ u16_t zfFindATHExtCap(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_WIFI_IE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_WIFI_IE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -1850,7 +1875,8 @@ u16_t zfFindATHExtCap(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
{
if ( subtype != 0xff )
{
- if ( (tmp = zmw_rx_buf_readb(dev, buf, offset+6)) == subtype )
+ tmp = zmw_rx_buf_readb(dev, buf, offset+6);
+ if (tmp == subtype)
{
return offset;
}
@@ -1863,7 +1889,8 @@ u16_t zfFindATHExtCap(zdev_t* dev, zbuf_t* buf, u8_t type, u8_t subtype)
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -1884,7 +1911,8 @@ u16_t zfFindBrdcmMrvlRlnkExtCap(zdev_t* dev, zbuf_t* buf)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -1898,10 +1926,12 @@ u16_t zfFindBrdcmMrvlRlnkExtCap(zdev_t* dev, zbuf_t* buf)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_WIFI_IE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_WIFI_IE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -1930,7 +1960,8 @@ u16_t zfFindBrdcmMrvlRlnkExtCap(zdev_t* dev, zbuf_t* buf)
else if ((id = zmw_rx_buf_readb(dev, buf, offset)) == 0x7F)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -1941,7 +1972,8 @@ u16_t zfFindBrdcmMrvlRlnkExtCap(zdev_t* dev, zbuf_t* buf)
return 0xffff;
}
- if ((tmp = zmw_rx_buf_readb(dev, buf, offset+2)) == 0x01)
+ tmp = zmw_rx_buf_readb(dev, buf, offset+2);
+ if (tmp == 0x01)
{
return offset;
@@ -1949,7 +1981,8 @@ u16_t zfFindBrdcmMrvlRlnkExtCap(zdev_t* dev, zbuf_t* buf)
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -1970,7 +2003,8 @@ u16_t zfFindMarvelExtCap(zdev_t* dev, zbuf_t* buf)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -1984,10 +2018,12 @@ u16_t zfFindMarvelExtCap(zdev_t* dev, zbuf_t* buf)
while ((offset+2)<bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_WIFI_IE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_WIFI_IE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1))>(bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -2008,7 +2044,8 @@ u16_t zfFindMarvelExtCap(zdev_t* dev, zbuf_t* buf)
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -2029,7 +2066,8 @@ u16_t zfFindBroadcomExtCap(zdev_t* dev, zbuf_t* buf)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -2043,10 +2081,12 @@ u16_t zfFindBroadcomExtCap(zdev_t* dev, zbuf_t* buf)
while((offset+2) < bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == ZM_WLAN_EID_WIFI_IE)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == ZM_WLAN_EID_WIFI_IE)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) > (bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > (bufLen - offset))
{
/* Element length error */
return 0xffff;
@@ -2066,7 +2106,8 @@ u16_t zfFindBroadcomExtCap(zdev_t* dev, zbuf_t* buf)
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
@@ -2089,7 +2130,8 @@ u16_t zfFindRlnkExtCap(zdev_t* dev, zbuf_t* buf)
/* Get offset of first element */
subType = (zmw_rx_buf_readb(dev, buf, 0) >> 4);
- if ((offset = zgElementOffsetTable[subType]) == 0xff)
+ offset = zgElementOffsetTable[subType];
+ if (offset == 0xff)
{
zm_assert(0);
}
@@ -2103,10 +2145,12 @@ u16_t zfFindRlnkExtCap(zdev_t* dev, zbuf_t* buf)
while((offset+2) < bufLen) // including element ID and length (2bytes)
{
/* Search target element */
- if ((id = zmw_rx_buf_readb(dev, buf, offset)) == 0x7F)
+ id = zmw_rx_buf_readb(dev, buf, offset);
+ if (id == 0x7F)
{
/* Bingo */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) > (bufLen - offset))
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen > bufLen - offset)
{
/* Element length error */
return 0xffff;
@@ -2117,7 +2161,8 @@ u16_t zfFindRlnkExtCap(zdev_t* dev, zbuf_t* buf)
return 0xffff;
}
- if ((tmp = zmw_rx_buf_readb(dev, buf, offset+2)) == 0x01)
+ tmp = zmw_rx_buf_readb(dev, buf, offset+2);
+ if (tmp == 0x01)
{
return offset;
@@ -2125,7 +2170,8 @@ u16_t zfFindRlnkExtCap(zdev_t* dev, zbuf_t* buf)
}
/* Advance to next element */
- if ((elen = zmw_rx_buf_readb(dev, buf, offset+1)) == 0)
+ elen = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (elen == 0)
{
return 0xffff;
}
diff --git a/drivers/staging/otus/80211core/cmmap.c b/drivers/staging/otus/80211core/cmmap.c
index 7f09fded459..8ec3830e843 100644
--- a/drivers/staging/otus/80211core/cmmap.c
+++ b/drivers/staging/otus/80211core/cmmap.c
@@ -141,7 +141,8 @@ u16_t zfApGetSTAInfo(zdev_t* dev, u16_t* addr, u16_t* state, u8_t* vap)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
*vap = wd->ap.staTable[id].vap;
*state = wd->ap.staTable[id++].state;
@@ -163,7 +164,8 @@ void zfApGetStaQosType(zdev_t* dev, u16_t* addr, u8_t* qosType)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
*qosType = wd->ap.staTable[id].qosType;
}
@@ -189,7 +191,8 @@ void zfApGetStaTxRateAndQosType(zdev_t* dev, u16_t* addr, u32_t* phyCtrl,
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
rate = (u8_t)zfRateCtrlGetTxRate(dev, &wd->ap.staTable[id].rcCell, rcProbingFlag);
#ifdef ZM_AP_DEBUG
@@ -234,7 +237,8 @@ void zfApGetStaEncryType(zdev_t* dev, u16_t* addr, u8_t* encryType)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
*encryType = wd->ap.staTable[id].encryMode;
}
@@ -260,7 +264,8 @@ void zfApGetStaWpaIv(zdev_t* dev, u16_t* addr, u16_t* iv16, u32_t* iv32)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
*iv16 = wd->ap.staTable[id].iv16;
*iv32 = wd->ap.staTable[id].iv32;
@@ -289,7 +294,8 @@ void zfApSetStaWpaIv(zdev_t* dev, u16_t* addr, u16_t iv16, u32_t iv32)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
wd->ap.staTable[id].iv16 = iv16;
wd->ap.staTable[id].iv32 = iv32;
@@ -321,7 +327,8 @@ void zfApClearStaKey(zdev_t* dev, u16_t* addr)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
/* Turn off STA's key information */
zfHpRemoveKey(dev, id+1);
@@ -348,7 +355,8 @@ void zfApGetStaCencIvAndKeyIdx(zdev_t* dev, u16_t* addr, u32_t *iv, u8_t *keyIdx
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
*iv++ = wd->ap.staTable[id].txiv[0];
*iv++ = wd->ap.staTable[id].txiv[1];
@@ -379,7 +387,8 @@ void zfApSetStaCencIv(zdev_t* dev, u16_t* addr, u32_t *iv)
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
wd->ap.staTable[id].txiv[0] = *iv++;
wd->ap.staTable[id].txiv[1] = *iv++;
@@ -539,7 +548,8 @@ u16_t zfApBufferPsFrame(zdev_t* dev, zbuf_t* buf, u16_t port)
{
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
if (wd->ap.staTable[id].psMode == 1)
{
@@ -603,7 +613,8 @@ u16_t zfApGetSTAInfoAndUpdatePs(zdev_t* dev, u16_t* addr, u16_t* state,
//psMode=0;
#endif
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
if (psMode != 0)
{
@@ -648,7 +659,8 @@ u16_t zfApGetSTAInfoAndUpdatePs(zdev_t* dev, u16_t* addr, u16_t* state,
while (1)
{
- if ((psBuf = zfQueueGetWithMac(dev, wd->ap.uapsdQ, (u8_t*)addr, &mb)) != NULL)
+ psBuf = zfQueueGetWithMac(dev, wd->ap.uapsdQ, (u8_t*)addr, &mb);
+ if (psBuf != NULL)
{
zfTxSendEth(dev, psBuf, 0, ZM_EXTERNAL_ALLOC_BUF, 0);
}
@@ -730,7 +742,8 @@ u16_t zfApAddSta(zdev_t* dev, u16_t* addr, u16_t state, u16_t apId, u8_t type,
zmw_enter_critical_section(dev);
- if ((index = zfApFindSta(dev, addr)) != 0xffff)
+ index = zfApFindSta(dev, addr);
+ if (index != 0xffff)
{
zm_msg0_mm(ZM_LV_2, "found");
/* Update STA state */
@@ -963,7 +976,8 @@ void zfApProcessBeacon(zdev_t* dev, zbuf_t* buf)
zm_msg0_mm(ZM_LV_3, "Rx beacon");
/* update Non-ERP flag(wd->ap.nonErpObss) */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP)) == 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP);
+ if (offset == 0xffff)
{
/* 11b OBSS */
wd->ap.protectedObss++;
@@ -1046,7 +1060,8 @@ void zfApProcessAuth(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
if (seq == 1)
{
/* AP : update STA to auth */
- if ((ret = zfApAddSta(dev, src, ZM_STATE_AUTH, apId, 0, 0, 0)) != 0xffff)
+ ret = zfApAddSta(dev, src, ZM_STATE_AUTH, apId, 0, 0, 0);
+ if (ret != 0xffff)
{
/* AP : call zfwAuthNotify() for host to judge */
//zfwAuthNotify(dev, src);
@@ -1177,12 +1192,14 @@ void zfApProcessAsocReq(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
zmw_get_wlan_dev(dev);
/* AP : check SSID */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID);
+ if (offset != 0xffff)
{
k = 0;
for (j = 0; j < wd->ap.vapNumber; j++)
{
- if ((tmp = zmw_buf_readb(dev, buf, offset+1))
+ tmp = zmw_buf_readb(dev, buf, offset+1);
+ if (tmp
!= wd->ap.ssidLen[j])
{
k++;
@@ -1198,7 +1215,8 @@ void zfApProcessAsocReq(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
{
for (i=0; i<wd->ap.ssidLen[j]; i++)
{
- if ((tmp = zmw_buf_readb(dev, buf, offset+2+i))
+ tmp = zmw_buf_readb(dev, buf, offset+2+i);
+ if (tmp
!= wd->ap.ssid[j][i])
{
break;
@@ -1222,13 +1240,15 @@ void zfApProcessAsocReq(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
/* TODO : check capability */
/* AP : check support rate */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE);
+ if (offset != 0xffff)
{
/* 11g STA */
staType = 1;
}
//CWYang(+)
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY);
+ if (offset != 0xffff)
{
/* 11n STA */
staType = 2;
@@ -1251,7 +1271,8 @@ void zfApProcessAsocReq(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
/* AP : check 11h */
/* AP : check WME */
- if ((offset = zfFindWifiElement(dev, buf, 2, 0)) != 0xffff)
+ offset = zfFindWifiElement(dev, buf, 2, 0);
+ if (offset != 0xffff)
{
/* WME STA */
qosType = 1;
@@ -1265,7 +1286,8 @@ void zfApProcessAsocReq(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
if (wd->ap.wpaSupport[apId] == 1)
{
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_WPA_IE)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_WPA_IE);
+ if (offset != 0xffff)
{
/* get WPA IE */
u8_t length = zmw_rx_buf_readb(dev, buf, offset+1);
@@ -1406,12 +1428,14 @@ void zfApStoreAsocReqIe(zdev_t* dev, zbuf_t* buf, u16_t aid)
offset = 28;
/* supported rates */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_SUPPORT_RATE)) == 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SUPPORT_RATE);
+ if (offset == 0xffff)
return;
length = zmw_rx_buf_readb(dev, buf, offset + 1);
/* extended supported rates */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE)) == 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE);
+ if (offset == 0xffff)
return;
length = zmw_rx_buf_readb(dev, buf, offset + 1);
@@ -1426,7 +1450,8 @@ void zfApStoreAsocReqIe(zdev_t* dev, zbuf_t* buf, u16_t aid)
/* QoS */
/* HT capabilities: 28 octets */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY)) != 0xffff) {
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY);
+ if (offset != 0xffff) {
/* atheros pre n */
htcap = (u8_t *)&wd->ap.ie[aid].HtCap;
htcap[0] = zmw_rx_buf_readb(dev, buf, offset);
@@ -1479,7 +1504,8 @@ void zfApProcessDeauth(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
zmw_enter_critical_section(dev);
/* AP : if SA=associated STA then deauthenticate STA */
- if ((aid = zfApFindSta(dev, src)) != 0xffff)
+ aid = zfApFindSta(dev, src);
+ if (aid != 0xffff)
{
/* Clear STA table */
wd->ap.staTable[aid].valid = 0;
@@ -1500,7 +1526,8 @@ void zfApProcessDisasoc(zdev_t* dev, zbuf_t* buf, u16_t* src, u16_t apId)
zmw_enter_critical_section(dev);
/* AP : if SA=associated STA then deauthenticate STA */
- if ((aid = zfApFindSta(dev, src)) != 0xffff)
+ aid = zfApFindSta(dev, src);
+ if (aid != 0xffff)
{
/* Clear STA table */
wd->ap.staTable[aid].valid = 0;
@@ -1674,7 +1701,8 @@ u16_t zfApAddIeTim(zdev_t* dev, zbuf_t* buf, u16_t offset, u16_t vap)
dst[0] = zmw_tx_buf_readh(dev, psBuf, 0);
dst[1] = zmw_tx_buf_readh(dev, psBuf, 2);
dst[2] = zmw_tx_buf_readh(dev, psBuf, 4);
- if ((aid = zfApFindSta(dev, dst)) != 0xffff)
+ aid = zfApFindSta(dev, dst);
+ if (aid != 0xffff)
{
if (wd->ap.staTable[aid].psMode != 0)
{
@@ -1896,7 +1924,8 @@ void zfApSendBeacon(zdev_t* dev)
zm_msg1_mm(ZM_LV_2, "Send beacon, vap=", vap);
/* TBD : Maximum size of beacon */
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc beacon buf Fail!");
return;
@@ -2101,8 +2130,8 @@ u16_t zfIntrabssForward(zdev_t* dev, zbuf_t* buf, u8_t srcVap)
if ((asocFlag == 1) || ((dst[0]&0x1) == 0x1))
{
/* Allocate frame */
- if ((txBuf = zfwBufAllocate(dev, ZM_RX_FRAME_SIZE))
- == NULL)
+ txBuf = zfwBufAllocate(dev, ZM_RX_FRAME_SIZE);
+ if (txBuf == NULL)
{
zm_msg0_rx(ZM_LV_1, "Alloc intra-bss buf Fail!");
goto zlAllocError;
@@ -2133,7 +2162,8 @@ u16_t zfIntrabssForward(zdev_t* dev, zbuf_t* buf, u8_t srcVap)
/* Transmit frame */
/* Return error if port is disabled */
- if ((err = zfTxPortControl(dev, txBuf, vap)) == ZM_PORT_DISABLED)
+ err = zfTxPortControl(dev, txBuf, vap);
+ if (err == ZM_PORT_DISABLED)
{
err = ZM_ERR_TX_PORT_DISABLED;
goto zlTxError;
@@ -2141,7 +2171,8 @@ u16_t zfIntrabssForward(zdev_t* dev, zbuf_t* buf, u8_t srcVap)
#if 1
/* AP : Buffer frame for power saving STA */
- if ((ret = zfApBufferPsFrame(dev, txBuf, vap)) == 0)
+ ret = zfApBufferPsFrame(dev, txBuf, vap);
+ if (ret == 0)
{
/* forward frame if not been buffered */
#if 1
@@ -2177,7 +2208,8 @@ struct zsMicVar* zfApGetRxMicKey(zdev_t* dev, zbuf_t* buf)
macAddr[1] = sa[2] + (sa[3] << 8);
macAddr[2] = sa[4] + (sa[5] << 8);
- if ((id = zfApFindSta(dev, macAddr)) != 0xffff)
+ id = zfApFindSta(dev, macAddr);
+ if (id != 0xffff)
return (&wd->ap.staTable[id].rxMicKey);
return NULL;
@@ -2369,7 +2401,8 @@ void zfApSendFailure(zdev_t* dev, u8_t* addr)
staAddr[1] = addr[2] + (((u16_t)addr[3])<<8);
staAddr[2] = addr[4] + (((u16_t)addr[5])<<8);
zmw_enter_critical_section(dev);
- if ((id = zfApFindSta(dev, staAddr)) != 0xffff)
+ id = zfApFindSta(dev, staAddr);
+ if (id != 0xffff)
{
/* Send failture : Add 3 minutes to inactive time that will */
/* will make STA been kicked out soon */
diff --git a/drivers/staging/otus/80211core/cmmsta.c b/drivers/staging/otus/80211core/cmmsta.c
index c3fd47529c1..0fda30d05ed 100644
--- a/drivers/staging/otus/80211core/cmmsta.c
+++ b/drivers/staging/otus/80211core/cmmsta.c
@@ -602,7 +602,8 @@ void zfStaProtErpMonitor(zdev_t* dev, zbuf_t* buf)
if (zfRxBufferEqualToStr(dev, buf, bssid, ZM_WLAN_HEADER_A2_OFFSET, 6))
{
- if ( (offset=zfFindElement(dev, buf, ZM_WLAN_EID_ERP)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP);
+ if (offset != 0xffff)
{
erp = zmw_rx_buf_readb(dev, buf, offset+2);
@@ -628,7 +629,8 @@ void zfStaProtErpMonitor(zdev_t* dev, zbuf_t* buf)
}
//Check the existence of Non-N AP
//Follow the check the "pBssInfo->EnableHT"
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY);
+ if (offset != 0xffff)
{}
else if ((offset = zfFindElement(dev, buf, ZM_WLAN_PREN2_EID_HTCAPABILITY)) != 0xffff)
{}
@@ -658,9 +660,11 @@ void zfStaUpdateWmeParameter(zdev_t* dev, zbuf_t* buf)
if (wd->sta.wmeConnected != 0)
{
/* Find WME parameter element */
- if ((offset = zfFindWifiElement(dev, buf, 2, 1)) != 0xffff)
+ offset = zfFindWifiElement(dev, buf, 2, 1);
+ if (offset != 0xffff)
{
- if ((len = zmw_rx_buf_readb(dev, buf, offset+1)) >= 7)
+ len = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (len >= 7)
{
rxWmeParameterSetCount=zmw_rx_buf_readb(dev, buf, offset+8);
if (rxWmeParameterSetCount != wd->sta.wmeParameterSetCount)
@@ -741,7 +745,8 @@ void zfStaUpdateDot11HDFS(zdev_t* dev, zbuf_t* buf)
*/
/* get EID(Channel Switch Announcement) */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_CHANNEL_SWITCH_ANNOUNCE)) == 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_CHANNEL_SWITCH_ANNOUNCE);
+ if (offset == 0xffff)
{
//zm_debug_msg0("EID(Channel Switch Announcement) not found");
return;
@@ -1216,7 +1221,8 @@ void zfStaSendBeacon(zdev_t* dev)
//zm_debug_msg0("\n");
/* TBD : Maximum size of beacon */
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_debug_msg0("Allocate beacon buffer failed");
return;
@@ -1370,7 +1376,8 @@ struct zsBssInfo* zfStaFindBssInfo(zdev_t* dev, zbuf_t* buf, struct zsWlanProbeR
zmw_get_wlan_dev(dev);
- if ((pBssInfo = wd->sta.bssList.head) == NULL)
+ pBssInfo = wd->sta.bssList.head;
+ if (pBssInfo == NULL)
{
return NULL;
}
@@ -1420,8 +1427,10 @@ struct zsBssInfo* zfStaFindBssInfo(zdev_t* dev, zbuf_t* buf, struct zsWlanProbeR
/* Check channel */
/* Add check channel to solve the bug #31222 */
if (isMatched) {
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_DS)) != 0xffff) {
- if ((length = zmw_rx_buf_readb(dev, buf, offset+1)) == 1) {
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_DS);
+ if (offset != 0xffff) {
+ length = zmw_rx_buf_readb(dev, buf, offset+1);
+ if (length == 1) {
channel = zmw_rx_buf_readb(dev, buf, offset+2);
if (zfHpIsAllowedChannel(dev, zfChNumToFreq(dev, channel, 0)) == 0) {
frequency = 0;
@@ -1473,7 +1482,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get SSID */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID)) == 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID);
+ if (offset == 0xffff)
{
zm_debug_msg0("EID(SSID) not found");
goto zlError;
@@ -1506,7 +1516,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
zfCopyFromRxBuffer(dev, buf, pBssInfo->ssid, offset, length+2);
/* get DS parameter */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_DS)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_DS);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if ( length != 1 )
@@ -1590,7 +1601,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
pBssInfo->frameBodysize = accumulateLen;
/* get supported rates */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_SUPPORT_RATE)) == 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SUPPORT_RATE);
+ if (offset == 0xffff)
{
zm_debug_msg0("EID(supported rates) not found");
goto zlError;
@@ -1607,7 +1619,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
/* get Country information */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_COUNTRY)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_COUNTRY);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_COUNTRY_INFO_SIZE)
@@ -1625,13 +1638,15 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get ERP information */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP);
+ if (offset != 0xffff)
{
pBssInfo->erp = zmw_rx_buf_readb(dev, buf, offset+2);
}
/* get extended supported rates */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_RATE);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_SUPP_RATES_IE_SIZE)
@@ -1648,7 +1663,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get WPA IE */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_WPA_IE)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_WPA_IE);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_IE_SIZE)
@@ -1664,7 +1680,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get WPS IE */
- if ((offset = zfFindWifiElement(dev, buf, 4, 0xff)) != 0xffff)
+ offset = zfFindWifiElement(dev, buf, 4, 0xff);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_WPS_IE_SIZE )
@@ -1679,19 +1696,22 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get SuperG IE */
- if ((offset = zfFindSuperGElement(dev, buf, ZM_WLAN_EID_VENDOR_PRIVATE)) != 0xffff)
+ offset = zfFindSuperGElement(dev, buf, ZM_WLAN_EID_VENDOR_PRIVATE);
+ if (offset != 0xffff)
{
pBssInfo->apCap |= ZM_SuperG_AP;
}
/* get XR IE */
- if ((offset = zfFindXRElement(dev, buf, ZM_WLAN_EID_VENDOR_PRIVATE)) != 0xffff)
+ offset = zfFindXRElement(dev, buf, ZM_WLAN_EID_VENDOR_PRIVATE);
+ if (offset != 0xffff)
{
pBssInfo->apCap |= ZM_XR_AP;
}
/* get RSN IE */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_RSN_IE)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_RSN_IE);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_IE_SIZE)
@@ -1707,7 +1727,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
#ifdef ZM_ENABLE_CENC
/* get CENC IE */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_CENC_IE)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_CENC_IE);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
if (length > ZM_MAX_IE_SIZE )
@@ -1726,7 +1747,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
/* get WME Parameter IE, probe rsp may contain WME parameter element */
//if ( wd->bQoSEnable )
{
- if ((offset = zfFindWifiElement(dev, buf, 2, 1)) != 0xffff)
+ offset = zfFindWifiElement(dev, buf, 2, 1);
+ if (offset != 0xffff)
{
apQosInfo = zmw_rx_buf_readb(dev, buf, offset+8) & 0x80;
pBssInfo->wmeSupport = 1 | apQosInfo;
@@ -1742,7 +1764,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
}
//CWYang(+)
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY);
+ if (offset != 0xffff)
{
/* 11n AP */
pBssInfo->EnableHT = 1;
@@ -1792,7 +1815,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
pBssInfo->EnableHT = 0;
}
/* HT information */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_HT_CAPABILITY);
+ if (offset != 0xffff)
{
/* atheros pre n */
pBssInfo->extChOffset = zmw_rx_buf_readb(dev, buf, offset+2) & 0x03;
@@ -1848,7 +1872,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get Marvel Extended Capability */
- if ((offset = zfFindMarvelExtCap(dev, buf)) != 0xffff)
+ offset = zfFindMarvelExtCap(dev, buf);
+ if (offset != 0xffff)
{
pBssInfo->marvelAp = 1;
}
@@ -1858,7 +1883,8 @@ u8_t zfStaInitBssInfo(zdev_t* dev, zbuf_t* buf,
}
/* get ATIM window */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_IBSS)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_IBSS);
+ if (offset != 0xffff)
{
pBssInfo->atimWindow = zmw_rx_buf_readh(dev, buf,offset+2);
}
@@ -2017,7 +2043,8 @@ zlUpdateRssi:
pBssInfo->tick = wd->tick;
/* Update ERP information */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_ERP);
+ if (offset != 0xffff)
{
pBssInfo->erp = zmw_rx_buf_readb(dev, buf, offset+2);
}
@@ -2116,7 +2143,8 @@ void zfStaProcessBeacon(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* AddInfo
#if 0
else if ( wd->sta.oppositeCount == 0 )
{ /* IBSS merge if SSID matched */
- if ( (offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID);
+ if (offset != 0xffff)
{
if ( (wd->sta.ssidLen == zmw_buf_readb(dev, buf, offset+1))&&
(zfRxBufferEqualToStr(dev, buf, wd->sta.ssid,
@@ -2410,7 +2438,8 @@ void zfStaProcessAsocRsp(zdev_t* dev, zbuf_t* buf)
if ((wd->sta.wmeEnabled & ZM_STA_WME_ENABLE_BIT) != 0) //WME enabled
{
/* Asoc rsp may contain WME parameter element */
- if ((offset = zfFindWifiElement(dev, buf, 2, 1)) != 0xffff)
+ offset = zfFindWifiElement(dev, buf, 2, 1);
+ if (offset != 0xffff)
{
zm_debug_msg0("WME enable");
wd->sta.wmeConnected = 1;
@@ -2605,7 +2634,8 @@ void zfStaStoreAsocRspIe(zdev_t* dev, zbuf_t* buf)
return;
}
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_HT_CAPABILITY);
+ if (offset != 0xffff)
{
/* atheros pre n */
zm_debug_msg0("atheros pre n");
@@ -2645,7 +2675,8 @@ void zfStaStoreAsocRspIe(zdev_t* dev, zbuf_t* buf)
asocBw40 = (u8_t)((wd->sta.ie.HtCap.HtCapInfo & HTCAP_SupChannelWidthSet) >> 1);
/* HT information */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_HT_CAPABILITY)) != 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_EXTENDED_HT_CAPABILITY);
+ if (offset != 0xffff)
{
/* atheros pre n */
zm_debug_msg0("atheros pre n HTINFO");
@@ -2815,7 +2846,8 @@ void zfStaProcessProbeReq(zdev_t* dev, zbuf_t* buf, u16_t* src)
}
/* check SSID */
- if ((offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID)) == 0xffff)
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_SSID);
+ if (offset == 0xffff)
{
zm_msg0_mm(ZM_LV_3, "probe req SSID not found");
return;
@@ -3774,7 +3806,8 @@ static struct zsBssInfo* zfInfraFindAPToConnect(zdev_t* dev,
}
/* Skip if AP in blocking list */
- if ((ret = zfStaIsApInBlockingList(dev, pBssInfo->bssid)) == TRUE)
+ ret = zfStaIsApInBlockingList(dev, pBssInfo->bssid);
+ if (ret == TRUE)
{
zm_msg0_mm(ZM_LV_0, "Candidate AP in blocking List, skip if there's stilla choice!");
pNowBssInfo = pBssInfo;
@@ -5007,7 +5040,8 @@ void zfSendNullData(zdev_t* dev, u8_t type)
zmw_get_wlan_dev(dev);
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return;
@@ -5056,8 +5090,9 @@ void zfSendNullData(zdev_t* dev, u8_t type)
/*increase unicast frame counter*/
wd->commTally.txUnicastFrm++;
- if ((err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -5083,7 +5118,8 @@ void zfSendPSPoll(zdev_t* dev)
zmw_get_wlan_dev(dev);
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return;
@@ -5107,8 +5143,9 @@ void zfSendPSPoll(zdev_t* dev)
// goto zlError;
//}
- if ((err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -5134,7 +5171,8 @@ void zfSendBA(zdev_t* dev, u16_t start_seq, u8_t *bitmap)
zmw_get_wlan_dev(dev);
- if ((buf = zfwBufAllocate(dev, 1024)) == NULL)
+ buf = zfwBufAllocate(dev, 1024);
+ if (buf == NULL)
{
zm_msg0_mm(ZM_LV_0, "Alloc mm buf Fail!");
return;
@@ -5166,8 +5204,9 @@ void zfSendBA(zdev_t* dev, u16_t start_seq, u8_t *bitmap)
offset++;
}
- if ((err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
diff --git a/drivers/staging/otus/80211core/coid.c b/drivers/staging/otus/80211core/coid.c
index 0524e1f36f0..229aed8f889 100644
--- a/drivers/staging/otus/80211core/coid.c
+++ b/drivers/staging/otus/80211core/coid.c
@@ -553,7 +553,8 @@ u8_t zfiWlanSetKey(zdev_t* dev, struct zsKeyInfo keyInfo)
if (keyInfo.flag & ZM_KEY_FLAG_PK)
{
/* Find STA's information */
- if ((id = zfApFindSta(dev, keyInfo.macAddr)) == 0xffff)
+ id = zfApFindSta(dev, keyInfo.macAddr);
+ if (id == 0xffff)
{
/* Can't STA in the staTable */
return ZM_STATUS_FAILURE;
diff --git a/drivers/staging/otus/80211core/cpsmgr.c b/drivers/staging/otus/80211core/cpsmgr.c
index 98e1f0cc072..32313beba78 100644
--- a/drivers/staging/otus/80211core/cpsmgr.c
+++ b/drivers/staging/otus/80211core/cpsmgr.c
@@ -602,7 +602,8 @@ void zfPowerSavingMgrProcessBeacon(zdev_t* dev, zbuf_t* buf)
wd->sta.psMgr.isSleepAllowed = 1;
- if ( (offset=zfFindElement(dev, buf, ZM_WLAN_EID_TIM)) != 0xffff )
+ offset = zfFindElement(dev, buf, ZM_WLAN_EID_TIM);
+ if (offset != 0xffff)
{
length = zmw_rx_buf_readb(dev, buf, offset+1);
diff --git a/drivers/staging/otus/80211core/ctxrx.c b/drivers/staging/otus/80211core/ctxrx.c
index 4e7f4bd86f4..a127196260e 100644
--- a/drivers/staging/otus/80211core/ctxrx.c
+++ b/drivers/staging/otus/80211core/ctxrx.c
@@ -109,7 +109,8 @@ void zfGetRxIvIcvLength(zdev_t* dev, zbuf_t* buf, u8_t vap, u16_t* pIvLen,
addr[2] = zmw_rx_buf_readh(dev, buf, ZM_WLAN_HEADER_A2_OFFSET+4);
/* Find STA's information */
- if ((id = zfApFindSta(dev, addr)) != 0xffff)
+ id = zfApFindSta(dev, addr);
+ if (id != 0xffff)
{
if (wd->ap.staTable[id].encryMode == ZM_TKIP)
{
@@ -132,7 +133,8 @@ void zfGetRxIvIcvLength(zdev_t* dev, zbuf_t* buf, u8_t vap, u16_t* pIvLen,
}
}
/* WDS port checking */
- if ((wdsPort = vap - 0x20) >= ZM_MAX_WDS_SUPPORT)
+ wdsPort = vap - 0x20;
+ if (wdsPort >= ZM_MAX_WDS_SUPPORT)
{
wdsPort = 0;
}
@@ -741,8 +743,9 @@ u16_t zfiTxSend80211Mgmt(zdev_t* dev, zbuf_t* buf, u16_t port)
zfwBufRemoveHead(dev, buf, 24);
- if ((err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
- ZM_EXTERNAL_ALLOC_BUF, 0, 0)) != ZM_SUCCESS)
+ err = zfHpSend(dev, header, hlen, NULL, 0, NULL, 0, buf, 0,
+ ZM_EXTERNAL_ALLOC_BUF, 0, 0);
+ if (err != ZM_SUCCESS)
{
goto zlError;
}
@@ -799,7 +802,8 @@ u16_t zfiTxSendEth(zdev_t* dev, zbuf_t* buf, u16_t port)
ZM_PERFORMANCE_TX_MSDU(dev, wd->tick);
zm_msg1_tx(ZM_LV_2, "zfiTxSendEth(), port=", port);
/* Return error if port is disabled */
- if ((err = zfTxPortControl(dev, buf, port)) == ZM_PORT_DISABLED)
+ err = zfTxPortControl(dev, buf, port);
+ if (err == ZM_PORT_DISABLED)
{
err = ZM_ERR_TX_PORT_DISABLED;
goto zlError;
@@ -809,7 +813,8 @@ u16_t zfiTxSendEth(zdev_t* dev, zbuf_t* buf, u16_t port)
if ((wd->wlanMode == ZM_MODE_AP) && (port < 0x20))
{
/* AP : Buffer frame for power saving STA */
- if ((ret = zfApBufferPsFrame(dev, buf, port)) == 1)
+ ret = zfApBufferPsFrame(dev, buf, port);
+ if (ret == 1)
{
return ZM_SUCCESS;
}
@@ -1119,7 +1124,8 @@ u16_t zfTxSendEth(zdev_t* dev, zbuf_t* buf, u16_t port, u16_t bufType, u16_t fla
i = 0;
while( frameLen > 0 )
{
- if ((frag.buf[i] = zfwBufAllocate(dev, fragLen+32)) != NULL)
+ frag.buf[i] = zfwBufAllocate(dev, fragLen+32);
+ if (frag.buf[i] != NULL)
{
frag.bufType[i] = ZM_INTERNAL_ALLOC_BUF;
frag.seq[i] = frag.seq[0] + i;
@@ -1276,7 +1282,8 @@ void zfCoreRecv(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
bssid[2] = zmw_buf_readh(dev, buf, 20);
/* Validate Rx frame */
- if ((ret = zfWlanRxValidate(dev, buf)) != ZM_SUCCESS)
+ ret = zfWlanRxValidate(dev, buf);
+ if (ret != ZM_SUCCESS)
{
zm_msg1_rx(ZM_LV_1, "Rx invalid:", ret);
goto zlError;
@@ -1301,7 +1308,8 @@ void zfCoreRecv(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
#endif
/* Filter Rx frame */
- if ((ret = zfWlanRxFilter(dev, buf)) != ZM_SUCCESS)
+ ret = zfWlanRxFilter(dev, buf);
+ if (ret != ZM_SUCCESS)
{
zm_msg1_rx(ZM_LV_1, "Rx duplicated:", ret);
goto zlError;
@@ -2086,7 +2094,8 @@ void zfiRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
//zm_msg0_rx(ZM_LV_0, "Rx data");
if (wd->wlanMode == ZM_MODE_AP)
{
- if ((ret = zfApUpdatePsBit(dev, buf, &vap, &uapsdTrig)) != ZM_SUCCESS)
+ ret = zfApUpdatePsBit(dev, buf, &vap, &uapsdTrig);
+ if (ret != ZM_SUCCESS)
{
zfwBufFree(dev, buf, 0);
return;
@@ -2115,7 +2124,8 @@ void zfiRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
for (ii=0; ii<pktNum; ii++)
{
//if ((psBuf = zfQueueGet(dev, wd->ap.uapsdQ)) != NULL)
- if ((psBuf = zfQueueGetWithMac(dev, wd->ap.uapsdQ, src, &mb)) != NULL)
+ psBuf = zfQueueGetWithMac(dev, wd->ap.uapsdQ, src, &mb);
+ if (psBuf != NULL)
{
if ((ii+1) == pktNum)
{
@@ -2232,7 +2242,8 @@ void zfiRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
}
else
{
- if ( (buf = zfDefragment(dev, buf, &bIsDefrag, addInfo)) == NULL )
+ buf = zfDefragment(dev, buf, &bIsDefrag, addInfo);
+ if (buf == NULL)
{
/* In this case, the buffer has been freed in zfDefragment */
return;
@@ -2836,7 +2847,8 @@ void zfiRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
zfwBufRemoveHead(dev, buf, 18+offset);
#endif // ZM_ENABLE_NATIVE_WIFI
#if 1
- if ((ret = zfIntrabssForward(dev, buf, vap)) == 1)
+ ret = zfIntrabssForward(dev, buf, vap);
+ if (ret == 1)
{
/* Free Rx buffer if intra-BSS unicast frame */
zm_msg0_rx(ZM_LV_2, "Free intra-BSS unicast frame");
@@ -3037,7 +3049,8 @@ u16_t zfWlanRxValidate(zdev_t* dev, zbuf_t* buf)
}
else if ( wd->wlanMode != ZM_MODE_PSEUDO )
{
- if ( (ret=zfStaRxValidateFrame(dev, buf))!=ZM_SUCCESS )
+ ret = zfStaRxValidateFrame(dev, buf);
+ if (ret != ZM_SUCCESS)
{
//zm_debug_msg1("discard frame, code = ", ret);
return ret;
@@ -3787,12 +3800,14 @@ void zfPushVtxq(zdev_t* dev)
/* 2006.12.20, Serve Management queue */
while( zfHpGetFreeTxdCount(dev) > 0 )
{
- if ((buf = zfGetVmmq(dev)) != 0)
+ buf = zfGetVmmq(dev);
+ if (buf != 0)
{
txed = 1;
//zm_debug_msg2("send buf = ", buf);
- if ((err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
- ZM_INTERNAL_ALLOC_BUF, 0, 0xff)) != ZM_SUCCESS)
+ err = zfHpSend(dev, NULL, 0, NULL, 0, NULL, 0, buf, 0,
+ ZM_INTERNAL_ALLOC_BUF, 0, 0xff);
+ if (err != ZM_SUCCESS)
{
zfwBufFree(dev, buf, 0);
}
@@ -3831,9 +3846,11 @@ void zfPushVtxq(zdev_t* dev)
/* Service VTxQ[3] */
for (i=0; i<4; i++)
{
- if ((freeTxd = zfHpGetFreeTxdCount(dev)) >= 3)
+ freeTxd = zfHpGetFreeTxdCount(dev);
+ if (freeTxd >= 3)
{
- if ((buf = zfGetVtxq(dev, 3)) != 0)
+ buf = zfGetVtxq(dev, 3);
+ if (buf != 0)
{
txed = 1;
//zm_debug_msg2("send buf = ", buf);
@@ -3850,9 +3867,11 @@ void zfPushVtxq(zdev_t* dev)
/* Service VTxQ[2] */
for (i=0; i<3; i++)
{
- if ((freeTxd = zfHpGetFreeTxdCount(dev)) >= (zfHpGetMaxTxdCount(dev)*1/4))
+ freeTxd = zfHpGetFreeTxdCount(dev);
+ if (freeTxd >= (zfHpGetMaxTxdCount(dev)*1/4))
{
- if ((buf = zfGetVtxq(dev, 2)) != 0)
+ buf = zfGetVtxq(dev, 2);
+ if (buf != 0)
{
txed = 1;
zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0);
@@ -3860,7 +3879,8 @@ void zfPushVtxq(zdev_t* dev)
}
if (wd->sta.ac0PriorityHigherThanAc2 == 1)
{
- if ((buf = zfGetVtxq(dev, 0)) != 0)
+ buf = zfGetVtxq(dev, 0);
+ if (buf != 0)
{
txed = 1;
zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0);
@@ -3877,9 +3897,11 @@ void zfPushVtxq(zdev_t* dev)
/* Service VTxQ[0] */
for (i=0; i<2; i++)
{
- if ((freeTxd = zfHpGetFreeTxdCount(dev)) >= (zfHpGetMaxTxdCount(dev)*2/4))
+ freeTxd = zfHpGetFreeTxdCount(dev);
+ if (freeTxd >= (zfHpGetMaxTxdCount(dev)*2/4))
{
- if ((buf = zfGetVtxq(dev, 0)) != 0)
+ buf = zfGetVtxq(dev, 0);
+ if (buf != 0)
{
txed = 1;
zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0);
@@ -3894,9 +3916,11 @@ void zfPushVtxq(zdev_t* dev)
}
/* Service VTxQ[1] */
- if ((freeTxd = zfHpGetFreeTxdCount(dev)) >= (zfHpGetMaxTxdCount(dev)*3/4))
+ freeTxd = zfHpGetFreeTxdCount(dev);
+ if (freeTxd >= (zfHpGetMaxTxdCount(dev)*3/4))
{
- if ((buf = zfGetVtxq(dev, 1)) != 0)
+ buf = zfGetVtxq(dev, 1);
+ if (buf != 0)
{
txed = 1;
zfTxSendEth(dev, buf, 0, ZM_EXTERNAL_ALLOC_BUF, 0);
@@ -3983,9 +4007,10 @@ void zf80211FrameSend(zdev_t* dev, zbuf_t* buf, u16_t* header, u16_t snapLen,
}
wd->ledStruct.txTraffic++;
- if ((err = zfHpSend(dev, header, headerLen, snap, snapLen,
+ err = zfHpSend(dev, header, headerLen, snap, snapLen,
tail, tailLen, buf, offset,
- bufType, ac, keyIdx)) != ZM_SUCCESS)
+ bufType, ac, keyIdx);
+ if (err != ZM_SUCCESS)
{
if (bufType == ZM_EXTERNAL_ALLOC_BUF)
{
diff --git a/drivers/staging/otus/80211core/queue.c b/drivers/staging/otus/80211core/queue.c
index d294831b5c3..29be4bdb40a 100644
--- a/drivers/staging/otus/80211core/queue.c
+++ b/drivers/staging/otus/80211core/queue.c
@@ -31,8 +31,9 @@ struct zsQueue* zfQueueCreate(zdev_t* dev, u16_t size)
{
struct zsQueue* q;
- if ((q = (struct zsQueue*)zfwMemAllocate(dev, sizeof(struct zsQueue)
- + (sizeof(struct zsQueueCell)*(size-1)))) != NULL)
+ q = (struct zsQueue*)zfwMemAllocate(dev, sizeof(struct zsQueue)
+ + (sizeof(struct zsQueueCell)*(size-1)));
+ if (q != NULL)
{
q->size = size;
q->sizeMask = size-1;
diff --git a/drivers/staging/otus/80211core/ratectrl.c b/drivers/staging/otus/80211core/ratectrl.c
index a43104cd7f5..a1abe2f4f34 100644
--- a/drivers/staging/otus/80211core/ratectrl.c
+++ b/drivers/staging/otus/80211core/ratectrl.c
@@ -538,7 +538,8 @@ u16_t zfRateCtrlGetTxRate(zdev_t* dev, struct zsRcCell* rcCell, u16_t* probing)
((rcCell->currentRate <= 16) &&
((wd->PER[rcCell->currentRate]/2) <= ZM_RATE_PROBING_THRESHOLD)))
{
- if ((newRate=zfRateCtrlGetHigherRate(rcCell)) != rcCell->currentRate)
+ newRate = zfRateCtrlGetHigherRate(rcCell);
+ if (newRate != rcCell->currentRate)
{
*probing = 1;
wd->probeCount++;
diff --git a/drivers/staging/otus/hal/hpani.c b/drivers/staging/otus/hal/hpani.c
index 0afecd8c2de..f53e483b394 100644
--- a/drivers/staging/otus/hal/hpani.c
+++ b/drivers/staging/otus/hal/hpani.c
@@ -18,8 +18,8 @@
#include "hpusb.h"
-extern u16_t zfDelayWriteInternalReg(zdev_t* dev, u32_t addr, u32_t val);
-extern u16_t zfFlushDelayWrite(zdev_t* dev);
+extern u16_t zfDelayWriteInternalReg(zdev_t *dev, u32_t addr, u32_t val);
+extern u16_t zfFlushDelayWrite(zdev_t *dev);
/*
* Anti noise immunity support. We track phy errors and react
@@ -52,13 +52,13 @@ extern u16_t zfFlushDelayWrite(zdev_t* dev);
#define ZM_HAL_EP_RND(x, mul) \
((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
-s32_t BEACON_RSSI(zdev_t* dev)
+s32_t BEACON_RSSI(zdev_t *dev)
{
s32_t rssi;
struct zsHpPriv *HpPriv;
zmw_get_wlan_dev(dev);
- HpPriv = (struct zsHpPriv*)wd->hpPrivate;
+ HpPriv = (struct zsHpPriv *)wd->hpPrivate;
rssi = ZM_HAL_EP_RND(HpPriv->stats.ast_nodestats.ns_avgbrssi, ZM_HAL_RSSI_EP_MULTIPLIER);
@@ -70,7 +70,7 @@ s32_t BEACON_RSSI(zdev_t* dev)
* resets the channel statistics
*/
-void zfHpAniAttach(zdev_t* dev)
+void zfHpAniAttach(zdev_t *dev)
{
#define N(a) (sizeof(a) / sizeof(a[0]))
u32_t i;
@@ -82,11 +82,10 @@ void zfHpAniAttach(zdev_t* dev)
const int firpwr[] = { -78, -78, -78, -78, -80 };
zmw_get_wlan_dev(dev);
- HpPriv = (struct zsHpPriv*)wd->hpPrivate;
+ HpPriv = (struct zsHpPriv *)wd->hpPrivate;
- for (i = 0; i < 5; i++)
- {
- HpPriv->totalSizeDesired[i] = totalSizeDesired[i];
+ for (i = 0; i < 5; i++) {
+ HpPriv->totalSizeDesired[i] = totalSizeDesired[i];
HpPriv->coarseHigh[i] = coarseHigh[i];
HpPriv->coarseLow[i] = coarseLow[i];
HpPriv->firpwr[i] = firpwr[i];
@@ -96,8 +95,7 @@ void zfHpAniAttach(zdev_t* dev)
HpPriv->hasHwPhyCounters = 1;
memset((char *)&HpPriv->ani, 0, sizeof(HpPriv->ani));
- for (i = 0; i < N(wd->regulationTable.allowChannel); i++)
- {
+ for (i = 0; i < ARRAY_SIZE(HpPriv->ani); i++) {
/* New ANI stuff */
HpPriv->ani[i].ofdmTrigHigh = ZM_HAL_ANI_OFDM_TRIG_HIGH;
HpPriv->ani[i].ofdmTrigLow = ZM_HAL_ANI_OFDM_TRIG_LOW;
@@ -109,14 +107,12 @@ void zfHpAniAttach(zdev_t* dev)
HpPriv->ani[i].cckWeakSigThreshold = ZM_HAL_ANI_CCK_WEAK_SIG_THR;
HpPriv->ani[i].spurImmunityLevel = ZM_HAL_ANI_SPUR_IMMUNE_LVL;
HpPriv->ani[i].firstepLevel = ZM_HAL_ANI_FIRSTEP_LVL;
- if (HpPriv->hasHwPhyCounters)
- {
+ if (HpPriv->hasHwPhyCounters) {
HpPriv->ani[i].ofdmPhyErrBase = 0;//AR_PHY_COUNTMAX - ZM_HAL_ANI_OFDM_TRIG_HIGH;
HpPriv->ani[i].cckPhyErrBase = 0;//AR_PHY_COUNTMAX - ZM_HAL_ANI_CCK_TRIG_HIGH;
}
}
- if (HpPriv->hasHwPhyCounters)
- {
+ if (HpPriv->hasHwPhyCounters) {
//zm_debug_msg2("Setting OfdmErrBase = 0x", HpPriv->ani[0].ofdmPhyErrBase);
//zm_debug_msg2("Setting cckErrBase = 0x", HpPriv->ani[0].cckPhyErrBase);
//OS_REG_WRITE(ah, AR_PHY_ERR_1, HpPriv->ani[0].ofdmPhyErrBase);
@@ -135,7 +131,7 @@ void zfHpAniAttach(zdev_t* dev)
/*
* Control Adaptive Noise Immunity Parameters
*/
-u8_t zfHpAniControl(zdev_t* dev, ZM_HAL_ANI_CMD cmd, int param)
+u8_t zfHpAniControl(zdev_t *dev, ZM_HAL_ANI_CMD cmd, int param)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
typedef s32_t TABLE[];
@@ -143,7 +139,7 @@ u8_t zfHpAniControl(zdev_t* dev, ZM_HAL_ANI_CMD cmd, int param)
struct zsAniState *aniState;
zmw_get_wlan_dev(dev);
- HpPriv = (struct zsHpPriv*)wd->hpPrivate;
+ HpPriv = (struct zsHpPriv *)wd->hpPrivate;
aniState = HpPriv->curani;
switch (cmd)
@@ -152,8 +148,7 @@ u8_t zfHpAniControl(zdev_t* dev, ZM_HAL_ANI_CMD cmd, int param)
{
u32_t level = param;
- if (level >= N(HpPriv->totalSizeDesired))
- {
+ if (level >= N(HpPriv->totalSizeDesired)) {
zm_debug_msg1("level out of range, desired level : ", level);
zm_debug_msg1("max level : ", N(HpPriv->totalSizeDesired));
return FALSE;
diff --git a/drivers/staging/otus/hal/hpani.h b/drivers/staging/otus/hal/hpani.h
index 96e69af3c68..b89241371ab 100644
--- a/drivers/staging/otus/hal/hpani.h
+++ b/drivers/staging/otus/hal/hpani.h
@@ -99,8 +99,8 @@ typedef enum {
ZM_HAL_ANI_PHYERR_RESET, /* reset phy error stats */
} ZM_HAL_ANI_CMD;
-#define AR_PHY_COUNTMAX (3 << 22) // Max counted before intr
-#define ZM_HAL_PROCESS_ANI 0x00000001 /* ANI state setup */
+#define AR_PHY_COUNTMAX (3 << 22) /* Max counted before intr */
+#define ZM_HAL_PROCESS_ANI 0x00000001 /* ANI state setup */
#define ZM_RSSI_DUMMY_MARKER 0x127
/* PHY registers in ar5416, related base and register offsets
@@ -353,7 +353,7 @@ typedef enum {
#define AR_PHY_CCK_DETECT 0x1C6208
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
-#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0 // [12:6] settling time for antenna switch
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0 /* [12:6] settling time for antenna switch */
#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
@@ -392,7 +392,6 @@ typedef enum {
#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
-//
#define AR_PHY_ANALOG_SWAP 0xa268
#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
diff --git a/drivers/staging/otus/hal/hpfw2.c b/drivers/staging/otus/hal/hpfw2.c
index baceb029976..17f405b5db1 100644
--- a/drivers/staging/otus/hal/hpfw2.c
+++ b/drivers/staging/otus/hal/hpfw2.c
@@ -1015,4 +1015,4 @@ const u32_t zcP2FwImage[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, };
-const u32_t zcP2FwImageSize=15964;
+const u32_t zcP2FwImageSize = 15964;
diff --git a/drivers/staging/otus/hal/hpfwu.c b/drivers/staging/otus/hal/hpfwu.c
index 2b77cbacc6d..68fabef180a 100644
--- a/drivers/staging/otus/hal/hpfwu.c
+++ b/drivers/staging/otus/hal/hpfwu.c
@@ -1014,4 +1014,4 @@ const u32_t zcFwImage[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32_t zcFwImageSize=15936;
+const u32_t zcFwImageSize = 15936;
diff --git a/drivers/staging/otus/hal/hpfwu_2k.c b/drivers/staging/otus/hal/hpfwu_2k.c
index 94e2caca536..b675d6d556b 100644
--- a/drivers/staging/otus/hal/hpfwu_2k.c
+++ b/drivers/staging/otus/hal/hpfwu_2k.c
@@ -1013,4 +1013,4 @@ const u32_t zcFwImage[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, };
-const u32_t zcFwImageSize=15928;
+const u32_t zcFwImageSize = 15928;
diff --git a/drivers/staging/otus/hal/hpfwu_BA.c b/drivers/staging/otus/hal/hpfwu_BA.c
index 0c741571f2b..f89419b3743 100644
--- a/drivers/staging/otus/hal/hpfwu_BA.c
+++ b/drivers/staging/otus/hal/hpfwu_BA.c
@@ -871,4 +871,4 @@ const u32_t zcFwImage[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, };
-const u32_t zcFwImageSize=13656;
+const u32_t zcFwImageSize = 13656;
diff --git a/drivers/staging/otus/hal/hpfwu_OTUS_RC.c b/drivers/staging/otus/hal/hpfwu_OTUS_RC.c
index 089d3e0ad85..accbec4369f 100644
--- a/drivers/staging/otus/hal/hpfwu_OTUS_RC.c
+++ b/drivers/staging/otus/hal/hpfwu_OTUS_RC.c
@@ -712,4 +712,4 @@ const u32_t zcFwImage[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32_t zcFwImageSize=11104;
+const u32_t zcFwImageSize = 11104;
diff --git a/drivers/staging/otus/hal/hpfwuinit.c b/drivers/staging/otus/hal/hpfwuinit.c
index ed80ffafaff..5d0dccca080 100644
--- a/drivers/staging/otus/hal/hpfwuinit.c
+++ b/drivers/staging/otus/hal/hpfwuinit.c
@@ -237,4 +237,4 @@ const u32_t zcFwImage[] = {
0x45485441, 0x38731652, 0x89ACFF91, 0xEE55D178,
0xEE000D0A, };
-const u32_t zcFwImageSize=3508;
+const u32_t zcFwImageSize = 3508;
diff --git a/drivers/staging/otus/hal/hpmain.c b/drivers/staging/otus/hal/hpmain.c
index 8dff5b97dfe..5f412e02045 100644
--- a/drivers/staging/otus/hal/hpmain.c
+++ b/drivers/staging/otus/hal/hpmain.c
@@ -142,8 +142,9 @@ u16_t zfHpInit(zdev_t* dev, u32_t frequency)
if (wd->modeMDKEnable)
{
/* download the MDK firmware */
- if ((ret = zfFirmwareDownload(dev, (u32_t*)zcDKFwImage,
- (u32_t)zcDKFwImageSize, ZM_FIRMWARE_WLAN_ADDR)) != ZM_SUCCESS)
+ ret = zfFirmwareDownload(dev, (u32_t*)zcDKFwImage,
+ (u32_t)zcDKFwImageSize, ZM_FIRMWARE_WLAN_ADDR);
+ if (ret != ZM_SUCCESS)
{
/* TODO : exception handling */
//return 1;
@@ -153,8 +154,9 @@ u16_t zfHpInit(zdev_t* dev, u32_t frequency)
{
#ifndef ZM_OTUS_LINUX_PHASE_2
/* download the normal firmware */
- if ((ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
- (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR)) != ZM_SUCCESS)
+ ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
+ (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR);
+ if (ret != ZM_SUCCESS)
{
/* TODO : exception handling */
//return 1;
@@ -162,16 +164,18 @@ u16_t zfHpInit(zdev_t* dev, u32_t frequency)
#else
// 1-PH fw: ReadMac() store some global variable
- if ((ret = zfFirmwareDownloadNotJump(dev, (u32_t*)zcFwBufImage,
- (u32_t)zcFwBufImageSize, 0x102800)) != ZM_SUCCESS)
+ ret = zfFirmwareDownloadNotJump(dev, (u32_t*)zcFwBufImage,
+ (u32_t)zcFwBufImageSize, 0x102800);
+ if (ret != ZM_SUCCESS)
{
DbgPrint("Dl zcFwBufImage failed!");
}
zfwSleep(dev, 1000);
- if ((ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
- (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR)) != ZM_SUCCESS)
+ ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
+ (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR);
+ if (ret != ZM_SUCCESS)
{
DbgPrint("Dl zcFwBufImage failed!");
}
@@ -249,15 +253,17 @@ u16_t zfHpReinit(zdev_t* dev, u32_t frequency)
#ifndef ZM_OTUS_LINUX_PHASE_2
/* Download firmware */
- if ((ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
- (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR)) != ZM_SUCCESS)
+ ret = zfFirmwareDownload(dev, (u32_t*)zcFwImage,
+ (u32_t)zcFwImageSize, ZM_FIRMWARE_WLAN_ADDR);
+ if (ret != ZM_SUCCESS)
{
/* TODO : exception handling */
//return 1;
}
#else
- if ((ret = zfFirmwareDownload(dev, (u32_t*)zcP2FwImage,
- (u32_t)zcP2FwImageSize, ZM_FIRMWARE_WLAN_ADDR)) != ZM_SUCCESS)
+ ret = zfFirmwareDownload(dev, (u32_t*)zcP2FwImage,
+ (u32_t)zcP2FwImageSize, ZM_FIRMWARE_WLAN_ADDR);
+ if (ret != ZM_SUCCESS)
{
/* TODO : exception handling */
//return 1;
diff --git a/drivers/staging/otus/hal/hpreg.c b/drivers/staging/otus/hal/hpreg.c
index 178777c09db..da3b7743387 100644
--- a/drivers/staging/otus/hal/hpreg.c
+++ b/drivers/staging/otus/hal/hpreg.c
@@ -30,7 +30,7 @@
#include "hpusb.h"
/* used throughout this file... */
-#define N(a) (sizeof (a) / sizeof (a[0]))
+#define N(a) (sizeof(a) / sizeof(a[0]))
#define HAL_MODE_11A_TURBO HAL_MODE_108A
#define HAL_MODE_11G_TURBO HAL_MODE_108G
@@ -78,7 +78,7 @@ enum {
};
#define MKK5GHZ_FLAG1 (DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS)
-#define MKK5GHZ_FLAG2 (DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC| LIMIT_FRAME_4MS)
+#define MKK5GHZ_FLAG2 (DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS)
typedef enum {
DFS_UNINIT_DOMAIN = 0, /* Uninitialized dfs domain */
@@ -272,7 +272,7 @@ static REG_DMN_PAIR_MAPPING regDomainPairs[] = {
/* MKK4 */
{MKK4_MKKB, MKK4, MKKA, MKK5GHZ_FLAG2, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN10 },
{MKK4_MKKA1, MKK4, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28 },
- {MKK4_MKKA2, MKK4, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK3 |PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11 },
+ {MKK4_MKKA2, MKK4, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11 },
{MKK4_MKKC, MKK4, MKKC, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK3, CTRY_JAPAN12 },
{MKK4_FCCA, MKK4, FCCA, MKK5GHZ_FLAG1, NEED_NFC, NO_PSCAN, CTRY_JAPAN29 },
{MKK4_MKKA, MKK4, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA, CTRY_JAPAN36 },
@@ -301,7 +301,7 @@ static REG_DMN_PAIR_MAPPING regDomainPairs[] = {
{MKK8_MKKA2, MKK8, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN23 },
{MKK8_MKKC, MKK8, MKKC, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKK1 | PSCAN_MKK3 , CTRY_JAPAN24 },
- /* MKK9 */
+ /* MKK9 */
{MKK9_MKKA, MKK9, MKKA, MKK5GHZ_FLAG1, NEED_NFC, NO_PSCAN, CTRY_JAPAN34 },
{MKK9_FCCA, MKK9, FCCA, MKK5GHZ_FLAG1, NEED_NFC, NO_PSCAN, CTRY_JAPAN37 },
{MKK9_MKKA1, MKK9, MKKA, MKK5GHZ_FLAG1, NEED_NFC, PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38 },
@@ -359,7 +359,7 @@ static REG_DMN_PAIR_MAPPING regDomainPairs[] = {
#define COUNTRY_CODE_MASK 0x03ff
#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT)
#define CHANNEL_14 (2484) /* 802.11g operation is not permitted on channel 14 */
-#define IS_11G_CH14(_ch,_cf) \
+#define IS_11G_CH14(_ch, _cf) \
(((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G))
#define YES TRUE
@@ -373,183 +373,183 @@ enum {
typedef struct {
HAL_CTRY_CODE countryCode;
HAL_REG_DOMAIN regDmnEnum;
- const char* isoName;
- const char* name;
+ const char *isoName;
+ const char *name;
HAL_BOOL allow11g;
HAL_BOOL allow11aTurbo;
HAL_BOOL allow11gTurbo;
- HAL_BOOL allow11na; /* HT-40 allowed in 5GHz? */
- HAL_BOOL allow11ng; /* HT-40 allowed in 2GHz? */
+ HAL_BOOL allow11na; /* HT-40 allowed in 5GHz? */
+ HAL_BOOL allow11ng; /* HT-40 allowed in 2GHz? */
u16_t outdoorChanStart;
} COUNTRY_CODE_TO_ENUM_RD;
static COUNTRY_CODE_TO_ENUM_RD allCountries[] = {
- {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES, 7000 },
- {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES, YES, YES, 7000 },
- {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, NO, NO, 7000 },
- {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_AUSTRALIA, FCC6_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_AUSTRIA, ETSI2_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES, YES, YES, 7000 },
- {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES, YES, 7000 },
- {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES, YES, 7000 },
- {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES, 7000 },
- {CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLVIA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", NO, NO, NO, NO, NO, 7000 },
- {CTRY_BRUNEI_DARUSSALAM,APL1_WORLD,"BN", "BRUNEI DARUSSALAM", YES, YES, YES, YES, YES, 7000 },
- {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_CANADA, FCC6_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES, 7000 },
- {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_CYPRUS, ETSI3_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES, 7000 },
- {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES, YES, YES, 7000 },
- {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES, YES, 7000 },
- {CTRY_DOMINICAN_REPUBLIC,FCC1_FCCA,"DO", "DOMINICAN REPUBLIC", YES, YES, YES, YES, YES, 7000 },
- {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, NO, YES, 7000 },
- {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, NO, YES, 7000 },
- {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES, YES, 7000 },
- {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES, 7000 },
- {CTRY_FRANCE2, ETSI3_WORLD, "F2", "FRANCE_RES", YES, NO, YES, YES, YES, 7000 },
- {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES, YES, 7000 },
- {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES, 7000 },
- {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, NO, YES, 7000 },
- {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES, YES, 7000 },
- {CTRY_HUNGARY, ETSI4_WORLD, "HU", "HUNGARY", YES, NO, YES, YES, YES, 7000 },
- {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES, YES, 7000 },
- {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, 7000 },
- {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES, YES, 7000 },
- {CTRY_ISRAEL, ETSI3_WORLD, "IL", "ISRAEL", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ISRAEL2, NULL1_ETSIB, "ISR","ISRAEL_RES", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES, 7000 },
- {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN1, MKK1_MKKB, "J1", "JAPAN1", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN2, MKK1_FCCA, "J2", "JAPAN2", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN3, MKK2_MKKA, "J3", "JAPAN3", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN4, MKK1_MKKA1, "J4", "JAPAN4", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN5, MKK1_MKKA2, "J5", "JAPAN5", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN6, MKK1_MKKC, "J6", "JAPAN6", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN7, MKK3_MKKB, "J7", "JAPAN7", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN8, MKK3_MKKA2, "J8", "JAPAN8", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN9, MKK3_MKKC, "J9", "JAPAN9", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN10, MKK4_MKKB, "J10", "JAPAN10", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN11, MKK4_MKKA2, "J11", "JAPAN11", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN12, MKK4_MKKC, "J12", "JAPAN12", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN13, MKK5_MKKB, "J13", "JAPAN13", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN14, MKK5_MKKA2, "J14", "JAPAN14", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN15, MKK5_MKKC, "J15", "JAPAN15", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN16, MKK6_MKKB, "J16", "JAPAN16", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN17, MKK6_MKKA2, "J17", "JAPAN17", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN18, MKK6_MKKC, "J18", "JAPAN18", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN19, MKK7_MKKB, "J19", "JAPAN19", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN20, MKK7_MKKA, "J20", "JAPAN20", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN21, MKK7_MKKC, "J21", "JAPAN21", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN22, MKK8_MKKB, "J22", "JAPAN22", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN23, MKK8_MKKA2, "J23", "JAPAN23", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN24, MKK8_MKKC, "J24", "JAPAN24", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN25, MKK3_MKKA, "J25", "JAPAN25", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN26, MKK3_MKKA1, "J26", "JAPAN26", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN27, MKK3_FCCA, "J27", "JAPAN27", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN28, MKK4_MKKA1, "J28", "JAPAN28", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN29, MKK4_FCCA, "J29", "JAPAN29", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN30, MKK6_MKKA1, "J30", "JAPAN30", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN31, MKK6_FCCA, "J31", "JAPAN31", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN32, MKK7_MKKA1, "J32", "JAPAN32", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN33, MKK7_FCCA, "J33", "JAPAN33", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN34, MKK9_MKKA, "J34", "JAPAN34", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN35, MKK10_MKKA, "J35", "JAPAN35", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN36, MKK4_MKKA, "J36", "JAPAN36", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN37, MKK9_FCCA, "J37", "JAPAN37", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN38, MKK9_MKKA1, "J38", "JAPAN38", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN39, MKK9_MKKC, "J39", "JAPAN39", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN40, MKK10_MKKA2, "J40", "JAPAN40", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN41, MKK10_FCCA, "J41", "JAPAN41", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN42, MKK10_MKKA1, "J42", "JAPAN42", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN43, MKK10_MKKC, "J43", "JAPAN43", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN44, MKK10_MKKA2, "J44", "JAPAN44", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN45, MKK11_MKKA, "J45", "JAPAN45", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN46, MKK11_FCCA, "J46", "JAPAN46", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN47, MKK11_MKKA1, "J47", "JAPAN47", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN48, MKK11_MKKC, "J48", "JAPAN48", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN49, MKK11_MKKA2, "J49", "JAPAN49", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN50, MKK12_MKKA, "J50", "JAPAN50", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN51, MKK12_FCCA, "J51", "JAPAN51", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN52, MKK12_MKKA1, "J52", "JAPAN52", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN53, MKK12_MKKC, "J53", "JAPAN53", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JAPAN54, MKK12_MKKA2, "J54", "JAPAN54", YES, NO, NO, NO, NO, 7000 },
- {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO, YES, YES, 7000 },
- {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO, NO, NO, 7000 },
- {CTRY_KOREA_ROC2, APL2_APLD, "K2", "KOREA REPUBLIC2",YES, NO, NO, NO, NO, 7000 },
- {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3",YES, NO, NO, NO, NO, 7000 },
- {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, NO, YES, 7000 },
- {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, NO, YES, 7000 },
- {CTRY_LIECHTENSTEIN,ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO, YES, YES, YES, 7000 },
- {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES, YES, YES, 7000 },
- {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES, 7000 },
- {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", NO, NO, NO, NO, NO, 7000 },
- {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES, 7000 },
- {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES, 7000 },
- {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, NO, YES, 7000 },
- {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES, YES, YES, 7000 },
- {CTRY_NETHERLANDS_ANT, ETSI1_WORLD, "AN", "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, 7000 },
- {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES, NO, YES, 7000 },
- {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES, 7000 },
- {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES, 7000 },
- {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, NO, YES, 7000 },
- {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES, YES, YES, 7000 },
- {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES, 7000 },
- {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES, YES, 7000 },
- {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES, YES, YES, 7000 },
- {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_SAUDI_ARABIA,NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_SERBIA_MONT, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO", YES, NO, YES, YES, YES, 7000 },
- {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES, YES, 7000 },
- {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC",YES, NO, YES, YES, YES, 7000 },
- {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES, YES, 7000 },
- {CTRY_SOUTH_AFRICA,FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES, 7000 },
- {CTRY_SRILANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES, 7000 },
- {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES, YES, YES, 7000 },
- {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES, 7000 },
- {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, NO, YES, 7000 },
- {CTRY_TRINIDAD_Y_TOBAGO,ETSI4_WORLD,"TT", "TRINIDAD & TOBAGO", YES, NO, YES, NO, YES, 7000 },
- {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, NO, YES, 7000 },
- {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, NO, YES, 7000 },
- {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES, NO, YES, 7000 },
- {CTRY_UNITED_KINGDOM, ETSI1_WORLD,"GB", "UNITED KINGDOM", YES, NO, YES, NO, YES, 7000 },
- {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES, YES, YES, YES, 5825 },
- {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS", "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, 7000 },
- {CTRY_URUGUAY, FCC1_WORLD, "UY", "URUGUAY", YES, NO, YES, NO, YES, 7000 },
- {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES, YES, YES, 7000 },
- {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, NO, YES, 7000 },
- {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, NO, YES, 7000 },
- {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, NO, YES, 7000 },
- {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, NO, YES, 7000 }
+ {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_AUSTRALIA, FCC6_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_AUSTRIA, ETSI2_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLIVIA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", NO, NO, NO, NO, NO, 7000 },
+ {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_BULGARIA, ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_CANADA, FCC6_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_CYPRUS, ETSI3_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, NO, YES, 7000 },
+ {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_FRANCE2, ETSI3_WORLD, "F2", "FRANCE_RES", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_HUNGARY, ETSI4_WORLD, "HU", "HUNGARY", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_ISRAEL, ETSI3_WORLD, "IL", "ISRAEL", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ISRAEL2, NULL1_ETSIB, "ISR", "ISRAEL_RES", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN1, MKK1_MKKB, "J1", "JAPAN1", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN2, MKK1_FCCA, "J2", "JAPAN2", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN3, MKK2_MKKA, "J3", "JAPAN3", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN4, MKK1_MKKA1, "J4", "JAPAN4", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN5, MKK1_MKKA2, "J5", "JAPAN5", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN6, MKK1_MKKC, "J6", "JAPAN6", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN7, MKK3_MKKB, "J7", "JAPAN7", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN8, MKK3_MKKA2, "J8", "JAPAN8", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN9, MKK3_MKKC, "J9", "JAPAN9", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN10, MKK4_MKKB, "J10", "JAPAN10", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN11, MKK4_MKKA2, "J11", "JAPAN11", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN12, MKK4_MKKC, "J12", "JAPAN12", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN13, MKK5_MKKB, "J13", "JAPAN13", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN14, MKK5_MKKA2, "J14", "JAPAN14", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN15, MKK5_MKKC, "J15", "JAPAN15", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN16, MKK6_MKKB, "J16", "JAPAN16", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN17, MKK6_MKKA2, "J17", "JAPAN17", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN18, MKK6_MKKC, "J18", "JAPAN18", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN19, MKK7_MKKB, "J19", "JAPAN19", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN20, MKK7_MKKA, "J20", "JAPAN20", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN21, MKK7_MKKC, "J21", "JAPAN21", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN22, MKK8_MKKB, "J22", "JAPAN22", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN23, MKK8_MKKA2, "J23", "JAPAN23", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN24, MKK8_MKKC, "J24", "JAPAN24", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN25, MKK3_MKKA, "J25", "JAPAN25", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN26, MKK3_MKKA1, "J26", "JAPAN26", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN27, MKK3_FCCA, "J27", "JAPAN27", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN28, MKK4_MKKA1, "J28", "JAPAN28", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN29, MKK4_FCCA, "J29", "JAPAN29", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN30, MKK6_MKKA1, "J30", "JAPAN30", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN31, MKK6_FCCA, "J31", "JAPAN31", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN32, MKK7_MKKA1, "J32", "JAPAN32", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN33, MKK7_FCCA, "J33", "JAPAN33", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN34, MKK9_MKKA, "J34", "JAPAN34", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN35, MKK10_MKKA, "J35", "JAPAN35", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN36, MKK4_MKKA, "J36", "JAPAN36", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN37, MKK9_FCCA, "J37", "JAPAN37", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN38, MKK9_MKKA1, "J38", "JAPAN38", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN39, MKK9_MKKC, "J39", "JAPAN39", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN40, MKK10_MKKA2, "J40", "JAPAN40", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN41, MKK10_FCCA, "J41", "JAPAN41", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN42, MKK10_MKKA1, "J42", "JAPAN42", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN43, MKK10_MKKC, "J43", "JAPAN43", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN44, MKK10_MKKA2, "J44", "JAPAN44", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN45, MKK11_MKKA, "J45", "JAPAN45", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN46, MKK11_FCCA, "J46", "JAPAN46", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN47, MKK11_MKKA1, "J47", "JAPAN47", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN48, MKK11_MKKC, "J48", "JAPAN48", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN49, MKK11_MKKA2, "J49", "JAPAN49", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN50, MKK12_MKKA, "J50", "JAPAN50", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN51, MKK12_FCCA, "J51", "JAPAN51", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN52, MKK12_MKKA1, "J52", "JAPAN52", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN53, MKK12_MKKC, "J53", "JAPAN53", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JAPAN54, MKK12_MKKA2, "J54", "JAPAN54", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO, YES, YES, 7000 },
+ {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_KOREA_ROC2, APL2_APLD, "K2", "KOREA REPUBLIC2", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO, NO, NO, 7000 },
+ {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", NO, NO, NO, NO, NO, 7000 },
+ {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_NETHERLANDS_ANT, ETSI1_WORLD, "AN", "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_PERU, APL1_WORLD, "PE", "PERU", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_SERBIA_MONT, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SRILANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES, YES, YES, 7000 },
+ {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES, YES, YES, YES, 5825 },
+ {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS", "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_URUGUAY, FCC1_WORLD, "UY", "URUGUAY", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES, YES, YES, 7000 },
+ {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, NO, YES, 7000 },
+ {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, NO, YES, 7000 }
};
typedef struct RegDmnFreqBand {
@@ -660,7 +660,7 @@ enum {
W1_5745_5825,
W1_5500_5700,
W2_5260_5320,
- W2_5180_5240,
+ W2_5180_5240,
W2_5825_5825,
};
@@ -1332,8 +1332,8 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BMZERO,
BMZERO,
- BM(F2_2312_2372,F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(G2_2312_2372,G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO,
BMZERO},
@@ -1342,9 +1342,9 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BMZERO,
BMZERO,
- BM(F1_2457_2472,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(G1_2457_2472,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1352,9 +1352,9 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BMZERO,
BMZERO,
- BM(F1_2432_2442,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(G1_2432_2442,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1362,9 +1362,9 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BMZERO,
BMZERO,
- BM(F3_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(G3_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1372,10 +1372,10 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BMZERO,
BMZERO,
- BM(F1_2412_2462,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(G1_2412_2462,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(NG2_2422_2452,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(NG2_2422_2452, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO},
{MKKA, MKK, NO_DFS, PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G | PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB,
@@ -1384,36 +1384,36 @@ static REG_DOMAIN regDomains[] = {
BMZERO,
BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(NG1_2422_2452,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(NG1_2422_2452, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO},
{MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ,
BMZERO,
BMZERO,
BMZERO,
- BM(F2_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(G2_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(NG1_2422_2452,-1,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(NG1_2422_2452, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO},
{WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ,
BMZERO,
BMZERO,
BMZERO,
- BM(F2_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(G2_2412_2472,-1,-1,-1,-1,-1,-1,-1, -1, -1, -1, -1),
- BM(T2_2437_2437,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
- BM(NG1_2422_2452,-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
+ BM(NG1_2422_2452, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO},
{WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462,W1_2472_2472,W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462,WG1_2472_2472,WG1_2417_2432,WG1_2447_2457,WG1_2467_2467, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2472_2472, WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1429,21 +1429,21 @@ static REG_DOMAIN regDomains[] = {
BMZERO},
{WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
- BM(W1_5260_5320, W1_5180_5240,W1_5170_5230,W1_5745_5825,W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462, W1_2472_2472,W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462, WG1_2472_2472,WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2472_2472, WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
{EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D,
- BM(W1_5260_5320, W1_5180_5240,W1_5170_5230,W1_5745_5825,W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
+ BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462, W2_2472_2472,W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462, WG2_2472_2472,WG1_2417_2432, WG1_2447_2457, WG2_2467_2467, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472, W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG2_2472_2472, WG1_2417_2432, WG1_2447_2457, WG2_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1452,8 +1452,8 @@ static REG_DOMAIN regDomains[] = {
BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462,W1_2472_2472,W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462,WG1_2472_2472,WG1_2417_2432,WG1_2447_2457,WG1_2467_2467, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2472_2472, WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1462,8 +1462,8 @@ static REG_DOMAIN regDomains[] = {
BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, W1_5500_5700, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462,W1_2472_2472,W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462,WG1_2472_2472,WG1_2417_2432,WG1_2447_2457,WG1_2467_2467, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2472_2472, WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1472,8 +1472,8 @@ static REG_DOMAIN regDomains[] = {
BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462,W1_2472_2472,W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462,WG1_2472_2472,WG1_2417_2432,WG1_2447_2457,WG1_2467_2467,-1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2472_2472, WG1_2417_2432, WG1_2447_2457, WG1_2467_2467, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1482,8 +1482,8 @@ static REG_DOMAIN regDomains[] = {
BM(W2_5260_5320, W2_5180_5240, F2_5745_5805, W2_5825_5825, -1, -1, -1, -1, -1, -1, -1, -1),
BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
- BM(W1_2412_2412,W1_2437_2442,W1_2462_2462, W1_2417_2432,W1_2447_2457,-1, -1, -1, -1, -1, -1, -1),
- BM(WG1_2412_2412,WG1_2437_2442,WG1_2462_2462, WG1_2417_2432,WG1_2447_2457,-1, -1, -1, -1, -1, -1, -1),
+ BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432, W1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
+ BM(WG1_2412_2412, WG1_2437_2442, WG1_2462_2462, WG1_2417_2432, WG1_2447_2457, -1, -1, -1, -1, -1, -1, -1),
BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1),
BMZERO,
BMZERO},
@@ -1554,30 +1554,24 @@ static const struct cmode modes[] = {
u8_t GetWmRD(u16_t regionCode, u16_t channelFlag, REG_DOMAIN *rd)
{
s16_t i, found, regDmn;
- u64_t flags=NO_REQ;
- REG_DMN_PAIR_MAPPING *regPair=NULL;
+ u64_t flags = NO_REQ;
+ REG_DMN_PAIR_MAPPING *regPair = NULL;
- for (i=0, found=0; (i<N(regDomainPairs))&&(!found); i++)
- {
- if (regDomainPairs[i].regDmnEnum == regionCode)
- {
+ for (i = 0, found = 0; (i < N(regDomainPairs)) && (!found); i++) {
+ if (regDomainPairs[i].regDmnEnum == regionCode) {
regPair = &regDomainPairs[i];
found = 1;
}
}
- if (!found)
- {
+ if (!found) {
zm_debug_msg1("Failed to find reg domain pair ", regionCode);
return FALSE;
}
- if (channelFlag & ZM_REG_FLAG_CHANNEL_2GHZ)
- {
+ if (channelFlag & ZM_REG_FLAG_CHANNEL_2GHZ) {
regDmn = regPair->regDmn2GHz;
flags = regPair->flags2GHz;
- }
- else
- {
+ } else {
regDmn = regPair->regDmn5GHz;
flags = regPair->flags5GHz;
}
@@ -1587,19 +1581,16 @@ u8_t GetWmRD(u16_t regionCode, u16_t channelFlag, REG_DOMAIN *rd)
* unitary reg domain of the pair
*/
- for (i=0;i<N(regDomains); i++)
- {
- if (regDomains[i].regDmnEnum == regDmn)
- {
- if (rd != NULL)
- {
- zfMemoryCopy((u8_t *)rd, (u8_t *)&regDomains[i],
- sizeof(REG_DOMAIN));
+ for (i = 0 ; i < N(regDomains) ; i++) {
+ if (regDomains[i].regDmnEnum == regDmn) {
+ if (rd != NULL) {
+ zfMemoryCopy((u8_t *)rd, (u8_t *)&regDomains[i],
+ sizeof(REG_DOMAIN));
}
}
}
rd->pscan &= regPair->pscanMask;
- rd->flags = (u32_t)flags;
+ rd->flags = (u32_t)flags;
return TRUE;
}
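GetWmRD() resolves a region code in two table scans: first it finds the matching REG_DMN_PAIR_MAPPING entry, then it copies out the unitary 2GHz or 5GHz REG_DOMAIN that the pair names. The stand-alone sketch below shows only that lookup shape; struct pair, struct domain and the table contents are simplified, made-up stand-ins, not the driver's real definitions.

#include <stdio.h>
#include <string.h>

#define N(a) (sizeof(a) / sizeof((a)[0]))   /* same helper macro as hpreg.c */

/* Simplified, hypothetical stand-ins for REG_DMN_PAIR_MAPPING / REG_DOMAIN */
struct pair { unsigned pairEnum, dmn2GHz, dmn5GHz; };
struct domain { unsigned dmnEnum; const char *name; };

static const struct pair pairs[] = {
	{ 0x10, 0x21, 0x31 },
	{ 0x11, 0x22, 0x32 },
};
static const struct domain domains[] = {
	{ 0x21, "2GHz-A" }, { 0x31, "5GHz-A" },
	{ 0x22, "2GHz-B" }, { 0x32, "5GHz-B" },
};

/* Resolve a region code to its unitary domain for one band, GetWmRD-style:
 * find the pair entry first, then copy out the matching unitary domain. */
static int get_domain(unsigned regionCode, int is2GHz, struct domain *out)
{
	const struct pair *p = NULL;
	unsigned dmn;
	unsigned i;

	for (i = 0; i < N(pairs) && p == NULL; i++)
		if (pairs[i].pairEnum == regionCode)
			p = &pairs[i];
	if (p == NULL)
		return 0;                       /* unknown region code */

	dmn = is2GHz ? p->dmn2GHz : p->dmn5GHz;
	for (i = 0; i < N(domains); i++)
		if (domains[i].dmnEnum == dmn) {
			memcpy(out, &domains[i], sizeof(*out));
			return 1;
		}
	return 0;
}

int main(void)
{
	struct domain d;

	if (get_domain(0x10, 0, &d))
		printf("5GHz unitary domain: %s\n", d.name);
	return 0;
}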
@@ -1610,7 +1601,7 @@ u8_t isChanBitMaskZero(u64_t *bitmask)
{
u16_t i;
- for (i=0; i<BMLEN; i++) {
+ for (i = 0; i < BMLEN; i++) {
if (bitmask[i] != 0)
return FALSE;
}
@@ -1632,384 +1623,344 @@ u8_t IS_BIT_SET(u32_t bit, u64_t *bitmask)
}
-void zfHpGetRegulationTable(zdev_t* dev, u16_t regionCode, u16_t c_lo, u16_t c_hi)
+void zfHpGetRegulationTable(zdev_t *dev, u16_t regionCode, u16_t c_lo, u16_t c_hi)
{
REG_DOMAIN rd5GHz, rd2GHz;
const struct cmode *cm;
- s16_t next=0,b;
- struct zsHpPriv* hpPriv;
+ s16_t next = 0, b;
+ struct zsHpPriv *hpPriv;
- zmw_get_wlan_dev(dev);
- hpPriv=wd->hpPrivate;
+ zmw_get_wlan_dev(dev);
+ hpPriv = wd->hpPrivate;
- zmw_declare_for_critical_section();
+ zmw_declare_for_critical_section();
- if (!GetWmRD(regionCode, ~ZM_REG_FLAG_CHANNEL_2GHZ, &rd5GHz))
- {
- zm_debug_msg1("couldn't find unitary 5GHz reg domain for Region Code ", regionCode);
+ if (!GetWmRD(regionCode, ~ZM_REG_FLAG_CHANNEL_2GHZ, &rd5GHz)) {
+ zm_debug_msg1("couldn't find unitary 5GHz reg domain for Region Code ", regionCode);
return;
}
- if (!GetWmRD(regionCode, ZM_REG_FLAG_CHANNEL_2GHZ, &rd2GHz))
- {
- zm_debug_msg1("couldn't find unitary 2GHz reg domain for Region Code ", regionCode);
+ if (!GetWmRD(regionCode, ZM_REG_FLAG_CHANNEL_2GHZ, &rd2GHz)) {
+ zm_debug_msg1("couldn't find unitary 2GHz reg domain for Region Code ", regionCode);
return;
}
- if (wd->regulationTable.regionCode == regionCode)
- {
- zm_debug_msg1("current region code is the same with Region Code ", regionCode);
- return;
- }
- else
- {
- wd->regulationTable.regionCode = regionCode;
- }
+ if (wd->regulationTable.regionCode == regionCode) {
+ zm_debug_msg1("current region code is the same with Region Code ", regionCode);
+ return;
+ } else
+ wd->regulationTable.regionCode = regionCode;
- next = 0;
+ next = 0;
- zmw_enter_critical_section(dev);
+ zmw_enter_critical_section(dev);
- for (cm = modes; cm < &modes[N(modes)]; cm++)
- {
+ for (cm = modes; cm < &modes[N(modes)]; cm++) {
u16_t c;
- u64_t *channelBM=NULL;
- REG_DOMAIN *rd=NULL;
- REG_DMN_FREQ_BAND *fband=NULL,*freqs=NULL;
+ u64_t *channelBM = NULL;
+ REG_DOMAIN *rd = NULL;
+ REG_DMN_FREQ_BAND *fband = NULL, *freqs = NULL;
- switch (cm->mode)
- {
+ switch (cm->mode) {
case HAL_MODE_TURBO:
- //we don't have turbo mode so we disable it
- //zm_debug_msg0("CWY - HAL_MODE_TURBO");
- channelBM = NULL;
- //rd = &rd5GHz;
- //channelBM = rd->chan11a_turbo;
- //freqs = &regDmn5GhzTurboFreq[0];
- //ctl = rd->conformanceTestLimit | CTL_TURBO;
+ /* we don't have turbo mode so we disable it
+ //zm_debug_msg0("CWY - HAL_MODE_TURBO"); */
+ channelBM = NULL;
+ /* rd = &rd5GHz;
+ channelBM = rd->chan11a_turbo;
+ freqs = &regDmn5GhzTurboFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_TURBO; */
break;
case HAL_MODE_11A:
- if ((hpPriv->OpFlags & 0x1) != 0)
- {
- rd = &rd5GHz;
- channelBM = rd->chan11a;
- freqs = &regDmn5GhzFreq[0];
- c_lo = 4920; //from channel 184
- c_hi = 5825; //to channel 165
- //ctl = rd->conformanceTestLimit;
- //zm_debug_msg2("CWY - HAL_MODE_11A, channelBM = 0x", *channelBM);
- }
- //else
- {
- //channelBM = NULL;
- }
+ if ((hpPriv->OpFlags & 0x1) != 0) {
+ rd = &rd5GHz;
+ channelBM = rd->chan11a;
+ freqs = &regDmn5GhzFreq[0];
+ c_lo = 4920; /* from channel 184 */
+ c_hi = 5825; /* to channel 165 */
+ /* ctl = rd->conformanceTestLimit;
+ zm_debug_msg2("CWY - HAL_MODE_11A, channelBM = 0x", *channelBM); */
+ }
+ /* else
+ channelBM = NULL;
+ */
break;
case HAL_MODE_11B:
- //Disable 11B mode because it only has difference with 11G in PowerDFS Data,
- //and we don't use this now.
- //zm_debug_msg0("CWY - HAL_MODE_11B");
+ /* Disable 11B mode because it differs from 11G only in the PowerDFS data,
+ which we do not use now.
+ zm_debug_msg0("CWY - HAL_MODE_11B"); */
channelBM = NULL;
- //rd = &rd2GHz;
- //channelBM = rd->chan11b;
- //freqs = &regDmn2GhzFreq[0];
- //ctl = rd->conformanceTestLimit | CTL_11B;
- //zm_debug_msg2("CWY - HAL_MODE_11B, channelBM = 0x", *channelBM);
+ /* rd = &rd2GHz;
+ channelBM = rd->chan11b;
+ freqs = &regDmn2GhzFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_11B;
+ zm_debug_msg2("CWY - HAL_MODE_11B, channelBM = 0x", *channelBM); */
break;
case HAL_MODE_11G:
- if ((hpPriv->OpFlags & 0x2) != 0)
- {
- rd = &rd2GHz;
- channelBM = rd->chan11g;
- freqs = &regDmn2Ghz11gFreq[0];
- c_lo = 2412; //from channel 1
- //c_hi = 2462; //to channel 11
- c_hi = 2472; //to channel 13
- //ctl = rd->conformanceTestLimit | CTL_11G;
- //zm_debug_msg2("CWY - HAL_MODE_11G, channelBM = 0x", *channelBM);
- }
- //else
- {
- //channelBM = NULL;
- }
+ if ((hpPriv->OpFlags & 0x2) != 0) {
+ rd = &rd2GHz;
+ channelBM = rd->chan11g;
+ freqs = &regDmn2Ghz11gFreq[0];
+ c_lo = 2412; /* from channel 1 */
+ /* c_hi = 2462; to channel 11 */
+ c_hi = 2472; /* to channel 13 */
+ /* ctl = rd->conformanceTestLimit | CTL_11G; */
+ /* zm_debug_msg2("CWY - HAL_MODE_11G, channelBM = 0x", *channelBM); */
+ }
+ /* else
+ channelBM = NULL;
+ */
break;
case HAL_MODE_11G_TURBO:
- //we don't have turbo mode so we disable it
- //zm_debug_msg0("CWY - HAL_MODE_11G_TURBO");
- channelBM = NULL;
- //rd = &rd2GHz;
- //channelBM = rd->chan11g_turbo;
- //freqs = &regDmn2Ghz11gTurboFreq[0];
- //ctl = rd->conformanceTestLimit | CTL_108G;
+ /* we don't have turbo mode so we disable it
+ zm_debug_msg0("CWY - HAL_MODE_11G_TURBO"); */
+ channelBM = NULL;
+ /* rd = &rd2GHz;
+ channelBM = rd->chan11g_turbo;
+ freqs = &regDmn2Ghz11gTurboFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_108G; */
break;
case HAL_MODE_11A_TURBO:
- //we don't have turbo mode so we disable it
- //zm_debug_msg0("CWY - HAL_MODE_11A_TURBO");
- channelBM = NULL;
- //rd = &rd5GHz;
- //channelBM = rd->chan11a_dyn_turbo;
- //freqs = &regDmn5GhzTurboFreq[0];
- //ctl = rd->conformanceTestLimit | CTL_108G;
+ /* we don't have turbo mode so we disable it
+ zm_debug_msg0("CWY - HAL_MODE_11A_TURBO"); */
+ channelBM = NULL;
+ /* rd = &rd5GHz;
+ channelBM = rd->chan11a_dyn_turbo;
+ freqs = &regDmn5GhzTurboFreq[0];
+ ctl = rd->conformanceTestLimit | CTL_108G; */
break;
default:
- zm_debug_msg1("Unkonwn HAL mode ", cm->mode);
+ zm_debug_msg1("Unkonwn HAL mode ", cm->mode);
continue;
}
- if (channelBM == NULL)
- {
- //zm_debug_msg0("CWY - channelBM is NULL");
+
+ if (channelBM == NULL) {
+ /* zm_debug_msg0("CWY - channelBM is NULL"); */
continue;
- }
- if (isChanBitMaskZero(channelBM))
- {
- //zm_debug_msg0("CWY - BitMask is Zero");
- continue;
- }
-
- // RAY:Is it ok??
- if (freqs == NULL )
- {
- continue;
- }
-
- for (b=0;b<64*BMLEN; b++)
- {
- if (IS_BIT_SET(b,channelBM))
- {
+ }
+
+ if (isChanBitMaskZero(channelBM)) {
+ /* zm_debug_msg0("CWY - BitMask is Zero"); */
+ continue;
+ }
+
+ /* RAY:Is it ok?? */
+ if (freqs == NULL)
+ continue;
+
+ for (b = 0 ; b < 64*BMLEN ; b++) {
+ if (IS_BIT_SET(b, channelBM)) {
fband = &freqs[b];
- //zm_debug_msg1("CWY - lowChannel = ", fband->lowChannel);
- //zm_debug_msg1("CWY - highChannel = ", fband->highChannel);
- //zm_debug_msg1("CWY - channelSep = ", fband->channelSep);
- for (c=fband->lowChannel; c <= fband->highChannel;
- c += fband->channelSep)
- {
+ /* zm_debug_msg1("CWY - lowChannel = ", fband->lowChannel);
+ zm_debug_msg1("CWY - highChannel = ", fband->highChannel);
+ zm_debug_msg1("CWY - channelSep = ", fband->channelSep); */
+ for (c = fband->lowChannel; c <= fband->highChannel;
+ c += fband->channelSep) {
ZM_HAL_CHANNEL icv;
- //Disable all DFS channel
- if ((hpPriv->disableDfsCh==0) || (!(fband->useDfs & rd->dfsMask)))
- {
- if( fband->channelBW < 20 )
- {
- /**************************************************************/
- /* */
- /* Temporary discard channel that BW < 20MHz (5 or 10MHz) */
- /* Our architecture does not implemnt it !!! */
- /* */
- /**************************************************************/
- continue;
- }
- if ((c >= c_lo) && (c <= c_hi))
- {
- icv.channel = c;
- icv.channelFlags = cm->flags;
- icv.maxRegTxPower = fband->powerDfs;
- if (fband->usePassScan & rd->pscan)
- icv.channelFlags |= ZM_REG_FLAG_CHANNEL_PASSIVE;
- else
- icv.channelFlags &= ~ZM_REG_FLAG_CHANNEL_PASSIVE;
- if (fband->useDfs & rd->dfsMask)
- icv.privFlags = ZM_REG_FLAG_CHANNEL_DFS;
- else
- icv.privFlags = 0;
-
- /* For now disable radar for FCC3 */
- if (fband->useDfs & rd->dfsMask & DFS_FCC3)
- {
- icv.privFlags &= ~ZM_REG_FLAG_CHANNEL_DFS;
- icv.privFlags |= ZM_REG_FLAG_CHANNEL_DFS_CLEAR;
- }
-
- if(rd->flags & LIMIT_FRAME_4MS)
- icv.privFlags |= ZM_REG_FLAG_CHANNEL_DFS_CLEAR;
-
- icv.minTxPower = 0;
- icv.maxTxPower = 0;
-
- zm_assert(next < 60);
-
- wd->regulationTable.allowChannel[next++] = icv;
- }
+ /* Disable all DFS channels */
+ if ((hpPriv->disableDfsCh == 0) || (!(fband->useDfs & rd->dfsMask))) {
+ if (fband->channelBW < 20) {
+ /**************************************************************/
+ /* */
+ /* Temporarily discard channels whose BW < 20MHz (5 or 10MHz) */
+ /* Our architecture does not implement them !!! */
+ /* */
+ /**************************************************************/
+ continue;
+ }
+ if ((c >= c_lo) && (c <= c_hi)) {
+ icv.channel = c;
+ icv.channelFlags = cm->flags;
+ icv.maxRegTxPower = fband->powerDfs;
+ if (fband->usePassScan & rd->pscan)
+ icv.channelFlags |= ZM_REG_FLAG_CHANNEL_PASSIVE;
+ else
+ icv.channelFlags &= ~ZM_REG_FLAG_CHANNEL_PASSIVE;
+ if (fband->useDfs & rd->dfsMask)
+ icv.privFlags = ZM_REG_FLAG_CHANNEL_DFS;
+ else
+ icv.privFlags = 0;
+
+ /* For now disable radar for FCC3 */
+ if (fband->useDfs & rd->dfsMask & DFS_FCC3) {
+ icv.privFlags &= ~ZM_REG_FLAG_CHANNEL_DFS;
+ icv.privFlags |= ZM_REG_FLAG_CHANNEL_DFS_CLEAR;
+ }
+
+ if (rd->flags & LIMIT_FRAME_4MS)
+ icv.privFlags |= ZM_REG_FLAG_CHANNEL_DFS_CLEAR;
+
+ icv.minTxPower = 0;
+ icv.maxTxPower = 0;
+
+ zm_assert(next < 60);
+
+ wd->regulationTable.allowChannel[next++] = icv;
+ }
+ }
}
}
}
}
- }
wd->regulationTable.allowChannelCnt = next;
- #if 0
- {
- /* debug print */
- u32_t i;
- DbgPrint("\n-------------------------------------------\n");
- DbgPrint("zfHpGetRegulationTable print all channel info regincode = 0x%x\n", wd->regulationTable.regionCode);
- DbgPrint("index channel channelFlags maxRegTxPower privFlags useDFS\n");
-
- for (i=0; i<wd->regulationTable.allowChannelCnt; i++)
- {
- DbgPrint("%02d %d %04x %02d %x %x\n",
- i,
- wd->regulationTable.allowChannel[i].channel,
- wd->regulationTable.allowChannel[i].channelFlags,
- wd->regulationTable.allowChannel[i].maxRegTxPower,
- wd->regulationTable.allowChannel[i].privFlags,
- wd->regulationTable.allowChannel[i].privFlags & ZM_REG_FLAG_CHANNEL_DFS);
- }
- }
- #endif
-
- zmw_leave_critical_section(dev);
+ #if 0
+ {
+ /* debug print */
+ u32_t i;
+ DbgPrint("\n-------------------------------------------\n");
+ DbgPrint("zfHpGetRegulationTable print all channel info regincode = 0x%x\n", wd->regulationTable.regionCode);
+ DbgPrint("index channel channelFlags maxRegTxPower privFlags useDFS\n");
+
+ for (i = 0 ; i < wd->regulationTable.allowChannelCnt ; i++) {
+ DbgPrint("%02d %d %04x %02d %x %x\n", i,
+ wd->regulationTable.allowChannel[i].channel,
+ wd->regulationTable.allowChannel[i].channelFlags,
+ wd->regulationTable.allowChannel[i].maxRegTxPower,
+ wd->regulationTable.allowChannel[i].privFlags,
+ wd->regulationTable.allowChannel[i].privFlags & ZM_REG_FLAG_CHANNEL_DFS);
+ }
+ }
+ #endif
+
+ zmw_leave_critical_section(dev);
}
-void zfHpGetRegulationTablefromRegionCode(zdev_t* dev, u16_t regionCode)
+void zfHpGetRegulationTablefromRegionCode(zdev_t *dev, u16_t regionCode)
{
- u16_t c_lo = 2000, c_hi = 6000; //default channel is all enable
- u8_t isoName[3] = {'N', 'A', 0};
+ u16_t c_lo = 2000, c_hi = 6000; /* default: all channels enabled */
+ u8_t isoName[3] = {'N', 'A', 0};
- zfCoreSetIsoName(dev, isoName);
+ zfCoreSetIsoName(dev, isoName);
- zfHpGetRegulationTable(dev, regionCode, c_lo, c_hi);
+ zfHpGetRegulationTable(dev, regionCode, c_lo, c_hi);
}
-void zfHpGetRegulationTablefromCountry(zdev_t* dev, u16_t CountryCode)
+void zfHpGetRegulationTablefromCountry(zdev_t *dev, u16_t CountryCode)
{
- u16_t i;
- u16_t c_lo = 2000, c_hi = 6000; //default channel is all enable
- u16_t RegDomain;
-
- zmw_get_wlan_dev(dev);
+ u16_t i;
+ u16_t c_lo = 2000, c_hi = 6000; /* default: all channels enabled */
+ u16_t RegDomain;
- zmw_declare_for_critical_section();
+ zmw_get_wlan_dev(dev);
- for (i = 0; i < N(allCountries); i++)
- {
- if (CountryCode == allCountries[i].countryCode)
- {
- RegDomain = allCountries[i].regDmnEnum;
+ zmw_declare_for_critical_section();
- // read the ACU country code from EEPROM
- zfCoreSetIsoName(dev, (u8_t*)allCountries[i].isoName);
+ for (i = 0; i < N(allCountries); i++) {
+ if (CountryCode == allCountries[i].countryCode) {
+ RegDomain = allCountries[i].regDmnEnum;
- //zm_debug_msg_s("CWY - Country Name = ", allCountries[i].name);
+ /* read the ACU country code from EEPROM */
+ zfCoreSetIsoName(dev, (u8_t *)allCountries[i].isoName);
- if (wd->regulationTable.regionCode != RegDomain)
- {
- //zm_debug_msg0("CWY - Change regulatory table");
+ /* zm_debug_msg_s("CWY - Country Name = ", allCountries[i].name); */
- zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
- }
- return;
- }
- }
- zm_debug_msg1("Invalid CountryCode = ", CountryCode);
+ if (wd->regulationTable.regionCode != RegDomain) {
+ /* zm_debug_msg0("CWY - Change regulatory table"); */
+ zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
+ }
+ return;
+ }
+ }
+ zm_debug_msg1("Invalid CountryCode = ", CountryCode);
}
-u8_t zfHpGetRegulationTablefromISO(zdev_t* dev, u8_t *countryInfo, u8_t length)
+u8_t zfHpGetRegulationTablefromISO(zdev_t *dev, u8_t *countryInfo, u8_t length)
{
- u16_t i;
- u16_t RegDomain;
- u16_t c_lo = 2000, c_hi = 6000; //default channel is all enable
- //u8_t strLen = 2;
-
- zmw_get_wlan_dev(dev);
-
- zmw_declare_for_critical_section();
-
- if (countryInfo[4] != 0x20)
- { // with (I)ndoor/(O)utdoor info
- //strLen = 3;
- }
- //zm_debug_msg_s("Desired iso name = ", isoName);
- for (i = 0; i < N(allCountries); i++)
- {
- //zm_debug_msg_s("Current iso name = ", allCountries[i].isoName);
- if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, (u8_t *)&countryInfo[2], length-1))
- {
- //DbgPrint("Set current iso name = %s\n", allCountries[i].isoName);
- //zm_debug_msg0("iso name hit!!");
-
- RegDomain = allCountries[i].regDmnEnum;
-
- if (wd->regulationTable.regionCode != RegDomain)
- {
- zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
- }
-
- //while (index < (countryInfo[1]+2))
- //{
- // if (countryInfo[index] <= 14)
- // {
- // /* calculate 2.4GHz low boundary channel frequency */
- // ch = countryInfo[index];
- // if ( ch == 14 )
- // c_lo = ZM_CH_G_14;
- // else
- // c_lo = ZM_CH_G_1 + (ch - 1) * 5;
- // /* calculate 2.4GHz high boundary channel frequency */
- // ch = countryInfo[index] + countryInfo[index + 1] - 1;
- // if ( ch == 14 )
- // c_hi = ZM_CH_G_14;
- // else
- // c_hi = ZM_CH_G_1 + (ch - 1) * 5;
- // }
- // else
- // {
- // /* calculate 5GHz low boundary channel frequency */
- // ch = countryInfo[index];
- // if ( (ch >= 184)&&(ch <= 196) )
- // c_lo = 4000 + ch*5;
- // else
- // c_lo = 5000 + ch*5;
- // /* calculate 5GHz high boundary channel frequency */
- // ch = countryInfo[index] + countryInfo[index + 1] - 1;
- // if ( (ch >= 184)&&(ch <= 196) )
- // c_hi = 4000 + ch*5;
- // else
- // c_hi = 5000 + ch*5;
- // }
- //
- // zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
- //
- // index+=3;
- //}
-
- return 0;
- }
- }
- //zm_debug_msg_s("Invalid iso name = ", &countryInfo[2]);
- return 1;
+ u16_t i;
+ u16_t RegDomain;
+ u16_t c_lo = 2000, c_hi = 6000; /* default: all channels enabled */
+ /* u8_t strLen = 2; */
+
+ zmw_get_wlan_dev(dev);
+
+ zmw_declare_for_critical_section();
+
+ if (countryInfo[4] != 0x20) {
+ /* with (I)ndoor/(O)utdoor info
+ strLen = 3; */
+ }
+ /* zm_debug_msg_s("Desired iso name = ", isoName); */
+ for (i = 0; i < N(allCountries); i++) {
+ /* zm_debug_msg_s("Current iso name = ", allCountries[i].isoName); */
+ if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, (u8_t *)&countryInfo[2], length-1)) {
+ /* DbgPrint("Set current iso name = %s\n", allCountries[i].isoName); */
+ /* zm_debug_msg0("iso name hit!!"); */
+
+ RegDomain = allCountries[i].regDmnEnum;
+
+ if (wd->regulationTable.regionCode != RegDomain)
+ zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
+ /*
+ while (index < (countryInfo[1]+2)) {
+ if (countryInfo[index] <= 14) {
+ // calculate 2.4GHz low boundary channel frequency
+ ch = countryInfo[index];
+ if ( ch == 14 )
+ c_lo = ZM_CH_G_14;
+ else
+ c_lo = ZM_CH_G_1 + (ch - 1) * 5;
+ // calculate 2.4GHz high boundary channel frequency
+ ch = countryInfo[index] + countryInfo[index + 1] - 1;
+ if ( ch == 14 )
+ c_hi = ZM_CH_G_14;
+ else
+ c_hi = ZM_CH_G_1 + (ch - 1) * 5;
+ } else {
+ // calculate 5GHz low boundary channel frequency
+ ch = countryInfo[index];
+ if ( (ch >= 184)&&(ch <= 196) )
+ c_lo = 4000 + ch*5;
+ else
+ c_lo = 5000 + ch*5;
+ // calculate 5GHz high boundary channel frequency
+ ch = countryInfo[index] + countryInfo[index + 1] - 1;
+ if ( (ch >= 184)&&(ch <= 196) )
+ c_hi = 4000 + ch*5;
+ else
+ c_hi = 5000 + ch*5;
+ }
+
+ zfHpGetRegulationTable(dev, RegDomain, c_lo, c_hi);
+
+ index+=3;
+ }
+ */
+ return 0;
+ }
+ }
+ /* zm_debug_msg_s("Invalid iso name = ", &countryInfo[2]); */
+ return 1;
}
-const char* zfHpGetisoNamefromregionCode(zdev_t* dev, u16_t regionCode)
+const char *zfHpGetisoNamefromregionCode(zdev_t *dev, u16_t regionCode)
{
- u16_t i;
-
- for (i = 0; i < N(allCountries); i++)
- {
- if (allCountries[i].regDmnEnum == regionCode)
- {
- return allCountries[i].isoName;
- }
- }
- /* no matching item, return default */
- return allCountries[0].isoName;
+ u16_t i;
+
+ for (i = 0; i < N(allCountries); i++) {
+ if (allCountries[i].regDmnEnum == regionCode)
+ return allCountries[i].isoName;
+ }
+ /* no matching item, return default */
+ return allCountries[0].isoName;
}
-u16_t zfHpGetRegionCodeFromIsoName(zdev_t* dev, u8_t *countryIsoName)
+u16_t zfHpGetRegionCodeFromIsoName(zdev_t *dev, u8_t *countryIsoName)
{
- u16_t i;
- u16_t regionCode;
-
- /* if no matching item, return default */
- regionCode = DEF_REGDMN;
-
- for (i = 0; i < N(allCountries); i++)
- {
- if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, countryIsoName, 2))
- {
- regionCode = allCountries[i].regDmnEnum;
- break;
- }
- }
-
- return regionCode;
+ u16_t i;
+ u16_t regionCode;
+
+ /* if no matching item, return default */
+ regionCode = DEF_REGDMN;
+
+ for (i = 0; i < N(allCountries); i++) {
+ if (zfMemoryIsEqual((u8_t *)allCountries[i].isoName, countryIsoName, 2)) {
+ regionCode = allCountries[i].regDmnEnum;
+ break;
+ }
+ }
+
+ return regionCode;
}
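zfHpGetRegulationTablefromCountry(), zfHpGetisoNamefromregionCode() and zfHpGetRegionCodeFromIsoName() all walk allCountries[] linearly and fall back to a default entry when nothing matches. Below is a minimal sketch of that ISO-name lookup, assuming a tiny made-up table and a placeholder DEF_REGDMN value; neither matches the driver's real data.

#include <stdio.h>
#include <string.h>

#define N(a) (sizeof(a) / sizeof((a)[0]))

#define DEF_REGDMN 0x00    /* placeholder default, not the driver's value */

struct country { unsigned short regionCode; const char *isoName; };

static const struct country countries[] = {
	{ 0x37, "US" },
	{ 0x40, "JP" },
	{ 0x30, "GB" },
};

/* Map a two-letter ISO name to a region code, defaulting when unknown,
 * mirroring the shape of zfHpGetRegionCodeFromIsoName(). */
static unsigned short region_from_iso(const char *iso)
{
	unsigned i;

	for (i = 0; i < N(countries); i++)
		if (memcmp(countries[i].isoName, iso, 2) == 0)
			return countries[i].regionCode;
	return DEF_REGDMN;      /* no match: fall back to the default domain */
}

int main(void)
{
	printf("US -> 0x%x\n", region_from_iso("US"));
	printf("ZZ -> 0x%x\n", region_from_iso("ZZ"));
	return 0;
}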
/************************************************************************/
@@ -2029,323 +1980,294 @@ u16_t zfHpGetRegionCodeFromIsoName(zdev_t* dev, u8_t *countryIsoName)
/* Chao-Wen Yang ZyDAS Technology Corporation 2007.3 */
/* */
/************************************************************************/
-u16_t zfHpDeleteAllowChannel(zdev_t* dev, u16_t freq)
+u16_t zfHpDeleteAllowChannel(zdev_t *dev, u16_t freq)
{
- u16_t i, bandIndex = 0;
- u16_t dfs5GBand[][2] = {{5150, 5240}, {5260, 5350}, {5450, 5700}, {5725, 5825}};
-
- zmw_get_wlan_dev(dev);
- /* Find which band does this frequency belong */
- for (i = 0; i < 4; i++)
- {
- if ((freq >= dfs5GBand[i][0]) && (freq <= dfs5GBand[i][1]))
- bandIndex = i + 1;
- }
-
- if (bandIndex == 0)
- {
- /* 2.4G, don't care */
- return 0;
- }
- else
- {
- bandIndex--;
- }
- /* Set all channels in this band to passive scan */
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- if ((wd->regulationTable.allowChannel[i].channel >= dfs5GBand[bandIndex][0]) &&
- (wd->regulationTable.allowChannel[i].channel <= dfs5GBand[bandIndex][1]))
- {
- /* if channel is not passive, set it to be passive and mark it */
- if ((wd->regulationTable.allowChannel[i].channelFlags &
- ZM_REG_FLAG_CHANNEL_PASSIVE) == 0)
- {
- wd->regulationTable.allowChannel[i].channelFlags |=
- (ZM_REG_FLAG_CHANNEL_PASSIVE | ZM_REG_FLAG_CHANNEL_CSA);
- }
- }
- }
-
- return 0;
+ u16_t i, bandIndex = 0;
+ u16_t dfs5GBand[][2] = { {5150, 5240}, {5260, 5350}, {5450, 5700}, {5725, 5825} };
+
+ zmw_get_wlan_dev(dev);
+ /* Find which band this frequency belongs to */
+ for (i = 0; i < 4; i++) {
+ if ((freq >= dfs5GBand[i][0]) && (freq <= dfs5GBand[i][1]))
+ bandIndex = i + 1;
+ }
+
+ if (bandIndex == 0) {
+ /* 2.4G, don't care */
+ return 0;
+ } else {
+ bandIndex--;
+ }
+ /* Set all channels in this band to passive scan */
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ if ((wd->regulationTable.allowChannel[i].channel >= dfs5GBand[bandIndex][0]) &&
+ (wd->regulationTable.allowChannel[i].channel <= dfs5GBand[bandIndex][1])) {
+ /* if channel is not passive, set it to be passive and mark it */
+ if ((wd->regulationTable.allowChannel[i].channelFlags &
+ ZM_REG_FLAG_CHANNEL_PASSIVE) == 0) {
+ wd->regulationTable.allowChannel[i].channelFlags |=
+ (ZM_REG_FLAG_CHANNEL_PASSIVE | ZM_REG_FLAG_CHANNEL_CSA);
+ }
+ }
+ }
+
+ return 0;
}
-u16_t zfHpAddAllowChannel(zdev_t* dev, u16_t freq)
+u16_t zfHpAddAllowChannel(zdev_t *dev, u16_t freq)
{
- u16_t i, j, arrayIndex;
+ u16_t i, j, arrayIndex;
- zmw_get_wlan_dev(dev);
+ zmw_get_wlan_dev(dev);
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- if (wd->regulationTable.allowChannel[i].channel == freq)
- break;
- }
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ if (wd->regulationTable.allowChannel[i].channel == freq)
+ break;
+ }
- if ( i == wd->regulationTable.allowChannelCnt)
- {
- for (j = 0; j < wd->regulationTable.allowChannelCnt; j++)
- {
- if (wd->regulationTable.allowChannel[j].channel > freq)
- break;
- }
+ if (i == wd->regulationTable.allowChannelCnt) {
+ for (j = 0; j < wd->regulationTable.allowChannelCnt; j++) {
+ if (wd->regulationTable.allowChannel[j].channel > freq)
+ break;
+ }
- //zm_debug_msg1("CWY - add frequency = ", freq);
- //zm_debug_msg1("CWY - channel array index = ", j);
+ /* zm_debug_msg1("CWY - add frequency = ", freq);
+ zm_debug_msg1("CWY - channel array index = ", j); */
- arrayIndex = j;
+ arrayIndex = j;
- if (arrayIndex < wd->regulationTable.allowChannelCnt)
- {
- for (j = wd->regulationTable.allowChannelCnt; j > arrayIndex; j--)
- wd->regulationTable.allowChannel[j] = wd->regulationTable.allowChannel[j - 1];
- }
- wd->regulationTable.allowChannel[arrayIndex].channel = freq;
+ if (arrayIndex < wd->regulationTable.allowChannelCnt) {
+ for (j = wd->regulationTable.allowChannelCnt; j > arrayIndex; j--)
+ wd->regulationTable.allowChannel[j] = wd->regulationTable.allowChannel[j - 1];
+ }
+ wd->regulationTable.allowChannel[arrayIndex].channel = freq;
- wd->regulationTable.allowChannelCnt++;
- }
+ wd->regulationTable.allowChannelCnt++;
+ }
- return 0;
+ return 0;
}
-u16_t zfHpIsDfsChannelNCS(zdev_t* dev, u16_t freq)
+u16_t zfHpIsDfsChannelNCS(zdev_t *dev, u16_t freq)
{
- u8_t flag = ZM_REG_FLAG_CHANNEL_DFS;
- u16_t i;
- zmw_get_wlan_dev(dev);
-
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- //DbgPrint("DFS:freq=%d, chan=%d", freq, wd->regulationTable.allowChannel[i].channel);
- if (wd->regulationTable.allowChannel[i].channel == freq)
- {
- flag = wd->regulationTable.allowChannel[i].privFlags;
- break;
- }
- }
-
- return (flag & (ZM_REG_FLAG_CHANNEL_DFS|ZM_REG_FLAG_CHANNEL_DFS_CLEAR));
+ u8_t flag = ZM_REG_FLAG_CHANNEL_DFS;
+ u16_t i;
+ zmw_get_wlan_dev(dev);
+
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ /* DbgPrint("DFS:freq=%d, chan=%d", freq, wd->regulationTable.allowChannel[i].channel); */
+ if (wd->regulationTable.allowChannel[i].channel == freq) {
+ flag = wd->regulationTable.allowChannel[i].privFlags;
+ break;
+ }
+ }
+
+ return flag & (ZM_REG_FLAG_CHANNEL_DFS|ZM_REG_FLAG_CHANNEL_DFS_CLEAR);
}
-u16_t zfHpIsDfsChannel(zdev_t* dev, u16_t freq)
+u16_t zfHpIsDfsChannel(zdev_t *dev, u16_t freq)
{
- u8_t flag = ZM_REG_FLAG_CHANNEL_DFS;
- u16_t i;
- zmw_get_wlan_dev(dev);
+ u8_t flag = ZM_REG_FLAG_CHANNEL_DFS;
+ u16_t i;
+ zmw_get_wlan_dev(dev);
- zmw_declare_for_critical_section();
+ zmw_declare_for_critical_section();
- zmw_enter_critical_section(dev);
+ zmw_enter_critical_section(dev);
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- //DbgPrint("DFS:freq=%d, chan=%d", freq, wd->regulationTable.allowChannel[i].channel);
- if (wd->regulationTable.allowChannel[i].channel == freq)
- {
- flag = wd->regulationTable.allowChannel[i].privFlags;
- break;
- }
- }
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ /* DbgPrint("DFS:freq=%d, chan=%d", freq, wd->regulationTable.allowChannel[i].channel); */
+ if (wd->regulationTable.allowChannel[i].channel == freq) {
+ flag = wd->regulationTable.allowChannel[i].privFlags;
+ break;
+ }
+ }
- zmw_leave_critical_section(dev);
+ zmw_leave_critical_section(dev);
- return (flag & (ZM_REG_FLAG_CHANNEL_DFS|ZM_REG_FLAG_CHANNEL_DFS_CLEAR));
+ return flag & (ZM_REG_FLAG_CHANNEL_DFS|ZM_REG_FLAG_CHANNEL_DFS_CLEAR);
}
-u16_t zfHpIsAllowedChannel(zdev_t* dev, u16_t freq)
+u16_t zfHpIsAllowedChannel(zdev_t *dev, u16_t freq)
{
- u16_t i;
- zmw_get_wlan_dev(dev);
-
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- if (wd->regulationTable.allowChannel[i].channel == freq)
- {
- return 1;
- }
- }
-
- return 0;
+ u16_t i;
+ zmw_get_wlan_dev(dev);
+
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ if (wd->regulationTable.allowChannel[i].channel == freq)
+ return 1;
+ }
+
+ return 0;
}
-u16_t zfHpFindFirstNonDfsChannel(zdev_t* dev, u16_t aBand)
+u16_t zfHpFindFirstNonDfsChannel(zdev_t *dev, u16_t aBand)
{
- u16_t chan = 2412;
- u16_t i;
- zmw_get_wlan_dev(dev);
-
- zmw_declare_for_critical_section();
-
- zmw_enter_critical_section(dev);
-
- for (i = 0; i < wd->regulationTable.allowChannelCnt; i++)
- {
- if ((wd->regulationTable.allowChannel[i].privFlags & ZM_REG_FLAG_CHANNEL_DFS) != 0)
- {
- if (aBand)
- {
- if (wd->regulationTable.allowChannel[i].channel > 3000)
- {
- chan = wd->regulationTable.allowChannel[i].channel;
- break;
- }
- }
- else
- {
- if (wd->regulationTable.allowChannel[i].channel < 3000)
- {
- chan = wd->regulationTable.allowChannel[i].channel;
- break;
- }
- }
- }
- }
-
- zmw_leave_critical_section(dev);
-
- return chan;
+ u16_t chan = 2412;
+ u16_t i;
+ zmw_get_wlan_dev(dev);
+
+ zmw_declare_for_critical_section();
+
+ zmw_enter_critical_section(dev);
+
+ for (i = 0; i < wd->regulationTable.allowChannelCnt; i++) {
+ if ((wd->regulationTable.allowChannel[i].privFlags & ZM_REG_FLAG_CHANNEL_DFS) != 0) {
+ if (aBand) {
+ if (wd->regulationTable.allowChannel[i].channel > 3000) {
+ chan = wd->regulationTable.allowChannel[i].channel;
+ break;
+ }
+ } else {
+ if (wd->regulationTable.allowChannel[i].channel < 3000) {
+ chan = wd->regulationTable.allowChannel[i].channel;
+ break;
+ }
+ }
+ }
+ }
+
+ zmw_leave_critical_section(dev);
+
+ return chan;
}
/* porting from ACU */
/* save RegulatoryDomain in hpriv */
-u8_t zfHpGetRegulatoryDomain(zdev_t* dev)
+u8_t zfHpGetRegulatoryDomain(zdev_t *dev)
{
- zmw_get_wlan_dev(dev);
-
- switch (wd->regulationTable.regionCode)
- {
- case NO_ENUMRD:
- return 0;
- break;
- case FCC1_FCCA:
- case FCC1_WORLD:
- case FCC4_FCCA:
- case FCC5_FCCA:
- case FCC2_WORLD:
- case FCC2_ETSIC:
- case FCC3_FCCA:
- case FCC3_WORLD:
- case FCC1:
- case FCC2:
- case FCC3:
- case FCC4:
- case FCC5:
- case FCCA:
- return 0x10;//WG_AMERICAS DOT11_REG_DOMAIN_FCC United States
- break;
-
- case FCC2_FCCA:
- return 0x20;//DOT11_REG_DOMAIN_DOC Canada
- break;
-
- case ETSI1_WORLD:
- case ETSI3_ETSIA:
- case ETSI2_WORLD:
- case ETSI3_WORLD:
- case ETSI4_WORLD:
- case ETSI4_ETSIC:
- case ETSI5_WORLD:
- case ETSI6_WORLD:
- case ETSI_RESERVED:
- case ETSI1:
- case ETSI2:
- case ETSI3:
- case ETSI4:
- case ETSI5:
- case ETSI6:
- case ETSIA:
- case ETSIB:
- case ETSIC:
- return 0x30;//WG_EMEA DOT11_REG_DOMAIN_ETSI Most of Europe
- break;
-
- case MKK1_MKKA:
- case MKK1_MKKB:
- case MKK2_MKKA:
- case MKK1_FCCA:
- case MKK1_MKKA1:
- case MKK1_MKKA2:
- case MKK1_MKKC:
- case MKK3_MKKB:
- case MKK3_MKKA2:
- case MKK3_MKKC:
- case MKK4_MKKB:
- case MKK4_MKKA2:
- case MKK4_MKKC:
- case MKK5_MKKB:
- case MKK5_MKKA2:
- case MKK5_MKKC:
- case MKK6_MKKB:
- case MKK6_MKKA2:
- case MKK6_MKKC:
- case MKK7_MKKB:
- case MKK7_MKKA:
- case MKK7_MKKC:
- case MKK8_MKKB:
- case MKK8_MKKA2:
- case MKK8_MKKC:
- case MKK6_MKKA1:
- case MKK6_FCCA:
- case MKK7_MKKA1:
- case MKK7_FCCA:
- case MKK9_FCCA:
- case MKK9_MKKA1:
- case MKK9_MKKC:
- case MKK9_MKKA2:
- case MKK10_FCCA:
- case MKK10_MKKA1:
- case MKK10_MKKC:
- case MKK10_MKKA2:
- case MKK11_MKKA:
- case MKK11_FCCA:
- case MKK11_MKKA1:
- case MKK11_MKKC:
- case MKK11_MKKA2:
- case MKK12_MKKA:
- case MKK12_FCCA:
- case MKK12_MKKA1:
- case MKK12_MKKC:
- case MKK12_MKKA2:
- case MKK3_MKKA:
- case MKK3_MKKA1:
- case MKK3_FCCA:
- case MKK4_MKKA:
- case MKK4_MKKA1:
- case MKK4_FCCA:
- case MKK9_MKKA:
- case MKK10_MKKA:
- case MKK1:
- case MKK2:
- case MKK3:
- case MKK4:
- case MKK5:
- case MKK6:
- case MKK7:
- case MKK8:
- case MKK9:
- case MKK10:
- case MKK11:
- case MKK12:
- case MKKA:
- case MKKC:
- return 0x40;//WG_JAPAN DOT11_REG_DOMAIN_MKK Japan
- break;
-
- default:
- break;
- }
- return 0xFF;// Didn't input RegDmn by mean to distinguish by customer
+ zmw_get_wlan_dev(dev);
+
+ switch (wd->regulationTable.regionCode) {
+ case NO_ENUMRD:
+ return 0;
+ break;
+ case FCC1_FCCA:
+ case FCC1_WORLD:
+ case FCC4_FCCA:
+ case FCC5_FCCA:
+ case FCC2_WORLD:
+ case FCC2_ETSIC:
+ case FCC3_FCCA:
+ case FCC3_WORLD:
+ case FCC1:
+ case FCC2:
+ case FCC3:
+ case FCC4:
+ case FCC5:
+ case FCCA:
+ return 0x10;/* WG_AMERICAS DOT11_REG_DOMAIN_FCC United States */
+ break;
+
+ case FCC2_FCCA:
+ return 0x20;/* DOT11_REG_DOMAIN_DOC Canada */
+ break;
+
+ case ETSI1_WORLD:
+ case ETSI3_ETSIA:
+ case ETSI2_WORLD:
+ case ETSI3_WORLD:
+ case ETSI4_WORLD:
+ case ETSI4_ETSIC:
+ case ETSI5_WORLD:
+ case ETSI6_WORLD:
+ case ETSI_RESERVED:
+ case ETSI1:
+ case ETSI2:
+ case ETSI3:
+ case ETSI4:
+ case ETSI5:
+ case ETSI6:
+ case ETSIA:
+ case ETSIB:
+ case ETSIC:
+ return 0x30;/* WG_EMEA DOT11_REG_DOMAIN_ETSI Most of Europe */
+ break;
+
+ case MKK1_MKKA:
+ case MKK1_MKKB:
+ case MKK2_MKKA:
+ case MKK1_FCCA:
+ case MKK1_MKKA1:
+ case MKK1_MKKA2:
+ case MKK1_MKKC:
+ case MKK3_MKKB:
+ case MKK3_MKKA2:
+ case MKK3_MKKC:
+ case MKK4_MKKB:
+ case MKK4_MKKA2:
+ case MKK4_MKKC:
+ case MKK5_MKKB:
+ case MKK5_MKKA2:
+ case MKK5_MKKC:
+ case MKK6_MKKB:
+ case MKK6_MKKA2:
+ case MKK6_MKKC:
+ case MKK7_MKKB:
+ case MKK7_MKKA:
+ case MKK7_MKKC:
+ case MKK8_MKKB:
+ case MKK8_MKKA2:
+ case MKK8_MKKC:
+ case MKK6_MKKA1:
+ case MKK6_FCCA:
+ case MKK7_MKKA1:
+ case MKK7_FCCA:
+ case MKK9_FCCA:
+ case MKK9_MKKA1:
+ case MKK9_MKKC:
+ case MKK9_MKKA2:
+ case MKK10_FCCA:
+ case MKK10_MKKA1:
+ case MKK10_MKKC:
+ case MKK10_MKKA2:
+ case MKK11_MKKA:
+ case MKK11_FCCA:
+ case MKK11_MKKA1:
+ case MKK11_MKKC:
+ case MKK11_MKKA2:
+ case MKK12_MKKA:
+ case MKK12_FCCA:
+ case MKK12_MKKA1:
+ case MKK12_MKKC:
+ case MKK12_MKKA2:
+ case MKK3_MKKA:
+ case MKK3_MKKA1:
+ case MKK3_FCCA:
+ case MKK4_MKKA:
+ case MKK4_MKKA1:
+ case MKK4_FCCA:
+ case MKK9_MKKA:
+ case MKK10_MKKA:
+ case MKK1:
+ case MKK2:
+ case MKK3:
+ case MKK4:
+ case MKK5:
+ case MKK6:
+ case MKK7:
+ case MKK8:
+ case MKK9:
+ case MKK10:
+ case MKK11:
+ case MKK12:
+ case MKKA:
+ case MKKC:
+ return 0x40;/* WG_JAPAN DOT11_REG_DOMAIN_MKK Japan */
+ break;
+
+ default:
+ break;
+ }
+ return 0xFF; /* RegDmn not entered, so return 0xFF to let the customer distinguish this case */
}
-
-void zfHpDisableDfsChannel(zdev_t* dev, u8_t disableFlag)
+void zfHpDisableDfsChannel(zdev_t *dev, u8_t disableFlag)
{
- struct zsHpPriv* hpPriv;
+ struct zsHpPriv *hpPriv;
- zmw_get_wlan_dev(dev);
- hpPriv=wd->hpPrivate;
- hpPriv->disableDfsCh = disableFlag;
- return;
+ zmw_get_wlan_dev(dev);
+ hpPriv = wd->hpPrivate;
+ hpPriv->disableDfsCh = disableFlag;
+ return;
}
diff --git a/drivers/staging/otus/ioctl.c b/drivers/staging/otus/ioctl.c
index 84be4b2cd69..a48c8e4a9ea 100644
--- a/drivers/staging/otus/ioctl.c
+++ b/drivers/staging/otus/ioctl.c
@@ -30,34 +30,34 @@
#include "usbdrv.h"
-#define ZD_IOCTL_WPA (SIOCDEVPRIVATE + 1)
-#define ZD_IOCTL_PARAM (SIOCDEVPRIVATE + 2)
-#define ZD_IOCTL_GETWPAIE (SIOCDEVPRIVATE + 3)
+#define ZD_IOCTL_WPA (SIOCDEVPRIVATE + 1)
+#define ZD_IOCTL_PARAM (SIOCDEVPRIVATE + 2)
+#define ZD_IOCTL_GETWPAIE (SIOCDEVPRIVATE + 3)
#ifdef ZM_ENABLE_CENC
-#define ZM_IOCTL_CENC (SIOCDEVPRIVATE + 4)
+#define ZM_IOCTL_CENC (SIOCDEVPRIVATE + 4)
#endif /* ZM_ENABLE_CENC */
-#define ZD_PARAM_ROAMING 0x0001
-#define ZD_PARAM_PRIVACY 0x0002
-#define ZD_PARAM_WPA 0x0003
+#define ZD_PARAM_ROAMING 0x0001
+#define ZD_PARAM_PRIVACY 0x0002
+#define ZD_PARAM_WPA 0x0003
#define ZD_PARAM_COUNTERMEASURES 0x0004
#define ZD_PARAM_DROPUNENCRYPTED 0x0005
-#define ZD_PARAM_AUTH_ALGS 0x0006
-#define ZD_PARAM_WPS_FILTER 0x0007
+#define ZD_PARAM_AUTH_ALGS 0x0006
+#define ZD_PARAM_WPS_FILTER 0x0007
#ifdef ZM_ENABLE_CENC
#define P80211_PACKET_CENCFLAG 0x0001
#endif /* ZM_ENABLE_CENC */
-#define P80211_PACKET_SETKEY 0x0003
+#define P80211_PACKET_SETKEY 0x0003
#define ZD_CMD_SET_ENCRYPT_KEY 0x0001
-#define ZD_CMD_SET_MLME 0x0002
-#define ZD_CMD_SCAN_REQ 0x0003
+#define ZD_CMD_SET_MLME 0x0002
+#define ZD_CMD_SCAN_REQ 0x0003
#define ZD_CMD_SET_GENERIC_ELEMENT 0x0004
-#define ZD_CMD_GET_TSC 0x0005
+#define ZD_CMD_GET_TSC 0x0005
#define ZD_CRYPT_ALG_NAME_LEN 16
-#define ZD_MAX_KEY_SIZE 32
-#define ZD_MAX_GENERIC_SIZE 64
+#define ZD_MAX_KEY_SIZE 32
+#define ZD_MAX_GENERIC_SIZE 64
#include <net/iw_handler.h>
@@ -867,6 +867,7 @@ int usbdrvwext_giwscan(struct net_device *dev,
char *current_ev = extra;
char *end_buf;
int i;
+ struct zsBssListV1 *pBssList;
/* BssList = wd->sta.pBssList; */
/* zmw_get_wlan_dev(dev); */
@@ -874,8 +875,10 @@ int usbdrvwext_giwscan(struct net_device *dev,
return 0;
/* struct zsBssList BssList; */
- struct zsBssListV1 *pBssList = kmalloc(sizeof(struct zsBssListV1),
- GFP_KERNEL);
+ pBssList = kmalloc(sizeof(struct zsBssListV1), GFP_KERNEL);
+ if (pBssList == NULL)
+ return -ENOMEM;
+
if (data->length == 0)
end_buf = extra + IW_SCAN_MAX_DATA;
else
diff --git a/drivers/staging/otus/usbdrv.c b/drivers/staging/otus/usbdrv.c
index 0ce65b5b945..165a82b9ab8 100644
--- a/drivers/staging/otus/usbdrv.c
+++ b/drivers/staging/otus/usbdrv.c
@@ -350,7 +350,8 @@ int usbdrv_open(struct net_device *dev)
}
size = zfiGlobalDataSize(dev);
- if ((mem = kmalloc(size, GFP_KERNEL)) == NULL)
+ mem = kmalloc(size, GFP_KERNEL);
+ if (mem == NULL)
{
rc = -EBUSY;
goto exit;
@@ -698,7 +699,8 @@ void usbdrv_remove1(struct pci_dev *pcid)
struct net_device *dev;
struct usbdrv_private *macp;
- if (!(dev = (struct net_device *) pci_get_drvdata(pcid)))
+ dev = (struct net_device *)pci_get_drvdata(pcid);
+ if (!dev)
return;
macp = dev->ml_priv;
diff --git a/drivers/staging/otus/wwrap.c b/drivers/staging/otus/wwrap.c
index a74f7eea56e..b02eb42cd79 100644
--- a/drivers/staging/otus/wwrap.c
+++ b/drivers/staging/otus/wwrap.c
@@ -956,7 +956,6 @@ int zfLnxCencSendMsg(struct sock *netlink_sk, u_int8_t *msg, int len)
/* Fill in the datagram-related information */
nlh = NLMSG_PUT(skb, 0, 0, WAI_K_MSG, size-sizeof(*nlh));
pos = NLMSG_DATA(nlh);
- memset(pos, 0, len);
/* Data to be transferred to user space */
memcpy(pos, msg, len);
diff --git a/drivers/staging/otus/zdusb.c b/drivers/staging/otus/zdusb.c
index bb89d85a4c7..2c799a25029 100644
--- a/drivers/staging/otus/zdusb.c
+++ b/drivers/staging/otus/zdusb.c
@@ -95,16 +95,14 @@ static int zfLnxProbe(struct usb_interface *interface,
printk(KERN_NOTICE "USB 1.1 Host\n");
#endif
- if (!(macp = kmalloc(sizeof(struct usbdrv_private), GFP_KERNEL)))
+ macp = kzalloc(sizeof(struct usbdrv_private), GFP_KERNEL);
+ if (!macp)
{
printk(KERN_ERR "out of memory allocating device structure\n");
result = -ENOMEM;
goto fail;
}
- /* Zero the memory */
- memset(macp, 0, sizeof(struct usbdrv_private));
-
net = alloc_etherdev(0);
if (net == NULL)
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 377884f3480..9ca0e9e2a96 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -57,7 +57,7 @@
#include <generated/utsrelease.h>
#include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
#define LCD_MINOR 156
@@ -1906,12 +1906,11 @@ static struct logical_input *panel_bind_key(char *name, char *press,
{
struct logical_input *key;
- key = kmalloc(sizeof(struct logical_input), GFP_KERNEL);
+ key = kzalloc(sizeof(struct logical_input), GFP_KERNEL);
if (!key) {
printk(KERN_ERR "panel: not enough memory\n");
return NULL;
}
- memset(key, 0, sizeof(struct logical_input));
if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i,
&scan_mask_o))
return NULL;
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 0c495eacb75..42783d7367e 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -23,7 +23,7 @@
#define PHISON_DEBUG
#define DRV_NAME "phison_e-box" /* #0003 */
-#define DRV_VERSION "0.91" /* #0003 */
+#define DRV_VERSION "0.91" /* #0003 */
#define PCI_VENDOR_ID_PHISON 0x1987
#define PCI_DEVICE_ID_PS5000 0x5000
@@ -56,7 +56,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
- .udma_mask = ATA_UDMA5,
+ .udma_mask = ATA_UDMA5,
.port_ops = &phison_ops,
};
diff --git a/drivers/staging/poch/Kconfig b/drivers/staging/poch/Kconfig
deleted file mode 100644
index b3b33b984a5..00000000000
--- a/drivers/staging/poch/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config POCH
- tristate "Redrapids Pocket Change CardBus support"
- depends on PCI && UIO
- default N
- ---help---
- Enable support for Redrapids Pocket Change CardBus devices.
diff --git a/drivers/staging/poch/Makefile b/drivers/staging/poch/Makefile
deleted file mode 100644
index d2b96805cb9..00000000000
--- a/drivers/staging/poch/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_POCH) += poch.o
diff --git a/drivers/staging/poch/README b/drivers/staging/poch/README
deleted file mode 100644
index ac76ff969a2..00000000000
--- a/drivers/staging/poch/README
+++ /dev/null
@@ -1,136 +0,0 @@
-TODO:
- - Rx block size is limited to < 2048, hardware bug?
- - Group size is limited to < page size, kernel alloc/mmap API issues
- - test whether Tx is transmitting data from provided buffers
- - handle device unplug case
- - handle temperature above threshold
- - use bus address instead of physical address for DMA
- - support for snapshot mode
- - audit userspace interfaces
- - get reserved major/minor if needed
-
-Sample Code:
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <sys/ioctl.h>
-#include <poll.h>
-#include <stdio.h>
-#include <error.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stdint.h>
-
-#include <sysfs/libsysfs.h>
-
-#include <poch.h>
-
-struct pconsume {
- uint32_t * offsets;
- uint32_t nfetch;
- uint32_t nflush;
-};
-
-uint32_t offsets[10];
-
-void process_group(unsigned char *buf, uint32_t size)
-{
- uint16_t *buf16 = (uint16_t *)buf;
-
- printf("RX: %p %u %04x %04x %04x %04x %04x %04x\n", buf, size,
- buf16[0], buf16[1], buf16[2], buf16[3], buf16[4], buf16[5]);
-}
-
-int main()
-{
- struct sysfs_attribute *attr;
- char *path;
- int ret;
- unsigned long mmap_size;
- int fd;
- unsigned char *cbuf;
-
- uint32_t nflush;
- struct pollfd poll_fds;
- int count = 0;
- int i;
-
- path = "/sys/class/pocketchange/poch0/ch0/block_size";
- attr = sysfs_open_attribute(path);
- ret = sysfs_write_attribute(attr, "256", strlen("256"));
- if (ret == -1)
- error(1, errno, "error writing attribute %s", path);
- sysfs_close_attribute(attr);
-
- path = "/sys/class/pocketchange/poch0/ch0/group_size";
- attr = sysfs_open_attribute(path);
- ret = sysfs_write_attribute(attr, "4096", strlen("4096"));
- if (ret == -1)
- error(1, errno, "error writing attribute %s", path);
- sysfs_close_attribute(attr);
-
- path = "/sys/class/pocketchange/poch0/ch0/group_count";
- attr = sysfs_open_attribute(path);
- ret = sysfs_write_attribute(attr, "64", strlen("64"));
- if (ret == -1)
- error(1, errno, "error writing attribute %s", path);
- sysfs_close_attribute(attr);
-
- fd = open("/dev/ch0", O_RDWR);
- if (fd == -1)
- error(1, errno, "error opening device node");
-
- path = "/sys/class/pocketchange/poch0/ch0/mmap_size";
- attr = sysfs_open_attribute(path);
- ret = sysfs_read_attribute(attr);
- if (ret == -1)
- error(1, errno, "error reading attribute %s", path);
- printf("%s", attr->value);
- sscanf(attr->value, "%lu", &mmap_size);
- sysfs_close_attribute(attr);
-
- cbuf = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, fd, 0);
- if (cbuf == MAP_FAILED)
- error(1, errno, "error mapping DMA buffers");
-
- ret = ioctl(fd, POCH_IOC_TRANSFER_START, 0);
- if (ret == -1)
- error(1, errno, "error starting transfer");
-
- nflush = 0;
- while (1) {
- struct pconsume consume;
-
- consume.offsets = offsets;
- consume.nfetch = 10;
- consume.nflush = nflush;
-
- ret = ioctl(fd, POCH_IOC_CONSUME, &consume);
- if (ret == -1)
- error(1, errno, "error consuming groups");
-
- nflush = consume.nfetch;
-
- for (i = 0; i < nflush; i++) {
- process_group(cbuf + consume.offsets[i], 4096);
-
- count++;
- if (count == 1000)
- break;
- }
-
- if (count == 1000)
- break;
- }
-
- ret = ioctl(fd, POCH_IOC_TRANSFER_STOP, 0);
- if (ret == -1)
- error(1, errno, "error starting transfer");
-
- return 0;
-}
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Vijay Kumar <vijaykumar@bravegnu.org> and Jaya Kumar <jayakumar.lkml@gmail.com>
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
deleted file mode 100644
index f940a34c1a0..00000000000
--- a/drivers/staging/poch/poch.c
+++ /dev/null
@@ -1,1443 +0,0 @@
-/*
- * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
- *
- * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
- *
- * Licensed under GPL version 2 only.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/uio_driver.h>
-#include <linux/spinlock.h>
-#include <linux/cdev.h>
-#include <linux/delay.h>
-#include <linux/sysfs.h>
-#include <linux/poll.h>
-#include <linux/idr.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include "poch.h"
-
-#include <asm/cacheflush.h>
-
-#ifndef PCI_VENDOR_ID_RRAPIDS
-#define PCI_VENDOR_ID_RRAPIDS 0x17D2
-#endif
-
-#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
-#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
-#endif
-
-#define POCH_NCHANNELS 2
-
-#define MAX_POCH_CARDS 8
-#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
-
-#define DRV_NAME "poch"
-#define PFX DRV_NAME ": "
-
-/*
- * BAR0 Bridge Register Definitions
- */
-
-#define BRIDGE_REV_REG 0x0
-#define BRIDGE_INT_MASK_REG 0x4
-#define BRIDGE_INT_STAT_REG 0x8
-
-#define BRIDGE_INT_ACTIVE (0x1 << 31)
-#define BRIDGE_INT_FPGA (0x1 << 2)
-#define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
-#define BRIDGE_INT_TEMP_WARN (0x1 << 0)
-
-#define BRIDGE_FPGA_RESET_REG 0xC
-
-#define BRIDGE_CARD_POWER_REG 0x10
-#define BRIDGE_CARD_POWER_EN (0x1 << 0)
-#define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
-
-#define BRIDGE_JTAG_REG 0x14
-#define BRIDGE_DMA_GO_REG 0x18
-#define BRIDGE_STAT_0_REG 0x1C
-#define BRIDGE_STAT_1_REG 0x20
-#define BRIDGE_STAT_2_REG 0x24
-#define BRIDGE_STAT_3_REG 0x28
-#define BRIDGE_TEMP_STAT_REG 0x2C
-#define BRIDGE_TEMP_THRESH_REG 0x30
-#define BRIDGE_EEPROM_REVSEL_REG 0x34
-#define BRIDGE_CIS_STRUCT_REG 0x100
-#define BRIDGE_BOARDREV_REG 0x124
-
-/*
- * BAR1 FPGA Register Definitions
- */
-
-#define FPGA_IFACE_REV_REG 0x0
-#define FPGA_RX_BLOCK_SIZE_REG 0x8
-#define FPGA_TX_BLOCK_SIZE_REG 0xC
-#define FPGA_RX_BLOCK_COUNT_REG 0x10
-#define FPGA_TX_BLOCK_COUNT_REG 0x14
-#define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
-#define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
-#define FPGA_RX_GROUP_COUNT_REG 0x20
-#define FPGA_TX_GROUP_COUNT_REG 0x24
-#define FPGA_RX_CURR_GROUP_REG 0x28
-#define FPGA_TX_CURR_GROUP_REG 0x2C
-#define FPGA_RX_CURR_PCI_REG 0x38
-#define FPGA_TX_CURR_PCI_REG 0x3C
-#define FPGA_RX_GROUP0_START_REG 0x40
-#define FPGA_TX_GROUP0_START_REG 0xC0
-#define FPGA_DMA_DESC_1_REG 0x140
-#define FPGA_DMA_DESC_2_REG 0x144
-#define FPGA_DMA_DESC_3_REG 0x148
-#define FPGA_DMA_DESC_4_REG 0x14C
-
-#define FPGA_DMA_INT_STAT_REG 0x150
-#define FPGA_DMA_INT_MASK_REG 0x154
-#define FPGA_DMA_INT_RX (1 << 0)
-#define FPGA_DMA_INT_TX (1 << 1)
-
-#define FPGA_RX_GROUPS_PER_INT_REG 0x158
-#define FPGA_TX_GROUPS_PER_INT_REG 0x15C
-#define FPGA_DMA_ADR_PAGE_REG 0x160
-#define FPGA_FPGA_REV_REG 0x200
-
-#define FPGA_ADC_CLOCK_CTL_REG 0x204
-#define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
-#define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
-#define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0
-
-#define FPGA_ADC_DAC_EN_REG 0x208
-#define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
-#define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
-
-#define FPGA_INT_STAT_REG 0x20C
-#define FPGA_INT_MASK_REG 0x210
-#define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
-#define FPGA_INT_DMA_CORE (0x1 << 8)
-#define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
-#define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
-#define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
-#define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
-#define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
-#define FPGA_INT_RX_ACQ_DONE (0x1)
-
-#define FPGA_RX_CTL_REG 0x214
-#define FPGA_RX_CTL_FIFO_FLUSH (0x1 << 9)
-#define FPGA_RX_CTL_SYNTH_DATA (0x1 << 8)
-#define FPGA_RX_CTL_CONT_CAP (0x0 << 1)
-#define FPGA_RX_CTL_SNAP_CAP (0x1 << 1)
-
-#define FPGA_RX_ARM_REG 0x21C
-
-#define FPGA_DOM_REG 0x224
-#define FPGA_DOM_DCM_RESET (0x1 << 5)
-#define FPGA_DOM_SOFT_RESET (0x1 << 4)
-#define FPGA_DOM_DUAL_M_SG_DMA (0x0)
-#define FPGA_DOM_TARGET_ACCESS (0x1)
-
-#define FPGA_TX_CTL_REG 0x228
-#define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
-#define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
-#define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
-#define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
-#define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
-#define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
-
-#define FPGA_ENDIAN_MODE_REG 0x22C
-#define FPGA_RX_FIFO_COUNT_REG 0x28C
-#define FPGA_TX_ENABLE_REG 0x298
-#define FPGA_TX_TRIGGER_REG 0x29C
-#define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
-#define FPGA_CAP_FIFO_REG 0x300
-#define FPGA_TX_SNAPSHOT_REG 0x8000
-
-/*
- * Channel Index Definitions
- */
-
-enum {
- CHNO_RX_CHANNEL,
- CHNO_TX_CHANNEL,
-};
-
-struct poch_dev;
-
-enum channel_dir {
- CHANNEL_DIR_RX,
- CHANNEL_DIR_TX,
-};
-
-struct poch_group_info {
- struct page *pg;
- dma_addr_t dma_addr;
- unsigned long user_offset;
-};
-
-struct channel_info {
- unsigned int chno;
-
- atomic_t sys_block_size;
- atomic_t sys_group_size;
- atomic_t sys_group_count;
-
- enum channel_dir dir;
-
- unsigned long block_size;
- unsigned long group_size;
- unsigned long group_count;
-
- /* Contains the DMA address and VM offset of each group. */
- struct poch_group_info *groups;
-
- /* Contains the header and circular buffer exported to userspace. */
- spinlock_t group_offsets_lock;
-
- /* Last group consumed by user space. */
- unsigned int consumed;
- /* Last group indicated as 'complete' to user space. */
- unsigned int transfer;
-
- wait_queue_head_t wq;
-
- union {
- unsigned int data_available;
- unsigned int space_available;
- };
-
- void __iomem *bridge_iomem;
- void __iomem *fpga_iomem;
- spinlock_t *iomem_lock;
-
- atomic_t free;
- atomic_t inited;
-
- /* Error counters */
- struct poch_counters counters;
- spinlock_t counters_lock;
-
- struct device *dev;
-};
-
-struct poch_dev {
- struct uio_info uio;
- struct pci_dev *pci_dev;
- unsigned int nchannels;
- struct channel_info channels[POCH_NCHANNELS];
- struct cdev cdev;
-
- /* Counts the no. of channels that have been opened. On first
- * open, the card is powered on. On last channel close, the
- * card is powered off.
- */
- atomic_t usage;
-
- void __iomem *bridge_iomem;
- void __iomem *fpga_iomem;
- spinlock_t iomem_lock;
-
- struct device *dev;
-};
-
-static int synth_rx;
-module_param(synth_rx, bool, 0600);
-MODULE_PARM_DESC(synth_rx,
- "Synthesize received values using a counter. Default: No");
-
-static int loopback;
-module_param(loopback, bool, 0600);
-MODULE_PARM_DESC(loopback,
- "Enable hardware loopback of trasnmitted data. Default: No");
-
-static dev_t poch_first_dev;
-static struct class *poch_cls;
-static DEFINE_IDR(poch_ids);
-
-static ssize_t store_block_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct channel_info *channel = dev_get_drvdata(dev);
- unsigned long block_size;
-
- sscanf(buf, "%lu", &block_size);
- atomic_set(&channel->sys_block_size, block_size);
-
- return count;
-}
-static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
-
-static ssize_t store_group_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct channel_info *channel = dev_get_drvdata(dev);
- unsigned long group_size;
-
- sscanf(buf, "%lu", &group_size);
- atomic_set(&channel->sys_group_size, group_size);
-
- return count;
-}
-static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
-
-static ssize_t store_group_count(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct channel_info *channel = dev_get_drvdata(dev);
- unsigned long group_count;
-
- sscanf(buf, "%lu", &group_count);
- atomic_set(&channel->sys_group_count, group_count);
-
- return count;
-}
-static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
-
-static ssize_t show_direction(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct channel_info *channel = dev_get_drvdata(dev);
- int len;
-
- len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
- return len;
-}
-static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
-
-static unsigned long npages(unsigned long bytes)
-{
- if (bytes % PAGE_SIZE == 0)
- return bytes / PAGE_SIZE;
- else
- return (bytes / PAGE_SIZE) + 1;
-}
-
-static ssize_t show_mmap_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct channel_info *channel = dev_get_drvdata(dev);
- int len;
- unsigned long mmap_size;
- unsigned long group_pages;
- unsigned long total_group_pages;
-
- group_pages = npages(channel->group_size);
- total_group_pages = group_pages * channel->group_count;
-
- mmap_size = total_group_pages * PAGE_SIZE;
- len = sprintf(buf, "%lu\n", mmap_size);
- return len;
-}
-static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
-
-static struct device_attribute *poch_class_attrs[] = {
- &dev_attr_block_size,
- &dev_attr_group_size,
- &dev_attr_group_count,
- &dev_attr_dir,
- &dev_attr_mmap_size,
-};
-
-static void poch_channel_free_groups(struct channel_info *channel)
-{
- unsigned long i;
-
- for (i = 0; i < channel->group_count; i++) {
- struct poch_group_info *group;
- unsigned int order;
-
- group = &channel->groups[i];
- order = get_order(channel->group_size);
- if (group->pg)
- __free_pages(group->pg, order);
- }
-}
-
-static int poch_channel_alloc_groups(struct channel_info *channel)
-{
- unsigned long i;
- unsigned long group_pages;
-
- group_pages = npages(channel->group_size);
-
- for (i = 0; i < channel->group_count; i++) {
- struct poch_group_info *group;
- unsigned int order;
- gfp_t gfp_mask;
-
- group = &channel->groups[i];
- order = get_order(channel->group_size);
-
- /*
- * __GFP_COMP is required here since we are going to
- * perform non-linear mapping to userspace. For more
- * information read the vm_insert_page() function
- * comments.
- */
-
- gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
- group->pg = alloc_pages(gfp_mask, order);
- if (!group->pg) {
- poch_channel_free_groups(channel);
- return -ENOMEM;
- }
-
- /* FIXME: This is the physical address not the bus
- * address! This won't work in architectures that
- * have an IOMMU. Can we use pci_map_single() for
- * this?
- */
- group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
- group->user_offset = (i * group_pages) * PAGE_SIZE;
-
- printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i,
- group->user_offset);
- }
-
- return 0;
-}
-
-static int channel_latch_attr(struct channel_info *channel)
-{
- channel->group_count = atomic_read(&channel->sys_group_count);
- channel->group_size = atomic_read(&channel->sys_group_size);
- channel->block_size = atomic_read(&channel->sys_block_size);
-
- if (channel->group_count == 0) {
- printk(KERN_ERR PFX "invalid group count %lu",
- channel->group_count);
- return -EINVAL;
- }
-
- if (channel->group_size == 0 ||
- channel->group_size < channel->block_size) {
- printk(KERN_ERR PFX "invalid group size %lu",
- channel->group_size);
- return -EINVAL;
- }
-
- if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
- printk(KERN_ERR PFX "invalid block size %lu",
- channel->block_size);
- return -EINVAL;
- }
-
- if (channel->group_size % channel->block_size != 0) {
- printk(KERN_ERR PFX
- "group size should be multiple of block size");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Configure DMA group registers
- */
-static void channel_dma_init(struct channel_info *channel)
-{
- void __iomem *fpga = channel->fpga_iomem;
- u32 group_regs_base;
- u32 group_reg;
- unsigned int page;
- unsigned int group_in_page;
- unsigned long i;
- u32 block_size_reg;
- u32 block_count_reg;
- u32 group_count_reg;
- u32 groups_per_int_reg;
- u32 curr_pci_reg;
-
- if (channel->chno == CHNO_RX_CHANNEL) {
- group_regs_base = FPGA_RX_GROUP0_START_REG;
- block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
- block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
- group_count_reg = FPGA_RX_GROUP_COUNT_REG;
- groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
- curr_pci_reg = FPGA_RX_CURR_PCI_REG;
- } else {
- group_regs_base = FPGA_TX_GROUP0_START_REG;
- block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
- block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
- group_count_reg = FPGA_TX_GROUP_COUNT_REG;
- groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
- curr_pci_reg = FPGA_TX_CURR_PCI_REG;
- }
-
- printk(KERN_WARNING "block_size, group_size, group_count\n");
- /*
- * Block size is represented in no. of 64 bit transfers.
- */
- iowrite32(channel->block_size / 8, fpga + block_size_reg);
- iowrite32(channel->group_size / channel->block_size,
- fpga + block_count_reg);
- iowrite32(channel->group_count, fpga + group_count_reg);
- /* FIXME: Hardcoded groups per int. Get it from sysfs? */
- iowrite32(16, fpga + groups_per_int_reg);
-
- /* Unlock PCI address? Not defined in the data sheet, but used
- * in the reference code by Redrapids.
- */
- iowrite32(0x1, fpga + curr_pci_reg);
-
- /* The DMA address page register is shared between the RX and
- * TX channels, so acquire lock.
- */
- for (i = 0; i < channel->group_count; i++) {
- page = i / 32;
- group_in_page = i % 32;
-
- group_reg = group_regs_base + (group_in_page * 4);
-
- spin_lock(channel->iomem_lock);
- iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
- iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
- spin_unlock(channel->iomem_lock);
- }
-
- for (i = 0; i < channel->group_count; i++) {
- page = i / 32;
- group_in_page = i % 32;
-
- group_reg = group_regs_base + (group_in_page * 4);
-
- spin_lock(channel->iomem_lock);
- iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
- printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
- ioread32(fpga + group_reg));
- spin_unlock(channel->iomem_lock);
- }
-
-}
-
-static void __poch_channel_clear_counters(struct channel_info *channel)
-{
- channel->counters.pll_unlock = 0;
- channel->counters.fifo_empty = 0;
- channel->counters.fifo_overflow = 0;
-}
-
-static int poch_channel_init(struct channel_info *channel,
- struct poch_dev *poch_dev)
-{
- struct pci_dev *pdev = poch_dev->pci_dev;
- struct device *dev = &pdev->dev;
- unsigned long alloc_size;
- int ret;
-
- printk(KERN_WARNING "channel_latch_attr\n");
-
- ret = channel_latch_attr(channel);
- if (ret != 0)
- goto out;
-
- channel->consumed = 0;
- channel->transfer = 0;
-
- /* Allocate memory to hold group information. */
- alloc_size = channel->group_count * sizeof(struct poch_group_info);
- channel->groups = kzalloc(alloc_size, GFP_KERNEL);
- if (!channel->groups) {
- dev_err(dev, "error allocating memory for group info\n");
- ret = -ENOMEM;
- goto out;
- }
-
- printk(KERN_WARNING "poch_channel_alloc_groups\n");
-
- ret = poch_channel_alloc_groups(channel);
- if (ret) {
- dev_err(dev, "error allocating groups of order %d\n",
- get_order(channel->group_size));
- goto out_free_group_info;
- }
-
- channel->fpga_iomem = poch_dev->fpga_iomem;
- channel->bridge_iomem = poch_dev->bridge_iomem;
- channel->iomem_lock = &poch_dev->iomem_lock;
- spin_lock_init(&channel->counters_lock);
-
- __poch_channel_clear_counters(channel);
-
- return 0;
-
- out_free_group_info:
- kfree(channel->groups);
- out:
- return ret;
-}
-
-static int poch_wait_fpga_prog(void __iomem *bridge)
-{
- unsigned long total_wait;
- const unsigned long wait_period = 100;
- /* FIXME: Get the actual timeout */
- const unsigned long prog_timeo = 10000; /* 10 Seconds */
- u32 card_power;
-
- printk(KERN_WARNING "poch_wait_fpg_prog\n");
-
- printk(KERN_INFO PFX "programming fpga ...\n");
- total_wait = 0;
- while (1) {
- msleep(wait_period);
- total_wait += wait_period;
-
- card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
- if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
- printk(KERN_INFO PFX "programming done\n");
- return 0;
- }
- if (total_wait > prog_timeo) {
- printk(KERN_ERR PFX
- "timed out while programming FPGA\n");
- return -EIO;
- }
- }
-}
-
-static void poch_card_power_off(struct poch_dev *poch_dev)
-{
- void __iomem *bridge = poch_dev->bridge_iomem;
- u32 card_power;
-
- iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
- iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
-
- card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
- iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
- bridge + BRIDGE_CARD_POWER_REG);
-}
-
-enum clk_src {
- CLK_SRC_ON_BOARD,
- CLK_SRC_EXTERNAL
-};
-
-static void poch_card_clock_on(void __iomem *fpga)
-{
- /* FIXME: Get this data through sysfs? */
- enum clk_src clk_src = CLK_SRC_ON_BOARD;
-
- if (clk_src == CLK_SRC_ON_BOARD) {
- iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
- fpga + FPGA_ADC_CLOCK_CTL_REG);
- } else if (clk_src == CLK_SRC_EXTERNAL) {
- iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
- fpga + FPGA_ADC_CLOCK_CTL_REG);
- }
-}
-
-static int poch_card_power_on(struct poch_dev *poch_dev)
-{
- void __iomem *bridge = poch_dev->bridge_iomem;
- void __iomem *fpga = poch_dev->fpga_iomem;
-
- iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
-
- if (poch_wait_fpga_prog(bridge) != 0) {
- poch_card_power_off(poch_dev);
- return -EIO;
- }
-
- poch_card_clock_on(fpga);
-
- /* Sync to new clock, reset state machines, set DMA mode. */
- iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
- | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
-
- /* FIXME: The time required for sync. needs to be tuned. */
- msleep(1000);
-
- return 0;
-}
-
-static void poch_channel_analog_on(struct channel_info *channel)
-{
- void __iomem *fpga = channel->fpga_iomem;
- u32 adc_dac_en;
-
- spin_lock(channel->iomem_lock);
- adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
- switch (channel->chno) {
- case CHNO_RX_CHANNEL:
- iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
- fpga + FPGA_ADC_DAC_EN_REG);
- break;
- case CHNO_TX_CHANNEL:
- iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
- fpga + FPGA_ADC_DAC_EN_REG);
- break;
- }
- spin_unlock(channel->iomem_lock);
-}
-
-static int poch_open(struct inode *inode, struct file *filp)
-{
- struct poch_dev *poch_dev;
- struct channel_info *channel;
- void __iomem *bridge;
- void __iomem *fpga;
- int chno;
- int usage;
- int ret;
-
- poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
- bridge = poch_dev->bridge_iomem;
- fpga = poch_dev->fpga_iomem;
-
- chno = iminor(inode) % poch_dev->nchannels;
- channel = &poch_dev->channels[chno];
-
- if (!atomic_dec_and_test(&channel->free)) {
- atomic_inc(&channel->free);
- ret = -EBUSY;
- goto out;
- }
-
- usage = atomic_inc_return(&poch_dev->usage);
-
- printk(KERN_WARNING "poch_card_power_on\n");
-
- if (usage == 1) {
- ret = poch_card_power_on(poch_dev);
- if (ret)
- goto out_dec_usage;
- }
-
- printk(KERN_INFO "CardBus Bridge Revision: %x\n",
- ioread32(bridge + BRIDGE_REV_REG));
- printk(KERN_INFO "CardBus Interface Revision: %x\n",
- ioread32(fpga + FPGA_IFACE_REV_REG));
-
- channel->chno = chno;
- filp->private_data = channel;
-
- printk(KERN_WARNING "poch_channel_init\n");
-
- ret = poch_channel_init(channel, poch_dev);
- if (ret)
- goto out_power_off;
-
- poch_channel_analog_on(channel);
-
- printk(KERN_WARNING "channel_dma_init\n");
-
- channel_dma_init(channel);
-
- printk(KERN_WARNING "poch_channel_analog_on\n");
-
- if (usage == 1) {
- printk(KERN_WARNING "setting up DMA\n");
-
- /* Initialize DMA Controller. */
- iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
- iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
-
- ioread32(fpga + FPGA_DMA_INT_STAT_REG);
- ioread32(fpga + FPGA_INT_STAT_REG);
- ioread32(bridge + BRIDGE_INT_STAT_REG);
-
- /* Initialize Interrupts. FIXME: Enable temperature
- * handling We are enabling both Tx and Rx channel
- * interrupts here. Do we need to enable interrupts
- * only for the current channel? Anyways we won't get
- * the interrupt unless the DMA is activated.
- */
- iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
- iowrite32(FPGA_INT_DMA_CORE
- | FPGA_INT_PLL_UNLOCKED
- | FPGA_INT_TX_FF_EMPTY
- | FPGA_INT_RX_FF_EMPTY
- | FPGA_INT_TX_FF_OVRFLW
- | FPGA_INT_RX_FF_OVRFLW,
- fpga + FPGA_INT_MASK_REG);
- iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
- fpga + FPGA_DMA_INT_MASK_REG);
- }
-
- if (channel->dir == CHANNEL_DIR_TX) {
- /* Flush TX FIFO and output data from cardbus. */
- u32 ctl_val = 0;
-
- ctl_val |= FPGA_TX_CTL_FIFO_FLUSH;
- ctl_val |= FPGA_TX_CTL_OUTPUT_CARDBUS;
- if (loopback)
- ctl_val |= FPGA_TX_CTL_LOOPBACK;
-
- iowrite32(ctl_val, fpga + FPGA_TX_CTL_REG);
- } else {
- /* Flush RX FIFO and output data to cardbus. */
- u32 ctl_val = FPGA_RX_CTL_CONT_CAP | FPGA_RX_CTL_FIFO_FLUSH;
- if (synth_rx)
- ctl_val |= FPGA_RX_CTL_SYNTH_DATA;
-
- iowrite32(ctl_val, fpga + FPGA_RX_CTL_REG);
- }
-
- atomic_inc(&channel->inited);
-
- return 0;
-
- out_power_off:
- if (usage == 1)
- poch_card_power_off(poch_dev);
- out_dec_usage:
- atomic_dec(&poch_dev->usage);
- atomic_inc(&channel->free);
- out:
- return ret;
-}
-
-static int poch_release(struct inode *inode, struct file *filp)
-{
- struct channel_info *channel = filp->private_data;
- struct poch_dev *poch_dev;
- int usage;
-
- poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
-
- usage = atomic_dec_return(&poch_dev->usage);
- if (usage == 0) {
- printk(KERN_WARNING "poch_card_power_off\n");
- poch_card_power_off(poch_dev);
- }
-
- atomic_dec(&channel->inited);
- poch_channel_free_groups(channel);
- kfree(channel->groups);
- atomic_inc(&channel->free);
-
- return 0;
-}
-
-/*
- * Map the the group buffers, to user space.
- */
-static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct channel_info *channel = filp->private_data;
-
- unsigned long start;
- unsigned long size;
-
- unsigned long group_pages;
- unsigned long total_group_pages;
-
- int pg_num;
- struct page *pg;
-
- int i;
- int ret;
-
- printk(KERN_WARNING "poch_mmap\n");
-
- if (vma->vm_pgoff) {
- printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
- return -EINVAL;
- }
-
- group_pages = npages(channel->group_size);
- total_group_pages = group_pages * channel->group_count;
-
- size = vma->vm_end - vma->vm_start;
- if (size != total_group_pages * PAGE_SIZE) {
- printk(KERN_WARNING PFX "required %lu bytes\n", size);
- return -EINVAL;
- }
-
- start = vma->vm_start;
-
- for (i = 0; i < channel->group_count; i++) {
- pg = channel->groups[i].pg;
- for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
- printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
- pg_num, i, start);
- ret = vm_insert_page(vma, start, pg);
- if (ret) {
- printk(KERN_DEBUG PFX
- "vm_insert 2 failed at %d\n", pg_num);
- return ret;
- }
- start += PAGE_SIZE;
- }
- }
-
- return 0;
-}
-
-/*
- * Check whether there is some group that the user space has not
- * consumed yet. When the user space consumes a group, it sets it to
- * -1. Cosuming could be reading data in case of RX and filling a
- * buffer in case of TX.
- */
-static int poch_channel_available(struct channel_info *channel)
-{
- int available = 0;
-
- spin_lock_irq(&channel->group_offsets_lock);
-
- if (channel->consumed != channel->transfer)
- available = 1;
-
- spin_unlock_irq(&channel->group_offsets_lock);
-
- return available;
-}
-
-static unsigned int poch_poll(struct file *filp, poll_table *pt)
-{
- struct channel_info *channel = filp->private_data;
- unsigned int ret = 0;
-
- poll_wait(filp, &channel->wq, pt);
-
- if (poch_channel_available(channel)) {
- if (channel->dir == CHANNEL_DIR_RX)
- ret = POLLIN | POLLRDNORM;
- else
- ret = POLLOUT | POLLWRNORM;
- }
-
- return ret;
-}
-
-static int poch_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct channel_info *channel = filp->private_data;
- void __iomem *fpga = channel->fpga_iomem;
- void __iomem *bridge = channel->bridge_iomem;
- void __user *argp = (void __user *)arg;
- struct vm_area_struct *vms;
- struct poch_counters counters;
- int ret;
-
- switch (cmd) {
- case POCH_IOC_TRANSFER_START:
- switch (channel->chno) {
- case CHNO_TX_CHANNEL:
- printk(KERN_INFO PFX "ioctl: Tx start\n");
- iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
- iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
-
- /* FIXME: Does it make sense to do a DMA GO
- * twice, once in Tx and once in Rx.
- */
- iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
- break;
- case CHNO_RX_CHANNEL:
- printk(KERN_INFO PFX "ioctl: Rx start\n");
- iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
- iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
- break;
- }
- break;
- case POCH_IOC_TRANSFER_STOP:
- switch (channel->chno) {
- case CHNO_TX_CHANNEL:
- printk(KERN_INFO PFX "ioctl: Tx stop\n");
- iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
- iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
- iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
- break;
- case CHNO_RX_CHANNEL:
- printk(KERN_INFO PFX "ioctl: Rx stop\n");
- iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
- iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
- break;
- }
- break;
- case POCH_IOC_CONSUME:
- {
- int available;
- int nfetch;
- unsigned int from;
- unsigned int count;
- unsigned int i, j;
- struct poch_consume consume;
- struct poch_consume *uconsume;
-
- uconsume = argp;
- ret = copy_from_user(&consume, uconsume, sizeof(consume));
- if (ret)
- return ret;
-
- spin_lock_irq(&channel->group_offsets_lock);
-
- channel->consumed += consume.nflush;
- channel->consumed %= channel->group_count;
-
- available = channel->transfer - channel->consumed;
- if (available < 0)
- available += channel->group_count;
-
- from = channel->consumed;
-
- spin_unlock_irq(&channel->group_offsets_lock);
-
- nfetch = consume.nfetch;
- count = min(available, nfetch);
-
- for (i = 0; i < count; i++) {
- j = (from + i) % channel->group_count;
- ret = put_user(channel->groups[j].user_offset,
- &consume.offsets[i]);
- if (ret)
- return -EFAULT;
- }
-
- ret = put_user(count, &uconsume->nfetch);
- if (ret)
- return -EFAULT;
-
- break;
- }
- case POCH_IOC_GET_COUNTERS:
- if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
- return -EFAULT;
-
- spin_lock_irq(&channel->counters_lock);
- counters = channel->counters;
- __poch_channel_clear_counters(channel);
- spin_unlock_irq(&channel->counters_lock);
-
- ret = copy_to_user(argp, &counters,
- sizeof(struct poch_counters));
- if (ret)
- return ret;
-
- break;
- case POCH_IOC_SYNC_GROUP_FOR_USER:
- case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
- vms = find_vma(current->mm, arg);
- if (!vms)
- /* Address not mapped. */
- return -EINVAL;
- if (vms->vm_file != filp)
- /* Address mapped from different device/file. */
- return -EINVAL;
-
- flush_cache_range(vms, arg, arg + channel->group_size);
- break;
- }
- return 0;
-}
-
-static struct file_operations poch_fops = {
- .owner = THIS_MODULE,
- .open = poch_open,
- .release = poch_release,
- .ioctl = poch_ioctl,
- .poll = poch_poll,
- .mmap = poch_mmap
-};
-
-static void poch_irq_dma(struct channel_info *channel)
-{
- u32 prev_transfer;
- u32 curr_transfer;
- long groups_done;
- unsigned long i, j;
- struct poch_group_info *groups;
- u32 curr_group_reg;
-
- if (!atomic_read(&channel->inited))
- return;
-
- prev_transfer = channel->transfer;
-
- if (channel->chno == CHNO_RX_CHANNEL)
- curr_group_reg = FPGA_RX_CURR_GROUP_REG;
- else
- curr_group_reg = FPGA_TX_CURR_GROUP_REG;
-
- curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
-
- groups_done = curr_transfer - prev_transfer;
- /* Check wrap over, and handle it. */
- if (groups_done <= 0)
- groups_done += channel->group_count;
-
- groups = channel->groups;
-
- spin_lock(&channel->group_offsets_lock);
-
- for (i = 0; i < groups_done; i++) {
- j = (prev_transfer + i) % channel->group_count;
-
- channel->transfer += 1;
- channel->transfer %= channel->group_count;
-
- if (channel->transfer == channel->consumed) {
- channel->consumed += 1;
- channel->consumed %= channel->group_count;
- }
- }
-
- spin_unlock(&channel->group_offsets_lock);
-
- wake_up_interruptible(&channel->wq);
-}
-
-static irqreturn_t poch_irq_handler(int irq, void *p)
-{
- struct poch_dev *poch_dev = p;
- void __iomem *bridge = poch_dev->bridge_iomem;
- void __iomem *fpga = poch_dev->fpga_iomem;
- struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
- struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
- u32 bridge_stat;
- u32 fpga_stat;
- u32 dma_stat;
-
- bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
- fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
- dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
-
- ioread32(fpga + FPGA_DMA_INT_STAT_REG);
- ioread32(fpga + FPGA_INT_STAT_REG);
- ioread32(bridge + BRIDGE_INT_STAT_REG);
-
- if (bridge_stat & BRIDGE_INT_FPGA) {
- if (fpga_stat & FPGA_INT_DMA_CORE) {
- if (dma_stat & FPGA_DMA_INT_RX)
- poch_irq_dma(channel_rx);
- if (dma_stat & FPGA_DMA_INT_TX)
- poch_irq_dma(channel_tx);
- }
- if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
- channel_tx->counters.pll_unlock++;
- channel_rx->counters.pll_unlock++;
- if (printk_ratelimit())
- printk(KERN_WARNING PFX "PLL unlocked\n");
- }
- if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
- channel_tx->counters.fifo_empty++;
- if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
- channel_tx->counters.fifo_overflow++;
- if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
- channel_rx->counters.fifo_empty++;
- if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
- channel_rx->counters.fifo_overflow++;
-
- /*
- * FIXME: These errors should be notified through the
- * poll interface as POLLERR.
- */
-
- /* Re-enable interrupts. */
- iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
-
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
-{
- int i, j;
- int nattrs;
- struct channel_info *channel;
- dev_t devno;
-
- if (poch_dev->dev == NULL)
- return;
-
- for (i = 0; i < poch_dev->nchannels; i++) {
- channel = &poch_dev->channels[i];
- devno = poch_first_dev + (id * poch_dev->nchannels) + i;
-
- if (!channel->dev)
- continue;
-
- nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
- for (j = 0; j < nattrs; j++)
- device_remove_file(channel->dev, poch_class_attrs[j]);
-
- device_unregister(channel->dev);
- }
-
- device_unregister(poch_dev->dev);
-}
-
-static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
- int id)
-{
- struct device *dev = &poch_dev->pci_dev->dev;
- int i, j;
- int nattrs;
- int ret;
- struct channel_info *channel;
- dev_t devno;
-
- poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
- MKDEV(0, 0), NULL, "poch%d", id);
- if (IS_ERR(poch_dev->dev)) {
- dev_err(dev, "error creating parent class device");
- ret = PTR_ERR(poch_dev->dev);
- poch_dev->dev = NULL;
- return ret;
- }
-
- for (i = 0; i < poch_dev->nchannels; i++) {
- channel = &poch_dev->channels[i];
-
- devno = poch_first_dev + (id * poch_dev->nchannels) + i;
- channel->dev = device_create(poch_cls, poch_dev->dev, devno,
- NULL, "ch%d", i);
- if (IS_ERR(channel->dev)) {
- dev_err(dev, "error creating channel class device");
- ret = PTR_ERR(channel->dev);
- channel->dev = NULL;
- poch_class_dev_unregister(poch_dev, id);
- return ret;
- }
-
- dev_set_drvdata(channel->dev, channel);
- nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
- for (j = 0; j < nattrs; j++) {
- ret = device_create_file(channel->dev,
- poch_class_attrs[j]);
- if (ret) {
- dev_err(dev, "error creating attribute file");
- poch_class_dev_unregister(poch_dev, id);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int __devinit poch_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_id)
-{
- struct device *dev = &pdev->dev;
- struct poch_dev *poch_dev;
- struct uio_info *uio;
- int ret;
- int id;
- int i;
-
- poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
- if (!poch_dev) {
- dev_err(dev, "error allocating priv. data memory\n");
- return -ENOMEM;
- }
-
- poch_dev->pci_dev = pdev;
- uio = &poch_dev->uio;
-
- pci_set_drvdata(pdev, poch_dev);
-
- spin_lock_init(&poch_dev->iomem_lock);
-
- poch_dev->nchannels = POCH_NCHANNELS;
- poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
- poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
-
- for (i = 0; i < poch_dev->nchannels; i++) {
- init_waitqueue_head(&poch_dev->channels[i].wq);
- atomic_set(&poch_dev->channels[i].free, 1);
- atomic_set(&poch_dev->channels[i].inited, 0);
- }
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(dev, "error enabling device\n");
- goto out_free;
- }
-
- ret = pci_request_regions(pdev, "poch");
- if (ret) {
- dev_err(dev, "error requesting resources\n");
- goto out_disable;
- }
-
- uio->mem[0].addr = pci_resource_start(pdev, 1);
- if (!uio->mem[0].addr) {
- dev_err(dev, "invalid BAR1\n");
- ret = -ENODEV;
- goto out_release;
- }
-
- uio->mem[0].size = pci_resource_len(pdev, 1);
- uio->mem[0].memtype = UIO_MEM_PHYS;
-
- uio->name = "poch";
- uio->version = "0.0.1";
- uio->irq = -1;
- ret = uio_register_device(dev, uio);
- if (ret) {
- dev_err(dev, "error register UIO device: %d\n", ret);
- goto out_release;
- }
-
- poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (poch_dev->bridge_iomem == NULL) {
- dev_err(dev, "error mapping bridge (bar0) registers\n");
- ret = -ENOMEM;
- goto out_uio_unreg;
- }
-
- poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
- if (poch_dev->fpga_iomem == NULL) {
- dev_err(dev, "error mapping fpga (bar1) registers\n");
- ret = -ENOMEM;
- goto out_bar0_unmap;
- }
-
- ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
- dev_name(dev), poch_dev);
- if (ret) {
- dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
- ret = -ENOMEM;
- goto out_bar1_unmap;
- }
-
- if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
- dev_err(dev, "error allocating memory ids\n");
- ret = -ENOMEM;
- goto out_free_irq;
- }
-
- idr_get_new(&poch_ids, poch_dev, &id);
- if (id >= MAX_POCH_CARDS) {
- dev_err(dev, "minors exhausted\n");
- ret = -EBUSY;
- goto out_free_irq;
- }
-
- cdev_init(&poch_dev->cdev, &poch_fops);
- poch_dev->cdev.owner = THIS_MODULE;
- ret = cdev_add(&poch_dev->cdev,
- poch_first_dev + (id * poch_dev->nchannels),
- poch_dev->nchannels);
- if (ret) {
- dev_err(dev, "error register character device\n");
- goto out_idr_remove;
- }
-
- ret = poch_class_dev_register(poch_dev, id);
- if (ret)
- goto out_cdev_del;
-
- return 0;
-
- out_cdev_del:
- cdev_del(&poch_dev->cdev);
- out_idr_remove:
- idr_remove(&poch_ids, id);
- out_free_irq:
- free_irq(pdev->irq, poch_dev);
- out_bar1_unmap:
- iounmap(poch_dev->fpga_iomem);
- out_bar0_unmap:
- iounmap(poch_dev->bridge_iomem);
- out_uio_unreg:
- uio_unregister_device(uio);
- out_release:
- pci_release_regions(pdev);
- out_disable:
- pci_disable_device(pdev);
- out_free:
- kfree(poch_dev);
- return ret;
-}
-
-/*
- * FIXME: We are yet to handle the hot unplug case.
- */
-static void poch_pci_remove(struct pci_dev *pdev)
-{
- struct poch_dev *poch_dev = pci_get_drvdata(pdev);
- struct uio_info *uio = &poch_dev->uio;
- unsigned int minor = MINOR(poch_dev->cdev.dev);
- unsigned int id = minor / poch_dev->nchannels;
-
- poch_class_dev_unregister(poch_dev, id);
- cdev_del(&poch_dev->cdev);
- idr_remove(&poch_ids, id);
- free_irq(pdev->irq, poch_dev);
- iounmap(poch_dev->fpga_iomem);
- iounmap(poch_dev->bridge_iomem);
- uio_unregister_device(uio);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- iounmap(uio->mem[0].internal_addr);
-
- kfree(poch_dev);
-}
-
-static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
- { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
- PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
- { 0, }
-};
-
-static struct pci_driver poch_pci_driver = {
- .name = DRV_NAME,
- .id_table = poch_pci_ids,
- .probe = poch_pci_probe,
- .remove = poch_pci_remove,
-};
-
-static int __init poch_init_module(void)
-{
- int ret = 0;
-
- ret = alloc_chrdev_region(&poch_first_dev, 0,
- MAX_POCH_DEVICES, DRV_NAME);
- if (ret) {
- printk(KERN_ERR PFX "error allocating device no.");
- return ret;
- }
-
- poch_cls = class_create(THIS_MODULE, "pocketchange");
- if (IS_ERR(poch_cls)) {
- ret = PTR_ERR(poch_cls);
- goto out_unreg_chrdev;
- }
-
- ret = pci_register_driver(&poch_pci_driver);
- if (ret) {
- printk(KERN_ERR PFX "error register PCI device");
- goto out_class_destroy;
- }
-
- return 0;
-
- out_class_destroy:
- class_destroy(poch_cls);
-
- out_unreg_chrdev:
- unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
-
- return ret;
-}
-
-static void __exit poch_exit_module(void)
-{
- pci_unregister_driver(&poch_pci_driver);
- class_destroy(poch_cls);
- unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
-}
-
-module_init(poch_init_module);
-module_exit(poch_exit_module);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/poch/poch.h b/drivers/staging/poch/poch.h
deleted file mode 100644
index 8b08385861f..00000000000
--- a/drivers/staging/poch/poch.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
- *
- * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
- *
- * Part of userspace API. Should be moved to a header file in
- * include/linux for final version.
- *
- */
-
-#include <linux/types.h>
-
-struct poch_counters {
- __u32 fifo_empty;
- __u32 fifo_overflow;
- __u32 pll_unlock;
-};
-
-struct poch_consume {
- __u32 __user *offsets;
- __u32 nfetch;
- __u32 nflush;
-};
-
-#define POCH_IOC_NUM '9'
-
-#define POCH_IOC_TRANSFER_START _IO(POCH_IOC_NUM, 0)
-#define POCH_IOC_TRANSFER_STOP _IO(POCH_IOC_NUM, 1)
-#define POCH_IOC_GET_COUNTERS _IOR(POCH_IOC_NUM, 2, \
- struct poch_counters)
-#define POCH_IOC_SYNC_GROUP_FOR_USER _IO(POCH_IOC_NUM, 3)
-#define POCH_IOC_SYNC_GROUP_FOR_DEVICE _IO(POCH_IOC_NUM, 4)
-
-#define POCH_IOC_CONSUME _IOWR(POCH_IOC_NUM, 5, \
- struct poch_consume)
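The deleted poch.h above was the driver's user-space ABI. A hypothetical user-space sketch of how those ioctl numbers were meant to be consumed; the "/dev/ch0" node name and the re-declared constants are assumptions copied from the header being removed, not documented usage:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

/* mirrors the definitions from the deleted header */
struct poch_counters {
	__u32 fifo_empty;
	__u32 fifo_overflow;
	__u32 pll_unlock;
};

#define POCH_IOC_NUM		'9'
#define POCH_IOC_TRANSFER_START	_IO(POCH_IOC_NUM, 0)
#define POCH_IOC_GET_COUNTERS	_IOR(POCH_IOC_NUM, 2, struct poch_counters)

int main(void)
{
	struct poch_counters c;
	int fd = open("/dev/ch0", O_RDWR);	/* assumed channel node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, POCH_IOC_TRANSFER_START) < 0 ||
	    ioctl(fd, POCH_IOC_GET_COUNTERS, &c) < 0) {
		perror("poch ioctl");
		close(fd);
		return 1;
	}
	printf("fifo_empty=%u fifo_overflow=%u pll_unlock=%u\n",
	       c.fifo_empty, c.fifo_overflow, c.pll_unlock);
	close(fd);
	return 0;
}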
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index eed0e5545a5..8c8d1c282e7 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -204,18 +204,18 @@ int pohmelfs_copy_crypto(struct pohmelfs_sb *psb)
}
if (g->hash_keysize) {
- psb->hash_key = kmalloc(g->hash_keysize, GFP_KERNEL);
+ psb->hash_key = kmemdup(g->hash_key, g->hash_keysize,
+ GFP_KERNEL);
if (!psb->hash_key)
goto err_out_free_cipher_string;
- memcpy(psb->hash_key, g->hash_key, g->hash_keysize);
psb->hash_keysize = g->hash_keysize;
}
if (g->cipher_keysize) {
- psb->cipher_key = kmalloc(g->cipher_keysize, GFP_KERNEL);
+ psb->cipher_key = kmemdup(g->cipher_key, g->cipher_keysize,
+ GFP_KERNEL);
if (!psb->cipher_key)
goto err_out_free_hash;
- memcpy(psb->cipher_key, g->cipher_key, g->cipher_keysize);
psb->cipher_keysize = g->cipher_keysize;
}
@@ -238,11 +238,10 @@ static int pohmelfs_send_reply(int err, int msg_num, int action, struct cn_msg *
{
struct pohmelfs_cn_ack *ack;
- ack = kmalloc(sizeof(struct pohmelfs_cn_ack), GFP_KERNEL);
+ ack = kzalloc(sizeof(struct pohmelfs_cn_ack), GFP_KERNEL);
if (!ack)
return -ENOMEM;
- memset(ack, 0, sizeof(struct pohmelfs_cn_ack));
memcpy(&ack->msg, msg, sizeof(struct cn_msg));
if (action == POHMELFS_CTLINFO_ACK)
@@ -455,14 +454,12 @@ static int pohmelfs_crypto_hash_init(struct pohmelfs_config_group *g, struct poh
g->hash_strlen = c->strlen;
g->hash_keysize = c->keysize;
- g->hash_key = kmalloc(c->keysize, GFP_KERNEL);
+ g->hash_key = kmemdup(key, c->keysize, GFP_KERNEL);
if (!g->hash_key) {
kfree(g->hash_string);
return -ENOMEM;
}
- memcpy(g->hash_key, key, c->keysize);
-
return 0;
}
@@ -480,14 +477,12 @@ static int pohmelfs_crypto_cipher_init(struct pohmelfs_config_group *g, struct p
g->cipher_strlen = c->strlen;
g->cipher_keysize = c->keysize;
- g->cipher_key = kmalloc(c->keysize, GFP_KERNEL);
+ g->cipher_key = kmemdup(key, c->keysize, GFP_KERNEL);
if (!g->cipher_key) {
kfree(g->cipher_string);
return -ENOMEM;
}
- memcpy(g->cipher_key, key, c->keysize);
-
return 0;
}
@@ -509,13 +504,13 @@ static int pohmelfs_cn_crypto(struct cn_msg *msg)
}
switch (crypto->type) {
- case POHMELFS_CRYPTO_HASH:
+ case POHMELFS_CRYPTO_HASH:
err = pohmelfs_crypto_hash_init(g, crypto);
break;
- case POHMELFS_CRYPTO_CIPHER:
+ case POHMELFS_CRYPTO_CIPHER:
err = pohmelfs_crypto_cipher_init(g, crypto);
break;
- default:
+ default:
err = -ENOTSUPP;
break;
}
@@ -536,24 +531,24 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
return;
switch (msg->flags) {
- case POHMELFS_FLAGS_ADD:
- case POHMELFS_FLAGS_DEL:
- case POHMELFS_FLAGS_MODIFY:
+ case POHMELFS_FLAGS_ADD:
+ case POHMELFS_FLAGS_DEL:
+ case POHMELFS_FLAGS_MODIFY:
err = pohmelfs_cn_ctl(msg, msg->flags);
break;
- case POHMELFS_FLAGS_FLUSH:
+ case POHMELFS_FLAGS_FLUSH:
err = pohmelfs_cn_flush(msg);
break;
- case POHMELFS_FLAGS_SHOW:
+ case POHMELFS_FLAGS_SHOW:
err = pohmelfs_cn_disp(msg);
break;
- case POHMELFS_FLAGS_DUMP:
+ case POHMELFS_FLAGS_DUMP:
err = pohmelfs_cn_dump(msg);
break;
- case POHMELFS_FLAGS_CRYPTO:
+ case POHMELFS_FLAGS_CRYPTO:
err = pohmelfs_cn_crypto(msg);
break;
- default:
+ default:
err = -ENOSYS;
break;
}
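The config.c hunks above are two routine conversions: kmalloc() followed by memcpy() becomes kmemdup(), and kmalloc() followed by memset(..., 0, ...) becomes kzalloc(). A minimal sketch of the equivalence; the demo_ function names are invented:

#include <linux/slab.h>
#include <linux/string.h>

/* was: buf = kmalloc(len, GFP_KERNEL); if (buf) memcpy(buf, src, len); */
static void *demo_dup_key(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);	/* NULL on allocation failure */
}

/* was: p = kmalloc(size, GFP_KERNEL); if (p) memset(p, 0, size); */
static void *demo_zeroed_buf(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}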
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
index 884183c0913..2fdb3e01460 100644
--- a/drivers/staging/pohmelfs/crypto.c
+++ b/drivers/staging/pohmelfs/crypto.c
@@ -170,17 +170,17 @@ static int pohmelfs_crypto_process(struct ablkcipher_request *req,
err = crypto_ablkcipher_decrypt(req);
switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- err = wait_for_completion_interruptible_timeout(&complete.complete,
+ case -EINPROGRESS:
+ case -EBUSY:
+ err = wait_for_completion_interruptible_timeout(&complete.complete,
timeout);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = complete.error;
- break;
- default:
- break;
+ if (!err)
+ err = -ETIMEDOUT;
+ else if (err > 0)
+ err = complete.error;
+ break;
+ default:
+ break;
}
return err;
@@ -196,7 +196,7 @@ int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd
return 0;
dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
- __func__, e, cmd_iv, data, page, (page)?page->index:0, size);
+ __func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);
if (data) {
sg_init_one(&sg, data, size);
@@ -247,7 +247,7 @@ int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd
dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
__func__, e, e->hash, e->cipher, cmd_iv);
- for (i=0; i<crypto_hash_digestsize(e->hash); ++i) {
+ for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
#if 0
dprintka("%02x ", recv[i]);
if (recv[i] != calc[i]) {
@@ -279,7 +279,7 @@ err_out_exit:
}
static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
- int (* iterator) (struct pohmelfs_crypto_engine *e,
+ int (*iterator) (struct pohmelfs_crypto_engine *e,
struct scatterlist *dst,
struct scatterlist *src))
{
@@ -319,7 +319,7 @@ static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_eng
return 0;
dpage_idx = 0;
- for (i=0; i<t->page_num; ++i) {
+ for (i = 0; i < t->page_num; ++i) {
struct page *page = t->pages[i];
struct page *dpage = e->pages[dpage_idx];
@@ -402,7 +402,7 @@ static int pohmelfs_hash(struct pohmelfs_crypto_thread *tc)
{
unsigned int i;
dprintk("%s: ", __func__);
- for (i=0; i<tc->psb->crypto_attached_size; ++i)
+ for (i = 0; i < tc->psb->crypto_attached_size; ++i)
dprintka("%02x ", dst[i]);
dprintka("\n");
}
@@ -414,7 +414,7 @@ static void pohmelfs_crypto_pages_free(struct pohmelfs_crypto_engine *e)
{
unsigned int i;
- for (i=0; i<e->page_num; ++i)
+ for (i = 0; i < e->page_num; ++i)
__free_page(e->pages[i]);
kfree(e->pages);
}
@@ -427,7 +427,7 @@ static int pohmelfs_crypto_pages_alloc(struct pohmelfs_crypto_engine *e, struct
if (!e->pages)
return -ENOMEM;
- for (i=0; i<psb->trans_max_pages; ++i) {
+ for (i = 0; i < psb->trans_max_pages; ++i) {
e->pages[i] = alloc_page(GFP_KERNEL);
if (!e->pages[i])
break;
@@ -612,7 +612,7 @@ static int pohmelfs_sys_crypto_init(struct pohmelfs_sb *psb)
__func__, st, &st->eng, &st->eng.hash, &st->eng.cipher);
}
- for (i=0; i<psb->crypto_thread_num; ++i) {
+ for (i = 0; i < psb->crypto_thread_num; ++i) {
err = -ENOMEM;
t = kzalloc(sizeof(struct pohmelfs_crypto_thread), GFP_KERNEL);
if (!t)
@@ -780,7 +780,7 @@ int pohmelfs_crypto_init(struct pohmelfs_sb *psb)
}
static int pohmelfs_crypto_thread_get(struct pohmelfs_sb *psb,
- int (* action)(struct pohmelfs_crypto_thread *t, void *data), void *data)
+ int (*action)(struct pohmelfs_crypto_thread *t, void *data), void *data)
{
struct pohmelfs_crypto_thread *t = NULL;
int err;
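The crypto.c hunk above only re-indents the -EINPROGRESS/-EBUSY handling, but the pattern it touches is worth spelling out: an async cipher call may finish later through a callback, so the caller parks on a completion with a timeout. A sketch under invented demo_ names, using the completion API the hunk relies on rather than the pohmelfs structures themselves:

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

struct demo_result {
	struct completion done;
	int error;
};

/* registered with ablkcipher_request_set_callback() in real code */
static void demo_crypto_done(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;			/* backlog notification only */
	res->error = err;
	complete(&res->done);
}

/* translate the immediate return code into a final status */
static int demo_wait_for_crypto(struct demo_result *res, int err,
				long timeout)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		err = wait_for_completion_interruptible_timeout(&res->done,
								timeout);
		if (!err)
			err = -ETIMEDOUT;	/* timer expired */
		else if (err > 0)
			err = res->error;	/* use the callback's status */
		break;
	default:
		break;
	}
	return err;
}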
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 79819f07bfb..059e9d2ddf6 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -105,7 +105,7 @@ static struct pohmelfs_name *pohmelfs_insert_hash(struct pohmelfs_inode *pi,
if (ret) {
printk("%s: exist: parent: %llu, ino: %llu, hash: %x, len: %u, data: '%s', "
- "new: ino: %llu, hash: %x, len: %u, data: '%s'.\n",
+ "new: ino: %llu, hash: %x, len: %u, data: '%s'.\n",
__func__, pi->ino,
ret->ino, ret->hash, ret->len, ret->data,
new->ino, new->hash, new->len, new->data);
@@ -234,7 +234,7 @@ struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
int err = -EEXIST;
dprintk("%s: creating inode: parent: %llu, ino: %llu, str: %p.\n",
- __func__, (parent)?parent->ino:0, info->ino, str);
+ __func__, (parent) ? parent->ino : 0, info->ino, str);
err = -ENOMEM;
new = iget_locked(psb->sb, info->ino);
@@ -265,8 +265,8 @@ struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
s.len = 2;
s.hash = jhash(s.name, s.len, 0);
- err = pohmelfs_add_dir(psb, npi, (parent)?parent:npi, &s,
- (parent)?parent->vfs_inode.i_mode:npi->vfs_inode.i_mode, 0);
+ err = pohmelfs_add_dir(psb, npi, (parent) ? parent : npi, &s,
+ (parent) ? parent->vfs_inode.i_mode : npi->vfs_inode.i_mode, 0);
if (err)
goto err_out_put;
}
@@ -277,7 +277,7 @@ struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
err = pohmelfs_add_dir(psb, parent, npi, str, info->mode, link);
dprintk("%s: %s inserted name: '%s', new_offset: %llu, ino: %llu, parent: %llu.\n",
- __func__, (err)?"unsuccessfully":"successfully",
+ __func__, (err) ? "unsuccessfully" : "successfully",
str->name, parent->total_len, info->ino, parent->ino);
if (err && err != -EEXIST)
@@ -605,7 +605,7 @@ struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
if (!start)
info.ino = pohmelfs_new_ino(psb);
- info.nlink = S_ISDIR(mode)?2:1;
+ info.nlink = S_ISDIR(mode) ? 2 : 1;
info.uid = current_fsuid();
info.gid = current_fsgid();
info.size = 0;
@@ -849,7 +849,7 @@ static int pohmelfs_create_link(struct pohmelfs_inode *parent, struct qstr *obj,
}
dprintk("%s: parent: %llu, obj: '%s', target_inode: %llu, target_str: '%s', full: '%s'.\n",
- __func__, parent->ino, obj->name, (target)?target->ino:0, (tstr)?tstr->name:NULL,
+ __func__, parent->ino, obj->name, (target) ? target->ino : 0, (tstr) ? tstr->name : NULL,
(char *)data);
cmd->cmd = NETFS_LINK;
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 63275529ff5..643b413d9f0 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
-#include <linux/quotaops.h>
#include "netfs.h"
@@ -685,7 +684,7 @@ static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int
goto err_out_free;
}
- for (i=0; i<num; ++i) {
+ for (i = 0; i < num; ++i) {
page = pages[i];
if (err)
@@ -880,7 +879,7 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
/*
* We want fsync() to work on POHMELFS.
*/
-static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+static int pohmelfs_fsync(struct file *file, int datasync)
{
struct inode *inode = file->f_mapping->host;
struct writeback_control wbc = {
@@ -969,13 +968,6 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
goto err_out_exit;
}
- if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
- err = dquot_transfer(inode, attr);
- if (err)
- goto err_out_exit;
- }
-
err = inode_setattr(inode, attr);
if (err) {
dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
@@ -1431,35 +1423,35 @@ static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int re
continue;
switch (token) {
- case pohmelfs_opt_idx:
- psb->idx = option;
- break;
- case pohmelfs_opt_trans_scan_timeout:
- psb->trans_scan_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_drop_scan_timeout:
- psb->drop_scan_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_wait_on_page_timeout:
- psb->wait_on_page_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_mcache_timeout:
- psb->mcache_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_trans_retries:
- psb->trans_retries = option;
- break;
- case pohmelfs_opt_crypto_thread_num:
- psb->crypto_thread_num = option;
- break;
- case pohmelfs_opt_trans_max_pages:
- psb->trans_max_pages = option;
- break;
- case pohmelfs_opt_crypto_fail_unsupported:
- psb->crypto_fail_unsupported = 1;
- break;
- default:
- return -EINVAL;
+ case pohmelfs_opt_idx:
+ psb->idx = option;
+ break;
+ case pohmelfs_opt_trans_scan_timeout:
+ psb->trans_scan_timeout = msecs_to_jiffies(option);
+ break;
+ case pohmelfs_opt_drop_scan_timeout:
+ psb->drop_scan_timeout = msecs_to_jiffies(option);
+ break;
+ case pohmelfs_opt_wait_on_page_timeout:
+ psb->wait_on_page_timeout = msecs_to_jiffies(option);
+ break;
+ case pohmelfs_opt_mcache_timeout:
+ psb->mcache_timeout = msecs_to_jiffies(option);
+ break;
+ case pohmelfs_opt_trans_retries:
+ psb->trans_retries = option;
+ break;
+ case pohmelfs_opt_crypto_thread_num:
+ psb->crypto_thread_num = option;
+ break;
+ case pohmelfs_opt_trans_max_pages:
+ psb->trans_max_pages = option;
+ break;
+ case pohmelfs_opt_crypto_fail_unsupported:
+ psb->crypto_fail_unsupported = 1;
+ break;
+ default:
+ return -EINVAL;
}
}
@@ -1777,7 +1769,7 @@ static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
} else {
unsigned int i;
- for (i=0; i<ctl->addrlen; ++i)
+ for (i = 0; i < ctl->addrlen; ++i)
seq_printf(m, "%02x.", ctl->addr.addr[i]);
}
@@ -2035,7 +2027,7 @@ err_out_exit:
static void __exit exit_pohmel_fs(void)
{
- unregister_filesystem(&pohmel_fs_type);
+ unregister_filesystem(&pohmel_fs_type);
pohmelfs_destroy_inodecache();
pohmelfs_mcache_exit();
pohmelfs_config_exit();
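The inode.c hunks drop the quota transfer (quotaops.h is no longer included) and move pohmelfs_fsync() to the two-argument ->fsync prototype. A sketch of that prototype in isolation; the demo_ names and the exact writeback_control fields are assumptions for illustration, not the pohmelfs code:

#include <linux/fs.h>
#include <linux/writeback.h>

static int demo_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,
		.nr_to_write = 0,	/* assumed: metadata-only sync */
	};

	return sync_inode(inode, &wbc);
}

static const struct file_operations demo_fops = {
	.fsync = demo_fsync,
};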
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
index 4a86f0b1ea8..9279897ff16 100644
--- a/drivers/staging/pohmelfs/net.c
+++ b/drivers/staging/pohmelfs/net.c
@@ -713,8 +713,8 @@ static int pohmelfs_crypto_cap_response(struct netfs_state *st)
dprintk("%s: cipher '%s': %s, hash: '%s': %s.\n",
__func__,
- psb->cipher_string, (cap->cipher_strlen)?"SUPPORTED":"NOT SUPPORTED",
- psb->hash_string, (cap->hash_strlen)?"SUPPORTED":"NOT SUPPORTED");
+ psb->cipher_string, (cap->cipher_strlen) ? "SUPPORTED" : "NOT SUPPORTED",
+ psb->hash_string, (cap->hash_strlen) ? "SUPPORTED" : "NOT SUPPORTED");
if (!cap->hash_strlen) {
if (psb->hash_strlen && psb->crypto_fail_unsupported)
@@ -748,11 +748,11 @@ static int pohmelfs_capabilities_response(struct netfs_state *st)
return err;
switch (cmd->id) {
- case POHMELFS_CRYPTO_CAPABILITIES:
+ case POHMELFS_CRYPTO_CAPABILITIES:
return pohmelfs_crypto_cap_response(st);
- case POHMELFS_ROOT_CAPABILITIES:
+ case POHMELFS_ROOT_CAPABILITIES:
return pohmelfs_root_cap_response(st);
- default:
+ default:
break;
}
return -EINVAL;
@@ -774,7 +774,7 @@ static int pohmelfs_getxattr_response(struct netfs_state *st)
m = pohmelfs_mcache_search(psb, cmd->id);
dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
- __func__, cmd->id, (m)?m->gen:0, error);
+ __func__, cmd->id, (m) ? m->gen : 0, error);
if (!m) {
printk("%s: failed to find getxattr cache entry: id: %llu.\n", __func__, cmd->id);
@@ -824,7 +824,7 @@ int pohmelfs_data_lock_response(struct netfs_state *st)
m = pohmelfs_mcache_search(psb, id);
dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
- __func__, cmd->id, (m)?m->gen:0, err);
+ __func__, cmd->id, (m) ? m->gen : 0, err);
if (!m) {
pohmelfs_data_recv(st, st->data, cmd->size);
@@ -915,7 +915,7 @@ static int pohmelfs_recv(void *data)
unsigned char *hash = e->data;
dprintk("%s: received hash: ", __func__);
- for (i=0; i<cmd->csize; ++i)
+ for (i = 0; i < cmd->csize; ++i)
printk("%02x ", hash[i]);
printk("\n");
@@ -933,37 +933,37 @@ static int pohmelfs_recv(void *data)
}
switch (cmd->cmd) {
- case NETFS_READ_PAGE:
+ case NETFS_READ_PAGE:
err = pohmelfs_read_page_response(st);
break;
- case NETFS_READDIR:
+ case NETFS_READDIR:
err = pohmelfs_readdir_response(st);
break;
- case NETFS_LOOKUP:
+ case NETFS_LOOKUP:
err = pohmelfs_lookup_response(st);
break;
- case NETFS_CREATE:
+ case NETFS_CREATE:
err = pohmelfs_create_response(st);
break;
- case NETFS_REMOVE:
+ case NETFS_REMOVE:
err = pohmelfs_remove_response(st);
break;
- case NETFS_TRANS:
+ case NETFS_TRANS:
err = pohmelfs_transaction_response(st);
break;
- case NETFS_PAGE_CACHE:
+ case NETFS_PAGE_CACHE:
err = pohmelfs_page_cache_response(st);
break;
- case NETFS_CAPABILITIES:
+ case NETFS_CAPABILITIES:
err = pohmelfs_capabilities_response(st);
break;
- case NETFS_LOCK:
+ case NETFS_LOCK:
err = pohmelfs_data_lock_response(st);
break;
- case NETFS_XATTR_GET:
+ case NETFS_XATTR_GET:
err = pohmelfs_getxattr_response(st);
break;
- default:
+ default:
printk("%s: wrong cmd: %u, id: %llu, start: %llu, size: %u, ext: %u.\n",
__func__, cmd->cmd, cmd->id, cmd->start, cmd->size, cmd->ext);
netfs_state_reset(st);
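The net.c hunk (like the matching ones in config.c and inode.c) is purely a coding-style re-indent: case labels sit at the same indentation level as the switch statement itself. A minimal invented example of the target layout:

#include <linux/errno.h>

static int demo_dispatch(unsigned int cmd)
{
	int err;

	switch (cmd) {
	case 0:
		err = 0;
		break;
	default:
		err = -ENOSYS;
		break;
	}
	return err;
}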
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
index 01cba006e07..63391d2c25a 100644
--- a/drivers/staging/pohmelfs/netfs.h
+++ b/drivers/staging/pohmelfs/netfs.h
@@ -305,7 +305,7 @@ struct pohmelfs_inode {
};
struct netfs_trans;
-typedef int (* netfs_trans_complete_t)(struct page **pages, unsigned int page_num,
+typedef int (*netfs_trans_complete_t)(struct page **pages, unsigned int page_num,
void *private, int err);
struct netfs_state;
@@ -489,7 +489,7 @@ void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th);
struct netfs_state {
struct mutex __state_lock; /* Can not allow to use the same socket simultaneously */
struct mutex __state_send_lock;
- struct netfs_cmd cmd; /* Cached command */
+ struct netfs_cmd cmd; /* Cached command */
struct netfs_inode_info info; /* Cached inode info */
void *data; /* Cached some data */
@@ -500,9 +500,9 @@ struct netfs_state {
struct task_struct *thread; /* Async receiving thread */
/* Waiting/polling machinery */
- wait_queue_t wait;
- wait_queue_head_t *whead;
- wait_queue_head_t thread_wait;
+ wait_queue_t wait;
+ wait_queue_head_t *whead;
+ wait_queue_head_t thread_wait;
struct mutex trans_lock;
struct rb_root trans_root;
@@ -620,8 +620,8 @@ struct pohmelfs_sb {
/*
* Timed checks: stale transactions, inodes to be freed and so on.
*/
- struct delayed_work dwork;
- struct delayed_work drop_dwork;
+ struct delayed_work dwork;
+ struct delayed_work drop_dwork;
struct super_block *sb;
@@ -911,7 +911,8 @@ static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb,
pohmelfs_mcache_free(psb, m);
}
-//#define POHMELFS_TRUNCATE_ON_INODE_FLUSH
+/*#define POHMELFS_TRUNCATE_ON_INODE_FLUSH
+ */
#endif /* __KERNEL__*/
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index 1561f74a413..ecd73135b31 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -57,7 +57,7 @@ static int debug;
#define QT2_HW_FLOW_CONTROL_MASK 0xc5
#define QT2_SW_FLOW_CONTROL_MASK 0xc6
#define QT2_SW_FLOW_CONTROL_DISABLE 0xc7
-#define QT2_BREAK_CONTROL 0xc8
+#define QT2_BREAK_CONTROL 0xc8
#define QT2_STOP_RECEIVE 0xe0
#define QT2_FLUSH_DEVICE 0xc4
#define QT2_GET_SET_QMCR 0xe1
@@ -207,7 +207,7 @@ struct quatech2_dev {
bool ReadBulkStopped;
char open_ports;
struct usb_serial_port *current_port;
- int buffer_size;
+ int buffer_size;
};
/* structure which holds line and modem status flags */
@@ -1648,10 +1648,10 @@ __func__);
} /*endif*/
if (tty_st && urb->actual_length) {
tty_buffer_request_room(tty_st, 1);
- tty_insert_flip_string(tty_st,
- &((unsigned char *)(urb->transfer_buffer)
- )[i],
- 1);
+ tty_insert_flip_string(tty_st, &(
+ (unsigned char *)
+ (urb->transfer_buffer)
+ )[i], 1);
}
} /*endfor*/
tty_flip_buffer_push(tty_st);
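The quatech_usb2.c hunk only re-wraps an awkward tty_insert_flip_string() call; the underlying receive path is the usual flip-buffer pattern. A sketch with invented demo_ names, limited to the tty interfaces visible in the hunk:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* push 'len' received bytes to the line discipline one byte at a time */
static void demo_push_rx(struct tty_struct *tty, const unsigned char *buf,
			 int len)
{
	int i;

	for (i = 0; i < len; i++) {
		tty_buffer_request_room(tty, 1);
		tty_insert_flip_string(tty, &buf[i], 1);
	}
	tty_flip_buffer_push(tty);
}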
diff --git a/drivers/staging/ramzswap/TODO b/drivers/staging/ramzswap/TODO
deleted file mode 100644
index 8d64e28fac0..00000000000
--- a/drivers/staging/ramzswap/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - Add support for swap notifiers
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Nitin Gupta <ngupta@vflare.org>
diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c
index ee5eb12b928..d14bf9129e3 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.c
+++ b/drivers/staging/ramzswap/ramzswap_drv.c
@@ -36,13 +36,6 @@
static int ramzswap_major;
static struct ramzswap *devices;
-/*
- * Pages that compress to larger than this size are
- * forwarded to backing swap, if present or stored
- * uncompressed in memory otherwise.
- */
-static unsigned int max_zpage_size;
-
/* Module params (documentation at end) */
static unsigned int num_devices;
@@ -79,52 +72,6 @@ static int page_zero_filled(void *ptr)
return 1;
}
-/*
- * memlimit cannot be greater than backing disk size.
- */
-static void ramzswap_set_memlimit(struct ramzswap *rzs, size_t totalram_bytes)
-{
- int memlimit_valid = 1;
-
- if (!rzs->memlimit) {
- pr_info("Memory limit not set.\n");
- memlimit_valid = 0;
- }
-
- if (rzs->memlimit > rzs->disksize) {
- pr_info("Memory limit cannot be greater than "
- "disksize: limit=%zu, disksize=%zu\n",
- rzs->memlimit, rzs->disksize);
- memlimit_valid = 0;
- }
-
- if (!memlimit_valid) {
- size_t mempart, disksize;
- pr_info("Using default: smaller of (%u%% of RAM) and "
- "(backing disk size).\n",
- default_memlimit_perc_ram);
- mempart = default_memlimit_perc_ram * (totalram_bytes / 100);
- disksize = rzs->disksize;
- rzs->memlimit = mempart > disksize ? disksize : mempart;
- }
-
- if (rzs->memlimit > totalram_bytes / 2) {
- pr_info(
- "Its not advisable setting limit more than half of "
- "size of memory since we expect a 2:1 compression ratio. "
- "Limit represents amount of *compressed* data we can keep "
- "in memory!\n"
- "\tMemory Size: %zu kB\n"
- "\tLimit you selected: %zu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, rzs->memlimit >> 10
- );
- }
-
- rzs->memlimit &= PAGE_MASK;
- BUG_ON(!rzs->memlimit);
-}
-
static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
{
if (!rzs->disksize) {
@@ -156,80 +103,22 @@ static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
/*
* Swap header (1st page of swap device) contains information
- * to indentify it as a swap partition. Prepare such a header
- * for ramzswap device (ramzswap0) so that swapon can identify
- * it as swap partition. In case backing swap device is provided,
- * copy its swap header.
+ * about a swap file/partition. Prepare such a header for the
+ * given ramzswap device so that swapon can identify it as a
+ * swap partition.
*/
-static int setup_swap_header(struct ramzswap *rzs, union swap_header *s)
+static void setup_swap_header(struct ramzswap *rzs, union swap_header *s)
{
- int ret = 0;
- struct page *page;
- struct address_space *mapping;
- union swap_header *backing_swap_header;
-
- /*
- * There is no backing swap device. Create a swap header
- * that is acceptable by swapon.
- */
- if (!rzs->backing_swap) {
- s->info.version = 1;
- s->info.last_page = (rzs->disksize >> PAGE_SHIFT) - 1;
- s->info.nr_badpages = 0;
- memcpy(s->magic.magic, "SWAPSPACE2", 10);
- return 0;
- }
-
- /*
- * We have a backing swap device. Copy its swap header
- * to ramzswap device header. If this header contains
- * invalid information (backing device not a swap
- * partition, etc.), swapon will fail for ramzswap
- * which is correct behavior - we don't want to swap
- * over filesystem partition!
- */
-
- /* Read the backing swap header (code from sys_swapon) */
- mapping = rzs->swap_file->f_mapping;
- if (!mapping->a_ops->readpage) {
- ret = -EINVAL;
- goto out;
- }
-
- page = read_mapping_page(mapping, 0, rzs->swap_file);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
-
- backing_swap_header = kmap(page);
- memcpy(s, backing_swap_header, sizeof(*s));
- if (s->info.nr_badpages) {
- pr_info("Cannot use backing swap with bad pages (%u)\n",
- s->info.nr_badpages);
- ret = -EINVAL;
- }
- /*
- * ramzswap disksize equals number of usable pages in backing
- * swap. Set last_page in swap header to match this disksize
- * ('last_page' means 0-based index of last usable swap page).
- */
+ s->info.version = 1;
s->info.last_page = (rzs->disksize >> PAGE_SHIFT) - 1;
- kunmap(page);
-
-out:
- return ret;
+ s->info.nr_badpages = 0;
+ memcpy(s->magic.magic, "SWAPSPACE2", 10);
}
static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
struct ramzswap_ioctl_stats *s)
{
- strncpy(s->backing_swap_name, rzs->backing_swap_name,
- MAX_SWAP_NAME_LEN - 1);
- s->backing_swap_name[MAX_SWAP_NAME_LEN - 1] = '\0';
-
s->disksize = rzs->disksize;
- s->memlimit = rzs->memlimit;
#if defined(CONFIG_RAMZSWAP_STATS)
{
@@ -265,333 +154,10 @@ static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
s->orig_data_size = rs->pages_stored << PAGE_SHIFT;
s->compr_data_size = rs->compr_size;
s->mem_used_total = mem_used;
-
- s->bdev_num_reads = rzs_stat64_read(rzs, &rs->bdev_num_reads);
- s->bdev_num_writes = rzs_stat64_read(rzs, &rs->bdev_num_writes);
}
#endif /* CONFIG_RAMZSWAP_STATS */
}
-static int add_backing_swap_extent(struct ramzswap *rzs,
- pgoff_t phy_pagenum,
- pgoff_t num_pages)
-{
- unsigned int idx;
- struct list_head *head;
- struct page *curr_page, *new_page;
- unsigned int extents_per_page = PAGE_SIZE /
- sizeof(struct ramzswap_backing_extent);
-
- idx = rzs->num_extents % extents_per_page;
- if (!idx) {
- new_page = alloc_page(__GFP_ZERO);
- if (!new_page)
- return -ENOMEM;
-
- if (rzs->num_extents) {
- curr_page = virt_to_page(rzs->curr_extent);
- head = &curr_page->lru;
- } else {
- head = &rzs->backing_swap_extent_list;
- }
-
- list_add(&new_page->lru, head);
- rzs->curr_extent = page_address(new_page);
- }
-
- rzs->curr_extent->phy_pagenum = phy_pagenum;
- rzs->curr_extent->num_pages = num_pages;
-
- pr_debug("add_extent: idx=%u, phy_pgnum=%lu, num_pgs=%lu, "
- "pg_last=%lu, curr_ext=%p\n", idx, phy_pagenum, num_pages,
- phy_pagenum + num_pages - 1, rzs->curr_extent);
-
- if (idx != extents_per_page - 1)
- rzs->curr_extent++;
-
- return 0;
-}
-
-static int setup_backing_swap_extents(struct ramzswap *rzs,
- struct inode *inode, unsigned long *num_pages)
-{
- int ret = 0;
- unsigned blkbits;
- unsigned blocks_per_page;
- pgoff_t contig_pages = 0, total_pages = 0;
- pgoff_t pagenum = 0, prev_pagenum = 0;
- sector_t probe_block = 0;
- sector_t last_block;
-
- blkbits = inode->i_blkbits;
- blocks_per_page = PAGE_SIZE >> blkbits;
-
- last_block = i_size_read(inode) >> blkbits;
- while (probe_block + blocks_per_page <= last_block) {
- unsigned block_in_page;
- sector_t first_block;
-
- first_block = bmap(inode, probe_block);
- if (first_block == 0)
- goto bad_bmap;
-
- /* It must be PAGE_SIZE aligned on-disk */
- if (first_block & (blocks_per_page - 1)) {
- probe_block++;
- goto probe_next;
- }
-
- /* All blocks within this page must be contiguous on disk */
- for (block_in_page = 1; block_in_page < blocks_per_page;
- block_in_page++) {
- sector_t block;
-
- block = bmap(inode, probe_block + block_in_page);
- if (block == 0)
- goto bad_bmap;
- if (block != first_block + block_in_page) {
- /* Discontiguity */
- probe_block++;
- goto probe_next;
- }
- }
-
- /*
- * We found a PAGE_SIZE length, PAGE_SIZE aligned
- * run of blocks.
- */
- pagenum = first_block >> (PAGE_SHIFT - blkbits);
-
- if (total_pages && (pagenum != prev_pagenum + 1)) {
- ret = add_backing_swap_extent(rzs, prev_pagenum -
- (contig_pages - 1), contig_pages);
- if (ret < 0)
- goto out;
- rzs->num_extents++;
- contig_pages = 0;
- }
- total_pages++;
- contig_pages++;
- prev_pagenum = pagenum;
- probe_block += blocks_per_page;
-
-probe_next:
- continue;
- }
-
- if (contig_pages) {
- pr_debug("adding last extent: pagenum=%lu, "
- "contig_pages=%lu\n", pagenum, contig_pages);
- ret = add_backing_swap_extent(rzs,
- prev_pagenum - (contig_pages - 1), contig_pages);
- if (ret < 0)
- goto out;
- rzs->num_extents++;
- }
- if (!rzs->num_extents) {
- pr_err("No swap extents found!\n");
- ret = -EINVAL;
- }
-
- if (!ret) {
- *num_pages = total_pages;
- pr_info("Found %lu extents containing %luk\n",
- rzs->num_extents, *num_pages << (PAGE_SHIFT - 10));
- }
- goto out;
-
-bad_bmap:
- pr_err("Backing swapfile has holes\n");
- ret = -EINVAL;
-out:
- while (ret && !list_empty(&rzs->backing_swap_extent_list)) {
- struct page *page;
- struct list_head *entry = rzs->backing_swap_extent_list.next;
- page = list_entry(entry, struct page, lru);
- list_del(entry);
- __free_page(page);
- }
- return ret;
-}
-
-static void map_backing_swap_extents(struct ramzswap *rzs)
-{
- struct ramzswap_backing_extent *se;
- struct page *table_page, *se_page;
- unsigned long num_pages, num_table_pages, entry;
- unsigned long se_idx, span;
- unsigned entries_per_page = PAGE_SIZE / sizeof(*rzs->table);
- unsigned extents_per_page = PAGE_SIZE / sizeof(*se);
-
- /* True for block device */
- if (!rzs->num_extents)
- return;
-
- se_page = list_entry(rzs->backing_swap_extent_list.next,
- struct page, lru);
- se = page_address(se_page);
- span = se->num_pages;
- num_pages = rzs->disksize >> PAGE_SHIFT;
- num_table_pages = DIV_ROUND_UP(num_pages * sizeof(*rzs->table),
- PAGE_SIZE);
-
- entry = 0;
- se_idx = 0;
- while (num_table_pages--) {
- table_page = vmalloc_to_page(&rzs->table[entry]);
- while (span <= entry) {
- se_idx++;
- if (se_idx == rzs->num_extents)
- BUG();
-
- if (!(se_idx % extents_per_page)) {
- se_page = list_entry(se_page->lru.next,
- struct page, lru);
- se = page_address(se_page);
- } else
- se++;
-
- span += se->num_pages;
- }
- table_page->mapping = (struct address_space *)se;
- table_page->private = se->num_pages - (span - entry);
- pr_debug("map_table: entry=%lu, span=%lu, map=%p, priv=%lu\n",
- entry, span, table_page->mapping, table_page->private);
- entry += entries_per_page;
- }
-}
-
-/*
- * Check if value of backing_swap module param is sane.
- * Claim this device and set ramzswap size equal to
- * size of this block device.
- */
-static int setup_backing_swap(struct ramzswap *rzs)
-{
- int ret = 0;
- size_t disksize;
- unsigned long num_pages = 0;
- struct inode *inode;
- struct file *swap_file;
- struct address_space *mapping;
- struct block_device *bdev = NULL;
-
- if (!rzs->backing_swap_name[0]) {
- pr_debug("backing_swap param not given\n");
- goto out;
- }
-
- pr_info("Using backing swap device: %s\n", rzs->backing_swap_name);
-
- swap_file = filp_open(rzs->backing_swap_name,
- O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(swap_file)) {
- pr_err("Error opening backing device: %s\n",
- rzs->backing_swap_name);
- ret = -EINVAL;
- goto out;
- }
-
- mapping = swap_file->f_mapping;
- inode = mapping->host;
-
- if (S_ISBLK(inode->i_mode)) {
- bdev = I_BDEV(inode);
- ret = bd_claim(bdev, setup_backing_swap);
- if (ret < 0) {
- bdev = NULL;
- goto bad_param;
- }
- disksize = i_size_read(inode);
- /*
- * Can happen if user gives an extended partition as
- * backing swap or simply a bad disk.
- */
- if (!disksize) {
- pr_err("Error reading backing swap size.\n");
- goto bad_param;
- }
- } else if (S_ISREG(inode->i_mode)) {
- bdev = inode->i_sb->s_bdev;
- if (IS_SWAPFILE(inode)) {
- ret = -EBUSY;
- goto bad_param;
- }
- ret = setup_backing_swap_extents(rzs, inode, &num_pages);
- if (ret < 0)
- goto bad_param;
- disksize = num_pages << PAGE_SHIFT;
- } else {
- goto bad_param;
- }
-
- rzs->swap_file = swap_file;
- rzs->backing_swap = bdev;
- rzs->disksize = disksize;
-
- return 0;
-
-bad_param:
- if (bdev)
- bd_release(bdev);
- filp_close(swap_file, NULL);
-
-out:
- rzs->backing_swap = NULL;
- return ret;
-}
-
-/*
- * Map logical page number 'pagenum' to physical page number
- * on backing swap device. For block device, this is a nop.
- */
-static u32 map_backing_swap_page(struct ramzswap *rzs, u32 pagenum)
-{
- u32 skip_pages, entries_per_page;
- size_t delta, se_offset, skipped;
- struct page *table_page, *se_page;
- struct ramzswap_backing_extent *se;
-
- if (!rzs->num_extents)
- return pagenum;
-
- entries_per_page = PAGE_SIZE / sizeof(*rzs->table);
-
- table_page = vmalloc_to_page(&rzs->table[pagenum]);
- se = (struct ramzswap_backing_extent *)table_page->mapping;
- se_page = virt_to_page(se);
-
- skip_pages = pagenum - (pagenum / entries_per_page * entries_per_page);
- se_offset = table_page->private + skip_pages;
-
- if (se_offset < se->num_pages)
- return se->phy_pagenum + se_offset;
-
- skipped = se->num_pages - table_page->private;
- do {
- struct ramzswap_backing_extent *se_base;
- u32 se_entries_per_page = PAGE_SIZE / sizeof(*se);
-
- /* Get next swap extent */
- se_base = (struct ramzswap_backing_extent *)
- page_address(se_page);
- if (se - se_base == se_entries_per_page - 1) {
- se_page = list_entry(se_page->lru.next,
- struct page, lru);
- se = page_address(se_page);
- } else {
- se++;
- }
-
- skipped += se->num_pages;
- } while (skipped < skip_pages);
-
- delta = skipped - skip_pages;
- se_offset = se->num_pages - delta;
-
- return se->phy_pagenum + se_offset;
-}
-
static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
{
u32 clen;
@@ -678,38 +244,12 @@ static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
/*
* Called when request page is not present in ramzswap.
- * Its either in backing swap device (if present) or
- * this is an attempt to read before any previous write
+ * This is an attempt to read before any previous write
* to this location - this happens due to readahead when
* swap device is read from user-space (e.g. during swapon)
*/
static int handle_ramzswap_fault(struct ramzswap *rzs, struct bio *bio)
{
- /*
- * Always forward such requests to backing swap
- * device (if present)
- */
- if (rzs->backing_swap) {
- u32 pagenum;
- rzs_stat64_dec(rzs, &rzs->stats.num_reads);
- rzs_stat64_inc(rzs, &rzs->stats.bdev_num_reads);
- bio->bi_bdev = rzs->backing_swap;
-
- /*
- * In case backing swap is a file, find the right offset within
- * the file corresponding to logical position 'index'. For block
- * device, this is a nop.
- */
- pagenum = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
- bio->bi_sector = map_backing_swap_page(rzs, pagenum)
- << SECTORS_PER_PAGE_SHIFT;
- return 1;
- }
-
- /*
- * Its unlikely event in case backing dev is
- * not present
- */
pr_debug("Read before write on swap device: "
"sector=%lu, size=%u, offset=%u\n",
(ulong)(bio->bi_sector), bio->bi_size,
@@ -781,7 +321,7 @@ out:
static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
{
- int ret, fwd_write_request = 0;
+ int ret;
u32 offset, index;
size_t clen;
struct zobj_header *zheader;
@@ -795,14 +335,6 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
src = rzs->compress_buffer;
- /*
- * System swaps to same sector again when the stored page
- * is no longer referenced by any process. So, its now safe
- * to free the memory that was allocated for this page.
- */
- if (rzs->table[index].page || rzs_test_flag(rzs, index, RZS_ZERO))
- ramzswap_free_page(rzs, index);
-
mutex_lock(&rzs->lock);
user_mem = kmap_atomic(page, KM_USER0);
@@ -817,14 +349,6 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
return 0;
}
- if (rzs->backing_swap &&
- (rzs->stats.compr_size > rzs->memlimit - PAGE_SIZE)) {
- kunmap_atomic(user_mem, KM_USER0);
- mutex_unlock(&rzs->lock);
- fwd_write_request = 1;
- goto out;
- }
-
ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
rzs->compress_workmem);
@@ -838,18 +362,11 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
}
/*
- * Page is incompressible. Forward it to backing swap
- * if present. Otherwise, store it as-is (uncompressed)
+ * Page is incompressible. Store it as-is (uncompressed)
* since we do not want to return too many swap write
* errors which has side effect of hanging the system.
*/
if (unlikely(clen > max_zpage_size)) {
- if (rzs->backing_swap) {
- mutex_unlock(&rzs->lock);
- fwd_write_request = 1;
- goto out;
- }
-
clen = PAGE_SIZE;
page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (unlikely(!page_store)) {
@@ -875,8 +392,6 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
- if (rzs->backing_swap)
- fwd_write_request = 1;
goto out;
}
@@ -914,31 +429,6 @@ memstore:
return 0;
out:
- if (fwd_write_request) {
- rzs_stat64_inc(rzs, &rzs->stats.bdev_num_writes);
- bio->bi_bdev = rzs->backing_swap;
-#if 0
- /*
- * TODO: We currently have linear mapping of ramzswap and
- * backing swap sectors. This is not desired since we want
- * to optimize writes to backing swap to minimize disk seeks
- * or have effective wear leveling (for SSDs). Also, a
- * non-linear mapping is required to implement compressed
- * on-disk swapping.
- */
- bio->bi_sector = get_backing_swap_page()
- << SECTORS_PER_PAGE_SHIFT;
-#endif
- /*
- * In case backing swap is a file, find the right offset within
- * the file corresponding to logical position 'index'. For block
- * device, this is a nop.
- */
- bio->bi_sector = map_backing_swap_page(rzs, index)
- << SECTORS_PER_PAGE_SHIFT;
- return 1;
- }
-
bio_io_error(bio);
return 0;
}
@@ -996,19 +486,11 @@ static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
static void reset_device(struct ramzswap *rzs)
{
- int is_backing_blkdev = 0;
- size_t index, num_pages;
- unsigned entries_per_page;
- unsigned long num_table_pages, entry = 0;
+ size_t index;
/* Do not accept any new I/O request */
rzs->init_done = 0;
- if (rzs->backing_swap && !rzs->num_extents)
- is_backing_blkdev = 1;
-
- num_pages = rzs->disksize >> PAGE_SHIFT;
-
/* Free various per-device buffers */
kfree(rzs->compress_workmem);
free_pages((unsigned long)rzs->compress_buffer, 1);
@@ -1017,7 +499,7 @@ static void reset_device(struct ramzswap *rzs)
rzs->compress_buffer = NULL;
/* Free all pages that are still in this ramzswap device */
- for (index = 0; index < num_pages; index++) {
+ for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
struct page *page;
u16 offset;
@@ -1033,51 +515,16 @@ static void reset_device(struct ramzswap *rzs)
xv_free(rzs->mem_pool, page, offset);
}
- entries_per_page = PAGE_SIZE / sizeof(*rzs->table);
- num_table_pages = DIV_ROUND_UP(num_pages * sizeof(*rzs->table),
- PAGE_SIZE);
- /*
- * Set page->mapping to NULL for every table page.
- * Otherwise, we will hit bad_page() during free.
- */
- while (rzs->num_extents && num_table_pages--) {
- struct page *page;
- page = vmalloc_to_page(&rzs->table[entry]);
- page->mapping = NULL;
- entry += entries_per_page;
- }
vfree(rzs->table);
rzs->table = NULL;
xv_destroy_pool(rzs->mem_pool);
rzs->mem_pool = NULL;
- /* Free all swap extent pages */
- while (!list_empty(&rzs->backing_swap_extent_list)) {
- struct page *page;
- struct list_head *entry;
- entry = rzs->backing_swap_extent_list.next;
- page = list_entry(entry, struct page, lru);
- list_del(entry);
- __free_page(page);
- }
- INIT_LIST_HEAD(&rzs->backing_swap_extent_list);
- rzs->num_extents = 0;
-
- /* Close backing swap device, if present */
- if (rzs->backing_swap) {
- if (is_backing_blkdev)
- bd_release(rzs->backing_swap);
- filp_close(rzs->swap_file, NULL);
- rzs->backing_swap = NULL;
- memset(rzs->backing_swap_name, 0, MAX_SWAP_NAME_LEN);
- }
-
/* Reset stats */
memset(&rzs->stats, 0, sizeof(rzs->stats));
rzs->disksize = 0;
- rzs->memlimit = 0;
}
static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
@@ -1092,14 +539,7 @@ static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
return -EBUSY;
}
- ret = setup_backing_swap(rzs);
- if (ret)
- goto fail;
-
- if (rzs->backing_swap)
- ramzswap_set_memlimit(rzs, totalram_pages << PAGE_SHIFT);
- else
- ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
+ ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!rzs->compress_workmem) {
@@ -1126,8 +566,6 @@ static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
}
memset(rzs->table, 0, num_pages * sizeof(*rzs->table));
- map_backing_swap_extents(rzs);
-
page = alloc_page(__GFP_ZERO);
if (!page) {
pr_err("Error allocating swap header page\n");
@@ -1138,23 +576,13 @@ static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
rzs_set_flag(rzs, 0, RZS_UNCOMPRESSED);
swap_header = kmap(page);
- ret = setup_swap_header(rzs, swap_header);
+ setup_swap_header(rzs, swap_header);
kunmap(page);
- if (ret) {
- pr_err("Error setting swap header\n");
- goto fail;
- }
set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);
- /*
- * We have ident mapping of sectors for ramzswap and
- * and the backing swap device. So, this queue flag
- * should be according to backing dev.
- */
- if (!rzs->backing_swap ||
- blk_queue_nonrot(rzs->backing_swap->bd_disk->queue))
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
+ /* ramzswap devices sort of resembles non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
rzs->mem_pool = xv_create_pool();
if (!rzs->mem_pool) {
@@ -1163,17 +591,6 @@ static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
goto fail;
}
- /*
- * Pages that compress to size greater than this are forwarded
- * to physical swap disk (if backing dev is provided)
- * TODO: make this configurable
- */
- if (rzs->backing_swap)
- max_zpage_size = max_zpage_size_bdev;
- else
- max_zpage_size = max_zpage_size_nobdev;
- pr_debug("Max compressed page size: %u bytes\n", max_zpage_size);
-
rzs->init_done = 1;
pr_debug("Initialization done!\n");
@@ -1198,7 +615,7 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret = 0;
- size_t disksize_kb, memlimit_kb;
+ size_t disksize_kb;
struct ramzswap *rzs = bdev->bd_disk->private_data;
@@ -1217,36 +634,6 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
pr_info("Disk size set to %zu kB\n", disksize_kb);
break;
- case RZSIO_SET_MEMLIMIT_KB:
- if (rzs->init_done) {
- /* TODO: allow changing memlimit */
- ret = -EBUSY;
- goto out;
- }
- if (copy_from_user(&memlimit_kb, (void *)arg,
- _IOC_SIZE(cmd))) {
- ret = -EFAULT;
- goto out;
- }
- rzs->memlimit = memlimit_kb << 10;
- pr_info("Memory limit set to %zu kB\n", memlimit_kb);
- break;
-
- case RZSIO_SET_BACKING_SWAP:
- if (rzs->init_done) {
- ret = -EBUSY;
- goto out;
- }
-
- if (copy_from_user(&rzs->backing_swap_name, (void *)arg,
- _IOC_SIZE(cmd))) {
- ret = -EFAULT;
- goto out;
- }
- rzs->backing_swap_name[MAX_SWAP_NAME_LEN - 1] = '\0';
- pr_info("Backing swap set to %s\n", rzs->backing_swap_name);
- break;
-
case RZSIO_GET_STATS:
{
struct ramzswap_ioctl_stats *stats;
@@ -1295,9 +682,21 @@ out:
return ret;
}
+void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
+{
+ struct ramzswap *rzs;
+
+ rzs = bdev->bd_disk->private_data;
+ ramzswap_free_page(rzs, index);
+ rzs_stat64_inc(rzs, &rzs->stats.notify_free);
+
+ return;
+}
+
static struct block_device_operations ramzswap_devops = {
.ioctl = ramzswap_ioctl,
- .owner = THIS_MODULE,
+ .swap_slot_free_notify = ramzswap_slot_free_notify,
+ .owner = THIS_MODULE
};
static int create_device(struct ramzswap *rzs, int device_id)
@@ -1306,7 +705,6 @@ static int create_device(struct ramzswap *rzs, int device_id)
mutex_init(&rzs->lock);
spin_lock_init(&rzs->stat64_lock);
- INIT_LIST_HEAD(&rzs->backing_swap_extent_list);
rzs->queue = blk_alloc_queue(GFP_KERNEL);
if (!rzs->queue) {
@@ -1336,10 +734,7 @@ static int create_device(struct ramzswap *rzs, int device_id)
rzs->disk->private_data = rzs;
snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);
- /*
- * Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl
- * or set equal to backing swap device (if provided)
- */
+ /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
set_capacity(rzs->disk, 0);
blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
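The largest change above removes the backing-swap path and adds ramzswap_slot_free_notify(), wired in through block_device_operations. The hook lets the swap core tell the device that a slot is no longer referenced, so the compressed copy can be freed immediately instead of lingering until it is overwritten. A self-contained sketch of the same wiring with invented demo_ names and toy bookkeeping:

#include <linux/blkdev.h>
#include <linux/module.h>

struct demo_dev {
	unsigned long freed;		/* toy counter only */
};

static void demo_free_slot(struct demo_dev *d, unsigned long index)
{
	d->freed++;			/* a real driver frees the stored object here */
}

static void demo_slot_free_notify(struct block_device *bdev,
				  unsigned long index)
{
	struct demo_dev *d = bdev->bd_disk->private_data;

	demo_free_slot(d, index);
}

static struct block_device_operations demo_devops = {
	.swap_slot_free_notify	= demo_slot_free_notify,
	.owner			= THIS_MODULE,
};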
diff --git a/drivers/staging/ramzswap/ramzswap_drv.h b/drivers/staging/ramzswap/ramzswap_drv.h
index c7e0e767c22..63c30420df2 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.h
+++ b/drivers/staging/ramzswap/ramzswap_drv.h
@@ -31,8 +31,7 @@ static const unsigned max_num_devices = 32;
* Stored at beginning of each compressed object.
*
* It stores back-reference to table entry which points to this
- * object. This is required to support memory defragmentation or
- * migrating compressed pages to backing swap disk.
+ * object. This is required to support memory defragmentation.
*/
struct zobj_header {
#if 0
@@ -44,27 +43,17 @@ struct zobj_header {
/* Default ramzswap disk size: 25% of total RAM */
static const unsigned default_disksize_perc_ram = 25;
-static const unsigned default_memlimit_perc_ram = 15;
/*
- * Max compressed page size when backing device is provided.
- * Pages that compress to size greater than this are sent to
- * physical swap disk.
- */
-static const unsigned max_zpage_size_bdev = PAGE_SIZE / 2;
-
-/*
- * Max compressed page size when there is no backing dev.
* Pages that compress to size greater than this are stored
* uncompressed in memory.
*/
-static const unsigned max_zpage_size_nobdev = PAGE_SIZE / 4 * 3;
+static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
/*
- * NOTE: max_zpage_size_{bdev,nobdev} sizes must be
- * less than or equal to:
+ * NOTE: max_zpage_size must be less than or equal to:
* XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * since otherwise xv_malloc would always return failure.
+ * otherwise, xv_malloc() would always return failure.
*/
/*-- End of configurable params */
@@ -98,15 +87,6 @@ struct table {
u8 flags;
} __attribute__((aligned(4)));
-/*
- * Swap extent information in case backing swap is a regular
- * file. These extent entries must fit exactly in a page.
- */
-struct ramzswap_backing_extent {
- pgoff_t phy_pagenum;
- pgoff_t num_pages;
-} __attribute__((aligned(4)));
-
struct ramzswap_stats {
/* basic stats */
size_t compr_size; /* compressed size of pages stored -
@@ -123,8 +103,6 @@ struct ramzswap_stats {
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
u32 pages_expand; /* % of incompressible pages */
- u64 bdev_num_reads; /* no. of reads on backing dev */
- u64 bdev_num_writes; /* no. of writes on backing dev */
#endif
};
@@ -139,11 +117,6 @@ struct ramzswap {
struct gendisk *disk;
int init_done;
/*
- * This is limit on compressed data size (stats.compr_size)
- * Its applicable only when backing swap device is present.
- */
- size_t memlimit; /* bytes */
- /*
* This is limit on amount of *uncompressed* worth of data
* we can hold. When backing swap device is provided, it is
* set equal to device size.
@@ -151,14 +124,6 @@ struct ramzswap {
size_t disksize; /* bytes */
struct ramzswap_stats stats;
-
- /* backing swap device info */
- struct ramzswap_backing_extent *curr_extent;
- struct list_head backing_swap_extent_list;
- unsigned long num_extents;
- char backing_swap_name[MAX_SWAP_NAME_LEN];
- struct block_device *backing_swap;
- struct file *swap_file;
};
/*-- */
@@ -182,13 +147,6 @@ static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
spin_unlock(&rzs->stat64_lock);
}
-static void rzs_stat64_dec(struct ramzswap *rzs, u64 *v)
-{
- spin_lock(&rzs->stat64_lock);
- *v = *v - 1;
- spin_unlock(&rzs->stat64_lock);
-}
-
static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
{
u64 val;
@@ -203,7 +161,6 @@ static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
#define rzs_stat_inc(v)
#define rzs_stat_dec(v)
#define rzs_stat64_inc(r, v)
-#define rzs_stat64_dec(r, v)
#define rzs_stat64_read(r, v)
#endif /* CONFIG_RAMZSWAP_STATS */
diff --git a/drivers/staging/ramzswap/ramzswap_ioctl.h b/drivers/staging/ramzswap/ramzswap_ioctl.h
index d26076d41bd..db94bcb4296 100644
--- a/drivers/staging/ramzswap/ramzswap_ioctl.h
+++ b/drivers/staging/ramzswap/ramzswap_ioctl.h
@@ -15,11 +15,7 @@
#ifndef _RAMZSWAP_IOCTL_H_
#define _RAMZSWAP_IOCTL_H_
-#define MAX_SWAP_NAME_LEN 128
-
struct ramzswap_ioctl_stats {
- char backing_swap_name[MAX_SWAP_NAME_LEN];
- u64 memlimit; /* only applicable if backing swap present */
u64 disksize; /* user specified or equal to backing swap
* size (if present) */
u64 num_reads; /* failed + successful */
@@ -36,15 +32,11 @@ struct ramzswap_ioctl_stats {
u64 orig_data_size;
u64 compr_data_size;
u64 mem_used_total;
- u64 bdev_num_reads; /* no. of reads on backing dev */
- u64 bdev_num_writes; /* no. of writes on backing dev */
} __attribute__ ((packed, aligned(4)));
#define RZSIO_SET_DISKSIZE_KB _IOW('z', 0, size_t)
-#define RZSIO_SET_MEMLIMIT_KB _IOW('z', 1, size_t)
-#define RZSIO_SET_BACKING_SWAP _IOW('z', 2, unsigned char[MAX_SWAP_NAME_LEN])
-#define RZSIO_GET_STATS _IOR('z', 3, struct ramzswap_ioctl_stats)
-#define RZSIO_INIT _IO('z', 4)
-#define RZSIO_RESET _IO('z', 5)
+#define RZSIO_GET_STATS _IOR('z', 1, struct ramzswap_ioctl_stats)
+#define RZSIO_INIT _IO('z', 2)
+#define RZSIO_RESET _IO('z', 3)
#endif
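After this change the control ABI shrinks to disksize, init, reset and stats. A hypothetical user-space sequence for bringing a device up (normally done by the rzscontrol tool); the "/dev/ramzswap0" node name and the 64 MB figure are assumptions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* mirrors the reduced ioctl set above */
#define RZSIO_SET_DISKSIZE_KB	_IOW('z', 0, size_t)
#define RZSIO_INIT		_IO('z', 2)

int main(void)
{
	size_t disksize_kb = 64 * 1024;		/* 64 MB of swap space */
	int fd = open("/dev/ramzswap0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RZSIO_SET_DISKSIZE_KB, &disksize_kb) < 0 ||
	    ioctl(fd, RZSIO_INIT) < 0) {
		perror("ramzswap ioctl");
		close(fd);
		return 1;
	}
	close(fd);
	/* the driver writes the swap header itself, so
	   "swapon /dev/ramzswap0" can follow directly */
	return 0;
}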
diff --git a/drivers/staging/rar_register/Kconfig b/drivers/staging/rar_register/Kconfig
index 3f73839643e..e9c27738199 100644
--- a/drivers/staging/rar_register/Kconfig
+++ b/drivers/staging/rar_register/Kconfig
@@ -8,23 +8,23 @@ menu "RAR Register Driver"
#
config RAR_REGISTER
tristate "Restricted Access Region Register Driver"
+ depends on PCI
default n
---help---
- This driver allows other kernel drivers access to the
- contents of the restricted access region control
- registers.
+ This driver allows other kernel drivers access to the
+ contents of the restricted access region control registers.
- The restricted access region control registers
- (rar_registers) are used to pass address and
- locking information on restricted access regions
- to other drivers that use restricted access regions
+ The restricted access region control registers
+ (rar_registers) are used to pass address and
+ locking information on restricted access regions
+ to other drivers that use restricted access regions.
- The restricted access regions are regions of memory
- on the Intel MID Platform that are not accessible to
- the x86 processor, but are accessible to dedicated
- processors on board peripheral devices.
+ The restricted access regions are regions of memory
+ on the Intel MID Platform that are not accessible to
+ the x86 processor, but are accessible to dedicated
+ processors on board peripheral devices.
- The purpose of the restricted access regions is to
- protect sensitive data from compromise by unauthorized
- programs running on the x86 processor.
+ The purpose of the restricted access regions is to
+ protect sensitive data from compromise by unauthorized
+ programs running on the x86 processor.
endmenu
diff --git a/drivers/staging/rar_register/rar_register.c b/drivers/staging/rar_register/rar_register.c
index bfc0e31f1a6..618503f422e 100644
--- a/drivers/staging/rar_register/rar_register.c
+++ b/drivers/staging/rar_register/rar_register.c
@@ -51,98 +51,159 @@
#include <linux/kernel.h>
/* === Lincroft Message Bus Interface === */
-/* Message Control Register */
-#define LNC_MCR_OFFSET 0xD0
-
-/* Maximum number of clients (other drivers using this driver) */
-#define MAX_RAR_CLIENTS 10
-
-/* Message Data Register */
-#define LNC_MDR_OFFSET 0xD4
+#define LNC_MCR_OFFSET 0xD0 /* Message Control Register */
+#define LNC_MDR_OFFSET 0xD4 /* Message Data Register */
/* Message Opcodes */
-#define LNC_MESSAGE_READ_OPCODE 0xD0
+#define LNC_MESSAGE_READ_OPCODE 0xD0
#define LNC_MESSAGE_WRITE_OPCODE 0xE0
/* Message Write Byte Enables */
-#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF
+#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF
/* B-unit Port */
-#define LNC_BUNIT_PORT 0x3
+#define LNC_BUNIT_PORT 0x3
/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */
-#define LNC_BRAR0L 0x10
-#define LNC_BRAR0H 0x11
-#define LNC_BRAR1L 0x12
-#define LNC_BRAR1H 0x13
-
+#define LNC_BRAR0L 0x10
+#define LNC_BRAR0H 0x11
+#define LNC_BRAR1L 0x12
+#define LNC_BRAR1H 0x13
/* Reserved for SeP */
-#define LNC_BRAR2L 0x14
-#define LNC_BRAR2H 0x15
+#define LNC_BRAR2L 0x14
+#define LNC_BRAR2H 0x15
/* Moorestown supports three restricted access regions. */
#define MRST_NUM_RAR 3
-
/* RAR Bus Address Range */
-struct RAR_address_range {
+struct rar_addr {
dma_addr_t low;
dma_addr_t high;
};
-/* Structure containing low and high RAR register offsets. */
-struct RAR_offsets {
- u32 low; /* Register offset for low RAR bus address. */
- u32 high; /* Register offset for high RAR bus address. */
-};
-
+/*
+ * We create one of these for each RAR
+ */
struct client {
- int (*client_callback)(void *client_data);
- void *customer_data;
- int client_called;
- };
+ int (*callback)(unsigned long data);
+ unsigned long driver_priv;
+ bool busy;
+};
static DEFINE_MUTEX(rar_mutex);
static DEFINE_MUTEX(lnc_reg_mutex);
-struct RAR_device {
- struct RAR_offsets const rar_offsets[MRST_NUM_RAR];
- struct RAR_address_range rar_addr[MRST_NUM_RAR];
+/*
+ * One per RAR device (currently only one device)
+ */
+struct rar_device {
+ struct rar_addr rar_addr[MRST_NUM_RAR];
struct pci_dev *rar_dev;
bool registered;
- };
-
-/* this platform has only one rar_device for 3 rar regions */
-static struct RAR_device my_rar_device = {
- .rar_offsets = {
- [0].low = LNC_BRAR0L,
- [0].high = LNC_BRAR0H,
- [1].low = LNC_BRAR1L,
- [1].high = LNC_BRAR1H,
- [2].low = LNC_BRAR2L,
- [2].high = LNC_BRAR2H
- }
+ bool allocated;
+ struct client client[MRST_NUM_RAR];
};
-/* this data is for handling requests from other drivers which arrive
- * prior to this driver initializing
+/* Current platforms have only one rar_device for 3 rar regions */
+static struct rar_device my_rar_device;
+
+/*
+ * Abstract out multiple device support. Current platforms only
+ * have a single RAR device.
*/
-static struct client clients[MAX_RAR_CLIENTS];
-static int num_clients;
+/**
+ * alloc_rar_device - return a new RAR structure
+ *
+ * Return a new (but not yet ready) RAR device object
+ */
+static struct rar_device *alloc_rar_device(void)
+{
+ if (my_rar_device.allocated)
+ return NULL;
+ my_rar_device.allocated = 1;
+ return &my_rar_device;
+}
-/*
- * This function is used to retrieved RAR info using the Lincroft
- * message bus interface.
+/**
+ * free_rar_device - free a RAR object
+ * @rar: the RAR device being freed
+ *
+ * Release a RAR object and any attached resources
*/
-static int retrieve_rar_addr(struct pci_dev *pdev,
- int offset,
- dma_addr_t *addr)
+static void free_rar_device(struct rar_device *rar)
+{
+ pci_dev_put(rar->rar_dev);
+ rar->allocated = 0;
+}
+
+/**
+ * _rar_to_device - return the device handling this RAR
+ * @rar: RAR number
+ * @off: returned offset
+ *
+ * Internal helper for looking up RAR devices. This and alloc are the
+ * two functions that need touching to go to multiple RAR devices.
+ */
+static struct rar_device *_rar_to_device(int rar, int *off)
+{
+ if (rar >= 0 && rar < MRST_NUM_RAR) {
+ *off = rar;
+ return &my_rar_device;
+ }
+ return NULL;
+}
+
+
+/**
+ * rar_to_device - return the device handling this RAR
+ * @rar: RAR number
+ * @off: returned offset
+ *
+ * Return the device this RAR maps to if one is present, otherwise
+ * returns NULL. Reports the offset relative to the base of this
+ * RAR device in off.
+ */
+static struct rar_device *rar_to_device(int rar, int *off)
+{
+ struct rar_device *rar_dev = _rar_to_device(rar, off);
+ if (rar_dev == NULL || !rar_dev->registered)
+ return NULL;
+ return rar_dev;
+}
+
+/**
+ * rar_to_client - return the client handling this RAR
+ * @rar: RAR number
+ *
+ * Return the client this RAR maps to if a mapping is known, otherwise
+ * returns NULL.
+ */
+static struct client *rar_to_client(int rar)
+{
+ int idx;
+ struct rar_device *r = _rar_to_device(rar, &idx);
+ if (r != NULL)
+ return &r->client[idx];
+ return NULL;
+}
+
+/**
+ * rar_read_addr - retrieve a RAR mapping
+ * @pdev: PCI device for the RAR
+ * @offset: offset for message
+ * @addr: returned address
+ *
+ * Reads the address of a given RAR register. Returns 0 on success
+ * or an error code on failure.
+ */
+static int rar_read_addr(struct pci_dev *pdev, int offset, dma_addr_t *addr)
{
/*
* ======== The Lincroft Message Bus Interface ========
- * Lincroft registers may be obtained from the PCI
- * (the Host Bridge) using the Lincroft Message Bus
+ * Lincroft registers may be obtained via PCI from
+ * the host bridge using the Lincroft Message Bus
* Interface. That message bus interface is generally
 * comprised of two registers: a control register (MCR, 0xD0)
* and a data register (MDR, 0xD4).
@@ -182,6 +243,7 @@ static int retrieve_rar_addr(struct pci_dev *pdev,
*/
int result;
+ u32 addr32;
/* Construct control message */
u32 const message =
@@ -192,11 +254,6 @@ static int retrieve_rar_addr(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset);
- if (addr == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
-
/*
* We synchronize access to the Lincroft MCR and MDR registers
* until BOTH the command is issued through the MCR register
@@ -209,26 +266,25 @@ static int retrieve_rar_addr(struct pci_dev *pdev,
/* Send the control message */
result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message);
-
- dev_dbg(&pdev->dev, "Result from send ctl register is %x\n", result);
-
if (!result) {
- result = pci_read_config_dword(pdev, LNC_MDR_OFFSET,
- (u32 *)addr);
- dev_dbg(&pdev->dev,
- "Result from read data register is %x\n", result);
-
- dev_dbg(&pdev->dev,
- "Value read from data register is %lx\n",
- (unsigned long)*addr);
+ /* Read back the address as a 32bit value */
+ result = pci_read_config_dword(pdev, LNC_MDR_OFFSET, &addr32);
+ *addr = (dma_addr_t)addr32;
}
-
mutex_unlock(&lnc_reg_mutex);
-
return result;
}
-static int set_rar_address(struct pci_dev *pdev,
+/**
+ * rar_set_addr - Set a RAR mapping
+ * @pdev: PCI device for the RAR
+ * @offset: offset for message
+ * @addr: address to set
+ *
+ * Sets the address of a given RAR register. Returns 0 on success
+ * or an error code on failure.
+ */
+static int rar_set_addr(struct pci_dev *pdev,
int offset,
dma_addr_t addr)
{
@@ -236,11 +292,11 @@ static int set_rar_address(struct pci_dev *pdev,
* Data being written to this register must be written before
* writing the appropriate control message to the MCR
* register.
- * @note See rar_get_address() for a description of the
+ * See rar_read_addr() for a description of the
* message bus interface being used here.
*/
- int result = 0;
+ int result;
/* Construct control message */
u32 const message = (LNC_MESSAGE_WRITE_OPCODE << 24)
@@ -248,13 +304,6 @@ static int set_rar_address(struct pci_dev *pdev,
| (offset << 8)
| (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
- if (addr == 0) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- dev_dbg(&pdev->dev, "Offset for 'set' LNC MSG is %x\n", offset);
-
/*
* We synchronize access to the Lincroft MCR and MDR registers
* until BOTH the command is issued through the MCR register
@@ -267,32 +316,27 @@ static int set_rar_address(struct pci_dev *pdev,
 /* Write the address to the data register */
result = pci_write_config_dword(pdev, LNC_MDR_OFFSET, addr);
-
- dev_dbg(&pdev->dev, "Result from write data register is %x\n", result);
-
- if (!result) {
- dev_dbg(&pdev->dev,
- "Value written to data register is %lx\n",
- (unsigned long)addr);
-
+ if (!result)
+ /* Then send the control message */
result = pci_write_config_dword(pdev, LNC_MCR_OFFSET, message);
- dev_dbg(&pdev->dev, "Result from send ctl register is %x\n",
- result);
- }
-
mutex_unlock(&lnc_reg_mutex);
-
return result;
}
/*
-* Initialize RAR parameters, such as bus addresses, etc.
-*/
-static int init_rar_params(struct pci_dev *pdev)
+ * init_rar_params - Initialize RAR parameters
+ * @rar: RAR device to initialize
+ *
+ * Initialize RAR parameters, such as bus addresses, etc. Returns 0
+ * on success, or an error code on failure.
+ */
+static int init_rar_params(struct rar_device *rar)
{
+ struct pci_dev *pdev = rar->rar_dev;
unsigned int i;
int result = 0;
+ int offset = 0x10; /* RAR 0 to 2 in order low/high/low/high/... */
/* Retrieve RAR start and end bus addresses.
* Access the RAR registers through the Lincroft Message Bus
@@ -300,15 +344,16 @@ static int init_rar_params(struct pci_dev *pdev)
*/
for (i = 0; i < MRST_NUM_RAR; ++i) {
- struct RAR_offsets const *offset =
- &my_rar_device.rar_offsets[i];
- struct RAR_address_range *addr = &my_rar_device.rar_addr[i];
-
- if ((retrieve_rar_addr(pdev, offset->low, &addr->low) != 0)
- || (retrieve_rar_addr(pdev, offset->high, &addr->high) != 0)) {
- result = -1;
- break;
- }
+ struct rar_addr *addr = &rar->rar_addr[i];
+
+ result = rar_read_addr(pdev, offset++, &addr->low);
+ if (result != 0)
+ return result;
+
+ result = rar_read_addr(pdev, offset++, &addr->high);
+ if (result != 0)
+ return result;
+
/*
* Only the upper 22 bits of the RAR addresses are
@@ -336,201 +381,237 @@ static int init_rar_params(struct pci_dev *pdev)
/* Done accessing the device. */
if (result == 0) {
- int z;
- for (z = 0; z != MRST_NUM_RAR; ++z) {
+ for (i = 0; i != MRST_NUM_RAR; ++i) {
/*
* "BRAR" refers to the RAR registers in the
* Lincroft B-unit.
*/
dev_info(&pdev->dev, "BRAR[%u] bus address range = "
- "[%lx, %lx]\n", z,
- (unsigned long)my_rar_device.rar_addr[z].low,
- (unsigned long)my_rar_device.rar_addr[z].high);
+ "[%lx, %lx]\n", i,
+ (unsigned long)rar->rar_addr[i].low,
+ (unsigned long)rar->rar_addr[i].high);
}
}
-
return result;
}
-/*
- * The rar_get_address function is used by other device drivers
- * to obtain RAR address information on a RAR. It takes three
- * parameters:
+/**
+ * rar_get_address - get the bus address range of a RAR
+ * @start: returned start address of the block
+ * @end: returned end address of the block
*
- * int rar_index
- * The rar_index is an index to the rar for which you wish to retrieve
- * the address information.
- * Values can be 0,1, or 2.
+ * The rar_get_address function is used by other device drivers
+ * to obtain the bus address range of the RAR selected by
+ * rar_index (0, 1 or 2).
*
- * The function returns a 0 upon success or a -1 if there is no RAR
- * facility on this system.
+ * The function returns 0 upon success or an error if there is no RAR
+ * facility on this system.
*/
-int rar_get_address(int rar_index,
- dma_addr_t *start_address,
- dma_addr_t *end_address)
+int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end)
{
- int result = -ENODEV;
-
- if (my_rar_device.registered) {
- if (start_address == 0 || end_address == 0
- || rar_index >= MRST_NUM_RAR || rar_index < 0) {
- result = -EINVAL;
- } else {
- *start_address =
- my_rar_device.rar_addr[rar_index].low;
- *end_address =
- my_rar_device.rar_addr[rar_index].high;
-
- result = 0;
- }
+ int idx;
+ struct rar_device *rar = rar_to_device(rar_index, &idx);
+
+ if (rar == NULL) {
+ WARN_ON(1);
+ return -ENODEV;
}
- return result;
+ *start = rar->rar_addr[idx].low;
+ *end = rar->rar_addr[idx].high;
+ return 0;
}
EXPORT_SYMBOL(rar_get_address);
-/*
- * The rar_lock function is ued by other device drivers to lock an RAR.
- * once an RAR is locked, it stays locked until the next system reboot.
- * The function takes one parameter:
+/**
+ * rar_lock - lock a RAR register
+ * @rar_index: RAR to lock (0-2)
*
- * int rar_index
- * The rar_index is an index to the rar that you want to lock.
- * Values can be 0,1, or 2.
+ * The rar_lock function is used by other device drivers to lock a RAR.
+ * Once a RAR is locked, it stays locked until the next system reboot.
*
- * The function returns a 0 upon success or a -1 if there is no RAR
- * facility on this system.
+ * The function returns 0 upon success, or an error if there is no RAR
+ * facility on this system or the locking fails.
*/
int rar_lock(int rar_index)
{
- int result = -ENODEV;
-
- if (rar_index >= MRST_NUM_RAR || rar_index < 0) {
- result = -EINVAL;
- return result;
- }
-
- dev_dbg(&my_rar_device.rar_dev->dev, "rar_lock mutex locking\n");
- mutex_lock(&rar_mutex);
+ struct rar_device *rar;
+ int result;
+ int idx;
+ dma_addr_t low, high;
- if (my_rar_device.registered) {
+ rar = rar_to_device(rar_index, &idx);
- dma_addr_t low = my_rar_device.rar_addr[rar_index].low &
- 0xfffffc00u;
+ if (rar == NULL) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
- dma_addr_t high = my_rar_device.rar_addr[rar_index].high &
- 0xfffffc00u;
+ low = rar->rar_addr[idx].low & 0xfffffc00u;
+ high = rar->rar_addr[idx].high & 0xfffffc00u;
- /*
- * Only allow I/O from the graphics and Langwell;
- * Not from the x96 processor
- */
- if (rar_index == (int)RAR_TYPE_VIDEO) {
- low |= 0x00000009;
- high |= 0x00000015;
- }
+ /*
+ * Only allow I/O from the graphics and Langwell;
+ * not from the x86 processor
+ */
- else if (rar_index == (int)RAR_TYPE_AUDIO) {
- /* Only allow I/O from Langwell; nothing from x86 */
- low |= 0x00000008;
- high |= 0x00000018;
- }
+ if (rar_index == RAR_TYPE_VIDEO) {
+ low |= 0x00000009;
+ high |= 0x00000015;
+ } else if (rar_index == RAR_TYPE_AUDIO) {
+ /* Only allow I/O from Langwell; nothing from x86 */
+ low |= 0x00000008;
+ high |= 0x00000018;
+ } else
+ /* Read-only from all agents */
+ high |= 0x00000018;
- else
- /* Read-only from all agents */
- high |= 0x00000018;
+ /*
+ * Now program the register using the Lincroft message
+ * bus interface.
+ */
+ result = rar_set_addr(rar->rar_dev,
+ LNC_BRAR0L + 2 * idx, low);
- /*
- * Now program the register using the Lincroft message
- * bus interface.
- */
- result = set_rar_address(my_rar_device.rar_dev,
- my_rar_device.rar_offsets[rar_index].low,
- low);
-
- if (result == 0)
- result = set_rar_address(
- my_rar_device.rar_dev,
- my_rar_device.rar_offsets[rar_index].high,
- high);
- }
+ if (result == 0)
+ result = rar_set_addr(rar->rar_dev,
+ LNC_BRAR0L + 2 * idx + 1, high);
- dev_dbg(&my_rar_device.rar_dev->dev, "rar_lock mutex unlocking\n");
- mutex_unlock(&rar_mutex);
return result;
}
EXPORT_SYMBOL(rar_lock);
-/* The register_rar function is to used by other device drivers
- * to ensure that this driver is ready. As we cannot be sure of
- * the compile/execute order of dirvers in ther kernel, it is
- * best to give this driver a callback function to call when
- * it is ready to give out addresses. The callback function
- * would have those steps that continue the initialization of
- * a driver that do require a valid RAR address. One of those
- * steps would be to call rar_get_address()
- * This function return 0 on success an -1 on failure.
-*/
-int register_rar(int (*callback)(void *yourparameter), void *yourparameter)
+/**
+ * register_rar - register a RAR handler
+ * @num: RAR we wish to register for
+ * @callback: function to call when RAR support is available
+ * @data: data to pass to this function
+ *
+ * The register_rar function is used by other device drivers
+ * to ensure that this driver is ready. As we cannot be sure of
+ * the compile/execute order of drivers in the kernel, it is
+ * best to give this driver a callback function to call when
+ * it is ready to give out addresses. The callback function
+ * would have those steps that continue the initialization of
+ * a driver that do require a valid RAR address. One of those
+ * steps would be to call rar_get_address().
+ *
+ * This function returns 0 on success or an error code on failure.
+ */
+int register_rar(int num, int (*callback)(unsigned long data),
+ unsigned long data)
{
-
- int result = -ENODEV;
-
- if (callback == NULL)
- return -EINVAL;
+ /* For now we hardcode a single RAR device */
+ struct rar_device *rar;
+ struct client *c;
+ int idx;
+ int retval = 0;
mutex_lock(&rar_mutex);
- if (my_rar_device.registered) {
+ /* Do we have a client mapping for this RAR number? */
+ c = rar_to_client(num);
+ if (c == NULL) {
+ retval = -ERANGE;
+ goto done;
+ }
+ /* Is it claimed? */
+ if (c->busy) {
+ retval = -EBUSY;
+ goto done;
+ }
+ c->busy = 1;
+
+ /* See if we have a handler for this RAR yet; if we do, fire it */
+ rar = rar_to_device(num, &idx);
- mutex_unlock(&rar_mutex);
+ if (rar) {
/*
* if the driver already registered, then we can simply
* call the callback right now
*/
-
- return (*callback)(yourparameter);
- }
-
- if (num_clients < MRST_NUM_RAR) {
-
- clients[num_clients].client_callback = callback;
- clients[num_clients].customer_data = yourparameter;
- num_clients += 1;
- result = 0;
+ (*callback)(data);
+ goto done;
}
+ /* Arrange to be called back when the hardware is found */
+ c->callback = callback;
+ c->driver_priv = data;
+done:
mutex_unlock(&rar_mutex);
- return result;
-
+ return retval;
}
EXPORT_SYMBOL(register_rar);
-/* Suspend - returns -ENOSYS */
-static int rar_suspend(struct pci_dev *dev, pm_message_t state)
+/**
+ * unregister_rar - release a RAR allocation
+ * @num: RAR number
+ *
+ * Releases a RAR allocation, or a pending allocation. If a callback is
+ * pending, it will either have completed before this function returns
+ * or it will not run at all.
+ */
+
+void unregister_rar(int num)
{
- return -ENOSYS;
+ struct client *c;
+
+ mutex_lock(&rar_mutex);
+ c = rar_to_client(num);
+ if (c == NULL || !c->busy)
+ WARN_ON(1);
+ else
+ c->busy = 0;
+ mutex_unlock(&rar_mutex);
}
+EXPORT_SYMBOL(unregister_rar);
-static int rar_resume(struct pci_dev *dev)
+/**
+ * rar_callback - Process callbacks
+ * @rar: new RAR device
+ *
+ * Process the callbacks for a newly found RAR device.
+ */
+
+static void rar_callback(struct rar_device *rar)
{
- return -ENOSYS;
+ struct client *c = &rar->client[0];
+ int i;
+
+ mutex_lock(&rar_mutex);
+
+ rar->registered = 1; /* Ensure no more callbacks queue */
+
+ for (i = 0; i < MRST_NUM_RAR; i++) {
+ if (c->callback && c->busy) {
+ c->callback(c->driver_priv);
+ c->callback = NULL;
+ }
+ c++;
+ }
+ mutex_unlock(&rar_mutex);
}
-/*
- * This function registers the driver with the device subsystem (
- * either PCI, USB, etc).
- * Function that is activaed on the succesful probe of the RAR device
- * (Moorestown host controller).
+/**
+ * rar_probe - PCI probe callback
+ * @dev: PCI device
+ * @id: matching entry in the match table
+ *
+ * A RAR device has been discovered. Initialize it and, if successful,
+ * process any pending callbacks that can now be completed.
*/
static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int error;
- int counter;
+ struct rar_device *rar;
dev_dbg(&dev->dev, "PCI probe starting\n");
- /* enable the device */
+ rar = alloc_rar_device();
+ if (rar == NULL)
+ return -EBUSY;
+
+ /* Enable the device */
error = pci_enable_device(dev);
if (error) {
dev_err(&dev->dev,
@@ -538,50 +619,30 @@ static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto end_function;
}
- /* we have only one device; fill in the rar_device structure */
- my_rar_device.rar_dev = dev;
+ /* Fill in the rar_device structure */
+ rar->rar_dev = pci_dev_get(dev);
+ pci_set_drvdata(dev, rar);
/*
- * Initialize the RAR parameters, which have to be retrieved
- * via the message bus interface.
- */
- error = init_rar_params(dev);
+ * Initialize the RAR parameters, which have to be retrieved
+ * via the message bus interface.
+ */
+ error = init_rar_params(rar);
if (error) {
pci_disable_device(dev);
-
- dev_err(&dev->dev,
- "Error retrieving RAR addresses\n");
-
+ dev_err(&dev->dev, "Error retrieving RAR addresses\n");
goto end_function;
}
-
- dev_dbg(&dev->dev, "PCI probe locking\n");
- mutex_lock(&rar_mutex);
- my_rar_device.registered = 1;
-
/* now call anyone who has registered (using callbacks) */
- for (counter = 0; counter < num_clients; counter += 1) {
- if (clients[counter].client_callback) {
- error = (*clients[counter].client_callback)(
- clients[counter].customer_data);
- /* set callback to NULL to indicate it has been done */
- clients[counter].client_callback = NULL;
- dev_dbg(&my_rar_device.rar_dev->dev,
- "Callback called for %d\n",
- counter);
- }
- }
-
- dev_dbg(&dev->dev, "PCI probe unlocking\n");
- mutex_unlock(&rar_mutex);
-
+ rar_callback(rar);
+ return 0;
end_function:
-
+ free_rar_device(rar);
return error;
}
const struct pci_device_id rar_pci_id_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_RAR_DEVICE_ID) },
+ { PCI_VDEVICE(INTEL, 0x4110) },
{ 0 }
};
@@ -594,8 +655,7 @@ static struct pci_driver rar_pci_driver = {
.name = "rar_register_driver",
.id_table = rar_pci_id_tbl,
.probe = rar_probe,
- .suspend = rar_suspend,
- .resume = rar_resume
+ /* Cannot be unplugged - no remove */
};
static int __init rar_init_handler(void)
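For reference, the Lincroft message-bus accesses performed by rar_read_addr() and rar_set_addr() reduce to the message encoding sketched below. This is a reconstruction from the constants and the write path visible in this file, not code from the patch; the read layout is assumed to mirror the write layout minus the byte enables.

/* Sketch of the MCR message layout (assumed from the constants above):
 *   bits 31-24  opcode           (LNC_MESSAGE_READ_OPCODE 0xD0 or
 *                                 LNC_MESSAGE_WRITE_OPCODE 0xE0)
 *   bits 23-16  target port      (LNC_BUNIT_PORT, 0x3)
 *   bits 15-8   register offset  (LNC_BRAR0L..LNC_BRAR2H, 0x10..0x15)
 *   bits  7-4   byte enables     (LNC_MESSAGE_BYTE_WRITE_ENABLES, writes only)
 */
static u32 lnc_message(u8 opcode, u8 offset, bool is_write)
{
	u32 msg = ((u32)opcode << 24) | (LNC_BUNIT_PORT << 16) |
		  ((u32)offset << 8);

	if (is_write)
		msg |= LNC_MESSAGE_BYTE_WRITE_ENABLES << 4;
	return msg;
}

A read writes such a message to LNC_MCR_OFFSET and fetches the result from LNC_MDR_OFFSET; a write stores the value in LNC_MDR_OFFSET first and issues the message afterwards, exactly as the two helpers above do.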
diff --git a/drivers/staging/rar_register/rar_register.h b/drivers/staging/rar_register/rar_register.h
index 29ade0f361d..ffa805780f8 100644
--- a/drivers/staging/rar_register/rar_register.h
+++ b/drivers/staging/rar_register/rar_register.h
@@ -21,63 +21,23 @@
#ifndef _RAR_REGISTER_H
#define _RAR_REGISTER_H
-# include <linux/types.h>
+#include <linux/types.h>
 /* The following are used both in drivers and in user space apps */
-enum RAR_type {
- RAR_TYPE_VIDEO = 0,
- RAR_TYPE_AUDIO,
- RAR_TYPE_IMAGE,
- RAR_TYPE_DATA
-};
-#ifdef __KERNEL__
+#define RAR_TYPE_VIDEO 0
+#define RAR_TYPE_AUDIO 1
+#define RAR_TYPE_IMAGE 2
+#define RAR_TYPE_DATA 3
-/* PCI device id for controller */
-#define PCI_RAR_DEVICE_ID 0x4110
+#ifdef __KERNEL__
-/* The register_rar function is to used by other device drivers
- * to ensure that this driver is ready. As we cannot be sure of
- * the compile/execute order of dirvers in ther kernel, it is
- * best to give this driver a callback function to call when
- * it is ready to give out addresses. The callback function
- * would have those steps that continue the initialization of
- * a driver that do require a valid RAR address. One of those
- * steps would be to call get_rar_address()
- * This function return 0 on success an -1 on failure.
- */
-int register_rar(int (*callback)(void *yourparameter), void *yourparameter);
+struct rar_device;
-/* The get_rar_address function is used by other device drivers
- * to obtain RAR address information on a RAR. It takes two
- * parameter:
- *
- * int rar_index
- * The rar_index is an index to the rar for which you wish to retrieve
- * the address information.
- * Values can be 0,1, or 2.
- *
- * struct RAR_address_struct is a pointer to a place to which the function
- * can return the address structure for the RAR.
- *
- * The function returns a 0 upon success or a -1 if there is no RAR
- * facility on this system.
- */
-int rar_get_address(int rar_index,
- dma_addr_t *start_address,
- dma_addr_t *end_address);
-
-/* The lock_rar function is ued by other device drivers to lock an RAR.
- * once an RAR is locked, it stays locked until the next system reboot.
- * The function takes one parameter:
- *
- * int rar_index
- * The rar_index is an index to the rar that you want to lock.
- * Values can be 0,1, or 2.
- *
- * The function returns a 0 upon success or a -1 if there is no RAR
- * facility on this system.
- */
+int register_rar(int num,
+ int (*callback)(unsigned long data), unsigned long data);
+void unregister_rar(int num);
+int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end);
int rar_lock(int rar_index);
#endif /* __KERNEL__ */
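With the header reduced to the four entry points above, a client driver would use them roughly as in the following sketch. It is illustrative only: the module, the choice of RAR_TYPE_AUDIO and the callback body are hypothetical, while register_rar(), rar_get_address(), rar_lock() and unregister_rar() are exactly the interfaces declared in this header.

#include <linux/module.h>
#include <linux/kernel.h>
#include "rar_register.h"	/* hypothetical in-tree consumer */

static int my_rar_ready(unsigned long data)
{
	dma_addr_t start, end;
	int err;

	/* The addresses are only valid once this callback has fired */
	err = rar_get_address(RAR_TYPE_AUDIO, &start, &end);
	if (err)
		return err;

	pr_info("audio RAR range [%lx, %lx]\n",
		(unsigned long)start, (unsigned long)end);

	/* Optionally seal the region until the next reboot */
	return rar_lock(RAR_TYPE_AUDIO);
}

static int __init my_client_init(void)
{
	/* Fires the callback immediately if the RAR PCI device has
	 * already probed, otherwise defers it until rar_probe() runs. */
	return register_rar(RAR_TYPE_AUDIO, my_rar_ready, 0);
}

static void __exit my_client_exit(void)
{
	unregister_rar(RAR_TYPE_AUDIO);
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("GPL");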
diff --git a/drivers/staging/rt2860/chip/mac_pci.h b/drivers/staging/rt2860/chip/mac_pci.h
index bc704acaa3d..9f25ef047f5 100644
--- a/drivers/staging/rt2860/chip/mac_pci.h
+++ b/drivers/staging/rt2860/chip/mac_pci.h
@@ -147,13 +147,12 @@ typedef union _TX_ATTENUATION_CTRL_STRUC {
 /* ----------------- Firmware Related MACRO ----------------- */
#define RTMP_WRITE_FIRMWARE(_pAd, _pFwImage, _FwLen) \
- do{ \
+ do { \
unsigned long _i, _firm; \
RTMP_IO_WRITE32(_pAd, PBF_SYS_CTRL, 0x10000); \
\
- for(_i=0; _i<_FwLen; _i+=4) \
- { \
- _firm = _pFwImage[_i] + \
+ for (_i = 0; _i < _FwLen; _i += 4) { \
+ _firm = _pFwImage[_i] + \
(_pFwImage[_i+3] << 24) + \
(_pFwImage[_i+2] << 16) + \
(_pFwImage[_i+1] << 8); \
@@ -165,19 +164,19 @@ typedef union _TX_ATTENUATION_CTRL_STRUC {
/* initialize BBP R/W access agent */ \
RTMP_IO_WRITE32(_pAd, H2M_BBP_AGENT, 0); \
RTMP_IO_WRITE32(_pAd, H2M_MAILBOX_CSR, 0); \
- }while(0)
+ } while (0)
/* ----------------- TX Related MACRO ----------------- */
-#define RTMP_START_DEQUEUE(pAd, QueIdx, irqFlags) do{}while(0)
-#define RTMP_STOP_DEQUEUE(pAd, QueIdx, irqFlags) do{}while(0)
+#define RTMP_START_DEQUEUE(pAd, QueIdx, irqFlags) do {} while (0)
+#define RTMP_STOP_DEQUEUE(pAd, QueIdx, irqFlags) do {} while (0)
#define RTMP_HAS_ENOUGH_FREE_DESC(pAd, pTxBlk, freeNum, pPacket) \
((freeNum) >= (unsigned long)(pTxBlk->TotalFragNum + RTMP_GET_PACKET_FRAGMENTS(pPacket) + 3)) /* rough estimate we will use 3 more descriptor. */
-#define RTMP_RELEASE_DESC_RESOURCE(pAd, QueIdx) \
- do{}while(0)
+#define RTMP_RELEASE_DESC_RESOURCE(pAd, QueIdx) do {} while (0)
#define NEED_QUEUE_BACK_FOR_AGG(pAd, QueIdx, freeNum, _TxFrameType) \
- (((freeNum != (TX_RING_SIZE-1)) && (pAd->TxSwQueue[QueIdx].Number == 0)) || (freeNum<3))
+ (((freeNum != (TX_RING_SIZE-1)) && \
+ (pAd->TxSwQueue[QueIdx].Number == 0)) || (freeNum < 3))
#define HAL_KickOutMgmtTx(_pAd, _QueIdx, _pPacket, _pSrcBufVA, _SrcBufLen) \
RtmpPCIMgmtKickOut(_pAd, _QueIdx, _pPacket, _pSrcBufVA, _SrcBufLen)
@@ -185,19 +184,19 @@ typedef union _TX_ATTENUATION_CTRL_STRUC {
#define HAL_WriteSubTxResource(pAd, pTxBlk, bIsLast, pFreeNumber) \
/* RtmpPCI_WriteSubTxResource(pAd, pTxBlk, bIsLast, pFreeNumber) */
-#define HAL_WriteTxResource(pAd, pTxBlk,bIsLast, pFreeNumber) \
+#define HAL_WriteTxResource(pAd, pTxBlk, bIsLast, pFreeNumber) \
RtmpPCI_WriteSingleTxResource(pAd, pTxBlk, bIsLast, pFreeNumber)
#define HAL_WriteFragTxResource(pAd, pTxBlk, fragNum, pFreeNumber) \
RtmpPCI_WriteFragTxResource(pAd, pTxBlk, fragNum, pFreeNumber)
-#define HAL_WriteMultiTxResource(pAd, pTxBlk,frameNum, pFreeNumber) \
+#define HAL_WriteMultiTxResource(pAd, pTxBlk, frameNum, pFreeNumber) \
RtmpPCI_WriteMultiTxResource(pAd, pTxBlk, frameNum, pFreeNumber)
#define HAL_FinalWriteTxResource(_pAd, _pTxBlk, _TotalMPDUSize, _FirstTxIdx) \
RtmpPCI_FinalWriteTxResource(_pAd, _pTxBlk, _TotalMPDUSize, _FirstTxIdx)
-#define HAL_LastTxIdx(_pAd, _QueIdx,_LastTxIdx) \
+#define HAL_LastTxIdx(_pAd, _QueIdx, _LastTxIdx) \
/*RtmpPCIDataLastTxIdx(_pAd, _QueIdx,_LastTxIdx) */
#define HAL_KickOutTx(_pAd, _pTxBlk, _QueIdx) \
@@ -259,24 +258,24 @@ typedef union _TX_ATTENUATION_CTRL_STRUC {
/* Insert the BA bitmap to ASIC for the Wcid entry */
#define RTMP_ADD_BA_SESSION_TO_ASIC(_pAd, _Aid, _TID) \
- do{ \
+ do { \
u32 _Value = 0, _Offset; \
_Offset = MAC_WCID_BASE + (_Aid) * HW_WCID_ENTRY_SIZE + 4; \
RTMP_IO_READ32((_pAd), _Offset, &_Value);\
_Value |= (0x10000<<(_TID)); \
RTMP_IO_WRITE32((_pAd), _Offset, _Value);\
- }while(0)
+ } while (0)
/* Remove the BA bitmap from ASIC for the Wcid entry */
/* bitmap field starts at 0x10000 in ASIC WCID table */
#define RTMP_DEL_BA_SESSION_FROM_ASIC(_pAd, _Wcid, _TID) \
- do{ \
+ do { \
u32 _Value = 0, _Offset; \
_Offset = MAC_WCID_BASE + (_Wcid) * HW_WCID_ENTRY_SIZE + 4; \
RTMP_IO_READ32((_pAd), _Offset, &_Value); \
_Value &= (~(0x10000 << (_TID))); \
RTMP_IO_WRITE32((_pAd), _Offset, _Value); \
- }while(0)
+ } while (0)
/* ----------------- Interface Related MACRO ----------------- */
@@ -285,16 +284,16 @@ typedef union _TX_ATTENUATION_CTRL_STRUC {
 /* Since it uses the ADAPTER structure, it has to be put after the structure definition. */
/* */
#define RTMP_ASIC_INTERRUPT_DISABLE(_pAd) \
- do{ \
+ do { \
RTMP_IO_WRITE32((_pAd), INT_MASK_CSR, 0x0); /* 0: disable */ \
RTMP_CLEAR_FLAG((_pAd), fRTMP_ADAPTER_INTERRUPT_ACTIVE); \
- }while(0)
+ } while (0)
#define RTMP_ASIC_INTERRUPT_ENABLE(_pAd)\
- do{ \
+ do { \
RTMP_IO_WRITE32((_pAd), INT_MASK_CSR, (_pAd)->int_enable_reg /*DELAYINTMASK*/); /* 1:enable */ \
RTMP_SET_FLAG((_pAd), fRTMP_ADAPTER_INTERRUPT_ACTIVE); \
- }while(0)
+ } while (0)
#define RTMP_IRQ_INIT(pAd) \
{ pAd->int_enable_reg = ((DELAYINTMASK) | \
diff --git a/drivers/staging/rt2860/chip/mac_usb.h b/drivers/staging/rt2860/chip/mac_usb.h
index 0b67c0b1de0..ed0c0b43b05 100644
--- a/drivers/staging/rt2860/chip/mac_usb.h
+++ b/drivers/staging/rt2860/chip/mac_usb.h
@@ -25,7 +25,7 @@
*************************************************************************
Module Name:
- mac_usb.h
+ mac_usb.h
Abstract:
@@ -46,7 +46,7 @@
#define USB_CYC_CFG 0x02a4
#define BEACON_RING_SIZE 2
-#define MGMTPIPEIDX 0 /* EP6 is highest priority */
+#define MGMTPIPEIDX 0 /* EP6 is highest priority */
#define RTMP_PKT_TAIL_PADDING 11 /* 3(max 4 byte padding) + 4 (last packet padding) + 4 (MaxBulkOutsize align padding) */
@@ -220,53 +220,51 @@ struct rt_rx_context {
******************************************************************************/
#define RTMP_START_DEQUEUE(pAd, QueIdx, irqFlags) \
- do{ \
+ do { \
RTMP_IRQ_LOCK(&pAd->DeQueueLock[QueIdx], irqFlags); \
- if (pAd->DeQueueRunning[QueIdx]) \
- { \
- RTMP_IRQ_UNLOCK(&pAd->DeQueueLock[QueIdx], irqFlags);\
+ if (pAd->DeQueueRunning[QueIdx]) { \
+ RTMP_IRQ_UNLOCK(&pAd->DeQueueLock[QueIdx], irqFlags); \
DBGPRINT(RT_DEBUG_OFF, ("DeQueueRunning[%d]= TRUE!\n", QueIdx)); \
continue; \
- } \
- else \
- { \
+ } else { \
pAd->DeQueueRunning[QueIdx] = TRUE; \
RTMP_IRQ_UNLOCK(&pAd->DeQueueLock[QueIdx], irqFlags);\
} \
- }while(0)
+ } while (0)
#define RTMP_STOP_DEQUEUE(pAd, QueIdx, irqFlags) \
- do{ \
+ do { \
RTMP_IRQ_LOCK(&pAd->DeQueueLock[QueIdx], irqFlags); \
pAd->DeQueueRunning[QueIdx] = FALSE; \
RTMP_IRQ_UNLOCK(&pAd->DeQueueLock[QueIdx], irqFlags); \
- }while(0)
+ } while (0)
#define RTMP_HAS_ENOUGH_FREE_DESC(pAd, pTxBlk, freeNum, pPacket) \
(RTUSBFreeDescriptorRequest(pAd, pTxBlk->QueIdx, (pTxBlk->TotalFrameLen + GET_OS_PKT_LEN(pPacket))) == NDIS_STATUS_SUCCESS)
#define RTMP_RELEASE_DESC_RESOURCE(pAd, QueIdx) \
- do{}while(0)
+ do {} while (0)
#define NEED_QUEUE_BACK_FOR_AGG(_pAd, _QueIdx, _freeNum, _TxFrameType) \
- ((_TxFrameType == TX_RALINK_FRAME) && (RTUSBNeedQueueBackForAgg(_pAd, _QueIdx)))
+ ((_TxFrameType == TX_RALINK_FRAME) && \
+ (RTUSBNeedQueueBackForAgg(_pAd, _QueIdx)))
#define HAL_WriteSubTxResource(pAd, pTxBlk, bIsLast, pFreeNumber) \
- RtmpUSB_WriteSubTxResource(pAd, pTxBlk, bIsLast, pFreeNumber)
+ RtmpUSB_WriteSubTxResource(pAd, pTxBlk, bIsLast, pFreeNumber)
-#define HAL_WriteTxResource(pAd, pTxBlk,bIsLast, pFreeNumber) \
- RtmpUSB_WriteSingleTxResource(pAd, pTxBlk,bIsLast, pFreeNumber)
+#define HAL_WriteTxResource(pAd, pTxBlk, bIsLast, pFreeNumber) \
+ RtmpUSB_WriteSingleTxResource(pAd, pTxBlk, bIsLast, pFreeNumber)
#define HAL_WriteFragTxResource(pAd, pTxBlk, fragNum, pFreeNumber) \
- RtmpUSB_WriteFragTxResource(pAd, pTxBlk, fragNum, pFreeNumber)
+ RtmpUSB_WriteFragTxResource(pAd, pTxBlk, fragNum, pFreeNumber)
-#define HAL_WriteMultiTxResource(pAd, pTxBlk,frameNum, pFreeNumber) \
- RtmpUSB_WriteMultiTxResource(pAd, pTxBlk,frameNum, pFreeNumber)
+#define HAL_WriteMultiTxResource(pAd, pTxBlk, frameNum, pFreeNumber) \
+ RtmpUSB_WriteMultiTxResource(pAd, pTxBlk, frameNum, pFreeNumber)
#define HAL_FinalWriteTxResource(pAd, pTxBlk, totalMPDUSize, TxIdx) \
- RtmpUSB_FinalWriteTxResource(pAd, pTxBlk, totalMPDUSize, TxIdx)
+ RtmpUSB_FinalWriteTxResource(pAd, pTxBlk, totalMPDUSize, TxIdx)
-#define HAL_LastTxIdx(pAd, QueIdx,TxIdx) \
+#define HAL_LastTxIdx(pAd, QueIdx, TxIdx) \
/*RtmpUSBDataLastTxIdx(pAd, QueIdx,TxIdx) */
#define HAL_KickOutTx(pAd, pTxBlk, QueIdx) \
@@ -286,8 +284,8 @@ struct rt_rx_context {
/*
* Device Hardware Interface Related MACRO
*/
-#define RTMP_IRQ_INIT(pAd) do{}while(0)
-#define RTMP_IRQ_ENABLE(pAd) do{}while(0)
+#define RTMP_IRQ_INIT(pAd) do {} while (0)
+#define RTMP_IRQ_ENABLE(pAd) do {} while (0)
/*
* MLME Related MACRO
@@ -305,8 +303,8 @@ struct rt_rx_context {
RTUSBMlmeUp(pAd); }
#define RTMP_MLME_RESET_STATE_MACHINE(pAd) \
- MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_RESET_CONF, 0, NULL); \
- RTUSBMlmeUp(pAd);
+ { MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_RESET_CONF, 0, NULL); \
+ RTUSBMlmeUp(pAd); }
#define RTMP_HANDLE_COUNTER_MEASURE(_pAd, _pEntry) \
{ RTUSBEnqueueInternalCmd(_pAd, CMDTHREAD_802_11_COUNTER_MEASURE, _pEntry, sizeof(struct rt_mac_table_entry)); \
@@ -330,12 +328,11 @@ struct rt_rx_context {
{\
if ((_pAd)->StaCfg.WindowsPowerMode == Ndis802_11PowerModeFast_PSP) \
MlmeSetPsmBit(_pAd, _val);\
- else \
- { \
+ else { \
u16 _psm_val; \
_psm_val = _val; \
RTUSBEnqueueInternalCmd(_pAd, CMDTHREAD_SET_PSM_BIT, &(_psm_val), sizeof(u16)); \
- }\
+ } \
}
#define RTMP_MLME_RADIO_ON(pAd) \
diff --git a/drivers/staging/rt2860/chip/rtmp_mac.h b/drivers/staging/rt2860/chip/rtmp_mac.h
index f6a72581d3b..e8f7172ce42 100644
--- a/drivers/staging/rt2860/chip/rtmp_mac.h
+++ b/drivers/staging/rt2860/chip/rtmp_mac.h
@@ -154,7 +154,7 @@ typedef union _INT_SOURCE_CSR_STRUC {
u32 GPTimer:1;
u32 RxCoherent:1; /*bit16 */
u32 TxCoherent:1;
- u32 : 14;
+ u32: 14;
} field;
u32 word;
} INT_SOURCE_CSR_STRUC, *PINT_SOURCE_CSR_STRUC;
@@ -175,7 +175,7 @@ typedef union _INT_MASK_CSR_STRUC {
u32 HccaDmaDone:1;
u32 MgmtDmaDone:1;
u32 MCUCommandINT:1;
- u32 : 20;
+ u32: 20;
u32 RxCoherent:1;
u32 TxCoherent:1;
} field;
@@ -209,7 +209,7 @@ typedef union _WPDMA_RST_IDX_STRUC {
u32 RST_DTX_IDX5:1;
u32 rsv:10;
u32 RST_DRX_IDX0:1;
- u32 : 15;
+ u32: 15;
} field;
u32 word;
} WPDMA_RST_IDX_STRUC, *PWPDMA_RST_IDX_STRUC;
@@ -448,7 +448,7 @@ typedef union _BBP_CSR_CFG_STRUC {
u32 Busy:1; /* 1: ASIC is busy execute BBP programming. */
u32 BBP_PAR_DUR:1; /* 0: 4 MAC clock cycles 1: 8 MAC clock cycles */
u32 BBP_RW_MODE:1; /* 0: use serial mode 1:parallel */
- u32 : 12;
+ u32: 12;
} field;
u32 word;
} BBP_CSR_CFG_STRUC, *PBBP_CSR_CFG_STRUC;
@@ -494,7 +494,7 @@ typedef union _LED_CFG_STRUC {
u32 GLedMode:2; /* green Led Mode */
u32 YLedMode:2; /* yellow Led Mode */
u32 LedPolar:1; /* Led Polarity. 0: active low1: active high */
- u32 : 1;
+ u32: 1;
} field;
u32 word;
} LED_CFG_STRUC, *PLED_CFG_STRUC;
@@ -533,7 +533,7 @@ typedef union _BCN_TIME_CFG_STRUC {
u32 TsfSyncMode:2; /* Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode */
u32 bTBTTEnable:1;
u32 bBeaconGen:1; /* Enable beacon generator */
- u32 : 3;
+ u32: 3;
u32 TxTimestampCompensate:8;
} field;
u32 word;
@@ -560,7 +560,7 @@ typedef union _AUTO_WAKEUP_STRUC {
u32 AutoLeadTime:8;
u32 NumofSleepingTbtt:7; /* ForceWake has high privilege than PutToSleep when both set */
u32 EnableAutoWakeup:1; /* 0:sleep, 1:awake */
- u32 : 16;
+ u32: 16;
} field;
u32 word;
} AUTO_WAKEUP_STRUC, *PAUTO_WAKEUP_STRUC;
@@ -578,7 +578,7 @@ typedef union _EDCA_AC_CFG_STRUC {
u32 Aifsn:4; /* # of slot time */
u32 Cwmin:4; /* */
u32 Cwmax:4; /*unit power of 2 */
- u32 : 12; /* */
+ u32: 12; /* */
} field;
u32 word;
} EDCA_AC_CFG_STRUC, *PEDCA_AC_CFG_STRUC;
@@ -751,7 +751,7 @@ typedef union _AUTO_RSP_CFG_STRUC {
 u32 rsv:1; /* Power bit value in control frame */
 u32 DualCTSEn:1; /* Power bit value in control frame */
 u32 AckCtsPsmBit:1; /* Power bit value in control frame */
- u32 : 24;
+ u32: 24;
} field;
u32 word;
} AUTO_RSP_CFG_STRUC, *PAUTO_RSP_CFG_STRUC;
@@ -981,21 +981,21 @@ typedef union _MPDU_DEN_CNT_STRUC {
typedef union _SHAREDKEY_MODE_STRUC {
struct {
u32 Bss0Key0CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss0Key1CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss0Key2CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss0Key3CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss1Key0CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss1Key1CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss1Key2CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss1Key3CipherAlg:3;
- u32 : 1;
+ u32: 1;
} field;
u32 word;
} SHAREDKEY_MODE_STRUC, *PSHAREDKEY_MODE_STRUC;
@@ -1103,7 +1103,7 @@ typedef union _RX_FILTR_CFG_STRUC {
u32 DropBAR:1; /* */
u32 DropRsvCntlType:1;
- u32 : 15;
+ u32: 15;
} field;
u32 word;
} RX_FILTR_CFG_STRUC, *PRX_FILTR_CFG_STRUC;
@@ -1128,21 +1128,21 @@ typedef union _PHY_CSR4_STRUC {
typedef union _SEC_CSR5_STRUC {
struct {
u32 Bss2Key0CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss2Key1CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss2Key2CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss2Key3CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss3Key0CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss3Key1CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss3Key2CipherAlg:3;
- u32 : 1;
+ u32: 1;
u32 Bss3Key3CipherAlg:3;
- u32 : 1;
+ u32: 1;
} field;
u32 word;
} SEC_CSR5_STRUC, *PSEC_CSR5_STRUC;
diff --git a/drivers/staging/rt2860/chip/rtmp_phy.h b/drivers/staging/rt2860/chip/rtmp_phy.h
index 8b8b0f47f03..9f924ea6ca3 100644
--- a/drivers/staging/rt2860/chip/rtmp_phy.h
+++ b/drivers/staging/rt2860/chip/rtmp_phy.h
@@ -177,8 +177,7 @@
#ifdef RTMP_MAC_PCI
#define RTMP_RF_IO_WRITE32(_A, _V) \
{ \
- if ((_A)->bPCIclkOff == FALSE) \
- { \
+ if ((_A)->bPCIclkOff == FALSE) { \
PHY_CSR4_STRUC _value; \
unsigned long _busyCnt = 0; \
\
@@ -187,9 +186,8 @@
if (_value.field.Busy == IDLE) \
break; \
_busyCnt++; \
- }while (_busyCnt < MAX_BUSY_COUNT); \
- if(_busyCnt < MAX_BUSY_COUNT) \
- { \
+ } while (_busyCnt < MAX_BUSY_COUNT); \
+ if (_busyCnt < MAX_BUSY_COUNT) { \
RTMP_IO_WRITE32((_A), RF_CSR_CFG0, (_V)); \
} \
} \
@@ -218,52 +216,46 @@
 _bViaMCU: if we need to access the BBP via the MCU.
*/
#define RTMP_BBP_IO_READ8(_pAd, _bbpID, _pV, _bViaMCU) \
- do{ \
- BBP_CSR_CFG_STRUC BbpCsr; \
- int _busyCnt, _secCnt, _regID; \
- \
- _regID = ((_bViaMCU) == TRUE ? H2M_BBP_AGENT : BBP_CSR_CFG); \
- for (_busyCnt=0; _busyCnt<MAX_BUSY_COUNT; _busyCnt++) \
- { \
- RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word); \
+ do { \
+ BBP_CSR_CFG_STRUC BbpCsr; \
+ int _busyCnt, _secCnt, _regID; \
+ \
+ _regID = ((_bViaMCU) == TRUE ? H2M_BBP_AGENT : BBP_CSR_CFG); \
+ for (_busyCnt = 0; _busyCnt < MAX_BUSY_COUNT; _busyCnt++) { \
+ RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word); \
if (BbpCsr.field.Busy == BUSY) \
- continue; \
+ continue; \
BbpCsr.word = 0; \
BbpCsr.field.fRead = 1; \
- BbpCsr.field.BBP_RW_MODE = 1; \
+ BbpCsr.field.BBP_RW_MODE = 1; \
BbpCsr.field.Busy = 1; \
- BbpCsr.field.RegNum = _bbpID; \
+ BbpCsr.field.RegNum = _bbpID; \
RTMP_IO_WRITE32(_pAd, _regID, BbpCsr.word); \
- if ((_bViaMCU) == TRUE) \
- { \
- AsicSendCommandToMcu(_pAd, 0x80, 0xff, 0x0, 0x0); \
- RTMPusecDelay(1000); \
- } \
- for (_secCnt=0; _secCnt<MAX_BUSY_COUNT; _secCnt++) \
- { \
+ if ((_bViaMCU) == TRUE) { \
+ AsicSendCommandToMcu(_pAd, 0x80, 0xff, 0x0, 0x0); \
+ RTMPusecDelay(1000); \
+ } \
+ for (_secCnt = 0; _secCnt < MAX_BUSY_COUNT; _secCnt++) { \
RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word); \
- if (BbpCsr.field.Busy == IDLE) \
- break; \
- } \
- if ((BbpCsr.field.Busy == IDLE) && \
- (BbpCsr.field.RegNum == _bbpID)) \
- { \
- *(_pV) = (u8)BbpCsr.field.Value; \
- break; \
- } \
- } \
- if (BbpCsr.field.Busy == BUSY) \
- { \
- DBGPRINT_ERR(("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID)); \
+ if (BbpCsr.field.Busy == IDLE) \
+ break; \
+ } \
+ if ((BbpCsr.field.Busy == IDLE) && \
+ (BbpCsr.field.RegNum == _bbpID)) { \
+ *(_pV) = (u8)BbpCsr.field.Value; \
+ break; \
+ } \
+ } \
+ if (BbpCsr.field.Busy == BUSY) { \
+ DBGPRINT_ERR(("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID)); \
*(_pV) = (_pAd)->BbpWriteLatch[_bbpID]; \
- if ((_bViaMCU) == TRUE) \
- { \
+ if ((_bViaMCU) == TRUE) { \
RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word); \
BbpCsr.field.Busy = 0; \
RTMP_IO_WRITE32(_pAd, _regID, BbpCsr.word); \
} \
} \
- }while(0)
+ } while (0)
/*
 This macro is used for BBP read operations that do not need to go via the MCU.
@@ -283,42 +275,35 @@
int i, k; \
BOOLEAN brc; \
BbpCsr.field.Busy = IDLE; \
- if ((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
+ if ((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) \
+ && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
&& ((_A)->StaCfg.PSControl.field.EnableNewPS == TRUE) \
&& ((_A)->bPCIclkOff == FALSE) \
- && ((_A)->brt30xxBanMcuCmd == FALSE)) \
- { \
- for (i=0; i<MAX_BUSY_COUNT; i++) \
- { \
- RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
- if (BbpCsr.field.Busy == BUSY) \
- { \
- continue; \
- } \
- BbpCsr.word = 0; \
- BbpCsr.field.fRead = 1; \
- BbpCsr.field.BBP_RW_MODE = 1; \
- BbpCsr.field.Busy = 1; \
- BbpCsr.field.RegNum = _I; \
- RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
- brc = AsicSendCommandToMcu(_A, 0x80, 0xff, 0x0, 0x0); \
- if (brc == TRUE) \
- { \
- for (k=0; k<MAX_BUSY_COUNT; k++) \
- { \
- RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
- if (BbpCsr.field.Busy == IDLE) \
- break; \
- } \
- if ((BbpCsr.field.Busy == IDLE) && \
- (BbpCsr.field.RegNum == _I)) \
- { \
- *(_pV) = (u8)BbpCsr.field.Value; \
- break; \
- } \
- } \
- else \
- { \
+ && ((_A)->brt30xxBanMcuCmd == FALSE)) { \
+ for (i = 0; i < MAX_BUSY_COUNT; i++) { \
+ RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
+ if (BbpCsr.field.Busy == BUSY) { \
+ continue; \
+ } \
+ BbpCsr.word = 0; \
+ BbpCsr.field.fRead = 1; \
+ BbpCsr.field.BBP_RW_MODE = 1; \
+ BbpCsr.field.Busy = 1; \
+ BbpCsr.field.RegNum = _I; \
+ RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
+ brc = AsicSendCommandToMcu(_A, 0x80, 0xff, 0x0, 0x0); \
+ if (brc == TRUE) { \
+ for (k = 0; k < MAX_BUSY_COUNT; k++) { \
+ RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
+ if (BbpCsr.field.Busy == IDLE) \
+ break; \
+ } \
+ if ((BbpCsr.field.Busy == IDLE) && \
+ (BbpCsr.field.RegNum == _I)) { \
+ *(_pV) = (u8)BbpCsr.field.Value; \
+ break; \
+ } \
+ } else { \
BbpCsr.field.Busy = 0; \
RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
} \
@@ -326,46 +311,38 @@
} \
else if (!((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
&& ((_A)->StaCfg.PSControl.field.EnableNewPS == TRUE)) \
- && ((_A)->bPCIclkOff == FALSE)) \
- { \
- for (i=0; i<MAX_BUSY_COUNT; i++) \
- { \
- RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
- if (BbpCsr.field.Busy == BUSY) \
- { \
- continue; \
- } \
- BbpCsr.word = 0; \
- BbpCsr.field.fRead = 1; \
- BbpCsr.field.BBP_RW_MODE = 1; \
- BbpCsr.field.Busy = 1; \
- BbpCsr.field.RegNum = _I; \
- RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
- AsicSendCommandToMcu(_A, 0x80, 0xff, 0x0, 0x0); \
- for (k=0; k<MAX_BUSY_COUNT; k++) \
- { \
- RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
- if (BbpCsr.field.Busy == IDLE) \
- break; \
- } \
- if ((BbpCsr.field.Busy == IDLE) && \
- (BbpCsr.field.RegNum == _I)) \
- { \
- *(_pV) = (u8)BbpCsr.field.Value; \
- break; \
- } \
- } \
- } \
- else \
- { \
+ && ((_A)->bPCIclkOff == FALSE)) { \
+ for (i = 0; i < MAX_BUSY_COUNT; i++) { \
+ RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
+ if (BbpCsr.field.Busy == BUSY) { \
+ continue; \
+ } \
+ BbpCsr.word = 0; \
+ BbpCsr.field.fRead = 1; \
+ BbpCsr.field.BBP_RW_MODE = 1; \
+ BbpCsr.field.Busy = 1; \
+ BbpCsr.field.RegNum = _I; \
+ RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
+ AsicSendCommandToMcu(_A, 0x80, 0xff, 0x0, 0x0); \
+ for (k = 0; k < MAX_BUSY_COUNT; k++) { \
+ RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
+ if (BbpCsr.field.Busy == IDLE) \
+ break; \
+ } \
+ if ((BbpCsr.field.Busy == IDLE) && \
+ (BbpCsr.field.RegNum == _I)) { \
+ *(_pV) = (u8)BbpCsr.field.Value; \
+ break; \
+ } \
+ } \
+ } else { \
DBGPRINT_ERR((" , brt30xxBanMcuCmd = %d, Read BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I))); \
- *(_pV) = (_A)->BbpWriteLatch[_I]; \
- } \
- if ((BbpCsr.field.Busy == BUSY) || ((_A)->bPCIclkOff == TRUE)) \
- { \
- DBGPRINT_ERR(("BBP read R%d=0x%x fail\n", _I, BbpCsr.word)); \
- *(_pV) = (_A)->BbpWriteLatch[_I]; \
- } \
+ *(_pV) = (_A)->BbpWriteLatch[_I]; \
+ } \
+ if ((BbpCsr.field.Busy == BUSY) || ((_A)->bPCIclkOff == TRUE)) { \
+ DBGPRINT_ERR(("BBP read R%d=0x%x fail\n", _I, BbpCsr.word)); \
+ *(_pV) = (_A)->BbpWriteLatch[_I]; \
+ } \
}
/*
@@ -376,43 +353,39 @@
 _bViaMCU: if we need to access the BBP via the MCU.
*/
#define RTMP_BBP_IO_WRITE8(_pAd, _bbpID, _pV, _bViaMCU) \
- do{ \
+ do { \
BBP_CSR_CFG_STRUC BbpCsr; \
- int _busyCnt, _regID; \
- \
+ int _busyCnt, _regID; \
+ \
_regID = ((_bViaMCU) == TRUE ? H2M_BBP_AGENT : BBP_CSR_CFG); \
- for (_busyCnt=0; _busyCnt<MAX_BUSY_COUNT; _busyCnt++) \
- { \
+ for (_busyCnt = 0; _busyCnt < MAX_BUSY_COUNT; _busyCnt++) { \
RTMP_IO_READ32((_pAd), BBP_CSR_CFG, &BbpCsr.word); \
- if (BbpCsr.field.Busy == BUSY) \
- continue; \
- BbpCsr.word = 0; \
- BbpCsr.field.fRead = 0; \
- BbpCsr.field.BBP_RW_MODE = 1; \
- BbpCsr.field.Busy = 1; \
- BbpCsr.field.Value = _pV; \
- BbpCsr.field.RegNum = _bbpID; \
- RTMP_IO_WRITE32((_pAd), BBP_CSR_CFG, BbpCsr.word); \
- if ((_bViaMCU) == TRUE) \
- { \
- AsicSendCommandToMcu(_pAd, 0x80, 0xff, 0x0, 0x0); \
- if ((_pAd)->OpMode == OPMODE_AP) \
- RTMPusecDelay(1000); \
- } \
- (_pAd)->BbpWriteLatch[_bbpID] = _pV; \
- break; \
- } \
- if (_busyCnt == MAX_BUSY_COUNT) \
- { \
- DBGPRINT_ERR(("BBP write R%d fail\n", _bbpID)); \
- if((_bViaMCU) == TRUE) \
- { \
+ if (BbpCsr.field.Busy == BUSY) \
+ continue; \
+ BbpCsr.word = 0; \
+ BbpCsr.field.fRead = 0; \
+ BbpCsr.field.BBP_RW_MODE = 1; \
+ BbpCsr.field.Busy = 1; \
+ BbpCsr.field.Value = _pV; \
+ BbpCsr.field.RegNum = _bbpID; \
+ RTMP_IO_WRITE32((_pAd), BBP_CSR_CFG, BbpCsr.word); \
+ if ((_bViaMCU) == TRUE) { \
+ AsicSendCommandToMcu(_pAd, 0x80, 0xff, 0x0, 0x0); \
+ if ((_pAd)->OpMode == OPMODE_AP) \
+ RTMPusecDelay(1000); \
+ } \
+ (_pAd)->BbpWriteLatch[_bbpID] = _pV; \
+ break; \
+ } \
+ if (_busyCnt == MAX_BUSY_COUNT) { \
+ DBGPRINT_ERR(("BBP write R%d fail\n", _bbpID)); \
+ if ((_bViaMCU) == TRUE) { \
RTMP_IO_READ32(_pAd, H2M_BBP_AGENT, &BbpCsr.word); \
- BbpCsr.field.Busy = 0; \
+ BbpCsr.field.Busy = 0; \
RTMP_IO_WRITE32(_pAd, H2M_BBP_AGENT, BbpCsr.word); \
- } \
- } \
- }while(0)
+ } \
+ } \
+ } while (0)
/*
 This macro is used for BBP write operations that do not need to go via the MCU.
@@ -426,25 +399,22 @@
 will use this function too and does not access the BBP register via the MCU.
*/
/* Write BBP register by register's ID & value */
-#define RTMP_BBP_IO_WRITE8_BY_REG_ID(_A, _I, _V) \
-{ \
- BBP_CSR_CFG_STRUC BbpCsr; \
- int BusyCnt = 0; \
+#define RTMP_BBP_IO_WRITE8_BY_REG_ID(_A, _I, _V) \
+{ \
+ BBP_CSR_CFG_STRUC BbpCsr; \
+ int BusyCnt = 0; \
BOOLEAN brc; \
- if (_I < MAX_NUM_OF_BBP_LATCH) \
- { \
- if ((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
+ if (_I < MAX_NUM_OF_BBP_LATCH) { \
+ if ((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) \
+ && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
&& ((_A)->StaCfg.PSControl.field.EnableNewPS == TRUE) \
&& ((_A)->bPCIclkOff == FALSE) \
- && ((_A)->brt30xxBanMcuCmd == FALSE)) \
- { \
- if (_A->AccessBBPFailCount > 20) \
- { \
- AsicResetBBPAgent(_A); \
- _A->AccessBBPFailCount = 0; \
- } \
- for (BusyCnt=0; BusyCnt<MAX_BUSY_COUNT; BusyCnt++) \
- { \
+ && ((_A)->brt30xxBanMcuCmd == FALSE)) { \
+ if (_A->AccessBBPFailCount > 20) { \
+ AsicResetBBPAgent(_A); \
+ _A->AccessBBPFailCount = 0; \
+ } \
+ for (BusyCnt = 0; BusyCnt < MAX_BUSY_COUNT; BusyCnt++) { \
RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
if (BbpCsr.field.Busy == BUSY) \
continue; \
@@ -456,29 +426,24 @@
BbpCsr.field.RegNum = _I; \
RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
brc = AsicSendCommandToMcu(_A, 0x80, 0xff, 0x0, 0x0); \
- if (brc == TRUE) \
- { \
+ if (brc == TRUE) { \
(_A)->BbpWriteLatch[_I] = _V; \
- } \
- else \
- { \
+ } else { \
BbpCsr.field.Busy = 0; \
RTMP_IO_WRITE32(_A, H2M_BBP_AGENT, BbpCsr.word); \
} \
break; \
} \
} \
- else if (!((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
+ else if (!((IS_RT3090((_A)) || IS_RT3572((_A)) || IS_RT3390((_A))) \
+ && ((_A)->StaCfg.PSControl.field.rt30xxPowerMode == 3) \
&& ((_A)->StaCfg.PSControl.field.EnableNewPS == TRUE)) \
- && ((_A)->bPCIclkOff == FALSE)) \
- { \
- if (_A->AccessBBPFailCount > 20) \
- { \
- AsicResetBBPAgent(_A); \
- _A->AccessBBPFailCount = 0; \
- } \
- for (BusyCnt=0; BusyCnt<MAX_BUSY_COUNT; BusyCnt++) \
- { \
+ && ((_A)->bPCIclkOff == FALSE)) { \
+ if (_A->AccessBBPFailCount > 20) { \
+ AsicResetBBPAgent(_A); \
+ _A->AccessBBPFailCount = 0; \
+ } \
+ for (BusyCnt = 0; BusyCnt < MAX_BUSY_COUNT; BusyCnt++) { \
RTMP_IO_READ32(_A, H2M_BBP_AGENT, &BbpCsr.word); \
if (BbpCsr.field.Busy == BUSY) \
continue; \
@@ -493,20 +458,15 @@
(_A)->BbpWriteLatch[_I] = _V; \
break; \
} \
- } \
- else \
- { \
+ } else { \
DBGPRINT_ERR((" brt30xxBanMcuCmd = %d. Write BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I))); \
} \
- if ((BusyCnt == MAX_BUSY_COUNT) || ((_A)->bPCIclkOff == TRUE)) \
- { \
- if (BusyCnt == MAX_BUSY_COUNT) \
+ if ((BusyCnt == MAX_BUSY_COUNT) || ((_A)->bPCIclkOff == TRUE)) { \
+ if (BusyCnt == MAX_BUSY_COUNT) \
(_A)->AccessBBPFailCount++; \
- DBGPRINT_ERR(("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff )); \
+ DBGPRINT_ERR(("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff)); \
} \
- } \
- else \
- { \
+ } else { \
DBGPRINT_ERR(("****** BBP_Write_Latch Buffer exceeds max boundry ****** \n")); \
} \
}
@@ -522,7 +482,7 @@
#ifdef RT30xx
#define RTMP_ASIC_MMPS_DISABLE(_pAd) \
- do{ \
+ do { \
u32 _macData; \
u8 _bbpData = 0; \
/* disable MMPS BBP control register */ \
@@ -534,10 +494,10 @@
RTMP_IO_READ32(_pAd, 0x1210, &_macData); \
_macData &= ~(0x09); /*bit 0, 3*/ \
RTMP_IO_WRITE32(_pAd, 0x1210, _macData); \
- }while(0)
+ } while (0)
#define RTMP_ASIC_MMPS_ENABLE(_pAd) \
- do{ \
+ do { \
u32 _macData; \
u8 _bbpData = 0; \
/* enable MMPS BBP control register */ \
@@ -549,7 +509,7 @@
RTMP_IO_READ32(_pAd, 0x1210, &_macData); \
_macData |= (0x09); /*bit 0, 3*/ \
RTMP_IO_WRITE32(_pAd, 0x1210, _macData); \
- }while(0)
+ } while (0)
#endif /* RT30xx // */
diff --git a/drivers/staging/rt2860/chips/rt3070.c b/drivers/staging/rt2860/chips/rt3070.c
index 627bad943a3..3a17fd10ec1 100644
--- a/drivers/staging/rt2860/chips/rt3070.c
+++ b/drivers/staging/rt2860/chips/rt3070.c
@@ -56,7 +56,7 @@ void NICInitRT3070RFRegisters(struct rt_rtmp_adapter *pAd)
u32 RfReg = 0;
u32 data;
- RT30xxReadRFRegister(pAd, RF_R30, (u8 *)& RfReg);
+ RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
RfReg |= 0x80;
RT30xxWriteRFRegister(pAd, RF_R30, (u8)RfReg);
RTMPusecDelay(1000);
@@ -84,7 +84,7 @@ void NICInitRT3070RFRegisters(struct rt_rtmp_adapter *pAd)
}
} else if (IS_RT3071(pAd)) {
/* Driver should set RF R6 bit6 on before init RF registers */
- RT30xxReadRFRegister(pAd, RF_R06, (u8 *)& RfReg);
+ RT30xxReadRFRegister(pAd, RF_R06, (u8 *)&RfReg);
RfReg |= 0x40;
RT30xxWriteRFRegister(pAd, RF_R06, (u8)RfReg);
diff --git a/drivers/staging/rt2860/chips/rt3090.c b/drivers/staging/rt2860/chips/rt3090.c
index 5927ba4c5a9..c2933c69bc0 100644
--- a/drivers/staging/rt2860/chips/rt3090.c
+++ b/drivers/staging/rt2860/chips/rt3090.c
@@ -53,7 +53,7 @@ void NICInitRT3090RFRegisters(struct rt_rtmp_adapter *pAd)
/* Driver should toggle RF R30 bit7 before init RF registers */
u32 RfReg = 0, data;
- RT30xxReadRFRegister(pAd, RF_R30, (u8 *)& RfReg);
+ RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
RfReg |= 0x80;
RT30xxWriteRFRegister(pAd, RF_R30, (u8)RfReg);
RTMPusecDelay(1000);
@@ -90,7 +90,7 @@ void NICInitRT3090RFRegisters(struct rt_rtmp_adapter *pAd)
}
/* Driver should set RF R6 bit6 on before calibration */
- RT30xxReadRFRegister(pAd, RF_R06, (u8 *)& RfReg);
+ RT30xxReadRFRegister(pAd, RF_R06, (u8 *)&RfReg);
RfReg |= 0x40;
RT30xxWriteRFRegister(pAd, RF_R06, (u8)RfReg);
diff --git a/drivers/staging/rt2860/chips/rt30xx.c b/drivers/staging/rt2860/chips/rt30xx.c
index 6e684a3ccf0..4367a196aef 100644
--- a/drivers/staging/rt2860/chips/rt30xx.c
+++ b/drivers/staging/rt2860/chips/rt30xx.c
@@ -170,8 +170,7 @@ void RTMPFilterCalibration(struct rt_rtmp_adapter *pAd)
pAd->Mlme.CaliBW40RfR24 = 0x2F; /*Bit[5] must be 1 for BW 40 */
do {
- if (loop == 1) /*BandWidth = 40 MHz */
- {
+ if (loop == 1) { /*BandWidth = 40 MHz */
/* Write 0x27 to RF_R24 to program filter */
RF_R24_Value = 0x27;
RT30xxWriteRFRegister(pAd, RF_R24, RF_R24_Value);
@@ -190,8 +189,7 @@ void RTMPFilterCalibration(struct rt_rtmp_adapter *pAd)
RT30xxReadRFRegister(pAd, RF_R31, &value);
value |= 0x20;
RT30xxWriteRFRegister(pAd, RF_R31, value);
- } else /*BandWidth = 20 MHz */
- {
+ } else { /*BandWidth = 20 MHz */
/* Write 0x07 to RF_R24 to program filter */
RF_R24_Value = 0x07;
RT30xxWriteRFRegister(pAd, RF_R24, RF_R24_Value);
@@ -353,8 +351,7 @@ void RT30xxLoadRFNormalModeSetup(struct rt_rtmp_adapter *pAd)
RT30xxReadRFRegister(pAd, RF_R27, &RFValue);
/* TX to RX IQ glitch(RF_R27) has been fixed in RT3070(F). */
/* Raising RF voltage is no longer needed for RT3070(F) */
- if (IS_RT3090(pAd)) /* RT309x and RT3071/72 */
- {
+ if (IS_RT3090(pAd)) { /* RT309x and RT3071/72 */
if ((pAd->MACVersion & 0xffff) < 0x0211)
RFValue = (RFValue & (~0x77)) | 0x3;
else
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 250357c5cd6..1d159ff82fd 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -281,7 +281,7 @@ void construct_mic_header2(unsigned char *mic_header2,
mic_header2[6] = mpdu[22] & 0x0f; /* SC */
mic_header2[7] = 0x00; /* mpdu[23]; */
- if ((!qc_exists) & a4_exists) {
+ if ((!qc_exists) && a4_exists) {
for (i = 0; i < 6; i++)
mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
diff --git a/drivers/staging/rt2860/common/cmm_data.c b/drivers/staging/rt2860/common/cmm_data.c
index 68263cee795..93a53479d76 100644
--- a/drivers/staging/rt2860/common/cmm_data.c
+++ b/drivers/staging/rt2860/common/cmm_data.c
@@ -773,7 +773,8 @@ void RTMPDeQueuePacket(struct rt_rtmp_adapter *pAd, IN BOOLEAN bIntContext, u8 Q
/* probe the Queue Head */
pQueue = &pAd->TxSwQueue[QueIdx];
- if ((pEntry = pQueue->Head) == NULL) {
+ pEntry = pQueue->Head;
+ if (pEntry == NULL) {
DEQUEUE_UNLOCK(&pAd->irq_lock, bIntContext,
IrqFlags);
break;
@@ -824,7 +825,8 @@ void RTMPDeQueuePacket(struct rt_rtmp_adapter *pAd, IN BOOLEAN bIntContext, u8 Q
}
do {
- if ((pEntry = pQueue->Head) == NULL)
+ pEntry = pQueue->Head;
+ if (pEntry == NULL)
break;
 /* For TX_AMSDU_FRAME/TX_RALINK_FRAME, need to check if the next packet can do aggregation. */
@@ -1422,7 +1424,7 @@ u32 deaggregate_AMSDU_announce(struct rt_rtmp_adapter *pAd,
if ((Header802_3[12] == 0x88) && (Header802_3[13] == 0x8E)) {
 /* avoid local heap overflow, use dynamic allocation */
struct rt_mlme_queue_elem *Elem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
MEM_ALLOC_FLAG);
if (Elem != NULL) {
memmove(Elem->Msg +
diff --git a/drivers/staging/rt2860/common/cmm_mac_pci.c b/drivers/staging/rt2860/common/cmm_mac_pci.c
index 560ebd398e1..e26ba494287 100644
--- a/drivers/staging/rt2860/common/cmm_mac_pci.c
+++ b/drivers/staging/rt2860/common/cmm_mac_pci.c
@@ -1558,7 +1558,7 @@ void RT28xxPciMlmeRadioOFF(struct rt_rtmp_adapter *pAd)
if (INFRA_ON(pAd) || ADHOC_ON(pAd)) {
struct rt_mlme_disassoc_req DisReq;
struct rt_mlme_queue_elem *pMsgElem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
MEM_ALLOC_FLAG);
if (pMsgElem) {
diff --git a/drivers/staging/rt2860/common/cmm_mac_usb.c b/drivers/staging/rt2860/common/cmm_mac_usb.c
index 9dd6959cd5a..8aec70fc20d 100644
--- a/drivers/staging/rt2860/common/cmm_mac_usb.c
+++ b/drivers/staging/rt2860/common/cmm_mac_usb.c
@@ -1087,7 +1087,7 @@ void RT28xxUsbMlmeRadioOFF(struct rt_rtmp_adapter *pAd)
if (INFRA_ON(pAd) || ADHOC_ON(pAd)) {
struct rt_mlme_disassoc_req DisReq;
struct rt_mlme_queue_elem *pMsgElem =
- (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
+ kmalloc(sizeof(struct rt_mlme_queue_elem),
MEM_ALLOC_FLAG);
if (pMsgElem) {
diff --git a/drivers/staging/rt2860/common/cmm_wpa.c b/drivers/staging/rt2860/common/cmm_wpa.c
index 94e119faaa7..c16f3763cca 100644
--- a/drivers/staging/rt2860/common/cmm_wpa.c
+++ b/drivers/staging/rt2860/common/cmm_wpa.c
@@ -2928,25 +2928,23 @@ void WpaShowAllsuite(u8 *rsnie, u32 rsnie_len)
hex_dump("RSNIE", rsnie, rsnie_len);
/* group cipher */
- if ((pSuite =
- GetSuiteFromRSNIE(rsnie, rsnie_len, GROUP_SUITE,
- &count)) != NULL) {
+ pSuite = GetSuiteFromRSNIE(rsnie, rsnie_len, GROUP_SUITE, &count);
+ if (pSuite != NULL) {
hex_dump("group cipher", pSuite, 4 * count);
}
/* pairwise cipher */
- if ((pSuite =
- GetSuiteFromRSNIE(rsnie, rsnie_len, PAIRWISE_SUITE,
- &count)) != NULL) {
+ pSuite = GetSuiteFromRSNIE(rsnie, rsnie_len, PAIRWISE_SUITE, &count);
+ if (pSuite != NULL) {
hex_dump("pairwise cipher", pSuite, 4 * count);
}
/* AKM */
- if ((pSuite =
- GetSuiteFromRSNIE(rsnie, rsnie_len, AKM_SUITE, &count)) != NULL) {
+ pSuite = GetSuiteFromRSNIE(rsnie, rsnie_len, AKM_SUITE, &count);
+ if (pSuite != NULL) {
hex_dump("AKM suite", pSuite, 4 * count);
}
/* PMKID */
- if ((pSuite =
- GetSuiteFromRSNIE(rsnie, rsnie_len, PMKID_LIST, &count)) != NULL) {
+ pSuite = GetSuiteFromRSNIE(rsnie, rsnie_len, PMKID_LIST, &count);
+ if (pSuite != NULL) {
hex_dump("PMKID", pSuite, LEN_PMKID);
}
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 21a95ffdfb8..a09038542f2 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -2810,17 +2810,6 @@ void UserCfgInit(struct rt_rtmp_adapter *pAd)
}
/* IRQL = PASSIVE_LEVEL */
-u8 BtoH(char ch)
-{
- if (ch >= '0' && ch <= '9')
- return (ch - '0'); /* Handle numerals */
- if (ch >= 'A' && ch <= 'F')
- return (ch - 'A' + 0xA); /* Handle capitol hex digits */
- if (ch >= 'a' && ch <= 'f')
- return (ch - 'a' + 0xA); /* Handle small hex digits */
- return (255);
-}
-
/* */
/* FUNCTION: AtoH(char *, u8 *, int) */
/* */
@@ -2847,8 +2836,8 @@ void AtoH(char *src, u8 *dest, int destlen)
destTemp = (u8 *)dest;
while (destlen--) {
- *destTemp = BtoH(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */
- *destTemp += BtoH(*srcptr++); /* Add 2nd ascii byte to above. */
+ *destTemp = hex_to_bin(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */
+ *destTemp += hex_to_bin(*srcptr++); /* Add 2nd ascii byte to above. */
destTemp++;
}
}
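The AtoH() conversion now leans on hex_to_bin() from <linux/kernel.h> instead of the driver-local BtoH(). Below is a minimal standalone sketch of the byte packing it performs, under the assumption that only well-formed hex strings reach it (hex_to_bin() returns -1 on a bad digit, where the old BtoH() returned 255).

#include <linux/kernel.h>	/* hex_to_bin() */

/* Pack two ASCII hex digits into one byte, mirroring the AtoH() loop. */
static inline u8 hex_pair_to_byte(char hi, char lo)
{
	/* Callers are assumed to pass valid hex digits only */
	return (hex_to_bin(hi) << 4) | hex_to_bin(lo);
}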
diff --git a/drivers/staging/rt2860/common/spectrum.c b/drivers/staging/rt2860/common/spectrum.c
index 51e38d80933..2d5f847e6cc 100644
--- a/drivers/staging/rt2860/common/spectrum.c
+++ b/drivers/staging/rt2860/common/spectrum.c
@@ -1900,8 +1900,8 @@ static void PeerMeasureReportAction(struct rt_rtmp_adapter *pAd,
/* if (pAd->CommonCfg.bIEEE80211H != TRUE) */
/* return; */
- if ((pMeasureReportInfo =
- kmalloc(sizeof(struct rt_measure_rpi_report), GFP_ATOMIC)) == NULL) {
+ pMeasureReportInfo = kmalloc(sizeof(struct rt_measure_rpi_report), GFP_ATOMIC);
+ if (pMeasureReportInfo == NULL) {
DBGPRINT(RT_DEBUG_ERROR,
("%s unable to alloc memory for measure report buffer (size=%zu).\n",
__func__, sizeof(struct rt_measure_rpi_report)));
@@ -2016,7 +2016,8 @@ static void PeerTpcRepAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_e
NdisZeroMemory(&TpcRepInfo, sizeof(struct rt_tpc_report_info));
if (PeerTpcRepSanity
(pAd, Elem->Msg, Elem->MsgLen, &DialogToken, &TpcRepInfo)) {
- if ((pEntry = TpcReqLookUp(pAd, DialogToken)) != NULL) {
+ pEntry = TpcReqLookUp(pAd, DialogToken);
+ if (pEntry != NULL) {
TpcReqDelete(pAd, pEntry->DialogToken);
DBGPRINT(RT_DEBUG_TRACE,
("%s: DialogToken=%x, TxPwr=%d, LinkMargin=%d\n",
diff --git a/drivers/staging/rt2860/iface/rtmp_usb.h b/drivers/staging/rt2860/iface/rtmp_usb.h
index 6bb384a7466..33479cc443a 100644
--- a/drivers/staging/rt2860/iface/rtmp_usb.h
+++ b/drivers/staging/rt2860/iface/rtmp_usb.h
@@ -81,8 +81,8 @@ extern u8 EpToQueue[6];
#define RT28XX_PUT_DEVICE usb_put_dev
#define RTUSB_ALLOC_URB(iso) usb_alloc_urb(iso, GFP_ATOMIC)
#define RTUSB_SUBMIT_URB(pUrb) usb_submit_urb(pUrb, GFP_ATOMIC)
-#define RTUSB_URB_ALLOC_BUFFER(pUsb_Dev, BufSize, pDma_addr) usb_buffer_alloc(pUsb_Dev, BufSize, GFP_ATOMIC, pDma_addr)
-#define RTUSB_URB_FREE_BUFFER(pUsb_Dev, BufSize, pTransferBuf, Dma_addr) usb_buffer_free(pUsb_Dev, BufSize, pTransferBuf, Dma_addr)
+#define RTUSB_URB_ALLOC_BUFFER(pUsb_Dev, BufSize, pDma_addr) usb_alloc_coherent(pUsb_Dev, BufSize, GFP_ATOMIC, pDma_addr)
+#define RTUSB_URB_FREE_BUFFER(pUsb_Dev, BufSize, pTransferBuf, Dma_addr) usb_free_coherent(pUsb_Dev, BufSize, pTransferBuf, Dma_addr)
#define RTUSB_FREE_URB(pUrb) usb_free_urb(pUrb)
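usb_buffer_alloc()/usb_buffer_free() were renamed to usb_alloc_coherent()/usb_free_coherent() in mainline; the argument lists are identical, so the macros only need the new names. A hedged sketch of the usual pairing (udev and urb are assumed to come from the caller; this is not driver code):

	#include <linux/usb.h>

	static int example_attach_coherent_buf(struct usb_device *udev, struct urb *urb)
	{
		dma_addr_t dma;
		void *buf = usb_alloc_coherent(udev, 2048, GFP_ATOMIC, &dma);

		if (!buf)
			return -ENOMEM;

		urb->transfer_buffer = buf;
		urb->transfer_dma = dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		/* ... submit the URB, wait for completion ... */
		usb_free_coherent(udev, 2048, buf, dma);
		return 0;
	}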
diff --git a/drivers/staging/rt2860/mlme.h b/drivers/staging/rt2860/mlme.h
index 11434132f93..99c9362bae8 100644
--- a/drivers/staging/rt2860/mlme.h
+++ b/drivers/staging/rt2860/mlme.h
@@ -322,7 +322,7 @@ struct rt_trigger_eventa {
u8 BSSID[6];
u8 RegClass; /* Regulatory Class */
u16 Channel;
- unsigned long CDCounter; /* Maintain a seperate count down counter for each Event A. */
+ unsigned long CDCounter; /* Maintain a separate count down counter for each Event A. */
};
/* 20/40 trigger event table */
diff --git a/drivers/staging/rt2860/pci_main_dev.c b/drivers/staging/rt2860/pci_main_dev.c
index e665d862281..321facd6b0a 100644
--- a/drivers/staging/rt2860/pci_main_dev.c
+++ b/drivers/staging/rt2860/pci_main_dev.c
@@ -107,13 +107,13 @@ MODULE_VERSION(STA_DRIVER_VERSION);
/* Our PCI driver structure */
/* */
static struct pci_driver rt2860_driver = {
-name: "rt2860",
-id_table:rt2860_pci_tbl,
-probe: rt2860_probe,
-remove:__devexit_p(rt2860_remove_one),
+name: "rt2860",
+id_table : rt2860_pci_tbl,
+probe : rt2860_probe,
+remove : __devexit_p(rt2860_remove_one),
#ifdef CONFIG_PM
-suspend:rt2860_suspend,
-resume:rt2860_resume,
+suspend : rt2860_suspend,
+resume : rt2860_resume,
#endif
};
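The hunk above only re-spaces the old GNU-style "label:" initializers. A possible follow-up (not part of this series) is the C99 designated-initializer form that current kernel code prefers:

	static struct pci_driver rt2860_driver = {
		.name     = "rt2860",
		.id_table = rt2860_pci_tbl,
		.probe    = rt2860_probe,
		.remove   = __devexit_p(rt2860_remove_one),
	#ifdef CONFIG_PM
		.suspend  = rt2860_suspend,
		.resume   = rt2860_resume,
	#endif
	};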
@@ -211,9 +211,9 @@ static int rt2860_resume(struct pci_dev *pci_dev)
DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_resume()\n"));
- if (net_dev == NULL) {
+ if (net_dev == NULL)
DBGPRINT(RT_DEBUG_ERROR, ("net_dev == NULL!\n"));
- } else
+ else
GET_PAD_FROM_NET_DEV(pAd, net_dev);
if (pAd != NULL) {
@@ -281,7 +281,9 @@ static int __devinit rt2860_probe(IN struct pci_dev *pci_dev,
/*PCIDevInit============================================== */
/* wake up and enable device */
- if ((rv = pci_enable_device(pci_dev)) != 0) {
+ rv = pci_enable_device(pci_dev);
+
+ if (rv != 0) {
DBGPRINT(RT_DEBUG_ERROR,
("Enable PCI device failed, errno=%d!\n", rv));
return rv;
@@ -289,7 +291,9 @@ static int __devinit rt2860_probe(IN struct pci_dev *pci_dev,
print_name = (char *)pci_name(pci_dev);
- if ((rv = pci_request_regions(pci_dev, print_name)) != 0) {
+ rv = pci_request_regions(pci_dev, print_name);
+
+ if (rv != 0) {
DBGPRINT(RT_DEBUG_ERROR,
("Request PCI resource failed, errno=%d!\n", rv));
goto err_out;
@@ -490,9 +494,8 @@ static void RTMPInitPCIeDevice(struct pci_dev *pci_dev, struct rt_rtmp_adapter *
/* Support advanced power save after 2892/2790. */
/* MAC version at offset 0x1000 is 0x2872XXXX/0x2870XXXX(PCIe, USB, SDIO). */
- if ((MacCsr0 & 0xffff0000) != 0x28600000) {
+ if ((MacCsr0 & 0xffff0000) != 0x28600000)
OPSTATUS_SET_FLAG(pAd, fOP_STATUS_PCIE_DEVICE);
- }
}
}
@@ -900,9 +903,9 @@ void RTMPPCIeLinkCtrlValueRestore(struct rt_rtmp_adapter *pAd, u8 Level)
if ((Configuration != 0) && (Configuration != 0xFFFF)) {
Configuration &= 0xfefc;
 /* If called from interface down, restore the original setting. */
- if (Level == RESTORE_CLOSE) {
+ if (Level == RESTORE_CLOSE)
Configuration |= pAd->HostLnkCtrlConfiguration;
- } else
+ else
Configuration |= 0x0;
PCI_REG_WIRTE_WORD(pObj->parent_pci_dev,
pAd->HostLnkCtrlOffset,
@@ -1100,13 +1103,13 @@ void RTMPrt3xSetPCIePowerLinkCtrl(struct rt_rtmp_adapter *pAd)
/* Find PCI-to-PCI Bridge Express Capability Offset */
pos = pci_find_capability(pObj->parent_pci_dev, PCI_CAP_ID_EXP);
- if (pos != 0) {
+ if (pos != 0)
pAd->HostLnkCtrlOffset = pos + PCI_EXP_LNKCTL;
- }
+
 /* If configured to turn on L1. */
HostConfiguration = 0;
if (pAd->StaCfg.PSControl.field.rt30xxForceASPMTest == 1) {
- DBGPRINT(RT_DEBUG_TRACE, ("Enter,PSM : Force ASPM \n"));
+ DBGPRINT(RT_DEBUG_TRACE, ("Enter,PSM : Force ASPM\n"));
 /* Skip non-existent devices right away */
if ((pAd->HostLnkCtrlOffset != 0)) {
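The code above walks the parent bridge's PCI Express capability to reach its Link Control register. A minimal sketch of the same lookup with the generic PCI helpers (pdev is a placeholder for the bridge device; this is illustration, not driver code):

	#include <linux/pci.h>

	static u16 example_read_lnkctl(struct pci_dev *pdev)
	{
		int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		u16 lnkctl = 0;

		/* pci_find_capability() returns 0 when the capability is absent. */
		if (pos)
			pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);
		return lnkctl;
	}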
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index fd9a2072139..0029b2d73b7 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -82,7 +82,7 @@ char const *pWirelessFloodEventText[IW_FLOOD_EVENT_TYPE_NUM] = {
};
/* timeout -- ms */
-void RTMP_SetPeriodicTimer(struct timer_list * pTimer,
+void RTMP_SetPeriodicTimer(struct timer_list *pTimer,
IN unsigned long timeout)
{
timeout = ((timeout * OS_HZ) / 1000);
@@ -92,7 +92,7 @@ void RTMP_SetPeriodicTimer(struct timer_list * pTimer,
/* convert NdisMInitializeTimer --> RTMP_OS_Init_Timer */
void RTMP_OS_Init_Timer(struct rt_rtmp_adapter *pAd,
- struct timer_list * pTimer,
+ struct timer_list *pTimer,
IN TIMER_FUNCTION function, void *data)
{
init_timer(pTimer);
@@ -100,7 +100,7 @@ void RTMP_OS_Init_Timer(struct rt_rtmp_adapter *pAd,
pTimer->function = function;
}
-void RTMP_OS_Add_Timer(struct timer_list * pTimer,
+void RTMP_OS_Add_Timer(struct timer_list *pTimer,
IN unsigned long timeout)
{
if (timer_pending(pTimer))
@@ -111,14 +111,14 @@ void RTMP_OS_Add_Timer(struct timer_list * pTimer,
add_timer(pTimer);
}
-void RTMP_OS_Mod_Timer(struct timer_list * pTimer,
+void RTMP_OS_Mod_Timer(struct timer_list *pTimer,
IN unsigned long timeout)
{
timeout = ((timeout * OS_HZ) / 1000);
mod_timer(pTimer, jiffies + timeout);
}
-void RTMP_OS_Del_Timer(struct timer_list * pTimer,
+void RTMP_OS_Del_Timer(struct timer_list *pTimer,
OUT BOOLEAN * pCancelled)
{
if (timer_pending(pTimer)) {
@@ -146,7 +146,7 @@ void RTMPusecDelay(unsigned long usec)
udelay(usec % 50);
}
-void RTMP_GetCurrentSystemTime(LARGE_INTEGER * time)
+void RTMP_GetCurrentSystemTime(LARGE_INTEGER *time)
{
time->u.LowPart = jiffies;
}
@@ -154,11 +154,11 @@ void RTMP_GetCurrentSystemTime(LARGE_INTEGER * time)
/* pAd MUST allow to be NULL */
int os_alloc_mem(struct rt_rtmp_adapter *pAd, u8 ** mem, unsigned long size)
{
- *mem = (u8 *)kmalloc(size, GFP_ATOMIC);
+ *mem = kmalloc(size, GFP_ATOMIC);
if (*mem)
- return (NDIS_STATUS_SUCCESS);
+ return NDIS_STATUS_SUCCESS;
else
- return (NDIS_STATUS_FAILURE);
+ return NDIS_STATUS_FAILURE;
}
/* pAd MUST allow to be NULL */
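The os_alloc_mem() hunk above also drops the (u8 *) cast on kmalloc(): the void * return converts implicitly in C, and the cast can hide a missing #include <linux/slab.h>. A minimal sketch of the same idea (alloc_scratch is hypothetical):

	#include <linux/slab.h>
	#include <linux/types.h>

	static u8 *alloc_scratch(size_t size)
	{
		return kmalloc(size, GFP_ATOMIC);	/* no cast needed on void * */
	}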
@@ -167,7 +167,7 @@ int os_free_mem(struct rt_rtmp_adapter *pAd, void *mem)
ASSERT(mem);
kfree(mem);
- return (NDIS_STATUS_SUCCESS);
+ return NDIS_STATUS_SUCCESS;
}
void *RtmpOSNetPktAlloc(struct rt_rtmp_adapter *pAd, IN int size)
@@ -176,7 +176,7 @@ void *RtmpOSNetPktAlloc(struct rt_rtmp_adapter *pAd, IN int size)
/* Add 2 more bytes for ip header alignment */
skb = dev_alloc_skb(size + 2);
- return ((void *)skb);
+ return (void *)skb;
}
void *RTMP_AllocateFragPacketBuffer(struct rt_rtmp_adapter *pAd,
@@ -201,7 +201,7 @@ void *RTMP_AllocateFragPacketBuffer(struct rt_rtmp_adapter *pAd,
void *RTMP_AllocateTxPacketBuffer(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress)
+ void **VirtualAddress)
{
struct sk_buff *pkt;
@@ -271,7 +271,7 @@ void RTMPFreeAdapter(struct rt_rtmp_adapter *pAd)
BOOLEAN OS_Need_Clone_Packet(void)
{
- return (FALSE);
+ return FALSE;
}
/*
@@ -299,7 +299,7 @@ BOOLEAN OS_Need_Clone_Packet(void)
int RTMPCloneNdisPacket(struct rt_rtmp_adapter *pAd,
IN BOOLEAN pInsAMSDUHdr,
void *pInPacket,
- void ** ppOutPacket)
+ void **ppOutPacket)
{
struct sk_buff *pkt;
@@ -328,7 +328,7 @@ int RTMPCloneNdisPacket(struct rt_rtmp_adapter *pAd,
/* the allocated NDIS PACKET must be freed via RTMPFreeNdisPacket() */
int RTMPAllocateNdisPacket(struct rt_rtmp_adapter *pAd,
- void ** ppPacket,
+ void **ppPacket,
u8 *pHeader,
u32 HeaderLen,
u8 *pData, u32 DataLen)
@@ -391,7 +391,7 @@ int Sniff2BytesFromNdisBuffer(char *pFirstBuffer,
void RTMP_QueryPacketInfo(void *pPacket,
struct rt_packet_info *pPacketInfo,
- u8 ** pSrcBufVA, u32 * pSrcBufLen)
+ u8 **pSrcBufVA, u32 * pSrcBufLen)
{
pPacketInfo->BufferCount = 1;
pPacketInfo->pFirstBuffer = (char *)GET_OS_PKT_DATAPTR(pPacket);
@@ -402,9 +402,9 @@ void RTMP_QueryPacketInfo(void *pPacket,
*pSrcBufLen = GET_OS_PKT_LEN(pPacket);
}
-void RTMP_QueryNextPacketInfo(void ** ppPacket,
+void RTMP_QueryNextPacketInfo(void **ppPacket,
struct rt_packet_info *pPacketInfo,
- u8 ** pSrcBufVA, u32 * pSrcBufLen)
+ u8 **pSrcBufVA, u32 * pSrcBufLen)
{
void *pPacket = NULL;
@@ -463,8 +463,8 @@ void *duplicate_pkt(struct rt_rtmp_adapter *pAd,
struct sk_buff *skb;
void *pPacket = NULL;
- if ((skb =
- __dev_alloc_skb(HdrLen + DataSize + 2, MEM_ALLOC_FLAG)) != NULL) {
+ skb = __dev_alloc_skb(HdrLen + DataSize + 2, MEM_ALLOC_FLAG);
+ if (skb != NULL) {
skb_reserve(skb, 2);
NdisMoveMemory(skb_tail_pointer(skb), pHeader802_3, HdrLen);
skb_put(skb, HdrLen);
@@ -589,7 +589,7 @@ rt_get_sg_list_from_packet(void *pPacket, struct rt_rtmp_sg_list *sg)
sg->NumberOfElements = 1;
sg->Elements[0].Address = GET_OS_PKT_DATAPTR(pPacket);
sg->Elements[0].Length = GET_OS_PKT_LEN(pPacket);
- return (sg);
+ return sg;
}
void hex_dump(char *str, unsigned char *pSrcBufVA, unsigned int SrcBufLen)
@@ -673,7 +673,8 @@ void RTMPSendWirelessEvent(struct rt_rtmp_adapter *pAd,
return;
}
/*Allocate memory and copy the msg. */
- if ((pBuf = kmalloc(IW_CUSTOM_MAX_LEN, GFP_ATOMIC)) != NULL) {
+ pBuf = kmalloc(IW_CUSTOM_MAX_LEN, GFP_ATOMIC);
+ if (pBuf != NULL) {
/*Prepare the payload */
memset(pBuf, 0, IW_CUSTOM_MAX_LEN);
@@ -1062,7 +1063,7 @@ void RtmpOSTaskCustomize(struct rt_rtmp_os_task *pTask)
#ifndef KTHREAD_SUPPORT
- daemonize((char *)& pTask->taskName[0] /*"%s",pAd->net_dev->name */ );
+ daemonize((char *)&pTask->taskName[0] /*"%s",pAd->net_dev->name */);
allow_signal(SIGTERM);
allow_signal(SIGKILL);
@@ -1247,7 +1248,7 @@ void RtmpOSNetDevFree(struct net_device *pNetDev)
free_netdev(pNetDev);
}
-int RtmpOSNetDevAlloc(struct net_device ** new_dev_p, u32 privDataSize)
+int RtmpOSNetDevAlloc(struct net_device **new_dev_p, u32 privDataSize)
{
/* assign it as null first. */
*new_dev_p = NULL;
@@ -1344,7 +1345,7 @@ struct net_device *RtmpOSNetDevCreate(struct rt_rtmp_adapter *pAd,
int status;
/* allocate a new network device */
- status = RtmpOSNetDevAlloc(&pNetDev, 0 /*privMemSize */ );
+ status = RtmpOSNetDevAlloc(&pNetDev, 0 /*privMemSize */);
if (status != NDIS_STATUS_SUCCESS) {
/* allocation fail, exit */
DBGPRINT(RT_DEBUG_ERROR,
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
index a7c540f8e3e..b370fb21e42 100644
--- a/drivers/staging/rt2860/rt_linux.h
+++ b/drivers/staging/rt2860/rt_linux.h
@@ -455,10 +455,11 @@ void hex_dump(char *str, unsigned char *pSrcBufVA, unsigned int SrcBufLen);
* Device DMA Access related definitions and data structures.
**********************************************************************************/
#ifdef RTMP_MAC_PCI
-dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size,
- int sd_idx, int direction);
-void linux_pci_unmap_single(void *handle, dma_addr_t dma_addr, size_t size,
- int direction);
+struct rt_rtmp_adapter;
+dma_addr_t linux_pci_map_single(struct rt_rtmp_adapter *pAd, void *ptr,
+ size_t size, int sd_idx, int direction);
+void linux_pci_unmap_single(struct rt_rtmp_adapter *pAd, dma_addr_t dma_addr,
+ size_t size, int direction);
#define PCI_MAP_SINGLE(_handle, _ptr, _size, _sd_idx, _dir) \
linux_pci_map_single(_handle, _ptr, _size, _sd_idx, _dir)
@@ -475,11 +476,6 @@ void linux_pci_unmap_single(void *handle, dma_addr_t dma_addr, size_t size,
#define DEV_ALLOC_SKB(_length) \
dev_alloc_skb(_length)
#endif /* RTMP_MAC_PCI // */
-#ifdef RTMP_MAC_USB
-#define PCI_MAP_SINGLE(_handle, _ptr, _size, _dir) (unsigned long)0
-
-#define PCI_UNMAP_SINGLE(_handle, _ptr, _size, _dir)
-#endif /* RTMP_MAC_USB // */
/*
* unsigned long
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index fbddb00cfed..ad60ceaf4b8 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -439,13 +439,13 @@ int rt28xx_open(struct net_device *dev)
RTMPInitPCIeLinkCtrlValue(pAd);
#endif /* RTMP_MAC_PCI // */
- return (retval);
+ return retval;
err:
/*+++Add by shiang, move from rt28xx_init() to here. */
RtmpOSIRQRelease(net_dev);
/*---Add by shiang, move from rt28xx_init() to here. */
- return (-1);
+ return -1;
} /* End of rt28xx_open */
static const struct net_device_ops rt2860_netdev_ops = {
@@ -534,7 +534,7 @@ int rt28xx_packet_xmit(struct sk_buff *skb)
}
RTMP_SET_PACKET_5VT(pPacket, 0);
- STASendPackets((void *)pAd, (void **)& pPacket, 1);
+ STASendPackets((void *)pAd, (void **)&pPacket, 1);
status = NETDEV_TX_OK;
done:
@@ -571,7 +571,7 @@ static int rt28xx_send_packets(IN struct sk_buff *skb_p,
return NETDEV_TX_OK;
}
- NdisZeroMemory((u8 *)& skb_p->cb[CB_OFF], 15);
+ NdisZeroMemory((u8 *)&skb_p->cb[CB_OFF], 15);
RTMP_SET_PACKET_NET_DEVICE_MBSSID(skb_p, MAIN_MBSSID);
return rt28xx_packet_xmit(skb_p);
@@ -628,13 +628,13 @@ void tbtt_tasklet(unsigned long data)
========================================================================
Routine Description:
- return ethernet statistics counter
+ return ethernet statistics counter
Arguments:
- net_dev Pointer to net_device
+ net_dev Pointer to net_device
Return Value:
- net_device_stats*
+ net_device_stats*
Note:
@@ -728,9 +728,9 @@ int AdapterBlockAllocateMemory(void *handle, void ** ppAd)
if (*ppAd) {
NdisZeroMemory(*ppAd, sizeof(struct rt_rtmp_adapter));
- ((struct rt_rtmp_adapter *)* ppAd)->OS_Cookie = handle;
- return (NDIS_STATUS_SUCCESS);
+ ((struct rt_rtmp_adapter *)*ppAd)->OS_Cookie = handle;
+ return NDIS_STATUS_SUCCESS;
} else {
- return (NDIS_STATUS_FAILURE);
+ return NDIS_STATUS_FAILURE;
}
}
diff --git a/drivers/staging/rt2860/rt_pci_rbus.c b/drivers/staging/rt2860/rt_pci_rbus.c
index e0a0aeeb17a..3004be6da00 100644
--- a/drivers/staging/rt2860/rt_pci_rbus.c
+++ b/drivers/staging/rt2860/rt_pci_rbus.c
@@ -81,7 +81,7 @@ void RTMP_AllocateTxDescMemory(struct rt_rtmp_adapter *pAd,
u32 Index,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress)
{
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
@@ -96,7 +96,7 @@ void RTMP_AllocateTxDescMemory(struct rt_rtmp_adapter *pAd,
void RTMP_AllocateMgmtDescMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress)
{
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
@@ -111,7 +111,7 @@ void RTMP_AllocateMgmtDescMemory(struct rt_rtmp_adapter *pAd,
void RTMP_AllocateRxDescMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress)
{
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
@@ -139,7 +139,7 @@ void RTMP_AllocateFirstTxBuffer(struct rt_rtmp_adapter *pAd,
u32 Index,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress)
{
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
@@ -173,7 +173,7 @@ void RTMP_FreeFirstTxBuffer(struct rt_rtmp_adapter *pAd,
void RTMP_AllocateSharedMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress)
{
struct os_cookie *pObj = (struct os_cookie *)pAd->OS_Cookie;
@@ -197,7 +197,7 @@ void RTMP_AllocateSharedMemory(struct rt_rtmp_adapter *pAd,
void *RTMP_AllocateRxPacketBuffer(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
OUT dma_addr_t *
PhysicalAddress)
{
@@ -790,10 +790,9 @@ IRQ_HANDLE_TYPE rt2860_interrupt(int irq, void *dev_instance)
 * invalidate or write back cache
* and convert virtual address to physical address
*/
-dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size,
- int sd_idx, int direction)
+dma_addr_t linux_pci_map_single(struct rt_rtmp_adapter *pAd, void *ptr,
+ size_t size, int sd_idx, int direction)
{
- struct rt_rtmp_adapter *pAd;
struct os_cookie *pObj;
/*
@@ -812,7 +811,6 @@ dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size,
sd_idx = -1
*/
- pAd = (struct rt_rtmp_adapter *)handle;
pObj = (struct os_cookie *)pAd->OS_Cookie;
if (sd_idx == 1) {
@@ -826,13 +824,11 @@ dma_addr_t linux_pci_map_single(void *handle, void *ptr, size_t size,
}
-void linux_pci_unmap_single(void *handle, dma_addr_t dma_addr, size_t size,
- int direction)
+void linux_pci_unmap_single(struct rt_rtmp_adapter *pAd, dma_addr_t dma_addr,
+ size_t size, int direction)
{
- struct rt_rtmp_adapter *pAd;
struct os_cookie *pObj;
- pAd = (struct rt_rtmp_adapter *)handle;
pObj = (struct os_cookie *)pAd->OS_Cookie;
pci_unmap_single(pObj->pci_dev, dma_addr, size, direction);
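With the wrappers taking struct rt_rtmp_adapter * directly, the void * handle and its cast disappear and a wrong pointer now fails at compile time. The wrappers bottom out in the legacy PCI DMA API; a minimal sketch of that map/unmap pairing (pdev and buf are placeholders, and new code would normally use dma_map_single() instead):

	#include <linux/pci.h>

	static void example_dma_roundtrip(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

		if (pci_dma_mapping_error(pdev, dma))
			return;
		/* ... hand 'dma' to the hardware, wait for it to finish ... */
		pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
	}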
diff --git a/drivers/staging/rt2860/rt_usb.c b/drivers/staging/rt2860/rt_usb.c
index 01a7eb4e8ba..bcfc0f54d2a 100644
--- a/drivers/staging/rt2860/rt_usb.c
+++ b/drivers/staging/rt2860/rt_usb.c
@@ -233,8 +233,7 @@ static void rtusb_dataout_complete(unsigned long data)
FREE_HTTX_RING(pAd, BulkOutPipeId, pHTTXContext);
/*RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[BulkOutPipeId], IrqFlags); */
- } else /* STATUS_OTHER */
- {
+ } else { /* STATUS_OTHER */
u8 *pBuf;
pAd->BulkOutCompleteOther++;
@@ -316,8 +315,7 @@ static void rtusb_null_frame_done_tasklet(unsigned long data)
RTMP_IRQ_UNLOCK(&pAd->BulkOutLock[0], irqFlag);
RTMPDeQueuePacket(pAd, FALSE, NUM_OF_TX_RING, MAX_TX_PROCESS);
- } else /* STATUS_OTHER */
- {
+ } else { /* STATUS_OTHER */
if ((!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)) &&
@@ -362,8 +360,7 @@ static void rtusb_rts_frame_done_tasklet(unsigned long data)
if (Status == USB_ST_NOERROR) {
RTMP_IRQ_UNLOCK(&pAd->BulkOutLock[0], irqFlag);
RTMPDeQueuePacket(pAd, FALSE, NUM_OF_TX_RING, MAX_TX_PROCESS);
- } else /* STATUS_OTHER */
- {
+ } else { /* STATUS_OTHER */
if ((!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)) &&
@@ -410,8 +407,7 @@ static void rtusb_pspoll_frame_done_tasklet(unsigned long data)
if (Status == USB_ST_NOERROR) {
RTMPDeQueuePacket(pAd, FALSE, NUM_OF_TX_RING, MAX_TX_PROCESS);
- } else /* STATUS_OTHER */
- {
+ } else { /* STATUS_OTHER */
if ((!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)) &&
@@ -473,14 +469,12 @@ static void rx_done_tasklet(unsigned long data)
if (Status == USB_ST_NOERROR) {
pAd->BulkInComplete++;
pAd->NextRxBulkInPosition = 0;
- if (pRxContext->BulkInOffset) /* As jan's comment, it may bulk-in success but size is zero. */
- {
+ if (pRxContext->BulkInOffset) { /* As jan's comment, it may bulk-in success but size is zero. */
pRxContext->Readable = TRUE;
INC_RING_INDEX(pAd->NextRxBulkInIndex, RX_RING_SIZE);
}
RTMP_IRQ_UNLOCK(&pAd->BulkInLock, IrqFlags);
- } else /* STATUS_OTHER */
- {
+ } else { /* STATUS_OTHER */
pAd->BulkInCompleteFail++;
 /* Still read this packet although it may contain wrong bytes. */
pRxContext->Readable = FALSE;
@@ -584,7 +578,7 @@ static void rtusb_mgmt_dma_done_tasklet(unsigned long data)
 /* The protection of the rest of the bulk should be in the BulkOut routine */
if (pAd->MgmtRing.TxSwFreeIdx <
MGMT_RING_SIZE
- /* pMLMEContext->bWaitingBulkOut == TRUE */ ) {
+ /* pMLMEContext->bWaitingBulkOut == TRUE */) {
RTUSB_SET_BULK_FLAG(pAd, fRTUSB_BULK_OUT_MLME);
}
RTUSBKickBulkOut(pAd);
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index 4401a55bda6..82b6e783b33 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -181,8 +181,7 @@ struct rt_queue_header {
(QueueHeader)->Head; \
{ \
struct rt_queue_entry *pNext; \
- if ((QueueHeader)->Head != NULL) \
- { \
+ if ((QueueHeader)->Head != NULL) { \
pNext = (QueueHeader)->Head->Next; \
(QueueHeader)->Head->Next = NULL; \
(QueueHeader)->Head = pNext; \
@@ -242,9 +241,9 @@ struct rt_queue_header {
#define OPSTATUS_CLEAR_FLAG(_pAd, _F) ((_pAd)->CommonCfg.OpStatusFlags &= ~(_F))
#define OPSTATUS_TEST_FLAG(_pAd, _F) (((_pAd)->CommonCfg.OpStatusFlags & (_F)) != 0)
-#define CLIENT_STATUS_SET_FLAG(_pEntry,_F) ((_pEntry)->ClientStatusFlags |= (_F))
-#define CLIENT_STATUS_CLEAR_FLAG(_pEntry,_F) ((_pEntry)->ClientStatusFlags &= ~(_F))
-#define CLIENT_STATUS_TEST_FLAG(_pEntry,_F) (((_pEntry)->ClientStatusFlags & (_F)) != 0)
+#define CLIENT_STATUS_SET_FLAG(_pEntry, _F) ((_pEntry)->ClientStatusFlags |= (_F))
+#define CLIENT_STATUS_CLEAR_FLAG(_pEntry, _F) ((_pEntry)->ClientStatusFlags &= ~(_F))
+#define CLIENT_STATUS_TEST_FLAG(_pEntry, _F) (((_pEntry)->ClientStatusFlags & (_F)) != 0)
#define RX_FILTER_SET_FLAG(_pAd, _F) ((_pAd)->CommonCfg.PacketFilter |= (_F))
#define RX_FILTER_CLEAR_FLAG(_pAd, _F) ((_pAd)->CommonCfg.PacketFilter &= ~(_F))
@@ -279,13 +278,13 @@ struct rt_queue_header {
_pAd->StaActive.SupportedHtPhy.RecomWidth = _pAd->MlmeAux.AddHtInfo.AddHtInfo.RecomWidth; \
_pAd->StaActive.SupportedHtPhy.OperaionMode = _pAd->MlmeAux.AddHtInfo.AddHtInfo2.OperaionMode; \
_pAd->StaActive.SupportedHtPhy.NonGfPresent = _pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent; \
- NdisMoveMemory((_pAd)->MacTab.Content[BSSID_WCID].HTCapability.MCSSet, (_pAd)->StaActive.SupportedPhyInfo.MCSSet, sizeof(u8)* 16);\
+ NdisMoveMemory((_pAd)->MacTab.Content[BSSID_WCID].HTCapability.MCSSet, (_pAd)->StaActive.SupportedPhyInfo.MCSSet, sizeof(u8) * 16);\
}
#define COPY_AP_HTSETTINGS_FROM_BEACON(_pAd, _pHtCapability) \
{ \
_pAd->MacTab.Content[BSSID_WCID].AMsduSize = (u8)(_pHtCapability->HtCapInfo.AMsduSize); \
- _pAd->MacTab.Content[BSSID_WCID].MmpsMode= (u8)(_pHtCapability->HtCapInfo.MimoPs); \
+ _pAd->MacTab.Content[BSSID_WCID].MmpsMode = (u8)(_pHtCapability->HtCapInfo.MimoPs); \
_pAd->MacTab.Content[BSSID_WCID].MaxRAmpduFactor = (u8)(_pHtCapability->HtCapParm.MaxRAmpduFactor); \
}
@@ -349,17 +348,14 @@ struct rt_rtmp_sg_list {
/* if orginal Ethernet frame contains no LLC/SNAP, then an extra LLC/SNAP encap is required */
#define EXTRA_LLCSNAP_ENCAP_FROM_PKT_START(_pBufVA, _pExtraLlcSnapEncap) \
{ \
- if (((*(_pBufVA + 12) << 8) + *(_pBufVA + 13)) > 1500) \
- { \
+ if (((*(_pBufVA + 12) << 8) + *(_pBufVA + 13)) > 1500) { \
_pExtraLlcSnapEncap = SNAP_802_1H; \
if (NdisEqualMemory(IPX, _pBufVA + 12, 2) || \
- NdisEqualMemory(APPLE_TALK, _pBufVA + 12, 2)) \
- { \
+ NdisEqualMemory(APPLE_TALK, _pBufVA + 12, 2)) { \
_pExtraLlcSnapEncap = SNAP_BRIDGE_TUNNEL; \
} \
} \
- else \
- { \
+ else { \
_pExtraLlcSnapEncap = NULL; \
} \
}
@@ -367,17 +363,14 @@ struct rt_rtmp_sg_list {
/* New Define for new Tx Path. */
#define EXTRA_LLCSNAP_ENCAP_FROM_PKT_OFFSET(_pBufVA, _pExtraLlcSnapEncap) \
{ \
- if (((*(_pBufVA) << 8) + *(_pBufVA + 1)) > 1500) \
- { \
+ if (((*(_pBufVA) << 8) + *(_pBufVA + 1)) > 1500) { \
_pExtraLlcSnapEncap = SNAP_802_1H; \
if (NdisEqualMemory(IPX, _pBufVA, 2) || \
- NdisEqualMemory(APPLE_TALK, _pBufVA, 2)) \
- { \
+ NdisEqualMemory(APPLE_TALK, _pBufVA, 2)) { \
_pExtraLlcSnapEncap = SNAP_BRIDGE_TUNNEL; \
} \
} \
- else \
- { \
+ else { \
_pExtraLlcSnapEncap = NULL; \
} \
}
@@ -399,33 +392,29 @@ struct rt_rtmp_sg_list {
#define CONVERT_TO_802_3(_p8023hdr, _pDA, _pSA, _pData, _DataSize, _pRemovedLLCSNAP) \
{ \
char LLC_Len[2]; \
- \
+ \
_pRemovedLLCSNAP = NULL; \
if (NdisEqualMemory(SNAP_802_1H, _pData, 6) || \
- NdisEqualMemory(SNAP_BRIDGE_TUNNEL, _pData, 6)) \
- { \
- u8 *pProto = _pData + 6; \
- \
- if ((NdisEqualMemory(IPX, pProto, 2) || NdisEqualMemory(APPLE_TALK, pProto, 2)) && \
- NdisEqualMemory(SNAP_802_1H, _pData, 6)) \
- { \
- LLC_Len[0] = (u8)(_DataSize / 256); \
- LLC_Len[1] = (u8)(_DataSize % 256); \
- MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, LLC_Len); \
- } \
- else \
- { \
- MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, pProto); \
- _pRemovedLLCSNAP = _pData; \
- _DataSize -= LENGTH_802_1_H; \
- _pData += LENGTH_802_1_H; \
- } \
+ NdisEqualMemory(SNAP_BRIDGE_TUNNEL, _pData, 6)) { \
+ u8 *pProto = _pData + 6; \
+ \
+ if ((NdisEqualMemory(IPX, pProto, 2) || NdisEqualMemory(APPLE_TALK, pProto, 2)) && \
+ NdisEqualMemory(SNAP_802_1H, _pData, 6)) { \
+ LLC_Len[0] = (u8)(_DataSize / 256); \
+ LLC_Len[1] = (u8)(_DataSize % 256); \
+ MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, LLC_Len); \
+ } \
+ else { \
+ MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, pProto); \
+ _pRemovedLLCSNAP = _pData; \
+ _DataSize -= LENGTH_802_1_H; \
+ _pData += LENGTH_802_1_H; \
+ } \
} \
- else \
- { \
- LLC_Len[0] = (u8)(_DataSize / 256); \
- LLC_Len[1] = (u8)(_DataSize % 256); \
- MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, LLC_Len); \
+ else { \
+ LLC_Len[0] = (u8)(_DataSize / 256); \
+ LLC_Len[1] = (u8)(_DataSize % 256); \
+ MAKE_802_3_HEADER(_p8023hdr, _pDA, _pSA, LLC_Len); \
} \
}
@@ -438,19 +427,19 @@ struct rt_rtmp_sg_list {
u32 High32TSF, Low32TSF; \
RTMP_IO_READ32(_pAd, TSF_TIMER_DW1, &High32TSF); \
RTMP_IO_READ32(_pAd, TSF_TIMER_DW0, &Low32TSF); \
- MlmeEnqueueForRecv(_pAd, Wcid, High32TSF, Low32TSF, (u8)_Rssi0, (u8)_Rssi1,(u8)_Rssi2,_FrameSize, _pFrame, (u8)_PlcpSignal); \
+ MlmeEnqueueForRecv(_pAd, Wcid, High32TSF, Low32TSF, (u8)_Rssi0, (u8)_Rssi1, (u8)_Rssi2, _FrameSize, _pFrame, (u8)_PlcpSignal); \
}
#endif /* RTMP_MAC_PCI // */
#ifdef RTMP_MAC_USB
#define REPORT_MGMT_FRAME_TO_MLME(_pAd, Wcid, _pFrame, _FrameSize, _Rssi0, _Rssi1, _Rssi2, _PlcpSignal) \
{ \
- u32 High32TSF=0, Low32TSF=0; \
- MlmeEnqueueForRecv(_pAd, Wcid, High32TSF, Low32TSF, (u8)_Rssi0, (u8)_Rssi1,(u8)_Rssi2,_FrameSize, _pFrame, (u8)_PlcpSignal); \
+ u32 High32TSF = 0, Low32TSF = 0; \
+ MlmeEnqueueForRecv(_pAd, Wcid, High32TSF, Low32TSF, (u8)_Rssi0, (u8)_Rssi1, (u8)_Rssi2, _FrameSize, _pFrame, (u8)_PlcpSignal); \
}
#endif /* RTMP_MAC_USB // */
-#define MAC_ADDR_EQUAL(pAddr1,pAddr2) RTMPEqualMemory((void *)(pAddr1), (void *)(pAddr2), MAC_ADDR_LEN)
-#define SSID_EQUAL(ssid1, len1, ssid2, len2) ((len1==len2) && (RTMPEqualMemory(ssid1, ssid2, len1)))
+#define MAC_ADDR_EQUAL(pAddr1, pAddr2) RTMPEqualMemory((void *)(pAddr1), (void *)(pAddr2), MAC_ADDR_LEN)
+#define SSID_EQUAL(ssid1, len1, ssid2, len2) ((len1 == len2) && (RTMPEqualMemory(ssid1, ssid2, len1)))
/* */
/* Check if it is Japan W53(ch52,56,60,64) channel. */
@@ -1054,7 +1043,7 @@ typedef union _BACAP_STRUC {
u32 MMPSmode:2; /* MIMO power save more, 0:static, 1:dynamic, 2:rsv, 3:mimo enable */
u32 bHtAdhoc:1; /* adhoc can use ht rate. */
u32 b2040CoexistScanSup:1; /*As Sta, support do 2040 coexistence scan for AP. As Ap, support monitor trigger event to check if can use BW 40MHz. */
- u32 : 4;
+ u32: 4;
} field;
u32 word;
} BACAP_STRUC, *PBACAP_STRUC;
@@ -1334,7 +1323,7 @@ struct rt_common_config {
BOOLEAN PSPXlink; /* 0: Disable. 1: Enable */
-#if defined(RT305x)||defined(RT30xx)
+#if defined(RT305x) || defined(RT30xx)
/* request by Gary, for High Power issue */
u8 HighPowerPatchDisabled;
#endif
@@ -2169,8 +2158,8 @@ struct rt_rtmp_adapter {
**************************************************************************/
struct rt_rx_blk {
RT28XX_RXD_STRUC RxD;
- struct rt_rxwi * pRxWI;
- struct rt_header_802_11 * pHeader;
+ struct rt_rxwi *pRxWI;
+ struct rt_header_802_11 *pHeader;
void *pRxPacket;
u8 *pData;
u16 DataSize;
@@ -2268,7 +2257,7 @@ struct rt_tx_blk {
* Other static inline function definitions
**************************************************************************/
static inline void ConvertMulticastIP2MAC(u8 *pIpAddr,
- u8 ** ppMacAddr,
+ u8 **ppMacAddr,
u16 ProtoType)
{
if (pIpAddr == NULL)
@@ -2310,7 +2299,7 @@ char *GetBW(int BW);
/* Private routines in rtmp_init.c */
/* */
int RTMPAllocAdapterBlock(void *handle,
- struct rt_rtmp_adapter * * ppAdapter);
+ struct rt_rtmp_adapter **ppAdapter);
int RTMPAllocTxRxRingMemory(struct rt_rtmp_adapter *pAd);
@@ -2367,8 +2356,6 @@ void RTMPMoveMemory(void *pDest, void *pSrc, unsigned long Length);
void AtoH(char *src, u8 *dest, int destlen);
-u8 BtoH(char ch);
-
void RTMPPatchMacBbpBug(struct rt_rtmp_adapter *pAd);
void RTMPInitTimer(struct rt_rtmp_adapter *pAd,
@@ -2431,11 +2418,11 @@ void ORIBATimerTimeout(struct rt_rtmp_adapter *pAd);
void SendRefreshBAR(struct rt_rtmp_adapter *pAd, struct rt_mac_table_entry *pEntry);
void ActHeaderInit(struct rt_rtmp_adapter *pAd,
- struct rt_header_802_11 * pHdr80211,
+ struct rt_header_802_11 *pHdr80211,
u8 *Addr1, u8 *Addr2, u8 *Addr3);
void BarHeaderInit(struct rt_rtmp_adapter *pAd,
- struct rt_frame_bar * pCntlBar, u8 *pDA, u8 *pSA);
+ struct rt_frame_bar *pCntlBar, u8 *pDA, u8 *pSA);
void InsertActField(struct rt_rtmp_adapter *pAd,
u8 *pFrameBuf,
@@ -2443,7 +2430,7 @@ void InsertActField(struct rt_rtmp_adapter *pAd,
BOOLEAN CntlEnqueueForRecv(struct rt_rtmp_adapter *pAd,
unsigned long Wcid,
- unsigned long MsgLen, struct rt_frame_ba_req * pMsg);
+ unsigned long MsgLen, struct rt_frame_ba_req *pMsg);
/* */
/* Private routines in rtmp_data.c */
@@ -2511,7 +2498,7 @@ int MlmeDataHardTransmit(struct rt_rtmp_adapter *pAd,
u8 QueIdx, void *pPacket);
void RTMPWriteTxDescriptor(struct rt_rtmp_adapter *pAd,
- struct rt_txd * pTxD, IN BOOLEAN bWIV, u8 QSEL);
+ struct rt_txd *pTxD, IN BOOLEAN bWIV, u8 QSEL);
#endif /* RTMP_MAC_PCI // */
u16 RTMPCalcDuration(struct rt_rtmp_adapter *pAd, u8 Rate, unsigned long Size);
@@ -2527,10 +2514,10 @@ void RTMPWriteTxWI(struct rt_rtmp_adapter *pAd, struct rt_txwi * pTxWI, IN BOOLE
IN BOOLEAN CfAck, IN HTTRANSMIT_SETTING * pTransmit);
void RTMPWriteTxWI_Data(struct rt_rtmp_adapter *pAd,
- struct rt_txwi * pTxWI, struct rt_tx_blk *pTxBlk);
+ struct rt_txwi *pTxWI, struct rt_tx_blk *pTxBlk);
void RTMPWriteTxWI_Cache(struct rt_rtmp_adapter *pAd,
- struct rt_txwi * pTxWI, struct rt_tx_blk *pTxBlk);
+ struct rt_txwi *pTxWI, struct rt_tx_blk *pTxBlk);
void RTMPSuspendMsduTransmission(struct rt_rtmp_adapter *pAd);
@@ -2573,10 +2560,10 @@ void WpaStaGroupKeySetting(struct rt_rtmp_adapter *pAd);
int RTMPCloneNdisPacket(struct rt_rtmp_adapter *pAd,
IN BOOLEAN pInsAMSDUHdr,
void *pInPacket,
- void ** ppOutPacket);
+ void **ppOutPacket);
int RTMPAllocateNdisPacket(struct rt_rtmp_adapter *pAd,
- void ** pPacket,
+ void **pPacket,
u8 *pHeader,
u32 HeaderLen,
u8 *pData, u32 DataLen);
@@ -2717,7 +2704,7 @@ BOOLEAN AsicCheckCommanOk(struct rt_rtmp_adapter *pAd, u8 Command);
void MacAddrRandomBssid(struct rt_rtmp_adapter *pAd, u8 *pAddr);
void MgtMacHeaderInit(struct rt_rtmp_adapter *pAd,
- struct rt_header_802_11 * pHdr80211,
+ struct rt_header_802_11 *pHdr80211,
u8 SubType,
u8 ToDs, u8 *pDA, u8 *pBssid);
@@ -2796,7 +2783,7 @@ void MlmeQueueDestroy(struct rt_mlme_queue *Queue);
BOOLEAN MlmeEnqueue(struct rt_rtmp_adapter *pAd,
unsigned long Machine,
- unsigned long MsgType, unsigned long MsgLen, void * Msg);
+ unsigned long MsgType, unsigned long MsgLen, void *Msg);
BOOLEAN MlmeEnqueueForRecv(struct rt_rtmp_adapter *pAd,
unsigned long Wcid,
@@ -2807,7 +2794,7 @@ BOOLEAN MlmeEnqueueForRecv(struct rt_rtmp_adapter *pAd,
u8 Rssi2,
unsigned long MsgLen, void *Msg, u8 Signal);
-BOOLEAN MlmeDequeue(struct rt_mlme_queue *Queue, struct rt_mlme_queue_elem ** Elem);
+BOOLEAN MlmeDequeue(struct rt_mlme_queue *Queue, struct rt_mlme_queue_elem **Elem);
void MlmeRestartStateMachine(struct rt_rtmp_adapter *pAd);
@@ -2816,8 +2803,8 @@ BOOLEAN MlmeQueueEmpty(struct rt_mlme_queue *Queue);
BOOLEAN MlmeQueueFull(struct rt_mlme_queue *Queue);
BOOLEAN MsgTypeSubst(struct rt_rtmp_adapter *pAd,
- struct rt_frame_802_11 * pFrame,
- int * Machine, int * MsgType);
+ struct rt_frame_802_11 *pFrame,
+ int *Machine, int *MsgType);
void StateMachineInit(struct rt_state_machine *Sm,
IN STATE_MACHINE_FUNC Trans[],
@@ -2895,8 +2882,8 @@ void AssocPostProc(struct rt_rtmp_adapter *pAd,
u8 ExtRate[],
u8 ExtRateLen,
struct rt_edca_parm *pEdcaParm,
- struct rt_ht_capability_ie * pHtCapability,
- u8 HtCapabilityLen, struct rt_add_ht_info_ie * pAddHtInfo);
+ struct rt_ht_capability_ie *pHtCapability,
+ u8 HtCapabilityLen, struct rt_add_ht_info_ie *pAddHtInfo);
void AuthStateMachineInit(struct rt_rtmp_adapter *pAd,
struct rt_state_machine *sm, OUT STATE_MACHINE_FUNC Trans[]);
@@ -2928,7 +2915,7 @@ void AuthRspStateMachineInit(struct rt_rtmp_adapter *pAd,
void PeerDeauthAction(struct rt_rtmp_adapter *pAd, struct rt_mlme_queue_elem *Elem);
void PeerAuthSimpleRspGenAndSend(struct rt_rtmp_adapter *pAd,
- struct rt_header_802_11 * pHdr80211,
+ struct rt_header_802_11 *pHdr80211,
u16 Alg,
u16 Seq,
u16 Reason, u16 Status);
@@ -3054,133 +3041,133 @@ void ScanNextChannel(struct rt_rtmp_adapter *pAd);
unsigned long MakeIbssBeacon(struct rt_rtmp_adapter *pAd);
BOOLEAN MlmeScanReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
- u8 * BssType,
+ u8 *BssType,
char ssid[],
- u8 * SsidLen, u8 * ScanType);
+ u8 *SsidLen, u8 *ScanType);
BOOLEAN PeerBeaconAndProbeRspSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
u8 MsgChannel,
u8 *pAddr2,
u8 *pBssid,
char Ssid[],
- u8 * pSsidLen,
- u8 * pBssType,
- u16 * pBeaconPeriod,
- u8 * pChannel,
- u8 * pNewChannel,
+ u8 *pSsidLen,
+ u8 *pBssType,
+ u16 *pBeaconPeriod,
+ u8 *pChannel,
+ u8 *pNewChannel,
OUT LARGE_INTEGER * pTimestamp,
- struct rt_cf_parm * pCfParm,
- u16 * pAtimWin,
- u16 * pCapabilityInfo,
- u8 * pErp,
- u8 * pDtimCount,
- u8 * pDtimPeriod,
- u8 * pBcastFlag,
- u8 * pMessageToMe,
+ struct rt_cf_parm *pCfParm,
+ u16 *pAtimWin,
+ u16 *pCapabilityInfo,
+ u8 *pErp,
+ u8 *pDtimCount,
+ u8 *pDtimPeriod,
+ u8 *pBcastFlag,
+ u8 *pMessageToMe,
u8 SupRate[],
- u8 * pSupRateLen,
+ u8 *pSupRateLen,
u8 ExtRate[],
- u8 * pExtRateLen,
- u8 * pCkipFlag,
- u8 * pAironetCellPowerLimit,
+ u8 *pExtRateLen,
+ u8 *pCkipFlag,
+ u8 *pAironetCellPowerLimit,
struct rt_edca_parm *pEdcaParm,
struct rt_qbss_load_parm *pQbssLoad,
struct rt_qos_capability_parm *pQosCapability,
- unsigned long * pRalinkIe,
- u8 * pHtCapabilityLen,
- u8 * pPreNHtCapabilityLen,
- struct rt_ht_capability_ie * pHtCapability,
- u8 * AddHtInfoLen,
- struct rt_add_ht_info_ie * AddHtInfo,
- u8 * NewExtChannel,
- u16 * LengthVIE,
+ unsigned long *pRalinkIe,
+ u8 *pHtCapabilityLen,
+ u8 *pPreNHtCapabilityLen,
+ struct rt_ht_capability_ie *pHtCapability,
+ u8 *AddHtInfoLen,
+ struct rt_add_ht_info_ie *AddHtInfo,
+ u8 *NewExtChannel,
+ u16 *LengthVIE,
struct rt_ndis_802_11_variable_ies *pVIE);
BOOLEAN PeerAddBAReqActionSanity(struct rt_rtmp_adapter *pAd,
- void * pMsg,
+ void *pMsg,
unsigned long MsgLen, u8 *pAddr2);
BOOLEAN PeerAddBARspActionSanity(struct rt_rtmp_adapter *pAd,
- void * pMsg, unsigned long MsgLen);
+ void *pMsg, unsigned long MsgLen);
BOOLEAN PeerDelBAActionSanity(struct rt_rtmp_adapter *pAd,
- u8 Wcid, void * pMsg, unsigned long MsgLen);
+ u8 Wcid, void *pMsg, unsigned long MsgLen);
BOOLEAN MlmeAssocReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
u8 *pApAddr,
- u16 * CapabilityInfo,
- unsigned long * Timeout, u16 * ListenIntv);
+ u16 *CapabilityInfo,
+ unsigned long *Timeout, u16 *ListenIntv);
BOOLEAN MlmeAuthReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
u8 *pAddr,
- unsigned long * Timeout, u16 * Alg);
+ unsigned long *Timeout, u16 *Alg);
BOOLEAN MlmeStartReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
- char Ssid[], u8 * Ssidlen);
+ char Ssid[], u8 *Ssidlen);
BOOLEAN PeerAuthSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
u8 *pAddr,
- u16 * Alg,
- u16 * Seq,
- u16 * Status, char ChlgText[]);
+ u16 *Alg,
+ u16 *Seq,
+ u16 *Status, char ChlgText[]);
-BOOLEAN PeerAssocRspSanity(struct rt_rtmp_adapter *pAd, void * pMsg, unsigned long MsgLen, u8 *pAddr2, u16 * pCapabilityInfo, u16 * pStatus, u16 * pAid, u8 SupRate[], u8 * pSupRateLen, u8 ExtRate[], u8 * pExtRateLen, struct rt_ht_capability_ie * pHtCapability, struct rt_add_ht_info_ie * pAddHtInfo, /* AP might use this additional ht info IE */
- u8 * pHtCapabilityLen,
- u8 * pAddHtInfoLen,
- u8 * pNewExtChannelOffset,
- struct rt_edca_parm *pEdcaParm, u8 * pCkipFlag);
+BOOLEAN PeerAssocRspSanity(struct rt_rtmp_adapter *pAd, void *pMsg, unsigned long MsgLen, u8 *pAddr2, u16 *pCapabilityInfo, u16 *pStatus, u16 *pAid, u8 SupRate[], u8 *pSupRateLen, u8 ExtRate[], u8 *pExtRateLen, struct rt_ht_capability_ie *pHtCapability, struct rt_add_ht_info_ie *pAddHtInfo, /* AP might use this additional ht info IE */
+ u8 *pHtCapabilityLen,
+ u8 *pAddHtInfoLen,
+ u8 *pNewExtChannelOffset,
+ struct rt_edca_parm *pEdcaParm, u8 *pCkipFlag);
BOOLEAN PeerDisassocSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
- u8 *pAddr2, u16 * Reason);
+ u8 *pAddr2, u16 *Reason);
BOOLEAN PeerWpaMessageSanity(struct rt_rtmp_adapter *pAd,
- struct rt_eapol_packet * pMsg,
+ struct rt_eapol_packet *pMsg,
unsigned long MsgLen,
u8 MsgType, struct rt_mac_table_entry *pEntry);
BOOLEAN PeerDeauthSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
- u8 *pAddr2, u16 * Reason);
+ u8 *pAddr2, u16 *Reason);
BOOLEAN PeerProbeReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg,
+ void *Msg,
unsigned long MsgLen,
u8 *pAddr2,
- char Ssid[], u8 * pSsidLen);
+ char Ssid[], u8 *pSsidLen);
-BOOLEAN GetTimBit(char * Ptr,
+BOOLEAN GetTimBit(char *Ptr,
u16 Aid,
- u8 * TimLen,
- u8 * BcastFlag,
- u8 * DtimCount,
- u8 * DtimPeriod, u8 * MessageToMe);
+ u8 *TimLen,
+ u8 *BcastFlag,
+ u8 *DtimCount,
+ u8 *DtimPeriod, u8 *MessageToMe);
u8 ChannelSanity(struct rt_rtmp_adapter *pAd, u8 channel);
NDIS_802_11_NETWORK_TYPE NetworkTypeInUseSanity(struct rt_bss_entry *pBss);
BOOLEAN MlmeDelBAReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg, unsigned long MsgLen);
+ void *Msg, unsigned long MsgLen);
BOOLEAN MlmeAddBAReqSanity(struct rt_rtmp_adapter *pAd,
- void * Msg, unsigned long MsgLen, u8 *pAddr2);
+ void *Msg, unsigned long MsgLen, u8 *pAddr2);
-unsigned long MakeOutgoingFrame(u8 * Buffer, unsigned long * Length, ...);
+unsigned long MakeOutgoingFrame(u8 *Buffer, unsigned long *Length, ...);
void LfsrInit(struct rt_rtmp_adapter *pAd, unsigned long Seed);
@@ -3215,7 +3202,7 @@ void MlmeSetTxRate(struct rt_rtmp_adapter *pAd,
void MlmeSelectTxRateTable(struct rt_rtmp_adapter *pAd,
struct rt_mac_table_entry *pEntry,
- u8 ** ppTable,
+ u8 **ppTable,
u8 *pTableSize, u8 *pInitTxRateIdx);
void MlmeCalculateChannelQuality(struct rt_rtmp_adapter *pAd,
@@ -3235,15 +3222,15 @@ void MlmeUpdateTxRates(struct rt_rtmp_adapter *pAd,
void MlmeUpdateHtTxRates(struct rt_rtmp_adapter *pAd, u8 apidx);
void RTMPCheckRates(struct rt_rtmp_adapter *pAd,
- IN u8 SupRate[], IN u8 * SupRateLen);
+ IN u8 SupRate[], IN u8 *SupRateLen);
BOOLEAN RTMPCheckChannel(struct rt_rtmp_adapter *pAd,
u8 CentralChannel, u8 Channel);
BOOLEAN RTMPCheckHt(struct rt_rtmp_adapter *pAd,
u8 Wcid,
- struct rt_ht_capability_ie * pHtCapability,
- struct rt_add_ht_info_ie * pAddHtInfo);
+ struct rt_ht_capability_ie *pHtCapability,
+ struct rt_add_ht_info_ie *pAddHtInfo);
void StaQuickResponeForRateUpExec(void *SystemSpecific1,
void *FunctionContext,
@@ -3268,7 +3255,7 @@ int set_eFusedump_Proc(struct rt_rtmp_adapter *pAd, char *arg);
void eFusePhysicalReadRegisters(struct rt_rtmp_adapter *pAd,
u16 Offset,
- u16 Length, u16 * pData);
+ u16 Length, u16 *pData);
int RtmpEfuseSupportCheck(struct rt_rtmp_adapter *pAd);
@@ -3391,7 +3378,7 @@ int RT_CfgSetWepKey(struct rt_rtmp_adapter *pAd,
int RT_CfgSetWPAPSKKey(struct rt_rtmp_adapter *pAd,
char *keyString,
- u8 * pHashStr,
+ u8 *pHashStr,
int hashStrLen, u8 *pPMKBuf);
/* */
@@ -3402,9 +3389,9 @@ void RTMPWPARemoveAllKeys(struct rt_rtmp_adapter *pAd);
void RTMPSetPhyMode(struct rt_rtmp_adapter *pAd, unsigned long phymode);
void RTMPUpdateHTIE(struct rt_ht_capability *pRtHt,
- u8 * pMcsSet,
- struct rt_ht_capability_ie * pHtCapability,
- struct rt_add_ht_info_ie * pAddHtInfo);
+ u8 *pMcsSet,
+ struct rt_ht_capability_ie *pHtCapability,
+ struct rt_add_ht_info_ie *pAddHtInfo);
void RTMPAddWcidAttributeEntry(struct rt_rtmp_adapter *pAd,
u8 BssIdx,
@@ -3436,22 +3423,22 @@ void RTMPToWirelessSta(struct rt_rtmp_adapter *pAd,
u32 DataLen, IN BOOLEAN bClearFrame);
void WpaDerivePTK(struct rt_rtmp_adapter *pAd,
- u8 * PMK,
- u8 * ANonce,
- u8 * AA,
- u8 * SNonce,
- u8 * SA, u8 * output, u32 len);
+ u8 *PMK,
+ u8 *ANonce,
+ u8 *AA,
+ u8 *SNonce,
+ u8 *SA, u8 *output, u32 len);
-void GenRandom(struct rt_rtmp_adapter *pAd, u8 * macAddr, u8 * random);
+void GenRandom(struct rt_rtmp_adapter *pAd, u8 *macAddr, u8 *random);
BOOLEAN RTMPCheckWPAframe(struct rt_rtmp_adapter *pAd,
struct rt_mac_table_entry *pEntry,
u8 *pData,
unsigned long DataByteCount, u8 FromWhichBSSID);
-void AES_GTK_KEY_UNWRAP(u8 * key,
- u8 * plaintext,
- u32 c_len, u8 * ciphertext);
+void AES_GTK_KEY_UNWRAP(u8 *key,
+ u8 *plaintext,
+ u32 c_len, u8 *ciphertext);
BOOLEAN RTMPParseEapolKeyData(struct rt_rtmp_adapter *pAd,
u8 *pKeyData,
@@ -3464,11 +3451,11 @@ void ConstructEapolMsg(struct rt_mac_table_entry *pEntry,
u8 GroupKeyWepStatus,
u8 MsgType,
u8 DefaultKeyIdx,
- u8 * KeyNonce,
- u8 * TxRSC,
- u8 * GTK,
- u8 * RSNIE,
- u8 RSNIE_Len, struct rt_eapol_packet * pMsg);
+ u8 *KeyNonce,
+ u8 *TxRSC,
+ u8 *GTK,
+ u8 *RSNIE,
+ u8 RSNIE_Len, struct rt_eapol_packet *pMsg);
int RTMPSoftDecryptBroadCastData(struct rt_rtmp_adapter *pAd,
struct rt_rx_blk *pRxBlk,
@@ -3515,66 +3502,66 @@ void PeerGroupMsg1Action(struct rt_rtmp_adapter *pAd,
void PeerGroupMsg2Action(struct rt_rtmp_adapter *pAd,
struct rt_mac_table_entry *pEntry,
- void * Msg, u32 MsgLen);
+ void *Msg, u32 MsgLen);
-void WpaDeriveGTK(u8 * PMK,
- u8 * GNonce,
- u8 * AA, u8 * output, u32 len);
+void WpaDeriveGTK(u8 *PMK,
+ u8 *GNonce,
+ u8 *AA, u8 *output, u32 len);
-void AES_GTK_KEY_WRAP(u8 * key,
- u8 * plaintext,
- u32 p_len, u8 * ciphertext);
+void AES_GTK_KEY_WRAP(u8 *key,
+ u8 *plaintext,
+ u32 p_len, u8 *ciphertext);
/*typedef void (*TIMER_FUNCTION)(unsigned long); */
/* timeout -- ms */
-void RTMP_SetPeriodicTimer(struct timer_list * pTimer,
+void RTMP_SetPeriodicTimer(struct timer_list *pTimer,
IN unsigned long timeout);
void RTMP_OS_Init_Timer(struct rt_rtmp_adapter *pAd,
- struct timer_list * pTimer,
+ struct timer_list *pTimer,
IN TIMER_FUNCTION function, void *data);
-void RTMP_OS_Add_Timer(struct timer_list * pTimer,
+void RTMP_OS_Add_Timer(struct timer_list *pTimer,
IN unsigned long timeout);
-void RTMP_OS_Mod_Timer(struct timer_list * pTimer,
+void RTMP_OS_Mod_Timer(struct timer_list *pTimer,
IN unsigned long timeout);
-void RTMP_OS_Del_Timer(struct timer_list * pTimer,
- OUT BOOLEAN * pCancelled);
+void RTMP_OS_Del_Timer(struct timer_list *pTimer,
+ OUT BOOLEAN *pCancelled);
void RTMP_OS_Release_Packet(struct rt_rtmp_adapter *pAd, struct rt_queue_entry *pEntry);
void RTMPusecDelay(unsigned long usec);
int os_alloc_mem(struct rt_rtmp_adapter *pAd,
- u8 ** mem, unsigned long size);
+ u8 **mem, unsigned long size);
int os_free_mem(struct rt_rtmp_adapter *pAd, void *mem);
void RTMP_AllocateSharedMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress);
void RTMPFreeTxRxRingMemory(struct rt_rtmp_adapter *pAd);
-int AdapterBlockAllocateMemory(void *handle, void ** ppAd);
+int AdapterBlockAllocateMemory(void *handle, void **ppAd);
void RTMP_AllocateTxDescMemory(struct rt_rtmp_adapter *pAd,
u32 Index,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress);
void RTMP_AllocateFirstTxBuffer(struct rt_rtmp_adapter *pAd,
u32 Index,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress);
void RTMP_FreeFirstTxBuffer(struct rt_rtmp_adapter *pAd,
@@ -3586,13 +3573,13 @@ void RTMP_FreeFirstTxBuffer(struct rt_rtmp_adapter *pAd,
void RTMP_AllocateMgmtDescMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress);
void RTMP_AllocateRxDescMemory(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
+ void **VirtualAddress,
dma_addr_t *PhysicalAddress);
void RTMP_FreeDescMemory(struct rt_rtmp_adapter *pAd,
@@ -3605,30 +3592,29 @@ void *RtmpOSNetPktAlloc(struct rt_rtmp_adapter *pAd, IN int size);
void *RTMP_AllocateRxPacketBuffer(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress,
- OUT dma_addr_t *
- PhysicalAddress);
+ void **VirtualAddress,
+ OUT dma_addr_t *PhysicalAddress);
void *RTMP_AllocateTxPacketBuffer(struct rt_rtmp_adapter *pAd,
unsigned long Length,
IN BOOLEAN Cached,
- void ** VirtualAddress);
+ void **VirtualAddress);
void *RTMP_AllocateFragPacketBuffer(struct rt_rtmp_adapter *pAd,
unsigned long Length);
void RTMP_QueryPacketInfo(void *pPacket,
struct rt_packet_info *pPacketInfo,
- u8 ** pSrcBufVA, u32 * pSrcBufLen);
+ u8 **pSrcBufVA, u32 *pSrcBufLen);
-void RTMP_QueryNextPacketInfo(void ** ppPacket,
+void RTMP_QueryNextPacketInfo(void **ppPacket,
struct rt_packet_info *pPacketInfo,
- u8 ** pSrcBufVA, u32 * pSrcBufLen);
+ u8 **pSrcBufVA, u32 *pSrcBufLen);
BOOLEAN RTMP_FillTxBlkInfo(struct rt_rtmp_adapter *pAd, struct rt_tx_blk *pTxBlk);
-struct rt_rtmp_sg_list *
-rt_get_sg_list_from_packet(void *pPacket, struct rt_rtmp_sg_list *sg);
+struct rt_rtmp_sg_list *rt_get_sg_list_from_packet(void *pPacket,
+ struct rt_rtmp_sg_list *sg);
void announce_802_3_packet(struct rt_rtmp_adapter *pAd, void *pPacket);
@@ -3717,23 +3703,19 @@ void wlan_802_11_to_802_3_packet(struct rt_rtmp_adapter *pAd,
{ \
u8 *_pRemovedLLCSNAP = NULL, *_pDA, *_pSA; \
\
- if (RX_BLK_TEST_FLAG(_pRxBlk, fRX_MESH)) \
- { \
+ if (RX_BLK_TEST_FLAG(_pRxBlk, fRX_MESH)) { \
_pDA = _pRxBlk->pHeader->Addr3; \
_pSA = (u8 *)_pRxBlk->pHeader + sizeof(struct rt_header_802_11); \
} \
- else \
- { \
- if (RX_BLK_TEST_FLAG(_pRxBlk, fRX_INFRA)) \
- { \
+ else {\
+ if (RX_BLK_TEST_FLAG(_pRxBlk, fRX_INFRA)) {\
_pDA = _pRxBlk->pHeader->Addr1; \
if (RX_BLK_TEST_FLAG(_pRxBlk, fRX_DLS)) \
_pSA = _pRxBlk->pHeader->Addr2; \
else \
_pSA = _pRxBlk->pHeader->Addr3; \
} \
- else \
- { \
+ else { \
_pDA = _pRxBlk->pHeader->Addr1; \
_pSA = _pRxBlk->pHeader->Addr2; \
} \
@@ -3771,8 +3753,8 @@ void Update_Rssi_Sample(struct rt_rtmp_adapter *pAd,
void *GetPacketFromRxRing(struct rt_rtmp_adapter *pAd,
OUT PRT28XX_RXD_STRUC pSaveRxD,
- OUT BOOLEAN * pbReschedule,
- IN u32 * pRxPending);
+ OUT BOOLEAN *pbReschedule,
+ IN u32 *pRxPending);
void *RTMPDeFragmentDataFrame(struct rt_rtmp_adapter *pAd, struct rt_rx_blk *pRxBlk);
@@ -3919,24 +3901,24 @@ BOOLEAN RtmpRaDevCtrlExit(struct rt_rtmp_adapter *pAd);
/* */
u16 RtmpPCI_WriteTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- IN BOOLEAN bIsLast, u16 * FreeNumber);
+ IN BOOLEAN bIsLast, u16 *FreeNumber);
u16 RtmpPCI_WriteSingleTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
IN BOOLEAN bIsLast,
- u16 * FreeNumber);
+ u16 *FreeNumber);
u16 RtmpPCI_WriteMultiTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- u8 frameNum, u16 * FreeNumber);
+ u8 frameNum, u16 *FreeNumber);
u16 RtmpPCI_WriteFragTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- u8 fragNum, u16 * FreeNumber);
+ u8 fragNum, u16 *FreeNumber);
u16 RtmpPCI_WriteSubTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- IN BOOLEAN bIsLast, u16 * FreeNumber);
+ IN BOOLEAN bIsLast, u16 *FreeNumber);
void RtmpPCI_FinalWriteTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
@@ -3955,8 +3937,8 @@ int RtmpPCIMgmtKickOut(struct rt_rtmp_adapter *pAd,
u8 *pSrcBufVA, u32 SrcBufLen);
int RTMPCheckRxError(struct rt_rtmp_adapter *pAd,
- struct rt_header_802_11 * pHeader,
- struct rt_rxwi * pRxWI, IN PRT28XX_RXD_STRUC pRxD);
+ struct rt_header_802_11 *pHeader,
+ struct rt_rxwi *pRxWI, IN PRT28XX_RXD_STRUC pRxD);
BOOLEAN RT28xxPciAsicRadioOff(struct rt_rtmp_adapter *pAd,
u8 Level, u16 TbttNumToNextWakeUp);
@@ -4138,15 +4120,15 @@ void append_pkt(struct rt_rtmp_adapter *pAd,
u8 *pHeader802_3,
u32 HdrLen,
u8 *pData,
- unsigned long DataSize, void ** ppPacket);
+ unsigned long DataSize, void **ppPacket);
u32 deaggregate_AMSDU_announce(struct rt_rtmp_adapter *pAd,
void *pPacket,
u8 *pData, unsigned long DataSize);
int RTMPCheckRxError(struct rt_rtmp_adapter *pAd,
- struct rt_header_802_11 * pHeader,
- struct rt_rxwi * pRxWI,
+ struct rt_header_802_11 *pHeader,
+ struct rt_rxwi *pRxWI,
IN PRT28XX_RXD_STRUC pRxINFO);
void RTUSBMlmeHardTransmit(struct rt_rtmp_adapter *pAd, struct rt_mgmt *pMgmt);
@@ -4173,20 +4155,20 @@ void RTMPWriteTxInfo(struct rt_rtmp_adapter *pAd,
/* */
u16 RtmpUSB_WriteSubTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- IN BOOLEAN bIsLast, u16 * FreeNumber);
+ IN BOOLEAN bIsLast, u16 *FreeNumber);
u16 RtmpUSB_WriteSingleTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
IN BOOLEAN bIsLast,
- u16 * FreeNumber);
+ u16 *FreeNumber);
u16 RtmpUSB_WriteFragTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- u8 fragNum, u16 * FreeNumber);
+ u8 fragNum, u16 *FreeNumber);
u16 RtmpUSB_WriteMultiTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
- u8 frameNum, u16 * FreeNumber);
+ u8 frameNum, u16 *FreeNumber);
void RtmpUSB_FinalWriteTxResource(struct rt_rtmp_adapter *pAd,
struct rt_tx_blk *pTxBlk,
@@ -4205,7 +4187,7 @@ int RtmpUSBMgmtKickOut(struct rt_rtmp_adapter *pAd,
void RtmpUSBNullFrameKickOut(struct rt_rtmp_adapter *pAd,
u8 QueIdx,
- u8 * pNullFrame, u32 frameLen);
+ u8 *pNullFrame, u32 frameLen);
void RtmpUsbStaAsicForceWakeupTimeout(void *SystemSpecific1,
void *FunctionContext,
@@ -4245,9 +4227,9 @@ void AsicStaBbpTuning(struct rt_rtmp_adapter *pAd);
BOOLEAN StaAddMacTableEntry(struct rt_rtmp_adapter *pAd,
struct rt_mac_table_entry *pEntry,
u8 MaxSupportedRateIn500Kbps,
- struct rt_ht_capability_ie * pHtCapability,
+ struct rt_ht_capability_ie *pHtCapability,
u8 HtCapabilityLen,
- struct rt_add_ht_info_ie * pAddHtInfo,
+ struct rt_add_ht_info_ie *pAddHtInfo,
u8 AddHtInfoLen, u16 CapabilityInfo);
BOOLEAN AUTH_ReqSend(struct rt_rtmp_adapter *pAd,
@@ -4313,7 +4295,7 @@ void RtmpOSNetDevClose(struct net_device *pNetDev);
void RtmpOSNetDevDetach(struct net_device *pNetDev);
-int RtmpOSNetDevAlloc(struct net_device ** pNewNetDev, u32 privDataSize);
+int RtmpOSNetDevAlloc(struct net_device **pNewNetDev, u32 privDataSize);
void RtmpOSNetDevFree(struct net_device *pNetDev);
diff --git a/drivers/staging/rt2860/sta/assoc.c b/drivers/staging/rt2860/sta/assoc.c
index 7055f229e51..6e85d5e6554 100644
--- a/drivers/staging/rt2860/sta/assoc.c
+++ b/drivers/staging/rt2860/sta/assoc.c
@@ -1596,7 +1596,6 @@ BOOLEAN StaAddMacTableEntry(struct rt_rtmp_adapter *pAd,
union iwreq_data wrqu;
wext_notify_event_assoc(pAd);
- memset(wrqu.ap_addr.sa_data, 0, MAC_ADDR_LEN);
memcpy(wrqu.ap_addr.sa_data, pAd->MlmeAux.Bssid, MAC_ADDR_LEN);
wireless_send_event(pAd->net_dev, SIOCGIWAP, &wrqu, NULL);
diff --git a/drivers/staging/rt2860/sta_ioctl.c b/drivers/staging/rt2860/sta_ioctl.c
index de4b6277bae..112da7a6c41 100644
--- a/drivers/staging/rt2860/sta_ioctl.c
+++ b/drivers/staging/rt2860/sta_ioctl.c
@@ -608,7 +608,6 @@ int rt_ioctl_siwap(struct net_device *dev,
/* Prevent to connect AP again in STAMlmePeriodicExec */
pAdapter->MlmeAux.AutoReconnectSsidLen = 32;
- memset(Bssid, 0, MAC_ADDR_LEN);
memcpy(Bssid, ap_addr->sa_data, MAC_ADDR_LEN);
MlmeEnqueue(pAdapter,
MLME_CNTL_STATE_MACHINE,
@@ -1047,8 +1046,7 @@ int rt_ioctl_giwscan(struct net_device *dev,
if (tmpRate == 0x6c
&& pAdapter->ScanTab.BssEntry[i].HtCapabilityLen >
0) {
- int rate_count =
- sizeof(ralinkrate) / sizeof(__s32);
+ int rate_count = ARRAY_SIZE(ralinkrate);
struct rt_ht_cap_info capInfo =
pAdapter->ScanTab.BssEntry[i].HtCapability.
HtCapInfo;
@@ -1061,10 +1059,11 @@ int rt_ioctl_giwscan(struct net_device *dev,
int rate_index =
12 + ((u8)capInfo.ChannelWidth * 24) +
((u8)shortGI * 48) + ((u8)maxMCS);
+
if (rate_index < 0)
rate_index = 0;
- if (rate_index > rate_count)
- rate_index = rate_count;
+ if (rate_index >= rate_count)
+ rate_index = rate_count - 1;
iwe.u.bitrate.value =
ralinkrate[rate_index] * 500000;
}
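The two rt_ioctl_giw*() hunks replace the open-coded sizeof division with ARRAY_SIZE() and fix an off-by-one: with rate_count elements the last valid index is rate_count - 1, so the clamp must use >=. A stand-alone sketch of the same clamp (the table values below are placeholders, not the real ralinkrate[] contents):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		static const int ralinkrate_sketch[] = { 2, 4, 11, 22, 12, 18, 24, 36 };
		int rate_count = ARRAY_SIZE(ralinkrate_sketch);
		int rate_index = 12;		/* deliberately out of range */

		if (rate_index < 0)
			rate_index = 0;
		if (rate_index >= rate_count)	/* was '>', which permitted an out-of-bounds read */
			rate_index = rate_count - 1;
		printf("%d\n", ralinkrate_sketch[rate_index]);
		return 0;
	}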
@@ -2338,7 +2337,7 @@ int rt_ioctl_giwrate(struct net_device *dev,
*/
GET_PAD_FROM_NET_DEV(pAd, dev);
- rate_count = sizeof(ralinkrate) / sizeof(__s32);
+ rate_count = ARRAY_SIZE(ralinkrate);
/*check if the interface is down */
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_INTERRUPT_IN_USE)) {
DBGPRINT(RT_DEBUG_TRACE, ("INFO::Network is down!\n"));
@@ -2369,8 +2368,8 @@ int rt_ioctl_giwrate(struct net_device *dev,
if (rate_index < 0)
rate_index = 0;
- if (rate_index > rate_count)
- rate_index = rate_count;
+ if (rate_index >= rate_count)
+ rate_index = rate_count - 1;
wrqu->bitrate.value = ralinkrate[rate_index] * 500000;
wrqu->bitrate.disabled = 0;
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index 740db0c1ac0..b740662d095 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -98,6 +98,7 @@ struct usb_device_id rtusb_usb_id[] = {
{USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
{USB_DEVICE(0x7392, 0x7718)},
{USB_DEVICE(0x7392, 0x7717)},
+ {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
{USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
{USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
{USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
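The new VID/PID pair above is what lets the driver bind to the Buffalo WLI-UC-G301N. A minimal sketch of how such an entry sits in a hot-plug match table (example_ids is hypothetical, not the driver's rtusb_usb_id[]):

	#include <linux/usb.h>

	static const struct usb_device_id example_ids[] = {
		{ USB_DEVICE(0x0411, 0x016f) },	/* MelCo./Buffalo WLI-UC-G301N */
		{ }				/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);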
@@ -154,7 +155,7 @@ static void rt2870_disconnect(struct usb_device *dev, struct rt_rtmp_adapter *pA
static int __devinit rt2870_probe(IN struct usb_interface *intf,
IN struct usb_device *usb_dev,
IN const struct usb_device_id *dev_id,
- struct rt_rtmp_adapter ** ppAd);
+ struct rt_rtmp_adapter **ppAd);
#ifndef PF_NOFREEZE
#define PF_NOFREEZE 0
@@ -802,7 +803,7 @@ static void rt2870_disconnect(struct usb_device *dev, struct rt_rtmp_adapter *pA
static int __devinit rt2870_probe(IN struct usb_interface *intf,
IN struct usb_device *usb_dev,
IN const struct usb_device_id *dev_id,
- struct rt_rtmp_adapter ** ppAd)
+ struct rt_rtmp_adapter **ppAd)
{
struct net_device *net_dev = NULL;
struct rt_rtmp_adapter *pAd = (struct rt_rtmp_adapter *)NULL;
diff --git a/drivers/staging/rt2870/Kconfig b/drivers/staging/rt2870/Kconfig
index 6ea172b433e..e988680b5be 100644
--- a/drivers/staging/rt2870/Kconfig
+++ b/drivers/staging/rt2870/Kconfig
@@ -1,6 +1,6 @@
config RT2870
tristate "Ralink 2870/3070 wireless support"
- depends on USB && X86 && WLAN
+ depends on USB && (X86 || ARM) && WLAN
select WIRELESS_EXT
select WEXT_PRIV
select CRC_CCITT
diff --git a/drivers/staging/rt2870/common/rtusb_bulk.c b/drivers/staging/rt2870/common/rtusb_bulk.c
index 379780c72b3..d2746f8732e 100644
--- a/drivers/staging/rt2870/common/rtusb_bulk.c
+++ b/drivers/staging/rt2870/common/rtusb_bulk.c
@@ -172,11 +172,11 @@ void RTUSBInitRxDesc(struct rt_rtmp_adapter *pAd, struct rt_rx_context *pRxConte
*/
#define BULK_OUT_LOCK(pLock, IrqFlags) \
- if(1 /*!(in_interrupt() & 0xffff0000)*/) \
+ if (1 /*!(in_interrupt() & 0xffff0000)*/) \
RTMP_IRQ_LOCK((pLock), IrqFlags);
#define BULK_OUT_UNLOCK(pLock, IrqFlags) \
- if(1 /*!(in_interrupt() & 0xffff0000)*/) \
+ if (1 /*!(in_interrupt() & 0xffff0000)*/) \
RTMP_IRQ_UNLOCK((pLock), IrqFlags);
void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
@@ -187,7 +187,7 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
PURB pUrb;
int ret = 0;
struct rt_txinfo *pTxInfo, *pLastTxInfo = NULL;
- struct rt_txwi * pTxWI;
+ struct rt_txwi *pTxWI;
unsigned long TmpBulkEndPos, ThisBulkSize;
unsigned long IrqFlags = 0, IrqFlags2 = 0;
u8 *pWirelessPkt, *pAppendant;
@@ -273,9 +273,9 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
}
do {
- pTxInfo = (struct rt_txinfo *)& pWirelessPkt[TmpBulkEndPos];
+ pTxInfo = (struct rt_txinfo *)&pWirelessPkt[TmpBulkEndPos];
pTxWI =
- (struct rt_txwi *) & pWirelessPkt[TmpBulkEndPos + TXINFO_SIZE];
+ (struct rt_txwi *)&pWirelessPkt[TmpBulkEndPos + TXINFO_SIZE];
if (pAd->bForcePrintTX == TRUE)
DBGPRINT(RT_DEBUG_TRACE,
@@ -310,7 +310,7 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
pHTTXContext->ENextBulkOutPosition =
TmpBulkEndPos;
break;
- } else if (((pAd->BulkOutMaxPacketSize < 512) && ((ThisBulkSize & 0xfffff800) != 0)) /*|| ( (ThisBulkSize != 0) && (pTxWI->AMPDU == 0)) */ ) { /* For USB 1.1 or peer which didn't support AMPDU, limit the BulkOut size. */
+ } else if (((pAd->BulkOutMaxPacketSize < 512) && ((ThisBulkSize & 0xfffff800) != 0)) /*|| ( (ThisBulkSize != 0) && (pTxWI->AMPDU == 0)) */) { /* For USB 1.1 or peer which didn't support AMPDU, limit the BulkOut size. */
 /* For performance in b/g mode, just check for USB 1.1 and do not care whether AMPDU is used. 2008/06/04. */
pHTTXContext->ENextBulkOutPosition =
TmpBulkEndPos;
@@ -326,7 +326,7 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
if (pTxInfo->QSEL != FIFO_EDCA) {
DBGPRINT(RT_DEBUG_ERROR,
("%s(): ====> pTxInfo->QueueSel(%d)!= FIFO_EDCA!!!!\n",
- __FUNCTION__, pTxInfo->QSEL));
+ __func__, pTxInfo->QSEL));
DBGPRINT(RT_DEBUG_ERROR,
("\tCWPos=%ld, NBPos=%ld, ENBPos=%ld, bCopy=%d!\n",
pHTTXContext->CurWritePosition,
@@ -334,7 +334,7 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
pHTTXContext->ENextBulkOutPosition,
pHTTXContext->bCopySavePad));
hex_dump("Wrong QSel Pkt:",
- (u8 *)& pWirelessPkt[TmpBulkEndPos],
+ (u8 *)&pWirelessPkt[TmpBulkEndPos],
(pHTTXContext->CurWritePosition -
pHTTXContext->NextBulkOutPosition));
}
@@ -401,9 +401,8 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
} while (TRUE);
/* adjust the pTxInfo->USBDMANextVLD value of last pTxInfo. */
- if (pLastTxInfo) {
+ if (pLastTxInfo)
pLastTxInfo->USBDMANextVLD = 0;
- }
/*
We need to copy SavedPad when following condition matched!
@@ -475,7 +474,8 @@ void RTUSBBulkOutDataPacket(struct rt_rtmp_adapter *pAd,
(usb_complete_t) RTUSBBulkOutDataPacketComplete);
pUrb = pHTTXContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) {
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) {
DBGPRINT(RT_DEBUG_ERROR,
("RTUSBBulkOutDataPacket: Submit Tx URB failed %d\n",
ret));
@@ -573,7 +573,8 @@ void RTUSBBulkOutNullFrame(struct rt_rtmp_adapter *pAd)
(usb_complete_t) RTUSBBulkOutNullFrameComplete);
pUrb = pNullContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) {
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) {
RTMP_IRQ_LOCK(&pAd->BulkOutLock[0], IrqFlags);
pAd->BulkOutPending[0] = FALSE;
pAd->watchDogTxPendingCnt[0] = 0;
@@ -667,7 +668,8 @@ void RTUSBBulkOutMLMEPacket(struct rt_rtmp_adapter *pAd, u8 Index)
pUrb->transfer_flags &= (~URB_NO_TRANSFER_DMA_MAP);
pUrb = pMLMEContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) {
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) {
DBGPRINT(RT_DEBUG_ERROR,
("RTUSBBulkOutMLMEPacket: Submit MLME URB failed %d\n",
ret));
@@ -742,7 +744,8 @@ void RTUSBBulkOutPsPoll(struct rt_rtmp_adapter *pAd)
(usb_complete_t) RTUSBBulkOutPsPollComplete);
pUrb = pPsPollContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) {
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) {
RTMP_IRQ_LOCK(&pAd->BulkOutLock[0], IrqFlags);
pAd->BulkOutPending[0] = FALSE;
pAd->watchDogTxPendingCnt[0] = 0;
@@ -799,7 +802,8 @@ void DoBulkIn(struct rt_rtmp_adapter *pAd)
RTUSBInitRxDesc(pAd, pRxContext);
pUrb = pRxContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) { /* fail */
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) { /* fail */
RTMP_IRQ_LOCK(&pAd->BulkInLock, IrqFlags);
pRxContext->InUse = FALSE;
@@ -949,9 +953,8 @@ void RTUSBKickBulkOut(struct rt_rtmp_adapter *pAd)
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NEED_STOP_TX)
) {
/* 2. PS-Poll frame is next */
- if (RTUSB_TEST_BULK_FLAG(pAd, fRTUSB_BULK_OUT_PSPOLL)) {
+ if (RTUSB_TEST_BULK_FLAG(pAd, fRTUSB_BULK_OUT_PSPOLL))
RTUSBBulkOutPsPoll(pAd);
- }
/* 5. Mlme frame is next */
else if ((RTUSB_TEST_BULK_FLAG(pAd, fRTUSB_BULK_OUT_MLME)) ||
(pAd->MgmtRing.TxSwFreeIdx < MGMT_RING_SIZE)) {
@@ -1014,9 +1017,8 @@ void RTUSBKickBulkOut(struct rt_rtmp_adapter *pAd)
}
}
/* 8. No data avaliable */
- else {
-
- }
+ else
+ ;
}
}
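
The rtusb_bulk.c hunks above repeatedly split "if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0)" into a separate assignment and test, since kernel coding style discourages assignments inside conditionals. The fragment below is a minimal sketch of that idiom written against the stock usb_submit_urb() call rather than the driver's RTUSB_SUBMIT_URB wrapper; the function name and error message are illustrative only.

#include <linux/usb.h>
#include <linux/printk.h>

/* Minimal sketch: keep the URB submission and the error test on
 * separate lines instead of assigning inside the if condition.
 * 'urb' is assumed to be fully initialised by the caller.
 */
static int example_submit_urb(struct urb *urb)
{
	int ret;

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret != 0) {
		pr_err("example: usb_submit_urb failed: %d\n", ret);
		return ret;
	}

	return 0;
}
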
diff --git a/drivers/staging/rt2870/common/rtusb_data.c b/drivers/staging/rt2870/common/rtusb_data.c
index 4583764c78d..69368862217 100644
--- a/drivers/staging/rt2870/common/rtusb_data.c
+++ b/drivers/staging/rt2870/common/rtusb_data.c
@@ -124,7 +124,7 @@ int RTUSBFreeDescriptorRequest(struct rt_rtmp_adapter *pAd,
}
RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[BulkOutPipeId], IrqFlags);
- return (Status);
+ return Status;
}
int RTUSBFreeDescriptorRelease(struct rt_rtmp_adapter *pAd,
@@ -138,7 +138,7 @@ int RTUSBFreeDescriptorRelease(struct rt_rtmp_adapter *pAd,
pHTTXContext->bCurWriting = FALSE;
RTMP_IRQ_UNLOCK(&pAd->TxContextQueueLock[BulkOutPipeId], IrqFlags);
- return (NDIS_STATUS_SUCCESS);
+ return NDIS_STATUS_SUCCESS;
}
BOOLEAN RTUSBNeedQueueBackForAgg(struct rt_rtmp_adapter *pAd, u8 BulkOutPipeId)
@@ -151,7 +151,7 @@ BOOLEAN RTUSBNeedQueueBackForAgg(struct rt_rtmp_adapter *pAd, u8 BulkOutPipeId)
RTMP_IRQ_LOCK(&pAd->TxContextQueueLock[BulkOutPipeId], IrqFlags);
if ((pHTTXContext->IRPPending ==
- TRUE) /*&& (pAd->TxSwQueue[BulkOutPipeId].Number == 0) */ ) {
+ TRUE) /*&& (pAd->TxSwQueue[BulkOutPipeId].Number == 0) */) {
if ((pHTTXContext->CurWritePosition <
pHTTXContext->ENextBulkOutPosition)
&&
@@ -201,7 +201,7 @@ void RTUSBRejectPendingPackets(struct rt_rtmp_adapter *pAd)
for (Index = 0; Index < 4; Index++) {
NdisAcquireSpinLock(&pAd->TxSwQueueLock[Index]);
while (pAd->TxSwQueue[Index].Head != NULL) {
- pQueue = (struct rt_queue_header *)& (pAd->TxSwQueue[Index]);
+ pQueue = (struct rt_queue_header *)&(pAd->TxSwQueue[Index]);
pEntry = RemoveHeadQueue(pQueue);
pPacket = QUEUE_ENTRY_TO_PACKET(pEntry);
RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
diff --git a/drivers/staging/rt2870/common/rtusb_io.c b/drivers/staging/rt2870/common/rtusb_io.c
index cf0d2f5dbc6..6b9ca24bbee 100644
--- a/drivers/staging/rt2870/common/rtusb_io.c
+++ b/drivers/staging/rt2870/common/rtusb_io.c
@@ -24,7 +24,7 @@
* *
*************************************************************************
- Module Name:
+ Module Name:
rtusb_io.c
Abstract:
@@ -400,8 +400,7 @@ int RTUSBWriteBBPRegister(struct rt_rtmp_adapter *pAd,
("RTUSBWriteBBPRegister(BBP_CSR_CFG):retry count=%d!\n",
i));
i++;
- }
- while ((i < RETRY_LIMIT)
+ } while ((i < RETRY_LIMIT)
&& (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)));
if ((i == RETRY_LIMIT)
@@ -455,8 +454,7 @@ int RTUSBWriteRFRegister(struct rt_rtmp_adapter *pAd, u32 Value)
("RTUSBWriteRFRegister(RF_CSR_CFG0):retry count=%d!\n",
i));
i++;
- }
- while ((i < RETRY_LIMIT)
+ } while ((i < RETRY_LIMIT)
&& (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)));
if ((i == RETRY_LIMIT)
@@ -652,11 +650,11 @@ int RTUSBEnqueueCmdFromNdis(struct rt_rtmp_adapter *pAd,
}
else
#endif
- return (NDIS_STATUS_RESOURCES);
+ return NDIS_STATUS_RESOURCES;
status = os_alloc_mem(pAd, (u8 **) (&cmdqelmt), sizeof(struct rt_cmdqelmt));
if ((status != NDIS_STATUS_SUCCESS) || (cmdqelmt == NULL))
- return (NDIS_STATUS_RESOURCES);
+ return NDIS_STATUS_RESOURCES;
cmdqelmt->buffer = NULL;
if (pInformationBuffer != NULL) {
@@ -666,7 +664,7 @@ int RTUSBEnqueueCmdFromNdis(struct rt_rtmp_adapter *pAd,
if ((status != NDIS_STATUS_SUCCESS)
|| (cmdqelmt->buffer == NULL)) {
kfree(cmdqelmt);
- return (NDIS_STATUS_RESOURCES);
+ return NDIS_STATUS_RESOURCES;
} else {
NdisMoveMemory(cmdqelmt->buffer, pInformationBuffer,
InformationBufferLength);
@@ -698,7 +696,7 @@ int RTUSBEnqueueCmdFromNdis(struct rt_rtmp_adapter *pAd,
} else
RTUSBCMDUp(pAd);
- return (NDIS_STATUS_SUCCESS);
+ return NDIS_STATUS_SUCCESS;
}
/*
@@ -726,7 +724,7 @@ int RTUSBEnqueueInternalCmd(struct rt_rtmp_adapter *pAd,
status = os_alloc_mem(pAd, (u8 **) & cmdqelmt, sizeof(struct rt_cmdqelmt));
if ((status != NDIS_STATUS_SUCCESS) || (cmdqelmt == NULL))
- return (NDIS_STATUS_RESOURCES);
+ return NDIS_STATUS_RESOURCES;
NdisZeroMemory(cmdqelmt, sizeof(struct rt_cmdqelmt));
if (InformationBufferLength > 0) {
@@ -736,7 +734,7 @@ int RTUSBEnqueueInternalCmd(struct rt_rtmp_adapter *pAd,
if ((status != NDIS_STATUS_SUCCESS)
|| (cmdqelmt->buffer == NULL)) {
os_free_mem(pAd, cmdqelmt);
- return (NDIS_STATUS_RESOURCES);
+ return NDIS_STATUS_RESOURCES;
} else {
NdisMoveMemory(cmdqelmt->buffer, pInformationBuffer,
InformationBufferLength);
@@ -767,7 +765,7 @@ int RTUSBEnqueueInternalCmd(struct rt_rtmp_adapter *pAd,
} else
RTUSBCMDUp(pAd);
}
- return (NDIS_STATUS_SUCCESS);
+ return NDIS_STATUS_SUCCESS;
}
/*
@@ -1071,7 +1069,7 @@ void CMDHandler(struct rt_rtmp_adapter *pAd)
TXRXQ_PCNT,
&MACValue);
if ((MACValue & 0xf00000
- /*0x800000 */ ) == 0)
+ /*0x800000 */) == 0)
break;
Index++;
RTMPusecDelay(10000);
@@ -1169,11 +1167,10 @@ void CMDHandler(struct rt_rtmp_adapter *pAd)
(usb_complete_t)
RTUSBBulkOutDataPacketComplete);
- if ((ret =
- RTUSB_SUBMIT_URB
+ ret = RTUSB_SUBMIT_URB
(pHTTXContext->
- pUrb)) !=
- 0) {
+ pUrb);
+ if (ret != 0) {
RTMP_INT_LOCK
(&pAd->
BulkOutLock
@@ -1406,15 +1403,13 @@ void CMDHandler(struct rt_rtmp_adapter *pAd)
/* All transfers must be aborted or cancelled before attempting to reset the pipe. */
{
u32 MACValue;
-
{
/*while ((atomic_read(&pAd->PendingRx) > 0) && (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))) */
if ((pAd->PendingRx > 0)
&&
(!RTMP_TEST_FLAG
(pAd,
- fRTMP_ADAPTER_NIC_NOT_EXIST)))
- {
+ fRTMP_ADAPTER_NIC_NOT_EXIST))) {
DBGPRINT_RAW
(RT_DEBUG_ERROR,
("BulkIn IRP Pending!!!\n"));
@@ -1424,7 +1419,6 @@ void CMDHandler(struct rt_rtmp_adapter *pAd)
pAd->PendingRx = 0;
}
}
-
/* Wait 10ms before reading register. */
RTMPusecDelay(10000);
ntStatus =
@@ -1545,7 +1539,8 @@ void CMDHandler(struct rt_rtmp_adapter *pAd)
RTUSBInitRxDesc(pAd,
pRxContext);
pUrb = pRxContext->pUrb;
- if ((ret = RTUSB_SUBMIT_URB(pUrb)) != 0) { /* fail */
+ ret = RTUSB_SUBMIT_URB(pUrb);
+ if (ret != 0) { /* fail */
RTMP_IRQ_LOCK
(&pAd->
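
The rtusb_io.c hunks above join the do-loop's closing brace with its while condition and drop the parentheses around return values, both standard checkpatch fixes. Below is a minimal sketch of the bounded-retry shape these BBP/RF register writers use; EXAMPLE_RETRY_LIMIT and the busy predicate are placeholders, not the driver's real RETRY_LIMIT or register helpers.

#include <linux/types.h>
#include <linux/errno.h>

#define EXAMPLE_RETRY_LIMIT 100

/* Placeholder busy test; a real driver would read a status register. */
static bool example_reg_busy(void)
{
	return false;
}

/* do { ... } while (...) laid out the way checkpatch expects: the
 * "while" sits on the same line as the closing brace, and the return
 * value is not wrapped in parentheses.
 */
static int example_wait_not_busy(void)
{
	int i = 0;

	do {
		if (!example_reg_busy())
			return 0;
		i++;
	} while (i < EXAMPLE_RETRY_LIMIT);

	return -EBUSY;
}
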
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index 4c5d63fd583..c8dbcb92591 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -109,11 +109,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -207,11 +206,10 @@ int ieee80211_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
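
The ieee80211_crypt.c hunks above collapse kmalloc() followed by memset(..., 0, ...) into a single kzalloc() call, which hands back already-zeroed memory. A minimal sketch of the pattern under a hypothetical structure name, not the driver's actual types:

#include <linux/slab.h>

struct example_alg {
	const void *ops;
	int refcnt;
};

/* kzalloc() returns cleared memory, so the old kmalloc() plus
 * memset(alg, 0, sizeof(*alg)) pair becomes one call.
 */
static struct example_alg *example_alg_alloc(void)
{
	return kzalloc(sizeof(struct example_alg), GFP_KERNEL);
}
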
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index 40f1b99faad..731d2686411 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -69,10 +69,9 @@ static void * ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index a5254111d9a..ee71ee90fd8 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -70,10 +70,9 @@ static void * ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index c6c3bc38459..f790cd65f10 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -45,10 +45,9 @@ static void * prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
index 18392fce487..9d58a429c56 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
@@ -66,8 +66,8 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
if (ieee->networks)
return 0;
- ieee->networks = kmalloc(
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+ ieee->networks = kcalloc(
+ MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -75,9 +75,6 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return -ENOMEM;
}
- memset(ieee->networks, 0,
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
return 0;
}
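
The ieee80211_module.c hunk above replaces kmalloc(MAX_NETWORK_COUNT * sizeof(...)) plus an explicit memset() with kcalloc(), which both zeroes the array and checks the count-times-size multiplication for overflow. A sketch with hypothetical names standing in for MAX_NETWORK_COUNT and struct ieee80211_network:

#include <linux/slab.h>
#include <linux/types.h>

#define EXAMPLE_NETWORK_COUNT 128	/* stand-in for MAX_NETWORK_COUNT */

struct example_network {
	u8 bssid[6];
	u8 channel;
};

/* kcalloc(n, size, flags): zeroed array of n elements with an
 * overflow-checked n * size.
 */
static struct example_network *example_networks_alloc(void)
{
	return kcalloc(EXAMPLE_NETWORK_COUNT,
		       sizeof(struct example_network), GFP_KERNEL);
}
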
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index 2b7080cc2c0..3a724496e74 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -1489,8 +1489,6 @@ inline void ieee80211_process_probe_response(
memcpy(target, &network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
- if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
- ieee80211_softmac_new_net(ieee,&network);
} else {
IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
escape_essid(target->ssid,
@@ -1516,8 +1514,6 @@ inline void ieee80211_process_probe_response(
renew = 1;
//YJ,add,080819,for hidden ap,end
update_network(target, &network);
- if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
- ieee80211_softmac_new_net(ieee,&network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index be2d17f60c3..1b838a266e0 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -1435,7 +1435,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
if(*(t++) == MFIE_TYPE_CHALLENGE){
*chlen = *(t++);
- *challenge = (u8*)kmalloc(*chlen, GFP_ATOMIC);
+ *challenge = kmalloc(*chlen, GFP_ATOMIC);
memcpy(*challenge, t, *chlen);
}
}
@@ -1555,7 +1555,8 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
//IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
- if ((status = auth_rq_parse(skb, dest))!= -1){
+ status = auth_rq_parse(skb, dest);
+ if (status != -1) {
ieee80211_resp_to_auth(ieee, status, dest);
}
//DMESG("Dest is "MACSTR, MAC2STR(dest));
@@ -2321,9 +2322,11 @@ void ieee80211_disassociate(struct ieee80211_device *ieee)
if(IS_DOT11D_ENABLE(ieee))
Dot11d_Reset(ieee);
- ieee->state = IEEE80211_NOLINK;
+
ieee->link_change(ieee->dev);
- notify_wx_assoc_event(ieee);
+ if (ieee->state == IEEE80211_LINKED)
+ notify_wx_assoc_event(ieee);
+ ieee->state = IEEE80211_NOLINK;
}
void ieee80211_associate_retry_wq(struct work_struct *work)
@@ -2664,11 +2667,11 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -2858,8 +2861,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
- new_crypt = (struct ieee80211_crypt_data *)
- kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
@@ -2950,7 +2952,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL){
ret = -ENOMEM;
goto out;
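
The wpa_ie hunk above replaces an open-coded kmalloc() + memcpy() pair with kmemdup(), which allocates a buffer and copies the source into it in one call. A minimal sketch; the function and parameter names here are illustrative, not the driver's:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Duplicate an information element with kmemdup() instead of
 * kmalloc() followed by memcpy(); returns NULL on allocation failure.
 */
static u8 *example_dup_ie(const u8 *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
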
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index ad42bcdc937..e46ff2ffa09 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -277,8 +277,6 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
chan = ieee->current_network.channel;
- netif_carrier_off(ieee->dev);
-
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
@@ -300,8 +298,6 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
ieee80211_start_send_beacons(ieee);
- netif_carrier_on(ieee->dev);
-
//YJ,add,080828, In prevent of lossing ping packet during scanning
//ieee80211_sta_ps_send_null_frame(ieee, false);
//YJ,add,080828,end
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
index c5b80f9c32c..07d8dbcdca2 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
@@ -325,11 +325,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops)
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
@@ -728,10 +727,9 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 55d12e3271d..dacefea7811 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -44,45 +44,45 @@
#include "ieee80211/dot11d.h"
static struct pci_device_id rtl8180_pci_id_tbl[] __devinitdata = {
- {
- .vendor = PCI_VENDOR_ID_REALTEK,
- .device = 0x8199,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = 0,
- },
- {
- .vendor = 0,
- .device = 0,
- .subvendor = 0,
- .subdevice = 0,
- .driver_data = 0,
- }
+ {
+ .vendor = PCI_VENDOR_ID_REALTEK,
+ .device = 0x8199,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = 0,
+ },
+ {
+ .vendor = 0,
+ .device = 0,
+ .subvendor = 0,
+ .subdevice = 0,
+ .driver_data = 0,
+ }
};
-static char* ifname = "wlan%d";
+static char *ifname = "wlan%d";
static int hwseqnum = 0;
static int hwwep = 0;
static int channels = 0x3fff;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5])
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl);
MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL8180 / RTL8185 WiFi cards");
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
-module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
-module_param(hwwep,int, S_IRUGO|S_IWUSR);
-module_param(channels,int, S_IRUGO|S_IWUSR);
+module_param(ifname, charp, S_IRUGO|S_IWUSR);
+module_param(hwseqnum, int, S_IRUGO|S_IWUSR);
+module_param(hwwep, int, S_IRUGO|S_IWUSR);
+module_param(channels, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(devname," Net interface name, wlan%d=default");
-MODULE_PARM_DESC(hwseqnum," Try to use hardware 802.11 header sequence numbers. Zero=default");
-MODULE_PARM_DESC(hwwep," Try to use hardware WEP support. Still broken and not available on all cards");
-MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
+MODULE_PARM_DESC(devname, " Net interface name, wlan%d=default");
+MODULE_PARM_DESC(hwseqnum, " Try to use hardware 802.11 header sequence numbers. Zero=default");
+MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support. Still broken and not available on all cards");
+MODULE_PARM_DESC(channels, " Channel bitmask for specific locales. NYI");
static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
@@ -90,7 +90,7 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
static void __devexit rtl8180_pci_remove(struct pci_dev *pdev);
-static void rtl8180_shutdown (struct pci_dev *pdev)
+static void rtl8180_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev->netdev_ops->ndo_stop)
@@ -168,40 +168,40 @@ static struct pci_driver rtl8180_pci_driver = {
u8 read_nic_byte(struct net_device *dev, int x)
{
- return 0xff&readb((u8*)dev->mem_start +x);
+ return 0xff&readb((u8 *)dev->mem_start + x);
}
u32 read_nic_dword(struct net_device *dev, int x)
{
- return readl((u8*)dev->mem_start +x);
+ return readl((u8 *)dev->mem_start + x);
}
u16 read_nic_word(struct net_device *dev, int x)
{
- return readw((u8*)dev->mem_start +x);
+ return readw((u8 *)dev->mem_start + x);
}
-void write_nic_byte(struct net_device *dev, int x,u8 y)
+void write_nic_byte(struct net_device *dev, int x, u8 y)
{
- writeb(y,(u8*)dev->mem_start +x);
+ writeb(y, (u8 *)dev->mem_start + x);
udelay(20);
}
-void write_nic_dword(struct net_device *dev, int x,u32 y)
+void write_nic_dword(struct net_device *dev, int x, u32 y)
{
- writel(y,(u8*)dev->mem_start +x);
+ writel(y, (u8 *)dev->mem_start + x);
udelay(20);
}
-void write_nic_word(struct net_device *dev, int x,u16 y)
+void write_nic_word(struct net_device *dev, int x, u16 y)
{
- writew(y,(u8*)dev->mem_start +x);
+ writew(y, (u8 *)dev->mem_start + x);
udelay(20);
}
inline void force_pci_posting(struct net_device *dev)
{
- read_nic_byte(dev,EPROM_CMD);
+ read_nic_byte(dev, EPROM_CMD);
mb();
}
@@ -220,7 +220,7 @@ static int proc_get_registers(char *page, char **start,
{
struct net_device *dev = data;
int len = 0;
- int i,n;
+ int i, n;
int max = 0xff;
/* This dump the current register page */
@@ -231,7 +231,7 @@ static int proc_get_registers(char *page, char **start,
len += snprintf(page + len, count - len, "%2x ",
read_nic_byte(dev, n));
}
- len += snprintf(page + len, count - len,"\n");
+ len += snprintf(page + len, count - len, "\n");
*eof = 1;
return len;
@@ -287,7 +287,7 @@ static int proc_get_stats_tx(char *page, char **start,
int len = 0;
unsigned long totalOK;
- totalOK=priv->stats.txnpokint+priv->stats.txhpokint+priv->stats.txlpokint;
+ totalOK = priv->stats.txnpokint+priv->stats.txhpokint+priv->stats.txlpokint;
len += snprintf(page + len, count - len,
"TX OK: %lu\n"
"TX Error: %lu\n"
@@ -308,12 +308,12 @@ static int proc_get_stats_tx(char *page, char **start,
void rtl8180_proc_module_init(void)
{
DMESG("Initializing proc filesystem");
- rtl8180_proc=create_proc_entry(RTL8180_MODULE_NAME, S_IFDIR, init_net.proc_net);
+ rtl8180_proc = create_proc_entry(RTL8180_MODULE_NAME, S_IFDIR, init_net.proc_net);
}
void rtl8180_proc_module_remove(void)
{
- remove_proc_entry(RTL8180_MODULE_NAME, init_net.proc_net);
+ remove_proc_entry(RTL8180_MODULE_NAME, init_net.proc_net);
}
void rtl8180_proc_remove_one(struct net_device *dev)
@@ -383,82 +383,83 @@ void rtl8180_proc_init_one(struct net_device *dev)
short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
struct buffer **bufferhead)
{
- struct buffer *tmp;
+ struct buffer *tmp;
- if(! *buffer){
+ if (!*buffer) {
- *buffer = kmalloc(sizeof(struct buffer),GFP_KERNEL);
+ *buffer = kmalloc(sizeof(struct buffer), GFP_KERNEL);
if (*buffer == NULL) {
DMESGE("Failed to kmalloc head of TX/RX struct");
return -1;
}
- (*buffer)->next=*buffer;
- (*buffer)->buf=buf;
- (*buffer)->dma=dma;
- if(bufferhead !=NULL)
+ (*buffer)->next = *buffer;
+ (*buffer)->buf = buf;
+ (*buffer)->dma = dma;
+ if (bufferhead != NULL)
(*bufferhead) = (*buffer);
return 0;
}
- tmp=*buffer;
+ tmp = *buffer;
- while(tmp->next!=(*buffer)) tmp=tmp->next;
- if ((tmp->next= kmalloc(sizeof(struct buffer),GFP_KERNEL)) == NULL){
+ while (tmp->next != (*buffer))
+ tmp = tmp->next;
+ tmp->next = kmalloc(sizeof(struct buffer), GFP_KERNEL);
+ if (tmp->next == NULL) {
DMESGE("Failed to kmalloc TX/RX struct");
return -1;
}
- tmp->next->buf=buf;
- tmp->next->dma=dma;
- tmp->next->next=*buffer;
+ tmp->next->buf = buf;
+ tmp->next->dma = dma;
+ tmp->next->next = *buffer;
return 0;
}
-void buffer_free(struct net_device *dev,struct buffer **buffer,int len,short
-consistent)
+void buffer_free(struct net_device *dev, struct buffer **buffer, int len, short consistent)
{
- struct buffer *tmp,*next;
+ struct buffer *tmp, *next;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- struct pci_dev *pdev=priv->pdev;
+ struct pci_dev *pdev = priv->pdev;
if (!*buffer)
return;
tmp = *buffer;
- do{
- next=tmp->next;
- if(consistent){
- pci_free_consistent(pdev,len,
- tmp->buf,tmp->dma);
- }else{
+ do {
+ next = tmp->next;
+ if (consistent) {
+ pci_free_consistent(pdev, len,
+ tmp->buf, tmp->dma);
+ } else {
pci_unmap_single(pdev, tmp->dma,
- len,PCI_DMA_FROMDEVICE);
+ len, PCI_DMA_FROMDEVICE);
kfree(tmp->buf);
}
kfree(tmp);
tmp = next;
}
- while(next != *buffer);
+ while (next != *buffer);
- *buffer=NULL;
+ *buffer = NULL;
}
void print_buffer(u32 *buffer, int len)
{
int i;
- u8 *buf =(u8*)buffer;
+ u8 *buf = (u8 *)buffer;
- printk("ASCII BUFFER DUMP (len: %x):\n",len);
+ printk("ASCII BUFFER DUMP (len: %x):\n", len);
- for(i=0;i<len;i++)
- printk("%c",buf[i]);
+ for (i = 0; i < len; i++)
+ printk("%c", buf[i]);
- printk("\nBINARY BUFFER DUMP (len: %x):\n",len);
+ printk("\nBINARY BUFFER DUMP (len: %x):\n", len);
- for(i=0;i<len;i++)
- printk("%02x",buf[i]);
+ for (i = 0; i < len; i++)
+ printk("%02x", buf[i]);
printk("\n");
}
@@ -466,37 +467,37 @@ void print_buffer(u32 *buffer, int len)
int get_curr_tx_free_desc(struct net_device *dev, int priority)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- u32* tail;
- u32* head;
+ u32 *tail;
+ u32 *head;
int ret;
- switch (priority){
- case MANAGE_PRIORITY:
- head = priv->txmapringhead;
- tail = priv->txmapringtail;
- break;
- case BK_PRIORITY:
- head = priv->txbkpringhead;
- tail = priv->txbkpringtail;
- break;
- case BE_PRIORITY:
- head = priv->txbepringhead;
- tail = priv->txbepringtail;
- break;
- case VI_PRIORITY:
- head = priv->txvipringhead;
- tail = priv->txvipringtail;
- break;
- case VO_PRIORITY:
- head = priv->txvopringhead;
- tail = priv->txvopringtail;
- break;
- case HI_PRIORITY:
- head = priv->txhpringhead;
- tail = priv->txhpringtail;
- break;
- default:
- return -1;
+ switch (priority) {
+ case MANAGE_PRIORITY:
+ head = priv->txmapringhead;
+ tail = priv->txmapringtail;
+ break;
+ case BK_PRIORITY:
+ head = priv->txbkpringhead;
+ tail = priv->txbkpringtail;
+ break;
+ case BE_PRIORITY:
+ head = priv->txbepringhead;
+ tail = priv->txbepringtail;
+ break;
+ case VI_PRIORITY:
+ head = priv->txvipringhead;
+ tail = priv->txvipringtail;
+ break;
+ case VO_PRIORITY:
+ head = priv->txvopringhead;
+ tail = priv->txvopringtail;
+ break;
+ case HI_PRIORITY:
+ head = priv->txhpringhead;
+ tail = priv->txhpringtail;
+ break;
+ default:
+ return -1;
}
if (head <= tail)
@@ -530,7 +531,7 @@ short check_nic_enought_desc(struct net_device *dev, int priority)
* between the tail and the head
*/
- return (required+2 < get_curr_tx_free_desc(dev,priority));
+ return (required+2 < get_curr_tx_free_desc(dev, priority));
}
void fix_tx_fifo(struct net_device *dev)
@@ -539,45 +540,45 @@ void fix_tx_fifo(struct net_device *dev)
u32 *tmp;
int i;
- for (tmp=priv->txmapring, i=0;
+ for (tmp = priv->txmapring, i = 0;
i < priv->txringcount;
- tmp+=8, i++){
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txbkpring, i=0;
+ for (tmp = priv->txbkpring, i = 0;
i < priv->txringcount;
- tmp+=8, i++) {
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txbepring, i=0;
+ for (tmp = priv->txbepring, i = 0;
i < priv->txringcount;
- tmp+=8, i++){
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txvipring, i=0;
+ for (tmp = priv->txvipring, i = 0;
i < priv->txringcount;
- tmp+=8, i++) {
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txvopring, i=0;
+ for (tmp = priv->txvopring, i = 0;
i < priv->txringcount;
- tmp+=8, i++){
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txhpring, i=0;
+ for (tmp = priv->txhpring, i = 0;
i < priv->txringcount;
- tmp+=8,i++){
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
- for (tmp=priv->txbeaconring, i=0;
+ for (tmp = priv->txbeaconring, i = 0;
i < priv->txbeaconcount;
- tmp+=8, i++){
- *tmp = *tmp &~ (1<<31);
+ tmp += 8, i++) {
+ *tmp = *tmp & ~(1<<31);
}
priv->txmapringtail = priv->txmapring;
@@ -619,20 +620,20 @@ void fix_rx_fifo(struct net_device *dev)
struct buffer *rxbuf;
u8 rx_desc_size;
- rx_desc_size = 8; // 4*8 = 32 bytes
+ rx_desc_size = 8; /* 4*8 = 32 bytes */
- for (tmp=priv->rxring, rxbuf=priv->rxbufferhead;
+ for (tmp = priv->rxring, rxbuf = priv->rxbufferhead;
(tmp < (priv->rxring)+(priv->rxringcount)*rx_desc_size);
- tmp+=rx_desc_size,rxbuf=rxbuf->next){
+ tmp += rx_desc_size, rxbuf = rxbuf->next) {
*(tmp+2) = rxbuf->dma;
- *tmp=*tmp &~ 0xfff;
- *tmp=*tmp | priv->rxbuffersize;
+ *tmp = *tmp & ~0xfff;
+ *tmp = *tmp | priv->rxbuffersize;
*tmp |= (1<<31);
}
- priv->rxringtail=priv->rxring;
- priv->rxbuffer=priv->rxbufferhead;
- priv->rx_skb_complete=1;
+ priv->rxringtail = priv->rxring;
+ priv->rxbuffer = priv->rxbufferhead;
+ priv->rx_skb_complete = 1;
set_nic_rxring(dev);
}
@@ -672,25 +673,23 @@ void rtl8180_RSSI_calc(struct net_device *dev, u8 *rssi, u8 *qual)
q = *qual;
orig_qual = *qual;
- _rssi = 0; // avoid gcc complains..
+ _rssi = 0; /* avoid gcc complains.. */
if (q <= 0x4e) {
temp = QUALITY_MAP[q];
} else {
- if( q & 0x80 ) {
+ if (q & 0x80)
temp = 0x32;
- } else {
+ else
temp = 1;
- }
}
*qual = temp;
temp2 = *rssi;
- if ( _rssi < 0x64 ){
- if ( _rssi == 0 ) {
+ if (_rssi < 0x64) {
+ if (_rssi == 0)
*rssi = 1;
- }
} else {
*rssi = 0x64;
}
@@ -703,27 +702,27 @@ void rtl8180_irq_enable(struct net_device *dev)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
priv->irq_enabled = 1;
- write_nic_word(dev,INTA_MASK, priv->irq_mask);
+ write_nic_word(dev, INTA_MASK, priv->irq_mask);
}
void rtl8180_irq_disable(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- write_nic_dword(dev,IMR,0);
+ write_nic_dword(dev, IMR, 0);
force_pci_posting(dev);
priv->irq_enabled = 0;
}
-void rtl8180_set_mode(struct net_device *dev,int mode)
+void rtl8180_set_mode(struct net_device *dev, int mode)
{
u8 ecmd;
- ecmd=read_nic_byte(dev, EPROM_CMD);
- ecmd=ecmd &~ EPROM_CMD_OPERATING_MODE_MASK;
- ecmd=ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
- ecmd=ecmd &~ (1<<EPROM_CS_SHIFT);
- ecmd=ecmd &~ (1<<EPROM_CK_SHIFT);
+ ecmd = read_nic_byte(dev, EPROM_CMD);
+ ecmd = ecmd & ~EPROM_CMD_OPERATING_MODE_MASK;
+ ecmd = ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
+ ecmd = ecmd & ~(1<<EPROM_CS_SHIFT);
+ ecmd = ecmd & ~(1<<EPROM_CK_SHIFT);
write_nic_byte(dev, EPROM_CMD, ecmd);
}
@@ -737,13 +736,12 @@ void rtl8180_update_msr(struct net_device *dev)
u32 rxconf;
msr = read_nic_byte(dev, MSR);
- msr &= ~ MSR_LINK_MASK;
+ msr &= ~MSR_LINK_MASK;
- rxconf=read_nic_dword(dev,RX_CONF);
+ rxconf = read_nic_dword(dev, RX_CONF);
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
@@ -753,7 +751,7 @@ void rtl8180_update_msr(struct net_device *dev)
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
rxconf |= (1<<RX_CHECK_BSSID_SHIFT);
- }else {
+ } else {
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
rxconf &= ~(1<<RX_CHECK_BSSID_SHIFT);
}
@@ -762,7 +760,7 @@ void rtl8180_update_msr(struct net_device *dev)
write_nic_dword(dev, RX_CONF, rxconf);
}
-void rtl8180_set_chan(struct net_device *dev,short ch)
+void rtl8180_set_chan(struct net_device *dev, short ch)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
@@ -771,8 +769,8 @@ void rtl8180_set_chan(struct net_device *dev,short ch)
return;
}
- priv->chan=ch;
- priv->rf_set_chan(dev,priv->chan);
+ priv->chan = ch;
+ priv->rf_set_chan(dev, priv->chan);
}
void rtl8180_rx_enable(struct net_device *dev)
@@ -782,8 +780,8 @@ void rtl8180_rx_enable(struct net_device *dev)
/* for now we accept data, management & ctl frame*/
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rxconf=read_nic_dword(dev,RX_CONF);
- rxconf = rxconf &~ MAC_FILTER_MASK;
+ rxconf = read_nic_dword(dev, RX_CONF);
+ rxconf = rxconf & ~MAC_FILTER_MASK;
rxconf = rxconf | (1<<ACCEPT_MNG_FRAME_SHIFT);
rxconf = rxconf | (1<<ACCEPT_DATA_FRAME_SHIFT);
rxconf = rxconf | (1<<ACCEPT_BCAST_FRAME_SHIFT);
@@ -791,39 +789,39 @@ void rtl8180_rx_enable(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
DMESG("NIC in promisc mode");
- if(priv->ieee80211->iw_mode == IW_MODE_MONITOR || \
- dev->flags & IFF_PROMISC){
+ if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || \
+ dev->flags & IFF_PROMISC) {
rxconf = rxconf | (1<<ACCEPT_ALLMAC_FRAME_SHIFT);
- }else{
+ } else {
rxconf = rxconf | (1<<ACCEPT_NICMAC_FRAME_SHIFT);
}
- if(priv->ieee80211->iw_mode == IW_MODE_MONITOR){
+ if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
rxconf = rxconf | (1<<ACCEPT_CTL_FRAME_SHIFT);
rxconf = rxconf | (1<<ACCEPT_ICVERR_FRAME_SHIFT);
rxconf = rxconf | (1<<ACCEPT_PWR_FRAME_SHIFT);
}
- if( priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
+ if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | (1<<ACCEPT_CRCERR_FRAME_SHIFT);
rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE << RX_FIFO_THRESHOLD_SHIFT);
rxconf = rxconf | (1<<RX_AUTORESETPHY_SHIFT);
- rxconf = rxconf &~ MAX_RX_DMA_MASK;
+ rxconf = rxconf & ~MAX_RX_DMA_MASK;
rxconf = rxconf | (MAX_RX_DMA_2048<<MAX_RX_DMA_SHIFT);
rxconf = rxconf | RCR_ONLYERLPKT;
- rxconf = rxconf &~ RCR_CS_MASK;
+ rxconf = rxconf & ~RCR_CS_MASK;
write_nic_dword(dev, RX_CONF, rxconf);
fix_rx_fifo(dev);
- cmd=read_nic_byte(dev,CMD);
- write_nic_byte(dev,CMD,cmd | (1<<CMD_RX_ENABLE_SHIFT));
+ cmd = read_nic_byte(dev, CMD);
+ write_nic_byte(dev, CMD, cmd | (1<<CMD_RX_ENABLE_SHIFT));
}
void set_nic_txring(struct net_device *dev)
@@ -843,20 +841,20 @@ void rtl8180_conttx_enable(struct net_device *dev)
{
u32 txconf;
- txconf = read_nic_dword(dev,TX_CONF);
- txconf = txconf &~ TX_LOOPBACK_MASK;
- txconf = txconf | (TX_LOOPBACK_CONTINUE <<TX_LOOPBACK_SHIFT);
- write_nic_dword(dev,TX_CONF,txconf);
+ txconf = read_nic_dword(dev, TX_CONF);
+ txconf = txconf & ~TX_LOOPBACK_MASK;
+ txconf = txconf | (TX_LOOPBACK_CONTINUE<<TX_LOOPBACK_SHIFT);
+ write_nic_dword(dev, TX_CONF, txconf);
}
void rtl8180_conttx_disable(struct net_device *dev)
{
u32 txconf;
- txconf = read_nic_dword(dev,TX_CONF);
- txconf = txconf &~ TX_LOOPBACK_MASK;
- txconf = txconf | (TX_LOOPBACK_NONE <<TX_LOOPBACK_SHIFT);
- write_nic_dword(dev,TX_CONF,txconf);
+ txconf = read_nic_dword(dev, TX_CONF);
+ txconf = txconf & ~TX_LOOPBACK_MASK;
+ txconf = txconf | (TX_LOOPBACK_NONE<<TX_LOOPBACK_SHIFT);
+ write_nic_dword(dev, TX_CONF, txconf);
}
void rtl8180_tx_enable(struct net_device *dev)
@@ -883,54 +881,54 @@ void rtl8180_tx_enable(struct net_device *dev)
txconf = txconf & ~(1<<TCR_PROBE_NOTIMESTAMP_SHIFT);
- txconf = txconf &~ TX_LOOPBACK_MASK;
- txconf = txconf | (TX_LOOPBACK_NONE <<TX_LOOPBACK_SHIFT);
- txconf = txconf &~ TCR_DPRETRY_MASK;
- txconf = txconf &~ TCR_RTSRETRY_MASK;
+ txconf = txconf & ~TX_LOOPBACK_MASK;
+ txconf = txconf | (TX_LOOPBACK_NONE<<TX_LOOPBACK_SHIFT);
+ txconf = txconf & ~TCR_DPRETRY_MASK;
+ txconf = txconf & ~TCR_RTSRETRY_MASK;
txconf = txconf | (priv->retry_data<<TX_DPRETRY_SHIFT);
txconf = txconf | (priv->retry_rts<<TX_RTSRETRY_SHIFT);
- txconf = txconf &~ (1<<TX_NOCRC_SHIFT);
+ txconf = txconf & ~(1<<TX_NOCRC_SHIFT);
if (priv->hw_plcp_len)
txconf = txconf & ~TCR_PLCP_LEN;
else
txconf = txconf | TCR_PLCP_LEN;
- txconf = txconf &~ TCR_MXDMA_MASK;
+ txconf = txconf & ~TCR_MXDMA_MASK;
txconf = txconf | (TCR_MXDMA_2048<<TCR_MXDMA_SHIFT);
txconf = txconf | TCR_CWMIN;
txconf = txconf | TCR_DISCW;
txconf = txconf | (1 << TX_NOICV_SHIFT);
- write_nic_dword(dev,TX_CONF,txconf);
+ write_nic_dword(dev, TX_CONF, txconf);
fix_tx_fifo(dev);
- cmd=read_nic_byte(dev,CMD);
- write_nic_byte(dev,CMD,cmd | (1<<CMD_TX_ENABLE_SHIFT));
+ cmd = read_nic_byte(dev, CMD);
+ write_nic_byte(dev, CMD, cmd | (1<<CMD_TX_ENABLE_SHIFT));
- write_nic_dword(dev,TX_CONF,txconf);
+ write_nic_dword(dev, TX_CONF, txconf);
}
void rtl8180_beacon_tx_enable(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
priv->dma_poll_stop_mask &= ~(TPPOLLSTOP_BQ);
- write_nic_byte(dev,TPPollStop, priv->dma_poll_mask);
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ write_nic_byte(dev, TPPollStop, priv->dma_poll_mask);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
void rtl8180_beacon_tx_disable(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
priv->dma_poll_stop_mask |= TPPOLLSTOP_BQ;
- write_nic_byte(dev,TPPollStop, priv->dma_poll_stop_mask);
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
@@ -939,13 +937,13 @@ void rtl8180_rtx_disable(struct net_device *dev)
u8 cmd;
struct r8180_priv *priv = ieee80211_priv(dev);
- cmd=read_nic_byte(dev,CMD);
- write_nic_byte(dev, CMD, cmd &~ \
+ cmd = read_nic_byte(dev, CMD);
+ write_nic_byte(dev, CMD, cmd & ~\
((1<<CMD_RX_ENABLE_SHIFT)|(1<<CMD_TX_ENABLE_SHIFT)));
force_pci_posting(dev);
mdelay(10);
- if(!priv->rx_skb_complete)
+ if (!priv->rx_skb_complete)
dev_kfree_skb_any(priv->rx_skb);
}
@@ -960,11 +958,11 @@ short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
struct pci_dev *pdev = priv->pdev;
void *buf;
- if((bufsize & 0xfff) != bufsize) {
- DMESGE ("TX buffer allocation too large");
+ if ((bufsize & 0xfff) != bufsize) {
+ DMESGE("TX buffer allocation too large");
return 0;
}
- desc = (u32*)pci_alloc_consistent(pdev,
+ desc = (u32 *)pci_alloc_consistent(pdev,
sizeof(u32)*8*count+256, &dma_desc);
if (desc == NULL)
return -1;
@@ -983,90 +981,90 @@ short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
if (buf == NULL)
return -ENOMEM;
- switch(addr) {
+ switch (addr) {
case TX_MANAGEPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txmapbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txmapbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_BKPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txbkpbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txbkpbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_BEPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txbepbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txbepbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_VIPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txvipbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txvipbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer LP");
return -ENOMEM;
}
break;
case TX_VOPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txvopbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txvopbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer NP");
return -ENOMEM;
}
break;
case TX_HIGHPRIORITY_RING_ADDR:
- if(-1 == buffer_add(&(priv->txhpbufs),buf,dma_tmp,NULL)){
+ if (-1 == buffer_add(&(priv->txhpbufs), buf, dma_tmp, NULL)) {
DMESGE("Unable to allocate mem for buffer HP");
return -ENOMEM;
}
break;
case TX_BEACON_RING_ADDR:
- if(-1 == buffer_add(&(priv->txbeaconbufs),buf,dma_tmp,NULL)){
- DMESGE("Unable to allocate mem for buffer BP");
+ if (-1 == buffer_add(&(priv->txbeaconbufs), buf, dma_tmp, NULL)) {
+ DMESGE("Unable to allocate mem for buffer BP");
return -ENOMEM;
}
break;
}
- *tmp = *tmp &~ (1<<31); // descriptor empty, owned by the drv
+ *tmp = *tmp & ~(1<<31); /* descriptor empty, owned by the drv */
*(tmp+2) = (u32)dma_tmp;
*(tmp+3) = bufsize;
- if(i+1<count)
+ if (i+1 < count)
*(tmp+4) = (u32)dma_desc+((i+1)*8*4);
else
*(tmp+4) = (u32)dma_desc;
- tmp=tmp+8;
+ tmp = tmp+8;
}
- switch(addr) {
+ switch (addr) {
case TX_MANAGEPRIORITY_RING_ADDR:
- priv->txmapringdma=dma_desc;
- priv->txmapring=desc;
+ priv->txmapringdma = dma_desc;
+ priv->txmapring = desc;
break;
case TX_BKPRIORITY_RING_ADDR:
- priv->txbkpringdma=dma_desc;
- priv->txbkpring=desc;
+ priv->txbkpringdma = dma_desc;
+ priv->txbkpring = desc;
break;
case TX_BEPRIORITY_RING_ADDR:
- priv->txbepringdma=dma_desc;
- priv->txbepring=desc;
+ priv->txbepringdma = dma_desc;
+ priv->txbepring = desc;
break;
case TX_VIPRIORITY_RING_ADDR:
- priv->txvipringdma=dma_desc;
- priv->txvipring=desc;
+ priv->txvipringdma = dma_desc;
+ priv->txvipring = desc;
break;
case TX_VOPRIORITY_RING_ADDR:
- priv->txvopringdma=dma_desc;
- priv->txvopring=desc;
+ priv->txvopringdma = dma_desc;
+ priv->txvopring = desc;
break;
case TX_HIGHPRIORITY_RING_ADDR:
- priv->txhpringdma=dma_desc;
- priv->txhpring=desc;
+ priv->txhpringdma = dma_desc;
+ priv->txhpring = desc;
break;
case TX_BEACON_RING_ADDR:
- priv->txbeaconringdma=dma_desc;
- priv->txbeaconring=desc;
+ priv->txbeaconringdma = dma_desc;
+ priv->txbeaconring = desc;
break;
}
@@ -1077,37 +1075,37 @@ short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
void free_tx_desc_rings(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- struct pci_dev *pdev=priv->pdev;
+ struct pci_dev *pdev = priv->pdev;
int count = priv->txringcount;
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txmapring, priv->txmapringdma);
- buffer_free(dev,&(priv->txmapbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txmapbufs), priv->txbuffsize, 1);
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txbkpring, priv->txbkpringdma);
- buffer_free(dev,&(priv->txbkpbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txbkpbufs), priv->txbuffsize, 1);
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txbepring, priv->txbepringdma);
- buffer_free(dev,&(priv->txbepbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txbepbufs), priv->txbuffsize, 1);
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txvipring, priv->txvipringdma);
- buffer_free(dev,&(priv->txvipbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txvipbufs), priv->txbuffsize, 1);
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txvopring, priv->txvopringdma);
- buffer_free(dev,&(priv->txvopbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txvopbufs), priv->txbuffsize, 1);
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txhpring, priv->txhpringdma);
- buffer_free(dev,&(priv->txhpbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txhpbufs), priv->txbuffsize, 1);
count = priv->txbeaconcount;
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->txbeaconring, priv->txbeaconringdma);
- buffer_free(dev,&(priv->txbeaconbufs),priv->txbuffsize,1);
+ buffer_free(dev, &(priv->txbeaconbufs), priv->txbuffsize, 1);
}
void free_rx_desc_ring(struct net_device *dev)
@@ -1119,7 +1117,7 @@ void free_rx_desc_ring(struct net_device *dev)
pci_free_consistent(pdev, sizeof(u32)*8*count+256,
priv->rxring, priv->rxringdma);
- buffer_free(dev,&(priv->rxbuffer),priv->rxbuffersize,0);
+ buffer_free(dev, &(priv->rxbuffer), priv->rxbuffersize, 0);
}
short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
@@ -1127,20 +1125,20 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
int i;
u32 *desc;
u32 *tmp;
- dma_addr_t dma_desc,dma_tmp;
+ dma_addr_t dma_desc, dma_tmp;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- struct pci_dev *pdev=priv->pdev;
+ struct pci_dev *pdev = priv->pdev;
void *buf;
u8 rx_desc_size;
- rx_desc_size = 8; // 4*8 = 32 bytes
+ rx_desc_size = 8; /* 4*8 = 32 bytes */
- if((bufsize & 0xfff) != bufsize){
- DMESGE ("RX buffer allocation too large");
+ if ((bufsize & 0xfff) != bufsize) {
+ DMESGE("RX buffer allocation too large");
return -1;
}
- desc = (u32*)pci_alloc_consistent(pdev,sizeof(u32)*rx_desc_size*count+256,
+ desc = (u32 *)pci_alloc_consistent(pdev, sizeof(u32)*rx_desc_size*count+256,
&dma_desc);
if (dma_desc & 0xff)
@@ -1150,33 +1148,34 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
*/
WARN(1, "DMA buffer is not aligned\n");
- priv->rxring=desc;
- priv->rxringdma=dma_desc;
- tmp=desc;
+ priv->rxring = desc;
+ priv->rxringdma = dma_desc;
+ tmp = desc;
for (i = 0; i < count; i++) {
- if ((buf= kmalloc(bufsize * sizeof(u8),GFP_ATOMIC)) == NULL){
+ buf = kmalloc(bufsize * sizeof(u8), GFP_ATOMIC);
+ if (buf == NULL) {
DMESGE("Failed to kmalloc RX buffer");
return -1;
}
- dma_tmp = pci_map_single(pdev,buf,bufsize * sizeof(u8),
+ dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
PCI_DMA_FROMDEVICE);
- if(-1 == buffer_add(&(priv->rxbuffer), buf,dma_tmp,
- &(priv->rxbufferhead))){
- DMESGE("Unable to allocate mem RX buf");
- return -1;
+ if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
+ &(priv->rxbufferhead))) {
+ DMESGE("Unable to allocate mem RX buf");
+ return -1;
}
- *tmp = 0; //zero pads the header of the descriptor
- *tmp = *tmp |( bufsize&0xfff);
+ *tmp = 0; /* zero pads the header of the descriptor */
+ *tmp = *tmp | (bufsize&0xfff);
*(tmp+2) = (u32)dma_tmp;
- *tmp = *tmp |(1<<31); // descriptor void, owned by the NIC
+ *tmp = *tmp | (1<<31); /* descriptor void, owned by the NIC */
- tmp=tmp+rx_desc_size;
+ tmp = tmp+rx_desc_size;
}
- *(tmp-rx_desc_size) = *(tmp-rx_desc_size) | (1<<30); // this is the last descriptor
+ *(tmp-rx_desc_size) = *(tmp-rx_desc_size) | (1<<30); /* this is the last descriptor */
return 0;
}
@@ -1187,10 +1186,10 @@ void set_nic_rxring(struct net_device *dev)
u8 pgreg;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- pgreg=read_nic_byte(dev, PGSELECT);
- write_nic_byte(dev, PGSELECT, pgreg &~ (1<<PGSELECT_PG_SHIFT));
+ pgreg = read_nic_byte(dev, PGSELECT);
+ write_nic_byte(dev, PGSELECT, pgreg & ~(1<<PGSELECT_PG_SHIFT));
- write_nic_dword(dev, RXRING_ADDR,priv->rxringdma);
+ write_nic_dword(dev, RXRING_ADDR, priv->rxringdma);
}
void rtl8180_reset(struct net_device *dev)
@@ -1199,28 +1198,28 @@ void rtl8180_reset(struct net_device *dev)
rtl8180_irq_disable(dev);
- cr=read_nic_byte(dev,CMD);
+ cr = read_nic_byte(dev, CMD);
cr = cr & 2;
cr = cr | (1<<CMD_RST_SHIFT);
- write_nic_byte(dev,CMD,cr);
+ write_nic_byte(dev, CMD, cr);
force_pci_posting(dev);
mdelay(200);
- if(read_nic_byte(dev,CMD) & (1<<CMD_RST_SHIFT))
+ if (read_nic_byte(dev, CMD) & (1<<CMD_RST_SHIFT))
DMESGW("Card reset timeout!");
else
DMESG("Card successfully reset");
- rtl8180_set_mode(dev,EPROM_CMD_LOAD);
+ rtl8180_set_mode(dev, EPROM_CMD_LOAD);
force_pci_posting(dev);
mdelay(200);
}
inline u16 ieeerate2rtlrate(int rate)
{
- switch(rate){
+ switch (rate) {
case 10:
return 0;
case 20:
@@ -1250,7 +1249,7 @@ inline u16 ieeerate2rtlrate(int rate)
}
}
-static u16 rtl_rate[] = {10,20,55,110,60,90,120,180,240,360,480,540,720};
+static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540, 720};
inline u16 rtl8180_rate2rate(short rate)
{
@@ -1261,7 +1260,7 @@ inline u16 rtl8180_rate2rate(short rate)
inline u8 rtl8180_IsWirelessBMode(u16 rate)
{
- if( ((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220) )
+ if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
return 1;
else
return 0;
@@ -1331,15 +1330,14 @@ u16 N_DBPSOfRate(u16 DataRate)
return N_DBPS;
}
-//
-// Description:
-// For Netgear case, they want good-looking singal strength.
-//
+/*
+ * For Netgear case, they want good-looking singal strength.
+ */
long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
{
long RetSS;
- // Step 1. Scale mapping.
+ /* Step 1. Scale mapping. */
if (CurrSS >= 71 && CurrSS <= 100)
RetSS = 90 + ((CurrSS - 70) / 3);
else if (CurrSS >= 41 && CurrSS <= 70)
@@ -1361,47 +1359,45 @@ long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
else
RetSS = CurrSS;
- // Step 2. Smoothing.
- if(LastSS > 0)
- RetSS = ((LastSS * 5) + (RetSS)+ 5) / 6;
+ /* Step 2. Smoothing. */
+ if (LastSS > 0)
+ RetSS = ((LastSS * 5) + (RetSS) + 5) / 6;
return RetSS;
}
-//
-// Description:
-// Translate 0-100 signal strength index into dBm.
-//
+/*
+ * Translate 0-100 signal strength index into dBm.
+ */
long TranslateToDbm8185(u8 SignalStrengthIndex)
{
long SignalPower;
- // Translate to dBm (x=0.5y-95).
+ /* Translate to dBm (x=0.5y-95). */
SignalPower = (long)((SignalStrengthIndex + 1) >> 1);
SignalPower -= 95;
return SignalPower;
}
-//
-// Description:
-// Perform signal smoothing for dynamic mechanism.
-// This is different with PerformSignalSmoothing8185 in smoothing fomula.
-// No dramatic adjustion is apply because dynamic mechanism need some degree
-// of correctness. Ported from 8187B.
-//
+/*
+ * Perform signal smoothing for dynamic mechanism.
+ * This is different with PerformSignalSmoothing8185 in smoothing fomula.
+ * No dramatic adjustion is apply because dynamic mechanism need some degree
+ * of correctness. Ported from 8187B.
+ */
void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
bool bCckRate)
{
- // Determin the current packet is CCK rate.
+ /* Determin the current packet is CCK rate. */
priv->bCurCCKPkt = bCckRate;
if (priv->UndecoratedSmoothedSS >= 0)
- priv->UndecoratedSmoothedSS = ( (priv->UndecoratedSmoothedSS * 5) + (priv->SignalStrength * 10) ) / 6;
+ priv->UndecoratedSmoothedSS = ((priv->UndecoratedSmoothedSS * 5) + (priv->SignalStrength * 10)) / 6;
else
priv->UndecoratedSmoothedSS = priv->SignalStrength * 10;
- priv->UndercorateSmoothedRxPower = ( (priv->UndercorateSmoothedRxPower * 50) + (priv->RxPower* 11)) / 60;
+ priv->UndercorateSmoothedRxPower = ((priv->UndercorateSmoothedRxPower * 50) + (priv->RxPower * 11)) / 60;
if (bCckRate)
priv->CurCCKRSSI = priv->RSSI;
@@ -1410,28 +1406,30 @@ void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
}
-/* This is rough RX isr handling routine*/
+/*
+ * This is rough RX isr handling routine
+ */
void rtl8180_rx(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct sk_buff *tmp_skb;
- short first,last;
+ short first, last;
u32 len;
int lastlen;
unsigned char quality, signal;
u8 rate;
- u32 *tmp,*tmp2;
+ u32 *tmp, *tmp2;
u8 rx_desc_size;
u8 padding;
char rxpower = 0;
u32 RXAGC = 0;
long RxAGC_dBm = 0;
- u8 LNA=0, BB=0;
- u8 LNA_gain[4]={02, 17, 29, 39};
+ u8 LNA = 0, BB = 0;
+ u8 LNA_gain[4] = {02, 17, 29, 39};
u8 Antenna = 0;
struct ieee80211_hdr_4addr *hdr;
- u16 fc,type;
- u8 bHwError = 0,bCRC = 0,bICV = 0;
+ u16 fc, type;
+ u8 bHwError = 0, bCRC = 0, bICV = 0;
bool bCckRate = false;
u8 RSSI = 0;
long SignalStrengthIndex = 0;
@@ -1447,36 +1445,37 @@ void rtl8180_rx(struct net_device *dev)
if ((*(priv->rxringtail)) & (1<<31)) {
/* we have got an RX int, but the descriptor
- * we are pointing is empty*/
+ * we are pointing is empty */
priv->stats.rxnodata++;
priv->ieee80211->stats.rx_errors++;
tmp2 = NULL;
tmp = priv->rxringtail;
- do{
- if(tmp == priv->rxring)
+ do {
+ if (tmp == priv->rxring)
tmp = priv->rxring + (priv->rxringcount - 1)*rx_desc_size;
else
tmp -= rx_desc_size;
- if(! (*tmp & (1<<31)))
+ if (!(*tmp & (1<<31)))
tmp2 = tmp;
- }while(tmp != priv->rxring);
+ } while (tmp != priv->rxring);
- if(tmp2) priv->rxringtail = tmp2;
+ if (tmp2)
+ priv->rxringtail = tmp2;
}
/* while there are filled descriptors */
- while(!(*(priv->rxringtail) & (1<<31))){
- if(*(priv->rxringtail) & (1<<26))
+ while (!(*(priv->rxringtail) & (1<<31))) {
+ if (*(priv->rxringtail) & (1<<26))
DMESGW("RX buffer overflow");
- if(*(priv->rxringtail) & (1<<12))
+ if (*(priv->rxringtail) & (1<<12))
priv->stats.rxicverr++;
- if(*(priv->rxringtail) & (1<<27)){
+ if (*(priv->rxringtail) & (1<<27)) {
priv->stats.rxdmafail++;
- //DMESG("EE: RX DMA FAILED at buffer pointed by descriptor %x",(u32)priv->rxringtail);
+ /* DMESG("EE: RX DMA FAILED at buffer pointed by descriptor %x",(u32)priv->rxringtail); */
goto drop;
}
@@ -1486,12 +1485,13 @@ void rtl8180_rx(struct net_device *dev)
sizeof(u8),
PCI_DMA_FROMDEVICE);
- first = *(priv->rxringtail) & (1<<29) ? 1:0;
- if(first) priv->rx_prevlen=0;
+ first = *(priv->rxringtail) & (1<<29) ? 1 : 0;
+ if (first)
+ priv->rx_prevlen = 0;
- last = *(priv->rxringtail) & (1<<28) ? 1:0;
- if(last){
- lastlen=((*priv->rxringtail) &0xfff);
+ last = *(priv->rxringtail) & (1<<28) ? 1 : 0;
+ if (last) {
+ lastlen = ((*priv->rxringtail) & 0xfff);
/* if the last descriptor (that should
* tell us the total packet len) tell
@@ -1500,221 +1500,213 @@ void rtl8180_rx(struct net_device *dev)
* problem..
* workaround to prevent kernel panic
*/
- if(lastlen < priv->rx_prevlen)
- len=0;
+ if (lastlen < priv->rx_prevlen)
+ len = 0;
else
- len=lastlen-priv->rx_prevlen;
+ len = lastlen-priv->rx_prevlen;
- if(*(priv->rxringtail) & (1<<13)) {
- if ((*(priv->rxringtail) & 0xfff) <500)
+ if (*(priv->rxringtail) & (1<<13)) {
+ if ((*(priv->rxringtail) & 0xfff) < 500)
priv->stats.rxcrcerrmin++;
- else if ((*(priv->rxringtail) & 0x0fff) >1000)
+ else if ((*(priv->rxringtail) & 0x0fff) > 1000)
priv->stats.rxcrcerrmax++;
else
priv->stats.rxcrcerrmid++;
}
- }else{
+ } else {
len = priv->rxbuffersize;
}
- if(first && last) {
+ if (first && last) {
padding = ((*(priv->rxringtail+3))&(0x04000000))>>26;
- }else if(first) {
+ } else if (first) {
padding = ((*(priv->rxringtail+3))&(0x04000000))>>26;
- if(padding) {
+ if (padding)
len -= 2;
- }
- }else {
+ } else {
padding = 0;
}
- padding = 0;
- priv->rx_prevlen+=len;
+ padding = 0;
+ priv->rx_prevlen += len;
- if(priv->rx_prevlen > MAX_FRAG_THRESHOLD + 100){
+ if (priv->rx_prevlen > MAX_FRAG_THRESHOLD + 100) {
/* HW is probably passing several buggy frames
* without FD or LD flag set.
* Throw this garbage away to prevent skb
* memory exausting
*/
- if(!priv->rx_skb_complete)
+ if (!priv->rx_skb_complete)
dev_kfree_skb_any(priv->rx_skb);
priv->rx_skb_complete = 1;
}
- signal=(unsigned char)(((*(priv->rxringtail+3))& (0x00ff0000))>>16);
+ signal = (unsigned char)(((*(priv->rxringtail+3)) & (0x00ff0000))>>16);
signal = (signal & 0xfe) >> 1;
- quality=(unsigned char)((*(priv->rxringtail+3)) & (0xff));
+ quality = (unsigned char)((*(priv->rxringtail+3)) & (0xff));
stats.mac_time[0] = *(priv->rxringtail+1);
stats.mac_time[1] = *(priv->rxringtail+2);
- rxpower =((char)(((*(priv->rxringtail+4))& (0x00ff0000))>>16))/2 - 42;
- RSSI = ((u8)(((*(priv->rxringtail+3)) & (0x0000ff00))>> 8)) & (0x7f);
+ rxpower = ((char)(((*(priv->rxringtail+4)) & (0x00ff0000))>>16))/2 - 42;
+ RSSI = ((u8)(((*(priv->rxringtail+3)) & (0x0000ff00))>>8)) & (0x7f);
- rate=((*(priv->rxringtail)) &
+ rate = ((*(priv->rxringtail)) &
((1<<23)|(1<<22)|(1<<21)|(1<<20)))>>20;
stats.rate = rtl8180_rate2rate(rate);
- Antenna = (((*(priv->rxringtail +3))& (0x00008000)) == 0 )? 0:1 ;
- if(!rtl8180_IsWirelessBMode(stats.rate))
- { // OFDM rate.
+ Antenna = (((*(priv->rxringtail+3)) & (0x00008000)) == 0) ? 0 : 1;
+ if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
+ RxAGC_dBm = rxpower+1; /* bias */
+ } else { /* CCK rate. */
+ RxAGC_dBm = signal; /* bit 0 discard */
- RxAGC_dBm = rxpower+1; //bias
- }
- else
- { // CCK rate.
- RxAGC_dBm = signal;//bit 0 discard
-
- LNA = (u8) (RxAGC_dBm & 0x60 ) >> 5 ; //bit 6~ bit 5
- BB = (u8) (RxAGC_dBm & 0x1F); // bit 4 ~ bit 0
+ LNA = (u8) (RxAGC_dBm & 0x60) >> 5; /* bit 6~ bit 5 */
+ BB = (u8) (RxAGC_dBm & 0x1F); /* bit 4 ~ bit 0 */
- RxAGC_dBm = -( LNA_gain[LNA] + (BB *2) ); //Pin_11b=-(LNA_gain+BB_gain) (dBm)
+ RxAGC_dBm = -(LNA_gain[LNA] + (BB*2)); /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
- RxAGC_dBm +=4; //bias
+ RxAGC_dBm += 4; /* bias */
}
- if(RxAGC_dBm & 0x80) //absolute value
- RXAGC= ~(RxAGC_dBm)+1;
+ if (RxAGC_dBm & 0x80) /* absolute value */
+ RXAGC = ~(RxAGC_dBm)+1;
bCckRate = rtl8180_IsWirelessBMode(stats.rate);
- // Translate RXAGC into 1-100.
- if(!rtl8180_IsWirelessBMode(stats.rate))
- { // OFDM rate.
- if(RXAGC>90)
- RXAGC=90;
- else if(RXAGC<25)
- RXAGC=25;
- RXAGC=(90-RXAGC)*100/65;
- }
- else
- { // CCK rate.
- if(RXAGC>95)
- RXAGC=95;
- else if(RXAGC<30)
- RXAGC=30;
- RXAGC=(95-RXAGC)*100/65;
+ /* Translate RXAGC into 1-100. */
+ if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
+ if (RXAGC > 90)
+ RXAGC = 90;
+ else if (RXAGC < 25)
+ RXAGC = 25;
+ RXAGC = (90-RXAGC)*100/65;
+ } else { /* CCK rate. */
+ if (RXAGC > 95)
+ RXAGC = 95;
+ else if (RXAGC < 30)
+ RXAGC = 30;
+ RXAGC = (95-RXAGC)*100/65;
}
priv->SignalStrength = (u8)RXAGC;
priv->RecvSignalPower = RxAGC_dBm;
priv->RxPower = rxpower;
priv->RSSI = RSSI;
/* SQ translation formula is provided by SD3 DZ. 2006.06.27 */
- if(quality >= 127)
- quality = 1;//0; //0 will cause epc to show signal zero , walk aroud now;
- else if(quality < 27)
+ if (quality >= 127)
+ quality = 1; /* 0 would cause epc to show signal zero, workaround for now */
+ else if (quality < 27)
quality = 100;
else
quality = 127 - quality;
priv->SignalQuality = quality;
- stats.signal = (u8)quality;//priv->wstats.qual.level = priv->SignalStrength;
+ stats.signal = (u8)quality; /*priv->wstats.qual.level = priv->SignalStrength; */
stats.signalstrength = RXAGC;
- if(stats.signalstrength > 100)
+ if (stats.signalstrength > 100)
stats.signalstrength = 100;
stats.signalstrength = (stats.signalstrength * 70)/100 + 30;
- // printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength);
+ /* printk("==========================>rx : RXAGC is %d,signalstrength is %d\n",RXAGC,stats.signalstrength); */
stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
- stats.noise = priv->wstats.qual.noise = 100 - priv ->wstats.qual.qual;
- bHwError = (((*(priv->rxringtail))& (0x00000fff)) == 4080)| (((*(priv->rxringtail))& (0x04000000)) != 0 )
- | (((*(priv->rxringtail))& (0x08000000)) != 0 )| (((~(*(priv->rxringtail)))& (0x10000000)) != 0 )| (((~(*(priv->rxringtail)))& (0x20000000)) != 0 );
+ stats.noise = priv->wstats.qual.noise = 100 - priv->wstats.qual.qual;
+ bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) | (((*(priv->rxringtail)) & (0x04000000)) != 0)
+ | (((*(priv->rxringtail)) & (0x08000000)) != 0) | (((~(*(priv->rxringtail))) & (0x10000000)) != 0) | (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13;
bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12;
hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf;
fc = le16_to_cpu(hdr->frame_ctl);
- type = WLAN_FC_GET_TYPE(fc);
+ type = WLAN_FC_GET_TYPE(fc);
- if((IEEE80211_FTYPE_CTL != type) &&
- (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS)? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS )? hdr->addr2 : hdr->addr3))
- && (!bHwError) && (!bCRC)&& (!bICV))
- {
+ if ((IEEE80211_FTYPE_CTL != type) &&
+ (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
+ && (!bHwError) && (!bCRC) && (!bICV)) {
/* Perform signal smoothing for dynamic
* mechanism on demand. This is different
* with PerformSignalSmoothing8185 in smoothing
* fomula. No dramatic adjustion is apply
* because dynamic mechanism need some degree
* of correctness. */
- PerformUndecoratedSignalSmoothing8185(priv,bCckRate);
- //
- // For good-looking singal strength.
- //
+ PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
+
+ /* For good-looking signal strength. */
SignalStrengthIndex = NetgearSignalStrengthTranslate(
priv->LastSignalStrengthInPercent,
priv->SignalStrength);
priv->LastSignalStrengthInPercent = SignalStrengthIndex;
priv->Stats_SignalStrength = TranslateToDbm8185((u8)SignalStrengthIndex);
- //
- // We need more correct power of received packets and the "SignalStrength" of RxStats is beautified,
- // so we record the correct power here.
- //
- priv->Stats_SignalQuality =(long) (priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
- priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower -1) / 6;
-
- // Figure out which antenna that received the lasted packet.
- priv->LastRxPktAntenna = Antenna ? 1 : 0; // 0: aux, 1: main.
- SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
+ /*
+ * The "SignalStrength" in RxStats is beautified, but we need the real
+ * power of received packets, so we record the correct power here.
+ */
+ priv->Stats_SignalQuality = (long)(priv->Stats_SignalQuality * 5 + (long)priv->SignalQuality + 5) / 6;
+ priv->Stats_RecvSignalPower = (long)(priv->Stats_RecvSignalPower * 5 + priv->RecvSignalPower - 1) / 6;
+
+ /* Figure out which antenna received the latest packet. */
+ priv->LastRxPktAntenna = Antenna ? 1 : 0; /* 0: aux, 1: main. */
+ SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
}
- if(first){
- if(!priv->rx_skb_complete){
+ if (first) {
+ if (!priv->rx_skb_complete) {
/* seems that HW sometimes fails to reiceve and
doesn't provide the last descriptor */
dev_kfree_skb_any(priv->rx_skb);
priv->stats.rxnolast++;
}
/* support for prism header has been originally added by Christian */
- if(priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR){
-
- }else{
+ if (priv->prism_hdr && priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
+
+ } else {
priv->rx_skb = dev_alloc_skb(len+2);
- if( !priv->rx_skb) goto drop;
+ if (!priv->rx_skb)
+ goto drop;
}
- priv->rx_skb_complete=0;
- priv->rx_skb->dev=dev;
- }else{
+ priv->rx_skb_complete = 0;
+ priv->rx_skb->dev = dev;
+ } else {
/* if we are here we should have already RXed
* the first frame.
* If we get here and the skb is not allocated then
* we have just throw out garbage (skb not allocated)
* and we are still rxing garbage....
*/
- if(!priv->rx_skb_complete){
+ if (!priv->rx_skb_complete) {
- tmp_skb= dev_alloc_skb(priv->rx_skb->len +len+2);
+ tmp_skb = dev_alloc_skb(priv->rx_skb->len+len+2);
- if(!tmp_skb) goto drop;
+ if (!tmp_skb)
+ goto drop;
- tmp_skb->dev=dev;
+ tmp_skb->dev = dev;
- memcpy(skb_put(tmp_skb,priv->rx_skb->len),
+ memcpy(skb_put(tmp_skb, priv->rx_skb->len),
priv->rx_skb->data,
priv->rx_skb->len);
dev_kfree_skb_any(priv->rx_skb);
- priv->rx_skb=tmp_skb;
+ priv->rx_skb = tmp_skb;
}
}
- if(!priv->rx_skb_complete) {
- if(padding) {
- memcpy(skb_put(priv->rx_skb,len),
- (((unsigned char *)priv->rxbuffer->buf) + 2),len);
+ if (!priv->rx_skb_complete) {
+ if (padding) {
+ memcpy(skb_put(priv->rx_skb, len),
+ (((unsigned char *)priv->rxbuffer->buf) + 2), len);
} else {
- memcpy(skb_put(priv->rx_skb,len),
- priv->rxbuffer->buf,len);
+ memcpy(skb_put(priv->rx_skb, len),
+ priv->rxbuffer->buf, len);
}
}
- if(last && !priv->rx_skb_complete){
- if(priv->rx_skb->len > 4)
- skb_trim(priv->rx_skb,priv->rx_skb->len-4);
- if(!ieee80211_rtl_rx(priv->ieee80211,
+ if (last && !priv->rx_skb_complete) {
+ if (priv->rx_skb->len > 4)
+ skb_trim(priv->rx_skb, priv->rx_skb->len-4);
+ if (!ieee80211_rtl_rx(priv->ieee80211,
priv->rx_skb, &stats))
dev_kfree_skb_any(priv->rx_skb);
- priv->rx_skb_complete=1;
+ priv->rx_skb_complete = 1;
}
pci_dma_sync_single_for_device(priv->pdev,
@@ -1723,22 +1715,22 @@ void rtl8180_rx(struct net_device *dev)
sizeof(u8),
PCI_DMA_FROMDEVICE);
-drop: // this is used when we have not enough mem
+drop: /* this is used when we do not have enough memory */
/* restore the descriptor */
- *(priv->rxringtail+2)=priv->rxbuffer->dma;
- *(priv->rxringtail)=*(priv->rxringtail) &~ 0xfff;
- *(priv->rxringtail)=
+ *(priv->rxringtail+2) = priv->rxbuffer->dma;
+ *(priv->rxringtail) = *(priv->rxringtail) & ~0xfff;
+ *(priv->rxringtail) =
*(priv->rxringtail) | priv->rxbuffersize;
- *(priv->rxringtail)=
+ *(priv->rxringtail) =
*(priv->rxringtail) | (1<<31);
- priv->rxringtail+=rx_desc_size;
- if(priv->rxringtail >=
- (priv->rxring)+(priv->rxringcount )*rx_desc_size)
- priv->rxringtail=priv->rxring;
+ priv->rxringtail += rx_desc_size;
+ if (priv->rxringtail >=
+ (priv->rxring)+(priv->rxringcount)*rx_desc_size)
+ priv->rxringtail = priv->rxring;
- priv->rxbuffer=(priv->rxbuffer->next);
+ priv->rxbuffer = (priv->rxbuffer->next);
}
}
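For reference when reading the RX hunk above: every bit test on *(priv->rxringtail) is against the first status dword of an RX descriptor. The list below is a reader's summary of the bits the function uses; the names are illustrative only, the driver itself open-codes the masks.

/* Reader's summary of the RX status dword bits used by rtl8180_rx(). */
#define RX_STATUS_LEN_MASK	0x00000fff	/* accumulated frame length */
#define RX_STATUS_ICV_ERR	(1 << 12)	/* bICV */
#define RX_STATUS_CRC_ERR	(1 << 13)	/* bCRC, rxcrcerr* counters */
#define RX_STATUS_RATE_MASK	(0xf << 20)	/* fed to rtl8180_rate2rate() */
#define RX_STATUS_LAST_DESC	(1 << 28)	/* last descriptor of a frame */
#define RX_STATUS_FIRST_DESC	(1 << 29)	/* first descriptor of a frame */
#define RX_STATUS_OWN		(1 << 31)	/* descriptor handed back to the NIC */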
@@ -1747,10 +1739,10 @@ void rtl8180_dma_kick(struct net_device *dev, int priority)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
write_nic_byte(dev, TX_DMA_POLLING,
(1 << (priority + 1)) | priv->dma_poll_mask);
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
force_pci_posting(dev);
}
@@ -1759,31 +1751,31 @@ void rtl8180_data_hard_stop(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
priv->dma_poll_stop_mask |= TPPOLLSTOP_AC_VIQ;
- write_nic_byte(dev,TPPollStop, priv->dma_poll_stop_mask);
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
void rtl8180_data_hard_resume(struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
priv->dma_poll_stop_mask &= ~(TPPOLLSTOP_AC_VIQ);
- write_nic_byte(dev,TPPollStop, priv->dma_poll_stop_mask);
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
-/* this function TX data frames when the ieee80211 stack requires this.
+/*
+ * This function TXes data frames when the ieee80211 stack requires it.
* It checks also if we need to stop the ieee tx queue, eventually do it
*/
-void rtl8180_hard_data_xmit(struct sk_buff *skb,struct net_device *dev, int
-rate)
-{
+void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+ int rate)
+{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
- struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
+ struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
unsigned long flags;
int priority;
@@ -1792,35 +1784,35 @@ rate)
rate = ieeerate2rtlrate(rate);
/*
- * This function doesn't require lock because we make
- * sure it's called with the tx_lock already acquired.
- * this come from the kernel's hard_xmit callback (through
- * the ieee stack, or from the try_wake_queue (again through
- * the ieee stack.
- */
+ * This function doesn't require lock because we make
+ * sure it's called with the tx_lock already acquired.
+ * this come from the kernel's hard_xmit callback (through
+ * the ieee stack, or from the try_wake_queue (again through
+ * the ieee stack.
+ */
priority = AC2Q(skb->priority);
- spin_lock_irqsave(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
- if(priv->ieee80211->bHwRadioOff)
- {
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ if (priv->ieee80211->bHwRadioOff) {
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
return;
}
- if (!check_nic_enought_desc(dev, priority)){
+ if (!check_nic_enought_desc(dev, priority)) {
DMESGW("Error: no descriptor left by previous TX (avail %d) ",
get_curr_tx_free_desc(dev, priority));
ieee80211_rtl_stop_queue(priv->ieee80211);
}
- rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate);
+ rtl8180_tx(dev, skb->data, skb->len, priority, morefrag, 0, rate);
if (!check_nic_enought_desc(dev, priority))
ieee80211_rtl_stop_queue(priv->ieee80211);
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
}
-/* This is a rough attempt to TX a frame
+/*
+ * This is a rough attempt to TX a frame
* This is called by the ieee 80211 stack to TX management frames.
* If the ring is full packet are dropped (for data frame the queue
* is stopped before this can happen). For this reason it is better
@@ -1830,8 +1822,8 @@ rate)
* Since queues for Management and Data frames are different we
* might use a different lock than tx_lock (for example mgmt_tx_lock)
*/
-/* these function may loops if invoked with 0 descriptors or 0 len buffer*/
-int rtl8180_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
+/* this function may loop if invoked with 0 descriptors or a 0-length buffer */
+int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
unsigned long flags;
@@ -1839,66 +1831,68 @@ int rtl8180_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
priority = MANAGE_PRIORITY;
- spin_lock_irqsave(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
if (priv->ieee80211->bHwRadioOff) {
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
rtl8180_tx(dev, skb->data, skb->len, priority,
- 0, 0,ieeerate2rtlrate(priv->ieee80211->basic_rate));
+ 0, 0, ieeerate2rtlrate(priv->ieee80211->basic_rate));
- priv->ieee80211->stats.tx_bytes+=skb->len;
+ priv->ieee80211->stats.tx_bytes += skb->len;
priv->ieee80211->stats.tx_packets++;
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-// longpre 144+48 shortpre 72+24
-u16 rtl8180_len2duration(u32 len, short rate,short* ext)
+/* longpre 144+48 shortpre 72+24 */
+u16 rtl8180_len2duration(u32 len, short rate, short *ext)
{
u16 duration;
u16 drift;
- *ext=0;
+ *ext = 0;
- switch(rate){
- case 0://1mbps
- *ext=0;
- duration = ((len+4)<<4) /0x2;
+ switch (rate) {
+ case 0: /* 1mbps */
+ *ext = 0;
+ duration = ((len+4)<<4) / 0x2;
drift = ((len+4)<<4) % 0x2;
- if(drift ==0 ) break;
+ if (drift == 0)
+ break;
duration++;
break;
- case 1://2mbps
- *ext=0;
- duration = ((len+4)<<4) /0x4;
+ case 1: /* 2mbps */
+ *ext = 0;
+ duration = ((len+4)<<4) / 0x4;
drift = ((len+4)<<4) % 0x4;
- if(drift ==0 ) break;
+ if (drift == 0)
+ break;
duration++;
break;
- case 2: //5.5mbps
- *ext=0;
- duration = ((len+4)<<4) /0xb;
+ case 2: /* 5.5mbps */
+ *ext = 0;
+ duration = ((len+4)<<4) / 0xb;
drift = ((len+4)<<4) % 0xb;
- if(drift ==0 )
+ if (drift == 0)
break;
duration++;
break;
default:
- case 3://11mbps
- *ext=0;
- duration = ((len+4)<<4) /0x16;
+ case 3: /* 11mbps */
+ *ext = 0;
+ duration = ((len+4)<<4) / 0x16;
drift = ((len+4)<<4) % 0x16;
- if(drift ==0 )
+ if (drift == 0)
break;
duration++;
- if(drift > 6)
+ if (drift > 6)
break;
- *ext=1;
+ *ext = 1;
break;
}
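The switch above encodes the CCK airtime formula: ((len+4)<<4)/divisor is (len+4)*16/divisor, so the divisors 0x2, 0x4, 0xb and 0x16 correspond to 1, 2, 5.5 and 11 Mbps ((bytes * 8) / Mbps gives microseconds). Below is a minimal sketch of the 11 Mbps branch only, assuming the function goes on to return the computed duration; it is a reader's illustration, not the driver's code.

#include <linux/types.h>

/* Reader's sketch of the 11 Mbps branch of rtl8180_len2duration(). */
static u16 cck_11mbps_duration(u32 len, short *ext)
{
	u16 duration = ((len + 4) << 4) / 0x16;	/* (len+4)*8/11 -> microseconds */
	u16 drift = ((len + 4) << 4) % 0x16;

	*ext = 0;
	if (drift) {
		duration++;		/* round the airtime up */
		if (drift <= 6)
			*ext = 1;	/* PLCP length-extension bit */
	}
	return duration;
}

For example, len = 1500 gives 24064 / 22 = 1093 remainder 18, so the duration rounds up to 1094 us and, because the remainder exceeds 6, *ext stays 0.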
@@ -1911,19 +1905,20 @@ void rtl8180_prepare_beacon(struct net_device *dev)
struct sk_buff *skb;
u16 word = read_nic_word(dev, BcnItv);
- word &= ~BcnItv_BcnItv; // clear Bcn_Itv
- word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval);//0x64;
+ word &= ~BcnItv_BcnItv; /* clear Bcn_Itv */
+ word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval); /* 0x64; */
write_nic_word(dev, BcnItv, word);
skb = ieee80211_get_beacon(priv->ieee80211);
- if(skb){
- rtl8180_tx(dev,skb->data,skb->len,BEACON_PRIORITY,
- 0,0,ieeerate2rtlrate(priv->ieee80211->basic_rate));
+ if (skb) {
+ rtl8180_tx(dev, skb->data, skb->len, BEACON_PRIORITY,
+ 0, 0, ieeerate2rtlrate(priv->ieee80211->basic_rate));
dev_kfree_skb_any(skb);
}
}
-/* This function do the real dirty work: it enqueues a TX command
+/*
+ * This function does the real dirty work: it enqueues a TX command
* descriptor in the ring buffer, copyes the frame in a TX buffer
* and kicks the NIC to ensure it does the DMA transfer.
*/
@@ -1931,7 +1926,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
short morefrag, short descfrag, int rate)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- u32 *tail,*temp_tail;
+ u32 *tail, *temp_tail;
u32 *begin;
u32 *buf;
int i;
@@ -1940,70 +1935,69 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
int count;
u16 duration;
short ext;
- struct buffer* buflist;
+ struct buffer *buflist;
struct ieee80211_hdr_3addr *frag_hdr = (struct ieee80211_hdr_3addr *)txbuf;
u8 dest[ETH_ALEN];
u8 bUseShortPreamble = 0;
u8 bCTSEnable = 0;
u8 bRTSEnable = 0;
- u16 Duration = 0;
+ u16 Duration = 0;
u16 RtsDur = 0;
u16 ThisFrameTime = 0;
u16 TxDescDuration = 0;
- u8 ownbit_flag = false;
+ u8 ownbit_flag = false;
- switch(priority) {
+ switch (priority) {
case MANAGE_PRIORITY:
- tail=priv->txmapringtail;
- begin=priv->txmapring;
+ tail = priv->txmapringtail;
+ begin = priv->txmapring;
buflist = priv->txmapbufstail;
count = priv->txringcount;
break;
case BK_PRIORITY:
- tail=priv->txbkpringtail;
- begin=priv->txbkpring;
+ tail = priv->txbkpringtail;
+ begin = priv->txbkpring;
buflist = priv->txbkpbufstail;
count = priv->txringcount;
break;
case BE_PRIORITY:
- tail=priv->txbepringtail;
- begin=priv->txbepring;
+ tail = priv->txbepringtail;
+ begin = priv->txbepring;
buflist = priv->txbepbufstail;
count = priv->txringcount;
break;
case VI_PRIORITY:
- tail=priv->txvipringtail;
- begin=priv->txvipring;
+ tail = priv->txvipringtail;
+ begin = priv->txvipring;
buflist = priv->txvipbufstail;
count = priv->txringcount;
break;
case VO_PRIORITY:
- tail=priv->txvopringtail;
- begin=priv->txvopring;
+ tail = priv->txvopringtail;
+ begin = priv->txvopring;
buflist = priv->txvopbufstail;
count = priv->txringcount;
break;
case HI_PRIORITY:
- tail=priv->txhpringtail;
- begin=priv->txhpring;
+ tail = priv->txhpringtail;
+ begin = priv->txhpring;
buflist = priv->txhpbufstail;
count = priv->txringcount;
break;
case BEACON_PRIORITY:
- tail=priv->txbeaconringtail;
- begin=priv->txbeaconring;
+ tail = priv->txbeaconringtail;
+ begin = priv->txbeaconring;
buflist = priv->txbeaconbufstail;
count = priv->txbeaconcount;
break;
default:
return -1;
break;
- }
+ }
memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
if (is_multicast_ether_addr(dest) ||
- is_broadcast_ether_addr(dest))
- {
+ is_broadcast_ether_addr(dest)) {
Duration = 0;
RtsDur = 0;
bRTSEnable = 0;
@@ -2011,40 +2005,38 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
ThisFrameTime = ComputeTxTime(len + sCrcLng, rtl8180_rate2rate(rate), 0, bUseShortPreamble);
TxDescDuration = ThisFrameTime;
- } else {// Unicast packet
+ } else { /* Unicast packet */
u16 AckTime;
- //YJ,add,080828,for Keep alive
+ /* YJ,add,080828,for Keep alive */
priv->NumTxUnicast++;
/* Figure out ACK rate according to BSS basic rate
* and Tx rate. */
- AckTime = ComputeTxTime(14, 10,0, 0); // AckCTSLng = 14 use 1M bps send
+ AckTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14, sent at 1 Mbps */
- if ( ((len + sCrcLng) > priv->rts) && priv->rts )
- { // RTS/CTS.
+ if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
u16 RtsTime, CtsTime;
- //u16 CtsRate;
+ /* u16 CtsRate; */
bRTSEnable = 1;
bCTSEnable = 0;
- // Rate and time required for RTS.
- RtsTime = ComputeTxTime( sAckCtsLng/8,priv->ieee80211->basic_rate, 0, 0);
- // Rate and time required for CTS.
- CtsTime = ComputeTxTime(14, 10,0, 0); // AckCTSLng = 14 use 1M bps send
+ /* Rate and time required for RTS. */
+ RtsTime = ComputeTxTime(sAckCtsLng/8, priv->ieee80211->basic_rate, 0, 0);
+ /* Rate and time required for CTS. */
+ CtsTime = ComputeTxTime(14, 10, 0, 0); /* AckCTSLng = 14, sent at 1 Mbps */
- // Figure out time required to transmit this frame.
+ /* Figure out time required to transmit this frame. */
ThisFrameTime = ComputeTxTime(len + sCrcLng,
rtl8180_rate2rate(rate),
0,
bUseShortPreamble);
- // RTS-CTS-ThisFrame-ACK.
+ /* RTS-CTS-ThisFrame-ACK. */
RtsDur = CtsTime + ThisFrameTime + AckTime + 3*aSifsTime;
TxDescDuration = RtsTime + RtsDur;
- }
- else {// Normal case.
+ } else { /* Normal case. */
bCTSEnable = 0;
bRTSEnable = 0;
RtsDur = 0;
@@ -2054,34 +2046,34 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
}
if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
- // ThisFrame-ACK.
+ /* ThisFrame-ACK. */
Duration = aSifsTime + AckTime;
- } else { // One or more fragments remained.
+ } else { /* One or more fragments remained. */
u16 NextFragTime;
- NextFragTime = ComputeTxTime( len + sCrcLng, //pretend following packet length equal current packet
+ NextFragTime = ComputeTxTime(len + sCrcLng, /* pretend the following packet's length equals the current one */
rtl8180_rate2rate(rate),
0,
- bUseShortPreamble );
+ bUseShortPreamble);
- //ThisFrag-ACk-NextFrag-ACK.
+ /* ThisFrag-ACk-NextFrag-ACK. */
Duration = NextFragTime + 3*aSifsTime + 2*AckTime;
}
- } // End of Unicast packet
+ } /* End of Unicast packet */
frag_hdr->duration_id = Duration;
- buflen=priv->txbuffsize;
- remain=len;
+ buflen = priv->txbuffsize;
+ remain = len;
temp_tail = tail;
- while(remain!=0){
+ while (remain != 0) {
mb();
- if(!buflist){
+ if (!buflist) {
DMESGE("TX buffer error, cannot TX frames. pri %d.", priority);
return -1;
}
- buf=buflist->buf;
+ buf = buflist->buf;
if ((*tail & (1 << 31)) && (priority != BEACON_PRIORITY)) {
DMESGW("No more TX desc, returning %x of %x",
@@ -2090,51 +2082,50 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
return remain;
}
- *tail= 0; // zeroes header
+ *tail = 0; /* zeroes header */
*(tail+1) = 0;
*(tail+3) = 0;
*(tail+5) = 0;
*(tail+6) = 0;
*(tail+7) = 0;
- /*FIXME: this should be triggered by HW encryption parameters.*/
+ /* FIXME: this should be triggered by HW encryption parameters.*/
*tail |= (1<<15); /* no encrypt */
- if(remain==len && !descfrag) {
+ if (remain == len && !descfrag) {
ownbit_flag = false;
- *tail = *tail| (1<<29) ; //fist segment of the packet
- *tail = *tail |(len);
+ *tail = *tail | (1<<29); /* first segment of the packet */
+ *tail = *tail | (len);
} else {
ownbit_flag = true;
}
- for(i=0;i<buflen&& remain >0;i++,remain--){
- ((u8*)buf)[i]=txbuf[i]; //copy data into descriptor pointed DMAble buffer
- if(remain == 4 && i+4 >= buflen) break;
+ for (i = 0; i < buflen && remain > 0; i++, remain--) {
+ ((u8 *)buf)[i] = txbuf[i]; /* copy data into the DMA-able buffer pointed to by the descriptor */
+ if (remain == 4 && i+4 >= buflen)
+ break;
/* ensure the last desc has at least 4 bytes payload */
}
txbuf = txbuf + i;
- *(tail+3)=*(tail+3) &~ 0xfff;
- *(tail+3)=*(tail+3) | i; // buffer lenght
- // Use short preamble or not
+ *(tail+3) = *(tail+3) & ~0xfff;
+ *(tail+3) = *(tail+3) | i; /* buffer length */
+ /* Use short preamble or not */
if (priv->ieee80211->current_network.capability&WLAN_CAPABILITY_SHORT_PREAMBLE)
- if (priv->plcp_preamble_mode==1 && rate!=0) // short mode now, not long!
- ;// *tail |= (1<<16); // enable short preamble mode.
+ if (priv->plcp_preamble_mode == 1 && rate != 0) /* short mode now, not long! */
+ ; /* *tail |= (1<<16); */ /* enable short preamble mode. */
- if(bCTSEnable) {
+ if (bCTSEnable)
*tail |= (1<<18);
- }
- if(bRTSEnable) //rts enable
- {
- *tail |= ((ieeerate2rtlrate(priv->ieee80211->basic_rate))<<19);//RTS RATE
- *tail |= (1<<23);//rts enable
- *(tail+1) |=(RtsDur&0xffff);//RTS Duration
+ if (bRTSEnable) { /* rts enable */
+ *tail |= ((ieeerate2rtlrate(priv->ieee80211->basic_rate))<<19); /* RTS RATE */
+ *tail |= (1<<23); /* rts enable */
+ *(tail+1) |= (RtsDur&0xffff); /* RTS Duration */
}
- *(tail+3) |= ((TxDescDuration&0xffff)<<16); //DURATION
-// *(tail+3) |= (0xe6<<16);
- *(tail+5) |= (11<<8);//(priv->retry_data<<8); //retry lim ;
+ *(tail+3) |= ((TxDescDuration&0xffff)<<16); /* DURATION */
+ /* *(tail+3) |= (0xe6<<16); */
+ *(tail+5) |= (11<<8); /* (priv->retry_data<<8); */ /* retry lim; */
*tail = *tail | ((rate&0xf) << 24);
@@ -2143,71 +2134,73 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
if (!priv->hw_plcp_len) {
duration = rtl8180_len2duration(len, rate, &ext);
*(tail+1) = *(tail+1) | ((duration & 0x7fff)<<16);
- if(ext) *(tail+1) = *(tail+1) |(1<<31); //plcp length extension
+ if (ext)
+ *(tail+1) = *(tail+1) | (1<<31); /* plcp length extension */
}
- if(morefrag) *tail = (*tail) | (1<<17); // more fragment
- if(!remain) *tail = (*tail) | (1<<28); // last segment of frame
+ if (morefrag)
+ *tail = (*tail) | (1<<17); /* more fragment */
+ if (!remain)
+ *tail = (*tail) | (1<<28); /* last segment of frame */
- *(tail+5) = *(tail+5)|(2<<27);
- *(tail+7) = *(tail+7)|(1<<4);
+ *(tail+5) = *(tail+5) | (2<<27);
+ *(tail+7) = *(tail+7) | (1<<4);
wmb();
- if(ownbit_flag)
- {
- *tail = *tail | (1<<31); // descriptor ready to be txed
- }
+ if (ownbit_flag)
+ *tail = *tail | (1<<31); /* descriptor ready to be txed */
- if((tail - begin)/8 == count-1)
- tail=begin;
+ if ((tail - begin)/8 == count-1)
+ tail = begin;
else
- tail=tail+8;
+ tail = tail+8;
- buflist=buflist->next;
+ buflist = buflist->next;
mb();
- switch(priority) {
- case MANAGE_PRIORITY:
- priv->txmapringtail=tail;
- priv->txmapbufstail=buflist;
- break;
- case BK_PRIORITY:
- priv->txbkpringtail=tail;
- priv->txbkpbufstail=buflist;
- break;
- case BE_PRIORITY:
- priv->txbepringtail=tail;
- priv->txbepbufstail=buflist;
- break;
- case VI_PRIORITY:
- priv->txvipringtail=tail;
- priv->txvipbufstail=buflist;
- break;
- case VO_PRIORITY:
- priv->txvopringtail=tail;
- priv->txvopbufstail=buflist;
- break;
- case HI_PRIORITY:
- priv->txhpringtail=tail;
- priv->txhpbufstail = buflist;
- break;
- case BEACON_PRIORITY:
- /* the HW seems to be happy with the 1st
- * descriptor filled and the 2nd empty...
- * So always update descriptor 1 and never
- * touch 2nd
- */
- break;
+ switch (priority) {
+ case MANAGE_PRIORITY:
+ priv->txmapringtail = tail;
+ priv->txmapbufstail = buflist;
+ break;
+ case BK_PRIORITY:
+ priv->txbkpringtail = tail;
+ priv->txbkpbufstail = buflist;
+ break;
+ case BE_PRIORITY:
+ priv->txbepringtail = tail;
+ priv->txbepbufstail = buflist;
+ break;
+ case VI_PRIORITY:
+ priv->txvipringtail = tail;
+ priv->txvipbufstail = buflist;
+ break;
+ case VO_PRIORITY:
+ priv->txvopringtail = tail;
+ priv->txvopbufstail = buflist;
+ break;
+ case HI_PRIORITY:
+ priv->txhpringtail = tail;
+ priv->txhpbufstail = buflist;
+ break;
+ case BEACON_PRIORITY:
+ /*
+ * The HW seems to be happy with the 1st
+ * descriptor filled and the 2nd empty...
+ * So always update descriptor 1 and never
+ * touch 2nd
+ */
+ break;
}
}
- *temp_tail = *temp_tail | (1<<31); // descriptor ready to be txed
- rtl8180_dma_kick(dev,priority);
+ *temp_tail = *temp_tail | (1<<31); /* descriptor ready to be txed */
+ rtl8180_dma_kick(dev, priority);
return 0;
}
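For reference, rtl8180_tx() above assembles the first descriptor dword from open-coded shifts. A reader's map of those bits follows; the names are illustrative and not part of the driver.

/* TX descriptor dword 0, as built by rtl8180_tx(). */
#define TX_DESC0_LEN_MASK	0x00000fff	/* frame length (first segment) */
#define TX_DESC0_NO_ENCRYPT	(1 << 15)
#define TX_DESC0_MORE_FRAG	(1 << 17)
#define TX_DESC0_CTS_ENABLE	(1 << 18)
#define TX_DESC0_RTS_RATE_SHIFT	19
#define TX_DESC0_RTS_ENABLE	(1 << 23)
#define TX_DESC0_RATE_SHIFT	24		/* (rate & 0xf) << 24 */
#define TX_DESC0_LAST_SEG	(1 << 28)
#define TX_DESC0_FIRST_SEG	(1 << 29)
#define TX_DESC0_OWN		(1 << 31)	/* ready for the NIC */
/* Dword 1 carries the RTS duration (low 16 bits), the PLCP duration
 * (bits 16-30) and the length-extension flag (bit 31); dword 3 carries
 * the buffer length (low 12 bits) and TxDescDuration (bits 16-31).
 */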
-void rtl8180_irq_rx_tasklet(struct r8180_priv * priv);
+void rtl8180_irq_rx_tasklet(struct r8180_priv *priv);
void rtl8180_link_change(struct net_device *dev)
{
@@ -2217,13 +2210,13 @@ void rtl8180_link_change(struct net_device *dev)
rtl8180_update_msr(dev);
- rtl8180_set_mode(dev,EPROM_CMD_CONFIG);
+ rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
- write_nic_dword(dev,BSSID,((u32*)net->bssid)[0]);
- write_nic_word(dev,BSSID+4,((u16*)net->bssid)[2]);
+ write_nic_dword(dev, BSSID, ((u32 *)net->bssid)[0]);
+ write_nic_word(dev, BSSID+4, ((u16 *)net->bssid)[2]);
- beacon_interval = read_nic_dword(dev,BEACON_INTERVAL);
- beacon_interval &= ~ BEACON_INTERVAL_MASK;
+ beacon_interval = read_nic_dword(dev, BEACON_INTERVAL);
+ beacon_interval &= ~BEACON_INTERVAL_MASK;
beacon_interval |= net->beacon_interval;
write_nic_dword(dev, BEACON_INTERVAL, beacon_interval);
@@ -2232,42 +2225,50 @@ void rtl8180_link_change(struct net_device *dev)
rtl8180_set_chan(dev, priv->chan);
}
-void rtl8180_rq_tx_ack(struct net_device *dev){
+void rtl8180_rq_tx_ack(struct net_device *dev)
+{
struct r8180_priv *priv = ieee80211_priv(dev);
- write_nic_byte(dev,CONFIG4,read_nic_byte(dev,CONFIG4)|CONFIG4_PWRMGT);
+ write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
priv->ack_tx_to_ieee = 1;
}
-short rtl8180_is_tx_queue_empty(struct net_device *dev){
+short rtl8180_is_tx_queue_empty(struct net_device *dev)
+{
struct r8180_priv *priv = ieee80211_priv(dev);
- u32* d;
+ u32 *d;
for (d = priv->txmapring;
- d < priv->txmapring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txmapring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
for (d = priv->txbkpring;
- d < priv->txbkpring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txbkpring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
for (d = priv->txbepring;
- d < priv->txbepring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txbepring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
for (d = priv->txvipring;
- d < priv->txvipring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txvipring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
for (d = priv->txvopring;
- d < priv->txvopring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txvopring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
for (d = priv->txhpring;
- d < priv->txhpring + priv->txringcount;d+=8)
- if(*d & (1<<31)) return 0;
+ d < priv->txhpring + priv->txringcount; d += 8)
+ if (*d & (1<<31))
+ return 0;
return 1;
}
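The six loops above differ only in which ring they walk. A minimal refactoring sketch, not part of this patch, that mirrors the existing stride of eight dwords per descriptor and the OWN-bit test:

#include <linux/types.h>

/* Reader's sketch: 1 if no descriptor in the ring is still owned by the NIC. */
static short tx_ring_is_empty(u32 *ring, int count)
{
	u32 *d;

	for (d = ring; d < ring + count; d += 8)
		if (*d & (1 << 31))
			return 0;	/* pending frame */
	return 1;
}

rtl8180_is_tx_queue_empty() would then reduce to six calls such as tx_ring_is_empty(priv->txmapring, priv->txringcount).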
/* FIXME FIXME 5msecs is random */
@@ -2278,22 +2279,22 @@ void rtl8180_hw_wakeup(struct net_device *dev)
unsigned long flags;
struct r8180_priv *priv = ieee80211_priv(dev);
- spin_lock_irqsave(&priv->ps_lock,flags);
- write_nic_byte(dev,CONFIG4,read_nic_byte(dev,CONFIG4)&~CONFIG4_PWRMGT);
+ spin_lock_irqsave(&priv->ps_lock, flags);
+ write_nic_byte(dev, CONFIG4, read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
if (priv->rf_wakeup)
priv->rf_wakeup(dev);
- spin_unlock_irqrestore(&priv->ps_lock,flags);
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
}
void rtl8180_hw_sleep_down(struct net_device *dev)
{
- unsigned long flags;
- struct r8180_priv *priv = ieee80211_priv(dev);
+ unsigned long flags;
+ struct r8180_priv *priv = ieee80211_priv(dev);
- spin_lock_irqsave(&priv->ps_lock,flags);
- if(priv->rf_sleep)
- priv->rf_sleep(dev);
- spin_unlock_irqrestore(&priv->ps_lock,flags);
+ spin_lock_irqsave(&priv->ps_lock, flags);
+ if (priv->rf_sleep)
+ priv->rf_sleep(dev);
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
}
void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
@@ -2302,47 +2303,50 @@ void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
u32 rb = jiffies;
unsigned long flags;
- spin_lock_irqsave(&priv->ps_lock,flags);
+ spin_lock_irqsave(&priv->ps_lock, flags);
- /* Writing HW register with 0 equals to disable
+ /*
+ * Writing the HW register with 0 would disable
* the timer, that is not really what we want
*/
tl -= MSECS(4+16+7);
- /* If the interval in witch we are requested to sleep is too
+ /*
+ * If the interval in which we are requested to sleep is too
* short then give up and remain awake
*/
- if(((tl>=rb)&& (tl-rb) <= MSECS(MIN_SLEEP_TIME))
- ||((rb>tl)&& (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
- spin_unlock_irqrestore(&priv->ps_lock,flags);
+ if (((tl >= rb) && (tl-rb) <= MSECS(MIN_SLEEP_TIME))
+ || ((rb > tl) && (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
printk("too short to sleep\n");
return;
}
{
- u32 tmp = (tl>rb)?(tl-rb):(rb-tl);
+ u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl);
priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
- queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp); //as tl may be less than rb
+ queue_delayed_work(priv->ieee80211->wq, &priv->ieee80211->hw_wakeup_wq, tmp); /* as tl may be less than rb */
}
- /* if we suspect the TimerInt is gone beyond tl
+ /*
+ * If we suspect the TimerInt has gone beyond tl
* while setting it, then give up
*/
- if(((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME)))||
+ if (((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME))) ||
((tl < rb) && ((rb-tl) > MSECS(MAX_SLEEP_TIME)))) {
- spin_unlock_irqrestore(&priv->ps_lock,flags);
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
return;
}
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_sleep_wq);
- spin_unlock_irqrestore(&priv->ps_lock,flags);
+ spin_unlock_irqrestore(&priv->ps_lock, flags);
}
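Both interval checks in rtl8180_hw_sleep() above ask the same question: how far apart are the requested wakeup time tl and the current jiffies value rb? A reader's sketch of that common piece, roughly equivalent to the open-coded comparisons; MSECS, MIN_SLEEP_TIME and MAX_SLEEP_TIME are the driver's own macros.

#include <linux/types.h>

/* Reader's sketch: distance between tl and rb, as computed for DozePeriodInPast2Sec. */
static u32 jiffies_gap(u32 tl, u32 rb)
{
	return (tl > rb) ? (tl - rb) : (rb - tl);
}

/* "too short to sleep"      : jiffies_gap(tl, rb) <= MSECS(MIN_SLEEP_TIME)
 * "TimerInt gone beyond tl" : jiffies_gap(tl, rb) >  MSECS(MAX_SLEEP_TIME)
 */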
-void rtl8180_wmm_param_update(struct work_struct * work)
+void rtl8180_wmm_param_update(struct work_struct *work)
{
- struct ieee80211_device * ieee = container_of(work, struct ieee80211_device,wmm_param_update_wq);
+ struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wmm_param_update_wq);
struct net_device *dev = ieee->dev;
u8 *ac_param = (u8 *)(ieee->current_network.wmm_param);
u8 mode = ieee->current_network.mode;
@@ -2351,81 +2355,81 @@ void rtl8180_wmm_param_update(struct work_struct * work)
PAC_PARAM pAcParam;
u8 i;
- if(!ieee->current_network.QoS_Enable){
- //legacy ac_xx_param update
+ if (!ieee->current_network.QoS_Enable) {
+ /* legacy ac_xx_param update */
AcParam.longData = 0;
- AcParam.f.AciAifsn.f.AIFSN = 2; // Follow 802.11 DIFS.
+ AcParam.f.AciAifsn.f.AIFSN = 2; /* Follow 802.11 DIFS. */
AcParam.f.AciAifsn.f.ACM = 0;
- AcParam.f.Ecw.f.ECWmin = 3; // Follow 802.11 CWmin.
- AcParam.f.Ecw.f.ECWmax = 7; // Follow 802.11 CWmax.
+ AcParam.f.Ecw.f.ECWmin = 3; /* Follow 802.11 CWmin. */
+ AcParam.f.Ecw.f.ECWmax = 7; /* Follow 802.11 CWmax. */
AcParam.f.TXOPLimit = 0;
- for(eACI = 0; eACI < AC_MAX; eACI++){
+ for (eACI = 0; eACI < AC_MAX; eACI++) {
AcParam.f.AciAifsn.f.ACI = (u8)eACI;
{
u8 u1bAIFS;
u32 u4bAcParam;
pAcParam = (PAC_PARAM)(&AcParam);
- // Retrive paramters to udpate.
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN *(((mode&IEEE_G) == IEEE_G)?9:20) + aSifsTime;
+ /* Retrieve parameters to update. */
+ u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit))<<AC_PARAM_TXOP_LIMIT_OFFSET)|
(((u32)(pAcParam->f.Ecw.f.ECWmax))<<AC_PARAM_ECW_MAX_OFFSET)|
(((u32)(pAcParam->f.Ecw.f.ECWmin))<<AC_PARAM_ECW_MIN_OFFSET)|
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
- switch(eACI){
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- printk(KERN_WARNING "SetHwReg8185():invalid ACI: %d!\n", eACI);
- break;
+ switch (eACI) {
+ case AC1_BK:
+ write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
+ break;
+ case AC0_BE:
+ write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
+ break;
+ case AC2_VI:
+ write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
+ break;
+ case AC3_VO:
+ write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
+ break;
+ default:
+ printk(KERN_WARNING "SetHwReg8185():invalid ACI: %d!\n", eACI);
+ break;
}
}
}
return;
}
- for(i = 0; i < AC_MAX; i++){
- //AcParam.longData = 0;
- pAcParam = (AC_PARAM * )ac_param;
+ for (i = 0; i < AC_MAX; i++) {
+ /* AcParam.longData = 0; */
+ pAcParam = (AC_PARAM *)ac_param;
{
AC_CODING eACI;
u8 u1bAIFS;
u32 u4bAcParam;
- // Retrive paramters to udpate.
+ /* Retrieve parameters to update. */
eACI = pAcParam->f.AciAifsn.f.ACI;
- //Mode G/A: slotTimeTimer = 9; Mode B: 20
- u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G)?9:20) + aSifsTime;
- u4bAcParam = ( (((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) |
+ /* Mode G/A: slotTimeTimer = 9; Mode B: 20 */
+ u1bAIFS = pAcParam->f.AciAifsn.f.AIFSN * (((mode&IEEE_G) == IEEE_G) ? 9 : 20) + aSifsTime;
+ u4bAcParam = ((((u32)(pAcParam->f.TXOPLimit)) << AC_PARAM_TXOP_LIMIT_OFFSET) |
(((u32)(pAcParam->f.Ecw.f.ECWmax)) << AC_PARAM_ECW_MAX_OFFSET) |
(((u32)(pAcParam->f.Ecw.f.ECWmin)) << AC_PARAM_ECW_MIN_OFFSET) |
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET));
- switch(eACI){
- case AC1_BK:
- write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
- break;
- case AC0_BE:
- write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
- break;
- case AC2_VI:
- write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
- break;
- case AC3_VO:
- write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
- break;
- default:
- printk(KERN_WARNING "SetHwReg8185(): invalid ACI: %d !\n", eACI);
- break;
+ switch (eACI) {
+ case AC1_BK:
+ write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
+ break;
+ case AC0_BE:
+ write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
+ break;
+ case AC2_VI:
+ write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
+ break;
+ case AC3_VO:
+ write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
+ break;
+ default:
+ printk(KERN_WARNING "SetHwReg8185(): invalid ACI: %d !\n", eACI);
+ break;
}
}
ac_param += (sizeof(AC_PARAM));
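The register value u4bAcParam is packed the same way in both branches of rtl8180_wmm_param_update() above. A sketch of that packing as a helper; AC_PARAM_*_OFFSET, IEEE_G and aSifsTime are the driver's own symbols, the helper itself is illustrative.

/* Reader's sketch, assuming the driver's headers for the AC_PARAM_* offsets. */
static u32 pack_ac_param(u8 aifsn, u8 ecwmin, u8 ecwmax, u16 txop, u8 mode)
{
	/* Mode G/A uses 9 us slots, mode B uses 20 us slots. */
	u8 slot = ((mode & IEEE_G) == IEEE_G) ? 9 : 20;
	u8 aifs = aifsn * slot + aSifsTime;

	return ((u32)txop   << AC_PARAM_TXOP_LIMIT_OFFSET) |
	       ((u32)ecwmax << AC_PARAM_ECW_MAX_OFFSET) |
	       ((u32)ecwmin << AC_PARAM_ECW_MIN_OFFSET) |
	       ((u32)aifs   << AC_PARAM_AIFS_OFFSET);
}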
@@ -2434,7 +2438,7 @@ void rtl8180_wmm_param_update(struct work_struct * work)
void rtl8180_tx_irq_wq(struct work_struct *work);
void rtl8180_restart_wq(struct work_struct *work);
-//void rtl8180_rq_tx_ack(struct work_struct *work);
+/* void rtl8180_rq_tx_ack(struct work_struct *work); */
void rtl8180_watch_dog_wq(struct work_struct *work);
void rtl8180_hw_wakeup_wq(struct work_struct *work);
void rtl8180_hw_sleep_wq(struct work_struct *work);
@@ -2450,95 +2454,91 @@ void watch_dog_adaptive(unsigned long data)
return;
}
- // Tx High Power Mechanism.
- if(CheckHighPower((struct net_device *)data))
+ /* Tx High Power Mechanism. */
+ if (CheckHighPower((struct net_device *)data))
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->tx_pw_wq);
- // Tx Power Tracking on 87SE.
+ /* Tx Power Tracking on 87SE. */
if (CheckTxPwrTracking((struct net_device *)data))
TxPwrTracking87SE((struct net_device *)data);
- // Perform DIG immediately.
- if(CheckDig((struct net_device *)data) == true)
+ /* Perform DIG immediately. */
+ if (CheckDig((struct net_device *)data) == true)
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_dig_wq);
- rtl8180_watch_dog((struct net_device *)data);
+ rtl8180_watch_dog((struct net_device *)data);
queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
- priv->watch_dog_timer.expires = jiffies + MSECS(IEEE80211_WATCH_DOG_TIME);
+ priv->watch_dog_timer.expires = jiffies + MSECS(IEEE80211_WATCH_DOG_TIME);
add_timer(&priv->watch_dog_timer);
}
static CHANNEL_LIST ChannelPlan[] = {
- {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64},19}, //FCC
- {{1,2,3,4,5,6,7,8,9,10,11},11}, //IC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //Spain. Change to ETSI.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //France. Change to ETSI.
- {{14,36,40,44,48,52,56,60,64},9}, //MKK
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14, 36,40,44,48,52,56,60,64},22},//MKK1
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //Israel.
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,34,38,42,46},17}, // For 11a , TELEC
- {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
- {{1,2,3,4,5,6,7,8,9,10,11,12,13},13} //world wide 13: ch1~ch11 active scan, ch12~13 passive //lzm add 080826
+ {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64},19}, /* FCC */
+ {{1,2,3,4,5,6,7,8,9,10,11},11}, /* IC */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* ETSI */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Spain. Change to ETSI. */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* France. Change to ETSI. */
+ {{14,36,40,44,48,52,56,60,64},9}, /* MKK */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,14, 36,40,44,48,52,56,60,64},22},/* MKK1 */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, /* Israel. */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,34,38,42,46},17}, /* For 11a , TELEC */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, /* For Global Domain. 1-11: active scan, 12-14: passive scan. +YJ, 080626 */
+ {{1,2,3,4,5,6,7,8,9,10,11,12,13},13} /* world wide 13: ch1~ch11 active scan, ch12~13 passive. lzm add 080826 */
};
static void rtl8180_set_channel_map(u8 channel_plan, struct ieee80211_device *ieee)
{
int i;
- //lzm add 080826
- ieee->MinPassiveChnlNum=MAX_CHANNEL_NUMBER+1;
- ieee->IbssStartChnl=0;
-
- switch (channel_plan)
- {
- case COUNTRY_CODE_FCC:
- case COUNTRY_CODE_IC:
- case COUNTRY_CODE_ETSI:
- case COUNTRY_CODE_SPAIN:
- case COUNTRY_CODE_FRANCE:
- case COUNTRY_CODE_MKK:
- case COUNTRY_CODE_MKK1:
- case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_TELEC:
+ /* lzm add 080826 */
+ ieee->MinPassiveChnlNum = MAX_CHANNEL_NUMBER+1;
+ ieee->IbssStartChnl = 0;
+
+ switch (channel_plan) {
+ case COUNTRY_CODE_FCC:
+ case COUNTRY_CODE_IC:
+ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_MKK:
+ case COUNTRY_CODE_MKK1:
+ case COUNTRY_CODE_ISRAEL:
+ case COUNTRY_CODE_TELEC:
{
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
- if (ChannelPlan[channel_plan].Len != 0){
- // Clear old channel map
+ if (ChannelPlan[channel_plan].Len != 0) {
+ /* Clear old channel map */
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- // Set new channel map
- for (i=0;i<ChannelPlan[channel_plan].Len;i++)
- {
- if(ChannelPlan[channel_plan].Channel[i] <= 14)
+ /* Set new channel map */
+ for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
+ if (ChannelPlan[channel_plan].Channel[i] <= 14)
GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
}
}
break;
}
- case COUNTRY_CODE_GLOBAL_DOMAIN:
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
{
GET_DOT11D_INFO(ieee)->bEnabled = 0;
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
}
- case COUNTRY_CODE_WORLD_WIDE_13_INDEX://lzm add 080826
+ case COUNTRY_CODE_WORLD_WIDE_13_INDEX: /* lzm add 080826 */
{
- ieee->MinPassiveChnlNum=12;
- ieee->IbssStartChnl= 10;
- break;
+ ieee->MinPassiveChnlNum = 12;
+ ieee->IbssStartChnl = 10;
+ break;
}
- default:
+ default:
{
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- for (i=1;i<=14;i++)
- {
+ for (i = 1; i <= 14; i++)
GET_DOT11D_INFO(ieee)->channel_map[i] = 1;
- }
break;
}
}
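How the map built above is consumed is not shown in this patch, so the sketch below is only an assumption drawn from the ChannelPlan[] notes ("1-11: active scan, 12-14 passive scan") and from MinPassiveChnlNum: a channel may be scanned if its map entry is set, and only passively once it reaches MinPassiveChnlNum.

/* Illustrative only: assumed consumer of channel_map / MinPassiveChnlNum. */
static int channel_allows_active_scan(struct ieee80211_device *ieee, u8 ch)
{
	return GET_DOT11D_INFO(ieee)->channel_map[ch] &&
	       ch < ieee->MinPassiveChnlNum;
}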
@@ -2546,7 +2546,7 @@ static void rtl8180_set_channel_map(u8 channel_plan, struct ieee80211_device *ie
void GPIOChangeRFWorkItemCallBack(struct work_struct *work);
-//YJ,add,080828
+/* YJ,add,080828 */
static void rtl8180_statistics_init(struct Stats *pstats)
{
memset(pstats, 0, sizeof(struct Stats));
@@ -2557,8 +2557,8 @@ static void rtl8180_link_detect_init(plink_detect_t plink_detect)
memset(plink_detect, 0, sizeof(link_detect_t));
plink_detect->SlotNum = DEFAULT_SLOT_NUM;
}
-//YJ,add,080828,end
+/* YJ,add,080828,end */
static void rtl8187se_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
struct net_device *dev = eeprom->data;
@@ -2607,19 +2607,19 @@ short rtl8180_init(struct net_device *dev)
eeprom_93cx6_read(&eeprom, EEPROM_COUNTRY_CODE>>1, &eeprom_val);
priv->channel_plan = eeprom_val & 0xFF;
- if(priv->channel_plan > COUNTRY_CODE_GLOBAL_DOMAIN){
+ if (priv->channel_plan > COUNTRY_CODE_GLOBAL_DOMAIN) {
printk("rtl8180_init:Error channel plan! Set to default.\n");
priv->channel_plan = 0;
}
- DMESG("Channel plan is %d\n",priv->channel_plan);
+ DMESG("Channel plan is %d\n", priv->channel_plan);
rtl8180_set_channel_map(priv->channel_plan, priv->ieee80211);
- //FIXME: these constants are placed in a bad pleace.
- priv->txbuffsize = 2048;//1024;
- priv->txringcount = 32;//32;
- priv->rxbuffersize = 2048;//1024;
- priv->rxringcount = 64;//32;
+ /* FIXME: these constants are placed in a bad place. */
+ priv->txbuffsize = 2048; /* 1024; */
+ priv->txringcount = 32; /* 32; */
+ priv->rxbuffersize = 2048; /* 1024; */
+ priv->rxringcount = 64; /* 32; */
priv->txbeaconcount = 2;
priv->rx_skb_complete = 1;
@@ -2628,7 +2628,7 @@ short rtl8180_init(struct net_device *dev)
priv->RFProgType = 0;
priv->bInHctTest = false;
- priv->irq_enabled=0;
+ priv->irq_enabled = 0;
rtl8180_statistics_init(&priv->stats);
rtl8180_link_detect_init(&priv->link_detect);
@@ -2640,7 +2640,7 @@ short rtl8180_init(struct net_device *dev)
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE;
priv->ieee80211->active_scan = 1;
- priv->ieee80211->rate = 110; //11 mbps
+ priv->ieee80211->rate = 110; /* 11 mbps */
priv->ieee80211->modulation = IEEE80211_CCK_MODULATION;
priv->ieee80211->host_encrypt = 1;
priv->ieee80211->host_decrypt = 1;
@@ -2650,27 +2650,27 @@ short rtl8180_init(struct net_device *dev)
priv->ieee80211->ps_is_queue_empty = rtl8180_is_tx_queue_empty;
priv->hw_wep = hwwep;
- priv->prism_hdr=0;
- priv->dev=dev;
+ priv->prism_hdr = 0;
+ priv->dev = dev;
priv->retry_rts = DEFAULT_RETRY_RTS;
priv->retry_data = DEFAULT_RETRY_DATA;
priv->RFChangeInProgress = false;
priv->SetRFPowerStateInProgress = false;
priv->RFProgType = 0;
priv->bInHctTest = false;
- priv->bInactivePs = true;//false;
+ priv->bInactivePs = true; /* false; */
priv->ieee80211->bInactivePs = priv->bInactivePs;
priv->bSwRfProcessing = false;
priv->eRFPowerState = eRfOff;
priv->RfOffReason = 0;
priv->LedStrategy = SW_LED_MODE0;
- priv->TxPollingTimes = 0;//lzm add 080826
+ priv->TxPollingTimes = 0; /* lzm add 080826 */
priv->bLeisurePs = true;
priv->dot11PowerSaveMode = eActive;
priv->AdMinCheckPeriod = 5;
priv->AdMaxCheckPeriod = 10;
- priv->AdMaxRxSsThreshold = 30;//60->30
- priv->AdRxSsThreshold = 20;//50->20
+ priv->AdMaxRxSsThreshold = 30; /* 60->30 */
+ priv->AdRxSsThreshold = 20; /* 50->20 */
priv->AdCheckPeriod = priv->AdMinCheckPeriod;
priv->AdTickCount = 0;
priv->AdRxSignalStrength = -1;
@@ -2691,15 +2691,15 @@ short rtl8180_init(struct net_device *dev)
priv->bTxPowerTrack = false;
priv->ThermalMeter = 0;
priv->FalseAlarmRegValue = 0;
- priv->RegDigOfdmFaUpTh = 0xc; // Upper threhold of OFDM false alarm, which is used in DIG.
+ priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm, which is used in DIG. */
priv->DIG_NumberFallbackVote = 0;
priv->DIG_NumberUpgradeVote = 0;
priv->LastSignalStrengthInPercent = 0;
priv->Stats_SignalStrength = 0;
priv->LastRxPktAntenna = 0;
- priv->SignalQuality = 0; // in 0-100 index.
+ priv->SignalQuality = 0; /* in 0-100 index. */
priv->Stats_SignalQuality = 0;
- priv->RecvSignalPower = 0; // in dBm.
+ priv->RecvSignalPower = 0; /* in dBm. */
priv->Stats_RecvSignalPower = 0;
priv->AdMainAntennaRxOkCnt = 0;
priv->AdAuxAntennaRxOkCnt = 0;
@@ -2784,7 +2784,7 @@ short rtl8180_init(struct net_device *dev)
priv->ieee80211->data_hard_stop = rtl8180_data_hard_stop;
priv->ieee80211->data_hard_resume = rtl8180_data_hard_resume;
- priv->ieee80211->init_wmmparam_flag = 0;
+ priv->ieee80211->init_wmmparam_flag = 0;
priv->ieee80211->start_send_beacons = rtl8180_start_tx_beacon;
priv->ieee80211->stop_send_beacons = rtl8180_beacon_tx_disable;
@@ -2829,57 +2829,57 @@ short rtl8180_init(struct net_device *dev)
priv->ieee80211->modulation |= IEEE80211_OFDM_MODULATION;
priv->ieee80211->short_slot = 1;
- // just for sync 85
+ /* just for sync 85 */
priv->enable_gpio0 = 0;
eeprom_93cx6_read(&eeprom, EEPROM_SW_REVD_OFFSET, &eeprom_val);
usValue = eeprom_val;
- DMESG("usValue is 0x%x\n",usValue);
- //3Read AntennaDiversity
+ DMESG("usValue is 0x%x\n", usValue);
+ /* 3Read AntennaDiversity */
- // SW Antenna Diversity.
+ /* SW Antenna Diversity. */
if ((usValue & EEPROM_SW_AD_MASK) != EEPROM_SW_AD_ENABLE)
priv->EEPROMSwAntennaDiversity = false;
else
priv->EEPROMSwAntennaDiversity = true;
- // Default Antenna to use.
+ /* Default Antenna to use. */
if ((usValue & EEPROM_DEF_ANT_MASK) != EEPROM_DEF_ANT_1)
priv->EEPROMDefaultAntenna1 = false;
else
priv->EEPROMDefaultAntenna1 = true;
- if( priv->RegSwAntennaDiversityMechanism == 0 ) // Auto
+ if (priv->RegSwAntennaDiversityMechanism == 0) /* Auto */
/* 0: default from EEPROM. */
priv->bSwAntennaDiverity = priv->EEPROMSwAntennaDiversity;
else
/* 1:disable antenna diversity, 2: enable antenna diversity. */
- priv->bSwAntennaDiverity = ((priv->RegSwAntennaDiversityMechanism == 1)? false : true);
+ priv->bSwAntennaDiverity = ((priv->RegSwAntennaDiversityMechanism == 1) ? false : true);
if (priv->RegDefaultAntenna == 0)
/* 0: default from EEPROM. */
priv->bDefaultAntenna1 = priv->EEPROMDefaultAntenna1;
else
/* 1: main, 2: aux. */
- priv->bDefaultAntenna1 = ((priv->RegDefaultAntenna== 2) ? true : false);
+ priv->bDefaultAntenna1 = ((priv->RegDefaultAntenna == 2) ? true : false);
- /* rtl8185 can calc plcp len in HW.*/
+ /* rtl8185 can calc plcp len in HW. */
priv->hw_plcp_len = 1;
priv->plcp_preamble_mode = 2;
- /*the eeprom type is stored in RCR register bit #6 */
+ /* the eeprom type is stored in RCR register bit #6 */
if (RCR_9356SEL & read_nic_dword(dev, RCR))
- priv->epromtype=EPROM_93c56;
+ priv->epromtype = EPROM_93c56;
else
- priv->epromtype=EPROM_93c46;
+ priv->epromtype = EPROM_93c46;
eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)
dev->dev_addr, 3);
- for(i=1,j=0; i<14; i+=2,j++){
+ for (i = 1, j = 0; i < 14; i += 2, j++) {
eeprom_93cx6_read(&eeprom, EPROM_TXPW_CH1_2 + j, &word);
- priv->chtxpwr[i]=word & 0xff;
- priv->chtxpwr[i+1]=(word & 0xff00)>>8;
+ priv->chtxpwr[i] = word & 0xff;
+ priv->chtxpwr[i+1] = (word & 0xff00)>>8;
}
for (i = 1, j = 0; i < 14; i += 2, j++) {
eeprom_93cx6_read(&eeprom, EPROM_TXPW_OFDM_CH1_2 + j, &word);
@@ -2906,7 +2906,7 @@ short rtl8180_init(struct net_device *dev)
priv->ofdm_txpwr_base = (word>>4) & 0xf;
eeprom_93cx6_read(&eeprom, EPROM_VERSION, &version);
- DMESG("EEPROM version %x",version);
+ DMESG("EEPROM version %x", version);
priv->rcr_csense = 3;
eeprom_93cx6_read(&eeprom, ENERGY_TRESHOLD, &eeprom_val);
@@ -2922,43 +2922,43 @@ short rtl8180_init(struct net_device *dev)
priv->rf_set_chan = rtl8225z2_rf_set_chan;
priv->rf_set_sens = NULL;
- if (0!=alloc_rx_desc_ring(dev, priv->rxbuffersize, priv->rxringcount))
+ if (0 != alloc_rx_desc_ring(dev, priv->rxbuffersize, priv->rxringcount))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_MANAGEPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_BKPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_BEPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_VIPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_VOPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
TX_HIGHPRIORITY_RING_ADDR))
return -ENOMEM;
- if (0!=alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txbeaconcount,
+ if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txbeaconcount,
TX_BEACON_RING_ADDR))
return -ENOMEM;
- if(request_irq(dev->irq, (void *)rtl8180_interrupt, IRQF_SHARED, dev->name, dev)){
- DMESGE("Error allocating IRQ %d",dev->irq);
- return -1;
- }else{
- priv->irq=dev->irq;
- DMESG("IRQ %d",dev->irq);
+ if (request_irq(dev->irq, (void *)rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
+ DMESGE("Error allocating IRQ %d", dev->irq);
+ return -1;
+ } else {
+ priv->irq = dev->irq;
+ DMESG("IRQ %d", dev->irq);
}
return 0;
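The two loops above unpack per-channel TX power from the EEPROM, one 16-bit word per pair of adjacent channels, low byte first. A minimal sketch of that unpacking with the same eeprom_93cx6_read() helper; the function name and parameters here are illustrative, not the driver's.

#include <linux/eeprom_93cx6.h>	/* struct eeprom_93cx6, eeprom_93cx6_read() */

/* Reader's sketch: one EEPROM word supplies the TX power for two channels. */
static void read_chan_txpower_pair(struct eeprom_93cx6 *eeprom, u8 word_off,
				   u8 *pwr_lo_chan, u8 *pwr_hi_chan)
{
	u16 word;

	eeprom_93cx6_read(eeprom, word_off, &word);
	*pwr_lo_chan = word & 0xff;		/* e.g. channel 1 */
	*pwr_hi_chan = (word & 0xff00) >> 8;	/* e.g. channel 2 */
}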
@@ -2975,36 +2975,36 @@ void rtl8180_set_hw_wep(struct net_device *dev)
u8 security;
u32 key0_word4;
- pgreg=read_nic_byte(dev, PGSELECT);
- write_nic_byte(dev, PGSELECT, pgreg &~ (1<<PGSELECT_PG_SHIFT));
+ pgreg = read_nic_byte(dev, PGSELECT);
+ write_nic_byte(dev, PGSELECT, pgreg & ~(1<<PGSELECT_PG_SHIFT));
key0_word4 = read_nic_dword(dev, KEY0+4+4+4);
- key0_word4 &= ~ 0xff;
- key0_word4 |= priv->key0[3]& 0xff;
- write_nic_dword(dev,KEY0,(priv->key0[0]));
- write_nic_dword(dev,KEY0+4,(priv->key0[1]));
- write_nic_dword(dev,KEY0+4+4,(priv->key0[2]));
- write_nic_dword(dev,KEY0+4+4+4,(key0_word4));
-
- security = read_nic_byte(dev,SECURITY);
+ key0_word4 &= ~0xff;
+ key0_word4 |= priv->key0[3] & 0xff;
+ write_nic_dword(dev, KEY0, (priv->key0[0]));
+ write_nic_dword(dev, KEY0+4, (priv->key0[1]));
+ write_nic_dword(dev, KEY0+4+4, (priv->key0[2]));
+ write_nic_dword(dev, KEY0+4+4+4, (key0_word4));
+
+ security = read_nic_byte(dev, SECURITY);
security |= (1<<SECURITY_WEP_TX_ENABLE_SHIFT);
security |= (1<<SECURITY_WEP_RX_ENABLE_SHIFT);
- security &= ~ SECURITY_ENCRYP_MASK;
+ security &= ~SECURITY_ENCRYP_MASK;
security |= (SECURITY_ENCRYP_104<<SECURITY_ENCRYP_SHIFT);
write_nic_byte(dev, SECURITY, security);
- DMESG("key %x %x %x %x",read_nic_dword(dev,KEY0+4+4+4),
- read_nic_dword(dev,KEY0+4+4),read_nic_dword(dev,KEY0+4),
- read_nic_dword(dev,KEY0));
+ DMESG("key %x %x %x %x", read_nic_dword(dev, KEY0+4+4+4),
+ read_nic_dword(dev, KEY0+4+4), read_nic_dword(dev, KEY0+4),
+ read_nic_dword(dev, KEY0));
}
void rtl8185_rf_pins_enable(struct net_device *dev)
{
-// u16 tmp;
-// tmp = read_nic_word(dev, RFPinsEnable);
- write_nic_word(dev, RFPinsEnable, 0x1fff);// | tmp);
+ /* u16 tmp; */
+ /* tmp = read_nic_word(dev, RFPinsEnable); */
+ write_nic_word(dev, RFPinsEnable, 0x1fff); /* | tmp); */
}
void rtl8185_set_anaparam2(struct net_device *dev, u32 a)
@@ -3018,7 +3018,7 @@ void rtl8185_set_anaparam2(struct net_device *dev, u32 a)
write_nic_dword(dev, ANAPARAM2, a);
conf3 = read_nic_byte(dev, CONFIG3);
- write_nic_byte(dev, CONFIG3, conf3 &~(1<<CONFIG3_ANAPARAM_W_SHIFT));
+ write_nic_byte(dev, CONFIG3, conf3 & ~(1<<CONFIG3_ANAPARAM_W_SHIFT));
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
@@ -3033,7 +3033,7 @@ void rtl8180_set_anaparam(struct net_device *dev, u32 a)
write_nic_dword(dev, ANAPARAM, a);
conf3 = read_nic_byte(dev, CONFIG3);
- write_nic_byte(dev, CONFIG3, conf3 &~(1<<CONFIG3_ANAPARAM_W_SHIFT));
+ write_nic_byte(dev, CONFIG3, conf3 & ~(1<<CONFIG3_ANAPARAM_W_SHIFT));
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
}
@@ -3050,27 +3050,27 @@ void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
adr |= 0x80;
- phyw= ((data<<8) | adr);
+ phyw = ((data<<8) | adr);
- // Note that, we must write 0xff7c after 0x7d-0x7f to write BB register.
+ /* Note that we must write 0xff7c after 0x7d-0x7f to write a BB register. */
write_nic_byte(dev, 0x7f, ((phyw & 0xff000000) >> 24));
write_nic_byte(dev, 0x7e, ((phyw & 0x00ff0000) >> 16));
write_nic_byte(dev, 0x7d, ((phyw & 0x0000ff00) >> 8));
- write_nic_byte(dev, 0x7c, ((phyw & 0x000000ff) ));
+ write_nic_byte(dev, 0x7c, ((phyw & 0x000000ff)));
/* this is ok to fail when we write AGC table. check for AGC table might be
* done by masking with 0x7f instead of 0xff
*/
- //if(phyr != (data&0xff)) DMESGW("Phy write timeout %x %x %x", phyr, data,adr);
+ /* if (phyr != (data&0xff)) DMESGW("Phy write timeout %x %x %x", phyr, data, adr); */
}
-inline void write_phy_ofdm (struct net_device *dev, u8 adr, u32 data)
+inline void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data)
{
data = data & 0xff;
rtl8185_write_phy(dev, adr, data);
}
-void write_phy_cck (struct net_device *dev, u8 adr, u32 data)
+void write_phy_cck(struct net_device *dev, u8 adr, u32 data)
{
data = data & 0xff;
rtl8185_write_phy(dev, adr, data | 0x10000);
@@ -3080,19 +3080,19 @@ void rtl8185_set_rate(struct net_device *dev)
{
int i;
u16 word;
- int basic_rate,min_rr_rate,max_rr_rate;
+ int basic_rate, min_rr_rate, max_rr_rate;
basic_rate = ieeerate2rtlrate(240);
min_rr_rate = ieeerate2rtlrate(60);
max_rr_rate = ieeerate2rtlrate(240);
write_nic_byte(dev, RESP_RATE,
- max_rr_rate<<MAX_RESP_RATE_SHIFT| min_rr_rate<<MIN_RESP_RATE_SHIFT);
+ max_rr_rate<<MAX_RESP_RATE_SHIFT | min_rr_rate<<MIN_RESP_RATE_SHIFT);
word = read_nic_word(dev, BRSR);
word &= ~BRSR_MBR_8185;
- for(i=0;i<=basic_rate;i++)
+ for (i = 0; i <= basic_rate; i++)
word |= (1<<i);
write_nic_word(dev, BRSR, word);
@@ -3100,14 +3100,15 @@ void rtl8185_set_rate(struct net_device *dev)
void rtl8180_adapter_start(struct net_device *dev)
{
- struct r8180_priv *priv = ieee80211_priv(dev);
+ struct r8180_priv *priv = ieee80211_priv(dev);
rtl8180_rtx_disable(dev);
rtl8180_reset(dev);
/* enable beacon timeout, beacon TX ok and err
* LP tx ok and err, HP TX ok and err, NP TX ok and err,
- * RX ok and ERR, and GP timer */
+ * RX ok and ERR, and GP timer
+ */
priv->irq_mask = 0x6fcf;
priv->dma_poll_mask = 0;
@@ -3115,8 +3116,8 @@ void rtl8180_adapter_start(struct net_device *dev)
rtl8180_beacon_tx_disable(dev);
rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
- write_nic_dword(dev, MAC0, ((u32*)dev->dev_addr)[0]);
- write_nic_word(dev, MAC4, ((u32*)dev->dev_addr)[1] & 0xffff );
+ write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
+ write_nic_word(dev, MAC4, ((u32 *)dev->dev_addr)[1] & 0xffff);
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
rtl8180_update_msr(dev);
@@ -3128,21 +3129,21 @@ void rtl8180_adapter_start(struct net_device *dev)
rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
/*
- The following is very strange. seems to be that 1 means test mode,
- but we need to acknolwledges the nic when a packet is ready
- although we set it to 0
- */
+ * The following is very strange. It seems that 1 means test mode,
+ * but we need to acknowledge the nic when a packet is ready
+ * although we set it to 0
+ */
write_nic_byte(dev,
- CONFIG2, read_nic_byte(dev,CONFIG2) &~\
+ CONFIG2, read_nic_byte(dev, CONFIG2) & ~\
(1<<CONFIG2_DMA_POLLING_MODE_SHIFT));
- //^the nic isn't in test mode
+ /* ^the nic isn't in test mode */
write_nic_byte(dev,
- CONFIG2, read_nic_byte(dev,CONFIG2)|(1<<4));
+ CONFIG2, read_nic_byte(dev, CONFIG2)|(1<<4));
- rtl8180_set_mode(dev,EPROM_CMD_NORMAL);
+ rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
- write_nic_dword(dev,INT_TIMEOUT,0);
+ write_nic_dword(dev, INT_TIMEOUT, 0);
write_nic_byte(dev, WPA_CONFIG, 0);
@@ -3153,7 +3154,7 @@ void rtl8180_adapter_start(struct net_device *dev)
write_nic_byte(dev, GP_ENABLE, read_nic_byte(dev, GP_ENABLE) & ~(1<<6));
- /*FIXME cfg 3 ClkRun enable - isn't it ReadOnly ? */
+ /* FIXME cfg 3 ClkRun enable - isn't it ReadOnly ? */
rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
write_nic_byte(dev, CONFIG3, read_nic_byte(dev, CONFIG3)
| (1 << CONFIG3_CLKRUN_SHIFT));
@@ -3161,14 +3162,15 @@ void rtl8180_adapter_start(struct net_device *dev)
priv->rf_init(dev);
- if(priv->rf_set_sens != NULL)
- priv->rf_set_sens(dev,priv->sens);
+ if (priv->rf_set_sens != NULL)
+ priv->rf_set_sens(dev, priv->sens);
rtl8180_irq_enable(dev);
netif_start_queue(dev);
}
-/* this configures registers for beacon tx and enables it via
+/*
+ * This configures registers for beacon tx and enables it via
* rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might
* be used to stop beacon transmission
*/
@@ -3181,12 +3183,12 @@ void rtl8180_start_tx_beacon(struct net_device *dev)
rtl8180_irq_disable(dev);
rtl8180_beacon_tx_enable(dev);
- word = read_nic_word(dev, AtimWnd) &~ AtimWnd_AtimWnd;
- write_nic_word(dev, AtimWnd,word);// word |=
+ word = read_nic_word(dev, AtimWnd) & ~AtimWnd_AtimWnd;
+ write_nic_word(dev, AtimWnd, word); /* word |= */
word = read_nic_word(dev, BintrItv);
word &= ~BintrItv_BintrItv;
- word |= 1000;/*priv->ieee80211->current_network.beacon_interval *
+ word |= 1000; /* priv->ieee80211->current_network.beacon_interval *
((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
// FIXME: check if correct ^^ worked with 0x3e8;
*/
@@ -3194,7 +3196,7 @@ void rtl8180_start_tx_beacon(struct net_device *dev)
rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
- rtl8185b_irq_enable(dev);
+ rtl8185b_irq_enable(dev);
}
static struct net_device_stats *rtl8180_stats(struct net_device *dev)
@@ -3203,17 +3205,18 @@ static struct net_device_stats *rtl8180_stats(struct net_device *dev)
return &priv->ieee80211->stats;
}
-//
-// Change current and default preamble mode.
-//
+
+/*
+ * Change the current and default power save mode.
+ */
bool
MgntActSet_802_11_PowerSaveMode(
struct r8180_priv *priv,
RT_PS_MODE rtPsMode
)
{
- // Currently, we do not change power save mode on IBSS mode.
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ /* Currently, we do not change the power save mode in IBSS mode. */
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
return false;
priv->ieee80211->ps = rtPsMode;
@@ -3225,7 +3228,7 @@ void LeisurePSEnter(struct r8180_priv *priv)
{
if (priv->bLeisurePs) {
if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
- MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST);//IEEE80211_PS_ENABLE
+ MgntActSet_802_11_PowerSaveMode(priv, IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST); /* IEEE80211_PS_ENABLE */
}
}
@@ -3237,53 +3240,48 @@ void LeisurePSLeave(struct r8180_priv *priv)
}
}
-void rtl8180_hw_wakeup_wq (struct work_struct *work)
+void rtl8180_hw_wakeup_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_wakeup_wq);
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_wakeup_wq);
struct net_device *dev = ieee->dev;
rtl8180_hw_wakeup(dev);
}
-void rtl8180_hw_sleep_wq (struct work_struct *work)
+void rtl8180_hw_sleep_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device *ieee = container_of(dwork,struct ieee80211_device,hw_sleep_wq);
- struct net_device *dev = ieee->dev;
+ struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_sleep_wq);
+ struct net_device *dev = ieee->dev;
- rtl8180_hw_sleep_down(dev);
+ rtl8180_hw_sleep_down(dev);
}
-static void MgntLinkKeepAlive(struct r8180_priv *priv )
+static void MgntLinkKeepAlive(struct r8180_priv *priv)
{
if (priv->keepAliveLevel == 0)
return;
- if(priv->ieee80211->state == IEEE80211_LINKED)
- {
- //
- // Keep-Alive.
- //
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
+ /*
+ * Keep-Alive.
+ */
- if ( (priv->keepAliveLevel== 2) ||
+ if ((priv->keepAliveLevel == 2) ||
(priv->link_detect.LastNumTxUnicast == priv->NumTxUnicast &&
- priv->link_detect.LastNumRxUnicast == priv->ieee80211->NumRxUnicast )
- )
- {
+ priv->link_detect.LastNumRxUnicast == priv->ieee80211->NumRxUnicast)
+ ) {
priv->link_detect.IdleCount++;
- //
- // Send a Keep-Alive packet packet to AP if we had been idle for a while.
- //
- if(priv->link_detect.IdleCount >= ((KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)-1) )
- {
+ /*
+ * Send a Keep-Alive packet to the AP if we have been idle for a while.
+ */
+ if (priv->link_detect.IdleCount >= ((KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD)-1)) {
priv->link_detect.IdleCount = 0;
ieee80211_sta_ps_send_null_frame(priv->ieee80211, false);
}
- }
- else
- {
+ } else {
priv->link_detect.IdleCount = 0;
}
priv->link_detect.LastNumTxUnicast = priv->NumTxUnicast;
@@ -3301,46 +3299,45 @@ void rtl8180_watch_dog(struct net_device *dev)
u32 TotalRxNum = 0;
u16 SlotIndex = 0;
u16 i = 0;
- if(priv->ieee80211->actscanning == false){
- if((priv->ieee80211->iw_mode != IW_MODE_ADHOC) && (priv->ieee80211->state == IEEE80211_NOLINK) && (priv->ieee80211->beinretry == false) && (priv->eRFPowerState == eRfOn)){
+ if (priv->ieee80211->actscanning == false) {
+ if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) && (priv->ieee80211->state == IEEE80211_NOLINK) && (priv->ieee80211->beinretry == false) && (priv->eRFPowerState == eRfOn))
IPSEnter(dev);
- }
}
- //YJ,add,080828,for link state check
- if((priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_INFRA)){
+ /* YJ,add,080828,for link state check */
+ if ((priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
SlotIndex = (priv->link_detect.SlotIndex++) % priv->link_detect.SlotNum;
priv->link_detect.RxFrameNum[SlotIndex] = priv->ieee80211->NumRxDataInPeriod + priv->ieee80211->NumRxBcnInPeriod;
- for( i=0; i<priv->link_detect.SlotNum; i++ )
- TotalRxNum+= priv->link_detect.RxFrameNum[i];
+ for (i = 0; i < priv->link_detect.SlotNum; i++)
+ TotalRxNum += priv->link_detect.RxFrameNum[i];
- if(TotalRxNum == 0){
+ if (TotalRxNum == 0) {
priv->ieee80211->state = IEEE80211_ASSOCIATING;
queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
}
}
- //YJ,add,080828,for KeepAlive
+ /* YJ,add,080828,for KeepAlive */
MgntLinkKeepAlive(priv);
- //YJ,add,080828,for LPS
+ /* YJ,add,080828,for LPS */
if (priv->PowerProfile == POWER_PROFILE_BATTERY)
priv->bLeisurePs = true;
else if (priv->PowerProfile == POWER_PROFILE_AC) {
LeisurePSLeave(priv);
- priv->bLeisurePs= false;
+ priv->bLeisurePs = false;
}
- if(priv->ieee80211->state == IEEE80211_LINKED){
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
priv->link_detect.NumRxOkInPeriod = priv->ieee80211->NumRxDataInPeriod;
- if( priv->link_detect.NumRxOkInPeriod> 666 ||
- priv->link_detect.NumTxOkInPeriod> 666 ) {
+ if (priv->link_detect.NumRxOkInPeriod > 666 ||
+ priv->link_detect.NumTxOkInPeriod > 666) {
bBusyTraffic = true;
}
- if(((priv->link_detect.NumRxOkInPeriod + priv->link_detect.NumTxOkInPeriod) > 8)
+ if (((priv->link_detect.NumRxOkInPeriod + priv->link_detect.NumTxOkInPeriod) > 8)
|| (priv->link_detect.NumRxOkInPeriod > 2)) {
- bEnterPS= false;
+ bEnterPS = false;
} else
- bEnterPS= true;
+ bEnterPS = true;
if (bEnterPS)
LeisurePSEnter(priv);
@@ -3359,19 +3356,19 @@ int _rtl8180_up(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- priv->up=1;
+ priv->up = 1;
DMESG("Bringing up iface");
rtl8185b_adapter_start(dev);
rtl8185b_rx_enable(dev);
rtl8185b_tx_enable(dev);
- if(priv->bInactivePs){
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ if (priv->bInactivePs) {
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
IPSLeave(dev);
}
- timer_rate_adaptive((unsigned long)dev);
+ timer_rate_adaptive((unsigned long)dev);
watch_dog_adaptive((unsigned long)dev);
- if(priv->bSwAntennaDiverity)
+ if (priv->bSwAntennaDiverity)
SwAntennaDiversityTimerCallback(dev);
ieee80211_softmac_start_protocol(priv->ieee80211);
return 0;
@@ -3392,7 +3389,8 @@ int rtl8180_up(struct net_device *dev)
{
struct r8180_priv *priv = ieee80211_priv(dev);
- if (priv->up == 1) return -1;
+ if (priv->up == 1)
+ return -1;
return _rtl8180_up(dev);
}
@@ -3416,7 +3414,7 @@ int rtl8180_down(struct net_device *dev)
if (priv->up == 0)
return -1;
- priv->up=0;
+ priv->up = 0;
ieee80211_softmac_stop_protocol(priv->ieee80211);
/* FIXME */
@@ -3432,8 +3430,8 @@ int rtl8180_down(struct net_device *dev)
cancel_delayed_work(&priv->ieee80211->hw_dig_wq);
cancel_delayed_work(&priv->ieee80211->tx_pw_wq);
del_timer_sync(&priv->SwAntennaDiversityTimer);
- SetZebraRFPowerState8185(dev,eRfOff);
- memset(&(priv->ieee80211->current_network),0,sizeof(struct ieee80211_network));
+ SetZebraRFPowerState8185(dev, eRfOff);
+ memset(&(priv->ieee80211->current_network), 0, sizeof(struct ieee80211_network));
priv->ieee80211->state = IEEE80211_NOLINK;
return 0;
}
@@ -3483,7 +3481,7 @@ static void r8180_set_multicast(struct net_device *dev)
struct r8180_priv *priv = ieee80211_priv(dev);
short promisc;
- promisc = (dev->flags & IFF_PROMISC) ? 1:0;
+ promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
if (promisc != priv->promisc)
rtl8180_restart(dev);
@@ -3500,7 +3498,7 @@ int r8180_set_mac_adr(struct net_device *dev, void *mac)
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- if(priv->ieee80211->iw_mode == IW_MODE_MASTER)
+ if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
memcpy(priv->ieee80211->current_network.bssid, dev->dev_addr, ETH_ALEN);
if (priv->up) {
@@ -3518,7 +3516,7 @@ int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *) rq;
- int ret=-1;
+ int ret = -1;
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
@@ -3549,21 +3547,21 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
{
unsigned long ioaddr = 0;
struct net_device *dev = NULL;
- struct r8180_priv *priv= NULL;
+ struct r8180_priv *priv = NULL;
u8 unit = 0;
unsigned long pmem_start, pmem_len, pmem_flags;
DMESG("Configuring chip resources");
- if( pci_enable_device (pdev) ){
+ if (pci_enable_device(pdev)) {
DMESG("Failed to enable PCI device");
return -EIO;
}
pci_set_master(pdev);
pci_set_dma_mask(pdev, 0xffffff00ULL);
- pci_set_consistent_dma_mask(pdev,0xffffff00ULL);
+ pci_set_consistent_dma_mask(pdev, 0xffffff00ULL);
dev = alloc_ieee80211(sizeof(struct r8180_priv));
if (!dev)
return -ENOMEM;
@@ -3578,26 +3576,26 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
pmem_start = pci_resource_start(pdev, 1);
pmem_len = pci_resource_len(pdev, 1);
- pmem_flags = pci_resource_flags (pdev, 1);
+ pmem_flags = pci_resource_flags(pdev, 1);
if (!(pmem_flags & IORESOURCE_MEM)) {
DMESG("region #1 not a MMIO resource, aborting");
goto fail;
}
- if( ! request_mem_region(pmem_start, pmem_len, RTL8180_MODULE_NAME)) {
+ if (!request_mem_region(pmem_start, pmem_len, RTL8180_MODULE_NAME)) {
DMESG("request_mem_region failed!");
goto fail;
}
- ioaddr = (unsigned long)ioremap_nocache( pmem_start, pmem_len);
- if( ioaddr == (unsigned long)NULL ){
+ ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
+ if (ioaddr == (unsigned long)NULL) {
DMESG("ioremap failed!");
goto fail1;
}
- dev->mem_start = ioaddr; // shared mem start
- dev->mem_end = ioaddr + pci_resource_len(pdev, 0); // shared mem end
+ dev->mem_start = ioaddr; /* shared mem start */
+ dev->mem_end = ioaddr + pci_resource_len(pdev, 0); /* shared mem end */
pci_read_config_byte(pdev, 0x05, &unit);
pci_write_config_byte(pdev, 0x05, unit & (~0x04));
@@ -3608,16 +3606,16 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
dev->netdev_ops = &rtl8180_netdev_ops;
dev->wireless_handlers = &r8180_wx_handlers_def;
- dev->type=ARPHRD_ETHER;
+ dev->type = ARPHRD_ETHER;
dev->watchdog_timeo = HZ*3;
- if (dev_alloc_name(dev, ifname) < 0){
- DMESG("Oops: devname already taken! Trying wlan%%d...\n");
+ if (dev_alloc_name(dev, ifname) < 0) {
+ DMESG("Oops: devname already taken! Trying wlan%%d...\n");
ifname = "wlan%d";
dev_alloc_name(dev, ifname);
- }
+ }
- if(rtl8180_init(dev)!=0){
+ if (rtl8180_init(dev) != 0) {
DMESG("Initialization failed");
goto fail1;
}
@@ -3631,16 +3629,16 @@ static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
DMESG("Driver probe completed\n");
return 0;
fail1:
- if( dev->mem_start != (unsigned long)NULL ){
- iounmap( (void *)dev->mem_start );
- release_mem_region( pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1) );
+ if (dev->mem_start != (unsigned long)NULL) {
+ iounmap((void *)dev->mem_start);
+ release_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
}
fail:
- if(dev){
+ if (dev) {
if (priv->irq) {
free_irq(dev->irq, dev);
- dev->irq=0;
+ dev->irq = 0;
}
free_ieee80211(dev);
}
@@ -3668,19 +3666,19 @@ static void __devexit rtl8180_pci_remove(struct pci_dev *pdev)
rtl8180_reset(dev);
mdelay(10);
- if(priv->irq){
- DMESG("Freeing irq %d",dev->irq);
+ if (priv->irq) {
+ DMESG("Freeing irq %d", dev->irq);
free_irq(dev->irq, dev);
- priv->irq=0;
+ priv->irq = 0;
}
free_rx_desc_ring(dev);
free_tx_desc_rings(dev);
- if( dev->mem_start != (unsigned long)NULL ){
- iounmap( (void *)dev->mem_start );
- release_mem_region( pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1) );
+ if (dev->mem_start != (unsigned long)NULL) {
+ iounmap((void *)dev->mem_start);
+ release_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
}
free_ieee80211(dev);
@@ -3740,7 +3738,7 @@ static int __init rtl8180_pci_module_init(void)
static void __exit rtl8180_pci_module_exit(void)
{
- pci_unregister_driver (&rtl8180_pci_driver);
+ pci_unregister_driver(&rtl8180_pci_driver);
rtl8180_proc_module_remove();
ieee80211_crypto_tkip_exit();
ieee80211_crypto_ccmp_exit();
@@ -3755,109 +3753,111 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri)
short enough_desc;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- spin_lock_irqsave(&priv->tx_lock,flags);
- enough_desc = check_nic_enought_desc(dev,pri);
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ enough_desc = check_nic_enought_desc(dev, pri);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
- if(enough_desc)
+ if (enough_desc)
ieee80211_rtl_wake_queue(priv->ieee80211);
}
-void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
+void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
{
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
- u32 *tail; //tail virtual addr
- u32 *head; //head virtual addr
- u32 *begin;//start of ring virtual addr
- u32 *nicv; //nic pointer virtual addr
- u32 nic; //nic pointer physical addr
- u32 nicbegin;// start of ring physical addr
+ u32 *tail; /* tail virtual addr */
+ u32 *head; /* head virtual addr */
+ u32 *begin; /* start of ring virtual addr */
+ u32 *nicv; /* nic pointer virtual addr */
+ u32 nic; /* nic pointer physical addr */
+ u32 nicbegin; /* start of ring physical addr */
unsigned long flag;
- /* physical addr are ok on 32 bits since we set DMA mask*/
+ /* physical addr are ok on 32 bits since we set DMA mask */
int offs;
- int j,i;
+ int j, i;
int hd;
- if (error) priv->stats.txretry++; //tony 20060601
- spin_lock_irqsave(&priv->tx_lock,flag);
- switch(pri) {
+ if (error)
+ priv->stats.txretry++; /* tony 20060601 */
+ spin_lock_irqsave(&priv->tx_lock, flag);
+ switch (pri) {
case MANAGE_PRIORITY:
tail = priv->txmapringtail;
begin = priv->txmapring;
head = priv->txmapringhead;
- nic = read_nic_dword(dev,TX_MANAGEPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_MANAGEPRIORITY_RING_ADDR);
nicbegin = priv->txmapringdma;
break;
case BK_PRIORITY:
tail = priv->txbkpringtail;
begin = priv->txbkpring;
head = priv->txbkpringhead;
- nic = read_nic_dword(dev,TX_BKPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_BKPRIORITY_RING_ADDR);
nicbegin = priv->txbkpringdma;
break;
case BE_PRIORITY:
tail = priv->txbepringtail;
begin = priv->txbepring;
head = priv->txbepringhead;
- nic = read_nic_dword(dev,TX_BEPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_BEPRIORITY_RING_ADDR);
nicbegin = priv->txbepringdma;
break;
case VI_PRIORITY:
tail = priv->txvipringtail;
begin = priv->txvipring;
head = priv->txvipringhead;
- nic = read_nic_dword(dev,TX_VIPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_VIPRIORITY_RING_ADDR);
nicbegin = priv->txvipringdma;
break;
case VO_PRIORITY:
tail = priv->txvopringtail;
begin = priv->txvopring;
head = priv->txvopringhead;
- nic = read_nic_dword(dev,TX_VOPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_VOPRIORITY_RING_ADDR);
nicbegin = priv->txvopringdma;
break;
case HI_PRIORITY:
tail = priv->txhpringtail;
begin = priv->txhpring;
head = priv->txhpringhead;
- nic = read_nic_dword(dev,TX_HIGHPRIORITY_RING_ADDR);
+ nic = read_nic_dword(dev, TX_HIGHPRIORITY_RING_ADDR);
nicbegin = priv->txhpringdma;
break;
default:
- spin_unlock_irqrestore(&priv->tx_lock,flag);
+ spin_unlock_irqrestore(&priv->tx_lock, flag);
return ;
}
- nicv = (u32*) ((nic - nicbegin) + (u8*)begin);
- if((head <= tail && (nicv > tail || nicv < head)) ||
- (head > tail && (nicv > tail && nicv < head))){
+ nicv = (u32 *)((nic - nicbegin) + (u8 *)begin);
+ if ((head <= tail && (nicv > tail || nicv < head)) ||
+ (head > tail && (nicv > tail && nicv < head))) {
DMESGW("nic has lost pointer");
- spin_unlock_irqrestore(&priv->tx_lock,flag);
+ spin_unlock_irqrestore(&priv->tx_lock, flag);
rtl8180_restart(dev);
return;
}
- /* we check all the descriptors between the head and the nic,
+ /*
+ * We check all the descriptors between the head and the nic,
* but not the currently pointed by the nic (the next to be txed)
* and the previous of the pointed (might be in process ??)
- */
+ */
offs = (nic - nicbegin);
- offs = offs / 8 /4;
- hd = (head - begin) /8;
+ offs = offs / 8 / 4;
+ hd = (head - begin) / 8;
- if(offs >= hd)
+ if (offs >= hd)
j = offs - hd;
else
- j = offs + (priv->txringcount -1 -hd);
+ j = offs + (priv->txringcount - 1 - hd);
- j-=2;
- if(j<0) j=0;
+ j -= 2;
+ if (j < 0)
+ j = 0;
- for(i=0;i<j;i++)
- {
- if((*head) & (1<<31))
+ for (i = 0; i < j; i++) {
+ if ((*head) & (1<<31))
break;
- if(((*head)&(0x10000000)) != 0){
+ if (((*head)&(0x10000000)) != 0) {
priv->CurrRetryCnt += (u16)((*head) & (0x000000ff));
if (!error)
priv->NumTxOkTotal++;
@@ -3866,15 +3866,16 @@ void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
if (!error)
priv->NumTxOkBytesTotal += (*(head+3)) & (0x00000fff);
- *head = *head &~ (1<<31);
+ *head = *head & ~(1<<31);
- if((head - begin)/8 == priv->txringcount-1)
- head=begin;
+ if ((head - begin)/8 == priv->txringcount-1)
+ head = begin;
else
- head+=8;
+ head += 8;
}
- /* the head has been moved to the last certainly TXed
+ /*
+ * The head has been moved to the last certainly TXed
* (or at least processed by the nic) packet.
* The driver take forcefully owning of all these packets
* If the packet previous of the nic pointer has been
@@ -3883,14 +3884,14 @@ void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
* TXed no memory leak occour at all.
*/
- switch(pri) {
+ switch (pri) {
case MANAGE_PRIORITY:
priv->txmapringhead = head;
- if(priv->ack_tx_to_ieee){
- if(rtl8180_is_tx_queue_empty(dev)){
+ if (priv->ack_tx_to_ieee) {
+ if (rtl8180_is_tx_queue_empty(dev)) {
priv->ack_tx_to_ieee = 0;
- ieee80211_ps_tx_ack(priv->ieee80211,!error);
+ ieee80211_ps_tx_ack(priv->ieee80211, !error);
}
}
break;
@@ -3911,17 +3912,17 @@ void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
break;
}
- spin_unlock_irqrestore(&priv->tx_lock,flag);
+ spin_unlock_irqrestore(&priv->tx_lock, flag);
}
void rtl8180_tx_irq_wq(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct ieee80211_device * ieee = (struct ieee80211_device*)
- container_of(dwork, struct ieee80211_device, watch_dog_wq);
+ struct ieee80211_device *ieee = (struct ieee80211_device *)
+ container_of(dwork, struct ieee80211_device, watch_dog_wq);
struct net_device *dev = ieee->dev;
- rtl8180_tx_isr(dev,MANAGE_PRIORITY,0);
+ rtl8180_tx_isr(dev, MANAGE_PRIORITY, 0);
}
irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
{
@@ -3931,23 +3932,24 @@ irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
u32 inta;
/* We should return IRQ_NONE, but for now let me keep this */
- if(priv->irq_enabled == 0) return IRQ_HANDLED;
+ if (priv->irq_enabled == 0)
+ return IRQ_HANDLED;
- spin_lock_irqsave(&priv->irq_th_lock,flags);
+ spin_lock_irqsave(&priv->irq_th_lock, flags);
- //ISR: 4bytes
- inta = read_nic_dword(dev, ISR);// & priv->IntrMask;
- write_nic_dword(dev,ISR,inta); // reset int situation
+ /* ISR: 4bytes */
+ inta = read_nic_dword(dev, ISR); /* & priv->IntrMask; */
+ write_nic_dword(dev, ISR, inta); /* reset int situation */
priv->stats.shints++;
- if(!inta){
- spin_unlock_irqrestore(&priv->irq_th_lock,flags);
+ if (!inta) {
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return IRQ_HANDLED;
/*
- most probably we can safely return IRQ_NONE,
- but for now is better to avoid problems
- */
+ * Most probably we can safely return IRQ_NONE,
+ * but for now it is better to avoid problems.
+ */
}
if (inta == 0xffff) {
@@ -3958,8 +3960,8 @@ irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
priv->stats.ints++;
- if(!netif_running(dev)) {
- spin_unlock_irqrestore(&priv->irq_th_lock,flags);
+ if (!netif_running(dev)) {
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return IRQ_HANDLED;
}
@@ -3973,70 +3975,70 @@ irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
priv->stats.txbeaconerr++;
if (inta & IMR_TMGDOK)
- rtl8180_tx_isr(dev,MANAGE_PRIORITY,0);
+ rtl8180_tx_isr(dev, MANAGE_PRIORITY, 0);
- if(inta & ISR_THPDER){
+ if (inta & ISR_THPDER) {
priv->stats.txhperr++;
- rtl8180_tx_isr(dev,HI_PRIORITY,1);
+ rtl8180_tx_isr(dev, HI_PRIORITY, 1);
priv->ieee80211->stats.tx_errors++;
}
- if(inta & ISR_THPDOK){ //High priority tx ok
- priv->link_detect.NumTxOkInPeriod++; //YJ,add,080828
+ if (inta & ISR_THPDOK) { /* High priority tx ok */
+ priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
priv->stats.txhpokint++;
- rtl8180_tx_isr(dev,HI_PRIORITY,0);
+ rtl8180_tx_isr(dev, HI_PRIORITY, 0);
}
- if(inta & ISR_RER) {
+ if (inta & ISR_RER)
priv->stats.rxerr++;
- }
- if(inta & ISR_TBKDER){ //corresponding to BK_PRIORITY
+
+ if (inta & ISR_TBKDER) { /* corresponding to BK_PRIORITY */
priv->stats.txbkperr++;
priv->ieee80211->stats.tx_errors++;
- rtl8180_tx_isr(dev,BK_PRIORITY,1);
+ rtl8180_tx_isr(dev, BK_PRIORITY, 1);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
- if(inta & ISR_TBEDER){ //corresponding to BE_PRIORITY
+ if (inta & ISR_TBEDER) { /* corresponding to BE_PRIORITY */
priv->stats.txbeperr++;
priv->ieee80211->stats.tx_errors++;
- rtl8180_tx_isr(dev,BE_PRIORITY,1);
+ rtl8180_tx_isr(dev, BE_PRIORITY, 1);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
- if(inta & ISR_TNPDER){ //corresponding to VO_PRIORITY
+ if (inta & ISR_TNPDER) { /* corresponding to VO_PRIORITY */
priv->stats.txnperr++;
priv->ieee80211->stats.tx_errors++;
- rtl8180_tx_isr(dev,NORM_PRIORITY,1);
+ rtl8180_tx_isr(dev, NORM_PRIORITY, 1);
rtl8180_try_wake_queue(dev, NORM_PRIORITY);
}
- if(inta & ISR_TLPDER){ //corresponding to VI_PRIORITY
+ if (inta & ISR_TLPDER) { /* corresponding to VI_PRIORITY */
priv->stats.txlperr++;
priv->ieee80211->stats.tx_errors++;
- rtl8180_tx_isr(dev,LOW_PRIORITY,1);
+ rtl8180_tx_isr(dev, LOW_PRIORITY, 1);
rtl8180_try_wake_queue(dev, LOW_PRIORITY);
}
- if(inta & ISR_ROK){
+ if (inta & ISR_ROK) {
priv->stats.rxint++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if(inta & ISR_RQoSOK ){
+ if (inta & ISR_RQoSOK) {
priv->stats.rxint++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if(inta & ISR_BcnInt) {
+
+ if (inta & ISR_BcnInt)
rtl8180_prepare_beacon(dev);
- }
- if(inta & ISR_RDU){
+ if (inta & ISR_RDU) {
DMESGW("No RX descriptor available");
priv->stats.rxrdu++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if(inta & ISR_RXFOVW){
+ if (inta & ISR_RXFOVW) {
priv->stats.rxoverflow++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
@@ -4044,39 +4046,39 @@ irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
if (inta & ISR_TXFOVW)
priv->stats.txoverflow++;
- if(inta & ISR_TNPDOK){ //Normal priority tx ok
- priv->link_detect.NumTxOkInPeriod++; //YJ,add,080828
+ if (inta & ISR_TNPDOK) { /* Normal priority tx ok */
+ priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
priv->stats.txnpokint++;
- rtl8180_tx_isr(dev,NORM_PRIORITY,0);
+ rtl8180_tx_isr(dev, NORM_PRIORITY, 0);
}
- if(inta & ISR_TLPDOK){ //Low priority tx ok
- priv->link_detect.NumTxOkInPeriod++; //YJ,add,080828
+ if (inta & ISR_TLPDOK) { /* Low priority tx ok */
+ priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
priv->stats.txlpokint++;
- rtl8180_tx_isr(dev,LOW_PRIORITY,0);
+ rtl8180_tx_isr(dev, LOW_PRIORITY, 0);
rtl8180_try_wake_queue(dev, LOW_PRIORITY);
}
- if(inta & ISR_TBKDOK){ //corresponding to BK_PRIORITY
+ if (inta & ISR_TBKDOK) { /* corresponding to BK_PRIORITY */
priv->stats.txbkpokint++;
- priv->link_detect.NumTxOkInPeriod++; //YJ,add,080828
- rtl8180_tx_isr(dev,BK_PRIORITY,0);
+ priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ rtl8180_tx_isr(dev, BK_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
- if(inta & ISR_TBEDOK){ //corresponding to BE_PRIORITY
+ if (inta & ISR_TBEDOK) { /* corresponding to BE_PRIORITY */
priv->stats.txbeperr++;
- priv->link_detect.NumTxOkInPeriod++; //YJ,add,080828
- rtl8180_tx_isr(dev,BE_PRIORITY,0);
+ priv->link_detect.NumTxOkInPeriod++; /* YJ,add,080828 */
+ rtl8180_tx_isr(dev, BE_PRIORITY, 0);
rtl8180_try_wake_queue(dev, BE_PRIORITY);
}
force_pci_posting(dev);
- spin_unlock_irqrestore(&priv->irq_th_lock,flags);
+ spin_unlock_irqrestore(&priv->irq_th_lock, flags);
return IRQ_HANDLED;
}
-void rtl8180_irq_rx_tasklet(struct r8180_priv* priv)
+void rtl8180_irq_rx_tasklet(struct r8180_priv *priv)
{
rtl8180_rx(priv->dev);
}
@@ -4089,14 +4091,14 @@ void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
u8 btPSR;
u8 btConfig0;
RT_RF_POWER_STATE eRfPowerStateToSet;
- bool bActuallySet=false;
+ bool bActuallySet = false;
char *argv[3];
- static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};
+ static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
+ static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL};
static int readf_count = 0;
- if(readf_count % 10 == 0)
+ if (readf_count % 10 == 0)
priv->PowerProfile = read_acadapter_file("/proc/acpi/ac_adapter/AC0/state");
readf_count = (readf_count+1)%0xffff;
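
The descriptor-reclaim walk that the rewritten comments in rtl8180_tx_isr() describe (walk from the software head towards the position reported by the NIC, spare the descriptor the NIC currently points at and the one before it, and stop at the first descriptor the NIC still owns) boils down to the sketch below. It is illustrative only: reclaim_tx_ring(), DESC_WORDS and OWN_BIT are placeholder names rather than symbols from r8180_core.c, the arithmetic works in descriptor indices instead of raw pointers, and it builds as plain C outside the kernel.

#include <stdint.h>
#include <stddef.h>

#define DESC_WORDS 8            /* each tx descriptor is 8 dwords */
#define OWN_BIT    (1u << 31)   /* set while the NIC still owns the descriptor */

/*
 * Reclaim completed descriptors between the software head and the index
 * derived from the NIC's hardware pointer.  'nic_off' is the hardware
 * pointer as a byte offset from the ring base, 'count' is the number of
 * descriptors in the ring.  Returns the new head index.
 */
static size_t reclaim_tx_ring(uint32_t *ring, size_t count,
                              size_t head, size_t nic_off)
{
        size_t nic = nic_off / (DESC_WORDS * sizeof(uint32_t));
        size_t todo = (nic >= head) ? nic - head : nic + count - head;

        /* spare the descriptor the NIC points at and the previous one */
        todo = (todo > 2) ? todo - 2 : 0;

        while (todo--) {
                uint32_t *desc = ring + head * DESC_WORDS;

                if (*desc & OWN_BIT)    /* NIC not done with this one yet */
                        break;

                *desc &= ~OWN_BIT;      /* defensive clear, as in the driver */
                head = (head + 1) % count;
        }
        return head;
}

In the driver the ring is selected by the switch (pri) above, and the ownership flag is bit 31 of the first descriptor dword, which matches the (*head) & (1<<31) test in the hunk.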
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index 6edf5a46fa4..2a2afd51cf4 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -1,14 +1,13 @@
/*
- This is part of the rtl8180-sa2400 driver
- released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
-
- This files contains programming code for the rtl8225
- radio frontend.
-
- *Many* thanks to Realtek Corp. for their great support!
-
-*/
+ * This is part of the rtl8180-sa2400 driver
+ * released under the GPL (See file COPYING for details).
+ * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ *
+ * This file contains programming code for the rtl8225
+ * radio frontend.
+ *
+ * *Many* thanks to Realtek Corp. for their great support!
+ */
#include "r8180_hw.h"
#include "r8180_rtl8225.h"
@@ -225,7 +224,7 @@ static void rtl8225_SetTXPowerLevel(struct net_device *dev, short ch)
}
static const u8 rtl8225z2_threshold[] = {
- 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd,
+ 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd,
};
static const u8 rtl8225z2_gain_bg[] = {
@@ -307,7 +306,7 @@ static u32 read_rtl8225(struct net_device *dev, u8 adr)
u32 data2Write = ((u32)(adr & 0x1f)) << 27;
u32 dataRead;
u32 mask;
- u16 oval,oval2,oval3,tmp;
+ u16 oval, oval2, oval3, tmp;
int i;
short bit, rw;
u8 wLength = 6;
@@ -325,9 +324,11 @@ static u32 read_rtl8225(struct net_device *dev, u8 adr)
oval &= ~0xf;
- write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN ); udelay(4);
+ write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN);
+ udelay(4);
- write_nic_word(dev, RFPinsOutput, oval ); udelay(5);
+ write_nic_word(dev, RFPinsOutput, oval);
+ udelay(5);
rw = 0;
@@ -335,31 +336,45 @@ static u32 read_rtl8225(struct net_device *dev, u8 adr)
for (i = 0; i < wLength/2; i++) {
bit = ((data2Write&mask) != 0) ? 1 : 0;
- write_nic_word(dev, RFPinsOutput, bit|oval | rw); udelay(1);
+ write_nic_word(dev, RFPinsOutput, bit | oval | rw);
+ udelay(1);
- write_nic_word(dev, RFPinsOutput, bit|oval | BB_HOST_BANG_CLK | rw); udelay(2);
- write_nic_word(dev, RFPinsOutput, bit|oval | BB_HOST_BANG_CLK | rw); udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ bit | oval | BB_HOST_BANG_CLK | rw);
+ udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ bit | oval | BB_HOST_BANG_CLK | rw);
+ udelay(2);
- mask = (low2high) ? (mask<<1): (mask>>1);
+ mask = (low2high) ? (mask<<1) : (mask>>1);
if (i == 2) {
rw = BB_HOST_BANG_RW;
- write_nic_word(dev, RFPinsOutput, bit|oval | BB_HOST_BANG_CLK | rw); udelay(2);
- write_nic_word(dev, RFPinsOutput, bit|oval | rw); udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ bit | oval | BB_HOST_BANG_CLK | rw);
+ udelay(2);
+ write_nic_word(dev, RFPinsOutput, bit | oval | rw);
+ udelay(2);
break;
}
- bit = ((data2Write&mask) != 0) ? 1: 0;
+ bit = ((data2Write&mask) != 0) ? 1 : 0;
- write_nic_word(dev, RFPinsOutput, oval|bit|rw| BB_HOST_BANG_CLK); udelay(2);
- write_nic_word(dev, RFPinsOutput, oval|bit|rw| BB_HOST_BANG_CLK); udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ oval | bit | rw | BB_HOST_BANG_CLK);
+ udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ oval | bit | rw | BB_HOST_BANG_CLK);
+ udelay(2);
- write_nic_word(dev, RFPinsOutput, oval| bit |rw); udelay(1);
+ write_nic_word(dev, RFPinsOutput, oval | bit | rw);
+ udelay(1);
mask = (low2high) ? (mask<<1) : (mask>>1);
}
- write_nic_word(dev, RFPinsOutput, rw|oval); udelay(2);
+ write_nic_word(dev, RFPinsOutput, rw|oval);
+ udelay(2);
mask = (low2high) ? 0x01 : (((u32)0x01) << (12-1));
/*
@@ -371,9 +386,12 @@ static u32 read_rtl8225(struct net_device *dev, u8 adr)
for (i = 0; i < rLength; i++) {
write_nic_word(dev, RFPinsOutput, rw|oval); udelay(1);
- write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2);
- write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2);
- write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK); udelay(2);
+ write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
+ udelay(2);
+ write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
+ udelay(2);
+ write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
+ udelay(2);
tmp = read_nic_word(dev, RFPinsInput);
dataRead |= (tmp & BB_HOST_BANG_CLK ? mask : 0);
@@ -383,7 +401,9 @@ static u32 read_rtl8225(struct net_device *dev, u8 adr)
mask = (low2high) ? (mask<<1) : (mask>>1);
}
- write_nic_word(dev, RFPinsOutput, BB_HOST_BANG_EN|BB_HOST_BANG_RW|oval); udelay(2);
+ write_nic_word(dev, RFPinsOutput,
+ BB_HOST_BANG_EN | BB_HOST_BANG_RW | oval);
+ udelay(2);
write_nic_word(dev, RFPinsEnable, oval2);
write_nic_word(dev, RFPinsSelect, oval3); /* Set To SW Switch */
@@ -426,7 +446,7 @@ void rtl8225z2_rf_close(struct net_device *dev)
s8 DbmToTxPwrIdx(struct r8180_priv *priv, WIRELESS_MODE WirelessMode,
s32 PowerInDbm)
{
- bool bUseDefault = true;
+ bool bUseDefault = true;
s8 TxPwrIdx = 0;
/*
@@ -486,8 +506,10 @@ void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch)
if (IS_DOT11D_ENABLE(priv->ieee80211) &&
IS_DOT11D_STATE_DONE(priv->ieee80211)) {
u8 MaxTxPwrInDbm = DOT11D_GetMaxTxPwrInDbm(priv->ieee80211, ch);
- u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B, MaxTxPwrInDbm);
- u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G, MaxTxPwrInDbm);
+ u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B,
+ MaxTxPwrInDbm);
+ u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G,
+ MaxTxPwrInDbm);
if (cck_power_level > CckMaxPwrIdx)
cck_power_level = CckMaxPwrIdx;
@@ -524,7 +546,7 @@ void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch)
if (ofdm_power_level <= 11) {
write_phy_ofdm(dev, 0x07, 0x5c);
write_phy_ofdm(dev, 0x09, 0x5c);
- }
+ }
if (ofdm_power_level <= 17) {
write_phy_ofdm(dev, 0x07, 0x54);
@@ -613,7 +635,7 @@ void rtl8225z2_rf_init(struct net_device *dev)
int i;
short channel = 1;
u16 brsr;
- u32 data,addr;
+ u32 data, addr;
priv->chan = channel;
@@ -740,7 +762,7 @@ void rtl8225z2_rf_init(struct net_device *dev)
write_phy_ofdm(dev, 0x26, 0x90); mdelay(1);
write_phy_ofdm(dev, 0x27, 0x88); mdelay(1);
- rtl8225z2_set_gain(dev,4);
+ rtl8225z2_set_gain(dev, 4);
write_phy_cck(dev, 0x0, 0x98); mdelay(1);
write_phy_cck(dev, 0x3, 0x20); mdelay(1);
@@ -803,12 +825,12 @@ void rtl8225z2_rf_set_mode(struct net_device *dev)
write_phy_ofdm(dev, 0xf, 0x20);
write_phy_ofdm(dev, 0x11, 0x7);
- rtl8225z2_set_gain(dev,4);
+ rtl8225z2_set_gain(dev, 4);
- write_phy_ofdm(dev,0x15, 0x40);
- write_phy_ofdm(dev,0x17, 0x40);
+ write_phy_ofdm(dev, 0x15, 0x40);
+ write_phy_ofdm(dev, 0x17, 0x40);
- write_nic_dword(dev, 0x94,0x10000000);
+ write_nic_dword(dev, 0x94, 0x10000000);
} else {
write_rtl8225(dev, 0x5, 0x1864);
write_nic_dword(dev, RF_PARA, 0x10044);
@@ -819,18 +841,18 @@ void rtl8225z2_rf_set_mode(struct net_device *dev)
write_phy_ofdm(dev, 0xf, 0x20);
write_phy_ofdm(dev, 0x11, 0x7);
- rtl8225z2_set_gain(dev,4);
+ rtl8225z2_set_gain(dev, 4);
- write_phy_ofdm(dev,0x15, 0x40);
- write_phy_ofdm(dev,0x17, 0x40);
+ write_phy_ofdm(dev, 0x15, 0x40);
+ write_phy_ofdm(dev, 0x17, 0x40);
- write_nic_dword(dev, 0x94,0x04000002);
+ write_nic_dword(dev, 0x94, 0x04000002);
}
}
-#define MAX_DOZE_WAITING_TIMES_85B 20
-#define MAX_POLLING_24F_TIMES_87SE 10
-#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5
+#define MAX_DOZE_WAITING_TIMES_85B 20
+#define MAX_POLLING_24F_TIMES_87SE 10
+#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5
bool SetZebraRFPowerState8185(struct net_device *dev,
RT_RF_POWER_STATE eRFPowerState)
@@ -882,12 +904,14 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
break;
case eRfSleep:
for (QueueID = 0, i = 0; QueueID < 6;) {
- if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
+ if (get_curr_tx_free_desc(dev, QueueID) ==
+ priv->txringcount) {
QueueID++;
continue;
} else {
priv->TxPollingTimes++;
- if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
+ if (priv->TxPollingTimes >=
+ LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
bActionAllowed = false;
break;
} else
@@ -915,7 +939,8 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
while (true) {
u8 tmp24F = read_nic_byte(dev, 0x24f);
- if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
+ if ((tmp24F == 0x01) ||
+ (tmp24F == 0x09)) {
bTurnOffBB = true;
break;
} else {
@@ -935,7 +960,8 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
if (bTurnOffBB) {
/* turn off BB */
u1bTmp = read_nic_byte(dev, 0x24E);
- write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));
+ write_nic_byte(dev, 0x24E,
+ (u1bTmp | BIT5 | BIT6));
/* turn off AFE PLL */
write_nic_byte(dev, 0x54, 0xFC);
@@ -945,7 +971,8 @@ bool SetZebraRFPowerState8185(struct net_device *dev,
break;
case eRfOff:
for (QueueID = 0, i = 0; QueueID < 6;) {
- if (get_curr_tx_free_desc(dev, QueueID) == priv->txringcount) {
+ if (get_curr_tx_free_desc(dev, QueueID) ==
+ priv->txringcount) {
QueueID++;
continue;
} else {
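
read_rtl8225() above is a software 3-wire (bit-banged) register read: the driver drives the RF front end's enable, clock and data pins through RFPinsOutput, shifts the register address out one bit per clock edge, then samples the input register for each bit of the reply, with short udelay() pauses between edges. A generic sketch of that pattern follows; the pin masks and the bb_bus callback struct are made up for illustration (the real code writes RFPinsOutput/RFPinsEnable/RFPinsSelect directly and shifts 6 bits out and 12 bits back).

#include <stdint.h>

#define PIN_EN    (1u << 3)   /* chip enable            */
#define PIN_CLK   (1u << 2)   /* serial clock           */
#define PIN_RW    (1u << 1)   /* read/write direction   */
#define PIN_DATA  (1u << 0)   /* bidirectional data bit */

struct bb_bus {
        void (*write_pins)(uint16_t val);  /* drive the GPIO register  */
        uint16_t (*read_pins)(void);       /* sample the GPIO register */
        void (*delay_us)(unsigned int us); /* settle time between edges */
};

/* Clock a 5-bit address out and a 12-bit value back in, MSB first. */
static uint16_t bb_read_reg(const struct bb_bus *bus, uint8_t addr)
{
        uint16_t base = PIN_EN | PIN_RW;
        uint16_t value = 0;
        int i;

        for (i = 4; i >= 0; i--) {              /* shift the address out */
                uint16_t bit = ((addr >> i) & 1) ? PIN_DATA : 0;

                bus->write_pins(base | bit);
                bus->delay_us(1);
                bus->write_pins(base | bit | PIN_CLK);  /* rising edge latches */
                bus->delay_us(2);
        }

        for (i = 11; i >= 0; i--) {             /* clock the reply back in */
                bus->write_pins(base | PIN_CLK);
                bus->delay_us(2);
                if (bus->read_pins() & PIN_DATA)
                        value |= 1u << i;
                bus->write_pins(base);
                bus->delay_us(1);
        }
        return value;
}

The udelay() calls that the hunks above split onto their own lines correspond to the delay_us() settle times here; putting one statement per line is exactly what the cleanup is after.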
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index e032c3e1e86..41cb4d3d626 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -18,6 +18,7 @@ r8192e_pci-objs := \
r819xE_firmware.o \
r819xE_cmdpkt.o \
r8192E_dm.o \
+ r8192_pm.o \
ieee80211/ieee80211_rx.o \
ieee80211/ieee80211_softmac.o \
ieee80211/ieee80211_tx.o \
diff --git a/drivers/staging/rtl8192e/ieee80211.h b/drivers/staging/rtl8192e/ieee80211.h
index c39249eb54b..e1f03d79d2b 100644
--- a/drivers/staging/rtl8192e/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211.h
@@ -1846,7 +1846,7 @@ struct ieee80211_device {
spinlock_t bw_spinlock;
spinlock_t reorder_spinlock;
- // for HT operation rate set. we use this one for HT data rate to seperate different descriptors
+ // For the HT operation rate set. We use this one for the HT data rate to separate different descriptors
//the way fill this is the same as in the IE
u8 Regdot11HTOperationalRateSet[16]; //use RATR format
u8 dot11HTOperationalRateSet[16]; //use RATR format
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index 1f613a28152..50728f6e9c5 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -2067,7 +2067,7 @@ struct ieee80211_device {
spinlock_t bw_spinlock;
spinlock_t reorder_spinlock;
- // for HT operation rate set. we use this one for HT data rate to seperate different descriptors
+ // For the HT operation rate set. We use this one for the HT data rate to separate different descriptors
//the way fill this is the same as in the IE
u8 Regdot11HTOperationalRateSet[16]; //use RATR format
u8 dot11HTOperationalRateSet[16]; //use RATR format
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
index b3c9bf4b4ea..d5aa9af3d9f 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
@@ -109,11 +109,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -207,11 +206,10 @@ int __init ieee80211_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
index 1776f7e69bf..7165c4c75c7 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
@@ -96,10 +96,9 @@ static void * ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
index 03cb21eb065..65f48896bfa 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
@@ -87,10 +87,9 @@ static void * ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
index 5678313e30a..c4bbc8ddbad 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
@@ -71,10 +71,9 @@ static void * prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index f43a7db5c78..614a8b630e6 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -65,8 +65,8 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
if (ieee->networks)
return 0;
- ieee->networks = kmalloc(
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+ ieee->networks = kcalloc(
+ MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -74,9 +74,6 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return -ENOMEM;
}
- memset(ieee->networks, 0,
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
return 0;
}
@@ -170,7 +167,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee80211_softmac_init(ieee);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13))
- ieee->pHTInfo = (RT_HIGH_THROUGHPUT*)kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
+ ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
#else
ieee->pHTInfo = (RT_HIGH_THROUGHPUT*)kmalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
memset(ieee->pHTInfo,0,sizeof(RT_HIGH_THROUGHPUT));
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
index ce265ae5fe1..da10067485e 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
@@ -1397,7 +1397,7 @@ int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
- rxb = (struct ieee80211_rxb*)kmalloc(sizeof(struct ieee80211_rxb),GFP_ATOMIC);
+ rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
if(rxb == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index d1d7b086675..46b6e8c900e 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -1800,7 +1800,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
if(*(t++) == MFIE_TYPE_CHALLENGE){
*chlen = *(t++);
- *challenge = (u8*)kmalloc(*chlen, GFP_ATOMIC);
+ *challenge = kmalloc(*chlen, GFP_ATOMIC);
memcpy(*challenge, t, *chlen);
}
}
@@ -1934,7 +1934,8 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
//IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
- if ((status = auth_rq_parse(skb, dest))!= -1){
+ status = auth_rq_parse(skb, dest);
+ if (status != -1) {
ieee80211_resp_to_auth(ieee, status, dest);
}
//DMESG("Dest is "MACSTR, MAC2STR(dest));
@@ -3078,10 +3079,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->seq_ctrl[i] = 0;
}
#ifdef ENABLE_DOT11D
- ieee->pDot11dInfo = kmalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(RT_DOT11D_INFO));
#endif
//added for AP roaming
ieee->LinkDetectInfo.SlotNum = 2;
@@ -3255,11 +3255,11 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3458,8 +3458,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
- new_crypt = (struct ieee80211_crypt_data *)
- kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
@@ -3591,7 +3590,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL){
ret = -ENOMEM;
goto out;
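
The rtl8192e hunks above repeatedly apply the same allocation clean-ups: kmalloc() followed by memset(..., 0, ...) becomes kzalloc(), kmalloc() followed by memcpy() of a caller-supplied buffer becomes kmemdup(), and the MAX_NETWORK_COUNT * sizeof(...) allocation becomes kcalloc(), which also checks the multiplication for overflow. A condensed, hypothetical example of the resulting idiom (example_ctx and example_init() are placeholders, not driver symbols):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_ctx {
        int key_idx;
        u8 *ie;
        size_t ie_len;
};

static struct example_ctx *example_init(const u8 *ie, size_t ie_len, int idx)
{
        struct example_ctx *ctx;

        /* kzalloc() replaces the kmalloc() + memset(0) pair */
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
        ctx->key_idx = idx;

        /* kmemdup() replaces kmalloc() + memcpy() for copied buffers */
        ctx->ie = kmemdup(ie, ie_len, GFP_KERNEL);
        if (!ctx->ie) {
                kfree(ctx);
                return NULL;
        }
        ctx->ie_len = ie_len;

        return ctx;
}

The GFP_ATOMIC call sites in the crypto init paths keep their flag; only the zeroing and copying move into the helper.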
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
index de57967b968..4971b1c8e7d 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
@@ -477,11 +477,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops)
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
@@ -980,10 +979,9 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h
index a50ee0e1c05..d4565ecc7ab 100644
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h
@@ -69,147 +69,6 @@ typedef enum _ACK_POLICY{
}ACK_POLICY,*PACK_POLICY;
#define WMM_PARAM_ELEMENT_SIZE (8+(4*AC_PARAM_SIZE))
-#if 0
-#define GET_QOS_CTRL(_pStart) ReadEF2Byte((u8 *)(_pStart) + 24)
-#define SET_QOS_CTRL(_pStart, _value) WriteEF2Byte((u8 *)(_pStart) + 24, _value)
-
-// WMM control field.
-#define GET_QOS_CTRL_WMM_UP(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 0, 3))
-#define SET_QOS_CTRL_WMM_UP(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 0, 3, (u8)(_value))
-
-#define GET_QOS_CTRL_WMM_EOSP(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 4, 1))
-#define SET_QOS_CTRL_WMM_EOSP(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 4, 1, (u8)(_value))
-
-#define GET_QOS_CTRL_WMM_ACK_POLICY(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 5, 2))
-#define SET_QOS_CTRL_WMM_ACK_POLICY(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 5, 2, (u8)(_value))
-
-// 802.11e control field (by STA, data)
-#define GET_QOS_CTRL_STA_DATA_TID(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 0, 4))
-#define SET_QOS_CTRL_STA_DATA_TID(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 0, 4, (u8)(_value))
-
-#define GET_QOS_CTRL_STA_DATA_QSIZE_FLAG(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 4, 1))
-#define SET_QOS_CTRL_STA_DATA_QSIZE_FLAG(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 4, 1, (u8)(_value))
-
-#define GET_QOS_CTRL_STA_DATA_ACK_POLICY(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 5, 2))
-#define SET_QOS_CTRL_STA_DATA_ACK_POLICY(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 5, 2, (u8)(_value))
-
-#define GET_QOS_CTRL_STA_DATA_TXOP(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 8, 8))
-#define SET_QOS_CTRL_STA_DATA_TXOP(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 8, 8, (u8)(_value))
-
-#define GET_QOS_CTRL_STA_DATA_QSIZE(_pStart) GET_QOS_CTRL_STA_DATA_TXOP(_pStart)
-#define SET_QOS_CTRL_STA_DATA_QSIZE(_pStart, _value) SET_QOS_CTRL_STA_DATA_TXOP(_pStart)
-
-// 802.11e control field (by HC, data)
-#define GET_QOS_CTRL_HC_DATA_TID(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 0, 4))
-#define SET_QOS_CTRL_HC_DATA_TID(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 0, 4, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_DATA_EOSP(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 4, 1))
-#define SET_QOS_CTRL_HC_DATA_EOSP(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 4, 1, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_DATA_ACK_POLICY(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 5, 2))
-#define SET_QOS_CTRL_HC_DATA_ACK_POLICY(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 5, 2, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_DATA_PS_BUFSTATE(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 8, 8))
-#define SET_QOS_CTRL_HC_DATA_PS_BUFSTATE(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 8, 8, (u8)(_value))
-
-// 802.11e control field (by HC, CFP)
-#define GET_QOS_CTRL_HC_CFP_TID(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 0, 4))
-#define SET_QOS_CTRL_HC_CFP_TID(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 0, 4, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_CFP_EOSP(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 4, 1))
-#define SET_QOS_CTRL_HC_CFP_EOSP(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 4, 1, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_CFP_ACK_POLICY(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 5, 2))
-#define SET_QOS_CTRL_HC_CFP_ACK_POLICY(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 5, 2, (u8)(_value))
-
-#define GET_QOS_CTRL_HC_CFP_TXOP_LIMIT(_pStart) ((u8)LE_BITS_TO_2BYTE((u8 *)(_pStart)+24, 8, 8))
-#define SET_QOS_CTRL_HC_CFP_TXOP_LIMIT(_pStart, _value) SET_BITS_TO_LE_2BYTE((u8 *)(_pStart)+24, 8, 8, (u8)(_value))
-
-#define SET_WMM_QOS_INFO_FIELD(_pStart, _val) WriteEF1Byte(_pStart, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_PARAMETERSET_COUNT(_pStart) LE_BITS_TO_1BYTE(_pStart, 0, 4)
-#define SET_WMM_QOS_INFO_FIELD_PARAMETERSET_COUNT(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 0, 4, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_AP_UAPSD(_pStart) LE_BITS_TO_1BYTE(_pStart, 7, 1)
-#define SET_WMM_QOS_INFO_FIELD_AP_UAPSD(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 7, 1, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_STA_AC_VO_UAPSD(_pStart) LE_BITS_TO_1BYTE(_pStart, 0, 1)
-#define SET_WMM_QOS_INFO_FIELD_STA_AC_VO_UAPSD(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 0, 1, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_STA_AC_VI_UAPSD(_pStart) LE_BITS_TO_1BYTE(_pStart, 1, 1)
-#define SET_WMM_QOS_INFO_FIELD_STA_AC_VI_UAPSD(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 1, 1, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_STA_AC_BE_UAPSD(_pStart) LE_BITS_TO_1BYTE(_pStart, 2, 1)
-#define SET_WMM_QOS_INFO_FIELD_STA_AC_BE_UAPSD(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 2, 1, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_STA_AC_BK_UAPSD(_pStart) LE_BITS_TO_1BYTE(_pStart, 3, 1)
-#define SET_WMM_QOS_INFO_FIELD_STA_AC_BK_UAPSD(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 3, 1, _val)
-
-#define GET_WMM_QOS_INFO_FIELD_STA_MAX_SP_LEN(_pStart) LE_BITS_TO_1BYTE(_pStart, 5, 2)
-#define SET_WMM_QOS_INFO_FIELD_STA_MAX_SP_LEN(_pStart, _val) SET_BITS_TO_LE_1BYTE(_pStart, 5, 2, _val)
-
-
-#define WMM_INFO_ELEMENT_SIZE 7
-
-#define GET_WMM_INFO_ELE_OUI(_pStart) ((u8 *)(_pStart))
-#define SET_WMM_INFO_ELE_OUI(_pStart, _pVal) PlatformMoveMemory(_pStart, _pVal, 3);
-
-#define GET_WMM_INFO_ELE_OUI_TYPE(_pStart) ( EF1Byte( *((u8 *)(_pStart)+3) ) )
-#define SET_WMM_INFO_ELE_OUI_TYPE(_pStart, _val) ( *((u8 *)(_pStart)+3) = EF1Byte(_val) )
-
-#define GET_WMM_INFO_ELE_OUI_SUBTYPE(_pStart) ( EF1Byte( *((u8 *)(_pStart)+4) ) )
-#define SET_WMM_INFO_ELE_OUI_SUBTYPE(_pStart, _val) ( *((u8 *)(_pStart)+4) = EF1Byte(_val) )
-
-#define GET_WMM_INFO_ELE_VERSION(_pStart) ( EF1Byte( *((u8 *)(_pStart)+5) ) )
-#define SET_WMM_INFO_ELE_VERSION(_pStart, _val) ( *((u8 *)(_pStart)+5) = EF1Byte(_val) )
-
-#define GET_WMM_INFO_ELE_QOS_INFO_FIELD(_pStart) ( EF1Byte( *((u8 *)(_pStart)+6) ) )
-#define SET_WMM_INFO_ELE_QOS_INFO_FIELD(_pStart, _val) ( *((u8 *)(_pStart)+6) = EF1Byte(_val) )
-
-
-
-#define GET_WMM_AC_PARAM_AIFSN(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 0, 4) )
-#define SET_WMM_AC_PARAM_AIFSN(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 0, 4, _val)
-
-#define GET_WMM_AC_PARAM_ACM(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 4, 1) )
-#define SET_WMM_AC_PARAM_ACM(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 4, 1, _val)
-
-#define GET_WMM_AC_PARAM_ACI(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 5, 2) )
-#define SET_WMM_AC_PARAM_ACI(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 5, 2, _val)
-
-#define GET_WMM_AC_PARAM_ACI_AIFSN(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 0, 8) )
-#define SET_WMM_AC_PARAM_ACI_AIFSN(_pStart, _val) SET_BTIS_TO_LE_4BYTE(_pStart, 0, 8, _val)
-
-#define GET_WMM_AC_PARAM_ECWMIN(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 8, 4) )
-#define SET_WMM_AC_PARAM_ECWMIN(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 8, 4, _val)
-
-#define GET_WMM_AC_PARAM_ECWMAX(_pStart) ( (u8)LE_BITS_TO_4BYTE(_pStart, 12, 4) )
-#define SET_WMM_AC_PARAM_ECWMAX(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 12, 4, _val)
-
-#define GET_WMM_AC_PARAM_TXOP_LIMIT(_pStart) ( (u16)LE_BITS_TO_4BYTE(_pStart, 16, 16) )
-#define SET_WMM_AC_PARAM_TXOP_LIMIT(_pStart, _val) SET_BITS_TO_LE_4BYTE(_pStart, 16, 16, _val)
-
-
-
-
-#define GET_WMM_PARAM_ELE_OUI(_pStart) ((u8 *)(_pStart))
-#define SET_WMM_PARAM_ELE_OUI(_pStart, _pVal) PlatformMoveMemory(_pStart, _pVal, 3)
-
-#define GET_WMM_PARAM_ELE_OUI_TYPE(_pStart) ( EF1Byte( *((u8 *)(_pStart)+3) ) )
-#define SET_WMM_PARAM_ELE_OUI_TYPE(_pStart, _val) ( *((u8 *)(_pStart)+3) = EF1Byte(_val) )
-
-#define GET_WMM_PARAM_ELE_OUI_SUBTYPE(_pStart) ( EF1Byte( *((u8 *)(_pStart)+4) ) )
-#define SET_WMM_PARAM_ELE_OUI_SUBTYPE(_pStart, _val) ( *((u8 *)(_pStart)+4) = EF1Byte(_val) )
-
-#define GET_WMM_PARAM_ELE_VERSION(_pStart) ( EF1Byte( *((u8 *)(_pStart)+5) ) )
-#define SET_WMM_PARAM_ELE_VERSION(_pStart, _val) ( *((u8 *)(_pStart)+5) = EF1Byte(_val) )
-
-#define GET_WMM_PARAM_ELE_QOS_INFO_FIELD(_pStart) ( EF1Byte( *((u8 *)(_pStart)+6) ) )
-#define SET_WMM_PARAM_ELE_QOS_INFO_FIELD(_pStart, _val) ( *((u8 *)(_pStart)+6) = EF1Byte(_val) )
-
-#define GET_WMM_PARAM_ELE_AC_PARAM(_pStart) ( (u8 *)(_pStart)+8 )
-#define SET_WMM_PARAM_ELE_AC_PARAM(_pStart, _pVal) PlatformMoveMemory((_pStart)+8, _pVal, 16)
-#endif
//
// QoS Control Field
@@ -360,22 +219,6 @@ typedef union _QOS_INFO_FIELD{
}QOS_INFO_FIELD, *PQOS_INFO_FIELD;
-#if 0
-//
-// WMM Information Element
-// Ref: WMM spec 2.2.1: WME Information Element, p.10.
-//
-typedef struct _WMM_INFO_ELEMENT{
-// u8 ElementID;
-// u8 Length;
- u8 OUI[3];
- u8 OUI_Type;
- u8 OUI_SubType;
- u8 Version;
- QOS_INFO_FIELD QosInfo;
-}WMM_INFO_ELEMENT, *PWMM_INFO_ELEMENT;
-#endif
-
//
// ACI to AC coding.
// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
@@ -649,16 +492,7 @@ typedef struct _OCTET_STRING{
u8 *Octet;
u16 Length;
}OCTET_STRING, *POCTET_STRING;
-#if 0
-#define FillOctetString(_os,_octet,_len) \
- (_os).Octet=(u8 *)(_octet); \
- (_os).Length=(_len);
-
-#define WMM_ELEM_HDR_LEN 6
-#define WMMElemSkipHdr(_osWMMElem) \
- (_osWMMElem).Octet += WMM_ELEM_HDR_LEN; \
- (_osWMMElem).Length -= WMM_ELEM_HDR_LEN;
-#endif
+
//
// STA QoS data.
// Ref: DOT11_QOS in 8185 code. [def. in QoS_mp.h]
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c
index e8699616fad..5876b4d53eb 100644
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c
@@ -3,13 +3,6 @@
#include <linux/slab.h>
#include "rtl819x_TS.h"
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
-#endif
void TsSetupTimeOut(unsigned long data)
{
// Not implement yet
@@ -29,7 +22,6 @@ void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-#if 1
void RxPktPendingTimeout(unsigned long data)
{
PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
@@ -90,25 +82,16 @@ void RxPktPendingTimeout(unsigned long data)
return;
}
ieee80211_indicate_packets(ieee, stats_IndicateArray, index);
- bPktInBuf = false;
}
if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
{
pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
-#if 0
- if(timer_pending(&pRxTs->RxPktPendingTimer))
- del_timer_sync(&pRxTs->RxPktPendingTimer);
- pRxTs->RxPktPendingTimer.expires = jiffies + ieee->pHTInfo->RxReorderPendingTime;
- add_timer(&pRxTs->RxPktPendingTimer);
-#else
mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
-#endif
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
}
-#endif
/********************************************************************************************************************
*function: Add BA timer function
@@ -372,17 +355,11 @@ bool GetTs(
IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
return false;
}
-#if 0
- if(ieee->pStaQos->CurrentQosMode == QOS_DISABLE)
- { UP = 0; } //only use one TS
- else if(ieee->pStaQos->CurrentQosMode & QOS_WMM)
- {
-#else
+
if (ieee->current_network.qos_data.supported == 0)
UP = 0;
else
{
-#endif
// In WMM case: we use 4 TID only
if (!IsACValid(TID))
{
@@ -553,8 +530,8 @@ void RemoveTsEntry(
void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
{
PTS_COMMON_INFO pTS, pTmpTS;
+
printk("===========>RemovePeerTS,%pM\n", Addr);
-#if 1
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
if (memcmp(pTS->Addr, Addr, 6) == 0)
@@ -595,13 +572,12 @@ void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
-#endif
}
void RemoveAllTS(struct ieee80211_device* ieee)
{
PTS_COMMON_INFO pTS, pTmpTS;
-#if 1
+
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
RemoveTsEntry(ieee, pTS, TX_DIR);
@@ -629,7 +605,6 @@ void RemoveAllTS(struct ieee80211_device* ieee)
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
-#endif
}
void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
@@ -637,7 +612,6 @@ void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
if(pTxTS->bAddBaReqInProgress == false)
{
pTxTS->bAddBaReqInProgress = true;
-#if 1
if(pTxTS->bAddBaReqDelayed)
{
IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
@@ -648,13 +622,7 @@ void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks
}
-#endif
}
else
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __FUNCTION__);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-EXPORT_SYMBOL_NOVERS(RemovePeerTS);
-#else
-//EXPORT_SYMBOL(RemovePeerTS);
-#endif
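
The RxPktPendingTimeout() hunk above also collapses the old #if 0 re-arm sequence (del_timer_sync(), setting .expires, add_timer()) into a single mod_timer() call, which re-arms a pending or stopped timer in one step. A minimal sketch of the two forms, assuming an already initialised timer and the driver's MSECS() millisecond-to-jiffies helper, with delay_ms standing in for ieee->pHTInfo->RxReorderPendingTime:

	/* old open-coded re-arm kept under #if 0 */
	if (timer_pending(&pRxTs->RxPktPendingTimer))
		del_timer_sync(&pRxTs->RxPktPendingTimer);
	pRxTs->RxPktPendingTimer.expires = jiffies + MSECS(delay_ms);
	add_timer(&pRxTs->RxPktPendingTimer);

	/* equivalent single call used by the patch */
	mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(delay_ms));
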
diff --git a/drivers/staging/rtl8192e/r8190_rtl8256.c b/drivers/staging/rtl8192e/r8190_rtl8256.c
index 1bd054d42f2..7391f5f8f25 100644
--- a/drivers/staging/rtl8192e/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192e/r8190_rtl8256.c
@@ -802,7 +802,7 @@ MgntDisconnectIBSS(
case RT_OP_MODE_IBSS:
btMsr |= MSR_LINK_ADHOC;
- // led link set seperate
+ // led link set separate
break;
case RT_OP_MODE_AP:
@@ -885,7 +885,7 @@ MlmeDisassociateRequest(
case RT_OP_MODE_IBSS:
btMsr |= MSR_LINK_ADHOC;
- // led link set seperate
+ // led link set separate
break;
case RT_OP_MODE_AP:
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index bb7e1ef28d3..eb41402d1d3 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -62,7 +62,7 @@
//#include <linux/usb.h>
// FIXME: check if 2.6.7 is ok
-#ifdef CONFIG_PM_RTL
+#ifdef CONFIG_PM
#include "r8192_pm.h"
#endif
@@ -146,7 +146,7 @@ static struct pci_driver rtl8192_pci_driver = {
.id_table = rtl8192_pci_id_tbl, /* PCI_ID table */
.probe = rtl8192_pci_probe, /* probe fn */
.remove = __devexit_p(rtl8192_pci_disconnect), /* remove fn */
-#ifdef CONFIG_PM_RTL
+#ifdef CONFIG_PM
.suspend = rtl8192E_suspend, /* PM suspend fn */
.resume = rtl8192E_resume, /* PM resume fn */
#else
@@ -407,7 +407,7 @@ rtl8192e_SetHwReg(struct net_device *dev,u8 variable,u8* val)
case RT_OP_MODE_IBSS:
btMsr |= MSR_ADHOC;
- // led link set seperate
+ // led link set separate
break;
case RT_OP_MODE_AP:
@@ -1864,13 +1864,15 @@ static short rtl8192_pci_initdescring(struct net_device *dev)
/* general process for other queue */
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) {
- if ((ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount)))
+ ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount);
+ if (ret)
goto err_free_rings;
}
#if 0
/* specific process for hardware beacon process */
- if ((ret = rtl8192_alloc_tx_desc_ring(dev, MAX_TX_QUEUE_COUNT - 1, 2)))
+ ret = rtl8192_alloc_tx_desc_ring(dev, MAX_TX_QUEUE_COUNT - 1, 2);
+ if (ret)
goto err_free_rings;
#endif
@@ -5038,7 +5040,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
goto out;
}
- ipw = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ ipw = kmalloc(p->length, GFP_KERNEL);
if (ipw == NULL){
ret = -ENOMEM;
goto out;
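
Two small cleanups recur in the r8192E_core.c hunks: assignments are moved out of if conditions, and the redundant cast of kmalloc()'s void * return value is dropped. A minimal sketch of the first pattern, reusing the alloc call from the hunk above:

	/* before: assignment buried inside the condition */
	if ((ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount)))
		goto err_free_rings;

	/* after: assignment and test on separate lines, per kernel style */
	ret = rtl8192_alloc_tx_desc_ring(dev, i, priv->txringcount);
	if (ret)
		goto err_free_rings;
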
diff --git a/drivers/staging/rtl8192e/r8192_pm.c b/drivers/staging/rtl8192e/r8192_pm.c
index feef29b0a89..521d49f8f8e 100644
--- a/drivers/staging/rtl8192e/r8192_pm.c
+++ b/drivers/staging/rtl8192e/r8192_pm.c
@@ -9,8 +9,6 @@
Released under the terms of GPL (General Public Licence)
*/
-#ifdef CONFIG_PM_RTL
-
#include "r8192E.h"
#include "r8192E_hw.h"
#include "r8192_pm.h"
@@ -27,7 +25,9 @@ int rtl8192E_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r8192_priv *priv = ieee80211_priv(dev);
+#ifdef RTL8190P
u8 ucRegRead;
+#endif
u32 ulRegRead;
RT_TRACE(COMP_POWER, "============> r8192E suspend call.\n");
@@ -168,5 +168,3 @@ int rtl8192E_enable_wake (struct pci_dev *dev, pm_message_t state, int enable)
state.event, enable);
return(-EAGAIN);
}
-
-#endif //CONFIG_PM_RTL
diff --git a/drivers/staging/rtl8192e/r8192_pm.h b/drivers/staging/rtl8192e/r8192_pm.h
index 68d292b1d86..4da9b464b41 100644
--- a/drivers/staging/rtl8192e/r8192_pm.h
+++ b/drivers/staging/rtl8192e/r8192_pm.h
@@ -10,8 +10,6 @@
*/
-#ifdef CONFIG_PM_RTL
-
#ifndef R8192E_PM_H
#define R8192E_PM_H
@@ -24,5 +22,3 @@ int rtl8192E_resume (struct pci_dev *dev);
int rtl8192E_enable_wake (struct pci_dev *dev, pm_message_t state, int enable);
#endif //R8192E_PM_H
-
-#endif // CONFIG_PM_RTL
diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig
index b72a96206f5..b422ea1ecf9 100644
--- a/drivers/staging/rtl8192su/Kconfig
+++ b/drivers/staging/rtl8192su/Kconfig
@@ -3,5 +3,6 @@ config RTL8192SU
depends on PCI && WLAN && USB
select WIRELESS_EXT
select WEXT_PRIV
+ select EEPROM_93CX6
default N
---help---
diff --git a/drivers/staging/rtl8192su/Makefile b/drivers/staging/rtl8192su/Makefile
index c8b4332d108..7133894afe7 100644
--- a/drivers/staging/rtl8192su/Makefile
+++ b/drivers/staging/rtl8192su/Makefile
@@ -9,7 +9,6 @@ EXTRA_CFLAGS += -DTHOMAS_BEACON
#EXTRA_CFLAGS += -DMUTIPLE_BULK_OUT
r8192s_usb-objs := \
- r8180_93cx6.o \
r8192U_wx.o \
r8192S_phy.o \
r8192S_rtl6052.o \
@@ -21,6 +20,7 @@ r8192s_usb-objs := \
r8192S_Efuse.o \
r8192U_core.o \
r8192U_pm.o \
+ r8192SU_led.o \
ieee80211/ieee80211_crypt.o \
ieee80211/ieee80211_crypt_tkip.o \
ieee80211/ieee80211_crypt_ccmp.o \
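
With r8180_93cx6.o dropped from the object list and EEPROM_93CX6 selected in Kconfig, the driver is set up to use the kernel's shared 93cx6 EEPROM helper instead of its private bit-banging code (the removed r8180_93cx6.c appears further down in this patch). A minimal sketch of how that helper is typically wired up, assuming hypothetical register_read/register_write callbacks supplied by the driver:

	#include <linux/eeprom_93cx6.h>

	struct eeprom_93cx6 eeprom;
	u16 word;

	eeprom.data = priv;				/* driver context for the callbacks */
	eeprom.register_read = rtl8192_eeprom_register_read;	/* hypothetical names */
	eeprom.register_write = rtl8192_eeprom_register_write;
	eeprom.width = PCI_EEPROM_WIDTH_93C46;

	eeprom_93cx6_read(&eeprom, addr, &word);	/* read one 16-bit word at addr */
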
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index 32b261d1559..bcb2b12a839 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -1152,7 +1152,7 @@ struct ieee80211_device {
spinlock_t reorder_spinlock;
/*
* for HT operation rate set, we use this one for HT data rate to
- * seperate different descriptors the way fill this is the same as
+ * separate different descriptors the way fill this is the same as
* in the IE
*/
u8 Regdot11HTOperationalRateSet[16]; /* use RATR format */
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
index c4640e63196..80194233943 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c
@@ -109,11 +109,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -206,11 +205,10 @@ int __init ieee80211_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
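
The allocation hunks in this file and in the crypt_ccmp/tkip/wep modules below all follow the same conversion: a kmalloc() immediately followed by memset(..., 0, ...) becomes a single kzalloc(), which returns zeroed memory. A minimal sketch of the pattern, with struct foo standing in for the per-module private data:

	struct foo *p;

	/* before */
	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (p == NULL)
		return NULL;
	memset(p, 0, sizeof(*p));

	/* after: the allocation arrives already zeroed */
	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p == NULL)
		return NULL;
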
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
index 8a93f7d3eb3..77de957f1b1 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c
@@ -68,10 +68,9 @@ static void * ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
index 7e48748da10..ade5f6f1366 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c
@@ -67,10 +67,9 @@ static void * ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
index 64f9cf01c94..a1c0a59122b 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c
@@ -43,10 +43,9 @@ static void * prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
index c024fa60072..a87650a517b 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c
@@ -65,8 +65,8 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
if (ieee->networks)
return 0;
- ieee->networks = kmalloc(
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+ ieee->networks = kcalloc(
+ MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -74,9 +74,6 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return -ENOMEM;
}
- memset(ieee->networks, 0,
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
return 0;
}
@@ -161,7 +158,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee80211_softmac_init(ieee);
- ieee->pHTInfo = (RT_HIGH_THROUGHPUT*)kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
+ ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
if (ieee->pHTInfo == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
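
The ieee80211_networks_allocate() hunk above switches the beacon table from kmalloc(count * size) plus a separate memset() to kcalloc(count, size, flags), which both zeroes the buffer and checks the count * size multiplication for overflow. A minimal sketch of the change:

	struct ieee80211_network *networks;

	networks = kcalloc(MAX_NETWORK_COUNT, sizeof(*networks), GFP_KERNEL);
	if (!networks)
		return -ENOMEM;
	/* no memset() needed: kcalloc() returns zeroed memory */
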
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
index 7d6c3bc143a..1824cda790d 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
@@ -172,18 +172,20 @@ enum {
IG_Max
};
-typedef enum _LED_CTL_MODE {
- LED_CTL_POWER_ON = 1,
- LED_CTL_LINK = 2,
- LED_CTL_NO_LINK = 3,
- LED_CTL_TX = 4,
- LED_CTL_RX = 5,
- LED_CTL_SITE_SURVEY = 6,
- LED_CTL_POWER_OFF = 7,
- LED_CTL_START_TO_LINK = 8,
- LED_CTL_START_WPS = 9,
- LED_CTL_STOP_WPS = 10,
+typedef enum _LED_CTL_MODE{
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
LED_CTL_START_WPS_BOTTON = 11,
+ LED_CTL_STOP_WPS_FAIL = 12,
+ LED_CTL_STOP_WPS_FAIL_OVERLAP = 13,
} LED_CTL_MODE;
typedef union _frameqos {
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
index cc80faf6598..1f2bc7ac6f7 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
@@ -1191,7 +1191,7 @@ int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
- rxb = (struct ieee80211_rxb*)kmalloc(sizeof(struct ieee80211_rxb),GFP_ATOMIC);
+ rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
if(rxb == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index 84a4e23b60b..4f1f2f08b2d 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -1557,7 +1557,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
if(*(t++) == MFIE_TYPE_CHALLENGE){
*chlen = *(t++);
- *challenge = (u8*)kmalloc(*chlen, GFP_ATOMIC);
+ *challenge = kmalloc(*chlen, GFP_ATOMIC);
memcpy(*challenge, t, *chlen);
}
}
@@ -1691,7 +1691,8 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
//IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
- if ((status = auth_rq_parse(skb, dest))!= -1){
+ status = auth_rq_parse(skb, dest);
+ if (status != -1) {
ieee80211_resp_to_auth(ieee, status, dest);
}
//DMESG("Dest is "MACSTR, MAC2STR(dest));
@@ -2698,10 +2699,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
for(i = 0; i < 5; i++) {
ieee->seq_ctrl[i] = 0;
}
- ieee->pDot11dInfo = kmalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(RT_DOT11D_INFO));
//added for AP roaming
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
@@ -2844,11 +2844,11 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3047,8 +3047,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
- new_crypt = (struct ieee80211_crypt_data *)
- kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
@@ -3181,7 +3180,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL){
ret = -ENOMEM;
goto out;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
index 727cc552c5e..2ce5bd543ea 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c
@@ -352,11 +352,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops)
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
@@ -768,10 +767,9 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
printk("len: %Zd, ie:%d\n", len, ie[1]);
return -EINVAL;
}
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
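
Both this file and ieee80211_softmac.c above replace the kmalloc()-then-memcpy() idiom for duplicating a caller-supplied IE buffer with kmemdup(), which allocates and copies in one call. A minimal sketch of the pattern:

	u8 *buf;

	/* before */
	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	memcpy(buf, ie, len);

	/* after */
	buf = kmemdup(ie, len, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
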
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
index 7aa9a7790b6..d4565ecc7ab 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h
@@ -1,7 +1,6 @@
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
-//#include "EndianFree.h"
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
index 38468c53967..de143ecae5f 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c
@@ -22,7 +22,6 @@ void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-#if 1
void RxPktPendingTimeout(unsigned long data)
{
PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
@@ -83,8 +82,6 @@ void RxPktPendingTimeout(unsigned long data)
return;
}
ieee80211_indicate_packets(ieee, stats_IndicateArray, index);
- bPktInBuf = false;
-
}
if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
@@ -95,7 +92,6 @@ void RxPktPendingTimeout(unsigned long data)
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
}
-#endif
/********************************************************************************************************************
*function: Add BA timer function
@@ -534,8 +530,8 @@ void RemoveTsEntry(
void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
{
PTS_COMMON_INFO pTS, pTmpTS;
+
printk("===========>RemovePeerTS,%pM\n", Addr);
-#if 1
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
if (memcmp(pTS->Addr, Addr, 6) == 0)
@@ -576,13 +572,12 @@ void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
-#endif
}
void RemoveAllTS(struct ieee80211_device* ieee)
{
PTS_COMMON_INFO pTS, pTmpTS;
-#if 1
+
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
RemoveTsEntry(ieee, pTS, TX_DIR);
@@ -610,7 +605,6 @@ void RemoveAllTS(struct ieee80211_device* ieee)
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
-#endif
}
void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
@@ -618,7 +612,6 @@ void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
if(pTxTS->bAddBaReqInProgress == false)
{
pTxTS->bAddBaReqInProgress = true;
-#if 1
if(pTxTS->bAddBaReqDelayed)
{
IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
@@ -629,7 +622,6 @@ void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks
}
-#endif
}
else
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __FUNCTION__);
diff --git a/drivers/staging/rtl8192su/r8180_93cx6.c b/drivers/staging/rtl8192su/r8180_93cx6.c
deleted file mode 100644
index 8878cfeb0fb..00000000000
--- a/drivers/staging/rtl8192su/r8180_93cx6.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- This files contains card eeprom (93c46 or 93c56) programming routines,
- memory is addressed by 16 bits words.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- We want to tanks the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
-
-#include "r8180_93cx6.h"
-
-void eprom_cs(struct net_device *dev, short bit)
-{
- if(bit)
- write_nic_byte_E(dev, EPROM_CMD,
- (1<<EPROM_CS_SHIFT) | \
- read_nic_byte_E(dev, EPROM_CMD)); //enable EPROM
- else
- write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev, EPROM_CMD)\
- &~(1<<EPROM_CS_SHIFT)); //disable EPROM
-
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-
-void eprom_ck_cycle(struct net_device *dev)
-{
- write_nic_byte_E(dev, EPROM_CMD,
- (1<<EPROM_CK_SHIFT) | read_nic_byte_E(dev,EPROM_CMD));
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
- write_nic_byte_E(dev, EPROM_CMD,
- read_nic_byte_E(dev, EPROM_CMD) &~ (1<<EPROM_CK_SHIFT));
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-
-void eprom_w(struct net_device *dev,short bit)
-{
- if(bit)
- write_nic_byte_E(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) | \
- read_nic_byte_E(dev,EPROM_CMD));
- else
- write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev,EPROM_CMD)\
- &~(1<<EPROM_W_SHIFT));
-
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-}
-
-
-short eprom_r(struct net_device *dev)
-{
- short bit;
-
- bit=(read_nic_byte_E(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT) );
- udelay(EPROM_DELAY);
-
- if(bit) return 1;
- return 0;
-}
-
-
-void eprom_send_bits_string(struct net_device *dev, short b[], int len)
-{
- int i;
-
- for(i=0; i<len; i++){
- eprom_w(dev, b[i]);
- eprom_ck_cycle(dev);
- }
-}
-
-
-u32 eprom_read(struct net_device *dev, u32 addr)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- short read_cmd[]={1,1,0};
- short addr_str[8];
- int i;
- int addr_len;
- u32 ret;
-
- ret=0;
- //enable EPROM programming
- write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_PROGRAM<<EPROM_CMD_OPERATING_MODE_SHIFT));
- force_pci_posting(dev);
- udelay(EPROM_DELAY);
-
- if (priv->epromtype==EPROM_93c56){
- addr_str[7]=addr & 1;
- addr_str[6]=addr & (1<<1);
- addr_str[5]=addr & (1<<2);
- addr_str[4]=addr & (1<<3);
- addr_str[3]=addr & (1<<4);
- addr_str[2]=addr & (1<<5);
- addr_str[1]=addr & (1<<6);
- addr_str[0]=addr & (1<<7);
- addr_len=8;
- }else{
- addr_str[5]=addr & 1;
- addr_str[4]=addr & (1<<1);
- addr_str[3]=addr & (1<<2);
- addr_str[2]=addr & (1<<3);
- addr_str[1]=addr & (1<<4);
- addr_str[0]=addr & (1<<5);
- addr_len=6;
- }
- eprom_cs(dev, 1);
- eprom_ck_cycle(dev);
- eprom_send_bits_string(dev, read_cmd, 3);
- eprom_send_bits_string(dev, addr_str, addr_len);
-
- //keep chip pin D to low state while reading.
- //I'm unsure if it is necessary, but anyway shouldn't hurt
- eprom_w(dev, 0);
-
- for(i=0;i<16;i++){
- //eeprom needs a clk cycle between writing opcode&adr
- //and reading data. (eeprom outs a dummy 0)
- eprom_ck_cycle(dev);
- ret |= (eprom_r(dev)<<(15-i));
- }
-
- eprom_cs(dev, 0);
- eprom_ck_cycle(dev);
-
- //disable EPROM programming
- write_nic_byte_E(dev, EPROM_CMD,
- (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
- return ret;
-}
diff --git a/drivers/staging/rtl8192su/r8180_93cx6.h b/drivers/staging/rtl8192su/r8180_93cx6.h
deleted file mode 100644
index 0309800255c..00000000000
--- a/drivers/staging/rtl8192su/r8180_93cx6.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- This is part of rtl8187 OpenSource driver
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the official realtek driver
- Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to tanks the Authors of such projects and the Ndiswrapper project Authors.
-*/
-
-/*This files contains card eeprom (93c46 or 93c56) programming routines*/
-/*memory is addressed by WORDS*/
-
-#include "r8192U.h"
-#include "r8192S_hw.h"
-
-#define EPROM_DELAY 10
-
-#define EPROM_ANAPARAM_ADDRLWORD 0xd
-#define EPROM_ANAPARAM_ADDRHWORD 0xe
-
-#define EPROM_RFCHIPID 0x6
-#define EPROM_TXPW_BASE 0x05
-#define EPROM_RFCHIPID_RTL8225U 5
-#define EPROM_RF_PARAM 0x4
-#define EPROM_CONFIG2 0xc
-
-#define EPROM_VERSION 0x1E
-#define MAC_ADR 0x7
-
-#define CIS 0x18
-
-#define EPROM_TXPW0 0x16
-#define EPROM_TXPW2 0x1b
-#define EPROM_TXPW1 0x3d
-
-
-u32 eprom_read(struct net_device *dev,u32 addr); //reads a 16 bits word
diff --git a/drivers/staging/rtl8192su/r8192SU_led.c b/drivers/staging/rtl8192su/r8192SU_led.c
new file mode 100644
index 00000000000..609dba67eb4
--- /dev/null
+++ b/drivers/staging/rtl8192su/r8192SU_led.c
@@ -0,0 +1,2347 @@
+/*
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ */
+
+#include "r8192U.h"
+#include "r8192S_hw.h"
+#include "r8192SU_led.h"
+
+#define LED_BLINK_NORMAL_INTERVAL 100
+#define LED_BLINK_SLOWLY_INTERVAL 200
+#define LED_BLINK_LONG_INTERVAL 400
+
+#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
+#define LED_BLINK_LINK_INTERVAL_ALPHA 500
+#define LED_BLINK_SCAN_INTERVAL_ALPHA 180
+#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
+#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+
+
+
+static void BlinkTimerCallback (unsigned long data);
+
+static void BlinkWorkItemCallback (struct work_struct *work);
+
+void InitLed819xUsb (struct net_device *dev, PLED_819xUsb pLed,
+ LED_PIN_819xUsb LedPin)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ pLed->dev = dev;
+ pLed->LedPin = LedPin;
+ pLed->CurrLedState = LED_OFF;
+ pLed->bLedOn = FALSE;
+
+ pLed->bLedBlinkInProgress = FALSE;
+ pLed->BlinkTimes = 0;
+ pLed->BlinkingLedState = LED_OFF;
+
+ init_timer(&pLed->BlinkTimer);
+ pLed->BlinkTimer.data = (unsigned long)dev;
+ pLed->BlinkTimer.function = BlinkTimerCallback;
+
+ INIT_WORK(&priv->BlinkWorkItem, (void*)BlinkWorkItemCallback);
+ priv->pLed = pLed;
+}
+
+
+void DeInitLed819xUsb (PLED_819xUsb pLed)
+{
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+}
+
+void SwLedOn (struct net_device *dev, PLED_819xUsb pLed)
+{
+ u8 LedCfg;
+
+ LedCfg = read_nic_byte(dev, LEDCFG);
+ switch (pLed->LedPin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ write_nic_byte(dev, LEDCFG, LedCfg&0xf0);
+ break;
+ case LED_PIN_LED1:
+ write_nic_byte(dev, LEDCFG, LedCfg&0x0f);
+ break;
+ default:
+ break;
+ }
+ pLed->bLedOn = TRUE;
+}
+
+void SwLedOff (struct net_device *dev, PLED_819xUsb pLed)
+{
+ u8 LedCfg;
+
+ LedCfg = read_nic_byte(dev, LEDCFG);
+ switch (pLed->LedPin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ LedCfg &= 0xf0;
+ write_nic_byte(dev, LEDCFG, (LedCfg|BIT3));
+ break;
+ case LED_PIN_LED1:
+ LedCfg &= 0x0f;
+ write_nic_byte(dev, LEDCFG, (LedCfg|BIT7));
+ break;
+ default:
+ break;
+ }
+ pLed->bLedOn = FALSE;
+}
+
+
+void
+InitSwLeds(
+ struct net_device *dev
+ )
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ InitLed819xUsb(dev, &(priv->SwLed0), LED_PIN_LED0);
+
+ InitLed819xUsb(dev,&(priv->SwLed1), LED_PIN_LED1);
+}
+
+
+void
+DeInitSwLeds(
+ struct net_device *dev
+ )
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ DeInitLed819xUsb( &(priv->SwLed0) );
+ DeInitLed819xUsb( &(priv->SwLed1) );
+}
+
+
+void
+SwLedBlink(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool bStopBlinking = FALSE;
+
+ if( pLed->BlinkingLedState == LED_ON )
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+ pLed->BlinkTimes--;
+ switch(pLed->CurrLedState)
+ {
+
+ case LED_BLINK_NORMAL:
+ if(pLed->BlinkTimes == 0)
+ {
+ bStopBlinking = TRUE;
+ }
+ break;
+
+ case LED_BLINK_StartToBlink:
+ if( (priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_INFRA))
+ {
+ bStopBlinking = TRUE;
+ }
+ else if((priv->ieee80211->state == IEEE80211_LINKED) && (priv->ieee80211->iw_mode == IW_MODE_ADHOC))
+ {
+ bStopBlinking = TRUE;
+ }
+ else if(pLed->BlinkTimes == 0)
+ {
+ bStopBlinking = TRUE;
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+ break;
+
+
+ default:
+ bStopBlinking = TRUE;
+ break;
+
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if( (priv->ieee80211->state == IEEE80211_LINKED) && (pLed->bLedOn == false))
+ {
+ SwLedOn(dev, pLed);
+ }
+ else if( (priv->ieee80211->state != IEEE80211_LINKED) && pLed->bLedOn == true)
+ {
+ SwLedOff(dev, pLed);
+ }
+
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( pLed->BlinkingLedState == LED_ON )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+
+ switch( pLed->CurrLedState )
+ {
+ case LED_BLINK_NORMAL:
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ break;
+
+ case LED_BLINK_SLOWLY:
+ case LED_BLINK_StartToBlink:
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ break;
+
+ case LED_BLINK_WPS:
+ {
+ if( pLed->BlinkingLedState == LED_ON )
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LONG_INTERVAL));
+ else
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LONG_INTERVAL));
+ }
+ break;
+
+ default:
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ break;
+ }
+ }
+}
+
+
+void
+SwLedBlink1(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed1 = &(priv->SwLed1);
+ bool bStopBlinking = FALSE;
+
+ if(priv->CustomerID == RT_CID_819x_CAMEO)
+ pLed = &(priv->SwLed1);
+
+ if( pLed->BlinkingLedState == LED_ON )
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+
+ if(priv->CustomerID == RT_CID_DEFAULT)
+ {
+ if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ if(!pLed1->bSWLedCtrl)
+ {
+ SwLedOn(dev, pLed1);
+ pLed1->bSWLedCtrl = TRUE;
+ }
+ else if(!pLed1->bLedOn)
+ SwLedOn(dev, pLed1);
+ RT_TRACE(COMP_LED, "Blinktimes (): turn on pLed1\n");
+ }
+ else
+ {
+ if(!pLed1->bSWLedCtrl)
+ {
+ SwLedOff(dev, pLed1);
+ pLed1->bSWLedCtrl = TRUE;
+ }
+ else if(pLed1->bLedOn)
+ SwLedOff(dev, pLed1);
+ RT_TRACE(COMP_LED, "Blinktimes (): turn off pLed1\n");
+ }
+ }
+
+ switch(pLed->CurrLedState)
+ {
+ case LED_BLINK_SLOWLY:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ break;
+
+ case LED_BLINK_NORMAL:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ break;
+
+ case LED_SCAN_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->bLedLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_TXRX_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->bLedLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ break;
+
+ case LED_BLINK_WPS_STOP:
+ if(pLed->BlinkingLedState == LED_ON)
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ bStopBlinking = FALSE;
+ }
+ else
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->bLedLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+void
+SwLedBlink2(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool bStopBlinking = FALSE;
+
+ if( pLed->BlinkingLedState == LED_ON)
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+ switch(pLed->CurrLedState)
+ {
+ case LED_SCAN_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "eRFPowerState %d\n", priv->ieee80211->eRFPowerState);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "stop scan blink CurrLedState %d\n", pLed->CurrLedState);
+
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "stop scan blink CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_TXRX_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "stop CurrLedState %d\n", pLed->CurrLedState);
+
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "stop CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+void
+SwLedBlink3(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool bStopBlinking = FALSE;
+
+ if( pLed->BlinkingLedState == LED_ON )
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ if(pLed->CurrLedState != LED_BLINK_WPS_STOP)
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+ switch(pLed->CurrLedState)
+ {
+ case LED_SCAN_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ if( !pLed->bLedOn )
+ SwLedOn(dev, pLed);
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ if( pLed->bLedOn )
+ SwLedOff(dev, pLed);
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_TXRX_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else if(priv->ieee80211->state == IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+
+ if( !pLed->bLedOn )
+ SwLedOn(dev, pLed);
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ else if(priv->ieee80211->state != IEEE80211_LINKED)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+
+ if( pLed->bLedOn )
+ SwLedOff(dev, pLed);
+
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ break;
+
+ case LED_BLINK_WPS_STOP:
+ if(pLed->BlinkingLedState == LED_ON)
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ bStopBlinking = FALSE;
+ }
+ else
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+ break;
+
+
+ default:
+ break;
+ }
+
+}
+
+
+void
+SwLedBlink4(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed1 = &(priv->SwLed1);
+ bool bStopBlinking = FALSE;
+
+ if( pLed->BlinkingLedState == LED_ON )
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+ if(!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN)
+ {
+ pLed1->BlinkingLedState = LED_OFF;
+ pLed1->CurrLedState = LED_OFF;
+ SwLedOff(dev, pLed1);
+ }
+
+ switch(pLed->CurrLedState)
+ {
+ case LED_BLINK_SLOWLY:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ break;
+
+ case LED_BLINK_StartToBlink:
+ if( pLed->bLedOn )
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+ break;
+
+ case LED_SCAN_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ }
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_TXRX_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ }
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ case LED_BLINK_WPS:
+ if( pLed->bLedOn )
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+ break;
+
+ case LED_BLINK_WPS_STOP:
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ break;
+
+ case LED_BLINK_WPS_STOP_OVERLAP:
+ pLed->BlinkTimes--;
+ if(pLed->BlinkTimes == 0)
+ {
+ if(pLed->bLedOn)
+ {
+ pLed->BlinkTimes = 1;
+ }
+ else
+ {
+ bStopBlinking = TRUE;
+ }
+ }
+
+ if(bStopBlinking)
+ {
+ pLed->BlinkTimes = 10;
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+ break;
+
+
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, "SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState);
+
+
+}
+
+void
+SwLedBlink5(
+ PLED_819xUsb pLed
+ )
+{
+ struct net_device *dev = (struct net_device *)(pLed->dev);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool bStopBlinking = FALSE;
+
+ if( pLed->BlinkingLedState == LED_ON )
+ {
+ SwLedOn(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn on\n", pLed->BlinkTimes);
+ }
+ else
+ {
+ SwLedOff(dev, pLed);
+ RT_TRACE(COMP_LED, "Blinktimes (%d): turn off\n", pLed->BlinkTimes);
+ }
+
+ switch(pLed->CurrLedState)
+ {
+ case LED_SCAN_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ if(pLed->bLedOn)
+ SwLedOff(dev, pLed);
+ }
+ else
+ { pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ if(!pLed->bLedOn)
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+
+ case LED_TXRX_BLINK:
+ pLed->BlinkTimes--;
+ if( pLed->BlinkTimes == 0 )
+ {
+ bStopBlinking = TRUE;
+ }
+
+ if(bStopBlinking)
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ if(pLed->bLedOn)
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ if(!pLed->bLedOn)
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ else
+ {
+ if( priv->ieee80211->eRFPowerState != eRfOn && priv->ieee80211->RfOffReason > RF_CHANGE_BY_PS)
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, "SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState);
+
+
+}
+
+
+void
+BlinkTimerCallback(
+ unsigned long data
+ )
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
+ schedule_work(&(priv->BlinkWorkItem));
+#endif
+}
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+void BlinkWorkItemCallback(struct work_struct *work)
+{
+ struct r8192_priv *priv = container_of(work, struct r8192_priv, BlinkWorkItem);
+#else
+void BlinkWorkItemCallback(void * Context)
+{
+ struct net_device *dev = (struct net_device *)Context;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+#endif
+
+ PLED_819xUsb pLed = priv->pLed;
+
+ switch(priv->LedStrategy)
+ {
+ case SW_LED_MODE0:
+ SwLedBlink(pLed);
+ break;
+
+ case SW_LED_MODE1:
+ SwLedBlink1(pLed);
+ break;
+
+ case SW_LED_MODE2:
+ SwLedBlink2(pLed);
+ break;
+
+ case SW_LED_MODE3:
+ SwLedBlink3(pLed);
+ break;
+
+ case SW_LED_MODE4:
+ SwLedBlink4(pLed);
+ break;
+
+ case SW_LED_MODE5:
+ SwLedBlink5(pLed);
+ break;
+
+ default:
+ SwLedBlink(pLed);
+ break;
+ }
+}
+
+
+
+
+void
+SwLedControlMode0(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed1);
+
+ switch(LedAction)
+ {
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if( pLed->bLedBlinkInProgress == FALSE )
+ {
+ pLed->bLedBlinkInProgress = TRUE;
+
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ pLed->BlinkTimes = 2;
+
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+ break;
+
+ case LED_CTL_START_TO_LINK:
+ if( pLed->bLedBlinkInProgress == FALSE )
+ {
+ pLed->bLedBlinkInProgress = TRUE;
+
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ pLed->BlinkTimes = 24;
+
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ }
+ else
+ {
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ }
+ break;
+
+ case LED_CTL_LINK:
+ pLed->CurrLedState = LED_ON;
+ if( pLed->bLedBlinkInProgress == FALSE )
+ {
+ SwLedOn(dev, pLed);
+ }
+ break;
+
+ case LED_CTL_NO_LINK:
+ pLed->CurrLedState = LED_OFF;
+ if( pLed->bLedBlinkInProgress == FALSE )
+ {
+ SwLedOff(dev, pLed);
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ if(pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ SwLedOff(dev, pLed);
+ break;
+
+ case LED_CTL_START_WPS:
+ if( pLed->bLedBlinkInProgress == FALSE || pLed->CurrLedState == LED_ON)
+ {
+ pLed->bLedBlinkInProgress = TRUE;
+
+ pLed->CurrLedState = LED_BLINK_WPS;
+ pLed->BlinkTimes = 20;
+
+ if( pLed->bLedOn )
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LONG_INTERVAL));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LONG_INTERVAL));
+ }
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ if(pLed->bLedBlinkInProgress)
+ {
+ pLed->CurrLedState = LED_OFF;
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ break;
+
+
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, "Led %d\n", pLed->CurrLedState);
+
+}
+
+void
+SwLedControlMode1(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed0);
+
+ if(priv->CustomerID == RT_CID_819x_CAMEO)
+ pLed = &(priv->SwLed1);
+
+ switch(LedAction)
+ {
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if( pLed->bLedNoLinkBlinkInProgress == FALSE )
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+ if( pLed->bLedLinkBlinkInProgress == TRUE )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_LINK:
+ if( pLed->bLedLinkBlinkInProgress == FALSE )
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_LINK_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if((priv->ieee80211->LinkDetectInfo.bBusyTraffic) && (priv->ieee80211->state == IEEE80211_LINKED))
+ ;
+ else if(pLed->bLedScanBlinkInProgress ==FALSE)
+ {
+ if(IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress == TRUE )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedScanBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_SCAN_BLINK;
+ pLed->BlinkTimes = 24;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if(pLed->bLedBlinkInProgress ==FALSE)
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ }
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress == TRUE )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ pLed->bLedBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_TXRX_BLINK;
+ pLed->BlinkTimes = 2;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_START_WPS:
+ case LED_CTL_START_WPS_BOTTON:
+ if(pLed->bLedWPSBlinkInProgress ==FALSE)
+ {
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress == TRUE )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedScanBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+
+ case LED_CTL_STOP_WPS:
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress == TRUE )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedScanBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ }
+ else
+ {
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if(pLed->bLedOn)
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ if( pLed->bLedNoLinkBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedWPSBlinkInProgress )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+
+ SwLedOff(dev, pLed);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(COMP_LED, "Led %d\n", pLed->CurrLedState);
+}
+
+void
+SwLedControlMode2(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed0);
+
+ switch(LedAction)
+ {
+ case LED_CTL_SITE_SURVEY:
+ if(priv->ieee80211->LinkDetectInfo.bBusyTraffic)
+ ;
+ else if(pLed->bLedScanBlinkInProgress ==FALSE)
+ {
+ if(IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedScanBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_SCAN_BLINK;
+ pLed->BlinkTimes = 24;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if((pLed->bLedBlinkInProgress ==FALSE) && (priv->ieee80211->state == IEEE80211_LINKED))
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+
+ pLed->bLedBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_TXRX_BLINK;
+ pLed->BlinkTimes = 2;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_LINK:
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ case LED_CTL_START_WPS:
+ case LED_CTL_START_WPS_BOTTON:
+ if(pLed->bLedWPSBlinkInProgress ==FALSE)
+ {
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedScanBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL:
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ if( priv->ieee80211->eRFPowerState != eRfOn )
+ {
+ SwLedOff(dev, pLed);
+ }
+ else
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+ }
+ break;
+
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if(!IS_LED_BLINKING(pLed))
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedWPSBlinkInProgress )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+}
+
+ void
+ SwLedControlMode3(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed0);
+
+ switch(LedAction)
+ {
+ case LED_CTL_SITE_SURVEY:
+ if(priv->ieee80211->LinkDetectInfo.bBusyTraffic)
+ ;
+ else if(pLed->bLedScanBlinkInProgress ==FALSE)
+ {
+ if(IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedScanBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_SCAN_BLINK;
+ pLed->BlinkTimes = 24;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if((pLed->bLedBlinkInProgress ==FALSE) && (priv->ieee80211->state == IEEE80211_LINKED))
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+
+ pLed->bLedBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_TXRX_BLINK;
+ pLed->BlinkTimes = 2;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_LINK:
+ if(IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ case LED_CTL_START_WPS:
+ case LED_CTL_START_WPS_BOTTON:
+ if(pLed->bLedWPSBlinkInProgress ==FALSE)
+ {
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedScanBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+ else
+ {
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if(pLed->bLedOn)
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ }
+
+ break;
+
+
+ case LED_CTL_STOP_WPS_FAIL:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if(!IS_LED_BLINKING(pLed))
+ {
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedWPSBlinkInProgress )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(COMP_LED, "CurrLedState %d\n", pLed->CurrLedState);
+}
+
+
+void
+SwLedControlMode4(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed0);
+ PLED_819xUsb pLed1 = &(priv->SwLed1);
+
+ switch(LedAction)
+ {
+ case LED_CTL_START_TO_LINK:
+ if(pLed1->bLedWPSBlinkInProgress)
+ {
+ pLed1->bLedWPSBlinkInProgress = FALSE;
+ del_timer_sync(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = LED_OFF;
+ pLed1->CurrLedState = LED_OFF;
+
+ if(pLed1->bLedOn)
+ mod_timer(&(pLed1->BlinkTimer), 0);
+ }
+
+ if( pLed->bLedStartToLinkBlinkInProgress == FALSE )
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedNoLinkBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedStartToLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ if( pLed->bLedOn )
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+ }
+ break;
+
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ if(LedAction == LED_CTL_LINK)
+ {
+ if(pLed1->bLedWPSBlinkInProgress)
+ {
+ pLed1->bLedWPSBlinkInProgress = FALSE;
+ del_timer_sync(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = LED_OFF;
+ pLed1->CurrLedState = LED_OFF;
+
+ if(pLed1->bLedOn)
+ mod_timer(&(pLed1->BlinkTimer), 0);
+ }
+ }
+
+ if( pLed->bLedNoLinkBlinkInProgress == FALSE )
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+ }
+
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if((priv->ieee80211->LinkDetectInfo.bBusyTraffic) && (priv->ieee80211->state == IEEE80211_LINKED))
+ ;
+ else if(pLed->bLedScanBlinkInProgress ==FALSE)
+ {
+ if(IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedScanBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_SCAN_BLINK;
+ pLed->BlinkTimes = 24;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if(pLed->bLedBlinkInProgress ==FALSE)
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK || IS_LED_WPS_BLINKING(pLed))
+ {
+ return;
+ }
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ pLed->bLedBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_TXRX_BLINK;
+ pLed->BlinkTimes = 2;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_START_WPS:
+ case LED_CTL_START_WPS_BOTTON:
+ if(pLed1->bLedWPSBlinkInProgress)
+ {
+ pLed1->bLedWPSBlinkInProgress = FALSE;
+ del_timer_sync(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = LED_OFF;
+ pLed1->CurrLedState = LED_OFF;
+
+ if(pLed1->bLedOn)
+ mod_timer(&(pLed1->BlinkTimer), 0);
+ }
+
+ if(pLed->bLedWPSBlinkInProgress ==FALSE)
+ {
+ if(pLed->bLedNoLinkBlinkInProgress == TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if(pLed->bLedScanBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ pLed->bLedWPSBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if( pLed->bLedOn )
+ {
+ pLed->BlinkingLedState = LED_OFF;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SLOWLY_INTERVAL));
+ }
+ else
+ {
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+ }
+
+ }
+ break;
+
+ case LED_CTL_STOP_WPS:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+
+ if(pLed1->bLedWPSBlinkInProgress)
+ del_timer_sync(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = TRUE;
+
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP;
+ if( pLed1->bLedOn )
+ pLed1->BlinkingLedState = LED_OFF;
+ else
+ pLed1->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed1->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+
+ break;
+
+ case LED_CTL_STOP_WPS_FAIL_OVERLAP:
+ if(pLed->bLedWPSBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
+
+ if(pLed1->bLedWPSBlinkInProgress)
+ del_timer_sync(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = TRUE;
+
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
+ pLed1->BlinkTimes = 10;
+ if( pLed1->bLedOn )
+ pLed1->BlinkingLedState = LED_OFF;
+ else
+ pLed1->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed1->BlinkTimer), jiffies + MSECS(LED_BLINK_NORMAL_INTERVAL));
+
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+
+ if( pLed->bLedNoLinkBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedLinkBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedWPSBlinkInProgress )
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedScanBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = FALSE;
+ }
+ if( pLed->bLedStartToLinkBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedStartToLinkBlinkInProgress = FALSE;
+ }
+
+ if( pLed1->bLedWPSBlinkInProgress )
+ {
+ del_timer_sync(&(pLed1->BlinkTimer));
+ pLed1->bLedWPSBlinkInProgress = FALSE;
+ }
+
+
+ pLed1->BlinkingLedState = LED_UNKNOWN;
+ SwLedOff(dev, pLed);
+ SwLedOff(dev, pLed1);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(COMP_LED, "Led %d\n", pLed->CurrLedState);
+}
+
+
+
+void
+SwLedControlMode5(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ PLED_819xUsb pLed = &(priv->SwLed0);
+
+ if(priv->CustomerID == RT_CID_819x_CAMEO)
+ pLed = &(priv->SwLed1);
+
+ switch(LedAction)
+ {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_NO_LINK:
+ case LED_CTL_LINK:
+ if(pLed->CurrLedState == LED_SCAN_BLINK)
+ {
+ return;
+ }
+ pLed->CurrLedState = LED_ON;
+ pLed->BlinkingLedState = LED_ON;
+ pLed->bLedBlinkInProgress = FALSE;
+ mod_timer(&(pLed->BlinkTimer), 0);
+ break;
+
+ case LED_CTL_SITE_SURVEY:
+ if((priv->ieee80211->LinkDetectInfo.bBusyTraffic) && (priv->ieee80211->state == IEEE80211_LINKED))
+ ;
+ else if(pLed->bLedScanBlinkInProgress ==FALSE)
+ {
+ if(pLed->bLedBlinkInProgress ==TRUE)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+ pLed->bLedScanBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_SCAN_BLINK;
+ pLed->BlinkTimes = 24;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_SCAN_INTERVAL_ALPHA));
+
+ }
+ break;
+
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if(pLed->bLedBlinkInProgress ==FALSE)
+ {
+ if(pLed->CurrLedState == LED_SCAN_BLINK)
+ {
+ return;
+ }
+ pLed->bLedBlinkInProgress = TRUE;
+ pLed->CurrLedState = LED_TXRX_BLINK;
+ pLed->BlinkTimes = 2;
+ if( pLed->bLedOn )
+ pLed->BlinkingLedState = LED_OFF;
+ else
+ pLed->BlinkingLedState = LED_ON;
+ mod_timer(&(pLed->BlinkTimer), jiffies + MSECS(LED_BLINK_FASTER_INTERVAL_ALPHA));
+ }
+ break;
+
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = LED_OFF;
+ pLed->BlinkingLedState = LED_OFF;
+
+ if( pLed->bLedBlinkInProgress)
+ {
+ del_timer_sync(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = FALSE;
+ }
+
+ SwLedOff(dev, pLed);
+ break;
+
+ default:
+ break;
+
+ }
+
+ RT_TRACE(COMP_LED, "Led %d\n", pLed->CurrLedState);
+}
+
+
+void
+LedControl8192SUsb(
+ struct net_device *dev,
+ LED_CTL_MODE LedAction
+ )
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ if( priv->bRegUseLed == FALSE)
+ return;
+
+ if (!priv->up)
+ return;
+
+ if(priv->bInHctTest)
+ return;
+
+ if( priv->ieee80211->eRFPowerState != eRfOn &&
+ (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
+ LedAction == LED_CTL_SITE_SURVEY ||
+ LedAction == LED_CTL_LINK ||
+ LedAction == LED_CTL_NO_LINK ||
+ LedAction == LED_CTL_POWER_ON) )
+ {
+ return;
+ }
+
+ switch(priv->LedStrategy)
+ {
+ case SW_LED_MODE0:
+ break;
+
+ case SW_LED_MODE1:
+ SwLedControlMode1(dev, LedAction);
+ break;
+ case SW_LED_MODE2:
+ SwLedControlMode2(dev, LedAction);
+ break;
+
+ case SW_LED_MODE3:
+ SwLedControlMode3(dev, LedAction);
+ break;
+
+ case SW_LED_MODE4:
+ SwLedControlMode4(dev, LedAction);
+ break;
+
+ case SW_LED_MODE5:
+ SwLedControlMode5(dev, LedAction);
+ break;
+
+ default:
+ break;
+ }
+
+ RT_TRACE(COMP_LED, "LedStrategy:%d, LedAction %d\n", priv->LedStrategy,LedAction);
+}
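+
+/*
+ * Illustrative call pattern (a sketch, not part of this patch): callers are
+ * not expected to invoke the SwLedControlModeX() helpers directly.  They
+ * signal LED events through the ieee80211 LedControlHandler hook, which is
+ * assumed to be wired to LedControl8192SUsb() during initialization, e.g.
+ *
+ *	if (priv->ieee80211->LedControlHandler != NULL)
+ *		priv->ieee80211->LedControlHandler(dev, LED_CTL_TX);
+ *
+ * LedControl8192SUsb() then dispatches on priv->LedStrategy as above.
+ */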
+
+
diff --git a/drivers/staging/rtl8192su/r8192SU_led.h b/drivers/staging/rtl8192su/r8192SU_led.h
new file mode 100644
index 00000000000..acedae4a59c
--- /dev/null
+++ b/drivers/staging/rtl8192su/r8192SU_led.h
@@ -0,0 +1,93 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef __INC_HAL8192USBLED_H
+#define __INC_HAL8192USBLED_H
+
+#include <linux/types.h>
+#include <linux/timer.h>
+
+typedef enum _LED_STATE_819xUsb{
+ LED_UNKNOWN = 0,
+ LED_ON = 1,
+ LED_OFF = 2,
+ LED_BLINK_NORMAL = 3,
+ LED_BLINK_SLOWLY = 4,
+ LED_POWER_ON_BLINK = 5,
+ LED_SCAN_BLINK = 6,
+ LED_NO_LINK_BLINK = 7,
+ LED_BLINK_StartToBlink = 8,
+ LED_BLINK_WPS = 9,
+ LED_TXRX_BLINK = 10,
+ LED_BLINK_WPS_STOP = 11,
+ LED_BLINK_WPS_STOP_OVERLAP = 12,
+
+}LED_STATE_819xUsb;
+
+#define IS_LED_WPS_BLINKING(_LED_819xUsb) (((PLED_819xUsb)_LED_819xUsb)->CurrLedState==LED_BLINK_WPS \
+ || ((PLED_819xUsb)_LED_819xUsb)->CurrLedState==LED_BLINK_WPS_STOP \
+ || ((PLED_819xUsb)_LED_819xUsb)->bLedWPSBlinkInProgress)
+
+#define IS_LED_BLINKING(_LED_819xUsb) (((PLED_819xUsb)_LED_819xUsb)->bLedWPSBlinkInProgress \
+ ||((PLED_819xUsb)_LED_819xUsb)->bLedScanBlinkInProgress)
+
+typedef enum _LED_PIN_819xUsb{
+ LED_PIN_GPIO0,
+ LED_PIN_LED0,
+ LED_PIN_LED1
+}LED_PIN_819xUsb;
+
+typedef enum _LED_STRATEGY_819xUsb{
+ SW_LED_MODE0, /* SW control 1 LED via GPIO0. It is the default option. */
+ SW_LED_MODE1, /* SW control for PCI Express */
+ SW_LED_MODE2, /* SW control for Cameo. */
+ SW_LED_MODE3, /* SW control for RunTop. */
+ SW_LED_MODE4, /* SW control for Netcore */
+ SW_LED_MODE5,
+ HW_LED, /* HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes) */
+}LED_STRATEGY_819xUsb, *PLED_STRATEGY_819xUsb;
+
+typedef struct _LED_819xUsb{
+ struct net_device *dev;
+
+ LED_PIN_819xUsb LedPin;
+
+ LED_STATE_819xUsb CurrLedState;
+ bool bLedOn;
+
+ bool bSWLedCtrl;
+
+ bool bLedBlinkInProgress;
+ bool bLedNoLinkBlinkInProgress;
+ bool bLedLinkBlinkInProgress;
+ bool bLedStartToLinkBlinkInProgress;
+ bool bLedScanBlinkInProgress;
+ bool bLedWPSBlinkInProgress;
+
+ u32 BlinkTimes;
+ LED_STATE_819xUsb BlinkingLedState;
+
+ struct timer_list BlinkTimer;
+} LED_819xUsb, *PLED_819xUsb;
+
+void InitSwLeds(struct net_device *dev);
+void DeInitSwLeds(struct net_device *dev);
+void LedControl8192SUsb(struct net_device *dev,LED_CTL_MODE LedAction);
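+
+/*
+ * Minimal usage sketch (illustrative only; the exact call sites live in
+ * r8192SU_led.c and r8192U_core.c, so the placement below is an assumption):
+ * the probe path calls InitSwLeds(dev) once, teardown calls
+ * DeInitSwLeds(dev), and hot paths report events such as
+ *
+ *	LedControl8192SUsb(dev, LED_CTL_TX);
+ *	LedControl8192SUsb(dev, LED_CTL_LINK);
+ */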
+
+#endif
+
diff --git a/drivers/staging/rtl8192su/r8192S_firmware.c b/drivers/staging/rtl8192su/r8192S_firmware.c
index 752a3f1fb3f..5036d547d5d 100644
--- a/drivers/staging/rtl8192su/r8192S_firmware.c
+++ b/drivers/staging/rtl8192su/r8192S_firmware.c
@@ -31,44 +31,46 @@
// Code size
// Created by Roger, 2008.04.10.
//
-bool FirmwareDownloadCode(struct net_device *dev, u8 * code_virtual_address,u32 buffer_len)
+bool FirmwareDownloadCode(struct net_device *dev,
+ u8 *code_virtual_address,
+ u32 buffer_len)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool rt_status = true;
- u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE; //Fragmentation might be required in 90/92 but not in 92S
- u16 frag_length, frag_offset = 0;
- struct sk_buff *skb;
- unsigned char *seg_ptr;
- cb_desc *tcb_desc;
- u8 bLastIniPkt = 0;
- u16 ExtraDescOffset = 0;
-
-
- RT_TRACE(COMP_FIRMWARE, "--->FirmwareDownloadCode()\n" );
-
- //MAX_TRANSMIT_BUFFER_SIZE
- if(buffer_len >= MAX_FIRMWARE_CODE_SIZE-USB_HWDESC_HEADER_LEN)
- {
- RT_TRACE(COMP_ERR, "Size over MAX_FIRMWARE_CODE_SIZE! \n");
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool rt_status = true;
+ /* Fragmentation might be required in 90/92 but not in 92S */
+ u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE;
+ u16 frag_length, frag_offset = 0;
+ struct sk_buff *skb;
+ unsigned char *seg_ptr;
+ cb_desc *tcb_desc;
+ u8 bLastIniPkt = 0;
+ u16 ExtraDescOffset = 0;
+
+ if (buffer_len >= MAX_FIRMWARE_CODE_SIZE - USB_HWDESC_HEADER_LEN) {
+ RT_TRACE(COMP_ERR, "(%s): Firmware exceeds"
+ " MAX_FIRMWARE_CODE_SIZE\n", __func__);
goto cmdsend_downloadcode_fail;
}
-
ExtraDescOffset = USB_HWDESC_HEADER_LEN;
-
do {
if((buffer_len-frag_offset) > frag_threshold)
- {
frag_length = frag_threshold + ExtraDescOffset;
+ else {
+ frag_length = (u16)(buffer_len -
+ frag_offset + ExtraDescOffset);
+ bLastIniPkt = 1;
}
- else
- {
- frag_length = (u16)(buffer_len - frag_offset + ExtraDescOffset);
- bLastIniPkt = 1;
- }
-
- /* Allocate skb buffer to contain firmware info and tx descriptor info. */
+ /*
+ * Allocate skb buffer to contain firmware info
+ * and tx descriptor info.
+ */
skb = dev_alloc_skb(frag_length);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
+ if (skb == NULL) {
+ RT_TRACE(COMP_ERR, "(%s): unable to alloc skb buffer\n",
+ __func__);
+ goto cmdsend_downloadcode_fail;
+ }
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
@@ -76,73 +78,60 @@ bool FirmwareDownloadCode(struct net_device *dev, u8 * code_virtual_address,u32
tcb_desc->bLastIniPkt = bLastIniPkt;
skb_reserve(skb, ExtraDescOffset);
- seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length-ExtraDescOffset));
- memcpy(seg_ptr, code_virtual_address+frag_offset, (u32)(frag_length-ExtraDescOffset));
- tcb_desc->txbuf_size= frag_length;
+ seg_ptr = (u8 *)skb_put(skb,
+ (u32)(frag_length - ExtraDescOffset));
+
+ memcpy(seg_ptr, code_virtual_address + frag_offset,
+ (u32)(frag_length-ExtraDescOffset));
+
+ tcb_desc->txbuf_size = frag_length;
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) )
- {
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- }
- else
- {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
- }
+ } else
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
frag_offset += (frag_length - ExtraDescOffset);
- }while(frag_offset < buffer_len);
-
+ } while (frag_offset < buffer_len);
return rt_status ;
-
cmdsend_downloadcode_fail:
rt_status = false;
- RT_TRACE(COMP_ERR, "CmdSendDownloadCode fail !!\n");
+ RT_TRACE(COMP_ERR, "(%s): failed\n", __func__);
return rt_status;
-
}
-RT_STATUS
-FirmwareEnableCPU(struct net_device *dev)
+bool FirmwareEnableCPU(struct net_device *dev)
{
+ bool rtStatus = true;
+ u8 tmpU1b, CPUStatus = 0;
+ u16 tmpU2b;
+ u32 iCheckTime = 200;
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- u8 tmpU1b, CPUStatus = 0;
- u16 tmpU2b;
- u32 iCheckTime = 200;
-
- RT_TRACE(COMP_FIRMWARE, "-->FirmwareEnableCPU()\n" );
- // Enable CPU.
+ /* Enable CPU. */
tmpU1b = read_nic_byte(dev, SYS_CLKR);
- write_nic_byte(dev, SYS_CLKR, (tmpU1b|SYS_CPU_CLKSEL)); //AFE source
-
+ /* AFE source */
+ write_nic_byte(dev, SYS_CLKR, (tmpU1b|SYS_CPU_CLKSEL));
tmpU2b = read_nic_word(dev, SYS_FUNC_EN);
write_nic_word(dev, SYS_FUNC_EN, (tmpU2b|FEN_CPUEN));
-
- //Polling IMEM Ready after CPU has refilled.
- do
- {
+ /* Poll IMEM Ready after CPU has refilled. */
+ do {
CPUStatus = read_nic_byte(dev, TCR);
- if(CPUStatus& IMEM_RDY)
- {
- RT_TRACE(COMP_FIRMWARE, "IMEM Ready after CPU has refilled.\n");
+ if (CPUStatus & IMEM_RDY)
+ /* success */
break;
- }
-
- //usleep(100);
udelay(100);
- }while(iCheckTime--);
-
- if(!(CPUStatus & IMEM_RDY))
- return RT_STATUS_FAILURE;
-
- RT_TRACE(COMP_FIRMWARE, "<--FirmwareEnableCPU(): rtStatus(%#x)\n", rtStatus);
+ } while (iCheckTime--);
+ if (!(CPUStatus & IMEM_RDY)) {
+ RT_TRACE(COMP_ERR, "%s(): failed to enable CPU", __func__);
+ rtStatus = false;
+ }
return rtStatus;
}
@@ -176,106 +165,87 @@ FirmwareGetNextStatus(FIRMWARE_8192S_STATUS FWCurrentStatus)
return NextFWStatus;
}
-bool
-FirmwareCheckReady(struct net_device *dev, u8 LoadFWStatus)
+bool FirmwareCheckReady(struct net_device *dev, u8 LoadFWStatus)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- RT_STATUS rtStatus = RT_STATUS_SUCCESS;
- rt_firmware *pFirmware = priv->pFirmware;
- int PollingCnt = 1000;
- //u8 tmpU1b, CPUStatus = 0;
- u8 CPUStatus = 0;
- u32 tmpU4b;
- //bool bOrgIMREnable;
-
- RT_TRACE(COMP_FIRMWARE, "--->FirmwareCheckReady(): LoadStaus(%d),", LoadFWStatus);
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool rtStatus = true;
+ rt_firmware *pFirmware = priv->pFirmware;
+ int PollingCnt = 1000;
+ u8 CPUStatus = 0;
+ u32 tmpU4b;
pFirmware->FWStatus = (FIRMWARE_8192S_STATUS)LoadFWStatus;
- if( LoadFWStatus == FW_STATUS_LOAD_IMEM)
- {
- do
- {//Polling IMEM code done.
+ switch (LoadFWStatus) {
+ case FW_STATUS_LOAD_IMEM:
+ do { /* Polling IMEM code done. */
CPUStatus = read_nic_byte(dev, TCR);
if(CPUStatus& IMEM_CODE_DONE)
break;
-
udelay(5);
- }while(PollingCnt--);
- if(!(CPUStatus & IMEM_CHK_RPT) || PollingCnt <= 0)
- {
+ } while (PollingCnt--);
+ if (!(CPUStatus & IMEM_CHK_RPT) || PollingCnt <= 0) {
RT_TRACE(COMP_ERR, "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\r\n", CPUStatus);
- return false;
+ goto FirmwareCheckReadyFail;
}
- }
- else if( LoadFWStatus == FW_STATUS_LOAD_EMEM)
- {//Check Put Code OK and Turn On CPU
- do
- {//Polling EMEM code done.
+ break;
+ case FW_STATUS_LOAD_EMEM: /* Check Put Code OK and Turn On CPU */
+ do { /* Polling EMEM code done. */
CPUStatus = read_nic_byte(dev, TCR);
if(CPUStatus& EMEM_CODE_DONE)
break;
-
udelay(5);
- }while(PollingCnt--);
- if(!(CPUStatus & EMEM_CHK_RPT))
- {
+ } while (PollingCnt--);
+ if (!(CPUStatus & EMEM_CHK_RPT)) {
RT_TRACE(COMP_ERR, "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\r\n", CPUStatus);
- return false;
+ goto FirmwareCheckReadyFail;
}
-
- // Turn On CPU
- rtStatus = FirmwareEnableCPU(dev);
- if(rtStatus != RT_STATUS_SUCCESS)
- {
- RT_TRACE(COMP_ERR, "Enable CPU fail ! \n" );
- return false;
+ /* Turn On CPU */
+ if (FirmwareEnableCPU(dev) != true) {
+ RT_TRACE(COMP_ERR, "%s(): failed to enable CPU",
+ __func__);
+ goto FirmwareCheckReadyFail;
}
- }
- else if( LoadFWStatus == FW_STATUS_LOAD_DMEM)
- {
- do
- {//Polling DMEM code done
+ break;
+ case FW_STATUS_LOAD_DMEM:
+ do { /* Polling DMEM code done */
CPUStatus = read_nic_byte(dev, TCR);
if(CPUStatus& DMEM_CODE_DONE)
break;
udelay(5);
- }while(PollingCnt--);
+ } while (PollingCnt--);
- if(!(CPUStatus & DMEM_CODE_DONE))
- {
+ if (!(CPUStatus & DMEM_CODE_DONE)) {
RT_TRACE(COMP_ERR, "Polling DMEM code done fail ! CPUStatus(%#x)\n", CPUStatus);
- return false;
+ goto FirmwareCheckReadyFail;
}
- RT_TRACE(COMP_FIRMWARE, "DMEM code download success, CPUStatus(%#x)\n", CPUStatus);
+ RT_TRACE(COMP_FIRMWARE, "%s(): DMEM code download success, "
+ "CPUStatus(%#x)",
+ __func__, CPUStatus);
-// PollingCnt = 100; // Set polling cycle to 10ms.
- PollingCnt = 10000; // Set polling cycle to 10ms.
+ PollingCnt = 10000; /* Set polling cycle to 10ms. */
- do
- {//Polling Load Firmware ready
+ do { /* Polling Load Firmware ready */
CPUStatus = read_nic_byte(dev, TCR);
if(CPUStatus & FWRDY)
break;
-
udelay(100);
- }while(PollingCnt--);
+ } while (PollingCnt--);
- RT_TRACE(COMP_FIRMWARE, "Polling Load Firmware ready, CPUStatus(%x)\n", CPUStatus);
+ RT_TRACE(COMP_FIRMWARE, "%s(): polling load firmware ready, "
+ "CPUStatus(%x)",
+ __func__, CPUStatus);
- //if(!(CPUStatus & LOAD_FW_READY))
- //if((CPUStatus & LOAD_FW_READY) != 0xff)
- if((CPUStatus & LOAD_FW_READY) != LOAD_FW_READY)
- {
- RT_TRACE(COMP_ERR, "Polling Load Firmware ready fail ! CPUStatus(%x)\n", CPUStatus);
- return false;
+ if ((CPUStatus & LOAD_FW_READY) != LOAD_FW_READY) {
+ RT_TRACE(COMP_ERR, "Polling Load Firmware ready failed "
+ "CPUStatus(%x)\n", CPUStatus);
+ goto FirmwareCheckReadyFail;
}
-
- //
- // <Roger_Notes> USB interface will update reserved followings parameters later!!
- // 2008.08.28.
- //
+ /*
+ * USB interface will update the following
+ * reserved parameters later
+ */
//
// <Roger_Notes> If right here, we can set TCR/RCR to desired value
@@ -288,16 +258,23 @@ FirmwareCheckReady(struct net_device *dev, u8 LoadFWStatus)
write_nic_dword(dev, RCR,
(tmpU4b|RCR_APPFCS|RCR_APP_ICV|RCR_APP_MIC));
- RT_TRACE(COMP_FIRMWARE, "FirmwareCheckReady(): Current RCR settings(%#x)\n", tmpU4b);
-
-
+ RT_TRACE(COMP_FIRMWARE, "%s(): Current RCR settings(%#x)",
+ __func__, tmpU4b);
// Set to normal mode.
write_nic_byte(dev, LBKMD_SEL, LBK_NORMAL);
-
+ break;
+ default:
+ break;
}
+ RT_TRACE(COMP_FIRMWARE, "%s(): LoadFWStatus(%d), success",
+ __func__, LoadFWStatus);
+ return rtStatus;
- RT_TRACE(COMP_FIRMWARE, "<---FirmwareCheckReady(): LoadFWStatus(%d), rtStatus(%x)\n", LoadFWStatus, rtStatus);
- return (rtStatus == RT_STATUS_SUCCESS) ? true:false;
+FirmwareCheckReadyFail:
+ rtStatus = false;
+ RT_TRACE(COMP_FIRMWARE, "%s(): LoadFWStatus(%d), failed",
+ __func__, LoadFWStatus);
+ return rtStatus;
}
//
@@ -338,143 +315,159 @@ void FirmwareHeaderPriveUpdate(struct net_device *dev, PRT_8192S_FIRMWARE_PRIV
pFwPriv->rf_config = FirmwareHeaderMapRfType(dev);
}
+bool FirmwareRequest92S(struct net_device *dev, rt_firmware *pFirmware)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool rtStatus = true;
+ const char *pFwImageFileName[1] = {"RTL8192SU/rtl8192sfw.bin"};
+ u8 *pucMappedFile = NULL;
+ u32 ulInitStep = 0;
+ u8 FwHdrSize = RT_8192S_FIRMWARE_HDR_SIZE;
+ PRT_8192S_FIRMWARE_HDR pFwHdr = NULL;
+ u32 file_length = 0;
+ int rc;
+ const struct firmware *fw_entry;
+
+ rc = request_firmware(&fw_entry,
+ pFwImageFileName[ulInitStep],
+ &priv->udev->dev);
+ if (rc < 0)
+ goto RequestFirmware_Fail;
+
+ if (fw_entry->size > sizeof(pFirmware->szFwTmpBuffer)) {
+ RT_TRACE(COMP_ERR, "%s(): image file too large"
+ "for container buffer", __func__);
+ release_firmware(fw_entry);
+ goto RequestFirmware_Fail;
+ }
+ memcpy(pFirmware->szFwTmpBuffer, fw_entry->data, fw_entry->size);
+ pFirmware->szFwTmpBufferLen = fw_entry->size;
+ release_firmware(fw_entry);
+
+ pucMappedFile = pFirmware->szFwTmpBuffer;
+ file_length = pFirmware->szFwTmpBufferLen;
+
+ /* Retrieve FW header. */
+ pFirmware->pFwHeader = (PRT_8192S_FIRMWARE_HDR) pucMappedFile;
+ pFwHdr = pFirmware->pFwHeader;
+
+ RT_TRACE(COMP_FIRMWARE, "%s(): signature: %x, version: %x, "
+ "size: %x, imemsize: %x, sram size: %x",
+ __func__, pFwHdr->Signature, pFwHdr->Version,
+ pFwHdr->DMEMSize, pFwHdr->IMG_IMEM_SIZE,
+ pFwHdr->IMG_SRAM_SIZE);
+
+ pFirmware->FirmwareVersion = byte(pFwHdr->Version , 0);
+
+ if ((pFwHdr->IMG_IMEM_SIZE == 0) ||
+ (pFwHdr->IMG_IMEM_SIZE > sizeof(pFirmware->FwIMEM))) {
+ RT_TRACE(COMP_ERR, "%s(): memory for data image is less than"
+ " IMEM requires", __func__);
+ goto RequestFirmware_Fail;
+ } else {
+ pucMappedFile += FwHdrSize;
+ /* Retrieve IMEM image. */
+ memcpy(pFirmware->FwIMEM, pucMappedFile, pFwHdr->IMG_IMEM_SIZE);
+ pFirmware->FwIMEMLen = pFwHdr->IMG_IMEM_SIZE;
+ }
+
+ if (pFwHdr->IMG_SRAM_SIZE > sizeof(pFirmware->FwEMEM)) {
+ RT_TRACE(COMP_ERR, "%s(): memory for data image is less than"
+ " EMEM requires", __func__);
+ goto RequestFirmware_Fail;
+ } else {
+ pucMappedFile += pFirmware->FwIMEMLen;
+ /* Retrieve EMEM image */
+ memcpy(pFirmware->FwEMEM, pucMappedFile, pFwHdr->IMG_SRAM_SIZE);
+ pFirmware->FwEMEMLen = pFwHdr->IMG_SRAM_SIZE;
+ }
+ return rtStatus;
+
+RequestFirmware_Fail:
+ RT_TRACE(COMP_ERR, "%s(): failed with TCR-Status: %x\n",
+ __func__, read_nic_word(dev, TCR));
+ rtStatus = false;
+ return rtStatus;
+}
bool FirmwareDownload92S(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- bool rtStatus = true;
- const char *pFwImageFileName[1] = {"RTL8192SU/rtl8192sfw.bin"};
- u8 *pucMappedFile = NULL;
- u32 ulFileLength, ulInitStep = 0;
- u8 FwHdrSize = RT_8192S_FIRMWARE_HDR_SIZE;
- rt_firmware *pFirmware = priv->pFirmware;
- u8 FwStatus = FW_STATUS_INIT;
- PRT_8192S_FIRMWARE_HDR pFwHdr = NULL;
- PRT_8192S_FIRMWARE_PRIV pFwPriv = NULL;
- int rc;
- const struct firmware *fw_entry;
- u32 file_length = 0;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ bool rtStatus = true;
+ u8 *pucMappedFile = NULL;
+ u32 ulFileLength;
+ u8 FwHdrSize = RT_8192S_FIRMWARE_HDR_SIZE;
+ rt_firmware *pFirmware = priv->pFirmware;
+ u8 FwStatus = FW_STATUS_INIT;
+ PRT_8192S_FIRMWARE_HDR pFwHdr = NULL;
+ PRT_8192S_FIRMWARE_PRIV pFwPriv = NULL;
pFirmware->FWStatus = FW_STATUS_INIT;
-
- RT_TRACE(COMP_FIRMWARE, " --->FirmwareDownload92S()\n");
-
-/*
-* Load the firmware from RTL8192SU/rtl8192sfw.bin
-*/
- if(pFirmware->szFwTmpBufferLen == 0)
- {
- rc = request_firmware(&fw_entry, pFwImageFileName[ulInitStep],&priv->udev->dev);
- if(rc < 0 ) {
- RT_TRACE(COMP_ERR, "request firmware fail!\n");
- goto DownloadFirmware_Fail;
- }
-
- if(fw_entry->size > sizeof(pFirmware->szFwTmpBuffer)) {
- RT_TRACE(COMP_ERR, "img file size exceed the container buffer fail!\n");
- release_firmware(fw_entry);
- goto DownloadFirmware_Fail;
- }
-
- memcpy(pFirmware->szFwTmpBuffer,fw_entry->data,fw_entry->size);
- pFirmware->szFwTmpBufferLen = fw_entry->size;
- release_firmware(fw_entry);
-
- pucMappedFile = pFirmware->szFwTmpBuffer;
- file_length = pFirmware->szFwTmpBufferLen;
-
- /* Retrieve FW header. */
- pFirmware->pFwHeader = (PRT_8192S_FIRMWARE_HDR) pucMappedFile;
- pFwHdr = pFirmware->pFwHeader;
- RT_TRACE(COMP_FIRMWARE,"signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n", \
- pFwHdr->Signature, pFwHdr->Version, pFwHdr->DMEMSize, \
- pFwHdr->IMG_IMEM_SIZE, pFwHdr->IMG_SRAM_SIZE);
- pFirmware->FirmwareVersion = byte(pFwHdr->Version ,0);
- if ((pFwHdr->IMG_IMEM_SIZE==0) || (pFwHdr->IMG_IMEM_SIZE > sizeof(pFirmware->FwIMEM))) {
- RT_TRACE(COMP_ERR, "%s: memory for data image is less than IMEM required\n",\
- __FUNCTION__);
- goto DownloadFirmware_Fail;
- } else {
- pucMappedFile+=FwHdrSize;
- /* Retrieve IMEM image. */
- memcpy(pFirmware->FwIMEM, pucMappedFile, pFwHdr->IMG_IMEM_SIZE);
- pFirmware->FwIMEMLen = pFwHdr->IMG_IMEM_SIZE;
- }
-
- if (pFwHdr->IMG_SRAM_SIZE > sizeof(pFirmware->FwEMEM)) {
- RT_TRACE(COMP_ERR, "%s: memory for data image is less than EMEM required\n",\
- __FUNCTION__);
- goto DownloadFirmware_Fail;
- } else {
- pucMappedFile += pFirmware->FwIMEMLen;
- /* Retriecve EMEM image */
- memcpy(pFirmware->FwEMEM, pucMappedFile, pFwHdr->IMG_SRAM_SIZE);//===>6
- pFirmware->FwEMEMLen = pFwHdr->IMG_SRAM_SIZE;
- }
+ /*
+ * Load the firmware from RTL8192SU/rtl8192sfw.bin if necessary
+ */
+ if (pFirmware->szFwTmpBufferLen == 0) {
+ if (FirmwareRequest92S(dev, pFirmware) != true)
+ goto DownloadFirmware_Fail;
}
-
FwStatus = FirmwareGetNextStatus(pFirmware->FWStatus);
- while(FwStatus!= FW_STATUS_READY)
- {
- // Image buffer redirection.
- switch(FwStatus)
- {
- case FW_STATUS_LOAD_IMEM:
- pucMappedFile = pFirmware->FwIMEM;
- ulFileLength = pFirmware->FwIMEMLen;
- break;
+ while (FwStatus != FW_STATUS_READY) {
+ /* Image buffer redirection. */
+ switch (FwStatus) {
+ case FW_STATUS_LOAD_IMEM:
+ pucMappedFile = pFirmware->FwIMEM;
+ ulFileLength = pFirmware->FwIMEMLen;
+ break;
- case FW_STATUS_LOAD_EMEM:
- pucMappedFile = pFirmware->FwEMEM;
- ulFileLength = pFirmware->FwEMEMLen;
- break;
+ case FW_STATUS_LOAD_EMEM:
+ pucMappedFile = pFirmware->FwEMEM;
+ ulFileLength = pFirmware->FwEMEMLen;
+ break;
- case FW_STATUS_LOAD_DMEM:
- /* <Roger_Notes> Partial update the content of header private. 2008.12.18 */
- pFwHdr = pFirmware->pFwHeader;
- pFwPriv = (PRT_8192S_FIRMWARE_PRIV)&pFwHdr->FWPriv;
- FirmwareHeaderPriveUpdate(dev, pFwPriv);
- pucMappedFile = (u8*)(pFirmware->pFwHeader)+RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
- ulFileLength = FwHdrSize-RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
- break;
+ case FW_STATUS_LOAD_DMEM:
+ /* Partially update the contents of the private header */
+ pFwHdr = pFirmware->pFwHeader;
+ pFwPriv = (PRT_8192S_FIRMWARE_PRIV)&pFwHdr->FWPriv;
+ FirmwareHeaderPriveUpdate(dev, pFwPriv);
+ pucMappedFile = (u8 *)(pFirmware->pFwHeader) +
+ RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
- default:
- RT_TRACE(COMP_ERR, "Unexpected Download step!!\n");
- goto DownloadFirmware_Fail;
- break;
+ ulFileLength = FwHdrSize -
+ RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
+ break;
+
+ default:
+ RT_TRACE(COMP_ERR, "Unexpected Download step!!\n");
+ goto DownloadFirmware_Fail;
+ break;
}
- //3//
- //3// <2> Download image file
- //3 //
- rtStatus = FirmwareDownloadCode(dev, pucMappedFile, ulFileLength);
+ /* <2> Download image file */
+
+ rtStatus = FirmwareDownloadCode(dev,
+ pucMappedFile,
+ ulFileLength);
if(rtStatus != true)
- {
- RT_TRACE(COMP_ERR, "FirmwareDownloadCode() fail ! \n" );
goto DownloadFirmware_Fail;
- }
- //3//
- //3// <3> Check whether load FW process is ready
- //3 //
+ /* <3> Check whether load FW process is ready */
+
rtStatus = FirmwareCheckReady(dev, FwStatus);
if(rtStatus != true)
- {
- RT_TRACE(COMP_ERR, "FirmwareDownloadCode() fail ! \n");
goto DownloadFirmware_Fail;
- }
FwStatus = FirmwareGetNextStatus(pFirmware->FWStatus);
}
- RT_TRACE(COMP_FIRMWARE, "Firmware Download Success!!\n");
+ RT_TRACE(COMP_FIRMWARE, "%s(): Firmware Download Success", __func__);
return rtStatus;
- DownloadFirmware_Fail:
- RT_TRACE(COMP_ERR, "Firmware Download Fail!!%x\n",read_nic_word(dev, TCR));
+DownloadFirmware_Fail:
+ RT_TRACE(COMP_ERR, "%s(): failed with TCR-Status: %x\n",
+ __func__, read_nic_word(dev, TCR));
rtStatus = false;
return rtStatus;
}
diff --git a/drivers/staging/rtl8192su/r8192S_phy.c b/drivers/staging/rtl8192su/r8192S_phy.c
index 63d4e5fd7b1..b6c0f199074 100644
--- a/drivers/staging/rtl8192su/r8192S_phy.c
+++ b/drivers/staging/rtl8192su/r8192S_phy.c
@@ -882,7 +882,7 @@ phy_BB8192S_Config_ParaFile(struct net_device* dev)
//
// 1. Read PHY_REG.TXT BB INIT!!
- // We will seperate as 1T1R/1T2R/1T2R_GREEN/2T2R
+ // We will separate as 1T1R/1T2R/1T2R_GREEN/2T2R
//
if (priv->rf_type == RF_1T2R || priv->rf_type == RF_2T2R ||
priv->rf_type == RF_1T1R ||priv->rf_type == RF_2T2R_GREEN)
@@ -1873,10 +1873,9 @@ PHY_GetTxPowerLevel8192S(
if(priv->bTXPowerDataReadFromEEPORM == FALSE)
return;
- //
- // Read predefined TX power index in EEPROM
- //
-// if(priv->epromtype == EPROM_93c46)
+ /*
+ * Read predefined TX power index in EEPROM
+ */
{
//
// Mainly we use RF-A Tx Power to write the Tx Power registers, but the RF-B Tx
@@ -3607,128 +3606,103 @@ void SetBWModeCallback8192SUsb(struct net_device *dev)
RT_TRACE(COMP_SCAN, "<==SetBWMode8190Pci()" );
}
-//
-// Callback routine of the work item for set bandwidth mode.
-//
-// use in phy only (in win it's work)
+/*
+ * Callback routine of the work item for setting the bandwidth mode.
+ *
+ * Used in PHY code only (in the Windows driver this runs as a work item).
+ */
void SetBWModeCallback8192SUsbWorkItem(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u8 regBwOpMode;
-
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //u32 NowL, NowH;
- //u8Byte BeginTime, EndTime;
- u8 regRRSR_RSC;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ u8 regBwOpMode;
+ u8 regRRSR_RSC;
- RT_TRACE(COMP_SCAN, "==>SetBWModeCallback8192SUsbWorkItem() Switch to %s bandwidth\n", \
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz");
+ RT_TRACE(COMP_SCAN, "%s(): Switch to %s bandwidth", __func__,
+ priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ? "20MHz" : "40MHz");
- if(priv->rf_chip == RF_PSEUDO_11N)
- {
+ if (priv->rf_chip == RF_PSEUDO_11N) {
priv->SetBWModeInProgress= FALSE;
return;
}
-
if(!priv->up)
return;
-
- // Added it for 20/40 mhz switch time evaluation by guangan 070531
- //NowL = read_nic_dword(dev, TSFR);
- //NowH = read_nic_dword(dev, TSFR+4);
- //BeginTime = ((u8Byte)NowH << 32) + NowL;
-
- //3<1>Set MAC register
+ /* Set MAC register */
regBwOpMode = read_nic_byte(dev, BW_OPMODE);
regRRSR_RSC = read_nic_byte(dev, RRSR+2);
-
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily becasue we have not verify whether this register works
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- break;
-
- case HT_CHANNEL_WIDTH_20_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily becasue we have not verify whether this register works
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- regRRSR_RSC = (regRRSR_RSC&0x90) |(priv->nCur40MhzPrimeSC<<5);
- write_nic_byte(dev, RRSR+2, regRRSR_RSC);
-
- break;
-
- default:
- RT_TRACE(COMP_DBG, "SetBWModeCallback8192SUsbWorkItem(): unknown Bandwidth: %#X\n",
- priv->CurrentChannelBW);
- break;
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+ /* we have not verified whether this register works */
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+ /* we have not verified whether this register works */
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ regRRSR_RSC = (regRRSR_RSC&0x90) | (priv->nCur40MhzPrimeSC<<5);
+ write_nic_byte(dev, RRSR+2, regRRSR_RSC);
+ break;
+ default:
+ RT_TRACE(COMP_DBG, "%s(): unknown Bandwidth: %#X", __func__,
+ priv->CurrentChannelBW);
+ break;
}
-
- //3 <2>Set PHY related register
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
-
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, 0xff, 0x58);
-
- break;
- case HT_CHANNEL_WIDTH_20_40:
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
-
- // Set Control channel to upper or lower. These settings are required only for 40MHz
- rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
- rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
-
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, 0xff, 0x18);
-
- break;
-
-
- default:
- RT_TRACE(COMP_DBG, "SetBWModeCallback8192SUsbWorkItem(): unknown Bandwidth: %#X\n"\
- ,priv->CurrentChannelBW);
- break;
+ /* Set PHY related register */
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, 0xff, 0x58);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
+ /*
+ * Set Control channel to upper or lower.
+ * These settings are required only for 40MHz
+ */
+ rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
+ (priv->nCur40MhzPrimeSC>>1));
+ rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
+ priv->nCur40MhzPrimeSC);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, 0xff, 0x18);
+ break;
+ default:
+ RT_TRACE(COMP_DBG, "%s(): unknown Bandwidth: %#X", __func__,
+ priv->CurrentChannelBW);
+ break;
}
- //Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315
-
- //3<3>Set RF related register
- switch( priv->rf_chip )
- {
- case RF_8225:
- PHY_SetRF8225Bandwidth(dev, priv->CurrentChannelBW);
- break;
-
- case RF_8256:
- // Please implement this function in Hal8190PciPhy8256.c
- //PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
- break;
-
- case RF_6052:
- PHY_RF6052SetBandwidth(dev, priv->CurrentChannelBW);
- break;
-
- case RF_8258:
- // Please implement this function in Hal8190PciPhy8258.c
- // PHY_SetRF8258Bandwidth();
- break;
-
- case RF_PSEUDO_11N:
- // Do Nothing
- break;
+ /*
+ * Skip over setting of J-mode in BB register here.
+ * Default value is "None J mode".
+ */
- default:
- //RT_ASSERT(FALSE, ("Unknown rf_chip: %d\n", priv->rf_chip));
- break;
+ /* Set RF related register */
+ switch (priv->rf_chip) {
+ case RF_8225:
+ PHY_SetRF8225Bandwidth(dev, priv->CurrentChannelBW);
+ break;
+ case RF_8256:
+ /* Please implement this function in Hal8190PciPhy8256.c */
+ /* PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW); */
+ break;
+ case RF_6052:
+ PHY_RF6052SetBandwidth(dev, priv->CurrentChannelBW);
+ break;
+ case RF_8258:
+ /* Please implement this function in Hal8190PciPhy8258.c */
+ /* PHY_SetRF8258Bandwidth(); */
+ break;
+ case RF_PSEUDO_11N:
+ /* Do Nothing */
+ break;
+ default:
+ RT_TRACE(COMP_DBG, "%s(): unknown rf_chip: %d", __func__,
+ priv->rf_chip);
+ break;
}
-
priv->SetBWModeInProgress= FALSE;
-
- RT_TRACE(COMP_SCAN, "<==SetBWModeCallback8192SUsbWorkItem()" );
}
//--------------------------Move to oter DIR later-------------------------------*/
diff --git a/drivers/staging/rtl8192su/r8192U.h b/drivers/staging/rtl8192su/r8192U.h
index ba87623f32e..eccf4478fba 100644
--- a/drivers/staging/rtl8192su/r8192U.h
+++ b/drivers/staging/rtl8192su/r8192U.h
@@ -43,6 +43,13 @@
#include "ieee80211/ieee80211.h"
#include "r8192S_firmware.h"
+#include "r8192SU_led.h"
+
+/* EEPROM defs for use with linux/eeprom_93cx6.h */
+#define RTL819X_EEPROM_CMD_READ (1 << 0)
+#define RTL819X_EEPROM_CMD_WRITE (1 << 1)
+#define RTL819X_EEPROM_CMD_CK (1 << 2)
+#define RTL819X_EEPROM_CMD_CS (1 << 3)
//#define RTL8192U
#define RTL819xU_MODULE_NAME "rtl819xU"
@@ -735,11 +742,6 @@ typedef enum _RTL8192SUSB_LOOPBACK{
#define RSVD_FW_QUEUE_PAGE_CMD_SHIFT 0x08
#define RSVD_FW_QUEUE_PAGE_BCN_SHIFT 0x00
#define RSVD_FW_QUEUE_PAGE_PUB_SHIFT 0x08
-//=================================================================
-//=================================================================
-
-#define EPROM_93c46 0
-#define EPROM_93c56 1
#define DEFAULT_FRAG_THRESHOLD 2342U
#define MIN_FRAG_THRESHOLD 256U
@@ -1066,19 +1068,6 @@ typedef enum _RT_CUSTOMER_ID
RT_CID_PRONET = 13,
}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
-//================================================================================
-// LED customization.
-//================================================================================
-
-typedef enum _LED_STRATEGY_8190{
- SW_LED_MODE0, // SW control 1 LED via GPIO0. It is default option.
- SW_LED_MODE1, // SW control for PCI Express
- SW_LED_MODE2, // SW control for Cameo.
- SW_LED_MODE3, // SW contorl for RunTop.
- SW_LED_MODE4, // SW control for Netcore
- HW_LED, // HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes)
-}LED_STRATEGY_8190, *PLED_STRATEGY_8190;
-
typedef enum _RESET_TYPE {
RESET_TYPE_NORESET = 0x00,
RESET_TYPE_NORMAL = 0x01,
@@ -1123,15 +1112,14 @@ typedef struct r8192_priv
{
struct rtl819x_ops* ops;
struct usb_device *udev;
- //added for maintain info from eeprom
- short epromtype;
+ /* added to maintain info from the EEPROM */
u16 eeprom_vid;
u16 eeprom_pid;
u8 eeprom_CustomerID;
u8 eeprom_SubCustomerID;
u8 eeprom_ChannelPlan;
RT_CUSTOMER_ID CustomerID;
- LED_STRATEGY_8190 LedStrategy;
+ LED_STRATEGY_819xUsb LedStrategy;
u8 txqueue_to_outpipemap[9];
u8 RtOutPipes[16];
u8 RtInPipes[16];
@@ -1501,8 +1489,17 @@ typedef struct r8192_priv
u8 MinSpaceCfg;
u16 rf_pathmap;
-//#endif
+ /* added for led control */
+ PLED_819xUsb pLed;
+ LED_819xUsb SwLed0;
+ LED_819xUsb SwLed1;
+ u8 bRegUseLed;
+ struct work_struct BlinkWorkItem;
+ /* added for led control */
+ u16 FwCmdIOMap;
+ u32 FwCmdIOParam;
+ u8 DMFlag;
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index 04d9b85f3d4..447d6474a70 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/eeprom_93cx6.h>
#undef LOOP_TEST
#undef DUMP_RX
@@ -54,7 +55,6 @@
#include <asm/uaccess.h>
#include "r8192U.h"
-#include "r8180_93cx6.h" /* Card EEPROM */
#include "r8192U_wx.h"
#include "r8192S_rtl8225.h"
@@ -217,6 +217,36 @@ static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
};
+static void rtl819x_eeprom_register_read(struct eeprom_93cx6 *eeprom)
+{
+ struct net_device *dev = eeprom->data;
+ u8 reg = read_nic_byte(dev, EPROM_CMD);
+
+ eeprom->reg_data_in = reg & RTL819X_EEPROM_CMD_WRITE;
+ eeprom->reg_data_out = reg & RTL819X_EEPROM_CMD_READ;
+ eeprom->reg_data_clock = reg & RTL819X_EEPROM_CMD_CK;
+ eeprom->reg_chip_select = reg & RTL819X_EEPROM_CMD_CS;
+}
+
+static void rtl819x_eeprom_register_write(struct eeprom_93cx6 *eeprom)
+{
+ struct net_device *dev = eeprom->data;
+ u8 reg = 2 << 6;
+
+ if (eeprom->reg_data_in)
+ reg |= RTL819X_EEPROM_CMD_WRITE;
+ if (eeprom->reg_data_out)
+ reg |= RTL819X_EEPROM_CMD_READ;
+ if (eeprom->reg_data_clock)
+ reg |= RTL819X_EEPROM_CMD_CK;
+ if (eeprom->reg_chip_select)
+ reg |= RTL819X_EEPROM_CMD_CS;
+
+ write_nic_byte(dev, EPROM_CMD, reg);
+ read_nic_byte(dev, EPROM_CMD);
+ udelay(10);
+}
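+
+/*
+ * A minimal sketch of how these callbacks are expected to be hooked into the
+ * generic 93cx6 helper; the actual registration happens elsewhere in this
+ * file, so the snippet below is illustrative only (field and function names
+ * come from <linux/eeprom_93cx6.h>):
+ *
+ *	struct eeprom_93cx6 eeprom;
+ *	u16 word;
+ *
+ *	eeprom.data = dev;
+ *	eeprom.register_read = rtl819x_eeprom_register_read;
+ *	eeprom.register_write = rtl819x_eeprom_register_write;
+ *	eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ *	eeprom_93cx6_read(&eeprom, 0, &word);
+ */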
+
static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
{
int i, max_chan=-1, min_chan=-1;
@@ -1155,15 +1185,6 @@ void tx_timeout(struct net_device *dev)
//DMESG("TXTIMEOUT");
}
-
-/* this is only for debug */
-void dump_eprom(struct net_device *dev)
-{
- int i;
- for(i=0; i<63; i++)
- RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev,i));
-}
-
/* this is only for debug */
void rtl8192_dump_reg(struct net_device *dev)
{
@@ -1201,6 +1222,7 @@ void rtl8192_set_mode(struct net_device *dev,int mode)
void rtl8192_update_msr(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+ LED_CTL_MODE LedAction = LED_CTL_NO_LINK;
u8 msr;
msr = read_nic_byte(dev, MSR);
@@ -1211,19 +1233,23 @@ void rtl8192_update_msr(struct net_device *dev)
* this is intentional and make sense for ad-hoc and
* master (see the create BSS/IBSS func)
*/
- if (priv->ieee80211->state == IEEE80211_LINKED){
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
- if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
+ if (priv->ieee80211->iw_mode == IW_MODE_INFRA) {
msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
- else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ LedAction = LED_CTL_LINK;
+ } else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
- }else
+ } else
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
write_nic_byte(dev, MSR, msr);
+
+ if(priv->ieee80211->LedControlHandler != NULL)
+ priv->ieee80211->LedControlHandler(dev, LedAction);
}
void rtl8192_set_chan(struct net_device *dev,short ch)
@@ -1278,7 +1304,6 @@ static int rtl8192_rx_initiate(struct net_device*dev)
kfree_skb(skb);
break;
}
-// printk("nomal packet IN request!\n");
usb_fill_bulk_urb(entry, priv->udev,
usb_rcvbulkpipe(priv->udev, 3), skb_tail_pointer(skb),
RX_URB_SIZE, rtl8192_rx_isr, skb);
@@ -1292,7 +1317,6 @@ static int rtl8192_rx_initiate(struct net_device*dev)
/* command packet rx procedure */
while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB + 3) {
-// printk("command packet IN request!\n");
skb = __dev_alloc_skb(RX_URB_SIZE ,GFP_KERNEL);
if (!skb)
break;
@@ -2138,15 +2162,13 @@ short rtl8192SU_tx(struct net_device *dev, struct sk_buff* skb)
struct r8192_priv *priv = ieee80211_priv(dev);
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tx_desc_819x_usb *tx_desc = (tx_desc_819x_usb *)skb->data;
- //tx_fwinfo_819x_usb *tx_fwinfo = (tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);//92su del
struct usb_device *udev = priv->udev;
int pend;
int status;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
- //int urb_len;
unsigned int idx_pipe;
- u16 MPDUOverhead = 0;
- //RT_DEBUG_DATA(COMP_SEND, tcb_desc, sizeof(cb_desc));
+ u16 MPDUOverhead = 0;
+ u16 type = 0;
pend = atomic_read(&priv->tx_pending[tcb_desc->queue_index]);
/* we are locked here so the two atomic_read and inc are executed
@@ -2343,6 +2365,11 @@ short rtl8192SU_tx(struct net_device *dev, struct sk_buff* skb)
skb->data,
skb->len, rtl8192_tx_isr, skb);
+ if (type == IEEE80211_FTYPE_DATA) {
+ if (priv->ieee80211->LedControlHandler != NULL)
+ priv->ieee80211->LedControlHandler(dev, LED_CTL_TX);
+ }
+
status = usb_submit_urb(tx_urb, GFP_ATOMIC);
if (!status) {
/*
@@ -2577,30 +2604,20 @@ void rtl8192SU_update_ratr_table(struct net_device* dev)
void rtl8192SU_link_change(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- //unsigned long flags;
+ struct ieee80211_device *ieee = priv->ieee80211;
u32 reg = 0;
- printk("=====>%s 1\n", __func__);
reg = read_nic_dword(dev, RCR);
-
- if (ieee->state == IEEE80211_LINKED)
- {
-
+ if (ieee->state == IEEE80211_LINKED) {
rtl8192SU_net_update(dev);
rtl8192SU_update_ratr_table(dev);
ieee->SetFwCmdHandler(dev, FW_CMD_HIGH_PWR_ENABLE);
priv->ReceiveConfig = reg |= RCR_CBSSID;
- }else{
+ } else
priv->ReceiveConfig = reg &= ~RCR_CBSSID;
-
- }
-
write_nic_dword(dev, RCR, reg);
rtl8192_update_msr(dev);
-
- printk("<=====%s 2\n", __func__);
}
static struct ieee80211_qos_parameters def_qos_parameters = {
@@ -3303,18 +3320,6 @@ static void rtl8192_init_priv_task(struct net_device* dev)
(unsigned long)priv);
}
-static void rtl8192_get_eeprom_size(struct net_device* dev)
-{
- u16 curCR = 0;
- struct r8192_priv *priv = ieee80211_priv(dev);
- RT_TRACE(COMP_EPROM, "===========>%s()\n", __FUNCTION__);
- curCR = read_nic_word_E(dev,EPROM_CMD);
- RT_TRACE(COMP_EPROM, "read from Reg EPROM_CMD(%x):%x\n", EPROM_CMD, curCR);
- //whether need I consider BIT5?
- priv->epromtype = (curCR & Cmd9346CR_9356SEL) ? EPROM_93c56 : EPROM_93c46;
- RT_TRACE(COMP_EPROM, "<===========%s(), epromtype:%d\n", __FUNCTION__, priv->epromtype);
-}
-
//used to swap endian. as ntohl & htonl are not neccessary to swap endian, so use this instead.
static inline u16 endian_swap(u16* data)
{
@@ -3409,34 +3414,28 @@ void update_hal_variables(struct r8192_priv *priv)
}
}
-//
-// Description:
-// Config HW adapter information into initial value.
-//
-// Assumption:
-// 1. After Auto load fail(i.e, check CR9346 fail)
-//
-// Created by Roger, 2008.10.21.
-//
-void
-rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
+/*
+ * Description:
+ *	Configure HW adapter information to initial values.
+ *
+ * Assumption:
+ *	1. Used after autoload fails (i.e., the CR9346 check fails)
+ *
+ */
+void rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- //u16 i,usValue;
- //u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x92, 0x00};
- u8 rf_path; // For EEPROM/EFUSE After V0.6_1117
- int i;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ u8 rf_path; /* For EEPROM/EFUSE After V0.6_1117 */
+ int i;
RT_TRACE(COMP_INIT, "====> ConfigAdapterInfo8192SForAutoLoadFail\n");
- write_nic_byte(dev, SYS_ISO_CTRL+1, 0xE8); // Isolation signals from Loader
- //PlatformStallExecution(10000);
+ /* Isolation signals from Loader */
+ write_nic_byte(dev, SYS_ISO_CTRL+1, 0xE8);
mdelay(10);
- write_nic_byte(dev, PMC_FSM, 0x02); // Enable Loader Data Keep
-
- //RT_ASSERT(priv->AutoloadFailFlag==TRUE, ("ReadAdapterInfo8192SEEPROM(): AutoloadFailFlag !=TRUE\n"));
+ write_nic_byte(dev, PMC_FSM, 0x02); /* Enable Loader Data Keep */
- // Initialize IC Version && Channel Plan
+ /* Initialize IC Version && Channel Plan */
priv->eeprom_vid = 0;
priv->eeprom_pid = 0;
priv->card_8192_version = 0;
@@ -3447,12 +3446,14 @@ rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
RT_TRACE(COMP_INIT, "EEPROM PID = 0x%4x\n", priv->eeprom_pid);
- RT_TRACE(COMP_INIT, "EEPROM Customer ID: 0x%2x\n", priv->eeprom_CustomerID);
- RT_TRACE(COMP_INIT, "EEPROM SubCustomer ID: 0x%2x\n", priv->eeprom_SubCustomerID);
- RT_TRACE(COMP_INIT, "EEPROM ChannelPlan = 0x%4x\n", priv->eeprom_ChannelPlan);
- RT_TRACE(COMP_INIT, "IgnoreDiffRateTxPowerOffset = %d\n", priv->bIgnoreDiffRateTxPowerOffset);
-
-
+ RT_TRACE(COMP_INIT, "EEPROM Customer ID: 0x%2x\n",
+ priv->eeprom_CustomerID);
+ RT_TRACE(COMP_INIT, "EEPROM SubCustomer ID: 0x%2x\n",
+ priv->eeprom_SubCustomerID);
+ RT_TRACE(COMP_INIT, "EEPROM ChannelPlan = 0x%4x\n",
+ priv->eeprom_ChannelPlan);
+ RT_TRACE(COMP_INIT, "IgnoreDiffRateTxPowerOffset = %d\n",
+ priv->bIgnoreDiffRateTxPowerOffset);
priv->EEPROMUsbOption = EEPROM_USB_Default_OPTIONAL_FUNC;
RT_TRACE(COMP_INIT, "USB Option = %#x\n", priv->EEPROMUsbOption);
@@ -3460,19 +3461,15 @@ rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
for(i=0; i<5; i++)
priv->EEPROMUsbPhyParam[i] = EEPROM_USB_Default_PHY_PARAM;
- //RT_PRINT_DATA(COMP_INIT|COMP_EFUSE, DBG_LOUD, ("EFUSE USB PHY Param: \n"), priv->EEPROMUsbPhyParam, 5);
-
{
- //<Roger_Notes> In this case, we random assigh MAC address here. 2008.10.15.
+ /*
+		 * In this case, we assign a hardcoded default MAC address here.
+ */
static u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x92, 0x00};
- u8 i;
-
- //sMacAddr[5] = (u8)GetRandomNumber(1, 254);
-
for(i = 0; i < 6; i++)
dev->dev_addr[i] = sMacAddr[i];
}
- //NicIFSetMacAddress(Adapter, Adapter->PermanentAddress);
+ /* NicIFSetMacAddress(Adapter, Adapter->PermanentAddress); */
write_nic_dword(dev, IDR0, ((u32*)dev->dev_addr)[0]);
write_nic_word(dev, IDR4, ((u16*)(dev->dev_addr + 4))[0]);
@@ -3481,7 +3478,7 @@ rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
dev->dev_addr);
priv->EEPROMBoardType = EEPROM_Default_BoardType;
- priv->rf_type = RF_1T2R; //RF_2T2R
+ priv->rf_type = RF_1T2R; /* RF_2T2R */
priv->EEPROMTxPowerDiff = EEPROM_Default_PwDiff;
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
priv->EEPROMCrystalCap = EEPROM_Default_CrystalCap;
@@ -3490,13 +3487,11 @@ rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
priv->EEPROMTSSI_B = EEPROM_Default_TSSI;
priv->EEPROMTxPwrTkMode = EEPROM_Default_TxPwrTkMode;
-
-
for (rf_path = 0; rf_path < 2; rf_path++)
{
for (i = 0; i < 3; i++)
{
- // Read CCK RF A & B Tx power
+ /* Read CCK RF A & B Tx power */
priv->RfCckChnlAreaTxPwr[rf_path][i] =
priv->RfOfdmChnlAreaTxPwr1T[rf_path][i] =
priv->RfOfdmChnlAreaTxPwr2T[rf_path][i] =
@@ -3506,175 +3501,133 @@ rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(struct net_device* dev)
update_hal_variables(priv);
- //
- // Update remained HAL variables.
- //
+ /*
+ * Update remaining HAL variables.
+ */
priv->TSSI_13dBm = priv->EEPROMThermalMeter *100;
- priv->LegacyHTTxPowerDiff = priv->EEPROMTxPowerDiff;//new
+ priv->LegacyHTTxPowerDiff = priv->EEPROMTxPowerDiff; /* new */
priv->TxPowerDiff = priv->EEPROMTxPowerDiff;
- //priv->AntennaTxPwDiff[0] = (priv->EEPROMTxPowerDiff & 0xf);// Antenna B gain offset to antenna A, bit0~3
- //priv->AntennaTxPwDiff[1] = ((priv->EEPROMTxPowerDiff & 0xf0)>>4);// Antenna C gain offset to antenna A, bit4~7
- priv->CrystalCap = priv->EEPROMCrystalCap; // CrystalCap, bit12~15
- priv->ThermalMeter[0] = priv->EEPROMThermalMeter;// ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2
+ /* Antenna B gain offset to antenna A, bit0~3 */
+ /* priv->AntennaTxPwDiff[0] = (priv->EEPROMTxPowerDiff & 0xf); */
+ /* Antenna C gain offset to antenna A, bit4~7 */
+ /* priv->AntennaTxPwDiff[1] = ((priv->EEPROMTxPowerDiff & 0xf0)>>4); */
+ /* CrystalCap, bit12~15 */
+ priv->CrystalCap = priv->EEPROMCrystalCap;
+ /* ThermalMeter, bit0~3 for RFIC1, bit4~7 for RFIC2 */
+ priv->ThermalMeter[0] = priv->EEPROMThermalMeter;
priv->LedStrategy = SW_LED_MODE0;
init_rate_adaptive(dev);
RT_TRACE(COMP_INIT, "<==== ConfigAdapterInfo8192SForAutoLoadFail\n");
-
}
-//
-// Description:
-// Read HW adapter information by E-Fuse or EEPROM according CR9346 reported.
-//
-// Assumption:
-// 1. CR9346 regiser has verified.
-// 2. PASSIVE_LEVEL (USB interface)
-//
-// Created by Roger, 2008.10.21.
-//
-void
-rtl8192SU_ReadAdapterInfo8192SUsb(struct net_device* dev)
+/*
+ * Description:
+ *	Read HW adapter information from E-Fuse or EEPROM,
+ *	according to what CR9346 reports.
+ *
+ * Assumption:
+ *	1. The CR9346 register has been verified.
+ * 2. PASSIVE_LEVEL (USB interface)
+ */
+void rtl8192SU_ReadAdapterInfo8192SUsb(struct net_device *dev)
{
- struct r8192_priv *priv = ieee80211_priv(dev);
- u16 i,usValue;
- u8 tmpU1b, tempval;
- u16 EEPROMId;
- u8 hwinfo[HWSET_MAX_SIZE_92S];
- u8 rf_path, index; // For EEPROM/EFUSE After V0.6_1117
-
-
- RT_TRACE(COMP_INIT, "====> ReadAdapterInfo8192SUsb\n");
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ u16 i;
+ u8 tmpU1b, tempval;
+ u16 EEPROMId;
+ u8 hwinfo[HWSET_MAX_SIZE_92S];
+ u8 rf_path, index; /* For EEPROM/EFUSE After V0.6_1117 */
+ struct eeprom_93cx6 eeprom;
+ u16 eeprom_val;
+
+ eeprom.data = dev;
+ eeprom.register_read = rtl819x_eeprom_register_read;
+ eeprom.register_write = rtl819x_eeprom_register_write;
+ eeprom.width = PCI_EEPROM_WIDTH_93C46;
- //
- // <Roger_Note> The following operation are prevent Efuse leakage by turn on 2.5V.
- // 2008.11.25.
- //
+ /*
+	 * The following operations prevent Efuse leakage by turning on 2.5V.
+ */
tmpU1b = read_nic_byte(dev, EFUSE_TEST+3);
write_nic_byte(dev, EFUSE_TEST+3, tmpU1b|0x80);
- //PlatformStallExecution(1000);
mdelay(10);
write_nic_byte(dev, EFUSE_TEST+3, (tmpU1b&(~BIT7)));
- // Retrieve Chip version.
+ /* Retrieve Chip version. */
priv->card_8192_version = (VERSION_8192S)((read_nic_dword(dev, PMC_FSM)>>16)&0xF);
RT_TRACE(COMP_INIT, "Chip Version ID: 0x%2x\n", priv->card_8192_version);
- switch(priv->card_8192_version)
- {
- case 0:
- RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_ACUT.\n");
- break;
- case 1:
- RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_BCUT.\n");
- break;
- case 2:
- RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_CCUT.\n");
- break;
- default:
- RT_TRACE(COMP_INIT, "Unknown Chip Version!!\n");
- priv->card_8192_version = VERSION_8192S_BCUT;
- break;
+ switch (priv->card_8192_version) {
+ case 0:
+ RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_ACUT.\n");
+ break;
+ case 1:
+ RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_BCUT.\n");
+ break;
+ case 2:
+ RT_TRACE(COMP_INIT, "Chip Version ID: VERSION_8192S_CCUT.\n");
+ break;
+ default:
+ RT_TRACE(COMP_INIT, "Unknown Chip Version!!\n");
+ priv->card_8192_version = VERSION_8192S_BCUT;
+ break;
}
- //if (IS_BOOT_FROM_EEPROM(Adapter))
- if(priv->EepromOrEfuse)
- { // Read frin EEPROM
- write_nic_byte(dev, SYS_ISO_CTRL+1, 0xE8); // Isolation signals from Loader
- //PlatformStallExecution(10000);
+ if (priv->EepromOrEfuse) { /* Read from EEPROM */
+ /* Isolation signals from Loader */
+ write_nic_byte(dev, SYS_ISO_CTRL+1, 0xE8);
mdelay(10);
- write_nic_byte(dev, PMC_FSM, 0x02); // Enable Loader Data Keep
- // Read all Content from EEPROM or EFUSE.
- for(i = 0; i < HWSET_MAX_SIZE_92S; i += 2)
- {
- usValue = eprom_read(dev, (u16) (i>>1));
- *((u16*)(&hwinfo[i])) = usValue;
+ /* Enable Loader Data Keep */
+ write_nic_byte(dev, PMC_FSM, 0x02);
+ /* Read all Content from EEPROM or EFUSE. */
+ for (i = 0; i < HWSET_MAX_SIZE_92S; i += 2) {
+ eeprom_93cx6_read(&eeprom, (u16) (i>>1), &eeprom_val);
+ *((u16 *)(&hwinfo[i])) = eeprom_val;
}
- }
- else if (!(priv->EepromOrEfuse))
- { // Read from EFUSE
-
- //
- // <Roger_Notes> We set Isolation signals from Loader and reset EEPROM after system resuming
- // from suspend mode.
- // 2008.10.21.
- //
- //PlatformEFIOWrite1Byte(Adapter, SYS_ISO_CTRL+1, 0xE8); // Isolation signals from Loader
- //PlatformStallExecution(10000);
- //PlatformEFIOWrite1Byte(Adapter, SYS_FUNC_EN+1, 0x40);
- //PlatformEFIOWrite1Byte(Adapter, SYS_FUNC_EN+1, 0x50);
-
- //tmpU1b = PlatformEFIORead1Byte(Adapter, EFUSE_TEST+3);
- //PlatformEFIOWrite1Byte(Adapter, EFUSE_TEST+3, (tmpU1b | 0x80));
- //PlatformEFIOWrite1Byte(Adapter, EFUSE_TEST+3, 0x72);
- //PlatformEFIOWrite1Byte(Adapter, EFUSE_CLK, 0x03);
-
- // Read EFUSE real map to shadow.
+ } else if (!(priv->EepromOrEfuse)) { /* Read from EFUSE */
+ /* Read EFUSE real map to shadow. */
EFUSE_ShadowMapUpdate(dev);
memcpy(hwinfo, &priv->EfuseMap[EFUSE_INIT_MAP][0], HWSET_MAX_SIZE_92S);
+ } else {
+ RT_TRACE(COMP_INIT, "%s(): Invalid boot type", __func__);
}
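The EEPROM branch above fetches the map one 16-bit word at a time. For reference, the eeprom_93cx6 helper also provides a bulk read; a hedged alternative sketch, assuming a little-endian host so that the __le16 words land in hwinfo[] exactly as the word-by-word copy would (this is not what the patch does, it is shown only for comparison):

	/* pull the whole shadow map in a single call */
	eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)hwinfo,
			       HWSET_MAX_SIZE_92S / sizeof(u16));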
- else
- {
- RT_TRACE(COMP_INIT, "ReadAdapterInfo8192SUsb(): Invalid boot type!!\n");
- }
-
- //YJ,test,090106
- //dump_buf(hwinfo,HWSET_MAX_SIZE_92S);
- //
- // <Roger_Notes> The following are EFUSE/EEPROM independent operations!!
- //
- //RT_PRINT_DATA(COMP_EFUSE, DBG_LOUD, ("MAP: \n"), hwinfo, HWSET_MAX_SIZE_92S);
- //
- // <Roger_Notes> Event though CR9346 regiser can verify whether Autoload is success or not, but we still
- // double check ID codes for 92S here(e.g., due to HW GPIO polling fail issue).
- // 2008.10.21.
- //
+ /*
+	 * Even though the CR9346 register can verify whether Autoload
+	 * succeeded, we still double-check the ID codes for 92S here
+	 * (e.g., due to the HW GPIO polling fail issue).
+ */
EEPROMId = *((u16 *)&hwinfo[0]);
-
- if( EEPROMId != RTL8190_EEPROM_ID )
- {
+ if (EEPROMId != RTL8190_EEPROM_ID) {
RT_TRACE(COMP_INIT, "ID(%#x) is invalid!!\n", EEPROMId);
priv->bTXPowerDataReadFromEEPORM = FALSE;
priv->AutoloadFailFlag=TRUE;
- }
- else
- {
+ } else {
priv->AutoloadFailFlag=FALSE;
priv->bTXPowerDataReadFromEEPORM = TRUE;
}
- // Read IC Version && Channel Plan
- if(!priv->AutoloadFailFlag)
- {
- // VID, PID
+ /* Read IC Version && Channel Plan */
+ if (!priv->AutoloadFailFlag) {
+ /* VID, PID */
priv->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
priv->eeprom_pid = *(u16 *)&hwinfo[EEPROM_PID];
priv->bIgnoreDiffRateTxPowerOffset = false; //cosa for test
- // EEPROM Version ID, Channel plan
+ /* EEPROM Version ID, Channel plan */
priv->EEPROMVersion = *(u8 *)&hwinfo[EEPROM_Version];
priv->eeprom_ChannelPlan = *(u8 *)&hwinfo[EEPROM_ChannelPlan];
- // Customer ID, 0x00 and 0xff are reserved for Realtek.
+ /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
priv->eeprom_CustomerID = *(u8 *)&hwinfo[EEPROM_CustomID];
priv->eeprom_SubCustomerID = *(u8 *)&hwinfo[EEPROM_SubCustomID];
- }
- else
- {
- //priv->eeprom_vid = 0;
- //priv->eeprom_pid = 0;
- //priv->EEPROMVersion = 0;
- //priv->eeprom_ChannelPlan = 0;
- //priv->eeprom_CustomerID = 0;
- //priv->eeprom_SubCustomerID = 0;
-
+ } else {
rtl8192SU_ConfigAdapterInfo8192SForAutoLoadFail(dev);
return;
}
-
RT_TRACE(COMP_INIT, "EEPROM Id = 0x%4x\n", EEPROMId);
RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
RT_TRACE(COMP_INIT, "EEPROM PID = 0x%4x\n", priv->eeprom_pid);
@@ -3684,18 +3637,13 @@ rtl8192SU_ReadAdapterInfo8192SUsb(struct net_device* dev)
RT_TRACE(COMP_INIT, "EEPROM ChannelPlan = 0x%4x\n", priv->eeprom_ChannelPlan);
RT_TRACE(COMP_INIT, "bIgnoreDiffRateTxPowerOffset = %d\n", priv->bIgnoreDiffRateTxPowerOffset);
-
- // Read USB optional function.
- if(!priv->AutoloadFailFlag)
- {
+ /* Read USB optional function. */
+ if (!priv->AutoloadFailFlag) {
priv->EEPROMUsbOption = *(u8 *)&hwinfo[EEPROM_USB_OPTIONAL];
- }
- else
- {
+ } else {
priv->EEPROMUsbOption = EEPROM_USB_Default_OPTIONAL_FUNC;
}
-
priv->EEPROMUsbEndPointNumber = rtl8192SU_UsbOptionToEndPointNumber((priv->EEPROMUsbOption&EEPROM_EP_NUMBER)>>3);
RT_TRACE(COMP_INIT, "USB Option = %#x\n", priv->EEPROMUsbOption);
@@ -4134,18 +4082,12 @@ short rtl8192_init(struct net_device *dev)
rtl8192_init_priv_variable(dev);
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(dev);
- rtl8192_get_eeprom_size(dev);
priv->ops->rtl819x_read_eeprom_info(dev);
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
init_timer(&priv->watch_dog_timer);
priv->watch_dog_timer.data = (unsigned long)dev;
priv->watch_dog_timer.function = watch_dog_timer_callback;
-
- //rtl8192_adapter_start(dev);
-#ifdef DEBUG_EPROM
- dump_eprom(dev);
-#endif
return 0;
}
@@ -5513,85 +5455,88 @@ void rtl819x_update_rxcounts(
}
}
-extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
+void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
- struct net_device *dev = priv->ieee80211->dev;
+ struct delayed_work *dwork = container_of(work,
+ struct delayed_work,
+ work);
+ struct r8192_priv *priv = container_of(dwork,
+ struct r8192_priv,
+ watch_dog_wq);
+ struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_device* ieee = priv->ieee80211;
- RESET_TYPE ResetType = RESET_TYPE_NORESET;
- static u8 check_reset_cnt=0;
+ RESET_TYPE ResetType = RESET_TYPE_NORESET;
+ static u8 check_reset_cnt;
+ u32 TotalRxBcnNum = 0;
+ u32 TotalRxDataNum = 0;
bool bBusyTraffic = false;
if(!priv->up)
return;
hal_dm_watchdog(dev);
-
- {//to get busy traffic condition
- if(ieee->state == IEEE80211_LINKED)
- {
- //windows mod 666 to 100.
- //if( ieee->LinkDetectInfo.NumRxOkInPeriod> 666 ||
- // ieee->LinkDetectInfo.NumTxOkInPeriod> 666 ) {
- if( ieee->LinkDetectInfo.NumRxOkInPeriod> 100 ||
- ieee->LinkDetectInfo.NumTxOkInPeriod> 100 ) {
+ /* to get busy traffic condition */
+ if (ieee->state == IEEE80211_LINKED) {
+ if (ieee->LinkDetectInfo.NumRxOkInPeriod > 666 ||
+ ieee->LinkDetectInfo.NumTxOkInPeriod > 666)
bBusyTraffic = true;
- }
- ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
- ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
- ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
- }
- }
- //added by amy for AP roaming
- {
- if(priv->ieee80211->state == IEEE80211_LINKED && priv->ieee80211->iw_mode == IW_MODE_INFRA)
- {
- u32 TotalRxBcnNum = 0;
- u32 TotalRxDataNum = 0;
- rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
- if((TotalRxBcnNum+TotalRxDataNum) == 0)
- {
- #ifdef TODO
- if(rfState == eRfOff)
- RT_TRACE(COMP_ERR,"========>%s()\n",__FUNCTION__);
- #endif
- printk("===>%s(): AP is power off,connect another one\n",__FUNCTION__);
- // Dot11d_Reset(dev);
- priv->ieee80211->state = IEEE80211_ASSOCIATING;
- notify_wx_assoc_event(priv->ieee80211);
- RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
- ieee->is_roaming = true;
- priv->ieee80211->link_change(dev);
- queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
- }
+ ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
+ ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ }
+
+ if (priv->ieee80211->state == IEEE80211_LINKED &&
+ priv->ieee80211->iw_mode == IW_MODE_INFRA) {
+ rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
+ if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
+			RT_TRACE(COMP_ERR, "%s(): AP is powered off, "
+					"connect to another one\n", __func__);
+ /* Dot11d_Reset(dev); */
+ priv->ieee80211->state = IEEE80211_ASSOCIATING;
+ notify_wx_assoc_event(priv->ieee80211);
+ RemovePeerTS(priv->ieee80211,
+ priv->ieee80211->current_network.bssid);
+ ieee->is_roaming = true;
+ priv->ieee80211->link_change(dev);
+ if(ieee->LedControlHandler != NULL)
+ ieee->LedControlHandler(ieee->dev,
+ LED_CTL_START_TO_LINK);
+ queue_work(priv->ieee80211->wq,
+ &priv->ieee80211->associate_procedure_wq);
}
- priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod=0;
- priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod=0;
}
-// CAM_read_entry(dev,4);
- //check if reset the driver
- if(check_reset_cnt++ >= 3 && !ieee->is_roaming)
- {
- ResetType = rtl819x_ifcheck_resetornot(dev);
+ priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
+ priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod = 0;
+
+ /*
+ * CAM_read_entry(dev,4);
+ * check if reset the driver
+ */
+ if (check_reset_cnt++ >= 3 && !ieee->is_roaming) {
+ ResetType = rtl819x_ifcheck_resetornot(dev);
check_reset_cnt = 3;
- //DbgPrint("Start to check silent reset\n");
}
- // RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
-#if 1
- if( (priv->force_reset) || (priv->ResetProgress==RESET_TYPE_NORESET &&
+ if ((priv->force_reset) || (priv->ResetProgress == RESET_TYPE_NORESET &&
(priv->bForcedSilentReset ||
- (!priv->bDisableNormalResetCheck && ResetType==RESET_TYPE_SILENT)))) // This is control by OID set in Pomelo
- {
- RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
+ (!priv->bDisableNormalResetCheck &&
+ /* This is control by OID set in Pomelo */
+ ResetType == RESET_TYPE_SILENT)))) {
+		RT_TRACE(COMP_RESET, "%s(): priv->force_reset is %d, "
+ "priv->ResetProgress is %d, "
+ "priv->bForcedSilentReset is %d, "
+ "priv->bDisableNormalResetCheck is %d, "
+ "ResetType is %d",
+ __func__,
+ priv->force_reset,
+ priv->ResetProgress,
+ priv->bForcedSilentReset,
+ priv->bDisableNormalResetCheck,
+ ResetType);
rtl819x_ifsilentreset(dev);
}
-#endif
priv->force_reset = false;
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
- RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-
}
void watch_dog_timer_callback(unsigned long data)
@@ -5816,7 +5761,7 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
goto out;
}
- ipw = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ ipw = kmalloc(p->length, GFP_KERNEL);
if (ipw == NULL){
ret = -ENOMEM;
goto out;
@@ -7593,96 +7538,113 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
void EnableHWSecurityConfig8192(struct net_device *dev)
{
- u8 SECR_value = 0x0;
+ u8 SECR_value = 0x0;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
-#if 1
- if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2))
- {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
- }
- else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP)))
- {
- SECR_value |= SCR_RxUseDK;
- SECR_value |= SCR_TxUseDK;
+ switch (ieee->pairwise_key_type) {
+ case KEY_TYPE_WEP40:
+ case KEY_TYPE_WEP104:
+ if (priv->ieee80211->auth_mode != 2) {
+ SECR_value |= SCR_RxUseDK;
+ SECR_value |= SCR_TxUseDK;
+ }
+ break;
+ case KEY_TYPE_TKIP:
+ case KEY_TYPE_CCMP:
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
+ SECR_value |= SCR_RxUseDK;
+ SECR_value |= SCR_TxUseDK;
+ }
+ break;
+ default:
+ break;
}
-#endif
- //add HWSec active enable here.
-//default using hwsec. when peer AP is in N mode only and pairwise_key_type is none_aes(which HT_IOT_ACT_PURE_N_MODE indicates it), use software security. when peer AP is in b,g,n mode mixed and pairwise_key_type is none_aes, use g mode hw security. WB on 2008.7.4
+ /*
+	 * Enable HW security here.
+	 * Default is to use hwsec.
+	 * When the peer AP is in N mode only and pairwise_key_type is non-AES
+	 * (which HT_IOT_ACT_PURE_N_MODE indicates),
+	 * use software security.
+	 * When the peer AP is in mixed b,g,n mode and pairwise_key_type is non-AES,
+	 * use g mode hw security.
+ */
ieee->hwsec_active = 1;
- if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep)//!ieee->hwsec_support) //add hwsec_support flag to totol control hw_sec on/off
- {
+	/* add hwsec_support flag to totally control hw_sec on/off */
+ if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
- RT_TRACE(COMP_SEC,"%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __FUNCTION__, \
- ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
- {
- write_nic_byte(dev, SECR, SECR_value);//SECR_value | SCR_UseDK );
- }
+ RT_TRACE(COMP_SEC, "%s(): hwsec: %d, pairwise_key: %d, "
+ "SECR_value: %x",
+ __func__, ieee->hwsec_active,
+ ieee->pairwise_key_type, SECR_value);
+
+ write_nic_byte(dev, SECR, SECR_value); /* SECR_value | SCR_UseDK ); */
}
-void setKey( struct net_device *dev,
+void setKey(struct net_device *dev,
u8 EntryNo,
u8 KeyIndex,
u16 KeyType,
u8 *MacAddr,
u8 DefaultKey,
- u32 *KeyContent )
+ u32 *KeyContent)
{
u32 TargetCommand = 0;
u32 TargetContent = 0;
u16 usConfig = 0;
u8 i;
+
if (EntryNo >= TOTAL_CAM_ENTRY)
- RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
+ RT_TRACE(COMP_ERR, "%s(): cam entry exceeds TOTAL_CAM_ENTRY",
+ __func__);
- RT_TRACE(COMP_SEC, "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", dev,EntryNo, KeyIndex, KeyType, MacAddr);
+ RT_TRACE(COMP_SEC, "%s(): dev: %p, EntryNo: %d, "
+ "KeyIndex: %d, KeyType: %d, MacAddr: %pM",
+ __func__, dev, EntryNo,
+ KeyIndex, KeyType, MacAddr);
if (DefaultKey)
- usConfig |= BIT15 | (KeyType<<2);
+ usConfig |= BIT15 | (KeyType << 2);
else
- usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
-// usConfig |= BIT15 | (KeyType<<2) | (DefaultKey<<5) | KeyIndex;
+ usConfig |= BIT15 | (KeyType << 2) | KeyIndex;
-
- for(i=0 ; i<CAM_CONTENT_COUNT; i++){
- TargetCommand = i+CAM_CONTENT_COUNT*EntryNo;
+ for (i = 0 ; i < CAM_CONTENT_COUNT; i++) {
+ TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
TargetCommand |= BIT31|BIT16;
-
- if(i==0){//MAC|Config
- TargetContent = (u32)(*(MacAddr+0)) << 16|
- (u32)(*(MacAddr+1)) << 24|
+ switch (i) {
+ case 0: /* MAC|Config */
+ TargetContent = (u32)(*(MacAddr + 0)) << 16|
+ (u32)(*(MacAddr + 1)) << 24|
(u32)usConfig;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
- // printk("setkey cam =%8x\n", read_cam(dev, i+6*EntryNo));
- }
- else if(i==1){//MAC
- TargetContent = (u32)(*(MacAddr+2)) |
- (u32)(*(MacAddr+3)) << 8|
- (u32)(*(MacAddr+4)) << 16|
- (u32)(*(MacAddr+5)) << 24;
+ continue;
+ case 1: /* MAC */
+ TargetContent = (u32)(*(MacAddr + 2))|
+ (u32)(*(MacAddr + 3)) << 8|
+ (u32)(*(MacAddr + 4)) << 16|
+ (u32)(*(MacAddr + 5)) << 24;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
+ continue;
+ default: /* Key Material */
+ if (KeyContent != NULL) {
+ write_nic_dword(dev, WCAMI,
+ (u32)(*(KeyContent+i-2)));
+ write_nic_dword(dev, RWCAM,
+ TargetCommand);
+ }
+ continue;
}
- else {
- //Key Material
- if(KeyContent !=NULL){
- write_nic_dword(dev, WCAMI, (u32)(*(KeyContent+i-2)) );
- write_nic_dword(dev, RWCAM, TargetCommand);
- }
- }
}
-
}
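For illustration, a hypothetical call that programs a pairwise CCMP key through the reworked helper; the entry number, MAC address and key material below are made-up values, not taken from this patch:

	u32 key[4] = { 0x01020304, 0x05060708, 0x090a0b0c, 0x0d0e0f10 };
	u8 peer[6] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x01 };

	/* CAM entry 4, key index 0, pairwise (non-default) key */
	setKey(dev, 4, 0, KEY_TYPE_CCMP, peer, 0, key);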
/***************************************************************************
diff --git a/drivers/staging/rtl8192su/r819xU_cmdpkt.c b/drivers/staging/rtl8192su/r819xU_cmdpkt.c
index 3ebfe79bb66..a8e9d2d96f5 100644
--- a/drivers/staging/rtl8192su/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192su/r819xU_cmdpkt.c
@@ -1,384 +1,255 @@
-/******************************************************************************
-
- (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
-
- Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File)
-
- Note: The module is responsible for handling TX and RX command packet.
- 1. TX : Send set and query configuration command packet.
- 2. RX : Receive tx feedback, beacon state, query configuration
- command packet.
-
- Function:
-
- Export:
-
- Abbrev:
-
- History:
- Data Who Remark
-
- 05/06/2008 amy Create initial version porting from windows driver.
-
-******************************************************************************/
+/*
+ * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+ *
+ * Module: r819xusb_cmdpkt.c
+ * (RTL8190 TX/RX command packet handler Source C File)
+ *
+ * Note: The module is responsible for handling TX and RX command packets.
+ *	  1. TX: Send set and query configuration command packets.
+ *	  2. RX: Receive tx feedback, beacon state and query configuration command packets.
+ */
#include "r8192U.h"
#include "r819xU_cmdpkt.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Debug constant*/
-#define CMPK_DEBOUNCE_CNT 1
-/* 2007/10/24 MH Add for printing a range of data. */
-#define CMPK_PRINT(Address)\
-{\
- unsigned char i;\
- u32 temp[10];\
- \
- memcpy(temp, Address, 40);\
- for (i = 0; i <40; i+=4)\
- printk("\r\n %08x", temp[i]);\
-}\
-/*---------------------------Define functions---------------------------------*/
-
-bool
-SendTxCommandPacket(
- struct net_device *dev,
- void* pData,
- u32 DataLen
- )
+
+bool SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
{
bool rtStatus = true;
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
cb_desc *tcb_desc;
unsigned char *ptr_buf;
- //bool bLastInitPacket = false;
- //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
+ /* PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); */
- //Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
+ /*
+ * Get TCB and local buffer from common pool.
+ * (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
+ */
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
+ if (skb == NULL) {
+ RT_TRACE(COMP_ERR, "(%s): unable to alloc skb buffer\n",
+ __func__);
+ rtStatus = false;
+ return rtStatus;
+ }
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
ptr_buf = skb_put(skb, DataLen);
- memset(ptr_buf,0,DataLen);
- memcpy(ptr_buf,pData,DataLen);
- tcb_desc->txbuf_size= (u16)DataLen;
-
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
+ memcpy(ptr_buf, pData, DataLen);
+ tcb_desc->txbuf_size = (u16)DataLen;
+
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "NULL packet => tx full\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
//PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
return rtStatus;
}
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_message_handle_tx()
*
* Overview: Driver internal module can call the API to send message to
- * firmware side. For example, you can send a debug command packet.
- * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
- * Otherwise, you can change MAC/PHT/RF register by firmware at
- * run time. We do not support message more than one segment now.
+ * firmware side. For example, you can send a debug command packet.
+ * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
+ * Otherwise, you can change MAC/PHT/RF register by firmware at
+ * run time. We do not support message more than one segment now.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/06/2008 amy porting from windows code.
- *
- *---------------------------------------------------------------------------*/
+ */
extern bool cmpk_message_handle_tx(
struct net_device *dev,
- u8* codevirtualaddress,
+ u8 *codevirtualaddress,
u32 packettype,
u32 buffer_len)
{
-
- bool rt_status = true;
+ bool rt_status = true;
return rt_status;
-} /* CMPK_Message_Handle_Tx */
+}
-/*-----------------------------------------------------------------------------
- * Function: cmpk_counttxstatistic()
- *
- * Overview:
- *
- * Input: PADAPTER pAdapter - .
- * CMPK_TXFB_T *psTx_FB - .
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
+/*
+ * Function: cmpk_counttxstatistic()
+ */
static void
-cmpk_count_txstatistic(
- struct net_device *dev,
- cmpk_txfb_t *pstx_fb)
+cmpk_count_txstatistic(struct net_device *dev, cmpk_txfb_t *pstx_fb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
- RT_RF_POWER_STATE rtState;
+ RT_RF_POWER_STATE rtState;
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter,
+ HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /*
+	 * When RF is off, we should not count the packet, for hw/sw synchronization
+	 * reasons, i.e. there may be a period while the sw switch has changed and
+	 * the hw switch is still being changed.
+ */
if (rtState == eRfOff)
- {
return;
- }
#endif
#ifdef TODO
- if(pAdapter->bInHctTest)
+ if (pAdapter->bInHctTest)
return;
#endif
- /* We can not know the packet length and transmit type: broadcast or uni
- or multicast. So the relative statistics must be collected in tx
- feedback info. */
- if (pstx_fb->tok)
- {
+ /*
+	 * We cannot know the packet length and transmit type:
+	 * broadcast, unicast or multicast.
+	 * So the related statistics must be collected from the tx feedback info.
+ */
+ if (pstx_fb->tok) {
priv->stats.txfeedbackok++;
priv->stats.txoktotal++;
priv->stats.txokbytestotal += pstx_fb->pkt_length;
priv->stats.txokinperiod++;
-
/* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
+ if (pstx_fb->pkt_type == PACKET_MULTICAST) {
priv->stats.txmulticast++;
priv->stats.txbytesmulticast += pstx_fb->pkt_length;
- }
- else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
+ } else if (pstx_fb->pkt_type == PACKET_BROADCAST) {
priv->stats.txbroadcast++;
priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
- }
- else
- {
+ } else {
priv->stats.txunicast++;
priv->stats.txbytesunicast += pstx_fb->pkt_length;
}
- }
- else
- {
+ } else {
priv->stats.txfeedbackfail++;
priv->stats.txerrtotal++;
priv->stats.txerrbytestotal += pstx_fb->pkt_length;
-
/* We can not make sure broadcast/multicast or unicast mode. */
if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
priv->stats.txerrmulticast++;
- }
else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
priv->stats.txerrbroadcast++;
- }
else
- {
priv->stats.txerrunicast++;
- }
}
-
priv->stats.txretrycount += pstx_fb->retry_cnt;
priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
+}
-} /* cmpk_CountTxStatistic */
-
-
-
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_handle_tx_feedback()
*
* Overview: The function is responsible for extract the message inside TX
- * feedbck message from firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "TX Feedback Element". We have to read 20 bytes
- * in the command packet.
+ *		feedback message from firmware. It will contain dedicated info in
+ * ws-06-0063-rtl8190-command-packet-specification. Please
+ * refer to chapter "TX Feedback Element". We have to read 20 bytes
+ * in the command packet.
*
* Input: struct net_device * dev
- * u8 * pmsg - Msg Ptr of the command packet.
+ * u8 *pmsg - Msg Ptr of the command packet.
*
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/08/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_feedback(
- struct net_device *dev,
- u8 * pmsg)
+ */
+static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- cmpk_txfb_t rx_tx_fb; /* */
+ cmpk_txfb_t rx_tx_fb;
priv->stats.txfeedback++;
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
+ memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
- /* 2007/07/05 MH Use pointer to transfer structure memory. */
- //memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T));
- memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_txstatistic(dev, &rx_tx_fb);
+}
- /* 2007/01/17 MH Comment previous method for TX statistic function. */
- /* Collect info TX feedback packet to fill TCB. */
- /* We can not know the packet length and transmit type: broadcast or uni
- or multicast. */
- //CountTxStatistics( pAdapter, &tcb );
-
-} /* cmpk_Handle_Tx_Feedback */
-
-void
-cmdpkt_beacontimerinterrupt_819xusb(
- struct net_device *dev
-)
+void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- {
- //
- // 070117, rcnjko: 87B have to S/W beacon for DTM encryption_cmn.
- //
- if(priv->ieee80211->current_network.mode == IEEE_A ||
- priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G && (!priv->ieee80211->pHTInfo->bCurSuppCCK)))
- {
- tx_rate = 60;
- DMESG("send beacon frame tx rate is 6Mbpm\n");
- }
- else
- {
- tx_rate =10;
- DMESG("send beacon frame tx rate is 1Mbpm\n");
- }
-
- rtl819xusb_beacon_tx(dev,tx_rate); // HW Beacon
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
+ priv->ieee80211->current_network.mode == IEEE_N_5G ||
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
+ tx_rate = 60;
+		DMESG("send beacon frame tx rate is 6Mbps\n");
+ } else {
+ tx_rate = 10;
+		DMESG("send beacon frame tx rate is 1Mbps\n");
}
-
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
}
-
-
-
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_handle_interrupt_status()
*
* Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
+ * firmware. It will contain dedicated info in
+ * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
+ * Please refer to chapter "Interrupt Status Element".
*
* Input: struct net_device *dev,
- * u8* pmsg - Message Pointer of the command packet.
+ * u8* pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Add this for rtl8192 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void
-cmpk_handle_interrupt_status(
- struct net_device *dev,
- u8* pmsg)
+ */
+static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
{
cmpk_intr_sta_t rx_intr_status; /* */
struct r8192_priv *priv = ieee80211_priv(dev);
DMESG("---> cmpk_Handle_Interrupt_Status()\n");
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that FW use big endian(MIPS) and DRV use little endian in
- windows OS. So we have to read the content byte by byte or transfer
- endian type before copy the message copy. */
- //rx_bcn_state.Element_ID = pMsg[0];
- //rx_bcn_state.Length = pMsg[1];
rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
- {
+ if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) {
DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
return;
}
-
-
- // Statistics of beacon for ad-hoc mode.
- if( priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
+ /* Statistics of beacon for ad-hoc mode. */
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
//2 maybe need endian transform?
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
//rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4)));
DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);
- if (rx_intr_status.interrupt_status & ISR_TxBcnOk)
- {
+ if (rx_intr_status.interrupt_status & ISR_TxBcnOk) {
priv->ieee80211->bibsscoordinator = true;
priv->stats.txbeaconokint++;
- }
- else if (rx_intr_status.interrupt_status & ISR_TxBcnErr)
- {
+ } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) {
priv->ieee80211->bibsscoordinator = false;
priv->stats.txbeaconerr++;
}
if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
- {
cmdpkt_beacontimerinterrupt_819xusb(dev);
- }
-
}
-
- // Other informations in interrupt status we need?
-
-
+	/* Any other information in the interrupt status we need? */
DMESG("<---- cmpk_handle_interrupt_status()\n");
+}
-} /* cmpk_handle_interrupt_status */
-
-
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_handle_query_config_rx()
*
* Overview: The function is responsible for extract the message from
* firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "Beacon State Element".
+ * ws-06-0063-rtl8190-command-packet-specification
+ * Please refer to chapter "Beacon State Element".
*
* Input: u8 * pmsg - Message Pointer of the command packet.
*
@@ -386,44 +257,28 @@ cmpk_handle_interrupt_status(
*
* Return: NONE
*
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void
-cmpk_handle_query_config_rx(
- struct net_device *dev,
- u8* pmsg)
+ */
+static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
{
- cmpk_query_cfg_t rx_query_cfg; /* */
-
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
+ cmpk_query_cfg_t rx_query_cfg;
+ /*
+ * Extract TX feedback info from RFD to temp structure buffer.
+ */
+ rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000) >> 31;
+ rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
+ rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
+ rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
+ rx_query_cfg.cfg_offset = pmsg[7];
+ rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
+ (pmsg[10] << 8) | (pmsg[11] << 0);
+ rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
+ (pmsg[14] << 8) | (pmsg[15] << 0);
+}
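Note that the cfg_action extraction above masks a single byte (pmsg[4]) with 0x80000000, which can never be non-zero, so cfg_action always ends up 0; this is carried over unchanged from the old code. A hedged sketch of what the extraction presumably intends, assuming the action flag is bit 7 of byte 4 (an assumption about the firmware layout, not something this patch establishes):

	/* if the action flag really is the top bit of byte 4: */
	rx_query_cfg.cfg_action = (pmsg[4] & 0x80) >> 7;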
- /* 1. Extract TX feedback info from RFD to temp structure buffer. */
- /* It seems that FW use big endian(MIPS) and DRV use little endian in
- windows OS. So we have to read the content byte by byte or transfer
- endian type before copy the message copy. */
- //rx_query_cfg.Element_ID = pMsg[0];
- //rx_query_cfg.Length = pMsg[1];
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
- rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
- rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
- rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
- (pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
- (pmsg[14] << 8) | (pmsg[15] << 0);
-
-} /* cmpk_Handle_Query_Config_Rx */
-
-
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_count_tx_status()
*
- * Overview: Count aggregated tx status from firmwar of one type rx command
+ * Overview: Count aggregated tx status from firmware of one type rx command
* packet element id = RX_TX_STATUS.
*
* Input: NONE
@@ -431,14 +286,9 @@ cmpk_handle_query_config_rx(
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void cmpk_count_tx_status( struct net_device *dev,
- cmpk_tx_status_t *pstx_status)
+ */
+static void cmpk_count_tx_status(struct net_device *dev,
+ cmpk_tx_status_t *pstx_status)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -448,13 +298,13 @@ static void cmpk_count_tx_status( struct net_device *dev,
pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /*
+	 * When RF is off, we should not count the packet, for hw/sw synchronization
+	 * reasons, i.e. there may be a period while the sw switch has changed and
+	 * the hw switch is still being changed.
+ */
if (rtState == eRfOff)
- {
return;
- }
#endif
priv->stats.txfeedbackok += pstx_status->txok;
@@ -463,15 +313,11 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txfeedbackfail += pstx_status->txfail;
priv->stats.txerrtotal += pstx_status->txfail;
- priv->stats.txretrycount += pstx_status->txretry;
+ priv->stats.txretrycount += pstx_status->txretry;
priv->stats.txfeedbackretry += pstx_status->txretry;
- //pAdapter->TxStats.NumTxOkBytesTotal += psTx_FB->pkt_length;
- //pAdapter->TxStats.NumTxErrBytesTotal += psTx_FB->pkt_length;
- //pAdapter->MgntInfo.LinkDetectInfo.NumTxOkInPeriod++;
-
- priv->stats.txmulticast += pstx_status->txmcok;
- priv->stats.txbroadcast += pstx_status->txbcok;
+ priv->stats.txmulticast += pstx_status->txmcok;
+ priv->stats.txbroadcast += pstx_status->txbcok;
priv->stats.txunicast += pstx_status->txucok;
priv->stats.txerrmulticast += pstx_status->txmcfail;
@@ -480,14 +326,12 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txbytesmulticast += pstx_status->txmclength;
priv->stats.txbytesbroadcast += pstx_status->txbclength;
- priv->stats.txbytesunicast += pstx_status->txuclength;
-
- priv->stats.last_packet_rate = pstx_status->rate;
-} /* cmpk_CountTxStatus */
-
+ priv->stats.txbytesunicast += pstx_status->txuclength;
+ priv->stats.last_packet_rate = pstx_status->rate;
+}
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_handle_tx_status()
*
* Overview: Firmware add a new tx feedback status to reduce rx command
@@ -498,27 +342,18 @@ static void cmpk_count_tx_status( struct net_device *dev,
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
+ */
static void
-cmpk_handle_tx_status(
- struct net_device *dev,
- u8* pmsg)
+cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
{
- cmpk_tx_status_t rx_tx_sts; /* */
+ cmpk_tx_status_t rx_tx_sts;
- memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t));
+ memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(cmpk_tx_status_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_tx_status(dev, &rx_tx_sts);
+}
-} /* cmpk_Handle_Tx_Status */
-
-
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_handle_tx_rate_history()
*
* Overview: Firmware add a new tx rate history
@@ -528,117 +363,90 @@ cmpk_handle_tx_status(
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_rate_history(
- struct net_device *dev,
- u8* pmsg)
+ */
+static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
{
cmpk_tx_rahis_t *ptxrate;
-// RT_RF_POWER_STATE rtState;
- u8 i, j;
- u16 length = sizeof(cmpk_tx_rahis_t);
- u32 *ptemp;
+ u8 i, j;
+ u16 length = sizeof(cmpk_tx_rahis_t);
+ u32 *ptemp;
struct r8192_priv *priv = ieee80211_priv(dev);
-
#ifdef ENABLE_PS
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
-
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter,
+ HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
+ /*
+	 * When RF is off, we should not count the packet, for hw/sw synchronization
+	 * reasons, i.e. there may be a period while the sw switch has changed and
+	 * the hw switch is still being changed.
+ */
if (rtState == eRfOff)
- {
return;
- }
#endif
-
ptemp = (u32 *)pmsg;
- //
- // Do endian transfer to word alignment(16 bits) for windows system.
- // You must do different endian transfer for linux and MAC OS
- //
- for (i = 0; i < (length/4); i++)
- {
- u16 temp1, temp2;
-
- temp1 = ptemp[i]&0x0000FFFF;
- temp2 = ptemp[i]>>16;
- ptemp[i] = (temp1<<16)|temp2;
+ /*
+	 * Do endian transfer to word alignment (16 bits) for the windows system.
+	 * You must do a different endian transfer for linux and MAC OS.
+ */
+ for (i = 0; i < (length/4); i++) {
+ u16 temp1, temp2;
+ temp1 = ptemp[i] & 0x0000FFFF;
+ temp2 = ptemp[i] >> 16;
+ ptemp[i] = (temp1 << 16) | temp2;
}
ptxrate = (cmpk_tx_rahis_t *)pmsg;
- if (ptxrate == NULL )
- {
+ if (ptxrate == NULL)
return;
- }
- for (i = 0; i < 16; i++)
- {
- // Collect CCK rate packet num
+ for (i = 0; i < 16; i++) {
+ /* Collect CCK rate packet num */
if (i < 4)
priv->stats.txrate.cck[i] += ptxrate->cck[i];
-
- // Collect OFDM rate packet num
- if (i< 8)
+ /* Collect OFDM rate packet num */
+ if (i < 8)
priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
-
for (j = 0; j < 4; j++)
priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i];
}
-} /* cmpk_Handle_Tx_Rate_History */
-
+}
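The loop above swaps the two 16-bit halves of each 32-bit word before the buffer is reinterpreted as rate counters. A concrete illustration of the transform (the value is arbitrary):

	u32 word = 0xAAAABBBB;
	u16 temp1 = word & 0x0000FFFF;	/* 0xBBBB */
	u16 temp2 = word >> 16;		/* 0xAAAA */

	word = ((u32)temp1 << 16) | temp2;	/* becomes 0xBBBBAAAA */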
-/*-----------------------------------------------------------------------------
+/*
* Function: cmpk_message_handle_rx()
*
* Overview: In the function, we will capture different RX command packet
- * info. Every RX command packet element has different message
- * length and meaning in content. We only support three type of RX
- * command packet now. Please refer to document
- * ws-06-0063-rtl8190-command-packet-specification.
+ * info. Every RX command packet element has different message
+ *		length and meaning in content. We only support three types of RX
+ *		command packets now. Please refer to document
+ * ws-06-0063-rtl8190-command-packet-specification.
*
* Input: NONE
*
* Output: NONE
*
* Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/06/2008 amy Create Version 0 porting from windows code.
- *
- *---------------------------------------------------------------------------*/
+ */
extern u32
cmpk_message_handle_rx(
struct net_device *dev,
struct ieee80211_rx_stats *pstats)
{
-// u32 debug_level = DBG_LOUD;
struct r8192_priv *priv = ieee80211_priv(dev);
int total_length;
u8 cmd_length, exe_cnt = 0;
u8 element_id;
u8 *pcmd_buff;
- /* 0. Check inpt arguments. If is is a command queue message or pointer is
- null. */
- if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL))
- {
- /* Print error message. */
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->Err queue id or pointer"));*/
+ /*
+ * 0. Check input arguments.
+	 * If it is a command queue message or the pointer is null.
+ */
+ if ((pstats == NULL))
return 0; /* This is not a command packet. */
- }
/* 1. Read received command packet message length from RFD. */
total_length = pstats->Length;
@@ -648,70 +456,49 @@ cmpk_message_handle_rx(
/* 3. Read command pakcet element id and length. */
element_id = pcmd_buff[0];
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/
-
- /* 4. Check every received command packet conent according to different
- element type. Because FW may aggregate RX command packet to minimize
- transmit time between DRV and FW.*/
- // Add a counter to prevent to locked in the loop too long
- while (total_length > 0 || exe_cnt++ >100)
- {
- /* 2007/01/17 MH We support aggregation of different cmd in the same packet. */
- element_id = pcmd_buff[0];
- switch(element_id)
- {
- case RX_TX_FEEDBACK:
- cmpk_handle_tx_feedback (dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_INTERRUPT_STATUS:
- cmpk_handle_interrupt_status(dev, pcmd_buff);
- cmd_length = sizeof(cmpk_intr_sta_t);
- break;
-
- case BOTH_QUERY_CONFIG:
- cmpk_handle_query_config_rx(dev, pcmd_buff);
- cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
- break;
-
- case RX_TX_STATUS:
- cmpk_handle_tx_status(dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_STS_SIZE;
- break;
-
- case RX_TX_PER_PKT_FEEDBACK:
- // You must at lease add a switch case element here,
- // Otherwise, we will jump to default case.
- //DbgPrint("CCX Test\r\n");
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_TX_RATE_HISTORY:
- //DbgPrint(" rx tx rate history\r\n");
- cmpk_handle_tx_rate_history(dev, pcmd_buff);
- cmd_length = CMPK_TX_RAHIS_SIZE;
- break;
-
- default:
-
- RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknown CMD Element\n");
- return 1; /* This is a command packet. */
- }
- // 2007/01/22 MH Display received rx command packet info.
- //cmpk_Display_Message(cmd_length, pcmd_buff);
+ /*
+	 * 4. Check every received command packet's content according to its
+	 *    element type, because FW may aggregate RX command packets to minimize
+ * transmit time between DRV and FW.
+ */
- // 2007/01/22 MH Add to display tx statistic.
- //cmpk_DisplayTxStatistic(pAdapter);
-
- /* 2007/03/09 MH Collect sidderent cmd element pkt num. */
+	/* Add a counter to prevent being locked in the loop for too long */
+ while (total_length > 0 || exe_cnt++ > 100) {
+ /* We support aggregation of different cmd in the same packet */
+ element_id = pcmd_buff[0];
+ switch (element_id) {
+ case RX_TX_FEEDBACK:
+ cmpk_handle_tx_feedback(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+ case RX_INTERRUPT_STATUS:
+ cmpk_handle_interrupt_status(dev, pcmd_buff);
+ cmd_length = sizeof(cmpk_intr_sta_t);
+ break;
+ case BOTH_QUERY_CONFIG:
+ cmpk_handle_query_config_rx(dev, pcmd_buff);
+ cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
+ break;
+ case RX_TX_STATUS:
+ cmpk_handle_tx_status(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_STS_SIZE;
+ break;
+ case RX_TX_PER_PKT_FEEDBACK:
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+ case RX_TX_RATE_HISTORY:
+ cmpk_handle_tx_rate_history(dev, pcmd_buff);
+ cmd_length = CMPK_TX_RAHIS_SIZE;
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "(%s): unknown CMD Element\n",
+ __func__);
+ return 1; /* This is a command packet. */
+ }
priv->stats.rxcmdpkt[element_id]++;
-
total_length -= cmd_length;
pcmd_buff += cmd_length;
- } /* while (total_length > 0) */
- return 1; /* This is a command packet. */
-
-} /* CMPK_Message_Handle_Rx */
+ }
+ return 1; /* This is a command packet. */
+}
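The rewritten handler walks the aggregated elements by id and advances pcmd_buff by each element's size. The loop condition keeps the original "total_length > 0 || exe_cnt++ > 100", so the counter does not actually bound the iteration; a minimal sketch of a bounded walk, assuming the intent is to stop once the buffer is consumed or after 100 elements (illustrative only, not what this patch applies):

	while (total_length > 0 && exe_cnt++ < 100) {
		element_id = pcmd_buff[0];
		/* dispatch on element_id exactly as in the switch above,
		 * which also sets cmd_length for this element */
		total_length -= cmd_length;
		pcmd_buff += cmd_length;
	}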
diff --git a/drivers/staging/rtl8192su/r819xU_cmdpkt.h b/drivers/staging/rtl8192su/r819xU_cmdpkt.h
index cced8e0d278..d3c56155188 100644
--- a/drivers/staging/rtl8192su/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192su/r819xU_cmdpkt.h
@@ -1,199 +1,191 @@
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
-/* Different command packet have dedicated message length and definition. */
-#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) //20
-#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) //16
-#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)//
-#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)//
-#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
-
-/* 2008/05/08 amy For USB constant. */
-#define ISR_TxBcnOk BIT27 // Transmit Beacon OK
-#define ISR_TxBcnErr BIT26 // Transmit Beacon Error
-#define ISR_BcnTimerIntr BIT13 // Beacon Timer Interrupt
-
-/* Define element ID of command packet. */
-
-/*------------------------------Define structure----------------------------*/
-/* Define different command packet structure. */
-/* 1. RX side: TX feedback packet. */
-typedef struct tag_cmd_pkt_tx_feedback
-{
- // DWORD 0
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- /* 2007/07/05 MH Change tx feedback info field. */
- /*------TX Feedback Info Field */
- u8 TID:4; /* */
- u8 fail_reason:3; /* */
- u8 tok:1; /* Transmit ok. */
- u8 reserve1:4; /* */
- u8 pkt_type:2; /* */
- u8 bandwidth:1; /* */
- u8 qos_pkt:1; /* */
-
- // DWORD 1
- u8 reserve2; /* */
- /*------TX Feedback Info Field */
- u8 retry_cnt; /* */
- u16 pkt_id; /* */
-
- // DWORD 3
- u16 seq_num; /* */
- u8 s_rate; /* Start rate. */
- u8 f_rate; /* Final rate. */
-
- // DWORD 4
- u8 s_rts_rate; /* */
- u8 f_rts_rate; /* */
- u16 pkt_length; /* */
-
- // DWORD 5
- u16 reserve3; /* */
- u16 duration; /* */
-}cmpk_txfb_t;
-
-/* 2. RX side: Interrupt status packet. It includes Beacon State,
- Beacon Timer Interrupt and other useful informations in MAC ISR Reg. */
-typedef struct tag_cmd_pkt_interrupt_status
-{
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve;
- u32 interrupt_status; /* Interrupt Status. */
-}cmpk_intr_sta_t;
-
-
-/* 3. TX side: Set configuration packet. */
-typedef struct tag_cmd_pkt_set_configuration
-{
- u8 element_id; /* Command packet type. */
- u8 length; /* Command packet length. */
- u16 reserve1; /* */
- u8 cfg_reserve1:3;
- u8 cfg_size:2; /* Configuration info. */
- u8 cfg_type:2; /* Configuration info. */
- u8 cfg_action:1; /* Configuration info. */
- u8 cfg_reserve2; /* Configuration info. */
- u8 cfg_page:4; /* Configuration info. */
- u8 cfg_reserve3:4; /* Configuration info. */
- u8 cfg_offset; /* Configuration info. */
- u32 value; /* */
- u32 mask; /* */
-}cmpk_set_cfg_t;
-
-/* 4. Both side : TX/RX query configuraton packet. The query structure is the
- same as set configuration. */
-#define cmpk_query_cfg_t cmpk_set_cfg_t
-/* 5. Multi packet feedback status. */
-typedef struct tag_tx_stats_feedback // PJ quick rxcmd 09042007
-{
- // For endian transfer --> Driver will not the same as firmware structure.
- // DW 0
+/*
+ * Each command packet type has a dedicated message length and definition.
+ */
+#define CMPK_RX_TX_FB_SIZE sizeof(cmpk_txfb_t) /* 20 */
+#define CMPK_TX_SET_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
+#define CMPK_BOTH_QUERY_CONFIG_SIZE sizeof(cmpk_set_cfg_t) /* 16 */
+#define CMPK_RX_TX_STS_SIZE sizeof(cmpk_tx_status_t)
+#define CMPK_RX_DBG_MSG_SIZE sizeof(cmpk_rx_dbginfo_t)
+#define CMPK_TX_RAHIS_SIZE sizeof(cmpk_tx_rahis_t)
+
+/* USB interrupt status constants. */
+#define ISR_TxBcnOk BIT27 /* Transmit Beacon OK */
+#define ISR_TxBcnErr BIT26 /* Transmit Beacon Error */
+#define ISR_BcnTimerIntr BIT13 /* Beacon Timer Interrupt */
+
+/*
+ * Define different command packet structures
+ *
+ * 1. RX side: TX feedback packet.
+ */
+typedef struct tag_cmd_pkt_tx_feedback {
+ /* DWORD 0 */
+ u8 element_id; /* Command packet type. */
+ u8 length; /* Command packet length. */
+ /* TX Feedback Info Field */
+ u8 TID:4;
+ u8 fail_reason:3;
+ u8 tok:1; /* Transmit ok. */
+ u8 reserve1:4;
+ u8 pkt_type:2;
+ u8 bandwidth:1;
+ u8 qos_pkt:1;
+
+ /* DWORD 1 */
+ u8 reserve2;
+ /* TX Feedback Info Field */
+ u8 retry_cnt;
+ u16 pkt_id;
+
+ /* DWORD 3 */
+ u16 seq_num;
+ u8 s_rate; /* Start rate. */
+ u8 f_rate; /* Final rate. */
+
+ /* DWORD 4 */
+ u8 s_rts_rate;
+ u8 f_rts_rate;
+ u16 pkt_length;
+
+ /* DWORD 5 */
+ u16 reserve3;
+ u16 duration;
+} cmpk_txfb_t;
+
+/*
+ * 2. RX side: Interrupt status packet.
+ * It includes Beacon State, Beacon Timer Interrupt
+ * and other useful information from the MAC ISR register.
+ */
+typedef struct tag_cmd_pkt_interrupt_status {
+ u8 element_id; /* Command packet type. */
+ u8 length; /* Command packet length. */
+ u16 reserve;
+ u32 interrupt_status; /* Interrupt Status. */
+} cmpk_intr_sta_t;
+
+
+/*
+ * 3. TX side: Set configuration packet.
+ */
+typedef struct tag_cmd_pkt_set_configuration {
+ u8 element_id; /* Command packet type. */
+ u8 length; /* Command packet length. */
u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
-
- // DW 1
- u16 txfail; // Tx Fail count
- u16 txok; // Tx ok count
-
- // DW 2
- u16 txmcok; // tx multicast
- u16 txretry; // Tx Retry count
-
- // DW 3
- u16 txucok; // tx unicast
- u16 txbcok; // tx broadcast
-
- // DW 4
- u16 txbcfail; //
- u16 txmcfail; //
-
- // DW 5
- u16 reserve2; //
- u16 txucfail; //
-
- // DW 6-8
- u32 txmclength;
- u32 txbclength;
- u32 txuclength;
-
- // DW 9
- u16 reserve3_23;
- u8 reserve3_1;
- u8 rate;
-}__attribute__((packed)) cmpk_tx_status_t;
-
-/* 6. Debug feedback message. */
-/* 2007/10/23 MH Define RX debug message */
-typedef struct tag_rx_debug_message_feedback
-{
- // For endian transfer --> for driver
- // DW 0
- u16 reserve1;
- u8 length; // Command packet length
- u8 element_id; // Command packet type
-
- // DW 1-??
- // Variable debug message.
-
-}cmpk_rx_dbginfo_t;
-
-/* 2008/03/20 MH Define transmit rate history. For big endian format. */
-typedef struct tag_tx_rate_history
-{
- // For endian transfer --> for driver
- // DW 0
- u8 element_id; // Command packet type
- u8 length; // Command packet length
- u16 reserved1;
-
- // DW 1-2 CCK rate counter
- u16 cck[4];
-
- // DW 3-6
- u16 ofdm[8];
-
- // DW 7-14
- //UINT16 MCS_BW0_SG0[16];
-
- // DW 15-22
- //UINT16 MCS_BW1_SG0[16];
-
- // DW 23-30
- //UINT16 MCS_BW0_SG1[16];
-
- // DW 31-38
- //UINT16 MCS_BW1_SG1[16];
-
- // DW 7-14 BW=0 SG=0
- // DW 15-22 BW=1 SG=0
- // DW 23-30 BW=0 SG=1
- // DW 31-38 BW=1 SG=1
- u16 ht_mcs[4][16];
-
-}__attribute__((packed)) cmpk_tx_rahis_t;
+ u8 cfg_reserve1:3;
+ u8 cfg_size:2; /* Configuration info. */
+ u8 cfg_type:2; /* Configuration info. */
+ u8 cfg_action:1; /* Configuration info. */
+ u8 cfg_reserve2; /* Configuration info. */
+ u8 cfg_page:4; /* Configuration info. */
+ u8 cfg_reserve3:4; /* Configuration info. */
+ u8 cfg_offset; /* Configuration info. */
+ u32 value;
+ u32 mask;
+} cmpk_set_cfg_t;
+
+/*
+ * 4. Both sides: TX/RX query configuration packet.
+ * The query structure is the same as set configuration.
+ */
+#define cmpk_query_cfg_t cmpk_set_cfg_t
-typedef enum tag_command_packet_directories
-{
+/*
+ * 5. Multi packet feedback status.
+ */
+typedef struct tag_tx_stats_feedback {
+ /*
+	 * For endian transfer: the driver structure is not laid out the same
+	 * as the firmware structure.
+ */
+ /* DW 0 */
+ u16 reserve1;
+ u8 length; /* Command packet length */
+ u8 element_id; /* Command packet type */
+
+ /* DW 1 */
+ u16 txfail; /* Tx Fail count */
+ u16 txok; /* Tx ok count */
+
+ /* DW 2 */
+ u16 txmcok; /* tx multicast */
+ u16 txretry; /* Tx Retry count */
+
+ /* DW 3 */
+ u16 txucok; /* tx unicast */
+ u16 txbcok; /* tx broadcast */
+
+ /* DW 4 */
+ u16 txbcfail;
+ u16 txmcfail;
+
+ /* DW 5 */
+ u16 reserve2;
+ u16 txucfail;
+
+ /* DW 6-8 */
+ u32 txmclength;
+ u32 txbclength;
+ u32 txuclength;
+
+ /* DW 9 */
+ u16 reserve3_23;
+ u8 reserve3_1;
+ u8 rate;
+} __attribute__((packed)) cmpk_tx_status_t;
+
+/*
+ * 6. Debug feedback message.
+ */
+typedef struct tag_rx_debug_message_feedback {
+ /* For endian transfer --> for driver */
+ /* DW 0 */
+ u16 reserve1;
+ u8 length; /* Command packet length */
+ u8 element_id; /* Command packet type */
+} cmpk_rx_dbginfo_t;
+
+/*
+ * Define transmit rate history. For big endian format.
+ */
+typedef struct tag_tx_rate_history {
+ /* For endian transfer --> for driver */
+ /* DW 0 */
+ u8 element_id; /* Command packet type */
+ u8 length; /* Command packet length */
+ u16 reserved1;
+ /* DW 1-2 CCK rate counter */
+ u16 cck[4];
+ /* DW 3-6 */
+ u16 ofdm[8];
+ u16 ht_mcs[4][16];
+} __attribute__((packed)) cmpk_tx_rahis_t;
+
+typedef enum tag_command_packet_directories {
RX_TX_FEEDBACK = 0,
- RX_INTERRUPT_STATUS = 1,
- TX_SET_CONFIG = 2,
- BOTH_QUERY_CONFIG = 3,
- RX_TX_STATUS = 4,
- RX_DBGINFO_FEEDBACK = 5,
- RX_TX_PER_PKT_FEEDBACK = 6,
- RX_TX_RATE_HISTORY = 7,
+ RX_INTERRUPT_STATUS = 1,
+ TX_SET_CONFIG = 2,
+ BOTH_QUERY_CONFIG = 3,
+ RX_TX_STATUS = 4,
+ RX_DBGINFO_FEEDBACK = 5,
+ RX_TX_PER_PKT_FEEDBACK = 6,
+ RX_TX_RATE_HISTORY = 7,
RX_CMD_ELE_MAX
-}cmpk_element_e;
+} cmpk_element_e;
-extern bool cmpk_message_handle_tx(struct net_device *dev, u8* codevirtualaddress, u32 packettype, u32 buffer_len);
+extern bool cmpk_message_handle_tx(struct net_device *dev,
+ u8 *codevirtualaddress,
+ u32 packettype,
+ u32 buffer_len);
-extern u32 cmpk_message_handle_rx(struct net_device *dev, struct ieee80211_rx_stats * pstats);
-extern bool SendTxCommandPacket( struct net_device *dev, void* pData, u32 DataLen);
+extern u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats);
+extern bool SendTxCommandPacket(struct net_device *dev,
+ void *pData,
+ u32 DataLen);
#endif
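The RX-side command elements declared above are fixed-layout structures, so a handler can interpret an element directly from the aggregated buffer once the dispatch loop has matched its ID. The fragment below is only an illustration of that overlay, using the cmpk_intr_sta_t layout and the ISR_* bits from this header together with the usual kernel headers; the driver's real cmpk_handle_interrupt_status() does the actual bookkeeping.

/* Illustration only: read one RX_INTERRUPT_STATUS element from the
 * aggregated command buffer by overlaying the structure defined above. */
static void example_read_interrupt_status(const u8 *pcmd_buff)
{
	const cmpk_intr_sta_t *sta = (const cmpk_intr_sta_t *)pcmd_buff;

	if (sta->element_id != RX_INTERRUPT_STATUS)
		return;
	/* interrupt_status carries MAC ISR bits such as ISR_TxBcnOk,
	 * ISR_TxBcnErr and ISR_BcnTimerIntr. */
	if (sta->interrupt_status & (ISR_TxBcnOk | ISR_TxBcnErr))
		pr_debug("beacon tx status reported by firmware\n");
}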
diff --git a/drivers/staging/rtl8192u/dot11d.h b/drivers/staging/rtl8192u/dot11d.h
index 15b7a4ba37b..0851b9db17a 100644
--- a/drivers/staging/rtl8192u/dot11d.h
+++ b/drivers/staging/rtl8192u/dot11d.h
@@ -4,44 +4,44 @@
#ifdef ENABLE_DOT11D
#include "ieee80211.h"
-//#define ENABLE_DOT11D
-
-//#define DOT11D_MAX_CHNL_NUM 83
typedef struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
-}CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
+} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
typedef enum _DOT11D_STATE {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-}DOT11D_STATE;
+} DOT11D_STATE;
typedef struct _RT_DOT11D_INFO {
- //DECLARE_RT_OBJECT(RT_DOT11D_INFO);
+ /* DECLARE_RT_OBJECT(RT_DOT11D_INFO); */
- bool bEnabled; // dot11MultiDomainCapabilityEnabled
+ bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
- u16 CountryIeLen; // > 0 if CountryIeBuf[] contains valid country information element.
+ u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
u8 CountryIeBuf[MAX_IE_LEN];
- u8 CountryIeSrcAddr[6]; // Source AP of the country IE.
+ u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
u8 CountryIeWatchdog;
- u8 channel_map[MAX_CHANNEL_NUMBER+1]; //!!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan)
- //u8 ChnlListLen; // #Bytes valid in ChnlList[].
- //u8 ChnlList[DOT11D_MAX_CHNL_NUM];
+	u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
-}RT_DOT11D_INFO, *PRT_DOT11D_INFO;
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
-#define cpMacAddr(des,src) ((des)[0]=(src)[0],(des)[1]=(src)[1],(des)[2]=(src)[2],(des)[3]=(src)[3],(des)[4]=(src)[4],(des)[5]=(src)[5])
+} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
+#define eqMacAddr(a, b) (((a)[0] == (b)[0] && \
+ (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && \
+ (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1 : 0)
+#define cpMacAddr(des, src) ((des)[0] = (src)[0], \
+ (des)[1] = (src)[1], (des)[2] = (src)[2], \
+ (des)[3] = (src)[3], (des)[4] = (src)[4], \
+ (des)[5] = (src)[5])
#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
-#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
+#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
@@ -53,9 +53,9 @@ typedef struct _RT_DOT11D_INFO {
(!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog
+#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) ++GET_CIE_WATCHDOG(__pIeeeDev)
+#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
@@ -73,9 +73,9 @@ Dot11d_Reset(
void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
- u8 * pTaddr,
- u16 CoutryIeLen,
- u8 * pCoutryIe
+ u8 *pTaddr,
+ u16 CoutryIeLen,
+ u8 *pCoutryIe
);
u8
@@ -86,17 +86,17 @@ DOT11D_GetMaxTxPwrInDbm(
void
DOT11D_ScanComplete(
- struct ieee80211_device * dev
+ struct ieee80211_device *dev
);
int IsLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
);
int ToLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
);
-#endif //ENABLE_DOT11D
-#endif // #ifndef __INC_DOT11D_H
+#endif /* ENABLE_DOT11D */
+#endif /* #ifndef __INC_DOT11D_H */
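The reworked eqMacAddr()/cpMacAddr() and watchdog macros compose as in the following sketch, which shows one plausible way a receive path could track the source of the country IE; the control flow here is illustrative only and is not copied from dot11d.c.

/* Illustration of how the macros above compose (not the driver's code). */
static void example_track_country_ie(struct ieee80211_device *ieee, u8 *ta)
{
	PRT_DOT11D_INFO info = GET_DOT11D_INFO(ieee);

	if (!IS_DOT11D_ENABLE(ieee))
		return;
	if (IS_COUNTRY_IE_VALID(ieee) && IS_EQUAL_CIE_SRC(ieee, ta)) {
		/* Same AP we learned from: just refresh the watchdog. */
		RESET_CIE_WATCHDOG(ieee);
	} else {
		/* Remember the new source and restart the watchdog count. */
		cpMacAddr(info->CountryIeSrcAddr, ta);
		UPDATE_CIE_WATCHDOG(ieee);
	}
}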
diff --git a/drivers/staging/rtl8192u/ieee80211.h b/drivers/staging/rtl8192u/ieee80211.h
index 9d05ed6791e..9c726113214 100644
--- a/drivers/staging/rtl8192u/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211.h
@@ -58,12 +58,12 @@
*
*/
#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+	const typeof(((type *)0)->member) *__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
#endif
#define KEY_TYPE_NA 0x0
-#define KEY_TYPE_WEP40 0x1
+#define KEY_TYPE_WEP40 0x1
#define KEY_TYPE_TKIP 0x2
#define KEY_TYPE_CCMP 0x4
#define KEY_TYPE_WEP104 0x5
@@ -71,9 +71,9 @@
/* added for rtl819x tx procedure */
#define MAX_QUEUE_SIZE 0x10
-//
-// 8190 queue mapping
-//
+/*
+ * 8190 queue mapping
+ */
#define BK_QUEUE 0
#define BE_QUEUE 1
#define VI_QUEUE 2
@@ -87,13 +87,13 @@
#define LOW_QUEUE BE_QUEUE
#define NORMAL_QUEUE MGNT_QUEUE
-//added by amy for ps
+/* added by amy for ps */
#define SWRF_TIMEOUT 50
-//added by amy for LEAP related
-#define IE_CISCO_FLAG_POSITION 0x08 // Flag byte: byte 8, numbered from 0.
-#define SUPPORT_CKIP_MIC 0x08 // bit3
-#define SUPPORT_CKIP_PK 0x10 // bit4
+/* added by amy for LEAP related */
+#define IE_CISCO_FLAG_POSITION 0x08 /* Flag byte: byte 8, numbered from 0. */
+#define SUPPORT_CKIP_MIC 0x08 /* bit3 */
+#define SUPPORT_CKIP_PK 0x10 /* bit4 */
/* defined for skb cb field */
/* At most 28 byte */
typedef struct cb_desc {
@@ -105,7 +105,7 @@ typedef struct cb_desc {
u8 bEncrypt:1;
u8 bTxDisableRateFallBack:1;
u8 bTxUseDriverAssingedRate:1;
- u8 bHwSec:1; //indicate whether use Hw security. WB
+ u8 bHwSec:1; /* indicate whether use Hw security. WB */
u8 reserved1;
@@ -125,17 +125,13 @@ typedef struct cb_desc {
u8 bRTSUseShortGI:1;
u8 bMulticast:1;
u8 bBroadcast:1;
- //u8 reserved2:2;
u8 drv_agg_enable:1;
u8 reserved2:1;
/* Tx Desc related element(12-19) */
u8 rata_index;
u8 queue_index;
- //u8 reserved3;
- //u8 reserved4;
u16 txbuf_size;
- //u8 reserved5;
u8 RATRIndex;
u8 reserved6;
u8 reserved7;
@@ -146,13 +142,10 @@ typedef struct cb_desc {
u8 rts_rate;
u8 ampdu_factor;
u8 ampdu_density;
- //u8 reserved9;
- //u8 reserved10;
- //u8 reserved11;
u8 DrvAggrNum;
u16 pkt_size;
u8 reserved12;
-}cb_desc, *pcb_desc;
+} cb_desc, *pcb_desc;
/*--------------------------Define -------------------------------------------*/
#define MGN_1M 0x02
@@ -186,9 +179,9 @@ typedef struct cb_desc {
#define MGN_MCS14 0x8e
#define MGN_MCS15 0x8f
-//----------------------------------------------------------------------------
-// 802.11 Management frame Reason Code field
-//----------------------------------------------------------------------------
+/*
+ * 802.11 Management frame Reason Code field
+ */
enum _ReasonCode{
unspec_reason = 0x1,
auth_not_valid = 0x2,
@@ -200,11 +193,11 @@ enum _ReasonCode{
disas_lv_ss = 0x8,
asoc_not_auth = 0x9,
- //----MIC_CHECK
+ /* ----MIC_CHECK */
mic_failure = 0xe,
- //----END MIC_CHECK
+ /* ----END MIC_CHECK */
- // Reason code defined in 802.11i D10.0 p.28.
+ /* Reason code defined in 802.11i D10.0 p.28. */
invalid_IE = 0x0d,
four_way_tmout = 0x0f,
two_way_tmout = 0x10,
@@ -214,27 +207,29 @@ enum _ReasonCode{
invalid_AKMP = 0x14,
unsup_RSNIEver = 0x15,
invalid_RSNIE = 0x16,
- auth_802_1x_fail= 0x17,
+ auth_802_1x_fail = 0x17,
ciper_reject = 0x18,
- // Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie, 2005-11-15.
- QoS_unspec = 0x20, // 32
- QAP_bandwidth = 0x21, // 33
- poor_condition = 0x22, // 34
- no_facility = 0x23, // 35
- // Where is 36???
- req_declined = 0x25, // 37
- invalid_param = 0x26, // 38
- req_not_honored= 0x27, // 39
- TS_not_created = 0x2F, // 47
- DL_not_allowed = 0x30, // 48
- dest_not_exist = 0x31, // 49
- dest_not_QSTA = 0x32, // 50
+	/* Reason code defined in 7.3.1.7, 802.11e D13.0, p.42. */
+ QoS_unspec = 0x20, /* 32 */
+ QAP_bandwidth = 0x21, /* 33 */
+ poor_condition = 0x22, /* 34 */
+ no_facility = 0x23, /* 35 */
+ /* Where is 36??? */
+ req_declined = 0x25, /* 37 */
+ invalid_param = 0x26, /* 38 */
+ req_not_honored = 0x27, /* 39 */
+ TS_not_created = 0x2F, /* 47 */
+ DL_not_allowed = 0x30, /* 48 */
+ dest_not_exist = 0x31, /* 49 */
+ dest_not_QSTA = 0x32, /* 50 */
};
-#define aSifsTime ((priv->ieee80211->current_network.mode == IEEE_A)||(priv->ieee80211->current_network.mode == IEEE_N_24G)||(priv->ieee80211->current_network.mode == IEEE_N_5G))? 16 : 10
+#define aSifsTime (((priv->ieee80211->current_network.mode == IEEE_A) || \
+	(priv->ieee80211->current_network.mode == IEEE_N_24G) || \
+	(priv->ieee80211->current_network.mode == IEEE_N_5G)) ? 16 : 10)
#define MGMT_QUEUE_NUM 5
@@ -249,15 +244,12 @@ enum _ReasonCode{
#define IEEE_PARAM_PRIVACY_INVOKED 4
#define IEEE_PARAM_AUTH_ALGS 5
#define IEEE_PARAM_IEEE_802_1X 6
-//It should consistent with the driver_XXX.c
-// David, 2006.9.26
+/* It should consistent with the driver_XXX.c */
#define IEEE_PARAM_WPAX_SELECT 7
-//Added for notify the encryption type selection
-// David, 2006.9.26
+/* Added for notify the encryption type selection */
#define IEEE_PROTO_WPA 1
#define IEEE_PROTO_RSN 2
-//Added for notify the encryption type selection
-// David, 2006.9.26
+/* Added for notify the encryption type selection */
#define IEEE_WPAX_USEGROUP 0
#define IEEE_WPAX_WEP40 1
#define IEEE_WPAX_TKIP 2
@@ -284,7 +276,7 @@ enum _ReasonCode{
#define MAX_IE_LEN 0xff
-// added for kernel conflict
+/* added for kernel conflict */
#define ieee80211_crypt_deinit_entries ieee80211_crypt_deinit_entries_rsl
#define ieee80211_crypt_deinit_handler ieee80211_crypt_deinit_handler_rsl
#define ieee80211_crypt_delayed_deinit ieee80211_crypt_delayed_deinit_rsl
@@ -385,7 +377,7 @@ typedef struct ieee_param {
u8 key[0];
} crypt;
} u;
-}ieee_param;
+} ieee_param;
#if WIRELESS_EXT < 17
@@ -398,7 +390,7 @@ typedef struct ieee_param {
#endif
-// linux under 2.6.9 release may not support it, so modify it for common use
+/* linux under 2.6.9 release may not support it, so modify it for common use */
#define MSECS(t) msecs_to_jiffies(t)
#define msleep_interruptible_rsl msleep_interruptible
@@ -432,7 +424,7 @@ typedef struct ieee_param {
#define IEEE80211_FCTL_FRAMETYPE 0x00fc
#define IEEE80211_FCTL_TODS 0x0100
#define IEEE80211_FCTL_FROMDS 0x0200
-#define IEEE80211_FCTL_DSTODS 0x0300 //added by david
+#define IEEE80211_FCTL_DSTODS 0x0300
#define IEEE80211_FCTL_MOREFRAGS 0x0400
#define IEEE80211_FCTL_RETRY 0x0800
#define IEEE80211_FCTL_PM 0x1000
@@ -476,7 +468,7 @@ typedef struct ieee_param {
#define IEEE80211_STYPE_CFACK 0x0050
#define IEEE80211_STYPE_CFPOLL 0x0060
#define IEEE80211_STYPE_CFACKPOLL 0x0070
-#define IEEE80211_STYPE_QOS_DATA 0x0080 //added for WMM 2006/8/2
+#define IEEE80211_STYPE_QOS_DATA 0x0080
#define IEEE80211_STYPE_QOS_NULL 0x00C0
#define IEEE80211_SCTL_FRAG 0x000F
@@ -486,12 +478,12 @@ typedef struct ieee_param {
#define IEEE80211_QCTL_TID 0x000F
#define FC_QOS_BIT BIT7
-#define IsDataFrame(pdu) ( ((pdu[0] & 0x0C)==0x08) ? true : false )
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)) )
-//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16*)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16*)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
+#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
+#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)))
+
+#define IsQoSDataFrame(pframe) ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
+#define SN_LESS(a, b) (((a-b)&0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
typedef enum _ACT_CATEGORY{
@@ -516,10 +508,10 @@ typedef enum _BA_ACTION{
} BA_ACTION, *PBA_ACTION;
typedef enum _InitialGainOpType{
- IG_Backup=0,
+ IG_Backup = 0,
IG_Restore,
IG_Max
-}InitialGainOpType;
+} InitialGainOpType;
/* debug macros */
#define CONFIG_IEEE80211_DEBUG
@@ -528,25 +520,26 @@ extern u32 ieee80211_debug_level;
#define IEEE80211_DEBUG(level, fmt, args...) \
do { if (ieee80211_debug_level & (level)) \
printk(KERN_DEBUG "ieee80211: " fmt, ## args); } while (0)
-//wb added to debug out data buf
-//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
+/* Added by wb to debug data buffers.
+ * To print DATA buffers related to BA, set ieee80211_debug_level
+ * to DATA|BA.
+ */
#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do{ if ((ieee80211_debug_level & (level)) == (level)) \
- { \
+ do { if ((ieee80211_debug_level & (level)) == (level)) { \
int i; \
- u8* pdata = (u8*) data; \
+			u8 *pdata = (u8 *)data; \
printk(KERN_DEBUG "ieee80211: %s()\n", __FUNCTION__); \
- for(i=0; i<(int)(datalen); i++) \
- { \
+ for (i = 0; i < (int)(datalen); i++) { \
printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
- } \
+ if ((i+1)%16 == 0) \
+ printk("\n"); \
+ } \
printk("\n"); \
} \
} while (0)
#else
#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while(0)
+#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while (0)
#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -589,16 +582,16 @@ do { if (ieee80211_debug_level & (level)) \
#define IEEE80211_DL_TX (1<<8)
#define IEEE80211_DL_RX (1<<9)
-#define IEEE80211_DL_HT (1<<10) //HT
-#define IEEE80211_DL_BA (1<<11) //ba
-#define IEEE80211_DL_TS (1<<12) //TS
+#define IEEE80211_DL_HT (1<<10) /* HT */
+#define IEEE80211_DL_BA (1<<11) /* ba */
+#define IEEE80211_DL_TS (1<<12) /* TS */
#define IEEE80211_DL_QOS (1<<13)
#define IEEE80211_DL_REORDER (1<<14)
#define IEEE80211_DL_IOT (1<<15)
#define IEEE80211_DL_IPS (1<<16)
-#define IEEE80211_DL_TRACE (1<<29) //trace function, need to user net_ratelimit() together in order not to print too much to the screen
-#define IEEE80211_DL_DATA (1<<30) //use this flag to control whether print data buf out.
-#define IEEE80211_DL_ERR (1<<31) //always open
+#define IEEE80211_DL_TRACE (1<<29) /* trace functions; use together with net_ratelimit() so as not to print too much to the screen */
+#define IEEE80211_DL_DATA (1<<30) /* controls whether data buffers are printed out */
+#define IEEE80211_DL_ERR (1<<31) /* always open */
#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a)
@@ -618,18 +611,17 @@ do { if (ieee80211_debug_level & (level)) \
/* Added by Annie, 2005-11-22. */
#define MAX_STR_LEN 64
/* I want to see ASCII 33 to 126 only. Otherwise, I print '?'. Annie, 2005-11-22.*/
-#define PRINTABLE(_ch) (_ch>'!' && _ch<'~')
+#define PRINTABLE(_ch) (_ch > '!' && _ch < '~')
#define IEEE80211_PRINT_STR(_Comp, _TitleString, _Ptr, _Len) \
- if((_Comp) & level) \
- { \
+ if ((_Comp) & level) { \
int __i; \
u8 buffer[MAX_STR_LEN]; \
- int length = (_Len<MAX_STR_LEN)? _Len : (MAX_STR_LEN-1) ; \
+ int length = (_Len < MAX_STR_LEN) ? _Len : (MAX_STR_LEN - 1); \
memset(buffer, 0, MAX_STR_LEN); \
- memcpy(buffer, (u8 *)_Ptr, length ); \
- for( __i=0; __i<MAX_STR_LEN; __i++ ) \
- { \
- if( !PRINTABLE(buffer[__i]) ) buffer[__i] = '?'; \
+ memcpy(buffer, (u8 *)_Ptr, length); \
+ for (__i = 0; __i < MAX_STR_LEN; __i++) { \
+ if (!PRINTABLE(buffer[__i])) \
+ buffer[__i] = '?'; \
} \
buffer[length] = '\0'; \
printk("Rtl819x: "); \
@@ -644,9 +636,9 @@ do { if (ieee80211_debug_level & (level)) \
#include <linux/if_arp.h> /* ARPHRD_ETHER */
#ifndef WIRELESS_SPY
-#define WIRELESS_SPY // enable iwspy support
+#define WIRELESS_SPY /* enable iwspy support */
#endif
-#include <net/iw_handler.h> // new driver API
+#include <net/iw_handler.h> /* new driver API */
#ifndef ETH_P_PAE
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
@@ -873,29 +865,28 @@ struct ieee80211_rx_stats {
u32 beacon_time;
u8 nic_type;
u16 Length;
- // u8 DataRate; // In 0.5 Mbps
- u8 SignalQuality; // in 0-100 index.
- s32 RecvSignalPower; // Real power in dBm for this packet, no beautification and aggregation.
- s8 RxPower; // in dBm Translate from PWdB
- u8 SignalStrength; // in 0-100 index.
+ u8 SignalQuality; /* in 0-100 index. */
+ s32 RecvSignalPower; /* Real power in dBm for this packet, no beautification and aggregation. */
+ s8 RxPower; /* in dBm Translate from PWdB */
+ u8 SignalStrength; /* in 0-100 index. */
u16 bHwError:1;
u16 bCRC:1;
u16 bICV:1;
u16 bShortPreamble:1;
- u16 Antenna:1; //for rtl8185
- u16 Decrypted:1; //for rtl8185, rtl8187
- u16 Wakeup:1; //for rtl8185
- u16 Reserved0:1; //for rtl8185
+ u16 Antenna:1; /* for rtl8185 */
+ u16 Decrypted:1; /* for rtl8185, rtl8187 */
+ u16 Wakeup:1; /* for rtl8185 */
+ u16 Reserved0:1; /* for rtl8185 */
u8 AGC;
u32 TimeStampLow;
u32 TimeStampHigh;
bool bShift;
- bool bIsQosData; // Added by Annie, 2005-12-22.
+ bool bIsQosData;
u8 UserPriority;
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
- //1Attention Please!!!<11n or 8190 specific code should be put below this line>
- //1!!!!!!!!!!!!!!!!!!!!!!!!!!!
+	/*
+	 * Attention: 11n or 8190 specific code should be put below this line.
+	 */
u8 RxDrvInfoSize;
u8 RxBufShift;
@@ -904,21 +895,20 @@ struct ieee80211_rx_stats {
bool bContainHTC;
bool RxIs40MHzPacket;
u32 RxPWDBAll;
- u8 RxMIMOSignalStrength[4]; // in 0~100 index
+ u8 RxMIMOSignalStrength[4]; /* in 0~100 index */
s8 RxMIMOSignalQuality[2];
bool bPacketMatchBSSID;
bool bIsCCK;
bool bPacketToSelf;
- //added by amy
- u8* virtual_address;
- u16 packetlength; // Total packet length: Must equal to sum of all FragLength
- u16 fraglength; // FragLength should equal to PacketLength in non-fragment case
- u16 fragoffset; // Data offset for this fragment
+ u8 *virtual_address;
+	u16 packetlength; /* Total packet length: must equal the sum of all FragLength values */
+	u16 fraglength; /* FragLength should equal PacketLength in the non-fragment case */
+ u16 fragoffset; /* Data offset for this fragment */
u16 ntotalfrag;
bool bisrxaggrsubframe;
- bool bPacketBeacon; //cosa add for rssi
- bool bToSelfBA; //cosa add for rssi
- char cck_adc_pwdb[4]; //cosa add for rx path selection
+ bool bPacketBeacon; /* cosa add for rssi */
+ bool bToSelfBA; /* cosa add for rssi */
+ char cck_adc_pwdb[4]; /* cosa add for rx path selection */
u16 Seq_Num;
};
@@ -1045,9 +1035,9 @@ enum ieee80211_mfie {
MFIE_TYPE_ERP = 42,
MFIE_TYPE_RSN = 48,
MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP= 45,
- MFIE_TYPE_HT_INFO= 61,
- MFIE_TYPE_AIRONET=133,
+ MFIE_TYPE_HT_CAP = 45,
+ MFIE_TYPE_HT_INFO = 61,
+ MFIE_TYPE_AIRONET = 133,
MFIE_TYPE_GENERIC = 221,
MFIE_TYPE_QOS_PARAMETER = 222,
};
@@ -1199,7 +1189,7 @@ struct ieee80211_txb {
struct ieee80211_drv_agg_txb {
u8 nr_drv_agg_frames;
struct sk_buff *tx_agg_frames[MAX_TX_AGG_COUNT];
-}__attribute__((packed));
+} __attribute__((packed));
#define MAX_SUBFRAME_COUNT 64
struct ieee80211_rxb {
@@ -1207,7 +1197,7 @@ struct ieee80211_rxb {
struct sk_buff *subframes[MAX_SUBFRAME_COUNT];
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
-}__attribute__((packed));
+} __attribute__((packed));
typedef union _frameqos {
u16 shortdata;
@@ -1218,8 +1208,8 @@ typedef union _frameqos {
u16 ack_policy:2;
u16 reserved:1;
u16 txop:8;
- }field;
-}frameqos,*pframeqos;
+ } field;
+} frameqos, *pframeqos;
/* SWEEP TABLE ENTRIES NUMBER*/
#define MAX_SWEEP_TAB_ENTRIES 42
@@ -1234,7 +1224,7 @@ typedef union _frameqos {
#define MAX_CHANNEL_NUMBER 161
#define IEEE80211_SOFTMAC_SCAN_TIME 100
-//(HZ / 2)
+/* (HZ / 2) */
#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
#define CRC_LENGTH 4U
@@ -1310,7 +1300,6 @@ struct ieee80211_tim_parameters {
u8 tim_period;
} __attribute__ ((packed));
-//#else
struct ieee80211_wmm_ac_param {
u8 ac_aci_acm_aifsn;
u8 ac_ecwmin_ecwmax;
@@ -1340,7 +1329,7 @@ struct ieee80211_wmm_tspec_elem {
u32 min_phy_rate;
u16 surp_band_allow;
u16 medium_time;
-}__attribute__((packed));
+} __attribute__((packed));
enum eap_type {
EAP_PACKET = 0,
EAPOL_START,
@@ -1361,17 +1350,15 @@ static inline const char *eap_get_type(int type)
{
return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
}
-//added by amy for reorder
-static inline u8 Frame_QoSTID(u8* buf)
+static inline u8 Frame_QoSTID(u8 *buf)
{
struct ieee80211_hdr_3addr *hdr;
u16 fc;
hdr = (struct ieee80211_hdr_3addr *)buf;
fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos*)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
+ return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
}
-//added by amy for reorder
struct eapol {
u8 snap[6];
@@ -1429,7 +1416,7 @@ struct ieee80211_info_element_hdr {
*/
#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 2 //1Mbps
+#define IEEE80211_DEFAULT_BASIC_RATE 2 /* 1Mbps */
enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define MAX_SP_Len (WMM_all_frame << 4)
@@ -1445,8 +1432,7 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
-//added by David for QoS 2006/6/30
-//#define WMM_Hang_8187
+
#ifdef WMM_Hang_8187
#undef WMM_Hang_8187
#endif
@@ -1461,15 +1447,14 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define MAX_RECEIVE_BUFFER_SIZE 9100
-//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
-//#define UP2AC(up) ((up<3) ? ((up==0)?1:0) : (up>>1))
+/* UP mapping to AC, used in MgntQuery_SequenceNumber() and maybe for DSCP */
#define UP2AC(up) ( \
((up) < 1) ? WME_AC_BE : \
((up) < 3) ? WME_AC_BK : \
((up) < 4) ? WME_AC_BE : \
((up) < 6) ? WME_AC_VI : \
WME_AC_VO)
-//AC Mapping to UP, using in Tx part for selecting the corresponding TX queue
+/* AC mapping to UP, used in the Tx path to select the corresponding TX queue */
#define AC2UP(_ac) ( \
((_ac) == WME_AC_VO) ? 6 : \
((_ac) == WME_AC_VI) ? 5 : \
@@ -1496,19 +1481,19 @@ typedef struct _bss_ht{
bool support_ht;
- // HT related elements
+ /* HT related elements */
u8 ht_cap_buf[32];
u16 ht_cap_len;
u8 ht_info_buf[32];
u16 ht_info_len;
HT_SPEC_VER ht_spec_ver;
- //HT_CAPABILITY_ELE bdHTCapEle;
- //HT_INFORMATION_ELE bdHTInfoEle;
+ /* HT_CAPABILITY_ELE bdHTCapEle; */
+ /* HT_INFORMATION_ELE bdHTInfoEle; */
bool aggregation;
bool long_slot_time;
-}bss_ht, *pbss_ht;
+} bss_ht, *pbss_ht;
typedef enum _erp_t{
ERP_NonERPpresent = 0x01,
@@ -1525,16 +1510,15 @@ struct ieee80211_network {
u8 ssid[IW_ESSID_MAX_SIZE + 1];
u8 ssid_len;
struct ieee80211_qos_data qos_data;
- //added by amy for LEAP
bool bWithAironetIE;
bool bCkipSupported;
bool bCcxRmEnable;
u16 CcxRmState[2];
- // CCXv4 S59, MBSSID.
+ /* CCXv4 S59, MBSSID. */
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[6];
- // CCX 2 S38, WLAN Device Version Number element. Annie, 2006-08-20.
+ /* CCX 2 S38, WLAN Device Version Number element. */
bool bWithCcxVerNum;
u8 BssCcxVerNumber;
/* These are network statistics */
@@ -1563,29 +1547,28 @@ struct ieee80211_network {
u8 dtim_data;
u32 last_dtim_sta_time[2];
- //appeded for QoS
+	/* appended for QoS */
u8 wmm_info;
struct ieee80211_wmm_ac_param wmm_param[4];
u8 QoS_Enable;
#ifdef THOMAS_TURBO
- u8 Turbo_Enable;//enable turbo mode, added by thomas
+ u8 Turbo_Enable;/* enable turbo mode, added by thomas */
#endif
#ifdef ENABLE_DOT11D
u16 CountryIeLen;
u8 CountryIeBuf[MAX_IE_LEN];
#endif
- // HT Related, by amy, 2008.04.29
+ /* HT Related */
BSS_HT bssht;
- // Add to handle broadcom AP management frame CCK rate.
+ /* Add to handle broadcom AP management frame CCK rate. */
bool broadcom_cap_exist;
bool ralink_cap_exist;
bool atheros_cap_exist;
bool cisco_cap_exist;
bool unknown_cap_exist;
-// u8 berp_info;
bool berp_info_valid;
bool buseprotection;
- //put at the end of the structure.
+ /* put at the end of the structure. */
struct list_head list;
};
@@ -1650,75 +1633,66 @@ enum ieee80211_state {
typedef struct tx_pending_t{
int frag;
struct ieee80211_txb *txb;
-}tx_pending_t;
+} tx_pending_t;
-typedef struct _bandwidth_autoswitch
-{
+typedef struct _bandwidth_autoswitch {
long threshold_20Mhzto40Mhz;
long threshold_40Mhzto20Mhz;
bool bforced_tx20Mhz;
bool bautoswitch_enable;
-}bandwidth_autoswitch,*pbandwidth_autoswitch;
+} bandwidth_autoswitch, *pbandwidth_autoswitch;
-//added by amy for order
#define REORDER_WIN_SIZE 128
#define REORDER_ENTRY_NUM 128
-typedef struct _RX_REORDER_ENTRY
-{
+typedef struct _RX_REORDER_ENTRY {
struct list_head List;
u16 SeqNum;
- struct ieee80211_rxb* prxb;
+ struct ieee80211_rxb *prxb;
} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
-//added by amy for order
-typedef enum _Fsync_State{
+
+typedef enum _Fsync_State {
Default_Fsync,
HW_Fsync,
SW_Fsync
-}Fsync_State;
+} Fsync_State;
-// Power save mode configured.
-typedef enum _RT_PS_MODE
-{
- eActive, // Active/Continuous access.
- eMaxPs, // Max power save mode.
- eFastPs // Fast power save mode.
-}RT_PS_MODE;
+/* Power save mode configured. */
+typedef enum _RT_PS_MODE {
+ eActive, /* Active/Continuous access. */
+ eMaxPs, /* Max power save mode. */
+ eFastPs /* Fast power save mode. */
+} RT_PS_MODE;
-typedef enum _IPS_CALLBACK_FUNCION
-{
+typedef enum _IPS_CALLBACK_FUNCION {
IPS_CALLBACK_NONE = 0,
IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
IPS_CALLBACK_JOIN_REQUEST = 2,
-}IPS_CALLBACK_FUNCION;
+} IPS_CALLBACK_FUNCION;
-typedef enum _RT_JOIN_ACTION{
+typedef enum _RT_JOIN_ACTION {
RT_JOIN_INFRA = 1,
RT_JOIN_IBSS = 2,
RT_START_IBSS = 3,
RT_NO_ACTION = 4,
-}RT_JOIN_ACTION;
+} RT_JOIN_ACTION;
-typedef struct _IbssParms{
+typedef struct _IbssParms {
u16 atimWin;
-}IbssParms, *PIbssParms;
-#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
+} IbssParms, *PIbssParms;
+#define MAX_NUM_RATES 264 /* Max num of supported rates element: 8, max num of ext. supported rates: 255. */
-// RF state.
-typedef enum _RT_RF_POWER_STATE
-{
+/* RF state. */
+typedef enum _RT_RF_POWER_STATE {
eRfOn,
eRfSleep,
eRfOff
-}RT_RF_POWER_STATE;
+} RT_RF_POWER_STATE;
-typedef struct _RT_POWER_SAVE_CONTROL
-{
+typedef struct _RT_POWER_SAVE_CONTROL {
- //
- // Inactive Power Save(IPS) : Disable RF when disconnected
- //
+	/* Inactive Power Save (IPS): disable RF when disconnected */
bool bInactivePs;
bool bIPSModeBackup;
bool bSwRfProcessing;
@@ -1726,15 +1700,15 @@ typedef struct _RT_POWER_SAVE_CONTROL
struct work_struct InactivePsWorkItem;
struct timer_list InactivePsTimer;
- // Return point for join action
+ /* Return point for join action */
IPS_CALLBACK_FUNCION ReturnPoint;
- // Recored Parameters for rescheduled JoinRequest
+	/* Recorded parameters for rescheduled JoinRequest */
bool bTmpBssDesc;
RT_JOIN_ACTION tmpJoinAction;
struct ieee80211_network tmpBssDesc;
- // Recored Parameters for rescheduled MgntLinkRequest
+	/* Recorded parameters for rescheduled MgntLinkRequest */
bool bTmpScanOnly;
bool bTmpActiveScan;
bool bTmpFilterHiddenAP;
@@ -1753,23 +1727,20 @@ typedef struct _RT_POWER_SAVE_CONTROL
IbssParms tmpIbpm;
bool bTmpIbpm;
- //
- // Leisre Poswer Save : Disable RF if connected but traffic is not busy
- //
+	/* Leisure Power Save: disable RF if connected but traffic is not busy */
bool bLeisurePs;
-}RT_POWER_SAVE_CONTROL,*PRT_POWER_SAVE_CONTROL;
+} RT_POWER_SAVE_CONTROL, *PRT_POWER_SAVE_CONTROL;
typedef u32 RT_RF_CHANGE_SOURCE;
#define RF_CHANGE_BY_SW BIT31
#define RF_CHANGE_BY_HW BIT30
#define RF_CHANGE_BY_PS BIT29
#define RF_CHANGE_BY_IPS BIT28
-#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
+#define RF_CHANGE_BY_INIT 0 /* Do not change the RFOff reason. */
#ifdef ENABLE_DOT11D
-typedef enum
-{
+typedef enum {
COUNTRY_CODE_FCC = 0,
COUNTRY_CODE_IC = 1,
COUNTRY_CODE_ETSI = 2,
@@ -1781,84 +1752,78 @@ typedef enum
COUNTRY_CODE_TELEC,
COUNTRY_CODE_MIC,
COUNTRY_CODE_GLOBAL_DOMAIN
-}country_code_type_t;
+} country_code_type_t;
#endif
#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T{
+typedef struct _RT_LINK_DETECT_T {
u32 NumRecvBcnInPeriod;
u32 NumRecvDataInPeriod;
- u32 RxBcnNum[RT_MAX_LD_SLOT_NUM]; // number of Rx beacon / CheckForHang_period to determine link status
- u32 RxDataNum[RT_MAX_LD_SLOT_NUM]; // number of Rx data / CheckForHang_period to determine link status
- u16 SlotNum; // number of CheckForHang period to determine link status
+ u32 RxBcnNum[RT_MAX_LD_SLOT_NUM]; /* number of Rx beacon / CheckForHang_period to determine link status */
+ u32 RxDataNum[RT_MAX_LD_SLOT_NUM]; /* number of Rx data / CheckForHang_period to determine link status */
+ u16 SlotNum; /* number of CheckForHang period to determine link status */
u16 SlotIndex;
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
bool bBusyTraffic;
-}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
struct ieee80211_device {
struct net_device *dev;
struct ieee80211_security sec;
- //hw security related
-// u8 hwsec_support; //support?
- u8 hwsec_active; //hw security active.
+ /* hw security related */
+ u8 hwsec_active; /* hw security active. */
bool is_silent_reset;
bool ieee_up;
- //added by amy
bool bSupportRemoteWakeUp;
- RT_PS_MODE dot11PowerSaveMode; // Power save mode configured.
+ RT_PS_MODE dot11PowerSaveMode; /* Power save mode configured. */
bool actscanning;
bool beinretry;
RT_RF_POWER_STATE eRFPowerState;
RT_RF_CHANGE_SOURCE RfOffReason;
bool is_set_key;
- //11n spec related I wonder if These info structure need to be moved out of ieee80211_device
+	/* 11n spec related; these info structures may need to be moved out of ieee80211_device */
- //11n HT below
+ /* 11n HT below */
PRT_HIGH_THROUGHPUT pHTInfo;
- //struct timer_list SwBwTimer;
-// spinlock_t chnlop_spinlock;
spinlock_t bw_spinlock;
spinlock_t reorder_spinlock;
- // for HT operation rate set. we use this one for HT data rate to seperate different descriptors
- //the way fill this is the same as in the IE
- u8 Regdot11HTOperationalRateSet[16]; //use RATR format
- u8 dot11HTOperationalRateSet[16]; //use RATR format
+	/* For the HT operation rate set. We use this one for the HT data rate
+	 * to separate different descriptors;
+	 * the way to fill this is the same as in the IE.
+	 */
+ u8 Regdot11HTOperationalRateSet[16]; /* use RATR format */
+ u8 dot11HTOperationalRateSet[16]; /* use RATR format */
u8 RegHTSuppRateSet[16];
u8 HTCurrentOperaRate;
u8 HTHighestOperaRate;
- //wb added for rate operation mode to firmware
+ /* wb added for rate operation mode to firmware */
u8 bTxDisableRateFallBack;
u8 bTxUseDriverAssingedRate;
atomic_t atm_chnlop;
atomic_t atm_swbw;
-// u8 HTHighestOperaRate;
-// u8 HTCurrentOperaRate;
- // 802.11e and WMM Traffic Stream Info (TX)
+ /* 802.11e and WMM Traffic Stream Info (TX) */
struct list_head Tx_TS_Admit_List;
struct list_head Tx_TS_Pending_List;
struct list_head Tx_TS_Unused_List;
TX_TS_RECORD TxTsRecord[TOTAL_TS_NUM];
- // 802.11e and WMM Traffic Stream Info (RX)
+ /* 802.11e and WMM Traffic Stream Info (RX) */
struct list_head Rx_TS_Admit_List;
struct list_head Rx_TS_Pending_List;
struct list_head Rx_TS_Unused_List;
RX_TS_RECORD RxTsRecord[TOTAL_TS_NUM];
-//#ifdef TO_DO_LIST
RX_REORDER_ENTRY RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
-//#endif
- // Qos related. Added by Annie, 2005-11-01.
-// PSTA_QOS pStaQos;
- u8 ForcedPriority; // Force per-packet priority 1~7. (default: 0, not to force it.)
+ /* Qos related. */
+/* PSTA_QOS pStaQos; */
+	u8 ForcedPriority; /* Force per-packet priority 1~7 (default 0: do not force). */
/* Bookkeeping structures */
@@ -1925,7 +1890,7 @@ struct ieee80211_device {
* with RX of broad/multicast frames */
/* Fragmentation structures */
- // each streaming contain a entry
+	/* each stream contains an entry */
struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
unsigned int frag_next_idx[17];
u16 fts; /* Fragmentation Threshold */
@@ -1967,16 +1932,16 @@ struct ieee80211_device {
u16 prev_seq_ctl; /* used to drop duplicate frames */
/* map of allowed channels. 0 is dummy */
- // FIXME: remeber to default to a basic channel plan depending of the PHY type
+	/* FIXME: remember to default to a basic channel plan depending on the PHY type */
#ifdef ENABLE_DOT11D
- void* pDot11dInfo;
+ void *pDot11dInfo;
bool bGlobalDomain;
#else
int channel_map[MAX_CHANNEL_NUMBER+1];
#endif
int rate; /* current rate */
int basic_rate;
- //FIXME: pleace callback, see if redundant with softmac_features
+	/* FIXME: place callback, see if redundant with softmac_features */
short active_scan;
/* this contains flags for selectively enable softmac support */
@@ -2017,8 +1982,8 @@ struct ieee80211_device {
short wap_set;
short ssid_set;
- u8 wpax_type_set; //{added by David, 2006.9.28}
- u32 wpax_type_notify; //{added by David, 2006.9.26}
+ u8 wpax_type_set;
+ u32 wpax_type_notify;
/* QoS related flag */
char init_wmmparam_flag;
@@ -2040,7 +2005,7 @@ struct ieee80211_device {
struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
int mgmt_queue_head;
int mgmt_queue_tail;
-//{ added for rtl819x
+/* added for rtl819x */
#define IEEE80211_QUEUE_LIMIT 128
u8 AsocRetryCount;
unsigned int hw_header;
@@ -2049,12 +2014,12 @@ struct ieee80211_device {
struct sk_buff_head skb_drv_aggQ[MAX_QUEUE_SIZE];
u32 sta_edca_param[4];
bool aggregation;
- // Enable/Disable Rx immediate BA capability.
+ /* Enable/Disable Rx immediate BA capability. */
bool enable_rx_imm_BA;
bool bibsscoordinator;
- //+by amy for DM ,080515
- //Dynamic Tx power for near/far range enable/Disable , by amy , 2008-05-15
+	/* Dynamic Tx power for near/far range enable/disable */
bool bdynamic_txpower_enable;
bool bCTSToSelfEnable;
@@ -2065,21 +2030,20 @@ struct ieee80211_device {
u8 fsync_rssi_threshold;
bool bfsync_enable;
- u8 fsync_multiple_timeinterval; // FsyncMultipleTimeInterval * FsyncTimeInterval
- u32 fsync_firstdiff_ratethreshold; // low threshold
- u32 fsync_seconddiff_ratethreshold; // decrease threshold
+ u8 fsync_multiple_timeinterval; /* FsyncMultipleTimeInterval * FsyncTimeInterval */
+ u32 fsync_firstdiff_ratethreshold; /* low threshold */
+ u32 fsync_seconddiff_ratethreshold; /* decrease threshold */
Fsync_State fsync_state;
bool bis_any_nonbepkts;
- //20Mhz 40Mhz AutoSwitch Threshold
+	/* 20MHz/40MHz auto-switch threshold */
bandwidth_autoswitch bandwidth_auto_switch;
- //for txpower tracking
+ /* for txpower tracking */
bool FwRWRF;
- //added by amy for AP roaming
+ /* added by amy for AP roaming */
RT_LINK_DETECT_T LinkDetectInfo;
- //added by amy for ps
+ /* added by amy for ps */
RT_POWER_SAVE_CONTROL PowerSaveControl;
-//}
/* used if IEEE_SOFTMAC_TX_QUEUE is set */
struct tx_pending_t tx_pending;
@@ -2095,11 +2059,8 @@ struct ieee80211_device {
struct delayed_work start_ibss_wq;
struct work_struct wx_sync_scan_wq;
struct workqueue_struct *wq;
- // Qos related. Added by Annie, 2005-11-01.
- //STA_QOS StaQos;
-
- //u32 STA_EDCA_PARAM[4];
- //CHANNEL_ACCESS_SETTING ChannelAccessSetting;
+ /* Qos related. Added by Annie, 2005-11-01. */
+ /* STA_QOS StaQos; */
/* Callback functions */
@@ -2114,10 +2075,10 @@ struct ieee80211_device {
struct net_device *dev);
int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device * dev, int pri);
+ int (*is_queue_full) (struct net_device *dev, int pri);
- int (*handle_management) (struct net_device * dev,
- struct ieee80211_network * network, u16 type);
+ int (*handle_management) (struct net_device *dev,
+ struct ieee80211_network *network, u16 type);
int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
/* Softmac-generated frames (mamagement) are TXed via this
@@ -2137,7 +2098,7 @@ struct ieee80211_device {
* This function can't sleep.
*/
void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
- struct net_device *dev,int rate);
+ struct net_device *dev, int rate);
/* stops the HW queue for DATA frames. Useful to avoid
* waste time to TX data frame when we are reassociating
@@ -2152,7 +2113,7 @@ struct ieee80211_device {
* This function can sleep. the driver should ensure
* the radio has been swithced before return.
*/
- void (*set_chan)(struct net_device *dev,short ch);
+ void (*set_chan)(struct net_device *dev, short ch);
/* These are not used if the ieee stack takes care of
* scanning (IEEE_SOFTMAC_SCAN feature set).
@@ -2186,7 +2147,7 @@ struct ieee80211_device {
* stop_send_bacons is NOT guaranteed to be called only
* after start_send_beacons.
*/
- void (*start_send_beacons) (struct net_device *dev,u16 tx_rate);
+ void (*start_send_beacons) (struct net_device *dev, u16 tx_rate);
void (*stop_send_beacons) (struct net_device *dev);
/* power save mode related */
@@ -2194,19 +2155,17 @@ struct ieee80211_device {
void (*ps_request_tx_ack) (struct net_device *dev);
void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
short (*ps_is_queue_empty) (struct net_device *dev);
- int (*handle_beacon) (struct net_device * dev, struct ieee80211_beacon * beacon, struct ieee80211_network * network);
- int (*handle_assoc_response) (struct net_device * dev, struct ieee80211_assoc_response_frame * resp, struct ieee80211_network * network);
+ int (*handle_beacon) (struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
+ int (*handle_assoc_response) (struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
/* check whether Tx hw resouce available */
short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
- //added by wb for HT related
-// void (*SwChnlByTimerHandler)(struct net_device *dev, int channel);
+ /* added by wb for HT related */
void (*SetBWModeHandler)(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-// void (*UpdateHalRATRTableHandler)(struct net_device* dev, u8* pMcsRate);
- bool (*GetNmodeSupportBySecCfg)(struct net_device* dev);
- void (*SetWirelessMode)(struct net_device* dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device* dev);
+ bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
+ void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
+ bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
/* This must be the last item so that it points to the data
@@ -2307,7 +2266,7 @@ extern inline int ieee80211_get_hdrlen(u16 fc)
case IEEE80211_FTYPE_DATA:
if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if(IEEE80211_QOS_HAS_SEQ(fc))
+ if (IEEE80211_QOS_HAS_SEQ(fc))
hdrlen += 2; /* QOS ctrl*/
break;
case IEEE80211_FTYPE_CTL:
@@ -2408,10 +2367,10 @@ extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
#if WIRELESS_EXT >= 18
extern int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra);
@@ -2477,7 +2436,9 @@ extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
union iwreq_data *awrq,
char *extra);
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
+extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b);
extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
struct iw_request_info *info,
@@ -2506,7 +2467,6 @@ extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_reques
extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b);
-//extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
@@ -2533,54 +2493,53 @@ extern int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
extern int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
-//HT
-#define MAX_RECEIVE_BUFFER_SIZE 9100 //
-extern void HTDebugHTCapability(u8* CapIE, u8* TitleString );
-extern void HTDebugHTInfo(u8* InfoIE, u8* TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-extern void HTUpdateDefaultSetting(struct ieee80211_device* ieee);
-extern void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 isEncrypt);
-extern void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len);
+/* HT */
+#define MAX_RECEIVE_BUFFER_SIZE 9100
+extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
+extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
+
+void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+extern void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
+extern void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
+extern void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
+extern void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
extern void HTOnAssocRsp(struct ieee80211_device *ieee);
-extern void HTInitializeHTInfo(struct ieee80211_device* ieee);
+extern void HTInitializeHTInfo(struct ieee80211_device *ieee);
extern void HTInitializeBssDesc(PBSS_HT pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork);
-extern u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter);
+extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
+extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
+extern u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77] ;
-extern u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame);
-//extern void HTSetConnectBwModeCallback(unsigned long data);
+extern u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
extern void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee);
-extern u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate);
-extern u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate);
-extern u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate);
-//function in BAPROC.c
-extern int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb);
-extern int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb);
-extern int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb);
-extern void TsInitAddBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA( struct ieee80211_device* ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
+extern bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
+extern u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+extern u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+extern u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
+/* function in BAPROC.c */
+extern int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
+extern int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
+extern int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
+extern void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
+extern void TsInitDelBA(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
extern void BaSetupTimeOut(unsigned long data);
extern void TxBaInactTimeout(unsigned long data);
extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry( PBA_RECORD pBA);
-//function in TS.c
+extern void ResetBaEntry(PBA_RECORD pBA);
+/* function in TS.c */
extern bool GetTs(
- struct ieee80211_device* ieee,
- PTS_COMMON_INFO *ppTS,
- u8* Addr,
+ struct ieee80211_device *ieee,
+ PTS_COMMON_INFO *ppTS,
+ u8 *Addr,
u8 TID,
- TR_SELECT TxRxSelect, //Rx:1, Tx:0
+ TR_SELECT TxRxSelect, /* Rx:1, Tx:0 */
bool bAddNewTs
);
extern void TSInitialize(struct ieee80211_device *ieee);
-extern void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS);
-extern void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr);
-extern void RemoveAllTS(struct ieee80211_device* ieee);
+extern void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
+extern void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
+extern void RemoveAllTS(struct ieee80211_device *ieee);
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
extern const long ieee80211_wlan_frequencies[];
@@ -2595,7 +2554,8 @@ extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
return ieee->scans;
}
-static inline const char *escape_essid(const char *essid, u8 essid_len) {
+static inline const char *escape_essid(const char *essid, u8 essid_len)
+{
static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
const char *s = essid;
char *d = escaped;
@@ -2630,6 +2590,6 @@ extern int ieee80211_parse_info_param(struct ieee80211_device *ieee,
struct ieee80211_network *network,
struct ieee80211_rx_stats *stats);
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index);
+void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray, u8 index);
#define RT_ASOC_RETRY_LIMIT 5
#endif /* IEEE80211_H */
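Among the macros touched above, UP2AC() maps an 802.1d user priority to a WMM access category (0 -> BE, 1-2 -> BK, 3 -> BE, 4-5 -> VI, 6-7 -> VO) and AC2UP() goes the other way. A tiny sketch of picking the category for an outgoing frame, assuming the usual WME_AC_* values from the rtl8192u headers (the helper name is hypothetical):

/* Sketch only: derive the WMM access category from the user priority. */
static inline int example_select_ac(u8 up)
{
	/* UP2AC: 0 -> BE, 1-2 -> BK, 3 -> BE, 4-5 -> VI, 6-7 -> VO */
	return UP2AC(up & 0x7);
}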
diff --git a/drivers/staging/rtl8192u/ieee80211/api.c b/drivers/staging/rtl8192u/ieee80211/api.c
index c627d029528..5f46e50e586 100644
--- a/drivers/staging/rtl8192u/ieee80211/api.c
+++ b/drivers/staging/rtl8192u/ieee80211/api.c
@@ -131,12 +131,10 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
if (alg == NULL)
goto out;
- tfm = kmalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
+ tfm = kzalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
if (tfm == NULL)
goto out_put;
- memset(tfm, 0, sizeof(*tfm) + alg->cra_ctxsize);
-
tfm->__crt_alg = alg;
if (crypto_init_flags(tfm, flags))
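This hunk is part of a pattern repeated across the crypto files below: a kmalloc() immediately followed by memset(..., 0, ...) collapses into a single kzalloc(), which returns zeroed memory. Condensed, the change above amounts to:

/* Before: allocate, then clear by hand. */
tfm = kmalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
if (tfm == NULL)
	goto out_put;
memset(tfm, 0, sizeof(*tfm) + alg->cra_ctxsize);

/* After: kzalloc() performs the zeroed allocation in one call. */
tfm = kzalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
if (tfm == NULL)
	goto out_put;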
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 39847c81e29..e1216b70495 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1829,7 +1829,7 @@ struct ieee80211_device {
spinlock_t bw_spinlock;
spinlock_t reorder_spinlock;
- // for HT operation rate set. we use this one for HT data rate to seperate different descriptors
+ // for HT operation rate set. we use this one for HT data rate to separate different descriptors
//the way fill this is the same as in the IE
u8 Regdot11HTOperationalRateSet[16]; //use RATR format
u8 dot11HTOperationalRateSet[16]; //use RATR format
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 521e7b98993..8707eba4f90 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -109,11 +109,10 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -206,11 +205,10 @@ int __init ieee80211_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 0b57632bcff..4b078e53638 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -68,10 +68,9 @@ static void * ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 9510507d8d0..a98584c845b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -67,10 +67,9 @@ static void * ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 61ad11cae38..96c2c9d67fd 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -43,10 +43,9 @@ static void * prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index b752017a4d1..7455264aa54 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -65,8 +65,8 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
if (ieee->networks)
return 0;
- ieee->networks = kmalloc(
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+ ieee->networks = kcalloc(
+ MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
GFP_KERNEL);
if (!ieee->networks) {
printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
@@ -74,9 +74,6 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
return -ENOMEM;
}
- memset(ieee->networks, 0,
- MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
-
return 0;
}
@@ -161,7 +158,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee80211_softmac_init(ieee);
- ieee->pHTInfo = (RT_HIGH_THROUGHPUT*)kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
+ ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
if (ieee->pHTInfo == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
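The beacon-cache allocation above moves from kmalloc(count * size) plus a follow-up memset() to kcalloc(count, size, ...). A short sketch of the same idea, with MAX_ITEMS and struct item standing in for MAX_NETWORK_COUNT and struct ieee80211_network:

#include <linux/slab.h>

#define MAX_ITEMS 128			/* stand-in for MAX_NETWORK_COUNT */

struct item { int id; };		/* stand-in element type */

static struct item *alloc_items(void)
{
	/* kcalloc() zeroes the array and checks the n * size
	 * multiplication for overflow, unlike an open-coded product. */
	return kcalloc(MAX_ITEMS, sizeof(struct item), GFP_KERNEL);
}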
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 7e9b367594a..192123fbec7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1302,7 +1302,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full plaintext payload */
payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
- rxb = (struct ieee80211_rxb*)kmalloc(sizeof(struct ieee80211_rxb),GFP_ATOMIC);
+ rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
if(rxb == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index d54e3a77423..a2e84c57857 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1579,8 +1579,9 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
if(*(t++) == MFIE_TYPE_CHALLENGE){
*chlen = *(t++);
- *challenge = (u8*)kmalloc(*chlen, GFP_ATOMIC);
- memcpy(*challenge, t, *chlen);
+ *challenge = kmemdup(t, *chlen, GFP_ATOMIC);
+ if (!*challenge)
+ return -ENOMEM;
}
}
@@ -1713,7 +1714,8 @@ ieee80211_rx_auth_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
//IEEE80211DMESG("Rx probe");
ieee->softmac_stats.rx_auth_rq++;
- if ((status = auth_rq_parse(skb, dest))!= -1){
+ status = auth_rq_parse(skb, dest);
+ if (status != -1) {
ieee80211_resp_to_auth(ieee, status, dest);
}
//DMESG("Dest is "MACSTR, MAC2STR(dest));
@@ -2720,10 +2722,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->seq_ctrl[i] = 0;
}
#ifdef ENABLE_DOT11D
- ieee->pDot11dInfo = kmalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(RT_DOT11D_INFO));
#endif
//added for AP roaming
ieee->LinkDetectInfo.SlotNum = 2;
@@ -2868,11 +2869,11 @@ static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3074,8 +3075,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
- new_crypt = (struct ieee80211_crypt_data *)
- kmalloc(sizeof(*new_crypt), GFP_KERNEL);
+ new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
@@ -3207,7 +3207,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
goto out;
}
- param = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL){
ret = -ENOMEM;
goto out;
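Two hunks above (the auth challenge and the WPA IE) replace a kmalloc() followed by memcpy() with kmemdup(); the auth-challenge path also gains the allocation-failure check it previously lacked. A hedged sketch of the pattern, with dup_ie() and its arguments as placeholders:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Placeholder helper: duplicate an information element into a fresh buffer. */
static u8 *dup_ie(const u8 *src, size_t len)
{
	u8 *copy = kmemdup(src, len, GFP_KERNEL);	/* kmalloc + memcpy */

	if (!copy)		/* kmemdup fails the same way kmalloc does */
		return NULL;
	return copy;
}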
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 48537d94894..81aa2ed226a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -47,7 +47,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 750e94e1711..fb78ed2876e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -30,7 +30,6 @@
******************************************************************************/
#include <linux/wireless.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -380,11 +379,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
struct ieee80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+ new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
request_module("ieee80211_crypt_wep");
@@ -849,10 +847,9 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
index 13b1e5ca436..9e4ced15edf 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
@@ -1,7 +1,6 @@
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
-//#include "EndianFree.h"
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
@@ -220,7 +219,6 @@ typedef union _QOS_INFO_FIELD{
}QOS_INFO_FIELD, *PQOS_INFO_FIELD;
-
//
// ACI to AC coding.
// Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
@@ -494,6 +492,7 @@ typedef struct _OCTET_STRING{
u8 *Octet;
u16 Length;
}OCTET_STRING, *POCTET_STRING;
+
//
// STA QoS data.
// Ref: DOT11_QOS in 8185 code. [def. in QoS_mp.h]
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 451120ff213..c3fcaae0750 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -87,10 +87,7 @@ void RxPktPendingTimeout(unsigned long data)
if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
{
pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
- if(timer_pending(&pRxTs->RxPktPendingTimer))
- del_timer_sync(&pRxTs->RxPktPendingTimer);
- pRxTs->RxPktPendingTimer.expires = jiffies + ieee->pHTInfo->RxReorderPendingTime;
- add_timer(&pRxTs->RxPktPendingTimer);
+ mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
//PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
@@ -358,6 +355,7 @@ bool GetTs(
IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
return false;
}
+
if (ieee->current_network.qos_data.supported == 0)
UP = 0;
else
@@ -532,6 +530,7 @@ void RemoveTsEntry(
void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
{
PTS_COMMON_INFO pTS, pTmpTS;
+
printk("===========>RemovePeerTS,%pM\n", Addr);
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
@@ -578,6 +577,7 @@ void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
void RemoveAllTS(struct ieee80211_device* ieee)
{
PTS_COMMON_INFO pTS, pTmpTS;
+
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
{
RemoveTsEntry(ieee, pTS, TX_DIR);
@@ -626,4 +626,3 @@ void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
else
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __FUNCTION__);
}
-EXPORT_SYMBOL(RemovePeerTS);
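The RxPktPendingTimeout() hunk above collapses the timer_pending()/del_timer_sync()/add_timer() sequence into a single mod_timer() call (and, as written, now also runs the delay through MSECS(), presumably the driver's milliseconds-to-jiffies helper). A minimal sketch of the re-arm pattern, with rearm() and delay as placeholders:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Placeholder: re-arm a pending-packet timer "delay" jiffies from now. */
static void rearm(struct timer_list *timer, unsigned long delay)
{
	/* mod_timer() updates an already-pending timer or activates an
	 * idle one, replacing the open-coded pending-check/delete/add. */
	mod_timer(timer, jiffies + delay);
}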
diff --git a/drivers/staging/rtl8192u/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211_crypt.h
index b58a3bcc0dc..0b4ea431982 100644
--- a/drivers/staging/rtl8192u/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211_crypt.h
@@ -77,7 +77,7 @@ struct ieee80211_crypt_data {
int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
+struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
void ieee80211_crypt_deinit_handler(unsigned long);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 68ebb025677..2bede271a2f 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2216,7 +2216,8 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- priv->rx_urb = (struct urb**) kmalloc (sizeof(struct urb*) * (MAX_RX_URB+1), GFP_KERNEL);
+ priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB+1),
+ GFP_KERNEL);
#ifndef JACKSON_NEW_RX
for(i=0;i<(MAX_RX_URB+1);i++){
@@ -2250,12 +2251,11 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
#endif
memset(priv->rx_urb, 0, sizeof(struct urb*) * MAX_RX_URB);
- priv->pp_rxskb = (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * MAX_RX_URB, GFP_KERNEL);
+ priv->pp_rxskb = kcalloc(MAX_RX_URB, sizeof(struct sk_buff *),
+ GFP_KERNEL);
if (priv->pp_rxskb == NULL)
goto destroy;
- memset(priv->pp_rxskb, 0, sizeof(struct sk_buff*) * MAX_RX_URB);
-
goto _middle;
@@ -2839,7 +2839,7 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
(priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT:0);
priv->AcmControl = 0;
- priv->pFirmware = (rt_firmware*)kmalloc(sizeof(rt_firmware), GFP_KERNEL);
+ priv->pFirmware = kmalloc(sizeof(rt_firmware), GFP_KERNEL);
if (priv->pFirmware)
memset(priv->pFirmware, 0, sizeof(rt_firmware));
@@ -4415,7 +4415,7 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
goto out;
}
- ipw = (struct ieee_param *)kmalloc(p->length, GFP_KERNEL);
+ ipw = kmalloc(p->length, GFP_KERNEL);
if (ipw == NULL){
ret = -ENOMEM;
goto out;
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index fd19a85297a..0cb28c776c4 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -63,7 +63,6 @@ SendTxCommandPacket(
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
ptr_buf = skb_put(skb, DataLen);
- memset(ptr_buf,0,DataLen);
memcpy(ptr_buf,pData,DataLen);
tcb_desc->txbuf_size= (u16)DataLen;
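The SendTxCommandPacket() hunk drops a memset() that zeroed a buffer immediately before a memcpy() overwrote every byte of it. The redundancy in miniature, with fill_cmd() as a placeholder for the skb fill path:

#include <linux/string.h>

/* Placeholder: copy a DataLen-sized command payload into a fresh skb area. */
static void fill_cmd(void *dst, const void *src, size_t len)
{
	/* memset(dst, 0, len) would be dead work here: the copy below
	 * rewrites the entire range anyway. */
	memcpy(dst, src, len);
}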
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 88880734921..0332c370fd8 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -39,7 +39,6 @@
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/slab.h>
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 44f2d4eaf84..27841ef6a56 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -413,7 +413,7 @@ static void qt_read_bulk_callback(struct urb *urb)
case 0x01:
/* Modem status status change 4th byte must follow */
- dbg("Modem status status. \n");
+ dbg("Modem status status.\n");
if (i > (RxCount - 4)) {
dbg("Illegal escape sequences in received data\n");
break;
@@ -424,7 +424,7 @@ static void qt_read_bulk_callback(struct urb *urb)
flag = 1;
break;
case 0xff:
- dbg("No status sequence. \n");
+ dbg("No status sequence.\n");
if (tty) {
ProcessRxChar(tty, port, data[i]);
@@ -738,7 +738,7 @@ static int qt_startup(struct usb_serial *serial)
if (!qt_port) {
dbg("%s: kmalloc for quatech_port (%d) failed!.",
__func__, i);
- for(--i; i >= 0; i--) {
+ for (--i; i >= 0; i--) {
port = serial->port[i];
kfree(usb_get_serial_port_data(port));
usb_set_serial_port_data(port, NULL);
@@ -963,11 +963,11 @@ static int qt_open(struct tty_struct *tty,
}
- dbg("port number is %d \n", port->number);
- dbg("serial number is %d \n", port->serial->minor);
- dbg("Bulkin endpoint is %d \n", port->bulk_in_endpointAddress);
- dbg("BulkOut endpoint is %d \n", port->bulk_out_endpointAddress);
- dbg("Interrupt endpoint is %d \n", port->interrupt_in_endpointAddress);
+ dbg("port number is %d\n", port->number);
+ dbg("serial number is %d\n", port->serial->minor);
+ dbg("Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
+ dbg("BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
+ dbg("Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
dbg("port's number in the device is %d\n", quatech_port->port_num);
quatech_port->read_urb = port->read_urb;
@@ -1470,7 +1470,7 @@ static int qt_tiocmget(struct tty_struct *tty, struct file *file)
int retval = -ENODEV;
unsigned long flags;
- dbg("In %s \n", __func__);
+ dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
@@ -1496,14 +1496,14 @@ static int qt_tiocmset(struct tty_struct *tty, struct file *file,
unsigned long flags;
int retval = -ENODEV;
- dbg("In %s \n", __func__);
+ dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
spin_lock_irqsave(&qt_port->lock, flags);
- dbg("%s - port %d \n", __func__, port->number);
+ dbg("%s - port %d\n", __func__, port->number);
dbg("%s - qt_port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmset(tty, port, file, serial, set);
@@ -1584,9 +1584,9 @@ static int qt_calc_num_ports(struct usb_serial *serial)
{
int num_ports;
- dbg("numberofendpoints: %d \n",
+ dbg("numberofendpoints: %d\n",
(int)serial->interface->cur_altsetting->desc.bNumEndpoints);
- dbg("numberofendpoints: %d \n",
+ dbg("numberofendpoints: %d\n",
(int)serial->interface->altsetting->desc.bNumEndpoints);
num_ports =
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 7daeced317c..bebf0fd2af8 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1367,12 +1367,12 @@ static void slic_mcast_set_list(struct net_device *dev)
struct adapter *adapter = netdev_priv(dev);
int status = STATUS_SUCCESS;
char *addresses;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
ASSERT(adapter);
- netdev_for_each_mc_addr(mc_list, dev) {
- addresses = (char *) &mc_list->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addresses = (char *) &ha->addr;
status = slic_mcast_add_list(adapter, addresses);
if (status != STATUS_SUCCESS)
break;
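slic_mcast_set_list() above is updated for the removal of struct dev_mc_list: the walk now uses netdev_for_each_mc_addr() with a struct netdev_hw_addr, and the MAC bytes live in ha->addr rather than dmi_addr. A sketch of the converted loop, with add_mc_filter() standing in for the driver's own programming hook:

#include <linux/netdevice.h>

static void program_mc_filters(struct net_device *dev,
			       int (*add_mc_filter)(const u8 *addr))
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		if (add_mc_filter(ha->addr))	/* ha->addr replaces dmi_addr */
			break;
	}
}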
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 8d7261c052e..9ffeb36ddde 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Silicon Motion Technology Corp.
* Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
+ * Boyod boyod.yang@siliconmotion.com.cn
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
@@ -13,17 +13,17 @@
* more details.
*
* Version 0.10.26192.21.01
- * - Add PowerPC/Big endian support
- * - Add 2D support for Lynx
- * - Verified on2.6.19.2 Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ * - Add PowerPC/Big endian support
+ * - Add 2D support for Lynx
+ * - Verified on2.6.19.2 Boyod.yang <boyod.yang@siliconmotion.com.cn>
*
* Version 0.09.2621.00.01
- * - Only support Linux Kernel's version 2.6.21.
+ * - Only support Linux Kernel's version 2.6.21.
* Boyod.yang <boyod.yang@siliconmotion.com.cn>
*
* Version 0.09
- * - Only support Linux Kernel's version 2.6.12.
- * Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ * - Only support Linux Kernel's version 2.6.12.
+ * Boyod.yang <boyod.yang@siliconmotion.com.cn>
*/
#ifndef __KERNEL__
@@ -688,13 +688,11 @@ static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *dev,
{
struct smtcfb_info *sfb;
- sfb = kmalloc(sizeof(struct smtcfb_info), GFP_KERNEL);
+ sfb = kzalloc(sizeof(struct smtcfb_info), GFP_KERNEL);
if (!sfb)
return NULL;
- memset(sfb, 0, sizeof(struct smtcfb_info));
-
sfb->currcon = -1;
sfb->dev = dev;
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
index 7ee565c2c95..0c113835b85 100644
--- a/drivers/staging/sm7xx/smtcfb.h
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006 Silicon Motion Technology Corp.
* Authors: Ge Wang, gewang@siliconmotion.com
- * Boyod boyod.yang@siliconmotion.com.cn
+ * Boyod boyod.yang@siliconmotion.com.cn
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
diff --git a/drivers/staging/strip/Kconfig b/drivers/staging/strip/Kconfig
deleted file mode 100644
index 36257b5cd6e..00000000000
--- a/drivers/staging/strip/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-config STRIP
- tristate "STRIP (Metricom starmode radio IP)"
- depends on INET
- select WIRELESS_EXT
- ---help---
- Say Y if you have a Metricom radio and intend to use Starmode Radio
- IP. STRIP is a radio protocol developed for the MosquitoNet project
- to send Internet traffic using Metricom radios. Metricom radios are
- small, battery powered, 100kbit/sec packet radio transceivers, about
- the size and weight of a cellular telephone. (You may also have heard
- them called "Metricom modems" but we avoid the term "modem" because
- it misleads many people into thinking that you can plug a Metricom
- modem into a phone line and use it as a modem.)
-
- You can use STRIP on any Linux machine with a serial port, although
- it is obviously most useful for people with laptop computers. If you
- think you might get a Metricom radio in the future, there is no harm
- in saying Y to STRIP now, except that it makes the kernel a bit
- bigger.
-
- To compile this as a module, choose M here: the module will be
- called strip.
diff --git a/drivers/staging/strip/Makefile b/drivers/staging/strip/Makefile
deleted file mode 100644
index 6417bdcac2f..00000000000
--- a/drivers/staging/strip/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_STRIP) += strip.o
diff --git a/drivers/staging/strip/TODO b/drivers/staging/strip/TODO
deleted file mode 100644
index 9bd15a2f6d9..00000000000
--- a/drivers/staging/strip/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - step up and maintain this driver to ensure that it continues
- to work. Having the hardware for this is pretty much a
- requirement. If this does not happen, the will be removed in
- the 2.6.35 kernel release.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/strip/strip.c b/drivers/staging/strip/strip.c
deleted file mode 100644
index c976c6b4d28..00000000000
--- a/drivers/staging/strip/strip.c
+++ /dev/null
@@ -1,2823 +0,0 @@
-/*
- * Copyright 1996 The Board of Trustees of The Leland Stanford
- * Junior University. All Rights Reserved.
- *
- * Permission to use, copy, modify, and distribute this
- * software and its documentation for any purpose and without
- * fee is hereby granted, provided that the above copyright
- * notice appear in all copies. Stanford University
- * makes no representations about the suitability of this
- * software for any purpose. It is provided "as is" without
- * express or implied warranty.
- *
- * strip.c This module implements Starmode Radio IP (STRIP)
- * for kernel-based devices like TTY. It interfaces between a
- * raw TTY, and the kernel's INET protocol layers (via DDI).
- *
- * Version: @(#)strip.c 1.3 July 1997
- *
- * Author: Stuart Cheshire <cheshire@cs.stanford.edu>
- *
- * Fixes: v0.9 12th Feb 1996 (SC)
- * New byte stuffing (2+6 run-length encoding)
- * New watchdog timer task
- * New Protocol key (SIP0)
- *
- * v0.9.1 3rd March 1996 (SC)
- * Changed to dynamic device allocation -- no more compile
- * time (or boot time) limit on the number of STRIP devices.
- *
- * v0.9.2 13th March 1996 (SC)
- * Uses arp cache lookups (but doesn't send arp packets yet)
- *
- * v0.9.3 17th April 1996 (SC)
- * Fixed bug where STR_ERROR flag was getting set unneccessarily
- * (causing otherwise good packets to be unneccessarily dropped)
- *
- * v0.9.4 27th April 1996 (SC)
- * First attempt at using "&COMMAND" Starmode AT commands
- *
- * v0.9.5 29th May 1996 (SC)
- * First attempt at sending (unicast) ARP packets
- *
- * v0.9.6 5th June 1996 (Elliot)
- * Put "message level" tags in every "printk" statement
- *
- * v0.9.7 13th June 1996 (laik)
- * Added support for the /proc fs
- *
- * v0.9.8 July 1996 (Mema)
- * Added packet logging
- *
- * v1.0 November 1996 (SC)
- * Fixed (severe) memory leaks in the /proc fs code
- * Fixed race conditions in the logging code
- *
- * v1.1 January 1997 (SC)
- * Deleted packet logging (use tcpdump instead)
- * Added support for Metricom Firmware v204 features
- * (like message checksums)
- *
- * v1.2 January 1997 (SC)
- * Put portables list back in
- *
- * v1.3 July 1997 (SC)
- * Made STRIP driver set the radio's baud rate automatically.
- * It is no longer necessarily to manually set the radio's
- * rate permanently to 115200 -- the driver handles setting
- * the rate automatically.
- */
-
-#ifdef MODULE
-static const char StripVersion[] = "1.3A-STUART.CHESHIRE-MODULAR";
-#else
-static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
-#endif
-
-#define TICKLE_TIMERS 0
-#define EXT_COUNTERS 1
-
-
-/************************************************************************/
-/* Header files */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-# include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_strip.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/serial.h>
-#include <linux/serialP.h>
-#include <linux/rcupdate.h>
-#include <linux/compat.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/net_namespace.h>
-
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/time.h>
-#include <linux/jiffies.h>
-
-/************************************************************************/
-/* Useful structures and definitions */
-
-/*
- * A MetricomKey identifies the protocol being carried inside a Metricom
- * Starmode packet.
- */
-
-typedef union {
- __u8 c[4];
- __u32 l;
-} MetricomKey;
-
-/*
- * An IP address can be viewed as four bytes in memory (which is what it is) or as
- * a single 32-bit long (which is convenient for assignment, equality testing etc.)
- */
-
-typedef union {
- __u8 b[4];
- __u32 l;
-} IPaddr;
-
-/*
- * A MetricomAddressString is used to hold a printable representation of
- * a Metricom address.
- */
-
-typedef struct {
- __u8 c[24];
-} MetricomAddressString;
-
-/* Encapsulation can expand packet of size x to 65/64x + 1
- * Sent packet looks like "<CR>*<address>*<key><encaps payload><CR>"
- * 1 1 1-18 1 4 ? 1
- * eg. <CR>*0000-1234*SIP0<encaps payload><CR>
- * We allow 31 bytes for the stars, the key, the address and the <CR>s
- */
-#define STRIP_ENCAP_SIZE(X) (32 + (X)*65L/64L)
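As a quick check of the expansion bound documented above: for the driver's default 1152-byte MTU, STRIP_ENCAP_SIZE(1152) = 32 + 1152 * 65 / 64 = 32 + 1170 = 1202 bytes of transmit buffer for the framed, stuffed packet.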
-
-/*
- * A STRIP_Header is never really sent over the radio, but making a dummy
- * header for internal use within the kernel that looks like an Ethernet
- * header makes certain other software happier. For example, tcpdump
- * already understands Ethernet headers.
- */
-
-typedef struct {
- MetricomAddress dst_addr; /* Destination address, e.g. "0000-1234" */
- MetricomAddress src_addr; /* Source address, e.g. "0000-5678" */
- unsigned short protocol; /* The protocol type, using Ethernet codes */
-} STRIP_Header;
-
-typedef struct {
- char c[60];
-} MetricomNode;
-
-#define NODE_TABLE_SIZE 32
-typedef struct {
- struct timeval timestamp;
- int num_nodes;
- MetricomNode node[NODE_TABLE_SIZE];
-} MetricomNodeTable;
-
-enum { FALSE = 0, TRUE = 1 };
-
-/*
- * Holds the radio's firmware version.
- */
-typedef struct {
- char c[50];
-} FirmwareVersion;
-
-/*
- * Holds the radio's serial number.
- */
-typedef struct {
- char c[18];
-} SerialNumber;
-
-/*
- * Holds the radio's battery voltage.
- */
-typedef struct {
- char c[11];
-} BatteryVoltage;
-
-typedef struct {
- char c[8];
-} char8;
-
-enum {
- NoStructure = 0, /* Really old firmware */
- StructuredMessages = 1, /* Parsable AT response msgs */
- ChecksummedMessages = 2 /* Parsable AT response msgs with checksums */
-};
-
-struct strip {
- int magic;
- /*
- * These are pointers to the malloc()ed frame buffers.
- */
-
- unsigned char *rx_buff; /* buffer for received IP packet */
- unsigned char *sx_buff; /* buffer for received serial data */
- int sx_count; /* received serial data counter */
- int sx_size; /* Serial buffer size */
- unsigned char *tx_buff; /* transmitter buffer */
- unsigned char *tx_head; /* pointer to next byte to XMIT */
- int tx_left; /* bytes left in XMIT queue */
- int tx_size; /* Serial buffer size */
-
- /*
- * STRIP interface statistics.
- */
-
- unsigned long rx_packets; /* inbound frames counter */
- unsigned long tx_packets; /* outbound frames counter */
- unsigned long rx_errors; /* Parity, etc. errors */
- unsigned long tx_errors; /* Planned stuff */
- unsigned long rx_dropped; /* No memory for skb */
- unsigned long tx_dropped; /* When MTU change */
- unsigned long rx_over_errors; /* Frame bigger than STRIP buf. */
-
- unsigned long pps_timer; /* Timer to determine pps */
- unsigned long rx_pps_count; /* Counter to determine pps */
- unsigned long tx_pps_count; /* Counter to determine pps */
- unsigned long sx_pps_count; /* Counter to determine pps */
- unsigned long rx_average_pps; /* rx packets per second * 8 */
- unsigned long tx_average_pps; /* tx packets per second * 8 */
- unsigned long sx_average_pps; /* sent packets per second * 8 */
-
-#ifdef EXT_COUNTERS
- unsigned long rx_bytes; /* total received bytes */
- unsigned long tx_bytes; /* total received bytes */
- unsigned long rx_rbytes; /* bytes thru radio i/f */
- unsigned long tx_rbytes; /* bytes thru radio i/f */
- unsigned long rx_sbytes; /* tot bytes thru serial i/f */
- unsigned long tx_sbytes; /* tot bytes thru serial i/f */
- unsigned long rx_ebytes; /* tot stat/err bytes */
- unsigned long tx_ebytes; /* tot stat/err bytes */
-#endif
-
- /*
- * Internal variables.
- */
-
- struct list_head list; /* Linked list of devices */
-
- int discard; /* Set if serial error */
- int working; /* Is radio working correctly? */
- int firmware_level; /* Message structuring level */
- int next_command; /* Next periodic command */
- unsigned int user_baud; /* The user-selected baud rate */
- int mtu; /* Our mtu (to spot changes!) */
- long watchdog_doprobe; /* Next time to test the radio */
- long watchdog_doreset; /* Time to do next reset */
- long gratuitous_arp; /* Time to send next ARP refresh */
- long arp_interval; /* Next ARP interval */
- struct timer_list idle_timer; /* For periodic wakeup calls */
- MetricomAddress true_dev_addr; /* True address of radio */
- int manual_dev_addr; /* Hack: See note below */
-
- FirmwareVersion firmware_version; /* The radio's firmware version */
- SerialNumber serial_number; /* The radio's serial number */
- BatteryVoltage battery_voltage; /* The radio's battery voltage */
-
- /*
- * Other useful structures.
- */
-
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* Our device structure */
-
- /*
- * Neighbour radio records
- */
-
- MetricomNodeTable portables;
- MetricomNodeTable poletops;
-};
-
-/*
- * Note: manual_dev_addr hack
- *
- * It is not possible to change the hardware address of a Metricom radio,
- * or to send packets with a user-specified hardware source address, thus
- * trying to manually set a hardware source address is a questionable
- * thing to do. However, if the user *does* manually set the hardware
- * source address of a STRIP interface, then the kernel will believe it,
- * and use it in certain places. For example, the hardware address listed
- * by ifconfig will be the manual address, not the true one.
- * (Both addresses are listed in /proc/net/strip.)
- * Also, ARP packets will be sent out giving the user-specified address as
- * the source address, not the real address. This is dangerous, because
- * it means you won't receive any replies -- the ARP replies will go to
- * the specified address, which will be some other radio. The case where
- * this is useful is when that other radio is also connected to the same
- * machine. This allows you to connect a pair of radios to one machine,
- * and to use one exclusively for inbound traffic, and the other
- * exclusively for outbound traffic. Pretty neat, huh?
- *
- * Here's the full procedure to set this up:
- *
- * 1. "slattach" two interfaces, e.g. st0 for outgoing packets,
- * and st1 for incoming packets
- *
- * 2. "ifconfig" st0 (outbound radio) to have the hardware address
- * which is the real hardware address of st1 (inbound radio).
- * Now when it sends out packets, it will masquerade as st1, and
- * replies will be sent to that radio, which is exactly what we want.
- *
- * 3. Set the route table entry ("route add default ..." or
- * "route add -net ...", as appropriate) to send packets via the st0
- * interface (outbound radio). Do not add any route which sends packets
- * out via the st1 interface -- that radio is for inbound traffic only.
- *
- * 4. "ifconfig" st1 (inbound radio) to have hardware address zero.
- * This tells the STRIP driver to "shut down" that interface and not
- * send any packets through it. In particular, it stops sending the
- * periodic gratuitous ARP packets that a STRIP interface normally sends.
- * Also, when packets arrive on that interface, it will search the
- * interface list to see if there is another interface who's manual
- * hardware address matches its own real address (i.e. st0 in this
- * example) and if so it will transfer ownership of the skbuff to
- * that interface, so that it looks to the kernel as if the packet
- * arrived on that interface. This is necessary because when the
- * kernel sends an ARP packet on st0, it expects to get a reply on
- * st0, and if it sees the reply come from st1 then it will ignore
- * it (to be accurate, it puts the entry in the ARP table, but
- * labelled in such a way that st0 can't use it).
- *
- * Thanks to Petros Maniatis for coming up with the idea of splitting
- * inbound and outbound traffic between two interfaces, which turned
- * out to be really easy to implement, even if it is a bit of a hack.
- *
- * Having set a manual address on an interface, you can restore it
- * to automatic operation (where the address is automatically kept
- * consistent with the real address of the radio) by setting a manual
- * address of all ones, e.g. "ifconfig st0 hw strip FFFFFFFFFFFF"
- * This 'turns off' manual override mode for the device address.
- *
- * Note: The IEEE 802 headers reported in tcpdump will show the *real*
- * radio addresses the packets were sent and received from, so that you
- * can see what is really going on with packets, and which interfaces
- * they are really going through.
- */
-
-
-/************************************************************************/
-/* Constants */
-
-/*
- * CommandString1 works on all radios
- * Other CommandStrings are only used with firmware that provides structured responses.
- *
- * ats319=1 Enables Info message for node additions and deletions
- * ats319=2 Enables Info message for a new best node
- * ats319=4 Enables checksums
- * ats319=8 Enables ACK messages
- */
-
-static const int MaxCommandStringLength = 32;
-static const int CompatibilityCommand = 1;
-
-static const char CommandString0[] = "*&COMMAND*ATS319=7"; /* Turn on checksums & info messages */
-static const char CommandString1[] = "*&COMMAND*ATS305?"; /* Query radio name */
-static const char CommandString2[] = "*&COMMAND*ATS325?"; /* Query battery voltage */
-static const char CommandString3[] = "*&COMMAND*ATS300?"; /* Query version information */
-static const char CommandString4[] = "*&COMMAND*ATS311?"; /* Query poletop list */
-static const char CommandString5[] = "*&COMMAND*AT~LA"; /* Query portables list */
-typedef struct {
- const char *string;
- long length;
-} StringDescriptor;
-
-static const StringDescriptor CommandString[] = {
- {CommandString0, sizeof(CommandString0) - 1},
- {CommandString1, sizeof(CommandString1) - 1},
- {CommandString2, sizeof(CommandString2) - 1},
- {CommandString3, sizeof(CommandString3) - 1},
- {CommandString4, sizeof(CommandString4) - 1},
- {CommandString5, sizeof(CommandString5) - 1}
-};
-
-#define GOT_ALL_RADIO_INFO(S) \
- ((S)->firmware_version.c[0] && \
- (S)->battery_voltage.c[0] && \
- memcmp(&(S)->true_dev_addr, zero_address.c, sizeof(zero_address)))
-
-static const char hextable[16] = "0123456789ABCDEF";
-
-static const MetricomAddress zero_address;
-static const MetricomAddress broadcast_address =
- { {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF} };
-
-static const MetricomKey SIP0Key = { "SIP0" };
-static const MetricomKey ARP0Key = { "ARP0" };
-static const MetricomKey ATR_Key = { "ATR " };
-static const MetricomKey ACK_Key = { "ACK_" };
-static const MetricomKey INF_Key = { "INF_" };
-static const MetricomKey ERR_Key = { "ERR_" };
-
-static const long MaxARPInterval = 60 * HZ; /* One minute */
-
-/*
- * Maximum Starmode packet length is 1183 bytes. Allowing 4 bytes for
- * protocol key, 4 bytes for checksum, one byte for CR, and 65/64 expansion
- * for STRIP encoding, that translates to a maximum payload MTU of 1155.
- * Note: A standard NFS 1K data packet is a total of 0x480 (1152) bytes
- * long, including IP header, UDP header, and NFS header. Setting the STRIP
- * MTU to 1152 allows us to send default sized NFS packets without fragmentation.
- */
-static const unsigned short MAX_SEND_MTU = 1152;
-static const unsigned short MAX_RECV_MTU = 1500; /* Hoping for Ethernet sized packets in the future! */
-static const unsigned short DEFAULT_STRIP_MTU = 1152;
-static const int STRIP_MAGIC = 0x5303;
-static const long LongTime = 0x7FFFFFFF;
-
-/************************************************************************/
-/* Global variables */
-
-static LIST_HEAD(strip_list);
-static DEFINE_SPINLOCK(strip_lock);
-
-/************************************************************************/
-/* Macros */
-
-/* Returns TRUE if text T begins with prefix P */
-#define has_prefix(T,L,P) (((L) >= sizeof(P)-1) && !strncmp((T), (P), sizeof(P)-1))
-
-/* Returns TRUE if text T of length L is equal to string S */
-#define text_equal(T,L,S) (((L) == sizeof(S)-1) && !strncmp((T), (S), sizeof(S)-1))
-
-#define READHEX(X) ((X)>='0' && (X)<='9' ? (X)-'0' : \
- (X)>='a' && (X)<='f' ? (X)-'a'+10 : \
- (X)>='A' && (X)<='F' ? (X)-'A'+10 : 0 )
-
-#define READHEX16(X) ((__u16)(READHEX(X)))
-
-#define READDEC(X) ((X)>='0' && (X)<='9' ? (X)-'0' : 0)
-
-#define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)]))
-
-#define JIFFIE_TO_SEC(X) ((X) / HZ)
-
-
-/************************************************************************/
-/* Utility routines */
-
-static int arp_query(unsigned char *haddr, u32 paddr,
- struct net_device *dev)
-{
- struct neighbour *neighbor_entry;
- int ret = 0;
-
- neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
-
- if (neighbor_entry != NULL) {
- neighbor_entry->used = jiffies;
- if (neighbor_entry->nud_state & NUD_VALID) {
- memcpy(haddr, neighbor_entry->ha, dev->addr_len);
- ret = 1;
- }
- neigh_release(neighbor_entry);
- }
- return ret;
-}
-
-static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
- __u8 * end)
-{
- static const int MAX_DumpData = 80;
- __u8 pkt_text[MAX_DumpData], *p = pkt_text;
-
- *p++ = '\"';
-
- while (ptr < end && p < &pkt_text[MAX_DumpData - 4]) {
- if (*ptr == '\\') {
- *p++ = '\\';
- *p++ = '\\';
- } else {
- if (*ptr >= 32 && *ptr <= 126) {
- *p++ = *ptr;
- } else {
- sprintf(p, "\\%02X", *ptr);
- p += 3;
- }
- }
- ptr++;
- }
-
- if (ptr == end)
- *p++ = '\"';
- *p++ = 0;
-
- printk(KERN_INFO "%s: %-13s%s\n", strip_info->dev->name, msg, pkt_text);
-}
-
-
-/************************************************************************/
-/* Byte stuffing/unstuffing routines */
-
-/* Stuffing scheme:
- * 00 Unused (reserved character)
- * 01-3F Run of 2-64 different characters
- * 40-7F Run of 1-64 different characters plus a single zero at the end
- * 80-BF Run of 1-64 of the same character
- * C0-FF Run of 1-64 zeroes (ASCII 0)
- */
-
-typedef enum {
- Stuff_Diff = 0x00,
- Stuff_DiffZero = 0x40,
- Stuff_Same = 0x80,
- Stuff_Zero = 0xC0,
- Stuff_NoCode = 0xFF, /* Special code, meaning no code selected */
-
- Stuff_CodeMask = 0xC0,
- Stuff_CountMask = 0x3F,
- Stuff_MaxCount = 0x3F,
- Stuff_Magic = 0x0D /* The value we are eliminating */
-} StuffingCode;
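To make the code table above concrete, here is a worked trace of the encoder that follows: the count field stores "run length minus one", and every byte written (code bytes and data bytes alike) is XORed with Stuff_Magic (0x0D) so a raw carriage return can never appear in the stuffed stream. A run of three identical 'A' bytes (0x41 0x41 0x41) therefore encodes as two bytes, the code byte (Stuff_Same + 2) ^ 0x0D = 0x8F followed by the data byte 0x41 ^ 0x0D = 0x4C, while a run of three zero bytes collapses to the single code byte (Stuff_Zero + 2) ^ 0x0D = 0xCF.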
-
-/* StuffData encodes the data starting at "src" for "length" bytes.
- * It writes it to the buffer pointed to by "dst" (which must be at least
- * as long as 1 + 65/64 of the input length). The output may be up to 1.6%
- * larger than the input for pathological input, but will usually be smaller.
- * StuffData returns the new value of the dst pointer as its result.
- * "code_ptr_ptr" points to a "__u8 *" which is used to hold encoding state
- * between calls, allowing an encoded packet to be incrementally built up
- * from small parts. On the first call, the "__u8 *" pointed to should be
- * initialized to NULL; between subsequent calls the calling routine should
- * leave the value alone and simply pass it back unchanged so that the
- * encoder can recover its current state.
- */
-
-#define StuffData_FinishBlock(X) \
-(*code_ptr = (X) ^ Stuff_Magic, code = Stuff_NoCode)
-
-static __u8 *StuffData(__u8 * src, __u32 length, __u8 * dst,
- __u8 ** code_ptr_ptr)
-{
- __u8 *end = src + length;
- __u8 *code_ptr = *code_ptr_ptr;
- __u8 code = Stuff_NoCode, count = 0;
-
- if (!length)
- return (dst);
-
- if (code_ptr) {
- /*
- * Recover state from last call, if applicable
- */
- code = (*code_ptr ^ Stuff_Magic) & Stuff_CodeMask;
- count = (*code_ptr ^ Stuff_Magic) & Stuff_CountMask;
- }
-
- while (src < end) {
- switch (code) {
- /* Stuff_NoCode: If no current code, select one */
- case Stuff_NoCode:
- /* Record where we're going to put this code */
- code_ptr = dst++;
- count = 0; /* Reset the count (zero means one instance) */
- /* Tentatively start a new block */
- if (*src == 0) {
- code = Stuff_Zero;
- src++;
- } else {
- code = Stuff_Same;
- *dst++ = *src++ ^ Stuff_Magic;
- }
- /* Note: We optimistically assume run of same -- */
- /* which will be fixed later in Stuff_Same */
- /* if it turns out not to be true. */
- break;
-
- /* Stuff_Zero: We already have at least one zero encoded */
- case Stuff_Zero:
- /* If another zero, count it, else finish this code block */
- if (*src == 0) {
- count++;
- src++;
- } else {
- StuffData_FinishBlock(Stuff_Zero + count);
- }
- break;
-
- /* Stuff_Same: We already have at least one byte encoded */
- case Stuff_Same:
- /* If another one the same, count it */
- if ((*src ^ Stuff_Magic) == code_ptr[1]) {
- count++;
- src++;
- break;
- }
- /* else, this byte does not match this block. */
- /* If we already have two or more bytes encoded, finish this code block */
- if (count) {
- StuffData_FinishBlock(Stuff_Same + count);
- break;
- }
- /* else, we only have one so far, so switch to Stuff_Diff code */
- code = Stuff_Diff;
- /* and fall through to Stuff_Diff case below
- * Note cunning cleverness here: case Stuff_Diff compares
- * the current character with the previous two to see if it
- * has a run of three the same. Won't this be an error if
- * there aren't two previous characters stored to compare with?
- * No. Because we know the current character is *not* the same
- * as the previous one, the first test below will necessarily
- * fail and the send half of the "if" won't be executed.
- */
-
- /* Stuff_Diff: We have at least two *different* bytes encoded */
- case Stuff_Diff:
- /* If this is a zero, must encode a Stuff_DiffZero, and begin a new block */
- if (*src == 0) {
- StuffData_FinishBlock(Stuff_DiffZero +
- count);
- }
- /* else, if we have three in a row, it is worth starting a Stuff_Same block */
- else if ((*src ^ Stuff_Magic) == dst[-1]
- && dst[-1] == dst[-2]) {
- /* Back off the last two characters we encoded */
- code += count - 2;
- /* Note: "Stuff_Diff + 0" is an illegal code */
- if (code == Stuff_Diff + 0) {
- code = Stuff_Same + 0;
- }
- StuffData_FinishBlock(code);
- code_ptr = dst - 2;
- /* dst[-1] already holds the correct value */
- count = 2; /* 2 means three bytes encoded */
- code = Stuff_Same;
- }
- /* else, another different byte, so add it to the block */
- else {
- *dst++ = *src ^ Stuff_Magic;
- count++;
- }
- src++; /* Consume the byte */
- break;
- }
- if (count == Stuff_MaxCount) {
- StuffData_FinishBlock(code + count);
- }
- }
- if (code == Stuff_NoCode) {
- *code_ptr_ptr = NULL;
- } else {
- *code_ptr_ptr = code_ptr;
- StuffData_FinishBlock(code + count);
- }
- return (dst);
-}
-
-/*
- * UnStuffData decodes the data at "src", up to (but not including) "end".
- * It writes the decoded data into the buffer pointed to by "dst", up to a
- * maximum of "dst_length", and returns the new value of "src" so that a
- * follow-on call can read more data, continuing from where the first left off.
- *
- * There are three types of results:
- * 1. The source data runs out before extracting "dst_length" bytes:
- * UnStuffData returns NULL to indicate failure.
- * 2. The source data produces exactly "dst_length" bytes:
- * UnStuffData returns new_src = end to indicate that all bytes were consumed.
- * 3. "dst_length" bytes are extracted, with more remaining.
- * UnStuffData returns new_src < end to indicate that there are more bytes
- * to be read.
- *
- * Note: The decoding may be destructive, in that it may alter the source
- * data in the process of decoding it (this is necessary to allow a follow-on
- * call to resume correctly).
- */
-
-static __u8 *UnStuffData(__u8 * src, __u8 * end, __u8 * dst,
- __u32 dst_length)
-{
- __u8 *dst_end = dst + dst_length;
- /* Sanity check */
- if (!src || !end || !dst || !dst_length)
- return (NULL);
- while (src < end && dst < dst_end) {
- int count = (*src ^ Stuff_Magic) & Stuff_CountMask;
- switch ((*src ^ Stuff_Magic) & Stuff_CodeMask) {
- case Stuff_Diff:
- if (src + 1 + count >= end)
- return (NULL);
- do {
- *dst++ = *++src ^ Stuff_Magic;
- }
- while (--count >= 0 && dst < dst_end);
- if (count < 0)
- src += 1;
- else {
- if (count == 0)
- *src = Stuff_Same ^ Stuff_Magic;
- else
- *src =
- (Stuff_Diff +
- count) ^ Stuff_Magic;
- }
- break;
- case Stuff_DiffZero:
- if (src + 1 + count >= end)
- return (NULL);
- do {
- *dst++ = *++src ^ Stuff_Magic;
- }
- while (--count >= 0 && dst < dst_end);
- if (count < 0)
- *src = Stuff_Zero ^ Stuff_Magic;
- else
- *src =
- (Stuff_DiffZero + count) ^ Stuff_Magic;
- break;
- case Stuff_Same:
- if (src + 1 >= end)
- return (NULL);
- do {
- *dst++ = src[1] ^ Stuff_Magic;
- }
- while (--count >= 0 && dst < dst_end);
- if (count < 0)
- src += 2;
- else
- *src = (Stuff_Same + count) ^ Stuff_Magic;
- break;
- case Stuff_Zero:
- do {
- *dst++ = 0;
- }
- while (--count >= 0 && dst < dst_end);
- if (count < 0)
- src += 1;
- else
- *src = (Stuff_Zero + count) ^ Stuff_Magic;
- break;
- }
- }
- if (dst < dst_end)
- return (NULL);
- else
- return (src);
-}
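To make the three outcomes listed in the comment above concrete, a caller of UnStuffData() can distinguish them as in the sketch below (decoded and want are placeholders for the caller's output buffer and requested length):

static void handle_unstuff(__u8 *src, __u8 *end, __u8 *decoded, __u32 want)
{
	__u8 *next = UnStuffData(src, end, decoded, want);

	if (next == NULL) {
		/* 1: the stuffed data ran out before "want" bytes emerged */
	} else if (next == end) {
		/* 2: exactly "want" bytes produced, all input consumed */
	} else {
		/* 3: "want" bytes produced, more stuffed data starts at next */
	}
}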
-
-
-/************************************************************************/
-/* General routines for STRIP */
-
-/*
- * set_baud sets the baud rate to the rate defined by baudcode
- */
-static void set_baud(struct tty_struct *tty, speed_t baudrate)
-{
- struct ktermios old_termios;
-
- mutex_lock(&tty->termios_mutex);
- old_termios =*(tty->termios);
- tty_encode_baud_rate(tty, baudrate, baudrate);
- tty->ops->set_termios(tty, &old_termios);
- mutex_unlock(&tty->termios_mutex);
-}
-
-/*
- * Convert a string to a Metricom Address.
- */
-
-#define IS_RADIO_ADDRESS(p) ( \
- isdigit((p)[0]) && isdigit((p)[1]) && isdigit((p)[2]) && isdigit((p)[3]) && \
- (p)[4] == '-' && \
- isdigit((p)[5]) && isdigit((p)[6]) && isdigit((p)[7]) && isdigit((p)[8]) )
-
-static int string_to_radio_address(MetricomAddress * addr, __u8 * p)
-{
- if (!IS_RADIO_ADDRESS(p))
- return (1);
- addr->c[0] = 0;
- addr->c[1] = 0;
- addr->c[2] = READHEX(p[0]) << 4 | READHEX(p[1]);
- addr->c[3] = READHEX(p[2]) << 4 | READHEX(p[3]);
- addr->c[4] = READHEX(p[5]) << 4 | READHEX(p[6]);
- addr->c[5] = READHEX(p[7]) << 4 | READHEX(p[8]);
- return (0);
-}
-
-/*
- * Convert a Metricom Address to a string.
- */
-
-static __u8 *radio_address_to_string(const MetricomAddress * addr,
- MetricomAddressString * p)
-{
- sprintf(p->c, "%02X%02X-%02X%02X", addr->c[2], addr->c[3],
- addr->c[4], addr->c[5]);
- return (p->c);
-}
-
-/*
- * Note: Must make sure sx_size is big enough to receive a stuffed
- * MAX_RECV_MTU packet. Additionally, we also want to ensure that it's
- * big enough to receive a large radio neighbour list (currently 4K).
- */
-
-static int allocate_buffers(struct strip *strip_info, int mtu)
-{
- struct net_device *dev = strip_info->dev;
- int sx_size = max_t(int, STRIP_ENCAP_SIZE(MAX_RECV_MTU), 4096);
- int tx_size = STRIP_ENCAP_SIZE(mtu) + MaxCommandStringLength;
- __u8 *r = kmalloc(MAX_RECV_MTU, GFP_ATOMIC);
- __u8 *s = kmalloc(sx_size, GFP_ATOMIC);
- __u8 *t = kmalloc(tx_size, GFP_ATOMIC);
- if (r && s && t) {
- strip_info->rx_buff = r;
- strip_info->sx_buff = s;
- strip_info->tx_buff = t;
- strip_info->sx_size = sx_size;
- strip_info->tx_size = tx_size;
- strip_info->mtu = dev->mtu = mtu;
- return (1);
- }
- kfree(r);
- kfree(s);
- kfree(t);
- return (0);
-}
-
-/*
- * MTU has been changed by the IP layer.
- * We could be in
- * an upcall from the tty driver, or in an ip packet queue.
- */
-static int strip_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct strip *strip_info = netdev_priv(dev);
- int old_mtu = strip_info->mtu;
- unsigned char *orbuff = strip_info->rx_buff;
- unsigned char *osbuff = strip_info->sx_buff;
- unsigned char *otbuff = strip_info->tx_buff;
-
- if (new_mtu > MAX_SEND_MTU) {
- printk(KERN_ERR
- "%s: MTU exceeds maximum allowable (%d), MTU change cancelled.\n",
- strip_info->dev->name, MAX_SEND_MTU);
- return -EINVAL;
- }
-
- spin_lock_bh(&strip_lock);
- if (!allocate_buffers(strip_info, new_mtu)) {
- printk(KERN_ERR "%s: unable to grow strip buffers, MTU change cancelled.\n",
- strip_info->dev->name);
- spin_unlock_bh(&strip_lock);
- return -ENOMEM;
- }
-
- if (strip_info->sx_count) {
- if (strip_info->sx_count <= strip_info->sx_size)
- memcpy(strip_info->sx_buff, osbuff,
- strip_info->sx_count);
- else {
- strip_info->discard = strip_info->sx_count;
- strip_info->rx_over_errors++;
- }
- }
-
- if (strip_info->tx_left) {
- if (strip_info->tx_left <= strip_info->tx_size)
- memcpy(strip_info->tx_buff, strip_info->tx_head,
- strip_info->tx_left);
- else {
- strip_info->tx_left = 0;
- strip_info->tx_dropped++;
- }
- }
- strip_info->tx_head = strip_info->tx_buff;
- spin_unlock_bh(&strip_lock);
-
- printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n",
- strip_info->dev->name, old_mtu, strip_info->mtu);
-
- kfree(orbuff);
- kfree(osbuff);
- kfree(otbuff);
- return 0;
-}
-
-static void strip_unlock(struct strip *strip_info)
-{
- /*
- * Set the timer to go off in one second.
- */
- strip_info->idle_timer.expires = jiffies + 1 * HZ;
- add_timer(&strip_info->idle_timer);
- netif_wake_queue(strip_info->dev);
-}
-
-
-
-/*
- * If the time is in the near future, time_delta prints the number of
- * seconds to go into the buffer and returns the address of the buffer.
- * If the time is not in the near future, it returns the address of the
- * string "Not scheduled" The buffer must be long enough to contain the
- * ascii representation of the number plus 9 charactes for the " seconds"
- * and the null character.
- */
-#ifdef CONFIG_PROC_FS
-static char *time_delta(char buffer[], long time)
-{
- time -= jiffies;
- if (time > LongTime / 2)
- return ("Not scheduled");
- if (time < 0)
- time = 0; /* Don't print negative times */
- sprintf(buffer, "%ld seconds", time / HZ);
- return (buffer);
-}
-
-/* get Nth element of the linked list */
-static struct strip *strip_get_idx(loff_t pos)
-{
- struct strip *str;
- int i = 0;
-
- list_for_each_entry_rcu(str, &strip_list, list) {
- if (pos == i)
- return str;
- ++i;
- }
- return NULL;
-}
-
-static void *strip_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- rcu_read_lock();
- return *pos ? strip_get_idx(*pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *strip_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct list_head *l;
- struct strip *s;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- return strip_get_idx(1);
-
- s = v;
- l = &s->list;
- list_for_each_continue_rcu(l, &strip_list) {
- return list_entry(l, struct strip, list);
- }
- return NULL;
-}
-
-static void strip_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static void strip_seq_neighbours(struct seq_file *seq,
- const MetricomNodeTable * table,
- const char *title)
-{
- /* We wrap this in a do/while loop, so if the table changes */
- /* while we're reading it, we just go around and try again. */
- struct timeval t;
-
- do {
- int i;
- t = table->timestamp;
- if (table->num_nodes)
- seq_printf(seq, "\n %s\n", title);
- for (i = 0; i < table->num_nodes; i++) {
- MetricomNode node;
-
- spin_lock_bh(&strip_lock);
- node = table->node[i];
- spin_unlock_bh(&strip_lock);
- seq_printf(seq, " %s\n", node.c);
- }
- } while (table->timestamp.tv_sec != t.tv_sec
- || table->timestamp.tv_usec != t.tv_usec);
-}
-
-/*
- * This function prints radio status information via the seq_file
- * interface. The interface takes care of buffer size and over
- * run issues.
- *
- * The buffer in seq_file is PAGESIZE (4K)
- * so this routine should never print more or it will get truncated.
- * With the maximum of 32 portables and 32 poletops
- * reported, the routine outputs 3107 bytes into the buffer.
- */
-static void strip_seq_status_info(struct seq_file *seq,
- const struct strip *strip_info)
-{
- char temp[32];
- MetricomAddressString addr_string;
-
- /* First, we must copy all of our data to a safe place, */
- /* in case a serial interrupt comes in and changes it. */
- int tx_left = strip_info->tx_left;
- unsigned long rx_average_pps = strip_info->rx_average_pps;
- unsigned long tx_average_pps = strip_info->tx_average_pps;
- unsigned long sx_average_pps = strip_info->sx_average_pps;
- int working = strip_info->working;
- int firmware_level = strip_info->firmware_level;
- long watchdog_doprobe = strip_info->watchdog_doprobe;
- long watchdog_doreset = strip_info->watchdog_doreset;
- long gratuitous_arp = strip_info->gratuitous_arp;
- long arp_interval = strip_info->arp_interval;
- FirmwareVersion firmware_version = strip_info->firmware_version;
- SerialNumber serial_number = strip_info->serial_number;
- BatteryVoltage battery_voltage = strip_info->battery_voltage;
- char *if_name = strip_info->dev->name;
- MetricomAddress true_dev_addr = strip_info->true_dev_addr;
- MetricomAddress dev_dev_addr =
- *(MetricomAddress *) strip_info->dev->dev_addr;
- int manual_dev_addr = strip_info->manual_dev_addr;
-#ifdef EXT_COUNTERS
- unsigned long rx_bytes = strip_info->rx_bytes;
- unsigned long tx_bytes = strip_info->tx_bytes;
- unsigned long rx_rbytes = strip_info->rx_rbytes;
- unsigned long tx_rbytes = strip_info->tx_rbytes;
- unsigned long rx_sbytes = strip_info->rx_sbytes;
- unsigned long tx_sbytes = strip_info->tx_sbytes;
- unsigned long rx_ebytes = strip_info->rx_ebytes;
- unsigned long tx_ebytes = strip_info->tx_ebytes;
-#endif
-
- seq_printf(seq, "\nInterface name\t\t%s\n", if_name);
- seq_printf(seq, " Radio working:\t\t%s\n", working ? "Yes" : "No");
- radio_address_to_string(&true_dev_addr, &addr_string);
- seq_printf(seq, " Radio address:\t\t%s\n", addr_string.c);
- if (manual_dev_addr) {
- radio_address_to_string(&dev_dev_addr, &addr_string);
- seq_printf(seq, " Device address:\t%s\n", addr_string.c);
- }
- seq_printf(seq, " Firmware version:\t%s", !working ? "Unknown" :
- !firmware_level ? "Should be upgraded" :
- firmware_version.c);
- if (firmware_level >= ChecksummedMessages)
- seq_printf(seq, " (Checksums Enabled)");
- seq_printf(seq, "\n");
- seq_printf(seq, " Serial number:\t\t%s\n", serial_number.c);
- seq_printf(seq, " Battery voltage:\t%s\n", battery_voltage.c);
- seq_printf(seq, " Transmit queue (bytes):%d\n", tx_left);
- seq_printf(seq, " Receive packet rate: %ld packets per second\n",
- rx_average_pps / 8);
- seq_printf(seq, " Transmit packet rate: %ld packets per second\n",
- tx_average_pps / 8);
- seq_printf(seq, " Sent packet rate: %ld packets per second\n",
- sx_average_pps / 8);
- seq_printf(seq, " Next watchdog probe:\t%s\n",
- time_delta(temp, watchdog_doprobe));
- seq_printf(seq, " Next watchdog reset:\t%s\n",
- time_delta(temp, watchdog_doreset));
- seq_printf(seq, " Next gratuitous ARP:\t");
-
- if (!memcmp
- (strip_info->dev->dev_addr, zero_address.c,
- sizeof(zero_address)))
- seq_printf(seq, "Disabled\n");
- else {
- seq_printf(seq, "%s\n", time_delta(temp, gratuitous_arp));
- seq_printf(seq, " Next ARP interval:\t%ld seconds\n",
- JIFFIE_TO_SEC(arp_interval));
- }
-
- if (working) {
-#ifdef EXT_COUNTERS
- seq_printf(seq, "\n");
- seq_printf(seq,
- " Total bytes: \trx:\t%lu\ttx:\t%lu\n",
- rx_bytes, tx_bytes);
- seq_printf(seq,
- " thru radio: \trx:\t%lu\ttx:\t%lu\n",
- rx_rbytes, tx_rbytes);
- seq_printf(seq,
- " thru serial port: \trx:\t%lu\ttx:\t%lu\n",
- rx_sbytes, tx_sbytes);
- seq_printf(seq,
- " Total stat/err bytes:\trx:\t%lu\ttx:\t%lu\n",
- rx_ebytes, tx_ebytes);
-#endif
- strip_seq_neighbours(seq, &strip_info->poletops,
- "Poletops:");
- strip_seq_neighbours(seq, &strip_info->portables,
- "Portables:");
- }
-}
-
-/*
- * This function exports status information from the STRIP driver through
- * the /proc file system.
- */
-static int strip_seq_show(struct seq_file *seq, void *v)
-{
- if (v == SEQ_START_TOKEN)
- seq_printf(seq, "strip_version: %s\n", StripVersion);
- else
- strip_seq_status_info(seq, (const struct strip *)v);
- return 0;
-}
-
-
-static const struct seq_operations strip_seq_ops = {
- .start = strip_seq_start,
- .next = strip_seq_next,
- .stop = strip_seq_stop,
- .show = strip_seq_show,
-};
-
-static int strip_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &strip_seq_ops);
-}
-
-static const struct file_operations strip_seq_fops = {
- .owner = THIS_MODULE,
- .open = strip_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-#endif
-
-
-
-/************************************************************************/
-/* Sending routines */
-
-static void ResetRadio(struct strip *strip_info)
-{
- struct tty_struct *tty = strip_info->tty;
- static const char init[] = "ate0q1dt**starmode\r**";
- StringDescriptor s = { init, sizeof(init) - 1 };
-
- /*
- * If the radio isn't working anymore,
- * we should clear the old status information.
- */
- if (strip_info->working) {
- printk(KERN_INFO "%s: No response: Resetting radio.\n",
- strip_info->dev->name);
- strip_info->firmware_version.c[0] = '\0';
- strip_info->serial_number.c[0] = '\0';
- strip_info->battery_voltage.c[0] = '\0';
- strip_info->portables.num_nodes = 0;
- do_gettimeofday(&strip_info->portables.timestamp);
- strip_info->poletops.num_nodes = 0;
- do_gettimeofday(&strip_info->poletops.timestamp);
- }
-
- strip_info->pps_timer = jiffies;
- strip_info->rx_pps_count = 0;
- strip_info->tx_pps_count = 0;
- strip_info->sx_pps_count = 0;
- strip_info->rx_average_pps = 0;
- strip_info->tx_average_pps = 0;
- strip_info->sx_average_pps = 0;
-
- /* Mark radio address as unknown */
- *(MetricomAddress *) & strip_info->true_dev_addr = zero_address;
- if (!strip_info->manual_dev_addr)
- *(MetricomAddress *) strip_info->dev->dev_addr =
- zero_address;
- strip_info->working = FALSE;
- strip_info->firmware_level = NoStructure;
- strip_info->next_command = CompatibilityCommand;
- strip_info->watchdog_doprobe = jiffies + 10 * HZ;
- strip_info->watchdog_doreset = jiffies + 1 * HZ;
-
-	/* If the user has selected a baud rate above 38.4, see what magic we have to do */
- if (strip_info->user_baud > 38400) {
- /*
- * Subtle stuff: Pay attention :-)
- * If the serial port is currently at the user's selected (>38.4) rate,
- * then we temporarily switch to 19.2 and issue the ATS304 command
- * to tell the radio to switch to the user's selected rate.
- * If the serial port is not currently at that rate, that means we just
- * issued the ATS304 command last time through, so this time we restore
- * the user's selected rate and issue the normal starmode reset string.
- */
- if (strip_info->user_baud == tty_get_baud_rate(tty)) {
- static const char b0[] = "ate0q1s304=57600\r";
- static const char b1[] = "ate0q1s304=115200\r";
- static const StringDescriptor baudstring[2] =
- { {b0, sizeof(b0) - 1}
- , {b1, sizeof(b1) - 1}
- };
- set_baud(tty, 19200);
- if (strip_info->user_baud == 57600)
- s = baudstring[0];
- else if (strip_info->user_baud == 115200)
- s = baudstring[1];
- else
- s = baudstring[1]; /* For now */
- } else
- set_baud(tty, strip_info->user_baud);
- }
-
- tty->ops->write(tty, s.string, s.length);
-#ifdef EXT_COUNTERS
- strip_info->tx_ebytes += s.length;
-#endif
-}
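
For illustration, a minimal userspace sketch of the two-phase high-baud reset described in the comment above: one pass through ResetRadio drops the port to 19200 and sends the ATS304 command naming the user's rate, the next pass moves the port to that rate and sends the normal starmode reset string. The state handling is simplified and the current/selected rates are example values.

#include <stdio.h>

static int port_rate = 115200;          /* pretend current tty rate         */
static const int user_rate = 115200;    /* user asked for a rate > 38400    */

static void reset_radio_step(void)
{
    if (port_rate == user_rate) {
        /* Phase 1: talk at 19200 and tell the radio to switch rates */
        port_rate = 19200;
        printf("send \"ate0q1s304=%d\\r\" at 19200\n", user_rate);
    } else {
        /* Phase 2: the radio has switched, so follow it and reset normally */
        port_rate = user_rate;
        printf("send \"ate0q1dt**starmode\\r**\" at %d\n", user_rate);
    }
}

int main(void)
{
    reset_radio_step();                 /* first pass through ResetRadio    */
    reset_radio_step();                 /* second pass                      */
    return 0;
}
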
-
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-
-static void strip_write_some_more(struct tty_struct *tty)
-{
- struct strip *strip_info = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!strip_info || strip_info->magic != STRIP_MAGIC ||
- !netif_running(strip_info->dev))
- return;
-
- if (strip_info->tx_left > 0) {
- int num_written =
- tty->ops->write(tty, strip_info->tx_head,
- strip_info->tx_left);
- strip_info->tx_left -= num_written;
- strip_info->tx_head += num_written;
-#ifdef EXT_COUNTERS
- strip_info->tx_sbytes += num_written;
-#endif
- } else { /* Else start transmission of another packet */
-
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- strip_unlock(strip_info);
- }
-}
-
-static __u8 *add_checksum(__u8 * buffer, __u8 * end)
-{
- __u16 sum = 0;
- __u8 *p = buffer;
- while (p < end)
- sum += *p++;
- end[3] = hextable[sum & 0xF];
- sum >>= 4;
- end[2] = hextable[sum & 0xF];
- sum >>= 4;
- end[1] = hextable[sum & 0xF];
- sum >>= 4;
- end[0] = hextable[sum & 0xF];
- return (end + 4);
-}
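
As a standalone illustration of the checksum scheme implemented by add_checksum() above (and checked by verify_checksum() further down): a 16-bit sum of the frame bytes is appended as four ASCII hex digits, most significant nibble first. The digit table stands in for the driver's hextable and the sample payload is made up.

#include <stdio.h>
#include <string.h>

static const char hexdigits[] = "0123456789ABCDEF";

static void emit_checksum(const unsigned char *buf, size_t len, char out[4])
{
    unsigned short sum = 0;
    size_t i;

    for (i = 0; i < len; i++)
        sum += buf[i];                  /* plain 16-bit byte sum            */
    out[3] = hexdigits[sum & 0xF]; sum >>= 4;
    out[2] = hexdigits[sum & 0xF]; sum >>= 4;
    out[1] = hexdigits[sum & 0xF]; sum >>= 4;
    out[0] = hexdigits[sum & 0xF];      /* most significant nibble first    */
}

int main(void)
{
    const char payload[] = "*1234-5678*SIP0hello";
    char csum[5] = { 0 };

    emit_checksum((const unsigned char *)payload, strlen(payload), csum);
    printf("checksum of \"%s\" is %s\n", payload, csum);
    return 0;
}
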
-
-static unsigned char *strip_make_packet(unsigned char *buffer,
- struct strip *strip_info,
- struct sk_buff *skb)
-{
- __u8 *ptr = buffer;
- __u8 *stuffstate = NULL;
- STRIP_Header *header = (STRIP_Header *) skb->data;
- MetricomAddress haddr = header->dst_addr;
- int len = skb->len - sizeof(STRIP_Header);
- MetricomKey key;
-
- /*HexDump("strip_make_packet", strip_info, skb->data, skb->data + skb->len); */
-
- if (header->protocol == htons(ETH_P_IP))
- key = SIP0Key;
- else if (header->protocol == htons(ETH_P_ARP))
- key = ARP0Key;
- else {
- printk(KERN_ERR
- "%s: strip_make_packet: Unknown packet type 0x%04X\n",
- strip_info->dev->name, ntohs(header->protocol));
- return (NULL);
- }
-
- if (len > strip_info->mtu) {
- printk(KERN_ERR
- "%s: Dropping oversized transmit packet: %d bytes\n",
- strip_info->dev->name, len);
- return (NULL);
- }
-
- /*
- * If we're sending to ourselves, discard the packet.
- * (Metricom radios choke if they try to send a packet to their own address.)
- */
- if (!memcmp(haddr.c, strip_info->true_dev_addr.c, sizeof(haddr))) {
- printk(KERN_ERR "%s: Dropping packet addressed to self\n",
- strip_info->dev->name);
- return (NULL);
- }
-
- /*
- * If this is a broadcast packet, send it to our designated Metricom
- * 'broadcast hub' radio (First byte of address being 0xFF means broadcast)
- */
- if (haddr.c[0] == 0xFF) {
- __be32 brd = 0;
- struct in_device *in_dev;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(strip_info->dev);
- if (in_dev == NULL) {
- rcu_read_unlock();
- return NULL;
- }
- if (in_dev->ifa_list)
- brd = in_dev->ifa_list->ifa_broadcast;
- rcu_read_unlock();
-
- /* arp_query returns 1 if it succeeds in looking up the address, 0 if it fails */
- if (!arp_query(haddr.c, brd, strip_info->dev)) {
- printk(KERN_ERR
- "%s: Unable to send packet (no broadcast hub configured)\n",
- strip_info->dev->name);
- return (NULL);
- }
- /*
- * If we are the broadcast hub, don't bother sending to ourselves.
- * (Metricom radios choke if they try to send a packet to their own address.)
- */
- if (!memcmp
- (haddr.c, strip_info->true_dev_addr.c, sizeof(haddr)))
- return (NULL);
- }
-
- *ptr++ = 0x0D;
- *ptr++ = '*';
- *ptr++ = hextable[haddr.c[2] >> 4];
- *ptr++ = hextable[haddr.c[2] & 0xF];
- *ptr++ = hextable[haddr.c[3] >> 4];
- *ptr++ = hextable[haddr.c[3] & 0xF];
- *ptr++ = '-';
- *ptr++ = hextable[haddr.c[4] >> 4];
- *ptr++ = hextable[haddr.c[4] & 0xF];
- *ptr++ = hextable[haddr.c[5] >> 4];
- *ptr++ = hextable[haddr.c[5] & 0xF];
- *ptr++ = '*';
- *ptr++ = key.c[0];
- *ptr++ = key.c[1];
- *ptr++ = key.c[2];
- *ptr++ = key.c[3];
-
- ptr =
- StuffData(skb->data + sizeof(STRIP_Header), len, ptr,
- &stuffstate);
-
- if (strip_info->firmware_level >= ChecksummedMessages)
- ptr = add_checksum(buffer + 1, ptr);
-
- *ptr++ = 0x0D;
- return (ptr);
-}
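
A compact sketch of the frame header that strip_make_packet() builds above: a leading <CR>, '*', the last four bytes of the destination radio address rendered as "XXXX-XXXX" hex, a second '*', and a four-character protocol key, after which the byte-stuffed payload (and optional checksum) would follow. The address and the literal "SIP0" key text are illustrative stand-ins.

#include <stdio.h>

static const char hexdigits[] = "0123456789ABCDEF";

static int frame_header(char *out, const unsigned char addr[6], const char key[4])
{
    int n = 0, i;

    out[n++] = 0x0D;                     /* leading <CR>                    */
    out[n++] = '*';                      /* start-of-address marker         */
    for (i = 2; i < 6; i++) {            /* address bytes 2..5 as hex       */
        out[n++] = hexdigits[addr[i] >> 4];
        out[n++] = hexdigits[addr[i] & 0xF];
        if (i == 3)
            out[n++] = '-';              /* "XXXX-XXXX" separator           */
    }
    out[n++] = '*';                      /* end-of-address marker           */
    for (i = 0; i < 4; i++)
        out[n++] = key[i];               /* four-character protocol key     */
    return n;                            /* stuffed payload would follow    */
}

int main(void)
{
    unsigned char addr[6] = { 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };
    char buf[32];
    int n = frame_header(buf, addr, "SIP0");

    printf("header is %d bytes: %.*s\n", n, n - 1, buf + 1);
    return 0;
}
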
-
-static void strip_send(struct strip *strip_info, struct sk_buff *skb)
-{
- MetricomAddress haddr;
- unsigned char *ptr = strip_info->tx_buff;
- int doreset = (long) jiffies - strip_info->watchdog_doreset >= 0;
- int doprobe = (long) jiffies - strip_info->watchdog_doprobe >= 0
- && !doreset;
- __be32 addr, brd;
-
- /*
- * 1. If we have a packet, encapsulate it and put it in the buffer
- */
- if (skb) {
- char *newptr = strip_make_packet(ptr, strip_info, skb);
- strip_info->tx_pps_count++;
- if (!newptr)
- strip_info->tx_dropped++;
- else {
- ptr = newptr;
- strip_info->sx_pps_count++;
- strip_info->tx_packets++; /* Count another successful packet */
-#ifdef EXT_COUNTERS
- strip_info->tx_bytes += skb->len;
- strip_info->tx_rbytes += ptr - strip_info->tx_buff;
-#endif
- /*DumpData("Sending:", strip_info, strip_info->tx_buff, ptr); */
- /*HexDump("Sending", strip_info, strip_info->tx_buff, ptr); */
- }
- }
-
- /*
- * 2. If it is time for another tickle, tack it on, after the packet
- */
- if (doprobe) {
- StringDescriptor ts = CommandString[strip_info->next_command];
-#if TICKLE_TIMERS
- {
- struct timeval tv;
- do_gettimeofday(&tv);
- printk(KERN_INFO "**** Sending tickle string %d at %02d.%06d\n",
- strip_info->next_command, tv.tv_sec % 100,
- tv.tv_usec);
- }
-#endif
- if (ptr == strip_info->tx_buff)
- *ptr++ = 0x0D;
-
- *ptr++ = '*'; /* First send "**" to provoke an error message */
- *ptr++ = '*';
-
- /* Then add the command */
- memcpy(ptr, ts.string, ts.length);
-
- /* Add a checksum ? */
- if (strip_info->firmware_level < ChecksummedMessages)
- ptr += ts.length;
- else
- ptr = add_checksum(ptr, ptr + ts.length);
-
- *ptr++ = 0x0D; /* Terminate the command with a <CR> */
-
- /* Cycle to next periodic command? */
- if (strip_info->firmware_level >= StructuredMessages)
- if (++strip_info->next_command >=
- ARRAY_SIZE(CommandString))
- strip_info->next_command = 0;
-#ifdef EXT_COUNTERS
- strip_info->tx_ebytes += ts.length;
-#endif
- strip_info->watchdog_doprobe = jiffies + 10 * HZ;
- strip_info->watchdog_doreset = jiffies + 1 * HZ;
- /*printk(KERN_INFO "%s: Routine radio test.\n", strip_info->dev->name); */
- }
-
- /*
- * 3. Set up the strip_info ready to send the data (if any).
- */
- strip_info->tx_head = strip_info->tx_buff;
- strip_info->tx_left = ptr - strip_info->tx_buff;
- set_bit(TTY_DO_WRITE_WAKEUP, &strip_info->tty->flags);
- /*
- * 4. Debugging check to make sure we're not overflowing the buffer.
- */
- if (strip_info->tx_size - strip_info->tx_left < 20)
- printk(KERN_ERR "%s: Sending%5d bytes;%5d bytes free.\n",
- strip_info->dev->name, strip_info->tx_left,
- strip_info->tx_size - strip_info->tx_left);
-
- /*
- * 5. If watchdog has expired, reset the radio. Note: if there's data waiting in
- * the buffer, strip_write_some_more will send it after the reset has finished
- */
- if (doreset) {
- ResetRadio(strip_info);
- return;
- }
-
- if (1) {
- struct in_device *in_dev;
-
- brd = addr = 0;
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(strip_info->dev);
- if (in_dev) {
- if (in_dev->ifa_list) {
- brd = in_dev->ifa_list->ifa_broadcast;
- addr = in_dev->ifa_list->ifa_local;
- }
- }
- rcu_read_unlock();
- }
-
-
- /*
- * 6. If it is time for a periodic ARP, queue one up to be sent.
- * We only do this if:
- * 1. The radio is working
- * 2. It's time to send another periodic ARP
- * 3. We really know what our address is (and it is not manually set to zero)
- * 4. We have a designated broadcast address configured
- * If we queue up an ARP packet when we don't have a designated broadcast
- * address configured, then the packet will just have to be discarded in
- * strip_make_packet. This is not fatal, but it causes misleading information
- * to be displayed in tcpdump. tcpdump will report that periodic ARPs are
- * being sent, when in fact they are not, because they are all being dropped
- * in the strip_make_packet routine.
- */
- if (strip_info->working
- && (long) jiffies - strip_info->gratuitous_arp >= 0
- && memcmp(strip_info->dev->dev_addr, zero_address.c,
- sizeof(zero_address))
- && arp_query(haddr.c, brd, strip_info->dev)) {
- /*printk(KERN_INFO "%s: Sending gratuitous ARP with interval %ld\n",
- strip_info->dev->name, strip_info->arp_interval / HZ); */
- strip_info->gratuitous_arp =
- jiffies + strip_info->arp_interval;
- strip_info->arp_interval *= 2;
- if (strip_info->arp_interval > MaxARPInterval)
- strip_info->arp_interval = MaxARPInterval;
- if (addr)
- arp_send(ARPOP_REPLY, ETH_P_ARP, addr, /* Target address of ARP packet is our address */
- strip_info->dev, /* Device to send packet on */
- addr, /* Source IP address this ARP packet comes from */
- NULL, /* Destination HW address is NULL (broadcast it) */
- strip_info->dev->dev_addr, /* Source HW address is our HW address */
- strip_info->dev->dev_addr); /* Target HW address is our HW address (redundant) */
- }
-
- /*
- * 7. All ready. Start the transmission
- */
- strip_write_some_more(strip_info->tty);
-}
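
The gratuitous-ARP scheduling in step 6 above uses a simple exponential back-off: each time a periodic ARP goes out, the interval doubles until it reaches a ceiling, and it is reset to one second whenever the radio address is (re)learned. A small sketch; HZ and the ceiling are stand-in values, since the driver's MaxARPInterval definition is not shown here.

#include <stdio.h>

#define HZ               100
#define MAX_ARP_INTERVAL (10 * 60 * HZ)   /* assumed ceiling, in jiffies */

int main(void)
{
    long interval = 1 * HZ;     /* reset to 1 s when the radio address is learned */
    int i;

    for (i = 0; i < 12; i++) {
        printf("ARP %2d sent, next one due in %ld seconds\n", i, interval / HZ);
        interval *= 2;                    /* exponential back-off */
        if (interval > MAX_ARP_INTERVAL)
            interval = MAX_ARP_INTERVAL;
    }
    return 0;
}
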
-
-/* Encapsulate a datagram and kick it into a TTY queue. */
-static netdev_tx_t strip_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct strip *strip_info = netdev_priv(dev);
-
- if (!netif_running(dev)) {
- printk(KERN_ERR "%s: xmit call when iface is down\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- netif_stop_queue(dev);
-
- del_timer(&strip_info->idle_timer);
-
-
- if (time_after(jiffies, strip_info->pps_timer + HZ)) {
- unsigned long t = jiffies - strip_info->pps_timer;
- unsigned long rx_pps_count =
- DIV_ROUND_CLOSEST(strip_info->rx_pps_count*HZ*8, t);
- unsigned long tx_pps_count =
- DIV_ROUND_CLOSEST(strip_info->tx_pps_count*HZ*8, t);
- unsigned long sx_pps_count =
- DIV_ROUND_CLOSEST(strip_info->sx_pps_count*HZ*8, t);
-
- strip_info->pps_timer = jiffies;
- strip_info->rx_pps_count = 0;
- strip_info->tx_pps_count = 0;
- strip_info->sx_pps_count = 0;
-
- strip_info->rx_average_pps = (strip_info->rx_average_pps + rx_pps_count + 1) / 2;
- strip_info->tx_average_pps = (strip_info->tx_average_pps + tx_pps_count + 1) / 2;
- strip_info->sx_average_pps = (strip_info->sx_average_pps + sx_pps_count + 1) / 2;
-
- if (rx_pps_count / 8 >= 10)
- printk(KERN_INFO "%s: WARNING: Receiving %ld packets per second.\n",
- strip_info->dev->name, rx_pps_count / 8);
- if (tx_pps_count / 8 >= 10)
- printk(KERN_INFO "%s: WARNING: Tx %ld packets per second.\n",
- strip_info->dev->name, tx_pps_count / 8);
- if (sx_pps_count / 8 >= 10)
- printk(KERN_INFO "%s: WARNING: Sending %ld packets per second.\n",
- strip_info->dev->name, sx_pps_count / 8);
- }
-
- spin_lock_bh(&strip_lock);
-
- strip_send(strip_info, skb);
-
- spin_unlock_bh(&strip_lock);
-
- if (skb)
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
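
The rate bookkeeping above keeps packets-per-second values scaled by 8 so that integer arithmetic retains some fractional precision, then blends each new measurement with the previous average. A small sketch with invented counts; div_round_closest mirrors the kernel's DIV_ROUND_CLOSEST.

#include <stdio.h>

#define HZ 100

/* Rounds to the nearest integer, like the kernel's DIV_ROUND_CLOSEST */
static unsigned long div_round_closest(unsigned long x, unsigned long d)
{
    return (x + d / 2) / d;
}

int main(void)
{
    unsigned long packets = 37;     /* packets counted in this interval          */
    unsigned long elapsed = 250;    /* jiffies since pps_timer (2.5 s at HZ=100) */
    unsigned long average = 96;     /* previous smoothed rate, scaled by 8       */
    unsigned long rate;

    rate = div_round_closest(packets * HZ * 8, elapsed);   /* instant rate, x8   */
    average = (average + rate + 1) / 2;                    /* half old, half new */

    printf("instant ~%lu pps, smoothed ~%lu pps (x8 fixed point: %lu, %lu)\n",
           rate / 8, average / 8, rate, average);
    return 0;
}
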
-
-/*
- * IdleTask periodically calls strip_xmit, so even when we have no IP packets
- * to send for an extended period of time, the watchdog processing still gets
- * done to ensure that the radio stays in Starmode
- */
-
-static void strip_IdleTask(unsigned long parameter)
-{
- strip_xmit(NULL, (struct net_device *) parameter);
-}
-
-/*
- * Create the MAC header for an arbitrary protocol layer
- *
- * saddr!=NULL means use this specific address (n/a for Metricom)
- * saddr==NULL means use default device source address
- * daddr!=NULL means use this destination address
- * daddr==NULL means leave destination address alone
- * (e.g. unresolved arp -- kernel will call
- * rebuild_header later to fill in the address)
- */
-
-static int strip_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
- struct strip *strip_info = netdev_priv(dev);
- STRIP_Header *header = (STRIP_Header *) skb_push(skb, sizeof(STRIP_Header));
-
- /*printk(KERN_INFO "%s: strip_header 0x%04X %s\n", dev->name, type,
- type == ETH_P_IP ? "IP" : type == ETH_P_ARP ? "ARP" : ""); */
-
- header->src_addr = strip_info->true_dev_addr;
- header->protocol = htons(type);
-
- /*HexDump("strip_header", netdev_priv(dev), skb->data, skb->data + skb->len); */
-
- if (!daddr)
- return (-dev->hard_header_len);
-
- header->dst_addr = *(MetricomAddress *) daddr;
- return (dev->hard_header_len);
-}
-
-/*
- * Rebuild the MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on this
- * sk_buff. We now let ARP fill in the other fields.
- * I think this should return zero if packet is ready to send,
- * or non-zero if it needs more time to do an address lookup
- */
-
-static int strip_rebuild_header(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- STRIP_Header *header = (STRIP_Header *) skb->data;
-
-	/* arp_find returns zero if it knows the address, */
- /* or if it doesn't know the address it sends an ARP packet and returns non-zero */
- return arp_find(header->dst_addr.c, skb) ? 1 : 0;
-#else
- return 0;
-#endif
-}
-
-
-/************************************************************************/
-/* Receiving routines */
-
-/*
- * This function parses the response to the ATS300? command,
- * extracting the radio version and serial number.
- */
-static void get_radio_version(struct strip *strip_info, __u8 * ptr, __u8 * end)
-{
- __u8 *p, *value_begin, *value_end;
- int len;
-
- /* Determine the beginning of the second line of the payload */
- p = ptr;
- while (p < end && *p != 10)
- p++;
- if (p >= end)
- return;
- p++;
- value_begin = p;
-
- /* Determine the end of line */
- while (p < end && *p != 10)
- p++;
- if (p >= end)
- return;
- value_end = p;
- p++;
-
- len = value_end - value_begin;
- len = min_t(int, len, sizeof(FirmwareVersion) - 1);
- if (strip_info->firmware_version.c[0] == 0)
- printk(KERN_INFO "%s: Radio Firmware: %.*s\n",
- strip_info->dev->name, len, value_begin);
- sprintf(strip_info->firmware_version.c, "%.*s", len, value_begin);
-
- /* Look for the first colon */
- while (p < end && *p != ':')
- p++;
- if (p >= end)
- return;
- /* Skip over the space */
- p += 2;
- len = sizeof(SerialNumber) - 1;
- if (p + len <= end) {
- sprintf(strip_info->serial_number.c, "%.*s", len, p);
- } else {
- printk(KERN_DEBUG
- "STRIP: radio serial number shorter (%zd) than expected (%d)\n",
- end - p, len);
- }
-}
-
-/*
- * This function parses the response to the ATS325? command,
- * extracting the radio battery voltage.
- */
-static void get_radio_voltage(struct strip *strip_info, __u8 * ptr, __u8 * end)
-{
- int len;
-
- len = sizeof(BatteryVoltage) - 1;
- if (ptr + len <= end) {
- sprintf(strip_info->battery_voltage.c, "%.*s", len, ptr);
- } else {
- printk(KERN_DEBUG
- "STRIP: radio voltage string shorter (%zd) than expected (%d)\n",
- end - ptr, len);
- }
-}
-
-/*
- * This function parses the responses to the AT~LA and ATS311 commands,
- * which list the radio's neighbours.
- */
-static void get_radio_neighbours(MetricomNodeTable * table, __u8 * ptr, __u8 * end)
-{
- table->num_nodes = 0;
- while (ptr < end && table->num_nodes < NODE_TABLE_SIZE) {
- MetricomNode *node = &table->node[table->num_nodes++];
- char *dst = node->c, *limit = dst + sizeof(*node) - 1;
- while (ptr < end && *ptr <= 32)
- ptr++;
- while (ptr < end && dst < limit && *ptr != 10)
- *dst++ = *ptr++;
- *dst++ = 0;
- while (ptr < end && ptr[-1] != 10)
- ptr++;
- }
- do_gettimeofday(&table->timestamp);
-}
-
-static int get_radio_address(struct strip *strip_info, __u8 * p)
-{
- MetricomAddress addr;
-
- if (string_to_radio_address(&addr, p))
- return (1);
-
- /* See if our radio address has changed */
- if (memcmp(strip_info->true_dev_addr.c, addr.c, sizeof(addr))) {
- MetricomAddressString addr_string;
- radio_address_to_string(&addr, &addr_string);
- printk(KERN_INFO "%s: Radio address = %s\n",
- strip_info->dev->name, addr_string.c);
- strip_info->true_dev_addr = addr;
- if (!strip_info->manual_dev_addr)
- *(MetricomAddress *) strip_info->dev->dev_addr =
- addr;
- /* Give the radio a few seconds to get its head straight, then send an arp */
- strip_info->gratuitous_arp = jiffies + 15 * HZ;
- strip_info->arp_interval = 1 * HZ;
- }
- return (0);
-}
-
-static int verify_checksum(struct strip *strip_info)
-{
- __u8 *p = strip_info->sx_buff;
- __u8 *end = strip_info->sx_buff + strip_info->sx_count - 4;
- u_short sum =
- (READHEX16(end[0]) << 12) | (READHEX16(end[1]) << 8) |
- (READHEX16(end[2]) << 4) | (READHEX16(end[3]));
- while (p < end)
- sum -= *p++;
- if (sum == 0 && strip_info->firmware_level == StructuredMessages) {
- strip_info->firmware_level = ChecksummedMessages;
- printk(KERN_INFO "%s: Radio provides message checksums\n",
- strip_info->dev->name);
- }
- return (sum == 0);
-}
-
-static void RecvErr(char *msg, struct strip *strip_info)
-{
- __u8 *ptr = strip_info->sx_buff;
- __u8 *end = strip_info->sx_buff + strip_info->sx_count;
- DumpData(msg, strip_info, ptr, end);
- strip_info->rx_errors++;
-}
-
-static void RecvErr_Message(struct strip *strip_info, __u8 * sendername,
- const __u8 * msg, u_long len)
-{
- if (has_prefix(msg, len, "001")) { /* Not in StarMode! */
- RecvErr("Error Msg:", strip_info);
- printk(KERN_INFO "%s: Radio %s is not in StarMode\n",
- strip_info->dev->name, sendername);
- }
-
- else if (has_prefix(msg, len, "002")) { /* Remap handle */
- /* We ignore "Remap handle" messages for now */
- }
-
- else if (has_prefix(msg, len, "003")) { /* Can't resolve name */
- RecvErr("Error Msg:", strip_info);
- printk(KERN_INFO "%s: Destination radio name is unknown\n",
- strip_info->dev->name);
- }
-
- else if (has_prefix(msg, len, "004")) { /* Name too small or missing */
- strip_info->watchdog_doreset = jiffies + LongTime;
-#if TICKLE_TIMERS
- {
- struct timeval tv;
- do_gettimeofday(&tv);
- printk(KERN_INFO
- "**** Got ERR_004 response at %02d.%06d\n",
- tv.tv_sec % 100, tv.tv_usec);
- }
-#endif
- if (!strip_info->working) {
- strip_info->working = TRUE;
- printk(KERN_INFO "%s: Radio now in starmode\n",
- strip_info->dev->name);
- /*
- * If the radio has just entered a working state, we should do our first
- * probe ASAP, so that we find out our radio address etc. without delay.
- */
- strip_info->watchdog_doprobe = jiffies;
- }
- if (strip_info->firmware_level == NoStructure && sendername) {
- strip_info->firmware_level = StructuredMessages;
- strip_info->next_command = 0; /* Try to enable checksums ASAP */
- printk(KERN_INFO
- "%s: Radio provides structured messages\n",
- strip_info->dev->name);
- }
- if (strip_info->firmware_level >= StructuredMessages) {
- /*
- * If this message has a valid checksum on the end, then the call to verify_checksum
- * will elevate the firmware_level to ChecksummedMessages for us. (The actual return
- * code from verify_checksum is ignored here.)
- */
- verify_checksum(strip_info);
- /*
- * If the radio has structured messages but we don't yet have all our information about it,
- * we should do probes without delay, until we have gathered all the information
- */
- if (!GOT_ALL_RADIO_INFO(strip_info))
- strip_info->watchdog_doprobe = jiffies;
- }
- }
-
- else if (has_prefix(msg, len, "005")) /* Bad count specification */
- RecvErr("Error Msg:", strip_info);
-
- else if (has_prefix(msg, len, "006")) /* Header too big */
- RecvErr("Error Msg:", strip_info);
-
- else if (has_prefix(msg, len, "007")) { /* Body too big */
- RecvErr("Error Msg:", strip_info);
- printk(KERN_ERR
- "%s: Error! Packet size too big for radio.\n",
- strip_info->dev->name);
- }
-
- else if (has_prefix(msg, len, "008")) { /* Bad character in name */
- RecvErr("Error Msg:", strip_info);
- printk(KERN_ERR
- "%s: Radio name contains illegal character\n",
- strip_info->dev->name);
- }
-
- else if (has_prefix(msg, len, "009")) /* No count or line terminator */
- RecvErr("Error Msg:", strip_info);
-
- else if (has_prefix(msg, len, "010")) /* Invalid checksum */
- RecvErr("Error Msg:", strip_info);
-
- else if (has_prefix(msg, len, "011")) /* Checksum didn't match */
- RecvErr("Error Msg:", strip_info);
-
- else if (has_prefix(msg, len, "012")) /* Failed to transmit packet */
- RecvErr("Error Msg:", strip_info);
-
- else
- RecvErr("Error Msg:", strip_info);
-}
-
-static void process_AT_response(struct strip *strip_info, __u8 * ptr,
- __u8 * end)
-{
- u_long len;
- __u8 *p = ptr;
- while (p < end && p[-1] != 10)
- p++; /* Skip past first newline character */
- /* Now ptr points to the AT command, and p points to the text of the response. */
- len = p - ptr;
-
-#if TICKLE_TIMERS
- {
- struct timeval tv;
- do_gettimeofday(&tv);
- printk(KERN_INFO "**** Got AT response %.7s at %02d.%06d\n",
- ptr, tv.tv_sec % 100, tv.tv_usec);
- }
-#endif
-
- if (has_prefix(ptr, len, "ATS300?"))
- get_radio_version(strip_info, p, end);
- else if (has_prefix(ptr, len, "ATS305?"))
- get_radio_address(strip_info, p);
- else if (has_prefix(ptr, len, "ATS311?"))
- get_radio_neighbours(&strip_info->poletops, p, end);
- else if (has_prefix(ptr, len, "ATS319=7"))
- verify_checksum(strip_info);
- else if (has_prefix(ptr, len, "ATS325?"))
- get_radio_voltage(strip_info, p, end);
- else if (has_prefix(ptr, len, "AT~LA"))
- get_radio_neighbours(&strip_info->portables, p, end);
- else
- RecvErr("Unknown AT Response:", strip_info);
-}
-
-static void process_ACK(struct strip *strip_info, __u8 * ptr, __u8 * end)
-{
- /* Currently we don't do anything with ACKs from the radio */
-}
-
-static void process_Info(struct strip *strip_info, __u8 * ptr, __u8 * end)
-{
- if (ptr + 16 > end)
- RecvErr("Bad Info Msg:", strip_info);
-}
-
-static struct net_device *get_strip_dev(struct strip *strip_info)
-{
- /* If our hardware address is *manually set* to zero, and we know our */
- /* real radio hardware address, try to find another strip device that has been */
- /* manually set to that address that we can 'transfer ownership' of this packet to */
- if (strip_info->manual_dev_addr &&
- !memcmp(strip_info->dev->dev_addr, zero_address.c,
- sizeof(zero_address))
- && memcmp(&strip_info->true_dev_addr, zero_address.c,
- sizeof(zero_address))) {
- struct net_device *dev;
- read_lock_bh(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
- if (dev->type == strip_info->dev->type &&
- !memcmp(dev->dev_addr,
- &strip_info->true_dev_addr,
- sizeof(MetricomAddress))) {
- printk(KERN_INFO
- "%s: Transferred packet ownership to %s.\n",
- strip_info->dev->name, dev->name);
- read_unlock_bh(&dev_base_lock);
- return (dev);
- }
- }
- read_unlock_bh(&dev_base_lock);
- }
- return (strip_info->dev);
-}
-
-/*
- * Send one completely decapsulated datagram to the next layer.
- */
-
-static void deliver_packet(struct strip *strip_info, STRIP_Header * header,
- __u16 packetlen)
-{
- struct sk_buff *skb = dev_alloc_skb(sizeof(STRIP_Header) + packetlen);
- if (!skb) {
- printk(KERN_ERR "%s: memory squeeze, dropping packet.\n",
- strip_info->dev->name);
- strip_info->rx_dropped++;
- } else {
- memcpy(skb_put(skb, sizeof(STRIP_Header)), header,
- sizeof(STRIP_Header));
- memcpy(skb_put(skb, packetlen), strip_info->rx_buff,
- packetlen);
- skb->dev = get_strip_dev(strip_info);
- skb->protocol = header->protocol;
- skb_reset_mac_header(skb);
-
- /* Having put a fake header on the front of the sk_buff for the */
- /* benefit of tools like tcpdump, skb_pull now 'consumes' that */
- /* fake header before we hand the packet up to the next layer. */
- skb_pull(skb, sizeof(STRIP_Header));
-
- /* Finally, hand the packet up to the next layer (e.g. IP or ARP, etc.) */
- strip_info->rx_packets++;
- strip_info->rx_pps_count++;
-#ifdef EXT_COUNTERS
- strip_info->rx_bytes += packetlen;
-#endif
- netif_rx(skb);
- }
-}
-
-static void process_IP_packet(struct strip *strip_info,
- STRIP_Header * header, __u8 * ptr,
- __u8 * end)
-{
- __u16 packetlen;
-
- /* Decode start of the IP packet header */
- ptr = UnStuffData(ptr, end, strip_info->rx_buff, 4);
- if (!ptr) {
- RecvErr("IP Packet too short", strip_info);
- return;
- }
-
- packetlen = ((__u16) strip_info->rx_buff[2] << 8) | strip_info->rx_buff[3];
-
- if (packetlen > MAX_RECV_MTU) {
- printk(KERN_INFO "%s: Dropping oversized received IP packet: %d bytes\n",
- strip_info->dev->name, packetlen);
- strip_info->rx_dropped++;
- return;
- }
-
- /*printk(KERN_INFO "%s: Got %d byte IP packet\n", strip_info->dev->name, packetlen); */
-
- /* Decode remainder of the IP packet */
- ptr =
- UnStuffData(ptr, end, strip_info->rx_buff + 4, packetlen - 4);
- if (!ptr) {
- RecvErr("IP Packet too short", strip_info);
- return;
- }
-
- if (ptr < end) {
- RecvErr("IP Packet too long", strip_info);
- return;
- }
-
- header->protocol = htons(ETH_P_IP);
-
- deliver_packet(strip_info, header, packetlen);
-}
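
The length extraction above relies on bytes 2 and 3 of the IPv4 header carrying the total length in network byte order, so the driver can unstuff just four bytes before deciding how much more to expect. A tiny illustration with a made-up 60-byte packet:

#include <stdio.h>

int main(void)
{
    /* version/ihl, tos, total length (big-endian) */
    unsigned char iphdr[4] = { 0x45, 0x00, 0x00, 0x3C };
    unsigned int total_len = ((unsigned int)iphdr[2] << 8) | iphdr[3];

    printf("IP total length = %u bytes\n", total_len);   /* prints 60 */
    return 0;
}
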
-
-static void process_ARP_packet(struct strip *strip_info,
- STRIP_Header * header, __u8 * ptr,
- __u8 * end)
-{
- __u16 packetlen;
- struct arphdr *arphdr = (struct arphdr *) strip_info->rx_buff;
-
- /* Decode start of the ARP packet */
- ptr = UnStuffData(ptr, end, strip_info->rx_buff, 8);
- if (!ptr) {
- RecvErr("ARP Packet too short", strip_info);
- return;
- }
-
- packetlen = 8 + (arphdr->ar_hln + arphdr->ar_pln) * 2;
-
- if (packetlen > MAX_RECV_MTU) {
- printk(KERN_INFO
- "%s: Dropping oversized received ARP packet: %d bytes\n",
- strip_info->dev->name, packetlen);
- strip_info->rx_dropped++;
- return;
- }
-
- /*printk(KERN_INFO "%s: Got %d byte ARP %s\n",
- strip_info->dev->name, packetlen,
- ntohs(arphdr->ar_op) == ARPOP_REQUEST ? "request" : "reply"); */
-
- /* Decode remainder of the ARP packet */
- ptr =
- UnStuffData(ptr, end, strip_info->rx_buff + 8, packetlen - 8);
- if (!ptr) {
- RecvErr("ARP Packet too short", strip_info);
- return;
- }
-
- if (ptr < end) {
- RecvErr("ARP Packet too long", strip_info);
- return;
- }
-
- header->protocol = htons(ETH_P_ARP);
-
- deliver_packet(strip_info, header, packetlen);
-}
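
The sizing rule above follows the ARP layout: a fixed 8-byte arphdr followed by two hardware and two protocol addresses, hence 8 + 2 * (ar_hln + ar_pln). A one-line check, assuming the 6-byte Metricom hardware address and 4-byte IPv4 address that STRIP carries:

#include <stdio.h>

int main(void)
{
    unsigned int ar_hln = 6;    /* hardware address length (Metricom, assumed 6) */
    unsigned int ar_pln = 4;    /* protocol address length (IPv4)                */
    unsigned int packetlen = 8 + (ar_hln + ar_pln) * 2;

    printf("ARP packet length = %u bytes\n", packetlen); /* prints 28 */
    return 0;
}
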
-
-/*
- * process_text_message processes a <CR>-terminated block of data received
- * from the radio that doesn't begin with a '*' character. All normal
- * Starmode communication messages with the radio begin with a '*',
- * so any text that does not begin with one indicates a serial port error, a radio that
- * is in Hayes command mode instead of Starmode, or a radio with really
- * old firmware that doesn't frame its Starmode responses properly.
- */
-static void process_text_message(struct strip *strip_info)
-{
- __u8 *msg = strip_info->sx_buff;
- int len = strip_info->sx_count;
-
- /* Check for anything that looks like it might be our radio name */
- /* (This is here for backwards compatibility with old firmware) */
- if (len == 9 && get_radio_address(strip_info, msg) == 0)
- return;
-
- if (text_equal(msg, len, "OK"))
- return; /* Ignore 'OK' responses from prior commands */
- if (text_equal(msg, len, "ERROR"))
- return; /* Ignore 'ERROR' messages */
- if (has_prefix(msg, len, "ate0q1"))
- return; /* Ignore character echo back from the radio */
-
- /* Catch other error messages */
- /* (This is here for backwards compatibility with old firmware) */
- if (has_prefix(msg, len, "ERR_")) {
- RecvErr_Message(strip_info, NULL, &msg[4], len - 4);
- return;
- }
-
- RecvErr("No initial *", strip_info);
-}
-
-/*
- * process_message processes a <CR>-terminated block of data received
- * from the radio. If the radio is not in Starmode or has old firmware,
- * it may be a line of text in response to an AT command. Ideally, with
- * a current radio that's properly in Starmode, all data received should
- * be properly framed and checksummed radio message blocks, containing
- * either a starmode packet, or other communication from the radio
- * firmware, like "INF_" Info messages and &COMMAND responses.
- */
-static void process_message(struct strip *strip_info)
-{
- STRIP_Header header = { zero_address, zero_address, 0 };
- __u8 *ptr = strip_info->sx_buff;
- __u8 *end = strip_info->sx_buff + strip_info->sx_count;
- __u8 sendername[32], *sptr = sendername;
- MetricomKey key;
-
- /*HexDump("Receiving", strip_info, ptr, end); */
-
- /* Check for start of address marker, and then skip over it */
- if (*ptr == '*')
- ptr++;
- else {
- process_text_message(strip_info);
- return;
- }
-
- /* Copy out the return address */
- while (ptr < end && *ptr != '*'
- && sptr < ARRAY_END(sendername) - 1)
- *sptr++ = *ptr++;
- *sptr = 0; /* Null terminate the sender name */
-
- /* Check for end of address marker, and skip over it */
- if (ptr >= end || *ptr != '*') {
- RecvErr("No second *", strip_info);
- return;
- }
- ptr++; /* Skip the second '*' */
-
- /* If the sender name is "&COMMAND", ignore this 'packet' */
- /* (This is here for backwards compatibility with old firmware) */
- if (!strcmp(sendername, "&COMMAND")) {
- strip_info->firmware_level = NoStructure;
- strip_info->next_command = CompatibilityCommand;
- return;
- }
-
- if (ptr + 4 > end) {
- RecvErr("No proto key", strip_info);
- return;
- }
-
- /* Get the protocol key out of the buffer */
- key.c[0] = *ptr++;
- key.c[1] = *ptr++;
- key.c[2] = *ptr++;
- key.c[3] = *ptr++;
-
- /* If we're using checksums, verify the checksum at the end of the packet */
- if (strip_info->firmware_level >= ChecksummedMessages) {
- end -= 4; /* Chop the last four bytes off the packet (they're the checksum) */
- if (ptr > end) {
- RecvErr("Missing Checksum", strip_info);
- return;
- }
- if (!verify_checksum(strip_info)) {
- RecvErr("Bad Checksum", strip_info);
- return;
- }
- }
-
- /*printk(KERN_INFO "%s: Got packet from \"%s\".\n", strip_info->dev->name, sendername); */
-
- /*
- * Fill in (pseudo) source and destination addresses in the packet.
- * We assume that the destination address was our address (the radio does not
- * tell us this). If the radio supplies a source address, then we use it.
- */
- header.dst_addr = strip_info->true_dev_addr;
- string_to_radio_address(&header.src_addr, sendername);
-
-#ifdef EXT_COUNTERS
- if (key.l == SIP0Key.l) {
- strip_info->rx_rbytes += (end - ptr);
- process_IP_packet(strip_info, &header, ptr, end);
- } else if (key.l == ARP0Key.l) {
- strip_info->rx_rbytes += (end - ptr);
- process_ARP_packet(strip_info, &header, ptr, end);
- } else if (key.l == ATR_Key.l) {
- strip_info->rx_ebytes += (end - ptr);
- process_AT_response(strip_info, ptr, end);
- } else if (key.l == ACK_Key.l) {
- strip_info->rx_ebytes += (end - ptr);
- process_ACK(strip_info, ptr, end);
- } else if (key.l == INF_Key.l) {
- strip_info->rx_ebytes += (end - ptr);
- process_Info(strip_info, ptr, end);
- } else if (key.l == ERR_Key.l) {
- strip_info->rx_ebytes += (end - ptr);
- RecvErr_Message(strip_info, sendername, ptr, end - ptr);
- } else
- RecvErr("Unrecognized protocol key", strip_info);
-#else
- if (key.l == SIP0Key.l)
- process_IP_packet(strip_info, &header, ptr, end);
- else if (key.l == ARP0Key.l)
- process_ARP_packet(strip_info, &header, ptr, end);
- else if (key.l == ATR_Key.l)
- process_AT_response(strip_info, ptr, end);
- else if (key.l == ACK_Key.l)
- process_ACK(strip_info, ptr, end);
- else if (key.l == INF_Key.l)
- process_Info(strip_info, ptr, end);
- else if (key.l == ERR_Key.l)
- RecvErr_Message(strip_info, sendername, ptr, end - ptr);
- else
- RecvErr("Unrecognized protocol key", strip_info);
-#endif
-}
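
A minimal userspace sketch of the receive-side parse performed above: a Starmode message arrives as "*<sender>*<key><payload>[<4-hex checksum>]", so the code peels off the sender name between the two '*' markers and then the four-character protocol key. The sample frame is invented, and the real driver compares the key as a 32-bit value rather than as text.

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char frame[] = "*1234-5678*SIP0...stuffed payload bytes...";
    const char *p = frame, *end = frame + strlen(frame);
    char sender[32], key[5] = { 0 };
    size_t n = 0;

    if (*p != '*')
        return 1;                   /* no leading '*': treat as a text message */
    p++;
    while (p < end && *p != '*' && n < sizeof(sender) - 1)
        sender[n++] = *p++;         /* copy out the sender's radio name        */
    sender[n] = '\0';
    if (p >= end || *p != '*')
        return 1;                   /* missing second '*'                      */
    p++;
    if (p + 4 > end)
        return 1;                   /* missing protocol key                    */
    memcpy(key, p, 4);
    p += 4;

    printf("from %s, key %s, %ld payload bytes\n", sender, key, (long)(end - p));
    return 0;
}
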
-
-#define TTYERROR(X) ((X) == TTY_BREAK ? "Break" : \
- (X) == TTY_FRAME ? "Framing Error" : \
- (X) == TTY_PARITY ? "Parity Error" : \
- (X) == TTY_OVERRUN ? "Hardware Overrun" : "Unknown Error")
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of STRIP data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing.
- */
-
-static void strip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
-{
- struct strip *strip_info = tty->disc_data;
- const unsigned char *end = cp + count;
-
- if (!strip_info || strip_info->magic != STRIP_MAGIC
- || !netif_running(strip_info->dev))
- return;
-
- spin_lock_bh(&strip_lock);
-#if 0
- {
- struct timeval tv;
- do_gettimeofday(&tv);
- printk(KERN_INFO
- "**** strip_receive_buf: %3d bytes at %02d.%06d\n",
- count, tv.tv_sec % 100, tv.tv_usec);
- }
-#endif
-
-#ifdef EXT_COUNTERS
- strip_info->rx_sbytes += count;
-#endif
-
- /* Read the characters out of the buffer */
- while (cp < end) {
- if (fp && *fp)
- printk(KERN_INFO "%s: %s on serial port\n",
- strip_info->dev->name, TTYERROR(*fp));
- if (fp && *fp++ && !strip_info->discard) { /* If there's a serial error, record it */
- /* If we have some characters in the buffer, discard them */
- strip_info->discard = strip_info->sx_count;
- strip_info->rx_errors++;
- }
-
- /* Leading control characters (CR, NL, Tab, etc.) are ignored */
- if (strip_info->sx_count > 0 || *cp >= ' ') {
- if (*cp == 0x0D) { /* If end of packet, decide what to do with it */
- if (strip_info->sx_count > 3000)
- printk(KERN_INFO
- "%s: Cut a %d byte packet (%zd bytes remaining)%s\n",
- strip_info->dev->name,
- strip_info->sx_count,
- end - cp - 1,
- strip_info->
- discard ? " (discarded)" :
- "");
- if (strip_info->sx_count >
- strip_info->sx_size) {
- strip_info->rx_over_errors++;
- printk(KERN_INFO
- "%s: sx_buff overflow (%d bytes total)\n",
- strip_info->dev->name,
- strip_info->sx_count);
- } else if (strip_info->discard)
- printk(KERN_INFO
- "%s: Discarding bad packet (%d/%d)\n",
- strip_info->dev->name,
- strip_info->discard,
- strip_info->sx_count);
- else
- process_message(strip_info);
- strip_info->discard = 0;
- strip_info->sx_count = 0;
- } else {
- /* Make sure we have space in the buffer */
- if (strip_info->sx_count <
- strip_info->sx_size)
- strip_info->sx_buff[strip_info->
- sx_count] =
- *cp;
- strip_info->sx_count++;
- }
- }
- cp++;
- }
- spin_unlock_bh(&strip_lock);
-}
-
-
-/************************************************************************/
-/* General control routines */
-
-static int set_mac_address(struct strip *strip_info,
- MetricomAddress * addr)
-{
- /*
- * We're using a manually specified address if the address is set
- * to anything other than all ones. Setting the address to all ones
- * disables manual mode and goes back to automatic address determination
- * (tracking the true address that the radio has).
- */
- strip_info->manual_dev_addr =
- memcmp(addr->c, broadcast_address.c,
- sizeof(broadcast_address));
- if (strip_info->manual_dev_addr)
- *(MetricomAddress *) strip_info->dev->dev_addr = *addr;
- else
- *(MetricomAddress *) strip_info->dev->dev_addr =
- strip_info->true_dev_addr;
- return 0;
-}
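
A small sketch of the address-mode rule implemented above, assuming (as the comment states) that the all-ones broadcast address is the sentinel: writing it re-enables automatic tracking of the radio's true address, while anything else pins a manual address. The sample addresses are invented.

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

static int is_manual(const unsigned char *requested)
{
    static const unsigned char all_ones[ADDR_LEN] =
        { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

    /* Anything other than the all-ones broadcast address pins a manual address */
    return memcmp(requested, all_ones, ADDR_LEN) != 0;
}

int main(void)
{
    unsigned char pinned[ADDR_LEN] = { 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };
    unsigned char sentinel[ADDR_LEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

    printf("00:00:12:34:56:78 -> %s\n", is_manual(pinned) ? "manual" : "automatic");
    printf("ff:ff:ff:ff:ff:ff -> %s\n", is_manual(sentinel) ? "manual" : "automatic");
    return 0;
}
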
-
-static int strip_set_mac_address(struct net_device *dev, void *addr)
-{
- struct strip *strip_info = netdev_priv(dev);
- struct sockaddr *sa = addr;
- printk(KERN_INFO "%s: strip_set_dev_mac_address called\n", dev->name);
- set_mac_address(strip_info, (MetricomAddress *) sa->sa_data);
- return 0;
-}
-
-static struct net_device_stats *strip_get_stats(struct net_device *dev)
-{
- struct strip *strip_info = netdev_priv(dev);
- static struct net_device_stats stats;
-
- memset(&stats, 0, sizeof(struct net_device_stats));
-
- stats.rx_packets = strip_info->rx_packets;
- stats.tx_packets = strip_info->tx_packets;
- stats.rx_dropped = strip_info->rx_dropped;
- stats.tx_dropped = strip_info->tx_dropped;
- stats.tx_errors = strip_info->tx_errors;
- stats.rx_errors = strip_info->rx_errors;
- stats.rx_over_errors = strip_info->rx_over_errors;
- return (&stats);
-}
-
-
-/************************************************************************/
-/* Opening and closing */
-
-/*
- * Here's the order things happen:
- * When the user runs "slattach -p strip ..."
- * 1. The TTY module calls strip_open
- * 2. strip_open calls strip_alloc
- * 3. strip_alloc calls register_netdev
- * 4. register_netdev calls strip_dev_init
- * 5. then strip_open finishes setting up the strip_info
- *
- * When the user runs "ifconfig st<x> up address netmask ..."
- * 6. strip_open_low gets called
- *
- * When the user runs "ifconfig st<x> down"
- * 7. strip_close_low gets called
- *
- * When the user kills the slattach process
- * 8. strip_close gets called
- * 9. strip_close calls dev_close
- * 10. if the device is still up, then dev_close calls strip_close_low
- * 11. strip_close calls strip_free
- */
-
-/* Open the low-level part of the STRIP channel. Easy! */
-
-static int strip_open_low(struct net_device *dev)
-{
- struct strip *strip_info = netdev_priv(dev);
-
- if (strip_info->tty == NULL)
- return (-ENODEV);
-
- if (!allocate_buffers(strip_info, dev->mtu))
- return (-ENOMEM);
-
- strip_info->sx_count = 0;
- strip_info->tx_left = 0;
-
- strip_info->discard = 0;
- strip_info->working = FALSE;
- strip_info->firmware_level = NoStructure;
- strip_info->next_command = CompatibilityCommand;
- strip_info->user_baud = tty_get_baud_rate(strip_info->tty);
-
- printk(KERN_INFO "%s: Initializing Radio.\n",
- strip_info->dev->name);
- ResetRadio(strip_info);
- strip_info->idle_timer.expires = jiffies + 1 * HZ;
- add_timer(&strip_info->idle_timer);
- netif_wake_queue(dev);
- return (0);
-}
-
-
-/*
- * Close the low-level part of the STRIP channel. Easy!
- */
-
-static int strip_close_low(struct net_device *dev)
-{
- struct strip *strip_info = netdev_priv(dev);
-
- if (strip_info->tty == NULL)
- return -EBUSY;
- clear_bit(TTY_DO_WRITE_WAKEUP, &strip_info->tty->flags);
- netif_stop_queue(dev);
-
- /*
- * Free all STRIP frame buffers.
- */
- kfree(strip_info->rx_buff);
- strip_info->rx_buff = NULL;
- kfree(strip_info->sx_buff);
- strip_info->sx_buff = NULL;
- kfree(strip_info->tx_buff);
- strip_info->tx_buff = NULL;
-
- del_timer(&strip_info->idle_timer);
- return 0;
-}
-
-static const struct header_ops strip_header_ops = {
- .create = strip_header,
- .rebuild = strip_rebuild_header,
-};
-
-
-static const struct net_device_ops strip_netdev_ops = {
- .ndo_open = strip_open_low,
- .ndo_stop = strip_close_low,
- .ndo_start_xmit = strip_xmit,
- .ndo_set_mac_address = strip_set_mac_address,
- .ndo_get_stats = strip_get_stats,
- .ndo_change_mtu = strip_change_mtu,
-};
-
-/*
- * This routine is called by DDI when the
- * (dynamically assigned) device is registered
- */
-
-static void strip_dev_setup(struct net_device *dev)
-{
- /*
- * Finish setting up the DEVICE info.
- */
-
- dev->trans_start = 0;
- dev->tx_queue_len = 30; /* Drop after 30 frames queued */
-
- dev->flags = 0;
- dev->mtu = DEFAULT_STRIP_MTU;
- dev->type = ARPHRD_METRICOM; /* dtang */
- dev->hard_header_len = sizeof(STRIP_Header);
- /*
- * netdev_priv(dev) Already holds a pointer to our struct strip
- */
-
- *(MetricomAddress *)dev->broadcast = broadcast_address;
- dev->dev_addr[0] = 0;
- dev->addr_len = sizeof(MetricomAddress);
-
- dev->header_ops = &strip_header_ops,
- dev->netdev_ops = &strip_netdev_ops;
-}
-
-/*
- * Free a STRIP channel.
- */
-
-static void strip_free(struct strip *strip_info)
-{
- spin_lock_bh(&strip_lock);
- list_del_rcu(&strip_info->list);
- spin_unlock_bh(&strip_lock);
-
- strip_info->magic = 0;
-
- free_netdev(strip_info->dev);
-}
-
-
-/*
- * Allocate a new free STRIP channel
- */
-static struct strip *strip_alloc(void)
-{
- struct list_head *n;
- struct net_device *dev;
- struct strip *strip_info;
-
- dev = alloc_netdev(sizeof(struct strip), "st%d",
- strip_dev_setup);
-
- if (!dev)
- return NULL; /* If no more memory, return */
-
-
- strip_info = netdev_priv(dev);
- strip_info->dev = dev;
-
- strip_info->magic = STRIP_MAGIC;
- strip_info->tty = NULL;
-
- strip_info->gratuitous_arp = jiffies + LongTime;
- strip_info->arp_interval = 0;
- init_timer(&strip_info->idle_timer);
- strip_info->idle_timer.data = (long) dev;
- strip_info->idle_timer.function = strip_IdleTask;
-
-
- spin_lock_bh(&strip_lock);
- rescan:
- /*
- * Search the list to find where to put our new entry
- * (and in the process decide what channel number it is
- * going to be)
- */
- list_for_each(n, &strip_list) {
- struct strip *s = hlist_entry(n, struct strip, list);
-
- if (s->dev->base_addr == dev->base_addr) {
- ++dev->base_addr;
- goto rescan;
- }
- }
-
- sprintf(dev->name, "st%ld", dev->base_addr);
-
- list_add_tail_rcu(&strip_info->list, &strip_list);
- spin_unlock_bh(&strip_lock);
-
- return strip_info;
-}
-
-/*
- * Open the high-level part of the STRIP channel.
- * This function is called by the TTY module when the
- * STRIP line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free STRIP channel...
- */
-
-static int strip_open(struct tty_struct *tty)
-{
- struct strip *strip_info = tty->disc_data;
-
- /*
- * First make sure we're not already connected.
- */
-
- if (strip_info && strip_info->magic == STRIP_MAGIC)
- return -EEXIST;
-
- /*
- * We need a write method.
- */
-
- if (tty->ops->write == NULL || tty->ops->set_termios == NULL)
- return -EOPNOTSUPP;
-
- /*
- * OK. Find a free STRIP channel to use.
- */
- if ((strip_info = strip_alloc()) == NULL)
- return -ENFILE;
-
- /*
- * Register our newly created device so it can be ifconfig'd
- * strip_dev_init() will be called as a side-effect
- */
-
- if (register_netdev(strip_info->dev) != 0) {
- printk(KERN_ERR "strip: register_netdev() failed.\n");
- strip_free(strip_info);
- return -ENFILE;
- }
-
- strip_info->tty = tty;
- tty->disc_data = strip_info;
- tty->receive_room = 65536;
-
- tty_driver_flush_buffer(tty);
-
- /*
- * Restore default settings
- */
-
- strip_info->dev->type = ARPHRD_METRICOM; /* dtang */
-
- /*
- * Set tty options
- */
-
- tty->termios->c_iflag |= IGNBRK | IGNPAR; /* Ignore breaks and parity errors. */
- tty->termios->c_cflag |= CLOCAL; /* Ignore modem control signals. */
- tty->termios->c_cflag &= ~HUPCL; /* Don't close on hup */
-
- printk(KERN_INFO "STRIP: device \"%s\" activated\n",
- strip_info->dev->name);
-
- /*
- * Done. We have linked the TTY line to a channel.
- */
- return (strip_info->dev->base_addr);
-}
-
-/*
- * Close down a STRIP channel.
- * This means flushing out any pending queues, and then restoring the
- * TTY line discipline to what it was before it got hooked to STRIP
- * (which usually is TTY again).
- */
-
-static void strip_close(struct tty_struct *tty)
-{
- struct strip *strip_info = tty->disc_data;
-
- /*
- * First make sure we're connected.
- */
-
- if (!strip_info || strip_info->magic != STRIP_MAGIC)
- return;
-
- unregister_netdev(strip_info->dev);
-
- tty->disc_data = NULL;
- strip_info->tty = NULL;
- printk(KERN_INFO "STRIP: device \"%s\" closed down\n",
- strip_info->dev->name);
- strip_free(strip_info);
- tty->disc_data = NULL;
-}
-
-
-/************************************************************************/
-/* Perform I/O control calls on an active STRIP channel. */
-
-static int strip_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct strip *strip_info = tty->disc_data;
-
- /*
- * First make sure we're connected.
- */
-
- if (!strip_info || strip_info->magic != STRIP_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- if(copy_to_user((void __user *) arg, strip_info->dev->name, strlen(strip_info->dev->name) + 1))
- return -EFAULT;
- break;
- case SIOCSIFHWADDR:
- {
- MetricomAddress addr;
- //printk(KERN_INFO "%s: SIOCSIFHWADDR\n", strip_info->dev->name);
- if(copy_from_user(&addr, (void __user *) arg, sizeof(MetricomAddress)))
- return -EFAULT;
- return set_mac_address(strip_info, &addr);
- }
- default:
- return tty_mode_ioctl(tty, file, cmd, arg);
- break;
- }
- return 0;
-}
-
-#ifdef CONFIG_COMPAT
-static long strip_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCSIFHWADDR:
- return strip_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
- return -ENOIOCTLCMD;
-}
-#endif
-
-/************************************************************************/
-/* Initialization */
-
-static struct tty_ldisc_ops strip_ldisc = {
- .magic = TTY_LDISC_MAGIC,
- .name = "strip",
- .owner = THIS_MODULE,
- .open = strip_open,
- .close = strip_close,
- .ioctl = strip_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = strip_compat_ioctl,
-#endif
- .receive_buf = strip_receive_buf,
- .write_wakeup = strip_write_some_more,
-};
-
-/*
- * Initialize the STRIP driver.
- * This routine is called at boot time, to bootstrap the multi-channel
- * STRIP driver
- */
-
-static char signon[] __initdata =
- KERN_INFO "STRIP: Version %s (unlimited channels)\n";
-
-static int __init strip_init_driver(void)
-{
- int status;
-
- printk(signon, StripVersion);
-
-
- /*
- * Fill in our line protocol discipline, and register it
- */
- if ((status = tty_register_ldisc(N_STRIP, &strip_ldisc)))
- printk(KERN_ERR "STRIP: can't register line discipline (err = %d)\n",
- status);
-
- /*
- * Register the status file with /proc
- */
- proc_net_fops_create(&init_net, "strip", S_IFREG | S_IRUGO, &strip_seq_fops);
-
- return status;
-}
-
-module_init(strip_init_driver);
-
-static const char signoff[] __exitdata =
- KERN_INFO "STRIP: Module Unloaded\n";
-
-static void __exit strip_exit_driver(void)
-{
- int i;
- struct list_head *p,*n;
-
- /* module ref count rules assure that all entries are unregistered */
- list_for_each_safe(p, n, &strip_list) {
- struct strip *s = list_entry(p, struct strip, list);
- strip_free(s);
- }
-
- /* Unregister with the /proc/net file here. */
- proc_net_remove(&init_net, "strip");
-
- if ((i = tty_unregister_ldisc(N_STRIP)))
- printk(KERN_ERR "STRIP: can't unregister line discipline (err = %d)\n", i);
-
- printk(signoff);
-}
-
-module_exit(strip_exit_driver);
-
-MODULE_AUTHOR("Stuart Cheshire <cheshire@cs.stanford.edu>");
-MODULE_DESCRIPTION("Starmode Radio IP (STRIP) Device Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-MODULE_SUPPORTED_DEVICE("Starmode Radio IP (STRIP) modem");
diff --git a/drivers/staging/ti-st/Kconfig b/drivers/staging/ti-st/Kconfig
new file mode 100644
index 00000000000..3ab204ddc29
--- /dev/null
+++ b/drivers/staging/ti-st/Kconfig
@@ -0,0 +1,25 @@
+#
+# TI's shared transport line discipline and the protocol
+# drivers (BT, FM and GPS)
+#
+menu "Texas Instruments shared transport line discipline"
+config TI_ST
+ tristate "shared transport core driver"
+ depends on RFKILL
+ select FW_LOADER
+ help
+ This enables the shared transport core driver for TI
+ BT / FM and GPS combo chips. This enables protocol drivers
+	  to register themselves with the core and send data; the responses
+	  are returned to the relevant protocol drivers based on their
+ packet types.
+
+config ST_BT
+ tristate "BlueZ bluetooth driver for ST"
+ depends on BT
+ select TI_ST
+ help
+ This enables the Bluetooth driver for TI BT/FM/GPS combo devices.
+ This makes use of shared transport line discipline core driver to
+ communicate with the BT core of the combo chip.
+endmenu
diff --git a/drivers/staging/ti-st/Makefile b/drivers/staging/ti-st/Makefile
new file mode 100644
index 00000000000..0167d1d2c25
--- /dev/null
+++ b/drivers/staging/ti-st/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for TI's shared transport line discipline
+# and its protocol drivers (BT, FM, GPS)
+#
+obj-$(CONFIG_TI_ST) += st_drv.o
+st_drv-objs := st_core.o st_kim.o st_ll.o
+obj-$(CONFIG_ST_BT) += bt_drv.o
diff --git a/drivers/staging/ti-st/TODO b/drivers/staging/ti-st/TODO
new file mode 100644
index 00000000000..2c4fe583901
--- /dev/null
+++ b/drivers/staging/ti-st/TODO
@@ -0,0 +1,19 @@
+TODO:
+
+1. A per-device/tty port context is required to support multiple devices
+on the same platform.
+
+2. REMOVE the sysfs entry PID passing mechanism, since there should
+be a better way to request user-space to install the line discipline.
+
+3. Review and rework the locking.
+
+4. Restructure to make the ldisc driver more generic for chipsets which mux
+multiple connectivity protocols (BT, FM, GPS) over one TTY port.
+
+5. Step up and maintain this driver to ensure that it continues
+to work. Having the hardware for this is pretty much a
+requirement. If this does not happen, the driver will be removed in
+the 2.6.35 kernel release.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/ti-st/bt_drv.c b/drivers/staging/ti-st/bt_drv.c
new file mode 100644
index 00000000000..d8420b5c91f
--- /dev/null
+++ b/drivers/staging/ti-st/bt_drv.c
@@ -0,0 +1,502 @@
+/*
+ * Texas Instrument's Bluetooth Driver For Shared Transport.
+ *
+ * Bluetooth Driver acts as interface between HCI CORE and
+ * TI Shared Transport Layer.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "st.h"
+#include "bt_drv.h"
+
+/* Define this macro to get debug msg */
+#undef DEBUG
+
+#ifdef DEBUG
+#define BT_DRV_DBG(fmt, arg...) printk(KERN_INFO "(btdrv):"fmt"\n" , ## arg)
+#define BTDRV_API_START() printk(KERN_INFO "(btdrv): %s Start\n", \
+ __func__)
+#define BTDRV_API_EXIT(errno) printk(KERN_INFO "(btdrv): %s Exit(%d)\n", \
+ __func__, errno)
+#else
+#define BT_DRV_DBG(fmt, arg...)
+#define BTDRV_API_START()
+#define BTDRV_API_EXIT(errno)
+#endif
+
+#define BT_DRV_ERR(fmt, arg...) printk(KERN_ERR "(btdrv):"fmt"\n" , ## arg)
+
+static int reset;
+static struct hci_st *hst;
+
+/* Increments HCI counters based on packet ID (cmd,acl,sco) */
+static inline void hci_st_tx_complete(struct hci_st *hst, int pkt_type)
+{
+ struct hci_dev *hdev;
+
+ BTDRV_API_START();
+
+ hdev = hst->hdev;
+
+ /* Update HCI stat counters */
+ switch (pkt_type) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+		hdev->stat.sco_tx++;
+ break;
+ }
+
+ BTDRV_API_EXIT(0);
+}
+
+/* ------- Interfaces to Shared Transport ------ */
+
+/* Called by ST layer to indicate protocol registration completion
+ * status. hci_st_open() will wait for a signal from this
+ * callback when st_register() returns ST_ERR_PENDING.
+ */
+static void hci_st_registration_completion_cb(char data)
+{
+ BTDRV_API_START();
+
+	/* hci_st_open() needs the value of 'data' to know the
+	 * registration status (success/fail), so keep a copy
+	 * of it.
+ */
+ hst->streg_cbdata = data;
+
+	/* Got feedback from ST for the BT driver registration
+	 * request. Wake up hci_st_open() so it can continue
+	 * its open operation.
+ */
+ complete(&hst->wait_for_btdrv_reg_completion);
+
+ BTDRV_API_EXIT(0);
+}
+
+/* Called by Shared Transport layer when receive data is
+ * available */
+static long hci_st_receive(struct sk_buff *skb)
+{
+ int err;
+ int len;
+
+ BTDRV_API_START();
+
+ err = 0;
+ len = 0;
+
+ if (skb == NULL) {
+ BT_DRV_ERR("Invalid SKB received from ST");
+ BTDRV_API_EXIT(-EFAULT);
+ return -EFAULT;
+ }
+ if (!hst) {
+ kfree_skb(skb);
+ BT_DRV_ERR("Invalid hci_st memory,freeing SKB");
+ BTDRV_API_EXIT(-EFAULT);
+ return -EFAULT;
+ }
+ if (!test_bit(BT_DRV_RUNNING, &hst->flags)) {
+ kfree_skb(skb);
+ BT_DRV_ERR("Device is not running,freeing SKB");
+ BTDRV_API_EXIT(-EINVAL);
+ return -EINVAL;
+ }
+
+ len = skb->len;
+ skb->dev = (struct net_device *)hst->hdev;
+
+ /* Forward skb to HCI CORE layer */
+ err = hci_recv_frame(skb);
+ if (err) {
+ kfree_skb(skb);
+ BT_DRV_ERR("Unable to push skb to HCI CORE(%d),freeing SKB",
+ err);
+ BTDRV_API_EXIT(err);
+ return err;
+ }
+ hst->hdev->stat.byte_rx += len;
+
+ BTDRV_API_EXIT(0);
+ return 0;
+}
+
+/* ------- Interfaces to HCI layer ------ */
+
+/* Called from HCI core to initialize the device */
+static int hci_st_open(struct hci_dev *hdev)
+{
+ static struct st_proto_s hci_st_proto;
+ unsigned long timeleft;
+ int err;
+
+ BTDRV_API_START();
+
+ err = 0;
+
+ BT_DRV_DBG("%s %p", hdev->name, hdev);
+
+ /* Already registered with ST ? */
+ if (test_bit(BT_ST_REGISTERED, &hst->flags)) {
+ BT_DRV_ERR("Registered with ST already,open called again?");
+ BTDRV_API_EXIT(0);
+ return 0;
+ }
+
+ /* Populate BT driver info required by ST */
+ memset(&hci_st_proto, 0, sizeof(hci_st_proto));
+
+ /* BT driver ID */
+ hci_st_proto.type = ST_BT;
+
+	/* Receive function, called from ST */
+ hci_st_proto.recv = hci_st_receive;
+
+	/* Packet match function, may be used in the future */
+ hci_st_proto.match_packet = NULL;
+
+ /* Callback to be called when registration is pending */
+ hci_st_proto.reg_complete_cb = hci_st_registration_completion_cb;
+
+	/* This is ST's write function pointer. The BT driver will use it to
+	 * send packets to the chip. ST assigns it and hands it back to us,
+	 * so leave it NULL here */
+ hci_st_proto.write = NULL;
+
+ /* Register with ST layer */
+ err = st_register(&hci_st_proto);
+ if (err == ST_ERR_PENDING) {
+ /* Prepare wait-for-completion handler data structures.
+ * Needed to synchronize this function and
+ * hci_st_registration_completion_cb().
+ */
+ init_completion(&hst->wait_for_btdrv_reg_completion);
+
+ /* Reset the ST registration callback status flag; this value
+ * is updated in hci_st_registration_completion_cb() when it
+ * is called from the ST driver.
+ */
+ hst->streg_cbdata = -EINPROGRESS;
+
+ /* ST is busy with another protocol registration (possibly a
+ * firmware download), so wait until the registration callback
+ * (passed as an argument to st_register()) is called from ST.
+ */
+ BT_DRV_DBG(" %s waiting for reg completion signal from ST",
+ __func__);
+
+ timeleft =
+ wait_for_completion_timeout
+ (&hst->wait_for_btdrv_reg_completion,
+ msecs_to_jiffies(BT_REGISTER_TIMEOUT));
+ if (!timeleft) {
+ BT_DRV_ERR("Timeout(%ld sec),didn't get reg"
+ "completion signal from ST",
+ BT_REGISTER_TIMEOUT / 1000);
+ BTDRV_API_EXIT(-ETIMEDOUT);
+ return -ETIMEDOUT;
+ }
+
+ /* Is ST registration callback called with ERROR value? */
+ if (hst->streg_cbdata != 0) {
+ BT_DRV_ERR("ST reg completion CB called with invalid"
+ "status %d", hst->streg_cbdata);
+ BTDRV_API_EXIT(-EAGAIN);
+ return -EAGAIN;
+ }
+ err = 0;
+ } else if (err == ST_ERR_FAILURE) {
+ BT_DRV_ERR("st_register failed %d", err);
+ BTDRV_API_EXIT(-EAGAIN);
+ return -EAGAIN;
+ }
+
+ /* Do we have proper ST write function? */
+ if (hci_st_proto.write != NULL) {
+ /* We need this pointer for sending any Bluetooth pkts */
+ hst->st_write = hci_st_proto.write;
+ } else {
+ BT_DRV_ERR("failed to get ST write func pointer");
+
+ /* Undo registration with ST */
+ err = st_unregister(ST_BT);
+ if (err < 0)
+ BT_DRV_ERR("st_unregister failed %d", err);
+
+ hst->st_write = NULL;
+ BTDRV_API_EXIT(-EAGAIN);
+ return -EAGAIN;
+ }
+
+ /* Registration with ST layer is completed successfully,
+ * now chip is ready to accept commands from HCI CORE.
+ * Mark HCI Device flag as RUNNING
+ */
+ set_bit(HCI_RUNNING, &hdev->flags);
+
+ /* Registration with ST successful */
+ set_bit(BT_ST_REGISTERED, &hst->flags);
+
+ BTDRV_API_EXIT(err);
+ return err;
+}
+
+/* Close device */
+static int hci_st_close(struct hci_dev *hdev)
+{
+ int err;
+
+ BTDRV_API_START();
+
+ err = 0;
+
+ /* Unregister from ST layer */
+ if (test_and_clear_bit(BT_ST_REGISTERED, &hst->flags)) {
+ err = st_unregister(ST_BT);
+ if (err != ST_SUCCESS) {
+ BT_DRV_ERR("st_unregister failed %d", err);
+ BTDRV_API_EXIT(-EBUSY);
+ return -EBUSY;
+ }
+ }
+
+ hst->st_write = NULL;
+
+ /* The ST layer will have moved the chip to an inactive state,
+ * so clear the HCI device RUNNING flag.
+ */
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) {
+ BTDRV_API_EXIT(0);
+ return 0;
+ }
+
+ BTDRV_API_EXIT(err);
+ return err;
+}
+
+/* Called from HCI CORE, sends frames to the Shared Transport */
+static int hci_st_send_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev;
+ struct hci_st *hst;
+ long len;
+
+ BTDRV_API_START();
+
+ if (skb == NULL) {
+ BT_DRV_ERR("Invalid skb received from HCI CORE");
+ BTDRV_API_EXIT(-ENOMEM);
+ return -ENOMEM;
+ }
+ hdev = (struct hci_dev *)skb->dev;
+ if (!hdev) {
+ BT_DRV_ERR("SKB received for invalid HCI Device (hdev=NULL)");
+ BTDRV_API_EXIT(-ENODEV);
+ return -ENODEV;
+ }
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ BT_DRV_ERR("Device is not running");
+ BTDRV_API_EXIT(-EBUSY);
+ return -EBUSY;
+ }
+
+ hst = (struct hci_st *)hdev->driver_data;
+
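+ /* The ST core demultiplexes BT/FM/GPS traffic on the first
+ * byte of each frame, so the HCI packet type prepended below
+ * also acts as the channel identifier.
+ */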
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+ BT_DRV_DBG(" %s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
+ skb->len);
+
+ /* Insert skb to shared transport layer's transmit queue.
+ * Freeing skb memory is taken care in shared transport layer,
+ * so don't free skb memory here.
+ */
+ if (!hst->st_write) {
+ kfree_skb(skb);
+ BT_DRV_ERR(" Can't write to ST, st_write null?");
+ BTDRV_API_EXIT(-EAGAIN);
+ return -EAGAIN;
+ }
+ len = hst->st_write(skb);
+ if (len < 0) {
+ /* Something went wrong in the ST write, free the skb memory */
+ kfree_skb(skb);
+ BT_DRV_ERR(" ST write failed (%ld)", len);
+ BTDRV_API_EXIT(-EAGAIN);
+ return -EAGAIN;
+ }
+
+ /* ST accepted our skb, so go ahead and update the stats */
+ hdev->stat.byte_tx += len;
+ hci_st_tx_complete(hst, bt_cb(skb)->pkt_type);
+
+ BTDRV_API_EXIT(0);
+ return 0;
+}
+
+static void hci_st_destruct(struct hci_dev *hdev)
+{
+ BTDRV_API_START();
+
+ if (!hdev) {
+ BT_DRV_ERR("Destruct called with invalid HCI Device"
+ "(hdev=NULL)");
+ BTDRV_API_EXIT(0);
+ return;
+ }
+
+ BT_DRV_DBG("%s", hdev->name);
+
+ /* free hci_st memory */
+ kfree(hdev->driver_data);
+
+ BTDRV_API_EXIT(0);
+ return;
+}
+
+/* Creates new HCI device */
+static int hci_st_register_dev(struct hci_st *hst)
+{
+ struct hci_dev *hdev;
+
+ BTDRV_API_START();
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ BT_DRV_ERR("Can't allocate HCI device");
+ BTDRV_API_EXIT(-ENOMEM);
+ return -ENOMEM;
+ }
+ BT_DRV_DBG(" HCI device allocated. hdev= %p", hdev);
+
+ hst->hdev = hdev;
+ hdev->bus = HCI_UART;
+ hdev->driver_data = hst;
+ hdev->open = hci_st_open;
+ hdev->close = hci_st_close;
+ hdev->flush = NULL;
+ hdev->send = hci_st_send_frame;
+ hdev->destruct = hci_st_destruct;
+ hdev->owner = THIS_MODULE;
+
+ if (reset)
+ set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+
+ if (hci_register_dev(hdev) < 0) {
+ BT_DRV_ERR("Can't register HCI device");
+ hci_free_dev(hdev);
+ BTDRV_API_EXIT(-ENODEV);
+ return -ENODEV;
+ }
+
+ BT_DRV_DBG(" HCI device registered. hdev= %p", hdev);
+ BTDRV_API_EXIT(0);
+ return 0;
+}
+
+/* ------- Module Init interface ------ */
+
+static int __init bt_drv_init(void)
+{
+ int err;
+
+ BTDRV_API_START();
+
+ err = 0;
+
+ BT_DRV_DBG(" Bluetooth Driver Version %s", VERSION);
+
+ /* Allocate local resource memory */
+ hst = kzalloc(sizeof(struct hci_st), GFP_KERNEL);
+ if (!hst) {
+ BT_DRV_ERR("Can't allocate control structure");
+ BTDRV_API_EXIT(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ /* Expose "hciX" device to user space */
+ err = hci_st_register_dev(hst);
+ if (err) {
+ /* Release local resource memory */
+ kfree(hst);
+
+ BT_DRV_ERR("Unable to expose hci0 device(%d)", err);
+ BTDRV_API_EXIT(err);
+ return err;
+ }
+ set_bit(BT_DRV_RUNNING, &hst->flags);
+
+ BTDRV_API_EXIT(err);
+ return err;
+}
+
+/* ------- Module Exit interface ------ */
+
+static void __exit bt_drv_exit(void)
+{
+ BTDRV_API_START();
+
+ /* Deallocate local resource memory */
+ if (hst) {
+ struct hci_dev *hdev = hst->hdev;
+
+ if (hdev == NULL) {
+ BT_DRV_ERR("Invalid hdev memory");
+ kfree(hst);
+ } else {
+ hci_st_close(hdev);
+ if (test_and_clear_bit(BT_DRV_RUNNING, &hst->flags)) {
+ /* Remove HCI device (hciX) created
+ * in module init.
+ */
+ hci_unregister_dev(hdev);
+
+ /* Free HCI device memory */
+ hci_free_dev(hdev);
+ }
+ }
+ }
+ BTDRV_API_EXIT(0);
+}
+
+module_init(bt_drv_init);
+module_exit(bt_drv_exit);
+
+/* ------ Module Info ------ */
+
+module_param(reset, bool, 0644);
+MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
+MODULE_AUTHOR("Raja Mani <raja_mani@ti.com>");
+MODULE_DESCRIPTION("Bluetooth Driver for TI Shared Transport " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ti-st/bt_drv.h b/drivers/staging/ti-st/bt_drv.h
new file mode 100644
index 00000000000..a0beebec846
--- /dev/null
+++ b/drivers/staging/ti-st/bt_drv.h
@@ -0,0 +1,61 @@
+/*
+ * Texas Instrument's Bluetooth Driver For Shared Transport.
+ *
+ * Bluetooth Driver acts as interface between HCI CORE and
+ * TI Shared Transport Layer.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _BT_DRV_H
+#define _BT_DRV_H
+
+/* Bluetooth Driver Version */
+#define VERSION "1.0"
+
+/* Time (in milliseconds) to wait for the registration completion
+ * callback from ST, in case registration with ST returns a
+ * PENDING status
+ */
+#define BT_REGISTER_TIMEOUT 6000 /* 6 sec */
+
+/* BT driver's local status */
+#define BT_DRV_RUNNING 0
+#define BT_ST_REGISTERED 1
+
+/* BT driver operation structure */
+struct hci_st {
+
+ /* hci device pointer which binds to bt driver */
+ struct hci_dev *hdev;
+
+ /* used locally to maintain various BT driver states */
+ unsigned long flags;
+
+ /* to hold ST registration callback status */
+ char streg_cbdata;
+
+ /* write function pointer of ST driver */
+ long (*st_write) (struct sk_buff *);
+
+ /* Wait-on-completion handler needed to synchronize
+ * hci_st_open() and hci_st_registration_completion_cb().
+ */
+ struct completion wait_for_btdrv_reg_completion;
+};
+
+#endif
diff --git a/drivers/staging/ti-st/fm.h b/drivers/staging/ti-st/fm.h
new file mode 100644
index 00000000000..be41453649e
--- /dev/null
+++ b/drivers/staging/ti-st/fm.h
@@ -0,0 +1,13 @@
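+/* Minimal packet-header definitions for the FM (logical channel 8)
+ * and GPS (logical channel 9) traffic carried over the shared
+ * transport; used by the ST core receive state machine.
+ */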
+struct fm_event_hdr {
+ unsigned char plen;
+} __attribute__ ((packed));
+
+#define FM_MAX_FRAME_SIZE 0xFF /* TODO: */
+#define FM_EVENT_HDR_SIZE 1 /* size of fm_event_hdr */
+#define ST_FM_CH8_PKT 0x8
+
+/* gps stuff */
+struct gps_event_hdr {
+ unsigned char opcode;
+ unsigned short plen;
+} __attribute__ ((packed));
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
new file mode 100644
index 00000000000..e8fc97e32c9
--- /dev/null
+++ b/drivers/staging/ti-st/st.h
@@ -0,0 +1,90 @@
+/*
+ * Shared Transport Header file
+ * To be included by the protocol stack drivers for
+ * Texas Instruments BT,FM and GPS combo chip drivers
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef ST_H
+#define ST_H
+
+#include <linux/skbuff.h>
+/*
+ * st.h
+ */
+
+/* TODO:
+ * Move the following to tty.h upon acceptance
+ */
+#define N_TI_WL 20 /* Ldisc for TI's WL BT, FM, GPS combo chips */
+
+/* some GPIOs are active high, while others, like FM, are
+ * active low
+ */
+enum kim_gpio_state {
+ KIM_GPIO_INACTIVE,
+ KIM_GPIO_ACTIVE,
+};
+/*
+ * the list of protocols on chip
+ */
+enum proto_type {
+ ST_BT,
+ ST_FM,
+ ST_GPS,
+ ST_MAX,
+};
+
+enum {
+ ST_ERR_FAILURE = -1, /* check struct */
+ ST_SUCCESS,
+ ST_ERR_PENDING = -5, /* to call reg_complete_cb */
+ ST_ERR_ALREADY, /* already registered */
+ ST_ERR_INPROGRESS,
+ ST_ERR_NOPROTO, /* protocol not supported */
+};
+
+/* per protocol structure
+ * for BT/FM and GPS
+ */
+struct st_proto_s {
+ enum proto_type type;
+/*
+ * to be called by ST when data arrives
+ */
+ long (*recv) (struct sk_buff *);
+/*
+ * for future use, logic now to be in ST
+ */
+ unsigned char (*match_packet) (const unsigned char *data);
+/*
+ * a registration that returns PENDING is signalled
+ * complete via this callback function
+ */
+ void (*reg_complete_cb) (char data);
+/*
+ * write function, passed in as NULL and filled in by the
+ * ST core for use by the protocol driver
+ */
+ long (*write) (struct sk_buff *skb);
+};
+
+extern long st_register(struct st_proto_s *new_proto);
+extern long st_unregister(enum proto_type type);
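+
+/* Typical usage by a protocol driver (illustrative sketch; my_recv and
+ * my_reg_cb below are hypothetical callbacks, see bt_drv.c for the
+ * real BT example):
+ *
+ *   struct st_proto_s proto = {
+ *      .type = ST_BT,
+ *      .recv = my_recv,
+ *      .reg_complete_cb = my_reg_cb,
+ *      .match_packet = NULL,
+ *      .write = NULL,   (filled in by the ST core)
+ *   };
+ *   err = st_register(&proto);
+ *
+ * If st_register() returns ST_ERR_PENDING, wait for reg_complete_cb()
+ * before using proto.write(); afterwards frames are sent with
+ * proto.write(skb) and st_unregister() is called on teardown.
+ */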
+
+#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
new file mode 100644
index 00000000000..4e93694e1c2
--- /dev/null
+++ b/drivers/staging/ti-st/st_core.c
@@ -0,0 +1,1062 @@
+/*
+ * Shared Transport Line discipline driver Core
+ * This hooks up ST KIM driver and ST LL driver
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stc): " fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+
+/* understand BT, FM and GPS for now */
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+#include "fm.h"
+/*
+ * packet formats for fm and gps
+ * #include "gps.h"
+ */
+#include "st_core.h"
+#include "st_kim.h"
+#include "st_ll.h"
+#include "st.h"
+
+#ifdef DEBUG
+/* strings to be used for rfkill entries and by
+ * ST Core to be used for sysfs debug entry
+ */
+#define PROTO_ENTRY(type, name) name
+const unsigned char *protocol_strngs[] = {
+ PROTO_ENTRY(ST_BT, "Bluetooth"),
+ PROTO_ENTRY(ST_FM, "FM"),
+ PROTO_ENTRY(ST_GPS, "GPS"),
+};
+#endif
+/* function pointer pointing to either,
+ * st_kim_recv during registration to receive fw download responses
+ * st_int_recv after registration to receive proto stack responses
+ */
+void (*st_recv) (void*, const unsigned char*, long);
+
+/********************************************************************/
+#if 0
+/* internal misc functions */
+bool is_protocol_list_empty(void)
+{
+ unsigned char i = 0;
+ pr_info(" %s ", __func__);
+ for (i = 0; i < ST_MAX; i++) {
+ if (st_gdata->list[i] != NULL)
+ return ST_NOTEMPTY;
+ /* not empty */
+ }
+ /* list empty */
+ return ST_EMPTY;
+}
+#endif
+/* can be called from
+ * -- KIM (during fw download)
+ * -- ST Core (during st_write)
+ *
+ * This is the internal write function - a wrapper
+ * to tty->ops->write
+ */
+int st_int_write(struct st_data_s *st_gdata,
+ const unsigned char *data, int count)
+{
+#ifdef VERBOSE /* for debug */
+ int i;
+#endif
+ struct tty_struct *tty;
+ if (unlikely(st_gdata == NULL || st_gdata->tty == NULL)) {
+ pr_err("tty unavailable to perform write");
+ return ST_ERR_FAILURE;
+ }
+ tty = st_gdata->tty;
+#ifdef VERBOSE
+ printk(KERN_ERR "start data..\n");
+ for (i = 0; i < count; i++) /* no newlines for each datum */
+ printk(" %x", data[i]);
+ printk(KERN_ERR "\n ..end data\n");
+#endif
+ return tty->ops->write(tty, data, count);
+
+}
+
+/*
+ * push the skb received to relevant
+ * protocol stacks
+ */
+void st_send_frame(enum proto_type protoid, struct st_data_s *st_gdata)
+{
+ pr_info(" %s(prot:%d) ", __func__, protoid);
+
+ if (unlikely
+ (st_gdata == NULL || st_gdata->rx_skb == NULL
+ || st_gdata->list[protoid] == NULL)) {
+ pr_err("protocol %d not registered, no data to send?",
+ protoid);
+ if (st_gdata != NULL)
+ kfree_skb(st_gdata->rx_skb);
+ return;
+ }
+ /* this should not fail and should not take long -
+ * it is expected to be just a skb_queue_tail in the
+ * protocol stack driver
+ */
+ if (likely(st_gdata->list[protoid]->recv != NULL)) {
+ if (unlikely(st_gdata->list[protoid]->recv(st_gdata->rx_skb)
+ != ST_SUCCESS)) {
+ pr_err(" proto stack %d's ->recv failed", protoid);
+ kfree_skb(st_gdata->rx_skb);
+ return;
+ }
+ } else {
+ pr_err(" proto stack %d's ->recv null", protoid);
+ kfree_skb(st_gdata->rx_skb);
+ }
+ pr_info(" done %s", __func__);
+ return;
+}
+
+/*
+ * to call registration complete callbacks
+ * of all protocol stack drivers
+ */
+void st_reg_complete(struct st_data_s *st_gdata, char err)
+{
+ unsigned char i = 0;
+ pr_info(" %s ", __func__);
+ for (i = 0; i < ST_MAX; i++) {
+ if (likely(st_gdata != NULL && st_gdata->list[i] != NULL &&
+ st_gdata->list[i]->reg_complete_cb != NULL))
+ st_gdata->list[i]->reg_complete_cb(err);
+ }
+}
+
+static inline int st_check_data_len(struct st_data_s *st_gdata,
+ int protoid, int len)
+{
+ register int room = skb_tailroom(st_gdata->rx_skb);
+
+ pr_info("len %d room %d", len, room);
+
+ if (!len) {
+ /* Received packet has only packet header and
+ * has zero length payload. So, ask ST CORE to
+ * forward the packet to protocol driver (BT/FM/GPS)
+ */
+ st_send_frame(protoid, st_gdata);
+
+ } else if (len > room) {
+ /* Received packet's payload length is larger than the
+ * room available in the allocated skb.
+ */
+ pr_err("Data length is too large len %d room %d", len,
+ room);
+ kfree_skb(st_gdata->rx_skb);
+ } else {
+ /* Packet header has non-zero payload length and
+ * we have enough space in created skb. Lets read
+ * payload data */
+ st_gdata->rx_state = ST_BT_W4_DATA;
+ st_gdata->rx_count = len;
+ return len;
+ }
+
+ /* Change ST state to continue to process next
+ * packet */
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_skb = NULL;
+ st_gdata->rx_count = 0;
+
+ return 0;
+}
+
+/* internal function for action when wake-up ack
+ * received
+ */
+static inline void st_wakeup_ack(struct st_data_s *st_gdata,
+ unsigned char cmd)
+{
+ register struct sk_buff *waiting_skb;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ /* de-Q from waitQ and Q in txQ now that the
+ * chip is awake
+ */
+ while ((waiting_skb = skb_dequeue(&st_gdata->tx_waitq)))
+ skb_queue_tail(&st_gdata->txq, waiting_skb);
+
+ /* state forwarded to ST LL */
+ st_ll_sleep_state(st_gdata, (unsigned long)cmd);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ /* wake up to send the recently copied skbs from waitQ */
+ st_tx_wakeup(st_gdata);
+}
+
+/* Decodes received raw data and forwards it to the
+ * corresponding client drivers (Bluetooth, FM, GPS, etc.).
+ */
+void st_int_recv(void *disc_data,
+ const unsigned char *data, long count)
+{
+ register char *ptr;
+ struct hci_event_hdr *eh;
+ struct hci_acl_hdr *ah;
+ struct hci_sco_hdr *sh;
+ struct fm_event_hdr *fm;
+ struct gps_event_hdr *gps;
+ register int len = 0, type = 0, dlen = 0;
+ static enum proto_type protoid = ST_MAX;
+ struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
+
+ ptr = (char *)data;
+ /* tty_receive sent null ? */
+ if (unlikely(ptr == NULL) || (st_gdata == NULL)) {
+ pr_err(" received null from TTY ");
+ return;
+ }
+
+ pr_info("count %ld rx_state %ld"
+ "rx_count %ld", count, st_gdata->rx_state,
+ st_gdata->rx_count);
+
+ /* Decode received bytes here */
+ while (count) {
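+ /* Two phases per iteration: first finish filling a partially
+ * received packet (rx_count != 0), then treat the next byte as
+ * the channel/packet-type identifier and start a new packet.
+ */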
+ if (st_gdata->rx_count) {
+ len = min_t(unsigned int, st_gdata->rx_count, count);
+ memcpy(skb_put(st_gdata->rx_skb, len), ptr, len);
+ st_gdata->rx_count -= len;
+ count -= len;
+ ptr += len;
+
+ if (st_gdata->rx_count)
+ continue;
+
+ /* Check ST RX state machine , where are we? */
+ switch (st_gdata->rx_state) {
+
+ /* Waiting for complete packet ? */
+ case ST_BT_W4_DATA:
+ pr_info("Complete pkt received");
+
+ /* Ask ST CORE to forward
+ * the packet to protocol driver */
+ st_send_frame(protoid, st_gdata);
+
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_skb = NULL;
+ protoid = ST_MAX; /* is this required ? */
+ continue;
+
+ /* Waiting for Bluetooth event header ? */
+ case ST_BT_W4_EVENT_HDR:
+ eh = (struct hci_event_hdr *)st_gdata->rx_skb->
+ data;
+
+ pr_info("Event header: evt 0x%2.2x"
+ "plen %d", eh->evt, eh->plen);
+
+ st_check_data_len(st_gdata, protoid, eh->plen);
+ continue;
+
+ /* Waiting for Bluetooth acl header ? */
+ case ST_BT_W4_ACL_HDR:
+ ah = (struct hci_acl_hdr *)st_gdata->rx_skb->
+ data;
+ dlen = __le16_to_cpu(ah->dlen);
+
+ pr_info("ACL header: dlen %d", dlen);
+
+ st_check_data_len(st_gdata, protoid, dlen);
+ continue;
+
+ /* Waiting for Bluetooth sco header ? */
+ case ST_BT_W4_SCO_HDR:
+ sh = (struct hci_sco_hdr *)st_gdata->rx_skb->
+ data;
+
+ pr_info("SCO header: dlen %d", sh->dlen);
+
+ st_check_data_len(st_gdata, protoid, sh->dlen);
+ continue;
+ case ST_FM_W4_EVENT_HDR:
+ fm = (struct fm_event_hdr *)st_gdata->rx_skb->
+ data;
+ pr_info("FM Header: ");
+ st_check_data_len(st_gdata, ST_FM, fm->plen);
+ continue;
+ /* TODO : Add GPS packet machine logic here */
+ case ST_GPS_W4_EVENT_HDR:
+ /* [0x09 pkt hdr][R/W byte][2 byte len] */
+ gps = (struct gps_event_hdr *)st_gdata->rx_skb->
+ data;
+ pr_info("GPS Header: ");
+ st_check_data_len(st_gdata, ST_GPS, gps->plen);
+ continue;
+ } /* end of switch rx_state */
+ }
+
+ /* end of if rx_count */
+ /* Check first byte of packet and identify module
+ * owner (BT/FM/GPS) */
+ switch (*ptr) {
+
+ /* Bluetooth event packet? */
+ case HCI_EVENT_PKT:
+ pr_info("Event packet");
+ st_gdata->rx_state = ST_BT_W4_EVENT_HDR;
+ st_gdata->rx_count = HCI_EVENT_HDR_SIZE;
+ type = HCI_EVENT_PKT;
+ protoid = ST_BT;
+ break;
+
+ /* Bluetooth acl packet? */
+ case HCI_ACLDATA_PKT:
+ pr_info("ACL packet");
+ st_gdata->rx_state = ST_BT_W4_ACL_HDR;
+ st_gdata->rx_count = HCI_ACL_HDR_SIZE;
+ type = HCI_ACLDATA_PKT;
+ protoid = ST_BT;
+ break;
+
+ /* Bluetooth sco packet? */
+ case HCI_SCODATA_PKT:
+ pr_info("SCO packet");
+ st_gdata->rx_state = ST_BT_W4_SCO_HDR;
+ st_gdata->rx_count = HCI_SCO_HDR_SIZE;
+ type = HCI_SCODATA_PKT;
+ protoid = ST_BT;
+ break;
+
+ /* Channel 8(FM) packet? */
+ case ST_FM_CH8_PKT:
+ pr_info("FM CH8 packet");
+ type = ST_FM_CH8_PKT;
+ st_gdata->rx_state = ST_FM_W4_EVENT_HDR;
+ st_gdata->rx_count = FM_EVENT_HDR_SIZE;
+ protoid = ST_FM;
+ break;
+
+ /* Channel 9(GPS) packet? */
+ case 0x9: /*ST_LL_GPS_CH9_PKT */
+ pr_info("GPS CH9 packet");
+ type = 0x9; /* ST_LL_GPS_CH9_PKT; */
+ protoid = ST_GPS;
+ st_gdata->rx_state = ST_GPS_W4_EVENT_HDR;
+ st_gdata->rx_count = 3; /* GPS_EVENT_HDR_SIZE -1*/
+ break;
+ case LL_SLEEP_IND:
+ case LL_SLEEP_ACK:
+ case LL_WAKE_UP_IND:
+ pr_info("PM packet");
+ /* this takes appropriate action based on
+ * sleep state received --
+ */
+ st_ll_sleep_state(st_gdata, *ptr);
+ ptr++;
+ count--;
+ continue;
+ case LL_WAKE_UP_ACK:
+ pr_info("PM packet");
+ /* wake up ack received */
+ st_wakeup_ack(st_gdata, *ptr);
+ ptr++;
+ count--;
+ continue;
+ /* Unknown packet? */
+ default:
+ pr_err("Unknown packet type %2.2x", (__u8) *ptr);
+ ptr++;
+ count--;
+ continue;
+ }
+ ptr++;
+ count--;
+
+ switch (protoid) {
+ case ST_BT:
+ /* Allocate new packet to hold received data */
+ st_gdata->rx_skb =
+ bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!st_gdata->rx_skb) {
+ pr_err("Can't allocate mem for new packet");
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_count = 0;
+ return;
+ }
+ bt_cb(st_gdata->rx_skb)->pkt_type = type;
+ break;
+ case ST_FM: /* for FM */
+ st_gdata->rx_skb =
+ alloc_skb(FM_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!st_gdata->rx_skb) {
+ pr_err("Can't allocate mem for new packet");
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_count = 0;
+ return;
+ }
+ /* place holder 0x08 */
+ skb_reserve(st_gdata->rx_skb, 1);
+ st_gdata->rx_skb->cb[0] = ST_FM_CH8_PKT;
+ break;
+ case ST_GPS:
+ /* for GPS */
+ st_gdata->rx_skb =
+ alloc_skb(100 /*GPS_MAX_FRAME_SIZE */ , GFP_ATOMIC);
+ if (!st_gdata->rx_skb) {
+ pr_err("Can't allocate mem for new packet");
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ st_gdata->rx_count = 0;
+ return;
+ }
+ /* place holder 0x09 */
+ skb_reserve(st_gdata->rx_skb, 1);
+ st_gdata->rx_skb->cb[0] = 0x09; /*ST_GPS_CH9_PKT; */
+ break;
+ case ST_MAX:
+ break;
+ }
+ }
+ pr_info("done %s", __func__);
+ return;
+}
+
+/* internal de-Q function
+ * -- return previous in-completely written skb
+ * or return the skb in the txQ
+ */
+struct sk_buff *st_int_dequeue(struct st_data_s *st_gdata)
+{
+ struct sk_buff *returning_skb;
+
+ pr_info("%s", __func__);
+ /* if the previous skb wasn't written completely
+ */
+ if (st_gdata->tx_skb != NULL) {
+ returning_skb = st_gdata->tx_skb;
+ st_gdata->tx_skb = NULL;
+ return returning_skb;
+ }
+
+ /* de-Q from the txQ always if previous write is complete */
+ return skb_dequeue(&st_gdata->txq);
+}
+
+/* internal Q-ing function
+ * will either Q the skb to txq or the tx_waitq
+ * depending on the ST LL state
+ *
+ * lock the whole func - since ll_getstate and Q-ing should happen
+ * in one-shot
+ */
+void st_int_enqueue(struct st_data_s *st_gdata, struct sk_buff *skb)
+{
+ unsigned long flags = 0;
+
+ pr_info("%s", __func__);
+ /* this function can be invoked in more than one context,
+ * so take the lock */
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ switch (st_ll_getstate(st_gdata)) {
+ case ST_LL_AWAKE:
+ pr_info("ST LL is AWAKE, sending normally");
+ skb_queue_tail(&st_gdata->txq, skb);
+ break;
+ case ST_LL_ASLEEP_TO_AWAKE:
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+ break;
+ case ST_LL_AWAKE_TO_ASLEEP: /* host cannot be in this state */
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+ kfree_skb(skb);
+ break;
+
+ case ST_LL_ASLEEP:
+ /* call a function of ST LL to put data
+ * in tx_waitQ and wake_ind in txQ
+ */
+ skb_queue_tail(&st_gdata->tx_waitq, skb);
+ st_ll_wakeup(st_gdata);
+ break;
+ default:
+ pr_err("ST LL is illegal state(%ld),"
+ "purging received skb.", st_ll_getstate(st_gdata));
+ kfree_skb(skb);
+ break;
+ }
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ pr_info("done %s", __func__);
+ return;
+}
+
+/*
+ * internal wakeup function
+ * called from either
+ * - TTY layer when write's finished
+ * - st_write (in context of the protocol stack)
+ */
+void st_tx_wakeup(struct st_data_s *st_data)
+{
+ struct sk_buff *skb;
+ unsigned long flags; /* for irq save flags */
+ pr_info("%s", __func__);
+ /* check for sending & set flag sending here */
+ if (test_and_set_bit(ST_TX_SENDING, &st_data->tx_state)) {
+ pr_info("ST already sending");
+ /* keep sending */
+ set_bit(ST_TX_WAKEUP, &st_data->tx_state);
+ return;
+ /* TX_WAKEUP will be checked in another
+ * context
+ */
+ }
+ do { /* come back if st_tx_wakeup is set */
+ /* woke-up to write */
+ clear_bit(ST_TX_WAKEUP, &st_data->tx_state);
+ while ((skb = st_int_dequeue(st_data))) {
+ int len;
+ spin_lock_irqsave(&st_data->lock, flags);
+ /* enable wake-up from TTY */
+ set_bit(TTY_DO_WRITE_WAKEUP, &st_data->tty->flags);
+ len = st_int_write(st_data, skb->data, skb->len);
+ skb_pull(skb, len);
+ /* if skb->len == len as expected, skb->len becomes 0 */
+ if (skb->len) {
+ /* would be the next skb to be sent */
+ st_data->tx_skb = skb;
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ break;
+ }
+ kfree_skb(skb);
+ spin_unlock_irqrestore(&st_data->lock, flags);
+ }
+ /* if wake-up is set in another context- restart sending */
+ } while (test_bit(ST_TX_WAKEUP, &st_data->tx_state));
+
+ /* clear flag sending */
+ clear_bit(ST_TX_SENDING, &st_data->tx_state);
+}
+
+/********************************************************************/
+/* functions called from ST KIM
+*/
+void kim_st_list_protocols(struct st_data_s *st_gdata, char *buf)
+{
+ unsigned long flags = 0;
+#ifdef DEBUG
+ unsigned char i = ST_MAX;
+#endif
+ spin_lock_irqsave(&st_gdata->lock, flags);
+#ifdef DEBUG /* more detailed log */
+ {
+ /* append one line per protocol at an explicit offset */
+ int len = 0;
+ for (i = 0; i < ST_MAX; i++)
+ len += sprintf(buf + len, "%s is %s\n",
+ protocol_strngs[i],
+ st_gdata->list[i] !=
+ NULL ? "Registered" : "Unregistered");
+ }
+#else /* limited info */
+ sprintf(buf, "BT=%c\nFM=%c\nGPS=%c\n",
+ st_gdata->list[ST_BT] != NULL ? 'R' : 'U',
+ st_gdata->list[ST_FM] != NULL ? 'R' : 'U',
+ st_gdata->list[ST_GPS] != NULL ? 'R' : 'U');
+#endif
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+}
+
+/********************************************************************/
+/*
+ * functions called from protocol stack drivers
+ * to be EXPORT-ed
+ */
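+/* st_register() handles three cases:
+ * 1. another protocol's firmware download is in progress - the new
+ *    protocol is queued and ST_ERR_PENDING is returned; completion is
+ *    signalled later through reg_complete_cb()
+ * 2. first registration - the line discipline is installed and the
+ *    firmware is downloaded via st_kim_start() before the protocol
+ *    is added
+ * 3. chip already up - the protocol is simply added (and its GPIO
+ *    toggled for FM/GPS)
+ */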
+long st_register(struct st_proto_s *new_proto)
+{
+ struct st_data_s *st_gdata;
+ long err = ST_SUCCESS;
+ unsigned long flags = 0;
+
+ st_kim_ref(&st_gdata);
+ pr_info("%s(%d) ", __func__, new_proto->type);
+ if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
+ || new_proto->reg_complete_cb == NULL) {
+ pr_err("gdata/new_proto/recv or reg_complete_cb not ready");
+ return ST_ERR_FAILURE;
+ }
+
+ if (new_proto->type < ST_BT || new_proto->type >= ST_MAX) {
+ pr_err("protocol %d not supported", new_proto->type);
+ return ST_ERR_NOPROTO;
+ }
+
+ if (st_gdata->list[new_proto->type] != NULL) {
+ pr_err("protocol %d already registered", new_proto->type);
+ return ST_ERR_ALREADY;
+ }
+
+ /* can be from process context only */
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ if (test_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state)) {
+ pr_info(" ST_REG_IN_PROGRESS:%d ", new_proto->type);
+ /* fw download in progress */
+ st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
+
+ st_gdata->list[new_proto->type] = new_proto;
+ new_proto->write = st_write;
+
+ set_bit(ST_REG_PENDING, &st_gdata->st_state);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return ST_ERR_PENDING;
+ } else if (st_gdata->protos_registered == ST_EMPTY) {
+ pr_info(" protocol list empty :%d ", new_proto->type);
+ set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ st_recv = st_kim_recv;
+
+ /* release lock previously held - re-locked below */
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ /* enable the ST LL - to set default chip state */
+ st_ll_enable(st_gdata);
+ /* this may take a while to complete
+ * since it involves BT fw download
+ */
+ err = st_kim_start();
+ if (err != ST_SUCCESS) {
+ clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ if ((st_gdata->protos_registered != ST_EMPTY) &&
+ (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_err(" KIM failure complete callback ");
+ st_reg_complete(st_gdata, ST_ERR_FAILURE);
+ }
+
+ return ST_ERR_FAILURE;
+ }
+
+ /* the protocol might require other gpios to be toggled
+ */
+ st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
+
+ clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
+ st_recv = st_int_recv;
+
+ /* this is where all pending registration
+ * are signalled to be complete by calling callback functions
+ */
+ if ((st_gdata->protos_registered != ST_EMPTY) &&
+ (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_info(" call reg complete callback ");
+ st_gdata->protos_registered++;
+ st_reg_complete(st_gdata, ST_SUCCESS);
+ }
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
+
+ /* check for already registered once more,
+ * since the above check is old
+ */
+ if (st_gdata->list[new_proto->type] != NULL) {
+ pr_err(" proto %d already registered ",
+ new_proto->type);
+ return ST_ERR_ALREADY;
+ }
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ st_gdata->list[new_proto->type] = new_proto;
+ new_proto->write = st_write;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return err;
+ }
+ /* if fw is already downloaded & new stack registers protocol */
+ else {
+ switch (new_proto->type) {
+ case ST_BT:
+ /* do nothing */
+ break;
+ case ST_FM:
+ case ST_GPS:
+ st_kim_chip_toggle(new_proto->type, KIM_GPIO_ACTIVE);
+ break;
+ case ST_MAX:
+ default:
+ pr_err("%d protocol not supported",
+ new_proto->type);
+ err = ST_ERR_NOPROTO;
+ /* something wrong */
+ break;
+ }
+ st_gdata->list[new_proto->type] = new_proto;
+ new_proto->write = st_write;
+
+ /* lock already held before entering else */
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return err;
+ }
+ pr_info("done %s(%d) ", __func__, new_proto->type);
+}
+EXPORT_SYMBOL_GPL(st_register);
+
+/* to unregister a protocol -
+ * to be called from protocol stack driver
+ */
+long st_unregister(enum proto_type type)
+{
+ long err = ST_SUCCESS;
+ unsigned long flags = 0;
+ struct st_data_s *st_gdata;
+
+ pr_info("%s: %d ", __func__, type);
+
+ st_kim_ref(&st_gdata);
+ if (type < ST_BT || type >= ST_MAX) {
+ pr_err(" protocol %d not supported", type);
+ return ST_ERR_NOPROTO;
+ }
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
+ if (st_gdata->list[type] == NULL) {
+ pr_err(" protocol %d not registered", type);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ return ST_ERR_NOPROTO;
+ }
+
+ st_gdata->protos_registered--;
+ st_gdata->list[type] = NULL;
+
+ /* kim ignores BT in the below function
+ * and handles the rest, BT is toggled
+ * only in kim_start and kim_stop
+ */
+ st_kim_chip_toggle(type, KIM_GPIO_INACTIVE);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ if ((st_gdata->protos_registered == ST_EMPTY) &&
+ (!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
+ pr_info(" all protocols unregistered ");
+
+ /* stop traffic on tty */
+ if (st_gdata->tty) {
+ tty_ldisc_flush(st_gdata->tty);
+ stop_tty(st_gdata->tty);
+ }
+
+ /* all protocols now unregistered */
+ st_kim_stop();
+ /* disable ST LL */
+ st_ll_disable(st_gdata);
+ }
+ return err;
+}
+
+/*
+ * called in protocol stack drivers
+ * via the write function pointer
+ */
+long st_write(struct sk_buff *skb)
+{
+ struct st_data_s *st_gdata;
+#ifdef DEBUG
+ enum proto_type protoid = ST_MAX;
+#endif
+ long len;
+
+ st_kim_ref(&st_gdata);
+ if (unlikely(skb == NULL || st_gdata == NULL
+ || st_gdata->tty == NULL)) {
+ pr_err("data/tty unavailable to perform write");
+ return ST_ERR_FAILURE;
+ }
+#ifdef DEBUG /* open-up skb to read the 1st byte */
+ switch (skb->data[0]) {
+ case HCI_COMMAND_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ protoid = ST_BT;
+ break;
+ case ST_FM_CH8_PKT:
+ protoid = ST_FM;
+ break;
+ case 0x09:
+ protoid = ST_GPS;
+ break;
+ }
+ if (unlikely(st_gdata->list[protoid] == NULL)) {
+ pr_err(" protocol %d not registered, and writing? ",
+ protoid);
+ return ST_ERR_FAILURE;
+ }
+#endif
+ pr_info("%d to be written", skb->len);
+ len = skb->len;
+
+ /* st_ll to decide where to enqueue the skb */
+ st_int_enqueue(st_gdata, skb);
+ /* wake up */
+ st_tx_wakeup(st_gdata);
+
+ /* return number of bytes written */
+ return len;
+}
+
+/* for protocols making use of shared transport */
+EXPORT_SYMBOL_GPL(st_unregister);
+
+/********************************************************************/
+/*
+ * functions called from TTY layer
+ */
+static int st_tty_open(struct tty_struct *tty)
+{
+ int err = ST_SUCCESS;
+ struct st_data_s *st_gdata;
+ pr_info("%s ", __func__);
+
+ st_kim_ref(&st_gdata);
+ st_gdata->tty = tty;
+ tty->disc_data = st_gdata;
+
+ /* don't do a wakeup for now */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+ /* mem already allocated
+ */
+ tty->receive_room = 65536;
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ /*
+ * signal to UIM via KIM that -
+ * installation of N_TI_WL ldisc is complete
+ */
+ st_kim_complete();
+ pr_info("done %s", __func__);
+ return err;
+}
+
+static void st_tty_close(struct tty_struct *tty)
+{
+ unsigned char i = ST_MAX;
+ unsigned long flags = 0;
+ struct st_data_s *st_gdata = tty->disc_data;
+
+ pr_info("%s ", __func__);
+
+ /* TODO:
+ * if a protocol has been registered & line discipline
+ * un-installed for some reason - what should be done ?
+ */
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ for (i = ST_BT; i < ST_MAX; i++) {
+ if (st_gdata->list[i] != NULL)
+ pr_err("%d not un-registered", i);
+ st_gdata->list[i] = NULL;
+ }
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /*
+ * signal to UIM via KIM that -
+ * N_TI_WL ldisc is un-installed
+ */
+ st_kim_complete();
+ st_gdata->tty = NULL;
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+
+ spin_lock_irqsave(&st_gdata->lock, flags);
+ /* empty out txq and tx_waitq */
+ skb_queue_purge(&st_gdata->txq);
+ skb_queue_purge(&st_gdata->tx_waitq);
+ /* reset the TTY Rx states of ST */
+ st_gdata->rx_count = 0;
+ st_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kfree_skb(st_gdata->rx_skb);
+ st_gdata->rx_skb = NULL;
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
+
+ pr_info("%s: done ", __func__);
+}
+
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+ char *tty_flags, int count)
+{
+
+#ifdef VERBOSE
+ long i;
+ printk(KERN_ERR "incoming data...\n");
+ for (i = 0; i < count; i++)
+ printk(" %x", data[i]);
+ printk(KERN_ERR "\n.. data end\n");
+#endif
+
+ /*
+ * if fw download is in progress then route incoming data
+ * to KIM for validation
+ */
+ st_recv(tty->disc_data, data, count);
+ pr_info("done %s", __func__);
+}
+
+/* wake-up function called in from the TTY layer
+ * inside the internal wakeup function will be called
+ */
+static void st_tty_wakeup(struct tty_struct *tty)
+{
+ struct st_data_s *st_gdata = tty->disc_data;
+ pr_info("%s ", __func__);
+ /* don't do a wakeup for now */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+
+ /* call our internal wakeup */
+ st_tx_wakeup((void *)st_gdata);
+}
+
+static void st_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct st_data_s *st_gdata = tty->disc_data;
+ pr_info("%s ", __func__);
+
+ kfree_skb(st_gdata->tx_skb);
+ st_gdata->tx_skb = NULL;
+
+ tty->ops->flush_buffer(tty);
+ return;
+}
+
+/********************************************************************/
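+/* allocates the ST core context and registers the N_TI_WL tty line
+ * discipline; called from the KIM platform driver's probe
+ */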
+int st_core_init(struct st_data_s **core_data)
+{
+ struct st_data_s *st_gdata;
+ long err;
+ static struct tty_ldisc_ops *st_ldisc_ops;
+
+ /* populate and register to TTY line discipline */
+ st_ldisc_ops = kzalloc(sizeof(*st_ldisc_ops), GFP_KERNEL);
+ if (!st_ldisc_ops) {
+ pr_err("no mem to allocate");
+ return -ENOMEM;
+ }
+
+ st_ldisc_ops->magic = TTY_LDISC_MAGIC;
+ st_ldisc_ops->name = "n_st"; /*"n_hci"; */
+ st_ldisc_ops->open = st_tty_open;
+ st_ldisc_ops->close = st_tty_close;
+ st_ldisc_ops->receive_buf = st_tty_receive;
+ st_ldisc_ops->write_wakeup = st_tty_wakeup;
+ st_ldisc_ops->flush_buffer = st_tty_flush_buffer;
+ st_ldisc_ops->owner = THIS_MODULE;
+
+ err = tty_register_ldisc(N_TI_WL, st_ldisc_ops);
+ if (err) {
+ pr_err("error registering %d line discipline %ld",
+ N_TI_WL, err);
+ kfree(st_ldisc_ops);
+ return err;
+ }
+ pr_info("registered n_shared line discipline");
+
+ st_gdata = kzalloc(sizeof(struct st_data_s), GFP_KERNEL);
+ if (!st_gdata) {
+ pr_err("memory allocation failed");
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc %ld", err);
+ kfree(st_ldisc_ops);
+ err = -ENOMEM;
+ return err;
+ }
+
+ /* Initialize ST TxQ and Tx waitQ queue head. All BT/FM/GPS module skb's
+ * will be pushed in this queue for actual transmission.
+ */
+ skb_queue_head_init(&st_gdata->txq);
+ skb_queue_head_init(&st_gdata->tx_waitq);
+
+ /* Locking used in st_int_enqueue() to avoid multiple execution */
+ spin_lock_init(&st_gdata->lock);
+
+ /* ldisc_ops ref to be only used in __exit of module */
+ st_gdata->ldisc_ops = st_ldisc_ops;
+
+#if 0
+ err = st_kim_init();
+ if (err) {
+ pr_err("error during kim initialization(%ld)", err);
+ kfree(st_gdata);
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc");
+ kfree(st_ldisc_ops);
+ return -1;
+ }
+#endif
+
+ err = st_ll_init(st_gdata);
+ if (err) {
+ pr_err("error during st_ll initialization(%ld)", err);
+ kfree(st_gdata);
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc");
+ kfree(st_ldisc_ops);
+ return -1;
+ }
+ *core_data = st_gdata;
+ return 0;
+}
+
+void st_core_exit(struct st_data_s *st_gdata)
+{
+ long err;
+ /* internal module cleanup */
+ err = st_ll_deinit(st_gdata);
+ if (err)
+ pr_err("error during deinit of ST LL %ld", err);
+#if 0
+ err = st_kim_deinit();
+ if (err)
+ pr_err("error during deinit of ST KIM %ld", err);
+#endif
+ if (st_gdata != NULL) {
+ /* Free ST Tx Qs and skbs */
+ skb_queue_purge(&st_gdata->txq);
+ skb_queue_purge(&st_gdata->tx_waitq);
+ kfree_skb(st_gdata->rx_skb);
+ kfree_skb(st_gdata->tx_skb);
+ /* TTY ldisc cleanup */
+ err = tty_unregister_ldisc(N_TI_WL);
+ if (err)
+ pr_err("unable to un-register ldisc %ld", err);
+ kfree(st_gdata->ldisc_ops);
+ /* free the global data pointer */
+ kfree(st_gdata);
+ }
+}
+
+
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
new file mode 100644
index 00000000000..f271c88a808
--- /dev/null
+++ b/drivers/staging/ti-st/st_core.h
@@ -0,0 +1,98 @@
+/*
+ * Shared Transport Core header file
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef ST_CORE_H
+#define ST_CORE_H
+
+#include <linux/skbuff.h>
+#include "st.h"
+
+/* states of protocol list */
+#define ST_NOTEMPTY 1
+#define ST_EMPTY 0
+
+/*
+ * possible st_states
+ */
+#define ST_INITIALIZING 1
+#define ST_REG_IN_PROGRESS 2
+#define ST_REG_PENDING 3
+#define ST_WAITING_FOR_RESP 4
+
+/*
+ * local data required for ST/KIM/ST-HCI-LL
+ */
+struct st_data_s {
+ unsigned long st_state;
+/*
+ * an instance of tty_struct & ldisc ops to move around
+ */
+ struct tty_struct *tty;
+ struct tty_ldisc_ops *ldisc_ops;
+/*
+ * the tx skb -
+ * if the skb is already dequeued and the tty failed to write the same
+ * maintain the skb to write in the next transaction
+ */
+ struct sk_buff *tx_skb;
+#define ST_TX_SENDING 1
+#define ST_TX_WAKEUP 2
+ unsigned long tx_state;
+/*
+ * list of protocol registered
+ */
+ struct st_proto_s *list[ST_MAX];
+/*
+ * RX state machine, TX queues and the lock protecting them
+ */
+ unsigned long rx_state;
+ unsigned long rx_count;
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq, tx_waitq;
+ spinlock_t lock; /* ST LL state lock */
+ unsigned char protos_registered;
+ unsigned long ll_state; /* ST LL power state */
+};
+
+/* point this to tty->driver->write or tty->ops->write
+ * depending upon the kernel version
+ */
+int st_int_write(struct st_data_s*, const unsigned char*, int);
+/* internal write function, passed onto protocol drivers
+ * via the write function ptr of protocol struct
+ */
+long st_write(struct sk_buff *);
+/* function to be called from ST-LL
+ */
+void st_ll_send_frame(enum proto_type, struct sk_buff *);
+/* internal wake up function */
+void st_tx_wakeup(struct st_data_s *st_data);
+
+int st_core_init(struct st_data_s **);
+void st_core_exit(struct st_data_s *);
+void st_kim_ref(struct st_data_s **);
+
+#define GPS_STUB_TEST
+#ifdef GPS_STUB_TEST
+int gps_chrdrv_stub_write(const unsigned char*, int);
+void gps_chrdrv_stub_init(void);
+#endif
+
+#endif /*ST_CORE_H */
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
new file mode 100644
index 00000000000..98cbabba384
--- /dev/null
+++ b/drivers/staging/ti-st/st_kim.c
@@ -0,0 +1,754 @@
+/*
+ * Shared Transport Line discipline driver Core
+ * Init Manager module responsible for GPIO control
+ * and firmware download
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stk): " fmt
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/gpio.h>
+
+#include <linux/sched.h>
+
+#include "st_kim.h"
+/* understand BT events for fw response */
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+
+static int kim_probe(struct platform_device *pdev);
+static int kim_remove(struct platform_device *pdev);
+
+/* KIM platform device driver structure */
+static struct platform_driver kim_platform_driver = {
+ .probe = kim_probe,
+ .remove = kim_remove,
+ /* TODO: ST driver power management during suspend/resume ?
+ */
+#if 0
+ .suspend = kim_suspend,
+ .resume = kim_resume,
+#endif
+ .driver = {
+ .name = "kim",
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifndef LEGACY_RFKILL_SUPPORT
+static ssize_t show_pid(struct device *dev, struct device_attribute
+ *attr, char *buf);
+static ssize_t store_pid(struct device *dev, struct device_attribute
+ *devattr, char *buf, size_t count);
+static ssize_t show_list(struct device *dev, struct device_attribute
+ *attr, char *buf);
+
+/* structures specific for sysfs entries */
+static struct kobj_attribute pid_attr =
+__ATTR(pid, 0644, (void *)show_pid, (void *)store_pid);
+
+static struct kobj_attribute list_protocols =
+__ATTR(protocols, 0444, (void *)show_list, NULL);
+
+static struct attribute *uim_attrs[] = {
+ &pid_attr.attr,
+ /* add more debug sysfs entries */
+ &list_protocols.attr,
+ NULL,
+};
+
+static struct attribute_group uim_attr_grp = {
+ .attrs = uim_attrs,
+};
+#else
+static int kim_toggle_radio(void*, bool);
+static const struct rfkill_ops kim_rfkill_ops = {
+ .set_block = kim_toggle_radio,
+};
+#endif /* LEGACY_RFKILL_SUPPORT */
+
+/* strings to be used for rfkill entries and by
+ * ST Core to be used for sysfs debug entry
+ */
+#define PROTO_ENTRY(type, name) name
+const unsigned char *protocol_names[] = {
+ PROTO_ENTRY(ST_BT, "Bluetooth"),
+ PROTO_ENTRY(ST_FM, "FM"),
+ PROTO_ENTRY(ST_GPS, "GPS"),
+};
+
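+/* single global KIM context, shared by the sysfs/rfkill callbacks,
+ * the platform driver hooks and the ST core callbacks
+ */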
+struct kim_data_s *kim_gdata;
+
+/**********************************************************************/
+/* internal functions */
+
+/*
+ * function to check whether the firmware response was proper;
+ * in case of error it does not complete, so the wait for a
+ * proper response times out
+ */
+void validate_firmware_response(struct sk_buff *skb)
+{
+ if (unlikely(skb->data[5] != 0)) {
+ pr_err("no proper response during fw download");
+ pr_err("data6 %x", skb->data[5]);
+ return; /* keep waiting for the proper response */
+ }
+ /* because of all the script being downloaded */
+ complete_all(&kim_gdata->kim_rcvd);
+ kfree_skb(skb);
+}
+
+/* check the data length received inside kim_int_recv;
+ * most often hits the last case, updating the state to wait for data
+ */
+static inline int kim_check_data_len(int len)
+{
+ register int room = skb_tailroom(kim_gdata->rx_skb);
+
+ pr_info("len %d room %d", len, room);
+
+ if (!len) {
+ validate_firmware_response(kim_gdata->rx_skb);
+ } else if (len > room) {
+ /* Received packet's payload length is larger than the
+ * room available in the allocated skb.
+ */
+ pr_err("Data length is too large len %d room %d", len,
+ room);
+ kfree_skb(kim_gdata->rx_skb);
+ } else {
+ /* Packet header has non-zero payload length and
+ * we have enough space in created skb. Lets read
+ * payload data */
+ kim_gdata->rx_state = ST_BT_W4_DATA;
+ kim_gdata->rx_count = len;
+ return len;
+ }
+
+ /* Change ST LL state to continue to process next
+ * packet */
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_skb = NULL;
+ kim_gdata->rx_count = 0;
+
+ return 0;
+}
+
+/* receive function called during firmware download -
+ * firmware download responses on different UART drivers
+ * have been observed to arrive split across multiple
+ * tty_receive calls, hence the logic below
+ */
+void kim_int_recv(const unsigned char *data, long count)
+{
+ register char *ptr;
+ struct hci_event_hdr *eh;
+ register int len = 0, type = 0;
+
+ pr_info("%s", __func__);
+ /* Decode received bytes here */
+ ptr = (char *)data;
+ if (unlikely(ptr == NULL)) {
+ pr_err(" received null from TTY ");
+ return;
+ }
+ while (count) {
+ if (kim_gdata->rx_count) {
+ len = min_t(unsigned int, kim_gdata->rx_count, count);
+ memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
+ kim_gdata->rx_count -= len;
+ count -= len;
+ ptr += len;
+
+ if (kim_gdata->rx_count)
+ continue;
+
+ /* Check ST RX state machine , where are we? */
+ switch (kim_gdata->rx_state) {
+ /* Waiting for complete packet ? */
+ case ST_BT_W4_DATA:
+ pr_info("Complete pkt received");
+ validate_firmware_response(kim_gdata->rx_skb);
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_skb = NULL;
+ continue;
+ /* Waiting for Bluetooth event header ? */
+ case ST_BT_W4_EVENT_HDR:
+ eh = (struct hci_event_hdr *)kim_gdata->
+ rx_skb->data;
+ pr_info("Event header: evt 0x%2.2x"
+ "plen %d", eh->evt, eh->plen);
+ kim_check_data_len(eh->plen);
+ continue;
+ } /* end of switch */
+ } /* end of if rx_state */
+ switch (*ptr) {
+ /* Bluetooth event packet? */
+ case HCI_EVENT_PKT:
+ pr_info("Event packet");
+ kim_gdata->rx_state = ST_BT_W4_EVENT_HDR;
+ kim_gdata->rx_count = HCI_EVENT_HDR_SIZE;
+ type = HCI_EVENT_PKT;
+ break;
+ default:
+ pr_info("unknown packet");
+ ptr++;
+ count--;
+ continue;
+ } /* end of switch *ptr */
+ ptr++;
+ count--;
+ kim_gdata->rx_skb =
+ bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+ if (!kim_gdata->rx_skb) {
+ pr_err("can't allocate mem for new packet");
+ kim_gdata->rx_state = ST_W4_PACKET_TYPE;
+ kim_gdata->rx_count = 0;
+ return;
+ } /* not necessary in this case */
+ bt_cb(kim_gdata->rx_skb)->pkt_type = type;
+ } /* end of while count */
+ pr_info("done %s", __func__);
+ return;
+}
+
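+/* reads the chip version with the standard HCI Read Local Version
+ * Information command (opcode 0x1001) and derives the firmware
+ * script name "TIInit_<chip>.<maj>.<min>.bts" from the response
+ */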
+static long read_local_version(char *bts_scr_name)
+{
+ unsigned short version = 0, chip = 0, min_ver = 0, maj_ver = 0;
+ char read_ver_cmd[] = { 0x01, 0x01, 0x10, 0x00 };
+
+ pr_info("%s", __func__);
+
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+ if (4 != st_int_write(kim_gdata->core_data, read_ver_cmd, 4)) {
+ pr_err("kim: couldn't write 4 bytes");
+ return ST_ERR_FAILURE;
+ }
+
+ if (!wait_for_completion_timeout
+ (&kim_gdata->kim_rcvd, msecs_to_jiffies(CMD_RESP_TIME))) {
+ pr_err(" waiting for ver info- timed out ");
+ return ST_ERR_FAILURE;
+ }
+
+ version =
+ MAKEWORD(kim_gdata->resp_buffer[13], kim_gdata->resp_buffer[14]);
+ chip = (version & 0x7C00) >> 10;
+ min_ver = (version & 0x007F);
+ maj_ver = (version & 0x0380) >> 7;
+
+ if (version & 0x8000)
+ maj_ver |= 0x0008;
+
+ sprintf(bts_scr_name, "TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver);
+ pr_info("%s", bts_scr_name);
+ return ST_SUCCESS;
+}
+
+/* internal function which parses through the .bts firmware script file;
+ * it interprets only SEND and DELAY actions as of now
+ */
+static long download_firmware(void)
+{
+ long err = ST_SUCCESS;
+ long len = 0;
+ register unsigned char *ptr = NULL;
+ register unsigned char *action_ptr = NULL;
+ unsigned char bts_scr_name[30] = { 0 }; /* 30 char long bts scr name? */
+
+ pr_info("%s", __func__);
+
+ err = read_local_version(bts_scr_name);
+ if (err != ST_SUCCESS) {
+ pr_err("kim: failed to read local ver");
+ return err;
+ }
+ err =
+ request_firmware(&kim_gdata->fw_entry, bts_scr_name,
+ &kim_gdata->kim_pdev->dev);
+ if (unlikely((err != 0) || (kim_gdata->fw_entry->data == NULL) ||
+ (kim_gdata->fw_entry->size == 0))) {
+ pr_err(" request_firmware failed(errno %ld) for %s", err,
+ bts_scr_name);
+ return ST_ERR_FAILURE;
+ }
+ ptr = (void *)kim_gdata->fw_entry->data;
+ len = kim_gdata->fw_entry->size;
+ /* bts_header to remove out magic number and
+ * version
+ */
+ ptr += sizeof(struct bts_header);
+ len -= sizeof(struct bts_header);
+
+ while (len > 0 && ptr) {
+ pr_info(" action size %d, type %d ",
+ ((struct bts_action *)ptr)->size,
+ ((struct bts_action *)ptr)->type);
+
+ switch (((struct bts_action *)ptr)->type) {
+ case ACTION_SEND_COMMAND: /* action send */
+ action_ptr = &(((struct bts_action *)ptr)->data[0]);
+ if (unlikely
+ (((struct hci_command *)action_ptr)->opcode ==
+ 0xFF36)) {
+ /* ignore remote change
+ * baud rate HCI VS command */
+ pr_err(" change remote baud "
+ "rate command in firmware");
+ break;
+ }
+
+ INIT_COMPLETION(kim_gdata->kim_rcvd);
+ err = st_int_write(kim_gdata->core_data,
+ ((struct bts_action_send *)action_ptr)->data,
+ ((struct bts_action *)ptr)->size);
+ if (unlikely(err < 0)) {
+ release_firmware(kim_gdata->fw_entry);
+ return ST_ERR_FAILURE;
+ }
+ if (!wait_for_completion_timeout
+ (&kim_gdata->kim_rcvd,
+ msecs_to_jiffies(CMD_RESP_TIME))) {
+ pr_err
+ (" response timeout during fw download ");
+ /* timed out */
+ release_firmware(kim_gdata->fw_entry);
+ return ST_ERR_FAILURE;
+ }
+ break;
+ case ACTION_DELAY: /* sleep */
+ pr_info("sleep command in scr");
+ action_ptr = &(((struct bts_action *)ptr)->data[0]);
+ mdelay(((struct bts_action_delay *)action_ptr)->msec);
+ break;
+ }
+ len =
+ len - (sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size);
+ ptr =
+ ptr + sizeof(struct bts_action) +
+ ((struct bts_action *)ptr)->size;
+ }
+ /* fw download complete */
+ release_firmware(kim_gdata->fw_entry);
+ return ST_SUCCESS;
+}
+
+/**********************************************************************/
+/* functions called from ST core */
+
+/* function to toggle the GPIO
+ * needs to know whether the GPIO is active high or active low
+ */
+void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
+{
+ pr_info(" %s ", __func__);
+
+ if (kim_gdata->gpios[type] == -1) {
+ pr_info(" gpio not requested for protocol %s",
+ protocol_names[type]);
+ return;
+ }
+ switch (type) {
+ case ST_BT:
+ /*Do Nothing */
+ break;
+
+ case ST_FM:
+ if (state == KIM_GPIO_ACTIVE)
+ gpio_set_value(kim_gdata->gpios[ST_FM], GPIO_LOW);
+ else
+ gpio_set_value(kim_gdata->gpios[ST_FM], GPIO_HIGH);
+ break;
+
+ case ST_GPS:
+ if (state == KIM_GPIO_ACTIVE)
+ gpio_set_value(kim_gdata->gpios[ST_GPS], GPIO_HIGH);
+ else
+ gpio_set_value(kim_gdata->gpios[ST_GPS], GPIO_LOW);
+ break;
+
+ case ST_MAX:
+ default:
+ break;
+ }
+
+ return;
+}
+
+/* called from ST Core when REG_IN_PROGRESS (registration in progress);
+ * the incoming data can be
+ * 1. the response to the read local version command
+ * 2. send/recv traffic during the firmware download
+ */
+void st_kim_recv(void *disc_data, const unsigned char *data, long count)
+{
+ pr_info(" %s ", __func__);
+ /* copy to local buffer */
+ if (unlikely(data[4] == 0x01 && data[5] == 0x10 && data[0] == 0x04)) {
+ /* must be the read_ver_cmd */
+ memcpy(kim_gdata->resp_buffer, data, count);
+ complete_all(&kim_gdata->kim_rcvd);
+ return;
+ } else {
+ kim_int_recv(data, count);
+ /* either completes or times out */
+ }
+ return;
+}
+
+/* to signal completion of line discipline installation
+ * called from ST Core, upon tty_open
+ */
+void st_kim_complete(void)
+{
+ complete(&kim_gdata->ldisc_installed);
+}
+
+/* called from ST Core upon 1st registration
+*/
+long st_kim_start(void)
+{
+ long err = ST_SUCCESS;
+ long retry = POR_RETRY_COUNT;
+ pr_info(" %s", __func__);
+
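+ /* power-on and retry sequence: toggle the BT nShutdown GPIO,
+ * ask the user-space UIM (via SIGUSR2 or rfkill, depending on
+ * LEGACY_RFKILL_SUPPORT) to install the N_TI_WL line discipline,
+ * then download the firmware; retried up to POR_RETRY_COUNT times
+ */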
+ do {
+#ifdef LEGACY_RFKILL_SUPPORT
+ /* TODO: this is only because rfkill sub-system
+ * doesn't send events to user-space if the state
+ * isn't changed
+ */
+ rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
+#endif
+ /* Configure BT nShutdown to HIGH state */
+ gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
+ mdelay(5); /* FIXME: a proper toggle */
+ gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_HIGH);
+ mdelay(100);
+ /* re-initialize the completion */
+ INIT_COMPLETION(kim_gdata->ldisc_installed);
+#ifndef LEGACY_RFKILL_SUPPORT
+ /* send signal to UIM */
+ err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 0);
+ if (err != 0) {
+ pr_info(" sending SIGUSR2 to uim failed %ld", err);
+ err = ST_ERR_FAILURE;
+ continue;
+ }
+#else
+ /* unblock and send event to UIM via /dev/rfkill */
+ rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 0);
+#endif
+ /* wait for ldisc to be installed */
+ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
+ if (!err) { /* timeout */
+ pr_err("line disc installation timed out ");
+ err = ST_ERR_FAILURE;
+ continue;
+ } else {
+ /* ldisc installed now */
+ pr_info(" line discipline installed ");
+ err = download_firmware();
+ if (err != ST_SUCCESS) {
+ pr_err("download firmware failed");
+ continue;
+ } else { /* on success don't retry */
+ break;
+ }
+ }
+ } while (retry--);
+ return err;
+}
+
+/* called from ST Core on the last un-registration */
+long st_kim_stop(void)
+{
+ long err = ST_SUCCESS;
+
+ INIT_COMPLETION(kim_gdata->ldisc_installed);
+#ifndef LEGACY_RFKILL_SUPPORT
+ /* send signal to UIM */
+ err = kill_pid(find_get_pid(kim_gdata->uim_pid), SIGUSR2, 1);
+ if (err != 0) {
+ pr_err("sending SIGUSR2 to uim failed %ld", err);
+ return ST_ERR_FAILURE;
+ }
+#else
+ /* set BT rfkill to be blocked */
+ err = rfkill_set_hw_state(kim_gdata->rfkill[ST_BT], 1);
+#endif
+
+ /* wait for ldisc to be un-installed */
+ err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
+ msecs_to_jiffies(LDISC_TIME));
+ if (!err) { /* timeout */
+ pr_err(" timed out waiting for ldisc to be un-installed");
+ return ST_ERR_FAILURE;
+ }
+
+ /* By default configure BT nShutdown to LOW state */
+ gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
+ mdelay(1);
+ gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_HIGH);
+ mdelay(1);
+ gpio_set_value(kim_gdata->gpios[ST_BT], GPIO_LOW);
+ return err;
+}
+
+/**********************************************************************/
+/* functions called from subsystems */
+
+#ifndef LEGACY_RFKILL_SUPPORT
+/* called when sysfs entry is written to */
+static ssize_t store_pid(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ pr_info("%s: pid %s ", __func__, buf);
+ sscanf(buf, "%ld", &kim_gdata->uim_pid);
+ /* to be used by kim_start to signal SIGUSR2 */
+ return strlen(buf);
+}
+
+/* called when sysfs entry is read from */
+static ssize_t show_pid(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ sprintf(buf, "%ld", kim_gdata->uim_pid);
+ return strlen(buf);
+}
+
+/* called when sysfs entry is read from */
+static ssize_t show_list(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ kim_st_list_protocols(kim_gdata->core_data, buf);
+ return strlen(buf);
+}
+
+#else /* LEGACY_RFKILL_SUPPORT */
+
+/* function called from rfkill subsystem, when someone from
+ * user space would write 0/1 on the sysfs entry
+ * /sys/class/rfkill/rfkill0,1,3/state
+ */
+static int kim_toggle_radio(void *data, bool blocked)
+{
+ enum proto_type type = *((enum proto_type *)data);
+ pr_info(" %s: %d ", __func__, type);
+
+ switch (type) {
+ case ST_BT:
+ /* do nothing */
+ break;
+ case ST_FM:
+ case ST_GPS:
+ if (blocked)
+ st_kim_chip_toggle(type, KIM_GPIO_INACTIVE);
+ else
+ st_kim_chip_toggle(type, KIM_GPIO_ACTIVE);
+ break;
+ case ST_MAX:
+ pr_err(" wrong proto type ");
+ break;
+ }
+ return ST_SUCCESS;
+}
+
+#endif /* LEGACY_RFKILL_SUPPORT */
+
+void st_kim_ref(struct st_data_s **core_data)
+{
+ *core_data = kim_gdata->core_data;
+}
+
+/**********************************************************************/
+/* functions called from platform device driver subsystem
+ * need to have a relevant platform device entry in the platform's
+ * board-*.c file
+ */
+
+static int kim_probe(struct platform_device *pdev)
+{
+ long status;
+ long proto;
+ long *gpios = pdev->dev.platform_data;
+
+ status = st_core_init(&kim_gdata->core_data);
+ if (status != 0) {
+ pr_err(" ST core init failed");
+ return ST_ERR_FAILURE;
+ }
+
+ for (proto = 0; proto < ST_MAX; proto++) {
+ kim_gdata->gpios[proto] = gpios[proto];
+ pr_info(" %ld gpio to be requested", gpios[proto]);
+ }
+
+ for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
+ /* Claim the Bluetooth/FM/GPS
+ * nShutdown gpio from the system
+ */
+ status = gpio_request(gpios[proto], "kim");
+ if (unlikely(status)) {
+ pr_err(" gpio %ld request failed ", gpios[proto]);
+ /* free any GPIOs already requested before failing */
+ while (--proto >= 0)
+ if (gpios[proto] != -1)
+ gpio_free(gpios[proto]);
+ return status;
+ }
+
+ /* Configure nShutdown GPIO as output=0 */
+ status =
+ gpio_direction_output(gpios[proto], 0);
+ if (unlikely(status)) {
+ pr_err(" unable to configure gpio %ld",
+ gpios[proto]);
+ /* free this GPIO and any requested before it, then fail */
+ gpio_free(gpios[proto]);
+ while (--proto >= 0)
+ if (gpios[proto] != -1)
+ gpio_free(gpios[proto]);
+ return status;
+ }
+ }
+#ifndef LEGACY_RFKILL_SUPPORT
+ /* pdev to contain BT, FM and GPS enable/N-Shutdown GPIOs
+ * execute request_gpio, set output direction
+ */
+ kim_gdata->kim_kobj = kobject_create_and_add("uim", NULL);
+ /* create the sysfs entry for UIM to put in pid */
+ if (sysfs_create_group(kim_gdata->kim_kobj, &uim_attr_grp)) {
+ pr_err(" sysfs entry creation failed");
+ kobject_put(kim_gdata->kim_kobj);
+ /* free requested GPIOs and fail probe */
+ for (proto = ST_BT; proto < ST_MAX; proto++) {
+ if (gpios[proto] != -1)
+ gpio_free(gpios[proto]);
+ }
+ return -1; /* fail insmod */
+ }
+ pr_info(" sysfs entry created ");
+#endif
+ /* get reference of pdev for request_firmware
+ */
+ kim_gdata->kim_pdev = pdev;
+ init_completion(&kim_gdata->kim_rcvd);
+ init_completion(&kim_gdata->ldisc_installed);
+#ifdef LEGACY_RFKILL_SUPPORT
+ for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
+ /* TODO: should all types be rfkill_type_bt ? */
+ kim_gdata->rf_protos[proto] = proto;
+ kim_gdata->rfkill[proto] = rfkill_alloc(protocol_names[proto],
+ &pdev->dev, RFKILL_TYPE_BLUETOOTH,
+ &kim_rfkill_ops, &kim_gdata->rf_protos[proto]);
+ if (kim_gdata->rfkill[proto] == NULL) {
+ pr_err("cannot create rfkill entry for gpio %ld",
+ gpios[proto]);
+ continue;
+ }
+ /* block upon creation */
+ rfkill_init_sw_state(kim_gdata->rfkill[proto], 1);
+ status = rfkill_register(kim_gdata->rfkill[proto]);
+ if (unlikely(status)) {
+ pr_err("rfkill registration failed for gpio %ld",
+ gpios[proto]);
+ /* never registered, so destroy rather than unregister */
+ rfkill_destroy(kim_gdata->rfkill[proto]);
+ kim_gdata->rfkill[proto] = NULL;
+ continue;
+ }
+ pr_info("rfkill entry created for %ld", gpios[proto]);
+ }
+#endif
+ return ST_SUCCESS;
+}
+
+static int kim_remove(struct platform_device *pdev)
+{
+ /* free the GPIOs requested
+ */
+ long *gpios = pdev->dev.platform_data;
+ long proto;
+
+ for (proto = 0; (proto < ST_MAX) && (gpios[proto] != -1); proto++) {
+ /* Release the Bluetooth/FM/GPS
+ * nShutdown gpio back to the system
+ */
+ gpio_free(gpios[proto]);
+#ifdef LEGACY_RFKILL_SUPPORT
+ if (kim_gdata->rfkill[proto]) {
+ rfkill_unregister(kim_gdata->rfkill[proto]);
+ rfkill_destroy(kim_gdata->rfkill[proto]);
+ kim_gdata->rfkill[proto] = NULL;
+ }
+#endif
+ }
+ pr_info("kim: GPIO Freed");
+#ifndef LEGACY_RFKILL_SUPPORT
+ /* delete the sysfs entries */
+ sysfs_remove_group(kim_gdata->kim_kobj, &uim_attr_grp);
+ kobject_put(kim_gdata->kim_kobj);
+#endif
+ kim_gdata->kim_pdev = NULL;
+ st_core_exit(kim_gdata->core_data);
+ return ST_SUCCESS;
+}
+
+/**********************************************************************/
+/* entry point for ST KIM module, called in from ST Core */
+
+static int __init st_kim_init(void)
+{
+ long ret = ST_SUCCESS;
+ kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
+ if (!kim_gdata) {
+ pr_err("no mem to allocate");
+ return -ENOMEM;
+ }
+
+ ret = platform_driver_register(&kim_platform_driver);
+ if (ret != 0) {
+ pr_err("platform drv registration failed");
+ kfree(kim_gdata);
+ kim_gdata = NULL;
+ return ST_ERR_FAILURE;
+ }
+ return ST_SUCCESS;
+}
+
+static void __exit st_kim_deinit(void)
+{
+ /* the following returns void */
+ platform_driver_unregister(&kim_platform_driver);
+ kfree(kim_gdata);
+ kim_gdata = NULL;
+}
+
+
+module_init(st_kim_init);
+module_exit(st_kim_deinit);
+MODULE_AUTHOR("Pavan Savoy <pavan_savoy@ti.com>");
+MODULE_DESCRIPTION("Shared Transport Driver for TI BT/FM/GPS combo chips ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ti-st/st_kim.h b/drivers/staging/ti-st/st_kim.h
new file mode 100644
index 00000000000..ff3270ec784
--- /dev/null
+++ b/drivers/staging/ti-st/st_kim.h
@@ -0,0 +1,150 @@
+/*
+ * Shared Transport Line discipline driver Core
+ * Init Manager Module header file
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef ST_KIM_H
+#define ST_KIM_H
+
+#include <linux/types.h>
+#include "st.h"
+#include "st_core.h"
+#include "st_ll.h"
+#include <linux/rfkill.h>
+
+/* time in msec to wait for
+ * line discipline to be installed
+ */
+#define LDISC_TIME 500
+#define CMD_RESP_TIME 500
+#define MAKEWORD(a, b) ((unsigned short)(((unsigned char)(a)) \
+ | ((unsigned short)((unsigned char)(b))) << 8))
+
+#define GPIO_HIGH 1
+#define GPIO_LOW 0
+
+/* the Power-On-Reset logic requires more than one attempt
+ * to download firmware onto the chip, since the chip's
+ * self-test takes a while
+ */
+#define POR_RETRY_COUNT 5
+/*
+ * legacy rfkill support, wherein 3 rfkill
+ * devices are created for the 3 gpios
+ * that ST has requested
+ */
+#define LEGACY_RFKILL_SUPPORT
+/*
+ * data maintained by KIM (the init manager) for ST
+ */
+struct kim_data_s {
+ long uim_pid;
+ struct platform_device *kim_pdev;
+ struct completion kim_rcvd, ldisc_installed;
+ /* MAX len of the .bts firmware script name */
+ char resp_buffer[30];
+ const struct firmware *fw_entry;
+ long gpios[ST_MAX];
+ struct kobject *kim_kobj;
+/* used by kim_int_recv to validate fw response */
+ unsigned long rx_state;
+ unsigned long rx_count;
+ struct sk_buff *rx_skb;
+#ifdef LEGACY_RFKILL_SUPPORT
+ struct rfkill *rfkill[ST_MAX];
+ enum proto_type rf_protos[ST_MAX];
+#endif
+ struct st_data_s *core_data;
+};
+
+long st_kim_start(void);
+long st_kim_stop(void);
+/*
+ * called from st_tty_receive to authenticate fw_download
+ */
+void st_kim_recv(void *, const unsigned char *, long count);
+
+void st_kim_chip_toggle(enum proto_type, enum kim_gpio_state);
+
+void st_kim_complete(void);
+
+/* function called from ST KIM to ST Core, to
+ * list out the protocols registered
+ */
+void kim_st_list_protocols(struct st_data_s *, char *);
+
+/*
+ * BTS headers
+ */
+#define ACTION_SEND_COMMAND 1
+#define ACTION_WAIT_EVENT 2
+#define ACTION_SERIAL 3
+#define ACTION_DELAY 4
+#define ACTION_RUN_SCRIPT 5
+#define ACTION_REMARKS 6
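+
+/* A .bts firmware script is a struct bts_header followed by a stream of
+ * struct bts_action records; each record carries one of the ACTION_* types
+ * above plus 'size' bytes of type-specific data (see the structs below),
+ * which is how the firmware download loop in st_kim.c walks the script
+ */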
+
+/* BRF Firmware header */
+struct bts_header {
+ uint32_t magic;
+ uint32_t version;
+ uint8_t future[24];
+ uint8_t actions[0];
+} __attribute__ ((packed));
+
+/* BRF Actions structure */
+struct bts_action {
+ uint16_t type;
+ uint16_t size;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+struct bts_action_send {
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+struct bts_action_wait {
+ uint32_t msec;
+ uint32_t size;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+struct bts_action_delay {
+ uint32_t msec;
+} __attribute__ ((packed));
+
+struct bts_action_serial {
+ uint32_t baud;
+ uint32_t flow_control;
+} __attribute__ ((packed));
+
+/* for identifying the change speed HCI VS
+ * command
+ */
+struct hci_command {
+ uint8_t prefix;
+ uint16_t opcode;
+ uint8_t plen;
+ uint32_t speed;
+} __attribute__ ((packed));
+
+
+#endif /* ST_KIM_H */
diff --git a/drivers/staging/ti-st/st_ll.c b/drivers/staging/ti-st/st_ll.c
new file mode 100644
index 00000000000..0685a100db6
--- /dev/null
+++ b/drivers/staging/ti-st/st_ll.c
@@ -0,0 +1,147 @@
+/*
+ * Shared Transport driver
+ * HCI-LL module responsible for TI proprietary HCI_LL protocol
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define pr_fmt(fmt) "(stll) :" fmt
+#include "st_ll.h"
+
+/**********************************************************************/
+/* internal functions */
+static void send_ll_cmd(struct st_data_s *st_data,
+ unsigned char cmd)
+{
+
+ pr_info("%s: writing %x", __func__, cmd);
+ st_int_write(st_data, &cmd, 1);
+ return;
+}
+
+static void ll_device_want_to_sleep(struct st_data_s *st_data)
+{
+ pr_info("%s", __func__);
+ /* sanity check */
+ if (st_data->ll_state != ST_LL_AWAKE)
+ pr_err("ERR hcill: ST_LL_GO_TO_SLEEP_IND"
+ "in state %ld", st_data->ll_state);
+
+ send_ll_cmd(st_data, LL_SLEEP_ACK);
+ /* update state */
+ st_data->ll_state = ST_LL_ASLEEP;
+}
+
+static void ll_device_want_to_wakeup(struct st_data_s *st_data)
+{
+ /* diff actions in diff states */
+ switch (st_data->ll_state) {
+ case ST_LL_ASLEEP:
+ send_ll_cmd(st_data, LL_WAKE_UP_ACK); /* send wake_ack */
+ break;
+ case ST_LL_ASLEEP_TO_AWAKE:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind while waiting for Wake ack");
+ break;
+ case ST_LL_AWAKE:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind already AWAKE");
+ break;
+ case ST_LL_AWAKE_TO_ASLEEP:
+ /* duplicate wake_ind */
+ pr_err("duplicate wake_ind");
+ break;
+ }
+ /* update state */
+ st_data->ll_state = ST_LL_AWAKE;
+}
+
+/**********************************************************************/
+/* functions invoked by ST Core */
+
+/* called when ST Core wants to
+ * enable ST LL */
+void st_ll_enable(struct st_data_s *ll)
+{
+ ll->ll_state = ST_LL_AWAKE;
+}
+
+/* called when ST Core /local module wants to
+ * disable ST LL */
+void st_ll_disable(struct st_data_s *ll)
+{
+ ll->ll_state = ST_LL_INVALID;
+}
+
+/* called when ST Core wants to update the state */
+void st_ll_wakeup(struct st_data_s *ll)
+{
+ if (likely(ll->ll_state != ST_LL_AWAKE)) {
+ send_ll_cmd(ll, LL_WAKE_UP_IND); /* WAKE_IND */
+ ll->ll_state = ST_LL_ASLEEP_TO_AWAKE;
+ } else {
+ /* don't send the duplicate wake_indication */
+ pr_err(" Chip already AWAKE ");
+ }
+}
+
+/* called when ST Core wants the state */
+unsigned long st_ll_getstate(struct st_data_s *ll)
+{
+ pr_info(" returning state %ld", ll->ll_state);
+ return ll->ll_state;
+}
+
+/* called from ST Core, when a PM related packet arrives */
+unsigned long st_ll_sleep_state(struct st_data_s *st_data,
+ unsigned char cmd)
+{
+ switch (cmd) {
+ case LL_SLEEP_IND: /* sleep ind */
+ pr_info("sleep indication recvd");
+ ll_device_want_to_sleep(st_data);
+ break;
+ case LL_SLEEP_ACK: /* sleep ack */
+ pr_err("sleep ack rcvd: host shouldn't");
+ break;
+ case LL_WAKE_UP_IND: /* wake ind */
+ pr_info("wake indication recvd");
+ ll_device_want_to_wakeup(st_data);
+ break;
+ case LL_WAKE_UP_ACK: /* wake ack */
+ pr_info("wake ack rcvd");
+ st_data->ll_state = ST_LL_AWAKE;
+ break;
+ default:
+ pr_err(" unknown input/state ");
+ return ST_ERR_FAILURE;
+ }
+ return ST_SUCCESS;
+}
+
+/* Called from ST CORE to initialize ST LL */
+long st_ll_init(struct st_data_s *ll)
+{
+ /* set state to invalid */
+ ll->ll_state = ST_LL_INVALID;
+ return 0;
+}
+
+/* Called from ST CORE to de-initialize ST LL */
+long st_ll_deinit(struct st_data_s *ll)
+{
+ return 0;
+}
diff --git a/drivers/staging/ti-st/st_ll.h b/drivers/staging/ti-st/st_ll.h
new file mode 100644
index 00000000000..77dfbf07e7b
--- /dev/null
+++ b/drivers/staging/ti-st/st_ll.h
@@ -0,0 +1,62 @@
+/*
+ * Shared Transport Low Level (ST LL)
+ *
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef ST_LL_H
+#define ST_LL_H
+
+#include <linux/skbuff.h>
+#include "st.h"
+#include "st_core.h"
+
+/* ST LL receiver states */
+#define ST_W4_PACKET_TYPE 0
+#define ST_BT_W4_EVENT_HDR 1
+#define ST_BT_W4_ACL_HDR 2
+#define ST_BT_W4_SCO_HDR 3
+#define ST_BT_W4_DATA 4
+#define ST_FM_W4_EVENT_HDR 5
+#define ST_GPS_W4_EVENT_HDR 6
+
+/* ST LL state machines */
+#define ST_LL_ASLEEP 0
+#define ST_LL_ASLEEP_TO_AWAKE 1
+#define ST_LL_AWAKE 2
+#define ST_LL_AWAKE_TO_ASLEEP 3
+#define ST_LL_INVALID 4
+
+#define LL_SLEEP_IND 0x30
+#define LL_SLEEP_ACK 0x31
+#define LL_WAKE_UP_IND 0x32
+#define LL_WAKE_UP_ACK 0x33
+
+/* initialize and de-init ST LL */
+long st_ll_init(struct st_data_s *);
+long st_ll_deinit(struct st_data_s *);
+
+/* enable/disable ST LL along with KIM start/stop
+ * called by ST Core
+ */
+void st_ll_enable(struct st_data_s *);
+void st_ll_disable(struct st_data_s *);
+
+unsigned long st_ll_getstate(struct st_data_s *);
+unsigned long st_ll_sleep_state(struct st_data_s *, unsigned char);
+void st_ll_wakeup(struct st_data_s *);
+#endif /* ST_LL_H */
diff --git a/drivers/staging/ti-st/sysfs-uim b/drivers/staging/ti-st/sysfs-uim
new file mode 100644
index 00000000000..10311afcbd4
--- /dev/null
+++ b/drivers/staging/ti-st/sysfs-uim
@@ -0,0 +1,16 @@
+What: /sys/class/rfkill/rfkill%d/
+Date: March 22
+Contact: Pavan Savoy <pavan_savoy@ti.com>
+Description:
+ Creates the rfkill entries that allow radio applications
+ (BT, FM or GPS) to toggle the corresponding cores
+ of the chip
+
+What: /dev/rfkill
+Date: March 22
+Contact: Pavan Savoy <pavan_savoy@ti.com>
+Description:
+ A daemon which maintains the line discipline installation
+ and un-installation polls this device and listens for
+ events that indicate whether to install or un-install
+ the line discipline
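+
+ A minimal sketch (not part of the ABI) of how such a daemon might
+ consume /dev/rfkill events, assuming the standard struct rfkill_event
+ from <linux/rfkill.h>; which tty to open and which line discipline
+ number to set on unblock are left as placeholder comments:
+
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <linux/rfkill.h>
+
+ static void uim_event_loop(void)
+ {
+ struct rfkill_event ev;
+ int fd = open("/dev/rfkill", O_RDONLY);
+
+ if (fd < 0)
+ return;
+ /* each read returns one rfkill add/change/del event */
+ while (read(fd, &ev, sizeof(ev)) >= RFKILL_EVENT_SIZE_V1) {
+ if (ev.type != RFKILL_TYPE_BLUETOOTH)
+ continue;
+ if (!ev.soft && !ev.hard)
+ ; /* unblocked: open the UART and install the
+ * shared transport line discipline */
+ else
+ ; /* blocked: restore N_TTY and close the UART */
+ }
+ close(fd);
+ }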
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
new file mode 100644
index 00000000000..5fe759cc2ee
--- /dev/null
+++ b/drivers/staging/tm6000/Kconfig
@@ -0,0 +1,32 @@
+config VIDEO_TM6000
+ tristate "TV Master TM5600/6000/6010 driver"
+ depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL
+ select VIDEO_TUNER
+ select TUNER_XC2028
+ select VIDEOBUF_VMALLOC
+ help
+ Support for TM5600/TM6000/TM6010 USB Device
+
+ Since these cards have no MPEG decoder onboard, they transmit
+ only compressed MPEG data over the usb bus, so you need
+ an external software decoder to watch TV on your computer.
+
+ Say Y if you own such a device and want to use it.
+
+config VIDEO_TM6000_ALSA
+ tristate "TV Master TM5600/6000/6010 audio support"
+ depends on VIDEO_TM6000 && SND && EXPERIMENTAL
+ select SND_PCM
+ ---help---
+ This is a video4linux driver for direct (DMA) audio for
+ TM5600/TM6000/TM6010 USB Devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tm6000-alsa.
+
+config VIDEO_TM6000_DVB
+ bool "DVB Support for tm6000 based TV cards"
+ depends on VIDEO_TM6000 && DVB_CORE && EXPERIMENTAL
+ select DVB_ZL10353
+ ---help---
+ This adds support for DVB cards based on the tm5600/tm6000 chip.
diff --git a/drivers/staging/tm6000/Makefile b/drivers/staging/tm6000/Makefile
new file mode 100644
index 00000000000..93370fccc07
--- /dev/null
+++ b/drivers/staging/tm6000/Makefile
@@ -0,0 +1,17 @@
+tm6000-objs := tm6000-cards.o \
+ tm6000-core.o \
+ tm6000-i2c.o \
+ tm6000-video.o \
+ tm6000-stds.o
+
+ifeq ($(CONFIG_VIDEO_TM6000_DVB),y)
+tm6000-objs += tm6000-dvb.o
+endif
+
+obj-$(CONFIG_VIDEO_TM6000) += tm6000.o
+obj-$(CONFIG_VIDEO_TM6000_ALSA) += tm6000-alsa.o
+
+EXTRA_CFLAGS = -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/staging/tm6000/README b/drivers/staging/tm6000/README
new file mode 100644
index 00000000000..c340ebc2ee9
--- /dev/null
+++ b/drivers/staging/tm6000/README
@@ -0,0 +1,22 @@
+Todo:
+ - Fix the loss of some blocks when receiving the video URBs
+ - Add a lock at tm6000_read_write_usb() to prevent two simultaneous accesses to
+ the URB control transfers
+ - Properly add the locks at tm6000-video
+ - Add audio support
+ - Add vbi support
+ - Add IR support
+ - Do several cleanups
+ - I think that frame1/frame0 are inverted. This causes an odd effect in the image;
+ the fix is trivial, but requires some tests
+ - My tm6010 devices sometimes stop working. I need to power them off, remove them
+ from my machine and wait for a while before they work again. I'm starting to think
+ that it is an overheating issue - is there a workaround that we could apply?
+ - Sometimes the tm6010 doesn't read the eeprom at the proper time (hardware bug), so
+ the device gets mis-detected as a "generic" tm6000. This can be really bad if the
+ tuner is the low-power one, as it may result in loading the high-power firmware,
+ which could damage the device. Maybe we should read the eeprom again to double-check
+ when the device is marked as "generic"
+ - Coding Style fixes
+ - sparse cleanups
+
+Please send patches to linux-media@vger.kernel.org
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
new file mode 100644
index 00000000000..bc89f9d2800
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -0,0 +1,414 @@
+/*
+ *
+ * Support for audio capture for tm5600/6000/6010
+ * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Based on cx88-alsa.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+
+#include <asm/delay.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#undef dprintk
+
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg); \
+ } while (0)
+
+/****************************************************************************
+ Data type declarations - Can be moved to a header file later
+ ****************************************************************************/
+
+struct snd_tm6000_card {
+ struct snd_card *card;
+
+ spinlock_t reg_lock;
+
+ atomic_t count;
+
+ unsigned int period_size;
+ unsigned int num_periods;
+
+ struct tm6000_core *core;
+ struct tm6000_buffer *buf;
+
+ int bufsize;
+
+ struct snd_pcm_substream *substream;
+};
+
+
+/****************************************************************************
+ Module global static vars
+ ****************************************************************************/
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. default enabled.");
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
+
+
+/****************************************************************************
+ Module macros
+ ****************************************************************************/
+
+MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
+ "{Trident,tm6000},"
+ "{Trident,tm6010}}");
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+/****************************************************************************
+ Module specific functions
+ ****************************************************************************/
+
+/*
+ * BOARD Specific: Sets audio DMA
+ */
+
+static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+
+ /* Enables audio */
+ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0);
+ val |= 0x20;
+ tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+
+ tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Resets audio DMA
+ */
+static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+ dprintk(1, "Stopping audio DMA\n");
+
+ /* Disables audio */
+ val = tm6000_get_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0x0);
+ val &= ~0x20;
+ tm6000_set_reg(core, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+
+ tm6000_set_reg(core, TM6010_REQ08_R01_A_INIT, 0);
+
+ return 0;
+}
+
+static int dsp_buffer_free(struct snd_tm6000_card *chip)
+{
+ BUG_ON(!chip->bufsize);
+
+ dprintk(2, "Freeing buffer\n");
+
+ /* FIXME: Frees buffer */
+
+ chip->bufsize = 0;
+
+ return 0;
+}
+
+/****************************************************************************
+ ALSA PCM Interface
+ ****************************************************************************/
+
+/*
+ * Digital hardware definition
+ */
+#define DEFAULT_FIFO_SIZE 4096
+
+static struct snd_pcm_hardware snd_tm6000_digital_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = DEFAULT_FIFO_SIZE/4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ .periods_min = 1,
+ .periods_max = 1024,
+ .buffer_bytes_max = (1024*1024),
+};
+
+/*
+ * audio pcm capture open callback
+ */
+static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ goto _error;
+
+ chip->substream = substream;
+
+ runtime->hw = snd_tm6000_digital_hw;
+
+ return 0;
+_error:
+ dprintk(1, "Error opening PCM!\n");
+ return err;
+}
+
+/*
+ * audio close callback
+ */
+static int snd_tm6000_close(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/*
+ * hw_params callback
+ */
+static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ chip->period_size = params_period_bytes(hw_params);
+ chip->num_periods = params_periods(hw_params);
+ chip->bufsize = chip->period_size * params_periods(hw_params);
+
+ BUG_ON(!chip->bufsize);
+
+ dprintk(1, "Setting buffer\n");
+
+ /* FIXME: Allocate buffer for audio */
+
+
+ return 0;
+}
+
+/*
+ * hw free callback
+ */
+static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
+{
+
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare callback
+ */
+static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+
+/*
+ * trigger callback
+ */
+static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ int err;
+
+ spin_lock(&chip->reg_lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ err = _tm6000_start_audio_dma(chip);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ err = _tm6000_stop_audio_dma(chip);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ spin_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+/*
+ * pointer callback
+ */
+static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u16 count;
+
+ count = atomic_read(&chip->count);
+
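+ /* wrap the period counter into the ring of 'periods' and convert to frames */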
+ return runtime->period_size * (count & (runtime->periods-1));
+}
+
+/*
+ * operators
+ */
+static struct snd_pcm_ops snd_tm6000_pcm_ops = {
+ .open = snd_tm6000_pcm_open,
+ .close = snd_tm6000_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_tm6000_hw_params,
+ .hw_free = snd_tm6000_hw_free,
+ .prepare = snd_tm6000_prepare,
+ .trigger = snd_tm6000_card_trigger,
+ .pointer = snd_tm6000_pointer,
+};
+
+/*
+ * create a PCM device
+ */
+static int __devinit snd_tm6000_pcm(struct snd_tm6000_card *chip,
+ int device, char *name)
+{
+ int err;
+ struct snd_pcm *pcm;
+
+ err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+
+ return 0;
+}
+
+/* FIXME: Control interface - How to control volume/mute? */
+
+/****************************************************************************
+ Basic Flow for Sound Devices
+ ****************************************************************************/
+
+/*
+ * Alsa Constructor - Component probe
+ */
+
+int tm6000_audio_init(struct tm6000_core *dev, int idx)
+{
+ struct snd_card *card;
+ struct snd_tm6000_card *chip;
+ int rc, len;
+ char component[14];
+
+ if (idx >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[idx])
+ return -ENOENT;
+
+ rc = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card);
+ if (rc < 0) {
+ snd_printk(KERN_ERR "cannot create card instance %d\n", idx);
+ return rc;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ chip->core = dev;
+ chip->card = card;
+
+ strcpy(card->driver, "tm6000-alsa");
+ sprintf(component, "USB%04x:%04x",
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct));
+ snd_component_add(card, component);
+
+ if (dev->udev->descriptor.iManufacturer)
+ len = usb_string(dev->udev,
+ dev->udev->descriptor.iManufacturer,
+ card->longname, sizeof(card->longname));
+ else
+ len = 0;
+
+ if (len > 0)
+ strlcat(card->longname, " ", sizeof(card->longname));
+
+ strlcat(card->longname, card->shortname, sizeof(card->longname));
+
+ len = strlcat(card->longname, " at ", sizeof(card->longname));
+
+ if (len < sizeof(card->longname))
+ usb_make_path(dev->udev, card->longname + len,
+ sizeof(card->longname) - len);
+
+ strlcat(card->longname,
+ dev->udev->speed == USB_SPEED_LOW ? ", low speed" :
+ dev->udev->speed == USB_SPEED_FULL ? ", full speed" :
+ ", high speed",
+ sizeof(card->longname));
+
+ rc = snd_tm6000_pcm(chip, 0, "tm6000 Digital");
+ if (rc < 0)
+ goto error;
+
+ rc = snd_card_register(card);
+ if (rc < 0)
+ goto error;
+
+
+ return 0;
+
+error:
+ snd_card_free(card);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tm6000_audio_init);
+
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
new file mode 100644
index 00000000000..6143e20d139
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -0,0 +1,973 @@
+/*
+ tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include <media/tvaudio.h>
+#include <media/i2c-addr.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include "tuner-xc2028.h"
+#include "xc5000.h"
+
+#define TM6000_BOARD_UNKNOWN 0
+#define TM5600_BOARD_GENERIC 1
+#define TM6000_BOARD_GENERIC 2
+#define TM6010_BOARD_GENERIC 3
+#define TM5600_BOARD_10MOONS_UT821 4
+#define TM5600_BOARD_10MOONS_UT330 5
+#define TM6000_BOARD_ADSTECH_DUAL_TV 6
+#define TM6000_BOARD_FREECOM_AND_SIMILAR 7
+#define TM6000_BOARD_ADSTECH_MINI_DUAL_TV 8
+#define TM6010_BOARD_HAUPPAUGE_900H 9
+#define TM6010_BOARD_BEHOLD_WANDER 10
+#define TM6010_BOARD_BEHOLD_VOYAGER 11
+#define TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE 12
+#define TM6010_BOARD_TWINHAN_TU501 13
+
+#define TM6000_MAXBOARDS 16
+static unsigned int card[] = {[0 ... (TM6000_MAXBOARDS - 1)] = UNSET };
+
+module_param_array(card, int, NULL, 0444);
+
+static unsigned long tm6000_devused;
+
+
+struct tm6000_board {
+ char *name;
+
+ struct tm6000_capabilities caps;
+
+ enum tm6000_devtype type; /* variant of the chipset */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+ int demod_addr; /* demodulator address */
+
+ struct tm6000_gpio gpio;
+};
+
+struct tm6000_board tm6000_boards[] = {
+ [TM6000_BOARD_UNKNOWN] = {
+ .name = "Unknown tm6000 video grabber",
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM5600_BOARD_GENERIC] = {
+ .name = "Generic tm5600 board",
+ .type = TM5600,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM6000_BOARD_GENERIC] = {
+ .name = "Generic tm6000 board",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM6010_BOARD_GENERIC] = {
+ .name = "Generic tm6010 board",
+ .type = TM6010,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM5600_BOARD_10MOONS_UT821] = {
+ .name = "10Moons UT 821",
+ .tuner_type = TUNER_XC2028,
+ .type = TM5600,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_eeprom = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_1,
+ },
+ },
+ [TM5600_BOARD_10MOONS_UT330] = {
+ .name = "10Moons UT 330",
+ .tuner_type = TUNER_PHILIPS_FQ1216AME_MK4,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_ADSTECH_DUAL_TV] = {
+ .name = "ADSTECH Dual TV USB",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_tda9874 = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_FREECOM_AND_SIMILAR] = {
+ .name = "Freecom Hybrid Stick / Moka DVB-T Receiver Dual",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_4,
+ },
+ },
+ [TM6000_BOARD_ADSTECH_MINI_DUAL_TV] = {
+ .name = "ADSTECH Mini Dual TV USB",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc8 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ },
+ .gpio = {
+ .tuner_reset = TM6000_GPIO_4,
+ },
+ },
+ [TM6010_BOARD_HAUPPAUGE_900H] = {
+ .name = "Hauppauge WinTV HVR-900H / WinTV USB2-Stick",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM6010_BOARD_BEHOLD_WANDER] = {
+ .name = "Beholder Wander DVB-T/TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_0,
+ .demod_reset = TM6010_GPIO_1,
+ .power_led = TM6010_GPIO_6,
+ },
+ },
+ [TM6010_BOARD_BEHOLD_VOYAGER] = {
+ .name = "Beholder Voyager TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_0,
+ .power_led = TM6010_GPIO_6,
+ },
+ },
+ [TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE] = {
+ .name = "Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ },
+ [TM6010_BOARD_TWINHAN_TU501] = {
+ .name = "Twinhan TU501(704D1)",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio = {
+ .tuner_reset = TM6010_GPIO_2,
+ .tuner_on = TM6010_GPIO_3,
+ .demod_reset = TM6010_GPIO_1,
+ .demod_on = TM6010_GPIO_4,
+ .power_led = TM6010_GPIO_7,
+ .dvb_led = TM6010_GPIO_5,
+ .ir = TM6010_GPIO_0,
+ },
+ }
+};
+
+/* table of devices that work with this driver */
+struct usb_device_id tm6000_id_table[] = {
+ { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_10MOONS_UT821 },
+ { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC },
+ { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV },
+ { USB_DEVICE(0x14aa, 0x0620), .driver_info = TM6000_BOARD_FREECOM_AND_SIMILAR },
+ { USB_DEVICE(0x06e1, 0xb339), .driver_info = TM6000_BOARD_ADSTECH_MINI_DUAL_TV },
+ { USB_DEVICE(0x2040, 0x6600), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6601), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6610), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x2040, 0x6611), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x6000, 0xdec0), .driver_info = TM6010_BOARD_BEHOLD_WANDER },
+ { USB_DEVICE(0x6000, 0xdec1), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER },
+ { USB_DEVICE(0x0ccd, 0x0086), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE },
+ { USB_DEVICE(0x0ccd, 0x00A5), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE },
+ { USB_DEVICE(0x13d3, 0x3240), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3241), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3243), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { USB_DEVICE(0x13d3, 0x3264), .driver_info = TM6010_BOARD_TWINHAN_TU501 },
+ { },
+};
+
+/* Tuner callback to provide the proper gpio changes needed for xc5000 */
+int tm6000_xc5000_callback(void *ptr, int component, int command, int arg)
+{
+ int rc = 0;
+ struct tm6000_core *dev = ptr;
+
+ if (dev->tuner_type != TUNER_XC5000)
+ return 0;
+
+ switch (command) {
+ case XC5000_TUNER_RESET:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(15);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(15);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ break;
+ }
+ return (rc);
+}
+
+
+/* Tuner callback to provide the proper gpio changes needed for xc2028 */
+
+int tm6000_tuner_callback(void *ptr, int component, int command, int arg)
+{
+ int rc = 0;
+ struct tm6000_core *dev = ptr;
+
+ if (dev->tuner_type != TUNER_XC2028)
+ return 0;
+
+ switch (command) {
+ case XC2028_RESET_CLK:
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, arg);
+ msleep(10);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc < 0)
+ return rc;
+ msleep(10);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ break;
+ case XC2028_TUNER_RESET:
+ /* Reset codes during load firmware */
+ switch (arg) {
+ case 0:
+ /* newer tuners can be reset faster */
+ switch (dev->model) {
+ case TM5600_BOARD_10MOONS_UT821:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x01);
+ msleep(10);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x00);
+ msleep(10);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ 0x300, 0x01);
+ break;
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(60);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(75);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(60);
+ break;
+ default:
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ msleep(130);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ msleep(130);
+ break;
+ }
+ break;
+ case 1:
+ tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, 0x01);
+ msleep(10);
+ break;
+
+ case 2:
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc < 0)
+ return rc;
+ msleep(100);
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ msleep(100);
+ break;
+ }
+ }
+ return rc;
+}
+
+int tm6000_cards_setup(struct tm6000_core *dev)
+{
+ int i, rc;
+
+ /*
+ * Board-specific initialization sequence. Handles all GPIO
+ * initialization sequences that are board-specific.
+ * Up to now, all known devices use GPIO1 and GPIO4 in the same way.
+ * Probably, they're all based on some reference device. Due to that,
+ * there's a common routine at the end to handle those GPIOs. Devices
+ * that use different pinouts or init sequences can just return from
+ * the board-specific section.
+ */
+ switch (dev->model) {
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ case TM6010_BOARD_GENERIC:
+ /* Turn xceive 3028 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_on, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00);
+ msleep(15);
+ /* Reset zarlink zl10353 */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00);
+ msleep(50);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 off */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x01);
+ msleep(15);
+ /* ir ? */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.ir, 0x01);
+ msleep(15);
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00);
+ msleep(15);
+ /* DVB led off (orange) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x01);
+ msleep(15);
+ /* Turn zarlink zl10353 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00);
+ msleep(15);
+ break;
+ case TM6010_BOARD_BEHOLD_WANDER:
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01);
+ msleep(15);
+ /* Reset zarlink zl10353 */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00);
+ msleep(50);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01);
+ msleep(15);
+ break;
+ case TM6010_BOARD_BEHOLD_VOYAGER:
+ /* Power led on (blue) */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01);
+ msleep(15);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Default initialization. Most of the devices seem to use GPIO1
+ * and GPIO4 in the same way, so this handles the common sequence
+ * used by most devices.
+ * If a device uses a different sequence or different GPIO pins for
+ * reset, just add the code at the board-specific part
+ */
+
+ if (dev->gpio.tuner_reset) {
+ for (i = 0; i < 2; i++) {
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x00);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i doing tuner reset\n", rc);
+ return rc;
+ }
+
+ msleep(10); /* Just to be conservative */
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->gpio.tuner_reset, 0x01);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i doing tuner reset\n", rc);
+ return rc;
+ }
+ msleep(10);
+
+ if (!i) {
+ rc = tm6000_get_reg32(dev, REQ_40_GET_VERSION, 0, 0);
+ if (rc >= 0)
+ printk(KERN_DEBUG "board=0x%08x\n", rc);
+ }
+ }
+ } else {
+ printk(KERN_ERR "Tuner reset is not configured\n");
+ return -1;
+ }
+
+ msleep(50);
+
+ return 0;
+};
+
+static void tm6000_config_tuner(struct tm6000_core *dev)
+{
+ struct tuner_setup tun_setup;
+
+ /* Load tuner module */
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tuner", "tuner", dev->tuner_addr, NULL);
+
+ memset(&tun_setup, 0, sizeof(tun_setup));
+ tun_setup.type = dev->tuner_type;
+ tun_setup.addr = dev->tuner_addr;
+
+ tun_setup.mode_mask = 0;
+ if (dev->caps.has_tuner)
+ tun_setup.mode_mask |= (T_ANALOG_TV | T_RADIO);
+ if (dev->caps.has_dvb)
+ tun_setup.mode_mask |= T_DIGITAL_TV;
+
+ switch (dev->tuner_type) {
+ case TUNER_XC2028:
+ tun_setup.tuner_callback = tm6000_tuner_callback;
+ break;
+ case TUNER_XC5000:
+ tun_setup.tuner_callback = tm6000_xc5000_callback;
+ break;
+ }
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+
+ switch (dev->tuner_type) {
+ case TUNER_XC2028: {
+ struct v4l2_priv_tun_config xc2028_cfg;
+ struct xc2028_ctrl ctl;
+
+ memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+ memset(&ctl, 0, sizeof(ctl));
+
+ ctl.input1 = 1;
+ ctl.read_not_reliable = 0;
+ ctl.msleep = 10;
+ ctl.demod = XC3028_FE_ZARLINK456;
+ ctl.vhfbw7 = 1;
+ ctl.uhfbw8 = 1;
+ xc2028_cfg.tuner = TUNER_XC2028;
+ xc2028_cfg.priv = &ctl;
+
+ switch (dev->model) {
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE:
+ case TM6010_BOARD_TWINHAN_TU501:
+ ctl.fname = "xc3028L-v36.fw";
+ break;
+ default:
+ if (dev->dev_type == TM6010)
+ ctl.fname = "xc3028-v27.fw";
+ else
+ ctl.fname = "xc3028-v24.fw";
+ }
+
+ printk(KERN_INFO "Setting firmware parameters for xc2028\n");
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config,
+ &xc2028_cfg);
+
+ }
+ break;
+ case TUNER_XC5000:
+ {
+ struct v4l2_priv_tun_config xc5000_cfg;
+ struct xc5000_config ctl = {
+ .i2c_address = dev->tuner_addr,
+ .if_khz = 4570,
+ .radio_input = XC5000_RADIO_FM1,
+ };
+
+ xc5000_cfg.tuner = TUNER_XC5000;
+ xc5000_cfg.priv = &ctl;
+
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config,
+ &xc5000_cfg);
+ }
+ break;
+ default:
+ printk(KERN_INFO "Unknown tuner type. Tuner is not configured.\n");
+ break;
+ }
+}
+
+static int tm6000_init_dev(struct tm6000_core *dev)
+{
+ struct v4l2_frequency f;
+ int rc = 0;
+
+ mutex_init(&dev->lock);
+
+ mutex_lock(&dev->lock);
+
+ /* Initialize board-specific data */
+ dev->dev_type = tm6000_boards[dev->model].type;
+ dev->tuner_type = tm6000_boards[dev->model].tuner_type;
+ dev->tuner_addr = tm6000_boards[dev->model].tuner_addr;
+
+ dev->gpio = tm6000_boards[dev->model].gpio;
+
+ dev->demod_addr = tm6000_boards[dev->model].demod_addr;
+
+ dev->caps = tm6000_boards[dev->model].caps;
+
+ /* initialize hardware */
+ rc = tm6000_init(dev);
+ if (rc < 0)
+ goto err;
+
+ rc = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+ if (rc < 0)
+ goto err;
+
+ /* register i2c bus */
+ rc = tm6000_i2c_register(dev);
+ if (rc < 0)
+ goto err;
+
+ /* Default values for STD and resolutions */
+ dev->width = 720;
+ dev->height = 480;
+ dev->norm = V4L2_STD_PAL_M;
+
+ /* Configure tuner */
+ tm6000_config_tuner(dev);
+
+ /* Set video standard */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ /* Set tuner frequency - also loads firmware on xc2028/xc3028 */
+ f.tuner = 0;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ f.frequency = 3092; /* 193.25 MHz */
+ dev->freq = f.frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+
+ if (dev->caps.has_tda9874)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tvaudio", "tvaudio", I2C_ADDR_TDA9874, NULL);
+
+ /* register and initialize V4L2 */
+ rc = tm6000_v4l2_register(dev);
+ if (rc < 0)
+ goto err;
+
+ if (dev->caps.has_dvb) {
+ dev->dvb = kzalloc(sizeof(*(dev->dvb)), GFP_KERNEL);
+ if (!dev->dvb) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ rc = tm6000_dvb_register(dev);
+ if (rc < 0) {
+ kfree(dev->dvb);
+ dev->dvb = NULL;
+ goto err2;
+ }
+#endif
+ }
+ mutex_unlock(&dev->lock);
+ return 0;
+
+err2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
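+/* e.g. wMaxPacketSize = 0x1400: base packet size 0x400 (1024) bytes,
+ * hb_mult = 3 transactions per microframe, 3072 usable bytes in total
+ */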
+
+static void get_max_endpoint(struct usb_device *udev,
+ struct usb_host_interface *alt,
+ char *msgtype,
+ struct usb_host_endpoint *curr_e,
+ struct tm6000_endpoint *tm_ep)
+{
+ u16 tmp = le16_to_cpu(curr_e->desc.wMaxPacketSize);
+ unsigned int size = tmp & 0x7ff;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult (tmp);
+
+ if (size > tm_ep->maxsize) {
+ tm_ep->endp = curr_e;
+ tm_ep->maxsize = size;
+ tm_ep->bInterfaceNumber = alt->desc.bInterfaceNumber;
+ tm_ep->bAlternateSetting = alt->desc.bAlternateSetting;
+
+ printk(KERN_INFO "tm6000: %s endpoint: 0x%02x (max size=%u bytes)\n",
+ msgtype, curr_e->desc.bEndpointAddress,
+ size);
+ }
+}
+
+/*
+ * tm6000_usb_probe()
+ * checks for supported devices
+ */
+static int tm6000_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev;
+ struct tm6000_core *dev = NULL;
+ int i, rc = 0;
+ int nr = 0;
+ char *speed;
+
+ usbdev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* Selects the proper interface */
+ rc = usb_set_interface(usbdev, 0, 1);
+ if (rc < 0)
+ goto err;
+
+ /* Check to see next free device and mark as used */
+ nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS);
+ if (nr >= TM6000_MAXBOARDS) {
+ printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS);
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+
+ /* Create and initialize dev struct */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ printk(KERN_ERR "tm6000" ": out of memory!\n");
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+ spin_lock_init(&dev->slock);
+
+ /* Increment usage count */
+ tm6000_devused |= 1<<nr;
+ snprintf(dev->name, 29, "tm6000 #%d", nr);
+
+ dev->model = id->driver_info;
+ if ((card[nr] >= 0) && (card[nr] < ARRAY_SIZE(tm6000_boards)))
+ dev->model = card[nr];
+
+ dev->udev = usbdev;
+ dev->devno = nr;
+
+ switch (usbdev->speed) {
+ case USB_SPEED_LOW:
+ speed = "1.5";
+ break;
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_FULL:
+ speed = "12";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "480";
+ break;
+ default:
+ speed = "unknown";
+ }
+
+
+
+ /* Get endpoints */
+ for (i = 0; i < interface->num_altsetting; i++) {
+ int ep;
+
+ for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ int dir_out;
+
+ e = &interface->altsetting[i].endpoint[ep];
+
+ dir_out = ((e->desc.bEndpointAddress &
+ USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+
+ printk(KERN_INFO "tm6000: alt %d, interface %i, class %i\n",
+ i,
+ interface->altsetting[i].desc.bInterfaceNumber,
+ interface->altsetting[i].desc.bInterfaceClass);
+
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (!dir_out) {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "Bulk IN", e,
+ &dev->bulk_in);
+ } else {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "Bulk OUT", e,
+ &dev->bulk_out);
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!dir_out) {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "ISOC IN", e,
+ &dev->isoc_in);
+ } else {
+ get_max_endpoint(usbdev,
+ &interface->altsetting[i],
+ "ISOC OUT", e,
+ &dev->isoc_out);
+ }
+ break;
+ }
+ }
+ }
+
+
+ printk(KERN_INFO "tm6000: New video device @ %s Mbps (%04x:%04x, ifnum %d)\n",
+ speed,
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct),
+ interface->altsetting->desc.bInterfaceNumber);
+
+/* check if the device has the ISO IN endpoint at the correct place */
+ if (!dev->isoc_in.endp) {
+ printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n");
+ rc = -ENODEV;
+
+ goto err;
+ }
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
+ printk(KERN_INFO "tm6000: Found %s\n", tm6000_boards[dev->model].name);
+
+ rc = tm6000_init_dev(dev);
+
+ if (rc < 0)
+ goto err;
+
+ return 0;
+
+err:
+ printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
+
+ tm6000_devused &= ~(1<<nr);
+ usb_put_dev(usbdev);
+
+ kfree(dev);
+ return rc;
+}
+
+/*
+ * tm6000_usb_disconnect()
+ * called when the device gets disconnected
+ * video device will be unregistered on v4l2_close in case it is still open
+ */
+static void tm6000_usb_disconnect(struct usb_interface *interface)
+{
+ struct tm6000_core *dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ if (!dev)
+ return;
+
+ printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name);
+
+ mutex_lock(&dev->lock);
+
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ if (dev->dvb) {
+ tm6000_dvb_unregister(dev);
+ kfree(dev->dvb);
+ }
+#endif
+
+ tm6000_v4l2_unregister(dev);
+
+ tm6000_i2c_unregister(dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ dev->state |= DEV_DISCONNECTED;
+
+ usb_put_dev(dev->udev);
+
+ mutex_unlock(&dev->lock);
+ kfree(dev);
+}
+
+static struct usb_driver tm6000_usb_driver = {
+ .name = "tm6000",
+ .probe = tm6000_usb_probe,
+ .disconnect = tm6000_usb_disconnect,
+ .id_table = tm6000_id_table,
+};
+
+static int __init tm6000_module_init(void)
+{
+ int result;
+
+ printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
+ (TM6000_VERSION >> 16) & 0xff,
+ (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff);
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&tm6000_usb_driver);
+ if (result)
+ printk(KERN_ERR "tm6000"
+ " usb_register failed. Error number %d.\n", result);
+
+ return result;
+}
+
+static void __exit tm6000_module_exit(void)
+{
+ /* deregister at USB subsystem */
+ usb_deregister(&tm6000_usb_driver);
+}
+
+module_init(tm6000_module_init);
+module_exit(tm6000_module_exit);
+
+MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/tm6000/tm6000-core.c b/drivers/staging/tm6000/tm6000-core.c
new file mode 100644
index 00000000000..bfbc53bd291
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-core.c
@@ -0,0 +1,602 @@
+/*
+ tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+
+#define USB_TIMEOUT (5*HZ) /* ms */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 req_type, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len)
+{
+ int ret, i;
+ unsigned int pipe;
+ static int ini=0, last=0, n=0;
+ u8 *data=NULL;
+
+ if (len)
+ data = kzalloc(len, GFP_KERNEL);
+
+
+ if (req_type & USB_DIR_IN)
+ pipe=usb_rcvctrlpipe(dev->udev, 0);
+ else {
+ pipe=usb_sndctrlpipe(dev->udev, 0);
+ memcpy(data, buf, len);
+ }
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (!ini)
+ last=ini=jiffies;
+
+ printk("%06i (dev %p, pipe %08x): ", n, dev->udev, pipe);
+
+ printk( "%s: %06u ms %06u ms %02x %02x %02x %02x %02x %02x %02x %02x ",
+ (req_type & USB_DIR_IN)?" IN":"OUT",
+ jiffies_to_msecs(jiffies-last),
+ jiffies_to_msecs(jiffies-ini),
+ req_type, req,value&0xff,value>>8, index&0xff, index>>8,
+ len&0xff, len>>8);
+ last=jiffies;
+ n++;
+
+ if ( !(req_type & USB_DIR_IN) ) {
+ printk(">>> ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ ret = usb_control_msg(dev->udev, pipe, req, req_type, value, index, data,
+ len, USB_TIMEOUT);
+
+ if (req_type & USB_DIR_IN)
+ memcpy(buf, data, len);
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (ret<0) {
+ if (req_type & USB_DIR_IN)
+ printk("<<< (len=%d)\n",len);
+
+ printk("%s: Error #%d\n", __FUNCTION__, ret);
+ } else if (req_type & USB_DIR_IN) {
+ printk("<<< ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ kfree(data);
+
+ msleep(5);
+
+ return ret;
+}
+
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ return
+ tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ req, value, index, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(tm6000_set_reg);
+
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[1];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 1);
+
+ if (rc<0)
+ return rc;
+
+ return *buf;
+}
+EXPORT_SYMBOL_GPL(tm6000_get_reg);
+
+int tm6000_get_reg16 (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[2];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 2);
+
+ if (rc<0)
+ return rc;
+
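+ /* the two register bytes arrive MSB first */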
+ return buf[1]|buf[0]<<8;
+}
+
+int tm6000_get_reg32 (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[4];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 4);
+
+ if (rc<0)
+ return rc;
+
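+ /* the four register bytes arrive MSB first; assemble them into a host-order value */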
+ return buf[3] | buf[2] << 8 | buf[1] << 16 | buf[0] << 24;
+}
+
+void tm6000_set_fourcc_format(struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0) & 0xfc;
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ else
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val | 1);
+ } else {
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0);
+ else
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0x90);
+ }
+}
+
+int tm6000_init_analog_mode (struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ /* Enable video */
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
+ val |= 0x60;
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ val = tm6000_get_reg(dev,
+ TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
+ val &= ~0x40;
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
+
+ /* Init teletext */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_R41_TELETEXT_VBI_CODE1, 0x27);
+ tm6000_set_reg(dev, TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55);
+ tm6000_set_reg(dev, TM6010_REQ07_R43_VBI_DATA_TYPE_LINE7, 0x66);
+ tm6000_set_reg(dev, TM6010_REQ07_R44_VBI_DATA_TYPE_LINE8, 0x66);
+ tm6000_set_reg(dev, TM6010_REQ07_R45_VBI_DATA_TYPE_LINE9, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R46_VBI_DATA_TYPE_LINE10, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R47_VBI_DATA_TYPE_LINE11, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R48_VBI_DATA_TYPE_LINE12, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R49_VBI_DATA_TYPE_LINE13, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4A_VBI_DATA_TYPE_LINE14, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4B_VBI_DATA_TYPE_LINE15, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4C_VBI_DATA_TYPE_LINE16, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4D_VBI_DATA_TYPE_LINE17, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4E_VBI_DATA_TYPE_LINE18, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R4F_VBI_DATA_TYPE_LINE19, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R50_VBI_DATA_TYPE_LINE20, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R52_VBI_DATA_TYPE_LINE22, 0x66);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R53_VBI_DATA_TYPE_LINE23, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R54_VBI_DATA_TYPE_RLINES, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R56_VBI_LOOP_FILTER_I_GAIN, 0x00);
+ tm6000_set_reg(dev,
+ TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02);
+ tm6000_set_reg(dev, TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35);
+ tm6000_set_reg(dev, TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ07_R5A_VBI_TELETEXT_DTO1, 0x11);
+ tm6000_set_reg(dev, TM6010_REQ07_R5B_VBI_TELETEXT_DTO0, 0x4c);
+ tm6000_set_reg(dev, TM6010_REQ07_R40_TELETEXT_VBI_CODE0, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00);
+
+
+ /* Init audio */
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R02_A_FIX_GAIN_CTRL, 0x04);
+ tm6000_set_reg(dev, TM6010_REQ08_R03_A_AUTO_GAIN_CTRL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R04_A_SIF_AMP_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R05_A_STANDARD_MOD, 0x05);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x06);
+ tm6000_set_reg(dev, TM6010_REQ08_R07_A_LEFT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R08_A_RIGHT_VOL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R09_A_MAIN_VOL, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ08_R0A_A_I2S_MOD, 0x91);
+ tm6000_set_reg(dev, TM6010_REQ08_R0B_A_ASD_THRES1, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0C_A_ASD_THRES2, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R0D_A_AMD_THRES, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R0E_A_MONO_THRES1, 0xf0);
+ tm6000_set_reg(dev, TM6010_REQ08_R0F_A_MONO_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R10_A_MUTE_THRES1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ08_R11_A_MUTE_THRES2, 0x80);
+ tm6000_set_reg(dev, TM6010_REQ08_R12_A_AGC_U, 0x12);
+ tm6000_set_reg(dev, TM6010_REQ08_R13_A_AGC_ERR_T, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R14_A_AGC_GAIN_INIT, 0x20);
+ tm6000_set_reg(dev, TM6010_REQ08_R15_A_AGC_STEP_THR, 0x14);
+ tm6000_set_reg(dev, TM6010_REQ08_R16_A_AGC_GAIN_MAX, 0xfe);
+ tm6000_set_reg(dev, TM6010_REQ08_R17_A_AGC_GAIN_MIN, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ08_R18_A_TR_CTRL, 0xa0);
+ tm6000_set_reg(dev, TM6010_REQ08_R19_A_FH_2FH_GAIN, 0x32);
+ tm6000_set_reg(dev, TM6010_REQ08_R1A_A_NICAM_SER_MAX, 0x64);
+ tm6000_set_reg(dev, TM6010_REQ08_R1B_A_NICAM_SER_MIN, 0x20);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1c, 0x00);
+ tm6000_set_reg(dev, REQ_08_SET_GET_AVREG_BIT, 0x1d, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT, 0x13);
+ tm6000_set_reg(dev, TM6010_REQ08_R1F_A_TEST_INTF_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R20_A_TEST_PIN_SEL, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3);
+ tm6000_set_reg(dev, TM6010_REQ08_R06_A_SOUND_MOD, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x80);
+
+ } else {
+ /* Enables soft reset */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+
+ if (dev->scaler) {
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x20);
+ } else {
+ /* Enable Hfilter and disable TS Drop err */
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x80);
+ }
+
+ tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x88);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x23);
+ tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x06);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x1f);
+
+ /* AP Software reset */
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00);
+
+ tm6000_set_fourcc_format(dev);
+
+ /* Disables soft reset */
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x00);
+
+ /* E3: Select input 0 - TV tuner */
+ tm6000_set_reg(dev, TM6010_REQ07_RE3_OUT_SEL1, 0x00);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xeb, 0x60);
+
+ /* This controls input */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_2, 0x0);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_3, 0x01);
+ }
+ msleep(20);
+
+ /* Tuner firmware can now be loaded */
+
+ /*FIXME: Hack!!! */
+ {
+ struct v4l2_frequency f;
+
+ mutex_lock(&dev->lock);
+ f.frequency = dev->freq;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+ mutex_unlock(&dev->lock);
+ }
+
+ msleep(100);
+ tm6000_set_standard (dev, &dev->norm);
+ tm6000_set_audio_bitrate (dev,48000);
+
+ return 0;
+}
+
+int tm6000_init_digital_mode (struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+ u8 buf[2];
+
+ /* digital init */
+ val = tm6000_get_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0);
+ val &= ~0x60;
+ tm6000_set_reg(dev, TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, val);
+ val = tm6000_get_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0);
+ val |= 0x40;
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, val);
+ tm6000_set_reg(dev, TM6010_REQ07_RFE_POWER_DOWN, 0x28);
+ tm6000_set_reg(dev, TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xfc);
+ tm6000_set_reg(dev, TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0xff);
+ tm6000_set_reg(dev, TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe);
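+ /* vendor IN request 0x0e (0xc0 = USB_DIR_IN | USB_TYPE_VENDOR); its purpose is unclear, the result is only logged below */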
+ tm6000_read_write_usb (dev, 0xc0, 0x0e, 0x00c2, 0x0008, buf, 2);
+ printk (KERN_INFO "buf %#x %#x \n", buf[0], buf[1]);
+
+
+ } else {
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RFF_SOFT_RESET, 0x00);
+ tm6000_set_reg(dev, TM6010_REQ07_R3F_RESET, 0x01);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x08);
+ tm6000_set_reg(dev, TM6010_REQ07_RE2_OUT_SEL2, 0x0c);
+ tm6000_set_reg(dev, TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x40);
+ tm6000_set_reg(dev, TM6010_REQ07_RC1_TRESHOLD, 0xd0);
+ tm6000_set_reg(dev, TM6010_REQ07_RC3_HSTART1, 0x09);
+ tm6000_set_reg(dev, TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x37);
+ tm6000_set_reg(dev, TM6010_REQ07_RD1_ADDR_FOR_REQ1, 0xd8);
+ tm6000_set_reg(dev, TM6010_REQ07_RD2_ADDR_FOR_REQ2, 0xc0);
+ tm6000_set_reg(dev, TM6010_REQ07_RD6_ENDP_REQ1_REQ2, 0x60);
+
+ tm6000_set_reg(dev, TM6010_REQ07_RE2_OUT_SEL2, 0x0c);
+ tm6000_set_reg(dev, TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0x08);
+ msleep(50);
+
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x01);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(100);
+ }
+ return 0;
+}
+
+struct reg_init {
+ u8 req;
+ u8 reg;
+ u8 val;
+};
+
+/* The meaning of these initializations is unknown */
+struct reg_init tm6000_init_tab[] = {
+ /* REG VALUE */
+ { TM6010_REQ07_RD8_IR_PULSE_CNT0, 0x1f },
+ { TM6010_REQ07_RFF_SOFT_RESET, 0x08 },
+ { TM6010_REQ07_RFF_SOFT_RESET, 0x00 },
+ { TM6010_REQ07_RD5_POWERSAVE, 0x4f },
+ { TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0x23 },
+ { TM6010_REQ07_RD8_IR_WAKEUP_ADD, 0x08 },
+ { TM6010_REQ07_RE2_OUT_SEL2, 0x00 },
+ { TM6010_REQ07_RE3_OUT_SEL1, 0x10 },
+ { TM6010_REQ07_RE5_REMOTE_WAKEUP, 0x00 },
+ { TM6010_REQ07_RE8_TYPESEL_MOS_I2S, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xeb, 0x64 }, /* 48 kHz sampling rate, external input */
+ { REQ_07_SET_GET_AVREG, 0xee, 0xc2 },
+ { TM6010_REQ07_R3F_RESET, 0x01 }, /* Start of soft reset */
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 },
+ { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 },
+ { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 },
+ { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 },
+ { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a },
+ { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 },
+ { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 },
+ { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b },
+ { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 },
+ { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f },
+ { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c },
+ { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c },
+ { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a },
+ { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 },
+ { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 },
+ { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a },
+ { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 },
+ { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 },
+ { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 },
+ { TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 },
+ { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 },
+ { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 },
+ { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_RC1_TRESHOLD, 0xd0 },
+ { TM6010_REQ07_RC3_HSTART1, 0x88 },
+ { TM6010_REQ07_R3F_RESET, 0x00 }, /* End of the soft reset */
+ { TM6010_REQ05_R18_IMASK7, 0x00 },
+};
+
+struct reg_init tm6010_init_tab[] = {
+ { TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE, 0x00 },
+ { TM6010_REQ07_RC4_HSTART0, 0xa0 },
+ { TM6010_REQ07_RC6_HEND0, 0x40 },
+ { TM6010_REQ07_RCA_VEND0, 0x31 },
+ { TM6010_REQ07_RCC_ACTIVE_VIDEO_IF, 0xe1 },
+ { TM6010_REQ07_RE0_DVIDEO_SOURCE, 0x03 },
+ { TM6010_REQ07_RFE_POWER_DOWN, 0x7f },
+
+ { TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0 },
+ { TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4 },
+ { TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8 },
+ { TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00 },
+ { TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2 },
+ { TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0 },
+ { TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2 },
+ { TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60 },
+ { TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc },
+
+ { TM6010_REQ07_R3F_RESET, 0x01 },
+ { TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00 },
+ { TM6010_REQ07_R01_VIDEO_CONTROL1, 0x07 },
+ { TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f },
+ { TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00 },
+ { TM6010_REQ07_R05_NOISE_THRESHOLD, 0x64 },
+ { TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01 },
+ { TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0x82 },
+ { TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0x36 },
+ { TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0x50 },
+ { TM6010_REQ07_R0C_CHROMA_AGC_CONTROL, 0x6a },
+ { TM6010_REQ07_R11_AGC_PEAK_CONTROL, 0xc9 },
+ { TM6010_REQ07_R12_AGC_GATE_STARTH, 0x07 },
+ { TM6010_REQ07_R13_AGC_GATE_STARTL, 0x3b },
+ { TM6010_REQ07_R14_AGC_GATE_WIDTH, 0x47 },
+ { TM6010_REQ07_R15_AGC_BP_DELAY, 0x6f },
+ { TM6010_REQ07_R17_HLOOP_MAXSTATE, 0xcd },
+ { TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e },
+ { TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b },
+ { TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2 },
+ { TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9 },
+ { TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c },
+ { TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc },
+ { TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc },
+ { TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd },
+ { TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME, 0x3c },
+ { TM6010_REQ07_R21_HSYNC_PHASE_OFFSET, 0x3c },
+ { TM6010_REQ07_R2D_CHROMA_BURST_END, 0x48 },
+ { TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88 },
+ { TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22 },
+ { TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61 },
+ { TM6010_REQ07_R32_VSYNC_HLOCK_MIN, 0x74 },
+ { TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c },
+ { TM6010_REQ07_R34_VSYNC_AGC_MIN, 0x74 },
+ { TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c },
+ { TM6010_REQ07_R36_VSYNC_VBI_MIN, 0x7a },
+ { TM6010_REQ07_R37_VSYNC_VBI_MAX, 0x26 },
+ { TM6010_REQ07_R38_VSYNC_THRESHOLD, 0x40 },
+ { TM6010_REQ07_R39_VSYNC_TIME_CONSTANT, 0x0a },
+ { TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL, 0x55 },
+ { TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21, 0x11 },
+ { TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN, 0x01 },
+ { TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN, 0x02 },
+ { TM6010_REQ07_R58_VBI_CAPTION_DTO1, 0x35 },
+ { TM6010_REQ07_R59_VBI_CAPTION_DTO0, 0xa0 },
+ { TM6010_REQ07_R80_COMB_FILTER_TRESHOLD, 0x15 },
+ { TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42 },
+ { TM6010_REQ07_RC1_TRESHOLD, 0xd0 },
+ { TM6010_REQ07_RC3_HSTART1, 0x88 },
+ { TM6010_REQ07_R3F_RESET, 0x00 },
+
+ { TM6010_REQ05_R18_IMASK7, 0x00 },
+
+ { TM6010_REQ07_RD8_IR_LEADER1, 0xaa },
+ { TM6010_REQ07_RD8_IR_LEADER0, 0x30 },
+ { TM6010_REQ07_RD8_IR_PULSE_CNT1, 0x20 },
+ { TM6010_REQ07_RD8_IR_PULSE_CNT0, 0xd0 },
+ { REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x00 },
+ { TM6010_REQ07_RD8_IR, 0x2f },
+
+ /* set remote wakeup key: any key wakes the device */
+ { TM6010_REQ07_RE5_REMOTE_WAKEUP, 0xfe },
+ { TM6010_REQ07_RD8_IR_WAKEUP_SEL, 0xff },
+};
+
+int tm6000_init (struct tm6000_core *dev)
+{
+ int board, rc=0, i, size;
+ struct reg_init *tab;
+
+ if (dev->dev_type == TM6010) {
+ tab = tm6010_init_tab;
+ size = ARRAY_SIZE(tm6010_init_tab);
+ } else {
+ tab = tm6000_init_tab;
+ size = ARRAY_SIZE(tm6000_init_tab);
+ }
+
+ /* Load board's initialization table */
+ for (i=0; i< size; i++) {
+ rc= tm6000_set_reg (dev, tab[i].req, tab[i].reg, tab[i].val);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i while setting req %d, "
+ "reg %d to value %d\n", rc,
+ tab[i].req,tab[i].reg, tab[i].val);
+ return rc;
+ }
+ }
+
+ msleep(5); /* Just to be conservative */
+
+ /* Check board version - maybe 10Moons specific */
+ board=tm6000_get_reg32 (dev, REQ_40_GET_VERSION, 0, 0);
+ if (board >=0) {
+ printk (KERN_INFO "Board version = 0x%08x\n",board);
+ } else {
+ printk (KERN_ERR "Error %i while retrieving board version\n",board);
+ }
+
+ rc = tm6000_cards_setup(dev);
+
+ return rc;
+}
+
+int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate)
+{
+ int val;
+
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, 0x0);
+printk("Original value=%d\n",val);
+ if (val<0)
+ return val;
+
+ val &= 0x0f; /* Preserve the audio input control bits */
+ switch (bitrate) {
+ case 44100:
+ val|=0xd0;
+ dev->audio_bitrate=bitrate;
+ break;
+ case 48000:
+ val|=0x60;
+ dev->audio_bitrate=bitrate;
+ break;
+ }
+ val=tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, val);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(tm6000_set_audio_bitrate);
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/staging/tm6000/tm6000-dvb.c
new file mode 100644
index 00000000000..eafc89c22b6
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-dvb.c
@@ -0,0 +1,346 @@
+/*
+ tm6000-dvb.c - dvb-t support for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#include "zl10353.h"
+
+#include <media/tuner.h>
+
+#include "tuner-xc2028.h"
+
+static inline void print_err_status(struct tm6000_core *dev,
+ int packet, int status)
+{
+ char *errmsg = "Unknown";
+
+ switch(status) {
+ case -ENOENT:
+ errmsg = "unlinked synchronuously";
+ break;
+ case -ECONNRESET:
+ errmsg = "unlinked asynchronuously";
+ break;
+ case -ENOSR:
+ errmsg = "Buffer error (overrun)";
+ break;
+ case -EPIPE:
+ errmsg = "Stalled (device not responding)";
+ break;
+ case -EOVERFLOW:
+ errmsg = "Babble (bad cable?)";
+ break;
+ case -EPROTO:
+ errmsg = "Bit-stuff error (bad cable?)";
+ break;
+ case -EILSEQ:
+ errmsg = "CRC/Timeout (could be anything)";
+ break;
+ case -ETIME:
+ errmsg = "Device does not respond";
+ break;
+ }
+ if (packet<0) {
+ dprintk(dev, 1, "URB status %d [%s].\n",
+ status, errmsg);
+ } else {
+ dprintk(dev, 1, "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
+ }
+}
+
+static void tm6000_urb_received(struct urb *urb)
+{
+ int ret;
+ struct tm6000_core* dev = urb->context;
+
+ if(urb->status != 0) {
+ print_err_status (dev,0,urb->status);
+ }
+ else if(urb->actual_length>0){
+ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
+ urb->actual_length);
+ }
+
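+ /* keep the bulk URB circulating as long as at least one feed is active */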
+ if(dev->dvb->streams > 0) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %s\n", __FUNCTION__);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+}
+
+int tm6000_start_stream(struct tm6000_core *dev)
+{
+ int ret;
+ unsigned int pipe, size;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got start stream request %s\n",__FUNCTION__);
+
+ tm6000_init_digital_mode(dev);
+
+ dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if(dvb->bulk_urb == NULL) {
+ printk(KERN_ERR "tm6000: couldn't allocate urb\n");
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK);
+
+ size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+ size = size * 15; /* 512 x 8 or 12 or 15 */
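+ /* the transfer buffer therefore holds 15 maximum-size bulk packets of TS data */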
+
+ dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
+ if(dvb->bulk_urb->transfer_buffer == NULL) {
+ usb_free_urb(dvb->bulk_urb);
+ printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
+ return -ENOMEM;
+ }
+
+ usb_fill_bulk_urb(dvb->bulk_urb, dev->udev, pipe,
+ dvb->bulk_urb->transfer_buffer,
+ size,
+ tm6000_urb_received, dev);
+
+ ret = usb_clear_halt(dev->udev, pipe);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",ret,__FUNCTION__);
+ return ret;
+ }
+ else {
+ printk(KERN_ERR "tm6000: pipe resetted\n");
+ }
+
+/* mutex_lock(&tm6000_driver.open_close_mutex); */
+ ret = usb_submit_urb(dvb->bulk_urb, GFP_KERNEL);
+
+/* mutex_unlock(&tm6000_driver.open_close_mutex); */
+ if (ret) {
+ printk(KERN_ERR "tm6000: submit of urb failed (error=%i)\n",ret);
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ return ret;
+ }
+
+ return 0;
+}
+
+void tm6000_stop_stream(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dvb->bulk_urb) {
+ printk (KERN_INFO "urb killing\n");
+ usb_kill_urb(dvb->bulk_urb);
+ printk (KERN_INFO "urb buffer free\n");
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
+ }
+}
+
+int tm6000_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+ printk(KERN_INFO "tm6000: got start feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+ if(dvb->streams == 0) {
+ dvb->streams = 1;
+/* mutex_init(&tm6000_dev->streming_mutex); */
+ tm6000_start_stream(dev);
+ }
+ else {
+ ++(dvb->streams);
+ }
+ mutex_unlock(&dvb->mutex);
+
+ return 0;
+}
+
+int tm6000_stop_feed(struct dvb_demux_feed *feed) {
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got stop feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+
+ printk (KERN_INFO "stream %#x\n", dvb->streams);
+ --(dvb->streams);
+ if(dvb->streams == 0) {
+ printk (KERN_INFO "stop stream\n");
+ tm6000_stop_stream(dev);
+/* mutex_destroy(&tm6000_dev->streaming_mutex); */
+ }
+ mutex_unlock(&dvb->mutex);
+/* mutex_destroy(&tm6000_dev->streaming_mutex); */
+
+ return 0;
+}
+
+int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dev->caps.has_zl10353) {
+ struct zl10353_config config =
+ {.demod_address = dev->demod_addr,
+ .no_tuner = 1,
+ .parallel_ts = 1,
+ .if2 = 45700,
+ .disable_i2c_gate_ctrl = 1,
+ };
+
+ dvb->frontend = dvb_attach(zl10353_attach, &config,
+ &dev->i2c_adap);
+ }
+ else {
+ printk(KERN_ERR "tm6000: no frontend defined for the device!\n");
+ return -1;
+ }
+
+ return (!dvb->frontend) ? -1 : 0;
+}
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+int tm6000_dvb_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ mutex_init(&dvb->mutex);
+
+ dvb->streams = 0;
+
+ /* attach the frontend */
+ ret = tm6000_dvb_attach_frontend(dev);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: couldn't attach the frontend!\n");
+ goto err;
+ }
+
+ ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
+ THIS_MODULE, &dev->udev->dev, adapter_nr);
+ dvb->adapter.priv = dev;
+
+ if (dvb->frontend) {
+ struct xc2028_config cfg = {
+ .i2c_adap = &dev->i2c_adap,
+ .i2c_addr = dev->tuner_addr,
+ };
+
+ dvb->frontend->callback = tm6000_tuner_callback;
+ ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "tm6000: couldn't register frontend\n");
+ goto adapter_err;
+ }
+
+ if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
+ printk(KERN_ERR "tm6000: couldn't register "
+ "frontend (xc3028)\n");
+ ret = -EINVAL;
+ goto frontend_err;
+ }
+ printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
+ "attached to frontend!\n");
+ } else {
+ printk(KERN_ERR "tm6000: no frontend found\n");
+ }
+
+ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING
+ | DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dev;
+ dvb->demux.filternum = 8;
+ dvb->demux.feednum = 8;
+ dvb->demux.start_feed = tm6000_start_feed;
+ dvb->demux.stop_feed = tm6000_stop_feed;
+ dvb->demux.write_to_decoder = NULL;
+ ret = dvb_dmx_init(&dvb->demux);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
+ goto frontend_err;
+ }
+
+ dvb->dmxdev.filternum = dev->dvb->demux.filternum;
+ dvb->dmxdev.demux = &dev->dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+
+ ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
+ goto dvb_dmx_err;
+ }
+
+ return 0;
+
+dvb_dmx_err:
+ dvb_dmx_release(&dvb->demux);
+frontend_err:
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+adapter_err:
+ dvb_unregister_adapter(&dvb->adapter);
+err:
+ return ret;
+}
+
+void tm6000_dvb_unregister(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dvb->bulk_urb != NULL) {
+ struct urb *bulk_urb = dvb->bulk_urb;
+
+ kfree(bulk_urb->transfer_buffer);
+ bulk_urb->transfer_buffer = NULL;
+ usb_unlink_urb(bulk_urb);
+ usb_free_urb(bulk_urb);
+ }
+
+/* mutex_lock(&tm6000_driver.open_close_mutex); */
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+
+ dvb_dmxdev_release(&dvb->dmxdev);
+ dvb_dmx_release(&dvb->demux);
+ dvb_unregister_adapter(&dvb->adapter);
+ mutex_destroy(&dvb->mutex);
+/* mutex_unlock(&tm6000_driver.open_close_mutex); */
+
+}
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
new file mode 100644
index 00000000000..94ff489a1bb
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -0,0 +1,370 @@
+/*
+ tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fix SMBus Read Byte command
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include "tuner-xc2028.h"
+
+
+/*FIXME: Hack to avoid needing to patch i2c-id.h */
+#define I2C_HW_B_TM6000 I2C_HW_B_EM28XX
+/* ----------------------------------------------------------- */
+
+static unsigned int i2c_debug = 0;
+module_param(i2c_debug, int, 0644);
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+
+#define i2c_dprintk(lvl, fmt, args...) do { \
+ if (i2c_debug >= lvl) \
+ printk(KERN_DEBUG "%s at %s: " fmt, \
+ dev->name, __FUNCTION__ , ##args); \
+ } while (0)
+
+static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
+ __u8 reg, char *buf, int len)
+{
+ int rc;
+ unsigned int tsleep;
+ unsigned int i2c_packet_limit = 16;
+
+ if (dev->dev_type == TM6010)
+ i2c_packet_limit = 64;
+
+ if (!buf)
+ return -1;
+
+ if (len < 1 || len > i2c_packet_limit) {
+ printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
+ len, i2c_packet_limit);
+ return -1;
+ }
+
+ /* capture mutex */
+ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
+ addr | reg << 8, 0, buf, len);
+
+ if (rc < 0) {
+ /* release mutex */
+ return rc;
+ }
+
+ /* Calculate delay time, 14000us for 64 bytes */
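+ /* i.e. roughly 200 us per byte plus 1200 us of fixed overhead, converted to ms */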
+ tsleep = ((len * 200) + 200 + 1000) / 1000;
+ msleep(tsleep);
+
+ /* release mutex */
+ return rc;
+}
+
+/* Generic read - does not work reliably with 16-bit registers */
+static int tm6000_i2c_recv_regs(struct tm6000_core *dev, unsigned char addr,
+ __u8 reg, char *buf, int len)
+{
+ int rc;
+ u8 b[2];
+ unsigned int i2c_packet_limit = 16;
+
+ if (dev->dev_type == TM6010)
+ i2c_packet_limit = 64;
+
+ if (!buf)
+ return -1;
+
+ if (len < 1 || len > i2c_packet_limit) {
+ printk(KERN_ERR "Incorrect length of i2c packet = %d, limit set to %d\n",
+ len, i2c_packet_limit);
+ return -1;
+ }
+
+ /* capture mutex */
+ if ((dev->caps.has_zl10353) && (dev->demod_addr << 1 == addr) && (reg % 2 == 0)) {
+ /*
+ * Workaround for an I2C quirk when reading from the zl10353:
+ * even-numbered registers cannot be read directly, so start the
+ * read one register earlier, fetch one extra byte and return
+ * the second byte.
+ */
+ reg -= 1;
+ len += 1;
+
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, b, len);
+
+ *buf = b[1];
+ } else {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN, addr | reg << 8, 0, buf, len);
+ }
+
+ /* release mutex */
+ return rc;
+}
+
+/*
+ * read from a 16bit register
+ * for example xc2028, xc3028 or xc3028L
+ */
+static int tm6000_i2c_recv_regs16(struct tm6000_core *dev, unsigned char addr,
+ __u16 reg, char *buf, int len)
+{
+ int rc;
+ unsigned char ureg;
+
+ if (!buf || len != 2)
+ return -1;
+
+ /* capture mutex */
+ if (dev->dev_type == TM6010) {
+ ureg = reg & 0xFF;
+ rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_16_SET_GET_I2C_WR1_RDN,
+ addr | (reg & 0xFF00), 0, &ureg, 1);
+
+ if (rc < 0) {
+ /* release mutex */
+ return rc;
+ }
+
+ msleep(1400 / 1000);
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_35_AFTEK_TUNER_READ,
+ reg, 0, buf, len);
+ } else {
+ rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, REQ_14_SET_GET_I2C_WR2_RDN,
+ addr, reg, buf, len);
+ }
+
+ /* release mutex */
+ return rc;
+}
+
+static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct tm6000_core *dev = i2c_adap->algo_data;
+ int addr, rc, i, byte;
+
+ if (num <= 0)
+ return 0;
+ for (i = 0; i < num; i++) {
+ addr = (msgs[i].addr << 1) & 0xff;
+ i2c_dprintk(2,"%s %s addr=0x%x len=%d:",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
+ if (msgs[i].flags & I2C_M_RD) {
+ /* read request without preceding register selection */
+ /*
+ * The TM6000 only supports a read transaction
+ * immediately after a 1 or 2 byte write to select
+ * a register. We cannot fulfil this request.
+ */
+ i2c_dprintk(2, " read without preceding write not"
+ " supported");
+ rc = -EOPNOTSUPP;
+ goto err;
+ } else if (i + 1 < num && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD) &&
+ msgs[i].addr == msgs[i + 1].addr) {
+ /* 1 or 2 byte write followed by a read */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ i2c_dprintk(2, "; joined to read %s len=%d:",
+ i == num - 2 ? "stop" : "nonstop",
+ msgs[i + 1].len);
+
+ if (msgs[i].len == 2) {
+ rc = tm6000_i2c_recv_regs16(dev, addr,
+ msgs[i].buf[0] << 8 | msgs[i].buf[1],
+ msgs[i + 1].buf, msgs[i + 1].len);
+ } else {
+ rc = tm6000_i2c_recv_regs(dev, addr, msgs[i].buf[0],
+ msgs[i + 1].buf, msgs[i + 1].len);
+ }
+
+ i++;
+
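+ /* after touching the tuner, issue the start/stop request pair */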
+ if (addr == dev->tuner_addr << 1) {
+ tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
+ tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
+ }
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ } else {
+ /* write bytes */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ rc = tm6000_i2c_send_regs(dev, addr, msgs[i].buf[0],
+ msgs[i].buf + 1, msgs[i].len - 1);
+
+ if (addr == dev->tuner_addr << 1) {
+ tm6000_set_reg(dev, REQ_50_SET_START, 0, 0);
+ tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0);
+ }
+ }
+ if (i2c_debug >= 2)
+ printk("\n");
+ if (rc < 0)
+ goto err;
+ }
+
+ return num;
+err:
+ i2c_dprintk(2," ERROR: %i\n", rc);
+ return rc;
+}
+
+static int tm6000_i2c_eeprom(struct tm6000_core *dev,
+ unsigned char *eedata, int len)
+{
+ int i, rc;
+ unsigned char *p = eedata;
+ unsigned char bytes[17];
+
+ dev->i2c_client.addr = 0xa0 >> 1;
+
+ bytes[16] = '\0';
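+ /* read the eeprom one byte at a time and dump it as hex plus printable ASCII, 16 bytes per line */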
+ for (i = 0; i < len; ) {
+ *p = i;
+ rc = tm6000_i2c_recv_regs(dev, 0xa0, i, p, 1);
+ if (rc < 1) {
+ if (p == eedata)
+ goto noeeprom;
+ else {
+ printk(KERN_WARNING
+ "%s: i2c eeprom read error (err=%d)\n",
+ dev->name, rc);
+ }
+ return -1;
+ }
+ p++;
+ if (0 == (i % 16))
+ printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i);
+ printk(" %02x", eedata[i]);
+ if ((eedata[i] >= ' ') && (eedata[i] <= 'z')) {
+ bytes[i%16] = eedata[i];
+ } else {
+ bytes[i%16]='.';
+ }
+
+ i++;
+
+ if (0 == (i % 16)) {
+ bytes[16] = '\0';
+ printk(" %s\n", bytes);
+ }
+ }
+ if (0 != (i%16)) {
+ bytes[i%16] = '\0';
+ for (i %= 16; i < 16; i++)
+ printk(" ");
+ }
+ printk(" %s\n", bytes);
+
+ return 0;
+
+noeeprom:
+ printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n",
+ dev->name, rc);
+ return rc;
+}
+
+/* ----------------------------------------------------------- */
+
+/*
+ * functionality()
+ */
+static u32 functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+#define mass_write(addr, reg, data...) \
+ { static const u8 _val[] = data; \
+ rc=tm6000_read_write_usb(dev,USB_DIR_OUT | USB_TYPE_VENDOR, \
+ REQ_16_SET_GET_I2C_WR1_RDN,(reg<<8)+addr, 0x00, (u8 *) _val, \
+ ARRAY_SIZE(_val)); \
+ if (rc<0) { \
+ printk(KERN_ERR "Error on line %d: %d\n",__LINE__,rc); \
+ return rc; \
+ } \
+ msleep (10); \
+ }
+
+static struct i2c_algorithm tm6000_algo = {
+ .master_xfer = tm6000_i2c_xfer,
+ .functionality = functionality,
+};
+
+static struct i2c_adapter tm6000_adap_template = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
+ .name = "tm6000",
+ .id = I2C_HW_B_TM6000,
+ .algo = &tm6000_algo,
+};
+
+static struct i2c_client tm6000_client_template = {
+ .name = "tm6000 internal",
+};
+
+/* ----------------------------------------------------------- */
+
+/*
+ * tm6000_i2c_register()
+ * register i2c bus
+ */
+int tm6000_i2c_register(struct tm6000_core *dev)
+{
+ unsigned char eedata[256];
+
+ dev->i2c_adap = tm6000_adap_template;
+ dev->i2c_adap.dev.parent = &dev->udev->dev;
+ strcpy(dev->i2c_adap.name, dev->name);
+ dev->i2c_adap.algo_data = dev;
+ i2c_add_adapter(&dev->i2c_adap);
+
+ dev->i2c_client = tm6000_client_template;
+ dev->i2c_client.adapter = &dev->i2c_adap;
+
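+ /* v4l2 sub-device helpers expect to find the v4l2_device via i2c_get_adapdata() */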
+ i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
+
+ tm6000_i2c_eeprom(dev, eedata, sizeof(eedata));
+
+ return 0;
+}
+
+/*
+ * tm6000_i2c_unregister()
+ * unregister the i2c bus
+ */
+int tm6000_i2c_unregister(struct tm6000_core *dev)
+{
+ i2c_del_adapter(&dev->i2c_adap);
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-regs.h b/drivers/staging/tm6000/tm6000-regs.h
new file mode 100644
index 00000000000..1c5289c971f
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-regs.h
@@ -0,0 +1,541 @@
+/*
+ tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Define TV Master TM5600/TM6000/TM6010 Request codes
+ */
+#define REQ_00_SET_IR_VALUE 0
+#define REQ_01_SET_WAKEUP_IRCODE 1
+#define REQ_02_GET_IR_CODE 2
+#define REQ_03_SET_GET_MCU_PIN 3
+#define REQ_04_EN_DISABLE_MCU_INT 4
+#define REQ_05_SET_GET_USBREG 5
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_06_SET_GET_USBREG_BIT 6
+#define REQ_07_SET_GET_AVREG 7
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_08_SET_GET_AVREG_BIT 8
+#define REQ_09_SET_GET_TUNER_FQ 9
+#define REQ_10_SET_TUNER_SYSTEM 10
+#define REQ_11_SET_EEPROM_ADDR 11
+#define REQ_12_SET_GET_EEPROMBYTE 12
+#define REQ_13_GET_EEPROM_SEQREAD 13
+#define REQ_14_SET_GET_I2C_WR2_RDN 14
+#define REQ_15_SET_GET_I2CBYTE 15
+ /* Write: Subaddr, Slave Addr, value, 0 */
+ /* Read : Subaddr, Slave Addr, value, 1 */
+#define REQ_16_SET_GET_I2C_WR1_RDN 16
+ /* Subaddr, Slave Addr, 0, length */
+#define REQ_17_SET_GET_I2CFP 17
+ /* Write: Slave Addr, register, value */
+ /* Read : Slave Addr, register, 2, data */
+#define REQ_20_DATA_TRANSFER 20
+#define REQ_30_I2C_WRITE 30
+#define REQ_31_I2C_READ 31
+#define REQ_35_AFTEK_TUNER_READ 35
+#define REQ_40_GET_VERSION 40
+#define REQ_50_SET_START 50
+#define REQ_51_SET_STOP 51
+#define REQ_52_TRANSMIT_DATA 52
+#define REQ_53_SPI_INITIAL 53
+#define REQ_54_SPI_SETSTART 54
+#define REQ_55_SPI_INOUTDATA 55
+#define REQ_56_SPI_SETSTOP 56
+
+/*
+ * Define TV Master TM5600/TM6000/TM6010 GPIO lines
+ */
+
+#define TM6000_GPIO_CLK 0x101
+#define TM6000_GPIO_DATA 0x100
+
+#define TM6000_GPIO_1 0x102
+#define TM6000_GPIO_2 0x103
+#define TM6000_GPIO_3 0x104
+#define TM6000_GPIO_4 0x300
+#define TM6000_GPIO_5 0x301
+#define TM6000_GPIO_6 0x304
+#define TM6000_GPIO_7 0x305
+
+/* tm6010 defines GPIO with different values */
+#define TM6010_GPIO_0 0x0102
+#define TM6010_GPIO_1 0x0103
+#define TM6010_GPIO_2 0x0104
+#define TM6010_GPIO_3 0x0105
+#define TM6010_GPIO_4 0x0106
+#define TM6010_GPIO_5 0x0107
+#define TM6010_GPIO_6 0x0300
+#define TM6010_GPIO_7 0x0301
+#define TM6010_GPIO_9 0x0305
+/*
+ * Define TV Master TM5600/TM6000/TM6010 URB message codes and length
+ */
+
+enum {
+ TM6000_URB_MSG_VIDEO=1,
+ TM6000_URB_MSG_AUDIO,
+ TM6000_URB_MSG_VBI,
+ TM6000_URB_MSG_PTS,
+ TM6000_URB_MSG_ERR,
+};
+
+/* Define TM6000/TM6010 Video decoder registers */
+#define TM6010_REQ07_R00_VIDEO_CONTROL0 0x07, 0x00
+#define TM6010_REQ07_R01_VIDEO_CONTROL1 0x07, 0x01
+#define TM6010_REQ07_R02_VIDEO_CONTROL2 0x07, 0x02
+#define TM6010_REQ07_R03_YC_SEP_CONTROL 0x07, 0x03
+#define TM6010_REQ07_R04_LUMA_HAGC_CONTROL 0x07, 0x04
+#define TM6010_REQ07_R05_NOISE_THRESHOLD 0x07, 0x05
+#define TM6010_REQ07_R06_AGC_GATE_THRESHOLD 0x07, 0x06
+#define TM6010_REQ07_R07_OUTPUT_CONTROL 0x07, 0x07
+#define TM6010_REQ07_R08_LUMA_CONTRAST_ADJ 0x07, 0x08
+#define TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ 0x07, 0x09
+#define TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ 0x07, 0x0a
+#define TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ 0x07, 0x0b
+#define TM6010_REQ07_R0C_CHROMA_AGC_CONTROL 0x07, 0x0c
+#define TM6010_REQ07_R0D_CHROMA_KILL_LEVEL 0x07, 0x0d
+#define TM6010_REQ07_R0F_CHROMA_AUTO_POSITION 0x07, 0x0f
+#define TM6010_REQ07_R10_AGC_PEAK_NOMINAL 0x07, 0x10
+#define TM6010_REQ07_R11_AGC_PEAK_CONTROL 0x07, 0x11
+#define TM6010_REQ07_R12_AGC_GATE_STARTH 0x07, 0x12
+#define TM6010_REQ07_R13_AGC_GATE_STARTL 0x07, 0x13
+#define TM6010_REQ07_R14_AGC_GATE_WIDTH 0x07, 0x14
+#define TM6010_REQ07_R15_AGC_BP_DELAY 0x07, 0x15
+#define TM6010_REQ07_R16_LOCK_COUNT 0x07, 0x16
+#define TM6010_REQ07_R17_HLOOP_MAXSTATE 0x07, 0x17
+#define TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3 0x07, 0x18
+#define TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2 0x07, 0x19
+#define TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1 0x07, 0x1a
+#define TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0 0x07, 0x1b
+#define TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3 0x07, 0x1c
+#define TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2 0x07, 0x1d
+#define TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1 0x07, 0x1e
+#define TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0 0x07, 0x1f
+#define TM6010_REQ07_R20_HSYNC_RISING_EDGE_TIME 0x07, 0x20
+#define TM6010_REQ07_R21_HSYNC_PHASE_OFFSET 0x07, 0x21
+#define TM6010_REQ07_R22_HSYNC_PLL_START_TIME 0x07, 0x22
+#define TM6010_REQ07_R23_HSYNC_PLL_END_TIME 0x07, 0x23
+#define TM6010_REQ07_R24_HSYNC_TIP_START_TIME 0x07, 0x24
+#define TM6010_REQ07_R25_HSYNC_TIP_END_TIME 0x07, 0x25
+#define TM6010_REQ07_R26_HSYNC_RISING_EDGE_START 0x07, 0x26
+#define TM6010_REQ07_R27_HSYNC_RISING_EDGE_END 0x07, 0x27
+#define TM6010_REQ07_R28_BACKPORCH_START 0x07, 0x28
+#define TM6010_REQ07_R29_BACKPORCH_END 0x07, 0x29
+#define TM6010_REQ07_R2A_HSYNC_FILTER_START 0x07, 0x2a
+#define TM6010_REQ07_R2B_HSYNC_FILTER_END 0x07, 0x2b
+#define TM6010_REQ07_R2C_CHROMA_BURST_START 0x07, 0x2c
+#define TM6010_REQ07_R2D_CHROMA_BURST_END 0x07, 0x2d
+#define TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART 0x07, 0x2e
+#define TM6010_REQ07_R2F_ACTIVE_VIDEO_HWIDTH 0x07, 0x2f
+#define TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART 0x07, 0x30
+#define TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT 0x07, 0x31
+#define TM6010_REQ07_R32_VSYNC_HLOCK_MIN 0x07, 0x32
+#define TM6010_REQ07_R33_VSYNC_HLOCK_MAX 0x07, 0x33
+#define TM6010_REQ07_R34_VSYNC_AGC_MIN 0x07, 0x34
+#define TM6010_REQ07_R35_VSYNC_AGC_MAX 0x07, 0x35
+#define TM6010_REQ07_R36_VSYNC_VBI_MIN 0x07, 0x36
+#define TM6010_REQ07_R37_VSYNC_VBI_MAX 0x07, 0x37
+#define TM6010_REQ07_R38_VSYNC_THRESHOLD 0x07, 0x38
+#define TM6010_REQ07_R39_VSYNC_TIME_CONSTANT 0x07, 0x39
+#define TM6010_REQ07_R3A_STATUS1 0x07, 0x3a
+#define TM6010_REQ07_R3B_STATUS2 0x07, 0x3b
+#define TM6010_REQ07_R3C_STATUS3 0x07, 0x3c
+#define TM6010_REQ07_R3F_RESET 0x07, 0x3f
+#define TM6010_REQ07_R40_TELETEXT_VBI_CODE0 0x07, 0x40
+#define TM6010_REQ07_R41_TELETEXT_VBI_CODE1 0x07, 0x41
+#define TM6010_REQ07_R42_VBI_DATA_HIGH_LEVEL 0x07, 0x42
+#define TM6010_REQ07_R43_VBI_DATA_TYPE_LINE7 0x07, 0x43
+#define TM6010_REQ07_R44_VBI_DATA_TYPE_LINE8 0x07, 0x44
+#define TM6010_REQ07_R45_VBI_DATA_TYPE_LINE9 0x07, 0x45
+#define TM6010_REQ07_R46_VBI_DATA_TYPE_LINE10 0x07, 0x46
+#define TM6010_REQ07_R47_VBI_DATA_TYPE_LINE11 0x07, 0x47
+#define TM6010_REQ07_R48_VBI_DATA_TYPE_LINE12 0x07, 0x48
+#define TM6010_REQ07_R49_VBI_DATA_TYPE_LINE13 0x07, 0x49
+#define TM6010_REQ07_R4A_VBI_DATA_TYPE_LINE14 0x07, 0x4a
+#define TM6010_REQ07_R4B_VBI_DATA_TYPE_LINE15 0x07, 0x4b
+#define TM6010_REQ07_R4C_VBI_DATA_TYPE_LINE16 0x07, 0x4c
+#define TM6010_REQ07_R4D_VBI_DATA_TYPE_LINE17 0x07, 0x4d
+#define TM6010_REQ07_R4E_VBI_DATA_TYPE_LINE18 0x07, 0x4e
+#define TM6010_REQ07_R4F_VBI_DATA_TYPE_LINE19 0x07, 0x4f
+#define TM6010_REQ07_R50_VBI_DATA_TYPE_LINE20 0x07, 0x50
+#define TM6010_REQ07_R51_VBI_DATA_TYPE_LINE21 0x07, 0x51
+#define TM6010_REQ07_R52_VBI_DATA_TYPE_LINE22 0x07, 0x52
+#define TM6010_REQ07_R53_VBI_DATA_TYPE_LINE23 0x07, 0x53
+#define TM6010_REQ07_R54_VBI_DATA_TYPE_RLINES 0x07, 0x54
+#define TM6010_REQ07_R55_VBI_LOOP_FILTER_GAIN 0x07, 0x55
+#define TM6010_REQ07_R56_VBI_LOOP_FILTER_I_GAIN 0x07, 0x56
+#define TM6010_REQ07_R57_VBI_LOOP_FILTER_P_GAIN 0x07, 0x57
+#define TM6010_REQ07_R58_VBI_CAPTION_DTO1 0x07, 0x58
+#define TM6010_REQ07_R59_VBI_CAPTION_DTO0 0x07, 0x59
+#define TM6010_REQ07_R5A_VBI_TELETEXT_DTO1 0x07, 0x5a
+#define TM6010_REQ07_R5B_VBI_TELETEXT_DTO0 0x07, 0x5b
+#define TM6010_REQ07_R5C_VBI_WSS625_DTO1 0x07, 0x5c
+#define TM6010_REQ07_R5D_VBI_WSS625_DTO0 0x07, 0x5d
+#define TM6010_REQ07_R5E_VBI_CAPTION_FRAME_START 0x07, 0x5e
+#define TM6010_REQ07_R5F_VBI_WSS625_FRAME_START 0x07, 0x5f
+#define TM6010_REQ07_R60_TELETEXT_FRAME_START 0x07, 0x60
+#define TM6010_REQ07_R61_VBI_CCDATA1 0x07, 0x61
+#define TM6010_REQ07_R62_VBI_CCDATA2 0x07, 0x62
+#define TM6010_REQ07_R63_VBI_WSS625_DATA1 0x07, 0x63
+#define TM6010_REQ07_R64_VBI_WSS625_DATA2 0x07, 0x64
+#define TM6010_REQ07_R65_VBI_DATA_STATUS 0x07, 0x65
+#define TM6010_REQ07_R66_VBI_CAPTION_START 0x07, 0x66
+#define TM6010_REQ07_R67_VBI_WSS625_START 0x07, 0x67
+#define TM6010_REQ07_R68_VBI_TELETEXT_START 0x07, 0x68
+#define TM6010_REQ07_R70_HSYNC_DTO_INC_STATUS3 0x07, 0x70
+#define TM6010_REQ07_R71_HSYNC_DTO_INC_STATUS2 0x07, 0x71
+#define TM6010_REQ07_R72_HSYNC_DTO_INC_STATUS1 0x07, 0x72
+#define TM6010_REQ07_R73_HSYNC_DTO_INC_STATUS0 0x07, 0x73
+#define TM6010_REQ07_R74_CHROMA_DTO_INC_STATUS3 0x07, 0x74
+#define TM6010_REQ07_R75_CHROMA_DTO_INC_STATUS2 0x07, 0x75
+#define TM6010_REQ07_R76_CHROMA_DTO_INC_STATUS1 0x07, 0x76
+#define TM6010_REQ07_R77_CHROMA_DTO_INC_STATUS0 0x07, 0x77
+#define TM6010_REQ07_R78_AGC_AGAIN_STATUS 0x07, 0x78
+#define TM6010_REQ07_R79_AGC_DGAIN_STATUS 0x07, 0x79
+#define TM6010_REQ07_R7A_CHROMA_MAG_STATUS 0x07, 0x7a
+#define TM6010_REQ07_R7B_CHROMA_GAIN_STATUS1 0x07, 0x7b
+#define TM6010_REQ07_R7C_CHROMA_GAIN_STATUS0 0x07, 0x7c
+#define TM6010_REQ07_R7D_CORDIC_FREQ_STATUS 0x07, 0x7d
+#define TM6010_REQ07_R7F_STATUS_NOISE 0x07, 0x7f
+#define TM6010_REQ07_R80_COMB_FILTER_TRESHOLD 0x07, 0x80
+#define TM6010_REQ07_R82_COMB_FILTER_CONFIG 0x07, 0x82
+#define TM6010_REQ07_R83_CHROMA_LOCK_CONFIG 0x07, 0x83
+#define TM6010_REQ07_R84_NOISE_NTSC_C 0x07, 0x84
+#define TM6010_REQ07_R85_NOISE_PAL_C 0x07, 0x85
+#define TM6010_REQ07_R86_NOISE_PHASE_C 0x07, 0x86
+#define TM6010_REQ07_R87_NOISE_PHASE_Y 0x07, 0x87
+#define TM6010_REQ07_R8A_CHROMA_LOOPFILTER_STATE 0x07, 0x8a
+#define TM6010_REQ07_R8B_CHROMA_HRESAMPLER 0x07, 0x8b
+#define TM6010_REQ07_R8D_CPUMP_DELAY_ADJ 0x07, 0x8d
+#define TM6010_REQ07_R8E_CPUMP_ADJ 0x07, 0x8e
+#define TM6010_REQ07_R8F_CPUMP_DELAY 0x07, 0x8f
+
+/* Define TM6000/TM6010 Miscellaneous registers */
+#define TM6010_REQ07_RC0_ACTIVE_VIDEO_SOURCE 0x07, 0xc0
+#define TM6010_REQ07_RC1_TRESHOLD 0x07, 0xc1
+#define TM6010_REQ07_RC2_HSYNC_WIDTH 0x07, 0xc2
+#define TM6010_REQ07_RC3_HSTART1 0x07, 0xc3
+#define TM6010_REQ07_RC4_HSTART0 0x07, 0xc4
+#define TM6010_REQ07_RC5_HEND1 0x07, 0xc5
+#define TM6010_REQ07_RC6_HEND0 0x07, 0xc6
+#define TM6010_REQ07_RC7_VSTART1 0x07, 0xc7
+#define TM6010_REQ07_RC8_VSTART0 0x07, 0xc8
+#define TM6010_REQ07_RC9_VEND1 0x07, 0xc9
+#define TM6010_REQ07_RCA_VEND0 0x07, 0xca
+#define TM6010_REQ07_RCB_DELAY 0x07, 0xcb
+#define TM6010_REQ07_RCC_ACTIVE_VIDEO_IF 0x07, 0xcc
+#define TM6010_REQ07_RD0_USB_PERIPHERY_CONTROL 0x07, 0xd0
+#define TM6010_REQ07_RD1_ADDR_FOR_REQ1 0x07, 0xd1
+#define TM6010_REQ07_RD2_ADDR_FOR_REQ2 0x07, 0xd2
+#define TM6010_REQ07_RD3_ADDR_FOR_REQ3 0x07, 0xd3
+#define TM6010_REQ07_RD4_ADDR_FOR_REQ4 0x07, 0xd4
+#define TM6010_REQ07_RD5_POWERSAVE 0x07, 0xd5
+#define TM6010_REQ07_RD6_ENDP_REQ1_REQ2 0x07, 0xd6
+#define TM6010_REQ07_RD7_ENDP_REQ3_REQ4 0x07, 0xd7
+#define TM6010_REQ07_RD8_IR 0x07, 0xd8
+#define TM6010_REQ07_RD8_IR_BSIZE 0x07, 0xd9
+#define TM6010_REQ07_RD8_IR_WAKEUP_SEL 0x07, 0xda
+#define TM6010_REQ07_RD8_IR_WAKEUP_ADD 0x07, 0xdb
+#define TM6010_REQ07_RD8_IR_LEADER1 0x07, 0xdc
+#define TM6010_REQ07_RD8_IR_LEADER0 0x07, 0xdd
+#define TM6010_REQ07_RD8_IR_PULSE_CNT1 0x07, 0xde
+#define TM6010_REQ07_RD8_IR_PULSE_CNT0 0x07, 0xdf
+#define TM6010_REQ07_RE0_DVIDEO_SOURCE 0x07, 0xe0
+#define TM6010_REQ07_RE0_DVIDEO_SOURCE_IF 0x07, 0xe1
+#define TM6010_REQ07_RE2_OUT_SEL2 0x07, 0xe2
+#define TM6010_REQ07_RE3_OUT_SEL1 0x07, 0xe3
+#define TM6010_REQ07_RE4_OUT_SEL0 0x07, 0xe4
+#define TM6010_REQ07_RE5_REMOTE_WAKEUP 0x07, 0xe5
+#define TM6010_REQ07_RE7_PUB_GPIO 0x07, 0xe7
+#define TM6010_REQ07_RE8_TYPESEL_MOS_I2S 0x07, 0xe8
+#define TM6010_REQ07_RE9_TYPESEL_MOS_TS 0x07, 0xe9
+#define TM6010_REQ07_REA_TYPESEL_MOS_CCIR 0x07, 0xea
+#define TM6010_REQ07_RF0_BIST_CRC_RESULT0 0x07, 0xf0
+#define TM6010_REQ07_RF1_BIST_CRC_RESULT1 0x07, 0xf1
+#define TM6010_REQ07_RF2_BIST_CRC_RESULT2 0x07, 0xf2
+#define TM6010_REQ07_RF3_BIST_CRC_RESULT3 0x07, 0xf3
+#define TM6010_REQ07_RF4_BIST_ERR_VST2 0x07, 0xf4
+#define TM6010_REQ07_RF5_BIST_ERR_VST1 0x07, 0xf5
+#define TM6010_REQ07_RF6_BIST_ERR_VST0 0x07, 0xf6
+#define TM6010_REQ07_RF7_BIST 0x07, 0xf7
+#define TM6010_REQ07_RFE_POWER_DOWN 0x07, 0xfe
+#define TM6010_REQ07_RFF_SOFT_RESET 0x07, 0xff
+
+/* Define TM6000/TM6010 USB registers */
+#define TM6010_REQ05_R00_MAIN_CTRL 0x05, 0x00
+#define TM6010_REQ05_R01_DEVADDR 0x05, 0x01
+#define TM6010_REQ05_R02_TEST 0x05, 0x02
+#define TM6010_REQ05_R04_SOFN0 0x05, 0x04
+#define TM6010_REQ05_R05_SOFN1 0x05, 0x05
+#define TM6010_REQ05_R06_SOFTM0 0x05, 0x06
+#define TM6010_REQ05_R07_SOFTM1 0x05, 0x07
+#define TM6010_REQ05_R08_PHY_TEST 0x05, 0x08
+#define TM6010_REQ05_R09_VCTL 0x05, 0x09
+#define TM6010_REQ05_R0A_VSTA 0x05, 0x0a
+#define TM6010_REQ05_R0B_CX_CFG 0x05, 0x0b
+#define TM6010_REQ05_R0C_ENDP0_REG0 0x05, 0x0c
+#define TM6010_REQ05_R10_GMASK 0x05, 0x10
+#define TM6010_REQ05_R11_IMASK0 0x05, 0x11
+#define TM6010_REQ05_R12_IMASK1 0x05, 0x12
+#define TM6010_REQ05_R13_IMASK2 0x05, 0x13
+#define TM6010_REQ05_R14_IMASK3 0x05, 0x14
+#define TM6010_REQ05_R15_IMASK4 0x05, 0x15
+#define TM6010_REQ05_R16_IMASK5 0x05, 0x16
+#define TM6010_REQ05_R17_IMASK6 0x05, 0x17
+#define TM6010_REQ05_R18_IMASK7 0x05, 0x18
+#define TM6010_REQ05_R19_ZEROP0 0x05, 0x19
+#define TM6010_REQ05_R1A_ZEROP1 0x05, 0x1a
+#define TM6010_REQ05_R1C_FIFO_EMP0 0x05, 0x1c
+#define TM6010_REQ05_R1D_FIFO_EMP1 0x05, 0x1d
+#define TM6010_REQ05_R20_IRQ_GROUP 0x05, 0x20
+#define TM6010_REQ05_R21_IRQ_SOURCE0 0x05, 0x21
+#define TM6010_REQ05_R22_IRQ_SOURCE1 0x05, 0x22
+#define TM6010_REQ05_R23_IRQ_SOURCE2 0x05, 0x23
+#define TM6010_REQ05_R24_IRQ_SOURCE3 0x05, 0x24
+#define TM6010_REQ05_R25_IRQ_SOURCE4 0x05, 0x25
+#define TM6010_REQ05_R26_IRQ_SOURCE5 0x05, 0x26
+#define TM6010_REQ05_R27_IRQ_SOURCE6 0x05, 0x27
+#define TM6010_REQ05_R28_IRQ_SOURCE7 0x05, 0x28
+#define TM6010_REQ05_R29_SEQ_ERR0 0x05, 0x29
+#define TM6010_REQ05_R2A_SEQ_ERR1 0x05, 0x2a
+#define TM6010_REQ05_R2B_SEQ_ABORT0 0x05, 0x2b
+#define TM6010_REQ05_R2C_SEQ_ABORT1 0x05, 0x2c
+#define TM6010_REQ05_R2D_TX_ZERO0 0x05, 0x2d
+#define TM6010_REQ05_R2E_TX_ZERO1 0x05, 0x2e
+#define TM6010_REQ05_R2F_IDLE_CNT 0x05, 0x2f
+#define TM6010_REQ05_R30_FNO_P1 0x05, 0x30
+#define TM6010_REQ05_R31_FNO_P2 0x05, 0x31
+#define TM6010_REQ05_R32_FNO_P3 0x05, 0x32
+#define TM6010_REQ05_R33_FNO_P4 0x05, 0x33
+#define TM6010_REQ05_R34_FNO_P5 0x05, 0x34
+#define TM6010_REQ05_R35_FNO_P6 0x05, 0x35
+#define TM6010_REQ05_R36_FNO_P7 0x05, 0x36
+#define TM6010_REQ05_R37_FNO_P8 0x05, 0x37
+#define TM6010_REQ05_R38_FNO_P9 0x05, 0x38
+#define TM6010_REQ05_R30_FNO_P10 0x05, 0x39
+#define TM6010_REQ05_R30_FNO_P11 0x05, 0x3a
+#define TM6010_REQ05_R30_FNO_P12 0x05, 0x3b
+#define TM6010_REQ05_R30_FNO_P13 0x05, 0x3c
+#define TM6010_REQ05_R30_FNO_P14 0x05, 0x3d
+#define TM6010_REQ05_R30_FNO_P15 0x05, 0x3e
+#define TM6010_REQ05_R40_IN_MAXPS_LOW1 0x05, 0x40
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH1 0x05, 0x41
+#define TM6010_REQ05_R42_IN_MAXPS_LOW2 0x05, 0x42
+#define TM6010_REQ05_R43_IN_MAXPS_HIGH2 0x05, 0x43
+#define TM6010_REQ05_R44_IN_MAXPS_LOW3 0x05, 0x44
+#define TM6010_REQ05_R45_IN_MAXPS_HIGH3 0x05, 0x45
+#define TM6010_REQ05_R46_IN_MAXPS_LOW4 0x05, 0x46
+#define TM6010_REQ05_R47_IN_MAXPS_HIGH4 0x05, 0x47
+#define TM6010_REQ05_R48_IN_MAXPS_LOW5 0x05, 0x48
+#define TM6010_REQ05_R49_IN_MAXPS_HIGH5 0x05, 0x49
+#define TM6010_REQ05_R4A_IN_MAXPS_LOW6 0x05, 0x4a
+#define TM6010_REQ05_R4B_IN_MAXPS_HIGH6 0x05, 0x4b
+#define TM6010_REQ05_R4C_IN_MAXPS_LOW7 0x05, 0x4c
+#define TM6010_REQ05_R4D_IN_MAXPS_HIGH7 0x05, 0x4d
+#define TM6010_REQ05_R4E_IN_MAXPS_LOW8 0x05, 0x4e
+#define TM6010_REQ05_R4F_IN_MAXPS_HIGH8 0x05, 0x4f
+#define TM6010_REQ05_R50_IN_MAXPS_LOW9 0x05, 0x50
+#define TM6010_REQ05_R51_IN_MAXPS_HIGH9 0x05, 0x51
+#define TM6010_REQ05_R40_IN_MAXPS_LOW10 0x05, 0x52
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH10 0x05, 0x53
+#define TM6010_REQ05_R40_IN_MAXPS_LOW11 0x05, 0x54
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH11 0x05, 0x55
+#define TM6010_REQ05_R40_IN_MAXPS_LOW12 0x05, 0x56
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH12 0x05, 0x57
+#define TM6010_REQ05_R40_IN_MAXPS_LOW13 0x05, 0x58
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH13 0x05, 0x59
+#define TM6010_REQ05_R40_IN_MAXPS_LOW14 0x05, 0x5a
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH14 0x05, 0x5b
+#define TM6010_REQ05_R40_IN_MAXPS_LOW15 0x05, 0x5c
+#define TM6010_REQ05_R41_IN_MAXPS_HIGH15 0x05, 0x5d
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW1 0x05, 0x60
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH1 0x05, 0x61
+#define TM6010_REQ05_R62_OUT_MAXPS_LOW2 0x05, 0x62
+#define TM6010_REQ05_R63_OUT_MAXPS_HIGH2 0x05, 0x63
+#define TM6010_REQ05_R64_OUT_MAXPS_LOW3 0x05, 0x64
+#define TM6010_REQ05_R65_OUT_MAXPS_HIGH3 0x05, 0x65
+#define TM6010_REQ05_R66_OUT_MAXPS_LOW4 0x05, 0x66
+#define TM6010_REQ05_R67_OUT_MAXPS_HIGH4 0x05, 0x67
+#define TM6010_REQ05_R68_OUT_MAXPS_LOW5 0x05, 0x68
+#define TM6010_REQ05_R69_OUT_MAXPS_HIGH5 0x05, 0x69
+#define TM6010_REQ05_R6A_OUT_MAXPS_LOW6 0x05, 0x6a
+#define TM6010_REQ05_R6B_OUT_MAXPS_HIGH6 0x05, 0x6b
+#define TM6010_REQ05_R6C_OUT_MAXPS_LOW7 0x05, 0x6c
+#define TM6010_REQ05_R6D_OUT_MAXPS_HIGH7 0x05, 0x6d
+#define TM6010_REQ05_R6E_OUT_MAXPS_LOW8 0x05, 0x6e
+#define TM6010_REQ05_R6F_OUT_MAXPS_HIGH8 0x05, 0x6f
+#define TM6010_REQ05_R70_OUT_MAXPS_LOW9 0x05, 0x70
+#define TM6010_REQ05_R71_OUT_MAXPS_HIGH9 0x05, 0x71
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW10 0x05, 0x72
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH10 0x05, 0x73
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW11 0x05, 0x74
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH11 0x05, 0x75
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW12 0x05, 0x76
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH12 0x05, 0x77
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW13 0x05, 0x78
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH13 0x05, 0x79
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW14 0x05, 0x7a
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH14 0x05, 0x7b
+#define TM6010_REQ05_R60_OUT_MAXPS_LOW15 0x05, 0x7c
+#define TM6010_REQ05_R61_OUT_MAXPS_HIGH15 0x05, 0x7d
+#define TM6010_REQ05_R80_FIFO0 0x05, 0x80
+#define TM6010_REQ05_R81_FIFO1 0x05, 0x81
+#define TM6010_REQ05_R82_FIFO2 0x05, 0x82
+#define TM6010_REQ05_R83_FIFO3 0x05, 0x83
+#define TM6010_REQ05_R84_FIFO4 0x05, 0x84
+#define TM6010_REQ05_R85_FIFO5 0x05, 0x85
+#define TM6010_REQ05_R86_FIFO6 0x05, 0x86
+#define TM6010_REQ05_R87_FIFO7 0x05, 0x87
+#define TM6010_REQ05_R88_FIFO8 0x05, 0x88
+#define TM6010_REQ05_R89_FIFO9 0x05, 0x89
+#define TM6010_REQ05_R81_FIFO10 0x05, 0x8a
+#define TM6010_REQ05_R81_FIFO11 0x05, 0x8b
+#define TM6010_REQ05_R81_FIFO12 0x05, 0x8c
+#define TM6010_REQ05_R81_FIFO13 0x05, 0x8d
+#define TM6010_REQ05_R81_FIFO14 0x05, 0x8e
+#define TM6010_REQ05_R81_FIFO15 0x05, 0x8f
+#define TM6010_REQ05_R90_CFG_FIFO0 0x05, 0x90
+#define TM6010_REQ05_R91_CFG_FIFO1 0x05, 0x91
+#define TM6010_REQ05_R92_CFG_FIFO2 0x05, 0x92
+#define TM6010_REQ05_R93_CFG_FIFO3 0x05, 0x93
+#define TM6010_REQ05_R94_CFG_FIFO4 0x05, 0x94
+#define TM6010_REQ05_R95_CFG_FIFO5 0x05, 0x95
+#define TM6010_REQ05_R96_CFG_FIFO6 0x05, 0x96
+#define TM6010_REQ05_R97_CFG_FIFO7 0x05, 0x97
+#define TM6010_REQ05_R98_CFG_FIFO8 0x05, 0x98
+#define TM6010_REQ05_R99_CFG_FIFO9 0x05, 0x99
+#define TM6010_REQ05_R91_CFG_FIFO10 0x05, 0x9a
+#define TM6010_REQ05_R91_CFG_FIFO11 0x05, 0x9b
+#define TM6010_REQ05_R91_CFG_FIFO12 0x05, 0x9c
+#define TM6010_REQ05_R91_CFG_FIFO13 0x05, 0x9d
+#define TM6010_REQ05_R91_CFG_FIFO14 0x05, 0x9e
+#define TM6010_REQ05_R91_CFG_FIFO15 0x05, 0x9f
+#define TM6010_REQ05_RA0_CTL_FIFO0 0x05, 0xa0
+#define TM6010_REQ05_RA1_CTL_FIFO1 0x05, 0xa1
+#define TM6010_REQ05_RA2_CTL_FIFO2 0x05, 0xa2
+#define TM6010_REQ05_RA3_CTL_FIFO3 0x05, 0xa3
+#define TM6010_REQ05_RA4_CTL_FIFO4 0x05, 0xa4
+#define TM6010_REQ05_RA5_CTL_FIFO5 0x05, 0xa5
+#define TM6010_REQ05_RA6_CTL_FIFO6 0x05, 0xa6
+#define TM6010_REQ05_RA7_CTL_FIFO7 0x05, 0xa7
+#define TM6010_REQ05_RA8_CTL_FIFO8 0x05, 0xa8
+#define TM6010_REQ05_RA9_CTL_FIFO9 0x05, 0xa9
+#define TM6010_REQ05_RA1_CTL_FIFO10 0x05, 0xaa
+#define TM6010_REQ05_RA1_CTL_FIFO11 0x05, 0xab
+#define TM6010_REQ05_RA1_CTL_FIFO12 0x05, 0xac
+#define TM6010_REQ05_RA1_CTL_FIFO13 0x05, 0xad
+#define TM6010_REQ05_RA1_CTL_FIFO14 0x05, 0xae
+#define TM6010_REQ05_RA1_CTL_FIFO15 0x05, 0xaf
+#define TM6010_REQ05_RB0_BC_LOW_FIFO0 0x05, 0xb0
+#define TM6010_REQ05_RB1_BC_LOW_FIFO1 0x05, 0xb1
+#define TM6010_REQ05_RB2_BC_LOW_FIFO2 0x05, 0xb2
+#define TM6010_REQ05_RB3_BC_LOW_FIFO3 0x05, 0xb3
+#define TM6010_REQ05_RB4_BC_LOW_FIFO4 0x05, 0xb4
+#define TM6010_REQ05_RB5_BC_LOW_FIFO5 0x05, 0xb5
+#define TM6010_REQ05_RB6_BC_LOW_FIFO6 0x05, 0xb6
+#define TM6010_REQ05_RB7_BC_LOW_FIFO7 0x05, 0xb7
+#define TM6010_REQ05_RB8_BC_LOW_FIFO8 0x05, 0xb8
+#define TM6010_REQ05_RB9_BC_LOW_FIFO9 0x05, 0xb9
+#define TM6010_REQ05_RB1_BC_LOW_FIFO10 0x05, 0xba
+#define TM6010_REQ05_RB1_BC_LOW_FIFO11 0x05, 0xbb
+#define TM6010_REQ05_RB1_BC_LOW_FIFO12 0x05, 0xbc
+#define TM6010_REQ05_RB1_BC_LOW_FIFO13 0x05, 0xbd
+#define TM6010_REQ05_RB1_BC_LOW_FIFO14 0x05, 0xbe
+#define TM6010_REQ05_RB1_BC_LOW_FIFO15 0x05, 0xbf
+#define TM6010_REQ05_RC0_DATA_FIFO0 0x05, 0xc0
+#define TM6010_REQ05_RC4_DATA_FIFO1 0x05, 0xc4
+#define TM6010_REQ05_RC8_DATA_FIFO2 0x05, 0xc8
+#define TM6010_REQ05_RCC_DATA_FIFO3 0x05, 0xcc
+#define TM6010_REQ05_RD0_DATA_FIFO4 0x05, 0xd0
+#define TM6010_REQ05_RD4_DATA_FIFO5 0x05, 0xd4
+#define TM6010_REQ05_RD8_DATA_FIFO6 0x05, 0xd8
+#define TM6010_REQ05_RDC_DATA_FIFO7 0x05, 0xdc
+#define TM6010_REQ05_RE0_DATA_FIFO8 0x05, 0xe0
+#define TM6010_REQ05_RE4_DATA_FIFO9 0x05, 0xe4
+#define TM6010_REQ05_RC4_DATA_FIFO10 0x05, 0xe8
+#define TM6010_REQ05_RC4_DATA_FIFO11 0x05, 0xec
+#define TM6010_REQ05_RC4_DATA_FIFO12 0x05, 0xf0
+#define TM6010_REQ05_RC4_DATA_FIFO13 0x05, 0xf4
+#define TM6010_REQ05_RC4_DATA_FIFO14 0x05, 0xf8
+#define TM6010_REQ05_RC4_DATA_FIFO15 0x05, 0xfc
+
+/* Define TM6000/TM6010 Audio decoder registers */
+#define TM6010_REQ08_R00_A_VERSION 0x08, 0x00
+#define TM6010_REQ08_R01_A_INIT 0x08, 0x01
+#define TM6010_REQ08_R02_A_FIX_GAIN_CTRL 0x08, 0x02
+#define TM6010_REQ08_R03_A_AUTO_GAIN_CTRL 0x08, 0x03
+#define TM6010_REQ08_R04_A_SIF_AMP_CTRL 0x08, 0x04
+#define TM6010_REQ08_R05_A_STANDARD_MOD 0x08, 0x05
+#define TM6010_REQ08_R06_A_SOUND_MOD 0x08, 0x06
+#define TM6010_REQ08_R07_A_LEFT_VOL 0x08, 0x07
+#define TM6010_REQ08_R08_A_RIGHT_VOL 0x08, 0x08
+#define TM6010_REQ08_R09_A_MAIN_VOL 0x08, 0x09
+#define TM6010_REQ08_R0A_A_I2S_MOD 0x08, 0x0a
+#define TM6010_REQ08_R0B_A_ASD_THRES1 0x08, 0x0b
+#define TM6010_REQ08_R0C_A_ASD_THRES2 0x08, 0x0c
+#define TM6010_REQ08_R0D_A_AMD_THRES 0x08, 0x0d
+#define TM6010_REQ08_R0E_A_MONO_THRES1 0x08, 0x0e
+#define TM6010_REQ08_R0F_A_MONO_THRES2 0x08, 0x0f
+#define TM6010_REQ08_R10_A_MUTE_THRES1 0x08, 0x10
+#define TM6010_REQ08_R11_A_MUTE_THRES2 0x08, 0x11
+#define TM6010_REQ08_R12_A_AGC_U 0x08, 0x12
+#define TM6010_REQ08_R13_A_AGC_ERR_T 0x08, 0x13
+#define TM6010_REQ08_R14_A_AGC_GAIN_INIT 0x08, 0x14
+#define TM6010_REQ08_R15_A_AGC_STEP_THR 0x08, 0x15
+#define TM6010_REQ08_R16_A_AGC_GAIN_MAX 0x08, 0x16
+#define TM6010_REQ08_R17_A_AGC_GAIN_MIN 0x08, 0x17
+#define TM6010_REQ08_R18_A_TR_CTRL 0x08, 0x18
+#define TM6010_REQ08_R19_A_FH_2FH_GAIN 0x08, 0x19
+#define TM6010_REQ08_R1A_A_NICAM_SER_MAX 0x08, 0x1a
+#define TM6010_REQ08_R1B_A_NICAM_SER_MIN 0x08, 0x1b
+#define TM6010_REQ08_R1E_A_GAIN_DEEMPH_OUT 0x08, 0x1e
+#define TM6010_REQ08_R1F_A_TEST_INTF_SEL 0x08, 0x1f
+#define TM6010_REQ08_R20_A_TEST_PIN_SEL 0x08, 0x20
+#define TM6010_REQ08_R21_A_AGC_ERR 0x08, 0x21
+#define TM6010_REQ08_R22_A_AGC_GAIN 0x08, 0x22
+#define TM6010_REQ08_R23_A_NICAM_INFO 0x08, 0x23
+#define TM6010_REQ08_R24_A_SER 0x08, 0x24
+#define TM6010_REQ08_R25_A_C1_AMP 0x08, 0x25
+#define TM6010_REQ08_R26_A_C2_AMP 0x08, 0x26
+#define TM6010_REQ08_R27_A_NOISE_AMP 0x08, 0x27
+#define TM6010_REQ08_R28_A_AUDIO_MODE_RES 0x08, 0x28
+
+/* Define TM6000/TM6010 Video ADC registers */
+#define TM6010_REQ08_RE0_ADC_REF 0x08, 0xe0
+#define TM6010_REQ08_RE1_DAC_CLMP 0x08, 0xe1
+#define TM6010_REQ08_RE2_POWER_DOWN_CTRL1 0x08, 0xe2
+#define TM6010_REQ08_RE3_ADC_IN1_SEL 0x08, 0xe3
+#define TM6010_REQ08_RE4_ADC_IN2_SEL 0x08, 0xe4
+#define TM6010_REQ08_RE5_GAIN_PARAM 0x08, 0xe5
+#define TM6010_REQ08_RE6_POWER_DOWN_CTRL2 0x08, 0xe6
+#define TM6010_REQ08_RE7_REG_GAIN_Y 0x08, 0xe7
+#define TM6010_REQ08_RE8_REG_GAIN_C 0x08, 0xe8
+#define TM6010_REQ08_RE9_BIAS_CTRL 0x08, 0xe9
+#define TM6010_REQ08_REA_BUFF_DRV_CTRL 0x08, 0xea
+#define TM6010_REQ08_REB_SIF_GAIN_CTRL 0x08, 0xeb
+#define TM6010_REQ08_REC_REVERSE_YC_CTRL 0x08, 0xec
+#define TM6010_REQ08_RED_GAIN_SEL 0x08, 0xed
+
+/* Define TM6000/TM6010 Audio ADC registers */
+#define TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG 0x08, 0xf0
+#define TM6010_REQ08_RF1_AADC_POWER_DOWN 0x08, 0xf1
+#define TM6010_REQ08_RF2_LEFT_CHANNEL_VOL 0x08, 0xf2
+#define TM6010_REQ08_RF3_RIGHT_CHANNEL_VOL 0x08, 0xf3
diff --git a/drivers/staging/tm6000/tm6000-stds.c b/drivers/staging/tm6000/tm6000-stds.c
new file mode 100644
index 00000000000..b3564f611e5
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-stds.c
@@ -0,0 +1,873 @@
+/*
+ tm6000-stds.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+struct tm6000_reg_settings {
+ unsigned char req;
+ unsigned char reg;
+ unsigned char value;
+};
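+
+/*
+ * Each TM6010_REQxx_Ryy_* macro from tm6000-regs.h expands to a
+ * "request, register" pair, so a single macro initializes both the
+ * req and reg fields above, e.g. {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2}.
+ */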
+
+struct tm6000_std_tv_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings sif[12];
+ struct tm6000_reg_settings nosif[12];
+ struct tm6000_reg_settings common[25];
+};
+
+struct tm6000_std_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings common[37];
+};
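+
+/*
+ * The TV standards carry two alternative audio setup tables (sif = with
+ * the SIF sound path powered, nosif = without it) plus a common table;
+ * the composite and s-video tables below only need a common table.
+ */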
+
+static struct tm6000_std_tv_settings tv_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0}
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .sif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf2},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x08},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x62},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfe},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf8},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x60},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings composite_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x04},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x20},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x36},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x32},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x38},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x02},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2c},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xf4},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf3},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x0f},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf1},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xe0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe8},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8b},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x00},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x00},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings svideo_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x05},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x83},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x0a},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe0},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x37},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x91},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x1f},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x0c},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x33},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x04},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x00},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x25},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0xd5},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0x63},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0x50},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x0c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x52},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdc},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x39},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0e},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x01},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x24},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x92},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xe8},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xed},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x8c},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x2a},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0xc1},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x2c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x18},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0xFF},
+
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {TM6010_REQ08_RE2_POWER_DOWN_CTRL1, 0xf0},
+ {TM6010_REQ08_RE3_ADC_IN1_SEL, 0xfc},
+ {TM6010_REQ08_RE4_ADC_IN2_SEL, 0xf8},
+ {TM6010_REQ08_RE6_POWER_DOWN_CTRL2, 0x00},
+ {TM6010_REQ08_REA_BUFF_DRV_CTRL, 0xf2},
+ {TM6010_REQ08_REB_SIF_GAIN_CTRL, 0xf0},
+ {TM6010_REQ08_REC_REVERSE_YC_CTRL, 0xc2},
+ {TM6010_REQ08_RED_GAIN_SEL, 0xe0},
+ {TM6010_REQ08_RF0_DAUDIO_INPUT_CONFIG, 0x68},
+ {TM6010_REQ08_RF1_AADC_POWER_DOWN, 0xfc},
+ {TM6010_REQ07_RFE_POWER_DOWN, 0x8a},
+
+ {TM6010_REQ07_R3F_RESET, 0x01},
+ {TM6010_REQ07_R00_VIDEO_CONTROL0, 0x01},
+ {TM6010_REQ07_R01_VIDEO_CONTROL1, 0x0f},
+ {TM6010_REQ07_R02_VIDEO_CONTROL2, 0x5f},
+ {TM6010_REQ07_R03_YC_SEP_CONTROL, 0x03},
+ {TM6010_REQ07_R07_OUTPUT_CONTROL, 0x00},
+ {TM6010_REQ07_R17_HLOOP_MAXSTATE, 0x8b},
+ {TM6010_REQ07_R18_CHROMA_DTO_INCREMENT3, 0x1e},
+ {TM6010_REQ07_R19_CHROMA_DTO_INCREMENT2, 0x8b},
+ {TM6010_REQ07_R1A_CHROMA_DTO_INCREMENT1, 0xa2},
+ {TM6010_REQ07_R1B_CHROMA_DTO_INCREMENT0, 0xe9},
+ {TM6010_REQ07_R1C_HSYNC_DTO_INCREMENT3, 0x1c},
+ {TM6010_REQ07_R1D_HSYNC_DTO_INCREMENT2, 0xcc},
+ {TM6010_REQ07_R1E_HSYNC_DTO_INCREMENT1, 0xcc},
+ {TM6010_REQ07_R1F_HSYNC_DTO_INCREMENT0, 0xcd},
+ {TM6010_REQ07_R2E_ACTIVE_VIDEO_HSTART, 0x88},
+ {TM6010_REQ07_R30_ACTIVE_VIDEO_VSTART, 0x22},
+ {TM6010_REQ07_R31_ACTIVE_VIDEO_VHIGHT, 0x61},
+ {TM6010_REQ07_R33_VSYNC_HLOCK_MAX, 0x1c},
+ {TM6010_REQ07_R35_VSYNC_AGC_MAX, 0x1c},
+ {TM6010_REQ07_R82_COMB_FILTER_CONFIG, 0x42},
+ {TM6010_REQ07_R83_CHROMA_LOCK_CONFIG, 0x6F},
+
+ {TM6010_REQ07_R04_LUMA_HAGC_CONTROL, 0xdd},
+ {TM6010_REQ07_R0D_CHROMA_KILL_LEVEL, 0x07},
+ {TM6010_REQ07_R3F_RESET, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+void tm6000_get_std_res(struct tm6000_core *dev)
+{
+	/* Currently, these are the only supported resolutions */
+ if (dev->norm & V4L2_STD_525_60) {
+ dev->height = 480;
+ } else {
+ dev->height = 576;
+ }
+ dev->width = 720;
+}
+
+static int tm6000_load_std(struct tm6000_core *dev,
+ struct tm6000_reg_settings *set, int max_size)
+{
+ int i, rc;
+
+ /* Load board's initialization table */
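+	/* Each table ends with an all-zero sentinel entry; the !set[i].req
+	 * test below is what terminates this loop */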
+ for (i = 0; max_size; i++) {
+ if (!set[i].req)
+ return 0;
+
+ if ((dev->dev_type != TM6010) &&
+ (set[i].req == REQ_08_SET_GET_AVREG_BIT))
+ continue;
+
+ rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i while setting "
+ "req %d, reg %d to value %d\n",
+ rc, set[i].req, set[i].reg, set[i].value);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int tm6000_set_tv(struct tm6000_core *dev, int pos)
+{
+ int rc;
+
+ /* FIXME: This code is for tm6010 - not tested yet - doesn't work with
+ tm5600
+ */
+
+ /* FIXME: This is tuner-dependent */
+ int nosif = 0;
+
+ if (nosif) {
+ rc = tm6000_load_std(dev, tv_stds[pos].nosif,
+ sizeof(tv_stds[pos].nosif));
+ } else {
+ rc = tm6000_load_std(dev, tv_stds[pos].sif,
+ sizeof(tv_stds[pos].sif));
+ }
+ if (rc < 0)
+ return rc;
+ rc = tm6000_load_std(dev, tv_stds[pos].common,
+ sizeof(tv_stds[pos].common));
+
+ return rc;
+}
+
+int tm6000_set_standard(struct tm6000_core *dev, v4l2_std_id * norm)
+{
+ int i, rc = 0;
+
+ dev->norm = *norm;
+ tm6000_get_std_res(dev);
+
+ switch (dev->input) {
+ case TM6000_INPUT_TV:
+ for (i = 0; i < ARRAY_SIZE(tv_stds); i++) {
+ if (*norm & tv_stds[i].id) {
+ rc = tm6000_set_tv(dev, i);
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_SVIDEO:
+ for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) {
+ if (*norm & svideo_stds[i].id) {
+				rc = tm6000_load_std(dev, svideo_stds[i].common,
+						     sizeof(svideo_stds[i].common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_COMPOSITE:
+ for (i = 0; i < ARRAY_SIZE(composite_stds); i++) {
+ if (*norm & composite_stds[i].id) {
+				rc = tm6000_load_std(dev,
+						     composite_stds[i].common,
+						     sizeof(composite_stds[i].common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ }
+
+ret:
+ if (rc < 0)
+ return rc;
+
+ msleep(40);
+
+
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-usb-isoc.h b/drivers/staging/tm6000/tm6000-usb-isoc.h
new file mode 100644
index 00000000000..5a5049acd4e
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-usb-isoc.h
@@ -0,0 +1,53 @@
+/*
+   tm6000-usb-isoc.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/videodev2.h>
+
+#define TM6000_URB_MSG_LEN 180
+
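+/*
+ * Per-device isoc streaming state; one instance is embedded in
+ * struct tm6000_core as dev->isoc_ctl and is used by the URB
+ * completion path in tm6000-video.c.
+ */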
+struct usb_isoc_ctl {
+ /* max packet size of isoc transaction */
+ int max_pkt_size;
+
+ /* number of allocated urbs */
+ int num_bufs;
+
+ /* urb for isoc transfers */
+ struct urb **urb;
+
+ /* transfer buffers for isoc transfer */
+ char **transfer_buffer;
+
+ /* Last buffer command and region */
+ u8 cmd;
+ int pos, size, pktsize;
+
+ /* Last field: ODD or EVEN? */
+ int field;
+
+ /* Stores incomplete commands */
+ u32 tmp_buf;
+ int tmp_buf_len;
+
+ /* Stores already requested buffers */
+ struct tm6000_buffer *buf;
+
+ /* Stores the number of received fields */
+ int nfields;
+};
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
new file mode 100644
index 00000000000..f2b7fe4a358
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -0,0 +1,1557 @@
+/*
+ tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fixed module load/unload
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/highmem.h>
+#include <linux/freezer.h>
+
+#include "tm6000-regs.h"
+#include "tm6000.h"
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+/* Limits minimum and default number of buffers */
+#define TM6000_MIN_BUF 4
+#define TM6000_DEF_BUF 8
+
+#define TM6000_MAX_ISO_PACKETS 40 /* Max number of ISO packets */
+
+/* Declare static vars that will be used as parameters */
+static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
+static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
+
+/* Debug level */
+int tm6000_debug;
+
+/* supported controls */
+static struct v4l2_queryctrl tm6000_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 54,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 119,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 112,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 0x1,
+ .default_value = 0,
+ .flags = 0,
+ }
+};
+
+static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)];
+
+static struct tm6000_fmt format[] = {
+ {
+		.name = "4:2:2, packed, YUY2",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ }, {
+ .name = "A/V + VBI mux packet",
+ .fourcc = V4L2_PIX_FMT_TM6000,
+ .depth = 16,
+ }
+};
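+
+/*
+ * The first two formats are demultiplexed by copy_streams(); the
+ * V4L2_PIX_FMT_TM6000 format passes the raw mux packets through
+ * copy_multiplexed() instead (see tm6000_isoc_copy() below).
+ */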
+
+/* ------------------------------------------------------------------
+ DMA and thread functions
+ ------------------------------------------------------------------*/
+
+#define norm_maxw(a) 720
+#define norm_maxh(a) 576
+
+#define norm_minw(a) norm_maxw(a)
+#define norm_minh(a) norm_maxh(a)
+
+/*
+ * video-buf generic routine to get the next available buffer
+ */
+static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer **buf)
+{
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ char *outp;
+
+ if (list_empty(&dma_q->active)) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n");
+ *buf = NULL;
+ return;
+ }
+
+ *buf = list_entry(dma_q->active.next,
+ struct tm6000_buffer, vb.queue);
+
+ if (!buf)
+ return;
+
+	/* Clean up the buffer - useful for testing for frame/URB loss */
+ outp = videobuf_to_vmalloc(&(*buf)->vb);
+// if (outp)
+// memset(outp, 0, (*buf)->vb.size);
+
+ return;
+}
+
+/*
+ * Announces that a buffer was filled and requests the next
+ */
+static inline void buffer_filled(struct tm6000_core *dev,
+ struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer *buf)
+{
+	/* Advise that the buffer was filled */
+ dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+}
+
+const char *tm6000_msg_type[] = {
+ "unknown(0)", /* 0 */
+ "video", /* 1 */
+ "audio", /* 2 */
+ "vbi", /* 3 */
+ "pts", /* 4 */
+ "err", /* 5 */
+ "unknown(6)", /* 6 */
+ "unknown(7)", /* 7 */
+};
+
+/*
+ * Identifies the tm5600/6000 buffer header type and handles it properly
+ */
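+/*
+ * Layout of the 32-bit packet header decoded below (as read on a
+ * little-endian host; copy_streams() first hunts for the 0x47 sync byte):
+ *	bits 24-31: sync byte (0x47)
+ *	bits 21-23: cmd - message type, indexes tm6000_msg_type[]
+ *	bits 12-20: line number
+ *	bit 11:     field (odd/even)
+ *	bits 7-10:  block index within the line
+ *	bits 1-6:   encode the payload size (converted to bytes below and
+ *		    capped at TM6000_URB_MSG_LEN)
+ */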
+static int copy_packet(struct urb *urb, u32 header, u8 **ptr, u8 *endp,
+ u8 *out_p, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ u8 c;
+ unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
+ int rc = 0;
+ /* FIXME: move to tm6000-isoc */
+ static int last_line = -2, start_line = -2, last_field = -2;
+
+ /* FIXME: this is the hardcoded window size
+ */
+ unsigned int linewidth = (*buf)->vb.width << 1;
+
+ if (!dev->isoc_ctl.cmd) {
+ c = (header >> 24) & 0xff;
+
+ /* split the header fields */
+ size = (((header & 0x7e) << 1) -1) *4;
+ block = (header >> 7) & 0xf;
+ field = (header >> 11) & 0x1;
+ line = (header >> 12) & 0x1ff;
+ cmd = (header >> 21) & 0x7;
+
+ /* Validates header fields */
+ if(size > TM6000_URB_MSG_LEN)
+ size = TM6000_URB_MSG_LEN;
+
+ if (cmd == TM6000_URB_MSG_VIDEO) {
+ if ((block+1)*TM6000_URB_MSG_LEN>linewidth)
+ cmd = TM6000_URB_MSG_ERR;
+
+			/* FIXME: Assembles the image as field0+field1.
+			 * It should, instead, check whether the user selected
+			 * interlaced or non-interlaced mode
+			 */
+ pos = ((line << 1) - field - 1) * linewidth +
+ block * TM6000_URB_MSG_LEN;
+
+ /* Don't allow to write out of the buffer */
+ if (pos+TM6000_URB_MSG_LEN > (*buf)->vb.size) {
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "ERR: size=%d, num=%d, line=%d, "
+ "field=%d\n",
+ size, block, line, field);
+
+ cmd = TM6000_URB_MSG_ERR;
+ }
+ } else {
+ pos=0;
+ }
+
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "size=%d, num=%d, "
+ " line=%d, field=%d\n",
+ size, block, line, field);
+
+ if ((last_line!=line)&&(last_line+1!=line) &&
+ (cmd != TM6000_URB_MSG_ERR) ) {
+ if (cmd != TM6000_URB_MSG_VIDEO) {
+ dprintk(dev, V4L2_DEBUG_ISOC, "cmd=%d, "
+ "size=%d, num=%d, line=%d, field=%d\n",
+ cmd, size, block, line, field);
+ }
+ if (start_line<0)
+ start_line=last_line;
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "lines= %d-%d, "
+ "field=%d\n",
+ start_line, last_line, field);
+
+ if ((start_line<6 && last_line>200) &&
+ (last_field != field) ) {
+
+ dev->isoc_ctl.nfields++;
+ if (dev->isoc_ctl.nfields>=2) {
+ dev->isoc_ctl.nfields=0;
+
+					/* Announce that a new buffer was filled */
+ buffer_filled (dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "new buffer filled\n");
+ get_next_buf (dma_q, buf);
+ if (!*buf)
+ return rc;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+
+ pos = dev->isoc_ctl.pos = 0;
+ }
+ }
+
+ start_line=line;
+ last_field=field;
+ }
+ if (cmd == TM6000_URB_MSG_VIDEO)
+ last_line = line;
+
+ pktsize = TM6000_URB_MSG_LEN;
+ } else {
+ /* Continue the last copy */
+ cmd = dev->isoc_ctl.cmd;
+ size= dev->isoc_ctl.size;
+ pos = dev->isoc_ctl.pos;
+ pktsize = dev->isoc_ctl.pktsize;
+ }
+
+ cpysize = (endp-(*ptr) > size) ? size : endp - *ptr;
+
+ if (cpysize) {
+ /* handles each different URB message */
+ switch(cmd) {
+ case TM6000_URB_MSG_VIDEO:
+ /* Fills video buffer */
+ memcpy(&out_p[pos], *ptr, cpysize);
+ break;
+ case TM6000_URB_MSG_PTS:
+ break;
+ case TM6000_URB_MSG_AUDIO:
+			/* Need some code to process audio */
+			printk("%ld: cmd=%s, size=%d\n", jiffies,
+			       tm6000_msg_type[cmd], size);
+ break;
+ default:
+ dprintk (dev, V4L2_DEBUG_ISOC, "cmd=%s, size=%d\n",
+ tm6000_msg_type[cmd],size);
+ }
+ }
+ if (cpysize<size) {
+ /* End of URB packet, but cmd processing is not
+ * complete. Preserve the state for a next packet
+ */
+ dev->isoc_ctl.pos = pos+cpysize;
+ dev->isoc_ctl.size= size-cpysize;
+ dev->isoc_ctl.cmd = cmd;
+ dev->isoc_ctl.pktsize = pktsize-cpysize;
+ (*ptr)+=cpysize;
+ } else {
+ dev->isoc_ctl.cmd = 0;
+ (*ptr)+=pktsize;
+ }
+
+ return rc;
+}
+
+static int copy_streams(u8 *data, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ u8 *ptr=data, *endp=data+len;
+ unsigned long header=0;
+ int rc=0;
+
+ for (ptr=data; ptr<endp;) {
+ if (!dev->isoc_ctl.cmd) {
+ u8 *p=(u8 *)&dev->isoc_ctl.tmp_buf;
+ /* FIXME: This seems very complex
+ * It just recovers up to 3 bytes of the header that
+ * might be at the previous packet
+ */
+ if (dev->isoc_ctl.tmp_buf_len) {
+ while (dev->isoc_ctl.tmp_buf_len) {
+ if ( *(ptr+3-dev->isoc_ctl.tmp_buf_len) == 0x47) {
+ break;
+ }
+ p++;
+ dev->isoc_ctl.tmp_buf_len--;
+ }
+ if (dev->isoc_ctl.tmp_buf_len) {
+ memcpy(&header, p,
+ dev->isoc_ctl.tmp_buf_len);
+ memcpy((u8 *)&header +
+ dev->isoc_ctl.tmp_buf_len,
+ ptr,
+ 4 - dev->isoc_ctl.tmp_buf_len);
+ ptr += 4 - dev->isoc_ctl.tmp_buf_len;
+ goto HEADER;
+ }
+ }
+ /* Seek for sync */
+ for (;ptr<endp-3;ptr++) {
+ if (*(ptr+3)==0x47)
+ break;
+ }
+
+ if (ptr+3>=endp) {
+ dev->isoc_ctl.tmp_buf_len=endp-ptr;
+ memcpy (&dev->isoc_ctl.tmp_buf,ptr,
+ dev->isoc_ctl.tmp_buf_len);
+ dev->isoc_ctl.cmd=0;
+ return rc;
+ }
+
+ /* Get message header */
+ header=*(unsigned long *)ptr;
+ ptr+=4;
+ }
+HEADER:
+ /* Copy or continue last copy */
+ rc=copy_packet(urb,header,&ptr,endp,out_p,buf);
+ if (rc<0) {
+ buf=NULL;
+ printk(KERN_ERR "tm6000: buffer underrun at %ld\n",
+ jiffies);
+ return rc;
+ }
+ if (!*buf)
+ return 0;
+ }
+
+ return 0;
+}
+/*
+ * Copies the raw multiplexed (A/V + VBI) stream straight into the video buffer
+ */
+static int copy_multiplexed(u8 *ptr, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ unsigned int pos=dev->isoc_ctl.pos,cpysize;
+ int rc=1;
+
+ while (len>0) {
+ cpysize=min(len,(*buf)->vb.size-pos);
+//printk("Copying %d bytes (max=%lu) from %p to %p[%u]\n",cpysize,(*buf)->vb.size,ptr,out_p,pos);
+ memcpy(&out_p[pos], ptr, cpysize);
+ pos+=cpysize;
+ ptr+=cpysize;
+ len-=cpysize;
+ if (pos >= (*buf)->vb.size) {
+ pos=0;
+			/* Announce that a new buffer was filled */
+ buffer_filled (dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n");
+ get_next_buf (dma_q, buf);
+ if (!*buf)
+ break;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+ pos = 0;
+ }
+ }
+
+ dev->isoc_ctl.pos=pos;
+ return rc;
+}
+
+static inline void print_err_status(struct tm6000_core *dev,
+				    int packet, int status)
+{
+ char *errmsg = "Unknown";
+
+ switch(status) {
+ case -ENOENT:
+		errmsg = "unlinked synchronously";
+		break;
+	case -ECONNRESET:
+		errmsg = "unlinked asynchronously";
+ break;
+ case -ENOSR:
+ errmsg = "Buffer error (overrun)";
+ break;
+ case -EPIPE:
+ errmsg = "Stalled (device not responding)";
+ break;
+ case -EOVERFLOW:
+ errmsg = "Babble (bad cable?)";
+ break;
+ case -EPROTO:
+ errmsg = "Bit-stuff error (bad cable?)";
+ break;
+ case -EILSEQ:
+ errmsg = "CRC/Timeout (could be anything)";
+ break;
+ case -ETIME:
+ errmsg = "Device does not respond";
+ break;
+ }
+ if (packet<0) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n",
+ status, errmsg);
+ } else {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
+ }
+}
+
+
+/*
+ * Controls the isoc copy of each urb packet
+ */
+static inline int tm6000_isoc_copy(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ struct tm6000_buffer *buf;
+ int i, len=0, rc=1;
+ int size;
+ char *outp = NULL, *p;
+ unsigned long copied;
+
+ get_next_buf(dma_q, &buf);
+ if (buf)
+ outp = videobuf_to_vmalloc(&buf->vb);
+
+ if (!outp)
+ return 0;
+
+ size = buf->vb.size;
+
+ copied=0;
+
+ if (urb->status<0) {
+ print_err_status (dev,-1,urb->status);
+ return 0;
+ }
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ int status = urb->iso_frame_desc[i].status;
+
+ if (status<0) {
+ print_err_status (dev,i,status);
+ continue;
+ }
+
+ len=urb->iso_frame_desc[i].actual_length;
+
+// if (len>=TM6000_URB_MSG_LEN) {
+ p=urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ if (!urb->iso_frame_desc[i].status) {
+ if ((buf->fmt->fourcc)==V4L2_PIX_FMT_TM6000) {
+ rc=copy_multiplexed(p, outp, len, urb, &buf);
+ if (rc<=0)
+ return rc;
+ } else {
+ copy_streams(p, outp, len, urb, &buf);
+ }
+ }
+ copied += len;
+ if (copied >= size || !buf)
+ break;
+// }
+ }
+ return rc;
+}
+
+/* ------------------------------------------------------------------
+ URB control
+ ------------------------------------------------------------------*/
+
+/*
+ * URB completion (IRQ) callback
+ */
+static void tm6000_irq_callback(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ int i;
+
+ if (!dev)
+ return;
+
+ spin_lock(&dev->slock);
+ tm6000_isoc_copy(urb);
+ spin_unlock(&dev->slock);
+
+ /* Reset urb buffers */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = 0;
+ }
+
+ urb->status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (urb->status)
+ tm6000_err("urb resubmit failed (error=%i)\n",
+ urb->status);
+}
+
+/*
+ * Stop and Deallocate URBs
+ */
+static void tm6000_uninit_isoc(struct tm6000_core *dev)
+{
+ struct urb *urb;
+ int i;
+
+ dev->isoc_ctl.nfields = -1;
+ dev->isoc_ctl.buf = NULL;
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb=dev->isoc_ctl.urb[i];
+ if (urb) {
+ usb_kill_urb(urb);
+ usb_unlink_urb(urb);
+ if (dev->isoc_ctl.transfer_buffer[i]) {
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->isoc_ctl.transfer_buffer[i],
+ urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+ dev->isoc_ctl.urb[i] = NULL;
+ }
+ dev->isoc_ctl.transfer_buffer[i] = NULL;
+ }
+
+ kfree (dev->isoc_ctl.urb);
+ kfree (dev->isoc_ctl.transfer_buffer);
+
+	dev->isoc_ctl.urb = NULL;
+	dev->isoc_ctl.transfer_buffer = NULL;
+	dev->isoc_ctl.num_bufs = 0;
+}
+
+/*
+ * Allocate URBs and start IRQ
+ */
+static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i, j, sb_size, pipe, size, max_packets, num_bufs = 5;
+ struct urb *urb;
+
+ /* De-allocates all pending stuff */
+ tm6000_uninit_isoc(dev);
+
+ usb_set_interface(dev->udev,
+ dev->isoc_in.bInterfaceNumber,
+ dev->isoc_in.bAlternateSetting);
+
+ pipe = usb_rcvisocpipe(dev->udev,
+ dev->isoc_in.endp->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+
+ size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+
+ if (size > dev->isoc_in.maxsize)
+ size = dev->isoc_in.maxsize;
+
+ dev->isoc_ctl.max_pkt_size = size;
+
+ max_packets = ( framesize + size - 1) / size;
+
+ if (max_packets > TM6000_MAX_ISO_PACKETS)
+ max_packets = TM6000_MAX_ISO_PACKETS;
+
+ sb_size = max_packets * size;
+
+ dev->isoc_ctl.num_bufs = num_bufs;
+
+ dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!dev->isoc_ctl.urb) {
+ tm6000_err("cannot alloc memory for usb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
+ GFP_KERNEL);
+ if (!dev->isoc_ctl.transfer_buffer) {
+		tm6000_err("cannot allocate memory for usb transfer buffers\n");
+ kfree(dev->isoc_ctl.urb);
+ return -ENOMEM;
+ }
+
+	dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d packets in %d buffers"
+		" (%d bytes per buffer); endpoint maxsize %d, packet size %u\n",
+		max_packets, num_bufs, sb_size,
+		dev->isoc_in.maxsize, size);
+
+ /* allocate urbs and transfer buffers */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb = usb_alloc_urb(max_packets, GFP_KERNEL);
+ if (!urb) {
+ tm6000_err("cannot alloc isoc_ctl.urb %i\n", i);
+ tm6000_uninit_isoc(dev);
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+ dev->isoc_ctl.urb[i] = urb;
+
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+ sb_size, GFP_KERNEL, &urb->transfer_dma);
+ if (!dev->isoc_ctl.transfer_buffer[i]) {
+ tm6000_err ("unable to allocate %i bytes for transfer"
+ " buffer %i%s\n",
+ sb_size, i,
+ in_interrupt()?" while in int":"");
+ tm6000_uninit_isoc(dev);
+ return -ENOMEM;
+ }
+ memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+
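+		/*
+		 * usb_fill_bulk_urb() is only borrowed here to fill in the
+		 * common URB fields; the pipe comes from usb_rcvisocpipe()
+		 * and the isochronous fields (number_of_packets and
+		 * iso_frame_desc[]) are set up by hand below.
+		 */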
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ dev->isoc_ctl.transfer_buffer[i], sb_size,
+ tm6000_irq_callback, dma_q);
+ urb->interval = dev->isoc_in.endp->desc.bInterval;
+ urb->number_of_packets = max_packets;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+
+ for (j = 0; j < max_packets; j++) {
+ urb->iso_frame_desc[j].offset = size * j;
+ urb->iso_frame_desc[j].length = size;
+ }
+ }
+
+ return 0;
+}
+
+static int tm6000_start_thread( struct tm6000_core *dev)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i;
+
+ dma_q->frame = 0;
+ dma_q->ini_jiffies = jiffies;
+
+ init_waitqueue_head(&dma_q->wq);
+
+ /* submit urbs and enable IRQ */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ int rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC);
+ if (rc) {
+ tm6000_err("submit of urb %i failed (error=%i)\n", i,
+ rc);
+ tm6000_uninit_isoc(dev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+
+static int
+buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+
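+ /* Buffer size in bytes: bits per pixel * width * height / 8 */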
+ *size = fh->fmt->depth * fh->width * fh->height >> 3;
+ if (0 == *count)
+ *count = TM6000_DEF_BUF;
+
+ if (*count < TM6000_MIN_BUF)
+ *count = TM6000_MIN_BUF;
+
+ while (*size * *count > vid_limit * 1024 * 1024)
+ (*count)--;
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct tm6000_buffer *buf)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ unsigned long flags;
+
+ BUG_ON(in_interrupt());
+
+ /* We used to wait for the buffer to finish here, but this didn't work
+ because, as we were keeping the state as VIDEOBUF_QUEUED,
+ videobuf_queue_cancel marked it as finished for us.
+ (Also, it could wedge forever if the hardware was misconfigured.)
+
+ This should be safe; by the time we get here, the buffer isn't
+ queued anymore. If we ever start marking the buffers as
+ VIDEOBUF_ACTIVE, it won't be, though.
+ */
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.buf == buf)
+ dev->isoc_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ videobuf_vmalloc_free(&buf->vb);
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int
+buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_core *dev = fh->dev;
+ int rc = 0, urb_init = 0;
+
+ BUG_ON(NULL == fh->fmt);
+
+
+ /* FIXME: It assumes depth=2 */
+ /* The only currently supported format is 16 bits/pixel */
+ buf->vb.size = fh->fmt->depth*fh->width*fh->height >> 3;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ return -EINVAL;
+
+ if (buf->fmt != fh->fmt ||
+ buf->vb.width != fh->width ||
+ buf->vb.height != fh->height ||
+ buf->vb.field != field) {
+ buf->fmt = fh->fmt;
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+ if (0 != (rc = videobuf_iolock(vq, &buf->vb, NULL)))
+ goto fail;
+ urb_init = 1;
+ }
+
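+ /* (Re)start the isoc transfers if no URBs have been allocated yet */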
+ if (!dev->isoc_ctl.num_bufs)
+ urb_init = 1;
+
+ if (urb_init) {
+ rc = tm6000_prepare_isoc(dev, buf->vb.size);
+ if (rc < 0)
+ goto fail;
+
+ rc = tm6000_start_thread(dev);
+ if (rc < 0)
+ goto fail;
+
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+ return rc;
+}
+
+static void
+buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ struct tm6000_dmaqueue *vidq = &dev->vidq;
+
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vidq->active);
+}
+
+static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+
+ free_buffer(vq,buf);
+}
+
+static struct videobuf_queue_ops tm6000_video_qops = {
+ .buf_setup = buffer_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = buffer_release,
+};
+
+/* ------------------------------------------------------------------
+ IOCTL handling
+ ------------------------------------------------------------------*/
+
+static int res_get(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ /* is it free? */
+ mutex_lock(&dev->lock);
+ if (dev->resources) {
+ /* no, someone else uses it */
+ mutex_unlock(&dev->lock);
+ return 0;
+ }
+ /* it's free, grab it */
+ dev->resources =1;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n");
+ mutex_unlock(&dev->lock);
+ return 1;
+}
+
+static int res_locked(struct tm6000_core *dev)
+{
+ return (dev->resources);
+}
+
+static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ mutex_lock(&dev->lock);
+ dev->resources = 0;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n");
+ mutex_unlock(&dev->lock);
+}
+
+/* ------------------------------------------------------------------
+ IOCTL vidioc handling
+ ------------------------------------------------------------------*/
+static int vidioc_querycap (struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ // struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+
+ strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
+ strlcpy(cap->card,"Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
+ // strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info));
+ cap->version = TM6000_VERSION;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (unlikely(f->index >= ARRAY_SIZE(format)))
+ return -EINVAL;
+
+ strlcpy(f->description,format[f->index].name,sizeof(f->description));
+ f->pixelformat = format[f->index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+
+ f->fmt.pix.width = fh->width;
+ f->fmt.pix.height = fh->height;
+ f->fmt.pix.field = fh->vb_vidq.field;
+ f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return (0);
+}
+
+static struct tm6000_fmt* format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(format); i++)
+ if (format[i].fourcc == fourcc)
+ return format+i;
+ return NULL;
+}
+
+static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+ struct tm6000_fmt *fmt;
+ enum v4l2_field field;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (NULL == fmt) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)"
+ " invalid.\n", f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY) {
+// field=V4L2_FIELD_INTERLACED;
+ field=V4L2_FIELD_SEQ_TB;
+ } else if (V4L2_FIELD_INTERLACED != field) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
+ return -EINVAL;
+ }
+
+ tm6000_get_std_res (dev);
+
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+
+ f->fmt.pix.width &= ~0x01;
+
+ f->fmt.pix.field = field;
+
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+/*FIXME: This seems to be generic enough to be at videodev2 */
+static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int ret = vidioc_try_fmt_vid_cap(file,fh,f);
+ if (ret < 0)
+ return (ret);
+
+ fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->width = f->fmt.pix.width;
+ fh->height = f->fmt.pix.height;
+ fh->vb_vidq.field = f->fmt.pix.field;
+ fh->type = f->type;
+
+ dev->fourcc = f->fmt.pix.pixelformat;
+
+ tm6000_set_fourcc_format(dev);
+
+ return (0);
+}
+
+static int vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_reqbufs(&fh->vb_vidq, p));
+}
+
+static int vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_querybuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_qbuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK));
+}
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
+{
+ struct tm6000_fh *fh=priv;
+
+ return videobuf_cgmbuf (&fh->vb_vidq, mbuf, 8);
+}
+#endif
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ if (!res_get(dev,fh))
+ return -EBUSY;
+ return (videobuf_streamon(&fh->vb_vidq));
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ videobuf_streamoff(&fh->vb_vidq);
+ res_free(dev,fh);
+
+ return (0);
+}
+
+static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
+{
+ int rc=0;
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ rc=tm6000_set_standard (dev, norm);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ if (rc<0)
+ return rc;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ return 0;
+}
+
+static int vidioc_enum_input (struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ switch (inp->index) {
+ case TM6000_INPUT_TV:
+ inp->type = V4L2_INPUT_TYPE_TUNER;
+ strcpy(inp->name,"Television");
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"Composite");
+ break;
+ case TM6000_INPUT_SVIDEO:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"S-Video");
+ break;
+ default:
+ return -EINVAL;
+ }
+ inp->std = TM6000_STD;
+
+ return 0;
+}
+
+static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ *i=dev->input;
+
+ return 0;
+}
+static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int rc=0;
+ char buf[1];
+
+ switch (i) {
+ case TM6000_INPUT_TV:
+ dev->input=i;
+ *buf=0;
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ case TM6000_INPUT_SVIDEO:
+ dev->input=i;
+ *buf=1;
+ break;
+ default:
+ return -EINVAL;
+ }
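+ /*
+ * Switch the video input mux through the MCU GPIO request:
+ * buf is 0 for the tuner and 1 for the external A/V inputs,
+ * as selected in the switch statement above.
+ */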
+ rc=tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ REQ_03_SET_GET_MCU_PIN, 0x03, 1, buf, 1);
+
+ if (!rc) {
+ dev->input=i;
+ rc=vidioc_s_std (file, priv, &dev->vfd->current_norm);
+ }
+
+ return (rc);
+}
+
+ /* --- controls ---------------------------------------------- */
+static int vidioc_queryctrl (struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ if (qc->id && qc->id == tm6000_qctrl[i].id) {
+ memcpy(qc, &(tm6000_qctrl[i]),
+ sizeof(*qc));
+ return (0);
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_g_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int val;
+
+ /* FIXME: Probably, those won't work! Maybe we need shadow regs */
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0);
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0);
+ break;
+ case V4L2_CID_SATURATION:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0);
+ break;
+ case V4L2_CID_HUE:
+ val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val < 0)
+ return val;
+
+ ctrl->value = val;
+
+ return 0;
+}
+static int vidioc_s_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+ u8 val=ctrl->value;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val);
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val);
+ return 0;
+ case V4L2_CID_SATURATION:
+ tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val);
+ return 0;
+ case V4L2_CID_HUE:
+ tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "Television");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM;
+ t->rangehigh = 0xffffffffUL;
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+
+ return 0;
+}
+
+static int vidioc_s_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (UNSET == dev->tuner_type)
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vidioc_g_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = dev->freq;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
+
+ return 0;
+}
+
+static int vidioc_s_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(f->type != V4L2_TUNER_ANALOG_TV))
+ return -EINVAL;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (unlikely(f->tuner != 0))
+ return -EINVAL;
+
+// mutex_lock(&dev->lock);
+ dev->freq = f->frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
+// mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ File operations for the device
+ ------------------------------------------------------------------*/
+
+static int tm6000_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct tm6000_core *dev = video_drvdata(file);
+ struct tm6000_fh *fh;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ int i,rc;
+
+ printk(KERN_INFO "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+
+ /* If more than one user, mutex should be added */
+ dev->users++;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[type],
+ dev->users);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
+ if (NULL == fh) {
+ dev->users--;
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+ fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dev->fourcc = format[0].fourcc;
+
+ fh->fmt = format_by_fourcc(dev->fourcc);
+
+ tm6000_get_std_res (dev);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
+ "dev->vidq=0x%08lx\n",
+ (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq);
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "queued=%d\n",list_empty(&dev->vidq.queued));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "active=%d\n",list_empty(&dev->vidq.active));
+
+ /* initialize hardware on analog mode */
+ if (dev->mode != TM6000_MODE_ANALOG) {
+ rc = tm6000_init_analog_mode(dev);
+ if (rc < 0) {
+ dev->users--;
+ kfree(fh);
+ return rc;
+ }
+
+ /* Put all controls at a sane state */
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ qctl_regs[i] = tm6000_qctrl[i].default_value;
+
+ dev->mode = TM6000_MODE_ANALOG;
+ }
+
+ videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
+ NULL, &dev->slock,
+ fh->type,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct tm6000_buffer),fh);
+
+ return 0;
+}
+
+static ssize_t
+tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
+{
+ struct tm6000_fh *fh = file->private_data;
+
+ if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (res_locked(fh->dev))
+ return -EBUSY;
+
+ return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0,
+ file->f_flags & O_NONBLOCK);
+ }
+ return 0;
+}
+
+static unsigned int
+tm6000_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_buffer *buf;
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+ return POLLERR;
+
+ if (res_get(fh->dev,fh)) {
+ /* streaming capture */
+ if (list_empty(&fh->vb_vidq.stream))
+ return POLLERR;
+ buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream);
+ } else {
+ /* read() capture */
+ return videobuf_poll_stream(file, &fh->vb_vidq,
+ wait);
+ }
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN|POLLRDNORM;
+ return 0;
+}
+
+static int tm6000_release(struct file *file)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_core *dev = fh->dev;
+ struct video_device *vdev = video_devdata(file);
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: close called (dev=%s, users=%d)\n",
+ video_device_node_name(vdev), dev->users);
+
+ dev->users--;
+
+ if (!dev->users) {
+ tm6000_uninit_isoc(dev);
+ videobuf_mmap_free(&fh->vb_vidq);
+ }
+
+ kfree (fh);
+
+ return 0;
+}
+
+static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
+{
+ struct tm6000_fh *fh = file->private_data;
+ int ret;
+
+ ret=videobuf_mmap_mapper(&fh->vb_vidq, vma);
+
+ return ret;
+}
+
+static struct v4l2_file_operations tm6000_fops = {
+ .owner = THIS_MODULE,
+ .open = tm6000_open,
+ .release = tm6000_release,
+ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .read = tm6000_read,
+ .poll = tm6000_poll,
+ .mmap = tm6000_mmap,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = vidiocgmbuf,
+#endif
+};
+
+static struct video_device tm6000_template = {
+ .name = "tm6000",
+ .fops = &tm6000_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = video_device_release,
+ .tvnorms = TM6000_STD,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+/* -----------------------------------------------------------------
+ Initialization and module stuff
+ ------------------------------------------------------------------*/
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct video_device *vfd;
+
+ vfd = video_device_alloc();
+ if (!vfd)
+ return -ENOMEM;
+ dev->vfd = vfd;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&dev->vidq.active);
+ INIT_LIST_HEAD(&dev->vidq.queued);
+
+ memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
+ dev->vfd->debug = tm6000_debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
+ return ret;
+}
+
+int tm6000_v4l2_unregister(struct tm6000_core *dev)
+{
+ video_unregister_device(dev->vfd);
+
+ return 0;
+}
+
+int tm6000_v4l2_exit(void)
+{
+ return 0;
+}
+
+module_param(video_nr, int, 0);
+MODULE_PARM_DESC(video_nr, "Allow changing video device number");
+
+module_param_named(debug, tm6000_debug, int, 0444);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+module_param(vid_limit, int, 0644);
+MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
+
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/staging/tm6000/tm6000.h
new file mode 100644
index 00000000000..6812d6867d5
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000.h
@@ -0,0 +1,301 @@
+/*
+ tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+// Use the tm6000-hack, instead of the proper initialization code
+//#define HACK 1
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf-vmalloc.h>
+#include "tm6000-usb-isoc.h"
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <media/v4l2-device.h>
+
+
+#include <linux/dvb/frontend.h>
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dmxdev.h"
+
+#define TM6000_VERSION KERNEL_VERSION(0, 0, 2)
+
+/* Inputs */
+
+enum tm6000_itype {
+ TM6000_INPUT_TV = 0,
+ TM6000_INPUT_COMPOSITE,
+ TM6000_INPUT_SVIDEO,
+};
+
+enum tm6000_devtype {
+ TM6000 = 0,
+ TM5600,
+ TM6010,
+};
+
+/* ------------------------------------------------------------------
+ Basic structures
+ ------------------------------------------------------------------*/
+
+struct tm6000_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+};
+
+/* buffer for one video frame */
+struct tm6000_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+
+ struct tm6000_fmt *fmt;
+};
+
+struct tm6000_dmaqueue {
+ struct list_head active;
+ struct list_head queued;
+
+ /* thread for generating video stream*/
+ struct task_struct *kthread;
+ wait_queue_head_t wq;
+ /* Counters to control fps rate */
+ int frame;
+ int ini_jiffies;
+};
+
+/* device states */
+enum tm6000_core_state {
+ DEV_INITIALIZED = 0x01,
+ DEV_DISCONNECTED = 0x02,
+ DEV_MISCONFIGURED = 0x04,
+};
+
+/* io methods */
+enum tm6000_io_method {
+ IO_NONE,
+ IO_READ,
+ IO_MMAP,
+};
+
+enum tm6000_mode {
+ TM6000_MODE_UNKNOWN=0,
+ TM6000_MODE_ANALOG,
+ TM6000_MODE_DIGITAL,
+};
+
+struct tm6000_gpio {
+ int tuner_reset;
+ int tuner_on;
+ int demod_reset;
+ int demod_on;
+ int power_led;
+ int dvb_led;
+ int ir;
+};
+
+struct tm6000_capabilities {
+ unsigned int has_tuner:1;
+ unsigned int has_tda9874:1;
+ unsigned int has_dvb:1;
+ unsigned int has_zl10353:1;
+ unsigned int has_eeprom:1;
+ unsigned int has_remote:1;
+};
+
+struct tm6000_dvb {
+ struct dvb_adapter adapter;
+ struct dvb_demux demux;
+ struct dvb_frontend *frontend;
+ struct dmxdev dmxdev;
+ unsigned int streams;
+ struct urb *bulk_urb;
+ struct mutex mutex;
+};
+
+struct tm6000_endpoint {
+ struct usb_host_endpoint *endp;
+ __u8 bInterfaceNumber;
+ __u8 bAlternateSetting;
+ unsigned maxsize;
+};
+
+struct tm6000_core {
+ /* generic device properties */
+ char name[30]; /* name (including minor) of the device */
+ int model; /* index in the device_data struct */
+ int devno; /* marks the number of this device */
+ enum tm6000_devtype dev_type; /* type of device */
+
+ v4l2_std_id norm; /* Current norm */
+ int width,height; /* Selected resolution */
+
+ enum tm6000_core_state state;
+
+ /* Device Capabilities*/
+ struct tm6000_capabilities caps;
+
+ /* Tuner configuration */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+
+ struct tm6000_gpio gpio;
+
+ /* Demodulator configuration */
+ int demod_addr; /* demodulator address */
+
+ int audio_bitrate;
+ /* i2c i/o */
+ struct i2c_adapter i2c_adap;
+ struct i2c_client i2c_client;
+
+ /* video for linux */
+ int users;
+
+ /* various device info */
+ unsigned int resources;
+ struct video_device *vfd;
+ struct tm6000_dmaqueue vidq;
+ struct v4l2_device v4l2_dev;
+
+ int input;
+ int freq;
+ unsigned int fourcc;
+
+ enum tm6000_mode mode;
+
+ /* DVB-T support */
+ struct tm6000_dvb *dvb;
+
+ /* locks */
+ struct mutex lock;
+
+ /* usb transfer */
+ struct usb_device *udev; /* the usb device */
+
+ struct tm6000_endpoint bulk_in, bulk_out, isoc_in, isoc_out;
+
+ /* scaler!=0 if scaler is active*/
+ int scaler;
+
+ /* Isoc control struct */
+ struct usb_isoc_ctl isoc_ctl;
+
+ spinlock_t slock;
+};
+
+struct tm6000_fh {
+ struct tm6000_core *dev;
+
+ /* video capture */
+ struct tm6000_fmt *fmt;
+ unsigned int width,height;
+ struct videobuf_queue vb_vidq;
+
+ enum v4l2_buf_type type;
+};
+
+#define TM6000_STD (V4L2_STD_PAL|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc| \
+ V4L2_STD_PAL_M|V4L2_STD_PAL_60|V4L2_STD_NTSC_M| \
+ V4L2_STD_NTSC_M_JP|V4L2_STD_SECAM)
+
+/* In tm6000-cards.c */
+
+int tm6000_tuner_callback (void *ptr, int component, int command, int arg);
+int tm6000_xc5000_callback (void *ptr, int component, int command, int arg);
+int tm6000_cards_setup(struct tm6000_core *dev);
+
+/* In tm6000-core.c */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 reqtype, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len);
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_get_reg16(struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_get_reg32(struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_init (struct tm6000_core *dev);
+
+int tm6000_init_analog_mode (struct tm6000_core *dev);
+int tm6000_init_digital_mode (struct tm6000_core *dev);
+int tm6000_set_audio_bitrate (struct tm6000_core *dev, int bitrate);
+
+int tm6000_dvb_register(struct tm6000_core *dev);
+void tm6000_dvb_unregister(struct tm6000_core *dev);
+
+int tm6000_v4l2_register(struct tm6000_core *dev);
+int tm6000_v4l2_unregister(struct tm6000_core *dev);
+int tm6000_v4l2_exit(void);
+void tm6000_set_fourcc_format(struct tm6000_core *dev);
+
+/* In tm6000-stds.c */
+void tm6000_get_std_res(struct tm6000_core *dev);
+int tm6000_set_standard (struct tm6000_core *dev, v4l2_std_id *norm);
+
+/* In tm6000-i2c.c */
+int tm6000_i2c_register(struct tm6000_core *dev);
+int tm6000_i2c_unregister(struct tm6000_core *dev);
+
+/* In tm6000-queue.c */
+
+int tm6000_v4l2_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int tm6000_vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb);
+int tm6000_vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *b);
+int tm6000_vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+int tm6000_vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+ssize_t tm6000_v4l2_read(struct file *filp, char __user * buf, size_t count,
+ loff_t * f_pos);
+unsigned int tm6000_v4l2_poll(struct file *file,
+ struct poll_table_struct *wait);
+int tm6000_queue_init(struct tm6000_core *dev);
+
+/* In tm6000-alsa.c */
+int tm6000_audio_init(struct tm6000_core *dev, int idx);
+
+
+/* Debug stuff */
+
+extern int tm6000_debug;
+
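+/*
+ * tm6000_debug is a bitmask: OR together the V4L2_DEBUG_* flags below
+ * and pass the result via the "debug" module parameter to enable the
+ * corresponding dprintk() messages.
+ */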
+#define dprintk(dev, level, fmt, arg...) do {\
+ if (tm6000_debug & level) \
+ printk(KERN_INFO "(%lu) %s %s :"fmt, jiffies, \
+ dev->name, __FUNCTION__ , ##arg); } while (0)
+
+#define V4L2_DEBUG_REG 0x0004
+#define V4L2_DEBUG_I2C 0x0008
+#define V4L2_DEBUG_QUEUE 0x0010
+#define V4L2_DEBUG_ISOC 0x0020
+#define V4L2_DEBUG_RES_LOCK 0x0040 /* Resource locking */
+#define V4L2_DEBUG_OPEN 0x0080 /* video open/close debug */
+
+#define tm6000_err(fmt, arg...) do {\
+ printk(KERN_ERR "tm6000 %s :"fmt, \
+ __FUNCTION__ , ##arg); } while (0)
+
+
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index a78ade0dc68..c7e061e5e04 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -58,17 +58,17 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
#ifndef CONFIG_FB_DEFERRED_IO
-#warning message "kernel FB_DEFFERRED_IO option to support generic fbdev apps"
+#warning Please set CONFIG_FB_DEFERRED_IO option to support generic fbdev apps
#endif
#ifndef CONFIG_FB_SYS_IMAGEBLIT
#ifndef CONFIG_FB_SYS_IMAGEBLIT_MODULE
-#warning message "FB_SYS_* in kernel or module option to support fb console"
+#warning Please set CONFIG_FB_SYS_IMAGEBLIT option to support fb console
#endif
#endif
#ifndef CONFIG_FB_MODE_HELPERS
-#warning message "kernel FB_MODE_HELPERS required. Expect build break"
+#warning CONFIG_FB_MODE_HELPERS required. Expect build break
#endif
/* dlfb keeps a list of urbs for efficient bulk transfers */
@@ -366,32 +366,32 @@ static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
}
/*
-Render a command stream for an encoded horizontal line segment of pixels.
-
-A command buffer holds several commands.
-It always begins with a fresh command header
-(the protocol doesn't require this, but we enforce it to allow
-multiple buffers to be potentially encoded and sent in parallel).
-A single command encodes one contiguous horizontal line of pixels
-
-The function relies on the client to do all allocation, so that
-rendering can be done directly to output buffers (e.g. USB URBs).
-The function fills the supplied command buffer, providing information
-on where it left off, so the client may call in again with additional
-buffers if the line will take several buffers to complete.
-
-A single command can transmit a maximum of 256 pixels,
-regardless of the compression ratio (protocol design limit).
-To the hardware, 0 for a size byte means 256
-
-Rather than 256 pixel commands which are either rl or raw encoded,
-the rlx command simply assumes alternating raw and rl spans within one cmd.
-This has a slightly larger header overhead, but produces more even results.
-It also processes all data (read and write) in a single pass.
-Performance benchmarks of common cases show it having just slightly better
-compression than 256 pixel raw -or- rle commands, with similar CPU consumpion.
-But for very rl friendly data, will compress not quite as well.
-*/
+ * Render a command stream for an encoded horizontal line segment of pixels.
+ *
+ * A command buffer holds several commands.
+ * It always begins with a fresh command header
+ * (the protocol doesn't require this, but we enforce it to allow
+ * multiple buffers to be potentially encoded and sent in parallel).
+ * A single command encodes one contiguous horizontal line of pixels
+ *
+ * The function relies on the client to do all allocation, so that
+ * rendering can be done directly to output buffers (e.g. USB URBs).
+ * The function fills the supplied command buffer, providing information
+ * on where it left off, so the client may call in again with additional
+ * buffers if the line will take several buffers to complete.
+ *
+ * A single command can transmit a maximum of 256 pixels,
+ * regardless of the compression ratio (protocol design limit).
+ * To the hardware, 0 for a size byte means 256
+ *
+ * Rather than 256 pixel commands which are either rl or raw encoded,
+ * the rlx command simply assumes alternating raw and rl spans within one cmd.
+ * This has a slightly larger header overhead, but produces more even results.
+ * It also processes all data (read and write) in a single pass.
+ * Performance benchmarks of common cases show it having just slightly better
+ * compression than 256 pixel raw -or- rle commands, with similar CPU consumption.
+ * But for very rl friendly data, will compress not quite as well.
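+ *
+ * For example, since one command covers at most 256 pixels, a
+ * 1366 pixel wide line always needs at least ceil(1366/256) = 6
+ * commands, however well it compresses.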
+ */
static void dlfb_compress_hline(
const uint16_t **pixel_start_ptr,
const uint16_t *const pixel_end,
@@ -1063,7 +1063,8 @@ static ssize_t metrics_misc_show(struct device *fbdev,
atomic_read(&dev->lost_pixels) ? "yes" : "no");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *a,
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = container_of(kobj, struct device, kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
@@ -1438,7 +1439,7 @@ static int __init dlfb_module_init(void)
if (res)
err("usb_register failed. Error number %d", res);
- printk("VMODES initialized\n");
+ printk(KERN_INFO "VMODES initialized\n");
return res;
}
@@ -1508,8 +1509,8 @@ static void dlfb_free_urb_list(struct dlfb_data *dev)
urb = unode->urb;
/* Free each separately allocated piece */
- usb_buffer_free(urb->dev, dev->urbs.size,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, dev->urbs.size,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
kfree(node);
}
@@ -1543,8 +1544,8 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_buffer_alloc(dev->udev, MAX_TRANSFER, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
@@ -1567,7 +1568,7 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
kref_get(&dev->kref); /* released in free_render_urbs() */
- dl_notice("allocated %d %d byte urbs \n", i, (int) size);
+ dl_notice("allocated %d %d byte urbs\n", i, (int) size);
return i;
}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index bc267408667..5972ae70e38 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -21,7 +21,7 @@
#include "usbip_common.h"
#include "stub.h"
-#include "../../usb/core/hcd.h"
+#include <linux/usb/hcd.h>
static int is_clear_halt_cmd(struct urb *urb)
@@ -502,13 +502,13 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
/* set priv->urb->setup_packet */
- priv->urb->setup_packet = kzalloc(8, GFP_KERNEL);
+ priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
+ GFP_KERNEL);
if (!priv->urb->setup_packet) {
dev_err(&sdev->interface->dev, "allocate setup_packet\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
- memcpy(priv->urb->setup_packet, &pdu->u.cmd_submit.setup, 8);
/* set other members from the base header of pdu */
priv->urb->context = (void *) priv;
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e3fa4216c1c..52408164036 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -562,7 +562,7 @@ EXPORT_SYMBOL_GPL(sockfd_to_socket);
/* there may be more cases to tweak the flags. */
static unsigned int tweak_transfer_flags(unsigned int flags)
{
- flags &= ~(URB_NO_TRANSFER_DMA_MAP|URB_NO_SETUP_DMA_MAP);
+ flags &= ~URB_NO_TRANSFER_DMA_MAP;
return flags;
}
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 6f1dcb197d1..e1bbd1287e2 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -182,7 +182,7 @@ struct usbip_header_basic {
__u32 devid;
#define USBIP_DIR_OUT 0
-#define USBIP_DIR_IN 1
+#define USBIP_DIR_IN 1
__u32 direction;
__u32 ep; /* endpoint number */
} __attribute__ ((packed));
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 5e375173bbc..41a1fe5138f 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -18,7 +18,7 @@
*/
#include <linux/platform_device.h>
-#include "../../usb/core/hcd.h"
+#include <linux/usb/hcd.h>
struct vhci_device {
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 0b1766122d3..be5d8db9816 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -1072,7 +1072,7 @@ static struct hc_driver vhci_hc_driver = {
.flags = HCD_USB2,
.start = vhci_start,
- .stop = vhci_stop,
+ .stop = vhci_stop,
.urb_enqueue = vhci_urb_enqueue,
.urb_dequeue = vhci_urb_dequeue,
diff --git a/drivers/staging/usbip/vhci_tx.c b/drivers/staging/usbip/vhci_tx.c
index b71b4c2fbd8..e1c1f716a1c 100644
--- a/drivers/staging/usbip/vhci_tx.c
+++ b/drivers/staging/usbip/vhci_tx.c
@@ -179,7 +179,7 @@ static int vhci_send_cmd_unlink(struct vhci_device *vdev)
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
- usbip_dbg_vhci_tx("setup cmd unlink, %lu \n", unlink->seqnum);
+ usbip_dbg_vhci_tx("setup cmd unlink, %lu\n", unlink->seqnum);
/* 1. setup usbip_header */
diff --git a/drivers/staging/vme/boards/vme_vmivme7805.c b/drivers/staging/vme/boards/vme_vmivme7805.c
index 843c9edde55..80eaa0c4fe1 100644
--- a/drivers/staging/vme/boards/vme_vmivme7805.c
+++ b/drivers/staging/vme/boards/vme_vmivme7805.c
@@ -10,7 +10,6 @@
* option) any later version.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index b159ea58adf..0c82eb47a28 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -26,9 +26,9 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
@@ -94,31 +94,35 @@ static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
return CA91CX42_LINT_SW_IACK;
}
-static u32 ca91cx42_VERR_irqhandler(struct ca91cx42_driver *bridge)
+static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
int val;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
val = ioread32(bridge->base + DGCS);
if (!(val & 0x00000800)) {
- printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
- "Error DGCS=%08X\n", val);
+ dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
+ "Read Error DGCS=%08X\n", val);
}
return CA91CX42_LINT_VERR;
}
-static u32 ca91cx42_LERR_irqhandler(struct ca91cx42_driver *bridge)
+static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
int val;
+ struct ca91cx42_driver *bridge;
- val = ioread32(bridge->base + DGCS);
+ bridge = ca91cx42_bridge->driver_priv;
- if (!(val & 0x00000800)) {
- printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
- "Error DGCS=%08X\n", val);
+ val = ioread32(bridge->base + DGCS);
- }
+ if (!(val & 0x00000800))
+ dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
+ "Read Error DGCS=%08X\n", val);
return CA91CX42_LINT_LERR;
}
@@ -176,9 +180,9 @@ static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
if (stat & CA91CX42_LINT_SW_IACK)
serviced |= ca91cx42_IACK_irqhandler(bridge);
if (stat & CA91CX42_LINT_VERR)
- serviced |= ca91cx42_VERR_irqhandler(bridge);
+ serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
if (stat & CA91CX42_LINT_LERR)
- serviced |= ca91cx42_LERR_irqhandler(bridge);
+ serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
@@ -326,9 +330,12 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned int i, addr = 0, granularity;
unsigned int temp_ctl = 0;
unsigned int vme_bound, pci_offset;
+ struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *bridge;
- bridge = image->parent->driver_priv;
+ ca91cx42_bridge = image->parent;
+
+ bridge = ca91cx42_bridge->driver_priv;
i = image->number;
@@ -353,7 +360,7 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
case VME_USER3:
case VME_USER4:
default:
- printk(KERN_ERR "Invalid address space\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -371,15 +378,18 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
granularity = 0x10000;
if (vme_base & (granularity - 1)) {
- printk(KERN_ERR "Invalid VME base alignment\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid VME base "
+ "alignment\n");
return -EINVAL;
}
if (vme_bound & (granularity - 1)) {
- printk(KERN_ERR "Invalid VME bound alignment\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
+ "alignment\n");
return -EINVAL;
}
if (pci_offset & (granularity - 1)) {
- printk(KERN_ERR "Invalid PCI Offset alignment\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
+ "alignment\n");
return -EINVAL;
}
@@ -491,7 +501,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
/* Find pci_dev container of dev */
if (ca91cx42_bridge->parent == NULL) {
- printk(KERN_ERR "Dev entry NULL\n");
+ dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
@@ -515,8 +525,8 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
if (image->bus_resource.name == NULL) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
if (image->bus_resource.name == NULL) {
- printk(KERN_ERR "Unable to allocate memory for resource"
- " name\n");
+ dev_err(ca91cx42_bridge->parent, "Unable to allocate "
+ "memory for resource name\n");
retval = -ENOMEM;
goto err_name;
}
@@ -533,8 +543,8 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
- printk(KERN_ERR "Failed to allocate mem resource for "
- "window %d size 0x%lx start 0x%lx\n",
+ dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
+ "resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
@@ -543,7 +553,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
- printk(KERN_ERR "Failed to remap resource\n");
+ dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
}
@@ -582,9 +592,12 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
unsigned int i, granularity = 0;
unsigned int temp_ctl = 0;
unsigned long long pci_bound, vme_offset, pci_base;
+ struct vme_bridge *ca91cx42_bridge;
struct ca91cx42_driver *bridge;
- bridge = image->parent->driver_priv;
+ ca91cx42_bridge = image->parent;
+
+ bridge = ca91cx42_bridge->driver_priv;
i = image->number;
@@ -595,12 +608,14 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
/* Verify input data */
if (vme_base & (granularity - 1)) {
- printk(KERN_ERR "Invalid VME Window alignment\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
+ "alignment\n");
retval = -EINVAL;
goto err_window;
}
if (size & (granularity - 1)) {
- printk(KERN_ERR "Invalid VME Window alignment\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
+ "alignment\n");
retval = -EINVAL;
goto err_window;
}
@@ -614,8 +629,8 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
retval = ca91cx42_alloc_resource(image, size);
if (retval) {
spin_unlock(&(image->lock));
- printk(KERN_ERR "Unable to allocate memory for resource "
- "name\n");
+ dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
+ "for resource name\n");
retval = -ENOMEM;
goto err_res;
}
@@ -658,7 +673,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
break;
default:
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid data width\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
break;
@@ -690,7 +705,7 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
case VME_USER4:
default:
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid address space\n");
+ dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
break;
@@ -921,12 +936,14 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_vme *vme_attr;
dma_addr_t desc_ptr;
int retval = 0;
+ struct device *dev;
+
+ dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
- entry = (struct ca91cx42_dma_entry *)
- kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
+ entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
if (entry == NULL) {
- printk(KERN_ERR "Failed to allocate memory for dma resource "
+ dev_err(dev, "Failed to allocate memory for dma resource "
"structure\n");
retval = -ENOMEM;
goto err_mem;
@@ -934,7 +951,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
/* Test descriptor alignment */
if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
- printk("Descriptor not aligned to 16 byte boundary as "
+ dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
"required: %p\n", &(entry->descriptor));
retval = -EINVAL;
goto err_align;
@@ -955,7 +972,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
VME_USER2)) != 0) {
- printk(KERN_ERR "Unsupported cycle type\n");
+ dev_err(dev, "Unsupported cycle type\n");
retval = -EINVAL;
goto err_aspace;
}
@@ -963,7 +980,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
VME_PROG | VME_DATA)) != 0) {
- printk(KERN_ERR "Unsupported cycle type\n");
+ dev_err(dev, "Unsupported cycle type\n");
retval = -EINVAL;
goto err_cycle;
}
@@ -972,7 +989,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
- printk(KERN_ERR "Cannot perform transfer with this "
+ dev_err(dev, "Cannot perform transfer with this "
"source-destination combination\n");
retval = -EINVAL;
goto err_direct;
@@ -997,7 +1014,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
break;
default:
- printk(KERN_ERR "Invalid data width\n");
+ dev_err(dev, "Invalid data width\n");
return -EINVAL;
}
@@ -1019,7 +1036,7 @@ int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
break;
default:
- printk(KERN_ERR "Invalid address space\n");
+ dev_err(dev, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -1079,12 +1096,13 @@ int ca91cx42_dma_list_exec(struct vme_dma_list *list)
int retval = 0;
dma_addr_t bus_addr;
u32 val;
-
+ struct device *dev;
struct ca91cx42_driver *bridge;
ctrlr = list->parent;
bridge = ctrlr->parent->driver_priv;
+ dev = ctrlr->parent->parent;
mutex_lock(&(ctrlr->mtx));
@@ -1140,7 +1158,7 @@ int ca91cx42_dma_list_exec(struct vme_dma_list *list)
if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
CA91CX42_DGCS_PERR)) {
- printk(KERN_ERR "ca91c042: DMA Error. DGCS=%08X\n", val);
+ dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
val = ioread32(bridge->base + DCTL);
}
@@ -1476,7 +1494,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
- ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);
+ ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (ca91cx42_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
@@ -1485,9 +1503,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_struct;
}
- memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
-
- ca91cx42_device = kmalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
+ ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
if (ca91cx42_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
@@ -1496,8 +1512,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_driver;
}
- memset(ca91cx42_device, 0, sizeof(struct ca91cx42_driver));
-
ca91cx42_bridge->driver_priv = ca91cx42_device;
/* Enable the device */
@@ -1665,9 +1679,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(&pdev->dev, "Slot ID is %d\n",
ca91cx42_slot_get(ca91cx42_bridge));
- if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) {
+ if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
- }
/* Need to save ca91cx42_bridge pointer locally in link list for use in
* ca91cx42_remove()
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 783051f59f1..abe88a380b7 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -26,9 +26,9 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
@@ -40,27 +40,6 @@ static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);
-int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
-int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
-
-int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
- unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
-int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
- unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
-ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
- loff_t);
-ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
- loff_t);
-unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
- unsigned int, unsigned int, loff_t);
-int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
-int tsi148_dma_list_exec(struct vme_dma_list *);
-int tsi148_dma_list_empty(struct vme_dma_list *);
-int tsi148_generate_irq(int, int);
-
/* Module parameter */
static int err_chk;
static int geoid;
@@ -122,7 +101,7 @@ static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
u32 serviced = 0;
for (i = 0; i < 4; i++) {
- if(stat & TSI148_LCSR_INTS_LMS[i]) {
+ if (stat & TSI148_LCSR_INTS_LMS[i]) {
/* We only enable interrupts if the callback is set */
bridge->lm_callback[i](i);
serviced |= TSI148_LCSR_INTC_LMC[i];
@@ -137,16 +116,20 @@ static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
*
* XXX This functionality is not exposed up though API.
*/
-static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
+static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
int i;
u32 val;
u32 serviced = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
for (i = 0; i < 4; i++) {
- if(stat & TSI148_LCSR_INTS_MBS[i]) {
+ if (stat & TSI148_LCSR_INTS_MBS[i]) {
val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
- printk("VME Mailbox %d received: 0x%x\n", i, val);
+ dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
+ ": 0x%x\n", i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
}
@@ -157,19 +140,22 @@ static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
/*
* Display error & status message when PERR (PCI) exception interrupt occurs.
*/
-static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
+static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
- printk(KERN_ERR
- "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
+
+ dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
+ "attributes: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPAU),
ioread32be(bridge->base + TSI148_LCSR_EDPAL),
- ioread32be(bridge->base + TSI148_LCSR_EDPAT)
- );
- printk(KERN_ERR
- "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
+ ioread32be(bridge->base + TSI148_LCSR_EDPAT));
+
+ dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
+ "completion reg: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPXA),
- ioread32be(bridge->base + TSI148_LCSR_EDPXS)
- );
+ ioread32be(bridge->base + TSI148_LCSR_EDPXS));
iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
@@ -196,22 +182,21 @@ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
reg_join(error_addr_high, error_addr_low, &error_addr);
/* Check for exception register overflow (we have lost error data) */
- if(error_attrib & TSI148_LCSR_VEAT_VEOF) {
- printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
+ if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
+ dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
+ "Occurred\n");
}
- error = (struct vme_bus_error *)kmalloc(sizeof (struct vme_bus_error),
- GFP_ATOMIC);
+ error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
if (error) {
error->address = error_addr;
error->attributes = error_attrib;
list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
} else {
- printk(KERN_ERR
- "Unable to alloc memory for VMEbus Error reporting\n");
- printk(KERN_ERR
- "VME Bus Error at address: 0x%llx, attributes: %08x\n",
- error_addr, error_attrib);
+ dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
+ "VMEbus Error reporting\n");
+ dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
+ "0x%llx, attributes: %08x\n", error_addr, error_attrib);
}
/* Clear Status */
@@ -244,10 +229,9 @@ static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
/*
- * Note: Even though the registers are defined
- * as 32-bits in the spec, we only want to issue
- * 8-bit IACK cycles on the bus, read from offset
- * 3.
+ * Note: Even though the registers are defined as
+ * 32-bits in the spec, we only want to issue 8-bit
+ * IACK cycles on the bus, read from offset 3.
*/
vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
@@ -281,9 +265,8 @@ static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
/* Only look at unmasked interrupts */
stat &= enable;
- if (unlikely(!stat)) {
+ if (unlikely(!stat))
return IRQ_NONE;
- }
/* Call subhandlers as appropriate */
/* DMA irqs */
@@ -298,11 +281,11 @@ static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
/* Mail box irqs */
if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
- serviced |= tsi148_MB_irqhandler(bridge, stat);
+ serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
/* PCI bus error */
if (stat & TSI148_LCSR_INTS_PERRS)
- serviced |= tsi148_PERR_irqhandler(bridge);
+ serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
/* VME bus error */
if (stat & TSI148_LCSR_INTS_VERRS)
@@ -346,8 +329,8 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
IRQF_SHARED,
driver_name, tsi148_bridge);
if (result) {
- dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
- pdev->irq);
+ dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
+ "vector %02X\n", pdev->irq);
return result;
}
@@ -515,7 +498,9 @@ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
/* Iterate through errors */
list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
- if((vme_err->address >= address) && (vme_err->address < bound)){
+ if ((vme_err->address >= address) &&
+ (vme_err->address < bound)) {
+
valid = vme_err;
break;
}
@@ -548,7 +533,9 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
- if((vme_err->address >= address) && (vme_err->address < bound)){
+ if ((vme_err->address >= address) &&
+ (vme_err->address < bound)) {
+
list_del(err_pos);
kfree(vme_err);
}
@@ -568,9 +555,11 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
+ struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
- bridge = image->parent->driver_priv;
+ tsi148_bridge = image->parent;
+ bridge = tsi148_bridge->driver_priv;
i = image->number;
@@ -597,7 +586,7 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
case VME_USER3:
case VME_USER4:
default:
- printk("Invalid address space\n");
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -615,15 +604,16 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
if (vme_base_low & (granularity - 1)) {
- printk("Invalid VME base alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
return -EINVAL;
}
if (vme_bound_low & (granularity - 1)) {
- printk("Invalid VME bound alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
return -EINVAL;
}
if (pci_offset_low & (granularity - 1)) {
- printk("Invalid PCI Offset alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
+ "alignment\n");
return -EINVAL;
}
@@ -815,12 +805,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
tsi148_bridge = image->parent;
- /* Find pci_dev container of dev */
- if (tsi148_bridge->parent == NULL) {
- printk("Dev entry NULL\n");
- return -EINVAL;
- }
- pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
+ pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
@@ -839,15 +824,14 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
}
/* Exit here if size is zero */
- if (size == 0) {
+ if (size == 0)
return 0;
- }
if (image->bus_resource.name == NULL) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
if (image->bus_resource.name == NULL) {
- printk(KERN_ERR "Unable to allocate memory for resource"
- " name\n");
+ dev_err(tsi148_bridge->parent, "Unable to allocate "
+ "memory for resource name\n");
retval = -ENOMEM;
goto err_name;
}
@@ -864,8 +848,8 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
- printk(KERN_ERR "Failed to allocate mem resource for "
- "window %d size 0x%lx start 0x%lx\n",
+ dev_err(tsi148_bridge->parent, "Failed to allocate mem "
+ "resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
@@ -874,7 +858,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
- printk(KERN_ERR "Failed to remap resource\n");
+ dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
}
@@ -907,7 +891,7 @@ static void tsi148_free_resource(struct vme_master_resource *image)
/*
* Set the attributes of an outbound window.
*/
-int tsi148_master_set( struct vme_master_resource *image, int enabled,
+int tsi148_master_set(struct vme_master_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
@@ -918,19 +902,24 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
unsigned int pci_bound_low, pci_bound_high;
unsigned int vme_offset_low, vme_offset_high;
unsigned long long pci_bound, vme_offset, pci_base;
+ struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
- bridge = image->parent->driver_priv;
+ tsi148_bridge = image->parent;
+
+ bridge = tsi148_bridge->driver_priv;
/* Verify input data */
if (vme_base & 0xFFFF) {
- printk(KERN_ERR "Invalid VME Window alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Window "
+ "alignment\n");
retval = -EINVAL;
goto err_window;
}
if ((size == 0) && (enabled != 0)) {
- printk(KERN_ERR "Size must be non-zero for enabled windows\n");
+ dev_err(tsi148_bridge->parent, "Size must be non-zero for "
+ "enabled windows\n");
retval = -EINVAL;
goto err_window;
}
@@ -944,7 +933,7 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
retval = tsi148_alloc_resource(image, size);
if (retval) {
spin_unlock(&(image->lock));
- printk(KERN_ERR "Unable to allocate memory for "
+ dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
"resource\n");
goto err_res;
}
@@ -971,19 +960,20 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
if (pci_base_low & 0xFFFF) {
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid PCI base alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (pci_bound_low & 0xFFFF) {
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid PCI bound alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (vme_offset_low & 0xFFFF) {
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid VME Offset alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Offset "
+ "alignment\n");
retval = -EINVAL;
goto err_gran;
}
@@ -1029,8 +1019,8 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
}
if (cycle & VME_2eSSTB) {
- printk(KERN_WARNING "Currently not setting Broadcast Select "
- "Registers\n");
+ dev_warn(tsi148_bridge->parent, "Currently not setting "
+ "Broadcast Select Registers\n");
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
}
@@ -1046,7 +1036,7 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
break;
default:
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid data width\n");
+ dev_err(tsi148_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
}
@@ -1083,7 +1073,7 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
break;
default:
spin_unlock(&(image->lock));
- printk(KERN_ERR "Invalid address space\n");
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
break;
@@ -1137,7 +1127,7 @@ err_window:
*
* XXX Not parsing prefetch information.
*/
-int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
+int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
@@ -1214,17 +1204,17 @@ int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
*cycle |= VME_2eSST320;
/* Setup cycle types */
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
*cycle |= VME_SCT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
*cycle |= VME_BLT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
*cycle |= VME_MBLT;
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
*cycle |= VME_2eVME;
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
*cycle |= VME_2eSST;
- if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
+ if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
*cycle |= VME_2eSSTB;
if (ctl & TSI148_LCSR_OTAT_SUP)
@@ -1247,7 +1237,7 @@ int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
}
-int tsi148_master_get( struct vme_master_resource *image, int *enabled,
+int tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
@@ -1289,7 +1279,7 @@ ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
count);
- if(vme_err != NULL) {
+ if (vme_err != NULL) {
dev_err(image->parent->parent, "First VME read error detected "
"an at address 0x%llx\n", vme_err->address);
retval = vme_err->address - (vme_base + offset);
@@ -1352,9 +1342,9 @@ ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
count);
- if(vme_err != NULL) {
- printk("First VME write error detected an at address 0x%llx\n",
- vme_err->address);
+ if (vme_err != NULL) {
+ dev_warn(tsi148_bridge->parent, "First VME write error detected "
+ "at address 0x%llx\n", vme_err->address);
retval = vme_err->address - (vme_base + offset);
/* Clear down save errors in this address range */
tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
@@ -1428,8 +1418,8 @@ unsigned int tsi148_master_rmw(struct vme_master_resource *image,
return result;
}
-static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
- vme_cycle_t cycle, vme_width_t dwidth)
+static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
+ vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1445,23 +1435,24 @@ static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
}
/* Setup cycle types */
- if (cycle & VME_SCT) {
+ if (cycle & VME_SCT)
*attr |= TSI148_LCSR_DSAT_TM_SCT;
- }
- if (cycle & VME_BLT) {
+
+ if (cycle & VME_BLT)
*attr |= TSI148_LCSR_DSAT_TM_BLT;
- }
- if (cycle & VME_MBLT) {
+
+ if (cycle & VME_MBLT)
*attr |= TSI148_LCSR_DSAT_TM_MBLT;
- }
- if (cycle & VME_2eVME) {
+
+ if (cycle & VME_2eVME)
*attr |= TSI148_LCSR_DSAT_TM_2eVME;
- }
- if (cycle & VME_2eSST) {
+
+ if (cycle & VME_2eSST)
*attr |= TSI148_LCSR_DSAT_TM_2eSST;
- }
+
if (cycle & VME_2eSSTB) {
- printk("Currently not setting Broadcast Select Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select "
+ "Registers\n");
*attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
}
@@ -1474,7 +1465,7 @@ static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
*attr |= TSI148_LCSR_DSAT_DBW_32;
break;
default:
- printk("Invalid data width\n");
+ dev_err(dev, "Invalid data width\n");
return -EINVAL;
}
@@ -1508,7 +1499,7 @@ static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
*attr |= TSI148_LCSR_DSAT_AMODE_USER4;
break;
default:
- printk("Invalid address space\n");
+ dev_err(dev, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -1521,8 +1512,8 @@ static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
return 0;
}
-static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
- vme_cycle_t cycle, vme_width_t dwidth)
+static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
+ vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1538,23 +1529,24 @@ static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
}
/* Setup cycle types */
- if (cycle & VME_SCT) {
+ if (cycle & VME_SCT)
*attr |= TSI148_LCSR_DDAT_TM_SCT;
- }
- if (cycle & VME_BLT) {
+
+ if (cycle & VME_BLT)
*attr |= TSI148_LCSR_DDAT_TM_BLT;
- }
- if (cycle & VME_MBLT) {
+
+ if (cycle & VME_MBLT)
*attr |= TSI148_LCSR_DDAT_TM_MBLT;
- }
- if (cycle & VME_2eVME) {
+
+ if (cycle & VME_2eVME)
*attr |= TSI148_LCSR_DDAT_TM_2eVME;
- }
- if (cycle & VME_2eSST) {
+
+ if (cycle & VME_2eSST)
*attr |= TSI148_LCSR_DDAT_TM_2eSST;
- }
+
if (cycle & VME_2eSSTB) {
- printk("Currently not setting Broadcast Select Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select "
+ "Registers\n");
*attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
}
@@ -1567,7 +1559,7 @@ static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
*attr |= TSI148_LCSR_DDAT_DBW_32;
break;
default:
- printk("Invalid data width\n");
+ dev_err(dev, "Invalid data width\n");
return -EINVAL;
}
@@ -1601,7 +1593,7 @@ static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
*attr |= TSI148_LCSR_DDAT_AMODE_USER4;
break;
default:
- printk("Invalid address space\n");
+ dev_err(dev, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -1617,7 +1609,7 @@ static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
/*
* Add a link list descriptor to the list
*/
-int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
+int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_attr *dest, size_t count)
{
struct tsi148_dma_entry *entry, *prev;
@@ -1627,21 +1619,24 @@ int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_vme *vme_attr;
dma_addr_t desc_ptr;
int retval = 0;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
- entry = (struct tsi148_dma_entry *)kmalloc(
- sizeof(struct tsi148_dma_entry), GFP_KERNEL);
+ entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
if (entry == NULL) {
- printk("Failed to allocate memory for dma resource "
- "structure\n");
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
+ "dma resource structure\n");
retval = -ENOMEM;
goto err_mem;
}
/* Test descriptor alignment */
if ((unsigned long)&(entry->descriptor) & 0x7) {
- printk("Descriptor not aligned to 8 byte boundary as "
- "required: %p\n", &(entry->descriptor));
+ dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
+ "byte boundary as required: %p\n",
+ &(entry->descriptor));
retval = -EINVAL;
goto err_align;
}
@@ -1659,13 +1654,13 @@ int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dsal = pattern_attr->pattern;
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
/* Default behaviour is 32 bit pattern */
- if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
+ if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
- }
+
/* It seems that the default behaviour is to increment */
- if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
+ if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
- }
+
break;
case VME_DMA_PCI:
pci_attr = (struct vme_dma_pci *)src->private;
@@ -1686,13 +1681,13 @@ int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
retval = tsi148_dma_set_vme_src_attributes(
- &(entry->descriptor.dsat), vme_attr->aspace,
- vme_attr->cycle, vme_attr->dwidth);
- if(retval < 0 )
+ tsi148_bridge->parent, &(entry->descriptor.dsat),
+ vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ if (retval < 0)
goto err_source;
break;
default:
- printk("Invalid source type\n");
+ dev_err(tsi148_bridge->parent, "Invalid source type\n");
retval = -EINVAL;
goto err_source;
break;
@@ -1724,13 +1719,13 @@ int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
retval = tsi148_dma_set_vme_dest_attributes(
- &(entry->descriptor.ddat), vme_attr->aspace,
- vme_attr->cycle, vme_attr->dwidth);
- if(retval < 0 )
+ tsi148_bridge->parent, &(entry->descriptor.ddat),
+ vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
+ if (retval < 0)
goto err_dest;
break;
default:
- printk("Invalid destination type\n");
+ dev_err(tsi148_bridge->parent, "Invalid destination type\n");
retval = -EINVAL;
goto err_dest;
break;
@@ -1743,7 +1738,7 @@ int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
list_add_tail(&(entry->list), &(list->entries));
/* Fill out previous descriptors "Next Address" */
- if(entry->list.prev != &(list->entries)){
+ if (entry->list.prev != &(list->entries)) {
prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
list);
/* We need the bus address for the pointer */
@@ -1795,17 +1790,20 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
dma_addr_t bus_addr;
u32 bus_addr_high, bus_addr_low;
u32 val, dctlreg = 0;
+ struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
ctrlr = list->parent;
- bridge = ctrlr->parent->driver_priv;
+ tsi148_bridge = ctrlr->parent;
+
+ bridge = tsi148_bridge->driver_priv;
mutex_lock(&(ctrlr->mtx));
channel = ctrlr->number;
- if (! list_empty(&(ctrlr->running))) {
+ if (!list_empty(&(ctrlr->running))) {
/*
* XXX We have an active DMA transfer and currently haven't
* sorted out the mechanism for "pending" DMA transfers.
@@ -1847,7 +1845,7 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
TSI148_LCSR_OFFSET_DSTA);
if (val & TSI148_LCSR_DSTA_VBE) {
- printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
+ dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
retval = -EIO;
}
@@ -1867,7 +1865,7 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
int tsi148_dma_list_empty(struct vme_dma_list *list)
{
struct list_head *pos, *temp;
- struct tsi148_dma_entry *entry;
+ struct tsi148_dma_entry *entry;
/* detach and free each entry */
list_for_each_safe(pos, temp, &(list->entries)) {
@@ -1876,7 +1874,7 @@ int tsi148_dma_list_empty(struct vme_dma_list *list)
kfree(entry);
}
- return (0);
+ return 0;
}
/*
@@ -1891,9 +1889,12 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
+ struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
- bridge = lm->parent->driver_priv;
+ tsi148_bridge = lm->parent;
+
+ bridge = tsi148_bridge->driver_priv;
mutex_lock(&(lm->mtx));
@@ -1901,8 +1902,8 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
mutex_unlock(&(lm->mtx));
- printk("Location monitor callback attached, can't "
- "reset\n");
+ dev_err(tsi148_bridge->parent, "Location monitor "
+ "callback attached, can't reset\n");
return -EBUSY;
}
}
@@ -1922,7 +1923,7 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
break;
default:
mutex_unlock(&(lm->mtx));
- printk("Invalid address space\n");
+ dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
break;
}
@@ -1969,18 +1970,18 @@ int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
if (lm_ctl & TSI148_LCSR_LMAT_EN)
enabled = 1;
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
*aspace |= VME_A16;
- }
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
*aspace |= VME_A24;
- }
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
*aspace |= VME_A32;
- }
- if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
+
+ if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
*aspace |= VME_A64;
- }
+
if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
*cycle |= VME_SUPER;
@@ -2005,9 +2006,12 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
void (*callback)(int))
{
u32 lm_ctl, tmp;
+ struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
- bridge = lm->parent->driver_priv;
+ tsi148_bridge = lm->parent;
+
+ bridge = tsi148_bridge->driver_priv;
mutex_lock(&(lm->mtx));
@@ -2015,14 +2019,15 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
mutex_unlock(&(lm->mtx));
- printk("Location monitor not properly configured\n");
+ dev_err(tsi148_bridge->parent, "Location monitor not properly "
+ "configured\n");
return -EINVAL;
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
mutex_unlock(&(lm->mtx));
- printk("Existing callback attached\n");
+ dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
}
@@ -2094,7 +2099,7 @@ int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
*/
int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
- u32 slot = 0;
+ u32 slot = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
@@ -2139,8 +2144,8 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&(bridge->crcsr_bus));
if (bridge->crcsr_kernel == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
- "image\n");
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
+ "CR/CSR image\n");
return -ENOMEM;
}
@@ -2159,29 +2164,30 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
if (cbar != vstat) {
cbar = vstat;
- dev_info(&pdev->dev, "Setting CR/CSR offset\n");
+ dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
}
- dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
+ dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
if (crat & TSI148_LCSR_CRAT_EN) {
- dev_info(&pdev->dev, "Enabling CR/CSR space\n");
+ dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
iowrite32be(crat | TSI148_LCSR_CRAT_EN,
bridge->base + TSI148_LCSR_CRAT);
} else
- dev_info(&pdev->dev, "CR/CSR already enabled\n");
+ dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
/* If we want flushed, error-checked writes, set up a window
* over the CR/CSR registers. We read from here to safely flush
* through VME writes.
*/
- if(err_chk) {
+ if (err_chk) {
retval = tsi148_master_set(bridge->flush_image, 1,
(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
VME_D16);
if (retval)
- dev_err(&pdev->dev, "Configuring flush image failed\n");
+ dev_err(tsi148_bridge->parent, "Configuring flush image"
+ " failed\n");
}
return 0;
@@ -2224,8 +2230,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
- tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
- GFP_KERNEL);
+ tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (tsi148_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
@@ -2233,9 +2238,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_struct;
}
- memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
-
- tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
+ tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
if (tsi148_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
@@ -2243,8 +2246,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_driver;
}
- memset(tsi148_device, 0, sizeof(struct tsi148_driver));
-
tsi148_bridge->driver_priv = tsi148_device;
/* Enable the device */
@@ -2301,10 +2302,10 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* hence have one less master window resource available.
*/
master_num = TSI148_MAX_MASTER;
- if(err_chk){
+ if (err_chk) {
master_num--;
- tsi148_device->flush_image = (struct vme_master_resource *)
+ tsi148_device->flush_image =
kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
if (tsi148_device->flush_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
@@ -2331,8 +2332,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add master windows to list */
INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
for (i = 0; i < master_num; i++) {
- master_image = (struct vme_master_resource *)kmalloc(
- sizeof(struct vme_master_resource), GFP_KERNEL);
+ master_image = kmalloc(sizeof(struct vme_master_resource),
+ GFP_KERNEL);
if (master_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"master resource structure\n");
@@ -2360,8 +2361,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
- slave_image = (struct vme_slave_resource *)kmalloc(
- sizeof(struct vme_slave_resource), GFP_KERNEL);
+ slave_image = kmalloc(sizeof(struct vme_slave_resource),
+ GFP_KERNEL);
if (slave_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"slave resource structure\n");
@@ -2386,8 +2387,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
for (i = 0; i < TSI148_MAX_DMA; i++) {
- dma_ctrlr = (struct vme_dma_resource *)kmalloc(
- sizeof(struct vme_dma_resource), GFP_KERNEL);
+ dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
+ GFP_KERNEL);
if (dma_ctrlr == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"dma resource structure\n");
@@ -2444,7 +2445,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
- (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
+ (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
if (!geoid)
dev_info(&pdev->dev, "VME geographical address is %d\n",
data & TSI148_LCSR_VSTAT_GA_M);
@@ -2504,7 +2505,8 @@ err_slave:
err_master:
/* resources are stored in link list */
list_for_each(pos, &(tsi148_bridge->master_resources)) {
- master_image = list_entry(pos, struct vme_master_resource, list);
+ master_image = list_entry(pos, struct vme_master_resource,
+ list);
list_del(pos);
kfree(master_image);
}
@@ -2624,8 +2626,6 @@ static void tsi148_remove(struct pci_dev *pdev)
static void __exit tsi148_exit(void)
{
pci_unregister_driver(&tsi148_driver);
-
- printk(KERN_DEBUG "Driver removed.\n");
}
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
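The hunks above repeat one conversion throughout vme_tsi148.c: helpers and interrupt handlers are handed the struct vme_bridge, recover the chip-specific state through driver_priv, and report through dev_err()/dev_warn() on the bridge's parent device rather than bare printk(). A minimal sketch of the resulting shape (the function name and the register read are illustrative only, not part of the patch):

static u32 tsi148_example_handler(struct vme_bridge *tsi148_bridge)
{
	/* driver_priv holds the tsi148-specific state (register base, etc.) */
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 stat;

	stat = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	/* device-aware logging replaces printk(KERN_ERR ...) */
	dev_err(tsi148_bridge->parent, "example status: 0x%08x\n", stat);

	return stat;
}

The probe path follows the same spirit, with kmalloc() plus memset() collapsed into kzalloc() and the redundant casts on allocation results dropped.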
diff --git a/drivers/staging/vme/bridges/vme_tsi148.h b/drivers/staging/vme/bridges/vme_tsi148.h
index 9e5f7fa1d74..bda64ef8575 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.h
+++ b/drivers/staging/vme/bridges/vme_tsi148.h
@@ -579,7 +579,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
/*
* Memory Base Address Lower Reg (CRG + $010)
*/
-#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
+#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
@@ -615,7 +615,8 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
*/
#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
-#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans */
+#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
+ */
#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
@@ -703,7 +704,8 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
-#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask */
+#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
+ */
#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
@@ -733,14 +735,16 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
-#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask */
+#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
+ */
#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
-#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask */
+#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
+ */
/*
* VMEbus Control Register CRG+$238
@@ -762,7 +766,8 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
-#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy */
+#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
+ */
#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
@@ -773,7 +778,8 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
-#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask */
+#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
+ */
#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
@@ -794,7 +800,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
-#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
+#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
/*
* PCI Configuration Status Register CRG+$240
@@ -1341,7 +1347,7 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
* DMA Next Link Address Lower
*/
#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
-#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
+#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
/*
* DMA 2eSST Broadcast Select
@@ -1371,7 +1377,7 @@ static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */
#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
-#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
+#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
/*
* CR/CSR Register Group
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 1ab9a985dfb..bc16fc070fd 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
+#include <linux/smp_lock.h>
#include <linux/types.h>
#include <asm/io.h>
@@ -130,8 +131,7 @@ static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char *, size_t, loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
-static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
static int __init vme_user_probe(struct device *, int, int);
static int __exit vme_user_remove(struct device *, int, int);
@@ -142,7 +142,7 @@ static struct file_operations vme_user_fops = {
.read = vme_user_read,
.write = vme_user_write,
.llseek = vme_user_llseek,
- .ioctl = vme_user_ioctl,
+ .unlocked_ioctl = vme_user_unlocked_ioctl,
};
@@ -555,6 +555,18 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
+static long
+vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Unallocate a previously allocated buffer
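The vme_user.c change above is the standard transitional step from the old .ioctl hook to .unlocked_ioctl: the existing handler is kept and bracketed with lock_kernel()/unlock_kernel(), so locking behaviour is unchanged while the file_operations entry moves over. The usual follow-up, once the handler's locking needs are understood, is to trade the big kernel lock for a driver-private mutex; a sketch of that later step (the mutex name is hypothetical, not part of this patch):

static DEFINE_MUTEX(vme_user_ioctl_lock);	/* hypothetical driver-local lock */

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&vme_user_ioctl_lock);
	ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
	mutex_unlock(&vme_user_ioctl_lock);

	return ret;
}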
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index 934283a19ca..093fbffbf55 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -36,7 +36,7 @@
/* Bitmask and mutex to keep track of bridge numbers */
static unsigned int vme_bus_numbers;
-DEFINE_MUTEX(vme_bus_num_mtx);
+static DEFINE_MUTEX(vme_bus_num_mtx);
static void __exit vme_exit(void);
static int __init vme_init(void);
@@ -1408,7 +1408,7 @@ EXPORT_SYMBOL(vme_unregister_driver);
/* - Bus Registration ------------------------------------------------------ */
-int vme_calc_slot(struct device *dev)
+static int vme_calc_slot(struct device *dev)
{
struct vme_bridge *bridge;
int num;
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
index e5cee6fd053..b7b170e19aa 100644
--- a/drivers/staging/vt6655/80211hdr.h
+++ b/drivers/staging/vt6655/80211hdr.h
@@ -34,7 +34,7 @@
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
-// bit type
+/* bit type */
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
@@ -80,7 +80,7 @@
#define WLAN_HDR_ADDR4_LEN 30
#define WLAN_IEHDR_LEN 2
#define WLAN_SSID_MAXLEN 32
-//#define WLAN_RATES_MAXLEN 255
+/*#define WLAN_RATES_MAXLEN 255*/
#define WLAN_RATES_MAXLEN 16
#define WLAN_RATES_MAXLEN_11B 4
#define WLAN_RSN_MAXLEN 32
@@ -106,7 +106,7 @@
#define WLAN_WEP40_KEYLEN 5
#define WLAN_WEP104_KEYLEN 13
#define WLAN_WEP232_KEYLEN 29
-//#define WLAN_WEPMAX_KEYLEN 29
+/*#define WLAN_WEPMAX_KEYLEN 29*/
#define WLAN_WEPMAX_KEYLEN 32
#define WLAN_CHALLENGE_IE_MAXLEN 255
#define WLAN_CHALLENGE_IE_LEN 130
@@ -115,7 +115,7 @@
#define WLAN_WEP_ICV_LEN 4
#define WLAN_FRAGS_MAX 16
-// Frame Type
+/* Frame Type */
#define WLAN_TYPE_MGR 0x00
#define WLAN_TYPE_CTL 0x01
#define WLAN_TYPE_DATA 0x02
@@ -125,7 +125,7 @@
#define WLAN_FTYPE_DATA 0x02
-// Frame Subtypes
+/* Frame Subtypes */
#define WLAN_FSTYPE_ASSOCREQ 0x00
#define WLAN_FSTYPE_ASSOCRESP 0x01
#define WLAN_FSTYPE_REASSOCREQ 0x02
@@ -139,7 +139,7 @@
#define WLAN_FSTYPE_DEAUTHEN 0x0c
#define WLAN_FSTYPE_ACTION 0x0d
-// Control
+/* Control */
#define WLAN_FSTYPE_PSPOLL 0x0a
#define WLAN_FSTYPE_RTS 0x0b
#define WLAN_FSTYPE_CTS 0x0c
@@ -147,7 +147,7 @@
#define WLAN_FSTYPE_CFEND 0x0e
#define WLAN_FSTYPE_CFENDCFACK 0x0f
-// Data
+/* Data */
#define WLAN_FSTYPE_DATAONLY 0x00
#define WLAN_FSTYPE_DATA_CFACK 0x01
#define WLAN_FSTYPE_DATA_CFPOLL 0x02
@@ -160,7 +160,7 @@
#ifdef __BIG_ENDIAN
-// GET & SET Frame Control bit
+/* GET & SET Frame Control bit */
#define WLAN_GET_FC_PRVER(n) ((((WORD)(n) >> 8) & (BIT0 | BIT1)))
#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n) >> 8) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
@@ -173,12 +173,12 @@
#define WLAN_GET_FC_ISWEP(n) ((((WORD)(n) << 8) & (BIT14)) >> 14)
#define WLAN_GET_FC_ORDER(n) ((((WORD)(n) << 8) & (BIT15)) >> 15)
-// Sequence Field bit
+/* Sequence Field bit */
#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-// Capability Field bit
+/* Capability Field bit */
#define WLAN_GET_CAP_INFO_ESS(n) (((n) >> 8) & BIT0)
#define WLAN_GET_CAP_INFO_IBSS(n) ((((n) >> 8) & BIT1) >> 1)
#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) ((((n) >> 8) & BIT2) >> 2)
@@ -195,7 +195,7 @@
#else
-// GET & SET Frame Control bit
+/* GET & SET Frame Control bit */
#define WLAN_GET_FC_PRVER(n) (((WORD)(n)) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n)) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
@@ -209,12 +209,12 @@
#define WLAN_GET_FC_ORDER(n) ((((WORD)(n)) & (BIT15)) >> 15)
-// Sequence Field bit
+/* Sequence Field bit */
#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n)) & (BIT0|BIT1|BIT2|BIT3))
#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-// Capability Field bit
+/* Capability Field bit */
#define WLAN_GET_CAP_INFO_ESS(n) ((n) & BIT0)
#define WLAN_GET_CAP_INFO_IBSS(n) (((n) & BIT1) >> 1)
#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) (((n) & BIT2) >> 2)
@@ -229,7 +229,7 @@
#define WLAN_GET_CAP_INFO_GRPACK(n) (((n) & BIT14) >> 14)
-#endif //#ifdef __BIG_ENDIAN
+#endif /* #ifdef __BIG_ENDIAN */
#define WLAN_SET_CAP_INFO_ESS(n) (n)
@@ -261,7 +261,7 @@
#define WLAN_SET_SEQ_FRGNUM(n) ((WORD)(n))
#define WLAN_SET_SEQ_SEQNUM(n) (((WORD)(n)) << 4)
-// ERP Field bit
+/* ERP Field bit */
#define WLAN_GET_ERP_NONERP_PRESENT(n) ((n) & BIT0)
#define WLAN_GET_ERP_USE_PROTECTION(n) (((n) & BIT1) >> 1)
@@ -273,19 +273,19 @@
-// Support & Basic Rates field
+/* Support & Basic Rates field */
#define WLAN_MGMT_IS_BASICRATE(b) ((b) & BIT7)
#define WLAN_MGMT_GET_RATE(b) ((b) & ~BIT7)
-// TIM field
+/* TIM field */
#define WLAN_MGMT_IS_MULTICAST_TIM(b) ((b) & BIT0)
#define WLAN_MGMT_GET_TIM_OFFSET(b) (((b) & ~BIT0) >> 1)
-// 3-Addr & 4-Addr
+/* 3-Addr & 4-Addr */
#define WLAN_HDR_A3_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR3_LEN)
#define WLAN_HDR_A4_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR4_LEN)
-// IEEE ADDR
+/* IEEE ADDR */
#define IEEE_ADDR_UNIVERSAL 0x02
#define IEEE_ADDR_GROUP 0x01
@@ -293,7 +293,7 @@ typedef struct {
BYTE abyAddr[6];
} IEEE_ADDR, *PIEEE_ADDR;
-// 802.11 Header Format
+/* 802.11 Header Format */
typedef struct tagWLAN_80211HDR_A2 {
@@ -302,7 +302,7 @@ typedef struct tagWLAN_80211HDR_A2 {
BYTE abyAddr1[WLAN_ADDR_LEN];
BYTE abyAddr2[WLAN_ADDR_LEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_80211HDR_A2, *PWLAN_80211HDR_A2;
typedef struct tagWLAN_80211HDR_A3 {
@@ -327,7 +327,7 @@ typedef struct tagWLAN_80211HDR_A4 {
WORD wSeqCtl;
BYTE abyAddr4[WLAN_ADDR_LEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_80211HDR_A4, *PWLAN_80211HDR_A4;
@@ -348,6 +348,6 @@ typedef union tagUWLAN_80211HDR {
-#endif // __80211HDR_H__
+#endif /* __80211HDR_H__ */
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index d309049370e..38697c86248 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -89,9 +89,9 @@ static int msglevel =MSG_LEVEL_INFO;
*
-*/
-VOID
+void
vMgrEncodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -121,9 +121,9 @@ vMgrEncodeBeacon(
-*/
-VOID
+void
vMgrDecodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
)
{
PWLAN_IE pItem;
@@ -242,9 +242,9 @@ vMgrDecodeBeacon(
-*/
-VOID
+void
vMgrEncodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -265,9 +265,9 @@ vMgrEncodeIBSSATIM(
*
-*/
-VOID
+void
vMgrDecodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -287,9 +287,9 @@ vMgrDecodeIBSSATIM(
*
-*/
-VOID
+void
vMgrEncodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -315,9 +315,9 @@ vMgrEncodeDisassociation(
*
-*/
-VOID
+void
vMgrDecodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -341,9 +341,9 @@ vMgrDecodeDisassociation(
-*/
-VOID
+void
vMgrEncodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -368,9 +368,9 @@ vMgrEncodeAssocRequest(
*
-*/
-VOID
+void
vMgrDecodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
)
{
PWLAN_IE pItem;
@@ -434,9 +434,9 @@ vMgrDecodeAssocRequest(
*
-*/
-VOID
+void
vMgrEncodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -466,9 +466,9 @@ vMgrEncodeAssocResponse(
*
-*/
-VOID
+void
vMgrDecodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
)
{
PWLAN_IE pItem;
@@ -512,9 +512,9 @@ vMgrDecodeAssocResponse(
*
-*/
-VOID
+void
vMgrEncodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -544,9 +544,9 @@ vMgrEncodeReassocRequest(
-*/
-VOID
+void
vMgrDecodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
)
{
PWLAN_IE pItem;
@@ -616,9 +616,9 @@ vMgrDecodeReassocRequest(
-*/
-VOID
+void
vMgrEncodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -637,9 +637,9 @@ vMgrEncodeProbeRequest(
*
-*/
-VOID
+void
vMgrDecodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
)
{
PWLAN_IE pItem;
@@ -690,9 +690,9 @@ vMgrDecodeProbeRequest(
-*/
-VOID
+void
vMgrEncodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -724,9 +724,9 @@ vMgrEncodeProbeResponse(
*
-*/
-VOID
+void
vMgrDecodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
)
{
PWLAN_IE pItem;
@@ -838,9 +838,9 @@ vMgrDecodeProbeResponse(
*
-*/
-VOID
+void
vMgrEncodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -869,9 +869,9 @@ vMgrEncodeAuthen(
*
-*/
-VOID
+void
vMgrDecodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
)
{
PWLAN_IE pItem;
@@ -909,9 +909,9 @@ vMgrDecodeAuthen(
*
-*/
-VOID
+void
vMgrEncodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -936,9 +936,9 @@ vMgrEncodeDeauthen(
*
-*/
-VOID
+void
vMgrDecodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -962,9 +962,9 @@ vMgrDecodeDeauthen(
*
-*/
-VOID
+void
vMgrEncodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -995,9 +995,9 @@ vMgrEncodeReassocResponse(
-*/
-VOID
+void
vMgrDecodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
)
{
PWLAN_IE pItem;
diff --git a/drivers/staging/vt6655/80211mgr.h b/drivers/staging/vt6655/80211mgr.h
index 5efc13227eb..658fe144f89 100644
--- a/drivers/staging/vt6655/80211mgr.h
+++ b/drivers/staging/vt6655/80211mgr.h
@@ -714,114 +714,114 @@ typedef struct tagWLAN_FR_DEAUTHEN {
/*--------------------- Export Functions --------------------------*/
-VOID
+void
vMgrEncodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
);
-VOID
+void
vMgrDecodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
);
-VOID
+void
vMgrEncodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
);
-VOID
+void
vMgrDecodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
);
-VOID
+void
vMgrEncodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
);
-VOID
+void
vMgrDecodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
);
-VOID
+void
vMgrEncodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
);
-VOID
+void
vMgrDecodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
);
-VOID
+void
vMgrEncodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
);
-VOID
+void
vMgrDecodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
);
-VOID
+void
vMgrEncodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
);
-VOID
+void
vMgrDecodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
);
-VOID
+void
vMgrEncodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
);
-VOID
+void
vMgrDecodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
);
-VOID
+void
vMgrEncodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
);
-VOID
+void
vMgrDecodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
);
-VOID
+void
vMgrEncodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
);
-VOID
+void
vMgrDecodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
);
-VOID
+void
vMgrEncodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
);
-VOID
+void
vMgrDecodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
);
-VOID
+void
vMgrEncodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
);
-VOID
+void
vMgrDecodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
);
#endif// __80211MGR_H__
diff --git a/drivers/staging/vt6655/IEEE11h.c b/drivers/staging/vt6655/IEEE11h.c
index 2567143d3b1..22f12f5ef90 100644
--- a/drivers/staging/vt6655/IEEE11h.c
+++ b/drivers/staging/vt6655/IEEE11h.c
@@ -203,8 +203,8 @@ static BOOL s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, BYTE byR
-*/
BOOL
IEEE11hbMgrRxAction (
- IN PVOID pMgmtHandle,
- IN PVOID pRxPacket
+ void *pMgmtHandle,
+ void *pRxPacket
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
@@ -265,7 +265,7 @@ IEEE11hbMgrRxAction (
BOOL IEEE11hbMSRRepTx (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
diff --git a/drivers/staging/vt6655/IEEE11h.h b/drivers/staging/vt6655/IEEE11h.h
index 0f61eddd6f2..ae32498a511 100644
--- a/drivers/staging/vt6655/IEEE11h.h
+++ b/drivers/staging/vt6655/IEEE11h.h
@@ -46,7 +46,7 @@
/*--------------------- Export Functions --------------------------*/
BOOL IEEE11hbMSRRepTx (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
);
#endif // __IEEE11h_H__
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index 2614ed380a4..fef1b91c292 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -277,7 +277,7 @@ int ii,jj,kk;
pbyPayload = pbyIV + 8; //IV-length
abyNonce[0] = 0x00; //now is 0, if Qos here will be priority
- memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, U_ETHER_ADDR_LEN);
+ memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, ETH_ALEN);
abyNonce[7] = pbyIV[7];
abyNonce[8] = pbyIV[6];
abyNonce[9] = pbyIV[5];
@@ -299,16 +299,16 @@ int ii,jj,kk;
byTmp = (BYTE)(pMACHeader->wFrameCtl >> 8);
byTmp &= 0x87;
MIC_HDR1[3] = byTmp | 0x40;
- memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, U_ETHER_ADDR_LEN);
- memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, U_ETHER_ADDR_LEN);
+ memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
+ memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, ETH_ALEN);
//MIC_HDR2
- memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, U_ETHER_ADDR_LEN);
+ memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
byTmp = (BYTE)(pMACHeader->wSeqCtl & 0xff);
MIC_HDR2[6] = byTmp & 0x0f;
MIC_HDR2[7] = 0;
if ( bA4 ) {
- memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, U_ETHER_ADDR_LEN);
+ memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, ETH_ALEN);
} else {
MIC_HDR2[8] = 0x00;
MIC_HDR2[9] = 0x00;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index cd5b8ea0253..5414c6c6c05 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1723,15 +1723,15 @@ ULONG
s_ulGetRatio(PSDevice pDevice);
static
-VOID
+void
s_vChangeAntenna(
- IN PSDevice pDevice
+ PSDevice pDevice
);
static
-VOID
+void
s_vChangeAntenna (
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
@@ -1778,10 +1778,10 @@ s_vChangeAntenna (
*/
UINT
BBuGetFrameTime (
- IN BYTE byPreambleType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate
+ BYTE byPreambleType,
+ BYTE byPktType,
+ UINT cbFrameLength,
+ WORD wRate
)
{
UINT uFrameTime;
@@ -1843,15 +1843,15 @@ BBuGetFrameTime (
* Return Value: none
*
*/
-VOID
+void
BBvCaculateParameter (
- IN PSDevice pDevice,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BYTE byPacketType,
- OUT PWORD pwPhyLen,
- OUT PBYTE pbyPhySrv,
- OUT PBYTE pbyPhySgn
+ PSDevice pDevice,
+ UINT cbFrameLength,
+ WORD wRate,
+ BYTE byPacketType,
+ PWORD pwPhyLen,
+ PBYTE pbyPhySrv,
+ PBYTE pbyPhySgn
)
{
UINT cbBitCount;
@@ -2321,7 +2321,7 @@ BOOL BBbVT3253Init (PSDevice pDevice)
* Return Value: none
*
*/
-VOID BBvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyBBRegs)
+void BBvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyBBRegs)
{
int ii;
BYTE byBase = 1;
@@ -2438,7 +2438,7 @@ void BBvLoopbackOff (PSDevice pDevice)
* Return Value: none
*
*/
-VOID
+void
BBvSetShortSlotTime (PSDevice pDevice)
{
BYTE byBBRxConf=0;
@@ -2462,7 +2462,7 @@ BBvSetShortSlotTime (PSDevice pDevice)
}
-VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
+void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
{
BYTE byBBRxConf=0;
@@ -2494,7 +2494,7 @@ VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
* Return Value: none
*
*/
-VOID
+void
BBvSoftwareReset (DWORD_PTR dwIoBase)
{
BBbWriteEmbeded(dwIoBase, 0x50, 0x40);
@@ -2515,7 +2515,7 @@ BBvSoftwareReset (DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-VOID
+void
BBvPowerSaveModeON (DWORD_PTR dwIoBase)
{
BYTE byOrgData;
@@ -2537,7 +2537,7 @@ BBvPowerSaveModeON (DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-VOID
+void
BBvPowerSaveModeOFF (DWORD_PTR dwIoBase)
{
BYTE byOrgData;
@@ -2561,7 +2561,7 @@ BBvPowerSaveModeOFF (DWORD_PTR dwIoBase)
*
*/
-VOID
+void
BBvSetTxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
{
BYTE byBBTxConf;
@@ -2603,7 +2603,7 @@ BBvSetTxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
*
*/
-VOID
+void
BBvSetRxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
{
BYTE byBBRxConf;
@@ -2634,14 +2634,14 @@ BBvSetRxAntennaMode (DWORD_PTR dwIoBase, BYTE byAntennaMode)
* Return Value: none
*
*/
-VOID
+void
BBvSetDeepSleep (DWORD_PTR dwIoBase, BYTE byLocalID)
{
BBbWriteEmbeded(dwIoBase, 0x0C, 0x17);//CR12
BBbWriteEmbeded(dwIoBase, 0x0D, 0xB9);//CR13
}
-VOID
+void
BBvExitDeepSleep (DWORD_PTR dwIoBase, BYTE byLocalID)
{
BBbWriteEmbeded(dwIoBase, 0x0C, 0x00);//CR12
@@ -2759,7 +2759,7 @@ ULONG ulPacketNum;
}
-VOID
+void
BBvClearAntDivSQ3Value (PSDevice pDevice)
{
UINT ii;
@@ -2786,7 +2786,7 @@ BBvClearAntDivSQ3Value (PSDevice pDevice)
*
*/
-VOID
+void
BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
{
@@ -2876,9 +2876,9 @@ BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
*
-*/
-VOID
+void
TimerSQ3CallBack (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -2924,9 +2924,9 @@ TimerSQ3CallBack (
*
-*/
-VOID
+void
TimerState1CallBack (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 0682a396ea4..b236ff4139a 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -120,58 +120,58 @@
UINT
BBuGetFrameTime(
- IN BYTE byPreambleType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate
+ BYTE byPreambleType,
+ BYTE byPktType,
+ UINT cbFrameLength,
+ WORD wRate
);
-VOID
+void
BBvCaculateParameter (
- IN PSDevice pDevice,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BYTE byPacketType,
- OUT PWORD pwPhyLen,
- OUT PBYTE pbyPhySrv,
- OUT PBYTE pbyPhySgn
+ PSDevice pDevice,
+ UINT cbFrameLength,
+ WORD wRate,
+ BYTE byPacketType,
+ PWORD pwPhyLen,
+ PBYTE pbyPhySrv,
+ PBYTE pbyPhySgn
);
BOOL BBbReadEmbeded(DWORD_PTR dwIoBase, BYTE byBBAddr, PBYTE pbyData);
BOOL BBbWriteEmbeded(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byData);
-VOID BBvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyBBRegs);
+void BBvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyBBRegs);
void BBvLoopbackOn(PSDevice pDevice);
void BBvLoopbackOff(PSDevice pDevice);
void BBvSetShortSlotTime(PSDevice pDevice);
BOOL BBbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits);
BOOL BBbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byBBAddr, BYTE byTestBits);
-VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
+void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
// VT3253 Baseband
BOOL BBbVT3253Init(PSDevice pDevice);
-VOID BBvSoftwareReset(DWORD_PTR dwIoBase);
-VOID BBvPowerSaveModeON(DWORD_PTR dwIoBase);
-VOID BBvPowerSaveModeOFF(DWORD_PTR dwIoBase);
-VOID BBvSetTxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
-VOID BBvSetRxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
-VOID BBvSetDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
-VOID BBvExitDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
+void BBvSoftwareReset(DWORD_PTR dwIoBase);
+void BBvPowerSaveModeON(DWORD_PTR dwIoBase);
+void BBvPowerSaveModeOFF(DWORD_PTR dwIoBase);
+void BBvSetTxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
+void BBvSetRxAntennaMode(DWORD_PTR dwIoBase, BYTE byAntennaMode);
+void BBvSetDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
+void BBvExitDeepSleep(DWORD_PTR dwIoBase, BYTE byLocalID);
// timer for antenna diversity
-VOID
+void
TimerSQ3CallBack (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
TimerState1CallBack(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
void BBvAntennaDiversity(PSDevice pDevice, BYTE byRxRate, BYTE bySQ3);
-VOID
+void
BBvClearAntDivSQ3Value (PSDevice pDevice);
#endif // __BASEBAND_H__
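The vt6655 hunks in this and the following files apply the same mechanical cleanup: the vendor driver's VOID/IN/OUT/HANDLE wrappers give way to plain C types, and C++-style // comments become /* */ ones. An illustrative before/after with a made-up prototype (not a function from the driver):

/* before: Hungarian-style wrappers inherited from the vendor code */
VOID
ExampleTimerCallBack(
    IN HANDLE hDeviceContext
    );

/* after: plain C types, same calling convention */
void
ExampleTimerCallBack(
    void *hDeviceContext
    );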
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 9535d4473c5..6312a55dab1 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -90,19 +90,19 @@ const WORD awHWRetry1[5][5] = {
/*--------------------- Static Functions --------------------------*/
-VOID s_vCheckSensitivity(
- IN HANDLE hDeviceContext
+void s_vCheckSensitivity(
+ void *hDeviceContext
);
#ifdef Calcu_LinkQual
-VOID s_uCalculateLinkQual(
- IN HANDLE hDeviceContext
+void s_uCalculateLinkQual(
+ void *hDeviceContext
);
#endif
-VOID s_vCheckPreEDThreshold(
- IN HANDLE hDeviceContext
+void s_vCheckPreEDThreshold(
+ void *hDeviceContext
);
/*--------------------- Export Variables --------------------------*/
@@ -125,10 +125,10 @@ VOID s_vCheckPreEDThreshold(
PKnownBSS
BSSpSearchBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE pbyDesireBSSID,
- IN PBYTE pbyDesireSSID,
- IN CARD_PHY_TYPE ePhyType
+ void *hDeviceContext,
+ PBYTE pbyDesireBSSID,
+ PBYTE pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -280,10 +280,10 @@ if(pDevice->bLinkPass==FALSE) pCurrBSS->bSelected = FALSE;
-*/
-VOID
+void
BSSvClearBSSList(
- IN HANDLE hDeviceContext,
- IN BOOL bKeepCurrBSSID
+ void *hDeviceContext,
+ BOOL bKeepCurrBSSID
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -325,9 +325,9 @@ BSSvClearBSSList(
-*/
PKnownBSS
BSSpAddrIsInBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSID,
- IN PWLAN_IE_SSID pSSID
+ void *hDeviceContext,
+ PBYTE abyBSSID,
+ PWLAN_IE_SSID pSSID
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -368,23 +368,23 @@ BSSpAddrIsInBSSList(
BOOL
BSSbInsertToBSSList (
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSIDAddr,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
+ void *hDeviceContext,
+ PBYTE abyBSSIDAddr,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ UINT uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext
)
{
@@ -502,7 +502,7 @@ BSSbInsertToBSSList (
if ((bIs802_1x == TRUE) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
- bAdd_PMKID_Candidate((HANDLE)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
+ bAdd_PMKID_Candidate((void *)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
if ((pDevice->bLinkPass == TRUE) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE) ||
@@ -585,24 +585,24 @@ BSSbInsertToBSSList (
BOOL
BSSbUpdateToBSSList (
- IN HANDLE hDeviceContext,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN BOOL bChannelHit,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN PKnownBSS pBSSList,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
+ void *hDeviceContext,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ BOOL bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ UINT uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext
)
{
int ii;
@@ -764,9 +764,9 @@ BSSbUpdateToBSSList (
BOOL
BSSDBbIsSTAInNodeDB(
- IN PVOID pMgmtObject,
- IN PBYTE abyDstAddr,
- OUT PUINT puNodeIndex
+ void *pMgmtObject,
+ PBYTE abyDstAddr,
+ PUINT puNodeIndex
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -797,10 +797,10 @@ BSSDBbIsSTAInNodeDB(
* None
*
-*/
-VOID
+void
BSSvCreateOneNode(
- IN HANDLE hDeviceContext,
- OUT PUINT puNodeIndex
+ void *hDeviceContext,
+ PUINT puNodeIndex
)
{
@@ -862,10 +862,10 @@ BSSvCreateOneNode(
* None
*
-*/
-VOID
+void
BSSvRemoveOneNode(
- IN HANDLE hDeviceContext,
- IN UINT uNodeIndex
+ void *hDeviceContext,
+ UINT uNodeIndex
)
{
@@ -895,12 +895,12 @@ BSSvRemoveOneNode(
*
-*/
-VOID
+void
BSSvUpdateAPNode(
- IN HANDLE hDeviceContext,
- IN PWORD pwCapInfo,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates
+ void *hDeviceContext,
+ PWORD pwCapInfo,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -919,7 +919,7 @@ BSSvUpdateAPNode(
pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
uRateLen);
- RATEvParseMaxRate((PVOID) pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -958,9 +958,9 @@ BSSvUpdateAPNode(
-*/
-VOID
+void
BSSvAddMulticastNode(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -972,7 +972,7 @@ BSSvAddMulticastNode(
pMgmt->sNodeDBTable[0].bActive = TRUE;
pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((PVOID) pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -1011,9 +1011,9 @@ BSSvAddMulticastNode(
BOOL cc=FALSE;
UINT status;
#endif
-VOID
+void
BSSvSecondCallBack(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1111,7 +1111,7 @@ start:
}
#ifdef Calcu_LinkQual
- s_uCalculateLinkQual((HANDLE)pDevice);
+ s_uCalculateLinkQual((void *)pDevice);
#endif
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -1162,7 +1162,7 @@ start:
*/
if (ii > 0) {
// ii = 0 for multicast node (AP & Adhoc)
- RATEvTxRateFallBack((PVOID)pDevice, &(pMgmt->sNodeDBTable[ii]));
+ RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
}
else {
// ii = 0 reserved for unicast AP node (Infra STA)
@@ -1170,7 +1170,7 @@ start:
#ifdef PLICE_DEBUG
printk("SecondCallback:Before:TxDataRate is %d\n",pMgmt->sNodeDBTable[0].wTxDataRate);
#endif
- RATEvTxRateFallBack((PVOID)pDevice, &(pMgmt->sNodeDBTable[ii]));
+ RATEvTxRateFallBack((void *)pDevice, &(pMgmt->sNodeDBTable[ii]));
#ifdef PLICE_DEBUG
printk("SecondCallback:After:TxDataRate is %d\n",pMgmt->sNodeDBTable[0].wTxDataRate);
#endif
@@ -1215,14 +1215,14 @@ start:
if (pDevice->bShortSlotTime) {
pDevice->bShortSlotTime = FALSE;
BBvSetShortSlotTime(pDevice);
- vUpdateIFS((PVOID)pDevice);
+ vUpdateIFS((void *)pDevice);
}
}
else {
if (!pDevice->bShortSlotTime) {
pDevice->bShortSlotTime = TRUE;
BBvSetShortSlotTime(pDevice);
- vUpdateIFS((PVOID)pDevice);
+ vUpdateIFS((void *)pDevice);
}
}
@@ -1261,18 +1261,18 @@ start:
if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
// DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
//if (pDevice->bUpdateBBVGA) {
- // s_vCheckSensitivity((HANDLE) pDevice);
+ // s_vCheckSensitivity((void *) pDevice);
//}
if (pDevice->bUpdateBBVGA) {
- // s_vCheckSensitivity((HANDLE) pDevice);
- s_vCheckPreEDThreshold((HANDLE)pDevice);
+ // s_vCheckSensitivity((void *) pDevice);
+ s_vCheckPreEDThreshold((void *)pDevice);
}
if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
(pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) ) {
pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
@@ -1324,10 +1324,10 @@ start:
pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
pDevice->uAutoReConnectTime = 0;
}
}
@@ -1342,16 +1342,16 @@ start:
else {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
pDevice->uAutoReConnectTime = 0;
};
}
if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
if (pDevice->bUpdateBBVGA) {
- //s_vCheckSensitivity((HANDLE) pDevice);
- s_vCheckPreEDThreshold((HANDLE)pDevice);
+ //s_vCheckSensitivity((void *) pDevice);
+ s_vCheckPreEDThreshold((void *)pDevice);
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >=ADHOC_LOST_BEACON_COUNT) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
@@ -1388,13 +1388,13 @@ start:
-VOID
+void
BSSvUpdateNodeTxCounter(
- IN HANDLE hDeviceContext,
- IN BYTE byTsr0,
- IN BYTE byTsr1,
- IN PBYTE pbyBuffer,
- IN UINT uFIFOHeaderSize
+ void *hDeviceContext,
+ BYTE byTsr0,
+ BYTE byTsr1,
+ PBYTE pbyBuffer,
+ UINT uFIFOHeaderSize
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1503,7 +1503,7 @@ BSSvUpdateNodeTxCounter(
pMACHeader = (PS802_11Header)(pbyBuffer + uFIFOHeaderSize);
- if (BSSDBbIsSTAInNodeDB((HANDLE)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)){
+ if (BSSDBbIsSTAInNodeDB((void *)pMgmt, &(pMACHeader->abyAddr1[0]), &uNodeIndex)){
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
if ((byTsr1 & TSR1_TERR) == 0) {
// transmit success, TxAttempts at least plus one
@@ -1581,10 +1581,10 @@ BSSvUpdateNodeTxCounter(
-*/
-VOID
+void
BSSvClearNodeDBTable(
- IN HANDLE hDeviceContext,
- IN UINT uStartIndex
+ void *hDeviceContext,
+ UINT uStartIndex
)
{
@@ -1610,8 +1610,8 @@ BSSvClearNodeDBTable(
};
-VOID s_vCheckSensitivity(
- IN HANDLE hDeviceContext
+void s_vCheckSensitivity(
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1649,7 +1649,7 @@ VOID s_vCheckSensitivity(
if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
pDevice->uBBVGADiffCount++;
if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
} else {
pDevice->uBBVGADiffCount = 0;
}
@@ -1659,9 +1659,9 @@ VOID s_vCheckSensitivity(
}
-VOID
+void
BSSvClearAnyBSSJoinRecord (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1675,8 +1675,8 @@ BSSvClearAnyBSSJoinRecord (
}
#ifdef Calcu_LinkQual
-VOID s_uCalculateLinkQual(
- IN HANDLE hDeviceContext
+void s_uCalculateLinkQual(
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1723,8 +1723,8 @@ else
}
#endif
-VOID s_vCheckPreEDThreshold(
- IN HANDLE hDeviceContext
+void s_vCheckPreEDThreshold(
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
diff --git a/drivers/staging/vt6655/bssdb.h b/drivers/staging/vt6655/bssdb.h
index 5ce4ef9c1bd..e09ef876297 100644
--- a/drivers/staging/vt6655/bssdb.h
+++ b/drivers/staging/vt6655/bssdb.h
@@ -244,128 +244,128 @@ typedef struct tagKnownNodeDB {
PKnownBSS
BSSpSearchBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE pbyDesireBSSID,
- IN PBYTE pbyDesireSSID,
- IN CARD_PHY_TYPE ePhyType
+ void *hDeviceContext,
+ PBYTE pbyDesireBSSID,
+ PBYTE pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType
);
PKnownBSS
BSSpAddrIsInBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSID,
- IN PWLAN_IE_SSID pSSID
+ void *hDeviceContext,
+ PBYTE abyBSSID,
+ PWLAN_IE_SSID pSSID
);
-VOID
+void
BSSvClearBSSList(
- IN HANDLE hDeviceContext,
- IN BOOL bKeepCurrBSSID
+ void *hDeviceContext,
+ BOOL bKeepCurrBSSID
);
BOOL
BSSbInsertToBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSIDAddr,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
+ void *hDeviceContext,
+ PBYTE abyBSSIDAddr,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ UINT uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext
);
BOOL
BSSbUpdateToBSSList(
- IN HANDLE hDeviceContext,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN BOOL bChannelHit,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN PKnownBSS pBSSList,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
+ void *hDeviceContext,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ BOOL bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ UINT uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext
);
BOOL
BSSDBbIsSTAInNodeDB(
- IN HANDLE hDeviceContext,
- IN PBYTE abyDstAddr,
- OUT PUINT puNodeIndex
+ void *hDeviceContext,
+ PBYTE abyDstAddr,
+ PUINT puNodeIndex
);
-VOID
+void
BSSvCreateOneNode(
- IN HANDLE hDeviceContext,
- OUT PUINT puNodeIndex
+ void *hDeviceContext,
+ PUINT puNodeIndex
);
-VOID
+void
BSSvUpdateAPNode(
- IN HANDLE hDeviceContext,
- IN PWORD pwCapInfo,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates
+ void *hDeviceContext,
+ PWORD pwCapInfo,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates
);
-VOID
+void
BSSvSecondCallBack(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
BSSvUpdateNodeTxCounter(
- IN HANDLE hDeviceContext,
- IN BYTE byTsr0,
- IN BYTE byTsr1,
- IN PBYTE pbyBuffer,
- IN UINT uFIFOHeaderSize
+ void *hDeviceContext,
+ BYTE byTsr0,
+ BYTE byTsr1,
+ PBYTE pbyBuffer,
+ UINT uFIFOHeaderSize
);
-VOID
+void
BSSvRemoveOneNode(
- IN HANDLE hDeviceContext,
- IN UINT uNodeIndex
+ void *hDeviceContext,
+ UINT uNodeIndex
);
-VOID
+void
BSSvAddMulticastNode(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
BSSvClearNodeDBTable(
- IN HANDLE hDeviceContext,
- IN UINT uStartIndex
+ void *hDeviceContext,
+ UINT uStartIndex
);
-VOID
+void
BSSvClearAnyBSSJoinRecord(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
#endif //__BSSDB_H__
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index bf4fd49709d..7bc2d7654b0 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -423,12 +423,12 @@ SCountryTable ChannelRuleTab[CCODE_MAX+1] =
/*--------------------- Static Functions --------------------------*/
static
-VOID
+void
s_vCaculateOFDMRParameter(
- IN BYTE byRate,
- IN CARD_PHY_TYPE ePHYType,
- OUT PBYTE pbyTxRate,
- OUT PBYTE pbyRsvTime
+ BYTE byRate,
+ CARD_PHY_TYPE ePHYType,
+ PBYTE pbyTxRate,
+ PBYTE pbyRsvTime
);
@@ -496,12 +496,12 @@ exit:
*
*/
static
-VOID
+void
s_vCaculateOFDMRParameter (
- IN BYTE byRate,
- IN CARD_PHY_TYPE ePHYType,
- OUT PBYTE pbyTxRate,
- OUT PBYTE pbyRsvTime
+ BYTE byRate,
+ CARD_PHY_TYPE ePHYType,
+ PBYTE pbyTxRate,
+ PBYTE pbyRsvTime
)
{
switch (byRate) {
@@ -611,8 +611,8 @@ s_vCaculateOFDMRParameter (
*
*/
static
-VOID
-s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, PVOID pvSupportRateIEs, PVOID pvExtSupportRateIEs)
+void
+s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, void *pvSupportRateIEs, void *pvExtSupportRateIEs)
{
BYTE byServ = 0, bySignal = 0; // For CCK
WORD wLen = 0;
@@ -728,7 +728,7 @@ s_vSetRSPINF (PSDevice pDevice, CARD_PHY_TYPE ePHYType, PVOID pvSupportRateIEs,
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BYTE CARDbyGetChannelMapping (PVOID pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType)
+BYTE CARDbyGetChannelMapping (void *pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType)
{
UINT ii;
@@ -746,7 +746,7 @@ BYTE CARDbyGetChannelMapping (PVOID pDeviceHandler, BYTE byChannelNumber, CARD_P
}
-BYTE CARDbyGetChannelNumber (PVOID pDeviceHandler, BYTE byChannelIndex)
+BYTE CARDbyGetChannelNumber (void *pDeviceHandler, BYTE byChannelIndex)
{
// PSDevice pDevice = (PSDevice) pDeviceHandler;
return(sChannelTbl[byChannelIndex].byChannelNumber);
@@ -765,7 +765,7 @@ BYTE CARDbyGetChannelNumber (PVOID pDeviceHandler, BYTE byChannelIndex)
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbSetChannel (PVOID pDeviceHandler, UINT uConnectionChannel)
+BOOL CARDbSetChannel (void *pDeviceHandler, UINT uConnectionChannel)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -853,7 +853,7 @@ BOOL CARDbSetChannel (PVOID pDeviceHandler, UINT uConnectionChannel)
*
*/
/*
-BOOL CARDbSendPacket (PVOID pDeviceHandler, PVOID pPacket, CARD_PKT_TYPE ePktType, UINT uLength)
+BOOL CARDbSendPacket (void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, UINT uLength)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (ePktType == PKT_TYPE_802_11_MNG) {
@@ -881,7 +881,7 @@ BOOL CARDbSendPacket (PVOID pDeviceHandler, PVOID pPacket, CARD_PKT_TYPE ePktTyp
* Return Value: TRUE if short preamble; otherwise FALSE
*
*/
-BOOL CARDbIsShortPreamble (PVOID pDeviceHandler)
+BOOL CARDbIsShortPreamble (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byPreambleType == 0) {
@@ -902,7 +902,7 @@ BOOL CARDbIsShortPreamble (PVOID pDeviceHandler)
* Return Value: TRUE if short slot time; otherwise FALSE
*
*/
-BOOL CARDbIsShorSlotTime (PVOID pDeviceHandler)
+BOOL CARDbIsShorSlotTime (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
return(pDevice->bShortSlotTime);
@@ -921,7 +921,7 @@ BOOL CARDbIsShorSlotTime (PVOID pDeviceHandler)
* Return Value: None.
*
*/
-BOOL CARDbSetPhyParameter (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, PVOID pvSupportRateIEs, PVOID pvExtSupportRateIEs)
+BOOL CARDbSetPhyParameter (void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BYTE byCWMaxMin = 0;
@@ -1108,7 +1108,7 @@ BOOL CARDbSetPhyParameter (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wC
* Return Value: none
*
*/
-BOOL CARDbUpdateTSF (PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF)
+BOOL CARDbUpdateTSF (void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
QWORD qwTSFOffset;
@@ -1143,7 +1143,7 @@ BOOL CARDbUpdateTSF (PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp,
* Return Value: TRUE if succeed; otherwise FALSE
*
*/
-BOOL CARDbSetBeaconPeriod (PVOID pDeviceHandler, WORD wBeaconInterval)
+BOOL CARDbSetBeaconPeriod (void *pDeviceHandler, WORD wBeaconInterval)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
UINT uBeaconInterval = 0;
@@ -1197,7 +1197,7 @@ BOOL CARDbSetBeaconPeriod (PVOID pDeviceHandler, WORD wBeaconInterval)
* Return Value: TRUE if all data packet complete; otherwise FALSE.
*
*/
-BOOL CARDbStopTxPacket (PVOID pDeviceHandler, CARD_PKT_TYPE ePktType)
+BOOL CARDbStopTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1255,7 +1255,7 @@ BOOL CARDbStopTxPacket (PVOID pDeviceHandler, CARD_PKT_TYPE ePktType)
* Return Value: TRUE if success; FALSE if failed.
*
*/
-BOOL CARDbStartTxPacket (PVOID pDeviceHandler, CARD_PKT_TYPE ePktType)
+BOOL CARDbStartTxPacket (void *pDeviceHandler, CARD_PKT_TYPE ePktType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1297,7 +1297,7 @@ BOOL CARDbStartTxPacket (PVOID pDeviceHandler, CARD_PKT_TYPE ePktType)
* Return Value: TRUE if success; FALSE if failed.
*
*/
-BOOL CARDbSetBSSID(PVOID pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
+BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1367,7 +1367,7 @@ BOOL CARDbSetBSSID(PVOID pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode)
*
*/
BOOL CARDbSetTxDataRate(
- PVOID pDeviceHandler,
+ void *pDeviceHandler,
WORD wDataRate
)
{
@@ -1393,7 +1393,7 @@ BOOL CARDbSetTxDataRate(
-*/
BOOL
CARDbPowerDown(
- PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice)pDeviceHandler;
@@ -1430,7 +1430,7 @@ CARDbPowerDown(
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbRadioPowerOff (PVOID pDeviceHandler)
+BOOL CARDbRadioPowerOff (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -1479,7 +1479,7 @@ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbRadioPowerOn (PVOID pDeviceHandler)
+BOOL CARDbRadioPowerOn (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -1523,7 +1523,7 @@ MACvRegBitsOff(pDevice->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); //LED issue
-BOOL CARDbRemoveKey (PVOID pDeviceHandler, PBYTE pbyBSSID)
+BOOL CARDbRemoveKey (void *pDeviceHandler, PBYTE pbyBSSID)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1550,10 +1550,10 @@ BOOL CARDbRemoveKey (PVOID pDeviceHandler, PBYTE pbyBSSID)
-*/
BOOL
CARDbAdd_PMKID_Candidate (
- IN PVOID pDeviceHandler,
- IN PBYTE pbyBSSID,
- IN BOOL bRSNCapExist,
- IN WORD wRSNCap
+ void *pDeviceHandler,
+ PBYTE pbyBSSID,
+ BOOL bRSNCapExist,
+ WORD wRSNCap
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1576,7 +1576,7 @@ CARDbAdd_PMKID_Candidate (
// Update Old Candidate
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if ( !memcmp(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN)) {
+ if ( !memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
if ((bRSNCapExist == TRUE) && (wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
@@ -1593,15 +1593,15 @@ CARDbAdd_PMKID_Candidate (
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- memcpy(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN);
+ memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
return TRUE;
}
-PVOID
+void *
CARDpGetCurrentAddress (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1611,7 +1611,7 @@ CARDpGetCurrentAddress (
-VOID CARDvInitChannelTable (PVOID pDeviceHandler)
+void CARDvInitChannelTable (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bMultiBand = FALSE;
@@ -1708,9 +1708,9 @@ VOID CARDvInitChannelTable (PVOID pDeviceHandler)
-*/
BOOL
CARDbStartMeasure (
- IN PVOID pDeviceHandler,
- IN PVOID pvMeasureEIDs,
- IN UINT uNumOfMeasureEIDs
+ void *pDeviceHandler,
+ void *pvMeasureEIDs,
+ UINT uNumOfMeasureEIDs
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1835,10 +1835,10 @@ CARDbStartMeasure (
-*/
BOOL
CARDbChannelSwitch (
- IN PVOID pDeviceHandler,
- IN BYTE byMode,
- IN BYTE byNewChannel,
- IN BYTE byCount
+ void *pDeviceHandler,
+ BYTE byMode,
+ BYTE byNewChannel,
+ BYTE byCount
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1878,12 +1878,12 @@ CARDbChannelSwitch (
-*/
BOOL
CARDbSetQuiet (
- IN PVOID pDeviceHandler,
- IN BOOL bResetQuiet,
- IN BYTE byQuietCount,
- IN BYTE byQuietPeriod,
- IN WORD wQuietDuration,
- IN WORD wQuietOffset
+ void *pDeviceHandler,
+ BOOL bResetQuiet,
+ BYTE byQuietCount,
+ BYTE byQuietPeriod,
+ WORD wQuietDuration,
+ WORD wQuietOffset
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -1934,7 +1934,7 @@ CARDbSetQuiet (
-*/
BOOL
CARDbStartQuiet (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2033,11 +2033,11 @@ CARDbStartQuiet (
* Return Value: none.
*
-*/
-VOID
+void
CARDvSetCountryInfo (
- IN PVOID pDeviceHandler,
- IN CARD_PHY_TYPE ePHYType,
- IN PVOID pIE
+ void *pDeviceHandler,
+ CARD_PHY_TYPE ePHYType,
+ void *pIE
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2092,11 +2092,11 @@ CARDvSetCountryInfo (
* Return Value: none.
*
-*/
-VOID
+void
CARDvSetPowerConstraint (
- IN PVOID pDeviceHandler,
- IN BYTE byChannel,
- IN I8 byPower
+ void *pDeviceHandler,
+ BYTE byChannel,
+ I8 byPower
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2127,11 +2127,11 @@ CARDvSetPowerConstraint (
* Return Value: none.
*
-*/
-VOID
+void
CARDvGetPowerCapability (
- IN PVOID pDeviceHandler,
- OUT PBYTE pbyMinPower,
- OUT PBYTE pbyMaxPower
+ void *pDeviceHandler,
+ PBYTE pbyMinPower,
+ PBYTE pbyMaxPower
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2165,8 +2165,8 @@ CARDvGetPowerCapability (
-*/
BYTE
CARDbySetSupportChannels (
- IN PVOID pDeviceHandler,
- IN OUT PBYTE pbyIEs
+ void *pDeviceHandler,
+ PBYTE pbyIEs
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2256,7 +2256,7 @@ CARDbySetSupportChannels (
-*/
I8
CARDbyGetTransmitPower (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2267,8 +2267,8 @@ CARDbyGetTransmitPower (
BOOL
CARDbChannelGetList (
- IN UINT uCountryCodeIdx,
- OUT PBYTE pbyChannelTable
+ UINT uCountryCodeIdx,
+ PBYTE pbyChannelTable
)
{
if (uCountryCodeIdx >= CCODE_MAX) {
@@ -2279,10 +2279,10 @@ CARDbChannelGetList (
}
-VOID
+void
CARDvSetCountryIE(
- IN PVOID pDeviceHandler,
- IN PVOID pIE
+ void *pDeviceHandler,
+ void *pIE
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2307,10 +2307,10 @@ CARDvSetCountryIE(
BOOL
CARDbGetChannelMapInfo(
- IN PVOID pDeviceHandler,
- IN UINT uChannelIndex,
- OUT PBYTE pbyChannelNumber,
- OUT PBYTE pbyMap
+ void *pDeviceHandler,
+ UINT uChannelIndex,
+ PBYTE pbyChannelNumber,
+ PBYTE pbyMap
)
{
// PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2324,11 +2324,11 @@ CARDbGetChannelMapInfo(
}
-VOID
+void
CARDvSetChannelMapInfo(
- IN PVOID pDeviceHandler,
- IN UINT uChannelIndex,
- IN BYTE byMap
+ void *pDeviceHandler,
+ UINT uChannelIndex,
+ BYTE byMap
)
{
// PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2340,9 +2340,9 @@ CARDvSetChannelMapInfo(
}
-VOID
+void
CARDvClearChannelMapInfo(
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
// PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2356,7 +2356,7 @@ CARDvClearChannelMapInfo(
BYTE
CARDbyAutoChannelSelect(
- IN PVOID pDeviceHandler,
+ void *pDeviceHandler,
CARD_PHY_TYPE ePHYType
)
{
@@ -2420,9 +2420,9 @@ CARDbyAutoChannelSelect(
//xxx
-VOID
+void
CARDvSafeResetTx (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2476,9 +2476,9 @@ CARDvSafeResetTx (
* Return Value: none
*
-*/
-VOID
+void
CARDvSafeResetRx (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2537,7 +2537,7 @@ CARDvSafeResetRx (
* Return Value: response Control frame rate
*
*/
-WORD CARDwGetCCKControlRate(PVOID pDeviceHandler, WORD wRateIdx)
+WORD CARDwGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
UINT ui = (UINT)wRateIdx;
@@ -2564,14 +2564,14 @@ WORD CARDwGetCCKControlRate(PVOID pDeviceHandler, WORD wRateIdx)
* Return Value: response Control frame rate
*
*/
-WORD CARDwGetOFDMControlRate (PVOID pDeviceHandler, WORD wRateIdx)
+WORD CARDwGetOFDMControlRate (void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
UINT ui = (UINT)wRateIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate);
- if (!CARDbIsOFDMinBasicRate((PVOID)pDevice)) {
+ if (!CARDbIsOFDMinBasicRate((void *)pDevice)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
if (wRateIdx > RATE_24M)
wRateIdx = RATE_24M;
@@ -2601,7 +2601,7 @@ WORD CARDwGetOFDMControlRate (PVOID pDeviceHandler, WORD wRateIdx)
* Return Value: None.
*
*/
-void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
+void CARDvSetRSPINF (void *pDeviceHandler, CARD_PHY_TYPE ePHYType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BYTE byServ = 0x00, bySignal = 0x00; //For CCK
@@ -2614,7 +2614,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
//RSPINF_b_1
BBvCaculateParameter(pDevice,
14,
- CARDwGetCCKControlRate((PVOID)pDevice, RATE_1M),
+ CARDwGetCCKControlRate((void *)pDevice, RATE_1M),
PK_TYPE_11B,
&wLen,
&byServ,
@@ -2625,7 +2625,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
///RSPINF_b_2
BBvCaculateParameter(pDevice,
14,
- CARDwGetCCKControlRate((PVOID)pDevice, RATE_2M),
+ CARDwGetCCKControlRate((void *)pDevice, RATE_2M),
PK_TYPE_11B,
&wLen,
&byServ,
@@ -2636,7 +2636,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
//RSPINF_b_5
BBvCaculateParameter(pDevice,
14,
- CARDwGetCCKControlRate((PVOID)pDevice, RATE_5M),
+ CARDwGetCCKControlRate((void *)pDevice, RATE_5M),
PK_TYPE_11B,
&wLen,
&byServ,
@@ -2647,7 +2647,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
//RSPINF_b_11
BBvCaculateParameter(pDevice,
14,
- CARDwGetCCKControlRate((PVOID)pDevice, RATE_11M),
+ CARDwGetCCKControlRate((void *)pDevice, RATE_11M),
PK_TYPE_11B,
&wLen,
&byServ,
@@ -2686,26 +2686,26 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate,byRsvTime));
//RSPINF_a_36
- s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((PVOID)pDevice, RATE_36M),
+ s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_36M),
ePHYType,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate,byRsvTime));
//RSPINF_a_48
- s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((PVOID)pDevice, RATE_48M),
+ s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_48M),
ePHYType,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate,byRsvTime));
//RSPINF_a_54
- s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((PVOID)pDevice, RATE_54M),
+ s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
ePHYType,
&byTxRate,
&byRsvTime);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate,byRsvTime));
//RSPINF_a_72
- s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((PVOID)pDevice, RATE_54M),
+ s_vCaculateOFDMRParameter(CARDwGetOFDMControlRate((void *)pDevice, RATE_54M),
ePHYType,
&byTxRate,
&byRsvTime);
@@ -2726,7 +2726,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType)
* Return Value: None.
*
*/
-void vUpdateIFS (PVOID pDeviceHandler)
+void vUpdateIFS (void *pDeviceHandler)
{
//Set SIFS, DIFS, EIFS, SlotTime, CwMin
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -2780,7 +2780,7 @@ void vUpdateIFS (PVOID pDeviceHandler)
VNSvOutPortB(pDevice->PortOffset + MAC_REG_CWMAXMIN0, (BYTE)byMaxMin);
}
-void CARDvUpdateBasicTopRate (PVOID pDeviceHandler)
+void CARDvUpdateBasicTopRate (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BYTE byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
@@ -2820,7 +2820,7 @@ void CARDvUpdateBasicTopRate (PVOID pDeviceHandler)
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbAddBasicRate (PVOID pDeviceHandler, WORD wRateIdx)
+BOOL CARDbAddBasicRate (void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
WORD wRate = (WORD)(1<<wRateIdx);
@@ -2828,12 +2828,12 @@ BOOL CARDbAddBasicRate (PVOID pDeviceHandler, WORD wRateIdx)
pDevice->wBasicRate |= wRate;
//Determines the highest basic rate.
- CARDvUpdateBasicTopRate((PVOID)pDevice);
+ CARDvUpdateBasicTopRate((void *)pDevice);
return(TRUE);
}
-BOOL CARDbIsOFDMinBasicRate (PVOID pDeviceHandler)
+BOOL CARDbIsOFDMinBasicRate (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int ii;
@@ -2845,14 +2845,14 @@ BOOL CARDbIsOFDMinBasicRate (PVOID pDeviceHandler)
return FALSE;
}
-BYTE CARDbyGetPktType (PVOID pDeviceHandler)
+BYTE CARDbyGetPktType (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
if (pDevice->byBBType == BB_TYPE_11A || pDevice->byBBType == BB_TYPE_11B) {
return (BYTE)pDevice->byBBType;
}
- else if (CARDbIsOFDMinBasicRate((PVOID)pDevice)) {
+ else if (CARDbIsOFDMinBasicRate((void *)pDevice)) {
return PK_TYPE_11GA;
}
else {
@@ -2902,7 +2902,7 @@ void CARDvSetLoopbackMode (DWORD_PTR dwIoBase, WORD wLoopbackMode)
* Return Value: none
*
*/
-BOOL CARDbSoftwareReset (PVOID pDeviceHandler)
+BOOL CARDbSoftwareReset (void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 264b844cf05..76313462cf7 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -87,168 +87,168 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Functions --------------------------*/
BOOL ChannelValid(UINT CountryCode, UINT ChannelIndex);
-void CARDvSetRSPINF(PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType);
-void vUpdateIFS(PVOID pDeviceHandler);
-void CARDvUpdateBasicTopRate(PVOID pDeviceHandler);
-BOOL CARDbAddBasicRate(PVOID pDeviceHandler, WORD wRateIdx);
-BOOL CARDbIsOFDMinBasicRate(PVOID pDeviceHandler);
+void CARDvSetRSPINF(void *pDeviceHandler, CARD_PHY_TYPE ePHYType);
+void vUpdateIFS(void *pDeviceHandler);
+void CARDvUpdateBasicTopRate(void *pDeviceHandler);
+BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
+BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
void CARDvSetLoopbackMode(DWORD_PTR dwIoBase, WORD wLoopbackMode);
-BOOL CARDbSoftwareReset(PVOID pDeviceHandler);
+BOOL CARDbSoftwareReset(void *pDeviceHandler);
void CARDvSetFirstNextTBTT(DWORD_PTR dwIoBase, WORD wBeaconInterval);
void CARDvUpdateNextTBTT(DWORD_PTR dwIoBase, QWORD qwTSF, WORD wBeaconInterval);
BOOL CARDbGetCurrentTSF(DWORD_PTR dwIoBase, PQWORD pqwCurrTSF);
QWORD CARDqGetNextTBTT(QWORD qwTSF, WORD wBeaconInterval);
QWORD CARDqGetTSFOffset(BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2);
-BOOL CARDbSetTxPower(PVOID pDeviceHandler, ULONG ulTxPower);
-BYTE CARDbyGetPktType(PVOID pDeviceHandler);
-VOID CARDvSafeResetTx(PVOID pDeviceHandler);
-VOID CARDvSafeResetRx(PVOID pDeviceHandler);
+BOOL CARDbSetTxPower(void *pDeviceHandler, ULONG ulTxPower);
+BYTE CARDbyGetPktType(void *pDeviceHandler);
+void CARDvSafeResetTx(void *pDeviceHandler);
+void CARDvSafeResetRx(void *pDeviceHandler);
//xxx
-BOOL CARDbRadioPowerOff(PVOID pDeviceHandler);
-BOOL CARDbRadioPowerOn(PVOID pDeviceHandler);
-BOOL CARDbSetChannel(PVOID pDeviceHandler, UINT uConnectionChannel);
-//BOOL CARDbSendPacket(PVOID pDeviceHandler, PVOID pPacket, CARD_PKT_TYPE ePktType, UINT uLength);
-BOOL CARDbIsShortPreamble(PVOID pDeviceHandler);
-BOOL CARDbIsShorSlotTime(PVOID pDeviceHandler);
-BOOL CARDbSetPhyParameter(PVOID pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, PVOID pvSupportRateIEs, PVOID pvExtSupportRateIEs);
-BOOL CARDbUpdateTSF(PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF);
-BOOL CARDbStopTxPacket(PVOID pDeviceHandler, CARD_PKT_TYPE ePktType);
-BOOL CARDbStartTxPacket(PVOID pDeviceHandler, CARD_PKT_TYPE ePktType);
-BOOL CARDbSetBeaconPeriod(PVOID pDeviceHandler, WORD wBeaconInterval);
-BOOL CARDbSetBSSID(PVOID pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode);
+BOOL CARDbRadioPowerOff(void *pDeviceHandler);
+BOOL CARDbRadioPowerOn(void *pDeviceHandler);
+BOOL CARDbSetChannel(void *pDeviceHandler, UINT uConnectionChannel);
+//BOOL CARDbSendPacket(void *pDeviceHandler, void *pPacket, CARD_PKT_TYPE ePktType, UINT uLength);
+BOOL CARDbIsShortPreamble(void *pDeviceHandler);
+BOOL CARDbIsShorSlotTime(void *pDeviceHandler);
+BOOL CARDbSetPhyParameter(void *pDeviceHandler, CARD_PHY_TYPE ePHYType, WORD wCapInfo, BYTE byERPField, void *pvSupportRateIEs, void *pvExtSupportRateIEs);
+BOOL CARDbUpdateTSF(void *pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF);
+BOOL CARDbStopTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
+BOOL CARDbStartTxPacket(void *pDeviceHandler, CARD_PKT_TYPE ePktType);
+BOOL CARDbSetBeaconPeriod(void *pDeviceHandler, WORD wBeaconInterval);
+BOOL CARDbSetBSSID(void *pDeviceHandler, PBYTE pbyBSSID, CARD_OP_MODE eOPMode);
BOOL
CARDbPowerDown(
- PVOID pDeviceHandler
+ void *pDeviceHandler
);
BOOL CARDbSetTxDataRate(
- PVOID pDeviceHandler,
+ void *pDeviceHandler,
WORD wDataRate
);
-BOOL CARDbRemoveKey (PVOID pDeviceHandler, PBYTE pbyBSSID);
+BOOL CARDbRemoveKey (void *pDeviceHandler, PBYTE pbyBSSID);
BOOL
CARDbAdd_PMKID_Candidate (
- IN PVOID pDeviceHandler,
- IN PBYTE pbyBSSID,
- IN BOOL bRSNCapExist,
- IN WORD wRSNCap
+ void *pDeviceHandler,
+ PBYTE pbyBSSID,
+ BOOL bRSNCapExist,
+ WORD wRSNCap
);
-PVOID
+void *
CARDpGetCurrentAddress (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
);
-VOID CARDvInitChannelTable(PVOID pDeviceHandler);
-BYTE CARDbyGetChannelMapping(PVOID pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType);
+void CARDvInitChannelTable(void *pDeviceHandler);
+BYTE CARDbyGetChannelMapping(void *pDeviceHandler, BYTE byChannelNumber, CARD_PHY_TYPE ePhyType);
BOOL
CARDbStartMeasure (
- IN PVOID pDeviceHandler,
- IN PVOID pvMeasureEIDs,
- IN UINT uNumOfMeasureEIDs
+ void *pDeviceHandler,
+ void *pvMeasureEIDs,
+ UINT uNumOfMeasureEIDs
);
BOOL
CARDbChannelSwitch (
- IN PVOID pDeviceHandler,
- IN BYTE byMode,
- IN BYTE byNewChannel,
- IN BYTE byCount
+ void *pDeviceHandler,
+ BYTE byMode,
+ BYTE byNewChannel,
+ BYTE byCount
);
BOOL
CARDbSetQuiet (
- IN PVOID pDeviceHandler,
- IN BOOL bResetQuiet,
- IN BYTE byQuietCount,
- IN BYTE byQuietPeriod,
- IN WORD wQuietDuration,
- IN WORD wQuietOffset
+ void *pDeviceHandler,
+ BOOL bResetQuiet,
+ BYTE byQuietCount,
+ BYTE byQuietPeriod,
+ WORD wQuietDuration,
+ WORD wQuietOffset
);
BOOL
CARDbStartQuiet (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
);
-VOID
+void
CARDvSetCountryInfo (
- IN PVOID pDeviceHandler,
- IN CARD_PHY_TYPE ePHYType,
- IN PVOID pIE
+ void *pDeviceHandler,
+ CARD_PHY_TYPE ePHYType,
+ void *pIE
);
-VOID
+void
CARDvSetPowerConstraint (
- IN PVOID pDeviceHandler,
- IN BYTE byChannel,
- IN I8 byPower
+ void *pDeviceHandler,
+ BYTE byChannel,
+ I8 byPower
);
-VOID
+void
CARDvGetPowerCapability (
- IN PVOID pDeviceHandler,
- OUT PBYTE pbyMinPower,
- OUT PBYTE pbyMaxPower
+ void *pDeviceHandler,
+ PBYTE pbyMinPower,
+ PBYTE pbyMaxPower
);
BYTE
CARDbySetSupportChannels (
- IN PVOID pDeviceHandler,
- IN OUT PBYTE pbyIEs
+ void *pDeviceHandler,
+ PBYTE pbyIEs
);
I8
CARDbyGetTransmitPower (
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
);
BOOL
CARDbChannelGetList (
- IN UINT uCountryCodeIdx,
- OUT PBYTE pbyChannelTable
+ UINT uCountryCodeIdx,
+ PBYTE pbyChannelTable
);
-VOID
+void
CARDvSetCountryIE(
- IN PVOID pDeviceHandler,
- IN PVOID pIE
+ void *pDeviceHandler,
+ void *pIE
);
BOOL
CARDbGetChannelMapInfo(
- IN PVOID pDeviceHandler,
- IN UINT uChannelIndex,
- OUT PBYTE pbyChannelNumber,
- OUT PBYTE pbyMap
+ void *pDeviceHandler,
+ UINT uChannelIndex,
+ PBYTE pbyChannelNumber,
+ PBYTE pbyMap
);
-VOID
+void
CARDvSetChannelMapInfo(
- IN PVOID pDeviceHandler,
- IN UINT uChannelIndex,
- IN BYTE byMap
+ void *pDeviceHandler,
+ UINT uChannelIndex,
+ BYTE byMap
);
-VOID
+void
CARDvClearChannelMapInfo(
- IN PVOID pDeviceHandler
+ void *pDeviceHandler
);
BYTE
CARDbyAutoChannelSelect(
- IN PVOID pDeviceHandler,
+ void *pDeviceHandler,
CARD_PHY_TYPE ePHYType
);
-BYTE CARDbyGetChannelNumber(PVOID pDeviceHandler, BYTE byChannelIndex);
+BYTE CARDbyGetChannelNumber(void *pDeviceHandler, BYTE byChannelIndex);
#endif // __CARD_H__
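
A side effect of switching the pDeviceHandler arguments from PVOID to void * is that the explicit casts this patch keeps at the call sites become redundant: in C any object pointer converts to void * implicitly, so a call such as

    CARDvSetRSPINF(pDevice, pDevice->eCurrentPHYType);

behaves exactly like CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType) used above (a possible follow-up cleanup, not part of this patch).
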
diff --git a/drivers/staging/vt6655/datarate.c b/drivers/staging/vt6655/datarate.c
index 10da57f2844..38b09a7fb53 100644
--- a/drivers/staging/vt6655/datarate.c
+++ b/drivers/staging/vt6655/datarate.c
@@ -64,15 +64,15 @@ const BYTE acbyIERate[MAX_RATE] =
/*--------------------- Static Functions --------------------------*/
-VOID s_vResetCounter (
- IN PKnownNodeDB psNodeDBTable
+void s_vResetCounter (
+ PKnownNodeDB psNodeDBTable
);
-VOID
+void
s_vResetCounter (
- IN PKnownNodeDB psNodeDBTable
+ PKnownNodeDB psNodeDBTable
)
{
BYTE ii;
@@ -106,7 +106,7 @@ s_vResetCounter (
-*/
BYTE
DATARATEbyGetRateIdx (
- IN BYTE byRate
+ BYTE byRate
)
{
BYTE ii;
@@ -160,7 +160,7 @@ DATARATEbyGetRateIdx (
-*/
WORD
wGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
)
{
WORD ii;
@@ -194,17 +194,17 @@ wGetRateIdx(
* Return Value: none
*
-*/
-VOID
+void
RATEvParseMaxRate (
- IN PVOID pDeviceHandler,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pItemExtRates,
- IN BOOL bUpdateBasicRate,
- OUT PWORD pwMaxBasicRate,
- OUT PWORD pwMaxSuppRate,
- OUT PWORD pwSuppRate,
- OUT PBYTE pbyTopCCKRate,
- OUT PBYTE pbyTopOFDMRate
+ void *pDeviceHandler,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pItemExtRates,
+ BOOL bUpdateBasicRate,
+ PWORD pwMaxBasicRate,
+ PWORD pwMaxSuppRate,
+ PWORD pwSuppRate,
+ PBYTE pbyTopCCKRate,
+ PBYTE pbyTopOFDMRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -235,7 +235,7 @@ UINT uRateLen;
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
(bUpdateBasicRate == TRUE)) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
- CARDbAddBasicRate((PVOID)pDevice, wGetRateIdx(byRate));
+ CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
@@ -258,7 +258,7 @@ UINT uRateLen;
// select highest basic rate
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
- CARDbAddBasicRate((PVOID)pDevice, wGetRateIdx(byRate));
+ CARDbAddBasicRate((void *)pDevice, wGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", wGetRateIdx(byRate));
}
byRate = (BYTE)(pItemExtRates->abyRates[ii]&0x7F);
@@ -271,7 +271,7 @@ UINT uRateLen;
}
} //if(pItemExtRates != NULL)
- if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((PVOID)pDevice)) {
+ if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((void *)pDevice)) {
pDevice->byPacketType = PK_TYPE_11GA;
}
@@ -283,7 +283,7 @@ UINT uRateLen;
else
*pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
if (wOldBasicRate != pDevice->wBasicRate)
- CARDvSetRSPINF((PVOID)pDevice, pDevice->eCurrentPHYType);
+ CARDvSetRSPINF((void *)pDevice, pDevice->eCurrentPHYType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}
@@ -307,10 +307,10 @@ UINT uRateLen;
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
-VOID
+void
RATEvTxRateFallBack (
- IN PVOID pDeviceHandler,
- IN PKnownNodeDB psNodeDBTable
+ void *pDeviceHandler,
+ PKnownNodeDB psNodeDBTable
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -411,9 +411,9 @@ TxRate_iwconfig=psNodeDBTable->wTxDataRate;
-*/
BYTE
RATEuSetIE (
- IN PWLAN_IE_SUPP_RATES pSrcRates,
- IN PWLAN_IE_SUPP_RATES pDstRates,
- IN UINT uRateLen
+ PWLAN_IE_SUPP_RATES pSrcRates,
+ PWLAN_IE_SUPP_RATES pDstRates,
+ UINT uRateLen
)
{
UINT ii, uu, uRateCnt = 0;
diff --git a/drivers/staging/vt6655/datarate.h b/drivers/staging/vt6655/datarate.h
index 5096f3df499..b8ca792e9c6 100644
--- a/drivers/staging/vt6655/datarate.h
+++ b/drivers/staging/vt6655/datarate.h
@@ -54,41 +54,41 @@
-VOID
+void
RATEvParseMaxRate(
- IN PVOID pDeviceHandler,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pItemExtRates,
- IN BOOL bUpdateBasicRate,
- OUT PWORD pwMaxBasicRate,
- OUT PWORD pwMaxSuppRate,
- OUT PWORD pwSuppRate,
- OUT PBYTE pbyTopCCKRate,
- OUT PBYTE pbyTopOFDMRate
+ void *pDeviceHandler,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pItemExtRates,
+ BOOL bUpdateBasicRate,
+ PWORD pwMaxBasicRate,
+ PWORD pwMaxSuppRate,
+ PWORD pwSuppRate,
+ PBYTE pbyTopCCKRate,
+ PBYTE pbyTopOFDMRate
);
-VOID
+void
RATEvTxRateFallBack(
- IN PVOID pDeviceHandler,
- IN PKnownNodeDB psNodeDBTable
+ void *pDeviceHandler,
+ PKnownNodeDB psNodeDBTable
);
BYTE
RATEuSetIE(
- IN PWLAN_IE_SUPP_RATES pSrcRates,
- IN PWLAN_IE_SUPP_RATES pDstRates,
- IN UINT uRateLen
+ PWLAN_IE_SUPP_RATES pSrcRates,
+ PWLAN_IE_SUPP_RATES pDstRates,
+ UINT uRateLen
);
WORD
wGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
);
BYTE
DATARATEbyGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
);
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index b573ef77abe..cedb1e7df4f 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -232,7 +232,8 @@ typedef struct tagDEVICE_RD_INFO {
/*
static inline PDEVICE_RD_INFO alloc_rd_info(void) {
PDEVICE_RD_INFO ptr;
- if ((ptr = kmalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC)) == NULL)
+ ptr = kmalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
+ if (ptr == NULL)
return NULL;
else {
memset(ptr,0,sizeof(DEVICE_RD_INFO));
@@ -361,7 +362,8 @@ typedef struct tagDEVICE_TD_INFO{
/*
static inline PDEVICE_TD_INFO alloc_td_info(void) {
PDEVICE_TD_INFO ptr;
- if ((ptr = kmalloc(sizeof(DEVICE_TD_INFO),GFP_ATOMIC))==NULL)
+ ptr = kmalloc(sizeof(DEVICE_TD_INFO),GFP_ATOMIC);
+ if (ptr == NULL)
return NULL;
else {
memset(ptr,0,sizeof(DEVICE_TD_INFO));
@@ -444,8 +446,8 @@ typedef const SRrvTime_atim *PCSRrvTime_atim;
typedef struct tagSRTSData {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
- BYTE abyTA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
+ BYTE abyTA[ETH_ALEN];
}__attribute__ ((__packed__))
SRTSData, *PSRTSData;
typedef const SRTSData *PCSRTSData;
@@ -520,7 +522,7 @@ typedef const SRTS_a_FB *PCSRTS_a_FB;
typedef struct tagSCTSData {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
WORD wReserved;
}__attribute__ ((__packed__))
SCTSData, *PSCTSData;
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index cde44d21b75..40ee4e14237 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -103,7 +103,7 @@
#define MAC_MAX_CONTEXT_REG (256+128)
#define MAX_MULTICAST_ADDRESS_NUM 32
-#define MULTICAST_ADDRESS_LIST_SIZE (MAX_MULTICAST_ADDRESS_NUM * U_ETHER_ADDR_LEN)
+#define MULTICAST_ADDRESS_LIST_SIZE (MAX_MULTICAST_ADDRESS_NUM * ETH_ALEN)
//#define OP_MODE_INFRASTRUCTURE 0
@@ -304,7 +304,7 @@ typedef enum {
// The receive duplicate detection cache entry
typedef struct tagSCacheEntry{
WORD wFmSequence;
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
+ BYTE abyAddr2[ETH_ALEN];
} SCacheEntry, *PSCacheEntry;
typedef struct tagSCache{
@@ -321,7 +321,7 @@ typedef struct tagSDeFragControlBlock
{
WORD wSequence;
WORD wFragNum;
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
+ BYTE abyAddr2[ETH_ALEN];
UINT uLifetime;
struct sk_buff* skb;
PBYTE pbyRxBuffer;
@@ -484,7 +484,7 @@ typedef struct __device_info {
BYTE byOriginalZonetype;
BYTE abyMacContext[MAC_MAX_CONTEXT_REG];
BOOL bLinkPass; // link status: OK or fail
- BYTE abyCurrentNetAddr[U_ETHER_ADDR_LEN];
+ BYTE abyCurrentNetAddr[ETH_ALEN];
// Adapter statistics
SStatCounter scStatistic;
@@ -546,8 +546,8 @@ typedef struct __device_info {
BYTE byOpMode;
BOOL bBSSIDFilter;
WORD wMaxTransmitMSDULifetime;
- BYTE abyBSSID[U_ETHER_ADDR_LEN];
- BYTE abyDesireBSSID[U_ETHER_ADDR_LEN];
+ BYTE abyBSSID[ETH_ALEN];
+ BYTE abyDesireBSSID[ETH_ALEN];
WORD wCTSDuration; // update while speed change
WORD wACKDuration; // update while speed change
WORD wRTSTransmitLen; // update while speed change
@@ -753,9 +753,9 @@ typedef struct __device_info {
SEthernetHeader sTxEthHeader;
SEthernetHeader sRxEthHeader;
- BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN];
- BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN];
- BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN];
+ BYTE abyBroadcastAddr[ETH_ALEN];
+ BYTE abySNAP_RFC1042[ETH_ALEN];
+ BYTE abySNAP_Bridgetunnel[ETH_ALEN];
BYTE abyEEPROM[EEP_MAX_CONTEXT_SIZE]; //DWORD alignment
// Pre-Authentication & PMK cache
SPMKID gsPMKID;
@@ -828,7 +828,7 @@ typedef struct __device_info {
//PLICE_DEBUG->
- inline static VOID EnQueue (PSDevice pDevice,PSRxMgmtPacket pRxMgmtPacket)
+ inline static void EnQueue (PSDevice pDevice,PSRxMgmtPacket pRxMgmtPacket)
{
//printk("Enter EnQueue:tail is %d\n",pDevice->rxManeQueue.tail);
if ((pDevice->rxManeQueue.tail+1) % NUM == pDevice->rxManeQueue.head)
@@ -869,7 +869,7 @@ typedef struct __device_info {
}
}
-VOID InitRxManagementQueue(PSDevice pDevice);
+void InitRxManagementQueue(PSDevice pDevice);
@@ -898,7 +898,8 @@ inline static BOOL device_get_ip(PSDevice pInfo) {
static inline PDEVICE_RD_INFO alloc_rd_info(void) {
PDEVICE_RD_INFO ptr;
- if ((ptr = (PDEVICE_RD_INFO)kmalloc((int)sizeof(DEVICE_RD_INFO), (int)GFP_ATOMIC)) == NULL)
+ ptr = (PDEVICE_RD_INFO)kmalloc((int)sizeof(DEVICE_RD_INFO), (int)GFP_ATOMIC);
+ if (ptr == NULL)
return NULL;
else {
memset(ptr,0,sizeof(DEVICE_RD_INFO));
@@ -908,7 +909,8 @@ static inline PDEVICE_RD_INFO alloc_rd_info(void) {
static inline PDEVICE_TD_INFO alloc_td_info(void) {
PDEVICE_TD_INFO ptr;
- if ((ptr = (PDEVICE_TD_INFO)kmalloc((int)sizeof(DEVICE_TD_INFO), (int)GFP_ATOMIC))==NULL)
+ ptr = (PDEVICE_TD_INFO)kmalloc((int)sizeof(DEVICE_TD_INFO), (int)GFP_ATOMIC);
+ if (ptr == NULL)
return NULL;
else {
memset(ptr,0,sizeof(DEVICE_TD_INFO));
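
The alloc_rd_info()/alloc_td_info() changes above only lift the assignment out of the if condition, per kernel coding style. An equivalent and shorter form, not part of this patch, would let kzalloc() do the allocation and zeroing in one step:

    static inline PDEVICE_RD_INFO alloc_rd_info(void)
    {
            /* kzalloc() == kmalloc() + memset(0); returns NULL on failure */
            return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
    }
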
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e40a2e990f4..e49bb258b5c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -429,14 +429,14 @@ pOpts->flags|=DEVICE_FLAGS_DiversityANT;
static void
device_set_options(PSDevice pDevice) {
- BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
+ BYTE abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ BYTE abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
+ memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
+ memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
pDevice->uChannel = pDevice->sOpts.channel_num;
pDevice->wRTSThreshold = pDevice->sOpts.rts_thresh;
@@ -478,7 +478,7 @@ pDevice->bUpdateBBVGA = TRUE;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->bDiversityRegCtlON= %d\n",(INT)pDevice->bDiversityRegCtlON);
}
-static VOID s_vCompleteCurrentMeasure (IN PSDevice pDevice, IN BYTE byResult)
+static void s_vCompleteCurrentMeasure (PSDevice pDevice, BYTE byResult)
{
UINT ii;
DWORD dwDuration = 0;
@@ -638,7 +638,8 @@ byValue1 = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
//2008-8-4 <add> by chester
//zonetype initial
pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
- if((zonetype=Config_FileOperation(pDevice,FALSE,NULL)) >= 0) { //read zonetype file ok!
+ zonetype = Config_FileOperation(pDevice,FALSE,NULL);
+ if (zonetype >= 0) { //read zonetype file ok!
if ((zonetype == 0)&&
(pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
@@ -728,7 +729,7 @@ else
pDevice->abyOFDMPwrTbl[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_OFDMA_PWR_TBL));
pDevice->abyOFDMDefaultPwr[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (BYTE)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
- CARDvInitChannelTable((PVOID)pDevice);
+ CARDvInitChannelTable((void *)pDevice);
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
@@ -846,7 +847,7 @@ else CARDbRadioPowerOn(pDevice);
-static VOID device_init_diversity_timer(PSDevice pDevice) {
+static void device_init_diversity_timer(PSDevice pDevice) {
init_timer(&pDevice->TimerSQ3Tmax1);
pDevice->TimerSQ3Tmax1.data = (ULONG)pDevice;
@@ -1073,7 +1074,7 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
//Enable the chip specified capbilities
pDevice->flags = pDevice->sOpts.flags | (pChip_info->flags & 0xFF000000UL);
pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (PVOID)pDevice;
+ pDevice->sMgmtObj.pAdapter = (void *)pDevice;
pDevice->pMgmt = &(pDevice->sMgmtObj);
dev->irq = pcid->irq;
@@ -1090,11 +1091,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
}
//2008-07-21-01<Add>by MikeLiu
//register wpadev
+#if 0
if(wpa_set_wpadev(pDevice, 1)!=0) {
printk("Fail to Register WPADEV?\n");
unregister_netdev(pDevice->dev);
free_netdev(dev);
}
+#endif
device_print_info(pDevice);
pci_set_drvdata(pcid, pDevice);
return 0;
@@ -1242,13 +1245,13 @@ device_release_WPADEV(pDevice);
}
#ifdef HOSTAP
if (dev)
- hostap_set_hostapd(pDevice, 0, 0);
+ vt6655_hostap_set_hostapd(pDevice, 0, 0);
#endif
if (dev)
unregister_netdev(dev);
if (pDevice->PortOffset)
- iounmap((PVOID)pDevice->PortOffset);
+ iounmap((void *)pDevice->PortOffset);
if (pDevice->pcid)
pci_release_regions(pDevice->pcid);
@@ -1460,7 +1463,7 @@ static void device_free_rd0_ring(PSDevice pDevice) {
dev_kfree_skb(pRDInfo->skb);
- kfree((PVOID)pDesc->pRDInfo);
+ kfree((void *)pDesc->pRDInfo);
}
}
@@ -1478,7 +1481,7 @@ static void device_free_rd1_ring(PSDevice pDevice) {
dev_kfree_skb(pRDInfo->skb);
- kfree((PVOID)pDesc->pRDInfo);
+ kfree((void *)pDesc->pRDInfo);
}
}
@@ -1563,7 +1566,7 @@ static void device_free_td0_ring(PSDevice pDevice) {
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((PVOID)pDesc->pTDInfo);
+ kfree((void *)pDesc->pTDInfo);
}
}
@@ -1581,7 +1584,7 @@ static void device_free_td1_ring(PSDevice pDevice) {
if (pTDInfo->skb)
dev_kfree_skb(pTDInfo->skb);
- kfree((PVOID)pDesc->pTDInfo);
+ kfree((void *)pDesc->pTDInfo);
}
}
@@ -1829,7 +1832,7 @@ static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc) {
//PLICE_DEBUG ->
-VOID InitRxManagementQueue(PSDevice pDevice)
+void InitRxManagementQueue(PSDevice pDevice)
{
pDevice->rxManeQueue.packet_num = 0;
pDevice->rxManeQueue.head = pDevice->rxManeQueue.tail = 0;
@@ -1968,7 +1971,7 @@ device_init_rd0_ring(pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device_init_registers\n");
device_init_registers(pDevice, DEVICE_INIT_COLD);
MACvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
- memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
device_set_multi(pDevice->dev);
// Init for Key Management
@@ -2008,11 +2011,11 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call MACvIntEnable\n");
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
if (pDevice->pMgmt->eConfigMode == WMAC_CONFIG_AP) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
}
else {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
}
pDevice->flags |=DEVICE_FLAGS_OPENED;
@@ -2031,7 +2034,7 @@ static int device_close(struct net_device *dev) {
//PLICE_DEBUG<-
//2007-1121-02<Add>by EinsnLiu
if (pDevice->bLinkPass) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
mdelay(30);
}
#ifdef TxInSleep
@@ -2149,11 +2152,11 @@ BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex) {
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), U_HEADER_LEN);
- cbFrameBodySize = skb->len - U_HEADER_LEN;
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), ETH_HLEN);
+ cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > MAX_DATA_LEN) {
+ if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
@@ -2353,10 +2356,10 @@ static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), U_HEADER_LEN);
- cbFrameBodySize = skb->len - U_HEADER_LEN;
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), ETH_HLEN);
+ cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
- if (ntohs(pDevice->sTxEthHeader.wType) > MAX_DATA_LEN) {
+ if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
@@ -2633,10 +2636,10 @@ pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
BYTE Descriptor_type;
WORD Key_info;
BOOL bTxeapol_key = FALSE;
- Protocol_Version = skb->data[U_HEADER_LEN];
- Packet_Type = skb->data[U_HEADER_LEN+1];
- Descriptor_type = skb->data[U_HEADER_LEN+1+1+2];
- Key_info = (skb->data[U_HEADER_LEN+1+1+2+1] << 8)|(skb->data[U_HEADER_LEN+1+1+2+2]);
+ Protocol_Version = skb->data[ETH_HLEN];
+ Packet_Type = skb->data[ETH_HLEN+1];
+ Descriptor_type = skb->data[ETH_HLEN+1+1+2];
+ Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame transfer
@@ -2857,7 +2860,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
pDevice->bBeaconSent = FALSE;
if (pDevice->bEnablePSMode) {
- PSbIsNextTBTTWakeUp((HANDLE)pDevice);
+ PSbIsNextTBTTWakeUp((void *)pDevice);
};
if ((pDevice->eOPMode == OP_MODE_AP) ||
@@ -2890,7 +2893,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
// check if multicast tx buffering
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
}
}
}
@@ -3022,7 +3025,7 @@ int Config_FileOperation(PSDevice pDevice,BOOL fwrite,unsigned char *Parameter)
goto error1;
}
-buffer = (UCHAR *)kmalloc(1024, GFP_KERNEL);
+buffer = kmalloc(1024, GFP_KERNEL);
if(buffer==NULL) {
printk("alllocate mem for file fail?\n");
result = -1;
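
The change above drops the cast on kmalloc()'s return value: kmalloc() returns void *, which converts implicitly to any object pointer in C, so the cast is just noise. A minimal sketch (not part of this patch) of the preferred form:

#include <linux/slab.h>
#include <linux/errno.h>

static int example_alloc_buffer(unsigned char **out)
{
        unsigned char *buf;

        buf = kmalloc(1024, GFP_KERNEL);        /* no (UCHAR *) cast needed */
        if (!buf)
                return -ENOMEM;

        *out = buf;
        return 0;
}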
@@ -3080,7 +3083,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3100,8 +3103,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
MACvSelectPage1(pDevice->PortOffset);
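
This hunk moves from the old struct dev_mc_list walk to the netdev_hw_addr list iterated with netdev_for_each_mc_addr(). A minimal sketch (not part of this patch) of the same hash-filter idiom, with the final register write left as a comment:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>        /* ether_crc() */
#include <linux/string.h>

static void example_build_mc_filter(struct net_device *dev, u32 mc_filter[2])
{
        struct netdev_hw_addr *ha;

        memset(mc_filter, 0, 2 * sizeof(u32));
        netdev_for_each_mc_addr(ha, dev) {
                /* top six CRC bits select one of 64 hash-filter bits */
                int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
        }
        /* the driver then programs mc_filter[] into the MAC's multicast registers */
}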
@@ -3328,7 +3331,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCSIWTXPOW:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWTXPOW \n");
rc = -EOPNOTSUPP;
break;
@@ -3406,7 +3409,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
// Get the spy list
case SIOCGIWSPY:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n");
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSPY \n");
rc = -EOPNOTSUPP;
break;
@@ -3523,7 +3526,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
case IOCTL_CMD_HOSTAPD:
- rc = hostap_ioctl(pDevice, &wrq->u.data);
+ rc = vt6655_hostap_ioctl(pDevice, &wrq->u.data);
break;
case IOCTL_CMD_WPA:
@@ -3546,7 +3549,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
}
else {
@@ -3560,8 +3563,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
if(pDevice->bWPASuppWextEnabled !=TRUE)
#endif
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
}
pDevice->bCommit = FALSE;
@@ -3716,9 +3719,9 @@ viawget_resume(struct pci_dev *pcid)
init_timer(&pMgmt->sTimerSecondCallback);
init_timer(&pDevice->sTimerCommand);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
}
return 0;
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 67f238c01b4..6b758a8c1af 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -76,70 +76,70 @@ const BYTE acbyRxRate[MAX_RATE] =
/*--------------------- Static Functions --------------------------*/
-static BYTE s_byGetRateIdx(IN BYTE byRate);
+static BYTE s_byGetRateIdx(BYTE byRate);
static
-VOID
+void
s_vGetDASA(
- IN PBYTE pbyRxBufferAddr,
- OUT PUINT pcbHeaderSize,
- OUT PSEthernetHeader psEthHeader
+ PBYTE pbyRxBufferAddr,
+ PUINT pcbHeaderSize,
+ PSEthernetHeader psEthHeader
);
static
-VOID
+void
s_vProcessRxMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyRxBufferAddr,
- IN UINT cbPacketSize,
- IN BOOL bIsWEP,
- IN BOOL bExtIV,
- OUT PUINT pcbHeadSize
+ PSDevice pDevice,
+ PBYTE pbyRxBufferAddr,
+ UINT cbPacketSize,
+ BOOL bIsWEP,
+ BOOL bExtIV,
+ PUINT pcbHeadSize
);
static BOOL s_bAPModeRxCtl(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN INT iSANodeIndex
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ INT iSANodeIndex
);
static BOOL s_bAPModeRxData (
- IN PSDevice pDevice,
- IN struct sk_buff* skb,
- IN UINT FrameSize,
- IN UINT cbHeaderOffset,
- IN INT iSANodeIndex,
- IN INT iDANodeIndex
+ PSDevice pDevice,
+ struct sk_buff* skb,
+ UINT FrameSize,
+ UINT cbHeaderOffset,
+ INT iSANodeIndex,
+ INT iDANodeIndex
);
static BOOL s_bHandleRxEncryption(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- OUT PBYTE pbyNewRsr,
- OUT PSKeyItem *pKeyOut,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ UINT FrameSize,
+ PBYTE pbyRsr,
+ PBYTE pbyNewRsr,
+ PSKeyItem *pKeyOut,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
);
static BOOL s_bHostWepRxEncryption(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- IN BOOL bOnFly,
- IN PSKeyItem pKey,
- OUT PBYTE pbyNewRsr,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ UINT FrameSize,
+ PBYTE pbyRsr,
+ BOOL bOnFly,
+ PSKeyItem pKey,
+ PBYTE pbyNewRsr,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
);
@@ -163,14 +163,14 @@ static BOOL s_bHostWepRxEncryption(
*
-*/
static
-VOID
+void
s_vProcessRxMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyRxBufferAddr,
- IN UINT cbPacketSize,
- IN BOOL bIsWEP,
- IN BOOL bExtIV,
- OUT PUINT pcbHeadSize
+ PSDevice pDevice,
+ PBYTE pbyRxBufferAddr,
+ UINT cbPacketSize,
+ BOOL bIsWEP,
+ BOOL bExtIV,
+ PUINT pcbHeadSize
)
{
PBYTE pbyRxBuffer;
@@ -236,11 +236,11 @@ s_vProcessRxMACHeader (
}
}
- cbHeaderSize -= (U_ETHER_ADDR_LEN * 2);
+ cbHeaderSize -= (ETH_ALEN * 2);
pbyRxBuffer = (PBYTE) (pbyRxBufferAddr + cbHeaderSize);
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for(ii=0;ii<ETH_ALEN;ii++)
*pbyRxBuffer++ = pDevice->sRxEthHeader.abyDstAddr[ii];
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for(ii=0;ii<ETH_ALEN;ii++)
*pbyRxBuffer++ = pDevice->sRxEthHeader.abySrcAddr[ii];
*pcbHeadSize = cbHeaderSize;
@@ -249,7 +249,7 @@ s_vProcessRxMACHeader (
-static BYTE s_byGetRateIdx (IN BYTE byRate)
+static BYTE s_byGetRateIdx (BYTE byRate)
{
BYTE byRateIdx;
@@ -262,11 +262,11 @@ static BYTE s_byGetRateIdx (IN BYTE byRate)
static
-VOID
+void
s_vGetDASA (
- IN PBYTE pbyRxBufferAddr,
- OUT PUINT pcbHeaderSize,
- OUT PSEthernetHeader psEthHeader
+ PBYTE pbyRxBufferAddr,
+ PUINT pcbHeaderSize,
+ PSEthernetHeader psEthHeader
)
{
UINT cbHeaderSize = 0;
@@ -277,14 +277,14 @@ s_vGetDASA (
if ((pMACHeader->wFrameCtl & FC_TODS) == 0) {
if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
+ for(ii=0;ii<ETH_ALEN;ii++) {
psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr3[ii];
}
}
else {
// IBSS mode
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
+ for(ii=0;ii<ETH_ALEN;ii++) {
psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
}
@@ -293,14 +293,14 @@ s_vGetDASA (
else {
// Is AP mode..
if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
+ for(ii=0;ii<ETH_ALEN;ii++) {
psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr4[ii];
cbHeaderSize += 6;
}
}
else {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
+ for(ii=0;ii<ETH_ALEN;ii++) {
psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
}
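
s_vGetDASA() rebuilds the Ethernet DA/SA from the 802.11 address fields according to the ToDS/FromDS bits; for reference, the standard mapping the branches above implement (not part of this patch):

/*
 *  ToDS FromDS   Addr1   Addr2   Addr3   Addr4     branch above
 *   0     0       DA      SA     BSSID    --       IBSS
 *   0     1       DA     BSSID    SA      --       frame from the AP
 *   1     0      BSSID    SA      DA      --       frame to the AP
 *   1     1       RA      TA      DA      SA       WDS (4 addresses, +6 bytes)
 */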
@@ -314,7 +314,7 @@ s_vGetDASA (
//PLICE_DEBUG ->
-VOID MngWorkItem(PVOID Context)
+void MngWorkItem(void *Context)
{
PSRxMgmtPacket pRxMgmtPacket;
PSDevice pDevice = (PSDevice) Context;
@@ -335,8 +335,8 @@ VOID MngWorkItem(PVOID Context)
BOOL
device_receive_frame (
- IN PSDevice pDevice,
- IN PSRxDesc pCurrRD
+ PSDevice pDevice,
+ PSRxDesc pCurrRD
)
{
@@ -569,7 +569,7 @@ device_receive_frame (
// RX OK
//
//remove the CRC length
- FrameSize -= U_CRC_LEN;
+ FrameSize -= ETH_FCS_LEN;
if (( !(*pbyRsr & (RSR_ADDRBROAD | RSR_ADDRMULTI))) && // unicast address
(IS_FRAGMENT_PKT((skb->data+4)))
@@ -631,13 +631,13 @@ device_receive_frame (
tasklet_schedule(&pDevice->RxMngWorkItem);
#else
//printk("RxMan\n");
- vMgrRxManagePacket((HANDLE)pDevice, pDevice->pMgmt, pRxPacket);
+ vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket);
//tasklet_schedule(&pDevice->RxMngWorkItem);
#endif
#endif
//PLICE_DEBUG<-
- //vMgrRxManagePacket((HANDLE)pDevice, pDevice->pMgmt, pRxPacket);
+ //vMgrRxManagePacket((void *)pDevice, pDevice->pMgmt, pRxPacket);
// hostapd daemon handles 802.11 management
if (pDevice->bEnableHostapd) {
skb->dev = pDevice->apdev;
@@ -1039,9 +1039,9 @@ device_receive_frame (
static BOOL s_bAPModeRxCtl (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN INT iSANodeIndex
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ INT iSANodeIndex
)
{
PS802_11Header p802_11Header;
@@ -1087,7 +1087,7 @@ static BOOL s_bAPModeRxCtl (
// declare received ps-poll event
if (IS_CTL_PSPOLL(pbyFrame)) {
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 1\n");
}
else {
@@ -1096,7 +1096,7 @@ static BOOL s_bAPModeRxCtl (
if (!IS_FC_POWERMGT(pbyFrame)) {
pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 2\n");
}
}
@@ -1112,7 +1112,7 @@ static BOOL s_bAPModeRxCtl (
if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 3\n");
}
@@ -1163,15 +1163,15 @@ static BOOL s_bAPModeRxCtl (
}
static BOOL s_bHandleRxEncryption (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- OUT PBYTE pbyNewRsr,
- OUT PSKeyItem *pKeyOut,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ UINT FrameSize,
+ PBYTE pbyRsr,
+ PBYTE pbyNewRsr,
+ PSKeyItem *pKeyOut,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
)
{
UINT PayloadLen = FrameSize;
@@ -1309,16 +1309,16 @@ static BOOL s_bHandleRxEncryption (
static BOOL s_bHostWepRxEncryption (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- IN BOOL bOnFly,
- IN PSKeyItem pKey,
- OUT PBYTE pbyNewRsr,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ UINT FrameSize,
+ PBYTE pbyRsr,
+ BOOL bOnFly,
+ PSKeyItem pKey,
+ PBYTE pbyNewRsr,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
)
{
UINT PayloadLen = FrameSize;
@@ -1440,12 +1440,12 @@ static BOOL s_bHostWepRxEncryption (
static BOOL s_bAPModeRxData (
- IN PSDevice pDevice,
- IN struct sk_buff* skb,
- IN UINT FrameSize,
- IN UINT cbHeaderOffset,
- IN INT iSANodeIndex,
- IN INT iDANodeIndex
+ PSDevice pDevice,
+ struct sk_buff* skb,
+ UINT FrameSize,
+ UINT cbHeaderOffset,
+ INT iSANodeIndex,
+ INT iDANodeIndex
)
{
PSMgmtObject pMgmt = pDevice->pMgmt;
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index 51508b9087e..e574963fee0 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -43,11 +43,11 @@
BOOL
device_receive_frame (
- IN PSDevice pDevice,
- IN PSRxDesc pCurrRD
+ PSDevice pDevice,
+ PSRxDesc pCurrRD
);
-VOID MngWorkItem(PVOID Context);
+void MngWorkItem(void *Context);
#endif // __RXTX_H__
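
Throughout the patch the Windows-style HANDLE/PVOID typedefs give way to plain void *, keeping the same opaque-context idiom MngWorkItem() uses: the caller passes the device pointer as void * and the callee converts it back. A minimal sketch (not part of this patch, hypothetical names):

struct example_device {
        int irq;
};

static void example_work_item(void *context)
{
        struct example_device *dev = context;   /* void * converts without a cast */

        (void)dev->irq;                         /* ... operate on the device ...  */
}

static void example_schedule(struct example_device *dev,
                             void (*fn)(void *context))
{
        fn(dev);        /* any object pointer converts implicitly to void * */
}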
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 58abf44c76a..195cc36654a 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -90,10 +90,9 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
- pDevice->apdev = (struct net_device *)kmalloc(sizeof(struct net_device), GFP_KERNEL);
+ pDevice->apdev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
if (pDevice->apdev == NULL)
return -ENOMEM;
- memset(pDevice->apdev, 0, sizeof(struct net_device));
apdev_priv = netdev_priv(pDevice->apdev);
*apdev_priv = *pDevice;
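
Here kzalloc() replaces the kmalloc()+memset() pair, returning already-zeroed memory in one call; the NULL check stays the same. A minimal sketch (not part of this patch):

#include <linux/slab.h>
#include <linux/if_ether.h>

struct example_state {
        int mode;
        unsigned char addr[ETH_ALEN];
};

static struct example_state *example_new_state(void)
{
        /* one call instead of kmalloc() followed by memset(ptr, 0, size) */
        return kzalloc(sizeof(struct example_state), GFP_KERNEL);
}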
@@ -183,7 +182,7 @@ KeyvInitTable(&pDevice->sKey,pDevice->PortOffset);
*
*/
-int hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
+int vt6655_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
{
if (val < 0 || val > 1)
return -EINVAL;
@@ -746,7 +745,7 @@ static int hostap_get_encryption(PSDevice pDevice,
/*
* Description:
- * hostap_ioctl main function supported for hostap deamon.
+ * vt6655_hostap_ioctl main function supporting the hostapd daemon.
*
* Parameters:
* In:
@@ -758,7 +757,7 @@ static int hostap_get_encryption(PSDevice pDevice,
*
*/
-int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
+int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_hostapd_param *param;
int ret = 0;
@@ -768,7 +767,7 @@ int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = (struct viawget_hostapd_param *) kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, (int)GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -846,7 +845,7 @@ int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
return -EOPNOTSUPP;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_ioctl: unknown cmd=%d\n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6655_hostap_ioctl: unknown cmd=%d\n",
(int)param->cmd);
return -EOPNOTSUPP;
break;
diff --git a/drivers/staging/vt6655/hostap.h b/drivers/staging/vt6655/hostap.h
index 8fd667b542b..55db55524d9 100644
--- a/drivers/staging/vt6655/hostap.h
+++ b/drivers/staging/vt6655/hostap.h
@@ -61,8 +61,8 @@
#define ARPHRD_IEEE80211 801
#endif
-int hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked);
-int hostap_ioctl(PSDevice pDevice, struct iw_point *p);
+int vt6655_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked);
+int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p);
#endif // __HOSTAP_H__
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index d9a5fd21ab3..404287c6025 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -83,7 +83,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pReq->wResult = 0;
- switch(pReq->wCmdCode) {
+ switch (pReq->wCmdCode) {
case WLAN_CMD_BSS_SCAN:
@@ -109,14 +109,14 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
}
spin_lock_irq(&pDevice->lock);
if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((HANDLE)pDevice, FALSE);
+ BSSvClearBSSList((void *)pDevice, FALSE);
else
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
if (pItemSSID->len != 0)
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
else
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
break;
@@ -223,8 +223,8 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
break;
@@ -429,7 +429,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
break;
};
if (sValue.dwValue == 1) {
- if (hostap_set_hostapd(pDevice, 1, 1) == 0){
+ if (vt6655_hostap_set_hostapd(pDevice, 1, 1) == 0){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
}
else {
@@ -438,7 +438,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
}
}
else {
- hostap_set_hostapd(pDevice, 0, 1);
+ vt6655_hostap_set_hostapd(pDevice, 0, 1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
}
@@ -497,7 +497,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
};
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
pDevice->bWPADEVUp = TRUE;
}
else {
@@ -593,7 +593,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
break;
@@ -714,12 +714,12 @@ if(wpa_Result.authenticated==TRUE) {
}
/*
-VOID
+void
vConfigWEPKey (
- IN PSDevice pDevice,
- IN DWORD dwKeyIndex,
- IN PBYTE pbyKey,
- IN ULONG uKeyLength
+ PSDevice pDevice,
+ DWORD dwKeyIndex,
+ PBYTE pbyKey,
+ ULONG uKeyLength
)
{
int ii;
diff --git a/drivers/staging/vt6655/ioctl.h b/drivers/staging/vt6655/ioctl.h
index 07d228399c3..0d10c2a923c 100644
--- a/drivers/staging/vt6655/ioctl.h
+++ b/drivers/staging/vt6655/ioctl.h
@@ -43,11 +43,11 @@
int private_ioctl(PSDevice pDevice, struct ifreq *rq);
/*
-VOID vConfigWEPKey (
- IN PSDevice pDevice,
- IN DWORD dwKeyIndex,
- IN PBYTE pbyKey,
- IN ULONG uKeyLength
+void vConfigWEPKey (
+ PSDevice pDevice,
+ DWORD dwKeyIndex,
+ PBYTE pbyKey,
+ ULONG uKeyLength
);
*/
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 78b49830a25..cf69034fc0f 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -190,7 +190,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
}
spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
//mike add: active scan OR passive scan OR desire_ssid scan
if(wrq->length == sizeof(struct iw_scan_req)) {
@@ -208,7 +208,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n",((PWLAN_IE_SSID)abyScanSSID)->abySSID,
((PWLAN_IE_SSID)abyScanSSID)->len);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -223,7 +223,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
//printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -699,7 +699,6 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
if (wrq->sa_family != ARPHRD_ETHER)
rc = -EINVAL;
else {
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
//2008-0409-05, <Add> by Einsn Liu
if((pDevice->bLinkPass == TRUE) &&
@@ -889,7 +888,6 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
BYTE abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
UINT ii , uSameBssidNum=0;
- memset(abyTmpDesireSSID,0,sizeof(abyTmpDesireSSID));
memcpy(abyTmpDesireSSID,pMgmt->abyDesireSSID,sizeof(abyTmpDesireSSID));
pCurr = BSSpSearchBSSList(pDevice,
NULL,
@@ -899,10 +897,10 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
if (pCurr == NULL){
PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n");
- vResetCommandTimer((HANDLE) pDevice);
+ vResetCommandTimer((void *) pDevice);
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
}
else { //mike:to find out if that desired SSID is a hidden-ssid AP ,
// by means of judging if there are two same BSSID exist in list ?
@@ -914,10 +912,10 @@ if (pMgmt->eScanState == WMAC_IS_SCANNING) {
}
if(uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
printk("SIOCSIWESSID:hidden ssid directly associate.......\n");
- vResetCommandTimer((HANDLE) pDevice);
+ vResetCommandTimer((void *) pDevice);
pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result!
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
}
}
}
@@ -1662,11 +1660,11 @@ int iwctl_siwpower(struct net_device *dev,
}
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
} else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
}
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
@@ -1702,7 +1700,8 @@ int iwctl_giwpower(struct net_device *dev,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPOWER \n");
- if ((wrq->disabled = (mode == WMAC_POWER_CAM)))
+ wrq->disabled = (mode == WMAC_POWER_CAM);
+ if (wrq->disabled)
return 0;
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
@@ -2097,7 +2096,7 @@ int iwctl_siwmlme(struct net_device *dev,
switch(mlme->cmd){
case IW_MLME_DEAUTH:
//this command seems to be not complete,please test it --einsnliu
- //bScheduleCommand((HANDLE) pDevice, WLAN_CMD_DEAUTH, (PBYTE)&reason);
+ //bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (PBYTE)&reason);
break;
case IW_MLME_DISASSOC:
if(pDevice->bLinkPass == TRUE){
@@ -2105,7 +2104,7 @@ int iwctl_siwmlme(struct net_device *dev,
//clear related flags
memset(pMgmt->abyDesireBSSID, 0xFF,6);
KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
}
break;
default:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index a4d2184d826..bfc5c509d90 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -58,7 +58,7 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
-static VOID
+static void
s_vCheckKeyTableValid (PSKeyManagement pTable, DWORD_PTR dwIoBase)
{
int i;
@@ -96,7 +96,7 @@ s_vCheckKeyTableValid (PSKeyManagement pTable, DWORD_PTR dwIoBase)
* Return Value: none
*
*/
-VOID KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
+void KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
{
int i;
int jj;
@@ -104,10 +104,10 @@ VOID KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
for (i=0;i<MAX_KEY_TABLE;i++) {
pTable->KeyTable[i].bInUse = FALSE;
pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
- pTable->KeyTable[i].PairwiseKey.pvKeyTable = (PVOID)&pTable->KeyTable[i];
+ pTable->KeyTable[i].PairwiseKey.pvKeyTable = (void *)&pTable->KeyTable[i];
for (jj=0; jj < MAX_GROUP_KEY; jj++) {
pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE;
- pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (PVOID)&pTable->KeyTable[i];
+ pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (void *)&pTable->KeyTable[i];
}
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].dwGTKeyIndex = 0;
@@ -132,10 +132,10 @@ VOID KeyvInitTable (PSKeyManagement pTable, DWORD_PTR dwIoBase)
*
*/
BOOL KeybGetKey (
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyIndex,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+ PSKeyItem *pKey
)
{
int i;
@@ -281,7 +281,7 @@ BOOL KeybSetKey (
}
}
if (j < (MAX_KEY_TABLE-1)) {
- memcpy(pTable->KeyTable[j].abyBSSID,pbyBSSID,U_ETHER_ADDR_LEN);
+ memcpy(pTable->KeyTable[j].abyBSSID,pbyBSSID,ETH_ALEN);
pTable->KeyTable[j].bInUse = TRUE;
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -470,7 +470,7 @@ BOOL KeybRemoveAllKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-VOID KeyvRemoveWEPKey (
+void KeyvRemoveWEPKey (
PSKeyManagement pTable,
DWORD dwKeyIndex,
DWORD_PTR dwIoBase
@@ -492,7 +492,7 @@ VOID KeyvRemoveWEPKey (
return;
}
-VOID KeyvRemoveAllWEPKey (
+void KeyvRemoveAllWEPKey (
PSKeyManagement pTable,
DWORD_PTR dwIoBase
)
@@ -518,10 +518,10 @@ VOID KeyvRemoveAllWEPKey (
*
*/
BOOL KeybGetTransmitKey (
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyType,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyType,
+ PSKeyItem *pKey
)
{
int i, ii;
@@ -598,8 +598,8 @@ BOOL KeybGetTransmitKey (
*
*/
BOOL KeybCheckPairewiseKey (
- IN PSKeyManagement pTable,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PSKeyItem *pKey
)
{
int i;
@@ -656,7 +656,7 @@ BOOL KeybSetDefaultKey (
}
pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for(ii=0;ii<ETH_ALEN;ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
// Group key
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index ba797c7b3c1..39403d93aeb 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -66,12 +66,12 @@ typedef struct tagSKeyItem
BYTE byCipherSuite;
BYTE byReserved0;
DWORD dwKeyIndex;
- PVOID pvKeyTable;
+ void *pvKeyTable;
} SKeyItem, *PSKeyItem; //64
typedef struct tagSKeyTable
{
- BYTE abyBSSID[U_ETHER_ADDR_LEN]; //6
+ BYTE abyBSSID[ETH_ALEN]; //6
BYTE byReserved0[2]; //8
SKeyItem PairwiseKey;
SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
@@ -101,13 +101,13 @@ typedef struct tagSKeyManagement
/*--------------------- Export Functions --------------------------*/
-VOID KeyvInitTable(PSKeyManagement pTable, DWORD_PTR dwIoBase);
+void KeyvInitTable(PSKeyManagement pTable, DWORD_PTR dwIoBase);
BOOL KeybGetKey(
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyIndex,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+ PSKeyItem *pKey
);
BOOL KeybSetKey(
@@ -141,15 +141,15 @@ BOOL KeybRemoveKey(
);
BOOL KeybGetTransmitKey(
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyType,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyType,
+ PSKeyItem *pKey
);
BOOL KeybCheckPairewiseKey(
- IN PSKeyManagement pTable,
- OUT PSKeyItem *pKey
+ PSKeyManagement pTable,
+ PSKeyItem *pKey
);
BOOL KeybRemoveAllKey(
@@ -158,13 +158,13 @@ BOOL KeybRemoveAllKey(
DWORD_PTR dwIoBase
);
-VOID KeyvRemoveWEPKey(
+void KeyvRemoveWEPKey(
PSKeyManagement pTable,
DWORD dwKeyIndex,
DWORD_PTR dwIoBase
);
-VOID KeyvRemoveAllWEPKey(
+void KeyvRemoveAllWEPKey(
PSKeyManagement pTable,
DWORD_PTR dwIoBase
);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index cdd7cd5e409..f1ef7da75c2 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -103,7 +103,7 @@ static int msglevel =MSG_LEVEL_INFO;
* Return Value: none
*
*/
-VOID MACvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyMacRegs)
+void MACvReadAllRegs (DWORD_PTR dwIoBase, PBYTE pbyMacRegs)
{
int ii;
@@ -234,7 +234,7 @@ BYTE MACbyReadMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx)
* Return Value: none
*
*/
-VOID MACvWriteMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData)
+void MACvWriteMultiAddr (DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData)
{
MACvSelectPage1(dwIoBase);
VNSvOutPortB(dwIoBase + MAC_REG_MAR0 + uByteIdx, byData);
@@ -676,7 +676,7 @@ void MACvSaveContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
* Return Value: none
*
*/
-VOID MACvRestoreContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
+void MACvRestoreContext (DWORD_PTR dwIoBase, PBYTE pbyCxtBuf)
{
int ii;
@@ -1244,7 +1244,7 @@ void MACvSetCurrTXDescAddr (int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAdd
* Return Value: none
*
*/
-VOID MACvTimer0MicroSDelay (DWORD_PTR dwIoBase, UINT uDelay)
+void MACvTimer0MicroSDelay (DWORD_PTR dwIoBase, UINT uDelay)
{
BYTE byValue;
UINT uu,ii;
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 3ba87fb64d3..5eb7f57f718 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -1075,7 +1075,7 @@
/*--------------------- Export Functions --------------------------*/
extern WORD TxRate_iwconfig;//2008-5-8 <add> by chester
-VOID MACvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyMacRegs);
+void MACvReadAllRegs(DWORD_PTR dwIoBase, PBYTE pbyMacRegs);
BOOL MACbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits);
BOOL MACbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits);
@@ -1083,32 +1083,32 @@ BOOL MACbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byRegOfs, BYTE byTestBits);
BOOL MACbIsIntDisable(DWORD_PTR dwIoBase);
BYTE MACbyReadMultiAddr(DWORD_PTR dwIoBase, UINT uByteIdx);
-VOID MACvWriteMultiAddr(DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData);
-VOID MACvSetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
-VOID MACvResetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
+void MACvWriteMultiAddr(DWORD_PTR dwIoBase, UINT uByteIdx, BYTE byData);
+void MACvSetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
+void MACvResetMultiAddrByHash(DWORD_PTR dwIoBase, BYTE byHashIdx);
-VOID MACvSetRxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
-VOID MACvGetRxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
+void MACvSetRxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
+void MACvGetRxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
-VOID MACvSetTxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
-VOID MACvGetTxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
+void MACvSetTxThreshold(DWORD_PTR dwIoBase, BYTE byThreshold);
+void MACvGetTxThreshold(DWORD_PTR dwIoBase, PBYTE pbyThreshold);
-VOID MACvSetDmaLength(DWORD_PTR dwIoBase, BYTE byDmaLength);
-VOID MACvGetDmaLength(DWORD_PTR dwIoBase, PBYTE pbyDmaLength);
+void MACvSetDmaLength(DWORD_PTR dwIoBase, BYTE byDmaLength);
+void MACvGetDmaLength(DWORD_PTR dwIoBase, PBYTE pbyDmaLength);
-VOID MACvSetShortRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
-VOID MACvGetShortRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
+void MACvSetShortRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
+void MACvGetShortRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
-VOID MACvSetLongRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
-VOID MACvGetLongRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
+void MACvSetLongRetryLimit(DWORD_PTR dwIoBase, BYTE byRetryLimit);
+void MACvGetLongRetryLimit(DWORD_PTR dwIoBase, PBYTE pbyRetryLimit);
-VOID MACvSetLoopbackMode(DWORD_PTR dwIoBase, BYTE byLoopbackMode);
+void MACvSetLoopbackMode(DWORD_PTR dwIoBase, BYTE byLoopbackMode);
BOOL MACbIsInLoopbackMode(DWORD_PTR dwIoBase);
-VOID MACvSetPacketFilter(DWORD_PTR dwIoBase, WORD wFilterType);
+void MACvSetPacketFilter(DWORD_PTR dwIoBase, WORD wFilterType);
-VOID MACvSaveContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
-VOID MACvRestoreContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
+void MACvSaveContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
+void MACvRestoreContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
BOOL MACbCompareContext(DWORD_PTR dwIoBase, PBYTE pbyCxtBuf);
BOOL MACbSoftwareReset(DWORD_PTR dwIoBase);
@@ -1117,14 +1117,14 @@ BOOL MACbSafeRxOff(DWORD_PTR dwIoBase);
BOOL MACbSafeTxOff(DWORD_PTR dwIoBase);
BOOL MACbSafeStop(DWORD_PTR dwIoBase);
BOOL MACbShutdown(DWORD_PTR dwIoBase);
-VOID MACvInitialize(DWORD_PTR dwIoBase);
-VOID MACvSetCurrRx0DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrRx1DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrTXDescAddr(int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrTx0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrAC0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrSyncDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
-VOID MACvSetCurrATIMDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvInitialize(DWORD_PTR dwIoBase);
+void MACvSetCurrRx0DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrRx1DescAddr(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrTXDescAddr(int iTxType, DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrTx0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrAC0DescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrSyncDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
+void MACvSetCurrATIMDescAddrEx(DWORD_PTR dwIoBase, DWORD dwCurrDescAddr);
void MACvTimer0MicroSDelay(DWORD_PTR dwIoBase, UINT uDelay);
void MACvOneShotTimer0MicroSec(DWORD_PTR dwIoBase, UINT uDelayTime);
void MACvOneShotTimer1MicroSec(DWORD_PTR dwIoBase, UINT uDelayTime);
diff --git a/drivers/staging/vt6655/mib.c b/drivers/staging/vt6655/mib.c
index fb11595c82c..4ca7877075b 100644
--- a/drivers/staging/vt6655/mib.c
+++ b/drivers/staging/vt6655/mib.c
@@ -190,7 +190,7 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
pStatistic->ullRsrOK++;
- if (cbFrameLength >= U_ETHER_ADDR_LEN) {
+ if (cbFrameLength >= ETH_ALEN) {
// update counters in case that successful transmit
if (byRSR & RSR_ADDRBROAD) {
pStatistic->ullRxBroadcastFrames++;
@@ -359,9 +359,9 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
else if ((512 <= cbFrameLength) && (cbFrameLength <= 1023)) {
pStatistic->dwRsrRxFrmLen512_1023++;
}
- else if ((1024 <= cbFrameLength) && (cbFrameLength <= MAX_PACKET_LEN + 4)) {
+ else if ((1024 <= cbFrameLength) && (cbFrameLength <= ETH_FRAME_LEN + 4)) {
pStatistic->dwRsrRxFrmLen1024_1518++;
- } else if (cbFrameLength > MAX_PACKET_LEN + 4) {
+ } else if (cbFrameLength > ETH_FRAME_LEN + 4) {
pStatistic->dwRsrLong++;
}
diff --git a/drivers/staging/vt6655/mib.h b/drivers/staging/vt6655/mib.h
index 2aa2b91de72..2308319a405 100644
--- a/drivers/staging/vt6655/mib.h
+++ b/drivers/staging/vt6655/mib.h
@@ -78,7 +78,7 @@ typedef struct tagSMib2Counter {
LONG ifType;
LONG ifMtu;
DWORD ifSpeed;
- BYTE ifPhysAddress[U_ETHER_ADDR_LEN];
+ BYTE ifPhysAddress[ETH_ALEN];
LONG ifAdminStatus;
LONG ifOperStatus;
DWORD ifLastChange;
diff --git a/drivers/staging/vt6655/michael.c b/drivers/staging/vt6655/michael.c
index c930e0cdb85..0bf57efdede 100644
--- a/drivers/staging/vt6655/michael.c
+++ b/drivers/staging/vt6655/michael.c
@@ -49,12 +49,12 @@
/*--------------------- Static Functions --------------------------*/
/*
static DWORD s_dwGetUINT32(BYTE * p); // Get DWORD from 4 bytes LSByte first
-static VOID s_vPutUINT32(BYTE* p, DWORD val); // Put DWORD into 4 bytes LSByte first
+static void s_vPutUINT32(BYTE* p, DWORD val); // Put DWORD into 4 bytes LSByte first
*/
-static VOID s_vClear(void); // Clear the internal message,
+static void s_vClear(void); // Clear the internal message,
// resets the object to the state just after construction.
-static VOID s_vSetKey(DWORD dwK0, DWORD dwK1);
-static VOID s_vAppendByte(BYTE b); // Add a single byte to the internal message
+static void s_vSetKey(DWORD dwK0, DWORD dwK1);
+static void s_vAppendByte(BYTE b); // Add a single byte to the internal message
/*--------------------- Export Variables --------------------------*/
static DWORD L, R; // Current state
@@ -78,7 +78,7 @@ static DWORD s_dwGetUINT32 (BYTE * p)
return res;
}
-static VOID s_vPutUINT32 (BYTE* p, DWORD val)
+static void s_vPutUINT32 (BYTE* p, DWORD val)
// Convert from DWORD to BYTE[] in a portable way
{
UINT i;
@@ -90,7 +90,7 @@ static VOID s_vPutUINT32 (BYTE* p, DWORD val)
}
*/
-static VOID s_vClear (void)
+static void s_vClear (void)
{
// Reset the state to the empty message.
L = K0;
@@ -99,7 +99,7 @@ static VOID s_vClear (void)
M = 0;
}
-static VOID s_vSetKey (DWORD dwK0, DWORD dwK1)
+static void s_vSetKey (DWORD dwK0, DWORD dwK1)
{
// Set the key
K0 = dwK0;
@@ -108,7 +108,7 @@ static VOID s_vSetKey (DWORD dwK0, DWORD dwK1)
s_vClear();
}
-static VOID s_vAppendByte (BYTE b)
+static void s_vAppendByte (BYTE b)
{
// Append the byte to our word-sized buffer
M |= b << (8*nBytesInM);
@@ -131,14 +131,14 @@ static VOID s_vAppendByte (BYTE b)
}
}
-VOID MIC_vInit (DWORD dwK0, DWORD dwK1)
+void MIC_vInit (DWORD dwK0, DWORD dwK1)
{
// Set the key
s_vSetKey(dwK0, dwK1);
}
-VOID MIC_vUnInit (void)
+void MIC_vUnInit (void)
{
// Wipe the key material
K0 = 0;
@@ -149,7 +149,7 @@ VOID MIC_vUnInit (void)
s_vClear();
}
-VOID MIC_vAppend (PBYTE src, UINT nBytes)
+void MIC_vAppend (PBYTE src, UINT nBytes)
{
// This is simple
while (nBytes > 0)
@@ -159,7 +159,7 @@ VOID MIC_vAppend (PBYTE src, UINT nBytes)
}
}
-VOID MIC_vGetMIC (PDWORD pdwL, PDWORD pdwR)
+void MIC_vGetMIC (PDWORD pdwL, PDWORD pdwR)
{
// Append the minimum padding
s_vAppendByte(0x5a);
diff --git a/drivers/staging/vt6655/michael.h b/drivers/staging/vt6655/michael.h
index 3f79b52832d..97de77b4da2 100644
--- a/drivers/staging/vt6655/michael.h
+++ b/drivers/staging/vt6655/michael.h
@@ -35,16 +35,16 @@
/*--------------------- Export Types ------------------------------*/
-VOID MIC_vInit(DWORD dwK0, DWORD dwK1);
+void MIC_vInit(DWORD dwK0, DWORD dwK1);
-VOID MIC_vUnInit(void);
+void MIC_vUnInit(void);
// Append bytes to the message to be MICed
-VOID MIC_vAppend(PBYTE src, UINT nBytes);
+void MIC_vAppend(PBYTE src, UINT nBytes);
// Get the MIC result. Destination should accept 8 bytes of result.
// This also resets the message to empty.
-VOID MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
+void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
/*--------------------- Export Macros ------------------------------*/
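
For reference, a minimal sketch (not part of this patch) of the call sequence the MIC_v* declarations above imply, using the driver's DWORD/PBYTE typedefs; the parameter names are assumptions:

#include "michael.h"    /* MIC_vInit(), MIC_vAppend(), MIC_vGetMIC(), MIC_vUnInit() */

static void example_compute_mic(DWORD dwK0, DWORD dwK1,
                                PBYTE pbyMsg, UINT cbMsg,
                                PDWORD pdwMicL, PDWORD pdwMicR)
{
        MIC_vInit(dwK0, dwK1);          /* load the key, clear the state   */
        MIC_vAppend(pbyMsg, cbMsg);     /* feed the message bytes          */
        MIC_vGetMIC(pdwMicL, pdwMicR);  /* pads, finalizes, resets message */
        MIC_vUnInit();                  /* wipe the key material           */
}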
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 84eda045538..64c22c3e4bb 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -74,10 +74,10 @@ static int msglevel =MSG_LEVEL_INFO;
-*/
-VOID
+void
PSvEnablePowerSaving(
- IN HANDLE hDeviceContext,
- IN WORD wListenInterval
+ void *hDeviceContext,
+ WORD wListenInterval
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -118,7 +118,7 @@ PSvEnablePowerSaving(
pDevice->bEnablePSMode = TRUE;
if (pDevice->eOPMode == OP_MODE_ADHOC) {
-// bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+// bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
}
// We don't send null pkt in ad hoc mode since beacon will handle this.
else if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
@@ -144,9 +144,9 @@ PSvEnablePowerSaving(
*
-*/
-VOID
+void
PSvDisablePowerSaving(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -184,9 +184,9 @@ PSvDisablePowerSaving(
BOOL
PSbConsiderPowerDown(
- IN HANDLE hDeviceContext,
- IN BOOL bCheckRxDMA,
- IN BOOL bCheckCountToWakeUp
+ void *hDeviceContext,
+ BOOL bCheckRxDMA,
+ BOOL bCheckCountToWakeUp
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -250,9 +250,9 @@ PSbConsiderPowerDown(
-VOID
+void
PSvSendPSPOLL(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -298,7 +298,7 @@ PSvSendPSPOLL(
-*/
BOOL
PSbSendNullPacket(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -384,7 +384,7 @@ PSbSendNullPacket(
BOOL
PSbIsNextTBTTWakeUp(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 30634fabfe9..c0dbe216e97 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -45,40 +45,40 @@
/*--------------------- Export Functions --------------------------*/
-// IN PSDevice pDevice
-// IN PSDevice hDeviceContext
+// PSDevice pDevice
+// PSDevice hDeviceContext
BOOL
PSbConsiderPowerDown(
- IN HANDLE hDeviceContext,
- IN BOOL bCheckRxDMA,
- IN BOOL bCheckCountToWakeUp
+ void *hDeviceContext,
+ BOOL bCheckRxDMA,
+ BOOL bCheckCountToWakeUp
);
-VOID
+void
PSvDisablePowerSaving(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
PSvEnablePowerSaving(
- IN HANDLE hDeviceContext,
- IN WORD wListenInterval
+ void *hDeviceContext,
+ WORD wListenInterval
);
-VOID
+void
PSvSendPSPOLL(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
BOOL
PSbSendNullPacket(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
BOOL
PSbIsNextTBTTWakeUp(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
#endif //__POWER_H__
diff --git a/drivers/staging/vt6655/rc4.c b/drivers/staging/vt6655/rc4.c
index e6c61312fd2..4a53f159cb3 100644
--- a/drivers/staging/vt6655/rc4.c
+++ b/drivers/staging/vt6655/rc4.c
@@ -32,7 +32,7 @@
#include "rc4.h"
-VOID rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len)
+void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len)
{
UINT ust1, ust2;
UINT keyindex;
@@ -78,7 +78,7 @@ UINT rc4_byte(PRC4Ext pRC4)
return pbyst[(ustx + usty) & 0xff];
}
-VOID rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest,
+void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest,
PBYTE pbySrc, UINT cbData_len)
{
UINT ii;
diff --git a/drivers/staging/vt6655/rc4.h b/drivers/staging/vt6655/rc4.h
index bf607c9d446..e65cae69efa 100644
--- a/drivers/staging/vt6655/rc4.h
+++ b/drivers/staging/vt6655/rc4.h
@@ -40,7 +40,7 @@ typedef struct {
BYTE abystate[256];
} RC4Ext, *PRC4Ext;
-VOID rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len);
+void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len);
UINT rc4_byte(PRC4Ext pRC4);
void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc, UINT cbData_len);
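
For reference, a minimal sketch (not part of this patch) of how the rc4 helpers declared above chain together: key-schedule once with rc4_init(), then stream-encrypt with rc4_encrypt(); the names here are assumptions:

#include "rc4.h"        /* RC4Ext, rc4_init(), rc4_encrypt() */

static void example_rc4_seal(PBYTE pbyKey, UINT cbKeyLen,
                             PBYTE pbyDst, PBYTE pbySrc, UINT cbLen)
{
        RC4Ext rc4;

        rc4_init(&rc4, pbyKey, cbKeyLen);         /* build the key schedule   */
        rc4_encrypt(&rc4, pbyDst, pbySrc, cbLen); /* XOR with the keystream   */
}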
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 01ab73f1cc3..7cb86fe2eeb 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -808,7 +808,7 @@ BOOL RFbAL2230SelectChannel (DWORD_PTR dwIoBase, BYTE byChannel)
*
*/
BOOL RFbInit (
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
BOOL bResult = TRUE;
@@ -846,7 +846,7 @@ BOOL bResult = TRUE;
*
*/
BOOL RFbShutDown (
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
BOOL bResult = TRUE;
@@ -997,9 +997,9 @@ BOOL RFvWriteWakeProgSyn (DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel)
*
*/
BOOL RFbSetPower (
- IN PSDevice pDevice,
- IN UINT uRATE,
- IN UINT uCH
+ PSDevice pDevice,
+ UINT uRATE,
+ UINT uCH
)
{
BOOL bResult = TRUE;
@@ -1136,9 +1136,9 @@ BYTE byPwrdBm = 0;
*/
BOOL RFbRawSetPower (
- IN PSDevice pDevice,
- IN BYTE byPwr,
- IN UINT uRATE
+ PSDevice pDevice,
+ BYTE byPwr,
+ UINT uRATE
)
{
BOOL bResult = TRUE;
@@ -1201,10 +1201,10 @@ DWORD dwMax7230Pwr = 0;
* Return Value: none
*
-*/
-VOID
+void
RFvRSSITodBm (
- IN PSDevice pDevice,
- IN BYTE byCurrRSSI,
+ PSDevice pDevice,
+ BYTE byCurrRSSI,
long * pldBm
)
{
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index f316bcced8e..25dfc7942f6 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -79,20 +79,20 @@
BOOL IFRFbWriteEmbeded(DWORD_PTR dwIoBase, DWORD dwData);
BOOL RFbSelectChannel(DWORD_PTR dwIoBase, BYTE byRFType, BYTE byChannel);
BOOL RFbInit (
- IN PSDevice pDevice
+ PSDevice pDevice
);
BOOL RFvWriteWakeProgSyn(DWORD_PTR dwIoBase, BYTE byRFType, UINT uChannel);
BOOL RFbSetPower(PSDevice pDevice, UINT uRATE, UINT uCH);
BOOL RFbRawSetPower(
- IN PSDevice pDevice,
- IN BYTE byPwr,
- IN UINT uRATE
+ PSDevice pDevice,
+ BYTE byPwr,
+ UINT uRATE
);
-VOID
+void
RFvRSSITodBm(
- IN PSDevice pDevice,
- IN BYTE byCurrRSSI,
+ PSDevice pDevice,
+ BYTE byCurrRSSI,
long *pldBm
);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index ed3070edcac..a0445c3427e 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -25,19 +25,19 @@
* Date: May 20, 2003
*
* Functions:
- * s_vGenerateTxParameter - Generate tx dma requried parameter.
+ * s_vGenerateTxParameter - Generate tx dma required parameter.
* vGenerateMACHeader - Translate 802.3 to 802.11 header
- * cbGetFragCount - Caculate fragement number count
+ * cbGetFragCount - Calculate fragment count
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_cbFillTxBufHead - fulfill tx dma buffer header
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
- * s_uGetRTSCTSDuration- get rtx/cts requried duration
+ * s_uGetRTSCTSDuration- get rts/cts required duration
* s_uGetRTSCTSRsvTime- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
- * s_vFillFragParameter- Set fragement ctl parameter.
+ * s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
@@ -115,93 +115,93 @@ const WORD wFB_Opt1[2][5] = {
static
-VOID
+void
s_vFillTxKey(
- IN PSDevice pDevice,
- IN PBYTE pbyBuf,
- IN PBYTE pbyIVHead,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyHdrBuf,
- IN WORD wPayloadLen,
- OUT PBYTE pMICHDR
+ PSDevice pDevice,
+ PBYTE pbyBuf,
+ PBYTE pbyIVHead,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyHdrBuf,
+ WORD wPayloadLen,
+ PBYTE pMICHDR
);
static
-VOID
+void
s_vFillRTSHead(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pvRTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pvRTS,
+ UINT cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate,
+ BYTE byFBOption
);
static
-VOID
+void
s_vGenerateTxParameter(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pTxBufHead,
- IN PVOID pvRrvTime,
- IN PVOID pvRTS,
- IN PVOID pvCTS,
- IN UINT cbFrameSize,
- IN BOOL bNeedACK,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pTxBufHead,
+ void * pvRrvTime,
+ void * pvRTS,
+ void * pvCTS,
+ UINT cbFrameSize,
+ BOOL bNeedACK,
+ UINT uDMAIdx,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate
);
static void s_vFillFragParameter(
- IN PSDevice pDevice,
- IN PBYTE pbyBuffer,
- IN UINT uTxType,
- IN PVOID pvtdCurr,
- IN WORD wFragType,
- IN UINT cbReqCount
+ PSDevice pDevice,
+ PBYTE pbyBuffer,
+ UINT uTxType,
+ void * pvtdCurr,
+ WORD wFragType,
+ UINT cbReqCount
);
static
UINT
s_cbFillTxBufHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PBYTE pbyTxBufferAddr,
- IN UINT cbFrameBodySize,
- IN UINT uDMAIdx,
- IN PSTxDesc pHeadTD,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN BOOL bNeedEncrypt,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- OUT PUINT puMACfragNum
+ PSDevice pDevice,
+ BYTE byPktType,
+ PBYTE pbyTxBufferAddr,
+ UINT cbFrameBodySize,
+ UINT uDMAIdx,
+ PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ BOOL bNeedEncrypt,
+ PSKeyItem pTransmitKey,
+ UINT uNodeIndex,
+ PUINT puMACfragNum
);
static
UINT
s_uFillDataHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pTxDataHead,
- IN UINT cbFrameLength,
- IN UINT uDMAIdx,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pTxDataHead,
+ UINT cbFrameLength,
+ UINT uDMAIdx,
+ BOOL bNeedAck,
+ UINT uFragIdx,
+ UINT cbLastFragmentSize,
+ UINT uMACfragNum,
+ BYTE byFBOption,
+ WORD wCurrentRate
);
@@ -210,15 +210,15 @@ s_uFillDataHead (
static
-VOID
+void
s_vFillTxKey (
- IN PSDevice pDevice,
- IN PBYTE pbyBuf,
- IN PBYTE pbyIVHead,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyHdrBuf,
- IN WORD wPayloadLen,
- OUT PBYTE pMICHDR
+ PSDevice pDevice,
+ PBYTE pbyBuf,
+ PBYTE pbyIVHead,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyHdrBuf,
+ WORD wPayloadLen,
+ PBYTE pMICHDR
)
{
PDWORD pdwIV = (PDWORD) pbyIVHead;
@@ -328,12 +328,12 @@ s_vFillTxKey (
static
-VOID
+void
s_vSWencryption (
- IN PSDevice pDevice,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyPayloadHead,
- IN WORD wPayloadSize
+ PSDevice pDevice,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyPayloadHead,
+ WORD wPayloadSize
)
{
UINT cbICVlen = 4;
@@ -379,11 +379,11 @@ s_vSWencryption (
static
UINT
s_uGetTxRsvTime (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BOOL bNeedAck
+ PSDevice pDevice,
+ BYTE byPktType,
+ UINT cbFrameLength,
+ WORD wRate,
+ BOOL bNeedAck
)
{
UINT uDataTime, uAckTime;
@@ -410,11 +410,11 @@ s_uGetTxRsvTime (
static
UINT
s_uGetRTSCTSRsvTime (
- IN PSDevice pDevice,
- IN BYTE byRTSRsvType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byRTSRsvType,
+ BYTE byPktType,
+ UINT cbFrameLength,
+ WORD wCurrentRate
)
{
UINT uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
@@ -452,16 +452,16 @@ s_uGetRTSCTSRsvTime (
static
UINT
s_uGetDataDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byDurType,
+ UINT cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ UINT uFragIdx,
+ UINT cbLastFragmentSize,
+ UINT uMACfragNum,
+ BYTE byFBOption
)
{
BOOL bLastFrag = 0;
@@ -623,13 +623,13 @@ s_uGetDataDuration (
static
UINT
s_uGetRTSCTSDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byDurType,
+ UINT cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ BYTE byFBOption
)
{
UINT uCTSTime = 0, uDurTime = 0;
@@ -721,17 +721,17 @@ s_uGetRTSCTSDuration (
static
UINT
s_uFillDataHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pTxDataHead,
- IN UINT cbFrameLength,
- IN UINT uDMAIdx,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pTxDataHead,
+ UINT cbFrameLength,
+ UINT uDMAIdx,
+ BOOL bNeedAck,
+ UINT uFragIdx,
+ UINT cbLastFragmentSize,
+ UINT uMACfragNum,
+ BYTE byFBOption,
+ WORD wCurrentRate
)
{
WORD wLen = 0x0000;
@@ -851,17 +851,17 @@ s_uFillDataHead (
static
-VOID
+void
s_vFillRTSHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pvRTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pvRTS,
+ UINT cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate,
+ BYTE byFBOption
)
{
UINT uRTSFrameLen = 20;
@@ -877,7 +877,7 @@ s_vFillRTSHead (
}
// Note: So far RTSHead doesn't appear in ATIM & Beacon DMA, so we don't need to take them into account.
- // Otherwise, we need to modified codes for them.
+ // Otherwise, we would need to modify the code for them.
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
if (byFBOption == AUTO_FB_NONE) {
PSRTS_g pBuf = (PSRTS_g)pvRTS;
@@ -900,16 +900,16 @@ s_vFillRTSHead (
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
else {
@@ -938,17 +938,17 @@ s_vFillRTSHead (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
} // if (byFBOption == AUTO_FB_NONE)
@@ -969,17 +969,17 @@ s_vFillRTSHead (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
@@ -1000,16 +1000,16 @@ s_vFillRTSHead (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
}
@@ -1029,33 +1029,33 @@ s_vFillRTSHead (
if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
}
}
}
static
-VOID
+void
s_vFillCTSHead (
- IN PSDevice pDevice,
- IN UINT uDMAIdx,
- IN BYTE byPktType,
- IN PVOID pvCTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ UINT uDMAIdx,
+ BYTE byPktType,
+ void * pvCTS,
+ UINT cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ WORD wCurrentRate,
+ BYTE byFBOption
)
{
UINT uCTSFrameLen = 14;
@@ -1098,7 +1098,7 @@ s_vFillCTSHead (
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
PSCTS pBuf = (PSCTS)pvCTS;
@@ -1116,7 +1116,7 @@ s_vFillCTSHead (
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), ETH_ALEN);
}
}
}
@@ -1133,7 +1133,7 @@ s_vFillCTSHead (
*
* Parameters:
* In:
- * pDevice - Pointer to adpater
+ * pDevice - Pointer to adapter
* pTxDataHead - Transmit Data Buffer
* pTxBufHead - pTxBufHead
* pvRrvTime - pvRrvTime
@@ -1150,19 +1150,19 @@ s_vFillCTSHead (
-*/
// UINT cbFrameSize,//Hdr+Payload+FCS
static
-VOID
+void
s_vGenerateTxParameter (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pTxBufHead,
- IN PVOID pvRrvTime,
- IN PVOID pvRTS,
- IN PVOID pvCTS,
- IN UINT cbFrameSize,
- IN BOOL bNeedACK,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byPktType,
+ void * pTxBufHead,
+ void * pvRrvTime,
+ void * pvRTS,
+ void * pvCTS,
+ UINT cbFrameSize,
+ BOOL bNeedACK,
+ UINT uDMAIdx,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate
)
{
UINT cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
@@ -1268,14 +1268,14 @@ s_vGenerateTxParameter (
UINT cbFragmentSize,//Hdr+payoad+FCS
*/
static
-VOID
+void
s_vFillFragParameter(
- IN PSDevice pDevice,
- IN PBYTE pbyBuffer,
- IN UINT uTxType,
- IN PVOID pvtdCurr,
- IN WORD wFragType,
- IN UINT cbReqCount
+ PSDevice pDevice,
+ PBYTE pbyBuffer,
+ UINT uTxType,
+ void * pvtdCurr,
+ WORD wFragType,
+ UINT cbReqCount
)
{
PSTxBufHead pTxBufHead = (PSTxBufHead) pbyBuffer;
@@ -1318,18 +1318,18 @@ s_vFillFragParameter(
static
UINT
s_cbFillTxBufHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PBYTE pbyTxBufferAddr,
- IN UINT cbFrameBodySize,
- IN UINT uDMAIdx,
- IN PSTxDesc pHeadTD,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN BOOL bNeedEncrypt,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- OUT PUINT puMACfragNum
+ PSDevice pDevice,
+ BYTE byPktType,
+ PBYTE pbyTxBufferAddr,
+ UINT cbFrameBodySize,
+ UINT uDMAIdx,
+ PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ BOOL bNeedEncrypt,
+ PSKeyItem pTransmitKey,
+ UINT uNodeIndex,
+ PUINT puMACfragNum
)
{
UINT cbMACHdLen;
@@ -1376,11 +1376,11 @@ s_cbFillTxBufHead (
PSTxBufHead psTxBufHd = (PSTxBufHead) pbyTxBufferAddr;
// UINT tmpDescIdx;
UINT cbHeaderLength = 0;
- PVOID pvRrvTime;
+ void * pvRrvTime;
PSMICHDRHead pMICHDR;
- PVOID pvRTS;
- PVOID pvCTS;
- PVOID pvTxDataHd;
+ void * pvRTS;
+ void * pvCTS;
+ void * pvTxDataHd;
WORD wTxBufSize; // FFinfo size
UINT uTotalCopyLength = 0;
BYTE byFBOption = AUTO_FB_NONE;
@@ -1544,7 +1544,7 @@ s_cbFillTxBufHead (
}
} // Auto Fall Back
}
- memset((PVOID)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderLength - wTxBufSize));
//////////////////////////////////////////////////////////////////
if ((bNeedEncrypt == TRUE) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
@@ -1600,7 +1600,7 @@ s_cbFillTxBufHead (
//Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (PVOID)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
+ s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
@@ -1622,7 +1622,7 @@ s_cbFillTxBufHead (
// 802.1H
- if (ntohs(psEthHeader->wType) > MAX_DATA_LEN) {
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
@@ -1643,7 +1643,7 @@ s_cbFillTxBufHead (
//if (pDevice->bAES) {
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
//}
- //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (PVOID)psTxBufHd, byKeySel,
+ //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
// pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
@@ -1653,7 +1653,7 @@ s_cbFillTxBufHead (
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (PVOID)psTxBufHd, uLength);
+ memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength), (pPacket + 14), (cbFragPayloadSize - cb802_1_H_len));
@@ -1685,7 +1685,7 @@ s_cbFillTxBufHead (
//4.Set Sequence Control
//5.Get S/W generate FCS
//--------------------
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (PVOID)ptdCurr, wFragType, cbReqCount);
+ s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
@@ -1704,7 +1704,7 @@ s_cbFillTxBufHead (
wFragType = FRAGCTL_ENDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (PVOID)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
+ s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbLastFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbLastFragmentSize, uDMAIdx, bNeedACK,
@@ -1740,7 +1740,7 @@ s_cbFillTxBufHead (
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (PVOID)psTxBufHd, uLength);
+ memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
if (bMIC2Frag == FALSE) {
@@ -1814,7 +1814,7 @@ s_cbFillTxBufHead (
//--------------------
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (PVOID)ptdCurr, wFragType, cbReqCount);
+ s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
@@ -1834,7 +1834,7 @@ s_cbFillTxBufHead (
wFragType = FRAGCTL_MIDFRAG;
//Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (PVOID)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
+ s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFragmentSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFragmentSize, uDMAIdx, bNeedACK,
@@ -1864,7 +1864,7 @@ s_cbFillTxBufHead (
//if (pDevice->bAES) {
// s_vFillMICHDR(pDevice, (PBYTE)pMICHDR, pbyMacHdr, (WORD)cbFragPayloadSize);
//}
- //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (PVOID)psTxBufHd, byKeySel,
+ //cbReqCount += s_uDoEncryption(pDevice, psEthHeader, (void *)psTxBufHd, byKeySel,
// pbyPayloadHead, (WORD)cbFragPayloadSize, uDMAIdx);
@@ -1875,7 +1875,7 @@ s_cbFillTxBufHead (
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen;
//copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (PVOID)psTxBufHd, uLength);
+ memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
@@ -1941,7 +1941,7 @@ s_cbFillTxBufHead (
//5.Get S/W generate FCS
//--------------------
- s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (PVOID)ptdCurr, wFragType, cbReqCount);
+ s_vFillFragParameter(pDevice, pbyBuffer, uDMAIdx, (void *)ptdCurr, wFragType, cbReqCount);
ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
@@ -1964,7 +1964,7 @@ s_cbFillTxBufHead (
psTxBufHd->wFragCtl |= (WORD)wFragType;
//Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, (PVOID)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
+ s_vGenerateTxParameter(pDevice, byPktType, (void *)psTxBufHd, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, pDevice->wCurrentRate);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
@@ -1986,7 +1986,7 @@ s_cbFillTxBufHead (
}
// 802.1H
- if (ntohs(psEthHeader->wType) > MAX_DATA_LEN) {
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
if ((psEthHeader->wType == TYPE_PKT_IPX) ||
(psEthHeader->wType == cpu_to_le16(0xF380))) {
memcpy((PBYTE) (pbyPayloadHead), &pDevice->abySNAP_Bridgetunnel[0], 6);
@@ -2015,7 +2015,7 @@ s_cbFillTxBufHead (
uLength = cbHeaderLength + cbMACHdLen + uPadding + cbIVlen + cb802_1_H_len;
//copy TxBufferHeader + MacHeader to desc
- memcpy(pbyBuffer, (PVOID)psTxBufHd, uLength);
+ memcpy(pbyBuffer, (void *)psTxBufHd, uLength);
// Copy the Packet into a tx Buffer
memcpy((pbyBuffer + uLength),
@@ -2093,21 +2093,21 @@ s_cbFillTxBufHead (
}
-VOID
+void
vGenerateFIFOHeader (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PBYTE pbyTxBufferAddr,
- IN BOOL bNeedEncrypt,
- IN UINT cbPayloadSize,
- IN UINT uDMAIdx,
- IN PSTxDesc pHeadTD,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- OUT PUINT puMACfragNum,
- OUT PUINT pcbHeaderSize
+ PSDevice pDevice,
+ BYTE byPktType,
+ PBYTE pbyTxBufferAddr,
+ BOOL bNeedEncrypt,
+ UINT cbPayloadSize,
+ UINT uDMAIdx,
+ PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ PSKeyItem pTransmitKey,
+ UINT uNodeIndex,
+ PUINT puMACfragNum,
+ PUINT pcbHeaderSize
)
{
UINT wTxBufSize; // FFinfo size
@@ -2252,7 +2252,7 @@ vGenerateFIFOHeader (
*
* Parameters:
* In:
- * pDevice - Pointer to adpater
+ * pDevice - Pointer to adapter
* dwTxBufferAddr - Transmit Buffer
* pPacket - Packet from upper layer
* cbPacketSize - Transmit Data Length
@@ -2264,16 +2264,16 @@ vGenerateFIFOHeader (
*
-*/
-VOID
+void
vGenerateMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyBufferAddr,
- IN WORD wDuration,
- IN PSEthernetHeader psEthHeader,
- IN BOOL bNeedEncrypt,
- IN WORD wFragType,
- IN UINT uDMAIdx,
- IN UINT uFragIdx
+ PSDevice pDevice,
+ PBYTE pbyBufferAddr,
+ WORD wDuration,
+ PSEthernetHeader psEthHeader,
+ BOOL bNeedEncrypt,
+ WORD wFragType,
+ UINT uDMAIdx,
+ UINT uFragIdx
)
{
PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
@@ -2287,21 +2287,21 @@ vGenerateMACHeader (
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_FROMDS;
}
else {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
}
else {
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
pMACHeader->wFrameCtl |= FC_TODS;
}
}
@@ -2342,9 +2342,9 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
PSTxDesc pFrstTD;
BYTE byPktType;
PBYTE pbyTxBufferAddr;
- PVOID pvRTS;
+ void * pvRTS;
PSCTS pCTS;
- PVOID pvTxDataHd;
+ void * pvTxDataHd;
UINT uDuration;
UINT cbReqCount;
PS802_11Header pMACHeader;
@@ -2362,8 +2362,8 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
WORD wTxBufSize;
UINT cbMacHdLen;
SEthernetHeader sEthHeader;
- PVOID pvRrvTime;
- PVOID pMICHDR;
+ void * pvRrvTime;
+ void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
WORD wCurrentRate = RATE_1M;
@@ -2516,10 +2516,10 @@ CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
}
- memset((PVOID)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), U_ETHER_ADDR_LEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
+ memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
@@ -2738,10 +2738,10 @@ CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket) {
UINT
cbGetFragCount (
- IN PSDevice pDevice,
- IN PSKeyItem pTransmitKey,
- IN UINT cbFrameBodySize,
- IN PSEthernetHeader psEthHeader
+ PSDevice pDevice,
+ PSKeyItem pTransmitKey,
+ UINT cbFrameBodySize,
+ PSEthernetHeader psEthHeader
)
{
UINT cbMACHdLen;
@@ -2825,15 +2825,15 @@ cbGetFragCount (
}
-VOID
+void
vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen) {
PSTxDesc pFrstTD;
BYTE byPktType;
PBYTE pbyTxBufferAddr;
- PVOID pvRTS;
- PVOID pvCTS;
- PVOID pvTxDataHd;
+ void * pvRTS;
+ void * pvCTS;
+ void * pvTxDataHd;
UINT uDuration;
UINT cbReqCount;
PS802_11Header pMACHeader;
@@ -2857,8 +2857,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
WORD wTxBufSize;
UINT cbMacHdLen;
SEthernetHeader sEthHeader;
- PVOID pvRrvTime;
- PVOID pMICHDR;
+ void * pvRrvTime;
+ void * pMICHDR;
PSMgmtObject pMgmt = pDevice->pMgmt;
WORD wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
@@ -3061,9 +3061,9 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDU
}
- memset((PVOID)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), U_ETHER_ADDR_LEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
+ memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), ETH_ALEN);
+ memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), ETH_ALEN);
//=========================
// No Fragmentation
//=========================
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 5da815efe70..b008fc23adb 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -40,67 +40,67 @@
/*--------------------- Export Functions --------------------------*/
/*
-VOID vGenerateMACHeader(
- IN PSDevice pDevice,
- IN DWORD dwTxBufferAddr,
- IN PBYTE pbySkbData,
- IN UINT cbPacketSize,
- IN BOOL bDMA0Used,
- OUT PUINT pcbHeadSize,
- OUT PUINT pcbAppendPayload
+void vGenerateMACHeader(
+ PSDevice pDevice,
+ DWORD dwTxBufferAddr,
+ PBYTE pbySkbData,
+ UINT cbPacketSize,
+ BOOL bDMA0Used,
+ PUINT pcbHeadSize,
+ PUINT pcbAppendPayload
);
-VOID vProcessRxMACHeader (
- IN PSDevice pDevice,
- IN DWORD dwRxBufferAddr,
- IN UINT cbPacketSize,
- IN BOOL bIsWEP,
- OUT PUINT pcbHeadSize
+void vProcessRxMACHeader (
+ PSDevice pDevice,
+ DWORD dwRxBufferAddr,
+ UINT cbPacketSize,
+ BOOL bIsWEP,
+ PUINT pcbHeadSize
);
*/
-VOID
+void
vGenerateMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyBufferAddr,
- IN WORD wDuration,
- IN PSEthernetHeader psEthHeader,
- IN BOOL bNeedEncrypt,
- IN WORD wFragType,
- IN UINT uDMAIdx,
- IN UINT uFragIdx
+ PSDevice pDevice,
+ PBYTE pbyBufferAddr,
+ WORD wDuration,
+ PSEthernetHeader psEthHeader,
+ BOOL bNeedEncrypt,
+ WORD wFragType,
+ UINT uDMAIdx,
+ UINT uFragIdx
);
UINT
cbGetFragCount(
- IN PSDevice pDevice,
- IN PSKeyItem pTransmitKey,
- IN UINT cbFrameBodySize,
- IN PSEthernetHeader psEthHeader
+ PSDevice pDevice,
+ PSKeyItem pTransmitKey,
+ UINT cbFrameBodySize,
+ PSEthernetHeader psEthHeader
);
-VOID
+void
vGenerateFIFOHeader (
- IN PSDevice pDevice,
- IN BYTE byPktTyp,
- IN PBYTE pbyTxBufferAddr,
- IN BOOL bNeedEncrypt,
- IN UINT cbPayloadSize,
- IN UINT uDMAIdx,
- IN PSTxDesc pHeadTD,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- OUT PUINT puMACfragNum,
- OUT PUINT pcbHeaderSize
+ PSDevice pDevice,
+ BYTE byPktTyp,
+ PBYTE pbyTxBufferAddr,
+ BOOL bNeedEncrypt,
+ UINT cbPayloadSize,
+ UINT uDMAIdx,
+ PSTxDesc pHeadTD,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ PSKeyItem pTransmitKey,
+ UINT uNodeIndex,
+ PUINT puMACfragNum,
+ PUINT pcbHeaderSize
);
-VOID vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen);
+void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb, PBYTE pbMPDU, UINT cbMPDULen);
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 5a7c6ca724b..418575fdc2c 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -85,15 +85,15 @@ BYTE SROMbyReadEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset)
byData = 0xFF;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- // turn off hardware retry for getting NACK
+ /* turn off hardware retry for getting NACK */
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
- // issue read command
+ /* issue read command */
VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR);
- // wait DONE be set
+ /* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
@@ -125,7 +125,7 @@ BYTE SROMbyReadEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset)
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL SROMbWriteEmbedded (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
+BOOL SROMbWriteEmbedded(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
{
WORD wDelay, wNoACK;
BYTE byWait;
@@ -133,16 +133,16 @@ BOOL SROMbWriteEmbedded (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
BYTE byOrg;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- // turn off hardware retry for getting NACK
+ /* turn off hardware retry for getting NACK */
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
VNSvOutPortB(dwIoBase + MAC_REG_I2MDOPT, byData);
- // issue write command
+ /* issue write command */
VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMW);
- // wait DONE be set
+ /* wait DONE be set */
for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
@@ -178,7 +178,7 @@ BOOL SROMbWriteEmbedded (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byData)
* Return Value: none
*
*/
-void SROMvRegBitsOn (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
+void SROMvRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
{
BYTE byOrgData;
@@ -199,7 +199,7 @@ void SROMvRegBitsOn (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
* none
*
*/
-void SROMvRegBitsOff (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
+void SROMvRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
{
BYTE byOrgData;
@@ -222,7 +222,7 @@ void SROMvRegBitsOff (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byBits)
* Return Value: TRUE if all test bits on; otherwise FALSE
*
*/
-BOOL SROMbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
+BOOL SROMbIsRegBitsOn(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
{
BYTE byOrgData;
@@ -245,7 +245,7 @@ BOOL SROMbIsRegBitsOn (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
* Return Value: TRUE if all test bits off; otherwise FALSE
*
*/
-BOOL SROMbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
+BOOL SROMbIsRegBitsOff(DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits)
{
BYTE byOrgData;
@@ -266,11 +266,11 @@ BOOL SROMbIsRegBitsOff (DWORD_PTR dwIoBase, BYTE byContntOffset, BYTE byTestBits
* Return Value: none
*
*/
-void SROMvReadAllContents (DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
+void SROMvReadAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
{
int ii;
- // ii = Rom Address
+ /* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
*pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,(BYTE) ii);
pbyEepromRegs++;
@@ -291,11 +291,11 @@ void SROMvReadAllContents (DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvWriteAllContents (DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
+void SROMvWriteAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
{
int ii;
- // ii = Rom Address
+ /* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
SROMbWriteEmbedded(dwIoBase,(BYTE) ii, *pbyEepromRegs);
pbyEepromRegs++;
@@ -315,12 +315,12 @@ void SROMvWriteAllContents (DWORD_PTR dwIoBase, PBYTE pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvReadEtherAddress (DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
+void SROMvReadEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
{
BYTE ii;
- // ii = Rom Address
- for (ii = 0; ii < U_ETHER_ADDR_LEN; ii++) {
+ /* ii = Rom Address */
+ for (ii = 0; ii < ETH_ALEN; ii++) {
*pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii);
pbyEtherAddress++;
}
@@ -340,12 +340,12 @@ void SROMvReadEtherAddress (DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
* Return Value: none
*
*/
-void SROMvWriteEtherAddress (DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
+void SROMvWriteEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
{
BYTE ii;
- // ii = Rom Address
- for (ii = 0; ii < U_ETHER_ADDR_LEN; ii++) {
+ /* ii = Rom Address */
+ for (ii = 0; ii < ETH_ALEN; ii++) {
SROMbWriteEmbedded(dwIoBase, ii, *pbyEtherAddress);
pbyEtherAddress++;
}
@@ -364,15 +364,15 @@ void SROMvWriteEtherAddress (DWORD_PTR dwIoBase, PBYTE pbyEtherAddress)
* Return Value: none
*
*/
-void SROMvReadSubSysVenId (DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId)
+void SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId)
{
PBYTE pbyData;
pbyData = (PBYTE)pdwSubSysVenId;
- // sub vendor
+ /* sub vendor */
*pbyData = SROMbyReadEmbedded(dwIoBase, 6);
*(pbyData+1) = SROMbyReadEmbedded(dwIoBase, 7);
- // sub system
+ /* sub system */
*(pbyData+2) = SROMbyReadEmbedded(dwIoBase, 8);
*(pbyData+3) = SROMbyReadEmbedded(dwIoBase, 9);
}
@@ -389,7 +389,7 @@ void SROMvReadSubSysVenId (DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId)
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL SROMbAutoLoad (DWORD_PTR dwIoBase)
+BOOL SROMbAutoLoad(DWORD_PTR dwIoBase)
{
BYTE byWait;
int ii;
@@ -397,12 +397,12 @@ BOOL SROMbAutoLoad (DWORD_PTR dwIoBase)
BYTE byOrg;
VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
- // turn on hardware retry
+ /* turn on hardware retry */
VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg | I2MCFG_NORETRY));
MACvRegBitsOn(dwIoBase, MAC_REG_I2MCSR, I2MCSR_AUTOLD);
- // ii = Rom Address
+ /* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
MACvTimer0MicroSDelay(dwIoBase, CB_EEPROM_READBYTE_WAIT);
VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index ba123ee61d2..dbb3f5efe97 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -150,7 +150,7 @@ void SROMvWriteAllContents(DWORD_PTR dwIoBase, PBYTE pbyEepromRegs);
void SROMvReadEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress);
void SROMvWriteEtherAddress(DWORD_PTR dwIoBase, PBYTE pbyEtherAddress);
-VOID SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId);
+void SROMvReadSubSysVenId(DWORD_PTR dwIoBase, PDWORD pdwSubSysVenId);
BOOL SROMbAutoLoad (DWORD_PTR dwIoBase);
diff --git a/drivers/staging/vt6655/tether.c b/drivers/staging/vt6655/tether.c
index c90b469ad54..d8ba67395cb 100644
--- a/drivers/staging/vt6655/tether.c
+++ b/drivers/staging/vt6655/tether.c
@@ -68,7 +68,7 @@ BYTE ETHbyGetHashIndexByCrc32 (PBYTE pbyMultiAddr)
BYTE byHash = 0;
// get the least 6-bits from CRC generator
- byTmpHash = (BYTE)(CRCdwCrc32(pbyMultiAddr, U_ETHER_ADDR_LEN,
+ byTmpHash = (BYTE)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
0xFFFFFFFFL) & 0x3F);
// reverse most bit to least bit
for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
diff --git a/drivers/staging/vt6655/tether.h b/drivers/staging/vt6655/tether.h
index 5a3c326436c..3c9acd7903a 100644
--- a/drivers/staging/vt6655/tether.h
+++ b/drivers/staging/vt6655/tether.h
@@ -29,30 +29,23 @@
#ifndef __TETHER_H__
#define __TETHER_H__
+#include <linux/if_ether.h>
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
//
// constants
//
-#define U_ETHER_ADDR_LEN 6 // Ethernet address length
-#define U_TYPE_LEN 2 //
-#define U_CRC_LEN 4 //
-#define U_HEADER_LEN (U_ETHER_ADDR_LEN * 2 + U_TYPE_LEN)
-#define U_ETHER_ADDR_STR_LEN (U_ETHER_ADDR_LEN * 2 + 1)
+#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
// Ethernet address string length
#define MIN_DATA_LEN 46 // min data length
-#define MAX_DATA_LEN 1500 // max data length
-#define MIN_PACKET_LEN (MIN_DATA_LEN + U_HEADER_LEN)
+#define MIN_PACKET_LEN (MIN_DATA_LEN + ETH_HLEN)
// 60
// min total packet length (tx)
-#define MAX_PACKET_LEN (MAX_DATA_LEN + U_HEADER_LEN)
- // 1514
- // max total packet length (tx)
-#define MAX_LOOKAHEAD_SIZE MAX_PACKET_LEN
+#define MAX_LOOKAHEAD_SIZE ETH_FRAME_LEN
#define U_MULTI_ADDR_LEN 8 // multicast address length
@@ -167,8 +160,8 @@
// Ethernet packet
//
typedef struct tagSEthernetHeader {
- BYTE abyDstAddr[U_ETHER_ADDR_LEN];
- BYTE abySrcAddr[U_ETHER_ADDR_LEN];
+ BYTE abyDstAddr[ETH_ALEN];
+ BYTE abySrcAddr[ETH_ALEN];
WORD wType;
}__attribute__ ((__packed__))
SEthernetHeader, *PSEthernetHeader;
@@ -178,8 +171,8 @@ SEthernetHeader, *PSEthernetHeader;
// 802_3 packet
//
typedef struct tagS802_3Header {
- BYTE abyDstAddr[U_ETHER_ADDR_LEN];
- BYTE abySrcAddr[U_ETHER_ADDR_LEN];
+ BYTE abyDstAddr[ETH_ALEN];
+ BYTE abySrcAddr[ETH_ALEN];
WORD wLen;
}__attribute__ ((__packed__))
S802_3Header, *PS802_3Header;
@@ -190,11 +183,11 @@ S802_3Header, *PS802_3Header;
typedef struct tagS802_11Header {
WORD wFrameCtl;
WORD wDurationID;
- BYTE abyAddr1[U_ETHER_ADDR_LEN];
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
- BYTE abyAddr3[U_ETHER_ADDR_LEN];
+ BYTE abyAddr1[ETH_ALEN];
+ BYTE abyAddr2[ETH_ALEN];
+ BYTE abyAddr3[ETH_ALEN];
WORD wSeqCtl;
- BYTE abyAddr4[U_ETHER_ADDR_LEN];
+ BYTE abyAddr4[ETH_ALEN];
}__attribute__ ((__packed__))
S802_11Header, *PS802_11Header;
diff --git a/drivers/staging/vt6655/tkip.c b/drivers/staging/vt6655/tkip.c
index 8ca154080e9..f83af5913aa 100644
--- a/drivers/staging/vt6655/tkip.c
+++ b/drivers/staging/vt6655/tkip.c
@@ -183,7 +183,7 @@ unsigned int rotr1(unsigned int a)
* Return Value: none
*
*/
-VOID TKIPvMixKey(
+void TKIPvMixKey(
PBYTE pbyTKey,
PBYTE pbyTA,
WORD wTSC15_0,
diff --git a/drivers/staging/vt6655/tkip.h b/drivers/staging/vt6655/tkip.h
index 847ecdf97ee..3dfa7f5ee7e 100644
--- a/drivers/staging/vt6655/tkip.h
+++ b/drivers/staging/vt6655/tkip.h
@@ -46,7 +46,7 @@
/*--------------------- Export Functions --------------------------*/
-VOID TKIPvMixKey(
+void TKIPvMixKey(
PBYTE pbyTKey,
PBYTE pbyTA,
WORD wTSC15_0,
diff --git a/drivers/staging/vt6655/ttype.h b/drivers/staging/vt6655/ttype.h
index 4dfad04bb25..2921083a9f2 100644
--- a/drivers/staging/vt6655/ttype.h
+++ b/drivers/staging/vt6655/ttype.h
@@ -33,14 +33,6 @@
/******* Common definitions and typedefs ***********************************/
-#ifndef VOID
-#define VOID void
-#endif
-
-#ifndef IN
-#define IN
-#endif
-
#ifndef OUT
#define OUT
#endif
@@ -58,11 +50,6 @@ typedef int BOOL;
#define FALSE 0
#endif
-
-#if !defined(SUCCESS)
-#define SUCCESS 0
-#endif
-
//2007-0809-01<Add>by MikeLiu
#ifndef update_BssList
#define update_BssList
@@ -141,13 +128,4 @@ typedef DWORD * PDWORD;
typedef QWORD * PQWORD;
-typedef void * PVOID;
-
-// handle declaration
-#ifdef STRICT
-typedef void *HANDLE;
-#else
-typedef PVOID HANDLE;
-#endif
-
#endif // __TTYPE_H__
diff --git a/drivers/staging/vt6655/vntwifi.c b/drivers/staging/vt6655/vntwifi.c
index fbe27a834ce..b527a019188 100644
--- a/drivers/staging/vt6655/vntwifi.c
+++ b/drivers/staging/vt6655/vntwifi.c
@@ -69,10 +69,10 @@
* Return Value: none
*
-*/
-VOID
+void
VNTWIFIvSetOPMode (
- IN PVOID pMgmtHandle,
- IN WMAC_CONFIG_MODE eOPMode
+ void *pMgmtHandle,
+ WMAC_CONFIG_MODE eOPMode
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -98,12 +98,12 @@ VNTWIFIvSetOPMode (
* Return Value: none
*
-*/
-VOID
+void
VNTWIFIvSetIBSSParameter (
- IN PVOID pMgmtHandle,
- IN WORD wBeaconPeriod,
- IN WORD wATIMWindow,
- IN UINT uChannel
+ void *pMgmtHandle,
+ WORD wBeaconPeriod,
+ WORD wATIMWindow,
+ UINT uChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -129,7 +129,7 @@ VNTWIFIvSetIBSSParameter (
-*/
PWLAN_IE_SSID
VNTWIFIpGetCurrentSSID (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -152,7 +152,7 @@ VNTWIFIpGetCurrentSSID (
-*/
UINT
VNTWIFIpGetCurrentChannel (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -178,7 +178,7 @@ VNTWIFIpGetCurrentChannel (
-*/
WORD
VNTWIFIwGetAssocID (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -204,8 +204,8 @@ VNTWIFIwGetAssocID (
-*/
BYTE
VNTWIFIbyGetMaxSupportRate (
- IN PWLAN_IE_SUPP_RATES pSupportRateIEs,
- IN PWLAN_IE_SUPP_RATES pExtSupportRateIEs
+ PWLAN_IE_SUPP_RATES pSupportRateIEs,
+ PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
BYTE byMaxSupportRate = RATE_1M;
@@ -250,9 +250,9 @@ VNTWIFIbyGetMaxSupportRate (
-*/
BYTE
VNTWIFIbyGetACKTxRate (
- IN BYTE byRxDataRate,
- IN PWLAN_IE_SUPP_RATES pSupportRateIEs,
- IN PWLAN_IE_SUPP_RATES pExtSupportRateIEs
+ BYTE byRxDataRate,
+ PWLAN_IE_SUPP_RATES pSupportRateIEs,
+ PWLAN_IE_SUPP_RATES pExtSupportRateIEs
)
{
BYTE byMaxAckRate;
@@ -306,10 +306,10 @@ VNTWIFIbyGetACKTxRate (
* Return Value: none
*
-*/
-VOID
+void
VNTWIFIvSetAuthenticationMode (
- IN PVOID pMgmtHandle,
- IN WMAC_AUTHENTICATION_MODE eAuthMode
+ void *pMgmtHandle,
+ WMAC_AUTHENTICATION_MODE eAuthMode
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -338,10 +338,10 @@ VNTWIFIvSetAuthenticationMode (
* Return Value: none
*
-*/
-VOID
+void
VNTWIFIvSetEncryptionMode (
- IN PVOID pMgmtHandle,
- IN WMAC_ENCRYPTION_MODE eEncryptionMode
+ void *pMgmtHandle,
+ WMAC_ENCRYPTION_MODE eEncryptionMode
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -360,8 +360,8 @@ VNTWIFIvSetEncryptionMode (
BOOL
VNTWIFIbConfigPhyMode (
- IN PVOID pMgmtHandle,
- IN CARD_PHY_TYPE ePhyType
+ void *pMgmtHandle,
+ CARD_PHY_TYPE ePhyType
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -379,10 +379,10 @@ VNTWIFIbConfigPhyMode (
}
-VOID
+void
VNTWIFIbGetConfigPhyMode (
- IN PVOID pMgmtHandle,
- OUT PVOID pePhyType
+ void *pMgmtHandle,
+ void *pePhyType
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -424,11 +424,11 @@ VNTWIFIbGetConfigPhyMode (
*
-*/
-VOID
+void
VNTWIFIvQueryBSSList (
- IN PVOID pMgmtHandle,
- OUT PUINT puBSSCount,
- OUT PVOID *pvFirstBSS
+ void *pMgmtHandle,
+ PUINT puBSSCount,
+ void **pvFirstBSS
)
{
UINT ii = 0;
@@ -454,11 +454,11 @@ VNTWIFIvQueryBSSList (
-VOID
+void
VNTWIFIvGetNextBSS (
- IN PVOID pMgmtHandle,
- IN PVOID pvCurrentBSS,
- OUT PVOID *pvNextBSS
+ void *pMgmtHandle,
+ void *pvCurrentBSS,
+ void **pvNextBSS
)
{
PKnownBSS pBSS = (PKnownBSS) pvCurrentBSS;
@@ -494,13 +494,13 @@ VNTWIFIvGetNextBSS (
* Return Value: none
*
-*/
-VOID
+void
VNTWIFIvUpdateNodeTxCounter(
- IN PVOID pMgmtHandle,
- IN PBYTE pbyDestAddress,
- IN BOOL bTxOk,
- IN WORD wRate,
- IN PBYTE pbyTxFailCount
+ void *pMgmtHandle,
+ PBYTE pbyDestAddress,
+ BOOL bTxOk,
+ WORD wRate,
+ PBYTE pbyTxFailCount
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -529,14 +529,14 @@ VNTWIFIvUpdateNodeTxCounter(
}
-VOID
+void
VNTWIFIvGetTxRate(
- IN PVOID pMgmtHandle,
- IN PBYTE pbyDestAddress,
- OUT PWORD pwTxDataRate,
- OUT PBYTE pbyACKRate,
- OUT PBYTE pbyCCKBasicRate,
- OUT PBYTE pbyOFDMBasicRate
+ void *pMgmtHandle,
+ PBYTE pbyDestAddress,
+ PWORD pwTxDataRate,
+ PBYTE pbyACKRate,
+ PBYTE pbyCCKBasicRate,
+ PBYTE pbyOFDMBasicRate
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -603,8 +603,8 @@ VNTWIFIvGetTxRate(
BYTE
VNTWIFIbyGetKeyCypher(
- IN PVOID pMgmtHandle,
- IN BOOL bGroupKey
+ void *pMgmtHandle,
+ BOOL bGroupKey
)
{
PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle;
@@ -620,8 +620,8 @@ VNTWIFIbyGetKeyCypher(
/*
BOOL
VNTWIFIbInit(
- IN PVOID pAdapterHandler,
- OUT PVOID *pMgmtHandler
+ void *pAdapterHandler,
+ void **pMgmtHandler
)
{
@@ -636,7 +636,7 @@ VNTWIFIbInit(
}
memset(pMgmt, 0, sizeof(SMgmtObject));
- pMgmt->pAdapter = (PVOID) pAdapterHandler;
+ pMgmt->pAdapter = (void *) pAdapterHandler;
// should initial MAC address abyMACAddr
for(ii=0;ii<WLAN_BSSID_LEN;ii++) {
@@ -664,9 +664,9 @@ VNTWIFIbInit(
BOOL
VNTWIFIbSetPMKIDCache (
- IN PVOID pMgmtObject,
- IN ULONG ulCount,
- IN PVOID pPMKIDInfo
+ void *pMgmtObject,
+ ULONG ulCount,
+ void *pPMKIDInfo
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -683,7 +683,7 @@ VNTWIFIbSetPMKIDCache (
WORD
VNTWIFIwGetMaxSupportRate(
- IN PVOID pMgmtObject
+ void *pMgmtObject
)
{
WORD wRate = RATE_54M;
@@ -702,10 +702,10 @@ VNTWIFIwGetMaxSupportRate(
}
-VOID
+void
VNTWIFIvSet11h (
- IN PVOID pMgmtObject,
- IN BOOL b11hEnable
+ void *pMgmtObject,
+ BOOL b11hEnable
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -715,13 +715,13 @@ VNTWIFIvSet11h (
BOOL
VNTWIFIbMeasureReport(
- IN PVOID pMgmtObject,
- IN BOOL bEndOfReport,
- IN PVOID pvMeasureEID,
- IN BYTE byReportMode,
- IN BYTE byBasicMap,
- IN BYTE byCCAFraction,
- IN PBYTE pbyRPIs
+ void *pMgmtObject,
+ BOOL bEndOfReport,
+ void *pvMeasureEID,
+ BYTE byReportMode,
+ BYTE byBasicMap,
+ BYTE byCCAFraction,
+ PBYTE pbyRPIs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -775,8 +775,8 @@ VNTWIFIbMeasureReport(
BOOL
VNTWIFIbChannelSwitch(
- IN PVOID pMgmtObject,
- IN BYTE byNewChannel
+ void *pMgmtObject,
+ BYTE byNewChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
@@ -791,8 +791,8 @@ VNTWIFIbChannelSwitch(
/*
BOOL
VNTWIFIbRadarPresent(
- IN PVOID pMgmtObject,
- IN BYTE byChannel
+ void *pMgmtObject,
+ BYTE byChannel
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject;
diff --git a/drivers/staging/vt6655/vntwifi.h b/drivers/staging/vt6655/vntwifi.h
index 2854dfcb19a..c91dfd79adc 100644
--- a/drivers/staging/vt6655/vntwifi.h
+++ b/drivers/staging/vt6655/vntwifi.h
@@ -140,123 +140,123 @@ typedef enum tagWMAC_POWER_MODE {
/*--------------------- Export Functions --------------------------*/
-VOID
+void
VNTWIFIvSetIBSSParameter (
- IN PVOID pMgmtHandle,
- IN WORD wBeaconPeriod,
- IN WORD wATIMWindow,
- IN UINT uChannel
+ void *pMgmtHandle,
+ WORD wBeaconPeriod,
+ WORD wATIMWindow,
+ UINT uChannel
);
-VOID
+void
VNTWIFIvSetOPMode (
- IN PVOID pMgmtHandle,
- IN WMAC_CONFIG_MODE eOPMode
+ void *pMgmtHandle,
+ WMAC_CONFIG_MODE eOPMode
);
PWLAN_IE_SSID
VNTWIFIpGetCurrentSSID(
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
);
UINT
VNTWIFIpGetCurrentChannel(
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
);
WORD
VNTWIFIwGetAssocID (
- IN PVOID pMgmtHandle
+ void *pMgmtHandle
);
BYTE
VNTWIFIbyGetMaxSupportRate (
- IN PWLAN_IE_SUPP_RATES pSupportRateIEs,
- IN PWLAN_IE_SUPP_RATES pExtSupportRateIEs
+ PWLAN_IE_SUPP_RATES pSupportRateIEs,
+ PWLAN_IE_SUPP_RATES pExtSupportRateIEs
);
BYTE
VNTWIFIbyGetACKTxRate (
- IN BYTE byRxDataRate,
- IN PWLAN_IE_SUPP_RATES pSupportRateIEs,
- IN PWLAN_IE_SUPP_RATES pExtSupportRateIEs
+ BYTE byRxDataRate,
+ PWLAN_IE_SUPP_RATES pSupportRateIEs,
+ PWLAN_IE_SUPP_RATES pExtSupportRateIEs
);
-VOID
+void
VNTWIFIvSetAuthenticationMode (
- IN PVOID pMgmtHandle,
- IN WMAC_AUTHENTICATION_MODE eAuthMode
+ void *pMgmtHandle,
+ WMAC_AUTHENTICATION_MODE eAuthMode
);
-VOID
+void
VNTWIFIvSetEncryptionMode (
- IN PVOID pMgmtHandle,
- IN WMAC_ENCRYPTION_MODE eEncryptionMode
+ void *pMgmtHandle,
+ WMAC_ENCRYPTION_MODE eEncryptionMode
);
BOOL
VNTWIFIbConfigPhyMode(
- IN PVOID pMgmtHandle,
- IN CARD_PHY_TYPE ePhyType
+ void *pMgmtHandle,
+ CARD_PHY_TYPE ePhyType
);
-VOID
+void
VNTWIFIbGetConfigPhyMode(
- IN PVOID pMgmtHandle,
- OUT PVOID pePhyType
+ void *pMgmtHandle,
+ void *pePhyType
);
-VOID
+void
VNTWIFIvQueryBSSList(
- IN PVOID pMgmtHandle,
- OUT PUINT puBSSCount,
- OUT PVOID *pvFirstBSS
+ void *pMgmtHandle,
+ PUINT puBSSCount,
+ void **pvFirstBSS
);
-VOID
+void
VNTWIFIvGetNextBSS (
- IN PVOID pMgmtHandle,
- IN PVOID pvCurrentBSS,
- OUT PVOID *pvNextBSS
+ void *pMgmtHandle,
+ void *pvCurrentBSS,
+ void **pvNextBSS
);
-VOID
+void
VNTWIFIvUpdateNodeTxCounter(
- IN PVOID pMgmtHandle,
- IN PBYTE pbyDestAddress,
- IN BOOL bTxOk,
- IN WORD wRate,
- IN PBYTE pbyTxFailCount
+ void *pMgmtHandle,
+ PBYTE pbyDestAddress,
+ BOOL bTxOk,
+ WORD wRate,
+ PBYTE pbyTxFailCount
);
-VOID
+void
VNTWIFIvGetTxRate(
- IN PVOID pMgmtHandle,
- IN PBYTE pbyDestAddress,
- OUT PWORD pwTxDataRate,
- OUT PBYTE pbyACKRate,
- OUT PBYTE pbyCCKBasicRate,
- OUT PBYTE pbyOFDMBasicRate
+ void *pMgmtHandle,
+ PBYTE pbyDestAddress,
+ PWORD pwTxDataRate,
+ PBYTE pbyACKRate,
+ PBYTE pbyCCKBasicRate,
+ PBYTE pbyOFDMBasicRate
);
/*
BOOL
VNTWIFIbInit(
- IN PVOID pAdapterHandler,
- OUT PVOID *pMgmtHandler
+ void *pAdapterHandler,
+ void **pMgmtHandler
);
*/
BYTE
VNTWIFIbyGetKeyCypher(
- IN PVOID pMgmtHandle,
- IN BOOL bGroupKey
+ void *pMgmtHandle,
+ BOOL bGroupKey
);
@@ -264,49 +264,49 @@ VNTWIFIbyGetKeyCypher(
BOOL
VNTWIFIbSetPMKIDCache (
- IN PVOID pMgmtObject,
- IN ULONG ulCount,
- IN PVOID pPMKIDInfo
+ void *pMgmtObject,
+ ULONG ulCount,
+ void *pPMKIDInfo
);
BOOL
VNTWIFIbCommandRunning (
- IN PVOID pMgmtObject
+ void *pMgmtObject
);
WORD
VNTWIFIwGetMaxSupportRate(
- IN PVOID pMgmtObject
+ void *pMgmtObject
);
// for 802.11h
-VOID
+void
VNTWIFIvSet11h (
- IN PVOID pMgmtObject,
- IN BOOL b11hEnable
+ void *pMgmtObject,
+ BOOL b11hEnable
);
BOOL
VNTWIFIbMeasureReport(
- IN PVOID pMgmtObject,
- IN BOOL bEndOfReport,
- IN PVOID pvMeasureEID,
- IN BYTE byReportMode,
- IN BYTE byBasicMap,
- IN BYTE byCCAFraction,
- IN PBYTE pbyRPIs
+ void *pMgmtObject,
+ BOOL bEndOfReport,
+ void *pvMeasureEID,
+ BYTE byReportMode,
+ BYTE byBasicMap,
+ BYTE byCCAFraction,
+ PBYTE pbyRPIs
);
BOOL
VNTWIFIbChannelSwitch(
- IN PVOID pMgmtObject,
- IN BYTE byNewChannel
+ void *pMgmtObject,
+ BYTE byNewChannel
);
/*
BOOL
VNTWIFIbRadarPresent(
- IN PVOID pMgmtObject,
- IN BYTE byChannel
+ void *pMgmtObject,
+ BYTE byChannel
);
*/
diff --git a/drivers/staging/vt6655/wcmd.c b/drivers/staging/vt6655/wcmd.c
index c9eabf9995d..28665d870f5 100644
--- a/drivers/staging/vt6655/wcmd.c
+++ b/drivers/staging/vt6655/wcmd.c
@@ -66,21 +66,21 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
static
-VOID
+void
s_vProbeChannel(
- IN PSDevice pDevice
+ PSDevice pDevice
);
static
PSTxMgmtPacket
s_MgrMakeProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pScanBSSID,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pScanBSSID,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -202,9 +202,9 @@ vAdHocBeaconRestart(PSDevice pDevice)
-*/
static
-VOID
+void
s_vProbeChannel(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
@@ -267,12 +267,12 @@ s_vProbeChannel(
PSTxMgmtPacket
s_MgrMakeProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pScanBSSID,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pScanBSSID,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
@@ -317,10 +317,10 @@ s_MgrMakeProbeRequest(
-VOID
+void
vCommandTimerWait(
- IN HANDLE hDeviceContext,
- IN UINT MSecond
+ void *hDeviceContext,
+ UINT MSecond
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -337,9 +337,9 @@ vCommandTimerWait(
-VOID
+void
vCommandTimer (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -382,7 +382,7 @@ vCommandTimer (
// wait all Data TD complete
if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, 10);
+ vCommandTimerWait((void *)pDevice, 10);
return;
};
@@ -424,7 +424,7 @@ vCommandTimer (
pMgmt->abyScanBSSID[5] = 0xFF;
pItemSSID->byElementID = WLAN_EID_SSID;
// clear bssid list
- // BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
+ // BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
pMgmt->eScanState = WMAC_IS_SCANNING;
}
@@ -453,11 +453,11 @@ vCommandTimer (
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, WCMD_ACTIVE_SCAN_TIME);
+ vCommandTimerWait((void *)pDevice, WCMD_ACTIVE_SCAN_TIME);
return;
} else {
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, WCMD_PASSIVE_SCAN_TIME);
+ vCommandTimerWait((void *)pDevice, WCMD_PASSIVE_SCAN_TIME);
return;
}
@@ -501,7 +501,7 @@ vCommandTimer (
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
// reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
+ vMgrDisassocBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
pDevice->bLinkPass = FALSE;
// unlock command busy
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
@@ -515,7 +515,7 @@ vCommandTimer (
pDevice->eCommandState = WLAN_DISASSOCIATE_WAIT;
// wait all Control TD complete
if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
- vCommandTimerWait((HANDLE)pDevice, 10);
+ vCommandTimerWait((void *)pDevice, 10);
spin_unlock_irq(&pDevice->lock);
return;
};
@@ -528,7 +528,7 @@ vCommandTimer (
case WLAN_DISASSOCIATE_WAIT :
// wait all Control TD complete
if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
- vCommandTimerWait((HANDLE)pDevice, 10);
+ vCommandTimerWait((void *)pDevice, 10);
spin_unlock_irq(&pDevice->lock);
return;
};
@@ -578,24 +578,24 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
// set initial state
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((HANDLE)pDevice);
+ PSvDisablePowerSaving((void *)pDevice);
BSSvClearNodeDBTable(pDevice, 0);
- vMgrJoinBSSBegin((HANDLE)pDevice, &Status);
+ vMgrJoinBSSBegin((void *)pDevice, &Status);
// if Infra mode
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
// Call mgr to begin the deauthentication
// reason = (3) beacuse sta has left ESS
if (pMgmt->eCurrState>= WMAC_STATE_AUTH) {
- vMgrDeAuthenBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
+ vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
}
// Call mgr to begin the authentication
- vMgrAuthenBeginSta((HANDLE)pDevice, pMgmt, &Status);
+ vMgrAuthenBeginSta((void *)pDevice, pMgmt, &Status);
if (Status == CMD_STATUS_SUCCESS) {
pDevice->byLinkWaitCount = 0;
pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT);
+ vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT);
spin_unlock_irq(&pDevice->lock);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
return;
@@ -615,7 +615,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
}
else {
// start own IBSS
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
+ vMgrCreateOwnIBSS((void *)pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " WLAN_CMD_IBSS_CREATE fail ! \n");
};
@@ -627,7 +627,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
// start own IBSS
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
+ vMgrCreateOwnIBSS((void *)pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_IBSS_CREATE fail ! \n");
};
@@ -661,12 +661,12 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
// Call mgr to begin the association
pDevice->byLinkWaitCount = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((HANDLE)pDevice, pMgmt, &Status);
+ vMgrAssocBeginSta((void *)pDevice, pMgmt, &Status);
if (Status == CMD_STATUS_SUCCESS) {
pDevice->byLinkWaitCount = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT);
+ vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT);
spin_unlock_irq(&pDevice->lock);
return;
}
@@ -679,7 +679,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pDevice->byLinkWaitCount ++;
printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT/2);
+ vCommandTimerWait((void *)pDevice, AUTHENTICATE_TIMEOUT/2);
return;
}
pDevice->byLinkWaitCount = 0;
@@ -702,7 +702,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
if (pDevice->ePSMode != WMAC_POWER_CAM) {
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
}
if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
KeybRemoveAllKey(&(pDevice->sKey), pDevice->abyBSSID, pDevice->PortOffset);
@@ -743,7 +743,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pDevice->byLinkWaitCount ++;
printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT/2);
+ vCommandTimerWait((void *)pDevice, ASSOCIATE_TIMEOUT/2);
return;
}
pDevice->byLinkWaitCount = 0;
@@ -779,7 +779,7 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
pMgmt->eCurrState = WMAC_STATE_IDLE;
pDevice->bFixRate = FALSE;
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
+ vMgrCreateOwnIBSS((void *)pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " vMgrCreateOwnIBSS fail ! \n");
};
@@ -869,12 +869,12 @@ printk("chester-abyDesireSSID=%s\n",((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySS
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_CHECK_BBSENSITIVITY_START\n");
// wait all TD complete
if (pDevice->iTDUsed[TYPE_AC0DMA] != 0){
- vCommandTimerWait((HANDLE)pDevice, 10);
+ vCommandTimerWait((void *)pDevice, 10);
spin_unlock_irq(&pDevice->lock);
return;
}
if (pDevice->iTDUsed[TYPE_TXDMA0] != 0){
- vCommandTimerWait((HANDLE)pDevice, 10);
+ vCommandTimerWait((void *)pDevice, 10);
spin_unlock_irq(&pDevice->lock);
return;
}
@@ -971,7 +971,7 @@ s_bCommandComplete (
}
- vCommandTimerWait((HANDLE)pDevice, 0);
+ vCommandTimerWait((void *)pDevice, 0);
}
return TRUE;
@@ -980,9 +980,9 @@ s_bCommandComplete (
BOOL bScheduleCommand (
- IN HANDLE hDeviceContext,
- IN CMD_CODE eCommand,
- IN PBYTE pbyItem0
+ void *hDeviceContext,
+ CMD_CODE eCommand,
+ PBYTE pbyItem0
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1061,7 +1061,7 @@ BOOL bScheduleCommand (
*
*/
BOOL bClearBSSID_SCAN (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1081,9 +1081,9 @@ BOOL bClearBSSID_SCAN (
}
//mike add:reset command timer
-VOID
+void
vResetCommandTimer(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1105,9 +1105,9 @@ vResetCommandTimer(
#ifdef TxInSleep
-VOID
+void
BSSvSecondTxData(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
diff --git a/drivers/staging/vt6655/wcmd.h b/drivers/staging/vt6655/wcmd.h
index af32e57e335..c3c41808951 100644
--- a/drivers/staging/vt6655/wcmd.h
+++ b/drivers/staging/vt6655/wcmd.h
@@ -109,36 +109,36 @@ typedef enum tagCMD_STATE {
/*--------------------- Export Functions --------------------------*/
-VOID
+void
vResetCommandTimer(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
vCommandTimer (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
BOOL bClearBSSID_SCAN(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
BOOL
bScheduleCommand(
- IN HANDLE hDeviceContext,
- IN CMD_CODE eCommand,
- IN PBYTE pbyItem0
+ void *hDeviceContext,
+ CMD_CODE eCommand,
+ PBYTE pbyItem0
);
-VOID
+void
vCommandTimerWait(
- IN HANDLE hDeviceContext,
- IN UINT MSecond
+ void *hDeviceContext,
+ UINT MSecond
);
#ifdef TxInSleep
-VOID
+void
BSSvSecondTxData(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
#endif
diff --git a/drivers/staging/vt6655/wctl.c b/drivers/staging/vt6655/wctl.c
index 4406f8caa55..64a66b2f1fc 100644
--- a/drivers/staging/vt6655/wctl.c
+++ b/drivers/staging/vt6655/wctl.c
@@ -89,7 +89,7 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
/* Not fount in cache - insert */
pCacheEntry = &pCache->asCacheEntry[pCache->uInPtr];
pCacheEntry->wFmSequence = pMACHeader->wSeqCtl;
- memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
return FALSE;
}
@@ -151,7 +151,7 @@ UINT ii;
pDevice->sRxDFCB[ii].bInUse = TRUE;
pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
return(ii);
}
}
diff --git a/drivers/staging/vt6655/wmgr.c b/drivers/staging/vt6655/wmgr.c
index 659be05a33e..8af356fd139 100644
--- a/drivers/staging/vt6655/wmgr.c
+++ b/drivers/staging/vt6655/wmgr.c
@@ -94,160 +94,160 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
//2008-8-4 <add> by chester
static BOOL ChannelExceedZoneType(
- IN PSDevice pDevice,
- IN BYTE byCurrChannel
+ PSDevice pDevice,
+ BYTE byCurrChannel
);
// Association/diassociation functions
static
PSTxMgmtPacket
s_MgrMakeAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
static
-VOID
+void
s_vMgrRxAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ UINT uNodeIndex
);
static
PSTxMgmtPacket
s_MgrMakeReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
static
-VOID
+void
s_vMgrRxAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bReAssocType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bReAssocType
);
static
-VOID
+void
s_vMgrRxDisassociation(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// Authentication/deauthen functions
static
-VOID
+void
s_vMgrRxAuthenSequence_1(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_2(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_3(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_4(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
static
-VOID
+void
s_vMgrRxDeauthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// Scan functions
// probe request/response functions
static
-VOID
+void
s_vMgrRxProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
static
-VOID
+void
s_vMgrRxProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// beacon functions
static
-VOID
+void
s_vMgrRxBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bInScan
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bInScan
);
static
-VOID
+void
s_vMgrFormatTIM(
- IN PSMgmtObject pMgmt,
- IN PWLAN_IE_TIM pTIM
+ PSMgmtObject pMgmt,
+ PWLAN_IE_TIM pTIM
);
static
PSTxMgmtPacket
s_MgrMakeBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ UINT uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -255,78 +255,78 @@ s_MgrMakeBeacon(
static
PSTxMgmtPacket
s_MgrMakeAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
// ReAssociation response
static
PSTxMgmtPacket
s_MgrMakeReAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
// Probe response
static
PSTxMgmtPacket
s_MgrMakeProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- IN BYTE byPHYType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ UINT uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PBYTE pDstAddr,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
+ BYTE byPHYType
);
// received status
static
-VOID
+void
s_vMgrLogStatus(
- IN PSMgmtObject pMgmt,
- IN WORD wStatus
+ PSMgmtObject pMgmt,
+ WORD wStatus
);
static
-VOID
+void
s_vMgrSynchBSS (
- IN PSDevice pDevice,
- IN UINT uBSSMode,
- IN PKnownBSS pCurr,
- OUT PCMD_STATUS pStatus
+ PSDevice pDevice,
+ UINT uBSSMode,
+ PKnownBSS pCurr,
+ PCMD_STATUS pStatus
);
static BOOL
s_bCipherMatch (
- IN PKnownBSS pBSSNode,
- IN NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- OUT PBYTE pbyCCSPK,
- OUT PBYTE pbyCCSGK
+ PKnownBSS pBSSNode,
+ NDIS_802_11_ENCRYPTION_STATUS EncStatus,
+ PBYTE pbyCCSPK,
+ PBYTE pbyCCSGK
);
- static VOID Encyption_Rebuild(
- IN PSDevice pDevice,
- IN PKnownBSS pCurr
+ static void Encyption_Rebuild(
+ PSDevice pDevice,
+ PKnownBSS pCurr
);
@@ -347,9 +347,9 @@ s_bCipherMatch (
*
-*/
-VOID
+void
vMgrObjectInit(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -368,7 +368,7 @@ vMgrObjectInit(
pMgmt->byCSSPK = KEY_CTL_NONE;
pMgmt->byCSSGK = KEY_CTL_NONE;
pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((HANDLE)pDevice, FALSE);
+ BSSvClearBSSList((void *)pDevice, FALSE);
return;
}
@@ -385,7 +385,7 @@ vMgrObjectInit(
void
vMgrTimerInit(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -431,9 +431,9 @@ vMgrTimerInit(
*
-*/
-VOID
+void
vMgrObjectReset(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -460,11 +460,11 @@ vMgrObjectReset(
-*/
-VOID
+void
vMgrAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -536,11 +536,11 @@ vMgrAssocBeginSta(
*
-*/
-VOID
+void
vMgrReAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -615,13 +615,13 @@ vMgrReAssocBeginSta(
*
-*/
-VOID
+void
vMgrDisassocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -678,12 +678,12 @@ vMgrDisassocBeginSta(
-*/
static
-VOID
+void
s_vMgrRxAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ UINT uNodeIndex
)
{
WLAN_FR_ASSOCREQ sFrame;
@@ -736,7 +736,7 @@ s_vMgrRxAssocRequest(
}
- RATEvParseMaxRate((PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
FALSE, // do not change our basic rate
@@ -840,12 +840,12 @@ s_vMgrRxAssocRequest(
-*/
static
-VOID
+void
s_vMgrRxReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ UINT uNodeIndex
)
{
WLAN_FR_REASSOCREQ sFrame;
@@ -895,7 +895,7 @@ s_vMgrRxReAssocRequest(
}
- RATEvParseMaxRate((PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
FALSE, // do not change our basic rate
@@ -990,12 +990,12 @@ s_vMgrRxReAssocRequest(
-*/
static
-VOID
+void
s_vMgrRxAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bReAssocType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bReAssocType
)
{
WLAN_FR_ASSOCRESP sFrame;
@@ -1041,7 +1041,7 @@ s_vMgrRxAssocResponse(
};
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Association Successful, AID=%d.\n", pMgmt->wCurrAID & ~(BIT14|BIT15));
pMgmt->eCurrState = WMAC_STATE_ASSOC;
- BSSvUpdateAPNode((HANDLE)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
+ BSSvUpdateAPNode((void *)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Link with AP(SSID): %s\n", pItemSSID->abySSID);
pDevice->bLinkPass = TRUE;
@@ -1150,11 +1150,11 @@ if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
*
-*/
-VOID
+void
vMgrAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1208,13 +1208,13 @@ vMgrAuthenBeginSta(
*
-*/
-VOID
+void
vMgrDeAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1265,11 +1265,11 @@ vMgrDeAuthenBeginSta(
-*/
static
-VOID
+void
s_vMgrRxAuthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_AUTHEN sFrame;
@@ -1323,11 +1323,11 @@ s_vMgrRxAuthentication(
static
-VOID
+void
s_vMgrRxAuthenSequence_1(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -1429,11 +1429,11 @@ s_vMgrRxAuthenSequence_1(
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_2(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
WLAN_FR_AUTHEN sFrame;
@@ -1455,7 +1455,7 @@ s_vMgrRxAuthenSequence_2(
}
if (pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
+// vCommandTimerWait((void *)pDevice, 0);
// spin_lock_irq(&pDevice->lock);
}
@@ -1502,7 +1502,7 @@ s_vMgrRxAuthenSequence_2(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:rx Auth_reply sequence_2 status error ...\n");
if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
+// vCommandTimerWait((void *)pDevice, 0);
// spin_lock_irq(&pDevice->lock);
}
s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
@@ -1531,11 +1531,11 @@ s_vMgrRxAuthenSequence_2(
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_3(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -1619,11 +1619,11 @@ reply:
*
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_4(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
@@ -1640,7 +1640,7 @@ s_vMgrRxAuthenSequence_4(
if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
+// vCommandTimerWait((void *)pDevice, 0);
// spin_lock_irq(&pDevice->lock);
}
@@ -1658,11 +1658,11 @@ s_vMgrRxAuthenSequence_4(
-*/
static
-VOID
+void
s_vMgrRxDisassociation(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_DISASSOC sFrame;
@@ -1737,11 +1737,11 @@ s_vMgrRxDisassociation(
-*/
static
-VOID
+void
s_vMgrRxDeauthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_DEAUTHEN sFrame;
@@ -1827,8 +1827,8 @@ s_vMgrRxDeauthentication(
-*/
static BOOL
ChannelExceedZoneType(
- IN PSDevice pDevice,
- IN BYTE byCurrChannel
+ PSDevice pDevice,
+ BYTE byCurrChannel
)
{
BOOL exceed=FALSE;
@@ -1863,12 +1863,12 @@ ChannelExceedZoneType(
-*/
static
-VOID
+void
s_vMgrRxBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bInScan
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bInScan
)
{
@@ -1943,10 +1943,10 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sERP.byERP = 0;
}
- pBSSList = BSSpAddrIsInBSSList((HANDLE)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
+ pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
if (pBSSList == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Beacon/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((HANDLE)pDevice,
+ BSSbInsertToBSSList((void *)pDevice,
sFrame.pHdr->sA3.abyAddr3,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
@@ -1962,12 +1962,12 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sFrame.pIE_Quiet,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (HANDLE)pRxPacket
+ (void *)pRxPacket
);
}
else {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"update bcn: RxChannel = : %d\n", byCurrChannel);
- BSSbUpdateToBSSList((HANDLE)pDevice,
+ BSSbUpdateToBSSList((void *)pDevice,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
*sFrame.pwCapInfo,
@@ -1984,7 +1984,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pBSSList,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of probresponse
- (HANDLE)pRxPacket
+ (void *)pRxPacket
);
}
@@ -2091,7 +2091,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abyExtSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
uRateLen);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -2256,7 +2256,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
TRUE,
@@ -2277,7 +2277,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate( (void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
TRUE,
@@ -2353,7 +2353,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// set highest basic rate
// s_vSetHighestBasicRate(pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates);
// Prepare beacon frame
- bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+ bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
// }
};
}
@@ -2384,10 +2384,10 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
* CMD_STATUS
*
-*/
-VOID
+void
vMgrCreateOwnIBSS(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -2485,7 +2485,7 @@ vMgrCreateOwnIBSS(
// set basic rate
- RATEvParseMaxRate((PVOID)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
+ RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2629,7 +2629,7 @@ vMgrCreateOwnIBSS(
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Prepare beacon to send
- if (bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt)) {
+ if (bMgrPrepareBeaconToSend((void *)pDevice, pMgmt)) {
*pStatus = CMD_STATUS_SUCCESS;
}
@@ -2651,10 +2651,10 @@ vMgrCreateOwnIBSS(
*
-*/
-VOID
+void
vMgrJoinBSSBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
)
{
@@ -2781,7 +2781,7 @@ vMgrJoinBSSBegin(
}
}
- RATEvParseMaxRate((PVOID)pDevice, pItemRates, pItemExtRates, TRUE,
+ RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, TRUE,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2802,12 +2802,12 @@ vMgrJoinBSSBegin(
// Add current BSS to Candidate list
// This should only works for WPA2 BSS, and WPA2 BSS check must be done before.
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- BOOL bResult = bAdd_PMKID_Candidate((HANDLE)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
+ BOOL bResult = bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate: 1(%d)\n", bResult);
if (bResult == FALSE) {
- vFlush_PMKID_Candidate((HANDLE)pDevice);
+ vFlush_PMKID_Candidate((void *)pDevice);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFlush_PMKID_Candidate: 4\n");
- bAdd_PMKID_Candidate((HANDLE)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
+ bAdd_PMKID_Candidate((void *)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
}
}
@@ -2867,7 +2867,7 @@ vMgrJoinBSSBegin(
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
// set basic rate
- RATEvParseMaxRate((PVOID)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
+ RATEvParseMaxRate((void *)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2898,7 +2898,7 @@ vMgrJoinBSSBegin(
// and if registry setting is short preamble we can turn on too.
// Prepare beacon
- bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+ bMgrPrepareBeaconToSend((void *)pDevice, pMgmt);
}
else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -2920,12 +2920,12 @@ vMgrJoinBSSBegin(
*
-*/
static
-VOID
+void
s_vMgrSynchBSS (
- IN PSDevice pDevice,
- IN UINT uBSSMode,
- IN PKnownBSS pCurr,
- OUT PCMD_STATUS pStatus
+ PSDevice pDevice,
+ UINT uBSSMode,
+ PKnownBSS pCurr,
+ PCMD_STATUS pStatus
)
{
CARD_PHY_TYPE ePhyType = PHY_TYPE_11B;
@@ -2967,7 +2967,7 @@ s_vMgrSynchBSS (
pDevice->byPreambleType = 0;
pDevice->wBasicRate = 0;
// Set Basic Rate
- CARDbAddBasicRate((PVOID)pDevice, RATE_1M);
+ CARDbAddBasicRate((void *)pDevice, RATE_1M);
// calculate TSF offset
// TSF Offset = Received Timestamp TSF - Marked Local's TSF
CARDbUpdateTSF(pDevice, pCurr->byRxRate, pCurr->qwBSSTimestamp, pCurr->qwLocalTSF);
@@ -3088,9 +3088,9 @@ s_vMgrSynchBSS (
//mike add: fix NetworkManager 0.7.0 hidden ssid mode in WPA encryption
// ,need reset eAuthenMode and eEncryptionStatus
- static VOID Encyption_Rebuild(
- IN PSDevice pDevice,
- IN PKnownBSS pCurr
+ static void Encyption_Rebuild(
+ PSDevice pDevice,
+ PKnownBSS pCurr
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -3140,15 +3140,15 @@ s_vMgrSynchBSS (
*
*
* Return Value:
- * VOID
+ * void
*
-*/
static
-VOID
+void
s_vMgrFormatTIM(
- IN PSMgmtObject pMgmt,
- IN PWLAN_IE_TIM pTIM
+ PSMgmtObject pMgmt,
+ PWLAN_IE_TIM pTIM
)
{
BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
@@ -3222,16 +3222,16 @@ s_vMgrFormatTIM(
static
PSTxMgmtPacket
s_MgrMakeBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ UINT uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3451,18 +3451,18 @@ s_MgrMakeBeacon(
PSTxMgmtPacket
s_MgrMakeProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- IN BYTE byPHYType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ UINT uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PBYTE pDstAddr,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
+ BYTE byPHYType
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3640,14 +3640,14 @@ s_MgrMakeProbeResponse(
PSTxMgmtPacket
s_MgrMakeAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3869,7 +3869,7 @@ s_MgrMakeAssocRequest(
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
+ if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
(*pwPMKID) ++;
memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
pbyRSN += 16;
@@ -3915,14 +3915,14 @@ s_MgrMakeAssocRequest(
PSTxMgmtPacket
s_MgrMakeReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -4125,7 +4125,7 @@ s_MgrMakeReAssocRequest(
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
+ if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
(*pwPMKID) ++;
memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
pbyRSN += 16;
@@ -4167,14 +4167,14 @@ s_MgrMakeReAssocRequest(
PSTxMgmtPacket
s_MgrMakeAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -4241,14 +4241,14 @@ s_MgrMakeAssocResponse(
PSTxMgmtPacket
s_MgrMakeReAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -4313,11 +4313,11 @@ s_MgrMakeReAssocResponse(
-*/
static
-VOID
+void
s_vMgrRxProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
PKnownBSS pBSSList = NULL;
@@ -4378,9 +4378,9 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// update or insert the bss
- pBSSList = BSSpAddrIsInBSSList((HANDLE)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
+ pBSSList = BSSpAddrIsInBSSList((void *)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
if (pBSSList) {
- BSSbUpdateToBSSList((HANDLE)pDevice,
+ BSSbUpdateToBSSList((void *)pDevice,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
*sFrame.pwCapInfo,
@@ -4397,12 +4397,12 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pBSSList,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of probresponse
- (HANDLE)pRxPacket
+ (void *)pRxPacket
);
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Probe resp/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((HANDLE)pDevice,
+ BSSbInsertToBSSList((void *)pDevice,
sFrame.pHdr->sA3.abyAddr3,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
@@ -4418,7 +4418,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sFrame.pIE_Quiet,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (HANDLE)pRxPacket
+ (void *)pRxPacket
);
}
return;
@@ -4438,11 +4438,11 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
static
-VOID
+void
s_vMgrRxProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_PROBEREQ sFrame;
@@ -4534,11 +4534,11 @@ s_vMgrRxProbeRequest(
-*/
-VOID
+void
vMgrRxManagePacket(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -4685,8 +4685,8 @@ vMgrRxManagePacket(
-*/
BOOL
bMgrPrepareBeaconToSend(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt
+ void *hDeviceContext,
+ PSMgmtObject pMgmt
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -4738,10 +4738,10 @@ bMgrPrepareBeaconToSend(
*
-*/
static
-VOID
+void
s_vMgrLogStatus(
- IN PSMgmtObject pMgmt,
- IN WORD wStatus
+ PSMgmtObject pMgmt,
+ WORD wStatus
)
{
switch( wStatus ){
@@ -4809,9 +4809,9 @@ s_vMgrLogStatus(
-*/
BOOL
bAdd_PMKID_Candidate (
- IN HANDLE hDeviceContext,
- IN PBYTE pbyBSSID,
- IN PSRSNCapObject psRSNCapObj
+ void *hDeviceContext,
+ PBYTE pbyBSSID,
+ PSRSNCapObject psRSNCapObj
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -4831,7 +4831,7 @@ bAdd_PMKID_Candidate (
// Update Old Candidate
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if ( !memcmp(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN)) {
+ if ( !memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) {
pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
} else {
@@ -4848,7 +4848,7 @@ bAdd_PMKID_Candidate (
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- memcpy(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN);
+ memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
return TRUE;
@@ -4868,9 +4868,9 @@ bAdd_PMKID_Candidate (
* Return Value: none.
*
-*/
-VOID
+void
vFlush_PMKID_Candidate (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -4883,10 +4883,10 @@ vFlush_PMKID_Candidate (
static BOOL
s_bCipherMatch (
- IN PKnownBSS pBSSNode,
- IN NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- OUT PBYTE pbyCCSPK,
- OUT PBYTE pbyCCSGK
+ PKnownBSS pBSSNode,
+ NDIS_802_11_ENCRYPTION_STATUS EncStatus,
+ PBYTE pbyCCSPK,
+ PBYTE pbyCCSGK
)
{
BYTE byMulticastCipher = KEY_CTL_INVALID;
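The wmgr.c hunks above are one mechanical conversion applied over and over: the Win32-flavoured VOID/HANDLE/PVOID spellings and the decorative IN/OUT parameter markers give way to plain C. A minimal, self-contained sketch of that change, using hypothetical names rather than any real driver function:

/* Before (Win32-style, as the staging driver was written):
 *   VOID vHandleEvent(IN HANDLE hContext, OUT PCMD_STATUS pStatus);
 * After (plain C, the form every such prototype is rewritten into):
 */
struct cmd_status { int code; };         /* stand-in for the driver's status type */

void vHandleEvent(void *hContext, struct cmd_status *pStatus)
{
        (void)hContext;                  /* opaque device context, unused here */
        pStatus->code = 0;               /* the former "OUT" parameter is just a pointer */
}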
diff --git a/drivers/staging/vt6655/wmgr.h b/drivers/staging/vt6655/wmgr.h
index 1c1f2ea5782..9ae7e0d55bc 100644
--- a/drivers/staging/vt6655/wmgr.h
+++ b/drivers/staging/vt6655/wmgr.h
@@ -249,7 +249,7 @@ typedef struct tagSRxMgmtPacket {
typedef struct tagSMgmtObject
{
- PVOID pAdapter;
+ void * pAdapter;
// MAC address
BYTE abyMACAddr[WLAN_ADDR_LEN];
@@ -401,102 +401,102 @@ typedef struct tagSMgmtObject
void
vMgrObjectInit(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
void
vMgrTimerInit(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
-VOID
+void
vMgrObjectReset(
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
void
vMgrAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrReAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrDisassocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrCreateOwnIBSS(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrJoinBSSBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
);
-VOID
+void
vMgrRxManagePacket(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
/*
-VOID
+void
vMgrScanBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
);
*/
-VOID
+void
vMgrDeAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus
);
BOOL
bMgrPrepareBeaconToSend(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt
+ void *hDeviceContext,
+ PSMgmtObject pMgmt
);
BOOL
bAdd_PMKID_Candidate (
- IN HANDLE hDeviceContext,
- IN PBYTE pbyBSSID,
- IN PSRSNCapObject psRSNCapObj
+ void *hDeviceContext,
+ PBYTE pbyBSSID,
+ PSRSNCapObject psRSNCapObj
);
-VOID
+void
vFlush_PMKID_Candidate (
- IN HANDLE hDeviceContext
+ void *hDeviceContext
);
#endif // __WMGR_H__
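What the dropped HANDLE typedef hid was nothing more than an opaque void pointer: every entry point above immediately casts it back, as in PSDevice pDevice = (PSDevice)hDeviceContext;. A small standalone sketch of that handle-passing idiom, with made-up type and function names:

#include <stdio.h>

struct my_device { int irq; };           /* stand-in for the driver's device struct */

static void timer_init(void *context)    /* callers hand the device over as void * ... */
{
        struct my_device *dev = context; /* ... and the callee casts it back */
        printf("timer armed for irq %d\n", dev->irq);
}

int main(void)
{
        struct my_device dev = { .irq = 11 };
        timer_init(&dev);                /* same call shape as BSSvClearBSSList((void *)pDevice, ...) above */
        return 0;
}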
diff --git a/drivers/staging/vt6655/wpa.c b/drivers/staging/vt6655/wpa.c
index 5da671418b5..da5c814e200 100644
--- a/drivers/staging/vt6655/wpa.c
+++ b/drivers/staging/vt6655/wpa.c
@@ -68,9 +68,9 @@ const BYTE abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
*
-*/
-VOID
+void
WPA_ClearRSN (
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
)
{
int ii;
@@ -104,10 +104,10 @@ WPA_ClearRSN (
* Return Value: none.
*
-*/
-VOID
+void
WPA_ParseRSN (
- IN PKnownBSS pBSSList,
- IN PWLAN_IE_RSN_EXT pRSN
+ PKnownBSS pBSSList,
+ PWLAN_IE_RSN_EXT pRSN
)
{
PWLAN_IE_RSN_AUTH pIE_RSN_Auth = NULL;
@@ -241,7 +241,7 @@ BOOL
WPA_SearchRSN (
BYTE byCmd,
BYTE byEncrypt,
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
)
{
int ii;
@@ -299,7 +299,7 @@ WPA_SearchRSN (
-*/
BOOL
WPAb_Is_RSN (
- IN PWLAN_IE_RSN_EXT pRSN
+ PWLAN_IE_RSN_EXT pRSN
)
{
if (pRSN == NULL)
diff --git a/drivers/staging/vt6655/wpa.h b/drivers/staging/vt6655/wpa.h
index 9d9ce01d0c6..80d990b09d2 100644
--- a/drivers/staging/vt6655/wpa.h
+++ b/drivers/staging/vt6655/wpa.h
@@ -58,27 +58,27 @@
/*--------------------- Export Functions --------------------------*/
-VOID
+void
WPA_ClearRSN(
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
);
-VOID
+void
WPA_ParseRSN(
- IN PKnownBSS pBSSList,
- IN PWLAN_IE_RSN_EXT pRSN
+ PKnownBSS pBSSList,
+ PWLAN_IE_RSN_EXT pRSN
);
BOOL
WPA_SearchRSN(
BYTE byCmd,
BYTE byEncrypt,
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
);
BOOL
WPAb_Is_RSN(
- IN PWLAN_IE_RSN_EXT pRSN
+ PWLAN_IE_RSN_EXT pRSN
);
#endif // __WPA_H__
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 931b6bd360e..7a42a0aad7d 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -72,9 +72,9 @@ const BYTE abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
* Return Value: none.
*
-*/
-VOID
+void
WPA2_ClearRSN (
- IN PKnownBSS pBSSNode
+ PKnownBSS pBSSNode
)
{
int ii;
@@ -107,10 +107,10 @@ WPA2_ClearRSN (
* Return Value: none.
*
-*/
-VOID
+void
WPA2vParseRSN (
- IN PKnownBSS pBSSNode,
- IN PWLAN_IE_RSN pRSN
+ PKnownBSS pBSSNode,
+ PWLAN_IE_RSN pRSN
)
{
int i, j;
@@ -263,8 +263,8 @@ WPA2vParseRSN (
-*/
UINT
WPA2uSetIEs(
- IN PVOID pMgmtHandle,
- OUT PWLAN_IE_RSN pRSNIEs
+ void *pMgmtHandle,
+ PWLAN_IE_RSN pRSNIEs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
@@ -346,7 +346,7 @@ WPA2uSetIEs(
*pwPMKID = 0; // Initialize PMKID count
pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
+ if ( !memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
(*pwPMKID) ++;
memcpy(pbyBuffer, pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID, 16);
pbyBuffer += 16;
diff --git a/drivers/staging/vt6655/wpa2.h b/drivers/staging/vt6655/wpa2.h
index e553b386900..7200db37f43 100644
--- a/drivers/staging/vt6655/wpa2.h
+++ b/drivers/staging/vt6655/wpa2.h
@@ -58,21 +58,21 @@ typedef struct tagSPMKIDCache {
/*--------------------- Export Functions --------------------------*/
-VOID
+void
WPA2_ClearRSN (
- IN PKnownBSS pBSSNode
+ PKnownBSS pBSSNode
);
-VOID
+void
WPA2vParseRSN (
- IN PKnownBSS pBSSNode,
- IN PWLAN_IE_RSN pRSN
+ PKnownBSS pBSSNode,
+ PWLAN_IE_RSN pRSN
);
UINT
WPA2uSetIEs(
- IN PVOID pMgmtHandle,
- OUT PWLAN_IE_RSN pRSNIEs
+ void *pMgmtHandle,
+ PWLAN_IE_RSN pRSNIEs
);
#endif // __WPA2_H__
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 574e0b0a9c2..22c2fab3f32 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -101,7 +101,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
- memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
@@ -496,7 +496,7 @@ static int wpa_set_disassociate(PSDevice pDevice,
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass) {
if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
}
spin_unlock_irq(&pDevice->lock);
@@ -525,8 +525,8 @@ static int wpa_set_scan(PSDevice pDevice,
int ret = 0;
spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return ret;
@@ -681,13 +681,12 @@ static int wpa_get_scan(PSDevice pDevice,
count++;
};
- pBuf = kmalloc(sizeof(struct viawget_scan_result) * count, (int)GFP_ATOMIC);
+ pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);
if (pBuf == NULL) {
ret = -ENOMEM;
return ret;
}
- memset(pBuf, 0, sizeof(struct viawget_scan_result) * count);
scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) {
@@ -786,7 +785,7 @@ static int wpa_set_associate(PSDevice pDevice,
memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
else
{
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pItemSSID->abySSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pItemSSID->abySSID);
}
if (param->u.wpa_associate.wpa_ie_len == 0) {
@@ -870,11 +869,11 @@ if (!((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
if (pCurr == NULL){
printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
};
}
/****************************************************************/
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
return ret;
@@ -905,7 +904,7 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = (struct viawget_wpa_param *) kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, (int)GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
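Two smaller wpactl.c cleanups sit alongside the void * conversion: a kmalloc()+memset() pair collapses into a single kcalloc() call, and the pointless cast of kmalloc()'s void * return value is dropped. Roughly, in kernel context and with a made-up structure name:

#include <linux/slab.h>

struct scan_entry { unsigned char bssid[6]; };  /* stand-in for struct viawget_scan_result */

/* Before:
 *   buf = (struct scan_entry *)kmalloc(count * sizeof(*buf), GFP_ATOMIC);
 *   memset(buf, 0, count * sizeof(*buf));
 * After: kcalloc() returns already-zeroed memory and checks the
 * count * size multiplication for overflow; no cast is needed since
 * kmalloc()/kcalloc() return void *.
 */
static struct scan_entry *alloc_entries(size_t count)
{
        return kcalloc(count, sizeof(struct scan_entry), GFP_ATOMIC);
}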
diff --git a/drivers/staging/vt6655/wroute.c b/drivers/staging/vt6655/wroute.c
index 1d02040e80e..bf92fb9908f 100644
--- a/drivers/staging/vt6655/wroute.c
+++ b/drivers/staging/vt6655/wroute.c
@@ -91,11 +91,11 @@ BOOL ROUTEbRelay (PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeI
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, U_HEADER_LEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, ETH_HLEN);
- cbFrameBodySize = uDataLen - U_HEADER_LEN;
+ cbFrameBodySize = uDataLen - ETH_HLEN;
- if (ntohs(pDevice->sTxEthHeader.wType) > MAX_DATA_LEN) {
+ if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
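The ETH_ALEN, ETH_HLEN and ETH_DATA_LEN substitutions scattered through wmgr.c, wpa2.c, wpactl.c and wroute.c replace the driver's private U_ETHER_ADDR_LEN/U_HEADER_LEN/MAX_DATA_LEN copies with the standard Ethernet constants from <linux/if_ether.h> (6-byte MAC address, 14-byte header, 1500-byte maximum payload); the wType > ETH_DATA_LEN test in wroute.c keeps its meaning, since EtherType values lie above the valid 802.3 length range. A tiny userspace sketch using the same constants:

#include <stdio.h>
#include <string.h>
#include <linux/if_ether.h>     /* ETH_ALEN = 6, ETH_HLEN = 14, ETH_DATA_LEN = 1500 */

int main(void)
{
        unsigned char bssid[ETH_ALEN] = { 0 };
        unsigned char frame[ETH_HLEN + ETH_DATA_LEN] = { 0 };

        /* same shape as memcmp(..., pMgmt->abyCurrBSSID, ETH_ALEN) in the driver */
        if (!memcmp(frame, bssid, ETH_ALEN))
                printf("BSSID matches; payload limit is %d bytes\n", ETH_DATA_LEN);
        return 0;
}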
diff --git a/drivers/staging/vt6656/80211hdr.h b/drivers/staging/vt6656/80211hdr.h
index e5cee6fd053..15c6ef1a635 100644
--- a/drivers/staging/vt6656/80211hdr.h
+++ b/drivers/staging/vt6656/80211hdr.h
@@ -16,16 +16,13 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
- *
* File: 80211hdr.h
*
* Purpose: 802.11 MAC headers related pre-defines and macros.
*
- *
* Author: Lyndon Chen
*
* Date: Apr 8, 2002
- *
*/
#ifndef __80211HDR_H__
@@ -34,7 +31,8 @@
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
-// bit type
+
+/* bit type */
#define BIT0 0x00000001
#define BIT1 0x00000002
#define BIT2 0x00000004
@@ -68,7 +66,7 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-// 802.11 frame related, defined as 802.11 spec
+/* 802.11 frame related, defined as 802.11 spec */
#define WLAN_ADDR_LEN 6
#define WLAN_CRC_LEN 4
#define WLAN_CRC32_LEN 4
@@ -80,13 +78,14 @@
#define WLAN_HDR_ADDR4_LEN 30
#define WLAN_IEHDR_LEN 2
#define WLAN_SSID_MAXLEN 32
-//#define WLAN_RATES_MAXLEN 255
+/* #define WLAN_RATES_MAXLEN 255 */
#define WLAN_RATES_MAXLEN 16
#define WLAN_RATES_MAXLEN_11B 4
#define WLAN_RSN_MAXLEN 32
#define WLAN_DATA_MAXLEN 2312
-#define WLAN_A3FR_MAXLEN (WLAN_HDR_ADDR3_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)
-
+#define WLAN_A3FR_MAXLEN (WLAN_HDR_ADDR3_LEN \
+ + WLAN_DATA_MAXLEN \
+ + WLAN_CRC_LEN)
#define WLAN_BEACON_FR_MAXLEN WLAN_A3FR_MAXLEN
#define WLAN_ATIM_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 0)
@@ -101,12 +100,11 @@
#define WLAN_AUTHEN_FR_MAXLEN WLAN_A3FR_MAXLEN
#define WLAN_DEAUTHEN_FR_MAXLEN (WLAN_HDR_ADDR3_LEN + 2)
-
#define WLAN_WEP_NKEYS 4
#define WLAN_WEP40_KEYLEN 5
#define WLAN_WEP104_KEYLEN 13
#define WLAN_WEP232_KEYLEN 29
-//#define WLAN_WEPMAX_KEYLEN 29
+/* #define WLAN_WEPMAX_KEYLEN 29 */
#define WLAN_WEPMAX_KEYLEN 32
#define WLAN_CHALLENGE_IE_MAXLEN 255
#define WLAN_CHALLENGE_IE_LEN 130
@@ -115,7 +113,7 @@
#define WLAN_WEP_ICV_LEN 4
#define WLAN_FRAGS_MAX 16
-// Frame Type
+/* Frame Type */
#define WLAN_TYPE_MGR 0x00
#define WLAN_TYPE_CTL 0x01
#define WLAN_TYPE_DATA 0x02
@@ -124,8 +122,7 @@
#define WLAN_FTYPE_CTL 0x01
#define WLAN_FTYPE_DATA 0x02
-
-// Frame Subtypes
+/* Frame Subtypes */
#define WLAN_FSTYPE_ASSOCREQ 0x00
#define WLAN_FSTYPE_ASSOCRESP 0x01
#define WLAN_FSTYPE_REASSOCREQ 0x02
@@ -139,7 +136,7 @@
#define WLAN_FSTYPE_DEAUTHEN 0x0c
#define WLAN_FSTYPE_ACTION 0x0d
-// Control
+/* Control */
#define WLAN_FSTYPE_PSPOLL 0x0a
#define WLAN_FSTYPE_RTS 0x0b
#define WLAN_FSTYPE_CTS 0x0c
@@ -147,7 +144,7 @@
#define WLAN_FSTYPE_CFEND 0x0e
#define WLAN_FSTYPE_CFENDCFACK 0x0f
-// Data
+/* Data */
#define WLAN_FSTYPE_DATAONLY 0x00
#define WLAN_FSTYPE_DATA_CFACK 0x01
#define WLAN_FSTYPE_DATA_CFPOLL 0x02
@@ -157,13 +154,13 @@
#define WLAN_FSTYPE_CFPOLL 0x06
#define WLAN_FSTYPE_CFACK_CFPOLL 0x07
-
#ifdef __BIG_ENDIAN
-// GET & SET Frame Control bit
+/* GET & SET Frame Control bit */
#define WLAN_GET_FC_PRVER(n) ((((WORD)(n) >> 8) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n) >> 8) & (BIT2 | BIT3)) >> 2)
-#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n) >> 8) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
+#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n) >> 8) \
+ & (BIT4|BIT5|BIT6|BIT7)) >> 4)
#define WLAN_GET_FC_TODS(n) ((((WORD)(n) << 8) & (BIT8)) >> 8)
#define WLAN_GET_FC_FROMDS(n) ((((WORD)(n) << 8) & (BIT9)) >> 9)
#define WLAN_GET_FC_MOREFRAG(n) ((((WORD)(n) << 8) & (BIT10)) >> 10)
@@ -173,12 +170,12 @@
#define WLAN_GET_FC_ISWEP(n) ((((WORD)(n) << 8) & (BIT14)) >> 14)
#define WLAN_GET_FC_ORDER(n) ((((WORD)(n) << 8) & (BIT15)) >> 15)
-// Sequence Field bit
+/* Sequence Field bit */
#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n) >> 8) & (BIT0|BIT1|BIT2|BIT3))
-#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n) >> 8) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
+#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n) >> 8) \
+ & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-// Capability Field bit
+/* Capability Field bit */
#define WLAN_GET_CAP_INFO_ESS(n) (((n) >> 8) & BIT0)
#define WLAN_GET_CAP_INFO_IBSS(n) ((((n) >> 8) & BIT1) >> 1)
#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) ((((n) >> 8) & BIT2) >> 2)
@@ -192,10 +189,9 @@
#define WLAN_GET_CAP_INFO_DSSSOFDM(n) ((((n)) & BIT13) >> 13)
#define WLAN_GET_CAP_INFO_GRPACK(n) ((((n)) & BIT14) >> 14)
-
#else
-// GET & SET Frame Control bit
+/* GET & SET Frame Control bit */
#define WLAN_GET_FC_PRVER(n) (((WORD)(n)) & (BIT0 | BIT1))
#define WLAN_GET_FC_FTYPE(n) ((((WORD)(n)) & (BIT2 | BIT3)) >> 2)
#define WLAN_GET_FC_FSTYPE(n) ((((WORD)(n)) & (BIT4|BIT5|BIT6|BIT7)) >> 4)
@@ -208,13 +204,11 @@
#define WLAN_GET_FC_ISWEP(n) ((((WORD)(n)) & (BIT14)) >> 14)
#define WLAN_GET_FC_ORDER(n) ((((WORD)(n)) & (BIT15)) >> 15)
-
-// Sequence Field bit
+/* Sequence Field bit */
#define WLAN_GET_SEQ_FRGNUM(n) (((WORD)(n)) & (BIT0|BIT1|BIT2|BIT3))
#define WLAN_GET_SEQ_SEQNUM(n) ((((WORD)(n)) & (~(BIT0|BIT1|BIT2|BIT3))) >> 4)
-
-// Capability Field bit
+/* Capability Field bit */
#define WLAN_GET_CAP_INFO_ESS(n) ((n) & BIT0)
#define WLAN_GET_CAP_INFO_IBSS(n) (((n) & BIT1) >> 1)
#define WLAN_GET_CAP_INFO_CFPOLLABLE(n) (((n) & BIT2) >> 2)
@@ -228,9 +222,7 @@
#define WLAN_GET_CAP_INFO_DSSSOFDM(n) (((n) & BIT13) >> 13)
#define WLAN_GET_CAP_INFO_GRPACK(n) (((n) & BIT14) >> 14)
-
-#endif //#ifdef __BIG_ENDIAN
-
+#endif /* #ifdef __BIG_ENDIAN */
#define WLAN_SET_CAP_INFO_ESS(n) (n)
#define WLAN_SET_CAP_INFO_IBSS(n) ((n) << 1)
@@ -245,7 +237,6 @@
#define WLAN_SET_CAP_INFO_DSSSOFDM(n) ((n) << 13)
#define WLAN_SET_CAP_INFO_GRPACK(n) ((n) << 14)
-
#define WLAN_SET_FC_PRVER(n) ((WORD)(n))
#define WLAN_SET_FC_FTYPE(n) (((WORD)(n)) << 2)
#define WLAN_SET_FC_FSTYPE(n) (((WORD)(n)) << 4)
@@ -261,7 +252,7 @@
#define WLAN_SET_SEQ_FRGNUM(n) ((WORD)(n))
#define WLAN_SET_SEQ_SEQNUM(n) (((WORD)(n)) << 4)
-// ERP Field bit
+/* ERP Field bit */
#define WLAN_GET_ERP_NONERP_PRESENT(n) ((n) & BIT0)
#define WLAN_GET_ERP_USE_PROTECTION(n) (((n) & BIT1) >> 1)
@@ -271,21 +262,19 @@
#define WLAN_SET_ERP_USE_PROTECTION(n) ((n) << 1)
#define WLAN_SET_ERP_BARKER_MODE(n) ((n) << 2)
-
-
-// Support & Basic Rates field
+/* Support & Basic Rates field */
#define WLAN_MGMT_IS_BASICRATE(b) ((b) & BIT7)
#define WLAN_MGMT_GET_RATE(b) ((b) & ~BIT7)
-// TIM field
+/* TIM field */
#define WLAN_MGMT_IS_MULTICAST_TIM(b) ((b) & BIT0)
#define WLAN_MGMT_GET_TIM_OFFSET(b) (((b) & ~BIT0) >> 1)
-// 3-Addr & 4-Addr
+/* 3-Addr & 4-Addr */
#define WLAN_HDR_A3_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR3_LEN)
#define WLAN_HDR_A4_DATA_PTR(p) (((PBYTE)(p)) + WLAN_HDR_ADDR4_LEN)
-// IEEE ADDR
+/* IEEE ADDR */
#define IEEE_ADDR_UNIVERSAL 0x02
#define IEEE_ADDR_GROUP 0x01
@@ -293,7 +282,7 @@ typedef struct {
BYTE abyAddr[6];
} IEEE_ADDR, *PIEEE_ADDR;
-// 802.11 Header Format
+/* 802.11 Header Format */
typedef struct tagWLAN_80211HDR_A2 {
@@ -302,7 +291,7 @@ typedef struct tagWLAN_80211HDR_A2 {
BYTE abyAddr1[WLAN_ADDR_LEN];
BYTE abyAddr2[WLAN_ADDR_LEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_80211HDR_A2, *PWLAN_80211HDR_A2;
typedef struct tagWLAN_80211HDR_A3 {
@@ -314,7 +303,7 @@ typedef struct tagWLAN_80211HDR_A3 {
BYTE abyAddr3[WLAN_ADDR_LEN];
WORD wSeqCtl;
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_80211HDR_A3, *PWLAN_80211HDR_A3;
typedef struct tagWLAN_80211HDR_A4 {
@@ -327,10 +316,9 @@ typedef struct tagWLAN_80211HDR_A4 {
WORD wSeqCtl;
BYTE abyAddr4[WLAN_ADDR_LEN];
-}__attribute__ ((__packed__))
+} __attribute__ ((__packed__))
WLAN_80211HDR_A4, *PWLAN_80211HDR_A4;
-
typedef union tagUWLAN_80211HDR {
WLAN_80211HDR_A2 sA2;
@@ -339,15 +327,10 @@ typedef union tagUWLAN_80211HDR {
} UWLAN_80211HDR, *PUWLAN_80211HDR;
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-
-
-#endif // __80211HDR_H__
-
-
+#endif /* __80211HDR_H__ */
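The 80211hdr.h hunks are pure checkpatch hygiene: C99 // comments become /* */ block comments, stray blank lines go away, and macros that ran past 80 columns are wrapped with backslash continuations. The wrapping idiom, shown on a made-up macro:

/* Hypothetical frame-control helper: mask then shift, split with
 * backslash continuations so no source line exceeds 80 columns.
 */
#define GET_SUBTYPE(fc) ((((unsigned short)(fc)) \
                          & (0x10 | 0x20 | 0x40 | 0x80)) >> 4)

int subtype_of(unsigned short fc)
{
        return GET_SUBTYPE(fc);
}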
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index 8fa1a8e5a21..f24dc55e68f 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -89,9 +89,9 @@ static int msglevel =MSG_LEVEL_INFO;
*
-*/
-VOID
+void
vMgrEncodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -121,9 +121,9 @@ vMgrEncodeBeacon(
-*/
-VOID
+void
vMgrDecodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
)
{
PWLAN_IE pItem;
@@ -242,9 +242,9 @@ vMgrDecodeBeacon(
-*/
-VOID
+void
vMgrEncodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -265,9 +265,9 @@ vMgrEncodeIBSSATIM(
*
-*/
-VOID
+void
vMgrDecodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -287,9 +287,9 @@ vMgrDecodeIBSSATIM(
*
-*/
-VOID
+void
vMgrEncodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -315,9 +315,9 @@ vMgrEncodeDisassociation(
*
-*/
-VOID
+void
vMgrDecodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -341,9 +341,9 @@ vMgrDecodeDisassociation(
-*/
-VOID
+void
vMgrEncodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -368,9 +368,9 @@ vMgrEncodeAssocRequest(
*
-*/
-VOID
+void
vMgrDecodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
)
{
PWLAN_IE pItem;
@@ -434,9 +434,9 @@ vMgrDecodeAssocRequest(
*
-*/
-VOID
+void
vMgrEncodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -466,9 +466,9 @@ vMgrEncodeAssocResponse(
*
-*/
-VOID
+void
vMgrDecodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
)
{
PWLAN_IE pItem;
@@ -512,9 +512,9 @@ vMgrDecodeAssocResponse(
*
-*/
-VOID
+void
vMgrEncodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -544,9 +544,9 @@ vMgrEncodeReassocRequest(
-*/
-VOID
+void
vMgrDecodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
)
{
PWLAN_IE pItem;
@@ -616,9 +616,9 @@ vMgrDecodeReassocRequest(
-*/
-VOID
+void
vMgrEncodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -637,9 +637,9 @@ vMgrEncodeProbeRequest(
*
-*/
-VOID
+void
vMgrDecodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
)
{
PWLAN_IE pItem;
@@ -690,9 +690,9 @@ vMgrDecodeProbeRequest(
-*/
-VOID
+void
vMgrEncodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -724,9 +724,9 @@ vMgrEncodeProbeResponse(
*
-*/
-VOID
+void
vMgrDecodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
)
{
PWLAN_IE pItem;
@@ -838,9 +838,9 @@ vMgrDecodeProbeResponse(
*
-*/
-VOID
+void
vMgrEncodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -869,9 +869,9 @@ vMgrEncodeAuthen(
*
-*/
-VOID
+void
vMgrDecodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
)
{
PWLAN_IE pItem;
@@ -909,9 +909,9 @@ vMgrDecodeAuthen(
*
-*/
-VOID
+void
vMgrEncodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -936,9 +936,9 @@ vMgrEncodeDeauthen(
*
-*/
-VOID
+void
vMgrDecodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -962,9 +962,9 @@ vMgrDecodeDeauthen(
*
-*/
-VOID
+void
vMgrEncodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
@@ -995,9 +995,9 @@ vMgrEncodeReassocResponse(
-*/
-VOID
+void
vMgrDecodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
)
{
PWLAN_IE pItem;
diff --git a/drivers/staging/vt6656/80211mgr.h b/drivers/staging/vt6656/80211mgr.h
index a4ea8249216..c140a957d9d 100644
--- a/drivers/staging/vt6656/80211mgr.h
+++ b/drivers/staging/vt6656/80211mgr.h
@@ -524,8 +524,8 @@ typedef struct _WLAN_IE_IBSS_DFS {
// prototype structure, all mgmt frame types will start with these members
typedef struct tagWLAN_FR_MGMT {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
@@ -534,8 +534,8 @@ typedef struct tagWLAN_FR_MGMT {
// Beacon frame
typedef struct tagWLAN_FR_BEACON {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
// fixed fields
@@ -566,8 +566,8 @@ typedef struct tagWLAN_FR_BEACON {
// IBSS ATIM frame
typedef struct tagWLAN_FR_IBSSATIM {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
@@ -580,8 +580,8 @@ typedef struct tagWLAN_FR_IBSSATIM {
// Disassociation
typedef struct tagWLAN_FR_DISASSOC {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -593,8 +593,8 @@ typedef struct tagWLAN_FR_DISASSOC {
// Association Request
typedef struct tagWLAN_FR_ASSOCREQ {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -617,8 +617,8 @@ typedef struct tagWLAN_FR_ASSOCREQ {
// Association Response
typedef struct tagWLAN_FR_ASSOCRESP {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -634,8 +634,8 @@ typedef struct tagWLAN_FR_ASSOCRESP {
// Reassociation Request
typedef struct tagWLAN_FR_REASSOCREQ {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
@@ -659,8 +659,8 @@ typedef struct tagWLAN_FR_REASSOCREQ {
// Reassociation Response
typedef struct tagWLAN_FR_REASSOCRESP {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -676,8 +676,8 @@ typedef struct tagWLAN_FR_REASSOCRESP {
// Probe Request
typedef struct tagWLAN_FR_PROBEREQ {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -691,8 +691,8 @@ typedef struct tagWLAN_FR_PROBEREQ {
// Probe Response
typedef struct tagWLAN_FR_PROBERESP {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -720,8 +720,8 @@ typedef struct tagWLAN_FR_PROBERESP {
// Authentication
typedef struct tagWLAN_FR_AUTHEN {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -736,8 +736,8 @@ typedef struct tagWLAN_FR_AUTHEN {
// Deauthenication
typedef struct tagWLAN_FR_DEAUTHEN {
- UINT uType;
- UINT len;
+ unsigned int uType;
+ unsigned int len;
PBYTE pBuf;
PUWLAN_80211HDR pHdr;
/*-- fixed fields -----------*/
@@ -748,114 +748,114 @@ typedef struct tagWLAN_FR_DEAUTHEN {
} WLAN_FR_DEAUTHEN, *PWLAN_FR_DEAUTHEN;
/*--------------------- Export Functions --------------------------*/
-VOID
+void
vMgrEncodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
);
-VOID
+void
vMgrDecodeBeacon(
- IN PWLAN_FR_BEACON pFrame
+ PWLAN_FR_BEACON pFrame
);
-VOID
+void
vMgrEncodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
);
-VOID
+void
vMgrDecodeIBSSATIM(
- IN PWLAN_FR_IBSSATIM pFrame
+ PWLAN_FR_IBSSATIM pFrame
);
-VOID
+void
vMgrEncodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
);
-VOID
+void
vMgrDecodeDisassociation(
- IN PWLAN_FR_DISASSOC pFrame
+ PWLAN_FR_DISASSOC pFrame
);
-VOID
+void
vMgrEncodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
);
-VOID
+void
vMgrDecodeAssocRequest(
- IN PWLAN_FR_ASSOCREQ pFrame
+ PWLAN_FR_ASSOCREQ pFrame
);
-VOID
+void
vMgrEncodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
);
-VOID
+void
vMgrDecodeAssocResponse(
- IN PWLAN_FR_ASSOCRESP pFrame
+ PWLAN_FR_ASSOCRESP pFrame
);
-VOID
+void
vMgrEncodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
);
-VOID
+void
vMgrDecodeReassocRequest(
- IN PWLAN_FR_REASSOCREQ pFrame
+ PWLAN_FR_REASSOCREQ pFrame
);
-VOID
+void
vMgrEncodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
);
-VOID
+void
vMgrDecodeProbeRequest(
- IN PWLAN_FR_PROBEREQ pFrame
+ PWLAN_FR_PROBEREQ pFrame
);
-VOID
+void
vMgrEncodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
);
-VOID
+void
vMgrDecodeProbeResponse(
- IN PWLAN_FR_PROBERESP pFrame
+ PWLAN_FR_PROBERESP pFrame
);
-VOID
+void
vMgrEncodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
);
-VOID
+void
vMgrDecodeAuthen(
- IN PWLAN_FR_AUTHEN pFrame
+ PWLAN_FR_AUTHEN pFrame
);
-VOID
+void
vMgrEncodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
);
-VOID
+void
vMgrDecodeDeauthen(
- IN PWLAN_FR_DEAUTHEN pFrame
+ PWLAN_FR_DEAUTHEN pFrame
);
-VOID
+void
vMgrEncodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
);
-VOID
+void
vMgrDecodeReassocResponse(
- IN PWLAN_FR_REASSOCRESP pFrame
+ PWLAN_FR_REASSOCRESP pFrame
);
#endif// __80211MGR_H__
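The 80211mgr.h change is narrower still: the driver-local UINT alias in the frame descriptor structs becomes plain unsigned int. A hypothetical descriptor in the new form:

/* Hypothetical frame descriptor after the UINT -> unsigned int change */
struct frame_desc {
        unsigned int type;               /* frame type code */
        unsigned int len;                /* total frame length in bytes */
        unsigned char *buf;              /* raw frame buffer */
};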
diff --git a/drivers/staging/vt6656/aes_ccmp.c b/drivers/staging/vt6656/aes_ccmp.c
index 401a7d267c9..b3d367b9bdc 100644
--- a/drivers/staging/vt6656/aes_ccmp.c
+++ b/drivers/staging/vt6656/aes_ccmp.c
@@ -16,7 +16,6 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
- *
* File: aes_ccmp.c
*
* Purpose: AES_CCMP decryption
@@ -28,9 +27,7 @@
* Functions:
* AESbGenCCMP - Parsing RX-packet
*
- *
* Revision History:
- *
*/
#include "device.h"
@@ -46,62 +43,61 @@
* SBOX Table
*/
-BYTE sbox_table[256] =
-{
-0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
-0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
-0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
-0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
-0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
-0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
-0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
-0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
-0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
-0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
-0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
-0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
-0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
-0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
-0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
-0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+BYTE sbox_table[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
BYTE dot2_table[256] = {
-0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
-0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
-0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
-0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
-0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
-0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
-0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
-0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
-0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
-0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
-0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
-0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
-0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
-0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
-0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
-0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+ 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
+ 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
+ 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
+ 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
+ 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
+ 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
+ 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
+ 0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
+ 0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
+ 0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
+ 0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
+ 0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
+ 0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
+ 0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
+ 0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
BYTE dot3_table[256] = {
-0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
-0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
-0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
-0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
-0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
-0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
-0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
-0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
-0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
-0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
-0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
-0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
-0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
-0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
-0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
-0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
+ 0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
+ 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
+ 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
+ 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
+ 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
+ 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
+ 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
+ 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
+ 0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
+ 0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
+ 0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
+ 0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
+ 0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
+ 0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
+ 0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
+ 0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
};
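The three tables above are the standard AES lookup tables: sbox_table is the forward S-box, while dot2_table and dot3_table hold GF(2^8) multiplication by 2 and 3 modulo the AES polynomial 0x11b, which MixColumns() below combines column-wise. A standalone sketch (plain C, independent of the driver's BYTE typedef) that regenerates the two multiply tables from the usual xtime() helper:

/*
 * Illustrative sketch only, not part of the patch: dot2_table[x] equals
 * xtime(x) and dot3_table[x] equals xtime(x) ^ x for every byte x.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t xtime(uint8_t x)
{
	/* multiply by 2 in GF(2^8): shift left, reduce by 0x1b on overflow */
	return (uint8_t)((x << 1) ^ ((x & 0x80) ? 0x1b : 0x00));
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 256; i++)
		printf("dot2[%02x]=%02x dot3[%02x]=%02x\n",
		       i, xtime((uint8_t)i),
		       i, (uint8_t)(xtime((uint8_t)i) ^ (uint8_t)i));
	return 0;
}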
/*--------------------- Static Functions --------------------------*/
@@ -112,120 +108,111 @@ BYTE dot3_table[256] = {
void xor_128(BYTE *a, BYTE *b, BYTE *out)
{
-PDWORD dwPtrA = (PDWORD) a;
-PDWORD dwPtrB = (PDWORD) b;
-PDWORD dwPtrOut =(PDWORD) out;
-
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
+ PDWORD dwPtrA = (PDWORD) a;
+ PDWORD dwPtrB = (PDWORD) b;
+ PDWORD dwPtrOut = (PDWORD) out;
+
+ (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
+ (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
+ (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
+ (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
void xor_32(BYTE *a, BYTE *b, BYTE *out)
{
-PDWORD dwPtrA = (PDWORD) a;
-PDWORD dwPtrB = (PDWORD) b;
-PDWORD dwPtrOut =(PDWORD) out;
+ PDWORD dwPtrA = (PDWORD) a;
+ PDWORD dwPtrB = (PDWORD) b;
+ PDWORD dwPtrOut = (PDWORD) out;
- (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
+ (*dwPtrOut++) = (*dwPtrA++) ^ (*dwPtrB++);
}
void AddRoundKey(BYTE *key, int round)
{
-BYTE sbox_key[4];
-BYTE rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
+ BYTE sbox_key[4];
+ BYTE rcon_table[10] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36};
- sbox_key[0] = sbox_table[key[13]];
- sbox_key[1] = sbox_table[key[14]];
- sbox_key[2] = sbox_table[key[15]];
- sbox_key[3] = sbox_table[key[12]];
+ sbox_key[0] = sbox_table[key[13]];
+ sbox_key[1] = sbox_table[key[14]];
+ sbox_key[2] = sbox_table[key[15]];
+ sbox_key[3] = sbox_table[key[12]];
- key[0] = key[0] ^ rcon_table[round];
- xor_32(&key[0], sbox_key, &key[0]);
+ key[0] = key[0] ^ rcon_table[round];
+ xor_32(&key[0], sbox_key, &key[0]);
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
+ xor_32(&key[4], &key[0], &key[4]);
+ xor_32(&key[8], &key[4], &key[8]);
+ xor_32(&key[12], &key[8], &key[12]);
}
void SubBytes(BYTE *in, BYTE *out)
{
-int i;
+ int i;
- for (i=0; i< 16; i++)
- {
- out[i] = sbox_table[in[i]];
- }
+ for (i = 0; i < 16; i++)
+ out[i] = sbox_table[in[i]];
}
void ShiftRows(BYTE *in, BYTE *out)
{
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
+ out[0] = in[0];
+ out[1] = in[5];
+ out[2] = in[10];
+ out[3] = in[15];
+ out[4] = in[4];
+ out[5] = in[9];
+ out[6] = in[14];
+ out[7] = in[3];
+ out[8] = in[8];
+ out[9] = in[13];
+ out[10] = in[2];
+ out[11] = in[7];
+ out[12] = in[12];
+ out[13] = in[1];
+ out[14] = in[6];
+ out[15] = in[11];
}
void MixColumns(BYTE *in, BYTE *out)
{
- out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
- out[1] = in[0] ^ dot2_table[in[1]] ^ dot3_table[in[2]] ^ in[3];
- out[2] = in[0] ^ in[1] ^ dot2_table[in[2]] ^ dot3_table[in[3]];
- out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
+ out[0] = dot2_table[in[0]] ^ dot3_table[in[1]] ^ in[2] ^ in[3];
+ out[1] = in[0] ^ dot2_table[in[1]] ^ dot3_table[in[2]] ^ in[3];
+ out[2] = in[0] ^ in[1] ^ dot2_table[in[2]] ^ dot3_table[in[3]];
+ out[3] = dot3_table[in[0]] ^ in[1] ^ in[2] ^ dot2_table[in[3]];
}
-
void AESv128(BYTE *key, BYTE *data, BYTE *ciphertext)
{
-int i;
-int round;
-BYTE TmpdataA[16];
-BYTE TmpdataB[16];
-BYTE abyRoundKey[16];
-
- for(i=0; i<16; i++)
- abyRoundKey[i] = key[i];
-
- for (round = 0; round < 11; round++)
- {
- if (round == 0)
- {
- xor_128(abyRoundKey, data, ciphertext);
- AddRoundKey(abyRoundKey, round);
- }
- else if (round == 10)
- {
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- xor_128(TmpdataB, abyRoundKey, ciphertext);
- }
- else // round 1 ~ 9
- {
- SubBytes(ciphertext, TmpdataA);
- ShiftRows(TmpdataA, TmpdataB);
- MixColumns(&TmpdataB[0], &TmpdataA[0]);
- MixColumns(&TmpdataB[4], &TmpdataA[4]);
- MixColumns(&TmpdataB[8], &TmpdataA[8]);
- MixColumns(&TmpdataB[12], &TmpdataA[12]);
- xor_128(TmpdataA, abyRoundKey, ciphertext);
- AddRoundKey(abyRoundKey, round);
- }
- }
+ int i;
+ int round;
+ BYTE TmpdataA[16];
+ BYTE TmpdataB[16];
+ BYTE abyRoundKey[16];
+
+ for (i = 0; i < 16; i++)
+ abyRoundKey[i] = key[i];
+
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
+ xor_128(abyRoundKey, data, ciphertext);
+ AddRoundKey(abyRoundKey, round);
+ } else if (round == 10) {
+ SubBytes(ciphertext, TmpdataA);
+ ShiftRows(TmpdataA, TmpdataB);
+ xor_128(TmpdataB, abyRoundKey, ciphertext);
+ } else { /* round 1 ~ 9 */
+ SubBytes(ciphertext, TmpdataA);
+ ShiftRows(TmpdataA, TmpdataB);
+ MixColumns(&TmpdataB[0], &TmpdataA[0]);
+ MixColumns(&TmpdataB[4], &TmpdataA[4]);
+ MixColumns(&TmpdataB[8], &TmpdataA[8]);
+ MixColumns(&TmpdataB[12], &TmpdataA[12]);
+ xor_128(TmpdataA, abyRoundKey, ciphertext);
+ AddRoundKey(abyRoundKey, round);
+ }
+ }
}
@@ -243,160 +230,157 @@ BYTE abyRoundKey[16];
* Return Value: MIC compare result
*
*/
+
BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize)
{
-BYTE abyNonce[13];
-BYTE MIC_IV[16];
-BYTE MIC_HDR1[16];
-BYTE MIC_HDR2[16];
-BYTE abyMIC[16];
-BYTE abyCTRPLD[16];
-BYTE abyTmp[16];
-BYTE abyPlainText[16];
-BYTE abyLastCipher[16];
-
-PS802_11Header pMACHeader = (PS802_11Header) pbyFrame;
-PBYTE pbyIV;
-PBYTE pbyPayload;
-WORD wHLen = 22;
-WORD wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;//8 is IV, 8 is MIC, 4 is CRC
-BOOL bA4 = FALSE;
-BYTE byTmp;
-WORD wCnt;
-int ii,jj,kk;
-
-
- pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
- if ( WLAN_GET_FC_TODS(*(PWORD)pbyFrame) &&
- WLAN_GET_FC_FROMDS(*(PWORD)pbyFrame) ) {
- bA4 = TRUE;
- pbyIV += 6; // 6 is 802.11 address4
- wHLen += 6;
- wPayloadSize -= 6;
- }
- pbyPayload = pbyIV + 8; //IV-length
-
- abyNonce[0] = 0x00; //now is 0, if Qos here will be priority
- memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, U_ETHER_ADDR_LEN);
- abyNonce[7] = pbyIV[7];
- abyNonce[8] = pbyIV[6];
- abyNonce[9] = pbyIV[5];
- abyNonce[10] = pbyIV[4];
- abyNonce[11] = pbyIV[1];
- abyNonce[12] = pbyIV[0];
-
- //MIC_IV
- MIC_IV[0] = 0x59;
- memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
- MIC_IV[14] = (BYTE)(wPayloadSize >> 8);
- MIC_IV[15] = (BYTE)(wPayloadSize & 0xff);
-
- //MIC_HDR1
- MIC_HDR1[0] = (BYTE)(wHLen >> 8);
- MIC_HDR1[1] = (BYTE)(wHLen & 0xff);
- byTmp = (BYTE)(pMACHeader->wFrameCtl & 0xff);
- MIC_HDR1[2] = byTmp & 0x8f;
- byTmp = (BYTE)(pMACHeader->wFrameCtl >> 8);
- byTmp &= 0x87;
- MIC_HDR1[3] = byTmp | 0x40;
- memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, U_ETHER_ADDR_LEN);
- memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, U_ETHER_ADDR_LEN);
-
- //MIC_HDR2
- memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, U_ETHER_ADDR_LEN);
- byTmp = (BYTE)(pMACHeader->wSeqCtl & 0xff);
- MIC_HDR2[6] = byTmp & 0x0f;
- MIC_HDR2[7] = 0;
- if ( bA4 ) {
- memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, U_ETHER_ADDR_LEN);
- } else {
- MIC_HDR2[8] = 0x00;
- MIC_HDR2[9] = 0x00;
- MIC_HDR2[10] = 0x00;
- MIC_HDR2[11] = 0x00;
- MIC_HDR2[12] = 0x00;
- MIC_HDR2[13] = 0x00;
- }
- MIC_HDR2[14] = 0x00;
- MIC_HDR2[15] = 0x00;
-
- //CCMP
- AESv128(pbyRxKey,MIC_IV,abyMIC);
- for ( kk=0; kk<16; kk++ ) {
- abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
- }
- AESv128(pbyRxKey,abyTmp,abyMIC);
- for ( kk=0; kk<16; kk++ ) {
- abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
- }
- AESv128(pbyRxKey,abyTmp,abyMIC);
-
- wCnt = 1;
- abyCTRPLD[0] = 0x01;
- memcpy(&(abyCTRPLD[1]), &(abyNonce[0]), 13);
-
- for(jj=wPayloadSize; jj>16; jj=jj-16) {
-
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
-
- AESv128(pbyRxKey,abyCTRPLD,abyTmp);
-
- for ( kk=0; kk<16; kk++ ) {
- abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
- for ( kk=0; kk<16; kk++ ) {
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
- AESv128(pbyRxKey,abyTmp,abyMIC);
-
- memcpy(pbyPayload, abyPlainText, 16);
- wCnt++;
- pbyPayload += 16;
- } //for wPayloadSize
-
- //last payload
- memcpy(&(abyLastCipher[0]), pbyPayload, jj);
- for ( ii=jj; ii<16; ii++ ) {
- abyLastCipher[ii] = 0x00;
- }
-
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
-
- AESv128(pbyRxKey,abyCTRPLD,abyTmp);
- for ( kk=0; kk<16; kk++ ) {
- abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
- }
- memcpy(pbyPayload, abyPlainText, jj);
- pbyPayload += jj;
-
- //for MIC calculation
- for ( ii=jj; ii<16; ii++ ) {
- abyPlainText[ii] = 0x00;
- }
- for ( kk=0; kk<16; kk++ ) {
- abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
- }
- AESv128(pbyRxKey,abyTmp,abyMIC);
-
- //=>above is the calculate MIC
- //--------------------------------------------
-
- wCnt = 0;
- abyCTRPLD[14] = (BYTE) (wCnt >> 8);
- abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
- AESv128(pbyRxKey,abyCTRPLD,abyTmp);
- for ( kk=0; kk<8; kk++ ) {
- abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
- }
- //=>above is the dec-MIC from packet
- //--------------------------------------------
-
- if ( !memcmp(abyMIC,abyTmp,8) ) {
- return TRUE;
- } else {
- return FALSE;
- }
-
+ BYTE abyNonce[13];
+ BYTE MIC_IV[16];
+ BYTE MIC_HDR1[16];
+ BYTE MIC_HDR2[16];
+ BYTE abyMIC[16];
+ BYTE abyCTRPLD[16];
+ BYTE abyTmp[16];
+ BYTE abyPlainText[16];
+ BYTE abyLastCipher[16];
+
+ PS802_11Header pMACHeader = (PS802_11Header) pbyFrame;
+ PBYTE pbyIV;
+ PBYTE pbyPayload;
+ WORD wHLen = 22;
+ /* 8 is IV, 8 is MIC, 4 is CRC */
+ WORD wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;
+ BOOL bA4 = FALSE;
+ BYTE byTmp;
+ WORD wCnt;
+ int ii, jj, kk;
+
+ pbyIV = pbyFrame + WLAN_HDR_ADDR3_LEN;
+ if (WLAN_GET_FC_TODS(*(PWORD) pbyFrame) &&
+ WLAN_GET_FC_FROMDS(*(PWORD) pbyFrame)) {
+ bA4 = TRUE;
+ pbyIV += 6; /* 6 is 802.11 address4 */
+ wHLen += 6;
+ wPayloadSize -= 6;
+ }
+ pbyPayload = pbyIV + 8; /* IV-length */
+
+ abyNonce[0] = 0x00; /* now is 0, if Qos here will be priority */
+ memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, ETH_ALEN);
+ abyNonce[7] = pbyIV[7];
+ abyNonce[8] = pbyIV[6];
+ abyNonce[9] = pbyIV[5];
+ abyNonce[10] = pbyIV[4];
+ abyNonce[11] = pbyIV[1];
+ abyNonce[12] = pbyIV[0];
+
+ /* MIC_IV */
+ MIC_IV[0] = 0x59;
+ memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
+ MIC_IV[14] = (BYTE)(wPayloadSize >> 8);
+ MIC_IV[15] = (BYTE)(wPayloadSize & 0xff);
+
+ /* MIC_HDR1 */
+ MIC_HDR1[0] = (BYTE)(wHLen >> 8);
+ MIC_HDR1[1] = (BYTE)(wHLen & 0xff);
+ byTmp = (BYTE)(pMACHeader->wFrameCtl & 0xff);
+ MIC_HDR1[2] = byTmp & 0x8f;
+ byTmp = (BYTE)(pMACHeader->wFrameCtl >> 8);
+ byTmp &= 0x87;
+ MIC_HDR1[3] = byTmp | 0x40;
+ memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
+ memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, ETH_ALEN);
+
+ /* MIC_HDR2 */
+ memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
+ byTmp = (BYTE)(pMACHeader->wSeqCtl & 0xff);
+ MIC_HDR2[6] = byTmp & 0x0f;
+ MIC_HDR2[7] = 0;
+
+ if (bA4) {
+ memcpy(&(MIC_HDR2[8]), pMACHeader->abyAddr4, ETH_ALEN);
+ } else {
+ MIC_HDR2[8] = 0x00;
+ MIC_HDR2[9] = 0x00;
+ MIC_HDR2[10] = 0x00;
+ MIC_HDR2[11] = 0x00;
+ MIC_HDR2[12] = 0x00;
+ MIC_HDR2[13] = 0x00;
+ }
+ MIC_HDR2[14] = 0x00;
+ MIC_HDR2[15] = 0x00;
+
+ /* CCMP */
+ AESv128(pbyRxKey, MIC_IV, abyMIC);
+ for (kk = 0; kk < 16; kk++)
+ abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
+
+ AESv128(pbyRxKey, abyTmp, abyMIC);
+ for (kk = 0; kk < 16; kk++)
+ abyTmp[kk] = MIC_HDR2[kk] ^ abyMIC[kk];
+
+ AESv128(pbyRxKey, abyTmp, abyMIC);
+
+ wCnt = 1;
+ abyCTRPLD[0] = 0x01;
+ memcpy(&(abyCTRPLD[1]), &(abyNonce[0]), 13);
+
+ for (jj = wPayloadSize; jj > 16; jj = jj-16) {
+
+ abyCTRPLD[14] = (BYTE) (wCnt >> 8);
+ abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+
+ AESv128(pbyRxKey, abyCTRPLD, abyTmp);
+
+ for (kk = 0; kk < 16; kk++)
+ abyPlainText[kk] = abyTmp[kk] ^ pbyPayload[kk];
+
+ for (kk = 0; kk < 16; kk++)
+ abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
+
+ AESv128(pbyRxKey, abyTmp, abyMIC);
+
+ memcpy(pbyPayload, abyPlainText, 16);
+ wCnt++;
+ pbyPayload += 16;
+ } /* for wPayloadSize */
+
+ /* last payload */
+ memcpy(&(abyLastCipher[0]), pbyPayload, jj);
+ for (ii = jj; ii < 16; ii++)
+ abyLastCipher[ii] = 0x00;
+
+ abyCTRPLD[14] = (BYTE) (wCnt >> 8);
+ abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+
+ AESv128(pbyRxKey, abyCTRPLD, abyTmp);
+ for (kk = 0; kk < 16; kk++)
+ abyPlainText[kk] = abyTmp[kk] ^ abyLastCipher[kk];
+
+ memcpy(pbyPayload, abyPlainText, jj);
+ pbyPayload += jj;
+
+ /* for MIC calculation */
+ for (ii = jj; ii < 16; ii++)
+ abyPlainText[ii] = 0x00;
+ for (kk = 0; kk < 16; kk++)
+ abyTmp[kk] = abyMIC[kk] ^ abyPlainText[kk];
+
+ AESv128(pbyRxKey, abyTmp, abyMIC);
+
+ /* => above is the calculated MIC */
+
+ wCnt = 0;
+ abyCTRPLD[14] = (BYTE) (wCnt >> 8);
+ abyCTRPLD[15] = (BYTE) (wCnt & 0xff);
+ AESv128(pbyRxKey, abyCTRPLD, abyTmp);
+
+ for (kk = 0; kk < 8; kk++)
+ abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
+
+ /* => above is the packet dec-MIC */
+
+ if (!memcmp(abyMIC, abyTmp, 8))
+ return TRUE;
+ else
+ return FALSE;
}
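For reference, a minimal sketch (plain C with hypothetical names, not the driver's BYTE/WORD types) of two details AESbGenCCMP() relies on above: the 16-bit CTR block counter is stored big-endian in bytes 14..15 of the counter preload, and the final partial payload block of jj bytes is zero-padded before it is folded into the CBC-MAC:

/* Standalone sketch under the assumptions stated above. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void set_ctr_counter(uint8_t ctr[16], uint16_t cnt)
{
	ctr[14] = (uint8_t)(cnt >> 8);   /* high byte first (big-endian) */
	ctr[15] = (uint8_t)(cnt & 0xff);
}

int main(void)
{
	uint8_t ctr[16] = { 0x01 };      /* flags byte; nonce bytes 1..13 would follow */
	uint8_t last[16];
	const uint8_t tail[5] = { 0xde, 0xad, 0xbe, 0xef, 0x55 };
	size_t jj = sizeof(tail);        /* bytes left in the final block */

	set_ctr_counter(ctr, 1);

	memset(last, 0, sizeof(last));   /* zero padding, as in the driver */
	memcpy(last, tail, jj);

	printf("ctr[14..15] = %02x %02x, padded block %02x .. %02x\n",
	       ctr[14], ctr[15], last[0], last[15]);
	return 0;
}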
diff --git a/drivers/staging/vt6656/aes_ccmp.h b/drivers/staging/vt6656/aes_ccmp.h
index f2ba1d5aa1e..353bd210a50 100644
--- a/drivers/staging/vt6656/aes_ccmp.h
+++ b/drivers/staging/vt6656/aes_ccmp.h
@@ -43,4 +43,4 @@
/*--------------------- Export Functions --------------------------*/
BOOL AESbGenCCMP(PBYTE pbyRxKey, PBYTE pbyFrame, WORD wFrameSize);
-#endif //__AES_H__
+#endif /* __AES_CCMP_H__ */
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 7dc01dbfc6f..d3de94f36c6 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -662,11 +662,11 @@ const WORD awcFrameTime[MAX_RATE] =
/*
static
-ULONG
+unsigned long
s_ulGetLowSQ3(PSDevice pDevice);
static
-ULONG
+unsigned long
s_ulGetRatio(PSDevice pDevice);
static
@@ -689,19 +689,19 @@ s_vClearSQ3Value(PSDevice pDevice);
* Return Value: FrameTime
*
*/
-UINT
+unsigned int
BBuGetFrameTime (
- IN BYTE byPreambleType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate
+ BYTE byPreambleType,
+ BYTE byPktType,
+ unsigned int cbFrameLength,
+ WORD wRate
)
{
- UINT uFrameTime;
- UINT uPreamble;
- UINT uTmp;
- UINT uRateIdx = (UINT)wRate;
- UINT uRate = 0;
+ unsigned int uFrameTime;
+ unsigned int uPreamble;
+ unsigned int uTmp;
+ unsigned int uRateIdx = (unsigned int)wRate;
+ unsigned int uRate = 0;
if (uRateIdx > RATE_54M) {
@@ -709,7 +709,7 @@ BBuGetFrameTime (
return 0;
}
- uRate = (UINT)awcFrameTime[uRateIdx];
+ uRate = (unsigned int)awcFrameTime[uRateIdx];
if (uRateIdx <= 3) { //CCK mode
@@ -756,20 +756,20 @@ BBuGetFrameTime (
* Return Value: none
*
*/
-VOID
+void
BBvCaculateParameter (
- IN PSDevice pDevice,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BYTE byPacketType,
- OUT PWORD pwPhyLen,
- OUT PBYTE pbyPhySrv,
- OUT PBYTE pbyPhySgn
+ PSDevice pDevice,
+ unsigned int cbFrameLength,
+ WORD wRate,
+ BYTE byPacketType,
+ PWORD pwPhyLen,
+ PBYTE pbyPhySrv,
+ PBYTE pbyPhySgn
)
{
- UINT cbBitCount;
- UINT cbUsCount = 0;
- UINT cbTmp;
+ unsigned int cbBitCount;
+ unsigned int cbUsCount = 0;
+ unsigned int cbTmp;
BOOL bExtBit;
BYTE byPreambleType = pDevice->byPreambleType;
BOOL bCCK = pDevice->bCCK;
@@ -929,7 +929,7 @@ BBvCaculateParameter (
* Return Value: none
*
*/
-VOID
+void
BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
{
//{{ RobertYu: 20041124, ABG Mode, VC1/VC2 define, make the ANT_A, ANT_B inverted
@@ -1274,7 +1274,7 @@ void BBvLoopbackOff (PSDevice pDevice)
* Return Value: none
*
*/
-VOID
+void
BBvSetShortSlotTime (PSDevice pDevice)
{
BYTE byBBVGA=0;
@@ -1295,7 +1295,7 @@ BBvSetShortSlotTime (PSDevice pDevice)
}
-VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
+void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);
@@ -1324,7 +1324,7 @@ VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
* Return Value: none
*
*/
-VOID
+void
BBvSoftwareReset (PSDevice pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x50, 0x40);
@@ -1345,14 +1345,14 @@ BBvSoftwareReset (PSDevice pDevice)
* Return Value: none
*
*/
-VOID
+void
BBvSetDeepSleep (PSDevice pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0c, 0x17);//CR12
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0D, 0xB9);//CR13
}
-VOID
+void
BBvExitDeepSleep (PSDevice pDevice)
{
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0C, 0x00);//CR12
@@ -1360,13 +1360,11 @@ BBvExitDeepSleep (PSDevice pDevice)
}
-static
-ULONG
-s_ulGetLowSQ3(PSDevice pDevice)
+static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
{
-int ii;
-ULONG ulSQ3 = 0;
-ULONG ulMaxPacket;
+ int ii;
+ unsigned long ulSQ3 = 0;
+ unsigned long ulMaxPacket;
ulMaxPacket = pDevice->aulPktNum[RATE_54M];
if ( pDevice->aulPktNum[RATE_54M] != 0 ) {
@@ -1382,16 +1380,12 @@ ULONG ulMaxPacket;
return ulSQ3;
}
-
-
-static
-ULONG
-s_ulGetRatio (PSDevice pDevice)
+static unsigned long s_ulGetRatio(PSDevice pDevice)
{
-int ii,jj;
-ULONG ulRatio = 0;
-ULONG ulMaxPacket;
-ULONG ulPacketNum;
+ int ii, jj;
+ unsigned long ulRatio = 0;
+ unsigned long ulMaxPacket;
+ unsigned long ulPacketNum;
//This is a thousand-ratio
ulMaxPacket = pDevice->aulPktNum[RATE_54M];
@@ -1445,7 +1439,7 @@ s_vClearSQ3Value (PSDevice pDevice)
*
*/
-VOID
+void
BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
{
@@ -1513,7 +1507,9 @@ BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
if ( pDevice->byTMax == 0 )
return;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_ANTENNA,
+ NULL);
pDevice->byAntennaState = 1;
@@ -1543,7 +1539,9 @@ BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
((pDevice->ulSQ3_State1 != 0) && (pDevice->ulSQ3_State0 != 0) && (pDevice->ulSQ3_State0 < pDevice->ulSQ3_State1))
) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_ANTENNA,
+ NULL);
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
pDevice->TimerSQ3Tmax2.expires = RUN_AT(pDevice->byTMax2 * HZ);
@@ -1576,17 +1574,14 @@ BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3)
*
-*/
-VOID
-TimerSQ3CallBack (
- IN HANDLE hDeviceContext
- )
+void TimerSQ3CallBack(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"TimerSQ3CallBack...");
spin_lock_irq(&pDevice->lock);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
pDevice->byAntennaState = 0;
s_vClearSQ3Value(pDevice);
pDevice->TimerSQ3Tmax3.expires = RUN_AT(pDevice->byTMax3 * HZ);
@@ -1618,10 +1613,7 @@ TimerSQ3CallBack (
*
-*/
-VOID
-TimerSQ3Tmax3CallBack (
- IN HANDLE hDeviceContext
- )
+void TimerSQ3Tmax3CallBack(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1639,7 +1631,7 @@ TimerSQ3Tmax3CallBack (
return;
}
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_CHANGE_ANTENNA, NULL);
pDevice->byAntennaState = 1;
del_timer(&pDevice->TimerSQ3Tmax3);
del_timer(&pDevice->TimerSQ3Tmax2);
@@ -1650,10 +1642,10 @@ TimerSQ3Tmax3CallBack (
return;
}
-VOID
+void
BBvUpdatePreEDThreshold(
- IN PSDevice pDevice,
- IN BOOL bScanning)
+ PSDevice pDevice,
+ BOOL bScanning)
{
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index e991a7e68d4..bc4633d5fea 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -96,51 +96,44 @@
/*--------------------- Export Functions --------------------------*/
-UINT
+unsigned int
BBuGetFrameTime(
- IN BYTE byPreambleType,
- IN BYTE byFreqType,
- IN UINT cbFrameLength,
- IN WORD wRate
+ BYTE byPreambleType,
+ BYTE byFreqType,
+ unsigned int cbFrameLength,
+ WORD wRate
);
-VOID
+void
BBvCaculateParameter (
- IN PSDevice pDevice,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BYTE byPacketType,
- OUT PWORD pwPhyLen,
- OUT PBYTE pbyPhySrv,
- OUT PBYTE pbyPhySgn
+ PSDevice pDevice,
+ unsigned int cbFrameLength,
+ WORD wRate,
+ BYTE byPacketType,
+ PWORD pwPhyLen,
+ PBYTE pbyPhySrv,
+ PBYTE pbyPhySgn
);
// timer for antenna diversity
-VOID
-TimerSQ3CallBack (
- IN HANDLE hDeviceContext
- );
-
-VOID
-TimerSQ3Tmax3CallBack (
- IN HANDLE hDeviceContext
- );
+void TimerSQ3CallBack(void *hDeviceContext);
+void TimerSQ3Tmax3CallBack(void *hDeviceContext);
-VOID BBvAntennaDiversity (PSDevice pDevice, BYTE byRxRate, BYTE bySQ3);
-void BBvLoopbackOn (PSDevice pDevice);
-void BBvLoopbackOff (PSDevice pDevice);
-void BBvSoftwareReset (PSDevice pDevice);
+void BBvAntennaDiversity(PSDevice pDevice, BYTE byRxRate, BYTE bySQ3);
+void BBvLoopbackOn(PSDevice pDevice);
+void BBvLoopbackOff(PSDevice pDevice);
+void BBvSoftwareReset(PSDevice pDevice);
void BBvSetShortSlotTime(PSDevice pDevice);
-VOID BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
+void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData);
void BBvSetAntennaMode(PSDevice pDevice, BYTE byAntennaMode);
BOOL BBbVT3184Init (PSDevice pDevice);
-VOID BBvSetDeepSleep (PSDevice pDevice);
-VOID BBvExitDeepSleep (PSDevice pDevice);
-VOID BBvUpdatePreEDThreshold(
- IN PSDevice pDevice,
- IN BOOL bScanning
+void BBvSetDeepSleep(PSDevice pDevice);
+void BBvExitDeepSleep(PSDevice pDevice);
+void BBvUpdatePreEDThreshold(
+ PSDevice pDevice,
+ BOOL bScanning
);
-#endif // __BASEBAND_H__
+#endif /* __BASEBAND_H__ */
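The baseband changes above drop the Windows-style VOID/HANDLE/IN/OUT annotations in favour of plain C: callbacks such as TimerSQ3CallBack() now take a void * context which the callee casts back to its device structure. A small sketch of that calling convention (struct demo_device and demo_timer_cb are illustrative names only, not driver symbols):

/* Sketch of the void * context pattern the patch converts to. */
#include <stdio.h>

struct demo_device {
	int antenna_state;
};

static void demo_timer_cb(void *context)
{
	struct demo_device *dev = context;   /* void * converts implicitly in C */

	dev->antenna_state = 0;
	printf("antenna state reset to %d\n", dev->antenna_state);
}

int main(void)
{
	struct demo_device dev = { .antenna_state = 1 };

	/* callers now pass (void *)pDevice, as in the bScheduleCommand() changes */
	demo_timer_cb(&dev);
	return 0;
}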
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index 6b1678bfd61..36ed61b595c 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -91,19 +91,13 @@ const WORD awHWRetry1[5][5] = {
/*--------------------- Static Functions --------------------------*/
-VOID s_vCheckSensitivity(
- IN HANDLE hDeviceContext
- );
-
-VOID s_vCheckPreEDThreshold(
- IN HANDLE hDeviceContext
- );
+void s_vCheckSensitivity(void *hDeviceContext);
+void s_vCheckPreEDThreshold(void *hDeviceContext);
#ifdef Calcu_LinkQual
-VOID s_uCalculateLinkQual(
- IN HANDLE hDeviceContext
- );
+void s_uCalculateLinkQual(void *hDeviceContext);
#endif
+
/*--------------------- Export Variables --------------------------*/
@@ -123,13 +117,10 @@ VOID s_uCalculateLinkQual(
*
-*/
-PKnownBSS
-BSSpSearchBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE pbyDesireBSSID,
- IN PBYTE pbyDesireSSID,
- IN CARD_PHY_TYPE ePhyType
- )
+PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
+ PBYTE pbyDesireBSSID,
+ PBYTE pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -138,8 +129,8 @@ BSSpSearchBSSList(
PKnownBSS pCurrBSS = NULL;
PKnownBSS pSelect = NULL;
BYTE ZeroBSSID[WLAN_BSSID_LEN]={0x00,0x00,0x00,0x00,0x00,0x00};
- UINT ii = 0;
- UINT jj = 0; //DavidWang
+ unsigned int ii = 0;
+ unsigned int jj = 0;
if (pbyDesireBSSID != NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList BSSID[%02X %02X %02X-%02X %02X %02X]\n",
*pbyDesireBSSID,*(pbyDesireBSSID+1),*(pbyDesireBSSID+2),
@@ -296,15 +287,11 @@ pDevice->bSameBSSMaxNum = jj;
-*/
-VOID
-BSSvClearBSSList(
- IN HANDLE hDeviceContext,
- IN BOOL bKeepCurrBSSID
- )
+void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (bKeepCurrBSSID) {
@@ -342,17 +329,14 @@ BSSvClearBSSList(
* TRUE if found.
*
-*/
-PKnownBSS
-BSSpAddrIsInBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSID,
- IN PWLAN_IE_SSID pSSID
- )
+PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
+ PBYTE abyBSSID,
+ PWLAN_IE_SSID pSSID)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PKnownBSS pBSSList = NULL;
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSSList = &(pMgmt->sBSSList[ii]);
@@ -383,33 +367,30 @@ BSSpAddrIsInBSSList(
*
-*/
-BOOL
-BSSbInsertToBSSList (
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSIDAddr,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
- )
+BOOL BSSbInsertToBSSList(void *hDeviceContext,
+ PBYTE abyBSSIDAddr,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ unsigned int uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
PKnownBSS pBSSList = NULL;
- UINT ii;
+ unsigned int ii;
BOOL bParsingQuiet = FALSE;
@@ -484,24 +465,27 @@ BSSbInsertToBSSList (
WPA_ClearRSN(pBSSList);
if (pRSNWPA != NULL) {
- UINT uLen = pRSNWPA->len + 2;
-
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
+ unsigned int uLen = pRSNWPA->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (ULONG_PTR) ((PBYTE) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
}
WPA2_ClearRSN(pBSSList);
if (pRSN != NULL) {
- UINT uLen = pRSN->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
+ unsigned int uLen = pRSN->len + 2;
+
+ if (uLen <= (uIELength -
+ (unsigned int) (ULONG_PTR) ((PBYTE) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
}
if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) || (pBSSList->bWPA2Valid == TRUE)) {
@@ -518,7 +502,9 @@ BSSbInsertToBSSList (
if ((bIs802_1x == TRUE) && (pSSID->len == ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len) &&
( !memcmp(pSSID->abySSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->abySSID, pSSID->len))) {
- bAdd_PMKID_Candidate((HANDLE)pDevice, pBSSList->abyBSSID, &pBSSList->sRSNCapObj);
+ bAdd_PMKID_Candidate((void *) pDevice,
+ pBSSList->abyBSSID,
+ &pBSSList->sRSNCapObj);
if ((pDevice->bLinkPass == TRUE) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if ((KeybGetTransmitKey(&(pDevice->sKey), pDevice->abyBSSID, PAIRWISE_KEY, &pTransmitKey) == TRUE) ||
@@ -602,33 +588,30 @@ BSSbInsertToBSSList (
-*/
// TODO: input structure modify
-BOOL
-BSSbUpdateToBSSList (
- IN HANDLE hDeviceContext,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN BOOL bChannelHit,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN PKnownBSS pBSSList,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
- )
+BOOL BSSbUpdateToBSSList(void *hDeviceContext,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ BOOL bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ unsigned int uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext)
{
int ii, jj;
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
- LONG ldBm, ldBmSum;
+ signed long ldBm, ldBmSum;
BOOL bParsingQuiet = FALSE;
// BYTE abyTmpSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
@@ -687,24 +670,26 @@ BSSbUpdateToBSSList (
WPA_ClearRSN(pBSSList); //mike update
- if (pRSNWPA != NULL) {
- UINT uLen = pRSNWPA->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSNWPA - pbyIEs))) {
- pBSSList->wWPALen = uLen;
- memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
- WPA_ParseRSN(pBSSList, pRSNWPA);
- }
- }
+ if (pRSNWPA != NULL) {
+ unsigned int uLen = pRSNWPA->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (ULONG_PTR) ((PBYTE) pRSNWPA - pbyIEs))) {
+ pBSSList->wWPALen = uLen;
+ memcpy(pBSSList->byWPAIE, pRSNWPA, uLen);
+ WPA_ParseRSN(pBSSList, pRSNWPA);
+ }
+ }
WPA2_ClearRSN(pBSSList); //mike update
if (pRSN != NULL) {
- UINT uLen = pRSN->len + 2;
- if (uLen <= (uIELength - (UINT)(ULONG_PTR)((PBYTE)pRSN - pbyIEs))) {
- pBSSList->wRSNLen = uLen;
- memcpy(pBSSList->byRSNIE, pRSN, uLen);
- WPA2vParseRSN(pBSSList, pRSN);
- }
+ unsigned int uLen = pRSN->len + 2;
+ if (uLen <= (uIELength -
+ (unsigned int) (ULONG_PTR) ((PBYTE) pRSN - pbyIEs))) {
+ pBSSList->wRSNLen = uLen;
+ memcpy(pBSSList->byRSNIE, pRSN, uLen);
+ WPA2vParseRSN(pBSSList, pRSN);
+ }
}
if (pRxPacket->uRSSI != 0) {
@@ -768,16 +753,13 @@ BSSbUpdateToBSSList (
*
-*/
-BOOL
-BSSbIsSTAInNodeDB(
- IN HANDLE hDeviceContext,
- IN PBYTE abyDstAddr,
- OUT PUINT puNodeIndex
- )
+BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
+ PBYTE abyDstAddr,
+ PUINT puNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
+ unsigned int ii;
// Index = 0 reserved for AP Node
for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -804,18 +786,14 @@ BSSbIsSTAInNodeDB(
* None
*
-*/
-VOID
-BSSvCreateOneNode(
- IN HANDLE hDeviceContext,
- OUT PUINT puNodeIndex
- )
+void BSSvCreateOneNode(void *hDeviceContext, PUINT puNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
- UINT BigestCount = 0;
- UINT SelectIndex;
+ unsigned int ii;
+ unsigned int BigestCount = 0;
+ unsigned int SelectIndex;
struct sk_buff *skb;
// Index = 0 reserved for AP Node (In STA mode)
// Index = 0 reserved for Broadcast/MultiCast (In AP mode)
@@ -869,11 +847,8 @@ BSSvCreateOneNode(
* None
*
-*/
-VOID
-BSSvRemoveOneNode(
- IN HANDLE hDeviceContext,
- IN UINT uNodeIndex
- )
+
+void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -902,17 +877,14 @@ BSSvRemoveOneNode(
*
-*/
-VOID
-BSSvUpdateAPNode(
- IN HANDLE hDeviceContext,
- IN PWORD pwCapInfo,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates
- )
+void BSSvUpdateAPNode(void *hDeviceContext,
+ PWORD pwCapInfo,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
memset(&pMgmt->sNodeDBTable[0], 0, sizeof(KnownNodeDB));
@@ -926,7 +898,7 @@ BSSvUpdateAPNode(
pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pExtSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
uRateLen);
- RATEvParseMaxRate((PVOID) pDevice,
+ RATEvParseMaxRate((void *) pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -946,10 +918,6 @@ BSSvUpdateAPNode(
};
-
-
-
-
/*+
*
* Routine Description:
@@ -961,11 +929,7 @@ BSSvUpdateAPNode(
*
-*/
-
-VOID
-BSSvAddMulticastNode(
- IN HANDLE hDeviceContext
- )
+void BSSvAddMulticastNode(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -976,7 +940,7 @@ BSSvAddMulticastNode(
pMgmt->sNodeDBTable[0].bActive = TRUE;
pMgmt->sNodeDBTable[0].bPSEnable = FALSE;
skb_queue_head_init(&pMgmt->sNodeDBTable[0].sTxPSQueue);
- RATEvParseMaxRate((PVOID) pDevice,
+ RATEvParseMaxRate((void *) pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -991,10 +955,6 @@ BSSvAddMulticastNode(
};
-
-
-
-
/*+
*
* Routine Description:
@@ -1008,19 +968,15 @@ BSSvAddMulticastNode(
*
-*/
-
-VOID
-BSSvSecondCallBack(
- IN HANDLE hDeviceContext
- )
+void BSSvSecondCallBack(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
+ unsigned int ii;
PWLAN_IE_SSID pItemSSID, pCurrSSID;
- UINT uSleepySTACnt = 0;
- UINT uNonShortSlotSTACnt = 0;
- UINT uLongPreambleSTACnt = 0;
+ unsigned int uSleepySTACnt = 0;
+ unsigned int uNonShortSlotSTACnt = 0;
+ unsigned int uLongPreambleSTACnt = 0;
viawget_wpa_header *wpahdr; //DavidWang
spin_lock_irq(&pDevice->lock);
@@ -1080,7 +1036,7 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
#endif
#ifdef Calcu_LinkQual
- s_uCalculateLinkQual((HANDLE)pDevice);
+ s_uCalculateLinkQual((void *)pDevice);
#endif
for (ii = 0; ii < (MAX_NODE_NUM + 1); ii++) {
@@ -1131,12 +1087,14 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
*/
if (ii > 0) {
// ii = 0 for multicast node (AP & Adhoc)
- RATEvTxRateFallBack((PVOID)pDevice, &(pMgmt->sNodeDBTable[ii]));
+ RATEvTxRateFallBack((void *)pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
}
else {
// ii = 0 reserved for unicast AP node (Infra STA)
- if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
- RATEvTxRateFallBack((PVOID)pDevice, &(pMgmt->sNodeDBTable[ii]));
+ if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA)
+ RATEvTxRateFallBack((void *)pDevice,
+ &(pMgmt->sNodeDBTable[ii]));
}
}
@@ -1177,14 +1135,14 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
if (pDevice->bShortSlotTime) {
pDevice->bShortSlotTime = FALSE;
BBvSetShortSlotTime(pDevice);
- vUpdateIFS((PVOID)pDevice);
+ vUpdateIFS((void *)pDevice);
}
}
else {
if (!pDevice->bShortSlotTime) {
pDevice->bShortSlotTime = TRUE;
BBvSetShortSlotTime(pDevice);
- vUpdateIFS((PVOID)pDevice);
+ vUpdateIFS((void *)pDevice);
}
}
@@ -1224,14 +1182,16 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
// DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
if (pDevice->bUpdateBBVGA) {
- // s_vCheckSensitivity((HANDLE) pDevice);
- s_vCheckPreEDThreshold((HANDLE)pDevice);
+ /* s_vCheckSensitivity((void *) pDevice); */
+ s_vCheckPreEDThreshold((void *) pDevice);
}
if ((pMgmt->sNodeDBTable[0].uInActiveCount >= (LOST_BEACON_COUNT/2)) &&
(pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) ) {
pDevice->byBBVGANew = pDevice->abyBBVGA[0];
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
}
if (pMgmt->sNodeDBTable[0].uInActiveCount >= LOST_BEACON_COUNT) {
@@ -1279,9 +1239,13 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bRoaming %d, !\n", pDevice->bRoaming );
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
if ((pDevice->bRoaming == TRUE)&&(pDevice->bIsRoaming == TRUE)){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Fast Roaming ...\n");
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
pDevice->uAutoReConnectTime = 0;
pDevice->uIsroamingTime = 0;
pDevice->bRoaming = FALSE;
@@ -1324,10 +1288,14 @@ else {
pDevice->eEncryptionStatus = pDevice->eOldEncryptionStatus;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming ...\n");
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ pMgmt->eScanType = WMAC_SCAN_ACTIVE;
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
pDevice->uAutoReConnectTime = 0;
}
}
@@ -1343,17 +1311,17 @@ else {
else {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Adhoc re-scaning ...\n");
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
pDevice->uAutoReConnectTime = 0;
};
}
if (pMgmt->eCurrState == WMAC_STATE_JOINTED) {
- if (pDevice->bUpdateBBVGA) {
- //s_vCheckSensitivity((HANDLE) pDevice);
- s_vCheckPreEDThreshold((HANDLE)pDevice);
- }
+ if (pDevice->bUpdateBBVGA) {
+ /* s_vCheckSensitivity((void *) pDevice); */
+ s_vCheckPreEDThreshold((void *) pDevice);
+ }
if (pMgmt->sNodeDBTable[0].uInActiveCount >=ADHOC_LOST_BEACON_COUNT) {
DBG_PRT(MSG_LEVEL_NOTICE, KERN_INFO "Lost other STA beacon [%d] sec, started !\n", pMgmt->sNodeDBTable[0].uInActiveCount);
pMgmt->sNodeDBTable[0].uInActiveCount = 0;
@@ -1377,9 +1345,6 @@ else {
return;
}
-
-
-
/*+
*
* Routine Description:
@@ -1393,30 +1358,23 @@ else {
*
-*/
-
-
-VOID
-BSSvUpdateNodeTxCounter(
- IN HANDLE hDeviceContext,
- IN PSStatCounter pStatistic,
- IN BYTE byTSR,
- IN BYTE byPktNO
- )
+void BSSvUpdateNodeTxCounter(void *hDeviceContext,
+ PSStatCounter pStatistic,
+ BYTE byTSR,
+ BYTE byPktNO)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
BYTE byTxRetry;
WORD wRate;
WORD wFallBackRate = RATE_1M;
BYTE byFallBack;
- UINT ii;
+ unsigned int ii;
PBYTE pbyDestAddr;
BYTE byPktNum;
WORD wFIFOCtl;
-
-
byPktNum = (byPktNO & 0x0F) >> 4;
byTxRetry = (byTSR & 0xF0) >> 4;
wRate = (WORD) (byPktNO & 0xF0) >> 4;
@@ -1483,11 +1441,13 @@ BSSvUpdateNodeTxCounter(
}
};
- if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
+ if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) ||
(pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) {
- if (BSSbIsSTAInNodeDB((HANDLE)pDevice, pbyDestAddr, &uNodeIndex)){
- pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
+ if (BSSbIsSTAInNodeDB((void *) pDevice,
+ pbyDestAddr,
+ &uNodeIndex)) {
+ pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts += 1;
if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
// transmit success, TxAttempts at least plus one
pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++;
@@ -1542,9 +1502,6 @@ BSSvUpdateNodeTxCounter(
}
-
-
-
/*+
*
* Routine Description:
@@ -1563,18 +1520,13 @@ BSSvUpdateNodeTxCounter(
*
-*/
-
-VOID
-BSSvClearNodeDBTable(
- IN HANDLE hDeviceContext,
- IN UINT uStartIndex
- )
-
+void BSSvClearNodeDBTable(void *hDeviceContext,
+ unsigned int uStartIndex)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct sk_buff *skb;
- UINT ii;
+ unsigned int ii;
for (ii = uStartIndex; ii < (MAX_NODE_NUM + 1); ii++) {
if (pMgmt->sNodeDBTable[ii].bActive) {
@@ -1592,10 +1544,7 @@ BSSvClearNodeDBTable(
return;
};
-
-VOID s_vCheckSensitivity(
- IN HANDLE hDeviceContext
- )
+void s_vCheckSensitivity(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PKnownBSS pBSSList = NULL;
@@ -1606,9 +1555,9 @@ VOID s_vCheckSensitivity(
((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) {
pBSSList = BSSpAddrIsInBSSList(pDevice, pMgmt->abyCurrBSSID, (PWLAN_IE_SSID)pMgmt->abyCurrSSID);
if (pBSSList != NULL) {
- // Updata BB Reg if RSSI is too strong.
- LONG LocalldBmAverage = 0;
- LONG uNumofdBm = 0;
+ /* Update BB register if RSSI is too strong */
+ signed long LocalldBmAverage = 0;
+ signed long uNumofdBm = 0;
for (ii = 0; ii < RSSI_STAT_COUNT; ii++) {
if (pBSSList->ldBmAverage[ii] != 0) {
uNumofdBm ++;
@@ -1627,7 +1576,9 @@ VOID s_vCheckSensitivity(
if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
pDevice->uBBVGADiffCount++;
if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD)
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_CHANGE_BBSENSITIVITY, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_CHANGE_BBSENSITIVITY,
+ NULL);
} else {
pDevice->uBBVGADiffCount = 0;
}
@@ -1637,14 +1588,12 @@ VOID s_vCheckSensitivity(
}
#ifdef Calcu_LinkQual
-VOID s_uCalculateLinkQual(
- IN HANDLE hDeviceContext
- )
+void s_uCalculateLinkQual(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
- ULONG TxOkRatio, TxCnt;
- ULONG RxOkRatio,RxCnt;
- ULONG RssiRatio;
+ unsigned long TxOkRatio, TxCnt;
+ unsigned long RxOkRatio, RxCnt;
+ unsigned long RssiRatio;
long ldBm;
TxCnt = pDevice->scStatistic.TxNoRetryOkCount +
@@ -1685,14 +1634,11 @@ else
}
#endif
-VOID
-BSSvClearAnyBSSJoinRecord (
- IN HANDLE hDeviceContext
- )
+void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
+ unsigned int ii;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pMgmt->sBSSList[ii].bSelected = FALSE;
@@ -1700,9 +1646,7 @@ BSSvClearAnyBSSJoinRecord (
return;
}
-VOID s_vCheckPreEDThreshold(
- IN HANDLE hDeviceContext
- )
+void s_vCheckPreEDThreshold(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PKnownBSS pBSSList = NULL;
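A sketch (plain C, hypothetical names) of the bounds check that BSSbInsertToBSSList() and BSSbUpdateToBSSList() keep when copying a WPA/RSN element out of a received IE buffer: the element is copied only if its header plus body fit inside the remaining bytes, so a malformed beacon cannot run the copy past the end. The extra two-byte header guard here is an addition for a standalone example, not taken from the driver:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int copy_ie(uint8_t *dst, size_t dst_len,
		   const uint8_t *ies, size_t ies_len, const uint8_t *ie)
{
	size_t offset = (size_t)(ie - ies);
	size_t len;

	if (offset + 2 > ies_len)            /* element ID + length byte must fit */
		return 0;
	len = (size_t)ie[1] + 2;             /* header + body, like uLen = len + 2 */
	if (len > dst_len || len > ies_len - offset)
		return 0;                    /* reject, as the driver does */
	memcpy(dst, ie, len);
	return 1;
}

int main(void)
{
	const uint8_t ies[] = { 0x30, 0x02, 0x01, 0x00 };  /* toy RSN IE, 2-byte body */
	uint8_t out[32];

	printf("copied: %d\n", copy_ie(out, sizeof(out), ies, sizeof(ies), ies));
	return 0;
}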
diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
index f365b6b8bf6..9686d8600d6 100644
--- a/drivers/staging/vt6656/bssdb.h
+++ b/drivers/staging/vt6656/bssdb.h
@@ -97,10 +97,10 @@ typedef struct tagKnownBSS {
// BSS info
BOOL bActive;
BYTE abyBSSID[WLAN_BSSID_LEN];
- UINT uChannel;
+ unsigned int uChannel;
BYTE abySuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE abyExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
- UINT uRSSI;
+ unsigned int uRSSI;
BYTE bySQ;
WORD wBeaconInterval;
WORD wCapInfo;
@@ -109,9 +109,9 @@ typedef struct tagKnownBSS {
// WORD wATIMWindow;
BYTE byRSSIStatCnt;
- LONG ldBmMAX;
- LONG ldBmAverage[RSSI_STAT_COUNT];
- LONG ldBmAverRange;
+ signed long ldBmMAX;
+ signed long ldBmAverage[RSSI_STAT_COUNT];
+ signed long ldBmAverRange;
//For any BSSID selection improvment
BOOL bSelected;
@@ -141,9 +141,9 @@ typedef struct tagKnownBSS {
WORD wRSNLen;
// Clear count
- UINT uClearCount;
+ unsigned int uClearCount;
// BYTE abyIEs[WLAN_BEACON_FR_MAXLEN];
- UINT uIELength;
+ unsigned int uIELength;
QWORD qwBSSTimestamp;
QWORD qwLocalTSF; // local TSF timer
@@ -178,7 +178,7 @@ typedef struct tagKnownNodeDB {
BOOL bShortPreamble;
BOOL bERPExist;
BOOL bShortSlotTime;
- UINT uInActiveCount;
+ unsigned int uInActiveCount;
WORD wMaxBasicRate; //Get from byTopOFDMBasicRate or byTopCCKBasicRate which depends on packetTyp.
WORD wMaxSuppRate; //Records the highest supported rate getting from SuppRates IE and ExtSuppRates IE in Beacon.
WORD wSuppRate;
@@ -194,166 +194,114 @@ typedef struct tagKnownNodeDB {
BOOL bPSEnable;
BOOL bRxPSPoll;
BYTE byAuthSequence;
- ULONG ulLastRxJiffer;
+ unsigned long ulLastRxJiffer;
BYTE bySuppRate;
DWORD dwFlags;
WORD wEnQueueCnt;
BOOL bOnFly;
- ULONGLONG KeyRSC;
+ unsigned long long KeyRSC;
BYTE byKeyIndex;
DWORD dwKeyIndex;
BYTE byCipherSuite;
DWORD dwTSC47_16;
WORD wTSC15_0;
- UINT uWepKeyLength;
+ unsigned int uWepKeyLength;
BYTE abyWepKey[WLAN_WEPMAX_KEYLEN];
//
// Auto rate fallback vars
BOOL bIsInFallback;
- UINT uAverageRSSI;
- UINT uRateRecoveryTimeout;
- UINT uRatePollTimeout;
- UINT uTxFailures;
- UINT uTxAttempts;
-
- UINT uTxRetry;
- UINT uFailureRatio;
- UINT uRetryRatio;
- UINT uTxOk[MAX_RATE+1];
- UINT uTxFail[MAX_RATE+1];
- UINT uTimeCount;
+ unsigned int uAverageRSSI;
+ unsigned int uRateRecoveryTimeout;
+ unsigned int uRatePollTimeout;
+ unsigned int uTxFailures;
+ unsigned int uTxAttempts;
+
+ unsigned int uTxRetry;
+ unsigned int uFailureRatio;
+ unsigned int uRetryRatio;
+ unsigned int uTxOk[MAX_RATE+1];
+ unsigned int uTxFail[MAX_RATE+1];
+ unsigned int uTimeCount;
} KnownNodeDB, *PKnownNodeDB;
-
/*--------------------- Export Functions --------------------------*/
-
-
-PKnownBSS
-BSSpSearchBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE pbyDesireBSSID,
- IN PBYTE pbyDesireSSID,
- IN CARD_PHY_TYPE ePhyType
- );
-
-PKnownBSS
-BSSpAddrIsInBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSID,
- IN PWLAN_IE_SSID pSSID
- );
-
-VOID
-BSSvClearBSSList(
- IN HANDLE hDeviceContext,
- IN BOOL bKeepCurrBSSID
- );
-
-BOOL
-BSSbInsertToBSSList(
- IN HANDLE hDeviceContext,
- IN PBYTE abyBSSIDAddr,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
- );
-
-
-BOOL
-BSSbUpdateToBSSList(
- IN HANDLE hDeviceContext,
- IN QWORD qwTimestamp,
- IN WORD wBeaconInterval,
- IN WORD wCapInfo,
- IN BYTE byCurrChannel,
- IN BOOL bChannelHit,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pSuppRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates,
- IN PERPObject psERP,
- IN PWLAN_IE_RSN pRSN,
- IN PWLAN_IE_RSN_EXT pRSNWPA,
- IN PWLAN_IE_COUNTRY pIE_Country,
- IN PWLAN_IE_QUIET pIE_Quiet,
- IN PKnownBSS pBSSList,
- IN UINT uIELength,
- IN PBYTE pbyIEs,
- IN HANDLE pRxPacketContext
- );
-
-
-BOOL
-BSSbIsSTAInNodeDB(
- IN HANDLE hDeviceContext,
- IN PBYTE abyDstAddr,
- OUT PUINT puNodeIndex
- );
-
-VOID
-BSSvCreateOneNode(
- IN HANDLE hDeviceContext,
- OUT PUINT puNodeIndex
- );
-
-VOID
-BSSvUpdateAPNode(
- IN HANDLE hDeviceContext,
- IN PWORD pwCapInfo,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pExtSuppRates
- );
-
-
-VOID
-BSSvSecondCallBack(
- IN HANDLE hDeviceContext
- );
-
-
-VOID
-BSSvUpdateNodeTxCounter(
- IN HANDLE hDeviceContext,
- IN PSStatCounter pStatistic,
- IN BYTE byTSR,
- IN BYTE byPktNO
- );
-
-VOID
-BSSvRemoveOneNode(
- IN HANDLE hDeviceContext,
- IN UINT uNodeIndex
- );
-
-VOID
-BSSvAddMulticastNode(
- IN HANDLE hDeviceContext
- );
-
-
-VOID
-BSSvClearNodeDBTable(
- IN HANDLE hDeviceContext,
- IN UINT uStartIndex
- );
-
-VOID
-BSSvClearAnyBSSJoinRecord(
- IN HANDLE hDeviceContext
- );
-
-#endif //__BSSDB_H__
+PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
+ PBYTE pbyDesireBSSID,
+ PBYTE pbyDesireSSID,
+ CARD_PHY_TYPE ePhyType);
+
+PKnownBSS BSSpAddrIsInBSSList(void *hDeviceContext,
+ PBYTE abyBSSID,
+ PWLAN_IE_SSID pSSID);
+
+void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID);
+
+BOOL BSSbInsertToBSSList(void *hDeviceContext,
+ PBYTE abyBSSIDAddr,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ unsigned int uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext);
+
+BOOL BSSbUpdateToBSSList(void *hDeviceContext,
+ QWORD qwTimestamp,
+ WORD wBeaconInterval,
+ WORD wCapInfo,
+ BYTE byCurrChannel,
+ BOOL bChannelHit,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pSuppRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates,
+ PERPObject psERP,
+ PWLAN_IE_RSN pRSN,
+ PWLAN_IE_RSN_EXT pRSNWPA,
+ PWLAN_IE_COUNTRY pIE_Country,
+ PWLAN_IE_QUIET pIE_Quiet,
+ PKnownBSS pBSSList,
+ unsigned int uIELength,
+ PBYTE pbyIEs,
+ void *pRxPacketContext);
+
+BOOL BSSbIsSTAInNodeDB(void *hDeviceContext,
+ PBYTE abyDstAddr,
+ PUINT puNodeIndex);
+
+void BSSvCreateOneNode(void *hDeviceContext, PUINT puNodeIndex);
+
+void BSSvUpdateAPNode(void *hDeviceContext,
+ PWORD pwCapInfo,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pExtSuppRates);
+
+void BSSvSecondCallBack(void *hDeviceContext);
+
+void BSSvUpdateNodeTxCounter(void *hDeviceContext,
+ PSStatCounter pStatistic,
+ BYTE byTSR,
+ BYTE byPktNO);
+
+void BSSvRemoveOneNode(void *hDeviceContext,
+ unsigned int uNodeIndex);
+
+void BSSvAddMulticastNode(void *hDeviceContext);
+
+void BSSvClearNodeDBTable(void *hDeviceContext,
+ unsigned int uStartIndex);
+
+void BSSvClearAnyBSSJoinRecord(void *hDeviceContext);
+
+#endif /* __BSSDB_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index d73efeea2d5..fe4ec913ffe 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -95,7 +95,7 @@ const WORD cwRXBCNTSFOff[MAX_RATE] =
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbSetMediaChannel (PVOID pDeviceHandler, UINT uConnectionChannel)
+BOOL CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -156,11 +156,10 @@ BOOL bResult = TRUE;
* Return Value: response Control frame rate
*
*/
-static
-WORD swGetCCKControlRate(PVOID pDeviceHandler, WORD wRateIdx)
+static WORD swGetCCKControlRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ui = (UINT)wRateIdx;
+ unsigned int ui = (unsigned int)wRateIdx;
while (ui > RATE_1M) {
if (pDevice->wBasicRate & ((WORD)1 << ui)) {
return (WORD)ui;
@@ -183,11 +182,10 @@ WORD swGetCCKControlRate(PVOID pDeviceHandler, WORD wRateIdx)
* Return Value: response Control frame rate
*
*/
-static
-WORD swGetOFDMControlRate (PVOID pDeviceHandler, WORD wRateIdx)
+static WORD swGetOFDMControlRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ui = (UINT)wRateIdx;
+ unsigned int ui = (unsigned int)wRateIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BASIC RATE: %X\n", pDevice->wBasicRate);
@@ -222,12 +220,12 @@ WORD swGetOFDMControlRate (PVOID pDeviceHandler, WORD wRateIdx)
* Return Value: none
*
*/
-VOID
+void
CARDvCaculateOFDMRParameter (
- IN WORD wRate,
- IN BYTE byBBType,
- OUT PBYTE pbyTxRate,
- OUT PBYTE pbyRsvTime
+ WORD wRate,
+ BYTE byBBType,
+ PBYTE pbyTxRate,
+ PBYTE pbyRsvTime
)
{
switch (wRate) {
@@ -334,7 +332,7 @@ CARDvCaculateOFDMRParameter (
* Return Value: None.
*
*/
-void CARDvSetRSPINF (PVOID pDeviceHandler, BYTE byBBType)
+void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BYTE abyServ[4] = {0,0,0,0}; // For CCK
@@ -486,7 +484,7 @@ void CARDvSetRSPINF (PVOID pDeviceHandler, BYTE byBBType)
* Return Value: None.
*
*/
-void vUpdateIFS (PVOID pDeviceHandler)
+void vUpdateIFS(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
//Set SIFS, DIFS, EIFS, SlotTime, CwMin
@@ -510,7 +508,7 @@ void vUpdateIFS (PVOID pDeviceHandler)
else {// PK_TYPE_11GA & PK_TYPE_11GB
BYTE byRate = 0;
BOOL bOFDMRate = FALSE;
- UINT ii = 0;
+ unsigned int ii = 0;
PWLAN_IE_SUPP_RATES pItemRates = NULL;
pDevice->uSIFS = C_SIFS_BG;
@@ -571,7 +569,7 @@ void vUpdateIFS (PVOID pDeviceHandler)
&byMaxMin);
}
-void CARDvUpdateBasicTopRate (PVOID pDeviceHandler)
+void CARDvUpdateBasicTopRate(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BYTE byTopOFDM = RATE_24M, byTopCCK = RATE_1M;
@@ -610,7 +608,7 @@ BYTE ii;
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbAddBasicRate (PVOID pDeviceHandler, WORD wRateIdx)
+BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
WORD wRate = (WORD)(1<<wRateIdx);
@@ -623,7 +621,7 @@ WORD wRate = (WORD)(1<<wRateIdx);
return(TRUE);
}
-BOOL CARDbIsOFDMinBasicRate (PVOID pDeviceHandler)
+BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int ii;
@@ -635,7 +633,7 @@ int ii;
return FALSE;
}
-BYTE CARDbyGetPktType (PVOID pDeviceHandler)
+BYTE CARDbyGetPktType(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -707,7 +705,8 @@ QWORD CARDqGetTSFOffset (BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2)
* Return Value: none
*
*/
-void CARDvAdjustTSF (PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF)
+void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
+ QWORD qwBSSTimestamp, QWORD qwLocalTSF)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -756,7 +755,7 @@ void CARDvAdjustTSF (PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp,
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbGetCurrentTSF (PVOID pDeviceHandler, PQWORD pqwCurrTSF)
+BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -778,7 +777,7 @@ BOOL CARDbGetCurrentTSF (PVOID pDeviceHandler, PQWORD pqwCurrTSF)
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbClearCurrentTSF(PVOID pDeviceHandler)
+BOOL CARDbClearCurrentTSF(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -807,9 +806,9 @@ BOOL CARDbClearCurrentTSF(PVOID pDeviceHandler)
QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
{
- UINT uLowNextTBTT;
- UINT uHighRemain, uLowRemain;
- UINT uBeaconInterval;
+ unsigned int uLowNextTBTT;
+ unsigned int uHighRemain, uLowRemain;
+ unsigned int uBeaconInterval;
uBeaconInterval = wBeaconInterval * 1024;
// Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval
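
The hunk above only retypes CARDqGetNextTBTT()'s locals, but the arithmetic described in the comment is worth restating: the next TBTT is the next multiple of the beacon interval (in 1024-microsecond TUs) strictly after the current TSF. A minimal standalone sketch, using a plain 64-bit integer instead of the driver's QWORD/split-DWORD representation:

    #include <stdint.h>

    /* Illustrative only, not the driver's implementation. One TU is 1024 us. */
    static uint64_t next_tbtt(uint64_t tsf, uint16_t beacon_interval_tu)
    {
            uint64_t interval = (uint64_t)beacon_interval_tu * 1024;

            /* ((local_current_TSF / beacon_interval) + 1) * beacon_interval */
            return (tsf / interval + 1) * interval;
    }
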
@@ -844,7 +843,7 @@ QWORD CARDqGetNextTBTT (QWORD qwTSF, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvSetFirstNextTBTT (PVOID pDeviceHandler, WORD wBeaconInterval)
+void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
@@ -898,7 +897,8 @@ void CARDvSetFirstNextTBTT (PVOID pDeviceHandler, WORD wBeaconInterval)
* Return Value: none
*
*/
-void CARDvUpdateNextTBTT (PVOID pDeviceHandler, QWORD qwTSF, WORD wBeaconInterval)
+void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
+ WORD wBeaconInterval)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
DWORD dwLoTBTT,dwHiTBTT;
@@ -945,7 +945,7 @@ void CARDvUpdateNextTBTT (PVOID pDeviceHandler, QWORD qwTSF, WORD wBeaconInterva
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbRadioPowerOff (PVOID pDeviceHandler)
+BOOL CARDbRadioPowerOff(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -986,7 +986,7 @@ BOOL bResult = TRUE;
* Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL CARDbRadioPowerOn (PVOID pDeviceHandler)
+BOOL CARDbRadioPowerOn(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bResult = TRUE;
@@ -1019,7 +1019,7 @@ BOOL bResult = TRUE;
return bResult;
}
-void CARDvSetBSSMode (PVOID pDeviceHandler)
+void CARDvSetBSSMode(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
// Set BB and packet type at the same time.//{{RobertYu:20050222, AL7230 have two TX PA output, only connet to b/g now
@@ -1080,10 +1080,10 @@ void CARDvSetBSSMode (PVOID pDeviceHandler)
-*/
BOOL
CARDbChannelSwitch (
- IN PVOID pDeviceHandler,
- IN BYTE byMode,
- IN BYTE byNewChannel,
- IN BYTE byCount
+ void *pDeviceHandler,
+ BYTE byMode,
+ BYTE byNewChannel,
+ BYTE byCount
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
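
Every conversion in this file follows the same opaque-handle convention: the exported CARD routines take a bare void * and immediately cast it back to PSDevice, so callers never need the full device definition. A compilable sketch of that pattern using hypothetical names (my_dev, my_set_channel) rather than the real driver types:

    struct my_dev {
            unsigned int channel;
    };

    /* The handle is anonymous at the call site... */
    static int my_set_channel(void *handle, unsigned int channel)
    {
            /* ...and the implementation recovers the concrete type first. */
            struct my_dev *dev = handle;

            dev->channel = channel;
            return 0;
    }
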
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index aa90c2cb446..6c91343d0d1 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -33,63 +33,57 @@
/*--------------------- Export Definitions -------------------------*/
-
/*--------------------- Export Classes ----------------------------*/
-// Init card type
+/* init card type */
typedef enum _CARD_PHY_TYPE {
-
- PHY_TYPE_AUTO=0,
+ PHY_TYPE_AUTO = 0,
PHY_TYPE_11B,
PHY_TYPE_11G,
PHY_TYPE_11A
} CARD_PHY_TYPE, *PCARD_PHY_TYPE;
typedef enum _CARD_OP_MODE {
-
- OP_MODE_INFRASTRUCTURE=0,
+ OP_MODE_INFRASTRUCTURE = 0,
OP_MODE_ADHOC,
OP_MODE_AP,
OP_MODE_UNKNOWN
} CARD_OP_MODE, *PCARD_OP_MODE;
#define CB_MAX_CHANNEL_24G 14
-//#define CB_MAX_CHANNEL_5G 24
-#define CB_MAX_CHANNEL_5G 42 //[20050104] add channel9(5045MHz), 41==>42
+/* #define CB_MAX_CHANNEL_5G 24 */
+#define CB_MAX_CHANNEL_5G 42 /* add channel9(5045MHz), 41==>42 */
#define CB_MAX_CHANNEL (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BOOL CARDbSetMediaChannel(PVOID pDeviceHandler, UINT uConnectionChannel);
-void CARDvSetRSPINF(PVOID pDeviceHandler, BYTE byBBType);
-void vUpdateIFS(PVOID pDeviceHandler);
-void CARDvUpdateBasicTopRate(PVOID pDeviceHandler);
-BOOL CARDbAddBasicRate(PVOID pDeviceHandler, WORD wRateIdx);
-BOOL CARDbIsOFDMinBasicRate(PVOID pDeviceHandler);
-void CARDvAdjustTSF(PVOID pDeviceHandler, BYTE byRxRate, QWORD qwBSSTimestamp, QWORD qwLocalTSF);
-BOOL CARDbGetCurrentTSF (PVOID pDeviceHandler, PQWORD pqwCurrTSF);
-BOOL CARDbClearCurrentTSF(PVOID pDeviceHandler);
-void CARDvSetFirstNextTBTT(PVOID pDeviceHandler, WORD wBeaconInterval);
-void CARDvUpdateNextTBTT(PVOID pDeviceHandler, QWORD qwTSF, WORD wBeaconInterval);
+BOOL CARDbSetMediaChannel(void *pDeviceHandler,
+ unsigned int uConnectionChannel);
+void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType);
+void vUpdateIFS(void *pDeviceHandler);
+void CARDvUpdateBasicTopRate(void *pDeviceHandler);
+BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
+BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
+void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
+ QWORD qwBSSTimestamp, QWORD qwLocalTSF);
+BOOL CARDbGetCurrentTSF(void *pDeviceHandler, PQWORD pqwCurrTSF);
+BOOL CARDbClearCurrentTSF(void *pDeviceHandler);
+void CARDvSetFirstNextTBTT(void *pDeviceHandler, WORD wBeaconInterval);
+void CARDvUpdateNextTBTT(void *pDeviceHandler, QWORD qwTSF,
+ WORD wBeaconInterval);
QWORD CARDqGetNextTBTT(QWORD qwTSF, WORD wBeaconInterval);
QWORD CARDqGetTSFOffset(BYTE byRxRate, QWORD qwTSF1, QWORD qwTSF2);
-BOOL CARDbRadioPowerOff(PVOID pDeviceHandler);
-BOOL CARDbRadioPowerOn(PVOID pDeviceHandler);
-BYTE CARDbyGetPktType(PVOID pDeviceHandler);
-void CARDvSetBSSMode(PVOID pDeviceHandler);
-
-BOOL
-CARDbChannelSwitch (
- IN PVOID pDeviceHandler,
- IN BYTE byMode,
- IN BYTE byNewChannel,
- IN BYTE byCount
- );
-
-#endif // __CARD_H__
-
+BOOL CARDbRadioPowerOff(void *pDeviceHandler);
+BOOL CARDbRadioPowerOn(void *pDeviceHandler);
+BYTE CARDbyGetPktType(void *pDeviceHandler);
+void CARDvSetBSSMode(void *pDeviceHandler);
+BOOL CARDbChannelSwitch(void *pDeviceHandler,
+ BYTE byMode,
+ BYTE byNewChannel,
+ BYTE byCount);
+#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index f7136b0073b..f49b6e13339 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -116,7 +116,7 @@ static SChannelTblElement sChannelTbl[CB_MAX_CHANNEL+1] =
static struct
{
BYTE byChannelCountryCode; /* The country code */
- CHAR chCountryCode[2];
+ char chCountryCode[2];
BYTE bChannelIdxList[CB_MAX_CHANNEL]; /* Available channels Index */
BYTE byPower[CB_MAX_CHANNEL];
} ChannelRuleTab[] =
@@ -389,7 +389,7 @@ static struct
// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 (Value 23 ~ 56)
************************************************************************/
BOOL
-ChannelValid(UINT CountryCode, UINT ChannelIndex)
+ChannelValid(unsigned int CountryCode, unsigned int ChannelIndex)
{
BOOL bValid;
@@ -425,8 +425,8 @@ exit:
************************************************************************/
BOOL
CHvChannelGetList (
- IN UINT uCountryCodeIdx,
- OUT PBYTE pbyChannelTable
+ unsigned int uCountryCodeIdx,
+ PBYTE pbyChannelTable
)
{
if (uCountryCodeIdx >= CCODE_MAX) {
@@ -437,11 +437,11 @@ CHvChannelGetList (
}
-VOID CHvInitChannelTable (PVOID pDeviceHandler)
+void CHvInitChannelTable(void *pDeviceHandler)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
BOOL bMultiBand = FALSE;
- UINT ii;
+ unsigned int ii;
for(ii=1;ii<=CB_MAX_CHANNEL;ii++) {
sChannelTbl[ii].bValid = FALSE;
diff --git a/drivers/staging/vt6656/channel.h b/drivers/staging/vt6656/channel.h
index 2306b202565..91c2ffc6f1f 100644
--- a/drivers/staging/vt6656/channel.h
+++ b/drivers/staging/vt6656/channel.h
@@ -37,21 +37,21 @@
/*--------------------- Export Classes ----------------------------*/
typedef struct tagSChannelTblElement {
BYTE byChannelNumber;
- UINT uFrequency;
+ unsigned int uFrequency;
BOOL bValid;
}SChannelTblElement, *PSChannelTblElement;
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
-BOOL ChannelValid(UINT CountryCode, UINT ChannelNum);
-VOID CHvInitChannelTable (PVOID pDeviceHandler);
+BOOL ChannelValid(unsigned int CountryCode, unsigned int ChannelNum);
+void CHvInitChannelTable(void *pDeviceHandler);
BYTE CHbyGetChannelMapping(BYTE byChannelNumber);
BOOL
CHvChannelGetList (
- IN UINT uCountryCodeIdx,
- OUT PBYTE pbyChannelTable
+ unsigned int uCountryCodeIdx,
+ PBYTE pbyChannelTable
);
#endif /* _REGULATE_H_ */
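
The channel code above boils down to a per-country lookup: ChannelValid() answers whether a channel index is permitted for a regulatory domain and CHvChannelGetList() copies that domain's table out. A toy, self-contained analogue of the idea (the domains and bitmaps below are made up for illustration, not taken from ChannelRuleTab):

    /* Two fictional domains; bit N-1 set means channel N is allowed. */
    static const unsigned short allowed_channels[2] = {
            0x07ff,         /* domain 0: channels 1..11 */
            0x1fff,         /* domain 1: channels 1..13 */
    };

    static int channel_valid(unsigned int domain, unsigned int channel)
    {
            if (domain >= 2 || channel < 1 || channel > 14)
                    return 0;
            return (allowed_channels[domain] >> (channel - 1)) & 1;
    }
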
diff --git a/drivers/staging/vt6656/control.c b/drivers/staging/vt6656/control.c
index 7dba7710e59..8aab6718ff4 100644
--- a/drivers/staging/vt6656/control.c
+++ b/drivers/staging/vt6656/control.c
@@ -30,11 +30,13 @@
* CONTROLnsRequestIn - Read variable length bytes from MEM/BB/MAC/EEPROM
* ControlvWriteByte - Write one byte to MEM/BB/MAC/EEPROM
* ControlvReadByte - Read one byte from MEM/BB/MAC/EEPROM
- * ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set some bits in the same address
+ * ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set
+ * some bits in the same address
*
* Revision History:
* 04-05-2004 Jerry Chen: Initial release
- * 11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,ControlvMaskByte
+ * 11-24-2004 Warren Hsu: Add ControlvWriteByte, ControlvReadByte,
+ * ControlvMaskByte
*
*/
@@ -42,8 +44,8 @@
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
-//static int msglevel =MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+/* static int msglevel =MSG_LEVEL_INFO; */
+/* static int msglevel =MSG_LEVEL_DEBUG; */
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
@@ -54,56 +56,43 @@
/*--------------------- Export Functions --------------------------*/
-
-void ControlvWriteByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs, BYTE byData)
+void ControlvWriteByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
+ BYTE byData)
{
-BYTE byData1;
-
- byData1 = byData;
-
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- byRegOfs,
- byRegType,
- 1,
- &byData1
- );
-
+ BYTE byData1;
+ byData1 = byData;
+ CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_WRITE,
+ byRegOfs,
+ byRegType,
+ 1,
+ &byData1);
}
-
-void ControlvReadByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs, PBYTE pbyData)
+void ControlvReadByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
+ PBYTE pbyData)
{
-NTSTATUS ntStatus;
-BYTE byData1;
-
-
- ntStatus = CONTROLnsRequestIn(pDevice,
- MESSAGE_TYPE_READ,
- byRegOfs,
- byRegType,
- 1,
- &byData1);
-
- *pbyData = byData1;
-
+ NTSTATUS ntStatus;
+ BYTE byData1;
+ ntStatus = CONTROLnsRequestIn(pDevice,
+ MESSAGE_TYPE_READ,
+ byRegOfs,
+ byRegType,
+ 1,
+ &byData1);
+ *pbyData = byData1;
}
-
-
-void ControlvMaskByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs, BYTE byMask, BYTE byData)
+void ControlvMaskByte(PSDevice pDevice, BYTE byRegType, BYTE byRegOfs,
+ BYTE byMask, BYTE byData)
{
-BYTE pbyData[2];
-
- pbyData[0] = byData;
- pbyData[1] = byMask;
-
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE_MASK,
- byRegOfs,
- byRegType,
- 2,
- pbyData
- );
-
+ BYTE pbyData[2];
+ pbyData[0] = byData;
+ pbyData[1] = byMask;
+ CONTROLnsRequestOut(pDevice,
+ MESSAGE_TYPE_WRITE_MASK,
+ byRegOfs,
+ byRegType,
+ 2,
+ pbyData);
}
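
ControlvMaskByte() above ships a two-byte payload, data in byte 0 and mask in byte 1, with MESSAGE_TYPE_WRITE_MASK; the read-modify-write itself happens on the device side. The intended effect is presumably the usual masked update, which this self-contained helper models (an illustration of the semantics, not code from the driver):

    #include <stdint.h>

    /* Bits set in mask are taken from data; all other bits keep their old value. */
    static uint8_t masked_write(uint8_t old, uint8_t data, uint8_t mask)
    {
            return (uint8_t)((old & ~mask) | (data & mask));
    }

Under that model, masked_write(0x5a, 0xff, 0x0f) yields 0x5f: only the low nibble changes.
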
diff --git a/drivers/staging/vt6656/control.h b/drivers/staging/vt6656/control.h
index 4d9a777a706..146b450e13d 100644
--- a/drivers/staging/vt6656/control.h
+++ b/drivers/staging/vt6656/control.h
@@ -54,30 +54,27 @@
/*--------------------- Export Functions --------------------------*/
void ControlvWriteByte(
- IN PSDevice pDevice,
- IN BYTE byRegType,
- IN BYTE byRegOfs,
- IN BYTE byData
+ PSDevice pDevice,
+ BYTE byRegType,
+ BYTE byRegOfs,
+ BYTE byData
);
void ControlvReadByte(
- IN PSDevice pDevice,
- IN BYTE byRegType,
- IN BYTE byRegOfs,
- IN PBYTE pbyData
+ PSDevice pDevice,
+ BYTE byRegType,
+ BYTE byRegOfs,
+ PBYTE pbyData
);
void ControlvMaskByte(
- IN PSDevice pDevice,
- IN BYTE byRegType,
- IN BYTE byRegOfs,
- IN BYTE byMask,
- IN BYTE byData
+ PSDevice pDevice,
+ BYTE byRegType,
+ BYTE byRegOfs,
+ BYTE byMask,
+ BYTE byData
);
-#endif // __RCV_H__
-
-
-
+#endif /* __CONTROL_H__ */
diff --git a/drivers/staging/vt6656/datarate.c b/drivers/staging/vt6656/datarate.c
index 968feb466b0..2e183ddbfd0 100644
--- a/drivers/staging/vt6656/datarate.c
+++ b/drivers/staging/vt6656/datarate.c
@@ -65,16 +65,9 @@ const BYTE acbyIERate[MAX_RATE] =
/*--------------------- Static Functions --------------------------*/
-VOID s_vResetCounter (
- IN PKnownNodeDB psNodeDBTable
- );
+void s_vResetCounter(PKnownNodeDB psNodeDBTable);
-
-
-VOID
-s_vResetCounter (
- IN PKnownNodeDB psNodeDBTable
- )
+void s_vResetCounter(PKnownNodeDB psNodeDBTable)
{
BYTE ii;
@@ -107,7 +100,7 @@ s_vResetCounter (
-*/
BYTE
DATARATEbyGetRateIdx (
- IN BYTE byRate
+ BYTE byRate
)
{
BYTE ii;
@@ -161,7 +154,7 @@ DATARATEbyGetRateIdx (
-*/
WORD
RATEwGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
)
{
WORD ii;
@@ -195,25 +188,24 @@ RATEwGetRateIdx(
* Return Value: none
*
-*/
-VOID
-RATEvParseMaxRate (
- IN PVOID pDeviceHandler,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pItemExtRates,
- IN BOOL bUpdateBasicRate,
- OUT PWORD pwMaxBasicRate,
- OUT PWORD pwMaxSuppRate,
- OUT PWORD pwSuppRate,
- OUT PBYTE pbyTopCCKRate,
- OUT PBYTE pbyTopOFDMRate
+void RATEvParseMaxRate(
+ void *pDeviceHandler,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pItemExtRates,
+ BOOL bUpdateBasicRate,
+ PWORD pwMaxBasicRate,
+ PWORD pwMaxSuppRate,
+ PWORD pwSuppRate,
+ PBYTE pbyTopCCKRate,
+ PBYTE pbyTopOFDMRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-UINT ii;
+unsigned int ii;
BYTE byHighSuppRate = 0;
BYTE byRate = 0;
WORD wOldBasicRate = pDevice->wBasicRate;
-UINT uRateLen;
+unsigned int uRateLen;
if (pItemRates == NULL)
@@ -236,7 +228,7 @@ UINT uRateLen;
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
(bUpdateBasicRate == TRUE)) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
- CARDbAddBasicRate((PVOID)pDevice, RATEwGetRateIdx(byRate));
+ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
@@ -249,7 +241,7 @@ UINT uRateLen;
if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
(pDevice->byBBType != BB_TYPE_11B)) {
- UINT uExtRateLen = pItemExtRates->len;
+ unsigned int uExtRateLen = pItemExtRates->len;
if (uExtRateLen > WLAN_RATES_MAXLEN)
uExtRateLen = WLAN_RATES_MAXLEN;
@@ -259,7 +251,7 @@ UINT uRateLen;
// select highest basic rate
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
- CARDbAddBasicRate((PVOID)pDevice, RATEwGetRateIdx(byRate));
+ CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
byRate = (BYTE)(pItemExtRates->abyRates[ii]&0x7F);
@@ -272,7 +264,8 @@ UINT uRateLen;
}
} //if(pItemExtRates != NULL)
- if ((pDevice->byPacketType == PK_TYPE_11GB) && CARDbIsOFDMinBasicRate((PVOID)pDevice)) {
+ if ((pDevice->byPacketType == PK_TYPE_11GB)
+ && CARDbIsOFDMinBasicRate((void *)pDevice)) {
pDevice->byPacketType = PK_TYPE_11GA;
}
@@ -284,7 +277,7 @@ UINT uRateLen;
else
*pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
if (wOldBasicRate != pDevice->wBasicRate)
- CARDvSetRSPINF((PVOID)pDevice, pDevice->byBBType);
+ CARDvSetRSPINF((void *)pDevice, pDevice->byBBType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}
@@ -308,17 +301,17 @@ UINT uRateLen;
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
-VOID
-RATEvTxRateFallBack (
- IN PVOID pDeviceHandler,
- IN PKnownNodeDB psNodeDBTable
+void
+RATEvTxRateFallBack(
+ void *pDeviceHandler,
+ PKnownNodeDB psNodeDBTable
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
#if 1 //mike fixed old: use packet lose ratio algorithm to control rate
WORD wIdxDownRate = 0;
-UINT ii;
+unsigned int ii;
BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
DWORD dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
DWORD dwThroughput = 0;
@@ -399,7 +392,7 @@ DWORD dwTxDiff = 0;
#else //mike fixed new: use differ-signal strength to control rate
WORD wIdxUpRate = 0;
BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
-UINT ii;
+unsigned int ii;
long ldBm;
if (pMgmt->eScanState != WMAC_NO_SCANNING) {
@@ -473,12 +466,12 @@ if (wIdxUpRate == RATE_54M ) { //11a/g
-*/
BYTE
RATEuSetIE (
- IN PWLAN_IE_SUPP_RATES pSrcRates,
- IN PWLAN_IE_SUPP_RATES pDstRates,
- IN UINT uRateLen
+ PWLAN_IE_SUPP_RATES pSrcRates,
+ PWLAN_IE_SUPP_RATES pDstRates,
+ unsigned int uRateLen
)
{
- UINT ii, uu, uRateCnt = 0;
+ unsigned int ii, uu, uRateCnt = 0;
if ((pSrcRates == NULL) || (pDstRates == NULL))
return 0;
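
The masking that RATEvParseMaxRate() and RATEuSetIE() do with 0x80 and 0x7F comes straight from the 802.11 Supported Rates element: each rate octet carries the rate in 500 kb/s units in its low seven bits, and the top bit marks it as part of the BSS basic rate set (what WLAN_MGMT_IS_BASICRATE tests). A small standalone decoder of that encoding:

    #include <stdio.h>

    int main(void)
    {
            /* 1, 2, 5.5, 11 Mb/s flagged basic; 6 and 9 Mb/s optional. */
            const unsigned char rates[] = { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12 };
            unsigned int i;

            for (i = 0; i < sizeof(rates); i++) {
                    unsigned int half_mbps = rates[i] & 0x7f; /* 500 kb/s units */

                    printf("%u.%u Mb/s%s\n", half_mbps / 2, (half_mbps % 2) * 5,
                           (rates[i] & 0x80) ? " (basic)" : "");
            }
            return 0;
    }
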
diff --git a/drivers/staging/vt6656/datarate.h b/drivers/staging/vt6656/datarate.h
index 68f206e2707..c6f5163ff9b 100644
--- a/drivers/staging/vt6656/datarate.h
+++ b/drivers/staging/vt6656/datarate.h
@@ -69,42 +69,41 @@
-VOID
+void
RATEvParseMaxRate(
- IN PVOID pDeviceHandler,
- IN PWLAN_IE_SUPP_RATES pItemRates,
- IN PWLAN_IE_SUPP_RATES pItemExtRates,
- IN BOOL bUpdateBasicRate,
- OUT PWORD pwMaxBasicRate,
- OUT PWORD pwMaxSuppRate,
- OUT PWORD pwSuppRate,
- OUT PBYTE pbyTopCCKRate,
- OUT PBYTE pbyTopOFDMRate
+ void *pDeviceHandler,
+ PWLAN_IE_SUPP_RATES pItemRates,
+ PWLAN_IE_SUPP_RATES pItemExtRates,
+ BOOL bUpdateBasicRate,
+ PWORD pwMaxBasicRate,
+ PWORD pwMaxSuppRate,
+ PWORD pwSuppRate,
+ PBYTE pbyTopCCKRate,
+ PBYTE pbyTopOFDMRate
);
-VOID
+void
RATEvTxRateFallBack(
- IN PVOID pDeviceHandler,
- IN PKnownNodeDB psNodeDBTable
+ void *pDeviceHandler,
+ PKnownNodeDB psNodeDBTable
);
BYTE
RATEuSetIE(
- IN PWLAN_IE_SUPP_RATES pSrcRates,
- IN PWLAN_IE_SUPP_RATES pDstRates,
- IN UINT uRateLen
+ PWLAN_IE_SUPP_RATES pSrcRates,
+ PWLAN_IE_SUPP_RATES pDstRates,
+ unsigned int uRateLen
);
WORD
RATEwGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
);
BYTE
DATARATEbyGetRateIdx(
- IN BYTE byRate
+ BYTE byRate
);
-
-#endif //__DATARATE_H__
+#endif /* __DATARATE_H__ */
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index f10530fcc99..07f794ec6db 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -206,8 +206,8 @@ typedef const SRrvTime_atim *PCSRrvTime_atim;
typedef struct tagSRTSData {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
- BYTE abyTA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
+ BYTE abyTA[ETH_ALEN];
}__attribute__ ((__packed__))
SRTSData, *PSRTSData;
typedef const SRTSData *PCSRTSData;
@@ -282,7 +282,7 @@ typedef const SRTS_a_FB *PCSRTS_a_FB;
typedef struct tagSCTSData {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
WORD wReserved;
}__attribute__ ((__packed__))
SCTSData, *PSCTSData;
@@ -436,8 +436,4 @@ SKeyEntry;
/*--------------------- Export Functions --------------------------*/
-
-
-
-#endif // __DESC_H__
-
+#endif /* __DESC_H__ */
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 8b541d1d0e2..ef9fd97d3ca 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -107,7 +107,7 @@
#define MAC_MAX_CONTEXT_REG (256+128)
#define MAX_MULTICAST_ADDRESS_NUM 32
-#define MULTICAST_ADDRESS_LIST_SIZE (MAX_MULTICAST_ADDRESS_NUM * U_ETHER_ADDR_LEN)
+#define MULTICAST_ADDRESS_LIST_SIZE (MAX_MULTICAST_ADDRESS_NUM * ETH_ALEN)
//#define OP_MODE_INFRASTRUCTURE 0
@@ -209,9 +209,9 @@ typedef enum _CONTEXT_TYPE {
// RCB (Receive Control Block)
typedef struct _RCB
{
- PVOID Next;
- LONG Ref;
- PVOID pDevice;
+ void *Next;
+ signed long Ref;
+ void *pDevice;
struct urb *pUrb;
SRxMgmtPacket sMngPacket;
struct sk_buff* skb;
@@ -222,34 +222,33 @@ typedef struct _RCB
// used to track bulk out irps
typedef struct _USB_SEND_CONTEXT {
- PVOID pDevice;
+ void *pDevice;
struct sk_buff *pPacket;
struct urb *pUrb;
- UINT uBufLen;
+ unsigned int uBufLen;
CONTEXT_TYPE Type;
SEthernetHeader sEthHeader;
- PVOID Next;
+ void *Next;
BOOL bBoolInUse;
- UCHAR Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
+ unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
} USB_SEND_CONTEXT, *PUSB_SEND_CONTEXT;
-//structure got from configuration file as user desired default setting.
-typedef struct _DEFAULT_CONFIG{
- INT ZoneType;
- INT eConfigMode;
- INT eAuthenMode; //open/wep/wpa
- INT bShareKeyAlgorithm; //open-open/open-sharekey/wep-sharekey
- INT keyidx; //wepkey index
- INT eEncryptionStatus;
-
-}DEFAULT_CONFIG,*PDEFAULT_CONFIG;
+/* structure read from the configuration file; holds user-desired defaults */
+typedef struct _DEFAULT_CONFIG {
+ signed int ZoneType;
+ signed int eConfigMode;
+ signed int eAuthenMode; /* open/wep/wpa */
+ signed int bShareKeyAlgorithm; /* open-open/{open,wep}-sharekey */
+ signed int keyidx; /* wepkey index */
+ signed int eEncryptionStatus;
+} DEFAULT_CONFIG, *PDEFAULT_CONFIG;
//
// Structure to keep track of usb interrupt packets
//
typedef struct {
- UINT uDataLen;
+ unsigned int uDataLen;
PBYTE pDataBuf;
// struct urb *pUrb;
BOOL bInUse;
@@ -296,7 +295,7 @@ typedef enum __DEVICE_NDIS_STATUS {
#define NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED 0x01
// PMKID Structures
-typedef UCHAR NDIS_802_11_PMKID_VALUE[16];
+typedef unsigned char NDIS_802_11_PMKID_VALUE[16];
typedef enum _NDIS_802_11_WEP_STATUS
@@ -328,7 +327,7 @@ typedef enum _NDIS_802_11_STATUS_TYPE
//Added new types for PMKID Candidate lists.
typedef struct _PMKID_CANDIDATE {
NDIS_802_11_MAC_ADDRESS BSSID;
- ULONG Flags;
+ unsigned long Flags;
} PMKID_CANDIDATE, *PPMKID_CANDIDATE;
@@ -339,15 +338,15 @@ typedef struct _BSSID_INFO
} BSSID_INFO, *PBSSID_INFO;
typedef struct tagSPMKID {
- ULONG Length;
- ULONG BSSIDInfoCount;
+ unsigned long Length;
+ unsigned long BSSIDInfoCount;
BSSID_INFO BSSIDInfo[MAX_BSSIDINFO_4_PMKID];
} SPMKID, *PSPMKID;
typedef struct tagSPMKIDCandidateEvent {
NDIS_802_11_STATUS_TYPE StatusType;
- ULONG Version; // Version of the structure
- ULONG NumCandidates; // No. of pmkid candidates
+ unsigned long Version; /* Version of the structure */
+ unsigned long NumCandidates; /* No. of pmkid candidates */
PMKID_CANDIDATE CandidateList[MAX_PMKIDLIST];
} SPMKIDCandidateEvent, *PSPMKIDCandidateEvent;
@@ -369,7 +368,7 @@ typedef struct tagSQuietControl {
// The receive duplicate detection cache entry
typedef struct tagSCacheEntry{
WORD wFmSequence;
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
+ BYTE abyAddr2[ETH_ALEN];
WORD wFrameCtl;
} SCacheEntry, *PSCacheEntry;
@@ -377,7 +376,7 @@ typedef struct tagSCache{
/* The receive cache is updated circularly. The next entry to be written is
* indexed by the "InPtr".
*/
- UINT uInPtr; // Place to use next
+ unsigned int uInPtr; /* Place to use next */
SCacheEntry asCacheEntry[DUPLICATE_RX_CACHE_LENGTH];
} SCache, *PSCache;
@@ -387,11 +386,11 @@ typedef struct tagSDeFragControlBlock
{
WORD wSequence;
WORD wFragNum;
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
- UINT uLifetime;
+ BYTE abyAddr2[ETH_ALEN];
+ unsigned int uLifetime;
struct sk_buff* skb;
PBYTE pbyRxBuffer;
- UINT cbFrameLength;
+ unsigned int cbFrameLength;
BOOL bInUse;
} SDeFragControlBlock, *PSDeFragControlBlock;
@@ -435,7 +434,7 @@ typedef struct __device_opt {
int short_retry;
int long_retry;
int bbp_type;
- U32 flags;
+ u32 flags;
} OPTIONS, *POPTIONS;
@@ -453,25 +452,25 @@ typedef struct __device_info {
struct tasklet_struct ReadWorkItem;
struct tasklet_struct RxMngWorkItem;
- U32 rx_buf_sz;
+ u32 rx_buf_sz;
int multicast_limit;
BYTE byRxMode;
spinlock_t lock;
- U32 rx_bytes;
+ u32 rx_bytes;
BYTE byRevId;
- U32 flags;
- ULONG Flags;
+ u32 flags;
+ unsigned long Flags;
SCache sDupRxCache;
SDeFragControlBlock sRxDFCB[CB_MAX_RX_FRAG];
- UINT cbDFCB;
- UINT cbFreeDFCB;
- UINT uCurrentDFCBIdx;
+ unsigned int cbDFCB;
+ unsigned int cbFreeDFCB;
+ unsigned int uCurrentDFCBIdx;
// +++USB
@@ -479,29 +478,29 @@ typedef struct __device_info {
struct urb *pInterruptURB;
struct usb_ctrlrequest sUsbCtlRequest;
- UINT int_interval;
+ unsigned int int_interval;
//
// Variables to track resources for the BULK In Pipe
//
PRCB pRCBMem;
PRCB apRCB[CB_MAX_RX_DESC];
- UINT cbRD;
+ unsigned int cbRD;
PRCB FirstRecvFreeList;
PRCB LastRecvFreeList;
- UINT NumRecvFreeList;
+ unsigned int NumRecvFreeList;
PRCB FirstRecvMngList;
PRCB LastRecvMngList;
- UINT NumRecvMngList;
+ unsigned int NumRecvMngList;
BOOL bIsRxWorkItemQueued;
BOOL bIsRxMngWorkItemQueued;
- ULONG ulRcvRefCount; // number of packets that have not been returned back
+ unsigned long ulRcvRefCount; /* packets that have not yet been returned */
//
// Variables to track resources for the BULK Out Pipe
//
PUSB_SEND_CONTEXT apTD[CB_MAX_TX_DESC];
- UINT cbTD;
+ unsigned int cbTD;
//
// Variables to track resources for the Interript In Pipe
@@ -518,20 +517,20 @@ typedef struct __device_info {
//
// Statistic for USB
// protect with spinlock
- ULONG ulBulkInPosted;
- ULONG ulBulkInError;
- ULONG ulBulkInContCRCError;
- ULONG ulBulkInBytesRead;
+ unsigned long ulBulkInPosted;
+ unsigned long ulBulkInError;
+ unsigned long ulBulkInContCRCError;
+ unsigned long ulBulkInBytesRead;
- ULONG ulBulkOutPosted;
- ULONG ulBulkOutError;
- ULONG ulBulkOutContCRCError;
- ULONG ulBulkOutBytesWrite;
+ unsigned long ulBulkOutPosted;
+ unsigned long ulBulkOutError;
+ unsigned long ulBulkOutContCRCError;
+ unsigned long ulBulkOutBytesWrite;
- ULONG ulIntInPosted;
- ULONG ulIntInError;
- ULONG ulIntInContCRCError;
- ULONG ulIntInBytesRead;
+ unsigned long ulIntInPosted;
+ unsigned long ulIntInError;
+ unsigned long ulIntInContCRCError;
+ unsigned long ulIntInBytesRead;
// Version control
@@ -547,10 +546,10 @@ typedef struct __device_info {
BYTE byOriginalZonetype;
BOOL bLinkPass; // link status: OK or fail
- BYTE abyCurrentNetAddr[U_ETHER_ADDR_LEN];
- BYTE abyPermanentNetAddr[U_ETHER_ADDR_LEN];
+ BYTE abyCurrentNetAddr[ETH_ALEN];
+ BYTE abyPermanentNetAddr[ETH_ALEN];
// SW network address
-// BYTE abySoftwareNetAddr[U_ETHER_ADDR_LEN];
+ /* u8 abySoftwareNetAddr[ETH_ALEN]; */
BOOL bExistSWNetAddr;
// Adapter statistics
@@ -561,24 +560,24 @@ typedef struct __device_info {
//
// Maintain statistical debug info.
//
- ULONG packetsReceived;
- ULONG packetsReceivedDropped;
- ULONG packetsReceivedOverflow;
- ULONG packetsSent;
- ULONG packetsSentDropped;
- ULONG SendContextsInUse;
- ULONG RcvBuffersInUse;
+ unsigned long packetsReceived;
+ unsigned long packetsReceivedDropped;
+ unsigned long packetsReceivedOverflow;
+ unsigned long packetsSent;
+ unsigned long packetsSentDropped;
+ unsigned long SendContextsInUse;
+ unsigned long RcvBuffersInUse;
// 802.11 management
SMgmtObject sMgmtObj;
QWORD qwCurrTSF;
- UINT cbBulkInMax;
+ unsigned int cbBulkInMax;
BOOL bPSRxBeacon;
// 802.11 MAC specific
- UINT uCurrRSSI;
+ unsigned int uCurrRSSI;
BYTE byCurrSQ;
@@ -599,30 +598,31 @@ typedef struct __device_info {
BOOL bDiversityRegCtlON;
BOOL bDiversityEnable;
- ULONG ulDiversityNValue;
- ULONG ulDiversityMValue;
+ unsigned long ulDiversityNValue;
+ unsigned long ulDiversityMValue;
BYTE byTMax;
BYTE byTMax2;
BYTE byTMax3;
- ULONG ulSQ3TH;
+ unsigned long ulSQ3TH;
- ULONG uDiversityCnt;
+ unsigned long uDiversityCnt;
BYTE byAntennaState;
- ULONG ulRatio_State0;
- ULONG ulRatio_State1;
- ULONG ulSQ3_State0;
- ULONG ulSQ3_State1;
-
- ULONG aulSQ3Val[MAX_RATE];
- ULONG aulPktNum[MAX_RATE];
-
- // IFS & Cw
- UINT uSIFS; //Current SIFS
- UINT uDIFS; //Current DIFS
- UINT uEIFS; //Current EIFS
- UINT uSlot; //Current SlotTime
- UINT uCwMin; //Current CwMin
- UINT uCwMax; //CwMax is fixed on 1023.
+ unsigned long ulRatio_State0;
+ unsigned long ulRatio_State1;
+ unsigned long ulSQ3_State0;
+ unsigned long ulSQ3_State1;
+
+ unsigned long aulSQ3Val[MAX_RATE];
+ unsigned long aulPktNum[MAX_RATE];
+
+ /* IFS & Cw */
+ unsigned int uSIFS; /* Current SIFS */
+ unsigned int uDIFS; /* Current DIFS */
+ unsigned int uEIFS; /* Current EIFS */
+ unsigned int uSlot; /* Current SlotTime */
+ unsigned int uCwMin; /* Current CwMin */
+ unsigned int uCwMax; /* CwMax is fixed on 1023 */
+
// PHY parameter
BYTE bySIFS;
BYTE byDIFS;
@@ -647,7 +647,7 @@ typedef struct __device_info {
BYTE byMinChannel;
BYTE byMaxChannel;
- UINT uConnectionRate;
+ unsigned int uConnectionRate;
BYTE byPreambleType;
BYTE byShortPreamble;
@@ -671,8 +671,8 @@ typedef struct __device_info {
CARD_OP_MODE eOPMode;
BOOL bBSSIDFilter;
WORD wMaxTransmitMSDULifetime;
- BYTE abyBSSID[U_ETHER_ADDR_LEN];
- BYTE abyDesireBSSID[U_ETHER_ADDR_LEN];
+ BYTE abyBSSID[ETH_ALEN];
+ BYTE abyDesireBSSID[ETH_ALEN];
WORD wCTSDuration; // update while speed change
WORD wACKDuration; // update while speed change
WORD wRTSTransmitLen; // update while speed change
@@ -701,7 +701,7 @@ typedef struct __device_info {
WORD wListenInterval;
BOOL bPWBitOn;
WMAC_POWER_MODE ePSMode;
- ULONG ulPSModeWaitTx;
+ unsigned long ulPSModeWaitTx;
BOOL bPSModeTxBurst;
// Beacon releated
@@ -710,7 +710,7 @@ typedef struct __device_info {
BOOL bBeaconSent;
BOOL bFixRate;
BYTE byCurrentCh;
- UINT uScanTime;
+ unsigned int uScanTime;
CMD_STATE eCommandState;
@@ -721,15 +721,15 @@ typedef struct __device_info {
BOOL bStopBeacon;
BOOL bStopDataPkt;
BOOL bStopTx0Pkt;
- UINT uAutoReConnectTime;
- UINT uIsroamingTime;
+ unsigned int uAutoReConnectTime;
+ unsigned int uIsroamingTime;
// 802.11 counter
CMD_ITEM eCmdQueue[CMD_Q_SIZE];
- UINT uCmdDequeueIdx;
- UINT uCmdEnqueueIdx;
- UINT cbFreeCmdQueue;
+ unsigned int uCmdDequeueIdx;
+ unsigned int uCmdEnqueueIdx;
+ unsigned int cbFreeCmdQueue;
BOOL bCmdRunning;
BOOL bCmdClear;
BOOL bNeedRadioOFF;
@@ -741,7 +741,7 @@ typedef struct __device_info {
BYTE bSameBSSCurNum; //DavidWang
BOOL bRoaming;
BOOL b11hEable;
- ULONG ulTxPower;
+ unsigned long ulTxPower;
// Encryption
NDIS_802_11_WEP_STATUS eEncryptionStatus;
@@ -762,11 +762,11 @@ typedef struct __device_info {
BOOL bAES;
BYTE byCntMeasure;
- UINT uKeyLength;
+ unsigned int uKeyLength;
BYTE abyKey[WLAN_WEP232_KEYLEN];
// for AP mode
- UINT uAssocCount;
+ unsigned int uAssocCount;
BOOL bMoreData;
// QoS
@@ -781,11 +781,11 @@ typedef struct __device_info {
// For Update BaseBand VGA Gain Offset
BOOL bUpdateBBVGA;
- UINT uBBVGADiffCount;
+ unsigned int uBBVGADiffCount;
BYTE byBBVGANew;
BYTE byBBVGACurrent;
BYTE abyBBVGA[BB_VGA_LEVEL];
- LONG ldBmThreshold[BB_VGA_LEVEL];
+ signed long ldBmThreshold[BB_VGA_LEVEL];
BYTE byBBPreEDRSSI;
BYTE byBBPreEDIndex;
@@ -813,7 +813,7 @@ typedef struct __device_info {
//2007-0115-01<Add>by MikeLiu
#ifdef TxInSleep
struct timer_list sTimerTxData;
- ULONG nTxDataTimeCout;
+ unsigned long nTxDataTimeCout;
BOOL fTxDataInSleep;
BOOL IsTxDataTrigger;
#endif
@@ -826,9 +826,9 @@ typedef struct __device_info {
SEthernetHeader sTxEthHeader;
SEthernetHeader sRxEthHeader;
- BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN];
- BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN];
- BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN];
+ BYTE abyBroadcastAddr[ETH_ALEN];
+ BYTE abySNAP_RFC1042[ETH_ALEN];
+ BYTE abySNAP_Bridgetunnel[ETH_ALEN];
// Pre-Authentication & PMK cache
SPMKID gsPMKID;
@@ -864,7 +864,7 @@ typedef struct __device_info {
struct net_device *apdev;
int (*tx_80211)(struct sk_buff *skb, struct net_device *dev);
#endif
- UINT uChannel;
+ unsigned int uChannel;
struct iw_statistics wstats; // wireless stats
BOOL bCommit;
@@ -929,7 +929,9 @@ typedef struct __device_info {
/*--------------------- Export Functions --------------------------*/
-//BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, UINT uNodeIndex);
+/* BOOL device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb,
+ * unsigned int uNodeIndex);
+ */
BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF);
#endif
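
Two themes run through the device.h hunk: MAC address buffers are now sized with the standard ETH_ALEN instead of the private U_ETHER_ADDR_LEN, and the Windows-style integer typedefs (ULONG, UINT, LONG, U32) give way to plain C and kernel types. A minimal kernel-side illustration of the resulting declarations (example_stats is a hypothetical struct, not one from the driver):

    #include <linux/types.h>        /* u32 */
    #include <linux/if_ether.h>     /* ETH_ALEN (6) */

    struct example_stats {
            unsigned char addr[ETH_ALEN];   /* was BYTE addr[U_ETHER_ADDR_LEN] */
            unsigned long rx_bytes;         /* was ULONG */
            unsigned int ref_count;         /* was UINT */
            u32 flags;                      /* was U32 */
    };
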
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 835c6d6967b..9afe76cacef 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -74,70 +74,69 @@ const BYTE acbyRxRate[MAX_RATE] =
/*--------------------- Static Functions --------------------------*/
-static BYTE s_byGetRateIdx(IN BYTE byRate);
-
+static BYTE s_byGetRateIdx(BYTE byRate);
static
-VOID
+void
s_vGetDASA(
- IN PBYTE pbyRxBufferAddr,
- OUT PUINT pcbHeaderSize,
- OUT PSEthernetHeader psEthHeader
+ PBYTE pbyRxBufferAddr,
+ PUINT pcbHeaderSize,
+ PSEthernetHeader psEthHeader
);
static
-VOID
+void
s_vProcessRxMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyRxBufferAddr,
- IN UINT cbPacketSize,
- IN BOOL bIsWEP,
- IN BOOL bExtIV,
- OUT PUINT pcbHeadSize
+ PSDevice pDevice,
+ PBYTE pbyRxBufferAddr,
+ unsigned int cbPacketSize,
+ BOOL bIsWEP,
+ BOOL bExtIV,
+ PUINT pcbHeadSize
);
static BOOL s_bAPModeRxCtl(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN INT iSANodeIndex
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ signed int iSANodeIndex
);
static BOOL s_bAPModeRxData (
- IN PSDevice pDevice,
- IN struct sk_buff* skb,
- IN UINT FrameSize,
- IN UINT cbHeaderOffset,
- IN INT iSANodeIndex,
- IN INT iDANodeIndex
+ PSDevice pDevice,
+ struct sk_buff *skb,
+ unsigned int FrameSize,
+ unsigned int cbHeaderOffset,
+ signed int iSANodeIndex,
+ signed int iDANodeIndex
);
static BOOL s_bHandleRxEncryption(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- OUT PBYTE pbyNewRsr,
- OUT PSKeyItem *pKeyOut,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ unsigned int FrameSize,
+ PBYTE pbyRsr,
+ PBYTE pbyNewRsr,
+ PSKeyItem * pKeyOut,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
);
static BOOL s_bHostWepRxEncryption(
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- IN BOOL bOnFly,
- IN PSKeyItem pKey,
- OUT PBYTE pbyNewRsr,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ unsigned int FrameSize,
+ PBYTE pbyRsr,
+ BOOL bOnFly,
+ PSKeyItem pKey,
+ PBYTE pbyNewRsr,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
);
@@ -161,18 +160,18 @@ static BOOL s_bHostWepRxEncryption(
*
-*/
static
-VOID
+void
s_vProcessRxMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyRxBufferAddr,
- IN UINT cbPacketSize,
- IN BOOL bIsWEP,
- IN BOOL bExtIV,
- OUT PUINT pcbHeadSize
+ PSDevice pDevice,
+ PBYTE pbyRxBufferAddr,
+ unsigned int cbPacketSize,
+ BOOL bIsWEP,
+ BOOL bExtIV,
+ PUINT pcbHeadSize
)
{
PBYTE pbyRxBuffer;
- UINT cbHeaderSize = 0;
+ unsigned int cbHeaderSize = 0;
PWORD pwType;
PS802_11Header pMACHeader;
int ii;
@@ -234,11 +233,11 @@ s_vProcessRxMACHeader (
}
}
- cbHeaderSize -= (U_ETHER_ADDR_LEN * 2);
+ cbHeaderSize -= (ETH_ALEN * 2);
pbyRxBuffer = (PBYTE) (pbyRxBufferAddr + cbHeaderSize);
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for (ii = 0; ii < ETH_ALEN; ii++)
*pbyRxBuffer++ = pDevice->sRxEthHeader.abyDstAddr[ii];
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for (ii = 0; ii < ETH_ALEN; ii++)
*pbyRxBuffer++ = pDevice->sRxEthHeader.abySrcAddr[ii];
*pcbHeadSize = cbHeaderSize;
@@ -247,7 +246,7 @@ s_vProcessRxMACHeader (
-static BYTE s_byGetRateIdx (IN BYTE byRate)
+static BYTE s_byGetRateIdx(BYTE byRate)
{
BYTE byRateIdx;
@@ -260,50 +259,55 @@ static BYTE s_byGetRateIdx (IN BYTE byRate)
static
-VOID
+void
s_vGetDASA (
- IN PBYTE pbyRxBufferAddr,
- OUT PUINT pcbHeaderSize,
- OUT PSEthernetHeader psEthHeader
+ PBYTE pbyRxBufferAddr,
+ PUINT pcbHeaderSize,
+ PSEthernetHeader psEthHeader
)
{
- UINT cbHeaderSize = 0;
- PS802_11Header pMACHeader;
- int ii;
-
- pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
-
- if ((pMACHeader->wFrameCtl & FC_TODS) == 0) {
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr3[ii];
- }
- }
- else {
- // IBSS mode
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr1[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- }
- else {
- // Is AP mode..
- if (pMACHeader->wFrameCtl & FC_FROMDS) {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr4[ii];
- cbHeaderSize += 6;
- }
- }
- else {
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++) {
- psEthHeader->abyDstAddr[ii] = pMACHeader->abyAddr3[ii];
- psEthHeader->abySrcAddr[ii] = pMACHeader->abyAddr2[ii];
- }
- }
- };
+ unsigned int cbHeaderSize = 0;
+ PS802_11Header pMACHeader;
+ int ii;
+
+ pMACHeader = (PS802_11Header) (pbyRxBufferAddr + cbHeaderSize);
+
+ if ((pMACHeader->wFrameCtl & FC_TODS) == 0) {
+ if (pMACHeader->wFrameCtl & FC_FROMDS) {
+ for (ii = 0; ii < ETH_ALEN; ii++) {
+ psEthHeader->abyDstAddr[ii] =
+ pMACHeader->abyAddr1[ii];
+ psEthHeader->abySrcAddr[ii] =
+ pMACHeader->abyAddr3[ii];
+ }
+ } else {
+ /* IBSS mode */
+ for (ii = 0; ii < ETH_ALEN; ii++) {
+ psEthHeader->abyDstAddr[ii] =
+ pMACHeader->abyAddr1[ii];
+ psEthHeader->abySrcAddr[ii] =
+ pMACHeader->abyAddr2[ii];
+ }
+ }
+ } else {
+ /* Is AP mode.. */
+ if (pMACHeader->wFrameCtl & FC_FROMDS) {
+ for (ii = 0; ii < ETH_ALEN; ii++) {
+ psEthHeader->abyDstAddr[ii] =
+ pMACHeader->abyAddr3[ii];
+ psEthHeader->abySrcAddr[ii] =
+ pMACHeader->abyAddr4[ii];
+ cbHeaderSize += 6;
+ }
+ } else {
+ for (ii = 0; ii < ETH_ALEN; ii++) {
+ psEthHeader->abyDstAddr[ii] =
+ pMACHeader->abyAddr3[ii];
+ psEthHeader->abySrcAddr[ii] =
+ pMACHeader->abyAddr2[ii];
+ }
+ }
+ };
*pcbHeaderSize = cbHeaderSize;
}
@@ -312,9 +316,9 @@ s_vGetDASA (
BOOL
RXbBulkInProcessData (
- IN PSDevice pDevice,
- IN PRCB pRCB,
- IN ULONG BytesToIndicate
+ PSDevice pDevice,
+ PRCB pRCB,
+ unsigned long BytesToIndicate
)
{
@@ -329,26 +333,26 @@ RXbBulkInProcessData (
PQWORD pqwTSFTime;
PBYTE pbyFrame;
BOOL bDeFragRx = FALSE;
- UINT cbHeaderOffset;
- UINT FrameSize;
+ unsigned int cbHeaderOffset;
+ unsigned int FrameSize;
WORD wEtherType = 0;
- INT iSANodeIndex = -1;
- INT iDANodeIndex = -1;
- UINT ii;
- UINT cbIVOffset;
+ signed int iSANodeIndex = -1;
+ signed int iDANodeIndex = -1;
+ unsigned int ii;
+ unsigned int cbIVOffset;
PBYTE pbyRxSts;
PBYTE pbyRxRate;
PBYTE pbySQ;
#ifdef Calcu_LinkQual
PBYTE pby3SQ;
#endif
- UINT cbHeaderSize;
+ unsigned int cbHeaderSize;
PSKeyItem pKey = NULL;
WORD wRxTSC15_0 = 0;
DWORD dwRxTSC47_16 = 0;
SKeyItem STempKey;
// 802.11h RPI
- //LONG ldBm = 0;
+ /* signed long ldBm = 0; */
BOOL bIsWEP = FALSE;
BOOL bExtIV = FALSE;
DWORD dwWbkStatus;
@@ -368,7 +372,7 @@ RXbBulkInProcessData (
//[31:16]RcvByteCount ( not include 4-byte Status )
dwWbkStatus = *( (PDWORD)(skb->data) );
- FrameSize = (UINT)(dwWbkStatus >> 16);
+ FrameSize = (unsigned int)(dwWbkStatus >> 16);
FrameSize += 4;
if (BytesToIndicate != FrameSize) {
@@ -930,9 +934,9 @@ RXbBulkInProcessData (
if (bIsWEP) {
WORD wLocalTSC15_0 = 0;
DWORD dwLocalTSC47_16 = 0;
- ULONGLONG RSC = 0;
+ unsigned long long RSC = 0;
// endian issues
- RSC = *((ULONGLONG *) &(pKey->KeyRSC));
+ RSC = *((unsigned long long *) &(pKey->KeyRSC));
wLocalTSC15_0 = (WORD) RSC;
dwLocalTSC47_16 = (DWORD) (RSC>>16);
@@ -1017,9 +1021,9 @@ RXbBulkInProcessData (
static BOOL s_bAPModeRxCtl (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN INT iSANodeIndex
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ signed int iSANodeIndex
)
{
PS802_11Header p802_11Header;
@@ -1065,7 +1069,9 @@ static BOOL s_bAPModeRxCtl (
// delcare received ps-poll event
if (IS_CTL_PSPOLL(pbyFrame)) {
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_RX_PSPOLL,
+ NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 1\n");
}
else {
@@ -1074,7 +1080,9 @@ static BOOL s_bAPModeRxCtl (
if (!IS_FC_POWERMGT(pbyFrame)) {
pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_RX_PSPOLL,
+ NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 2\n");
}
}
@@ -1090,7 +1098,9 @@ static BOOL s_bAPModeRxCtl (
if (pMgmt->sNodeDBTable[iSANodeIndex].wEnQueueCnt > 0) {
pMgmt->sNodeDBTable[iSANodeIndex].bPSEnable = FALSE;
pMgmt->sNodeDBTable[iSANodeIndex].bRxPSPoll = TRUE;
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_RX_PSPOLL,
+ NULL);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dpc: WLAN_CMD_RX_PSPOLL 3\n");
}
@@ -1139,18 +1149,18 @@ static BOOL s_bAPModeRxCtl (
}
static BOOL s_bHandleRxEncryption (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- OUT PBYTE pbyNewRsr,
- OUT PSKeyItem *pKeyOut,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ unsigned int FrameSize,
+ PBYTE pbyRsr,
+ PBYTE pbyNewRsr,
+ PSKeyItem * pKeyOut,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
)
{
- UINT PayloadLen = FrameSize;
+ unsigned int PayloadLen = FrameSize;
PBYTE pbyIV;
BYTE byKeyIdx;
PSKeyItem pKey = NULL;
@@ -1285,20 +1295,20 @@ static BOOL s_bHandleRxEncryption (
static BOOL s_bHostWepRxEncryption (
- IN PSDevice pDevice,
- IN PBYTE pbyFrame,
- IN UINT FrameSize,
- IN PBYTE pbyRsr,
- IN BOOL bOnFly,
- IN PSKeyItem pKey,
- OUT PBYTE pbyNewRsr,
+ PSDevice pDevice,
+ PBYTE pbyFrame,
+ unsigned int FrameSize,
+ PBYTE pbyRsr,
+ BOOL bOnFly,
+ PSKeyItem pKey,
+ PBYTE pbyNewRsr,
int * pbExtIV,
- OUT PWORD pwRxTSC15_0,
- OUT PDWORD pdwRxTSC47_16
+ PWORD pwRxTSC15_0,
+ PDWORD pdwRxTSC47_16
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT PayloadLen = FrameSize;
+ unsigned int PayloadLen = FrameSize;
PBYTE pbyIV;
BYTE byKeyIdx;
BYTE byDecMode = KEY_CTL_WEP;
@@ -1417,12 +1427,12 @@ static BOOL s_bHostWepRxEncryption (
static BOOL s_bAPModeRxData (
- IN PSDevice pDevice,
- IN struct sk_buff* skb,
- IN UINT FrameSize,
- IN UINT cbHeaderOffset,
- IN INT iSANodeIndex,
- IN INT iDANodeIndex
+ PSDevice pDevice,
+ struct sk_buff *skb,
+ unsigned int FrameSize,
+ unsigned int cbHeaderOffset,
+ signed int iSANodeIndex,
+ signed int iDANodeIndex
)
{
@@ -1493,7 +1503,8 @@ static BOOL s_bAPModeRxData (
iDANodeIndex = 0;
if ((pDevice->uAssocCount > 1) && (iDANodeIndex >= 0)) {
- bRelayPacketSend(pDevice, (PBYTE)(skb->data + cbHeaderOffset), FrameSize, (UINT)iDANodeIndex);
+ bRelayPacketSend(pDevice, (PBYTE) (skb->data + cbHeaderOffset),
+ FrameSize, (unsigned int) iDANodeIndex);
}
if (bRelayOnly)
@@ -1509,10 +1520,7 @@ static BOOL s_bAPModeRxData (
-VOID
-RXvWorkItem(
- PVOID Context
- )
+void RXvWorkItem(void *Context)
{
PSDevice pDevice = (PSDevice) Context;
NTSTATUS ntStatus;
@@ -1535,10 +1543,10 @@ RXvWorkItem(
}
-VOID
+void
RXvFreeRCB(
- IN PRCB pRCB,
- IN BOOL bReAllocSkb
+ PRCB pRCB,
+ BOOL bReAllocSkb
)
{
PSDevice pDevice = (PSDevice)pRCB->pDevice;
@@ -1575,10 +1583,7 @@ RXvFreeRCB(
}
-VOID
-RXvMngWorkItem(
- PVOID Context
- )
+void RXvMngWorkItem(void *Context)
{
PSDevice pDevice = (PSDevice) Context;
PRCB pRCB=NULL;
@@ -1598,7 +1603,7 @@ RXvMngWorkItem(
}
ASSERT(pRCB);// cannot be NULL
pRxPacket = &(pRCB->sMngPacket);
- vMgrRxManagePacket((HANDLE)pDevice, &(pDevice->sMgmtObj), pRxPacket);
+ vMgrRxManagePacket((void *) pDevice, &(pDevice->sMgmtObj), pRxPacket);
pRCB->Ref--;
if(pRCB->Ref == 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeMng %d %d\n",pDevice->NumRecvFreeList, pDevice->NumRecvMngList);
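
The reindented s_vGetDASA() encodes the standard 802.11 data-frame address mapping: which of Addr1..Addr4 carries the destination and source depends on the ToDS/FromDS bits in the frame control field. A compact standalone restatement of that mapping (the struct and function names here are illustrative):

    #include <string.h>

    #define ADDR_LEN 6

    struct hdr80211 {
            unsigned char addr1[ADDR_LEN];
            unsigned char addr2[ADDR_LEN];
            unsigned char addr3[ADDR_LEN];
            unsigned char addr4[ADDR_LEN];
    };

    /* ToDS=0,FromDS=0: DA=A1 SA=A2    ToDS=0,FromDS=1: DA=A1 SA=A3
     * ToDS=1,FromDS=0: DA=A3 SA=A2    ToDS=1,FromDS=1: DA=A3 SA=A4 (WDS) */
    static void get_da_sa(const struct hdr80211 *h, int to_ds, int from_ds,
                          unsigned char *da, unsigned char *sa)
    {
            memcpy(da, to_ds ? h->addr3 : h->addr1, ADDR_LEN);
            if (to_ds)
                    memcpy(sa, from_ds ? h->addr4 : h->addr2, ADDR_LEN);
            else
                    memcpy(sa, from_ds ? h->addr3 : h->addr2, ADDR_LEN);
    }
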
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index df148b45396..d4fca43af4f 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -41,30 +41,21 @@
/*--------------------- Export Functions --------------------------*/
-VOID
-RXvWorkItem(
- PVOID Context
- );
+void RXvWorkItem(void *Context);
-VOID
-RXvMngWorkItem(
- PVOID Context
- );
+void RXvMngWorkItem(void *Context);
-VOID
+void
RXvFreeRCB(
- IN PRCB pRCB,
- IN BOOL bReAllocSkb
+ PRCB pRCB,
+ BOOL bReAllocSkb
);
BOOL
RXbBulkInProcessData(
- IN PSDevice pDevice,
- IN PRCB pRCB,
- IN ULONG BytesToIndicate
+ PSDevice pDevice,
+ PRCB pRCB,
+ unsigned long BytesToIndicate
);
-#endif // __RXTX_H__
-
-
-
+#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
index 585b6b12c5b..e1f96d7086f 100644
--- a/drivers/staging/vt6656/firmware.c
+++ b/drivers/staging/vt6656/firmware.c
@@ -770,7 +770,7 @@ const BYTE abyFirmware[] = {
BOOL
FIRMWAREbDownload(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
NDIS_STATUS NdisStatus;
@@ -820,7 +820,7 @@ FIRMWAREbDownload(
BOOL
FIRMWAREbBrach2Sram(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
NDIS_STATUS NdisStatus;
@@ -845,7 +845,7 @@ FIRMWAREbBrach2Sram(
BOOL
FIRMWAREbCheckVersion(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
NTSTATUS ntStatus;
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
index 97f8559f2a5..b2f5b5818a9 100644
--- a/drivers/staging/vt6656/firmware.h
+++ b/drivers/staging/vt6656/firmware.h
@@ -43,18 +43,17 @@
BOOL
FIRMWAREbDownload(
- IN PSDevice pDevice
+ PSDevice pDevice
);
BOOL
FIRMWAREbBrach2Sram(
- IN PSDevice pDevice
+ PSDevice pDevice
);
BOOL
FIRMWAREbCheckVersion(
- IN PSDevice pDevice
+ PSDevice pDevice
);
-
-#endif // __FIRMWARE_H__
+#endif /* __FIRMWARE_H__ */
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index 1078d616c49..f70e922a615 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -91,10 +91,9 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
- pDevice->apdev = (struct net_device *)kmalloc(sizeof(struct net_device), GFP_KERNEL);
+ pDevice->apdev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
if (pDevice->apdev == NULL)
return -ENOMEM;
- memset(pDevice->apdev, 0, sizeof(struct net_device));
apdev_priv = netdev_priv(pDevice->apdev);
*apdev_priv = *pDevice;
@@ -178,7 +177,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
*
*/
-int hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
+int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
{
if (val < 0 || val > 1)
return -EINVAL;
@@ -211,7 +210,7 @@ int hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked)
static int hostap_remove_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
@@ -240,7 +239,7 @@ static int hostap_add_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (!BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
@@ -300,7 +299,7 @@ static int hostap_get_info_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
param->u.get_info_sta.inactive_sec =
@@ -334,7 +333,7 @@ static int hostap_reset_txexc_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts = 0;
@@ -364,13 +363,13 @@ static int hostap_set_flags_sta(PSDevice pDevice,
struct viawget_hostapd_param *param)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
if (BSSbIsSTAInNodeDB(pDevice, param->sta_addr, &uNodeIndex)) {
pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or;
pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " dwFlags = %x \n",
- (UINT)pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " dwFlags = %x\n",
+ (unsigned int) pMgmt->sNodeDBTable[uNodeIndex].dwFlags);
}
else {
return -ENOENT;
@@ -744,7 +743,7 @@ static int hostap_get_encryption(PSDevice pDevice,
/*
* Description:
- * hostap_ioctl main function supported for hostap deamon.
+ * vt6656_hostap_ioctl: main ioctl entry point for the hostap daemon.
*
* Parameters:
* In:
@@ -756,7 +755,7 @@ static int hostap_get_encryption(PSDevice pDevice,
*
*/
-int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
+int vt6656_hostap_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_hostapd_param *param;
int ret = 0;
@@ -766,7 +765,7 @@ int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = (struct viawget_hostapd_param *) kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, (int)GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
@@ -844,7 +843,7 @@ int hostap_ioctl(PSDevice pDevice, struct iw_point *p)
return -EOPNOTSUPP;
default:
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_ioctl: unknown cmd=%d\n",
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6656_hostap_ioctl: unknown cmd=%d\n",
(int)param->cmd);
return -EOPNOTSUPP;
break;
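
The allocation change at the top of this file is the common kmalloc()+memset() to kzalloc() conversion: one call both allocates and zeroes, which is shorter and leaves no window for a forgotten memset. The same idea in portable, runnable C using calloc() (ap_state is a made-up type):

    #include <stdlib.h>

    struct ap_state {
            int flags;
            char name[16];
    };

    static struct ap_state *ap_state_new(void)
    {
            /* calloc() here plays the role kzalloc() plays in the hunk above:
             * allocate and zero-fill in a single step. */
            return calloc(1, sizeof(struct ap_state));
    }
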
diff --git a/drivers/staging/vt6656/hostap.h b/drivers/staging/vt6656/hostap.h
index 8fd667b542b..b660aee1ca0 100644
--- a/drivers/staging/vt6656/hostap.h
+++ b/drivers/staging/vt6656/hostap.h
@@ -61,10 +61,7 @@
#define ARPHRD_IEEE80211 801
#endif
-int hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked);
-int hostap_ioctl(PSDevice pDevice, struct iw_point *p);
-
-#endif // __HOSTAP_H__
-
-
+int vt6656_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked);
+int vt6656_hostap_ioctl(PSDevice pDevice, struct iw_point *p);
+#endif /* __HOSTAP_H__ */
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 35053be6900..89f5b18bdf1 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -41,8 +41,8 @@
#include "usbpipe.h"
/*--------------------- Static Definitions -------------------------*/
-//static int msglevel =MSG_LEVEL_DEBUG;
-static int msglevel =MSG_LEVEL_INFO;
+/* static int msglevel = MSG_LEVEL_DEBUG; */
+static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Classes ----------------------------*/
@@ -74,120 +74,151 @@ static int msglevel =MSG_LEVEL_INFO;
*
* Notes:
*
- * USB reads are by nature 'Blocking', and when in a read, the device looks like it's
- * in a 'stall' condition, so we deliberately time out every second if we've gotten no data
+ * USB reads are by nature 'Blocking', and when in a read, the device looks
+ * like it's in a 'stall' condition, so we deliberately time out every second
+ * if we've gotten no data
*
-*/
-VOID
-INTvWorkItem(
- PVOID Context
- )
+void INTvWorkItem(void *Context)
{
- PSDevice pDevice = (PSDevice) Context;
- NTSTATUS ntStatus;
+ PSDevice pDevice = (PSDevice) Context;
+ NTSTATUS ntStatus;
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
-
- spin_lock_irq(&pDevice->lock);
- if (pDevice->fKillEventPollingThread != TRUE) {
- ntStatus = PIPEnsInterruptRead(pDevice);
- }
- spin_unlock_irq(&pDevice->lock);
-
- }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Interrupt Polling Thread\n");
+ spin_lock_irq(&pDevice->lock);
+ if (pDevice->fKillEventPollingThread != TRUE)
+ ntStatus = PIPEnsInterruptRead(pDevice);
+ spin_unlock_irq(&pDevice->lock);
+}
NTSTATUS
-INTnsProcessData(
- IN PSDevice pDevice
- )
+INTnsProcessData(PSDevice pDevice)
{
- NTSTATUS status = STATUS_SUCCESS;
- PSINTData pINTData;
- PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- struct net_device_stats* pStats = &pDevice->stats;
-
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n");
-
- pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
- if (pINTData->byTSR0 & TSR_VALID) {
- STAvUpdateTDStatCounter (&(pDevice->scStatistic), (BYTE) (pINTData->byPkt0 & 0x0F), (BYTE) (pINTData->byPkt0>>4), pINTData->byTSR0);
- BSSvUpdateNodeTxCounter (pDevice, &(pDevice->scStatistic), pINTData->byTSR0, pINTData->byPkt0);
- //DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));
- }
- if (pINTData->byTSR1 & TSR_VALID) {
- STAvUpdateTDStatCounter (&(pDevice->scStatistic), (BYTE) (pINTData->byPkt1 & 0x0F), (BYTE) (pINTData->byPkt1>>4), pINTData->byTSR1);
- BSSvUpdateNodeTxCounter (pDevice, &(pDevice->scStatistic), pINTData->byTSR1, pINTData->byPkt1);
- //DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));
- }
- if (pINTData->byTSR2 & TSR_VALID) {
- STAvUpdateTDStatCounter (&(pDevice->scStatistic), (BYTE) (pINTData->byPkt2 & 0x0F), (BYTE) (pINTData->byPkt2>>4), pINTData->byTSR2);
- BSSvUpdateNodeTxCounter (pDevice, &(pDevice->scStatistic), pINTData->byTSR2, pINTData->byPkt2);
- //DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));
- }
- if (pINTData->byTSR3 & TSR_VALID) {
- STAvUpdateTDStatCounter (&(pDevice->scStatistic), (BYTE) (pINTData->byPkt3 & 0x0F), (BYTE) (pINTData->byPkt3>>4), pINTData->byTSR3);
- BSSvUpdateNodeTxCounter (pDevice, &(pDevice->scStatistic), pINTData->byTSR3, pINTData->byPkt3);
- //DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));
- }
- if ( pINTData->byISR0 != 0 ) {
- if (pINTData->byISR0 & ISR_BNTX) {
-
- if (pDevice->eOPMode == OP_MODE_AP) {
- if(pMgmt->byDTIMCount > 0) {
- pMgmt->byDTIMCount --;
- pMgmt->sNodeDBTable[0].bRxPSPoll = FALSE;
- } else if(pMgmt->byDTIMCount == 0) {
- // check if mutltcast tx bufferring
- pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
- pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
- if (pMgmt->sNodeDBTable[0].bPSEnable) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
- }
- }
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_BECON_SEND, NULL);
- } // if (pDevice->eOPMode == OP_MODE_AP)
-
- pDevice->bBeaconSent = TRUE;
- } else {
- pDevice->bBeaconSent = FALSE;
- }
- if (pINTData->byISR0 & ISR_TBTT) {
- if ( pDevice->bEnablePSMode ) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_TBTT_WAKEUP, NULL);
- }
- if ( pDevice->bChannelSwitch ) {
- pDevice->byChannelSwitchCount--;
- if ( pDevice->byChannelSwitchCount == 0 ) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_11H_CHSW, NULL);
- }
- }
- }
- LODWORD(pDevice->qwCurrTSF) = pINTData->dwLoTSF;
- HIDWORD(pDevice->qwCurrTSF) = pINTData->dwHiTSF;
- //DBG_PRN_GRP01(("ISR0 = %02x ,LoTsf = %08x,HiTsf = %08x\n", pINTData->byISR0, pINTData->dwLoTSF,pINTData->dwHiTSF));
-
- STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic, pINTData->byRTSSuccess,
- pINTData->byRTSFail, pINTData->byACKFail, pINTData->byFCSErr );
- STAvUpdateIsrStatCounter(&pDevice->scStatistic, pINTData->byISR0, pINTData->byISR1);
-
- }
-
- if ( pINTData->byISR1 != 0 ) {
- if (pINTData->byISR1 & ISR_GPIO3) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_RADIO, NULL);
- }
- }
- pDevice->intBuf.uDataLen = 0;
- pDevice->intBuf.bInUse = FALSE;
-
- pStats->tx_packets = pDevice->scStatistic.ullTsrOK;
- pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes +
- pDevice->scStatistic.ullTxMulticastBytes +
- pDevice->scStatistic.ullTxBroadcastBytes;
- pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
- pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
-
- return status;
+ NTSTATUS status = STATUS_SUCCESS;
+ PSINTData pINTData;
+ PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
+ struct net_device_stats *pStats = &pDevice->stats;
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptProcessData\n");
+
+ pINTData = (PSINTData) pDevice->intBuf.pDataBuf;
+ if (pINTData->byTSR0 & TSR_VALID) {
+ STAvUpdateTDStatCounter(&(pDevice->scStatistic),
+ (BYTE) (pINTData->byPkt0 & 0x0F),
+ (BYTE) (pINTData->byPkt0>>4),
+ pINTData->byTSR0);
+ BSSvUpdateNodeTxCounter(pDevice,
+ &(pDevice->scStatistic),
+ pINTData->byTSR0,
+ pINTData->byPkt0);
+ /*DBG_PRN_GRP01(("TSR0 %02x\n", pINTData->byTSR0));*/
+ }
+ if (pINTData->byTSR1 & TSR_VALID) {
+ STAvUpdateTDStatCounter(&(pDevice->scStatistic),
+ (BYTE) (pINTData->byPkt1 & 0x0F),
+ (BYTE) (pINTData->byPkt1>>4),
+ pINTData->byTSR1);
+ BSSvUpdateNodeTxCounter(pDevice,
+ &(pDevice->scStatistic),
+ pINTData->byTSR1,
+ pINTData->byPkt1);
+ /*DBG_PRN_GRP01(("TSR1 %02x\n", pINTData->byTSR1));*/
+ }
+ if (pINTData->byTSR2 & TSR_VALID) {
+ STAvUpdateTDStatCounter(&(pDevice->scStatistic),
+ (BYTE) (pINTData->byPkt2 & 0x0F),
+ (BYTE) (pINTData->byPkt2>>4),
+ pINTData->byTSR2);
+ BSSvUpdateNodeTxCounter(pDevice,
+ &(pDevice->scStatistic),
+ pINTData->byTSR2,
+ pINTData->byPkt2);
+ /*DBG_PRN_GRP01(("TSR2 %02x\n", pINTData->byTSR2));*/
+ }
+ if (pINTData->byTSR3 & TSR_VALID) {
+ STAvUpdateTDStatCounter(&(pDevice->scStatistic),
+ (BYTE) (pINTData->byPkt3 & 0x0F),
+ (BYTE) (pINTData->byPkt3>>4),
+ pINTData->byTSR3);
+ BSSvUpdateNodeTxCounter(pDevice,
+ &(pDevice->scStatistic),
+ pINTData->byTSR3,
+ pINTData->byPkt3);
+ /*DBG_PRN_GRP01(("TSR3 %02x\n", pINTData->byTSR3));*/
+ }
+ if (pINTData->byISR0 != 0) {
+ if (pINTData->byISR0 & ISR_BNTX) {
+ if (pDevice->eOPMode == OP_MODE_AP) {
+ if (pMgmt->byDTIMCount > 0) {
+ pMgmt->byDTIMCount--;
+ pMgmt->sNodeDBTable[0].bRxPSPoll =
+ FALSE;
+ } else if (pMgmt->byDTIMCount == 0) {
+				/* check if multicast tx buffering */
+ pMgmt->byDTIMCount =
+ pMgmt->byDTIMPeriod-1;
+ pMgmt->sNodeDBTable[0].bRxPSPoll = TRUE;
+ if (pMgmt->sNodeDBTable[0].bPSEnable)
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_RX_PSPOLL,
+ NULL);
+ }
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BECON_SEND,
+ NULL);
+ } /* if (pDevice->eOPMode == OP_MODE_AP) */
+ pDevice->bBeaconSent = TRUE;
+ } else {
+ pDevice->bBeaconSent = FALSE;
+ }
+ if (pINTData->byISR0 & ISR_TBTT) {
+ if (pDevice->bEnablePSMode)
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_TBTT_WAKEUP,
+ NULL);
+ if (pDevice->bChannelSwitch) {
+ pDevice->byChannelSwitchCount--;
+ if (pDevice->byChannelSwitchCount == 0)
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_11H_CHSW,
+ NULL);
+ }
+ }
+ LODWORD(pDevice->qwCurrTSF) = pINTData->dwLoTSF;
+ HIDWORD(pDevice->qwCurrTSF) = pINTData->dwHiTSF;
+ /*DBG_PRN_GRP01(("ISR0 = %02x ,
+ LoTsf = %08x,
+ HiTsf = %08x\n",
+ pINTData->byISR0,
+ pINTData->dwLoTSF,
+ pINTData->dwHiTSF)); */
+
+ STAvUpdate802_11Counter(&pDevice->s802_11Counter,
+ &pDevice->scStatistic,
+ pINTData->byRTSSuccess,
+ pINTData->byRTSFail,
+ pINTData->byACKFail,
+ pINTData->byFCSErr);
+ STAvUpdateIsrStatCounter(&pDevice->scStatistic,
+ pINTData->byISR0,
+ pINTData->byISR1);
+ }
+
+ if (pINTData->byISR1 != 0)
+ if (pINTData->byISR1 & ISR_GPIO3)
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_RADIO,
+ NULL);
+ pDevice->intBuf.uDataLen = 0;
+ pDevice->intBuf.bInUse = FALSE;
+
+ pStats->tx_packets = pDevice->scStatistic.ullTsrOK;
+ pStats->tx_bytes = pDevice->scStatistic.ullTxDirectedBytes +
+ pDevice->scStatistic.ullTxMulticastBytes +
+ pDevice->scStatistic.ullTxBroadcastBytes;
+ pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
+ pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
+
+ return status;
}
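
The rewritten s_nsInterruptProcessData() above applies one pattern to each of the four TSR slots: test a per-slot valid bit in the status byte, then split the companion packet byte into its low and high nibbles before updating the counters. Below is a minimal userspace sketch of that pattern only; the TSR_VALID value, the struct layout and what the two nibbles mean are assumptions for illustration, not taken from the driver headers.

#include <stdio.h>
#include <stdint.h>

#define TSR_VALID 0x80          /* assumed bit position, for the example only */

struct tsr_sample {
        uint8_t tsr;            /* transmit status byte for one slot */
        uint8_t pkt;            /* companion byte, split into two nibbles */
};

static void account_tx(const struct tsr_sample *s)
{
        if (!(s->tsr & TSR_VALID))
                return;         /* slot carries no result this interrupt */
        printf("low nibble %u, high nibble %u, tsr 0x%02x\n",
               (unsigned)(s->pkt & 0x0F), (unsigned)(s->pkt >> 4),
               (unsigned)s->tsr);
}

int main(void)
{
        struct tsr_sample samples[2] = { { 0x81, 0x23 }, { 0x00, 0x45 } };
        unsigned int i;

        for (i = 0; i < 2; i++)
                account_tx(&samples[i]);
        return 0;
}
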
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 15e815a3596..cdf355130de 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -67,17 +67,11 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
-VOID
-INTvWorkItem(
- PVOID Context
- );
+void INTvWorkItem(void *Context);
NTSTATUS
INTnsProcessData(
- IN PSDevice pDevice
+ PSDevice pDevice
);
-#endif // __INT_H__
-
-
-
+#endif /* __INT_H__ */
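
The ISR_BNTX branch in s_nsInterruptProcessData() also runs a per-beacon DTIM countdown when operating as an AP: decrement the count on every beacon and, when it reaches zero, reload it with (period - 1) and mark that buffered broadcast/multicast traffic may be released. The sketch below compiles standalone and shows only that counter; the struct and flag names are invented and nothing here models the driver's wider power-save path.

#include <stdio.h>

struct dtim_state {
        unsigned int count;             /* beacons left until the DTIM beacon */
        unsigned int period;            /* DTIM period in beacons */
        int          release_multicast; /* set on the DTIM beacon itself */
};

static void dtim_on_beacon(struct dtim_state *s)
{
        if (s->count > 0) {
                s->count--;
                s->release_multicast = 0;
        } else {
                s->count = s->period - 1;
                s->release_multicast = 1;
        }
}

int main(void)
{
        struct dtim_state s = { 2, 3, 0 };
        int i;

        for (i = 0; i < 6; i++) {
                dtim_on_beacon(&s);
                printf("beacon %d: count=%u dtim=%d\n",
                       i, s.count, s.release_multicast);
        }
        return 0;
}
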
diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
index 49bfe15f133..fbba1d53e49 100644
--- a/drivers/staging/vt6656/iocmd.h
+++ b/drivers/staging/vt6656/iocmd.h
@@ -37,12 +37,6 @@
#define DEF
#endif
-//typedef int BOOL;
-//typedef uint32_t u32;
-//typedef uint16_t u16;
-//typedef uint8_t u8;
-
-
// ioctl Command code
#define MAGIC_CODE 0x3142
#define IOCTL_CMD_TEST (SIOCDEVPRIVATE + 0)
@@ -50,7 +44,6 @@
#define IOCTL_CMD_HOSTAPD (SIOCDEVPRIVATE + 2)
#define IOCTL_CMD_WPA (SIOCDEVPRIVATE + 3)
-
typedef enum tagWMAC_CMD {
WLAN_CMD_BSS_SCAN,
@@ -90,7 +83,6 @@ typedef enum tagWZONETYPE {
#define ADHOC_STARTED 1
#define ADHOC_JOINTED 2
-
#define PHY80211a 0
#define PHY80211b 1
#define PHY80211g 2
@@ -109,10 +101,10 @@ typedef enum tagWZONETYPE {
//
#pragma pack(1)
typedef struct tagSCmdRequest {
- U8 name[16];
+ u8 name[16];
void *data;
- U16 wResult;
- U16 wCmdCode;
+ u16 wResult;
+ u16 wCmdCode;
} SCmdRequest, *PSCmdRequest;
//
@@ -121,21 +113,20 @@ typedef struct tagSCmdRequest {
typedef struct tagSCmdScan {
- U8 ssid[SSID_MAXLEN + 2];
+ u8 ssid[SSID_MAXLEN + 2];
} SCmdScan, *PSCmdScan;
-
//
// BSS Join
//
typedef struct tagSCmdBSSJoin {
- U16 wBSSType;
- U16 wBBPType;
- U8 ssid[SSID_MAXLEN + 2];
- U32 uChannel;
+ u16 wBSSType;
+ u16 wBBPType;
+ u8 ssid[SSID_MAXLEN + 2];
+ u32 uChannel;
BOOL bPSEnable;
BOOL bShareKeyAuth;
@@ -155,83 +146,80 @@ typedef struct tagSCmdZoneTypeSet {
#ifdef WPA_SM_Transtatus
typedef struct tagSWPAResult {
char ifname[100];
- U8 proto;
- U8 key_mgmt;
- U8 eap_type;
+ u8 proto;
+ u8 key_mgmt;
+ u8 eap_type;
BOOL authenticated;
} SWPAResult, *PSWPAResult;
#endif
typedef struct tagSCmdStartAP {
- U16 wBSSType;
- U16 wBBPType;
- U8 ssid[SSID_MAXLEN + 2];
- U32 uChannel;
- U32 uBeaconInt;
+ u16 wBSSType;
+ u16 wBBPType;
+ u8 ssid[SSID_MAXLEN + 2];
+ u32 uChannel;
+ u32 uBeaconInt;
BOOL bShareKeyAuth;
- U8 byBasicRate;
+ u8 byBasicRate;
} SCmdStartAP, *PSCmdStartAP;
-
typedef struct tagSCmdSetWEP {
BOOL bEnableWep;
- U8 byKeyIndex;
- U8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
+ u8 byKeyIndex;
+ u8 abyWepKey[WEP_NKEYS][WEP_KEYMAXLEN];
BOOL bWepKeyAvailable[WEP_NKEYS];
- U32 auWepKeyLength[WEP_NKEYS];
+ u32 auWepKeyLength[WEP_NKEYS];
} SCmdSetWEP, *PSCmdSetWEP;
-
-
typedef struct tagSBSSIDItem {
- U32 uChannel;
- U8 abyBSSID[BSSID_LEN];
- U8 abySSID[SSID_MAXLEN + 1];
- U16 wBeaconInterval;
- U16 wCapInfo;
- U8 byNetType;
+ u32 uChannel;
+ u8 abyBSSID[BSSID_LEN];
+ u8 abySSID[SSID_MAXLEN + 1];
+ u16 wBeaconInterval;
+ u16 wCapInfo;
+ u8 byNetType;
BOOL bWEPOn;
- U32 uRSSI;
+ u32 uRSSI;
} SBSSIDItem;
typedef struct tagSBSSIDList {
- U32 uItem;
+ u32 uItem;
SBSSIDItem sBSSIDList[0];
} SBSSIDList, *PSBSSIDList;
typedef struct tagSNodeItem {
// STA info
- U16 wAID;
- U8 abyMACAddr[6];
- U16 wTxDataRate;
- U16 wInActiveCount;
- U16 wEnQueueCnt;
- U16 wFlags;
+ u16 wAID;
+ u8 abyMACAddr[6];
+ u16 wTxDataRate;
+ u16 wInActiveCount;
+ u16 wEnQueueCnt;
+ u16 wFlags;
BOOL bPWBitOn;
- U8 byKeyIndex;
- U16 wWepKeyLength;
- U8 abyWepKey[WEP_KEYMAXLEN];
+ u8 byKeyIndex;
+ u16 wWepKeyLength;
+ u8 abyWepKey[WEP_KEYMAXLEN];
// Auto rate fallback vars
BOOL bIsInFallback;
- U32 uTxFailures;
- U32 uTxAttempts;
- U16 wFailureRatio;
+ u32 uTxFailures;
+ u32 uTxAttempts;
+ u16 wFailureRatio;
} SNodeItem;
typedef struct tagSNodeList {
- U32 uItem;
+ u32 uItem;
SNodeItem sNodeList[0];
} SNodeList, *PSNodeList;
@@ -240,12 +228,12 @@ typedef struct tagSNodeList {
typedef struct tagSCmdLinkStatus {
BOOL bLink;
- U16 wBSSType;
- U8 byState;
- U8 abyBSSID[BSSID_LEN];
- U8 abySSID[SSID_MAXLEN + 2];
- U32 uChannel;
- U32 uLinkRate;
+ u16 wBSSType;
+ u8 byState;
+ u8 abyBSSID[BSSID_LEN];
+ u8 abySSID[SSID_MAXLEN + 2];
+ u32 uChannel;
+ u32 uLinkRate;
} SCmdLinkStatus, *PSCmdLinkStatus;
@@ -253,18 +241,18 @@ typedef struct tagSCmdLinkStatus {
// 802.11 counter
//
typedef struct tagSDot11MIBCount {
- U32 TransmittedFragmentCount;
- U32 MulticastTransmittedFrameCount;
- U32 FailedCount;
- U32 RetryCount;
- U32 MultipleRetryCount;
- U32 RTSSuccessCount;
- U32 RTSFailureCount;
- U32 ACKFailureCount;
- U32 FrameDuplicateCount;
- U32 ReceivedFragmentCount;
- U32 MulticastReceivedFrameCount;
- U32 FCSErrorCount;
+ u32 TransmittedFragmentCount;
+ u32 MulticastTransmittedFrameCount;
+ u32 FailedCount;
+ u32 RetryCount;
+ u32 MultipleRetryCount;
+ u32 RTSSuccessCount;
+ u32 RTSFailureCount;
+ u32 ACKFailureCount;
+ u32 FrameDuplicateCount;
+ u32 ReceivedFragmentCount;
+ u32 MulticastReceivedFrameCount;
+ u32 FCSErrorCount;
} SDot11MIBCount, *PSDot11MIBCount;
@@ -276,119 +264,115 @@ typedef struct tagSStatMIBCount {
//
// ISR status count
//
- U32 dwIsrTx0OK;
- U32 dwIsrTx1OK;
- U32 dwIsrBeaconTxOK;
- U32 dwIsrRxOK;
- U32 dwIsrTBTTInt;
- U32 dwIsrSTIMERInt;
- U32 dwIsrUnrecoverableError;
- U32 dwIsrSoftInterrupt;
- U32 dwIsrRxNoBuf;
+ u32 dwIsrTx0OK;
+ u32 dwIsrTx1OK;
+ u32 dwIsrBeaconTxOK;
+ u32 dwIsrRxOK;
+ u32 dwIsrTBTTInt;
+ u32 dwIsrSTIMERInt;
+ u32 dwIsrUnrecoverableError;
+ u32 dwIsrSoftInterrupt;
+ u32 dwIsrRxNoBuf;
/////////////////////////////////////
- U32 dwIsrUnknown; // unknown interrupt count
+ u32 dwIsrUnknown; /* unknown interrupt count */
// RSR status count
//
- U32 dwRsrFrmAlgnErr;
- U32 dwRsrErr;
- U32 dwRsrCRCErr;
- U32 dwRsrCRCOk;
- U32 dwRsrBSSIDOk;
- U32 dwRsrADDROk;
- U32 dwRsrICVOk;
- U32 dwNewRsrShortPreamble;
- U32 dwRsrLong;
- U32 dwRsrRunt;
-
- U32 dwRsrRxControl;
- U32 dwRsrRxData;
- U32 dwRsrRxManage;
-
- U32 dwRsrRxPacket;
- U32 dwRsrRxOctet;
- U32 dwRsrBroadcast;
- U32 dwRsrMulticast;
- U32 dwRsrDirected;
+ u32 dwRsrFrmAlgnErr;
+ u32 dwRsrErr;
+ u32 dwRsrCRCErr;
+ u32 dwRsrCRCOk;
+ u32 dwRsrBSSIDOk;
+ u32 dwRsrADDROk;
+ u32 dwRsrICVOk;
+ u32 dwNewRsrShortPreamble;
+ u32 dwRsrLong;
+ u32 dwRsrRunt;
+
+ u32 dwRsrRxControl;
+ u32 dwRsrRxData;
+ u32 dwRsrRxManage;
+
+ u32 dwRsrRxPacket;
+ u32 dwRsrRxOctet;
+ u32 dwRsrBroadcast;
+ u32 dwRsrMulticast;
+ u32 dwRsrDirected;
// 64-bit OID
- U32 ullRsrOK;
+ u32 ullRsrOK;
// for some optional OIDs (64 bits) and DMI support
- U32 ullRxBroadcastBytes;
- U32 ullRxMulticastBytes;
- U32 ullRxDirectedBytes;
- U32 ullRxBroadcastFrames;
- U32 ullRxMulticastFrames;
- U32 ullRxDirectedFrames;
-
- U32 dwRsrRxFragment;
- U32 dwRsrRxFrmLen64;
- U32 dwRsrRxFrmLen65_127;
- U32 dwRsrRxFrmLen128_255;
- U32 dwRsrRxFrmLen256_511;
- U32 dwRsrRxFrmLen512_1023;
- U32 dwRsrRxFrmLen1024_1518;
+ u32 ullRxBroadcastBytes;
+ u32 ullRxMulticastBytes;
+ u32 ullRxDirectedBytes;
+ u32 ullRxBroadcastFrames;
+ u32 ullRxMulticastFrames;
+ u32 ullRxDirectedFrames;
+
+ u32 dwRsrRxFragment;
+ u32 dwRsrRxFrmLen64;
+ u32 dwRsrRxFrmLen65_127;
+ u32 dwRsrRxFrmLen128_255;
+ u32 dwRsrRxFrmLen256_511;
+ u32 dwRsrRxFrmLen512_1023;
+ u32 dwRsrRxFrmLen1024_1518;
// TSR0,1 status count
//
- U32 dwTsrTotalRetry[2]; // total collision retry count
- U32 dwTsrOnceRetry[2]; // this packet only occur one collision
- U32 dwTsrMoreThanOnceRetry[2]; // this packet occur more than one collision
- U32 dwTsrRetry[2]; // this packet has ever occur collision,
- // that is (dwTsrOnceCollision0 + dwTsrMoreThanOnceCollision0)
- U32 dwTsrACKData[2];
- U32 dwTsrErr[2];
- U32 dwAllTsrOK[2];
- U32 dwTsrRetryTimeout[2];
- U32 dwTsrTransmitTimeout[2];
-
- U32 dwTsrTxPacket[2];
- U32 dwTsrTxOctet[2];
- U32 dwTsrBroadcast[2];
- U32 dwTsrMulticast[2];
- U32 dwTsrDirected[2];
+ u32 dwTsrTotalRetry[2]; /* total collision retry count */
+ u32 dwTsrOnceRetry[2]; /* this packet had one collision */
+ u32 dwTsrMoreThanOnceRetry[2]; /* this packet had many collisions */
+	u32	dwTsrRetry[2];		/* packets that saw at least one
+					 * collision (dwTsrOnceCollision0 plus
+					 * dwTsrMoreThanOnceCollision0) */
+ u32 dwTsrACKData[2];
+ u32 dwTsrErr[2];
+ u32 dwAllTsrOK[2];
+ u32 dwTsrRetryTimeout[2];
+ u32 dwTsrTransmitTimeout[2];
+
+ u32 dwTsrTxPacket[2];
+ u32 dwTsrTxOctet[2];
+ u32 dwTsrBroadcast[2];
+ u32 dwTsrMulticast[2];
+ u32 dwTsrDirected[2];
// RD/TD count
- U32 dwCntRxFrmLength;
- U32 dwCntTxBufLength;
+ u32 dwCntRxFrmLength;
+ u32 dwCntTxBufLength;
- U8 abyCntRxPattern[16];
- U8 abyCntTxPattern[16];
+ u8 abyCntRxPattern[16];
+ u8 abyCntTxPattern[16];
- // Software check....
- U32 dwCntRxDataErr; // rx buffer data software compare CRC err count
- U32 dwCntDecryptErr; // rx buffer data software compare CRC err count
- U32 dwCntRxICVErr; // rx buffer data software compare CRC err count
- U32 idxRxErrorDesc; // index for rx data error RD
+ /* Software check.... */
+ u32 dwCntRxDataErr; /* rx buffer data CRC err count */
+ u32 dwCntDecryptErr; /* rx buffer data CRC err count */
+ u32 dwCntRxICVErr; /* rx buffer data CRC err count */
+ u32 idxRxErrorDesc; /* index for rx data error RD */
- // 64-bit OID
- U32 ullTsrOK[2];
+ /* 64-bit OID */
+ u32 ullTsrOK[2];
// for some optional OIDs (64 bits) and DMI support
- U32 ullTxBroadcastFrames[2];
- U32 ullTxMulticastFrames[2];
- U32 ullTxDirectedFrames[2];
- U32 ullTxBroadcastBytes[2];
- U32 ullTxMulticastBytes[2];
- U32 ullTxDirectedBytes[2];
+ u32 ullTxBroadcastFrames[2];
+ u32 ullTxMulticastFrames[2];
+ u32 ullTxDirectedFrames[2];
+ u32 ullTxBroadcastBytes[2];
+ u32 ullTxMulticastBytes[2];
+ u32 ullTxDirectedBytes[2];
} SStatMIBCount, *PSStatMIBCount;
-
-
-
typedef struct tagSCmdValue {
- U32 dwValue;
+ u32 dwValue;
} SCmdValue, *PSCmdValue;
-
//
// hostapd & viawget ioctl related
//
-
// VIAGWET_IOCTL_HOSTAPD ioctl() cmd:
enum {
VIAWGET_HOSTAPD_FLUSH = 1,
@@ -405,71 +389,62 @@ enum {
VIAWGET_HOSTAPD_STA_CLEAR_STATS = 12,
};
-
#define VIAWGET_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
((int) (&((struct viawget_hostapd_param *) 0)->u.generic_elem.data))
// Maximum length for algorithm names (-1 for nul termination) used in ioctl()
-
-
struct viawget_hostapd_param {
- U32 cmd;
- U8 sta_addr[6];
+ u32 cmd;
+ u8 sta_addr[6];
union {
struct {
- U16 aid;
- U16 capability;
- U8 tx_supp_rates;
+ u16 aid;
+ u16 capability;
+ u8 tx_supp_rates;
} add_sta;
struct {
- U32 inactive_sec;
+ u32 inactive_sec;
} get_info_sta;
struct {
- U8 alg;
- U32 flags;
- U32 err;
- U8 idx;
- U8 seq[8];
- U16 key_len;
- U8 key[0];
+ u8 alg;
+ u32 flags;
+ u32 err;
+ u8 idx;
+ u8 seq[8];
+ u16 key_len;
+ u8 key[0];
} crypt;
struct {
- U32 flags_and;
- U32 flags_or;
+ u32 flags_and;
+ u32 flags_or;
} set_flags_sta;
struct {
- U16 rid;
- U16 len;
- U8 data[0];
+ u16 rid;
+ u16 len;
+ u8 data[0];
} rid;
struct {
- U8 len;
- U8 data[0];
+ u8 len;
+ u8 data[0];
} generic_elem;
struct {
- U16 cmd;
- U16 reason_code;
+ u16 cmd;
+ u16 reason_code;
} mlme;
struct {
- U8 ssid_len;
- U8 ssid[32];
+ u8 ssid_len;
+ u8 ssid[32];
} scan_req;
} u;
};
-
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Types ------------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-
-
-#endif //__IOCMD_H__
+#endif /* __IOCMD_H__ */
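
A large part of the iocmd.h change above is swapping the private U8/U16/U32 names for the kernel's u8/u16/u32. Because these structures sit under #pragma pack(1) and cross the ioctl boundary, every field needs an exact width and no hidden padding. The userspace sketch below makes the same point with <stdint.h> types; the struct only loosely mirrors SCmdRequest and its field names are illustrative.

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct cmd_request_example {
        uint8_t  name[16];      /* exactly 16 bytes, regardless of platform */
        void    *data;
        uint16_t result;
        uint16_t cmd_code;
};
#pragma pack(pop)

int main(void)
{
        /* 16 + sizeof(void *) + 2 + 2 bytes, no compiler-inserted padding */
        printf("sizeof(struct cmd_request_example) = %zu\n",
               sizeof(struct cmd_request_example));
        return 0;
}
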
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 6f33005a615..19a84b66b09 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -72,16 +72,16 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
SNodeList sNodeList;
PSBSSIDList pList;
PSNodeList pNodeList;
- UINT cbListCount;
+ unsigned int cbListCount;
PKnownBSS pBSS;
PKnownNodeDB pNode;
- UINT ii, jj;
+ unsigned int ii, jj;
SCmdLinkStatus sLinkStatus;
BYTE abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
DWORD dwKeyIndex= 0;
BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- LONG ldBm;
+ signed long ldBm;
pReq->wResult = 0;
@@ -100,16 +100,21 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
memcpy(abyScanSSID, pItemSSID, pItemSSID->len + WLAN_IEHDR_LEN);
}
spin_lock_irq(&pDevice->lock);
- if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
- BSSvClearBSSList((HANDLE)pDevice, FALSE);
- else
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin \n");
- if (pItemSSID->len != 0)
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
- else
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ if (memcmp(pMgmt->abyCurrBSSID, &abyNullAddr[0], 6) == 0)
+ BSSvClearBSSList((void *) pDevice, FALSE);
+ else
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_BSS_SCAN..begin\n");
+
+ if (pItemSSID->len != 0)
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ abyScanSSID);
+ else
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+
spin_unlock_irq(&pDevice->lock);
break;
@@ -207,8 +212,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
pMgmt->eCurrState = WMAC_STATE_IDLE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
break;
@@ -330,7 +337,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
pList->sBSSIDList[ii].wBeaconInterval = pBSS->wBeaconInterval;
pList->sBSSIDList[ii].wCapInfo = pBSS->wCapInfo;
RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
- pList->sBSSIDList[ii].uRSSI = (UINT)ldBm;
+ pList->sBSSIDList[ii].uRSSI = (unsigned int) ldBm;
// pList->sBSSIDList[ii].uRSSI = pBSS->uRSSI;
memcpy(pList->sBSSIDList[ii].abyBSSID, pBSS->abyBSSID, WLAN_BSSID_LEN);
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
@@ -412,7 +419,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
break;
};
if (sValue.dwValue == 1) {
- if (hostap_set_hostapd(pDevice, 1, 1) == 0){
+ if (vt6656_hostap_set_hostapd(pDevice, 1, 1) == 0){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enable HOSTAP\n");
}
else {
@@ -421,7 +428,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
}
}
else {
- hostap_set_hostapd(pDevice, 0, 1);
+ vt6656_hostap_set_hostapd(pDevice, 0, 1);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disable HOSTAP\n");
}
@@ -480,7 +487,9 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
};
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->wpadev->dev_addr,
+ pDevice->dev->dev_addr,
+ ETH_ALEN);
pDevice->bWPADEVUp = TRUE;
}
else {
@@ -574,7 +583,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
break;
diff --git a/drivers/staging/vt6656/ioctl.h b/drivers/staging/vt6656/ioctl.h
index 07d228399c3..caa4ac963d9 100644
--- a/drivers/staging/vt6656/ioctl.h
+++ b/drivers/staging/vt6656/ioctl.h
@@ -43,15 +43,12 @@
int private_ioctl(PSDevice pDevice, struct ifreq *rq);
/*
-VOID vConfigWEPKey (
- IN PSDevice pDevice,
- IN DWORD dwKeyIndex,
- IN PBYTE pbyKey,
- IN ULONG uKeyLength
+void vConfigWEPKey (
+ PSDevice pDevice,
+ DWORD dwKeyIndex,
+ PBYTE pbyKey,
+ unsigned long uKeyLength
);
*/
-#endif // __IOCTL_H__
-
-
-
+#endif /* __IOCTL_H__ */
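
Throughout ioctl.c, and the other files in this series, the old HANDLE casts become plain (void *) pDevice arguments, with the callee casting the pointer back to its device type. The sketch below shows only that opaque-context convention; schedule_command(), the command enum and the device struct are invented for the example and are not the driver's API.

#include <stdio.h>

enum example_cmd { EXAMPLE_CMD_SCAN, EXAMPLE_CMD_SSID };

struct example_device {
        const char *name;
};

static int schedule_command(void *context, enum example_cmd cmd)
{
        struct example_device *dev = context;  /* void * converts implicitly in C */

        printf("%s: queued command %d\n", dev->name, (int)cmd);
        return 1;
}

int main(void)
{
        struct example_device dev = { "vt6656-example" };

        schedule_command(&dev, EXAMPLE_CMD_SCAN);
        return 0;
}
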
diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
index 5750b5b548e..da03edcbacb 100644
--- a/drivers/staging/vt6656/iowpa.h
+++ b/drivers/staging/vt6656/iowpa.h
@@ -153,6 +153,4 @@ struct viawget_scan_result {
/*--------------------- Export Functions --------------------------*/
-
-
-#endif //__IOWPA_H__
+#endif /* __IOWPA_H__ */
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index b7c6a22fe32..fa40522d4a9 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -209,9 +209,9 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
spin_lock_irq(&pDevice->lock);
- #ifdef update_BssList
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- #endif
+#ifdef update_BssList
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+#endif
//mike add: active scan OR passive scan OR desire_ssid scan
if(wrq->length == sizeof(struct iw_scan_req)) {
@@ -229,7 +229,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
PRINT_K("SIOCSIWSCAN:[desired_ssid=%s,len=%d]\n",((PWLAN_IE_SSID)abyScanSSID)->abySSID,
((PWLAN_IE_SSID)abyScanSSID)->len);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, abyScanSSID);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -244,7 +244,7 @@ if(pDevice->byReAssocCount > 0) { //reject scan when re-associating!
pMgmt->eScanType = WMAC_SCAN_PASSIVE;
//printk("SIOCSIWSCAN:WLAN_CMD_BSSID_SCAN\n");
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
spin_unlock_irq(&pDevice->lock);
return 0;
@@ -758,7 +758,6 @@ int iwctl_siwap(struct net_device *dev,
if (wrq->sa_family != ARPHRD_ETHER)
rc = -EINVAL;
else {
- memset(pMgmt->abyDesireBSSID, 0xFF, 6);
memcpy(pMgmt->abyDesireBSSID, wrq->sa_data, 6);
//mike :add
@@ -770,7 +769,7 @@ int iwctl_siwap(struct net_device *dev,
//mike add: if desired AP is hidden ssid(there are two same BSSID in list),
// then ignore,because you don't known which one to be connect with??
{
- UINT ii , uSameBssidNum=0;
+ unsigned int ii, uSameBssidNum = 0;
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
if (pMgmt->sBSSList[ii].bActive &&
IS_ETH_ADDRESS_EQUAL(pMgmt->sBSSList[ii].abyBSSID,pMgmt->abyDesireBSSID)) {
@@ -934,9 +933,8 @@ int iwctl_siwessid(struct net_device *dev,
{
PKnownBSS pCurr = NULL;
BYTE abyTmpDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
- UINT ii , uSameBssidNum=0;
+ unsigned int ii, uSameBssidNum = 0;
- memset(abyTmpDesireSSID,0,sizeof(abyTmpDesireSSID));
memcpy(abyTmpDesireSSID,pMgmt->abyDesireSSID,sizeof(abyTmpDesireSSID));
pCurr = BSSpSearchBSSList(pDevice,
NULL,
@@ -946,10 +944,14 @@ int iwctl_siwessid(struct net_device *dev,
if (pCurr == NULL){
PRINT_K("SIOCSIWESSID:hidden ssid site survey before associate.......\n");
- vResetCommandTimer((HANDLE) pDevice);
+ vResetCommandTimer((void *) pDevice);
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
}
else { //mike:to find out if that desired SSID is a hidden-ssid AP ,
// by means of judging if there are two same BSSID exist in list ?
@@ -961,10 +963,14 @@ int iwctl_siwessid(struct net_device *dev,
}
if(uSameBssidNum >= 2) { //hit: desired AP is in hidden ssid mode!!!
PRINT_K("SIOCSIWESSID:hidden ssid directly associate.......\n");
- vResetCommandTimer((HANDLE) pDevice);
+ vResetCommandTimer((void *) pDevice);
pMgmt->eScanType = WMAC_SCAN_PASSIVE; //this scan type,you'll submit scan result!
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ pMgmt->abyDesireSSID);
}
}
}
@@ -1434,7 +1440,7 @@ int iwctl_giwencode(struct net_device *dev,
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
int rc = 0;
char abyKey[WLAN_WEP232_KEYLEN];
- UINT index = (UINT)(wrq->flags & IW_ENCODE_INDEX);
+ unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
PSKeyItem pKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
@@ -1480,7 +1486,7 @@ int iwctl_giwencode(struct net_device *dev,
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
char abyKey[WLAN_WEP232_KEYLEN];
- UINT index = (UINT)(wrq->flags & IW_ENCODE_INDEX);
+ unsigned int index = (unsigned int)(wrq->flags & IW_ENCODE_INDEX);
PSKeyItem pKey = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODE\n");
@@ -1556,11 +1562,11 @@ int iwctl_siwpower(struct net_device *dev,
}
if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *) pDevice, pMgmt->wListenInterval);
} else if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
pDevice->ePSMode = WMAC_POWER_FAST;
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *) pDevice, pMgmt->wListenInterval);
}
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
@@ -2009,12 +2015,16 @@ int iwctl_siwmlme(struct net_device *dev,
case IW_MLME_DEAUTH:
//this command seems to be not complete,please test it --einsnliu
//printk("iwctl_siwmlme--->send DEAUTH\n");
- //bScheduleCommand((HANDLE) pDevice, WLAN_CMD_DEAUTH, (PBYTE)&reason);
+ /* bScheduleCommand((void *) pDevice,
+ WLAN_CMD_DEAUTH,
+ (PBYTE)&reason); */
//break;
case IW_MLME_DISASSOC:
if(pDevice->bLinkPass == TRUE){
PRINT_K("iwctl_siwmlme--->send DISASSOCIATE\n");
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_DISASSOCIATE,
+ NULL);
}
break;
default:
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index 3096de0ba1b..df9a4cf3baa 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -223,7 +223,4 @@ int iwctl_siwmlme(struct net_device *dev,
extern const struct iw_handler_def iwctl_handler_def;
extern const struct iw_priv_args iwctl_private_args;
-#endif // __IWCTL_H__
-
-
-
+#endif /* __IWCTL_H__ */
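
The iwctl_siwap() and iwctl_siwessid() hunks above both walk the BSS table and count active entries whose BSSID equals the desired one, treating two or more hits as a hidden-SSID access point. A self-contained sketch of that counting loop follows; the table layout, list size and entry fields are invented for the example.

#include <stdio.h>
#include <string.h>

#define EXAMPLE_BSS_MAX 4

struct bss_entry {
        int active;
        unsigned char bssid[6];
};

static unsigned int count_same_bssid(const struct bss_entry *list,
                                     const unsigned char *want)
{
        unsigned int i, hits = 0;

        for (i = 0; i < EXAMPLE_BSS_MAX; i++)
                if (list[i].active && !memcmp(list[i].bssid, want, 6))
                        hits++;
        return hits;
}

int main(void)
{
        const unsigned char want[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        struct bss_entry list[EXAMPLE_BSS_MAX] = {
                { 1, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
                { 1, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } }, /* hidden-SSID twin */
                { 0, { 0 } },
                { 1, { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb } },
        };

        /* two or more matches would indicate a hidden-SSID AP */
        printf("matching entries: %u\n", count_same_bssid(list, want));
        return 0;
}
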
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 13fc69a1a08..b0890c181e7 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -60,8 +60,8 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
-static VOID
-s_vCheckKeyTableValid (PVOID pDeviceHandler, PSKeyManagement pTable)
+static void s_vCheckKeyTableValid(void *pDeviceHandler,
+ PSKeyManagement pTable)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int i;
@@ -112,7 +112,7 @@ s_vCheckKeyTableValid (PVOID pDeviceHandler, PSKeyManagement pTable)
* Return Value: none
*
*/
-VOID KeyvInitTable(PVOID pDeviceHandler, PSKeyManagement pTable)
+void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int i;
@@ -123,10 +123,12 @@ VOID KeyvInitTable(PVOID pDeviceHandler, PSKeyManagement pTable)
for (i=0;i<MAX_KEY_TABLE;i++) {
pTable->KeyTable[i].bInUse = FALSE;
pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
- pTable->KeyTable[i].PairwiseKey.pvKeyTable = (PVOID)&pTable->KeyTable[i];
+ pTable->KeyTable[i].PairwiseKey.pvKeyTable =
+ (void *)&pTable->KeyTable[i];
for (jj=0; jj < MAX_GROUP_KEY; jj++) {
pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE;
- pTable->KeyTable[i].GroupKey[jj].pvKeyTable = (PVOID) &(pTable->KeyTable[i]);
+ pTable->KeyTable[i].GroupKey[jj].pvKeyTable =
+ (void *) &(pTable->KeyTable[i]);
}
pTable->KeyTable[i].wKeyCtl = 0;
pTable->KeyTable[i].dwGTKeyIndex = 0;
@@ -162,12 +164,8 @@ VOID KeyvInitTable(PVOID pDeviceHandler, PSKeyManagement pTable)
* Return Value: TRUE if found otherwise FALSE
*
*/
-BOOL KeybGetKey (
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyIndex,
- OUT PSKeyItem *pKey
- )
+BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
+ PSKeyItem *pKey)
{
int i;
@@ -220,12 +218,12 @@ BOOL KeybGetKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-BOOL KeybSetKey (
- PVOID pDeviceHandler,
+BOOL KeybSetKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
@@ -233,9 +231,9 @@ BOOL KeybSetKey (
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int i,j;
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
+ unsigned int uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
@@ -312,7 +310,7 @@ BOOL KeybSetKey (
}
}
if (j < (MAX_KEY_TABLE-1)) {
- memcpy(pTable->KeyTable[j].abyBSSID,pbyBSSID,U_ETHER_ADDR_LEN);
+ memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN);
pTable->KeyTable[j].bInUse = TRUE;
if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
// Pairwise key
@@ -393,8 +391,8 @@ BOOL KeybSetKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-BOOL KeybRemoveKey (
- PVOID pDeviceHandler,
+BOOL KeybRemoveKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID,
DWORD dwKeyIndex
@@ -474,8 +472,8 @@ BOOL KeybRemoveKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-BOOL KeybRemoveAllKey (
- PVOID pDeviceHandler,
+BOOL KeybRemoveAllKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID
)
@@ -510,8 +508,8 @@ BOOL KeybRemoveAllKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-VOID KeyvRemoveWEPKey (
- PVOID pDeviceHandler,
+void KeyvRemoveWEPKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex
)
@@ -533,8 +531,8 @@ VOID KeyvRemoveWEPKey (
return;
}
-VOID KeyvRemoveAllWEPKey (
- PVOID pDeviceHandler,
+void KeyvRemoveAllWEPKey(
+ void *pDeviceHandler,
PSKeyManagement pTable
)
{
@@ -561,12 +559,8 @@ VOID KeyvRemoveAllWEPKey (
* Return Value: TRUE if found otherwise FALSE
*
*/
-BOOL KeybGetTransmitKey (
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyType,
- OUT PSKeyItem *pKey
- )
+BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
+ PSKeyItem *pKey)
{
int i, ii;
@@ -641,10 +635,7 @@ BOOL KeybGetTransmitKey (
* Return Value: TRUE if found otherwise FALSE
*
*/
-BOOL KeybCheckPairewiseKey (
- IN PSKeyManagement pTable,
- OUT PSKeyItem *pKey
- )
+BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey)
{
int i;
@@ -675,23 +666,23 @@ BOOL KeybCheckPairewiseKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-BOOL KeybSetDefaultKey (
- PVOID pDeviceHandler,
+BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
-
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetDefaultKey: %1x, %d \n", (int)dwKeyIndex, (int)uKeyLength);
+ unsigned int uKeyIdx;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Enter KeybSetDefaultKey: %1x, %d\n",
+ (int) dwKeyIndex, (int) uKeyLength);
if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
return (FALSE);
@@ -700,7 +691,7 @@ BOOL KeybSetDefaultKey (
}
pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
- for(ii=0;ii<U_ETHER_ADDR_LEN;ii++)
+ for (ii = 0; ii < ETH_ALEN; ii++)
pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
// Group key
@@ -783,11 +774,11 @@ BOOL KeybSetDefaultKey (
* Return Value: TRUE if success otherwise FALSE
*
*/
-BOOL KeybSetAllGroupKey (
- PVOID pDeviceHandler,
+BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
@@ -795,9 +786,9 @@ BOOL KeybSetAllGroupKey (
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
int i;
- UINT ii;
+ unsigned int ii;
PSKeyItem pKey;
- UINT uKeyIdx;
+ unsigned int uKeyIdx;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index b15a64cb386..f749c7a027d 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -58,7 +58,7 @@
typedef struct tagSKeyItem
{
BOOL bKeyValid;
- ULONG uKeyLength;
+ unsigned long uKeyLength;
BYTE abyKey[MAX_KEY_LEN];
QWORD KeyRSC;
DWORD dwTSC47_16;
@@ -66,12 +66,12 @@ typedef struct tagSKeyItem
BYTE byCipherSuite;
BYTE byReserved0;
DWORD dwKeyIndex;
- PVOID pvKeyTable;
+ void *pvKeyTable;
} SKeyItem, *PSKeyItem; //64
typedef struct tagSKeyTable
{
- BYTE abyBSSID[U_ETHER_ADDR_LEN]; //6
+ BYTE abyBSSID[ETH_ALEN]; /* 6 */
BYTE byReserved0[2]; //8
SKeyItem PairwiseKey;
SKeyItem GroupKey[MAX_GROUP_KEY]; //64*5 = 320, 320+8=328
@@ -97,81 +97,69 @@ typedef struct tagSKeyManagement
/*--------------------- Export Functions --------------------------*/
-VOID KeyvInitTable(PVOID pDeviceHandler, PSKeyManagement pTable);
+void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable);
-BOOL KeybGetKey(
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyIndex,
- OUT PSKeyItem *pKey
- );
+BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
+ PSKeyItem *pKey);
BOOL KeybSetKey(
- PVOID pDeviceHandler,
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
);
BOOL KeybRemoveKey(
- PVOID pDeviceHandler,
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID,
DWORD dwKeyIndex
);
-BOOL KeybRemoveAllKey (
- PVOID pDeviceHandler,
+BOOL KeybRemoveAllKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
PBYTE pbyBSSID
);
-VOID KeyvRemoveWEPKey(
- PVOID pDeviceHandler,
+void KeyvRemoveWEPKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex
);
-VOID KeyvRemoveAllWEPKey(
- PVOID pDeviceHandler,
+void KeyvRemoveAllWEPKey(
+ void *pDeviceHandler,
PSKeyManagement pTable
);
-BOOL KeybGetTransmitKey(
- IN PSKeyManagement pTable,
- IN PBYTE pbyBSSID,
- IN DWORD dwKeyType,
- OUT PSKeyItem *pKey
- );
+BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
+ PSKeyItem *pKey);
-BOOL KeybCheckPairewiseKey(
- IN PSKeyManagement pTable,
- OUT PSKeyItem *pKey
- );
+BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey);
-BOOL KeybSetDefaultKey (
- PVOID pDeviceHandler,
+BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
);
-BOOL KeybSetAllGroupKey (
- PVOID pDeviceHandler,
+BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
PSKeyManagement pTable,
DWORD dwKeyIndex,
- ULONG uKeyLength,
+ unsigned long uKeyLength,
PQWORD pKeyRSC,
PBYTE pbyKey,
BYTE byKeyDecMode
);
-#endif // __KEY_H__
-
+#endif /* __KEY_H__ */
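
KeyvInitTable() in key.c above points each pairwise and group key item back at its owning table entry through pvKeyTable, so code that later holds only a key can recover the entry it belongs to. A compact sketch of that back-pointer setup; the struct names and the group-key count of two are invented for the example.

#include <stdio.h>

#define GROUP_KEYS_EX 2

struct key_item_ex {
        int   valid;
        void *owner;            /* back-pointer to the owning table entry */
};

struct key_entry_ex {
        struct key_item_ex pairwise;
        struct key_item_ex group[GROUP_KEYS_EX];
};

static void key_entry_init(struct key_entry_ex *e)
{
        int j;

        e->pairwise.valid = 0;
        e->pairwise.owner = e;
        for (j = 0; j < GROUP_KEYS_EX; j++) {
                e->group[j].valid = 0;
                e->group[j].owner = e;
        }
}

int main(void)
{
        struct key_entry_ex entry;

        key_entry_init(&entry);
        printf("back-pointer ok: %d\n", entry.group[1].owner == &entry);
        return 0;
}
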
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 55a798668fa..0ab3db025f3 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -70,7 +70,7 @@ static int msglevel =MSG_LEVEL_INFO;
*/
void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx)
{
- UINT uByteIdx;
+ unsigned int uByteIdx;
BYTE byBitMask;
BYTE pbyData[2];
@@ -110,7 +110,7 @@ void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx)
* Return Value: none
*
*/
-VOID MACvWriteMultiAddr (PSDevice pDevice, UINT uByteIdx, BYTE byData)
+void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
{
BYTE byData1;
@@ -199,7 +199,7 @@ BYTE pbyData[4];
* Return Value: none
*
*/
-void MACvDisableKeyEntry (PSDevice pDevice, UINT uEntryIdx)
+void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx)
{
WORD wOffset;
BYTE byData;
@@ -239,7 +239,9 @@ BYTE byData;
* Return Value: none
*
*/
-void MACvSetKeyEntry (PSDevice pDevice, WORD wKeyCtl, UINT uEntryIdx, UINT uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey)
+void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl,
+ unsigned int uEntryIdx, unsigned int uKeyIdx,
+ PBYTE pbyAddr, PDWORD pdwKey)
{
PBYTE pbyKey;
WORD wOffset;
@@ -247,10 +249,6 @@ DWORD dwData1,dwData2;
int ii;
BYTE pbyData[24];
-
-
-
-
if ( pDevice->byLocalID <= MAC_REVISION_A1 ) {
if ( pDevice->sMgmtObj.byCSSPK == KEY_CTL_CCMP )
return;
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index fe99e3df2a5..775c70928ec 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -421,12 +421,13 @@
/*--------------------- Export Functions --------------------------*/
void MACvSetMultiAddrByHash (PSDevice pDevice, BYTE byHashIdx);
-VOID MACvWriteMultiAddr (PSDevice pDevice, UINT uByteIdx, BYTE byData);
+void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
BOOL MACbShutdown(PSDevice pDevice);;
void MACvSetBBType(PSDevice pDevice,BYTE byType);
void MACvSetMISCFifo (PSDevice pDevice, WORD wOffset, DWORD dwData);
-void MACvDisableKeyEntry(PSDevice pDevice, UINT uEntryIdx);
-void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl, UINT uEntryIdx, UINT uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey);
+void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
+void MACvSetKeyEntry(PSDevice pDevice, WORD wKeyCtl, unsigned int uEntryIdx,
+ unsigned int uKeyIdx, PBYTE pbyAddr, PDWORD pdwKey);
void MACvRegBitsOff(PSDevice pDevice, BYTE byRegOfs, BYTE byBits);
void MACvRegBitsOn(PSDevice pDevice, BYTE byRegOfs, BYTE byBits);
@@ -439,4 +440,4 @@ void MACvEnableBarkerPreambleMd(PSDevice pDevice);
void MACvDisableBarkerPreambleMd(PSDevice pDevice);
void MACvWriteBeaconInterval(PSDevice pDevice, WORD wInterval);
-#endif // __MAC_H__
+#endif /* __MAC_H__ */
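
MACvSetMultiAddrByHash() in mac.c above converts a hash index into a byte offset plus a bit mask before writing the multicast filter. Assuming the usual 64-bit (8-byte) filter, which this hunk does not show, the arithmetic reduces to hash / 8 for the byte and 1 << (hash % 8) for the mask:

#include <stdio.h>
#include <stdint.h>

static void set_mc_hash_bit(uint8_t filter[8], unsigned int hash_idx)
{
        unsigned int byte_idx = hash_idx / 8;
        uint8_t      bit_mask = (uint8_t)(1u << (hash_idx % 8));

        filter[byte_idx] |= bit_mask;
}

int main(void)
{
        uint8_t filter[8] = { 0 };

        set_mc_hash_bit(filter, 26);    /* byte 3, bit 2 */
        printf("filter[3] = 0x%02x\n", (unsigned)filter[3]);
        return 0;
}
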
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index a8e1adbc959..098b0455e32 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -26,7 +26,7 @@
*
* Functions:
*
- * vntwusb_found1 - module initial (insmod) driver entry
+ * vt6656_probe - module initial (insmod) driver entry
* device_remove1 - module remove entry
* device_open - allocate dma/descripter resource & initial mac/bbp function
* device_xmit - asynchrous data tx function
@@ -222,15 +222,11 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
// Static vars definitions
//
-
-
-static struct usb_device_id vntwusb_table[] = {
+static struct usb_device_id vt6656_table[] __devinitdata = {
{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
{}
};
-
-
// Frequency list (map channels to frequencies)
/*
static const long frequency_list[] = {
@@ -250,15 +246,17 @@ static const long frequency_list[] = {
static const struct iw_handler_def iwctl_handler_def;
*/
+/*--------------------- Static Functions --------------------------*/
+static int vt6656_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+static void vt6656_disconnect(struct usb_interface *intf);
-/*--------------------- Static Functions --------------------------*/
-static int vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id);
-static void vntwusb_disconnect(struct usb_interface *intf);
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
-static int vntwusb_suspend(struct usb_interface *intf, pm_message_t message);
-static int vntwusb_resume(struct usb_interface *intf);
-#endif
+static int vt6656_suspend(struct usb_interface *intf, pm_message_t message);
+static int vt6656_resume(struct usb_interface *intf);
+#endif /* CONFIG_PM */
+
static struct net_device_stats *device_get_stats(struct net_device *dev);
static int device_open(struct net_device *dev);
static int device_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -279,8 +277,10 @@ static void device_free_frag_bufs(PSDevice pDevice);
static BOOL device_alloc_bufs(PSDevice pDevice);
static int Read_config_file(PSDevice pDevice);
-static UCHAR *Config_FileOperation(PSDevice pDevice);
-static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source);
+static unsigned char *Config_FileOperation(PSDevice pDevice);
+static int Config_FileGetParameter(unsigned char *string,
+ unsigned char *dest,
+ unsigned char *source);
//2008-0714<Add>by Mike Liu
static BOOL device_release_WPADEV(PSDevice pDevice);
@@ -297,14 +297,13 @@ static void usb_device_reset(PSDevice pDevice);
static void
device_set_options(PSDevice pDevice) {
- BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
-
+ BYTE abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
+ memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
+ memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);
pDevice->cbTD = TX_DESC_DEF0;
pDevice->cbRD = RX_DESC_DEF0;
@@ -334,20 +333,20 @@ device_set_options(PSDevice pDevice) {
}
-static VOID device_init_diversity_timer(PSDevice pDevice) {
-
+static void device_init_diversity_timer(PSDevice pDevice)
+{
init_timer(&pDevice->TimerSQ3Tmax1);
- pDevice->TimerSQ3Tmax1.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax1.data = (unsigned long)pDevice;
pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax2);
- pDevice->TimerSQ3Tmax2.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax2.data = (unsigned long)pDevice;
pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);
init_timer(&pDevice->TimerSQ3Tmax3);
- pDevice->TimerSQ3Tmax3.data = (ULONG)pDevice;
+ pDevice->TimerSQ3Tmax3.data = (unsigned long)pDevice;
pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerSQ3Tmax3CallBack;
pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
@@ -361,11 +360,11 @@ static VOID device_init_diversity_timer(PSDevice pDevice) {
static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
{
- BYTE abyBroadcastAddr[U_ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- BYTE abySNAP_RFC1042[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[U_ETHER_ADDR_LEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
+ u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
BYTE byAntenna;
- UINT ii;
+ unsigned int ii;
CMD_CARD_INIT sInitCmd;
NTSTATUS ntStatus = STATUS_SUCCESS;
RSP_CARD_INIT sInitRsp;
@@ -377,10 +376,12 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType);
spin_lock_irq(&pDevice->lock);
- if (InitType == DEVICE_INIT_COLD) {
- memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, U_ETHER_ADDR_LEN);
- memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, U_ETHER_ADDR_LEN);
+ if (InitType == DEVICE_INIT_COLD) {
+ memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
+ memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
+ memcpy(pDevice->abySNAP_Bridgetunnel,
+ abySNAP_Bridgetunnel,
+ ETH_ALEN);
if ( !FIRMWAREbCheckVersion(pDevice) ) {
if (FIRMWAREbDownload(pDevice) == TRUE) {
@@ -605,7 +606,9 @@ static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
// get Permanent network address
memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6);
- memcpy(pDevice->abyCurrentNetAddr, pDevice->abyPermanentNetAddr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->abyCurrentNetAddr,
+ pDevice->abyPermanentNetAddr,
+ ETH_ALEN);
// if exist SW network address, use SW network address.
@@ -712,7 +715,8 @@ static BOOL device_release_WPADEV(PSDevice pDevice)
}
#ifdef CONFIG_PM /* Minimal support for suspend and resume */
-static int vntwusb_suspend(struct usb_interface *intf, pm_message_t message)
+
+static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
{
PSDevice pDevice = usb_get_intfdata(intf);
struct net_device *dev = pDevice->dev;
@@ -727,7 +731,7 @@ if(dev != NULL) {
return 0;
}
-static int vntwusb_resume(struct usb_interface *intf)
+static int vt6656_resume(struct usb_interface *intf)
{
PSDevice pDevice = usb_get_intfdata(intf);
struct net_device *dev = pDevice->dev;
@@ -742,8 +746,8 @@ static int vntwusb_resume(struct usb_interface *intf)
}
return 0;
}
-#endif
+#endif /* CONFIG_PM */
static const struct net_device_ops device_netdev_ops = {
.ndo_open = device_open,
@@ -755,10 +759,10 @@ static const struct net_device_ops device_netdev_ops = {
};
-static int
-vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id)
+static int __devinit
+vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
- BYTE fake_mac[U_ETHER_ADDR_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01};//fake MAC address
+ u8 fake_mac[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
struct usb_device *udev = interface_to_usbdev(intf);
int rc = 0;
struct net_device *netdev = NULL;
@@ -789,7 +793,7 @@ vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id)
spin_lock_init(&pDevice->lock);
pDevice->tx_80211 = device_dma0_tx_80211;
- pDevice->sMgmtObj.pAdapter = (PVOID)pDevice;
+ pDevice->sMgmtObj.pAdapter = (void *)pDevice;
netdev->netdev_ops = &device_netdev_ops;
@@ -799,7 +803,7 @@ vntwusb_found1(struct usb_interface *intf, const struct usb_device_id *id)
//2007-0821-01<Add>by MikeLiu
usb_set_intfdata(intf, pDevice);
SET_NETDEV_DEV(netdev, &intf->dev);
- memcpy(pDevice->dev->dev_addr, fake_mac, U_ETHER_ADDR_LEN); //use fake mac address
+ memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN);
rc = register_netdev(netdev);
if (rc != 0) {
printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
@@ -841,7 +845,8 @@ err_nomem:
}
-static VOID device_free_tx_bufs(PSDevice pDevice) {
+static void device_free_tx_bufs(PSDevice pDevice)
+{
PUSB_SEND_CONTEXT pTxContext;
int ii;
@@ -860,7 +865,8 @@ static VOID device_free_tx_bufs(PSDevice pDevice) {
}
-static VOID device_free_rx_bufs(PSDevice pDevice) {
+static void device_free_rx_bufs(PSDevice pDevice)
+{
PRCB pRCB;
int ii;
@@ -892,8 +898,8 @@ static void usb_device_reset(PSDevice pDevice)
return ;
}
-static VOID device_free_int_bufs(PSDevice pDevice) {
-
+static void device_free_int_bufs(PSDevice pDevice)
+{
if (pDevice->intBuf.pDataBuf != NULL)
kfree(pDevice->intBuf.pDataBuf);
return;
@@ -915,7 +921,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
goto free_tx;
}
pDevice->apTD[ii] = pTxContext;
- pTxContext->pDevice = (PVOID) pDevice;
+ pTxContext->pDevice = (void *) pDevice;
//allocate URBs
pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
if (pTxContext->pUrb == NULL) {
@@ -944,7 +950,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
for (ii = 0; ii < pDevice->cbRD; ii++) {
pDevice->apRCB[ii] = pRCB;
- pRCB->pDevice = (PVOID) pDevice;
+ pRCB->pDevice = (void *) pDevice;
//allocate URBs
pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -1102,8 +1108,8 @@ static int device_open(struct net_device *dev) {
// Init for Key Management
KeyvInitTable(pDevice,&pDevice->sKey);
- memcpy(pDevice->sMgmtObj.abyMACAddr, pDevice->abyCurrentNetAddr, U_ETHER_ADDR_LEN);
- memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->sMgmtObj.abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, ETH_ALEN);
pDevice->bStopTx0Pkt = FALSE;
pDevice->bStopDataPkt = FALSE;
pDevice->bRoaming = FALSE; //DavidWang
@@ -1154,12 +1160,12 @@ static int device_open(struct net_device *dev) {
}
if (pDevice->sMgmtObj.eConfigMode == WMAC_CONFIG_AP) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
}
else {
//mike:mark@2008-11-10
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- //bScheduleCommand((HANDLE)pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
+ /* bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); */
}
@@ -1216,7 +1222,7 @@ static int device_close(struct net_device *dev) {
//2007-1121-02<Add>by EinsnLiu
if (pDevice->bLinkPass) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
mdelay(30);
}
//End Add
@@ -1285,8 +1291,7 @@ device_release_WPADEV(pDevice);
}
-static void vntwusb_disconnect(struct usb_interface *intf)
-
+static void __devexit vt6656_disconnect(struct usb_interface *intf)
{
PSDevice pDevice = usb_get_intfdata(intf);
@@ -1333,7 +1338,7 @@ device_release_WPADEV(pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
PSDevice pDevice=netdev_priv(dev);
PBYTE pbMPDU;
- UINT cbMPDULen = 0;
+ unsigned int cbMPDULen = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n");
@@ -1408,24 +1413,27 @@ static inline u32 ether_crc(int length, unsigned char *data)
}
//find out the start position of str2 from str1
-static UCHAR *kstrstr(const UCHAR *str1,const UCHAR *str2) {
- int str1_len=strlen(str1);
- int str2_len=strlen(str2);
+static unsigned char *kstrstr(const unsigned char *str1,
+ const unsigned char *str2) {
+ int str1_len = strlen(str1);
+ int str2_len = strlen(str2);
while (str1_len >= str2_len) {
str1_len--;
if(memcmp(str1,str2,str2_len)==0)
- return (UCHAR *)str1;
+ return (unsigned char *) str1;
str1++;
}
return NULL;
}
-static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source)
+static int Config_FileGetParameter(unsigned char *string,
+ unsigned char *dest,
+ unsigned char *source)
{
- UCHAR buf1[100];
- UCHAR buf2[100];
- UCHAR *start_p=NULL,*end_p=NULL,*tmp_p=NULL;
+ unsigned char buf1[100];
+ unsigned char buf2[100];
+ unsigned char *start_p = NULL, *end_p = NULL, *tmp_p = NULL;
int ii;
memset(buf1,0,100);
@@ -1434,7 +1442,8 @@ static int Config_FileGetParameter(UCHAR *string, UCHAR *dest,UCHAR *source)
source+=strlen(buf1);
//find target string start point
- if((start_p = kstrstr(source,buf1))==NULL)
+ start_p = kstrstr(source,buf1);
+ if (start_p == NULL)
return FALSE;
//check if current config line is marked by "#" ??
@@ -1446,7 +1455,8 @@ for(ii=1;;ii++) {
}
//find target string end point
- if((end_p = kstrstr(start_p,"\n"))==NULL) { //cann't find "\n",but don't care
+ end_p = kstrstr(start_p,"\n");
+ if (end_p == NULL) { //can't find "\n",but don't care
end_p=start_p+strlen(start_p); //no include "\n"
}
@@ -1455,7 +1465,8 @@ for(ii=1;;ii++) {
buf2[end_p-start_p]='\0';
//find value
- if((start_p = kstrstr(buf2,"="))==NULL)
+ start_p = kstrstr(buf2,"=");
+ if (start_p == NULL)
return FALSE;
memset(buf1,0,100);
strcpy(buf1,start_p+1);
@@ -1474,13 +1485,14 @@ for(ii=1;;ii++) {
}
//if read fail,return NULL,or return data pointer;
-static UCHAR *Config_FileOperation(PSDevice pDevice) {
- UCHAR *config_path=CONFIG_PATH;
- UCHAR *buffer=NULL;
+static unsigned char *Config_FileOperation(PSDevice pDevice)
+{
+ unsigned char *config_path = CONFIG_PATH;
+ unsigned char *buffer = NULL;
struct file *filp=NULL;
mm_segment_t old_fs = get_fs();
//int oldfsuid=0,oldfsgid=0;
- int result=0;
+ int result = 0;
set_fs (KERNEL_DS);
/* Can't do this anymore, so we rely on correct filesystem permissions:
@@ -1505,7 +1517,7 @@ static UCHAR *Config_FileOperation(PSDevice pDevice) {
goto error1;
}
- buffer = (UCHAR *)kmalloc(1024, GFP_KERNEL);
+ buffer = kmalloc(1024, GFP_KERNEL);
if(buffer==NULL) {
printk("alllocate mem for file fail?\n");
result = -1;
@@ -1539,16 +1551,17 @@ if(result!=0) {
//return --->-1:fail; >=0:successful
static int Read_config_file(PSDevice pDevice) {
- int result=0;
- UCHAR tmpbuffer[100];
- UCHAR *buffer=NULL;
+ int result = 0;
+ unsigned char tmpbuffer[100];
+ unsigned char *buffer = NULL;
//init config setting
pDevice->config_file.ZoneType = -1;
pDevice->config_file.eAuthenMode = -1;
pDevice->config_file.eEncryptionStatus = -1;
- if((buffer=Config_FileOperation(pDevice)) ==NULL) {
+ buffer = Config_FileOperation(pDevice);
+ if (buffer == NULL) {
result =-1;
return result;
}
@@ -1596,7 +1609,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
u32 mc_filter[2];
int ii;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE byTmpMode = 0;
int rc;
@@ -1632,8 +1645,8 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
for (ii = 0; ii < 4; ii++) {
@@ -2064,7 +2077,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
rc = 0;
}
- rc = hostap_ioctl(pDevice, &wrq->u.data);
+ rc = vt6656_hostap_ioctl(pDevice, &wrq->u.data);
break;
case IOCTL_CMD_WPA:
@@ -2094,16 +2107,16 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
netif_stop_queue(pDevice->dev);
spin_lock_irq(&pDevice->lock);
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_RUN_AP, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL);
spin_unlock_irq(&pDevice->lock);
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n");
spin_lock_irq(&pDevice->lock);
//2007-1121-01<Modify>by EinsnLiu
- if (pDevice->bLinkPass&&
+ if (pDevice->bLinkPass &&
memcmp(pMgmt->abyCurrSSID,pMgmt->abyDesireSSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
} else {
pDevice->bLinkPass = FALSE;
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -2114,10 +2127,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
pMgmt->eScanType = WMAC_SCAN_ACTIVE;
- if(pDevice->bWPASuppWextEnabled !=TRUE)
+ if (!pDevice->bWPASuppWextEnabled)
#endif
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_SSID,
+ NULL);
spin_unlock_irq(&pDevice->lock);
}
pDevice->bCommit = FALSE;
@@ -2153,35 +2170,29 @@ static int ethtool_ioctl(struct net_device *dev, void *useraddr)
/*------------------------------------------------------------------*/
+MODULE_DEVICE_TABLE(usb, vt6656_table);
-MODULE_DEVICE_TABLE(usb, vntwusb_table);
-
-
-static struct usb_driver vntwusb_driver = {
- .name = DEVICE_NAME,
- .probe = vntwusb_found1,
- .disconnect = vntwusb_disconnect,
- .id_table = vntwusb_table,
-
-//2008-0920-01<Add>by MikeLiu
-//for supporting S3 & S4 function
+static struct usb_driver vt6656_driver = {
+ .name = DEVICE_NAME,
+ .probe = vt6656_probe,
+ .disconnect = vt6656_disconnect,
+ .id_table = vt6656_table,
#ifdef CONFIG_PM
- .suspend = vntwusb_suspend,
- .resume = vntwusb_resume,
-#endif
+ .suspend = vt6656_suspend,
+ .resume = vt6656_resume,
+#endif /* CONFIG_PM */
};
-static int __init vntwusb_init_module(void)
+static int __init vt6656_init_module(void)
{
printk(KERN_NOTICE DEVICE_FULL_DRV_NAM " " DEVICE_VERSION);
- return usb_register(&vntwusb_driver);
+ return usb_register(&vt6656_driver);
}
-static void __exit vntwusb_cleanup_module(void)
+static void __exit vt6656_cleanup_module(void)
{
- usb_deregister(&vntwusb_driver);
+ usb_deregister(&vt6656_driver);
}
-module_init(vntwusb_init_module);
-module_exit(vntwusb_cleanup_module);
-
+module_init(vt6656_init_module);
+module_exit(vt6656_cleanup_module);
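
device_set_multi() above now iterates with netdev_for_each_mc_addr() but keeps the same hashing: a big-endian Ethernet CRC-32 of the address whose top six bits select one bit in a two-word filter. The standalone sketch below reproduces that arithmetic; the CRC routine follows the common ether_crc() formulation and should be read as an approximation of the driver's inline helper, not a copy of it.

#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_be(int length, const unsigned char *data)
{
        uint32_t crc = 0xffffffffu;
        int bit;

        while (length-- > 0) {
                unsigned char octet = *data++;

                for (bit = 0; bit < 8; bit++, octet >>= 1)
                        crc = (crc << 1) ^
                              ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7u : 0);
        }
        return crc;
}

int main(void)
{
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
        uint32_t mc_filter[2] = { 0, 0 };
        int bit_nr = ether_crc_be(6, mac) >> 26;        /* top 6 bits: 0..63 */

        mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
        printf("bit %d -> filter[%d] = 0x%08x\n",
               bit_nr, bit_nr >> 5, (unsigned)mc_filter[bit_nr >> 5]);
        return 0;
}
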
diff --git a/drivers/staging/vt6656/mib.c b/drivers/staging/vt6656/mib.c
index 910e610b7cc..b694fc86d74 100644
--- a/drivers/staging/vt6656/mib.c
+++ b/drivers/staging/vt6656/mib.c
@@ -152,33 +152,36 @@ void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, BYTE byIsr0, BYTE byIsr
* Return Value: none
*
*/
-void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRSR, BYTE byRxSts, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength)
+void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
+ BYTE byRSR, BYTE byNewRSR,
+ BYTE byRxSts, BYTE byRxRate,
+ PBYTE pbyBuffer, unsigned int cbFrameLength)
{
- //need change
- PS802_11Header pHeader = (PS802_11Header)pbyBuffer;
+ /* need change */
+ PS802_11Header pHeader = (PS802_11Header)pbyBuffer;
- if (byRSR & RSR_ADDROK)
- pStatistic->dwRsrADDROk++;
- if (byRSR & RSR_CRCOK) {
- pStatistic->dwRsrCRCOk++;
+ if (byRSR & RSR_ADDROK)
+ pStatistic->dwRsrADDROk++;
+ if (byRSR & RSR_CRCOK) {
+ pStatistic->dwRsrCRCOk++;
+ pStatistic->ullRsrOK++;
- pStatistic->ullRsrOK++;
-
- if (cbFrameLength >= U_ETHER_ADDR_LEN) {
- // update counters in case that successful transmit
+ if (cbFrameLength >= ETH_ALEN) {
+ /* update counters in case of successful transmission */
if (byRSR & RSR_ADDRBROAD) {
pStatistic->ullRxBroadcastFrames++;
- pStatistic->ullRxBroadcastBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxBroadcastBytes +=
+ (unsigned long long) cbFrameLength;
}
else if (byRSR & RSR_ADDRMULTI) {
pStatistic->ullRxMulticastFrames++;
- pStatistic->ullRxMulticastBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxMulticastBytes +=
+ (unsigned long long) cbFrameLength;
}
else {
pStatistic->ullRxDirectedFrames++;
- pStatistic->ullRxDirectedBytes += (ULONGLONG)cbFrameLength;
+ pStatistic->ullRxDirectedBytes +=
+ (unsigned long long) cbFrameLength;
}
}
}
@@ -188,87 +191,114 @@ void STAvUpdateRDStatCounter (PSStatCounter pStatistic,
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr11MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"11M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr11M, (INT)pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "11M: ALL[%d], OK[%d]:[%02x]\n",
+ (signed int) pStatistic->CustomStat.ullRsr11M,
+ (signed int) pStatistic->CustomStat.ullRsr11MCRCOk, byRSR);
}
else if(byRxRate==11) {
pStatistic->CustomStat.ullRsr5M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr5MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 5M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr5M, (INT)pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 5M: ALL[%d], OK[%d]:[%02x]\n",
+ (signed int) pStatistic->CustomStat.ullRsr5M,
+ (signed int) pStatistic->CustomStat.ullRsr5MCRCOk, byRSR);
}
else if(byRxRate==4) {
pStatistic->CustomStat.ullRsr2M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr2MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 2M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr2M, (INT)pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 2M: ALL[%d], OK[%d]:[%02x]\n",
+ (signed int) pStatistic->CustomStat.ullRsr2M,
+ (signed int) pStatistic->CustomStat.ullRsr2MCRCOk, byRSR);
}
else if(byRxRate==2){
pStatistic->CustomStat.ullRsr1M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr1MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 1M: ALL[%d], OK[%d]:[%02x]\n", (INT)pStatistic->CustomStat.ullRsr1M, (INT)pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 1M: ALL[%d], OK[%d]:[%02x]\n",
+ (signed int) pStatistic->CustomStat.ullRsr1M,
+ (signed int) pStatistic->CustomStat.ullRsr1MCRCOk, byRSR);
}
else if(byRxRate==12){
pStatistic->CustomStat.ullRsr6M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr6MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 6M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr6M, (INT)pStatistic->CustomStat.ullRsr6MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 6M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr6M,
+ (signed int) pStatistic->CustomStat.ullRsr6MCRCOk);
}
else if(byRxRate==18){
pStatistic->CustomStat.ullRsr9M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr9MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" 9M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr9M, (INT)pStatistic->CustomStat.ullRsr9MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 9M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr9M,
+ (signed int) pStatistic->CustomStat.ullRsr9MCRCOk);
}
else if(byRxRate==24){
pStatistic->CustomStat.ullRsr12M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr12MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"12M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr12M, (INT)pStatistic->CustomStat.ullRsr12MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "12M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr12M,
+ (signed int) pStatistic->CustomStat.ullRsr12MCRCOk);
}
else if(byRxRate==36){
pStatistic->CustomStat.ullRsr18M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr18MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"18M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr18M, (INT)pStatistic->CustomStat.ullRsr18MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "18M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr18M,
+ (signed int) pStatistic->CustomStat.ullRsr18MCRCOk);
}
else if(byRxRate==48){
pStatistic->CustomStat.ullRsr24M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr24MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"24M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr24M, (INT)pStatistic->CustomStat.ullRsr24MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "24M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr24M,
+ (signed int) pStatistic->CustomStat.ullRsr24MCRCOk);
}
else if(byRxRate==72){
pStatistic->CustomStat.ullRsr36M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr36MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"36M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr36M, (INT)pStatistic->CustomStat.ullRsr36MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "36M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr36M,
+ (signed int) pStatistic->CustomStat.ullRsr36MCRCOk);
}
else if(byRxRate==96){
pStatistic->CustomStat.ullRsr48M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr48MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"48M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr48M, (INT)pStatistic->CustomStat.ullRsr48MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "48M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr48M,
+ (signed int) pStatistic->CustomStat.ullRsr48MCRCOk);
}
else if(byRxRate==108){
pStatistic->CustomStat.ullRsr54M++;
if(byRSR & RSR_CRCOK) {
pStatistic->CustomStat.ullRsr54MCRCOk++;
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"54M: ALL[%d], OK[%d]\n", (INT)pStatistic->CustomStat.ullRsr54M, (INT)pStatistic->CustomStat.ullRsr54MCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "54M: ALL[%d], OK[%d]\n",
+ (signed int) pStatistic->CustomStat.ullRsr54M,
+ (signed int) pStatistic->CustomStat.ullRsr54MCRCOk);
}
else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Unknown: Total[%d], CRCOK[%d]\n", (INT)pStatistic->dwRsrRxPacket+1, (INT)pStatistic->dwRsrCRCOk);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "Unknown: Total[%d], CRCOK[%d]\n",
+ (signed int) pStatistic->dwRsrRxPacket+1,
+ (signed int)pStatistic->dwRsrCRCOk);
}
if (byRSR & RSR_BSSIDOK)
@@ -370,7 +400,7 @@ STAvUpdateRDStatCounterEx (
BYTE byRxSts,
BYTE byRxRate,
PBYTE pbyBuffer,
- UINT cbFrameLength
+ unsigned int cbFrameLength
)
{
STAvUpdateRDStatCounter(
@@ -510,19 +540,22 @@ STAvUpdate802_11Counter(
)
{
//p802_11Counter->TransmittedFragmentCount
- p802_11Counter->MulticastTransmittedFrameCount = (ULONGLONG) (pStatistic->dwTsrBroadcast +
- pStatistic->dwTsrMulticast);
- p802_11Counter->FailedCount = (ULONGLONG) (pStatistic->dwTsrErr);
- p802_11Counter->RetryCount = (ULONGLONG) (pStatistic->dwTsrRetry);
- p802_11Counter->MultipleRetryCount = (ULONGLONG) (pStatistic->dwTsrMoreThanOnceRetry);
+ p802_11Counter->MulticastTransmittedFrameCount =
+ (unsigned long long) (pStatistic->dwTsrBroadcast +
+ pStatistic->dwTsrMulticast);
+ p802_11Counter->FailedCount = (unsigned long long) (pStatistic->dwTsrErr);
+ p802_11Counter->RetryCount = (unsigned long long) (pStatistic->dwTsrRetry);
+ p802_11Counter->MultipleRetryCount =
+ (unsigned long long) (pStatistic->dwTsrMoreThanOnceRetry);
//p802_11Counter->FrameDuplicateCount
- p802_11Counter->RTSSuccessCount += (ULONGLONG) byRTSSuccess;
- p802_11Counter->RTSFailureCount += (ULONGLONG) byRTSFail;
- p802_11Counter->ACKFailureCount += (ULONGLONG) byACKFail;
- p802_11Counter->FCSErrorCount += (ULONGLONG) byFCSErr;
+ p802_11Counter->RTSSuccessCount += (unsigned long long) byRTSSuccess;
+ p802_11Counter->RTSFailureCount += (unsigned long long) byRTSFail;
+ p802_11Counter->ACKFailureCount += (unsigned long long) byACKFail;
+ p802_11Counter->FCSErrorCount += (unsigned long long) byFCSErr;
//p802_11Counter->ReceivedFragmentCount
- p802_11Counter->MulticastReceivedFrameCount = (ULONGLONG) (pStatistic->dwRsrBroadcast +
- pStatistic->dwRsrMulticast);
+ p802_11Counter->MulticastReceivedFrameCount =
+ (unsigned long long) (pStatistic->dwRsrBroadcast +
+ pStatistic->dwRsrMulticast);
}
/*
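A note on the magic numbers in the byRxRate chain of STAvUpdateRDStatCounter above: the field is the raw 802.11 rate code in 500 kbit/s units, which is why 22 feeds the 11M counters, 11 the 5.5M ("5M") counters, 4 the 2M counters, 2 the 1M counters, and 12/18/24/36/48/72/96/108 the 6M through 54M OFDM counters. A one-line helper expressing that assumption, using the driver's BYTE typedef:

/* byRxRate is the 802.11 rate code in 500 kbit/s units. */
static inline unsigned int rate_code_to_kbps(BYTE byRxRate)
{
	return byRxRate * 500;	/* e.g. 22 -> 11000 kbit/s, 108 -> 54000 kbit/s */
}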
diff --git a/drivers/staging/vt6656/mib.h b/drivers/staging/vt6656/mib.h
index ac996d2cd91..0455ec9d327 100644
--- a/drivers/staging/vt6656/mib.h
+++ b/drivers/staging/vt6656/mib.h
@@ -52,32 +52,34 @@ typedef struct tagSUSBCounter {
typedef struct tagSDot11Counters {
-// ULONG Length; // Length of structure
- ULONGLONG TransmittedFragmentCount;
- ULONGLONG MulticastTransmittedFrameCount;
- ULONGLONG FailedCount;
- ULONGLONG RetryCount;
- ULONGLONG MultipleRetryCount;
- ULONGLONG RTSSuccessCount;
- ULONGLONG RTSFailureCount;
- ULONGLONG ACKFailureCount;
- ULONGLONG FrameDuplicateCount;
- ULONGLONG ReceivedFragmentCount;
- ULONGLONG MulticastReceivedFrameCount;
- ULONGLONG FCSErrorCount;
- ULONGLONG TKIPLocalMICFailures;
- ULONGLONG TKIPRemoteMICFailures;
- ULONGLONG TKIPICVErrors;
- ULONGLONG TKIPCounterMeasuresInvoked;
- ULONGLONG TKIPReplays;
- ULONGLONG CCMPFormatErrors;
- ULONGLONG CCMPReplays;
- ULONGLONG CCMPDecryptErrors;
- ULONGLONG FourWayHandshakeFailures;
-// ULONGLONG WEPUndecryptableCount;
-// ULONGLONG WEPICVErrorCount;
-// ULONGLONG DecryptSuccessCount;
-// ULONGLONG DecryptFailureCount;
+ /* unsigned long Length; // Length of structure */
+ unsigned long long TransmittedFragmentCount;
+ unsigned long long MulticastTransmittedFrameCount;
+ unsigned long long FailedCount;
+ unsigned long long RetryCount;
+ unsigned long long MultipleRetryCount;
+ unsigned long long RTSSuccessCount;
+ unsigned long long RTSFailureCount;
+ unsigned long long ACKFailureCount;
+ unsigned long long FrameDuplicateCount;
+ unsigned long long ReceivedFragmentCount;
+ unsigned long long MulticastReceivedFrameCount;
+ unsigned long long FCSErrorCount;
+ unsigned long long TKIPLocalMICFailures;
+ unsigned long long TKIPRemoteMICFailures;
+ unsigned long long TKIPICVErrors;
+ unsigned long long TKIPCounterMeasuresInvoked;
+ unsigned long long TKIPReplays;
+ unsigned long long CCMPFormatErrors;
+ unsigned long long CCMPReplays;
+ unsigned long long CCMPDecryptErrors;
+ unsigned long long FourWayHandshakeFailures;
+ /*
+ * unsigned long long WEPUndecryptableCount;
+ * unsigned long long WEPICVErrorCount;
+ * unsigned long long DecryptSuccessCount;
+ * unsigned long long DecryptFailureCount;
+ */
} SDot11Counters, *PSDot11Counters;
@@ -85,15 +87,15 @@ typedef struct tagSDot11Counters {
// MIB2 counter
//
typedef struct tagSMib2Counter {
- LONG ifIndex;
+ signed long ifIndex;
char ifDescr[256]; // max size 255 plus zero ending
// e.g. "interface 1"
- LONG ifType;
- LONG ifMtu;
+ signed long ifType;
+ signed long ifMtu;
DWORD ifSpeed;
- BYTE ifPhysAddress[U_ETHER_ADDR_LEN];
- LONG ifAdminStatus;
- LONG ifOperStatus;
+ BYTE ifPhysAddress[ETH_ALEN];
+ signed long ifAdminStatus;
+ signed long ifOperStatus;
DWORD ifLastChange;
DWORD ifInOctets;
DWORD ifInUcastPkts;
@@ -124,7 +126,7 @@ typedef struct tagSMib2Counter {
// RMON counter
//
typedef struct tagSRmonCounter {
- LONG etherStatsIndex;
+ signed long etherStatsIndex;
DWORD etherStatsDataSource;
DWORD etherStatsDropEvents;
DWORD etherStatsOctets;
@@ -151,37 +153,37 @@ typedef struct tagSRmonCounter {
// Custom counter
//
typedef struct tagSCustomCounters {
- ULONG Length;
-
- ULONGLONG ullTsrAllOK;
-
- ULONGLONG ullRsr11M;
- ULONGLONG ullRsr5M;
- ULONGLONG ullRsr2M;
- ULONGLONG ullRsr1M;
-
- ULONGLONG ullRsr11MCRCOk;
- ULONGLONG ullRsr5MCRCOk;
- ULONGLONG ullRsr2MCRCOk;
- ULONGLONG ullRsr1MCRCOk;
-
- ULONGLONG ullRsr54M;
- ULONGLONG ullRsr48M;
- ULONGLONG ullRsr36M;
- ULONGLONG ullRsr24M;
- ULONGLONG ullRsr18M;
- ULONGLONG ullRsr12M;
- ULONGLONG ullRsr9M;
- ULONGLONG ullRsr6M;
-
- ULONGLONG ullRsr54MCRCOk;
- ULONGLONG ullRsr48MCRCOk;
- ULONGLONG ullRsr36MCRCOk;
- ULONGLONG ullRsr24MCRCOk;
- ULONGLONG ullRsr18MCRCOk;
- ULONGLONG ullRsr12MCRCOk;
- ULONGLONG ullRsr9MCRCOk;
- ULONGLONG ullRsr6MCRCOk;
+ unsigned long Length;
+
+ unsigned long long ullTsrAllOK;
+
+ unsigned long long ullRsr11M;
+ unsigned long long ullRsr5M;
+ unsigned long long ullRsr2M;
+ unsigned long long ullRsr1M;
+
+ unsigned long long ullRsr11MCRCOk;
+ unsigned long long ullRsr5MCRCOk;
+ unsigned long long ullRsr2MCRCOk;
+ unsigned long long ullRsr1MCRCOk;
+
+ unsigned long long ullRsr54M;
+ unsigned long long ullRsr48M;
+ unsigned long long ullRsr36M;
+ unsigned long long ullRsr24M;
+ unsigned long long ullRsr18M;
+ unsigned long long ullRsr12M;
+ unsigned long long ullRsr9M;
+ unsigned long long ullRsr6M;
+
+ unsigned long long ullRsr54MCRCOk;
+ unsigned long long ullRsr48MCRCOk;
+ unsigned long long ullRsr36MCRCOk;
+ unsigned long long ullRsr24MCRCOk;
+ unsigned long long ullRsr18MCRCOk;
+ unsigned long long ullRsr12MCRCOk;
+ unsigned long long ullRsr9MCRCOk;
+ unsigned long long ullRsr6MCRCOk;
} SCustomCounters, *PSCustomCounters;
@@ -190,7 +192,7 @@ typedef struct tagSCustomCounters {
// Custom counter
//
typedef struct tagSISRCounters {
- ULONG Length;
+ unsigned long Length;
DWORD dwIsrTx0OK;
DWORD dwIsrAC0TxOK;
@@ -231,7 +233,7 @@ typedef struct tagSTxPktInfo {
BYTE byBroadMultiUni;
WORD wLength;
WORD wFIFOCtl;
- BYTE abyDestAddr[U_ETHER_ADDR_LEN];
+ BYTE abyDestAddr[ETH_ALEN];
} STxPktInfo, *PSTxPktInfo;
@@ -277,15 +279,15 @@ typedef struct tagSStatCounter {
DWORD dwRsrMulticast;
DWORD dwRsrDirected;
// 64-bit OID
- ULONGLONG ullRsrOK;
+ unsigned long long ullRsrOK;
// for some optional OIDs (64 bits) and DMI support
- ULONGLONG ullRxBroadcastBytes;
- ULONGLONG ullRxMulticastBytes;
- ULONGLONG ullRxDirectedBytes;
- ULONGLONG ullRxBroadcastFrames;
- ULONGLONG ullRxMulticastFrames;
- ULONGLONG ullRxDirectedFrames;
+ unsigned long long ullRxBroadcastBytes;
+ unsigned long long ullRxMulticastBytes;
+ unsigned long long ullRxDirectedBytes;
+ unsigned long long ullRxBroadcastFrames;
+ unsigned long long ullRxMulticastFrames;
+ unsigned long long ullRxDirectedFrames;
DWORD dwRsrRxFragment;
DWORD dwRsrRxFrmLen64;
@@ -330,15 +332,15 @@ typedef struct tagSStatCounter {
// 64-bit OID
- ULONGLONG ullTsrOK;
+ unsigned long long ullTsrOK;
// for some optional OIDs (64 bits) and DMI support
- ULONGLONG ullTxBroadcastFrames;
- ULONGLONG ullTxMulticastFrames;
- ULONGLONG ullTxDirectedFrames;
- ULONGLONG ullTxBroadcastBytes;
- ULONGLONG ullTxMulticastBytes;
- ULONGLONG ullTxDirectedBytes;
+ unsigned long long ullTxBroadcastFrames;
+ unsigned long long ullTxMulticastFrames;
+ unsigned long long ullTxDirectedFrames;
+ unsigned long long ullTxBroadcastBytes;
+ unsigned long long ullTxMulticastBytes;
+ unsigned long long ullTxDirectedBytes;
// for autorate
DWORD dwTxOk[MAX_RATE+1];
@@ -356,15 +358,15 @@ typedef struct tagSStatCounter {
#ifdef Calcu_LinkQual
//Tx count:
- ULONG TxNoRetryOkCount; //success tx no retry !
- ULONG TxRetryOkCount; //success tx but retry !
- ULONG TxFailCount; //fail tx ?
+ unsigned long TxNoRetryOkCount; /* successful tx, no retry */
+ unsigned long TxRetryOkCount; /* successful tx, with retry */
+ unsigned long TxFailCount; /* failed tx */
//Rx count:
- ULONG RxOkCnt; //success rx !
- ULONG RxFcsErrCnt; //fail rx ?
+ unsigned long RxOkCnt; /* successful rx */
+ unsigned long RxFcsErrCnt; /* rx with FCS error */
//statistic
- ULONG SignalStren;
- ULONG LinkQuality;
+ unsigned long SignalStren;
+ unsigned long LinkQuality;
#endif
} SStatCounter, *PSStatCounter;
@@ -382,13 +384,14 @@ void STAvClearAllCounter(PSStatCounter pStatistic);
void STAvUpdateIsrStatCounter (PSStatCounter pStatistic, BYTE byIsr0, BYTE byIsr1);
void STAvUpdateRDStatCounter(PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRSR, BYTE byRxSts, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength);
+ BYTE byRSR, BYTE byNewRSR, BYTE byRxSts,
+ BYTE byRxRate, PBYTE pbyBuffer,
+ unsigned int cbFrameLength);
void STAvUpdateRDStatCounterEx(PSStatCounter pStatistic,
- BYTE byRSR, BYTE byNewRSR, BYTE byRxSts, BYTE byRxRate,
- PBYTE pbyBuffer, UINT cbFrameLength);
-
+ BYTE byRSR, BYTE byNewRSR, BYTE byRxSts,
+ BYTE byRxRate, PBYTE pbyBuffer,
+ unsigned int cbFrameLength);
void
STAvUpdateTDStatCounter (
@@ -417,7 +420,4 @@ STAvUpdateUSBCounter(
NTSTATUS ntStatus
);
-#endif // __MIB_H__
-
-
-
+#endif /* __MIB_H__ */
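The header conversions above, and the ones that follow, apply one fixed mapping from the driver's private Windows-style names to standard C types and <linux/if_ether.h> constants:

	ULONGLONG         -> unsigned long long
	ULONG             -> unsigned long
	LONG              -> signed long
	UINT              -> unsigned int
	VOID              -> void
	HANDLE            -> void *
	U_ETHER_ADDR_LEN  -> ETH_ALEN   (6, the MAC address length)
	U_HEADER_LEN      -> ETH_HLEN   (14, the Ethernet header length)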
diff --git a/drivers/staging/vt6656/michael.c b/drivers/staging/vt6656/michael.c
index c930e0cdb85..671a8cf33e2 100644
--- a/drivers/staging/vt6656/michael.c
+++ b/drivers/staging/vt6656/michael.c
@@ -48,20 +48,23 @@
/*--------------------- Static Functions --------------------------*/
/*
-static DWORD s_dwGetUINT32(BYTE * p); // Get DWORD from 4 bytes LSByte first
-static VOID s_vPutUINT32(BYTE* p, DWORD val); // Put DWORD into 4 bytes LSByte first
-*/
-static VOID s_vClear(void); // Clear the internal message,
- // resets the object to the state just after construction.
-static VOID s_vSetKey(DWORD dwK0, DWORD dwK1);
-static VOID s_vAppendByte(BYTE b); // Add a single byte to the internal message
+ * static DWORD s_dwGetUINT32(BYTE * p); Get DWORD from
+ * 4 bytes LSByte first
+ * static void s_vPutUINT32(BYTE* p, DWORD val); Put DWORD into
+ * 4 bytes LSByte first
+ */
+static void s_vClear(void); /* Clear the internal message,
+ * resets the object to the
+ * state just after construction. */
+static void s_vSetKey(DWORD dwK0, DWORD dwK1);
+static void s_vAppendByte(BYTE b); /* Add a single byte to the internal
+ * message */
/*--------------------- Export Variables --------------------------*/
-static DWORD L, R; // Current state
-
-static DWORD K0, K1; // Key
-static DWORD M; // Message accumulator (single word)
-static UINT nBytesInM; // # bytes in M
+static DWORD L, R; /* Current state */
+static DWORD K0, K1; /* Key */
+static DWORD M; /* Message accumulator (single word) */
+static unsigned int nBytesInM; /* # bytes in M */
/*--------------------- Export Functions --------------------------*/
@@ -69,113 +72,105 @@ static UINT nBytesInM; // # bytes in M
static DWORD s_dwGetUINT32 (BYTE * p)
// Convert from BYTE[] to DWORD in a portable way
{
- DWORD res = 0;
- UINT i;
- for(i=0; i<4; i++ )
- {
- res |= (*p++) << (8*i);
- }
- return res;
+ DWORD res = 0;
+ unsigned int i;
+ for(i=0; i<4; i++ )
+ res |= (*p++) << (8*i);
+ return res;
}
-static VOID s_vPutUINT32 (BYTE* p, DWORD val)
+static void s_vPutUINT32(BYTE *p, DWORD val)
// Convert from DWORD to BYTE[] in a portable way
{
- UINT i;
- for(i=0; i<4; i++ )
- {
- *p++ = (BYTE) (val & 0xff);
- val >>= 8;
- }
+ unsigned int i;
+ for(i=0; i<4; i++ ) {
+ *p++ = (BYTE) (val & 0xff);
+ val >>= 8;
+ }
}
*/
-static VOID s_vClear (void)
+static void s_vClear(void)
{
- // Reset the state to the empty message.
- L = K0;
- R = K1;
- nBytesInM = 0;
- M = 0;
+ /* Reset the state to the empty message. */
+ L = K0;
+ R = K1;
+ nBytesInM = 0;
+ M = 0;
}
-static VOID s_vSetKey (DWORD dwK0, DWORD dwK1)
+static void s_vSetKey(DWORD dwK0, DWORD dwK1)
{
- // Set the key
- K0 = dwK0;
- K1 = dwK1;
- // and reset the message
- s_vClear();
+ /* Set the key */
+ K0 = dwK0;
+ K1 = dwK1;
+ /* and reset the message */
+ s_vClear();
}
-static VOID s_vAppendByte (BYTE b)
+static void s_vAppendByte(BYTE b)
{
- // Append the byte to our word-sized buffer
- M |= b << (8*nBytesInM);
- nBytesInM++;
- // Process the word if it is full.
- if( nBytesInM >= 4 )
- {
- L ^= M;
- R ^= ROL32( L, 17 );
- L += R;
- R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
- L += R;
- R ^= ROL32( L, 3 );
- L += R;
- R ^= ROR32( L, 2 );
- L += R;
- // Clear the buffer
- M = 0;
- nBytesInM = 0;
- }
+ /* Append the byte to our word-sized buffer */
+ M |= b << (8*nBytesInM);
+ nBytesInM++;
+ /* Process the word if it is full. */
+ if (nBytesInM >= 4) {
+ L ^= M;
+ R ^= ROL32(L, 17);
+ L += R;
+ R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);
+ L += R;
+ R ^= ROL32(L, 3);
+ L += R;
+ R ^= ROR32(L, 2);
+ L += R;
+ /* Clear the buffer */
+ M = 0;
+ nBytesInM = 0;
+ }
}
-VOID MIC_vInit (DWORD dwK0, DWORD dwK1)
+void MIC_vInit(DWORD dwK0, DWORD dwK1)
{
- // Set the key
- s_vSetKey(dwK0, dwK1);
+ /* Set the key */
+ s_vSetKey(dwK0, dwK1);
}
-VOID MIC_vUnInit (void)
+void MIC_vUnInit(void)
{
- // Wipe the key material
- K0 = 0;
- K1 = 0;
+ /* Wipe the key material */
+ K0 = 0;
+ K1 = 0;
- // And the other fields as well.
- //Note that this sets (L,R) to (K0,K1) which is just fine.
- s_vClear();
+ /* And the other fields as well. */
+ /* Note that this sets (L,R) to (K0,K1) which is just fine. */
+ s_vClear();
}
-VOID MIC_vAppend (PBYTE src, UINT nBytes)
+void MIC_vAppend(PBYTE src, unsigned int nBytes)
{
- // This is simple
- while (nBytes > 0)
- {
- s_vAppendByte(*src++);
- nBytes--;
- }
+ /* This is simple */
+ while (nBytes > 0) {
+ s_vAppendByte(*src++);
+ nBytes--;
+ }
}
-VOID MIC_vGetMIC (PDWORD pdwL, PDWORD pdwR)
+void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR)
{
- // Append the minimum padding
- s_vAppendByte(0x5a);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- s_vAppendByte(0);
- // and then zeroes until the length is a multiple of 4
- while( nBytesInM != 0 )
- {
- s_vAppendByte(0);
- }
- // The s_vAppendByte function has already computed the result.
- *pdwL = L;
- *pdwR = R;
- // Reset to the empty message.
- s_vClear();
+ /* Append the minimum padding */
+ s_vAppendByte(0x5a);
+ s_vAppendByte(0);
+ s_vAppendByte(0);
+ s_vAppendByte(0);
+ s_vAppendByte(0);
+ /* and then zeroes until the length is a multiple of 4 */
+ while (nBytesInM != 0)
+ s_vAppendByte(0);
+ /* The s_vAppendByte function has already computed the result. */
+ *pdwL = L;
+ *pdwR = R;
+ /* Reset to the empty message. */
+ s_vClear();
}
-
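The Michael (TKIP MIC) helpers keep their call sequence through this cleanup; a minimal usage sketch, assuming the driver's DWORD/PBYTE/PDWORD typedefs (the state lives in file-scope statics, so the sequence is not reentrant):

/* Compute the 64-bit Michael MIC, as two DWORD halves, over one buffer. */
static void example_michael_mic(DWORD dwK0, DWORD dwK1,
				PBYTE pbyData, unsigned int cbLen,
				PDWORD pdwMicL, PDWORD pdwMicR)
{
	MIC_vInit(dwK0, dwK1);		/* load the key, reset internal state */
	MIC_vAppend(pbyData, cbLen);	/* feed the message bytes */
	MIC_vGetMIC(pdwMicL, pdwMicR);	/* pad, emit L/R, reset the message */
	MIC_vUnInit();			/* wipe the key material */
}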
diff --git a/drivers/staging/vt6656/michael.h b/drivers/staging/vt6656/michael.h
index 3f79b52832d..3ab60928ef3 100644
--- a/drivers/staging/vt6656/michael.h
+++ b/drivers/staging/vt6656/michael.h
@@ -35,16 +35,16 @@
/*--------------------- Export Types ------------------------------*/
-VOID MIC_vInit(DWORD dwK0, DWORD dwK1);
+void MIC_vInit(DWORD dwK0, DWORD dwK1);
-VOID MIC_vUnInit(void);
+void MIC_vUnInit(void);
// Append bytes to the message to be MICed
-VOID MIC_vAppend(PBYTE src, UINT nBytes);
+void MIC_vAppend(PBYTE src, unsigned int nBytes);
// Get the MIC result. Destination should accept 8 bytes of result.
// This also resets the message to empty.
-VOID MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
+void MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
/*--------------------- Export Macros ------------------------------*/
@@ -53,6 +53,4 @@ VOID MIC_vGetMIC(PDWORD pdwL, PDWORD pdwR);
( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
#define ROR32( A, n ) ROL32( (A), 32-(n) )
-#endif //__MICHAEL_H__
-
-
+#endif /* __MICHAEL_H__ */
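A quick sanity check on the ROL32/ROR32 macros above; the expected values assume DWORD is a 32-bit type, which the masking in ROL32 already relies on:

/* Illustrative only: expected rotate results for a 32-bit DWORD. */
static inline void michael_rotate_example(void)
{
	DWORD x = 0x12345678;

	WARN_ON(ROL32(x, 8) != 0x34567812);	/* top byte wraps to the bottom */
	WARN_ON(ROR32(x, 8) != 0x78123456);	/* bottom byte wraps to the top */
}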
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index b5702b098e1..766c5be6fd2 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -50,19 +50,14 @@
/*--------------------- Static Definitions -------------------------*/
-
-
-
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
-
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Functions --------------------------*/
/*+
@@ -75,12 +70,8 @@ static int msglevel =MSG_LEVEL_INFO;
*
-*/
-
-VOID
-PSvEnablePowerSaving(
- IN HANDLE hDeviceContext,
- IN WORD wListenInterval
- )
+void PSvEnablePowerSaving(void *hDeviceContext,
+ WORD wListenInterval)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -128,7 +119,7 @@ PSvEnablePowerSaving(
pDevice->bEnablePSMode = TRUE;
if (pDevice->eOPMode == OP_MODE_ADHOC) {
-// bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+ /* bMgrPrepareBeaconToSend((void *) pDevice, pMgmt); */
}
// We don't send null pkt in ad hoc mode since beacon will handle this.
else if (pDevice->eOPMode == OP_MODE_INFRASTRUCTURE) {
@@ -139,11 +130,6 @@ PSvEnablePowerSaving(
return;
}
-
-
-
-
-
/*+
*
* Routine Description:
@@ -154,10 +140,7 @@ PSvEnablePowerSaving(
*
-*/
-VOID
-PSvDisablePowerSaving(
- IN HANDLE hDeviceContext
- )
+void PSvDisablePowerSaving(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
// PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -187,7 +170,6 @@ PSvDisablePowerSaving(
return;
}
-
/*+
*
* Routine Description:
@@ -198,13 +180,9 @@ PSvDisablePowerSaving(
* FALSE, if fail
-*/
-
-BOOL
-PSbConsiderPowerDown(
- IN HANDLE hDeviceContext,
- IN BOOL bCheckRxDMA,
- IN BOOL bCheckCountToWakeUp
- )
+BOOL PSbConsiderPowerDown(void *hDeviceContext,
+ BOOL bCheckRxDMA,
+ BOOL bCheckCountToWakeUp)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -248,8 +226,6 @@ PSbConsiderPowerDown(
return TRUE;
}
-
-
/*+
*
* Routine Description:
@@ -260,12 +236,7 @@ PSbConsiderPowerDown(
*
-*/
-
-
-VOID
-PSvSendPSPOLL(
- IN HANDLE hDeviceContext
- )
+void PSvSendPSPOLL(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -297,8 +268,6 @@ PSvSendPSPOLL(
return;
}
-
-
/*+
*
* Routine Description:
@@ -308,10 +277,8 @@ PSvSendPSPOLL(
* None.
*
-*/
-BOOL
-PSbSendNullPacket(
- IN HANDLE hDeviceContext
- )
+
+BOOL PSbSendNullPacket(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket = NULL;
@@ -388,10 +355,7 @@ PSbSendNullPacket(
*
-*/
-BOOL
-PSbIsNextTBTTWakeUp(
- IN HANDLE hDeviceContext
- )
+BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index c33c93a86f5..50792bb8c97 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -45,40 +45,17 @@
/*--------------------- Export Functions --------------------------*/
-// IN PSDevice pDevice
-// IN PSDevice hDeviceContext
+/* PSDevice pDevice */
+/* PSDevice hDeviceContext */
-BOOL
-PSbConsiderPowerDown(
- IN HANDLE hDeviceContext,
- IN BOOL bCheckRxDMA,
- IN BOOL bCheckCountToWakeUp
- );
+BOOL PSbConsiderPowerDown(void *hDeviceContext,
+ BOOL bCheckRxDMA,
+ BOOL bCheckCountToWakeUp);
-VOID
-PSvDisablePowerSaving(
- IN HANDLE hDeviceContext
- );
+void PSvDisablePowerSaving(void *hDeviceContext);
+void PSvEnablePowerSaving(void *hDeviceContext, WORD wListenInterval);
+void PSvSendPSPOLL(void *hDeviceContext);
+BOOL PSbSendNullPacket(void *hDeviceContext);
+BOOL PSbIsNextTBTTWakeUp(void *hDeviceContext);
-VOID
-PSvEnablePowerSaving(
- IN HANDLE hDeviceContext,
- IN WORD wListenInterval
- );
-
-VOID
-PSvSendPSPOLL(
- IN HANDLE hDeviceContext
- );
-
-BOOL
-PSbSendNullPacket(
- IN HANDLE hDeviceContext
- );
-
-BOOL
-PSbIsNextTBTTWakeUp(
- IN HANDLE hDeviceContext
- );
-
-#endif //__POWER_H__
+#endif /* __POWER_H__ */
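Caller-side view of the prototypes above after the IN/HANDLE annotations were dropped: the device context is passed as a plain void * and cast back inside each routine, as power.c does with "PSDevice pDevice = (PSDevice)hDeviceContext". An illustrative sequence, with a hypothetical listen interval of one beacon:

	PSvEnablePowerSaving((void *) pDevice, 1);	/* enter 802.11 power save */
	PSvDisablePowerSaving((void *) pDevice);	/* return to active mode */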
diff --git a/drivers/staging/vt6656/rc4.c b/drivers/staging/vt6656/rc4.c
index e6c61312fd2..5c3c2d0552b 100644
--- a/drivers/staging/vt6656/rc4.c
+++ b/drivers/staging/vt6656/rc4.c
@@ -32,56 +32,56 @@
#include "rc4.h"
-VOID rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len)
+void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, unsigned int cbKey_len)
{
- UINT ust1, ust2;
- UINT keyindex;
- UINT stateindex;
- PBYTE pbyst;
- UINT idx;
+ unsigned int ust1, ust2;
+ unsigned int keyindex;
+ unsigned int stateindex;
+ PBYTE pbyst;
+ unsigned int idx;
- pbyst = pRC4->abystate;
- pRC4->ux = 0;
- pRC4->uy = 0;
- for (idx = 0; idx < 256; idx++)
- pbyst[idx] = (BYTE)idx;
- keyindex = 0;
- stateindex = 0;
- for (idx = 0; idx < 256; idx++) {
- ust1 = pbyst[idx];
- stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
- ust2 = pbyst[stateindex];
- pbyst[stateindex] = (BYTE)ust1;
- pbyst[idx] = (BYTE)ust2;
- if (++keyindex >= cbKey_len)
- keyindex = 0;
- }
+ pbyst = pRC4->abystate;
+ pRC4->ux = 0;
+ pRC4->uy = 0;
+ for (idx = 0; idx < 256; idx++)
+ pbyst[idx] = (BYTE)idx;
+ keyindex = 0;
+ stateindex = 0;
+ for (idx = 0; idx < 256; idx++) {
+ ust1 = pbyst[idx];
+ stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
+ ust2 = pbyst[stateindex];
+ pbyst[stateindex] = (BYTE)ust1;
+ pbyst[idx] = (BYTE)ust2;
+ if (++keyindex >= cbKey_len)
+ keyindex = 0;
+ }
}
-UINT rc4_byte(PRC4Ext pRC4)
+unsigned int rc4_byte(PRC4Ext pRC4)
{
- UINT ux;
- UINT uy;
- UINT ustx, usty;
- PBYTE pbyst;
+ unsigned int ux;
+ unsigned int uy;
+ unsigned int ustx, usty;
+ PBYTE pbyst;
- pbyst = pRC4->abystate;
- ux = (pRC4->ux + 1) & 0xff;
- ustx = pbyst[ux];
- uy = (ustx + pRC4->uy) & 0xff;
- usty = pbyst[uy];
- pRC4->ux = ux;
- pRC4->uy = uy;
- pbyst[uy] = (BYTE)ustx;
- pbyst[ux] = (BYTE)usty;
+ pbyst = pRC4->abystate;
+ ux = (pRC4->ux + 1) & 0xff;
+ ustx = pbyst[ux];
+ uy = (ustx + pRC4->uy) & 0xff;
+ usty = pbyst[uy];
+ pRC4->ux = ux;
+ pRC4->uy = uy;
+ pbyst[uy] = (BYTE)ustx;
+ pbyst[ux] = (BYTE)usty;
- return pbyst[(ustx + usty) & 0xff];
+ return pbyst[(ustx + usty) & 0xff];
}
-VOID rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest,
- PBYTE pbySrc, UINT cbData_len)
+void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest,
+ PBYTE pbySrc, unsigned int cbData_len)
{
- UINT ii;
- for (ii = 0; ii < cbData_len; ii++)
- pbyDest[ii] = (BYTE)(pbySrc[ii] ^ rc4_byte(pRC4));
+ unsigned int ii;
+ for (ii = 0; ii < cbData_len; ii++)
+ pbyDest[ii] = (BYTE)(pbySrc[ii] ^ rc4_byte(pRC4));
}
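The re-indented RC4 helpers keep their original interface; a minimal usage sketch (RC4 is symmetric, so the same call also decrypts):

/* Encrypt cbLen bytes from pbySrc into pbyDst with the given key. */
static void example_rc4(PBYTE pbyKey, unsigned int cbKeyLen,
			PBYTE pbyDst, PBYTE pbySrc, unsigned int cbLen)
{
	RC4Ext rc4;				/* per-stream cipher state */

	rc4_init(&rc4, pbyKey, cbKeyLen);	/* key-scheduling pass over the S-box */
	rc4_encrypt(&rc4, pbyDst, pbySrc, cbLen); /* XOR the data with the keystream */
}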
diff --git a/drivers/staging/vt6656/rc4.h b/drivers/staging/vt6656/rc4.h
index bf607c9d446..d447879c8f9 100644
--- a/drivers/staging/vt6656/rc4.h
+++ b/drivers/staging/vt6656/rc4.h
@@ -35,13 +35,14 @@
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Types ------------------------------*/
typedef struct {
- UINT ux;
- UINT uy;
+ unsigned int ux;
+ unsigned int uy;
BYTE abystate[256];
} RC4Ext, *PRC4Ext;
-VOID rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len);
-UINT rc4_byte(PRC4Ext pRC4);
-void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc, UINT cbData_len);
+void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, unsigned int cbKey_len);
+unsigned int rc4_byte(PRC4Ext pRC4);
+void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc,
+ unsigned int cbData_len);
-#endif //__RC4_H__
+#endif /* __RC4_H__ */
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 405c4f71b5f..3fd0478a9a5 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -757,9 +757,9 @@ BOOL IFRFbWriteEmbeded (PSDevice pDevice, DWORD dwData)
*
*/
BOOL RFbSetPower (
- IN PSDevice pDevice,
- IN UINT uRATE,
- IN UINT uCH
+ PSDevice pDevice,
+ unsigned int uRATE,
+ unsigned int uCH
)
{
BOOL bResult = TRUE;
@@ -811,9 +811,9 @@ BYTE byPwr = pDevice->byCCKPwr;
*
*/
BOOL RFbRawSetPower (
- IN PSDevice pDevice,
- IN BYTE byPwr,
- IN UINT uRATE
+ PSDevice pDevice,
+ BYTE byPwr,
+ unsigned int uRATE
)
{
BOOL bResult = TRUE;
@@ -954,16 +954,16 @@ BOOL bResult = TRUE;
* Return Value: none
*
-*/
-VOID
+void
RFvRSSITodBm (
- IN PSDevice pDevice,
- IN BYTE byCurrRSSI,
+ PSDevice pDevice,
+ BYTE byCurrRSSI,
long * pldBm
)
{
BYTE byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
- LONG b = (byCurrRSSI & 0x3F);
- LONG a = 0;
+ signed long b = (byCurrRSSI & 0x3F);
+ signed long a = 0;
BYTE abyAIROHARF[4] = {0, 18, 0, 40};
switch (pDevice->byRFType) {
@@ -984,9 +984,9 @@ RFvRSSITodBm (
-VOID
+void
RFbRFTableDownload (
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
WORD wLength1 = 0,wLength2 = 0 ,wLength3 = 0;
@@ -1133,9 +1133,9 @@ BYTE abyArray[256];
// RobertYu:20060412, TWIF1.11 adjust LO Current for 11b mode
BOOL s_bVT3226D0_11bLoCurrentAdjust(
- IN PSDevice pDevice,
- IN BYTE byChannel,
- IN BOOL b11bMode )
+ PSDevice pDevice,
+ BYTE byChannel,
+ BOOL b11bMode)
{
BOOL bResult;
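A worked example of the RSSI byte split visible at the top of RFvRSSITodBm: with byCurrRSSI = 0x9A, the two high bits give the table index (presumably into abyAIROHARF[] for the Airoha parts) and the low six bits give the raw level.

	BYTE byIdx = (((0x9A & 0xC0) >> 6) & 0x03);	/* = 2 */
	signed long b = (0x9A & 0x3F);			/* = 26 */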
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index 55d882f78f2..d4f8b94132b 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -65,36 +65,33 @@ extern const BYTE RFaby11aChannelIndex[200];
BOOL IFRFbWriteEmbeded(PSDevice pDevice, DWORD dwData);
BOOL RFbSetPower (
- IN PSDevice pDevice,
- IN UINT uRATE,
- IN UINT uCH
+ PSDevice pDevice,
+ unsigned int uRATE,
+ unsigned int uCH
);
BOOL RFbRawSetPower(
- IN PSDevice pDevice,
- IN BYTE byPwr,
- IN UINT uRATE
+ PSDevice pDevice,
+ BYTE byPwr,
+ unsigned int uRATE
);
-VOID
+void
RFvRSSITodBm (
- IN PSDevice pDevice,
- IN BYTE byCurrRSSI,
+ PSDevice pDevice,
+ BYTE byCurrRSSI,
long * pldBm
);
-VOID
+void
RFbRFTableDownload (
- IN PSDevice pDevice
+ PSDevice pDevice
);
BOOL s_bVT3226D0_11bLoCurrentAdjust(
- IN PSDevice pDevice,
- IN BYTE byChannel,
- IN BOOL b11bMode
+ PSDevice pDevice,
+ BYTE byChannel,
+ BOOL b11bMode
);
-#endif // __RF_H__
-
-
-
+#endif /* __RF_H__ */
diff --git a/drivers/staging/vt6656/rndis.h b/drivers/staging/vt6656/rndis.h
index 1d32d81079b..ac842dd13a6 100644
--- a/drivers/staging/vt6656/rndis.h
+++ b/drivers/staging/vt6656/rndis.h
@@ -158,5 +158,4 @@ typedef struct _CMD_CHANGE_BBTYPE
/*--------------------- Export Functions --------------------------*/
-
-#endif // _RNDIS_H_
+#endif /* _RNDIS_H_ */
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index d9fa36c9523..3e7e56649a5 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -25,17 +25,17 @@
* Date: May 20, 2003
*
* Functions:
- * s_vGenerateTxParameter - Generate tx dma requried parameter.
+ * s_vGenerateTxParameter - Generate tx dma required parameter.
* s_vGenerateMACHeader - Translate 802.3 to 802.11 header
* csBeacon_xmit - beacon tx function
* csMgmt_xmit - management tx function
* s_uGetDataDuration - get tx data required duration
* s_uFillDataHead- fulfill tx data duration header
- * s_uGetRTSCTSDuration- get rtx/cts requried duration
+ * s_uGetRTSCTSDuration- get rts/cts required duration
* s_uGetRTSCTSRsvTime- get rts/cts reserved time
* s_uGetTxRsvTime- get frame reserved time
* s_vFillCTSHead- fulfill CTS ctl header
- * s_vFillFragParameter- Set fragement ctl parameter.
+ * s_vFillFragParameter- Set fragment ctl parameter.
* s_vFillRTSHead- fulfill RTS ctl header
* s_vFillTxKey- fulfill tx encrypt key
* s_vSWencryption- Software encrypt header
@@ -113,181 +113,173 @@ const WORD wFB_Opt1[2][5] = {
/*--------------------- Static Functions --------------------------*/
static
-VOID
+void
s_vSaveTxPktInfo(
- IN PSDevice pDevice,
- IN BYTE byPktNum,
- IN PBYTE pbyDestAddr,
- IN WORD wPktLength,
- IN WORD wFIFOCtl
+ PSDevice pDevice,
+ BYTE byPktNum,
+ PBYTE pbyDestAddr,
+ WORD wPktLength,
+ WORD wFIFOCtl
);
static
-PVOID
+void *
s_vGetFreeContext(
PSDevice pDevice
);
static
-VOID
+void
s_vGenerateTxParameter(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN WORD wCurrentRate,
- IN PVOID pTxBufHead,
- IN PVOID pvRrvTime,
- IN PVOID pvRTS,
- IN PVOID pvCTS,
- IN UINT cbFrameSize,
- IN BOOL bNeedACK,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader
+ PSDevice pDevice,
+ BYTE byPktType,
+ WORD wCurrentRate,
+ void *pTxBufHead,
+ void *pvRrvTime,
+ void *pvRTS,
+ void *pvCTS,
+ unsigned int cbFrameSize,
+ BOOL bNeedACK,
+ unsigned int uDMAIdx,
+ PSEthernetHeader psEthHeader
);
-static
-UINT
-s_uFillDataHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN WORD wCurrentRate,
- IN PVOID pTxDataHead,
- IN UINT cbFrameLength,
- IN UINT uDMAIdx,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption
+static unsigned int s_uFillDataHead(
+ PSDevice pDevice,
+ BYTE byPktType,
+ WORD wCurrentRate,
+ void *pTxDataHead,
+ unsigned int cbFrameLength,
+ unsigned int uDMAIdx,
+ BOOL bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ BYTE byFBOption
);
static
-VOID
+void
s_vGenerateMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyBufferAddr,
- IN WORD wDuration,
- IN PSEthernetHeader psEthHeader,
- IN BOOL bNeedEncrypt,
- IN WORD wFragType,
- IN UINT uDMAIdx,
- IN UINT uFragIdx
+ PSDevice pDevice,
+ PBYTE pbyBufferAddr,
+ WORD wDuration,
+ PSEthernetHeader psEthHeader,
+ BOOL bNeedEncrypt,
+ WORD wFragType,
+ unsigned int uDMAIdx,
+ unsigned int uFragIdx
);
static
-VOID
+void
s_vFillTxKey(
- IN PSDevice pDevice,
- IN PBYTE pbyBuf,
- IN PBYTE pbyIVHead,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyHdrBuf,
- IN WORD wPayloadLen,
- OUT PBYTE pMICHDR
+ PSDevice pDevice,
+ PBYTE pbyBuf,
+ PBYTE pbyIVHead,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyHdrBuf,
+ WORD wPayloadLen,
+ PBYTE pMICHDR
);
static
-VOID
+void
s_vSWencryption (
- IN PSDevice pDevice,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyPayloadHead,
- IN WORD wPayloadSize
+ PSDevice pDevice,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyPayloadHead,
+ WORD wPayloadSize
);
-static
-UINT
-s_uGetTxRsvTime (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BOOL bNeedAck
+static unsigned int s_uGetTxRsvTime(
+ PSDevice pDevice,
+ BYTE byPktType,
+ unsigned int cbFrameLength,
+ WORD wRate,
+ BOOL bNeedAck
);
-static
-UINT
-s_uGetRTSCTSRsvTime (
- IN PSDevice pDevice,
- IN BYTE byRTSRsvType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wCurrentRate
+static unsigned int s_uGetRTSCTSRsvTime(
+ PSDevice pDevice,
+ BYTE byRTSRsvType,
+ BYTE byPktType,
+ unsigned int cbFrameLength,
+ WORD wCurrentRate
);
static
-VOID
+void
s_vFillCTSHead (
- IN PSDevice pDevice,
- IN UINT uDMAIdx,
- IN BYTE byPktType,
- IN PVOID pvCTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ unsigned int uDMAIdx,
+ BYTE byPktType,
+ void *pvCTS,
+ unsigned int cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ WORD wCurrentRate,
+ BYTE byFBOption
);
static
-VOID
+void
s_vFillRTSHead(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pvRTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byPktType,
+ void *pvRTS,
+ unsigned int cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate,
+ BYTE byFBOption
);
-static
-UINT
-s_uGetDataDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption
+static unsigned int s_uGetDataDuration(
+ PSDevice pDevice,
+ BYTE byDurType,
+ unsigned int cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ BYTE byFBOption
);
static
-UINT
+unsigned int
s_uGetRTSCTSDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byDurType,
+ unsigned int cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ BYTE byFBOption
);
/*--------------------- Export Variables --------------------------*/
static
-PVOID
+void *
s_vGetFreeContext(
PSDevice pDevice
)
{
PUSB_SEND_CONTEXT pContext = NULL;
PUSB_SEND_CONTEXT pReturnContext = NULL;
- UINT ii;
+ unsigned int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
@@ -302,12 +294,12 @@ s_vGetFreeContext(
if ( ii == pDevice->cbTD ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"No Free Tx Context\n");
}
- return ((PVOID) pReturnContext);
+ return (void *) pReturnContext;
}
static
-VOID
+void
s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLength, WORD wFIFOCtl)
{
PSStatCounter pStatistic=&(pDevice->scStatistic);
@@ -322,22 +314,24 @@ s_vSaveTxPktInfo(PSDevice pDevice, BYTE byPktNum, PBYTE pbyDestAddr, WORD wPktLe
pStatistic->abyTxPktInfo[byPktNum].wLength = wPktLength;
pStatistic->abyTxPktInfo[byPktNum].wFIFOCtl = wFIFOCtl;
- memcpy(pStatistic->abyTxPktInfo[byPktNum].abyDestAddr, pbyDestAddr, U_ETHER_ADDR_LEN);
+ memcpy(pStatistic->abyTxPktInfo[byPktNum].abyDestAddr,
+ pbyDestAddr,
+ ETH_ALEN);
}
static
-VOID
+void
s_vFillTxKey (
- IN PSDevice pDevice,
- IN PBYTE pbyBuf,
- IN PBYTE pbyIVHead,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyHdrBuf,
- IN WORD wPayloadLen,
- OUT PBYTE pMICHDR
+ PSDevice pDevice,
+ PBYTE pbyBuf,
+ PBYTE pbyIVHead,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyHdrBuf,
+ WORD wPayloadLen,
+ PBYTE pMICHDR
)
{
PDWORD pdwIV = (PDWORD) pbyIVHead;
@@ -446,15 +440,15 @@ s_vFillTxKey (
static
-VOID
+void
s_vSWencryption (
- IN PSDevice pDevice,
- IN PSKeyItem pTransmitKey,
- IN PBYTE pbyPayloadHead,
- IN WORD wPayloadSize
+ PSDevice pDevice,
+ PSKeyItem pTransmitKey,
+ PBYTE pbyPayloadHead,
+ WORD wPayloadSize
)
{
- UINT cbICVlen = 4;
+ unsigned int cbICVlen = 4;
DWORD dwICV = 0xFFFFFFFFL;
PDWORD pdwICV;
@@ -495,16 +489,16 @@ s_vSWencryption (
PK_TYPE_11GA 3
*/
static
-UINT
+unsigned int
s_uGetTxRsvTime (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wRate,
- IN BOOL bNeedAck
+ PSDevice pDevice,
+ BYTE byPktType,
+ unsigned int cbFrameLength,
+ WORD wRate,
+ BOOL bNeedAck
)
{
- UINT uDataTime, uAckTime;
+ unsigned int uDataTime, uAckTime;
uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
if (byPktType == PK_TYPE_11B) {//llb,CCK mode
@@ -523,16 +517,16 @@ s_uGetTxRsvTime (
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
-UINT
+unsigned int
s_uGetRTSCTSRsvTime (
- IN PSDevice pDevice,
- IN BYTE byRTSRsvType,
- IN BYTE byPktType,
- IN UINT cbFrameLength,
- IN WORD wCurrentRate
+ PSDevice pDevice,
+ BYTE byRTSRsvType,
+ BYTE byPktType,
+ unsigned int cbFrameLength,
+ WORD wCurrentRate
)
{
- UINT uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
+ unsigned int uRrvTime , uRTSTime, uCTSTime, uAckTime, uDataTime;
uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
@@ -565,23 +559,22 @@ s_uGetRTSCTSRsvTime (
//byFreqType 0: 5GHz, 1:2.4Ghz
static
-UINT
+unsigned int
s_uGetDataDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byDurType,
+ unsigned int cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ BYTE byFBOption
)
{
BOOL bLastFrag = 0;
- UINT uAckTime =0, uNextPktTime = 0;
-
+ unsigned int uAckTime = 0, uNextPktTime = 0;
if (uFragIdx == (uMACfragNum-1)) {
bLastFrag = 1;
@@ -735,18 +728,18 @@ s_uGetDataDuration (
//byFreqType: 0=>5GHZ 1=>2.4GHZ
static
-UINT
+unsigned int
s_uGetRTSCTSDuration (
- IN PSDevice pDevice,
- IN BYTE byDurType,
- IN UINT cbFrameLength,
- IN BYTE byPktType,
- IN WORD wRate,
- IN BOOL bNeedAck,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byDurType,
+ unsigned int cbFrameLength,
+ BYTE byPktType,
+ WORD wRate,
+ BOOL bNeedAck,
+ BYTE byFBOption
)
{
- UINT uCTSTime = 0, uDurTime = 0;
+ unsigned int uCTSTime = 0, uDurTime = 0;
switch (byDurType) {
@@ -834,19 +827,19 @@ s_uGetRTSCTSDuration (
static
-UINT
+unsigned int
s_uFillDataHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN WORD wCurrentRate,
- IN PVOID pTxDataHead,
- IN UINT cbFrameLength,
- IN UINT uDMAIdx,
- IN BOOL bNeedAck,
- IN UINT uFragIdx,
- IN UINT cbLastFragmentSize,
- IN UINT uMACfragNum,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byPktType,
+ WORD wCurrentRate,
+ void *pTxDataHead,
+ unsigned int cbFrameLength,
+ unsigned int uDMAIdx,
+ BOOL bNeedAck,
+ unsigned int uFragIdx,
+ unsigned int cbLastFragmentSize,
+ unsigned int uMACfragNum,
+ BYTE byFBOption
)
{
@@ -979,20 +972,20 @@ s_uFillDataHead (
static
-VOID
+void
s_vFillRTSHead (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PVOID pvRTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN PSEthernetHeader psEthHeader,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ BYTE byPktType,
+ void *pvRTS,
+ unsigned int cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ PSEthernetHeader psEthHeader,
+ WORD wCurrentRate,
+ BYTE byFBOption
)
{
- UINT uRTSFrameLen = 20;
+ unsigned int uRTSFrameLen = 20;
WORD wLen = 0x0000;
if (pvRTS == NULL)
@@ -1026,18 +1019,27 @@ s_vFillRTSHead (
pBuf->Data.wDurationID = pBuf->wDuration_aa;
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- }
+
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ (pDevice->eOPMode == OP_MODE_AP)) {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ }
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ }
+ if (pDevice->eOPMode == OP_MODE_AP) {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ }
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
}
}
else {
@@ -1063,19 +1065,27 @@ s_vFillRTSHead (
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- }
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ (pDevice->eOPMode == OP_MODE_AP)) {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ }
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
}
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
+ if (pDevice->eOPMode == OP_MODE_AP) {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ }
else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
}
} // if (byFBOption == AUTO_FB_NONE)
@@ -1094,20 +1104,26 @@ s_vFillRTSHead (
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
-
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- }
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ (pDevice->eOPMode == OP_MODE_AP)) {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ }
+
+ if (pDevice->eOPMode == OP_MODE_AP) {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
+ }
}
else {
@@ -1125,19 +1141,25 @@ s_vFillRTSHead (
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- }
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ (pDevice->eOPMode == OP_MODE_AP)) {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ }
+ if (pDevice->eOPMode == OP_MODE_AP) {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
+ }
}
}
else if (byPktType == PK_TYPE_11B) {
@@ -1153,39 +1175,45 @@ s_vFillRTSHead (
//Get RTS Frame body
pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
-
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
+ if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
(pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
}
else {
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pBuf->Data.abyTA[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
}
}
}
static
-VOID
+void
s_vFillCTSHead (
- IN PSDevice pDevice,
- IN UINT uDMAIdx,
- IN BYTE byPktType,
- IN PVOID pvCTS,
- IN UINT cbFrameLength,
- IN BOOL bNeedAck,
- IN BOOL bDisCRC,
- IN WORD wCurrentRate,
- IN BYTE byFBOption
+ PSDevice pDevice,
+ unsigned int uDMAIdx,
+ BYTE byPktType,
+ void *pvCTS,
+ unsigned int cbFrameLength,
+ BOOL bNeedAck,
+ BOOL bDisCRC,
+ WORD wCurrentRate,
+ BYTE byFBOption
)
{
- UINT uCTSFrameLen = 14;
+ unsigned int uCTSFrameLen = 14;
WORD wLen = 0x0000;
if (pvCTS == NULL) {
@@ -1222,7 +1250,9 @@ s_vFillCTSHead (
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyCurrentNetAddr[0]),
+ ETH_ALEN);
} else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
PSCTS pBuf = (PSCTS)pvCTS;
//Get SignalField,ServiceField,Length
@@ -1239,16 +1269,13 @@ s_vFillCTSHead (
pBuf->Data.wDurationID = pBuf->wDuration_ba;
pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]), &(pDevice->abyCurrentNetAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pBuf->Data.abyRA[0]),
+ &(pDevice->abyCurrentNetAddr[0]),
+ ETH_ALEN);
}
}
}
-
-
-
-
-
/*+
*
* Description:
@@ -1271,24 +1298,24 @@ s_vFillCTSHead (
* Return Value: none
*
-*/
-// UINT cbFrameSize,//Hdr+Payload+FCS
+
static
-VOID
+void
s_vGenerateTxParameter (
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN WORD wCurrentRate,
- IN PVOID pTxBufHead,
- IN PVOID pvRrvTime,
- IN PVOID pvRTS,
- IN PVOID pvCTS,
- IN UINT cbFrameSize,
- IN BOOL bNeedACK,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader
+ PSDevice pDevice,
+ BYTE byPktType,
+ WORD wCurrentRate,
+ void *pTxBufHead,
+ void *pvRrvTime,
+ void *pvRTS,
+ void *pvCTS,
+ unsigned int cbFrameSize,
+ BOOL bNeedACK,
+ unsigned int uDMAIdx,
+ PSEthernetHeader psEthHeader
)
{
- UINT cbMACHdLen = WLAN_HDR_ADDR3_LEN; //24
+ unsigned int cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
WORD wFifoCtl;
BOOL bDisCRC = FALSE;
BYTE byFBOption = AUTO_FB_NONE;
@@ -1386,44 +1413,45 @@ s_vGenerateTxParameter (
/*
PBYTE pbyBuffer,//point to pTxBufHead
WORD wFragType,//00:Non-Frag, 01:Start, 02:Mid, 03:Last
- UINT cbFragmentSize,//Hdr+payoad+FCS
+ unsigned int cbFragmentSize,//Hdr+payload+FCS
*/
BOOL
s_bPacketToWirelessUsb(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PBYTE usbPacketBuf,
- IN BOOL bNeedEncryption,
- IN UINT uSkbPacketLen,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- IN WORD wCurrentRate,
- OUT UINT *pcbHeaderLen,
- OUT UINT *pcbTotalLen
+ PSDevice pDevice,
+ BYTE byPktType,
+ PBYTE usbPacketBuf,
+ BOOL bNeedEncryption,
+ unsigned int uSkbPacketLen,
+ unsigned int uDMAIdx,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ PSKeyItem pTransmitKey,
+ unsigned int uNodeIndex,
+ WORD wCurrentRate,
+ unsigned int *pcbHeaderLen,
+ unsigned int *pcbTotalLen
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT cbFrameSize,cbFrameBodySize;
+ unsigned int cbFrameSize, cbFrameBodySize;
PTX_BUFFER pTxBufHead;
- UINT cb802_1_H_len;
- UINT cbIVlen=0,cbICVlen=0,cbMIClen=0,cbMACHdLen=0,cbFCSlen=4;
- UINT cbMICHDR = 0;
+ unsigned int cb802_1_H_len;
+ unsigned int cbIVlen = 0, cbICVlen = 0, cbMIClen = 0,
+ cbMACHdLen = 0, cbFCSlen = 4;
+ unsigned int cbMICHDR = 0;
BOOL bNeedACK,bRTS;
PBYTE pbyType,pbyMacHdr,pbyIVHead,pbyPayloadHead,pbyTxBufferAddr;
- BYTE abySNAP_RFC1042[6] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
- BYTE abySNAP_Bridgetunnel[6] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
- UINT uDuration;
- UINT cbHeaderLength= 0,uPadding = 0;
- PVOID pvRrvTime;
+ BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
+ BYTE abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};
+ unsigned int uDuration;
+ unsigned int cbHeaderLength = 0, uPadding = 0;
+ void *pvRrvTime;
PSMICHDRHead pMICHDR;
- PVOID pvRTS;
- PVOID pvCTS;
- PVOID pvTxDataHd;
+ void *pvRTS;
+ void *pvCTS;
+ void *pvTxDataHd;
BYTE byFBOption = AUTO_FB_NONE,byFragType;
WORD wTxBufSize;
DWORD dwMICKey0,dwMICKey1,dwMIC_Priority,dwCRC;
@@ -1455,7 +1483,7 @@ s_bPacketToWirelessUsb(
cb802_1_H_len = 0;
}
- cbFrameBodySize = uSkbPacketLen - U_HEADER_LEN + cb802_1_H_len;
+ cbFrameBodySize = uSkbPacketLen - ETH_HLEN + cb802_1_H_len;
//Set packet type
pTxBufHead->wFIFOCtl |= (WORD)(byPktType<<8);
@@ -1658,7 +1686,8 @@ s_bPacketToWirelessUsb(
//Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate, (PVOID)pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
+ s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ (void *)pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
cbFrameSize, bNeedACK, uDMAIdx, psEthHeader);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
@@ -1700,13 +1729,13 @@ s_bPacketToWirelessUsb(
if (pPacket != NULL) {
// Copy the Packet into a tx Buffer
memcpy((pbyPayloadHead + cb802_1_H_len),
- (pPacket + U_HEADER_LEN),
- uSkbPacketLen - U_HEADER_LEN
+ (pPacket + ETH_HLEN),
+ uSkbPacketLen - ETH_HLEN
);
} else {
// while bRelayPacketSend psEthHeader is point to header+payload
- memcpy((pbyPayloadHead + cb802_1_H_len), ((PBYTE)psEthHeader)+U_HEADER_LEN, uSkbPacketLen - U_HEADER_LEN);
+ memcpy((pbyPayloadHead + cb802_1_H_len), ((PBYTE)psEthHeader) + ETH_HLEN, uSkbPacketLen - ETH_HLEN);
}
ASSERT(uLength == cbNdisBodySize);
@@ -1772,7 +1801,7 @@ s_bPacketToWirelessUsb(
}
if (pDevice->bSoftwareGenCrcErr == TRUE) {
- UINT cbLen;
+ unsigned int cbLen;
PDWORD pdwCRC;
dwCRC = 0xFFFFFFFFL;
@@ -1820,16 +1849,16 @@ s_bPacketToWirelessUsb(
*
-*/
-VOID
+void
s_vGenerateMACHeader (
- IN PSDevice pDevice,
- IN PBYTE pbyBufferAddr,
- IN WORD wDuration,
- IN PSEthernetHeader psEthHeader,
- IN BOOL bNeedEncrypt,
- IN WORD wFragType,
- IN UINT uDMAIdx,
- IN UINT uFragIdx
+ PSDevice pDevice,
+ PBYTE pbyBufferAddr,
+ WORD wDuration,
+ PSEthernetHeader psEthHeader,
+ BOOL bNeedEncrypt,
+ WORD wFragType,
+ unsigned int uDMAIdx,
+ unsigned int uFragIdx
)
{
PS802_11Header pMACHeader = (PS802_11Header)pbyBufferAddr;
@@ -1843,21 +1872,35 @@ s_vGenerateMACHeader (
}
if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pMACHeader->abyAddr1[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]), &(pDevice->abyBSSID[0]), ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr3[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
pMACHeader->wFrameCtl |= FC_FROMDS;
- }
- else {
- if (pDevice->eOPMode == OP_MODE_ADHOC) {
- memcpy(&(pMACHeader->abyAddr1[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr3[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
- }
- else {
- memcpy(&(pMACHeader->abyAddr3[0]), &(psEthHeader->abyDstAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr2[0]), &(psEthHeader->abySrcAddr[0]), U_ETHER_ADDR_LEN);
- memcpy(&(pMACHeader->abyAddr1[0]), &(pDevice->abyBSSID[0]), U_ETHER_ADDR_LEN);
+ } else {
+ if (pDevice->eOPMode == OP_MODE_ADHOC) {
+ memcpy(&(pMACHeader->abyAddr1[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr3[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
+ } else {
+ memcpy(&(pMACHeader->abyAddr3[0]),
+ &(psEthHeader->abyDstAddr[0]),
+ ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr2[0]),
+ &(psEthHeader->abySrcAddr[0]),
+ ETH_ALEN);
+ memcpy(&(pMACHeader->abyAddr1[0]),
+ &(pDevice->abyBSSID[0]),
+ ETH_ALEN);
pMACHeader->wFrameCtl |= FC_TODS;
}
}
@@ -1908,34 +1951,34 @@ s_vGenerateMACHeader (
-*/
CMD_STATUS csMgmt_xmit(
- IN PSDevice pDevice,
- IN PSTxMgmtPacket pPacket
+ PSDevice pDevice,
+ PSTxMgmtPacket pPacket
)
{
BYTE byPktType;
PBYTE pbyTxBufferAddr;
- PVOID pvRTS;
+ void *pvRTS;
PSCTS pCTS;
- PVOID pvTxDataHd;
- UINT uDuration;
- UINT cbReqCount;
+ void *pvTxDataHd;
+ unsigned int uDuration;
+ unsigned int cbReqCount;
PS802_11Header pMACHeader;
- UINT cbHeaderSize;
- UINT cbFrameBodySize;
+ unsigned int cbHeaderSize;
+ unsigned int cbFrameBodySize;
BOOL bNeedACK;
BOOL bIsPSPOLL = FALSE;
PSTxBufHead pTxBufHead;
- UINT cbFrameSize;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT uPadding = 0;
+ unsigned int cbFrameSize;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int uPadding = 0;
WORD wTxBufSize;
- UINT cbMacHdLen;
+ unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
- PVOID pvRrvTime;
- PVOID pMICHDR;
+ void *pvRrvTime;
+ void *pMICHDR;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
WORD wCurrentRate = RATE_1M;
PTX_BUFFER pTX_Buffer;
@@ -2087,10 +2130,15 @@ CMD_STATUS csMgmt_xmit(
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
}
- memset((PVOID)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
+ (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(pPacket->p80211Header->sA3.abyAddr1[0]), U_ETHER_ADDR_LEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(pPacket->p80211Header->sA3.abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(sEthHeader.abyDstAddr[0]),
+ &(pPacket->p80211Header->sA3.abyAddr1[0]),
+ ETH_ALEN);
+ memcpy(&(sEthHeader.abySrcAddr[0]),
+ &(pPacket->p80211Header->sA3.abyAddr2[0]),
+ ETH_ALEN);
//=========================
// No Fragmentation
//=========================
@@ -2197,20 +2245,20 @@ CMD_STATUS csMgmt_xmit(
CMD_STATUS
csBeacon_xmit(
- IN PSDevice pDevice,
- IN PSTxMgmtPacket pPacket
+ PSDevice pDevice,
+ PSTxMgmtPacket pPacket
)
{
- UINT cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
- UINT cbHeaderSize = 0;
+ unsigned int cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
+ unsigned int cbHeaderSize = 0;
WORD wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead;
PS802_11Header pMACHeader;
PSTxDataHead_ab pTxDataHead;
WORD wCurrentRate;
- UINT cbFrameBodySize;
- UINT cbReqCount;
+ unsigned int cbFrameBodySize;
+ unsigned int cbReqCount;
PBEACON_BUFFER pTX_Buffer;
PBYTE pbyTxBufferAddr;
PUSB_SEND_CONTEXT pContext;
@@ -2288,50 +2336,50 @@ csBeacon_xmit(
-VOID
+void
vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
BYTE byPktType;
PBYTE pbyTxBufferAddr;
- PVOID pvRTS;
- PVOID pvCTS;
- PVOID pvTxDataHd;
- UINT uDuration;
- UINT cbReqCount;
+ void *pvRTS;
+ void *pvCTS;
+ void *pvTxDataHd;
+ unsigned int uDuration;
+ unsigned int cbReqCount;
PS802_11Header pMACHeader;
- UINT cbHeaderSize;
- UINT cbFrameBodySize;
+ unsigned int cbHeaderSize;
+ unsigned int cbFrameBodySize;
BOOL bNeedACK;
BOOL bIsPSPOLL = FALSE;
PSTxBufHead pTxBufHead;
- UINT cbFrameSize;
- UINT cbIVlen = 0;
- UINT cbICVlen = 0;
- UINT cbMIClen = 0;
- UINT cbFCSlen = 4;
- UINT uPadding = 0;
- UINT cbMICHDR = 0;
- UINT uLength = 0;
+ unsigned int cbFrameSize;
+ unsigned int cbIVlen = 0;
+ unsigned int cbICVlen = 0;
+ unsigned int cbMIClen = 0;
+ unsigned int cbFCSlen = 4;
+ unsigned int uPadding = 0;
+ unsigned int cbMICHDR = 0;
+ unsigned int uLength = 0;
DWORD dwMICKey0, dwMICKey1;
DWORD dwMIC_Priority;
PDWORD pdwMIC_L;
PDWORD pdwMIC_R;
WORD wTxBufSize;
- UINT cbMacHdLen;
+ unsigned int cbMacHdLen;
SEthernetHeader sEthHeader;
- PVOID pvRrvTime;
- PVOID pMICHDR;
+ void *pvRrvTime;
+ void *pMICHDR;
WORD wCurrentRate = RATE_1M;
PUWLAN_80211HDR p80211Header;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
BOOL bNodeExist = FALSE;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
PBYTE pbyIVHead;
PBYTE pbyPayloadHead;
PBYTE pbyMacHdr;
- UINT cbExtSuppRate = 0;
+ unsigned int cbExtSuppRate = 0;
PTX_BUFFER pTX_Buffer;
PUSB_SEND_CONTEXT pContext;
// PWLAN_IE pItem;
@@ -2520,9 +2568,14 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
}
- memset((PVOID)(pbyTxBufferAddr + wTxBufSize), 0, (cbHeaderSize - wTxBufSize));
- memcpy(&(sEthHeader.abyDstAddr[0]), &(p80211Header->sA3.abyAddr1[0]), U_ETHER_ADDR_LEN);
- memcpy(&(sEthHeader.abySrcAddr[0]), &(p80211Header->sA3.abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
+ (cbHeaderSize - wTxBufSize));
+ memcpy(&(sEthHeader.abyDstAddr[0]),
+ &(p80211Header->sA3.abyAddr1[0]),
+ ETH_ALEN);
+ memcpy(&(sEthHeader.abySrcAddr[0]),
+ &(p80211Header->sA3.abyAddr2[0]),
+ ETH_ALEN);
//=========================
// No Fragmentation
//=========================
@@ -2692,21 +2745,21 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
NTSTATUS
nsDMA_tx_packet(
- IN PSDevice pDevice,
- IN UINT uDMAIdx,
- IN struct sk_buff *skb
+ PSDevice pDevice,
+ unsigned int uDMAIdx,
+ struct sk_buff *skb
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT BytesToWrite =0,uHeaderLen = 0;
- UINT uNodeIndex = 0;
+ unsigned int BytesToWrite = 0, uHeaderLen = 0;
+ unsigned int uNodeIndex = 0;
BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
WORD wAID;
BYTE byPktType;
BOOL bNeedEncryption = FALSE;
PSKeyItem pTransmitKey = NULL;
SKeyItem STempKey;
- UINT ii;
+ unsigned int ii;
BOOL bTKIP_UseGTK = FALSE;
BOOL bNeedDeAuth = FALSE;
PBYTE pbyBSSID;
@@ -2714,7 +2767,7 @@ nsDMA_tx_packet(
PUSB_SEND_CONTEXT pContext;
BOOL fConvertedPacket;
PTX_BUFFER pTX_Buffer;
- UINT status;
+ unsigned int status;
WORD wKeepRate = pDevice->wCurrentRate;
struct net_device_stats* pStats = &pDevice->stats;
//#ifdef WPA_SM_Transtatus
@@ -2796,7 +2849,7 @@ nsDMA_tx_packet(
return STATUS_RESOURCES;
}
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), U_HEADER_LEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)(skb->data), ETH_HLEN);
//mike add:station mode check eapol-key challenge--->
{
@@ -2805,10 +2858,10 @@ nsDMA_tx_packet(
BYTE Descriptor_type;
WORD Key_info;
- Protocol_Version = skb->data[U_HEADER_LEN];
- Packet_Type = skb->data[U_HEADER_LEN+1];
- Descriptor_type = skb->data[U_HEADER_LEN+1+1+2];
- Key_info = (skb->data[U_HEADER_LEN+1+1+2+1] << 8)|(skb->data[U_HEADER_LEN+1+1+2+2]);
+ Protocol_Version = skb->data[ETH_HLEN];
+ Packet_Type = skb->data[ETH_HLEN+1];
+ Descriptor_type = skb->data[ETH_HLEN+1+1+2];
+ Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame transfer
@@ -2971,10 +3024,12 @@ nsDMA_tx_packet(
}
}
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma_tx: pDevice->wCurrentRate = %d \n", pDevice->wCurrentRate);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
+ pDevice->wCurrentRate);
if (wKeepRate != pDevice->wCurrentRate) {
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_SETPOWER, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SETPOWER, NULL);
}
if (pDevice->wCurrentRate <= RATE_11M) {
@@ -3057,7 +3112,9 @@ nsDMA_tx_packet(
if ( pDevice->bEnablePSMode == TRUE ) {
if ( !pDevice->bPSModeTxBurst ) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_MAC_DISPOWERSAVING, NULL);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_MAC_DISPOWERSAVING,
+ NULL);
pDevice->bPSModeTxBurst = TRUE;
}
}
@@ -3077,7 +3134,7 @@ nsDMA_tx_packet(
if (bNeedDeAuth == TRUE) {
WORD wReason = WLAN_MGMT_REASON_MIC_FAILURE;
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_DEAUTH, (PBYTE)&wReason);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_DEAUTH, (PBYTE) &wReason);
}
if(status!=STATUS_PENDING) {
@@ -3110,14 +3167,14 @@ nsDMA_tx_packet(
BOOL
bRelayPacketSend (
- IN PSDevice pDevice,
- IN PBYTE pbySkbData,
- IN UINT uDataLen,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PBYTE pbySkbData,
+ unsigned int uDataLen,
+ unsigned int uNodeIndex
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT BytesToWrite =0,uHeaderLen = 0;
+ unsigned int BytesToWrite = 0, uHeaderLen = 0;
BYTE byPktType = PK_TYPE_11B;
BOOL bNeedEncryption = FALSE;
SKeyItem STempKey;
@@ -3127,7 +3184,7 @@ bRelayPacketSend (
BYTE byPktTyp;
BOOL fConvertedPacket;
PTX_BUFFER pTX_Buffer;
- UINT status;
+ unsigned int status;
WORD wKeepRate = pDevice->wCurrentRate;
@@ -3138,7 +3195,7 @@ bRelayPacketSend (
return FALSE;
}
- memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, U_HEADER_LEN);
+ memcpy(pDevice->sTxEthHeader.abyDstAddr, (PBYTE)pbySkbData, ETH_HLEN);
if (pDevice->bEncryptionEnable == TRUE) {
bNeedEncryption = TRUE;
@@ -3197,9 +3254,8 @@ bRelayPacketSend (
pDevice->wCurrentRate = pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
}
-
if (wKeepRate != pDevice->wCurrentRate) {
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SETPOWER, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SETPOWER, NULL);
}
if (pDevice->wCurrentRate <= RATE_11M)
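
The s_vGenerateMACHeader() hunks above only re-wrap the existing 802.11 address mapping without changing it: AP mode sets FromDS (Addr1 = destination, Addr2 = BSSID, Addr3 = source), ad-hoc keeps Addr1 = destination, Addr2 = source, Addr3 = BSSID, and the infrastructure-station path sets ToDS (Addr1 = BSSID, Addr2 = source, Addr3 = destination). A condensed sketch of that same logic, written against the driver's own types (the helper name fill_80211_addrs is illustrative, not part of the patch):

	static void fill_80211_addrs(PSDevice pDevice, PS802_11Header pMACHeader,
				     PSEthernetHeader psEthHeader)
	{
		if (pDevice->eOPMode == OP_MODE_AP) {
			memcpy(pMACHeader->abyAddr1, psEthHeader->abyDstAddr, ETH_ALEN);
			memcpy(pMACHeader->abyAddr2, pDevice->abyBSSID, ETH_ALEN);
			memcpy(pMACHeader->abyAddr3, psEthHeader->abySrcAddr, ETH_ALEN);
			pMACHeader->wFrameCtl |= FC_FROMDS;
		} else if (pDevice->eOPMode == OP_MODE_ADHOC) {
			memcpy(pMACHeader->abyAddr1, psEthHeader->abyDstAddr, ETH_ALEN);
			memcpy(pMACHeader->abyAddr2, psEthHeader->abySrcAddr, ETH_ALEN);
			memcpy(pMACHeader->abyAddr3, pDevice->abyBSSID, ETH_ALEN);
		} else {
			/* infrastructure station */
			memcpy(pMACHeader->abyAddr1, pDevice->abyBSSID, ETH_ALEN);
			memcpy(pMACHeader->abyAddr2, psEthHeader->abySrcAddr, ETH_ALEN);
			memcpy(pMACHeader->abyAddr3, psEthHeader->abyDstAddr, ETH_ALEN);
			pMACHeader->wFrameCtl |= FC_TODS;
		}
	}
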
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 6bc22d371c1..f90de42d7ab 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -43,8 +43,8 @@
typedef struct tagSRTSDataF {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
- BYTE abyTA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
+ BYTE abyTA[ETH_ALEN];
} SRTSDataF, *PSRTSDataF;
//
@@ -53,7 +53,7 @@ typedef struct tagSRTSDataF {
typedef struct tagSCTSDataF {
WORD wFrameControl;
WORD wDurationID;
- BYTE abyRA[U_ETHER_ADDR_LEN];
+ BYTE abyRA[ETH_ALEN];
WORD wReserved;
} SCTSDataF, *PSCTSDataF;
@@ -667,28 +667,28 @@ typedef struct tagSBEACON_BUFFER
BOOL
bPacketToWirelessUsb(
- IN PSDevice pDevice,
- IN BYTE byPktType,
- IN PBYTE usbPacketBuf,
- IN BOOL bNeedEncrypt,
- IN UINT cbPayloadSize,
- IN UINT uDMAIdx,
- IN PSEthernetHeader psEthHeader,
- IN PBYTE pPacket,
- IN PSKeyItem pTransmitKey,
- IN UINT uNodeIndex,
- IN WORD wCurrentRate,
- OUT UINT *pcbHeaderLen,
- OUT UINT *pcbTotalLen
+ PSDevice pDevice,
+ BYTE byPktType,
+ PBYTE usbPacketBuf,
+ BOOL bNeedEncrypt,
+ unsigned int cbPayloadSize,
+ unsigned int uDMAIdx,
+ PSEthernetHeader psEthHeader,
+ PBYTE pPacket,
+ PSKeyItem pTransmitKey,
+ unsigned int uNodeIndex,
+ WORD wCurrentRate,
+ unsigned int *pcbHeaderLen,
+ unsigned int *pcbTotalLen
);
-VOID vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb);
-NTSTATUS nsDMA_tx_packet(PSDevice pDevice, UINT uDMAIdx, struct sk_buff *skb);
+void vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb);
+NTSTATUS nsDMA_tx_packet(PSDevice pDevice,
+ unsigned int uDMAIdx,
+ struct sk_buff *skb);
CMD_STATUS csMgmt_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
CMD_STATUS csBeacon_xmit(PSDevice pDevice, PSTxMgmtPacket pPacket);
-BOOL bRelayPacketSend(PSDevice pDevice, PBYTE pbySkbData, UINT uDataLen, UINT uNodeIndex);
-
-#endif // __RXTX_H__
-
-
+BOOL bRelayPacketSend(PSDevice pDevice, PBYTE pbySkbData,
+ unsigned int uDataLen, unsigned int uNodeIndex);
+#endif /* __RXTX_H__ */
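
The rxtx changes replace the driver-local U_HEADER_LEN with ETH_HLEN when turning an Ethernet sk_buff into an 802.11 payload, so the body size becomes the packet length minus the 14-byte Ethernet header plus any prepended 802.1H/LLC header. A minimal sketch of that arithmetic, assuming only <linux/if_ether.h> (the standalone helper is illustrative):

	#include <linux/if_ether.h>	/* ETH_HLEN == 14, ETH_ALEN == 6 */

	/* Mirrors cbFrameBodySize = uSkbPacketLen - ETH_HLEN + cb802_1_H_len */
	static unsigned int frame_body_size(unsigned int uSkbPacketLen,
					    unsigned int cb802_1_H_len)
	{
		return uSkbPacketLen - ETH_HLEN + cb802_1_H_len;
	}
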
diff --git a/drivers/staging/vt6656/srom.h b/drivers/staging/vt6656/srom.h
index 4c89e7ad6b4..dba21a54414 100644
--- a/drivers/staging/vt6656/srom.h
+++ b/drivers/staging/vt6656/srom.h
@@ -124,4 +124,4 @@ typedef struct tagSSromReg {
/*--------------------- Export Functions --------------------------*/
-#endif // __EEPROM_H__
+#endif /* __EEPROM_H__ */
diff --git a/drivers/staging/vt6656/tcrc.c b/drivers/staging/vt6656/tcrc.c
index 5f0c74763f8..e25021e850a 100644
--- a/drivers/staging/vt6656/tcrc.c
+++ b/drivers/staging/vt6656/tcrc.c
@@ -41,7 +41,7 @@
/*--------------------- Static Variables --------------------------*/
-// 32-bit CRC table
+/* 32-bit CRC table */
static const DWORD s_adwCrc32Table[256] = {
0x00000000L, 0x77073096L, 0xEE0E612CL, 0x990951BAL,
0x076DC419L, 0x706AF48FL, 0xE963A535L, 0x9E6495A3L,
@@ -132,17 +132,18 @@ static const DWORD s_adwCrc32Table[256] = {
* Return Value: CRC-32
*
-*/
-DWORD CRCdwCrc32 (PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed)
+DWORD CRCdwCrc32(PBYTE pbyData, unsigned int cbByte, DWORD dwCrcSeed)
{
- DWORD dwCrc;
+ DWORD dwCrc;
- dwCrc = dwCrcSeed;
- while (cbByte--) {
- dwCrc = s_adwCrc32Table[(BYTE)((dwCrc ^ (*pbyData)) & 0xFF)] ^ (dwCrc >> 8);
- pbyData++;
- }
+ dwCrc = dwCrcSeed;
+ while (cbByte--) {
+ dwCrc = s_adwCrc32Table[(BYTE)((dwCrc ^ (*pbyData)) & 0xFF)] ^
+ (dwCrc >> 8);
+ pbyData++;
+ }
- return dwCrc;
+ return dwCrc;
}
@@ -164,7 +165,7 @@ DWORD CRCdwCrc32 (PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed)
* Return Value: CRC-32
*
-*/
-DWORD CRCdwGetCrc32 (PBYTE pbyData, UINT cbByte)
+DWORD CRCdwGetCrc32(PBYTE pbyData, unsigned int cbByte)
{
return ~CRCdwCrc32(pbyData, cbByte, 0xFFFFFFFFL);
}
@@ -190,7 +191,7 @@ DWORD CRCdwGetCrc32 (PBYTE pbyData, UINT cbByte)
* Return Value: CRC-32
*
-*/
-DWORD CRCdwGetCrc32Ex(PBYTE pbyData, UINT cbByte, DWORD dwPreCRC)
+DWORD CRCdwGetCrc32Ex(PBYTE pbyData, unsigned int cbByte, DWORD dwPreCRC)
{
return CRCdwCrc32(pbyData, cbByte, dwPreCRC);
}
diff --git a/drivers/staging/vt6656/tcrc.h b/drivers/staging/vt6656/tcrc.h
index 5faa48b0a74..4dfd01e477a 100644
--- a/drivers/staging/vt6656/tcrc.h
+++ b/drivers/staging/vt6656/tcrc.h
@@ -43,11 +43,8 @@
/*--------------------- Export Functions --------------------------*/
-DWORD CRCdwCrc32(PBYTE pbyData, UINT cbByte, DWORD dwCrcSeed);
-DWORD CRCdwGetCrc32(PBYTE pbyData, UINT cbByte);
-DWORD CRCdwGetCrc32Ex(PBYTE pbyData, UINT cbByte, DWORD dwPreCRC);
-
-#endif // __TCRC_H__
-
-
+DWORD CRCdwCrc32(PBYTE pbyData, unsigned int cbByte, DWORD dwCrcSeed);
+DWORD CRCdwGetCrc32(PBYTE pbyData, unsigned int cbByte);
+DWORD CRCdwGetCrc32Ex(PBYTE pbyData, unsigned int cbByte, DWORD dwPreCRC);
+#endif /* __TCRC_H__ */
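
The tcrc changes above are purely cosmetic; CRCdwCrc32() remains the standard table-driven, reflected CRC-32 (seed 0xFFFFFFFF, per-byte table lookup, right shift by 8), and CRCdwGetCrc32() adds the final bit inversion. As a hedged cross-check only — not something the patch introduces, and assuming the kernel's generic CRC library (<linux/crc32.h>, CONFIG_CRC32) is available to the driver — the same value should be obtainable from crc32_le():

	#include <linux/crc32.h>

	/* Expected to match CRCdwGetCrc32(pbyData, cbByte): seed with all ones,
	 * run the reflected CRC-32, invert the result. */
	static DWORD crc32_via_kernel_lib(PBYTE pbyData, unsigned int cbByte)
	{
		return ~crc32_le(~0U, pbyData, cbByte);
	}
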
diff --git a/drivers/staging/vt6656/tether.c b/drivers/staging/vt6656/tether.c
index c90b469ad54..4f368f174b2 100644
--- a/drivers/staging/vt6656/tether.c
+++ b/drivers/staging/vt6656/tether.c
@@ -61,25 +61,25 @@
* Return Value: Hash value
*
*/
-BYTE ETHbyGetHashIndexByCrc32 (PBYTE pbyMultiAddr)
+BYTE ETHbyGetHashIndexByCrc32(PBYTE pbyMultiAddr)
{
- int ii;
- BYTE byTmpHash;
- BYTE byHash = 0;
+ int ii;
+ BYTE byTmpHash;
+ BYTE byHash = 0;
- // get the least 6-bits from CRC generator
- byTmpHash = (BYTE)(CRCdwCrc32(pbyMultiAddr, U_ETHER_ADDR_LEN,
- 0xFFFFFFFFL) & 0x3F);
- // reverse most bit to least bit
- for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
- byHash <<= 1;
- if (byTmpHash & 0x01)
- byHash |= 1;
- byTmpHash >>= 1;
- }
+ /* get the least 6-bits from CRC generator */
+ byTmpHash = (BYTE)(CRCdwCrc32(pbyMultiAddr, ETH_ALEN,
+ 0xFFFFFFFFL) & 0x3F);
+ /* reverse most bit to least bit */
+ for (ii = 0; ii < (sizeof(byTmpHash) * 8); ii++) {
+ byHash <<= 1;
+ if (byTmpHash & 0x01)
+ byHash |= 1;
+ byTmpHash >>= 1;
+ }
- // adjust 6-bits to the right most
- return (byHash >> 2);
+ /* adjust 6-bits to the right most */
+ return byHash >> 2;
}
@@ -96,14 +96,13 @@ BYTE ETHbyGetHashIndexByCrc32 (PBYTE pbyMultiAddr)
* Return Value: TRUE if ok; FALSE if error.
*
*/
-BOOL ETHbIsBufferCrc32Ok (PBYTE pbyBuffer, UINT cbFrameLength)
+BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength)
{
- DWORD dwCRC;
+ DWORD dwCRC;
- dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
- if (cpu_to_le32(*((PDWORD)(pbyBuffer + cbFrameLength - 4))) != dwCRC) {
- return FALSE;
- }
- return TRUE;
+ dwCRC = CRCdwGetCrc32(pbyBuffer, cbFrameLength - 4);
+ if (cpu_to_le32(*((PDWORD)(pbyBuffer + cbFrameLength - 4))) != dwCRC)
+ return FALSE;
+ return TRUE;
}
diff --git a/drivers/staging/vt6656/tether.h b/drivers/staging/vt6656/tether.h
index 5a3c326436c..d63586d5cdb 100644
--- a/drivers/staging/vt6656/tether.h
+++ b/drivers/staging/vt6656/tether.h
@@ -29,26 +29,24 @@
#ifndef __TETHER_H__
#define __TETHER_H__
+#include <linux/if_ether.h>
#include "ttype.h"
/*--------------------- Export Definitions -------------------------*/
//
// constants
//
-#define U_ETHER_ADDR_LEN 6 // Ethernet address length
-#define U_TYPE_LEN 2 //
#define U_CRC_LEN 4 //
-#define U_HEADER_LEN (U_ETHER_ADDR_LEN * 2 + U_TYPE_LEN)
-#define U_ETHER_ADDR_STR_LEN (U_ETHER_ADDR_LEN * 2 + 1)
+#define U_ETHER_ADDR_STR_LEN (ETH_ALEN * 2 + 1)
// Ethernet address string length
#define MIN_DATA_LEN 46 // min data length
#define MAX_DATA_LEN 1500 // max data length
-#define MIN_PACKET_LEN (MIN_DATA_LEN + U_HEADER_LEN)
+#define MIN_PACKET_LEN (MIN_DATA_LEN + ETH_HLEN)
// 60
// min total packet length (tx)
-#define MAX_PACKET_LEN (MAX_DATA_LEN + U_HEADER_LEN)
+#define MAX_PACKET_LEN (MAX_DATA_LEN + ETH_HLEN)
// 1514
// max total packet length (tx)
@@ -167,8 +165,8 @@
// Ethernet packet
//
typedef struct tagSEthernetHeader {
- BYTE abyDstAddr[U_ETHER_ADDR_LEN];
- BYTE abySrcAddr[U_ETHER_ADDR_LEN];
+ BYTE abyDstAddr[ETH_ALEN];
+ BYTE abySrcAddr[ETH_ALEN];
WORD wType;
}__attribute__ ((__packed__))
SEthernetHeader, *PSEthernetHeader;
@@ -178,8 +176,8 @@ SEthernetHeader, *PSEthernetHeader;
// 802_3 packet
//
typedef struct tagS802_3Header {
- BYTE abyDstAddr[U_ETHER_ADDR_LEN];
- BYTE abySrcAddr[U_ETHER_ADDR_LEN];
+ BYTE abyDstAddr[ETH_ALEN];
+ BYTE abySrcAddr[ETH_ALEN];
WORD wLen;
}__attribute__ ((__packed__))
S802_3Header, *PS802_3Header;
@@ -190,11 +188,11 @@ S802_3Header, *PS802_3Header;
typedef struct tagS802_11Header {
WORD wFrameCtl;
WORD wDurationID;
- BYTE abyAddr1[U_ETHER_ADDR_LEN];
- BYTE abyAddr2[U_ETHER_ADDR_LEN];
- BYTE abyAddr3[U_ETHER_ADDR_LEN];
+ BYTE abyAddr1[ETH_ALEN];
+ BYTE abyAddr2[ETH_ALEN];
+ BYTE abyAddr3[ETH_ALEN];
WORD wSeqCtl;
- BYTE abyAddr4[U_ETHER_ADDR_LEN];
+ BYTE abyAddr4[ETH_ALEN];
}__attribute__ ((__packed__))
S802_11Header, *PS802_11Header;
@@ -228,9 +226,6 @@ S802_11Header, *PS802_11Header;
BYTE ETHbyGetHashIndexByCrc32(PBYTE pbyMultiAddr);
//BYTE ETHbyGetHashIndexByCrc(PBYTE pbyMultiAddr);
-BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, UINT cbFrameLength);
-
-#endif // __TETHER_H__
-
-
+BOOL ETHbIsBufferCrc32Ok(PBYTE pbyBuffer, unsigned int cbFrameLength);
+#endif /* __TETHER_H__ */
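
With the hunk above, tether.h takes its Ethernet constants from <linux/if_ether.h> (ETH_ALEN is 6, ETH_HLEN is 14) instead of the driver-local U_ETHER_ADDR_LEN and U_HEADER_LEN, and the address arrays in SEthernetHeader and S802_11Header are sized from ETH_ALEN. A minimal sketch of how the converted memcpy() calls rely on those sizes (the helper name is illustrative):

	#include <linux/if_ether.h>
	#include <linux/string.h>

	static void copy_eth_addresses(SEthernetHeader *dst,
				       const SEthernetHeader *src)
	{
		memcpy(dst->abyDstAddr, src->abyDstAddr, ETH_ALEN);
		memcpy(dst->abySrcAddr, src->abySrcAddr, ETH_ALEN);
	}
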
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index 8ca154080e9..f83af5913aa 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -183,7 +183,7 @@ unsigned int rotr1(unsigned int a)
* Return Value: none
*
*/
-VOID TKIPvMixKey(
+void TKIPvMixKey(
PBYTE pbyTKey,
PBYTE pbyTA,
WORD wTSC15_0,
diff --git a/drivers/staging/vt6656/tkip.h b/drivers/staging/vt6656/tkip.h
index 847ecdf97ee..47c3a853b92 100644
--- a/drivers/staging/vt6656/tkip.h
+++ b/drivers/staging/vt6656/tkip.h
@@ -46,7 +46,7 @@
/*--------------------- Export Functions --------------------------*/
-VOID TKIPvMixKey(
+void TKIPvMixKey(
PBYTE pbyTKey,
PBYTE pbyTA,
WORD wTSC15_0,
@@ -54,7 +54,4 @@ VOID TKIPvMixKey(
PBYTE pbyRC4Key
);
-#endif // __TKIP_H__
-
-
-
+#endif /* __TKIP_H__ */
diff --git a/drivers/staging/vt6656/tmacro.h b/drivers/staging/vt6656/tmacro.h
index e96c140de05..3c81e2b0791 100644
--- a/drivers/staging/vt6656/tmacro.h
+++ b/drivers/staging/vt6656/tmacro.h
@@ -57,6 +57,4 @@
#define MAKEDWORD(lw, hw) ((DWORD)(((WORD)(lw)) | (((DWORD)((WORD)(hw))) << 16)))
#endif
-#endif // __TMACRO_H__
-
-
+#endif /* __TMACRO_H__ */
diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
index 9ee3f436fc3..c27f9858e2e 100644
--- a/drivers/staging/vt6656/ttype.h
+++ b/drivers/staging/vt6656/ttype.h
@@ -26,25 +26,11 @@
*
*/
-
#ifndef __TTYPE_H__
#define __TTYPE_H__
-
/******* Common definitions and typedefs ***********************************/
-#ifndef VOID
-#define VOID void
-#endif
-
-#ifndef IN
-#define IN
-#endif
-
-#ifndef OUT
-#define OUT
-#endif
-
//2007-0115-05<Add>by MikeLiu
#ifndef TxInSleep
#define TxInSleep
@@ -52,7 +38,6 @@
//DavidWang
-
//2007-0814-01<Add>by MikeLiu
#ifndef Safe_Close
#define Safe_Close
@@ -72,11 +57,6 @@ typedef int BOOL;
#define FALSE 0
#endif
-
-#if !defined(SUCCESS)
-#define SUCCESS 0
-#endif
-
//2007-0809-01<Add>by MikeLiu
#ifndef update_BssList
#define update_BssList
@@ -92,31 +72,6 @@ typedef int BOOL;
/****** Simple typedefs ***************************************************/
-/* These lines assume that your compiler's longs are 32 bits and
- * shorts are 16 bits. It is already assumed that chars are 8 bits,
- * but it doesn't matter if they're signed or unsigned.
- */
-
-typedef signed char I8; /* 8-bit signed integer */
-
-typedef unsigned char U8; /* 8-bit unsigned integer */
-typedef unsigned short U16; /* 16-bit unsigned integer */
-typedef unsigned long U32; /* 32-bit unsigned integer */
-
-
-typedef char CHAR;
-typedef signed short SHORT;
-typedef signed int INT;
-typedef signed long LONG;
-
-typedef unsigned char UCHAR;
-typedef unsigned short USHORT;
-typedef unsigned int UINT;
-typedef unsigned long ULONG;
-typedef unsigned long long ULONGLONG; //64 bit
-
-
-
typedef unsigned char BYTE; // 8-bit
typedef unsigned short WORD; // 16-bit
typedef unsigned long DWORD; // 32-bit
@@ -133,7 +88,6 @@ typedef union tagUQuadWord {
} UQuadWord;
typedef UQuadWord QWORD; // 64-bit
-
/****** Common pointer types ***********************************************/
typedef unsigned long ULONG_PTR; // 32-bit
@@ -150,13 +104,4 @@ typedef DWORD * PDWORD;
typedef QWORD * PQWORD;
-typedef void * PVOID;
-
-// handle declaration
-#ifdef STRICT
-typedef void *HANDLE;
-#else
-typedef PVOID HANDLE;
-#endif
-
-#endif // __TTYPE_H__
+#endif /* __TTYPE_H__ */
diff --git a/drivers/staging/vt6656/upc.h b/drivers/staging/vt6656/upc.h
index acd1b661490..b33aba4b12c 100644
--- a/drivers/staging/vt6656/upc.h
+++ b/drivers/staging/vt6656/upc.h
@@ -141,7 +141,7 @@
#define PCAvDelayByIO(uDelayUnit) { \
BYTE byData; \
- ULONG ii; \
+ unsigned long ii; \
\
if (uDelayUnit <= 50) { \
udelay(uDelayUnit); \
@@ -159,8 +159,4 @@
/*--------------------- Export Functions --------------------------*/
-
-
-
-#endif // __UPC_H__
-
+#endif /* __UPC_H__ */
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 65e91a332a6..fd2355e34fb 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -71,36 +71,36 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
static
-VOID
+void
s_nsInterruptUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
);
static
-VOID
+void
s_nsBulkInUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
);
static
-VOID
+void
s_nsBulkOutIoCompleteWrite(
- IN struct urb *urb
+ struct urb *urb
);
static
-VOID
+void
s_nsControlInUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
);
static
-VOID
+void
s_nsControlInUsbIoCompleteWrite(
- IN struct urb *urb
+ struct urb *urb
);
/*--------------------- Export Variables --------------------------*/
@@ -111,12 +111,12 @@ s_nsControlInUsbIoCompleteWrite(
NTSTATUS
PIPEnsControlOutAsyn(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
)
{
NTSTATUS ntStatus;
@@ -142,7 +142,7 @@ PIPEnsControlOutAsyn(
0x40, // RequestType
wValue,
wIndex,
- (PVOID) pbyBuffer,
+ (void *) pbyBuffer,
wLength,
HZ
);
@@ -162,12 +162,12 @@ PIPEnsControlOutAsyn(
NTSTATUS
PIPEnsControlOut(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
)
{
NTSTATUS ntStatus = 0;
@@ -193,7 +193,8 @@ PIPEnsControlOut(
usb_sndctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest,
pbyBuffer, wLength, s_nsControlInUsbIoCompleteWrite, pDevice);
- if ((ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC)) != 0) {
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
return STATUS_FAILURE;
}
@@ -223,12 +224,12 @@ PIPEnsControlOut(
NTSTATUS
PIPEnsControlIn(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN OUT PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
)
{
NTSTATUS ntStatus = 0;
@@ -251,7 +252,8 @@ PIPEnsControlIn(
usb_rcvctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest,
pbyBuffer, wLength, s_nsControlInUsbIoCompleteRead, pDevice);
- if ((ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC)) != 0) {
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
}else {
MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
@@ -277,9 +279,9 @@ PIPEnsControlIn(
}
static
-VOID
+void
s_nsControlInUsbIoCompleteWrite(
- IN struct urb *urb
+ struct urb *urb
)
{
PSDevice pDevice;
@@ -318,9 +320,9 @@ s_nsControlInUsbIoCompleteWrite(
*
*/
static
-VOID
+void
s_nsControlInUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
)
{
PSDevice pDevice;
@@ -360,7 +362,7 @@ s_nsControlInUsbIoCompleteRead(
*/
NTSTATUS
PIPEnsInterruptRead(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
NTSTATUS ntStatus = STATUS_FAILURE;
@@ -383,7 +385,7 @@ PIPEnsInterruptRead(
usb_fill_int_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvintpipe(pDevice->usb, 1),
- (PVOID) pDevice->intBuf.pDataBuf,
+ (void *) pDevice->intBuf.pDataBuf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
pDevice,
@@ -394,7 +396,7 @@ PIPEnsInterruptRead(
usb_fill_int_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvintpipe(pDevice->usb, 1),
- (PVOID) pDevice->intBuf.pDataBuf,
+ (void *) pDevice->intBuf.pDataBuf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
pDevice,
@@ -407,14 +409,15 @@ PIPEnsInterruptRead(
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvbulkpipe(pDevice->usb, 1),
- (PVOID) pDevice->intBuf.pDataBuf,
+ (void *) pDevice->intBuf.pDataBuf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
pDevice);
#endif
#endif
- if ((ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC)) != 0) {
+ ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
}
@@ -438,9 +441,9 @@ usb_fill_bulk_urb(pDevice->pInterruptURB,
*
*/
static
-VOID
+void
s_nsInterruptUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
)
{
@@ -481,12 +484,11 @@ s_nsInterruptUsbIoCompleteRead(
pDevice->fKillEventPollingThread = TRUE;
// }
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
- }
- else {
- pDevice->ulIntInBytesRead += (ULONG)urb->actual_length;
- pDevice->ulIntInContCRCError = 0;
- pDevice->bEventAvailable = TRUE;
- INTnsProcessData(pDevice);
+ } else {
+ pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
+ pDevice->ulIntInContCRCError = 0;
+ pDevice->bEventAvailable = TRUE;
+ INTnsProcessData(pDevice);
}
STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);
@@ -494,7 +496,8 @@ s_nsInterruptUsbIoCompleteRead(
if (pDevice->fKillEventPollingThread != TRUE) {
#if 0 //reserve int URB submit
- if ((ntStatus = usb_submit_urb(urb, GFP_ATOMIC)) != 0) {
+ ntStatus = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Re-Submit int URB failed %d\n", ntStatus);
}
#else //replace int URB submit by bulk transfer
@@ -502,12 +505,13 @@ s_nsInterruptUsbIoCompleteRead(
usb_fill_bulk_urb(pDevice->pInterruptURB,
pDevice->usb,
usb_rcvbulkpipe(pDevice->usb, 1),
- (PVOID) pDevice->intBuf.pDataBuf,
+ (void *) pDevice->intBuf.pDataBuf,
MAX_INTERRUPT_SIZE,
s_nsInterruptUsbIoCompleteRead,
pDevice);
- if ((ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC)) != 0) {
+ ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
}
@@ -538,8 +542,8 @@ s_nsInterruptUsbIoCompleteRead(
*/
NTSTATUS
PIPEnsBulkInUsbRead(
- IN PSDevice pDevice,
- IN PRCB pRCB
+ PSDevice pDevice,
+ PRCB pRCB
)
{
NTSTATUS ntStatus= 0;
@@ -567,12 +571,13 @@ PIPEnsBulkInUsbRead(
usb_fill_bulk_urb(pUrb,
pDevice->usb,
usb_rcvbulkpipe(pDevice->usb, 2),
- (PVOID) (pRCB->skb->data),
+ (void *) (pRCB->skb->data),
MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
s_nsBulkInUsbIoCompleteRead,
pRCB);
- if((ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC)) != 0){
+ ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
+ if (ntStatus != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
return STATUS_FAILURE ;
}
@@ -600,15 +605,15 @@ PIPEnsBulkInUsbRead(
*
*/
static
-VOID
+void
s_nsBulkInUsbIoCompleteRead(
- IN struct urb *urb
+ struct urb *urb
)
{
PRCB pRCB = (PRCB)urb->context;
PSDevice pDevice = (PSDevice)pRCB->pDevice;
- ULONG bytesRead;
+ unsigned long bytesRead;
BOOL bIndicateReceive = FALSE;
BOOL bReAllocSkb = FALSE;
NTSTATUS status;
@@ -681,8 +686,8 @@ s_nsBulkInUsbIoCompleteRead(
*/
NDIS_STATUS
PIPEnsSendBulkOut(
- IN PSDevice pDevice,
- IN PUSB_SEND_CONTEXT pContext
+ PSDevice pDevice,
+ PUSB_SEND_CONTEXT pContext
)
{
NTSTATUS status;
@@ -712,13 +717,14 @@ PIPEnsSendBulkOut(
usb_fill_bulk_urb(
pUrb,
pDevice->usb,
- usb_sndbulkpipe(pDevice->usb, 3),
- (PVOID) &(pContext->Data[0]),
+ usb_sndbulkpipe(pDevice->usb, 3),
+ (void *) &(pContext->Data[0]),
pContext->uBufLen,
s_nsBulkOutIoCompleteWrite,
pContext);
- if((status = usb_submit_urb(pUrb, GFP_ATOMIC))!=0)
+ status = usb_submit_urb(pUrb, GFP_ATOMIC);
+ if (status != 0)
{
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
return STATUS_FAILURE;
@@ -759,15 +765,15 @@ PIPEnsSendBulkOut(
*
*/
static
-VOID
+void
s_nsBulkOutIoCompleteWrite(
- IN struct urb *urb
+ struct urb *urb
)
{
PSDevice pDevice;
NTSTATUS status;
CONTEXT_TYPE ContextType;
- ULONG ulBufLen;
+ unsigned long ulBufLen;
PUSB_SEND_CONTEXT pContext;
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index c422d1d0873..f852b39027a 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -43,34 +43,34 @@
NTSTATUS
PIPEnsControlOut(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
);
NTSTATUS
PIPEnsControlOutAsyn(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
);
NTSTATUS
PIPEnsControlIn(
- IN PSDevice pDevice,
- IN BYTE byRequest,
- IN WORD wValue,
- IN WORD wIndex,
- IN WORD wLength,
- IN OUT PBYTE pbyBuffer
+ PSDevice pDevice,
+ BYTE byRequest,
+ WORD wValue,
+ WORD wIndex,
+ WORD wLength,
+ PBYTE pbyBuffer
);
@@ -78,22 +78,19 @@ PIPEnsControlIn(
NTSTATUS
PIPEnsInterruptRead(
- IN PSDevice pDevice
+ PSDevice pDevice
);
NTSTATUS
PIPEnsBulkInUsbRead(
- IN PSDevice pDevice,
- IN PRCB pRCB
+ PSDevice pDevice,
+ PRCB pRCB
);
NTSTATUS
PIPEnsSendBulkOut(
- IN PSDevice pDevice,
- IN PUSB_SEND_CONTEXT pContext
+ PSDevice pDevice,
+ PUSB_SEND_CONTEXT pContext
);
-#endif // __USBPIPE_H__
-
-
-
+#endif /* __USBPIPE_H__ */
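
Besides the VOID/IN cleanups, the usbpipe.c hunks split the old assignment-inside-if idiom into a plain usb_submit_urb() call followed by an explicit status check. A minimal sketch of the resulting bulk-in pattern, built only from identifiers already used in that file (the wrapper function submit_rx_urb is illustrative):

	static int submit_rx_urb(PSDevice pDevice, PRCB pRCB, struct urb *pUrb)
	{
		int status;

		usb_fill_bulk_urb(pUrb, pDevice->usb,
				  usb_rcvbulkpipe(pDevice->usb, 2),
				  (void *)pRCB->skb->data,
				  MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
				  s_nsBulkInUsbIoCompleteRead, pRCB);

		status = usb_submit_urb(pUrb, GFP_ATOMIC);
		if (status != 0)
			DBG_PRT(MSG_LEVEL_DEBUG,
				KERN_INFO "Submit Rx URB failed %d\n", status);
		return status;
	}
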
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 51b2dcfbab9..72e21b6f0e8 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -69,21 +69,21 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
static
-VOID
+void
s_vProbeChannel(
- IN PSDevice pDevice
+ PSDevice pDevice
);
static
PSTxMgmtPacket
s_MgrMakeProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pScanBSSID,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pScanBSSID,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -94,18 +94,12 @@ s_bCommandComplete (
);
-static
-BOOL s_bClearBSSID_SCAN (
- IN HANDLE hDeviceContext
- );
+static BOOL s_bClearBSSID_SCAN(void *hDeviceContext);
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-
-
/*
* Description:
* Stop AdHoc beacon during scan process
@@ -119,6 +113,7 @@ BOOL s_bClearBSSID_SCAN (
* Return Value: none
*
*/
+
static
void
vAdHocBeaconStop(PSDevice pDevice)
@@ -210,9 +205,9 @@ vAdHocBeaconRestart(PSDevice pDevice)
-*/
static
-VOID
+void
s_vProbeChannel(
- IN PSDevice pDevice
+ PSDevice pDevice
)
{
//1M, 2M, 5M, 11M, 18M, 24M, 36M, 54M
@@ -224,7 +219,7 @@ s_vProbeChannel(
PBYTE pbyRate;
PSTxMgmtPacket pTxPacket;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- UINT ii;
+ unsigned int ii;
if (pDevice->byBBType == BB_TYPE_11A) {
@@ -275,12 +270,12 @@ s_vProbeChannel(
PSTxMgmtPacket
s_MgrMakeProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pScanBSSID,
- IN PWLAN_IE_SSID pSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pScanBSSID,
+ PWLAN_IE_SSID pSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
@@ -321,41 +316,27 @@ s_MgrMakeProbeRequest(
return pTxPacket;
}
-
-
-
-
-VOID
-vCommandTimerWait(
- IN HANDLE hDeviceContext,
- IN UINT MSecond
- )
+void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long)pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
// RUN_AT :1 msec ~= (HZ/1024)
- pDevice->sTimerCommand.expires = (UINT)RUN_AT((MSecond * HZ) >> 10);
+ pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
add_timer(&pDevice->sTimerCommand);
return;
}
-
-
-
-VOID
-vRunCommand(
- IN HANDLE hDeviceContext
- )
+void vRunCommand(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PWLAN_IE_SSID pItemSSID;
PWLAN_IE_SSID pItemSSIDCurr;
CMD_STATUS Status;
- UINT ii;
+ unsigned int ii;
BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
struct sk_buff *skb;
BYTE byData;
@@ -435,7 +416,8 @@ vRunCommand(
pMgmt->abyScanBSSID[5] = 0xFF;
pItemSSID->byElementID = WLAN_EID_SSID;
// clear bssid list
- // BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
+ /* BSSvClearBSSList((void *) pDevice,
+ pDevice->bLinkPass); */
pMgmt->eScanState = WMAC_IS_SCANNING;
pDevice->byScanBBType = pDevice->byBBType; //lucas
pDevice->bStopDataPkt = TRUE;
@@ -480,11 +462,11 @@ vRunCommand(
(pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) {
s_vProbeChannel(pDevice);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, 100);
+ vCommandTimerWait((void *) pDevice, 100);
return;
} else {
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, WCMD_PASSIVE_SCAN_TIME);
+ vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME);
return;
}
@@ -552,7 +534,11 @@ vRunCommand(
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n");
// reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
pDevice->bLinkPass = FALSE;
ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW);
// unlock command busy
@@ -614,22 +600,26 @@ vRunCommand(
// set initial state
pMgmt->eCurrState = WMAC_STATE_IDLE;
pMgmt->eCurrMode = WMAC_MODE_STANDBY;
- PSvDisablePowerSaving((HANDLE)pDevice);
+ PSvDisablePowerSaving((void *) pDevice);
BSSvClearNodeDBTable(pDevice, 0);
- vMgrJoinBSSBegin((HANDLE)pDevice, &Status);
+ vMgrJoinBSSBegin((void *) pDevice, &Status);
// if Infra mode
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) {
// Call mgr to begin the deauthentication
// reason = (3) beacuse sta has left ESS
- if (pMgmt->eCurrState>= WMAC_STATE_AUTH) {
- vMgrDeAuthenBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status);
- }
+ if (pMgmt->eCurrState >= WMAC_STATE_AUTH) {
+ vMgrDeAuthenBeginSta((void *)pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (3),
+ &Status);
+ }
// Call mgr to begin the authentication
- vMgrAuthenBeginSta((HANDLE)pDevice, pMgmt, &Status);
+ vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status);
if (Status == CMD_STATUS_SUCCESS) {
pDevice->byLinkWaitCount = 0;
pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT;
- vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT);
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT);
spin_unlock_irq(&pDevice->lock);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n");
return;
@@ -648,10 +638,12 @@ vRunCommand(
}
else {
// start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA \n");
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " WLAN_CMD_IBSS_CREATE fail ! \n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
};
BSSvAddMulticastNode(pDevice);
}
@@ -662,10 +654,12 @@ vRunCommand(
if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA ||
pMgmt->eConfigMode == WMAC_CONFIG_AUTO) {
// start own IBSS
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY \n");
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" WLAN_CMD_IBSS_CREATE fail ! \n");
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n");
};
BSSvAddMulticastNode(pDevice);
s_bClearBSSID_SCAN(pDevice);
@@ -701,12 +695,12 @@ vRunCommand(
pDevice->byLinkWaitCount = 0;
// Call mgr to begin the association
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n");
- vMgrAssocBeginSta((HANDLE)pDevice, pMgmt, &Status);
+ vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status);
if (Status == CMD_STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n");
pDevice->byLinkWaitCount = 0;
pDevice->eCommandState = WLAN_ASSOCIATE_WAIT;
- vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT);
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT);
spin_unlock_irq(&pDevice->lock);
return;
}
@@ -718,7 +712,7 @@ vRunCommand(
pDevice->byLinkWaitCount ++;
printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, AUTHENTICATE_TIMEOUT/2);
+ vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2);
return;
}
pDevice->byLinkWaitCount = 0;
@@ -742,7 +736,8 @@ vRunCommand(
if (pMgmt->eCurrState == WMAC_STATE_ASSOC) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n");
if (pDevice->ePSMode != WMAC_POWER_CAM) {
- PSvEnablePowerSaving((HANDLE)pDevice, pMgmt->wListenInterval);
+ PSvEnablePowerSaving((void *) pDevice,
+ pMgmt->wListenInterval);
}
/*
if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) {
@@ -765,7 +760,7 @@ vRunCommand(
// printk("Re-initial TxDataTimer****\n");
del_timer(&pDevice->sTimerTxData);
init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (ULONG)pDevice;
+ pDevice->sTimerTxData.data = (unsigned long) pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
pDevice->fTxDataInSleep = FALSE;
@@ -786,7 +781,7 @@ vRunCommand(
pDevice->byLinkWaitCount ++;
printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n",pDevice->byLinkWaitCount);
spin_unlock_irq(&pDevice->lock);
- vCommandTimerWait((HANDLE)pDevice, ASSOCIATE_TIMEOUT/2);
+ vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2);
return;
}
pDevice->byLinkWaitCount = 0;
@@ -823,9 +818,10 @@ vRunCommand(
pMgmt->eCurrState = WMAC_STATE_IDLE;
pDevice->bFixRate = FALSE;
- vMgrCreateOwnIBSS((HANDLE)pDevice, &Status);
- if (Status != CMD_STATUS_SUCCESS){
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " vMgrCreateOwnIBSS fail ! \n");
+ vMgrCreateOwnIBSS((void *) pDevice, &Status);
+ if (Status != CMD_STATUS_SUCCESS) {
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "vMgrCreateOwnIBSS fail!\n");
};
// alway turn off unicast bit
MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST);
@@ -948,7 +944,11 @@ vRunCommand(
if (pDevice->bLinkPass == TRUE) {
// reason = 8 : disassoc because sta has left
- vMgrDisassocBeginSta((HANDLE)pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status);
+ vMgrDisassocBeginSta((void *) pDevice,
+ pMgmt,
+ pMgmt->abyCurrBSSID,
+ (8),
+ &Status);
pDevice->bLinkPass = FALSE;
// unlock command busy
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -1185,18 +1185,15 @@ s_bCommandComplete (
break;
}
-
- vCommandTimerWait((HANDLE)pDevice, 0);
+ vCommandTimerWait((void *) pDevice, 0);
}
return TRUE;
}
-BOOL bScheduleCommand (
- IN HANDLE hDeviceContext,
- IN CMD_CODE eCommand,
- IN PBYTE pbyItem0
- )
+BOOL bScheduleCommand(void *hDeviceContext,
+ CMD_CODE eCommand,
+ PBYTE pbyItem0)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1264,14 +1261,11 @@ BOOL bScheduleCommand (
* Return Value: TRUE if success; otherwise FALSE
*
*/
-static
-BOOL s_bClearBSSID_SCAN (
- IN HANDLE hDeviceContext
- )
+static BOOL s_bClearBSSID_SCAN(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
- UINT uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
- UINT ii;
+ unsigned int uCmdDequeueIdx = pDevice->uCmdDequeueIdx;
+ unsigned int ii;
if ((pDevice->cbFreeCmdQueue < CMD_Q_SIZE) && (uCmdDequeueIdx != pDevice->uCmdEnqueueIdx)) {
for (ii = 0; ii < (CMD_Q_SIZE - pDevice->cbFreeCmdQueue); ii ++) {
@@ -1287,10 +1281,7 @@ BOOL s_bClearBSSID_SCAN (
//mike add:reset command timer
-VOID
-vResetCommandTimer(
- IN HANDLE hDeviceContext
- )
+void vResetCommandTimer(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -1298,7 +1289,7 @@ vResetCommandTimer(
del_timer(&pDevice->sTimerCommand);
//init timer
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long)pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
pDevice->sTimerCommand.expires = RUN_AT(HZ);
pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
@@ -1311,10 +1302,7 @@ vResetCommandTimer(
//2007-0115-08<Add>by MikeLiu
#ifdef TxInSleep
-VOID
-BSSvSecondTxData(
- IN HANDLE hDeviceContext
- )
+void BSSvSecondTxData(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
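
The wcmd.c conversion leaves the old-style kernel timer API in place and only changes the .data cast from ULONG to unsigned long. A condensed sketch of the pattern vCommandTimerWait() uses, assuming the pre-timer_setup() timer interface this driver is written against (the wrapper name is illustrative; RUN_AT() and TimerFunction are the driver's own macro and typedef):

	static void arm_command_timer(PSDevice pDevice, unsigned int msecs)
	{
		init_timer(&pDevice->sTimerCommand);
		pDevice->sTimerCommand.data = (unsigned long)pDevice;
		pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
		/* 1 ms is roughly HZ/1024 jiffies, hence the >> 10 */
		pDevice->sTimerCommand.expires = RUN_AT((msecs * HZ) >> 10);
		add_timer(&pDevice->sTimerCommand);
	}
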
diff --git a/drivers/staging/vt6656/wcmd.h b/drivers/staging/vt6656/wcmd.h
index 90d672b462b..09c4411c689 100644
--- a/drivers/staging/vt6656/wcmd.h
+++ b/drivers/staging/vt6656/wcmd.h
@@ -105,46 +105,32 @@ typedef enum tagCMD_STATE {
WLAN_CMD_IDLE
} CMD_STATE, *PCMD_STATE;
-
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Types ------------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-VOID
-vResetCommandTimer(
- IN HANDLE hDeviceContext
- );
-BOOL
-bScheduleCommand(
- IN HANDLE hDeviceContext,
- IN CMD_CODE eCommand,
- IN PBYTE pbyItem0
- );
+void vResetCommandTimer(void *hDeviceContext);
+
+BOOL bScheduleCommand(void *hDeviceContext,
+ CMD_CODE eCommand,
+ PBYTE pbyItem0);
+
+void vRunCommand(void *hDeviceContext);
-VOID
-vRunCommand(
- IN HANDLE hDeviceContext
- );
/*
-VOID
+void
WCMDvCommandThread(
- PVOID Context
+ void * Context
);
*/
//2007-0115-09<Add>by MikeLiu
#ifdef TxInSleep
-VOID
-BSSvSecondTxData(
- IN HANDLE hDeviceContext
- );
+void BSSvSecondTxData(void *hDeviceContext);
#endif
-#endif //__WCMD_H__
+#endif /* __WCMD_H__ */
diff --git a/drivers/staging/vt6656/wctl.c b/drivers/staging/vt6656/wctl.c
index 40986da1e4a..857ce0bc00a 100644
--- a/drivers/staging/vt6656/wctl.c
+++ b/drivers/staging/vt6656/wctl.c
@@ -69,8 +69,8 @@
BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
{
- UINT uIndex;
- UINT ii;
+ unsigned int uIndex;
+ unsigned int ii;
PSCacheEntry pCacheEntry;
if (IS_FC_RETRY(pMACHeader)) {
@@ -91,7 +91,7 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
/* Not fount in cache - insert */
pCacheEntry = &pCache->asCacheEntry[pCache->uInPtr];
pCacheEntry->wFmSequence = pMACHeader->wSeqCtl;
- memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pCacheEntry->abyAddr2[0]), &(pMACHeader->abyAddr2[0]), ETH_ALEN);
pCacheEntry->wFrameCtl = pMACHeader->wFrameCtl;
ADD_ONE_WITH_WRAP_AROUND(pCache->uInPtr, DUPLICATE_RX_CACHE_LENGTH);
return FALSE;
@@ -111,9 +111,9 @@ BOOL WCTLbIsDuplicate (PSCache pCache, PS802_11Header pMACHeader)
* Return Value: index number in Defragment Database
*
*/
-UINT WCTLuSearchDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
{
-UINT ii;
+ unsigned int ii;
for(ii=0;ii<pDevice->cbDFCB;ii++) {
if ((pDevice->sRxDFCB[ii].bInUse == TRUE) &&
@@ -141,9 +141,9 @@ UINT ii;
* Return Value: index number in Defragment Database
*
*/
-UINT WCTLuInsertDFCB (PSDevice pDevice, PS802_11Header pMACHeader)
+unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader)
{
-UINT ii;
+ unsigned int ii;
if (pDevice->cbFreeDFCB == 0)
return(pDevice->cbDFCB);
@@ -154,7 +154,9 @@ UINT ii;
pDevice->sRxDFCB[ii].bInUse = TRUE;
pDevice->sRxDFCB[ii].wSequence = (pMACHeader->wSeqCtl >> 4);
pDevice->sRxDFCB[ii].wFragNum = (pMACHeader->wSeqCtl & 0x000F);
- memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]), &(pMACHeader->abyAddr2[0]), U_ETHER_ADDR_LEN);
+ memcpy(&(pDevice->sRxDFCB[ii].abyAddr2[0]),
+ &(pMACHeader->abyAddr2[0]),
+ ETH_ALEN);
return(ii);
}
}
@@ -178,9 +180,10 @@ UINT ii;
* Return Value: TRUE if it is valid fragment packet and we have resource to defragment; otherwise FALSE
*
*/
-BOOL WCTLbHandleFragment (PSDevice pDevice, PS802_11Header pMACHeader, UINT cbFrameLength, BOOL bWEP, BOOL bExtIV)
+BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader,
+ unsigned int cbFrameLength, BOOL bWEP, BOOL bExtIV)
{
-UINT uHeaderSize;
+unsigned int uHeaderSize;
if (bWEP == TRUE) {
diff --git a/drivers/staging/vt6656/wctl.h b/drivers/staging/vt6656/wctl.h
index a1ac4791bfd..7270af68c89 100644
--- a/drivers/staging/vt6656/wctl.h
+++ b/drivers/staging/vt6656/wctl.h
@@ -90,7 +90,6 @@
(uVar)++; \
}
-
/*--------------------- Export Classes ----------------------------*/
/*--------------------- Export Variables --------------------------*/
@@ -98,11 +97,9 @@
/*--------------------- Export Functions --------------------------*/
BOOL WCTLbIsDuplicate(PSCache pCache, PS802_11Header pMACHeader);
-BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader, UINT cbFrameLength, BOOL bWEP, BOOL bExtIV);
-UINT WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
-UINT WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
-
-#endif // __WCTL_H__
-
-
+BOOL WCTLbHandleFragment(PSDevice pDevice, PS802_11Header pMACHeader,
+ unsigned int cbFrameLength, BOOL bWEP, BOOL bExtIV);
+unsigned int WCTLuSearchDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
+unsigned int WCTLuInsertDFCB(PSDevice pDevice, PS802_11Header pMACHeader);
+#endif /* __WCTL_H__ */
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index 330aea69d23..93c15f0580f 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -94,160 +94,160 @@ static int msglevel =MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
//2008-0730-01<Add>by MikeLiu
static BOOL ChannelExceedZoneType(
- IN PSDevice pDevice,
- IN BYTE byCurrChannel
+ PSDevice pDevice,
+ BYTE byCurrChannel
);
// Association/diassociation functions
static
PSTxMgmtPacket
s_MgrMakeAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
static
-VOID
+void
s_vMgrRxAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ unsigned int uNodeIndex
);
static
PSTxMgmtPacket
s_MgrMakeReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
static
-VOID
+void
s_vMgrRxAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bReAssocType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bReAssocType
);
static
-VOID
+void
s_vMgrRxDisassociation(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// Authentication/deauthen functions
static
-VOID
+void
s_vMgrRxAuthenSequence_1(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_2(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_3(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthenSequence_4(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
);
static
-VOID
+void
s_vMgrRxAuthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
static
-VOID
+void
s_vMgrRxDeauthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// Scan functions
// probe request/response functions
static
-VOID
+void
s_vMgrRxProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
static
-VOID
+void
s_vMgrRxProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
);
// beacon functions
static
-VOID
+void
s_vMgrRxBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bInScan
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bInScan
);
static
-VOID
+void
s_vMgrFormatTIM(
- IN PSMgmtObject pMgmt,
- IN PWLAN_IE_TIM pTIM
+ PSMgmtObject pMgmt,
+ PWLAN_IE_TIM pTIM
);
static
PSTxMgmtPacket
s_MgrMakeBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
@@ -255,88 +255,84 @@ s_MgrMakeBeacon(
static
PSTxMgmtPacket
s_MgrMakeAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
// ReAssociation response
static
PSTxMgmtPacket
s_MgrMakeReAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
);
// Probe response
static
PSTxMgmtPacket
s_MgrMakeProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- IN BYTE byPHYType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PBYTE pDstAddr,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
+ BYTE byPHYType
);
// received status
static
-VOID
+void
s_vMgrLogStatus(
- IN PSMgmtObject pMgmt,
- IN WORD wStatus
+ PSMgmtObject pMgmt,
+ WORD wStatus
);
static
-VOID
+void
s_vMgrSynchBSS (
- IN PSDevice pDevice,
- IN UINT uBSSMode,
- IN PKnownBSS pCurr,
- OUT PCMD_STATUS pStatus
+ PSDevice pDevice,
+ unsigned int uBSSMode,
+ PKnownBSS pCurr,
+ PCMD_STATUS pStatus
);
static BOOL
s_bCipherMatch (
- IN PKnownBSS pBSSNode,
- IN NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- OUT PBYTE pbyCCSPK,
- OUT PBYTE pbyCCSGK
+ PKnownBSS pBSSNode,
+ NDIS_802_11_ENCRYPTION_STATUS EncStatus,
+ PBYTE pbyCCSPK,
+ PBYTE pbyCCSGK
);
- static VOID Encyption_Rebuild(
- IN PSDevice pDevice,
- IN PKnownBSS pCurr
+ static void Encyption_Rebuild(
+ PSDevice pDevice,
+ PKnownBSS pCurr
);
-
-
/*--------------------- Export Variables --------------------------*/
-
/*--------------------- Export Functions --------------------------*/
-
/*+
*
* Routine Description:
@@ -347,10 +343,7 @@ s_bCipherMatch (
*
-*/
-VOID
-vMgrObjectInit(
- IN HANDLE hDeviceContext
- )
+void vMgrObjectInit(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -368,22 +361,22 @@ vMgrObjectInit(
pMgmt->byCSSPK = KEY_CTL_NONE;
pMgmt->byCSSGK = KEY_CTL_NONE;
pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI;
- BSSvClearBSSList((HANDLE)pDevice, FALSE);
+ BSSvClearBSSList((void *) pDevice, FALSE);
init_timer(&pMgmt->sTimerSecondCallback);
- pMgmt->sTimerSecondCallback.data = (ULONG)pDevice;
+ pMgmt->sTimerSecondCallback.data = (unsigned long)pDevice;
pMgmt->sTimerSecondCallback.function = (TimerFunction)BSSvSecondCallBack;
pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
init_timer(&pDevice->sTimerCommand);
- pDevice->sTimerCommand.data = (ULONG)pDevice;
+ pDevice->sTimerCommand.data = (unsigned long)pDevice;
pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
pDevice->sTimerCommand.expires = RUN_AT(HZ);
//2007-0115-10<Add>by MikeLiu
#ifdef TxInSleep
init_timer(&pDevice->sTimerTxData);
- pDevice->sTimerTxData.data = (ULONG)pDevice;
+ pDevice->sTimerTxData.data = (unsigned long)pDevice;
pDevice->sTimerTxData.function = (TimerFunction)BSSvSecondTxData;
pDevice->sTimerTxData.expires = RUN_AT(10*HZ); //10s callback
pDevice->fTxDataInSleep = FALSE;
@@ -401,8 +394,6 @@ vMgrObjectInit(
return;
}
-
-
/*+
*
* Routine Description:
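The hunks above drop the Windows-style VOID/IN/OUT/HANDLE annotations in favour of plain void * context pointers that each entry point casts back to the driver's device structure. A minimal sketch of that pattern, written as a standalone user-space program (the structure and function names here are illustrative, not the driver's real ones):

#include <stdio.h>

struct my_device {                     /* stand-in for PSDevice */
        int channel;
};

/* Callers hand us an opaque context; we recover the typed pointer inside. */
static void my_object_init(void *context)
{
        struct my_device *dev = context;

        dev->channel = 1;
        printf("initialised, channel=%d\n", dev->channel);
}

int main(void)
{
        struct my_device dev = { 0 };

        my_object_init(&dev);          /* same call shape as vMgrObjectInit() */
        return 0;
}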
@@ -414,13 +405,9 @@ vMgrObjectInit(
*
-*/
-
-VOID
-vMgrAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- )
+void vMgrAssocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket;
@@ -491,12 +478,9 @@ vMgrAssocBeginSta(
*
-*/
-VOID
-vMgrReAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- )
+void vMgrReAssocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket;
@@ -570,14 +554,11 @@ vMgrReAssocBeginSta(
*
-*/
-VOID
-vMgrDisassocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
- )
+void vMgrDisassocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket = NULL;
@@ -633,12 +614,12 @@ vMgrDisassocBeginSta(
-*/
static
-VOID
+void
s_vMgrRxAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ unsigned int uNodeIndex
)
{
WLAN_FR_ASSOCREQ sFrame;
@@ -646,7 +627,7 @@ s_vMgrRxAssocRequest(
PSTxMgmtPacket pTxPacket;
WORD wAssocStatus = 0;
WORD wAssocAID = 0;
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
@@ -691,7 +672,7 @@ s_vMgrRxAssocRequest(
}
- RATEvParseMaxRate((PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
FALSE, // do not change our basic rate
@@ -789,12 +770,12 @@ s_vMgrRxAssocRequest(
-*/
static
-VOID
+void
s_vMgrRxReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN UINT uNodeIndex
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ unsigned int uNodeIndex
)
{
WLAN_FR_REASSOCREQ sFrame;
@@ -802,7 +783,7 @@ s_vMgrRxReAssocRequest(
PSTxMgmtPacket pTxPacket;
WORD wAssocStatus = 0;
WORD wAssocAID = 0;
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
@@ -844,7 +825,7 @@ s_vMgrRxReAssocRequest(
}
- RATEvParseMaxRate((PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)abyCurrExtSuppRates,
FALSE, // do not change our basic rate
@@ -936,12 +917,12 @@ s_vMgrRxReAssocRequest(
-*/
static
-VOID
+void
s_vMgrRxAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bReAssocType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bReAssocType
)
{
WLAN_FR_ASSOCRESP sFrame;
@@ -958,12 +939,12 @@ s_vMgrRxAssocResponse(
sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
// decode the frame
vMgrDecodeAssocResponse(&sFrame);
- if ((sFrame.pwCapInfo == 0) ||
- (sFrame.pwStatus == 0) ||
- (sFrame.pwAid == 0) ||
- (sFrame.pSuppRates == 0)){
- DBG_PORT80(0xCC);
- return;
+ if ((sFrame.pwCapInfo == NULL)
+ || (sFrame.pwStatus == NULL)
+ || (sFrame.pwAid == NULL)
+ || (sFrame.pSuppRates == NULL)) {
+ DBG_PORT80(0xCC);
+ return;
};
pMgmt->sAssocInfo.AssocInfo.ResponseFixedIEs.Capabilities = *(sFrame.pwCapInfo);
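The rewritten validation above compares the decoded information-element pointers against NULL rather than the integer 0. A hedged user-space sketch of the same defensive check (the frame layout below is invented for illustration only):

#include <stddef.h>
#include <stdio.h>

struct decoded_resp {                  /* illustrative stand-in for WLAN_FR_ASSOCRESP */
        unsigned short *cap_info;
        unsigned short *status;
        unsigned short *aid;
        unsigned char  *supp_rates;
};

/* Reject the frame if any mandatory field failed to decode. */
static int resp_is_valid(const struct decoded_resp *r)
{
        if (r->cap_info == NULL || r->status == NULL ||
            r->aid == NULL || r->supp_rates == NULL)
                return 0;
        return 1;
}

int main(void)
{
        struct decoded_resp r = { NULL, NULL, NULL, NULL };

        printf("valid=%d\n", resp_is_valid(&r));   /* prints valid=0 */
        return 0;
}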
@@ -987,7 +968,10 @@ s_vMgrRxAssocResponse(
};
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Association Successful, AID=%d.\n", pMgmt->wCurrAID & ~(BIT14|BIT15));
pMgmt->eCurrState = WMAC_STATE_ASSOC;
- BSSvUpdateAPNode((HANDLE)pDevice, sFrame.pwCapInfo, sFrame.pSuppRates, sFrame.pExtSuppRates);
+ BSSvUpdateAPNode((void *) pDevice,
+ sFrame.pwCapInfo,
+ sFrame.pSuppRates,
+ sFrame.pExtSuppRates);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Link with AP(SSID): %s\n", pItemSSID->abySSID);
pDevice->bLinkPass = TRUE;
@@ -1089,8 +1073,6 @@ if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
return;
}
-
-
/*+
*
* Routine Description:
@@ -1102,12 +1084,9 @@ if(pMgmt->eCurrState == WMAC_STATE_ASSOC)
*
-*/
-VOID
-vMgrAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- )
+void vMgrAuthenBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
WLAN_FR_AUTHEN sFrame;
@@ -1147,8 +1126,6 @@ vMgrAuthenBeginSta(
return ;
}
-
-
/*+
*
* Routine Description:
@@ -1160,14 +1137,11 @@ vMgrAuthenBeginSta(
*
-*/
-VOID
-vMgrDeAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
- )
+void vMgrDeAuthenBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
WLAN_FR_DEAUTHEN sFrame;
@@ -1217,11 +1191,11 @@ vMgrDeAuthenBeginSta(
-*/
static
-VOID
+void
s_vMgrRxAuthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_AUTHEN sFrame;
@@ -1275,15 +1249,15 @@ s_vMgrRxAuthentication(
static
-VOID
+void
s_vMgrRxAuthenSequence_1(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
PSTxMgmtPacket pTxPacket = NULL;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
WLAN_FR_AUTHEN sFrame;
PSKeyItem pTransmitKey;
@@ -1381,11 +1355,11 @@ s_vMgrRxAuthenSequence_1(
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_2(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
WLAN_FR_AUTHEN sFrame;
@@ -1405,12 +1379,11 @@ s_vMgrRxAuthenSequence_2(
s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
pMgmt->eCurrState = WMAC_STATE_IDLE;
}
- if (pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
-// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
-// spin_lock_irq(&pDevice->lock);
+ if (pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT) {
+ /* spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, 0);
+ spin_lock_irq(&pDevice->lock); */
}
-
break;
case WLAN_AUTH_ALG_SHAREDKEY:
@@ -1453,9 +1426,9 @@ s_vMgrRxAuthenSequence_2(
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Mgt:rx Auth_reply sequence_2 status error ...\n");
if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
-// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
-// spin_lock_irq(&pDevice->lock);
+ /* spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, 0);
+ spin_lock_irq(&pDevice->lock); */
}
s_vMgrLogStatus(pMgmt, cpu_to_le16((*(pFrame->pwStatus))));
}
@@ -1483,16 +1456,16 @@ s_vMgrRxAuthenSequence_2(
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_3(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
PSTxMgmtPacket pTxPacket = NULL;
- UINT uStatusCode = 0 ;
- UINT uNodeIndex = 0;
+ unsigned int uStatusCode = 0 ;
+ unsigned int uNodeIndex = 0;
WLAN_FR_AUTHEN sFrame;
if (!WLAN_GET_FC_ISWEP(pFrame->pHdr->sA3.wFrameCtl)) {
@@ -1571,11 +1544,11 @@ reply:
*
-*/
static
-VOID
+void
s_vMgrRxAuthenSequence_4(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PWLAN_FR_AUTHEN pFrame
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PWLAN_FR_AUTHEN pFrame
)
{
@@ -1591,11 +1564,10 @@ s_vMgrRxAuthenSequence_4(
}
if ( pDevice->eCommandState == WLAN_AUTHENTICATE_WAIT ) {
-// spin_unlock_irq(&pDevice->lock);
-// vCommandTimerWait((HANDLE)pDevice, 0);
-// spin_lock_irq(&pDevice->lock);
+ /* spin_unlock_irq(&pDevice->lock);
+ vCommandTimerWait((void *) pDevice, 0);
+ spin_lock_irq(&pDevice->lock); */
}
-
}
/*+
@@ -1610,15 +1582,15 @@ s_vMgrRxAuthenSequence_4(
-*/
static
-VOID
+void
s_vMgrRxDisassociation(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_DISASSOC sFrame;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
CMD_STATUS CmdStatus;
viawget_wpa_header *wpahdr;
@@ -1700,15 +1672,15 @@ s_vMgrRxDisassociation(
-*/
static
-VOID
+void
s_vMgrRxDeauthentication(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_DEAUTHEN sFrame;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
viawget_wpa_header *wpahdr;
@@ -1791,8 +1763,8 @@ s_vMgrRxDeauthentication(
-*/
static BOOL
ChannelExceedZoneType(
- IN PSDevice pDevice,
- IN BYTE byCurrChannel
+ PSDevice pDevice,
+ BYTE byCurrChannel
)
{
BOOL exceed=FALSE;
@@ -1826,12 +1798,12 @@ ChannelExceedZoneType(
-*/
static
-VOID
+void
s_vMgrRxBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket,
- IN BOOL bInScan
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket,
+ BOOL bInScan
)
{
@@ -1845,17 +1817,17 @@ s_vMgrRxBeacon(
BOOL bUpdateTSF = FALSE;
BOOL bIsAPBeacon = FALSE;
BOOL bIsChannelEqual = FALSE;
- UINT uLocateByteIndex;
+ unsigned int uLocateByteIndex;
BYTE byTIMBitOn = 0;
WORD wAIDNumber = 0;
- UINT uNodeIndex;
+ unsigned int uNodeIndex;
QWORD qwTimestamp, qwLocalTSF;
QWORD qwCurrTSF;
WORD wStartIndex = 0;
WORD wAIDIndex = 0;
BYTE byCurrChannel = pRxPacket->byRxChannel;
ERPObject sERP;
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
BOOL bChannelHit = FALSE;
BYTE byOldPreambleType;
@@ -1871,14 +1843,14 @@ s_vMgrRxBeacon(
// decode the beacon frame
vMgrDecodeBeacon(&sFrame);
- if ((sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0) ) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx beacon frame error\n");
- return;
- };
+ if ((sFrame.pwBeaconInterval == NULL)
+ || (sFrame.pwCapInfo == NULL)
+ || (sFrame.pSSID == NULL)
+ || (sFrame.pSuppRates == NULL)) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Rx beacon frame error\n");
+ return;
+ };
if( byCurrChannel > CB_MAX_CHANNEL_24G )
{
@@ -1913,10 +1885,12 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sERP.byERP = 0;
}
- pBSSList = BSSpAddrIsInBSSList((HANDLE)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
+ pBSSList = BSSpAddrIsInBSSList((void *) pDevice,
+ sFrame.pHdr->sA3.abyAddr3,
+ sFrame.pSSID);
if (pBSSList == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Beacon/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((HANDLE)pDevice,
+ BSSbInsertToBSSList((void *) pDevice,
sFrame.pHdr->sA3.abyAddr3,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
@@ -1932,12 +1906,11 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sFrame.pIE_Quiet,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (HANDLE)pRxPacket
- );
+ (void *) pRxPacket);
}
else {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"update bcn: RxChannel = : %d\n", byCurrChannel);
- BSSbUpdateToBSSList((HANDLE)pDevice,
+ BSSbUpdateToBSSList((void *) pDevice,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
*sFrame.pwCapInfo,
@@ -1954,8 +1927,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pBSSList,
sFrame.len - WLAN_HDR_ADDR3_LEN,
sFrame.pHdr->sA4.abyAddr4, // payload of probresponse
- (HANDLE)pRxPacket
- );
+ (void *) pRxPacket);
}
@@ -2089,7 +2061,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrExtSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)pBSSList->abyExtSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
uRateLen);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates,
TRUE,
@@ -2152,9 +2124,9 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
if (bTSFLargeDiff)
bUpdateTSF = TRUE;
- if ((pDevice->bEnablePSMode == TRUE) &&(sFrame.pTIM != 0)) {
+ if ((pDevice->bEnablePSMode == TRUE) && (sFrame.pTIM)) {
- // deal with DTIM, analysis TIM
+ /* deal with DTIM, analyze TIM */

pMgmt->bMulticastTIM = WLAN_MGMT_IS_MULTICAST_TIM(sFrame.pTIM->byBitMapCtl) ? TRUE : FALSE ;
pMgmt->byDTIMCount = sFrame.pTIM->byDTIMCount;
pMgmt->byDTIMPeriod = sFrame.pTIM->byDTIMPeriod;
@@ -2227,7 +2199,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
TRUE,
@@ -2248,7 +2220,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
pMgmt->abyCurrSuppRates[1] = RATEuSetIE((PWLAN_IE_SUPP_RATES)sFrame.pSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
- RATEvParseMaxRate( (PVOID)pDevice,
+ RATEvParseMaxRate((void *)pDevice,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL,
TRUE,
@@ -2324,7 +2296,7 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// set highest basic rate
// s_vSetHighestBasicRate(pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates);
// Prepare beacon frame
- bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+ bMgrPrepareBeaconToSend((void *) pDevice, pMgmt);
// }
};
}
@@ -2341,8 +2313,6 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
return;
}
-
-
/*+
*
* Routine Description:
@@ -2355,11 +2325,9 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
* CMD_STATUS
*
-*/
-VOID
-vMgrCreateOwnIBSS(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
- )
+
+void vMgrCreateOwnIBSS(void *hDeviceContext,
+ PCMD_STATUS pStatus)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -2368,7 +2336,7 @@ vMgrCreateOwnIBSS(
BYTE byTopCCKBasicRate;
BYTE byTopOFDMBasicRate;
QWORD qwCurrTSF;
- UINT ii;
+ unsigned int ii;
BYTE abyRATE[] = {0x82, 0x84, 0x8B, 0x96, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60};
BYTE abyCCK_RATE[] = {0x82, 0x84, 0x8B, 0x96};
BYTE abyOFDM_RATE[] = {0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
@@ -2466,7 +2434,8 @@ vMgrCreateOwnIBSS(
// set basic rate
- RATEvParseMaxRate((PVOID)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
+ RATEvParseMaxRate((void *)pDevice,
+ (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrExtSuppRates, TRUE,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
@@ -2608,13 +2577,11 @@ vMgrCreateOwnIBSS(
pMgmt->eCurrState = WMAC_STATE_STARTED;
// Prepare beacon to send
- if (bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt)) {
- *pStatus = CMD_STATUS_SUCCESS;
- }
- return ;
-}
-
+ if (bMgrPrepareBeaconToSend((void *) pDevice, pMgmt))
+ *pStatus = CMD_STATUS_SUCCESS;
+ return;
+}
/*+
*
@@ -2629,21 +2596,16 @@ vMgrCreateOwnIBSS(
*
-*/
-VOID
-vMgrJoinBSSBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
- )
+void vMgrJoinBSSBegin(void *hDeviceContext, PCMD_STATUS pStatus)
{
-
PSDevice pDevice = (PSDevice)hDeviceContext;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
PKnownBSS pCurr = NULL;
- UINT ii, uu;
+ unsigned int ii, uu;
PWLAN_IE_SUPP_RATES pItemRates = NULL;
PWLAN_IE_SUPP_RATES pItemExtRates = NULL;
PWLAN_IE_SSID pItemSSID;
- UINT uRateLen = WLAN_RATES_MAXLEN;
+ unsigned int uRateLen = WLAN_RATES_MAXLEN;
WORD wMaxBasicRate = RATE_1M;
WORD wMaxSuppRate = RATE_1M;
WORD wSuppRate;
@@ -2743,9 +2705,10 @@ vMgrJoinBSSBegin(
uRateLen);
// Stuffing Rate IE
if ((pItemExtRates->len > 0) && (pItemRates->len < 8)) {
- for (ii = 0; ii < (UINT)(8 - pItemRates->len); ) {
- pItemRates->abyRates[pItemRates->len + ii] = pItemExtRates->abyRates[ii];
- ii ++;
+ for (ii = 0; ii < (unsigned int) (8 - pItemRates->len); ) {
+ pItemRates->abyRates[pItemRates->len + ii] =
+ pItemExtRates->abyRates[ii];
+ ii++;
if (pItemExtRates->len <= ii)
break;
}
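The reindented loop above copies extended supported rates into the basic Supported Rates element until it holds eight entries or the extended list runs out. A small standalone sketch of that merge, with invented array names standing in for pItemRates and pItemExtRates:

#include <stdio.h>

int main(void)
{
        unsigned char rates[8] = { 0x82, 0x84, 0x8B, 0x96 };
        unsigned char rates_len = 4;
        unsigned char ext_rates[] = { 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48 };
        unsigned char ext_len = sizeof(ext_rates);
        unsigned int ii;

        for (ii = 0; ii < (unsigned int)(8 - rates_len); ) {
                rates[rates_len + ii] = ext_rates[ii];
                ii++;
                if (ext_len <= ii)
                        break;
        }
        rates_len += ii;                        /* element now holds 8 rates */

        printf("merged %u rates\n", rates_len); /* prints "merged 8 rates" */
        return 0;
}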
@@ -2760,7 +2723,7 @@ vMgrJoinBSSBegin(
}
}
- RATEvParseMaxRate((PVOID)pDevice, pItemRates, pItemExtRates, TRUE,
+ RATEvParseMaxRate((void *)pDevice, pItemRates, pItemExtRates, TRUE,
&wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
vUpdateIFS(pDevice);
@@ -2781,12 +2744,17 @@ vMgrJoinBSSBegin(
// Add current BSS to Candidate list
// This should only works for WPA2 BSS, and WPA2 BSS check must be done before.
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
- BOOL bResult = bAdd_PMKID_Candidate((HANDLE)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
+ BOOL bResult = bAdd_PMKID_Candidate((void *) pDevice,
+ pMgmt->abyCurrBSSID,
+ &pCurr->sRSNCapObj);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate: 1(%d)\n", bResult);
if (bResult == FALSE) {
- vFlush_PMKID_Candidate((HANDLE)pDevice);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFlush_PMKID_Candidate: 4\n");
- bAdd_PMKID_Candidate((HANDLE)pDevice, pMgmt->abyCurrBSSID, &pCurr->sRSNCapObj);
+ vFlush_PMKID_Candidate((void *) pDevice);
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "vFlush_PMKID_Candidate: 4\n");
+ bAdd_PMKID_Candidate((void *) pDevice,
+ pMgmt->abyCurrBSSID,
+ &pCurr->sRSNCapObj);
}
}
@@ -2899,7 +2867,8 @@ vMgrJoinBSSBegin(
(PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
WLAN_RATES_MAXLEN_11B);
// set basic rate
- RATEvParseMaxRate((PVOID)pDevice, (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
+ RATEvParseMaxRate((void *)pDevice,
+ (PWLAN_IE_SUPP_RATES)pMgmt->abyCurrSuppRates,
NULL, TRUE, &wMaxBasicRate, &wMaxSuppRate, &wSuppRate,
&byTopCCKBasicRate, &byTopOFDMBasicRate);
vUpdateIFS(pDevice);
@@ -2938,7 +2907,7 @@ vMgrJoinBSSBegin(
CARDvSetRSPINF(pDevice, (BYTE)pDevice->byBBType);
// Prepare beacon
- bMgrPrepareBeaconToSend((HANDLE)pDevice, pMgmt);
+ bMgrPrepareBeaconToSend((void *) pDevice, pMgmt);
}
else {
pMgmt->eCurrState = WMAC_STATE_IDLE;
@@ -2960,12 +2929,12 @@ vMgrJoinBSSBegin(
*
-*/
static
-VOID
+void
s_vMgrSynchBSS (
- IN PSDevice pDevice,
- IN UINT uBSSMode,
- IN PKnownBSS pCurr,
- OUT PCMD_STATUS pStatus
+ PSDevice pDevice,
+ unsigned int uBSSMode,
+ PKnownBSS pCurr,
+ PCMD_STATUS pStatus
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
@@ -3004,7 +2973,7 @@ s_vMgrSynchBSS (
pDevice->byPreambleType = 0;
pDevice->wBasicRate = 0;
// Set Basic Rate
- CARDbAddBasicRate((PVOID)pDevice, RATE_1M);
+ CARDbAddBasicRate((void *)pDevice, RATE_1M);
// calculate TSF offset
// TSF Offset = Received Timestamp TSF - Marked Local's TSF
@@ -3122,13 +3091,13 @@ s_vMgrSynchBSS (
//mike add: fix NetworkManager 0.7.0 hidden ssid mode in WPA encryption
// ,need reset eAuthenMode and eEncryptionStatus
- static VOID Encyption_Rebuild(
- IN PSDevice pDevice,
- IN PKnownBSS pCurr
+ static void Encyption_Rebuild(
+ PSDevice pDevice,
+ PKnownBSS pCurr
)
{
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
- // UINT ii , uSameBssidNum=0;
+ /* unsigned int ii, uSameBssidNum=0; */
// for (ii = 0; ii < MAX_BSS_NUM; ii++) {
// if (pMgmt->sBSSList[ii].bActive &&
@@ -3174,20 +3143,20 @@ s_vMgrSynchBSS (
*
*
* Return Value:
- * VOID
+ * void
*
-*/
static
-VOID
+void
s_vMgrFormatTIM(
- IN PSMgmtObject pMgmt,
- IN PWLAN_IE_TIM pTIM
+ PSMgmtObject pMgmt,
+ PWLAN_IE_TIM pTIM
)
{
BYTE byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
BYTE byMap;
- UINT ii, jj;
+ unsigned int ii, jj;
BOOL bStartFound = FALSE;
BOOL bMulticast = FALSE;
WORD wStartIndex = 0;
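s_vMgrFormatTIM walks the association IDs with a per-byte bit mask (byMask) to build the TIM partial virtual bitmap. A hedged sketch of how one AID maps to a byte/bit position in such a bitmap; the helper name is invented and this is a user-space illustration, not the driver's routine:

#include <stdio.h>

/* Each byte of the bitmap covers 8 association IDs; the bit is AID modulo 8. */
static void tim_set_aid(unsigned char *bitmap, unsigned int aid)
{
        static const unsigned char mask[8] = { 1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80 };

        bitmap[aid / 8] |= mask[aid % 8];
}

int main(void)
{
        unsigned char bitmap[32] = { 0 };

        tim_set_aid(bitmap, 13);                 /* AID 13 -> byte 1, bit 5 */
        printf("byte1=0x%02x\n", bitmap[1]);     /* prints 0x20 */
        return 0;
}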
@@ -3256,16 +3225,16 @@ s_vMgrFormatTIM(
static
PSTxMgmtPacket
s_MgrMakeBeacon(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3430,18 +3399,18 @@ s_MgrMakeBeacon(
PSTxMgmtPacket
s_MgrMakeProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wCurrBeaconPeriod,
- IN UINT uCurrChannel,
- IN WORD wCurrATIMWinodw,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PBYTE pCurrBSSID,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
- IN BYTE byPHYType
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wCurrBeaconPeriod,
+ unsigned int uCurrChannel,
+ WORD wCurrATIMWinodw,
+ PBYTE pDstAddr,
+ PWLAN_IE_SSID pCurrSSID,
+ PBYTE pCurrBSSID,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates,
+ BYTE byPHYType
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3562,14 +3531,14 @@ s_MgrMakeProbeResponse(
PSTxMgmtPacket
s_MgrMakeAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3704,7 +3673,7 @@ s_MgrMakeAssocRequest(
} else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
- UINT ii;
+ unsigned int ii;
PWORD pwPMKID;
// WPA IE
@@ -3773,13 +3742,17 @@ s_MgrMakeAssocRequest(
pwPMKID = (PWORD)pbyRSN; // Point to PMKID count
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
- for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
- (*pwPMKID) ++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
- }
- }
+ for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
+ if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0],
+ pMgmt->abyCurrBSSID,
+ ETH_ALEN)) {
+ (*pwPMKID)++;
+ memcpy(pbyRSN,
+ pDevice->gsPMKID.BSSIDInfo[ii].PMKID,
+ 16);
+ pbyRSN += 16;
+ }
+ }
if (*pwPMKID != 0) {
sFrame.pRSN->len += (2 + (*pwPMKID)*16);
}
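The reformatted PMKID loop above now compares BSSIDs with ETH_ALEN (6) instead of the driver-local U_ETHER_ADDR_LEN. A user-space sketch of the same "copy every cached PMKID whose BSSID matches" pass; the structure layout is invented for illustration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN   6
#define PMKID_LEN 16

struct pmkid_entry {                            /* stand-in for BSSIDInfo[] */
        unsigned char bssid[ETH_ALEN];
        unsigned char pmkid[PMKID_LEN];
};

/* Append every cached PMKID whose BSSID matches; return how many matched. */
static unsigned int collect_pmkids(const struct pmkid_entry *cache,
                                   unsigned int count,
                                   const unsigned char *bssid,
                                   unsigned char *out)
{
        unsigned int ii, n = 0;

        for (ii = 0; ii < count; ii++) {
                if (!memcmp(cache[ii].bssid, bssid, ETH_ALEN)) {
                        memcpy(out + n * PMKID_LEN, cache[ii].pmkid, PMKID_LEN);
                        n++;
                }
        }
        return n;
}

int main(void)
{
        struct pmkid_entry cache[1] = { { { 1, 2, 3, 4, 5, 6 }, { 0xAA } } };
        unsigned char bssid[ETH_ALEN] = { 1, 2, 3, 4, 5, 6 };
        unsigned char out[4 * PMKID_LEN];

        printf("matches=%u\n", collect_pmkids(cache, 1, bssid, out));
        return 0;
}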
@@ -3820,14 +3793,14 @@ s_MgrMakeAssocRequest(
PSTxMgmtPacket
s_MgrMakeReAssocRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PBYTE pDAddr,
- IN WORD wCurrCapInfo,
- IN WORD wListenInterval,
- IN PWLAN_IE_SSID pCurrSSID,
- IN PWLAN_IE_SUPP_RATES pCurrRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PBYTE pDAddr,
+ WORD wCurrCapInfo,
+ WORD wListenInterval,
+ PWLAN_IE_SSID pCurrSSID,
+ PWLAN_IE_SUPP_RATES pCurrRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -3960,7 +3933,7 @@ s_MgrMakeReAssocRequest(
} else if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
- UINT ii;
+ unsigned int ii;
PWORD pwPMKID;
/* WPA IE */
@@ -4030,10 +4003,14 @@ s_MgrMakeReAssocRequest(
*pwPMKID = 0; // Initialize PMKID count
pbyRSN += 2; // Point to PMKID list
for (ii = 0; ii < pDevice->gsPMKID.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
- (*pwPMKID) ++;
- memcpy(pbyRSN, pDevice->gsPMKID.BSSIDInfo[ii].PMKID, 16);
- pbyRSN += 16;
+ if (!memcmp(&pDevice->gsPMKID.BSSIDInfo[ii].BSSID[0],
+ pMgmt->abyCurrBSSID,
+ ETH_ALEN)) {
+ (*pwPMKID)++;
+ memcpy(pbyRSN,
+ pDevice->gsPMKID.BSSIDInfo[ii].PMKID,
+ 16);
+ pbyRSN += 16;
}
}
if (*pwPMKID != 0) {
@@ -4057,8 +4034,6 @@ s_MgrMakeReAssocRequest(
return pTxPacket;
}
-
-
/*+
*
* Routine Description:
@@ -4070,17 +4045,16 @@ s_MgrMakeReAssocRequest(
*
-*/
-
PSTxMgmtPacket
s_MgrMakeAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -4147,14 +4121,14 @@ s_MgrMakeAssocResponse(
PSTxMgmtPacket
s_MgrMakeReAssocResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN WORD wCurrCapInfo,
- IN WORD wAssocStatus,
- IN WORD wAssocAID,
- IN PBYTE pDstAddr,
- IN PWLAN_IE_SUPP_RATES pCurrSuppRates,
- IN PWLAN_IE_SUPP_RATES pCurrExtSuppRates
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ WORD wCurrCapInfo,
+ WORD wAssocStatus,
+ WORD wAssocAID,
+ PBYTE pDstAddr,
+ PWLAN_IE_SUPP_RATES pCurrSuppRates,
+ PWLAN_IE_SUPP_RATES pCurrExtSuppRates
)
{
PSTxMgmtPacket pTxPacket = NULL;
@@ -4219,11 +4193,11 @@ s_MgrMakeReAssocResponse(
-*/
static
-VOID
+void
s_vMgrRxProbeResponse(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
PKnownBSS pBSSList = NULL;
@@ -4239,14 +4213,16 @@ s_vMgrRxProbeResponse(
sFrame.pBuf = (PBYTE)pRxPacket->p80211Header;
vMgrDecodeProbeResponse(&sFrame);
- if ((sFrame.pqwTimestamp == 0) ||
- (sFrame.pwBeaconInterval == 0) ||
- (sFrame.pwCapInfo == 0) ||
- (sFrame.pSSID == 0) ||
- (sFrame.pSuppRates == 0)) {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p] \n", pRxPacket->p80211Header);
- DBG_PORT80(0xCC);
- return;
+ if ((sFrame.pqwTimestamp == NULL)
+ || (sFrame.pwBeaconInterval == NULL)
+ || (sFrame.pwCapInfo == NULL)
+ || (sFrame.pSSID == NULL)
+ || (sFrame.pSuppRates == NULL)) {
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p]\n",
+ pRxPacket->p80211Header);
+ DBG_PORT80(0xCC);
+ return;
};
if(sFrame.pSSID->len == 0)
@@ -4256,22 +4232,23 @@ s_vMgrRxProbeResponse(
//{{ RobertYu:20050201, 11a byCurrChannel != sFrame.pDSParms->byCurrChannel mapping
if( byCurrChannel > CB_MAX_CHANNEL_24G )
{
- if (sFrame.pDSParms != 0) {
- if (byCurrChannel == RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1])
- bChannelHit = TRUE;
- byCurrChannel = RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1];
+ if (sFrame.pDSParms) {
+ if (byCurrChannel ==
+ RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1])
+ bChannelHit = TRUE;
+ byCurrChannel =
+ RFaby11aChannelIndex[sFrame.pDSParms->byCurrChannel-1];
} else {
- bChannelHit = TRUE;
+ bChannelHit = TRUE;
}
-
} else {
- if (sFrame.pDSParms != 0) {
- if (byCurrChannel == sFrame.pDSParms->byCurrChannel)
- bChannelHit = TRUE;
- byCurrChannel = sFrame.pDSParms->byCurrChannel;
- } else {
- bChannelHit = TRUE;
- }
+ if (sFrame.pDSParms) {
+ if (byCurrChannel == sFrame.pDSParms->byCurrChannel)
+ bChannelHit = TRUE;
+ byCurrChannel = sFrame.pDSParms->byCurrChannel;
+ } else {
+ bChannelHit = TRUE;
+ }
}
//RobertYu:20050201
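For 5 GHz probe responses the code above maps the DS Parameter Set channel through the RFaby11aChannelIndex table before comparing it with the receive channel. A hedged sketch of that table-lookup check, using a made-up table and channel values purely for illustration:

#include <stdio.h>

/* invented mapping table: entry i holds the index used for channel i+1 */
static const unsigned char chan_index_11a[4] = { 15, 16, 17, 18 };

int main(void)
{
        unsigned char rx_channel = 16;   /* channel the frame was received on */
        unsigned char ds_channel = 2;    /* channel advertised in the DS IE   */
        int channel_hit = 0;

        if (rx_channel == chan_index_11a[ds_channel - 1])
                channel_hit = 1;
        rx_channel = chan_index_11a[ds_channel - 1];

        printf("hit=%d channel=%u\n", channel_hit, rx_channel);
        return 0;
}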
@@ -4279,7 +4256,7 @@ s_vMgrRxProbeResponse(
if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
return;
- if (sFrame.pERP != NULL) {
+ if (sFrame.pERP) {
sERP.byERP = sFrame.pERP->byContext;
sERP.bERPExist = TRUE;
} else {
@@ -4289,31 +4266,32 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
// update or insert the bss
- pBSSList = BSSpAddrIsInBSSList((HANDLE)pDevice, sFrame.pHdr->sA3.abyAddr3, sFrame.pSSID);
+ pBSSList = BSSpAddrIsInBSSList((void *) pDevice,
+ sFrame.pHdr->sA3.abyAddr3,
+ sFrame.pSSID);
if (pBSSList) {
- BSSbUpdateToBSSList((HANDLE)pDevice,
- *sFrame.pqwTimestamp,
- *sFrame.pwBeaconInterval,
- *sFrame.pwCapInfo,
- byCurrChannel,
- bChannelHit,
- sFrame.pSSID,
- sFrame.pSuppRates,
- sFrame.pExtSuppRates,
- &sERP,
- sFrame.pRSN,
- sFrame.pRSNWPA,
- sFrame.pIE_Country,
- sFrame.pIE_Quiet,
- pBSSList,
- sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of probresponse
- (HANDLE)pRxPacket
- );
- }
- else {
+ BSSbUpdateToBSSList((void *) pDevice,
+ *sFrame.pqwTimestamp,
+ *sFrame.pwBeaconInterval,
+ *sFrame.pwCapInfo,
+ byCurrChannel,
+ bChannelHit,
+ sFrame.pSSID,
+ sFrame.pSuppRates,
+ sFrame.pExtSuppRates,
+ &sERP,
+ sFrame.pRSN,
+ sFrame.pRSNWPA,
+ sFrame.pIE_Country,
+ sFrame.pIE_Quiet,
+ pBSSList,
+ sFrame.len - WLAN_HDR_ADDR3_LEN,
+ /* payload of probe response */
+ sFrame.pHdr->sA4.abyAddr4,
+ (void *) pRxPacket);
+ } else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Probe resp/insert: RxChannel = : %d\n", byCurrChannel);
- BSSbInsertToBSSList((HANDLE)pDevice,
+ BSSbInsertToBSSList((void *) pDevice,
sFrame.pHdr->sA3.abyAddr3,
*sFrame.pqwTimestamp,
*sFrame.pwBeaconInterval,
@@ -4328,9 +4306,8 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
sFrame.pIE_Country,
sFrame.pIE_Quiet,
sFrame.len - WLAN_HDR_ADDR3_LEN,
- sFrame.pHdr->sA4.abyAddr4, // payload of beacon
- (HANDLE)pRxPacket
- );
+ sFrame.pHdr->sA4.abyAddr4, /* payload of beacon */
+ (void *) pRxPacket);
}
return;
@@ -4349,11 +4326,11 @@ if(ChannelExceedZoneType(pDevice,byCurrChannel)==TRUE)
static
-VOID
+void
s_vMgrRxProbeRequest(
- IN PSDevice pDevice,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
+ PSDevice pDevice,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket
)
{
WLAN_FR_PROBEREQ sFrame;
@@ -4426,10 +4403,6 @@ s_vMgrRxProbeRequest(
return;
}
-
-
-
-
/*+
*
* Routine Description:
@@ -4444,17 +4417,13 @@ s_vMgrRxProbeRequest(
*
-*/
-
-VOID
-vMgrRxManagePacket(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
- )
+void vMgrRxManagePacket(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
BOOL bInScan = FALSE;
- UINT uNodeIndex = 0;
+ unsigned int uNodeIndex = 0;
NODE_STATE eNodeState = 0;
CMD_STATUS Status;
@@ -4583,9 +4552,6 @@ vMgrRxManagePacket(
return;
}
-
-
-
/*+
*
* Routine Description:
@@ -4597,11 +4563,7 @@ vMgrRxManagePacket(
* TRUE if success; FALSE if failed.
*
-*/
-BOOL
-bMgrPrepareBeaconToSend(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt
- )
+BOOL bMgrPrepareBeaconToSend(void *hDeviceContext, PSMgmtObject pMgmt)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PSTxMgmtPacket pTxPacket;
@@ -4653,10 +4615,10 @@ bMgrPrepareBeaconToSend(
*
-*/
static
-VOID
+void
s_vMgrLogStatus(
- IN PSMgmtObject pMgmt,
- IN WORD wStatus
+ PSMgmtObject pMgmt,
+ WORD wStatus
)
{
switch( wStatus ){
@@ -4705,7 +4667,6 @@ s_vMgrLogStatus(
}
}
-
/*
*
* Description:
@@ -4722,16 +4683,14 @@ s_vMgrLogStatus(
* Return Value: none.
*
-*/
-BOOL
-bAdd_PMKID_Candidate (
- IN HANDLE hDeviceContext,
- IN PBYTE pbyBSSID,
- IN PSRSNCapObject psRSNCapObj
- )
+
+BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
+ PBYTE pbyBSSID,
+ PSRSNCapObject psRSNCapObj)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
PPMKID_CANDIDATE pCandidateList;
- UINT ii = 0;
+ unsigned int ii = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"bAdd_PMKID_Candidate START: (%d)\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
@@ -4745,13 +4704,16 @@ bAdd_PMKID_Candidate (
// Update Old Candidate
for (ii = 0; ii < pDevice->gsPMKIDCandidate.NumCandidates; ii++) {
- pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
- if ( !memcmp(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN)) {
- if ((psRSNCapObj->bRSNCapExist == TRUE) && (psRSNCapObj->wRSNCap & BIT0)) {
- pCandidateList->Flags |= NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
- } else {
- pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
- }
+ pCandidateList = &pDevice->gsPMKIDCandidate.CandidateList[ii];
+ if (!memcmp(pCandidateList->BSSID, pbyBSSID, ETH_ALEN)) {
+ if ((psRSNCapObj->bRSNCapExist == TRUE)
+ && (psRSNCapObj->wRSNCap & BIT0)) {
+ pCandidateList->Flags |=
+ NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED;
+ } else {
+ pCandidateList->Flags &=
+ ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
+ }
return TRUE;
}
}
@@ -4763,7 +4725,7 @@ bAdd_PMKID_Candidate (
} else {
pCandidateList->Flags &= ~(NDIS_802_11_PMKID_CANDIDATE_PREAUTH_ENABLED);
}
- memcpy(pCandidateList->BSSID, pbyBSSID, U_ETHER_ADDR_LEN);
+ memcpy(pCandidateList->BSSID, pbyBSSID, ETH_ALEN);
pDevice->gsPMKIDCandidate.NumCandidates++;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"NumCandidates:%d\n", (int)pDevice->gsPMKIDCandidate.NumCandidates);
return TRUE;
@@ -4783,10 +4745,8 @@ bAdd_PMKID_Candidate (
* Return Value: none.
*
-*/
-VOID
-vFlush_PMKID_Candidate (
- IN HANDLE hDeviceContext
- )
+
+void vFlush_PMKID_Candidate(void *hDeviceContext)
{
PSDevice pDevice = (PSDevice)hDeviceContext;
@@ -4798,10 +4758,10 @@ vFlush_PMKID_Candidate (
static BOOL
s_bCipherMatch (
- IN PKnownBSS pBSSNode,
- IN NDIS_802_11_ENCRYPTION_STATUS EncStatus,
- OUT PBYTE pbyCCSPK,
- OUT PBYTE pbyCCSGK
+ PKnownBSS pBSSNode,
+ NDIS_802_11_ENCRYPTION_STATUS EncStatus,
+ PBYTE pbyCCSPK,
+ PBYTE pbyCCSGK
)
{
BYTE byMulticastCipher = KEY_CTL_INVALID;
diff --git a/drivers/staging/vt6656/wmgr.h b/drivers/staging/vt6656/wmgr.h
index c682a7fcbef..1e5b916aea1 100644
--- a/drivers/staging/vt6656/wmgr.h
+++ b/drivers/staging/vt6656/wmgr.h
@@ -84,37 +84,37 @@
//mike define: make timer to expire after desired times
#define timer_expire(timer,next_tick) mod_timer(&timer, RUN_AT(next_tick))
-typedef void (*TimerFunction)(ULONG);
+typedef void (*TimerFunction)(unsigned long);
//+++ NDIS related
-typedef UCHAR NDIS_802_11_MAC_ADDRESS[6];
+typedef unsigned char NDIS_802_11_MAC_ADDRESS[ETH_ALEN];
typedef struct _NDIS_802_11_AI_REQFI
{
- USHORT Capabilities;
- USHORT ListenInterval;
+ unsigned short Capabilities;
+ unsigned short ListenInterval;
NDIS_802_11_MAC_ADDRESS CurrentAPAddress;
} NDIS_802_11_AI_REQFI, *PNDIS_802_11_AI_REQFI;
typedef struct _NDIS_802_11_AI_RESFI
{
- USHORT Capabilities;
- USHORT StatusCode;
- USHORT AssociationId;
+ unsigned short Capabilities;
+ unsigned short StatusCode;
+ unsigned short AssociationId;
} NDIS_802_11_AI_RESFI, *PNDIS_802_11_AI_RESFI;
typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION
{
- ULONG Length;
- USHORT AvailableRequestFixedIEs;
+ unsigned long Length;
+ unsigned short AvailableRequestFixedIEs;
NDIS_802_11_AI_REQFI RequestFixedIEs;
- ULONG RequestIELength;
- ULONG OffsetRequestIEs;
- USHORT AvailableResponseFixedIEs;
+ unsigned long RequestIELength;
+ unsigned long OffsetRequestIEs;
+ unsigned short AvailableResponseFixedIEs;
NDIS_802_11_AI_RESFI ResponseFixedIEs;
- ULONG ResponseIELength;
- ULONG OffsetResponseIEs;
+ unsigned long ResponseIELength;
+ unsigned long OffsetResponseIEs;
} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
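The header hunks above replace the Windows-ish UCHAR/USHORT/ULONG typedefs with plain C types. Where a field's width is dictated by the wire format, fixed-width types are the usual next step; a short sketch with an invented structure comparing the two spellings (this is not the driver's real layout):

#include <stdint.h>
#include <stdio.h>

/* Plain C spelling of a small association-request record. */
struct assoc_req_fixed {
        unsigned short capabilities;
        unsigned short listen_interval;
        unsigned char  current_ap[6];
};

/* Fixed-width spelling, for fields whose size the protocol mandates. */
struct assoc_req_fixed_wire {
        uint16_t capabilities;
        uint16_t listen_interval;
        uint8_t  current_ap[6];
};

int main(void)
{
        printf("plain=%zu wire=%zu\n",
               sizeof(struct assoc_req_fixed),
               sizeof(struct assoc_req_fixed_wire));
        return 0;
}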
@@ -123,7 +123,7 @@ typedef struct tagSAssocInfo {
NDIS_802_11_ASSOCIATION_INFORMATION AssocInfo;
BYTE abyIEs[WLAN_BEACON_FR_MAXLEN+WLAN_BEACON_FR_MAXLEN];
// store ReqIEs set by OID_802_11_ASSOCIATION_INFORMATION
- ULONG RequestIELength;
+ unsigned long RequestIELength;
BYTE abyReqIEs[WLAN_BEACON_FR_MAXLEN];
} SAssocInfo, *PSAssocInfo;
//---
@@ -222,8 +222,8 @@ typedef enum tagWMAC_POWER_MODE {
typedef struct tagSTxMgmtPacket {
PUWLAN_80211HDR p80211Header;
- UINT cbMPDULen;
- UINT cbPayloadLen;
+ unsigned int cbMPDULen;
+ unsigned int cbPayloadLen;
} STxMgmtPacket, *PSTxMgmtPacket;
@@ -233,9 +233,9 @@ typedef struct tagSRxMgmtPacket {
PUWLAN_80211HDR p80211Header;
QWORD qwLocalTSF;
- UINT cbMPDULen;
- UINT cbPayloadLen;
- UINT uRSSI;
+ unsigned int cbMPDULen;
+ unsigned int cbPayloadLen;
+ unsigned int uRSSI;
BYTE bySQ;
BYTE byRxRate;
BYTE byRxChannel;
@@ -246,8 +246,7 @@ typedef struct tagSRxMgmtPacket {
typedef struct tagSMgmtObject
{
-
- PVOID pAdapter;
+ void *pAdapter;
// MAC address
BYTE abyMACAddr[WLAN_ADDR_LEN];
@@ -273,21 +272,21 @@ typedef struct tagSMgmtObject
BOOL bCurrBSSIDFilterOn;
// Current state vars
- UINT uCurrChannel;
+ unsigned int uCurrChannel;
BYTE abyCurrSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE abyCurrExtSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE abyCurrSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
BYTE abyCurrBSSID[WLAN_BSSID_LEN];
WORD wCurrCapInfo;
WORD wCurrAID;
- UINT uRSSITrigger;
+ unsigned int uRSSITrigger;
WORD wCurrATIMWindow;
WORD wCurrBeaconPeriod;
BOOL bIsDS;
BYTE byERPContext;
CMD_STATE eCommandState;
- UINT uScanChannel;
+ unsigned int uScanChannel;
// Desire joinning BSS vars
BYTE abyDesireSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
@@ -302,22 +301,22 @@ typedef struct tagSMgmtObject
// Adhoc or AP configuration vars
WORD wIBSSBeaconPeriod;
WORD wIBSSATIMWindow;
- UINT uIBSSChannel;
+ unsigned int uIBSSChannel;
BYTE abyIBSSSuppRates[WLAN_IEHDR_LEN + WLAN_RATES_MAXLEN + 1];
BYTE byAPBBType;
BYTE abyWPAIE[MAX_WPA_IE_LEN];
WORD wWPAIELen;
- UINT uAssocCount;
+ unsigned int uAssocCount;
BOOL bMoreData;
// Scan state vars
WMAC_SCAN_STATE eScanState;
WMAC_SCAN_TYPE eScanType;
- UINT uScanStartCh;
- UINT uScanEndCh;
+ unsigned int uScanStartCh;
+ unsigned int uScanEndCh;
WORD wScanSteps;
- UINT uScanBSSType;
+ unsigned int uScanBSSType;
// Desire scannig vars
BYTE abyScanSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
BYTE abyScanBSSID[WLAN_BSSID_LEN];
@@ -345,8 +344,8 @@ typedef struct tagSMgmtObject
BYTE abyPSTxMap[MAX_NODE_NUM + 1];
// managment command related
- UINT uCmdBusy;
- UINT uCmdHostAPBusy;
+ unsigned int uCmdBusy;
+ unsigned int uCmdHostAPBusy;
// managment packet pool
PBYTE pbyMgmtPacketPool;
@@ -390,7 +389,7 @@ typedef struct tagSMgmtObject
BOOL bSwitchChannel;
BYTE byNewChannel;
PWLAN_IE_MEASURE_REP pCurrMeasureEIDRep;
- UINT uLengthOfRepEIDs;
+ unsigned int uLengthOfRepEIDs;
BYTE abyCurrentMSRReq[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
BYTE abyCurrentMSRRep[sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN];
BYTE abyIECountry[WLAN_A3FR_MAXLEN];
@@ -401,102 +400,61 @@ typedef struct tagSMgmtObject
} SMgmtObject, *PSMgmtObject;
-
/*--------------------- Export Macros ------------------------------*/
-
/*--------------------- Export Functions --------------------------*/
+void vMgrObjectInit(void *hDeviceContext);
-void
-vMgrObjectInit(
- IN HANDLE hDeviceContext
- );
+void vMgrAssocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus);
+void vMgrReAssocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus);
-void
-vMgrAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- );
+void vMgrDisassocBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus);
-VOID
-vMgrReAssocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- );
+void vMgrAuthenBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PCMD_STATUS pStatus);
-VOID
-vMgrDisassocBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
- );
+void vMgrCreateOwnIBSS(void *hDeviceContext,
+ PCMD_STATUS pStatus);
-VOID
-vMgrAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- OUT PCMD_STATUS pStatus
- );
+void vMgrJoinBSSBegin(void *hDeviceContext,
+ PCMD_STATUS pStatus);
-VOID
-vMgrCreateOwnIBSS(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
- );
-
-VOID
-vMgrJoinBSSBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
- );
-
-VOID
-vMgrRxManagePacket(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PSRxMgmtPacket pRxPacket
- );
+void vMgrRxManagePacket(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PSRxMgmtPacket pRxPacket);
/*
-VOID
+void
vMgrScanBegin(
- IN HANDLE hDeviceContext,
- OUT PCMD_STATUS pStatus
+ void *hDeviceContext,
+ PCMD_STATUS pStatus
);
*/
-VOID
-vMgrDeAuthenBeginSta(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt,
- IN PBYTE abyDestAddress,
- IN WORD wReason,
- OUT PCMD_STATUS pStatus
- );
+void vMgrDeAuthenBeginSta(void *hDeviceContext,
+ PSMgmtObject pMgmt,
+ PBYTE abyDestAddress,
+ WORD wReason,
+ PCMD_STATUS pStatus);
-BOOL
-bMgrPrepareBeaconToSend(
- IN HANDLE hDeviceContext,
- IN PSMgmtObject pMgmt
- );
+BOOL bMgrPrepareBeaconToSend(void *hDeviceContext,
+ PSMgmtObject pMgmt);
+BOOL bAdd_PMKID_Candidate(void *hDeviceContext,
+ PBYTE pbyBSSID,
+ PSRSNCapObject psRSNCapObj);
-BOOL
-bAdd_PMKID_Candidate (
- IN HANDLE hDeviceContext,
- IN PBYTE pbyBSSID,
- IN PSRSNCapObject psRSNCapObj
- );
-
-VOID
-vFlush_PMKID_Candidate (
- IN HANDLE hDeviceContext
- );
+void vFlush_PMKID_Candidate(void *hDeviceContext);
-#endif // __WMGR_H__
+#endif /* __WMGR_H__ */
diff --git a/drivers/staging/vt6656/wpa.c b/drivers/staging/vt6656/wpa.c
index 5da671418b5..1fa6c9b88ed 100644
--- a/drivers/staging/vt6656/wpa.c
+++ b/drivers/staging/vt6656/wpa.c
@@ -68,9 +68,9 @@ const BYTE abyOUI05[4] = { 0x00, 0x50, 0xf2, 0x05 };
*
-*/
-VOID
+void
WPA_ClearRSN (
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
)
{
int ii;
@@ -104,10 +104,10 @@ WPA_ClearRSN (
* Return Value: none.
*
-*/
-VOID
+void
WPA_ParseRSN (
- IN PKnownBSS pBSSList,
- IN PWLAN_IE_RSN_EXT pRSN
+ PKnownBSS pBSSList,
+ PWLAN_IE_RSN_EXT pRSN
)
{
PWLAN_IE_RSN_AUTH pIE_RSN_Auth = NULL;
@@ -241,7 +241,7 @@ BOOL
WPA_SearchRSN (
BYTE byCmd,
BYTE byEncrypt,
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
)
{
int ii;
@@ -299,7 +299,7 @@ WPA_SearchRSN (
-*/
BOOL
WPAb_Is_RSN (
- IN PWLAN_IE_RSN_EXT pRSN
+ PWLAN_IE_RSN_EXT pRSN
)
{
if (pRSN == NULL)
diff --git a/drivers/staging/vt6656/wpa.h b/drivers/staging/vt6656/wpa.h
index 9d9ce01d0c6..889489adbb8 100644
--- a/drivers/staging/vt6656/wpa.h
+++ b/drivers/staging/vt6656/wpa.h
@@ -58,27 +58,27 @@
/*--------------------- Export Functions --------------------------*/
-VOID
+void
WPA_ClearRSN(
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
);
-VOID
+void
WPA_ParseRSN(
- IN PKnownBSS pBSSList,
- IN PWLAN_IE_RSN_EXT pRSN
+ PKnownBSS pBSSList,
+ PWLAN_IE_RSN_EXT pRSN
);
BOOL
WPA_SearchRSN(
BYTE byCmd,
BYTE byEncrypt,
- IN PKnownBSS pBSSList
+ PKnownBSS pBSSList
);
BOOL
WPAb_Is_RSN(
- IN PWLAN_IE_RSN_EXT pRSN
+ PWLAN_IE_RSN_EXT pRSN
);
-#endif // __WPA_H__
+#endif /* __WPA_H__ */
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index fa3aeedfb27..6d13190885d 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -71,9 +71,9 @@ const BYTE abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
* Return Value: none.
*
-*/
-VOID
+void
WPA2_ClearRSN (
- IN PKnownBSS pBSSNode
+ PKnownBSS pBSSNode
)
{
int ii;
@@ -106,10 +106,10 @@ WPA2_ClearRSN (
* Return Value: none.
*
-*/
-VOID
+void
WPA2vParseRSN (
- IN PKnownBSS pBSSNode,
- IN PWLAN_IE_RSN pRSN
+ PKnownBSS pBSSNode,
+ PWLAN_IE_RSN pRSN
)
{
int i, j;
@@ -260,15 +260,14 @@ WPA2vParseRSN (
* Return Value: length of IEs.
*
-*/
-UINT
-WPA2uSetIEs(
- IN PVOID pMgmtHandle,
- OUT PWLAN_IE_RSN pRSNIEs
+unsigned int
+WPA2uSetIEs(void *pMgmtHandle,
+ PWLAN_IE_RSN pRSNIEs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
PBYTE pbyBuffer = NULL;
- UINT ii = 0;
+ unsigned int ii = 0;
PWORD pwPMKID = NULL;
if (pRSNIEs == NULL) {
@@ -337,20 +336,25 @@ WPA2uSetIEs(
}
pRSNIEs->len +=2;
- if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
- (pMgmt->bRoaming == TRUE) &&
+ if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
+ (pMgmt->bRoaming == TRUE) &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
- // RSN PMKID
- pwPMKID = (PWORD)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
- *pwPMKID = 0; // Initialize PMKID count
- pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
- for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
- if ( !memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, U_ETHER_ADDR_LEN)) {
- (*pwPMKID) ++;
- memcpy(pbyBuffer, pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID, 16);
- pbyBuffer += 16;
- }
- }
+ /* RSN PMKID, pointer to PMKID count */
+ pwPMKID = (PWORD)(&pRSNIEs->abyRSN[18]);
+ *pwPMKID = 0; /* Initialize PMKID count */
+ pbyBuffer = &pRSNIEs->abyRSN[20]; /* Point to PMKID list */
+ for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
+ if (!memcmp(&pMgmt->
+ gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0],
+ pMgmt->abyCurrBSSID,
+ ETH_ALEN)) {
+ (*pwPMKID)++;
+ memcpy(pbyBuffer,
+ pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID,
+ 16);
+ pbyBuffer += 16;
+ }
+ }
if (*pwPMKID != 0) {
pRSNIEs->len += (2 + (*pwPMKID)*16);
} else {
diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
index e553b386900..429a910a5c5 100644
--- a/drivers/staging/vt6656/wpa2.h
+++ b/drivers/staging/vt6656/wpa2.h
@@ -45,7 +45,7 @@ typedef struct tagsPMKIDInfo {
} PMKIDInfo, *PPMKIDInfo;
typedef struct tagSPMKIDCache {
- ULONG BSSIDInfoCount;
+ unsigned long BSSIDInfoCount;
PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
} SPMKIDCache, *PSPMKIDCache;
@@ -58,21 +58,21 @@ typedef struct tagSPMKIDCache {
/*--------------------- Export Functions --------------------------*/
-VOID
+void
WPA2_ClearRSN (
- IN PKnownBSS pBSSNode
+ PKnownBSS pBSSNode
);
-VOID
+void
WPA2vParseRSN (
- IN PKnownBSS pBSSNode,
- IN PWLAN_IE_RSN pRSN
+ PKnownBSS pBSSNode,
+ PWLAN_IE_RSN pRSN
);
-UINT
+unsigned int
WPA2uSetIEs(
- IN PVOID pMgmtHandle,
- OUT PWLAN_IE_RSN pRSNIEs
+ void *pMgmtHandle,
+ PWLAN_IE_RSN pRSNIEs
);
-#endif // __WPA2_H__
+#endif /* __WPA2_H__ */
diff --git a/drivers/staging/vt6656/wpactl.c b/drivers/staging/vt6656/wpactl.c
index 4555bc0448b..961f583368a 100644
--- a/drivers/staging/vt6656/wpactl.c
+++ b/drivers/staging/vt6656/wpactl.c
@@ -103,7 +103,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
- memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, U_ETHER_ADDR_LEN);
+ memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
@@ -489,7 +489,7 @@ static int wpa_set_disassociate(PSDevice pDevice,
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass) {
if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
- bScheduleCommand((HANDLE)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL);
}
spin_unlock_irq(&pDevice->lock);
@@ -513,7 +513,7 @@ static int wpa_set_disassociate(PSDevice pDevice,
*/
static int wpa_set_scan(PSDevice pDevice,
- struct viawget_wpa_param *param)
+ struct viawget_wpa_param *param)
{
int ret = 0;
@@ -531,9 +531,11 @@ memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
pItemSSID->len = param->u.scan_req.ssid_len;
spin_lock_irq(&pDevice->lock);
- BSSvClearBSSList((HANDLE)pDevice, pDevice->bLinkPass);
- // bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
+ /* bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); */
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
spin_unlock_irq(&pDevice->lock);
return ret;
@@ -676,13 +678,12 @@ static int wpa_get_scan(PSDevice pDevice,
count++;
};
- pBuf = kmalloc(sizeof(struct viawget_scan_result) * count, (int)GFP_ATOMIC);
+ pBuf = kcalloc(count, sizeof(struct viawget_scan_result), (int)GFP_ATOMIC);
if (pBuf == NULL) {
ret = -ENOMEM;
return ret;
}
- memset(pBuf, 0, sizeof(struct viawget_scan_result) * count);
scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0, jj = 0; ii < MAX_BSS_NUM ; ii++) {
@@ -886,12 +887,14 @@ static int wpa_set_associate(PSDevice pDevice,
if (pCurr == NULL){
printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
+ bScheduleCommand((void *) pDevice,
+ WLAN_CMD_BSSID_SCAN,
+ pMgmt->abyDesireSSID);
};
}
/****************************************************************/
- bScheduleCommand((HANDLE) pDevice, WLAN_CMD_SSID, NULL);
+ bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
return ret;
@@ -922,7 +925,7 @@ int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = (struct viawget_wpa_param *) kmalloc((int)p->length, (int)GFP_KERNEL);
+ param = kmalloc((int)p->length, (int)GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
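wpa_get_scan above switches from kmalloc() plus memset() to kcalloc(), and wpa_ioctl drops the redundant cast on kmalloc()'s void * return. A user-space sketch of the same shape, with calloc() playing the role of kcalloc() and an invented result structure:

#include <stdio.h>
#include <stdlib.h>

struct scan_result {            /* stand-in for struct viawget_scan_result */
        int channel;
        int rssi;
};

int main(void)
{
        unsigned int count = 8;

        /* zero-initialised array allocation; no separate memset() needed */
        struct scan_result *buf = calloc(count, sizeof(*buf));
        if (buf == NULL)
                return -1;                     /* -ENOMEM in the driver */

        printf("buf[3].rssi=%d\n", buf[3].rssi);   /* prints 0 */
        free(buf);
        return 0;
}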
diff --git a/drivers/staging/vt6656/wpactl.h b/drivers/staging/vt6656/wpactl.h
index 56179f01311..00c8451ab50 100644
--- a/drivers/staging/vt6656/wpactl.h
+++ b/drivers/staging/vt6656/wpactl.h
@@ -52,9 +52,7 @@ typedef enum { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE,
#define GENERIC_INFO_ELEM 0xdd
#define RSN_INFO_ELEM 0x30
-
-
-typedef ULONGLONG NDIS_802_11_KEY_RSC;
+typedef unsigned long long NDIS_802_11_KEY_RSC;
/*--------------------- Export Classes ----------------------------*/
@@ -66,7 +64,4 @@ int wpa_set_wpadev(PSDevice pDevice, int val);
int wpa_ioctl(PSDevice pDevice, struct iw_point *p);
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel);
-#endif // __WPACL_H__
-
-
-
+#endif /* __WPACL_H__ */
diff --git a/drivers/staging/wavelan/Kconfig b/drivers/staging/wavelan/Kconfig
deleted file mode 100644
index af655668c2a..00000000000
--- a/drivers/staging/wavelan/Kconfig
+++ /dev/null
@@ -1,38 +0,0 @@
-config WAVELAN
- tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
- depends on ISA && WLAN
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- ---help---
- The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
- a Radio LAN (wireless Ethernet-like Local Area Network) using the
- radio frequencies 900 MHz and 2.4 GHz.
-
- If you want to use an ISA WaveLAN card under Linux, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Some more specific
- information is contained in
- <file:Documentation/networking/wavelan.txt> and in the source code
- <file:drivers/net/wireless/wavelan.p.h>.
-
- You will also need the wireless tools package available from
- <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
- Please read the man pages contained therein.
-
- To compile this driver as a module, choose M here: the module will be
- called wavelan.
-
-config PCMCIA_WAVELAN
- tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
- depends on PCMCIA && WLAN
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- help
- Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
- (PC-card) wireless Ethernet networking card to your computer. This
- driver is for the non-IEEE-802.11 Wavelan cards.
-
- To compile this driver as a module, choose M here: the module will be
- called wavelan_cs. If unsure, say N.
diff --git a/drivers/staging/wavelan/Makefile b/drivers/staging/wavelan/Makefile
deleted file mode 100644
index 1cde17c69a4..00000000000
--- a/drivers/staging/wavelan/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_WAVELAN) += wavelan.o
-obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
diff --git a/drivers/staging/wavelan/TODO b/drivers/staging/wavelan/TODO
deleted file mode 100644
index 9bd15a2f6d9..00000000000
--- a/drivers/staging/wavelan/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
- - step up and maintain this driver to ensure that it continues
- to work. Having the hardware for this is pretty much a
- requirement. If this does not happen, the will be removed in
- the 2.6.35 kernel release.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/wavelan/i82586.h b/drivers/staging/wavelan/i82586.h
deleted file mode 100644
index 5f65b250646..00000000000
--- a/drivers/staging/wavelan/i82586.h
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * Intel 82586 IEEE 802.3 Ethernet LAN Coprocessor.
- *
- * See:
- * Intel Microcommunications 1991
- * p1-1 to p1-37
- * Intel order No. 231658
- * ISBN 1-55512-119-5
- *
- * Unfortunately, the above chapter mentions neither
- * the System Configuration Pointer (SCP) nor the
- * Intermediate System Configuration Pointer (ISCP),
- * so we probably need to look elsewhere for the
- * whole story -- some recommend the "Intel LAN
- * Components manual" but I have neither a copy
- * nor a full reference. But "elsewhere" may be
- * in the same publication...
- * The description of a later device, the
- * "82596CA High-Performance 32-Bit Local Area Network
- * Coprocessor", (ibid. p1-38 to p1-109) does mention
- * the SCP and ISCP and also has an i82586 compatibility
- * mode. Even more useful is "AP-235 An 82586 Data Link
- * Driver" (ibid. p1-337 to p1-417).
- */
-
-#define I82586_MEMZ (64 * 1024)
-
-#define I82586_SCP_ADDR (I82586_MEMZ - sizeof(scp_t))
-
-#define ADDR_LEN 6
-#define I82586NULL 0xFFFF
-
-#define toff(t,p,f) (unsigned short)((void *)(&((t *)((void *)0 + (p)))->f) - (void *)0)
-
-/*
- * System Configuration Pointer (SCP).
- */
-typedef struct scp_t scp_t;
-struct scp_t
-{
- unsigned short scp_sysbus; /* 82586 bus width: */
-#define SCP_SY_16BBUS (0x0 << 0) /* 16 bits */
-#define SCP_SY_8BBUS (0x1 << 0) /* 8 bits. */
- unsigned short scp_junk[2]; /* Unused */
- unsigned short scp_iscpl; /* lower 16 bits of ISCP_ADDR */
- unsigned short scp_iscph; /* upper 16 bits of ISCP_ADDR */
-};
-
-/*
- * Intermediate System Configuration Pointer (ISCP).
- */
-typedef struct iscp_t iscp_t;
-struct iscp_t
-{
- unsigned short iscp_busy; /* set by CPU before first CA, */
- /* cleared by 82586 after read. */
- unsigned short iscp_offset; /* offset of SCB */
- unsigned short iscp_basel; /* base of SCB */
- unsigned short iscp_baseh; /* " */
-};
-
-/*
- * System Control Block (SCB).
- * The 82586 writes its status to scb_status and then
- * raises an interrupt to alert the CPU.
- * The CPU writes a command to scb_command and
- * then issues a Channel Attention (CA) to alert the 82586.
- */
-typedef struct scb_t scb_t;
-struct scb_t
-{
- unsigned short scb_status; /* Status of 82586 */
-#define SCB_ST_INT (0xF << 12) /* Some of: */
-#define SCB_ST_CX (0x1 << 15) /* Cmd completed */
-#define SCB_ST_FR (0x1 << 14) /* Frame received */
-#define SCB_ST_CNA (0x1 << 13) /* Cmd unit not active */
-#define SCB_ST_RNR (0x1 << 12) /* Rcv unit not ready */
-#define SCB_ST_JUNK0 (0x1 << 11) /* 0 */
-#define SCB_ST_CUS (0x7 << 8) /* Cmd unit status */
-#define SCB_ST_CUS_IDLE (0 << 8) /* Idle */
-#define SCB_ST_CUS_SUSP (1 << 8) /* Suspended */
-#define SCB_ST_CUS_ACTV (2 << 8) /* Active */
-#define SCB_ST_JUNK1 (0x1 << 7) /* 0 */
-#define SCB_ST_RUS (0x7 << 4) /* Rcv unit status */
-#define SCB_ST_RUS_IDLE (0 << 4) /* Idle */
-#define SCB_ST_RUS_SUSP (1 << 4) /* Suspended */
-#define SCB_ST_RUS_NRES (2 << 4) /* No resources */
-#define SCB_ST_RUS_RDY (4 << 4) /* Ready */
- unsigned short scb_command; /* Next command */
-#define SCB_CMD_ACK_CX (0x1 << 15) /* Ack cmd completion */
-#define SCB_CMD_ACK_FR (0x1 << 14) /* Ack frame received */
-#define SCB_CMD_ACK_CNA (0x1 << 13) /* Ack CU not active */
-#define SCB_CMD_ACK_RNR (0x1 << 12) /* Ack RU not ready */
-#define SCB_CMD_JUNKX (0x1 << 11) /* Unused */
-#define SCB_CMD_CUC (0x7 << 8) /* Command Unit command */
-#define SCB_CMD_CUC_NOP (0 << 8) /* Nop */
-#define SCB_CMD_CUC_GO (1 << 8) /* Start cbl_offset */
-#define SCB_CMD_CUC_RES (2 << 8) /* Resume execution */
-#define SCB_CMD_CUC_SUS (3 << 8) /* Suspend " */
-#define SCB_CMD_CUC_ABT (4 << 8) /* Abort " */
-#define SCB_CMD_RESET (0x1 << 7) /* Reset chip (hardware) */
-#define SCB_CMD_RUC (0x7 << 4) /* Receive Unit command */
-#define SCB_CMD_RUC_NOP (0 << 4) /* Nop */
-#define SCB_CMD_RUC_GO (1 << 4) /* Start rfa_offset */
-#define SCB_CMD_RUC_RES (2 << 4) /* Resume reception */
-#define SCB_CMD_RUC_SUS (3 << 4) /* Suspend " */
-#define SCB_CMD_RUC_ABT (4 << 4) /* Abort " */
- unsigned short scb_cbl_offset; /* Offset of first command unit */
- /* Action Command */
- unsigned short scb_rfa_offset; /* Offset of first Receive */
- /* Frame Descriptor in the */
- /* Receive Frame Area */
- unsigned short scb_crcerrs; /* Properly aligned frames */
- /* received with a CRC error */
- unsigned short scb_alnerrs; /* Misaligned frames received */
- /* with a CRC error */
- unsigned short scb_rscerrs; /* Frames lost due to no space */
- unsigned short scb_ovrnerrs; /* Frames lost due to slow bus */
-};
-
-#define scboff(p,f) toff(scb_t, p, f)
-
-/*
- * The eight Action Commands.
- */
-typedef enum acmd_e acmd_e;
-enum acmd_e
-{
- acmd_nop = 0, /* Do nothing */
- acmd_ia_setup = 1, /* Load an (ethernet) address into the */
- /* 82586 */
- acmd_configure = 2, /* Update the 82586 operating parameters */
- acmd_mc_setup = 3, /* Load a list of (ethernet) multicast */
- /* addresses into the 82586 */
- acmd_transmit = 4, /* Transmit a frame */
- acmd_tdr = 5, /* Perform a Time Domain Reflectometer */
- /* test on the serial link */
- acmd_dump = 6, /* Copy 82586 registers to memory */
- acmd_diagnose = 7, /* Run an internal self test */
-};
-
-/*
- * Generic Action Command header.
- */
-typedef struct ach_t ach_t;
-struct ach_t
-{
- unsigned short ac_status; /* Command status: */
-#define AC_SFLD_C (0x1 << 15) /* Command completed */
-#define AC_SFLD_B (0x1 << 14) /* Busy executing */
-#define AC_SFLD_OK (0x1 << 13) /* Completed error free */
-#define AC_SFLD_A (0x1 << 12) /* Command aborted */
-#define AC_SFLD_FAIL (0x1 << 11) /* Selftest failed */
-#define AC_SFLD_S10 (0x1 << 10) /* No carrier sense */
- /* during transmission */
-#define AC_SFLD_S9 (0x1 << 9) /* Tx unsuccessful: */
- /* (stopped) lost CTS */
-#define AC_SFLD_S8 (0x1 << 8) /* Tx unsuccessful: */
- /* (stopped) slow DMA */
-#define AC_SFLD_S7 (0x1 << 7) /* Tx deferred: */
- /* other link traffic */
-#define AC_SFLD_S6 (0x1 << 6) /* Heart Beat: collision */
- /* detect after last tx */
-#define AC_SFLD_S5 (0x1 << 5) /* Tx stopped: */
- /* excessive collisions */
-#define AC_SFLD_MAXCOL (0xF << 0) /* Collision count */
- unsigned short ac_command; /* Command specifier: */
-#define AC_CFLD_EL (0x1 << 15) /* End of command list */
-#define AC_CFLD_S (0x1 << 14) /* Suspend on completion */
-#define AC_CFLD_I (0x1 << 13) /* Interrupt on completion */
-#define AC_CFLD_CMD (0x7 << 0) /* acmd_e */
- unsigned short ac_link; /* Next Action Command */
-};
-
-#define acoff(p,f) toff(ach_t, p, f)
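
For illustration only, here is how a stand-alone NOP Action Command could be
filled in using the AC_CFLD_* bits and acmd_e values defined above. The helper
is hypothetical and not part of the driver; I82586NULL is the driver's "no
offset" marker used later in wavelan.c.

	/* Illustrative only: a NOP that ends the list and interrupts when done. */
	static void fill_nop_cmd(ac_nop_t *nop)
	{
		nop->nop_h.ac_status  = 0;
		nop->nop_h.ac_command = AC_CFLD_EL	/* last command in the list */
				      | AC_CFLD_I	/* interrupt on completion  */
				      | (acmd_nop & AC_CFLD_CMD);
		nop->nop_h.ac_link    = I82586NULL;	/* no successor             */
	}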
-
-/*
- * The Nop Action Command.
- */
-typedef struct ac_nop_t ac_nop_t;
-struct ac_nop_t
-{
- ach_t nop_h;
-};
-
-/*
- * The IA-Setup Action Command.
- */
-typedef struct ac_ias_t ac_ias_t;
-struct ac_ias_t
-{
- ach_t ias_h;
- unsigned char ias_addr[ADDR_LEN]; /* The (ethernet) address */
-};
-
-/*
- * The Configure Action Command.
- */
-typedef struct ac_cfg_t ac_cfg_t;
-struct ac_cfg_t
-{
- ach_t cfg_h;
- unsigned char cfg_byte_cnt; /* Size foll data: 4-12 */
-#define AC_CFG_BYTE_CNT(v) (((v) & 0xF) << 0)
- unsigned char cfg_fifolim; /* FIFO threshold */
-#define AC_CFG_FIFOLIM(v) (((v) & 0xF) << 0)
- unsigned char cfg_byte8;
-#define AC_CFG_SAV_BF(v) (((v) & 0x1) << 7) /* Save rxd bad frames */
-#define AC_CFG_SRDY(v) (((v) & 0x1) << 6) /* SRDY/ARDY pin means */
- /* external sync. */
- unsigned char cfg_byte9;
-#define AC_CFG_ELPBCK(v) (((v) & 0x1) << 7) /* External loopback */
-#define AC_CFG_ILPBCK(v) (((v) & 0x1) << 6) /* Internal loopback */
-#define AC_CFG_PRELEN(v) (((v) & 0x3) << 4) /* Preamble length */
-#define AC_CFG_PLEN_2 0 /* 2 bytes */
-#define AC_CFG_PLEN_4 1 /* 4 bytes */
-#define AC_CFG_PLEN_8 2 /* 8 bytes */
-#define AC_CFG_PLEN_16 3 /* 16 bytes */
-#define AC_CFG_ALOC(v) (((v) & 0x1) << 3) /* Addr/len data is */
- /* explicit in buffers */
-#define AC_CFG_ADDRLEN(v) (((v) & 0x7) << 0) /* Bytes per address */
- unsigned char cfg_byte10;
-#define AC_CFG_BOFMET(v) (((v) & 0x1) << 7) /* Use alternate expo. */
- /* backoff method */
-#define AC_CFG_ACR(v) (((v) & 0x7) << 4) /* Accelerated cont. res. */
-#define AC_CFG_LINPRIO(v) (((v) & 0x7) << 0) /* Linear priority */
- unsigned char cfg_ifs; /* Interframe spacing */
- unsigned char cfg_slotl; /* Slot time (low byte) */
- unsigned char cfg_byte13;
-#define AC_CFG_RETRYNUM(v) (((v) & 0xF) << 4) /* Max. collision retry */
-#define AC_CFG_SLTTMHI(v) (((v) & 0x7) << 0) /* Slot time (high bits) */
- unsigned char cfg_byte14;
-#define AC_CFG_FLGPAD(v) (((v) & 0x1) << 7) /* Pad with HDLC flags */
-#define AC_CFG_BTSTF(v) (((v) & 0x1) << 6) /* Do HDLC bitstuffing */
-#define AC_CFG_CRC16(v) (((v) & 0x1) << 5) /* 16 bit CCITT CRC */
-#define AC_CFG_NCRC(v) (((v) & 0x1) << 4) /* Insert no CRC */
-#define AC_CFG_TNCRS(v) (((v) & 0x1) << 3) /* Tx even if no carrier */
-#define AC_CFG_MANCH(v) (((v) & 0x1) << 2) /* Manchester coding */
-#define AC_CFG_BCDIS(v) (((v) & 0x1) << 1) /* Disable broadcast */
-#define AC_CFG_PRM(v) (((v) & 0x1) << 0) /* Promiscuous mode */
- unsigned char cfg_byte15;
-#define AC_CFG_ICDS(v) (((v) & 0x1) << 7) /* Internal collision */
- /* detect source */
-#define AC_CFG_CDTF(v) (((v) & 0x7) << 4) /* Collision detect */
- /* filter in bit times */
-#define AC_CFG_ICSS(v) (((v) & 0x1) << 3) /* Internal carrier */
- /* sense source */
-#define AC_CFG_CSTF(v) (((v) & 0x7) << 0) /* Carrier sense */
- /* filter in bit times */
- unsigned short cfg_min_frm_len;
-#define AC_CFG_MNFRM(v) (((v) & 0xFF) << 0) /* Min. bytes/frame (<= 255) */
-};
-
-/*
- * The MC-Setup Action Command.
- */
-typedef struct ac_mcs_t ac_mcs_t;
-struct ac_mcs_t
-{
- ach_t mcs_h;
- unsigned short mcs_cnt; /* No. of bytes of MC addresses */
-#if 0
- unsigned char mcs_data[ADDR_LEN]; /* The first MC address .. */
- ...
-#endif
-};
-
-#define I82586_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
-
-/*
- * The Transmit Action Command.
- */
-typedef struct ac_tx_t ac_tx_t;
-struct ac_tx_t
-{
- ach_t tx_h;
- unsigned short tx_tbd_offset; /* Address of list of buffers. */
-#if 0
-Linux packets are passed down with the destination MAC address
-and length/type field already prepended to the data,
-so we do not need to insert it. Consistent with this
-we must also set the AC_CFG_ALOC(..) flag during the
-ac_cfg_t action command.
- unsigned char tx_addr[ADDR_LEN]; /* The frame dest. address */
- unsigned short tx_length; /* The frame length */
-#endif /* 0 */
-};
-
-/*
- * The Time Domain Reflectometer Action Command.
- */
-typedef struct ac_tdr_t ac_tdr_t;
-struct ac_tdr_t
-{
- ach_t tdr_h;
- unsigned short tdr_result; /* Result. */
-#define AC_TDR_LNK_OK (0x1 << 15) /* No link problem */
-#define AC_TDR_XCVR_PRB (0x1 << 14) /* Txcvr cable problem */
-#define AC_TDR_ET_OPN (0x1 << 13) /* Open on the link */
-#define AC_TDR_ET_SRT (0x1 << 12) /* Short on the link */
-#define AC_TDR_TIME (0x7FF << 0) /* Distance to problem */
- /* site in transmit */
- /* clock cycles */
-};
-
-/*
- * The Dump Action Command.
- */
-typedef struct ac_dmp_t ac_dmp_t;
-struct ac_dmp_t
-{
- ach_t dmp_h;
- unsigned short dmp_offset; /* Result. */
-};
-
-/*
- * Size of the result of the dump command.
- */
-#define DUMPBYTES 170
-
-/*
- * The Diagnose Action Command.
- */
-typedef struct ac_dgn_t ac_dgn_t;
-struct ac_dgn_t
-{
- ach_t dgn_h;
-};
-
-/*
- * Transmit Buffer Descriptor (TBD).
- */
-typedef struct tbd_t tbd_t;
-struct tbd_t
-{
- unsigned short tbd_status; /* Written by the CPU */
-#define TBD_STATUS_EOF (0x1 << 15) /* This TBD is the */
- /* last for this frame */
-#define TBD_STATUS_ACNT (0x3FFF << 0) /* Actual count of data */
- /* bytes in this buffer */
- unsigned short tbd_next_bd_offset; /* Next in list */
- unsigned short tbd_bufl; /* Buffer address (low) */
- unsigned short tbd_bufh; /* " " (high) */
-};
-
-/*
- * Receive Buffer Descriptor (RBD).
- */
-typedef struct rbd_t rbd_t;
-struct rbd_t
-{
- unsigned short rbd_status; /* Written by the 82586 */
-#define RBD_STATUS_EOF (0x1 << 15) /* This RBD is the */
- /* last for this frame */
-#define RBD_STATUS_F (0x1 << 14) /* ACNT field is valid */
-#define RBD_STATUS_ACNT (0x3FFF << 0) /* Actual no. of data */
- /* bytes in this buffer */
- unsigned short rbd_next_rbd_offset; /* Next rbd in list */
- unsigned short rbd_bufl; /* Data pointer (low) */
- unsigned short rbd_bufh; /* " " (high) */
- unsigned short rbd_el_size; /* EL+Data buf. size */
-#define RBD_EL (0x1 << 15) /* This BD is the */
- /* last in the list */
-#define RBD_SIZE (0x3FFF << 0) /* No. of bytes the */
- /* buffer can hold */
-};
-
-#define rbdoff(p,f) toff(rbd_t, p, f)
-
-/*
- * Frame Descriptor (FD).
- */
-typedef struct fd_t fd_t;
-struct fd_t
-{
- unsigned short fd_status; /* Written by the 82586 */
-#define FD_STATUS_C (0x1 << 15) /* Completed storing frame */
-#define FD_STATUS_B (0x1 << 14) /* FD was consumed by RU */
-#define FD_STATUS_OK (0x1 << 13) /* Frame rxd successfully */
-#define FD_STATUS_S11 (0x1 << 11) /* CRC error */
-#define FD_STATUS_S10 (0x1 << 10) /* Alignment error */
-#define FD_STATUS_S9 (0x1 << 9) /* Ran out of resources */
-#define FD_STATUS_S8 (0x1 << 8) /* Rx DMA overrun */
-#define FD_STATUS_S7 (0x1 << 7) /* Frame too short */
-#define FD_STATUS_S6 (0x1 << 6) /* No EOF flag */
- unsigned short fd_command; /* Command */
-#define FD_COMMAND_EL (0x1 << 15) /* Last FD in list */
-#define FD_COMMAND_S (0x1 << 14) /* Suspend RU after rx */
- unsigned short fd_link_offset; /* Next FD */
- unsigned short fd_rbd_offset; /* First RBD (data) */
- /* Prepared by CPU, */
- /* updated by 82586 */
-#if 0
-I think the rest is unused since we
-have set AC_CFG_ALOC(..). However, just
-in case, we leave the space.
-#endif /* 0 */
- unsigned char fd_dest[ADDR_LEN]; /* Destination address */
- /* Written by 82586 */
- unsigned char fd_src[ADDR_LEN]; /* Source address */
- /* Written by 82586 */
- unsigned short fd_length; /* Frame length or type */
- /* Written by 82586 */
-};
-
-#define fdoff(p,f) toff(fd_t, p, f)
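
On receive, the 82586 fills Frame Descriptors whose data live in chains of
Receive Buffer Descriptors: RBD_STATUS_F says the byte count is valid, and
RBD_STATUS_EOF marks the last buffer of the frame. A hedged sketch of pulling
the usable length out of one RBD (hypothetical helpers; the driver's actual
receive routines are outside this hunk):

	/* Illustrative only: number of data bytes held by this RBD, or -1. */
	static int rbd_data_len(const rbd_t *rbd)
	{
		if (!(rbd->rbd_status & RBD_STATUS_F))
			return -1;		/* count not yet valid */
		return rbd->rbd_status & RBD_STATUS_ACNT;
	}

	/* Illustrative only: is this the last buffer of the frame? */
	static int rbd_is_last(const rbd_t *rbd)
	{
		return (rbd->rbd_status & RBD_STATUS_EOF) != 0;
	}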
-
-/*
- * This software may only be used and distributed
- * according to the terms of the GNU General Public License.
- *
- * For more details, see wavelan.c.
- */
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
deleted file mode 100644
index 54ca63196fd..00000000000
--- a/drivers/staging/wavelan/wavelan.c
+++ /dev/null
@@ -1,4383 +0,0 @@
-/*
- * WaveLAN ISA driver
- *
- * Jean II - HPLB '96
- *
- * Reorganisation and extension of the driver.
- * Original copyright follows (also see the end of this file).
- * See wavelan.p.h for details.
- *
- *
- *
- * AT&T GIS (nee NCR) WaveLAN card:
- * An Ethernet-like radio transceiver
- * controlled by an Intel 82586 coprocessor.
- */
-
-#include "wavelan.p.h" /* Private header */
-
-/************************* MISC SUBROUTINES **************************/
-/*
- * Subroutines which don't fit in one of the following categories
- * (WaveLAN modem or i82586)
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Translate irq number to PSA irq parameter
- */
-static u8 wv_irq_to_psa(int irq)
-{
- if (irq < 0 || irq >= ARRAY_SIZE(irqvals))
- return 0;
-
- return irqvals[irq];
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Translate PSA irq parameter to irq number
- */
-static int __init wv_psa_to_irq(u8 irqval)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(irqvals); i++)
- if (irqvals[i] == irqval)
- return i;
-
- return -1;
-}
-
-/********************* HOST ADAPTER SUBROUTINES *********************/
-/*
- * Useful subroutines to manage the WaveLAN ISA interface
- *
- * One major difference with the PCMCIA hardware (except the port mapping)
- * is that we have to keep the state of the Host Control Register
- * because of the interrupt enable & bus size flags.
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Read from card's Host Adapter Status Register.
- */
-static inline u16 hasr_read(unsigned long ioaddr)
-{
- return (inw(HASR(ioaddr)));
-} /* hasr_read */
-
-/*------------------------------------------------------------------*/
-/*
- * Write to card's Host Adapter Command Register.
- */
-static inline void hacr_write(unsigned long ioaddr, u16 hacr)
-{
- outw(hacr, HACR(ioaddr));
-} /* hacr_write */
-
-/*------------------------------------------------------------------*/
-/*
- * Write to card's Host Adapter Command Register. Include a delay for
- * those times when it is needed.
- */
-static void hacr_write_slow(unsigned long ioaddr, u16 hacr)
-{
- hacr_write(ioaddr, hacr);
- /* delay might only be needed sometimes */
- mdelay(1);
-} /* hacr_write_slow */
-
-/*------------------------------------------------------------------*/
-/*
- * Set the channel attention bit.
- */
-static inline void set_chan_attn(unsigned long ioaddr, u16 hacr)
-{
- hacr_write(ioaddr, hacr | HACR_CA);
-} /* set_chan_attn */
-
-/*------------------------------------------------------------------*/
-/*
- * Reset, and then set host adaptor into default mode.
- */
-static inline void wv_hacr_reset(unsigned long ioaddr)
-{
- hacr_write_slow(ioaddr, HACR_RESET);
- hacr_write(ioaddr, HACR_DEFAULT);
-} /* wv_hacr_reset */
-
-/*------------------------------------------------------------------*/
-/*
- * Set the I/O transfer over the ISA bus to 8-bit mode
- */
-static inline void wv_16_off(unsigned long ioaddr, u16 hacr)
-{
- hacr &= ~HACR_16BITS;
- hacr_write(ioaddr, hacr);
-} /* wv_16_off */
-
-/*------------------------------------------------------------------*/
-/*
- * Set the I/O transfer over the ISA bus to 16-bit mode
- */
-static inline void wv_16_on(unsigned long ioaddr, u16 hacr)
-{
- hacr |= HACR_16BITS;
- hacr_write(ioaddr, hacr);
-} /* wv_16_on */
-
-/*------------------------------------------------------------------*/
-/*
- * Disable interrupts on the WaveLAN hardware.
- * (called by wv_82586_stop())
- */
-static inline void wv_ints_off(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
-
- lp->hacr &= ~HACR_INTRON;
- hacr_write(ioaddr, lp->hacr);
-} /* wv_ints_off */
-
-/*------------------------------------------------------------------*/
-/*
- * Enable interrupts on the WaveLAN hardware.
- * (called by wv_hw_reset())
- */
-static inline void wv_ints_on(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
-
- lp->hacr |= HACR_INTRON;
- hacr_write(ioaddr, lp->hacr);
-} /* wv_ints_on */
-
-/******************* MODEM MANAGEMENT SUBROUTINES *******************/
-/*
- * Useful subroutines to manage the modem of the WaveLAN
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Read bytes from the Parameter Storage Area (PSA) in the
- * WaveLAN card's memory.
- */
-static void psa_read(unsigned long ioaddr, u16 hacr, int o, /* offset in PSA */
- u8 * b, /* buffer to fill */
- int n)
-{ /* size to read */
- wv_16_off(ioaddr, hacr);
-
- while (n-- > 0) {
- outw(o, PIOR2(ioaddr));
- o++;
- *b++ = inb(PIOP2(ioaddr));
- }
-
- wv_16_on(ioaddr, hacr);
-} /* psa_read */
-
-/*------------------------------------------------------------------*/
-/*
- * Write the Parameter Storage Area to the WaveLAN card's memory.
- */
-static void psa_write(unsigned long ioaddr, u16 hacr, int o, /* Offset in PSA */
- u8 * b, /* Buffer in memory */
- int n)
-{ /* Length of buffer */
- int count = 0;
-
- wv_16_off(ioaddr, hacr);
-
- while (n-- > 0) {
- outw(o, PIOR2(ioaddr));
- o++;
-
- outb(*b, PIOP2(ioaddr));
- b++;
-
- /* Wait for the memory to finish its write cycle */
- count = 0;
- while ((count++ < 100) &&
- (hasr_read(ioaddr) & HASR_PSA_BUSY)) mdelay(1);
- }
-
- wv_16_on(ioaddr, hacr);
-} /* psa_write */
-
-#ifdef SET_PSA_CRC
-/*------------------------------------------------------------------*/
-/*
- * Calculate the PSA CRC
- * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
- * NOTE: if the specified length includes the CRC bytes, the returned
- * value should be zero (i.e. the PSA checksum is correct).
- *
- * The Windows drivers don't use the CRC, but the AP and the PtP tool
- * depend on it.
- */
-static u16 psa_crc(u8 * psa, /* The PSA */
- int size)
-{ /* Number of bytes to checksum */
- int byte_cnt; /* Loop over the PSA bytes */
- u16 crc_bytes = 0; /* Running CRC value */
- int bit_cnt; /* Loop over the bits of each byte */
-
- for (byte_cnt = 0; byte_cnt < size; byte_cnt++) {
- crc_bytes ^= psa[byte_cnt]; /* It's an XOR */
-
- for (bit_cnt = 1; bit_cnt < 9; bit_cnt++) {
- if (crc_bytes & 0x0001)
- crc_bytes = (crc_bytes >> 1) ^ 0xA001;
- else
- crc_bytes >>= 1;
- }
- }
-
- return crc_bytes;
-} /* psa_crc */
-#endif /* SET_PSA_CRC */
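
The NOTE above can be checked with ordinary user-space code: the 0xA001 loop is
a reflected CRC-16 with a zero initial value, so recomputing it over the data
followed by the two stored CRC bytes (low byte first, exactly as
update_psa_checksum() stores them below) yields zero. A self-contained sketch,
not driver code:

	#include <stdio.h>

	/* Same algorithm as psa_crc() above, restated for a stand-alone test. */
	static unsigned short crc16_a001(const unsigned char *buf, int size)
	{
		unsigned short crc = 0;
		int i, bit;

		for (i = 0; i < size; i++) {
			crc ^= buf[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
		}
		return crc;
	}

	int main(void)
	{
		unsigned char data[6 + 2] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x60 };
		unsigned short crc = crc16_a001(data, 6);

		data[6] = crc & 0xFF;		/* like psa_crc[0]: low byte  */
		data[7] = (crc >> 8) & 0xFF;	/* like psa_crc[1]: high byte */

		/* Including the CRC bytes in the length must give zero. */
		printf("check = 0x%04X\n", crc16_a001(data, 8));
		return 0;
	}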
-
-/*------------------------------------------------------------------*/
-/*
- * update the checksum field in the Wavelan's PSA
- */
-static void update_psa_checksum(struct net_device * dev, unsigned long ioaddr, u16 hacr)
-{
-#ifdef SET_PSA_CRC
- psa_t psa;
- u16 crc;
-
- /* read the parameter storage area */
- psa_read(ioaddr, hacr, 0, (unsigned char *) &psa, sizeof(psa));
-
- /* update the checksum */
- crc = psa_crc((unsigned char *) &psa,
- sizeof(psa) - sizeof(psa.psa_crc[0]) -
- sizeof(psa.psa_crc[1])
- - sizeof(psa.psa_crc_status));
-
- psa.psa_crc[0] = crc & 0xFF;
- psa.psa_crc[1] = (crc & 0xFF00) >> 8;
-
- /* Write it ! */
- psa_write(ioaddr, hacr, (char *) &psa.psa_crc - (char *) &psa,
- (unsigned char *) &psa.psa_crc, 2);
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "%s: update_psa_checksum(): crc = 0x%02x%02x\n",
- dev->name, psa.psa_crc[0], psa.psa_crc[1]);
-
- /* Check again (luxury !) */
- crc = psa_crc((unsigned char *) &psa,
- sizeof(psa) - sizeof(psa.psa_crc_status));
-
- if (crc != 0)
- printk(KERN_WARNING
- "%s: update_psa_checksum(): CRC does not agree with PSA data (even after recalculating)\n",
- dev->name);
-#endif /* DEBUG_IOCTL_INFO */
-#endif /* SET_PSA_CRC */
-} /* update_psa_checksum */
-
-/*------------------------------------------------------------------*/
-/*
- * Write 1 byte to the MMC.
- */
-static void mmc_out(unsigned long ioaddr, u16 o, u8 d)
-{
- int count = 0;
-
- /* Wait for MMC to go idle */
- while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
- udelay(10);
-
- outw((u16) (((u16) d << 8) | (o << 1) | 1), MMCR(ioaddr));
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to write bytes to the Modem Management Controller.
- * We start at the end because it is the way it should be!
- */
-static void mmc_write(unsigned long ioaddr, u8 o, u8 * b, int n)
-{
- o += n;
- b += n;
-
- while (n-- > 0)
- mmc_out(ioaddr, --o, *(--b));
-} /* mmc_write */
-
-/*------------------------------------------------------------------*/
-/*
- * Read a byte from the MMC.
- * Optimised version for 1 byte, avoid using memory.
- */
-static u8 mmc_in(unsigned long ioaddr, u16 o)
-{
- int count = 0;
-
- while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
- udelay(10);
- outw(o << 1, MMCR(ioaddr));
-
- while ((count++ < 100) && (inw(HASR(ioaddr)) & HASR_MMC_BUSY))
- udelay(10);
- return (u8) (inw(MMCR(ioaddr)) >> 8);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to read bytes from the Modem Management Controller.
- * The implementation is complicated by a lack of address lines,
- * which prevents decoding of the low-order bit.
- * (the per-byte access itself lives in mmc_in() above)
- * We start at the end because it is the way it should be!
- */
-static inline void mmc_read(unsigned long ioaddr, u8 o, u8 * b, int n)
-{
- o += n;
- b += n;
-
- while (n-- > 0)
- *(--b) = mmc_in(ioaddr, --o);
-} /* mmc_read */
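
mmc_out() and mmc_in() both talk to the single MMCR register: the word written
carries the register offset shifted left by one, bit 0 selects write (1) or
read (0), and the data byte travels in the high byte (outgoing on writes,
incoming on the read-back). A small illustrative restatement of that encoding
(hypothetical helpers, mirroring the expressions above):

	/* Illustrative only: word written to MMCR for a register write. */
	static unsigned short mmc_write_word(unsigned char reg, unsigned char data)
	{
		return ((unsigned short) data << 8)	/* data in the high byte    */
		     | (reg << 1)			/* register offset          */
		     | 1;				/* bit 0 set -> write cycle */
	}

	/* Illustrative only: word written to MMCR to start a register read. */
	static unsigned short mmc_read_word(unsigned char reg)
	{
		return reg << 1;			/* bit 0 clear -> read cycle */
	}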
-
-/*------------------------------------------------------------------*/
-/*
- * Get the type of encryption available.
- */
-static inline int mmc_encr(unsigned long ioaddr)
-{ /* I/O port of the card */
- int temp;
-
- temp = mmc_in(ioaddr, mmroff(0, mmr_des_avail));
- if ((temp != MMR_DES_AVAIL_DES) && (temp != MMR_DES_AVAIL_AES))
- return 0;
- else
- return temp;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wait for the frequency EEPROM to complete a command.
- * I hope this one will be optimally inlined.
- */
-static inline void fee_wait(unsigned long ioaddr, /* I/O port of the card */
- int delay, /* Base delay to wait for */
- int number)
-{ /* Number of time to wait */
- int count = 0; /* Wait only a limited time */
-
- while ((count++ < number) &&
- (mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- MMR_FEE_STATUS_BUSY)) udelay(delay);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Read bytes from the Frequency EEPROM (frequency select cards).
- */
-static void fee_read(unsigned long ioaddr, /* I/O port of the card */
- u16 o, /* destination offset */
- u16 * b, /* data buffer */
- int n)
-{ /* number of registers */
- b += n; /* Position at the end of the area */
-
- /* Write the address */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
-
- /* Loop on all buffer */
- while (n-- > 0) {
- /* Write the read command */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_READ);
-
- /* Wait until EEPROM is ready (should be quick). */
- fee_wait(ioaddr, 10, 100);
-
- /* Read the value. */
- *--b = ((mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)) << 8) |
- mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
- }
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Write bytes from the Frequency EEPROM (frequency select cards).
- * This is a bit complicated, because the frequency EEPROM has to
- * be unprotected and the write enabled.
- * Jean II
- */
-static void fee_write(unsigned long ioaddr, /* I/O port of the card */
- u16 o, /* destination offset */
- u16 * b, /* data buffer */
- int n)
-{ /* number of registers */
- b += n; /* Position at the end of the area. */
-
-#ifdef EEPROM_IS_PROTECTED /* disabled */
-#ifdef DOESNT_SEEM_TO_WORK /* disabled */
- /* Ask to read the protected register */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
-
- fee_wait(ioaddr, 10, 100);
-
- /* Read the protected register. */
- printk("Protected 2: %02X-%02X\n",
- mmc_in(ioaddr, mmroff(0, mmr_fee_data_h)),
- mmc_in(ioaddr, mmroff(0, mmr_fee_data_l)));
-#endif /* DOESNT_SEEM_TO_WORK */
-
- /* Enable protected register. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
-
- fee_wait(ioaddr, 10, 100);
-
- /* Unprotect area. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
-#ifdef DOESNT_SEEM_TO_WORK /* disabled */
- /* or use: */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
-#endif /* DOESNT_SEEM_TO_WORK */
-
- fee_wait(ioaddr, 10, 100);
-#endif /* EEPROM_IS_PROTECTED */
-
- /* Write enable. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
-
- fee_wait(ioaddr, 10, 100);
-
- /* Write the EEPROM address. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), o + n - 1);
-
- /* Loop on all buffer */
- while (n-- > 0) {
- /* Write the value. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
-
- /* Write the write command. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_WRITE);
-
- /* WaveLAN documentation says to wait at least 10 ms for EEBUSY = 0 */
- mdelay(10);
- fee_wait(ioaddr, 10, 100);
- }
-
- /* Write disable. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
-
- fee_wait(ioaddr, 10, 100);
-
-#ifdef EEPROM_IS_PROTECTED /* disabled */
- /* Reprotect EEPROM. */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x00);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
-
- fee_wait(ioaddr, 10, 100);
-#endif /* EEPROM_IS_PROTECTED */
-}
-
-/************************ I82586 SUBROUTINES *************************/
-/*
- * Useful subroutines to manage the Ethernet controller
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Read bytes from the on-board RAM.
- * Why does inlining this function make it fail?
- */
-static /*inline */ void obram_read(unsigned long ioaddr,
- u16 o, u8 * b, int n)
-{
- outw(o, PIOR1(ioaddr));
- insw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Write bytes to the on-board RAM.
- */
-static inline void obram_write(unsigned long ioaddr, u16 o, u8 * b, int n)
-{
- outw(o, PIOR1(ioaddr));
- outsw(PIOP1(ioaddr), (unsigned short *) b, (n + 1) >> 1);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Acknowledge the reading of the status issued by the i82586.
- */
-static void wv_ack(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- u16 scb_cs;
- int i;
-
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
- scb_cs &= SCB_ST_INT;
-
- if (scb_cs == 0)
- return;
-
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
-
- set_chan_attn(ioaddr, lp->hacr);
-
- for (i = 1000; i > 0; i--) {
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
- if (scb_cs == 0)
- break;
-
- udelay(10);
- }
- udelay(100);
-
-#ifdef DEBUG_CONFIG_ERROR
- if (i <= 0)
- printk(KERN_INFO
- "%s: wv_ack(): board not accepting command.\n",
- dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Set channel attention bit and busy wait until command has
- * completed, then acknowledge completion of the command.
- */
-static int wv_synchronous_cmd(struct net_device * dev, const char *str)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- u16 scb_cmd;
- ach_t cb;
- int i;
-
- scb_cmd = SCB_CMD_CUC & SCB_CMD_CUC_GO;
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cmd, sizeof(scb_cmd));
-
- set_chan_attn(ioaddr, lp->hacr);
-
- for (i = 1000; i > 0; i--) {
- obram_read(ioaddr, OFFSET_CU, (unsigned char *) &cb,
- sizeof(cb));
- if (cb.ac_status & AC_SFLD_C)
- break;
-
- udelay(10);
- }
- udelay(100);
-
- if (i <= 0 || !(cb.ac_status & AC_SFLD_OK)) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO "%s: %s failed; status = 0x%x\n",
- dev->name, str, cb.ac_status);
-#endif
-#ifdef DEBUG_I82586_SHOW
- wv_scb_show(ioaddr);
-#endif
- return -1;
- }
-
- /* Ack the status */
- wv_ack(dev);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Configuration commands completion interrupt.
- * Check if done, and if OK.
- */
-static int
-wv_config_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp)
-{
- unsigned short mcs_addr;
- unsigned short status;
- int ret;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wv_config_complete()\n", dev->name);
-#endif
-
- mcs_addr = lp->tx_first_in_use + sizeof(ac_tx_t) + sizeof(ac_nop_t)
- + sizeof(tbd_t) + sizeof(ac_cfg_t) + sizeof(ac_ias_t);
-
- /* Read the status of the last command (set mc list). */
- obram_read(ioaddr, acoff(mcs_addr, ac_status),
- (unsigned char *) &status, sizeof(status));
-
- /* If not completed -> exit */
- if ((status & AC_SFLD_C) == 0)
- ret = 0; /* Not ready to be scrapped */
- else {
-#ifdef DEBUG_CONFIG_ERROR
- unsigned short cfg_addr;
- unsigned short ias_addr;
-
- /* Check mc_config command */
- if ((status & AC_SFLD_OK) != AC_SFLD_OK)
- printk(KERN_INFO
- "%s: wv_config_complete(): set_multicast_address failed; status = 0x%x\n",
- dev->name, status);
-
- /* check ia-config command */
- ias_addr = mcs_addr - sizeof(ac_ias_t);
- obram_read(ioaddr, acoff(ias_addr, ac_status),
- (unsigned char *) &status, sizeof(status));
- if ((status & AC_SFLD_OK) != AC_SFLD_OK)
- printk(KERN_INFO
- "%s: wv_config_complete(): set_MAC_address failed; status = 0x%x\n",
- dev->name, status);
-
- /* Check config command. */
- cfg_addr = ias_addr - sizeof(ac_cfg_t);
- obram_read(ioaddr, acoff(cfg_addr, ac_status),
- (unsigned char *) &status, sizeof(status));
- if ((status & AC_SFLD_OK) != AC_SFLD_OK)
- printk(KERN_INFO
- "%s: wv_config_complete(): configure failed; status = 0x%x\n",
- dev->name, status);
-#endif /* DEBUG_CONFIG_ERROR */
-
- ret = 1; /* Ready to be scrapped */
- }
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wv_config_complete() - %d\n", dev->name,
- ret);
-#endif
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Command completion interrupt.
- * Reclaim as many freed tx buffers as we can.
- * (called in wavelan_interrupt()).
- * Note : the spinlock is already grabbed for us.
- */
-static int wv_complete(struct net_device * dev, unsigned long ioaddr, net_local * lp)
-{
- int nreaped = 0;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wv_complete()\n", dev->name);
-#endif
-
- /* Loop on all the transmit buffers */
- while (lp->tx_first_in_use != I82586NULL) {
- unsigned short tx_status;
-
- /* Read the first transmit buffer */
- obram_read(ioaddr, acoff(lp->tx_first_in_use, ac_status),
- (unsigned char *) &tx_status,
- sizeof(tx_status));
-
- /* If not completed -> exit */
- if ((tx_status & AC_SFLD_C) == 0)
- break;
-
- /* Hack for reconfiguration */
- if (tx_status == 0xFFFF)
- if (!wv_config_complete(dev, ioaddr, lp))
- break; /* Not completed */
-
- /* We now remove this buffer */
- nreaped++;
- --lp->tx_n_in_use;
-
-/*
-if (lp->tx_n_in_use > 0)
- printk("%c", "0123456789abcdefghijk"[lp->tx_n_in_use]);
-*/
-
- /* Was it the last one? */
- if (lp->tx_n_in_use <= 0)
- lp->tx_first_in_use = I82586NULL;
- else {
- /* Next one in the chain */
- lp->tx_first_in_use += TXBLOCKZ;
- if (lp->tx_first_in_use >=
- OFFSET_CU +
- NTXBLOCKS * TXBLOCKZ) lp->tx_first_in_use -=
- NTXBLOCKS * TXBLOCKZ;
- }
-
- /* Hack for reconfiguration */
- if (tx_status == 0xFFFF)
- continue;
-
- /* Now, check status of the finished command */
- if (tx_status & AC_SFLD_OK) {
- int ncollisions;
-
- dev->stats.tx_packets++;
- ncollisions = tx_status & AC_SFLD_MAXCOL;
- dev->stats.collisions += ncollisions;
-#ifdef DEBUG_TX_INFO
- if (ncollisions > 0)
- printk(KERN_DEBUG
- "%s: wv_complete(): tx completed after %d collisions.\n",
- dev->name, ncollisions);
-#endif
- } else {
- dev->stats.tx_errors++;
- if (tx_status & AC_SFLD_S10) {
- dev->stats.tx_carrier_errors++;
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG
- "%s: wv_complete(): tx error: no CS.\n",
- dev->name);
-#endif
- }
- if (tx_status & AC_SFLD_S9) {
- dev->stats.tx_carrier_errors++;
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG
- "%s: wv_complete(): tx error: lost CTS.\n",
- dev->name);
-#endif
- }
- if (tx_status & AC_SFLD_S8) {
- dev->stats.tx_fifo_errors++;
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG
- "%s: wv_complete(): tx error: slow DMA.\n",
- dev->name);
-#endif
- }
- if (tx_status & AC_SFLD_S6) {
- dev->stats.tx_heartbeat_errors++;
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG
- "%s: wv_complete(): tx error: heart beat.\n",
- dev->name);
-#endif
- }
- if (tx_status & AC_SFLD_S5) {
- dev->stats.tx_aborted_errors++;
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG
- "%s: wv_complete(): tx error: too many collisions.\n",
- dev->name);
-#endif
- }
- }
-
-#ifdef DEBUG_TX_INFO
- printk(KERN_DEBUG
- "%s: wv_complete(): tx completed, tx_status 0x%04x\n",
- dev->name, tx_status);
-#endif
- }
-
-#ifdef DEBUG_INTERRUPT_INFO
- if (nreaped > 1)
- printk(KERN_DEBUG "%s: wv_complete(): reaped %d\n",
- dev->name, nreaped);
-#endif
-
- /*
- * Inform upper layers.
- */
- if (lp->tx_n_in_use < NTXBLOCKS - 1) {
- netif_wake_queue(dev);
- }
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wv_complete()\n", dev->name);
-#endif
- return nreaped;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Reconfigure the i82586, or at least ask for it.
- * Because wv_82586_config() uses a transmit buffer, we can only run it
- * when we know one is free: either here or in wavelan_packet_xmit()
- * (there is no better place; wavelan_interrupt() is not an option).
- * The reconfiguration may therefore be delayed.
- */
-static void wv_82586_reconfig(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- /* Arm the flag, will be cleared in wv_82586_config() */
- lp->reconfig_82586 = 1;
-
- /* Check if we can do it now ! */
- if((netif_running(dev)) && !(netif_queue_stopped(dev))) {
- spin_lock_irqsave(&lp->spinlock, flags);
- /* May fail */
- wv_82586_config(dev);
- spin_unlock_irqrestore(&lp->spinlock, flags);
- }
- else {
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG
- "%s: wv_82586_reconfig(): delayed (state = %lX)\n",
- dev->name, dev->state);
-#endif
- }
-}
-
-/********************* DEBUG & INFO SUBROUTINES *********************/
-/*
- * This routine is used in the code to show information for debugging.
- * Most of the time, it dumps the contents of hardware structures.
- */
-
-#ifdef DEBUG_PSA_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted contents of the Parameter Storage Area.
- */
-static void wv_psa_show(psa_t * p)
-{
- printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n");
- printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
- p->psa_io_base_addr_1,
- p->psa_io_base_addr_2,
- p->psa_io_base_addr_3, p->psa_io_base_addr_4);
- printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
- p->psa_rem_boot_addr_1,
- p->psa_rem_boot_addr_2, p->psa_rem_boot_addr_3);
- printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
- printk("psa_int_req_no: %d\n", p->psa_int_req_no);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_unused0[]: %pM\n", p->psa_unused0);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "psa_univ_mac_addr[]: %pM\n", p->psa_univ_mac_addr);
- printk(KERN_DEBUG "psa_local_mac_addr[]: %pM\n", p->psa_local_mac_addr);
- printk(KERN_DEBUG "psa_univ_local_sel: %d, ",
- p->psa_univ_local_sel);
- printk("psa_comp_number: %d, ", p->psa_comp_number);
- printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
- printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
- p->psa_feature_select);
- printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
- printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
- printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
- printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0],
- p->psa_nwid[1]);
- printk("psa_nwid_select: %d\n", p->psa_nwid_select);
- printk(KERN_DEBUG "psa_encryption_select: %d, ",
- p->psa_encryption_select);
- printk
- ("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- p->psa_encryption_key[0], p->psa_encryption_key[1],
- p->psa_encryption_key[2], p->psa_encryption_key[3],
- p->psa_encryption_key[4], p->psa_encryption_key[5],
- p->psa_encryption_key[6], p->psa_encryption_key[7]);
- printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
- printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
- p->psa_call_code[0]);
- printk
- ("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- p->psa_call_code[0], p->psa_call_code[1], p->psa_call_code[2],
- p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
- p->psa_call_code[6], p->psa_call_code[7]);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
- p->psa_reserved[0],
- p->psa_reserved[1]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
- printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
- printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
-} /* wv_psa_show */
-#endif /* DEBUG_PSA_SHOW */
-
-#ifdef DEBUG_MMC_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the Modem Management Controller.
- * This function needs to be completed.
- */
-static void wv_mmc_show(struct net_device * dev)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- mmr_t m;
-
- /* Basic check */
- if (hasr_read(ioaddr) & HASR_NO_CLK) {
- printk(KERN_WARNING
- "%s: wv_mmc_show: modem not connected\n",
- dev->name);
- return;
- }
-
- /* Read the mmc */
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
- mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m));
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
-
- /* Don't forget to update statistics */
- lp->wstats.discard.nwid +=
- (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-
- printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG
- "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- m.mmr_unused0[0], m.mmr_unused0[1], m.mmr_unused0[2],
- m.mmr_unused0[3], m.mmr_unused0[4], m.mmr_unused0[5],
- m.mmr_unused0[6], m.mmr_unused0[7]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "Encryption algorithm: %02X - Status: %02X\n",
- m.mmr_des_avail, m.mmr_des_status);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
- m.mmr_unused1[0],
- m.mmr_unused1[1],
- m.mmr_unused1[2], m.mmr_unused1[3], m.mmr_unused1[4]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
- m.mmr_dce_status,
- (m.
- mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ?
- "energy detected," : "",
- (m.
- mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
- "loop test indicated," : "",
- (m.
- mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ?
- "transmitter on," : "",
- (m.
- mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
- "jabber timer expired," : "");
- printk(KERN_DEBUG "Dsp ID: %02X\n", m.mmr_dsp_id);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
- m.mmr_unused2[0], m.mmr_unused2[1]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
- (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
- (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
- printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
- m.mmr_thr_pre_set & MMR_THR_PRE_SET,
- (m.
- mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" :
- "below");
- printk(KERN_DEBUG "signal_lvl: %d [%s], ",
- m.mmr_signal_lvl & MMR_SIGNAL_LVL,
- (m.
- mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" :
- "no new msg");
- printk("silence_lvl: %d [%s], ",
- m.mmr_silence_lvl & MMR_SILENCE_LVL,
- (m.
- mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" :
- "no new update");
- printk("sgnl_qual: 0x%x [%s]\n", m.mmr_sgnl_qual & MMR_SGNL_QUAL,
- (m.
- mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" :
- "Antenna 0");
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
-#endif /* DEBUG_SHOW_UNUSED */
-} /* wv_mmc_show */
-#endif /* DEBUG_MMC_SHOW */
-
-#ifdef DEBUG_I82586_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the last block of the i82586 memory.
- */
-static void wv_scb_show(unsigned long ioaddr)
-{
- scb_t scb;
-
- obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
- sizeof(scb));
-
- printk(KERN_DEBUG "##### WaveLAN system control block: #####\n");
-
- printk(KERN_DEBUG "status: ");
- printk("stat 0x%x[%s%s%s%s] ",
- (scb.
- scb_status & (SCB_ST_CX | SCB_ST_FR | SCB_ST_CNA |
- SCB_ST_RNR)) >> 12,
- (scb.
- scb_status & SCB_ST_CX) ? "command completion interrupt," :
- "", (scb.scb_status & SCB_ST_FR) ? "frame received," : "",
- (scb.
- scb_status & SCB_ST_CNA) ? "command unit not active," : "",
- (scb.
- scb_status & SCB_ST_RNR) ? "receiving unit not ready," :
- "");
- printk("cus 0x%x[%s%s%s] ", (scb.scb_status & SCB_ST_CUS) >> 8,
- ((scb.scb_status & SCB_ST_CUS) ==
- SCB_ST_CUS_IDLE) ? "idle" : "",
- ((scb.scb_status & SCB_ST_CUS) ==
- SCB_ST_CUS_SUSP) ? "suspended" : "",
- ((scb.scb_status & SCB_ST_CUS) ==
- SCB_ST_CUS_ACTV) ? "active" : "");
- printk("rus 0x%x[%s%s%s%s]\n", (scb.scb_status & SCB_ST_RUS) >> 4,
- ((scb.scb_status & SCB_ST_RUS) ==
- SCB_ST_RUS_IDLE) ? "idle" : "",
- ((scb.scb_status & SCB_ST_RUS) ==
- SCB_ST_RUS_SUSP) ? "suspended" : "",
- ((scb.scb_status & SCB_ST_RUS) ==
- SCB_ST_RUS_NRES) ? "no resources" : "",
- ((scb.scb_status & SCB_ST_RUS) ==
- SCB_ST_RUS_RDY) ? "ready" : "");
-
- printk(KERN_DEBUG "command: ");
- printk("ack 0x%x[%s%s%s%s] ",
- (scb.
- scb_command & (SCB_CMD_ACK_CX | SCB_CMD_ACK_FR |
- SCB_CMD_ACK_CNA | SCB_CMD_ACK_RNR)) >> 12,
- (scb.
- scb_command & SCB_CMD_ACK_CX) ? "ack cmd completion," : "",
- (scb.
- scb_command & SCB_CMD_ACK_FR) ? "ack frame received," : "",
- (scb.
- scb_command & SCB_CMD_ACK_CNA) ? "ack CU not active," : "",
- (scb.
- scb_command & SCB_CMD_ACK_RNR) ? "ack RU not ready," : "");
- printk("cuc 0x%x[%s%s%s%s%s] ",
- (scb.scb_command & SCB_CMD_CUC) >> 8,
- ((scb.scb_command & SCB_CMD_CUC) ==
- SCB_CMD_CUC_NOP) ? "nop" : "",
- ((scb.scb_command & SCB_CMD_CUC) ==
- SCB_CMD_CUC_GO) ? "start cbl_offset" : "",
- ((scb.scb_command & SCB_CMD_CUC) ==
- SCB_CMD_CUC_RES) ? "resume execution" : "",
- ((scb.scb_command & SCB_CMD_CUC) ==
- SCB_CMD_CUC_SUS) ? "suspend execution" : "",
- ((scb.scb_command & SCB_CMD_CUC) ==
- SCB_CMD_CUC_ABT) ? "abort execution" : "");
- printk("ruc 0x%x[%s%s%s%s%s]\n",
- (scb.scb_command & SCB_CMD_RUC) >> 4,
- ((scb.scb_command & SCB_CMD_RUC) ==
- SCB_CMD_RUC_NOP) ? "nop" : "",
- ((scb.scb_command & SCB_CMD_RUC) ==
- SCB_CMD_RUC_GO) ? "start rfa_offset" : "",
- ((scb.scb_command & SCB_CMD_RUC) ==
- SCB_CMD_RUC_RES) ? "resume reception" : "",
- ((scb.scb_command & SCB_CMD_RUC) ==
- SCB_CMD_RUC_SUS) ? "suspend reception" : "",
- ((scb.scb_command & SCB_CMD_RUC) ==
- SCB_CMD_RUC_ABT) ? "abort reception" : "");
-
- printk(KERN_DEBUG "cbl_offset 0x%x ", scb.scb_cbl_offset);
- printk("rfa_offset 0x%x\n", scb.scb_rfa_offset);
-
- printk(KERN_DEBUG "crcerrs %d ", scb.scb_crcerrs);
- printk("alnerrs %d ", scb.scb_alnerrs);
- printk("rscerrs %d ", scb.scb_rscerrs);
- printk("ovrnerrs %d\n", scb.scb_ovrnerrs);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the i82586's receive unit.
- */
-static void wv_ru_show(struct net_device * dev)
-{
- printk(KERN_DEBUG
- "##### WaveLAN i82586 receiver unit status: #####\n");
- printk(KERN_DEBUG "ru:");
- /*
- * Not implemented yet
- */
- printk("\n");
-} /* wv_ru_show */
-
-/*------------------------------------------------------------------*/
-/*
- * Display info about one control block of the i82586 memory.
- */
-static void wv_cu_show_one(struct net_device * dev, net_local * lp, int i, u16 p)
-{
- unsigned long ioaddr;
- ac_tx_t actx;
-
- ioaddr = dev->base_addr;
-
- printk("%d: 0x%x:", i, p);
-
- obram_read(ioaddr, p, (unsigned char *) &actx, sizeof(actx));
- printk(" status=0x%x,", actx.tx_h.ac_status);
- printk(" command=0x%x,", actx.tx_h.ac_command);
-
- /*
- {
- tbd_t tbd;
-
- obram_read(ioaddr, actx.tx_tbd_offset, (unsigned char *)&tbd, sizeof(tbd));
- printk(" tbd_status=0x%x,", tbd.tbd_status);
- }
- */
-
- printk("|");
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Print status of the command unit of the i82586.
- */
-static void wv_cu_show(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned int i;
- u16 p;
-
- printk(KERN_DEBUG
- "##### WaveLAN i82586 command unit status: #####\n");
-
- printk(KERN_DEBUG);
- for (i = 0, p = lp->tx_first_in_use; i < NTXBLOCKS; i++) {
- wv_cu_show_one(dev, lp, i, p);
-
- p += TXBLOCKZ;
- if (p >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
- p -= NTXBLOCKS * TXBLOCKZ;
- }
- printk("\n");
-}
-#endif /* DEBUG_I82586_SHOW */
-
-#ifdef DEBUG_DEVICE_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the WaveLAN ISA device driver.
- */
-static void wv_dev_show(struct net_device * dev)
-{
- printk(KERN_DEBUG "dev:");
- printk(" state=%lX,", dev->state);
- printk(" trans_start=%ld,", dev->trans_start);
- printk(" flags=0x%x,", dev->flags);
- printk("\n");
-} /* wv_dev_show */
-
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the WaveLAN ISA device driver's
- * private information.
- */
-static void wv_local_show(struct net_device * dev)
-{
- net_local *lp;
-
- lp = netdev_priv(dev);
-
- printk(KERN_DEBUG "local:");
- printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
- printk(" hacr=0x%x,", lp->hacr);
- printk(" rx_head=0x%x,", lp->rx_head);
- printk(" rx_last=0x%x,", lp->rx_last);
- printk(" tx_first_free=0x%x,", lp->tx_first_free);
- printk(" tx_first_in_use=0x%x,", lp->tx_first_in_use);
- printk("\n");
-} /* wv_local_show */
-#endif /* DEBUG_DEVICE_SHOW */
-
-#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
-/*------------------------------------------------------------------*/
-/*
- * Dump packet header (and content if necessary) on the screen
- */
-static inline void wv_packet_info(u8 * p, /* Packet to dump */
- int length, /* Length of the packet */
- char *msg1, /* Name of the device */
- char *msg2)
-{ /* Name of the function */
- int i;
- int maxi;
-
- printk(KERN_DEBUG
- "%s: %s(): dest %pM, length %d\n",
- msg1, msg2, p, length);
- printk(KERN_DEBUG
- "%s: %s(): src %pM, type 0x%02X%02X\n",
- msg1, msg2, &p[6], p[12], p[13]);
-
-#ifdef DEBUG_PACKET_DUMP
-
- printk(KERN_DEBUG "data=\"");
-
- if ((maxi = length) > DEBUG_PACKET_DUMP)
- maxi = DEBUG_PACKET_DUMP;
- for (i = 14; i < maxi; i++)
- if (p[i] >= ' ' && p[i] <= '~')
- printk(" %c", p[i]);
- else
- printk("%02X", p[i]);
- if (maxi < length)
- printk("..");
- printk("\"\n");
- printk(KERN_DEBUG "\n");
-#endif /* DEBUG_PACKET_DUMP */
-}
-#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
-
-/*------------------------------------------------------------------*/
-/*
- * This is the information which is displayed by the driver at startup.
- * There are lots of flags for configuring it to your liking.
- */
-static void wv_init_info(struct net_device * dev)
-{
- short ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
-
- /* Read the parameter storage area */
- psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
-
-#ifdef DEBUG_PSA_SHOW
- wv_psa_show(&psa);
-#endif
-#ifdef DEBUG_MMC_SHOW
- wv_mmc_show(dev);
-#endif
-#ifdef DEBUG_I82586_SHOW
- wv_cu_show(dev);
-#endif
-
-#ifdef DEBUG_BASIC_SHOW
- /* Now, let's go for the basic stuff. */
- printk(KERN_NOTICE "%s: WaveLAN at %#x, %pM, IRQ %d",
- dev->name, ioaddr, dev->dev_addr, dev->irq);
-
- /* Print current network ID. */
- if (psa.psa_nwid_select)
- printk(", nwid 0x%02X-%02X", psa.psa_nwid[0],
- psa.psa_nwid[1]);
- else
- printk(", nwid off");
-
- /* If 2.00 card */
- if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- unsigned short freq;
-
- /* Ask the EEPROM to read the frequency from the first area. */
- fee_read(ioaddr, 0x00, &freq, 1);
-
- /* Print frequency */
- printk(", 2.00, %ld", (freq >> 6) + 2400L);
-
- /* Hack! */
- if (freq & 0x20)
- printk(".5");
- } else {
- printk(", PC");
- switch (psa.psa_comp_number) {
- case PSA_COMP_PC_AT_915:
- case PSA_COMP_PC_AT_2400:
- printk("-AT");
- break;
- case PSA_COMP_PC_MC_915:
- case PSA_COMP_PC_MC_2400:
- printk("-MC");
- break;
- case PSA_COMP_PCMCIA_915:
- printk("MCIA");
- break;
- default:
- printk("?");
- }
- printk(", ");
- switch (psa.psa_subband) {
- case PSA_SUBBAND_915:
- printk("915");
- break;
- case PSA_SUBBAND_2425:
- printk("2425");
- break;
- case PSA_SUBBAND_2460:
- printk("2460");
- break;
- case PSA_SUBBAND_2484:
- printk("2484");
- break;
- case PSA_SUBBAND_2430_5:
- printk("2430.5");
- break;
- default:
- printk("?");
- }
- }
-
- printk(" MHz\n");
-#endif /* DEBUG_BASIC_SHOW */
-
-#ifdef DEBUG_VERSION_SHOW
- /* Print version information */
- printk(KERN_NOTICE "%s", version);
-#endif
-} /* wv_init_info */
-
-/********************* IOCTL, STATS & RECONFIG *********************/
-/*
- * These routines are called by Linux after configuration, outside the
- * data-transmission path, for example when the user runs ifconfig,
- * reads /proc/net/dev or issues wireless-extension requests.
- */
-
-
-/*------------------------------------------------------------------*/
-/*
- * Set or clear the multicast filter for this adaptor.
- * num_addrs == -1 Promiscuous mode, receive all packets
- * num_addrs == 0 Normal mode, clear multicast list
- * num_addrs > 0 Multicast mode, receive normal and MC packets,
- * and do best-effort filtering.
- */
-static void wavelan_set_multicast_list(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n",
- dev->name);
-#endif
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG
- "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, netdev_mc_count(dev));
-#endif
-
- /* Are we asking for promiscuous mode,
- * or all multicast addresses (we don't have that!)
- * or too many multicast addresses for the hardware filter? */
- if ((dev->flags & IFF_PROMISC) ||
- (dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > I82586_MAX_MULTICAST_ADDRESSES)) {
- /*
- * Enable promiscuous mode: receive all packets.
- */
- if (!lp->promiscuous) {
- lp->promiscuous = 1;
- lp->mc_count = 0;
-
- wv_82586_reconfig(dev);
- }
- } else
- /* Are there multicast addresses to send? */
- if (!netdev_mc_empty(dev)) {
- /*
- * Disable promiscuous mode, but receive all packets
- * in multicast list
- */
-#ifdef MULTICAST_AVOID
- if (lp->promiscuous || (netdev_mc_count(dev) != lp->mc_count))
-#endif
- {
- lp->promiscuous = 0;
- lp->mc_count = netdev_mc_count(dev);
-
- wv_82586_reconfig(dev);
- }
- } else {
- /*
- * Switch to normal mode: disable promiscuous mode and
- * clear the multicast list.
- */
- if (lp->promiscuous || lp->mc_count == 0) {
- lp->promiscuous = 0;
- lp->mc_count = 0;
-
- wv_82586_reconfig(dev);
- }
- }
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n",
- dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Set the MAC address. Normally compiled out (SET_MAC_ADDRESS undefined).
- * (Note: it was a nice way to test the reconfiguration code...)
- */
-#ifdef SET_MAC_ADDRESS
-static int wavelan_set_mac_address(struct net_device * dev, void *addr)
-{
- struct sockaddr *mac = addr;
-
- /* Copy the address. */
- memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
-
- /* Reconfigure the beast. */
- wv_82586_reconfig(dev);
-
- return 0;
-}
-#endif /* SET_MAC_ADDRESS */
-
-
-/*------------------------------------------------------------------*/
-/*
- * Frequency setting (for hardware capable of it)
- * It's a bit complicated and you don't really want to look into it.
- * (called in wavelan_ioctl)
- */
-static int wv_set_frequency(unsigned long ioaddr, /* I/O port of the card */
- iw_freq * frequency)
-{
- const int BAND_NUM = 10; /* Number of bands */
- long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
-#ifdef DEBUG_IOCTL_INFO
- int i;
-#endif
-
- /* Setting by frequency */
- /* Theoretically, you may set any frequency between
- * the two limits with a 0.5 MHz precision. In practice,
- * I don't want you to have trouble with local regulations.
- */
- if ((frequency->e == 1) &&
- (frequency->m >= (int) 2.412e8)
- && (frequency->m <= (int) 2.487e8)) {
- freq = ((frequency->m / 10000) - 24000L) / 5;
- }
-
- /* Setting by channel (same as wfreqsel) */
- /* Warning: each channel is 22 MHz wide, so some of the channels
- * will interfere. */
- if ((frequency->e == 0) && (frequency->m < BAND_NUM)) {
- /* Get frequency offset. */
- freq = channel_bands[frequency->m] >> 1;
- }
-
- /* Verify that the frequency is allowed. */
- if (freq != 0L) {
- u16 table[10]; /* Authorized frequency table */
-
- /* Read the frequency table. */
- fee_read(ioaddr, 0x71, table, 10);
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "Frequency table: ");
- for (i = 0; i < 10; i++) {
- printk(" %04X", table[i]);
- }
- printk("\n");
-#endif
-
- /* Look in the table to see whether the frequency is allowed. */
- if (!(table[9 - ((freq - 24) / 16)] &
- (1 << ((freq - 24) % 16)))) return -EINVAL; /* not allowed */
- } else
- return -EINVAL;
-
- /* if we get a usable frequency */
- if (freq != 0L) {
- unsigned short area[16];
- unsigned short dac[2];
- unsigned short area_verify[16];
- unsigned short dac_verify[2];
- /* Corresponding gain (in the power adjust value table)
- * See AT&T WaveLAN Data Manual, REF 407-024689/E, page 3-8
- * and WCIN062D.DOC, page 6.2.9. */
- unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
- int power_band = 0; /* Selected band */
- unsigned short power_adjust; /* Correct value */
-
- /* Search for the gain. */
- power_band = 0;
- while ((freq > power_limit[power_band]) &&
- (power_limit[++power_band] != 0));
-
- /* Read the first area. */
- fee_read(ioaddr, 0x00, area, 16);
-
- /* Read the DAC. */
- fee_read(ioaddr, 0x60, dac, 2);
-
- /* Read the new power adjust value. */
- fee_read(ioaddr, 0x6B - (power_band >> 1), &power_adjust,
- 1);
- if (power_band & 0x1)
- power_adjust >>= 8;
- else
- power_adjust &= 0xFF;
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "WaveLAN EEPROM Area 1: ");
- for (i = 0; i < 16; i++) {
- printk(" %04X", area[i]);
- }
- printk("\n");
-
- printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
- dac[0], dac[1]);
-#endif
-
- /* Frequency offset (for info only) */
- area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
-
- /* Receiver Principle main divider coefficient */
- area[3] = (freq >> 1) + 2400L - 352L;
- area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
-
- /* Transmitter Main divider coefficient */
- area[13] = (freq >> 1) + 2400L;
- area[12] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
-
- /* Other parts of the area are flags, bit streams or unused. */
-
- /* Set the value in the DAC. */
- dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
- dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
-
- /* Write the first area. */
- fee_write(ioaddr, 0x00, area, 16);
-
- /* Write the DAC. */
- fee_write(ioaddr, 0x60, dac, 2);
-
- /* Verify that the write to the EEPROM went OK. */
-
- /* Reread the first area. */
- fee_read(ioaddr, 0x00, area_verify, 16);
-
- /* Reread the DAC. */
- fee_read(ioaddr, 0x60, dac_verify, 2);
-
- /* Compare. */
- if (memcmp(area, area_verify, 16 * 2) ||
- memcmp(dac, dac_verify, 2 * 2)) {
-#ifdef DEBUG_IOCTL_ERROR
- printk(KERN_INFO
- "WaveLAN: wv_set_frequency: unable to write new frequency to EEPROM(?).\n");
-#endif
- return -EOPNOTSUPP;
- }
-
- /* We must download the frequency parameters to the
- * synthesizers (from the EEPROM - area 1)
- * Note: as the EEPROM address is automatically decremented, we point
- * at the end of the area... */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x0F);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
-
- /* Wait until the download is finished. */
- fee_wait(ioaddr, 100, 100);
-
- /* We must now download the power adjust value (gain) to
- * the synthesizers (from the EEPROM - area 7 - DAC). */
- mmc_out(ioaddr, mmwoff(0, mmw_fee_addr), 0x61);
- mmc_out(ioaddr, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
-
- /* Wait for the download to finish. */
- fee_wait(ioaddr, 100, 100);
-
-#ifdef DEBUG_IOCTL_INFO
- /* Verification of what we have done */
-
- printk(KERN_DEBUG "WaveLAN EEPROM Area 1: ");
- for (i = 0; i < 16; i++) {
- printk(" %04X", area_verify[i]);
- }
- printk("\n");
-
- printk(KERN_DEBUG "WaveLAN EEPROM DAC: %04X %04X\n",
- dac_verify[0], dac_verify[1]);
-#endif
-
- return 0;
- } else
- return -EINVAL; /* Bah, never get there... */
-}
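
The arithmetic above is easier to follow with a worked example. Frequencies are
kept as an offset from 2.4 GHz in 0.5 MHz steps: an iw_freq of 2.432 GHz
(e = 1, m = 243200000) gives (243200000 / 10000 - 24000) / 5 = 64, i.e. 32 MHz
above 2.4 GHz, and offset 64 lands on bit (64 - 24) % 16 = 8 of
table[9 - (64 - 24) / 16] = table[7] in the allowed-frequency bitmap read from
the EEPROM. A self-contained sketch of the two conversions (illustrative, not
driver code):

	#include <stdio.h>

	/* iw_freq with e == 1 -> offset from 2.4 GHz in 0.5 MHz steps. */
	static long iw_to_half_mhz(long m)
	{
		return ((m / 10000) - 24000L) / 5;
	}

	/* Which word/bit of the 10-word allowed-frequency table covers this offset? */
	static void allowed_bit(long freq, int *word, int *bit)
	{
		*word = 9 - (int) ((freq - 24) / 16);
		*bit  = (int) ((freq - 24) % 16);
	}

	int main(void)
	{
		long freq = iw_to_half_mhz(243200000L);	/* 2.432 GHz */
		int word, bit;

		allowed_bit(freq, &word, &bit);
		printf("offset %ld (%ld.%ld MHz), table[%d] bit %d\n",
		       freq, 2400 + freq / 2, (freq & 1) ? 5L : 0L, word, bit);
		return 0;
	}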
-
-/*------------------------------------------------------------------*/
-/*
- * Give the list of available frequencies.
- */
-static int wv_frequency_list(unsigned long ioaddr, /* I/O port of the card */
- iw_freq * list, /* List of frequencies to fill */
- int max)
-{ /* Maximum number of frequencies */
- u16 table[10]; /* Authorized frequency table */
- long freq = 0L; /* offset in 0.5 MHz steps, counted from 2.412 GHz */
- int i; /* index in the table */
- int c = 0; /* Channel number */
-
- /* Read the frequency table. */
- fee_read(ioaddr, 0x71 /* frequency table */ , table, 10);
-
- /* Check all frequencies. */
- i = 0;
- for (freq = 0; freq < 150; freq++)
- /* Look in the table if the frequency is allowed */
- if (table[9 - (freq / 16)] & (1 << (freq % 16))) {
- /* Compute approximate channel number */
- while ((c < ARRAY_SIZE(channel_bands)) &&
- (((channel_bands[c] >> 1) - 24) < freq))
- c++;
- list[i].i = c; /* Set the list index */
-
- /* put in the list */
- list[i].m = (((freq + 24) * 5) + 24000L) * 10000;
- list[i++].e = 1;
-
- /* Check number. */
- if (i >= max)
- return (i);
- }
-
- return (i);
-}
-
-#ifdef IW_WIRELESS_SPY
-/*------------------------------------------------------------------*/
-/*
- * Gather wireless spy statistics: for each packet, compare the source
- * address with our list, and if they match, get the statistics.
- * Sorry, but this function really needs the wireless extensions.
- */
-static inline void wl_spy_gather(struct net_device * dev,
- u8 * mac, /* MAC address */
- u8 * stats) /* Statistics to gather */
-{
- struct iw_quality wstats;
-
- wstats.qual = stats[2] & MMR_SGNL_QUAL;
- wstats.level = stats[0] & MMR_SIGNAL_LVL;
- wstats.noise = stats[1] & MMR_SILENCE_LVL;
- wstats.updated = 0x7;
-
- /* Update spy records */
- wireless_spy_update(dev, mac, &wstats);
-}
-#endif /* IW_WIRELESS_SPY */
-
-#ifdef HISTOGRAM
-/*------------------------------------------------------------------*/
-/*
- * This function calculates a histogram of the signal level.
- * As the noise is quite constant, it's like doing it on the SNR.
- * We have defined a set of intervals (lp->his_range), and each time
- * the level falls in one of those intervals, we increment the count (lp->his_sum).
- * With this histogram you may detect whether a WaveLAN link is really weak,
- * and you may also calculate the mean and standard deviation of the level.
- */
-static inline void wl_his_gather(struct net_device * dev, u8 * stats)
-{ /* Statistics to gather */
- net_local *lp = netdev_priv(dev);
- u8 level = stats[0] & MMR_SIGNAL_LVL;
- int i;
-
- /* Find the correct interval. */
- i = 0;
- while ((i < (lp->his_number - 1))
- && (level >= lp->his_range[i++]));
-
- /* Increment interval counter. */
- (lp->his_sum[i])++;
-}
-#endif /* HISTOGRAM */
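/*
 * Illustrative sketch (not part of the driver): the comment above notes
 * that the histogram can be turned into a mean (or standard deviation)
 * of the signal level.  A rough mean, approximating each interval by
 * its lower bound in his_range (the first bucket, everything below
 * his_range[0], is approximated by 0).  The helper name is hypothetical.
 */
static long wv_his_mean(net_local *lp)
{
	long count = 0, weighted = 0;
	int i;

	for (i = 0; i < lp->his_number; i++) {
		long level = (i == 0) ? 0 : lp->his_range[i - 1];

		count += lp->his_sum[i];
		weighted += level * lp->his_sum[i];
	}
	return count ? weighted / count : 0;
}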
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get protocol name
- */
-static int wavelan_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- strcpy(wrqu->name, "WaveLAN");
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set NWID
- */
-static int wavelan_set_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- mm_t m;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Set NWID in WaveLAN. */
- if (!wrqu->nwid.disabled) {
- /* Set NWID in psa */
- psa.psa_nwid[0] = (wrqu->nwid.value & 0xFF00) >> 8;
- psa.psa_nwid[1] = wrqu->nwid.value & 0xFF;
- psa.psa_nwid_select = 0x01;
- psa_write(ioaddr, lp->hacr,
- (char *) psa.psa_nwid - (char *) &psa,
- (unsigned char *) psa.psa_nwid, 3);
-
- /* Set NWID in mmc. */
- m.w.mmw_netw_id_l = psa.psa_nwid[1];
- m.w.mmw_netw_id_h = psa.psa_nwid[0];
- mmc_write(ioaddr,
- (char *) &m.w.mmw_netw_id_l -
- (char *) &m,
- (unsigned char *) &m.w.mmw_netw_id_l, 2);
- mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel), 0x00);
- } else {
- /* Disable NWID in the psa. */
- psa.psa_nwid_select = 0x00;
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_nwid_select -
- (char *) &psa,
- (unsigned char *) &psa.psa_nwid_select,
- 1);
-
- /* Disable NWID in the mmc (no filtering). */
- mmc_out(ioaddr, mmwoff(0, mmw_loopt_sel),
- MMW_LOOPT_SEL_DIS_NWID);
- }
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, lp->hacr);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get NWID
- */
-static int wavelan_get_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Read the NWID. */
- psa_read(ioaddr, lp->hacr,
- (char *) psa.psa_nwid - (char *) &psa,
- (unsigned char *) psa.psa_nwid, 3);
- wrqu->nwid.value = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
- wrqu->nwid.disabled = !(psa.psa_nwid_select);
- wrqu->nwid.fixed = 1; /* Superfluous */
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set frequency
- */
-static int wavelan_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- unsigned long flags;
- int ret;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
- if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
- ret = wv_set_frequency(ioaddr, &(wrqu->freq));
- else
- ret = -EOPNOTSUPP;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get frequency
- */
-static int wavelan_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable).
- * Does it work for everybody, especially old cards? */
- if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- unsigned short freq;
-
- /* Ask the EEPROM to read the frequency from the first area. */
- fee_read(ioaddr, 0x00, &freq, 1);
- wrqu->freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
- wrqu->freq.e = 1;
- } else {
- psa_read(ioaddr, lp->hacr,
- (char *) &psa.psa_subband - (char *) &psa,
- (unsigned char *) &psa.psa_subband, 1);
-
- if (psa.psa_subband <= 4) {
- wrqu->freq.m = fixed_bands[psa.psa_subband];
- wrqu->freq.e = (psa.psa_subband != 0);
- } else
- ret = -EOPNOTSUPP;
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set level threshold
- */
-static int wavelan_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Set the level threshold. */
- /* We should complain loudly if wrqu->sens.fixed == 0, because we
- * can't set auto mode... */
- psa.psa_thr_pre_set = wrqu->sens.value & 0x3F;
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_thr_pre_set - (char *) &psa,
- (unsigned char *) &psa.psa_thr_pre_set, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, lp->hacr);
- mmc_out(ioaddr, mmwoff(0, mmw_thr_pre_set),
- psa.psa_thr_pre_set);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get level threshold
- */
-static int wavelan_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Read the level threshold. */
- psa_read(ioaddr, lp->hacr,
- (char *) &psa.psa_thr_pre_set - (char *) &psa,
- (unsigned char *) &psa.psa_thr_pre_set, 1);
- wrqu->sens.value = psa.psa_thr_pre_set & 0x3F;
- wrqu->sens.fixed = 1;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set encryption key
- */
-static int wavelan_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- unsigned long flags;
- psa_t psa;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check if capable of encryption */
- if (!mmc_encr(ioaddr)) {
- ret = -EOPNOTSUPP;
- }
-
- /* Check the size of the key */
- if ((wrqu->encoding.length != 8) && (wrqu->encoding.length != 0)) {
- ret = -EINVAL;
- }
-
- if (!ret) {
- /* Basic checking... */
- if (wrqu->encoding.length == 8) {
- /* Copy the key in the driver */
- memcpy(psa.psa_encryption_key, extra,
- wrqu->encoding.length);
- psa.psa_encryption_select = 1;
-
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.psa_encryption_select,
- 8 + 1);
-
- mmc_out(ioaddr, mmwoff(0, mmw_encr_enable),
- MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
- mmc_write(ioaddr, mmwoff(0, mmw_encr_key),
- (unsigned char *) &psa.psa_encryption_key, 8);
- }
-
- /* disable encryption */
- if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
- psa.psa_encryption_select = 0;
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.psa_encryption_select, 1);
-
- mmc_out(ioaddr, mmwoff(0, mmw_encr_enable), 0);
- }
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, lp->hacr);
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get encryption key
- */
-static int wavelan_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check if encryption is available */
- if (!mmc_encr(ioaddr)) {
- ret = -EOPNOTSUPP;
- } else {
- /* Read the encryption key */
- psa_read(ioaddr, lp->hacr,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.psa_encryption_select, 1 + 8);
-
- /* Is encryption enabled? */
- if (psa.psa_encryption_select)
- wrqu->encoding.flags = IW_ENCODE_ENABLED;
- else
- wrqu->encoding.flags = IW_ENCODE_DISABLED;
- wrqu->encoding.flags |= mmc_encr(ioaddr);
-
- /* Copy the key to the user buffer */
- wrqu->encoding.length = 8;
- memcpy(extra, psa.psa_encryption_key, wrqu->encoding.length);
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get range info
- */
-static int wavelan_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- struct iw_range *range = (struct iw_range *) extra;
- unsigned long flags;
- int ret = 0;
-
- /* Set the length (very important for backward compatibility) */
- wrqu->data.length = sizeof(struct iw_range);
-
- /* Set all the info we don't care about or don't know to zero */
- memset(range, 0, sizeof(struct iw_range));
-
- /* Set the Wireless Extension versions */
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 9;
-
- /* Set information in the range struct. */
- range->throughput = 1.6 * 1000 * 1000; /* don't argue on this ! */
- range->min_nwid = 0x0000;
- range->max_nwid = 0xFFFF;
-
- range->sensitivity = 0x3F;
- range->max_qual.qual = MMR_SGNL_QUAL;
- range->max_qual.level = MMR_SIGNAL_LVL;
- range->max_qual.noise = MMR_SILENCE_LVL;
- range->avg_qual.qual = MMR_SGNL_QUAL; /* Always max */
- /* Need to get better values for those two */
- range->avg_qual.level = 30;
- range->avg_qual.noise = 8;
-
- range->num_bitrates = 1;
- range->bitrate[0] = 2000000; /* 2 Mb/s */
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_MASK(0x8B02) |
- IW_EVENT_CAPA_MASK(0x8B04));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
- if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- range->num_channels = 10;
- range->num_frequency = wv_frequency_list(ioaddr, range->freq,
- IW_MAX_FREQUENCIES);
- } else
- range->num_channels = range->num_frequency = 0;
-
- /* Is encryption supported? */
- if (mmc_encr(ioaddr)) {
- range->encoding_size[0] = 8; /* DES = 64 bits key */
- range->num_encoding_sizes = 1;
- range->max_encoding_tokens = 1; /* Only one key possible */
- } else {
- range->num_encoding_sizes = 0;
- range->max_encoding_tokens = 0;
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set quality threshold
- */
-static int wavelan_set_qthr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- psa.psa_quality_thr = *(extra) & 0x0F;
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_quality_thr - (char *) &psa,
- (unsigned char *) &psa.psa_quality_thr, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, lp->hacr);
- mmc_out(ioaddr, mmwoff(0, mmw_quality_thr),
- psa.psa_quality_thr);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get quality threshold
- */
-static int wavelan_get_qthr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev); /* lp is not unused */
- psa_t psa;
- unsigned long flags;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- psa_read(ioaddr, lp->hacr,
- (char *) &psa.psa_quality_thr - (char *) &psa,
- (unsigned char *) &psa.psa_quality_thr, 1);
- *(extra) = psa.psa_quality_thr & 0x0F;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return 0;
-}
-
-#ifdef HISTOGRAM
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set histogram
- */
-static int wavelan_set_histo(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev); /* lp is not unused */
-
- /* Check the number of intervals. */
- if (wrqu->data.length > 16) {
- return(-E2BIG);
- }
-
- /* Disable the histogram while we copy the new interval ranges.
- * As we don't disable interrupts, we need to do this. */
- lp->his_number = 0;
-
- /* Are there ranges to copy? */
- if (wrqu->data.length > 0) {
- /* Copy interval ranges to the driver */
- memcpy(lp->his_range, extra, wrqu->data.length);
-
- {
- int i;
- printk(KERN_DEBUG "Histo :");
- for(i = 0; i < wrqu->data.length; i++)
- printk(" %d", lp->his_range[i]);
- printk("\n");
- }
-
- /* Reset result structure. */
- memset(lp->his_sum, 0x00, sizeof(long) * 16);
- }
-
- /* Now we can set the number of ranges */
- lp->his_number = wrqu->data.length;
-
- return(0);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get histogram
- */
-static int wavelan_get_histo(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev); /* lp is not unused */
-
- /* Set the number of intervals. */
- wrqu->data.length = lp->his_number;
-
- /* Give back the distribution statistics */
- if(lp->his_number > 0)
- memcpy(extra, lp->his_sum, sizeof(long) * lp->his_number);
-
- return(0);
-}
-#endif /* HISTOGRAM */
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const iw_handler wavelan_handler[] =
-{
- NULL, /* SIOCSIWNAME */
- wavelan_get_name, /* SIOCGIWNAME */
- wavelan_set_nwid, /* SIOCSIWNWID */
- wavelan_get_nwid, /* SIOCGIWNWID */
- wavelan_set_freq, /* SIOCSIWFREQ */
- wavelan_get_freq, /* SIOCGIWFREQ */
- NULL, /* SIOCSIWMODE */
- NULL, /* SIOCGIWMODE */
- wavelan_set_sens, /* SIOCSIWSENS */
- wavelan_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- wavelan_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- NULL, /* SIOCSIWAP */
- NULL, /* SIOCGIWAP */
- NULL, /* -- hole -- */
- NULL, /* SIOCGIWAPLIST */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWESSID */
- NULL, /* SIOCGIWESSID */
- NULL, /* SIOCSIWNICKN */
- NULL, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWRATE */
- NULL, /* SIOCGIWRATE */
- NULL, /* SIOCSIWRTS */
- NULL, /* SIOCGIWRTS */
- NULL, /* SIOCSIWFRAG */
- NULL, /* SIOCGIWFRAG */
- NULL, /* SIOCSIWTXPOW */
- NULL, /* SIOCGIWTXPOW */
- NULL, /* SIOCSIWRETRY */
- NULL, /* SIOCGIWRETRY */
- /* Bummer! Why are these only at the end? */
- wavelan_set_encode, /* SIOCSIWENCODE */
- wavelan_get_encode, /* SIOCGIWENCODE */
-};
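/*
 * Illustrative sketch (not driver code): the wireless extensions core
 * dispatches standard requests by position in the table above, roughly
 * as below, so the slot a handler occupies (cmd - SIOCIWFIRST) is what
 * binds it to an ioctl, not its name.  The real dispatch also validates
 * arguments and copies data to/from user space.
 */
static iw_handler wv_std_handler_sketch(unsigned int cmd)
{
	unsigned int index = cmd - SIOCIWFIRST;

	return (index < ARRAY_SIZE(wavelan_handler)) ?
		wavelan_handler[index] : NULL;
}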
-
-static const iw_handler wavelan_private_handler[] =
-{
- wavelan_set_qthr, /* SIOCIWFIRSTPRIV */
- wavelan_get_qthr, /* SIOCIWFIRSTPRIV + 1 */
-#ifdef HISTOGRAM
- wavelan_set_histo, /* SIOCIWFIRSTPRIV + 2 */
- wavelan_get_histo, /* SIOCIWFIRSTPRIV + 3 */
-#endif /* HISTOGRAM */
-};
-
-static const struct iw_priv_args wavelan_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
- { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
- { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
- { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
- { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
-};
-
-static const struct iw_handler_def wavelan_handler_def =
-{
- .num_standard = ARRAY_SIZE(wavelan_handler),
- .num_private = ARRAY_SIZE(wavelan_private_handler),
- .num_private_args = ARRAY_SIZE(wavelan_private_args),
- .standard = wavelan_handler,
- .private = wavelan_private_handler,
- .private_args = wavelan_private_args,
- .get_wireless_stats = wavelan_get_wireless_stats,
-};
-
-/*------------------------------------------------------------------*/
-/*
- * Get wireless statistics.
- * Called by /proc/net/wireless
- */
-static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- mmr_t m;
- iw_stats *wstats;
- unsigned long flags;
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n",
- dev->name);
-#endif
-
- /* Check */
- if (lp == (net_local *) NULL)
- return (iw_stats *) NULL;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- wstats = &lp->wstats;
-
- /* Get data from the mmc. */
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
-
- mmc_read(ioaddr, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
- mmc_read(ioaddr, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l,
- 2);
- mmc_read(ioaddr, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set,
- 4);
-
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
-
- /* Copy data to wireless stuff. */
- wstats->status = m.mmr_dce_status & MMR_DCE_STATUS;
- wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
- wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
- wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
- wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7)
- | ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6)
- | ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
- wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
- wstats->discard.code = 0L;
- wstats->discard.misc = 0L;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n",
- dev->name);
-#endif
- return &lp->wstats;
-}
-
-/************************* PACKET RECEPTION *************************/
-/*
- * This part deals with receiving the packets.
- * The interrupt handler gets an interrupt when a packet has been
- * successfully received and calls this part.
- */
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does the actual copying of data (including the Ethernet
- * header structure) from the WaveLAN card to an sk_buff chain that
- * will be passed up to the network interface layer. NOTE: we
- * currently don't handle trailer protocols (neither does the rest of
- * the network interface), so if that is needed, it will (at least in
- * part) be added here. The contents of the receive ring buffer are
- * copied to a message chain that is then passed to the kernel.
- *
- * Note: if any errors occur, the packet is "dropped on the floor".
- * (called by wv_packet_rcv())
- */
-static void
-wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- struct sk_buff *skb;
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
- dev->name, buf_off, sksize);
-#endif
-
- /* Allocate buffer for the data */
- if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *) NULL) {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO
- "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC).\n",
- dev->name, sksize);
-#endif
- dev->stats.rx_dropped++;
- return;
- }
-
- /* Copy the packet to the buffer. */
- obram_read(ioaddr, buf_off, skb_put(skb, sksize), sksize);
- skb->protocol = eth_type_trans(skb, dev);
-
-#ifdef DEBUG_RX_INFO
- wv_packet_info(skb_mac_header(skb), sksize, dev->name,
- "wv_packet_read");
-#endif /* DEBUG_RX_INFO */
-
- /* Statistics-gathering and associated stuff.
- * It seems a bit messy with all the defines, but it's really
- * simple... */
- if (
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- (lp->spy_data.spy_number > 0) ||
-#endif /* IW_WIRELESS_SPY */
-#ifdef HISTOGRAM
- (lp->his_number > 0) ||
-#endif /* HISTOGRAM */
- 0) {
- u8 stats[3]; /* signal level, noise level, signal quality */
-
- /* Read signal level, silence level and signal quality bytes */
- /* Note: in the PCMCIA hardware, these are part of the frame.
- * It seems that for the ISA hardware, it's nowhere to be
- * found in the frame, so I'm obliged to do this (it has a
- * side effect on /proc/net/wireless).
- * Any ideas?
- */
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 1);
- mmc_read(ioaddr, mmroff(0, mmr_signal_lvl), stats, 3);
- mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
-
-#ifdef DEBUG_RX_INFO
- printk(KERN_DEBUG
- "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
- dev->name, stats[0] & 0x3F, stats[1] & 0x3F,
- stats[2] & 0x0F);
-#endif
-
- /* Spying stuff */
-#ifdef IW_WIRELESS_SPY
- wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE,
- stats);
-#endif /* IW_WIRELESS_SPY */
-#ifdef HISTOGRAM
- wl_his_gather(dev, stats);
-#endif /* HISTOGRAM */
- }
-
- /*
- * Hand the packet to the network module.
- */
- netif_rx(skb);
-
- /* Keep statistics up to date */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += sksize;
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
-#endif
-}
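/*
 * Illustrative sketch (not driver code): the generic shape of the
 * receive hand-off performed above -- allocate an skb, copy the frame
 * into it, let the stack classify it, then queue it for the network
 * layer.  The function name and the flat 'frame' buffer are assumptions
 * for the example; the real code reads from on-board RAM instead.
 */
static void wv_rx_handoff_sketch(struct net_device *dev,
				 const u8 *frame, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}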
-
-/*------------------------------------------------------------------*/
-/*
- * Transfer as many packets as we can
- * from the device RAM.
- * (called in wavelan_interrupt()).
- * Note : the spinlock is already grabbed for us.
- */
-static void wv_receive(struct net_device * dev)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- fd_t fd;
- rbd_t rbd;
- int nreaped = 0;
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: ->wv_receive()\n", dev->name);
-#endif
-
- /* Loop on each received packet. */
- for (;;) {
- obram_read(ioaddr, lp->rx_head, (unsigned char *) &fd,
- sizeof(fd));
-
- /* Note about the status:
- * It starts out as 0 (the value we set). Then, when the RU
- * grabs the buffer to prepare for reception, it sets the
- * FD_STATUS_B flag. When the RU has finished receiving the
- * frame, it clears FD_STATUS_B, sets FD_STATUS_C to indicate
- * completion and sets the other flags to report any
- * errors. FD_STATUS_OK indicates that the reception was OK.
- */
-
- /* If the current frame is not complete, we have reached the end. */
- if ((fd.fd_status & FD_STATUS_C) != FD_STATUS_C)
- break; /* This is how we exit the loop. */
-
- nreaped++;
-
- /* Check whether frame was correctly received. */
- if ((fd.fd_status & FD_STATUS_OK) == FD_STATUS_OK) {
- /* Does the frame contain a pointer to the data? Let's check. */
- if (fd.fd_rbd_offset != I82586NULL) {
- /* Read the receive buffer descriptor */
- obram_read(ioaddr, fd.fd_rbd_offset,
- (unsigned char *) &rbd,
- sizeof(rbd));
-
-#ifdef DEBUG_RX_ERROR
- if ((rbd.rbd_status & RBD_STATUS_EOF) != RBD_STATUS_EOF)
- printk(KERN_INFO
- "%s: wv_receive(): missing EOF flag.\n",
- dev->name);
-
- if ((rbd.rbd_status & RBD_STATUS_F) != RBD_STATUS_F)
- printk(KERN_INFO
- "%s: wv_receive(): missing F flag.\n",
- dev->name);
-#endif /* DEBUG_RX_ERROR */
-
- /* Read the packet and transmit to Linux */
- wv_packet_read(dev, rbd.rbd_bufl,
- rbd.rbd_status & RBD_STATUS_ACNT);
- }
-#ifdef DEBUG_RX_ERROR
- else /* if frame has no data */
- printk(KERN_INFO
- "%s: wv_receive(): frame has no data.\n",
- dev->name);
-#endif
- } else { /* If reception was not successful */
-
- dev->stats.rx_errors++;
-
-#ifdef DEBUG_RX_INFO
- printk(KERN_DEBUG
- "%s: wv_receive(): frame not received successfully (%X).\n",
- dev->name, fd.fd_status);
-#endif
-
-#ifdef DEBUG_RX_ERROR
- if ((fd.fd_status & FD_STATUS_S6) != 0)
- printk(KERN_INFO
- "%s: wv_receive(): no EOF flag.\n",
- dev->name);
-#endif
-
- if ((fd.fd_status & FD_STATUS_S7) != 0) {
- dev->stats.rx_length_errors++;
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG
- "%s: wv_receive(): frame too short.\n",
- dev->name);
-#endif
- }
-
- if ((fd.fd_status & FD_STATUS_S8) != 0) {
- dev->stats.rx_over_errors++;
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG
- "%s: wv_receive(): rx DMA overrun.\n",
- dev->name);
-#endif
- }
-
- if ((fd.fd_status & FD_STATUS_S9) != 0) {
- dev->stats.rx_fifo_errors++;
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG
- "%s: wv_receive(): ran out of resources.\n",
- dev->name);
-#endif
- }
-
- if ((fd.fd_status & FD_STATUS_S10) != 0) {
- dev->stats.rx_frame_errors++;
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG
- "%s: wv_receive(): alignment error.\n",
- dev->name);
-#endif
- }
-
- if ((fd.fd_status & FD_STATUS_S11) != 0) {
- dev->stats.rx_crc_errors++;
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG
- "%s: wv_receive(): CRC error.\n",
- dev->name);
-#endif
- }
- }
-
- fd.fd_status = 0;
- obram_write(ioaddr, fdoff(lp->rx_head, fd_status),
- (unsigned char *) &fd.fd_status,
- sizeof(fd.fd_status));
-
- fd.fd_command = FD_COMMAND_EL;
- obram_write(ioaddr, fdoff(lp->rx_head, fd_command),
- (unsigned char *) &fd.fd_command,
- sizeof(fd.fd_command));
-
- fd.fd_command = 0;
- obram_write(ioaddr, fdoff(lp->rx_last, fd_command),
- (unsigned char *) &fd.fd_command,
- sizeof(fd.fd_command));
-
- lp->rx_last = lp->rx_head;
- lp->rx_head = fd.fd_link_offset;
- } /* for(;;) -> loop on all frames */
-
-#ifdef DEBUG_RX_INFO
- if (nreaped > 1)
- printk(KERN_DEBUG "%s: wv_receive(): reaped %d\n",
- dev->name, nreaped);
-#endif
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: <-wv_receive()\n", dev->name);
-#endif
-}
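/*
 * Illustrative sketch (not driver code) of the fd_status life cycle
 * described in the loop above: the driver writes 0, the receive unit
 * sets FD_STATUS_B while it owns the descriptor, then clears it and
 * sets FD_STATUS_C on completion, with FD_STATUS_OK marking a clean
 * reception.  The enum and helper are hypothetical.
 */
enum wv_fd_state { WV_FD_FREE, WV_FD_BUSY, WV_FD_DONE_OK, WV_FD_DONE_ERR };

static enum wv_fd_state wv_fd_classify(u16 fd_status)
{
	if ((fd_status & FD_STATUS_C) == FD_STATUS_C)
		return ((fd_status & FD_STATUS_OK) == FD_STATUS_OK) ?
			WV_FD_DONE_OK : WV_FD_DONE_ERR;
	if (fd_status & FD_STATUS_B)
		return WV_FD_BUSY;
	return WV_FD_FREE;
}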
-
-/*********************** PACKET TRANSMISSION ***********************/
-/*
- * This part deals with sending packets through the WaveLAN.
- *
- */
-
-/*------------------------------------------------------------------*/
-/*
- * This routine fills in the appropriate registers and memory
- * locations on the WaveLAN card and starts the card off on
- * the transmit.
- *
- * The principle:
- * Each block contains a transmit command, a NOP command,
- * a transmit block descriptor and a buffer.
- * The CU reads the transmit block, which points to the tbd,
- * then reads the tbd and the contents of the buffer.
- * When it has finished with them, it goes to the next command,
- * which in our case is the NOP. The NOP points to itself,
- * so the CU stops there.
- * When we add the next block, we modify the previous NOP
- * to make it point to the new tx command.
- * Simple, isn't it?
- *
- * (called in wavelan_packet_xmit())
- */
-static int wv_packet_write(struct net_device * dev, void *buf, short length)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- unsigned short txblock;
- unsigned short txpred;
- unsigned short tx_addr;
- unsigned short nop_addr;
- unsigned short tbd_addr;
- unsigned short buf_addr;
- ac_tx_t tx;
- ac_nop_t nop;
- tbd_t tbd;
- int clen = length;
- unsigned long flags;
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name,
- length);
-#endif
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check nothing bad has happened */
- if (lp->tx_n_in_use == (NTXBLOCKS - 1)) {
-#ifdef DEBUG_TX_ERROR
- printk(KERN_INFO "%s: wv_packet_write(): Tx queue full.\n",
- dev->name);
-#endif
- spin_unlock_irqrestore(&lp->spinlock, flags);
- return 1;
- }
-
- /* Calculate addresses of next block and previous block. */
- txblock = lp->tx_first_free;
- txpred = txblock - TXBLOCKZ;
- if (txpred < OFFSET_CU)
- txpred += NTXBLOCKS * TXBLOCKZ;
- lp->tx_first_free += TXBLOCKZ;
- if (lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
- lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
-
- lp->tx_n_in_use++;
-
- /* Calculate addresses of the different parts of the block. */
- tx_addr = txblock;
- nop_addr = tx_addr + sizeof(tx);
- tbd_addr = nop_addr + sizeof(nop);
- buf_addr = tbd_addr + sizeof(tbd);
-
- /*
- * Transmit command
- */
- tx.tx_h.ac_status = 0;
- obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
- (unsigned char *) &tx.tx_h.ac_status,
- sizeof(tx.tx_h.ac_status));
-
- /*
- * NOP command
- */
- nop.nop_h.ac_status = 0;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
- (unsigned char *) &nop.nop_h.ac_status,
- sizeof(nop.nop_h.ac_status));
- nop.nop_h.ac_link = nop_addr;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
- (unsigned char *) &nop.nop_h.ac_link,
- sizeof(nop.nop_h.ac_link));
-
- /*
- * Transmit buffer descriptor
- */
- tbd.tbd_status = TBD_STATUS_EOF | (TBD_STATUS_ACNT & clen);
- tbd.tbd_next_bd_offset = I82586NULL;
- tbd.tbd_bufl = buf_addr;
- tbd.tbd_bufh = 0;
- obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd, sizeof(tbd));
-
- /*
- * Data
- */
- obram_write(ioaddr, buf_addr, buf, length);
-
- /*
- * Overwrite the predecessor NOP link
- * so that it points to this txblock.
- */
- nop_addr = txpred + sizeof(tx);
- nop.nop_h.ac_status = 0;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
- (unsigned char *) &nop.nop_h.ac_status,
- sizeof(nop.nop_h.ac_status));
- nop.nop_h.ac_link = txblock;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
- (unsigned char *) &nop.nop_h.ac_link,
- sizeof(nop.nop_h.ac_link));
-
- /* Make sure the watchdog will keep quiet for a while */
- dev->trans_start = jiffies;
-
- /* Keep stats up to date. */
- dev->stats.tx_bytes += length;
-
- if (lp->tx_first_in_use == I82586NULL)
- lp->tx_first_in_use = txblock;
-
- if (lp->tx_n_in_use < NTXBLOCKS - 1)
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_TX_INFO
- wv_packet_info((u8 *) buf, length, dev->name,
- "wv_packet_write");
-#endif /* DEBUG_TX_INFO */
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
-#endif
-
- return 0;
-}
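/*
 * Illustrative sketch (not driver code): the transmit-block ring
 * arithmetic used above.  Blocks occupy OFFSET_CU .. OFFSET_CU +
 * NTXBLOCKS * TXBLOCKZ in on-board RAM and wrap around, so the
 * predecessor of a block is TXBLOCKZ bytes earlier, modulo the ring.
 * The helper name is hypothetical.
 */
static inline unsigned short wv_tx_pred_block(unsigned short txblock)
{
	unsigned short pred = txblock - TXBLOCKZ;

	if (pred < OFFSET_CU)
		pred += NTXBLOCKS * TXBLOCKZ;
	return pred;
}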
-
-/*------------------------------------------------------------------*/
-/*
- * This routine is called when we want to send a packet (NET3 callback).
- * In this routine, we check if the hardware is ready to accept
- * the packet. We also prevent reentrancy. Then we call the function
- * to send the packet.
- */
-static netdev_tx_t wavelan_packet_xmit(struct sk_buff *skb,
- struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
- char data[ETH_ZLEN];
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
- (unsigned) skb);
-#endif
-
- /*
- * Block a timer-based transmit from overlapping.
- * In other words, prevent reentering this routine.
- */
- netif_stop_queue(dev);
-
- /* If somebody has asked to reconfigure the controller,
- * we can do it now.
- */
- if (lp->reconfig_82586) {
- spin_lock_irqsave(&lp->spinlock, flags);
- wv_82586_config(dev);
- spin_unlock_irqrestore(&lp->spinlock, flags);
- /* Check that we can continue */
- if (lp->tx_n_in_use == (NTXBLOCKS - 1))
- return NETDEV_TX_BUSY;
- }
-
- /* Do we need some padding? */
- /* Note: on wireless the propagation time is on the order of 1 us,
- * and we don't have the Ethernet-specific requirement of being
- * able to detect collisions, therefore in theory we don't really
- * need to pad. Jean II */
- if (skb->len < ETH_ZLEN) {
- memset(data, 0, ETH_ZLEN);
- skb_copy_from_linear_data(skb, data, skb->len);
- /* Write packet on the card */
- if(wv_packet_write(dev, data, ETH_ZLEN))
- return NETDEV_TX_BUSY; /* We failed */
- }
- else if(wv_packet_write(dev, skb->data, skb->len))
- return NETDEV_TX_BUSY; /* We failed */
-
-
- dev_kfree_skb(skb);
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
-#endif
- return NETDEV_TX_OK;
-}
-
-/*********************** HARDWARE CONFIGURATION ***********************/
-/*
- * This part does the real job of starting and configuring the hardware.
- */
-
-/*--------------------------------------------------------------------*/
-/*
- * Routine to initialize the Modem Management Controller.
- * (called by wv_hw_reset())
- */
-static int wv_mmc_init(struct net_device * dev)
-{
- unsigned long ioaddr = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- mmw_t m;
- int configured;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
-#endif
-
- /* Read the parameter storage area. */
- psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
-
-#ifdef USE_PSA_CONFIG
- configured = psa.psa_conf_status & 1;
-#else
- configured = 0;
-#endif
-
- /* If the PSA is not configured */
- if (!configured) {
- /* User will be able to configure NWID later (with iwconfig). */
- psa.psa_nwid[0] = 0;
- psa.psa_nwid[1] = 0;
-
- /* no NWID checking since NWID is not set */
- psa.psa_nwid_select = 0;
-
- /* Disable encryption */
- psa.psa_encryption_select = 0;
-
- /* Set to standard values:
- * 0x04 for AT,
- * 0x01 for MCA,
- * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
- */
- if (psa.psa_comp_number & 1)
- psa.psa_thr_pre_set = 0x01;
- else
- psa.psa_thr_pre_set = 0x04;
- psa.psa_quality_thr = 0x03;
-
- /* It is configured */
- psa.psa_conf_status |= 1;
-
-#ifdef USE_PSA_CONFIG
- /* Write the psa. */
- psa_write(ioaddr, lp->hacr,
- (char *) psa.psa_nwid - (char *) &psa,
- (unsigned char *) psa.psa_nwid, 4);
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_thr_pre_set - (char *) &psa,
- (unsigned char *) &psa.psa_thr_pre_set, 1);
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_quality_thr - (char *) &psa,
- (unsigned char *) &psa.psa_quality_thr, 1);
- psa_write(ioaddr, lp->hacr,
- (char *) &psa.psa_conf_status - (char *) &psa,
- (unsigned char *) &psa.psa_conf_status, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, lp->hacr);
-#endif
- }
-
- /* Zero the mmc structure. */
- memset(&m, 0x00, sizeof(m));
-
- /* Copy PSA info to the mmc. */
- m.mmw_netw_id_l = psa.psa_nwid[1];
- m.mmw_netw_id_h = psa.psa_nwid[0];
-
- if (psa.psa_nwid_select & 1)
- m.mmw_loopt_sel = 0x00;
- else
- m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
-
- memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
- sizeof(m.mmw_encr_key));
-
- if (psa.psa_encryption_select)
- m.mmw_encr_enable =
- MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
- else
- m.mmw_encr_enable = 0;
-
- m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
- m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
-
- /*
- * Set default modem control parameters.
- * See NCR document 407-0024326 Rev. A.
- */
- m.mmw_jabber_enable = 0x01;
- m.mmw_freeze = 0;
- m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
- m.mmw_ifs = 0x20;
- m.mmw_mod_delay = 0x04;
- m.mmw_jam_time = 0x38;
-
- m.mmw_des_io_invert = 0;
- m.mmw_decay_prm = 0;
- m.mmw_decay_updat_prm = 0;
-
- /* Write all info to MMC. */
- mmc_write(ioaddr, 0, (u8 *) &m, sizeof(m));
-
- /* The following code starts the modem of the 2.00 frequency
- * selectable cards at power on. It's not strictly needed on
- * subsequent boots.
- * The original patch was by Joe Finney for the PCMCIA driver, but
- * I've cleaned it up a bit and added documentation.
- * Thanks to Loeke Brederveld from Lucent for the info.
- */
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
- * Does it work for everybody, especially old cards? */
- /* Note: WFREQSEL verifies that it is able to read a sensible
- * frequency from EEPROM (address 0x00) and that MMR_FEE_STATUS_ID
- * is 0xA (Xilinx version) or 0xB (Ariadne version).
- * My test is more crude but does work. */
- if (!(mmc_in(ioaddr, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- /* We must download the frequency parameters to the
- * synthesizers (from the EEPROM - area 1)
- * Note: as the EEPROM address is automatically decremented, we set
- * the end of the area... */
- m.mmw_fee_addr = 0x0F;
- m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
- mmc_write(ioaddr, (char *) &m.mmw_fee_ctrl - (char *) &m,
- (unsigned char *) &m.mmw_fee_ctrl, 2);
-
- /* Wait until the download is finished. */
- fee_wait(ioaddr, 100, 100);
-
-#ifdef DEBUG_CONFIG_INFO
- /* The frequency was in the last word downloaded. */
- mmc_read(ioaddr, (char *) &m.mmw_fee_data_l - (char *) &m,
- (unsigned char *) &m.mmw_fee_data_l, 2);
-
- /* Print some info for the user. */
- printk(KERN_DEBUG
- "%s: WaveLAN 2.00 recognised (frequency select). Current frequency = %ld\n",
- dev->name,
- ((m.mmw_fee_data_h << 4) | (m.mmw_fee_data_l >> 4)) *
- 5 / 2 + 24000L);
-#endif
-
- /* We must now download the power adjust value (gain) to
- * the synthesizers (from the EEPROM - area 7 - DAC). */
- m.mmw_fee_addr = 0x61;
- m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
- mmc_write(ioaddr, (char *) &m.mmw_fee_ctrl - (char *) &m,
- (unsigned char *) &m.mmw_fee_ctrl, 2);
-
- /* Wait until the download is finished. */
- fee_wait(ioaddr, 100, 100);
- }
- /* if 2.00 card */
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Construct the fd and rbd structures.
- * Start the receive unit.
- * (called by wv_hw_reset())
- */
-static int wv_ru_start(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- u16 scb_cs;
- fd_t fd;
- rbd_t rbd;
- u16 rx;
- u16 rx_next;
- int i;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
-#endif
-
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
- if ((scb_cs & SCB_ST_RUS) == SCB_ST_RUS_RDY)
- return 0;
-
- lp->rx_head = OFFSET_RU;
-
- for (i = 0, rx = lp->rx_head; i < NRXBLOCKS; i++, rx = rx_next) {
- rx_next =
- (i == NRXBLOCKS - 1) ? lp->rx_head : rx + RXBLOCKZ;
-
- fd.fd_status = 0;
- fd.fd_command = (i == NRXBLOCKS - 1) ? FD_COMMAND_EL : 0;
- fd.fd_link_offset = rx_next;
- fd.fd_rbd_offset = rx + sizeof(fd);
- obram_write(ioaddr, rx, (unsigned char *) &fd, sizeof(fd));
-
- rbd.rbd_status = 0;
- rbd.rbd_next_rbd_offset = I82586NULL;
- rbd.rbd_bufl = rx + sizeof(fd) + sizeof(rbd);
- rbd.rbd_bufh = 0;
- rbd.rbd_el_size = RBD_EL | (RBD_SIZE & MAXDATAZ);
- obram_write(ioaddr, rx + sizeof(fd),
- (unsigned char *) &rbd, sizeof(rbd));
-
- lp->rx_last = rx;
- }
-
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_rfa_offset),
- (unsigned char *) &lp->rx_head, sizeof(lp->rx_head));
-
- scb_cs = SCB_CMD_RUC_GO;
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
-
- set_chan_attn(ioaddr, lp->hacr);
-
- for (i = 1000; i > 0; i--) {
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
- if (scb_cs == 0)
- break;
-
- udelay(10);
- }
-
- if (i <= 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wavelan_ru_start(): board not accepting command.\n",
- dev->name);
-#endif
- return -1;
- }
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Initialise the transmit blocks.
- * Start the command unit executing the NOP
- * self-loop of the first transmit block.
- *
- * Here we create the list of send buffers used to transmit packets
- * between the PC and the command unit. For each buffer, we create a
- * buffer descriptor (pointing to the buffer), a transmit command
- * (pointing to the buffer descriptor) and a NOP command.
- * The transmit command is linked to the NOP, and the NOP to itself.
- * When we have finished executing the transmit command, we then
- * loop on the NOP. By re-pointing the NOP link to a new command,
- * we may send another buffer.
- *
- * (called by wv_hw_reset())
- */
-static int wv_cu_start(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- int i;
- u16 txblock;
- u16 first_nop;
- u16 scb_cs;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_cu_start()\n", dev->name);
-#endif
-
- lp->tx_first_free = OFFSET_CU;
- lp->tx_first_in_use = I82586NULL;
-
- for (i = 0, txblock = OFFSET_CU;
- i < NTXBLOCKS; i++, txblock += TXBLOCKZ) {
- ac_tx_t tx;
- ac_nop_t nop;
- tbd_t tbd;
- unsigned short tx_addr;
- unsigned short nop_addr;
- unsigned short tbd_addr;
- unsigned short buf_addr;
-
- tx_addr = txblock;
- nop_addr = tx_addr + sizeof(tx);
- tbd_addr = nop_addr + sizeof(nop);
- buf_addr = tbd_addr + sizeof(tbd);
-
- tx.tx_h.ac_status = 0;
- tx.tx_h.ac_command = acmd_transmit | AC_CFLD_I;
- tx.tx_h.ac_link = nop_addr;
- tx.tx_tbd_offset = tbd_addr;
- obram_write(ioaddr, tx_addr, (unsigned char *) &tx,
- sizeof(tx));
-
- nop.nop_h.ac_status = 0;
- nop.nop_h.ac_command = acmd_nop;
- nop.nop_h.ac_link = nop_addr;
- obram_write(ioaddr, nop_addr, (unsigned char *) &nop,
- sizeof(nop));
-
- tbd.tbd_status = TBD_STATUS_EOF;
- tbd.tbd_next_bd_offset = I82586NULL;
- tbd.tbd_bufl = buf_addr;
- tbd.tbd_bufh = 0;
- obram_write(ioaddr, tbd_addr, (unsigned char *) &tbd,
- sizeof(tbd));
- }
-
- first_nop =
- OFFSET_CU + (NTXBLOCKS - 1) * TXBLOCKZ + sizeof(ac_tx_t);
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_cbl_offset),
- (unsigned char *) &first_nop, sizeof(first_nop));
-
- scb_cs = SCB_CMD_CUC_GO;
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
-
- set_chan_attn(ioaddr, lp->hacr);
-
- for (i = 1000; i > 0; i--) {
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cs, sizeof(scb_cs));
- if (scb_cs == 0)
- break;
-
- udelay(10);
- }
-
- if (i <= 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wavelan_cu_start(): board not accepting command.\n",
- dev->name);
-#endif
- return -1;
- }
-
- lp->tx_n_in_use = 0;
- netif_start_queue(dev);
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_cu_start()\n", dev->name);
-#endif
- return 0;
-}
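/*
 * Illustrative sketch (not driver code): offsets of the pieces inside
 * one transmit block as laid out by the loop above and reused by
 * wv_packet_write() and wv_82586_config().  Helper names are
 * hypothetical.
 */
static inline unsigned short wv_nop_off(unsigned short txblock)
{
	return txblock + sizeof(ac_tx_t);
}

static inline unsigned short wv_tbd_off(unsigned short txblock)
{
	return txblock + sizeof(ac_tx_t) + sizeof(ac_nop_t);
}

static inline unsigned short wv_buf_off(unsigned short txblock)
{
	return txblock + sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t);
}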
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does a standard configuration of the WaveLAN
- * controller (i82586).
- *
- * It initialises the scp, iscp and scb structures.
- * The first two are just pointers to the next one.
- * The last one is used for basic configuration and for basic
- * communication (interrupt status).
- *
- * (called by wv_hw_reset())
- */
-static int wv_82586_start(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- scp_t scp; /* system configuration pointer */
- iscp_t iscp; /* intermediate scp */
- scb_t scb; /* system control block */
- ach_t cb; /* Action command header */
- u8 zeroes[512];
- int i;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_82586_start()\n", dev->name);
-#endif
-
- /*
- * Clear the onboard RAM.
- */
- memset(&zeroes[0], 0x00, sizeof(zeroes));
- for (i = 0; i < I82586_MEMZ; i += sizeof(zeroes))
- obram_write(ioaddr, i, &zeroes[0], sizeof(zeroes));
-
- /*
- * Construct the command unit structures:
- * scp, iscp, scb, cb.
- */
- memset(&scp, 0x00, sizeof(scp));
- scp.scp_sysbus = SCP_SY_16BBUS;
- scp.scp_iscpl = OFFSET_ISCP;
- obram_write(ioaddr, OFFSET_SCP, (unsigned char *) &scp,
- sizeof(scp));
-
- memset(&iscp, 0x00, sizeof(iscp));
- iscp.iscp_busy = 1;
- iscp.iscp_offset = OFFSET_SCB;
- obram_write(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp,
- sizeof(iscp));
-
- /* Our first command is to reset the i82586. */
- memset(&scb, 0x00, sizeof(scb));
- scb.scb_command = SCB_CMD_RESET;
- scb.scb_cbl_offset = OFFSET_CU;
- scb.scb_rfa_offset = OFFSET_RU;
- obram_write(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
- sizeof(scb));
-
- set_chan_attn(ioaddr, lp->hacr);
-
- /* Wait for command to finish. */
- for (i = 1000; i > 0; i--) {
- obram_read(ioaddr, OFFSET_ISCP, (unsigned char *) &iscp,
- sizeof(iscp));
-
- if (iscp.iscp_busy == (unsigned short) 0)
- break;
-
- udelay(10);
- }
-
- if (i <= 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wv_82586_start(): iscp_busy timeout.\n",
- dev->name);
-#endif
- return -1;
- }
-
- /* Check command completion. */
- for (i = 15; i > 0; i--) {
- obram_read(ioaddr, OFFSET_SCB, (unsigned char *) &scb,
- sizeof(scb));
-
- if (scb.scb_status == (SCB_ST_CX | SCB_ST_CNA))
- break;
-
- udelay(10);
- }
-
- if (i <= 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wv_82586_start(): status: expected 0x%02x, got 0x%02x.\n",
- dev->name, SCB_ST_CX | SCB_ST_CNA, scb.scb_status);
-#endif
- return -1;
- }
-
- wv_ack(dev);
-
- /* Set the action command header. */
- memset(&cb, 0x00, sizeof(cb));
- cb.ac_command = AC_CFLD_EL | (AC_CFLD_CMD & acmd_diagnose);
- cb.ac_link = OFFSET_CU;
- obram_write(ioaddr, OFFSET_CU, (unsigned char *) &cb, sizeof(cb));
-
- if (wv_synchronous_cmd(dev, "diag()") == -1)
- return -1;
-
- obram_read(ioaddr, OFFSET_CU, (unsigned char *) &cb, sizeof(cb));
- if (cb.ac_status & AC_SFLD_FAIL) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wv_82586_start(): i82586 Self Test failed.\n",
- dev->name);
-#endif
- return -1;
- }
-#ifdef DEBUG_I82586_SHOW
- wv_scb_show(ioaddr);
-#endif
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_82586_start()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does a standard configuration of the WaveLAN
- * controller (i82586).
- *
- * This routine is a violent hack. We use the first free transmit block
- * to make our configuration. In the buffer area, we create the three
- * configuration commands (linked). We make the previous NOP point to
- * the beginning of the buffer instead of the tx command. Afterwards,
- * execution continues as usual through the NOP command.
- * Note that only the last command (mc_set) will generate an interrupt.
- *
- * (called by wv_hw_reset(), wv_82586_reconfig(), wavelan_packet_xmit())
- */
-static void wv_82586_config(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- unsigned short txblock;
- unsigned short txpred;
- unsigned short tx_addr;
- unsigned short nop_addr;
- unsigned short tbd_addr;
- unsigned short cfg_addr;
- unsigned short ias_addr;
- unsigned short mcs_addr;
- ac_tx_t tx;
- ac_nop_t nop;
- ac_cfg_t cfg; /* Configure action */
- ac_ias_t ias; /* IA-setup action */
- ac_mcs_t mcs; /* Multicast setup */
- struct dev_mc_list *dmi;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
-#endif
-
- /* Check nothing bad has happened */
- if (lp->tx_n_in_use == (NTXBLOCKS - 1)) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO "%s: wv_82586_config(): Tx queue full.\n",
- dev->name);
-#endif
- return;
- }
-
- /* Calculate addresses of next block and previous block. */
- txblock = lp->tx_first_free;
- txpred = txblock - TXBLOCKZ;
- if (txpred < OFFSET_CU)
- txpred += NTXBLOCKS * TXBLOCKZ;
- lp->tx_first_free += TXBLOCKZ;
- if (lp->tx_first_free >= OFFSET_CU + NTXBLOCKS * TXBLOCKZ)
- lp->tx_first_free -= NTXBLOCKS * TXBLOCKZ;
-
- lp->tx_n_in_use++;
-
- /* Calculate addresses of the different parts of the block. */
- tx_addr = txblock;
- nop_addr = tx_addr + sizeof(tx);
- tbd_addr = nop_addr + sizeof(nop);
- cfg_addr = tbd_addr + sizeof(tbd_t); /* beginning of the buffer */
- ias_addr = cfg_addr + sizeof(cfg);
- mcs_addr = ias_addr + sizeof(ias);
-
- /*
- * Transmit command
- */
- tx.tx_h.ac_status = 0xFFFF; /* Fake completion value */
- obram_write(ioaddr, toff(ac_tx_t, tx_addr, tx_h.ac_status),
- (unsigned char *) &tx.tx_h.ac_status,
- sizeof(tx.tx_h.ac_status));
-
- /*
- * NOP command
- */
- nop.nop_h.ac_status = 0;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
- (unsigned char *) &nop.nop_h.ac_status,
- sizeof(nop.nop_h.ac_status));
- nop.nop_h.ac_link = nop_addr;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
- (unsigned char *) &nop.nop_h.ac_link,
- sizeof(nop.nop_h.ac_link));
-
- /* Create a configure action. */
- memset(&cfg, 0x00, sizeof(cfg));
-
- /*
- * For Linux we invert AC_CFG_ALOC() so as to conform
- * to the way that net packets reach us from above.
- * (See also ac_tx_t.)
- *
- * Updated from Wavelan Manual WCIN085B
- */
- cfg.cfg_byte_cnt =
- AC_CFG_BYTE_CNT(sizeof(ac_cfg_t) - sizeof(ach_t));
- cfg.cfg_fifolim = AC_CFG_FIFOLIM(4);
- cfg.cfg_byte8 = AC_CFG_SAV_BF(1) | AC_CFG_SRDY(0);
- cfg.cfg_byte9 = AC_CFG_ELPBCK(0) |
- AC_CFG_ILPBCK(0) |
- AC_CFG_PRELEN(AC_CFG_PLEN_2) |
- AC_CFG_ALOC(1) | AC_CFG_ADDRLEN(WAVELAN_ADDR_SIZE);
- cfg.cfg_byte10 = AC_CFG_BOFMET(1) |
- AC_CFG_ACR(6) | AC_CFG_LINPRIO(0);
- cfg.cfg_ifs = 0x20;
- cfg.cfg_slotl = 0x0C;
- cfg.cfg_byte13 = AC_CFG_RETRYNUM(15) | AC_CFG_SLTTMHI(0);
- cfg.cfg_byte14 = AC_CFG_FLGPAD(0) |
- AC_CFG_BTSTF(0) |
- AC_CFG_CRC16(0) |
- AC_CFG_NCRC(0) |
- AC_CFG_TNCRS(1) |
- AC_CFG_MANCH(0) |
- AC_CFG_BCDIS(0) | AC_CFG_PRM(lp->promiscuous);
- cfg.cfg_byte15 = AC_CFG_ICDS(0) |
- AC_CFG_CDTF(0) | AC_CFG_ICSS(0) | AC_CFG_CSTF(0);
-/*
- cfg.cfg_min_frm_len = AC_CFG_MNFRM(64);
-*/
- cfg.cfg_min_frm_len = AC_CFG_MNFRM(8);
-
- cfg.cfg_h.ac_command = (AC_CFLD_CMD & acmd_configure);
- cfg.cfg_h.ac_link = ias_addr;
- obram_write(ioaddr, cfg_addr, (unsigned char *) &cfg, sizeof(cfg));
-
- /* Set up the MAC address */
- memset(&ias, 0x00, sizeof(ias));
- ias.ias_h.ac_command = (AC_CFLD_CMD & acmd_ia_setup);
- ias.ias_h.ac_link = mcs_addr;
- memcpy(&ias.ias_addr[0], (unsigned char *) &dev->dev_addr[0],
- sizeof(ias.ias_addr));
- obram_write(ioaddr, ias_addr, (unsigned char *) &ias, sizeof(ias));
-
- /* Initialize adapter's Ethernet multicast addresses */
- memset(&mcs, 0x00, sizeof(mcs));
- mcs.mcs_h.ac_command = AC_CFLD_I | (AC_CFLD_CMD & acmd_mc_setup);
- mcs.mcs_h.ac_link = nop_addr;
- mcs.mcs_cnt = WAVELAN_ADDR_SIZE * lp->mc_count;
- obram_write(ioaddr, mcs_addr, (unsigned char *) &mcs, sizeof(mcs));
-
- /* Any address to set? */
- if (lp->mc_count) {
- netdev_for_each_mc_addr(dmi, dev)
- outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
- WAVELAN_ADDR_SIZE >> 1);
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG
- "%s: wv_82586_config(): set %d multicast addresses:\n",
- dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
-#endif
- }
-
- /*
- * Overwrite the predecessor NOP link
- * so that it points to the configure action.
- */
- nop_addr = txpred + sizeof(tx);
- nop.nop_h.ac_status = 0;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_status),
- (unsigned char *) &nop.nop_h.ac_status,
- sizeof(nop.nop_h.ac_status));
- nop.nop_h.ac_link = cfg_addr;
- obram_write(ioaddr, toff(ac_nop_t, nop_addr, nop_h.ac_link),
- (unsigned char *) &nop.nop_h.ac_link,
- sizeof(nop.nop_h.ac_link));
-
- /* Job done, clear the flag */
- lp->reconfig_82586 = 0;
-
- if (lp->tx_first_in_use == I82586NULL)
- lp->tx_first_in_use = txblock;
-
- if (lp->tx_n_in_use == (NTXBLOCKS - 1))
- netif_stop_queue(dev);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_82586_config()\n", dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This routine, called by wavelan_close(), gracefully stops the
- * WaveLAN controller (i82586).
- * (called by wavelan_close())
- */
-static void wv_82586_stop(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- u16 scb_cmd;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_82586_stop()\n", dev->name);
-#endif
-
- /* Suspend both command unit and receive unit. */
- scb_cmd = (SCB_CMD_CUC & SCB_CMD_CUC_SUS) |
- (SCB_CMD_RUC & SCB_CMD_RUC_SUS);
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &scb_cmd, sizeof(scb_cmd));
- set_chan_attn(ioaddr, lp->hacr);
-
- /* No more interrupts */
- wv_ints_off(dev);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_82586_stop()\n", dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Totally reset the WaveLAN and restart it.
- * Performs the following actions:
- * 1. A power reset (reset DMA)
- * 2. Initialize the radio modem (using wv_mmc_init)
- * 3. Reset & Configure LAN controller (using wv_82586_start)
- * 4. Start the LAN controller's command unit
- * 5. Start the LAN controller's receive unit
- * (called by wavelan_interrupt(), wavelan_watchdog() & wavelan_open())
- */
-static int wv_hw_reset(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_hw_reset(dev=0x%x)\n", dev->name,
- (unsigned int) dev);
-#endif
-
- /* Increase the number of resets done. */
- lp->nresets++;
-
- wv_hacr_reset(ioaddr);
- lp->hacr = HACR_DEFAULT;
-
- if ((wv_mmc_init(dev) < 0) || (wv_82586_start(dev) < 0))
- return -1;
-
- /* Enable the card to send interrupts. */
- wv_ints_on(dev);
-
- /* Start card functions */
- if (wv_cu_start(dev) < 0)
- return -1;
-
- /* Setup the controller and parameters */
- wv_82586_config(dev);
-
- /* Finish configuration with the receive unit */
- if (wv_ru_start(dev) < 0)
- return -1;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Check if there is a WaveLAN at the specific base address.
- * As a side effect, this reads the MAC address.
- * (called in wavelan_probe() and init_module())
- */
-static int wv_check_ioaddr(unsigned long ioaddr, u8 * mac)
-{
- int i; /* Loop counter */
-
- /* Check if the base address is available. */
- if (!request_region(ioaddr, sizeof(ha_t), "wavelan probe"))
- return -EBUSY; /* ioaddr already used */
-
- /* Reset host interface */
- wv_hacr_reset(ioaddr);
-
- /* Read the MAC address from the parameter storage area. */
- psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_univ_mac_addr),
- mac, 6);
-
- release_region(ioaddr, sizeof(ha_t));
-
- /*
- * Check the first three octets of the address for the manufacturer's code.
- * Note: if this can't find your WaveLAN card, you've got a
- * non-NCR/AT&T/Lucent ISA card. See wavelan.p.h for detail on
- * how to configure your card.
- */
- for (i = 0; i < ARRAY_SIZE(MAC_ADDRESSES); i++)
- if ((mac[0] == MAC_ADDRESSES[i][0]) &&
- (mac[1] == MAC_ADDRESSES[i][1]) &&
- (mac[2] == MAC_ADDRESSES[i][2]))
- return 0;
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_WARNING
- "WaveLAN (0x%3X): your MAC address might be %02X:%02X:%02X.\n",
- ioaddr, mac[0], mac[1], mac[2]);
-#endif
- return -ENODEV;
-}
-
-/************************ INTERRUPT HANDLING ************************/
-
-/*
- * This function is the interrupt handler for the WaveLAN card. It is
- * called whenever the card raises an interrupt: command completion,
- * frame reception, or a command/receive unit that needs restarting.
- */
-static irqreturn_t wavelan_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev;
- unsigned long ioaddr;
- net_local *lp;
- u16 hasr;
- u16 status;
- u16 ack_cmd;
-
- dev = dev_id;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
-#endif
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
-#ifdef DEBUG_INTERRUPT_INFO
- /* Check state of our spinlock */
- if(spin_is_locked(&lp->spinlock))
- printk(KERN_DEBUG
- "%s: wavelan_interrupt(): spinlock is already locked !!!\n",
- dev->name);
-#endif
-
- /* Prevent reentrancy. We need to do this because we may have
- * multiple interrupt handlers running concurrently.
- * It is safe because interrupts are disabled before acquiring
- * the spinlock. */
- spin_lock(&lp->spinlock);
-
- /* We always had spurious interrupts at startup, but lately I
- * saw them coming *between* the request_irq() and the
- * spin_lock_irqsave() in wavelan_open(), so the spinlock
- * protection is not enough.
- * So, we also check lp->hacr, which tells us whether we enabled
- * irqs or not (see wv_ints_on()).
- * We can't use netif_running(dev) because we depend on the
- * proper processing of the irq generated during the config. */
-
- /* Which interrupt is it? */
- hasr = hasr_read(ioaddr);
-
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_INFO
- "%s: wavelan_interrupt(): hasr 0x%04x; hacr 0x%04x.\n",
- dev->name, hasr, lp->hacr);
-#endif
-
- /* Check modem interrupt */
- if ((hasr & HASR_MMC_INTR) && (lp->hacr & HACR_MMC_INT_ENABLE)) {
- u8 dce_status;
-
- /*
- * Interrupt from the modem management controller.
- * This will clear it -- ignored for now.
- */
- mmc_read(ioaddr, mmroff(0, mmr_dce_status), &dce_status,
- sizeof(dce_status));
-
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO
- "%s: wavelan_interrupt(): unexpected mmc interrupt: status 0x%04x.\n",
- dev->name, dce_status);
-#endif
- }
-
- /* Not an i82586 interrupt (or 82586 interrupts disabled): bail out. */
- if (((hasr & HASR_82586_INTR) == 0) ||
- ((lp->hacr & HACR_82586_INT_ENABLE) == 0)) {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO
- "%s: wavelan_interrupt(): interrupt not coming from i82586 - hasr 0x%04x.\n",
- dev->name, hasr);
-#endif
- spin_unlock (&lp->spinlock);
- return IRQ_NONE;
- }
-
- /* Read interrupt data. */
- obram_read(ioaddr, scboff(OFFSET_SCB, scb_status),
- (unsigned char *) &status, sizeof(status));
-
- /*
- * Acknowledge the interrupt(s).
- */
- ack_cmd = status & SCB_ST_INT;
- obram_write(ioaddr, scboff(OFFSET_SCB, scb_command),
- (unsigned char *) &ack_cmd, sizeof(ack_cmd));
- set_chan_attn(ioaddr, lp->hacr);
-
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG "%s: wavelan_interrupt(): status 0x%04x.\n",
- dev->name, status);
-#endif
-
- /* Command completed. */
- if ((status & SCB_ST_CX) == SCB_ST_CX) {
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG
- "%s: wavelan_interrupt(): command completed.\n",
- dev->name);
-#endif
- wv_complete(dev, ioaddr, lp);
- }
-
- /* Frame received. */
- if ((status & SCB_ST_FR) == SCB_ST_FR) {
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG
- "%s: wavelan_interrupt(): received packet.\n",
- dev->name);
-#endif
- wv_receive(dev);
- }
-
- /* Check the state of the command unit. */
- if (((status & SCB_ST_CNA) == SCB_ST_CNA) ||
- (((status & SCB_ST_CUS) != SCB_ST_CUS_ACTV) &&
- (netif_running(dev)))) {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO
- "%s: wavelan_interrupt(): CU inactive -- restarting\n",
- dev->name);
-#endif
- wv_hw_reset(dev);
- }
-
- /* Check the state of the receive unit. */
- if (((status & SCB_ST_RNR) == SCB_ST_RNR) ||
- (((status & SCB_ST_RUS) != SCB_ST_RUS_RDY) &&
- (netif_running(dev)))) {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO
- "%s: wavelan_interrupt(): RU not ready -- restarting\n",
- dev->name);
-#endif
- wv_hw_reset(dev);
- }
-
- /* Release spinlock */
- spin_unlock (&lp->spinlock);
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
-#endif
- return IRQ_HANDLED;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Watchdog: when we start a transmission, a timer is set for us in the
- * kernel. If the transmission completes, this timer is disabled. If
- * the timer expires, we are called and we try to unlock the hardware.
- */
-static void wavelan_watchdog(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- unsigned long flags;
- unsigned int nreaped;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
-#endif
-
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
- dev->name);
-#endif
-
- /* Check that we came here for something */
- if (lp->tx_n_in_use <= 0) {
- return;
- }
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Try to reclaim Tx buffers that may have completed, in case
- * we missed an interrupt. */
- nreaped = wv_complete(dev, ioaddr, lp);
-
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG
- "%s: wavelan_watchdog(): %d reaped, %d remain.\n",
- dev->name, nreaped, lp->tx_n_in_use);
-#endif
-
-#ifdef DEBUG_PSA_SHOW
- {
- psa_t psa;
- psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
- wv_psa_show(&psa);
- }
-#endif
-#ifdef DEBUG_MMC_SHOW
- wv_mmc_show(dev);
-#endif
-#ifdef DEBUG_I82586_SHOW
- wv_cu_show(dev);
-#endif
-
- /* If no buffer has been freed */
- if (nreaped == 0) {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO
- "%s: wavelan_watchdog(): cleanup failed, trying reset\n",
- dev->name);
-#endif
- wv_hw_reset(dev);
- }
-
- /* At this point, we should have some free Tx buffer ;-) */
- if (lp->tx_n_in_use < NTXBLOCKS - 1)
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
-#endif
-}
-
-/********************* CONFIGURATION CALLBACKS *********************/
-/*
- * Here are the functions called by the Linux networking code (NET3)
- * for initialization, configuration and removal of the
- * WaveLAN ISA hardware.
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Configure and start up the WaveLAN ISA adaptor.
- * Called by NET3 when it "opens" the device.
- */
-static int wavelan_open(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
- (unsigned int) dev);
-#endif
-
- /* Check irq */
- if (dev->irq == 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_WARNING "%s: wavelan_open(): no IRQ\n",
- dev->name);
-#endif
- return -ENXIO;
- }
-
- if (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", dev) != 0)
- {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n",
- dev->name);
-#endif
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- if (wv_hw_reset(dev) != -1) {
- netif_start_queue(dev);
- } else {
- free_irq(dev->irq, dev);
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wavelan_open(): impossible to start the card\n",
- dev->name);
-#endif
- spin_unlock_irqrestore(&lp->spinlock, flags);
- return -EAGAIN;
- }
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Shut down the WaveLAN ISA card.
- * Called by NET3 when it "closes" the device.
- */
-static int wavelan_close(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
- (unsigned int) dev);
-#endif
-
- netif_stop_queue(dev);
-
- /*
- * Flush the Tx and disable Rx.
- */
- spin_lock_irqsave(&lp->spinlock, flags);
- wv_82586_stop(dev);
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- free_irq(dev->irq, dev);
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
-#endif
- return 0;
-}
-
-static const struct net_device_ops wavelan_netdev_ops = {
- .ndo_open = wavelan_open,
- .ndo_stop = wavelan_close,
- .ndo_start_xmit = wavelan_packet_xmit,
- .ndo_set_multicast_list = wavelan_set_multicast_list,
- .ndo_tx_timeout = wavelan_watchdog,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-#ifdef SET_MAC_ADDRESS
- .ndo_set_mac_address = wavelan_set_mac_address
-#else
- .ndo_set_mac_address = eth_mac_addr,
-#endif
-};
-
-
-/*------------------------------------------------------------------*/
-/*
- * Probe an I/O address and, if a WaveLAN is there, configure the
- * device structure.
- * (called by wavelan_probe() and via init_module()).
- */
-static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
-{
- u8 irq_mask;
- int irq;
- net_local *lp;
- mac_addr mac;
- int err;
-
- if (!request_region(ioaddr, sizeof(ha_t), "wavelan"))
- return -EADDRINUSE;
-
- err = wv_check_ioaddr(ioaddr, mac);
- if (err)
- goto out;
-
- memcpy(dev->dev_addr, mac, 6);
-
- dev->base_addr = ioaddr;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_config(dev=0x%x, ioaddr=0x%lx)\n",
- dev->name, (unsigned int) dev, ioaddr);
-#endif
-
- /* Check IRQ argument on command line. */
- if (dev->irq != 0) {
- irq_mask = wv_irq_to_psa(dev->irq);
-
- if (irq_mask == 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_WARNING
- "%s: wavelan_config(): invalid IRQ %d ignored.\n",
- dev->name, dev->irq);
-#endif
- dev->irq = 0;
- } else {
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG
- "%s: wavelan_config(): changing IRQ to %d\n",
- dev->name, dev->irq);
-#endif
- psa_write(ioaddr, HACR_DEFAULT,
- psaoff(0, psa_int_req_no), &irq_mask, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev, ioaddr, HACR_DEFAULT);
- wv_hacr_reset(ioaddr);
- }
- }
-
- psa_read(ioaddr, HACR_DEFAULT, psaoff(0, psa_int_req_no),
- &irq_mask, 1);
- if ((irq = wv_psa_to_irq(irq_mask)) == -1) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_INFO
- "%s: wavelan_config(): could not wavelan_map_irq(%d).\n",
- dev->name, irq_mask);
-#endif
- err = -EAGAIN;
- goto out;
- }
-
- dev->irq = irq;
-
- dev->mem_start = 0x0000;
- dev->mem_end = 0x0000;
- dev->if_port = 0;
-
- /* Initialize device structures */
- memset(netdev_priv(dev), 0, sizeof(net_local));
- lp = netdev_priv(dev);
-
- /* Back link to the device structure. */
- lp->dev = dev;
- /* Add the device at the beginning of the linked list. */
- lp->next = wavelan_list;
- wavelan_list = lp;
-
- lp->hacr = HACR_DEFAULT;
-
- /* Multicast stuff */
- lp->promiscuous = 0;
- lp->mc_count = 0;
-
- /* Init spinlock */
- spin_lock_init(&lp->spinlock);
-
- dev->netdev_ops = &wavelan_netdev_ops;
- dev->watchdog_timeo = WATCHDOG_JIFFIES;
- dev->wireless_handlers = &wavelan_handler_def;
- lp->wireless_data.spy_data = &lp->spy_data;
- dev->wireless_data = &lp->wireless_data;
-
- dev->mtu = WAVELAN_MTU;
-
- /* Display nice information. */
- wv_init_info(dev);
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_config()\n", dev->name);
-#endif
- return 0;
-out:
- release_region(ioaddr, sizeof(ha_t));
- return err;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Check for a network adaptor of this type. Return the net_device
- * on success, or an ERR_PTR on failure. There seem to be different
- * interpretations of the initial value of dev->base_addr.
- * We follow the example in drivers/net/ne.c.
- * (called in "Space.c")
- */
-struct net_device * __init wavelan_probe(int unit)
-{
- struct net_device *dev;
- short base_addr;
- int def_irq;
- int i;
- int r = 0;
-
- /* compile-time check the sizes of structures */
- BUILD_BUG_ON(sizeof(psa_t) != PSA_SIZE);
- BUILD_BUG_ON(sizeof(mmw_t) != MMW_SIZE);
- BUILD_BUG_ON(sizeof(mmr_t) != MMR_SIZE);
- BUILD_BUG_ON(sizeof(ha_t) != HA_SIZE);
-
- dev = alloc_etherdev(sizeof(net_local));
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- base_addr = dev->base_addr;
- def_irq = dev->irq;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG
- "%s: ->wavelan_probe(dev=%p (base_addr=0x%x))\n",
- dev->name, dev, (unsigned int) dev->base_addr);
-#endif
-
- /* Don't probe at all. */
- if (base_addr < 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_WARNING
- "%s: wavelan_probe(): invalid base address\n",
- dev->name);
-#endif
- r = -ENXIO;
- } else if (base_addr > 0x100) { /* Check a single specified location. */
- r = wavelan_config(dev, base_addr);
-#ifdef DEBUG_CONFIG_INFO
- if (r != 0)
- printk(KERN_DEBUG
- "%s: wavelan_probe(): no device at specified base address (0x%X) or address already in use\n",
- dev->name, base_addr);
-#endif
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_probe()\n", dev->name);
-#endif
- } else { /* Scan all possible addresses of the WaveLAN hardware. */
- for (i = 0; i < ARRAY_SIZE(iobase); i++) {
- dev->irq = def_irq;
- if (wavelan_config(dev, iobase[i]) == 0) {
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG
- "%s: <-wavelan_probe()\n",
- dev->name);
-#endif
- break;
- }
- }
- if (i == ARRAY_SIZE(iobase))
- r = -ENODEV;
- }
- if (r)
- goto out;
- r = register_netdev(dev);
- if (r)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, sizeof(ha_t));
- wavelan_list = wavelan_list->next;
-out:
- free_netdev(dev);
- return ERR_PTR(r);
-}
-
-/****************************** MODULE ******************************/
-/*
- * Module entry point: insertion and removal
- */
-
-#ifdef MODULE
-/*------------------------------------------------------------------*/
-/*
- * Insertion of the module
- * I'm now quite proud of the multi-device support.
- */
-int __init init_module(void)
-{
- int ret = -EIO; /* Return error if no cards found */
- int i;
-
-#ifdef DEBUG_MODULE_TRACE
- printk(KERN_DEBUG "-> init_module()\n");
-#endif
-
- /* If probing is requested */
- if (io[0] == 0) {
-#ifdef DEBUG_CONFIG_ERROR
- printk(KERN_WARNING
- "WaveLAN init_module(): doing device probing (bad !)\n");
- printk(KERN_WARNING
- "Specify base addresses while loading module to correct the problem\n");
-#endif
-
- /* Copy the basic set of addresses to be probed. */
- for (i = 0; i < ARRAY_SIZE(iobase); i++)
- io[i] = iobase[i];
- }
-
-
- /* Loop on all possible base addresses. */
- for (i = 0; i < ARRAY_SIZE(io) && io[i] != 0; i++) {
- struct net_device *dev = alloc_etherdev(sizeof(net_local));
- if (!dev)
- break;
- if (name[i])
- strcpy(dev->name, name[i]); /* Copy name */
- dev->base_addr = io[i];
- dev->irq = irq[i];
-
- /* Check if there is something at this base address. */
- if (wavelan_config(dev, io[i]) == 0) {
- if (register_netdev(dev) != 0) {
- release_region(dev->base_addr, sizeof(ha_t));
- wavelan_list = wavelan_list->next;
- } else {
- ret = 0;
- continue;
- }
- }
- free_netdev(dev);
- }
-
-#ifdef DEBUG_CONFIG_ERROR
- if (!wavelan_list)
- printk(KERN_WARNING
- "WaveLAN init_module(): no device found\n");
-#endif
-
-#ifdef DEBUG_MODULE_TRACE
- printk(KERN_DEBUG "<- init_module()\n");
-#endif
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Removal of the module
- */
-void cleanup_module(void)
-{
-#ifdef DEBUG_MODULE_TRACE
- printk(KERN_DEBUG "-> cleanup_module()\n");
-#endif
-
- /* Loop on all devices and release them. */
- while (wavelan_list) {
- struct net_device *dev = wavelan_list->dev;
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG
- "%s: cleanup_module(): removing device at 0x%x\n",
- dev->name, (unsigned int) dev);
-#endif
- unregister_netdev(dev);
-
- release_region(dev->base_addr, sizeof(ha_t));
- wavelan_list = wavelan_list->next;
-
- free_netdev(dev);
- }
-
-#ifdef DEBUG_MODULE_TRACE
- printk(KERN_DEBUG "<- cleanup_module()\n");
-#endif
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
-
-/*
- * This software may only be used and distributed
- * according to the terms of the GNU General Public License.
- *
- * This software was developed as a component of the
- * Linux operating system.
- * It is based on other device drivers and information
- * either written or supplied by:
- * Ajay Bakre (bakre@paul.rutgers.edu),
- * Donald Becker (becker@scyld.com),
- * Loeke Brederveld (Loeke.Brederveld@Utrecht.NCR.com),
- * Anders Klemets (klemets@it.kth.se),
- * Vladimir V. Kolpakov (w@stier.koenig.ru),
- * Marc Meertens (Marc.Meertens@Utrecht.NCR.com),
- * Pauline Middelink (middelin@polyware.iaf.nl),
- * Robert Morris (rtm@das.harvard.edu),
- * Jean Tourrilhes (jt@hplb.hpl.hp.com),
- * Girish Welling (welling@paul.rutgers.edu),
- *
- * Thanks go also to:
- * James Ashton (jaa101@syseng.anu.edu.au),
- * Alan Cox (alan@lxorguk.ukuu.org.uk),
- * Allan Creighton (allanc@cs.usyd.edu.au),
- * Matthew Geier (matthew@cs.usyd.edu.au),
- * Remo di Giovanni (remo@cs.usyd.edu.au),
- * Eckhard Grah (grah@wrcs1.urz.uni-wuppertal.de),
- * Vipul Gupta (vgupta@cs.binghamton.edu),
- * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
- * Tim Nicholson (tim@cs.usyd.edu.au),
- * Ian Parkin (ian@cs.usyd.edu.au),
- * John Rosenberg (johnr@cs.usyd.edu.au),
- * George Rossi (george@phm.gov.au),
- * Arthur Scott (arthur@cs.usyd.edu.au),
- * Peter Storey,
- * for their assistance and advice.
- *
- * Please send bug reports, updates, comments to:
- *
- * Bruce Janson Email: bruce@cs.usyd.edu.au
- * Basser Department of Computer Science Phone: +61-2-9351-3423
- * University of Sydney, N.S.W., 2006, AUSTRALIA Fax: +61-2-9351-3838
- */
diff --git a/drivers/staging/wavelan/wavelan.h b/drivers/staging/wavelan/wavelan.h
deleted file mode 100644
index 9ab360558ff..00000000000
--- a/drivers/staging/wavelan/wavelan.h
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * WaveLAN ISA driver
- *
- * Jean II - HPLB '96
- *
- * Reorganisation and extension of the driver.
- * Original copyright follows. See wavelan.p.h for details.
- *
- * This file contains the declarations for the WaveLAN hardware. Note that
- * the WaveLAN ISA includes an i82586 controller (see definitions in
- * file i82586.h).
- *
- * The main difference between the ISA hardware and the PCMCIA one is
- * the Ethernet controller (i82586 instead of i82593).
- * The i82586 allows multiple transmit buffers. The PSA needs to be accessed
- * through the host interface.
- */
-
-#ifndef _WAVELAN_H
-#define _WAVELAN_H
-
-/************************** MAGIC NUMBERS ***************************/
-
-/* Detection of the WaveLAN card is done by reading the MAC
- * address from the card and checking it. If you have a non-AT&T
- * product (OEM, like DEC RoamAbout, Digital Ocean, or Epson),
- * you might need to modify this part to accommodate your hardware.
- */
-static const char MAC_ADDRESSES[][3] =
-{
- { 0x08, 0x00, 0x0E }, /* AT&T WaveLAN (standard) & DEC RoamAbout */
- { 0x08, 0x00, 0x6A }, /* AT&T WaveLAN (alternate) */
- { 0x00, 0x00, 0xE1 }, /* Hitachi Wavelan */
- { 0x00, 0x60, 0x1D } /* Lucent Wavelan (another one) */
- /* Add your card here and send me the patch! */
-};
-
-#define WAVELAN_ADDR_SIZE 6 /* Size of a MAC address */
-
-#define WAVELAN_MTU 1500 /* Maximum size of WaveLAN packet */
-
-#define MAXDATAZ (WAVELAN_ADDR_SIZE + WAVELAN_ADDR_SIZE + 2 + WAVELAN_MTU)
-
-/*
- * Constants used to convert channels to frequencies
- */
-
-/* Frequencies available in the 2.0 modem, in units of 250 kHz
- * (as read in the offset register of the dac area).
- * Used to map channel numbers used by `wfreqsel' to frequencies.
- */
-static const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
- 0xD0, 0xF0, 0xF8, 0x150 };
-
-/* Frequencies of the 1.0 modem (fixed frequencies).
- * Used to map the PSA `subband' to a frequency.
- * Note: all frequencies apart from the first one need to be multiplied by 10.
- */
-static const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
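As a worked example of the two notes above: fixed_bands[] is indexed by the PSA subband codes defined later in this header (PSA_SUBBAND_915 .. PSA_SUBBAND_2430_5), and every entry except the first is stored at one tenth of its real value. A minimal sketch of the implied conversion follows; the helper name is made up for illustration and is not part of the driver.

/* Sketch only: turn a PSA subband code (0..4) into a frequency in Hz,
 * applying the "multiply by 10 except the first entry" rule above. */
static unsigned long long subband_to_hz_sketch(int subband)
{
	if (subband < 0 || subband >= 5)
		return 0;	/* unknown subband code */
	/* e.g. subband 1: 2.425e8 * 10 = 2.425 GHz (PSA_SUBBAND_2425) */
	return (unsigned long long)fixed_bands[subband] * (subband ? 10 : 1);
}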
-
-
-
-/*************************** PC INTERFACE ****************************/
-
-/*
- * Host Adaptor structure.
- * (base is board port address).
- */
-typedef union hacs_u hacs_u;
-union hacs_u
-{
- unsigned short hu_command; /* Command register */
-#define HACR_RESET 0x0001 /* Reset board */
-#define HACR_CA 0x0002 /* Set Channel Attention for 82586 */
-#define HACR_16BITS 0x0004 /* 16-bit operation (0 => 8bits) */
-#define HACR_OUT0 0x0008 /* General purpose output pin 0 */
- /* not used - must be 1 */
-#define HACR_OUT1 0x0010 /* General purpose output pin 1 */
- /* not used - must be 1 */
-#define HACR_82586_INT_ENABLE 0x0020 /* Enable 82586 interrupts */
-#define HACR_MMC_INT_ENABLE 0x0040 /* Enable MMC interrupts */
-#define HACR_INTR_CLR_ENABLE 0x0080 /* Enable interrupt status read/clear */
- unsigned short hu_status; /* Status Register */
-#define HASR_82586_INTR 0x0001 /* Interrupt request from 82586 */
-#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
-#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
-#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
-} __attribute__ ((packed));
-
-typedef struct ha_t ha_t;
-struct ha_t
-{
- hacs_u ha_cs; /* Command and status registers */
-#define ha_command ha_cs.hu_command
-#define ha_status ha_cs.hu_status
- unsigned short ha_mmcr; /* Modem Management Ctrl Register */
- unsigned short ha_pior0; /* Program I/O Address Register Port 0 */
- unsigned short ha_piop0; /* Program I/O Port 0 */
- unsigned short ha_pior1; /* Program I/O Address Register Port 1 */
- unsigned short ha_piop1; /* Program I/O Port 1 */
- unsigned short ha_pior2; /* Program I/O Address Register Port 2 */
- unsigned short ha_piop2; /* Program I/O Port 2 */
-};
-
-#define HA_SIZE 16
-
-#define hoff(p,f) (unsigned short)((void *)(&((ha_t *)((void *)0 + (p)))->f) - (void *)0)
-#define HACR(p) hoff(p, ha_command)
-#define HASR(p) hoff(p, ha_status)
-#define MMCR(p) hoff(p, ha_mmcr)
-#define PIOR0(p) hoff(p, ha_pior0)
-#define PIOP0(p) hoff(p, ha_piop0)
-#define PIOR1(p) hoff(p, ha_pior1)
-#define PIOP1(p) hoff(p, ha_piop1)
-#define PIOR2(p) hoff(p, ha_pior2)
-#define PIOP2(p) hoff(p, ha_piop2)
-
-/*
- * Program I/O Mode Register values.
- */
-#define STATIC_PIO 0 /* Mode 1: static mode */
- /* RAM access ??? */
-#define AUTOINCR_PIO 1 /* Mode 2: auto increment mode */
- /* RAM access ??? */
-#define AUTODECR_PIO 2 /* Mode 3: auto decrement mode */
- /* RAM access ??? */
-#define PARAM_ACCESS_PIO 3 /* Mode 4: LAN parameter access mode */
- /* Parameter access. */
-#define PIO_MASK 3 /* register mask */
-#define PIOM(cmd,piono) ((u_short)cmd << 10 << (piono * 2))
-
-#define HACR_DEFAULT (HACR_OUT0 | HACR_OUT1 | HACR_16BITS | PIOM(STATIC_PIO, 0) | PIOM(AUTOINCR_PIO, 1) | PIOM(PARAM_ACCESS_PIO, 2))
-#define HACR_INTRON (HACR_82586_INT_ENABLE | HACR_MMC_INT_ENABLE | HACR_INTR_CLR_ENABLE)
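Spelling out the PIOM() packing above for the three ports used in HACR_DEFAULT; this is pure arithmetic on the definitions already given, not new behaviour:

/*
 * PIOM(STATIC_PIO, 0)       = (0 << 10) << 0 = 0x0000
 * PIOM(AUTOINCR_PIO, 1)     = (1 << 10) << 2 = 0x1000
 * PIOM(PARAM_ACCESS_PIO, 2) = (3 << 10) << 4 = 0xC000
 *
 * HACR_DEFAULT = HACR_OUT0 | HACR_OUT1 | HACR_16BITS | 0x1000 | 0xC000
 *              = 0x0008    | 0x0010    | 0x0004      | 0x1000 | 0xC000
 *              = 0xD01C
 */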
-
-/************************** MEMORY LAYOUT **************************/
-
-/*
- * Onboard 64 k RAM layout.
- * (Offsets from 0x0000.)
- */
-#define OFFSET_RU 0x0000 /* 75% memory */
-#define OFFSET_CU 0xC000 /* 25% memory */
-#define OFFSET_SCB (OFFSET_ISCP - sizeof(scb_t))
-#define OFFSET_ISCP (OFFSET_SCP - sizeof(iscp_t))
-#define OFFSET_SCP I82586_SCP_ADDR
-
-#define RXBLOCKZ (sizeof(fd_t) + sizeof(rbd_t) + MAXDATAZ)
-#define TXBLOCKZ (sizeof(ac_tx_t) + sizeof(ac_nop_t) + sizeof(tbd_t) + MAXDATAZ)
-
-#define NRXBLOCKS ((OFFSET_CU - OFFSET_RU) / RXBLOCKZ)
-#define NTXBLOCKS ((OFFSET_SCB - OFFSET_CU) / TXBLOCKZ)
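A short sketch of the arithmetic behind these block counts, using only what this header states; the i82586 descriptor sizes themselves come from i82586.h, which is not reproduced here:

/* Illustration only - not part of the driver. */
static void wv_layout_sketch(void)
{
	/* MAXDATAZ = 6 + 6 + 2 + 1500 = 1514 bytes: a maximal Ethernet
	 * frame (two addresses + type field + MTU), without the FCS. */
	unsigned int rx_space = OFFSET_CU - OFFSET_RU;	/* 0xC000 = 48 KB, 75% of the 64 KB */
	unsigned int tx_space = OFFSET_SCB - OFFSET_CU;	/* top 25%, minus the SCB/ISCP/SCP */
	unsigned int n_rx = rx_space / RXBLOCKZ;	/* == NRXBLOCKS */
	unsigned int n_tx = tx_space / TXBLOCKZ;	/* == NTXBLOCKS */

	(void)n_rx;
	(void)n_tx;
}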
-
-/********************** PARAMETER STORAGE AREA **********************/
-
-/*
- * Parameter Storage Area (PSA).
- */
-typedef struct psa_t psa_t;
-struct psa_t
-{
- unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
- unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
- unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
- unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
- unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
- unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
- unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
- unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
- unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
- unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
-
- unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
- unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x16-1B] Local MAC Address */
- unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
-#define PSA_UNIVERSAL 0 /* Universal (factory) */
-#define PSA_LOCAL 1 /* Local */
- unsigned char psa_comp_number; /* [0x1D] Compatibility Number: */
-#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
-#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
-#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
-#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
-#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
- unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
- unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
-#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
- unsigned char psa_subband; /* [0x20] Subband */
-#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
-#define PSA_SUBBAND_2425 1 /* 2425 MHz */
-#define PSA_SUBBAND_2460 2 /* 2460 MHz */
-#define PSA_SUBBAND_2484 3 /* 2484 MHz */
-#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
- unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
- unsigned char psa_mod_delay; /* [0x22] Modem Delay (?) (reserved) */
- unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
- unsigned char psa_nwid_select; /* [0x25] Network ID Select On/Off */
- unsigned char psa_encryption_select; /* [0x26] Encryption On/Off */
- unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
- unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
- unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
- unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
- unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
- unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
- unsigned char psa_crc[2]; /* [0x3D] CRC-16 over PSA */
- unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
-};
-
-#define PSA_SIZE 64
-
-/* Calculate offset of a field in the above structure.
- * Warning: only even addresses are used. */
-#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
-
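The psaoff() macro above (like hoff(), mmwoff() and mmroff() elsewhere in this header) computes a field's byte offset through pointer arithmetic on a null base, an idiom that predates routine use of offsetof(). A minimal equivalent using the standard macro, shown only for comparison; the driver keeps its own spelling:

#include <stddef.h>	/* offsetof() */

/* Same result as psaoff(p, f): the offset of field f in psa_t, shifted
 * by the extra base offset p that the driver's macro also adds in. */
#define PSAOFF_SKETCH(p, f)	((unsigned short)((p) + offsetof(psa_t, f)))

/* e.g. PSAOFF_SKETCH(0, psa_univ_mac_addr) == 0x10, matching the
 * [0x10-0x15] annotation in the structure above. */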
-/******************** MODEM MANAGEMENT INTERFACE ********************/
-
-/*
- * Modem Management Controller (MMC) write structure.
- */
-typedef struct mmw_t mmw_t;
-struct mmw_t
-{
- unsigned char mmw_encr_key[8]; /* encryption key */
- unsigned char mmw_encr_enable; /* Enable or disable encryption. */
-#define MMW_ENCR_ENABLE_MODE 0x02 /* mode of security option */
-#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option. */
- unsigned char mmw_unused0[1]; /* unused */
- unsigned char mmw_des_io_invert; /* encryption option */
-#define MMW_DES_IO_INVERT_RES 0x0F /* reserved */
-#define MMW_DES_IO_INVERT_CTRL 0xF0 /* control (?) (set to 0) */
- unsigned char mmw_unused1[5]; /* unused */
- unsigned char mmw_loopt_sel; /* looptest selection */
-#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* Disable NWID filtering. */
-#define MMW_LOOPT_SEL_INT 0x20 /* Activate Attention Request. */
-#define MMW_LOOPT_SEL_LS 0x10 /* looptest, no collision avoidance */
-#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
-#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
-#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
-#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
- unsigned char mmw_jabber_enable; /* jabber timer enable */
- /* Abort transmissions > 200 ms */
- unsigned char mmw_freeze; /* freeze or unfreeze signal level */
- /* 0 : signal level & qual updated for every new message, 1 : frozen */
- unsigned char mmw_anten_sel; /* antenna selection */
-#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
-#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
- unsigned char mmw_ifs; /* inter frame spacing */
- /* min time between transmission in bit periods (.5 us) - bit 0 ignored */
- unsigned char mmw_mod_delay; /* modem delay (synchro) */
- unsigned char mmw_jam_time; /* jamming time (after collision) */
- unsigned char mmw_unused2[1]; /* unused */
- unsigned char mmw_thr_pre_set; /* level threshold preset */
- /* Discard all packets with signal < this value (4) */
- unsigned char mmw_decay_prm; /* decay parameters */
- unsigned char mmw_decay_updat_prm; /* decay update parameters */
- unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
- /* Discard all packets with quality < this value (3) */
- unsigned char mmw_netw_id_l; /* NWID low order byte */
- unsigned char mmw_netw_id_h; /* NWID high order byte */
- /* Network ID or Domain : create virtual net on the air */
-
- /* 2.0 Hardware extension - frequency selection support */
- unsigned char mmw_mode_select; /* for analog tests (set to 0) */
- unsigned char mmw_unused3[1]; /* unused */
- unsigned char mmw_fee_ctrl; /* frequency EEPROM control */
-#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions. */
-#define MMW_FEE_CTRL_DWLD 0x08 /* Download EEPROM to mmc. */
-#define MMW_FEE_CTRL_CMD 0x07 /* EEPROM commands: */
-#define MMW_FEE_CTRL_READ 0x06 /* Read */
-#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
-#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address. */
-#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses. */
-#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
-#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
-#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
-#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers. */
-#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write address in protect register */
-#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
- /* Never issue the PRDS command: it's irreversible! */
-
- unsigned char mmw_fee_addr; /* EEPROM address */
-#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel. */
-#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
-#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
-#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
-#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
-#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
-
- unsigned char mmw_fee_data_l; /* Write data to EEPROM. */
- unsigned char mmw_fee_data_h; /* high octet */
- unsigned char mmw_ext_ant; /* Setting for external antenna */
-#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
-#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
-#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
-#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
-#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
-} __attribute__ ((packed));
-
-#define MMW_SIZE 37
-
-#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
-
-/*
- * Modem Management Controller (MMC) read structure.
- */
-typedef struct mmr_t mmr_t;
-struct mmr_t
-{
- unsigned char mmr_unused0[8]; /* unused */
- unsigned char mmr_des_status; /* encryption status */
- unsigned char mmr_des_avail; /* encryption available (0x55 read) */
-#define MMR_DES_AVAIL_DES 0x55 /* DES available */
-#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
- unsigned char mmr_des_io_invert; /* des I/O invert register */
- unsigned char mmr_unused1[5]; /* unused */
- unsigned char mmr_dce_status; /* DCE status */
-#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
-#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
-#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
-#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
-#define MMR_DCE_STATUS 0x0F /* mask to get the bits */
- unsigned char mmr_dsp_id; /* DSP ID (AA = Daedalus rev A) */
- unsigned char mmr_unused2[2]; /* unused */
- unsigned char mmr_correct_nwid_l; /* # of correct NWIDs rxd (low) */
- unsigned char mmr_correct_nwid_h; /* # of correct NWIDs rxd (high) */
- /* Warning: read high-order octet first! */
- unsigned char mmr_wrong_nwid_l; /* # of wrong NWIDs rxd (low) */
- unsigned char mmr_wrong_nwid_h; /* # of wrong NWIDs rxd (high) */
- unsigned char mmr_thr_pre_set; /* level threshold preset */
-#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
-#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
- unsigned char mmr_signal_lvl; /* signal level */
-#define MMR_SIGNAL_LVL 0x3F /* signal level */
-#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
- unsigned char mmr_silence_lvl; /* silence level (noise) */
-#define MMR_SILENCE_LVL 0x3F /* silence level */
-#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
- unsigned char mmr_sgnl_qual; /* signal quality */
-#define MMR_SGNL_QUAL 0x0F /* signal quality */
-#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
- unsigned char mmr_netw_id_l; /* NWID low order byte (?) */
- unsigned char mmr_unused3[3]; /* unused */
-
- /* 2.0 Hardware extension - frequency selection support */
- unsigned char mmr_fee_status; /* Status of frequency EEPROM */
-#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision ID */
-#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
-#define MMR_FEE_STATUS_BUSY 0x04 /* EEPROM busy */
- unsigned char mmr_unused4[1]; /* unused */
- unsigned char mmr_fee_data_l; /* Read data from EEPROM (low) */
- unsigned char mmr_fee_data_h; /* Read data from EEPROM (high) */
-} __attribute__ ((packed));
-
-#define MMR_SIZE 36
-
-#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
-
-/* Make the two above structures one */
-typedef union mm_t
-{
- struct mmw_t w; /* Write to the mmc */
- struct mmr_t r; /* Read from the mmc */
-} mm_t;
-
-#endif /* _WAVELAN_H */
-
-/*
- * This software may only be used and distributed
- * according to the terms of the GNU General Public License.
- *
- * For more details, see wavelan.c.
- */
diff --git a/drivers/staging/wavelan/wavelan.p.h b/drivers/staging/wavelan/wavelan.p.h
deleted file mode 100644
index dbe8de6e5f5..00000000000
--- a/drivers/staging/wavelan/wavelan.p.h
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * WaveLAN ISA driver
- *
- * Jean II - HPLB '96
- *
- * Reorganisation and extension of the driver.
- *
- * This file contains all definitions and declarations necessary for the
- * WaveLAN ISA driver. This file is a private header, so it should
- * be included only in wavelan.c!
- */
-
-#ifndef WAVELAN_P_H
-#define WAVELAN_P_H
-
-/************************** DOCUMENTATION ***************************/
-/*
- * This driver provides a Linux interface to the WaveLAN ISA hardware.
- * The WaveLAN is a product of Lucent (http://www.wavelan.com/).
- * This division was formerly part of NCR and then AT&T.
- * WaveLANs are also distributed by DEC (RoamAbout DS) and Digital Ocean.
- *
- * To learn how to use this driver, read the NET3 HOWTO.
- * If you want to exploit the many other functionalities, read the comments
- * in the code.
- *
- * This driver is the result of the effort of many people (see below).
- */
-
-/* ------------------------ SPECIFIC NOTES ------------------------ */
-/*
- * Web page
- * --------
- * I try to maintain a web page with the Wireless LAN Howto at :
- * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Wavelan.html
- *
- * SMP
- * ---
- * We are now SMP compliant (I eventually fixed the remaining bugs).
- * The driver has been tested on a dual P6-150 and survived my usual
- * set of torture tests.
- * Anyway, I spent enough time chasing interrupt re-entrancy during
- * errors or reconfiguration, and I designed the locked/unlocked
- * sections of the driver with great care; with the recent addition
- * of the spinlock (thanks to the new API), we should be quite close
- * to the truth.
- * The SMP/IRQ locking is quite coarse and conservative (i.e. not fast),
- * but better safe than sorry (especially at 2 Mb/s ;-).
- *
- * I have also looked into disabling only our interrupt on the card
- * (via HACR) instead of all interrupts in the processor (via cli),
- * so that other drivers are not impacted, and it looks like it's
- * possible, but it's very tricky to do right (full of races). As
- * the gain would be mostly for SMP systems, it can wait...
- *
- * Debugging and options
- * ---------------------
- * You will find below a set of '#define's allowing very fine control
- * over the driver behaviour and the debug messages printed.
- * The main options are:
- * o SET_PSA_CRC, to have your card correctly recognised by
- * an access point and the Point-to-Point diagnostic tool.
- * o USE_PSA_CONFIG, to read configuration from the PSA (EEprom)
- * (otherwise we always start afresh with some defaults)
- *
- * wavelan.o is too darned big
- * ---------------------------
- * That's true! There is a very simple way to reduce the driver
- * object by 33%! Comment out the following line:
- * #include <linux/wireless.h>
- * Other compile options can also reduce the size of it...
- *
- * MAC address and hardware detection:
- * -----------------------------------
- * The detection code for the WaveLAN checks that the first three
- * octets of the MAC address fit the company code. This type of
- * detection works well for AT&T cards (because the AT&T code is
- * hardcoded in wavelan.h), but of course will fail for other
- * manufacturers.
- *
- * If you are sure that your card is derived from the WaveLAN,
- * here is the way to configure it:
- * 1) Get your MAC address
- * a) With your card utilities (wfreqsel, instconf, etc.)
- * b) With the driver:
- * o compile the kernel with DEBUG_CONFIG_INFO enabled
- * o Boot and look the card messages
- *       o Boot and look at the card messages
- * 3) Compile and verify
- * 4) Send me the MAC code. I will include it in the next version.
- *
- */
-
-/* --------------------- WIRELESS EXTENSIONS --------------------- */
-/*
- * This driver is the first to support "wireless extensions".
- * This set of extensions provides a standard way to control the wireless
- * characteristics of the hardware. Applications such as mobile IP may
- * take advantage of it.
- *
- * It might be a good idea as well to fetch the wireless tools to
- * configure the device and play a bit.
- */
-
-/* ---------------------------- FILES ---------------------------- */
-/*
- * wavelan.c: actual code for the driver: C functions
- *
- * wavelan.p.h: private header: local types and variables for driver
- *
- * wavelan.h: description of the hardware interface and structs
- *
- * i82586.h: description of the Ethernet controller
- */
-
-/* --------------------------- HISTORY --------------------------- */
-/*
- * This is based on information in the drivers' headers. It may not be
- * accurate, and I guarantee only my best effort.
- *
- * The history of the WaveLAN drivers is as complicated as the history of
- * the WaveLAN itself (NCR -> AT&T -> Lucent).
- *
- * It all started with Anders Klemets <klemets@paul.rutgers.edu>
- * writing a WaveLAN ISA driver for the Mach microkernel. Girish
- * Welling <welling@paul.rutgers.edu> had also worked on it.
- * Keith Moore modified this for the PCMCIA hardware.
- *
- * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
- * and added specific PCMCIA support (there is currently no equivalent
- * of the PCMCIA package under BSD).
- *
- * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
- *
- * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
- *
- * Anthony D. Joseph <adj@lcs.mit.edu> started to modify Bruce's driver
- * (with help of the BSDI PCMCIA driver) for PCMCIA.
- * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished this work.
- * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to start
- * 2.00 cards correctly (2.4 GHz with frequency selection).
- * David Hinds <dahinds@users.sourceforge.net> integrated the whole in his
- * PCMCIA package (and bug corrections).
- *
- * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
- * patches to the PCMCIA driver. Later, I added code in the ISA driver
- * for Wireless Extensions and full support of frequency selection
- * cards. Then, I did the same to the PCMCIA driver, and did some
- * reorganisation. Finally, I came back to the ISA driver to
- * upgrade it at the same level as the PCMCIA one and reorganise
- * the code.
- * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
- * much needed information on the WaveLAN hardware.
- */
-
-/* The original copyrights and literature mention others' names and
- * credits. I don't know what their part in this development was.
- */
-
-/* By the way, for the copyright and legal stuff:
- * almost everybody wrote code under the GNU or BSD license (or similar),
- * and want their original copyright to remain somewhere in the
- * code (for myself, I go with the GPL).
- * Nobody wants to take responsibility for anything, except the fame.
- */
-
-/* --------------------------- CREDITS --------------------------- */
-/*
- * This software was developed as a component of the
- * Linux operating system.
- * It is based on other device drivers and information
- * either written or supplied by:
- * Ajay Bakre <bakre@paul.rutgers.edu>,
- * Donald Becker <becker@cesdis.gsfc.nasa.gov>,
- * Loeke Brederveld <Loeke.Brederveld@Utrecht.NCR.com>,
- * Brent Elphick <belphick@uwaterloo.ca>,
- * Anders Klemets <klemets@it.kth.se>,
- * Vladimir V. Kolpakov <w@stier.koenig.ru>,
- * Marc Meertens <Marc.Meertens@Utrecht.NCR.com>,
- * Pauline Middelink <middelin@polyware.iaf.nl>,
- * Robert Morris <rtm@das.harvard.edu>,
- * Jean Tourrilhes <jt@hpl.hp.com>,
- * Girish Welling <welling@paul.rutgers.edu>,
- * Clark Woodworth <clark@hiway1.exit109.com>
- * Yongguang Zhang <ygz@isl.hrl.hac.com>
- *
- * Thanks go also to:
- * James Ashton <jaa101@syseng.anu.edu.au>,
- * Alan Cox <alan@lxorguk.ukuu.org.uk>,
- * Allan Creighton <allanc@cs.usyd.edu.au>,
- * Matthew Geier <matthew@cs.usyd.edu.au>,
- * Remo di Giovanni <remo@cs.usyd.edu.au>,
- * Eckhard Grah <grah@wrcs1.urz.uni-wuppertal.de>,
- * Vipul Gupta <vgupta@cs.binghamton.edu>,
- * Mark Hagan <mhagan@wtcpost.daytonoh.NCR.COM>,
- * Tim Nicholson <tim@cs.usyd.edu.au>,
- * Ian Parkin <ian@cs.usyd.edu.au>,
- * John Rosenberg <johnr@cs.usyd.edu.au>,
- * George Rossi <george@phm.gov.au>,
- * Arthur Scott <arthur@cs.usyd.edu.au>,
- * Stanislav Sinyagin <stas@isf.ru>
- * and Peter Storey for their assistance and advice.
- *
- * Additional Credits:
- *
- * My development has been done initially under Debian 1.1 (Linux 2.0.x)
- * and now under Debian 2.2, initially with an HP Vectra XP/60, and now
- * an HP Vectra XP/90.
- *
- */
-
-/* ------------------------- IMPROVEMENTS ------------------------- */
-/*
- * I proudly present:
- *
- * Changes made in first pre-release:
- * ----------------------------------
- * - reorganisation of the code, function name change
- * - creation of private header (wavelan.p.h)
- * - reorganised debug messages
- * - more comments, history, etc.
- * - mmc_init: configure the PSA if not done
- * - mmc_init: correct default value of level threshold for PCMCIA
- * - mmc_init: better detection and initialization code for 2.00 cards
- * - better info at startup
- * - IRQ setting (note: this setting is permanent)
- * - watchdog: change strategy (and solve module removal problems)
- * - add wireless extensions (ioctl and get_wireless_stats)
- * get/set nwid/frequency on fly, info for /proc/net/wireless
- * - more wireless extensions: SETSPY and GETSPY
- * - make wireless extensions optional
- * - private ioctl to set/get quality and level threshold, histogram
- * - remove /proc/net/wavelan
- * - suppress useless stuff from lp (net_local)
- * - kernel 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
- * - add message level (debug stuff in /var/adm/debug and errors not
- * displayed at console and still in /var/adm/messages)
- * - multi device support
- * - start fixing the probe (init code)
- * - more inlines
- * - man page
- * - many other minor details and cleanups
- *
- * Changes made in second pre-release:
- * -----------------------------------
- * - clean up init code (probe and module init)
- * - better multiple device support (module)
- * - name assignment (module)
- *
- * Changes made in third pre-release:
- * ----------------------------------
- * - be more conservative on timers
- * - preliminary support for multicast (I still lack some details)
- *
- * Changes made in fourth pre-release:
- * -----------------------------------
- * - multicast (revisited and finished)
- * - avoid reset in set_multicast_list (a really big hack)
- * if somebody could apply this code for other i82586 based drivers
- * - share onboard memory 75% RU and 25% CU (instead of 50/50)
- *
- * Changes made for release in 2.1.15:
- * -----------------------------------
- * - change the detection code for multi manufacturer code support
- *
- * Changes made for release in 2.1.17:
- * -----------------------------------
- * - update to wireless extensions changes
- * - silly bug in card initial configuration (psa_conf_status)
- *
- * Changes made for release in 2.1.27 & 2.0.30:
- * --------------------------------------------
- * - small bug in debug code (probably not the last one...)
- * - remove extern keyword for wavelan_probe()
- * - level threshold is now a standard wireless extension (version 4 !)
- * - modules parameters types (new module interface)
- *
- * Changes made for release in 2.1.36:
- * -----------------------------------
- * - byte count stats (courtesy of David Hinds)
- * - remove dev_tint stuff (courtesy of David Hinds)
- * - encryption setting from Brent Elphick (thanks a lot!)
- * - 'ioaddr' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
- *
- * Other changes (not by me) :
- * -------------------------
- * - Spelling and gramar "rectification".
- *
- * Changes made for release in 2.0.37 & 2.2.2 :
- * ------------------------------------------
- * - Correct status in /proc/net/wireless
- * - Set PSA CRC to make PtP diagnostic tool happy (Bob Gray)
- * - Module init code doesn't fail if we find at least one card in
- * the address list (Karlis Peisenieks)
- * - Missing parenthesis (Christopher Peterson)
- * - Correct i82586 configuration parameters
- * - Encryption initialisation bug (Robert McCormack)
- * - New mac addresses detected in the probe
- * - Increase watchdog for busy environments
- *
- * Changes made for release in 2.0.38 & 2.2.7 :
- * ------------------------------------------
- * - Correct the reception logic to better report errors and avoid
- * sending bogus packets up the stack
- * - Delay RU config to avoid corrupting first received packet
- * - Change config completion code (to actually check something)
- * - Avoid reading out of bound in skbuf to transmit
- * - Rectify a lot of (useless) debugging code
- * - Change the way to `#ifdef SET_PSA_CRC'
- *
- * Changes made for release in 2.2.11 & 2.3.13 :
- * -------------------------------------------
- * - Change e-mail and web page addresses
- * - Watchdog timer is now correctly expressed in HZ, not in jiffies
- * - Add channel number to the list of frequencies in range
- * - Add the (short) list of bit-rates in range
- * - Develop a new sensitivity... (sens.value & sens.fixed)
- *
- * Changes made for release in 2.2.14 & 2.3.23 :
- * -------------------------------------------
- * - Fix check for root permission (break instead of exit)
- * - New nwid & encoding setting (Wireless Extension 9)
- *
- * Changes made for release in 2.3.49 :
- * ----------------------------------
- * - Indentation reformating (Alan)
- * - Update to new network API (softnet - 2.3.43) :
- * o replace dev->tbusy (Alan)
- * o replace dev->tstart (Alan)
- * o remove dev->interrupt (Alan)
- * o add SMP locking via spinlock in splxx (me)
- * o add spinlock in interrupt handler (me)
- * o use kernel watchdog instead of ours (me)
- * o increase watchdog timeout (kernel is more sensitive) (me)
- * o verify that all the changes make sense and work (me)
- * - Fix up a potential gotcha when reconfiguring and tighten a bit
- * the interactions with the Tx queue.
- *
- * Changes made for release in 2.4.0 :
- * ---------------------------------
- * - Fix the stupid spinlock bugs that I left in. The driver is now SMP
- * compliant and doesn't lock up at startup.
- *
- * Changes made for release in 2.5.2 :
- * ---------------------------------
- * - Use new driver API for Wireless Extensions :
- * o got rid of wavelan_ioctl()
- * o use a bunch of iw_handler instead
- *
- * Changes made for release in 2.5.35 :
- * ----------------------------------
- * - Set dev->trans_start to avoid filling the logs
- * - Better handling of spurious/bogus interrupts
- * - Avoid deadlocks in mmc_out()/mmc_in()
- *
- * Wishes & dreams:
- * ----------------
- * - roaming (see Pcmcia driver)
- */
-
-/***************************** INCLUDES *****************************/
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/stat.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-
-#include <linux/wireless.h> /* Wireless extensions */
-#include <net/iw_handler.h> /* Wireless handlers */
-
-/* WaveLAN declarations */
-#include "i82586.h"
-#include "wavelan.h"
-
-/************************** DRIVER OPTIONS **************************/
-/*
- * `#define' or `#undef' the following constants to change the behaviour
- * of the driver...
- */
-#undef SET_PSA_CRC /* Calculate and set the CRC on PSA (slower) */
-#define USE_PSA_CONFIG /* Use info from the PSA. */
-#undef EEPROM_IS_PROTECTED /* doesn't seem to be necessary */
-#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */
-#undef SET_MAC_ADDRESS /* Experimental */
-
-/* Warning: this stuff will slow down the driver. */
-#define WIRELESS_SPY /* Enable spying addresses. */
-#undef HISTOGRAM /* Enable histogram of signal level. */
-
-/****************************** DEBUG ******************************/
-
-#undef DEBUG_MODULE_TRACE /* module insertion/removal */
-#undef DEBUG_CALLBACK_TRACE /* calls made by Linux */
-#undef DEBUG_INTERRUPT_TRACE /* calls to handler */
-#undef DEBUG_INTERRUPT_INFO /* type of interrupt and so on */
-#define DEBUG_INTERRUPT_ERROR /* problems */
-#undef DEBUG_CONFIG_TRACE /* Trace the config functions. */
-#undef DEBUG_CONFIG_INFO /* what's going on */
-#define DEBUG_CONFIG_ERROR /* errors on configuration */
-#undef DEBUG_TX_TRACE /* transmission calls */
-#undef DEBUG_TX_INFO /* header of the transmitted packet */
-#undef DEBUG_TX_FAIL /* Normal failure conditions */
-#define DEBUG_TX_ERROR /* Unexpected conditions */
-#undef DEBUG_RX_TRACE		/* reception calls */
-#undef DEBUG_RX_INFO /* header of the received packet */
-#undef DEBUG_RX_FAIL /* Normal failure conditions */
-#define DEBUG_RX_ERROR /* Unexpected conditions */
-
-#undef DEBUG_PACKET_DUMP /* Dump packet on the screen if defined to 32. */
-#undef DEBUG_IOCTL_TRACE /* misc. call by Linux */
-#undef DEBUG_IOCTL_INFO /* various debugging info */
-#define DEBUG_IOCTL_ERROR /* what's going wrong */
-#define DEBUG_BASIC_SHOW /* Show basic startup info. */
-#undef DEBUG_VERSION_SHOW /* Print version info. */
-#undef DEBUG_PSA_SHOW /* Dump PSA to screen. */
-#undef DEBUG_MMC_SHOW /* Dump mmc to screen. */
-#undef DEBUG_SHOW_UNUSED /* Show unused fields too. */
-#undef DEBUG_I82586_SHOW /* Show i82586 status. */
-#undef DEBUG_DEVICE_SHOW /* Show device parameters. */
-
-/************************ CONSTANTS & MACROS ************************/
-
-#ifdef DEBUG_VERSION_SHOW
-static const char *version = "wavelan.c : v24 (SMP + wireless extensions) 11/12/01\n";
-#endif
-
-/* Watchdog timeout */
-#define WATCHDOG_JIFFIES (512*HZ/100)
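Written out, the timeout above does not depend on the HZ value the kernel was built with:

/* WATCHDOG_JIFFIES / HZ = (512 * HZ / 100) / HZ = 5.12 seconds
 * (give or take the integer truncation of 512*HZ/100). */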
-
-/* ------------------------ PRIVATE IOCTL ------------------------ */
-
-#define SIOCSIPQTHR SIOCIWFIRSTPRIV /* Set quality threshold */
-#define SIOCGIPQTHR SIOCIWFIRSTPRIV + 1 /* Get quality threshold */
-
-#define SIOCSIPHISTO SIOCIWFIRSTPRIV + 2 /* Set histogram ranges */
-#define SIOCGIPHISTO SIOCIWFIRSTPRIV + 3 /* Get histogram values */
-
-/****************************** TYPES ******************************/
-
-/* Shortcuts */
-typedef struct iw_statistics iw_stats;
-typedef struct iw_quality iw_qual;
-typedef struct iw_freq iw_freq;
-typedef struct net_local net_local;
-typedef struct timer_list timer_list;
-
-/* Basic types */
-typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
-
-/*
- * Static specific data for the interface.
- *
- * For each network interface, Linux keeps data in two structures: "device"
- * keeps the generic data (same format for everybody) and "net_local" keeps
- * additional specific data.
- */
-struct net_local
-{
- net_local * next; /* linked list of the devices */
- struct net_device * dev; /* reverse link */
- spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
- int nresets; /* number of hardware resets */
- u_char reconfig_82586; /* We need to reconfigure the controller. */
- u_char promiscuous; /* promiscuous mode */
- int mc_count; /* number of multicast addresses */
- u_short hacr; /* current host interface state */
-
- int tx_n_in_use;
- u_short rx_head;
- u_short rx_last;
- u_short tx_first_free;
- u_short tx_first_in_use;
-
- iw_stats wstats; /* Wireless-specific statistics */
-
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
-
-#ifdef HISTOGRAM
- int his_number; /* number of intervals */
- u_char his_range[16]; /* boundaries of interval ]n-1; n] */
- u_long his_sum[16]; /* sum in interval */
-#endif /* HISTOGRAM */
-};
-
-/**************************** PROTOTYPES ****************************/
-
-/* ----------------------- MISC. SUBROUTINES ------------------------ */
-static u_char
- wv_irq_to_psa(int);
-static int
- wv_psa_to_irq(u_char);
-/* ------------------- HOST ADAPTER SUBROUTINES ------------------- */
-static inline u_short /* data */
- hasr_read(u_long); /* Read the host interface: base address */
-static inline void
- hacr_write(u_long, /* Write to host interface: base address */
- u_short), /* data */
- hacr_write_slow(u_long,
- u_short),
- set_chan_attn(u_long, /* ioaddr */
- u_short), /* hacr */
- wv_hacr_reset(u_long), /* ioaddr */
- wv_16_off(u_long, /* ioaddr */
- u_short), /* hacr */
- wv_16_on(u_long, /* ioaddr */
- u_short), /* hacr */
- wv_ints_off(struct net_device *),
- wv_ints_on(struct net_device *);
-/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
-static void
- psa_read(u_long, /* Read the Parameter Storage Area. */
- u_short, /* hacr */
- int, /* offset in PSA */
- u_char *, /* buffer to fill */
- int), /* size to read */
- psa_write(u_long, /* Write to the PSA. */
- u_short, /* hacr */
- int, /* offset in PSA */
- u_char *, /* buffer in memory */
- int); /* length of buffer */
-static inline void
- mmc_out(u_long, /* Write 1 byte to the Modem Manag Control. */
- u_short,
- u_char),
- mmc_write(u_long, /* Write n bytes to the MMC. */
- u_char,
- u_char *,
- int);
-static inline u_char /* Read 1 byte from the MMC. */
- mmc_in(u_long,
- u_short);
-static inline void
- mmc_read(u_long, /* Read n bytes from the MMC. */
- u_char,
- u_char *,
- int),
- fee_wait(u_long, /* Wait for frequency EEPROM: base address */
- int, /* base delay to wait for */
- int); /* time to wait */
-static void
- fee_read(u_long, /* Read the frequency EEPROM: base address */
- u_short, /* destination offset */
- u_short *, /* data buffer */
- int); /* number of registers */
-/* ---------------------- I82586 SUBROUTINES ----------------------- */
-static /*inline*/ void
- obram_read(u_long, /* ioaddr */
- u_short, /* o */
- u_char *, /* b */
- int); /* n */
-static inline void
- obram_write(u_long, /* ioaddr */
- u_short, /* o */
- u_char *, /* b */
- int); /* n */
-static void
- wv_ack(struct net_device *);
-static inline int
- wv_synchronous_cmd(struct net_device *,
- const char *),
- wv_config_complete(struct net_device *,
- u_long,
- net_local *);
-static int
- wv_complete(struct net_device *,
- u_long,
- net_local *);
-static inline void
- wv_82586_reconfig(struct net_device *);
-/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
-#ifdef DEBUG_I82586_SHOW
-static void
- wv_scb_show(unsigned short);
-#endif
-static inline void
- wv_init_info(struct net_device *); /* display startup info */
-/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
-static iw_stats *
- wavelan_get_wireless_stats(struct net_device *);
-static void
- wavelan_set_multicast_list(struct net_device *);
-/* ----------------------- PACKET RECEPTION ----------------------- */
-static inline void
- wv_packet_read(struct net_device *, /* Read a packet from a frame. */
- u_short,
- int),
- wv_receive(struct net_device *); /* Read all packets waiting. */
-/* --------------------- PACKET TRANSMISSION --------------------- */
-static inline int
- wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer. */
- void *,
- short);
-static netdev_tx_t
- wavelan_packet_xmit(struct sk_buff *, /* Send a packet. */
- struct net_device *);
-/* -------------------- HARDWARE CONFIGURATION -------------------- */
-static inline int
- wv_mmc_init(struct net_device *), /* Initialize the modem. */
- wv_ru_start(struct net_device *), /* Start the i82586 receiver unit. */
- wv_cu_start(struct net_device *), /* Start the i82586 command unit. */
- wv_82586_start(struct net_device *); /* Start the i82586. */
-static void
- wv_82586_config(struct net_device *); /* Configure the i82586. */
-static inline void
- wv_82586_stop(struct net_device *);
-static int
- wv_hw_reset(struct net_device *), /* Reset the WaveLAN hardware. */
- wv_check_ioaddr(u_long, /* ioaddr */
- u_char *); /* mac address (read) */
-/* ---------------------- INTERRUPT HANDLING ---------------------- */
-static irqreturn_t
- wavelan_interrupt(int, /* interrupt handler */
- void *);
-static void
- wavelan_watchdog(struct net_device *); /* transmission watchdog */
-/* ------------------- CONFIGURATION CALLBACKS ------------------- */
-static int
- wavelan_open(struct net_device *), /* Open the device. */
- wavelan_close(struct net_device *), /* Close the device. */
- wavelan_config(struct net_device *, unsigned short);/* Configure one device. */
-extern struct net_device *wavelan_probe(int unit); /* See Space.c. */
-
-/**************************** VARIABLES ****************************/
-
-/*
- * This is the root of the linked list of WaveLAN drivers.
- * It is used to verify that we don't reuse the same base address
- * for two different drivers and to clean up when removing the module.
- */
-static net_local * wavelan_list = (net_local *) NULL;
-
-/*
- * This table is used to translate the PSA value to IRQ number
- * and vice versa.
- */
-static u_char irqvals[] =
-{
- 0, 0, 0, 0x01,
- 0x02, 0x04, 0, 0x08,
- 0, 0, 0x10, 0x20,
- 0x40, 0, 0, 0x80,
-};
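The bodies of wv_irq_to_psa() and wv_psa_to_irq() live in wavelan.c and are not part of this hunk; the sketch below only illustrates how a table like irqvals[] is typically used for the translation. The example_* names are hypothetical and the handling of unsupported IRQs is an assumption, not the driver's own code.

/* Illustrative sketch only -- not the driver's implementation. */
static u_char example_irq_to_psa(int irq)
{
	if (irq < 0 || irq >= (int)(sizeof(irqvals) / sizeof(irqvals[0])))
		return 0;
	return irqvals[irq];		/* 0 means "IRQ not supported" */
}

static int example_psa_to_irq(u_char irqval)
{
	int irq;

	for (irq = 0; irq < (int)(sizeof(irqvals) / sizeof(irqvals[0])); irq++)
		if (irqvals[irq] != 0 && irqvals[irq] == irqval)
			return irq;	/* first usable IRQ with this encoding */
	return -1;			/* no usable IRQ for this value */
}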
-
-/*
- * Table of the available I/O addresses (base addresses) for WaveLAN
- */
-static unsigned short iobase[] =
-{
-#if 0
- /* Leave out 0x3C0 for now -- seems to clash with some video
- * controllers.
- * Leave out the others too -- we will always use 0x390 and leave
- * 0x300 for the Ethernet device.
- * Jean II: 0x3E0 is fine as well.
- */
- 0x300, 0x390, 0x3E0, 0x3C0
-#endif /* 0 */
- 0x390, 0x3E0
-};
-
-#ifdef MODULE
-/* Parameters set by insmod */
-static int io[4];
-static int irq[4];
-static char *name[4];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(name, charp, NULL, 0);
-
-MODULE_PARM_DESC(io, "WaveLAN I/O base address(es), required");
-MODULE_PARM_DESC(irq, "WaveLAN IRQ number(s)");
-MODULE_PARM_DESC(name, "WaveLAN interface name(s)");
-#endif /* MODULE */
-
-#endif /* WAVELAN_P_H */
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
deleted file mode 100644
index 04f691d127b..00000000000
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ /dev/null
@@ -1,4610 +0,0 @@
-/*
- * Wavelan Pcmcia driver
- *
- * Jean II - HPLB '96
- *
- * Reorganisation and extension of the driver.
- * The original copyright follows. See wavelan_cs.p.h for details.
- *
- * This code is derived from Anthony D. Joseph's code and all the changes here
- * are also under the original copyright below.
- *
- * This code supports version 2.00 of WaveLAN/PCMCIA cards (2.4GHz), and
- * can work on Linux 2.0.36 with support of David Hinds' PCMCIA Card Services
- *
- * Joe Finney (joe@comp.lancs.ac.uk) at Lancaster University in UK added
- * critical code in the routine to initialize the Modem Management Controller.
- *
- * Thanks to Alan Cox and Bruce Janson for their advice.
- *
- * -- Yunzhou Li (scip4166@nus.sg)
- *
-#ifdef WAVELAN_ROAMING
- * Roaming support added 07/22/98 by Justin Seger (jseger@media.mit.edu)
- * based on patch by Joe Finney from Lancaster University.
-#endif
- *
- * Lucent (formerly AT&T GIS, formerly NCR) WaveLAN PCMCIA card: An
- * Ethernet-like radio transceiver controlled by an Intel 82593 coprocessor.
- *
- * A non-shared memory PCMCIA ethernet driver for linux
- *
- * ISA version modified to support PCMCIA by Anthony Joseph (adj@lcs.mit.edu)
- *
- *
- * Joseph O'Sullivan & John Langford (josullvn@cs.cmu.edu & jcl@cs.cmu.edu)
- *
- * Apr 2 '98 made changes to bring the i82593 control/int handling in line
- * with official specs...
- *
- ****************************************************************************
- * Copyright 1995
- * Anthony D. Joseph
- * Massachusetts Institute of Technology
- *
- * Permission to use, copy, modify, and distribute this program
- * for any purpose and without fee is hereby granted, provided
- * that this copyright and permission notice appear on all copies
- * and supporting documentation, the name of M.I.T. not be used
- * in advertising or publicity pertaining to distribution of the
- * program without specific prior permission, and notice be given
- * in supporting documentation that copying and distribution is
- * by permission of M.I.T. M.I.T. makes no representations about
- * the suitability of this software for any purpose. It is pro-
- * vided "as is" without express or implied warranty.
- ****************************************************************************
- *
- */
-
-/* Do *NOT* add other headers here, you are guaranteed to be wrong - Jean II */
-#include "wavelan_cs.p.h" /* Private header */
-
-#ifdef WAVELAN_ROAMING
-static void wl_cell_expiry(unsigned long data);
-static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp);
-static void wv_nwid_filter(unsigned char mode, net_local *lp);
-#endif /* WAVELAN_ROAMING */
-
-/************************* MISC SUBROUTINES **************************/
-/*
- * Subroutines which won't fit in one of the following categories
- * (wavelan modem or i82593)
- */
-
-/******************* MODEM MANAGEMENT SUBROUTINES *******************/
-/*
- * Useful subroutines to manage the modem of the wavelan
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Read from card's Host Adaptor Status Register.
- */
-static inline u_char
-hasr_read(u_long base)
-{
- return(inb(HASR(base)));
-} /* hasr_read */
-
-/*------------------------------------------------------------------*/
-/*
- * Write to card's Host Adapter Command Register.
- */
-static inline void
-hacr_write(u_long base,
- u_char hacr)
-{
- outb(hacr, HACR(base));
-} /* hacr_write */
-
-/*------------------------------------------------------------------*/
-/*
- * Write to card's Host Adapter Command Register. Include a delay for
- * those times when it is needed.
- */
-static void
-hacr_write_slow(u_long base,
- u_char hacr)
-{
- hacr_write(base, hacr);
- /* delay might only be needed sometimes */
- mdelay(1);
-} /* hacr_write_slow */
-
-/*------------------------------------------------------------------*/
-/*
- * Read the Parameter Storage Area from the WaveLAN card's memory
- */
-static void
-psa_read(struct net_device * dev,
- int o, /* offset in PSA */
- u_char * b, /* buffer to fill */
- int n) /* size to read */
-{
- net_local *lp = netdev_priv(dev);
- u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
-
- while(n-- > 0)
- {
- *b++ = readb(ptr);
- /* Due to a lack of address decode pins, the WaveLAN PCMCIA card
- * only supports reading even memory addresses. That means the
- * increment here MUST be two.
- * Because of that, we can't use memcpy_fromio()...
- */
- ptr += 2;
- }
-} /* psa_read */
-
-/*------------------------------------------------------------------*/
-/*
- * Write the Parameter Storage Area to the WaveLAN card's memory
- */
-static void
-psa_write(struct net_device * dev,
- int o, /* Offset in psa */
- u_char * b, /* Buffer in memory */
- int n) /* Length of buffer */
-{
- net_local *lp = netdev_priv(dev);
- u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
- int count = 0;
- unsigned int base = dev->base_addr;
- /* As there seems to be no PSA_BUSY flag as in the ISA model, we are
- * obliged to poll this address to know when the PSA is ready... */
- volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
- (psaoff(0, psa_comp_number) << 1);
-
- /* Authorize writing to PSA */
- hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
-
- while(n-- > 0)
- {
- /* write to PSA */
- writeb(*b++, ptr);
- ptr += 2;
-
- /* I don't have the spec, so I don't know what the correct
- * sequence to write is. This hack seems to work for me... */
- count = 0;
- while((readb(verify) != PSA_COMP_PCMCIA_915) && (count++ < 100))
- mdelay(1);
- }
-
- /* Put the host interface back in standard state */
- hacr_write(base, HACR_DEFAULT);
-} /* psa_write */
-
-#ifdef SET_PSA_CRC
-/*------------------------------------------------------------------*/
-/*
- * Calculate the PSA CRC
- * Thanks to Valster, Nico <NVALSTER@wcnd.nl.lucent.com> for the code
- * NOTE: By specifying a length including the CRC position the
- * returned value should be zero. (i.e. a correct checksum in the PSA)
- *
- * The Windows drivers don't use the CRC, but the AP and the PtP tool
- * depend on it.
- */
-static u_short
-psa_crc(unsigned char * psa, /* The PSA */
- int size) /* Number of bytes for the CRC */
-{
- int byte_cnt; /* Loop on the PSA */
- u_short crc_bytes = 0; /* Data in the PSA */
- int bit_cnt; /* Loop on the bits of the short */
-
- for(byte_cnt = 0; byte_cnt < size; byte_cnt++ )
- {
- crc_bytes ^= psa[byte_cnt]; /* It's an XOR */
-
- for(bit_cnt = 1; bit_cnt < 9; bit_cnt++ )
- {
- if(crc_bytes & 0x0001)
- crc_bytes = (crc_bytes >> 1) ^ 0xA001;
- else
- crc_bytes >>= 1 ;
- }
- }
-
- return crc_bytes;
-} /* psa_crc */
-#endif /* SET_PSA_CRC */
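The zero-residue property described above (recomputing the CRC over the data plus the stored CRC bytes yields zero) can be checked in isolation. This is a minimal user-space sketch using the same polynomial (0xA001) and the same low-byte-first storage order as update_psa_checksum() below; the two-byte payload is made up for the example.

#include <stdio.h>

/* Same bitwise CRC as psa_crc() above, rewritten for user space. */
static unsigned short crc16_a001(const unsigned char *buf, int len)
{
	unsigned short crc = 0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	unsigned char psa[4] = { 0x12, 0x34, 0x00, 0x00 };	/* 2 data bytes + CRC slot */
	unsigned short crc = crc16_a001(psa, 2);

	psa[2] = crc & 0xFF;		/* low byte first, as in the PSA */
	psa[3] = (crc >> 8) & 0xFF;

	/* Recomputing over data + stored CRC must give zero. */
	printf("residue = 0x%04X\n", crc16_a001(psa, 4));
	return 0;
}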
-
-/*------------------------------------------------------------------*/
-/*
- * update the checksum field in the Wavelan's PSA
- */
-static void
-update_psa_checksum(struct net_device * dev)
-{
-#ifdef SET_PSA_CRC
- psa_t psa;
- u_short crc;
-
- /* read the parameter storage area */
- psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
-
- /* update the checksum */
- crc = psa_crc((unsigned char *) &psa,
- sizeof(psa) - sizeof(psa.psa_crc[0]) - sizeof(psa.psa_crc[1])
- - sizeof(psa.psa_crc_status));
-
- psa.psa_crc[0] = crc & 0xFF;
- psa.psa_crc[1] = (crc & 0xFF00) >> 8;
-
- /* Write it ! */
- psa_write(dev, (char *)&psa.psa_crc - (char *)&psa,
- (unsigned char *)&psa.psa_crc, 2);
-
-#ifdef DEBUG_IOCTL_INFO
- printk (KERN_DEBUG "%s: update_psa_checksum(): crc = 0x%02x%02x\n",
- dev->name, psa.psa_crc[0], psa.psa_crc[1]);
-
- /* Check again (luxury !) */
- crc = psa_crc((unsigned char *) &psa,
- sizeof(psa) - sizeof(psa.psa_crc_status));
-
- if(crc != 0)
- printk(KERN_WARNING "%s: update_psa_checksum(): CRC does not agree with PSA data (even after recalculating)\n", dev->name);
-#endif /* DEBUG_IOCTL_INFO */
-#endif /* SET_PSA_CRC */
-} /* update_psa_checksum */
-
-/*------------------------------------------------------------------*/
-/*
- * Write 1 byte to the MMC.
- */
-static void
-mmc_out(u_long base,
- u_short o,
- u_char d)
-{
- int count = 0;
-
- /* Wait for MMC to go idle */
- while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
- udelay(10);
-
- outb((u_char)((o << 1) | MMR_MMI_WR), MMR(base));
- outb(d, MMD(base));
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to write bytes to the Modem Management Controller.
- * We start from the end because that is how it has to be done !
- */
-static void
-mmc_write(u_long base,
- u_char o,
- u_char * b,
- int n)
-{
- o += n;
- b += n;
-
- while(n-- > 0 )
- mmc_out(base, --o, *(--b));
-} /* mmc_write */
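A typical caller passes a field of the mm_t write image together with the field's offset inside the structure, which doubles as the MMC write address. The sketch below mirrors the NWID update performed by the roaming code later in this file; example_set_nwid is a hypothetical helper, while mm_t, mmw_netw_id_l/h and mmc_write() are the driver's own definitions.

/* Sketch: push a new 16-bit NWID into the MMC write registers. */
static void example_set_nwid(u_long base, unsigned short nwid)
{
	mm_t m;

	m.w.mmw_netw_id_l = nwid & 0xFF;
	m.w.mmw_netw_id_h = (nwid >> 8) & 0xFF;

	/* mmc_write() walks the two bytes from the last one backwards. */
	mmc_write(base, (char *)&m.w.mmw_netw_id_l - (char *)&m,
		  (unsigned char *)&m.w.mmw_netw_id_l, 2);
}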
-
-/*------------------------------------------------------------------*/
-/*
- * Read 1 byte from the MMC.
- * Optimised version for 1 byte, avoids using memory...
- */
-static u_char
-mmc_in(u_long base,
- u_short o)
-{
- int count = 0;
-
- while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
- udelay(10);
- outb(o << 1, MMR(base)); /* Set the read address */
-
- outb(0, MMD(base)); /* Required dummy write */
-
- while((count++ < 100) && (inb(HASR(base)) & HASR_MMI_BUSY))
- udelay(10);
- return (u_char) (inb(MMD(base))); /* Now do the actual read */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to read bytes from the Modem Management Controller.
- * The implementation is complicated by a lack of address lines,
- * which prevents decoding of the low-order bit.
- * (the per-byte code has been moved into mmc_in() above)
- * We start from the end because that is how it has to be done !
- */
-static void
-mmc_read(u_long base,
- u_char o,
- u_char * b,
- int n)
-{
- o += n;
- b += n;
-
- while(n-- > 0)
- *(--b) = mmc_in(base, --o);
-} /* mmc_read */
-
-/*------------------------------------------------------------------*/
-/*
- * Get the type of encryption available...
- */
-static inline int
-mmc_encr(u_long base) /* i/o port of the card */
-{
- int temp;
-
- temp = mmc_in(base, mmroff(0, mmr_des_avail));
- if((temp != MMR_DES_AVAIL_DES) && (temp != MMR_DES_AVAIL_AES))
- return 0;
- else
- return temp;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wait for the frequency EEprom to complete a command...
- */
-static void
-fee_wait(u_long base, /* i/o port of the card */
- int delay, /* Base delay to wait for */
- int number) /* Number of times to wait */
-{
- int count = 0; /* Wait only a limited time */
-
- while((count++ < number) &&
- (mmc_in(base, mmroff(0, mmr_fee_status)) & MMR_FEE_STATUS_BUSY))
- udelay(delay);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Read bytes from the Frequency EEprom (frequency select cards).
- */
-static void
-fee_read(u_long base, /* i/o port of the card */
- u_short o, /* destination offset */
- u_short * b, /* data buffer */
- int n) /* number of registers */
-{
- b += n; /* Position at the end of the area */
-
- /* Write the address */
- mmc_out(base, mmwoff(0, mmw_fee_addr), o + n - 1);
-
- /* Loop on all buffer */
- while(n-- > 0)
- {
- /* Write the read command */
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_READ);
-
- /* Wait until EEprom is ready (should be quick !) */
- fee_wait(base, 10, 100);
-
- /* Read the value */
- *--b = ((mmc_in(base, mmroff(0, mmr_fee_data_h)) << 8) |
- mmc_in(base, mmroff(0, mmr_fee_data_l)));
- }
-}
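For reference, the 16-bit word read back from offset 0x00 encodes the offset above 2.4 GHz in 0.5 MHz steps; the sketch below merely replays the decoding already done by wv_init_info() and wavelan_get_freq() further down. example_decode_fee_freq is a hypothetical helper, and 0x640 is a made-up sample corresponding to 2425 MHz.

static void example_decode_fee_freq(unsigned short freq)
{
	long mhz  = (freq >> 6) + 2400L;		/* whole MHz */
	int  half = (freq & 0x20) != 0;			/* extra 0.5 MHz if bit 5 set */
	long m    = ((freq >> 5) * 5 + 24000L) * 10000;	/* iw_freq.m, with e = 1 */

	printk(KERN_DEBUG "freq word 0x%04X -> %ld%s MHz (iw_freq m=%ld, e=1)\n",
	       freq, mhz, half ? ".5" : "", m);
}

/* example_decode_fee_freq(0x640) would report 2425 MHz, iw_freq m=242500000, e=1. */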
-
-
-/*------------------------------------------------------------------*/
-/*
- * Write bytes to the Frequency EEprom (frequency select cards).
- * This is a bit complicated, because the frequency eeprom has to
- * be unprotected and the write enabled.
- * Jean II
- */
-static void
-fee_write(u_long base, /* i/o port of the card */
- u_short o, /* destination offset */
- u_short * b, /* data buffer */
- int n) /* number of registers */
-{
- b += n; /* Position at the end of the area */
-
-#ifdef EEPROM_IS_PROTECTED /* disabled */
-#ifdef DOESNT_SEEM_TO_WORK /* disabled */
- /* Ask to read the protected register */
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRREAD);
-
- fee_wait(base, 10, 100);
-
- /* Read the protected register */
- printk("Protected 2 : %02X-%02X\n",
- mmc_in(base, mmroff(0, mmr_fee_data_h)),
- mmc_in(base, mmroff(0, mmr_fee_data_l)));
-#endif /* DOESNT_SEEM_TO_WORK */
-
- /* Enable protected register */
- mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PREN);
-
- fee_wait(base, 10, 100);
-
- /* Unprotect area */
- mmc_out(base, mmwoff(0, mmw_fee_addr), o + n);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
-#ifdef DOESNT_SEEM_TO_WORK /* disabled */
- /* Or use : */
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRCLEAR);
-#endif /* DOESNT_SEEM_TO_WORK */
-
- fee_wait(base, 10, 100);
-#endif /* EEPROM_IS_PROTECTED */
-
- /* Write enable */
- mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_EN);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WREN);
-
- fee_wait(base, 10, 100);
-
- /* Write the EEprom address */
- mmc_out(base, mmwoff(0, mmw_fee_addr), o + n - 1);
-
- /* Loop on all buffer */
- while(n-- > 0)
- {
- /* Write the value */
- mmc_out(base, mmwoff(0, mmw_fee_data_h), (*--b) >> 8);
- mmc_out(base, mmwoff(0, mmw_fee_data_l), *b & 0xFF);
-
- /* Write the write command */
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WRITE);
-
- /* Wavelan doc says : wait at least 10 ms for EEBUSY = 0 */
- mdelay(10);
- fee_wait(base, 10, 100);
- }
-
- /* Write disable */
- mmc_out(base, mmwoff(0, mmw_fee_addr), MMW_FEE_ADDR_DS);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_WDS);
-
- fee_wait(base, 10, 100);
-
-#ifdef EEPROM_IS_PROTECTED /* disabled */
- /* Reprotect EEprom */
- mmc_out(base, mmwoff(0, mmw_fee_addr), 0x00);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_PRWRITE);
-
- fee_wait(base, 10, 100);
-#endif /* EEPROM_IS_PROTECTED */
-}
-
-/******************* WaveLAN Roaming routines... ********************/
-
-#ifdef WAVELAN_ROAMING /* Conditional compile, see wavelan_cs.h */
-
-static unsigned char WAVELAN_BEACON_ADDRESS[] = {0x09,0x00,0x0e,0x20,0x03,0x00};
-
-static void wv_roam_init(struct net_device *dev)
-{
- net_local *lp= netdev_priv(dev);
-
- /* Do not remove this unless you have a good reason */
- printk(KERN_NOTICE "%s: Warning, you have enabled roaming on"
- " device %s !\n", dev->name, dev->name);
- printk(KERN_NOTICE "Roaming is currently an experimental unsupported feature"
- " of the Wavelan driver.\n");
- printk(KERN_NOTICE "It may work, but may also make the driver behave in"
- " erratic ways or crash.\n");
-
- lp->wavepoint_table.head=NULL; /* Initialise WavePoint table */
- lp->wavepoint_table.num_wavepoints=0;
- lp->wavepoint_table.locked=0;
- lp->curr_point=NULL; /* No default WavePoint */
- lp->cell_search=0;
-
- lp->cell_timer.data=(long)lp; /* Start cell expiry timer */
- lp->cell_timer.function=wl_cell_expiry;
- lp->cell_timer.expires=jiffies+CELL_TIMEOUT;
- add_timer(&lp->cell_timer);
-
- wv_nwid_filter(NWID_PROMISC,lp) ; /* Enter NWID promiscuous mode */
- /* to build up a good WavePoint */
- /* table... */
- printk(KERN_DEBUG "WaveLAN: Roaming enabled on device %s\n",dev->name);
-}
-
-static void wv_roam_cleanup(struct net_device *dev)
-{
- wavepoint_history *ptr,*old_ptr;
- net_local *lp= netdev_priv(dev);
-
- printk(KERN_DEBUG "WaveLAN: Roaming Disabled on device %s\n",dev->name);
-
- /* Fixme : maybe we should check that the timer exists before deleting it */
- del_timer(&lp->cell_timer); /* Remove cell expiry timer */
- ptr=lp->wavepoint_table.head; /* Clear device's WavePoint table */
- while(ptr!=NULL)
- {
- old_ptr=ptr;
- ptr=ptr->next;
- wl_del_wavepoint(old_ptr,lp);
- }
-}
-
-/* Enable/Disable NWID promiscuous mode on a given device */
-static void wv_nwid_filter(unsigned char mode, net_local *lp)
-{
- mm_t m;
- unsigned long flags;
-
-#ifdef WAVELAN_ROAMING_DEBUG
- printk(KERN_DEBUG "WaveLAN: NWID promisc %s, device %s\n",(mode==NWID_PROMISC) ? "on" : "off", lp->dev->name);
-#endif
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- m.w.mmw_loopt_sel = (mode==NWID_PROMISC) ? MMW_LOOPT_SEL_DIS_NWID : 0x00;
- mmc_write(lp->dev->base_addr, (char *)&m.w.mmw_loopt_sel - (char *)&m, (unsigned char *)&m.w.mmw_loopt_sel, 1);
-
- if(mode==NWID_PROMISC)
- lp->cell_search=1;
- else
- lp->cell_search=0;
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-}
-
-/* Find a record in the WavePoint table matching a given NWID */
-static wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
-{
- wavepoint_history *ptr=lp->wavepoint_table.head;
-
- while(ptr!=NULL){
- if(ptr->nwid==nwid)
- return ptr;
- ptr=ptr->next;
- }
- return NULL;
-}
-
-/* Create a new wavepoint table entry */
-static wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp)
-{
- wavepoint_history *new_wavepoint;
-
-#ifdef WAVELAN_ROAMING_DEBUG
- printk(KERN_DEBUG "WaveLAN: New Wavepoint, NWID:%.4X\n",nwid);
-#endif
-
- if(lp->wavepoint_table.num_wavepoints==MAX_WAVEPOINTS)
- return NULL;
-
- new_wavepoint = kmalloc(sizeof(wavepoint_history),GFP_ATOMIC);
- if(new_wavepoint==NULL)
- return NULL;
-
- new_wavepoint->nwid=nwid; /* New WavePoints NWID */
- new_wavepoint->average_fast=0; /* Running Averages..*/
- new_wavepoint->average_slow=0;
- new_wavepoint->qualptr=0; /* Start of ringbuffer */
- new_wavepoint->last_seq=seq-1; /* Last sequence no. seen */
- memset(new_wavepoint->sigqual,0,WAVEPOINT_HISTORY);/* Empty ringbuffer */
-
- new_wavepoint->next=lp->wavepoint_table.head;/* Add to wavepoint table */
- new_wavepoint->prev=NULL;
-
- if(lp->wavepoint_table.head!=NULL)
- lp->wavepoint_table.head->prev=new_wavepoint;
-
- lp->wavepoint_table.head=new_wavepoint;
-
- lp->wavepoint_table.num_wavepoints++; /* no. of visible wavepoints */
-
- return new_wavepoint;
-}
-
-/* Remove a wavepoint entry from WavePoint table */
-static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
-{
- if(wavepoint==NULL)
- return;
-
- if(lp->curr_point==wavepoint)
- lp->curr_point=NULL;
-
- if(wavepoint->prev!=NULL)
- wavepoint->prev->next=wavepoint->next;
-
- if(wavepoint->next!=NULL)
- wavepoint->next->prev=wavepoint->prev;
-
- if(lp->wavepoint_table.head==wavepoint)
- lp->wavepoint_table.head=wavepoint->next;
-
- lp->wavepoint_table.num_wavepoints--;
- kfree(wavepoint);
-}
-
-/* Timer callback function - checks WavePoint table for stale entries */
-static void wl_cell_expiry(unsigned long data)
-{
- net_local *lp=(net_local *)data;
- wavepoint_history *wavepoint=lp->wavepoint_table.head,*old_point;
-
-#if WAVELAN_ROAMING_DEBUG > 1
- printk(KERN_DEBUG "WaveLAN: Wavepoint timeout, dev %s\n",lp->dev->name);
-#endif
-
- if(lp->wavepoint_table.locked)
- {
-#if WAVELAN_ROAMING_DEBUG > 1
- printk(KERN_DEBUG "WaveLAN: Wavepoint table locked...\n");
-#endif
-
- lp->cell_timer.expires=jiffies+1; /* If table in use, come back later */
- add_timer(&lp->cell_timer);
- return;
- }
-
- while(wavepoint!=NULL)
- {
- if(time_after(jiffies, wavepoint->last_seen + CELL_TIMEOUT))
- {
-#ifdef WAVELAN_ROAMING_DEBUG
- printk(KERN_DEBUG "WaveLAN: Bye bye %.4X\n",wavepoint->nwid);
-#endif
-
- old_point=wavepoint;
- wavepoint=wavepoint->next;
- wl_del_wavepoint(old_point,lp);
- }
- else
- wavepoint=wavepoint->next;
- }
- lp->cell_timer.expires=jiffies+CELL_TIMEOUT;
- add_timer(&lp->cell_timer);
-}
-
-/* Update SNR history of a wavepoint */
-static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq)
-{
- int i=0,num_missed=0,ptr=0;
- int average_fast=0,average_slow=0;
-
- num_missed=(seq-wavepoint->last_seq)%WAVEPOINT_HISTORY;/* Have we missed
- any beacons? */
- if(num_missed)
- for(i=0;i<num_missed;i++)
- {
- wavepoint->sigqual[wavepoint->qualptr++]=0; /* If so, enter them as 0's */
- wavepoint->qualptr %=WAVEPOINT_HISTORY; /* in the ringbuffer. */
- }
- wavepoint->last_seen=jiffies; /* Add beacon to history */
- wavepoint->last_seq=seq;
- wavepoint->sigqual[wavepoint->qualptr++]=sigqual;
- wavepoint->qualptr %=WAVEPOINT_HISTORY;
- ptr=(wavepoint->qualptr-WAVEPOINT_FAST_HISTORY+WAVEPOINT_HISTORY)%WAVEPOINT_HISTORY;
-
- for(i=0;i<WAVEPOINT_FAST_HISTORY;i++) /* Update running averages */
- {
- average_fast+=wavepoint->sigqual[ptr++];
- ptr %=WAVEPOINT_HISTORY;
- }
-
- average_slow=average_fast;
- for(i=WAVEPOINT_FAST_HISTORY;i<WAVEPOINT_HISTORY;i++)
- {
- average_slow+=wavepoint->sigqual[ptr++];
- ptr %=WAVEPOINT_HISTORY;
- }
-
- wavepoint->average_fast=average_fast/WAVEPOINT_FAST_HISTORY;
- wavepoint->average_slow=average_slow/WAVEPOINT_HISTORY;
-}
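A small worked example of the two running averages computed above. WAVEPOINT_HISTORY and WAVEPOINT_FAST_HISTORY are defined in the private header, so the values used here are hypothetical and chosen only to make the arithmetic easy to follow.

/* Illustrative numbers only:
 *   WAVEPOINT_HISTORY      = 8   (ring of the last 8 beacon qualities)
 *   WAVEPOINT_FAST_HISTORY = 2   (the 2 most recent samples)
 *   sigqual ring, oldest..newest = { 10, 12, 14, 16, 18, 20, 30, 34 }
 *
 *   average_fast = (30 + 34) / 2                 = 32
 *   average_slow = (10+12+14+16+18+20+30+34) / 8 = 19 (integer division)
 *
 * average_fast reacts quickly to the newest beacons, while average_slow
 * smooths the signal quality over the whole window. */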
-
-/* Perform a handover to a new WavePoint */
-static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
-{
- unsigned int base = lp->dev->base_addr;
- mm_t m;
- unsigned long flags;
-
- if(wavepoint==lp->curr_point) /* Sanity check... */
- {
- wv_nwid_filter(!NWID_PROMISC,lp);
- return;
- }
-
-#ifdef WAVELAN_ROAMING_DEBUG
- printk(KERN_DEBUG "WaveLAN: Doing handover to %.4X, dev %s\n",wavepoint->nwid,lp->dev->name);
-#endif
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- m.w.mmw_netw_id_l = wavepoint->nwid & 0xFF;
- m.w.mmw_netw_id_h = (wavepoint->nwid & 0xFF00) >> 8;
-
- mmc_write(base, (char *)&m.w.mmw_netw_id_l - (char *)&m, (unsigned char *)&m.w.mmw_netw_id_l, 2);
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- wv_nwid_filter(!NWID_PROMISC,lp);
- lp->curr_point=wavepoint;
-}
-
-/* Called when a WavePoint beacon is received */
-static void wl_roam_gather(struct net_device * dev,
- u_char * hdr, /* Beacon header */
- u_char * stats) /* SNR, Signal quality
- of packet */
-{
- wavepoint_beacon *beacon= (wavepoint_beacon *)hdr; /* Rcvd. Beacon */
- unsigned short nwid=ntohs(beacon->nwid);
- unsigned short sigqual=stats[2] & MMR_SGNL_QUAL; /* SNR of beacon */
- wavepoint_history *wavepoint=NULL; /* WavePoint table entry */
- net_local *lp = netdev_priv(dev); /* Device info */
-
-#ifdef I_NEED_THIS_FEATURE
- /* Some people don't need this, some other may need it */
- nwid=nwid^ntohs(beacon->domain_id);
-#endif
-
-#if WAVELAN_ROAMING_DEBUG > 1
- printk(KERN_DEBUG "WaveLAN: beacon, dev %s:\n",dev->name);
- printk(KERN_DEBUG "Domain: %.4X NWID: %.4X SigQual=%d\n",ntohs(beacon->domain_id),nwid,sigqual);
-#endif
-
- lp->wavepoint_table.locked=1; /* <Mutex> */
-
- wavepoint=wl_roam_check(nwid,lp); /* Find WavePoint table entry */
- if(wavepoint==NULL) /* If no entry, Create a new one... */
- {
- wavepoint=wl_new_wavepoint(nwid,beacon->seq,lp);
- if(wavepoint==NULL)
- goto out;
- }
- if(lp->curr_point==NULL) /* If this is the only WavePoint, */
- wv_roam_handover(wavepoint, lp); /* Jump on it! */
-
- wl_update_history(wavepoint, sigqual, beacon->seq); /* Update SNR history
- stats. */
-
- if(lp->curr_point->average_slow < SEARCH_THRESH_LOW) /* If our current */
- if(!lp->cell_search) /* WavePoint is getting faint, */
- wv_nwid_filter(NWID_PROMISC,lp); /* start looking for a new one */
-
- if(wavepoint->average_slow >
- lp->curr_point->average_slow + WAVELAN_ROAMING_DELTA)
- wv_roam_handover(wavepoint, lp); /* Handover to a better WavePoint */
-
- if(lp->curr_point->average_slow > SEARCH_THRESH_HIGH) /* If our SNR is */
- if(lp->cell_search) /* getting better, drop out of cell search mode */
- wv_nwid_filter(!NWID_PROMISC,lp);
-
-out:
- lp->wavepoint_table.locked=0; /* </MUTEX> :-) */
-}
-
-/* Test whether this MAC frame is a WavePoint beacon */
-static inline int WAVELAN_BEACON(unsigned char *data)
-{
- wavepoint_beacon *beacon= (wavepoint_beacon *)data;
- static const wavepoint_beacon beacon_template={0xaa,0xaa,0x03,0x08,0x00,0x0e,0x20,0x03,0x00};
-
- if(memcmp(beacon,&beacon_template,9)==0)
- return 1;
- else
- return 0;
-}
-#endif /* WAVELAN_ROAMING */
-
-/************************ I82593 SUBROUTINES *************************/
-/*
- * Useful subroutines to manage the Ethernet controller
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to synchronously send a command to the i82593 chip.
- * Should be called with interrupts disabled.
- * (called by wv_packet_write(), wv_ru_stop(), wv_ru_start(),
- * wv_82593_config() & wv_diag())
- */
-static int
-wv_82593_cmd(struct net_device * dev,
- char * str,
- int cmd,
- int result)
-{
- unsigned int base = dev->base_addr;
- int status;
- int wait_completed;
- long spin;
-
- /* Spin until the chip finishes executing its current command (if any) */
- spin = 1000;
- do
- {
- /* Time calibration of the loop */
- udelay(10);
-
- /* Read the interrupt register */
- outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
- status = inb(LCSR(base));
- }
- while(((status & SR3_EXEC_STATE_MASK) != SR3_EXEC_IDLE) && (spin-- > 0));
-
- /* If the interrupt hasn't been posted */
- if (spin < 0) {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "wv_82593_cmd: %s timeout (previous command), status 0x%02x\n",
- str, status);
-#endif
- return(FALSE);
- }
-
- /* Issue the command to the controller */
- outb(cmd, LCCR(base));
-
- /* If we don't have to check the result of the command...
- * Note : this means that the irq handler will deal with it */
- if(result == SR0_NO_RESULT)
- return(TRUE);
-
- /* We are waiting for command completion */
- wait_completed = TRUE;
-
- /* Busy wait while the LAN controller executes the command. */
- spin = 1000;
- do
- {
- /* Time calibration of the loop */
- udelay(10);
-
- /* Read the interrupt register */
- outb(CR0_STATUS_0 | OP0_NOP, LCCR(base));
- status = inb(LCSR(base));
-
- /* Check if there was an interrupt posted */
- if((status & SR0_INTERRUPT))
- {
- /* Acknowledge the interrupt */
- outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
-
- /* Check if interrupt is a command completion */
- if(((status & SR0_BOTH_RX_TX) != SR0_BOTH_RX_TX) &&
- ((status & SR0_BOTH_RX_TX) != 0x0) &&
- !(status & SR0_RECEPTION))
- {
- /* Signal command completion */
- wait_completed = FALSE;
- }
- else
- {
- /* Note : Rx interrupts will be handled later, because we can
- * handle multiple Rx packets at once */
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_INFO "wv_82593_cmd: not our interrupt\n");
-#endif
- }
- }
- }
- while(wait_completed && (spin-- > 0));
-
- /* If the interrupt hasn't been posted */
- if(wait_completed)
- {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "wv_82593_cmd: %s timeout, status 0x%02x\n",
- str, status);
-#endif
- return(FALSE);
- }
-
- /* Check the return code returned by the card (see above) against
- * the expected return code provided by the caller */
- if((status & SR0_EVENT_MASK) != result)
- {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "wv_82593_cmd: %s failed, status = 0x%x\n",
- str, status);
-#endif
- return(FALSE);
- }
-
- return(TRUE);
-} /* wv_82593_cmd */
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does a 593 op-code number 7, and obtains the diagnose
- * status for the WaveLAN.
- */
-static inline int
-wv_diag(struct net_device * dev)
-{
- return(wv_82593_cmd(dev, "wv_diag(): diagnose",
- OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED));
-} /* wv_diag */
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to read len bytes from the i82593's ring buffer, starting at
- * chip address addr. The results read from the chip are stored in buf.
- * The return value is the address to use for the next call.
- */
-static int
-read_ringbuf(struct net_device * dev,
- int addr,
- char * buf,
- int len)
-{
- unsigned int base = dev->base_addr;
- int ring_ptr = addr;
- int chunk_len;
- char * buf_ptr = buf;
-
- /* Get all the buffer */
- while(len > 0)
- {
- /* Position the Program I/O Register at the ring buffer pointer */
- outb(ring_ptr & 0xff, PIORL(base));
- outb(((ring_ptr >> 8) & PIORH_MASK), PIORH(base));
-
- /* First, determine how much we can read without wrapping around the
- ring buffer */
- if((addr + len) < (RX_BASE + RX_SIZE))
- chunk_len = len;
- else
- chunk_len = RX_BASE + RX_SIZE - addr;
- insb(PIOP(base), buf_ptr, chunk_len);
- buf_ptr += chunk_len;
- len -= chunk_len;
- ring_ptr = (ring_ptr - RX_BASE + chunk_len) % RX_SIZE + RX_BASE;
- }
- return(ring_ptr);
-} /* read_ringbuf */
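One pass through the wraparound arithmetic above, with hypothetical ring parameters; RX_BASE and RX_SIZE are really defined in the private header, and the numbers below are made up purely for illustration.

/* Hypothetical ring: RX_BASE = 0x800, RX_SIZE = 0x800, so the receive
 * ring occupies chip addresses 0x800..0xFFF.
 *
 * Reading len = 0x20 bytes starting at addr = 0xFF0:
 *
 *   chunk_len = RX_BASE + RX_SIZE - addr    = 0x1000 - 0xFF0 = 0x10
 *   ring_ptr  = (0xFF0 - RX_BASE + 0x10) % RX_SIZE + RX_BASE = 0x800
 *
 * so the first 0x10 bytes are read up to the top of the ring, and the
 * next loop iteration fetches the remaining 0x10 bytes from 0x800 up. */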
-
-/*------------------------------------------------------------------*/
-/*
- * Reconfigure the i82593, or at least ask for it...
- * Because wv_82593_config() uses the transmission buffer, we must do it
- * only when we are sure that there is no transmission in progress, so we
- * do it now or in wavelan_packet_xmit() (I can't find any better place,
- * wavelan_interrupt is not an option...), so you may sometimes experience
- * some delay...
- */
-static void
-wv_82593_reconfig(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
- struct pcmcia_device * link = lp->link;
- unsigned long flags;
-
- /* Arm the flag; it will be cleared in wv_82593_config() */
- lp->reconfig_82593 = TRUE;
-
- /* Check if we can do it now ! */
- if((link->open) && (netif_running(dev)) && !(netif_queue_stopped(dev)))
- {
- spin_lock_irqsave(&lp->spinlock, flags); /* Disable interrupts */
- wv_82593_config(dev);
- spin_unlock_irqrestore(&lp->spinlock, flags); /* Re-enable interrupts */
- }
- else
- {
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG
- "%s: wv_82593_reconfig(): delayed (state = %lX, link = %d)\n",
- dev->name, dev->state, link->open);
-#endif
- }
-}
-
-/********************* DEBUG & INFO SUBROUTINES *********************/
-/*
- * These routines are used in the code to show debug information.
- * Most of the time, they dump the contents of hardware structures...
- */
-
-#ifdef DEBUG_PSA_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted contents of the Parameter Storage Area.
- */
-static void
-wv_psa_show(psa_t * p)
-{
- printk(KERN_DEBUG "##### wavelan psa contents: #####\n");
- printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
- p->psa_io_base_addr_1,
- p->psa_io_base_addr_2,
- p->psa_io_base_addr_3,
- p->psa_io_base_addr_4);
- printk(KERN_DEBUG "psa_rem_boot_addr_1: 0x%02X %02X %02X\n",
- p->psa_rem_boot_addr_1,
- p->psa_rem_boot_addr_2,
- p->psa_rem_boot_addr_3);
- printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
- printk("psa_int_req_no: %d\n", p->psa_int_req_no);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_unused0[]: %pM\n", p->psa_unused0);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "psa_univ_mac_addr[]: %pM\n", p->psa_univ_mac_addr);
- printk(KERN_DEBUG "psa_local_mac_addr[]: %pM\n", p->psa_local_mac_addr);
- printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel);
- printk("psa_comp_number: %d, ", p->psa_comp_number);
- printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
- printk(KERN_DEBUG "psa_feature_select/decay_prm: 0x%02x, ",
- p->psa_feature_select);
- printk("psa_subband/decay_update_prm: %d\n", p->psa_subband);
- printk(KERN_DEBUG "psa_quality_thr: 0x%02x, ", p->psa_quality_thr);
- printk("psa_mod_delay: 0x%02x\n", p->psa_mod_delay);
- printk(KERN_DEBUG "psa_nwid: 0x%02x%02x, ", p->psa_nwid[0], p->psa_nwid[1]);
- printk("psa_nwid_select: %d\n", p->psa_nwid_select);
- printk(KERN_DEBUG "psa_encryption_select: %d, ", p->psa_encryption_select);
- printk("psa_encryption_key[]: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- p->psa_encryption_key[0],
- p->psa_encryption_key[1],
- p->psa_encryption_key[2],
- p->psa_encryption_key[3],
- p->psa_encryption_key[4],
- p->psa_encryption_key[5],
- p->psa_encryption_key[6],
- p->psa_encryption_key[7]);
- printk(KERN_DEBUG "psa_databus_width: %d\n", p->psa_databus_width);
- printk(KERN_DEBUG "psa_call_code/auto_squelch: 0x%02x, ",
- p->psa_call_code[0]);
- printk("psa_call_code[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- p->psa_call_code[0],
- p->psa_call_code[1],
- p->psa_call_code[2],
- p->psa_call_code[3],
- p->psa_call_code[4],
- p->psa_call_code[5],
- p->psa_call_code[6],
- p->psa_call_code[7]);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
- p->psa_reserved[0],
- p->psa_reserved[1]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
- printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
- printk("psa_crc_status: 0x%02x\n", p->psa_crc_status);
-} /* wv_psa_show */
-#endif /* DEBUG_PSA_SHOW */
-
-#ifdef DEBUG_MMC_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the Modem Management Controller.
- * This function needs to be completed...
- */
-static void
-wv_mmc_show(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- mmr_t m;
- unsigned long flags;
-
- /* Basic check */
- if(hasr_read(base) & HASR_NO_CLK)
- {
- printk(KERN_WARNING "%s: wv_mmc_show: modem not connected\n",
- dev->name);
- return;
- }
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Read the mmc */
- mmc_out(base, mmwoff(0, mmw_freeze), 1);
- mmc_read(base, 0, (u_char *)&m, sizeof(m));
- mmc_out(base, mmwoff(0, mmw_freeze), 0);
-
- /* Don't forget to update statistics */
- lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- printk(KERN_DEBUG "##### wavelan modem status registers: #####\n");
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "mmc_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- m.mmr_unused0[0],
- m.mmr_unused0[1],
- m.mmr_unused0[2],
- m.mmr_unused0[3],
- m.mmr_unused0[4],
- m.mmr_unused0[5],
- m.mmr_unused0[6],
- m.mmr_unused0[7]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "Encryption algorithm: %02X - Status: %02X\n",
- m.mmr_des_avail, m.mmr_des_status);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "mmc_unused1[]: %02X:%02X:%02X:%02X:%02X\n",
- m.mmr_unused1[0],
- m.mmr_unused1[1],
- m.mmr_unused1[2],
- m.mmr_unused1[3],
- m.mmr_unused1[4]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "dce_status: 0x%x [%s%s%s%s]\n",
- m.mmr_dce_status,
- (m.mmr_dce_status & MMR_DCE_STATUS_RX_BUSY) ? "energy detected,":"",
- (m.mmr_dce_status & MMR_DCE_STATUS_LOOPT_IND) ?
- "loop test indicated," : "",
- (m.mmr_dce_status & MMR_DCE_STATUS_TX_BUSY) ? "transmitter on," : "",
- (m.mmr_dce_status & MMR_DCE_STATUS_JBR_EXPIRED) ?
- "jabber timer expired," : "");
- printk(KERN_DEBUG "Dsp ID: %02X\n",
- m.mmr_dsp_id);
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "mmc_unused2[]: %02X:%02X\n",
- m.mmr_unused2[0],
- m.mmr_unused2[1]);
-#endif /* DEBUG_SHOW_UNUSED */
- printk(KERN_DEBUG "# correct_nwid: %d, # wrong_nwid: %d\n",
- (m.mmr_correct_nwid_h << 8) | m.mmr_correct_nwid_l,
- (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l);
- printk(KERN_DEBUG "thr_pre_set: 0x%x [current signal %s]\n",
- m.mmr_thr_pre_set & MMR_THR_PRE_SET,
- (m.mmr_thr_pre_set & MMR_THR_PRE_SET_CUR) ? "above" : "below");
- printk(KERN_DEBUG "signal_lvl: %d [%s], ",
- m.mmr_signal_lvl & MMR_SIGNAL_LVL,
- (m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) ? "new msg" : "no new msg");
- printk("silence_lvl: %d [%s], ", m.mmr_silence_lvl & MMR_SILENCE_LVL,
- (m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) ? "update done" : "no new update");
- printk("sgnl_qual: 0x%x [%s]\n", m.mmr_sgnl_qual & MMR_SGNL_QUAL,
- (m.mmr_sgnl_qual & MMR_SGNL_QUAL_ANT) ? "Antenna 1" : "Antenna 0");
-#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "netw_id_l: %x\n", m.mmr_netw_id_l);
-#endif /* DEBUG_SHOW_UNUSED */
-} /* wv_mmc_show */
-#endif /* DEBUG_MMC_SHOW */
-
-#ifdef DEBUG_I82593_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the i82593's receive unit.
- */
-static void
-wv_ru_show(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
-
- printk(KERN_DEBUG "##### wavelan i82593 receiver status: #####\n");
- printk(KERN_DEBUG "ru: rfp %d stop %d", lp->rfp, lp->stop);
- /*
- * Not implemented yet...
- */
- printk("\n");
-} /* wv_ru_show */
-#endif /* DEBUG_I82593_SHOW */
-
-#ifdef DEBUG_DEVICE_SHOW
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the WaveLAN PCMCIA device driver.
- */
-static void
-wv_dev_show(struct net_device * dev)
-{
- printk(KERN_DEBUG "dev:");
- printk(" state=%lX,", dev->state);
- printk(" trans_start=%ld,", dev->trans_start);
- printk(" flags=0x%x,", dev->flags);
- printk("\n");
-} /* wv_dev_show */
-
-/*------------------------------------------------------------------*/
-/*
- * Print the formatted status of the WaveLAN PCMCIA device driver's
- * private information.
- */
-static void
-wv_local_show(struct net_device * dev)
-{
- net_local *lp = netdev_priv(dev);
-
- printk(KERN_DEBUG "local:");
- /*
- * Not implemented yet...
- */
- printk("\n");
-} /* wv_local_show */
-#endif /* DEBUG_DEVICE_SHOW */
-
-#if defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO)
-/*------------------------------------------------------------------*/
-/*
- * Dump packet header (and content if necessary) on the screen
- */
-static void
-wv_packet_info(u_char * p, /* Packet to dump */
- int length, /* Length of the packet */
- char * msg1, /* Name of the device */
- char * msg2) /* Name of the function */
-{
- int i;
- int maxi;
-
- printk(KERN_DEBUG "%s: %s(): dest %pM, length %d\n",
- msg1, msg2, p, length);
- printk(KERN_DEBUG "%s: %s(): src %pM, type 0x%02X%02X\n",
- msg1, msg2, &p[6], p[12], p[13]);
-
-#ifdef DEBUG_PACKET_DUMP
-
- printk(KERN_DEBUG "data=\"");
-
- if((maxi = length) > DEBUG_PACKET_DUMP)
- maxi = DEBUG_PACKET_DUMP;
- for(i = 14; i < maxi; i++)
- if(p[i] >= ' ' && p[i] <= '~')
- printk(" %c", p[i]);
- else
- printk("%02X", p[i]);
- if(maxi < length)
- printk("..");
- printk("\"\n");
- printk(KERN_DEBUG "\n");
-#endif /* DEBUG_PACKET_DUMP */
-}
-#endif /* defined(DEBUG_RX_INFO) || defined(DEBUG_TX_INFO) */
-
-/*------------------------------------------------------------------*/
-/*
- * This is the information which is displayed by the driver at startup.
- * There are a lot of flags to configure it as you wish...
- */
-static void
-wv_init_info(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- psa_t psa;
-
- /* Read the parameter storage area */
- psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
-
-#ifdef DEBUG_PSA_SHOW
- wv_psa_show(&psa);
-#endif
-#ifdef DEBUG_MMC_SHOW
- wv_mmc_show(dev);
-#endif
-#ifdef DEBUG_I82593_SHOW
- wv_ru_show(dev);
-#endif
-
-#ifdef DEBUG_BASIC_SHOW
- /* Now, let's go for the basic stuff */
- printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, hw_addr %pM",
- dev->name, base, dev->irq, dev->dev_addr);
-
- /* Print current network id */
- if(psa.psa_nwid_select)
- printk(", nwid 0x%02X-%02X", psa.psa_nwid[0], psa.psa_nwid[1]);
- else
- printk(", nwid off");
-
- /* If 2.00 card */
- if(!(mmc_in(base, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
- {
- unsigned short freq;
-
- /* Ask the EEprom to read the frequency from the first area */
- fee_read(base, 0x00 /* 1st area - frequency... */,
- &freq, 1);
-
- /* Print frequency */
- printk(", 2.00, %ld", (freq >> 6) + 2400L);
-
- /* Hack !!! */
- if(freq & 0x20)
- printk(".5");
- }
- else
- {
- printk(", PCMCIA, ");
- switch (psa.psa_subband)
- {
- case PSA_SUBBAND_915:
- printk("915");
- break;
- case PSA_SUBBAND_2425:
- printk("2425");
- break;
- case PSA_SUBBAND_2460:
- printk("2460");
- break;
- case PSA_SUBBAND_2484:
- printk("2484");
- break;
- case PSA_SUBBAND_2430_5:
- printk("2430.5");
- break;
- default:
- printk("unknown");
- }
- }
-
- printk(" MHz\n");
-#endif /* DEBUG_BASIC_SHOW */
-
-#ifdef DEBUG_VERSION_SHOW
- /* Print version information */
- printk(KERN_NOTICE "%s", version);
-#endif
-} /* wv_init_info */
-
-/********************* IOCTL, STATS & RECONFIG *********************/
-/*
- * Here we find routines that are called by Linux on various
- * occasions after the configuration, and not for transmitting data.
- * These may be called when the user uses ifconfig, /proc/net/dev
- * or wireless extensions.
- */
-
-
-/*------------------------------------------------------------------*/
-/*
- * Set or clear the multicast filter for this adaptor.
- * num_addrs == -1 Promiscuous mode, receive all packets
- * num_addrs == 0 Normal mode, clear multicast list
- * num_addrs > 0 Multicast mode, receive normal and MC packets,
- * and do best-effort filtering.
- */
-
-static void
-wavelan_set_multicast_list(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n", dev->name);
-#endif
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, netdev_mc_count(dev));
-#endif
-
- if(dev->flags & IFF_PROMISC)
- {
- /*
- * Enable promiscuous mode: receive all packets.
- */
- if(!lp->promiscuous)
- {
- lp->promiscuous = 1;
- lp->allmulticast = 0;
- lp->mc_count = 0;
-
- wv_82593_reconfig(dev);
- }
- }
- else
- /* If all multicast addresses are requested,
- * or there are too many multicast addresses for the hardware filter */
- if((dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > I82593_MAX_MULTICAST_ADDRESSES))
- {
- /*
- * Disable promiscuous mode, but activate the all-multicast mode
- */
- if(!lp->allmulticast)
- {
- lp->promiscuous = 0;
- lp->allmulticast = 1;
- lp->mc_count = 0;
-
- wv_82593_reconfig(dev);
- }
- }
- else
- /* If there are some multicast addresses in the list */
- if (!netdev_mc_empty(dev)) {
- /*
- * Disable promiscuous mode, but receive all packets
- * in multicast list
- */
-#ifdef MULTICAST_AVOID
- if(lp->promiscuous || lp->allmulticast ||
- (netdev_mc_count(dev) != lp->mc_count))
-#endif
- {
- lp->promiscuous = 0;
- lp->allmulticast = 0;
- lp->mc_count = netdev_mc_count(dev);
-
- wv_82593_reconfig(dev);
- }
- }
- else
- {
- /*
- * Switch to normal mode: disable promiscuous mode and
- * clear the multicast list.
- */
- if(lp->promiscuous || lp->mc_count == 0)
- {
- lp->promiscuous = 0;
- lp->allmulticast = 0;
- lp->mc_count = 0;
-
- wv_82593_reconfig(dev);
- }
- }
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_set_multicast_list()\n", dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This function doesn't exist...
- * (Note : it was a nice way to test the reconfigure stuff...)
- */
-#ifdef SET_MAC_ADDRESS
-static int
-wavelan_set_mac_address(struct net_device * dev,
- void * addr)
-{
- struct sockaddr * mac = addr;
-
- /* Copy the address */
- memcpy(dev->dev_addr, mac->sa_data, WAVELAN_ADDR_SIZE);
-
- /* Reconfig the beast */
- wv_82593_reconfig(dev);
-
- return 0;
-}
-#endif /* SET_MAC_ADDRESS */
-
-
-/*------------------------------------------------------------------*/
-/*
- * Frequency setting (for hardware capable of it)
- * It's a bit complicated and you don't really want to look into it...
- */
-static int
-wv_set_frequency(u_long base, /* i/o port of the card */
- iw_freq * frequency)
-{
- const int BAND_NUM = 10; /* Number of bands */
- long freq = 0L; /* offset to 2.4 GHz in .5 MHz */
-#ifdef DEBUG_IOCTL_INFO
- int i;
-#endif
-
- /* Setting by frequency */
- /* Theoretically, you may set any frequency between
- * the two limits with a 0.5 MHz precision. In practice,
- * I don't want you to have trouble with local
- * regulations... */
- if((frequency->e == 1) &&
- (frequency->m >= (int) 2.412e8) && (frequency->m <= (int) 2.487e8))
- {
- freq = ((frequency->m / 10000) - 24000L) / 5;
- }
-
- /* Setting by channel (same as wfreqsel) */
- /* Warning : each channel is 22MHz wide, so some of the channels
- * will interfere... */
- if((frequency->e == 0) &&
- (frequency->m >= 0) && (frequency->m < BAND_NUM))
- {
- /* Get frequency offset. */
- freq = channel_bands[frequency->m] >> 1;
- }
-
- /* Verify if the frequency is allowed */
- if(freq != 0L)
- {
- u_short table[10]; /* Authorized frequency table */
-
- /* Read the frequency table */
- fee_read(base, 0x71 /* frequency table */,
- table, 10);
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "Frequency table :");
- for(i = 0; i < 10; i++)
- {
- printk(" %04X",
- table[i]);
- }
- printk("\n");
-#endif
-
- /* Look in the table if the frequency is allowed */
- if(!(table[9 - ((freq - 24) / 16)] &
- (1 << ((freq - 24) % 16))))
- return -EINVAL; /* not allowed */
- }
- else
- return -EINVAL;
-
- /* If we get a usable frequency */
- if(freq != 0L)
- {
- unsigned short area[16];
- unsigned short dac[2];
- unsigned short area_verify[16];
- unsigned short dac_verify[2];
- /* Corresponding gain (in the power adjust value table)
- * see AT&T Wavelan Data Manual, REF 407-024689/E, page 3-8
- * & WCIN062D.DOC, page 6.2.9 */
- unsigned short power_limit[] = { 40, 80, 120, 160, 0 };
- int power_band = 0; /* Selected band */
- unsigned short power_adjust; /* Correct value */
-
- /* Search for the gain */
- power_band = 0;
- while((freq > power_limit[power_band]) &&
- (power_limit[++power_band] != 0))
- ;
-
- /* Read the first area */
- fee_read(base, 0x00,
- area, 16);
-
- /* Read the DAC */
- fee_read(base, 0x60,
- dac, 2);
-
- /* Read the new power adjust value */
- fee_read(base, 0x6B - (power_band >> 1),
- &power_adjust, 1);
- if(power_band & 0x1)
- power_adjust >>= 8;
- else
- power_adjust &= 0xFF;
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "Wavelan EEprom Area 1 :");
- for(i = 0; i < 16; i++)
- {
- printk(" %04X",
- area[i]);
- }
- printk("\n");
-
- printk(KERN_DEBUG "Wavelan EEprom DAC : %04X %04X\n",
- dac[0], dac[1]);
-#endif
-
- /* Frequency offset (for info only...) */
- area[0] = ((freq << 5) & 0xFFE0) | (area[0] & 0x1F);
-
- /* Receiver Principle main divider coefficient */
- area[3] = (freq >> 1) + 2400L - 352L;
- area[2] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
-
- /* Transmitter Main divider coefficient */
- area[13] = (freq >> 1) + 2400L;
- area[12] = ((freq & 0x1) << 4) | (area[2] & 0xFFEF);
-
- /* Other parts of the area are flags, bit streams or unused... */
-
- /* Set the value in the DAC */
- dac[1] = ((power_adjust >> 1) & 0x7F) | (dac[1] & 0xFF80);
- dac[0] = ((power_adjust & 0x1) << 4) | (dac[0] & 0xFFEF);
-
- /* Write the first area */
- fee_write(base, 0x00,
- area, 16);
-
- /* Write the DAC */
- fee_write(base, 0x60,
- dac, 2);
-
- /* We should now verify that the EEprom write went OK */
-
- /* ReRead the first area */
- fee_read(base, 0x00,
- area_verify, 16);
-
- /* ReRead the DAC */
- fee_read(base, 0x60,
- dac_verify, 2);
-
- /* Compare */
- if(memcmp(area, area_verify, 16 * 2) ||
- memcmp(dac, dac_verify, 2 * 2))
- {
-#ifdef DEBUG_IOCTL_ERROR
- printk(KERN_INFO "Wavelan: wv_set_frequency : unable to write new frequency to EEprom (?)\n");
-#endif
- return -EOPNOTSUPP;
- }
-
- /* We must download the frequency parameters to the
- * synthesisers (from the EEprom - area 1)
- * Note : as the EEprom address is auto-decremented, we point at the end
- * of the area... */
- mmc_out(base, mmwoff(0, mmw_fee_addr), 0x0F);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
-
- /* Wait until the download is finished */
- fee_wait(base, 100, 100);
-
- /* We must now download the power adjust value (gain) to
- * the synthesisers (from the EEprom - area 7 - DAC) */
- mmc_out(base, mmwoff(0, mmw_fee_addr), 0x61);
- mmc_out(base, mmwoff(0, mmw_fee_ctrl),
- MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD);
-
- /* Wait until the download is finished */
- fee_wait(base, 100, 100);
-
-#ifdef DEBUG_IOCTL_INFO
- /* Verification of what we have done... */
-
- printk(KERN_DEBUG "Wavelan EEprom Area 1 :");
- for(i = 0; i < 16; i++)
- {
- printk(" %04X",
- area_verify[i]);
- }
- printk("\n");
-
- printk(KERN_DEBUG "Wavelan EEprom DAC : %04X %04X\n",
- dac_verify[0], dac_verify[1]);
-#endif
-
- return 0;
- }
- else
- return -EINVAL; /* Bah, never get there... */
-}
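To make the table lookup above concrete, here is the arithmetic for one request; the table contents come from the card's frequency EEPROM, so only the index/bit computation is shown.

/* Worked example for a request of 2.425 GHz, i.e. iw_freq
 * { m = 242500000, e = 1 }:
 *
 *   freq  = (242500000 / 10000 - 24000) / 5 = 50   (0.5 MHz units)
 *   index = 9 - (50 - 24) / 16              = 8
 *   bit   = (50 - 24) % 16                  = 10
 *
 * so the request is allowed only if (table[8] & (1 << 10)) is set, and
 * the synthesiser is then programmed for 2400 MHz + 50 * 0.5 MHz. */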
-
-/*------------------------------------------------------------------*/
-/*
- * Give the list of available frequencies
- */
-static int
-wv_frequency_list(u_long base, /* i/o port of the card */
- iw_freq * list, /* List of frequencies to fill */
- int max) /* Maximum number of frequencies */
-{
- u_short table[10]; /* Authorized frequency table */
- long freq = 0L; /* offset to 2.4 GHz in .5 MHz + 12 MHz */
- int i; /* index in the table */
- const int BAND_NUM = 10; /* Number of bands */
- int c = 0; /* Channel number */
-
- /* Read the frequency table */
- fee_read(base, 0x71 /* frequency table */,
- table, 10);
-
- /* Check all frequencies */
- i = 0;
- for(freq = 0; freq < 150; freq++)
- /* Look in the table if the frequency is allowed */
- if(table[9 - (freq / 16)] & (1 << (freq % 16)))
- {
- /* Compute approximate channel number */
- while((((channel_bands[c] >> 1) - 24) < freq) &&
- (c < BAND_NUM))
- c++;
- list[i].i = c; /* Set the list index */
-
- /* put in the list */
- list[i].m = (((freq + 24) * 5) + 24000L) * 10000;
- list[i++].e = 1;
-
- /* Check number */
- if(i >= max)
- return(i);
- }
-
- return(i);
-}
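The list entry built above is simply the reverse of the wv_set_frequency() computation; a worked instance for table bit index 26, the same bit tested as table[8] & (1 << 10) in the example after wv_set_frequency().

/* Worked example for table bit index 26:
 *
 *   list[i].m = ((26 + 24) * 5 + 24000L) * 10000 = 242500000
 *   list[i].e = 1
 *
 * which encodes 242500000 * 10^1 Hz = 2.425 GHz, matching the forward
 * computation shown after wv_set_frequency(). */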
-
-#ifdef IW_WIRELESS_SPY
-/*------------------------------------------------------------------*/
-/*
- * Gather wireless spy statistics : for each packet, compare the source
- * address with our list, and if it matches, gather the stats...
- * Sorry, but this function really needs wireless extensions...
- */
-static inline void
-wl_spy_gather(struct net_device * dev,
- u_char * mac, /* MAC address */
- u_char * stats) /* Statistics to gather */
-{
- struct iw_quality wstats;
-
- wstats.qual = stats[2] & MMR_SGNL_QUAL;
- wstats.level = stats[0] & MMR_SIGNAL_LVL;
- wstats.noise = stats[1] & MMR_SILENCE_LVL;
- wstats.updated = 0x7;
-
- /* Update spy records */
- wireless_spy_update(dev, mac, &wstats);
-}
-#endif /* IW_WIRELESS_SPY */
-
-#ifdef HISTOGRAM
-/*------------------------------------------------------------------*/
-/*
- * This function calculates a histogram of the signal level.
- * As the noise is quite constant, it's like doing it on the SNR.
- * We have defined a set of intervals (lp->his_range), and each time
- * the level falls in an interval, we increment the count (lp->his_sum).
- * With this histogram you may detect if one wavelan is really weak,
- * or you may also calculate the mean and standard deviation of the level...
- */
-static inline void
-wl_his_gather(struct net_device * dev,
- u_char * stats) /* Statistics to gather */
-{
- net_local * lp = netdev_priv(dev);
- u_char level = stats[0] & MMR_SIGNAL_LVL;
- int i;
-
- /* Find the correct interval */
- i = 0;
- while((i < (lp->his_number - 1)) && (level >= lp->his_range[i++]))
- ;
-
- /* Increment interval counter */
- (lp->his_sum[i])++;
-}
-#endif /* HISTOGRAM */
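The mean mentioned in the comment above can be estimated from the collected counts. This is a minimal sketch only: representing each interval by its lower boundary is an assumption of the example, not something the driver does, and wl_his_mean is a hypothetical helper.

#ifdef HISTOGRAM
/* Sketch: rough mean signal level from the histogram counters
 * (interval represented by its lower boundary -- an assumption). */
static u_char wl_his_mean(net_local *lp)
{
	u_long count = 0, weighted = 0;
	int i;

	for (i = 0; i < lp->his_number; i++) {
		u_char low = (i == 0) ? 0 : lp->his_range[i - 1];

		count    += lp->his_sum[i];
		weighted += (u_long)lp->his_sum[i] * low;
	}
	return count ? weighted / count : 0;
}
#endif /* HISTOGRAM */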
-
-static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- strncpy(info->driver, "wavelan_cs", sizeof(info->driver)-1);
-}
-
-static const struct ethtool_ops ops = {
- .get_drvinfo = wl_get_drvinfo
-};
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get protocol name
- */
-static int wavelan_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- strcpy(wrqu->name, "WaveLAN");
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set NWID
- */
-static int wavelan_set_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- mm_t m;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Set NWID in WaveLAN. */
- if (!wrqu->nwid.disabled) {
- /* Set NWID in psa */
- psa.psa_nwid[0] = (wrqu->nwid.value & 0xFF00) >> 8;
- psa.psa_nwid[1] = wrqu->nwid.value & 0xFF;
- psa.psa_nwid_select = 0x01;
- psa_write(dev,
- (char *) psa.psa_nwid - (char *) &psa,
- (unsigned char *) psa.psa_nwid, 3);
-
- /* Set NWID in mmc. */
- m.w.mmw_netw_id_l = psa.psa_nwid[1];
- m.w.mmw_netw_id_h = psa.psa_nwid[0];
- mmc_write(base,
- (char *) &m.w.mmw_netw_id_l -
- (char *) &m,
- (unsigned char *) &m.w.mmw_netw_id_l, 2);
- mmc_out(base, mmwoff(0, mmw_loopt_sel), 0x00);
- } else {
- /* Disable NWID in the psa. */
- psa.psa_nwid_select = 0x00;
- psa_write(dev,
- (char *) &psa.psa_nwid_select -
- (char *) &psa,
- (unsigned char *) &psa.psa_nwid_select,
- 1);
-
- /* Disable NWID in the mmc (no filtering). */
- mmc_out(base, mmwoff(0, mmw_loopt_sel),
- MMW_LOOPT_SEL_DIS_NWID);
- }
- /* update the Wavelan checksum */
- update_psa_checksum(dev);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get NWID
- */
-static int wavelan_get_nwid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Read the NWID. */
- psa_read(dev,
- (char *) psa.psa_nwid - (char *) &psa,
- (unsigned char *) psa.psa_nwid, 3);
- wrqu->nwid.value = (psa.psa_nwid[0] << 8) + psa.psa_nwid[1];
- wrqu->nwid.disabled = !(psa.psa_nwid_select);
- wrqu->nwid.fixed = 1; /* Superfluous */
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set frequency
- */
-static int wavelan_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ret;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
- if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
- ret = wv_set_frequency(base, &(wrqu->freq));
- else
- ret = -EOPNOTSUPP;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get frequency
- */
-static int wavelan_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable).
- * Does it work for everybody, especially old cards? */
- if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- unsigned short freq;
-
- /* Ask the EEPROM to read the frequency from the first area. */
- fee_read(base, 0x00, &freq, 1);
- wrqu->freq.m = ((freq >> 5) * 5 + 24000L) * 10000;
- wrqu->freq.e = 1;
- } else {
- psa_read(dev,
- (char *) &psa.psa_subband - (char *) &psa,
- (unsigned char *) &psa.psa_subband, 1);
-
- if (psa.psa_subband <= 4) {
- wrqu->freq.m = fixed_bands[psa.psa_subband];
- wrqu->freq.e = (psa.psa_subband != 0);
- } else
- ret = -EOPNOTSUPP;
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
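
For the conversion above, a worked editorial example with an assumed raw EEPROM word: the handler turns the channel value in the upper bits into the struct iw_freq m * 10^e representation.

/* Editorial example of the arithmetic in wavelan_get_freq(); the raw
 * EEPROM value below is hypothetical. */
unsigned short freq = 82 << 5;				/* assumed fee_read() result */
long m = ((freq >> 5) * 5 + 24000L) * 10000;		/* = 244100000 */
short e = 1;						/* f = m * 10^e Hz */
/* => 244100000 * 10 Hz = 2441000000 Hz = 2.4410 GHz */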
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set level threshold
- */
-static int wavelan_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Set the level threshold. */
- /* We should complain loudly if wrqu->sens.fixed = 0, because we
- * can't set auto mode... */
- psa.psa_thr_pre_set = wrqu->sens.value & 0x3F;
- psa_write(dev,
- (char *) &psa.psa_thr_pre_set - (char *) &psa,
- (unsigned char *) &psa.psa_thr_pre_set, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev);
- mmc_out(base, mmwoff(0, mmw_thr_pre_set),
- psa.psa_thr_pre_set);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get level threshold
- */
-static int wavelan_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Read the level threshold. */
- psa_read(dev,
- (char *) &psa.psa_thr_pre_set - (char *) &psa,
- (unsigned char *) &psa.psa_thr_pre_set, 1);
- wrqu->sens.value = psa.psa_thr_pre_set & 0x3F;
- wrqu->sens.fixed = 1;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set encryption key
- */
-static int wavelan_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
- psa_t psa;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check if capable of encryption */
- if (!mmc_encr(base)) {
- ret = -EOPNOTSUPP;
- }
-
- /* Check the size of the key */
- if((wrqu->encoding.length != 8) && (wrqu->encoding.length != 0)) {
- ret = -EINVAL;
- }
-
- if(!ret) {
- /* Basic checking... */
- if (wrqu->encoding.length == 8) {
- /* Copy the key in the driver */
- memcpy(psa.psa_encryption_key, extra,
- wrqu->encoding.length);
- psa.psa_encryption_select = 1;
-
- psa_write(dev,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.
- psa_encryption_select, 8 + 1);
-
- mmc_out(base, mmwoff(0, mmw_encr_enable),
- MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE);
- mmc_write(base, mmwoff(0, mmw_encr_key),
- (unsigned char *) &psa.
- psa_encryption_key, 8);
- }
-
- /* disable encryption */
- if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
- psa.psa_encryption_select = 0;
- psa_write(dev,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.
- psa_encryption_select, 1);
-
- mmc_out(base, mmwoff(0, mmw_encr_enable), 0);
- }
- /* update the Wavelan checksum */
- update_psa_checksum(dev);
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get encryption key
- */
-static int wavelan_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check if encryption is available */
- if (!mmc_encr(base)) {
- ret = -EOPNOTSUPP;
- } else {
- /* Read the encryption key */
- psa_read(dev,
- (char *) &psa.psa_encryption_select -
- (char *) &psa,
- (unsigned char *) &psa.
- psa_encryption_select, 1 + 8);
-
- /* Is encryption enabled ? */
- if (psa.psa_encryption_select)
- wrqu->encoding.flags = IW_ENCODE_ENABLED;
- else
- wrqu->encoding.flags = IW_ENCODE_DISABLED;
- wrqu->encoding.flags |= mmc_encr(base);
-
- /* Copy the key to the user buffer */
- wrqu->encoding.length = 8;
- memcpy(extra, psa.psa_encryption_key, wrqu->encoding.length);
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-#ifdef WAVELAN_ROAMING_EXT
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set ESSID (domain)
- */
-static int wavelan_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check if disable */
- if(wrqu->data.flags == 0)
- lp->filter_domains = 0;
- else {
- char essid[IW_ESSID_MAX_SIZE + 1];
- char * endp;
-
- /* Terminate the string */
- memcpy(essid, extra, wrqu->data.length);
- essid[IW_ESSID_MAX_SIZE] = '\0';
-
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "SetEssid : ``%s''\n", essid);
-#endif /* DEBUG_IOCTL_INFO */
-
- /* Convert to a number (note : Wavelan specific) */
- lp->domain_id = simple_strtoul(essid, &endp, 16);
- /* Has it worked ? */
- if(endp > essid)
- lp->filter_domains = 1;
- else {
- lp->filter_domains = 0;
- ret = -EINVAL;
- }
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
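
An editorial usage example of the parse above (the ESSID string is hypothetical): the ESSID is interpreted as a hexadecimal WaveLAN domain ID.

/* Editorial example: an ESSID of "1A3" yields domain_id 0x1A3 and,
 * since endp advances past the digits, filter_domains would be set. */
char *endp;
unsigned long id = simple_strtoul("1A3", &endp, 16);	/* id == 0x1A3 */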
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get ESSID (domain)
- */
-static int wavelan_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
-
- /* Is the domain ID active ? */
- wrqu->data.flags = lp->filter_domains;
-
- /* Copy Domain ID into a string (Wavelan specific) */
- /* Sounds crazy, but we can't have a snprintf in the kernel !!! */
- sprintf(extra, "%lX", lp->domain_id);
- extra[IW_ESSID_MAX_SIZE] = '\0';
-
- /* Set the length */
- wrqu->data.length = strlen(extra);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set AP address
- */
-static int wavelan_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
-#ifdef DEBUG_IOCTL_INFO
- printk(KERN_DEBUG "Set AP to : %pM\n", wrqu->ap_addr.sa_data);
-#endif /* DEBUG_IOCTL_INFO */
-
- return -EOPNOTSUPP;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP address
- */
-static int wavelan_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- /* Should get the real McCoy instead of our own Ethernet address */
- memcpy(wrqu->ap_addr.sa_data, dev->dev_addr, WAVELAN_ADDR_SIZE);
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-
- return -EOPNOTSUPP;
-}
-#endif /* WAVELAN_ROAMING_EXT */
-
-#ifdef WAVELAN_ROAMING
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set mode
- */
-static int wavelan_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ret = 0;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Check mode */
- switch(wrqu->mode) {
- case IW_MODE_ADHOC:
- if(do_roaming) {
- wv_roam_cleanup(dev);
- do_roaming = 0;
- }
- break;
- case IW_MODE_INFRA:
- if(!do_roaming) {
- wv_roam_init(dev);
- do_roaming = 1;
- }
- break;
- default:
- ret = -EINVAL;
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get mode
- */
-static int wavelan_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- if(do_roaming)
- wrqu->mode = IW_MODE_INFRA;
- else
- wrqu->mode = IW_MODE_ADHOC;
-
- return 0;
-}
-#endif /* WAVELAN_ROAMING */
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get range info
- */
-static int wavelan_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- struct iw_range *range = (struct iw_range *) extra;
- unsigned long flags;
- int ret = 0;
-
- /* Set the length (very important for backward compatibility) */
- wrqu->data.length = sizeof(struct iw_range);
-
- /* Set all the info we don't care or don't know about to zero */
- memset(range, 0, sizeof(struct iw_range));
-
- /* Set the Wireless Extension versions */
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 9;
-
- /* Set information in the range struct. */
- range->throughput = 1.4 * 1000 * 1000; /* don't argue on this ! */
- range->min_nwid = 0x0000;
- range->max_nwid = 0xFFFF;
-
- range->sensitivity = 0x3F;
- range->max_qual.qual = MMR_SGNL_QUAL;
- range->max_qual.level = MMR_SIGNAL_LVL;
- range->max_qual.noise = MMR_SILENCE_LVL;
- range->avg_qual.qual = MMR_SGNL_QUAL; /* Always max */
- /* Need to get better values for those two */
- range->avg_qual.level = 30;
- range->avg_qual.noise = 8;
-
- range->num_bitrates = 1;
- range->bitrate[0] = 2000000; /* 2 Mb/s */
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_MASK(0x8B02) |
- IW_EVENT_CAPA_MASK(0x8B04) |
- IW_EVENT_CAPA_MASK(0x8B06));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable). */
- if (!(mmc_in(base, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY))) {
- range->num_channels = 10;
- range->num_frequency = wv_frequency_list(base, range->freq,
- IW_MAX_FREQUENCIES);
- } else
- range->num_channels = range->num_frequency = 0;
-
- /* Encryption supported ? */
- if (mmc_encr(base)) {
- range->encoding_size[0] = 8; /* DES = 64 bits key */
- range->num_encoding_sizes = 1;
- range->max_encoding_tokens = 1; /* Only one key possible */
- } else {
- range->num_encoding_sizes = 0;
- range->max_encoding_tokens = 0;
- }
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return ret;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set quality threshold
- */
-static int wavelan_set_qthr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- unsigned int base = dev->base_addr;
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- psa.psa_quality_thr = *(extra) & 0x0F;
- psa_write(dev,
- (char *) &psa.psa_quality_thr - (char *) &psa,
- (unsigned char *) &psa.psa_quality_thr, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev);
- mmc_out(base, mmwoff(0, mmw_quality_thr),
- psa.psa_quality_thr);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get quality threshold
- */
-static int wavelan_get_qthr(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- psa_t psa;
- unsigned long flags;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- psa_read(dev,
- (char *) &psa.psa_quality_thr - (char *) &psa,
- (unsigned char *) &psa.psa_quality_thr, 1);
- *(extra) = psa.psa_quality_thr & 0x0F;
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return 0;
-}
-
-#ifdef WAVELAN_ROAMING
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set roaming
- */
-static int wavelan_set_roam(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- /* Disable interrupts and save flags. */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Note : should check if user == root */
- if(do_roaming && (*extra)==0)
- wv_roam_cleanup(dev);
- else if(do_roaming==0 && (*extra)!=0)
- wv_roam_init(dev);
-
- do_roaming = (*extra);
-
- /* Enable interrupts and restore flags. */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get quality threshold
- */
-static int wavelan_get_roam(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- *(extra) = do_roaming;
-
- return 0;
-}
-#endif /* WAVELAN_ROAMING */
-
-#ifdef HISTOGRAM
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set histogram
- */
-static int wavelan_set_histo(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
-
- /* Check the number of intervals. */
- if (wrqu->data.length > 16) {
- return(-E2BIG);
- }
-
- /* Disable histo while we copy the ranges.
- * As we don't disable interrupts, we need to do this */
- lp->his_number = 0;
-
- /* Are there ranges to copy? */
- if (wrqu->data.length > 0) {
- /* Copy interval ranges to the driver */
- memcpy(lp->his_range, extra, wrqu->data.length);
-
- {
- int i;
- printk(KERN_DEBUG "Histo :");
- for(i = 0; i < wrqu->data.length; i++)
- printk(" %d", lp->his_range[i]);
- printk("\n");
- }
-
- /* Reset result structure. */
- memset(lp->his_sum, 0x00, sizeof(long) * 16);
- }
-
- /* Now we can set the number of ranges */
- lp->his_number = wrqu->data.length;
-
- return(0);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get histogram
- */
-static int wavelan_get_histo(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- net_local *lp = netdev_priv(dev);
-
- /* Set the number of intervals. */
- wrqu->data.length = lp->his_number;
-
- /* Give back the distribution statistics */
- if(lp->his_number > 0)
- memcpy(extra, lp->his_sum, sizeof(long) * lp->his_number);
-
- return(0);
-}
-#endif /* HISTOGRAM */
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const struct iw_priv_args wavelan_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
- { SIOCSIPQTHR, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setqualthr" },
- { SIOCGIPQTHR, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getqualthr" },
- { SIOCSIPROAM, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "setroam" },
- { SIOCGIPROAM, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "getroam" },
- { SIOCSIPHISTO, IW_PRIV_TYPE_BYTE | 16, 0, "sethisto" },
- { SIOCGIPHISTO, 0, IW_PRIV_TYPE_INT | 16, "gethisto" },
-};
-
-static const iw_handler wavelan_handler[] =
-{
- NULL, /* SIOCSIWNAME */
- wavelan_get_name, /* SIOCGIWNAME */
- wavelan_set_nwid, /* SIOCSIWNWID */
- wavelan_get_nwid, /* SIOCGIWNWID */
- wavelan_set_freq, /* SIOCSIWFREQ */
- wavelan_get_freq, /* SIOCGIWFREQ */
-#ifdef WAVELAN_ROAMING
- wavelan_set_mode, /* SIOCSIWMODE */
- wavelan_get_mode, /* SIOCGIWMODE */
-#else /* WAVELAN_ROAMING */
- NULL, /* SIOCSIWMODE */
- NULL, /* SIOCGIWMODE */
-#endif /* WAVELAN_ROAMING */
- wavelan_set_sens, /* SIOCSIWSENS */
- wavelan_get_sens, /* SIOCGIWSENS */
- NULL, /* SIOCSIWRANGE */
- wavelan_get_range, /* SIOCGIWRANGE */
- NULL, /* SIOCSIWPRIV */
- NULL, /* SIOCGIWPRIV */
- NULL, /* SIOCSIWSTATS */
- NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
-#ifdef WAVELAN_ROAMING_EXT
- wavelan_set_wap, /* SIOCSIWAP */
- wavelan_get_wap, /* SIOCGIWAP */
- NULL, /* -- hole -- */
- NULL, /* SIOCGIWAPLIST */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- wavelan_set_essid, /* SIOCSIWESSID */
- wavelan_get_essid, /* SIOCGIWESSID */
-#else /* WAVELAN_ROAMING_EXT */
- NULL, /* SIOCSIWAP */
- NULL, /* SIOCGIWAP */
- NULL, /* -- hole -- */
- NULL, /* SIOCGIWAPLIST */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWESSID */
- NULL, /* SIOCGIWESSID */
-#endif /* WAVELAN_ROAMING_EXT */
- NULL, /* SIOCSIWNICKN */
- NULL, /* SIOCGIWNICKN */
- NULL, /* -- hole -- */
- NULL, /* -- hole -- */
- NULL, /* SIOCSIWRATE */
- NULL, /* SIOCGIWRATE */
- NULL, /* SIOCSIWRTS */
- NULL, /* SIOCGIWRTS */
- NULL, /* SIOCSIWFRAG */
- NULL, /* SIOCGIWFRAG */
- NULL, /* SIOCSIWTXPOW */
- NULL, /* SIOCGIWTXPOW */
- NULL, /* SIOCSIWRETRY */
- NULL, /* SIOCGIWRETRY */
- wavelan_set_encode, /* SIOCSIWENCODE */
- wavelan_get_encode, /* SIOCGIWENCODE */
-};
-
-static const iw_handler wavelan_private_handler[] =
-{
- wavelan_set_qthr, /* SIOCIWFIRSTPRIV */
- wavelan_get_qthr, /* SIOCIWFIRSTPRIV + 1 */
-#ifdef WAVELAN_ROAMING
- wavelan_set_roam, /* SIOCIWFIRSTPRIV + 2 */
- wavelan_get_roam, /* SIOCIWFIRSTPRIV + 3 */
-#else /* WAVELAN_ROAMING */
- NULL, /* SIOCIWFIRSTPRIV + 2 */
- NULL, /* SIOCIWFIRSTPRIV + 3 */
-#endif /* WAVELAN_ROAMING */
-#ifdef HISTOGRAM
- wavelan_set_histo, /* SIOCIWFIRSTPRIV + 4 */
- wavelan_get_histo, /* SIOCIWFIRSTPRIV + 5 */
-#endif /* HISTOGRAM */
-};
-
-static const struct iw_handler_def wavelan_handler_def =
-{
- .num_standard = ARRAY_SIZE(wavelan_handler),
- .num_private = ARRAY_SIZE(wavelan_private_handler),
- .num_private_args = ARRAY_SIZE(wavelan_private_args),
- .standard = wavelan_handler,
- .private = wavelan_private_handler,
- .private_args = wavelan_private_args,
- .get_wireless_stats = wavelan_get_wireless_stats,
-};
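
These tables are registered with the wireless extensions core elsewhere in the driver (outside this hunk), roughly as sketched below; the exact location of the assignment is assumed, not shown here.

/* Editorial sketch: how wavelan_handler_def is normally hooked up at
 * device setup time (assumed, not visible in this hunk). */
dev->wireless_handlers = &wavelan_handler_def;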
-
-/*------------------------------------------------------------------*/
-/*
- * Get wireless statistics
- * Called by /proc/net/wireless...
- */
-static iw_stats *
-wavelan_get_wireless_stats(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- mmr_t m;
- iw_stats * wstats;
- unsigned long flags;
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_get_wireless_stats()\n", dev->name);
-#endif
-
- /* Disable interrupts & save flags */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- wstats = &lp->wstats;
-
- /* Get data from the mmc */
- mmc_out(base, mmwoff(0, mmw_freeze), 1);
-
- mmc_read(base, mmroff(0, mmr_dce_status), &m.mmr_dce_status, 1);
- mmc_read(base, mmroff(0, mmr_wrong_nwid_l), &m.mmr_wrong_nwid_l, 2);
- mmc_read(base, mmroff(0, mmr_thr_pre_set), &m.mmr_thr_pre_set, 4);
-
- mmc_out(base, mmwoff(0, mmw_freeze), 0);
-
- /* Copy data to wireless stuff */
- wstats->status = m.mmr_dce_status & MMR_DCE_STATUS;
- wstats->qual.qual = m.mmr_sgnl_qual & MMR_SGNL_QUAL;
- wstats->qual.level = m.mmr_signal_lvl & MMR_SIGNAL_LVL;
- wstats->qual.noise = m.mmr_silence_lvl & MMR_SILENCE_LVL;
- wstats->qual.updated = (((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 7) |
- ((m.mmr_signal_lvl & MMR_SIGNAL_LVL_VALID) >> 6) |
- ((m.mmr_silence_lvl & MMR_SILENCE_LVL_VALID) >> 5));
- wstats->discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
- wstats->discard.code = 0L;
- wstats->discard.misc = 0L;
-
- /* ReEnable interrupts & restore flags */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_IOCTL_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_get_wireless_stats()\n", dev->name);
-#endif
- return &lp->wstats;
-}
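
An editorial note on the qual.updated arithmetic above, assuming the *_VALID flags are bit 7 (0x80) of their MMR bytes (an assumption, not stated in this hunk):

/* Editorial example, assuming the *_VALID masks are 0x80: */
unsigned char valid = 0x80;					/* assumed VALID bit */
unsigned char updated = (valid >> 7) | (valid >> 6) | (valid >> 5);	/* 0x01|0x02|0x04 = 0x7 */
/* matches the unconditional wstats.updated = 0x7 in wl_spy_gather() */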
-
-/************************* PACKET RECEPTION *************************/
-/*
- * This part deals with receiving packets.
- * The interrupt handler gets an interrupt when a packet has been
- * successfully received and then calls this part...
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Calculate the starting address of the frame pointed to by the receive
- * frame pointer and verify that the frame seems correct
- * (called by wv_packet_rcv())
- */
-static int
-wv_start_of_frame(struct net_device * dev,
- int rfp, /* end of frame */
- int wrap) /* start of buffer */
-{
- unsigned int base = dev->base_addr;
- int rp;
- int len;
-
- rp = (rfp - 5 + RX_SIZE) % RX_SIZE;
- outb(rp & 0xff, PIORL(base));
- outb(((rp >> 8) & PIORH_MASK), PIORH(base));
- len = inb(PIOP(base));
- len |= inb(PIOP(base)) << 8;
-
- /* Sanity checks on size */
- /* Frame too big */
- if(len > MAXDATAZ + 100)
- {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO "%s: wv_start_of_frame: Received frame too large, rfp %d len 0x%x\n",
- dev->name, rfp, len);
-#endif
- return(-1);
- }
-
- /* Frame too short */
- if(len < 7)
- {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO "%s: wv_start_of_frame: Received null frame, rfp %d len 0x%x\n",
- dev->name, rfp, len);
-#endif
- return(-1);
- }
-
- /* Wrap around buffer */
- if(len > ((wrap - (rfp - len) + RX_SIZE) % RX_SIZE)) /* magic formula ! */
- {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO "%s: wv_start_of_frame: wrap around buffer, wrap %d rfp %d len 0x%x\n",
- dev->name, wrap, rfp, len);
-#endif
- return(-1);
- }
-
- return((rp - len + RX_SIZE) % RX_SIZE);
-} /* wv_start_of_frame */
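
A short editorial example of the ring arithmetic above, assuming RX_SIZE is the "full 8K" (8192-byte) receive ring mentioned later in this file:

/* Editorial example of wv_start_of_frame()'s modular arithmetic,
 * assuming RX_SIZE == 8192. */
int rfp = 100, len = 300;
int rp    = (rfp - 5 + 8192) % 8192;	/* 95 */
int start = (rp - len + 8192) % 8192;	/* 7987: the frame wraps around the ring */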
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does the actual copy of data (including the ethernet
- * header structure) from the WaveLAN card to an sk_buff chain that
- * will be passed up to the network interface layer. NOTE: We
- * currently don't handle trailer protocols (neither does the rest of
- * the network interface), so if that is needed, it will (at least in
- * part) be added here. The contents of the receive ring buffer are
- * copied to a message chain that is then passed to the kernel.
- *
- * Note: if any errors occur, the packet is "dropped on the floor"
- * (called by wv_packet_rcv())
- */
-static void
-wv_packet_read(struct net_device * dev,
- int fd_p,
- int sksize)
-{
- net_local * lp = netdev_priv(dev);
- struct sk_buff * skb;
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: ->wv_packet_read(0x%X, %d)\n",
- dev->name, fd_p, sksize);
-#endif
-
- /* Allocate some buffer for the new packet */
- if((skb = dev_alloc_skb(sksize+2)) == (struct sk_buff *) NULL)
- {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO "%s: wv_packet_read(): could not alloc_skb(%d, GFP_ATOMIC)\n",
- dev->name, sksize);
-#endif
- dev->stats.rx_dropped++;
- /*
- * Not only do we want to return here, but we also need to drop the
- * packet on the floor to clear the interrupt.
- */
- return;
- }
-
- skb_reserve(skb, 2);
- fd_p = read_ringbuf(dev, fd_p, (char *) skb_put(skb, sksize), sksize);
- skb->protocol = eth_type_trans(skb, dev);
-
-#ifdef DEBUG_RX_INFO
- wv_packet_info(skb_mac_header(skb), sksize, dev->name, "wv_packet_read");
-#endif /* DEBUG_RX_INFO */
-
- /* Statistics gathering & stuff associated.
- * It seems a bit messy with all the defines, but it's really simple... */
- if(
-#ifdef IW_WIRELESS_SPY
- (lp->spy_data.spy_number > 0) ||
-#endif /* IW_WIRELESS_SPY */
-#ifdef HISTOGRAM
- (lp->his_number > 0) ||
-#endif /* HISTOGRAM */
-#ifdef WAVELAN_ROAMING
- (do_roaming) ||
-#endif /* WAVELAN_ROAMING */
- 0)
- {
- u_char stats[3]; /* Signal level, Noise level, Signal quality */
-
- /* read signal level, silence level and signal quality bytes */
- fd_p = read_ringbuf(dev, (fd_p + 4) % RX_SIZE + RX_BASE,
- stats, 3);
-#ifdef DEBUG_RX_INFO
- printk(KERN_DEBUG "%s: wv_packet_read(): Signal level %d/63, Silence level %d/63, signal quality %d/16\n",
- dev->name, stats[0] & 0x3F, stats[1] & 0x3F, stats[2] & 0x0F);
-#endif
-
-#ifdef WAVELAN_ROAMING
- if(do_roaming)
- if(WAVELAN_BEACON(skb->data))
- wl_roam_gather(dev, skb->data, stats);
-#endif /* WAVELAN_ROAMING */
-
-#ifdef WIRELESS_SPY
- wl_spy_gather(dev, skb_mac_header(skb) + WAVELAN_ADDR_SIZE, stats);
-#endif /* WIRELESS_SPY */
-#ifdef HISTOGRAM
- wl_his_gather(dev, stats);
-#endif /* HISTOGRAM */
- }
-
- /*
- * Hand the packet to the Network Module
- */
- netif_rx(skb);
-
- /* Keep stats up to date */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += sksize;
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: <-wv_packet_read()\n", dev->name);
-#endif
- return;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This routine is called by the interrupt handler to initiate a
- * packet transfer from the card to the network interface layer above
- * this driver. This routine checks if a buffer has been successfully
- * received by the WaveLAN card. If so, the routine wv_packet_read is
- * called to do the actual transfer of the card's data including the
- * ethernet header into a packet consisting of an sk_buff chain.
- * (called by wavelan_interrupt())
- * Note : the spinlock is already grabbed for us and irqs are disabled.
- */
-static void
-wv_packet_rcv(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- int newrfp;
- int rp;
- int len;
- int f_start;
- int status;
- int i593_rfp;
- int stat_ptr;
- u_char c[4];
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: ->wv_packet_rcv()\n", dev->name);
-#endif
-
- /* Get the new receive frame pointer from the i82593 chip */
- outb(CR0_STATUS_2 | OP0_NOP, LCCR(base));
- i593_rfp = inb(LCSR(base));
- i593_rfp |= inb(LCSR(base)) << 8;
- i593_rfp %= RX_SIZE;
-
- /* Get the new receive frame pointer from the WaveLAN card.
- * It is 3 bytes more than the increment of the i82593 receive
- * frame pointer, for each packet. This is because it includes the
- * 3 roaming bytes added by the mmc.
- */
- newrfp = inb(RPLL(base));
- newrfp |= inb(RPLH(base)) << 8;
- newrfp %= RX_SIZE;
-
-#ifdef DEBUG_RX_INFO
- printk(KERN_DEBUG "%s: wv_packet_rcv(): i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
- dev->name, i593_rfp, lp->stop, newrfp, lp->rfp);
-#endif
-
-#ifdef DEBUG_RX_ERROR
- /* If no new frame pointer... */
- if(lp->overrunning || newrfp == lp->rfp)
- printk(KERN_INFO "%s: wv_packet_rcv(): no new frame: i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
- dev->name, i593_rfp, lp->stop, newrfp, lp->rfp);
-#endif
-
- /* Read all frames (packets) received */
- while(newrfp != lp->rfp)
- {
- /* A frame is composed of the packet, followed by a status word,
- * the length of the frame (word) and the mmc info (SNR & qual).
- * It's because the length is at the end that we can only scan
- * frames backward. */
-
- /* Find the first frame by skipping backwards over the frames */
- rp = newrfp; /* End of last frame */
- while(((f_start = wv_start_of_frame(dev, rp, newrfp)) != lp->rfp) &&
- (f_start != -1))
- rp = f_start;
-
- /* If we had a problem */
- if(f_start == -1)
- {
-#ifdef DEBUG_RX_ERROR
- printk(KERN_INFO "wavelan_cs: cannot find start of frame ");
- printk(" i593_rfp %d stop %d newrfp %d lp->rfp %d\n",
- i593_rfp, lp->stop, newrfp, lp->rfp);
-#endif
- lp->rfp = rp; /* Get to the last usable frame */
- continue;
- }
-
- /* f_start points to the beginning of the first frame received
- * and rp to the beginning of the next one */
-
- /* Read status & length of the frame */
- stat_ptr = (rp - 7 + RX_SIZE) % RX_SIZE;
- stat_ptr = read_ringbuf(dev, stat_ptr, c, 4);
- status = c[0] | (c[1] << 8);
- len = c[2] | (c[3] << 8);
-
- /* Check status */
- if((status & RX_RCV_OK) != RX_RCV_OK)
- {
- dev->stats.rx_errors++;
- if(status & RX_NO_SFD)
- dev->stats.rx_frame_errors++;
- if(status & RX_CRC_ERR)
- dev->stats.rx_crc_errors++;
- if(status & RX_OVRRUN)
- dev->stats.rx_over_errors++;
-
-#ifdef DEBUG_RX_FAIL
- printk(KERN_DEBUG "%s: wv_packet_rcv(): packet not received ok, status = 0x%x\n",
- dev->name, status);
-#endif
- }
- else
- /* Read the packet and transmit to Linux */
- wv_packet_read(dev, f_start, len - 2);
-
- /* One frame has been processed, skip it */
- lp->rfp = rp;
- }
-
- /*
- * Update the frame stop register, but set it to less than
- * the full 8K to allow space for 3 bytes of signal strength
- * per packet.
- */
- lp->stop = (i593_rfp + RX_SIZE - ((RX_SIZE / 64) * 3)) % RX_SIZE;
- outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, LCCR(base));
- outb(CR1_STOP_REG_UPDATE | (lp->stop >> RX_SIZE_SHIFT), LCCR(base));
- outb(OP1_SWIT_TO_PORT_0, LCCR(base));
-
-#ifdef DEBUG_RX_TRACE
- printk(KERN_DEBUG "%s: <-wv_packet_rcv()\n", dev->name);
-#endif
-}
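
For the stop-register update at the end of the routine, another editorial arithmetic example under the same 8 KB assumption:

/* Editorial example of the stop calculation in wv_packet_rcv(),
 * assuming RX_SIZE == 8192: (8192 / 64) * 3 == 384 bytes kept free. */
int i593_rfp = 1000;
int stop = (i593_rfp + 8192 - 384) % 8192;	/* 616, i.e. 384 bytes behind rfp */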
-
-/*********************** PACKET TRANSMISSION ***********************/
-/*
- * This part deals with sending packets through the WaveLAN.
- * We copy the packet to the send buffer and then issue the send
- * command to the i82593. The result of this operation will be
- * checked in wavelan_interrupt()
- */
-
-/*------------------------------------------------------------------*/
-/*
- * This routine fills in the appropriate registers and memory
- * locations on the WaveLAN card and starts the card off on
- * the transmit.
- * (called in wavelan_packet_xmit())
- */
-static void
-wv_packet_write(struct net_device * dev,
- void * buf,
- short length)
-{
- net_local * lp = netdev_priv(dev);
- unsigned int base = dev->base_addr;
- unsigned long flags;
- int clen = length;
- register u_short xmtdata_base = TX_BASE;
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: ->wv_packet_write(%d)\n", dev->name, length);
-#endif
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Write the length of data buffer followed by the buffer */
- outb(xmtdata_base & 0xff, PIORL(base));
- outb(((xmtdata_base >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
- outb(clen & 0xff, PIOP(base)); /* lsb */
- outb(clen >> 8, PIOP(base)); /* msb */
-
- /* Send the data */
- outsb(PIOP(base), buf, clen);
-
- /* Indicate end of transmit chain */
- outb(OP0_NOP, PIOP(base));
- /* josullvn@cs.cmu.edu: need to send a second NOP for alignment... */
- outb(OP0_NOP, PIOP(base));
-
- /* Reset the transmit DMA pointer */
- hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
- hacr_write(base, HACR_DEFAULT);
- /* Send the transmit command */
- wv_82593_cmd(dev, "wv_packet_write(): transmit",
- OP0_TRANSMIT, SR0_NO_RESULT);
-
- /* Make sure the watchdog will keep quiet for a while */
- dev->trans_start = jiffies;
-
- /* Keep stats up to date */
- dev->stats.tx_bytes += length;
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_TX_INFO
- wv_packet_info((u_char *) buf, length, dev->name, "wv_packet_write");
-#endif /* DEBUG_TX_INFO */
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: <-wv_packet_write()\n", dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This routine is called when we want to send a packet (NET3 callback)
- * In this routine, we check if the hardware is ready to accept
- * the packet. We also prevent reentrance. Then, we call the function
- * to send the packet...
- */
-static netdev_tx_t
-wavelan_packet_xmit(struct sk_buff * skb,
- struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
- unsigned long flags;
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_packet_xmit(0x%X)\n", dev->name,
- (unsigned) skb);
-#endif
-
- /*
- * Block a timer-based transmit from overlapping a previous transmit.
- * In other words, prevent reentering this routine.
- */
- netif_stop_queue(dev);
-
- /* If somebody has asked to reconfigure the controller,
- * we can do it now */
- if(lp->reconfig_82593)
- {
- spin_lock_irqsave(&lp->spinlock, flags); /* Disable interrupts */
- wv_82593_config(dev);
- spin_unlock_irqrestore(&lp->spinlock, flags); /* Re-enable interrupts */
- /* Note : the configure procedure was totally synchronous,
- * so the Tx buffer is now free */
- }
-
- /* Check if we need some padding */
- /* Note : on wireless the propagation time is in the order of 1us,
- * and we don't have the Ethernet specific requirement of being
- * able to detect collisions, therefore in theory we don't really
- * need to pad. Jean II */
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
-
- wv_packet_write(dev, skb->data, skb->len);
-
- dev_kfree_skb(skb);
-
-#ifdef DEBUG_TX_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
-#endif
- return NETDEV_TX_OK;
-}
-
-/********************** HARDWARE CONFIGURATION **********************/
-/*
- * This part does the real job of starting and configuring the hardware.
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to initialize the Modem Management Controller.
- * (called by wv_hw_config())
- */
-static int
-wv_mmc_init(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- psa_t psa;
- mmw_t m;
- int configured;
- int i; /* Loop counter */
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_mmc_init()\n", dev->name);
-#endif
-
- /* Read the parameter storage area */
- psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
-
- /*
- * Check the first three octets of the MAC addr for the manufacturer's code.
- * Note: If you get the error message below, you've got a
- * non-NCR/AT&T/Lucent PCMCIA card, see wavelan_cs.h for details on
- * how to configure your card...
- */
- for (i = 0; i < ARRAY_SIZE(MAC_ADDRESSES); i++)
- if ((psa.psa_univ_mac_addr[0] == MAC_ADDRESSES[i][0]) &&
- (psa.psa_univ_mac_addr[1] == MAC_ADDRESSES[i][1]) &&
- (psa.psa_univ_mac_addr[2] == MAC_ADDRESSES[i][2]))
- break;
-
- /* If we have not found it... */
- if (i == ARRAY_SIZE(MAC_ADDRESSES))
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_WARNING "%s: wv_mmc_init(): Invalid MAC address: %02X:%02X:%02X:...\n",
- dev->name, psa.psa_univ_mac_addr[0],
- psa.psa_univ_mac_addr[1], psa.psa_univ_mac_addr[2]);
-#endif
- return FALSE;
- }
-
- /* Get the MAC address */
- memcpy(&dev->dev_addr[0], &psa.psa_univ_mac_addr[0], WAVELAN_ADDR_SIZE);
-
-#ifdef USE_PSA_CONFIG
- configured = psa.psa_conf_status & 1;
-#else
- configured = 0;
-#endif
-
- /* If the PSA is not configured */
- if(!configured)
- {
- /* User will be able to configure NWID after (with iwconfig) */
- psa.psa_nwid[0] = 0;
- psa.psa_nwid[1] = 0;
-
- /* As NWID is not set : no NWID checking */
- psa.psa_nwid_select = 0;
-
- /* Disable encryption */
- psa.psa_encryption_select = 0;
-
- /* Set to standard values
- * 0x04 for AT,
- * 0x01 for MCA,
- * 0x04 for PCMCIA and 2.00 card (AT&T 407-024689/E document)
- */
- if (psa.psa_comp_number & 1)
- psa.psa_thr_pre_set = 0x01;
- else
- psa.psa_thr_pre_set = 0x04;
- psa.psa_quality_thr = 0x03;
-
- /* It is configured */
- psa.psa_conf_status |= 1;
-
-#ifdef USE_PSA_CONFIG
- /* Write the psa */
- psa_write(dev, (char *)psa.psa_nwid - (char *)&psa,
- (unsigned char *)psa.psa_nwid, 4);
- psa_write(dev, (char *)&psa.psa_thr_pre_set - (char *)&psa,
- (unsigned char *)&psa.psa_thr_pre_set, 1);
- psa_write(dev, (char *)&psa.psa_quality_thr - (char *)&psa,
- (unsigned char *)&psa.psa_quality_thr, 1);
- psa_write(dev, (char *)&psa.psa_conf_status - (char *)&psa,
- (unsigned char *)&psa.psa_conf_status, 1);
- /* update the Wavelan checksum */
- update_psa_checksum(dev);
-#endif /* USE_PSA_CONFIG */
- }
-
- /* Zero the mmc structure */
- memset(&m, 0x00, sizeof(m));
-
- /* Copy PSA info to the mmc */
- m.mmw_netw_id_l = psa.psa_nwid[1];
- m.mmw_netw_id_h = psa.psa_nwid[0];
-
- if(psa.psa_nwid_select & 1)
- m.mmw_loopt_sel = 0x00;
- else
- m.mmw_loopt_sel = MMW_LOOPT_SEL_DIS_NWID;
-
- memcpy(&m.mmw_encr_key, &psa.psa_encryption_key,
- sizeof(m.mmw_encr_key));
-
- if(psa.psa_encryption_select)
- m.mmw_encr_enable = MMW_ENCR_ENABLE_EN | MMW_ENCR_ENABLE_MODE;
- else
- m.mmw_encr_enable = 0;
-
- m.mmw_thr_pre_set = psa.psa_thr_pre_set & 0x3F;
- m.mmw_quality_thr = psa.psa_quality_thr & 0x0F;
-
- /*
- * Set default modem control parameters.
- * See NCR document 407-0024326 Rev. A.
- */
- m.mmw_jabber_enable = 0x01;
- m.mmw_anten_sel = MMW_ANTEN_SEL_ALG_EN;
- m.mmw_ifs = 0x20;
- m.mmw_mod_delay = 0x04;
- m.mmw_jam_time = 0x38;
-
- m.mmw_des_io_invert = 0;
- m.mmw_freeze = 0;
- m.mmw_decay_prm = 0;
- m.mmw_decay_updat_prm = 0;
-
- /* Write all info to mmc */
- mmc_write(base, 0, (u_char *)&m, sizeof(m));
-
- /* The following code starts the modem of the 2.00 frequency
- * selectable cards at power on. It's not strictly needed for the
- * following boots...
- * The original patch was by Joe Finney for the PCMCIA driver, but
- * I've cleaned it up a bit and added documentation.
- * Thanks to Loeke Brederveld from Lucent for the info.
- */
-
- /* Attempt to recognise 2.00 cards (2.4 GHz frequency selectable)
- * (does it work for everybody ? - especially old cards...) */
- /* Note : WFREQSEL verifies that it is able to read from EEprom
- * a sensible frequency (address 0x00) + that MMR_FEE_STATUS_ID
- * is 0xA (Xilinx version) or 0xB (Ariadne version).
- * My test is cruder but does work... */
- if(!(mmc_in(base, mmroff(0, mmr_fee_status)) &
- (MMR_FEE_STATUS_DWLD | MMR_FEE_STATUS_BUSY)))
- {
- /* We must download the frequency parameters to the
- * synthesisers (from the EEprom - area 1)
- * Note : as the EEprom is auto decremented, we set the end
- * of the area... */
- m.mmw_fee_addr = 0x0F;
- m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
- mmc_write(base, (char *)&m.mmw_fee_ctrl - (char *)&m,
- (unsigned char *)&m.mmw_fee_ctrl, 2);
-
- /* Wait until the download is finished */
- fee_wait(base, 100, 100);
-
-#ifdef DEBUG_CONFIG_INFO
- /* The frequency was in the last word downloaded... */
- mmc_read(base, (char *)&m.mmw_fee_data_l - (char *)&m,
- (unsigned char *)&m.mmw_fee_data_l, 2);
-
- /* Print some info for the user */
- printk(KERN_DEBUG "%s: Wavelan 2.00 recognised (frequency select) : Current frequency = %ld\n",
- dev->name,
- ((m.mmw_fee_data_h << 4) |
- (m.mmw_fee_data_l >> 4)) * 5 / 2 + 24000L);
-#endif
-
- /* We must now download the power adjust value (gain) to
- * the synthesisers (from the EEprom - area 7 - DAC) */
- m.mmw_fee_addr = 0x61;
- m.mmw_fee_ctrl = MMW_FEE_CTRL_READ | MMW_FEE_CTRL_DWLD;
- mmc_write(base, (char *)&m.mmw_fee_ctrl - (char *)&m,
- (unsigned char *)&m.mmw_fee_ctrl, 2);
-
- /* Wait until the download is finished */
- } /* if 2.00 card */
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_mmc_init()\n", dev->name);
-#endif
- return TRUE;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Routine to gracefully turn off reception, and wait for any commands
- * to complete.
- * (called in wv_ru_start() and wavelan_close() and wavelan_event())
- */
-static int
-wv_ru_stop(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- unsigned long flags;
- int status;
- int spin;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_ru_stop()\n", dev->name);
-#endif
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* First, send the LAN controller a stop receive command */
- wv_82593_cmd(dev, "wv_graceful_shutdown(): stop-rcv",
- OP0_STOP_RCV, SR0_NO_RESULT);
-
- /* Then, spin until the receive unit goes idle */
- spin = 300;
- do
- {
- udelay(10);
- outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
- status = inb(LCSR(base));
- }
- while(((status & SR3_RCV_STATE_MASK) != SR3_RCV_IDLE) && (spin-- > 0));
-
- /* Now, spin until the chip finishes executing its current command */
- do
- {
- udelay(10);
- outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
- status = inb(LCSR(base));
- }
- while(((status & SR3_EXEC_STATE_MASK) != SR3_EXEC_IDLE) && (spin-- > 0));
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- /* If there was a problem */
- if(spin <= 0)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_INFO "%s: wv_ru_stop(): The chip doesn't want to stop...\n",
- dev->name);
-#endif
- return FALSE;
- }
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_ru_stop()\n", dev->name);
-#endif
- return TRUE;
-} /* wv_ru_stop */
-
-/*------------------------------------------------------------------*/
-/*
- * This routine starts the receive unit running. First, it checks if
- * the card is actually ready. Then the card is instructed to receive
- * packets again.
- * (called in wv_hw_reset() & wavelan_open())
- */
-static int
-wv_ru_start(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- unsigned long flags;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_ru_start()\n", dev->name);
-#endif
-
- /*
- * We need to start from a quiescent state. To do so, we could check
- * if the card is already running, but instead we just try to shut
- * it down. First, we disable reception (in case it was already enabled).
- */
- if(!wv_ru_stop(dev))
- return FALSE;
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Now we know that no command is being executed. */
-
- /* Set the receive frame pointer and stop pointer */
- lp->rfp = 0;
- outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, LCCR(base));
-
- /* Reset ring management. This sets the receive frame pointer to 1 */
- outb(OP1_RESET_RING_MNGMT, LCCR(base));
-
-#if 0
- /* XXX the i82593 manual page 6-4 seems to indicate that the stop register
- should be set as below */
- /* outb(CR1_STOP_REG_UPDATE|((RX_SIZE - 0x40)>> RX_SIZE_SHIFT),LCCR(base));*/
-#elif 0
- /* but I set it 0 instead */
- lp->stop = 0;
-#else
- /* but I set it to 3 bytes per packet less than 8K */
- lp->stop = (0 + RX_SIZE - ((RX_SIZE / 64) * 3)) % RX_SIZE;
-#endif
- outb(CR1_STOP_REG_UPDATE | (lp->stop >> RX_SIZE_SHIFT), LCCR(base));
- outb(OP1_INT_ENABLE, LCCR(base));
- outb(OP1_SWIT_TO_PORT_0, LCCR(base));
-
- /* Reset receive DMA pointer */
- hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
- hacr_write_slow(base, HACR_DEFAULT);
-
- /* Receive DMA on channel 1 */
- wv_82593_cmd(dev, "wv_ru_start(): rcv-enable",
- CR0_CHNL | OP0_RCV_ENABLE, SR0_NO_RESULT);
-
-#ifdef DEBUG_I82593_SHOW
- {
- int status;
- int opri;
- int spin = 10000;
-
- /* spin until the chip starts receiving */
- do
- {
- outb(OP0_NOP | CR0_STATUS_3, LCCR(base));
- status = inb(LCSR(base));
- if(spin-- <= 0)
- break;
- }
- while(((status & SR3_RCV_STATE_MASK) != SR3_RCV_ACTIVE) &&
- ((status & SR3_RCV_STATE_MASK) != SR3_RCV_READY));
- printk(KERN_DEBUG "rcv status is 0x%x [i:%d]\n",
- (status & SR3_RCV_STATE_MASK), i);
- }
-#endif
-
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_ru_start()\n", dev->name);
-#endif
- return TRUE;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This routine does a standard config of the WaveLAN controller (i82593).
- * In the ISA driver, this is integrated in wavelan_hardware_reset()
- * (called by wv_hw_config(), wv_82593_reconfig() & wavelan_packet_xmit())
- */
-static int
-wv_82593_config(struct net_device * dev)
-{
- unsigned int base = dev->base_addr;
- net_local * lp = netdev_priv(dev);
- struct i82593_conf_block cfblk;
- int ret = TRUE;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_82593_config()\n", dev->name);
-#endif
-
- /* Create & fill i82593 config block
- *
- * Now conform to Wavelan document WCIN085B
- */
- memset(&cfblk, 0x00, sizeof(struct i82593_conf_block));
- cfblk.d6mod = FALSE; /* Run in i82593 advanced mode */
- cfblk.fifo_limit = 5; /* = 56 B rx and 40 B tx fifo thresholds */
- cfblk.forgnesi = FALSE; /* 0=82C501, 1=AMD7992B compatibility */
- cfblk.fifo_32 = 1;
- cfblk.throttle_enb = FALSE;
- cfblk.contin = TRUE; /* enable continuous mode */
- cfblk.cntrxint = FALSE; /* enable continuous mode receive interrupts */
- cfblk.addr_len = WAVELAN_ADDR_SIZE;
- cfblk.acloc = TRUE; /* Disable source addr insertion by i82593 */
- cfblk.preamb_len = 0; /* 2 bytes preamble (SFD) */
- cfblk.loopback = FALSE;
- cfblk.lin_prio = 0; /* conform to 802.3 backoff algorithm */
- cfblk.exp_prio = 5; /* conform to 802.3 backoff algorithm */
- cfblk.bof_met = 1; /* conform to 802.3 backoff algorithm */
- cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */
- cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */
- cfblk.slottim_hi = 0x0;
- cfblk.max_retr = 15;
- cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */
- cfblk.bc_dis = FALSE; /* Enable broadcast reception */
- cfblk.crs_1 = TRUE; /* Transmit without carrier sense */
- cfblk.nocrc_ins = FALSE; /* i82593 generates CRC */
- cfblk.crc_1632 = FALSE; /* 32-bit Autodin-II CRC */
- cfblk.crs_cdt = FALSE; /* CD not to be interpreted as CS */
- cfblk.cs_filter = 0; /* CS is recognized immediately */
- cfblk.crs_src = FALSE; /* External carrier sense */
- cfblk.cd_filter = 0; /* CD is recognized immediately */
- cfblk.min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length 64 bytes */
- cfblk.lng_typ = FALSE; /* Length field > 1500 = type field */
- cfblk.lng_fld = TRUE; /* Disable 802.3 length field check */
- cfblk.rxcrc_xf = TRUE; /* Don't transfer CRC to memory */
- cfblk.artx = TRUE; /* Disable automatic retransmission */
- cfblk.sarec = TRUE; /* Disable source addr trig of CD */
- cfblk.tx_jabber = TRUE; /* Disable jabber jam sequence */
- cfblk.hash_1 = FALSE; /* Use bits 0-5 in mc address hash */
- cfblk.lbpkpol = TRUE; /* Loopback pin active high */
- cfblk.fdx = FALSE; /* Disable full duplex operation */
- cfblk.dummy_6 = 0x3f; /* all ones */
- cfblk.mult_ia = FALSE; /* No multiple individual addresses */
- cfblk.dis_bof = FALSE; /* Disable the backoff algorithm ?! */
- cfblk.dummy_1 = TRUE; /* set to 1 */
- cfblk.tx_ifs_retrig = 3; /* Hmm... Disabled */
-#ifdef MULTICAST_ALL
- cfblk.mc_all = (lp->allmulticast ? TRUE: FALSE); /* Allow all multicasts */
-#else
- cfblk.mc_all = FALSE; /* No multicast all mode */
-#endif
- cfblk.rcv_mon = 0; /* Monitor mode disabled */
- cfblk.frag_acpt = TRUE; /* Do not accept fragments */
- cfblk.tstrttrs = FALSE; /* No start transmission threshold */
- cfblk.fretx = TRUE; /* FIFO automatic retransmission */
- cfblk.syncrqs = FALSE; /* Synchronous DRQ deassertion... */
- cfblk.sttlen = TRUE; /* 6 byte status registers */
- cfblk.rx_eop = TRUE; /* Signal EOP on packet reception */
- cfblk.tx_eop = TRUE; /* Signal EOP on packet transmission */
- cfblk.rbuf_size = RX_SIZE>>11; /* Set receive buffer size */
- cfblk.rcvstop = TRUE; /* Enable Receive Stop Register */
-
-#ifdef DEBUG_I82593_SHOW
- print_hex_dump(KERN_DEBUG, "wavelan_cs: config block: ", DUMP_PREFIX_NONE,
- 16, 1, &cfblk, sizeof(struct i82593_conf_block), false);
-#endif
-
- /* Copy the config block to the i82593 */
- outb(TX_BASE & 0xff, PIORL(base));
- outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
- outb(sizeof(struct i82593_conf_block) & 0xff, PIOP(base)); /* lsb */
- outb(sizeof(struct i82593_conf_block) >> 8, PIOP(base)); /* msb */
- outsb(PIOP(base), (char *) &cfblk, sizeof(struct i82593_conf_block));
-
- /* reset transmit DMA pointer */
- hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
- hacr_write(base, HACR_DEFAULT);
- if(!wv_82593_cmd(dev, "wv_82593_config(): configure",
- OP0_CONFIGURE, SR0_CONFIGURE_DONE))
- ret = FALSE;
-
- /* Initialize adapter's ethernet MAC address */
- outb(TX_BASE & 0xff, PIORL(base));
- outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
- outb(WAVELAN_ADDR_SIZE, PIOP(base)); /* byte count lsb */
- outb(0, PIOP(base)); /* byte count msb */
- outsb(PIOP(base), &dev->dev_addr[0], WAVELAN_ADDR_SIZE);
-
- /* reset transmit DMA pointer */
- hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
- hacr_write(base, HACR_DEFAULT);
- if(!wv_82593_cmd(dev, "wv_82593_config(): ia-setup",
- OP0_IA_SETUP, SR0_IA_SETUP_DONE))
- ret = FALSE;
-
-#ifdef WAVELAN_ROAMING
- /* If roaming is enabled, join the "Beacon Request" multicast group... */
- /* But only if it's not in there already! */
- if(do_roaming)
- dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1);
-#endif /* WAVELAN_ROAMING */
-
- /* If any multicast address to set */
- if(lp->mc_count)
- {
- struct dev_mc_list *dmi;
- int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
- dev->name, lp->mc_count);
- netdev_for_each_mc_addr(dmi, dev)
- printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
-#endif
-
- /* Initialize adapter's ethernet multicast addresses */
- outb(TX_BASE & 0xff, PIORL(base));
- outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
- outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
- outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
- netdev_for_each_mc_addr(dmi, dev)
- outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
-
- /* reset transmit DMA pointer */
- hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
- hacr_write(base, HACR_DEFAULT);
- if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
- OP0_MC_SETUP, SR0_MC_SETUP_DONE))
- ret = FALSE;
- /* remember to avoid repeated reset */
- lp->mc_count = netdev_mc_count(dev);
- }
-
- /* Job done, clear the flag */
- lp->reconfig_82593 = FALSE;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_82593_config()\n", dev->name);
-#endif
- return(ret);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Read the Access Configuration Register, perform a software reset,
- * and then re-enable the card's software.
- *
- * If I understand correctly : reset the pcmcia interface of the
- * wavelan.
- * (called by wv_config())
- */
-static int
-wv_pcmcia_reset(struct net_device * dev)
-{
- int i;
- conf_reg_t reg = { 0, CS_READ, CISREG_COR, 0 };
- struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_pcmcia_reset()\n", dev->name);
-#endif
-
- i = pcmcia_access_configuration_register(link, &reg);
- if (i != 0)
- return FALSE;
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n",
- dev->name, (u_int) reg.Value);
-#endif
-
- reg.Action = CS_WRITE;
- reg.Value = reg.Value | COR_SW_RESET;
- i = pcmcia_access_configuration_register(link, &reg);
- if (i != 0)
- return FALSE;
-
- reg.Action = CS_WRITE;
- reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
- i = pcmcia_access_configuration_register(link, &reg);
- if (i != 0)
- return FALSE;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name);
-#endif
- return TRUE;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * wv_hw_config() is called after a CARD_INSERTION event is
- * received, to configure the wavelan hardware.
- * Note that the reception will be enabled in wavelan->open(), so the
- * device is configured but idle...
- * Performs the following actions:
- * 1. A pcmcia software reset (using wv_pcmcia_reset())
- * 2. A power reset (reset DMA)
- * 3. Reset the LAN controller
- * 4. Initialize the radio modem (using wv_mmc_init)
- * 5. Configure LAN controller (using wv_82593_config)
- * 6. Perform a diagnostic on the LAN controller
- * (called by wavelan_event() & wv_hw_reset())
- */
-static int
-wv_hw_config(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
- unsigned int base = dev->base_addr;
- unsigned long flags;
- int ret = FALSE;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_hw_config()\n", dev->name);
-#endif
-
- /* compile-time check the sizes of structures */
- BUILD_BUG_ON(sizeof(psa_t) != PSA_SIZE);
- BUILD_BUG_ON(sizeof(mmw_t) != MMW_SIZE);
- BUILD_BUG_ON(sizeof(mmr_t) != MMR_SIZE);
-
- /* Reset the pcmcia interface */
- if(wv_pcmcia_reset(dev) == FALSE)
- return FALSE;
-
- /* Disable interrupts */
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Disguised goto ;-) */
- do
- {
- /* Power UP the module + reset the modem + reset host adapter
- * (in fact, reset DMA channels) */
- hacr_write_slow(base, HACR_RESET);
- hacr_write(base, HACR_DEFAULT);
-
- /* Check if the module has been powered up... */
- if(hasr_read(base) & HASR_NO_CLK)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_WARNING "%s: wv_hw_config(): modem not connected or not a wavelan card\n",
- dev->name);
-#endif
- break;
- }
-
- /* initialize the modem */
- if(wv_mmc_init(dev) == FALSE)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_WARNING "%s: wv_hw_config(): Can't configure the modem\n",
- dev->name);
-#endif
- break;
- }
-
- /* reset the LAN controller (i82593) */
- outb(OP0_RESET, LCCR(base));
- mdelay(1); /* A bit crude ! */
-
- /* Initialize the LAN controller */
- if(wv_82593_config(dev) == FALSE)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_INFO "%s: wv_hw_config(): i82593 init failed\n",
- dev->name);
-#endif
- break;
- }
-
- /* Diagnostic */
- if(wv_diag(dev) == FALSE)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_INFO "%s: wv_hw_config(): i82593 diagnostic failed\n",
- dev->name);
-#endif
- break;
- }
-
- /*
- * insert code for loopback test here
- */
-
- /* The device is now configured */
- lp->configured = 1;
- ret = TRUE;
- }
- while(0);
-
- /* Re-enable interrupts */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_hw_config()\n", dev->name);
-#endif
- return(ret);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Totally reset the wavelan and restart it.
- * Performs the following actions:
- * 1. Call wv_hw_config()
- * 2. Start the LAN controller's receive unit
- * (called by wavelan_event(), wavelan_watchdog() and wavelan_open())
- */
-static void
-wv_hw_reset(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: ->wv_hw_reset()\n", dev->name);
-#endif
-
- lp->nresets++;
- lp->configured = 0;
-
- /* Call wv_hw_config() for most of the reset & init stuff */
- if(wv_hw_config(dev) == FALSE)
- return;
-
- /* start receive unit */
- wv_ru_start(dev);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <-wv_hw_reset()\n", dev->name);
-#endif
-}
-
-/*------------------------------------------------------------------*/
-/*
- * wv_pcmcia_config() is called after a CARD_INSERTION event is
- * received, to configure the PCMCIA socket, and to make the ethernet
- * device available to the system.
- * (called by wavelan_event())
- */
-static int
-wv_pcmcia_config(struct pcmcia_device * link)
-{
- struct net_device * dev = (struct net_device *) link->priv;
- int i;
- win_req_t req;
- memreq_t mem;
- net_local * lp = netdev_priv(dev);
-
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link);
-#endif
-
- do
- {
- i = pcmcia_request_io(link, &link->io);
- if (i != 0)
- break;
-
- /*
- * Now allocate an interrupt line. Note that this does not
- * actually assign a handler to the interrupt.
- */
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
- break;
-
- /*
- * This actually configures the PCMCIA socket -- setting up
- * the I/O windows and the interrupt mapping.
- */
- link->conf.ConfigIndex = 1;
- i = pcmcia_request_configuration(link, &link->conf);
- if (i != 0)
- break;
-
- /*
- * Allocate a small memory window. Note that the struct pcmcia_device
- * structure provides space for one window handle -- if your
- * device needs several windows, you'll need to keep track of
- * the handles in your private data structure, link->priv.
- */
- req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
- req.Base = req.Size = 0;
- req.AccessSpeed = mem_speed;
- i = pcmcia_request_window(link, &req, &link->win);
- if (i != 0)
- break;
-
- lp->mem = ioremap(req.Base, req.Size);
- dev->mem_start = (u_long)lp->mem;
- dev->mem_end = dev->mem_start + req.Size;
-
- mem.CardOffset = 0; mem.Page = 0;
- i = pcmcia_map_mem_page(link, link->win, &mem);
- if (i != 0)
- break;
-
- /* Feed device with this info... */
- dev->irq = link->irq.AssignedIRQ;
- dev->base_addr = link->io.BasePort1;
- netif_start_queue(dev);
-
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG "wv_pcmcia_config: MEMSTART %p IRQ %d IOPORT 0x%x\n",
- lp->mem, dev->irq, (u_int) dev->base_addr);
-#endif
-
- SET_NETDEV_DEV(dev, &link->dev);
- i = register_netdev(dev);
- if(i != 0)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_INFO "wv_pcmcia_config(): register_netdev() failed\n");
-#endif
- break;
- }
- }
- while(0); /* Humm... Disguised goto !!! */
-
- /* If any step failed, release any partially configured state */
- if(i != 0)
- {
- wv_pcmcia_release(link);
- return FALSE;
- }
-
- strcpy(((net_local *) netdev_priv(dev))->node.dev_name, dev->name);
- link->dev_node = &((net_local *) netdev_priv(dev))->node;
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "<-wv_pcmcia_config()\n");
-#endif
- return TRUE;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * After a card is removed, wv_pcmcia_release() will unregister the net
- * device, and release the PCMCIA configuration. If the device is
- * still open, this will be postponed until it is closed.
- */
-static void
-wv_pcmcia_release(struct pcmcia_device *link)
-{
- struct net_device * dev = (struct net_device *) link->priv;
- net_local * lp = netdev_priv(dev);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: -> wv_pcmcia_release(0x%p)\n", dev->name, link);
-#endif
-
- iounmap(lp->mem);
- pcmcia_disable_device(link);
-
-#ifdef DEBUG_CONFIG_TRACE
- printk(KERN_DEBUG "%s: <- wv_pcmcia_release()\n", dev->name);
-#endif
-}
-
-/************************ INTERRUPT HANDLING ************************/
-
-/*
- * This function is the interrupt handler for the WaveLAN card. This
- * routine will be called whenever:
- * 1. A packet is received.
- * 2. A packet has successfully been transferred and the unit is
- * ready to transmit another packet.
- * 3. A command has completed execution.
- */
-static irqreturn_t
-wavelan_interrupt(int irq,
- void * dev_id)
-{
- struct net_device * dev = dev_id;
- net_local * lp;
- unsigned int base;
- int status0;
- u_int tx_status;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
-#endif
-
- lp = netdev_priv(dev);
- base = dev->base_addr;
-
-#ifdef DEBUG_INTERRUPT_INFO
- /* Check state of our spinlock (it should be cleared) */
- if(spin_is_locked(&lp->spinlock))
- printk(KERN_DEBUG
- "%s: wavelan_interrupt(): spinlock is already locked !!!\n",
- dev->name);
-#endif
-
- /* Prevent reentrancy. We need to do that because we may have
- * multiple interrupt handlers running concurrently.
- * It is safe because interrupts are disabled before acquiring
- * the spinlock. */
- spin_lock(&lp->spinlock);
-
- /* Treat all pending interrupts */
- while(1)
- {
- /* ---------------- INTERRUPT CHECKING ---------------- */
- /*
- * Look for the interrupt and verify the validity
- */
- outb(CR0_STATUS_0 | OP0_NOP, LCCR(base));
- status0 = inb(LCSR(base));
-
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG "status0 0x%x [%s => 0x%x]", status0,
- (status0&SR0_INTERRUPT)?"int":"no int",status0&~SR0_INTERRUPT);
- if(status0&SR0_INTERRUPT)
- {
- printk(" [%s => %d]\n", (status0 & SR0_CHNL) ? "chnl" :
- ((status0 & SR0_EXECUTION) ? "cmd" :
- ((status0 & SR0_RECEPTION) ? "recv" : "unknown")),
- (status0 & SR0_EVENT_MASK));
- }
- else
- printk("\n");
-#endif
-
- /* Return if no actual interrupt from i82593 (normal exit) */
- if(!(status0 & SR0_INTERRUPT))
- break;
-
- /* If the interrupt is both Rx and Tx, or neither...
- * This code is in fact there to catch the spurious interrupt
- * generated when you remove the wavelan pcmcia card from the socket */
- if(((status0 & SR0_BOTH_RX_TX) == SR0_BOTH_RX_TX) ||
- ((status0 & SR0_BOTH_RX_TX) == 0x0))
- {
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_INFO "%s: wv_interrupt(): bogus interrupt (or from dead card) : %X\n",
- dev->name, status0);
-#endif
- /* Acknowledge the interrupt */
- outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
- break;
- }
-
- /* ----------------- RECEIVING PACKET ----------------- */
- /*
- * When the wavelan signals the reception of a new packet,
- * we call wv_packet_rcv() to copy it from the buffer and
- * send it to NET3
- */
- if(status0 & SR0_RECEPTION)
- {
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG "%s: wv_interrupt(): receive\n", dev->name);
-#endif
-
- if((status0 & SR0_EVENT_MASK) == SR0_STOP_REG_HIT)
- {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "%s: wv_interrupt(): receive buffer overflow\n",
- dev->name);
-#endif
- dev->stats.rx_over_errors++;
- lp->overrunning = 1;
- }
-
- /* Get the packet */
- wv_packet_rcv(dev);
- lp->overrunning = 0;
-
- /* Acknowledge the interrupt */
- outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
- continue;
- }
-
- /* ---------------- COMMAND COMPLETION ---------------- */
- /*
- * Interrupts issued when the i82593 has completed a command.
- * Most likely : transmission done
- */
-
- /* If a transmission has been done */
- if((status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
- (status0 & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
- (status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE)
- {
-#ifdef DEBUG_TX_ERROR
- if((status0 & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE)
- printk(KERN_INFO "%s: wv_interrupt(): packet transmitted without CRC.\n",
- dev->name);
-#endif
-
- /* Get transmission status */
- tx_status = inb(LCSR(base));
- tx_status |= (inb(LCSR(base)) << 8);
-#ifdef DEBUG_INTERRUPT_INFO
- printk(KERN_DEBUG "%s: wv_interrupt(): transmission done\n",
- dev->name);
- {
- u_int rcv_bytes;
- u_char status3;
- rcv_bytes = inb(LCSR(base));
- rcv_bytes |= (inb(LCSR(base)) << 8);
- status3 = inb(LCSR(base));
- printk(KERN_DEBUG "tx_status 0x%02x rcv_bytes 0x%02x status3 0x%x\n",
- tx_status, rcv_bytes, (u_int) status3);
- }
-#endif
- /* Check for possible errors */
- if((tx_status & TX_OK) != TX_OK)
- {
- dev->stats.tx_errors++;
-
- if(tx_status & TX_FRTL)
- {
-#ifdef DEBUG_TX_ERROR
- printk(KERN_INFO "%s: wv_interrupt(): frame too long\n",
- dev->name);
-#endif
- }
- if(tx_status & TX_UND_RUN)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): DMA underrun\n",
- dev->name);
-#endif
- dev->stats.tx_aborted_errors++;
- }
- if(tx_status & TX_LOST_CTS)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): no CTS\n", dev->name);
-#endif
- dev->stats.tx_carrier_errors++;
- }
- if(tx_status & TX_LOST_CRS)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): no carrier\n",
- dev->name);
-#endif
- dev->stats.tx_carrier_errors++;
- }
- if(tx_status & TX_HRT_BEAT)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): heart beat\n", dev->name);
-#endif
- dev->stats.tx_heartbeat_errors++;
- }
- if(tx_status & TX_DEFER)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): channel jammed\n",
- dev->name);
-#endif
- }
- /* Ignore late collisions since they're more likely to happen
- * here (the WaveLAN design prevents the LAN controller from
- * receiving while it is transmitting). We take action only when
- * the maximum number of retransmit attempts is exceeded.
- */
- if(tx_status & TX_COLL)
- {
- if(tx_status & TX_MAX_COL)
- {
-#ifdef DEBUG_TX_FAIL
- printk(KERN_DEBUG "%s: wv_interrupt(): channel congestion\n",
- dev->name);
-#endif
- if(!(tx_status & TX_NCOL_MASK))
- {
- dev->stats.collisions += 0x10;
- }
- }
- }
- } /* if(!(tx_status & TX_OK)) */
-
- dev->stats.collisions += (tx_status & TX_NCOL_MASK);
- dev->stats.tx_packets++;
-
- netif_wake_queue(dev);
- outb(CR0_INT_ACK | OP0_NOP, LCCR(base)); /* Acknowledge the interrupt */
- }
- else /* if interrupt = transmit done or retransmit done */
- {
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "wavelan_cs: unknown interrupt, status0 = %02x\n",
- status0);
-#endif
- outb(CR0_INT_ACK | OP0_NOP, LCCR(base)); /* Acknowledge the interrupt */
- }
- } /* while(1) */
-
- spin_unlock(&lp->spinlock);
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_interrupt()\n", dev->name);
-#endif
-
- /* We always return IRQ_HANDLED, because we will receive empty
- * interrupts under normal operations. Anyway, it doesn't matter
- * as we are dealing with an ISA interrupt that can't be shared.
- *
- * Explanation : under heavy receive, the following happens :
- * ->wavelan_interrupt()
- * (status0 & SR0_INTERRUPT) != 0
- * ->wv_packet_rcv()
- * (status0 & SR0_INTERRUPT) != 0
- * ->wv_packet_rcv()
- * (status0 & SR0_INTERRUPT) == 0 // i.e. no more event
- * <-wavelan_interrupt()
- * ->wavelan_interrupt()
- * (status0 & SR0_INTERRUPT) == 0 // i.e. empty interrupt
- * <-wavelan_interrupt()
- * Jean II */
- return IRQ_HANDLED;
-} /* wv_interrupt */
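
The handler above always follows the same two-step dance with the i82593: latch a status register by writing a NOP command with the register-select bits to LCCR, read the value back from LCSR, and later acknowledge with CR0_INT_ACK. A hedged sketch of how those accesses could be wrapped; wv_status0() and wv_int_ack() are hypothetical helpers, not functions from this driver, and they assume <asm/io.h> plus the LCCR/LCSR macros from wavelan_cs.h and the CR0_*/OP0_* constants from i82593.h are already in scope:

/* Latch and read i82593 status register 0 (illustrative wrapper). */
static inline int wv_status0(unsigned int base)
{
	outb(CR0_STATUS_0 | OP0_NOP, LCCR(base));	/* select status 0 */
	return inb(LCSR(base));				/* read it back */
}

/* Acknowledge the interrupt currently reported by the i82593. */
static inline void wv_int_ack(unsigned int base)
{
	outb(CR0_INT_ACK | OP0_NOP, LCCR(base));
}
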
-
-/*------------------------------------------------------------------*/
-/*
- * Watchdog: when we start a transmission, a timer is set for us in the
- * kernel. If the transmission completes, this timer is disabled. If
- * the timer expires, we are called and we try to unlock the hardware.
- *
- * Note : This watchdog is more clever than the one in the ISA driver,
- * because it tries to abort the current command before resetting
- * everything...
- * On the other hand, it's a bit simpler, because we don't have to
- * deal with the multiple Tx buffers...
- */
-static void
-wavelan_watchdog(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
- unsigned int base = dev->base_addr;
- unsigned long flags;
- int aborted = FALSE;
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_watchdog()\n", dev->name);
-#endif
-
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "%s: wavelan_watchdog: watchdog timer expired\n",
- dev->name);
-#endif
-
- spin_lock_irqsave(&lp->spinlock, flags);
-
- /* Ask to abort the current command */
- outb(OP0_ABORT, LCCR(base));
-
- /* Wait for the end of the command (a bit hackish) */
- if(wv_82593_cmd(dev, "wavelan_watchdog(): abort",
- OP0_NOP | CR0_STATUS_3, SR0_EXECUTION_ABORTED))
- aborted = TRUE;
-
- /* Release spinlock here so that wv_hw_reset() can grab it */
- spin_unlock_irqrestore(&lp->spinlock, flags);
-
- /* Check if we were successful in aborting it */
- if(!aborted)
- {
- /* It seems that it wasn't enough */
-#ifdef DEBUG_INTERRUPT_ERROR
- printk(KERN_INFO "%s: wavelan_watchdog: abort failed, trying reset\n",
- dev->name);
-#endif
- wv_hw_reset(dev);
- }
-
-#ifdef DEBUG_PSA_SHOW
- {
- psa_t psa;
- psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
- wv_psa_show(&psa);
- }
-#endif
-#ifdef DEBUG_MMC_SHOW
- wv_mmc_show(dev);
-#endif
-#ifdef DEBUG_I82593_SHOW
- wv_ru_show(dev);
-#endif
-
- /* We are no longer waiting for anything... */
- netif_wake_queue(dev);
-
-#ifdef DEBUG_INTERRUPT_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_watchdog()\n", dev->name);
-#endif
-}
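
On the kernel side this watchdog is driven by just two fields of the net_device: watchdog_timeo arms the timer whenever the transmit queue is stopped, and ndo_tx_timeout is the callback invoked when it expires (this driver wires them up in the wavelan_netdev_ops table and in wavelan_probe() below). A minimal sketch of that contract using hypothetical names (my_tx_timeout, my_netdev_ops, my_setup); the callback signature matches this kernel generation, which had no txqueue argument:

#include <linux/netdevice.h>

/* Called (in softirq context) when the Tx queue has been stopped for
 * longer than dev->watchdog_timeo without a completion. */
static void my_tx_timeout(struct net_device *dev)
{
	/* ... unwedge the hardware here, then ... */
	netif_wake_queue(dev);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_tx_timeout = my_tx_timeout,
};

static void my_setup(struct net_device *dev)
{
	dev->netdev_ops = &my_netdev_ops;
	dev->watchdog_timeo = 4 * HZ;	/* arbitrary example value */
}
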
-
-/********************* CONFIGURATION CALLBACKS *********************/
-/*
- * Here are the functions called by the pcmcia package (cardmgr) and
- * linux networking (NET3) for initialization, configuration and
- * deinstallations of the Wavelan Pcmcia Hardware.
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Configure and start up the WaveLAN PCMCIA adaptor.
- * Called by NET3 when it "open" the device.
- */
-static int
-wavelan_open(struct net_device * dev)
-{
- net_local * lp = netdev_priv(dev);
- struct pcmcia_device * link = lp->link;
- unsigned int base = dev->base_addr;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
- (unsigned int) dev);
-#endif
-
- /* Check if the modem is powered up (wavelan_close() powers it down) */
- if(hasr_read(base) & HASR_NO_CLK)
- {
- /* Power up (power up time is 250us) */
- hacr_write(base, HACR_DEFAULT);
-
- /* Check if the module has been powered up... */
- if(hasr_read(base) & HASR_NO_CLK)
- {
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_WARNING "%s: wavelan_open(): modem not connected\n",
- dev->name);
-#endif
- return FALSE;
- }
- }
-
- /* Start reception and declare the driver ready */
- if(!lp->configured)
- return FALSE;
- if(!wv_ru_start(dev))
- wv_hw_reset(dev); /* If problem : reset */
- netif_start_queue(dev);
-
- /* Mark the device as used */
- link->open++;
-
-#ifdef WAVELAN_ROAMING
- if(do_roaming)
- wv_roam_init(dev);
-#endif /* WAVELAN_ROAMING */
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_open()\n", dev->name);
-#endif
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Shutdown the WaveLAN PCMCIA adaptor.
- * Called by NET3 when it "close" the device.
- */
-static int
-wavelan_close(struct net_device * dev)
-{
- struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
- unsigned int base = dev->base_addr;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
- (unsigned int) dev);
-#endif
-
- /* If the device isn't open, then nothing to do */
- if(!link->open)
- {
-#ifdef DEBUG_CONFIG_INFO
- printk(KERN_DEBUG "%s: wavelan_close(): device not open\n", dev->name);
-#endif
- return 0;
- }
-
-#ifdef WAVELAN_ROAMING
- /* Cleanup of roaming stuff... */
- if(do_roaming)
- wv_roam_cleanup(dev);
-#endif /* WAVELAN_ROAMING */
-
- link->open--;
-
- /* If the card is still present */
- if(netif_running(dev))
- {
- netif_stop_queue(dev);
-
- /* Stop receiving new messages and wait end of transmission */
- wv_ru_stop(dev);
-
- /* Power down the module */
- hacr_write(base, HACR_DEFAULT & (~HACR_PWR_STAT));
- }
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "%s: <-wavelan_close()\n", dev->name);
-#endif
- return 0;
-}
-
-static const struct net_device_ops wavelan_netdev_ops = {
- .ndo_open = wavelan_open,
- .ndo_stop = wavelan_close,
- .ndo_start_xmit = wavelan_packet_xmit,
- .ndo_set_multicast_list = wavelan_set_multicast_list,
-#ifdef SET_MAC_ADDRESS
- .ndo_set_mac_address = wavelan_set_mac_address,
-#endif
- .ndo_tx_timeout = wavelan_watchdog,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*------------------------------------------------------------------*/
-/*
- * wavelan_attach() creates an "instance" of the driver, allocating
- * local data structures for one device (one interface). The device
- * is registered with Card Services.
- *
- * The dev_link structure is initialized, but we don't actually
- * configure the card at this point -- we wait until we receive a
- * card insertion event.
- */
-static int
-wavelan_probe(struct pcmcia_device *p_dev)
-{
- struct net_device * dev; /* Interface generic data */
- net_local * lp; /* Interface specific data */
- int ret;
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "-> wavelan_attach()\n");
-#endif
-
- /* The io structure describes IO port mapping */
- p_dev->io.NumPorts1 = 8;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.IOAddrLines = 3;
-
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = wavelan_interrupt;
-
- /* General socket configuration */
- p_dev->conf.Attributes = CONF_ENABLE_IRQ;
- p_dev->conf.IntType = INT_MEMORY_AND_IO;
-
- /* Allocate the generic data structure */
- dev = alloc_etherdev(sizeof(net_local));
- if (!dev)
- return -ENOMEM;
-
- p_dev->priv = dev;
-
- lp = netdev_priv(dev);
-
- /* Init specific data */
- lp->configured = 0;
- lp->reconfig_82593 = FALSE;
- lp->nresets = 0;
- /* Multicast stuff */
- lp->promiscuous = 0;
- lp->allmulticast = 0;
- lp->mc_count = 0;
-
- /* Init spinlock */
- spin_lock_init(&lp->spinlock);
-
- /* back links */
- lp->dev = dev;
-
- /* wavelan NET3 callbacks */
- dev->netdev_ops = &wavelan_netdev_ops;
- dev->watchdog_timeo = WATCHDOG_JIFFIES;
- SET_ETHTOOL_OPS(dev, &ops);
-
- dev->wireless_handlers = &wavelan_handler_def;
- lp->wireless_data.spy_data = &lp->spy_data;
- dev->wireless_data = &lp->wireless_data;
-
- /* Other specific data */
- dev->mtu = WAVELAN_MTU;
-
- ret = wv_pcmcia_config(p_dev);
- if (ret)
- return ret;
-
- ret = wv_hw_config(dev);
- if (ret) {
- dev->irq = 0;
- pcmcia_disable_device(p_dev);
- return ret;
- }
-
- wv_init_info(dev);
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "<- wavelan_attach()\n");
-#endif
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * This deletes a driver "instance". The device is de-registered with
- * Card Services. If it has been released, all local data structures
- * are freed. Otherwise, the structures will be freed when the device
- * is released.
- */
-static void
-wavelan_detach(struct pcmcia_device *link)
-{
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "-> wavelan_detach(0x%p)\n", link);
-#endif
-
- /* Some others haven't done their job : give them another chance */
- wv_pcmcia_release(link);
-
- /* Free pieces */
- if(link->priv)
- {
- struct net_device * dev = (struct net_device *) link->priv;
-
- /* Remove ourselves from the kernel list of ethernet devices */
- /* Warning : can't be called from interrupt, timer or wavelan_close() */
- if (link->dev_node)
- unregister_netdev(dev);
- link->dev_node = NULL;
- ((net_local *)netdev_priv(dev))->link = NULL;
- ((net_local *)netdev_priv(dev))->dev = NULL;
- free_netdev(dev);
- }
-
-#ifdef DEBUG_CALLBACK_TRACE
- printk(KERN_DEBUG "<- wavelan_detach()\n");
-#endif
-}
-
-static int wavelan_suspend(struct pcmcia_device *link)
-{
- struct net_device * dev = (struct net_device *) link->priv;
-
- /* NB: wavelan_close will be called, but too late, so we are
- * obliged to close the wavelan nicely here. David, could you
- * close the device before suspending it ? And, by the way,
- * could you, on resume, add a "route add -net ..." after the
- * ifconfig up ? Thanks... */
-
- /* Stop receiving new messages and wait end of transmission */
- wv_ru_stop(dev);
-
- if (link->open)
- netif_device_detach(dev);
-
- /* Power down the module */
- hacr_write(dev->base_addr, HACR_DEFAULT & (~HACR_PWR_STAT));
-
- return 0;
-}
-
-static int wavelan_resume(struct pcmcia_device *link)
-{
- struct net_device * dev = (struct net_device *) link->priv;
-
- if (link->open) {
- wv_hw_reset(dev);
- netif_device_attach(dev);
- }
-
- return 0;
-}
-
-
-static struct pcmcia_device_id wavelan_ids[] = {
- PCMCIA_DEVICE_PROD_ID12("AT&T","WaveLAN/PCMCIA", 0xe7c5affd, 0x1bc50975),
- PCMCIA_DEVICE_PROD_ID12("Digital", "RoamAbout/DS", 0x9999ab35, 0x00d05e06),
- PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/PCMCIA", 0x23eb9949, 0x1bc50975),
- PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/PCMCIA", 0x24358cd4, 0x1bc50975),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, wavelan_ids);
-
-static struct pcmcia_driver wavelan_driver = {
- .owner = THIS_MODULE,
- .drv = {
- .name = "wavelan_cs",
- },
- .probe = wavelan_probe,
- .remove = wavelan_detach,
- .id_table = wavelan_ids,
- .suspend = wavelan_suspend,
- .resume = wavelan_resume,
-};
-
-static int __init
-init_wavelan_cs(void)
-{
- return pcmcia_register_driver(&wavelan_driver);
-}
-
-static void __exit
-exit_wavelan_cs(void)
-{
- pcmcia_unregister_driver(&wavelan_driver);
-}
-
-module_init(init_wavelan_cs);
-module_exit(exit_wavelan_cs);
diff --git a/drivers/staging/wavelan/wavelan_cs.h b/drivers/staging/wavelan/wavelan_cs.h
deleted file mode 100644
index 2e4bfe4147c..00000000000
--- a/drivers/staging/wavelan/wavelan_cs.h
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Wavelan Pcmcia driver
- *
- * Jean II - HPLB '96
- *
- * Reorganization and extension of the driver.
- * Original copyright follows. See wavelan_cs.h for details.
- *
- * This file contains the declarations of the Wavelan hardware. Note that
- * the Pcmcia Wavelan includes an i82593 controller (see definitions in
- * file i82593.h).
- *
- * The main difference between the pcmcia hardware and the ISA one is
- * the Ethernet Controller (i82593 instead of i82586). The i82593 allows
- * only one send buffer. The PSA (Parameter Storage Area : EEprom for
- * permanent storage of various info) is memory mapped, but not the
- * MMI (Modem Management Interface).
- */
-
-/*
- * Definitions for the AT&T GIS (formerly NCR) WaveLAN PCMCIA card:
- * An Ethernet-like radio transceiver controlled by an Intel 82593
- * coprocessor.
- *
- *
- ****************************************************************************
- * Copyright 1995
- * Anthony D. Joseph
- * Massachusetts Institute of Technology
- *
- * Permission to use, copy, modify, and distribute this program
- * for any purpose and without fee is hereby granted, provided
- * that this copyright and permission notice appear on all copies
- * and supporting documentation, the name of M.I.T. not be used
- * in advertising or publicity pertaining to distribution of the
- * program without specific prior permission, and notice be given
- * in supporting documentation that copying and distribution is
- * by permission of M.I.T. M.I.T. makes no representations about
- * the suitability of this software for any purpose. It is pro-
- * vided "as is" without express or implied warranty.
- ****************************************************************************
- *
- *
- * Credits:
- * Special thanks to Jan Hoogendoorn of AT&T GIS Utrecht for
- * providing extremely useful information about WaveLAN PCMCIA hardware
- *
- * This driver is based upon several other drivers, in particular:
- * David Hinds' Linux driver for the PCMCIA 3c589 ethernet adapter
- * Bruce Janson's Linux driver for the AT-bus WaveLAN adapter
- * Anders Klemets' PCMCIA WaveLAN adapter driver
- * Robert Morris' BSDI driver for the PCMCIA WaveLAN adapter
- */
-
-#ifndef _WAVELAN_CS_H
-#define _WAVELAN_CS_H
-
-/************************** MAGIC NUMBERS ***************************/
-
-/* The detection of the wavelan card is made by reading the MAC address
- * from the card and checking it. If you have a non-AT&T product (OEM,
- * like DEC RoamAbout, or Digital Ocean, Epson, ...), you must modify this
- * part to accommodate your hardware...
- */
-static const unsigned char MAC_ADDRESSES[][3] =
-{
- { 0x08, 0x00, 0x0E }, /* AT&T Wavelan (standard) & DEC RoamAbout */
- { 0x08, 0x00, 0x6A }, /* AT&T Wavelan (alternate) */
- { 0x00, 0x00, 0xE1 }, /* Hitachi Wavelan */
- { 0x00, 0x60, 0x1D } /* Lucent Wavelan (another one) */
- /* Add your card here and send me the patch ! */
-};
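
The detection described above boils down to comparing the first three octets of the card's MAC address against the table. A small sketch of such a check; wv_check_mac_table() is a hypothetical name (the real driver does the comparison inline during probe), and it assumes the MAC_ADDRESSES table above plus <string.h> (or <linux/string.h> in kernel context) for memcmp():

/* Return 1 if the first three octets of `mac' match a known company
 * code from the MAC_ADDRESSES table above (illustrative helper). */
static int wv_check_mac_table(const unsigned char mac[6])
{
	unsigned int i;

	for (i = 0; i < sizeof(MAC_ADDRESSES) / sizeof(MAC_ADDRESSES[0]); i++)
		if (memcmp(mac, MAC_ADDRESSES[i], 3) == 0)
			return 1;
	return 0;
}
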
-
-/*
- * Constants used to convert channels to frequencies
- */
-
-/* Frequencies available in the 2.0 modem, in units of 250 kHz
- * (as read in the offset register of the dac area).
- * Used to map channel numbers used by `wfreqsel' to frequencies
- */
-static const short channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
- 0xD0, 0xF0, 0xF8, 0x150 };
-
-/* Frequencies of the 1.0 modem (fixed frequencies).
- * Used to map the PSA `subband' to a frequency
- * Note : all frequencies apart from the first one need to be multiplied by 10
- */
-static const int fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
-
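
As a concrete illustration of the note above, a hypothetical helper (wv_subband_to_hz(), not part of the driver) that maps a PSA subband code to a frequency in Hz, multiplying every entry but the first by 10; the literal 0 below corresponds to the PSA_SUBBAND_915 code defined further down in this header:

/* Map a PSA subband code to a frequency in Hz (illustrative only). */
static unsigned long wv_subband_to_hz(unsigned int subband)
{
	if (subband >= sizeof(fixed_bands) / sizeof(fixed_bands[0]))
		return 0;			/* unknown code */
	if (subband == 0)			/* 915 MHz: stored directly in Hz */
		return fixed_bands[0];
	return (unsigned long)fixed_bands[subband] * 10;
}
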
-
-/*************************** PC INTERFACE ****************************/
-
-/* WaveLAN host interface definitions */
-
-#define LCCR(base) (base) /* LAN Controller Command Register */
-#define LCSR(base) (base) /* LAN Controller Status Register */
-#define HACR(base) (base+0x1) /* Host Adapter Command Register */
-#define HASR(base) (base+0x1) /* Host Adapter Status Register */
-#define PIORL(base) (base+0x2) /* Program I/O Register Low */
-#define RPLL(base) (base+0x2) /* Receive Pointer Latched Low */
-#define PIORH(base) (base+0x3) /* Program I/O Register High */
-#define RPLH(base) (base+0x3) /* Receive Pointer Latched High */
-#define PIOP(base) (base+0x4) /* Program I/O Port */
-#define MMR(base) (base+0x6) /* MMI Address Register */
-#define MMD(base) (base+0x7) /* MMI Data Register */
-
-/* Host Adaptor Command Register bit definitions */
-
-#define HACR_LOF (1 << 3) /* Lock Out Flag, toggle every 250ms */
-#define HACR_PWR_STAT (1 << 4) /* Power State, 1=active, 0=sleep */
-#define HACR_TX_DMA_RESET (1 << 5) /* Reset transmit DMA ptr on high */
-#define HACR_RX_DMA_RESET (1 << 6) /* Reset receive DMA ptr on high */
-#define HACR_ROM_WEN (1 << 7) /* EEPROM write enabled when true */
-
-#define HACR_RESET (HACR_TX_DMA_RESET | HACR_RX_DMA_RESET)
-#define HACR_DEFAULT (HACR_PWR_STAT)
-
-/* Host Adapter Status Register bit definitions */
-
-#define HASR_MMI_BUSY (1 << 2) /* MMI is busy when true */
-#define HASR_LOF (1 << 3) /* Lock out flag status */
-#define HASR_NO_CLK (1 << 4) /* active when modem not connected */
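
HACR_PWR_STAT and HASR_NO_CLK together describe the power-up handshake used by wv_hw_config() and wavelan_open(): write HACR_DEFAULT to power the modem up, wait for the documented ~250 us power-up time, then check that HASR_NO_CLK has dropped. A hedged sketch of that sequence; wv_power_on() is a hypothetical helper (the driver itself goes through its hacr_write() wrapper rather than a bare outb()) and it assumes <asm/io.h>, <linux/delay.h> and the register macros above:

static int wv_power_on(unsigned int base)
{
	outb(HACR_DEFAULT, HACR(base));	/* set PWR_STAT = active */
	udelay(250);			/* modem power-up time (~250 us) */

	/* HASR_NO_CLK still set means no modem (or not a WaveLAN card). */
	return (inb(HASR(base)) & HASR_NO_CLK) ? -1 : 0;
}
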
-
-/* Miscellaneous bit definitions */
-
-#define PIORH_SEL_TX (1 << 5) /* PIOR points to 0=rx/1=tx buffer */
-#define MMR_MMI_WR (1 << 0) /* Next MMI cycle is 0=read, 1=write */
-#define PIORH_MASK 0x1f /* only low 5 bits are significant */
-#define RPLH_MASK 0x1f /* only low 5 bits are significant */
-#define MMI_ADDR_MASK 0x7e /* Bits 1-6 of MMR are significant */
-
-/* Attribute Memory map */
-
-#define CIS_ADDR 0x0000 /* Card Information Status Register */
-#define PSA_ADDR 0x0e00 /* Parameter Storage Area address */
-#define EEPROM_ADDR 0x1000 /* EEPROM address (unused ?) */
-#define COR_ADDR 0x4000 /* Configuration Option Register */
-
-/* Configuration Option Register bit definitions */
-
-#define COR_CONFIG (1 << 0) /* Config Index, 0 when unconfigured */
-#define COR_SW_RESET (1 << 7) /* Software Reset on true */
-#define COR_LEVEL_IRQ (1 << 6) /* Level IRQ */
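
COR_SW_RESET and COR_LEVEL_IRQ are what wv_pcmcia_reset() (whose tail is visible at the top of this patch) toggles through pcmcia_access_configuration_register(): read the COR, pulse the reset bit, then leave the card configured with level interrupts. A hedged sketch of that sequence against the old (pre-2.6.36) PCMCIA conf_reg_t interface this driver already uses; error handling and the driver's debug checks are trimmed, so treat it as illustrative only (assumes <pcmcia/cs.h>, <pcmcia/cisreg.h> and <pcmcia/ds.h>):

static int wv_cor_sw_reset(struct pcmcia_device *link)
{
	conf_reg_t reg = { .Function = 0, .Action = CS_READ,
			   .Offset = CISREG_COR };

	if (pcmcia_access_configuration_register(link, &reg) != 0)
		return -1;

	reg.Action = CS_WRITE;
	reg.Value |= COR_SW_RESET;		/* pulse the reset bit */
	if (pcmcia_access_configuration_register(link, &reg) != 0)
		return -1;

	reg.Value = COR_LEVEL_IRQ | COR_CONFIG;	/* back to configured state */
	return pcmcia_access_configuration_register(link, &reg);
}
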
-
-/* Local Memory map */
-
-#define RX_BASE 0x0000 /* Receive memory, 8 kB */
-#define TX_BASE 0x2000 /* Transmit memory, 2 kB */
-#define UNUSED_BASE 0x2800 /* Unused, 22 kB */
-#define RX_SIZE (TX_BASE-RX_BASE) /* Size of receive area */
-#define RX_SIZE_SHIFT 6 /* Bits to shift in stop register */
-
-#define TRUE 1
-#define FALSE 0
-
-#define MOD_ENAL 1
-#define MOD_PROM 2
-
-/* Size of a MAC address */
-#define WAVELAN_ADDR_SIZE 6
-
-/* Maximum size of Wavelan packet */
-#define WAVELAN_MTU 1500
-
-#define MAXDATAZ (6 + 6 + 2 + WAVELAN_MTU)
-
-/********************** PARAMETER STORAGE AREA **********************/
-
-/*
- * Parameter Storage Area (PSA).
- */
-typedef struct psa_t psa_t;
-struct psa_t
-{
- /* For the PCMCIA Adapter, locations 0x00-0x0F are unused and fixed at 00 */
- unsigned char psa_io_base_addr_1; /* [0x00] Base address 1 ??? */
- unsigned char psa_io_base_addr_2; /* [0x01] Base address 2 */
- unsigned char psa_io_base_addr_3; /* [0x02] Base address 3 */
- unsigned char psa_io_base_addr_4; /* [0x03] Base address 4 */
- unsigned char psa_rem_boot_addr_1; /* [0x04] Remote Boot Address 1 */
- unsigned char psa_rem_boot_addr_2; /* [0x05] Remote Boot Address 2 */
- unsigned char psa_rem_boot_addr_3; /* [0x06] Remote Boot Address 3 */
- unsigned char psa_holi_params; /* [0x07] HOst Lan Interface (HOLI) Parameters */
- unsigned char psa_int_req_no; /* [0x08] Interrupt Request Line */
- unsigned char psa_unused0[7]; /* [0x09-0x0F] unused */
-
- unsigned char psa_univ_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x10-0x15] Universal (factory) MAC Address */
- unsigned char psa_local_mac_addr[WAVELAN_ADDR_SIZE]; /* [0x16-1B] Local MAC Address */
- unsigned char psa_univ_local_sel; /* [0x1C] Universal Local Selection */
-#define PSA_UNIVERSAL 0 /* Universal (factory) */
-#define PSA_LOCAL 1 /* Local */
- unsigned char psa_comp_number; /* [0x1D] Compatibility Number: */
-#define PSA_COMP_PC_AT_915 0 /* PC-AT 915 MHz */
-#define PSA_COMP_PC_MC_915 1 /* PC-MC 915 MHz */
-#define PSA_COMP_PC_AT_2400 2 /* PC-AT 2.4 GHz */
-#define PSA_COMP_PC_MC_2400 3 /* PC-MC 2.4 GHz */
-#define PSA_COMP_PCMCIA_915 4 /* PCMCIA 915 MHz or 2.0 */
- unsigned char psa_thr_pre_set; /* [0x1E] Modem Threshold Preset */
- unsigned char psa_feature_select; /* [0x1F] Call code required (1=on) */
-#define PSA_FEATURE_CALL_CODE 0x01 /* Call code required (Japan) */
- unsigned char psa_subband; /* [0x20] Subband */
-#define PSA_SUBBAND_915 0 /* 915 MHz or 2.0 */
-#define PSA_SUBBAND_2425 1 /* 2425 MHz */
-#define PSA_SUBBAND_2460 2 /* 2460 MHz */
-#define PSA_SUBBAND_2484 3 /* 2484 MHz */
-#define PSA_SUBBAND_2430_5 4 /* 2430.5 MHz */
- unsigned char psa_quality_thr; /* [0x21] Modem Quality Threshold */
- unsigned char psa_mod_delay; /* [0x22] Modem Delay ??? (reserved) */
- unsigned char psa_nwid[2]; /* [0x23-0x24] Network ID */
- unsigned char psa_nwid_select; /* [0x25] Network ID Select On Off */
- unsigned char psa_encryption_select; /* [0x26] Encryption On Off */
- unsigned char psa_encryption_key[8]; /* [0x27-0x2E] Encryption Key */
- unsigned char psa_databus_width; /* [0x2F] AT bus width select 8/16 */
- unsigned char psa_call_code[8]; /* [0x30-0x37] (Japan) Call Code */
- unsigned char psa_nwid_prefix[2]; /* [0x38-0x39] Roaming domain */
- unsigned char psa_reserved[2]; /* [0x3A-0x3B] Reserved - fixed 00 */
- unsigned char psa_conf_status; /* [0x3C] Conf Status, bit 0=1:config*/
- unsigned char psa_crc[2]; /* [0x3D] CRC-16 over PSA */
- unsigned char psa_crc_status; /* [0x3F] CRC Valid Flag */
-};
-
-/* Size for structure checking (if padding is correct) */
-#define PSA_SIZE 64
-
-/* Calculate offset of a field in the above structure
- * Warning : only even addresses are used */
-#define psaoff(p,f) ((unsigned short) ((void *)(&((psa_t *) ((void *) NULL + (p)))->f) - (void *) NULL))
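
The psaoff() macro above is a hand-rolled offsetof(): it computes the byte offset of a PSA field, optionally shifted by a base offset p. On any standard compiler the same thing can be expressed with the offsetof() macro; a purely illustrative, standalone equivalent (PSAOFF is a hypothetical name, not used by the driver, and the printed value relies on the psa_t layout above):

#include <stddef.h>	/* offsetof() */
#include <stdio.h>

/* Equivalent of psaoff(p, f) using the standard offsetof() macro. */
#define PSAOFF(p, f)	((unsigned short)((p) + offsetof(psa_t, f)))

int main(void)
{
	/* e.g. the Network ID lives 0x23 bytes into the PSA */
	printf("psa_nwid offset = 0x%02x\n", PSAOFF(0, psa_nwid));
	return 0;
}
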
-
-/******************** MODEM MANAGEMENT INTERFACE ********************/
-
-/*
- * Modem Management Controller (MMC) write structure.
- */
-typedef struct mmw_t mmw_t;
-struct mmw_t
-{
- unsigned char mmw_encr_key[8]; /* encryption key */
- unsigned char mmw_encr_enable; /* enable/disable encryption */
-#define MMW_ENCR_ENABLE_MODE 0x02 /* Mode of security option */
-#define MMW_ENCR_ENABLE_EN 0x01 /* Enable security option */
- unsigned char mmw_unused0[1]; /* unused */
- unsigned char mmw_des_io_invert; /* Encryption option */
-#define MMW_DES_IO_INVERT_RES 0x0F /* Reserved */
-#define MMW_DES_IO_INVERT_CTRL 0xF0 /* Control ??? (set to 0) */
- unsigned char mmw_unused1[5]; /* unused */
- unsigned char mmw_loopt_sel; /* looptest selection */
-#define MMW_LOOPT_SEL_DIS_NWID 0x40 /* disable NWID filtering */
-#define MMW_LOOPT_SEL_INT 0x20 /* activate Attention Request */
-#define MMW_LOOPT_SEL_LS 0x10 /* looptest w/o collision avoidance */
-#define MMW_LOOPT_SEL_LT3A 0x08 /* looptest 3a */
-#define MMW_LOOPT_SEL_LT3B 0x04 /* looptest 3b */
-#define MMW_LOOPT_SEL_LT3C 0x02 /* looptest 3c */
-#define MMW_LOOPT_SEL_LT3D 0x01 /* looptest 3d */
- unsigned char mmw_jabber_enable; /* jabber timer enable */
- /* Abort transmissions > 200 ms */
- unsigned char mmw_freeze; /* freeze / unfreeze signal level */
- /* 0 : signal level & qual updated for every new message, 1 : frozen */
- unsigned char mmw_anten_sel; /* antenna selection */
-#define MMW_ANTEN_SEL_SEL 0x01 /* direct antenna selection */
-#define MMW_ANTEN_SEL_ALG_EN 0x02 /* antenna selection algo. enable */
- unsigned char mmw_ifs; /* inter frame spacing */
- /* min time between transmission in bit periods (.5 us) - bit 0 ignored */
- unsigned char mmw_mod_delay; /* modem delay (synchro) */
- unsigned char mmw_jam_time; /* jamming time (after collision) */
- unsigned char mmw_unused2[1]; /* unused */
- unsigned char mmw_thr_pre_set; /* level threshold preset */
- /* Discard all packet with signal < this value (4) */
- unsigned char mmw_decay_prm; /* decay parameters */
- unsigned char mmw_decay_updat_prm; /* decay update parameters */
- unsigned char mmw_quality_thr; /* quality (z-quotient) threshold */
- /* Discard all packet with quality < this value (3) */
- unsigned char mmw_netw_id_l; /* NWID low order byte */
- unsigned char mmw_netw_id_h; /* NWID high order byte */
- /* Network ID or Domain : create virtual net on the air */
-
- /* 2.0 Hardware extension - frequency selection support */
- unsigned char mmw_mode_select; /* for analog tests (set to 0) */
- unsigned char mmw_unused3[1]; /* unused */
- unsigned char mmw_fee_ctrl; /* frequency eeprom control */
-#define MMW_FEE_CTRL_PRE 0x10 /* Enable protected instructions */
-#define MMW_FEE_CTRL_DWLD 0x08 /* Download eeprom to mmc */
-#define MMW_FEE_CTRL_CMD 0x07 /* EEprom commands : */
-#define MMW_FEE_CTRL_READ 0x06 /* Read */
-#define MMW_FEE_CTRL_WREN 0x04 /* Write enable */
-#define MMW_FEE_CTRL_WRITE 0x05 /* Write data to address */
-#define MMW_FEE_CTRL_WRALL 0x04 /* Write data to all addresses */
-#define MMW_FEE_CTRL_WDS 0x04 /* Write disable */
-#define MMW_FEE_CTRL_PRREAD 0x16 /* Read addr from protect register */
-#define MMW_FEE_CTRL_PREN 0x14 /* Protect register enable */
-#define MMW_FEE_CTRL_PRCLEAR 0x17 /* Unprotect all registers */
-#define MMW_FEE_CTRL_PRWRITE 0x15 /* Write addr in protect register */
-#define MMW_FEE_CTRL_PRDS 0x14 /* Protect register disable */
- /* Never issue this command (PRDS) : it's irreversible !!! */
-
- unsigned char mmw_fee_addr; /* EEprom address */
-#define MMW_FEE_ADDR_CHANNEL 0xF0 /* Select the channel */
-#define MMW_FEE_ADDR_OFFSET 0x0F /* Offset in channel data */
-#define MMW_FEE_ADDR_EN 0xC0 /* FEE_CTRL enable operations */
-#define MMW_FEE_ADDR_DS 0x00 /* FEE_CTRL disable operations */
-#define MMW_FEE_ADDR_ALL 0x40 /* FEE_CTRL all operations */
-#define MMW_FEE_ADDR_CLEAR 0xFF /* FEE_CTRL clear operations */
-
- unsigned char mmw_fee_data_l; /* Write data to EEprom */
- unsigned char mmw_fee_data_h; /* high octet */
- unsigned char mmw_ext_ant; /* Setting for external antenna */
-#define MMW_EXT_ANT_EXTANT 0x01 /* Select external antenna */
-#define MMW_EXT_ANT_POL 0x02 /* Polarity of the antenna */
-#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
-#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
-#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
-} __attribute__((packed));
-
-/* Size for structure checking (if padding is correct) */
-#define MMW_SIZE 37
-
-/* Calculate offset of a field in the above structure */
-#define mmwoff(p,f) (unsigned short)((void *)(&((mmw_t *)((void *)0 + (p)))->f) - (void *)0)
-
-
-/*
- * Modem Management Controller (MMC) read structure.
- */
-typedef struct mmr_t mmr_t;
-struct mmr_t
-{
- unsigned char mmr_unused0[8]; /* unused */
- unsigned char mmr_des_status; /* encryption status */
- unsigned char mmr_des_avail; /* encryption available (0x55 read) */
-#define MMR_DES_AVAIL_DES 0x55 /* DES available */
-#define MMR_DES_AVAIL_AES 0x33 /* AES (AT&T) available */
- unsigned char mmr_des_io_invert; /* des I/O invert register */
- unsigned char mmr_unused1[5]; /* unused */
- unsigned char mmr_dce_status; /* DCE status */
-#define MMR_DCE_STATUS_RX_BUSY 0x01 /* receiver busy */
-#define MMR_DCE_STATUS_LOOPT_IND 0x02 /* loop test indicated */
-#define MMR_DCE_STATUS_TX_BUSY 0x04 /* transmitter on */
-#define MMR_DCE_STATUS_JBR_EXPIRED 0x08 /* jabber timer expired */
-#define MMR_DCE_STATUS 0x0F /* mask to get the bits */
- unsigned char mmr_dsp_id; /* DSP id (AA = Daedalus rev A) */
- unsigned char mmr_unused2[2]; /* unused */
- unsigned char mmr_correct_nwid_l; /* # of correct NWID's rxd (low) */
- unsigned char mmr_correct_nwid_h; /* # of correct NWID's rxd (high) */
- /* Warning : Read high order octet first !!! */
- unsigned char mmr_wrong_nwid_l; /* # of wrong NWID's rxd (low) */
- unsigned char mmr_wrong_nwid_h; /* # of wrong NWID's rxd (high) */
- unsigned char mmr_thr_pre_set; /* level threshold preset */
-#define MMR_THR_PRE_SET 0x3F /* level threshold preset */
-#define MMR_THR_PRE_SET_CUR 0x80 /* Current signal above it */
- unsigned char mmr_signal_lvl; /* signal level */
-#define MMR_SIGNAL_LVL 0x3F /* signal level */
-#define MMR_SIGNAL_LVL_VALID 0x80 /* Updated since last read */
- unsigned char mmr_silence_lvl; /* silence level (noise) */
-#define MMR_SILENCE_LVL 0x3F /* silence level */
-#define MMR_SILENCE_LVL_VALID 0x80 /* Updated since last read */
- unsigned char mmr_sgnl_qual; /* signal quality */
-#define MMR_SGNL_QUAL 0x0F /* signal quality */
-#define MMR_SGNL_QUAL_ANT 0x80 /* current antenna used */
- unsigned char mmr_netw_id_l; /* NWID low order byte ??? */
- unsigned char mmr_unused3[3]; /* unused */
-
- /* 2.0 Hardware extension - frequency selection support */
- unsigned char mmr_fee_status; /* Status of frequency eeprom */
-#define MMR_FEE_STATUS_ID 0xF0 /* Modem revision id */
-#define MMR_FEE_STATUS_DWLD 0x08 /* Download in progress */
-#define MMR_FEE_STATUS_BUSY 0x04 /* EEprom busy */
- unsigned char mmr_unused4[1]; /* unused */
- unsigned char mmr_fee_data_l; /* Read data from eeprom (low) */
- unsigned char mmr_fee_data_h; /* Read data from eeprom (high) */
-};
-
-/* Size for structure checking (if padding is correct) */
-#define MMR_SIZE 36
-
-/* Calculate offset of a field in the above structure */
-#define mmroff(p,f) (unsigned short)((void *)(&((mmr_t *)((void *)0 + (p)))->f) - (void *)0)
-
-
-/* Make the two above structures one */
-typedef union mm_t
-{
- struct mmw_t w; /* Write to the mmc */
- struct mmr_t r; /* Read from the mmc */
-} mm_t;
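
The MMW_FEE_* / MMR_FEE_* definitions above describe the frequency-EEPROM handshake on 2.0 cards: write the word address into mmw_fee_addr, issue MMW_FEE_CTRL_READ through mmw_fee_ctrl, wait for MMR_FEE_STATUS_BUSY to clear, then read the 16-bit result from mmr_fee_data_h/l. A hedged sketch of that sequence; mmc_out()/mmc_in() are assumed helpers that write/read one byte of the MMI at a given offset (they live in the driver proper, not in this header), the busy-wait is simplified, and <linux/delay.h> is assumed for udelay():

/* Read one 16-bit word from the frequency EEPROM (illustrative only). */
static int wv_fee_read_word(unsigned long base, unsigned char addr,
			    unsigned short *value)
{
	int timeout = 100;

	mmc_out(base, mmwoff(0, mmw_fee_addr), addr);		/* word address */
	mmc_out(base, mmwoff(0, mmw_fee_ctrl), MMW_FEE_CTRL_READ);

	/* Wait for the EEPROM to finish (simplified busy-wait). */
	while ((mmc_in(base, mmroff(0, mmr_fee_status)) & MMR_FEE_STATUS_BUSY)
	       && --timeout)
		udelay(10);
	if (!timeout)
		return -1;

	*value = (mmc_in(base, mmroff(0, mmr_fee_data_h)) << 8) |
		 mmc_in(base, mmroff(0, mmr_fee_data_l));
	return 0;
}
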
-
-#endif /* _WAVELAN_CS_H */
diff --git a/drivers/staging/wavelan/wavelan_cs.p.h b/drivers/staging/wavelan/wavelan_cs.p.h
deleted file mode 100644
index 8fbfaa8a5a6..00000000000
--- a/drivers/staging/wavelan/wavelan_cs.p.h
+++ /dev/null
@@ -1,766 +0,0 @@
-/*
- * Wavelan Pcmcia driver
- *
- * Jean II - HPLB '96
- *
- * Reorganisation and extension of the driver.
- *
- * This file contains all definitions and declarations necessary for the
- * wavelan pcmcia driver. This file is a private header, so it should
- * be included only in wavelan_cs.c !!!
- */
-
-#ifndef WAVELAN_CS_P_H
-#define WAVELAN_CS_P_H
-
-/************************** DOCUMENTATION **************************/
-/*
- * This driver provides a Linux interface to the Wavelan Pcmcia hardware
- * The Wavelan is a product of Lucent (http://www.wavelan.com/).
- * This division was formerly part of NCR and then AT&T.
- * Wavelan are also distributed by DEC (RoamAbout DS)...
- *
- * To know how to use this driver, read the PCMCIA HOWTO.
- * If you want to exploit the many other functionalities, look at the comments
- * in the code...
- *
- * This driver is the result of the effort of many people (see below).
- */
-
-/* ------------------------ SPECIFIC NOTES ------------------------ */
-/*
- * Web page
- * --------
- * I try to maintain a web page with the Wireless LAN Howto at :
- * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Wavelan.html
- *
- * SMP
- * ---
- * We now are SMP compliant (I eventually fixed the remaining bugs).
- * The driver has been tested on a dual P6-150 and survived my usual
- * set of torture tests.
- * Anyway, I spent enough time chasing interrupt re-entrancy during
- * errors or reconfigure, and I designed the locked/unlocked sections
- * of the driver with great care, and with the recent addition of
- * the spinlock (thanks to the new API), we should be quite close to
- * the truth.
- * The SMP/IRQ locking is quite coarse and conservative (i.e. not fast),
- * but better safe than sorry (especially at 2 Mb/s ;-).
- *
- * I have also looked into disabling only our interrupt on the card
- * (via HACR) instead of all interrupts in the processor (via cli),
- * so that other drivers are not impacted, and it looks like it's
- * possible, but it's very tricky to do right (full of races). As
- * the gain would be mostly for SMP systems, it can wait...
- *
- * Debugging and options
- * ---------------------
- * You will find below a set of '#define" allowing a very fine control
- * on the driver behaviour and the debug messages printed.
- * The main options are :
- * o WAVELAN_ROAMING, for the experimental roaming support.
- * o SET_PSA_CRC, to have your card correctly recognised by
- * an access point and the Point-to-Point diagnostic tool.
- * o USE_PSA_CONFIG, to read configuration from the PSA (EEprom)
- * (otherwise we always start afresh with some defaults)
- *
- * wavelan_cs.o is darn too big
- * -------------------------
- * That's true ! There is a very simple way to reduce the driver
- * object by 33% (yes !). Comment out the following line :
- * #include <linux/wireless.h>
- * Other compile options can also reduce the size of it...
- *
- * MAC address and hardware detection :
- * ----------------------------------
- * The detection code of the wavelan checks that the first 3
- * octets of the MAC address fit the company code. This type of
- * detection works well for AT&T cards (because the AT&T code is
- * hardcoded in wavelan_cs.h), but of course will fail for other
- * manufacturers.
- *
- * If you are sure that your card is derived from the wavelan,
- * here is the way to configure it :
- * 1) Get your MAC address
- * a) With your card utilities (wfreqsel, instconf, ...)
- * b) With the driver :
- * o compile the kernel with DEBUG_CONFIG_INFO enabled
- * o Boot and look the card messages
- * 2) Set your MAC code (3 octets) in MAC_ADDRESSES[][3] (wavelan_cs.h)
- * 3) Compile & verify
- * 4) Send me the MAC code - I will include it in the next version...
- *
- */
-
-/* --------------------- WIRELESS EXTENSIONS --------------------- */
-/*
- * This driver is the first one to support "wireless extensions".
- * This set of extensions provides you with a way to control the wireless
- * characteristics of the hardware in a standard way and support for
- * applications for taking advantage of it (like Mobile IP).
- *
- * It might be a good idea as well to fetch the wireless tools to
- * configure the device and play a bit.
- */
-
-/* ---------------------------- FILES ---------------------------- */
-/*
- * wavelan_cs.c : The actual code for the driver - C functions
- *
- * wavelan_cs.p.h : Private header : local types / vars for the driver
- *
- * wavelan_cs.h : Description of the hardware interface & structs
- *
- * i82593.h : Description of the Ethernet controller
- */
-
-/* --------------------------- HISTORY --------------------------- */
-/*
- * The history of the Wavelan drivers is as complicated as history of
- * the Wavelan itself (NCR -> AT&T -> Lucent).
- *
- * All started with Anders Klemets <klemets@paul.rutgers.edu>,
- * writing a Wavelan ISA driver for the MACH microkernel. Girish
- * Welling <welling@paul.rutgers.edu> had also worked on it.
- * Keith Moore modified this for the Pcmcia hardware.
- *
- * Robert Morris <rtm@das.harvard.edu> ported these two drivers to BSDI
- * and added specific Pcmcia support (there is currently no equivalent
- * of the PCMCIA package under BSD...).
- *
- * Jim Binkley <jrb@cs.pdx.edu> ported both BSDI drivers to FreeBSD.
- *
- * Bruce Janson <bruce@cs.usyd.edu.au> ported the BSDI ISA driver to Linux.
- *
- * Anthony D. Joseph <adj@lcs.mit.edu> started modifying Bruce's driver
- * (with help of the BSDI PCMCIA driver) for PCMCIA.
- * Yunzhou Li <yunzhou@strat.iol.unh.edu> finished his work.
- * Joe Finney <joe@comp.lancs.ac.uk> patched the driver to correctly
- * start 2.00 cards (2.4 GHz with frequency selection).
- * David Hinds <dahinds@users.sourceforge.net> integrated the whole in his
- * Pcmcia package (+ bug corrections).
- *
- * I (Jean Tourrilhes - jt@hplb.hpl.hp.com) then started to make some
- * patches to the Pcmcia driver. Afterwards, I added code in the ISA driver
- * for Wireless Extensions and full support of frequency selection
- * cards. Now, I'm doing the same to the Pcmcia driver + some
- * reorganisation.
- * Loeke Brederveld <lbrederv@wavelan.com> from Lucent has given me
- * much needed information on the Wavelan hardware.
- */
-
-/* By the way : for the copyright & legal stuff :
- * Almost everybody wrote code under the GNU or BSD license (or alike),
- * and wants their original copyright to remain somewhere in the
- * code (for myself, I go with the GPL).
- * Nobody wants to take responsibility for anything, except the fame...
- */
-
-/* --------------------------- CREDITS --------------------------- */
-/*
- * Credits:
- * Special thanks to Jan Hoogendoorn of AT&T GIS Utrecht and
- * Loeke Brederveld of Lucent for providing extremely useful
- * information about WaveLAN PCMCIA hardware
- *
- * This driver is based upon several other drivers, in particular:
- * David Hinds' Linux driver for the PCMCIA 3c589 ethernet adapter
- * Bruce Janson's Linux driver for the AT-bus WaveLAN adapter
- * Anders Klemets' PCMCIA WaveLAN adapter driver
- * Robert Morris' BSDI driver for the PCMCIA WaveLAN adapter
- *
- * Additional Credits:
- *
- * This software was originally developed under Linux 1.2.3
- * (Slackware 2.0 distribution).
- * And then under Linux 2.0.x (Debian 1.1 -> 2.2 - pcmcia 2.8.18+)
- * with an HP OmniBook 4000 and then a 5500.
- *
- * It is based on other device drivers and information either written
- * or supplied by:
- * James Ashton (jaa101@syseng.anu.edu.au),
- * Ajay Bakre (bakre@paul.rutgers.edu),
- * Donald Becker (becker@super.org),
- * Jim Binkley <jrb@cs.pdx.edu>,
- * Loeke Brederveld <lbrederv@wavelan.com>,
- * Allan Creighton (allanc@cs.su.oz.au),
- * Brent Elphick <belphick@uwaterloo.ca>,
- * Joe Finney <joe@comp.lancs.ac.uk>,
- * Matthew Geier (matthew@cs.su.oz.au),
- * Remo di Giovanni (remo@cs.su.oz.au),
- * Mark Hagan (mhagan@wtcpost.daytonoh.NCR.COM),
- * David Hinds <dahinds@users.sourceforge.net>,
- * Jan Hoogendoorn (c/o marteijn@lucent.com),
- * Bruce Janson <bruce@cs.usyd.edu.au>,
- * Anthony D. Joseph <adj@lcs.mit.edu>,
- * Anders Klemets (klemets@paul.rutgers.edu),
- * Yunzhou Li <yunzhou@strat.iol.unh.edu>,
- * Marc Meertens (mmeertens@lucent.com),
- * Keith Moore,
- * Robert Morris (rtm@das.harvard.edu),
- * Ian Parkin (ian@cs.su.oz.au),
- * John Rosenberg (johnr@cs.su.oz.au),
- * George Rossi (george@phm.gov.au),
- * Arthur Scott (arthur@cs.su.oz.au),
- * Stanislav Sinyagin <stas@isf.ru>
- * Peter Storey,
- * Jean Tourrilhes <jt@hpl.hp.com>,
- * Girish Welling (welling@paul.rutgers.edu)
- * Clark Woodworth <clark@hiway1.exit109.com>
- * Yongguang Zhang <ygz@isl.hrl.hac.com>...
- */
-
-/* ------------------------- IMPROVEMENTS ------------------------- */
-/*
- * I proudly present :
- *
- * Changes made in 2.8.22 :
- * ----------------------
- * - improved wv_set_multicast_list
- * - catch spurious interrupt
- * - correct release of the device
- *
- * Changes made in release :
- * ------------------------
- * - Reorganisation of the code, function name change
- * - Creation of private header (wavelan_cs.h)
- * - Reorganised debug messages
- * - More comments, history, ...
- * - Configure earlier (in "insert" instead of "open")
- * and do things only once
- * - mmc_init : configure the PSA if not done
- * - mmc_init : 2.00 detection better code for 2.00 init
- * - better info at startup
- * - Correct a HUGE bug (volatile & uncalibrated busy loop)
- * in wv_82593_cmd => config speedup
- * - Stop receiving & power down on close (and power up on open)
- * use "ifconfig down" & "ifconfig up ; route add -net ..."
- * - Send packets : add watchdog instead of polling
- * - Receive : check frame wrap around & try to recover some frames
- * - wavelan_set_multicast_list : avoid reset
- * - add wireless extensions (ioctl & get_wireless_stats)
- * get/set nwid/frequency on fly, info for /proc/net/wireless
- * - Suppress useless stuff from lp (net_local), but add link
- * - More inlines
- * - Lots of other minor details & cleanups
- *
- * Changes made in second release :
- * ------------------------------
- * - Optimise wv_85893_reconfig stuff, fix potential problems
- * - Change error values for ioctl
- * - Non blocking wv_ru_stop() + call wv_reset() in case of problems
- * - Remove development printk from wavelan_watchdog()
- * - Move removal of the watchdog to wavelan_close instead of wavelan_release
- * to fix potential problems...
- * - Start debugging suspend stuff (but it's still a bit weird)
- * - Debug & optimize dump header/packet in Rx & Tx (debug)
- * - Use "readb" and "writeb" to be kernel 2.1 compliant
- * - Better handling of bogus interrupts
- * - Wireless extension : SETSPY and GETSPY
- * - Remove old stuff (stats - for those needing it, just ask me...)
- * - Make wireless extensions optional
- *
- * Changes made in third release :
- * -----------------------------
- * - cleanups & typos
- * - modif wireless ext (spy -> only one pointer)
- * - new private ioctl to set/get quality & level threshold
- * - Init : correct default value of level threshold for pcmcia
- * - kill watchdog in hw_reset
- * - more 2.1 support (copy_to/from_user instead of memcpy_to/fromfs)
- * - Add message level (debug stuff in /var/adm/debug & errors not
- * displayed at console and still in /var/adm/messages)
- *
- * Changes made in fourth release :
- * ------------------------------
- * - multicast support (yes !) thanks to Yongguang Zhang.
- *
- * Changes made in fifth release (2.9.0) :
- * -------------------------------------
- * - Revisited multicast code (it was mostly wrong).
- * - protect code in wv_82593_reconfig with dev->tbusy (oups !)
- *
- * Changes made in sixth release (2.9.1a) :
- * --------------------------------------
- * - Change the detection code for multi manufacturer code support
- * - Correct bug (hang kernel) in init when we were "rejecting" a card
- *
- * Changes made in seventh release (2.9.1b) :
- * ----------------------------------------
- * - Update to wireless extensions changes
- * - Silly bug in card initial configuration (psa_conf_status)
- *
- * Changes made in eighth release :
- * -----------------------------
- * - Small bug in debug code (probably not the last one...)
- * - 1.2.13 support (thanks to Clark Woodworth)
- *
- * Changes made for release in 2.9.2b :
- * ----------------------------------
- * - Level threshold is now a standard wireless extension (version 4 !)
- * - modules parameters types for kernel > 2.1.17
- * - updated man page
- * - Other cleanups from David Hinds
- *
- * Changes made for release in 2.9.5 :
- * ---------------------------------
- * - byte count stats (courtesy of David Hinds)
- * - Remove dev_tint stuff (courtesy of David Hinds)
- * - Other cleanups from David Hinds
- * - Encryption setting from Brent Elphick (thanks a lot !)
- * - 'base' to 'u_long' for the Alpha (thanks to Stanislav Sinyagin)
- *
- * Changes made for release in 2.9.6 :
- * ---------------------------------
- * - fix bug : no longer disable watchdog in case of bogus interrupt
- * - increase timeout in config code for picky hardware
- * - mask unused bits in status (Wireless Extensions)
- *
- * Changes integrated by Justin Seger <jseger@MIT.EDU> & David Hinds :
- * -----------------------------------------------------------------
- * - Roaming "hack" from Joe Finney <joe@comp.lancs.ac.uk>
- * - PSA CRC code from Bob Gray <rgray@bald.cs.dartmouth.edu>
- * - Better initialisation of the i82593 controller
- * from Joseph K. O'Sullivan <josullvn+@cs.cmu.edu>
- *
- * Changes made for release in 3.0.10 :
- * ----------------------------------
- * - Fix eject "hang" of the driver under 2.2.X :
- * o create wv_flush_stale_links()
- * o Rename wavelan_release to wv_pcmcia_release & move up
- * o move unregister_netdev to wavelan_detach()
- * o wavelan_release() no longer calls wavelan_detach()
- * o Suppress "release" timer
- * o Other cleanups & fixes
- * - New MAC address in the probe
- * - Reorg PSA_CRC code (endian neutral & cleaner)
- * - Correct initialisation of the i82593 from Lucent manual
- * - Put back the watchdog, with larger timeout
- * - TRANSMIT_NO_CRC is a "normal" error, so recover from it
- * from Derrick J Brashear <shadow@dementia.org>
- * - Better handling of TX and RX normal failure conditions
- * - #ifdef out all the roaming code
- * - Add ESSID & "AP current address" ioctl stubs
- * - General cleanup of the code
- *
- * Changes made for release in 3.0.13 :
- * ----------------------------------
- * - Re-enable compilation of roaming code by default, but with
- * do_roaming = 0
- * - Nuke `nwid=nwid^ntohs(beacon->domain_id)' in wl_roam_gather
- * at the demand of John Carol Langford <jcl@gs176.sp.cs.cmu.edu>
- * - Introduced WAVELAN_ROAMING_EXT for incomplete ESSID stuff.
- *
- * Changes made for release in 3.0.15 :
- * ----------------------------------
- * - Change e-mail and web page addresses
- * - Watchdog timer is now correctly expressed in HZ, not in jiffies
- * - Add channel number to the list of frequencies in range
- * - Add the (short) list of bit-rates in range
- * - Develop a new sensitivity... (sens.value & sens.fixed)
- *
- * Changes made for release in 3.1.2 :
- * ---------------------------------
- * - Fix check for root permission (break instead of exit)
- * - New nwid & encoding setting (Wireless Extension 9)
- *
- * Changes made for release in 3.1.12 :
- * ----------------------------------
- * - reworked wv_82593_cmd to avoid using the IRQ handler and doing
- * ugly things with interrupts.
- * - Add IRQ protection in 82593_config/ru_start/ru_stop/watchdog
- * - Update to new network API (softnet - 2.3.43) :
- * o replace dev->tbusy (David + me)
- * o replace dev->tstart (David + me)
- * o remove dev->interrupt (David)
- * o add SMP locking via spinlock in splxx (me)
- * o add spinlock in interrupt handler (me)
- * o use kernel watchdog instead of ours (me)
- * o verify that all the changes make sense and work (me)
- * - Re-sync kernel/pcmcia versions (not much actually)
- * - A few other cleanups (David & me)...
- *
- * Changes made for release in 3.1.22 :
- * ----------------------------------
- * - Check that SMP works, remove annoying log message
- *
- * Changes made for release in 3.1.24 :
- * ----------------------------------
- * - Fix infrequent card lockup when the watchdog was resetting the hardware :
- * o control first busy loop in wv_82593_cmd()
- * o Extend spinlock protection in wv_hw_config()
- *
- * Changes made for release in 3.1.33 :
- * ----------------------------------
- * - Optional use new driver API for Wireless Extensions :
- * o got rid of wavelan_ioctl()
- * o use a bunch of iw_handler instead
- *
- * Changes made for release in 3.2.1 :
- * ---------------------------------
- * - Set dev->trans_start to avoid filling the logs
- * (and generating useless abort commands)
- * - Avoid deadlocks in mmc_out()/mmc_in()
- *
- * Wishes & dreams:
- * ----------------
- * - Cleanup and integrate the roaming code
- * (std debug, set DomainID, decay avg and co...)
- */
-
-/***************************** INCLUDES *****************************/
-
-/* Linux headers that we need */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h> /* Wireless extensions */
-#include <net/iw_handler.h> /* New driver API */
-
-/* Pcmcia headers that we need */
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-/* Wavelan declarations */
-#include <linux/i82593.h> /* Definitions for the Intel chip */
-
-#include "wavelan_cs.h" /* Others bits of the hardware */
-
-/************************** DRIVER OPTIONS **************************/
-/*
- * `#define' or `#undef' the following constant to change the behaviour
- * of the driver...
- */
-#define WAVELAN_ROAMING /* Include experimental roaming code */
-#undef WAVELAN_ROAMING_EXT /* Enable roaming wireless extensions */
-#undef SET_PSA_CRC /* Set the CRC in PSA (slower) */
-#define USE_PSA_CONFIG /* Use info from the PSA */
-#undef EEPROM_IS_PROTECTED /* Doesn't seem to be necessary */
-#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
-#undef SET_MAC_ADDRESS /* Experimental */
-
-/* Warning : these stuff will slow down the driver... */
-#define WIRELESS_SPY /* Enable spying addresses */
-#undef HISTOGRAM /* Enable histogram of sig level... */
-
-/****************************** DEBUG ******************************/
-
-#undef DEBUG_MODULE_TRACE /* Module insertion/removal */
-#undef DEBUG_CALLBACK_TRACE /* Calls made by Linux */
-#undef DEBUG_INTERRUPT_TRACE /* Calls to handler */
-#undef DEBUG_INTERRUPT_INFO /* type of interrupt & so on */
-#define DEBUG_INTERRUPT_ERROR /* problems */
-#undef DEBUG_CONFIG_TRACE /* Trace the config functions */
-#undef DEBUG_CONFIG_INFO /* What's going on... */
-#define DEBUG_CONFIG_ERRORS /* Errors on configuration */
-#undef DEBUG_TX_TRACE /* Transmission calls */
-#undef DEBUG_TX_INFO /* Header of the transmitted packet */
-#undef DEBUG_TX_FAIL /* Normal failure conditions */
-#define DEBUG_TX_ERROR /* Unexpected conditions */
-#undef DEBUG_RX_TRACE /* Transmission calls */
-#undef DEBUG_RX_INFO /* Header of the transmitted packet */
-#undef DEBUG_RX_FAIL /* Normal failure conditions */
-#define DEBUG_RX_ERROR /* Unexpected conditions */
-#undef DEBUG_PACKET_DUMP /* Dump packet on the screen */
-#undef DEBUG_IOCTL_TRACE /* Misc call by Linux */
-#undef DEBUG_IOCTL_INFO /* Various debug info */
-#define DEBUG_IOCTL_ERROR /* What's going wrong */
-#define DEBUG_BASIC_SHOW /* Show basic startup info */
-#undef DEBUG_VERSION_SHOW /* Print version info */
-#undef DEBUG_PSA_SHOW /* Dump psa to screen */
-#undef DEBUG_MMC_SHOW /* Dump mmc to screen */
-#undef DEBUG_SHOW_UNUSED /* Show also unused fields */
-#undef DEBUG_I82593_SHOW /* Show i82593 status */
-#undef DEBUG_DEVICE_SHOW /* Show device parameters */
-
-/************************ CONSTANTS & MACROS ************************/
-
-#ifdef DEBUG_VERSION_SHOW
-static const char *version = "wavelan_cs.c : v24 (SMP + wireless extensions) 11/1/02\n";
-#endif
-
-/* Watchdog temporisation */
-#define WATCHDOG_JIFFIES (256*HZ/100)
-
-/* Fix a bug in some old wireless extension definitions */
-#ifndef IW_ESSID_MAX_SIZE
-#define IW_ESSID_MAX_SIZE 32
-#endif
-
-/* ------------------------ PRIVATE IOCTL ------------------------ */
-
-#define SIOCSIPQTHR SIOCIWFIRSTPRIV /* Set quality threshold */
-#define SIOCGIPQTHR SIOCIWFIRSTPRIV + 1 /* Get quality threshold */
-#define SIOCSIPROAM SIOCIWFIRSTPRIV + 2 /* Set roaming state */
-#define SIOCGIPROAM SIOCIWFIRSTPRIV + 3 /* Get roaming state */
-
-#define SIOCSIPHISTO SIOCIWFIRSTPRIV + 4 /* Set histogram ranges */
-#define SIOCGIPHISTO SIOCIWFIRSTPRIV + 5 /* Get histogram values */
-
-/*************************** WaveLAN Roaming **************************/
-#ifdef WAVELAN_ROAMING /* Conditional compile, see above in options */
-
-#define WAVELAN_ROAMING_DEBUG 0 /* 1 = Trace of handover decisions */
- /* 2 = Info on each beacon rcvd... */
-#define MAX_WAVEPOINTS 7 /* Max visible at one time */
-#define WAVEPOINT_HISTORY 5 /* SNR sample history slow search */
-#define WAVEPOINT_FAST_HISTORY 2 /* SNR sample history fast search */
-#define SEARCH_THRESH_LOW 10 /* SNR to enter cell search */
-#define SEARCH_THRESH_HIGH 13 /* SNR to leave cell search */
-#define WAVELAN_ROAMING_DELTA 1 /* Hysteresis value (+/- SNR) */
-#define CELL_TIMEOUT 2*HZ /* in jiffies */
-
-#define FAST_CELL_SEARCH 1 /* Boolean values... */
-#define NWID_PROMISC 1 /* for code clarity. */
-
-typedef struct wavepoint_beacon
-{
- unsigned char dsap, /* Unused */
- ssap, /* Unused */
- ctrl, /* Unused */
- O,U,I, /* Unused */
- spec_id1, /* Unused */
- spec_id2, /* Unused */
- pdu_type, /* Unused */
- seq; /* WavePoint beacon sequence number */
- __be16 domain_id, /* WavePoint Domain ID */
- nwid; /* WavePoint NWID */
-} wavepoint_beacon;
-
-typedef struct wavepoint_history
-{
- unsigned short nwid; /* WavePoint's NWID */
- int average_slow; /* SNR running average */
- int average_fast; /* SNR running average */
- unsigned char sigqual[WAVEPOINT_HISTORY]; /* Ringbuffer of recent SNR's */
- unsigned char qualptr; /* Index into ringbuffer */
- unsigned char last_seq; /* Last seq. no seen for WavePoint */
- struct wavepoint_history *next; /* Next WavePoint in table */
- struct wavepoint_history *prev; /* Previous WavePoint in table */
- unsigned long last_seen; /* Time of last beacon recvd, jiffies */
-} wavepoint_history;
-
-struct wavepoint_table
-{
- wavepoint_history *head; /* Start of ringbuffer */
- int num_wavepoints; /* No. of WavePoints visible */
- unsigned char locked; /* Table lock */
-};
-
-#endif /* WAVELAN_ROAMING */
-
-/****************************** TYPES ******************************/
-
-/* Shortcuts */
-typedef struct iw_statistics iw_stats;
-typedef struct iw_quality iw_qual;
-typedef struct iw_freq iw_freq;
-typedef struct net_local net_local;
-typedef struct timer_list timer_list;
-
-/* Basic types */
-typedef u_char mac_addr[WAVELAN_ADDR_SIZE]; /* Hardware address */
-
-/*
- * Static specific data for the interface.
- *
- * For each network interface, Linux keep data in two structure. "device"
- * keep the generic data (same format for everybody) and "net_local" keep
- * the additional specific data.
- */
-struct net_local
-{
- dev_node_t node; /* ???? What is this stuff ???? */
- struct net_device * dev; /* Reverse link... */
- spinlock_t spinlock; /* Serialize access to the hardware (SMP) */
- struct pcmcia_device * link; /* pcmcia structure */
- int nresets; /* Number of hw resets */
- u_char configured; /* If it is configured */
- u_char reconfig_82593; /* Need to reconfigure the controller */
- u_char promiscuous; /* Promiscuous mode */
- u_char allmulticast; /* All Multicast mode */
- int mc_count; /* Number of multicast addresses */
-
- int stop; /* Current i82593 Stop Hit Register */
- int rfp; /* Last DMA machine receive pointer */
- int overrunning; /* Receiver overrun flag */
-
- iw_stats wstats; /* Wireless specific stats */
-
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
-
-#ifdef HISTOGRAM
- int his_number; /* Number of intervals */
- u_char his_range[16]; /* Boundaries of interval ]n-1; n] */
- u_long his_sum[16]; /* Sum in interval */
-#endif /* HISTOGRAM */
-#ifdef WAVELAN_ROAMING
- u_long domain_id; /* Domain ID we lock on for roaming */
- int filter_domains; /* Check Domain ID of beacon found */
- struct wavepoint_table wavepoint_table; /* Table of visible WavePoints*/
- wavepoint_history * curr_point; /* Current wavepoint */
- int cell_search; /* Searching for new cell? */
- struct timer_list cell_timer; /* Garbage collection */
-#endif /* WAVELAN_ROAMING */
- void __iomem *mem;
-};
-
-/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
-static inline u_char /* data */
- hasr_read(u_long); /* Read the host interface : base address */
-static void
- hacr_write(u_long, /* Write to host interface : base address */
- u_char), /* data */
- hacr_write_slow(u_long,
- u_char);
-static void
- psa_read(struct net_device *, /* Read the Parameter Storage Area */
- int, /* offset in PSA */
- u_char *, /* buffer to fill */
- int), /* size to read */
- psa_write(struct net_device *, /* Write to the PSA */
- int, /* Offset in psa */
- u_char *, /* Buffer in memory */
- int); /* Length of buffer */
-static void
- mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
- u_short,
- u_char),
- mmc_write(u_long, /* Write n bytes to the MMC */
- u_char,
- u_char *,
- int);
-static u_char /* Read 1 byte from the MMC */
- mmc_in(u_long,
- u_short);
-static void
- mmc_read(u_long, /* Read n bytes from the MMC */
- u_char,
- u_char *,
- int),
- fee_wait(u_long, /* Wait for frequency EEprom : base address */
- int, /* Base delay to wait for */
- int); /* Number of time to wait */
-static void
- fee_read(u_long, /* Read the frequency EEprom : base address */
- u_short, /* destination offset */
- u_short *, /* data buffer */
- int); /* number of registers */
-/* ---------------------- I82593 SUBROUTINES ----------------------- */
-static int
- wv_82593_cmd(struct net_device *, /* synchronously send a command to i82593 */
- char *,
- int,
- int);
-static inline int
- wv_diag(struct net_device *); /* Diagnostique the i82593 */
-static int
- read_ringbuf(struct net_device *, /* Read a receive buffer */
- int,
- char *,
- int);
-static void
- wv_82593_reconfig(struct net_device *); /* Reconfigure the controller */
-/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
-static void
- wv_init_info(struct net_device *); /* display startup info */
-/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
-static iw_stats *
- wavelan_get_wireless_stats(struct net_device *);
-/* ----------------------- PACKET RECEPTION ----------------------- */
-static int
- wv_start_of_frame(struct net_device *, /* Seek beggining of current frame */
- int, /* end of frame */
- int); /* start of buffer */
-static void
- wv_packet_read(struct net_device *, /* Read a packet from a frame */
- int,
- int),
- wv_packet_rcv(struct net_device *); /* Read all packets waiting */
-/* --------------------- PACKET TRANSMISSION --------------------- */
-static void
- wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer */
- void *,
- short);
-static netdev_tx_t
- wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
- struct net_device *);
-/* -------------------- HARDWARE CONFIGURATION -------------------- */
-static int
- wv_mmc_init(struct net_device *); /* Initialize the modem */
-static int
- wv_ru_stop(struct net_device *), /* Stop the i82593 receiver unit */
- wv_ru_start(struct net_device *); /* Start the i82593 receiver unit */
-static int
- wv_82593_config(struct net_device *); /* Configure the i82593 */
-static int
- wv_pcmcia_reset(struct net_device *); /* Reset the pcmcia interface */
-static int
- wv_hw_config(struct net_device *); /* Reset & configure the whole hardware */
-static void
- wv_hw_reset(struct net_device *); /* Same, + start receiver unit */
-static int
- wv_pcmcia_config(struct pcmcia_device *); /* Configure the pcmcia interface */
-static void
- wv_pcmcia_release(struct pcmcia_device *);/* Remove a device */
-/* ---------------------- INTERRUPT HANDLING ---------------------- */
-static irqreturn_t
- wavelan_interrupt(int, /* Interrupt handler */
- void *);
-static void
- wavelan_watchdog(struct net_device *); /* Transmission watchdog */
-/* ------------------- CONFIGURATION CALLBACKS ------------------- */
-static int
- wavelan_open(struct net_device *), /* Open the device */
- wavelan_close(struct net_device *); /* Close the device */
-static void
- wavelan_detach(struct pcmcia_device *p_dev); /* Destroy a removed device */
-
-/**************************** VARIABLES ****************************/
-
-/*
- * Parameters that can be set with 'insmod'
- * The exact syntax is 'insmod wavelan_cs.o <var>=<value>'
- */
-
-/* Shared memory speed, in ns */
-static int mem_speed = 0;
-
-/* New module interface */
-module_param(mem_speed, int, 0);
-
-#ifdef WAVELAN_ROAMING /* Conditional compile, see above in options */
-/* Enable roaming mode ? No ! Please keep this to 0 */
-static int do_roaming = 0;
-module_param(do_roaming, bool, 0);
-#endif /* WAVELAN_ROAMING */
-
-MODULE_LICENSE("GPL");
-
-#endif /* WAVELAN_CS_P_H */
-
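The header removed above built its experimental roaming support around a per-WavePoint ring buffer of SNR samples (wavepoint_history.sigqual) plus the SEARCH_THRESH_LOW/HIGH and WAVELAN_ROAMING_DELTA constants. The following is only a rough sketch of how those pieces combine into a running-average, hysteresis-based handover decision; the helper names are hypothetical and are not code from the deleted driver.

/* Sketch only -- constants copied from the removed header, helpers invented. */
#define WAVEPOINT_HISTORY	5	/* SNR samples kept per WavePoint */
#define SEARCH_THRESH_LOW	10	/* SNR at which cell search starts */
#define SEARCH_THRESH_HIGH	13	/* SNR at which cell search stops */
#define WAVELAN_ROAMING_DELTA	1	/* hysteresis (+/- SNR) for handover */

struct wp_hist {
	unsigned char sigqual[WAVEPOINT_HISTORY];	/* ring buffer of recent SNRs */
	unsigned char qualptr;				/* next slot to overwrite */
	int average_slow;				/* mean over the full ring */
};

/* Record one beacon's SNR and recompute the slow running average. */
static void wp_update_average(struct wp_hist *wp, unsigned char snr)
{
	int i, sum = 0;

	wp->sigqual[wp->qualptr] = snr;
	wp->qualptr = (wp->qualptr + 1) % WAVEPOINT_HISTORY;
	for (i = 0; i < WAVEPOINT_HISTORY; i++)
		sum += wp->sigqual[i];
	wp->average_slow = sum / WAVEPOINT_HISTORY;
}

/* Enter cell search below the low water mark, leave it above the high one. */
static int wp_update_search_state(const struct wp_hist *cur, int searching)
{
	if (!searching && cur->average_slow < SEARCH_THRESH_LOW)
		return 1;
	if (searching && cur->average_slow > SEARCH_THRESH_HIGH)
		return 0;
	return searching;
}

/* Hand over only if a candidate beats the current AP by the hysteresis delta. */
static int wp_should_handover(const struct wp_hist *cur, const struct wp_hist *cand)
{
	return cand->average_slow > cur->average_slow + WAVELAN_ROAMING_DELTA;
}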
diff --git a/drivers/staging/winbond/README b/drivers/staging/winbond/TODO
index cb944e4bf17..8c1baaf6d8a 100644
--- a/drivers/staging/winbond/README
+++ b/drivers/staging/winbond/TODO
@@ -2,10 +2,11 @@ TODO:
- sparse cleanups
- checkpatch cleanups
- kerneldoc cleanups
+ - fix severeCamelCaseInfestation
- remove typedefs
- remove unused ioctls
- use cfg80211 for regulatory stuff
- fix 4k stack problems
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Pavel Machek <pavel@suse.cz>
+Pavel Machek <pavel@ucw.cz>
diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h
index 0a2060bf4f9..b87d6c07600 100644
--- a/drivers/staging/winbond/core.h
+++ b/drivers/staging/winbond/core.h
@@ -12,14 +12,16 @@
#define WB_MAX_LINK_NAME_LEN 40
struct wbsoft_priv {
- u32 adapterIndex; // 20060703.4 Add for using padapterContext global adapter point
+ u32 adapterIndex; /* 20060703.4 Add for using padapterContext
+ global adapter point */
- struct wb_local_para sLocalPara; // Myself connected parameters
+ struct wb_local_para sLocalPara; /* Myself connected
+ parameters */
- MLME_FRAME sMlmeFrame; // connect to peerSTA parameters
+ MLME_FRAME sMlmeFrame; /* connect to peerSTA parameters */
- struct wb35_mto_params sMtoPara; // MTO_struct ...
- struct hw_data sHwData; //For HAL
+ struct wb35_mto_params sMtoPara; /* MTO_struct ... */
+ struct hw_data sHwData; /*For HAL */
struct wb35_mds Mds;
spinlock_t SpinLock;
@@ -30,7 +32,7 @@ struct wbsoft_priv {
u32 TxByteCount;
struct sk_buff *packet_return;
- s32 netif_state_stop; // 1: stop 0: normal
+ s32 netif_state_stop; /* 1: stop 0: normal */
struct iw_statistics iw_stats;
u8 LinkName[WB_MAX_LINK_NAME_LEN];
diff --git a/drivers/staging/winbond/localpara.h b/drivers/staging/winbond/localpara.h
index fcf6a0442dc..d7980575bed 100644
--- a/drivers/staging/winbond/localpara.h
+++ b/drivers/staging/winbond/localpara.h
@@ -1,263 +1,267 @@
#ifndef __WINBOND_LOCALPARA_H
#define __WINBOND_LOCALPARA_H
-//=============================================================
-// LocalPara.h -
-//=============================================================
+/*
+ * =============================================================
+ * LocalPara.h -
+ * =============================================================
+ */
#include "mac_structures.h"
-//Define the local ability
+/* Define the local ability */
-#define LOCAL_DEFAULT_BEACON_PERIOD 100 //ms
-#define LOCAL_DEFAULT_ATIM_WINDOW 0
-#define LOCAL_DEFAULT_ERP_CAPABILITY 0x0431 //0x0001: ESS
- //0x0010: Privacy
- //0x0020: short preamble
- //0x0400: short slot time
-#define LOCAL_DEFAULT_LISTEN_INTERVAL 5
+#define LOCAL_DEFAULT_BEACON_PERIOD 100 /* ms */
+#define LOCAL_DEFAULT_ATIM_WINDOW 0
+#define LOCAL_DEFAULT_ERP_CAPABILITY 0x0431 /*
+ * 0x0001: ESS
+ * 0x0010: Privacy
+ * 0x0020: short preamble
+ * 0x0400: short slot time
+ */
+#define LOCAL_DEFAULT_LISTEN_INTERVAL 5
-//#define LOCAL_DEFAULT_24_CHANNEL_NUM 11 // channel 1..11
-#define LOCAL_DEFAULT_24_CHANNEL_NUM 13 // channel 1..13
-#define LOCAL_DEFAULT_5_CHANNEL_NUM 8 // channel 36..64
+#define LOCAL_DEFAULT_24_CHANNEL_NUM 13 /* channel 1..13 */
+#define LOCAL_DEFAULT_5_CHANNEL_NUM 8 /* channel 36..64 */
-#define LOCAL_USA_24_CHANNEL_NUM 11
-#define LOCAL_USA_5_CHANNEL_NUM 12
-#define LOCAL_EUROPE_24_CHANNEL_NUM 13
-#define LOCAL_EUROPE_5_CHANNEL_NUM 19
-#define LOCAL_JAPAN_24_CHANNEL_NUM 14
-#define LOCAL_JAPAN_5_CHANNEL_NUM 11
-#define LOCAL_UNKNOWN_24_CHANNEL_NUM 14
-#define LOCAL_UNKNOWN_5_CHANNEL_NUM 34 //not include 165
+#define LOCAL_USA_24_CHANNEL_NUM 11
+#define LOCAL_USA_5_CHANNEL_NUM 12
+#define LOCAL_EUROPE_24_CHANNEL_NUM 13
+#define LOCAL_EUROPE_5_CHANNEL_NUM 19
+#define LOCAL_JAPAN_24_CHANNEL_NUM 14
+#define LOCAL_JAPAN_5_CHANNEL_NUM 11
+#define LOCAL_UNKNOWN_24_CHANNEL_NUM 14
+#define LOCAL_UNKNOWN_5_CHANNEL_NUM 34 /* not include 165 */
-
-#define psLOCAL (&(adapter->sLocalPara))
+#define psLOCAL (&(adapter->sLocalPara))
#define MODE_802_11_BG 0
#define MODE_802_11_A 1
#define MODE_802_11_ABG 2
#define MODE_802_11_BG_IBSS 3
#define MODE_802_11_B 4
-#define MODE_AUTO 255
+#define MODE_AUTO 255
#define BAND_TYPE_DSSS 0
#define BAND_TYPE_OFDM_24 1
#define BAND_TYPE_OFDM_5 2
-//refer Bitmap2RateValue table
-#define LOCAL_ALL_SUPPORTED_RATES_BITMAP 0x130c1a66 //the bitmap value of all the H/W supported rates
- //1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54
-#define LOCAL_OFDM_SUPPORTED_RATES_BITMAP 0x130c1240 //the bitmap value of all the H/W supported rates
- //except to non-OFDM rates
- //6, 9, 12, 18, 24, 36, 48, 54
-
-#define LOCAL_11B_SUPPORTED_RATE_BITMAP 0x826
-#define LOCAL_11B_BASIC_RATE_BITMAP 0x826
-#define LOCAL_11B_OPERATION_RATE_BITMAP 0x826
-#define LOCAL_11G_BASIC_RATE_BITMAP 0x826 //1, 2, 5.5, 11
-#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 //6, 9, 12, 18, 24, 36, 48, 54
-#define LOCAL_11A_BASIC_RATE_BITMAP 0x01001040 //6, 12, 24
-#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 //9, 18, 36, 48, 54
-
-
-
-#define PWR_ACTIVE 0
-#define PWR_SAVE 1
+/* refer Bitmap2RateValue table */
+
+/* the bitmap value of all the H/W supported rates: */
+/* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54 */
+#define LOCAL_ALL_SUPPORTED_RATES_BITMAP 0x130c1a66
+/* the bitmap value of all the H/W supported rates except to non-OFDM rates: */
+/* 6, 9, 12, 18, 24, 36, 48, 54 */
+#define LOCAL_OFDM_SUPPORTED_RATES_BITMAP 0x130c1240
+#define LOCAL_11B_SUPPORTED_RATE_BITMAP 0x826
+#define LOCAL_11B_BASIC_RATE_BITMAP 0x826
+#define LOCAL_11B_OPERATION_RATE_BITMAP 0x826
+#define LOCAL_11G_BASIC_RATE_BITMAP 0x826 /* 1, 2, 5.5, 11 */
+#define LOCAL_11G_OPERATION_RATE_BITMAP 0x130c1240 /* 6, 9, 12, 18, 24, 36, 48, 54 */
+#define LOCAL_11A_BASIC_RATE_BITMAP 0x01001040 /* 6, 12, 24 */
+#define LOCAL_11A_OPERATION_RATE_BITMAP 0x120c0200 /* 9, 18, 36, 48, 54 */
+
+
+#define PWR_ACTIVE 0
+#define PWR_SAVE 1
#define PWR_TX_IDLE_CYCLE 6
-//bPreambleMode and bSlotTimeMode
-#define AUTO_MODE 0
-#define LONG_MODE 1
-
-//Region definition
-#define REGION_AUTO 0xff
-#define REGION_UNKNOWN 0
-#define REGION_EUROPE 1 //ETSI
-#define REGION_JAPAN 2 //MKK
-#define REGION_USA 3 //FCC
-#define REGION_FRANCE 4 //FRANCE
-#define REGION_SPAIN 5 //SPAIN
-#define REGION_ISRAEL 6 //ISRAEL
-//#define REGION_CANADA 7 //IC
+/* bPreambleMode and bSlotTimeMode */
+#define AUTO_MODE 0
+#define LONG_MODE 1
+
+/* Region definition */
+#define REGION_AUTO 0xff
+#define REGION_UNKNOWN 0
+#define REGION_EUROPE 1 /* ETSI */
+#define REGION_JAPAN 2 /* MKK */
+#define REGION_USA 3 /* FCC */
+#define REGION_FRANCE 4 /* FRANCE */
+#define REGION_SPAIN 5 /* SPAIN */
+#define REGION_ISRAEL 6 /* ISRAEL */
#define MAX_BSS_DESCRIPT_ELEMENT 32
-#define MAX_PMKID_CandidateList 16
-
-//High byte : Event number, low byte : reason
-//Event definition
-//-- SME/MLME event
-#define EVENT_RCV_DEAUTH 0x0100
-#define EVENT_JOIN_FAIL 0x0200
-#define EVENT_AUTH_FAIL 0x0300
-#define EVENT_ASSOC_FAIL 0x0400
-#define EVENT_LOST_SIGNAL 0x0500
-#define EVENT_BSS_DESCRIPT_LACK 0x0600
-#define EVENT_COUNTERMEASURE 0x0700
-#define EVENT_JOIN_FILTER 0x0800
-//-- TX/RX event
-#define EVENT_RX_BUFF_UNAVAILABLE 0x4100
-
-#define EVENT_CONNECT 0x8100
-#define EVENT_DISCONNECT 0x8200
-#define EVENT_SCAN_REQ 0x8300
-
-//Reason of Event
+#define MAX_PMKID_CandidateList 16
+
+/*
+ * High byte : Event number, low byte : reason
+ * Event definition
+ * -- SME/MLME event
+ */
+#define EVENT_RCV_DEAUTH 0x0100
+#define EVENT_JOIN_FAIL 0x0200
+#define EVENT_AUTH_FAIL 0x0300
+#define EVENT_ASSOC_FAIL 0x0400
+#define EVENT_LOST_SIGNAL 0x0500
+#define EVENT_BSS_DESCRIPT_LACK 0x0600
+#define EVENT_COUNTERMEASURE 0x0700
+#define EVENT_JOIN_FILTER 0x0800
+/* -- TX/RX event */
+#define EVENT_RX_BUFF_UNAVAILABLE 0x4100
+
+#define EVENT_CONNECT 0x8100
+#define EVENT_DISCONNECT 0x8200
+#define EVENT_SCAN_REQ 0x8300
+
+/* Reason of Event */
#define EVENT_REASON_FILTER_BASIC_RATE 0x0001
-#define EVENT_REASON_FILTER_PRIVACY 0x0002
+#define EVENT_REASON_FILTER_PRIVACY 0x0002
#define EVENT_REASON_FILTER_AUTH_MODE 0x0003
-#define EVENT_REASON_TIMEOUT 0x00ff
+#define EVENT_REASON_TIMEOUT 0x00ff
-// 20061108 WPS IE buffer
-#define MAX_IE_APPEND_SIZE 256 + 4 // Due to [E id][Length][OUI][Data] may 257 bytes
+/* Due to [E id][Length][OUI][Data] may be 257 bytes */
+#define MAX_IE_APPEND_SIZE (256 + 4)
-struct chan_info
-{
- u8 band;
- u8 ChanNo;
+struct chan_info {
+ u8 band;
+ u8 ChanNo;
};
-struct radio_off
-{
- u8 boHwRadioOff;
- u8 boSwRadioOff;
+struct radio_off {
+ u8 boHwRadioOff;
+ u8 boSwRadioOff;
};
-//===========================================================================
-struct wb_local_para
-{
- u8 PermanentAddress[ MAC_ADDR_LENGTH + 2 ]; // read from EPROM, manufacture set for each NetCard
- u8 ThisMacAddress[ MAC_ADDR_LENGTH + 2 ]; // the driver will use actually.
-
- u32 MTUsize; // Ind to Uplayer, Max transmission unit size
-
- u8 region_INF; //region setting from INF
- u8 region; //real region setting of the device
- u8 Reserved_1[2];
-
- //// power-save variables
- u8 iPowerSaveMode; // 0 indicates it is on, 1 indicates it is off
- u8 ATIMmode;
- u8 ExcludeUnencrypted;
-
- u16 CheckCountForPS; //Unit ime count for the decision to enter PS mode
- u8 boHasTxActivity; //tx activity has occurred
- u8 boMacPsValid; //Power save mode obtained from H/W is valid or not
-
- //// Rate
- u8 TxRateMode; // Initial, input from Registry, may be updated by GUI
- //Tx Rate Mode: auto(DTO on), max, 1M, 2M, ..
- u8 CurrentTxRate; // The current Tx rate
- u8 CurrentTxRateForMng; // The current Tx rate for management frames
- // It will be decided before connection succeeds.
- u8 CurrentTxFallbackRate;
-
- //for Rate handler
- u8 BRateSet[32]; //basic rate set
- u8 SRateSet[32]; //support rate set
-
- u8 NumOfBRate;
- u8 NumOfSRate;
- u8 NumOfDsssRateInSRate; //number of DSSS rates in supported rate set
- u8 reserved1;
-
- u32 dwBasicRateBitmap; //bit map of basic rates
- u32 dwSupportRateBitmap; //bit map of all support rates including
- //basic and operational rates
-
- ////For SME/MLME handler
- u16 wOldSTAindex; // valid when boHandover=TRUE, store old connected STA index
- u16 wConnectedSTAindex; // Index of peerly connected AP or IBSS in
- // the descriptionset.
- u16 Association_ID; // The Association ID in the (Re)Association
- // Response frame.
- u16 ListenInterval; // The listen interval when SME invoking MLME_
- // (Re)Associate_Request().
-
- struct radio_off RadioOffStatus;
- u8 Reserved0[2];
-
- u8 boMsRadioOff; // Ndis demands to be true when set Disassoc. OID and be false when set SSID OID.
- u8 bAntennaNo; //which antenna
- u8 bConnectFlag; //the connect status flag for roaming task
-
- u8 RoamStatus;
- u8 reserved7[3];
-
- struct chan_info CurrentChan; //Current channel no. and channel band. It may be changed by scanning.
- u8 boHandover; // Roaming, Hnadover to other AP.
- u8 boCCAbusy;
-
- u16 CWMax; // It may not be the real value that H/W used
- u8 CWMin; // 255: set according to 802.11 spec.
- u8 reserved2;
-
- //11G:
- u8 bMacOperationMode; // operation in 802.11b or 802.11g
- u8 bSlotTimeMode; //AUTO, s32
- u8 bPreambleMode; //AUTO, s32
- u8 boNonERPpresent;
-
- u8 boProtectMechanism; // H/W will take the necessary action based on this variable
- u8 boShortPreamble; // H/W will take the necessary action based on this variable
- u8 boShortSlotTime; // H/W will take the necessary action based on this variable
- u8 reserved_3;
-
- u32 RSN_IE_Bitmap; //added by WS
- u32 RSN_OUI_Type; //added by WS
-
- //For the BSSID
- u8 HwBssid[MAC_ADDR_LENGTH + 2];
- u32 HwBssidValid;
-
- //For scan list
- u8 BssListCount; //Total count of valid descriptor indexes
- u8 boReceiveUncorrectInfo; //important settings in beacon/probe resp. have been changed
- u8 NoOfJoinerInIbss;
- u8 reserved_4;
-
- u8 BssListIndex[ (MAX_BSS_DESCRIPT_ELEMENT+3) & ~0x03 ]; //Store the valid descriptor indexes obtained from scannings
- u8 JoinerInIbss[ (MAX_BSS_DESCRIPT_ELEMENT+3) & ~0x03 ]; //save the BssDescriptor index in this
- //IBSS. The index 0 is local descriptor
- //(psLOCAL->wConnectedSTAindex).
- //If CONNECTED : NoOfJoinerInIbss >=2
- // else : NoOfJoinerInIbss <=1
-
- //// General Statistics, count at Rx_handler or Tx_callback interrupt handler
- u64 GS_XMIT_OK; // Good Frames Transmitted
- u64 GS_RCV_OK; // Good Frames Received
- u32 GS_RCV_ERROR; // Frames received with crc error
- u32 GS_XMIT_ERROR; // Bad Frames Transmitted
- u32 GS_RCV_NO_BUFFER; // Receive Buffer underrun
- u32 GS_XMIT_ONE_COLLISION; // one collision
- u32 GS_XMIT_MORE_COLLISIONS;// more collisions
-
- //================================================================
- // Statistics (no matter whether it had done successfully) -wkchen
- //================================================================
- u32 _NumRxMSDU;
- u32 _NumTxMSDU;
- u32 _dot11WEPExcludedCount;
- u32 _dot11WEPUndecryptableCount;
- u32 _dot11FrameDuplicateCount;
-
- struct chan_info IbssChanSetting; // 2B. Start IBSS Channel setting by registry or WWU.
- u8 reserved_5[2]; //It may not be used after considering RF type,
- //region and modulation type.
-
- u8 reserved_6[2]; //two variables are for wep key error detection added by ws 02/02/04
-
- u32 bWepKeyError;
- u32 bToSelfPacketReceived;
- u32 WepKeyDetectTimerCount;
-
- u16 SignalLostTh;
- u16 SignalRoamTh;
-
- // 20061108 WPS IE Append
+struct wb_local_para {
+ /* read from EPROM, manufacture set for each NetCard */
+ u8 PermanentAddress[MAC_ADDR_LENGTH + 2];
+ /* the driver will use this one actually. */
+ u8 ThisMacAddress[MAC_ADDR_LENGTH + 2];
+ u32 MTUsize; /* Ind to Uplayer, Max transmission unit size */
+ u8 region_INF; /* region setting from INF */
+ u8 region; /* real region setting of the device */
+ u8 Reserved_1[2];
+
+ /* power-save variables */
+ u8 iPowerSaveMode; /* 0 indicates on, 1 indicates off */
+ u8 ATIMmode;
+ u8 ExcludeUnencrypted;
+	/* Unit time count for the decision to enter PS mode */
+ u16 CheckCountForPS;
+ u8 boHasTxActivity;/* tx activity has occurred */
+ u8 boMacPsValid; /* Power save mode obtained from H/W is valid or not */
+
+ /* Rate */
+ u8 TxRateMode; /*
+ * Initial, input from Registry,
+ * may be updated by GUI
+ * Tx Rate Mode: auto(DTO on), max, 1M, 2M, ..
+ */
+ u8 CurrentTxRate; /* The current Tx rate */
+ u8 CurrentTxRateForMng; /*
+ * The current Tx rate for management
+ * frames. It will be decided before
+ * connection succeeds.
+ */
+ u8 CurrentTxFallbackRate;
+
+ /* for Rate handler */
+ u8 BRateSet[32]; /* basic rate set */
+ u8 SRateSet[32]; /* support rate set */
+
+ u8 NumOfBRate;
+ u8 NumOfSRate;
+ u8 NumOfDsssRateInSRate; /* number of DSSS rates in supported rate set */
+ u8 reserved1;
+
+ u32 dwBasicRateBitmap; /* bit map of basic rates */
+
+ u32 dwSupportRateBitmap; /* bit map of all support rates including basic and operational rates */
+
+
+ /* For SME/MLME handler */
+
+ u16 wOldSTAindex; /* valid when boHandover=TRUE, store old connected STA index */
+	u16	wConnectedSTAindex;	/* Index of the connected peer AP or IBSS in the description set. */
+ u16 Association_ID; /* The Association ID in the (Re)Association Response frame. */
+ u16 ListenInterval; /* The listen interval when SME invoking MLME_ (Re)Associate_Request(). */
+
+ struct radio_off RadioOffStatus;
+ u8 Reserved0[2];
+ u8 boMsRadioOff; /* Ndis demands to be true when set Disassoc. OID and be false when set SSID OID. */
+ u8 bAntennaNo; /* which antenna */
+ u8 bConnectFlag; /* the connect status flag for roaming task */
+
+ u8 RoamStatus;
+ u8 reserved7[3];
+
+ struct chan_info CurrentChan; /* Current channel no. and channel band. It may be changed by scanning. */
+	u8	boHandover;	/* Roaming, Handover to other AP. */
+ u8 boCCAbusy;
+
+ u16 CWMax; /* It may not be the real value that H/W used */
+ u8 CWMin; /* 255: set according to 802.11 spec. */
+ u8 reserved2;
+
+ /* 11G: */
+ u8 bMacOperationMode; /* operation in 802.11b or 802.11g */
+ u8 bSlotTimeMode; /* AUTO, s32 */
+ u8 bPreambleMode; /* AUTO, s32 */
+ u8 boNonERPpresent;
+
+ u8 boProtectMechanism; /* H/W will take the necessary action based on this variable */
+ u8 boShortPreamble; /* Same here */
+ u8 boShortSlotTime; /* Same here */
+ u8 reserved_3;
+
+ u32 RSN_IE_Bitmap;
+ u32 RSN_OUI_Type;
+
+ /* For the BSSID */
+ u8 HwBssid[MAC_ADDR_LENGTH + 2];
+ u32 HwBssidValid;
+
+ /* For scan list */
+ u8 BssListCount; /* Total count of valid descriptor indexes */
+ u8 boReceiveUncorrectInfo; /* important settings in beacon/probe resp. have been changed */
+ u8 NoOfJoinerInIbss;
+ u8 reserved_4;
+
+ /* Store the valid descriptor indexes obtained from scannings */
+ u8 BssListIndex[(MAX_BSS_DESCRIPT_ELEMENT + 3) & ~0x03];
+ /*
+ * Save the BssDescriptor index in this IBSS.
+ * The index 0 is local descriptor (psLOCAL->wConnectedSTAindex).
+ * If CONNECTED : NoOfJoinerInIbss >= 2
+ * else : NoOfJoinerInIbss <= 1
+ */
+ u8 JoinerInIbss[(MAX_BSS_DESCRIPT_ELEMENT + 3) & ~0x03];
+
+ /* General Statistics, count at Rx_handler or Tx_callback interrupt handler */
+ u64 GS_XMIT_OK; /* Good Frames Transmitted */
+ u64 GS_RCV_OK; /* Good Frames Received */
+ u32 GS_RCV_ERROR; /* Frames received with crc error */
+ u32 GS_XMIT_ERROR; /* Bad Frames Transmitted */
+ u32 GS_RCV_NO_BUFFER; /* Receive Buffer underrun */
+ u32 GS_XMIT_ONE_COLLISION; /* one collision */
+ u32 GS_XMIT_MORE_COLLISIONS;/* more collisions */
+
+ /*
+ * ================================================================
+ * Statistics (no matter whether it had done successfully) -wkchen
+ * ================================================================
+ */
+ u32 _NumRxMSDU;
+ u32 _NumTxMSDU;
+ u32 _dot11WEPExcludedCount;
+ u32 _dot11WEPUndecryptableCount;
+ u32 _dot11FrameDuplicateCount;
+
+ struct chan_info IbssChanSetting; /* 2B. Start IBSS Channel setting by registry or WWU. */
+ u8 reserved_5[2]; /* It may not be used after considering RF type, region and modulation type. */
+
+ u8 reserved_6[2]; /* two variables are for wep key error detection */
+ u32 bWepKeyError;
+ u32 bToSelfPacketReceived;
+ u32 WepKeyDetectTimerCount;
+
+ u16 SignalLostTh;
+ u16 SignalRoamTh;
+
u8 IE_Append_data[MAX_IE_APPEND_SIZE];
u16 IE_Append_size;
u16 reserved_7;
-
};
#endif
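The EVENT_* and EVENT_REASON_* values above follow the rule stated in the comment: event number in the high byte, reason in the low byte. A minimal sketch of composing and splitting such a status word (the helper names are invented for illustration and do not exist in the driver):

/* Sketch only -- not part of the patch. */
#include <linux/types.h>

static inline u16 wb_make_event(u16 event, u16 reason)
{
	return (event & 0xff00) | (reason & 0x00ff);
}

static inline u16 wb_event_number(u16 word)
{
	return word & 0xff00;
}

static inline u16 wb_event_reason(u16 word)
{
	return word & 0x00ff;
}

/* Example: wb_make_event(EVENT_ASSOC_FAIL, EVENT_REASON_TIMEOUT) == 0x04ff */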
diff --git a/drivers/staging/winbond/mac_structures.h b/drivers/staging/winbond/mac_structures.h
index 0d1619601c0..7441015cb18 100644
--- a/drivers/staging/winbond/mac_structures.h
+++ b/drivers/staging/winbond/mac_structures.h
@@ -1,4 +1,4 @@
-//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// MAC_Structures.h
//
// This file contains the definitions and data structures used by SW-MAC.
@@ -16,24 +16,24 @@
// Deleted some unused.
// 20021129 PD43 Austin
// 20030617 increase the 802.11g definition
-//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
#ifndef _MAC_Structures_H_
#define _MAC_Structures_H_
#include <linux/skbuff.h>
-//=========================================================
+/*=========================================================
// Some miscellaneous definitions
-//-----
+//-----*/
#define MAX_CHANNELS 30
#define MAC_ADDR_LENGTH 6
-#define MAX_WEP_KEY_SIZE 16 // 128 bits
-#define MAX_802_11_FRAGMENT_NUMBER 10 // By spec
+#define MAX_WEP_KEY_SIZE 16 /* 128 bits */
+#define MAX_802_11_FRAGMENT_NUMBER 10 /* By spec */
-//========================================================
+/* ========================================================
// 802.11 Frame define
-//-----
+//----- */
#define MASK_PROTOCOL_VERSION_TYPE 0x0F
#define MASK_FRAGMENT_NUMBER 0x000F
#define SEQUENCE_NUMBER_SHIFT 4
@@ -41,8 +41,8 @@
#define DOT_11_MAC_HEADER_SIZE 24
#define DOT_11_SNAP_SIZE 6
#define DOT_11_DURATION_OFFSET 2
-#define DOT_11_SEQUENCE_OFFSET 22 //Sequence control offset
-#define DOT_11_TYPE_OFFSET 30 //The start offset of 802.11 Frame//
+#define DOT_11_SEQUENCE_OFFSET 22 /* Sequence control offset */
+#define DOT_11_TYPE_OFFSET		30	/* The start offset of the 802.11 frame */
#define DOT_11_DATA_OFFSET 24
#define DOT_11_DA_OFFSET 4
#define DOT_3_TYPE_ARP 0x80F3
@@ -54,7 +54,7 @@
#define MAX_ETHERNET_PACKET_SIZE 1514
-//----- management : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7)
+/* ----- management : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
#define MAC_SUBTYPE_MNGMNT_ASSOC_REQUEST 0x00
#define MAC_SUBTYPE_MNGMNT_ASSOC_RESPONSE 0x10
#define MAC_SUBTYPE_MNGMNT_REASSOC_REQUEST 0x20
@@ -67,7 +67,7 @@
#define MAC_SUBTYPE_MNGMNT_AUTHENTICATION 0xB0
#define MAC_SUBTYPE_MNGMNT_DEAUTHENTICATION 0xC0
-//----- control : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7)
+/* ----- control : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
#define MAC_SUBTYPE_CONTROL_PSPOLL 0xA4
#define MAC_SUBTYPE_CONTROL_RTS 0xB4
#define MAC_SUBTYPE_CONTROL_CTS 0xC4
@@ -75,7 +75,7 @@
#define MAC_SUBTYPE_CONTROL_CFEND 0xE4
#define MAC_SUBTYPE_CONTROL_CFEND_CFACK 0xF4
-//----- data : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7)
+/* ----- data : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
#define MAC_SUBTYPE_DATA 0x08
#define MAC_SUBTYPE_DATA_CFACK 0x18
#define MAC_SUBTYPE_DATA_CFPOLL 0x28
@@ -85,12 +85,12 @@
#define MAC_SUBTYPE_DATA_CFPOLL_NULL 0x68
#define MAC_SUBTYPE_DATA_CFACK_CFPOLL_NULL 0x78
-//----- Frame Type of Bits (2, 3)
+/* ----- Frame Type of Bits (2, 3) */
#define MAC_TYPE_MANAGEMENT 0x00
#define MAC_TYPE_CONTROL 0x04
#define MAC_TYPE_DATA 0x08
-//----- definitions for Management Frame Element ID (1 BYTE)
+/* ----- definitions for Management Frame Element ID (1 BYTE) */
#define ELEMENT_ID_SSID 0
#define ELEMENT_ID_SUPPORTED_RATES 1
#define ELEMENT_ID_FH_PARAMETER_SET 2
@@ -116,13 +116,6 @@
#define WLAN_MAX_PAIRWISE_CIPHER_SUITE_COUNT ((u16) 6)
#define WLAN_MAX_AUTH_KEY_MGT_SUITE_LIST_COUNT ((u16) 2)
-//========================================================
-typedef enum enum_PowerManagementMode
-{
- ACTIVE = 0,
- POWER_SAVE
-} WB_PM_Mode, *PWB_PM_MODE;
-
//===================================================================
// Reason Code (Table 18): indicate the reason of DisAssoc, DeAuthen
// length of ReasonCode is 2 Octs.
@@ -137,7 +130,7 @@ typedef enum enum_PowerManagementMode
#define REASON_CLASS3_FRAME_FROM_NONASSO_STA 7
#define DISASS_REASON_LEFT_BSS 8
#define REASON_NOT_AUTH_YET 9
-//802.11i define
+/* 802.11i define */
#define REASON_INVALID_IE 13
#define REASON_MIC_ERROR 14
#define REASON_4WAY_HANDSHAKE_TIMEOUT 15
@@ -182,13 +175,12 @@ enum enum_MMPDUResultCode
} WB_MMPDURESULTCODE, *PWB_MMPDURESULTCODE;
*/
-//===========================================================
+/*===========================================================
// enum_TxRate --
// Define the transmission constants based on W89C32 MAC
// target specification.
-//===========================================================
-typedef enum enum_TxRate
-{
+//===========================================================*/
+typedef enum enum_TxRate {
TXRATE_1M = 0,
TXRATE_2MLONG = 2,
TXRATE_2MSHORT = 3,
@@ -196,7 +188,7 @@ typedef enum enum_TxRate
TXRATE_55MSHORT = 5,
TXRATE_11MLONG = 6,
TXRATE_11MSHORT = 7,
- TXRATE_AUTO = 255 // PD43 20021108
+ TXRATE_AUTO = 255 /* PD43 20021108 */
} WB_TXRATE, *PWB_TXRATE;
@@ -232,7 +224,7 @@ typedef enum enum_TxRate
#define RATE_54M 108
#define RATE_MAX 255
-//CAPABILITY
+/* CAPABILITY */
#define CAPABILITY_ESS_BIT 0x0001
#define CAPABILITY_IBSS_BIT 0x0002
#define CAPABILITY_CF_POLL_BIT 0x0004
@@ -245,53 +237,48 @@ typedef enum enum_TxRate
#define CAPABILITY_DSSS_OFDM_BIT 0x2000
-struct Capability_Information_Element
-{
- union
- {
- u16 __attribute__ ((packed)) wValue;
- #ifdef _BIG_ENDIAN_ //20060926 add by anson's endian
- struct _Capability
- {
- //-- 11G --
- u8 Reserved3 : 2;
- u8 DSSS_OFDM : 1;
- u8 Reserved2 : 2;
- u8 Short_Slot_Time : 1;
- u8 Reserved1 : 2;
- u8 Channel_Agility : 1;
- u8 PBCC : 1;
- u8 ShortPreamble : 1;
- u8 CF_Privacy : 1;
- u8 CF_Poll_Request : 1;
- u8 CF_Pollable : 1;
- u8 IBSS : 1;
- u8 ESS : 1;
+struct Capability_Information_Element {
+ union {
+ u16 __attribute__ ((packed)) wValue;
+ #ifdef _BIG_ENDIAN_ /* 20060926 add by anson's endian */
+ struct _Capability {
+ /* -- 11G -- */
+ u8 Reserved3:2;
+ u8 DSSS_OFDM:1;
+ u8 Reserved2:2;
+ u8 Short_Slot_Time:1;
+ u8 Reserved1:2;
+ u8 Channel_Agility:1;
+ u8 PBCC:1;
+ u8 ShortPreamble:1;
+ u8 CF_Privacy:1;
+ u8 CF_Poll_Request:1;
+ u8 CF_Pollable:1;
+ u8 IBSS:1;
+ u8 ESS:1;
} __attribute__ ((packed)) Capability;
#else
- struct _Capability
- {
- u8 ESS : 1;
- u8 IBSS : 1;
- u8 CF_Pollable : 1;
- u8 CF_Poll_Request : 1;
- u8 CF_Privacy : 1;
- u8 ShortPreamble : 1;
- u8 PBCC : 1;
- u8 Channel_Agility : 1;
- u8 Reserved1 : 2;
- //-- 11G --
- u8 Short_Slot_Time : 1;
- u8 Reserved2 : 2;
- u8 DSSS_OFDM : 1;
- u8 Reserved3 : 2;
+ struct _Capability {
+ u8 ESS:1;
+ u8 IBSS:1;
+ u8 CF_Pollable:1;
+ u8 CF_Poll_Request:1;
+ u8 CF_Privacy:1;
+ u8 ShortPreamble:1;
+ u8 PBCC:1;
+ u8 Channel_Agility:1;
+ u8 Reserved1:2;
+ /* -- 11G -- */
+ u8 Short_Slot_Time:1;
+ u8 Reserved2:2;
+ u8 DSSS_OFDM:1;
+ u8 Reserved3:2;
} __attribute__ ((packed)) Capability;
#endif
- }__attribute__ ((packed)) ;
-}__attribute__ ((packed));
+ } __attribute__ ((packed)) ;
+} __attribute__ ((packed));
-struct FH_Parameter_Set_Element
-{
+struct FH_Parameter_Set_Element {
u8 Element_ID;
u8 Length;
u8 Dwell_Time[2];
@@ -300,39 +287,34 @@ struct FH_Parameter_Set_Element
u8 Hop_Index;
};
-struct DS_Parameter_Set_Element
-{
+struct DS_Parameter_Set_Element {
u8 Element_ID;
u8 Length;
u8 Current_Channel;
};
-struct Supported_Rates_Element
-{
+struct Supported_Rates_Element {
u8 Element_ID;
u8 Length;
u8 SupportedRates[8];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct SSID_Element
-{
+struct SSID_Element {
u8 Element_ID;
u8 Length;
u8 SSID[32];
-}__attribute__ ((packed)) ;
+} __attribute__ ((packed)) ;
-struct CF_Parameter_Set_Element
-{
+struct CF_Parameter_Set_Element {
u8 Element_ID;
u8 Length;
u8 CFP_Count;
u8 CFP_Period;
- u8 CFP_MaxDuration[2]; // in Time Units
- u8 CFP_DurRemaining[2]; // in time units
+ u8 CFP_MaxDuration[2]; /* in Time Units */
+ u8 CFP_DurRemaining[2]; /* in time units */
};
-struct TIM_Element
-{
+struct TIM_Element {
u8 Element_ID;
u8 Length;
u8 DTIM_Count;
@@ -341,24 +323,21 @@ struct TIM_Element
u8 Partial_Virtual_Bitmap[251];
};
-struct IBSS_Parameter_Set_Element
-{
+struct IBSS_Parameter_Set_Element {
u8 Element_ID;
u8 Length;
u8 ATIM_Window[2];
};
-struct Challenge_Text_Element
-{
+struct Challenge_Text_Element {
u8 Element_ID;
u8 Length;
u8 Challenge_Text[253];
};
-struct PHY_Parameter_Set_Element
-{
-// int aSlotTime;
-// int aSifsTime;
+struct PHY_Parameter_Set_Element {
+/* int aSlotTime; */
+/* int aSifsTime; */
s32 aCCATime;
s32 aRxTxTurnaroundTime;
s32 aTxPLCPDelay;
@@ -374,18 +353,17 @@ struct PHY_Parameter_Set_Element
s32 aPLCPHeaderLength;
s32 aMPDUDurationFactor;
s32 aMPDUMaxLength;
-// int aCWmin;
-// int aCWmax;
+/* int aCWmin; */
+/* int aCWmax; */
};
-//-- 11G --
-struct ERP_Information_Element
-{
+/* -- 11G -- */
+struct ERP_Information_Element {
u8 Element_ID;
u8 Length;
- #ifdef _BIG_ENDIAN_ //20060926 add by anson's endian
- u8 Reserved:5; //20060926 add by anson
- u8 Barker_Preamble_Mode:1;
+ #ifdef _BIG_ENDIAN_ /* 20060926 add by anson's endian */
+ u8 Reserved:5; /* 20060926 add by anson */
+ u8 Barker_Preamble_Mode:1;
u8 Use_Protection:1;
u8 NonERP_Present:1;
#else
@@ -396,54 +374,53 @@ struct ERP_Information_Element
#endif
};
-struct Extended_Supported_Rates_Element
-{
+struct Extended_Supported_Rates_Element {
u8 Element_ID;
u8 Length;
u8 ExtendedSupportedRates[255];
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-//WPA(802.11i draft 3.0)
+/* WPA(802.11i draft 3.0) */
#define VERSION_WPA 1
#ifdef _WPA2_
#define VERSION_WPA2 1
-#endif //end def _WPA2_
-#define OUI_WPA 0x00F25000 //WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type
+#endif /* end def _WPA2_ */
+#define OUI_WPA 0x00F25000 /* WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type */
#ifdef _WPA2_
-#define OUI_WPA2 0x00AC0F00 // for wpa2 change to 0x00ACOF04 by Ws 26/04/04
-#endif //end def _WPA2_
+#define OUI_WPA2 0x00AC0F00 /* for wpa2 change to 0x00ACOF04 by Ws 26/04/04 */
+#endif /* end def _WPA2_ */
#define OUI_WPA_ADDITIONAL 0x01
-#define WLAN_MIN_RSN_WPA_LENGTH 6 //added by ws 09/10/04
+#define WLAN_MIN_RSN_WPA_LENGTH 6 /* added by ws 09/10/04 */
#ifdef _WPA2_
-#define WLAN_MIN_RSN_WPA2_LENGTH 2 // Fix to 2 09/14/05
-#endif //end def _WPA2_
+#define WLAN_MIN_RSN_WPA2_LENGTH 2 /* Fix to 2 09/14/05 */
+#endif /* end def _WPA2_ */
#define oui_wpa (u32)(OUI_WPA|OUI_WPA_ADDITIONAL)
-#define WPA_OUI_BIG ((u32) 0x01F25000)//added by ws 09/23/04
-#define WPA_OUI_LITTLE ((u32) 0x01F25001)//added by ws 09/23/04
+#define WPA_OUI_BIG ((u32) 0x01F25000)/* added by ws 09/23/04 */
+#define WPA_OUI_LITTLE ((u32) 0x01F25001)/* added by ws 09/23/04 */
-#define WPA_WPS_OUI cpu_to_le32(0x04F25000) // 20061108 For WPS. It's little endian. Big endian is 0x0050F204
+#define WPA_WPS_OUI cpu_to_le32(0x04F25000) /* 20061108 For WPS. It's little endian. Big endian is 0x0050F204 */
-//-----WPA2-----
+/* -----WPA2----- */
#ifdef _WPA2_
#define WPA2_OUI_BIG ((u32)0x01AC0F00)
#define WPA2_OUI_LITTLE ((u32)0x01AC0F01)
-#endif //end def _WPA2_
+#endif /* end def _WPA2_ */
-//Authentication suite
-#define OUI_AUTH_WPA_NONE 0x00 //for WPA_NONE
+/* Authentication suite */
+#define OUI_AUTH_WPA_NONE 0x00 /* for WPA_NONE */
#define OUI_AUTH_8021X 0x01
#define OUI_AUTH_PSK 0x02
-//Cipher suite
-#define OUI_CIPHER_GROUP_KEY 0x00 //added by ws 05/21/04
+/* Cipher suite */
+#define OUI_CIPHER_GROUP_KEY 0x00 /* added by ws 05/21/04 */
#define OUI_CIPHER_WEP_40 0x01
#define OUI_CIPHER_TKIP 0x02
#define OUI_CIPHER_CCMP 0x04
#define OUI_CIPHER_WEP_104 0x05
-typedef struct _SUITE_SELECTOR_
+struct suite_selector
{
union
{
@@ -454,35 +431,35 @@ typedef struct _SUITE_SELECTOR_
u8 Type;
}SuitSelector;
};
-}SUITE_SELECTOR;
+};
//-- WPA --
struct RSN_Information_Element
{
u8 Element_ID;
u8 Length;
- SUITE_SELECTOR OuiWPAAdditional;//WPA version 2.0 additional field, and should be 00:50:F2:01
+ struct suite_selector OuiWPAAdditional; /* WPA version 2.0 additional field, and should be 00:50:F2:01 */
u16 Version;
- SUITE_SELECTOR GroupKeySuite;
+ struct suite_selector GroupKeySuite;
u16 PairwiseKeySuiteCount;
- SUITE_SELECTOR PairwiseKeySuite[1];
+ struct suite_selector PairwiseKeySuite[1];
}__attribute__ ((packed));
struct RSN_Auth_Sub_Information_Element
{
u16 AuthKeyMngtSuiteCount;
- SUITE_SELECTOR AuthKeyMngtSuite[1];
+ struct suite_selector AuthKeyMngtSuite[1];
}__attribute__ ((packed));
-//-- WPA2 --
+/* -- WPA2 -- */
struct RSN_Capability_Element
{
union
{
u16 __attribute__ ((packed)) wValue;
- #ifdef _BIG_ENDIAN_ //20060927 add by anson's endian
+ #ifdef _BIG_ENDIAN_ /* 20060927 add by anson's endian */
struct _RSN_Capability
{
- u16 __attribute__ ((packed)) Reserved2 : 8; // 20051201
+ u16 __attribute__ ((packed)) Reserved2 : 8; /* 20051201 */
u16 __attribute__ ((packed)) Reserved1 : 2;
u16 __attribute__ ((packed)) GTK_Replay_Counter : 2;
u16 __attribute__ ((packed)) PTK_Replay_Counter : 2;
@@ -497,7 +474,7 @@ struct RSN_Capability_Element
u16 __attribute__ ((packed)) PTK_Replay_Counter : 2;
u16 __attribute__ ((packed)) GTK_Replay_Counter : 2;
u16 __attribute__ ((packed)) Reserved1 : 2;
- u16 __attribute__ ((packed)) Reserved2 : 8; // 20051201
+ u16 __attribute__ ((packed)) Reserved2 : 8; /* 20051201 */
}__attribute__ ((packed)) RSN_Capability;
#endif
@@ -505,43 +482,43 @@ struct RSN_Capability_Element
}__attribute__ ((packed)) ;
#ifdef _WPA2_
-typedef struct _PMKID
+struct pmkid
{
u8 pValue[16];
-}PMKID;
+};
struct WPA2_RSN_Information_Element
{
u8 Element_ID;
u8 Length;
u16 Version;
- SUITE_SELECTOR GroupKeySuite;
+ struct suite_selector GroupKeySuite;
u16 PairwiseKeySuiteCount;
- SUITE_SELECTOR PairwiseKeySuite[1];
+ struct suite_selector PairwiseKeySuite[1];
}__attribute__ ((packed));
struct WPA2_RSN_Auth_Sub_Information_Element
{
u16 AuthKeyMngtSuiteCount;
- SUITE_SELECTOR AuthKeyMngtSuite[1];
+ struct suite_selector AuthKeyMngtSuite[1];
}__attribute__ ((packed));
struct PMKID_Information_Element
{
u16 PMKID_Count;
- PMKID pmkid [16] ;
+ struct pmkid pmkid[16];
}__attribute__ ((packed));
-#endif //enddef _WPA2_
-//============================================================
+#endif /* enddef _WPA2_ */
+/*============================================================
// MAC Frame structure (different type) and subfield structure
-//============================================================
+//============================================================*/
struct MAC_frame_control
{
- u8 mac_frame_info; // a combination of the [Protocol Version, Control Type, Control Subtype]
- #ifdef _BIG_ENDIAN_ //20060927 add by anson's endian
+ u8 mac_frame_info; /* a combination of the [Protocol Version, Control Type, Control Subtype]*/
+ #ifdef _BIG_ENDIAN_ /* 20060927 add by anson's endian */
u8 order:1;
u8 WEP:1;
u8 more_data:1;
@@ -563,24 +540,24 @@ struct MAC_frame_control
} __attribute__ ((packed));
struct Management_Frame {
- struct MAC_frame_control frame_control; // 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0
+ struct MAC_frame_control frame_control; /* 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0 */
u16 duration;
- u8 DA[MAC_ADDR_LENGTH]; // Addr1
- u8 SA[MAC_ADDR_LENGTH]; // Addr2
- u8 BSSID[MAC_ADDR_LENGTH]; // Addr3
+ u8 DA[MAC_ADDR_LENGTH]; /* Addr1 */
+ u8 SA[MAC_ADDR_LENGTH]; /* Addr2 */
+ u8 BSSID[MAC_ADDR_LENGTH]; /* Addr3 */
u16 Sequence_Control;
- // Management Frame Body <= 325 bytes
- // FCS 4 bytes
-}__attribute__ ((packed));
+ /* Management Frame Body <= 325 bytes */
+ /* FCS 4 bytes */
+} __attribute__ ((packed));
-// SW-MAC don't Tx/Rx Control-Frame, HW-MAC do it.
+/* SW-MAC don't Tx/Rx Control-Frame, HW-MAC do it. */
struct Control_Frame {
- struct MAC_frame_control frame_control; // ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0
+ struct MAC_frame_control frame_control; /* ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0 */
u16 duration;
u8 RA[MAC_ADDR_LENGTH];
u8 TA[MAC_ADDR_LENGTH];
u16 FCS;
-}__attribute__ ((packed));
+} __attribute__ ((packed));
struct Data_Frame {
struct MAC_frame_control frame_control;
@@ -589,32 +566,29 @@ struct Data_Frame {
u8 Addr2[MAC_ADDR_LENGTH];
u8 Addr3[MAC_ADDR_LENGTH];
u16 Sequence_Control;
- u8 Addr4[MAC_ADDR_LENGTH]; // only exist when ToDS=FromDS=1
- // Data Frame Body <= 2312
- // FCS
-}__attribute__ ((packed));
+ u8 Addr4[MAC_ADDR_LENGTH]; /* only exist when ToDS=FromDS=1 */
+ /* Data Frame Body <= 2312 */
+ /* FCS */
+} __attribute__ ((packed));
-struct Disassociation_Frame_Body
-{
+struct Disassociation_Frame_Body {
u16 reasonCode;
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct Association_Request_Frame_Body
-{
+struct Association_Request_Frame_Body {
u16 capability_information;
u16 listenInterval;
- u8 Current_AP_Address[MAC_ADDR_LENGTH];//for reassociation only
- // SSID (2+32 bytes)
- // Supported_Rates (2+8 bytes)
-}__attribute__ ((packed));
+ u8 Current_AP_Address[MAC_ADDR_LENGTH];/* for reassociation only */
+ /* SSID (2+32 bytes) */
+ /* Supported_Rates (2+8 bytes) */
+} __attribute__ ((packed));
-struct Association_Response_Frame_Body
-{
+struct Association_Response_Frame_Body {
u16 capability_information;
u16 statusCode;
u16 Association_ID;
struct Supported_Rates_Element supportedRates;
-}__attribute__ ((packed));
+} __attribute__ ((packed));
/*struct Reassociation_Request_Frame_Body
{
@@ -624,44 +598,40 @@ struct Association_Response_Frame_Body
// SSID (2+32 bytes)
// Supported_Rates (2+8 bytes)
};*/
-// eliminated by WS 07/22/04 comboined with associateion request frame.
+/* eliminated by WS 07/22/04, combined with the association request frame. */
-struct Reassociation_Response_Frame_Body
-{
+struct Reassociation_Response_Frame_Body {
u16 capability_information;
u16 statusCode;
u16 Association_ID;
struct Supported_Rates_Element supportedRates;
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct Deauthentication_Frame_Body
-{
+struct Deauthentication_Frame_Body {
u16 reasonCode;
-}__attribute__ ((packed));
+} __attribute__ ((packed));
-struct Probe_Response_Frame_Body
-{
+struct Probe_Response_Frame_Body {
u16 Timestamp;
u16 Beacon_Interval;
u16 Capability_Information;
- // SSID
+ /* SSID
// Supported_Rates
// PHY parameter Set (DS Parameters)
// CF parameter Set
- // IBSS parameter Set
-}__attribute__ ((packed));
+ // IBSS parameter Set */
+} __attribute__ ((packed));
-struct Authentication_Frame_Body
-{
+struct Authentication_Frame_Body {
u16 algorithmNumber;
u16 sequenceNumber;
u16 statusCode;
- // NB: don't include ChallengeText in this structure
- // struct Challenge_Text_Element sChallengeTextElement; // wkchen added
-}__attribute__ ((packed));
+ /* NB: don't include ChallengeText in this structure
+ // struct Challenge_Text_Element sChallengeTextElement; // wkchen added */
+} __attribute__ ((packed));
-#endif // _MAC_Structure_H_
+#endif /* _MAC_Structure_H_ */
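struct Capability_Information_Element above keeps the 802.11 capability word as endianness-dependent packed bitfields, selected by _BIG_ENDIAN_. A hedged alternative sketch (the helper below is hypothetical): keep the on-air little-endian word and test it against the CAPABILITY_*_BIT masks already defined in this header, which avoids the dual bitfield layouts entirely.

/* Sketch only -- not part of the patch. */
#include <linux/types.h>
#include <asm/byteorder.h>

static inline int wb_cap_has(__le16 raw_capability, u16 mask)
{
	return (le16_to_cpu(raw_capability) & mask) != 0;
}

/* Example: wb_cap_has(beacon_capability, CAPABILITY_ESS_BIT) tells whether
 * the sender advertises an infrastructure (ESS) network. */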
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 37e0c185d11..e8320a6f59a 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -6,7 +6,7 @@
#include "wblinux_f.h"
unsigned char
-Mds_initial(struct wbsoft_priv * adapter)
+Mds_initial(struct wbsoft_priv *adapter)
{
struct wb35_mds *pMds = &adapter->Mds;
@@ -18,7 +18,7 @@ Mds_initial(struct wbsoft_priv * adapter)
}
void
-Mds_Destroy(struct wbsoft_priv * adapter)
+Mds_Destroy(struct wbsoft_priv *adapter)
{
}
@@ -43,53 +43,53 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
pT01 = (PT01_DESCRIPTOR)(buffer+4);
pNextT00 = (PT00_DESCRIPTOR)(buffer+OffsetSize);
- if( buffer[ DOT_11_DA_OFFSET+8 ] & 0x1 ) // +8 for USB hdr
+ if( buffer[ DOT_11_DA_OFFSET+8 ] & 0x1 ) /* +8 for USB hdr */
boGroupAddr = true;
- //========================================
- // Set RTS/CTS mechanism
- //========================================
+ /******************************************
+ * Set RTS/CTS mechanism
+ ******************************************/
if (!boGroupAddr)
{
- //NOTE : If the protection mode is enabled and the MSDU will be fragmented,
- // the tx rates of MPDUs will all be DSSS rates. So it will not use
- // CTS-to-self in this case. CTS-To-self will only be used when without
- // fragmentation. -- 20050112
- BodyLen = (u16)pT00->T00_frame_length; //include 802.11 header
- BodyLen += 4; //CRC
+ /* NOTE : If the protection mode is enabled and the MSDU will be fragmented,
+ * the tx rates of MPDUs will all be DSSS rates. So it will not use
+ * CTS-to-self in this case. CTS-To-self will only be used when without
+ * fragmentation. -- 20050112 */
+ BodyLen = (u16)pT00->T00_frame_length; /* include 802.11 header */
+ BodyLen += 4; /* CRC */
if( BodyLen >= CURRENT_RTS_THRESHOLD )
- RTS_on = true; // Using RTS
+ RTS_on = true; /* Using RTS */
else
{
- if( pT01->T01_modulation_type ) // Is using OFDM
+ if( pT01->T01_modulation_type ) /* Is using OFDM */
{
- if( CURRENT_PROTECT_MECHANISM ) // Is using protect
- CTS_on = true; // Using CTS
+ if( CURRENT_PROTECT_MECHANISM ) /* Is using protect */
+ CTS_on = true; /* Using CTS */
}
}
}
if( RTS_on || CTS_on )
{
- if( pT01->T01_modulation_type) // Is using OFDM
+ if( pT01->T01_modulation_type) /* Is using OFDM */
{
- //CTS duration
- // 2 SIFS + DATA transmit time + 1 ACK
- // ACK Rate : 24 Mega bps
- // ACK frame length = 14 bytes
+ /* CTS duration
+ * 2 SIFS + DATA transmit time + 1 ACK
+ * ACK Rate : 24 Mega bps
+ * ACK frame length = 14 bytes */
Duration = 2*DEFAULT_SIFSTIME +
2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
((112 + 22 + 95)/96)*Tsym;
}
- else //DSSS
+ else /* DSSS */
{
- //CTS duration
- // 2 SIFS + DATA transmit time + 1 ACK
- // Rate : ?? Mega bps
- // ACK frame length = 14 bytes
- if( pT01->T01_plcp_header_length ) //long preamble
+ /* CTS duration
+ * 2 SIFS + DATA transmit time + 1 ACK
+ * Rate : ?? Mega bps
+ * ACK frame length = 14 bytes */
+ if( pT01->T01_plcp_header_length ) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
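In the OFDM branch above, Rate*4 is the number of data bits carried by one 4 us OFDM symbol and the extra 22 bits are the PLCP SERVICE and tail bits, so the expression is a ceiling division over symbols. A short worked sketch (assumed helper, not driver code) using the 24 Mbit/s ACK rate quoted in the comments:

/*
 * Sketch only: a 14-byte ACK at 24 Mbit/s.
 *   bits          = 14 * 8 = 112
 *   bits per sym  = 24 * 4 = 96
 *   symbols       = ceil((112 + 22) / 96) = ceil(134 / 96) = 2
 * which matches the driver's ((112 + 22 + 95)/96) integer division.
 */
static unsigned int ofdm_symbols(unsigned int body_bytes, unsigned int rate_mbps)
{
	unsigned int bits_per_sym = rate_mbps * 4;	/* 4 us per OFDM symbol */

	return (body_bytes * 8 + 22 + bits_per_sym - 1) / bits_per_sym;
}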
@@ -100,21 +100,21 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
if( RTS_on )
{
- if( pT01->T01_modulation_type ) // Is using OFDM
+ if( pT01->T01_modulation_type ) /* Is using OFDM */
{
- //CTS + 1 SIFS + CTS duration
- //CTS Rate : 24 Mega bps
- //CTS frame length = 14 bytes
+ /* CTS + 1 SIFS + CTS duration
+ * CTS Rate : 24 Mega bps
+ * CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
((112 + 22 + 95)/96)*Tsym);
}
else
{
- //CTS + 1 SIFS + CTS duration
- //CTS Rate : ?? Mega bps
- //CTS frame length = 14 bytes
- if( pT01->T01_plcp_header_length ) //long preamble
+ /* CTS + 1 SIFS + CTS duration
+ * CTS Rate : ?? Mega bps
+ * CTS frame length = 14 bytes */
+ if( pT01->T01_plcp_header_length ) /* long preamble */
Duration += LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
@@ -123,15 +123,15 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
}
}
- // Set the value into USB descriptor
+ /* Set the value into USB descriptor */
pT01->T01_add_rts = RTS_on ? 1 : 0;
pT01->T01_add_cts = CTS_on ? 1 : 0;
pT01->T01_rts_cts_duration = Duration;
}
- //=====================================
- // Fill the more fragment descriptor
- //=====================================
+ /******************************************
+ * Fill the more fragment descriptor
+ ******************************************/
if( boGroupAddr )
Duration = 0;
else
@@ -139,14 +139,14 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
for( i=pDes->FragmentCount-1; i>0; i-- )
{
NextBodyLen = (u16)pNextT00->T00_frame_length;
- NextBodyLen += 4; //CRC
+ NextBodyLen += 4; /* CRC */
if( pT01->T01_modulation_type )
{
- //OFDM
- // data transmit time + 3 SIFS + 2 ACK
- // Rate : ??Mega bps
- // ACK frame length = 14 bytes, tx rate = 24M
+ /* OFDM
+ * data transmit time + 3 SIFS + 2 ACK
+ * Rate : ??Mega bps
+ * ACK frame length = 14 bytes, tx rate = 24M */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION * 3;
Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)/(Rate*4)) * Tsym +
(((2*14)*8 + 22 + 95)/96)*Tsym +
@@ -154,12 +154,12 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
}
else
{
- //DSSS
- // data transmit time + 2 ACK + 3 SIFS
- // Rate : ??Mega bps
- // ACK frame length = 14 bytes
- //TODO :
- if( pT01->T01_plcp_header_length ) //long preamble
+ /* DSSS
+ * data transmit time + 2 ACK + 3 SIFS
+ * Rate : ??Mega bps
+ * ACK frame length = 14 bytes
+ * TODO : */
+ if( pT01->T01_plcp_header_length ) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
@@ -168,39 +168,39 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
DEFAULT_SIFSTIME*3 );
}
- ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+		((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHORT to skip the 8B USB header, then FC + Duration */
- //----20061009 add by anson's endian
+ /* ----20061009 add by anson's endian */
pNextT00->value = cpu_to_le32(pNextT00->value);
pT01->value = cpu_to_le32( pT01->value );
- //----end 20061009 add by anson's endian
+ /* ----end 20061009 add by anson's endian */
buffer += OffsetSize;
pT01 = (PT01_DESCRIPTOR)(buffer+4);
- if (i != 1) //The last fragment will not have the next fragment
+ if (i != 1) /* The last fragment will not have the next fragment */
pNextT00 = (PT00_DESCRIPTOR)(buffer+OffsetSize);
}
- //=====================================
- // Fill the last fragment descriptor
- //=====================================
+ /*******************************************
+ * Fill the last fragment descriptor
+ *******************************************/
if( pT01->T01_modulation_type )
{
- //OFDM
- // 1 SIFS + 1 ACK
- // Rate : 24 Mega bps
- // ACK frame length = 14 bytes
+ /* OFDM
+ * 1 SIFS + 1 ACK
+ * Rate : 24 Mega bps
+ * ACK frame length = 14 bytes */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION;
- //The Tx rate of ACK use 24M
+ /* The Tx rate of ACK use 24M */
Duration += (((112 + 22 + 95)/96)*Tsym + DEFAULT_SIFSTIME );
}
else
{
- // DSSS
- // 1 ACK + 1 SIFS
- // Rate : ?? Mega bps
- // ACK frame length = 14 bytes(112 bits)
- if( pT01->T01_plcp_header_length ) //long preamble
+ /* DSSS
+ * 1 ACK + 1 SIFS
+ * Rate : ?? Mega bps
+ * ACK frame length = 14 bytes(112 bits) */
+ if( pT01->T01_plcp_header_length ) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
@@ -209,14 +209,14 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
}
}
- ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+	((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHORT to skip 8B USB, 2 USHORT = FC + Duration */
pT00->value = cpu_to_le32(pT00->value);
pT01->value = cpu_to_le32(pT01->value);
- //--end 20061009 add
+ /* --end 20061009 add */
}
-// The function return the 4n size of usb pk
+/* The function return the 4n size of usb pk */
static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
PT00_DESCRIPTOR pT00;
@@ -229,8 +229,8 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
u8 buf_index, FragmentCount = 0;
- // Copy fragment body
- buffer = TargetBuffer; // shift 8B usb + 24B 802.11
+ /* Copy fragment body */
+ buffer = TargetBuffer; /* shift 8B usb + 24B 802.11 */
SizeLeft = pDes->buffer_total_size;
buf_index = pDes->buffer_start_index;
@@ -240,35 +240,35 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
CopySize = SizeLeft;
if (SizeLeft > pDes->FragmentThreshold) {
CopySize = pDes->FragmentThreshold;
- pT00->T00_frame_length = 24 + CopySize;//Set USB length
+ pT00->T00_frame_length = 24 + CopySize; /* Set USB length */
} else
- pT00->T00_frame_length = 24 + SizeLeft;//Set USB length
+ pT00->T00_frame_length = 24 + SizeLeft; /* Set USB length */
SizeLeft -= CopySize;
- // 1 Byte operation
+ /* 1 Byte operation */
pctmp = (u8 *)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
*pctmp &= 0xf0;
- *pctmp |= FragmentCount;//931130.5.m
+ *pctmp |= FragmentCount; /* 931130.5.m */
if( !FragmentCount )
pT00->T00_first_mpdu = 1;
- buffer += 32; // 8B usb + 24B 802.11 header
+ buffer += 32; /* 8B usb + 24B 802.11 header */
Size += 32;
- // Copy into buffer
+ /* Copy into buffer */
stmp = CopySize + 3;
- stmp &= ~0x03;//4n Alignment
- Size += stmp;// Current 4n offset of mpdu
+ stmp &= ~0x03; /* 4n Alignment */
+ Size += stmp; /* Current 4n offset of mpdu */
while (CopySize) {
- // Copy body
+ /* Copy body */
src_buffer = pDes->buffer_address[buf_index];
CopyLeft = CopySize;
if (CopySize >= pDes->buffer_size[buf_index]) {
CopyLeft = pDes->buffer_size[buf_index];
- // Get the next buffer of descriptor
+ /* Get the next buffer of descriptor */
buf_index++;
buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
} else {
@@ -283,14 +283,14 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
CopySize -= CopyLeft;
}
- // 931130.5.n
+ /* 931130.5.n */
if (pMds->MicAdd) {
if (!SizeLeft) {
pMds->MicWriteAddress[ pMds->MicWriteIndex ] = buffer - pMds->MicAdd;
pMds->MicWriteSize[ pMds->MicWriteIndex ] = pMds->MicAdd;
pMds->MicAdd = 0;
}
- else if( SizeLeft < 8 ) //931130.5.p
+ else if( SizeLeft < 8 ) /* 931130.5.p */
{
pMds->MicAdd = SizeLeft;
pMds->MicWriteAddress[ pMds->MicWriteIndex ] = buffer - ( 8 - SizeLeft );
@@ -299,10 +299,10 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
}
}
- // Does it need to generate the new header for next mpdu?
+ /* Does it need to generate the new header for next mpdu? */
if (SizeLeft) {
- buffer = TargetBuffer + Size; // Get the next 4n start address
- memcpy( buffer, TargetBuffer, 32 );//Copy 8B USB +24B 802.11
+ buffer = TargetBuffer + Size; /* Get the next 4n start address */
+ memcpy( buffer, TargetBuffer, 32 ); /* Copy 8B USB +24B 802.11 */
pT00 = (PT00_DESCRIPTOR)buffer;
pT00->T00_first_mpdu = 0;
}
@@ -312,16 +312,16 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
pT00->T00_last_mpdu = 1;
pT00->T00_IsLastMpdu = 1;
- buffer = (u8 *)pT00 + 8; // +8 for USB hdr
- buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control
- pDes->FragmentCount = FragmentCount; // Update the correct fragment number
+ buffer = (u8 *)pT00 + 8; /* +8 for USB hdr */
+ buffer[1] &= ~0x04; /* Clear more frag bit of 802.11 frame control */
+ pDes->FragmentCount = FragmentCount; /* Update the correct fragment number */
return Size;
}
-static void Mds_HeaderCopy(struct wbsoft_priv * adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
+static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct wb35_mds *pMds = &adapter->Mds;
- u8 *src_buffer = pDes->buffer_address[0];//931130.5.g
+ u8 *src_buffer = pDes->buffer_address[0]; /* 931130.5.g */
PT00_DESCRIPTOR pT00;
PT01_DESCRIPTOR pT01;
u16 stmp;
@@ -330,44 +330,44 @@ static void Mds_HeaderCopy(struct wbsoft_priv * adapter, struct wb35_descriptor
stmp = pDes->buffer_total_size;
- //
- // Set USB header 8 byte
- //
+ /*
+ * Set USB header 8 byte
+ */
pT00 = (PT00_DESCRIPTOR)TargetBuffer;
TargetBuffer += 4;
pT01 = (PT01_DESCRIPTOR)TargetBuffer;
TargetBuffer += 4;
- pT00->value = 0;// Clear
- pT01->value = 0;// Clear
+ pT00->value = 0; /* Clear */
+ pT01->value = 0; /* Clear */
- pT00->T00_tx_packet_id = pDes->Descriptor_ID;// Set packet ID
- pT00->T00_header_length = 24;// Set header length
- pT01->T01_retry_abort_ebable = 1;//921013 931130.5.h
+ pT00->T00_tx_packet_id = pDes->Descriptor_ID; /* Set packet ID */
+ pT00->T00_header_length = 24; /* Set header length */
+ pT01->T01_retry_abort_ebable = 1; /* 921013 931130.5.h */
- // Key ID setup
+ /* Key ID setup */
pT01->T01_wep_id = 0;
- FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; //Do not fragment
- // Copy full data, the 1'st buffer contain all the data 931130.5.j
- memcpy( TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE );// Copy header
+ FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; /* Do not fragment */
+	/* Copy full data; the 1st buffer contains all the data 931130.5.j */
+ memcpy( TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE ); /* Copy header */
pDes->buffer_address[0] = src_buffer + DOT_11_MAC_HEADER_SIZE;
pDes->buffer_total_size -= DOT_11_MAC_HEADER_SIZE;
pDes->buffer_size[0] = pDes->buffer_total_size;
- // Set fragment threshold
+ /* Set fragment threshold */
FragmentThreshold -= (DOT_11_MAC_HEADER_SIZE + 4);
pDes->FragmentThreshold = FragmentThreshold;
- // Set more frag bit
- TargetBuffer[1] |= 0x04;// Set more frag bit
+ /* Set more frag bit */
+ TargetBuffer[1] |= 0x04; /* Set more frag bit */
- //
- // Set tx rate
- //
- stmp = *(u16 *)(TargetBuffer+30); // 2n alignment address
+ /*
+ * Set tx rate
+ */
+ stmp = *(u16 *)(TargetBuffer+30); /* 2n alignment address */
- //Use basic rate
+ /* Use basic rate */
ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
pDes->TxRate = ctmp1;
@@ -381,10 +381,10 @@ static void Mds_HeaderCopy(struct wbsoft_priv * adapter, struct wb35_descriptor
if( i == 1 )
ctmp1 = ctmpf;
- pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1; // backup the ta rate and fall back rate
+		pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1; /* back up the tx rate and fall back rate */
if( ctmp1 == 108) ctmp2 = 7;
- else if( ctmp1 == 96 ) ctmp2 = 6; // Rate convert for USB
+ else if( ctmp1 == 96 ) ctmp2 = 6; /* Rate convert for USB */
else if( ctmp1 == 72 ) ctmp2 = 5;
else if( ctmp1 == 48 ) ctmp2 = 4;
else if( ctmp1 == 36 ) ctmp2 = 3;
@@ -394,7 +394,7 @@ static void Mds_HeaderCopy(struct wbsoft_priv * adapter, struct wb35_descriptor
else if( ctmp1 == 22 ) ctmp2 = 3;
else if( ctmp1 == 11 ) ctmp2 = 2;
else if( ctmp1 == 4 ) ctmp2 = 1;
- else ctmp2 = 0; // if( ctmp1 == 2 ) or default
+ else ctmp2 = 0; /* if( ctmp1 == 2 ) or default */
if( i == 0 )
pT01->T01_transmit_rate = ctmp2;
@@ -402,21 +402,21 @@ static void Mds_HeaderCopy(struct wbsoft_priv * adapter, struct wb35_descriptor
pT01->T01_fall_back_rate = ctmp2;
}
- //
- // Set preamble type
- //
- if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0)) // RATE_1M
+ /*
+ * Set preamble type
+ */
+ if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0)) /* RATE_1M */
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
- pT01->T01_plcp_header_length = pDes->PreambleMode; // Set preamble
+ pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
}
void
-Mds_Tx(struct wbsoft_priv * adapter)
+Mds_Tx(struct wbsoft_priv *adapter)
{
- struct hw_data * pHwData = &adapter->sHwData;
+ struct hw_data *pHwData = &adapter->sHwData;
struct wb35_mds *pMds = &adapter->Mds;
struct wb35_descriptor TxDes;
struct wb35_descriptor *pTxDes = &TxDes;
@@ -431,21 +431,21 @@ Mds_Tx(struct wbsoft_priv * adapter)
if (!hal_driver_init_OK(pHwData))
return;
- //Only one thread can be run here
+ /* Only one thread can be run here */
if (atomic_inc_return(&pMds->TxThreadCount) != 1)
goto cleanup;
- // Start to fill the data
+ /* Start to fill the data */
do {
FillIndex = pMds->TxFillIndex;
- if (pMds->TxOwner[FillIndex]) { // Is owned by software 0:Yes 1:No
+ if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
#ifdef _PE_TX_DUMP_
printk("[Mds_Tx] Tx Owner is H/W.\n");
#endif
break;
}
- XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex); //Get buffer
+ XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex); /* Get buffer */
XmitBufSize = 0;
FillCount = 0;
do {
@@ -453,37 +453,37 @@ Mds_Tx(struct wbsoft_priv * adapter)
if (!PacketSize)
break;
- //For Check the buffer resource
+			/* For checking the buffer resource */
FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;
- //931130.5.b
+ /* 931130.5.b */
FragmentCount = PacketSize/FragmentThreshold + 1;
- stmp = PacketSize + FragmentCount*32 + 8;//931130.5.c 8:MIC
+ stmp = PacketSize + FragmentCount*32 + 8; /* 931130.5.c 8:MIC */
if ((XmitBufSize + stmp) >= MAX_USB_TX_BUFFER) {
printk("[Mds_Tx] Excess max tx buffer.\n");
- break; // buffer is not enough
+ break; /* buffer is not enough */
}
- //
- // Start transmitting
- //
+ /*
+ * Start transmitting
+ */
BufferFilled = true;
/* Leaves first u8 intact */
memset((u8 *)pTxDes + 1, 0, sizeof(struct wb35_descriptor) - 1);
- TxDesIndex = pMds->TxDesIndex;//Get the current ID
+ TxDesIndex = pMds->TxDesIndex; /* Get the current ID */
pTxDes->Descriptor_ID = TxDesIndex;
- pMds->TxDesFrom[ TxDesIndex ] = 2;//Storing the information of source comming from
+			pMds->TxDesFrom[ TxDesIndex ] = 2; /* Store which source the packet came from */
pMds->TxDesIndex++;
pMds->TxDesIndex %= MAX_USB_TX_DESCRIPTOR;
MLME_GetNextPacket( adapter, pTxDes );
- // Copy header. 8byte USB + 24byte 802.11Hdr. Set TxRate, Preamble type
+ /* Copy header. 8byte USB + 24byte 802.11Hdr. Set TxRate, Preamble type */
Mds_HeaderCopy( adapter, pTxDes, XmitBufAddress );
- // For speed up Key setting
+ /* For speed up Key setting */
if (pTxDes->EapFix) {
#ifdef _PE_TX_DUMP_
printk("35: EPA 4th frame detected. Size = %d\n", PacketSize);
@@ -491,41 +491,41 @@ Mds_Tx(struct wbsoft_priv * adapter)
pHwData->IsKeyPreSet = 1;
}
- // Copy (fragment) frame body, and set USB, 802.11 hdr flag
+ /* Copy (fragment) frame body, and set USB, 802.11 hdr flag */
CurrentSize = Mds_BodyCopy(adapter, pTxDes, XmitBufAddress);
- // Set RTS/CTS and Normal duration field into buffer
+ /* Set RTS/CTS and Normal duration field into buffer */
Mds_DurationSet(adapter, pTxDes, XmitBufAddress);
- //Shift to the next address
+ /* Shift to the next address */
XmitBufSize += CurrentSize;
XmitBufAddress += CurrentSize;
#ifdef _IBSS_BEACON_SEQ_STICK_
- if ((XmitBufAddress[ DOT_11_DA_OFFSET+8 ] & 0xfc) != MAC_SUBTYPE_MNGMNT_PROBE_REQUEST) // +8 for USB hdr
+ if ((XmitBufAddress[ DOT_11_DA_OFFSET+8 ] & 0xfc) != MAC_SUBTYPE_MNGMNT_PROBE_REQUEST) /* +8 for USB hdr */
#endif
pMds->TxToggle = true;
- // Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data
+ /* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
MLME_SendComplete(adapter, 0, true);
- // Software TSC count 20060214
+ /* Software TSC count 20060214 */
pMds->TxTsc++;
if (pMds->TxTsc == 0)
pMds->TxTsc_2++;
- FillCount++; // 20060928
- } while (HAL_USB_MODE_BURST(pHwData)); // End of multiple MSDU copy loop. false = single true = multiple sending
+ FillCount++; /* 20060928 */
+		} while (HAL_USB_MODE_BURST(pHwData)); /* End of multiple MSDU copy loop. false = single, true = multiple sending */
- // Move to the next one, if necessary
+ /* Move to the next one, if necessary */
if (BufferFilled) {
- // size setting
+ /* size setting */
pMds->TxBufferSize[ FillIndex ] = XmitBufSize;
- // 20060928 set Tx count
+ /* 20060928 set Tx count */
pMds->TxCountInBuffer[FillIndex] = FillCount;
- // Set owner flag
+ /* Set owner flag */
pMds->TxOwner[FillIndex] = 1;
pMds->TxFillIndex++;
@@ -534,14 +534,14 @@ Mds_Tx(struct wbsoft_priv * adapter)
} else
break;
- if (!PacketSize) // No more pk for transmitting
+ if (!PacketSize) /* No more pk for transmitting */
break;
} while(true);
- //
- // Start to send by lower module
- //
+ /*
+ * Start to send by lower module
+ */
if (!pHwData->IsKeyPreSet)
Wb35Tx_start(adapter);
@@ -550,28 +550,28 @@ Mds_Tx(struct wbsoft_priv * adapter)
}
void
-Mds_SendComplete(struct wbsoft_priv * adapter, PT02_DESCRIPTOR pT02)
+Mds_SendComplete(struct wbsoft_priv *adapter, PT02_DESCRIPTOR pT02)
{
struct wb35_mds *pMds = &adapter->Mds;
- struct hw_data * pHwData = &adapter->sHwData;
+ struct hw_data *pHwData = &adapter->sHwData;
u8 PacketId = (u8)pT02->T02_Tx_PktID;
unsigned char SendOK = true;
u8 RetryCount, TxRate;
- if (pT02->T02_IgnoreResult) // Don't care the result
+ if (pT02->T02_IgnoreResult) /* Don't care the result */
return;
if (pT02->T02_IsLastMpdu) {
- //TODO: DTO -- get the retry count and fragment count
- // Tx rate
+ /* TODO: DTO -- get the retry count and fragment count */
+ /* Tx rate */
TxRate = pMds->TxRate[ PacketId ][ 0 ];
RetryCount = (u8)pT02->T02_MPDU_Cnt;
if (pT02->value & FLAG_ERROR_TX_MASK) {
SendOK = false;
if (pT02->T02_transmit_abort || pT02->T02_out_of_MaxTxMSDULiftTime) {
- //retry error
+ /* retry error */
pHwData->dto_tx_retry_count += (RetryCount+1);
- //[for tx debug]
+ /* [for tx debug] */
if (RetryCount<7)
pHwData->tx_retry_count[RetryCount] += RetryCount;
else
@@ -583,7 +583,7 @@ Mds_SendComplete(struct wbsoft_priv * adapter, PT02_DESCRIPTOR pT02)
}
pHwData->dto_tx_frag_count += (RetryCount+1);
- //[for tx debug]
+ /* [for tx debug] */
if (pT02->T02_transmit_abort_due_to_TBTT)
pHwData->tx_TBTT_start_count++;
if (pT02->T02_transmit_without_encryption_due_to_wep_on_false)
@@ -596,7 +596,7 @@ Mds_SendComplete(struct wbsoft_priv * adapter, PT02_DESCRIPTOR pT02)
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
- // Clear send result buffer
+ /* Clear send result buffer */
pMds->TxResult[ PacketId ] = 0;
} else
pMds->TxResult[ PacketId ] |= ((u16)(pT02->value & 0x0ffff));
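
Side note on the arithmetic in Mds_DurationSet() above: terms such as (112 + 22 + 95)/96 are integer ceiling divisions that round the OFDM symbol count up (an ACK is 14 bytes = 112 data bits plus 22 service/tail bits, and 96 bits fit in one 4 us symbol at 24 Mbit/s). A minimal sketch of the idiom, with a hypothetical helper name, not taken from the driver:

static inline unsigned int ceil_div(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;	/* ceil(n / d) for d > 0 */
}

/* ceil_div(112 + 22, 96) == 2, matching the (112 + 22 + 95)/96 term above */
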
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index e09dd4b879d..20e97bfe01e 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -4,17 +4,17 @@
#include "wbhal_s.h"
#include "core.h"
-unsigned char Mds_initial( struct wbsoft_priv *adapter );
-void Mds_Destroy( struct wbsoft_priv *adapter );
-void Mds_Tx( struct wbsoft_priv *adapter );
-void Mds_SendComplete( struct wbsoft_priv *adapter, PT02_DESCRIPTOR pT02 );
-void Mds_MpduProcess( struct wbsoft_priv *adapter, struct wb35_descriptor *pRxDes );
+unsigned char Mds_initial(struct wbsoft_priv *adapter);
+void Mds_Destroy(struct wbsoft_priv *adapter);
+void Mds_Tx(struct wbsoft_priv *adapter);
+void Mds_SendComplete(struct wbsoft_priv *adapter, PT02_DESCRIPTOR pt02);
+void Mds_MpduProcess(struct wbsoft_priv *adapter, struct wb35_descriptor *prxdes);
extern void DataDmp(u8 *pdata, u32 len, u32 offset);
-// For data frame sending 20060802
-u16 MDS_GetPacketSize( struct wbsoft_priv *adapter );
-void MDS_GetNextPacket( struct wbsoft_priv *adapter, struct wb35_descriptor *pDes );
-void MDS_GetNextPacketComplete( struct wbsoft_priv *adapter, struct wb35_descriptor *pDes );
-void MDS_SendResult( struct wbsoft_priv *adapter, u8 PacketId, unsigned char SendOK );
+/* For data frame sending */
+u16 MDS_GetPacketSize(struct wbsoft_priv *adapter);
+void MDS_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
+void MDS_GetNextPacketComplete(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
+void MDS_SendResult(struct wbsoft_priv *adapter, u8 packetid, unsigned char sendok);
#endif
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index 217ff0819a9..89328c5dbda 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -15,122 +15,121 @@ enum {
WLAN_PREAMBLE_TYPE_LONG,
};
-////////////////////////////////////////////////////////////////////////////////////////////////////////
-#define MAX_USB_TX_DESCRIPTOR 15 // IS89C35 ability
-#define MAX_USB_TX_BUFFER_NUMBER 4 // Virtual pre-buffer number of MAX_USB_TX_BUFFER
-#define MAX_USB_TX_BUFFER 4096 // IS89C35 ability 4n alignment is required for hardware
+/*****************************************************************************/
+#define MAX_USB_TX_DESCRIPTOR 15 /* IS89C35 ability */
+#define MAX_USB_TX_BUFFER_NUMBER 4 /* Virtual pre-buffer number of MAX_USB_TX_BUFFER */
+#define MAX_USB_TX_BUFFER 4096 /* IS89C35 ability 4n alignment is required for hardware */
-#define AUTH_REQUEST_PAIRWISE_ERROR 0 // _F flag setting
-#define AUTH_REQUEST_GROUP_ERROR 1 // _F flag setting
+#define AUTH_REQUEST_PAIRWISE_ERROR 0 /* _F flag setting */
+#define AUTH_REQUEST_GROUP_ERROR 1 /* _F flag setting */
-#define CURRENT_FRAGMENT_THRESHOLD (adapter->Mds.TxFragmentThreshold & ~0x1)
-#define CURRENT_PREAMBLE_MODE psLOCAL->boShortPreamble?WLAN_PREAMBLE_TYPE_SHORT:WLAN_PREAMBLE_TYPE_LONG
-#define CURRENT_TX_RATE_FOR_MNG adapter->sLocalPara.CurrentTxRateForMng
-#define CURRENT_PROTECT_MECHANISM psLOCAL->boProtectMechanism
-#define CURRENT_RTS_THRESHOLD adapter->Mds.TxRTSThreshold
+#define CURRENT_FRAGMENT_THRESHOLD (adapter->Mds.TxFragmentThreshold & ~0x1)
+#define CURRENT_PREAMBLE_MODE (psLOCAL->boShortPreamble ? WLAN_PREAMBLE_TYPE_SHORT : WLAN_PREAMBLE_TYPE_LONG)
+#define CURRENT_TX_RATE_FOR_MNG (adapter->sLocalPara.CurrentTxRateForMng)
+#define CURRENT_PROTECT_MECHANISM (psLOCAL->boProtectMechanism)
+#define CURRENT_RTS_THRESHOLD (adapter->Mds.TxRTSThreshold)
-#define MIB_GS_XMIT_OK_INC adapter->sLocalPara.GS_XMIT_OK++
-#define MIB_GS_RCV_OK_INC adapter->sLocalPara.GS_RCV_OK++
-#define MIB_GS_XMIT_ERROR_INC adapter->sLocalPara.GS_XMIT_ERROR
+#define MIB_GS_XMIT_OK_INC (adapter->sLocalPara.GS_XMIT_OK++)
+#define MIB_GS_RCV_OK_INC (adapter->sLocalPara.GS_RCV_OK++)
+#define MIB_GS_XMIT_ERROR_INC (adapter->sLocalPara.GS_XMIT_ERROR)
-//---------- TX -----------------------------------
+/* ---------- TX ----------------------------------- */
#define ETHERNET_TX_DESCRIPTORS MAX_USB_TX_BUFFER_NUMBER
-//---------- RX ------------------------------------
-#define ETHERNET_RX_DESCRIPTORS 8 //It's not necessary to allocate more than 2 in sync indicate
-
-//================================================================
-// Configration default value
-//================================================================
-#define DEFAULT_MULTICASTLISTMAX 32 // standard
-#define DEFAULT_TX_BURSTLENGTH 3 // 32 Longwords
-#define DEFAULT_RX_BURSTLENGTH 3 // 32 Longwords
-#define DEFAULT_TX_THRESHOLD 0 // Full Packet
-#define DEFAULT_RX_THRESHOLD 0 // Full Packet
-#define DEFAULT_MAXTXRATE 6 // 11 Mbps (Long)
-#define DEFAULT_CHANNEL 3 // Chennel 3
-#define DEFAULT_RTSThreshold 2347 // Disable RTS
-//#define DEFAULT_PME 1 // Enable
-#define DEFAULT_PME 0 // Disable
-#define DEFAULT_SIFSTIME 10
-#define DEFAULT_ACKTIME_1ML 304 // 148+44+112 911220 by LCC
-#define DEFAULT_ACKTIME_2ML 248 // 148+44+56 911220 by LCC
-#define DEFAULT_FRAGMENT_THRESHOLD 2346 // No fragment
-#define DEFAULT_PREAMBLE_LENGTH 72
+/* ---------- RX ----------------------------------- */
+#define ETHERNET_RX_DESCRIPTORS 8 /* It's not necessary to allocate more than 2 in sync indicate */
+
+/*
+ * ================================================================
+ * Configuration default value
+ * ================================================================
+ */
+#define DEFAULT_MULTICASTLISTMAX 32 /* standard */
+#define DEFAULT_TX_BURSTLENGTH 3 /* 32 Longwords */
+#define DEFAULT_RX_BURSTLENGTH 3 /* 32 Longwords */
+#define DEFAULT_TX_THRESHOLD 0 /* Full Packet */
+#define DEFAULT_RX_THRESHOLD 0 /* Full Packet */
+#define DEFAULT_MAXTXRATE 6 /* 11 Mbps (Long) */
+#define DEFAULT_CHANNEL			3	/* Channel 3 */
+#define DEFAULT_RTSThreshold 2347 /* Disable RTS */
+#define DEFAULT_PME 0 /* Disable */
+#define DEFAULT_SIFSTIME 10
+#define DEFAULT_ACKTIME_1ML 304 /* 148 + 44 + 112 */
+#define DEFAULT_ACKTIME_2ML 248 /* 148 + 44 + 56 */
+#define DEFAULT_FRAGMENT_THRESHOLD 2346 /* No fragment */
+#define DEFAULT_PREAMBLE_LENGTH 72
#define DEFAULT_PLCPHEADERTIME_LENGTH 24
-/*------------------------------------------------------------------------
- 0.96 sec since time unit of the R03 for the current, W89C32 is about 60ns
- instead of 960 ns. This shall be fixed in the future W89C32
- -------------------------------------------------------------------------*/
-#define DEFAULT_MAX_RECEIVE_TIME 16440000
+/*
+ * ------------------------------------------------------------------------
+ * 0.96 sec, since the time unit of the R03 for the current W89C32 is about 60 ns
+ * instead of 960 ns. This shall be fixed in the future W89C32
+ * -------------------------------------------------------------------------
+ */
+#define DEFAULT_MAX_RECEIVE_TIME 16440000
-#define RX_BUF_SIZE 2352 // 600 // For 301 must be multiple of 8
-#define MAX_RX_DESCRIPTORS 18 // Rx Layer 2
+#define RX_BUF_SIZE 2352 /* 600 - For 301 must be multiple of 8 */
+#define MAX_RX_DESCRIPTORS 18 /* Rx Layer 2 */
+/* For brand-new rx system */
+#define MDS_ID_IGNORE ETHERNET_RX_DESCRIPTORS
-// For brand-new rx system
-#define MDS_ID_IGNORE ETHERNET_RX_DESCRIPTORS
-
-// For Tx Packet status classify
-#define PACKET_FREE_TO_USE 0
-#define PACKET_COME_FROM_NDIS 0x08
-#define PACKET_COME_FROM_MLME 0x80
-#define PACKET_SEND_COMPLETE 0xff
+/* For Tx Packet status classify */
+#define PACKET_FREE_TO_USE 0
+#define PACKET_COME_FROM_NDIS 0x08
+#define PACKET_COME_FROM_MLME 0x80
+#define PACKET_SEND_COMPLETE 0xff
struct wb35_mds {
- // For Tx usage
- u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ];
+ /* For Tx usage */
+ u8 TxOwner[((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03)];
u8 *pTxBuffer;
- u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ];
- u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data
- u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928
+ u16 TxBufferSize[((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01)];
+	u8	TxDesFrom[((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03)]; /* 1: MLME 2: NDIS control 3: NDIS data */
+ u8 TxCountInBuffer[((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03)];
- u8 TxFillIndex;//the next index of TxBuffer can be used
- u8 TxDesIndex;//The next index of TxDes can be used
- u8 ScanTxPause; //data Tx pause because the scanning is progressing, but probe request Tx won't.
- u8 TxPause;//For pause the Mds_Tx modult
+ u8 TxFillIndex; /* the next index of TxBuffer can be used */
+ u8 TxDesIndex; /* The next index of TxDes can be used */
+	u8	ScanTxPause;	/* data Tx pauses while scanning is in progress, but probe request Tx won't */
+	u8	TxPause;	/* For pausing the Mds_Tx module */
- atomic_t TxThreadCount;//For thread counting 931130.4.v
-//950301 delete due to HW
-// atomic_t TxConcurrentCount;//931130.4.w
+ atomic_t TxThreadCount; /* For thread counting */
- u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu
+	u16	TxResult[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)]; /* Collect the sending result of Mpdu */
- u8 MicRedundant[8]; // For tmp use
- u8 *MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
+ u8 MicRedundant[8]; /* For tmp use */
+	u8	*MicWriteAddress[2];	/* The start addresses to fill the Mic; two pointers because the Mic may be fragmented */
- u16 MicWriteSize[2]; //931130.4.x
+ u16 MicWriteSize[2];
- u16 MicAdd; // If want to add the Mic, this variable equal to 8
- u16 MicWriteIndex;//The number of MicWriteAddress 931130.4.y
+	u16	MicAdd;	/* If the Mic needs to be added, this variable equals 8 */
+ u16 MicWriteIndex; /* The number of MicWriteAddress */
- u8 TxRate[ ((MAX_USB_TX_DESCRIPTOR+1)&~0x01) ][2]; // [0] current tx rate, [1] fall back rate
- u8 TxInfo[ ((MAX_USB_TX_DESCRIPTOR+1)&~0x01) ]; //Store information for callback function
+ u8 TxRate[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)][2]; /* [0] current tx rate, [1] fall back rate */
+	u8	TxInfo[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)];	/* Store information for callback function */
- //WKCHEN added for scanning mechanism
- u8 TxToggle; //It is TRUE if there are tx activities in some time interval
+ /* for scanning mechanism */
+ u8 TxToggle; /* It is TRUE if there are tx activities in some time interval */
u8 Reserved_[3];
- //---------- for Tx Parameter
- u16 TxFragmentThreshold; // For frame body only
+ /* ---- for Tx Parameter */
+ u16 TxFragmentThreshold; /* For frame body only */
u16 TxRTSThreshold;
- u32 MaxReceiveTime;//911220.3 Add
-
- // depend on OS,
- u32 MulticastListNo;
- u32 PacketFilter; // Setting by NDIS, the current packet filter in use.
- u8 MulticastAddressesArray[DEFAULT_MULTICASTLISTMAX][MAC_ADDR_LENGTH];
+ u32 MaxReceiveTime;
- //COUNTERMEASURE
- u8 bMICfailCount;
- u8 boCounterMeasureBlock;
- u8 reserved_4[2];
+ /* depend on OS, */
+ u32 MulticastListNo;
+	u32	PacketFilter; /* Set by NDIS; the current packet filter in use. */
+ u8 MulticastAddressesArray[DEFAULT_MULTICASTLISTMAX][MAC_ADDR_LENGTH];
- u32 TxTsc; // 20060214
- u32 TxTsc_2; // 20060214
+ /* COUNTERMEASURE */
+ u8 bMICfailCount;
+ u8 boCounterMeasureBlock;
+ u8 reserved_4[2];
+ u32 TxTsc;
+ u32 TxTsc_2;
};
#endif
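
A note on the array bounds used throughout mds_s.h, e.g. ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03): the (n + 3) & ~0x03 expression rounds n up to the next multiple of 4, so the u8 arrays keep sizes that are multiples of 4 bytes. A small sketch of the idiom (hypothetical helper name, not part of the header):

static inline unsigned int round_up_to_4(unsigned int n)
{
	return (n + 3) & ~0x03u;	/* 15 -> 16, 5 -> 8, 4 -> 4 */
}

/* e.g. MAX_USB_TX_DESCRIPTOR is 15, so TxDesFrom[] above is sized to 16 */
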
diff --git a/drivers/staging/winbond/mlme_s.h b/drivers/staging/winbond/mlme_s.h
index 1217a1c025e..a7ef3c78022 100644
--- a/drivers/staging/winbond/mlme_s.h
+++ b/drivers/staging/winbond/mlme_s.h
@@ -7,28 +7,29 @@
#include "mac_structures.h"
#include "mds_s.h"
-//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-// Mlme.h
-// Define the related definitions of MLME module
-// history -- 01/14/03' created
-//
-//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-#define AUTH_REJECT_REASON_CHALLENGE_FAIL 1
-
-//====== the state of MLME module
-#define INACTIVE 0x0
-#define IDLE_SCAN 0x1
-
-//====== the state of MLME/ESS module
-#define STATE_1 0x2
-#define AUTH_REQ 0x3
-#define AUTH_WEP 0x4
-#define STATE_2 0x5
-#define ASSOC_REQ 0x6
-#define STATE_3 0x7
-
-//====== the state of MLME/IBSS module
+/*
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * Mlme.h
+ * Define the related definitions of MLME module
+ *
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ */
+
+#define AUTH_REJECT_REASON_CHALLENGE_FAIL 1
+
+/* the state of MLME module */
+#define INACTIVE 0x0
+#define IDLE_SCAN 0x1
+
+/* the state of MLME/ESS module */
+#define STATE_1 0x2
+#define AUTH_REQ 0x3
+#define AUTH_WEP 0x4
+#define STATE_2 0x5
+#define ASSOC_REQ 0x6
+#define STATE_3 0x7
+
+/* the state of MLME/IBSS module */
#define IBSS_JOIN_SYNC 0x8
#define IBSS_AUTH_REQ 0x9
#define IBSS_AUTH_CHANLGE 0xa
@@ -38,159 +39,150 @@
-//=========================================
-//depend on D5C(MAC timing control 03 register): MaxTxMSDULifeTime default 0x80000us
-#define AUTH_FAIL_TIMEOUT 550
-#define ASSOC_FAIL_TIMEOUT 550
+/*
+ * =========================================
+ * depends on D5C (MAC timing control 03 register):
+ * MaxTxMSDULifeTime default 0x80000us
+ */
+#define AUTH_FAIL_TIMEOUT 550
+#define ASSOC_FAIL_TIMEOUT 550
#define REASSOC_FAIL_TIMEOUT 550
-
-
-//
-// MLME task global CONSTANTS, STRUCTURE, variables
-//
-
-
-/////////////////////////////////////////////////////////////
-// enum_ResultCode --
-// Result code returned from MLME to SME.
-//
-/////////////////////////////////////////////////////////////
-// PD43 20030829 Modifiled
-//#define SUCCESS 0
-#define MLME_SUCCESS 0 //follow spec.
-#define INVALID_PARAMETERS 1 //Not following spec.
-#define NOT_SUPPPORTED 2
-#define TIMEOUT 3
-#define TOO_MANY_SIMULTANEOUS_REQUESTS 4
-#define REFUSED 5
-#define BSS_ALREADY_STARTED_OR_JOINED 6
-#define TRANSMIT_FRAME_FAIL 7
-#define NO_BSS_FOUND 8
-#define RETRY 9
-#define GIVE_UP 10
-
-
-#define OPEN_AUTH 0
-#define SHARE_AUTH 1
-#define ANY_AUTH 2
-#define WPA_AUTH 3 //for WPA
-#define WPAPSK_AUTH 4
-#define WPANONE_AUTH 5
-///////////////////////////////////////////// added by ws 04/19/04
+/* MLME task global CONSTANTS, STRUCTURE, variables */
+
+/* =========================================
+ * enum_ResultCode --
+ * Result code returned from MLME to SME.
+ * =========================================
+ */
+#define MLME_SUCCESS 0 /* follow spec. */
+#define INVALID_PARAMETERS 1 /* Not following spec. */
+#define NOT_SUPPPORTED 2
+#define TIMEOUT 3
+#define TOO_MANY_SIMULTANEOUS_REQUESTS 4
+#define REFUSED 5
+#define BSS_ALREADY_STARTED_OR_JOINED 6
+#define TRANSMIT_FRAME_FAIL 7
+#define NO_BSS_FOUND 8
+#define RETRY 9
+#define GIVE_UP 10
+
+
+#define OPEN_AUTH 0
+#define SHARE_AUTH 1
+#define ANY_AUTH 2
+#define WPA_AUTH 3 /* for WPA */
+#define WPAPSK_AUTH 4
+#define WPANONE_AUTH 5
#ifdef _WPA2_
-#define WPA2_AUTH 6//for WPA2
-#define WPA2PSK_AUTH 7
-#endif //end def _WPA2_
-
-//////////////////////////////////////////////////////////////////
-//define the msg type of MLME module
-//////////////////////////////////////////////////////////////////
-//--------------------------------------------------------
-//from SME
-
-#define MLMEMSG_AUTH_REQ 0x0b
-#define MLMEMSG_DEAUTH_REQ 0x0c
-#define MLMEMSG_ASSOC_REQ 0x0d
-#define MLMEMSG_REASSOC_REQ 0x0e
-#define MLMEMSG_DISASSOC_REQ 0x0f
-#define MLMEMSG_START_IBSS_REQ 0x10
-#define MLMEMSG_IBSS_NET_CFM 0x11
-
-//from RX :
-#define MLMEMSG_RCV_MLMEFRAME 0x20
-#define MLMEMSG_RCV_ASSOCRSP 0x22
-#define MLMEMSG_RCV_REASSOCRSP 0x24
-#define MLMEMSG_RCV_DISASSOC 0x2b
-#define MLMEMSG_RCV_AUTH 0x2c
-#define MLMEMSG_RCV_DEAUTH 0x2d
-
-
-//from TX callback
-#define MLMEMSG_TX_CALLBACK 0x40
-#define MLMEMSG_ASSOCREQ_CALLBACK 0x41
-#define MLMEMSG_REASSOCREQ_CALLBACK 0x43
-#define MLMEMSG_DISASSOC_CALLBACK 0x4a
-#define MLMEMSG_AUTH_CALLBACK 0x4c
-#define MLMEMSG_DEAUTH_CALLBACK 0x4d
-
-//#define MLMEMSG_JOIN_FAIL 4
-//#define MLMEMSG_AUTHEN_FAIL 18
-#define MLMEMSG_TIMEOUT 0x50
-
-///////////////////////////////////////////////////////////////////////////
-//Global data structures
-#define MAX_NUM_TX_MMPDU 2
-#define MAX_MMPDU_SIZE 1512
-#define MAX_NUM_RX_MMPDU 6
-
-
-///////////////////////////////////////////////////////////////////////////
-//MACRO
-#define boMLME_InactiveState(_AA_) (_AA_->wState==INACTIVE)
-#define boMLME_IdleScanState(_BB_) (_BB_->wState==IDLE_SCAN)
-#define boMLME_FoundSTAinfo(_CC_) (_CC_->wState>=IDLE_SCAN)
-
-typedef struct _MLME_FRAME
-{
- //NDIS_PACKET MLME_Packet;
- s8 * pMMPDU;
- u16 len;
- u8 DataType;
- u8 IsInUsed;
+#define WPA2_AUTH 6 /* for WPA2 */
+#define WPA2PSK_AUTH 7
+#endif /* end def _WPA2_ */
+
+/*
+ * =========================================
+ * define the msg type of MLME module
+ * =========================================
+ */
+
+/* from SME */
+#define MLMEMSG_AUTH_REQ 0x0b
+#define MLMEMSG_DEAUTH_REQ 0x0c
+#define MLMEMSG_ASSOC_REQ 0x0d
+#define MLMEMSG_REASSOC_REQ 0x0e
+#define MLMEMSG_DISASSOC_REQ 0x0f
+#define MLMEMSG_START_IBSS_REQ 0x10
+#define MLMEMSG_IBSS_NET_CFM 0x11
+
+/* from RX */
+#define MLMEMSG_RCV_MLMEFRAME 0x20
+#define MLMEMSG_RCV_ASSOCRSP 0x22
+#define MLMEMSG_RCV_REASSOCRSP 0x24
+#define MLMEMSG_RCV_DISASSOC 0x2b
+#define MLMEMSG_RCV_AUTH 0x2c
+#define MLMEMSG_RCV_DEAUTH 0x2d
+
+
+/* from TX callback */
+#define MLMEMSG_TX_CALLBACK 0x40
+#define MLMEMSG_ASSOCREQ_CALLBACK 0x41
+#define MLMEMSG_REASSOCREQ_CALLBACK 0x43
+#define MLMEMSG_DISASSOC_CALLBACK 0x4a
+#define MLMEMSG_AUTH_CALLBACK 0x4c
+#define MLMEMSG_DEAUTH_CALLBACK 0x4d
+
+#define MLMEMSG_TIMEOUT 0x50
+
+/*
+ * ==============================================
+ * Global data structures
+ * ==============================================
+ */
+#define MAX_NUM_TX_MMPDU 2
+#define MAX_MMPDU_SIZE 1512
+#define MAX_NUM_RX_MMPDU 6
+
+
+/*
+ * ==============================================
+ * MACRO
+ * ==============================================
+ */
+#define boMLME_InactiveState(_AA_) (_AA_->wState == INACTIVE)
+#define boMLME_IdleScanState(_BB_) (_BB_->wState == IDLE_SCAN)
+#define boMLME_FoundSTAinfo(_CC_) (_CC_->wState >= IDLE_SCAN)
+
+typedef struct _MLME_FRAME {
+ s8 *pMMPDU;
+ u16 len;
+ u8 DataType;
+ u8 IsInUsed;
spinlock_t MLMESpinLock;
- u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
- u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ];
+ u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
+ u8 TxMMPDUInUse[(MAX_NUM_TX_MMPDU + 3) & ~0x03];
u16 wNumTxMMPDU;
u16 wNumTxMMPDUDiscarded;
- u8 RxMMPDU[MAX_NUM_RX_MMPDU][MAX_MMPDU_SIZE];
- u8 SaveRxBufSlotInUse[ (MAX_NUM_RX_MMPDU+3) & ~0x03 ];
+ u8 RxMMPDU[MAX_NUM_RX_MMPDU][MAX_MMPDU_SIZE];
+ u8 SaveRxBufSlotInUse[(MAX_NUM_RX_MMPDU + 3) & ~0x03];
u16 wNumRxMMPDU;
u16 wNumRxMMPDUDiscarded;
- u16 wNumRxMMPDUInMLME; // Number of the Rx MMPDU
- u16 reserved_1; // in MLME.
- // excluding the discarded
+ u16 wNumRxMMPDUInMLME; /* Number of the Rx MMPDU */
+ u16 reserved_1; /* in MLME. */
+ /* excluding the discarded */
} MLME_FRAME, *psMLME_FRAME;
typedef struct _AUTHREQ {
- u8 peerMACaddr[MAC_ADDR_LENGTH];
- u16 wAuthAlgorithm;
-
+ u8 peerMACaddr[MAC_ADDR_LENGTH];
+ u16 wAuthAlgorithm;
} MLME_AUTHREQ_PARA, *psMLME_AUTHREQ_PARA;
typedef struct _ASSOCREQ {
- u8 PeerSTAAddr[MAC_ADDR_LENGTH];
- u16 CapabilityInfo;
- u16 ListenInterval;
-
-}__attribute__ ((packed)) MLME_ASSOCREQ_PARA, *psMLME_ASSOCREQ_PARA;
+ u8 PeerSTAAddr[MAC_ADDR_LENGTH];
+ u16 CapabilityInfo;
+ u16 ListenInterval;
+} __attribute__ ((packed)) MLME_ASSOCREQ_PARA, *psMLME_ASSOCREQ_PARA;
typedef struct _REASSOCREQ {
- u8 NewAPAddr[MAC_ADDR_LENGTH];
- u16 CapabilityInfo;
- u16 ListenInterval;
-
-}__attribute__ ((packed)) MLME_REASSOCREQ_PARA, *psMLME_REASSOCREQ_PARA;
+ u8 NewAPAddr[MAC_ADDR_LENGTH];
+ u16 CapabilityInfo;
+ u16 ListenInterval;
+} __attribute__ ((packed)) MLME_REASSOCREQ_PARA, *psMLME_REASSOCREQ_PARA;
typedef struct _MLMECALLBACK {
-
- u8 *psFramePtr;
- u8 bResult;
-
+ u8 *psFramePtr;
+ u8 bResult;
} MLME_TXCALLBACK, *psMLME_TXCALLBACK;
-typedef struct _RXDATA
-{
+typedef struct _RXDATA {
s32 FrameLength;
- u8 __attribute__ ((packed)) *pbFramePtr;
-
-}__attribute__ ((packed)) RXDATA, *psRXDATA;
+ u8 __attribute__ ((packed)) *pbFramePtr;
+} __attribute__ ((packed)) RXDATA, *psRXDATA;
#endif
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
index f856b94a781..dcd8a11b5d0 100644
--- a/drivers/staging/winbond/mlmetxrx.c
+++ b/drivers/staging/winbond/mlmetxrx.c
@@ -1,26 +1,26 @@
-//============================================================================
-// Module Name:
-// MLMETxRx.C
-//
-// Description:
-// The interface between MDS (MAC Data Service) and MLME.
-//
-// Revision History:
-// --------------------------------------------------------------------------
-// 200209 UN20 Jennifer Xu
-// Initial Release
-// 20021108 PD43 Austin Liu
-// 20030117 PD43 Austin Liu
-// Deleted MLMEReturnPacket and MLMEProcThread()
-//
-// Copyright (c) 1996-2002 Winbond Electronics Corp. All Rights Reserved.
-//============================================================================
+/* ============================================================================
+ Module Name:
+ MLMETxRx.C
+
+ Description:
+ The interface between MDS (MAC Data Service) and MLME.
+
+ Revision History:
+ --------------------------------------------------------------------------
+ 200209 UN20 Jennifer Xu
+ Initial Release
+ 20021108 PD43 Austin Liu
+ 20030117 PD43 Austin Liu
+ Deleted MLMEReturnPacket and MLMEProcThread()
+
+ Copyright (c) 1996-2002 Winbond Electronics Corp. All Rights Reserved.
+============================================================================ */
#include "sysdef.h"
#include "mds_f.h"
-//=============================================================================
-u8 MLMESendFrame(struct wbsoft_priv * adapter, u8 *pMMPDU, u16 len, u8 DataType)
+/* ============================================================================= */
+u8 MLMESendFrame(struct wbsoft_priv *adapter, u8 *pMMPDU, u16 len, u8 DataType)
/* DataType : FRAME_TYPE_802_11_MANAGEMENT, FRAME_TYPE_802_11_MANAGEMENT_CHALLENGE,
FRAME_TYPE_802_11_DATA */
{
@@ -30,17 +30,17 @@ u8 MLMESendFrame(struct wbsoft_priv * adapter, u8 *pMMPDU, u16 len, u8 DataType)
}
adapter->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME;
- // Keep information for sending
+ /* Keep information for sending */
adapter->sMlmeFrame.pMMPDU = pMMPDU;
adapter->sMlmeFrame.DataType = DataType;
- // len must be the last setting due to QUERY_SIZE_SECOND of Mds
+ /* len must be the last setting due to QUERY_SIZE_SECOND of Mds */
adapter->sMlmeFrame.len = len;
adapter->sMlmeFrame.wNumTxMMPDU++;
- // H/W will enter power save by set the register. S/W don't send null frame
- //with PWRMgt bit enbled to enter power save now.
+	/* H/W will enter power save by setting the register. S/W doesn't send a null frame
+	   with the PWRMgt bit enabled to enter power save now. */
- // Transmit NDIS packet
+ /* Transmit NDIS packet */
Mds_Tx(adapter);
return true;
}
@@ -60,7 +60,7 @@ static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
{
int i;
- // Reclaim the data buffer
+ /* Reclaim the data buffer */
for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
break;
@@ -68,24 +68,24 @@ static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
if (adapter->sMlmeFrame.TxMMPDUInUse[i])
adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
else {
- // Something wrong
- // PD43 Add debug code here???
+ /* Something wrong
+ PD43 Add debug code here??? */
}
}
void
-MLME_SendComplete(struct wbsoft_priv * adapter, u8 PacketID, unsigned char SendOK)
+MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
{
MLME_TXCALLBACK TxCallback;
- // Reclaim the data buffer
+ /* Reclaim the data buffer */
adapter->sMlmeFrame.len = 0;
- MLMEfreeMMPDUBuffer( adapter, adapter->sMlmeFrame.pMMPDU );
+ MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
TxCallback.bResult = MLME_SUCCESS;
- // Return resource
+ /* Return resource */
adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
}
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
index 6c04e3e03e3..d1aa2617d24 100644
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ b/drivers/staging/winbond/mlmetxrx_f.h
@@ -1,10 +1,10 @@
-//================================================================
+/* ================================================================
// MLMETxRx.H --
//
// Functions defined in MLMETxRx.c.
//
// Copyright (c) 2002 Winbond Electrics Corp. All Rights Reserved.
-//================================================================
+//================================================================ */
#ifndef _MLMETXRX_H
#define _MLMETXRX_H
@@ -12,7 +12,7 @@
void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes);
u8 MLMESendFrame(struct wbsoft_priv *adapter,
- u8 * pMMPDU, u16 len, u8 DataType);
+ u8 *pMMPDU, u16 len, u8 DataType);
void
MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
diff --git a/drivers/staging/winbond/mto.c b/drivers/staging/winbond/mto.c
index 5e7fa1cd0ae..9cd212783d6 100644
--- a/drivers/staging/winbond/mto.c
+++ b/drivers/staging/winbond/mto.c
@@ -1,222 +1,181 @@
-//============================================================================
-// MTO.C -
-//
-// Description:
-// MAC Throughput Optimization for W89C33 802.11g WLAN STA.
-//
-// The following MIB attributes or internal variables will be affected
-// while the MTO is being executed:
-// dot11FragmentationThreshold,
-// dot11RTSThreshold,
-// transmission rate and PLCP preamble type,
-// CCA mode,
-// antenna diversity.
-//
-// Revision history:
-// --------------------------------------------------------------------------
-// 20031227 UN20 Pete Chao
-// First draft
-// 20031229 Turbo copy from PD43
-// 20040210 Kevin revised
-// Copyright (c) 2003 Winbond Electronics Corp. All rights reserved.
-//============================================================================
-
-// LA20040210_DTO kevin
+/*
+ * ============================================================================
+ * MTO.C -
+ *
+ * Description:
+ * MAC Throughput Optimization for W89C33 802.11g WLAN STA.
+ *
+ * The following MIB attributes or internal variables will be affected
+ * while the MTO is being executed:
+ * dot11FragmentationThreshold,
+ * dot11RTSThreshold,
+ * transmission rate and PLCP preamble type,
+ * CCA mode,
+ * antenna diversity.
+ *
+ * Copyright (c) 2003 Winbond Electronics Corp. All rights reserved.
+ * ============================================================================
+ */
+
#include "sysdef.h"
#include "sme_api.h"
#include "wbhal_f.h"
-// Declare SQ3 to rate and fragmentation threshold table
-// Declare fragmentation thresholds table
-#define MTO_MAX_FRAG_TH_LEVELS 5
-#define MTO_MAX_DATA_RATE_LEVELS 12
+/* Declare SQ3 to rate and fragmentation threshold table */
+/* Declare fragmentation thresholds table */
+#define MTO_MAX_FRAG_TH_LEVELS 5
+#define MTO_MAX_DATA_RATE_LEVELS 12
-u16 MTO_Frag_Th_Tbl[MTO_MAX_FRAG_TH_LEVELS] =
-{
- 256, 384, 512, 768, 1536
+u16 MTO_Frag_Th_Tbl[MTO_MAX_FRAG_TH_LEVELS] = {
+ 256, 384, 512, 768, 1536
};
-// Declare data rate table
-//The following table will be changed at anytime if the opration rate supported by AP don't
-//match the table
+/*
+ * Declare data rate table:
+ * The following table will be changed at any time if the operation rate
+ * supported by the AP doesn't match the table
+ */
static u8 MTO_Data_Rate_Tbl[MTO_MAX_DATA_RATE_LEVELS] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108
+ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108
};
-static int TotalTxPkt = 0;
-static int TotalTxPktRetry = 0;
-static int retryrate_rec[MTO_MAX_DATA_RATE_LEVELS];//this record the retry rate at different data rate
+static int TotalTxPkt;
+static int TotalTxPktRetry;
+/* this records the retry rate at different data rates */
+static int retryrate_rec[MTO_MAX_DATA_RATE_LEVELS];
-static int PeriodTotalTxPkt = 0;
-static int PeriodTotalTxPktRetry = 0;
+static int PeriodTotalTxPkt;
+static int PeriodTotalTxPktRetry;
-static u8 boSparseTxTraffic = false;
+static u8 boSparseTxTraffic;
void MTO_Init(struct wbsoft_priv *adapter);
void TxRateReductionCtrl(struct wbsoft_priv *adapter);
-/** 1.1.31.1000 Turbo modify */
void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
void MTO_TxFailed(struct wbsoft_priv *adapter);
void hal_get_dto_para(struct wbsoft_priv *adapter, char *buffer);
-//===========================================================================
-// MTO_Init --
-//
-// Description:
-// Initialize MTO parameters.
-//
-// This function should be invoked during system initialization.
-//
-// Arguments:
-// adapter - The pointer to the Miniport adapter Context
-//
-// Return Value:
-// None
-//============================================================================
+/*
+ * ===========================================================================
+ * MTO_Init --
+ *
+ * Description:
+ * Initialize MTO parameters.
+ *
+ * This function should be invoked during system initialization.
+ *
+ * Arguments:
+ * adapter - The pointer to the Miniport adapter Context
+ * ===========================================================================
+ */
void MTO_Init(struct wbsoft_priv *adapter)
{
- int i;
-
- //[WKCHEN]MTO_CCA_MODE_SETUP()= (u8) hal_get_cca_mode(MTO_HAL());
- //[WKCHEN]MTO_CCA_MODE() = MTO_CCA_MODE_SETUP();
-
- //MTO_PREAMBLE_TYPE() = MTO_PREAMBLE_LONG;
- MTO_PREAMBLE_TYPE() = MTO_PREAMBLE_SHORT; // for test
-
- MTO_CNT_ANT(0) = 0;
- MTO_CNT_ANT(1) = 0;
- MTO_SQ_ANT(0) = 0;
- MTO_SQ_ANT(1) = 0;
-
- MTO_AGING_TIMEOUT() = 0;
-
- // The following parameters should be initialized to the values set by user
- //
- //MTO_RATE_LEVEL() = 10;
- MTO_RATE_LEVEL() = 0;
- MTO_FRAG_TH_LEVEL() = 4;
- /** 1.1.23.1000 Turbo modify from -1 to +1
- MTO_RTS_THRESHOLD() = MTO_FRAG_TH() - 1;
- MTO_RTS_THRESHOLD_SETUP() = MTO_FRAG_TH() - 1;
- */
- MTO_RTS_THRESHOLD() = MTO_FRAG_TH() + 1;
- MTO_RTS_THRESHOLD_SETUP() = MTO_FRAG_TH() + 1;
- // 1.1.23.1000 Turbo add for mto change preamble from 0 to 1
- MTO_RATE_CHANGE_ENABLE() = 1;
- MTO_FRAG_CHANGE_ENABLE() = 0; // 1.1.29.1000 Turbo add don't support frag
- //The default valud of ANTDIV_DEFAULT_ON will be decided by EEPROM
- //#ifdef ANTDIV_DEFAULT_ON
- //#else
- //#endif
- MTO_POWER_CHANGE_ENABLE() = 1;
- MTO_PREAMBLE_CHANGE_ENABLE()= 1;
- MTO_RTS_CHANGE_ENABLE() = 0; // 1.1.29.1000 Turbo add don't support frag
- // 20040512 Turbo add
- //old_antenna[0] = 1;
- //old_antenna[1] = 0;
- //old_antenna[2] = 1;
- //old_antenna[3] = 0;
- for (i=0;i<MTO_MAX_DATA_RATE_LEVELS;i++)
- retryrate_rec[i]=5;
+ int i;
+
+ MTO_PREAMBLE_TYPE() = MTO_PREAMBLE_SHORT; /* for test */
+
+ MTO_CNT_ANT(0) = 0;
+ MTO_CNT_ANT(1) = 0;
+ MTO_SQ_ANT(0) = 0;
+ MTO_SQ_ANT(1) = 0;
+
+ MTO_AGING_TIMEOUT() = 0;
+
+ /* The following parameters should be initialized to the values set by user */
+ MTO_RATE_LEVEL() = 0;
+ MTO_FRAG_TH_LEVEL() = 4;
+ MTO_RTS_THRESHOLD() = MTO_FRAG_TH() + 1;
+ MTO_RTS_THRESHOLD_SETUP() = MTO_FRAG_TH() + 1;
+ MTO_RATE_CHANGE_ENABLE() = 1;
+ MTO_FRAG_CHANGE_ENABLE() = 0;
+ MTO_POWER_CHANGE_ENABLE() = 1;
+ MTO_PREAMBLE_CHANGE_ENABLE() = 1;
+ MTO_RTS_CHANGE_ENABLE() = 0;
+
+ for (i = 0; i < MTO_MAX_DATA_RATE_LEVELS; i++)
+ retryrate_rec[i] = 5;
MTO_TXFLOWCOUNT() = 0;
- //--------- DTO threshold parameters -------------
- //MTOPARA_PERIODIC_CHECK_CYCLE() = 50;
- MTOPARA_PERIODIC_CHECK_CYCLE() = 10;
- MTOPARA_RSSI_TH_FOR_ANTDIV() = 10;
- MTOPARA_TXCOUNT_TH_FOR_CALC_RATE() = 50;
- MTOPARA_TXRATE_INC_TH() = 10;
- MTOPARA_TXRATE_DEC_TH() = 30;
- MTOPARA_TXRATE_EQ_TH() = 40;
- MTOPARA_TXRATE_BACKOFF() = 12;
- MTOPARA_TXRETRYRATE_REDUCE() = 6;
- if ( MTO_TXPOWER_FROM_EEPROM == 0xff)
- {
- switch( MTO_HAL()->phy_type)
- {
- case RF_AIROHA_2230:
- case RF_AIROHA_2230S: // 20060420 Add this
- MTOPARA_TXPOWER_INDEX() = 46; // MAX-8 // @@ Only for AL 2230
- break;
- case RF_AIROHA_7230:
- MTOPARA_TXPOWER_INDEX() = 49;
- break;
- case RF_WB_242:
- MTOPARA_TXPOWER_INDEX() = 10;
- break;
- case RF_WB_242_1:
- MTOPARA_TXPOWER_INDEX() = 24; // ->10 20060316.1 modify
- break;
+ /* --------- DTO threshold parameters ------------- */
+ MTOPARA_PERIODIC_CHECK_CYCLE() = 10;
+ MTOPARA_RSSI_TH_FOR_ANTDIV() = 10;
+ MTOPARA_TXCOUNT_TH_FOR_CALC_RATE() = 50;
+ MTOPARA_TXRATE_INC_TH() = 10;
+ MTOPARA_TXRATE_DEC_TH() = 30;
+ MTOPARA_TXRATE_EQ_TH() = 40;
+ MTOPARA_TXRATE_BACKOFF() = 12;
+ MTOPARA_TXRETRYRATE_REDUCE() = 6;
+ if (MTO_TXPOWER_FROM_EEPROM == 0xff) {
+ switch (MTO_HAL()->phy_type) {
+ case RF_AIROHA_2230:
+ case RF_AIROHA_2230S:
+ MTOPARA_TXPOWER_INDEX() = 46; /* MAX-8 @@ Only for AL 2230 */
+ break;
+ case RF_AIROHA_7230:
+ MTOPARA_TXPOWER_INDEX() = 49;
+ break;
+ case RF_WB_242:
+ MTOPARA_TXPOWER_INDEX() = 10;
+ break;
+ case RF_WB_242_1:
+ MTOPARA_TXPOWER_INDEX() = 24;
+ break;
}
- }
- else //follow the setting from EEPROM
+ } else { /* follow the setting from EEPROM */
MTOPARA_TXPOWER_INDEX() = MTO_TXPOWER_FROM_EEPROM;
- RFSynthesizer_SetPowerIndex(MTO_HAL(), (u8)MTOPARA_TXPOWER_INDEX());
- //------------------------------------------------
+ }
+ RFSynthesizer_SetPowerIndex(MTO_HAL(), (u8) MTOPARA_TXPOWER_INDEX());
+ /* ------------------------------------------------ */
- // For RSSI turning 20060808.4 Cancel load from EEPROM
+	/* For RSSI tuning -- Cancel load from EEPROM */
MTO_DATA().RSSI_high = -41;
MTO_DATA().RSSI_low = -60;
}
-//===========================================================================
-// Description:
-// If we enable DTO, we will ignore the tx count with different tx rate from
-// DTO rate. This is because when we adjust DTO tx rate, there could be some
-// packets in the tx queue with previous tx rate
+/* ===========================================================================
+ * Description:
+ * If we enable DTO, we will ignore the tx count with different tx rate
+ * from DTO rate. This is because when we adjust DTO tx rate, there could
+ * be some packets in the tx queue with previous tx rate
+ */
+
void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 tx_rate, u8 index)
{
MTO_TXFLOWCOUNT()++;
- if ((MTO_ENABLE==1) && (MTO_RATE_CHANGE_ENABLE()==1))
- {
- if(tx_rate == MTO_DATA_RATE())
- {
- if (index == 0)
- {
+ if ((MTO_ENABLE == 1) && (MTO_RATE_CHANGE_ENABLE() == 1)) {
+ if (tx_rate == MTO_DATA_RATE()) {
+ if (index == 0) {
if (boSparseTxTraffic)
MTO_HAL()->dto_tx_frag_count += MTOPARA_PERIODIC_CHECK_CYCLE();
else
MTO_HAL()->dto_tx_frag_count += 1;
- }
- else
- {
- if (index<8)
- {
+ } else {
+ if (index < 8) {
MTO_HAL()->dto_tx_retry_count += index;
- MTO_HAL()->dto_tx_frag_count += (index+1);
- }
- else
- {
+ MTO_HAL()->dto_tx_frag_count += (index + 1);
+ } else {
MTO_HAL()->dto_tx_retry_count += 7;
MTO_HAL()->dto_tx_frag_count += 7;
}
}
- }
- else if(MTO_DATA_RATE()>48 && tx_rate ==48)
- {//ALFRED
- if (index<3) //for reduciing data rate scheme ,
- //do not calcu different data rate
- //3 is the reducing data rate at retry
- {
+ } else if (MTO_DATA_RATE() > 48 && tx_rate == 48) {
+			/* For the rate-reduction scheme, do not count a different data rate; 3 is the retry count at which the rate is reduced. */
+ if (index < 3) {
MTO_HAL()->dto_tx_retry_count += index;
- MTO_HAL()->dto_tx_frag_count += (index+1);
- }
- else
- {
+ MTO_HAL()->dto_tx_frag_count += (index + 1);
+ } else {
MTO_HAL()->dto_tx_retry_count += 3;
MTO_HAL()->dto_tx_frag_count += 3;
}
}
- }
- else
- {
+ } else {
MTO_HAL()->dto_tx_retry_count += index;
- MTO_HAL()->dto_tx_frag_count += (index+1);
+ MTO_HAL()->dto_tx_frag_count += (index + 1);
}
- TotalTxPkt ++;
- TotalTxPktRetry += (index+1);
+ TotalTxPkt++;
+ TotalTxPktRetry += (index + 1);
- PeriodTotalTxPkt ++;
- PeriodTotalTxPktRetry += (index+1);
+ PeriodTotalTxPkt++;
+ PeriodTotalTxPktRetry += (index + 1);
}
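
For reference, the retry/fragment accounting that MTO_SetTxCount() performs in the hunk above clamps the per-packet contribution: up to 7 retries are counted, and each packet otherwise adds index + 1 transmission attempts to the fragment counter. A minimal sketch of that branch under hypothetical names (not the driver's own structures):

struct dto_counters {
	unsigned int retry_count;	/* accumulated retries */
	unsigned int frag_count;	/* accumulated transmit attempts */
};

static void account_tx(struct dto_counters *c, unsigned int index)
{
	if (index < 8) {
		c->retry_count += index;	/* retries actually used */
		c->frag_count += index + 1;	/* attempts = retries + 1 */
	} else {
		c->retry_count += 7;		/* saturate the contribution */
		c->frag_count += 7;
	}
}
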
diff --git a/drivers/staging/winbond/mto.h b/drivers/staging/winbond/mto.h
index fb4781d5781..a0f659cf99f 100644
--- a/drivers/staging/winbond/mto.h
+++ b/drivers/staging/winbond/mto.h
@@ -1,13 +1,10 @@
-//==================================================================
-// MTO.H
-//
-// Revision history
-//=================================
-// 20030110 UN20 Pete Chao
-// Initial Release
-//
-// Copyright (c) 2003 Winbond Electronics Corp. All rights reserved.
-//==================================================================
+/*
+ * ==================================================================
+ * MTO.H
+ *
+ * Copyright (c) 2003 Winbond Electronics Corp. All rights reserved.
+ * ==================================================================
+ */
#ifndef __MTO_H__
#define __MTO_H__
@@ -15,115 +12,104 @@
struct wbsoft_priv;
-// LA20040210_DTO kevin
-//#define MTO_PREAMBLE_LONG 0
-//#define MTO_PREAMBLE_SHORT 1
#define MTO_PREAMBLE_LONG WLAN_PREAMBLE_TYPE_LONG
#define MTO_PREAMBLE_SHORT WLAN_PREAMBLE_TYPE_SHORT
-//============================================================================
-// struct _MTOParameters --
-//
-// Defines the parameters used in the MAC Throughput Optimization algorithm
-//============================================================================
+/* Defines the parameters used in the MAC Throughput Optimization algorithm */
struct wb35_mto_params {
- //--------- wkchen added -------------
- u32 TxFlowCount; //to judge what kind the tx flow(sparse or busy) is
- //------------------------------------------------
+	u32	TxFlowCount;	/* to judge what kind of tx flow (sparse or busy) it is */
- //--------- DTO threshold parameters -------------
- u16 DTO_PeriodicCheckCycle;
- u16 DTO_RssiThForAntDiv;
+ /* --------- DTO threshold parameters ------------- */
+ u16 DTO_PeriodicCheckCycle;
+ u16 DTO_RssiThForAntDiv;
- u16 DTO_TxCountThForCalcNewRate;
- u16 DTO_TxRateIncTh;
+ u16 DTO_TxCountThForCalcNewRate;
+ u16 DTO_TxRateIncTh;
- u16 DTO_TxRateDecTh;
- u16 DTO_TxRateEqTh;
+ u16 DTO_TxRateDecTh;
+ u16 DTO_TxRateEqTh;
- u16 DTO_TxRateBackOff;
- u16 DTO_TxRetryRateReduce;
+ u16 DTO_TxRateBackOff;
+ u16 DTO_TxRetryRateReduce;
- u16 DTO_TxPowerIndex; //0 ~ 31
- u16 reserved_1;
- //------------------------------------------------
+ u16 DTO_TxPowerIndex; /* 0 ~ 31 */
+ u16 reserved_1;
+ /* ------------------------------------------------ */
- u8 PowerChangeEnable;
- u8 AntDiversityEnable;
- u8 CCA_Mode;
- u8 CCA_Mode_Setup;
- u8 Preamble_Type;
- u8 PreambleChangeEnable;
+ u8 PowerChangeEnable;
+ u8 AntDiversityEnable;
+ u8 CCA_Mode;
+ u8 CCA_Mode_Setup;
+ u8 Preamble_Type;
+ u8 PreambleChangeEnable;
- u8 DataRateLevel;
- u8 DataRateChangeEnable;
- u8 FragThresholdLevel;
- u8 FragThresholdChangeEnable;
+ u8 DataRateLevel;
+ u8 DataRateChangeEnable;
+ u8 FragThresholdLevel;
+ u8 FragThresholdChangeEnable;
- u16 RTSThreshold;
- u16 RTSThreshold_Setup;
+ u16 RTSThreshold;
+ u16 RTSThreshold_Setup;
- u32 AvgIdleSlot;
- u32 Pr_Interf;
- u32 AvgGapBtwnInterf;
+ u32 AvgIdleSlot;
+ u32 Pr_Interf;
+ u32 AvgGapBtwnInterf;
- u8 RTSChangeEnable;
- u8 Ant_sel;
- u8 aging_timeout;
- u8 reserved_2;
+ u8 RTSChangeEnable;
+ u8 Ant_sel;
+ u8 aging_timeout;
+ u8 reserved_2;
- u32 Cnt_Ant[2];
- u32 SQ_Ant[2];
+ u32 Cnt_Ant[2];
+ u32 SQ_Ant[2];
-// 20040510 remove from globe vairable
- u8 FallbackRateLevel;
- u8 OfdmRateLevel;
+ u8 FallbackRateLevel;
+ u8 OfdmRateLevel;
- u8 RatePolicy;
- u8 reserved_3[3];
-
- // For RSSI turning
- s32 RSSI_high;
- s32 RSSI_low;
+ u8 RatePolicy;
+ u8 reserved_3[3];
+	/* For RSSI tuning */
+ s32 RSSI_high;
+ s32 RSSI_low;
};
-#define MTO_DATA() (adapter->sMtoPara)
-#define MTO_HAL() (&adapter->sHwData)
-#define MTO_SET_PREAMBLE_TYPE(x) // 20040511 Turbo mark LM_PREAMBLE_TYPE(&pcore_data->lm_data) = (x)
-#define MTO_ENABLE (adapter->sLocalPara.TxRateMode == RATE_AUTO)
-#define MTO_TXPOWER_FROM_EEPROM (adapter->sHwData.PowerIndexFromEEPROM)
-#define LOCAL_ANTENNA_NO() (adapter->sLocalPara.bAntennaNo)
-#define LOCAL_IS_CONNECTED() (adapter->sLocalPara.wConnectedSTAindex != 0)
-#define MTO_INITTXRATE_MODE (adapter->sHwData.SoftwareSet&0x2) //bit 1
+#define MTO_DATA() (adapter->sMtoPara)
+#define MTO_HAL() (&adapter->sHwData)
+#define MTO_SET_PREAMBLE_TYPE(x) /* Turbo mark LM_PREAMBLE_TYPE(&pcore_data->lm_data) = (x) */
+#define MTO_ENABLE (adapter->sLocalPara.TxRateMode == RATE_AUTO)
+#define MTO_TXPOWER_FROM_EEPROM (adapter->sHwData.PowerIndexFromEEPROM)
+#define LOCAL_ANTENNA_NO() (adapter->sLocalPara.bAntennaNo)
+#define LOCAL_IS_CONNECTED() (adapter->sLocalPara.wConnectedSTAindex != 0)
+#define MTO_INITTXRATE_MODE (adapter->sHwData.SoftwareSet&0x2) /* bit 1 */
-#define MTO_POWER_CHANGE_ENABLE() MTO_DATA().PowerChangeEnable
-#define MTO_CCA_MODE() MTO_DATA().CCA_Mode
-#define MTO_CCA_MODE_SETUP() MTO_DATA().CCA_Mode_Setup
-#define MTO_PREAMBLE_TYPE() MTO_DATA().Preamble_Type
-#define MTO_PREAMBLE_CHANGE_ENABLE() MTO_DATA().PreambleChangeEnable
+#define MTO_POWER_CHANGE_ENABLE() MTO_DATA().PowerChangeEnable
+#define MTO_CCA_MODE() MTO_DATA().CCA_Mode
+#define MTO_CCA_MODE_SETUP() MTO_DATA().CCA_Mode_Setup
+#define MTO_PREAMBLE_TYPE() MTO_DATA().Preamble_Type
+#define MTO_PREAMBLE_CHANGE_ENABLE() MTO_DATA().PreambleChangeEnable
-#define MTO_RATE_LEVEL() MTO_DATA().DataRateLevel
+#define MTO_RATE_LEVEL() MTO_DATA().DataRateLevel
#define MTO_OFDM_RATE_LEVEL() MTO_DATA().OfdmRateLevel
-#define MTO_RATE_CHANGE_ENABLE() MTO_DATA().DataRateChangeEnable
-#define MTO_FRAG_TH_LEVEL() MTO_DATA().FragThresholdLevel
-#define MTO_FRAG_CHANGE_ENABLE() MTO_DATA().FragThresholdChangeEnable
-#define MTO_RTS_THRESHOLD() MTO_DATA().RTSThreshold
-#define MTO_RTS_CHANGE_ENABLE() MTO_DATA().RTSChangeEnable
-#define MTO_RTS_THRESHOLD_SETUP() MTO_DATA().RTSThreshold_Setup
+#define MTO_RATE_CHANGE_ENABLE() MTO_DATA().DataRateChangeEnable
+#define MTO_FRAG_TH_LEVEL() MTO_DATA().FragThresholdLevel
+#define MTO_FRAG_CHANGE_ENABLE() MTO_DATA().FragThresholdChangeEnable
+#define MTO_RTS_THRESHOLD() MTO_DATA().RTSThreshold
+#define MTO_RTS_CHANGE_ENABLE() MTO_DATA().RTSChangeEnable
+#define MTO_RTS_THRESHOLD_SETUP() MTO_DATA().RTSThreshold_Setup
-#define MTO_AVG_IDLE_SLOT() MTO_DATA().AvgIdleSlot
-#define MTO_PR_INTERF() MTO_DATA().Pr_Interf
-#define MTO_AVG_GAP_BTWN_INTERF() MTO_DATA().AvgGapBtwnInterf
+#define MTO_AVG_IDLE_SLOT() MTO_DATA().AvgIdleSlot
+#define MTO_PR_INTERF() MTO_DATA().Pr_Interf
+#define MTO_AVG_GAP_BTWN_INTERF() MTO_DATA().AvgGapBtwnInterf
-#define MTO_CNT_ANT(x) MTO_DATA().Cnt_Ant[(x)]
-#define MTO_SQ_ANT(x) MTO_DATA().SQ_Ant[(x)]
-#define MTO_AGING_TIMEOUT() MTO_DATA().aging_timeout
+#define MTO_CNT_ANT(x) MTO_DATA().Cnt_Ant[(x)]
+#define MTO_SQ_ANT(x) MTO_DATA().SQ_Ant[(x)]
+#define MTO_AGING_TIMEOUT() MTO_DATA().aging_timeout
+#define MTO_TXFLOWCOUNT() MTO_DATA().TxFlowCount
-#define MTO_TXFLOWCOUNT() MTO_DATA().TxFlowCount
-//--------- DTO threshold parameters -------------
+/* --------- DTO threshold parameters ------------- */
#define MTOPARA_PERIODIC_CHECK_CYCLE() MTO_DATA().DTO_PeriodicCheckCycle
#define MTOPARA_RSSI_TH_FOR_ANTDIV() MTO_DATA().DTO_RssiThForAntDiv
#define MTOPARA_TXCOUNT_TH_FOR_CALC_RATE() MTO_DATA().DTO_TxCountThForCalcNewRate
@@ -133,13 +119,13 @@ struct wb35_mto_params {
#define MTOPARA_TXRATE_BACKOFF() MTO_DATA().DTO_TxRateBackOff
#define MTOPARA_TXRETRYRATE_REDUCE() MTO_DATA().DTO_TxRetryRateReduce
#define MTOPARA_TXPOWER_INDEX() MTO_DATA().DTO_TxPowerIndex
-//------------------------------------------------
+/* ------------------------------------------------ */
-extern u16 MTO_Frag_Th_Tbl[];
+extern u16 MTO_Frag_Th_Tbl[];
-#define MTO_DATA_RATE() MTO_Data_Rate_Tbl[MTO_RATE_LEVEL()]
-#define MTO_FRAG_TH() MTO_Frag_Th_Tbl[MTO_FRAG_TH_LEVEL()]
+#define MTO_DATA_RATE() MTO_Data_Rate_Tbl[MTO_RATE_LEVEL()]
+#define MTO_FRAG_TH() MTO_Frag_Th_Tbl[MTO_FRAG_TH_LEVEL()]
extern void MTO_Init(struct wbsoft_priv *);
extern void MTO_PeriodicTimerExpired(struct wbsoft_priv *);
@@ -148,6 +134,5 @@ extern u8 MTO_GetTxRate(struct wbsoft_priv *adapter, u32 fpdu_len);
extern u8 MTO_GetTxFallbackRate(struct wbsoft_priv *adapter);
extern void MTO_SetTxCount(struct wbsoft_priv *adapter, u8 t0, u8 index);
-#endif //__MTO_H__
-
+#endif /* __MTO_H__ */
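
The MTO_*() accessor macros above expand against a local adapter pointer and reduce to plain field reads on adapter->sMtoPara; MTO_DATA_RATE() and MTO_FRAG_TH() additionally index a rate table and MTO_Frag_Th_Tbl[] by the current level. A minimal sketch of how a caller might consume them follows; MTO_Data_Rate_Tbl[] and the one-step fallback shown are assumptions for illustration, not code from this patch.

/*
 * Illustration only: consuming the MTO_*() accessors above.  Assumes a
 * u32 MTO_Data_Rate_Tbl[] indexed like MTO_Frag_Th_Tbl[]; the one-step
 * rate fallback is a made-up policy, not the driver's real algorithm.
 */
static u32 example_tx_parameters(struct wbsoft_priv *adapter, u16 *frag_th)
{
	/* Every MTO_*() macro implicitly dereferences the local "adapter". */
	*frag_th = MTO_FRAG_TH();	/* MTO_Frag_Th_Tbl[FragThresholdLevel] */

	/* Hypothetical fallback: step the data-rate level down when allowed. */
	if (MTO_RATE_CHANGE_ENABLE() && MTO_RATE_LEVEL() > 0)
		MTO_RATE_LEVEL()--;

	return MTO_DATA_RATE();		/* MTO_Data_Rate_Tbl[DataRateLevel] */
}
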
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index 8c56962ab80..78935865df1 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -25,10 +25,7 @@
#define FIXED(X) ((s32)((X) * 32768.0))
#define DEG2RAD(X) 0.017453 * (X)
-/****************** LOCAL TYPE DEFINITION SECTION ***************************/
-typedef s32 fixed; /* 16.16 fixed-point */
-
-static const fixed Angles[]=
+static const s32 Angles[] =
{
FIXED(DEG2RAD(45.0)), FIXED(DEG2RAD(26.565)), FIXED(DEG2RAD(14.0362)),
FIXED(DEG2RAD(7.12502)), FIXED(DEG2RAD(3.57633)), FIXED(DEG2RAD(1.78991)),
@@ -300,7 +297,7 @@ u32 _sqrt(u32 sqsum)
/****************************************************************************/
void _sin_cos(s32 angle, s32 *sin, s32 *cos)
{
- fixed X, Y, TargetAngle, CurrAngle;
+ s32 X, Y, TargetAngle, CurrAngle;
unsigned Step;
X=FIXED(AG_CONST); // AG_CONST * cos(0)
@@ -310,7 +307,7 @@ void _sin_cos(s32 angle, s32 *sin, s32 *cos)
for (Step=0; Step < 12; Step++)
{
- fixed NewX;
+ s32 NewX;
if(TargetAngle > CurrAngle)
{
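
The _sin_cos() routine above is a fixed-point CORDIC: it starts from the vector (AG_CONST, 0), and each of its 12 steps rotates toward the target angle with shift-and-add operations while accumulating entries of the Angles[] arctangent table. A stand-alone sketch of the same technique is below; it uses its own Q16 scaling, gain constant and 8-entry table, so the numbers are illustrative rather than the driver's (the driver's FIXED() scales by 32768).

/*
 * Stand-alone sketch of the CORDIC rotation used by _sin_cos() above.
 * Scaling (Q16 here), table length and the gain constant are
 * illustrative; the driver keeps its own Angles[] table and scaling.
 */
#include <stdint.h>

#define FIX(x)	((int32_t)((x) * 65536.0))	/* Q16 fixed point */

static const int32_t atan_tbl[] = {	/* atan(2^-i), radians, Q16 */
	FIX(0.785398), FIX(0.463648), FIX(0.244979), FIX(0.124355),
	FIX(0.062419), FIX(0.031240), FIX(0.015624), FIX(0.007812),
};

static void cordic_sin_cos(int32_t angle, int32_t *sin_out, int32_t *cos_out)
{
	int32_t x = FIX(0.607253);	/* 1/CORDIC gain, so |(x, y)| ends near 1.0 */
	int32_t y = 0;
	int32_t acc = 0;		/* angle rotated so far */
	unsigned int i;

	for (i = 0; i < 8; i++) {
		int32_t nx;

		if (acc < angle) {		/* rotate counter-clockwise */
			nx = x - (y >> i);
			y += x >> i;
			acc += atan_tbl[i];
		} else {			/* rotate clockwise */
			nx = x + (y >> i);
			y -= x >> i;
			acc -= atan_tbl[i];
		}
		x = nx;
	}
	*cos_out = x;	/* ~cos(angle) in Q16, for |angle| < pi/2 */
	*sin_out = y;	/* ~sin(angle) in Q16 */
}
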
diff --git a/drivers/staging/winbond/phy_calibration.h b/drivers/staging/winbond/phy_calibration.h
index 51c8fde81e2..30320314883 100644
--- a/drivers/staging/winbond/phy_calibration.h
+++ b/drivers/staging/winbond/phy_calibration.h
@@ -3,105 +3,82 @@
#include "wbhal_f.h"
-// 20031229 Turbo add
-#define REG_AGC_CTRL1 0x1000
-#define REG_AGC_CTRL2 0x1004
-#define REG_AGC_CTRL3 0x1008
-#define REG_AGC_CTRL4 0x100C
-#define REG_AGC_CTRL5 0x1010
-#define REG_AGC_CTRL6 0x1014
-#define REG_AGC_CTRL7 0x1018
-#define REG_AGC_CTRL8 0x101C
-#define REG_AGC_CTRL9 0x1020
-#define REG_AGC_CTRL10 0x1024
-#define REG_CCA_CTRL 0x1028
-#define REG_A_ACQ_CTRL 0x102C
-#define REG_B_ACQ_CTRL 0x1030
-#define REG_A_TXRX_CTRL 0x1034
-#define REG_B_TXRX_CTRL 0x1038
-#define REG_A_TX_COEF3 0x103C
-#define REG_A_TX_COEF2 0x1040
-#define REG_A_TX_COEF1 0x1044
-#define REG_B_TX_COEF2 0x1048
-#define REG_B_TX_COEF1 0x104C
-#define REG_MODE_CTRL 0x1050
-#define REG_CALIB_DATA 0x1054
-#define REG_IQ_ALPHA 0x1058
-#define REG_DC_CANCEL 0x105C
-#define REG_WTO_READ 0x1060
-#define REG_OFFSET_READ 0x1064
-#define REG_CALIB_READ1 0x1068
-#define REG_CALIB_READ2 0x106C
-#define REG_A_FREQ_EST 0x1070
-
-
-
-
-// 20031101 Turbo add
-#define MASK_AMER_OFF_REG BIT(31)
-
-#define MASK_BMER_OFF_REG BIT(31)
-
-#define MASK_LNA_FIX_GAIN (BIT(3)|BIT(4))
-#define MASK_AGC_FIX BIT(1)
-
-#define MASK_AGC_FIX_GAIN 0xFF00
-
-#define MASK_ADC_DC_CAL_STR BIT(10)
-#define MASK_CALIB_START BIT(4)
-#define MASK_IQCAL_TONE_SEL (BIT(3)|BIT(2))
-#define MASK_IQCAL_MODE (BIT(1)|BIT(0))
-
-#define MASK_TX_CAL_0 0xF0000000
-#define TX_CAL_0_SHIFT 28
-#define MASK_TX_CAL_1 0x0F000000
-#define TX_CAL_1_SHIFT 24
-#define MASK_TX_CAL_2 0x00F00000
-#define TX_CAL_2_SHIFT 20
-#define MASK_TX_CAL_3 0x000F0000
-#define TX_CAL_3_SHIFT 16
-#define MASK_RX_CAL_0 0x0000F000
-#define RX_CAL_0_SHIFT 12
-#define MASK_RX_CAL_1 0x00000F00
-#define RX_CAL_1_SHIFT 8
-#define MASK_RX_CAL_2 0x000000F0
-#define RX_CAL_2_SHIFT 4
-#define MASK_RX_CAL_3 0x0000000F
-#define RX_CAL_3_SHIFT 0
-
-#define MASK_CANCEL_DC_I 0x3E0
-#define CANCEL_DC_I_SHIFT 5
-#define MASK_CANCEL_DC_Q 0x01F
-#define CANCEL_DC_Q_SHIFT 0
-
-// LA20040210 kevin
-//#define MASK_ADC_DC_CAL_I(x) (((x)&0x1FE00)>>9)
-//#define MASK_ADC_DC_CAL_Q(x) ((x)&0x1FF)
-#define MASK_ADC_DC_CAL_I(x) (((x)&0x0003FE00)>>9)
-#define MASK_ADC_DC_CAL_Q(x) ((x)&0x000001FF)
-
-// LA20040210 kevin (Turbo has wrong definition)
-//#define MASK_IQCAL_TONE_I 0x7FFC000
-//#define SHIFT_IQCAL_TONE_I(x) ((x)>>13)
-//#define MASK_IQCAL_TONE_Q 0x1FFF
-//#define SHIFT_IQCAL_TONE_Q(x) ((x)>>0)
-#define MASK_IQCAL_TONE_I 0x00001FFF
-#define SHIFT_IQCAL_TONE_I(x) ((x)>>0)
-#define MASK_IQCAL_TONE_Q 0x03FFE000
-#define SHIFT_IQCAL_TONE_Q(x) ((x)>>13)
-
-// LA20040210 kevin (Turbo has wrong definition)
-//#define MASK_IQCAL_IMAGE_I 0x7FFC000
-//#define SHIFT_IQCAL_IMAGE_I(x) ((x)>>13)
-//#define MASK_IQCAL_IMAGE_Q 0x1FFF
-//#define SHIFT_IQCAL_IMAGE_Q(x) ((x)>>0)
-
-//#define MASK_IQCAL_IMAGE_I 0x00001FFF
-//#define SHIFT_IQCAL_IMAGE_I(x) ((x)>>0)
-//#define MASK_IQCAL_IMAGE_Q 0x03FFE000
-//#define SHIFT_IQCAL_IMAGE_Q(x) ((x)>>13)
-
-void phy_set_rf_data( struct hw_data * pHwData, u32 index, u32 value );
-#define phy_init_rf( _A ) //RFSynthesizer_initial( _A )
+#define REG_AGC_CTRL1 0x1000
+#define REG_AGC_CTRL2 0x1004
+#define REG_AGC_CTRL3 0x1008
+#define REG_AGC_CTRL4 0x100C
+#define REG_AGC_CTRL5 0x1010
+#define REG_AGC_CTRL6 0x1014
+#define REG_AGC_CTRL7 0x1018
+#define REG_AGC_CTRL8 0x101C
+#define REG_AGC_CTRL9 0x1020
+#define REG_AGC_CTRL10 0x1024
+#define REG_CCA_CTRL 0x1028
+#define REG_A_ACQ_CTRL 0x102C
+#define REG_B_ACQ_CTRL 0x1030
+#define REG_A_TXRX_CTRL 0x1034
+#define REG_B_TXRX_CTRL 0x1038
+#define REG_A_TX_COEF3 0x103C
+#define REG_A_TX_COEF2 0x1040
+#define REG_A_TX_COEF1 0x1044
+#define REG_B_TX_COEF2 0x1048
+#define REG_B_TX_COEF1 0x104C
+#define REG_MODE_CTRL 0x1050
+#define REG_CALIB_DATA 0x1054
+#define REG_IQ_ALPHA 0x1058
+#define REG_DC_CANCEL 0x105C
+#define REG_WTO_READ 0x1060
+#define REG_OFFSET_READ 0x1064
+#define REG_CALIB_READ1 0x1068
+#define REG_CALIB_READ2 0x106C
+#define REG_A_FREQ_EST 0x1070
+
+
+#define MASK_AMER_OFF_REG BIT(31)
+
+#define MASK_BMER_OFF_REG BIT(31)
+
+#define MASK_LNA_FIX_GAIN (BIT(3) | BIT(4))
+#define MASK_AGC_FIX BIT(1)
+
+#define MASK_AGC_FIX_GAIN 0xFF00
+
+#define MASK_ADC_DC_CAL_STR BIT(10)
+#define MASK_CALIB_START BIT(4)
+#define MASK_IQCAL_TONE_SEL (BIT(3) | BIT(2))
+#define MASK_IQCAL_MODE (BIT(1) | BIT(0))
+
+#define MASK_TX_CAL_0 0xF0000000
+#define TX_CAL_0_SHIFT 28
+#define MASK_TX_CAL_1 0x0F000000
+#define TX_CAL_1_SHIFT 24
+#define MASK_TX_CAL_2 0x00F00000
+#define TX_CAL_2_SHIFT 20
+#define MASK_TX_CAL_3 0x000F0000
+#define TX_CAL_3_SHIFT 16
+#define MASK_RX_CAL_0 0x0000F000
+#define RX_CAL_0_SHIFT 12
+#define MASK_RX_CAL_1 0x00000F00
+#define RX_CAL_1_SHIFT 8
+#define MASK_RX_CAL_2 0x000000F0
+#define RX_CAL_2_SHIFT 4
+#define MASK_RX_CAL_3 0x0000000F
+#define RX_CAL_3_SHIFT 0
+
+#define MASK_CANCEL_DC_I 0x3E0
+#define CANCEL_DC_I_SHIFT 5
+#define MASK_CANCEL_DC_Q 0x01F
+#define CANCEL_DC_Q_SHIFT 0
+
+#define MASK_ADC_DC_CAL_I(x) (((x) & 0x0003FE00) >> 9)
+#define MASK_ADC_DC_CAL_Q(x) ((x) & 0x000001FF)
+
+#define MASK_IQCAL_TONE_I 0x00001FFF
+#define SHIFT_IQCAL_TONE_I(x) ((x) >> 0)
+#define MASK_IQCAL_TONE_Q 0x03FFE000
+#define SHIFT_IQCAL_TONE_Q(x) ((x) >> 13)
+
+void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value);
+#define phy_init_rf(_A) /* RFSynthesizer_initial(_A) */
#endif
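
The MASK_* and *_SHIFT pairs above follow the usual mask-then-shift convention for packing several 4-bit calibration fields into one 32-bit register word. A small sketch of reading and updating one such field (TX_CAL_0, bits 31:28) under that convention; how the register value is actually read and written back is left to the driver's own accessors.

/*
 * Sketch of the mask/shift convention used by MASK_TX_CAL_* and the
 * *_SHIFT macros above; reg_val stands for a value already read from
 * the calibration register.
 */
static u32 get_tx_cal_0(u32 reg_val)
{
	/* Keep bits 31..28, then move them down to bit 0. */
	return (reg_val & MASK_TX_CAL_0) >> TX_CAL_0_SHIFT;
}

static u32 set_tx_cal_0(u32 reg_val, u32 cal)
{
	/* Clear the old field, then OR in the new 4-bit value. */
	reg_val &= ~MASK_TX_CAL_0;
	reg_val |= (cal << TX_CAL_0_SHIFT) & MASK_TX_CAL_0;
	return reg_val;
}
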
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index 5f5048af26a..d9a8128b21f 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -1,514 +1,452 @@
#include "sysdef.h"
#include "wbhal_f.h"
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// Original Phy.h
-//*****************************************************************************
-
-/*****************************************************************************
-; For MAXIM2825/6/7 Ver. 331 or more
-; Edited by Tiger, Sep-17-2003
-; revised by Ben, Sep-18-2003
-
-0x00 0x000a2
-0x01 0x21cc0
-;0x02 0x13802
-0x02 0x1383a
-
-;channe1 01 ; 0x03 0x30142 ; 0x04 0x0b333;
-;channe1 02 ;0x03 0x32141 ;0x04 0x08444;
-;channe1 03 ;0x03 0x32143 ;0x04 0x0aeee;
-;channe1 04 ;0x03 0x32142 ;0x04 0x0b333;
-;channe1 05 ;0x03 0x31141 ;0x04 0x08444;
-;channe1 06 ;
-0x03 0x31143;
-0x04 0x0aeee;
-;channe1 07 ;0x03 0x31142 ;0x04 0x0b333;
-;channe1 08 ;0x03 0x33141 ;0x04 0x08444;
-;channe1 09 ;0x03 0x33143 ;0x04 0x0aeee;
-;channe1 10 ;0x03 0x33142 ;0x04 0x0b333;
-;channe1 11 ;0x03 0x30941 ;0x04 0x08444;
-;channe1 12 ;0x03 0x30943 ;0x04 0x0aeee;
-;channe1 13 ;0x03 0x30942 ;0x04 0x0b333;
-
-0x05 0x28986
-0x06 0x18008
-0x07 0x38400
-0x08 0x05100; 100 Hz DC
-;0x08 0x05900; 30 KHz DC
-0x09 0x24f08
-0x0a 0x17e00, 0x17ea0
-0x0b 0x37d80
-0x0c 0x0c900 // 0x0ca00 (lager power 9db than 0x0c000), 0x0c000
-*****************************************************************************/
-// MAX2825 (pure b/g)
-u32 max2825_rf_data[] =
-{
- (0x00<<18)|0x000a2,
- (0x01<<18)|0x21cc0,
- (0x02<<18)|0x13806,
- (0x03<<18)|0x30142,
- (0x04<<18)|0x0b333,
- (0x05<<18)|0x289A6,
- (0x06<<18)|0x18008,
- (0x07<<18)|0x38000,
- (0x08<<18)|0x05100,
- (0x09<<18)|0x24f08,
- (0x0A<<18)|0x14000,
- (0x0B<<18)|0x37d80,
- (0x0C<<18)|0x0c100 // 11a: 0x0c300, 11g: 0x0c100
-};
+/*
+ * ====================================================
+ * Original Phy.h
+ * ====================================================
+ */
-u32 max2825_channel_data_24[][3] =
-{
- {(0x03<<18)|0x30142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 01
- {(0x03<<18)|0x32141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 02
- {(0x03<<18)|0x32143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 03
- {(0x03<<18)|0x32142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 04
- {(0x03<<18)|0x31141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 05
- {(0x03<<18)|0x31143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 06
- {(0x03<<18)|0x31142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 07
- {(0x03<<18)|0x33141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 08
- {(0x03<<18)|0x33143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 09
- {(0x03<<18)|0x33142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 10
- {(0x03<<18)|0x30941, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 11
- {(0x03<<18)|0x30943, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 12
- {(0x03<<18)|0x30942, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 13
- {(0x03<<18)|0x32941, (0x04<<18)|0x09999, (0x05<<18)|0x289A6} // 14 (2484MHz) hhmodify
+/*
+ * ====================================================
+ * For MAXIM2825/6/7 Ver. 331 or more
+ *
+ * 0x00 0x000a2
+ * 0x01 0x21cc0
+ * 0x02 0x13802
+ * 0x02 0x1383a
+ *
+ * channel 01 ; 0x03 0x30142 ; 0x04 0x0b333;
+ * channel 02 ; 0x03 0x32141 ; 0x04 0x08444;
+ * channel 03 ; 0x03 0x32143 ; 0x04 0x0aeee;
+ * channel 04 ; 0x03 0x32142 ; 0x04 0x0b333;
+ * channel 05 ; 0x03 0x31141 ; 0x04 0x08444;
+ * channel 06 ; 0x03 0x31143 ; 0x04 0x0aeee;
+ * channel 07 ; 0x03 0x31142 ; 0x04 0x0b333;
+ * channel 08 ; 0x03 0x33141 ; 0x04 0x08444;
+ * channel 09 ; 0x03 0x33143 ; 0x04 0x0aeee;
+ * channel 10 ; 0x03 0x33142 ; 0x04 0x0b333;
+ * channel 11 ; 0x03 0x30941 ; 0x04 0x08444;
+ * channel 12 ; 0x03 0x30943 ; 0x04 0x0aeee;
+ * channel 13 ; 0x03 0x30942 ; 0x04 0x0b333;
+ *
+ * 0x05 0x28986
+ * 0x06 0x18008
+ * 0x07 0x38400
+ * 0x08 0x05100; 100 Hz DC
+ * 0x08 0x05900; 30 KHz DC
+ * 0x09 0x24f08
+ * 0x0a 0x17e00, 0x17ea0
+ * 0x0b 0x37d80
+ * 0x0c 0x0c900 -- 0x0ca00 (larger power, 9 dB more than 0x0c000), 0x0c000
+ */
+
+/* MAX2825 (pure b/g) */
+u32 max2825_rf_data[] = {
+ (0x00<<18) | 0x000a2,
+ (0x01<<18) | 0x21cc0,
+ (0x02<<18) | 0x13806,
+ (0x03<<18) | 0x30142,
+ (0x04<<18) | 0x0b333,
+ (0x05<<18) | 0x289A6,
+ (0x06<<18) | 0x18008,
+ (0x07<<18) | 0x38000,
+ (0x08<<18) | 0x05100,
+ (0x09<<18) | 0x24f08,
+ (0x0A<<18) | 0x14000,
+ (0x0B<<18) | 0x37d80,
+ (0x0C<<18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2825_power_data_24[] = {(0x0C<<18)|0x0c000, (0x0C<<18)|0x0c100};
-
-/****************************************************************************/
-// MAX2827 (a/b/g)
-u32 max2827_rf_data[] =
-{
- (0x00<<18)|0x000a2,
- (0x01<<18)|0x21cc0,
- (0x02<<18)|0x13806,
- (0x03<<18)|0x30142,
- (0x04<<18)|0x0b333,
- (0x05<<18)|0x289A6,
- (0x06<<18)|0x18008,
- (0x07<<18)|0x38000,
- (0x08<<18)|0x05100,
- (0x09<<18)|0x24f08,
- (0x0A<<18)|0x14000,
- (0x0B<<18)|0x37d80,
- (0x0C<<18)|0x0c100 // 11a: 0x0c300, 11g: 0x0c100
+u32 max2825_channel_data_24[][3] = {
+ {(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 01 */
+ {(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 02 */
+ {(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 03 */
+ {(0x03 << 18) | 0x32142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 04 */
+ {(0x03 << 18) | 0x31141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 05 */
+ {(0x03 << 18) | 0x31143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 06 */
+ {(0x03 << 18) | 0x31142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 07 */
+ {(0x03 << 18) | 0x33141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 08 */
+ {(0x03 << 18) | 0x33143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 09 */
+ {(0x03 << 18) | 0x33142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 10 */
+ {(0x03 << 18) | 0x30941, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 11 */
+ {(0x03 << 18) | 0x30943, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 12 */
+ {(0x03 << 18) | 0x30942, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 13 */
+ {(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2827_channel_data_24[][3] =
-{
- {(0x03<<18)|0x30142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 01
- {(0x03<<18)|0x32141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 02
- {(0x03<<18)|0x32143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 03
- {(0x03<<18)|0x32142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 04
- {(0x03<<18)|0x31141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 05
- {(0x03<<18)|0x31143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 06
- {(0x03<<18)|0x31142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 07
- {(0x03<<18)|0x33141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 08
- {(0x03<<18)|0x33143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 09
- {(0x03<<18)|0x33142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 10
- {(0x03<<18)|0x30941, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 11
- {(0x03<<18)|0x30943, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 12
- {(0x03<<18)|0x30942, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 13
- {(0x03<<18)|0x32941, (0x04<<18)|0x09999, (0x05<<18)|0x289A6} // 14 (2484MHz) hhmodify
+u32 max2825_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+
+/* ========================================== */
+/* MAX2827 (a/b/g) */
+u32 max2827_rf_data[] = {
+ (0x00 << 18) | 0x000a2,
+ (0x01 << 18) | 0x21cc0,
+ (0x02 << 18) | 0x13806,
+ (0x03 << 18) | 0x30142,
+ (0x04 << 18) | 0x0b333,
+ (0x05 << 18) | 0x289A6,
+ (0x06 << 18) | 0x18008,
+ (0x07 << 18) | 0x38000,
+ (0x08 << 18) | 0x05100,
+ (0x09 << 18) | 0x24f08,
+ (0x0A << 18) | 0x14000,
+ (0x0B << 18) | 0x37d80,
+ (0x0C << 18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2827_channel_data_50[][3] =
-{
- {(0x03<<18)|0x33cc3, (0x04<<18)|0x08ccc, (0x05<<18)|0x2A9A6}, // channel 36
- {(0x03<<18)|0x302c0, (0x04<<18)|0x08000, (0x05<<18)|0x2A9A6}, // channel 40
- {(0x03<<18)|0x302c2, (0x04<<18)|0x0b333, (0x05<<18)|0x2A9A6}, // channel 44
- {(0x03<<18)|0x322c1, (0x04<<18)|0x09999, (0x05<<18)|0x2A9A6}, // channel 48
- {(0x03<<18)|0x312c1, (0x04<<18)|0x0a666, (0x05<<18)|0x2A9A6}, // channel 52
- {(0x03<<18)|0x332c3, (0x04<<18)|0x08ccc, (0x05<<18)|0x2A9A6}, // channel 56
- {(0x03<<18)|0x30ac0, (0x04<<18)|0x08000, (0x05<<18)|0x2A9A6}, // channel 60
- {(0x03<<18)|0x30ac2, (0x04<<18)|0x0b333, (0x05<<18)|0x2A9A6} // channel 64
+u32 max2827_channel_data_24[][3] = {
+ {(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 01 */
+ {(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 02 */
+ {(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 03 */
+ {(0x03 << 18) | 0x32142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 04 */
+ {(0x03 << 18) | 0x31141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 05 */
+ {(0x03 << 18) | 0x31143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 06 */
+ {(0x03 << 18) | 0x31142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 07 */
+ {(0x03 << 18) | 0x33141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 08 */
+ {(0x03 << 18) | 0x33143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 09 */
+ {(0x03 << 18) | 0x33142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 10 */
+ {(0x03 << 18) | 0x30941, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 11 */
+ {(0x03 << 18) | 0x30943, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 12 */
+ {(0x03 << 18) | 0x30942, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 13 */
+ {(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2827_power_data_24[] = {(0x0C<<18)|0x0C000, (0x0C<<18)|0x0D600, (0x0C<<18)|0x0C100};
-u32 max2827_power_data_50[] = {(0x0C<<18)|0x0C400, (0x0C<<18)|0x0D500, (0x0C<<18)|0x0C300};
-
-/****************************************************************************/
-// MAX2828 (a/b/g)
-u32 max2828_rf_data[] =
-{
- (0x00<<18)|0x000a2,
- (0x01<<18)|0x21cc0,
- (0x02<<18)|0x13806,
- (0x03<<18)|0x30142,
- (0x04<<18)|0x0b333,
- (0x05<<18)|0x289A6,
- (0x06<<18)|0x18008,
- (0x07<<18)|0x38000,
- (0x08<<18)|0x05100,
- (0x09<<18)|0x24f08,
- (0x0A<<18)|0x14000,
- (0x0B<<18)|0x37d80,
- (0x0C<<18)|0x0c100 // 11a: 0x0c300, 11g: 0x0c100
+u32 max2827_channel_data_50[][3] = {
+ {(0x03 << 18) | 0x33cc3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x2A9A6}, /* channel 36 */
+ {(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2A9A6}, /* channel 40 */
+ {(0x03 << 18) | 0x302c2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2A9A6}, /* channel 44 */
+ {(0x03 << 18) | 0x322c1, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x2A9A6}, /* channel 48 */
+ {(0x03 << 18) | 0x312c1, (0x04 << 18) | 0x0a666, (0x05 << 18) | 0x2A9A6}, /* channel 52 */
+ {(0x03 << 18) | 0x332c3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x2A9A6}, /* channel 56 */
+ {(0x03 << 18) | 0x30ac0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2A9A6}, /* channel 60 */
+ {(0x03 << 18) | 0x30ac2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2A9A6} /* channel 64 */
};
-u32 max2828_channel_data_24[][3] =
-{
- {(0x03<<18)|0x30142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 01
- {(0x03<<18)|0x32141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 02
- {(0x03<<18)|0x32143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 03
- {(0x03<<18)|0x32142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 04
- {(0x03<<18)|0x31141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 05
- {(0x03<<18)|0x31143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 06
- {(0x03<<18)|0x31142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 07
- {(0x03<<18)|0x33141, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 08
- {(0x03<<18)|0x33143, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 09
- {(0x03<<18)|0x33142, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 10
- {(0x03<<18)|0x30941, (0x04<<18)|0x08444, (0x05<<18)|0x289A6}, // channe1 11
- {(0x03<<18)|0x30943, (0x04<<18)|0x0aeee, (0x05<<18)|0x289A6}, // channe1 12
- {(0x03<<18)|0x30942, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channe1 13
- {(0x03<<18)|0x32941, (0x04<<18)|0x09999, (0x05<<18)|0x289A6} // 14 (2484MHz) hhmodify
+u32 max2827_power_data_24[] = {(0x0C << 18) | 0x0C000, (0x0C << 18) | 0x0D600, (0x0C << 18) | 0x0C100};
+u32 max2827_power_data_50[] = {(0x0C << 18) | 0x0C400, (0x0C << 18) | 0x0D500, (0x0C << 18) | 0x0C300};
+
+/* ======================================================= */
+/* MAX2828 (a/b/g) */
+u32 max2828_rf_data[] = {
+ (0x00 << 18) | 0x000a2,
+ (0x01 << 18) | 0x21cc0,
+ (0x02 << 18) | 0x13806,
+ (0x03 << 18) | 0x30142,
+ (0x04 << 18) | 0x0b333,
+ (0x05 << 18) | 0x289A6,
+ (0x06 << 18) | 0x18008,
+ (0x07 << 18) | 0x38000,
+ (0x08 << 18) | 0x05100,
+ (0x09 << 18) | 0x24f08,
+ (0x0A << 18) | 0x14000,
+ (0x0B << 18) | 0x37d80,
+ (0x0C << 18) | 0x0c100 /* 11a: 0x0c300, 11g: 0x0c100 */
};
-u32 max2828_channel_data_50[][3] =
-{
- {(0x03<<18)|0x33cc3, (0x04<<18)|0x08ccc, (0x05<<18)|0x289A6}, // channel 36
- {(0x03<<18)|0x302c0, (0x04<<18)|0x08000, (0x05<<18)|0x289A6}, // channel 40
- {(0x03<<18)|0x302c2, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6}, // channel 44
- {(0x03<<18)|0x322c1, (0x04<<18)|0x09999, (0x05<<18)|0x289A6}, // channel 48
- {(0x03<<18)|0x312c1, (0x04<<18)|0x0a666, (0x05<<18)|0x289A6}, // channel 52
- {(0x03<<18)|0x332c3, (0x04<<18)|0x08ccc, (0x05<<18)|0x289A6}, // channel 56
- {(0x03<<18)|0x30ac0, (0x04<<18)|0x08000, (0x05<<18)|0x289A6}, // channel 60
- {(0x03<<18)|0x30ac2, (0x04<<18)|0x0b333, (0x05<<18)|0x289A6} // channel 64
+u32 max2828_channel_data_24[][3] = {
+ {(0x03 << 18) | 0x30142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 01 */
+ {(0x03 << 18) | 0x32141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 02 */
+ {(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 03 */
+ {(0x03 << 18) | 0x32142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 04 */
+ {(0x03 << 18) | 0x31141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 05 */
+ {(0x03 << 18) | 0x31143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 06 */
+ {(0x03 << 18) | 0x31142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 07 */
+ {(0x03 << 18) | 0x33141, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 08 */
+ {(0x03 << 18) | 0x33143, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 09 */
+ {(0x03 << 18) | 0x33142, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 10 */
+ {(0x03 << 18) | 0x30941, (0x04 << 18) | 0x08444, (0x05 << 18) | 0x289A6}, /* channel 11 */
+ {(0x03 << 18) | 0x30943, (0x04 << 18) | 0x0aeee, (0x05 << 18) | 0x289A6}, /* channel 12 */
+ {(0x03 << 18) | 0x30942, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 13 */
+ {(0x03 << 18) | 0x32941, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6} /* channel 14 (2484MHz) */
};
-u32 max2828_power_data_24[] = {(0x0C<<18)|0x0c000, (0x0C<<18)|0x0c100};
-u32 max2828_power_data_50[] = {(0x0C<<18)|0x0c000, (0x0C<<18)|0x0c100};
-
-/****************************************************************************/
-// LA20040728 kevin
-// MAX2829 (a/b/g)
-u32 max2829_rf_data[] =
-{
- (0x00<<18)|0x000a2,
- (0x01<<18)|0x23520,
- (0x02<<18)|0x13802,
- (0x03<<18)|0x30142,
- (0x04<<18)|0x0b333,
- (0x05<<18)|0x28906,
- (0x06<<18)|0x18008,
- (0x07<<18)|0x3B500,
- (0x08<<18)|0x05100,
- (0x09<<18)|0x24f08,
- (0x0A<<18)|0x14000,
- (0x0B<<18)|0x37d80,
- (0x0C<<18)|0x0F300 //TXVGA=51, (MAX-6 dB)
+u32 max2828_channel_data_50[][3] = {
+ {(0x03 << 18) | 0x33cc3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x289A6}, /* channel 36 */
+ {(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x289A6}, /* channel 40 */
+ {(0x03 << 18) | 0x302c2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6}, /* channel 44 */
+ {(0x03 << 18) | 0x322c1, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x289A6}, /* channel 48 */
+ {(0x03 << 18) | 0x312c1, (0x04 << 18) | 0x0a666, (0x05 << 18) | 0x289A6}, /* channel 52 */
+ {(0x03 << 18) | 0x332c3, (0x04 << 18) | 0x08ccc, (0x05 << 18) | 0x289A6}, /* channel 56 */
+ {(0x03 << 18) | 0x30ac0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x289A6}, /* channel 60 */
+ {(0x03 << 18) | 0x30ac2, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x289A6} /* channel 64 */
};
-u32 max2829_channel_data_24[][3] =
-{
- {(3<<18)|0x30142, (4<<18)|0x0b333, (5<<18)|0x289C6}, // 01 (2412MHz)
- {(3<<18)|0x32141, (4<<18)|0x08444, (5<<18)|0x289C6}, // 02 (2417MHz)
- {(3<<18)|0x32143, (4<<18)|0x0aeee, (5<<18)|0x289C6}, // 03 (2422MHz)
- {(3<<18)|0x32142, (4<<18)|0x0b333, (5<<18)|0x289C6}, // 04 (2427MHz)
- {(3<<18)|0x31141, (4<<18)|0x08444, (5<<18)|0x289C6}, // 05 (2432MHz)
- {(3<<18)|0x31143, (4<<18)|0x0aeee, (5<<18)|0x289C6}, // 06 (2437MHz)
- {(3<<18)|0x31142, (4<<18)|0x0b333, (5<<18)|0x289C6}, // 07 (2442MHz)
- {(3<<18)|0x33141, (4<<18)|0x08444, (5<<18)|0x289C6}, // 08 (2447MHz)
- {(3<<18)|0x33143, (4<<18)|0x0aeee, (5<<18)|0x289C6}, // 09 (2452MHz)
- {(3<<18)|0x33142, (4<<18)|0x0b333, (5<<18)|0x289C6}, // 10 (2457MHz)
- {(3<<18)|0x30941, (4<<18)|0x08444, (5<<18)|0x289C6}, // 11 (2462MHz)
- {(3<<18)|0x30943, (4<<18)|0x0aeee, (5<<18)|0x289C6}, // 12 (2467MHz)
- {(3<<18)|0x30942, (4<<18)|0x0b333, (5<<18)|0x289C6}, // 13 (2472MHz)
- {(3<<18)|0x32941, (4<<18)|0x09999, (5<<18)|0x289C6}, // 14 (2484MHz) hh-modify
+u32 max2828_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+u32 max2828_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+
+/* ========================================================== */
+/* MAX2829 (a/b/g) */
+u32 max2829_rf_data[] = {
+ (0x00 << 18) | 0x000a2,
+ (0x01 << 18) | 0x23520,
+ (0x02 << 18) | 0x13802,
+ (0x03 << 18) | 0x30142,
+ (0x04 << 18) | 0x0b333,
+ (0x05 << 18) | 0x28906,
+ (0x06 << 18) | 0x18008,
+ (0x07 << 18) | 0x3B500,
+ (0x08 << 18) | 0x05100,
+ (0x09 << 18) | 0x24f08,
+ (0x0A << 18) | 0x14000,
+ (0x0B << 18) | 0x37d80,
+ (0x0C << 18) | 0x0F300 /* TXVGA=51, (MAX-6 dB) */
};
-u32 max2829_channel_data_50[][4] =
-{
- {36, (3<<18)|0x33cc3, (4<<18)|0x08ccc, (5<<18)|0x2A946}, // 36 (5.180GHz)
- {40, (3<<18)|0x302c0, (4<<18)|0x08000, (5<<18)|0x2A946}, // 40 (5.200GHz)
- {44, (3<<18)|0x302c2, (4<<18)|0x0b333, (5<<18)|0x2A946}, // 44 (5.220GHz)
- {48, (3<<18)|0x322c1, (4<<18)|0x09999, (5<<18)|0x2A946}, // 48 (5.240GHz)
- {52, (3<<18)|0x312c1, (4<<18)|0x0a666, (5<<18)|0x2A946}, // 52 (5.260GHz)
- {56, (3<<18)|0x332c3, (4<<18)|0x08ccc, (5<<18)|0x2A946}, // 56 (5.280GHz)
- {60, (3<<18)|0x30ac0, (4<<18)|0x08000, (5<<18)|0x2A946}, // 60 (5.300GHz)
- {64, (3<<18)|0x30ac2, (4<<18)|0x0b333, (5<<18)|0x2A946}, // 64 (5.320GHz)
-
- {100, (3<<18)|0x30ec0, (4<<18)|0x08000, (5<<18)|0x2A9C6}, // 100 (5.500GHz)
- {104, (3<<18)|0x30ec2, (4<<18)|0x0b333, (5<<18)|0x2A9C6}, // 104 (5.520GHz)
- {108, (3<<18)|0x32ec1, (4<<18)|0x09999, (5<<18)|0x2A9C6}, // 108 (5.540GHz)
- {112, (3<<18)|0x31ec1, (4<<18)|0x0a666, (5<<18)|0x2A9C6}, // 112 (5.560GHz)
- {116, (3<<18)|0x33ec3, (4<<18)|0x08ccc, (5<<18)|0x2A9C6}, // 116 (5.580GHz)
- {120, (3<<18)|0x301c0, (4<<18)|0x08000, (5<<18)|0x2A9C6}, // 120 (5.600GHz)
- {124, (3<<18)|0x301c2, (4<<18)|0x0b333, (5<<18)|0x2A9C6}, // 124 (5.620GHz)
- {128, (3<<18)|0x321c1, (4<<18)|0x09999, (5<<18)|0x2A9C6}, // 128 (5.640GHz)
- {132, (3<<18)|0x311c1, (4<<18)|0x0a666, (5<<18)|0x2A9C6}, // 132 (5.660GHz)
- {136, (3<<18)|0x331c3, (4<<18)|0x08ccc, (5<<18)|0x2A9C6}, // 136 (5.680GHz)
- {140, (3<<18)|0x309c0, (4<<18)|0x08000, (5<<18)|0x2A9C6}, // 140 (5.700GHz)
-
- {149, (3<<18)|0x329c2, (4<<18)|0x0b333, (5<<18)|0x2A9C6}, // 149 (5.745GHz)
- {153, (3<<18)|0x319c1, (4<<18)|0x09999, (5<<18)|0x2A9C6}, // 153 (5.765GHz)
- {157, (3<<18)|0x339c1, (4<<18)|0x0a666, (5<<18)|0x2A9C6}, // 157 (5.785GHz)
- {161, (3<<18)|0x305c3, (4<<18)|0x08ccc, (5<<18)|0x2A9C6}, // 161 (5.805GHz)
-
- // Japan
- { 184, (3<<18)|0x308c2, (4<<18)|0x0b333, (5<<18)|0x2A946}, // 184 (4.920GHz)
- { 188, (3<<18)|0x328c1, (4<<18)|0x09999, (5<<18)|0x2A946}, // 188 (4.940GHz)
- { 192, (3<<18)|0x318c1, (4<<18)|0x0a666, (5<<18)|0x2A946}, // 192 (4.960GHz)
- { 196, (3<<18)|0x338c3, (4<<18)|0x08ccc, (5<<18)|0x2A946}, // 196 (4.980GHz)
- { 8, (3<<18)|0x324c1, (4<<18)|0x09999, (5<<18)|0x2A946}, // 8 (5.040GHz)
- { 12, (3<<18)|0x314c1, (4<<18)|0x0a666, (5<<18)|0x2A946}, // 12 (5.060GHz)
- { 16, (3<<18)|0x334c3, (4<<18)|0x08ccc, (5<<18)|0x2A946}, // 16 (5.080GHz)
- { 34, (3<<18)|0x31cc2, (4<<18)|0x0b333, (5<<18)|0x2A946}, // 34 (5.170GHz)
- { 38, (3<<18)|0x33cc1, (4<<18)|0x09999, (5<<18)|0x2A946}, // 38 (5.190GHz)
- { 42, (3<<18)|0x302c1, (4<<18)|0x0a666, (5<<18)|0x2A946}, // 42 (5.210GHz)
- { 46, (3<<18)|0x322c3, (4<<18)|0x08ccc, (5<<18)|0x2A946}, // 46 (5.230GHz)
+u32 max2829_channel_data_24[][3] = {
+ {(3 << 18) | 0x30142, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 01 (2412MHz) */
+ {(3 << 18) | 0x32141, (4 << 18) | 0x08444, (5 << 18) | 0x289C6}, /* 02 (2417MHz) */
+ {(3 << 18) | 0x32143, (4 << 18) | 0x0aeee, (5 << 18) | 0x289C6}, /* 03 (2422MHz) */
+ {(3 << 18) | 0x32142, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 04 (2427MHz) */
+ {(3 << 18) | 0x31141, (4 << 18) | 0x08444, (5 << 18) | 0x289C6}, /* 05 (2432MHz) */
+ {(3 << 18) | 0x31143, (4 << 18) | 0x0aeee, (5 << 18) | 0x289C6}, /* 06 (2437MHz) */
+ {(3 << 18) | 0x31142, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 07 (2442MHz) */
+ {(3 << 18) | 0x33141, (4 << 18) | 0x08444, (5 << 18) | 0x289C6}, /* 08 (2447MHz) */
+ {(3 << 18) | 0x33143, (4 << 18) | 0x0aeee, (5 << 18) | 0x289C6}, /* 09 (2452MHz) */
+ {(3 << 18) | 0x33142, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 10 (2457MHz) */
+ {(3 << 18) | 0x30941, (4 << 18) | 0x08444, (5 << 18) | 0x289C6}, /* 11 (2462MHz) */
+ {(3 << 18) | 0x30943, (4 << 18) | 0x0aeee, (5 << 18) | 0x289C6}, /* 12 (2467MHz) */
+ {(3 << 18) | 0x30942, (4 << 18) | 0x0b333, (5 << 18) | 0x289C6}, /* 13 (2472MHz) */
+ {(3 << 18) | 0x32941, (4 << 18) | 0x09999, (5 << 18) | 0x289C6}, /* 14 (2484MHz) */
};
-/*****************************************************************************
-; For MAXIM2825/6/7 Ver. 317 or less
-; Edited by Tiger, Sep-17-2003 for 2.4Ghz channels
-; Updated by Tiger, Sep-22-2003 for 5.0Ghz channels
-; Corrected by Tiger, Sep-23-2003, for 0x03 and 0x04 of 5.0Ghz channels
-
-0x00 0x00080
-0x01 0x214c0
-0x02 0x13802
-
-;2.4GHz Channels
-;channe1 01 (2.412GHz); 0x03 0x30143 ;0x04 0x0accc
-;channe1 02 (2.417GHz); 0x03 0x32140 ;0x04 0x09111
-;channe1 03 (2.422GHz); 0x03 0x32142 ;0x04 0x0bbbb
-;channe1 04 (2.427GHz); 0x03 0x32143 ;0x04 0x0accc
-;channe1 05 (2.432GHz); 0x03 0x31140 ;0x04 0x09111
-;channe1 06 (2.437GHz); 0x03 0x31142 ;0x04 0x0bbbb
-;channe1 07 (2.442GHz); 0x03 0x31143 ;0x04 0x0accc
-;channe1 08 (2.447GHz); 0x03 0x33140 ;0x04 0x09111
-;channe1 09 (2.452GHz); 0x03 0x33142 ;0x04 0x0bbbb
-;channe1 10 (2.457GHz); 0x03 0x33143 ;0x04 0x0accc
-;channe1 11 (2.462GHz); 0x03 0x30940 ;0x04 0x09111
-;channe1 12 (2.467GHz); 0x03 0x30942 ;0x04 0x0bbbb
-;channe1 13 (2.472GHz); 0x03 0x30943 ;0x04 0x0accc
-
-;5.0Ghz Channels
-;channel 36 (5.180GHz); 0x03 0x33cc0 ;0x04 0x0b333
-;channel 40 (5.200GHz); 0x03 0x302c0 ;0x04 0x08000
-;channel 44 (5.220GHz); 0x03 0x302c2 ;0x04 0x0b333
-;channel 48 (5.240GHz); 0x03 0x322c1 ;0x04 0x09999
-;channel 52 (5.260GHz); 0x03 0x312c1 ;0x04 0x0a666
-;channel 56 (5.280GHz); 0x03 0x332c3 ;0x04 0x08ccc
-;channel 60 (5.300GHz); 0x03 0x30ac0 ;0x04 0x08000
-;channel 64 (5.320GHz); 0x03 0x30ac2 ;0x04 0x08333
-
-;2.4GHz band ;0x05 0x28986;
-;5.0GHz band
-0x05 0x2a986
-
-0x06 0x18008
-0x07 0x38400
-0x08 0x05108
-0x09 0x27ff8
-0x0a 0x14000
-0x0b 0x37f99
-0x0c 0x0c000
-*****************************************************************************/
-u32 maxim_317_rf_data[] =
-{
- (0x00<<18)|0x000a2,
- (0x01<<18)|0x214c0,
- (0x02<<18)|0x13802,
- (0x03<<18)|0x30143,
- (0x04<<18)|0x0accc,
- (0x05<<18)|0x28986,
- (0x06<<18)|0x18008,
- (0x07<<18)|0x38400,
- (0x08<<18)|0x05108,
- (0x09<<18)|0x27ff8,
- (0x0A<<18)|0x14000,
- (0x0B<<18)|0x37f99,
- (0x0C<<18)|0x0c000
+u32 max2829_channel_data_50[][4] = {
+ {36, (3 << 18) | 0x33cc3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 36 (5.180GHz) */
+ {40, (3 << 18) | 0x302c0, (4 << 18) | 0x08000, (5 << 18) | 0x2A946}, /* 40 (5.200GHz) */
+ {44, (3 << 18) | 0x302c2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A946}, /* 44 (5.220GHz) */
+ {48, (3 << 18) | 0x322c1, (4 << 18) | 0x09999, (5 << 18) | 0x2A946}, /* 48 (5.240GHz) */
+ {52, (3 << 18) | 0x312c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A946}, /* 52 (5.260GHz) */
+ {56, (3 << 18) | 0x332c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 56 (5.280GHz) */
+ {60, (3 << 18) | 0x30ac0, (4 << 18) | 0x08000, (5 << 18) | 0x2A946}, /* 60 (5.300GHz) */
+ {64, (3 << 18) | 0x30ac2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A946}, /* 64 (5.320GHz) */
+
+ {100, (3 << 18) | 0x30ec0, (4 << 18) | 0x08000, (5 << 18) | 0x2A9C6}, /* 100 (5.500GHz) */
+ {104, (3 << 18) | 0x30ec2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A9C6}, /* 104 (5.520GHz) */
+ {108, (3 << 18) | 0x32ec1, (4 << 18) | 0x09999, (5 << 18) | 0x2A9C6}, /* 108 (5.540GHz) */
+ {112, (3 << 18) | 0x31ec1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A9C6}, /* 112 (5.560GHz) */
+ {116, (3 << 18) | 0x33ec3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A9C6}, /* 116 (5.580GHz) */
+ {120, (3 << 18) | 0x301c0, (4 << 18) | 0x08000, (5 << 18) | 0x2A9C6}, /* 120 (5.600GHz) */
+ {124, (3 << 18) | 0x301c2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A9C6}, /* 124 (5.620GHz) */
+ {128, (3 << 18) | 0x321c1, (4 << 18) | 0x09999, (5 << 18) | 0x2A9C6}, /* 128 (5.640GHz) */
+ {132, (3 << 18) | 0x311c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A9C6}, /* 132 (5.660GHz) */
+ {136, (3 << 18) | 0x331c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A9C6}, /* 136 (5.680GHz) */
+ {140, (3 << 18) | 0x309c0, (4 << 18) | 0x08000, (5 << 18) | 0x2A9C6}, /* 140 (5.700GHz) */
+
+ {149, (3 << 18) | 0x329c2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A9C6}, /* 149 (5.745GHz) */
+ {153, (3 << 18) | 0x319c1, (4 << 18) | 0x09999, (5 << 18) | 0x2A9C6}, /* 153 (5.765GHz) */
+ {157, (3 << 18) | 0x339c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A9C6}, /* 157 (5.785GHz) */
+ {161, (3 << 18) | 0x305c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A9C6}, /* 161 (5.805GHz) */
+
+ /* Japan */
+ { 184, (3 << 18) | 0x308c2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A946}, /* 184 (4.920GHz) */
+ { 188, (3 << 18) | 0x328c1, (4 << 18) | 0x09999, (5 << 18) | 0x2A946}, /* 188 (4.940GHz) */
+ { 192, (3 << 18) | 0x318c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A946}, /* 192 (4.960GHz) */
+ { 196, (3 << 18) | 0x338c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 196 (4.980GHz) */
+ { 8, (3 << 18) | 0x324c1, (4 << 18) | 0x09999, (5 << 18) | 0x2A946}, /* 8 (5.040GHz) */
+ { 12, (3 << 18) | 0x314c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A946}, /* 12 (5.060GHz) */
+ { 16, (3 << 18) | 0x334c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 16 (5.080GHz) */
+ { 34, (3 << 18) | 0x31cc2, (4 << 18) | 0x0b333, (5 << 18) | 0x2A946}, /* 34 (5.170GHz) */
+ { 38, (3 << 18) | 0x33cc1, (4 << 18) | 0x09999, (5 << 18) | 0x2A946}, /* 38 (5.190GHz) */
+ { 42, (3 << 18) | 0x302c1, (4 << 18) | 0x0a666, (5 << 18) | 0x2A946}, /* 42 (5.210GHz) */
+ { 46, (3 << 18) | 0x322c3, (4 << 18) | 0x08ccc, (5 << 18) | 0x2A946}, /* 46 (5.230GHz) */
};
-u32 maxim_317_channel_data_24[][3] =
-{
- {(0x03<<18)|0x30143, (0x04<<18)|0x0accc, (0x05<<18)|0x28986}, // channe1 01
- {(0x03<<18)|0x32140, (0x04<<18)|0x09111, (0x05<<18)|0x28986}, // channe1 02
- {(0x03<<18)|0x32142, (0x04<<18)|0x0bbbb, (0x05<<18)|0x28986}, // channe1 03
- {(0x03<<18)|0x32143, (0x04<<18)|0x0accc, (0x05<<18)|0x28986}, // channe1 04
- {(0x03<<18)|0x31140, (0x04<<18)|0x09111, (0x05<<18)|0x28986}, // channe1 05
- {(0x03<<18)|0x31142, (0x04<<18)|0x0bbbb, (0x05<<18)|0x28986}, // channe1 06
- {(0x03<<18)|0x31143, (0x04<<18)|0x0accc, (0x05<<18)|0x28986}, // channe1 07
- {(0x03<<18)|0x33140, (0x04<<18)|0x09111, (0x05<<18)|0x28986}, // channe1 08
- {(0x03<<18)|0x33142, (0x04<<18)|0x0bbbb, (0x05<<18)|0x28986}, // channe1 09
- {(0x03<<18)|0x33143, (0x04<<18)|0x0accc, (0x05<<18)|0x28986}, // channe1 10
- {(0x03<<18)|0x30940, (0x04<<18)|0x09111, (0x05<<18)|0x28986}, // channe1 11
- {(0x03<<18)|0x30942, (0x04<<18)|0x0bbbb, (0x05<<18)|0x28986}, // channe1 12
- {(0x03<<18)|0x30943, (0x04<<18)|0x0accc, (0x05<<18)|0x28986} // channe1 13
+/*
+ * ====================================================================
+ * For MAXIM2825/6/7 Ver. 317 or less
+ *
+ * 0x00 0x00080
+ * 0x01 0x214c0
+ * 0x02 0x13802
+ *
+ * 2.4GHz Channels
+ * channel 01 (2.412GHz); 0x03 0x30143 ;0x04 0x0accc
+ * channel 02 (2.417GHz); 0x03 0x32140 ;0x04 0x09111
+ * channel 03 (2.422GHz); 0x03 0x32142 ;0x04 0x0bbbb
+ * channel 04 (2.427GHz); 0x03 0x32143 ;0x04 0x0accc
+ * channel 05 (2.432GHz); 0x03 0x31140 ;0x04 0x09111
+ * channel 06 (2.437GHz); 0x03 0x31142 ;0x04 0x0bbbb
+ * channel 07 (2.442GHz); 0x03 0x31143 ;0x04 0x0accc
+ * channel 08 (2.447GHz); 0x03 0x33140 ;0x04 0x09111
+ * channel 09 (2.452GHz); 0x03 0x33142 ;0x04 0x0bbbb
+ * channel 10 (2.457GHz); 0x03 0x33143 ;0x04 0x0accc
+ * channel 11 (2.462GHz); 0x03 0x30940 ;0x04 0x09111
+ * channel 12 (2.467GHz); 0x03 0x30942 ;0x04 0x0bbbb
+ * channel 13 (2.472GHz); 0x03 0x30943 ;0x04 0x0accc
+ *
+ * 5.0GHz Channels
+ * channel 36 (5.180GHz); 0x03 0x33cc0 ;0x04 0x0b333
+ * channel 40 (5.200GHz); 0x03 0x302c0 ;0x04 0x08000
+ * channel 44 (5.220GHz); 0x03 0x302c2 ;0x04 0x0b333
+ * channel 48 (5.240GHz); 0x03 0x322c1 ;0x04 0x09999
+ * channel 52 (5.260GHz); 0x03 0x312c1 ;0x04 0x0a666
+ * channel 56 (5.280GHz); 0x03 0x332c3 ;0x04 0x08ccc
+ * channel 60 (5.300GHz); 0x03 0x30ac0 ;0x04 0x08000
+ * channel 64 (5.320GHz); 0x03 0x30ac2 ;0x04 0x08333
+ *
+ * 2.4GHz band ; 0x05 0x28986;
+ * 5.0GHz band ; 0x05 0x2a986
+ * 0x06 0x18008
+ * 0x07 0x38400
+ * 0x08 0x05108
+ * 0x09 0x27ff8
+ * 0x0a 0x14000
+ * 0x0b 0x37f99
+ * 0x0c 0x0c000
+ * ====================================================================
+ */
+u32 maxim_317_rf_data[] = {
+ (0x00 << 18) | 0x000a2,
+ (0x01 << 18) | 0x214c0,
+ (0x02 << 18) | 0x13802,
+ (0x03 << 18) | 0x30143,
+ (0x04 << 18) | 0x0accc,
+ (0x05 << 18) | 0x28986,
+ (0x06 << 18) | 0x18008,
+ (0x07 << 18) | 0x38400,
+ (0x08 << 18) | 0x05108,
+ (0x09 << 18) | 0x27ff8,
+ (0x0A << 18) | 0x14000,
+ (0x0B << 18) | 0x37f99,
+ (0x0C << 18) | 0x0c000
};
-u32 maxim_317_channel_data_50[][3] =
-{
- {(0x03<<18)|0x33cc0, (0x04<<18)|0x0b333, (0x05<<18)|0x2a986}, // channel 36
- {(0x03<<18)|0x302c0, (0x04<<18)|0x08000, (0x05<<18)|0x2a986}, // channel 40
- {(0x03<<18)|0x302c3, (0x04<<18)|0x0accc, (0x05<<18)|0x2a986}, // channel 44
- {(0x03<<18)|0x322c1, (0x04<<18)|0x09666, (0x05<<18)|0x2a986}, // channel 48
- {(0x03<<18)|0x312c2, (0x04<<18)|0x09999, (0x05<<18)|0x2a986}, // channel 52
- {(0x03<<18)|0x332c0, (0x04<<18)|0x0b333, (0x05<<18)|0x2a99e}, // channel 56
- {(0x03<<18)|0x30ac0, (0x04<<18)|0x08000, (0x05<<18)|0x2a99e}, // channel 60
- {(0x03<<18)|0x30ac3, (0x04<<18)|0x0accc, (0x05<<18)|0x2a99e} // channel 64
+u32 maxim_317_channel_data_24[][3] = {
+ {(0x03 << 18) | 0x30143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channel 01 */
+ {(0x03 << 18) | 0x32140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channel 02 */
+ {(0x03 << 18) | 0x32142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channel 03 */
+ {(0x03 << 18) | 0x32143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channel 04 */
+ {(0x03 << 18) | 0x31140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channel 05 */
+ {(0x03 << 18) | 0x31142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channel 06 */
+ {(0x03 << 18) | 0x31143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channel 07 */
+ {(0x03 << 18) | 0x33140, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channel 08 */
+ {(0x03 << 18) | 0x33142, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channel 09 */
+ {(0x03 << 18) | 0x33143, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986}, /* channel 10 */
+ {(0x03 << 18) | 0x30940, (0x04 << 18) | 0x09111, (0x05 << 18) | 0x28986}, /* channel 11 */
+ {(0x03 << 18) | 0x30942, (0x04 << 18) | 0x0bbbb, (0x05 << 18) | 0x28986}, /* channel 12 */
+ {(0x03 << 18) | 0x30943, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x28986} /* channel 13 */
};
-u32 maxim_317_power_data_24[] = {(0x0C<<18)|0x0c000, (0x0C<<18)|0x0c100};
-u32 maxim_317_power_data_50[] = {(0x0C<<18)|0x0c000, (0x0C<<18)|0x0c100};
-
-/*****************************************************************************
-;;AL2230 MP (Mass Production Version)
-;;RF Registers Setting for Airoha AL2230 silicon after June 1st, 2004
-;;Updated by Tiger Huang (June 1st, 2004)
-;;20-bit length and LSB first
-
-;;Ch01 (2412MHz) ;0x00 0x09EFC ;0x01 0x8CCCC;
-;;Ch02 (2417MHz) ;0x00 0x09EFC ;0x01 0x8CCCD;
-;;Ch03 (2422MHz) ;0x00 0x09E7C ;0x01 0x8CCCC;
-;;Ch04 (2427MHz) ;0x00 0x09E7C ;0x01 0x8CCCD;
-;;Ch05 (2432MHz) ;0x00 0x05EFC ;0x01 0x8CCCC;
-;;Ch06 (2437MHz) ;0x00 0x05EFC ;0x01 0x8CCCD;
-;;Ch07 (2442MHz) ;0x00 0x05E7C ;0x01 0x8CCCC;
-;;Ch08 (2447MHz) ;0x00 0x05E7C ;0x01 0x8CCCD;
-;;Ch09 (2452MHz) ;0x00 0x0DEFC ;0x01 0x8CCCC;
-;;Ch10 (2457MHz) ;0x00 0x0DEFC ;0x01 0x8CCCD;
-;;Ch11 (2462MHz) ;0x00 0x0DE7C ;0x01 0x8CCCC;
-;;Ch12 (2467MHz) ;0x00 0x0DE7C ;0x01 0x8CCCD;
-;;Ch13 (2472MHz) ;0x00 0x03EFC ;0x01 0x8CCCC;
-;;Ch14 (2484Mhz) ;0x00 0x03E7C ;0x01 0x86666;
-
-0x02 0x401D8; RXDCOC BW 100Hz for RXHP low
-;;0x02 0x481DC; RXDCOC BW 30Khz for RXHP low
-
-0x03 0xCFFF0
-0x04 0x23800
-0x05 0xA3B72
-0x06 0x6DA01
-0x07 0xE1688
-0x08 0x11600
-0x09 0x99E02
-0x0A 0x5DDB0
-0x0B 0xD9900
-0x0C 0x3FFBD
-0x0D 0xB0000
-0x0F 0xF00A0
-
-;RF Calibration for Airoha AL2230
-;Edit by Ben Chang (01/30/04)
-;Updated by Tiger Huang (03/03/04)
-0x0f 0xf00a0 ; Initial Setting
-0x0f 0xf00b0 ; Activate TX DCC
-0x0f 0xf02a0 ; Activate Phase Calibration
-0x0f 0xf00e0 ; Activate Filter RC Calibration
-0x0f 0xf00a0 ; Restore Initial Setting
-*****************************************************************************/
-
-u32 al2230_rf_data[] =
-{
- (0x00<<20)|0x09EFC,
- (0x01<<20)|0x8CCCC,
- (0x02<<20)|0x40058,// 20060627 Anson 0x401D8,
- (0x03<<20)|0xCFFF0,
- (0x04<<20)|0x24100,// 20060627 Anson 0x23800,
- (0x05<<20)|0xA3B2F,// 20060627 Anson 0xA3B72
- (0x06<<20)|0x6DA01,
- (0x07<<20)|0xE3628,// 20060627 Anson 0xE1688,
- (0x08<<20)|0x11600,
- (0x09<<20)|0x9DC02,// 20060627 Anosn 0x97602,//0x99E02, //0x9AE02
- (0x0A<<20)|0x5ddb0, // 941206 For QCOM interference 0x588b0,//0x5DDB0, 940601 adj 0x5aa30 for bluetooth
- (0x0B<<20)|0xD9900,
- (0x0C<<20)|0x3FFBD,
- (0x0D<<20)|0xB0000,
- (0x0F<<20)|0xF01A0 // 20060627 Anson 0xF00A0
+u32 maxim_317_channel_data_50[][3] = {
+ {(0x03 << 18) | 0x33cc0, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2a986}, /* channel 36 */
+ {(0x03 << 18) | 0x302c0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2a986}, /* channel 40 */
+ {(0x03 << 18) | 0x302c3, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x2a986}, /* channel 44 */
+ {(0x03 << 18) | 0x322c1, (0x04 << 18) | 0x09666, (0x05 << 18) | 0x2a986}, /* channel 48 */
+ {(0x03 << 18) | 0x312c2, (0x04 << 18) | 0x09999, (0x05 << 18) | 0x2a986}, /* channel 52 */
+ {(0x03 << 18) | 0x332c0, (0x04 << 18) | 0x0b333, (0x05 << 18) | 0x2a99e}, /* channel 56 */
+ {(0x03 << 18) | 0x30ac0, (0x04 << 18) | 0x08000, (0x05 << 18) | 0x2a99e}, /* channel 60 */
+ {(0x03 << 18) | 0x30ac3, (0x04 << 18) | 0x0accc, (0x05 << 18) | 0x2a99e} /* channel 64 */
};
-u32 al2230s_rf_data[] =
-{
- (0x00<<20)|0x09EFC,
- (0x01<<20)|0x8CCCC,
- (0x02<<20)|0x40058,// 20060419 0x401D8,
- (0x03<<20)|0xCFFF0,
- (0x04<<20)|0x24100,// 20060419 0x23800,
- (0x05<<20)|0xA3B2F,// 20060419 0xA3B72,
- (0x06<<20)|0x6DA01,
- (0x07<<20)|0xE3628,// 20060419 0xE1688,
- (0x08<<20)|0x11600,
- (0x09<<20)|0x9DC02,// 20060419 0x97602,//0x99E02, //0x9AE02
- (0x0A<<20)|0x5DDB0,// 941206 For QCOM interference 0x588b0,//0x5DDB0, 940601 adj 0x5aa30 for bluetooth
- (0x0B<<20)|0xD9900,
- (0x0C<<20)|0x3FFBD,
- (0x0D<<20)|0xB0000,
- (0x0F<<20)|0xF01A0 // 20060419 0xF00A0
+u32 maxim_317_power_data_24[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+u32 maxim_317_power_data_50[] = {(0x0C << 18) | 0x0c000, (0x0C << 18) | 0x0c100};
+
+/*
+ * ===================================================================
+ * AL2230 MP (Mass Production Version)
+ * RF Registers Setting for Airoha AL2230 silicon after June 1st, 2004
+ * 20-bit length and LSB first
+ *
+ * Ch01 (2412MHz) ;0x00 0x09EFC ;0x01 0x8CCCC;
+ * Ch02 (2417MHz) ;0x00 0x09EFC ;0x01 0x8CCCD;
+ * Ch03 (2422MHz) ;0x00 0x09E7C ;0x01 0x8CCCC;
+ * Ch04 (2427MHz) ;0x00 0x09E7C ;0x01 0x8CCCD;
+ * Ch05 (2432MHz) ;0x00 0x05EFC ;0x01 0x8CCCC;
+ * Ch06 (2437MHz) ;0x00 0x05EFC ;0x01 0x8CCCD;
+ * Ch07 (2442MHz) ;0x00 0x05E7C ;0x01 0x8CCCC;
+ * Ch08 (2447MHz) ;0x00 0x05E7C ;0x01 0x8CCCD;
+ * Ch09 (2452MHz) ;0x00 0x0DEFC ;0x01 0x8CCCC;
+ * Ch10 (2457MHz) ;0x00 0x0DEFC ;0x01 0x8CCCD;
+ * Ch11 (2462MHz) ;0x00 0x0DE7C ;0x01 0x8CCCC;
+ * Ch12 (2467MHz) ;0x00 0x0DE7C ;0x01 0x8CCCD;
+ * Ch13 (2472MHz) ;0x00 0x03EFC ;0x01 0x8CCCC;
+ * Ch14 (2484MHz) ;0x00 0x03E7C ;0x01 0x86666;
+ *
+ * 0x02 0x401D8; RXDCOC BW 100Hz for RXHP low
+ * 0x02 0x481DC; RXDCOC BW 30Khz for RXHP low
+ *
+ * 0x03 0xCFFF0
+ * 0x04 0x23800
+ * 0x05 0xA3B72
+ * 0x06 0x6DA01
+ * 0x07 0xE1688
+ * 0x08 0x11600
+ * 0x09 0x99E02
+ * 0x0A 0x5DDB0
+ * 0x0B 0xD9900
+ * 0x0C 0x3FFBD
+ * 0x0D 0xB0000
+ * 0x0F 0xF00A0
+ *
+ * RF Calibration for Airoha AL2230
+ *
+ * 0x0f 0xf00a0 ; Initial Setting
+ * 0x0f 0xf00b0 ; Activate TX DCC
+ * 0x0f 0xf02a0 ; Activate Phase Calibration
+ * 0x0f 0xf00e0 ; Activate Filter RC Calibration
+ * 0x0f 0xf00a0 ; Restore Initial Setting
+ * ==================================================================
+ */
+u32 al2230_rf_data[] = {
+ (0x00 << 20) | 0x09EFC,
+ (0x01 << 20) | 0x8CCCC,
+ (0x02 << 20) | 0x40058,
+ (0x03 << 20) | 0xCFFF0,
+ (0x04 << 20) | 0x24100,
+ (0x05 << 20) | 0xA3B2F,
+ (0x06 << 20) | 0x6DA01,
+ (0x07 << 20) | 0xE3628,
+ (0x08 << 20) | 0x11600,
+ (0x09 << 20) | 0x9DC02,
+ (0x0A << 20) | 0x5ddb0,
+ (0x0B << 20) | 0xD9900,
+ (0x0C << 20) | 0x3FFBD,
+ (0x0D << 20) | 0xB0000,
+ (0x0F << 20) | 0xF01A0
};
-u32 al2230_channel_data_24[][2] =
-{
- {(0x00<<20)|0x09EFC, (0x01<<20)|0x8CCCC}, // channe1 01
- {(0x00<<20)|0x09EFC, (0x01<<20)|0x8CCCD}, // channe1 02
- {(0x00<<20)|0x09E7C, (0x01<<20)|0x8CCCC}, // channe1 03
- {(0x00<<20)|0x09E7C, (0x01<<20)|0x8CCCD}, // channe1 04
- {(0x00<<20)|0x05EFC, (0x01<<20)|0x8CCCC}, // channe1 05
- {(0x00<<20)|0x05EFC, (0x01<<20)|0x8CCCD}, // channe1 06
- {(0x00<<20)|0x05E7C, (0x01<<20)|0x8CCCC}, // channe1 07
- {(0x00<<20)|0x05E7C, (0x01<<20)|0x8CCCD}, // channe1 08
- {(0x00<<20)|0x0DEFC, (0x01<<20)|0x8CCCC}, // channe1 09
- {(0x00<<20)|0x0DEFC, (0x01<<20)|0x8CCCD}, // channe1 10
- {(0x00<<20)|0x0DE7C, (0x01<<20)|0x8CCCC}, // channe1 11
- {(0x00<<20)|0x0DE7C, (0x01<<20)|0x8CCCD}, // channe1 12
- {(0x00<<20)|0x03EFC, (0x01<<20)|0x8CCCC}, // channe1 13
- {(0x00<<20)|0x03E7C, (0x01<<20)|0x86666} // channe1 14
+u32 al2230s_rf_data[] = {
+ (0x00 << 20) | 0x09EFC,
+ (0x01 << 20) | 0x8CCCC,
+ (0x02 << 20) | 0x40058,
+ (0x03 << 20) | 0xCFFF0,
+ (0x04 << 20) | 0x24100,
+ (0x05 << 20) | 0xA3B2F,
+ (0x06 << 20) | 0x6DA01,
+ (0x07 << 20) | 0xE3628,
+ (0x08 << 20) | 0x11600,
+ (0x09 << 20) | 0x9DC02,
+ (0x0A << 20) | 0x5DDB0,
+ (0x0B << 20) | 0xD9900,
+ (0x0C << 20) | 0x3FFBD,
+ (0x0D << 20) | 0xB0000,
+ (0x0F << 20) | 0xF01A0
};
-// Current setting. u32 airoha_power_data_24[] = {(0x09<<20)|0x90202, (0x09<<20)|0x96602, (0x09<<20)|0x97602};
-#define AIROHA_TXVGA_LOW_INDEX 31 // Index for 0x90202
-#define AIROHA_TXVGA_MIDDLE_INDEX 12 // Index for 0x96602
-#define AIROHA_TXVGA_HIGH_INDEX 8 // Index for 0x97602 1.0.24.0 1.0.28.0
-/*
-u32 airoha_power_data_24[] =
-{
- 0x9FE02, // Max - 0 dB
- 0x9BE02, // Max - 1 dB
- 0x9DE02, // Max - 2 dB
- 0x99E02, // Max - 3 dB
- 0x9EE02, // Max - 4 dB
- 0x9AE02, // Max - 5 dB
- 0x9CE02, // Max - 6 dB
- 0x98E02, // Max - 7 dB
- 0x97602, // Max - 8 dB
- 0x93602, // Max - 9 dB
- 0x95602, // Max - 10 dB
- 0x91602, // Max - 11 dB
- 0x96602, // Max - 12 dB
- 0x92602, // Max - 13 dB
- 0x94602, // Max - 14 dB
- 0x90602, // Max - 15 dB
- 0x97A02, // Max - 16 dB
- 0x93A02, // Max - 17 dB
- 0x95A02, // Max - 18 dB
- 0x91A02, // Max - 19 dB
- 0x96A02, // Max - 20 dB
- 0x92A02, // Max - 21 dB
- 0x94A02, // Max - 22 dB
- 0x90A02, // Max - 23 dB
- 0x97202, // Max - 24 dB
- 0x93202, // Max - 25 dB
- 0x95202, // Max - 26 dB
- 0x91202, // Max - 27 dB
- 0x96202, // Max - 28 dB
- 0x92202, // Max - 29 dB
- 0x94202, // Max - 30 dB
- 0x90202 // Max - 31 dB
+u32 al2230_channel_data_24[][2] = {
+ {(0x00 << 20) | 0x09EFC, (0x01 << 20) | 0x8CCCC}, /* channel 01 */
+ {(0x00 << 20) | 0x09EFC, (0x01 << 20) | 0x8CCCD}, /* channel 02 */
+ {(0x00 << 20) | 0x09E7C, (0x01 << 20) | 0x8CCCC}, /* channel 03 */
+ {(0x00 << 20) | 0x09E7C, (0x01 << 20) | 0x8CCCD}, /* channel 04 */
+ {(0x00 << 20) | 0x05EFC, (0x01 << 20) | 0x8CCCC}, /* channel 05 */
+ {(0x00 << 20) | 0x05EFC, (0x01 << 20) | 0x8CCCD}, /* channel 06 */
+ {(0x00 << 20) | 0x05E7C, (0x01 << 20) | 0x8CCCC}, /* channel 07 */
+ {(0x00 << 20) | 0x05E7C, (0x01 << 20) | 0x8CCCD}, /* channel 08 */
+ {(0x00 << 20) | 0x0DEFC, (0x01 << 20) | 0x8CCCC}, /* channel 09 */
+ {(0x00 << 20) | 0x0DEFC, (0x01 << 20) | 0x8CCCD}, /* channel 10 */
+ {(0x00 << 20) | 0x0DE7C, (0x01 << 20) | 0x8CCCC}, /* channel 11 */
+ {(0x00 << 20) | 0x0DE7C, (0x01 << 20) | 0x8CCCD}, /* channel 12 */
+ {(0x00 << 20) | 0x03EFC, (0x01 << 20) | 0x8CCCC}, /* channel 13 */
+ {(0x00 << 20) | 0x03E7C, (0x01 << 20) | 0x86666} /* channel 14 */
};
-*/
-// 20040927 1.1.69.1000 ybjiang
-// from John
-u32 al2230_txvga_data[][2] =
-{
- //value , index
+/* Current setting. u32 airoha_power_data_24[] = {(0x09 << 20) | 0x90202, (0x09 << 20) | 0x96602, (0x09 << 20) | 0x97602}; */
+#define AIROHA_TXVGA_LOW_INDEX 31 /* Index for 0x90202 */
+#define AIROHA_TXVGA_MIDDLE_INDEX 12 /* Index for 0x96602 */
+#define AIROHA_TXVGA_HIGH_INDEX 8 /* Index for 0x97602 1.0.24.0 1.0.28.0 */
+
+u32 al2230_txvga_data[][2] = {
+ /* value , index */
{0x090202, 0},
{0x094202, 2},
{0x092202, 4},
@@ -551,263 +489,242 @@ u32 al2230_txvga_data[][2] =
{0x09FE02, 63}
};
-//--------------------------------
-// For Airoha AL7230, 2.4Ghz band
-// Edit by Tiger, (March, 9, 2005)
-// 24bit, MSB first
-
-//channel independent registers:
-u32 al7230_rf_data_24[] =
-{
- (0x00<<24)|0x003790,
- (0x01<<24)|0x133331,
- (0x02<<24)|0x841FF2,
- (0x03<<24)|0x3FDFA3,
- (0x04<<24)|0x7FD784,
- (0x05<<24)|0x802B55,
- (0x06<<24)|0x56AF36,
- (0x07<<24)|0xCE0207,
- (0x08<<24)|0x6EBC08,
- (0x09<<24)|0x221BB9,
- (0x0A<<24)|0xE0000A,
- (0x0B<<24)|0x08071B,
- (0x0C<<24)|0x000A3C,
- (0x0D<<24)|0xFFFFFD,
- (0x0E<<24)|0x00000E,
- (0x0F<<24)|0x1ABA8F
+/*
+ * ==========================================
+ * For Airoha AL7230, 2.4GHz band
+ * 24-bit, MSB first
+ */
+
+/* channel independent registers: */
+u32 al7230_rf_data_24[] = {
+ (0x00 << 24) | 0x003790,
+ (0x01 << 24) | 0x133331,
+ (0x02 << 24) | 0x841FF2,
+ (0x03 << 24) | 0x3FDFA3,
+ (0x04 << 24) | 0x7FD784,
+ (0x05 << 24) | 0x802B55,
+ (0x06 << 24) | 0x56AF36,
+ (0x07 << 24) | 0xCE0207,
+ (0x08 << 24) | 0x6EBC08,
+ (0x09 << 24) | 0x221BB9,
+ (0x0A << 24) | 0xE0000A,
+ (0x0B << 24) | 0x08071B,
+ (0x0C << 24) | 0x000A3C,
+ (0x0D << 24) | 0xFFFFFD,
+ (0x0E << 24) | 0x00000E,
+ (0x0F << 24) | 0x1ABA8F
};
-u32 al7230_channel_data_24[][2] =
-{
- {(0x00<<24)|0x003790, (0x01<<24)|0x133331}, // channe1 01
- {(0x00<<24)|0x003790, (0x01<<24)|0x1B3331}, // channe1 02
- {(0x00<<24)|0x003790, (0x01<<24)|0x033331}, // channe1 03
- {(0x00<<24)|0x003790, (0x01<<24)|0x0B3331}, // channe1 04
- {(0x00<<24)|0x0037A0, (0x01<<24)|0x133331}, // channe1 05
- {(0x00<<24)|0x0037A0, (0x01<<24)|0x1B3331}, // channe1 06
- {(0x00<<24)|0x0037A0, (0x01<<24)|0x033331}, // channe1 07
- {(0x00<<24)|0x0037A0, (0x01<<24)|0x0B3331}, // channe1 08
- {(0x00<<24)|0x0037B0, (0x01<<24)|0x133331}, // channe1 09
- {(0x00<<24)|0x0037B0, (0x01<<24)|0x1B3331}, // channe1 10
- {(0x00<<24)|0x0037B0, (0x01<<24)|0x033331}, // channe1 11
- {(0x00<<24)|0x0037B0, (0x01<<24)|0x0B3331}, // channe1 12
- {(0x00<<24)|0x0037C0, (0x01<<24)|0x133331}, // channe1 13
- {(0x00<<24)|0x0037C0, (0x01<<24)|0x066661} // channel 14
+u32 al7230_channel_data_24[][2] = {
+ {(0x00 << 24) | 0x003790, (0x01 << 24) | 0x133331}, /* channel 01 */
+ {(0x00 << 24) | 0x003790, (0x01 << 24) | 0x1B3331}, /* channel 02 */
+ {(0x00 << 24) | 0x003790, (0x01 << 24) | 0x033331}, /* channel 03 */
+ {(0x00 << 24) | 0x003790, (0x01 << 24) | 0x0B3331}, /* channel 04 */
+ {(0x00 << 24) | 0x0037A0, (0x01 << 24) | 0x133331}, /* channel 05 */
+ {(0x00 << 24) | 0x0037A0, (0x01 << 24) | 0x1B3331}, /* channel 06 */
+ {(0x00 << 24) | 0x0037A0, (0x01 << 24) | 0x033331}, /* channel 07 */
+ {(0x00 << 24) | 0x0037A0, (0x01 << 24) | 0x0B3331}, /* channel 08 */
+ {(0x00 << 24) | 0x0037B0, (0x01 << 24) | 0x133331}, /* channel 09 */
+ {(0x00 << 24) | 0x0037B0, (0x01 << 24) | 0x1B3331}, /* channel 10 */
+ {(0x00 << 24) | 0x0037B0, (0x01 << 24) | 0x033331}, /* channel 11 */
+ {(0x00 << 24) | 0x0037B0, (0x01 << 24) | 0x0B3331}, /* channel 12 */
+ {(0x00 << 24) | 0x0037C0, (0x01 << 24) | 0x133331}, /* channel 13 */
+ {(0x00 << 24) | 0x0037C0, (0x01 << 24) | 0x066661} /* channel 14 */
};
-//channel independent registers:
-u32 al7230_rf_data_50[] =
-{
- (0x00<<24)|0x0FF520,
- (0x01<<24)|0x000001,
- (0x02<<24)|0x451FE2,
- (0x03<<24)|0x5FDFA3,
- (0x04<<24)|0x6FD784,
- (0x05<<24)|0x853F55,
- (0x06<<24)|0x56AF36,
- (0x07<<24)|0xCE0207,
- (0x08<<24)|0x6EBC08,
- (0x09<<24)|0x221BB9,
- (0x0A<<24)|0xE0600A,
- (0x0B<<24)|0x08044B,
- (0x0C<<24)|0x00143C,
- (0x0D<<24)|0xFFFFFD,
- (0x0E<<24)|0x00000E,
- (0x0F<<24)|0x12BACF //5Ghz default state
+/* channel independent registers: */
+u32 al7230_rf_data_50[] = {
+ (0x00 << 24) | 0x0FF520,
+ (0x01 << 24) | 0x000001,
+ (0x02 << 24) | 0x451FE2,
+ (0x03 << 24) | 0x5FDFA3,
+ (0x04 << 24) | 0x6FD784,
+ (0x05 << 24) | 0x853F55,
+ (0x06 << 24) | 0x56AF36,
+ (0x07 << 24) | 0xCE0207,
+ (0x08 << 24) | 0x6EBC08,
+ (0x09 << 24) | 0x221BB9,
+ (0x0A << 24) | 0xE0600A,
+ (0x0B << 24) | 0x08044B,
+ (0x0C << 24) | 0x00143C,
+ (0x0D << 24) | 0xFFFFFD,
+ (0x0E << 24) | 0x00000E,
+ (0x0F << 24) | 0x12BACF /* 5GHz default state */
};
-u32 al7230_channel_data_5[][4] =
-{
- //channel dependent registers: 0x00, 0x01 and 0x04
- //11J ===========
- {184, (0x00<<24)|0x0FF520, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 184
- {188, (0x00<<24)|0x0FF520, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 188
- {192, (0x00<<24)|0x0FF530, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 192
- {196, (0x00<<24)|0x0FF530, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 196
- {8, (0x00<<24)|0x0FF540, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 008
- {12, (0x00<<24)|0x0FF540, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 012
- {16, (0x00<<24)|0x0FF550, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 016
- {34, (0x00<<24)|0x0FF560, (0x01<<24)|0x055551, (0x04<<24)|0x77F784}, // channel 034
- {38, (0x00<<24)|0x0FF570, (0x01<<24)|0x100001, (0x04<<24)|0x77F784}, // channel 038
- {42, (0x00<<24)|0x0FF570, (0x01<<24)|0x1AAAA1, (0x04<<24)|0x77F784}, // channel 042
- {46, (0x00<<24)|0x0FF570, (0x01<<24)|0x055551, (0x04<<24)|0x77F784}, // channel 046
- //11 A/H =========
- {36, (0x00<<24)|0x0FF560, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 036
- {40, (0x00<<24)|0x0FF570, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 040
- {44, (0x00<<24)|0x0FF570, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 044
- {48, (0x00<<24)|0x0FF570, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 048
- {52, (0x00<<24)|0x0FF580, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 052
- {56, (0x00<<24)|0x0FF580, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 056
- {60, (0x00<<24)|0x0FF580, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 060
- {64, (0x00<<24)|0x0FF590, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 064
- {100, (0x00<<24)|0x0FF5C0, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 100
- {104, (0x00<<24)|0x0FF5C0, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 104
- {108, (0x00<<24)|0x0FF5C0, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 108
- {112, (0x00<<24)|0x0FF5D0, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 112
- {116, (0x00<<24)|0x0FF5D0, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 116
- {120, (0x00<<24)|0x0FF5D0, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 120
- {124, (0x00<<24)|0x0FF5E0, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 124
- {128, (0x00<<24)|0x0FF5E0, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 128
- {132, (0x00<<24)|0x0FF5E0, (0x01<<24)|0x0AAAA1, (0x04<<24)|0x77F784}, // channel 132
- {136, (0x00<<24)|0x0FF5F0, (0x01<<24)|0x155551, (0x04<<24)|0x77F784}, // channel 136
- {140, (0x00<<24)|0x0FF5F0, (0x01<<24)|0x000001, (0x04<<24)|0x67F784}, // channel 140
- {149, (0x00<<24)|0x0FF600, (0x01<<24)|0x180001, (0x04<<24)|0x77F784}, // channel 149
- {153, (0x00<<24)|0x0FF600, (0x01<<24)|0x02AAA1, (0x04<<24)|0x77F784}, // channel 153
- {157, (0x00<<24)|0x0FF600, (0x01<<24)|0x0D5551, (0x04<<24)|0x77F784}, // channel 157
- {161, (0x00<<24)|0x0FF610, (0x01<<24)|0x180001, (0x04<<24)|0x77F784}, // channel 161
- {165, (0x00<<24)|0x0FF610, (0x01<<24)|0x02AAA1, (0x04<<24)|0x77F784} // channel 165
+u32 al7230_channel_data_5[][4] = {
+ /* channel dependent registers: 0x00, 0x01 and 0x04 */
+ /* 11J =========== */
+ {184, (0x00 << 24) | 0x0FF520, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 184 */
+ {188, (0x00 << 24) | 0x0FF520, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 188 */
+ {192, (0x00 << 24) | 0x0FF530, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 192 */
+ {196, (0x00 << 24) | 0x0FF530, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 196 */
+ {8, (0x00 << 24) | 0x0FF540, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 008 */
+ {12, (0x00 << 24) | 0x0FF540, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 012 */
+ {16, (0x00 << 24) | 0x0FF550, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 016 */
+ {34, (0x00 << 24) | 0x0FF560, (0x01 << 24) | 0x055551, (0x04 << 24) | 0x77F784}, /* channel 034 */
+ {38, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x100001, (0x04 << 24) | 0x77F784}, /* channel 038 */
+ {42, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x1AAAA1, (0x04 << 24) | 0x77F784}, /* channel 042 */
+ {46, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x055551, (0x04 << 24) | 0x77F784}, /* channel 046 */
+ /* 11 A/H ========= */
+ {36, (0x00 << 24) | 0x0FF560, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 036 */
+ {40, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 040 */
+ {44, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 044 */
+ {48, (0x00 << 24) | 0x0FF570, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 048 */
+ {52, (0x00 << 24) | 0x0FF580, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 052 */
+ {56, (0x00 << 24) | 0x0FF580, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 056 */
+ {60, (0x00 << 24) | 0x0FF580, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 060 */
+ {64, (0x00 << 24) | 0x0FF590, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 064 */
+ {100, (0x00 << 24) | 0x0FF5C0, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 100 */
+ {104, (0x00 << 24) | 0x0FF5C0, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 104 */
+ {108, (0x00 << 24) | 0x0FF5C0, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 108 */
+ {112, (0x00 << 24) | 0x0FF5D0, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 112 */
+ {116, (0x00 << 24) | 0x0FF5D0, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 116 */
+ {120, (0x00 << 24) | 0x0FF5D0, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 120 */
+ {124, (0x00 << 24) | 0x0FF5E0, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 124 */
+ {128, (0x00 << 24) | 0x0FF5E0, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 128 */
+ {132, (0x00 << 24) | 0x0FF5E0, (0x01 << 24) | 0x0AAAA1, (0x04 << 24) | 0x77F784}, /* channel 132 */
+ {136, (0x00 << 24) | 0x0FF5F0, (0x01 << 24) | 0x155551, (0x04 << 24) | 0x77F784}, /* channel 136 */
+ {140, (0x00 << 24) | 0x0FF5F0, (0x01 << 24) | 0x000001, (0x04 << 24) | 0x67F784}, /* channel 140 */
+ {149, (0x00 << 24) | 0x0FF600, (0x01 << 24) | 0x180001, (0x04 << 24) | 0x77F784}, /* channel 149 */
+ {153, (0x00 << 24) | 0x0FF600, (0x01 << 24) | 0x02AAA1, (0x04 << 24) | 0x77F784}, /* channel 153 */
+ {157, (0x00 << 24) | 0x0FF600, (0x01 << 24) | 0x0D5551, (0x04 << 24) | 0x77F784}, /* channel 157 */
+ {161, (0x00 << 24) | 0x0FF610, (0x01 << 24) | 0x180001, (0x04 << 24) | 0x77F784}, /* channel 161 */
+ {165, (0x00 << 24) | 0x0FF610, (0x01 << 24) | 0x02AAA1, (0x04 << 24) | 0x77F784} /* channel 165 */
};
-//; RF Calibration <=== Register 0x0F
-//0x0F 0x1ABA8F; start from 2.4Ghz default state
-//0x0F 0x9ABA8F; TXDC compensation
-//0x0F 0x3ABA8F; RXFIL adjustment
-//0x0F 0x1ABA8F; restore 2.4Ghz default state
-
-//;TXVGA Mapping Table <=== Register 0x0B
-u32 al7230_txvga_data[][2] =
-{
- {0x08040B, 0}, //TXVGA=0;
- {0x08041B, 1}, //TXVGA=1;
- {0x08042B, 2}, //TXVGA=2;
- {0x08043B, 3}, //TXVGA=3;
- {0x08044B, 4}, //TXVGA=4;
- {0x08045B, 5}, //TXVGA=5;
- {0x08046B, 6}, //TXVGA=6;
- {0x08047B, 7}, //TXVGA=7;
- {0x08048B, 8}, //TXVGA=8;
- {0x08049B, 9}, //TXVGA=9;
- {0x0804AB, 10}, //TXVGA=10;
- {0x0804BB, 11}, //TXVGA=11;
- {0x0804CB, 12}, //TXVGA=12;
- {0x0804DB, 13}, //TXVGA=13;
- {0x0804EB, 14}, //TXVGA=14;
- {0x0804FB, 15}, //TXVGA=15;
- {0x08050B, 16}, //TXVGA=16;
- {0x08051B, 17}, //TXVGA=17;
- {0x08052B, 18}, //TXVGA=18;
- {0x08053B, 19}, //TXVGA=19;
- {0x08054B, 20}, //TXVGA=20;
- {0x08055B, 21}, //TXVGA=21;
- {0x08056B, 22}, //TXVGA=22;
- {0x08057B, 23}, //TXVGA=23;
- {0x08058B, 24}, //TXVGA=24;
- {0x08059B, 25}, //TXVGA=25;
- {0x0805AB, 26}, //TXVGA=26;
- {0x0805BB, 27}, //TXVGA=27;
- {0x0805CB, 28}, //TXVGA=28;
- {0x0805DB, 29}, //TXVGA=29;
- {0x0805EB, 30}, //TXVGA=30;
- {0x0805FB, 31}, //TXVGA=31;
- {0x08060B, 32}, //TXVGA=32;
- {0x08061B, 33}, //TXVGA=33;
- {0x08062B, 34}, //TXVGA=34;
- {0x08063B, 35}, //TXVGA=35;
- {0x08064B, 36}, //TXVGA=36;
- {0x08065B, 37}, //TXVGA=37;
- {0x08066B, 38}, //TXVGA=38;
- {0x08067B, 39}, //TXVGA=39;
- {0x08068B, 40}, //TXVGA=40;
- {0x08069B, 41}, //TXVGA=41;
- {0x0806AB, 42}, //TXVGA=42;
- {0x0806BB, 43}, //TXVGA=43;
- {0x0806CB, 44}, //TXVGA=44;
- {0x0806DB, 45}, //TXVGA=45;
- {0x0806EB, 46}, //TXVGA=46;
- {0x0806FB, 47}, //TXVGA=47;
- {0x08070B, 48}, //TXVGA=48;
- {0x08071B, 49}, //TXVGA=49;
- {0x08072B, 50}, //TXVGA=50;
- {0x08073B, 51}, //TXVGA=51;
- {0x08074B, 52}, //TXVGA=52;
- {0x08075B, 53}, //TXVGA=53;
- {0x08076B, 54}, //TXVGA=54;
- {0x08077B, 55}, //TXVGA=55;
- {0x08078B, 56}, //TXVGA=56;
- {0x08079B, 57}, //TXVGA=57;
- {0x0807AB, 58}, //TXVGA=58;
- {0x0807BB, 59}, //TXVGA=59;
- {0x0807CB, 60}, //TXVGA=60;
- {0x0807DB, 61}, //TXVGA=61;
- {0x0807EB, 62}, //TXVGA=62;
- {0x0807FB, 63}, //TXVGA=63;
+/*
+ * RF Calibration <=== Register 0x0F
+ * 0x0F 0x1ABA8F; start from 2.4Ghz default state
+ * 0x0F 0x9ABA8F; TXDC compensation
+ * 0x0F 0x3ABA8F; RXFIL adjustment
+ * 0x0F 0x1ABA8F; restore 2.4Ghz default state
+ */
+
+/* TXVGA Mapping Table <=== Register 0x0B */
+u32 al7230_txvga_data[][2] = {
+ {0x08040B, 0}, /* TXVGA = 0; */
+ {0x08041B, 1}, /* TXVGA = 1; */
+ {0x08042B, 2}, /* TXVGA = 2; */
+ {0x08043B, 3}, /* TXVGA = 3; */
+ {0x08044B, 4}, /* TXVGA = 4; */
+ {0x08045B, 5}, /* TXVGA = 5; */
+ {0x08046B, 6}, /* TXVGA = 6; */
+ {0x08047B, 7}, /* TXVGA = 7; */
+ {0x08048B, 8}, /* TXVGA = 8; */
+ {0x08049B, 9}, /* TXVGA = 9; */
+ {0x0804AB, 10}, /* TXVGA = 10; */
+ {0x0804BB, 11}, /* TXVGA = 11; */
+ {0x0804CB, 12}, /* TXVGA = 12; */
+ {0x0804DB, 13}, /* TXVGA = 13; */
+ {0x0804EB, 14}, /* TXVGA = 14; */
+ {0x0804FB, 15}, /* TXVGA = 15; */
+ {0x08050B, 16}, /* TXVGA = 16; */
+ {0x08051B, 17}, /* TXVGA = 17; */
+ {0x08052B, 18}, /* TXVGA = 18; */
+ {0x08053B, 19}, /* TXVGA = 19; */
+ {0x08054B, 20}, /* TXVGA = 20; */
+ {0x08055B, 21}, /* TXVGA = 21; */
+ {0x08056B, 22}, /* TXVGA = 22; */
+ {0x08057B, 23}, /* TXVGA = 23; */
+ {0x08058B, 24}, /* TXVGA = 24; */
+ {0x08059B, 25}, /* TXVGA = 25; */
+ {0x0805AB, 26}, /* TXVGA = 26; */
+ {0x0805BB, 27}, /* TXVGA = 27; */
+ {0x0805CB, 28}, /* TXVGA = 28; */
+ {0x0805DB, 29}, /* TXVGA = 29; */
+ {0x0805EB, 30}, /* TXVGA = 30; */
+ {0x0805FB, 31}, /* TXVGA = 31; */
+ {0x08060B, 32}, /* TXVGA = 32; */
+ {0x08061B, 33}, /* TXVGA = 33; */
+ {0x08062B, 34}, /* TXVGA = 34; */
+ {0x08063B, 35}, /* TXVGA = 35; */
+ {0x08064B, 36}, /* TXVGA = 36; */
+ {0x08065B, 37}, /* TXVGA = 37; */
+ {0x08066B, 38}, /* TXVGA = 38; */
+ {0x08067B, 39}, /* TXVGA = 39; */
+ {0x08068B, 40}, /* TXVGA = 40; */
+ {0x08069B, 41}, /* TXVGA = 41; */
+ {0x0806AB, 42}, /* TXVGA = 42; */
+ {0x0806BB, 43}, /* TXVGA = 43; */
+ {0x0806CB, 44}, /* TXVGA = 44; */
+ {0x0806DB, 45}, /* TXVGA = 45; */
+ {0x0806EB, 46}, /* TXVGA = 46; */
+ {0x0806FB, 47}, /* TXVGA = 47; */
+ {0x08070B, 48}, /* TXVGA = 48; */
+ {0x08071B, 49}, /* TXVGA = 49; */
+ {0x08072B, 50}, /* TXVGA = 50; */
+ {0x08073B, 51}, /* TXVGA = 51; */
+ {0x08074B, 52}, /* TXVGA = 52; */
+ {0x08075B, 53}, /* TXVGA = 53; */
+ {0x08076B, 54}, /* TXVGA = 54; */
+ {0x08077B, 55}, /* TXVGA = 55; */
+ {0x08078B, 56}, /* TXVGA = 56; */
+ {0x08079B, 57}, /* TXVGA = 57; */
+ {0x0807AB, 58}, /* TXVGA = 58; */
+ {0x0807BB, 59}, /* TXVGA = 59; */
+ {0x0807CB, 60}, /* TXVGA = 60; */
+ {0x0807DB, 61}, /* TXVGA = 61; */
+ {0x0807EB, 62}, /* TXVGA = 62; */
+ {0x0807FB, 63}, /* TXVGA = 63; */
};
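The first column of al7230_txvga_data above follows a simple pattern: every entry is the register-0x0B base value 0x08040B with the 6-bit TXVGA level placed in bits 4-9. A minimal sketch of that relationship, assuming the driver's u8/u32 types; the helper name is hypothetical and not part of the driver:

static u32 al7230_txvga_entry(u8 txvga)
{
	/* txvga is the 0..63 index from the second column above;
	 * it sits directly in bits 4-9 of the register-0x0B word */
	return 0x08040B + ((u32)(txvga & 0x3F) << 4);
}

For example, al7230_txvga_entry(16) yields 0x08050B, which matches the table entry for TXVGA = 16.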
-//--------------------------------
-
+/* ============================================= */
-//; W89RF242 RFIC SPI programming initial data
-//; Winbond WLAN 11g RFIC BB-SPI register -- version FA5976A rev 1.3b
-//; Update Date: Ocotber 3, 2005 by PP10 Hsiang-Te Ho
-//;
-//; Version 1.3b revision items: (Oct. 1, 2005 by HTHo) for FA5976A
-u32 w89rf242_rf_data[] =
-{
- (0x00<<24)|0xF86100, // 20060721 0xF86100, //; 3E184; MODA (0x00) -- Normal mode ; calibration off
- (0x01<<24)|0xEFFFC2, //; 3BFFF; MODB (0x01) -- turn off RSSI, and other circuits are turned on
- (0x02<<24)|0x102504, //; 04094; FSET (0x02) -- default 20MHz crystal ; Icmp=1.5mA
- (0x03<<24)|0x026286, //; 0098A; FCHN (0x03) -- default CH7, 2442MHz
- (0x04<<24)|0x000208, // 20060612.1.a 0x0002C8, // 20050818 // 20050816 0x000388
- //; 02008; FCAL (0x04) -- XTAL Freq Trim=001000 (socket board#1); FA5976AYG_v1.3C
- (0x05<<24)|0x24C60A, // 20060612.1.a 0x24C58A, // 941003 0x24C48A, // 20050818.2 0x24848A, // 20050818 // 20050816 0x24C48A
- //; 09316; GANA (0x05) -- TX VGA default (TXVGA=0x18(12)) & TXGPK=110 ; FA5976A_1.3D
- (0x06<<24)|0x3432CC, // 941003 0x26C34C, // 20050818 0x06B40C
- //; 0D0CB; GANB (0x06) -- RXDC(DC offset) on; LNA=11; RXVGA=001011(11) ; RXFLSW=11(010001); RXGPK=00; RXGCF=00; -50dBm input
- (0x07<<24)|0x0C68CE, // 20050818.2 0x0C66CE, // 20050818 // 20050816 0x0C68CE
- //; 031A3; FILT (0x07) -- TX/RX filter with auto-tuning; TFLBW=011; RFLBW=100
- (0x08<<24)|0x100010, //; 04000; TCAL (0x08) -- //for LO
- (0x09<<24)|0x004012, // 20060612.1.a 0x6E4012, // 0x004012,
- //; 1B900; RCALA (0x09) -- FASTS=11; HPDE=01 (100nsec); SEHP=1 (select B0 pin=RXHP); RXHP=1 (Turn on RXHP function)(FA5976A_1.3C)
- (0x0A<<24)|0x704014, //; 1C100; RCALB (0x0A)
- (0x0B<<24)|0x18BDD6, // 941003 0x1805D6, // 20050818.2 0x1801D6, // 20050818 // 20050816 0x1805D6
- //; 062F7; IQCAL (0x0B) -- Turn on LO phase tuner=0111 & RX-LO phase = 0111; FA5976A_1.3B (2005/09/29)
- (0x0C<<24)|0x575558, // 20050818.2 0x555558, // 20050818 // 20050816 0x575558
- //; 15D55 ; IBSA (0x0C) -- IFPre =11 ; TC5376A_v1.3A for corner
- (0x0D<<24)|0x55545A, // 20060612.1.a 0x55555A,
- //; 15555 ; IBSB (0x0D)
- (0x0E<<24)|0x5557DC, // 20060612.1.a 0x55555C, // 941003 0x5557DC,
- //; 1555F ; IBSC (0x0E) -- IRLNA & IRLNB (PTAT & Const current)=01/01; FA5976B_1.3F (2005/11/25)
- (0x10<<24)|0x000C20, // 941003 0x000020, // 20050818
- //; 00030 ; TMODA (0x10) -- LNA_gain_step=0011 ; LNA=15/16dB
- (0x11<<24)|0x0C0022, // 941003 0x030022 // 20050818.2 0x030022 // 20050818 // 20050816 0x0C0022
- //; 03000 ; TMODB (0x11) -- Turn ON RX-Q path Test Switch; To improve IQ path group delay (FA5976A_1.3C)
- (0x12<<24)|0x000024 // 20060612.1.a 0x001824 // 941003 add
- //; TMODC (0x12) -- Turn OFF Tempearure sensor
+/*
+ * W89RF242 RFIC SPI programming initial data
+ * Winbond WLAN 11g RFIC BB-SPI register -- version FA5976A rev 1.3b
+ */
+u32 w89rf242_rf_data[] = {
+ (0x00 << 24) | 0xF86100, /* 3E184; MODA (0x00) -- Normal mode ; calibration off */
+ (0x01 << 24) | 0xEFFFC2, /* 3BFFF; MODB (0x01) -- turn off RSSI, and other circuits are turned on */
+ (0x02 << 24) | 0x102504, /* 04094; FSET (0x02) -- default 20MHz crystal ; Icmp=1.5mA */
+ (0x03 << 24) | 0x026286, /* 0098A; FCHN (0x03) -- default CH7, 2442MHz */
+ (0x04 << 24) | 0x000208, /* 02008; FCAL (0x04) -- XTAL Freq Trim=001000 (socket board#1); FA5976AYG_v1.3C */
+ (0x05 << 24) | 0x24C60A, /* 09316; GANA (0x05) -- TX VGA default (TXVGA=0x18(12)) & TXGPK=110 ; FA5976A_1.3D */
+ (0x06 << 24) | 0x3432CC, /* 0D0CB; GANB (0x06) -- RXDC(DC offset) on; LNA=11; RXVGA=001011(11) ; RXFLSW=11(010001); RXGPK=00; RXGCF=00; -50dBm input */
+ (0x07 << 24) | 0x0C68CE, /* 031A3; FILT (0x07) -- TX/RX filter with auto-tuning; TFLBW=011; RFLBW=100 */
+ (0x08 << 24) | 0x100010, /* 04000; TCAL (0x08) -- for LO */
+ (0x09 << 24) | 0x004012, /* 1B900; RCALA (0x09) -- FASTS=11; HPDE=01 (100nsec); SEHP=1 (select B0 pin=RXHP); RXHP=1 (Turn on RXHP function)(FA5976A_1.3C) */
+ (0x0A << 24) | 0x704014, /* 1C100; RCALB (0x0A) */
+ (0x0B << 24) | 0x18BDD6, /* 062F7; IQCAL (0x0B) -- Turn on LO phase tuner=0111 & RX-LO phase = 0111; FA5976A_1.3B */
+ (0x0C << 24) | 0x575558, /* 15D55 ; IBSA (0x0C) -- IFPre =11 ; TC5376A_v1.3A for corner */
+ (0x0D << 24) | 0x55545A, /* 15555 ; IBSB (0x0D) */
+ (0x0E << 24) | 0x5557DC, /* 1555F ; IBSC (0x0E) -- IRLNA & IRLNB (PTAT & Const current)=01/01; FA5976B_1.3F */
+ (0x10 << 24) | 0x000C20, /* 00030 ; TMODA (0x10) -- LNA_gain_step=0011 ; LNA=15/16dB */
+ (0x11 << 24) | 0x0C0022, /* 03000 ; TMODB (0x11) -- Turn ON RX-Q path Test Switch; To improve IQ path group delay (FA5976A_1.3C) */
+	(0x12 << 24) | 0x000024	/* TMODC (0x12) -- Turn OFF temperature sensor */
};
-u32 w89rf242_channel_data_24[][2] =
-{
- {(0x03<<24)|0x025B06, (0x04<<24)|0x080408}, // channe1 01
- {(0x03<<24)|0x025C46, (0x04<<24)|0x080408}, // channe1 02
- {(0x03<<24)|0x025D86, (0x04<<24)|0x080408}, // channe1 03
- {(0x03<<24)|0x025EC6, (0x04<<24)|0x080408}, // channe1 04
- {(0x03<<24)|0x026006, (0x04<<24)|0x080408}, // channe1 05
- {(0x03<<24)|0x026146, (0x04<<24)|0x080408}, // channe1 06
- {(0x03<<24)|0x026286, (0x04<<24)|0x080408}, // channe1 07
- {(0x03<<24)|0x0263C6, (0x04<<24)|0x080408}, // channe1 08
- {(0x03<<24)|0x026506, (0x04<<24)|0x080408}, // channe1 09
- {(0x03<<24)|0x026646, (0x04<<24)|0x080408}, // channe1 10
- {(0x03<<24)|0x026786, (0x04<<24)|0x080408}, // channe1 11
- {(0x03<<24)|0x0268C6, (0x04<<24)|0x080408}, // channe1 12
- {(0x03<<24)|0x026A06, (0x04<<24)|0x080408}, // channe1 13
- {(0x03<<24)|0x026D06, (0x04<<24)|0x080408} // channe1 14
+u32 w89rf242_channel_data_24[][2] = {
+	{(0x03 << 24) | 0x025B06, (0x04 << 24) | 0x080408}, /* channel 01 */
+	{(0x03 << 24) | 0x025C46, (0x04 << 24) | 0x080408}, /* channel 02 */
+	{(0x03 << 24) | 0x025D86, (0x04 << 24) | 0x080408}, /* channel 03 */
+	{(0x03 << 24) | 0x025EC6, (0x04 << 24) | 0x080408}, /* channel 04 */
+	{(0x03 << 24) | 0x026006, (0x04 << 24) | 0x080408}, /* channel 05 */
+	{(0x03 << 24) | 0x026146, (0x04 << 24) | 0x080408}, /* channel 06 */
+	{(0x03 << 24) | 0x026286, (0x04 << 24) | 0x080408}, /* channel 07 */
+	{(0x03 << 24) | 0x0263C6, (0x04 << 24) | 0x080408}, /* channel 08 */
+	{(0x03 << 24) | 0x026506, (0x04 << 24) | 0x080408}, /* channel 09 */
+	{(0x03 << 24) | 0x026646, (0x04 << 24) | 0x080408}, /* channel 10 */
+	{(0x03 << 24) | 0x026786, (0x04 << 24) | 0x080408}, /* channel 11 */
+	{(0x03 << 24) | 0x0268C6, (0x04 << 24) | 0x080408}, /* channel 12 */
+	{(0x03 << 24) | 0x026A06, (0x04 << 24) | 0x080408}, /* channel 13 */
+	{(0x03 << 24) | 0x026D06, (0x04 << 24) | 0x080408} /* channel 14 */
};
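The FCHN (register 0x03) words in w89rf242_channel_data_24 step by 0x140 per channel for channels 1-13, while the register-0x04 word stays constant; channel 14 (2484 MHz) is the usual outlier. A sketch of that relationship with a hypothetical helper name, not part of the driver:

static u32 w89rf242_fchn_word(u8 channel)
{
	/* channels 1..13 are 5 MHz apart, giving FCHN steps of 0x140 */
	if (channel == 14)
		return (0x03 << 24) | 0x026D06;	/* 2484 MHz, 12 MHz above ch 13 */
	return (0x03 << 24) | (0x025B06 + 0x140 * (u32)(channel - 1));
}

As a cross-check, w89rf242_fchn_word(7) reproduces the 0x026286 default-CH7 value listed in w89rf242_rf_data above.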
-u32 w89rf242_power_data_24[] = {(0x05<<24)|0x24C48A, (0x05<<24)|0x24C48A, (0x05<<24)|0x24C48A};
+u32 w89rf242_power_data_24[] = {(0x05 << 24) | 0x24C48A, (0x05 << 24) | 0x24C48A, (0x05 << 24) | 0x24C48A};
-// 20060315.6 Enlarge for new scale
-// 20060316.6 20060619.2.a add mapping array
-u32 w89rf242_txvga_old_mapping[][2] =
-{
- {0, 0} , // New <-> Old
+u32 w89rf242_txvga_old_mapping[][2] = {
+ {0, 0} , /* New <-> Old */
{1, 1} ,
{2, 2} ,
{3, 3} ,
{4, 4} ,
{6, 5} ,
- {8, 6 },
- {10, 7 },
- {12, 8 },
- {14, 9 },
+ {8, 6},
+ {10, 7},
+ {12, 8},
+ {14, 9},
{16, 10},
{18, 11},
{20, 12},
@@ -818,1704 +735,1514 @@ u32 w89rf242_txvga_old_mapping[][2] =
{30, 17},
{32, 18},
{34, 19},
+};
+u32 w89rf242_txvga_data[][5] = {
+ /* low gain mode */
+ {(0x05 << 24) | 0x24C00A, 0, 0x00292315, 0x0800FEFF, 0x52523131}, /* min gain */
+ {(0x05 << 24) | 0x24C80A, 1, 0x00292315, 0x0800FEFF, 0x52523131},
+ {(0x05 << 24) | 0x24C04A, 2, 0x00292315, 0x0800FEFF, 0x52523131}, /* (default) +14dBm (ANT) */
+ {(0x05 << 24) | 0x24C84A, 3, 0x00292315, 0x0800FEFF, 0x52523131},
-};
+ /* TXVGA=0x10 */
+ {(0x05 << 24) | 0x24C40A, 4, 0x00292315, 0x0800FEFF, 0x60603838},
+ {(0x05 << 24) | 0x24C40A, 5, 0x00262114, 0x0700FEFF, 0x65653B3B},
-// 20060619.3 modify from Bruce's mail
-u32 w89rf242_txvga_data[][5] =
-{
- //low gain mode
- { (0x05<<24)|0x24C00A, 0, 0x00292315, 0x0800FEFF, 0x52523131 },// ; min gain
- { (0x05<<24)|0x24C80A, 1, 0x00292315, 0x0800FEFF, 0x52523131 },
- { (0x05<<24)|0x24C04A, 2, 0x00292315, 0x0800FEFF, 0x52523131 },// (default) +14dBm (ANT)
- { (0x05<<24)|0x24C84A, 3, 0x00292315, 0x0800FEFF, 0x52523131 },
-
- //TXVGA=0x10
- { (0x05<<24)|0x24C40A, 4, 0x00292315, 0x0800FEFF, 0x60603838 },
- { (0x05<<24)|0x24C40A, 5, 0x00262114, 0x0700FEFF, 0x65653B3B },
-
- //TXVGA=0x11
- { (0x05<<24)|0x24C44A, 6, 0x00241F13, 0x0700FFFF, 0x58583333 },
- { (0x05<<24)|0x24C44A, 7, 0x00292315, 0x0800FEFF, 0x5E5E3737 },
-
- //TXVGA=0x12
- { (0x05<<24)|0x24C48A, 8, 0x00262114, 0x0700FEFF, 0x53533030 },
- { (0x05<<24)|0x24C48A, 9, 0x00241F13, 0x0700FFFF, 0x59593434 },
-
- //TXVGA=0x13
- { (0x05<<24)|0x24C4CA, 10, 0x00292315, 0x0800FEFF, 0x52523030 },
- { (0x05<<24)|0x24C4CA, 11, 0x00262114, 0x0700FEFF, 0x56563232 },
-
- //TXVGA=0x14
- { (0x05<<24)|0x24C50A, 12, 0x00292315, 0x0800FEFF, 0x54543131 },
- { (0x05<<24)|0x24C50A, 13, 0x00262114, 0x0700FEFF, 0x58583434 },
-
- //TXVGA=0x15
- { (0x05<<24)|0x24C54A, 14, 0x00292315, 0x0800FEFF, 0x54543131 },
- { (0x05<<24)|0x24C54A, 15, 0x00262114, 0x0700FEFF, 0x59593434 },
-
- //TXVGA=0x16
- { (0x05<<24)|0x24C58A, 16, 0x00292315, 0x0800FEFF, 0x55553131 },
- { (0x05<<24)|0x24C58A, 17, 0x00292315, 0x0800FEFF, 0x5B5B3535 },
-
- //TXVGA=0x17
- { (0x05<<24)|0x24C5CA, 18, 0x00262114, 0x0700FEFF, 0x51512F2F },
- { (0x05<<24)|0x24C5CA, 19, 0x00241F13, 0x0700FFFF, 0x55553131 },
-
- //TXVGA=0x18
- { (0x05<<24)|0x24C60A, 20, 0x00292315, 0x0800FEFF, 0x4F4F2E2E },
- { (0x05<<24)|0x24C60A, 21, 0x00262114, 0x0700FEFF, 0x53533030 },
-
- //TXVGA=0x19
- { (0x05<<24)|0x24C64A, 22, 0x00292315, 0x0800FEFF, 0x4E4E2D2D },
- { (0x05<<24)|0x24C64A, 23, 0x00262114, 0x0700FEFF, 0x53533030 },
-
- //TXVGA=0x1A
- { (0x05<<24)|0x24C68A, 24, 0x00292315, 0x0800FEFF, 0x50502E2E },
- { (0x05<<24)|0x24C68A, 25, 0x00262114, 0x0700FEFF, 0x55553131 },
-
- //TXVGA=0x1B
- { (0x05<<24)|0x24C6CA, 26, 0x00262114, 0x0700FEFF, 0x53533030 },
- { (0x05<<24)|0x24C6CA, 27, 0x00292315, 0x0800FEFF, 0x5A5A3434 },
-
- //TXVGA=0x1C
- { (0x05<<24)|0x24C70A, 28, 0x00292315, 0x0800FEFF, 0x55553131 },
- { (0x05<<24)|0x24C70A, 29, 0x00292315, 0x0800FEFF, 0x5D5D3636 },
-
- //TXVGA=0x1D
- { (0x05<<24)|0x24C74A, 30, 0x00292315, 0x0800FEFF, 0x5F5F3737 },
- { (0x05<<24)|0x24C74A, 31, 0x00262114, 0x0700FEFF, 0x65653B3B },
-
- //TXVGA=0x1E
- { (0x05<<24)|0x24C78A, 32, 0x00292315, 0x0800FEFF, 0x66663B3B },
- { (0x05<<24)|0x24C78A, 33, 0x00262114, 0x0700FEFF, 0x70704141 },
-
- //TXVGA=0x1F
- { (0x05<<24)|0x24C7CA, 34, 0x00292315, 0x0800FEFF, 0x72724242 }
+ /* TXVGA=0x11 */
+	{(0x05 << 24) | 0x24C44A, 6, 0x00241F13, 0x0700FFFF, 0x58583333},
+	{(0x05 << 24) | 0x24C44A, 7, 0x00292315, 0x0800FEFF, 0x5E5E3737},
+
+ /* TXVGA=0x12 */
+ {(0x05 << 24) | 0x24C48A, 8, 0x00262114, 0x0700FEFF, 0x53533030},
+ {(0x05 << 24) | 0x24C48A, 9, 0x00241F13, 0x0700FFFF, 0x59593434},
+
+ /* TXVGA=0x13 */
+ {(0x05 << 24) | 0x24C4CA, 10, 0x00292315, 0x0800FEFF, 0x52523030},
+ {(0x05 << 24) | 0x24C4CA, 11, 0x00262114, 0x0700FEFF, 0x56563232},
+
+ /* TXVGA=0x14 */
+ {(0x05 << 24) | 0x24C50A, 12, 0x00292315, 0x0800FEFF, 0x54543131},
+ {(0x05 << 24) | 0x24C50A, 13, 0x00262114, 0x0700FEFF, 0x58583434},
+
+ /* TXVGA=0x15 */
+ {(0x05 << 24) | 0x24C54A, 14, 0x00292315, 0x0800FEFF, 0x54543131},
+ {(0x05 << 24) | 0x24C54A, 15, 0x00262114, 0x0700FEFF, 0x59593434},
+
+ /* TXVGA=0x16 */
+ {(0x05 << 24) | 0x24C58A, 16, 0x00292315, 0x0800FEFF, 0x55553131},
+ {(0x05 << 24) | 0x24C58A, 17, 0x00292315, 0x0800FEFF, 0x5B5B3535},
+
+ /* TXVGA=0x17 */
+ {(0x05 << 24) | 0x24C5CA, 18, 0x00262114, 0x0700FEFF, 0x51512F2F},
+ {(0x05 << 24) | 0x24C5CA, 19, 0x00241F13, 0x0700FFFF, 0x55553131},
+
+ /* TXVGA=0x18 */
+ {(0x05 << 24) | 0x24C60A, 20, 0x00292315, 0x0800FEFF, 0x4F4F2E2E},
+ {(0x05 << 24) | 0x24C60A, 21, 0x00262114, 0x0700FEFF, 0x53533030},
+
+ /* TXVGA=0x19 */
+ {(0x05 << 24) | 0x24C64A, 22, 0x00292315, 0x0800FEFF, 0x4E4E2D2D},
+ {(0x05 << 24) | 0x24C64A, 23, 0x00262114, 0x0700FEFF, 0x53533030},
+
+ /* TXVGA=0x1A */
+ {(0x05 << 24) | 0x24C68A, 24, 0x00292315, 0x0800FEFF, 0x50502E2E},
+ {(0x05 << 24) | 0x24C68A, 25, 0x00262114, 0x0700FEFF, 0x55553131},
+
+ /* TXVGA=0x1B */
+ {(0x05 << 24) | 0x24C6CA, 26, 0x00262114, 0x0700FEFF, 0x53533030},
+ {(0x05 << 24) | 0x24C6CA, 27, 0x00292315, 0x0800FEFF, 0x5A5A3434},
+
+ /* TXVGA=0x1C */
+ {(0x05 << 24) | 0x24C70A, 28, 0x00292315, 0x0800FEFF, 0x55553131},
+ {(0x05 << 24) | 0x24C70A, 29, 0x00292315, 0x0800FEFF, 0x5D5D3636},
+
+ /* TXVGA=0x1D */
+ {(0x05 << 24) | 0x24C74A, 30, 0x00292315, 0x0800FEFF, 0x5F5F3737},
+ {(0x05 << 24) | 0x24C74A, 31, 0x00262114, 0x0700FEFF, 0x65653B3B},
+
+ /* TXVGA=0x1E */
+ {(0x05 << 24) | 0x24C78A, 32, 0x00292315, 0x0800FEFF, 0x66663B3B},
+ {(0x05 << 24) | 0x24C78A, 33, 0x00262114, 0x0700FEFF, 0x70704141},
+
+ /* TXVGA=0x1F */
+ {(0x05 << 24) | 0x24C7CA, 34, 0x00292315, 0x0800FEFF, 0x72724242}
};
-///////////////////////////////////////////////////////////////////////////////////////////////////
-///////////////////////////////////////////////////////////////////////////////////////////////////
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-
-
-//=============================================================================================================
-// Uxx_ReadEthernetAddress --
-//
-// Routine Description:
-// Reads in the Ethernet address from the IC.
-//
-// Arguments:
-// pHwData - The pHwData structure
-//
-// Return Value:
-//
-// The address is stored in EthernetIDAddr.
-//=============================================================================================================
-void
-Uxx_ReadEthernetAddress( struct hw_data * pHwData )
+/* ================================================================================================== */
+
+
+
+/*
+ * =============================================================================================================
+ * Uxx_ReadEthernetAddress --
+ *
+ * Routine Description:
+ * Reads in the Ethernet address from the IC.
+ *
+ * Arguments:
+ * pHwData - The pHwData structure
+ *
+ * Return Value:
+ *
+ * The address is stored in EthernetIDAddr.
+ * =============================================================================================================
+ */
+void Uxx_ReadEthernetAddress(struct hw_data *pHwData)
{
u32 ltmp;
- // Reading Ethernet address from EEPROM and set into hardware due to MAC address maybe change.
- // Only unplug and plug again can make hardware read EEPROM again. 20060727
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d)
- Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d)
- Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d)
- Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+	/*
+	 * Read the Ethernet address from the EEPROM and write it into the
+	 * hardware, because the MAC address may have changed.
+	 * Only unplugging and re-plugging the device makes the hardware
+	 * read the EEPROM again.
+	 */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08000000); /* Start EEPROM access + Read + address(0x0d) */
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
+ *(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16) ltmp);
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08010000); /* Start EEPROM access + Read + address(0x0d) */
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
+ *(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16) ltmp);
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08020000); /* Start EEPROM access + Read + address(0x0d) */
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
+ *(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16) ltmp);
*(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
- Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress) ); //20060926 anson's endian
- Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
+ Wb35Reg_WriteSync(pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress));
+ Wb35Reg_WriteSync(pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress + 4)));
}
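The three unrolled EEPROM reads above differ only in the 16-bit word index placed in bits 16-23 of the 0x03b4 command, so the same sequence can be written as a loop. A sketch under that assumption (not the driver's code, but the same register accesses):

static void read_permanent_mac(struct hw_data *pHwData)
{
	u32 ltmp;
	int i;

	for (i = 0; i < 3; i++) {
		/* Start EEPROM access + Read, word index in bits 16-23 */
		Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08000000 | ((u32)i << 16));
		Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
		*(u16 *)(pHwData->PermanentMacAddress + 2 * i) = cpu_to_le16((u16)ltmp);
	}
	*(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
}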
-//===============================================================================================================
-// CardGetMulticastBit --
-// Description:
-// For a given multicast address, returns the byte and bit in the card multicast registers that it hashes to.
-// Calls CardComputeCrc() to determine the CRC value.
-// Arguments:
-// Address - the address
-// Byte - the byte that it hashes to
-// Value - will have a 1 in the relevant bit
-// Return Value:
-// None.
-//==============================================================================================================
-void CardGetMulticastBit( u8 Address[ETH_ALEN], u8 *Byte, u8 *Value )
+/*
+ * ===============================================================================================================
+ * CardGetMulticastBit --
+ * Description:
+ * For a given multicast address, returns the byte and bit in the card multicast registers that it hashes to.
+ * Calls CardComputeCrc() to determine the CRC value.
+ * Arguments:
+ * Address - the address
+ * Byte - the byte that it hashes to
+ * Value - will have a 1 in the relevant bit
+ * Return Value:
+ * None.
+ * ==============================================================================================================
+ */
+void CardGetMulticastBit(u8 Address[ETH_ALEN], u8 *Byte, u8 *Value)
{
- u32 Crc;
- u32 BitNumber;
+ u32 Crc;
+ u32 BitNumber;
- // First compute the CRC.
- Crc = CardComputeCrc(Address, ETH_ALEN);
+ /* First compute the CRC. */
+ Crc = CardComputeCrc(Address, ETH_ALEN);
- // The computed CRC is bit0~31 from left to right
- //At first we should do right shift 25bits, and read 7bits by using '&', 2^7=128
+ /* The computed CRC is bit0~31 from left to right */
+ /* At first we should do right shift 25bits, and read 7bits by using '&', 2^7=128 */
BitNumber = (u32) ((Crc >> 26) & 0x3f);
- *Byte = (u8) (BitNumber >> 3);// 900514 original (BitNumber / 8)
- *Value = (u8) ((u8)1 << (BitNumber % 8));
+ *Byte = (u8) (BitNumber >> 3); /* 900514 original (BitNumber / 8) */
+ *Value = (u8) ((u8) 1 << (BitNumber % 8));
}
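As a worked example with illustrative numbers: if CardComputeCrc() returned a CRC whose top six bits are 101011b (43), then BitNumber = (Crc >> 26) & 0x3f = 43, so *Byte = 43 >> 3 = 5 and *Value = 1 << (43 % 8) = 0x08, i.e. bit 3 of multicast-filter byte 5.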
-void Uxx_power_on_procedure( struct hw_data * pHwData )
+void Uxx_power_on_procedure(struct hw_data *pHwData)
{
u32 ltmp, loop;
- if( pHwData->phy_type <= RF_MAXIM_V1 )
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xffffff38 );
- else
- {
- Wb35Reg_WriteSync( pHwData, 0x03f4, 0xFF5807FF );// 20060721 For NEW IC 0xFF5807FF
-
- // 20060511.1 Fix the following 4 steps for Rx of RF 2230 initial fail
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0x80 );// regulator on only
- msleep(10); // Modify 20051221.1.b
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xb8 );// REG_ON RF_RSTN on, and
- msleep(10); // Modify 20051221.1.b
-
+ if (pHwData->phy_type <= RF_MAXIM_V1)
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xffffff38);
+ else {
+ Wb35Reg_WriteSync(pHwData, 0x03f4, 0xFF5807FF);
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0x80); /* regulator on only */
+ msleep(10);
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xb8); /* REG_ON RF_RSTN on, and */
+ msleep(10);
ltmp = 0x4968;
- if( (pHwData->phy_type == RF_WB_242) ||
- (RF_WB_242_1 == pHwData->phy_type) ) // 20060619.5 Add
+ if ((pHwData->phy_type == RF_WB_242) ||
+ (RF_WB_242_1 == pHwData->phy_type))
ltmp = 0x4468;
- Wb35Reg_WriteSync( pHwData, 0x03d0, ltmp );
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xa0 );// PLL_PD REF_PD set to 0
+ Wb35Reg_WriteSync(pHwData, 0x03d0, ltmp);
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xa0); /* PLL_PD REF_PD set to 0 */
- msleep(20); // Modify 20051221.1.b
- Wb35Reg_ReadSync( pHwData, 0x03d0, &ltmp );
- loop = 500; // Wait for 5 second 20061101
- while( !(ltmp & 0x20) && loop-- )
- {
- msleep(10); // Modify 20051221.1.b
- if( !Wb35Reg_ReadSync( pHwData, 0x03d0, &ltmp ) )
+ msleep(20);
+ Wb35Reg_ReadSync(pHwData, 0x03d0, &ltmp);
+		loop = 500;	/* Wait for 5 seconds */
+ while (!(ltmp & 0x20) && loop--) {
+ msleep(10);
+ if (!Wb35Reg_ReadSync(pHwData, 0x03d0, &ltmp))
break;
}
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xe0 );// MLK_EN
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xe0); /* MLK_EN */
}
- Wb35Reg_WriteSync( pHwData, 0x03b0, 1 );// Reset hardware first
- msleep(10); // Add this 20051221.1.b
+ Wb35Reg_WriteSync(pHwData, 0x03b0, 1); /* Reset hardware first */
+ msleep(10);
- // Set burst write delay
- Wb35Reg_WriteSync( pHwData, 0x03f8, 0x7ff );
+ /* Set burst write delay */
+ Wb35Reg_WriteSync(pHwData, 0x03f8, 0x7ff);
}
-void Set_ChanIndep_RfData_al7230_24( struct hw_data * pHwData, u32 *pltmp ,char number)
+void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp, char number)
{
u8 i;
- for( i=0; i<number; i++ )
- {
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al7230_rf_data_24[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_24[i]&0xffffff);
+ pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_24[i] & 0xffffff);
}
}
-void Set_ChanIndep_RfData_al7230_50( struct hw_data * pHwData, u32 *pltmp, char number)
+void Set_ChanIndep_RfData_al7230_50(struct hw_data *pHwData, u32 *pltmp, char number)
{
u8 i;
- for( i=0; i<number; i++ )
- {
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al7230_rf_data_50[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_50[i]&0xffffff);
+ pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_50[i] & 0xffffff);
}
}
-//=============================================================================================================
-// RFSynthesizer_initial --
-//=============================================================================================================
-void
-RFSynthesizer_initial(struct hw_data * pHwData)
+/*
+ * =============================================================================================================
+ * RFSynthesizer_initial --
+ * =============================================================================================================
+ */
+void RFSynthesizer_initial(struct hw_data *pHwData)
{
u32 altmp[32];
- u32 * pltmp = altmp;
+ u32 *pltmp = altmp;
u32 ltmp;
- u8 number=0x00; // The number of register vale
+	u8 number = 0x00; /* The number of register values */
u8 i;
- //
- // bit[31] SPI Enable.
- // 1=perform synthesizer program operation. This bit will
- // cleared automatically after the operation is completed.
- // bit[30] SPI R/W Control
- // 0=write, 1=read
- // bit[29:24] SPI Data Format Length
- // bit[17:4 ] RF Data bits.
- // bit[3 :0 ] RF address.
- switch( pHwData->phy_type )
- {
+ /*
+ * bit[31] SPI Enable.
+	 * 1=perform synthesizer program operation. This bit will be
+	 * cleared automatically after the operation is completed.
+ * bit[30] SPI R/W Control
+ * 0=write, 1=read
+ * bit[29:24] SPI Data Format Length
+ * bit[17:4 ] RF Data bits.
+ * bit[3 :0 ] RF address.
+ */
+ switch (pHwData->phy_type) {
case RF_MAXIM_2825:
- case RF_MAXIM_V1: // 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331)
- number = sizeof(max2825_rf_data)/sizeof(max2825_rf_data[0]);
- for( i=0; i<number; i++ )
- {
- pHwData->phy_para[i] = max2825_rf_data[i];// Backup Rf parameter
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2825_rf_data[i], 18);
+ case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */
+ number = sizeof(max2825_rf_data) / sizeof(max2825_rf_data[0]);
+ for (i = 0; i < number; i++) {
+ pHwData->phy_para[i] = max2825_rf_data[i]; /* Backup Rf parameter */
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2825_rf_data[i], 18);
}
break;
-
case RF_MAXIM_2827:
- number = sizeof(max2827_rf_data)/sizeof(max2827_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ number = sizeof(max2827_rf_data) / sizeof(max2827_rf_data[0]);
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2827_rf_data[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2827_rf_data[i], 18);
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_rf_data[i], 18);
}
break;
-
case RF_MAXIM_2828:
- number = sizeof(max2828_rf_data)/sizeof(max2828_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ number = sizeof(max2828_rf_data) / sizeof(max2828_rf_data[0]);
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2828_rf_data[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2828_rf_data[i], 18);
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_rf_data[i], 18);
}
break;
-
case RF_MAXIM_2829:
- number = sizeof(max2829_rf_data)/sizeof(max2829_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ number = sizeof(max2829_rf_data) / sizeof(max2829_rf_data[0]);
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = max2829_rf_data[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2829_rf_data[i], 18);
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2829_rf_data[i], 18);
}
break;
-
case RF_AIROHA_2230:
- number = sizeof(al2230_rf_data)/sizeof(al2230_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ number = sizeof(al2230_rf_data) / sizeof(al2230_rf_data[0]);
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al2230_rf_data[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( al2230_rf_data[i], 20);
+ pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230_rf_data[i], 20);
}
break;
-
case RF_AIROHA_2230S:
- number = sizeof(al2230s_rf_data)/sizeof(al2230s_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ number = sizeof(al2230s_rf_data) / sizeof(al2230s_rf_data[0]);
+ for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al2230s_rf_data[i];
- pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( al2230s_rf_data[i], 20);
+ pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230s_rf_data[i], 20);
}
break;
-
case RF_AIROHA_7230:
-
- //Start to fill RF parameters, PLL_ON should be pulled low.
- Wb35Reg_WriteSync( pHwData, 0x03dc, 0x00000000 );
-#ifdef _PE_STATE_DUMP_
+ /* Start to fill RF parameters, PLL_ON should be pulled low. */
+ Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
+ #ifdef _PE_STATE_DUMP_
printk("* PLL_ON low\n");
-#endif
-
- number = sizeof(al7230_rf_data_24)/sizeof(al7230_rf_data_24[0]);
+ #endif
+ number = sizeof(al7230_rf_data_24) / sizeof(al7230_rf_data_24[0]);
Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
break;
-
case RF_WB_242:
- case RF_WB_242_1: // 20060619.5 Add
- number = sizeof(w89rf242_rf_data)/sizeof(w89rf242_rf_data[0]);
- for( i=0; i<number; i++ )
- {
+ case RF_WB_242_1:
+ number = sizeof(w89rf242_rf_data) / sizeof(w89rf242_rf_data[0]);
+ for (i = 0; i < number; i++) {
ltmp = w89rf242_rf_data[i];
- if( i == 4 ) // Update the VCO trim from EEPROM
- {
- ltmp &= ~0xff0; // Mask bit4 ~bit11
- ltmp |= pHwData->VCO_trim<<4;
+ if (i == 4) { /* Update the VCO trim from EEPROM */
+ ltmp &= ~0xff0; /* Mask bit4 ~bit11 */
+ ltmp |= pHwData->VCO_trim << 4;
}
pHwData->phy_para[i] = ltmp;
- pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( ltmp, 24);
+ pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(ltmp, 24);
}
break;
}
pHwData->phy_number = number;
- // The 16 is the maximum capability of hardware. Here use 12
- if( number > 12 ) {
- //Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 12, NO_INCREMENT );
- for( i=0; i<12; i++ ) // For Al2230
- Wb35Reg_WriteSync( pHwData, 0x0864, pltmp[i] );
+	/* 16 is the maximum the hardware can take; use 12 here */
+ if (number > 12) {
+ for (i = 0; i < 12; i++) /* For Al2230 */
+ Wb35Reg_WriteSync(pHwData, 0x0864, pltmp[i]);
pltmp += 12;
number -= 12;
}
- // Write to register. number must less and equal than 16
- for( i=0; i<number; i++ )
- Wb35Reg_WriteSync( pHwData, 0x864, pltmp[i] );
+	/* Write to register. number must be less than or equal to 16 */
+ for (i = 0; i < number; i++)
+ Wb35Reg_WriteSync(pHwData, 0x864, pltmp[i]);
- // 20060630.1 Calibration only 1 time
- if( pHwData->CalOneTime )
+ /* Calibration only 1 time */
+ if (pHwData->CalOneTime)
return;
pHwData->CalOneTime = 1;
- switch( pHwData->phy_type )
- {
- case RF_AIROHA_2230:
-
- // 20060511.1 --- Modifying the follow step for Rx issue-----------------
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( (0x07<<20)|0xE168E, 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(10);
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( al2230_rf_data[7], 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(10);
-
- case RF_AIROHA_2230S: // 20060420 Add this
-
- // 20060511.1 --- Modifying the follow step for Rx issue-----------------
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0x80 );// regulator on only
- msleep(10); // Modify 20051221.1.b
-
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xa0 );// PLL_PD REF_PD set to 0
- msleep(10); // Modify 20051221.1.b
-
- Wb35Reg_WriteSync( pHwData, 0x03d4, 0xe0 );// MLK_EN
- Wb35Reg_WriteSync( pHwData, 0x03b0, 1 );// Reset hardware first
- msleep(10); // Add this 20051221.1.b
- //------------------------------------------------------------------------
-
- // The follow code doesn't use the burst-write mode
- //phy_set_rf_data(phw_data, 0x0F, (0x0F<<20) | 0xF01A0); //Raise Initial Setting
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( (0x0F<<20) | 0xF01A0, 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- ltmp = pHwData->reg.BB5C & 0xfffff000;
- Wb35Reg_WriteSync( pHwData, 0x105c, ltmp );
- pHwData->reg.BB50 |= 0x13;//(MASK_IQCAL_MODE|MASK_CALIB_START);//20060315.1 modify
- Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
- msleep(5);
-
- //phy_set_rf_data(phw_data, 0x0F, (0x0F<<20) | 0xF01B0); //Activate Filter Cal.
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( (0x0F<<20) | 0xF01B0, 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
-
- //phy_set_rf_data(phw_data, 0x0F, (0x0F<<20) | 0xF01e0); //Activate TX DCC
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( (0x0F<<20) | 0xF01E0, 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
-
- //phy_set_rf_data(phw_data, 0x0F, (0x0F<<20) | 0xF01A0); //Resotre Initial Setting
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( (0x0F<<20) | 0xF01A0, 20);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
-// //Force TXI(Q)P(N) to normal control
- Wb35Reg_WriteSync( pHwData, 0x105c, pHwData->reg.BB5C );
- pHwData->reg.BB50 &= ~0x13;//(MASK_IQCAL_MODE|MASK_CALIB_START);
- Wb35Reg_WriteSync( pHwData, 0x1050, pHwData->reg.BB50);
- break;
-
- case RF_AIROHA_7230:
-
- //RF parameters have filled completely, PLL_ON should be
- //pulled high
- Wb35Reg_WriteSync( pHwData, 0x03dc, 0x00000080 );
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON high\n");
- #endif
-
- //2.4GHz
- //ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x1ABA8F;
- //Wb35Reg_WriteSync pHwData, 0x0864, ltmp );
- //msleep(1); // Sleep 1 ms
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x3ABA8F;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x1ABA8F;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
-
- //5GHz
- Wb35Reg_WriteSync( pHwData, 0x03dc, 0x00000000 );
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON low\n");
- #endif
-
- number = sizeof(al7230_rf_data_50)/sizeof(al7230_rf_data_50[0]);
- Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
- // Write to register. number must less and equal than 16
- for( i=0; i<number; i++ )
- Wb35Reg_WriteSync( pHwData, 0x0864, pltmp[i] );
- msleep(5);
-
- Wb35Reg_WriteSync( pHwData, 0x03dc, 0x00000080 );
- #ifdef _PE_STATE_DUMP_
- printk("* PLL_ON high\n");
- #endif
-
- //ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x12BACF;
- //Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x3ABA8F;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x12BACF;
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5);
-
- //Wb35Reg_WriteSync( pHwData, 0x03dc, 0x00000080 );
- //printk("* PLL_ON high\n");
- break;
-
- case RF_WB_242:
- case RF_WB_242_1: // 20060619.5 Add
-
- //
- // ; Version 1.3B revision items: for FA5976A , October 3, 2005 by HTHo
- //
- ltmp = pHwData->reg.BB5C & 0xfffff000;
- Wb35Reg_WriteSync( pHwData, 0x105c, ltmp );
- Wb35Reg_WriteSync( pHwData, 0x1058, 0 );
- pHwData->reg.BB50 |= 0x3;//(MASK_IQCAL_MODE|MASK_CALIB_START);//20060630
- Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
-
- //----- Calibration (1). VCO frequency calibration
- //Calibration (1a.0). Synthesizer reset (HTHo corrected 2005/05/10)
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x0F<<24) | 0x00101E, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5); // Sleep 5ms
- //Calibration (1a). VCO frequency calibration mode ; waiting 2msec VCO calibration time
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFE69c0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(2); // Sleep 2ms
-
- //----- Calibration (2). TX baseband Gm-C filter auto-tuning
- //Calibration (2a). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xF8EBC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (2b.0). TX filter auto-tuning BW: TFLBW=101 (TC5376A default)
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x07<<24) | 0x0C68CE, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (2b). send TX reset signal (HTHo corrected May 10, 2005)
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x0F<<24) | 0x00201E, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (2c). turn-on TX Gm-C filter auto-tuning
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFCEBC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- udelay(150); // Sleep 150 us
- //turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xF8EBC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //----- Calibration (3). RX baseband Gm-C filter auto-tuning
- //Calibration (3a). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (3b.0). RX filter auto-tuning BW: RFLBW=100 (TC5376A+corner default; July 26, 2005)
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x07<<24) | 0x0C68CE, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (3b). send RX reset signal (HTHo corrected May 10, 2005)
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x0F<<24) | 0x00401E, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (3c). turn-on RX Gm-C filter auto-tuning
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFEEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- udelay(150); // Sleep 150 us
- //Calibration (3e). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //----- Calibration (4). TX LO leakage calibration
- //Calibration (4a). TX LO leakage calibration
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFD6BC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- udelay(150); // Sleep 150 us
-
- //----- Calibration (5). RX DC offset calibration
- //Calibration (5a). turn off ENCAL signal and set to RX SW DC caliration mode
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5b). turn off AGC servo-loop & RSSI
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x01<<24) | 0xEBFFC2, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //; for LNA=11 --------
- //Calibration (5c-h). RX DC offset current bias ON; & LNA=11; RXVGA=111111
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x06<<24) | 0x343FCC, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5d). turn on RX DC offset cal function; and waiting 2 msec cal time
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFF6DC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(2); // Sleep 2ms
- //Calibration (5f). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //; for LNA=10 --------
- //Calibration (5c-m). RX DC offset current bias ON; & LNA=10; RXVGA=111111
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x06<<24) | 0x342FCC, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5d). turn on RX DC offset cal function; and waiting 2 msec cal time
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFF6DC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(2); // Sleep 2ms
- //Calibration (5f). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //; for LNA=01 --------
- //Calibration (5c-m). RX DC offset current bias ON; & LNA=01; RXVGA=111111
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x06<<24) | 0x341FCC, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5d). turn on RX DC offset cal function; and waiting 2 msec cal time
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFF6DC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(2); // Sleep 2ms
- //Calibration (5f). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //; for LNA=00 --------
- //Calibration (5c-l). RX DC offset current bias ON; & LNA=00; RXVGA=111111
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x06<<24) | 0x340FCC, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5d). turn on RX DC offset cal function; and waiting 2 msec cal time
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFF6DC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(2); // Sleep 2ms
- //Calibration (5f). turn off ENCAL signal
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xFAEDC0, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- //Calibration (5g). turn on AGC servo-loop
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x01<<24) | 0xEFFFC2, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
-
- //; ----- Calibration (7). Switch RF chip to normal mode
- //0x00 0xF86100 ; 3E184 ; Switch RF chip to normal mode
-// msleep(10); // @@ 20060721
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( (0x00<<24) | 0xF86100, 24);
- Wb35Reg_WriteSync( pHwData, 0x0864, ltmp );
- msleep(5); // Sleep 5 ms
-
-// //write back
-// Wb35Reg_WriteSync(pHwData, 0x105c, pHwData->reg.BB5C);
-// pHwData->reg.BB50 &= ~0x13;//(MASK_IQCAL_MODE|MASK_CALIB_START); // 20060315.1 fix
-// Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
-// msleep(1); // Sleep 1 ms
- break;
+ switch (pHwData->phy_type) {
+ case RF_AIROHA_2230:
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x07 << 20) | 0xE168E, 20);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(10);
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230_rf_data[7], 20);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(10);
+ case RF_AIROHA_2230S:
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0x80); /* regulator on only */
+ msleep(10);
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xa0); /* PLL_PD REF_PD set to 0 */
+ msleep(10);
+ Wb35Reg_WriteSync(pHwData, 0x03d4, 0xe0); /* MLK_EN */
+ Wb35Reg_WriteSync(pHwData, 0x03b0, 1); /* Reset hardware first */
+ msleep(10);
+ /* ========================================================= */
+
+		/* The following code doesn't use the burst-write mode */
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x0F<<20) | 0xF01A0, 20);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ ltmp = pHwData->reg.BB5C & 0xfffff000;
+ Wb35Reg_WriteSync(pHwData, 0x105c, ltmp);
+ pHwData->reg.BB50 |= 0x13; /* (MASK_IQCAL_MODE|MASK_CALIB_START) */
+ Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
+ msleep(5);
+
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x0F << 20) | 0xF01B0, 20);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x0F << 20) | 0xF01E0, 20);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse((0x0F << 20) | 0xF01A0, 20);
+		Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ Wb35Reg_WriteSync(pHwData, 0x105c, pHwData->reg.BB5C);
+ pHwData->reg.BB50 &= ~0x13; /* (MASK_IQCAL_MODE|MASK_CALIB_START); */
+ Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
+ break;
+ case RF_AIROHA_7230:
+ /* RF parameters have filled completely, PLL_ON should be pulled high */
+ Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
+ #ifdef _PE_STATE_DUMP_
+ printk("* PLL_ON high\n");
+ #endif
+
+ /* 2.4GHz */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x3ABA8F;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x1ABA8F;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+
+ /* 5GHz */
+ Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
+ #ifdef _PE_STATE_DUMP_
+ printk("* PLL_ON low\n");
+ #endif
+
+ number = sizeof(al7230_rf_data_50) / sizeof(al7230_rf_data_50[0]);
+ Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
+		/* Write to register. number must be less than or equal to 16 */
+ for (i = 0; i < number; i++)
+ Wb35Reg_WriteSync(pHwData, 0x0864, pltmp[i]);
+ msleep(5);
+
+ Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
+ #ifdef _PE_STATE_DUMP_
+ printk("* PLL_ON high\n");
+ #endif
+
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x3ABA8F;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x12BACF;
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ break;
+ case RF_WB_242:
+ case RF_WB_242_1:
+ /* for FA5976A */
+ ltmp = pHwData->reg.BB5C & 0xfffff000;
+ Wb35Reg_WriteSync(pHwData, 0x105c, ltmp);
+ Wb35Reg_WriteSync(pHwData, 0x1058, 0);
+ pHwData->reg.BB50 |= 0x3; /* (MASK_IQCAL_MODE|MASK_CALIB_START); */
+ Wb35Reg_WriteSync(pHwData, 0x1050, pHwData->reg.BB50);
+
+ /* ----- Calibration (1). VCO frequency calibration */
+ /* Calibration (1a.0). Synthesizer reset */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x0F<<24) | 0x00101E, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ /* Calibration (1a). VCO frequency calibration mode ; waiting 2msec VCO calibration time */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFE69c0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(2);
+
+ /* ----- Calibration (2). TX baseband Gm-C filter auto-tuning */
+ /* Calibration (2a). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xF8EBC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (2b.0). TX filter auto-tuning BW: TFLBW=101 (TC5376A default) */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x07<<24) | 0x0C68CE, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (2b). send TX reset signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x0F<<24) | 0x00201E, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (2c). turn-on TX Gm-C filter auto-tuning */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFCEBC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ udelay(150); /* Sleep 150 us */
+ /* turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xF8EBC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* ----- Calibration (3). RX baseband Gm-C filter auto-tuning */
+ /* Calibration (3a). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+		/* Calibration (3b.0). RX filter auto-tuning BW: RFLBW=100 (TC5376A+corner default) */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x07<<24) | 0x0C68CE, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (3b). send RX reset signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x0F<<24) | 0x00401E, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (3c). turn-on RX Gm-C filter auto-tuning */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFEEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ udelay(150); /* Sleep 150 us */
+ /* Calibration (3e). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* ----- Calibration (4). TX LO leakage calibration */
+ /* Calibration (4a). TX LO leakage calibration */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFD6BC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ udelay(150); /* Sleep 150 us */
+
+ /* ----- Calibration (5). RX DC offset calibration */
+ /* Calibration (5a). turn off ENCAL signal and set to RX SW DC calibration mode */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5b). turn off AGC servo-loop & RSSI */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x01<<24) | 0xEBFFC2, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* for LNA=11 -------- */
+ /* Calibration (5c-h). RX DC offset current bias ON; & LNA=11; RXVGA=111111 */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x06<<24) | 0x343FCC, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5d). turn on RX DC offset cal function; and waiting 2 msec cal time */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFF6DC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(2);
+ /* Calibration (5f). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* for LNA=10 -------- */
+ /* Calibration (5c-m). RX DC offset current bias ON; & LNA=10; RXVGA=111111 */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x06<<24) | 0x342FCC, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5d). turn on RX DC offset cal function and wait 2 ms for calibration */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFF6DC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(2);
+ /* Calibration (5f). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* for LNA=01 -------- */
+ /* Calibration (5c-m). RX DC offset current bias ON; & LNA=01; RXVGA=111111 */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x06<<24) | 0x341FCC, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5d). turn on RX DC offset cal function and wait 2 ms for calibration */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFF6DC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(2);
+ /* Calibration (5f). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* for LNA=00 -------- */
+ /* Calibration (5c-l). RX DC offset current bias ON; & LNA=00; RXVGA=111111 */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x06<<24) | 0x340FCC, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5d). turn on RX DC offset cal function and wait 2 ms for calibration */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFF6DC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(2);
+ /* Calibration (5f). turn off ENCAL signal */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xFAEDC0, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ /* Calibration (5g). turn on AGC servo-loop */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x01<<24) | 0xEFFFC2, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+
+ /* ----- Calibration (7). Switch RF chip to normal mode */
+ /* 0x00 0xF86100 ; 3E184 ; Switch RF chip to normal mode */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse((0x00<<24) | 0xF86100, 24);
+ Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
+ msleep(5);
+ break;
}
}
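Every step of the W89RF242 calibration sequence above goes through the same RF serial-bus framing, (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(value, 24), written to MAC register 0x0864. As a reading aid only, the standalone sketch below restates that framing under two assumptions taken from the usage rather than from a datasheet: BitReverse() mirrors the lowest `bits` bits of its argument, and bits 29..24 of the control word carry the payload width (18, 20 or 24) while bit 31 acts as the write trigger.

#include <stdint.h>

/* Assumed behaviour of the driver's BitReverse(): mirror the low `bits` bits. */
static uint32_t bit_reverse_sketch(uint32_t val, unsigned int bits)
{
	uint32_t out = 0;
	unsigned int i;

	for (i = 0; i < bits; i++)		/* bit i -> bit (bits - 1 - i) */
		if (val & (1u << i))
			out |= 1u << (bits - 1 - i);
	return out;
}

/* Assumed layout of the 0x0864 control word: bit 31 = write trigger, bit 30 = 0,
 * bits 29..24 = payload width, low bits = bit-reversed RF register payload. */
static uint32_t rf_serial_word_sketch(uint32_t payload, unsigned int bits)
{
	return (1u << 31) | ((uint32_t)bits << 24) |
	       bit_reverse_sketch(payload, bits);
}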
-void BBProcessor_AL7230_2400( struct hw_data * pHwData)
+void BBProcessor_AL7230_2400(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 pltmp[12];
- pltmp[0] = 0x16A8337A; // 0x16a5215f; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9AFF9AA6; // 0x9aff9ca6; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55D00A04; // 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xFFF72031; // 0xFfFf2138; // 0x100c AGC_Ctrl4
+ pltmp[0] = 0x16A8337A; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9AFF9AA6; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55D00A04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xFFF72031; /* 0x100c AGC_Ctrl4 */
reg->BB0C = 0xFFF72031;
- pltmp[4] = 0x0FacDCC5; // 0x1010 AGC_Ctrl5 // 20050927 0x0FacDCB7
- pltmp[5] = 0x00CAA333; // 0x00eaa333; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xF2211111; // 0x11111111; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x06443440; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0xA8002A79; // 0xa9002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000528; // 20050927 0x40000228
- pltmp[11] = 0x232D7F30; // 0x23457f30;// 0x102c A_ACQ_Ctrl
+ pltmp[4] = 0x0FacDCC5; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00CAA333; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0xF2211111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x06443440; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0xA8002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0x40000528;
+ pltmp[11] = 0x232D7F30; /* 0x102c A_ACQ_Ctrl */
reg->BB2C = 0x232D7F30;
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
- pltmp[0] = 0x00002c54; // 0x1030 B_ACQ_Ctrl
+ pltmp[0] = 0x00002c54; /* 0x1030 B_ACQ_Ctrl */
reg->BB30 = 0x00002c54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B2C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x00332C1B; // 0x00453B24; // 0x1048 11b TX RC filter
- pltmp[7] = 0x0A00FEFF; // 0x0E00FEFF; // 0x104c 11b TX RC filter
- pltmp[8] = 0x2B106208; // 0x1050 MODE_Ctrl
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = 0x00332C1B; /* 0x1048 11b TX RC filter */
+ pltmp[7] = 0x0A00FEFF; /* 0x104c 11b TX RC filter */
+ pltmp[8] = 0x2B106208; /* 0x1050 MODE_Ctrl */
reg->BB50 = 0x2B106208;
- pltmp[9] = 0; // 0x1054
+ pltmp[9] = 0; /* 0x1054 */
reg->BB54 = 0x00000000;
- pltmp[10] = 0x52524242; // 0x64645252; // 0x1058 IQ_Alpha
+ pltmp[10] = 0x52524242; /* 0x1058 IQ_Alpha */
reg->BB58 = 0x52524242;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
}
-void BBProcessor_AL7230_5000( struct hw_data * pHwData)
+void BBProcessor_AL7230_5000(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 pltmp[12];
- pltmp[0] = 0x16AA6678; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9AFFA0B2; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55D00A04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xEFFF233E; // 0x100c AGC_Ctrl4
+ pltmp[0] = 0x16AA6678; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9AFFA0B2; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55D00A04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xEFFF233E; /* 0x100c AGC_Ctrl4 */
reg->BB0C = 0xEFFF233E;
- pltmp[4] = 0x0FacDCC5; // 0x1010 AGC_Ctrl5 // 20050927 0x0FacDCB7
- pltmp[5] = 0x00CAA333; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xF2432111; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x05C43440; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000528; // 20050927 0x40000228
- pltmp[11] = 0x232FDF30;// 0x102c A_ACQ_Ctrl
+ pltmp[4] = 0x0FacDCC5; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00CAA333; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0xF2432111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x05C43440; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0x40000528;
+ pltmp[11] = 0x232FDF30; /* 0x102c A_ACQ_Ctrl */
reg->BB2C = 0x232FDF30;
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
- pltmp[0] = 0x80002C7C; // 0x1030 B_ACQ_Ctrl
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
+ pltmp[0] = 0x80002C7C; /* 0x1030 B_ACQ_Ctrl */
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B2C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x00332C1B; // 0x1048 11b TX RC filter
- pltmp[7] = 0x0A00FEFF; // 0x104c 11b TX RC filter
- pltmp[8] = 0x2B107208; // 0x1050 MODE_Ctrl
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = 0x00332C1B; /* 0x1048 11b TX RC filter */
+ pltmp[7] = 0x0A00FEFF; /* 0x104c 11b TX RC filter */
+ pltmp[8] = 0x2B107208; /* 0x1050 MODE_Ctrl */
reg->BB50 = 0x2B107208;
- pltmp[9] = 0; // 0x1054
+ pltmp[9] = 0; /* 0x1054 */
reg->BB54 = 0x00000000;
- pltmp[10] = 0x52524242; // 0x1058 IQ_Alpha
+ pltmp[10] = 0x52524242; /* 0x1058 IQ_Alpha */
reg->BB58 = 0x52524242;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
}
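BBProcessor_AL7230_2400() and BBProcessor_AL7230_5000() each program two blocks of twelve consecutive baseband registers (0x1000-0x102c and 0x1030-0x105c), which is what the per-element offset comments refer to. Assuming AUTO_INCREMENT simply advances the register address by 4 for every 32-bit word, each burst is equivalent to the single-register loop sketched below; struct hw_data, u16/u32 and Wb35Reg_Write() are the driver's own types and accessor, and the helper name is illustrative only.

/* Sketch: what one AUTO_INCREMENT burst of `count` words amounts to. */
static void bb_burst_as_singles_sketch(struct hw_data *pHwData, u16 base,
				       const u32 *values, int count)
{
	int i;

	for (i = 0; i < count; i++)	/* base, base + 4, base + 8, ... */
		Wb35Reg_Write(pHwData, base + 4 * i, values[i]);
}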
-//=============================================================================================================
-// BBProcessorPowerupInit --
-//
-// Description:
-// Initialize the Baseband processor.
-//
-// Arguments:
-// pHwData - Handle of the USB Device.
-//
-// Return values:
-// None.
-//=============================================================================================================
-void
-BBProcessor_initial( struct hw_data * pHwData )
+/*
+ * ===========================================================================
+ * BBProcessor_initial --
+ *
+ * Description:
+ * Initialize the Baseband processor.
+ *
+ * Arguments:
+ * pHwData - Handle of the USB Device.
+ *
+ * Return values:
+ * None.
+ * ===========================================================================
+ */
+void BBProcessor_initial(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 i, pltmp[12];
- switch( pHwData->phy_type )
- {
- case RF_MAXIM_V1: // Initializng the Winbond 2nd BB(with Phy board (v1) + Maxim 331)
-
- pltmp[0] = 0x16F47E77; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9AFFAEA4; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55D00A04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xEFFF1A34; // 0x100c AGC_Ctrl4
- reg->BB0C = 0xEFFF1A34;
- pltmp[4] = 0x0FABE0B7; // 0x1010 AGC_Ctrl5
- pltmp[5] = 0x00CAA332; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xF6632111; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04CC3640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = (pHwData->phy_type==3) ? 0x40000a28 : 0x40000228; // 0x1028 MAXIM_331(b31=0) + WBRF_V1(b11=1) : MAXIM_331(b31=0) + WBRF_V2(b11=0)
- pltmp[11] = 0x232FDF30; // 0x102c A_ACQ_Ctrl
- reg->BB2C = 0x232FDF30; //Modify for 33's 1.0.95.xxx version, antenna 1
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B6C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x00453B24; // 0x1048 11b TX RC filter
- pltmp[7] = 0x0E00FEFF; // 0x104c 11b TX RC filter
- pltmp[8] = 0x27106208; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106208;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x64646464; // 0x1058 IQ_Alpha
- reg->BB58 = 0x64646464;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
-
- //------------------------------------------------------------------
- //[20040722 WK]
- //Only for baseband version 2
-// case RF_MAXIM_317:
- case RF_MAXIM_2825:
- case RF_MAXIM_2827:
- case RF_MAXIM_2828:
-
- pltmp[0] = 0x16b47e77; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9affaea4; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xefff1a34; // 0x100c AGC_Ctrl4
- reg->BB0C = 0xefff1a34;
- pltmp[4] = 0x0fabe0b7; // 0x1010 AGC_Ctrl5
- pltmp[5] = 0x00caa332; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xf6632111; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04CC3640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000528; // 0x40000128; Modify for 33's 1.0.95
- pltmp[11] = 0x232fdf30; // 0x102c A_ACQ_Ctrl
- reg->BB2C = 0x232fdf30; //Modify for 33's 1.0.95.xxx version, antenna 1
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B6C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x00453B24; // 0x1048 11b TX RC filter
- pltmp[7] = 0x0D00FDFF; // 0x104c 11b TX RC filter
- pltmp[8] = 0x27106208; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106208;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x64646464; // 0x1058 IQ_Alpha
- reg->BB58 = 0x64646464;
- pltmp[11] = 0xAA28C000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
-
- case RF_MAXIM_2829:
-
- pltmp[0] = 0x16b47e77; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9affaea4; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xf4ff1632; // 0xefff1a34; // 0x100c AGC_Ctrl4 Modify for 33's 1.0.95
- reg->BB0C = 0xf4ff1632; // 0xefff1a34; Modify for 33's 1.0.95
- pltmp[4] = 0x0fabe0b7; // 0x1010 AGC_Ctrl5
- pltmp[5] = 0x00caa332; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xf8632112; // 0xf6632111; // 0x1018 AGC_Ctrl7 Modify for 33's 1.0.95
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04CC3640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000528; // 0x40000128; modify for 33's 1.0.95
- pltmp[11] = 0x232fdf30; // 0x102c A_ACQ_Ctrl
- reg->BB2C = 0x232fdf30; //Modify for 33's 1.0.95.xxx version, antenna 1
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5b2c8769; // 0x5B6C8769; // 0x1038 B_TXRX_Ctrl Modify for 33's 1.0.95
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x002c2617; // 0x00453B24; // 0x1048 11b TX RC filter Modify for 33's 1.0.95
- pltmp[7] = 0x0800feff; // 0x0D00FDFF; // 0x104c 11b TX RC filter Modify for 33's 1.0.95
- pltmp[8] = 0x27106208; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106208;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x64644a4a; // 0x64646464; // 0x1058 IQ_Alpha Modify for 33's 1.0.95
- reg->BB58 = 0x64646464;
- pltmp[11] = 0xAA28C000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
-
- case RF_AIROHA_2230:
-
- pltmp[0] = 0X16764A77; // 0x1000 AGC_Ctrl1 //0x16765A77
- pltmp[1] = 0x9affafb2; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xFFFd203c; // 0xFFFb203a; // 0x100c AGC_Ctrl4 Modify for 33's 1.0.95.xxx version
- reg->BB0C = 0xFFFd203c;
- pltmp[4] = 0X0FBFDCc5; // 0X0FBFDCA0; // 0x1010 AGC_Ctrl5 //0x0FB2E0B7 Modify for 33's 1.0.95.xxx version
- pltmp[5] = 0x00caa332; // 0x00caa333; // 0x1014 AGC_Ctrl6 Modify for 33's 1.0.95.xxx version
- pltmp[6] = 0XF6632111; // 0XF1632112; // 0x1018 AGC_Ctrl7 //0xf6632112 Modify for 33's 1.0.95.xxx version
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04C43640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0X40000528; //0x40000228
- pltmp[11] = 0x232dfF30; // 0x232A9F30; // 0x102c A_ACQ_Ctrl //0x232a9730
- reg->BB2C = 0x232dfF30; //Modify for 33's 1.0.95.xxx version, antenna 1
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl //0x5B6C8769
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = BB48_DEFAULT_AL2230_11G; // 0x1048 11b TX RC filter 20060613.2
- reg->BB48 = BB48_DEFAULT_AL2230_11G; // 20051221 ch14 20060613.2
- pltmp[7] = BB4C_DEFAULT_AL2230_11G; // 0x104c 11b TX RC filter 20060613.2
- reg->BB4C = BB4C_DEFAULT_AL2230_11G; // 20060613.1 20060613.2
- pltmp[8] = 0x27106200; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106200;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x52524242; // 0x1058 IQ_Alpha
- reg->BB58 = 0x52524242;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
+ switch (pHwData->phy_type) {
+ case RF_MAXIM_V1: /* Initializing the Winbond 2nd BB (with PHY board (v1) + Maxim 331) */
+ pltmp[0] = 0x16F47E77; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9AFFAEA4; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55D00A04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xEFFF1A34; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xEFFF1A34;
+ pltmp[4] = 0x0FABE0B7; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00CAA332; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0xF6632111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04CC3640; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = (pHwData->phy_type == 3) ? 0x40000a28 : 0x40000228; /* 0x1028 MAXIM_331(b31=0) + WBRF_V1(b11=1) : MAXIM_331(b31=0) + WBRF_V2(b11=0) */
+ pltmp[11] = 0x232FDF30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x232FDF30; /* Modify for 33's 1.0.95.xxx version, antenna 1 */
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B6C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
+ reg->BB3C = 0x00000000;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = 0x00453B24; /* 0x1048 11b TX RC filter */
+ pltmp[7] = 0x0E00FEFF; /* 0x104c 11b TX RC filter */
+ pltmp[8] = 0x27106208; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106208;
+ pltmp[9] = 0; /* 0x1054 */
+ reg->BB54 = 0x00000000;
+ pltmp[10] = 0x64646464; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x64646464;
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
- case RF_AIROHA_2230S: // 20060420 Add this
-
- pltmp[0] = 0X16764A77; // 0x1000 AGC_Ctrl1 //0x16765A77
- pltmp[1] = 0x9affafb2; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xFFFd203c; // 0xFFFb203a; // 0x100c AGC_Ctrl4 Modify for 33's 1.0.95.xxx version
- reg->BB0C = 0xFFFd203c;
- pltmp[4] = 0X0FBFDCc5; // 0X0FBFDCA0; // 0x1010 AGC_Ctrl5 //0x0FB2E0B7 Modify for 33's 1.0.95.xxx version
- pltmp[5] = 0x00caa332; // 0x00caa333; // 0x1014 AGC_Ctrl6 Modify for 33's 1.0.95.xxx version
- pltmp[6] = 0XF6632111; // 0XF1632112; // 0x1018 AGC_Ctrl7 //0xf6632112 Modify for 33's 1.0.95.xxx version
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04C43640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0X40000528; //0x40000228
- pltmp[11] = 0x232dfF30; // 0x232A9F30; // 0x102c A_ACQ_Ctrl //0x232a9730
- reg->BB2C = 0x232dfF30; //Modify for 33's 1.0.95.xxx version, antenna 1
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl //0x5B6C8769
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3C = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = BB48_DEFAULT_AL2230_11G; // 0x1048 11b TX RC filter 20060613.2
- reg->BB48 = BB48_DEFAULT_AL2230_11G; // 20051221 ch14 20060613.2
- pltmp[7] = BB4C_DEFAULT_AL2230_11G; // 0x104c 11b TX RC filter 20060613.2
- reg->BB4C = BB4C_DEFAULT_AL2230_11G; // 20060613.1
- pltmp[8] = 0x27106200; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106200;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x52523232; // 20060419 0x52524242; // 0x1058 IQ_Alpha
- reg->BB58 = 0x52523232; // 20060419 0x52524242;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
+ case RF_MAXIM_2825:
+ case RF_MAXIM_2827:
+ case RF_MAXIM_2828:
+ pltmp[0] = 0x16b47e77; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9affaea4; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55d00a04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xefff1a34; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xefff1a34;
+ pltmp[4] = 0x0fabe0b7; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00caa332; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0xf6632111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04CC3640; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0x40000528;
+ pltmp[11] = 0x232fdf30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x232fdf30; /* antenna 1 */
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B6C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
+ reg->BB3C = 0x00000000;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = 0x00453B24; /* 0x1048 11b TX RC filter */
+ pltmp[7] = 0x0D00FDFF; /* 0x104c 11b TX RC filter */
+ pltmp[8] = 0x27106208; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106208;
+ pltmp[9] = 0; /* 0x1054 */
+ reg->BB54 = 0x00000000;
+ pltmp[10] = 0x64646464; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x64646464;
+ pltmp[11] = 0xAA28C000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
- case RF_AIROHA_7230:
-/*
- pltmp[0] = 0x16a84a77; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9affafb2; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55d00a04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xFFFb203a; // 0x100c AGC_Ctrl4
- reg->BB0c = 0xFFFb203a;
- pltmp[4] = 0x0FBFDCB7; // 0x1010 AGC_Ctrl5
- pltmp[5] = 0x00caa333; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0xf6632112; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04C43640; // 0x1020 AGC_Ctrl9
- pltmp[9] = 0x00002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000228;
- pltmp[11] = 0x232A9F30;// 0x102c A_ACQ_Ctrl
- reg->BB2c = 0x232A9F30;
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = 0x00000000; // 0x103c 11a TX LS filter
- reg->BB3c = 0x00000000;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = 0x00453B24; // 0x1048 11b TX RC filter
- pltmp[7] = 0x0E00FEFF; // 0x104c 11b TX RC filter
- pltmp[8] = 0x27106200; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106200;
- pltmp[9] = 0; // 0x1054
- reg->BB54 = 0x00000000;
- pltmp[10] = 0x64645252; // 0x1058 IQ_Alpha
- reg->BB58 = 0x64645252;
- pltmp[11] = 0xAA0AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-*/
- BBProcessor_AL7230_2400( pHwData );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
+ case RF_MAXIM_2829:
+ pltmp[0] = 0x16b47e77; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9affaea4; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55d00a04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xf4ff1632; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xf4ff1632;
+ pltmp[4] = 0x0fabe0b7; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00caa332; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0xf8632112; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04CC3640; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0x40000528;
+ pltmp[11] = 0x232fdf30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x232fdf30; /* antenna 1 */
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5b2c8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
+ reg->BB3C = 0x00000000;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = 0x002c2617; /* 0x1048 11b TX RC filter */
+ pltmp[7] = 0x0800feff; /* 0x104c 11b TX RC filter */
+ pltmp[8] = 0x27106208; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106208;
+ pltmp[9] = 0; /* 0x1054 */
+ reg->BB54 = 0x00000000;
+ pltmp[10] = 0x64644a4a; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x64646464;
+ pltmp[11] = 0xAA28C000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
+ case RF_AIROHA_2230:
+ pltmp[0] = 0X16764A77; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9affafb2; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55d00a04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xFFFd203c; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xFFFd203c;
+ pltmp[4] = 0X0FBFDCc5; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00caa332; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0XF6632111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04C43640; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0X40000528;
+ pltmp[11] = 0x232dfF30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x232dfF30; /* antenna 1 */
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B2C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
+ reg->BB3C = 0x00000000;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = BB48_DEFAULT_AL2230_11G; /* 0x1048 11b TX RC filter */
+ reg->BB48 = BB48_DEFAULT_AL2230_11G; /* 20051221 ch14 */
+ pltmp[7] = BB4C_DEFAULT_AL2230_11G; /* 0x104c 11b TX RC filter */
+ reg->BB4C = BB4C_DEFAULT_AL2230_11G;
+ pltmp[8] = 0x27106200; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106200;
+ pltmp[9] = 0; /* 0x1054 */
+ reg->BB54 = 0x00000000;
+ pltmp[10] = 0x52524242; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x52524242;
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
+ case RF_AIROHA_2230S:
+ pltmp[0] = 0X16764A77; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9affafb2; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55d00a04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xFFFd203c; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xFFFd203c;
+ pltmp[4] = 0X0FBFDCc5; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x00caa332; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0XF6632111; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04C43640; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0x00002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0X40000528;
+ pltmp[11] = 0x232dfF30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x232dfF30; /* antenna 1 */
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B2C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = 0x00000000; /* 0x103c 11a TX LS filter */
+ reg->BB3C = 0x00000000;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = BB48_DEFAULT_AL2230_11G; /* 0x1048 11b TX RC filter */
+ reg->BB48 = BB48_DEFAULT_AL2230_11G; /* ch14 */
+ pltmp[7] = BB4C_DEFAULT_AL2230_11G; /* 0x104c 11b TX RC filter */
+ reg->BB4C = BB4C_DEFAULT_AL2230_11G;
+ pltmp[8] = 0x27106200; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106200;
+ pltmp[9] = 0; /* 0x1054 */
+ reg->BB54 = 0x00000000;
+ pltmp[10] = 0x52523232; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x52523232;
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
+ case RF_AIROHA_7230:
+ BBProcessor_AL7230_2400(pHwData);
- case RF_WB_242:
- case RF_WB_242_1: // 20060619.5 Add
-
- pltmp[0] = 0x16A8525D; // 0x1000 AGC_Ctrl1
- pltmp[1] = 0x9AFF9ABA; // 0x1004 AGC_Ctrl2
- pltmp[2] = 0x55D00A04; // 0x1008 AGC_Ctrl3
- pltmp[3] = 0xEEE91C32; // 0x100c AGC_Ctrl4
- reg->BB0C = 0xEEE91C32;
- pltmp[4] = 0x0FACDCC5; // 0x1010 AGC_Ctrl5
- pltmp[5] = 0x000AA344; // 0x1014 AGC_Ctrl6
- pltmp[6] = 0x22222221; // 0x1018 AGC_Ctrl7
- pltmp[7] = 0x0FA3F0ED; // 0x101c AGC_Ctrl8
- pltmp[8] = 0x04CC3440; // 20051018 0x03CB3440; // 0x1020 AGC_Ctrl9 20051014 0x03C33440
- pltmp[9] = 0xA9002A79; // 0x1024 AGC_Ctrl10
- pltmp[10] = 0x40000528; // 0x1028
- pltmp[11] = 0x23457F30; // 0x102c A_ACQ_Ctrl
- reg->BB2C = 0x23457F30;
- Wb35Reg_BurstWrite( pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT );
-
- pltmp[0] = 0x00002C54; // 0x1030 B_ACQ_Ctrl
- reg->BB30 = 0x00002C54;
- pltmp[1] = 0x00C0D6C5; // 0x1034 A_TXRX_Ctrl
- pltmp[2] = 0x5B2C8769; // 0x1038 B_TXRX_Ctrl
- pltmp[3] = pHwData->BB3c_cal; // 0x103c 11a TX LS filter
- reg->BB3C = pHwData->BB3c_cal;
- pltmp[4] = 0x00003F29; // 0x1040 11a TX LS filter
- pltmp[5] = 0x0EFEFBFE; // 0x1044 11a TX LS filter
- pltmp[6] = BB48_DEFAULT_WB242_11G; // 0x1048 11b TX RC filter 20060613.2
- reg->BB48 = BB48_DEFAULT_WB242_11G; // 20060613.1 20060613.2
- pltmp[7] = BB4C_DEFAULT_WB242_11G; // 0x104c 11b TX RC filter 20060613.2
- reg->BB4C = BB4C_DEFAULT_WB242_11G; // 20060613.1 20060613.2
- pltmp[8] = 0x27106208; // 0x1050 MODE_Ctrl
- reg->BB50 = 0x27106208;
- pltmp[9] = pHwData->BB54_cal; // 0x1054
- reg->BB54 = pHwData->BB54_cal;
- pltmp[10] = 0x52523131; // 0x1058 IQ_Alpha
- reg->BB58 = 0x52523131;
- pltmp[11] = 0xAA0AC000; // 20060825 0xAA2AC000; // 0x105c DC_Cancel
- Wb35Reg_BurstWrite( pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT );
-
- Wb35Reg_Write( pHwData, 0x1070, 0x00000045 );
- break;
- }
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
+ case RF_WB_242:
+ case RF_WB_242_1:
+ pltmp[0] = 0x16A8525D; /* 0x1000 AGC_Ctrl1 */
+ pltmp[1] = 0x9AFF9ABA; /* 0x1004 AGC_Ctrl2 */
+ pltmp[2] = 0x55D00A04; /* 0x1008 AGC_Ctrl3 */
+ pltmp[3] = 0xEEE91C32; /* 0x100c AGC_Ctrl4 */
+ reg->BB0C = 0xEEE91C32;
+ pltmp[4] = 0x0FACDCC5; /* 0x1010 AGC_Ctrl5 */
+ pltmp[5] = 0x000AA344; /* 0x1014 AGC_Ctrl6 */
+ pltmp[6] = 0x22222221; /* 0x1018 AGC_Ctrl7 */
+ pltmp[7] = 0x0FA3F0ED; /* 0x101c AGC_Ctrl8 */
+ pltmp[8] = 0x04CC3440; /* 0x1020 AGC_Ctrl9 */
+ pltmp[9] = 0xA9002A79; /* 0x1024 AGC_Ctrl10 */
+ pltmp[10] = 0x40000528; /* 0x1028 */
+ pltmp[11] = 0x23457F30; /* 0x102c A_ACQ_Ctrl */
+ reg->BB2C = 0x23457F30;
+ Wb35Reg_BurstWrite(pHwData, 0x1000, pltmp, 12, AUTO_INCREMENT);
+
+ pltmp[0] = 0x00002C54; /* 0x1030 B_ACQ_Ctrl */
+ reg->BB30 = 0x00002C54;
+ pltmp[1] = 0x00C0D6C5; /* 0x1034 A_TXRX_Ctrl */
+ pltmp[2] = 0x5B2C8769; /* 0x1038 B_TXRX_Ctrl */
+ pltmp[3] = pHwData->BB3c_cal; /* 0x103c 11a TX LS filter */
+ reg->BB3C = pHwData->BB3c_cal;
+ pltmp[4] = 0x00003F29; /* 0x1040 11a TX LS filter */
+ pltmp[5] = 0x0EFEFBFE; /* 0x1044 11a TX LS filter */
+ pltmp[6] = BB48_DEFAULT_WB242_11G; /* 0x1048 11b TX RC filter */
+ reg->BB48 = BB48_DEFAULT_WB242_11G;
+ pltmp[7] = BB4C_DEFAULT_WB242_11G; /* 0x104c 11b TX RC filter */
+ reg->BB4C = BB4C_DEFAULT_WB242_11G;
+ pltmp[8] = 0x27106208; /* 0x1050 MODE_Ctrl */
+ reg->BB50 = 0x27106208;
+ pltmp[9] = pHwData->BB54_cal; /* 0x1054 */
+ reg->BB54 = pHwData->BB54_cal;
+ pltmp[10] = 0x52523131; /* 0x1058 IQ_Alpha */
+ reg->BB58 = 0x52523131;
+ pltmp[11] = 0xAA0AC000; /* 0x105c DC_Cancel */
+ Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
+
+ Wb35Reg_Write(pHwData, 0x1070, 0x00000045);
+ break;
+ }
- // Fill the LNA table
- reg->LNAValue[0] = (u8)(reg->BB0C & 0xff);
+ /* Fill the LNA table */
+ reg->LNAValue[0] = (u8)(reg->BB0C & 0xff);
reg->LNAValue[1] = 0;
- reg->LNAValue[2] = (u8)((reg->BB0C & 0xff00)>>8);
+ reg->LNAValue[2] = (u8)((reg->BB0C & 0xff00) >> 8);
reg->LNAValue[3] = 0;
- // Fill SQ3 table
- for( i=0; i<MAX_SQ3_FILTER_SIZE; i++ )
- reg->SQ3_filter[i] = 0x2f; // half of Bit 0 ~ 6
+ /* Fill SQ3 table */
+ for (i = 0; i < MAX_SQ3_FILTER_SIZE; i++)
+ reg->SQ3_filter[i] = 0x2f; /* half of Bit 0 ~ 6 */
}
-void set_tx_power_per_channel_max2829( struct hw_data * pHwData, struct chan_info Channel)
+void set_tx_power_per_channel_max2829(struct hw_data *pHwData, struct chan_info Channel)
{
- RFSynthesizer_SetPowerIndex( pHwData, 100 ); // 20060620.1 Modify
+ RFSynthesizer_SetPowerIndex(pHwData, 100);
}
-void set_tx_power_per_channel_al2230( struct hw_data * pHwData, struct chan_info Channel )
+void set_tx_power_per_channel_al2230(struct hw_data *pHwData, struct chan_info Channel)
{
u8 index = 100;
- if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff) // 20060620.1 Add
+ if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
- RFSynthesizer_SetPowerIndex( pHwData, index );
+ RFSynthesizer_SetPowerIndex(pHwData, index);
}
-void set_tx_power_per_channel_al7230( struct hw_data * pHwData, struct chan_info Channel)
+void set_tx_power_per_channel_al7230(struct hw_data *pHwData, struct chan_info Channel)
{
u8 i, index = 100;
- switch ( Channel.band )
- {
- case BAND_TYPE_DSSS:
- case BAND_TYPE_OFDM_24:
- {
- if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
- index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
- }
- break;
- case BAND_TYPE_OFDM_5:
- {
- for (i =0; i<35; i++)
- {
- if (Channel.ChanNo == pHwData->TxVgaFor50[i].ChanNo)
- {
- if (pHwData->TxVgaFor50[i].TxVgaValue != 0xff)
- index = pHwData->TxVgaFor50[i].TxVgaValue;
- break;
- }
- }
+ switch (Channel.band) {
+ case BAND_TYPE_DSSS:
+ case BAND_TYPE_OFDM_24:
+ if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
+ index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
+ break;
+ case BAND_TYPE_OFDM_5:
+ for (i = 0; i < 35; i++) {
+ if (Channel.ChanNo == pHwData->TxVgaFor50[i].ChanNo) {
+ if (pHwData->TxVgaFor50[i].TxVgaValue != 0xff)
+ index = pHwData->TxVgaFor50[i].TxVgaValue;
+ break;
}
- break;
+ }
+ break;
}
- RFSynthesizer_SetPowerIndex( pHwData, index );
+ RFSynthesizer_SetPowerIndex(pHwData, index);
}
-void set_tx_power_per_channel_wb242( struct hw_data * pHwData, struct chan_info Channel)
+void set_tx_power_per_channel_wb242(struct hw_data *pHwData, struct chan_info Channel)
{
u8 index = 100;
- switch ( Channel.band )
- {
- case BAND_TYPE_DSSS:
- case BAND_TYPE_OFDM_24:
- {
- if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
- index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
- }
- break;
- case BAND_TYPE_OFDM_5:
- break;
+ switch (Channel.band) {
+ case BAND_TYPE_DSSS:
+ case BAND_TYPE_OFDM_24:
+ if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
+ index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
+ break;
+ case BAND_TYPE_OFDM_5:
+ break;
}
- RFSynthesizer_SetPowerIndex( pHwData, index );
+ RFSynthesizer_SetPowerIndex(pHwData, index);
}
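The set_tx_power_per_channel_* helpers above share one rule for the 2.4 GHz band: keep the default power index of 100 unless a per-channel value was actually programmed, with 0xff serving as the "not calibrated" sentinel. A minimal standalone restatement of that rule, where txvga_for_24 is a stand-in for pHwData->TxVgaFor24:

#include <stdint.h>

static uint8_t pick_txvga_index_sketch(const uint8_t *txvga_for_24, int chan_no)
{
	uint8_t index = 100;			/* default power index */

	if (txvga_for_24[chan_no - 1] != 0xff)	/* 0xff => no calibration data */
		index = txvga_for_24[chan_no - 1];
	return index;
}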
-//=============================================================================================================
-// RFSynthesizer_SwitchingChannel --
-//
-// Description:
-// Swithch the RF channel.
-//
-// Arguments:
-// pHwData - Handle of the USB Device.
-// Channel - The channel no.
-//
-// Return values:
-// None.
-//=============================================================================================================
-void
-RFSynthesizer_SwitchingChannel( struct hw_data * pHwData, struct chan_info Channel )
+/*
+ * ==========================================================================
+ * RFSynthesizer_SwitchingChannel --
+ *
+ * Description:
+ * Switch the RF channel.
+ *
+ * Arguments:
+ * pHwData - Handle of the USB Device.
+ * Channel - The channel no.
+ *
+ * Return values:
+ * None.
+ * ===========================================================================
+ */
+void RFSynthesizer_SwitchingChannel(struct hw_data *pHwData, struct chan_info Channel)
{
struct wb35_reg *reg = &pHwData->reg;
- u32 pltmp[16]; // The 16 is the maximum capability of hardware
+ u32 pltmp[16]; /* 16 is the maximum the hardware supports */
u32 count, ltmp;
u8 i, j, number;
u8 ChnlTmp;
- switch( pHwData->phy_type )
- {
- case RF_MAXIM_2825:
- case RF_MAXIM_V1: // 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331)
-
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 13
- {
- for( i=0; i<3; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2825_channel_data_24[Channel.ChanNo-1][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- RFSynthesizer_SetPowerIndex( pHwData, 100 );
- break;
-
- case RF_MAXIM_2827:
-
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 13
- {
- for( i=0; i<3; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2827_channel_data_24[Channel.ChanNo-1][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- else if( Channel.band == BAND_TYPE_OFDM_5 ) // channel 36 ~ 64
- {
- ChnlTmp = (Channel.ChanNo - 36) / 4;
- for( i=0; i<3; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2827_channel_data_50[ChnlTmp][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- RFSynthesizer_SetPowerIndex( pHwData, 100 );
- break;
-
- case RF_MAXIM_2828:
-
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 13
- {
- for( i=0; i<3; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2828_channel_data_24[Channel.ChanNo-1][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- else if( Channel.band == BAND_TYPE_OFDM_5 ) // channel 36 ~ 64
- {
- ChnlTmp = (Channel.ChanNo - 36) / 4;
- for ( i = 0; i < 3; i++)
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2828_channel_data_50[ChnlTmp][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- RFSynthesizer_SetPowerIndex( pHwData, 100 );
- break;
-
- case RF_MAXIM_2829:
+ switch (pHwData->phy_type) {
+ case RF_MAXIM_2825:
+ case RF_MAXIM_V1: /* 11g Winbond 2nd BB (with PHY board (v1) + Maxim 331) */
- if( Channel.band <= BAND_TYPE_OFDM_24)
- {
- for( i=0; i<3; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2829_channel_data_24[Channel.ChanNo-1][i], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
- else if( Channel.band == BAND_TYPE_OFDM_5 )
- {
- count = sizeof(max2829_channel_data_50) / sizeof(max2829_channel_data_50[0]);
-
- for( i=0; i<count; i++ )
- {
- if( max2829_channel_data_50[i][0] == Channel.ChanNo )
- {
- for( j=0; j<3; j++ )
- pltmp[j] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2829_channel_data_50[i][j+1], 18);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
-
- if( (max2829_channel_data_50[i][3] & 0x3FFFF) == 0x2A946 )
- {
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( (5<<18)|0x2A906, 18);
- Wb35Reg_Write( pHwData, 0x0864, ltmp );
- }
- else // 0x2A9C6
- {
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( (5<<18)|0x2A986, 18);
- Wb35Reg_Write( pHwData, 0x0864, ltmp );
- }
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 13 */
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2825_channel_data_24[Channel.ChanNo-1][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ }
+ RFSynthesizer_SetPowerIndex(pHwData, 100);
+ break;
+ case RF_MAXIM_2827:
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 13 */
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_channel_data_24[Channel.ChanNo-1][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ } else if (Channel.band == BAND_TYPE_OFDM_5) { /* channel 36 ~ 64 */
+ ChnlTmp = (Channel.ChanNo - 36) / 4;
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_channel_data_50[ChnlTmp][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ }
+ RFSynthesizer_SetPowerIndex(pHwData, 100);
+ break;
+ case RF_MAXIM_2828:
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 13 */
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_channel_data_24[Channel.ChanNo-1][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ } else if (Channel.band == BAND_TYPE_OFDM_5) { /* channel 36 ~ 64 */
+ ChnlTmp = (Channel.ChanNo - 36) / 4;
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_channel_data_50[ChnlTmp][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ }
+ RFSynthesizer_SetPowerIndex(pHwData, 100);
+ break;
+ case RF_MAXIM_2829:
+ if (Channel.band <= BAND_TYPE_OFDM_24) {
+ for (i = 0; i < 3; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2829_channel_data_24[Channel.ChanNo-1][i], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+ } else if (Channel.band == BAND_TYPE_OFDM_5) {
+ count = sizeof(max2829_channel_data_50) / sizeof(max2829_channel_data_50[0]);
+
+ for (i = 0; i < count; i++) {
+ if (max2829_channel_data_50[i][0] == Channel.ChanNo) {
+ for (j = 0; j < 3; j++)
+ pltmp[j] = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2829_channel_data_50[i][j+1], 18);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
+
+ if ((max2829_channel_data_50[i][3] & 0x3FFFF) == 0x2A946) {
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse((5 << 18) | 0x2A906, 18);
+ Wb35Reg_Write(pHwData, 0x0864, ltmp);
+ } else { /* 0x2A9C6 */
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse((5 << 18) | 0x2A986, 18);
+ Wb35Reg_Write(pHwData, 0x0864, ltmp);
}
}
}
- set_tx_power_per_channel_max2829( pHwData, Channel );
- break;
-
- case RF_AIROHA_2230:
- case RF_AIROHA_2230S: // 20060420 Add this
-
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 14
- {
- for( i=0; i<2; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( al2230_channel_data_24[Channel.ChanNo-1][i], 20);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 2, NO_INCREMENT );
+ }
+ set_tx_power_per_channel_max2829(pHwData, Channel);
+ break;
+ case RF_AIROHA_2230:
+ case RF_AIROHA_2230S:
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 14 */
+ for (i = 0; i < 2; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230_channel_data_24[Channel.ChanNo-1][i], 20);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 2, NO_INCREMENT);
+ }
+ set_tx_power_per_channel_al2230(pHwData, Channel);
+ break;
+ case RF_AIROHA_7230:
+ /* Channel independent registers */
+ if (Channel.band != pHwData->band) {
+ if (Channel.band <= BAND_TYPE_OFDM_24) {
+ /* Update BB register */
+ BBProcessor_AL7230_2400(pHwData);
+
+ number = sizeof(al7230_rf_data_24) / sizeof(al7230_rf_data_24[0]);
+ Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
+ } else {
+ /* Update BB register */
+ BBProcessor_AL7230_5000(pHwData);
+
+ number = sizeof(al7230_rf_data_50) / sizeof(al7230_rf_data_50[0]);
+ Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
}
- set_tx_power_per_channel_al2230( pHwData, Channel );
- break;
- case RF_AIROHA_7230:
-
- //Start to fill RF parameters, PLL_ON should be pulled low.
- //Wb35Reg_Write( pHwData, 0x03dc, 0x00000000 );
- //printk("* PLL_ON low\n");
-
- //Channel independent registers
- if( Channel.band != pHwData->band)
- {
- if (Channel.band <= BAND_TYPE_OFDM_24)
- {
- //Update BB register
- BBProcessor_AL7230_2400(pHwData);
-
- number = sizeof(al7230_rf_data_24)/sizeof(al7230_rf_data_24[0]);
- Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
- }
- else
- {
- //Update BB register
- BBProcessor_AL7230_5000(pHwData);
-
- number = sizeof(al7230_rf_data_50)/sizeof(al7230_rf_data_50[0]);
- Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
- }
-
- // Write to register. number must less and equal than 16
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, number, NO_INCREMENT );
- #ifdef _PE_STATE_DUMP_
- printk("Band changed\n");
- #endif
- }
+ /* Write to register; number must be less than or equal to 16 */
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, number, NO_INCREMENT);
+ #ifdef _PE_STATE_DUMP_
+ printk("Band changed\n");
+ #endif
+ }
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 14
- {
- for( i=0; i<2; i++ )
- pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_channel_data_24[Channel.ChanNo-1][i]&0xffffff);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 2, NO_INCREMENT );
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 14 */
+ for (i = 0; i < 2; i++)
+ pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_channel_data_24[Channel.ChanNo-1][i] & 0xffffff);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 2, NO_INCREMENT);
+ } else if (Channel.band == BAND_TYPE_OFDM_5) {
+ /* Update Reg12 */
+ if ((Channel.ChanNo > 64) && (Channel.ChanNo <= 165)) {
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x00143c;
+ Wb35Reg_Write(pHwData, 0x0864, ltmp);
+ } else { /* reg12 = 0x00147c for 4920 ~ 5320 MHz */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x00147c;
+ Wb35Reg_Write(pHwData, 0x0864, ltmp);
}
- else if( Channel.band == BAND_TYPE_OFDM_5 )
- {
- //Update Reg12
- if ((Channel.ChanNo > 64) && (Channel.ChanNo <= 165))
- {
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x00143c;
- Wb35Reg_Write( pHwData, 0x0864, ltmp );
- }
- else //reg12 = 0x00147c at Channel 4920 ~ 5320
- {
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x00147c;
- Wb35Reg_Write( pHwData, 0x0864, ltmp );
- }
- count = sizeof(al7230_channel_data_5) / sizeof(al7230_channel_data_5[0]);
+ count = sizeof(al7230_channel_data_5) / sizeof(al7230_channel_data_5[0]);
- for (i=0; i<count; i++)
- {
- if (al7230_channel_data_5[i][0] == Channel.ChanNo)
- {
- for( j=0; j<3; j++ )
- pltmp[j] = (1 << 31) | (0 << 30) | (24 << 24) | ( al7230_channel_data_5[i][j+1]&0xffffff);
- Wb35Reg_BurstWrite( pHwData, 0x0864, pltmp, 3, NO_INCREMENT );
- }
+ for (i = 0; i < count; i++) {
+ if (al7230_channel_data_5[i][0] == Channel.ChanNo) {
+ for (j = 0; j < 3; j++)
+ pltmp[j] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_channel_data_5[i][j+1] & 0xffffff);
+ Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, 3, NO_INCREMENT);
}
}
- set_tx_power_per_channel_al7230(pHwData, Channel);
- break;
-
- case RF_WB_242:
- case RF_WB_242_1: // 20060619.5 Add
+ }
+ set_tx_power_per_channel_al7230(pHwData, Channel);
+ break;
+ case RF_WB_242:
+ case RF_WB_242_1:
- if( Channel.band <= BAND_TYPE_OFDM_24 ) // channel 1 ~ 14
- {
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( w89rf242_channel_data_24[Channel.ChanNo-1][0], 24);
- Wb35Reg_Write( pHwData, 0x864, ltmp );
- }
- set_tx_power_per_channel_wb242(pHwData, Channel);
- break;
+ if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 14 */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(w89rf242_channel_data_24[Channel.ChanNo-1][0], 24);
+ Wb35Reg_Write(pHwData, 0x864, ltmp);
+ }
+ set_tx_power_per_channel_wb242(pHwData, Channel);
+ break;
}
- if( Channel.band <= BAND_TYPE_OFDM_24 )
- {
- // BB: select 2.4 GHz, bit[12-11]=00
- reg->BB50 &= ~(BIT(11)|BIT(12));
- Wb35Reg_Write( pHwData, 0x1050, reg->BB50 ); // MODE_Ctrl
- // MAC: select 2.4 GHz, bit[5]=0
+ if (Channel.band <= BAND_TYPE_OFDM_24) {
+ /* BB: select 2.4 GHz, bit[12-11]=00 */
+ reg->BB50 &= ~(BIT(11) | BIT(12));
+ Wb35Reg_Write(pHwData, 0x1050, reg->BB50); /* MODE_Ctrl */
+ /* MAC: select 2.4 GHz, bit[5]=0 */
reg->M78_ERPInformation &= ~BIT(5);
- Wb35Reg_Write( pHwData, 0x0878, reg->M78_ERPInformation );
- // enable 11b Baseband
+ Wb35Reg_Write(pHwData, 0x0878, reg->M78_ERPInformation);
+ /* enable 11b Baseband */
reg->BB30 &= ~BIT(31);
- Wb35Reg_Write( pHwData, 0x1030, reg->BB30 );
- }
- else if( (Channel.band == BAND_TYPE_OFDM_5) )
- {
- // BB: select 5 GHz
- reg->BB50 &= ~(BIT(11)|BIT(12));
- if (Channel.ChanNo <=64 )
- reg->BB50 |= BIT(12); // 10-5.25GHz
+ Wb35Reg_Write(pHwData, 0x1030, reg->BB30);
+ } else if (Channel.band == BAND_TYPE_OFDM_5) {
+ /* BB: select 5 GHz */
+ reg->BB50 &= ~(BIT(11) | BIT(12));
+ if (Channel.ChanNo <= 64)
+ reg->BB50 |= BIT(12); /* 10-5.25GHz */
else if ((Channel.ChanNo >= 100) && (Channel.ChanNo <= 124))
- reg->BB50 |= BIT(11); // 01-5.48GHz
- else if ((Channel.ChanNo >=128) && (Channel.ChanNo <= 161))
- reg->BB50 |= (BIT(12)|BIT(11)); // 11-5.775GHz
- else //Chan 184 ~ 196 will use bit[12-11] = 10 in version sh-src-1.2.25
+ reg->BB50 |= BIT(11); /* 01-5.48GHz */
+ else if ((Channel.ChanNo >= 128) && (Channel.ChanNo <= 161))
+ reg->BB50 |= (BIT(12) | BIT(11)); /* 11-5.775GHz */
+ else /* Chan 184 ~ 196 will use bit[12-11] = 10 in version sh-src-1.2.25 */
reg->BB50 |= BIT(12);
- Wb35Reg_Write( pHwData, 0x1050, reg->BB50 ); // MODE_Ctrl
+ Wb35Reg_Write(pHwData, 0x1050, reg->BB50); /* MODE_Ctrl */
- //(1) M78 should alway use 2.4G setting when using RF_AIROHA_7230
- //(2) BB30 has been updated previously.
- if (pHwData->phy_type != RF_AIROHA_7230)
- {
- // MAC: select 5 GHz, bit[5]=1
+ /* (1) M78 should always use the 2.4G setting when using RF_AIROHA_7230 */
+ /* (2) BB30 has been updated previously. */
+ if (pHwData->phy_type != RF_AIROHA_7230) {
+ /* MAC: select 5 GHz, bit[5]=1 */
reg->M78_ERPInformation |= BIT(5);
- Wb35Reg_Write( pHwData, 0x0878, reg->M78_ERPInformation );
+ Wb35Reg_Write(pHwData, 0x0878, reg->M78_ERPInformation);
- // disable 11b Baseband
+ /* disable 11b Baseband */
reg->BB30 |= BIT(31);
- Wb35Reg_Write( pHwData, 0x1030, reg->BB30 );
+ Wb35Reg_Write(pHwData, 0x1030, reg->BB30);
}
}
}
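The tail of RFSynthesizer_SwitchingChannel() encodes the band in MODE_Ctrl (BB50) bits [12:11]: 00 selects 2.4 GHz, and for 5 GHz the code above picks 10 for channels up to 64 (and 184 ~ 196), 01 for 100 ~ 124 and 11 for 128 ~ 161. The helper below merely restates that 5 GHz mapping for readability; it introduces no new behaviour and is not part of the patch.

#include <stdint.h>

/* Restatement of the BB50 bit[12:11] selection for a 5 GHz channel number. */
static uint32_t bb50_band_bits_5ghz_sketch(int chan_no)
{
	if (chan_no <= 64)
		return 1u << 12;			/* 10 - 5.25 GHz block  */
	if (chan_no >= 100 && chan_no <= 124)
		return 1u << 11;			/* 01 - 5.48 GHz block  */
	if (chan_no >= 128 && chan_no <= 161)
		return (1u << 12) | (1u << 11);		/* 11 - 5.775 GHz block */
	return 1u << 12;				/* ch 184 ~ 196 reuse 10 */
}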
-//Set the tx power directly from DUT GUI, not from the EEPROM. Return the current setting
-u8 RFSynthesizer_SetPowerIndex( struct hw_data * pHwData, u8 PowerIndex )
+/*
+ * Set the tx power directly from DUT GUI, not from the EEPROM.
+ * Return the current setting
+ */
+u8 RFSynthesizer_SetPowerIndex(struct hw_data *pHwData, u8 PowerIndex)
{
u32 Band = pHwData->band;
- u8 index=0;
+ u8 index = 0;
- if( pHwData->power_index == PowerIndex ) // 20060620.1 Add
+ if (pHwData->power_index == PowerIndex)
return PowerIndex;
- if (RF_MAXIM_2825 == pHwData->phy_type)
- {
- // Channel 1 - 13
- index = RFSynthesizer_SetMaxim2825Power( pHwData, PowerIndex );
- }
- else if (RF_MAXIM_2827 == pHwData->phy_type)
- {
- if( Band <= BAND_TYPE_OFDM_24 ) // Channel 1 - 13
- index = RFSynthesizer_SetMaxim2827_24Power( pHwData, PowerIndex );
- else// if( Band == BAND_TYPE_OFDM_5 ) // Channel 36 - 64
- index = RFSynthesizer_SetMaxim2827_50Power( pHwData, PowerIndex );
- }
- else if (RF_MAXIM_2828 == pHwData->phy_type)
- {
- if( Band <= BAND_TYPE_OFDM_24 ) // Channel 1 - 13
- index = RFSynthesizer_SetMaxim2828_24Power( pHwData, PowerIndex );
- else// if( Band == BAND_TYPE_OFDM_5 ) // Channel 36 - 64
- index = RFSynthesizer_SetMaxim2828_50Power( pHwData, PowerIndex );
- }
- else if( RF_AIROHA_2230 == pHwData->phy_type )
- {
- //Power index: 0 ~ 63 // Channel 1 - 14
- index = RFSynthesizer_SetAiroha2230Power( pHwData, PowerIndex );
- index = (u8)al2230_txvga_data[index][1];
- }
- else if( RF_AIROHA_2230S == pHwData->phy_type ) // 20060420 Add this
- {
- //Power index: 0 ~ 63 // Channel 1 - 14
- index = RFSynthesizer_SetAiroha2230Power( pHwData, PowerIndex );
- index = (u8)al2230_txvga_data[index][1];
- }
- else if( RF_AIROHA_7230 == pHwData->phy_type )
- {
- //Power index: 0 ~ 63
- index = RFSynthesizer_SetAiroha7230Power( pHwData, PowerIndex );
+ if (RF_MAXIM_2825 == pHwData->phy_type) {
+ /* Channel 1 - 13 */
+ index = RFSynthesizer_SetMaxim2825Power(pHwData, PowerIndex);
+ } else if (RF_MAXIM_2827 == pHwData->phy_type) {
+ if (Band <= BAND_TYPE_OFDM_24) /* Channel 1 - 13 */
+ index = RFSynthesizer_SetMaxim2827_24Power(pHwData, PowerIndex);
+ else /* Channel 36 - 64 */
+ index = RFSynthesizer_SetMaxim2827_50Power(pHwData, PowerIndex);
+ } else if (RF_MAXIM_2828 == pHwData->phy_type) {
+ if (Band <= BAND_TYPE_OFDM_24) /* Channel 1 - 13 */
+ index = RFSynthesizer_SetMaxim2828_24Power(pHwData, PowerIndex);
+ else /* Channel 36 - 64 */
+ index = RFSynthesizer_SetMaxim2828_50Power(pHwData, PowerIndex);
+ } else if (RF_AIROHA_2230 == pHwData->phy_type) {
+ /* Power index: 0 ~ 63 --- Channel 1 - 14 */
+ index = RFSynthesizer_SetAiroha2230Power(pHwData, PowerIndex);
+ index = (u8)al2230_txvga_data[index][1];
+ } else if (RF_AIROHA_2230S == pHwData->phy_type) {
+ /* Power index: 0 ~ 63 --- Channel 1 - 14 */
+ index = RFSynthesizer_SetAiroha2230Power(pHwData, PowerIndex);
+ index = (u8)al2230_txvga_data[index][1];
+ } else if (RF_AIROHA_7230 == pHwData->phy_type) {
+ /* Power index: 0 ~ 63 */
+ index = RFSynthesizer_SetAiroha7230Power(pHwData, PowerIndex);
index = (u8)al7230_txvga_data[index][1];
- }
- else if( (RF_WB_242 == pHwData->phy_type) ||
- (RF_WB_242_1 == pHwData->phy_type) ) // 20060619.5 Add
- {
- //Power index: 0 ~ 19 for original. New range is 0 ~ 33
- index = RFSynthesizer_SetWinbond242Power( pHwData, PowerIndex );
+ } else if ((RF_WB_242 == pHwData->phy_type) ||
+ (RF_WB_242_1 == pHwData->phy_type)) {
+ /* Power index: 0 ~ 19 for original. New range is 0 ~ 33 */
+ index = RFSynthesizer_SetWinbond242Power(pHwData, PowerIndex);
index = (u8)w89rf242_txvga_data[index][1];
}
- pHwData->power_index = index; // Backup current
+ pHwData->power_index = index; /* Backup current */
return index;
}
-//-- Sub function
-u8 RFSynthesizer_SetMaxim2828_24Power( struct hw_data * pHwData, u8 index )
+/* -- Sub function */
+u8 RFSynthesizer_SetMaxim2828_24Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- if( index > 1 ) index = 1;
- PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2828_power_data_24[index], 18);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ u32 PowerData;
+ if (index > 1)
+ index = 1;
+ PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_power_data_24[index], 18);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return index;
}
-//--
-u8 RFSynthesizer_SetMaxim2828_50Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetMaxim2828_50Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- if( index > 1 ) index = 1;
- PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2828_power_data_50[index], 18);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ u32 PowerData;
+ if (index > 1)
+ index = 1;
+ PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2828_power_data_50[index], 18);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return index;
}
-//--
-u8 RFSynthesizer_SetMaxim2827_24Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetMaxim2827_24Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- if( index > 1 ) index = 1;
- PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2827_power_data_24[index], 18);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ u32 PowerData;
+ if (index > 1)
+ index = 1;
+ PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_power_data_24[index], 18);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return index;
}
-//--
-u8 RFSynthesizer_SetMaxim2827_50Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetMaxim2827_50Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- if( index > 1 ) index = 1;
- PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2827_power_data_50[index], 18);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ u32 PowerData;
+ if (index > 1)
+ index = 1;
+ PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2827_power_data_50[index], 18);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return index;
}
-//--
-u8 RFSynthesizer_SetMaxim2825Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetMaxim2825Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- if( index > 1 ) index = 1;
- PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse( max2825_power_data_24[index], 18);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ u32 PowerData;
+ if (index > 1)
+ index = 1;
+ PowerData = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(max2825_power_data_24[index], 18);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return index;
}
-//--
-u8 RFSynthesizer_SetAiroha2230Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetAiroha2230Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- u8 i,count;
+ u32 PowerData;
+ u8 i, count;
count = sizeof(al2230_txvga_data) / sizeof(al2230_txvga_data[0]);
- for (i=0; i<count; i++)
- {
+ for (i = 0; i < count; i++) {
if (al2230_txvga_data[i][1] >= index)
break;
}
if (i == count)
i--;
- PowerData = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( al2230_txvga_data[i][0], 20);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ PowerData = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(al2230_txvga_data[i][0], 20);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return i;
}
-//--
-u8 RFSynthesizer_SetAiroha7230Power( struct hw_data * pHwData, u8 index )
+
+u8 RFSynthesizer_SetAiroha7230Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- u8 i,count;
+ u32 PowerData;
+ u8 i, count;
- //PowerData = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse( airoha_power_data_24[index], 20);
count = sizeof(al7230_txvga_data) / sizeof(al7230_txvga_data[0]);
- for (i=0; i<count; i++)
- {
+ for (i = 0; i < count; i++) {
if (al7230_txvga_data[i][1] >= index)
break;
}
if (i == count)
i--;
- PowerData = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_txvga_data[i][0]&0xffffff);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ PowerData = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_txvga_data[i][0] & 0xffffff);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
return i;
}
-u8 RFSynthesizer_SetWinbond242Power( struct hw_data * pHwData, u8 index )
+u8 RFSynthesizer_SetWinbond242Power(struct hw_data *pHwData, u8 index)
{
- u32 PowerData;
- u8 i,count;
+ u32 PowerData;
+ u8 i, count;
count = sizeof(w89rf242_txvga_data) / sizeof(w89rf242_txvga_data[0]);
- for (i=0; i<count; i++)
- {
+ for (i = 0; i < count; i++) {
if (w89rf242_txvga_data[i][1] >= index)
break;
}
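+	/* Fall back to the last table entry when the requested index is beyond the table */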
if (i == count)
i--;
- // Set TxVga into RF
- PowerData = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( w89rf242_txvga_data[i][0], 24);
- Wb35Reg_Write( pHwData, 0x0864, PowerData );
-
- // Update BB48 BB4C BB58 for high precision txvga
- Wb35Reg_Write( pHwData, 0x1048, w89rf242_txvga_data[i][2] );
- Wb35Reg_Write( pHwData, 0x104c, w89rf242_txvga_data[i][3] );
- Wb35Reg_Write( pHwData, 0x1058, w89rf242_txvga_data[i][4] );
-
-// Rf vga 0 ~ 3 for temperature compensate. It will affect the scan Bss.
-// The i value equals to 8 or 7 usually. So It's not necessary to setup this RF register.
-// if( i <= 3 )
-// PowerData = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( 0x000024, 24 );
-// else
-// PowerData = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse( 0x001824, 24 );
-// Wb35Reg_Write( pHwData, 0x0864, PowerData );
+ /* Set TxVga into RF */
+ PowerData = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(w89rf242_txvga_data[i][0], 24);
+ Wb35Reg_Write(pHwData, 0x0864, PowerData);
+
+ /* Update BB48 BB4C BB58 for high precision txvga */
+ Wb35Reg_Write(pHwData, 0x1048, w89rf242_txvga_data[i][2]);
+ Wb35Reg_Write(pHwData, 0x104c, w89rf242_txvga_data[i][3]);
+ Wb35Reg_Write(pHwData, 0x1058, w89rf242_txvga_data[i][4]);
+
return i;
}
-//===========================================================================================================
-// Dxx_initial --
-// Mxx_initial --
- //
-// Routine Description:
-// Initial the hardware setting and module variable
- //
-//===========================================================================================================
-void Dxx_initial( struct hw_data * pHwData )
+/*
+ * ===========================================================================
+ * Dxx_initial --
+ * Mxx_initial --
+ *
+ * Routine Description:
+ *	Initialize the hardware settings and module variables
+ * ===========================================================================
+ */
+void Dxx_initial(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
- // Old IC:Single mode only.
- // New IC: operation decide by Software set bit[4]. 1:multiple 0: single
- reg->D00_DmaControl = 0xc0000004; //Txon, Rxon, multiple Rx for new 4k DMA
- //Txon, Rxon, single Rx for old 8k ASIC
- if( !HAL_USB_MODE_BURST( pHwData ) )
- reg->D00_DmaControl = 0xc0000000;//Txon, Rxon, single Rx for new 4k DMA
+ /*
+ * Old IC: Single mode only.
+	 * New IC: operation is decided by the software-set bit[4]. 1: multiple, 0: single
+ */
+ reg->D00_DmaControl = 0xc0000004; /* Txon, Rxon, multiple Rx for new 4k DMA */
+ /* Txon, Rxon, single Rx for old 8k ASIC */
+ if (!HAL_USB_MODE_BURST(pHwData))
+ reg->D00_DmaControl = 0xc0000000; /* Txon, Rxon, single Rx for new 4k DMA */
- Wb35Reg_WriteSync( pHwData, 0x0400, reg->D00_DmaControl );
+ Wb35Reg_WriteSync(pHwData, 0x0400, reg->D00_DmaControl);
}
-void Mxx_initial( struct hw_data * pHwData )
+void Mxx_initial(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
- u32 tmp;
- u32 pltmp[11];
+ u32 tmp;
+ u32 pltmp[11];
u16 i;
- //======================================================
- // Initial Mxx register
- //======================================================
+ /*
+ * ======================================================
+	 * Initialize the Mxx registers
+ * ======================================================
+ */
- // M00 bit set
-#ifdef _IBSS_BEACON_SEQ_STICK_
- reg->M00_MacControl = 0; // Solve beacon sequence number stop by software
-#else
- reg->M00_MacControl = 0x80000000; // Solve beacon sequence number stop by hardware
-#endif
+	/* M00 bit set */
+#ifdef _IBSS_BEACON_SEQ_STICK_
+	reg->M00_MacControl = 0; /* Solve beacon sequence number stop by software */
+#else
+	reg->M00_MacControl = 0x80000000; /* Solve beacon sequence number stop by hardware */
+#endif
- // M24 disable enter power save, BB RxOn and enable NAV attack
+ /* M24 disable enter power save, BB RxOn and enable NAV attack */
reg->M24_MacControl = 0x08040042;
pltmp[0] = reg->M24_MacControl;
- pltmp[1] = 0; // Skip M28, because no initialize value is required.
+ pltmp[1] = 0; /* Skip M28, because no initialize value is required. */
- // M2C CWmin and CWmax setting
+ /* M2C CWmin and CWmax setting */
pHwData->cwmin = DEFAULT_CWMIN;
pHwData->cwmax = DEFAULT_CWMAX;
reg->M2C_MacControl = DEFAULT_CWMIN << 10;
reg->M2C_MacControl |= DEFAULT_CWMAX;
pltmp[2] = reg->M2C_MacControl;
- // M30 BSSID
+ /* M30 BSSID */
pltmp[3] = *(u32 *)pHwData->bssid;
- // M34
+ /* M34 */
pHwData->AID = DEFAULT_AID;
- tmp = *(u16 *)(pHwData->bssid+4);
+ tmp = *(u16 *) (pHwData->bssid + 4);
tmp |= DEFAULT_AID << 16;
pltmp[4] = tmp;
- // M38
- reg->M38_MacControl = (DEFAULT_RATE_RETRY_LIMIT<<8) | (DEFAULT_LONG_RETRY_LIMIT << 4) | DEFAULT_SHORT_RETRY_LIMIT;
+ /* M38 */
+ reg->M38_MacControl = (DEFAULT_RATE_RETRY_LIMIT << 8) | (DEFAULT_LONG_RETRY_LIMIT << 4) | DEFAULT_SHORT_RETRY_LIMIT;
pltmp[5] = reg->M38_MacControl;
- // M3C
+ /* M3C */
tmp = (DEFAULT_PIFST << 26) | (DEFAULT_EIFST << 16) | (DEFAULT_DIFST << 8) | (DEFAULT_SIFST << 4) | DEFAULT_OSIFST ;
reg->M3C_MacControl = tmp;
pltmp[6] = tmp;
- // M40
+ /* M40 */
pHwData->slot_time_select = DEFAULT_SLOT_TIME;
tmp = (DEFAULT_ATIMWD << 16) | DEFAULT_SLOT_TIME;
reg->M40_MacControl = tmp;
pltmp[7] = tmp;
- // M44
- tmp = DEFAULT_MAX_TX_MSDU_LIFE_TIME << 10; // *1024
+ /* M44 */
+ tmp = DEFAULT_MAX_TX_MSDU_LIFE_TIME << 10; /* *1024 */
reg->M44_MacControl = tmp;
pltmp[8] = tmp;
- // M48
+ /* M48 */
pHwData->BeaconPeriod = DEFAULT_BEACON_INTERVAL;
pHwData->ProbeDelay = DEFAULT_PROBE_DELAY_TIME;
tmp = (DEFAULT_BEACON_INTERVAL << 16) | DEFAULT_PROBE_DELAY_TIME;
reg->M48_MacControl = tmp;
pltmp[9] = tmp;
- //M4C
+ /* M4C */
reg->M4C_MacStatus = (DEFAULT_PROTOCOL_VERSION << 30) | (DEFAULT_MAC_POWER_STATE << 28) | (DEFAULT_DTIM_ALERT_TIME << 24);
pltmp[10] = reg->M4C_MacStatus;
- // Burst write
- //Wb35Reg_BurstWrite( pHwData, 0x0824, pltmp, 11, AUTO_INCREMENT );
- for( i=0; i<11; i++ )
- Wb35Reg_WriteSync( pHwData, 0x0824 + i*4, pltmp[i] );
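+	/* Write M24 through M4C (0x0824 + 4 * i), one register at a time */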
+ for (i = 0; i < 11; i++)
+ Wb35Reg_WriteSync(pHwData, 0x0824 + i * 4, pltmp[i]);
- // M60
- Wb35Reg_WriteSync( pHwData, 0x0860, 0x12481248 );
+ /* M60 */
+ Wb35Reg_WriteSync(pHwData, 0x0860, 0x12481248);
reg->M60_MacControl = 0x12481248;
- // M68
- Wb35Reg_WriteSync( pHwData, 0x0868, 0x00050900 ); // 20051018 0x000F0F00 ); // 940930 0x00131300
+ /* M68 */
+ Wb35Reg_WriteSync(pHwData, 0x0868, 0x00050900);
reg->M68_MacControl = 0x00050900;
- // M98
- Wb35Reg_WriteSync( pHwData, 0x0898, 0xffff8888 );
+ /* M98 */
+ Wb35Reg_WriteSync(pHwData, 0x0898, 0xffff8888);
reg->M98_MacControl = 0xffff8888;
}
-void Uxx_power_off_procedure( struct hw_data * pHwData )
+void Uxx_power_off_procedure(struct hw_data *pHwData)
{
- // SW, PMU reset and turn off clock
- Wb35Reg_WriteSync( pHwData, 0x03b0, 3 );
- Wb35Reg_WriteSync( pHwData, 0x03f0, 0xf9 );
+ /* SW, PMU reset and turn off clock */
+ Wb35Reg_WriteSync(pHwData, 0x03b0, 3);
+ Wb35Reg_WriteSync(pHwData, 0x03f0, 0xf9);
}
-//Decide the TxVga of every channel
-void GetTxVgaFromEEPROM( struct hw_data * pHwData )
+/* Decide the TxVga of every channel */
+void GetTxVgaFromEEPROM(struct hw_data *pHwData)
{
- u32 i, j, ltmp;
- u16 Value[MAX_TXVGA_EEPROM];
- u8 *pctmp;
- u8 ctmp=0;
-
- // Get the entire TxVga setting in EEPROM
- for( i=0; i<MAX_TXVGA_EEPROM; i++ )
- {
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08100000 + 0x00010000*i );
- Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- Value[i] = (u16)( ltmp & 0xffff ); // Get 16 bit available
- Value[i] = cpu_to_le16( Value[i] ); // [7:0]2412 [7:0]2417 ....
+ u32 i, j, ltmp;
+ u16 Value[MAX_TXVGA_EEPROM];
+ u8 *pctmp;
+ u8 ctmp = 0;
+
+ /* Get the entire TxVga setting in EEPROM */
+ for (i = 0; i < MAX_TXVGA_EEPROM; i++) {
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08100000 + 0x00010000 * i);
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
+		Value[i] = (u16) (ltmp & 0xffff); /* Keep the available 16 bits */
+ Value[i] = cpu_to_le16(Value[i]); /* [7:0]2412 [7:0]2417 .... */
}
- // Adjust the filed which fills with reserved value.
- pctmp = (u8 *)Value;
- for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ )
- {
- if( pctmp[i] != 0xff )
+	/* Adjust fields left at the reserved value: reuse the last valid entry for 0xff */
+ pctmp = (u8 *) Value;
+ for (i = 0; i < (MAX_TXVGA_EEPROM * 2); i++) {
+ if (pctmp[i] != 0xff)
ctmp = pctmp[i];
else
pctmp[i] = ctmp;
}
- // Adjust WB_242 to WB_242_1 TxVga scale
- if( pHwData->phy_type == RF_WB_242 )
- {
- for( i=0; i<4; i++ ) // Only 2412 2437 2462 2484 case must be modified
- {
- for( j=0; j<(sizeof(w89rf242_txvga_old_mapping)/sizeof(w89rf242_txvga_old_mapping[0])); j++ )
- {
- if( pctmp[i] < (u8)w89rf242_txvga_old_mapping[j][1] )
- {
- pctmp[i] = (u8)w89rf242_txvga_old_mapping[j][0];
+ /* Adjust WB_242 to WB_242_1 TxVga scale */
+ if (pHwData->phy_type == RF_WB_242) {
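+		/* Map the old 0 ~ 19 power index onto the new 0 ~ 33 scale */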
+		for (i = 0; i < 4; i++) { /* Only the 2412, 2437, 2462 and 2484 cases must be modified */
+ for (j = 0; j < (sizeof(w89rf242_txvga_old_mapping) / sizeof(w89rf242_txvga_old_mapping[0])); j++) {
+ if (pctmp[i] < (u8) w89rf242_txvga_old_mapping[j][1]) {
+ pctmp[i] = (u8) w89rf242_txvga_old_mapping[j][0];
break;
}
}
- if( j == (sizeof(w89rf242_txvga_old_mapping)/sizeof(w89rf242_txvga_old_mapping[0])) )
+ if (j == (sizeof(w89rf242_txvga_old_mapping) / sizeof(w89rf242_txvga_old_mapping[0])))
pctmp[i] = (u8)w89rf242_txvga_old_mapping[j-1][0];
}
}
- // 20060621 Add
- memcpy( pHwData->TxVgaSettingInEEPROM, pctmp, MAX_TXVGA_EEPROM*2 ); //MAX_TXVGA_EEPROM is u16 count
- EEPROMTxVgaAdjust( pHwData );
+	memcpy(pHwData->TxVgaSettingInEEPROM, pctmp, MAX_TXVGA_EEPROM * 2); /* MAX_TXVGA_EEPROM is a u16 count */
+ EEPROMTxVgaAdjust(pHwData);
}
-// This function will affect the TxVga parameter in HAL. If hal_set_current_channel
-// or RFSynthesizer_SetPowerIndex be called, new TxVga will take effect.
-// TxVgaSettingInEEPROM of sHwData is an u8 array point to EEPROM contain for IS89C35
-// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga.
-void EEPROMTxVgaAdjust( struct hw_data * pHwData ) // 20060619.5 Add
+/*
+ * This function affects the TxVga parameter in the HAL. Once hal_set_current_channel
+ * or RFSynthesizer_SetPowerIndex is called, the new TxVga takes effect.
+ * TxVgaSettingInEEPROM of sHwData is a u8 array pointing to the EEPROM contents for the IS89C35.
+ * This function uses the default TxVgaSettingInEEPROM data to calculate the new TxVga.
+ */
+void EEPROMTxVgaAdjust(struct hw_data *pHwData)
{
- u8 * pTxVga = pHwData->TxVgaSettingInEEPROM;
- s16 i, stmp;
+ u8 *pTxVga = pHwData->TxVgaSettingInEEPROM;
+ s16 i, stmp;
- //-- 2.4G -- 20060704.2 Request from Tiger
- //channel 1 ~ 5
+ /* -- 2.4G -- */
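+	/* TxVga for each channel is linearly interpolated between the EEPROM calibration points */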
+ /* channel 1 ~ 5 */
stmp = pTxVga[1] - pTxVga[0];
- for( i=0; i<5; i++ )
- pHwData->TxVgaFor24[i] = pTxVga[0] + stmp*i/4;
- //channel 6 ~ 10
+ for (i = 0; i < 5; i++)
+ pHwData->TxVgaFor24[i] = pTxVga[0] + stmp * i / 4;
+ /* channel 6 ~ 10 */
stmp = pTxVga[2] - pTxVga[1];
- for( i=5; i<10; i++ )
- pHwData->TxVgaFor24[i] = pTxVga[1] + stmp*(i-5)/4;
- //channel 11 ~ 13
+ for (i = 5; i < 10; i++)
+ pHwData->TxVgaFor24[i] = pTxVga[1] + stmp * (i - 5) / 4;
+ /* channel 11 ~ 13 */
stmp = pTxVga[3] - pTxVga[2];
- for( i=10; i<13; i++ )
- pHwData->TxVgaFor24[i] = pTxVga[2] + stmp*(i-10)/2;
- //channel 14
+ for (i = 10; i < 13; i++)
+ pHwData->TxVgaFor24[i] = pTxVga[2] + stmp * (i - 10) / 2;
+ /* channel 14 */
pHwData->TxVgaFor24[13] = pTxVga[3];
- //-- 5G --
- if( pHwData->phy_type == RF_AIROHA_7230 )
- {
- //channel 184
+ /* -- 5G -- */
+ if (pHwData->phy_type == RF_AIROHA_7230) {
+ /* channel 184 */
pHwData->TxVgaFor50[0].ChanNo = 184;
pHwData->TxVgaFor50[0].TxVgaValue = pTxVga[4];
- //channel 196
+ /* channel 196 */
pHwData->TxVgaFor50[3].ChanNo = 196;
pHwData->TxVgaFor50[3].TxVgaValue = pTxVga[5];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[1].ChanNo = 188;
pHwData->TxVgaFor50[2].ChanNo = 192;
stmp = pTxVga[5] - pTxVga[4];
- pHwData->TxVgaFor50[2].TxVgaValue = pTxVga[5] - stmp/3;
- pHwData->TxVgaFor50[1].TxVgaValue = pTxVga[5] - stmp*2/3;
+ pHwData->TxVgaFor50[2].TxVgaValue = pTxVga[5] - stmp / 3;
+ pHwData->TxVgaFor50[1].TxVgaValue = pTxVga[5] - stmp * 2 / 3;
- //channel 16
+ /* channel 16 */
pHwData->TxVgaFor50[6].ChanNo = 16;
pHwData->TxVgaFor50[6].TxVgaValue = pTxVga[6];
pHwData->TxVgaFor50[4].ChanNo = 8;
@@ -2523,7 +2250,7 @@ void EEPROMTxVgaAdjust( struct hw_data * pHwData ) // 20060619.5 Add
pHwData->TxVgaFor50[5].ChanNo = 12;
pHwData->TxVgaFor50[5].TxVgaValue = pTxVga[6];
- //channel 36
+ /* channel 36 */
pHwData->TxVgaFor50[8].ChanNo = 36;
pHwData->TxVgaFor50[8].TxVgaValue = pTxVga[7];
pHwData->TxVgaFor50[7].ChanNo = 34;
@@ -2531,153 +2258,135 @@ void EEPROMTxVgaAdjust( struct hw_data * pHwData ) // 20060619.5 Add
pHwData->TxVgaFor50[9].ChanNo = 38;
pHwData->TxVgaFor50[9].TxVgaValue = pTxVga[7];
- //channel 40
+ /* channel 40 */
pHwData->TxVgaFor50[10].ChanNo = 40;
pHwData->TxVgaFor50[10].TxVgaValue = pTxVga[8];
- //channel 48
+ /* channel 48 */
pHwData->TxVgaFor50[14].ChanNo = 48;
pHwData->TxVgaFor50[14].TxVgaValue = pTxVga[9];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[11].ChanNo = 42;
pHwData->TxVgaFor50[12].ChanNo = 44;
pHwData->TxVgaFor50[13].ChanNo = 46;
stmp = pTxVga[9] - pTxVga[8];
- pHwData->TxVgaFor50[13].TxVgaValue = pTxVga[9] - stmp/4;
- pHwData->TxVgaFor50[12].TxVgaValue = pTxVga[9] - stmp*2/4;
- pHwData->TxVgaFor50[11].TxVgaValue = pTxVga[9] - stmp*3/4;
+ pHwData->TxVgaFor50[13].TxVgaValue = pTxVga[9] - stmp / 4;
+ pHwData->TxVgaFor50[12].TxVgaValue = pTxVga[9] - stmp * 2 / 4;
+ pHwData->TxVgaFor50[11].TxVgaValue = pTxVga[9] - stmp * 3 / 4;
- //channel 52
+ /* channel 52 */
pHwData->TxVgaFor50[15].ChanNo = 52;
pHwData->TxVgaFor50[15].TxVgaValue = pTxVga[10];
- //channel 64
+ /* channel 64 */
pHwData->TxVgaFor50[18].ChanNo = 64;
pHwData->TxVgaFor50[18].TxVgaValue = pTxVga[11];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[16].ChanNo = 56;
pHwData->TxVgaFor50[17].ChanNo = 60;
stmp = pTxVga[11] - pTxVga[10];
- pHwData->TxVgaFor50[17].TxVgaValue = pTxVga[11] - stmp/3;
- pHwData->TxVgaFor50[16].TxVgaValue = pTxVga[11] - stmp*2/3;
+ pHwData->TxVgaFor50[17].TxVgaValue = pTxVga[11] - stmp / 3;
+ pHwData->TxVgaFor50[16].TxVgaValue = pTxVga[11] - stmp * 2 / 3;
- //channel 100
+ /* channel 100 */
pHwData->TxVgaFor50[19].ChanNo = 100;
pHwData->TxVgaFor50[19].TxVgaValue = pTxVga[12];
- //channel 112
+ /* channel 112 */
pHwData->TxVgaFor50[22].ChanNo = 112;
pHwData->TxVgaFor50[22].TxVgaValue = pTxVga[13];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[20].ChanNo = 104;
pHwData->TxVgaFor50[21].ChanNo = 108;
stmp = pTxVga[13] - pTxVga[12];
- pHwData->TxVgaFor50[21].TxVgaValue = pTxVga[13] - stmp/3;
- pHwData->TxVgaFor50[20].TxVgaValue = pTxVga[13] - stmp*2/3;
+ pHwData->TxVgaFor50[21].TxVgaValue = pTxVga[13] - stmp / 3;
+ pHwData->TxVgaFor50[20].TxVgaValue = pTxVga[13] - stmp * 2 / 3;
- //channel 128
+ /* channel 128 */
pHwData->TxVgaFor50[26].ChanNo = 128;
pHwData->TxVgaFor50[26].TxVgaValue = pTxVga[14];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[23].ChanNo = 116;
pHwData->TxVgaFor50[24].ChanNo = 120;
pHwData->TxVgaFor50[25].ChanNo = 124;
stmp = pTxVga[14] - pTxVga[13];
- pHwData->TxVgaFor50[25].TxVgaValue = pTxVga[14] - stmp/4;
- pHwData->TxVgaFor50[24].TxVgaValue = pTxVga[14] - stmp*2/4;
- pHwData->TxVgaFor50[23].TxVgaValue = pTxVga[14] - stmp*3/4;
+ pHwData->TxVgaFor50[25].TxVgaValue = pTxVga[14] - stmp / 4;
+ pHwData->TxVgaFor50[24].TxVgaValue = pTxVga[14] - stmp * 2 / 4;
+ pHwData->TxVgaFor50[23].TxVgaValue = pTxVga[14] - stmp * 3 / 4;
- //channel 140
+ /* channel 140 */
pHwData->TxVgaFor50[29].ChanNo = 140;
pHwData->TxVgaFor50[29].TxVgaValue = pTxVga[15];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[27].ChanNo = 132;
pHwData->TxVgaFor50[28].ChanNo = 136;
stmp = pTxVga[15] - pTxVga[14];
- pHwData->TxVgaFor50[28].TxVgaValue = pTxVga[15] - stmp/3;
- pHwData->TxVgaFor50[27].TxVgaValue = pTxVga[15] - stmp*2/3;
+ pHwData->TxVgaFor50[28].TxVgaValue = pTxVga[15] - stmp / 3;
+ pHwData->TxVgaFor50[27].TxVgaValue = pTxVga[15] - stmp * 2 / 3;
- //channel 149
+ /* channel 149 */
pHwData->TxVgaFor50[30].ChanNo = 149;
pHwData->TxVgaFor50[30].TxVgaValue = pTxVga[16];
- //channel 165
+ /* channel 165 */
pHwData->TxVgaFor50[34].ChanNo = 165;
pHwData->TxVgaFor50[34].TxVgaValue = pTxVga[17];
- //interpolate
+ /* interpolate */
pHwData->TxVgaFor50[31].ChanNo = 153;
pHwData->TxVgaFor50[32].ChanNo = 157;
pHwData->TxVgaFor50[33].ChanNo = 161;
stmp = pTxVga[17] - pTxVga[16];
- pHwData->TxVgaFor50[33].TxVgaValue = pTxVga[17] - stmp/4;
- pHwData->TxVgaFor50[32].TxVgaValue = pTxVga[17] - stmp*2/4;
- pHwData->TxVgaFor50[31].TxVgaValue = pTxVga[17] - stmp*3/4;
+ pHwData->TxVgaFor50[33].TxVgaValue = pTxVga[17] - stmp / 4;
+ pHwData->TxVgaFor50[32].TxVgaValue = pTxVga[17] - stmp * 2 / 4;
+ pHwData->TxVgaFor50[31].TxVgaValue = pTxVga[17] - stmp * 3 / 4;
}
#ifdef _PE_STATE_DUMP_
- printk(" TxVgaFor24 : \n");
- DataDmp((u8 *)pHwData->TxVgaFor24, 14 ,0);
- printk(" TxVgaFor50 : \n");
- DataDmp((u8 *)pHwData->TxVgaFor50, 70 ,0);
+	printk(" TxVgaFor24 :\n");
+	DataDmp((u8 *)pHwData->TxVgaFor24, 14, 0);
+	printk(" TxVgaFor50 :\n");
+	DataDmp((u8 *)pHwData->TxVgaFor50, 70, 0);
#endif
}
-void BBProcessor_RateChanging( struct hw_data * pHwData, u8 rate ) // 20060613.1
+void BBProcessor_RateChanging(struct hw_data *pHwData, u8 rate)
{
struct wb35_reg *reg = &pHwData->reg;
- unsigned char Is11bRate;
+ unsigned char Is11bRate;
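+	/* Rates that are not a multiple of 6 are treated as 802.11b rates */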
Is11bRate = (rate % 6) ? 1 : 0;
- switch( pHwData->phy_type )
- {
- case RF_AIROHA_2230:
- case RF_AIROHA_2230S: // 20060420 Add this
- if( Is11bRate )
- {
- if( (reg->BB48 != BB48_DEFAULT_AL2230_11B) &&
- (reg->BB4C != BB4C_DEFAULT_AL2230_11B) )
- {
- Wb35Reg_Write( pHwData, 0x1048, BB48_DEFAULT_AL2230_11B );
- Wb35Reg_Write( pHwData, 0x104c, BB4C_DEFAULT_AL2230_11B );
- }
+ switch (pHwData->phy_type) {
+ case RF_AIROHA_2230:
+ case RF_AIROHA_2230S:
+ if (Is11bRate) {
+ if ((reg->BB48 != BB48_DEFAULT_AL2230_11B) &&
+ (reg->BB4C != BB4C_DEFAULT_AL2230_11B)) {
+ Wb35Reg_Write(pHwData, 0x1048, BB48_DEFAULT_AL2230_11B);
+ Wb35Reg_Write(pHwData, 0x104c, BB4C_DEFAULT_AL2230_11B);
}
- else
- {
- if( (reg->BB48 != BB48_DEFAULT_AL2230_11G) &&
- (reg->BB4C != BB4C_DEFAULT_AL2230_11G) )
- {
- Wb35Reg_Write( pHwData, 0x1048, BB48_DEFAULT_AL2230_11G );
- Wb35Reg_Write( pHwData, 0x104c, BB4C_DEFAULT_AL2230_11G );
- }
+ } else {
+ if ((reg->BB48 != BB48_DEFAULT_AL2230_11G) &&
+ (reg->BB4C != BB4C_DEFAULT_AL2230_11G)) {
+ Wb35Reg_Write(pHwData, 0x1048, BB48_DEFAULT_AL2230_11G);
+ Wb35Reg_Write(pHwData, 0x104c, BB4C_DEFAULT_AL2230_11G);
}
- break;
-
- case RF_WB_242: // 20060623 The fix only for old TxVGA setting
- if( Is11bRate )
- {
- if( (reg->BB48 != BB48_DEFAULT_WB242_11B) &&
- (reg->BB4C != BB4C_DEFAULT_WB242_11B) )
- {
- reg->BB48 = BB48_DEFAULT_WB242_11B;
- reg->BB4C = BB4C_DEFAULT_WB242_11B;
- Wb35Reg_Write( pHwData, 0x1048, BB48_DEFAULT_WB242_11B );
- Wb35Reg_Write( pHwData, 0x104c, BB4C_DEFAULT_WB242_11B );
- }
+ }
+ break;
+ case RF_WB_242:
+ if (Is11bRate) {
+ if ((reg->BB48 != BB48_DEFAULT_WB242_11B) &&
+ (reg->BB4C != BB4C_DEFAULT_WB242_11B)) {
+ reg->BB48 = BB48_DEFAULT_WB242_11B;
+ reg->BB4C = BB4C_DEFAULT_WB242_11B;
+ Wb35Reg_Write(pHwData, 0x1048, BB48_DEFAULT_WB242_11B);
+ Wb35Reg_Write(pHwData, 0x104c, BB4C_DEFAULT_WB242_11B);
}
- else
- {
- if( (reg->BB48 != BB48_DEFAULT_WB242_11G) &&
- (reg->BB4C != BB4C_DEFAULT_WB242_11G) )
- {
- reg->BB48 = BB48_DEFAULT_WB242_11G;
- reg->BB4C = BB4C_DEFAULT_WB242_11G;
- Wb35Reg_Write( pHwData, 0x1048, BB48_DEFAULT_WB242_11G );
- Wb35Reg_Write( pHwData, 0x104c, BB4C_DEFAULT_WB242_11G );
- }
+ } else {
+ if ((reg->BB48 != BB48_DEFAULT_WB242_11G) &&
+ (reg->BB4C != BB4C_DEFAULT_WB242_11G)) {
+ reg->BB48 = BB48_DEFAULT_WB242_11G;
+ reg->BB4C = BB4C_DEFAULT_WB242_11G;
+ Wb35Reg_Write(pHwData, 0x1048, BB48_DEFAULT_WB242_11G);
+ Wb35Reg_Write(pHwData, 0x104c, BB4C_DEFAULT_WB242_11G);
}
- break;
+ }
+ break;
}
}
-
-
-
-
-
-
diff --git a/drivers/staging/winbond/scan_s.h b/drivers/staging/winbond/scan_s.h
index 209717f5d47..85e7523196d 100644
--- a/drivers/staging/winbond/scan_s.h
+++ b/drivers/staging/winbond/scan_s.h
@@ -4,117 +4,107 @@
#include <linux/types.h>
#include "localpara.h"
-//
-// SCAN task global CONSTANTS, STRUCTURES, variables
-//
-
-//////////////////////////////////////////////////////////////////////////
-//define the msg type of SCAN module
-#define SCANMSG_SCAN_REQ 0x01
-#define SCANMSG_BEACON 0x02
+/*
+ * SCAN task global CONSTANTS, STRUCTURES, variables
+ */
+
+/* Define the message types of the SCAN module */
+#define SCANMSG_SCAN_REQ 0x01
+#define SCANMSG_BEACON 0x02
#define SCANMSG_PROBE_RESPONSE 0x03
-#define SCANMSG_TIMEOUT 0x04
+#define SCANMSG_TIMEOUT 0x04
#define SCANMSG_TXPROBE_FAIL 0x05
#define SCANMSG_ENABLE_BGSCAN 0x06
-#define SCANMSG_STOP_SCAN 0x07
+#define SCANMSG_STOP_SCAN 0x07
-// BSS Type =>conform to
-// IBSS : ToDS/FromDS = 00
-// Infrastructure : ToDS/FromDS = 01
+/*
+ * BSS Type => conforms to
+ * IBSS : ToDS/FromDS = 00
+ * Infrastructure : ToDS/FromDS = 01
+ */
#define IBSS_NET 0
#define ESS_NET 1
#define ANYBSS_NET 2
-// Scan Type
+/* Scan Type */
#define ACTIVE_SCAN 0
-#define PASSIVE_SCAN 1
+#define PASSIVE_SCAN 1
+
+/* Global data structures, Initial Scan & Background Scan */
+typedef struct _SCAN_REQ_PARA { /* mandatory parameters for SCAN request */
-///////////////////////////////////////////////////////////////////////////
-//Global data structures, Initial Scan & Background Scan
-typedef struct _SCAN_REQ_PARA //mandatory parameters for SCAN request
-{
- u32 ScanType; //passive/active scan
+ u32 ScanType; /* passive/active scan */
u8 reserved_1[2];
- struct SSID_Element sSSID; // 34B. scan only for this SSID
+ struct SSID_Element sSSID; /* 34B. scan only for this SSID */
u8 reserved_2[2];
} SCAN_REQ_PARA, *psSCAN_REQ_PARA;
-typedef struct _SCAN_PARAMETERS
-{
- u16 wState;
- u16 iCurrentChannelIndex;
+typedef struct _SCAN_PARAMETERS {
+ u16 wState;
+ u16 iCurrentChannelIndex;
SCAN_REQ_PARA sScanReq;
- u8 BSSID[MAC_ADDR_LENGTH + 2]; //scan only for this BSSID
+ u8 BSSID[MAC_ADDR_LENGTH + 2]; /* scan only for this BSSID */
- u32 BssType; //scan only for this BSS type
+ u32 BssType; /* scan only for this BSS type */
- //struct SSID_Element sSSID; //scan only for this SSID
- u16 ProbeDelay;
- u16 MinChannelTime;
+ u16 ProbeDelay;
+ u16 MinChannelTime;
- u16 MaxChannelTime;
- u16 reserved_1;
+ u16 MaxChannelTime;
+ u16 reserved_1;
- s32 iBgScanPeriod; // XP: 5 sec
+ s32 iBgScanPeriod; /* XP: 5 sec */
- u8 boBgScan; // Wb: enable BG scan, For XP, this value must be FALSE
- u8 boFastScan; // Wb: reserved
- u8 boCCAbusy; // Wb: HWMAC CCA busy status
- u8 reserved_2;
+	u8	boBgScan; /* Wb: enable BG scan; for XP this value must be FALSE */
+ u8 boFastScan; /* Wb: reserved */
+ u8 boCCAbusy; /* Wb: HWMAC CCA busy status */
+ u8 reserved_2;
struct timer_list timer;
- u32 ScanTimeStamp; //Increase 1 per background scan(1 minute)
- u32 BssTimeStamp; //Increase 1 per connect status check
- u32 RxNumPerAntenna[2]; //
-
- u8 AntennaToggle; //
- u8 boInTimerHandler;
- u8 boTimerActive; // Wb: reserved
- u8 boSave;
+	u32	ScanTimeStamp; /* Increased by 1 per background scan (1 minute) */
+	u32	BssTimeStamp; /* Increased by 1 per connection status check */
+ u32 RxNumPerAntenna[2];
- u32 BScanEnable; // Background scan enable. Default is On
+ u8 AntennaToggle;
+ u8 boInTimerHandler;
+ u8 boTimerActive; /* Wb: reserved */
+ u8 boSave;
+ u32 BScanEnable; /* Background scan enable. Default is On */
} SCAN_PARAMETERS, *psSCAN_PARAMETERS;
-// Encapsulate 'adapter' data structure
-#define psSCAN (&(adapter->sScanPara))
-#define psSCANREQ (&(adapter->sScanPara.sScanReq))
-
-//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-// scan.h
-// Define the related definitions of scan module
-// history -- 01/14/03' created
-//
-//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-//Define the state of scan module
-#define SCAN_INACTIVE 0
-#define WAIT_PROBE_DELAY 1
-#define WAIT_RESPONSE_MIN 2
-#define WAIT_RESPONSE_MAX_ACTIVE 3
-#define WAIT_BEACON_MAX_PASSIVE 4
-#define SCAN_COMPLETE 5
-#define BG_SCAN 6
-#define BG_SCANNING 7
-
-
-// The value will load from EEPROM
-// If 0xff is set in EEPOM, the driver will use SCAN_MAX_CHNL_TIME instead.
-// The definition is in WbHal.h
-// #define SCAN_MAX_CHNL_TIME (50)
-
-
-
-// static functions
-
-//static void ScanTimerHandler(struct wbsoft_priv * adapter);
-//static void vScanTimerStart(struct wbsoft_priv * adapter, int timeout_value);
-//static void vScanTimerStop(struct wbsoft_priv * adapter);
-
+/* Encapsulate 'adapter' data structure */
+#define psSCAN (&(adapter->sScanPara))
+#define psSCANREQ (&(adapter->sScanPara.sScanReq))
+
+/*
+ * ===========================================================
+ * scan.h
+ * Definitions for the scan module
+ *
+ * ===========================================================
+ */
+
+/* Define the state of scan module */
+#define SCAN_INACTIVE 0
+#define WAIT_PROBE_DELAY 1
+#define WAIT_RESPONSE_MIN 2
+#define WAIT_RESPONSE_MAX_ACTIVE 3
+#define WAIT_BEACON_MAX_PASSIVE 4
+#define SCAN_COMPLETE 5
+#define BG_SCAN 6
+#define BG_SCANNING 7
+
+
+/*
+ * The value is loaded from the EEPROM.
+ * If 0xff is set in the EEPROM, the driver uses SCAN_MAX_CHNL_TIME instead.
+ * The definition is in WbHal.h.
+ */
#endif
diff --git a/drivers/staging/winbond/sme_api.h b/drivers/staging/winbond/sme_api.h
index b5898294eb8..8f4596c9e9b 100644
--- a/drivers/staging/winbond/sme_api.h
+++ b/drivers/staging/winbond/sme_api.h
@@ -2,12 +2,6 @@
* sme_api.h
*
* Copyright(C) 2002 Winbond Electronics Corp.
- *
- * modification history
- * ---------------------------------------------------------------------------
- * 1.00.001, 2003-04-21, Kevin created
- * 1.00.002, 2003-05-14, PD43 & PE20 modified
- *
*/
#ifndef __SME_API_H__
@@ -18,42 +12,42 @@
#include "localpara.h"
/****************** CONSTANT AND MACRO SECTION ******************************/
-#define _INLINE __inline
+#define _INLINE __inline
-#define MEDIA_STATE_DISCONNECTED 0
-#define MEDIA_STATE_CONNECTED 1
+#define MEDIA_STATE_DISCONNECTED 0
+#define MEDIA_STATE_CONNECTED 1
-//ARRAY CHECK
-#define MAX_POWER_TO_DB 32
+/* ARRAY CHECK */
+#define MAX_POWER_TO_DB 32
/****************** TYPE DEFINITION SECTION *********************************/
/****************** EXPORTED FUNCTION DECLARATION SECTION *******************/
-// OID_802_11_BSSID
+/* OID_802_11_BSSID */
s8 sme_get_bssid(void *pcore_data, u8 *pbssid);
-s8 sme_get_desired_bssid(void *pcore_data, u8 *pbssid);//Not use
+s8 sme_get_desired_bssid(void *pcore_data, u8 *pbssid); /* Not used */
s8 sme_set_desired_bssid(void *pcore_data, u8 *pbssid);
-// OID_802_11_SSID
+/* OID_802_11_SSID */
s8 sme_get_ssid(void *pcore_data, u8 *pssid, u8 *pssid_len);
-s8 sme_get_desired_ssid(void *pcore_data, u8 *pssid, u8 *pssid_len);// Not use
+s8 sme_get_desired_ssid(void *pcore_data, u8 *pssid, u8 *pssid_len); /* Not used */
s8 sme_set_desired_ssid(void *pcore_data, u8 *pssid, u8 ssid_len);
-// OID_802_11_INFRASTRUCTURE_MODE
+/* OID_802_11_INFRASTRUCTURE_MODE */
s8 sme_get_bss_type(void *pcore_data, u8 *pbss_type);
-s8 sme_get_desired_bss_type(void *pcore_data, u8 *pbss_type);//Not use
+s8 sme_get_desired_bss_type(void *pcore_data, u8 *pbss_type); /* Not used */
s8 sme_set_desired_bss_type(void *pcore_data, u8 bss_type);
-// OID_802_11_FRAGMENTATION_THRESHOLD
+/* OID_802_11_FRAGMENTATION_THRESHOLD */
s8 sme_get_fragment_threshold(void *pcore_data, u32 *pthreshold);
s8 sme_set_fragment_threshold(void *pcore_data, u32 threshold);
-// OID_802_11_RTS_THRESHOLD
+/* OID_802_11_RTS_THRESHOLD */
s8 sme_get_rts_threshold(void *pcore_data, u32 *pthreshold);
s8 sme_set_rts_threshold(void *pcore_data, u32 threshold);
-// OID_802_11_CONFIGURATION
+/* OID_802_11_CONFIGURATION */
s8 sme_get_beacon_period(void *pcore_data, u16 *pbeacon_period);
s8 sme_set_beacon_period(void *pcore_data, u16 beacon_period);
@@ -64,116 +58,69 @@ s8 sme_get_current_channel(void *pcore_data, u8 *pcurrent_channel);
s8 sme_get_current_band(void *pcore_data, u8 *pcurrent_band);
s8 sme_set_current_channel(void *pcore_data, u8 current_channel);
-// OID_802_11_BSSID_LIST
+/* OID_802_11_BSSID_LIST */
s8 sme_get_scan_bss_count(void *pcore_data, u8 *pcount);
s8 sme_get_scan_bss(void *pcore_data, u8 index, void **ppbss);
s8 sme_get_connected_bss(void *pcore_data, void **ppbss_now);
-// OID_802_11_AUTHENTICATION_MODE
+/* OID_802_11_AUTHENTICATION_MODE */
s8 sme_get_auth_mode(void *pcore_data, u8 *pauth_mode);
s8 sme_set_auth_mode(void *pcore_data, u8 auth_mode);
-// OID_802_11_WEP_STATUS / OID_802_11_ENCRYPTION_STATUS
+/* OID_802_11_WEP_STATUS / OID_802_11_ENCRYPTION_STATUS */
s8 sme_get_wep_mode(void *pcore_data, u8 *pwep_mode);
s8 sme_set_wep_mode(void *pcore_data, u8 wep_mode);
-//s8 sme_get_encryption_status(void *pcore_data, u8 *pstatus);
-//s8 sme_set_encryption_status(void *pcore_data, u8 status);
-
-// ???????????????????????????????????????
-// OID_GEN_VENDOR_ID
-// OID_802_3_PERMANENT_ADDRESS
+/* OID_GEN_VENDOR_ID */
+/* OID_802_3_PERMANENT_ADDRESS */
s8 sme_get_permanent_mac_addr(void *pcore_data, u8 *pmac_addr);
-// OID_802_3_CURRENT_ADDRESS
+/* OID_802_3_CURRENT_ADDRESS */
s8 sme_get_current_mac_addr(void *pcore_data, u8 *pmac_addr);
-// OID_802_11_NETWORK_TYPE_IN_USE
+/* OID_802_11_NETWORK_TYPE_IN_USE */
s8 sme_get_network_type_in_use(void *pcore_data, u8 *ptype);
s8 sme_set_network_type_in_use(void *pcore_data, u8 type);
-// OID_802_11_SUPPORTED_RATES
+/* OID_802_11_SUPPORTED_RATES */
s8 sme_get_supported_rate(void *pcore_data, u8 *prates);
-// OID_802_11_ADD_WEP
-//12/29/03' wkchen
+/* OID_802_11_ADD_WEP */
s8 sme_set_add_wep(void *pcore_data, u32 key_index, u32 key_len,
u8 *Address, u8 *key);
-// OID_802_11_REMOVE_WEP
+/* OID_802_11_REMOVE_WEP */
s8 sme_set_remove_wep(void *pcre_data, u32 key_index);
-// OID_802_11_DISASSOCIATE
+/* OID_802_11_DISASSOCIATE */
s8 sme_set_disassociate(void *pcore_data);
-// OID_802_11_POWER_MODE
+/* OID_802_11_POWER_MODE */
s8 sme_get_power_mode(void *pcore_data, u8 *pmode);
s8 sme_set_power_mode(void *pcore_data, u8 mode);
-// OID_802_11_BSSID_LIST_SCAN
+/* OID_802_11_BSSID_LIST_SCAN */
s8 sme_set_bssid_list_scan(void *pcore_data, void *pscan_para);
-// OID_802_11_RELOAD_DEFAULTS
+/* OID_802_11_RELOAD_DEFAULTS */
s8 sme_set_reload_defaults(void *pcore_data, u8 reload_type);
-// The following SME API functions are used for WPA
-//
-// Mandatory OIDs for WPA
-//
-
-// OID_802_11_ADD_KEY
-//s8 sme_set_add_key(void *pcore_data, NDIS_802_11_KEY *pkey);
-
-// OID_802_11_REMOVE_KEY
-//s8 sme_set_remove_key(void *pcore_data, NDIS_802_11_REMOVE_KEY *pkey);
-
-// OID_802_11_ASSOCIATION_INFORMATION
-//s8 sme_set_association_information(void *pcore_data,
-// NDIS_802_11_ASSOCIATION_INFORMATION *pinfo);
-
-// OID_802_11_TEST
-//s8 sme_set_test(void *pcore_data, NDIS_802_11_TEST *ptest_data);
-
-//--------------------------------------------------------------------------//
-/*
-// The left OIDs
-
-// OID_802_11_NETWORK_TYPES_SUPPORTED
-// OID_802_11_TX_POWER_LEVEL
-// OID_802_11_RSSI_TRIGGER
-// OID_802_11_NUMBER_OF_ANTENNAS
-// OID_802_11_RX_ANTENNA_SELECTED
-// OID_802_11_TX_ANTENNA_SELECTED
-// OID_802_11_STATISTICS
-// OID_802_11_DESIRED_RATES
-// OID_802_11_PRIVACY_FILTER
-
-*/
-
/*------------------------- none-standard ----------------------------------*/
s8 sme_get_connect_status(void *pcore_data, u8 *pstatus);
-
-/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
-//s8 sme_get_scan_type(void *pcore_data, u8 *pscan_type);
-//s8 sme_set_scan_type(void *pcore_data, u8 scan_type);
-
-//s8 sme_get_scan_channel_list(void *pcore_data, u8 *pscan_type);
-//s8 sme_set_scan_channel_list(void *pcore_data, u8 scan_type);
-
+/*--------------------------------------------------------------------------*/
void sme_get_encryption_status(void *pcore_data, u8 *EncryptStatus);
void sme_set_encryption_status(void *pcore_data, u8 EncryptStatus);
-s8 sme_add_key(void *pcore_data,
- u32 key_index,
- u8 key_len,
- u8 key_type,
- u8 *key_bssid,
- //u8 *key_rsc,
- u8 *ptx_tsc,
- u8 *prx_tsc,
- u8 *key_material);
+s8 sme_add_key(void *pcore_data,
+ u32 key_index,
+ u8 key_len,
+ u8 key_type,
+ u8 *key_bssid,
+ u8 *ptx_tsc,
+ u8 *prx_tsc,
+ u8 *key_material);
void sme_remove_default_key(void *pcore_data, int index);
void sme_remove_mapping_key(void *pcore_data, u8 *pmac_addr);
void sme_clear_all_mapping_key(void *pcore_data);
@@ -202,60 +149,44 @@ u8 sme_set_rx_antenna(void *pcore_data, u32 RxAntenna);
void sme_get_tx_antenna(void *pcore_data, u32 *TxAntenna);
s8 sme_set_tx_antenna(void *pcore_data, u32 TxAntenna);
s8 sme_set_IBSS_chan(void *pcore_data, struct chan_info chan);
-
-//20061108 WPS
s8 sme_set_IE_append(void *pcore_data, u8 *buffer, u16 buf_len);
-
-
-
-//================== Local functions ======================
-//#ifdef _HSINCHU
-//void drv_translate_rssi(); // HW RSSI bit -> NDIS RSSI representation
-//void drv_translate_bss_description(); // Local bss desc -> NDIS bss desc
-//void drv_translate_channel(u8 NetworkType, u8 ChannelNumber, u32 *freq); // channel number -> channel /freq.
-//#endif _HSINCHU
-//
-static const u32 PowerDbToMw[] =
-{
- 56, //mW, MAX - 0, 17.5 dbm
- 40, //mW, MAX - 1, 16.0 dbm
- 30, //mW, MAX - 2, 14.8 dbm
- 20, //mW, MAX - 3, 13.0 dbm
- 15, //mW, MAX - 4, 11.8 dbm
- 12, //mW, MAX - 5, 10.6 dbm
- 9, //mW, MAX - 6, 9.4 dbm
- 7, //mW, MAX - 7, 8.3 dbm
- 5, //mW, MAX - 8, 6.4 dbm
- 4, //mW, MAX - 9, 5.3 dbm
- 3, //mW, MAX - 10, 4.0 dbm
- 2, //mW, MAX - 11, ? dbm
- 2, //mW, MAX - 12, ? dbm
- 2, //mW, MAX - 13, ? dbm
- 2, //mW, MAX - 14, ? dbm
- 2, //mW, MAX - 15, ? dbm
- 2, //mW, MAX - 16, ? dbm
- 2, //mW, MAX - 17, ? dbm
- 2, //mW, MAX - 18, ? dbm
- 1, //mW, MAX - 19, ? dbm
- 1, //mW, MAX - 20, ? dbm
- 1, //mW, MAX - 21, ? dbm
- 1, //mW, MAX - 22, ? dbm
- 1, //mW, MAX - 23, ? dbm
- 1, //mW, MAX - 24, ? dbm
- 1, //mW, MAX - 25, ? dbm
- 1, //mW, MAX - 26, ? dbm
- 1, //mW, MAX - 27, ? dbm
- 1, //mW, MAX - 28, ? dbm
- 1, //mW, MAX - 29, ? dbm
- 1, //mW, MAX - 30, ? dbm
- 1 //mW, MAX - 31, ? dbm
+/* ================== Local functions ====================== */
+static const u32 PowerDbToMw[] = {
+ 56, /* mW, MAX - 0, 17.5 dbm */
+ 40, /* mW, MAX - 1, 16.0 dbm */
+ 30, /* mW, MAX - 2, 14.8 dbm */
+ 20, /* mW, MAX - 3, 13.0 dbm */
+ 15, /* mW, MAX - 4, 11.8 dbm */
+ 12, /* mW, MAX - 5, 10.6 dbm */
+ 9, /* mW, MAX - 6, 9.4 dbm */
+ 7, /* mW, MAX - 7, 8.3 dbm */
+ 5, /* mW, MAX - 8, 6.4 dbm */
+ 4, /* mW, MAX - 9, 5.3 dbm */
+ 3, /* mW, MAX - 10, 4.0 dbm */
+ 2, /* mW, MAX - 11, ? dbm */
+ 2, /* mW, MAX - 12, ? dbm */
+ 2, /* mW, MAX - 13, ? dbm */
+ 2, /* mW, MAX - 14, ? dbm */
+ 2, /* mW, MAX - 15, ? dbm */
+ 2, /* mW, MAX - 16, ? dbm */
+ 2, /* mW, MAX - 17, ? dbm */
+ 2, /* mW, MAX - 18, ? dbm */
+ 1, /* mW, MAX - 19, ? dbm */
+ 1, /* mW, MAX - 20, ? dbm */
+ 1, /* mW, MAX - 21, ? dbm */
+ 1, /* mW, MAX - 22, ? dbm */
+ 1, /* mW, MAX - 23, ? dbm */
+ 1, /* mW, MAX - 24, ? dbm */
+ 1, /* mW, MAX - 25, ? dbm */
+ 1, /* mW, MAX - 26, ? dbm */
+ 1, /* mW, MAX - 27, ? dbm */
+ 1, /* mW, MAX - 28, ? dbm */
+ 1, /* mW, MAX - 29, ? dbm */
+ 1, /* mW, MAX - 30, ? dbm */
+ 1 /* mW, MAX - 31, ? dbm */
};
-
-
-
-
#endif /* __SME_API_H__ */
diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h
index 251b9c553b6..9195adf98e1 100644
--- a/drivers/staging/winbond/sysdef.h
+++ b/drivers/staging/winbond/sysdef.h
@@ -1,31 +1,19 @@
+/* Winbond WLAN System Configuration defines */
-
-//
-// Winbond WLAN System Configuration defines
-//
-
-//=====================================================================
-// Current directory is Linux
-// The definition WB_LINUX is a keyword for this OS
-//=====================================================================
#ifndef SYS_DEF_H
#define SYS_DEF_H
#define WB_LINUX
#define WB_LINUX_WPA_PSK
-
-//#define _IBSS_BEACON_SEQ_STICK_
#define _USE_FALLBACK_RATE_
-//#define ANTDIV_DEFAULT_ON
-
-#define _WPA2_ // 20061122 It's needed for current Linux driver
+#define _WPA2_
#ifndef _WPA_PSK_DEBUG
#undef _WPA_PSK_DEBUG
#endif
-// debug print options, mark what debug you don't need
+/* Debug print options: comment out the debug output you don't need */
#ifdef FULL_DEBUG
#define _PE_STATE_DUMP_
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index 1b93547ff5b..770722385ee 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -6,60 +6,61 @@
extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
-// true : read command process successfully
-// false : register not support
-// RegisterNo : start base
-// pRegisterData : data point
-// NumberOfData : number of register data
-// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
-// NO_INCREMENT - Function will write data into the same register
-unsigned char
-Wb35Reg_BurstWrite(struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag)
+/*
+ * true : the write command was processed successfully
+ * false : the register is not supported
+ * RegisterNo : start base
+ * pRegisterData : data pointer
+ * NumberOfData : number of register data words
+ * Flag : AUTO_INCREMENT - RegisterNo is auto-incremented by 4
+ * NO_INCREMENT - the function writes the data into the same register
+ */
+unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterData, u8 NumberOfData, u8 Flag)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct urb *urb = NULL;
- struct wb35_reg_queue *reg_queue = NULL;
- u16 UrbSize;
- struct usb_ctrlrequest *dr;
- u16 i, DataSize = NumberOfData*4;
-
- // Module shutdown
+ struct wb35_reg *reg = &pHwData->reg;
+ struct urb *urb = NULL;
+ struct wb35_reg_queue *reg_queue = NULL;
+ u16 UrbSize;
+ struct usb_ctrlrequest *dr;
+ u16 i, DataSize = NumberOfData * 4;
+
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
- // Trying to use burst write function if use new hardware
+	/* Try to use the burst write function if new hardware is in use */
UrbSize = sizeof(struct wb35_reg_queue) + DataSize + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if( urb && reg_queue ) {
- reg_queue->DIRECT = 2;// burst write register
+ if (urb && reg_queue) {
+ reg_queue->DIRECT = 2; /* burst write register */
reg_queue->INDEX = RegisterNo;
reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- memcpy( reg_queue->pBuffer, pRegisterData, DataSize );
- //the function for reversing register data from little endian to big endian
- for( i=0; i<NumberOfData ; i++ )
- reg_queue->pBuffer[i] = cpu_to_le32( reg_queue->pBuffer[i] );
+ memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
+		/* Convert the register data to little endian for the USB transfer */
+ for (i = 0; i < NumberOfData ; i++)
+ reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
- dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode
- dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment
- dr->wIndex = cpu_to_le16( RegisterNo );
- dr->wLength = cpu_to_le16( DataSize );
+ dr->bRequest = 0x04; /* USB or vendor-defined request code, burst mode */
+ dr->wValue = cpu_to_le16(Flag); /* 0: Register number auto-increment, 1: No auto increment */
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(DataSize);
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
- spin_lock_irq( &reg->EP0VM_spin_lock );
+ spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- // Start EP0VM
+ /* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
@@ -73,8 +74,7 @@ Wb35Reg_BurstWrite(struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterData
return false;
}
-void
-Wb35Reg_Update(struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue)
+void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
@@ -116,97 +116,96 @@ Wb35Reg_Update(struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue)
}
}
-// true : read command process successfully
-// false : register not support
-unsigned char
-Wb35Reg_WriteSync( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue )
+/*
+ * true : the write command was processed successfully
+ * false : the register is not supported
+ */
+unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
int ret = -1;
- // Module shutdown
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
RegisterValue = cpu_to_le32(RegisterValue);
- // update the register by send usb message------------------------------------
+	/* Update the register by sending a USB message */
reg->SyncIoPause = 1;
- // 20060717.5 Wait until EP0VM stop
+	/* Wait until the EP0VM stops */
while (reg->EP0vm_state != VM_STOP)
msleep(10);
- // Sync IoCallDriver
+ /* Sync IoCallDriver */
reg->EP0vm_state = VM_RUNNING;
- ret = usb_control_msg( pHwData->WbUsb.udev,
- usb_sndctrlpipe( pHwData->WbUsb.udev, 0 ),
+ ret = usb_control_msg(pHwData->WbUsb.udev,
+ usb_sndctrlpipe(pHwData->WbUsb.udev, 0),
0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- 0x0,RegisterNo, &RegisterValue, 4, HZ*100 );
+ 0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
reg->EP0vm_state = VM_STOP;
reg->SyncIoPause = 0;
Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
- #ifdef _PE_REG_DUMP_
+#ifdef _PE_REG_DUMP_
printk("EP0 Write register usb message sending error\n");
- #endif
-
- pHwData->SurpriseRemove = 1; // 20060704.2
+#endif
+ pHwData->SurpriseRemove = 1;
return false;
}
-
return true;
}
-// true : read command process successfully
-// false : register not support
-unsigned char
-Wb35Reg_Write( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue )
+/*
+ * true : the write command was processed successfully
+ * false : the register is not supported
+ */
+unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct usb_ctrlrequest *dr;
- struct urb *urb = NULL;
- struct wb35_reg_queue *reg_queue = NULL;
- u16 UrbSize;
-
+ struct wb35_reg *reg = &pHwData->reg;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb = NULL;
+ struct wb35_reg_queue *reg_queue = NULL;
+ u16 UrbSize;
- // Module shutdown
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
- // update the register by send urb request------------------------------------
+	/* Update the register by sending a URB request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
- reg_queue->DIRECT = 1;// burst write register
+		reg_queue->DIRECT = 1; /* single write register */
reg_queue->INDEX = RegisterNo;
reg_queue->VALUE = cpu_to_le32(RegisterValue);
reg_queue->RESERVED_VALID = false;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
- dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
+ dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(4);
- // Enter the sending queue
+ /* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
- spin_lock_irq(&reg->EP0VM_spin_lock );
+ spin_lock_irq(&reg->EP0VM_spin_lock);
if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- // Start EP0VM
+ /* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
@@ -218,56 +217,60 @@ Wb35Reg_Write( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue )
}
}
-//This command will be executed with a user defined value. When it completes,
-//this value is useful. For example, hal_set_current_channel will use it.
-// true : read command process successfully
-// false : register not support
-unsigned char
-Wb35Reg_WriteWithCallbackValue( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue,
- s8 *pValue, s8 Len)
+/*
+ * This command is executed with a user-defined value. When it completes,
+ * this value is useful; for example, hal_set_current_channel uses it.
+ * true : the write command was processed successfully
+ * false : the register is not supported
+ */
+unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
+ u16 RegisterNo,
+ u32 RegisterValue,
+ s8 *pValue,
+ s8 Len)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct usb_ctrlrequest *dr;
- struct urb *urb = NULL;
- struct wb35_reg_queue *reg_queue = NULL;
- u16 UrbSize;
+ struct wb35_reg *reg = &pHwData->reg;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb = NULL;
+ struct wb35_reg_queue *reg_queue = NULL;
+ u16 UrbSize;
- // Module shutdown
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
- // update the register by send urb request------------------------------------
+	/* Update the register by sending a URB request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb && reg_queue) {
- reg_queue->DIRECT = 1;// burst write register
+ reg_queue->DIRECT = 1; /* burst write register */
reg_queue->INDEX = RegisterNo;
reg_queue->VALUE = cpu_to_le32(RegisterValue);
- //NOTE : Users must guarantee the size of value will not exceed the buffer size.
+		/* NOTE: The caller must guarantee that the size of the value does not exceed the buffer size. */
memcpy(reg_queue->RESERVED, pValue, Len);
reg_queue->RESERVED_VALID = true;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
- dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
+ dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
dr->wIndex = cpu_to_le16(RegisterNo);
dr->wLength = cpu_to_le16(4);
- // Enter the sending queue
+ /* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
- spin_lock_irq (&reg->EP0VM_spin_lock );
- if( reg->reg_first == NULL )
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
- spin_unlock_irq ( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- // Start EP0VM
+ /* Start EP0VM */
Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
@@ -278,115 +281,114 @@ Wb35Reg_WriteWithCallbackValue( struct hw_data * pHwData, u16 RegisterNo, u32 Re
}
}
-// true : read command process successfully
-// false : register not support
-// pRegisterValue : It must be a resident buffer due to asynchronous read register.
-unsigned char
-Wb35Reg_ReadSync( struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterValue )
+/*
+ * true : the read command was processed successfully
+ * false : the register is not supported
+ * pRegisterValue : must be a resident buffer because the
+ * register read is asynchronous.
+ */
+unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
- u32 * pltmp = pRegisterValue;
- int ret = -1;
+ u32 *pltmp = pRegisterValue;
+ int ret = -1;
- // Module shutdown
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
- // Read the register by send usb message------------------------------------
-
+	/* Read the register by sending a USB message */
reg->SyncIoPause = 1;
- // 20060717.5 Wait until EP0VM stop
+	/* Wait until the EP0VM stops */
while (reg->EP0vm_state != VM_STOP)
msleep(10);
reg->EP0vm_state = VM_RUNNING;
- ret = usb_control_msg( pHwData->WbUsb.udev,
+ ret = usb_control_msg(pHwData->WbUsb.udev,
usb_rcvctrlpipe(pHwData->WbUsb.udev, 0),
- 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
- 0x0, RegisterNo, pltmp, 4, HZ*100 );
+ 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x0, RegisterNo, pltmp, 4, HZ * 100);
*pRegisterValue = cpu_to_le32(*pltmp);
reg->EP0vm_state = VM_STOP;
- Wb35Reg_Update( pHwData, RegisterNo, *pRegisterValue );
+ Wb35Reg_Update(pHwData, RegisterNo, *pRegisterValue);
reg->SyncIoPause = 0;
- Wb35Reg_EP0VM_start( pHwData );
+ Wb35Reg_EP0VM_start(pHwData);
if (ret < 0) {
- #ifdef _PE_REG_DUMP_
+#ifdef _PE_REG_DUMP_
printk("EP0 Read register usb message sending error\n");
- #endif
-
- pHwData->SurpriseRemove = 1; // 20060704.2
+#endif
+ pHwData->SurpriseRemove = 1;
return false;
}
-
return true;
}
-// true : read command process successfully
-// false : register not support
-// pRegisterValue : It must be a resident buffer due to asynchronous read register.
-unsigned char
-Wb35Reg_Read(struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterValue )
+/*
+ * true : the read command was processed successfully
+ * false : the register is not supported
+ * pRegisterValue : must be a resident buffer because the
+ * register read is asynchronous.
+ */
+unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct usb_ctrlrequest * dr;
- struct urb *urb;
- struct wb35_reg_queue *reg_queue;
- u16 UrbSize;
+ struct wb35_reg *reg = &pHwData->reg;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct wb35_reg_queue *reg_queue;
+ u16 UrbSize;
- // Module shutdown
+ /* Module shutdown */
if (pHwData->SurpriseRemove)
return false;
- // update the variable by send Urb to read register ------------------------------------
+	/* Update the variable by sending a URB to read the register */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if( urb && reg_queue )
- {
- reg_queue->DIRECT = 0;// read register
+ if (urb && reg_queue) {
+ reg_queue->DIRECT = 0; /* read register */
reg_queue->INDEX = RegisterNo;
reg_queue->pBuffer = pRegisterValue;
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN;
- dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode
+ dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN;
+ dr->bRequest = 0x01; /* USB or vendor-defined request code, burst mode */
dr->wValue = cpu_to_le16(0x0);
- dr->wIndex = cpu_to_le16 (RegisterNo);
- dr->wLength = cpu_to_le16 (4);
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(4);
- // Enter the sending queue
+ /* Enter the sending queue */
reg_queue->Next = NULL;
reg_queue->pUsbReq = dr;
reg_queue->urb = urb;
- spin_lock_irq ( &reg->EP0VM_spin_lock );
- if( reg->reg_first == NULL )
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
reg->reg_first = reg_queue;
else
reg->reg_last->Next = reg_queue;
reg->reg_last = reg_queue;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- // Start EP0VM
- Wb35Reg_EP0VM_start( pHwData );
+ /* Start EP0VM */
+ Wb35Reg_EP0VM_start(pHwData);
return true;
} else {
if (urb)
- usb_free_urb( urb );
+ usb_free_urb(urb);
kfree(reg_queue);
return false;
}
}
-void
-Wb35Reg_EP0VM_start( struct hw_data * pHwData )
+void Wb35Reg_EP0VM_start(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
@@ -397,15 +399,14 @@ Wb35Reg_EP0VM_start( struct hw_data * pHwData )
atomic_dec(&reg->RegFireCount);
}
-void
-Wb35Reg_EP0VM(struct hw_data * pHwData )
+void Wb35Reg_EP0VM(struct hw_data *pHwData)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct urb *urb;
- struct usb_ctrlrequest *dr;
- u32 * pBuffer;
+ struct wb35_reg *reg = &pHwData->reg;
+ struct urb *urb;
+ struct usb_ctrlrequest *dr;
+ u32 *pBuffer;
int ret = -1;
- struct wb35_reg_queue *reg_queue;
+ struct wb35_reg_queue *reg_queue;
if (reg->SyncIoPause)
@@ -414,27 +415,27 @@ Wb35Reg_EP0VM(struct hw_data * pHwData )
if (pHwData->SurpriseRemove)
goto cleanup;
- // Get the register data and send to USB through Irp
- spin_lock_irq( &reg->EP0VM_spin_lock );
+	/* Get the register data and send it to the USB through an IRP */
+ spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
if (!reg_queue)
goto cleanup;
- // Get an Urb, send it
+ /* Get an Urb, send it */
urb = (struct urb *)reg_queue->urb;
dr = reg_queue->pUsbReq;
urb = reg_queue->urb;
pBuffer = reg_queue->pBuffer;
- if (reg_queue->DIRECT == 1) // output
+ if (reg_queue->DIRECT == 1) /* output */
pBuffer = &reg_queue->VALUE;
- usb_fill_control_urb( urb, pHwData->WbUsb.udev,
- REG_DIRECTION(pHwData->WbUsb.udev,reg_queue),
- (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength),
- Wb35Reg_EP0VM_complete, (void*)pHwData);
+ usb_fill_control_urb(urb, pHwData->WbUsb.udev,
+ REG_DIRECTION(pHwData->WbUsb.udev, reg_queue),
+ (u8 *)dr, pBuffer, cpu_to_le16(dr->wLength),
+ Wb35Reg_EP0VM_complete, (void *)pHwData);
reg->EP0vm_state = VM_RUNNING;
@@ -446,7 +447,6 @@ Wb35Reg_EP0VM(struct hw_data * pHwData )
#endif
goto cleanup;
}
-
return;
cleanup:
@@ -455,29 +455,28 @@ Wb35Reg_EP0VM(struct hw_data * pHwData )
}
-void
-Wb35Reg_EP0VM_complete(struct urb *urb)
+void Wb35Reg_EP0VM_complete(struct urb *urb)
{
- struct hw_data * pHwData = (struct hw_data *)urb->context;
- struct wb35_reg *reg = &pHwData->reg;
- struct wb35_reg_queue *reg_queue;
+ struct hw_data *pHwData = (struct hw_data *)urb->context;
+ struct wb35_reg *reg = &pHwData->reg;
+ struct wb35_reg_queue *reg_queue;
- // Variable setting
+ /* Variable setting */
reg->EP0vm_state = VM_COMPLETED;
reg->EP0VM_status = urb->status;
- if (pHwData->SurpriseRemove) { // Let WbWlanHalt to handle surprise remove
+ if (pHwData->SurpriseRemove) { /* Let WbWlanHalt to handle surprise remove */
reg->EP0vm_state = VM_STOP;
atomic_dec(&reg->RegFireCount);
} else {
- // Complete to send, remove the URB from the first
- spin_lock_irq( &reg->EP0VM_spin_lock );
+		/* Send complete; remove the URB from the head of the queue */
+ spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
if (reg_queue == reg->reg_last)
reg->reg_last = NULL;
reg->reg_first = reg->reg_first->Next;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
if (reg->EP0VM_status) {
#ifdef _PE_REG_DUMP_
@@ -486,37 +485,35 @@ Wb35Reg_EP0VM_complete(struct urb *urb)
reg->EP0vm_state = VM_STOP;
pHwData->SurpriseRemove = 1;
} else {
- // Success. Update the result
+ /* Success. Update the result */
- // Start the next send
+ /* Start the next send */
Wb35Reg_EP0VM(pHwData);
}
- kfree(reg_queue);
+ kfree(reg_queue);
}
usb_free_urb(urb);
}
-void
-Wb35Reg_destroy(struct hw_data * pHwData)
+void Wb35Reg_destroy(struct hw_data *pHwData)
{
- struct wb35_reg *reg = &pHwData->reg;
- struct urb *urb;
- struct wb35_reg_queue *reg_queue;
-
+ struct wb35_reg *reg = &pHwData->reg;
+ struct urb *urb;
+ struct wb35_reg_queue *reg_queue;
Uxx_power_off_procedure(pHwData);
- // Wait for Reg operation completed
+ /* Wait for Reg operation completed */
do {
- msleep(10); // Delay for waiting function enter 940623.1.a
+ msleep(10); /* Delay for waiting function enter */
} while (reg->EP0vm_state != VM_STOP);
- msleep(10); // Delay for waiting function enter 940623.1.b
+ msleep(10); /* Delay for waiting function enter */
- // Release all the data in RegQueue
- spin_lock_irq( &reg->EP0VM_spin_lock );
+ /* Release all the data in RegQueue */
+ spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
while (reg_queue) {
if (reg_queue == reg->reg_last)
@@ -524,84 +521,88 @@ Wb35Reg_destroy(struct hw_data * pHwData)
reg->reg_first = reg->reg_first->Next;
urb = reg_queue->urb;
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
if (urb) {
usb_free_urb(urb);
kfree(reg_queue);
} else {
- #ifdef _PE_REG_DUMP_
+#ifdef _PE_REG_DUMP_
printk("EP0 queue release error\n");
- #endif
+#endif
}
- spin_lock_irq( &reg->EP0VM_spin_lock );
+ spin_lock_irq(&reg->EP0VM_spin_lock);
reg_queue = reg->reg_first;
}
- spin_unlock_irq( &reg->EP0VM_spin_lock );
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
}
-//====================================================================================
-// The function can be run in passive-level only.
-//====================================================================================
-unsigned char Wb35Reg_initial(struct hw_data * pHwData)
+/*
+ * =========================================================================
+ * The function can be run in passive-level only.
+ * =========================================================================
+ */
+unsigned char Wb35Reg_initial(struct hw_data *pHwData)
{
- struct wb35_reg *reg=&pHwData->reg;
+ struct wb35_reg *reg = &pHwData->reg;
u32 ltmp;
u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
- // Spin lock is acquired for read and write IRP command
- spin_lock_init( &reg->EP0VM_spin_lock );
-
- // Getting RF module type from EEPROM ------------------------------------
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d)
- Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
-
- //Update RF module type and determine the PHY type by inf or EEPROM
- reg->EEPROMPhyType = (u8)( ltmp & 0xff );
- // 0 V MAX2825, 1 V MAX2827, 2 V MAX2828, 3 V MAX2829
- // 16V AL2230, 17 - AL7230, 18 - AL2230S
- // 32 Reserved
- // 33 - W89RF242(TxVGA 0~19), 34 - W89RF242(TxVGA 0~34)
+ /* Spin lock is acquired for read and write IRP command */
+ spin_lock_init(&reg->EP0VM_spin_lock);
+
+ /* Getting RF module type from EEPROM */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x080d0000); /* Start EEPROM access + Read + address(0x0d) */
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &ltmp);
+
+ /* Update RF module type and determine the PHY type by inf or EEPROM */
+ reg->EEPROMPhyType = (u8)(ltmp & 0xff);
+ /*
+	 * 0 - MAX2825, 1 - MAX2827, 2 - MAX2828, 3 - MAX2829
+	 * 16 - AL2230, 17 - AL7230, 18 - AL2230S
+ * 32 Reserved
+ * 33 - W89RF242(TxVGA 0~19), 34 - W89RF242(TxVGA 0~34)
+ */
if (reg->EEPROMPhyType != RF_DECIDE_BY_INF) {
- if( (reg->EEPROMPhyType == RF_MAXIM_2825) ||
+ if ((reg->EEPROMPhyType == RF_MAXIM_2825) ||
(reg->EEPROMPhyType == RF_MAXIM_2827) ||
(reg->EEPROMPhyType == RF_MAXIM_2828) ||
(reg->EEPROMPhyType == RF_MAXIM_2829) ||
(reg->EEPROMPhyType == RF_MAXIM_V1) ||
(reg->EEPROMPhyType == RF_AIROHA_2230) ||
- (reg->EEPROMPhyType == RF_AIROHA_2230S) ||
+ (reg->EEPROMPhyType == RF_AIROHA_2230S) ||
(reg->EEPROMPhyType == RF_AIROHA_7230) ||
- (reg->EEPROMPhyType == RF_WB_242) ||
+ (reg->EEPROMPhyType == RF_WB_242) ||
(reg->EEPROMPhyType == RF_WB_242_1))
pHwData->phy_type = reg->EEPROMPhyType;
}
- // Power On procedure running. The relative parameter will be set according to phy_type
- Uxx_power_on_procedure( pHwData );
+ /* Power On procedure running. The relative parameter will be set according to phy_type */
+ Uxx_power_on_procedure(pHwData);
- // Reading MAC address
- Uxx_ReadEthernetAddress( pHwData );
+ /* Reading MAC address */
+ Uxx_ReadEthernetAddress(pHwData);
- // Read VCO trim for RF parameter
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08200000 );
- Wb35Reg_ReadSync( pHwData, 0x03b4, &VCO_trim );
+ /* Read VCO trim for RF parameter */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08200000);
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &VCO_trim);
- // Read Antenna On/Off of software flag
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08210000 );
- Wb35Reg_ReadSync( pHwData, 0x03b4, &SoftwareSet );
+ /* Read Antenna On/Off of software flag */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08210000);
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &SoftwareSet);
- // Read TXVGA
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08100000 );
- Wb35Reg_ReadSync( pHwData, 0x03b4, &TxVga );
+ /* Read TXVGA */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08100000);
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &TxVga);
- // Get Scan interval setting from EEPROM offset 0x1c
- Wb35Reg_WriteSync( pHwData, 0x03b4, 0x081d0000 );
- Wb35Reg_ReadSync( pHwData, 0x03b4, &Region_ScanInterval );
+ /* Get Scan interval setting from EEPROM offset 0x1c */
+ Wb35Reg_WriteSync(pHwData, 0x03b4, 0x081d0000);
+ Wb35Reg_ReadSync(pHwData, 0x03b4, &Region_ScanInterval);
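[Editor's illustration, not part of the patch.] All of the EEPROM accesses above follow one pattern: write a command word 0x08XX0000 (read request, byte offset XX) to register 0x03b4, then read the result back from the same register. A minimal sketch of that pattern as a helper; the helper name is hypothetical:

	/* Hypothetical helper (not in the patch): read one EEPROM byte offset
	 * through the 0x03b4 command/response register shown above. */
	static u32 wb35_eeprom_read(struct hw_data *pHwData, u8 offset)
	{
		u32 value = 0;

		/* 0x08 = EEPROM read command, offset goes into bits 16..23 */
		Wb35Reg_WriteSync(pHwData, 0x03b4, 0x08000000 | ((u32)offset << 16));
		Wb35Reg_ReadSync(pHwData, 0x03b4, &value);
		return value;
	}

The five reads above then correspond to offsets 0x0d (PHY type), 0x20 (VCO trim), 0x21 (software set), 0x10 (TxVGA) and 0x1d (region/scan interval).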
- // Update Ethernet address
- memcpy( pHwData->CurrentMacAddress, pHwData->PermanentMacAddress, ETH_ALEN );
+ /* Update Ethernet address */
+ memcpy(pHwData->CurrentMacAddress, pHwData->PermanentMacAddress, ETH_ALEN);
- // Update software variable
+ /* Update software variable */
pHwData->SoftwareSet = (u16)(SoftwareSet & 0xffff);
TxVga &= 0x000000ff;
pHwData->PowerIndexFromEEPROM = (u8)TxVga;
@@ -609,22 +610,22 @@ unsigned char Wb35Reg_initial(struct hw_data * pHwData)
if (pHwData->VCO_trim == 0xff)
pHwData->VCO_trim = 0x28;
- reg->EEPROMRegion = (u8)(Region_ScanInterval>>8); // 20060720
- if( reg->EEPROMRegion<1 || reg->EEPROMRegion>6 )
+ reg->EEPROMRegion = (u8)(Region_ScanInterval >> 8);
+ if (reg->EEPROMRegion < 1 || reg->EEPROMRegion > 6)
reg->EEPROMRegion = REGION_AUTO;
- //For Get Tx VGA from EEPROM 20060315.5 move here
- GetTxVgaFromEEPROM( pHwData );
+ /* For Get Tx VGA from EEPROM */
+ GetTxVgaFromEEPROM(pHwData);
- // Set Scan Interval
+ /* Set Scan Interval */
pHwData->Scan_Interval = (u8)(Region_ScanInterval & 0xff) * 10;
- if ((pHwData->Scan_Interval == 2550) || (pHwData->Scan_Interval < 10)) // Is default setting 0xff * 10
+ if ((pHwData->Scan_Interval == 2550) || (pHwData->Scan_Interval < 10)) /* Is default setting 0xff * 10 */
pHwData->Scan_Interval = SCAN_MAX_CHNL_TIME;
- // Initial register
+	/* Initialize registers */
RFSynthesizer_initial(pHwData);
- BBProcessor_initial(pHwData); // Async write, must wait until complete
+ BBProcessor_initial(pHwData); /* Async write, must wait until complete */
Wb35Reg_phy_calibration(pHwData);
@@ -634,113 +635,104 @@ unsigned char Wb35Reg_initial(struct hw_data * pHwData)
if (pHwData->SurpriseRemove)
return false;
else
- return true; // Initial fail
+		return true; /* Initialization OK */
}
-//===================================================================================
-// CardComputeCrc --
-//
-// Description:
-// Runs the AUTODIN II CRC algorithm on buffer Buffer of length, Length.
-//
-// Arguments:
-// Buffer - the input buffer
-// Length - the length of Buffer
-//
-// Return Value:
-// The 32-bit CRC value.
-//
-// Note:
-// This is adapted from the comments in the assembly language
-// version in _GENREQ.ASM of the DWB NE1000/2000 driver.
-//==================================================================================
-u32
-CardComputeCrc(u8 * Buffer, u32 Length)
+/*
+ * ================================================================
+ * CardComputeCrc --
+ *
+ * Description:
+ * Runs the AUTODIN II CRC algorithm on buffer Buffer of length Length.
+ *
+ * Arguments:
+ * Buffer - the input buffer
+ * Length - the length of Buffer
+ *
+ * Return Value:
+ * The 32-bit CRC value.
+ * ===================================================================
+ */
+u32 CardComputeCrc(u8 *Buffer, u32 Length)
{
- u32 Crc, Carry;
- u32 i, j;
- u8 CurByte;
-
- Crc = 0xffffffff;
-
- for (i = 0; i < Length; i++) {
-
- CurByte = Buffer[i];
-
- for (j = 0; j < 8; j++) {
-
- Carry = ((Crc & 0x80000000) ? 1 : 0) ^ (CurByte & 0x01);
- Crc <<= 1;
- CurByte >>= 1;
-
- if (Carry) {
- Crc =(Crc ^ 0x04c11db6) | Carry;
- }
- }
- }
-
- return Crc;
+ u32 Crc, Carry;
+ u32 i, j;
+ u8 CurByte;
+
+ Crc = 0xffffffff;
+
+ for (i = 0; i < Length; i++) {
+ CurByte = Buffer[i];
+ for (j = 0; j < 8; j++) {
+ Carry = ((Crc & 0x80000000) ? 1 : 0) ^ (CurByte & 0x01);
+ Crc <<= 1;
+ CurByte >>= 1;
+ if (Carry)
+ Crc = (Crc ^ 0x04c11db6) | Carry;
+ }
+ }
+ return Crc;
}
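[Editor's illustration, not part of the patch.] CardComputeCrc() shifts the CRC register left while feeding each input byte LSB first; with the carry OR-ed back in, the constant 0x04c11db6 | 1 is the usual CRC-32 polynomial 0x04c11db7. The header wb35reg_f.h declares CardGetMulticastBit() right next to it, and a typical pairing of the two looks like the sketch below. The choice of the top six bits and the use of BitReverse() are assumptions for illustration only, not taken from this driver:

	/* Hypothetical sketch: derive a multicast filter position from a
	 * MAC address with the helpers above (bit selection is assumed). */
	static void example_multicast_bit(u8 address[ETH_ALEN], u8 *byte, u8 *value)
	{
		u32 crc = CardComputeCrc(address, ETH_ALEN);	/* AUTODIN II CRC */
		u32 idx = BitReverse(crc, 32) >> 26;		/* assumed: top 6 bits */

		*byte = (u8)(idx >> 3);			/* which Multicast[] byte */
		*value = (u8)(0x01 << (idx & 0x07));	/* which bit in that byte */
	}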
-//==================================================================
-// BitReverse --
-// Reverse the bits in the input argument, dwData, which is
-// regarded as a string of bits with the length, DataLength.
-//
-// Arguments:
-// dwData :
-// DataLength :
-//
-// Return:
-// The converted value.
-//==================================================================
-u32 BitReverse( u32 dwData, u32 DataLength)
+/*
+ * ==================================================================
+ * BitReverse --
+ * Reverse the bits in the input argument, dwData, which is
+ * regarded as a string of bits with the length, DataLength.
+ *
+ * Arguments:
+ * dwData :
+ * DataLength :
+ *
+ * Return:
+ * The converted value.
+ * ==================================================================
+ */
+u32 BitReverse(u32 dwData, u32 DataLength)
{
- u32 HalfLength, i, j;
- u32 BitA, BitB;
+ u32 HalfLength, i, j;
+ u32 BitA, BitB;
- if ( DataLength <= 0) return 0; // No conversion is done.
+ if (DataLength <= 0)
+ return 0; /* No conversion is done. */
dwData = dwData & (0xffffffff >> (32 - DataLength));
HalfLength = DataLength / 2;
- for ( i = 0, j = DataLength-1 ; i < HalfLength; i++, j--)
- {
- BitA = GetBit( dwData, i);
- BitB = GetBit( dwData, j);
+ for (i = 0, j = DataLength - 1; i < HalfLength; i++, j--) {
+ BitA = GetBit(dwData, i);
+ BitB = GetBit(dwData, j);
if (BitA && !BitB) {
- dwData = ClearBit( dwData, i);
- dwData = SetBit( dwData, j);
+ dwData = ClearBit(dwData, i);
+ dwData = SetBit(dwData, j);
} else if (!BitA && BitB) {
- dwData = SetBit( dwData, i);
- dwData = ClearBit( dwData, j);
- } else
- {
- // Do nothing since these two bits are of the save values.
+ dwData = SetBit(dwData, i);
+ dwData = ClearBit(dwData, j);
+ } else {
+			/* Do nothing since these two bits have the same value. */
}
}
-
return dwData;
}
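[Editor's illustration, not part of the patch.] BitReverse() mirrors the low DataLength bits of dwData by swapping bit i with bit DataLength-1-i whenever they differ. A stand-alone host-side sketch of the same operation, using plain shifts instead of the GetBit/SetBit/ClearBit macros, handy for checking a value by hand:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror the low `len` bits of `data`, like BitReverse() above. */
	static uint32_t bit_reverse(uint32_t data, uint32_t len)
	{
		uint32_t i, j;

		if (len == 0 || len > 32)
			return 0;
		data &= 0xffffffffu >> (32 - len);
		for (i = 0, j = len - 1; i < len / 2; i++, j--) {
			uint32_t a = (data >> i) & 1;
			uint32_t b = (data >> j) & 1;

			if (a != b)			/* swap only differing bits */
				data ^= (1u << i) | (1u << j);
		}
		return data;
	}

	int main(void)
	{
		printf("%x\n", bit_reverse(0xC5, 8));	/* 11000101 -> 10100011 = a3 */
		return 0;
	}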
-void Wb35Reg_phy_calibration( struct hw_data * pHwData )
+void Wb35Reg_phy_calibration(struct hw_data *pHwData)
{
- u32 BB3c, BB54;
+ u32 BB3c, BB54;
if ((pHwData->phy_type == RF_WB_242) ||
(pHwData->phy_type == RF_WB_242_1)) {
- phy_calibration_winbond ( pHwData, 2412 ); // Sync operation
- Wb35Reg_ReadSync( pHwData, 0x103c, &BB3c );
- Wb35Reg_ReadSync( pHwData, 0x1054, &BB54 );
+ phy_calibration_winbond(pHwData, 2412); /* Sync operation */
+ Wb35Reg_ReadSync(pHwData, 0x103c, &BB3c);
+ Wb35Reg_ReadSync(pHwData, 0x1054, &BB54);
pHwData->BB3c_cal = BB3c;
pHwData->BB54_cal = BB54;
RFSynthesizer_initial(pHwData);
- BBProcessor_initial(pHwData); // Async operation
+ BBProcessor_initial(pHwData); /* Async operation */
- Wb35Reg_WriteSync( pHwData, 0x103c, BB3c );
- Wb35Reg_WriteSync( pHwData, 0x1054, BB54 );
+ Wb35Reg_WriteSync(pHwData, 0x103c, BB3c);
+ Wb35Reg_WriteSync(pHwData, 0x1054, BB54);
}
}
diff --git a/drivers/staging/winbond/wb35reg_f.h b/drivers/staging/winbond/wb35reg_f.h
index d352bce5c17..bf23c108419 100644
--- a/drivers/staging/winbond/wb35reg_f.h
+++ b/drivers/staging/winbond/wb35reg_f.h
@@ -3,59 +3,63 @@
#include "wbhal_s.h"
-//====================================
-// Interface function declare
-//====================================
-unsigned char Wb35Reg_initial( struct hw_data * pHwData );
-void Uxx_power_on_procedure( struct hw_data * pHwData );
-void Uxx_power_off_procedure( struct hw_data * pHwData );
-void Uxx_ReadEthernetAddress( struct hw_data * pHwData );
-void Dxx_initial( struct hw_data * pHwData );
-void Mxx_initial( struct hw_data * pHwData );
-void RFSynthesizer_initial( struct hw_data * pHwData );
-//void RFSynthesizer_SwitchingChannel( struct hw_data * pHwData, s8 Channel );
-void RFSynthesizer_SwitchingChannel( struct hw_data * pHwData, struct chan_info Channel );
-void BBProcessor_initial( struct hw_data * pHwData );
-void BBProcessor_RateChanging( struct hw_data * pHwData, u8 rate ); // 20060613.1
-//void RF_RateChanging( struct hw_data * pHwData, u8 rate ); // 20060626.5.c Add
-u8 RFSynthesizer_SetPowerIndex( struct hw_data * pHwData, u8 PowerIndex );
-u8 RFSynthesizer_SetMaxim2828_24Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetMaxim2828_50Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetMaxim2827_24Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetMaxim2827_50Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetMaxim2825Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetAiroha2230Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetAiroha7230Power( struct hw_data *, u8 index );
-u8 RFSynthesizer_SetWinbond242Power( struct hw_data *, u8 index );
-void GetTxVgaFromEEPROM( struct hw_data * pHwData );
-void EEPROMTxVgaAdjust( struct hw_data * pHwData ); // 20060619.5 Add
-
-#define RFWriteControlData( _A, _V ) Wb35Reg_Write( _A, 0x0864, _V )
-
-void Wb35Reg_destroy( struct hw_data * pHwData );
-
-unsigned char Wb35Reg_Read( struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterValue );
-unsigned char Wb35Reg_ReadSync( struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterValue );
-unsigned char Wb35Reg_Write( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue );
-unsigned char Wb35Reg_WriteSync( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue );
-unsigned char Wb35Reg_WriteWithCallbackValue( struct hw_data * pHwData,
- u16 RegisterNo,
- u32 RegisterValue,
- s8 *pValue,
- s8 Len);
-unsigned char Wb35Reg_BurstWrite( struct hw_data * pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag );
-
-void Wb35Reg_EP0VM( struct hw_data * pHwData );
-void Wb35Reg_EP0VM_start( struct hw_data * pHwData );
+/*
+ * ====================================
+ * Interface function declare
+ * ====================================
+ */
+unsigned char Wb35Reg_initial(struct hw_data *hw_data);
+void Uxx_power_on_procedure(struct hw_data *hw_data);
+void Uxx_power_off_procedure(struct hw_data *hw_data);
+void Uxx_ReadEthernetAddress(struct hw_data *hw_data);
+void Dxx_initial(struct hw_data *hw_data);
+void Mxx_initial(struct hw_data *hw_data);
+void RFSynthesizer_initial(struct hw_data *hw_data);
+void RFSynthesizer_SwitchingChannel(struct hw_data *hw_data, struct chan_info channel);
+void BBProcessor_initial(struct hw_data *hw_data);
+void BBProcessor_RateChanging(struct hw_data *hw_data, u8 rate);
+u8 RFSynthesizer_SetPowerIndex(struct hw_data *hw_data, u8 power_index);
+u8 RFSynthesizer_SetMaxim2828_24Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetMaxim2828_50Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetMaxim2827_24Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetMaxim2827_50Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetMaxim2825Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetAiroha2230Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetAiroha7230Power(struct hw_data *, u8 index);
+u8 RFSynthesizer_SetWinbond242Power(struct hw_data *, u8 index);
+void GetTxVgaFromEEPROM(struct hw_data *hw_data);
+void EEPROMTxVgaAdjust(struct hw_data *hw_data);
+
+#define RFWriteControlData(_A, _V) Wb35Reg_Write(_A, 0x0864, _V)
+
+void Wb35Reg_destroy(struct hw_data *hw_data);
+
+unsigned char Wb35Reg_Read(struct hw_data *hw_data, u16 register_no, u32 *register_value);
+unsigned char Wb35Reg_ReadSync(struct hw_data *hw_data, u16 register_no, u32 *register_value);
+unsigned char Wb35Reg_Write(struct hw_data *hw_data, u16 register_no, u32 register_value);
+unsigned char Wb35Reg_WriteSync(struct hw_data *hw_data, u16 register_no, u32 register_value);
+unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *hw_data,
+ u16 register_no,
+ u32 register_value,
+ s8 *value,
+ s8 len);
+unsigned char Wb35Reg_BurstWrite(struct hw_data *hw_data,
+ u16 register_no,
+ u32 *register_data,
+ u8 number_of_data,
+ u8 flag);
+
+void Wb35Reg_EP0VM(struct hw_data *hw_data);
+void Wb35Reg_EP0VM_start(struct hw_data *hw_data);
void Wb35Reg_EP0VM_complete(struct urb *urb);
-u32 BitReverse( u32 dwData, u32 DataLength);
+u32 BitReverse(u32 data, u32 data_length);
-void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value );
-u32 CardComputeCrc( u8 * Buffer, u32 Length );
+void CardGetMulticastBit(u8 address[MAC_ADDR_LENGTH], u8 *byte, u8 *value);
+u32 CardComputeCrc(u8 *buffer, u32 length);
-void Wb35Reg_phy_calibration( struct hw_data * pHwData );
-void Wb35Reg_Update( struct hw_data * pHwData, u16 RegisterNo, u32 RegisterValue );
-unsigned char adjust_TXVGA_for_iq_mag( struct hw_data * pHwData );
+void Wb35Reg_phy_calibration(struct hw_data *hw_data);
+void Wb35Reg_Update(struct hw_data *hw_data, u16 register_no, u32 register_value);
+unsigned char adjust_TXVGA_for_iq_mag(struct hw_data *hw_data);
#endif
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index 32ef4b8a2d2..4eff009444b 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -5,98 +5,100 @@
#include <linux/types.h>
#include <asm/atomic.h>
-//=======================================================================================
-/*
- HAL setting function
-
- ========================================
- |Uxx| |Dxx| |Mxx| |BB| |RF|
- ========================================
- | |
- Wb35Reg_Read Wb35Reg_Write
-
- ----------------------------------------
- WbUsb_CallUSBDASync supplied By WbUsb module
-*/
-//=======================================================================================
-
-#define GetBit( dwData, i) ( dwData & (0x00000001 << i))
-#define SetBit( dwData, i) ( dwData | (0x00000001 << i))
-#define ClearBit( dwData, i) ( dwData & ~(0x00000001 << i))
-
-#define IGNORE_INCREMENT 0
-#define AUTO_INCREMENT 0
-#define NO_INCREMENT 1
-#define REG_DIRECTION(_x,_y) ((_y)->DIRECT ==0 ? usb_rcvctrlpipe(_x,0) : usb_sndctrlpipe(_x,0))
-#define REG_BUF_SIZE(_x) ((_x)->bRequest== 0x04 ? cpu_to_le16((_x)->wLength) : 4)
-
-// 20060613.2 Add the follow definition
+/* =========================================================================
+ *
+ * HAL setting function
+ *
+ * ========================================
+ * |Uxx| |Dxx| |Mxx| |BB| |RF|
+ * ========================================
+ * | |
+ * Wb35Reg_Read Wb35Reg_Write
+ *
+ * ----------------------------------------
+ * WbUsb_CallUSBDASync supplied By WbUsb module
+ * ==========================================================================
+ */
+#define GetBit(dwData, i) (dwData & (0x00000001 << i))
+#define SetBit(dwData, i) (dwData | (0x00000001 << i))
+#define ClearBit(dwData, i) (dwData & ~(0x00000001 << i))
+
+#define IGNORE_INCREMENT 0
+#define AUTO_INCREMENT 0
+#define NO_INCREMENT 1
+#define REG_DIRECTION(_x, _y) ((_y)->DIRECT == 0 ? usb_rcvctrlpipe(_x, 0) : usb_sndctrlpipe(_x, 0))
+#define REG_BUF_SIZE(_x) ((_x)->bRequest == 0x04 ? cpu_to_le16((_x)->wLength) : 4)
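[Editor's illustration, not part of the patch.] REG_DIRECTION() is what ties the DIRECT flag of struct wb35_reg_queue ("0:In 1:Out") to the USB control pipe used by Wb35Reg_EP0VM(). Expanded by hand it reads as the sketch below:

	/* Illustration only: what REG_DIRECTION(udev, reg_queue) resolves to. */
	static unsigned int example_reg_pipe(struct usb_device *udev,
					     struct wb35_reg_queue *reg_queue)
	{
		if (reg_queue->DIRECT == 0)		/* In: register read */
			return usb_rcvctrlpipe(udev, 0);
		return usb_sndctrlpipe(udev, 0);	/* Out: register write */
	}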
+
#define BB48_DEFAULT_AL2230_11B 0x0033447c
#define BB4C_DEFAULT_AL2230_11B 0x0A00FEFF
#define BB48_DEFAULT_AL2230_11G 0x00332C1B
#define BB4C_DEFAULT_AL2230_11G 0x0A00FEFF
-#define BB48_DEFAULT_WB242_11B 0x00292315 //backoff 2dB
-#define BB4C_DEFAULT_WB242_11B 0x0800FEFF //backoff 2dB
-//#define BB48_DEFAULT_WB242_11B 0x00201B11 //backoff 4dB
-//#define BB4C_DEFAULT_WB242_11B 0x0600FF00 //backoff 4dB
+#define BB48_DEFAULT_WB242_11B 0x00292315 /* backoff 2dB */
+#define BB4C_DEFAULT_WB242_11B 0x0800FEFF /* backoff 2dB */
#define BB48_DEFAULT_WB242_11G 0x00453B24
#define BB4C_DEFAULT_WB242_11G 0x0E00FEFF
-//====================================
-// Default setting for Mxx
-//====================================
-#define DEFAULT_CWMIN 31 //(M2C) CWmin. Its value is in the range 0-31.
-#define DEFAULT_CWMAX 1023 //(M2C) CWmax. Its value is in the range 0-1023.
-#define DEFAULT_AID 1 //(M34) AID. Its value is in the range 1-2007.
+/*
+ * ====================================
+ * Default setting for Mxx
+ * ====================================
+ */
+#define DEFAULT_CWMIN 31 /* (M2C) CWmin. Its value is in the range 0-31. */
+#define DEFAULT_CWMAX 1023 /* (M2C) CWmax. Its value is in the range 0-1023. */
+#define DEFAULT_AID 1 /* (M34) AID. Its value is in the range 1-2007. */
#ifdef _USE_FALLBACK_RATE_
-#define DEFAULT_RATE_RETRY_LIMIT 2 //(M38) as named
+#define DEFAULT_RATE_RETRY_LIMIT 2 /* (M38) as named */
#else
-#define DEFAULT_RATE_RETRY_LIMIT 7 //(M38) as named
+#define DEFAULT_RATE_RETRY_LIMIT 7 /* (M38) as named */
#endif
-#define DEFAULT_LONG_RETRY_LIMIT 7 //(M38) LongRetryLimit. Its value is in the range 0-15.
-#define DEFAULT_SHORT_RETRY_LIMIT 7 //(M38) ShortRetryLimit. Its value is in the range 0-15.
-#define DEFAULT_PIFST 25 //(M3C) PIFS Time. Its value is in the range 0-65535.
-#define DEFAULT_EIFST 354 //(M3C) EIFS Time. Its value is in the range 0-1048575.
-#define DEFAULT_DIFST 45 //(M3C) DIFS Time. Its value is in the range 0-65535.
-#define DEFAULT_SIFST 5 //(M3C) SIFS Time. Its value is in the range 0-65535.
-#define DEFAULT_OSIFST 10 //(M3C) Original SIFS Time. Its value is in the range 0-15.
-#define DEFAULT_ATIMWD 0 //(M40) ATIM Window. Its value is in the range 0-65535.
-#define DEFAULT_SLOT_TIME 20 //(M40) ($) SlotTime. Its value is in the range 0-255.
-#define DEFAULT_MAX_TX_MSDU_LIFE_TIME 512 //(M44) MaxTxMSDULifeTime. Its value is in the range 0-4294967295.
-#define DEFAULT_BEACON_INTERVAL 500 //(M48) Beacon Interval. Its value is in the range 0-65535.
-#define DEFAULT_PROBE_DELAY_TIME 200 //(M48) Probe Delay Time. Its value is in the range 0-65535.
-#define DEFAULT_PROTOCOL_VERSION 0 //(M4C)
-#define DEFAULT_MAC_POWER_STATE 2 //(M4C) 2: MAC at power active
-#define DEFAULT_DTIM_ALERT_TIME 0
+#define DEFAULT_LONG_RETRY_LIMIT 7 /* (M38) LongRetryLimit. Its value is in the range 0-15. */
+#define DEFAULT_SHORT_RETRY_LIMIT 7 /* (M38) ShortRetryLimit. Its value is in the range 0-15. */
+#define DEFAULT_PIFST 25 /* (M3C) PIFS Time. Its value is in the range 0-65535. */
+#define DEFAULT_EIFST 354 /* (M3C) EIFS Time. Its value is in the range 0-1048575. */
+#define DEFAULT_DIFST 45 /* (M3C) DIFS Time. Its value is in the range 0-65535. */
+#define DEFAULT_SIFST 5 /* (M3C) SIFS Time. Its value is in the range 0-65535. */
+#define DEFAULT_OSIFST 10 /* (M3C) Original SIFS Time. Its value is in the range 0-15. */
+#define DEFAULT_ATIMWD 0 /* (M40) ATIM Window. Its value is in the range 0-65535. */
+#define DEFAULT_SLOT_TIME 20 /* (M40) ($) SlotTime. Its value is in the range 0-255. */
+#define DEFAULT_MAX_TX_MSDU_LIFE_TIME 512 /* (M44) MaxTxMSDULifeTime. Its value is in the range 0-4294967295. */
+#define DEFAULT_BEACON_INTERVAL 500 /* (M48) Beacon Interval. Its value is in the range 0-65535. */
+#define DEFAULT_PROBE_DELAY_TIME 200 /* (M48) Probe Delay Time. Its value is in the range 0-65535. */
+#define DEFAULT_PROTOCOL_VERSION 0 /* (M4C) */
+#define DEFAULT_MAC_POWER_STATE 2 /* (M4C) 2: MAC at power active */
+#define DEFAULT_DTIM_ALERT_TIME 0
struct wb35_reg_queue {
- struct urb *urb;
+ struct urb *urb;
void *pUsbReq;
void *Next;
union {
u32 VALUE;
u32 *pBuffer;
};
- u8 RESERVED[4]; // space reserved for communication
- u16 INDEX; // For storing the register index
- u8 RESERVED_VALID; // Indicate whether the RESERVED space is valid at this command.
- u8 DIRECT; // 0:In 1:Out
+ u8 RESERVED[4]; /* space reserved for communication */
+ u16 INDEX; /* For storing the register index */
+ u8 RESERVED_VALID; /* Indicate whether the RESERVED space is valid at this command. */
+ u8 DIRECT; /* 0:In 1:Out */
};
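[Editor's illustration, not part of the patch.] The union mirrors the DIRECT check in Wb35Reg_EP0VM() above: an outgoing register write (DIRECT == 1) carries its data inline in VALUE, while a read (DIRECT == 0) points pBuffer at the caller's destination. A small sketch of that selection:

	/* Illustration only: where the data for a queued request lives. */
	static u32 *example_reg_data(struct wb35_reg_queue *reg_queue)
	{
		if (reg_queue->DIRECT == 1)	/* Out: value carried inline */
			return &reg_queue->VALUE;
		return reg_queue->pBuffer;	/* In: caller-supplied buffer */
	}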
-//====================================
-// Internal variable for module
-//====================================
+/*
+ * ====================================
+ * Internal variable for module
+ * ====================================
+ */
#define MAX_SQ3_FILTER_SIZE 5
struct wb35_reg {
- //============================
- // Register Bank backup
- //============================
- u32 U1B0; //bit16 record the h/w radio on/off status
+ /*
+ * ============================
+ * Register Bank backup
+ * ============================
+ */
+ u32 U1B0; /* bit16 record the h/w radio on/off status */
u32 U1BC_LEDConfigure;
u32 D00_DmaControl;
u32 M00_MacControl;
@@ -105,68 +107,65 @@ struct wb35_reg {
u32 M04_MulticastAddress1;
u32 M08_MulticastAddress2;
};
- u8 Multicast[8]; // contents of card multicast registers
+ u8 Multicast[8]; /* contents of card multicast registers */
};
u32 M24_MacControl;
u32 M28_MacControl;
u32 M2C_MacControl;
u32 M38_MacControl;
- u32 M3C_MacControl; // 20060214 backup only
+ u32 M3C_MacControl;
u32 M40_MacControl;
- u32 M44_MacControl; // 20060214 backup only
- u32 M48_MacControl; // 20060214 backup only
+ u32 M44_MacControl;
+ u32 M48_MacControl;
u32 M4C_MacStatus;
- u32 M60_MacControl; // 20060214 backup only
- u32 M68_MacControl; // 20060214 backup only
- u32 M70_MacControl; // 20060214 backup only
- u32 M74_MacControl; // 20060214 backup only
- u32 M78_ERPInformation;//930206.2.b
- u32 M7C_MacControl; // 20060214 backup only
- u32 M80_MacControl; // 20060214 backup only
- u32 M84_MacControl; // 20060214 backup only
- u32 M88_MacControl; // 20060214 backup only
- u32 M98_MacControl; // 20060214 backup only
-
- //[20040722 WK]
- //Baseband register
- u32 BB0C; // Used for LNA calculation
- u32 BB2C; //
- u32 BB30; //11b acquisition control register
+ u32 M60_MacControl;
+ u32 M68_MacControl;
+ u32 M70_MacControl;
+ u32 M74_MacControl;
+ u32 M78_ERPInformation;
+ u32 M7C_MacControl;
+ u32 M80_MacControl;
+ u32 M84_MacControl;
+ u32 M88_MacControl;
+ u32 M98_MacControl;
+
+ /* Baseband register */
+ u32 BB0C; /* Used for LNA calculation */
+ u32 BB2C;
+ u32 BB30; /* 11b acquisition control register */
u32 BB3C;
- u32 BB48; // 20051221.1.a 20060613.1 Fix OBW issue of 11b/11g rate
- u32 BB4C; // 20060613.1 Fix OBW issue of 11b/11g rate
- u32 BB50; //mode control register
+ u32 BB48;
+ u32 BB4C;
+ u32 BB50; /* mode control register */
u32 BB54;
- u32 BB58; //IQ_ALPHA
- u32 BB5C; // For test
- u32 BB60; // for WTO read value
-
- //-------------------
- // VM
- //-------------------
- spinlock_t EP0VM_spin_lock; // 4B
- u32 EP0VM_status;//$$
+ u32 BB58; /* IQ_ALPHA */
+ u32 BB5C; /* For test */
+ u32 BB60; /* for WTO read value */
+
+ /* VM */
+ spinlock_t EP0VM_spin_lock; /* 4B */
+ u32 EP0VM_status; /* $$ */
struct wb35_reg_queue *reg_first;
struct wb35_reg_queue *reg_last;
- atomic_t RegFireCount;
+ atomic_t RegFireCount;
- // Hardware status
+ /* Hardware status */
u8 EP0vm_state;
u8 mac_power_save;
-	u8	EEPROMPhyType;	// 0 ~ 15 for Maxim (0 - MAX2825, 1 - MAX2827, 2 - MAX2828, 3 - MAX2829),
-				// 16 ~ 31 for Airoha (16 - AL2230, 11 - AL7230)
- // 32 ~ Reserved
- // 33 ~ 47 For WB242 ( 33 - WB242, 34 - WB242 with new Txvga 0.5 db step)
- // 48 ~ 255 ARE RESERVED.
- u8 EEPROMRegion; //Region setting in EEPROM
-
- u32 SyncIoPause; // If user use the Sync Io to access Hw, then pause the async access
-
- u8 LNAValue[4]; //Table for speed up running
+ u8 EEPROMPhyType; /*
+			  * 0 ~ 15 for Maxim (0 - MAX2825, 1 - MAX2827, 2 - MAX2828, 3 - MAX2829),
+			  * 16 ~ 31 for Airoha (16 - AL2230, 17 - AL7230)
+			  * 32 ~ Reserved
+			  * 33 ~ 47 for WB242 (33 - WB242, 34 - WB242 with new TxVGA 0.5 dB step)
+ * 48 ~ 255 ARE RESERVED.
+ */
+ u8 EEPROMRegion; /* Region setting in EEPROM */
+
+	u32	SyncIoPause; /* If the user uses sync I/O to access the hardware, pause the async access */
+
+	u8	LNAValue[4]; /* Table to speed up running */
u32 SQ3_filter[MAX_SQ3_FILTER_SIZE];
u32 SQ3_index;
-
};
-
#endif
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index d7b57e62db0..efe82b141c1 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -1,13 +1,15 @@
-//============================================================================
-// Copyright (c) 1996-2002 Winbond Electronic Corporation
-//
-// Module Name:
-// Wb35Rx.c
-//
-// Abstract:
-// Processing the Rx message from down layer
-//
-//============================================================================
+/*
+ * ============================================================================
+ * Copyright (c) 1996-2002 Winbond Electronic Corporation
+ *
+ * Module Name:
+ * Wb35Rx.c
+ *
+ * Abstract:
+ * Processing the Rx message from down layer
+ *
+ * ============================================================================
+ */
#include <linux/usb.h>
#include <linux/slab.h>
@@ -30,16 +32,7 @@ static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int Pac
return;
}
- memcpy(skb_put(skb, PacketSize),
- pRxBufferAddress,
- PacketSize);
-
-/*
- rx_status.rate = 10;
- rx_status.channel = 1;
- rx_status.freq = 12345;
- rx_status.phymode = MODE_IEEE80211B;
-*/
+ memcpy(skb_put(skb, PacketSize), pRxBufferAddress, PacketSize);
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(hw, skb);
@@ -47,7 +40,7 @@ static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int Pac
static void Wb35Rx_adjust(struct wb35_descriptor *pRxDes)
{
- u32 * pRxBufferAddress;
+ u32 *pRxBufferAddress;
u32 DecryptionMethod;
u32 i;
u16 BufferSize;
@@ -56,81 +49,80 @@ static void Wb35Rx_adjust(struct wb35_descriptor *pRxDes)
pRxBufferAddress = pRxDes->buffer_address[0];
BufferSize = pRxDes->buffer_size[0];
- // Adjust the last part of data. Only data left
- BufferSize -= 4; // For CRC-32
+ /* Adjust the last part of data. Only data left */
+ BufferSize -= 4; /* For CRC-32 */
if (DecryptionMethod)
BufferSize -= 4;
- if (DecryptionMethod == 3) // For CCMP
+ if (DecryptionMethod == 3) /* For CCMP */
BufferSize -= 4;
- // Adjust the IV field which after 802.11 header and ICV field.
- if (DecryptionMethod == 1) // For WEP
- {
- for( i=6; i>0; i-- )
- pRxBufferAddress[i] = pRxBufferAddress[i-1];
+	/* Adjust the IV field, which follows the 802.11 header, and the ICV field. */
+ if (DecryptionMethod == 1) { /* For WEP */
+ for (i = 6; i > 0; i--)
+ pRxBufferAddress[i] = pRxBufferAddress[i - 1];
pRxDes->buffer_address[0] = pRxBufferAddress + 1;
- BufferSize -= 4; // 4 byte for IV
- }
- else if( DecryptionMethod ) // For TKIP and CCMP
- {
- for (i=7; i>1; i--)
- pRxBufferAddress[i] = pRxBufferAddress[i-2];
- pRxDes->buffer_address[0] = pRxBufferAddress + 2;//Update the descriptor, shift 8 byte
- BufferSize -= 8; // 8 byte for IV + ICV
+ BufferSize -= 4; /* 4 byte for IV */
+ } else if (DecryptionMethod) { /* For TKIP and CCMP */
+ for (i = 7; i > 1; i--)
+ pRxBufferAddress[i] = pRxBufferAddress[i - 2];
+ pRxDes->buffer_address[0] = pRxBufferAddress + 2; /* Update the descriptor, shift 8 byte */
+ BufferSize -= 8; /* 8 byte for IV + ICV */
}
pRxDes->buffer_size[0] = BufferSize;
}
static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
{
- struct wbsoft_priv *priv = hw->priv;
- struct hw_data * pHwData = &priv->sHwData;
+ struct wbsoft_priv *priv = hw->priv;
+ struct hw_data *pHwData = &priv->sHwData;
struct wb35_descriptor RxDes;
- struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- u8 * pRxBufferAddress;
- u16 PacketSize;
- u16 stmp, BufferSize, stmp2 = 0;
- u32 RxBufferId;
+ struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
+ u8 *pRxBufferAddress;
+ u16 PacketSize;
+ u16 stmp, BufferSize, stmp2 = 0;
+ u32 RxBufferId;
- // Only one thread be allowed to run into the following
+	/* Only one thread is allowed to run into the following */
do {
RxBufferId = pWb35Rx->RxProcessIndex;
- if (pWb35Rx->RxOwner[ RxBufferId ]) //Owner by VM
+ if (pWb35Rx->RxOwner[RxBufferId]) /* Owner by VM */
break;
pWb35Rx->RxProcessIndex++;
pWb35Rx->RxProcessIndex %= MAX_USB_RX_BUFFER_NUMBER;
pRxBufferAddress = pWb35Rx->pDRx;
- BufferSize = pWb35Rx->RxBufferSize[ RxBufferId ];
+ BufferSize = pWb35Rx->RxBufferSize[RxBufferId];
- // Parse the bulkin buffer
+ /* Parse the bulkin buffer */
while (BufferSize >= 4) {
- if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
+ if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) /* Is ending? */
break;
- // Get the R00 R01 first
+ /* Get the R00 R01 first */
RxDes.R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
PacketSize = (u16)RxDes.R00.R00_receive_byte_count;
- RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress+4)));
- // For new DMA 4k
+ RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress + 4)));
+ /* For new DMA 4k */
if ((PacketSize & 0x03) > 0)
PacketSize -= 4;
- // Basic check for Rx length. Is length valid?
+ /* Basic check for Rx length. Is length valid? */
if (PacketSize > MAX_PACKET_SIZE) {
- #ifdef _PE_RX_DUMP_
+#ifdef _PE_RX_DUMP_
printk("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
- #endif
+#endif
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->Ep3ErrorCount2++;
break;
}
- // Start to process Rx buffer
-// RxDes.Descriptor_ID = RxBufferId; // Due to synchronous indicate, the field doesn't necessary to use.
- BufferSize -= 8; //subtract 8 byte for 35's USB header length
+ /*
+ * Wb35Rx_indicate() is called synchronously so it isn't
+			 * necessary to set "RxDes.Descriptor_ID = RxBufferId;"
+ */
+ BufferSize -= 8; /* subtract 8 byte for 35's USB header length */
pRxBufferAddress += 8;
RxDes.buffer_address[0] = pRxBufferAddress;
@@ -142,18 +134,17 @@ static u16 Wb35Rx_indicate(struct ieee80211_hw *hw)
packet_came(hw, pRxBufferAddress, PacketSize);
- // Move RxBuffer point to the next
+			/* Move the RxBuffer pointer to the next packet */
stmp = PacketSize + 3;
- stmp &= ~0x03; // 4n alignment
+ stmp &= ~0x03; /* 4n alignment */
pRxBufferAddress += stmp;
BufferSize -= stmp;
stmp2 += stmp;
}
- // Reclaim resource
- pWb35Rx->RxOwner[ RxBufferId ] = 1;
+ /* Reclaim resource */
+ pWb35Rx->RxOwner[RxBufferId] = 1;
} while (true);
-
return stmp2;
}
@@ -161,112 +152,110 @@ static void Wb35Rx(struct ieee80211_hw *hw);
static void Wb35Rx_Complete(struct urb *urb)
{
- struct ieee80211_hw *hw = urb->context;
- struct wbsoft_priv *priv = hw->priv;
- struct hw_data * pHwData = &priv->sHwData;
- struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- u8 * pRxBufferAddress;
- u32 SizeCheck;
- u16 BulkLength;
- u32 RxBufferId;
- R00_DESCRIPTOR R00;
-
- // Variable setting
+ struct ieee80211_hw *hw = urb->context;
+ struct wbsoft_priv *priv = hw->priv;
+ struct hw_data *pHwData = &priv->sHwData;
+ struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
+ u8 *pRxBufferAddress;
+ u32 SizeCheck;
+ u16 BulkLength;
+ u32 RxBufferId;
+ R00_DESCRIPTOR R00;
+
+ /* Variable setting */
pWb35Rx->EP3vm_state = VM_COMPLETED;
- pWb35Rx->EP3VM_status = urb->status;//Store the last result of Irp
+ pWb35Rx->EP3VM_status = urb->status; /* Store the last result of Irp */
RxBufferId = pWb35Rx->CurrentRxBufferId;
pRxBufferAddress = pWb35Rx->pDRx;
BulkLength = (u16)urb->actual_length;
- // The IRP is completed
+ /* The IRP is completed */
pWb35Rx->EP3vm_state = VM_COMPLETED;
- if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
+ if (pHwData->SurpriseRemove || pHwData->HwStop) /* Must be here, or RxBufferId is invalid */
goto error;
if (pWb35Rx->rx_halt)
goto error;
- // Start to process the data only in successful condition
- pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
+ /* Start to process the data only in successful condition */
+ pWb35Rx->RxOwner[RxBufferId] = 0; /* Set the owner to driver */
R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
- // The URB is completed, check the result
+ /* The URB is completed, check the result */
if (pWb35Rx->EP3VM_status != 0) {
- #ifdef _PE_USB_STATE_DUMP_
+#ifdef _PE_USB_STATE_DUMP_
printk("EP3 IoCompleteRoutine return error\n");
- #endif
+#endif
pWb35Rx->EP3vm_state = VM_STOP;
goto error;
}
- // 20060220 For recovering. check if operating in single USB mode
+	/* For recovery, check if operating in single-USB mode */
if (!HAL_USB_MODE_BURST(pHwData)) {
- SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian
+ SizeCheck = R00.R00_receive_byte_count;
if ((SizeCheck & 0x03) > 0)
SizeCheck -= 4;
SizeCheck = (SizeCheck + 3) & ~0x03;
- SizeCheck += 12; // 8 + 4 badbeef
+ SizeCheck += 12; /* 8 + 4 badbeef */
if ((BulkLength > 1600) ||
(SizeCheck > 1600) ||
(BulkLength != SizeCheck) ||
- (BulkLength == 0)) { // Add for fail Urb
+ (BulkLength == 0)) { /* Add for fail Urb */
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->Ep3ErrorCount2++;
}
}
- // Indicating the receiving data
+	/* Indicate the received data */
pWb35Rx->ByteReceived += BulkLength;
- pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
+ pWb35Rx->RxBufferSize[RxBufferId] = BulkLength;
- if (!pWb35Rx->RxOwner[ RxBufferId ])
+ if (!pWb35Rx->RxOwner[RxBufferId])
Wb35Rx_indicate(hw);
kfree(pWb35Rx->pDRx);
- // Do the next receive
+ /* Do the next receive */
Wb35Rx(hw);
return;
error:
- pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware
+ pWb35Rx->RxOwner[RxBufferId] = 1; /* Set the owner to hardware */
atomic_dec(&pWb35Rx->RxFireCounter);
pWb35Rx->EP3vm_state = VM_STOP;
}
-// This function cannot reentrain
+/* This function is not reentrant */
static void Wb35Rx(struct ieee80211_hw *hw)
{
- struct wbsoft_priv *priv = hw->priv;
- struct hw_data * pHwData = &priv->sHwData;
- struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- u8 * pRxBufferAddress;
- struct urb *urb = pWb35Rx->RxUrb;
- int retv;
- u32 RxBufferId;
-
- //
- // Issuing URB
- //
+ struct wbsoft_priv *priv = hw->priv;
+ struct hw_data *pHwData = &priv->sHwData;
+ struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
+ u8 *pRxBufferAddress;
+ struct urb *urb = pWb35Rx->RxUrb;
+ int retv;
+ u32 RxBufferId;
+
+ /* Issuing URB */
if (pHwData->SurpriseRemove || pHwData->HwStop)
goto error;
if (pWb35Rx->rx_halt)
goto error;
- // Get RxBuffer's ID
+ /* Get RxBuffer's ID */
RxBufferId = pWb35Rx->RxBufferId;
if (!pWb35Rx->RxOwner[RxBufferId]) {
- // It's impossible to run here.
- #ifdef _PE_RX_DUMP_
+ /* It's impossible to run here. */
+#ifdef _PE_RX_DUMP_
printk("Rx driver fifo unavailable\n");
- #endif
+#endif
goto error;
}
- // Update buffer point, then start to bulkin the data from USB
+	/* Update the buffer pointer, then start to bulk in the data from USB */
pWb35Rx->RxBufferId++;
pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
@@ -295,18 +284,18 @@ static void Wb35Rx(struct ieee80211_hw *hw)
return;
error:
- // VM stop
+ /* VM stop */
pWb35Rx->EP3vm_state = VM_STOP;
atomic_dec(&pWb35Rx->RxFireCounter);
}
void Wb35Rx_start(struct ieee80211_hw *hw)
{
- struct wbsoft_priv *priv = hw->priv;
- struct hw_data * pHwData = &priv->sHwData;
- struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
+ struct wbsoft_priv *priv = hw->priv;
+ struct hw_data *pHwData = &priv->sHwData;
+ struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- // Allow only one thread to run into the Wb35Rx() function
+ /* Allow only one thread to run into the Wb35Rx() function */
if (atomic_inc_return(&pWb35Rx->RxFireCounter) == 1) {
pWb35Rx->EP3vm_state = VM_RUNNING;
Wb35Rx(hw);
@@ -314,11 +303,10 @@ void Wb35Rx_start(struct ieee80211_hw *hw)
atomic_dec(&pWb35Rx->RxFireCounter);
}
-//=====================================================================================
-static void Wb35Rx_reset_descriptor( struct hw_data * pHwData )
+static void Wb35Rx_reset_descriptor(struct hw_data *pHwData)
{
- struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- u32 i;
+ struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
+ u32 i;
pWb35Rx->ByteReceived = 0;
pWb35Rx->RxProcessIndex = 0;
@@ -326,49 +314,49 @@ static void Wb35Rx_reset_descriptor( struct hw_data * pHwData )
pWb35Rx->EP3vm_state = VM_STOP;
pWb35Rx->rx_halt = 0;
- // Initial the Queue. The last buffer is reserved for used if the Rx resource is unavailable.
- for( i=0; i<MAX_USB_RX_BUFFER_NUMBER; i++ )
+	/* Initialize the queue. The last buffer is reserved for use if the Rx resource is unavailable. */
+ for (i = 0; i < MAX_USB_RX_BUFFER_NUMBER; i++)
pWb35Rx->RxOwner[i] = 1;
}
-unsigned char Wb35Rx_initial(struct hw_data * pHwData)
+unsigned char Wb35Rx_initial(struct hw_data *pHwData)
{
struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- // Initial the Buffer Queue
- Wb35Rx_reset_descriptor( pHwData );
+	/* Initialize the buffer queue */
+ Wb35Rx_reset_descriptor(pHwData);
pWb35Rx->RxUrb = usb_alloc_urb(0, GFP_ATOMIC);
- return (!!pWb35Rx->RxUrb);
+ return !!pWb35Rx->RxUrb;
}
-void Wb35Rx_stop(struct hw_data * pHwData)
+void Wb35Rx_stop(struct hw_data *pHwData)
{
struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
- // Canceling the Irp if already sends it out.
+	/* Cancel the Irp if it has already been sent out. */
if (pWb35Rx->EP3vm_state == VM_RUNNING) {
- usb_unlink_urb( pWb35Rx->RxUrb ); // Only use unlink, let Wb35Rx_destroy to free them
- #ifdef _PE_RX_DUMP_
+ usb_unlink_urb(pWb35Rx->RxUrb); /* Only use unlink, let Wb35Rx_destroy to free them */
+#ifdef _PE_RX_DUMP_
printk("EP3 Rx stop\n");
- #endif
+#endif
}
}
-// Needs process context
-void Wb35Rx_destroy(struct hw_data * pHwData)
+/* Needs process context */
+void Wb35Rx_destroy(struct hw_data *pHwData)
{
struct wb35_rx *pWb35Rx = &pHwData->Wb35Rx;
do {
- msleep(10); // Delay for waiting function enter 940623.1.a
+ msleep(10); /* Delay for waiting function enter */
} while (pWb35Rx->EP3vm_state != VM_STOP);
- msleep(10); // Delay for waiting function exit 940623.1.b
+ msleep(10); /* Delay for waiting function exit */
if (pWb35Rx->RxUrb)
- usb_free_urb( pWb35Rx->RxUrb );
- #ifdef _PE_RX_DUMP_
+ usb_free_urb(pWb35Rx->RxUrb);
+#ifdef _PE_RX_DUMP_
printk("Wb35Rx_destroy OK\n");
- #endif
+#endif
}
diff --git a/drivers/staging/winbond/wb35tx_f.h b/drivers/staging/winbond/wb35tx_f.h
index a7af9cbe202..1d3b515f83b 100644
--- a/drivers/staging/winbond/wb35tx_f.h
+++ b/drivers/staging/winbond/wb35tx_f.h
@@ -4,18 +4,20 @@
#include "core.h"
#include "wbhal_f.h"
-//====================================
-// Interface function declare
-//====================================
-unsigned char Wb35Tx_initial( struct hw_data * pHwData );
-void Wb35Tx_destroy( struct hw_data * pHwData );
-unsigned char Wb35Tx_get_tx_buffer( struct hw_data * pHwData, u8 **pBuffer );
+/*
+ * ====================================
+ * Interface function declare
+ * ====================================
+ */
+unsigned char Wb35Tx_initial(struct hw_data *hw_data);
+void Wb35Tx_destroy(struct hw_data *hw_data);
+unsigned char Wb35Tx_get_tx_buffer(struct hw_data *hw_data, u8 **buffer);
void Wb35Tx_EP2VM_start(struct wbsoft_priv *adapter);
void Wb35Tx_start(struct wbsoft_priv *adapter);
-void Wb35Tx_stop( struct hw_data * pHwData );
+void Wb35Tx_stop(struct hw_data *hw_data);
-void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 TimeCount);
+void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 time_count);
#endif
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
index 64a008db30f..401c024bead 100644
--- a/drivers/staging/winbond/wbhal_f.h
+++ b/drivers/staging/winbond/wbhal_f.h
@@ -1,70 +1,91 @@
-//=====================================================================
-// Device related include
-//=====================================================================
+/*
+ * =====================================================================
+ * Device related include
+ * =====================================================================
+ */
#include "wb35reg_f.h"
#include "wb35tx_f.h"
#include "wb35rx_f.h"
#include "core.h"
-//====================================================================================
-// Function declaration
-//====================================================================================
-void hal_remove_mapping_key( struct hw_data * pHwData, u8 *pmac_addr );
-void hal_remove_default_key( struct hw_data * pHwData, u32 index );
-unsigned char hal_set_mapping_key( struct hw_data * adapter, u8 *pmac_addr, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
-unsigned char hal_set_default_key( struct hw_data * adapter, u8 index, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
-void hal_clear_all_default_key( struct hw_data * pHwData );
-void hal_clear_all_group_key( struct hw_data * pHwData );
-void hal_clear_all_mapping_key( struct hw_data * pHwData );
-void hal_clear_all_key( struct hw_data * pHwData );
-void hal_set_power_save_mode( struct hw_data * pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim );
-void hal_get_power_save_mode( struct hw_data * pHwData, u8 *pin_pwr_save );
-void hal_set_slot_time( struct hw_data * pHwData, u8 type );
-#define hal_set_atim_window( _A, _ATM )
-void hal_start_bss( struct hw_data * pHwData, u8 mac_op_mode );
-void hal_join_request( struct hw_data * pHwData, u8 bss_type ); // 0:BSS STA 1:IBSS STA//
-void hal_stop_sync_bss( struct hw_data * pHwData );
-void hal_resume_sync_bss( struct hw_data * pHwData);
-void hal_set_aid( struct hw_data * pHwData, u16 aid );
-void hal_set_bssid( struct hw_data * pHwData, u8 *pbssid );
-void hal_get_bssid( struct hw_data * pHwData, u8 *pbssid );
-void hal_set_listen_interval( struct hw_data * pHwData, u16 listen_interval );
-void hal_set_cap_info( struct hw_data * pHwData, u16 capability_info );
-void hal_set_ssid( struct hw_data * pHwData, u8 *pssid, u8 ssid_len );
-void hal_start_tx0( struct hw_data * pHwData );
-#define hal_get_cwmin( _A ) ( (_A)->cwmin )
-void hal_set_cwmax( struct hw_data * pHwData, u16 cwin_max );
-#define hal_get_cwmax( _A ) ( (_A)->cwmax )
-void hal_set_rsn_wpa( struct hw_data * pHwData, u32 * RSN_IE_Bitmap , u32 * RSN_OUI_type , unsigned char bDesiredAuthMode);
-void hal_set_connect_info( struct hw_data * pHwData, unsigned char boConnect );
-u8 hal_get_est_sq3( struct hw_data * pHwData, u8 Count );
-void hal_descriptor_indicate( struct hw_data * pHwData, struct wb35_descriptor *pDes );
-u8 hal_get_antenna_number( struct hw_data * pHwData );
-u32 hal_get_bss_pk_cnt( struct hw_data * pHwData );
-#define hal_get_region_from_EEPROM( _A ) ( (_A)->reg.EEPROMRegion )
-#define hal_get_tx_buffer( _A, _B ) Wb35Tx_get_tx_buffer( _A, _B )
-#define hal_software_set( _A ) (_A->SoftwareSet)
-#define hal_driver_init_OK( _A ) (_A->IsInitOK)
-#define hal_rssi_boundary_high( _A ) (_A->RSSI_high)
-#define hal_rssi_boundary_low( _A ) (_A->RSSI_low)
-#define hal_scan_interval( _A ) (_A->Scan_Interval)
-
-#define PHY_DEBUG( msg, args... )
-
-#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count
-#define hal_detect_error( _P ) (_P->WbUsb.DetectCount)
-
-//-------------------------------------------------------------------------
-// The follow function is unused for IS89C35
-//-------------------------------------------------------------------------
+/* =====================================================================
+ * Function declaration
+ * =====================================================================
+ */
+void hal_remove_mapping_key(struct hw_data *hw_data, u8 *mac_addr);
+void hal_remove_default_key(struct hw_data *hw_data, u32 index);
+unsigned char hal_set_mapping_key(struct hw_data *adapter, u8 *mac_addr,
+ u8 null_key, u8 wep_on, u8 *tx_tsc,
+ u8 *rx_tsc, u8 key_type, u8 key_len,
+ u8 *key_data);
+unsigned char hal_set_default_key(struct hw_data *adapter, u8 index,
+ u8 null_key, u8 wep_on, u8 *tx_tsc,
+ u8 *rx_tsc, u8 key_type, u8 key_len,
+ u8 *key_data);
+void hal_clear_all_default_key(struct hw_data *hw_data);
+void hal_clear_all_group_key(struct hw_data *hw_data);
+void hal_clear_all_mapping_key(struct hw_data *hw_data);
+void hal_clear_all_key(struct hw_data *hw_data);
+void hal_set_power_save_mode(struct hw_data *hw_data, unsigned char power_save,
+ unsigned char wakeup, unsigned char dtim);
+void hal_get_power_save_mode(struct hw_data *hw_data, u8 *in_pwr_save);
+void hal_set_slot_time(struct hw_data *hw_data, u8 type);
+
+#define hal_set_atim_window(_A, _ATM)
+
+void hal_start_bss(struct hw_data *hw_data, u8 mac_op_mode);
+
+/* 0:BSS STA 1:IBSS STA */
+void hal_join_request(struct hw_data *hw_data, u8 bss_type);
+
+void hal_stop_sync_bss(struct hw_data *hw_data);
+void hal_resume_sync_bss(struct hw_data *hw_data);
+void hal_set_aid(struct hw_data *hw_data, u16 aid);
+void hal_set_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_get_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_set_listen_interval(struct hw_data *hw_data, u16 listen_interval);
+void hal_set_cap_info(struct hw_data *hw_data, u16 capability_info);
+void hal_set_ssid(struct hw_data *hw_data, u8 *ssid, u8 ssid_len);
+void hal_start_tx0(struct hw_data *hw_data);
+
+#define hal_get_cwmin(_A) ((_A)->cwmin)
+
+void hal_set_cwmax(struct hw_data *hw_data, u16 cwin_max);
+
+#define hal_get_cwmax(_A) ((_A)->cwmax)
+
+void hal_set_rsn_wpa(struct hw_data *hw_data, u32 *rsn_ie_bitmap,
+		     u32 *rsn_oui_type, unsigned char desired_auth_mode);
+void hal_set_connect_info(struct hw_data *hw_data, unsigned char bo_connect);
+u8 hal_get_est_sq3(struct hw_data *hw_data, u8 count);
+void hal_descriptor_indicate(struct hw_data *hw_data,
+ struct wb35_descriptor *des);
+u8 hal_get_antenna_number(struct hw_data *hw_data);
+u32 hal_get_bss_pk_cnt(struct hw_data *hw_data);
+
+#define hal_get_region_from_EEPROM(_A) ((_A)->reg.EEPROMRegion)
+#define hal_get_tx_buffer(_A, _B) Wb35Tx_get_tx_buffer(_A, _B)
+#define hal_software_set(_A) (_A->SoftwareSet)
+#define hal_driver_init_OK(_A) (_A->IsInitOK)
+#define hal_rssi_boundary_high(_A) (_A->RSSI_high)
+#define hal_rssi_boundary_low(_A) (_A->RSSI_low)
+#define hal_scan_interval(_A) (_A->Scan_Interval)
+
+#define PHY_DEBUG(msg, args...)
+
+/* return 100ms count */
+#define hal_get_time_count(_P) (_P->time_count / 10)
+#define hal_detect_error(_P) (_P->WbUsb.DetectCount)
+
+/* The following functions are unused for IS89C35 */
#define hal_disable_interrupt(_A)
#define hal_enable_interrupt(_A)
-#define hal_get_interrupt_type( _A)
+#define hal_get_interrupt_type(_A)
#define hal_get_clear_interrupt(_A)
-#define hal_ibss_disconnect(_A) hal_stop_sync_bss(_A)
+#define hal_ibss_disconnect(_A) (hal_stop_sync_bss(_A))
#define hal_join_request_stop(_A)
-#define hw_get_cxx_reg( _A, _B, _C )
-#define hw_set_cxx_reg( _A, _B, _C )
+#define hw_get_cxx_reg(_A, _B, _C)
+#define hw_set_cxx_reg(_A, _B, _C)
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
index 372a05e3021..33457c2e39b 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal_s.h
@@ -4,179 +4,166 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* for ETH_ALEN */
-//[20040722 WK]
-#define HAL_LED_SET_MASK 0x001c //20060901 Extend
-#define HAL_LED_SET_SHIFT 2
+#define HAL_LED_SET_MASK 0x001c
+#define HAL_LED_SET_SHIFT 2
-//supported RF type
+/* supported RF type */
#define RF_MAXIM_2825 0
#define RF_MAXIM_2827 1
#define RF_MAXIM_2828 2
#define RF_MAXIM_2829 3
-#define RF_MAXIM_V1 15
+#define RF_MAXIM_V1 15
#define RF_AIROHA_2230 16
#define RF_AIROHA_7230 17
-#define RF_AIROHA_2230S 18 // 20060420 Add this
-// #define RF_RFMD_2959 32 // 20060626 Remove all about RFMD
-#define RF_WB_242 33
-#define RF_WB_242_1 34 // 20060619.5 Add
+#define RF_AIROHA_2230S 18
+#define RF_WB_242 33
+#define RF_WB_242_1 34
#define RF_DECIDE_BY_INF 255
-//----------------------------------------------------------------
-// The follow define connect to upper layer
-// User must modify for connection between HAL and upper layer
-//----------------------------------------------------------------
-
-
-
-
-/////////////////////////////////////////////////////////////////////////////////////////////////////
-//================================================================================================
-// Common define
-//================================================================================================
-#define HAL_USB_MODE_BURST( _H ) (_H->SoftwareSet & 0x20 ) // Bit 5 20060901 Modify
-
-// Scan interval
-#define SCAN_MAX_CHNL_TIME (50)
-
-// For TxL2 Frame typr recognise
+/*
+ * ----------------------------------------------------------------
+ * The following defines connect to the upper layer.
+ * The user must modify them for the connection between the HAL and the upper layer.
+ * ----------------------------------------------------------------
+ */
+
+/*
+ * ==============================
+ * Common define
+ * ==============================
+ */
+/* Bit 5 */
+#define HAL_USB_MODE_BURST(_H) (_H->SoftwareSet & 0x20)
+
+/* Scan interval */
+#define SCAN_MAX_CHNL_TIME (50)
+
+/* For TxL2 frame type recognition */
#define FRAME_TYPE_802_3_DATA 0
#define FRAME_TYPE_802_11_MANAGEMENT 1
-#define FRAME_TYPE_802_11_MANAGEMENT_CHALLENGE 2
+#define FRAME_TYPE_802_11_MANAGEMENT_CHALLENGE 2
#define FRAME_TYPE_802_11_CONTROL 3
#define FRAME_TYPE_802_11_DATA 4
#define FRAME_TYPE_PROMISCUOUS 5
-// The follow definition is used for convert the frame--------------------
-#define DOT_11_SEQUENCE_OFFSET 22 //Sequence control offset
+/* The following definitions are used to convert the frame ------------ */
+#define DOT_11_SEQUENCE_OFFSET 22 /* Sequence control offset */
#define DOT_3_TYPE_OFFSET 12
-#define DOT_11_MAC_HEADER_SIZE 24
+#define DOT_11_MAC_HEADER_SIZE 24
#define DOT_11_SNAP_SIZE 6
-#define DOT_11_TYPE_OFFSET 30 //The start offset of 802.11 Frame. Type encapsulatuin.
+#define DOT_11_TYPE_OFFSET 30 /* The start offset of 802.11 Frame. Type encapsulation. */
#define DEFAULT_SIFSTIME 10
-#define DEFAULT_FRAGMENT_THRESHOLD 2346 // No fragment
+#define DEFAULT_FRAGMENT_THRESHOLD 2346 /* No fragment */
#define DEFAULT_MSDU_LIFE_TIME 0xffff
-#define LONG_PREAMBLE_PLUS_PLCPHEADER_TIME (144+48)
-#define SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME (72+24)
-#define PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION (16+4+6)
-#define Tsym 4
+#define LONG_PREAMBLE_PLUS_PLCPHEADER_TIME (144 + 48)
+#define SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME (72 + 24)
+#define PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION (16 + 4 + 6)
+#define Tsym 4
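[Editor's illustration, not part of the patch.] These are the standard 802.11 PLCP timing figures in microseconds: 144 + 48 for the DSSS long preamble plus header, 72 + 24 for the short preamble, 16 + 4 + 6 for the OFDM preamble, SIGNAL field and ERP signal extension, and Tsym = 4 for one OFDM symbol. As a hedged arithmetic sketch (the duration formula is the usual 802.11 one, not something defined by this driver), an ERP-OFDM frame time could be computed from them like this:

	/* Illustration only: ERP-OFDM airtime in microseconds for `len` bytes
	 * at `rate_mbps`, using the constants above. Data bits per symbol is
	 * rate_mbps * Tsym (e.g. 54 * 4 = 216). */
	static u32 example_ofdm_airtime(u32 len, u32 rate_mbps)
	{
		u32 n_dbps = rate_mbps * Tsym;
		u32 n_sym = (16 + 8 * len + 6 + n_dbps - 1) / n_dbps;	/* ceil */

		return PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION + n_sym * Tsym;
	}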
-// Frame Type of Bits (2, 3)---------------------------------------------
+/* Frame Type of Bits (2, 3)----------------------------------- */
#define MAC_TYPE_MANAGEMENT 0x00
#define MAC_TYPE_CONTROL 0x04
#define MAC_TYPE_DATA 0x08
-#define MASK_FRAGMENT_NUMBER 0x000F
-#define SEQUENCE_NUMBER_SHIFT 4
+#define MASK_FRAGMENT_NUMBER 0x000F
+#define SEQUENCE_NUMBER_SHIFT 4
#define HAL_WOL_TYPE_WAKEUP_FRAME 0x01
#define HAL_WOL_TYPE_MAGIC_PACKET 0x02
-// 20040106 ADDED
-#define HAL_KEYTYPE_WEP40 0
-#define HAL_KEYTYPE_WEP104 1
-#define HAL_KEYTYPE_TKIP 2 // 128 bit key
-#define HAL_KEYTYPE_AES_CCMP 3 // 128 bit key
+#define HAL_KEYTYPE_WEP40 0
+#define HAL_KEYTYPE_WEP104 1
+#define HAL_KEYTYPE_TKIP 2 /* 128 bit key */
+#define HAL_KEYTYPE_AES_CCMP 3 /* 128 bit key */
-// For VM state
+/* For VM state */
enum {
VM_STOP = 0,
VM_RUNNING,
VM_COMPLETED
};
-//-----------------------------------------------------
-// Normal Key table format
-//-----------------------------------------------------
-// The order of KEY index is MAPPING_KEY_START_INDEX > GROUP_KEY_START_INDEX
-#define MAX_KEY_TABLE 24 // 24 entry for storing key data
+/*
+ * ================================
+ * Normal Key table format
+ * ================================
+ */
+
+/* The order of KEY index is MAPPING_KEY_START_INDEX > GROUP_KEY_START_INDEX */
+#define MAX_KEY_TABLE			24	/* 24 entries for storing key data */
#define GROUP_KEY_START_INDEX 4
#define MAPPING_KEY_START_INDEX 8
-//--------------------------------------------------------
-// Descriptor
-//--------------------------------------------------------
-#define MAX_DESCRIPTOR_BUFFER_INDEX 8 // Have to multiple of 2
-//#define FLAG_ERROR_TX_MASK cpu_to_le32(0x000000bf) //20061009 marked by anson's endian
-#define FLAG_ERROR_TX_MASK 0x000000bf //20061009 anson's endian
-//#define FLAG_ERROR_RX_MASK 0x00000c3f
-//#define FLAG_ERROR_RX_MASK cpu_to_le32(0x0000083f) //20061009 marked by anson's endian
- //Don't care replay error,
- //it is handled by S/W
-#define FLAG_ERROR_RX_MASK 0x0000083f //20060926 anson's endian
-
-#define FLAG_BAND_RX_MASK 0x10000000 //Bit 28
-
-typedef struct _R00_DESCRIPTOR
-{
- union
- {
+/*
+ * =========================================
+ * Descriptor
+ * =========================================
+ */
+#define MAX_DESCRIPTOR_BUFFER_INDEX	8	/* Has to be a multiple of 2 */
+#define FLAG_ERROR_TX_MASK 0x000000bf
+#define FLAG_ERROR_RX_MASK 0x0000083f
+
+#define FLAG_BAND_RX_MASK 0x10000000 /* Bit 28 */
+
+typedef struct _R00_DESCRIPTOR {
+ union {
u32 value;
- #ifdef _BIG_ENDIAN_ //20060926 anson's endian
- struct
- {
+#ifdef _BIG_ENDIAN_
+ struct {
u32 R00_packet_or_buffer_status:1;
u32 R00_packet_in_fifo:1;
u32 R00_RESERVED:2;
u32 R00_receive_byte_count:12;
u32 R00_receive_time_index:16;
};
- #else
- struct
- {
+#else
+ struct {
u32 R00_receive_time_index:16;
u32 R00_receive_byte_count:12;
u32 R00_RESERVED:2;
u32 R00_packet_in_fifo:1;
u32 R00_packet_or_buffer_status:1;
};
- #endif
+#endif
};
} R00_DESCRIPTOR, *PR00_DESCRIPTOR;
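
The mirrored #ifdef _BIG_ENDIAN_ branches exist because C bit-field order follows host endianness, so the same hardware layout needs two declarations. A short sketch of how the union is used once the dword is in host order (the helper name is illustrative):

    /* Illustrative only: pull fields out of a raw R00 status dword that is
     * already in host byte order.
     */
    static void decode_r00(u32 raw, u32 *byte_count, u32 *time_index)
    {
            R00_DESCRIPTOR r00;

            r00.value = raw;                           /* one 32-bit load ...   */
            *byte_count = r00.R00_receive_byte_count;  /* ... then named fields */
            *time_index = r00.R00_receive_time_index;
    }
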
-typedef struct _T00_DESCRIPTOR
-{
- union
- {
+typedef struct _T00_DESCRIPTOR {
+ union {
u32 value;
- #ifdef _BIG_ENDIAN_ //20061009 anson's endian
- struct
- {
- u32 T00_first_mpdu:1; // for hardware use
- u32 T00_last_mpdu:1; // for hardware use
- u32 T00_IsLastMpdu:1;// 0: not 1:Yes for software used
- u32 T00_IgnoreResult:1;// The same mechanism with T00 setting. 050111 Modify for TS
- u32 T00_RESERVED_ID:2;//3 bit ID reserved
- u32 T00_tx_packet_id:4;//930519.4.e 930810.3.c
+#ifdef _BIG_ENDIAN_
+ struct {
+ u32 T00_first_mpdu:1; /* for hardware use */
+ u32 T00_last_mpdu:1; /* for hardware use */
+ u32 T00_IsLastMpdu:1;/* 0:not 1:Yes for software used */
+ u32 T00_IgnoreResult:1;/* The same mechanism with T00 setting. */
+ u32 T00_RESERVED_ID:2;/* 3 bit ID reserved */
+ u32 T00_tx_packet_id:4;
u32 T00_RESERVED:4;
u32 T00_header_length:6;
u32 T00_frame_length:12;
};
- #else
- struct
- {
+#else
+ struct {
u32 T00_frame_length:12;
u32 T00_header_length:6;
u32 T00_RESERVED:4;
- u32 T00_tx_packet_id:4;//930519.4.e 930810.3.c
- u32 T00_RESERVED_ID:2;//3 bit ID reserved
- u32 T00_IgnoreResult:1;// The same mechanism with T00 setting. 050111 Modify for TS
- u32 T00_IsLastMpdu:1;// 0: not 1:Yes for software used
- u32 T00_last_mpdu:1; // for hardware use
- u32 T00_first_mpdu:1; // for hardware use
+ u32 T00_tx_packet_id:4;
+ u32 T00_RESERVED_ID:2; /* 3 bit ID reserved */
+ u32 T00_IgnoreResult:1; /* The same mechanism with T00 setting. */
+ u32 T00_IsLastMpdu:1; /* 0:not 1:Yes for software used */
+ u32 T00_last_mpdu:1; /* for hardware use */
+ u32 T00_first_mpdu:1; /* for hardware use */
};
- #endif
+#endif
};
} T00_DESCRIPTOR, *PT00_DESCRIPTOR;
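
T00 is the transmit-side counterpart: software fills the bit-fields and hands the packed dword to the MAC. A hedged sketch for a frame that is both the first and the last MPDU of its MSDU; the field choices are illustrative, not lifted from the driver:

    /* Illustrative only: pack a T00 dword for a single-MPDU frame. */
    static u32 build_t00(u32 frame_len, u32 hdr_len, u32 pkt_id)
    {
            T00_DESCRIPTOR t00;

            t00.value = 0;
            t00.T00_frame_length = frame_len;
            t00.T00_header_length = hdr_len;
            t00.T00_tx_packet_id = pkt_id;
            t00.T00_first_mpdu = 1;
            t00.T00_last_mpdu = 1;
            t00.T00_IsLastMpdu = 1;
            return t00.value;
    }
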
-typedef struct _R01_DESCRIPTOR
-{
- union
- {
+typedef struct _R01_DESCRIPTOR {
+ union {
u32 value;
- #ifdef _BIG_ENDIAN_ //20060926 add by anson's endian
- struct
- {
+#ifdef _BIG_ENDIAN_
+ struct {
u32 R01_RESERVED:3;
u32 R01_mod_type:1;
u32 R01_pre_type:1;
@@ -197,9 +184,8 @@ typedef struct _R01_DESCRIPTOR
u32 R01_icv_error:1;
u32 R01_crc_error:1;
};
- #else
- struct
- {
+#else
+ struct {
u32 R01_crc_error:1;
u32 R01_icv_error:1;
u32 R01_null_key_to_authentication_frame:1;
@@ -220,18 +206,15 @@ typedef struct _R01_DESCRIPTOR
u32 R01_mod_type:1;
u32 R01_RESERVED:3;
};
- #endif
+#endif
};
} R01_DESCRIPTOR, *PR01_DESCRIPTOR;
-typedef struct _T01_DESCRIPTOR
-{
- union
- {
+typedef struct _T01_DESCRIPTOR {
+ union {
u32 value;
- #ifdef _BIG_ENDIAN_ //20061009 anson's endian
- struct
- {
+#ifdef _BIG_ENDIAN_
+ struct {
u32 T01_rts_cts_duration:16;
u32 T01_fall_back_rate:3;
u32 T01_add_rts:1;
@@ -245,9 +228,8 @@ typedef struct _T01_DESCRIPTOR
u32 T01_loop_back_wep_mode:1;
u32 T01_retry_abort_ebable:1;
};
- #else
- struct
- {
+#else
+ struct {
u32 T01_retry_abort_ebable:1;
u32 T01_loop_back_wep_mode:1;
u32 T01_inhibit_crc:1;
@@ -261,21 +243,18 @@ typedef struct _T01_DESCRIPTOR
u32 T01_fall_back_rate:3;
u32 T01_rts_cts_duration:16;
};
- #endif
+#endif
};
} T01_DESCRIPTOR, *PT01_DESCRIPTOR;
-typedef struct _T02_DESCRIPTOR
-{
- union
- {
+typedef struct _T02_DESCRIPTOR {
+ union {
u32 value;
- #ifdef _BIG_ENDIAN_ //20061009 add by anson's endian
- struct
- {
- u32 T02_IsLastMpdu:1;// The same mechanism with T00 setting
- u32 T02_IgnoreResult:1;// The same mechanism with T00 setting. 050111 Modify for TS
- u32 T02_RESERVED_ID:2;// The same mechanism with T00 setting
+#ifdef _BIG_ENDIAN_
+ struct {
+ u32 T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
+ u32 T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
+ u32 T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
u32 T02_Tx_PktID:4;
u32 T02_MPDU_Cnt:4;
u32 T02_RTS_Cnt:4;
@@ -290,9 +269,8 @@ typedef struct _T02_DESCRIPTOR
u32 T02_transmit_abort:1;
u32 T02_transmit_fail:1;
};
- #else
- struct
- {
+#else
+ struct {
u32 T02_transmit_fail:1;
u32 T02_transmit_abort:1;
u32 T02_out_of_MaxTxMSDULiftTime:1;
@@ -306,122 +284,120 @@ typedef struct _T02_DESCRIPTOR
u32 T02_RTS_Cnt:4;
u32 T02_MPDU_Cnt:4;
u32 T02_Tx_PktID:4;
- u32 T02_RESERVED_ID:2;// The same mechanism with T00 setting
- u32 T02_IgnoreResult:1;// The same mechanism with T00 setting. 050111 Modify for TS
- u32 T02_IsLastMpdu:1;// The same mechanism with T00 setting
+ u32 T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
+ u32 T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
+ u32 T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
};
- #endif
+#endif
};
} T02_DESCRIPTOR, *PT02_DESCRIPTOR;
-struct wb35_descriptor { // Skip length = 8 DWORD
- // ID for descriptor ---, The field doesn't be cleard in the operation of Descriptor definition
+struct wb35_descriptor { /* Skip length = 8 DWORD */
+ /* ID for descriptor --- this field is not cleared by the descriptor definition operations */
u8 Descriptor_ID;
- //----------------------The above region doesn't be cleared by DESCRIPTOR_RESET------
+ /* ----------------------The above region doesn't be cleared by DESCRIPTOR_RESET------ */
u8 RESERVED[3];
u16 FragmentThreshold;
- u8 InternalUsed;//Only can be used by operation of descriptor definition
- u8 Type;// 0: 802.3 1:802.11 data frame 2:802.11 management frame
+ u8 InternalUsed; /* Can only be used by the descriptor definition operations */
+ u8 Type; /* 0: 802.3 1:802.11 data frame 2:802.11 management frame */
- u8 PreambleMode;// 0: short 1:long
+ u8 PreambleMode;/* 0: short 1:long */
u8 TxRate;
u8 FragmentCount;
- u8 EapFix; // For speed up key install
+ u8 EapFix; /* For speed up key install */
- // For R00 and T00 ----------------------------------------------
- union
- {
+ /* For R00 and T00 ------------------------------ */
+ union {
R00_DESCRIPTOR R00;
T00_DESCRIPTOR T00;
};
- // For R01 and T01 ----------------------------------------------
- union
- {
+ /* For R01 and T01 ------------------------------ */
+ union {
R01_DESCRIPTOR R01;
T01_DESCRIPTOR T01;
};
- // For R02 and T02 ----------------------------------------------
- union
- {
- u32 R02;
+ /* For R02 and T02 ------------------------------ */
+ union {
+ u32 R02;
T02_DESCRIPTOR T02;
};
- // For R03 and T03 ----------------------------------------------
- // For software used
- union
- {
+ /* For R03 and T03 ------------------------------ */
+ /* For software used */
+ union {
u32 R03;
u32 T03;
- struct
- {
+ struct {
u8 buffer_number;
u8 buffer_start_index;
u16 buffer_total_size;
};
};
- // For storing the buffer
- u16 buffer_size[ MAX_DESCRIPTOR_BUFFER_INDEX ];
- void* buffer_address[ MAX_DESCRIPTOR_BUFFER_INDEX ];//931130.4.q
-
+ /* For storing the buffer */
+ u16 buffer_size[MAX_DESCRIPTOR_BUFFER_INDEX];
+ void *buffer_address[MAX_DESCRIPTOR_BUFFER_INDEX];
};
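
The comment on Descriptor_ID says everything below the marked region is wiped while the ID survives a DESCRIPTOR_RESET; that operation itself is not part of this hunk. A sketch of what such a reset could look like under that assumption (the function name and the memset-based approach are illustrative):

    /* Illustrative only: clear a wb35_descriptor but keep Descriptor_ID,
     * as the "not cleared by DESCRIPTOR_RESET" comment implies.
     * memset() comes from <linux/string.h>.
     */
    static void wb35_descriptor_reset(struct wb35_descriptor *desc)
    {
            u8 saved_id = desc->Descriptor_ID;

            memset(desc, 0, sizeof(*desc));
            desc->Descriptor_ID = saved_id;
    }
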
-#define DEFAULT_NULL_PACKET_COUNT 180000 //20060828.1 Add. 180 seconds
+#define DEFAULT_NULL_PACKET_COUNT 180000 /* 180 seconds */
-#define MAX_TXVGA_EEPROM 9 //How many word(u16) of EEPROM will be used for TxVGA
-#define MAX_RF_PARAMETER 32
+#define MAX_TXVGA_EEPROM 9 /* How many words (u16) of EEPROM will be used for TxVGA */
+#define MAX_RF_PARAMETER 32
typedef struct _TXVGA_FOR_50 {
- u8 ChanNo;
- u8 TxVgaValue;
+ u8 ChanNo;
+ u8 TxVgaValue;
} TXVGA_FOR_50;
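
TxVgaFor50[] (declared in hw_data below) pairs each 5 GHz channel number with its Tx VGA gain. A minimal lookup over a TXVGA_FOR_50 table; the helper and its -1 "not found" convention are illustrative:

    /* Illustrative only: look up the TxVGA value for a 5 GHz channel. */
    static int txvga_for_channel(const TXVGA_FOR_50 *table, int entries, u8 chan)
    {
            int i;

            for (i = 0; i < entries; i++)
                    if (table[i].ChanNo == chan)
                            return table[i].TxVgaValue;
            return -1;      /* channel not present in the table */
    }
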
-//=====================================================================
-// Device related include
-//=====================================================================
+/*
+ * ==============================================
+ * Device related include
+ * ==============================================
+ */
#include "wbusb_s.h"
#include "wb35reg_s.h"
#include "wb35tx_s.h"
#include "wb35rx_s.h"
-// For Hal using ==================================================================
+/* For Hal using ============================================ */
struct hw_data {
- // For compatible with 33
+ /* For compatible with 33 */
u32 revision;
- u32 BB3c_cal; // The value for Tx calibration comes from EEPROM
- u32 BB54_cal; // The value for Rx calibration comes from EEPROM
+ u32 BB3c_cal; /* The value for Tx calibration comes from EEPROM */
+ u32 BB54_cal; /* The value for Rx calibration comes from EEPROM */
-
- // For surprise remove
- u32 SurpriseRemove; // 0: Normal 1: Surprise remove
+ /* For surprise remove */
+ u32 SurpriseRemove; /* 0: Normal 1: Surprise remove */
u8 IsKeyPreSet;
- u8 CalOneTime; // 20060630.1
+ u8 CalOneTime;
u8 VCO_trim;
- // For Fix 1'st DMA bug
u32 FragCount;
- u32 DMAFix; //V1_DMA_FIX The variable can be removed if driver want to save mem space for V2.
-
- //===============================================
- // Definition for MAC address
- //===============================================
- u8 PermanentMacAddress[ETH_ALEN + 2]; // The Enthernet addr that are stored in EEPROM. + 2 to 8-byte alignment
- u8 CurrentMacAddress[ETH_ALEN + 2]; // The Enthernet addr that are in used. + 2 to 8-byte alignment
-
- //=====================================================================
- // Definition for 802.11
- //=====================================================================
- u8 *bssid_pointer; // Used by hal_get_bssid for return value
- u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer
- u8 ssid[32];// maximum ssid length is 32 byte
+ u32 DMAFix; /* V1_DMA_FIX The variable can be removed if the driver wants to save memory space for V2. */
+
+ /*
+ * ===============================================
+ * Definition for MAC address
+ * ===============================================
+ */
+ u8 PermanentMacAddress[ETH_ALEN + 2]; /* The Ethernet addr that are stored in EEPROM. + 2 to 8-byte alignment */
+ u8 CurrentMacAddress[ETH_ALEN + 2]; /* The Ethernet addr currently in use. + 2 to 8-byte alignment */
+
+ /*
+ * =========================================
+ * Definition for 802.11
+ * =========================================
+ */
+ u8 *bssid_pointer; /* Used by hal_get_bssid for return value */
+ u8 bssid[8]; /* Only 6 byte will be used. 8 byte is required for read buffer */
+ u8 ssid[32]; /* maximum ssid length is 32 byte */
u16 AID;
u8 ssid_length;
@@ -433,112 +409,118 @@ struct hw_data {
u16 BeaconPeriod;
u16 ProbeDelay;
- u8 bss_type;// 0: IBSS_NET or 1:ESS_NET
- u8 preamble;// 0: short preamble, 1: long preamble
- u8 slot_time_select;// 9 or 20 value
- u8 phy_type;// Phy select
+ u8 bss_type;/* 0: IBSS_NET or 1:ESS_NET */
+ u8 preamble;/* 0: short preamble, 1: long preamble */
+ u8 slot_time_select; /* 9 or 20 value */
+ u8 phy_type; /* Phy select */
u32 phy_para[MAX_RF_PARAMETER];
u32 phy_number;
- u32 CurrentRadioSw; // 20060320.2 0:On 1:Off
- u32 CurrentRadioHw; // 20060825 0:On 1:Off
+ u32 CurrentRadioSw; /* 0:On 1:Off */
+ u32 CurrentRadioHw; /* 0:On 1:Off */
- u8 *power_save_point; // Used by hal_get_power_save_mode for return value
+ u8 *power_save_point; /* Used by hal_get_power_save_mode for return value */
u8 cwmin;
u8 desired_power_save;
- u8 dtim;// Is running dtim
- u8 mapping_key_replace_index;//In Key table, the next index be replaced 931130.4.r
+ u8 dtim; /* Is running dtim */
+ u8 mapping_key_replace_index; /* In Key table, the next index to be replaced */
u16 MaxReceiveLifeTime;
u16 FragmentThreshold;
u16 FragmentThreshold_tmp;
u16 cwmax;
- u8 Key_slot[MAX_KEY_TABLE][8]; //Ownership record for key slot. For Alignment
- u32 Key_content[MAX_KEY_TABLE][12]; // 10DW for each entry + 2 for burst command( Off and On valid bit)
+ u8 Key_slot[MAX_KEY_TABLE][8]; /* Ownership record for key slot. For Alignment */
+ u32 Key_content[MAX_KEY_TABLE][12]; /* 10DW for each entry + 2 for burst command (Off and On valid bit) */
u8 CurrentDefaultKeyIndex;
u32 CurrentDefaultKeyLength;
- //========================================================================
- // Variable for each module
- //========================================================================
- struct wb_usb WbUsb; // Need WbUsb.h
- struct wb35_reg reg; // Need Wb35Reg.h
- struct wb35_tx Wb35Tx; // Need Wb35Tx.h
- struct wb35_rx Wb35Rx; // Need Wb35Rx.h
+ /*
+ * ==================================================
+ * Variable for each module
+ * ==================================================
+ */
+ struct wb_usb WbUsb; /* Need WbUsb.h */
+ struct wb35_reg reg; /* Need Wb35Reg.h */
+ struct wb35_tx Wb35Tx; /* Need Wb35Tx.h */
+ struct wb35_rx Wb35Rx; /* Need Wb35Rx.h */
- struct timer_list LEDTimer;// For LED
+ struct timer_list LEDTimer; /* For LED */
- u32 LEDpoint;// For LED
+ u32 LEDpoint; /* For LED */
- u32 dto_tx_retry_count; // LA20040210_DTO kevin
- u32 dto_tx_frag_count; // LA20040210_DTO kevin
- u32 rx_ok_count[13]; // index=0: total rx ok
- //u32 rx_ok_bytes[13]; // index=0, total rx ok bytes
- u32 rx_err_count[13]; // index=0: total rx err
+ u32 dto_tx_retry_count;
+ u32 dto_tx_frag_count;
+ u32 rx_ok_count[13]; /* index=0: total rx ok */
+ u32 rx_err_count[13]; /* index=0: total rx err */
- //for Tx debug
+ /* for Tx debug */
u32 tx_TBTT_start_count;
u32 tx_ETR_count;
u32 tx_WepOn_false_count;
u32 tx_Null_key_count;
u32 tx_retry_count[8];
- u8 PowerIndexFromEEPROM; // For 2412MHz
- u8 power_index;
- u8 IsWaitJoinComplete; // TRUE: set join request
- u8 band;
+ u8 PowerIndexFromEEPROM; /* For 2412MHz */
+ u8 power_index;
+ u8 IsWaitJoinComplete; /* TRUE: set join request */
+ u8 band;
- u16 SoftwareSet;
- u16 Reserved_s;
+ u16 SoftwareSet;
+ u16 Reserved_s;
- u32 IsInitOK; // 0: Driver starting 1: Driver init OK
+ u32 IsInitOK; /* 0: Driver starting 1: Driver init OK */
- // For Phy calibration
- s32 iq_rsdl_gain_tx_d2;
- s32 iq_rsdl_phase_tx_d2;
- u32 txvga_setting_for_cal; // 20060703.1 Add
+ /* For Phy calibration */
+ s32 iq_rsdl_gain_tx_d2;
+ s32 iq_rsdl_phase_tx_d2;
+ u32 txvga_setting_for_cal;
- u8 TxVgaSettingInEEPROM[ (((MAX_TXVGA_EEPROM*2)+3) & ~0x03) ]; // 20060621 For backup EEPROM value
- u8 TxVgaFor24[16]; // Max is 14, 2 for alignment
- TXVGA_FOR_50 TxVgaFor50[36]; // 35 channels in 5G. 35x2 = 70 byte. 2 for alignments
+ u8 TxVgaSettingInEEPROM[(((MAX_TXVGA_EEPROM * 2) + 3) & ~0x03)]; /* For EEPROM value */
+ u8 TxVgaFor24[16]; /* Max is 14, 2 for alignment */
+ TXVGA_FOR_50 TxVgaFor50[36]; /* 35 channels in 5G. 35x2 = 70 byte. 2 for alignments */
- u16 Scan_Interval;
- u16 RESERVED6;
+ u16 Scan_Interval;
+ u16 RESERVED6;
- // LED control
+ /* LED control */
u32 LED_control;
- // LED_control 4 byte: Gray_Led_1[3] Gray_Led_0[2] Led[1] Led[0]
- // Gray_Led
- // For Led gray setting
- // Led
- // 0: normal control, LED behavior will decide by EEPROM setting
- // 1: Turn off specific LED
- // 2: Always on specific LED
- // 3: slow blinking specific LED
- // 4: fast blinking specific LED
- // 5: WPS led control is set. Led0 is Red, Led1 id Green
- // Led[1] is parameter for WPS LED mode
- // // 1:InProgress 2: Error 3: Session overlap 4: Success 20061108 control
-
- u32 LED_LinkOn; //Turn LED on control
- u32 LED_Scanning; // Let LED in scan process control
- u32 LED_Blinking; // Temp variable for shining
+ /*
+ * LED_control 4 byte: Gray_Led_1[3] Gray_Led_0[2] Led[1] Led[0]
+ * Gray_Led
+ * For Led gray setting
+ * Led
+ * 0: normal control,
+ * LED behavior is decided by the EEPROM setting
+ * 1: Turn off specific LED
+ * 2: Always on specific LED
+ * 3: slow blinking specific LED
+ * 4: fast blinking specific LED
+ * 5: WPS led control is set. Led0 is Red, Led1 is Green
+ *
+ * Led[1] is parameter for WPS LED mode
+ * 1:InProgress
+ * 2: Error
+ * 3: Session overlap
+ * 4: Success control
+ */
+ u32 LED_LinkOn; /* Turn LED on control */
+ u32 LED_Scanning; /* Let LED in scan process control */
+ u32 LED_Blinking; /* Temp variable for shining */
u32 RxByteCountLast;
u32 TxByteCountLast;
atomic_t SurpriseRemoveCount;
- // For global timer
- u32 time_count;//TICK_TIME_100ms 1 = 100ms
+ /* For global timer */
+ u32 time_count; /* TICK_TIME_100ms 1 = 100ms */
- // For error recover
+ /* For error recovery */
u32 HwStop;
- // 20060828.1 for avoid AP disconnect
+ /* To avoid AP disconnect */
u32 NullPacketCount;
-
};
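
The LED_control comment above describes four packed bytes, and hal_led_control() later in this diff reads them exactly that way (the mode from byte 0, the WPS state from byte 1). A small decode sketch matching that usage; the byte positions of the two Gray_Led values follow the comment's ordering and are otherwise an assumption:

    /* Illustrative only: split LED_control into the bytes the comment names. */
    static void decode_led_control(u32 led_control, u8 *mode, u8 *wps_state,
                                   u8 *gray_led_0, u8 *gray_led_1)
    {
            *mode = led_control & 0xff;               /* Led[0]: control mode  */
            *wps_state = (led_control >> 8) & 0xff;   /* Led[1]: WPS LED state */
            *gray_led_0 = (led_control >> 16) & 0xff; /* Gray_Led_0            */
            *gray_led_1 = (led_control >> 24) & 0xff; /* Gray_Led_1            */
    }
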
#endif
diff --git a/drivers/staging/winbond/wblinux_f.h b/drivers/staging/winbond/wblinux_f.h
index 868e8772724..0a9d214f718 100644
--- a/drivers/staging/winbond/wblinux_f.h
+++ b/drivers/staging/winbond/wblinux_f.h
@@ -4,13 +4,14 @@
#include "core.h"
#include "mds_s.h"
-//=========================================================================
-// Copyright (c) 1996-2004 Winbond Electronic Corporation
-//
-// wblinux_f.h
-//
-int wb35_start_xmit(struct sk_buff *skb, struct net_device *netdev );
-void wb35_set_multicast( struct net_device *netdev );
-struct net_device_stats * wb35_netdev_stats( struct net_device *netdev );
-
+/*
+ * ====================================================================
+ * Copyright (c) 1996-2004 Winbond Electronic Corporation
+ *
+ * wblinux_f.h
+ * ====================================================================
+ */
+int wb35_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void wb35_set_multicast(struct net_device *netdev);
+struct net_device_stats *wb35_netdev_stats(struct net_device *netdev);
#endif
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3482eec1865..681419d6856 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -92,10 +92,10 @@ static int wbsoft_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mc_list)
+static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
- return mc_count;
+ return netdev_hw_addr_list_count(mc_list);
}
static void wbsoft_configure_filter(struct ieee80211_hw *dev,
@@ -142,19 +142,17 @@ static void hal_set_radio_mode(struct hw_data *pHwData, unsigned char radio_off)
if (pHwData->SurpriseRemove)
return;
- if (radio_off) //disable Baseband receive off
- {
- pHwData->CurrentRadioSw = 1; // off
+ if (radio_off) { /* disable Baseband receive off */
+ pHwData->CurrentRadioSw = 1; /* off */
reg->M24_MacControl &= 0xffffffbf;
} else {
- pHwData->CurrentRadioSw = 0; // on
+ pHwData->CurrentRadioSw = 0; /* on */
reg->M24_MacControl |= 0x00000040;
}
Wb35Reg_Write(pHwData, 0x0824, reg->M24_MacControl);
}
-static void
-hal_set_current_channel_ex(struct hw_data *pHwData, struct chan_info channel)
+static void hal_set_current_channel_ex(struct hw_data *pHwData, struct chan_info channel)
{
struct wb35_reg *reg = &pHwData->reg;
@@ -163,17 +161,18 @@ hal_set_current_channel_ex(struct hw_data *pHwData, struct chan_info channel)
printk("Going to channel: %d/%d\n", channel.band, channel.ChanNo);
- RFSynthesizer_SwitchingChannel(pHwData, channel); // Switch channel
+ RFSynthesizer_SwitchingChannel(pHwData, channel); /* Switch channel */
pHwData->Channel = channel.ChanNo;
pHwData->band = channel.band;
#ifdef _PE_STATE_DUMP_
printk("Set channel is %d, band =%d\n", pHwData->Channel,
pHwData->band);
#endif
- reg->M28_MacControl &= ~0xff; // Clean channel information field
+ reg->M28_MacControl &= ~0xff; /* Clean channel information field */
reg->M28_MacControl |= channel.ChanNo;
Wb35Reg_WriteWithCallbackValue(pHwData, 0x0828, reg->M28_MacControl,
- (s8 *) & channel, sizeof(struct chan_info));
+ (s8 *) &channel,
+ sizeof(struct chan_info));
}
static void hal_set_current_channel(struct hw_data *pHwData, struct chan_info channel)
@@ -188,21 +187,22 @@ static void hal_set_accept_broadcast(struct hw_data *pHwData, u8 enable)
if (pHwData->SurpriseRemove)
return;
- reg->M00_MacControl &= ~0x02000000; //The HW value
+ reg->M00_MacControl &= ~0x02000000; /* The HW value */
if (enable)
- reg->M00_MacControl |= 0x02000000; //The HW value
+ reg->M00_MacControl |= 0x02000000; /* The HW value */
Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl);
}
-//for wep key error detection, we need to accept broadcast packets to be received temporary.
+/* For wep key error detection, we need to temporarily accept broadcast packets. */
static void hal_set_accept_promiscuous(struct hw_data *pHwData, u8 enable)
{
struct wb35_reg *reg = &pHwData->reg;
if (pHwData->SurpriseRemove)
return;
+
if (enable) {
reg->M00_MacControl |= 0x00400000;
Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl);
@@ -219,9 +219,9 @@ static void hal_set_accept_multicast(struct hw_data *pHwData, u8 enable)
if (pHwData->SurpriseRemove)
return;
- reg->M00_MacControl &= ~0x01000000; //The HW value
+ reg->M00_MacControl &= ~0x01000000; /* The HW value */
if (enable)
- reg->M00_MacControl |= 0x01000000; //The HW value
+ reg->M00_MacControl |= 0x01000000; /* The HW value */
Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl);
}
@@ -232,13 +232,12 @@ static void hal_set_accept_beacon(struct hw_data *pHwData, u8 enable)
if (pHwData->SurpriseRemove)
return;
- // 20040108 debug
- if (!enable) //Due to SME and MLME are not suitable for 35
+ if (!enable) /* Due to SME and MLME are not suitable for 35 */
return;
- reg->M00_MacControl &= ~0x04000000; //The HW value
+ reg->M00_MacControl &= ~0x04000000; /* The HW value */
if (enable)
- reg->M00_MacControl |= 0x04000000; //The HW value
+ reg->M00_MacControl |= 0x04000000; /* The HW value */
Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl);
}
@@ -283,8 +282,7 @@ static const struct ieee80211_ops wbsoft_ops = {
.get_tsf = wbsoft_get_tsf,
};
-static void
-hal_set_ethernet_address(struct hw_data *pHwData, u8 * current_address)
+static void hal_set_ethernet_address(struct hw_data *pHwData, u8 *current_address)
{
u32 ltmp[2];
@@ -294,14 +292,12 @@ hal_set_ethernet_address(struct hw_data *pHwData, u8 * current_address)
memcpy(pHwData->CurrentMacAddress, current_address, ETH_ALEN);
ltmp[0] = cpu_to_le32(*(u32 *) pHwData->CurrentMacAddress);
- ltmp[1] =
- cpu_to_le32(*(u32 *) (pHwData->CurrentMacAddress + 4)) & 0xffff;
+ ltmp[1] = cpu_to_le32(*(u32 *) (pHwData->CurrentMacAddress + 4)) & 0xffff;
Wb35Reg_BurstWrite(pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT);
}
-static void
-hal_get_permanent_address(struct hw_data *pHwData, u8 * pethernet_address)
+static void hal_get_permanent_address(struct hw_data *pHwData, u8 *pethernet_address)
{
if (pHwData->SurpriseRemove)
return;
@@ -319,7 +315,7 @@ static void hal_stop(struct hw_data *pHwData)
pHwData->Wb35Tx.tx_halt = 1;
Wb35Tx_stop(pHwData);
- reg->D00_DmaControl &= ~0xc0000000; //Tx Off, Rx Off
+ reg->D00_DmaControl &= ~0xc0000000; /* Tx Off, Rx Off */
Wb35Reg_Write(pHwData, 0x0400, reg->D00_DmaControl);
}
@@ -346,14 +342,14 @@ u8 hal_get_antenna_number(struct hw_data *pHwData)
}
/* 0 : radio on; 1: radio off */
-static u8 hal_get_hw_radio_off(struct hw_data * pHwData)
+static u8 hal_get_hw_radio_off(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
if (pHwData->SurpriseRemove)
return 1;
- //read the bit16 of register U1B0
+ /* read the bit16 of register U1B0 */
Wb35Reg_Read(pHwData, 0x3b0, &reg->U1B0);
if ((reg->U1B0 & 0x00010000)) {
pHwData->CurrentRadioHw = 1;
@@ -387,104 +383,98 @@ static void hal_led_control(unsigned long data)
if (pHwData->LED_control) {
ltmp2 = pHwData->LED_control & 0xff;
- if (ltmp2 == 5) // 5 is WPS mode
- {
+ if (ltmp2 == 5) { /* 5 is WPS mode */
TimeInterval = 100;
ltmp2 = (pHwData->LED_control >> 8) & 0xff;
switch (ltmp2) {
- case 1: // [0.2 On][0.1 Off]...
+ case 1: /* [0.2 On][0.1 Off]... */
pHwData->LED_Blinking %= 3;
- ltmp = 0x1010; // Led 1 & 0 Green and Red
- if (pHwData->LED_Blinking == 2) // Turn off
+ ltmp = 0x1010; /* Led 1 & 0 Green and Red */
+ if (pHwData->LED_Blinking == 2) /* Turn off */
ltmp = 0;
break;
- case 2: // [0.1 On][0.1 Off]...
+ case 2: /* [0.1 On][0.1 Off]... */
pHwData->LED_Blinking %= 2;
- ltmp = 0x0010; // Led 0 red color
- if (pHwData->LED_Blinking) // Turn off
+ ltmp = 0x0010; /* Led 0 red color */
+ if (pHwData->LED_Blinking) /* Turn off */
ltmp = 0;
break;
- case 3: // [0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.5 Off]...
+ case 3: /* [0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.5 Off]... */
pHwData->LED_Blinking %= 15;
- ltmp = 0x0010; // Led 0 red color
- if ((pHwData->LED_Blinking >= 9) || (pHwData->LED_Blinking % 2)) // Turn off 0.6 sec
+ ltmp = 0x0010; /* Led 0 red color */
+ if ((pHwData->LED_Blinking >= 9) || (pHwData->LED_Blinking % 2)) /* Turn off 0.6 sec */
ltmp = 0;
break;
- case 4: // [300 On][ off ]
- ltmp = 0x1000; // Led 1 Green color
+ case 4: /* [300 On][ off ] */
+ ltmp = 0x1000; /* Led 1 Green color */
if (pHwData->LED_Blinking >= 3000)
- ltmp = 0; // led maybe on after 300sec * 32bit counter overlap.
+ ltmp = 0; /* LED may stay on after 300 sec if the 32-bit counter overlaps. */
break;
}
pHwData->LED_Blinking++;
reg->U1BC_LEDConfigure = ltmp;
- if (LEDSet != 7) // Only 111 mode has 2 LEDs on PCB.
- {
- reg->U1BC_LEDConfigure |= (ltmp & 0xff) << 8; // Copy LED result to each LED control register
+ if (LEDSet != 7) { /* Only 111 mode has 2 LEDs on PCB. */
+ reg->U1BC_LEDConfigure |= (ltmp & 0xff) << 8; /* Copy LED result to each LED control register */
reg->U1BC_LEDConfigure |= (ltmp & 0xff00) >> 8;
}
Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure);
}
- } else if (pHwData->CurrentRadioSw || pHwData->CurrentRadioHw) // If radio off
- {
+ } else if (pHwData->CurrentRadioSw || pHwData->CurrentRadioHw) { /* If radio off */
if (reg->U1BC_LEDConfigure & 0x1010) {
reg->U1BC_LEDConfigure &= ~0x1010;
Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure);
}
} else {
switch (LEDSet) {
- case 4: // [100] Only 1 Led be placed on PCB and use pin 21 of IC. Use LED_0 for showing
- if (!pHwData->LED_LinkOn) // Blink only if not Link On
- {
- // Blinking if scanning is on progress
+ case 4: /* [100] Only 1 LED is placed on the PCB, using pin 21 of the IC. Use LED_0 for indication */
+ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */
+ /* Blinking if scanning is in progress */
if (pHwData->LED_Scanning) {
if (pHwData->LED_Blinking == 0) {
reg->U1BC_LEDConfigure |= 0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 On
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 On */
pHwData->LED_Blinking = 1;
TimeInterval = 300;
} else {
reg->U1BC_LEDConfigure &= ~0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
pHwData->LED_Blinking = 0;
TimeInterval = 300;
}
} else {
- //Turn Off LED_0
+ /* Turn Off LED_0 */
if (reg->U1BC_LEDConfigure & 0x10) {
reg->U1BC_LEDConfigure &= ~0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
}
}
} else {
- // Turn On LED_0
+ /* Turn On LED_0 */
if ((reg->U1BC_LEDConfigure & 0x10) == 0) {
reg->U1BC_LEDConfigure |= 0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
}
}
break;
-
- case 6: // [110] Only 1 Led be placed on PCB and use pin 21 of IC. Use LED_0 for showing
- if (!pHwData->LED_LinkOn) // Blink only if not Link On
- {
- // Blinking if scanning is on progress
+ case 6: /* [110] Only 1 LED is placed on the PCB, using pin 21 of the IC. Use LED_0 for indication */
+ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */
+ /* Blinking if scanning is in progress */
if (pHwData->LED_Scanning) {
if (pHwData->LED_Blinking == 0) {
reg->U1BC_LEDConfigure &= ~0xf;
reg->U1BC_LEDConfigure |= 0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 On
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 On */
pHwData->LED_Blinking = 1;
TimeInterval = 300;
} else {
reg->U1BC_LEDConfigure &= ~0x1f;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
pHwData->LED_Blinking = 0;
TimeInterval = 300;
}
} else {
- // 20060901 Gray blinking if in disconnect state and not scanning
+ /* Gray blinking if in disconnect state and not scanning */
ltmp = reg->U1BC_LEDConfigure;
reg->U1BC_LEDConfigure &= ~0x1f;
if (LED_GRAY2[(pHwData->LED_Blinking % 30)]) {
@@ -494,85 +484,78 @@ static void hal_led_control(unsigned long data)
}
pHwData->LED_Blinking++;
if (reg->U1BC_LEDConfigure != ltmp)
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
TimeInterval = 100;
}
} else {
- // Turn On LED_0
+ /* Turn On LED_0 */
if ((reg->U1BC_LEDConfigure & 0x10) == 0) {
reg->U1BC_LEDConfigure |= 0x10;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_0 Off
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */
}
}
break;
-
- case 5: // [101] Only 1 Led be placed on PCB and use LED_1 for showing
- if (!pHwData->LED_LinkOn) // Blink only if not Link On
- {
- // Blinking if scanning is on progress
+ case 5: /* [101] Only 1 LED is placed on the PCB; use LED_1 for indication */
+ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */
+ /* Blinking if scanning is in progress */
if (pHwData->LED_Scanning) {
if (pHwData->LED_Blinking == 0) {
- reg->U1BC_LEDConfigure |=
- 0x1000;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_1 On
+ reg->U1BC_LEDConfigure |= 0x1000;
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */
pHwData->LED_Blinking = 1;
TimeInterval = 300;
} else {
- reg->U1BC_LEDConfigure &=
- ~0x1000;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_1 Off
+ reg->U1BC_LEDConfigure &= ~0x1000;
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 Off */
pHwData->LED_Blinking = 0;
TimeInterval = 300;
}
} else {
- //Turn Off LED_1
+ /* Turn Off LED_1 */
if (reg->U1BC_LEDConfigure & 0x1000) {
- reg->U1BC_LEDConfigure &=
- ~0x1000;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_1 Off
+ reg->U1BC_LEDConfigure &= ~0x1000;
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 Off */
}
}
} else {
- // Is transmitting/receiving ??
+ /* Is transmitting/receiving ?? */
if ((adapter->RxByteCount !=
pHwData->RxByteCountLast)
|| (adapter->TxByteCount !=
pHwData->TxByteCountLast)) {
if ((reg->U1BC_LEDConfigure & 0x3000) !=
0x3000) {
- reg->U1BC_LEDConfigure |=
- 0x3000;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_1 On
+ reg->U1BC_LEDConfigure |= 0x3000;
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */
}
- // Update variable
+ /* Update variable */
pHwData->RxByteCountLast =
adapter->RxByteCount;
pHwData->TxByteCountLast =
adapter->TxByteCount;
TimeInterval = 200;
} else {
- // Turn On LED_1 and blinking if transmitting/receiving
+ /* Turn On LED_1 and blinking if transmitting/receiving */
if ((reg->U1BC_LEDConfigure & 0x3000) !=
0x1000) {
reg->U1BC_LEDConfigure &=
~0x3000;
reg->U1BC_LEDConfigure |=
0x1000;
- Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); // LED_1 On
+ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */
}
}
}
break;
-
- default: // Default setting. 2 LED be placed on PCB. LED_0: Link On LED_1 Active
+ default: /* Default setting. 2 LEDs are placed on the PCB. LED_0: Link On, LED_1: Active */
if ((reg->U1BC_LEDConfigure & 0x3000) != 0x3000) {
- reg->U1BC_LEDConfigure |= 0x3000; // LED_1 is always on and event enable
+ reg->U1BC_LEDConfigure |= 0x3000; /* LED_1 is always on and event enable */
Wb35Reg_Write(pHwData, 0x03bc,
reg->U1BC_LEDConfigure);
}
if (pHwData->LED_Blinking) {
- // Gray blinking
+ /* Gray blinking */
reg->U1BC_LEDConfigure &= ~0x0f;
reg->U1BC_LEDConfigure |= 0x10;
reg->U1BC_LEDConfigure |=
@@ -584,7 +567,7 @@ static void hal_led_control(unsigned long data)
if (pHwData->LED_Blinking < 40)
TimeInterval = 100;
else {
- pHwData->LED_Blinking = 0; // Stop blinking
+ pHwData->LED_Blinking = 0; /* Stop blinking */
reg->U1BC_LEDConfigure &= ~0x0f;
Wb35Reg_Write(pHwData, 0x03bc,
reg->U1BC_LEDConfigure);
@@ -593,16 +576,14 @@ static void hal_led_control(unsigned long data)
}
if (pHwData->LED_LinkOn) {
- if (!(reg->U1BC_LEDConfigure & 0x10)) // Check the LED_0
- {
- //Try to turn ON LED_0 after gray blinking
+ if (!(reg->U1BC_LEDConfigure & 0x10)) { /* Check the LED_0 */
+ /* Try to turn ON LED_0 after gray blinking */
reg->U1BC_LEDConfigure |= 0x10;
- pHwData->LED_Blinking = 1; //Start blinking
+ pHwData->LED_Blinking = 1; /* Start blinking */
TimeInterval = 50;
}
} else {
- if (reg->U1BC_LEDConfigure & 0x10) // Check the LED_0
- {
+ if (reg->U1BC_LEDConfigure & 0x10) { /* Check the LED_0 */
reg->U1BC_LEDConfigure &= ~0x10;
Wb35Reg_Write(pHwData, 0x03bc,
reg->U1BC_LEDConfigure);
@@ -611,7 +592,7 @@ static void hal_led_control(unsigned long data)
break;
}
- //20060828.1 Active send null packet to avoid AP disconnect
+ /* Active send null packet to avoid AP disconnect */
if (pHwData->LED_LinkOn) {
pHwData->NullPacketCount += TimeInterval;
if (pHwData->NullPacketCount >=
@@ -622,7 +603,7 @@ static void hal_led_control(unsigned long data)
}
pHwData->time_count += TimeInterval;
- Wb35Tx_CurrentTime(adapter, pHwData->time_count); // 20060928 add
+ Wb35Tx_CurrentTime(adapter, pHwData->time_count);
pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(TimeInterval);
add_timer(&pHwData->LEDTimer);
}
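
hal_led_control() re-arms LEDTimer at the end of every pass, so the timer has to be seeded once, elsewhere, with this callback and its argument. A hedged sketch of that one-time setup using the timer API of this kernel generation; the function name and the cookie parameter are illustrative, and the real initialization lies outside this diff:

    /* Illustrative only: one-time setup for the self-rearming LED timer.
     * "cookie" stands for whatever hal_led_control() expects in its
     * unsigned long argument (the adapter in this driver).
     */
    static void start_led_timer(struct hw_data *pHwData, unsigned long cookie)
    {
            init_timer(&pHwData->LEDTimer);
            pHwData->LEDTimer.function = hal_led_control;
            pHwData->LEDTimer.data = cookie;
            pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(1000);
            add_timer(&pHwData->LEDTimer);
    }
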
@@ -654,7 +635,7 @@ static int hal_init_hardware(struct ieee80211_hw *hw)
SoftwareSet = hal_software_set(pHwData);
#ifdef Vendor2
- // Try to make sure the EEPROM contain
+ /* Try to make sure the EEPROM contain */
SoftwareSet >>= 8;
if (SoftwareSet != 0x82)
return false;
diff --git a/drivers/staging/winbond/wbusb_s.h b/drivers/staging/winbond/wbusb_s.h
index 0c7e6a383f2..8961ae594c4 100644
--- a/drivers/staging/winbond/wbusb_s.h
+++ b/drivers/staging/winbond/wbusb_s.h
@@ -1,16 +1,10 @@
-//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-// Copyright (c) 1996-2004 Winbond Electronic Corporation
-//
-// Module Name:
-// wbusb_s.h
-//
-// Abstract:
-// Linux driver.
-//
-// Author:
-//
-//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
+/* =========================================================
+ * Copyright (c) 1996-2004 Winbond Electronic Corporation
+ *
+ * Module Name:
+ * wbusb_s.h
+ * =========================================================
+ */
#ifndef __WINBOND_WBUSB_S_H
#define __WINBOND_WBUSB_S_H
@@ -18,8 +12,7 @@
struct wb_usb {
u32 IsUsb20;
- struct usb_device *udev;
+ struct usb_device *udev;
u32 DetectCount;
};
-
#endif
diff --git a/drivers/staging/wlags49_h2/Kconfig b/drivers/staging/wlags49_h2/Kconfig
index 92053fe7013..b6fc2ca7d85 100644
--- a/drivers/staging/wlags49_h2/Kconfig
+++ b/drivers/staging/wlags49_h2/Kconfig
@@ -1,6 +1,6 @@
config WLAGS49_H2
tristate "Agere Systems HERMES II Wireless PC Card Model 0110"
- depends on WLAN_80211 && WIRELESS_EXT && PCMCIA
+ depends on WLAN && WIRELESS_EXT && PCMCIA
select WEXT_SPY
---help---
Driver for wireless cards using Agere's HERMES II chipset
diff --git a/drivers/staging/wlags49_h2/README.wlags49 b/drivers/staging/wlags49_h2/README.wlags49
index 7586fd09adc..f65acd6f5f5 100644
--- a/drivers/staging/wlags49_h2/README.wlags49
+++ b/drivers/staging/wlags49_h2/README.wlags49
@@ -84,7 +84,7 @@ TABLE OF CONTENTS.
the functions to interface to the Network Interface Card (NIC). The HCF
provides for all WaveLAN NIC types one standard interface to the MSF.
This I/F is called the Wireless Connection Interface (WCI) and is the
- subject of a seperate document (025726).
+ subject of a separate document (025726).
The HCF directory contains firmware images to allow the card to operate in
either station (STA) or Access Point (AP) mode. In the build process, the
diff --git a/drivers/staging/wlags49_h2/ap_h2.c b/drivers/staging/wlags49_h2/ap_h2.c
index f5123d2cb4c..eb8244c4d6f 100644
--- a/drivers/staging/wlags49_h2/ap_h2.c
+++ b/drivers/staging/wlags49_h2/ap_h2.c
@@ -25,10 +25,10 @@
*/
-#include "hcfcfg.h" // to get hcf_16 etc defined as well as
- // possible settings which inluence mdd.h or dhf.h
-#include "mdd.h" //to get COMP_ID_STA etc defined
-#include "dhf.h" //used to be "fhfmem.h", to get memblock,plugrecord,
+#include "hcfcfg.h" /* to get hcf_16 etc defined as well as */
+ /* possible settings which influence mdd.h or dhf.h */
+#include "mdd.h" /* to get COMP_ID_STA etc defined */
+#include "dhf.h" /* used to be "fhfmem.h", to get memblock,plugrecord, */
static const hcf_8 fw_image_1_data[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3238,59 +3238,59 @@ static const hcf_8 fw_image_4_data[] = {
static const CFG_IDENTITY_STRCT fw_image_infoidentity[] = {
{
- sizeof( CFG_IDENTITY_STRCT ) / sizeof(hcf_16) - 1,
+ sizeof(CFG_IDENTITY_STRCT) / sizeof(hcf_16) - 1,
CFG_FW_IDENTITY,
COMP_ID_FW_AP,
- 2, //Variant
- 2, //Major
- 36 //Minor
+ 2, /* Variant */
+ 2, /* Major */
+ 36 /* Minor */
},
- { 0000, 0000, 0000, 0000, 0000, 0000 } //endsentinel
+ { 0000, 0000, 0000, 0000, 0000, 0000 } /* endsentinel */
};
static const CFG_PROG_STRCT fw_image_code[] = {
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x0146, // sizeof(fw_image_1_data),
- 0x00000060, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_1_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x0146, /* sizeof(fw_image_1_data), */
+ 0x00000060, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 FAR *) fw_image_1_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x1918, // sizeof(fw_image_2_data),
- 0x00000C16, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_2_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x1918, /* sizeof(fw_image_2_data), */
+ 0x00000C16, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 FAR *) fw_image_2_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0x01bc, // sizeof(fw_image_3_data),
- 0x001E252E, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_3_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0x01bc, /* sizeof(fw_image_3_data), */
+ 0x001E252E, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 FAR *) fw_image_3_data
},
{
8,
CFG_PROG,
- CFG_PROG_VOLATILE, // mode
- 0xab28, // sizeof(fw_image_4_data),
- 0x001F4000, // Target address in NIC Memory
- 0x0000, // CRC: yes/no TYPE: primary/station/tertiary
- (hcf_8 FAR *) fw_image_4_data
+ CFG_PROG_VOLATILE, /* mode */
+ 0xab28, /* sizeof(fw_image_4_data), */
+ 0x001F4000, /* Target address in NIC Memory */
+ 0x0000, /* CRC: yes/no TYPE: primary/station/tertiary */
+ (hcf_8 FAR *) fw_image_4_data
},
{
5,
CFG_PROG,
- CFG_PROG_STOP, // mode
+ CFG_PROG_STOP, /* mode */
0000,
- 0x000F1297, // Start execution address
+ 0x000F1297, /* Start execution address */
},
{ 0000, 0000, 0000, 0000, 00000000, 0000, 00000000}
};
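
fw_image_code[] is a zero-length-terminated list of CFG_PROG_STRCT download segments; dhf_download_fw() in dhf.c (later in this diff) walks such a list with PUT_INFO() until it reaches the terminator. A minimal sketch of that walking pattern with a hypothetical per-record handler:

    /* Illustrative only: visit every program record up to the terminator. */
    static int walk_prog_records(const CFG_PROG_STRCT *p,
                                 int (*handle)(const CFG_PROG_STRCT *rec))
    {
            int rc = HCF_SUCCESS;

            while (rc == HCF_SUCCESS && p->len) {   /* len == 0 terminates */
                    rc = handle(p);
                    p++;
            }
            return rc;
    }
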
@@ -3301,7 +3301,7 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_SUPL,
COMP_ID_APF,
{
- { 2, 2, 4 } //variant, bottom, top
+ { 2, 2, 4 } /* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -3309,9 +3309,9 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_MFI,
{
- { 4, 6, 7 }, //variant, bottom, top
- { 5, 6, 7 }, //variant, bottom, top
- { 6, 6, 7 } //variant, bottom, top
+ { 4, 6, 7 }, /* variant, bottom, top */
+ { 5, 6, 7 }, /* variant, bottom, top */
+ { 6, 6, 7 } /* variant, bottom, top */
}
},
{ 3 + ((20 * sizeof(CFG_RANGE_SPEC_STRCT)) / sizeof(hcf_16)),
@@ -3319,18 +3319,18 @@ static const CFG_RANGE20_STRCT fw_image_infocompat[] = {
COMP_ROLE_ACT,
COMP_ID_CFI,
{
- { 2, 1, 2 } //variant, bottom, top
+ { 2, 1, 2 } /* variant, bottom, top */
}
},
- { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } //endsentinel
+ { 0000, 0000, 0000, 0000, { { 0000, 0000, 0000 } } } /* endsentinel */
};
memimage fw_image = {
- "FUPU7D37dhfwci\001C", //signature, <format number>, C/Bin type
+ "FUPU7D37dhfwci\001C", /* signature, <format number>, C/Bin type */
(CFG_PROG_STRCT *) fw_image_code,
0x000F1297,
- 00000000, //(dummy) pdaplug
- 00000000, //(dummy) priplug
+ 00000000, /* (dummy) pdaplug */
+ 00000000, /* (dummy) priplug */
(CFG_RANGE20_STRCT *) fw_image_infocompat,
(CFG_IDENTITY_STRCT *) fw_image_infoidentity,
};
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 0b52e17b301..2c3dd140a35 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright (c) 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -70,7 +70,7 @@
#else
#undef DBG
#define DBG 1
-#endif //DBG
+#endif /* DBG */
@@ -84,7 +84,7 @@
#ifndef DBG_LVL
#define DBG_LVL 5 /* yields nothing via init_module,
original value of 5 yields DBG_TRACE_ON and DBG_VERBOSE_ON */
-#endif // DBG_LVL
+#endif /* DBG_LVL */
#define DBG_ERROR_ON 0x00000001L
@@ -100,80 +100,94 @@
#define DBG_DEFAULTS (DBG_ERROR_ON | DBG_WARNING_ON | DBG_BREAK_ON)
-#define DBG_FLAGS(A) (A)->DebugFlag
-#define DBG_NAME(A) (A)->dbgName
-#define DBG_LEVEL(A) (A)->dbgLevel
+#define DBG_FLAGS(A) ((A)->DebugFlag)
+#define DBG_NAME(A) ((A)->dbgName)
+#define DBG_LEVEL(A) ((A)->dbgLevel)
#ifndef PRINTK
# define PRINTK(S...) printk(S)
-#endif // PRINTK
+#endif /* PRINTK */
#ifndef DBG_PRINT
# define DBG_PRINT(S...) PRINTK(KERN_DEBUG S)
-#endif // DBG_PRINT
+#endif /* DBG_PRINT */
#ifndef DBG_PRINTC
# define DBG_PRINTC(S...) PRINTK(S)
-#endif // DBG_PRINTC
+#endif /* DBG_PRINTC */
#ifndef DBG_TRAP
# define DBG_TRAP {}
-#endif // DBG_TRAP
+#endif /* DBG_TRAP */
#define _ENTER_STR ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
#define _LEAVE_STR "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
-#define _DBG_ENTER(A) DBG_PRINT("%s:%.*s:%s\n",DBG_NAME(A),++DBG_LEVEL(A),_ENTER_STR,__FUNC__)
-#define _DBG_LEAVE(A) DBG_PRINT("%s:%.*s:%s\n",DBG_NAME(A),DBG_LEVEL(A)--,_LEAVE_STR,__FUNC__)
+#define _DBG_ENTER(A) DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), ++DBG_LEVEL(A), _ENTER_STR, __FUNC__)
+#define _DBG_LEAVE(A) DBG_PRINT("%s:%.*s:%s\n", DBG_NAME(A), DBG_LEVEL(A)--, _LEAVE_STR, __FUNC__)
#define DBG_FUNC(F) static const char *__FUNC__ = F;
-#define DBG_ENTER(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) _DBG_ENTER(A);}
+#define DBG_ENTER(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
+ _DBG_ENTER(A); }
-#define DBG_LEAVE(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) _DBG_LEAVE(A);}
+#define DBG_LEAVE(A) {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
+ _DBG_LEAVE(A); }
-#define DBG_PARAM(A,N,F,S...) {if (DBG_FLAGS(A) & DBG_PARAM_ON) \
- DBG_PRINT(" %s -- "F"\n",N,S);}
+#define DBG_PARAM(A, N, F, S...) {if (DBG_FLAGS(A) & DBG_PARAM_ON) \
+ DBG_PRINT(" %s -- "F"\n", N, S); }
-#define DBG_ERROR(A,S...) {if (DBG_FLAGS(A) & DBG_ERROR_ON) \
- {DBG_PRINT("%s:ERROR:%s ",DBG_NAME(A),__FUNC__);DBG_PRINTC(S);DBG_TRAP;}}
+#define DBG_ERROR(A, S...) {if (DBG_FLAGS(A) & DBG_ERROR_ON) {\
+ DBG_PRINT("%s:ERROR:%s ", DBG_NAME(A), __FUNC__);\
+ DBG_PRINTC(S); \
+ DBG_TRAP; \
+ } \
+ }
-#define DBG_WARNING(A,S...) {if (DBG_FLAGS(A) & DBG_WARNING_ON) \
- {DBG_PRINT("%s:WARNING:%s ",DBG_NAME(A),__FUNC__);DBG_PRINTC(S);}}
+#define DBG_WARNING(A, S...) {if (DBG_FLAGS(A) & DBG_WARNING_ON) {\
+ DBG_PRINT("%s:WARNING:%s ", DBG_NAME(A), __FUNC__);\
+ DBG_PRINTC(S); } }
-#define DBG_NOTICE(A,S...) {if (DBG_FLAGS(A) & DBG_NOTICE_ON) \
- {DBG_PRINT("%s:NOTICE:%s ",DBG_NAME(A),__FUNC__);DBG_PRINTC(S);}}
+#define DBG_NOTICE(A, S...) {if (DBG_FLAGS(A) & DBG_NOTICE_ON) {\
+ DBG_PRINT("%s:NOTICE:%s ", DBG_NAME(A), __FUNC__);\
+ DBG_PRINTC(S); \
+ } \
+ }
-#define DBG_TRACE(A,S...) do {if (DBG_FLAGS(A) & DBG_TRACE_ON) \
- {DBG_PRINT("%s:%s ",DBG_NAME(A),__FUNC__);DBG_PRINTC(S);}} while (0)
+#define DBG_TRACE(A, S...) do {if (DBG_FLAGS(A) & DBG_TRACE_ON) {\
+ DBG_PRINT("%s:%s ", DBG_NAME(A), __FUNC__);\
+ DBG_PRINTC(S); } } while (0)
-#define DBG_RX(A,S...) {if (DBG_FLAGS(A) & DBG_RX_ON) \
- {DBG_PRINT(S);}}
+#define DBG_RX(A, S...) {if (DBG_FLAGS(A) & DBG_RX_ON) {\
+ DBG_PRINT(S); } }
-#define DBG_TX(A,S...) {if (DBG_FLAGS(A) & DBG_TX_ON) \
- {DBG_PRINT(S);}}
+#define DBG_TX(A, S...) {if (DBG_FLAGS(A) & DBG_TX_ON) {\
+ DBG_PRINT(S); } }
-#define DBG_DS(A,S...) {if (DBG_FLAGS(A) & DBG_DS_ON) \
- {DBG_PRINT(S);}}
+#define DBG_DS(A, S...) {if (DBG_FLAGS(A) & DBG_DS_ON) {\
+ DBG_PRINT(S); } }
-#define DBG_ASSERT(C) {if (!(C)) \
- {DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
- #C,__FILE__,__LINE__,__FUNC__); \
- DBG_TRAP;}}
+#define DBG_ASSERT(C) { \
+ if (!(C)) {\
+ DBG_PRINT("ASSERT(%s) -- %s#%d (%s)\n", \
+ #C, __FILE__, __LINE__, __FUNC__); \
+ DBG_TRAP; \
+ } \
+ }
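
Usage of the reworked macros is unchanged: DBG_FUNC declares __FUNC__, DBG_ENTER/DBG_LEAVE bracket the function, and the other macros gate on the flag word in the debug structure declared just below. A short usage sketch; the type name dbg_info_t and the wl_example function are assumptions, since the typedef's closing name falls outside this hunk:

    /* Illustrative only: typical use of the macros above.  dbg_info_t is
     * assumed to be the name given to the typedef below.
     */
    static int wl_example(dbg_info_t *dbg, int channel)
    {
            DBG_FUNC("wl_example")
            DBG_ENTER(dbg);

            if (channel < 1 || channel > 14) {
                    DBG_WARNING(dbg, "channel %d out of range\n", channel);
            }

            DBG_LEAVE(dbg);
            return 0;
    }
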
typedef struct {
char *dbgName;
@@ -183,7 +197,7 @@ typedef struct {
/****************************************************************************/
-#else // DBG
+#else /* DBG */
/****************************************************************************/
#define DBG_DEFN
@@ -192,21 +206,21 @@ typedef struct {
#define DBG_PRINT(S...)
#define DBG_ENTER(A)
#define DBG_LEAVE(A)
-#define DBG_PARAM(A,N,F,S...)
-#define DBG_ERROR(A,S...)
-#define DBG_WARNING(A,S...)
-#define DBG_NOTICE(A,S...)
-#define DBG_TRACE(A,S...)
-#define DBG_RX(A,S...)
-#define DBG_TX(A,S...)
-#define DBG_DS(A,S...)
+#define DBG_PARAM(A, N, F, S...)
+#define DBG_ERROR(A, S...)
+#define DBG_WARNING(A, S...)
+#define DBG_NOTICE(A, S...)
+#define DBG_TRACE(A, S...)
+#define DBG_RX(A, S...)
+#define DBG_TX(A, S...)
+#define DBG_DS(A, S...)
#define DBG_ASSERT(C)
-#endif // DBG
+#endif /* DBG */
/****************************************************************************/
-#endif // _DEBUG_H
+#endif /* _DEBUG_H */
diff --git a/drivers/staging/wlags49_h2/dhf.c b/drivers/staging/wlags49_h2/dhf.c
index b6f5834b1af..bb80b547cc1 100644
--- a/drivers/staging/wlags49_h2/dhf.c
+++ b/drivers/staging/wlags49_h2/dhf.c
@@ -1,5 +1,5 @@
-// vim:tw=110:ts=4:
+/* vim:tw=110:ts=4: */
/**************************************************************************************************************
*
* FILE : DHF.C
@@ -54,8 +54,8 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT © 1999 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT (C) 1999 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT (C) 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -97,7 +97,7 @@
#include "dhf.h"
#include "mmd.h"
-//to distinguish MMD from HCF asserts by means of line number
+/* to distinguish MMD from HCF asserts by means of line number */
#undef FILE_NAME_OFFSET
#define FILE_NAME_OFFSET MMD_FILE_NAME_OFFSET
/*-----------------------------------------------------------------------------
@@ -106,23 +106,9 @@
*
*---------------------------------------------------------------------------*/
-// 12345678901234
+/* 12345678901234 */
char signature[14] = "FUPU7D37dhfwci";
-//The binary download function "relocates" the image using constructions like:
-// fw->identity = (CFG_IDENTITY_STRCT FAR *)((char FAR *)fw->identity + (hcf_32)fw );
-//under some of the memory models under MSVC 1.52 these constructions degrade to 16-bits pointer arithmetic.
-//fw->identity is limited, such that adding it to fw, does not need to carry over from offset to segment.
-//However the segment is not set at all.
-//As a workaround the PSEUDO_CHARP macro is introduced which is a char pointer except for MSVC 1.52, in
-//which case we know that a 32-bit quantity is adequate as a pointer.
-//Note that other platforms may experience comparable problems when using the binary download feature.
-#if defined(_MSC_VER) && _MSC_VER == 800 // Visual C++ 1.5
-#define PSEUDO_CHARP hcf_32
-#else
-#define PSEUDO_CHARP hcf_8*
-#endif
-
/*-----------------------------------------------------------------------------
*
* LTV-records retrieved from the NIC to:
@@ -132,12 +118,12 @@ char signature[14] = "FUPU7D37dhfwci";
*
*---------------------------------------------------------------------------*/
-// for USB/H1 we needed a smaller value than the CFG_DL_BUF_STRCT reported 8192
-// for the time being it seems simpler to always use 2000 for USB/H1 as well as all other cases rather than
-// using the "fixed anyway" CFG_DL_BUF_STRCT.
+/* for USB/H1 we needed a smaller value than the CFG_DL_BUF_STRCT reported 8192
+ for the time being it seems simpler to always use 2000 for USB/H1 as well as all other cases rather than
+ using the "fixed anyway" CFG_DL_BUF_STRCT. */
#define DL_SIZE 2000
-//CFG_IDENTITY_STRCT pri_identity = { LOF(CFG_IDENTITY_STRCT), CFG_PRI_IDENTITY };
+/* CFG_IDENTITY_STRCT pri_identity = { LOF(CFG_IDENTITY_STRCT), CFG_PRI_IDENTITY }; */
CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
/* Note: could be used rather than the above explained and defined DL_SIZE if need arises
@@ -164,7 +150,7 @@ LTV_INFO_STRUCT ltv_info[] = {
/***********************************************************************************************************/
/*************************************** PROTOTYPES ******************************************************/
/***********************************************************************************************************/
-static int check_comp_fw( memimage *fw );
+static int check_comp_fw(memimage *fw);
/************************************************************************************************************
@@ -185,18 +171,18 @@ static int check_comp_fw( memimage *fw );
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
-check_comp_fw( memimage *fw )
+check_comp_fw(memimage *fw)
{
CFG_RANGE20_STRCT *p;
int rc = HCF_SUCCESS;
-CFG_RANGE_SPEC_STRCT* i;
+CFG_RANGE_SPEC_STRCT *i;
- switch( fw->identity->typ ) {
- case CFG_FW_IDENTITY: //Station F/W
- case COMP_ID_FW_AP_FAKE: //;?is this useful (used to be: CFG_AP_IDENTITY)
+ switch (fw->identity->typ) {
+ case CFG_FW_IDENTITY: /* Station F/W */
+ case COMP_ID_FW_AP_FAKE: /* ;?is this useful (used to be: CFG_AP_IDENTITY) */
break;
- default:
- MMDASSERT( DO_ASSERT, fw->identity->typ ) //unknown/unsupported firmware_type:
+ default:
+ MMDASSERT(DO_ASSERT, fw->identity->typ) /* unknown/unsupported firmware_type: */
rc = DHF_ERR_INCOMP_FW;
return rc; /* ;? how useful is this anyway,
* till that is sorted out might as well violate my own single exit principle
@@ -204,29 +190,29 @@ CFG_RANGE_SPEC_STRCT* i;
}
p = fw->compat;
i = NULL;
- while( p->len && i == NULL ) { // check the MFI ranges
- if ( p->typ == CFG_MFI_ACT_RANGES_STA ) {
- i = mmd_check_comp( (void*)p, &mfi_sup );
+ while (p->len && i == NULL) { /* check the MFI ranges */
+ if (p->typ == CFG_MFI_ACT_RANGES_STA) {
+ i = mmd_check_comp((void *)p, &mfi_sup);
}
p++;
}
- MMDASSERT( i, 0 ) //MFI: NIC Supplier not compatible with F/W image Actor
- if ( i ) {
+ MMDASSERT(i, 0) /* MFI: NIC Supplier not compatible with F/W image Actor */
+ if (i) {
p = fw->compat;
i = NULL;
- while ( p->len && i == NULL ) { // check the CFI ranges
- if ( p->typ == CFG_CFI_ACT_RANGES_STA ) {
- i = mmd_check_comp( (void*)p, &cfi_sup );
+ while (p->len && i == NULL) { /* check the CFI ranges */
+ if (p->typ == CFG_CFI_ACT_RANGES_STA) {
+ i = mmd_check_comp((void *)p, &cfi_sup);
}
p++;
}
- MMDASSERT( i, 0 ) //CFI: NIC Supplier not compatible with F/W image Actor
+ MMDASSERT(i, 0) /* CFI: NIC Supplier not compatible with F/W image Actor */
}
- if ( i == NULL ) {
+ if (i == NULL) {
rc = DHF_ERR_INCOMP_FW;
}
return rc;
-} // check_comp_fw
+} /* check_comp_fw */
@@ -271,31 +257,34 @@ CFG_RANGE_SPEC_STRCT* i;
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
-dhf_download_binary( memimage *fw )
+dhf_download_binary(memimage *fw)
{
int rc = HCF_SUCCESS;
CFG_PROG_STRCT *p;
int i;
- //validate the image
- for ( i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++ ) /*NOP*/;
- if ( i != sizeof(signature) ||
+ /* validate the image */
+ for (i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++)
+ ; /* NOP */
+ if (i != sizeof(signature) ||
fw->signature[i] != 0x01 ||
- //test for Little/Big Endian Binary flag
- fw->signature[i+1] != ( /*HCF_BIG_ENDIAN ? 'B' : */ 'L' ) ) rc = DHF_ERR_INCOMP_FW;
- else { //Little Endian Binary format
- fw->codep = (CFG_PROG_STRCT FAR *)((PSEUDO_CHARP)fw->codep + (hcf_32)fw );
- fw->identity = (CFG_IDENTITY_STRCT FAR *)((PSEUDO_CHARP)fw->identity + (hcf_32)fw );
- fw->compat = (CFG_RANGE20_STRCT FAR *)((PSEUDO_CHARP)fw->compat + (hcf_32)fw );
- for ( i = 0; fw->p[i]; i++ ) fw->p[i] = ((PSEUDO_CHARP)fw->p[i] + (hcf_32)fw );
+ /* test for Little/Big Endian Binary flag */
+ fw->signature[i+1] != (/* HCF_BIG_ENDIAN ? 'B' : */ 'L'))
+ rc = DHF_ERR_INCOMP_FW;
+ else { /* Little Endian Binary format */
+ fw->codep = (CFG_PROG_STRCT FAR*)((char *)fw->codep + (hcf_32)fw);
+ fw->identity = (CFG_IDENTITY_STRCT FAR*)((char *)fw->identity + (hcf_32)fw);
+ fw->compat = (CFG_RANGE20_STRCT FAR*)((char *)fw->compat + (hcf_32)fw);
+ for (i = 0; fw->p[i]; i++)
+ fw->p[i] = ((char *)fw->p[i] + (hcf_32)fw);
p = fw->codep;
- while ( p->len ) {
- p->host_addr = (PSEUDO_CHARP)p->host_addr + (hcf_32)fw;
+ while (p->len) {
+ p->host_addr = (char *)p->host_addr + (hcf_32)fw;
p++;
}
}
return rc;
-} // dhf_download_binary
+} /* dhf_download_binary */
/*************************************************************************************************************
@@ -351,7 +340,7 @@ int i;
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
-dhf_download_fw( void *ifbp, memimage *fw )
+dhf_download_fw(void *ifbp, memimage *fw)
{
int rc = HCF_SUCCESS;
LTV_INFO_STRUCT_PTR pp = ltv_info;
@@ -359,32 +348,34 @@ CFG_PROG_STRCT *p = fw->codep;
LTVP ltvp;
int i;
- MMDASSERT( fw != NULL, 0 )
- //validate the image
- for ( i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++ ) /*NOP*/;
- if ( i != sizeof(signature) ||
+ MMDASSERT(fw != NULL, 0)
+ /* validate the image */
+ for (i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++)
+ ; /* NOP */
+ if (i != sizeof(signature) ||
fw->signature[i] != 0x01 ||
- //check for binary image
- ( fw->signature[i+1] != 'C' && fw->signature[i+1] != ( /*HCF_BIG_ENDIAN ? 'B' : */ 'L' ) ) )
+ /* check for binary image */
+ (fw->signature[i+1] != 'C' && fw->signature[i+1] != (/*HCF_BIG_ENDIAN ? 'B' : */ 'L')))
rc = DHF_ERR_INCOMP_FW;
-// Retrieve all information needed for download from the NIC
- while ( ( rc == HCF_SUCCESS ) && ( ( ltvp = pp->ltvp) != NULL ) ) {
- ltvp->len = pp++->len; // Set len to original len. This len is changed to real len by GET_INFO()
- rc = GET_INFO( ltvp );
- MMDASSERT( rc == HCF_SUCCESS, rc )
- MMDASSERT( rc == HCF_SUCCESS, ltvp->typ )
- MMDASSERT( rc == HCF_SUCCESS, ltvp->len )
+/* Retrieve all information needed for download from the NIC */
+ while ((rc == HCF_SUCCESS) && ((ltvp = pp->ltvp) != NULL)) {
+ ltvp->len = pp++->len; /* Set len to original len. This len is changed to real len by GET_INFO() */
+ rc = GET_INFO(ltvp);
+ MMDASSERT(rc == HCF_SUCCESS, rc)
+ MMDASSERT(rc == HCF_SUCCESS, ltvp->typ)
+ MMDASSERT(rc == HCF_SUCCESS, ltvp->len)
}
- if ( rc == HCF_SUCCESS ) rc = check_comp_fw( fw );
- if ( rc == HCF_SUCCESS ) {
- while ( rc == HCF_SUCCESS && p->len ) {
- rc = PUT_INFO( p );
+ if (rc == HCF_SUCCESS)
+ rc = check_comp_fw(fw);
+ if (rc == HCF_SUCCESS) {
+ while (rc == HCF_SUCCESS && p->len) {
+ rc = PUT_INFO(p);
p++;
}
}
- MMDASSERT( rc == HCF_SUCCESS, rc )
+ MMDASSERT(rc == HCF_SUCCESS, rc)
return rc;
-} // dhf_download_fw
+} /* dhf_download_fw */
diff --git a/drivers/staging/wlags49_h2/dhf.h b/drivers/staging/wlags49_h2/dhf.h
index c071f342a65..dbe0611fd03 100644
--- a/drivers/staging/wlags49_h2/dhf.h
+++ b/drivers/staging/wlags49_h2/dhf.h
@@ -1,5 +1,5 @@
-// vim:tw=110:ts=4:
+/* vim:tw=110:ts=4: */
#ifndef DHF_H
#define DHF_H
@@ -38,9 +38,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT © 1999 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT (C) 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT (C) 1999 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT (C) 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -82,27 +82,27 @@
#include <windef.h>
#endif
-#include "hcf.h" // includes HCFCFG.H too
+#include "hcf.h" /* includes HCFCFG.H too */
#ifdef DHF_UIL
-#define GET_INFO( pp ) uil_get_info( (LTVP)pp )
-#define PUT_INFO( pp ) uil_put_info( (LTVP)pp )
+#define GET_INFO(pp) uil_get_info((LTVP)pp)
+#define PUT_INFO(pp) uil_put_info((LTVP)pp)
#else
-#define GET_INFO( pp ) hcf_get_info( ifbp, (LTVP)pp )
-#define PUT_INFO( pp ) hcf_put_info( ifbp, (LTVP)pp )
+#define GET_INFO(pp) hcf_get_info(ifbp, (LTVP)pp)
+#define PUT_INFO(pp) hcf_put_info(ifbp, (LTVP)pp)
#endif
/*---- Defines --------------------------------------------------------------*/
-#define CODEMASK 0x0000FFFFL // Codemask for plug records
+#define CODEMASK 0x0000FFFFL /* Codemask for plug records */
/*---- Error numbers --------------------------------------------------------*/
-#define DHF_ERR_INCOMP_FW 0x40 //Image not compatible with NIC
+#define DHF_ERR_INCOMP_FW 0x40 /* Image not compatible with NIC */
/*---- Type definitions -----------------------------------------------------*/
-//* needed by dhf_wrap.c
-//
+/* needed by dhf_wrap.c */
+
typedef struct {
LTVP ltvp;
hcf_16 len;
@@ -119,9 +119,9 @@ typedef struct {
*/
typedef struct {
- hcf_32 code; // Code to plug
- hcf_32 addr; // Address within the memory image to plug it in
- hcf_32 len; // The # of bytes which are available to store it
+ hcf_32 code; /* Code to plug */
+ hcf_32 addr; /* Address within the memory image to plug it in */
+ hcf_32 len; /* The # of bytes which are available to store it */
} plugrecord;
/*
@@ -159,7 +159,7 @@ typedef struct {
char str[MAX_DEBUGEXPORT_LEN];
} exportrecord;
-// Offsets in memimage array p[]
+/* Offsets in memimage array p[] */
#define FWSTRINGS_FUNCTION 0
#define FWEXPORTS_FUNCTION 1
@@ -188,13 +188,13 @@ typedef struct {
* The end of the array is indicated by a plug record of which all fields are zero.
*/
typedef struct {
- char signature[14+1+1]; // signature (see DHF.C) + C/LE-Bin/BE-Bin-flag + format version
- CFG_PROG_STRCT FAR *codep; //
- hcf_32 execution; // Execution address of the firmware
- void FAR *place_holder_1;
+ char signature[14+1+1]; /* signature (see DHF.C) + C/LE-Bin/BE-Bin-flag + format version */
+ CFG_PROG_STRCT FAR *codep; /* */
+ hcf_32 execution; /* Execution address of the firmware */
+ void FAR *place_holder_1;
void FAR *place_holder_2;
- CFG_RANGE20_STRCT FAR *compat; // Pointer to the compatibility info records
- CFG_IDENTITY_STRCT FAR *identity; // Pointer to the identity info records
+ CFG_RANGE20_STRCT FAR *compat; /* Pointer to the compatibility info records */
+ CFG_IDENTITY_STRCT FAR *identity; /* Pointer to the identity info records */
void FAR *p[2]; /* (Up to 9) pointers for (future) expansion
* currently in use:
* - F/W printf information
@@ -209,8 +209,8 @@ typedef struct {
*
*---------------------------------------------------------------------------*/
-EXTERN_C int dhf_download_fw( void *ifbp, memimage *fw ); // ifbp, ignored when using the UIL
-EXTERN_C int dhf_download_binary( memimage *fw );
+EXTERN_C int dhf_download_fw(void *ifbp, memimage *fw); /* ifbp, ignored when using the UIL */
+EXTERN_C int dhf_download_binary(memimage *fw);
/*-----------------------------------------------------------------------------
@@ -219,8 +219,8 @@ EXTERN_C int dhf_download_binary( memimage *fw );
*
*---------------------------------------------------------------------------*/
-// defined in DHF.C; see there for comments
-EXTERN_C hcf_16 *find_record_in_pda( hcf_16 *pdap, hcf_16 code );
+/* defined in DHF.C; see there for comments */
+EXTERN_C hcf_16 *find_record_in_pda(hcf_16 *pdap, hcf_16 code);
-#endif // DHF_H
+#endif /* DHF_H */
diff --git a/drivers/staging/wlags49_h2/dhfcfg.h b/drivers/staging/wlags49_h2/dhfcfg.h
index a0c26c678c5..75c279f268a 100644
--- a/drivers/staging/wlags49_h2/dhfcfg.h
+++ b/drivers/staging/wlags49_h2/dhfcfg.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright © 2003 Agere Systems Inc.
+ * Copyright (c) 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -77,82 +77,82 @@
*---------------------------------------------------------------------------*/
-// Define DHF_WCI if you want to use the WCI to access the ORiNOCO card.
-// Define DHF_UIL if you want to use the UIL to access the ORiNOCO card.
-// You must define either DHF_WCI or DHF_UIL. If neither of the two is defined
-// or both a compile error is generated.
+/* Define DHF_WCI if you want to use the WCI to access the ORiNOCO card.
+   Define DHF_UIL if you want to use the UIL to access the ORiNOCO card.
+   You must define either DHF_WCI or DHF_UIL. If neither or both of them
+   are defined, a compile error is generated. */
#define DHF_WCI
-//!!!#define DHF_UIL
+/* !!!#define DHF_UIL */
-// Define DHF_BIG_ENDIAN if you are working on a big endian platform.
-// Define DHF_LITTLE_ENDIAN if you are working on a little endian platform.
-// You must define either DHF_BIG_ENDIAN or DHF_LITTLE_ENDIAN. If neither of
-// the two is defined or both a compile error is generated.
+/* Define DHF_BIG_ENDIAN if you are working on a big endian platform.
+   Define DHF_LITTLE_ENDIAN if you are working on a little endian platform.
+   You must define either DHF_BIG_ENDIAN or DHF_LITTLE_ENDIAN. If neither
+   or both of them are defined, a compile error is generated. */
#ifdef USE_BIG_ENDIAN
#define DHF_BIG_ENDIAN
#else
#define DHF_LITTLE_ENDIAN
#endif /* USE_BIG_ENDIAN */
-// Define DHF_WIN if you are working on Windows platform.
-// Define DHF_DOS if you are working on DOS.
-// You must define either DHF_WIN or DHF_DOS. If neither of
-// the two is defined or both a compile error is generated.
-//!!!#define DHF_WIN
-//!!!#define DHF_DOS
+/* Define DHF_WIN if you are working on a Windows platform.
+   Define DHF_DOS if you are working on DOS.
+   You must define either DHF_WIN or DHF_DOS. If neither or both
+   of them are defined, a compile error is generated.
+   !!!#define DHF_WIN
+   !!!#define DHF_DOS */
-// Define if you want the DHF to users. Not defining DHF_GET_RES_MSG
-// leads to a decrease in code size as message strings are not included.
-//!!!#define DHF_GET_RES_MSG
+/* Define DHF_GET_RES_MSG if you want the DHF to return result messages to users.
+   Not defining DHF_GET_RES_MSG leads to a decrease in code size as message strings are not included.
+   !!!#define DHF_GET_RES_MSG */
-// Linux driver specific
-// Prevent inclusion of stdlib.h and string.h
+/* Linux driver specific
+ Prevent inclusion of stdlib.h and string.h */
#define _INC_STDLIB
#define _INC_STRING
-//-----------------------------------------------------------------------------
-// Define one or more of the following DSF #defines if you want to implement
-// the related DSF-function. Function dsf_callback must allways be implemented.
-// See file DHF.H for prototypes of the functions.
-
-// Define DSF_ALLOC if you want to manage memory allocation and de-allocation
-// for the DHF. If DSF_ALLOC is defined you must implement dsf_alloc and dsf_free.
-//!!!#define DSF_ALLOC
-
-// Define DSF_CONFIRM if you want the DHF to ask the user for confirmation in a
-// number of situations. If DSF_CONFIRM is defined you must implement dsf_confirm.
-// Not defining DSF_CONFIRM leads to a decrease in code size as confirmation
-// strings are not included.
-//!!!#define DSF_CONFIRM
-
-// Define DSF_DEBUG_MESSAGE if you want debug messages added to your output.
-// If you define DSF_DEBUG_MESSAGE then you must implement function
-// dsf_debug_message.
-//#define DSF_DEBUG_MESSAGE
-
-// Define DSF_ASSERT if you want asserts to be activated.
-// If you define DSF_ASSERT then you must implement function dsf_assert.
-//#define DBG 1
-//#define DSF_ASSERT
-
-// Define DSF_DBWIN if you want asserts and debug messages to be send to a debug
-// window like SOFTICE or DebugView from SysInternals.
-//!!!#define DSF_DBWIN
-//!!! Not implemented yet!
-
-// Define DSF_VOLATILE_ONLY if you only wants to use valatile functions
-// This is a typical setting for a AP and a driver.
+/*-----------------------------------------------------------------------------
+   Define one or more of the following DSF #defines if you want to implement
+   the related DSF function. Function dsf_callback must always be implemented.
+   See file DHF.H for prototypes of the functions. */
+
+/* Define DSF_ALLOC if you want to manage memory allocation and de-allocation
+ for the DHF. If DSF_ALLOC is defined you must implement dsf_alloc and dsf_free.
+ !!!#define DSF_ALLOC */
+
+/* Define DSF_CONFIRM if you want the DHF to ask the user for confirmation in a
+ number of situations. If DSF_CONFIRM is defined you must implement dsf_confirm.
+ Not defining DSF_CONFIRM leads to a decrease in code size as confirmation
+ strings are not included.
+ !!!#define DSF_CONFIRM */
+
+/* Define DSF_DEBUG_MESSAGE if you want debug messages added to your output.
+ If you define DSF_DEBUG_MESSAGE then you must implement function
+ dsf_debug_message.
+ #define DSF_DEBUG_MESSAGE */
+
+/* Define DSF_ASSERT if you want asserts to be activated.
+ If you define DSF_ASSERT then you must implement function dsf_assert.
+ #define DBG 1
+ #define DSF_ASSERT */
+
+/* Define DSF_DBWIN if you want asserts and debug messages to be sent to a debug
+ window like SOFTICE or DebugView from SysInternals.
+ !!!#define DSF_DBWIN
+ !!! Not implemented yet! */
+
+/* Define DSF_VOLATILE_ONLY if you only want to use volatile functions.
+   This is a typical setting for an AP and a driver. */
#define DSF_VOLATILE_ONLY
-// Define DSF_HERMESII if you want to use the DHF for the Hermes-II
+/* Define DSF_HERMESII if you want to use the DHF for the Hermes-II */
#ifdef HERMES2
#define DSF_HERMESII
#else
#undef DSF_HERMESII
-#endif // HERMES2
+#endif /* HERMES2 */
-// Define DSF_BINARY_FILE if you want to use the DHF in combination with
-// reading the Firmware from a separate binary file.
-//!!!#define DSF_BINARY_FILE
+/* Define DSF_BINARY_FILE if you want to use the DHF in combination with
+ reading the Firmware from a separate binary file.
+ !!!#define DSF_BINARY_FILE */
-#endif // DHFCFG_H
+#endif /* DHFCFG_H */
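Note: the comments in dhfcfg.h state that exactly one define of each pair (DHF_WCI/DHF_UIL, DHF_BIG_ENDIAN/DHF_LITTLE_ENDIAN) must be set, otherwise "a compile error is generated"; the error itself is raised in another header. A minimal sketch of how such a guard is commonly written follows; the #error messages are illustrative and are not quoted from the driver sources.

/* Demo configuration: change these to see the guards fire at compile time. */
#define DHF_WCI
#define DHF_LITTLE_ENDIAN

#if defined(DHF_WCI) && defined(DHF_UIL)
#error "define exactly one of DHF_WCI / DHF_UIL"
#elif !defined(DHF_WCI) && !defined(DHF_UIL)
#error "define exactly one of DHF_WCI / DHF_UIL"
#endif

#if defined(DHF_BIG_ENDIAN) && defined(DHF_LITTLE_ENDIAN)
#error "define exactly one of DHF_BIG_ENDIAN / DHF_LITTLE_ENDIAN"
#elif !defined(DHF_BIG_ENDIAN) && !defined(DHF_LITTLE_ENDIAN)
#error "define exactly one of DHF_BIG_ENDIAN / DHF_LITTLE_ENDIAN"
#endif

int main(void) { return 0; }	/* nothing to run; the checks are compile-time */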
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 6e39f5081e2..390628c6c1e 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -990,7 +990,8 @@ int rc = HCF_ERR_INCOMP_FW;
ifbp->IFB_CntlOpt |= DMA_ENABLED;
HCFASSERT( NT_ASSERT, NEVER_TESTED )
// make the entire rx descriptor chain DMA-owned, so the DMA engine can (re-)use it.
- if ( ( p = ifbp->IFB_FirstDesc[DMA_RX] ) != NULL ) { //;? Think this over again in the light of the new chaining strategy
+ p = ifbp->IFB_FirstDesc[DMA_RX];
+ if (p != NULL) { //;? Think this over again in the light of the new chaining strategy
if ( 1 ) { //begin alternative
HCFASSERT( NT_ASSERT, NEVER_TESTED )
put_frame_lst( ifbp, ifbp->IFB_FirstDesc[DMA_RX], DMA_RX );
@@ -2087,7 +2088,8 @@ wci_bufp pt; //pointer with the "right" type, just to help ease writing macr
OPW( HREG_AUX_OFFSET, (hcf_16)(PLUG_DATA_OFFSET & 0x7E) );
io_port = ifbp->IFB_IOBase + HREG_AUX_DATA; //to prevent side effects of the MSF-defined macro
p = ltvp->val; //destination char pointer (in LTV record)
- if ( ( i = len - 1 ) > 0 ) {
+ i = len - 1;
+ if (i > 0 ) {
pt = (wci_bufp)p; //just to help ease writing macros with embedded assembly
IN_PORT_STRING_8_16( io_port, pt, i ); //space used by T: -1
}
@@ -2674,7 +2676,8 @@ hcf_16 fid = 0;
#if (HCF_EXT) & HCF_EXT_TX_CONT // Continuous transmit test
if ( tx_cntl == HFS_TX_CNTL_TX_CONT ) {
- if ( ( fid = get_fid( ifbp ) ) != 0 ) {
+ fid = get_fid(ifbp);
+ if (fid != 0 ) {
//setup BAP to begin of TxFS
(void)setup_bap( ifbp, fid, 0, IO_OUT );
//copy all the fragments in a transparent fashion
@@ -2700,7 +2703,8 @@ hcf_16 fid = 0;
#if (HCF_TYPE) & HCF_TYPE_WPA
tx_cntl |= ifbp->IFB_MICTxCntl;
#endif // HCF_TYPE_WPA
- if ( (fid = ifbp->IFB_TxFID) == 0 && ( fid = get_fid( ifbp ) ) != 0 ) /* 4 */
+ fid = ifbp->IFB_TxFID;
+ if (fid == 0 && ( fid = get_fid( ifbp ) ) != 0 ) /* 4 */
/* skip the next compound statement if:
- pre-put message or
- no fid available (which should never occur if the MSF adheres to the WCI)
@@ -4860,7 +4864,8 @@ PROT_CNT_INI
int rc;
HCFTRACE( ifbp, HCF_TRACE_STRIO );
- if ( ( rc = ifbp->IFB_DefunctStat ) == HCF_SUCCESS ) { /*2*/
+ rc = ifbp->IFB_DefunctStat;
+ if (rc == HCF_SUCCESS) { /*2*/
OPW( HREG_SELECT_1, fid ); /*4*/
OPW( HREG_OFFSET_1, offset );
if ( type == IO_IN ) {
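Note: the hcf.c hunks above (and the similar wl_main.c hunk further down) all apply the same cleanup: an assignment buried inside an if condition is split into a plain assignment followed by the test, keeping the condition side-effect free and silencing the corresponding checkpatch warning. A generic standalone sketch of the before/after shape, with a hypothetical get_resource() standing in for calls such as get_fid(ifbp):

#include <stdio.h>

/* Hypothetical stand-in for a driver call such as get_fid(ifbp). */
static int get_resource(void)
{
	return 42;
}

int main(void)
{
	int fid;

	/* Discouraged: assignment hidden inside the condition.
	 *	if ((fid = get_resource()) != 0) { ... }
	 */

	/* Preferred after the cleanup: assign first, then test. */
	fid = get_resource();
	if (fid != 0)
		printf("got fid %d\n", fid);

	return 0;
}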
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 9da42e66085..10abd406b09 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -105,13 +105,6 @@
/*******************************************************************************
- * macro definitions
- ******************************************************************************/
-#define CS_CHECK(fn, ret) do { \
- last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; \
- } while (0)
-
-/*******************************************************************************
* global definitions
******************************************************************************/
#if DBG
@@ -156,15 +149,12 @@ static int wl_adapter_attach(struct pcmcia_device *link)
link->io.NumPorts1 = HCF_NUM_IO_PORTS;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
link->io.IOAddrLines = 6;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
- link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
- link->irq.Handler = &wl_isr;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 5;
link->conf.Present = PRESENT_OPTION;
- link->priv = link->irq.Instance = dev;
+ link->priv = dev;
lp = wl_priv(dev);
lp->link = link;
@@ -305,7 +295,7 @@ void wl_adapter_insert( struct pcmcia_device *link )
{
struct net_device *dev;
int i;
- int last_fn, last_ret;
+ int ret;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_adapter_insert" );
@@ -317,21 +307,27 @@ void wl_adapter_insert( struct pcmcia_device *link )
/* Do we need to allocate an interrupt? */
link->conf.Attributes |= CONF_ENABLE_IRQ;
- CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
- CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
+ ret = pcmcia_request_io(link, &link->io);
+ if (ret != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, (void *) wl_isr);
+ if (ret != 0)
+ goto failed;
+ ret = pcmcia_request_configuration(link, &link->conf);
+ if (ret != 0)
+ goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
- SET_NETDEV_DEV(dev, &handle_to_dev(link));
+ SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk("%s: register_netdev() failed\n", MODULE_NAME);
goto failed;
}
- link->dev_node = &( wl_priv(dev) )->node;
- strcpy(( wl_priv(dev) )->node.dev_name, dev->name);
+
register_wlags_sysfs(dev);
printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, ""mac_address ",
@@ -343,11 +339,6 @@ void wl_adapter_insert( struct pcmcia_device *link )
DBG_LEAVE( DbgInfo );
return;
-
-cs_failed:
- cs_error( link, last_fn, last_ret );
-
-
failed:
wl_adapter_release( link );
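Note: the wl_cs.c hunk removes the CS_CHECK macro, which hid a goto behind last_fn/last_ret bookkeeping, and replaces it with explicit checks that fall through to a single failed: label. The sketch below reproduces that control flow with stub functions; in the driver the real calls are the pcmcia_request_io/_irq/_configuration functions shown in the hunk, not these stubs.

#include <stdio.h>

/* Stubs standing in for pcmcia_request_io()/_irq()/_configuration(). */
static int request_io(void)            { return 0; }
static int request_irq_line(void)      { return 0; }
static int request_configuration(void) { return 0; }

static void release_adapter(void)
{
	printf("releasing adapter resources\n");
}

static int adapter_insert(void)
{
	int ret;

	ret = request_io();
	if (ret != 0)
		goto failed;

	ret = request_irq_line();
	if (ret != 0)
		goto failed;

	ret = request_configuration();
	if (ret != 0)
		goto failed;

	printf("adapter configured\n");
	return 0;

failed:
	release_adapter();
	return ret;
}

int main(void)
{
	return adapter_insert();
}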
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 2a0e67450fb..a9b8828a1a2 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -84,10 +84,6 @@ int wl_adapter_close(struct net_device *dev);
int wl_adapter_is_open(struct net_device *dev);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-void cs_error(client_handle_t handle, int func, int ret);
-#endif
-
const char *DbgEvent( int mask );
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index 466fb6215ac..d9a0ad039c1 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -69,9 +69,6 @@
******************************************************************************/
#include <linux/version.h>
#ifdef BUS_PCMCIA
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-#include <pcmcia/version.h>
-#endif
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
@@ -866,7 +863,6 @@ struct wl_private
{
#ifdef BUS_PCMCIA
- dev_node_t node;
struct pcmcia_device *link;
#endif // BUS_PCMCIA
@@ -1013,13 +1009,13 @@ extern inline struct wl_private *wl_priv(struct net_device *dev)
* SPARC, due to its weird semantics for save/restore flags. extern
* inline should prevent the kernel from linking or module from
* loading if they are not inlined. */
-extern inline void wl_lock(struct wl_private *lp,
+static inline void wl_lock(struct wl_private *lp,
unsigned long *flags)
{
spin_lock_irqsave(&lp->slock, *flags);
}
-extern inline void wl_unlock(struct wl_private *lp,
+static inline void wl_unlock(struct wl_private *lp,
unsigned long *flags)
{
spin_unlock_irqrestore(&lp->slock, *flags);
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index cf0c38468b2..88d0d472142 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -3591,7 +3591,8 @@ int scull_read_procmem(char *buf, char **start, off_t offset, int len, int *eof,
len=0;
- if ( ( lp = ((struct net_device *)data)->priv ) == NULL ) {
+ lp = ((struct net_device *)data)->priv;
+ if (lp == NULL) {
len += sprintf(buf+len,"No wl_private in scull_read_procmem\n" );
} else if ( lp->wlags49_type == 0 ){
ifbp = &lp->hcfCtx;
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 1db73ebcae2..1aa61dbdb79 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -463,15 +463,10 @@ static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
// strncpy(info.fw_version, priv->fw_name,
// sizeof(info.fw_version) - 1);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20))
if (dev->dev.parent) {
dev_set_name(dev->dev.parent, "%s", info->bus_info);
//strncpy(info->bus_info, dev->dev.parent->bus_id,
// sizeof(info->bus_info) - 1);
-#else
- if (dev->class_dev.parent) {
- sizeof(info->bus_info) - 1);
-#endif
} else {
snprintf(info->bus_info, sizeof(info->bus_info) - 1,
"PCMCIA FIXME");
@@ -930,8 +925,10 @@ int wl_rx(struct net_device *dev)
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
- if(( pktlen = lp->hcfCtx.IFB_RxLen ) != 0 ) {
- if(( skb = ALLOC_SKB( pktlen )) != NULL ) {
+ pktlen = lp->hcfCtx.IFB_RxLen;
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
/* Set the netdev based on the port */
switch( port ) {
#ifdef USE_WDS
@@ -1050,7 +1047,7 @@ void wl_multicast( struct net_device *dev )
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
@@ -1073,9 +1070,9 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- netdev_for_each_mc_addr(mclist, dev)
- DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
- mclist->dmi_addrlen );
+ netdev_for_each_mc_addr(ha, dev)
+ DBG_PRINT(" %s (%d)\n", DbgHwAddr(ha->addr),
+ dev->addr_len);
}
#endif /* DBG */
@@ -1120,9 +1117,9 @@ void wl_multicast( struct net_device *dev )
lp->ltvRecord.typ = CFG_GROUP_ADDR;
x = 0;
- netdev_for_each_mc_addr(mclist, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
- mclist->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
@@ -1177,7 +1174,6 @@ void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
#endif /* NEW_MULTICAST */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
static const struct net_device_ops wl_netdev_ops =
{
.ndo_start_xmit = &wl_tx_port0,
@@ -1197,7 +1193,6 @@ static const struct net_device_ops wl_netdev_ops =
.ndo_poll_controller = wl_poll,
#endif
};
-#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
/*******************************************************************************
* wl_device_alloc()
@@ -1251,27 +1246,7 @@ struct net_device * wl_device_alloc( void )
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
dev->netdev_ops = &wl_netdev_ops;
-#else
- dev->hard_start_xmit = &wl_tx_port0;
-
- dev->set_config = &wl_config;
- dev->get_stats = &wl_stats;
- dev->set_multicast_list = &wl_multicast;
-
- dev->init = &wl_insert;
- dev->open = &wl_adapter_open;
- dev->stop = &wl_adapter_close;
- dev->do_ioctl = &wl_ioctl;
-
- dev->tx_timeout = &wl_tx_timeout;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = wl_poll;
-#endif
-
-#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
dev->watchdog_timeo = TX_TIMEOUT;
@@ -1995,8 +1970,10 @@ int wl_rx_dma( struct net_device *dev )
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
- if(( pktlen = GET_BUF_CNT( desc_next )) != 0 ) {
- if(( skb = ALLOC_SKB( pktlen )) != NULL ) {
+ pktlen = GET_BUF_CNT(desc_next);
+ if (pktlen != 0) {
+ skb = ALLOC_SKB(pktlen);
+ if (skb != NULL) {
switch( port ) {
#ifdef USE_WDS
case 1:
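Note: the multicast hunks above replace the old struct dev_mc_list walker with netdev_for_each_mc_addr() over struct netdev_hw_addr, copying ha->addr instead of mclist->dmi_addr. A compressed kernel-style sketch of the new iteration is shown below; it assumes the usual kernel headers and is only buildable inside a kernel tree, and copy_mc_list() itself is an illustrative helper, not a driver function.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Copy the device's multicast list into a flat byte array, up to 'max'
 * entries, using the iteration style introduced by the hunk above. */
static int copy_mc_list(struct net_device *dev, u8 *buf, int max)
{
	struct netdev_hw_addr *ha;
	int x = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (x >= max)
			break;
		memcpy(&buf[x++ * ETH_ALEN], ha->addr, ETH_ALEN);
	}
	return x;	/* number of addresses copied */
}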
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 727ea8a483a..260d4f0d47b 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -503,7 +503,8 @@ int wvlan_uil_send_diag_msg( struct uilreq *urq, struct wl_private *lp )
return result;
}
- if ((data = kmalloc(urq->len, GFP_KERNEL)) != NULL) {
+ data = kmalloc(urq->len, GFP_KERNEL);
+ if (data != NULL) {
memset( Descp, 0, sizeof( DESC_STRCT ));
memcpy( data, urq->data, urq->len );
@@ -617,7 +618,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
LTV record, try to allocate it from the kernel stack.
Otherwise, we just use our local LTV record. */
if( urq->len > sizeof( lp->ltvRecord )) {
- if(( pLtv = (ltv_t *)kmalloc( urq->len, GFP_KERNEL )) != NULL ) {
+ pLtv = kmalloc(urq->len, GFP_KERNEL);
+ if (pLtv != NULL) {
ltvAllocated = TRUE;
} else {
DBG_ERROR( DbgInfo, "Alloc FAILED\n" );
@@ -652,7 +654,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
break;
/* CFG_CNF_OWN_SSID currently same as CNF_DESIRED_SSID. Do we
- need seperate storage for this? */
+ need separate storage for this? */
//case CFG_CNF_OWN_SSID:
case CFG_CNF_OWN_ATIM_WINDOW:
lp->atimWindow = pLtv->u.u16[0];
@@ -1296,7 +1298,8 @@ int wvlan_uil_get_info( struct uilreq *urq, struct wl_private *lp )
LTV record, try to allocate it from the kernel stack.
Otherwise, we just use our local LTV record. */
if( urq->len > sizeof( lp->ltvRecord )) {
- if(( pLtv = (ltv_t *)kmalloc( urq->len, GFP_KERNEL )) != NULL ) {
+ pLtv = kmalloc(urq->len, GFP_KERNEL);
+ if (pLtv != NULL) {
ltvAllocated = TRUE;
/* Copy the command/length information into the new buffer. */
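Note: the wl_priv.c hunks follow two related kernel conventions: the return value of kmalloc() is not cast (it already is a void *), and the allocation is assigned on its own line before the NULL check. A hedged kernel-style sketch of that pattern follows; the fallback to a caller-supplied scratch buffer mirrors how wvlan_uil_put_info() falls back to lp->ltvRecord, but get_request_buf() itself is illustrative.

#include <linux/slab.h>

/* Return a buffer of at least 'len' bytes: the caller's scratch buffer when
 * it is large enough, otherwise a fresh kmalloc()ed one (flagged via
 * *allocated so the caller knows to kfree() it later). */
static void *get_request_buf(void *scratch, size_t scratch_len,
			     size_t len, int *allocated)
{
	void *buf;

	if (len <= scratch_len) {
		*allocated = 0;
		return scratch;
	}

	buf = kmalloc(len, GFP_KERNEL);	/* no cast of the void * result */
	if (buf == NULL)
		return NULL;

	*allocated = 1;
	return buf;
}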
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index 1e0c75f2855..292d5792dd7 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -91,7 +91,7 @@
#include <debug.h>
#include <hcf.h>
-//#include <hcfdef.h>
+/* #include <hcfdef.h> */
#include <wl_if.h>
#include <wl_internal.h>
@@ -113,20 +113,22 @@ extern p_u32 DebugFlag;
extern dbg_info_t *DbgInfo;
#endif
-int parse_yes_no( char* value );
+int parse_yes_no(char *value);
-int parse_yes_no( char* value ) {
-int rc = 0; //default to NO for invalid parameters
+int parse_yes_no(char *value)
+{
+int rc = 0; /* default to NO for invalid parameters */
- if ( strlen( value ) == 1 ) {
- if ( ( value[0] | ('Y'^'y') ) == 'y' ) rc = 1;
-// } else {
-// this should not be debug time info, it is an enduser data entry error ;?
-// DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MICROWAVE_ROBUSTNESS );
+ if (strlen(value) == 1) {
+ if ((value[0] | ('Y'^'y')) == 'y')
+ rc = 1;
+ /* } else { */
+	/* this should not be debug time info, it is an end-user data entry error ;? */
+ /* DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MICROWAVE_ROBUSTNESS); */
}
return rc;
-} // parse_yes_no
+} /* parse_yes_no */
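Note: parse_yes_no() accepts both 'y' and 'Y' with a single comparison: 'Y'^'y' is 0x20, the ASCII case bit, so OR-ing it into the character forces lower case before comparing. A standalone demonstration of the same trick (yes_no() is an illustrative copy, not the driver function):

#include <stdio.h>

/* Returns 1 for a one-character "y"/"Y" answer, 0 otherwise,
 * mirroring parse_yes_no() in wl_profile.c. */
static int yes_no(const char *value)
{
	int rc = 0;	/* default to NO for invalid parameters */

	if (value[0] != '\0' && value[1] == '\0' &&
	    (value[0] | ('Y' ^ 'y')) == 'y')
		rc = 1;
	return rc;
}

int main(void)
{
	printf("%d %d %d %d\n", yes_no("y"), yes_no("Y"), yes_no("n"), yes_no("yes"));
	return 0;
}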
/*******************************************************************************
@@ -149,13 +151,13 @@ int rc = 0; //default to NO for invalid parameters
* N/A
*
******************************************************************************/
-void parse_config( struct net_device *dev )
+void parse_config(struct net_device *dev)
{
int file_desc;
-#if 0 // BIN_DL
- int rc;
+#if 0 /* BIN_DL */
+ int rc;
char *cp = NULL;
-#endif // BIN_DL
+#endif /* BIN_DL */
char buffer[MAX_LINE_SIZE];
char filename[MAX_LINE_SIZE];
mm_segment_t fs;
@@ -163,48 +165,47 @@ void parse_config( struct net_device *dev )
ENCSTRCT sEncryption;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "parse_config" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("parse_config");
+ DBG_ENTER(DbgInfo);
/* Get the wavelan specific info for this device */
wvlan_config = (struct wl_private *)dev->priv;
- if ( wvlan_config == NULL ) {
- DBG_ERROR( DbgInfo, "Wavelan specific info struct not present?\n" );
+ if (wvlan_config == NULL) {
+ DBG_ERROR(DbgInfo, "Wavelan specific info struct not present?\n");
return;
}
/* setup the default encryption string */
- strcpy( wvlan_config->szEncryption, DEF_CRYPT_STR );
+ strcpy(wvlan_config->szEncryption, DEF_CRYPT_STR);
/* Obtain a user-space process context, storing the original context */
- fs = get_fs( );
- set_fs( get_ds( ));
+ fs = get_fs();
+ set_fs(get_ds());
/* Determine the filename for this device and attempt to open it */
- sprintf( filename, "%s%s", ROOT_CONFIG_FILENAME, dev->name );
- file_desc = open( filename, O_RDONLY, 0 );
- if ( file_desc != -1 ) {
- DBG_TRACE( DbgInfo, "Wireless config file found. Parsing options...\n" );
+ sprintf(filename, "%s%s", ROOT_CONFIG_FILENAME, dev->name);
+ file_desc = open(filename, O_RDONLY, 0);
+ if (file_desc != -1) {
+ DBG_TRACE(DbgInfo, "Wireless config file found. Parsing options...\n");
/* Read out the options */
- while( readline( file_desc, buffer )) {
- translate_option( buffer, wvlan_config );
- }
+ while (readline(file_desc, buffer))
+ translate_option(buffer, wvlan_config);
/* Close the file */
- close( file_desc ); //;?even if file_desc == -1 ???
+ close(file_desc); /* ;?even if file_desc == -1 ??? */
} else {
- DBG_TRACE( DbgInfo, "No iwconfig file found for this device; "
- "config.opts or wireless.opts will be used\n" );
+ DBG_TRACE(DbgInfo, "No iwconfig file found for this device; "
+ "config.opts or wireless.opts will be used\n");
}
/* Return to the original context */
- set_fs( fs );
+ set_fs(fs);
/* convert the WEP keys, if read in as key1, key2, type of data */
- if ( wvlan_config->EnableEncryption ) {
- memset( &sEncryption, 0, sizeof( sEncryption ));
+ if (wvlan_config->EnableEncryption) {
+ memset(&sEncryption, 0, sizeof(sEncryption));
- wl_wep_decode( CRYPT_CODE, &sEncryption,
- wvlan_config->szEncryption );
+ wl_wep_decode(CRYPT_CODE, &sEncryption,
+ wvlan_config->szEncryption);
/* the Linux driver likes to use 1-4 for the key IDs, and then
convert to 0-3 when sending to the card. The Windows code
@@ -216,65 +217,64 @@ void parse_config( struct net_device *dev )
sEncryption.wEnabled = wvlan_config->EnableEncryption;
sEncryption.wTxKeyID = wvlan_config->TransmitKeyID - 1;
- memcpy( &sEncryption.EncStr, &wvlan_config->DefaultKeys,
- sizeof( CFG_DEFAULT_KEYS_STRCT ));
+ memcpy(&sEncryption.EncStr, &wvlan_config->DefaultKeys,
+ sizeof(CFG_DEFAULT_KEYS_STRCT));
- memset( wvlan_config->szEncryption, 0, sizeof( wvlan_config->szEncryption ));
+ memset(wvlan_config->szEncryption, 0, sizeof(wvlan_config->szEncryption));
- wl_wep_code( CRYPT_CODE, wvlan_config->szEncryption, &sEncryption,
- sizeof( sEncryption ));
+ wl_wep_code(CRYPT_CODE, wvlan_config->szEncryption, &sEncryption,
+ sizeof(sEncryption));
}
/* decode the encryption string for the call to wl_commit() */
- wl_wep_decode( CRYPT_CODE, &sEncryption, wvlan_config->szEncryption );
+ wl_wep_decode(CRYPT_CODE, &sEncryption, wvlan_config->szEncryption);
wvlan_config->TransmitKeyID = sEncryption.wTxKeyID + 1;
wvlan_config->EnableEncryption = sEncryption.wEnabled;
- memcpy( &wvlan_config->DefaultKeys, &sEncryption.EncStr,
- sizeof( CFG_DEFAULT_KEYS_STRCT ));
+ memcpy(&wvlan_config->DefaultKeys, &sEncryption.EncStr,
+ sizeof(CFG_DEFAULT_KEYS_STRCT));
-#if 0 //BIN_DL
+#if 0 /* BIN_DL */
/* Obtain a user-space process context, storing the original context */
- fs = get_fs( );
- set_fs( get_ds( ));
-
- //;?just to fake something
- strcpy(/*wvlan_config->fw_image_*/filename, "/etc/agere/fw.bin" );
- file_desc = open( /*wvlan_config->fw_image_*/filename, 0, 0 );
- if ( file_desc == -1 ) {
- DBG_ERROR( DbgInfo, "No image file found\n" );
+ fs = get_fs();
+ set_fs(get_ds());
+
+ /* ;?just to fake something */
+ strcpy(/*wvlan_config->fw_image_*/filename, "/etc/agere/fw.bin");
+ file_desc = open(/*wvlan_config->fw_image_*/filename, 0, 0);
+ if (file_desc == -1) {
+ DBG_ERROR(DbgInfo, "No image file found\n");
} else {
- DBG_TRACE( DbgInfo, "F/W image file found\n" );
-#define DHF_ALLOC_SIZE 96000 //just below 96K, let's hope it suffices for now and for the future
- cp = (char*)vmalloc( DHF_ALLOC_SIZE );
- if ( cp == NULL ) {
- DBG_ERROR( DbgInfo, "error in vmalloc\n" );
+ DBG_TRACE(DbgInfo, "F/W image file found\n");
+#define DHF_ALLOC_SIZE 96000 /* just below 96K, let's hope it suffices for now and for the future */
+ cp = (char *)vmalloc(DHF_ALLOC_SIZE);
+ if (cp == NULL) {
+ DBG_ERROR(DbgInfo, "error in vmalloc\n");
} else {
- rc = read( file_desc, cp, DHF_ALLOC_SIZE );
- if ( rc == DHF_ALLOC_SIZE ) {
- DBG_ERROR( DbgInfo, "buffer too small, %d\n", DHF_ALLOC_SIZE );
- } else if ( rc > 0 ) {
- DBG_TRACE( DbgInfo, "read O.K.: %d bytes %.12s\n", rc, cp );
- rc = read( file_desc, &cp[rc], 1 );
- if ( rc == 0 ) {
- DBG_TRACE( DbgInfo, "no more to read\n" );
- }
+ rc = read(file_desc, cp, DHF_ALLOC_SIZE);
+ if (rc == DHF_ALLOC_SIZE) {
+ DBG_ERROR(DbgInfo, "buffer too small, %d\n", DHF_ALLOC_SIZE);
+ } else if (rc > 0) {
+ DBG_TRACE(DbgInfo, "read O.K.: %d bytes %.12s\n", rc, cp);
+ rc = read(file_desc, &cp[rc], 1);
+ if (rc == 0)
+ DBG_TRACE(DbgInfo, "no more to read\n");
}
- if ( rc != 0 ) {
- DBG_ERROR( DbgInfo, "file not read in one swoop or other error"\
- ", give up, too complicated, rc = %0X\n", rc );
+ if (rc != 0) {
+ DBG_ERROR(DbgInfo, "file not read in one swoop or other error"\
+ ", give up, too complicated, rc = %0X\n", rc);
}
- vfree( cp );
+ vfree(cp);
}
- close( file_desc );
+ close(file_desc);
}
- set_fs( fs ); /* Return to the original context */
-#endif // BIN_DL
+ set_fs(fs); /* Return to the original context */
+#endif /* BIN_DL */
- DBG_LEAVE( DbgInfo );
+ DBG_LEAVE(DbgInfo);
return;
-} // parse_config
+} /* parse_config */
/*******************************************************************************
* readline()
@@ -298,17 +298,17 @@ void parse_config( struct net_device *dev )
* -1 on error
*
******************************************************************************/
-int readline( int filedesc, char *buffer )
+int readline(int filedesc, char *buffer)
{
int result = -1;
int bytes_read = 0;
/*------------------------------------------------------------------------*/
/* Make sure the file descriptor is good */
- if ( filedesc != -1 ) {
+ if (filedesc != -1) {
/* Read in from the file byte by byte until a newline is reached */
- while(( result = read( filedesc, &buffer[bytes_read], 1 )) == 1 ) {
- if ( buffer[bytes_read] == '\n' ) {
+ while ((result = read(filedesc, &buffer[bytes_read], 1)) == 1) {
+ if (buffer[bytes_read] == '\n') {
buffer[bytes_read] = '\0';
bytes_read++;
break;
@@ -318,12 +318,11 @@ int readline( int filedesc, char *buffer )
}
/* Return the number of bytes read */
- if ( result == -1 ) {
+ if (result == -1)
return result;
- } else {
+ else
return bytes_read;
- }
-} // readline
+} /* readline */
/*============================================================================*/
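Note: readline() above pulls the config file in one byte at a time, replaces the first newline with a terminator, and returns the number of bytes consumed (or the read error). A user-space POSIX sketch of the same loop, for illustration only (the driver runs this against a kernel-side file descriptor, not a test file like the one opened here):

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

/* Read one '\n'-terminated line from fd into buf; returns bytes consumed,
 * 0 at end of file, or -1 on error.  buf must be large enough. */
static int read_line(int fd, char *buf)
{
	int bytes_read = 0;
	int result;

	while ((result = read(fd, &buf[bytes_read], 1)) == 1) {
		if (buf[bytes_read] == '\n') {
			buf[bytes_read] = '\0';
			bytes_read++;
			break;
		}
		bytes_read++;
	}

	return result == -1 ? result : bytes_read;
}

int main(void)
{
	char line[256];
	int fd = open("/etc/hostname", O_RDONLY);	/* any small text file */

	if (fd != -1) {
		while (read_line(fd, line) > 0)
			printf("%s\n", line);
		close(fd);
	}
	return 0;
}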
/*******************************************************************************
@@ -346,7 +345,7 @@ int readline( int filedesc, char *buffer )
* N/A
*
******************************************************************************/
-void translate_option( char *buffer, struct wl_private *lp )
+void translate_option(char *buffer, struct wl_private *lp)
{
unsigned int value_convert = 0;
int string_length = 0;
@@ -355,18 +354,17 @@ void translate_option( char *buffer, struct wl_private *lp )
u_char mac_value[ETH_ALEN];
/*------------------------------------------------------------------------*/
- DBG_FUNC( "translate_option" );
+ DBG_FUNC("translate_option");
- if ( buffer == NULL || lp == NULL ) {
- DBG_ERROR( DbgInfo, "Config file buffer and/or wavelan buffer ptr NULL\n" );
+ if (buffer == NULL || lp == NULL) {
+ DBG_ERROR(DbgInfo, "Config file buffer and/or wavelan buffer ptr NULL\n");
return;
}
- ParseConfigLine( buffer, &key, &value );
+ ParseConfigLine(buffer, &key, &value);
- if ( key == NULL || value == NULL ) {
+ if (key == NULL || value == NULL)
return;
- }
/* Determine which key it is and perform the appropriate action */
@@ -375,367 +373,316 @@ void translate_option( char *buffer, struct wl_private *lp )
/* handle DebugFlag as early as possible so it starts its influence as early
* as possible
*/
- if ( strcmp( key, PARM_NAME_DEBUG_FLAG ) == 0 ) {
- if ( DebugFlag == ~0 ) { //if DebugFlag is not specified on the command line
- if ( DbgInfo->DebugFlag == 0 ) { /* if pc_debug did not set DebugFlag (i.e.pc_debug is
+ if (strcmp(key, PARM_NAME_DEBUG_FLAG) == 0) {
+ if (DebugFlag == ~0) { /* if DebugFlag is not specified on the command line */
+			if (DbgInfo->DebugFlag == 0) { /* if pc_debug did not set DebugFlag (i.e. pc_debug is
* not specified or specified outside the 4-8 range
*/
DbgInfo->DebugFlag |= DBG_DEFAULTS;
}
} else {
- DbgInfo->DebugFlag = simple_strtoul(value, NULL, 0); //;?DebugFlag;
+ DbgInfo->DebugFlag = simple_strtoul(value, NULL, 0); /* ;?DebugFlag; */
}
- DbgInfo->DebugFlag = simple_strtoul(value, NULL, 0); //;?Delete ASAP
+ DbgInfo->DebugFlag = simple_strtoul(value, NULL, 0); /* ;?Delete ASAP */
}
#endif /* DBG */
- if ( strcmp( key, PARM_NAME_AUTH_KEY_MGMT_SUITE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_AUTH_KEY_MGMT_SUITE, value );
+ if (strcmp(key, PARM_NAME_AUTH_KEY_MGMT_SUITE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_AUTH_KEY_MGMT_SUITE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_AUTH_KEY_MGMT_SUITE ) || ( value_convert <= PARM_MAX_AUTH_KEY_MGMT_SUITE )) {
+ if ((value_convert >= PARM_MIN_AUTH_KEY_MGMT_SUITE) || (value_convert <= PARM_MAX_AUTH_KEY_MGMT_SUITE))
lp->AuthKeyMgmtSuite = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_AUTH_KEY_MGMT_SUITE );
- }
- }
- else if ( strcmp( key, PARM_NAME_BRSC_2GHZ ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_BRSC_2GHZ, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_AUTH_KEY_MGMT_SUITE);
+ } else if (strcmp(key, PARM_NAME_BRSC_2GHZ) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_BRSC_2GHZ, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_BRSC ) || ( value_convert <= PARM_MAX_BRSC )) {
+ if ((value_convert >= PARM_MIN_BRSC) || (value_convert <= PARM_MAX_BRSC))
lp->brsc[0] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invaid; will be ignored\n", PARM_NAME_BRSC_2GHZ );
- }
- }
- else if ( strcmp( key, PARM_NAME_BRSC_5GHZ ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_BRSC_5GHZ, value );
+ else
+			DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_BRSC_2GHZ);
+ } else if (strcmp(key, PARM_NAME_BRSC_5GHZ) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_BRSC_5GHZ, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_BRSC ) || ( value_convert <= PARM_MAX_BRSC )) {
+ if ((value_convert >= PARM_MIN_BRSC) || (value_convert <= PARM_MAX_BRSC))
lp->brsc[1] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invaid; will be ignored\n", PARM_NAME_BRSC_5GHZ );
- }
- }
- else if (( strcmp( key, PARM_NAME_DESIRED_SSID ) == 0 ) || ( strcmp( key, PARM_NAME_OWN_SSID ) == 0 )) {
- DBG_TRACE( DbgInfo, "SSID, value: %s\n", value );
+ else
+			DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_BRSC_5GHZ);
+ } else if ((strcmp(key, PARM_NAME_DESIRED_SSID) == 0) || (strcmp(key, PARM_NAME_OWN_SSID) == 0)) {
+ DBG_TRACE(DbgInfo, "SSID, value: %s\n", value);
- memset( lp->NetworkName, 0, ( PARM_MAX_NAME_LEN + 1 ));
+ memset(lp->NetworkName, 0, (PARM_MAX_NAME_LEN + 1));
/* Make sure the value isn't too long */
- string_length = strlen( value );
- if ( string_length > PARM_MAX_NAME_LEN ) {
- DBG_WARNING( DbgInfo, "SSID too long; will be truncated\n" );
+ string_length = strlen(value);
+ if (string_length > PARM_MAX_NAME_LEN) {
+ DBG_WARNING(DbgInfo, "SSID too long; will be truncated\n");
string_length = PARM_MAX_NAME_LEN;
}
- memcpy( lp->NetworkName, value, string_length );
+ memcpy(lp->NetworkName, value, string_length);
}
#if 0
- else if ( strcmp( key, PARM_NAME_DOWNLOAD_FIRMWARE ) == 0 ) {
- DBG_TRACE( DbgInfo, "DOWNLOAD_FIRMWARE, value: %s\n", value );
- memset( lp->fw_image_filename, 0, ( MAX_LINE_SIZE + 1 ));
+ else if (strcmp(key, PARM_NAME_DOWNLOAD_FIRMWARE) == 0) {
+ DBG_TRACE(DbgInfo, "DOWNLOAD_FIRMWARE, value: %s\n", value);
+ memset(lp->fw_image_filename, 0, (MAX_LINE_SIZE + 1));
/* Make sure the value isn't too long */
- string_length = strlen( value );
- if ( string_length > MAX_LINE_SIZE ) {
- DBG_WARNING( DbgInfo, "F/W image file name too long; will be ignored\n" );
- } else {
- memcpy( lp->fw_image_filename, value, string_length );
- }
+ string_length = strlen(value);
+ if (string_length > MAX_LINE_SIZE)
+ DBG_WARNING(DbgInfo, "F/W image file name too long; will be ignored\n");
+ else
+ memcpy(lp->fw_image_filename, value, string_length);
}
#endif
- else if ( strcmp( key, PARM_NAME_ENABLE_ENCRYPTION ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_ENABLE_ENCRYPTION, value );
+ else if (strcmp(key, PARM_NAME_ENABLE_ENCRYPTION) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_ENABLE_ENCRYPTION, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_ENABLE_ENCRYPTION ) && ( value_convert <= PARM_MAX_ENABLE_ENCRYPTION )) {
+ if ((value_convert >= PARM_MIN_ENABLE_ENCRYPTION) && (value_convert <= PARM_MAX_ENABLE_ENCRYPTION))
lp->EnableEncryption = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_ENABLE_ENCRYPTION );
- }
- }
- else if ( strcmp( key, PARM_NAME_ENCRYPTION ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_ENCRYPTION, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_ENABLE_ENCRYPTION);
+ } else if (strcmp(key, PARM_NAME_ENCRYPTION) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_ENCRYPTION, value);
- memset( lp->szEncryption, 0, sizeof( lp->szEncryption ));
+ memset(lp->szEncryption, 0, sizeof(lp->szEncryption));
/* Make sure the value isn't too long */
- string_length = strlen( value );
- if ( string_length > sizeof( lp->szEncryption ) ) {
- DBG_WARNING( DbgInfo, "%s too long; will be truncated\n", PARM_NAME_ENCRYPTION );
- string_length = sizeof( lp->szEncryption );
+ string_length = strlen(value);
+ if (string_length > sizeof(lp->szEncryption)) {
+ DBG_WARNING(DbgInfo, "%s too long; will be truncated\n", PARM_NAME_ENCRYPTION);
+ string_length = sizeof(lp->szEncryption);
}
- memcpy( lp->szEncryption, value, string_length );
- }
- else if ( strcmp( key, PARM_NAME_KEY1 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_KEY1, value );
+ memcpy(lp->szEncryption, value, string_length);
+ } else if (strcmp(key, PARM_NAME_KEY1) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_KEY1, value);
- if ( is_valid_key_string( value )) {
- memset( lp->DefaultKeys.key[0].key, 0, MAX_KEY_SIZE );
+ if (is_valid_key_string(value)) {
+ memset(lp->DefaultKeys.key[0].key, 0, MAX_KEY_SIZE);
- key_string2key( value, &lp->DefaultKeys.key[0] );
+ key_string2key(value, &lp->DefaultKeys.key[0]);
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY1 );
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY1);
}
- }
- else if ( strcmp( key, PARM_NAME_KEY2 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_KEY2, value );
+ } else if (strcmp(key, PARM_NAME_KEY2) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_KEY2, value);
- if ( is_valid_key_string( value )) {
- memset( lp->DefaultKeys.key[1].key, 0, MAX_KEY_SIZE );
+ if (is_valid_key_string(value)) {
+ memset(lp->DefaultKeys.key[1].key, 0, MAX_KEY_SIZE);
- key_string2key( value, &lp->DefaultKeys.key[1] );
+ key_string2key(value, &lp->DefaultKeys.key[1]);
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY2 );
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY2);
}
- }
- else if ( strcmp( key, PARM_NAME_KEY3 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_KEY3, value );
+ } else if (strcmp(key, PARM_NAME_KEY3) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_KEY3, value);
- if ( is_valid_key_string( value )) {
- memset( lp->DefaultKeys.key[2].key, 0, MAX_KEY_SIZE );
+ if (is_valid_key_string(value)) {
+ memset(lp->DefaultKeys.key[2].key, 0, MAX_KEY_SIZE);
- key_string2key( value, &lp->DefaultKeys.key[2] );
+ key_string2key(value, &lp->DefaultKeys.key[2]);
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY3 );
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY3);
}
- }
- else if ( strcmp( key, PARM_NAME_KEY4 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_KEY4, value );
+ } else if (strcmp(key, PARM_NAME_KEY4) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_KEY4, value);
- if ( is_valid_key_string( value )) {
- memset( lp->DefaultKeys.key[3].key, 0, MAX_KEY_SIZE );
+ if (is_valid_key_string(value)) {
+ memset(lp->DefaultKeys.key[3].key, 0, MAX_KEY_SIZE);
- key_string2key( value, &lp->DefaultKeys.key[3] );
+ key_string2key(value, &lp->DefaultKeys.key[3]);
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY4 );
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_KEY4);
}
}
/* New Parameters for WARP */
- else if ( strcmp( key, PARM_NAME_LOAD_BALANCING ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_LOAD_BALANCING, value );
+ else if (strcmp(key, PARM_NAME_LOAD_BALANCING) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_LOAD_BALANCING, value);
lp->loadBalancing = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MEDIUM_DISTRIBUTION ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MEDIUM_DISTRIBUTION, value );
+ } else if (strcmp(key, PARM_NAME_MEDIUM_DISTRIBUTION) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MEDIUM_DISTRIBUTION, value);
lp->mediumDistribution = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MICROWAVE_ROBUSTNESS) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MICROWAVE_ROBUSTNESS, value );
+ } else if (strcmp(key, PARM_NAME_MICROWAVE_ROBUSTNESS) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MICROWAVE_ROBUSTNESS, value);
lp->MicrowaveRobustness = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MULTICAST_RATE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_RATE, value );
+ } else if (strcmp(key, PARM_NAME_MULTICAST_RATE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_RATE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_MULTICAST_RATE ) && ( value_convert <= PARM_MAX_MULTICAST_RATE )) {
+ if ((value_convert >= PARM_MIN_MULTICAST_RATE) && (value_convert <= PARM_MAX_MULTICAST_RATE))
lp->MulticastRate[0] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MULTICAST_RATE );
- }
- }
- else if ( strcmp( key, PARM_NAME_OWN_CHANNEL ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_CHANNEL, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MULTICAST_RATE);
+ } else if (strcmp(key, PARM_NAME_OWN_CHANNEL) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_CHANNEL, value);
value_convert = simple_strtoul(value, NULL, 0);
- if ( wl_is_a_valid_chan( value_convert )) {
- if ( value_convert > 14 ) {
+ if (wl_is_a_valid_chan(value_convert)) {
+ if (value_convert > 14)
value_convert = value_convert | 0x100;
- }
lp->Channel = value_convert;
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_CHANNEL );
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_CHANNEL);
}
- }
- else if ( strcmp( key, PARM_NAME_OWN_NAME ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_NAME, value );
+ } else if (strcmp(key, PARM_NAME_OWN_NAME) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_NAME, value);
- memset( lp->StationName, 0, ( PARM_MAX_NAME_LEN + 1 ));
+ memset(lp->StationName, 0, (PARM_MAX_NAME_LEN + 1));
/* Make sure the value isn't too long */
- string_length = strlen( value );
- if ( string_length > PARM_MAX_NAME_LEN ) {
- DBG_WARNING( DbgInfo, "%s too long; will be truncated\n", PARM_NAME_OWN_NAME );
+ string_length = strlen(value);
+ if (string_length > PARM_MAX_NAME_LEN) {
+ DBG_WARNING(DbgInfo, "%s too long; will be truncated\n", PARM_NAME_OWN_NAME);
string_length = PARM_MAX_NAME_LEN;
}
- memcpy( lp->StationName, value, string_length );
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD, value );
+ memcpy(lp->StationName, value, string_length);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->RTSThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD );
- }
- }
- else if ( strcmp( key, PARM_NAME_SRSC_2GHZ ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_SRSC_2GHZ, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD);
+ } else if (strcmp(key, PARM_NAME_SRSC_2GHZ) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_SRSC_2GHZ, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_SRSC ) || ( value_convert <= PARM_MAX_SRSC )) {
+ if ((value_convert >= PARM_MIN_SRSC) || (value_convert <= PARM_MAX_SRSC))
lp->srsc[0] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invaid; will be ignored\n", PARM_NAME_SRSC_2GHZ );
- }
- }
- else if ( strcmp( key, PARM_NAME_SRSC_5GHZ ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_SRSC_5GHZ, value );
+ else
+			DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_SRSC_2GHZ);
+ } else if (strcmp(key, PARM_NAME_SRSC_5GHZ) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_SRSC_5GHZ, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_SRSC ) || ( value_convert <= PARM_MAX_SRSC )) {
+ if ((value_convert >= PARM_MIN_SRSC) || (value_convert <= PARM_MAX_SRSC))
lp->srsc[1] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invaid; will be ignored\n", PARM_NAME_SRSC_5GHZ );
- }
- }
- else if ( strcmp( key, PARM_NAME_SYSTEM_SCALE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_SYSTEM_SCALE, value );
+ else
+			DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_SRSC_5GHZ);
+ } else if (strcmp(key, PARM_NAME_SYSTEM_SCALE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_SYSTEM_SCALE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_SYSTEM_SCALE ) && ( value_convert <= PARM_MAX_SYSTEM_SCALE )) {
+ if ((value_convert >= PARM_MIN_SYSTEM_SCALE) && (value_convert <= PARM_MAX_SYSTEM_SCALE))
lp->DistanceBetweenAPs = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_SYSTEM_SCALE );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_KEY ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_KEY, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_SYSTEM_SCALE);
+ } else if (strcmp(key, PARM_NAME_TX_KEY) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_KEY, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_KEY ) && ( value_convert <= PARM_MAX_TX_KEY )) {
+ if ((value_convert >= PARM_MIN_TX_KEY) && (value_convert <= PARM_MAX_TX_KEY))
lp->TransmitKeyID = simple_strtoul(value, NULL, 0);
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_KEY );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_KEY);
+ } else if (strcmp(key, PARM_NAME_TX_RATE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->TxRateControl[0] = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_POW_LEVEL ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_POW_LEVEL, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE);
+ } else if (strcmp(key, PARM_NAME_TX_POW_LEVEL) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_POW_LEVEL, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_POW_LEVEL ) || ( value_convert <= PARM_MAX_TX_POW_LEVEL )) {
+ if ((value_convert >= PARM_MIN_TX_POW_LEVEL) || (value_convert <= PARM_MAX_TX_POW_LEVEL))
lp->txPowLevel = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_POW_LEVEL );
- }
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_POW_LEVEL);
}
/* Need to add? : Country code, Short/Long retry */
/* Configuration parameters specific to STA mode */
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA
-//;?seems reasonable that even an AP-only driver could afford this small additional footprint
- if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
- //;?should we return an error status in AP mode
- if ( strcmp( key, PARM_NAME_PORT_TYPE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_PORT_TYPE, value );
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_STA */
+/* ;?seems reasonable that even an AP-only driver could afford this small additional footprint */
+ if (CNV_INT_TO_LITTLE(lp->hcfCtx.IFB_FWIdentity.comp_id) == COMP_ID_FW_STA) {
+ /* ;?should we return an error status in AP mode */
+ if (strcmp(key, PARM_NAME_PORT_TYPE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_PORT_TYPE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert == PARM_MIN_PORT_TYPE ) || ( value_convert == PARM_MAX_PORT_TYPE )) {
+ if ((value_convert == PARM_MIN_PORT_TYPE) || (value_convert == PARM_MAX_PORT_TYPE))
lp->PortType = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PORT_TYPE );
- }
- }
- else if ( strcmp( key, PARM_NAME_PM_ENABLED ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_PM_ENABLED, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PORT_TYPE);
+ } else if (strcmp(key, PARM_NAME_PM_ENABLED) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_PM_ENABLED, value);
value_convert = simple_strtoul(value, NULL, 0);
/* ;? how about wl_main.c containing
- * VALID_PARAM( PARM_PM_ENABLED <= WVLAN_PM_STATE_STANDARD ||
- * ( PARM_PM_ENABLED & 0x7FFF ) <= WVLAN_PM_STATE_STANDARD );
+ * VALID_PARAM(PARM_PM_ENABLED <= WVLAN_PM_STATE_STANDARD ||
+ * (PARM_PM_ENABLED & 0x7FFF) <= WVLAN_PM_STATE_STANDARD);
*/
- if ( ( value_convert & 0x7FFF ) <= PARM_MAX_PM_ENABLED) {
+ if ((value_convert & 0x7FFF) <= PARM_MAX_PM_ENABLED) {
lp->PMEnabled = value_convert;
} else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PM_ENABLED );
- //;?this is a data entry error, hence not a DBG_WARNING
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PM_ENABLED);
+ /* ;?this is a data entry error, hence not a DBG_WARNING */
}
- }
- else if ( strcmp( key, PARM_NAME_CREATE_IBSS ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_CREATE_IBSS, value );
+ } else if (strcmp(key, PARM_NAME_CREATE_IBSS) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_CREATE_IBSS, value);
lp->CreateIBSS = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MULTICAST_RX ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_RX, value );
+ } else if (strcmp(key, PARM_NAME_MULTICAST_RX) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_RX, value);
lp->MulticastReceive = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MAX_SLEEP ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MAX_SLEEP, value );
+ } else if (strcmp(key, PARM_NAME_MAX_SLEEP) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MAX_SLEEP, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= 0 ) && ( value_convert <= 65535 )) {
+ if ((value_convert >= 0) && (value_convert <= 65535))
lp->MaxSleepDuration = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MAX_SLEEP );
- }
- }
- else if ( strcmp( key, PARM_NAME_NETWORK_ADDR ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_NETWORK_ADDR, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->MACAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_NETWORK_ADDR );
- }
- }
- else if ( strcmp( key, PARM_NAME_AUTHENTICATION ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_AUTHENTICATION, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_MAX_SLEEP);
+ } else if (strcmp(key, PARM_NAME_NETWORK_ADDR) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_NETWORK_ADDR, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->MACAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_NETWORK_ADDR);
+ } else if (strcmp(key, PARM_NAME_AUTHENTICATION) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_AUTHENTICATION, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_AUTHENTICATION ) && ( value_convert <= PARM_MAX_AUTHENTICATION )) {
+ if ((value_convert >= PARM_MIN_AUTHENTICATION) && (value_convert <= PARM_MAX_AUTHENTICATION))
lp->authentication = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_AUTHENTICATION );
- }
- }
- else if ( strcmp( key, PARM_NAME_OWN_ATIM_WINDOW ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_ATIM_WINDOW, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_AUTHENTICATION);
+ } else if (strcmp(key, PARM_NAME_OWN_ATIM_WINDOW) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_ATIM_WINDOW, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_OWN_ATIM_WINDOW ) && ( value_convert <= PARM_MAX_OWN_ATIM_WINDOW )) {
+ if ((value_convert >= PARM_MIN_OWN_ATIM_WINDOW) && (value_convert <= PARM_MAX_OWN_ATIM_WINDOW))
lp->atimWindow = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_ATIM_WINDOW );
- }
- }
- else if ( strcmp( key, PARM_NAME_PM_HOLDOVER_DURATION ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_PM_HOLDOVER_DURATION, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_ATIM_WINDOW);
+ } else if (strcmp(key, PARM_NAME_PM_HOLDOVER_DURATION) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_PM_HOLDOVER_DURATION, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_PM_HOLDOVER_DURATION ) && ( value_convert <= PARM_MAX_PM_HOLDOVER_DURATION )) {
+ if ((value_convert >= PARM_MIN_PM_HOLDOVER_DURATION) && (value_convert <= PARM_MAX_PM_HOLDOVER_DURATION))
lp->holdoverDuration = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PM_HOLDOVER_DURATION );
- }
- }
- else if ( strcmp( key, PARM_NAME_PROMISCUOUS_MODE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_PROMISCUOUS_MODE, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_PM_HOLDOVER_DURATION);
+ } else if (strcmp(key, PARM_NAME_PROMISCUOUS_MODE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_PROMISCUOUS_MODE, value);
lp->promiscuousMode = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_CONNECTION_CONTROL ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_CONNECTION_CONTROL, value );
+ } else if (strcmp(key, PARM_NAME_CONNECTION_CONTROL) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_CONNECTION_CONTROL, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_CONNECTION_CONTROL ) && ( value_convert <= PARM_MAX_CONNECTION_CONTROL )) {
+ if ((value_convert >= PARM_MIN_CONNECTION_CONTROL) && (value_convert <= PARM_MAX_CONNECTION_CONTROL))
lp->connectionControl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_CONNECTION_CONTROL );
- }
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_CONNECTION_CONTROL);
}
/* Need to add? : Probe Data Rate */
@@ -743,237 +690,193 @@ void translate_option( char *buffer, struct wl_private *lp )
#endif /* (HCF_TYPE) & HCF_TYPE_STA */
/* Configuration parameters specific to AP mode */
-#if 1 //;? (HCF_TYPE) & HCF_TYPE_AP
- //;?should we restore this to allow smaller memory footprint
- if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_AP ) {
- if ( strcmp( key, PARM_NAME_OWN_DTIM_PERIOD ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_DTIM_PERIOD, value );
+#if 1 /* ;? (HCF_TYPE) & HCF_TYPE_AP */
+ /* ;?should we restore this to allow smaller memory footprint */
+ if (CNV_INT_TO_LITTLE(lp->hcfCtx.IFB_FWIdentity.comp_id) == COMP_ID_FW_AP) {
+ if (strcmp(key, PARM_NAME_OWN_DTIM_PERIOD) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_DTIM_PERIOD, value);
value_convert = simple_strtoul(value, NULL, 0);
- if ( value_convert >= PARM_MIN_OWN_DTIM_PERIOD ) {
+ if (value_convert >= PARM_MIN_OWN_DTIM_PERIOD)
lp->DTIMPeriod = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_DTIM_PERIOD );
- }
- }
- else if ( strcmp( key, PARM_NAME_REJECT_ANY ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_REJECT_ANY, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_DTIM_PERIOD);
+ } else if (strcmp(key, PARM_NAME_REJECT_ANY) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_REJECT_ANY, value);
lp->RejectAny = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_EXCLUDE_UNENCRYPTED ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_EXCLUDE_UNENCRYPTED, value );
+ } else if (strcmp(key, PARM_NAME_EXCLUDE_UNENCRYPTED) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_EXCLUDE_UNENCRYPTED, value);
lp->ExcludeUnencrypted = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_MULTICAST_PM_BUFFERING ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_PM_BUFFERING, value );
+ } else if (strcmp(key, PARM_NAME_MULTICAST_PM_BUFFERING) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_MULTICAST_PM_BUFFERING, value);
lp->ExcludeUnencrypted = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_INTRA_BSS_RELAY ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_INTRA_BSS_RELAY, value );
+ } else if (strcmp(key, PARM_NAME_INTRA_BSS_RELAY) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_INTRA_BSS_RELAY, value);
lp->ExcludeUnencrypted = parse_yes_no(value);
- }
- else if ( strcmp( key, PARM_NAME_OWN_BEACON_INTERVAL ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_BEACON_INTERVAL, value );
+ } else if (strcmp(key, PARM_NAME_OWN_BEACON_INTERVAL) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_OWN_BEACON_INTERVAL, value);
value_convert = simple_strtoul(value, NULL, 0);
- if ( value_convert >= PARM_MIN_OWN_BEACON_INTERVAL ) {
+ if (value_convert >= PARM_MIN_OWN_BEACON_INTERVAL)
lp->ownBeaconInterval = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_BEACON_INTERVAL );
- }
- }
- else if ( strcmp( key, PARM_NAME_COEXISTENCE ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_COEXISTENCE, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_OWN_BEACON_INTERVAL);
+ } else if (strcmp(key, PARM_NAME_COEXISTENCE) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_COEXISTENCE, value);
value_convert = simple_strtoul(value, NULL, 0);
- if ( value_convert >= PARM_MIN_COEXISTENCE ) {
+ if (value_convert >= PARM_MIN_COEXISTENCE)
lp->coexistence = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_COEXISTENCE );
- }
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_COEXISTENCE);
}
#ifdef USE_WDS
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD1 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD1, value );
+ else if (strcmp(key, PARM_NAME_RTS_THRESHOLD1) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD1, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[0].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD1 );
- }
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD2 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD2, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD1);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD2) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD2, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[1].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD2 );
- }
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD3 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD3, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD2);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD3) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD3, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[2].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD3 );
- }
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD4 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD4, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD3);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD4) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD4, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[3].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD4 );
- }
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD5 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD5, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD4);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD5) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD5, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[4].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD5 );
- }
- }
- else if ( strcmp( key, PARM_NAME_RTS_THRESHOLD6 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD6, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD5);
+ } else if (strcmp(key, PARM_NAME_RTS_THRESHOLD6) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_RTS_THRESHOLD6, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_RTS_THRESHOLD ) && ( value_convert <= PARM_MAX_RTS_THRESHOLD )) {
+ if ((value_convert >= PARM_MIN_RTS_THRESHOLD) && (value_convert <= PARM_MAX_RTS_THRESHOLD))
lp->wds_port[5].rtsThreshold = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD6 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE1 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE1, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_RTS_THRESHOLD6);
+ } else if (strcmp(key, PARM_NAME_TX_RATE1) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE1, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[0].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE1 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE2 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE2, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE1);
+ } else if (strcmp(key, PARM_NAME_TX_RATE2) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE2, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[1].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE2 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE3 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE3, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE2);
+ } else if (strcmp(key, PARM_NAME_TX_RATE3) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE3, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[2].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE3 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE4 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE4, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE3);
+ } else if (strcmp(key, PARM_NAME_TX_RATE4) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE4, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[3].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE4 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE5 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE5, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE4);
+ } else if (strcmp(key, PARM_NAME_TX_RATE5) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE5, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[4].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE5 );
- }
- }
- else if ( strcmp( key, PARM_NAME_TX_RATE6 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE6, value );
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE5);
+ } else if (strcmp(key, PARM_NAME_TX_RATE6) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_TX_RATE6, value);
value_convert = simple_strtoul(value, NULL, 0);
- if (( value_convert >= PARM_MIN_TX_RATE ) && ( value_convert <= PARM_MAX_TX_RATE )) {
+ if ((value_convert >= PARM_MIN_TX_RATE) && (value_convert <= PARM_MAX_TX_RATE))
lp->wds_port[5].txRateCntl = value_convert;
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE6 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS1 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS1, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[0].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS1 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS2 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS2, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[1].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS2 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS3 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS3, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[2].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS3 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS4 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS4, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[3].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS4 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS5 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS5, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[4].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS5 );
- }
- }
- else if ( strcmp( key, PARM_NAME_WDS_ADDRESS6 ) == 0 ) {
- DBG_TRACE( DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS6, value );
-
- if ( parse_mac_address( value, mac_value ) == ETH_ALEN ) {
- memcpy( lp->wds_port[5].wdsAddress, mac_value, ETH_ALEN );
- } else {
- DBG_WARNING( DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS6 );
- }
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_TX_RATE6);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS1) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS1, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[0].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS1);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS2) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS2, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[1].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS2);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS3) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS3, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[2].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS3);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS4) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS4, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[3].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS4);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS5) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS5, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[4].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS5);
+ } else if (strcmp(key, PARM_NAME_WDS_ADDRESS6) == 0) {
+ DBG_TRACE(DbgInfo, "%s, value: %s\n", PARM_NAME_WDS_ADDRESS6, value);
+
+ if (parse_mac_address(value, mac_value) == ETH_ALEN)
+ memcpy(lp->wds_port[5].wdsAddress, mac_value, ETH_ALEN);
+ else
+ DBG_WARNING(DbgInfo, "%s invalid; will be ignored\n", PARM_NAME_WDS_ADDRESS6);
}
#endif /* USE_WDS */
}
#endif /* (HCF_TYPE) & HCF_TYPE_AP */
return;
-} // translate_option
+} /* translate_option */
/*============================================================================*/
/*******************************************************************************
@@ -996,7 +899,7 @@ void translate_option( char *buffer, struct wl_private *lp )
* The number of bytes in the final MAC address; should equal ETH_ALEN.
*
******************************************************************************/
-int parse_mac_address( char *value, u_char *byte_array )
+int parse_mac_address(char *value, u_char *byte_array)
{
int value_offset = 0;
int array_offset = 0;
@@ -1004,11 +907,11 @@ int parse_mac_address( char *value, u_char *byte_array )
char byte_field[3];
/*------------------------------------------------------------------------*/
- memset( byte_field, '\0', 3 );
+ memset(byte_field, '\0', 3);
- while( value[value_offset] != '\0' ) {
+ while (value[value_offset] != '\0') {
/* Skip over the colon chars separating the bytes, if they exist */
- if ( value[value_offset] == ':' ) {
+ if (value[value_offset] == ':') {
value_offset++;
continue;
}
@@ -1018,9 +921,9 @@ int parse_mac_address( char *value, u_char *byte_array )
value_offset++;
/* Once the byte_field is filled, convert it and store it */
- if ( field_offset == 2 ) {
+ if (field_offset == 2) {
byte_field[field_offset] = '\0';
- byte_array[array_offset] = simple_strtoul( byte_field, NULL, 16 );
+ byte_array[array_offset] = simple_strtoul(byte_field, NULL, 16);
field_offset = 0;
array_offset++;
}
@@ -1029,7 +932,7 @@ int parse_mac_address( char *value, u_char *byte_array )
/* Use the array_offset as a check; 6 bytes should be written to the
byte_array */
return array_offset;
-} // parse_mac_address
+} /* parse_mac_address */
/*============================================================================*/
/*******************************************************************************
@@ -1052,42 +955,42 @@ int parse_mac_address( char *value, u_char *byte_array )
* N/A
*
******************************************************************************/
-void ParseConfigLine( char *pszLine, char **ppszLVal, char **ppszRVal )
+void ParseConfigLine(char *pszLine, char **ppszLVal, char **ppszRVal)
{
int i;
int size;
/*------------------------------------------------------------------------*/
- DBG_FUNC( "ParseConfigLine" );
- DBG_ENTER( DbgInfo );
+ DBG_FUNC("ParseConfigLine");
+ DBG_ENTER(DbgInfo);
/* get a snapshot of our string size */
- size = strlen( pszLine );
+ size = strlen(pszLine);
*ppszLVal = NULL;
*ppszRVal = NULL;
- if ( pszLine[0] != '#' && /* skip the line if it is a comment */
- pszLine[0] != '\n'&& /* if it's an empty UNIX line, do nothing */
- !( pszLine[0] == '\r' && pszLine[1] == '\n' ) /* if it's an empty MS-DOS line, do nothing */
- ) {
+ if (pszLine[0] != '#' && /* skip the line if it is a comment */
+ pszLine[0] != '\n' && /* if it's an empty UNIX line, do nothing */
+ !(pszLine[0] == '\r' && pszLine[1] == '\n') /* if it's an empty MS-DOS line, do nothing */
+ ) {
/* advance past any whitespace, and assign the L-value */
- for( i = 0; i < size; i++ ) {
- if ( pszLine[i] != ' ' ) {
+ for (i = 0; i < size; i++) {
+ if (pszLine[i] != ' ') {
*ppszLVal = &pszLine[i];
break;
}
}
/* advance to the end of the l-value*/
- for( i++; i < size; i++ ) {
- if ( pszLine[i] == ' ' || pszLine[i] == '=' ) {
+ for (i++; i < size; i++) {
+ if (pszLine[i] == ' ' || pszLine[i] == '=') {
pszLine[i] = '\0';
break;
}
}
/* make any whitespace and the equal sign a NULL character, and
advance to the R-Value */
- for( i++; i < size; i++ ) {
- if ( pszLine[i] == ' ' || pszLine[i] == '=' ) {
+ for (i++; i < size; i++) {
+ if (pszLine[i] == ' ' || pszLine[i] == '=') {
pszLine[i] = '\0';
continue;
}
@@ -1095,17 +998,15 @@ void ParseConfigLine( char *pszLine, char **ppszLVal, char **ppszRVal )
break;
}
/* make the line ending character(s) a NULL */
- for( i++; i < size; i++ ) {
- if ( pszLine[i] == '\n' ) {
+ for (i++; i < size; i++) {
+ if (pszLine[i] == '\n')
pszLine[i] = '\0';
- }
- if (( pszLine[i] == '\r' ) && ( pszLine[i+1] == '\n' )) {
+ if ((pszLine[i] == '\r') && (pszLine[i+1] == '\n'))
pszLine[i] = '\0';
- }
}
}
- DBG_LEAVE( DbgInfo );
-} // ParseConfigLine
+ DBG_LEAVE(DbgInfo);
+} /* ParseConfigLine */
/*============================================================================*/
-#endif // USE_PROFILE
+#endif /* USE_PROFILE */
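For reference, a minimal standalone sketch of the byte-pair parsing that parse_mac_address() above performs, assuming ordinary userspace C; sketch_parse_mac() and MY_ETH_ALEN are hypothetical stand-ins, not driver symbols.

/*
 * Standalone sketch (not driver code): skip ':' separators, collect two hex
 * digits at a time, convert each pair with strtoul(), and return how many
 * bytes were written so the caller can compare against the expected length.
 */
#include <stdio.h>
#include <stdlib.h>

#define MY_ETH_ALEN 6	/* local stand-in for the kernel's ETH_ALEN */

static int sketch_parse_mac(const char *value, unsigned char *byte_array)
{
	char byte_field[3] = { 0 };
	int value_offset = 0;
	int field_offset = 0;
	int array_offset = 0;

	while (value[value_offset] != '\0' && array_offset < MY_ETH_ALEN) {
		if (value[value_offset] == ':') {	/* skip separators */
			value_offset++;
			continue;
		}
		byte_field[field_offset++] = value[value_offset++];
		if (field_offset == 2) {		/* one full byte collected */
			byte_field[2] = '\0';
			byte_array[array_offset++] =
				(unsigned char)strtoul(byte_field, NULL, 16);
			field_offset = 0;
		}
	}
	return array_offset;	/* caller compares this against MY_ETH_ALEN */
}

int main(void)
{
	unsigned char mac[MY_ETH_ALEN];

	if (sketch_parse_mac("00:11:22:aa:bb:cc", mac) == MY_ETH_ALEN)
		printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	else
		printf("invalid address; would be ignored\n");
	return 0;
}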
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
index 864e01a736c..e4c8804ac37 100644
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ b/drivers/staging/wlags49_h2/wl_sysfs.c
@@ -46,7 +46,8 @@ static ssize_t show_tallies(struct device *d, struct device_attribute *attr,
if (dev_isalive(dev)) {
wl_lock(lp, &flags);
- if ((ret = wl_get_tallies(lp, &tallies)) == 0) {
+ ret = wl_get_tallies(lp, &tallies);
+ if (ret == 0) {
wl_unlock(lp, &flags);
ret = snprintf(buf, PAGE_SIZE,
"TxUnicastFrames: %u\n"
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4434e006548..06467f1bf90 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -82,17 +82,10 @@
in the build. */
#ifdef WIRELESS_EXT
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-#define IWE_STREAM_ADD_EVENT(info, buf, end, iwe, len) \
- iwe_stream_add_event(buf, end, iwe, len)
-#define IWE_STREAM_ADD_POINT(info, buf, end, iwe, msg) \
- iwe_stream_add_point(buf, end, iwe, msg)
-#else
#define IWE_STREAM_ADD_EVENT(info, buf, end, iwe, len) \
iwe_stream_add_event(info, buf, end, iwe, len)
#define IWE_STREAM_ADD_POINT(info, buf, end, iwe, msg) \
iwe_stream_add_point(info, buf, end, iwe, msg)
-#endif
@@ -3940,7 +3933,7 @@ void wl_wext_event_mic_failed( struct net_device *dev )
MLME-MICHAELMICFAILURE.indication(keyid=# broadcast/unicast addr=addr2)
*/
- /* NOTE: Format of MAC address (using colons to seperate bytes) may cause
+ /* NOTE: Format of MAC address (using colons to separate bytes) may cause
a problem in future versions of the supplicant, if they ever
actually parse these parameters */
#if DBG
diff --git a/drivers/staging/wlags49_h25/Kconfig b/drivers/staging/wlags49_h25/Kconfig
index 304a8c96ce3..dcc170929c1 100644
--- a/drivers/staging/wlags49_h25/Kconfig
+++ b/drivers/staging/wlags49_h25/Kconfig
@@ -1,6 +1,6 @@
config WLAGS49_H25
tristate "Linksys HERMES II.5 WCF54G_Wireless-G_CompactFlash_Card"
- depends on WLAN_80211 && WIRELESS_EXT && PCMCIA
+ depends on WLAN && WIRELESS_EXT && PCMCIA
select WEXT_SPY
---help---
Driver for wireless cards using Agere's HERMES II.5 chipset
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 5df56f0238d..a41db5dc8c7 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -62,9 +62,9 @@
*
* hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These
* functions are wrappers for the RID get/set
-* sequence. They call copy_[to|from]_bap() and
-* cmd_access(). These functions operate on the
-* RIDs and buffers without validation. The caller
+* sequence. They call copy_[to|from]_bap() and
+* cmd_access(). These functions operate on the
+* RIDs and buffers without validation. The caller
* is responsible for that.
*
* API wrapper functions:
@@ -144,7 +144,6 @@ enum cmd_mode {
DOWAIT = 0,
DOASYNC
};
-typedef enum cmd_mode CMD_MODE;
#define THROTTLE_JIFFIES (HZ/8)
#define URB_ASYNC_UNLINK 0
@@ -206,12 +205,11 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
struct usbctlx_completor {
int (*complete) (struct usbctlx_completor *);
};
-typedef struct usbctlx_completor usbctlx_completor_t;
static int
hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx,
- usbctlx_completor_t *completor);
+ struct usbctlx_completor *completor);
static int
unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
@@ -232,13 +230,13 @@ usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp,
/* Low level req/resp CTLX formatters and submitters */
static int
hfa384x_docmd(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
hfa384x_metacmd_t *cmd,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data);
static int
hfa384x_dorrid(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
@@ -246,7 +244,7 @@ hfa384x_dorrid(hfa384x_t *hw,
static int
hfa384x_dowrid(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
@@ -254,7 +252,7 @@ hfa384x_dowrid(hfa384x_t *hw,
static int
hfa384x_dormem(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
@@ -263,7 +261,7 @@ hfa384x_dormem(hfa384x_t *hw,
static int
hfa384x_dowmem(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
@@ -351,7 +349,8 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
hw->rx_urb_skb = skb;
result = -ENOLINK;
- if (!hw->wlandev->hwremoved && !test_bit(WORK_RX_HALT, &hw->usb_flags)) {
+ if (!hw->wlandev->hwremoved &&
+ !test_bit(WORK_RX_HALT, &hw->usb_flags)) {
result = SUBMIT_URB(&hw->rx_urb, memflags);
/* Check whether we need to reset the RX pipe */
@@ -451,7 +450,7 @@ static void hfa384x_usb_defer(struct work_struct *data)
if (test_bit(WORK_RX_HALT, &hw->usb_flags)) {
int ret;
- usb_kill_urb(&hw->rx_urb); /* Cannot be holding spinlock! */
+ usb_kill_urb(&hw->rx_urb); /* Cannot be holding spinlock! */
ret = usb_clear_halt(hw->usb, hw->endp_in);
if (ret != 0) {
@@ -668,26 +667,26 @@ usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp,
* when processing a CTLX that returns a hfa384x_cmdresult_t structure.
----------------------------------------------------------------*/
struct usbctlx_cmd_completor {
- usbctlx_completor_t head;
+ struct usbctlx_completor head;
const hfa384x_usb_cmdresp_t *cmdresp;
hfa384x_cmdresult_t *result;
};
-typedef struct usbctlx_cmd_completor usbctlx_cmd_completor_t;
-static int usbctlx_cmd_completor_fn(usbctlx_completor_t *head)
+static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
{
- usbctlx_cmd_completor_t *complete = (usbctlx_cmd_completor_t *) head;
+ struct usbctlx_cmd_completor *complete;
+
+ complete = (struct usbctlx_cmd_completor *) head;
return usbctlx_get_status(complete->cmdresp, complete->result);
}
-static inline usbctlx_completor_t *init_cmd_completor(usbctlx_cmd_completor_t *
- completor,
- const
- hfa384x_usb_cmdresp_t *
- cmdresp,
- hfa384x_cmdresult_t *
- result)
+static inline struct usbctlx_completor *init_cmd_completor(
+ struct usbctlx_cmd_completor
+ *completor,
+ const hfa384x_usb_cmdresp_t
+ *cmdresp,
+ hfa384x_cmdresult_t *result)
{
completor->head.complete = usbctlx_cmd_completor_fn;
completor->cmdresp = cmdresp;
@@ -701,19 +700,19 @@ static inline usbctlx_completor_t *init_cmd_completor(usbctlx_cmd_completor_t *
* when processing a CTLX that reads a RID.
----------------------------------------------------------------*/
struct usbctlx_rrid_completor {
- usbctlx_completor_t head;
+ struct usbctlx_completor head;
const hfa384x_usb_rridresp_t *rridresp;
void *riddata;
unsigned int riddatalen;
};
-typedef struct usbctlx_rrid_completor usbctlx_rrid_completor_t;
-static int usbctlx_rrid_completor_fn(usbctlx_completor_t *head)
+static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
{
- usbctlx_rrid_completor_t *complete = (usbctlx_rrid_completor_t *) head;
+ struct usbctlx_rrid_completor *complete;
hfa384x_rridresult_t rridresult;
+ complete = (struct usbctlx_rrid_completor *) head;
usbctlx_get_rridresult(complete->rridresp, &rridresult);
/* Validate the length, note body len calculation in bytes */
@@ -729,12 +728,13 @@ static int usbctlx_rrid_completor_fn(usbctlx_completor_t *head)
return 0;
}
-static inline usbctlx_completor_t *init_rrid_completor(usbctlx_rrid_completor_t
- *completor,
- const
- hfa384x_usb_rridresp_t *
- rridresp, void *riddata,
- unsigned int riddatalen)
+static inline struct usbctlx_completor *init_rrid_completor(
+ struct usbctlx_rrid_completor
+ *completor,
+ const hfa384x_usb_rridresp_t
+ *rridresp,
+ void *riddata,
+ unsigned int riddatalen)
{
completor->head.complete = usbctlx_rrid_completor_fn;
completor->rridresp = rridresp;
@@ -747,14 +747,14 @@ static inline usbctlx_completor_t *init_rrid_completor(usbctlx_rrid_completor_t
* Completor object:
* Interprets the results of a synchronous RID-write
----------------------------------------------------------------*/
-typedef usbctlx_cmd_completor_t usbctlx_wrid_completor_t;
+typedef struct usbctlx_cmd_completor usbctlx_wrid_completor_t;
#define init_wrid_completor init_cmd_completor
/*----------------------------------------------------------------
* Completor object:
* Interprets the results of a synchronous memory-write
----------------------------------------------------------------*/
-typedef usbctlx_cmd_completor_t usbctlx_wmem_completor_t;
+typedef struct usbctlx_cmd_completor usbctlx_wmem_completor_t;
#define init_wmem_completor init_cmd_completor
/*----------------------------------------------------------------
@@ -762,7 +762,7 @@ typedef usbctlx_cmd_completor_t usbctlx_wmem_completor_t;
* Interprets the results of a synchronous memory-read
----------------------------------------------------------------*/
struct usbctlx_rmem_completor {
- usbctlx_completor_t head;
+ struct usbctlx_completor head;
const hfa384x_usb_rmemresp_t *rmemresp;
void *data;
@@ -770,7 +770,7 @@ struct usbctlx_rmem_completor {
};
typedef struct usbctlx_rmem_completor usbctlx_rmem_completor_t;
-static int usbctlx_rmem_completor_fn(usbctlx_completor_t *head)
+static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
{
usbctlx_rmem_completor_t *complete = (usbctlx_rmem_completor_t *) head;
@@ -779,11 +779,13 @@ static int usbctlx_rmem_completor_fn(usbctlx_completor_t *head)
return 0;
}
-static inline usbctlx_completor_t *init_rmem_completor(usbctlx_rmem_completor_t
- *completor,
- hfa384x_usb_rmemresp_t
- *rmemresp, void *data,
- unsigned int len)
+static inline struct usbctlx_completor *init_rmem_completor(
+ usbctlx_rmem_completor_t
+ *completor,
+ hfa384x_usb_rmemresp_t
+ *rmemresp,
+ void *data,
+ unsigned int len)
{
completor->head.complete = usbctlx_rmem_completor_fn;
completor->rmemresp = rmemresp;
@@ -1226,7 +1228,7 @@ int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
*
* Arguments:
* hw device structure
-* ctlx CTLX ptr
+* ctlx CTLX ptr
* completor functor object to decide what to
* do with the CTLX's result.
*
@@ -1244,7 +1246,7 @@ int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
----------------------------------------------------------------*/
static int hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx,
- usbctlx_completor_t *completor)
+ struct usbctlx_completor *completor)
{
unsigned long flags;
int result;
@@ -1359,7 +1361,7 @@ cleanup:
----------------------------------------------------------------*/
static int
hfa384x_docmd(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
hfa384x_metacmd_t *cmd,
ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
@@ -1394,7 +1396,7 @@ hfa384x_docmd(hfa384x_t *hw,
if (result != 0) {
kfree(ctlx);
} else if (mode == DOWAIT) {
- usbctlx_cmd_completor_t completor;
+ struct usbctlx_cmd_completor completor;
result =
hfa384x_usbctlx_complete_sync(hw, ctlx,
@@ -1448,7 +1450,7 @@ done:
----------------------------------------------------------------*/
static int
hfa384x_dorrid(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
@@ -1481,7 +1483,7 @@ hfa384x_dorrid(hfa384x_t *hw,
if (result != 0) {
kfree(ctlx);
} else if (mode == DOWAIT) {
- usbctlx_rrid_completor_t completor;
+ struct usbctlx_rrid_completor completor;
result =
hfa384x_usbctlx_complete_sync(hw, ctlx,
@@ -1506,7 +1508,7 @@ done:
*
* Arguments:
* hw device structure
-* CMD_MODE DOWAIT or DOASYNC
+* enum cmd_mode DOWAIT or DOASYNC
* rid RID code
* riddata Data portion of RID formatted for MAC
* riddatalen Length of the data portion in bytes
@@ -1529,7 +1531,7 @@ done:
----------------------------------------------------------------*/
static int
hfa384x_dowrid(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 rid,
void *riddata,
unsigned int riddatalen,
@@ -1616,7 +1618,7 @@ done:
----------------------------------------------------------------*/
static int
hfa384x_dormem(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
@@ -1707,7 +1709,7 @@ done:
----------------------------------------------------------------*/
static int
hfa384x_dowmem(hfa384x_t *hw,
- CMD_MODE mode,
+ enum cmd_mode mode,
u16 page,
u16 offset,
void *data,
@@ -2075,12 +2077,9 @@ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
(j * HFA384x_USB_RWMEM_MAXLEN);
writepage = HFA384x_ADDR_CMD_MKPAGE(dlbufaddr +
- (j *
- HFA384x_USB_RWMEM_MAXLEN));
- writeoffset =
- HFA384x_ADDR_CMD_MKOFF(dlbufaddr +
- (j *
- HFA384x_USB_RWMEM_MAXLEN));
+ (j * HFA384x_USB_RWMEM_MAXLEN));
+ writeoffset = HFA384x_ADDR_CMD_MKOFF(dlbufaddr +
+ (j * HFA384x_USB_RWMEM_MAXLEN));
writelen = burnlen - (j * HFA384x_USB_RWMEM_MAXLEN);
writelen = writelen > HFA384x_USB_RWMEM_MAXLEN ?
@@ -2133,7 +2132,7 @@ exit_proc:
* 0 success
* >0 f/w reported error - f/w status code
* <0 driver reported error
-* -ENODATA length mismatch between argument and retrieved
+* -ENODATA length mismatch between argument and retrieved
* record.
*
* Side effects:
@@ -2451,7 +2450,9 @@ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len)
currpage = HFA384x_ADDR_CMD_MKPAGE(pdaloc[i].cardaddr);
curroffset = HFA384x_ADDR_CMD_MKOFF(pdaloc[i].cardaddr);
- result = hfa384x_dormem_wait(hw, currpage, curroffset, buf, len); /* units of bytes */
+ /* units of bytes */
+ result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
+ len);
if (result) {
printk(KERN_WARNING
@@ -2611,20 +2612,18 @@ int hfa384x_drvr_start(hfa384x_t *hw)
if (result1 != 0) {
if (result2 != 0) {
printk(KERN_ERR
- "cmd_initialize() failed on two attempts, results %d and %d\n",
- result1, result2);
+ "cmd_initialize() failed on two attempts, results %d and %d\n",
+ result1, result2);
usb_kill_urb(&hw->rx_urb);
goto done;
} else {
pr_debug("First cmd_initialize() failed (result %d),\n",
result1);
- pr_debug
- ("but second attempt succeeded. All should be ok\n");
+ pr_debug("but second attempt succeeded. All should be ok\n");
}
} else if (result2 != 0) {
- printk(KERN_WARNING
- "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n",
- result2);
+ printk(KERN_WARNING "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n",
+ result2);
printk(KERN_WARNING
"Most likely the card will be functional\n");
goto done;
@@ -3382,8 +3381,7 @@ retry:
* our request has been acknowledged. Odd,
* but our OUT URB is still alive...
*/
- pr_debug
- ("Causality violation: please reboot Universe, or email linux-wlan-devel@lists.linux-wlan.com\n");
+ pr_debug("Causality violation: please reboot Universe\n");
ctlx->state = CTLX_RESP_COMPLETE;
break;
@@ -3442,7 +3440,7 @@ static void hfa384x_usbin_txcompl(wlandevice_t *wlandev,
{
u16 status;
- status = le16_to_cpu(usbin->type); /* yeah I know it says type... */
+ status = le16_to_cpu(usbin->type); /* yeah I know it says type... */
/* Was there an error? */
if (HFA384x_TXSTATUS_ISERROR(status))
@@ -3583,7 +3581,7 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
struct sk_buff *skb;
hfa384x_t *hw = wlandev->priv;
- /* Don't forget the status, time, and data_len fields are in host order */
+ /* Remember the status, time, and data_len fields are in host order */
/* Figure out how big the frame is */
fc = le16_to_cpu(rxdesc->frame_control);
hdrlen = p80211_headerlen(fc);
@@ -3632,7 +3630,8 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
caphdr->encoding = htonl(1); /* cck */
}
- /* Copy the 802.11 header to the skb (ctl frames may be less than a full header) */
+ /* Copy the 802.11 header to the skb
+ (ctl frames may be less than a full header) */
datap = skb_put(skb, hdrlen);
memcpy(datap, &(rxdesc->frame_control), hdrlen);
@@ -3644,7 +3643,8 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
/* check for unencrypted stuff if WEP bit set. */
if (*(datap - hdrlen + 1) & 0x40) /* wep set */
if ((*(datap) == 0xaa) && (*(datap + 1) == 0xaa))
- *(datap - hdrlen + 1) &= 0xbf; /* clear wep; it's the 802.2 header! */
+ /* clear wep; it's the 802.2 header! */
+ *(datap - hdrlen + 1) &= 0xbf;
}
if (hw->sniff_fcs) {
@@ -3846,9 +3846,9 @@ retry:
default:
/* This is NOT a valid CTLX "success" state! */
printk(KERN_ERR
- "Illegal CTLX[%d] success state(%s, %d) in OUT URB\n",
- le16_to_cpu(ctlx->outbuf.type),
- ctlxstr(ctlx->state), urb->status);
+ "Illegal CTLX[%d] success state(%s, %d) in OUT URB\n",
+ le16_to_cpu(ctlx->outbuf.type),
+ ctlxstr(ctlx->state), urb->status);
break;
} /* switch */
} else {
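For reference, a minimal standalone sketch (not driver code) of the completor pattern this file is converted to: a bare head struct holding only a function pointer is embedded as the first member of each specialized completor, generic code calls head->complete(), and the handler casts the head back to its real type. All names below are hypothetical.

#include <stdio.h>

struct completor {
	int (*complete)(struct completor *head);
};

struct sum_completor {
	struct completor head;		/* must be the first member */
	int a, b;
	int *result;
};

static int sum_completor_fn(struct completor *head)
{
	struct sum_completor *c = (struct sum_completor *)head;

	*c->result = c->a + c->b;
	return 0;
}

/* generic "wait for completion" code only ever sees struct completor */
static int complete_sync(struct completor *completor)
{
	return completor->complete(completor);
}

int main(void)
{
	int out = 0;
	struct sum_completor c = {
		.head.complete = sum_completor_fn,
		.a = 2, .b = 3, .result = &out,
	};

	complete_sync(&c.head);
	printf("completor result: %d\n", out);
	return 0;
}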
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index a1605fbc809..059e15055b7 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -208,7 +208,7 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
foo = wep_encrypt(wlandev, skb->data, p80211_wep->data,
skb->len,
- (wlandev->hostwep &HOSTWEP_DEFAULTKEY_MASK),
+ (wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK),
p80211_wep->iv, p80211_wep->icv);
if (foo) {
printk(KERN_WARNING
@@ -601,7 +601,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
}
/* Allocate the rxmeta */
- rxmeta = kmalloc(sizeof(p80211_rxmeta_t), GFP_ATOMIC);
+ rxmeta = kzalloc(sizeof(p80211_rxmeta_t), GFP_ATOMIC);
if (rxmeta == NULL) {
printk(KERN_ERR "%s: Failed to allocate rxmeta.\n",
@@ -611,7 +611,6 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
}
/* Initialize the rxmeta */
- memset(rxmeta, 0, sizeof(p80211_rxmeta_t));
rxmeta->wlandev = wlandev;
rxmeta->hosttime = jiffies;
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index e1e7bf1bf27..207f080cfc9 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -107,7 +107,8 @@ int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
}
/* Check Permissions */
- if (!capable(CAP_NET_ADMIN) && (msg->msgcode != DIDmsg_dot11req_mibget)) {
+ if (!capable(CAP_NET_ADMIN) &&
+ (msg->msgcode != DIDmsg_dot11req_mibget)) {
printk(KERN_ERR
"%s: only dot11req_mibget allowed for non-root.\n",
wlandev->name);
@@ -128,7 +129,7 @@ int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
wlandev->mlmerequest(wlandev, msg);
clear_bit(1, &(wlandev->request_pending));
- return result; /* if result==0, msg->status still may contain an err */
+ return result; /* if result==0, msg->status still may contain an err */
}
/*----------------------------------------------------------------
diff --git a/drivers/staging/wlan-ng/p80211wext.c b/drivers/staging/wlan-ng/p80211wext.c
index 83f1d6cd799..387194d4a6e 100644
--- a/drivers/staging/wlan-ng/p80211wext.c
+++ b/drivers/staging/wlan-ng/p80211wext.c
@@ -49,7 +49,6 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <linux/if_ether.h>
-#include <linux/bitops.h>
#include "p80211types.h"
#include "p80211hdr.h"
@@ -74,7 +73,7 @@ static u8 p80211_mhz_to_channel(u16 mhz)
if (mhz >= 5000)
return (mhz - 5000) / 5;
- if (mhz == 2482)
+ if (mhz == 2484)
return 14;
if (mhz >= 2407)
@@ -126,18 +125,38 @@ static int qual_as_percent(int snr)
return 100;
}
-static int p80211wext_dorequest(wlandevice_t *wlandev, u32 did, u32 data)
+static int p80211wext_setmib(wlandevice_t *wlandev, u32 did, u32 data)
{
p80211msg_dot11req_mibset_t msg;
- p80211item_uint32_t mibitem;
+ p80211item_uint32_t *mibitem =
+ (p80211item_uint32_t *)&msg.mibattribute.data;
int result;
msg.msgcode = DIDmsg_dot11req_mibset;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = did;
- mibitem.data = data;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
+ memset(mibitem, 0, sizeof(*mibitem));
+ mibitem->did = did;
+ mibitem->data = data;
+ result = p80211req_dorequest(wlandev, (u8 *) &msg);
+
+ return result;
+}
+
+/*
+ * get a 32 bit mib value
+ */
+static int p80211wext_getmib(wlandevice_t *wlandev, u32 did, u32 *data)
+{
+ p80211msg_dot11req_mibset_t msg;
+ p80211item_uint32_t *mibitem =
+ (p80211item_uint32_t *)&msg.mibattribute.data;
+ int result;
+
+ msg.msgcode = DIDmsg_dot11req_mibget;
+ memset(mibitem, 0, sizeof(*mibitem));
+ mibitem->did = did;
result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ if (!result)
+ *data = mibitem->data;
return result;
}
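For reference, a minimal standalone sketch (not driver code) of the wrapper pattern introduced here: the repeated "fill a request struct, issue it, copy the item back" boilerplate is folded into small get/set helpers keyed by a MIB id. All names below are hypothetical; a tiny in-memory table stands in for the real request path.

#include <stdio.h>

#define FAKE_MIB_SLOTS 8

static unsigned int fake_mib[FAKE_MIB_SLOTS];	/* stand-in for device state */

/* stand-in for the verbose request/response call the helpers wrap */
static int fake_dorequest(int set, unsigned int did, unsigned int *data)
{
	if (did >= FAKE_MIB_SLOTS)
		return -1;
	if (set)
		fake_mib[did] = *data;
	else
		*data = fake_mib[did];
	return 0;
}

static int sketch_setmib(unsigned int did, unsigned int data)
{
	return fake_dorequest(1, did, &data);
}

static int sketch_getmib(unsigned int did, unsigned int *data)
{
	return fake_dorequest(0, did, data);
}

int main(void)
{
	unsigned int value = 0;

	/* callers shrink to one line per MIB access, as in the hunks below */
	if (sketch_setmib(3, 2347) == 0 && sketch_getmib(3, &value) == 0)
		printf("mib[3] = %u\n", value);
	return 0;
}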
@@ -263,32 +282,26 @@ static int p80211wext_giwfreq(netdevice_t *dev,
struct iw_freq *freq, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- if (mibitem.data > NUM_CHANNELS) {
+ if (value > NUM_CHANNELS) {
err = -EFAULT;
goto exit;
}
/* convert into frequency instead of a channel */
freq->e = 1;
- freq->m = p80211_channel_to_mhz(mibitem.data, 0) * 100000;
+ freq->m = p80211_channel_to_mhz(value, 0) * 100000;
exit:
return err;
@@ -299,28 +312,23 @@ static int p80211wext_siwfreq(netdevice_t *dev,
struct iw_freq *freq, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
+ err = -EOPNOTSUPP;
goto exit;
}
- msg.msgcode = DIDmsg_dot11req_mibset;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel;
- mibitem.status = P80211ENUM_msgitem_status_data_ok;
-
if ((freq->e == 0) && (freq->m <= 1000))
- mibitem.data = freq->m;
+ value = freq->m;
else
- mibitem.data = p80211_mhz_to_channel(freq->m);
+ value = p80211_mhz_to_channel(freq->m);
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11phy_dot11PhyDSSSTable_dot11CurrentChannel,
+ value);
if (result) {
err = -EFAULT;
@@ -360,13 +368,11 @@ static int p80211wext_siwmode(netdevice_t *dev,
__u32 *mode, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
+ err = -EOPNOTSUPP;
goto exit;
}
@@ -397,16 +403,11 @@ static int p80211wext_siwmode(netdevice_t *dev,
}
/* Set Operation mode to the PORT TYPE RID */
- msg.msgcode = DIDmsg_dot11req_mibset;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_p2_p2Static_p2CnfPortType;
- mibitem.data = (*mode == IW_MODE_ADHOC) ? 0 : 1;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_setmib(wlandev,
+ DIDmib_p2_p2Static_p2CnfPortType,
+ (*mode == IW_MODE_ADHOC) ? 0 : 1);
if (result)
err = -EFAULT;
-
exit:
return err;
}
@@ -563,9 +564,9 @@ static int p80211wext_siwencode(netdevice_t *dev,
/* Set current key number only if no keys are given */
if (erq->flags & IW_ENCODE_NOKEY) {
result =
- p80211wext_dorequest(wlandev,
- DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
- i);
+ p80211wext_setmib(wlandev,
+ DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
+ i);
if (result) {
err = -EFAULT;
@@ -587,7 +588,6 @@ static int p80211wext_siwencode(netdevice_t *dev,
-------------------------------------------------------------*/
if (erq->length > 0) {
-
/* copy the key from the driver cache as the keys are read-only MIBs */
wlandev->wep_keylens[i] = erq->length;
memcpy(wlandev->wep_keys[i], key, erq->length);
@@ -637,12 +637,12 @@ static int p80211wext_siwencode(netdevice_t *dev,
/* Check the PrivacyInvoked flag */
if (erq->flags & IW_ENCODE_DISABLED) {
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_false);
} else {
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_true);
}
@@ -661,12 +661,12 @@ static int p80211wext_siwencode(netdevice_t *dev,
*/
if (erq->flags & IW_ENCODE_RESTRICTED) {
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
P80211ENUM_truth_true);
} else if (erq->flags & IW_ENCODE_OPEN) {
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
P80211ENUM_truth_false);
}
@@ -768,24 +768,16 @@ static int p80211wext_giwrate(netdevice_t *dev,
struct iw_param *rrq, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_p2_p2MAC_p2CurrentTxRate;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_getmib(wlandev, DIDmib_p2_p2MAC_p2CurrentTxRate, &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
rrq->fixed = 0; /* can it change? */
rrq->disabled = 0;
rrq->value = 0;
@@ -795,7 +787,7 @@ static int p80211wext_giwrate(netdevice_t *dev,
#define HFA384x_RATEBIT_5dot5 ((u16)4)
#define HFA384x_RATEBIT_11 ((u16)8)
- switch (mibitem.data) {
+ switch (value) {
case HFA384x_RATEBIT_1:
rrq->value = 1000000;
break;
@@ -820,25 +812,19 @@ static int p80211wext_giwrts(netdevice_t *dev,
struct iw_param *rts, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- rts->value = mibitem.data;
+ rts->value = value;
rts->disabled = (rts->value == 2347);
rts->fixed = 1;
@@ -851,27 +837,23 @@ static int p80211wext_siwrts(netdevice_t *dev,
struct iw_param *rts, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
if (!wlan_wext_write) {
- err = (-EOPNOTSUPP);
+ err = -EOPNOTSUPP;
goto exit;
}
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold;
if (rts->disabled)
- mibitem.data = 2347;
+ value = 2347;
else
- mibitem.data = rts->value;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ value = rts->value;
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11RTSThreshold,
+ value);
if (result) {
err = -EFAULT;
goto exit;
@@ -886,26 +868,19 @@ static int p80211wext_giwfrag(netdevice_t *dev,
struct iw_param *frag, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- frag->value = mibitem.data;
+ frag->value = value;
frag->disabled = (frag->value == 2346);
frag->fixed = 1;
@@ -918,28 +893,23 @@ static int p80211wext_siwfrag(netdevice_t *dev,
struct iw_param *frag, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ int value;
if (!wlan_wext_write) {
err = (-EOPNOTSUPP);
goto exit;
}
- msg.msgcode = DIDmsg_dot11req_mibset;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold;
-
if (frag->disabled)
- mibitem.data = 2346;
+ value = 2346;
else
- mibitem.data = frag->value;
+ value = frag->value;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11FragmentationThreshold,
+ value);
if (result) {
err = -EFAULT;
@@ -963,56 +933,40 @@ static int p80211wext_giwretry(netdevice_t *dev,
struct iw_param *rrq, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
u16 shortretry, longretry, lifetime;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did = DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
-
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- shortretry = mibitem.data;
-
- mibitem.did = DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ shortretry = value;
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- longretry = mibitem.data;
-
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ longretry = value;
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
- lifetime = mibitem.data;
+ lifetime = value;
rrq->disabled = 0;
@@ -1045,8 +999,7 @@ static int p80211wext_siwretry(netdevice_t *dev,
p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
-
- memset(&mibitem, 0, sizeof(mibitem));
+ unsigned int value;
if (!wlan_wext_write) {
err = (-EOPNOTSUPP);
@@ -1061,26 +1014,20 @@ static int p80211wext_siwretry(netdevice_t *dev,
msg.msgcode = DIDmsg_dot11req_mibset;
if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime;
- mibitem.data = rrq->value /= 1024;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ value = rrq->value /= 1024;
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11MaxTransmitMSDULifetime,
+ value);
if (result) {
err = -EFAULT;
goto exit;
}
} else {
if (rrq->flags & IW_RETRY_LONG) {
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit;
- mibitem.data = rrq->value;
-
- memcpy(&msg.mibattribute.data, &mibitem,
- sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11LongRetryLimit,
+ rrq->value);
if (result) {
err = -EFAULT;
@@ -1089,13 +1036,9 @@ static int p80211wext_siwretry(netdevice_t *dev,
}
if (rrq->flags & IW_RETRY_SHORT) {
- mibitem.did =
- DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit;
- mibitem.data = rrq->value;
-
- memcpy(&msg.mibattribute.data, &mibitem,
- sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11mac_dot11OperationTable_dot11ShortRetryLimit,
+ rrq->value);
if (result) {
err = -EFAULT;
@@ -1118,22 +1061,20 @@ static int p80211wext_siwtxpow(netdevice_t *dev,
p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
if (!wlan_wext_write) {
err = (-EOPNOTSUPP);
goto exit;
}
- msg.msgcode = DIDmsg_dot11req_mibset;
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did =
- DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
if (rrq->fixed == 0)
- mibitem.data = 30;
+ value = 30;
else
- mibitem.data = rrq->value;
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ value = rrq->value;
+ result = p80211wext_setmib(wlandev,
+ DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel,
+ value);
if (result) {
err = -EFAULT;
@@ -1149,33 +1090,25 @@ static int p80211wext_giwtxpow(netdevice_t *dev,
struct iw_param *rrq, char *extra)
{
wlandevice_t *wlandev = dev->ml_priv;
- p80211item_uint32_t mibitem;
- p80211msg_dot11req_mibset_t msg;
int result;
int err = 0;
+ unsigned int value;
- msg.msgcode = DIDmsg_dot11req_mibget;
-
- memset(&mibitem, 0, sizeof(mibitem));
- mibitem.did =
- DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
-
- memcpy(&msg.mibattribute.data, &mibitem, sizeof(mibitem));
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211wext_getmib(wlandev,
+ DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel,
+ &value);
if (result) {
err = -EFAULT;
goto exit;
}
- memcpy(&mibitem, &msg.mibattribute.data, sizeof(mibitem));
-
/* XXX handle OFF by setting disabled = 1; */
rrq->flags = 0; /* IW_TXPOW_DBM; */
rrq->disabled = 0;
rrq->fixed = 0;
- rrq->value = mibitem.data;
+ rrq->value = value;
exit:
return err;
@@ -1480,7 +1413,7 @@ static int p80211wext_set_encodeext(struct net_device *dev,
}
pr_debug("setting default key (%d)\n", idx);
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
idx);
if (result)
@@ -1600,12 +1533,12 @@ static int p80211_wext_set_iwauth(struct net_device *dev,
pr_debug("drop_unencrypted %d\n", param->value);
if (param->value)
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
P80211ENUM_truth_true);
else
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted,
P80211ENUM_truth_false);
break;
@@ -1614,12 +1547,12 @@ static int p80211_wext_set_iwauth(struct net_device *dev,
pr_debug("privacy invoked %d\n", param->value);
if (param->value)
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_true);
else
result =
- p80211wext_dorequest(wlandev,
+ p80211wext_setmib(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_false);
@@ -1681,64 +1614,50 @@ static int p80211_wext_get_iwauth(struct net_device *dev,
return result;
}
+#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
+
static iw_handler p80211wext_handlers[] = {
- (iw_handler) p80211wext_siwcommit, /* SIOCSIWCOMMIT */
- (iw_handler) p80211wext_giwname, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) p80211wext_siwfreq, /* SIOCSIWFREQ */
- (iw_handler) p80211wext_giwfreq, /* SIOCGIWFREQ */
- (iw_handler) p80211wext_siwmode, /* SIOCSIWMODE */
- (iw_handler) p80211wext_giwmode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* not used *//* SIOCSIWRANGE */
- (iw_handler) p80211wext_giwrange, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* not used *//* SIOCSIWPRIV */
- (iw_handler) NULL, /* kernel code *//* SIOCGIWPRIV */
- (iw_handler) NULL, /* not used *//* SIOCSIWSTATS */
- (iw_handler) NULL, /* kernel code *//* SIOCGIWSTATS */
- (iw_handler) p80211wext_siwspy, /* SIOCSIWSPY */
- (iw_handler) p80211wext_giwspy, /* SIOCGIWSPY */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWAP */
- (iw_handler) p80211wext_giwap, /* SIOCGIWAP */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) p80211wext_siwscan, /* SIOCSIWSCAN */
- (iw_handler) p80211wext_giwscan, /* SIOCGIWSCAN */
- (iw_handler) p80211wext_siwessid, /* SIOCSIWESSID */
- (iw_handler) p80211wext_giwessid, /* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) p80211wext_giwessid, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWRATE */
- (iw_handler) p80211wext_giwrate, /* SIOCGIWRATE */
- (iw_handler) p80211wext_siwrts, /* SIOCSIWRTS */
- (iw_handler) p80211wext_giwrts, /* SIOCGIWRTS */
- (iw_handler) p80211wext_siwfrag, /* SIOCSIWFRAG */
- (iw_handler) p80211wext_giwfrag, /* SIOCGIWFRAG */
- (iw_handler) p80211wext_siwtxpow, /* SIOCSIWTXPOW */
- (iw_handler) p80211wext_giwtxpow, /* SIOCGIWTXPOW */
- (iw_handler) p80211wext_siwretry, /* SIOCSIWRETRY */
- (iw_handler) p80211wext_giwretry, /* SIOCGIWRETRY */
- (iw_handler) p80211wext_siwencode, /* SIOCSIWENCODE */
- (iw_handler) p80211wext_giwencode, /* SIOCGIWENCODE */
- (iw_handler) NULL, /* SIOCSIWPOWER */
- (iw_handler) NULL, /* SIOCGIWPOWER */
+ IW_IOCTL(SIOCSIWCOMMIT) = (iw_handler) p80211wext_siwcommit,
+ IW_IOCTL(SIOCGIWNAME) = (iw_handler) p80211wext_giwname,
+/* SIOCSIWNWID,SIOCGIWNWID */
+ IW_IOCTL(SIOCSIWFREQ) = (iw_handler) p80211wext_siwfreq,
+ IW_IOCTL(SIOCGIWFREQ) = (iw_handler) p80211wext_giwfreq,
+ IW_IOCTL(SIOCSIWMODE) = (iw_handler) p80211wext_siwmode,
+ IW_IOCTL(SIOCGIWMODE) = (iw_handler) p80211wext_giwmode,
+/* SIOCSIWSENS,SIOCGIWSENS,SIOCSIWRANGE */
+ IW_IOCTL(SIOCGIWRANGE) = (iw_handler) p80211wext_giwrange,
+/* SIOCSIWPRIV,SIOCGIWPRIV,SIOCSIWSTATS,SIOCGIWSTATS */
+ IW_IOCTL(SIOCSIWSPY) = (iw_handler) p80211wext_siwspy,
+ IW_IOCTL(SIOCGIWSPY) = (iw_handler) p80211wext_giwspy,
+/* SIOCSIWAP */
+ IW_IOCTL(SIOCGIWAP) = (iw_handler) p80211wext_giwap,
+/* SIOCGIWAPLIST */
+ IW_IOCTL(SIOCSIWSCAN) = (iw_handler) p80211wext_siwscan,
+ IW_IOCTL(SIOCGIWSCAN) = (iw_handler) p80211wext_giwscan,
+ IW_IOCTL(SIOCSIWESSID) = (iw_handler) p80211wext_siwessid,
+ IW_IOCTL(SIOCGIWESSID) = (iw_handler) p80211wext_giwessid,
+/* SIOCSIWNICKN */
+ IW_IOCTL(SIOCGIWNICKN) = (iw_handler) p80211wext_giwessid,
+/* SIOCSIWRATE */
+ IW_IOCTL(SIOCGIWRATE) = (iw_handler) p80211wext_giwrate,
+ IW_IOCTL(SIOCSIWRTS) = (iw_handler) p80211wext_siwrts,
+ IW_IOCTL(SIOCGIWRTS) = (iw_handler) p80211wext_giwrts,
+ IW_IOCTL(SIOCSIWFRAG) = (iw_handler) p80211wext_siwfrag,
+ IW_IOCTL(SIOCGIWFRAG) = (iw_handler) p80211wext_giwfrag,
+ IW_IOCTL(SIOCSIWTXPOW) = (iw_handler) p80211wext_siwtxpow,
+ IW_IOCTL(SIOCGIWTXPOW) = (iw_handler) p80211wext_giwtxpow,
+ IW_IOCTL(SIOCSIWRETRY) = (iw_handler) p80211wext_siwretry,
+ IW_IOCTL(SIOCGIWRETRY) = (iw_handler) p80211wext_giwretry,
+ IW_IOCTL(SIOCSIWENCODE) = (iw_handler) p80211wext_siwencode,
+ IW_IOCTL(SIOCGIWENCODE) = (iw_handler) p80211wext_giwencode,
+/* SIOCSIWPOWER,SIOCGIWPOWER */
/* WPA operations */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWGENIE set generic IE */
- (iw_handler) NULL, /* SIOCGIWGENIE get generic IE */
- (iw_handler) p80211_wext_set_iwauth, /* SIOCSIWAUTH set authentication mode params */
- (iw_handler) p80211_wext_get_iwauth, /* SIOCGIWAUTH get authentication mode params */
-
- (iw_handler) p80211wext_set_encodeext, /* SIOCSIWENCODEEXT set encoding token & mode */
- (iw_handler) p80211wext_get_encodeext, /* SIOCGIWENCODEEXT get encoding token & mode */
- (iw_handler) NULL, /* SIOCSIWPMKSA PMKSA cache operation */
+/* SIOCSIWGENIE,SIOCGIWGENIE generic IE */
+ IW_IOCTL(SIOCSIWAUTH) = (iw_handler) p80211_wext_set_iwauth, /*set authentication mode params */
+ IW_IOCTL(SIOCGIWAUTH) = (iw_handler) p80211_wext_get_iwauth, /*get authentication mode params */
+ IW_IOCTL(SIOCSIWENCODEEXT) = (iw_handler) p80211wext_set_encodeext, /*set encoding token & mode */
+ IW_IOCTL(SIOCGIWENCODEEXT) = (iw_handler) p80211wext_get_encodeext, /*get encoding token & mode */
+/* SIOCSIWPMKSA PMKSA cache operation */
};
struct iw_handler_def p80211wext_handler_def = {
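The table above leans on two conventions worth spelling out: p80211wext_setmib()/p80211wext_getmib() are helper wrappers (their definitions are not part of this excerpt) that replace the repeated mibitem/p80211req_dorequest boilerplate, and the IW_IOCTL() macro turns each ioctl number into a designated-initializer index relative to SIOCSIWCOMMIT, so unimplemented entries are simply left NULL instead of being padded with explicit placeholders. A minimal, self-contained sketch of that designated-initializer pattern, using hypothetical command numbers and handlers rather than anything from the driver:

/* Sparse handler table via designated initializers: indices are computed
 * from a base command number, and any slot not named stays NULL. */
#include <stdio.h>

#define CMD_BASE	0x8B00			/* hypothetical first command */
#define CMD_SLOT(x)	[(x) - CMD_BASE]

typedef int (*cmd_handler)(void);

static int do_commit(void)  { return 0; }
static int do_getname(void) { return 1; }

static cmd_handler handlers[] = {
	CMD_SLOT(0x8B00) = do_commit,	/* first slot */
	CMD_SLOT(0x8B01) = do_getname,	/* second slot */
	/* 0x8B02 and 0x8B03 intentionally left NULL */
	CMD_SLOT(0x8B04) = do_commit,	/* gaps before this stay NULL */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		printf("slot %u: %s\n", i, handlers[i] ? "set" : "NULL");
	return 0;
}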
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index d383ea85c9b..d20c8797bcc 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -160,21 +160,30 @@ hfa384x_caplevel_t priid;
/*================================================================*/
/* Local Function Declarations */
-int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev);
-int read_fwfile(const struct ihex_binrec *rfptr);
-int mkimage(imgchunk_t *clist, unsigned int *ccnt);
-int read_cardpda(pda_t *pda, wlandevice_t *wlandev);
-int mkpdrlist(pda_t *pda);
-int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
+static int prism2_fwapply(const struct ihex_binrec *rfptr,
+wlandevice_t *wlandev);
+
+static int read_fwfile(const struct ihex_binrec *rfptr);
+
+static int mkimage(imgchunk_t *clist, unsigned int *ccnt);
+
+static int read_cardpda(pda_t *pda, wlandevice_t *wlandev);
+
+static int mkpdrlist(pda_t *pda);
+
+static int plugimage(imgchunk_t *fchunk, unsigned int nfchunks,
s3plugrec_t *s3plug, unsigned int ns3plug, pda_t * pda);
-int crcimage(imgchunk_t *fchunk, unsigned int nfchunks,
+
+static int crcimage(imgchunk_t *fchunk, unsigned int nfchunks,
s3crcrec_t *s3crc, unsigned int ns3crc);
-int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
+
+static int writeimage(wlandevice_t *wlandev, imgchunk_t *fchunk,
unsigned int nfchunks);
-void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks);
-void free_srecs(void);
+static void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks);
+
+static void free_srecs(void);
-int validate_identity(void);
+static int validate_identity(void);
/*================================================================*/
/* Function Definitions */
@@ -255,7 +264,7 @@ int prism2_fwapply(const struct ihex_binrec *rfptr, wlandevice_t *wlandev)
/* clear the pda and add an initial END record */
memset(&pda, 0, sizeof(pda));
pda.rec[0] = (hfa384x_pdrec_t *) pda.buf;
- pda.rec[0]->len = cpu_to_le16(2); /* len in words *//* len in words */
+ pda.rec[0]->len = cpu_to_le16(2); /* len in words */
pda.rec[0]->code = cpu_to_le16(HFA384x_PDR_END_OF_PDA);
pda.nrec = 1;
@@ -527,13 +536,12 @@ int mkimage(imgchunk_t *clist, unsigned int *ccnt)
/* Allocate buffer space for chunks */
for (i = 0; i < *ccnt; i++) {
- clist[i].data = kmalloc(clist[i].len, GFP_KERNEL);
+ clist[i].data = kzalloc(clist[i].len, GFP_KERNEL);
if (clist[i].data == NULL) {
printk(KERN_ERR
"failed to allocate image space, exitting.\n");
return 1;
}
- memset(clist[i].data, 0, clist[i].len);
pr_debug("chunk[%d]: addr=0x%06x len=%d\n",
i, clist[i].addr, clist[i].len);
}
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 31ac8da39c8..6cd09352f89 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -426,7 +426,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, p80211msg_t *msg)
* msgp ptr to msg buffer
*
* Returns:
-* A p80211 message resultcode value.
+* A p80211 message resultcode value.
*
* Side effects:
*
@@ -458,7 +458,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
"hfa384x_drvr_start() failed,"
"result=%d\n", (int)result);
result =
- P80211ENUM_resultcode_implementation_failure;
+ P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
break;
}
@@ -503,7 +503,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
"hfa384x_drvr_start() failed,"
"result=%d\n", (int)result);
result =
- P80211ENUM_resultcode_implementation_failure;
+ P80211ENUM_resultcode_implementation_failure;
wlandev->msdstate = WLAN_MSD_HWPRESENT;
break;
}
@@ -514,7 +514,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
"prism2sta_getcardinfo() failed,"
"result=%d\n", (int)result);
result =
- P80211ENUM_resultcode_implementation_failure;
+ P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
wlandev->msdstate = WLAN_MSD_HWPRESENT;
break;
@@ -525,7 +525,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
"prism2sta_globalsetup() failed,"
"result=%d\n", (int)result);
result =
- P80211ENUM_resultcode_implementation_failure;
+ P80211ENUM_resultcode_implementation_failure;
hfa384x_drvr_stop(hw);
wlandev->msdstate = WLAN_MSD_HWPRESENT;
break;
@@ -1178,8 +1178,8 @@ static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
chinforesult->active =
le16_to_cpu(inf->info.chinforesult.result[n].
active);
- pr_debug
- ("chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n",
+ pr_debug
+ ("chinfo: channel %d, %s level (avg/peak)=%d/%d dB, pcf %d\n",
channel + 1,
chinforesult->
active & HFA384x_CHINFORESULT_BSSACTIVE ? "signal"
@@ -1246,7 +1246,9 @@ void prism2sta_processing_defer(struct work_struct *data)
netif_carrier_on(wlandev->netdev);
- /* If we are joining a specific AP, set our state and reset retries */
+ /* If we are joining a specific AP, set our
+ * state and reset retries
+ */
if (hw->join_ap == 1)
hw->join_ap = 2;
hw->join_retries = 60;
@@ -1261,9 +1263,9 @@ void prism2sta_processing_defer(struct work_struct *data)
/* Collect the BSSID, and set state to allow tx */
result = hfa384x_drvr_getconfig(hw,
- HFA384x_RID_CURRENTBSSID,
- wlandev->bssid,
- WLAN_BSSID_LEN);
+ HFA384x_RID_CURRENTBSSID,
+ wlandev->bssid,
+ WLAN_BSSID_LEN);
if (result) {
pr_debug
("getconfig(0x%02x) failed, result = %d\n",
@@ -1286,8 +1288,8 @@ void prism2sta_processing_defer(struct work_struct *data)
/* Collect the port status */
result = hfa384x_drvr_getconfig16(hw,
- HFA384x_RID_PORTSTATUS,
- &portstatus);
+ HFA384x_RID_PORTSTATUS,
+ &portstatus);
if (result) {
pr_debug
("getconfig(0x%02x) failed, result = %d\n",
@@ -1322,7 +1324,7 @@ void prism2sta_processing_defer(struct work_struct *data)
&joinreq,
HFA384x_RID_JOINREQUEST_LEN);
printk(KERN_INFO
- "linkstatus=DISCONNECTED (re-submitting join)\n");
+ "linkstatus=DISCONNECTED (re-submitting join)\n");
} else {
if (wlandev->netdev->type == ARPHRD_ETHER)
printk(KERN_INFO
@@ -1509,14 +1511,15 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
rec.reason = le16_to_cpu(rec.reason);
/*
- ** Find the address in the list of authenticated stations. If it wasn't
- ** found, then this address has not been previously authenticated and
- ** something weird has happened if this is anything other than an
- ** "authentication failed" message. If the address was found, then
- ** set the "associated" flag for that station, based on whether the
- ** station is associating or losing its association. Something weird
- ** has also happened if we find the address in the list of authenticated
- ** stations but we are getting an "authentication failed" message.
+ ** Find the address in the list of authenticated stations.
+ ** If it wasn't found, then this address has not been previously
+ ** authenticated and something weird has happened if this is
+ ** anything other than an "authentication failed" message.
+ ** If the address was found, then set the "associated" flag for
+ ** that station, based on whether the station is associating or
+ ** losing its association. Something weird has also happened
+ ** if we find the address in the list of authenticated stations
+ ** but we are getting an "authentication failed" message.
*/
for (i = 0; i < hw->authlist.cnt; i++)
@@ -1526,7 +1529,7 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
if (i >= hw->authlist.cnt) {
if (rec.assocstatus != HFA384x_ASSOCSTATUS_AUTHFAIL)
printk(KERN_WARNING
- "assocstatus info frame received for non-authenticated station.\n");
+ "assocstatus info frame received for non-authenticated station.\n");
} else {
hw->authlist.assoc[i] =
(rec.assocstatus == HFA384x_ASSOCSTATUS_STAASSOC ||
@@ -1534,7 +1537,7 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
if (rec.assocstatus == HFA384x_ASSOCSTATUS_AUTHFAIL)
printk(KERN_WARNING
- "authfail assocstatus info frame received for authenticated station.\n");
+"authfail assocstatus info frame received for authenticated station.\n");
}
return;
@@ -1681,12 +1684,12 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
}
/*
- ** If the authentication is okay, then add the MAC address to the list
- ** of authenticated stations. Don't add the address if it is already in
- ** the list. (802.11b does not seem to disallow a station from issuing
- ** an authentication request when the station is already authenticated.
- ** Does this sort of thing ever happen? We might as well do the check
- ** just in case.)
+ ** If the authentication is okay, then add the MAC address to the
+ ** list of authenticated stations. Don't add the address if it
+ ** is already in the list. (802.11b does not seem to disallow
+ ** a station from issuing an authentication request when the
+ ** station is already authenticated. Does this sort of thing
+ ** ever happen? We might as well do the check just in case.)
*/
added = 0;
@@ -1931,7 +1934,7 @@ void prism2sta_ev_alloc(wlandevice_t *wlandev)
* the created wlandevice_t structure.
*
* Side effects:
-* also allocates the priv/hw structures.
+* also allocates the priv/hw structures.
*
* Call context:
* process thread
@@ -1995,9 +1998,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
/* It only makes sense to poll these in non-IBSS */
if (wlandev->macmode != WLAN_MACMODE_IBSS_STA) {
- result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DBMCOMMSQUALITY,
- &hw->qual,
- HFA384x_RID_DBMCOMMSQUALITY_LEN);
+ result = hfa384x_drvr_getconfig(
+ hw, HFA384x_RID_DBMCOMMSQUALITY,
+ &hw->qual, HFA384x_RID_DBMCOMMSQUALITY_LEN);
if (result) {
printk(KERN_ERR "error fetching commsqual\n");
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 501d27f74c7..f5cff751db2 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -285,11 +285,76 @@ exit:
usb_set_intfdata(interface, NULL);
}
+#ifdef CONFIG_PM
+static int prism2sta_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ hfa384x_t *hw = NULL;
+ wlandevice_t *wlandev;
+ wlandev = (wlandevice_t *) usb_get_intfdata(interface);
+ if (!wlandev)
+ return -ENODEV;
+
+ hw = wlandev->priv;
+ if (!hw)
+ return -ENODEV;
+
+ prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable);
+
+ usb_kill_urb(&hw->rx_urb);
+ usb_kill_urb(&hw->tx_urb);
+ usb_kill_urb(&hw->ctlx_urb);
+
+ return 0;
+}
+
+static int prism2sta_resume(struct usb_interface *interface)
+{
+ int result = 0;
+ hfa384x_t *hw = NULL;
+ wlandevice_t *wlandev;
+ wlandev = (wlandevice_t *) usb_get_intfdata(interface);
+ if (!wlandev)
+ return -ENODEV;
+
+ hw = wlandev->priv;
+ if (!hw)
+ return -ENODEV;
+
+ /* Do a chip-level reset on the MAC */
+ if (prism2_doreset) {
+ result = hfa384x_corereset(hw,
+ prism2_reset_holdtime,
+ prism2_reset_settletime, 0);
+ if (result != 0) {
+ unregister_wlandev(wlandev);
+ hfa384x_destroy(hw);
+ printk(KERN_ERR
+ "%s: hfa384x_corereset() failed.\n", dev_info);
+ kfree(wlandev);
+ kfree(hw);
+ wlandev = NULL;
+ return -ENODEV;
+ }
+ }
+
+ prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable);
+
+ return 0;
+}
+#else
+#define prism2sta_suspend NULL
+#define prism2sta_resume NULL
+#endif /* CONFIG_PM */
+
static struct usb_driver prism2_usb_driver = {
.name = "prism2_usb",
.probe = prism2sta_probe_usb,
.disconnect = prism2sta_disconnect_usb,
.id_table = usb_prism_tbl,
+ .suspend = prism2sta_suspend,
+ .resume = prism2sta_resume,
+ .reset_resume = prism2sta_resume,
/* fops, minor? */
};
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c7256..b53deee25d7 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@ out:
static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
{
IXJ_FILTER_CADENCE *lcp;
- lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL);
- if (lcp == NULL) {
+ lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
+ if (IS_ERR(lcp)) {
if(ixjdebug & 0x0001) {
- printk(KERN_INFO "Could not allocate memory for cadence\n");
+ printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
}
- return -ENOMEM;
+ return PTR_ERR(lcp);
}
- if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
- if(ixjdebug & 0x0001) {
- printk(KERN_INFO "Could not copy cadence to kernel\n");
- }
- kfree(lcp);
- return -EFAULT;
- }
if (lcp->filter > 5) {
if(ixjdebug & 0x0001) {
printk(KERN_INFO "Cadence out of range\n");
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index d442fd35620..99cb2246ac7 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -22,7 +22,6 @@
typedef struct ixj_info_t {
int ndev;
- dev_node_t node;
struct ixj *port;
} ixj_info_t;
@@ -155,8 +154,6 @@ static int ixj_config(struct pcmcia_device * link)
j = ixj_pcmcia_probe(link->io.BasePort1, link->io.BasePort1 + 0x10);
info->ndev = 1;
- info->node.major = PHONE_MAJOR;
- link->dev_node = &info->node;
ixj_get_serial(link, j);
return 0;
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 1e9ba4bdffe..1335456b4f9 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -127,8 +127,6 @@ MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");
#define ENDPOINT_ISOC_DATA 0x07
#define ENDPOINT_FIRMWARE 0x05
-#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
-
struct speedtch_params {
unsigned int altsetting;
unsigned int BMaxDSL;
@@ -669,7 +667,8 @@ static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_de
memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
for (i = 0; i < 6; i++)
- atm_dev->esi[i] = (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1]));
+ atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
+ hex_to_bin(mac_str[i * 2 + 1]);
}
/* Start modem synchronisation */
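The change above drops the driver-local hex2int() macro in favour of the kernel's hex_to_bin() helper and assembles each ESI byte as (high nibble << 4) + low nibble from the 12-character serial string. A standalone sketch of that conversion, using a local stand-in for hex_to_bin() and a made-up serial string:

/* Build a 6-byte ESI from a 12-character hex string: two hex digits
 * per output byte, high nibble first. */
#include <stdio.h>

static int hex_to_bin(char ch)	/* local stand-in for the kernel helper */
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

int main(void)
{
	const char mac_str[] = "00a0c914bead";	/* example serial string */
	unsigned char esi[6];
	int i;

	for (i = 0; i < 6; i++)
		esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
			 hex_to_bin(mac_str[i * 2 + 1]);

	for (i = 0; i < 6; i++)
		printf("%02x%c", esi[i], i == 5 ? '\n' : ':');
	return 0;
}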
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 25f01b536f6..e213d3fa492 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -94,19 +94,19 @@
} while (0)
#define uea_enters(usb_dev) \
- uea_vdbg(usb_dev, "entering %s\n", __func__)
+ uea_vdbg(usb_dev, "entering %s\n" , __func__)
#define uea_leaves(usb_dev) \
- uea_vdbg(usb_dev, "leaving %s\n", __func__)
+ uea_vdbg(usb_dev, "leaving %s\n" , __func__)
-#define uea_err(usb_dev, format,args...) \
- dev_err(&(usb_dev)->dev ,"[UEAGLE-ATM] " format , ##args)
+#define uea_err(usb_dev, format, args...) \
+ dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)
-#define uea_warn(usb_dev, format,args...) \
- dev_warn(&(usb_dev)->dev ,"[Ueagle-atm] " format, ##args)
+#define uea_warn(usb_dev, format, args...) \
+ dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)
-#define uea_info(usb_dev, format,args...) \
- dev_info(&(usb_dev)->dev ,"[ueagle-atm] " format, ##args)
+#define uea_info(usb_dev, format, args...) \
+ dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)
struct intr_pkt;
@@ -289,7 +289,7 @@ enum {
#define IS_ISDN(x) \
((x)->annex & ANNEXB)
-#define INS_TO_USBDEV(ins) ins->usb_dev
+#define INS_TO_USBDEV(ins) (ins->usb_dev)
#define GET_STATUS(data) \
((data >> 8) & 0xf)
@@ -304,7 +304,7 @@ enum {
* The FW_GET_BYTE() macro is provided only for consistency.
*/
-#define FW_GET_BYTE(p) *((__u8 *) (p))
+#define FW_GET_BYTE(p) (*((__u8 *) (p)))
#define FW_DIR "ueagle-atm/"
#define UEA_FW_NAME_MAX 30
@@ -315,7 +315,7 @@ enum {
#define ACK_TIMEOUT msecs_to_jiffies(3000)
-#define UEA_INTR_IFACE_NO 0
+#define UEA_INTR_IFACE_NO 0
#define UEA_US_IFACE_NO 1
#define UEA_DS_IFACE_NO 2
@@ -326,9 +326,9 @@ enum {
#define UEA_INTR_PIPE 0x04
#define UEA_ISO_DATA_PIPE 0x08
-#define UEA_E1_SET_BLOCK 0x0001
+#define UEA_E1_SET_BLOCK 0x0001
#define UEA_E4_SET_BLOCK 0x002c
-#define UEA_SET_MODE 0x0003
+#define UEA_SET_MODE 0x0003
#define UEA_SET_2183_DATA 0x0004
#define UEA_SET_TIMEOUT 0x0011
@@ -366,7 +366,7 @@ struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
- u8 code [0];
+ u8 code[0];
} __attribute__ ((packed));
/* structures describing a block within a DSP page */
@@ -428,7 +428,8 @@ struct block_info_e4 {
#define E4_MODEMREADY 0x1
#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
-#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | ((st) & 0xf) << 4 | ((s) & 0xf))
+#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
+ ((st) & 0xf) << 4 | ((s) & 0xf))
#define E1_MAKESA(a, b, c, d) \
(((c) & 0xff) << 24 | \
@@ -473,7 +474,7 @@ struct cmv_e4 {
__be16 wFunction;
__be16 wOffset;
__be16 wAddress;
- __be32 dwData [6];
+ __be32 dwData[6];
} __attribute__ ((packed));
/* structures representing swap information */
@@ -534,11 +535,13 @@ struct intr_pkt {
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
-static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
+static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III",
+ "Eagle IV"};
static int modem_index;
static unsigned int debug;
-static unsigned int altsetting[NB_MODEM] = {[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
+static unsigned int altsetting[NB_MODEM] = {
+ [0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
static int sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
@@ -555,7 +558,7 @@ MODULE_PARM_DESC(cmv_file,
"file name with configuration and management variables");
module_param_array(annex, uint, NULL, 0644);
MODULE_PARM_DESC(annex,
- "manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
+ "manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
#define uea_wait(sc, cond, timeo) \
({ \
@@ -602,7 +605,8 @@ static int uea_send_modem_cmd(struct usb_device *usb,
return (ret == size) ? 0 : -EIO;
}
-static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *context)
+static void uea_upload_pre_firmware(const struct firmware *fw_entry,
+ void *context)
{
struct usb_device *usb = context;
const u8 *pfw;
@@ -707,7 +711,8 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
}
ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
- GFP_KERNEL, usb, uea_upload_pre_firmware);
+ GFP_KERNEL, usb,
+ uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
@@ -876,7 +881,7 @@ static int request_dsp(struct uea_softc *sc)
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
- dsp_name, ret);
+ dsp_name, ret);
return ret;
}
@@ -994,14 +999,17 @@ static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
blockidx = &p->page_header[blockno];
blocksize = E4_PAGE_BYTES(blockidx->PageSize);
- blockoffset = sc->dsp_firm->data + le32_to_cpu(blockidx->PageOffset);
+ blockoffset = sc->dsp_firm->data + le32_to_cpu(
+ blockidx->PageOffset);
bi.dwSize = cpu_to_be32(blocksize);
bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));
uea_dbg(INS_TO_USBDEV(sc),
- "sending block %u for DSP page %u size %u address %x\n",
- blockno, pageno, blocksize, le32_to_cpu(blockidx->PageAddress));
+ "sending block %u for DSP page "
+ "%u size %u address %x\n",
+ blockno, pageno, blocksize,
+ le32_to_cpu(blockidx->PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
@@ -1042,7 +1050,8 @@ static void uea_load_page_e4(struct work_struct *work)
p = (struct l1_code *) sc->dsp_firm->data;
if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
- uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
+ uea_err(INS_TO_USBDEV(sc), "invalid DSP "
+ "page %u requested\n", pageno);
return;
}
@@ -1059,7 +1068,7 @@ static void uea_load_page_e4(struct work_struct *work)
__uea_load_page_e4(sc, i, 1);
}
- uea_dbg(INS_TO_USBDEV(sc),"sending start bi\n");
+ uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n");
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = 0;
@@ -1139,8 +1148,10 @@ static int uea_cmv_e1(struct uea_softc *sc,
uea_enters(INS_TO_USBDEV(sc));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
"offset : 0x%04x, data : 0x%08x\n",
- E1_FUNCTION_TYPE(function), E1_FUNCTION_SUBTYPE(function),
- E1_GETSA1(address), E1_GETSA2(address), E1_GETSA3(address),
+ E1_FUNCTION_TYPE(function),
+ E1_FUNCTION_SUBTYPE(function),
+ E1_GETSA1(address), E1_GETSA2(address),
+ E1_GETSA3(address),
E1_GETSA4(address), offset, data);
/* we send a request, but we expect a reply */
@@ -1157,7 +1168,8 @@ static int uea_cmv_e1(struct uea_softc *sc,
cmv.wOffsetAddress = cpu_to_le16(offset);
put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
- ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
+ ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START,
+ sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
@@ -1191,7 +1203,8 @@ static int uea_cmv_e4(struct uea_softc *sc,
cmv.wOffset = cpu_to_be16(offset);
cmv.dwData[0] = cpu_to_be32(data);
- ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
+ ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START,
+ sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
@@ -1208,7 +1221,7 @@ static inline int uea_read_cmv_e1(struct uea_softc *sc,
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else
- *data = sc->data;
+ *data = sc->data;
return ret;
}
@@ -1216,13 +1229,14 @@ static inline int uea_read_cmv_e1(struct uea_softc *sc,
static inline int uea_read_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 *data)
{
- int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTREAD, size),
+ int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
+ E4_REQUESTREAD, size),
group, address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else {
- *data = sc->data;
+ *data = sc->data;
/* size is in 16-bit word quantities */
if (size > 2)
*(data + 1) = sc->data1;
@@ -1245,7 +1259,8 @@ static inline int uea_write_cmv_e1(struct uea_softc *sc,
static inline int uea_write_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 data)
{
- int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size),
+ int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
+ E4_REQUESTWRITE, size),
group, address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
@@ -1442,27 +1457,29 @@ static int uea_stat_e4(struct uea_softc *sc)
return ret;
switch (sc->stats.phy.state) {
- case 0x0: /* not yet synchronized */
- case 0x1:
- case 0x3:
- case 0x4:
- uea_dbg(INS_TO_USBDEV(sc), "modem not yet synchronized\n");
- return 0;
- case 0x5: /* initialization */
- case 0x6:
- case 0x9:
- case 0xa:
- uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
- return 0;
- case 0x2: /* fail ... */
- uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
- " (may be try other cmv/dsp)\n");
- return -EAGAIN;
- case 0x7: /* operational */
- break;
- default:
- uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n", sc->stats.phy.state);
- return 0;
+ case 0x0: /* not yet synchronized */
+ case 0x1:
+ case 0x3:
+ case 0x4:
+ uea_dbg(INS_TO_USBDEV(sc), "modem not yet "
+ "synchronized\n");
+ return 0;
+ case 0x5: /* initialization */
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
+ return 0;
+ case 0x2: /* fail ... */
+ uea_info(INS_TO_USBDEV(sc), "modem synchronization "
+ "failed (may be try other cmv/dsp)\n");
+ return -EAGAIN;
+ case 0x7: /* operational */
+ break;
+ default:
+ uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n",
+ sc->stats.phy.state);
+ return 0;
}
if (data != 7) {
@@ -1502,9 +1519,9 @@ static int uea_stat_e4(struct uea_softc *sc)
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
- if (sc->stats.phy.flags & 1) //delineation LOSS
+ if (sc->stats.phy.flags & 1) /* delineation LOSS */
return -EAGAIN;
- if (sc->stats.phy.flags & 0x4000) //Reset Flag
+ if (sc->stats.phy.flags & 0x4000) /* Reset Flag */
return -EAGAIN;
return 0;
}
@@ -1618,7 +1635,8 @@ static int request_cmvs(struct uea_softc *sc,
if (ret < 0) {
/* if caller can handle old version, try to provide it */
if (*ver == 1) {
- uea_warn(INS_TO_USBDEV(sc), "requesting firmware %s failed, "
+ uea_warn(INS_TO_USBDEV(sc), "requesting "
+ "firmware %s failed, "
"try to get older cmvs\n", cmv_name);
return request_cmvs_old(sc, cmvs, fw);
}
@@ -1632,8 +1650,8 @@ static int request_cmvs(struct uea_softc *sc,
data = (u8 *) (*fw)->data;
if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
if (*ver == 1) {
- uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted, "
- "try to get older cmvs\n", cmv_name);
+ uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted,"
+ " try to get older cmvs\n", cmv_name);
release_firmware(*fw);
return request_cmvs_old(sc, cmvs, fw);
}
@@ -1670,7 +1688,7 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
- int ver = 1; // we can handle v1 cmv firmware version;
+ int ver = 1; /* we can handle v1 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
@@ -1685,7 +1703,7 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
sc->stats.phy.firmid);
/* get options */
- ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
+ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
@@ -1697,9 +1715,10 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
"please update your firmware\n");
for (i = 0; i < len; i++) {
- ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address),
- get_unaligned_le16(&cmvs_v1[i].offset),
- get_unaligned_le32(&cmvs_v1[i].data));
+ ret = uea_write_cmv_e1(sc,
+ get_unaligned_le32(&cmvs_v1[i].address),
+ get_unaligned_le16(&cmvs_v1[i].offset),
+ get_unaligned_le32(&cmvs_v1[i].data));
if (ret < 0)
goto out;
}
@@ -1707,9 +1726,10 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
- ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address),
- (u16) get_unaligned_le32(&cmvs_v2[i].offset),
- get_unaligned_le32(&cmvs_v2[i].data));
+ ret = uea_write_cmv_e1(sc,
+ get_unaligned_le32(&cmvs_v2[i].address),
+ (u16) get_unaligned_le32(&cmvs_v2[i].offset),
+ get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
@@ -1722,7 +1742,8 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
- uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
+ uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
+ "synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
@@ -1733,7 +1754,7 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
- int ver = 2; // we can only handle v2 cmv firmware version;
+ int ver = 2; /* we can only handle v2 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
@@ -1750,7 +1771,7 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
/* get options */
- ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
+ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
@@ -1760,10 +1781,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e4(sc, 1,
- get_unaligned_le32(&cmvs_v2[i].group),
- get_unaligned_le32(&cmvs_v2[i].address),
- get_unaligned_le32(&cmvs_v2[i].offset),
- get_unaligned_le32(&cmvs_v2[i].data));
+ get_unaligned_le32(&cmvs_v2[i].group),
+ get_unaligned_le32(&cmvs_v2[i].address),
+ get_unaligned_le32(&cmvs_v2[i].offset),
+ get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
@@ -1776,7 +1797,8 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
- uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n");
+ uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
+ "synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
@@ -1812,7 +1834,7 @@ static int uea_start_reset(struct uea_softc *sc)
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL);
- /* enter reset mode */
+ /* enter reset mode */
uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);
/* original driver use 200ms, but windows driver use 100ms */
@@ -1824,7 +1846,7 @@ static int uea_start_reset(struct uea_softc *sc)
uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);
if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
- /* clear tx and rx mailboxes */
+ /* clear tx and rx mailboxes */
uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
@@ -1835,9 +1857,11 @@ static int uea_start_reset(struct uea_softc *sc)
return ret;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
- sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1);
+ sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
+ E4_MODEMREADY, 1);
else
- sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY);
+ sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
+ E1_MODEMREADY);
/* demask interrupt */
sc->booting = 0;
@@ -1937,7 +1961,8 @@ static int load_XILINX_firmware(struct uea_softc *sc)
value = 0;
ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
if (ret < 0)
- uea_err(sc->usb_dev, "elsa de-assert failed with error %d\n", ret);
+ uea_err(sc->usb_dev, "elsa de-assert failed with error"
+ " %d\n", ret);
err1:
release_firmware(fw_entry);
@@ -1966,13 +1991,15 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
if (UEA_CHIP_VERSION(sc) == ADI930
&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
cmv->wIndex = cpu_to_le16(dsc->idx);
- put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress);
+ put_unaligned_le32(dsc->address,
+ &cmv->dwSymbolicAddress);
cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
} else
goto bad2;
}
- if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY)) {
+ if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
+ E1_MODEMREADY)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
@@ -2021,7 +2048,8 @@ static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
if (be16_to_cpu(cmv->wFunction) != dsc->function)
goto bad2;
- if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1)) {
+ if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
+ E4_MODEMREADY, 1)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
@@ -2048,14 +2076,16 @@ bad2:
return;
}
-static void uea_schedule_load_page_e1(struct uea_softc *sc, struct intr_pkt *intr)
+static void uea_schedule_load_page_e1(struct uea_softc *sc,
+ struct intr_pkt *intr)
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
queue_work(sc->work_q, &sc->task);
}
-static void uea_schedule_load_page_e4(struct uea_softc *sc, struct intr_pkt *intr)
+static void uea_schedule_load_page_e4(struct uea_softc *sc,
+ struct intr_pkt *intr)
{
sc->pageno = intr->e4_bSwapPageNo;
queue_work(sc->work_q, &sc->task);
@@ -2263,8 +2293,8 @@ out:
static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
-static ssize_t read_human_status(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t read_human_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int ret = -ENODEV;
int modem_state;
@@ -2289,7 +2319,7 @@ static ssize_t read_human_status(struct device *dev, struct device_attribute *at
case 0xa:
modem_state = 1;
break;
- case 0x7: /* operational */
+ case 0x7: /* operational */
modem_state = 2;
break;
case 0x2: /* fail ... */
@@ -2324,7 +2354,8 @@ out:
return ret;
}
-static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO,
+ read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2358,25 +2389,25 @@ out:
static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
-#define UEA_ATTR(name, reset) \
+#define UEA_ATTR(name, reset) \
\
-static ssize_t read_##name(struct device *dev, \
+static ssize_t read_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ \
- int ret = -ENODEV; \
- struct uea_softc *sc; \
- \
- mutex_lock(&uea_mutex); \
+{ \
+ int ret = -ENODEV; \
+ struct uea_softc *sc; \
+ \
+ mutex_lock(&uea_mutex); \
sc = dev_to_uea(dev); \
- if (!sc) \
- goto out; \
+ if (!sc) \
+ goto out; \
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \
if (reset) \
sc->stats.phy.name = 0; \
-out: \
- mutex_unlock(&uea_mutex); \
- return ret; \
-} \
+out: \
+ mutex_unlock(&uea_mutex); \
+ return ret; \
+} \
\
static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL)
@@ -2527,12 +2558,14 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
else if (sc->driver_info & AUTO_ANNEX_B)
sc->annex = ANNEXB;
else
- sc->annex = (le16_to_cpu(sc->usb_dev->descriptor.bcdDevice) & 0x80)?ANNEXB:ANNEXA;
+ sc->annex = (le16_to_cpu
+ (sc->usb_dev->descriptor.bcdDevice) & 0x80) ? ANNEXB : ANNEXA;
alt = altsetting[sc->modem_index];
/* ADI930 don't support iso */
if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
- if (alt <= 8 && usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
+ if (alt <= 8 &&
+ usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
uea_info(usb, "using iso mode\n");
usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
@@ -2621,40 +2654,74 @@ static void uea_disconnect(struct usb_interface *intf)
* List of supported VID/PID
*/
static const struct usb_device_id uea_ids[] = {
- {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM), .driver_info = EAGLE_IV | PREFIRM},
- {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM), .driver_info = EAGLE_IV | PSTFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM},
- {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM), .driver_info = ADI930 | PREFIRM},
- {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
- {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
- {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM},
- {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM),
+ .driver_info = EAGLE_III | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM),
+ .driver_info = EAGLE_III | PSTFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM),
+ .driver_info = EAGLE_IV | PREFIRM},
+ {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM),
+ .driver_info = EAGLE_IV | PSTFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM),
+ .driver_info = EAGLE_II | PREFIRM},
+ {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM),
+ .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM),
+ .driver_info = ADI930 | PREFIRM},
+ {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM),
+ .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
+ {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
+ {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),
+ .driver_info = EAGLE_I | PREFIRM},
+ {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),
+ .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{}
};
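One detail from earlier in this file's diff: the altsetting[] module parameter array is filled with a GCC/Clang range designator, [0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF, so every modem slot gets the same default without listing each element. A minimal standalone example of the same extension (the names mirror the driver's, but the values of NB_MODEM and FASTEST_ISO_INTF here are placeholders):

/* Range designators (a GNU C extension, also accepted by clang): give a
 * run of array elements one initial value in a single designator. */
#include <stdio.h>

#define NB_MODEM	 4
#define FASTEST_ISO_INTF 8

static unsigned int altsetting[NB_MODEM] = {
	[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF
};

int main(void)
{
	int i;

	for (i = 0; i < NB_MODEM; i++)
		printf("altsetting[%d] = %u\n", i, altsetting[i]);
	return 0;
}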
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index e8c6d94b251..74e44621e31 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -28,7 +28,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "c67x00.h"
/*
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e1a253b08a..0c2f14ff969 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -892,7 +892,7 @@ static void acm_write_buffers_free(struct acm *acm)
struct usb_device *usb_dev = interface_to_usbdev(acm->control);
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
- usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
+ usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah);
}
static void acm_read_buffers_free(struct acm *acm)
@@ -901,8 +901,8 @@ static void acm_read_buffers_free(struct acm *acm)
int i, n = acm->rx_buflimit;
for (i = 0; i < n; i++)
- usb_buffer_free(usb_dev, acm->readsize,
- acm->rb[i].base, acm->rb[i].dma);
+ usb_free_coherent(usb_dev, acm->readsize,
+ acm->rb[i].base, acm->rb[i].dma);
}
/* Little helper: write buffers allocate */
@@ -912,13 +912,13 @@ static int acm_write_buffers_alloc(struct acm *acm)
struct acm_wb *wb;
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
- wb->buf = usb_buffer_alloc(acm->dev, acm->writesize, GFP_KERNEL,
+ wb->buf = usb_alloc_coherent(acm->dev, acm->writesize, GFP_KERNEL,
&wb->dmah);
if (!wb->buf) {
while (i != 0) {
--i;
--wb;
- usb_buffer_free(acm->dev, acm->writesize,
+ usb_free_coherent(acm->dev, acm->writesize,
wb->buf, wb->dmah);
}
return -ENOMEM;
@@ -1177,7 +1177,7 @@ made_compressed_probe:
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
+ buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf) {
dev_dbg(&intf->dev, "out of memory (ctrl buffer alloc)\n");
goto alloc_fail2;
@@ -1210,11 +1210,11 @@ made_compressed_probe:
for (i = 0; i < num_rx_buf; i++) {
struct acm_rb *rb = &(acm->rb[i]);
- rb->base = usb_buffer_alloc(acm->dev, readsize,
+ rb->base = usb_alloc_coherent(acm->dev, readsize,
GFP_KERNEL, &rb->dma);
if (!rb->base) {
dev_dbg(&intf->dev,
- "out of memory (read bufs usb_buffer_alloc)\n");
+ "out of memory (read bufs usb_alloc_coherent)\n");
goto alloc_fail7;
}
}
@@ -1306,7 +1306,7 @@ alloc_fail7:
alloc_fail5:
acm_write_buffers_free(acm);
alloc_fail4:
- usb_buffer_free(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+ usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
kfree(acm);
alloc_fail:
@@ -1356,8 +1356,8 @@ static void acm_disconnect(struct usb_interface *intf)
stop_data_traffic(acm);
acm_write_buffers_free(acm);
- usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
- acm->ctrl_dma);
+ usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
+ acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 4a8e87ec6ce..5eeb570b9a6 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -124,8 +124,8 @@ struct acm {
unsigned char clocal; /* termios CLOCAL */
unsigned int ctrl_caps; /* control capabilities from the class specific header */
unsigned int susp_count; /* number of suspended interfaces */
- int combined_interfaces:1; /* control and data collapsed */
- int is_int_ep:1; /* interrupt endpoints contrary to spec used */
+ unsigned int combined_interfaces:1; /* control and data collapsed */
+ unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */
u8 bInterval;
struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
};
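The bitfield change above (int ...:1 to unsigned int ...:1) addresses a classic pitfall: a signed one-bit field can only represent 0 and -1, so storing 1 and later comparing against 1 can silently fail, while an unsigned field holds 0 and 1 as expected. A short demonstration (the signed behaviour is implementation-defined in general; this shows what typical compilers do):

/* Signed vs. unsigned one-bit bitfields. */
#include <stdio.h>

struct flags_signed   { int flag:1; };
struct flags_unsigned { unsigned int flag:1; };

int main(void)
{
	struct flags_signed   s = { 0 };
	struct flags_unsigned u = { 0 };

	s.flag = 1;	/* value 1 does not fit; commonly reads back as -1 */
	u.flag = 1;

	printf("signed   bitfield: %d (== 1? %s)\n",
	       s.flag, s.flag == 1 ? "yes" : "no");
	printf("unsigned bitfield: %u (== 1? %s)\n",
	       (unsigned int)u.flag, u.flag == 1 ? "yes" : "no");
	return 0;
}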
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 189141ca4e0..094c76b5de1 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -276,14 +276,14 @@ static void free_urbs(struct wdm_device *desc)
static void cleanup(struct wdm_device *desc)
{
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxPacketSize,
- desc->sbuf,
- desc->validity->transfer_dma);
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxCommand,
- desc->inbuf,
- desc->response->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxPacketSize,
+ desc->sbuf,
+ desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
kfree(desc->orq);
kfree(desc->irq);
kfree(desc->ubuf);
@@ -705,17 +705,17 @@ next_desc:
if (!desc->ubuf)
goto err;
- desc->sbuf = usb_buffer_alloc(interface_to_usbdev(intf),
+ desc->sbuf = usb_alloc_coherent(interface_to_usbdev(intf),
desc->wMaxPacketSize,
GFP_KERNEL,
&desc->validity->transfer_dma);
if (!desc->sbuf)
goto err;
- desc->inbuf = usb_buffer_alloc(interface_to_usbdev(intf),
- desc->bMaxPacketSize0,
- GFP_KERNEL,
- &desc->response->transfer_dma);
+ desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
+ desc->bMaxPacketSize0,
+ GFP_KERNEL,
+ &desc->response->transfer_dma);
if (!desc->inbuf)
goto err2;
@@ -742,15 +742,15 @@ out:
return rv;
err3:
usb_set_intfdata(intf, NULL);
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->bMaxPacketSize0,
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->bMaxPacketSize0,
desc->inbuf,
desc->response->transfer_dma);
err2:
- usb_buffer_free(interface_to_usbdev(desc->intf),
- desc->wMaxPacketSize,
- desc->sbuf,
- desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+ desc->wMaxPacketSize,
+ desc->sbuf,
+ desc->validity->transfer_dma);
err:
free_urbs(desc);
kfree(desc->ubuf);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 93b5f85d7ce..2250095db0a 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -27,7 +27,7 @@
* v0.11 - add proto_bias option (Pete Zaitcev)
* v0.12 - add hpoj.sourceforge.net ioctls (David Paschal)
* v0.13 - alloc space for statusbuf (<status> not on stack);
- * use usb_buffer_alloc() for read buf & write buf;
+ * use usb_alloc_coherent() for read buf & write buf;
* none - Maintained in Linux kernel after v0.13
*/
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 3ba2fff7149..2c6965484fe 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -14,7 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/usb.h>
-#include "hcd.h"
+#include <linux/usb/hcd.h>
/*
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0d3af6a6ee4..83126b03e7c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,12 +1,14 @@
#include <linux/usb.h>
#include <linux/usb/ch9.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/quirks.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
+
#define USB_MAXALTSETTING 128 /* Hard limit */
#define USB_MAXENDPOINTS 30 /* Hard limit */
@@ -19,32 +21,6 @@ static inline const char *plural(int n)
return (n == 1 ? "" : "s");
}
-/* FIXME: this is a kludge */
-static int find_next_descriptor_more(unsigned char *buffer, int size,
- int dt1, int dt2, int dt3, int *num_skipped)
-{
- struct usb_descriptor_header *h;
- int n = 0;
- unsigned char *buffer0 = buffer;
-
- /* Find the next descriptor of type dt1 or dt2 or dt3 */
- while (size > 0) {
- h = (struct usb_descriptor_header *) buffer;
- if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
- h->bDescriptorType == dt3)
- break;
- buffer += h->bLength;
- size -= h->bLength;
- ++n;
- }
-
- /* Store the number of descriptors skipped and return the
- * number of bytes skipped */
- if (num_skipped)
- *num_skipped = n;
- return buffer - buffer0;
-}
-
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
@@ -69,47 +45,41 @@ static int find_next_descriptor(unsigned char *buffer, int size,
return buffer - buffer0;
}
-static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
- int num_ep, unsigned char *buffer, int size)
+ unsigned char *buffer, int size)
{
- unsigned char *buffer_start = buffer;
- struct usb_ss_ep_comp_descriptor *desc;
- int retval;
- int num_skipped;
+ struct usb_ss_ep_comp_descriptor *desc;
int max_tx;
- int i;
+ /* The SuperSpeed endpoint companion descriptor is supposed to
+ * be the first thing immediately following the endpoint descriptor.
+ */
desc = (struct usb_ss_ep_comp_descriptor *) buffer;
- if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+ if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
+ size < USB_DT_SS_EP_COMP_SIZE) {
dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
" interface %d altsetting %d ep %d: "
"using minimum values\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- /*
- * The next descriptor is for an Endpoint or Interface,
- * no extra descriptors to copy into the companion structure,
- * and we didn't eat up any of the buffer.
+
+ /* Fill in some default values.
+ * Leave bmAttributes as zero, which will mean no streams for
+ * bulk, and isoc won't support multiple bursts of packets.
+ * With bursts of only one packet, and a Mult of 1, the max
+ * amount of data moved per endpoint service interval is one
+ * packet.
*/
- return 0;
+ ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE;
+ ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+ if (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc))
+ ep->ss_ep_comp.wBytesPerInterval =
+ ep->desc.wMaxPacketSize;
+ return;
}
- memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
- desc = &ep->ss_ep_comp->desc;
- buffer += desc->bLength;
- size -= desc->bLength;
- /* Eat up the other descriptors we don't care about */
- ep->ss_ep_comp->extra = buffer;
- i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
- USB_DT_INTERFACE, &num_skipped);
- ep->ss_ep_comp->extralen = i;
- buffer += i;
- size -= i;
- retval = buffer - buffer_start;
- if (num_skipped > 0)
- dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
- num_skipped, plural(num_skipped),
- "SuperSpeed endpoint companion");
+ memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
/* Check the various values */
if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
@@ -117,47 +87,48 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bMaxBurst = 0;
- }
- if (desc->bMaxBurst > 15) {
+ ep->ss_ep_comp.bMaxBurst = 0;
+ } else if (desc->bMaxBurst > 15) {
dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 15\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bMaxBurst = 15;
+ ep->ss_ep_comp.bMaxBurst = 15;
}
- if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
- && desc->bmAttributes != 0) {
+
+ if ((usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)) &&
+ desc->bmAttributes != 0) {
dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n",
usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
desc->bmAttributes,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 0;
- }
- if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
+ ep->ss_ep_comp.bmAttributes = 0;
+ } else if (usb_endpoint_xfer_bulk(&ep->desc) &&
+ desc->bmAttributes > 16) {
dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
"config %d interface %d altsetting %d ep %d: "
"setting to max\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 16;
- }
- if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
+ ep->ss_ep_comp.bmAttributes = 16;
+ } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ desc->bmAttributes > 2) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
- desc->bmAttributes = 2;
+ ep->ss_ep_comp.bmAttributes = 2;
}
- if (usb_endpoint_xfer_isoc(&ep->desc)) {
+
+ if (usb_endpoint_xfer_isoc(&ep->desc))
max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
(desc->bmAttributes + 1);
- } else if (usb_endpoint_xfer_int(&ep->desc)) {
+ else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
- } else {
- goto valid;
- }
+ else
+ max_tx = 999999;
if (desc->wBytesPerInterval > max_tx) {
dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
"config %d interface %d altsetting %d ep %d: "
@@ -166,10 +137,8 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
desc->wBytesPerInterval,
cfgno, inum, asnum, ep->desc.bEndpointAddress,
max_tx);
- desc->wBytesPerInterval = max_tx;
+ ep->ss_ep_comp.wBytesPerInterval = max_tx;
}
-valid:
- return retval;
}
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
@@ -291,61 +260,19 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
- /* Allocate room for and parse any SS endpoint companion descriptors */
- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
- endpoint->extra = buffer;
- i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
- USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
- endpoint->extralen = i;
- buffer += i;
- size -= i;
-
- /* Allocate space for the SS endpoint companion descriptor */
- endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
- GFP_KERNEL);
- if (!endpoint->ss_ep_comp)
- return -ENOMEM;
- /* Fill in some default values (may be overwritten later) */
- endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
- endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
- endpoint->ss_ep_comp->desc.bMaxBurst = 0;
- /*
- * Leave bmAttributes as zero, which will mean no streams for
- * bulk, and isoc won't support multiple bursts of packets.
- * With bursts of only one packet, and a Mult of 1, the max
- * amount of data moved per endpoint service interval is one
- * packet.
- */
- if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
- usb_endpoint_xfer_int(&endpoint->desc))
- endpoint->ss_ep_comp->desc.wBytesPerInterval =
- endpoint->desc.wMaxPacketSize;
-
- if (size > 0) {
- retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
- inum, asnum, endpoint, num_ep, buffer,
- size);
- if (retval >= 0) {
- buffer += retval;
- retval = buffer - buffer0;
- }
- } else {
- dev_warn(ddev, "config %d interface %d altsetting %d "
- "endpoint 0x%X has no "
- "SuperSpeed companion descriptor\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- retval = buffer - buffer0;
- }
- } else {
- /* Skip over any Class Specific or Vendor Specific descriptors;
- * find the next endpoint or interface descriptor */
- endpoint->extra = buffer;
- i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
- USB_DT_INTERFACE, &n);
- endpoint->extralen = i;
- retval = buffer - buffer0 + i;
- }
+ /* Parse a possible SuperSpeed endpoint companion descriptor */
+ if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+ usb_parse_ss_endpoint_companion(ddev, cfgno,
+ inum, asnum, endpoint, buffer, size);
+
+ /* Skip over any Class Specific or Vendor Specific descriptors;
+ * find the next endpoint or interface descriptor */
+ endpoint->extra = buffer;
+ i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+ USB_DT_INTERFACE, &n);
+ endpoint->extralen = i;
+ retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
@@ -478,9 +405,10 @@ skip_to_next_interface_descriptor:
return buffer - buffer0 + i;
}
-static int usb_parse_configuration(struct device *ddev, int cfgidx,
+static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
struct usb_host_config *config, unsigned char *buffer, int size)
{
+ struct device *ddev = &dev->dev;
unsigned char *buffer0 = buffer;
int cfgno;
int nintf, nintf_orig;
@@ -549,6 +477,16 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
}
inum = d->bInterfaceNumber;
+
+ if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
+ n >= nintf_orig) {
+ dev_warn(ddev, "config %d has more interface "
+ "descriptors, than it declares in "
+ "bNumInterfaces, ignoring interface "
+ "number: %d\n", cfgno, inum);
+ continue;
+ }
+
if (inum >= nintf_orig)
dev_warn(ddev, "config %d has an invalid "
"interface number: %d but max is %d\n",
@@ -722,7 +660,6 @@ int usb_get_configuration(struct usb_device *dev)
int ncfg = dev->descriptor.bNumConfigurations;
int result = 0;
unsigned int cfgno, length;
- unsigned char *buffer;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
@@ -751,17 +688,16 @@ int usb_get_configuration(struct usb_device *dev)
if (!dev->rawdescriptors)
goto err2;
- buffer = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
- if (!buffer)
+ desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
+ if (!desc)
goto err2;
- desc = (struct usb_config_descriptor *)buffer;
result = 0;
for (; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
* the whole configuration is */
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
- buffer, USB_DT_CONFIG_SIZE);
+ desc, USB_DT_CONFIG_SIZE);
if (result < 0) {
dev_err(ddev, "unable to read config index %d "
"descriptor/%s: %d\n", cfgno, "start", result);
@@ -800,7 +736,7 @@ int usb_get_configuration(struct usb_device *dev)
dev->rawdescriptors[cfgno] = bigbuffer;
- result = usb_parse_configuration(&dev->dev, cfgno,
+ result = usb_parse_configuration(dev, cfgno,
&dev->config[cfgno], bigbuffer, length);
if (result < 0) {
++cfgno;
@@ -810,7 +746,7 @@ int usb_get_configuration(struct usb_device *dev)
result = 0;
err:
- kfree(buffer);
+ kfree(desc);
out_not_authorized:
dev->descriptor.bNumConfigurations = cfgno;
err2:
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 19bc03a9fec..3449742c00e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -1,7 +1,8 @@
/*
* devices.c
* (C) Copyright 1999 Randy Dunlap.
- * (C) Copyright 1999,2000 Thomas Sailer <sailer@ife.ee.ethz.ch>. (proc file per device)
+ * (C) Copyright 1999,2000 Thomas Sailer <sailer@ife.ee.ethz.ch>.
+ * (proc file per device)
* (C) Copyright 1999 Deti Fliegl (new USB architecture)
*
* This program is free software; you can redistribute it and/or modify
@@ -55,11 +56,11 @@
#include <linux/usb.h>
#include <linux/smp_lock.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "usb.h"
-#include "hcd.h"
/* Define ALLOW_SERIAL_NUMBER if you want to see the serial number of devices */
#define ALLOW_SERIAL_NUMBER
@@ -138,8 +139,8 @@ struct class_info {
char *class_name;
};
-static const struct class_info clas_info[] =
-{ /* max. 5 chars. per name string */
+static const struct class_info clas_info[] = {
+ /* max. 5 chars. per name string */
{USB_CLASS_PER_INTERFACE, ">ifc"},
{USB_CLASS_AUDIO, "audio"},
{USB_CLASS_COMM, "comm."},
@@ -191,8 +192,10 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
if (speed == USB_SPEED_HIGH) {
switch (le16_to_cpu(desc->wMaxPacketSize) & (0x03 << 11)) {
- case 1 << 11: bandwidth = 2; break;
- case 2 << 11: bandwidth = 3; break;
+ case 1 << 11:
+ bandwidth = 2; break;
+ case 2 << 11:
+ bandwidth = 3; break;
}
}
@@ -200,7 +203,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Ctrl";
- if (speed == USB_SPEED_HIGH) /* uframes per NAK */
+ if (speed == USB_SPEED_HIGH) /* uframes per NAK */
interval = desc->bInterval;
else
interval = 0;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3466fdc5bb1..c2f62a3993d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
@@ -50,9 +51,7 @@
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
-#include "hcd.h" /* for usbcore internals */
#include "usb.h"
-#include "hub.h"
#define USB_MAXBUS 64
#define USB_DEVICE_MAX USB_MAXBUS * 128
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2f3dc4cdf79..ded550eda5d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -26,8 +26,9 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
+#include <linux/usb/hcd.h>
#include <linux/pm_runtime.h>
-#include "hcd.h"
+
#include "usb.h"
@@ -333,7 +334,8 @@ static int usb_probe_interface(struct device *dev)
usb_cancel_queued_reset(intf);
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
- pm_runtime_disable(dev);
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
usb_autosuspend_device(udev);
@@ -388,7 +390,8 @@ static int usb_unbind_interface(struct device *dev)
intf->needs_remote_wakeup = 0;
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
- pm_runtime_disable(dev);
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
/* Undo any residual pm_autopm_get_interface_* calls */
@@ -437,14 +440,17 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* Claimed interfaces are initially inactive (suspended). They are
- * runtime-PM-enabled only if the driver has autosuspend support.
- * They are sensitive to their children's power states.
+ /* Claimed interfaces are initially inactive (suspended) and
+ * runtime-PM-enabled, but only if the driver has autosuspend
+ * support. Otherwise they are marked active, to prevent the
+ * device from being autosuspended, but left disabled. In either
+ * case they are sensitive to their children's power states.
*/
- pm_runtime_set_suspended(dev);
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
+ else
+ pm_runtime_set_active(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -1355,13 +1361,9 @@ int usb_resume(struct device *dev, pm_message_t msg)
*
* The caller must hold @udev's device lock.
*/
-int usb_enable_autosuspend(struct usb_device *udev)
+void usb_enable_autosuspend(struct usb_device *udev)
{
- if (udev->autosuspend_disabled) {
- udev->autosuspend_disabled = 0;
- usb_autosuspend_device(udev);
- }
- return 0;
+ pm_runtime_allow(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
@@ -1374,16 +1376,9 @@ EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
*
* The caller must hold @udev's device lock.
*/
-int usb_disable_autosuspend(struct usb_device *udev)
+void usb_disable_autosuspend(struct usb_device *udev)
{
- int rc = 0;
-
- if (!udev->autosuspend_disabled) {
- rc = usb_autoresume_device(udev);
- if (rc == 0)
- udev->autosuspend_disabled = 1;
- }
- return rc;
+ pm_runtime_forbid(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
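
Note: with usb_enable_autosuspend()/usb_disable_autosuspend() now thin void wrappers around pm_runtime_allow()/pm_runtime_forbid(), callers have nothing left to check. A minimal sketch of a driver opting its device into autosuspend from probe(); my_probe and the id table are hypothetical, and the device lock required by the kernel-doc is already held during probe:

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* Formerly returned int; now just calls pm_runtime_allow(). */
	usb_enable_autosuspend(udev);

	/* ... normal per-interface setup would go here ... */
	return 0;
}
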
@@ -1485,9 +1480,6 @@ int usb_autoresume_device(struct usb_device *udev)
* 0, a delayed autosuspend request for @intf's device is attempted. The
* attempt may fail (see autosuspend_check()).
*
- * If the driver has set @intf->needs_remote_wakeup then autosuspend will
- * take place only if the device's remote-wakeup facility is enabled.
- *
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
@@ -1530,7 +1522,7 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
- if (!udev->autosuspend_disabled) {
+ if (udev->dev.power.runtime_auto) {
/* Optimization: Don't schedule a delayed autosuspend if
* the timer is already running and the expiration time
* wouldn't change.
@@ -1672,14 +1664,14 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
/* Internal routine to check whether we may autosuspend a device. */
static int autosuspend_check(struct usb_device *udev)
{
- int i;
+ int w, i;
struct usb_interface *intf;
unsigned long suspend_time, j;
/* Fail if autosuspend is disabled, or any interfaces are in use, or
* any interface drivers require remote wakeup but it isn't available.
*/
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ w = 0;
if (udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
@@ -1693,12 +1685,7 @@ static int autosuspend_check(struct usb_device *udev)
continue;
if (atomic_read(&intf->dev.power.usage_count) > 0)
return -EBUSY;
- if (intf->needs_remote_wakeup &&
- !udev->do_remote_wakeup) {
- dev_dbg(&udev->dev, "remote wakeup needed "
- "for autosuspend\n");
- return -EOPNOTSUPP;
- }
+ w |= intf->needs_remote_wakeup;
/* Don't allow autosuspend if the device will need
* a reset-resume and any of its interface drivers
@@ -1714,6 +1701,11 @@ static int autosuspend_check(struct usb_device *udev)
}
}
}
+ if (w && !device_can_wakeup(&udev->dev)) {
+ dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+ udev->do_remote_wakeup = w;
/* If everything is okay but the device hasn't been idle for long
* enough, queue a delayed autosuspend request.
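
Note: autosuspend_check() now ORs together the needs_remote_wakeup flags of all bound interfaces and rejects autosuspend only when wakeup is actually impossible (device_can_wakeup() fails). A driver states that requirement per interface; a hedged sketch with hypothetical names (my_open is not from this patch):

#include <linux/usb.h>

/* Hypothetical open() path of an interface driver. */
static int my_open(struct usb_interface *intf)
{
	int rv;

	/* Keep the device resumed while we set things up. */
	rv = usb_autopm_get_interface(intf);
	if (rv < 0)
		return rv;

	/* Tell the core we rely on remote wakeup while suspended;
	 * autosuspend_check() folds this into udev->do_remote_wakeup. */
	intf->needs_remote_wakeup = 1;

	usb_autopm_put_interface(intf);
	return 0;
}
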
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 2c95153c0f2..9a34ccb0a1c 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -18,8 +18,8 @@
*/
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include "usb.h"
-#include "hcd.h"
static inline const char *plural(int n)
{
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 15286533c15..1cf2d1e79a5 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -33,7 +34,6 @@
#endif
#include "usb.h"
-#include "hcd.h"
/* PCI-based HCs are common, but plenty of non-PCI HCs are used too */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2f8cedda800..12742f152f4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,14 +38,12 @@
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include "usb.h"
-#include "hcd.h"
-#include "hub.h"
/*-------------------------------------------------------------------------*/
@@ -1261,6 +1259,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
*dma_handle = 0;
}
+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ enum dma_data_direction dir;
+
+ if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->setup_dma,
+ (void **) &urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+
+ dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ if (urb->transfer_flags & URB_DMA_MAP_SG)
+ dma_unmap_sg(hcd->self.controller,
+ urb->sg,
+ urb->num_sgs,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+ dma_unmap_page(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+ dma_unmap_single(hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ dir);
+ else if (urb->transfer_flags & URB_MAP_LOCAL)
+ hcd_free_coherent(urb->dev->bus,
+ &urb->transfer_dma,
+ &urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+
+ /* Make it safe to call this routine more than once */
+ urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
+ URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
+}
+
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
@@ -1272,11 +1315,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
* unless it uses pio or talks to another transport,
* or uses the provided scatter gather list for bulk.
*/
- if (is_root_hub(urb->dev))
- return 0;
- if (usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
+ if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
hcd->self.controller,
@@ -1286,27 +1326,64 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (dma_mapping_error(hcd->self.controller,
urb->setup_dma))
return -EAGAIN;
- } else if (hcd->driver->flags & HCD_LOCAL_MEM)
+ urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
+ } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->setup_dma,
(void **)&urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
+ }
}
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (ret == 0 && urb->transfer_buffer_length != 0
+ if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
if (hcd->self.uses_dma) {
- urb->transfer_dma = dma_map_single (
- hcd->self.controller,
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (urb->num_sgs) {
+ int n = dma_map_sg(
+ hcd->self.controller,
+ urb->sg,
+ urb->num_sgs,
+ dir);
+ if (n <= 0)
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SG;
+ if (n != urb->num_sgs) {
+ urb->num_sgs = n;
+ urb->transfer_flags |=
+ URB_DMA_SG_COMBINED;
+ }
+ } else if (urb->sg) {
+ struct scatterlist *sg = urb->sg;
+ urb->transfer_dma = dma_map_page(
+ hcd->self.controller,
+ sg_page(sg),
+ sg->offset,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
urb->transfer_dma))
- return -EAGAIN;
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_PAGE;
+ } else {
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ dir);
+ if (dma_mapping_error(hcd->self.controller,
+ urb->transfer_dma))
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SINGLE;
+ }
} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
@@ -1314,55 +1391,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
&urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
-
- if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
- hcd_free_coherent(urb->dev->bus,
- &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
+ if (ret == 0)
+ urb->transfer_flags |= URB_MAP_LOCAL;
}
+ if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
+ URB_SETUP_MAP_LOCAL)))
+ unmap_urb_for_dma(hcd, urb);
}
return ret;
}
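
Note: the new URB_DMA_MAP_SG path lets a driver hand the core a scatterlist directly by filling urb->sg and urb->num_sgs instead of a linear transfer_buffer; map_urb_for_dma() maps it and unmap_urb_for_dma() tears it down. A rough sketch under stated assumptions (my_submit_sg, endpoint 2 and the callback are illustrative; the HCD must advertise scatter-gather support via bus->sg_tablesize):

#include <linux/scatterlist.h>
#include <linux/usb.h>

/* Hypothetical bulk-OUT submission over a prebuilt scatterlist. */
static int my_submit_sg(struct usb_device *udev, struct urb *urb,
			struct scatterlist *sgl, int nents, size_t total,
			usb_complete_t done, void *ctx)
{
	/* Only worthwhile when the HCD can take a scatterlist at all. */
	if (udev->bus->sg_tablesize < nents)
		return -EINVAL;

	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
			  NULL /* no linear buffer */, total, done, ctx);
	urb->sg = sgl;		/* core maps it (URB_DMA_MAP_SG) */
	urb->num_sgs = nents;

	return usb_submit_urb(urb, GFP_KERNEL);
}
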
-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- enum dma_data_direction dir;
-
- if (is_root_hub(urb->dev))
- return;
-
- if (usb_endpoint_xfer_control(&urb->ep->desc)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller, urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
- (void **)&urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- }
-
- dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (urb->transfer_buffer_length != 0
- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd->self.uses_dma)
- dma_unmap_single(hcd->self.controller,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- dir);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
- hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
- &urb->transfer_buffer,
- urb->transfer_buffer_length,
- dir);
- }
-}
-
/*-------------------------------------------------------------------------*/
/* may be called in any context with a valid urb->dev usecount
@@ -1391,21 +1429,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
* URBs must be submitted in process context with interrupts
* enabled.
*/
- status = map_urb_for_dma(hcd, urb, mem_flags);
- if (unlikely(status)) {
- usbmon_urb_submit_error(&hcd->self, urb, status);
- goto error;
- }
- if (is_root_hub(urb->dev))
+ if (is_root_hub(urb->dev)) {
status = rh_urb_enqueue(hcd, urb);
- else
- status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ } else {
+ status = map_urb_for_dma(hcd, urb, mem_flags);
+ if (likely(status == 0)) {
+ status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+ if (unlikely(status))
+ unmap_urb_for_dma(hcd, urb);
+ }
+ }
if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
- unmap_urb_for_dma(hcd, urb);
- error:
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
@@ -1775,6 +1812,75 @@ void usb_hcd_reset_endpoint(struct usb_device *udev,
}
}
+/**
+ * usb_alloc_streams - allocate bulk endpoint stream IDs.
+ * @interface: alternate setting that includes all endpoints.
+ * @eps: array of endpoints that need streams.
+ * @num_eps: number of endpoints in the array.
+ * @num_streams: number of streams to allocate.
+ * @mem_flags: flags hcd should use to allocate memory.
+ *
+ * Sets up a group of bulk endpoints to have num_streams stream IDs available.
+ * Drivers may queue multiple transfers to different stream IDs, which may
+ * complete in a different order than they were queued.
+ */
+int usb_alloc_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct usb_hcd *hcd;
+ struct usb_device *dev;
+ int i;
+
+ dev = interface_to_usbdev(interface);
+ hcd = bus_to_hcd(dev->bus);
+ if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
+ return -EINVAL;
+ if (dev->speed != USB_SPEED_SUPER)
+ return -EINVAL;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+ if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+ return -EINVAL;
+
+ return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
+ num_streams, mem_flags);
+}
+EXPORT_SYMBOL_GPL(usb_alloc_streams);
+
+/**
+ * usb_free_streams - free bulk endpoint stream IDs.
+ * @interface: alternate setting that includes all endpoints.
+ * @eps: array of endpoints to remove streams from.
+ * @num_eps: number of endpoints in the array.
+ * @mem_flags: flags hcd should use to allocate memory.
+ *
+ * Reverts a group of bulk endpoints to not using stream IDs.
+ * Can fail if we are given bad arguments or the HCD is broken.
+ */
+void usb_free_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ struct usb_hcd *hcd;
+ struct usb_device *dev;
+ int i;
+
+ dev = interface_to_usbdev(interface);
+ hcd = bus_to_hcd(dev->bus);
+ if (dev->speed != USB_SPEED_SUPER)
+ return;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+ if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+ return;
+
+ hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
+}
+EXPORT_SYMBOL_GPL(usb_free_streams);
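
Note: usb_alloc_streams()/usb_free_streams() give a SuperSpeed driver a way to request bulk-endpoint stream IDs from the HCD. A hedged sketch of the calling pattern; the two-endpoint array, the stream count of 16 and the helper names are assumptions, not taken from a real user:

#include <linux/usb.h>

/* eps[] would normally be the driver's cached bulk-in/bulk-out endpoints. */
static int my_setup_streams(struct usb_interface *intf,
			    struct usb_host_endpoint *eps[2])
{
	int ret;

	/* Ask for 16 stream IDs on both endpoints; any error from the
	 * HCD's ->alloc_streams() is propagated back to us. */
	ret = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... queue URBs with urb->stream_id set per transfer ... */
	return 0;
}

static void my_teardown_streams(struct usb_interface *intf,
				struct usb_host_endpoint *eps[2])
{
	usb_free_streams(intf, eps, 2, GFP_KERNEL);
}
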
+
/* Protect against drivers that try to unlink URBs after the device
* is gone, by waiting until all unlinks for @udev are finished.
* Since we don't currently track URBs by device, simply wait until
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
deleted file mode 100644
index a3cdb09734a..00000000000
--- a/drivers/usb/core/hcd.h
+++ /dev/null
@@ -1,578 +0,0 @@
-/*
- * Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __USB_CORE_HCD_H
-#define __USB_CORE_HCD_H
-
-#ifdef __KERNEL__
-
-#include <linux/rwsem.h>
-
-#define MAX_TOPO_LEVEL 6
-
-/* This file contains declarations of usbcore internals that are mostly
- * used or exposed by Host Controller Drivers.
- */
-
-/*
- * USB Packet IDs (PIDs)
- */
-#define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */
-#define USB_PID_OUT 0xe1
-#define USB_PID_ACK 0xd2
-#define USB_PID_DATA0 0xc3
-#define USB_PID_PING 0xb4 /* USB 2.0 */
-#define USB_PID_SOF 0xa5
-#define USB_PID_NYET 0x96 /* USB 2.0 */
-#define USB_PID_DATA2 0x87 /* USB 2.0 */
-#define USB_PID_SPLIT 0x78 /* USB 2.0 */
-#define USB_PID_IN 0x69
-#define USB_PID_NAK 0x5a
-#define USB_PID_DATA1 0x4b
-#define USB_PID_PREAMBLE 0x3c /* Token mode */
-#define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */
-#define USB_PID_SETUP 0x2d
-#define USB_PID_STALL 0x1e
-#define USB_PID_MDATA 0x0f /* USB 2.0 */
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * USB Host Controller Driver (usb_hcd) framework
- *
- * Since "struct usb_bus" is so thin, you can't share much code in it.
- * This framework is a layer over that, and should be more sharable.
- *
- * @authorized_default: Specifies if new devices are authorized to
- * connect by default or they require explicit
- * user space authorization; this bit is settable
- * through /sys/class/usb_host/X/authorized_default.
- * For the rest is RO, so we don't lock to r/w it.
- */
-
-/*-------------------------------------------------------------------------*/
-
-struct usb_hcd {
-
- /*
- * housekeeping
- */
- struct usb_bus self; /* hcd is-a bus */
- struct kref kref; /* reference counter */
-
- const char *product_desc; /* product/vendor string */
- char irq_descr[24]; /* driver + bus # */
-
- struct timer_list rh_timer; /* drives root-hub polling */
- struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_USB_SUSPEND
- struct work_struct wakeup_work; /* for remote wakeup */
-#endif
-
- /*
- * hardware info/state
- */
- const struct hc_driver *driver; /* hw-specific hooks */
-
- /* Flags that need to be manipulated atomically */
- unsigned long flags;
-#define HCD_FLAG_HW_ACCESSIBLE 0x00000001
-#define HCD_FLAG_SAW_IRQ 0x00000002
-
- unsigned rh_registered:1;/* is root hub registered? */
-
- /* The next flag is a stopgap, to be removed when all the HCDs
- * support the new root-hub polling mechanism. */
- unsigned uses_new_polling:1;
- unsigned poll_rh:1; /* poll for rh status? */
- unsigned poll_pending:1; /* status has changed? */
- unsigned wireless:1; /* Wireless USB HCD */
- unsigned authorized_default:1;
- unsigned has_tt:1; /* Integrated TT in root hub */
-
- int irq; /* irq allocated */
- void __iomem *regs; /* device memory/io */
- u64 rsrc_start; /* memory/io resource start */
- u64 rsrc_len; /* memory/io resource length */
- unsigned power_budget; /* in mA, 0 = no limit */
-
- /* bandwidth_mutex should be taken before adding or removing
- * any new bus bandwidth constraints:
- * 1. Before adding a configuration for a new device.
- * 2. Before removing the configuration to put the device into
- * the addressed state.
- * 3. Before selecting a different configuration.
- * 4. Before selecting an alternate interface setting.
- *
- * bandwidth_mutex should be dropped after a successful control message
- * to the device, or resetting the bandwidth after a failed attempt.
- */
- struct mutex bandwidth_mutex;
-
-
-#define HCD_BUFFER_POOLS 4
- struct dma_pool *pool [HCD_BUFFER_POOLS];
-
- int state;
-# define __ACTIVE 0x01
-# define __SUSPEND 0x04
-# define __TRANSIENT 0x80
-
-# define HC_STATE_HALT 0
-# define HC_STATE_RUNNING (__ACTIVE)
-# define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE)
-# define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT)
-# define HC_STATE_SUSPENDED (__SUSPEND)
-
-#define HC_IS_RUNNING(state) ((state) & __ACTIVE)
-#define HC_IS_SUSPENDED(state) ((state) & __SUSPEND)
-
- /* more shared queuing code would be good; it should support
- * smarter scheduling, handle transaction translators, etc;
- * input size of periodic table to an interrupt scheduler.
- * (ohci 32, uhci 1024, ehci 256/512/1024).
- */
-
- /* The HC driver's private data is stored at the end of
- * this structure.
- */
- unsigned long hcd_priv[0]
- __attribute__ ((aligned(sizeof(unsigned long))));
-};
-
-/* 2.4 does this a bit differently ... */
-static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd)
-{
- return &hcd->self;
-}
-
-static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus)
-{
- return container_of(bus, struct usb_hcd, self);
-}
-
-struct hcd_timeout { /* timeouts we allocate */
- struct list_head timeout_list;
- struct timer_list timer;
-};
-
-/*-------------------------------------------------------------------------*/
-
-
-struct hc_driver {
- const char *description; /* "ehci-hcd" etc */
- const char *product_desc; /* product/vendor string */
- size_t hcd_priv_size; /* size of private data */
-
- /* irq handler */
- irqreturn_t (*irq) (struct usb_hcd *hcd);
-
- int flags;
-#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */
-#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
-#define HCD_USB11 0x0010 /* USB 1.1 */
-#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB3 0x0040 /* USB 3.0 */
-#define HCD_MASK 0x0070
-
- /* called to init HCD and root hub */
- int (*reset) (struct usb_hcd *hcd);
- int (*start) (struct usb_hcd *hcd);
-
- /* NOTE: these suspend/resume calls relate to the HC as
- * a whole, not just the root hub; they're for PCI bus glue.
- */
- /* called after suspending the hub, before entering D3 etc */
- int (*pci_suspend)(struct usb_hcd *hcd);
-
- /* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
-
- /* cleanly make HCD stop writing memory and doing I/O */
- void (*stop) (struct usb_hcd *hcd);
-
- /* shutdown HCD */
- void (*shutdown) (struct usb_hcd *hcd);
-
- /* return current frame number */
- int (*get_frame_number) (struct usb_hcd *hcd);
-
- /* manage i/o requests, device state */
- int (*urb_enqueue)(struct usb_hcd *hcd,
- struct urb *urb, gfp_t mem_flags);
- int (*urb_dequeue)(struct usb_hcd *hcd,
- struct urb *urb, int status);
-
- /* hw synch, freeing endpoint resources that urb_dequeue can't */
- void (*endpoint_disable)(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-
- /* (optional) reset any endpoint state such as sequence number
- and current window */
- void (*endpoint_reset)(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep);
-
- /* root hub support */
- int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
- int (*hub_control) (struct usb_hcd *hcd,
- u16 typeReq, u16 wValue, u16 wIndex,
- char *buf, u16 wLength);
- int (*bus_suspend)(struct usb_hcd *);
- int (*bus_resume)(struct usb_hcd *);
- int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
-
- /* force handover of high-speed port to full-speed companion */
- void (*relinquish_port)(struct usb_hcd *, int);
- /* has a port been handed over to a companion? */
- int (*port_handed_over)(struct usb_hcd *, int);
-
- /* CLEAR_TT_BUFFER completion callback */
- void (*clear_tt_buffer_complete)(struct usb_hcd *,
- struct usb_host_endpoint *);
-
- /* xHCI specific functions */
- /* Called by usb_alloc_dev to alloc HC device structures */
- int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_disconnect to free HC device structures */
- void (*free_dev)(struct usb_hcd *, struct usb_device *);
-
- /* Bandwidth computation functions */
- /* Note that add_endpoint() can only be called once per endpoint before
- * check_bandwidth() or reset_bandwidth() must be called.
- * drop_endpoint() can only be called once per endpoint also.
- * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
- * add the endpoint to the schedule with possibly new parameters denoted by a
- * different endpoint descriptor in usb_host_endpoint.
- * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
- * not allowed.
- */
- /* Allocate endpoint resources and add them to a new schedule */
- int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
- /* Drop an endpoint from a new schedule */
- int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
- /* Check that a new hardware configuration, set using
- * endpoint_enable and endpoint_disable, does not exceed bus
- * bandwidth. This must be called before any set configuration
- * or set interface requests are sent to the device.
- */
- int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Reset the device schedule to the last known good schedule,
- * which was set from a previous successful call to
- * check_bandwidth(). This reverts any add_endpoint() and
- * drop_endpoint() calls since that last successful call.
- * Used for when a check_bandwidth() call fails due to resource
- * or bandwidth constraints.
- */
- void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
- /* Notifies the HCD after a hub descriptor is fetched.
- * Will block.
- */
- int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
- struct usb_tt *tt, gfp_t mem_flags);
- int (*reset_device)(struct usb_hcd *, struct usb_device *);
-};
-
-extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
-extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
- int status);
-extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
-
-extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags);
-extern int usb_hcd_unlink_urb(struct urb *urb, int status);
-extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
- int status);
-extern void usb_hcd_flush_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_disable_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_reset_endpoint(struct usb_device *udev,
- struct usb_host_endpoint *ep);
-extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
-extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
- struct usb_host_config *new_config,
- struct usb_host_interface *old_alt,
- struct usb_host_interface *new_alt);
-extern int usb_hcd_get_frame_number(struct usb_device *udev);
-
-extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
- struct device *dev, const char *bus_name);
-extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd);
-extern void usb_put_hcd(struct usb_hcd *hcd);
-extern int usb_add_hcd(struct usb_hcd *hcd,
- unsigned int irqnum, unsigned long irqflags);
-extern void usb_remove_hcd(struct usb_hcd *hcd);
-
-struct platform_device;
-extern void usb_hcd_platform_shutdown(struct platform_device *dev);
-
-#ifdef CONFIG_PCI
-struct pci_dev;
-struct pci_device_id;
-extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
-extern void usb_hcd_pci_remove(struct pci_dev *dev);
-extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
-
-#ifdef CONFIG_PM_SLEEP
-extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
-#endif
-#endif /* CONFIG_PCI */
-
-/* pci-ish (pdev null is ok) buffer alloc/mapping support */
-int hcd_buffer_create(struct usb_hcd *hcd);
-void hcd_buffer_destroy(struct usb_hcd *hcd);
-
-void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
- gfp_t mem_flags, dma_addr_t *dma);
-void hcd_buffer_free(struct usb_bus *bus, size_t size,
- void *addr, dma_addr_t dma);
-
-/* generic bus glue, needed for host controllers that don't use PCI */
-extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
-
-extern void usb_hc_died(struct usb_hcd *hcd);
-extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
-
-/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
-#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
-#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
-#define usb_settoggle(dev, ep, out, bit) \
- ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
- ((bit) << (ep)))
-
-/* -------------------------------------------------------------------------- */
-
-/* Enumeration is only for the hub driver, or HCD virtual root hubs */
-extern struct usb_device *usb_alloc_dev(struct usb_device *parent,
- struct usb_bus *, unsigned port);
-extern int usb_new_device(struct usb_device *dev);
-extern void usb_disconnect(struct usb_device **);
-
-extern int usb_get_configuration(struct usb_device *dev);
-extern void usb_destroy_configuration(struct usb_device *dev);
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * HCD Root Hub support
- */
-
-#include "hub.h"
-
-/* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */
-#define DeviceRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
-#define DeviceOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
-
-#define InterfaceRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-
-#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
-
-/* class requests from the USB 2.0 hub spec, table 11-15 */
-/* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE)
-
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Generic bandwidth allocation constants/support
- */
-#define FRAME_TIME_USECS 1000L
-#define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */
- /* Trying not to use worst-case bit-stuffing
- * of (7/6 * 8 * bytecount) = 9.33 * bytecount */
- /* bytecount = data payload byte count */
-
-#define NS_TO_US(ns) ((ns + 500L) / 1000L)
- /* convert & round nanoseconds to microseconds */
-
-
-/*
- * Full/low speed bandwidth allocation constants/support.
- */
-#define BW_HOST_DELAY 1000L /* nanoseconds */
-#define BW_HUB_LS_SETUP 333L /* nanoseconds */
- /* 4 full-speed bit times (est.) */
-
-#define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */
-#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L)
-#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L)
-
-/*
- * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed
- * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed
- * to preallocate bandwidth)
- */
-#define USB2_HOST_DELAY 5 /* nsec, guess */
-#define HS_NSECS(bytes) (((55 * 8 * 2083) \
- + (2083UL * (3 + BitTime(bytes))))/1000 \
- + USB2_HOST_DELAY)
-#define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \
- + (2083UL * (3 + BitTime(bytes))))/1000 \
- + USB2_HOST_DELAY)
-#define HS_USECS(bytes) NS_TO_US (HS_NSECS(bytes))
-#define HS_USECS_ISO(bytes) NS_TO_US (HS_NSECS_ISO(bytes))
-
-extern long usb_calc_bus_time(int speed, int is_input,
- int isoc, int bytecount);
-
-/*-------------------------------------------------------------------------*/
-
-extern void usb_set_device_state(struct usb_device *udev,
- enum usb_device_state new_state);
-
-/*-------------------------------------------------------------------------*/
-
-/* exported only within usbcore */
-
-extern struct list_head usb_bus_list;
-extern struct mutex usb_bus_list_lock;
-extern wait_queue_head_t usb_kill_urb_queue;
-
-extern int usb_find_interface_driver(struct usb_device *dev,
- struct usb_interface *interface);
-
-#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
-
-#ifdef CONFIG_PM
-extern void usb_root_hub_lost_power(struct usb_device *rhdev);
-extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
-extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_USB_SUSPEND
-extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
-#else
-static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
-{
- return;
-}
-#endif /* CONFIG_USB_SUSPEND */
-
-
-/*
- * USB device fs stuff
- */
-
-#ifdef CONFIG_USB_DEVICEFS
-
-/*
- * these are expected to be called from the USB core/hub thread
- * with the kernel lock held
- */
-extern void usbfs_update_special(void);
-extern int usbfs_init(void);
-extern void usbfs_cleanup(void);
-
-#else /* CONFIG_USB_DEVICEFS */
-
-static inline void usbfs_update_special(void) {}
-static inline int usbfs_init(void) { return 0; }
-static inline void usbfs_cleanup(void) { }
-
-#endif /* CONFIG_USB_DEVICEFS */
-
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
-
-struct usb_mon_operations {
- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
- /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
-};
-
-extern struct usb_mon_operations *mon_ops;
-
-static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
-{
- if (bus->monitored)
- (*mon_ops->urb_submit)(bus, urb);
-}
-
-static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
- int error)
-{
- if (bus->monitored)
- (*mon_ops->urb_submit_error)(bus, urb, error);
-}
-
-static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
- int status)
-{
- if (bus->monitored)
- (*mon_ops->urb_complete)(bus, urb, status);
-}
-
-int usb_mon_register(struct usb_mon_operations *ops);
-void usb_mon_deregister(void);
-
-#else
-
-static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {}
-static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
- int error) {}
-static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
- int status) {}
-
-#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
-
-/*-------------------------------------------------------------------------*/
-
-/* hub.h ... DeviceRemovable in 2.4.2-ac11, gone in 2.4.10 */
-/* bleech -- resurfaced in 2.4.11 or 2.4.12 */
-#define bitmap DeviceRemovable
-
-
-/*-------------------------------------------------------------------------*/
-
-/* random stuff */
-
-#define RUN_CONTEXT (in_irq() ? "in_irq" \
- : (in_interrupt() ? "in_interrupt" : "can sleep"))
-
-
-/* This rwsem is for use only by the hub driver and ehci-hcd.
- * Nobody else should touch it.
- */
-extern struct rw_semaphore ehci_cf_port_reset_rwsem;
-
-/* Keep track of which host controller drivers are loaded */
-#define USB_UHCI_LOADED 0
-#define USB_OHCI_LOADED 1
-#define USB_EHCI_LOADED 2
-extern unsigned long usb_hcds_loaded;
-
-#endif /* __KERNEL__ */
-
-#endif /* __USB_CORE_HCD_H */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0940ccd6f4f..83e7bbbe97f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -19,6 +19,7 @@
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
+#include <linux/usb/hcd.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
@@ -28,8 +29,6 @@
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
-#include "hub.h"
/* if we are in debug mode, always announce new devices */
#ifdef DEBUG
@@ -154,11 +153,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(int portstatus)
{
- if (portstatus & (1 << USB_PORT_FEAT_HIGHSPEED))
+ if (portstatus & USB_PORT_STAT_HIGH_SPEED)
return "480 Mb/s";
- else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
+ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
- else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
+ else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
return "5.0 Gb/s";
else
return "12 Mb/s";
@@ -745,8 +744,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
!(portstatus & USB_PORT_STAT_CONNECTION) ||
!udev ||
udev->state == USB_STATE_NOTATTACHED)) {
- clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
- portstatus &= ~USB_PORT_STAT_ENABLE;
+ /*
+ * USB3 protocol ports will automatically transition
+ * to the Enabled state when they detect a USB 3.0 device attach.
+ * Do not disable USB3 protocol ports.
+ * FIXME: USB3 root hub and external hubs are treated
+ * differently here.
+ */
+ if (hdev->descriptor.bDeviceProtocol != 3 ||
+ (!hdev->parent &&
+ !(portstatus & USB_PORT_STAT_SUPER_SPEED))) {
+ clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_ENABLE);
+ portstatus &= ~USB_PORT_STAT_ENABLE;
+ }
}
/* Clear status-change flags; we'll debounce later */
@@ -1784,7 +1795,6 @@ int usb_new_device(struct usb_device *udev)
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
- device_set_wakeup_enable(&udev->dev, 1);
}
/* Tell the runtime-PM framework the device is active */
@@ -3038,7 +3048,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* maybe switch power back on (e.g. root hub was reset) */
if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
- && !(portstatus & (1 << USB_PORT_FEAT_POWER)))
+ && !(portstatus & USB_PORT_STAT_POWER))
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
@@ -3076,7 +3086,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if (!(hcd->driver->flags & HCD_USB3))
udev->speed = USB_SPEED_UNKNOWN;
else if ((hdev->parent == NULL) &&
- (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
+ (portstatus & USB_PORT_STAT_SUPER_SPEED))
udev->speed = USB_SPEED_SUPER;
else
udev->speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
deleted file mode 100644
index de8081f065e..00000000000
--- a/drivers/usb/core/hub.h
+++ /dev/null
@@ -1,205 +0,0 @@
-#ifndef __LINUX_HUB_H
-#define __LINUX_HUB_H
-
-/*
- * Hub protocol and driver data structures.
- *
- * Some of these are known to the "virtual root hub" code
- * in host controller drivers.
- */
-
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/compiler.h> /* likely()/unlikely() */
-
-/*
- * Hub request types
- */
-
-#define USB_RT_HUB (USB_TYPE_CLASS | USB_RECIP_DEVICE)
-#define USB_RT_PORT (USB_TYPE_CLASS | USB_RECIP_OTHER)
-
-/*
- * Hub class requests
- * See USB 2.0 spec Table 11-16
- */
-#define HUB_CLEAR_TT_BUFFER 8
-#define HUB_RESET_TT 9
-#define HUB_GET_TT_STATE 10
-#define HUB_STOP_TT 11
-
-/*
- * Hub Class feature numbers
- * See USB 2.0 spec Table 11-17
- */
-#define C_HUB_LOCAL_POWER 0
-#define C_HUB_OVER_CURRENT 1
-
-/*
- * Port feature numbers
- * See USB 2.0 spec Table 11-17
- */
-#define USB_PORT_FEAT_CONNECTION 0
-#define USB_PORT_FEAT_ENABLE 1
-#define USB_PORT_FEAT_SUSPEND 2 /* L2 suspend */
-#define USB_PORT_FEAT_OVER_CURRENT 3
-#define USB_PORT_FEAT_RESET 4
-#define USB_PORT_FEAT_L1 5 /* L1 suspend */
-#define USB_PORT_FEAT_POWER 8
-#define USB_PORT_FEAT_LOWSPEED 9
-/* This value was never in Table 11-17 */
-#define USB_PORT_FEAT_HIGHSPEED 10
-/* This value is also fake */
-#define USB_PORT_FEAT_SUPERSPEED 11
-#define USB_PORT_FEAT_C_CONNECTION 16
-#define USB_PORT_FEAT_C_ENABLE 17
-#define USB_PORT_FEAT_C_SUSPEND 18
-#define USB_PORT_FEAT_C_OVER_CURRENT 19
-#define USB_PORT_FEAT_C_RESET 20
-#define USB_PORT_FEAT_TEST 21
-#define USB_PORT_FEAT_INDICATOR 22
-#define USB_PORT_FEAT_C_PORT_L1 23
-
-/*
- * Hub Status and Hub Change results
- * See USB 2.0 spec Table 11-19 and Table 11-20
- */
-struct usb_port_status {
- __le16 wPortStatus;
- __le16 wPortChange;
-} __attribute__ ((packed));
-
-/*
- * wPortStatus bit field
- * See USB 2.0 spec Table 11-21
- */
-#define USB_PORT_STAT_CONNECTION 0x0001
-#define USB_PORT_STAT_ENABLE 0x0002
-#define USB_PORT_STAT_SUSPEND 0x0004
-#define USB_PORT_STAT_OVERCURRENT 0x0008
-#define USB_PORT_STAT_RESET 0x0010
-#define USB_PORT_STAT_L1 0x0020
-/* bits 6 to 7 are reserved */
-#define USB_PORT_STAT_POWER 0x0100
-#define USB_PORT_STAT_LOW_SPEED 0x0200
-#define USB_PORT_STAT_HIGH_SPEED 0x0400
-#define USB_PORT_STAT_TEST 0x0800
-#define USB_PORT_STAT_INDICATOR 0x1000
-/* bits 13 to 15 are reserved */
-
-/*
- * wPortChange bit field
- * See USB 2.0 spec Table 11-22
- * Bits 0 to 4 shown, bits 5 to 15 are reserved
- */
-#define USB_PORT_STAT_C_CONNECTION 0x0001
-#define USB_PORT_STAT_C_ENABLE 0x0002
-#define USB_PORT_STAT_C_SUSPEND 0x0004
-#define USB_PORT_STAT_C_OVERCURRENT 0x0008
-#define USB_PORT_STAT_C_RESET 0x0010
-#define USB_PORT_STAT_C_L1 0x0020
-
-/*
- * wHubCharacteristics (masks)
- * See USB 2.0 spec Table 11-13, offset 3
- */
-#define HUB_CHAR_LPSM 0x0003 /* D1 .. D0 */
-#define HUB_CHAR_COMPOUND 0x0004 /* D2 */
-#define HUB_CHAR_OCPM 0x0018 /* D4 .. D3 */
-#define HUB_CHAR_TTTT 0x0060 /* D6 .. D5 */
-#define HUB_CHAR_PORTIND 0x0080 /* D7 */
-
-struct usb_hub_status {
- __le16 wHubStatus;
- __le16 wHubChange;
-} __attribute__ ((packed));
-
-/*
- * Hub Status & Hub Change bit masks
- * See USB 2.0 spec Table 11-19 and Table 11-20
- * Bits 0 and 1 for wHubStatus and wHubChange
- * Bits 2 to 15 are reserved for both
- */
-#define HUB_STATUS_LOCAL_POWER 0x0001
-#define HUB_STATUS_OVERCURRENT 0x0002
-#define HUB_CHANGE_LOCAL_POWER 0x0001
-#define HUB_CHANGE_OVERCURRENT 0x0002
-
-
-/*
- * Hub descriptor
- * See USB 2.0 spec Table 11-13
- */
-
-#define USB_DT_HUB (USB_TYPE_CLASS | 0x09)
-#define USB_DT_HUB_NONVAR_SIZE 7
-
-struct usb_hub_descriptor {
- __u8 bDescLength;
- __u8 bDescriptorType;
- __u8 bNbrPorts;
- __le16 wHubCharacteristics;
- __u8 bPwrOn2PwrGood;
- __u8 bHubContrCurrent;
- /* add 1 bit for hub status change; round to bytes */
- __u8 DeviceRemovable[(USB_MAXCHILDREN + 1 + 7) / 8];
- __u8 PortPwrCtrlMask[(USB_MAXCHILDREN + 1 + 7) / 8];
-} __attribute__ ((packed));
-
-
-/* port indicator status selectors, tables 11-7 and 11-25 */
-#define HUB_LED_AUTO 0
-#define HUB_LED_AMBER 1
-#define HUB_LED_GREEN 2
-#define HUB_LED_OFF 3
-
-enum hub_led_mode {
- INDICATOR_AUTO = 0,
- INDICATOR_CYCLE,
- /* software blinks for attention: software, hardware, reserved */
- INDICATOR_GREEN_BLINK, INDICATOR_GREEN_BLINK_OFF,
- INDICATOR_AMBER_BLINK, INDICATOR_AMBER_BLINK_OFF,
- INDICATOR_ALT_BLINK, INDICATOR_ALT_BLINK_OFF
-} __attribute__ ((packed));
-
-struct usb_device;
-
-/* Transaction Translator Think Times, in bits */
-#define HUB_TTTT_8_BITS 0x00
-#define HUB_TTTT_16_BITS 0x20
-#define HUB_TTTT_24_BITS 0x40
-#define HUB_TTTT_32_BITS 0x60
-
-/*
- * As of USB 2.0, full/low speed devices are segregated into trees.
- * One type grows from USB 1.1 host controllers (OHCI, UHCI etc).
- * The other type grows from high speed hubs when they connect to
- * full/low speed devices using "Transaction Translators" (TTs).
- *
- * TTs should only be known to the hub driver, and high speed bus
- * drivers (only EHCI for now). They affect periodic scheduling and
- * sometimes control/bulk error recovery.
- */
-struct usb_tt {
- struct usb_device *hub; /* upstream highspeed hub */
- int multi; /* true means one TT per port */
- unsigned think_time; /* think time in ns */
-
- /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
- spinlock_t lock;
- struct list_head clear_list; /* of usb_tt_clear */
- struct work_struct clear_work;
-};
-
-struct usb_tt_clear {
- struct list_head clear_list;
- unsigned tt;
- u16 devinfo;
- struct usb_hcd *hcd;
- struct usb_host_endpoint *ep;
-};
-
-extern int usb_hub_clear_tt_buffer(struct urb *urb);
-extern void usb_ep0_reinit(struct usb_device *);
-
-#endif /* __LINUX_HUB_H */
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 111a01a747f..1a27618b67d 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -40,9 +40,9 @@
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
+#include <linux/usb/hcd.h>
#include <asm/byteorder.h>
#include "usb.h"
-#include "hcd.h"
#define USBFS_DEFAULT_DEVMODE (S_IWUSR | S_IRUGO)
#define USBFS_DEFAULT_BUSMODE (S_IXUGO | S_IRUGO)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index cd220277c6c..a73e08fdab3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -14,9 +14,9 @@
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/quirks.h>
+#include <linux/usb/hcd.h> /* for usbcore internals */
#include <asm/byteorder.h>
-#include "hcd.h" /* for usbcore internals */
#include "usb.h"
static void cancel_async_set_config(struct usb_device *udev);
@@ -226,8 +226,7 @@ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
struct urb *urb;
struct usb_host_endpoint *ep;
- ep = (usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out)
- [usb_pipeendpoint(pipe)];
+ ep = usb_pipe_endpoint(usb_dev, pipe);
if (!ep || len < 0)
return -EINVAL;
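usb_pipe_endpoint() replaces the open-coded table lookup removed above. Its definition is not part of this hunk, but from the removed expression it is presumably equivalent to a small inline along these lines:

static inline struct usb_host_endpoint *
usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
{
	struct usb_host_endpoint **eps;

	/* pick the IN or OUT endpoint table, then index by endpoint number */
	eps = usb_pipein(pipe) ? dev->ep_in : dev->ep_out;
	return eps[usb_pipeendpoint(pipe)];
}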
@@ -259,9 +258,6 @@ static void sg_clean(struct usb_sg_request *io)
kfree(io->urbs);
io->urbs = NULL;
}
- if (io->dev->dev.dma_mask != NULL)
- usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
- io->sg, io->nents);
io->dev = NULL;
}
@@ -364,7 +360,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
{
int i;
int urb_flags;
- int dma;
int use_sg;
if (!io || !dev || !sg
@@ -376,114 +371,76 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
- io->sg = sg;
- io->nents = nents;
-
- /* not all host controllers use DMA (like the mainstream pci ones);
- * they can use PIO (sl811) or be software over another transport.
- */
- dma = (dev->dev.dma_mask != NULL);
- if (dma)
- io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
- sg, nents);
- else
- io->entries = nents;
-
- /* initialize all the urbs we'll use */
- if (io->entries <= 0)
- return io->entries;
if (dev->bus->sg_tablesize > 0) {
- io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
use_sg = true;
+ io->entries = 1;
} else {
- io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
use_sg = false;
+ io->entries = nents;
}
+
+ /* initialize all the urbs we'll use */
+ io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
if (!io->urbs)
goto nomem;
- urb_flags = 0;
- if (dma)
- urb_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb_flags = URB_NO_INTERRUPT;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
- if (use_sg) {
- io->urbs[0] = usb_alloc_urb(0, mem_flags);
- if (!io->urbs[0]) {
- io->entries = 0;
- goto nomem;
- }
+ for_each_sg(sg, sg, io->entries, i) {
+ struct urb *urb;
+ unsigned len;
- io->urbs[0]->dev = NULL;
- io->urbs[0]->pipe = pipe;
- io->urbs[0]->interval = period;
- io->urbs[0]->transfer_flags = urb_flags;
-
- io->urbs[0]->complete = sg_complete;
- io->urbs[0]->context = io;
- /* A length of zero means transfer the whole sg list */
- io->urbs[0]->transfer_buffer_length = length;
- if (length == 0) {
- for_each_sg(sg, sg, io->entries, i) {
- io->urbs[0]->transfer_buffer_length +=
- sg_dma_len(sg);
- }
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb) {
+ io->entries = i;
+ goto nomem;
}
- io->urbs[0]->sg = io;
- io->urbs[0]->num_sgs = io->entries;
- io->entries = 1;
- } else {
- urb_flags |= URB_NO_INTERRUPT;
- for_each_sg(sg, sg, io->entries, i) {
- unsigned len;
-
- io->urbs[i] = usb_alloc_urb(0, mem_flags);
- if (!io->urbs[i]) {
- io->entries = i;
- goto nomem;
+ io->urbs[i] = urb;
+
+ urb->dev = NULL;
+ urb->pipe = pipe;
+ urb->interval = period;
+ urb->transfer_flags = urb_flags;
+ urb->complete = sg_complete;
+ urb->context = io;
+ urb->sg = sg;
+
+ if (use_sg) {
+ /* There is no single transfer buffer */
+ urb->transfer_buffer = NULL;
+ urb->num_sgs = nents;
+
+ /* A length of zero means transfer the whole sg list */
+ len = length;
+ if (len == 0) {
+ for_each_sg(sg, sg, nents, i)
+ len += sg->length;
}
-
- io->urbs[i]->dev = NULL;
- io->urbs[i]->pipe = pipe;
- io->urbs[i]->interval = period;
- io->urbs[i]->transfer_flags = urb_flags;
-
- io->urbs[i]->complete = sg_complete;
- io->urbs[i]->context = io;
-
+ } else {
/*
- * Some systems need to revert to PIO when DMA is temporarily
- * unavailable. For their sakes, both transfer_buffer and
- * transfer_dma are set when possible.
- *
- * Note that if IOMMU coalescing occurred, we cannot
- * trust sg_page anymore, so check if S/G list shrunk.
+ * Some systems can't use DMA; they use PIO instead.
+ * For their sakes, transfer_buffer is set whenever
+ * possible.
*/
- if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
- io->urbs[i]->transfer_buffer = sg_virt(sg);
+ if (!PageHighMem(sg_page(sg)))
+ urb->transfer_buffer = sg_virt(sg);
else
- io->urbs[i]->transfer_buffer = NULL;
-
- if (dma) {
- io->urbs[i]->transfer_dma = sg_dma_address(sg);
- len = sg_dma_len(sg);
- } else {
- /* hc may use _only_ transfer_buffer */
- len = sg->length;
- }
+ urb->transfer_buffer = NULL;
+ len = sg->length;
if (length) {
len = min_t(unsigned, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
}
- io->urbs[i]->transfer_buffer_length = len;
}
- io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
+ urb->transfer_buffer_length = len;
}
+ io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f073c5cb4e7..f22d03df8b1 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -71,6 +71,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* BUILDWIN Photo Frame */
+ { USB_DEVICE(0x1908, 0x1315), .driver_info =
+ USB_QUIRK_HONOR_BNUMINTERFACES },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 43c002e3a9a..448f5b47fc4 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -383,13 +383,24 @@ static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
static const char on_string[] = "on";
static const char auto_string[] = "auto";
+static void warn_level(void) {
+ static int level_warned;
+
+ if (!level_warned) {
+ level_warned = 1;
+ printk(KERN_WARNING "WARNING! power/level is deprecated; "
+ "use power/control instead\n");
+ }
+}
+
static ssize_t
show_level(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
- if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled)
+ warn_level();
+ if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto)
p = on_string;
return sprintf(buf, "%s\n", p);
}
@@ -401,8 +412,9 @@ set_level(struct device *dev, struct device_attribute *attr,
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
- int rc;
+ int rc = count;
+ warn_level();
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
@@ -411,17 +423,17 @@ set_level(struct device *dev, struct device_attribute *attr,
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0)
- rc = usb_disable_autosuspend(udev);
+ usb_disable_autosuspend(udev);
else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0)
- rc = usb_enable_autosuspend(udev);
+ usb_enable_autosuspend(udev);
else
rc = -EINVAL;
usb_unlock_device(udev);
- return (rc < 0 ? rc : count);
+ return rc;
}
static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
@@ -646,7 +658,8 @@ const struct attribute_group *usb_device_groups[] = {
/* Binary descriptors */
static ssize_t
-read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
+read_descriptors(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 45a32dadb40..7c0555548ac 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -6,7 +6,7 @@
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
-#include "hcd.h"
+#include <linux/usb/hcd.h>
#define to_urb(d) container_of(d, struct urb, kref)
@@ -308,8 +308,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
* will be required to set urb->ep directly and we will eliminate
* urb->pipe.
*/
- ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
- [usb_pipeendpoint(urb->pipe)];
+ ep = usb_pipe_endpoint(dev, urb->pipe);
if (!ep)
return -ENOENT;
@@ -333,9 +332,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
is_out = usb_endpoint_dir_out(&ep->desc);
}
- /* Cache the direction for later use */
- urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
- (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ /* Clear the internal flags and cache the direction for later use */
+ urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
+ URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
+ URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+ URB_DMA_SG_COMBINED);
+ urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
@@ -396,8 +398,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
return -EPIPE; /* The most suitable error code :-) */
/* enforce simple/standard policy */
- allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
- URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
+ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
+ URB_FREE_BUFFER);
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
if (is_out)
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0561430f2ed..5ae14f6c1e7 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -41,7 +42,6 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include "hcd.h"
#include "usb.h"
@@ -573,7 +573,7 @@ int usb_lock_device_for_reset(struct usb_device *udev,
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
- while (usb_trylock_device(udev) != 0) {
+ while (!usb_trylock_device(udev)) {
/* If we can't acquire the lock after waiting one second,
* we're probably deadlocked */
@@ -593,76 +593,6 @@ int usb_lock_device_for_reset(struct usb_device *udev,
}
EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
-static struct usb_device *match_device(struct usb_device *dev,
- u16 vendor_id, u16 product_id)
-{
- struct usb_device *ret_dev = NULL;
- int child;
-
- dev_dbg(&dev->dev, "check for vendor %04x, product %04x ...\n",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
-
- /* see if this device matches */
- if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) &&
- (product_id == le16_to_cpu(dev->descriptor.idProduct))) {
- dev_dbg(&dev->dev, "matched this device!\n");
- ret_dev = usb_get_dev(dev);
- goto exit;
- }
-
- /* look through all of the children of this device */
- for (child = 0; child < dev->maxchild; ++child) {
- if (dev->children[child]) {
- usb_lock_device(dev->children[child]);
- ret_dev = match_device(dev->children[child],
- vendor_id, product_id);
- usb_unlock_device(dev->children[child]);
- if (ret_dev)
- goto exit;
- }
- }
-exit:
- return ret_dev;
-}
-
-/**
- * usb_find_device - find a specific usb device in the system
- * @vendor_id: the vendor id of the device to find
- * @product_id: the product id of the device to find
- *
- * Returns a pointer to a struct usb_device if such a specified usb
- * device is present in the system currently. The usage count of the
- * device will be incremented if a device is found. Make sure to call
- * usb_put_dev() when the caller is finished with the device.
- *
- * If a device with the specified vendor and product id is not found,
- * NULL is returned.
- */
-struct usb_device *usb_find_device(u16 vendor_id, u16 product_id)
-{
- struct list_head *buslist;
- struct usb_bus *bus;
- struct usb_device *dev = NULL;
-
- mutex_lock(&usb_bus_list_lock);
- for (buslist = usb_bus_list.next;
- buslist != &usb_bus_list;
- buslist = buslist->next) {
- bus = container_of(buslist, struct usb_bus, bus_list);
- if (!bus->root_hub)
- continue;
- usb_lock_device(bus->root_hub);
- dev = match_device(bus->root_hub, vendor_id, product_id);
- usb_unlock_device(bus->root_hub);
- if (dev)
- goto exit;
- }
-exit:
- mutex_unlock(&usb_bus_list_lock);
- return dev;
-}
-
/**
* usb_get_current_frame_number - return current bus frame number
* @dev: the device whose bus is being queried
@@ -775,7 +705,7 @@ EXPORT_SYMBOL_GPL(usb_free_coherent);
* @urb: urb whose transfer_buffer/setup_packet will be mapped
*
* Return value is either null (indicating no buffer could be mapped), or
- * the parameter. URB_NO_TRANSFER_DMA_MAP and URB_NO_SETUP_DMA_MAP are
+ * the parameter. URB_NO_TRANSFER_DMA_MAP is
* added to urb->transfer_flags if the operation succeeds. If the device
* is connected to this system through a non-DMA controller, this operation
* always succeeds.
@@ -803,17 +733,11 @@ struct urb *usb_buffer_map(struct urb *urb)
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (usb_pipecontrol(urb->pipe))
- urb->setup_dma = dma_map_single(controller,
- urb->setup_packet,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
/* FIXME generic api broken like pci, can't report errors */
/* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
} else
urb->transfer_dma = ~0;
- urb->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP
- | URB_NO_SETUP_DMA_MAP);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
return urb;
}
EXPORT_SYMBOL_GPL(usb_buffer_map);
@@ -881,18 +805,13 @@ void usb_buffer_unmap(struct urb *urb)
urb->transfer_dma, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (usb_pipecontrol(urb->pipe))
- dma_unmap_single(controller,
- urb->setup_dma,
- sizeof(struct usb_ctrlrequest),
- DMA_TO_DEVICE);
}
- urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP
- | URB_NO_SETUP_DMA_MAP);
+ urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif /* 0 */
+#if 0
/**
* usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
* @dev: device to which the scatterlist will be mapped
@@ -936,6 +855,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
+#endif
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
@@ -972,6 +892,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
+#if 0
/**
* usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
* @dev: device to which the scatterlist will be mapped
@@ -997,6 +918,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
+#endif
/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
#ifdef MODULE
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 6e98a369784..94ecdbc758c 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -19,6 +19,9 @@
#include <linux/usb/ch9.h>
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
+#include <linux/serial_core.h>
+#include <linux/kgdb.h>
+#include <linux/kthread.h>
#include <asm/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
@@ -55,6 +58,7 @@ static struct ehci_regs __iomem *ehci_regs;
static struct ehci_dbg_port __iomem *ehci_debug;
static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
static unsigned int dbgp_endpoint_out;
+static unsigned int dbgp_endpoint_in;
struct ehci_dev {
u32 bus;
@@ -91,6 +95,13 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
return (x & ~0x0f) | (len & 0x0f);
}
+#ifdef CONFIG_KGDB
+static struct kgdb_io kgdbdbgp_io_ops;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+#else
+#define dbgp_kgdb_mode (0)
+#endif
+
/*
* USB Packet IDs (PIDs)
*/
@@ -182,11 +193,10 @@ static void dbgp_breath(void)
/* Sleep to give the debug port a chance to breathe */
}
-static int dbgp_wait_until_done(unsigned ctrl)
+static int dbgp_wait_until_done(unsigned ctrl, int loop)
{
u32 pids, lpid;
int ret;
- int loop = DBGP_LOOPS;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -276,13 +286,13 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
return ret;
}
static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
- int size)
+ int size, int loops)
{
u32 pids, addr, ctrl;
int ret;
@@ -302,7 +312,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, loops);
if (ret < 0)
return ret;
@@ -343,12 +353,12 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
dbgp_set_data(&req, sizeof(req));
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- ret = dbgp_wait_until_done(ctrl);
+ ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
if (ret < 0)
return ret;
/* Read the result */
- return dbgp_bulk_read(devnum, 0, data, size);
+ return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS);
}
/* Find a PCI capability */
@@ -559,6 +569,7 @@ try_again:
goto err;
}
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
+ dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
/* Move the device to 127 if it isn't already there */
if (devnum != USB_DEBUG_DEVNUM) {
@@ -968,8 +979,9 @@ int dbgp_reset_prep(void)
if (!ehci_debug)
return 0;
- if (early_dbgp_console.index != -1 &&
- !(early_dbgp_console.flags & CON_BOOT))
+ if ((early_dbgp_console.index != -1 &&
+ !(early_dbgp_console.flags & CON_BOOT)) ||
+ dbgp_kgdb_mode)
return 1;
/* This means the console is not initialized, or should get
* shutdown so as to allow for reuse of the usb device, which
@@ -982,3 +994,93 @@ int dbgp_reset_prep(void)
return 0;
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);
+
+#ifdef CONFIG_KGDB
+
+static char kgdbdbgp_buf[DBGP_MAX_PACKET];
+static int kgdbdbgp_buf_sz;
+static int kgdbdbgp_buf_idx;
+static int kgdbdbgp_loop_cnt = DBGP_LOOPS;
+
+static int kgdbdbgp_read_char(void)
+{
+ int ret;
+
+ if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) {
+ char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++];
+ return ch;
+ }
+
+ ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in,
+ &kgdbdbgp_buf, DBGP_MAX_PACKET,
+ kgdbdbgp_loop_cnt);
+ if (ret <= 0)
+ return NO_POLL_CHAR;
+ kgdbdbgp_buf_sz = ret;
+ kgdbdbgp_buf_idx = 1;
+ return kgdbdbgp_buf[0];
+}
+
+static void kgdbdbgp_write_char(u8 chr)
+{
+ early_dbgp_write(NULL, &chr, 1);
+}
+
+static struct kgdb_io kgdbdbgp_io_ops = {
+ .name = "kgdbdbgp",
+ .read_char = kgdbdbgp_read_char,
+ .write_char = kgdbdbgp_write_char,
+};
+
+static int kgdbdbgp_wait_time;
+
+static int __init kgdbdbgp_parse_config(char *str)
+{
+ char *ptr;
+
+ if (!ehci_debug) {
+ if (early_dbgp_init(str))
+ return -1;
+ }
+ ptr = strchr(str, ',');
+ if (ptr) {
+ ptr++;
+ kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
+ }
+ kgdb_register_io_module(&kgdbdbgp_io_ops);
+ kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+
+ return 0;
+}
+early_param("kgdbdbgp", kgdbdbgp_parse_config);
+
+static int kgdbdbgp_reader_thread(void *ptr)
+{
+ int ret;
+
+ while (readl(&ehci_debug->control) & DBGP_ENABLED) {
+ kgdbdbgp_loop_cnt = 1;
+ ret = kgdbdbgp_read_char();
+ kgdbdbgp_loop_cnt = DBGP_LOOPS;
+ if (ret != NO_POLL_CHAR) {
+ if (ret == 0x3 || ret == '$') {
+ if (ret == '$')
+ kgdbdbgp_buf_idx--;
+ kgdb_breakpoint();
+ }
+ continue;
+ }
+ schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ);
+ }
+ return 0;
+}
+
+static int __init kgdbdbgp_start_thread(void)
+{
+ if (dbgp_kgdb_mode && kgdbdbgp_wait_time)
+ kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp");
+
+ return 0;
+}
+module_init(kgdbdbgp_start_thread);
+#endif /* CONFIG_KGDB */
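Usage sketch, inferred from kgdbdbgp_parse_config() above: the text before the optional comma is passed straight to early_dbgp_init() (the same controller selector used by earlyprintk=dbgp), and the number after the comma sets kgdbdbgp_wait_time in seconds for the polling thread. The controller number here is only an example:

	kgdbdbgp=0,2

Without the ",<seconds>" part, kgdbdbgp_wait_time stays zero and kgdbdbgp_start_thread() does not start the reader thread; the debug port is then used only while kgdb is actively entered.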
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 11a3e0fa433..649c0c5f715 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -710,6 +710,43 @@ config USB_GADGETFS
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "gadgetfs".
+config USB_FUNCTIONFS
+ tristate "Function Filesystem (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ The Function Filesystem (FunctionFS) lets one create USB
+ composite functions in user space in the same way as GadgetFS
+ lets one create USB gadgets in user space. This allows creation
+ of composite gadgets such that some of the functions are
+ implemented in kernel space (for instance Ethernet, serial or
+ mass storage) and others are implemented in user space.
+
+ Say "y" to link the driver statically, or "m" to build
+ a dynamically linked module called "g_ffs".
+
+config USB_FUNCTIONFS_ETH
+ bool "Include CDC ECM (Ethernet) function"
+ depends on USB_FUNCTIONFS && NET
+ help
+ Include a CDC ECM (Ethernet) function in the Function
+ Filesystem. If you also say "y" to the RNDIS query below, the
+ gadget will have two configurations.
+
+config USB_FUNCTIONFS_RNDIS
+ bool "Include RNDIS (Ethernet) function"
+ depends on USB_FUNCTIONFS && NET
+ help
+ Include an RNDIS (Ethernet) function in the Function Filesystem.
+ If you also say "y" to the CDC ECM query above, the gadget will
+ have two configurations.
+
+config USB_FUNCTIONFS_GENERIC
+ bool "Include 'pure' configuration"
+ depends on USB_FUNCTIONFS && (USB_FUNCTIONFS_ETH || USB_FUNCTIONFS_RNDIS)
+ help
+ Include a configuration with FunctionFS and no Ethernet
+ configuration.
+
config USB_FILE_STORAGE
tristate "File-backed Storage Gadget"
depends on BLOCK
@@ -863,11 +900,30 @@ config USB_G_MULTI_CDC
If unsure, say "y".
+config USB_G_HID
+ tristate "HID Gadget"
+ help
+ The HID gadget driver provides generic emulation of USB
+ Human Interface Devices (HID).
+
+ For more information, see Documentation/usb/gadget_hid.txt which
+ includes sample code for accessing the device files.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_hid".
# put drivers that need isochronous transfer support (for audio
# or video class gadget drivers), or specific hardware, here.
+config USB_G_WEBCAM
+ tristate "USB Webcam Gadget"
+ depends on VIDEO_DEV
+ help
+ The Webcam Gadget acts as a composite USB Audio and Video Class
+ device. It provides a userspace API to process UVC control requests
+ and stream video data to the host.
-# - none yet
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_webcam".
endchoice
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 43b51da8d72..9bcde110feb 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-objs := fsl_udc_core.o
ifeq ($(CONFIG_ARCH_MXC),y)
-fsl_usb2_udc-objs += fsl_mx3_udc.o
+fsl_usb2_udc-objs += fsl_mxc_udc.o
endif
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
@@ -43,18 +43,24 @@ g_mass_storage-objs := mass_storage.o
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
+g_hid-objs := hid.o
g_nokia-objs := nokia.o
+g_webcam-objs := webcam.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
obj-$(CONFIG_USB_ETH) += g_ether.o
obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
+obj-$(CONFIG_USB_FUNCTIONFS) += g_ffs.o
+obj-$(CONFIG_USB_ETH_FUNCTIONFS) += g_eth_ffs.o
obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
obj-$(CONFIG_USB_MASS_STORAGE) += g_mass_storage.o
obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+obj-$(CONFIG_USB_G_HID) += g_hid.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
+obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index df1bae9b048..eaa79c8a9b8 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -366,6 +366,13 @@ rescan:
if (is_done)
done(ep, req, 0);
else if (ep->is_pingpong) {
+ /*
+ * One dummy read to delay the code because of a HW glitch:
+ * CSR returns bad RXCOUNT when read too soon after updating
+ * RX_DATA_BK flags.
+ */
+ csr = __raw_readl(creg);
+
bufferspace -= count;
buf += count;
goto rescan;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 75a256f3d45..d623c7bda1f 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -48,10 +48,9 @@ static int queue_dbg_open(struct inode *inode, struct file *file)
spin_lock_irq(&ep->udc->lock);
list_for_each_entry(req, &ep->queue, queue) {
- req_copy = kmalloc(sizeof(*req_copy), GFP_ATOMIC);
+ req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
if (!req_copy)
goto fail;
- memcpy(req_copy, req, sizeof(*req_copy));
list_add_tail(&req_copy->queue, queue_data);
}
spin_unlock_irq(&ep->udc->lock);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 09289bb1e20..391d169f8d0 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -36,7 +36,7 @@
*/
/* big enough to hold our biggest descriptor */
-#define USB_BUFSIZ 512
+#define USB_BUFSIZ 1024
static struct usb_composite_driver *composite;
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
* This function returns the value of the function's bind(), which is
* zero for success else a negative errno value.
*/
-int __init usb_add_function(struct usb_configuration *config,
+int usb_add_function(struct usb_configuration *config,
struct usb_function *function)
{
int value = -EINVAL;
@@ -215,7 +215,7 @@ int usb_function_activate(struct usb_function *function)
* Returns the interface ID which was allocated; or -ENODEV if no
* more interface IDs can be allocated.
*/
-int __init usb_interface_id(struct usb_configuration *config,
+int usb_interface_id(struct usb_configuration *config,
struct usb_function *function)
{
unsigned id = config->next_interface_id;
@@ -480,7 +480,7 @@ done:
* assigns global resources including string IDs, and per-configuration
* resources such as interface IDs and endpoints.
*/
-int __init usb_add_config(struct usb_composite_dev *cdev,
+int usb_add_config(struct usb_composite_dev *cdev,
struct usb_configuration *config)
{
int status = -EINVAL;
@@ -677,7 +677,7 @@ static int get_string(struct usb_composite_dev *cdev,
* ensure that for example different functions don't wrongly assign
* different meanings to the same identifier.
*/
-int __init usb_string_id(struct usb_composite_dev *cdev)
+int usb_string_id(struct usb_composite_dev *cdev)
{
if (cdev->next_string_id < 254) {
/* string id 0 is reserved */
@@ -898,7 +898,19 @@ static void composite_disconnect(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
-static void /* __init_or_exit */
+static ssize_t composite_show_suspended(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+ struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+ return sprintf(buf, "%d\n", cdev->suspended);
+}
+
+static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+
+static void
composite_unbind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
@@ -944,10 +956,11 @@ composite_unbind(struct usb_gadget *gadget)
}
kfree(cdev);
set_gadget_data(gadget, NULL);
+ device_remove_file(&gadget->dev, &dev_attr_suspended);
composite = NULL;
}
-static void __init
+static void
string_override_one(struct usb_gadget_strings *tab, u8 id, const char *s)
{
struct usb_string *str = tab->strings;
@@ -960,7 +973,7 @@ string_override_one(struct usb_gadget_strings *tab, u8 id, const char *s)
}
}
-static void __init
+static void
string_override(struct usb_gadget_strings **tab, u8 id, const char *s)
{
while (*tab) {
@@ -969,7 +982,7 @@ string_override(struct usb_gadget_strings **tab, u8 id, const char *s)
}
}
-static int __init composite_bind(struct usb_gadget *gadget)
+static int composite_bind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
int status = -ENOMEM;
@@ -1004,6 +1017,14 @@ static int __init composite_bind(struct usb_gadget *gadget)
*/
usb_ep_autoconfig_reset(cdev->gadget);
+ /* standardized runtime overrides for device ID data */
+ if (idVendor)
+ cdev->desc.idVendor = cpu_to_le16(idVendor);
+ if (idProduct)
+ cdev->desc.idProduct = cpu_to_le16(idProduct);
+ if (bcdDevice)
+ cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
+
/* composite gadget needs to assign strings for whole device (like
* serial number), register function drivers, potentially update
* power state and consumption, etc
@@ -1015,14 +1036,6 @@ static int __init composite_bind(struct usb_gadget *gadget)
cdev->desc = *composite->dev;
cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
- /* standardized runtime overrides for device ID data */
- if (idVendor)
- cdev->desc.idVendor = cpu_to_le16(idVendor);
- if (idProduct)
- cdev->desc.idProduct = cpu_to_le16(idProduct);
- if (bcdDevice)
- cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
-
/* strings can't be assigned before bind() allocates the
* relevant identifiers
*/
@@ -1036,6 +1049,10 @@ static int __init composite_bind(struct usb_gadget *gadget)
string_override(composite->strings,
cdev->desc.iSerialNumber, iSerialNumber);
+ status = device_create_file(&gadget->dev, &dev_attr_suspended);
+ if (status)
+ goto fail;
+
INFO(cdev, "%s ready\n", composite->name);
return 0;
@@ -1064,6 +1081,8 @@ composite_suspend(struct usb_gadget *gadget)
}
if (composite->suspend)
composite->suspend(cdev);
+
+ cdev->suspended = 1;
}
static void
@@ -1084,6 +1103,8 @@ composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
}
+
+ cdev->suspended = 0;
}
/*-------------------------------------------------------------------------*/
@@ -1092,7 +1113,6 @@ static struct usb_gadget_driver composite_driver = {
.speed = USB_SPEED_HIGH,
.bind = composite_bind,
- /* .unbind = __exit_p(composite_unbind), */
.unbind = composite_unbind,
.setup = composite_setup,
@@ -1121,7 +1141,7 @@ static struct usb_gadget_driver composite_driver = {
* while it was binding. That would usually be done in order to wait for
* some userspace participation.
*/
-int __init usb_composite_register(struct usb_composite_driver *driver)
+int usb_composite_register(struct usb_composite_driver *driver)
{
if (!driver || !driver->dev || !driver->bind || composite)
return -EINVAL;
@@ -1142,7 +1162,7 @@ int __init usb_composite_register(struct usb_composite_driver *driver)
* This function is used to unregister drivers using the composite
* driver framework.
*/
-void /* __exit */ usb_composite_unregister(struct usb_composite_driver *driver)
+void usb_composite_unregister(struct usb_composite_driver *driver)
{
if (composite != driver)
return;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 47e8e722682..09084fd646a 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -128,7 +128,7 @@ int usb_gadget_config_buf(
* with identifiers (for interfaces, strings, endpoints, and more)
* as needed by a given function instance.
*/
-struct usb_descriptor_header **__init
+struct usb_descriptor_header **
usb_copy_descriptors(struct usb_descriptor_header **src)
{
struct usb_descriptor_header **tmp;
@@ -175,7 +175,7 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
* intended use is to help remembering the endpoint descriptor to use
* when enabling a given endpoint.
*/
-struct usb_endpoint_descriptor *__init
+struct usb_endpoint_descriptor *
usb_find_endpoint(
struct usb_descriptor_header **src,
struct usb_descriptor_header **copy,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 5e096648518..4f9e578cde9 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -47,6 +47,7 @@
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -55,9 +56,6 @@
#include <asm/unaligned.h>
-#include "../core/hcd.h"
-
-
#define DRIVER_DESC "USB Host+Gadget Emulator"
#define DRIVER_VERSION "02 May 2005"
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 3568de210f7..8a832488ccd 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -34,12 +34,12 @@
/* we must assign addresses for configurable endpoints (like net2280) */
-static __initdata unsigned epnum;
+static unsigned epnum;
// #define MANY_ENDPOINTS
#ifdef MANY_ENDPOINTS
/* more than 15 configurable endpoints */
-static __initdata unsigned in_epnum;
+static unsigned in_epnum;
#endif
@@ -59,7 +59,7 @@ static __initdata unsigned in_epnum;
* NOTE: each endpoint is unidirectional, as specified by its USB
* descriptor; and isn't specific to a configuration or altsetting.
*/
-static int __init
+static int
ep_matches (
struct usb_gadget *gadget,
struct usb_ep *ep,
@@ -187,7 +187,7 @@ ep_matches (
return 1;
}
-static struct usb_ep * __init
+static struct usb_ep *
find_ep (struct usb_gadget *gadget, const char *name)
{
struct usb_ep *ep;
@@ -229,7 +229,7 @@ find_ep (struct usb_gadget *gadget, const char *name)
*
* On failure, this returns a null endpoint descriptor.
*/
-struct usb_ep * __init usb_ep_autoconfig (
+struct usb_ep *usb_ep_autoconfig (
struct usb_gadget *gadget,
struct usb_endpoint_descriptor *desc
)
@@ -304,7 +304,7 @@ struct usb_ep * __init usb_ep_autoconfig (
* state such as ep->driver_data and the record of assigned endpoints
* used by usb_ep_autoconfig().
*/
-void __init usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
{
struct usb_ep *ep;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 400e1ebe697..d47a123f15a 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -116,7 +116,7 @@ acm_iad_descriptor = {
};
-static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
+static struct usb_interface_descriptor acm_control_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -127,7 +127,7 @@ static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_interface_descriptor acm_data_interface_desc __initdata = {
+static struct usb_interface_descriptor acm_data_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
@@ -138,7 +138,7 @@ static struct usb_interface_descriptor acm_data_interface_desc __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc acm_header_desc __initdata = {
+static struct usb_cdc_header_desc acm_header_desc = {
.bLength = sizeof(acm_header_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -146,7 +146,7 @@ static struct usb_cdc_header_desc acm_header_desc __initdata = {
};
static struct usb_cdc_call_mgmt_descriptor
-acm_call_mgmt_descriptor __initdata = {
+acm_call_mgmt_descriptor = {
.bLength = sizeof(acm_call_mgmt_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
@@ -154,14 +154,14 @@ acm_call_mgmt_descriptor __initdata = {
/* .bDataInterface = DYNAMIC */
};
-static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
+static struct usb_cdc_acm_descriptor acm_descriptor = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
.bmCapabilities = USB_CDC_CAP_LINE,
};
-static struct usb_cdc_union_desc acm_union_desc __initdata = {
+static struct usb_cdc_union_desc acm_union_desc = {
.bLength = sizeof(acm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -171,7 +171,7 @@ static struct usb_cdc_union_desc acm_union_desc __initdata = {
/* full speed support: */
-static struct usb_endpoint_descriptor acm_fs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -180,21 +180,21 @@ static struct usb_endpoint_descriptor acm_fs_notify_desc __initdata = {
.bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL,
};
-static struct usb_endpoint_descriptor acm_fs_in_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor acm_fs_out_desc __initdata = {
+static struct usb_endpoint_descriptor acm_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *acm_fs_function[] __initdata = {
+static struct usb_descriptor_header *acm_fs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
@@ -210,7 +210,7 @@ static struct usb_descriptor_header *acm_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor acm_hs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -219,21 +219,21 @@ static struct usb_endpoint_descriptor acm_hs_notify_desc __initdata = {
.bInterval = GS_LOG2_NOTIFY_INTERVAL+4,
};
-static struct usb_endpoint_descriptor acm_hs_in_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor acm_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor acm_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *acm_hs_function[] __initdata = {
+static struct usb_descriptor_header *acm_hs_function[] = {
(struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
@@ -571,7 +571,7 @@ static int acm_send_break(struct gserial *port, int duration)
/*-------------------------------------------------------------------------*/
/* ACM function driver setup/binding */
-static int __init
+static int
acm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -719,7 +719,7 @@ static inline bool can_support_cdc(struct usb_configuration *c)
* handle all the ones it binds. Caller is also responsible
* for calling @gserial_cleanup() before module unload.
*/
-int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
+int acm_bind_config(struct usb_configuration *c, u8 port_num)
{
struct f_acm *acm;
int status;
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 4e595324c61..544257a89ed 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -113,7 +113,7 @@ static inline unsigned ecm_bitrate(struct usb_gadget *g)
/* interface descriptor: */
-static struct usb_interface_descriptor ecm_control_intf __initdata = {
+static struct usb_interface_descriptor ecm_control_intf = {
.bLength = sizeof ecm_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -126,7 +126,7 @@ static struct usb_interface_descriptor ecm_control_intf __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc ecm_header_desc __initdata = {
+static struct usb_cdc_header_desc ecm_header_desc = {
.bLength = sizeof ecm_header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -134,7 +134,7 @@ static struct usb_cdc_header_desc ecm_header_desc __initdata = {
.bcdCDC = cpu_to_le16(0x0110),
};
-static struct usb_cdc_union_desc ecm_union_desc __initdata = {
+static struct usb_cdc_union_desc ecm_union_desc = {
.bLength = sizeof(ecm_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -142,7 +142,7 @@ static struct usb_cdc_union_desc ecm_union_desc __initdata = {
/* .bSlaveInterface0 = DYNAMIC */
};
-static struct usb_cdc_ether_desc ecm_desc __initdata = {
+static struct usb_cdc_ether_desc ecm_desc = {
.bLength = sizeof ecm_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
@@ -157,7 +157,7 @@ static struct usb_cdc_ether_desc ecm_desc __initdata = {
/* the default data interface has no endpoints ... */
-static struct usb_interface_descriptor ecm_data_nop_intf __initdata = {
+static struct usb_interface_descriptor ecm_data_nop_intf = {
.bLength = sizeof ecm_data_nop_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -172,7 +172,7 @@ static struct usb_interface_descriptor ecm_data_nop_intf __initdata = {
/* ... but the "real" data interface has two bulk endpoints */
-static struct usb_interface_descriptor ecm_data_intf __initdata = {
+static struct usb_interface_descriptor ecm_data_intf = {
.bLength = sizeof ecm_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -187,7 +187,7 @@ static struct usb_interface_descriptor ecm_data_intf __initdata = {
/* full speed support: */
-static struct usb_endpoint_descriptor fs_ecm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -197,7 +197,7 @@ static struct usb_endpoint_descriptor fs_ecm_notify_desc __initdata = {
.bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
};
-static struct usb_endpoint_descriptor fs_ecm_in_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -205,7 +205,7 @@ static struct usb_endpoint_descriptor fs_ecm_in_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor fs_ecm_out_desc __initdata = {
+static struct usb_endpoint_descriptor fs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -213,7 +213,7 @@ static struct usb_endpoint_descriptor fs_ecm_out_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *ecm_fs_function[] __initdata = {
+static struct usb_descriptor_header *ecm_fs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
@@ -231,7 +231,7 @@ static struct usb_descriptor_header *ecm_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor hs_ecm_notify_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -240,7 +240,7 @@ static struct usb_endpoint_descriptor hs_ecm_notify_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
-static struct usb_endpoint_descriptor hs_ecm_in_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -249,7 +249,7 @@ static struct usb_endpoint_descriptor hs_ecm_in_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor hs_ecm_out_desc __initdata = {
+static struct usb_endpoint_descriptor hs_ecm_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -258,7 +258,7 @@ static struct usb_endpoint_descriptor hs_ecm_out_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *ecm_hs_function[] __initdata = {
+static struct usb_descriptor_header *ecm_hs_function[] = {
/* CDC ECM control descriptors */
(struct usb_descriptor_header *) &ecm_control_intf,
(struct usb_descriptor_header *) &ecm_header_desc,
@@ -597,7 +597,7 @@ static void ecm_close(struct gether *geth)
/* ethernet function driver setup/binding */
-static int __init
+static int
ecm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -763,7 +763,8 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
-int __init ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+int
+ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
struct f_ecm *ecm;
int status;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
new file mode 100644
index 00000000000..d69eccf5f19
--- /dev/null
+++ b/drivers/usb/gadget/f_fs.c
@@ -0,0 +1,2442 @@
+/*
+ * f_fs.c -- user mode filesystem api for usb composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * Based on inode.c (GadgetFS):
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <asm/unaligned.h>
+#include <linux/smp_lock.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/functionfs.h>
+
+
+#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
+
+
+/* Debugging ****************************************************************/
+
+#define ffs_printk(level, fmt, args...) printk(level "f_fs: " fmt "\n", ## args)
+
+#define FERR(...) ffs_printk(KERN_ERR, __VA_ARGS__)
+#define FINFO(...) ffs_printk(KERN_INFO, __VA_ARGS__)
+
+#ifdef DEBUG
+# define FDBG(...) ffs_printk(KERN_DEBUG, __VA_ARGS__)
+#else
+# define FDBG(...) do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+# define FVDBG FDBG
+#else
+# define FVDBG(...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ENTER() FVDBG("%s()", __func__)
+
+#ifdef VERBOSE_DEBUG
+# define ffs_dump_mem(prefix, ptr, len) \
+ print_hex_dump_bytes("f_fs" prefix ": ", DUMP_PREFIX_NONE, ptr, len)
+#else
+# define ffs_dump_mem(prefix, ptr, len) do { } while (0)
+#endif
+
+
+/* The data structure and setup file ****************************************/
+
+enum ffs_state {
+ /* Waiting for descriptors and strings. */
+ /* In this state no open(2), read(2) or write(2) on epfiles
+ * may succeed (which should not be a problem as there
+ * should be no such files opened in the first place). */
+ FFS_READ_DESCRIPTORS,
+ FFS_READ_STRINGS,
+
+ /* We've got descriptors and strings. We are about to call, or
+ * have already called, functionfs_ready_callback().
+ * functionfs_bind() may have been called but we don't know. */
+ /* This is the only state in which operations on epfiles may
+ * succeed. */
+ FFS_ACTIVE,
+
+ /* All endpoints have been closed. This state is also set if
+ * we encounter an unrecoverable error. The only
+ * unrecoverable errors are failing to initialise the EP files
+ * after reading strings from user space, and
+ * functionfs_ready_callback() returning an error (<0). */
+ /* In this state no open(2), read(2) or write(2) (both on ep0
+ * as well as epfile) may succeed (at this point epfiles are
+ * unlinked and all closed so this is not a problem; ep0 is
+ * also closed but ep0 file exists and so open(2) on ep0 must
+ * fail). */
+ FFS_CLOSING
+};
+
+
+enum ffs_setup_state {
+ /* There is no setup request pending. */
+ FFS_NO_SETUP,
+ /* User has read events and there was a setup request event
+ * there. The next read/write on ep0 will handle the
+ * request. */
+ FFS_SETUP_PENDING,
+ /* There was an event pending, but before user space handled it
+ * some other event was introduced which canceled the existing
+ * setup. If this state is set, read/write on ep0 return
+ * -EIDRM. This state is only set when adding an event. */
+ FFS_SETUP_CANCELED
+};
+
+
+
+struct ffs_epfile;
+struct ffs_function;
+
+struct ffs_data {
+ struct usb_gadget *gadget;
+
+ /* Protect read/write operations; only one read/write
+ * at a time. As a consequence protects ep0req and company.
+ * While setup request is being processed (queued) this is
+ * held. */
+ struct mutex mutex;
+
+ /* Protect access to endpoint related structures (basically
+ * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
+ * endpoint zero. */
+ spinlock_t eps_lock;
+
+ /* XXX REVISIT do we need our own request? Since we are not
+ * handling setup requests immediately, user space may be so
+ * slow that another setup will be sent to the gadget but this
+ * time not to us but another function and then there could be
+ * a race. Is that the case? Or maybe we can use cdev->req
+ * after all, maybe we just need some spinlock for that? */
+ struct usb_request *ep0req; /* P: mutex */
+ struct completion ep0req_completion; /* P: mutex */
+ int ep0req_status; /* P: mutex */
+
+ /* reference counter */
+ atomic_t ref;
+ /* how many files are opened (EP0 and others) */
+ atomic_t opened;
+
+ /* EP0 state */
+ enum ffs_state state;
+
+ /*
+ * Possible transitions:
+ * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock
+ * happens only in ep0 read which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock
+ * happens only in ep0 i/o which is P: mutex
+ * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
+ * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg
+ */
+ enum ffs_setup_state setup_state;
+
+#define FFS_SETUP_STATE(ffs) \
+ ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \
+ FFS_SETUP_CANCELED, FFS_NO_SETUP))
+
+ /* Events & such. */
+ struct {
+ u8 types[4];
+ unsigned short count;
+ /* XXX REVISIT need to update it in some places, or do we? */
+ unsigned short can_stall;
+ struct usb_ctrlrequest setup;
+
+ wait_queue_head_t waitq;
+ } ev; /* the whole structure, P: ev.waitq.lock */
+
+ /* Flags */
+ unsigned long flags;
+#define FFS_FL_CALL_CLOSED_CALLBACK 0
+#define FFS_FL_BOUND 1
+
+ /* Active function */
+ struct ffs_function *func;
+
+ /* Device name, write once when file system is mounted.
+ * Intended for the user to read if she wants. */
+ const char *dev_name;
+ /* Private data for our user (ie. gadget). Managed by
+ * user. */
+ void *private_data;
+
+ /* filled by __ffs_data_got_descs() */
+ /* real descriptors are 16 bytes after raw_descs (so you need
+ * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
+ * first full speed descriptor). raw_descs_length and
+ * raw_fs_descs_length do not have those 16 bytes added. */
+ const void *raw_descs;
+ unsigned raw_descs_length;
+ unsigned raw_fs_descs_length;
+ unsigned fs_descs_count;
+ unsigned hs_descs_count;
+
+ unsigned short strings_count;
+ unsigned short interfaces_count;
+ unsigned short eps_count;
+ unsigned short _pad1;
+
+ /* filled by __ffs_data_got_strings() */
+ /* ids in stringtabs are set in functionfs_bind() */
+ const void *raw_strings;
+ struct usb_gadget_strings **stringtabs;
+
+ /* File system's super block, write once when file system is mounted. */
+ struct super_block *sb;
+
+ /* File permissions, written once when fs is mounted */
+ struct ffs_file_perms {
+ umode_t mode;
+ uid_t uid;
+ gid_t gid;
+ } file_perms;
+
+ /* The endpoint files, filled by ffs_epfiles_create(),
+ * destroyed by ffs_epfiles_destroy(). */
+ struct ffs_epfile *epfiles;
+};
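A rough user-space view of the ffs_state machine above, for orientation: after mounting the function filesystem, the daemon writes a descriptor blob (the 16-byte header plus descriptors noted at raw_descs) and then a strings blob to ep0; the endpoint files are created and the state moves FFS_READ_DESCRIPTORS -> FFS_READ_STRINGS -> FFS_ACTIVE. The mount path and the callers that build the blobs are hypothetical, not defined by this patch:

/* Hypothetical user-space sketch; ep0_path is e.g. "<mountpoint>/ep0". */
#include <fcntl.h>
#include <unistd.h>

static int ffs_bring_up(const char *ep0_path,
			const void *descs, size_t descs_len,
			const void *strs, size_t strs_len)
{
	int fd = open(ep0_path, O_RDWR);

	if (fd < 0)
		return -1;
	/* FFS_READ_DESCRIPTORS -> FFS_READ_STRINGS */
	if (write(fd, descs, descs_len) < 0)
		goto fail;
	/* FFS_READ_STRINGS -> FFS_ACTIVE: endpoint files now exist */
	if (write(fd, strs, strs_len) < 0)
		goto fail;
	return fd;	/* keep open; subsequent reads deliver events */
fail:
	close(fd);
	return -1;
}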
+
+/* Reference counter handling */
+static void ffs_data_get(struct ffs_data *ffs);
+static void ffs_data_put(struct ffs_data *ffs);
+/* Creates new ffs_data object. */
+static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+
+/* Opened counter handling. */
+static void ffs_data_opened(struct ffs_data *ffs);
+static void ffs_data_closed(struct ffs_data *ffs);
+
+/* Called with ffs->mutex held; take over ownership of data. */
+static int __must_check
+__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
+static int __must_check
+__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+
+
+/* The function structure ***************************************************/
+
+struct ffs_ep;
+
+struct ffs_function {
+ struct usb_configuration *conf;
+ struct usb_gadget *gadget;
+ struct ffs_data *ffs;
+
+ struct ffs_ep *eps;
+ u8 eps_revmap[16];
+ short *interfaces_nums;
+
+ struct usb_function function;
+};
+
+
+static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
+{
+ return container_of(f, struct ffs_function, function);
+}
+
+static void ffs_func_free(struct ffs_function *func);
+
+
+static void ffs_func_eps_disable(struct ffs_function *func);
+static int __must_check ffs_func_eps_enable(struct ffs_function *func);
+
+
+static int ffs_func_bind(struct usb_configuration *,
+ struct usb_function *);
+static void ffs_func_unbind(struct usb_configuration *,
+ struct usb_function *);
+static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
+static void ffs_func_disable(struct usb_function *);
+static int ffs_func_setup(struct usb_function *,
+ const struct usb_ctrlrequest *);
+static void ffs_func_suspend(struct usb_function *);
+static void ffs_func_resume(struct usb_function *);
+
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
+
+
+
+/* The endpoints structures *************************************************/
+
+struct ffs_ep {
+ struct usb_ep *ep; /* P: ffs->eps_lock */
+ struct usb_request *req; /* P: epfile->mutex */
+
+ /* [0]: full speed, [1]: high speed */
+ struct usb_endpoint_descriptor *descs[2];
+
+ u8 num;
+
+ int status; /* P: epfile->mutex */
+};
+
+struct ffs_epfile {
+ /* Protects ep->ep and ep->req. */
+ struct mutex mutex;
+ wait_queue_head_t wait;
+
+ struct ffs_data *ffs;
+ struct ffs_ep *ep; /* P: ffs->eps_lock */
+
+ struct dentry *dentry;
+
+ char name[5];
+
+ unsigned char in; /* P: ffs->eps_lock */
+ unsigned char isoc; /* P: ffs->eps_lock */
+
+ unsigned char _pad;
+};
+
+
+static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
+
+static struct inode *__must_check
+ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
+ const struct file_operations *fops,
+ struct dentry **dentry_p);
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+ __attribute__((warn_unused_result, nonnull));
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+ __attribute__((warn_unused_result, nonnull));
+
+
+/* Control file aka ep0 *****************************************************/
+
+static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct ffs_data *ffs = req->context;
+
+ complete_all(&ffs->ep0req_completion);
+}
+
+
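+/* Called with ffs->ev.waitq.lock held; releases that lock before queuing
+ * the request on ep0. */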
+static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+{
+ struct usb_request *req = ffs->ep0req;
+ int ret;
+
+ req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ req->buf = data;
+ req->length = len;
+
+ INIT_COMPLETION(ffs->ep0req_completion);
+
+ ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
+ if (unlikely(ret)) {
+ usb_ep_dequeue(ffs->gadget->ep0, req);
+ return -EINTR;
+ }
+
+ ffs->setup_state = FFS_NO_SETUP;
+ return ffs->ep0req_status;
+}
+
+static int __ffs_ep0_stall(struct ffs_data *ffs)
+{
+ if (ffs->ev.can_stall) {
+ FVDBG("ep0 stall\n");
+ usb_ep_set_halt(ffs->gadget->ep0);
+ ffs->setup_state = FFS_NO_SETUP;
+ return -EL2HLT;
+ } else {
+ FDBG("bogus ep0 stall!\n");
+ return -ESRCH;
+ }
+}
+
+
+static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ptr)
+{
+ struct ffs_data *ffs = file->private_data;
+ ssize_t ret;
+ char *data;
+
+ ENTER();
+
+ /* Fast check if setup was canceled */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ return -EIDRM;
+
+ /* Acquire mutex */
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return ret;
+
+
+ /* Check state */
+ switch (ffs->state) {
+ case FFS_READ_DESCRIPTORS:
+ case FFS_READ_STRINGS:
+ /* Copy data */
+ if (unlikely(len < 16)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ data = ffs_prepare_buffer(buf, len);
+ if (unlikely(IS_ERR(data))) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ /* Handle data */
+ if (ffs->state == FFS_READ_DESCRIPTORS) {
+ FINFO("read descriptors");
+ ret = __ffs_data_got_descs(ffs, data, len);
+ if (unlikely(ret < 0))
+ break;
+
+ ffs->state = FFS_READ_STRINGS;
+ ret = len;
+ } else {
+ FINFO("read strings");
+ ret = __ffs_data_got_strings(ffs, data, len);
+ if (unlikely(ret < 0))
+ break;
+
+ ret = ffs_epfiles_create(ffs);
+ if (unlikely(ret)) {
+ ffs->state = FFS_CLOSING;
+ break;
+ }
+
+ ffs->state = FFS_ACTIVE;
+ mutex_unlock(&ffs->mutex);
+
+ ret = functionfs_ready_callback(ffs);
+ if (unlikely(ret < 0)) {
+ ffs->state = FFS_CLOSING;
+ return ret;
+ }
+
+ set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
+ return len;
+ }
+ break;
+
+
+ case FFS_ACTIVE:
+ data = NULL;
+		/* We're called from user space, we can use _irq
+		 * rather than _irqsave */
+ spin_lock_irq(&ffs->ev.waitq.lock);
+ switch (FFS_SETUP_STATE(ffs)) {
+ case FFS_SETUP_CANCELED:
+ ret = -EIDRM;
+ goto done_spin;
+
+ case FFS_NO_SETUP:
+ ret = -ESRCH;
+ goto done_spin;
+
+ case FFS_SETUP_PENDING:
+ break;
+ }
+
+ /* FFS_SETUP_PENDING */
+ if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ ret = __ffs_ep0_stall(ffs);
+ break;
+ }
+
+ /* FFS_SETUP_PENDING and not stall */
+ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ data = ffs_prepare_buffer(buf, len);
+ if (unlikely(IS_ERR(data))) {
+ ret = PTR_ERR(data);
+ break;
+ }
+
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+		/* We are still guaranteed to be in the FFS_ACTIVE state,
+		 * but the setup state may have changed from
+		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED, so we need to
+		 * check for that.  If that happened, the data copied from
+		 * user space was wasted, but that is unlikely. */
+		/* We cannot be in FFS_NO_SETUP because this is the only
+		 * place where the FFS_SETUP_PENDING -> FFS_NO_SETUP
+		 * transition can be performed and it is protected by the
+		 * mutex. */
+
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ ret = -EIDRM;
+done_spin:
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ } else {
+ /* unlocks spinlock */
+ ret = __ffs_ep0_queue_wait(ffs, data, len);
+ }
+ kfree(data);
+ break;
+
+
+ default:
+ ret = -EBADFD;
+ break;
+ }
+
+
+ mutex_unlock(&ffs->mutex);
+ return ret;
+}
+
+
+
+static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
+ size_t n)
+{
+ /* We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+ * to release them. */
+
+ struct usb_functionfs_event events[n];
+ unsigned i = 0;
+
+ memset(events, 0, sizeof events);
+
+ do {
+ events[i].type = ffs->ev.types[i];
+ if (events[i].type == FUNCTIONFS_SETUP) {
+ events[i].u.setup = ffs->ev.setup;
+ ffs->setup_state = FFS_SETUP_PENDING;
+ }
+ } while (++i < n);
+
+ if (n < ffs->ev.count) {
+ ffs->ev.count -= n;
+ memmove(ffs->ev.types, ffs->ev.types + n,
+ ffs->ev.count * sizeof *ffs->ev.types);
+ } else {
+ ffs->ev.count = 0;
+ }
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ mutex_unlock(&ffs->mutex);
+
+ return unlikely(__copy_to_user(buf, events, sizeof events))
+ ? -EFAULT : sizeof events;
+}
+
+
+static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ptr)
+{
+ struct ffs_data *ffs = file->private_data;
+ char *data = NULL;
+ size_t n;
+ int ret;
+
+ ENTER();
+
+ /* Fast check if setup was canceled */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+ return -EIDRM;
+
+ /* Acquire mutex */
+ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret < 0))
+ return ret;
+
+
+ /* Check state */
+ if (ffs->state != FFS_ACTIVE) {
+ ret = -EBADFD;
+ goto done_mutex;
+ }
+
+
+	/* We're called from user space, we can use _irq rather than
+	 * _irqsave */
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+ switch (FFS_SETUP_STATE(ffs)) {
+ case FFS_SETUP_CANCELED:
+ ret = -EIDRM;
+ break;
+
+ case FFS_NO_SETUP:
+ n = len / sizeof(struct usb_functionfs_event);
+ if (unlikely(!n)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (unlikely(wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count))) {
+ ret = -EINTR;
+ break;
+ }
+
+ return __ffs_ep0_read_events(ffs, buf,
+ min(n, (size_t)ffs->ev.count));
+
+
+ case FFS_SETUP_PENDING:
+ if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+ ret = __ffs_ep0_stall(ffs);
+ goto done_mutex;
+ }
+
+ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ if (likely(len)) {
+ data = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!data)) {
+ ret = -ENOMEM;
+ goto done_mutex;
+ }
+ }
+
+ spin_lock_irq(&ffs->ev.waitq.lock);
+
+ /* See ffs_ep0_write() */
+ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+ ret = -EIDRM;
+ break;
+ }
+
+ /* unlocks spinlock */
+ ret = __ffs_ep0_queue_wait(ffs, data, len);
+ if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+ ret = -EFAULT;
+ goto done_mutex;
+
+ default:
+ ret = -EBADFD;
+ break;
+ }
+
+ spin_unlock_irq(&ffs->ev.waitq.lock);
+done_mutex:
+ mutex_unlock(&ffs->mutex);
+ kfree(data);
+ return ret;
+}
+
+
+
+static int ffs_ep0_open(struct inode *inode, struct file *file)
+{
+ struct ffs_data *ffs = inode->i_private;
+
+ ENTER();
+
+ if (unlikely(ffs->state == FFS_CLOSING))
+ return -EBUSY;
+
+ file->private_data = ffs;
+ ffs_data_opened(ffs);
+
+ return 0;
+}
+
+
+static int ffs_ep0_release(struct inode *inode, struct file *file)
+{
+ struct ffs_data *ffs = file->private_data;
+
+ ENTER();
+
+ ffs_data_closed(ffs);
+
+ return 0;
+}
+
+
+static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+{
+ struct ffs_data *ffs = file->private_data;
+ struct usb_gadget *gadget = ffs->gadget;
+ long ret;
+
+ ENTER();
+
+ if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+ struct ffs_function *func = ffs->func;
+ ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+ } else if (gadget->ops->ioctl) {
+ lock_kernel();
+ ret = gadget->ops->ioctl(gadget, code, value);
+ unlock_kernel();
+ } else {
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+
+static const struct file_operations ffs_ep0_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+
+ .open = ffs_ep0_open,
+ .write = ffs_ep0_write,
+ .read = ffs_ep0_read,
+ .release = ffs_ep0_release,
+ .unlocked_ioctl = ffs_ep0_ioctl,
+};
+
+
+/* "Normal" endpoints operations ********************************************/
+
+
+static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+ ENTER();
+ if (likely(req->context)) {
+ struct ffs_ep *ep = _ep->driver_data;
+ ep->status = req->status ? req->status : req->actual;
+ complete(req->context);
+ }
+}
+
+
+static ssize_t ffs_epfile_io(struct file *file,
+ char __user *buf, size_t len, int read)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_ep *ep;
+ char *data = NULL;
+ ssize_t ret;
+ int halt;
+
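+	/* The first pass jumps straight to first_try; on a retry the loop
+	 * below drops the locks and re-runs the checks, because the
+	 * endpoint may have been disabled or swapped while we slept. */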
+ goto first_try;
+ do {
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ mutex_unlock(&epfile->mutex);
+
+first_try:
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* Wait for endpoint to be enabled */
+ ep = epfile->ep;
+ if (!ep) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ if (unlikely(wait_event_interruptible
+ (epfile->wait, (ep = epfile->ep)))) {
+ ret = -EINTR;
+ goto error;
+ }
+ }
+
+ /* Do we halt? */
+ halt = !read == !epfile->in;
+ if (halt && epfile->isoc) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Allocate & copy */
+ if (!halt && !data) {
+ data = kzalloc(len, GFP_KERNEL);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ if (!read &&
+ unlikely(__copy_from_user(data, buf, len))) {
+ ret = -EFAULT;
+ goto error;
+ }
+ }
+
+ /* We will be using request */
+ ret = ffs_mutex_lock(&epfile->mutex,
+ file->f_flags & O_NONBLOCK);
+ if (unlikely(ret))
+ goto error;
+
+		/* We're called from user space, we can use _irq rather than
+		 * _irqsave */
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+		/* Was the endpoint disabled or changed while we were
+		 * acquiring the mutex? */
+ } while (unlikely(epfile->ep != ep));
+
+ /* Halt */
+ if (unlikely(halt)) {
+ if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
+ usb_ep_set_halt(ep->ep);
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ ret = -EBADMSG;
+ } else {
+ /* Fire the request */
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ struct usb_request *req = ep->req;
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
+ req->buf = data;
+ req->length = len;
+
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ if (unlikely(ret < 0)) {
+ /* nop */
+ } else if (unlikely(wait_for_completion_interruptible(&done))) {
+ ret = -EINTR;
+ usb_ep_dequeue(ep->ep, req);
+ } else {
+ ret = ep->status;
+ if (read && ret > 0 &&
+ unlikely(copy_to_user(buf, data, ret)))
+ ret = -EFAULT;
+ }
+ }
+
+ mutex_unlock(&epfile->mutex);
+error:
+ kfree(data);
+ return ret;
+}
+
+
+static ssize_t
+ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
+ loff_t *ptr)
+{
+ ENTER();
+
+ return ffs_epfile_io(file, (char __user *)buf, len, 0);
+}
+
+static ssize_t
+ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
+{
+ ENTER();
+
+ return ffs_epfile_io(file, buf, len, 1);
+}
+
+static int
+ffs_epfile_open(struct inode *inode, struct file *file)
+{
+ struct ffs_epfile *epfile = inode->i_private;
+
+ ENTER();
+
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ file->private_data = epfile;
+ ffs_data_opened(epfile->ffs);
+
+ return 0;
+}
+
+static int
+ffs_epfile_release(struct inode *inode, struct file *file)
+{
+ struct ffs_epfile *epfile = inode->i_private;
+
+ ENTER();
+
+ ffs_data_closed(epfile->ffs);
+
+ return 0;
+}
+
+
+static long ffs_epfile_ioctl(struct file *file, unsigned code,
+ unsigned long value)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ if (likely(epfile->ep)) {
+ switch (code) {
+ case FUNCTIONFS_FIFO_STATUS:
+ ret = usb_ep_fifo_status(epfile->ep->ep);
+ break;
+ case FUNCTIONFS_FIFO_FLUSH:
+ usb_ep_fifo_flush(epfile->ep->ep);
+ ret = 0;
+ break;
+ case FUNCTIONFS_CLEAR_HALT:
+ ret = usb_ep_clear_halt(epfile->ep->ep);
+ break;
+ case FUNCTIONFS_ENDPOINT_REVMAP:
+ ret = epfile->ep->num;
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ return ret;
+}
+
+
+static const struct file_operations ffs_epfile_operations = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+
+ .open = ffs_epfile_open,
+ .write = ffs_epfile_write,
+ .read = ffs_epfile_read,
+ .release = ffs_epfile_release,
+ .unlocked_ioctl = ffs_epfile_ioctl,
+};
+
+
+
+/* File system and super block operations ***********************************/
+
+/*
+ * Mounting the filesystem creates a controller file, used first for
+ * function configuration then later for event monitoring.
+ */
+
+
+static struct inode *__must_check
+ffs_sb_make_inode(struct super_block *sb, void *data,
+ const struct file_operations *fops,
+ const struct inode_operations *iops,
+ struct ffs_file_perms *perms)
+{
+ struct inode *inode;
+
+ ENTER();
+
+ inode = new_inode(sb);
+
+ if (likely(inode)) {
+ struct timespec current_time = CURRENT_TIME;
+
+ inode->i_mode = perms->mode;
+ inode->i_uid = perms->uid;
+ inode->i_gid = perms->gid;
+ inode->i_atime = current_time;
+ inode->i_mtime = current_time;
+ inode->i_ctime = current_time;
+ inode->i_private = data;
+ if (fops)
+ inode->i_fop = fops;
+ if (iops)
+ inode->i_op = iops;
+ }
+
+ return inode;
+}
+
+
+/* Create "regular" file */
+
+static struct inode *ffs_sb_create_file(struct super_block *sb,
+ const char *name, void *data,
+ const struct file_operations *fops,
+ struct dentry **dentry_p)
+{
+ struct ffs_data *ffs = sb->s_fs_info;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ ENTER();
+
+ dentry = d_alloc_name(sb->s_root, name);
+ if (unlikely(!dentry))
+ return NULL;
+
+ inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
+ if (unlikely(!inode)) {
+ dput(dentry);
+ return NULL;
+ }
+
+ d_add(dentry, inode);
+ if (dentry_p)
+ *dentry_p = dentry;
+
+ return inode;
+}
+
+
+/* Super block */
+
+static const struct super_operations ffs_sb_operations = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+struct ffs_sb_fill_data {
+ struct ffs_file_perms perms;
+ umode_t root_mode;
+ const char *dev_name;
+};
+
+static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
+{
+ struct ffs_sb_fill_data *data = _data;
+ struct inode *inode;
+ struct dentry *d;
+ struct ffs_data *ffs;
+
+ ENTER();
+
+ /* Initialize data */
+ ffs = ffs_data_new();
+ if (unlikely(!ffs))
+ goto enomem0;
+
+ ffs->sb = sb;
+ ffs->dev_name = data->dev_name;
+ ffs->file_perms = data->perms;
+
+ sb->s_fs_info = ffs;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = FUNCTIONFS_MAGIC;
+ sb->s_op = &ffs_sb_operations;
+ sb->s_time_gran = 1;
+
+ /* Root inode */
+ data->perms.mode = data->root_mode;
+ inode = ffs_sb_make_inode(sb, NULL,
+ &simple_dir_operations,
+ &simple_dir_inode_operations,
+ &data->perms);
+ if (unlikely(!inode))
+ goto enomem1;
+ d = d_alloc_root(inode);
+ if (unlikely(!d))
+ goto enomem2;
+ sb->s_root = d;
+
+ /* EP0 file */
+ if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
+ &ffs_ep0_operations, NULL)))
+ goto enomem3;
+
+ return 0;
+
+enomem3:
+ dput(d);
+enomem2:
+ iput(inode);
+enomem1:
+ ffs_data_put(ffs);
+enomem0:
+ return -ENOMEM;
+}
+
+
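+/* Parse comma-separated key=value mount options; all values are numeric,
+ * e.g. "rmode=0550,fmode=0660,uid=0,gid=0" (the mode bits are masked). */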
+static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
+{
+ ENTER();
+
+ if (!opts || !*opts)
+ return 0;
+
+ for (;;) {
+ char *end, *eq, *comma;
+ unsigned long value;
+
+ /* Option limit */
+ comma = strchr(opts, ',');
+ if (comma)
+ *comma = 0;
+
+ /* Value limit */
+ eq = strchr(opts, '=');
+ if (unlikely(!eq)) {
+ FERR("'=' missing in %s", opts);
+ return -EINVAL;
+ }
+ *eq = 0;
+
+ /* Parse value */
+ value = simple_strtoul(eq + 1, &end, 0);
+ if (unlikely(*end != ',' && *end != 0)) {
+ FERR("%s: invalid value: %s", opts, eq + 1);
+ return -EINVAL;
+ }
+
+ /* Interpret option */
+ switch (eq - opts) {
+ case 5:
+ if (!memcmp(opts, "rmode", 5))
+ data->root_mode = (value & 0555) | S_IFDIR;
+ else if (!memcmp(opts, "fmode", 5))
+ data->perms.mode = (value & 0666) | S_IFREG;
+ else
+ goto invalid;
+ break;
+
+ case 4:
+ if (!memcmp(opts, "mode", 4)) {
+ data->root_mode = (value & 0555) | S_IFDIR;
+ data->perms.mode = (value & 0666) | S_IFREG;
+ } else {
+ goto invalid;
+ }
+ break;
+
+ case 3:
+ if (!memcmp(opts, "uid", 3))
+ data->perms.uid = value;
+ else if (!memcmp(opts, "gid", 3))
+ data->perms.gid = value;
+ else
+ goto invalid;
+ break;
+
+ default:
+invalid:
+ FERR("%s: invalid option", opts);
+ return -EINVAL;
+ }
+
+ /* Next iteration */
+ if (!comma)
+ break;
+ opts = comma + 1;
+ }
+
+ return 0;
+}
+
+
+/* "mount -t functionfs dev_name /dev/function" ends up here */
+
+static int
+ffs_fs_get_sb(struct file_system_type *t, int flags,
+ const char *dev_name, void *opts, struct vfsmount *mnt)
+{
+ struct ffs_sb_fill_data data = {
+ .perms = {
+ .mode = S_IFREG | 0600,
+ .uid = 0,
+ .gid = 0
+ },
+ .root_mode = S_IFDIR | 0500,
+ };
+ int ret;
+
+ ENTER();
+
+ ret = functionfs_check_dev_callback(dev_name);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = ffs_fs_parse_opts(&data, opts);
+ if (unlikely(ret < 0))
+ return ret;
+
+ data.dev_name = dev_name;
+ return get_sb_single(t, flags, &data, ffs_sb_fill, mnt);
+}
+
+static void
+ffs_fs_kill_sb(struct super_block *sb)
+{
+ void *ptr;
+
+ ENTER();
+
+ kill_litter_super(sb);
+ ptr = xchg(&sb->s_fs_info, NULL);
+ if (ptr)
+ ffs_data_put(ptr);
+}
+
+static struct file_system_type ffs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "functionfs",
+ .get_sb = ffs_fs_get_sb,
+ .kill_sb = ffs_fs_kill_sb,
+};
+
+
+
+/* Driver's main init/cleanup functions *************************************/
+
+
+static int functionfs_init(void)
+{
+ int ret;
+
+ ENTER();
+
+ ret = register_filesystem(&ffs_fs_type);
+ if (likely(!ret))
+ FINFO("file system registered");
+ else
+ FERR("failed registering file system (%d)", ret);
+
+ return ret;
+}
+
+static void functionfs_cleanup(void)
+{
+ ENTER();
+
+ FINFO("unloading");
+ unregister_filesystem(&ffs_fs_type);
+}
+
+
+
+/* ffs_data and ffs_function construction and destruction code **************/
+
+static void ffs_data_clear(struct ffs_data *ffs);
+static void ffs_data_reset(struct ffs_data *ffs);
+
+
+static void ffs_data_get(struct ffs_data *ffs)
+{
+ ENTER();
+
+ atomic_inc(&ffs->ref);
+}
+
+static void ffs_data_opened(struct ffs_data *ffs)
+{
+ ENTER();
+
+ atomic_inc(&ffs->ref);
+ atomic_inc(&ffs->opened);
+}
+
+static void ffs_data_put(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (unlikely(atomic_dec_and_test(&ffs->ref))) {
+ FINFO("%s(): freeing", __func__);
+ ffs_data_clear(ffs);
+ BUG_ON(mutex_is_locked(&ffs->mutex) ||
+ spin_is_locked(&ffs->ev.waitq.lock) ||
+ waitqueue_active(&ffs->ev.waitq) ||
+ waitqueue_active(&ffs->ep0req_completion.wait));
+ kfree(ffs);
+ }
+}
+
+
+
+static void ffs_data_closed(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (atomic_dec_and_test(&ffs->opened)) {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
+
+ ffs_data_put(ffs);
+}
+
+
+static struct ffs_data *ffs_data_new(void)
+{
+ struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
+ if (unlikely(!ffs))
+		return NULL;
+
+ ENTER();
+
+ atomic_set(&ffs->ref, 1);
+ atomic_set(&ffs->opened, 0);
+ ffs->state = FFS_READ_DESCRIPTORS;
+ mutex_init(&ffs->mutex);
+ spin_lock_init(&ffs->eps_lock);
+ init_waitqueue_head(&ffs->ev.waitq);
+ init_completion(&ffs->ep0req_completion);
+
+ /* XXX REVISIT need to update it in some places, or do we? */
+ ffs->ev.can_stall = 1;
+
+ return ffs;
+}
+
+
+static void ffs_data_clear(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
+ functionfs_closed_callback(ffs);
+
+ BUG_ON(ffs->gadget);
+
+ if (ffs->epfiles)
+ ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+
+ kfree(ffs->raw_descs);
+ kfree(ffs->raw_strings);
+ kfree(ffs->stringtabs);
+}
+
+
+static void ffs_data_reset(struct ffs_data *ffs)
+{
+ ENTER();
+
+ ffs_data_clear(ffs);
+
+ ffs->epfiles = NULL;
+ ffs->raw_descs = NULL;
+ ffs->raw_strings = NULL;
+ ffs->stringtabs = NULL;
+
+ ffs->raw_descs_length = 0;
+ ffs->raw_fs_descs_length = 0;
+ ffs->fs_descs_count = 0;
+ ffs->hs_descs_count = 0;
+
+ ffs->strings_count = 0;
+ ffs->interfaces_count = 0;
+ ffs->eps_count = 0;
+
+ ffs->ev.count = 0;
+
+ ffs->state = FFS_READ_DESCRIPTORS;
+ ffs->setup_state = FFS_NO_SETUP;
+ ffs->flags = 0;
+}
+
+
+static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+{
+ unsigned i, count;
+
+ ENTER();
+
+ if (WARN_ON(ffs->state != FFS_ACTIVE
+ || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
+ return -EBADFD;
+
+ ffs_data_get(ffs);
+
+ ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+ if (unlikely(!ffs->ep0req))
+ return -ENOMEM;
+ ffs->ep0req->complete = ffs_ep0_complete;
+ ffs->ep0req->context = ffs;
+
+	/* Get string identifiers; the i-th string gets the same id in
+	 * every language table. */
+ for (count = ffs->strings_count, i = 0; i < count; ++i) {
+ struct usb_gadget_strings **lang;
+
+ int id = usb_string_id(cdev);
+ if (unlikely(id < 0)) {
+ usb_ep_free_request(cdev->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ return id;
+ }
+
+ lang = ffs->stringtabs;
+ do {
+ (*lang)->strings[i].id = id;
+ ++lang;
+ } while (*lang);
+ }
+
+ ffs->gadget = cdev->gadget;
+ return 0;
+}
+
+
+static void functionfs_unbind(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (!WARN_ON(!ffs->gadget)) {
+ usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+ ffs->ep0req = NULL;
+ ffs->gadget = NULL;
+ ffs_data_put(ffs);
+ }
+}
+
+
+static int ffs_epfiles_create(struct ffs_data *ffs)
+{
+ struct ffs_epfile *epfile, *epfiles;
+ unsigned i, count;
+
+ ENTER();
+
+ count = ffs->eps_count;
+ epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL);
+ if (!epfiles)
+ return -ENOMEM;
+
+ epfile = epfiles;
+ for (i = 1; i <= count; ++i, ++epfile) {
+ epfile->ffs = ffs;
+ mutex_init(&epfile->mutex);
+ init_waitqueue_head(&epfile->wait);
+		sprintf(epfile->name, "ep%u", i);
+		if (unlikely(!ffs_sb_create_file(ffs->sb, epfile->name, epfile,
+						 &ffs_epfile_operations,
+						 &epfile->dentry))) {
+ ffs_epfiles_destroy(epfiles, i - 1);
+ return -ENOMEM;
+ }
+ }
+
+ ffs->epfiles = epfiles;
+ return 0;
+}
+
+
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
+{
+ struct ffs_epfile *epfile = epfiles;
+
+ ENTER();
+
+ for (; count; --count, ++epfile) {
+ BUG_ON(mutex_is_locked(&epfile->mutex) ||
+ waitqueue_active(&epfile->wait));
+ if (epfile->dentry) {
+ d_delete(epfile->dentry);
+ dput(epfile->dentry);
+ epfile->dentry = NULL;
+ }
+ }
+
+ kfree(epfiles);
+}
+
+
+static int functionfs_add(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct ffs_data *ffs)
+{
+ struct ffs_function *func;
+ int ret;
+
+ ENTER();
+
+ func = kzalloc(sizeof *func, GFP_KERNEL);
+ if (unlikely(!func))
+ return -ENOMEM;
+
+ func->function.name = "Function FS Gadget";
+ func->function.strings = ffs->stringtabs;
+
+ func->function.bind = ffs_func_bind;
+ func->function.unbind = ffs_func_unbind;
+ func->function.set_alt = ffs_func_set_alt;
+ /*func->function.get_alt = ffs_func_get_alt;*/
+ func->function.disable = ffs_func_disable;
+ func->function.setup = ffs_func_setup;
+ func->function.suspend = ffs_func_suspend;
+ func->function.resume = ffs_func_resume;
+
+ func->conf = c;
+ func->gadget = cdev->gadget;
+ func->ffs = ffs;
+ ffs_data_get(ffs);
+
+ ret = usb_add_function(c, &func->function);
+ if (unlikely(ret))
+ ffs_func_free(func);
+
+ return ret;
+}
+
+static void ffs_func_free(struct ffs_function *func)
+{
+ ENTER();
+
+ ffs_data_put(func->ffs);
+
+ kfree(func->eps);
+ /* eps and interfaces_nums are allocated in the same chunk so
+ * only one free is required. Descriptors are also allocated
+ * in the same chunk. */
+
+ kfree(func);
+}
+
+
+static void ffs_func_eps_disable(struct ffs_function *func)
+{
+ struct ffs_ep *ep = func->eps;
+ struct ffs_epfile *epfile = func->ffs->epfiles;
+ unsigned count = func->ffs->eps_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ do {
+ /* pending requests get nuked */
+ if (likely(ep->ep))
+ usb_ep_disable(ep->ep);
+ epfile->ep = NULL;
+
+ ++ep;
+ ++epfile;
+ } while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+}
+
+static int ffs_func_eps_enable(struct ffs_function *func)
+{
+ struct ffs_data *ffs = func->ffs;
+ struct ffs_ep *ep = func->eps;
+ struct ffs_epfile *epfile = ffs->epfiles;
+ unsigned count = ffs->eps_count;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ do {
+ struct usb_endpoint_descriptor *ds;
+ ds = ep->descs[ep->descs[1] ? 1 : 0];
+
+ ep->ep->driver_data = ep;
+ ret = usb_ep_enable(ep->ep, ds);
+ if (likely(!ret)) {
+ epfile->ep = ep;
+ epfile->in = usb_endpoint_dir_in(ds);
+ epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ } else {
+ break;
+ }
+
+ wake_up(&epfile->wait);
+
+ ++ep;
+ ++epfile;
+ } while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+ return ret;
+}
+
+
+/* Parsing and building descriptors and strings *****************************/
+
+
+/* This validates whether the data pointed to by data is a valid USB
+ * descriptor and records how many interfaces, endpoints and strings are
+ * required by the given configuration.  Returns the length of the
+ * descriptor or a negative error code if the data is invalid. */
+
+enum ffs_entity_type {
+ FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
+};
+
+typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
+ u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv);
+
+static int __must_check ffs_do_desc(char *data, unsigned len,
+ ffs_entity_callback entity, void *priv)
+{
+ struct usb_descriptor_header *_ds = (void *)data;
+ u8 length;
+ int ret;
+
+ ENTER();
+
+ /* At least two bytes are required: length and type */
+ if (len < 2) {
+ FVDBG("descriptor too short");
+ return -EINVAL;
+ }
+
+	/* Do we have at least as many bytes as the descriptor takes? */
+ length = _ds->bLength;
+ if (len < length) {
+		FVDBG("descriptor longer than available data");
+ return -EINVAL;
+ }
+
+#define __entity_check_INTERFACE(val) 1
+#define __entity_check_STRING(val) (val)
+#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
+#define __entity(type, val) do { \
+ FVDBG("entity " #type "(%02x)", (val)); \
+ if (unlikely(!__entity_check_ ##type(val))) { \
+ FVDBG("invalid entity's value"); \
+ return -EINVAL; \
+ } \
+ ret = entity(FFS_ ##type, &val, _ds, priv); \
+ if (unlikely(ret < 0)) { \
+ FDBG("entity " #type "(%02x); ret = %d", \
+ (val), ret); \
+ return ret; \
+ } \
+ } while (0)
+
+ /* Parse descriptor depending on type. */
+ switch (_ds->bDescriptorType) {
+ case USB_DT_DEVICE:
+ case USB_DT_CONFIG:
+ case USB_DT_STRING:
+ case USB_DT_DEVICE_QUALIFIER:
+ /* function can't have any of those */
+ FVDBG("descriptor reserved for gadget: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ case USB_DT_INTERFACE: {
+ struct usb_interface_descriptor *ds = (void *)_ds;
+ FVDBG("interface descriptor");
+ if (length != sizeof *ds)
+ goto inv_length;
+
+ __entity(INTERFACE, ds->bInterfaceNumber);
+ if (ds->iInterface)
+ __entity(STRING, ds->iInterface);
+ }
+ break;
+
+ case USB_DT_ENDPOINT: {
+ struct usb_endpoint_descriptor *ds = (void *)_ds;
+ FVDBG("endpoint descriptor");
+ if (length != USB_DT_ENDPOINT_SIZE &&
+ length != USB_DT_ENDPOINT_AUDIO_SIZE)
+ goto inv_length;
+ __entity(ENDPOINT, ds->bEndpointAddress);
+ }
+ break;
+
+ case USB_DT_OTG:
+ if (length != sizeof(struct usb_otg_descriptor))
+ goto inv_length;
+ break;
+
+ case USB_DT_INTERFACE_ASSOCIATION: {
+ struct usb_interface_assoc_descriptor *ds = (void *)_ds;
+ FVDBG("interface association descriptor");
+ if (length != sizeof *ds)
+ goto inv_length;
+ if (ds->iFunction)
+ __entity(STRING, ds->iFunction);
+ }
+ break;
+
+ case USB_DT_OTHER_SPEED_CONFIG:
+ case USB_DT_INTERFACE_POWER:
+ case USB_DT_DEBUG:
+ case USB_DT_SECURITY:
+ case USB_DT_CS_RADIO_CONTROL:
+ /* TODO */
+ FVDBG("unimplemented descriptor: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ default:
+ /* We should never be here */
+ FVDBG("unknown descriptor: %d", _ds->bDescriptorType);
+ return -EINVAL;
+
+ inv_length:
+ FVDBG("invalid length: %d (descriptor %d)",
+ _ds->bLength, _ds->bDescriptorType);
+ return -EINVAL;
+ }
+
+#undef __entity
+#undef __entity_check_DESCRIPTOR
+#undef __entity_check_INTERFACE
+#undef __entity_check_STRING
+#undef __entity_check_ENDPOINT
+
+ return length;
+}
+
+
+static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+ ffs_entity_callback entity, void *priv)
+{
+ const unsigned _len = len;
+ unsigned long num = 0;
+
+ ENTER();
+
+ for (;;) {
+ int ret;
+
+ if (num == count)
+ data = NULL;
+
+		/* Record "descriptor" entity; the descriptor index is
+		 * passed to the callback via the valuep pointer itself. */
+ ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
+ if (unlikely(ret < 0)) {
+ FDBG("entity DESCRIPTOR(%02lx); ret = %d", num, ret);
+ return ret;
+ }
+
+ if (!data)
+ return _len - len;
+
+ ret = ffs_do_desc(data, len, entity, priv);
+ if (unlikely(ret < 0)) {
+ FDBG("%s returns %d", __func__, ret);
+ return ret;
+ }
+
+ len -= ret;
+ data += ret;
+ ++num;
+ }
+}
+
+
+static int __ffs_data_do_entity(enum ffs_entity_type type,
+ u8 *valuep, struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct ffs_data *ffs = priv;
+
+ ENTER();
+
+ switch (type) {
+ case FFS_DESCRIPTOR:
+ break;
+
+ case FFS_INTERFACE:
+ /* Interfaces are indexed from zero so if we
+ * encountered interface "n" then there are at least
+ * "n+1" interfaces. */
+ if (*valuep >= ffs->interfaces_count)
+ ffs->interfaces_count = *valuep + 1;
+ break;
+
+ case FFS_STRING:
+		/* Strings are indexed from 1 (index 0 is reserved for
+		 * the list of language IDs). */
+ if (*valuep > ffs->strings_count)
+ ffs->strings_count = *valuep;
+ break;
+
+ case FFS_ENDPOINT:
+ /* Endpoints are indexed from 1 as well. */
+ if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
+ ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+ break;
+ }
+
+ return 0;
+}
+
+
+static int __ffs_data_got_descs(struct ffs_data *ffs,
+ char *const _data, size_t len)
+{
+ unsigned fs_count, hs_count;
+ int fs_len, ret = -EINVAL;
+ char *data = _data;
+
+ ENTER();
+
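+	/* The 16-byte header is four little-endian 32-bit words:
+	 * magic, length, fs_count and hs_count. */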
+ if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ fs_count = get_unaligned_le32(data + 8);
+ hs_count = get_unaligned_le32(data + 12);
+
+ if (!fs_count && !hs_count)
+ goto einval;
+
+ data += 16;
+ len -= 16;
+
+ if (likely(fs_count)) {
+ fs_len = ffs_do_descs(fs_count, data, len,
+ __ffs_data_do_entity, ffs);
+ if (unlikely(fs_len < 0)) {
+ ret = fs_len;
+ goto error;
+ }
+
+ data += fs_len;
+ len -= fs_len;
+ } else {
+ fs_len = 0;
+ }
+
+ if (likely(hs_count)) {
+ ret = ffs_do_descs(hs_count, data, len,
+ __ffs_data_do_entity, ffs);
+ if (unlikely(ret < 0))
+ goto error;
+ } else {
+ ret = 0;
+ }
+
+ if (unlikely(len != ret))
+ goto einval;
+
+ ffs->raw_fs_descs_length = fs_len;
+ ffs->raw_descs_length = fs_len + ret;
+ ffs->raw_descs = _data;
+ ffs->fs_descs_count = fs_count;
+ ffs->hs_descs_count = hs_count;
+
+ return 0;
+
+einval:
+ ret = -EINVAL;
+error:
+ kfree(_data);
+ return ret;
+}
+
+
+
+static int __ffs_data_got_strings(struct ffs_data *ffs,
+ char *const _data, size_t len)
+{
+ u32 str_count, needed_count, lang_count;
+ struct usb_gadget_strings **stringtabs, *t;
+ struct usb_string *strings, *s;
+ const char *data = _data;
+
+ ENTER();
+
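+	/* The 16-byte header is four little-endian 32-bit words:
+	 * magic, length, str_count and lang_count. */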
+ if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ str_count = get_unaligned_le32(data + 8);
+ lang_count = get_unaligned_le32(data + 12);
+
+ /* if one is zero the other must be zero */
+ if (unlikely(!str_count != !lang_count))
+ goto error;
+
+ /* Do we have at least as many strings as descriptors need? */
+ needed_count = ffs->strings_count;
+ if (unlikely(str_count < needed_count))
+ goto error;
+
+ /* If we don't need any strings just return and free all
+ * memory */
+ if (!needed_count) {
+ kfree(_data);
+ return 0;
+ }
+
+ /* Allocate */
+ {
+		/* Allocate everything in one chunk so there's less
+		 * maintenance. */
+ struct {
+ struct usb_gadget_strings *stringtabs[lang_count + 1];
+ struct usb_gadget_strings stringtab[lang_count];
+ struct usb_string strings[lang_count*(needed_count+1)];
+ } *d;
+ unsigned i = 0;
+
+ d = kmalloc(sizeof *d, GFP_KERNEL);
+ if (unlikely(!d)) {
+ kfree(_data);
+ return -ENOMEM;
+ }
+
+ stringtabs = d->stringtabs;
+ t = d->stringtab;
+ i = lang_count;
+ do {
+ *stringtabs++ = t++;
+ } while (--i);
+ *stringtabs = NULL;
+
+ stringtabs = d->stringtabs;
+ t = d->stringtab;
+ s = d->strings;
+ strings = s;
+ }
+
+ /* For each language */
+ data += 16;
+ len -= 16;
+
+ do { /* lang_count > 0 so we can use do-while */
+ unsigned needed = needed_count;
+
+ if (unlikely(len < 3))
+ goto error_free;
+ t->language = get_unaligned_le16(data);
+ t->strings = s;
+ ++t;
+
+ data += 2;
+ len -= 2;
+
+ /* For each string */
+ do { /* str_count > 0 so we can use do-while */
+ size_t length = strnlen(data, len);
+
+ if (unlikely(length == len))
+ goto error_free;
+
+			/* The user may provide more strings than we need;
+			 * if that's the case we simply ignore the rest. */
+ if (likely(needed)) {
+ /* s->id will be set while adding
+ * function to configuration so for
+ * now just leave garbage here. */
+ s->s = data;
+ --needed;
+ ++s;
+ }
+
+ data += length + 1;
+ len -= length + 1;
+ } while (--str_count);
+
+ s->id = 0; /* terminator */
+ s->s = NULL;
+ ++s;
+
+ } while (--lang_count);
+
+ /* Some garbage left? */
+ if (unlikely(len))
+ goto error_free;
+
+ /* Done! */
+ ffs->stringtabs = stringtabs;
+ ffs->raw_strings = _data;
+
+ return 0;
+
+error_free:
+ kfree(stringtabs);
+error:
+ kfree(_data);
+ return -EINVAL;
+}
+
+
+
+
+/* Events handling and management *******************************************/
+
+static void __ffs_event_add(struct ffs_data *ffs,
+ enum usb_functionfs_event_type type)
+{
+ enum usb_functionfs_event_type rem_type1, rem_type2 = type;
+ int neg = 0;
+
+ /* Abort any unhandled setup */
+	/* We do not need to worry about a cmpxchg() changing the value of
+	 * ffs->setup_state without the lock held, because when the state
+	 * is FFS_SETUP_PENDING the cmpxchg() calls elsewhere in the source
+	 * do nothing. */
+ if (ffs->setup_state == FFS_SETUP_PENDING)
+ ffs->setup_state = FFS_SETUP_CANCELED;
+
+ switch (type) {
+ case FUNCTIONFS_RESUME:
+ rem_type2 = FUNCTIONFS_SUSPEND;
+		/* FALL THROUGH */
+ case FUNCTIONFS_SUSPEND:
+ case FUNCTIONFS_SETUP:
+ rem_type1 = type;
+ /* discard all similar events */
+ break;
+
+ case FUNCTIONFS_BIND:
+ case FUNCTIONFS_UNBIND:
+ case FUNCTIONFS_DISABLE:
+ case FUNCTIONFS_ENABLE:
+		/* discard everything other than power management. */
+ rem_type1 = FUNCTIONFS_SUSPEND;
+ rem_type2 = FUNCTIONFS_RESUME;
+ neg = 1;
+ break;
+
+ default:
+ BUG();
+ }
+
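+	/* Filter the queued events: when neg is set keep only rem_type1 and
+	 * rem_type2, otherwise drop those two types and keep the rest. */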
+ {
+ u8 *ev = ffs->ev.types, *out = ev;
+ unsigned n = ffs->ev.count;
+ for (; n; --n, ++ev)
+ if ((*ev == rem_type1 || *ev == rem_type2) == neg)
+ *out++ = *ev;
+ else
+ FVDBG("purging event %d", *ev);
+ ffs->ev.count = out - ffs->ev.types;
+ }
+
+ FVDBG("adding event %d", type);
+ ffs->ev.types[ffs->ev.count++] = type;
+ wake_up_locked(&ffs->ev.waitq);
+}
+
+static void ffs_event_add(struct ffs_data *ffs,
+ enum usb_functionfs_event_type type)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+ __ffs_event_add(ffs, type);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+}
+
+
+/* Bind/unbind USB function hooks *******************************************/
+
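+/* First pass over the descriptors: copy each descriptor pointer into the
+ * function's descriptor array and, for endpoint descriptors seen for the
+ * first time, auto-configure an endpoint and allocate a request.  A second
+ * pass, __ffs_func_bind_do_nums(), rewrites interface, string and endpoint
+ * numbers. */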
+static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct usb_endpoint_descriptor *ds = (void *)desc;
+ struct ffs_function *func = priv;
+ struct ffs_ep *ffs_ep;
+
+ /* If hs_descriptors is not NULL then we are reading hs
+ * descriptors now */
+ const int isHS = func->function.hs_descriptors != NULL;
+ unsigned idx;
+
+ if (type != FFS_DESCRIPTOR)
+ return 0;
+
+ if (isHS)
+ func->function.hs_descriptors[(long)valuep] = desc;
+ else
+ func->function.descriptors[(long)valuep] = desc;
+
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return 0;
+
+ idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+ ffs_ep = func->eps + idx;
+
+ if (unlikely(ffs_ep->descs[isHS])) {
+ FVDBG("two %sspeed descriptors for EP %d",
+ isHS ? "high" : "full",
+ ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ return -EINVAL;
+ }
+ ffs_ep->descs[isHS] = ds;
+
+ ffs_dump_mem(": Original ep desc", ds, ds->bLength);
+ if (ffs_ep->ep) {
+ ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
+ if (!ds->wMaxPacketSize)
+ ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
+ } else {
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ FVDBG("autoconfig");
+ ep = usb_ep_autoconfig(func->gadget, ds);
+ if (unlikely(!ep))
+ return -ENOTSUPP;
+		ep->driver_data = func->eps + idx;
+
+ req = usb_ep_alloc_request(ep, GFP_KERNEL);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ffs_ep->ep = ep;
+ ffs_ep->req = req;
+ func->eps_revmap[ds->bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK] = idx + 1;
+ }
+ ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+
+ return 0;
+}
+
+
+static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
+ struct usb_descriptor_header *desc,
+ void *priv)
+{
+ struct ffs_function *func = priv;
+ unsigned idx;
+ u8 newValue;
+
+ switch (type) {
+ default:
+ case FFS_DESCRIPTOR:
+ /* Handled in previous pass by __ffs_func_bind_do_descs() */
+ return 0;
+
+ case FFS_INTERFACE:
+ idx = *valuep;
+ if (func->interfaces_nums[idx] < 0) {
+ int id = usb_interface_id(func->conf, &func->function);
+ if (unlikely(id < 0))
+ return id;
+ func->interfaces_nums[idx] = id;
+ }
+ newValue = func->interfaces_nums[idx];
+ break;
+
+ case FFS_STRING:
+		/* String IDs are allocated when ffs_data is bound to cdev */
+ newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
+ break;
+
+ case FFS_ENDPOINT:
+ /* USB_DT_ENDPOINT are handled in
+ * __ffs_func_bind_do_descs(). */
+ if (desc->bDescriptorType == USB_DT_ENDPOINT)
+ return 0;
+
+ idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
+ if (unlikely(!func->eps[idx].ep))
+ return -EINVAL;
+
+ {
+ struct usb_endpoint_descriptor **descs;
+ descs = func->eps[idx].descs;
+ newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
+ }
+ break;
+ }
+
+ FVDBG("%02x -> %02x", *valuep, newValue);
+ *valuep = newValue;
+ return 0;
+}
+
+static int ffs_func_bind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+
+ const int full = !!func->ffs->fs_descs_count;
+ const int high = gadget_is_dualspeed(func->gadget) &&
+ func->ffs->hs_descs_count;
+
+ int ret;
+
+ /* Make it a single chunk, less management later on */
+ struct {
+ struct ffs_ep eps[ffs->eps_count];
+ struct usb_descriptor_header
+ *fs_descs[full ? ffs->fs_descs_count + 1 : 0];
+ struct usb_descriptor_header
+ *hs_descs[high ? ffs->hs_descs_count + 1 : 0];
+ short inums[ffs->interfaces_count];
+ char raw_descs[high ? ffs->raw_descs_length
+ : ffs->raw_fs_descs_length];
+ } *data;
+
+ ENTER();
+
+	/* Only high-speed descriptors but the gadget does not support high speed? */
+ if (unlikely(!(full | high)))
+ return -ENOTSUPP;
+
+ /* Allocate */
+ data = kmalloc(sizeof *data, GFP_KERNEL);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ /* Zero */
+ memset(data->eps, 0, sizeof data->eps);
+ memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
+ memset(data->inums, 0xff, sizeof data->inums);
+ for (ret = ffs->eps_count; ret; --ret)
+ data->eps[ret].num = -1;
+
+ /* Save pointers */
+ func->eps = data->eps;
+ func->interfaces_nums = data->inums;
+
+	/* Go through all the endpoint descriptors and allocate
+	 * endpoints first, so that later we can rewrite the endpoint
+	 * numbers without worrying about an endpoint being described
+	 * later on. */
+ if (likely(full)) {
+ func->function.descriptors = data->fs_descs;
+ ret = ffs_do_descs(ffs->fs_descs_count,
+ data->raw_descs,
+ sizeof data->raw_descs,
+ __ffs_func_bind_do_descs, func);
+ if (unlikely(ret < 0))
+ goto error;
+ } else {
+ ret = 0;
+ }
+
+ if (likely(high)) {
+ func->function.hs_descriptors = data->hs_descs;
+ ret = ffs_do_descs(ffs->hs_descs_count,
+ data->raw_descs + ret,
+ (sizeof data->raw_descs) - ret,
+ __ffs_func_bind_do_descs, func);
+ }
+
+	/* Now handle interface number allocation and interface and
+	 * endpoint number rewriting.  We can do that in one go
+	 * now. */
+ ret = ffs_do_descs(ffs->fs_descs_count +
+ (high ? ffs->hs_descs_count : 0),
+ data->raw_descs, sizeof data->raw_descs,
+ __ffs_func_bind_do_nums, func);
+ if (unlikely(ret < 0))
+ goto error;
+
+ /* And we're done */
+ ffs_event_add(ffs, FUNCTIONFS_BIND);
+ return 0;
+
+error:
+ /* XXX Do we need to release all claimed endpoints here? */
+ return ret;
+}
+
+
+/* Other USB function hooks *************************************************/
+
+static void ffs_func_unbind(struct usb_configuration *c,
+ struct usb_function *f)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+
+ ENTER();
+
+ if (ffs->func == func) {
+ ffs_func_eps_disable(func);
+ ffs->func = NULL;
+ }
+
+ ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_func_free(func);
+}
+
+
+static int ffs_func_set_alt(struct usb_function *f,
+ unsigned interface, unsigned alt)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+ int ret = 0, intf;
+
+ if (alt != (unsigned)-1) {
+ intf = ffs_func_revmap_intf(func, interface);
+ if (unlikely(intf < 0))
+ return intf;
+ }
+
+ if (ffs->func)
+ ffs_func_eps_disable(ffs->func);
+
+ if (ffs->state != FFS_ACTIVE)
+ return -ENODEV;
+
+ if (alt == (unsigned)-1) {
+ ffs->func = NULL;
+ ffs_event_add(ffs, FUNCTIONFS_DISABLE);
+ return 0;
+ }
+
+ ffs->func = func;
+ ret = ffs_func_eps_enable(func);
+ if (likely(ret >= 0))
+ ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+ return ret;
+}
+
+static void ffs_func_disable(struct usb_function *f)
+{
+ ffs_func_set_alt(f, 0, (unsigned)-1);
+}
+
+static int ffs_func_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *creq)
+{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+ unsigned long flags;
+ int ret;
+
+ ENTER();
+
+ FVDBG("creq->bRequestType = %02x", creq->bRequestType);
+ FVDBG("creq->bRequest = %02x", creq->bRequest);
+ FVDBG("creq->wValue = %04x", le16_to_cpu(creq->wValue));
+ FVDBG("creq->wIndex = %04x", le16_to_cpu(creq->wIndex));
+ FVDBG("creq->wLength = %04x", le16_to_cpu(creq->wLength));
+
+	/* Most requests directed to an interface go through here
+	 * (notable exceptions are set/get interface), so we need to
+	 * handle them.  All others are either handled by composite or
+	 * passed to usb_configuration->setup() (if one is set).  In any
+	 * case, we also handle requests directed to an endpoint here
+	 * (as it's straightforward), but what should we do with any
+	 * other request? */
+
+ if (ffs->state != FFS_ACTIVE)
+ return -ENODEV;
+
+ switch (creq->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
+ if (unlikely(ret < 0))
+ return ret;
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
+ if (unlikely(ret < 0))
+ return ret;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+ ffs->ev.setup = *creq;
+ ffs->ev.setup.wIndex = cpu_to_le16(ret);
+ __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+ return 0;
+}
+
+static void ffs_func_suspend(struct usb_function *f)
+{
+ ENTER();
+ ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+}
+
+static void ffs_func_resume(struct usb_function *f)
+{
+ ENTER();
+ ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+}
+
+
+
+/* Endpoint and interface numbers reverse mapping ***************************/
+
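+/* eps_revmap[] stores the endpoint index + 1, so a value of zero means the
+ * endpoint number is not mapped. */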
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
+{
+ num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
+ return num ? num : -EDOM;
+}
+
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
+{
+ short *nums = func->interfaces_nums;
+ unsigned count = func->ffs->interfaces_count;
+
+ for (; count; --count, ++nums) {
+ if (*nums >= 0 && *nums == intf)
+ return nums - func->interfaces_nums;
+ }
+
+ return -EDOM;
+}
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+{
+ return nonblock
+ ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
+ : mutex_lock_interruptible(mutex);
+}
+
+
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+{
+ char *data;
+
+ if (unlikely(!len))
+ return NULL;
+
+ data = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!data))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(__copy_from_user(data, buf, len))) {
+ kfree(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ FVDBG("Buffer from user space:");
+ ffs_dump_mem("", data, len);
+
+ return data;
+}
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
new file mode 100644
index 00000000000..1e00ff9866a
--- /dev/null
+++ b/drivers/usb/gadget/f_hid.c
@@ -0,0 +1,673 @@
+/*
+ * f_hid.c -- USB HID function driver
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/usb/g_hid.h>
+
+static int major, minors;
+static struct class *hidg_class;
+
+/*-------------------------------------------------------------------------*/
+/* HID gadget struct */
+
+struct f_hidg {
+ /* configuration */
+ unsigned char bInterfaceSubClass;
+ unsigned char bInterfaceProtocol;
+ unsigned short report_desc_length;
+ char *report_desc;
+ unsigned short report_length;
+
+ /* recv report */
+ char *set_report_buff;
+ unsigned short set_report_length;
+ spinlock_t spinlock;
+ wait_queue_head_t read_queue;
+
+ /* send report */
+ struct mutex lock;
+ bool write_pending;
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
+ int minor;
+ struct cdev cdev;
+ struct usb_function func;
+ struct usb_ep *in_ep;
+ struct usb_endpoint_descriptor *fs_in_ep_desc;
+ struct usb_endpoint_descriptor *hs_in_ep_desc;
+};
+
+static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+{
+ return container_of(f, struct f_hidg, func);
+}
+
+/*-------------------------------------------------------------------------*/
+/* Static descriptors */
+
+static struct usb_interface_descriptor hidg_interface_desc = {
+ .bLength = sizeof hidg_interface_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_HID,
+ /* .bInterfaceSubClass = DYNAMIC */
+ /* .bInterfaceProtocol = DYNAMIC */
+ /* .iInterface = DYNAMIC */
+};
+
+static struct hid_descriptor hidg_desc = {
+ .bLength = sizeof hidg_desc,
+ .bDescriptorType = HID_DT_HID,
+ .bcdHID = 0x0101,
+ .bCountryCode = 0x00,
+ .bNumDescriptors = 0x1,
+ /*.desc[0].bDescriptorType = DYNAMIC */
+	/*.desc[0].wDescriptorLength = DYNAMIC */
+};
+
+/* High-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 4, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_descriptor_header *hidg_hs_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
+ NULL,
+};
+
+/* Full-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ /*.wMaxPacketSize = DYNAMIC */
+ .bInterval = 10, /* FIXME: Add this field in the
+ * HID gadget configuration?
+ * (struct hidg_func_descriptor)
+ */
+};
+
+static struct usb_descriptor_header *hidg_fs_descriptors[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/* Char Device */
+
+static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ptr)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ char *tmp_buff = NULL;
+ unsigned long flags;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buffer, count))
+ return -EFAULT;
+
+ spin_lock_irqsave(&hidg->spinlock, flags);
+
+#define READ_COND (hidg->set_report_buff != NULL)
+
+ while (!READ_COND) {
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(hidg->read_queue, READ_COND))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&hidg->spinlock, flags);
+ }
+
+
+ count = min_t(unsigned, count, hidg->set_report_length);
+ tmp_buff = hidg->set_report_buff;
+ hidg->set_report_buff = NULL;
+
+ spin_unlock_irqrestore(&hidg->spinlock, flags);
+
+ if (tmp_buff != NULL) {
+ /* copy to user outside spinlock */
+ count -= copy_to_user(buffer, tmp_buff, count);
+ kfree(tmp_buff);
+ } else
+ count = -ENOMEM;
+
+ return count;
+}
+
+static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+
+ if (req->status != 0) {
+ ERROR(hidg->func.config->cdev,
+ "End Point Request ERROR: %d\n", req->status);
+ }
+
+ hidg->write_pending = 0;
+ wake_up(&hidg->write_queue);
+}
+
+static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *offp)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ ssize_t status = -ENOMEM;
+
+ if (!access_ok(VERIFY_READ, buffer, count))
+ return -EFAULT;
+
+ mutex_lock(&hidg->lock);
+
+#define WRITE_COND (!hidg->write_pending)
+
+	/* wait until the previous report has been sent */
+ while (!WRITE_COND) {
+ mutex_unlock(&hidg->lock);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible_exclusive(
+ hidg->write_queue, WRITE_COND))
+ return -ERESTARTSYS;
+
+ mutex_lock(&hidg->lock);
+ }
+
+ count = min_t(unsigned, count, hidg->report_length);
+ status = copy_from_user(hidg->req->buf, buffer, count);
+
+ if (status != 0) {
+ ERROR(hidg->func.config->cdev,
+ "copy_from_user error\n");
+ mutex_unlock(&hidg->lock);
+ return -EINVAL;
+ }
+
+ hidg->req->status = 0;
+ hidg->req->zero = 0;
+ hidg->req->length = count;
+ hidg->req->complete = f_hidg_req_complete;
+ hidg->req->context = hidg;
+ hidg->write_pending = 1;
+
+ status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(hidg->func.config->cdev,
+ "usb_ep_queue error on int endpoint %zd\n", status);
+ hidg->write_pending = 0;
+ wake_up(&hidg->write_queue);
+ } else {
+ status = count;
+ }
+
+ mutex_unlock(&hidg->lock);
+
+ return status;
+}
+
+static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+{
+ struct f_hidg *hidg = (struct f_hidg *)file->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(file, &hidg->read_queue, wait);
+ poll_wait(file, &hidg->write_queue, wait);
+
+ if (WRITE_COND)
+ ret |= POLLOUT | POLLWRNORM;
+
+ if (READ_COND)
+ ret |= POLLIN | POLLRDNORM;
+
+ return ret;
+}
+
+#undef WRITE_COND
+#undef READ_COND
+
+static int f_hidg_release(struct inode *inode, struct file *fd)
+{
+ fd->private_data = NULL;
+ return 0;
+}
+
+static int f_hidg_open(struct inode *inode, struct file *fd)
+{
+ struct f_hidg *hidg =
+ container_of(inode->i_cdev, struct f_hidg, cdev);
+
+ fd->private_data = hidg;
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+/* usb_function */
+
+static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_hidg *hidg = (struct f_hidg *)req->context;
+
+ if (req->status != 0 || req->buf == NULL || req->actual == 0) {
+ ERROR(hidg->func.config->cdev, "%s FAILED\n", __func__);
+ return;
+ }
+
+ spin_lock(&hidg->spinlock);
+
+ hidg->set_report_buff = krealloc(hidg->set_report_buff,
+ req->actual, GFP_ATOMIC);
+
+ if (hidg->set_report_buff == NULL) {
+ spin_unlock(&hidg->spinlock);
+ return;
+ }
+ hidg->set_report_length = req->actual;
+ memcpy(hidg->set_report_buff, req->buf, req->actual);
+
+ spin_unlock(&hidg->spinlock);
+
+ wake_up(&hidg->read_queue);
+
+ return;
+}
+
+static int hidg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int status = 0;
+ __u16 value, length;
+
+ value = __le16_to_cpu(ctrl->wValue);
+ length = __le16_to_cpu(ctrl->wLength);
+
+	VDBG(cdev, "hid_setup ctrl_request : bRequestType:0x%x bRequest:0x%x "
+		"Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+
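+	/* The switch key packs bRequestType and bRequest into one value:
+	 * request type in the high byte, request code in the low byte. */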
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_REPORT):
+ VDBG(cdev, "get_report\n");
+
+ /* send an empty report */
+ length = min_t(unsigned, length, hidg->report_length);
+ memset(req->buf, 0x0, length);
+
+ goto respond;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_GET_PROTOCOL):
+ VDBG(cdev, "get_protocol\n");
+ goto stall;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_REPORT):
+		VDBG(cdev, "set_report | wLength=%d\n", length);
+ req->context = hidg;
+ req->complete = hidg_set_report_complete;
+ goto respond;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_PROTOCOL):
+ VDBG(cdev, "set_protocol\n");
+ goto stall;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
+ | USB_REQ_GET_DESCRIPTOR):
+ switch (value >> 8) {
+ case HID_DT_REPORT:
+ VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
+ length = min_t(unsigned short, length,
+ hidg->report_desc_length);
+ memcpy(req->buf, hidg->report_desc, length);
+ goto respond;
+ break;
+
+ default:
+			VDBG(cdev, "Unknown descriptor request 0x%x\n",
+				 value >> 8);
+ goto stall;
+ break;
+ }
+ break;
+
+ default:
+ VDBG(cdev, "Unknown request 0x%x\n",
+ ctrl->bRequest);
+ goto stall;
+ break;
+ }
+
+stall:
+ return -EOPNOTSUPP;
+
+respond:
+ req->zero = 0;
+ req->length = length;
+ status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (status < 0)
+		ERROR(cdev, "usb_ep_queue error on ep0 %d\n", status);
+ return status;
+}
+
+static void hidg_disable(struct usb_function *f)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+
+ usb_ep_disable(hidg->in_ep);
+ hidg->in_ep->driver_data = NULL;
+
+ return;
+}
+
+static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct f_hidg *hidg = func_to_hidg(f);
+ const struct usb_endpoint_descriptor *ep_desc;
+ int status = 0;
+
+ VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
+
+ if (hidg->in_ep != NULL) {
+ /* restart endpoint */
+ if (hidg->in_ep->driver_data != NULL)
+ usb_ep_disable(hidg->in_ep);
+
+ ep_desc = ep_choose(f->config->cdev->gadget,
+ hidg->hs_in_ep_desc, hidg->fs_in_ep_desc);
+ status = usb_ep_enable(hidg->in_ep, ep_desc);
+ if (status < 0) {
+ ERROR(cdev, "Enable endpoint FAILED!\n");
+ goto fail;
+ }
+ hidg->in_ep->driver_data = hidg;
+ }
+fail:
+ return status;
+}
+
+const struct file_operations f_hidg_fops = {
+ .owner = THIS_MODULE,
+ .open = f_hidg_open,
+ .release = f_hidg_release,
+ .write = f_hidg_write,
+ .read = f_hidg_read,
+ .poll = f_hidg_poll,
+};
+
+static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_ep *ep;
+ struct f_hidg *hidg = func_to_hidg(f);
+ int status;
+ dev_t dev;
+
+ /* allocate instance-specific interface IDs, and patch descriptors */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ hidg_interface_desc.bInterfaceNumber = status;
+
+
+ /* allocate instance-specific endpoints */
+ status = -ENODEV;
+ ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
+ if (!ep)
+ goto fail;
+ ep->driver_data = c->cdev; /* claim */
+ hidg->in_ep = ep;
+
+ /* preallocate request and buffer */
+ status = -ENOMEM;
+ hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL);
+ if (!hidg->req)
+ goto fail;
+
+
+ hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL);
+ if (!hidg->req->buf)
+ goto fail;
+
+ /* set descriptor dynamic values */
+ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
+ hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+ hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+ hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
+ hidg_desc.desc[0].wDescriptorLength =
+ cpu_to_le16(hidg->report_desc_length);
+
+ hidg->set_report_buff = NULL;
+
+ /* copy descriptors */
+ f->descriptors = usb_copy_descriptors(hidg_fs_descriptors);
+ if (!f->descriptors)
+ goto fail;
+
+ hidg->fs_in_ep_desc = usb_find_endpoint(hidg_fs_descriptors,
+ f->descriptors,
+ &hidg_fs_in_ep_desc);
+
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ hidg_hs_in_ep_desc.bEndpointAddress =
+ hidg_fs_in_ep_desc.bEndpointAddress;
+ f->hs_descriptors = usb_copy_descriptors(hidg_hs_descriptors);
+ if (!f->hs_descriptors)
+ goto fail;
+ hidg->hs_in_ep_desc = usb_find_endpoint(hidg_hs_descriptors,
+ f->hs_descriptors,
+ &hidg_hs_in_ep_desc);
+ } else {
+ hidg->hs_in_ep_desc = NULL;
+ }
+
+ mutex_init(&hidg->lock);
+ spin_lock_init(&hidg->spinlock);
+ init_waitqueue_head(&hidg->write_queue);
+ init_waitqueue_head(&hidg->read_queue);
+
+ /* create char device */
+ cdev_init(&hidg->cdev, &f_hidg_fops);
+ dev = MKDEV(major, hidg->minor);
+ status = cdev_add(&hidg->cdev, dev, 1);
+ if (status)
+ goto fail;
+
+ device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);
+
+ return 0;
+
+fail:
+ ERROR(f->config->cdev, "hidg_bind FAILED\n");
+ if (hidg->req != NULL) {
+ kfree(hidg->req->buf);
+ if (hidg->in_ep != NULL)
+ usb_ep_free_request(hidg->in_ep, hidg->req);
+ }
+
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ return status;
+}
+
+static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_hidg *hidg = func_to_hidg(f);
+
+ device_destroy(hidg_class, MKDEV(major, hidg->minor));
+ cdev_del(&hidg->cdev);
+
+ /* disable/free request and end point */
+ usb_ep_disable(hidg->in_ep);
+ usb_ep_dequeue(hidg->in_ep, hidg->req);
+ kfree(hidg->req->buf);
+ usb_ep_free_request(hidg->in_ep, hidg->req);
+
+ /* free descriptors copies */
+ usb_free_descriptors(f->hs_descriptors);
+ usb_free_descriptors(f->descriptors);
+
+ kfree(hidg->report_desc);
+ kfree(hidg->set_report_buff);
+ kfree(hidg);
+}
+
+/*-------------------------------------------------------------------------*/
+/* Strings */
+
+#define CT_FUNC_HID_IDX 0
+
+static struct usb_string ct_func_string_defs[] = {
+ [CT_FUNC_HID_IDX].s = "HID Interface",
+ {}, /* end of list */
+};
+
+static struct usb_gadget_strings ct_func_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = ct_func_string_defs,
+};
+
+static struct usb_gadget_strings *ct_func_strings[] = {
+ &ct_func_string_table,
+ NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+/* usb_configuration */
+
+int __init hidg_bind_config(struct usb_configuration *c,
+ struct hidg_func_descriptor *fdesc, int index)
+{
+ struct f_hidg *hidg;
+ int status;
+
+ if (index >= minors)
+ return -ENOENT;
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ if (ct_func_string_defs[CT_FUNC_HID_IDX].id == 0) {
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ ct_func_string_defs[CT_FUNC_HID_IDX].id = status;
+ hidg_interface_desc.iInterface = status;
+ }
+
+ /* allocate and initialize one new instance */
+ hidg = kzalloc(sizeof *hidg, GFP_KERNEL);
+ if (!hidg)
+ return -ENOMEM;
+
+ hidg->minor = index;
+ hidg->bInterfaceSubClass = fdesc->subclass;
+ hidg->bInterfaceProtocol = fdesc->protocol;
+ hidg->report_length = fdesc->report_length;
+ hidg->report_desc_length = fdesc->report_desc_length;
+ hidg->report_desc = kmemdup(fdesc->report_desc,
+ fdesc->report_desc_length,
+ GFP_KERNEL);
+ if (!hidg->report_desc) {
+ kfree(hidg);
+ return -ENOMEM;
+ }
+
+ hidg->func.name = "hid";
+ hidg->func.strings = ct_func_strings;
+ hidg->func.bind = hidg_bind;
+ hidg->func.unbind = hidg_unbind;
+ hidg->func.set_alt = hidg_set_alt;
+ hidg->func.disable = hidg_disable;
+ hidg->func.setup = hidg_setup;
+
+ status = usb_add_function(c, &hidg->func);
+ if (status)
+ kfree(hidg);
+
+ return status;
+}
+
+int __init ghid_setup(struct usb_gadget *g, int count)
+{
+ int status;
+ dev_t dev;
+
+ hidg_class = class_create(THIS_MODULE, "hidg");
+ if (IS_ERR(hidg_class)) {
+ status = PTR_ERR(hidg_class);
+ hidg_class = NULL;
+ return status;
+ }
+
+ status = alloc_chrdev_region(&dev, 0, count, "hidg");
+ if (!status) {
+ major = MAJOR(dev);
+ minors = count;
+ }
+
+ return status;
+}
+
+void ghid_cleanup(void)
+{
+ if (major) {
+ unregister_chrdev_region(MKDEV(major, 0), minors);
+ major = minors = 0;
+ }
+
+ class_destroy(hidg_class);
+ hidg_class = NULL;
+}
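
For reference, a minimal user-space sketch of exercising the character device that hidg_bind() creates above. It assumes the function was configured with a standard 8-byte boot-keyboard report descriptor, and that udev has turned the "hidg%d" device_create() call into /dev/hidg0; both the node name and the report layout are assumptions for illustration, not something fixed by this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char report[8];	/* boot keyboard: modifiers, reserved, 6 keycodes */
	int fd = open("/dev/hidg0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/hidg0");
		return 1;
	}

	memset(report, 0, sizeof(report));
	report[2] = 0x04;				/* usage ID for 'a' */
	if (write(fd, report, sizeof(report)) < 0)	/* key press */
		perror("write");

	memset(report, 0, sizeof(report));
	if (write(fd, report, sizeof(report)) < 0)	/* key release */
		perror("write");

	close(fd);
	return 0;
}

Each write() is forwarded to the host as one IN report by f_hidg_write(); reads on the same descriptor return SET_REPORT data buffered by hidg_set_report_complete().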
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index f4911c09022..7d05a0be5c6 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -163,6 +163,10 @@
* ro setting are not allowed when the medium is loaded or if CD-ROM
* emulation is being used.
*
+ * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
+ * if the LUN is removable, the backing file is released to simulate
+ * ejection.
+ *
*
* This function is heavily based on "File-backed Storage Gadget" by
* Alan Stern which in turn is heavily based on "Gadget Zero" by David
@@ -302,7 +306,6 @@ static const char fsg_string_interface[] = "Mass Storage";
#define FSG_NO_INTR_EP 1
-#define FSG_BUFFHD_STATIC_BUFFER 1
#define FSG_NO_DEVICE_STRINGS 1
#define FSG_NO_OTG 1
#define FSG_NO_INTR_EP 1
@@ -1385,12 +1388,50 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
static int do_start_stop(struct fsg_common *common)
{
- if (!common->curlun) {
+ struct fsg_lun *curlun = common->curlun;
+ int loej, start;
+
+ if (!curlun) {
return -EINVAL;
- } else if (!common->curlun->removable) {
- common->curlun->sense_data = SS_INVALID_COMMAND;
+ } else if (!curlun->removable) {
+ curlun->sense_data = SS_INVALID_COMMAND;
return -EINVAL;
}
+
+ loej = common->cmnd[4] & 0x02;
+ start = common->cmnd[4] & 0x01;
+
+ /* eject code from file_storage.c:do_start_stop() */
+
+ if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
+ (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ if (!start) {
+ /* Are we allowed to unload the media? */
+ if (curlun->prevent_medium_removal) {
+ LDBG(curlun, "unload attempt prevented\n");
+ curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+ return -EINVAL;
+ }
+ if (loej) { /* Simulate an unload/eject */
+ up_read(&common->filesem);
+ down_write(&common->filesem);
+ fsg_lun_close(curlun);
+ up_write(&common->filesem);
+ down_read(&common->filesem);
+ }
+ } else {
+
+ /* Our emulation doesn't support mounting; the medium is
+ * available for use as soon as it is loaded. */
+ if (!fsg_lun_is_open(curlun)) {
+ curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -2701,10 +2742,8 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Maybe allocate device-global string IDs, and patch descriptors */
if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
rc = usb_string_id(cdev);
- if (rc < 0) {
- kfree(common);
- return ERR_PTR(rc);
- }
+ if (unlikely(rc < 0))
+ goto error_release;
fsg_strings[FSG_STRING_INTERFACE].id = rc;
fsg_intf_desc.iInterface = rc;
}
@@ -2712,9 +2751,9 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs. */
curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
- if (!curlun) {
- kfree(common);
- return ERR_PTR(-ENOMEM);
+ if (unlikely(!curlun)) {
+ rc = -ENOMEM;
+ goto error_release;
}
common->luns = curlun;
@@ -2762,13 +2801,19 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
/* Data buffers cyclic list */
- /* Buffers in buffhds are static -- no need for additional
- * allocation. */
bh = common->buffhds;
- i = FSG_NUM_BUFFERS - 1;
+ i = FSG_NUM_BUFFERS;
+ goto buffhds_first_it;
do {
bh->next = bh + 1;
- } while (++bh, --i);
+ ++bh;
+buffhds_first_it:
+ bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
+ if (unlikely(!bh->buf)) {
+ rc = -ENOMEM;
+ goto error_release;
+ }
+ } while (--i);
bh->next = common->buffhds;
@@ -2867,10 +2912,7 @@ error_release:
static void fsg_common_release(struct kref *ref)
{
- struct fsg_common *common =
- container_of(ref, struct fsg_common, ref);
- unsigned i = common->nluns;
- struct fsg_lun *lun = common->luns;
+ struct fsg_common *common = container_of(ref, struct fsg_common, ref);
/* If the thread isn't already dead, tell it to exit now */
if (common->state != FSG_STATE_TERMINATED) {
@@ -2881,17 +2923,29 @@ static void fsg_common_release(struct kref *ref)
complete(&common->thread_notifier);
}
- /* Beware tempting for -> do-while optimization: when in error
- * recovery nluns may be zero. */
+ if (likely(common->luns)) {
+ struct fsg_lun *lun = common->luns;
+ unsigned i = common->nluns;
+
+ /* In error recovery common->nluns may be zero. */
+ for (; i; --i, ++lun) {
+ device_remove_file(&lun->dev, &dev_attr_ro);
+ device_remove_file(&lun->dev, &dev_attr_file);
+ fsg_lun_close(lun);
+ device_unregister(&lun->dev);
+ }
+
+ kfree(common->luns);
+ }
- for (; i; --i, ++lun) {
- device_remove_file(&lun->dev, &dev_attr_ro);
- device_remove_file(&lun->dev, &dev_attr_file);
- fsg_lun_close(lun);
- device_unregister(&lun->dev);
+ {
+ struct fsg_buffhd *bh = common->buffhds;
+ unsigned i = FSG_NUM_BUFFERS;
+ do {
+ kfree(bh->buf);
+ } while (++bh, --i);
}
- kfree(common->luns);
if (common->free_storage_on_release)
kfree(common);
}
@@ -2906,11 +2960,13 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(fsg, "unbind\n");
fsg_common_put(fsg->common);
+ usb_free_descriptors(fsg->function.descriptors);
+ usb_free_descriptors(fsg->function.hs_descriptors);
kfree(fsg);
}
-static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
struct usb_gadget *gadget = c->cdev->gadget;
@@ -2946,7 +3002,9 @@ static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
fsg_fs_bulk_in_desc.bEndpointAddress;
fsg_hs_bulk_out_desc.bEndpointAddress =
fsg_fs_bulk_out_desc.bEndpointAddress;
- f->hs_descriptors = fsg_hs_function;
+ f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
+ if (unlikely(!f->hs_descriptors))
+ return -ENOMEM;
}
return 0;
@@ -2978,7 +3036,11 @@ static int fsg_add(struct usb_composite_dev *cdev,
fsg->function.name = FSG_DRIVER_DESC;
fsg->function.strings = fsg_strings_array;
- fsg->function.descriptors = fsg_fs_function;
+ fsg->function.descriptors = usb_copy_descriptors(fsg_fs_function);
+ if (unlikely(!fsg->function.descriptors)) {
+ rc = -ENOMEM;
+ goto error_free_fsg;
+ }
fsg->function.bind = fsg_bind;
fsg->function.unbind = fsg_unbind;
fsg->function.setup = fsg_setup;
@@ -2993,11 +3055,19 @@ static int fsg_add(struct usb_composite_dev *cdev,
* call to usb_add_function() was successful. */
rc = usb_add_function(c, &fsg->function);
+ if (unlikely(rc))
+ goto error_free_all;
- if (likely(rc == 0))
- fsg_common_get(fsg->common);
- else
- kfree(fsg);
+ fsg_common_get(fsg->common);
+ return 0;
+
+error_free_all:
+ usb_free_descriptors(fsg->function.descriptors);
+ /* fsg_bind() may or may not have copied the hs descriptors;
+ * usb_free_descriptors() copes with NULL, so free them
+ * unconditionally. */
+ usb_free_descriptors(fsg->function.hs_descriptors);
+error_free_fsg:
+ kfree(fsg);
return rc;
}
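
To make the new do_start_stop() bit handling concrete, here is a small stand-alone sketch of how byte 4 of a SCSI START STOP UNIT CDB decodes into the loej/start flags used above. The 0x02 value is just an illustration; the kernel code itself operates on common->cmnd.

#include <stdio.h>

int main(void)
{
	/* START STOP UNIT, CDB byte 4: bit 1 = LoEj, bit 0 = Start. */
	unsigned char cmnd4 = 0x02;		/* LoEj = 1, Start = 0 */
	int loej  = cmnd4 & 0x02;
	int start = cmnd4 & 0x01;

	if (!start && loej)
		printf("eject: close the backing file\n");
	else if (start)
		printf("load/start: medium must already be open\n");
	else
		printf("stop without eject: nothing to do\n");

	return 0;
}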
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 56b022150f2..882484a4039 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -122,7 +122,7 @@ static unsigned int bitrate(struct usb_gadget *g)
/* interface descriptor: */
-static struct usb_interface_descriptor rndis_control_intf __initdata = {
+static struct usb_interface_descriptor rndis_control_intf = {
.bLength = sizeof rndis_control_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -135,7 +135,7 @@ static struct usb_interface_descriptor rndis_control_intf __initdata = {
/* .iInterface = DYNAMIC */
};
-static struct usb_cdc_header_desc header_desc __initdata = {
+static struct usb_cdc_header_desc header_desc = {
.bLength = sizeof header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
@@ -143,7 +143,7 @@ static struct usb_cdc_header_desc header_desc __initdata = {
.bcdCDC = cpu_to_le16(0x0110),
};
-static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
+static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
.bLength = sizeof call_mgmt_descriptor,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
@@ -152,7 +152,7 @@ static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
.bDataInterface = 0x01,
};
-static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
.bLength = sizeof rndis_acm_descriptor,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
@@ -160,7 +160,7 @@ static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
.bmCapabilities = 0x00,
};
-static struct usb_cdc_union_desc rndis_union_desc __initdata = {
+static struct usb_cdc_union_desc rndis_union_desc = {
.bLength = sizeof(rndis_union_desc),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
@@ -170,7 +170,7 @@ static struct usb_cdc_union_desc rndis_union_desc __initdata = {
/* the data interface has two bulk endpoints */
-static struct usb_interface_descriptor rndis_data_intf __initdata = {
+static struct usb_interface_descriptor rndis_data_intf = {
.bLength = sizeof rndis_data_intf,
.bDescriptorType = USB_DT_INTERFACE,
@@ -198,7 +198,7 @@ rndis_iad_descriptor = {
/* full speed support: */
-static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -208,7 +208,7 @@ static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
.bInterval = 1 << LOG2_STATUS_INTERVAL_MSEC,
};
-static struct usb_endpoint_descriptor fs_in_desc __initdata = {
+static struct usb_endpoint_descriptor fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -216,7 +216,7 @@ static struct usb_endpoint_descriptor fs_in_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_endpoint_descriptor fs_out_desc __initdata = {
+static struct usb_endpoint_descriptor fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -224,7 +224,7 @@ static struct usb_endpoint_descriptor fs_out_desc __initdata = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
-static struct usb_descriptor_header *eth_fs_function[] __initdata = {
+static struct usb_descriptor_header *eth_fs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
@@ -242,7 +242,7 @@ static struct usb_descriptor_header *eth_fs_function[] __initdata = {
/* high speed support: */
-static struct usb_endpoint_descriptor hs_notify_desc __initdata = {
+static struct usb_endpoint_descriptor hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -251,7 +251,7 @@ static struct usb_endpoint_descriptor hs_notify_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
-static struct usb_endpoint_descriptor hs_in_desc __initdata = {
+static struct usb_endpoint_descriptor hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -260,7 +260,7 @@ static struct usb_endpoint_descriptor hs_in_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_endpoint_descriptor hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -269,7 +269,7 @@ static struct usb_endpoint_descriptor hs_out_desc __initdata = {
.wMaxPacketSize = cpu_to_le16(512),
};
-static struct usb_descriptor_header *eth_hs_function[] __initdata = {
+static struct usb_descriptor_header *eth_hs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
@@ -594,7 +594,7 @@ static void rndis_close(struct gether *geth)
/* ethernet function driver setup/binding */
-static int __init
+static int
rndis_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
@@ -786,7 +786,8 @@ static inline bool can_support_rndis(struct usb_configuration *c)
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
-int __init rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+int
+rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
struct f_rndis *rndis;
int status;
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
new file mode 100644
index 00000000000..fc2611f8b32
--- /dev/null
+++ b/drivers/usb/gadget/f_uvc.c
@@ -0,0 +1,661 @@
+/*
+ * f_uvc.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/video.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "uvc.h"
+
+unsigned int uvc_trace_param;
+
+/* --------------------------------------------------------------------------
+ * Function descriptors
+ */
+
+/* string IDs are assigned dynamically */
+
+#define UVC_STRING_ASSOCIATION_IDX 0
+#define UVC_STRING_CONTROL_IDX 1
+#define UVC_STRING_STREAMING_IDX 2
+
+static struct usb_string uvc_en_us_strings[] = {
+ [UVC_STRING_ASSOCIATION_IDX].s = "UVC Camera",
+ [UVC_STRING_CONTROL_IDX].s = "Video Control",
+ [UVC_STRING_STREAMING_IDX].s = "Video Streaming",
+ { }
+};
+
+static struct usb_gadget_strings uvc_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = uvc_en_us_strings,
+};
+
+static struct usb_gadget_strings *uvc_function_strings[] = {
+ &uvc_stringtab,
+ NULL,
+};
+
+#define UVC_INTF_VIDEO_CONTROL 0
+#define UVC_INTF_VIDEO_STREAMING 1
+
+static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
+ .bLength = USB_DT_INTERFACE_ASSOCIATION_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+ .bFirstInterface = 0,
+ .bInterfaceCount = 2,
+ .bFunctionClass = USB_CLASS_VIDEO,
+ .bFunctionSubClass = 0x03,
+ .bFunctionProtocol = 0x00,
+ .iFunction = 0,
+};
+
+static struct usb_interface_descriptor uvc_control_intf __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(16),
+ .bInterval = 8,
+};
+
+static struct uvc_control_endpoint_descriptor uvc_control_cs_ep __initdata = {
+ .bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubType = UVC_EP_INTERRUPT,
+ .wMaxTransferSize = cpu_to_le16(16),
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x02,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING,
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 1,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 0x02,
+ .bInterfaceProtocol = 0x00,
+ .iInterface = 0,
+};
+
+static struct usb_endpoint_descriptor uvc_streaming_ep = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(512),
+ .bInterval = 1,
+};
+
+static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
+ (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming_ep,
+ NULL,
+};
+
+static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
+ (struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
+ (struct usb_descriptor_header *) &uvc_streaming_ep,
+ NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * Control requests
+ */
+
+static void
+uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct uvc_device *uvc = req->context;
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ if (uvc->event_setup_out) {
+ uvc->event_setup_out = 0;
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DATA;
+ uvc_event->data.length = req->actual;
+ memcpy(&uvc_event->data.data, req->buf, req->actual);
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+ }
+}
+
+static int
+uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
+ * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
+ * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
+ */
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
+ INFO(f->config->cdev, "invalid request type\n");
+ return -EINVAL;
+ }
+
+ /* Stall too big requests. */
+ if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
+ return -EINVAL;
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_SETUP;
+ memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ return 0;
+}
+
+static int
+uvc_function_get_alt(struct usb_function *f, unsigned interface)
+{
+ struct uvc_device *uvc = to_uvc(f);
+
+ INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+
+ if (interface == uvc->control_intf)
+ return 0;
+ else if (interface != uvc->streaming_intf)
+ return -EINVAL;
+ else
+ return uvc->state == UVC_STATE_STREAMING ? 1 : 0;
+}
+
+static int
+uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+ struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
+
+ INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+
+ if (interface == uvc->control_intf) {
+ if (alt)
+ return -EINVAL;
+
+ if (uvc->state == UVC_STATE_DISCONNECTED) {
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_CONNECT;
+ uvc_event->speed = f->config->cdev->gadget->speed;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_CONNECTED;
+ }
+
+ return 0;
+ }
+
+ if (interface != uvc->streaming_intf)
+ return -EINVAL;
+
+ /* TODO
+ if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
+ return alt ? -EINVAL : 0;
+ */
+
+ switch (alt) {
+ case 0:
+ if (uvc->state != UVC_STATE_STREAMING)
+ return 0;
+
+ if (uvc->video.ep)
+ usb_ep_disable(uvc->video.ep);
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_STREAMOFF;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_CONNECTED;
+ break;
+
+ case 1:
+ if (uvc->state != UVC_STATE_CONNECTED)
+ return 0;
+
+ if (uvc->video.ep)
+ usb_ep_enable(uvc->video.ep, &uvc_streaming_ep);
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_STREAMON;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_STREAMING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+uvc_function_disable(struct usb_function *f)
+{
+ struct uvc_device *uvc = to_uvc(f);
+ struct v4l2_event v4l2_event;
+
+ INFO(f->config->cdev, "uvc_function_disable\n");
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DISCONNECT;
+ v4l2_event_queue(uvc->vdev, &v4l2_event);
+
+ uvc->state = UVC_STATE_DISCONNECTED;
+}
+
+/* --------------------------------------------------------------------------
+ * Connection / disconnection
+ */
+
+void
+uvc_function_connect(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ int ret;
+
+ if ((ret = usb_function_activate(&uvc->func)) < 0)
+ INFO(cdev, "UVC connect failed with %d\n", ret);
+}
+
+void
+uvc_function_disconnect(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ int ret;
+
+ if ((ret = usb_function_deactivate(&uvc->func)) < 0)
+ INFO(cdev, "UVC disconnect failed with %d\n", ret);
+}
+
+/* --------------------------------------------------------------------------
+ * USB probe and disconnect
+ */
+
+static int
+uvc_register_video(struct uvc_device *uvc)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct video_device *video;
+
+ /* TODO reference counting. */
+ video = video_device_alloc();
+ if (video == NULL)
+ return -ENOMEM;
+
+ video->parent = &cdev->gadget->dev;
+ video->minor = -1;
+ video->fops = &uvc_v4l2_fops;
+ video->release = video_device_release;
+ strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+
+ uvc->vdev = video;
+ video_set_drvdata(video, uvc);
+
+ return video_register_device(video, VFL_TYPE_GRABBER, -1);
+}
+
+#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
+ do { \
+ memcpy(mem, desc, (desc)->bLength); \
+ *(dst)++ = mem; \
+ mem += (desc)->bLength; \
+ } while (0)
+
+#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
+ do { \
+ const struct usb_descriptor_header * const *__src; \
+ for (__src = src; *__src; ++__src) { \
+ memcpy(mem, *__src, (*__src)->bLength); \
+ *dst++ = mem; \
+ mem += (*__src)->bLength; \
+ } \
+ } while (0)
+
+static struct usb_descriptor_header ** __init
+uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
+{
+ struct uvc_input_header_descriptor *uvc_streaming_header;
+ struct uvc_header_descriptor *uvc_control_header;
+ const struct uvc_descriptor_header * const *uvc_streaming_cls;
+ const struct usb_descriptor_header * const *uvc_streaming_std;
+ const struct usb_descriptor_header * const *src;
+ struct usb_descriptor_header **dst;
+ struct usb_descriptor_header **hdr;
+ unsigned int control_size;
+ unsigned int streaming_size;
+ unsigned int n_desc;
+ unsigned int bytes;
+ void *mem;
+
+ uvc_streaming_cls = (speed == USB_SPEED_FULL)
+ ? uvc->desc.fs_streaming : uvc->desc.hs_streaming;
+ uvc_streaming_std = (speed == USB_SPEED_FULL)
+ ? uvc_fs_streaming : uvc_hs_streaming;
+
+ /* Descriptors layout
+ *
+ * uvc_iad
+ * uvc_control_intf
+ * Class-specific UVC control descriptors
+ * uvc_control_ep
+ * uvc_control_cs_ep
+ * uvc_streaming_intf_alt0
+ * Class-specific UVC streaming descriptors
+ * uvc_{fs|hs}_streaming
+ */
+
+ /* Count descriptors and compute their size. */
+ control_size = 0;
+ streaming_size = 0;
+ bytes = uvc_iad.bLength + uvc_control_intf.bLength
+ + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+ + uvc_streaming_intf_alt0.bLength;
+ n_desc = 5;
+
+ for (src = (const struct usb_descriptor_header**)uvc->desc.control; *src; ++src) {
+ control_size += (*src)->bLength;
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+ for (src = (const struct usb_descriptor_header**)uvc_streaming_cls; *src; ++src) {
+ streaming_size += (*src)->bLength;
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+ for (src = uvc_streaming_std; *src; ++src) {
+ bytes += (*src)->bLength;
+ n_desc++;
+ }
+
+ mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
+ if (mem == NULL)
+ return NULL;
+
+ hdr = mem;
+ dst = mem;
+ mem += (n_desc + 1) * sizeof(*src);
+
+ /* Copy the descriptors. */
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf);
+
+ uvc_control_header = mem;
+ UVC_COPY_DESCRIPTORS(mem, dst,
+ (const struct usb_descriptor_header**)uvc->desc.control);
+ uvc_control_header->wTotalLength = cpu_to_le16(control_size);
+ uvc_control_header->bInCollection = 1;
+ uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
+
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
+
+ uvc_streaming_header = mem;
+ UVC_COPY_DESCRIPTORS(mem, dst,
+ (const struct usb_descriptor_header**)uvc_streaming_cls);
+ uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
+ uvc_streaming_header->bEndpointAddress = uvc_streaming_ep.bEndpointAddress;
+
+ UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);
+
+ *dst = NULL;
+ return hdr;
+}
+
+static void
+uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct uvc_device *uvc = to_uvc(f);
+
+ INFO(cdev, "uvc_function_unbind\n");
+
+ if (uvc->vdev) {
+ if (uvc->vdev->minor == -1)
+ video_device_release(uvc->vdev);
+ else
+ video_unregister_device(uvc->vdev);
+ uvc->vdev = NULL;
+ }
+
+ if (uvc->control_ep)
+ uvc->control_ep->driver_data = NULL;
+ if (uvc->video.ep)
+ uvc->video.ep->driver_data = NULL;
+
+ if (uvc->control_req) {
+ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+ kfree(uvc->control_buf);
+ }
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+
+ kfree(uvc);
+}
+
+static int __init
+uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct uvc_device *uvc = to_uvc(f);
+ struct usb_ep *ep;
+ int ret = -EINVAL;
+
+ INFO(cdev, "uvc_function_bind\n");
+
+ /* Allocate endpoints. */
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
+ if (!ep) {
+ INFO(cdev, "Unable to allocate control EP\n");
+ goto error;
+ }
+ uvc->control_ep = ep;
+ ep->driver_data = uvc;
+
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming_ep);
+ if (!ep) {
+ INFO(cdev, "Unable to allocate streaming EP\n");
+ goto error;
+ }
+ uvc->video.ep = ep;
+ ep->driver_data = uvc;
+
+ /* Allocate interface IDs. */
+ if ((ret = usb_interface_id(c, f)) < 0)
+ goto error;
+ uvc_iad.bFirstInterface = ret;
+ uvc_control_intf.bInterfaceNumber = ret;
+ uvc->control_intf = ret;
+
+ if ((ret = usb_interface_id(c, f)) < 0)
+ goto error;
+ uvc_streaming_intf_alt0.bInterfaceNumber = ret;
+ uvc_streaming_intf_alt1.bInterfaceNumber = ret;
+ uvc->streaming_intf = ret;
+
+ /* Copy descriptors. */
+ f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
+ f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
+
+ /* Preallocate control endpoint request. */
+ uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+ uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
+ if (uvc->control_req == NULL || uvc->control_buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ uvc->control_req->buf = uvc->control_buf;
+ uvc->control_req->complete = uvc_function_ep0_complete;
+ uvc->control_req->context = uvc;
+
+ /* Avoid letting this gadget enumerate until the userspace server is
+ * active.
+ */
+ if ((ret = usb_function_deactivate(f)) < 0)
+ goto error;
+
+ /* Initialise video. */
+ ret = uvc_video_init(&uvc->video);
+ if (ret < 0)
+ goto error;
+
+ /* Register a V4L2 device. */
+ ret = uvc_register_video(uvc);
+ if (ret < 0) {
+ printk(KERN_INFO "Unable to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ uvc_function_unbind(c, f);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * USB gadget function
+ */
+
+/**
+ * uvc_bind_config - add a UVC function to a configuration
+ * @c: the configuration to support the UVC instance
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @uvc_setup(). Caller is also responsible for
+ * calling @uvc_cleanup() before module unload.
+ */
+int __init
+uvc_bind_config(struct usb_configuration *c,
+ const struct uvc_descriptor_header * const *control,
+ const struct uvc_descriptor_header * const *fs_streaming,
+ const struct uvc_descriptor_header * const *hs_streaming)
+{
+ struct uvc_device *uvc;
+ int ret = 0;
+
+ /* TODO Check if the USB device controller supports the required
+ * features.
+ */
+ if (!gadget_is_dualspeed(c->cdev->gadget))
+ return -EINVAL;
+
+ uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
+ if (uvc == NULL)
+ return -ENOMEM;
+
+ uvc->state = UVC_STATE_DISCONNECTED;
+
+ /* Validate the descriptors. */
+ if (control == NULL || control[0] == NULL ||
+ control[0]->bDescriptorSubType != UVC_DT_HEADER)
+ goto error;
+
+ if (fs_streaming == NULL || fs_streaming[0] == NULL ||
+ fs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
+ goto error;
+
+ if (hs_streaming == NULL || hs_streaming[0] == NULL ||
+ hs_streaming[0]->bDescriptorSubType != UVC_DT_INPUT_HEADER)
+ goto error;
+
+ uvc->desc.control = control;
+ uvc->desc.fs_streaming = fs_streaming;
+ uvc->desc.hs_streaming = hs_streaming;
+
+ /* Allocate string descriptor numbers. */
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
+ uvc_iad.iFunction = ret;
+
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
+ uvc_control_intf.iInterface = ret;
+
+ if ((ret = usb_string_id(c->cdev)) < 0)
+ goto error;
+ uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
+ uvc_streaming_intf_alt0.iInterface = ret;
+ uvc_streaming_intf_alt1.iInterface = ret;
+
+ /* Register the function. */
+ uvc->func.name = "uvc";
+ uvc->func.strings = uvc_function_strings;
+ uvc->func.bind = uvc_function_bind;
+ uvc->func.unbind = uvc_function_unbind;
+ uvc->func.get_alt = uvc_function_get_alt;
+ uvc->func.set_alt = uvc_function_set_alt;
+ uvc->func.disable = uvc_function_disable;
+ uvc->func.setup = uvc_function_setup;
+
+ ret = usb_add_function(c, &uvc->func);
+ if (ret)
+ kfree(uvc);
+
+ return 0;
+
+error:
+ kfree(uvc);
+ return ret;
+}
+
+module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(trace, "Trace level bitmask");
+
diff --git a/drivers/usb/gadget/f_uvc.h b/drivers/usb/gadget/f_uvc.h
new file mode 100644
index 00000000000..8a5db7c4fe7
--- /dev/null
+++ b/drivers/usb/gadget/f_uvc.h
@@ -0,0 +1,376 @@
+/*
+ * f_uvc.h -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef _F_UVC_H_
+#define _F_UVC_H_
+
+#include <linux/usb/composite.h>
+
+#define USB_CLASS_VIDEO_CONTROL 1
+#define USB_CLASS_VIDEO_STREAMING 2
+
+struct uvc_descriptor_header {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+} __attribute__ ((packed));
+
+struct uvc_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u16 bcdUVC;
+ __u16 wTotalLength;
+ __u32 dwClockFrequency;
+ __u8 bInCollection;
+ __u8 baInterfaceNr[];
+} __attribute__((__packed__));
+
+#define UVC_HEADER_DESCRIPTOR(n) uvc_header_descriptor_##n
+
+#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \
+struct UVC_HEADER_DESCRIPTOR(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u16 bcdUVC; \
+ __u16 wTotalLength; \
+ __u32 dwClockFrequency; \
+ __u8 bInCollection; \
+ __u8 baInterfaceNr[n]; \
+} __attribute__ ((packed))
+
+struct uvc_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+} __attribute__((__packed__));
+
+struct uvc_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 iTerminal;
+} __attribute__((__packed__));
+
+struct uvc_camera_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+ __u16 wObjectiveFocalLengthMin;
+ __u16 wObjectiveFocalLengthMax;
+ __u16 wOcularFocalLength;
+ __u8 bControlSize;
+ __u8 bmControls[3];
+} __attribute__((__packed__));
+
+struct uvc_selector_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bNrInPins;
+ __u8 baSourceID[0];
+ __u8 iSelector;
+} __attribute__((__packed__));
+
+#define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \
+ uvc_selector_unit_descriptor_##n
+
+#define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \
+struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bUnitID; \
+ __u8 bNrInPins; \
+ __u8 baSourceID[n]; \
+ __u8 iSelector; \
+} __attribute__ ((packed))
+
+struct uvc_processing_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __u16 wMaxMultiplier;
+ __u8 bControlSize;
+ __u8 bmControls[2];
+ __u8 iProcessing;
+} __attribute__((__packed__));
+
+struct uvc_extension_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 guidExtensionCode[16];
+ __u8 bNumControls;
+ __u8 bNrInPins;
+ __u8 baSourceID[0];
+ __u8 bControlSize;
+ __u8 bmControls[0];
+ __u8 iExtension;
+} __attribute__((__packed__));
+
+#define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \
+ uvc_extension_unit_descriptor_##p##_##n
+
+#define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \
+struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bUnitID; \
+ __u8 guidExtensionCode[16]; \
+ __u8 bNumControls; \
+ __u8 bNrInPins; \
+ __u8 baSourceID[p]; \
+ __u8 bControlSize; \
+ __u8 bmControls[n]; \
+ __u8 iExtension; \
+} __attribute__ ((packed))
+
+struct uvc_control_endpoint_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u16 wMaxTransferSize;
+} __attribute__((__packed__));
+
+#define UVC_DT_HEADER 1
+#define UVC_DT_INPUT_TERMINAL 2
+#define UVC_DT_OUTPUT_TERMINAL 3
+#define UVC_DT_SELECTOR_UNIT 4
+#define UVC_DT_PROCESSING_UNIT 5
+#define UVC_DT_EXTENSION_UNIT 6
+
+#define UVC_DT_HEADER_SIZE(n) (12+(n))
+#define UVC_DT_INPUT_TERMINAL_SIZE 8
+#define UVC_DT_OUTPUT_TERMINAL_SIZE 9
+#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n))
+#define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n))
+#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
+#define UVC_DT_EXTENSION_UNIT_SIZE(p,n) (24+(p)+(n))
+#define UVC_DT_CONTROL_ENDPOINT_SIZE 5
+
+struct uvc_input_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __u16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bmInfo;
+ __u8 bTerminalLink;
+ __u8 bStillCaptureMethod;
+ __u8 bTriggerSupport;
+ __u8 bTriggerUsage;
+ __u8 bControlSize;
+ __u8 bmaControls[];
+} __attribute__((__packed__));
+
+#define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
+ uvc_input_header_descriptor_##n##_##p
+
+#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
+struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __u16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bmInfo; \
+ __u8 bTerminalLink; \
+ __u8 bStillCaptureMethod; \
+ __u8 bTriggerSupport; \
+ __u8 bTriggerUsage; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
+} __attribute__ ((packed))
+
+struct uvc_output_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __u16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bTerminalLink;
+ __u8 bControlSize;
+ __u8 bmaControls[];
+} __attribute__((__packed__));
+
+#define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
+ uvc_output_header_descriptor_##n##_##p
+
+#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
+struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __u16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bTerminalLink; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
+} __attribute__ ((packed))
+
+struct uvc_format_uncompressed {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 guidFormat[16];
+ __u8 bBitsPerPixel;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+} __attribute__((__packed__));
+
+struct uvc_frame_uncompressed {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[];
+} __attribute__((__packed__));
+
+#define UVC_FRAME_UNCOMPRESSED(n) \
+ uvc_frame_uncompressed_##n
+
+#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \
+struct UVC_FRAME_UNCOMPRESSED(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __u16 wWidth; \
+ __u16 wHeight; \
+ __u32 dwMinBitRate; \
+ __u32 dwMaxBitRate; \
+ __u32 dwMaxVideoFrameBufferSize; \
+ __u32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __u32 dwFrameInterval[n]; \
+} __attribute__ ((packed))
+
+struct uvc_format_mjpeg {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 bmFlags;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+} __attribute__((__packed__));
+
+struct uvc_frame_mjpeg {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __u16 wWidth;
+ __u16 wHeight;
+ __u32 dwMinBitRate;
+ __u32 dwMaxBitRate;
+ __u32 dwMaxVideoFrameBufferSize;
+ __u32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __u32 dwFrameInterval[];
+} __attribute__((__packed__));
+
+#define UVC_FRAME_MJPEG(n) \
+ uvc_frame_mjpeg_##n
+
+#define DECLARE_UVC_FRAME_MJPEG(n) \
+struct UVC_FRAME_MJPEG(n) { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __u16 wWidth; \
+ __u16 wHeight; \
+ __u32 dwMinBitRate; \
+ __u32 dwMaxBitRate; \
+ __u32 dwMaxVideoFrameBufferSize; \
+ __u32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __u32 dwFrameInterval[n]; \
+} __attribute__ ((packed))
+
+struct uvc_color_matching_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bColorPrimaries;
+ __u8 bTransferCharacteristics;
+ __u8 bMatrixCoefficients;
+} __attribute__((__packed__));
+
+#define UVC_DT_INPUT_HEADER 1
+#define UVC_DT_OUTPUT_HEADER 2
+#define UVC_DT_FORMAT_UNCOMPRESSED 4
+#define UVC_DT_FRAME_UNCOMPRESSED 5
+#define UVC_DT_FORMAT_MJPEG 6
+#define UVC_DT_FRAME_MJPEG 7
+#define UVC_DT_COLOR_MATCHING 13
+
+#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p))
+#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p))
+#define UVC_DT_FORMAT_UNCOMPRESSED_SIZE 27
+#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n))
+#define UVC_DT_FORMAT_MJPEG_SIZE 11
+#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n))
+#define UVC_DT_COLOR_MATCHING_SIZE 6
+
+extern int uvc_bind_config(struct usb_configuration *c,
+ const struct uvc_descriptor_header * const *control,
+ const struct uvc_descriptor_header * const *fs_streaming,
+ const struct uvc_descriptor_header * const *hs_streaming);
+
+#endif /* _F_UVC_H_ */
+
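
As an illustration of how a gadget driver would consume these helpers, here is a hedged sketch of declaring one uncompressed frame descriptor with three frame intervals via DECLARE_UVC_FRAME_UNCOMPRESSED() and the UVC_DT_* constants above. The resolution, bit rates and intervals are example values, and the surrounding driver (a webcam-style gadget passing its descriptors to uvc_bind_config()) is assumed, not part of this patch.

#include <linux/kernel.h>
#include <linux/usb/ch9.h>
#include "f_uvc.h"

DECLARE_UVC_FRAME_UNCOMPRESSED(3);

static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
	.bLength		= UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_DT_FRAME_UNCOMPRESSED,
	.bFrameIndex		= 1,
	.bmCapabilities		= 0,
	.wWidth			= cpu_to_le16(640),
	.wHeight		= cpu_to_le16(360),
	.dwMinBitRate		= cpu_to_le32(18432000),
	.dwMaxBitRate		= cpu_to_le32(55296000),
	.dwMaxVideoFrameBufferSize = cpu_to_le32(460800),	/* 640 * 360 * 2 */
	.dwDefaultFrameInterval	= cpu_to_le32(666666),		/* 15 fps, in 100 ns units */
	.bFrameIntervalType	= 3,
	.dwFrameInterval	= { cpu_to_le32(666666),
				    cpu_to_le32(1000000),
				    cpu_to_le32(5000000) },
};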
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 20a802ecaa1..d0b8bde59e5 100644
--- a/drivers/usb/gadget/fsl_mx3_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -50,12 +50,14 @@ int fsl_udc_clk_init(struct platform_device *pdev)
goto egusb;
}
- freq = clk_get_rate(mxc_usb_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
+ if (!cpu_is_mx51()) {
+ freq = clk_get_rate(mxc_usb_clk);
+ if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+ (freq < 59999000 || freq > 60001000)) {
+ dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+ ret = -EINVAL;
+ goto eclkrate;
+ }
}
ret = clk_enable(mxc_usb_clk);
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 3537d51073b..2928523268b 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2768,8 +2768,11 @@ static const struct of_device_id qe_udc_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, qe_udc_match);
static struct of_platform_driver udc_driver = {
- .name = (char *)driver_name,
- .match_table = qe_udc_match,
+ .driver = {
+ .name = (char *)driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = qe_udc_match,
+ },
.probe = qe_udc_probe,
.remove = __devexit_p(qe_udc_remove),
#ifdef CONFIG_PM
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index fa3d142ba64..08a9a62a39e 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
mult = (unsigned char)(1 + ((max >> 11) & 0x03));
- max = max & 0x8ff; /* bit 0~10 */
+ max = max & 0x7ff; /* bit 0~10 */
/* 3 transactions at most */
if (mult > 3)
goto en_done;
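
The 0x8ff -> 0x7ff change above matters because bits 10..0 of wMaxPacketSize carry the per-transaction packet size, while bits 12..11 carry the additional-transactions field for high-bandwidth isochronous endpoints. A stand-alone worked example of the same decode (the 0x1400 value is just an illustration):

#include <stdio.h>

int main(void)
{
	unsigned max  = 0x1400;				/* example wMaxPacketSize */
	unsigned mult = 1 + ((max >> 11) & 0x03);	/* bits 12..11 = 2 -> 3 transactions */

	max &= 0x7ff;					/* bits 10..0 -> 1024 bytes/transaction */
	printf("mult=%u max=%u\n", mult, max);		/* prints: mult=3 max=1024 */
	return 0;
}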
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
new file mode 100644
index 00000000000..4b0e4a040d6
--- /dev/null
+++ b/drivers/usb/gadget/g_ffs.c
@@ -0,0 +1,426 @@
+#include <linux/module.h>
+#include <linux/utsname.h>
+
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#if defined CONFIG_USB_FUNCTIONFS_ETH || defined CONFIG_USB_FUNCTIONFS_RNDIS
+# if defined USB_ETH_RNDIS
+# undef USB_ETH_RNDIS
+# endif
+# ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+# define USB_ETH_RNDIS y
+# endif
+
+# include "f_ecm.c"
+# include "f_subset.c"
+# ifdef USB_ETH_RNDIS
+# include "f_rndis.c"
+# include "rndis.c"
+# endif
+# include "u_ether.c"
+
+static u8 gfs_hostaddr[ETH_ALEN];
+#else
+# if !defined CONFIG_USB_FUNCTIONFS_GENERIC
+# define CONFIG_USB_FUNCTIONFS_GENERIC
+# endif
+# define gether_cleanup() do { } while (0)
+# define gether_setup(gadget, hostaddr) ((int)0)
+#endif
+
+#include "f_fs.c"
+
+
+#define DRIVER_NAME "g_ffs"
+#define DRIVER_DESC "USB Function Filesystem"
+#define DRIVER_VERSION "24 Aug 2004"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+
+static unsigned short gfs_vendor_id = 0x0525; /* XXX NetChip */
+static unsigned short gfs_product_id = 0xa4ac; /* XXX */
+
+static struct usb_device_descriptor gfs_dev_desc = {
+ .bLength = sizeof gfs_dev_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+
+ /* Vendor and product id can be overridden by module parameters. */
+ /* .idVendor = cpu_to_le16(gfs_vendor_id), */
+ /* .idProduct = cpu_to_le16(gfs_product_id), */
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+#define GFS_MODULE_PARAM_DESC(name, field) \
+ MODULE_PARM_DESC(name, "Value of the " #field " field of the device descriptor sent to the host. Takes effect only prior to the user-space driver registering to the FunctionFS.")
+
+module_param_named(usb_class, gfs_dev_desc.bDeviceClass, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_class, bDeviceClass);
+module_param_named(usb_subclass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_subclass, bDeviceSubClass);
+module_param_named(usb_protocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
+GFS_MODULE_PARAM_DESC(usb_protocol, bDeviceProtocol);
+module_param_named(usb_vendor, gfs_vendor_id, ushort, 0644);
+GFS_MODULE_PARAM_DESC(usb_vendor, idVendor);
+module_param_named(usb_product, gfs_product_id, ushort, 0644);
+GFS_MODULE_PARAM_DESC(usb_product, idProduct);
+
+
+
+static const struct usb_descriptor_header *gfs_otg_desc[] = {
+ (const struct usb_descriptor_header *)
+ &(const struct usb_otg_descriptor) {
+ .bLength = sizeof(struct usb_otg_descriptor),
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ... */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+ },
+
+ NULL
+};
+
+/* string IDs are assigned dynamically */
+
+enum {
+ GFS_STRING_MANUFACTURER_IDX,
+ GFS_STRING_PRODUCT_IDX,
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ GFS_STRING_RNDIS_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ GFS_STRING_ECM_CONFIG_IDX,
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ GFS_STRING_GENERIC_CONFIG_IDX,
+#endif
+};
+
+static char gfs_manufacturer[50];
+static const char gfs_driver_desc[] = DRIVER_DESC;
+static const char gfs_short_name[] = DRIVER_NAME;
+
+static struct usb_string gfs_strings[] = {
+ [GFS_STRING_MANUFACTURER_IDX].s = gfs_manufacturer,
+ [GFS_STRING_PRODUCT_IDX].s = gfs_driver_desc,
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ [GFS_STRING_RNDIS_CONFIG_IDX].s = "FunctionFS + RNDIS",
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ [GFS_STRING_ECM_CONFIG_IDX].s = "FunctionFS + ECM",
+#endif
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ [GFS_STRING_GENERIC_CONFIG_IDX].s = "FunctionFS",
+#endif
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings *gfs_dev_strings[] = {
+ &(struct usb_gadget_strings) {
+ .language = 0x0409, /* en-us */
+ .strings = gfs_strings,
+ },
+ NULL,
+};
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+static int gfs_do_rndis_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_rndis_config_driver = {
+ .label = "FunctionFS + RNDIS",
+ .bind = gfs_do_rndis_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_rndis_config(cdev) \
+ usb_add_config(cdev, &gfs_rndis_config_driver)
+#else
+# define gfs_add_rndis_config(cdev) 0
+#endif
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int gfs_do_ecm_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_ecm_config_driver = {
+ .label = "FunctionFS + ECM",
+ .bind = gfs_do_ecm_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_ecm_config(cdev) \
+ usb_add_config(cdev, &gfs_ecm_config_driver)
+#else
+# define gfs_add_ecm_config(cdev) 0
+#endif
+
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+static int gfs_do_generic_config(struct usb_configuration *c);
+
+static struct usb_configuration gfs_generic_config_driver = {
+ .label = "FunctionFS",
+ .bind = gfs_do_generic_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+# define gfs_add_generic_config(cdev) \
+ usb_add_config(cdev, &gfs_generic_config_driver)
+#else
+# define gfs_add_generic_config(cdev) 0
+#endif
+
+
+static int gfs_bind(struct usb_composite_dev *cdev);
+static int gfs_unbind(struct usb_composite_dev *cdev);
+
+static struct usb_composite_driver gfs_driver = {
+ .name = gfs_short_name,
+ .dev = &gfs_dev_desc,
+ .strings = gfs_dev_strings,
+ .bind = gfs_bind,
+ .unbind = gfs_unbind,
+};
+
+
+static struct ffs_data *gfs_ffs_data;
+static unsigned long gfs_registered;
+
+
+static int gfs_init(void)
+{
+ ENTER();
+
+ return functionfs_init();
+}
+module_init(gfs_init);
+
+static void gfs_exit(void)
+{
+ ENTER();
+
+ if (test_and_clear_bit(0, &gfs_registered))
+ usb_composite_unregister(&gfs_driver);
+
+ functionfs_cleanup();
+}
+module_exit(gfs_exit);
+
+
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(test_and_set_bit(0, &gfs_registered)))
+ return -EBUSY;
+
+ gfs_ffs_data = ffs;
+ ret = usb_composite_register(&gfs_driver);
+ if (unlikely(ret < 0))
+ clear_bit(0, &gfs_registered);
+ return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+ ENTER();
+
+ if (test_and_clear_bit(0, &gfs_registered))
+ usb_composite_unregister(&gfs_driver);
+}
+
+
+static int functionfs_check_dev_callback(const char *dev_name)
+{
+ return 0;
+}
+
+
+
+static int gfs_bind(struct usb_composite_dev *cdev)
+{
+ int ret;
+
+ ENTER();
+
+ if (WARN_ON(!gfs_ffs_data))
+ return -ENODEV;
+
+ ret = gether_setup(cdev->gadget, gfs_hostaddr);
+ if (unlikely(ret < 0))
+ goto error_quick;
+
+ gfs_dev_desc.idVendor = cpu_to_le16(gfs_vendor_id);
+ gfs_dev_desc.idProduct = cpu_to_le16(gfs_product_id);
+
+ snprintf(gfs_manufacturer, sizeof gfs_manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ cdev->gadget->name);
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_MANUFACTURER_IDX].id = ret;
+ gfs_dev_desc.iManufacturer = ret;
+
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_PRODUCT_IDX].id = ret;
+ gfs_dev_desc.iProduct = ret;
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_RNDIS_CONFIG_IDX].id = ret;
+ gfs_rndis_config_driver.iConfiguration = ret;
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_ECM_CONFIG_IDX].id = ret;
+ gfs_ecm_config_driver.iConfiguration = ret;
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+ ret = usb_string_id(cdev);
+ if (unlikely(ret < 0))
+ goto error;
+ gfs_strings[GFS_STRING_GENERIC_CONFIG_IDX].id = ret;
+ gfs_generic_config_driver.iConfiguration = ret;
+#endif
+
+ ret = functionfs_bind(gfs_ffs_data, cdev);
+ if (unlikely(ret < 0))
+ goto error;
+
+ ret = gfs_add_rndis_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ ret = gfs_add_ecm_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ ret = gfs_add_generic_config(cdev);
+ if (unlikely(ret < 0))
+ goto error_unbind;
+
+ return 0;
+
+error_unbind:
+ functionfs_unbind(gfs_ffs_data);
+error:
+ gether_cleanup();
+error_quick:
+ gfs_ffs_data = NULL;
+ return ret;
+}
+
+static int gfs_unbind(struct usb_composite_dev *cdev)
+{
+ ENTER();
+
+ /* We may have been called in an error recovery from
+ * composite_bind() after gfs_bind() failure, so we need to
+ * check whether gfs_ffs_data is non-NULL, since gfs_bind()
+ * handles all of its error recovery itself.  I'd rather we
+ * weren't called from composite on error recovery, but what
+ * can you do... */
+
+ if (gfs_ffs_data) {
+ gether_cleanup();
+ functionfs_unbind(gfs_ffs_data);
+ gfs_ffs_data = NULL;
+ }
+
+ return 0;
+}
+
+
+static int __gfs_do_config(struct usb_configuration *c,
+ int (*eth)(struct usb_configuration *c, u8 *ethaddr),
+ u8 *ethaddr)
+{
+ int ret;
+
+ if (WARN_ON(!gfs_ffs_data))
+ return -ENODEV;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = gfs_otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ if (eth) {
+ ret = eth(c, ethaddr);
+ if (unlikely(ret < 0))
+ return ret;
+ }
+
+ ret = functionfs_add(c->cdev, c, gfs_ffs_data);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
+#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
+static int gfs_do_rndis_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c, rndis_bind_config, gfs_hostaddr);
+}
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_ETH
+static int gfs_do_ecm_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c,
+ can_support_ecm(c->cdev->gadget)
+ ? ecm_bind_config : geth_bind_config,
+ gfs_hostaddr);
+}
+#endif
+
+#ifdef CONFIG_USB_FUNCTIONFS_GENERIC
+static int gfs_do_generic_config(struct usb_configuration *c)
+{
+ ENTER();
+
+ return __gfs_do_config(c, NULL, NULL);
+}
+#endif
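
For context, here is a minimal user-space sketch of how the callbacks above are
driven. It is illustrative only: the mount point and device name are arbitrary,
and the exact layout of the descriptor and string blobs is defined by the
FunctionFS interface (f_fs.c), which is not part of this file.

/* Illustrative only: bring up one FunctionFS instance so that
 * functionfs_ready_callback() ends up registering gfs_driver. */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mount.h>
#include <unistd.h>

static int bring_up_function(const void *descs, size_t descs_len,
			     const void *strs, size_t strs_len)
{
	int ep0;

	/* functionfs_check_dev_callback() above accepts any device name. */
	if (mount("ffs-dev", "/dev/ffs", "functionfs", 0, NULL) < 0)
		return -1;

	ep0 = open("/dev/ffs/ep0", O_RDWR);
	if (ep0 < 0)
		return -1;

	/* Writing the descriptors and then the strings to ep0 is what
	 * triggers functionfs_ready_callback(); closing ep0 later triggers
	 * functionfs_closed_callback(), which unregisters the gadget. */
	if (write(ep0, descs, descs_len) < 0 ||
	    write(ep0, strs, strs_len) < 0) {
		close(ep0);
		return -1;
	}
	return ep0;
}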
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
new file mode 100644
index 00000000000..775722686ed
--- /dev/null
+++ b/drivers/usb/gadget/hid.c
@@ -0,0 +1,298 @@
+/*
+ * hid.c -- HID Composite driver
+ *
+ * Based on multi.c
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+
+#define DRIVER_DESC "HID Gadget"
+#define DRIVER_VERSION "2010/03/16"
+
+/*-------------------------------------------------------------------------*/
+
+#define HIDG_VENDOR_NUM 0x0525 /* XXX NetChip */
+#define HIDG_PRODUCT_NUM 0xa4ac /* Linux-USB HID gadget */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_hid.c"
+
+
+struct hidg_func_node {
+ struct list_head node;
+ struct hidg_func_descriptor *func;
+};
+
+static LIST_HEAD(hidg_func_list);
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+
+ /* .bDeviceClass = USB_CLASS_COMM, */
+ /* .bDeviceSubClass = 0, */
+ /* .bDeviceProtocol = 0, */
+ .bDeviceClass = 0xEF,
+ .bDeviceSubClass = 2,
+ .bDeviceProtocol = 1,
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id can be overridden by module parameters. */
+ .idVendor = cpu_to_le16(HIDG_VENDOR_NUM),
+ .idProduct = cpu_to_le16(HIDG_PRODUCT_NUM),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+
+
+/****************************** Configurations ******************************/
+
+static int __init do_config(struct usb_configuration *c)
+{
+ struct hidg_func_node *e;
+ int func = 0, status = 0;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ list_for_each_entry(e, &hidg_func_list, node) {
+ status = hidg_bind_config(c, e->func, func++);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+static struct usb_configuration config_driver = {
+ .label = "HID Gadget",
+ .bind = do_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+/****************************** Gadget Bind ******************************/
+
+static int __init hid_bind(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ struct list_head *tmp;
+ int status, gcnum, funcs = 0;
+
+ list_for_each(tmp, &hidg_func_list)
+ funcs++;
+
+ if (!funcs)
+ return -ENODEV;
+
+ /* set up HID */
+ status = ghid_setup(cdev->gadget, funcs);
+ if (status < 0)
+ return status;
+
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ else
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+ /* register our configuration */
+ status = usb_add_config(cdev, &config_driver);
+ if (status < 0)
+ return status;
+
+ dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+
+ return 0;
+}
+
+static int __exit hid_unbind(struct usb_composite_dev *cdev)
+{
+ ghid_cleanup();
+ return 0;
+}
+
+static int __init hidg_plat_driver_probe(struct platform_device *pdev)
+{
+ struct hidg_func_descriptor *func = pdev->dev.platform_data;
+ struct hidg_func_node *entry;
+
+ if (!func) {
+ dev_err(&pdev->dev, "Platform data missing\n");
+ return -ENODEV;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ list_add_tail(&entry->node, &hidg_func_list);
+
+ return 0;
+}
+
+static int __devexit hidg_plat_driver_remove(struct platform_device *pdev)
+{
+ struct hidg_func_node *e, *n;
+
+ list_for_each_entry_safe(e, n, &hidg_func_list, node) {
+ list_del(&e->node);
+ kfree(e);
+ }
+
+ return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver hidg_driver = {
+ .name = "g_hid",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = hid_bind,
+ .unbind = __exit_p(hid_unbind),
+};
+
+static struct platform_driver hidg_plat_driver = {
+ .remove = __devexit_p(hidg_plat_driver_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hidg",
+ },
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Fabien Chouteau, Peter Korsgaard");
+MODULE_LICENSE("GPL");
+
+static int __init hidg_init(void)
+{
+ int status;
+
+ status = platform_driver_probe(&hidg_plat_driver,
+ hidg_plat_driver_probe);
+ if (status < 0)
+ return status;
+
+ status = usb_composite_register(&hidg_driver);
+ if (status < 0)
+ platform_driver_unregister(&hidg_plat_driver);
+
+ return status;
+}
+module_init(hidg_init);
+
+static void __exit hidg_cleanup(void)
+{
+ platform_driver_unregister(&hidg_plat_driver);
+ usb_composite_unregister(&hidg_driver);
+}
+module_exit(hidg_cleanup);
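
The platform driver above only consumes platform data; it does not create any.
A minimal sketch of the board-side code that would feed it follows. The
struct hidg_func_descriptor layout is the one declared in <linux/usb/g_hid.h>,
and the report descriptor is a generic vendor-defined 8-byte in/out report,
not taken from any real product.

/* Illustrative board code: register one HID function for the "hidg" driver. */
#include <linux/platform_device.h>
#include <linux/usb/g_hid.h>

static struct hidg_func_descriptor my_hid_data = {
	.subclass		= 0,
	.protocol		= 0,
	.report_length		= 8,
	.report_desc_length	= 25,
	.report_desc		= {
		0x06, 0x00, 0xff,	/* Usage Page (Vendor Defined)   */
		0x09, 0x01,		/* Usage (Vendor Usage 1)        */
		0xa1, 0x01,		/* Collection (Application)      */
		0x15, 0x00,		/*   Logical Minimum (0)         */
		0x26, 0xff, 0x00,	/*   Logical Maximum (255)       */
		0x75, 0x08,		/*   Report Size (8)             */
		0x95, 0x08,		/*   Report Count (8)            */
		0x09, 0x01,		/*   Usage (Vendor Usage 1)      */
		0x81, 0x02,		/*   Input (Data, Var, Abs)      */
		0x09, 0x01,		/*   Usage (Vendor Usage 1)      */
		0x91, 0x02,		/*   Output (Data, Var, Abs)     */
		0xc0,			/* End Collection                */
	},
};

static struct platform_device my_hid = {
	.name			= "hidg",	/* matches hidg_plat_driver */
	.id			= 0,
	.dev.platform_data	= &my_hid_data,
};

/* Called from the board's init code, before g_hid is loaded. */
static int __init board_register_hidg(void)
{
	return platform_device_register(&my_hid);
}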
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 6b8bf8c781c..43abf55d8c6 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -794,7 +794,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
}
static int
-printer_fsync(struct file *fd, struct dentry *dentry, int datasync)
+printer_fsync(struct file *fd, int datasync)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index ff61e4866e8..cd16231d8c7 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -360,7 +360,7 @@ struct pxa_ep {
* Specific pxa endpoint data, needed for hardware initialization
*/
unsigned dir_in:1;
- unsigned addr:3;
+ unsigned addr:4;
unsigned config:2;
unsigned interface:3;
unsigned alternate:3;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 868d8ee8675..04c462ff0ea 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -654,7 +654,7 @@ static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
if (curlun->ro || !filp)
return 0;
- return vfs_fsync(filp, filp->f_path.dentry, 1);
+ return vfs_fsync(filp, 1);
}
static void store_cdrom_address(u8 *dest, int msf, u32 addr)
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 07f4178ad17..1da755a1c85 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -715,7 +715,7 @@ static u8 __init nibble(unsigned char c)
return 0;
}
-static int __init get_ether_addr(const char *str, u8 *dev_addr)
+static int get_ether_addr(const char *str, u8 *dev_addr)
{
if (str) {
unsigned i;
@@ -764,7 +764,7 @@ static struct device_type gadget_type = {
*
* Returns negative errno, or zero on success
*/
-int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
struct eth_dev *dev;
struct net_device *net;
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
new file mode 100644
index 00000000000..0a705e63c93
--- /dev/null
+++ b/drivers/usb/gadget/uvc.h
@@ -0,0 +1,241 @@
+/*
+ * uvc_gadget.h -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef _UVC_GADGET_H_
+#define _UVC_GADGET_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1)
+#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2)
+#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3)
+#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4)
+#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
+#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
+
+struct uvc_request_data
+{
+ unsigned int length;
+ __u8 data[60];
+};
+
+struct uvc_event
+{
+ union {
+ enum usb_device_speed speed;
+ struct usb_ctrlrequest req;
+ struct uvc_request_data data;
+ };
+};
+
+#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data)
+
+#define UVC_INTF_CONTROL 0
+#define UVC_INTF_STREAMING 1
+
+/* ------------------------------------------------------------------------
+ * UVC constants & structures
+ */
+
+/* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */
+#define UVC_STREAM_EOH (1 << 7)
+#define UVC_STREAM_ERR (1 << 6)
+#define UVC_STREAM_STI (1 << 5)
+#define UVC_STREAM_RES (1 << 4)
+#define UVC_STREAM_SCR (1 << 3)
+#define UVC_STREAM_PTS (1 << 2)
+#define UVC_STREAM_EOF (1 << 1)
+#define UVC_STREAM_FID (1 << 0)
+
+struct uvc_streaming_control {
+ __u16 bmHint;
+ __u8 bFormatIndex;
+ __u8 bFrameIndex;
+ __u32 dwFrameInterval;
+ __u16 wKeyFrameRate;
+ __u16 wPFrameRate;
+ __u16 wCompQuality;
+ __u16 wCompWindowSize;
+ __u16 wDelay;
+ __u32 dwMaxVideoFrameSize;
+ __u32 dwMaxPayloadTransferSize;
+ __u32 dwClockFrequency;
+ __u8 bmFramingInfo;
+ __u8 bPreferedVersion;
+ __u8 bMinVersion;
+ __u8 bMaxVersion;
+} __attribute__((__packed__));
+
+/* ------------------------------------------------------------------------
+ * Debugging, printing and logging
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/usb.h> /* For usb_endpoint_* */
+#include <linux/usb/gadget.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+
+#include "uvc_queue.h"
+
+#define UVC_TRACE_PROBE (1 << 0)
+#define UVC_TRACE_DESCR (1 << 1)
+#define UVC_TRACE_CONTROL (1 << 2)
+#define UVC_TRACE_FORMAT (1 << 3)
+#define UVC_TRACE_CAPTURE (1 << 4)
+#define UVC_TRACE_CALLS (1 << 5)
+#define UVC_TRACE_IOCTL (1 << 6)
+#define UVC_TRACE_FRAME (1 << 7)
+#define UVC_TRACE_SUSPEND (1 << 8)
+#define UVC_TRACE_STATUS (1 << 9)
+
+#define UVC_WARN_MINMAX 0
+#define UVC_WARN_PROBE_DEF 1
+
+extern unsigned int uvc_trace_param;
+
+#define uvc_trace(flag, msg...) \
+ do { \
+ if (uvc_trace_param & flag) \
+ printk(KERN_DEBUG "uvcvideo: " msg); \
+ } while (0)
+
+#define uvc_warn_once(dev, warn, msg...) \
+ do { \
+ if (!test_and_set_bit(warn, &dev->warnings)) \
+ printk(KERN_INFO "uvcvideo: " msg); \
+ } while (0)
+
+#define uvc_printk(level, msg...) \
+ printk(level "uvcvideo: " msg)
+
+/* ------------------------------------------------------------------------
+ * Driver specific constants
+ */
+
+#define DRIVER_VERSION "0.1.0"
+#define DRIVER_VERSION_NUMBER KERNEL_VERSION(0, 1, 0)
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define UVC_NUM_REQUESTS 4
+#define UVC_MAX_REQUEST_SIZE 64
+#define UVC_MAX_EVENTS 4
+
+#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
+#define USB_CLASS_MISC 0xef
+
+/* ------------------------------------------------------------------------
+ * Structures
+ */
+
+struct uvc_video
+{
+ struct usb_ep *ep;
+
+ /* Frame parameters */
+ u8 bpp;
+ u32 fcc;
+ unsigned int width;
+ unsigned int height;
+ unsigned int imagesize;
+
+ /* Requests */
+ unsigned int req_size;
+ struct usb_request *req[UVC_NUM_REQUESTS];
+ __u8 *req_buffer[UVC_NUM_REQUESTS];
+ struct list_head req_free;
+ spinlock_t req_lock;
+
+ void (*encode) (struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf);
+
+ /* Context data used by the completion handler */
+ __u32 payload_size;
+ __u32 max_payload_size;
+
+ struct uvc_video_queue queue;
+ unsigned int fid;
+};
+
+enum uvc_state
+{
+ UVC_STATE_DISCONNECTED,
+ UVC_STATE_CONNECTED,
+ UVC_STATE_STREAMING,
+};
+
+struct uvc_device
+{
+ struct video_device *vdev;
+ enum uvc_state state;
+ struct usb_function func;
+ struct uvc_video video;
+
+ /* Descriptors */
+ struct {
+ const struct uvc_descriptor_header * const *control;
+ const struct uvc_descriptor_header * const *fs_streaming;
+ const struct uvc_descriptor_header * const *hs_streaming;
+ } desc;
+
+ unsigned int control_intf;
+ struct usb_ep *control_ep;
+ struct usb_request *control_req;
+ void *control_buf;
+
+ unsigned int streaming_intf;
+
+ /* Events */
+ unsigned int event_length;
+ unsigned int event_setup_out : 1;
+};
+
+static inline struct uvc_device *to_uvc(struct usb_function *f)
+{
+ return container_of(f, struct uvc_device, func);
+}
+
+struct uvc_file_handle
+{
+ struct v4l2_fh vfh;
+ struct uvc_video *device;
+};
+
+#define to_uvc_file_handle(handle) \
+ container_of(handle, struct uvc_file_handle, vfh)
+
+extern struct v4l2_file_operations uvc_v4l2_fops;
+
+/* ------------------------------------------------------------------------
+ * Functions
+ */
+
+extern int uvc_video_enable(struct uvc_video *video, int enable);
+extern int uvc_video_init(struct uvc_video *video);
+extern int uvc_video_pump(struct uvc_video *video);
+
+extern void uvc_endpoint_stream(struct uvc_device *dev);
+
+extern void uvc_function_connect(struct uvc_device *uvc);
+extern void uvc_function_disconnect(struct uvc_device *uvc);
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_GADGET_H_ */
+
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
new file mode 100644
index 00000000000..43891991bf2
--- /dev/null
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -0,0 +1,583 @@
+/*
+ * uvc_queue.c -- USB Video Class driver - Buffers management
+ *
+ * Copyright (C) 2005-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#include "uvc.h"
+
+/* ------------------------------------------------------------------------
+ * Video buffers queue management.
+ *
+ * The video queue is initialized by uvc_queue_init(). The function performs
+ * basic initialization of the uvc_video_queue struct and never fails.
+ *
+ * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
+ * uvc_free_buffers respectively. The former acquires the video queue lock,
+ * while the latter must be called with the lock held (so that allocation can
+ * free previously allocated buffers). Trying to free buffers that are mapped
+ * to user space will return -EBUSY.
+ *
+ * Video buffers are managed using two queues. However, unlike most USB video
+ * drivers that use an in queue and an out queue, we use a main queue to hold
+ * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
+ * hold empty buffers. This design (copied from video-buf) minimizes locking
+ * in interrupt, as only one queue is shared between interrupt and user
+ * contexts.
+ *
+ * Use cases
+ * ---------
+ *
+ * Unless stated otherwise, all operations that modify the irq buffers queue
+ * are protected by the irq spinlock.
+ *
+ * 1. The user queues the buffers, starts streaming and dequeues a buffer.
+ *
+ * The buffers are added to the main and irq queues. Both operations are
+ * protected by the queue lock, and the latter is protected by the irq
+ * spinlock as well.
+ *
+ * The completion handler fetches a buffer from the irq queue and fills it
+ * with video data. If no buffer is available (irq queue empty), the handler
+ * returns immediately.
+ *
+ * When the buffer is full, the completion handler removes it from the irq
+ * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
+ * At that point, any process waiting on the buffer will be woken up. If a
+ * process tries to dequeue a buffer after it has been marked ready, the
+ * dequeuing will succeed immediately.
+ *
+ * 2. Buffers are queued, user is waiting on a buffer and the device gets
+ * disconnected.
+ *
+ * When the device is disconnected, the kernel calls the completion handler
+ * with an appropriate status code. The handler marks all buffers in the
+ * irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
+ * that any process waiting on a buffer gets woken up.
+ *
+ * Waking up the first buffer on the irq list is not enough, as the
+ * process waiting on the buffer might restart the dequeue operation
+ * immediately.
+ *
+ */
+
+void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
+{
+ mutex_init(&queue->mutex);
+ spin_lock_init(&queue->irqlock);
+ INIT_LIST_HEAD(&queue->mainqueue);
+ INIT_LIST_HEAD(&queue->irqqueue);
+ queue->type = type;
+}
+
+/*
+ * Allocate the video buffers.
+ *
+ * Pages are reserved to make sure they will not be swapped, as they will be
+ * filled in the USB request completion handler.
+ *
+ * Buffers will be individually mapped, so they must all be page aligned.
+ */
+int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
+ unsigned int buflength)
+{
+ unsigned int bufsize = PAGE_ALIGN(buflength);
+ unsigned int i;
+ void *mem = NULL;
+ int ret;
+
+ if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
+ nbuffers = UVC_MAX_VIDEO_BUFFERS;
+
+ mutex_lock(&queue->mutex);
+
+ if ((ret = uvc_free_buffers(queue)) < 0)
+ goto done;
+
+ /* Bail out if no buffers should be allocated. */
+ if (nbuffers == 0)
+ goto done;
+
+ /* Decrement the number of buffers until allocation succeeds. */
+ for (; nbuffers > 0; --nbuffers) {
+ mem = vmalloc_32(nbuffers * bufsize);
+ if (mem != NULL)
+ break;
+ }
+
+ if (mem == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < nbuffers; ++i) {
+ memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
+ queue->buffer[i].buf.index = i;
+ queue->buffer[i].buf.m.offset = i * bufsize;
+ queue->buffer[i].buf.length = buflength;
+ queue->buffer[i].buf.type = queue->type;
+ queue->buffer[i].buf.sequence = 0;
+ queue->buffer[i].buf.field = V4L2_FIELD_NONE;
+ queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
+ queue->buffer[i].buf.flags = 0;
+ init_waitqueue_head(&queue->buffer[i].wait);
+ }
+
+ queue->mem = mem;
+ queue->count = nbuffers;
+ queue->buf_size = bufsize;
+ ret = nbuffers;
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Free the video buffers.
+ *
+ * This function must be called with the queue lock held.
+ */
+int uvc_free_buffers(struct uvc_video_queue *queue)
+{
+ unsigned int i;
+
+ for (i = 0; i < queue->count; ++i) {
+ if (queue->buffer[i].vma_use_count != 0)
+ return -EBUSY;
+ }
+
+ if (queue->count) {
+ vfree(queue->mem);
+ queue->count = 0;
+ }
+
+ return 0;
+}
+
+static void __uvc_query_buffer(struct uvc_buffer *buf,
+ struct v4l2_buffer *v4l2_buf)
+{
+ memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
+
+ if (buf->vma_use_count)
+ v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ switch (buf->state) {
+ case UVC_BUF_STATE_ERROR:
+ case UVC_BUF_STATE_DONE:
+ v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case UVC_BUF_STATE_QUEUED:
+ case UVC_BUF_STATE_ACTIVE:
+ v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case UVC_BUF_STATE_IDLE:
+ default:
+ break;
+ }
+}
+
+int uvc_query_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf)
+{
+ int ret = 0;
+
+ mutex_lock(&queue->mutex);
+ if (v4l2_buf->index >= queue->count) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Queue a video buffer. Attempting to queue a buffer that has already been
+ * queued will return -EINVAL.
+ */
+int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf)
+{
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret = 0;
+
+ uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);
+
+ if (v4l2_buf->type != queue->type ||
+ v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+ "and/or memory (%u).\n", v4l2_buf->type,
+ v4l2_buf->memory);
+ return -EINVAL;
+ }
+
+ mutex_lock(&queue->mutex);
+ if (v4l2_buf->index >= queue->count) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ buf = &queue->buffer[v4l2_buf->index];
+ if (buf->state != UVC_BUF_STATE_IDLE) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
+ "(%u).\n", buf->state);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ v4l2_buf->bytesused > buf->buf.length) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ buf->buf.bytesused = 0;
+ else
+ buf->buf.bytesused = v4l2_buf->bytesused;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (queue->flags & UVC_QUEUE_DISCONNECTED) {
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+ ret = -ENODEV;
+ goto done;
+ }
+ buf->state = UVC_BUF_STATE_QUEUED;
+
+ ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
+ queue->flags &= ~UVC_QUEUE_PAUSED;
+
+ list_add_tail(&buf->stream, &queue->mainqueue);
+ list_add_tail(&buf->queue, &queue->irqqueue);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
+{
+ if (nonblocking) {
+ return (buf->state != UVC_BUF_STATE_QUEUED &&
+ buf->state != UVC_BUF_STATE_ACTIVE)
+ ? 0 : -EAGAIN;
+ }
+
+ return wait_event_interruptible(buf->wait,
+ buf->state != UVC_BUF_STATE_QUEUED &&
+ buf->state != UVC_BUF_STATE_ACTIVE);
+}
+
+/*
+ * Dequeue a video buffer. If nonblocking is false, block until a buffer is
+ * available.
+ */
+int uvc_dequeue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf, int nonblocking)
+{
+ struct uvc_buffer *buf;
+ int ret = 0;
+
+ if (v4l2_buf->type != queue->type ||
+ v4l2_buf->memory != V4L2_MEMORY_MMAP) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
+ "and/or memory (%u).\n", v4l2_buf->type,
+ v4l2_buf->memory);
+ return -EINVAL;
+ }
+
+ mutex_lock(&queue->mutex);
+ if (list_empty(&queue->mainqueue)) {
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+ if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
+ goto done;
+
+ uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
+ buf->buf.index, buf->state, buf->buf.bytesused);
+
+ switch (buf->state) {
+ case UVC_BUF_STATE_ERROR:
+ uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
+ "(transmission error).\n");
+ ret = -EIO;
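+ /* fall through - the erroneous buffer is still returned to the user */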
+ case UVC_BUF_STATE_DONE:
+ buf->state = UVC_BUF_STATE_IDLE;
+ break;
+
+ case UVC_BUF_STATE_IDLE:
+ case UVC_BUF_STATE_QUEUED:
+ case UVC_BUF_STATE_ACTIVE:
+ default:
+ uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
+ "(driver bug?).\n", buf->state);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_del(&buf->stream);
+ __uvc_query_buffer(buf, v4l2_buf);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Poll the video queue.
+ *
+ * This function implements video queue polling and is intended to be used by
+ * the device poll handler.
+ */
+unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+ poll_table *wait)
+{
+ struct uvc_buffer *buf;
+ unsigned int mask = 0;
+
+ mutex_lock(&queue->mutex);
+ if (list_empty(&queue->mainqueue))
+ goto done;
+
+ buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
+
+ poll_wait(file, &buf->wait, wait);
+ if (buf->state == UVC_BUF_STATE_DONE ||
+ buf->state == UVC_BUF_STATE_ERROR)
+ mask |= POLLOUT | POLLWRNORM;
+
+done:
+ mutex_unlock(&queue->mutex);
+ return mask;
+}
+
+/*
+ * VMA operations.
+ */
+static void uvc_vm_open(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count++;
+}
+
+static void uvc_vm_close(struct vm_area_struct *vma)
+{
+ struct uvc_buffer *buffer = vma->vm_private_data;
+ buffer->vma_use_count--;
+}
+
+static struct vm_operations_struct uvc_vm_ops = {
+ .open = uvc_vm_open,
+ .close = uvc_vm_close,
+};
+
+/*
+ * Memory-map a buffer.
+ *
+ * This function implements video buffer memory mapping and is intended to be
+ * used by the device mmap handler.
+ */
+int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
+{
+ struct uvc_buffer *uninitialized_var(buffer);
+ struct page *page;
+ unsigned long addr, start, size;
+ unsigned int i;
+ int ret = 0;
+
+ start = vma->vm_start;
+ size = vma->vm_end - vma->vm_start;
+
+ mutex_lock(&queue->mutex);
+
+ for (i = 0; i < queue->count; ++i) {
+ buffer = &queue->buffer[i];
+ if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
+ break;
+ }
+
+ if (i == queue->count || size != queue->buf_size) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * VM_IO marks the area as being an mmaped region for I/O to a
+ * device. It also prevents the region from being core dumped.
+ */
+ vma->vm_flags |= VM_IO;
+
+ addr = (unsigned long)queue->mem + buffer->buf.m.offset;
+ while (size > 0) {
+ page = vmalloc_to_page((void *)addr);
+ if ((ret = vm_insert_page(vma, start, page)) < 0)
+ goto done;
+
+ start += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ vma->vm_ops = &uvc_vm_ops;
+ vma->vm_private_data = buffer;
+ uvc_vm_open(vma);
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Enable or disable the video buffers queue.
+ *
+ * The queue must be enabled before starting video acquisition and must be
+ * disabled after stopping it. This ensures that the video buffers queue
+ * state can be properly initialized before buffers are accessed from the
+ * interrupt handler.
+ *
+ * Enabling the video queue initializes parameters (such as sequence number,
+ * sync pattern, ...). If the queue is already enabled, return -EBUSY.
+ *
+ * Disabling the video queue cancels the queue and removes all buffers from
+ * the main queue.
+ *
+ * This function can't be called from interrupt context. Use
+ * uvc_queue_cancel() instead.
+ */
+int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&queue->mutex);
+ if (enable) {
+ if (uvc_queue_streaming(queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+ queue->sequence = 0;
+ queue->flags |= UVC_QUEUE_STREAMING;
+ queue->buf_used = 0;
+ } else {
+ uvc_queue_cancel(queue, 0);
+ INIT_LIST_HEAD(&queue->mainqueue);
+
+ for (i = 0; i < queue->count; ++i)
+ queue->buffer[i].state = UVC_BUF_STATE_IDLE;
+
+ queue->flags &= ~UVC_QUEUE_STREAMING;
+ }
+
+done:
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+
+/*
+ * Cancel the video buffers queue.
+ *
+ * Cancelling the queue marks all buffers on the irq queue as erroneous,
+ * wakes them up and removes them from the queue.
+ *
+ * If the disconnect parameter is set, further calls to uvc_queue_buffer will
+ * fail with -ENODEV.
+ *
+ * This function acquires the irq spinlock and can be called from interrupt
+ * context.
+ */
+void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
+{
+ struct uvc_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ while (!list_empty(&queue->irqqueue)) {
+ buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ list_del(&buf->queue);
+ buf->state = UVC_BUF_STATE_ERROR;
+ wake_up(&buf->wait);
+ }
+ /* This must be protected by the irqlock spinlock to avoid race
+ * conditions between uvc_queue_buffer and the disconnection event that
+ * could result in an interruptible wait in uvc_dequeue_buffer. Do not
+ * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
+ * state outside the queue code.
+ */
+ if (disconnect)
+ queue->flags |= UVC_QUEUE_DISCONNECTED;
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+}
+
+struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf)
+{
+ struct uvc_buffer *nextbuf;
+ unsigned long flags;
+
+ if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
+ buf->buf.length != buf->buf.bytesused) {
+ buf->state = UVC_BUF_STATE_QUEUED;
+ buf->buf.bytesused = 0;
+ return buf;
+ }
+
+ spin_lock_irqsave(&queue->irqlock, flags);
+ list_del(&buf->queue);
+ if (!list_empty(&queue->irqqueue))
+ nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ else
+ nextbuf = NULL;
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+ buf->buf.sequence = queue->sequence++;
+ do_gettimeofday(&buf->buf.timestamp);
+
+ wake_up(&buf->wait);
+ return nextbuf;
+}
+
+struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue)
+{
+ struct uvc_buffer *buf = NULL;
+
+ if (!list_empty(&queue->irqqueue))
+ buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
+ queue);
+ else
+ queue->flags |= UVC_QUEUE_PAUSED;
+
+ return buf;
+}
+
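To make the queueing model described at the top of this file concrete, here is
a user-space sketch of the mmap streaming sequence it implements. The device
node, frame size and single-buffer loop are illustrative and error handling is
elided; the format matches the 320x240 YUYV default programmed by
uvc_video_init().

/* Illustrative only: push one frame through the gadget's V4L2 output node. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int stream_one_frame(void)
{
	int fd = open("/dev/video0", O_RDWR);		/* assumed node */
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
	struct v4l2_requestbuffers rb = { .count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP };
	struct v4l2_buffer buf = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP };
	int type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	void *mem;

	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.width = 320;
	fmt.fmt.pix.height = 240;
	ioctl(fd, VIDIOC_S_FMT, &fmt);		/* uvc_v4l2_set_format() */

	ioctl(fd, VIDIOC_REQBUFS, &rb);		/* uvc_alloc_buffers() */

	ioctl(fd, VIDIOC_QUERYBUF, &buf);	/* uvc_query_buffer() */
	/* Each buffer sits at its own offset inside the single
	 * vmalloc_32() area; uvc_queue_mmap() resolves the offset. */
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);

	memset(mem, 0x80, 320 * 240 * 2);	/* "produce" a frame */
	buf.bytesused = 320 * 240 * 2;		/* required for OUTPUT buffers */
	ioctl(fd, VIDIOC_QBUF, &buf);		/* uvc_queue_buffer() */
	ioctl(fd, VIDIOC_STREAMON, &type);	/* uvc_video_enable(.., 1) */
	ioctl(fd, VIDIOC_DQBUF, &buf);		/* blocks until STATE_DONE */
	return 0;
}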
diff --git a/drivers/usb/gadget/uvc_queue.h b/drivers/usb/gadget/uvc_queue.h
new file mode 100644
index 00000000000..7f5a33fe7ae
--- /dev/null
+++ b/drivers/usb/gadget/uvc_queue.h
@@ -0,0 +1,89 @@
+#ifndef _UVC_QUEUE_H_
+#define _UVC_QUEUE_H_
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/videodev2.h>
+
+/* Maximum frame size in bytes, for sanity checking. */
+#define UVC_MAX_FRAME_SIZE (16*1024*1024)
+/* Maximum number of video buffers. */
+#define UVC_MAX_VIDEO_BUFFERS 32
+
+/* ------------------------------------------------------------------------
+ * Structures.
+ */
+
+enum uvc_buffer_state {
+ UVC_BUF_STATE_IDLE = 0,
+ UVC_BUF_STATE_QUEUED = 1,
+ UVC_BUF_STATE_ACTIVE = 2,
+ UVC_BUF_STATE_DONE = 3,
+ UVC_BUF_STATE_ERROR = 4,
+};
+
+struct uvc_buffer {
+ unsigned long vma_use_count;
+ struct list_head stream;
+
+ /* Touched by interrupt handler. */
+ struct v4l2_buffer buf;
+ struct list_head queue;
+ wait_queue_head_t wait;
+ enum uvc_buffer_state state;
+};
+
+#define UVC_QUEUE_STREAMING (1 << 0)
+#define UVC_QUEUE_DISCONNECTED (1 << 1)
+#define UVC_QUEUE_DROP_INCOMPLETE (1 << 2)
+#define UVC_QUEUE_PAUSED (1 << 3)
+
+struct uvc_video_queue {
+ enum v4l2_buf_type type;
+
+ void *mem;
+ unsigned int flags;
+ __u32 sequence;
+
+ unsigned int count;
+ unsigned int buf_size;
+ unsigned int buf_used;
+ struct uvc_buffer buffer[UVC_MAX_VIDEO_BUFFERS];
+ struct mutex mutex; /* protects buffers and mainqueue */
+ spinlock_t irqlock; /* protects irqqueue */
+
+ struct list_head mainqueue;
+ struct list_head irqqueue;
+};
+
+extern void uvc_queue_init(struct uvc_video_queue *queue,
+ enum v4l2_buf_type type);
+extern int uvc_alloc_buffers(struct uvc_video_queue *queue,
+ unsigned int nbuffers, unsigned int buflength);
+extern int uvc_free_buffers(struct uvc_video_queue *queue);
+extern int uvc_query_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf);
+extern int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf);
+extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
+ struct v4l2_buffer *v4l2_buf, int nonblocking);
+extern int uvc_queue_enable(struct uvc_video_queue *queue, int enable);
+extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
+extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf);
+extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+ struct file *file, poll_table *wait);
+extern int uvc_queue_mmap(struct uvc_video_queue *queue,
+ struct vm_area_struct *vma);
+static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
+{
+ return queue->flags & UVC_QUEUE_STREAMING;
+}
+extern struct uvc_buffer *uvc_queue_head(struct uvc_video_queue *queue);
+
+#endif /* __KERNEL__ */
+
+#endif /* _UVC_QUEUE_H_ */
+
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
new file mode 100644
index 00000000000..a7989f29837
--- /dev/null
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -0,0 +1,374 @@
+/*
+ * uvc_v4l2.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Requests handling
+ */
+
+static int
+uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
+{
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct usb_request *req = uvc->control_req;
+
+ if (data->length < 0)
+ return usb_ep_set_halt(cdev->gadget->ep0);
+
+ req->length = min(uvc->event_length, data->length);
+ req->zero = data->length < uvc->event_length;
+ req->dma = DMA_ADDR_INVALID;
+
+ memcpy(req->buf, data->data, data->length);
+
+ return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2
+ */
+
+struct uvc_format
+{
+ u8 bpp;
+ u32 fcc;
+};
+
+static struct uvc_format uvc_formats[] = {
+ { 16, V4L2_PIX_FMT_YUYV },
+ { 0, V4L2_PIX_FMT_MJPEG },
+};
+
+static int
+uvc_v4l2_get_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+ fmt->fmt.pix.pixelformat = video->fcc;
+ fmt->fmt.pix.width = video->width;
+ fmt->fmt.pix.height = video->height;
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = video->bpp * video->width / 8;
+ fmt->fmt.pix.sizeimage = video->imagesize;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
+{
+ struct uvc_format *format;
+ unsigned int imagesize;
+ unsigned int bpl;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) {
+ format = &uvc_formats[i];
+ if (format->fcc == fmt->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+ printk(KERN_INFO "Unsupported format 0x%08x.\n",
+ fmt->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ bpl = format->bpp * fmt->fmt.pix.width / 8;
+ imagesize = bpl ? bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage;
+
+ video->fcc = format->fcc;
+ video->bpp = format->bpp;
+ video->width = fmt->fmt.pix.width;
+ video->height = fmt->fmt.pix.height;
+ video->imagesize = imagesize;
+
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = bpl;
+ fmt->fmt.pix.sizeimage = imagesize;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle;
+ int ret;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (handle == NULL)
+ return -ENOMEM;
+
+ ret = v4l2_fh_init(&handle->vfh, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_event_init(&handle->vfh);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_event_alloc(&handle->vfh, 8);
+ if (ret < 0)
+ goto error;
+
+ v4l2_fh_add(&handle->vfh);
+
+ handle->device = &uvc->video;
+ file->private_data = &handle->vfh;
+
+ uvc_function_connect(uvc);
+ return 0;
+
+error:
+ v4l2_fh_exit(&handle->vfh);
+ return ret;
+}
+
+static int
+uvc_v4l2_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct uvc_video *video = handle->device;
+
+ uvc_function_disconnect(uvc);
+
+ uvc_video_enable(video, 0);
+ mutex_lock(&video->queue.mutex);
+ if (uvc_free_buffers(&video->queue) < 0)
+ printk(KERN_ERR "uvc_v4l2_release: Unable to free "
+ "buffers.\n");
+ mutex_unlock(&video->queue.mutex);
+
+ file->private_data = NULL;
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
+ return 0;
+}
+
+static long
+uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct uvc_video *video = &uvc->video;
+ int ret = 0;
+
+ switch (cmd) {
+ /* Query capabilities */
+ case VIDIOC_QUERYCAP:
+ {
+ struct v4l2_capability *cap = arg;
+
+ memset(cap, 0, sizeof *cap);
+ strncpy(cap->driver, "g_uvc", sizeof(cap->driver));
+ strncpy(cap->card, cdev->gadget->name, sizeof(cap->card));
+ strncpy(cap->bus_info, dev_name(&cdev->gadget->dev),
+ sizeof cap->bus_info);
+ cap->version = DRIVER_VERSION_NUMBER;
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ break;
+ }
+
+ /* Get & Set format */
+ case VIDIOC_G_FMT:
+ {
+ struct v4l2_format *fmt = arg;
+
+ if (fmt->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_v4l2_get_format(video, fmt);
+ }
+
+ case VIDIOC_S_FMT:
+ {
+ struct v4l2_format *fmt = arg;
+
+ if (fmt->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_v4l2_set_format(video, fmt);
+ }
+
+ /* Buffers & streaming */
+ case VIDIOC_REQBUFS:
+ {
+ struct v4l2_requestbuffers *rb = arg;
+
+ if (rb->type != video->queue.type ||
+ rb->memory != V4L2_MEMORY_MMAP)
+ return -EINVAL;
+
+ ret = uvc_alloc_buffers(&video->queue, rb->count,
+ video->imagesize);
+ if (ret < 0)
+ return ret;
+
+ rb->count = ret;
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_QUERYBUF:
+ {
+ struct v4l2_buffer *buf = arg;
+
+ if (buf->type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_query_buffer(&video->queue, buf);
+ }
+
+ case VIDIOC_QBUF:
+ if ((ret = uvc_queue_buffer(&video->queue, arg)) < 0)
+ return ret;
+
+ return uvc_video_pump(video);
+
+ case VIDIOC_DQBUF:
+ return uvc_dequeue_buffer(&video->queue, arg,
+ file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_STREAMON:
+ {
+ int *type = arg;
+
+ if (*type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_video_enable(video, 1);
+ }
+
+ case VIDIOC_STREAMOFF:
+ {
+ int *type = arg;
+
+ if (*type != video->queue.type)
+ return -EINVAL;
+
+ return uvc_video_enable(video, 0);
+ }
+
+ /* Events */
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *event = arg;
+
+ ret = v4l2_event_dequeue(&handle->vfh, event,
+ file->f_flags & O_NONBLOCK);
+ if (ret == 0 && event->type == UVC_EVENT_SETUP) {
+ struct uvc_event *uvc_event = (void *)&event->u.data;
+
+ /* Tell the complete callback to generate an event for
+ * the next request that will be enqueued by
+ * uvc_event_write.
+ */
+ uvc->event_setup_out =
+ !(uvc_event->req.bRequestType & USB_DIR_IN);
+ uvc->event_length = uvc_event->req.wLength;
+ }
+
+ return ret;
+ }
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(&handle->vfh, arg);
+ }
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_event_unsubscribe(&handle->vfh, arg);
+
+ case UVCIOC_SEND_RESPONSE:
+ ret = uvc_send_response(uvc, arg);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ret;
+}
+
+static long
+uvc_v4l2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, uvc_v4l2_do_ioctl);
+}
+
+static int
+uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+
+ return uvc_queue_mmap(&uvc->video.queue, vma);
+}
+
+static unsigned int
+uvc_v4l2_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
+ unsigned int mask = 0;
+
+ poll_wait(file, &handle->vfh.events->wait, wait);
+ if (v4l2_event_pending(&handle->vfh))
+ mask |= POLLPRI;
+
+ mask |= uvc_queue_poll(&uvc->video.queue, file, wait);
+
+ return mask;
+}
+
+struct v4l2_file_operations uvc_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = uvc_v4l2_open,
+ .release = uvc_v4l2_release,
+ .ioctl = uvc_v4l2_ioctl,
+ .mmap = uvc_v4l2_mmap,
+ .poll = uvc_v4l2_poll,
+};
+
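The ioctl handler above is one half of a handshake with a user-space UVC
application: class control requests forwarded by the function driver arrive as
UVC_EVENT_SETUP events and are answered with UVCIOC_SEND_RESPONSE. A sketch of
the user-space side follows; it only covers IN requests (for OUT requests the
data stage arrives later as a UVC_EVENT_DATA event), assumes the header above
is visible to user space as "uvc.h", and elides error handling and the actual
decoding of the probe/commit controls.

/* Illustrative only: answer UVC class requests from user space. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/ch9.h>
#include <linux/videodev2.h>
#include "uvc.h"	/* UVC_EVENT_*, UVCIOC_SEND_RESPONSE, struct uvc_event */

static void handle_setup_events(int fd)
{
	struct v4l2_event_subscription sub = { .type = UVC_EVENT_SETUP };
	struct v4l2_event event;
	struct uvc_request_data resp;

	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	for (;;) {
		struct uvc_event *uvc_event = (void *)&event.u.data;

		ioctl(fd, VIDIOC_DQEVENT, &event);	/* blocks */
		if (event.type != UVC_EVENT_SETUP)
			continue;

		/* A real application decodes uvc_event->req (bRequest,
		 * wValue, wIndex) and fills resp with probe/commit data. */
		memset(&resp, 0, sizeof resp);
		resp.length = uvc_event->req.wLength < sizeof resp.data
			    ? uvc_event->req.wLength : sizeof resp.data;
		ioctl(fd, UVCIOC_SEND_RESPONSE, &resp);	/* uvc_send_response() */
	}
}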
diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
new file mode 100644
index 00000000000..de8cbc46518
--- /dev/null
+++ b/drivers/usb/gadget/uvc_video.c
@@ -0,0 +1,386 @@
+/*
+ * uvc_video.c -- USB Video Class Gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <media/v4l2-dev.h>
+
+#include "uvc.h"
+#include "uvc_queue.h"
+
+/* --------------------------------------------------------------------------
+ * Video codecs
+ */
+
+static int
+uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
+ u8 *data, int len)
+{
+ data[0] = 2;
+ data[1] = UVC_STREAM_EOH | video->fid;
+
+ if (buf->buf.bytesused - video->queue.buf_used <= len - 2)
+ data[1] |= UVC_STREAM_EOF;
+
+ return 2;
+}
+
+static int
+uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
+ u8 *data, int len)
+{
+ struct uvc_video_queue *queue = &video->queue;
+ unsigned int nbytes;
+ void *mem;
+
+ /* Copy video data to the USB buffer. */
+ mem = queue->mem + buf->buf.m.offset + queue->buf_used;
+ nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);
+
+ memcpy(data, mem, nbytes);
+ queue->buf_used += nbytes;
+
+ return nbytes;
+}
+
+static void
+uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf)
+{
+ void *mem = req->buf;
+ int len = video->req_size;
+ int ret;
+
+ /* Add a header at the beginning of the payload. */
+ if (video->payload_size == 0) {
+ ret = uvc_video_encode_header(video, buf, mem, len);
+ video->payload_size += ret;
+ mem += ret;
+ len -= ret;
+ }
+
+ /* Process video data. */
+ len = min((int)(video->max_payload_size - video->payload_size), len);
+ ret = uvc_video_encode_data(video, buf, mem, len);
+
+ video->payload_size += ret;
+ len -= ret;
+
+ req->length = video->req_size - len;
+ req->zero = video->payload_size == video->max_payload_size;
+
+ if (buf->buf.bytesused == video->queue.buf_used) {
+ video->queue.buf_used = 0;
+ buf->state = UVC_BUF_STATE_DONE;
+ uvc_queue_next_buffer(&video->queue, buf);
+ video->fid ^= UVC_STREAM_FID;
+
+ video->payload_size = 0;
+ }
+
+ if (video->payload_size == video->max_payload_size ||
+ buf->buf.bytesused == video->queue.buf_used)
+ video->payload_size = 0;
+}
+
+static void
+uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
+ struct uvc_buffer *buf)
+{
+ void *mem = req->buf;
+ int len = video->req_size;
+ int ret;
+
+ /* Add the header. */
+ ret = uvc_video_encode_header(video, buf, mem, len);
+ mem += ret;
+ len -= ret;
+
+ /* Process video data. */
+ ret = uvc_video_encode_data(video, buf, mem, len);
+ len -= ret;
+
+ req->length = video->req_size - len;
+
+ if (buf->buf.bytesused == video->queue.buf_used) {
+ video->queue.buf_used = 0;
+ buf->state = UVC_BUF_STATE_DONE;
+ uvc_queue_next_buffer(&video->queue, buf);
+ video->fid ^= UVC_STREAM_FID;
+ }
+}
+
+/* --------------------------------------------------------------------------
+ * Request handling
+ */
+
+/*
+ * I somehow feel that synchronisation won't be easy to achieve here. We have
+ * three events that control USB requests submission:
+ *
+ * - USB request completion: the completion handler will resubmit the request
+ * if a video buffer is available.
+ *
+ * - USB interface setting selection: in response to a SET_INTERFACE request,
+ * the handler will start streaming if a video buffer is available and if
+ * video is not currently streaming.
+ *
+ * - V4L2 buffer queueing: the driver will start streaming if video is not
+ * currently streaming.
+ *
+ * Race conditions between those 3 events might lead to deadlocks or other
+ * nasty side effects.
+ *
+ * The "video currently streaming" condition can't be detected by the irqqueue
+ * being empty, as a request can still be in flight. A separate "queue paused"
+ * flag is thus needed.
+ *
+ * The paused flag will be set when we try to retrieve the irqqueue head if the
+ * queue is empty, and cleared when we queue a buffer.
+ *
+ * The USB request completion handler will get the buffer at the irqqueue head
+ * under protection of the queue spinlock. If the queue is empty, the streaming
+ * paused flag will be set. Right after releasing the spinlock a userspace
+ * application can queue a buffer. The flag will then be cleared, and the ioctl
+ * handler will restart the video stream.
+ */
+static void
+uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct uvc_video *video = req->context;
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ switch (req->status) {
+ case 0:
+ break;
+
+ case -ESHUTDOWN:
+ printk(KERN_INFO "VS request cancelled.\n");
+ goto requeue;
+
+ default:
+ printk(KERN_INFO "VS request completed with status %d.\n",
+ req->status);
+ goto requeue;
+ }
+
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ buf = uvc_queue_head(&video->queue);
+ if (buf == NULL) {
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ goto requeue;
+ }
+
+ video->encode(req, video, buf);
+
+ if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
+ printk(KERN_INFO "Failed to queue request (%d).\n", ret);
+ usb_ep_set_halt(ep);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ goto requeue;
+ }
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ return;
+
+requeue:
+ spin_lock_irqsave(&video->req_lock, flags);
+ list_add_tail(&req->list, &video->req_free);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+}
+
+static int
+uvc_video_free_requests(struct uvc_video *video)
+{
+ unsigned int i;
+
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+ if (video->req[i]) {
+ usb_ep_free_request(video->ep, video->req[i]);
+ video->req[i] = NULL;
+ }
+
+ if (video->req_buffer[i]) {
+ kfree(video->req_buffer[i]);
+ video->req_buffer[i] = NULL;
+ }
+ }
+
+ INIT_LIST_HEAD(&video->req_free);
+ video->req_size = 0;
+ return 0;
+}
+
+static int
+uvc_video_alloc_requests(struct uvc_video *video)
+{
+ unsigned int i;
+ int ret = -ENOMEM;
+
+ BUG_ON(video->req_size);
+
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
+ video->req_buffer[i] = kmalloc(video->ep->maxpacket, GFP_KERNEL);
+ if (video->req_buffer[i] == NULL)
+ goto error;
+
+ video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL);
+ if (video->req[i] == NULL)
+ goto error;
+
+ video->req[i]->buf = video->req_buffer[i];
+ video->req[i]->length = 0;
+ video->req[i]->dma = DMA_ADDR_INVALID;
+ video->req[i]->complete = uvc_video_complete;
+ video->req[i]->context = video;
+
+ list_add_tail(&video->req[i]->list, &video->req_free);
+ }
+
+ video->req_size = video->ep->maxpacket;
+ return 0;
+
+error:
+ uvc_video_free_requests(video);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Video streaming
+ */
+
+/*
+ * uvc_video_pump - Pump video data into the USB requests
+ *
+ * This function fills the available USB requests (listed in req_free) with
+ * video data from the queued buffers.
+ */
+int
+uvc_video_pump(struct uvc_video *video)
+{
+ struct usb_request *req;
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
+
+ /* FIXME TODO Race between uvc_video_pump and requests completion
+ * handler ???
+ */
+
+ while (1) {
+ /* Retrieve the first available USB request, protected by the
+ * request lock.
+ */
+ spin_lock_irqsave(&video->req_lock, flags);
+ if (list_empty(&video->req_free)) {
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return 0;
+ }
+ req = list_first_entry(&video->req_free, struct usb_request,
+ list);
+ list_del(&req->list);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
+ /* Retrieve the first available video buffer and fill the
+ * request, protected by the video queue irqlock.
+ */
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ buf = uvc_queue_head(&video->queue);
+ if (buf == NULL) {
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ break;
+ }
+
+ video->encode(req, video, buf);
+
+ /* Queue the USB request */
+ if ((ret = usb_ep_queue(video->ep, req, GFP_KERNEL)) < 0) {
+ printk(KERN_INFO "Failed to queue request (%d)\n", ret);
+ usb_ep_set_halt(video->ep);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ }
+
+ spin_lock_irqsave(&video->req_lock, flags);
+ list_add_tail(&req->list, &video->req_free);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return 0;
+}
+
+/*
+ * Enable or disable the video stream.
+ */
+int
+uvc_video_enable(struct uvc_video *video, int enable)
+{
+ unsigned int i;
+ int ret;
+
+ if (video->ep == NULL) {
+ printk(KERN_INFO "Video enable failed, device is "
+ "uninitialized.\n");
+ return -ENODEV;
+ }
+
+ if (!enable) {
+ for (i = 0; i < UVC_NUM_REQUESTS; ++i)
+ usb_ep_dequeue(video->ep, video->req[i]);
+
+ uvc_video_free_requests(video);
+ uvc_queue_enable(&video->queue, 0);
+ return 0;
+ }
+
+ if ((ret = uvc_queue_enable(&video->queue, 1)) < 0)
+ return ret;
+
+ if ((ret = uvc_video_alloc_requests(video)) < 0)
+ return ret;
+
+ if (video->max_payload_size) {
+ video->encode = uvc_video_encode_bulk;
+ video->payload_size = 0;
+ } else
+ video->encode = uvc_video_encode_isoc;
+
+ return uvc_video_pump(video);
+}
+
+/*
+ * Initialize the UVC video stream.
+ */
+int
+uvc_video_init(struct uvc_video *video)
+{
+ INIT_LIST_HEAD(&video->req_free);
+ spin_lock_init(&video->req_lock);
+
+ video->fcc = V4L2_PIX_FMT_YUYV;
+ video->bpp = 16;
+ video->width = 320;
+ video->height = 240;
+ video->imagesize = 320 * 240 * 2;
+
+ /* Initialize the video buffers queue. */
+ uvc_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return 0;
+}
+
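As a worked example of the framing implemented above: uvc_video_encode_header()
prepends a two-byte payload header (a header-length byte plus a bmHeaderInfo
byte carrying EOH, the current FID and, on the last chunk of a frame, EOF), and
the isoc encode path then fills the rest of each request with frame data.
Assuming the default 320x240 YUYV frame from uvc_video_init() and a request
size equal to an assumed 512-byte maxpacket:

/* Illustration only; 512 is an assumed ep->maxpacket, not a fixed value. */
#define FRAME_BYTES	(320 * 240 * 2)		/* 153600 */
#define REQ_SIZE	512
#define DATA_PER_REQ	(REQ_SIZE - 2)		/* 2-byte payload header */

/* 302 requests per frame: 301 carrying 510 bytes each and a final one
 * carrying the remaining 90 bytes with UVC_STREAM_EOF set, after which
 * uvc_video_encode_isoc() toggles the FID bit for the next frame. */
static const unsigned int reqs_per_frame =
	(FRAME_BYTES + DATA_PER_REQ - 1) / DATA_PER_REQ;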
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
new file mode 100644
index 00000000000..417fd688769
--- /dev/null
+++ b/drivers/usb/gadget/webcam.c
@@ -0,0 +1,399 @@
+/*
+ * webcam.c -- USB webcam gadget driver
+ *
+ * Copyright (C) 2009-2010
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/video.h>
+
+#include "f_uvc.h"
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_uvc.c"
+#include "uvc_queue.c"
+#include "uvc_v4l2.c"
+#include "uvc_video.c"
+
+/* --------------------------------------------------------------------------
+ * Device descriptor
+ */
+
+#define WEBCAM_VENDOR_ID 0x1d6b /* Linux Foundation */
+#define WEBCAM_PRODUCT_ID 0x0102 /* Webcam A/V gadget */
+#define WEBCAM_DEVICE_BCD 0x0010 /* 0.10 */
+
+static char webcam_vendor_label[] = "Linux Foundation";
+static char webcam_product_label[] = "Webcam gadget";
+static char webcam_config_label[] = "Video";
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
+
+static struct usb_string webcam_strings[] = {
+ [STRING_MANUFACTURER_IDX].s = webcam_vendor_label,
+ [STRING_PRODUCT_IDX].s = webcam_product_label,
+ [STRING_DESCRIPTION_IDX].s = webcam_config_label,
+ { }
+};
+
+static struct usb_gadget_strings webcam_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = webcam_strings,
+};
+
+static struct usb_gadget_strings *webcam_device_strings[] = {
+ &webcam_stringtab,
+ NULL,
+};
+
+static struct usb_device_descriptor webcam_device_descriptor = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = cpu_to_le16(0x0200),
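+	/* Misc/Common-Class/IAD: the UVC interfaces are grouped by an IAD. */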
+ .bDeviceClass = USB_CLASS_MISC,
+ .bDeviceSubClass = 0x02,
+ .bDeviceProtocol = 0x01,
+ .bMaxPacketSize0 = 0, /* dynamic */
+ .idVendor = cpu_to_le16(WEBCAM_VENDOR_ID),
+ .idProduct = cpu_to_le16(WEBCAM_PRODUCT_ID),
+ .bcdDevice = cpu_to_le16(WEBCAM_DEVICE_BCD),
+ .iManufacturer = 0, /* dynamic */
+ .iProduct = 0, /* dynamic */
+ .iSerialNumber = 0, /* dynamic */
+ .bNumConfigurations = 0, /* dynamic */
+};
+
+DECLARE_UVC_HEADER_DESCRIPTOR(1);
+
+static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
+ .bLength = UVC_DT_HEADER_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_HEADER,
+ .bcdUVC = cpu_to_le16(0x0100),
+ .wTotalLength = 0, /* dynamic */
+ .dwClockFrequency = cpu_to_le32(48000000),
+ .bInCollection = 0, /* dynamic */
+ .baInterfaceNr[0] = 0, /* dynamic */
+};
+
+static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
+ .bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_INPUT_TERMINAL,
+ .bTerminalID = 1,
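+	/* wTerminalType 0x0201: ITT_CAMERA (camera sensor input terminal) */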
+ .wTerminalType = cpu_to_le16(0x0201),
+ .bAssocTerminal = 0,
+ .iTerminal = 0,
+ .wObjectiveFocalLengthMin = cpu_to_le16(0),
+ .wObjectiveFocalLengthMax = cpu_to_le16(0),
+ .wOcularFocalLength = cpu_to_le16(0),
+ .bControlSize = 3,
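+	/* bmControls D1 set: Auto-Exposure Mode control supported */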
+ .bmControls[0] = 2,
+ .bmControls[1] = 0,
+ .bmControls[2] = 0,
+};
+
+static const struct uvc_processing_unit_descriptor uvc_processing = {
+ .bLength = UVC_DT_PROCESSING_UNIT_SIZE(2),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_PROCESSING_UNIT,
+ .bUnitID = 2,
+ .bSourceID = 1,
+ .wMaxMultiplier = cpu_to_le16(16*1024),
+ .bControlSize = 2,
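+	/* bmControls D0 set: Brightness control supported */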
+ .bmControls[0] = 1,
+ .bmControls[1] = 0,
+ .iProcessing = 0,
+};
+
+static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
+ .bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_OUTPUT_TERMINAL,
+ .bTerminalID = 3,
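+	/* wTerminalType 0x0101: TT_STREAMING (USB streaming terminal) */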
+ .wTerminalType = cpu_to_le16(0x0101),
+ .bAssocTerminal = 0,
+ .bSourceID = 2,
+ .iTerminal = 0,
+};
+
+DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
+
+static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
+ .bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_INPUT_HEADER,
+ .bNumFormats = 2,
+ .wTotalLength = 0, /* dynamic */
+ .bEndpointAddress = 0, /* dynamic */
+ .bmInfo = 0,
+ .bTerminalLink = 3,
+ .bStillCaptureMethod = 0,
+ .bTriggerSupport = 0,
+ .bTriggerUsage = 0,
+ .bControlSize = 1,
+ .bmaControls[0][0] = 0,
+ .bmaControls[1][0] = 4,
+};
+
+static const struct uvc_format_uncompressed uvc_format_yuv = {
+ .bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FORMAT_UNCOMPRESSED,
+ .bFormatIndex = 1,
+ .bNumFrameDescriptors = 2,
+ .guidFormat =
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
+ .bBitsPerPixel = 16,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
+};
+
+DECLARE_UVC_FRAME_UNCOMPRESSED(1);
+DECLARE_UVC_FRAME_UNCOMPRESSED(3);
+
+static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(640),
+ .wHeight = cpu_to_le16(360),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
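+	/* 640 * 360 * 2 bytes/pixel = 460800 bytes per frame */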
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+ .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .bFrameIntervalType = 3,
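+	/* Frame intervals are in 100 ns units:
+	 * 666666 = 15 fps, 1000000 = 10 fps, 5000000 = 2 fps.
+	 */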
+ .dwFrameInterval[0] = cpu_to_le32(666666),
+ .dwFrameInterval[1] = cpu_to_le32(1000000),
+ .dwFrameInterval[2] = cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
+ .bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_UNCOMPRESSED,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1280),
+ .wHeight = cpu_to_le16(720),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+ .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .bFrameIntervalType = 1,
+ .dwFrameInterval[0] = cpu_to_le32(5000000),
+};
+
+static const struct uvc_format_mjpeg uvc_format_mjpg = {
+ .bLength = UVC_DT_FORMAT_MJPEG_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FORMAT_MJPEG,
+ .bFormatIndex = 2,
+ .bNumFrameDescriptors = 2,
+ .bmFlags = 0,
+ .bDefaultFrameIndex = 1,
+ .bAspectRatioX = 0,
+ .bAspectRatioY = 0,
+ .bmInterfaceFlags = 0,
+ .bCopyProtect = 0,
+};
+
+DECLARE_UVC_FRAME_MJPEG(1);
+DECLARE_UVC_FRAME_MJPEG(3);
+
+static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(3),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_MJPEG,
+ .bFrameIndex = 1,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(640),
+ .wHeight = cpu_to_le16(360),
+ .dwMinBitRate = cpu_to_le32(18432000),
+ .dwMaxBitRate = cpu_to_le32(55296000),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
+ .dwDefaultFrameInterval = cpu_to_le32(666666),
+ .bFrameIntervalType = 3,
+ .dwFrameInterval[0] = cpu_to_le32(666666),
+ .dwFrameInterval[1] = cpu_to_le32(1000000),
+ .dwFrameInterval[2] = cpu_to_le32(5000000),
+};
+
+static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
+ .bLength = UVC_DT_FRAME_MJPEG_SIZE(1),
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_FRAME_MJPEG,
+ .bFrameIndex = 2,
+ .bmCapabilities = 0,
+ .wWidth = cpu_to_le16(1280),
+ .wHeight = cpu_to_le16(720),
+ .dwMinBitRate = cpu_to_le32(29491200),
+ .dwMaxBitRate = cpu_to_le32(29491200),
+ .dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
+ .dwDefaultFrameInterval = cpu_to_le32(5000000),
+ .bFrameIntervalType = 1,
+ .dwFrameInterval[0] = cpu_to_le32(5000000),
+};
+
+static const struct uvc_color_matching_descriptor uvc_color_matching = {
+ .bLength = UVC_DT_COLOR_MATCHING_SIZE,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = UVC_DT_COLOR_MATCHING,
+ .bColorPrimaries = 1,
+ .bTransferCharacteristics = 1,
+ .bMatrixCoefficients = 4,
+};
+
+static const struct uvc_descriptor_header * const uvc_control_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_control_header,
+ (const struct uvc_descriptor_header *) &uvc_camera_terminal,
+ (const struct uvc_descriptor_header *) &uvc_processing,
+ (const struct uvc_descriptor_header *) &uvc_output_terminal,
+ NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+ (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
+ NULL,
+};
+
+static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
+ (const struct uvc_descriptor_header *) &uvc_input_header,
+ (const struct uvc_descriptor_header *) &uvc_format_yuv,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+ (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
+ (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+ (const struct uvc_descriptor_header *) &uvc_color_matching,
+ NULL,
+};
+
+/* --------------------------------------------------------------------------
+ * USB configuration
+ */
+
+static int __init
+webcam_config_bind(struct usb_configuration *c)
+{
+ return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
+ uvc_hs_streaming_cls);
+}
+
+static struct usb_configuration webcam_config_driver = {
+ .label = webcam_config_label,
+ .bind = webcam_config_bind,
+ .bConfigurationValue = 1,
+ .iConfiguration = 0, /* dynamic */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
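+	/* bMaxPower is expressed in units of 2 mA */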
+ .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
+};
+
+static int /* __init_or_exit */
+webcam_unbind(struct usb_composite_dev *cdev)
+{
+ return 0;
+}
+
+static int __init
+webcam_bind(struct usb_composite_dev *cdev)
+{
+ int ret;
+
+ /* Allocate string descriptor numbers ... note that string contents
+ * can be overridden by the composite_dev glue.
+ */
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_MANUFACTURER_IDX].id = ret;
+ webcam_device_descriptor.iManufacturer = ret;
+
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_PRODUCT_IDX].id = ret;
+ webcam_device_descriptor.iProduct = ret;
+
+ if ((ret = usb_string_id(cdev)) < 0)
+ goto error;
+ webcam_strings[STRING_DESCRIPTION_IDX].id = ret;
+ webcam_config_driver.iConfiguration = ret;
+
+ /* Register our configuration. */
+ if ((ret = usb_add_config(cdev, &webcam_config_driver)) < 0)
+ goto error;
+
+ INFO(cdev, "Webcam Video Gadget\n");
+ return 0;
+
+error:
+ webcam_unbind(cdev);
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ * Driver
+ */
+
+static struct usb_composite_driver webcam_driver = {
+ .name = "g_webcam",
+ .dev = &webcam_device_descriptor,
+ .strings = webcam_device_strings,
+ .bind = webcam_bind,
+ .unbind = webcam_unbind,
+};
+
+static int __init
+webcam_init(void)
+{
+ return usb_composite_register(&webcam_driver);
+}
+
+static void __exit
+webcam_cleanup(void)
+{
+ usb_composite_unregister(&webcam_driver);
+}
+
+module_init(webcam_init);
+module_exit(webcam_cleanup);
+
+MODULE_AUTHOR("Laurent Pinchart");
+MODULE_DESCRIPTION("Webcam Video Gadget");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d3df0397de..f865be2276d 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -207,6 +207,21 @@ config USB_OHCI_HCD
To compile this driver as a module, choose M here: the
module will be called ohci-hcd.
+config USB_OHCI_HCD_OMAP1
+ bool "OHCI support for OMAP1/2 chips"
+ depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
+ default y
+ ---help---
+ Enables support for the OHCI controller on OMAP1/2 chips.
+
+config USB_OHCI_HCD_OMAP3
+ bool "OHCI support for OMAP3 and later chips"
+ depends on USB_OHCI_HCD && (ARCH_OMAP3 || ARCH_OMAP4)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ OMAP3 and later chips.
+
config USB_OHCI_HCD_PPC_SOC
bool "OHCI support for on-chip PPC USB controller"
depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index e3a74e75e82..faa61748db7 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -69,6 +69,15 @@ static void au1xxx_stop_ehc(void)
au_sync();
}
+static int au1xxx_ehci_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int ret = ehci_init(hcd);
+
+ ehci->need_io_watchdog = 0;
+ return ret;
+}
+
static const struct hc_driver ehci_au1xxx_hc_driver = {
.description = hcd_name,
.product_desc = "Au1xxx EHCI",
@@ -86,7 +95,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
* FIXME -- ehci_init() doesn't do enough here.
* See ehci-ppc-soc for a complete implementation.
*/
- .reset = ehci_init,
+ .reset = au1xxx_ehci_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
@@ -215,26 +224,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
msleep(10);
/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+	 * mark HW inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave(&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ehc();
-
-bail:
spin_unlock_irqrestore(&ehci->lock, flags);
// could save FLADJ in case of Vaux power loss
@@ -264,6 +264,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
int mask = INTR_MASK;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 0e26aa13f15..5cd967d2893 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
void __iomem *non_ehci = hcd->regs;
+ ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
if (!fsl_deep_sleep())
return 0;
@@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!fsl_deep_sleep())
return 0;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 13ead00aecd..ef3e88f0b3c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -31,13 +31,12 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include "../core/hcd.h"
-
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c7178bcde67..e7d3d8def28 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
ehci->owned_ports = 0;
}
+static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+ bool suspending)
+{
+ int port;
+ u32 temp;
+
+ /* If remote wakeup is enabled for the root hub but disabled
+ * for the controller, we must adjust all the port wakeup flags
+ * when the controller is suspended or resumed. In all other
+ * cases they don't need to be changed.
+ */
+ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
+ device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
+ return;
+
+ /* clear phy low-power mode before changing wakeup flags */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
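+			/* one 32-bit HOSTPC register per root-hub port */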
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+ }
+ msleep(5);
+ }
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status[port];
+ u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+ /* If we are suspending the controller, clear the flags.
+ * If we are resuming the controller, set the wakeup flags.
+ */
+ if (!suspending) {
+ if (t1 & PORT_CONNECT)
+ t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+ else
+ t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ }
+ ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
+ port + 1, t1, t2);
+ ehci_writel(ehci, t2, reg);
+ }
+
+ /* enter phy low-power mode again */
+ if (ehci->has_hostpc) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+ }
+ }
+}
+
static int ehci_bus_suspend (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
- u32 __iomem *hostpc_reg = NULL;
+ int changed;
ehci_dbg(ehci, "suspend root hub\n");
@@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
+ changed = 0;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
- u32 t2 = t1;
+ u32 t2 = t1 & ~PORT_WAKE_BITS;
- if (ehci->has_hostpc)
- hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
- + HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
@@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
set_bit(port, &ehci->bus_suspended);
}
- /* enable remote wakeup on all ports */
+ /* enable remote wakeup on all ports, if told to do so */
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable appropriate wake bits, otherwise the
* hardware can not go phy low power mode. If a race
* condition happens here(connection change during bits
* set), the port change detection will finally fix it.
*/
- if (t1 & PORT_CONNECT) {
+ if (t1 & PORT_CONNECT)
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
- t2 &= ~PORT_WKCONN_E;
- } else {
+ else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
- t2 &= ~PORT_WKDISC_E;
- }
- } else
- t2 &= ~PORT_WAKE_BITS;
+ }
if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
- if (hostpc_reg) {
- u32 t3;
+ changed = 1;
+ }
+ }
- spin_unlock_irq(&ehci->lock);
- msleep(5);/* 5ms for HCD enter low pwr mode */
- spin_lock_irq(&ehci->lock);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
- t3 = ehci_readl(ehci, hostpc_reg);
- ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
+ if (changed && ehci->has_hostpc) {
+ spin_unlock_irq(&ehci->lock);
+ msleep(5); /* 5 ms for HCD to enter low-power mode */
+ spin_lock_irq(&ehci->lock);
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+ u32 t3;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * port);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+ t3 = ehci_readl(ehci, hostpc_reg);
+ ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
- }
}
}
@@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
msleep(8);
spin_lock_irq(&ehci->lock);
+ /* clear phy low-power mode before resume */
+ if (ehci->bus_suspended && ehci->has_hostpc) {
+ i = HCS_N_PORTS(ehci->hcs_params);
+ while (i--) {
+ if (test_bit(i, &ehci->bus_suspended)) {
+ u32 __iomem *hostpc_reg;
+
+ hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
+ + HOSTPC0 + 4 * i);
+ temp = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp & ~HOSTPC_PHCD,
+ hostpc_reg);
+ }
+ }
+ spin_unlock_irq(&ehci->lock);
+ msleep(5);
+ spin_lock_irq(&ehci->lock);
+ }
+
/* manually resume the ports we suspended during bus_suspend() */
i = HCS_N_PORTS (ehci->hcs_params);
while (i--) {
@@ -659,7 +744,7 @@ static int ehci_hub_control (
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -675,16 +760,25 @@ static int ehci_hub_control (
goto error;
if (ehci->no_selective_suspend)
break;
- if (temp & PORT_SUSPEND) {
- if ((temp & PORT_PE) == 0)
- goto error;
- /* resume signaling for 20 msec */
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- ehci_writel(ehci, temp | PORT_RESUME,
- status_reg);
- ehci->reset_done [wIndex] = jiffies
- + msecs_to_jiffies (20);
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* clear phy low-power mode before resume */
+ if (hostpc_reg) {
+ temp1 = ehci_readl(ehci, hostpc_reg);
+ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
+ hostpc_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+				msleep(5);	/* wait to leave low-power mode */
+ spin_lock_irqsave(&ehci->lock, flags);
}
+ /* resume signaling for 20 msec */
+ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
@@ -729,12 +823,12 @@ static int ehci_hub_control (
// wPortChange bits
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc){
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* Hubs should disable port power on over-current.
@@ -791,7 +885,7 @@ static int ehci_hub_control (
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
ehci->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
/* force reset to complete */
@@ -833,7 +927,7 @@ static int ehci_hub_control (
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
// status may be from integrated TT
if (ehci->has_hostpc) {
temp1 = ehci_readl(ehci, hostpc_reg);
@@ -842,11 +936,11 @@ static int ehci_hub_control (
status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
/* maybe the port was unsuspended without our knowledge */
if (temp & (PORT_SUSPEND|PORT_RESUME)) {
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
} else if (test_bit(wIndex, &ehci->suspended_ports)) {
clear_bit(wIndex, &ehci->suspended_ports);
ehci->reset_done[wIndex] = 0;
@@ -855,13 +949,13 @@ static int ehci_hub_control (
}
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
if (test_bit(wIndex, &ehci->port_c_suspend))
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
#ifndef VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ead59f42e69..544ccfd7056 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -199,8 +199,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
mdelay(10);
- /* setup USBCONTROL. */
- ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
+ /* setup specific usb hw */
+ ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
if (ret < 0)
goto err_init;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 40a85833503..5450e628157 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -116,6 +116,8 @@
#define OMAP_UHH_DEBUG_CSR (0x44)
/* EHCI Register Set */
+#define EHCI_INSNREG04 (0xA0)
+#define EHCI_INSNREG04_DISABLE_UNSUSPEND (1 << 5)
#define EHCI_INSNREG05_ULPI (0xA4)
#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
@@ -181,7 +183,7 @@ struct ehci_hcd_omap {
void __iomem *ehci_base;
/* Regulators for USB PHYs.
- * Each PHY can have a seperate regulator.
+ * Each PHY can have a separate regulator.
*/
struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
@@ -352,8 +354,8 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
/* Bypass the TLL module for PHY mode operation */
- if (omap_rev() <= OMAP3430_REV_ES2_1) {
- dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n");
+ if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+ dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
@@ -382,6 +384,18 @@ static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+ /*
+ * An undocumented "feature" in the OMAP3 EHCI controller,
+ * causes suspended ports to be taken out of suspend when
+ * the USBCMD.Run/Stop bit is cleared (for example when
+ * we do ehci_bus_suspend).
+ * This breaks suspend-resume if the root-hub is allowed
+ * to suspend. Writing 1 to this undocumented register bit
+ * disables this feature and restores normal behavior.
+ */
+ ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
+ EHCI_INSNREG04_DISABLE_UNSUSPEND);
+
if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
@@ -659,6 +673,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
goto err_add_hcd;
}
+ /* root ports should always stay powered */
+ ehci_port_power(omap->ehci, 1);
+
return 0;
err_add_hcd:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index ead5f4f2aa5..d43d176161a 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -109,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
return retval;
switch (pdev->vendor) {
+ case PCI_VENDOR_ID_NEC:
+ ehci->need_io_watchdog = 0;
+ break;
case PCI_VENDOR_ID_INTEL:
ehci->need_io_watchdog = 0;
if (pdev->device == 0x27cc) {
@@ -284,23 +287,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
msleep(10);
/* Root hub was already suspended. Disable irq emission and
- * mark HW unaccessible, bail out if RH has been resumed. Use
- * the spinlock to properly synchronize with possible pending
- * RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
+	 * mark HW inaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
*/
spin_lock_irqsave (&ehci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
- rc = -EINVAL;
- goto bail;
- }
+ ehci_prepare_ports_for_controller_suspend(ehci);
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- bail:
spin_unlock_irqrestore (&ehci->lock, flags);
// could save FLADJ in case of Vaux power loss
@@ -330,6 +325,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
!hibernated) {
int mask = INTR_MASK;
+ ehci_prepare_ports_for_controller_resume(ehci);
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 8df33b8a634..5aec92866ab 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -108,7 +108,7 @@ ppc44x_enable_bmt(struct device_node *dn)
static int __devinit
ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dn = op->node;
+ struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ehci_hcd *ehci = NULL;
struct resource res;
@@ -274,13 +274,12 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
static struct of_platform_driver ehci_hcd_ppc_of_driver = {
- .name = "ppc-of-ehci",
- .match_table = ehci_hcd_ppc_of_match,
.probe = ehci_hcd_ppc_of_probe,
.remove = ehci_hcd_ppc_of_remove,
.shutdown = ehci_hcd_ppc_of_shutdown,
- .driver = {
- .name = "ppc-of-ehci",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "ppc-of-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_ppc_of_match,
},
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 89521775c56..11a79c4f4a9 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -663,7 +663,7 @@ qh_urb_transaction (
*/
i = urb->num_sgs;
if (len > 0 && i > 0) {
- sg = urb->sg->sg;
+ sg = urb->sg;
buf = sg_dma_address(sg);
/* urb->transfer_buffer_length may be smaller than the
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index f603bb2c0a8..013972bbde5 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -288,13 +288,12 @@ static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
- .name = "xilinx-of-ehci",
- .match_table = ehci_hcd_xilinx_of_match,
.probe = ehci_hcd_xilinx_of_probe,
.remove = ehci_hcd_xilinx_of_remove,
.shutdown = ehci_hcd_xilinx_of_shutdown,
- .driver = {
- .name = "xilinx-of-ehci",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "xilinx-of-ehci",
+ .owner = THIS_MODULE,
+ .of_match_table = ehci_hcd_xilinx_of_match,
},
};
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 556c0b48f3a..650a687f285 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -536,6 +536,16 @@ struct ehci_fstn {
/*-------------------------------------------------------------------------*/
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define ehci_prepare_ports_for_controller_suspend(ehci) \
+	ehci_adjust_port_wakeup_flags(ehci, true)
+
+#define ehci_prepare_ports_for_controller_resume(ehci) \
+	ehci_adjust_port_wakeup_flags(ehci, false)
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
/*
@@ -556,20 +566,20 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
case 0:
return 0;
case 1:
- return (1<<USB_PORT_FEAT_LOWSPEED);
+ return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
}
- return (1<<USB_PORT_FEAT_HIGHSPEED);
+ return USB_PORT_STAT_HIGH_SPEED;
}
#else
#define ehci_is_TDI(e) (0)
-#define ehci_port_speed(ehci, portsc) (1<<USB_PORT_FEAT_HIGHSPEED)
+#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED
#endif
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index e799f86dab1..6fe55004911 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -20,7 +20,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
void fhci_dbg_isr(struct fhci_hcd *fhci, int usb_er)
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 15379c63614..c7c8392a88b 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,12 +25,12 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
void fhci_start_sof_timer(struct fhci_hcd *fhci)
@@ -565,7 +565,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
const struct of_device_id *ofid)
{
struct device *dev = &ofdev->dev;
- struct device_node *node = ofdev->node;
+ struct device_node *node = dev->of_node;
struct usb_hcd *hcd;
struct fhci_hcd *fhci;
struct resource usb_regs;
@@ -670,7 +670,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
}
for (j = 0; j < NUM_PINS; j++) {
- fhci->pins[j] = qe_pin_request(ofdev->node, j);
+ fhci->pins[j] = qe_pin_request(node, j);
if (IS_ERR(fhci->pins[j])) {
ret = PTR_ERR(fhci->pins[j]);
dev_err(dev, "can't get pin %d: %d\n", j, ret);
@@ -813,8 +813,11 @@ static const struct of_device_id of_fhci_match[] = {
MODULE_DEVICE_TABLE(of, of_fhci_match);
static struct of_platform_driver of_fhci_driver = {
- .name = "fsl,usb-fhci",
- .match_table = of_fhci_match,
+ .driver = {
+ .name = "fsl,usb-fhci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_fhci_match,
+ },
.probe = of_fhci_probe,
.remove = __devexit_p(of_fhci_remove),
};
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 0cfaedc3e12..348fe62e94f 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -22,9 +22,9 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/gpio.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
#include "fhci.h"
/* virtual root hub specific descriptor */
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index 5591bfb499d..b0b88f57a5a 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
static void init_td(struct td *td)
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index f73c92359be..03be7494a47 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
/* maps the hardware error code to the USB error code */
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index ff43747a614..4f2cbdcc027 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -24,9 +24,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
-#include "../core/hcd.h"
#include "fhci.h"
static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 57013479d7f..7be548ca218 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -22,7 +22,7 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/usb.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "fhci.h"
#define DUMMY_BD_BUFFER 0xdeadbeef
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 72dae1c5ab3..71c3caaea4c 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -20,13 +20,14 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <asm/qe.h>
-#include "../core/hcd.h"
#define USB_CLOCK 48000000
@@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
static inline void *cq_get(struct kfifo *kfifo)
{
- void *p = NULL;
+ unsigned int sz;
+ void *p;
+
+ sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
+ if (sz != sizeof(p))
+ return NULL;
- kfifo_out(kfifo, (void *)&p, sizeof(p));
return p;
}
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 8a12f297645..ca0e98d8e1f 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -56,8 +56,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
#include "imx21-hcd.h"
#ifdef DEBUG
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 92de71dc772..d9e82123de2 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -65,6 +65,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/isp116x.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -72,7 +73,6 @@
#include <asm/system.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#include "isp116x.h"
#define DRIVER_VERSION "03 Nov 2005"
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 217fb517020..20a0dfe0fe3 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -77,6 +77,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
@@ -95,7 +96,6 @@ module_param(dbg_level, int, 0);
#define STUB_DEBUG_FILE
#endif
-#include "../core/hcd.h"
#include "../core/usb.h"
#include "isp1362.h"
@@ -1265,7 +1265,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
/* don't submit to a dead or disabled port */
if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
- (1 << USB_PORT_FEAT_ENABLE)) ||
+ USB_PORT_STAT_ENABLE) ||
!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
retval = -ENODEV;
@@ -2217,7 +2217,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
if (isp1362_hcd->pde)
- remove_proc_entry(proc_filename, 0);
+ remove_proc_entry(proc_filename, NULL);
}
#endif
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9f01293600b..dbcafa29c77 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -21,7 +22,6 @@
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
static struct kmem_cache *qtd_cachep;
@@ -111,7 +111,7 @@ struct isp1760_qh {
u32 ping;
};
-#define ehci_port_speed(priv, portsc) (1 << USB_PORT_FEAT_HIGHSPEED)
+#define ehci_port_speed(priv, portsc) USB_PORT_STAT_HIGH_SPEED
static unsigned int isp1760_readl(__u32 __iomem *regs)
{
@@ -713,12 +713,11 @@ static int check_error(struct ptd *ptd)
u32 dw3;
dw3 = le32_to_cpu(ptd->dw3);
- if (dw3 & DW3_HALT_BIT)
+ if (dw3 & DW3_HALT_BIT) {
error = -EPIPE;
- if (dw3 & DW3_ERROR_BIT) {
- printk(KERN_ERR "error bit is set in DW3\n");
- error = -EPIPE;
+ if (dw3 & DW3_ERROR_BIT)
+ pr_err("error bit is set in DW3\n");
}
if (dw3 & DW3_QTD_ACTIVE) {
@@ -1923,7 +1922,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -1987,7 +1986,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
/* whoever resumes must GetPortStatus to complete it!! */
@@ -2007,7 +2006,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* resume completed? */
else if (time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
priv->reset_done = 0;
/* stop resume signaling */
@@ -2031,7 +2030,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
priv->reset_done)) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
priv->reset_done = 0;
/* force reset to complete */
@@ -2062,18 +2061,18 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
printk(KERN_ERR "Warning: PORT_OWNER is set\n");
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= ehci_port_speed(priv, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 4293cfd28d6..ec85d0c3cc3 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -13,8 +13,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
#include "isp1760-hcd.h"
#ifdef CONFIG_PPC_OF
@@ -31,12 +31,12 @@ static int of_isp1760_probe(struct of_device *dev,
const struct of_device_id *match)
{
struct usb_hcd *hcd;
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
struct resource *res;
struct resource memory;
struct of_irq oirq;
int virq;
- u64 res_len;
+ resource_size_t res_len;
int ret;
const unsigned int *prop;
unsigned int devflags = 0;
@@ -45,13 +45,12 @@ static int of_isp1760_probe(struct of_device *dev,
if (ret)
return -ENXIO;
- res = request_mem_region(memory.start, memory.end - memory.start + 1,
- dev_name(&dev->dev));
+ res_len = resource_size(&memory);
+
+ res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
if (!res)
return -EBUSY;
- res_len = memory.end - memory.start + 1;
-
if (of_irq_map_one(dp, 0, &oirq)) {
ret = -ENODEV;
goto release_reg;
@@ -92,7 +91,7 @@ static int of_isp1760_probe(struct of_device *dev,
return ret;
release_reg:
- release_mem_region(memory.start, memory.end - memory.start + 1);
+ release_mem_region(memory.start, res_len);
return ret;
}
@@ -121,8 +120,11 @@ static const struct of_device_id of_isp1760_match[] = {
MODULE_DEVICE_TABLE(of, of_isp1760_match);
static struct of_platform_driver isp1760_of_driver = {
- .name = "nxp-isp1760",
- .match_table = of_isp1760_match,
+ .driver = {
+ .name = "nxp-isp1760",
+ .owner = THIS_MODULE,
+ .of_match_table = of_isp1760_match,
+ },
.probe = of_isp1760_probe,
.remove = of_isp1760_remove,
};
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index afe59be2364..fc576557d8a 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
+#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
@@ -43,7 +44,6 @@
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
@@ -1006,9 +1006,14 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
-#ifdef CONFIG_ARCH_OMAP
+#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
-#define PLATFORM_DRIVER ohci_hcd_omap_driver
+#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
+#endif
+
+#ifdef CONFIG_USB_OHCI_HCD_OMAP3
+#include "ohci-omap3.c"
+#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
#endif
#ifdef CONFIG_ARCH_LH7A404
@@ -1092,6 +1097,8 @@ MODULE_LICENSE ("GPL");
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
+ !defined(OMAP1_PLATFORM_DRIVER) && \
+ !defined(OMAP3_PLATFORM_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
!defined(SA1111_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
@@ -1133,6 +1140,18 @@ static int __init ohci_hcd_mod_init(void)
goto error_platform;
#endif
+#ifdef OMAP1_PLATFORM_DRIVER
+ retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_omap1_platform;
+#endif
+
+#ifdef OMAP3_PLATFORM_DRIVER
+ retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
+ if (retval < 0)
+ goto error_omap3_platform;
+#endif
+
#ifdef OF_PLATFORM_DRIVER
retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
if (retval < 0)
@@ -1200,6 +1219,14 @@ static int __init ohci_hcd_mod_init(void)
platform_driver_unregister(&PLATFORM_DRIVER);
error_platform:
#endif
+#ifdef OMAP1_PLATFORM_DRIVER
+ platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
+ error_omap1_platform:
+#endif
+#ifdef OMAP3_PLATFORM_DRIVER
+ platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
+ error_omap3_platform:
+#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
new file mode 100644
index 00000000000..2cc8a504b18
--- /dev/null
+++ b/drivers/usb/host/ohci-omap3.c
@@ -0,0 +1,735 @@
+/*
+ * ohci-omap3.c - driver for OHCI on OMAP3 and later processors
+ *
+ * Bus Glue for OMAP3 USBHOST 3 port OHCI controller
+ * This controller is also used in later OMAPs and AM35x chips
+ *
+ * Copyright (C) 2007-2010 Texas Instruments, Inc.
+ * Author: Vikram Pandita <vikram.pandita@ti.com>
+ * Author: Anand Gadiyar <gadiyar@ti.com>
+ *
+ * Based on ehci-omap.c and some other ohci glue layers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO (last updated Mar 10th, 2010):
+ * - add kernel-doc
+ * - Factor out code common to EHCI to a separate file
+ * - Make EHCI and OHCI coexist together
+ * - needs newer silicon versions to actually work
+ * - the last one to be loaded currently steps on the other's toes
+ * - Add hooks for configuring transceivers, etc. at init/exit
+ * - Add aggressive clock-management code
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <plat/usb.h>
+
+/*
+ * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
+ * Use ohci_omap_readl()/ohci_omap_writel() functions
+ */
+
+/* TLL Register Set */
+#define OMAP_USBTLL_REVISION (0x00)
+#define OMAP_USBTLL_SYSCONFIG (0x10)
+#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_USBTLL_SYSSTATUS (0x14)
+#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP_USBTLL_IRQSTATUS (0x18)
+#define OMAP_USBTLL_IRQENABLE (0x1C)
+
+#define OMAP_TLL_SHARED_CONF (0x30)
+#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
+#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
+#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
+#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
+#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
+
+#define	OMAP_TLL_CHANNEL_CONF(num)			(0x040 + 0x004 * (num))
+#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT 24
+#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
+#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
+#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
+#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS (1 << 1)
+#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
+
+#define OMAP_TLL_CHANNEL_COUNT 3
+
+/* UHH Register Set */
+#define OMAP_UHH_REVISION (0x00)
+#define OMAP_UHH_SYSCONFIG (0x10)
+#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
+#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_UHH_SYSSTATUS (0x14)
+#define OMAP_UHH_SYSSTATUS_UHHRESETDONE (1 << 0)
+#define OMAP_UHH_SYSSTATUS_OHCIRESETDONE (1 << 1)
+#define OMAP_UHH_SYSSTATUS_EHCIRESETDONE (1 << 2)
+#define OMAP_UHH_HOSTCONFIG (0x40)
+#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
+#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
+#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
+#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
+#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
+#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
+#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
+#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
+
+#define OMAP_UHH_DEBUG_CSR (0x44)
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ohci_omap_writel(void __iomem *base, u32 reg, u32 val)
+{
+ __raw_writel(val, base + reg);
+}
+
+static inline u32 ohci_omap_readl(void __iomem *base, u32 reg)
+{
+ return __raw_readl(base + reg);
+}
+
+static inline void ohci_omap_writeb(void __iomem *base, u8 reg, u8 val)
+{
+ __raw_writeb(val, base + reg);
+}
+
+static inline u8 ohci_omap_readb(void __iomem *base, u8 reg)
+{
+ return __raw_readb(base + reg);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct ohci_hcd_omap3 {
+ struct ohci_hcd *ohci;
+ struct device *dev;
+
+ struct clk *usbhost_ick;
+ struct clk *usbhost2_120m_fck;
+ struct clk *usbhost1_48m_fck;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
+
+ /* port_mode: TLL/PHY, 2/3/4/6-PIN, DP-DM/DAT-SE0 */
+ enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
+ void __iomem *uhh_base;
+ void __iomem *tll_base;
+ void __iomem *ohci_base;
+
+ unsigned es2_compatibility:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+static void ohci_omap3_clock_power(struct ohci_hcd_omap3 *omap, int on)
+{
+ if (on) {
+ clk_enable(omap->usbtll_ick);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost1_48m_fck);
+ clk_enable(omap->usbhost2_120m_fck);
+ } else {
+ clk_disable(omap->usbhost2_120m_fck);
+ clk_disable(omap->usbhost1_48m_fck);
+ clk_disable(omap->usbhost_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbtll_ick);
+ }
+}
+
+static int ohci_omap3_init(struct usb_hcd *hcd)
+{
+ dev_dbg(hcd->self.controller, "starting OHCI controller\n");
+
+ return ohci_init(hcd_to_ohci(hcd));
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ohci_omap3_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ /*
+ * RemoteWakeupConnected has to be set explicitly before
+ * calling ohci_run. The reset value of RWC is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+ writel(OHCI_CTRL_RWC, &ohci->regs->control);
+
+ ret = ohci_run(ohci);
+
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "can't start\n");
+ ohci_stop(hcd);
+ }
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * convert the port-mode enum to a value we can use in the FSLSMODE
+ * field of USBTLL_CHANNEL_CONF
+ */
+static unsigned ohci_omap3_fslsmode(enum ohci_omap3_port_mode mode)
+{
+ switch (mode) {
+ case OMAP_OHCI_PORT_MODE_UNUSED:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ return 0x0;
+
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ return 0x1;
+
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ return 0x2;
+
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ return 0x3;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ return 0x4;
+
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ return 0x5;
+
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ return 0x6;
+
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ return 0x7;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ return 0xA;
+
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ return 0xB;
+ default:
+ pr_warning("Invalid port mode, using default\n");
+ return 0x0;
+ }
+}
+
+static void ohci_omap3_tll_config(struct ohci_hcd_omap3 *omap)
+{
+ u32 reg;
+ int i;
+
+ /* Program TLL SHARED CONF */
+ reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;
+ reg |= OMAP_TLL_SHARED_CONF_USB_DIVRATION;
+ reg |= OMAP_TLL_SHARED_CONF_FCLK_IS_ON;
+ ohci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
+
+ /* Program each TLL channel */
+ /*
+ * REVISIT: Only the 3-pin and 4-pin PHY modes have
+ * actually been tested.
+ */
+ for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+
+ /* Enable only those channels that are actually used */
+ if (omap->port_mode[i] == OMAP_OHCI_PORT_MODE_UNUSED)
+ continue;
+
+ reg = ohci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+ reg |= ohci_omap3_fslsmode(omap->port_mode[i])
+ << OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
+ ohci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+ }
+}
+
+/* omap3_start_ohci
+ * - Start the TI USBHOST controller
+ */
+static int omap3_start_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u32 reg = 0;
+ int ret = 0;
+
+ dev_dbg(omap->dev, "starting TI OHCI USB Controller\n");
+
+ /* Get all the clock handles we need */
+ omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ dev_err(omap->dev, "could not get usbhost_ick\n");
+ ret = PTR_ERR(omap->usbhost_ick);
+ goto err_host_ick;
+ }
+
+ omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
+ if (IS_ERR(omap->usbhost2_120m_fck)) {
+ dev_err(omap->dev, "could not get usbhost_120m_fck\n");
+ ret = PTR_ERR(omap->usbhost2_120m_fck);
+ goto err_host_120m_fck;
+ }
+
+ omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
+ if (IS_ERR(omap->usbhost1_48m_fck)) {
+ dev_err(omap->dev, "could not get usbhost_48m_fck\n");
+ ret = PTR_ERR(omap->usbhost1_48m_fck);
+ goto err_host_48m_fck;
+ }
+
+ omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ dev_err(omap->dev, "could not get usbtll_fck\n");
+ ret = PTR_ERR(omap->usbtll_fck);
+ goto err_tll_fck;
+ }
+
+ omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ dev_err(omap->dev, "could not get usbtll_ick\n");
+ ret = PTR_ERR(omap->usbtll_ick);
+ goto err_tll_ick;
+ }
+
+ /* Now enable all the clocks in the correct order */
+ ohci_omap3_clock_power(omap, 1);
+
+ /* perform TLL soft reset, and wait until reset is complete */
+ ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(omap->dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_sys_status;
+ }
+ }
+
+ dev_dbg(omap->dev, "TLL reset done\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_CACTIVITY);
+
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+ reg &= ~OMAP_UHH_SYSCONFIG_SOFTRESET;
+
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
+ reg = ohci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+
+ /* setup ULPI bypass and burst configurations */
+ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
+ reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
+
+ /*
+ * REVISIT: Pi_CONNECT_STATUS controls MStandby
+ * assertion and Swakeup generation - let us not
+ * worry about this for now. OMAP HWMOD framework
+ * might take care of this later. If not, we can
+ * update these registers when adding aggressive
+ * clock management code.
+ *
+ * For now, turn off all the Pi_CONNECT_STATUS bits
+ *
+ if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+ */
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+
+ if (omap->es2_compatibility) {
+ /*
+ * All OHCI modes need to go through the TLL,
+ * unlike in the EHCI case. So use UTMI mode
+ * for all ports for OHCI, on ES2.x silicon
+ */
+ dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ } else {
+ dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
+ if (omap->port_mode[0] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+
+ if (omap->port_mode[1] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+
+ if (omap->port_mode[2] == OMAP_OHCI_PORT_MODE_UNUSED)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+
+ }
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
+ dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+
+ ohci_omap3_tll_config(omap);
+
+ return 0;
+
+err_sys_status:
+ ohci_omap3_clock_power(omap, 0);
+ clk_put(omap->usbtll_ick);
+
+err_tll_ick:
+ clk_put(omap->usbtll_fck);
+
+err_tll_fck:
+ clk_put(omap->usbhost1_48m_fck);
+
+err_host_48m_fck:
+ clk_put(omap->usbhost2_120m_fck);
+
+err_host_120m_fck:
+ clk_put(omap->usbhost_ick);
+
+err_host_ick:
+ return ret;
+}
+
+static void omap3_stop_ohci(struct ohci_hcd_omap3 *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	dev_dbg(omap->dev, "stopping TI OHCI USB Controller\n");
+
+ /* Reset USBHOST for insmod/rmmod to work */
+ ohci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+ OMAP_UHH_SYSCONFIG_SOFTRESET);
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_UHHRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_OHCIRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ohci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & OMAP_UHH_SYSSTATUS_EHCIRESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+	ohci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1)); /* TLL SOFTRESET */
+
+	while (!(ohci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+		& (1 << 0))) { /* wait for TLL RESETDONE */
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ ohci_omap3_clock_power(omap, 0);
+
+ if (omap->usbtll_fck != NULL) {
+ clk_put(omap->usbtll_fck);
+ omap->usbtll_fck = NULL;
+ }
+
+ if (omap->usbhost_ick != NULL) {
+ clk_put(omap->usbhost_ick);
+ omap->usbhost_ick = NULL;
+ }
+
+ if (omap->usbhost1_48m_fck != NULL) {
+ clk_put(omap->usbhost1_48m_fck);
+ omap->usbhost1_48m_fck = NULL;
+ }
+
+ if (omap->usbhost2_120m_fck != NULL) {
+ clk_put(omap->usbhost2_120m_fck);
+ omap->usbhost2_120m_fck = NULL;
+ }
+
+ if (omap->usbtll_ick != NULL) {
+ clk_put(omap->usbtll_ick);
+ omap->usbtll_ick = NULL;
+ }
+
+ dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ohci_omap3_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OMAP3 OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_omap3_init,
+ .start = ohci_omap3_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * configure so an HC device and id are always provided
+ * always called with process context; sleeping is OK
+ */
+
+/**
+ * ohci_hcd_omap3_probe - initialize OMAP-based HCDs
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
+ struct ohci_hcd_omap3 *omap;
+ struct resource *res;
+ struct usb_hcd *hcd;
+ int ret = -ENODEV;
+ int irq;
+
+ if (usb_disabled())
+ goto err_disabled;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "missing platform_data\n");
+ goto err_pdata;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ ret = -ENOMEM;
+ goto err_disabled;
+ }
+
+ hcd = usb_create_hcd(&ohci_omap3_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_create_hcd;
+ }
+
+ platform_set_drvdata(pdev, omap);
+ omap->dev = &pdev->dev;
+ omap->port_mode[0] = pdata->port_mode[0];
+ omap->port_mode[1] = pdata->port_mode[1];
+ omap->port_mode[2] = pdata->port_mode[2];
+ omap->es2_compatibility = pdata->es2_compatibility;
+ omap->ohci = hcd_to_ohci(hcd);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "OHCI ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(&pdev->dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_uhh_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(&pdev->dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll_ioremap;
+ }
+
+ ret = omap3_start_ohci(omap, hcd);
+ if (ret) {
+		dev_dbg(&pdev->dev, "failed to start ohci\n");
+ goto err_start;
+ }
+
+ ohci_hcd_init(omap->ohci);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
+
+ return 0;
+
+err_add_hcd:
+ omap3_stop_ohci(omap, hcd);
+
+err_start:
+ iounmap(omap->tll_base);
+
+err_tll_ioremap:
+ iounmap(omap->uhh_base);
+
+err_uhh_ioremap:
+ iounmap(hcd->regs);
+
+err_ioremap:
+ usb_put_hcd(hcd);
+
+err_create_hcd:
+ kfree(omap);
+err_pdata:
+err_disabled:
+ return ret;
+}
+
+/*
+ * may be called without controller electrically present
+ * may be called with controller, bus, and devices active
+ */
+
+/**
+ * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of ohci_hcd_omap3_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+
+ usb_remove_hcd(hcd);
+ omap3_stop_ohci(omap, hcd);
+ iounmap(hcd->regs);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ usb_put_hcd(hcd);
+ kfree(omap);
+
+ return 0;
+}
+
+static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
+{
+ struct ohci_hcd_omap3 *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ohci_to_hcd(omap->ohci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ohci_hcd_omap3_driver = {
+ .probe = ohci_hcd_omap3_probe,
+ .remove = __devexit_p(ohci_hcd_omap3_remove),
+ .shutdown = ohci_hcd_omap3_shutdown,
+ .driver = {
+ .name = "ohci-omap3",
+ },
+};
+
+MODULE_ALIAS("platform:ohci-omap3");
+MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 103263c230c..df165917412 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -83,7 +83,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
static int __devinit
ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dn = op->node;
+ struct device_node *dn = op->dev.of_node;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct resource res;
@@ -244,18 +244,13 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
static struct of_platform_driver ohci_hcd_ppc_of_driver = {
- .name = "ppc-of-ohci",
- .match_table = ohci_hcd_ppc_of_match,
.probe = ohci_hcd_ppc_of_probe,
.remove = ohci_hcd_ppc_of_remove,
.shutdown = ohci_hcd_ppc_of_shutdown,
-#ifdef CONFIG_PM
- /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/
- /*.resume = ohci_hcd_ppc_soc_drv_resume,*/
-#endif
- .driver = {
- .name = "ppc-of-ohci",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "ppc-of-ohci",
+ .owner = THIS_MODULE,
+ .of_match_table = ohci_hcd_ppc_of_match,
},
};
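
The ohci-ppc-of hunk above is part of the tree-wide move of OF match data into the generic driver core: the bus-specific .name/.match_table fields go away and the embedded struct device_driver carries .of_match_table instead. A hedged, self-contained sketch of the resulting registration shape; every "example" identifier and the compatible string are illustrative stubs, not taken from the ppc-of OHCI glue.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_platform.h>

static int example_probe(struct of_device *op, const struct of_device_id *match)
{
	return -ENODEV;	/* stub: a real driver would map registers and add an hcd here */
}

static int example_remove(struct of_device *op)
{
	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "example,ohci" },	/* hypothetical compatible string */
	{},
};

static struct of_platform_driver example_of_driver = {
	.probe		= example_probe,
	.remove		= example_remove,
	.driver		= {
		.name		= "example-of-ohci",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,
	},
};
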
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e62b30b3e42..f608dfd09a8 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -34,12 +34,11 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include "../core/hcd.h"
-
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
@@ -3154,10 +3153,10 @@ static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
case 0:
return 0;
case 1:
- return 1 << USB_PORT_FEAT_LOWSPEED;
+ return USB_PORT_STAT_LOW_SPEED;
case 2:
default:
- return 1 << USB_PORT_FEAT_HIGHSPEED;
+ return USB_PORT_STAT_HIGH_SPEED;
}
}
@@ -3202,7 +3201,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
* Even if OWNER is set, so the port is owned by the
* companion controller, khubd needs to be able to clear
* the port-change status bits (especially
- * USB_PORT_FEAT_C_CONNECTION).
+ * USB_PORT_STAT_C_CONNECTION).
*/
switch (wValue) {
@@ -3264,11 +3263,11 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC) && !ignore_oc)
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
@@ -3286,7 +3285,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
/* resume completed? */
else if (time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
oxu->reset_done[wIndex] = 0;
/* stop resume signaling */
@@ -3309,7 +3308,7 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
if ((temp & PORT_RESET)
&& time_after_eq(jiffies,
oxu->reset_done[wIndex])) {
- status |= 1 << USB_PORT_FEAT_C_RESET;
+ status |= USB_PORT_STAT_C_RESET << 16;
oxu->reset_done[wIndex] = 0;
/* force reset to complete */
@@ -3348,20 +3347,20 @@ static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= oxu_port_speed(oxu, temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & (PORT_SUSPEND|PORT_RESUME))
- status |= 1 << USB_PORT_FEAT_SUSPEND;
+ status |= USB_PORT_STAT_SUSPEND;
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
#ifndef OXU_VERBOSE_DEBUG
if (status & ~0xffff) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d478ffad59b..6db57ab6079 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -40,7 +41,6 @@
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include "../core/hcd.h"
#include "r8a66597.h"
MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
@@ -1018,10 +1018,10 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
rh->scount = R8A66597_MAX_SAMPLING;
if (connect)
- rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+ rh->port |= USB_PORT_STAT_CONNECTION;
else
- rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
- rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ rh->port &= ~USB_PORT_STAT_CONNECTION;
+ rh->port |= USB_PORT_STAT_C_CONNECTION << 16;
r8a66597_root_hub_start_polling(r8a66597);
}
@@ -1059,15 +1059,14 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
u16 speed = get_rh_usb_speed(r8a66597, port);
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
- rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) |
- (1 << USB_PORT_FEAT_LOWSPEED));
+ rh->port &= ~(USB_PORT_STAT_HIGH_SPEED | USB_PORT_STAT_LOW_SPEED);
if (speed == HSMODE)
- rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ rh->port |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == LSMODE)
- rh->port |= (1 << USB_PORT_FEAT_LOWSPEED);
+ rh->port |= USB_PORT_STAT_LOW_SPEED;
- rh->port &= ~(1 << USB_PORT_FEAT_RESET);
- rh->port |= 1 << USB_PORT_FEAT_ENABLE;
+	rh->port &= ~USB_PORT_STAT_RESET;
+ rh->port |= USB_PORT_STAT_ENABLE;
}
/* this function must be called with interrupt disabled */
@@ -1706,7 +1705,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
u16 tmp;
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
- if (rh->port & (1 << USB_PORT_FEAT_RESET)) {
+ if (rh->port & USB_PORT_STAT_RESET) {
unsigned long dvstctr_reg = get_dvstctr_reg(port);
tmp = r8a66597_read(r8a66597, dvstctr_reg);
@@ -1718,7 +1717,7 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
r8a66597_usb_connect(r8a66597, port);
}
- if (!(rh->port & (1 << USB_PORT_FEAT_CONNECTION))) {
+ if (!(rh->port & USB_PORT_STAT_CONNECTION)) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
}
@@ -2186,7 +2185,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- rh->port &= ~(1 << USB_PORT_FEAT_POWER);
+ rh->port &= ~USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_SUSPEND:
break;
@@ -2227,12 +2226,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_POWER:
r8a66597_port_power(r8a66597, port, 1);
- rh->port |= (1 << USB_PORT_FEAT_POWER);
+ rh->port |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_RESET: {
struct r8a66597_device *dev = rh->dev;
- rh->port |= (1 << USB_PORT_FEAT_RESET);
+ rh->port |= USB_PORT_STAT_RESET;
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev, 1);
@@ -2270,12 +2269,12 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd)
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (!(rh->port & (1 << USB_PORT_FEAT_ENABLE)))
+ if (!(rh->port & USB_PORT_STAT_ENABLE))
continue;
dbg("suspend port = %d", port);
r8a66597_bclr(r8a66597, UACT, dvstctr_reg); /* suspend */
- rh->port |= 1 << USB_PORT_FEAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_SUSPEND;
if (rh->dev->udev->do_remote_wakeup) {
msleep(3); /* waiting last SOF */
@@ -2301,12 +2300,12 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
unsigned long dvstctr_reg = get_dvstctr_reg(port);
- if (!(rh->port & (1 << USB_PORT_FEAT_SUSPEND)))
+ if (!(rh->port & USB_PORT_STAT_SUSPEND))
continue;
dbg("resume port = %d", port);
- rh->port &= ~(1 << USB_PORT_FEAT_SUSPEND);
- rh->port |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+	rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
msleep(50);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 3b867a8af7b..bcf9f0e809d 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -45,6 +45,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/sl811.h>
+#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -53,7 +54,6 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#include "../core/hcd.h"
#include "sl811.h"
@@ -90,10 +90,10 @@ static void port_power(struct sl811 *sl811, int is_on)
/* hub is inactive unless the port is powered */
if (is_on) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_POWER))
+ if (sl811->port1 & USB_PORT_STAT_POWER)
return;
- sl811->port1 = (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 = USB_PORT_STAT_POWER;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 = 0;
@@ -407,7 +407,7 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
static inline void start_transfer(struct sl811 *sl811)
{
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND)
return;
if (sl811->active_a == NULL) {
sl811->active_a = start(sl811, SL811_EP_A(SL811_HOST_BUF));
@@ -721,23 +721,23 @@ retry:
* force the reset and make khubd clean up later.
*/
if (irqstat & SL11H_INTMASK_RD)
- sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ sl811->port1 &= ~USB_PORT_STAT_CONNECTION;
else
- sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
+ sl811->port1 |= USB_PORT_STAT_CONNECTION;
- sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ sl811->port1 |= USB_PORT_STAT_C_CONNECTION << 16;
} else if (irqstat & SL11H_INTMASK_RD) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
DBG("wakeup\n");
- sl811->port1 |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
sl811->stat_wake++;
} else
irqstat &= ~SL11H_INTMASK_RD;
}
if (irqstat) {
- if (sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
+ if (sl811->port1 & USB_PORT_STAT_ENABLE)
start_transfer(sl811);
ret = IRQ_HANDLED;
if (retries--)
@@ -819,7 +819,7 @@ static int sl811h_urb_enqueue(
spin_lock_irqsave(&sl811->lock, flags);
/* don't submit to a dead or disabled port */
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
+ if (!(sl811->port1 & USB_PORT_STAT_ENABLE)
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
kfree(ep);
@@ -1119,9 +1119,9 @@ sl811h_timer(unsigned long _sl811)
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
- const u32 mask = (1 << USB_PORT_FEAT_CONNECTION)
- | (1 << USB_PORT_FEAT_ENABLE)
- | (1 << USB_PORT_FEAT_LOWSPEED);
+ const u32 mask = USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED;
spin_lock_irqsave(&sl811->lock, flags);
@@ -1135,8 +1135,8 @@ sl811h_timer(unsigned long _sl811)
switch (signaling) {
case SL11H_CTL1MASK_SE0:
DBG("end reset\n");
- sl811->port1 = (1 << USB_PORT_FEAT_C_RESET)
- | (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
+ | USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
/* don't wrongly ack RD */
if (irqstat & SL11H_INTMASK_INSRMV)
@@ -1144,7 +1144,7 @@ sl811h_timer(unsigned long _sl811)
break;
case SL11H_CTL1MASK_K:
DBG("end resume\n");
- sl811->port1 &= ~(1 << USB_PORT_FEAT_SUSPEND);
+ sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
break;
default:
DBG("odd timer signaling: %02x\n", signaling);
@@ -1154,26 +1154,26 @@ sl811h_timer(unsigned long _sl811)
if (irqstat & SL11H_INTMASK_RD) {
/* usbcore nukes all pending transactions on disconnect */
- if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION))
- sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
- | (1 << USB_PORT_FEAT_C_ENABLE);
+ if (sl811->port1 & USB_PORT_STAT_CONNECTION)
+ sl811->port1 |= (USB_PORT_STAT_C_CONNECTION << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
sl811->port1 &= ~mask;
sl811->irq_enable = SL11H_INTMASK_INSRMV;
} else {
sl811->port1 |= mask;
if (irqstat & SL11H_INTMASK_DP)
- sl811->port1 &= ~(1 << USB_PORT_FEAT_LOWSPEED);
+ sl811->port1 &= ~USB_PORT_STAT_LOW_SPEED;
sl811->irq_enable = SL11H_INTMASK_INSRMV | SL11H_INTMASK_RD;
}
- if (sl811->port1 & (1 << USB_PORT_FEAT_CONNECTION)) {
+ if (sl811->port1 & USB_PORT_STAT_CONNECTION) {
u8 ctrl2 = SL811HS_CTL2_INIT;
sl811->irq_enable |= SL11H_INTMASK_DONE_A;
#ifdef USE_B
sl811->irq_enable |= SL11H_INTMASK_DONE_B;
#endif
- if (sl811->port1 & (1 << USB_PORT_FEAT_LOWSPEED)) {
+ if (sl811->port1 & USB_PORT_STAT_LOW_SPEED) {
sl811->ctrl1 |= SL11H_CTL1MASK_LSPD;
ctrl2 |= SL811HS_CTL2MASK_DSWAP;
}
@@ -1233,7 +1233,7 @@ sl811h_hub_control(
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- sl811->port1 &= (1 << USB_PORT_FEAT_POWER);
+ sl811->port1 &= USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
sl811->irq_enable = SL11H_INTMASK_INSRMV;
@@ -1241,7 +1241,7 @@ sl811h_hub_control(
sl811->irq_enable);
break;
case USB_PORT_FEAT_SUSPEND:
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)))
+ if (!(sl811->port1 & USB_PORT_STAT_SUSPEND))
break;
/* 20 msec of resume/K signaling, other irqs blocked */
@@ -1290,9 +1290,9 @@ sl811h_hub_control(
goto error;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- if (sl811->port1 & (1 << USB_PORT_FEAT_RESET))
+ if (sl811->port1 & USB_PORT_STAT_RESET)
goto error;
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)))
+ if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
goto error;
DBG("suspend...\n");
@@ -1303,9 +1303,9 @@ sl811h_hub_control(
port_power(sl811, 1);
break;
case USB_PORT_FEAT_RESET:
- if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND))
+ if (sl811->port1 & USB_PORT_STAT_SUSPEND)
goto error;
- if (!(sl811->port1 & (1 << USB_PORT_FEAT_POWER)))
+ if (!(sl811->port1 & USB_PORT_STAT_POWER))
break;
/* 50 msec of reset/SE0 signaling, irqs blocked */
@@ -1314,7 +1314,7 @@ sl811h_hub_control(
sl811->irq_enable);
sl811->ctrl1 = SL11H_CTL1MASK_SE0;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
- sl811->port1 |= (1 << USB_PORT_FEAT_RESET);
+ sl811->port1 |= USB_PORT_STAT_RESET;
mod_timer(&sl811->timer, jiffies
+ msecs_to_jiffies(50));
break;
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 39d253e841f..58cb73c8420 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -47,7 +47,6 @@ static const char driver_name[DEV_NAME_LEN] = "sl811_cs";
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
} local_info_t;
static void sl811_cs_release(struct pcmcia_device * link);
@@ -163,8 +162,7 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
/* we need an interrupt */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -186,7 +184,6 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
static int sl811_cs_config(struct pcmcia_device *link)
{
struct device *parent = &link->dev;
- local_info_t *dev = link->priv;
int ret;
dev_dbg(&link->dev, "sl811_cs_config\n");
@@ -197,31 +194,24 @@ static int sl811_cs_config(struct pcmcia_device *link)
/* require an IRQ and two registers */
if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
goto failed;
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- } else
+
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- sprintf(dev->node.dev_name, driver_name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
printk("\n");
- if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
+ if (sl811_hc_init(parent, link->io.BasePort1, link->irq)
< 0) {
failed:
printk(KERN_WARNING "sl811_cs_config failed\n");
@@ -241,10 +231,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Initialize */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 228f2b070f2..5b31bae92db 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -56,7 +57,6 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
-#include "../core/hcd.h"
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
* If you're going to try stuff like this, you need to split
@@ -1446,9 +1446,9 @@ static void u132_hcd_endp_work_scheduler(struct work_struct *work)
return;
} else {
int retval;
- u8 address = u132->addr[endp->usb_addr].address;
struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
endp->queue_next];
+ address = u132->addr[endp->usb_addr].address;
endp->active = 1;
ring->curr_endp = endp;
ring->in_use = 1;
@@ -3120,8 +3120,8 @@ static int __devinit u132_probe(struct platform_device *pdev)
ftdi_elan_gone_away(pdev);
return -ENOMEM;
} else {
- int retval = 0;
struct u132 *u132 = hcd_to_u132(hcd);
+ retval = 0;
hcd->rsrc_start = 0;
mutex_lock(&u132_module_lock);
list_add_tail(&u132->u132_list, &u132_static_list);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 09197067fe6..6637e52736d 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -38,6 +38,7 @@
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/bitops.h>
#include <linux/dmi.h>
@@ -46,7 +47,6 @@
#include <asm/irq.h>
#include <asm/system.h>
-#include "../core/hcd.h"
#include "uhci-hcd.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index c5305b599ca..767af265e00 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -30,7 +30,7 @@ struct whc_dbg {
struct dentry *pzl_f;
};
-void qset_print(struct seq_file *s, struct whc_qset *qset)
+static void qset_print(struct seq_file *s, struct whc_qset *qset)
{
static const char *qh_type[] = {
"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 141d049beb3..ab5a14fbfee 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
wurb->urb = urb;
INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
- if (urb->sg) {
+ if (urb->num_sgs) {
ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
if (ret == -EINVAL) {
qset_free_stds(qset, urb);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 105fa8b025b..fcbf4abbf38 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -364,6 +364,30 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
xhci_debug_segment(xhci, seg);
}
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep)
+{
+ int i;
+ struct xhci_ring *ring;
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ for (i = 1; i < ep->stream_info->num_streams; i++) {
+ ring = ep->stream_info->stream_rings[i];
+ xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
+ slot_id, ep_index, i);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
+ slot_id, ep_index);
+ xhci_debug_segment(xhci, ring->deq_seg);
+ }
+}
+
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
u32 addr = (u32) erst->erst_dma_addr;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 208b805b80e..a1a7a979553 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -64,15 +64,15 @@ static void xhci_hub_descriptor(struct xhci_hcd *xhci,
static unsigned int xhci_port_speed(unsigned int port_status)
{
if (DEV_LOWSPEED(port_status))
- return 1 << USB_PORT_FEAT_LOWSPEED;
+ return USB_PORT_STAT_LOW_SPEED;
if (DEV_HIGHSPEED(port_status))
- return 1 << USB_PORT_FEAT_HIGHSPEED;
+ return USB_PORT_STAT_HIGH_SPEED;
if (DEV_SUPERSPEED(port_status))
- return 1 << USB_PORT_FEAT_SUPERSPEED;
+ return USB_PORT_STAT_SUPER_SPEED;
/*
* FIXME: Yes, we should check for full speed, but the core uses that as
* a default in portspeed() in usb/core/hub.c (which is the only place
- * USB_PORT_FEAT_*SPEED is used).
+ * USB_PORT_STAT_*_SPEED is used).
*/
return 0;
}
@@ -205,27 +205,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* wPortChange bits */
if (temp & PORT_CSC)
- status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
if (temp & PORT_PEC)
- status |= 1 << USB_PORT_FEAT_C_ENABLE;
+ status |= USB_PORT_STAT_C_ENABLE << 16;
if ((temp & PORT_OCC))
- status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
/*
* FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
* changes
*/
if (temp & PORT_CONNECT) {
- status |= 1 << USB_PORT_FEAT_CONNECTION;
+ status |= USB_PORT_STAT_CONNECTION;
status |= xhci_port_speed(temp);
}
if (temp & PORT_PE)
- status |= 1 << USB_PORT_FEAT_ENABLE;
+ status |= USB_PORT_STAT_ENABLE;
if (temp & PORT_OC)
- status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+ status |= USB_PORT_STAT_OVERCURRENT;
if (temp & PORT_RESET)
- status |= 1 << USB_PORT_FEAT_RESET;
+ status |= USB_PORT_STAT_RESET;
if (temp & PORT_POWER)
- status |= 1 << USB_PORT_FEAT_POWER;
+ status |= USB_PORT_STAT_POWER;
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
@@ -298,7 +298,6 @@ error:
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
* Ports are 0-indexed from the HCD point of view,
 * and 1-indexed from the USB core point of view.
- * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
*
* Note that the status change bits will be cleared as soon as a port status
* change event is generated, so we use the saved status from that event.
@@ -315,14 +314,9 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
ports = HCS_MAX_PORTS(xhci->hcs_params1);
/* Initial status is no changes */
- buf[0] = 0;
+ retval = (ports + 8) / 8;
+ memset(buf, 0, retval);
status = 0;
- if (ports > 7) {
- buf[1] = 0;
- retval = 2;
- } else {
- retval = 1;
- }
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
@@ -331,10 +325,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
NUM_PORT_REGS*i;
temp = xhci_readl(xhci, addr);
if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
- if (i < 7)
- buf[0] |= 1 << (i + 1);
- else
- buf[1] |= 1 << (i - 7);
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
}
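
The xhci_hub_status_data() rework above generalizes the change bitmap beyond the old two-byte special case: bit 0 of the buffer is reserved for the hub itself, port N (1-based) owns bit N, and the buffer spans (ports + 8) / 8 bytes. A standalone worked example of that indexing with a hypothetical 15-port controller.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[32];
	unsigned int ports = 15;		/* hypothetical port count */
	unsigned int len = (ports + 8) / 8;	/* 2 bytes: hub bit + 15 port bits */
	unsigned int i;

	memset(buf, 0, len);

	for (i = 0; i < ports; i++) {
		int changed = (i == 0 || i == 8);	/* pretend ports 1 and 9 changed */

		if (changed)
			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
	}

	printf("len=%u buf[0]=0x%02x buf[1]=0x%02x\n", len, buf[0], buf[1]);
	/* prints: len=2 buf[0]=0x02 buf[1]=0x02 */
	return 0;
}
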
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d64f5724bfc..fd9e03afd91 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -41,13 +41,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
seg = kzalloc(sizeof *seg, flags);
if (!seg)
- return 0;
+ return NULL;
xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
- return 0;
+ return NULL;
}
xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
seg->trbs, (unsigned long long)dma);
@@ -159,7 +159,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
ring = kzalloc(sizeof *(ring), flags);
xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
- return 0;
+ return NULL;
INIT_LIST_HEAD(&ring->td_list);
if (num_segs == 0)
@@ -196,7 +196,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
fail:
xhci_ring_free(xhci, ring);
- return 0;
+ return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
@@ -247,7 +247,7 @@ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
@@ -265,7 +265,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
return ctx;
}
-void xhci_free_container_ctx(struct xhci_hcd *xhci,
+static void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
@@ -304,6 +304,422 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
+
+/***************** Streams structures manipulation *************************/
+
+void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+ pci_free_consistent(pdev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ stream_ctx, dma);
+ else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ return dma_pool_free(xhci->small_streams_pool,
+ stream_ctx, dma);
+ else
+ return dma_pool_free(xhci->medium_streams_pool,
+ stream_ctx, dma);
+}
+
+/*
+ * The stream context array for each endpoint with bulk streams enabled can
+ * vary in size, based on:
+ * - how many streams the endpoint supports,
+ * - the maximum primary stream array size the host controller supports,
+ * - and how many streams the device driver asks for.
+ *
+ * The stream context array must be a power of 2, and can be as small as
+ * 64 bytes or as large as 1MB.
+ */
+struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs, dma_addr_t *dma,
+ gfp_t mem_flags)
+{
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+ if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
+ return pci_alloc_consistent(pdev,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
+ dma);
+ else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
+ return dma_pool_alloc(xhci->small_streams_pool,
+ mem_flags, dma);
+ else
+ return dma_pool_alloc(xhci->medium_streams_pool,
+ mem_flags, dma);
+}
+
+struct xhci_ring *xhci_dma_to_transfer_ring(
+ struct xhci_virt_ep *ep,
+ u64 address)
+{
+ if (ep->ep_state & EP_HAS_STREAMS)
+ return radix_tree_lookup(&ep->stream_info->trb_address_map,
+ address >> SEGMENT_SHIFT);
+ return ep->ring;
+}
+
+/* Only use this when you know stream_info is valid */
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static struct xhci_ring *dma_to_stream_ring(
+ struct xhci_stream_info *stream_info,
+ u64 address)
+{
+ return radix_tree_lookup(&stream_info->trb_address_map,
+ address >> SEGMENT_SHIFT);
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+struct xhci_ring *xhci_stream_id_to_ring(
+ struct xhci_virt_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+
+ if (stream_id == 0)
+ return ep->ring;
+ if (!ep->stream_info)
+ return NULL;
+
+ if (stream_id > ep->stream_info->num_streams)
+ return NULL;
+ return ep->stream_info->stream_rings[stream_id];
+}
+
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ /* Common case: no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (stream_id == 0) {
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has streams, "
+ "but URB has no stream ID.\n",
+ slot_id, ep_index);
+ return NULL;
+ }
+
+ if (stream_id < ep->stream_info->num_streams)
+ return ep->stream_info->stream_rings[stream_id];
+
+ xhci_warn(xhci,
+ "WARN: Slot ID %u, ep index %u has "
+ "stream IDs 1 to %u allocated, "
+ "but stream ID %u is requested.\n",
+ slot_id, ep_index,
+ ep->stream_info->num_streams - 1,
+ stream_id);
+ return NULL;
+}
+
+/* Get the right ring for the given URB.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+static int xhci_test_radix_tree(struct xhci_hcd *xhci,
+ unsigned int num_streams,
+ struct xhci_stream_info *stream_info)
+{
+ u32 cur_stream;
+ struct xhci_ring *cur_ring;
+ u64 addr;
+
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ struct xhci_ring *mapped_ring;
+ int trb_size = sizeof(union xhci_trb);
+
+ cur_ring = stream_info->stream_rings[cur_stream];
+ for (addr = cur_ring->first_seg->dma;
+ addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
+ addr += trb_size) {
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ if (cur_ring != mapped_ring) {
+ xhci_warn(xhci, "WARN: DMA address 0x%08llx "
+ "didn't map to stream ID %u; "
+ "mapped to ring %p\n",
+ (unsigned long long) addr,
+ cur_stream,
+ mapped_ring);
+ return -EINVAL;
+ }
+ }
+ /* One TRB after the end of the ring segment shouldn't return a
+ * pointer to the current ring (although it may be a part of a
+ * different ring).
+ */
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ if (mapped_ring != cur_ring) {
+ /* One TRB before should also fail */
+ addr = cur_ring->first_seg->dma - trb_size;
+ mapped_ring = dma_to_stream_ring(stream_info, addr);
+ }
+ if (mapped_ring == cur_ring) {
+ xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
+ "mapped to valid stream ID %u; "
+ "mapped ring = %p\n",
+ (unsigned long long) addr,
+ cur_stream,
+ mapped_ring);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs. The
+ * number of requested streams includes stream 0, which cannot be used by device
+ * drivers.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ *
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the host controller won't tell
+ * us which stream ring the TRB came from. We could store the stream ID in an
+ * event data TRB, but that doesn't help us for the cancellation case, since the
+ * endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example, say I
+ * have segments of size 1KB, that are always 64-byte aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ struct xhci_stream_info *stream_info;
+ u32 cur_stream;
+ struct xhci_ring *cur_ring;
+ unsigned long key;
+ u64 addr;
+ int ret;
+
+ xhci_dbg(xhci, "Allocating %u streams and %u "
+ "stream context array entries.\n",
+ num_streams, num_stream_ctxs);
+ if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
+ xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
+ return NULL;
+ }
+ xhci->cmd_ring_reserved_trbs++;
+
+ stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
+ if (!stream_info)
+ goto cleanup_trbs;
+
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kzalloc(
+ sizeof(struct xhci_ring *)*num_streams,
+ mem_flags);
+ if (!stream_info->stream_rings)
+ goto cleanup_info;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW. */
+ stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
+ num_stream_ctxs, &stream_info->ctx_array_dma,
+ mem_flags);
+ if (!stream_info->stream_ctx_array)
+ goto cleanup_ctx;
+ memset(stream_info->stream_ctx_array, 0,
+ sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
+
+ /* Allocate everything needed to free the stream rings later */
+ stream_info->free_streams_command =
+ xhci_alloc_command(xhci, true, true, mem_flags);
+ if (!stream_info->free_streams_command)
+ goto cleanup_ctx;
+
+ INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+
+ /* Allocate rings for all the streams that the driver will use,
+ * and add their segment DMA addresses to the radix tree.
+ * Stream 0 is reserved.
+ */
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ stream_info->stream_rings[cur_stream] =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (!cur_ring)
+ goto cleanup_rings;
+ cur_ring->stream_id = cur_stream;
+ /* Set deq ptr, cycle bit, and stream context type */
+ addr = cur_ring->first_seg->dma |
+ SCT_FOR_CTX(SCT_PRI_TR) |
+ cur_ring->cycle_state;
+ stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+ xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
+ cur_stream, (unsigned long long) addr);
+
+ key = (unsigned long)
+ (cur_ring->first_seg->dma >> SEGMENT_SHIFT);
+ ret = radix_tree_insert(&stream_info->trb_address_map,
+ key, cur_ring);
+ if (ret) {
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ goto cleanup_rings;
+ }
+ }
+ /* Leave the other unused stream ring pointers in the stream context
+ * array initialized to zero. This will cause the xHC to give us an
+ * error if the device asks for a stream ID we don't have setup (if it
+ * was any other way, the host controller would assume the ring is
+ * "empty" and wait forever for data to be queued to that stream ID).
+ */
+#if XHCI_DEBUG
+ /* Do a little test on the radix tree to make sure it returns the
+ * correct values.
+ */
+ if (xhci_test_radix_tree(xhci, num_streams, stream_info))
+ goto cleanup_rings;
+#endif
+
+ return stream_info;
+
+cleanup_rings:
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+cleanup_ctx:
+ kfree(stream_info->stream_rings);
+cleanup_info:
+ kfree(stream_info);
+cleanup_trbs:
+ xhci->cmd_ring_reserved_trbs--;
+ return NULL;
+}
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info)
+{
+ u32 max_primary_streams;
+ /* MaxPStreams is the number of stream context array entries, not the
+ * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
+ * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
+ */
+ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+ xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ 1 << (max_primary_streams + 1));
+ ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+ ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
+ ep_ctx->ep_info |= EP_HAS_LSA;
+ ep_ctx->deq = stream_info->ctx_array_dma;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field to 0.
+ * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
+ * not at the beginning of the ring).
+ */
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep)
+{
+ dma_addr_t addr;
+ ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
+ ep_ctx->ep_info &= ~EP_HAS_LSA;
+ addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
+ ep_ctx->deq = addr | ep->ring->cycle_state;
+}
+
+/* Frees all stream contexts associated with the endpoint.
+ *
+ * Caller should fix the endpoint context streams fields.
+ */
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info)
+{
+ int cur_stream;
+ struct xhci_ring *cur_ring;
+ dma_addr_t addr;
+
+ if (!stream_info)
+ return;
+
+ for (cur_stream = 1; cur_stream < stream_info->num_streams;
+ cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ addr = cur_ring->first_seg->dma;
+ radix_tree_delete(&stream_info->trb_address_map,
+ addr >> SEGMENT_SHIFT);
+ xhci_ring_free(xhci, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+ xhci_free_command(xhci, stream_info->free_streams_command);
+ xhci->cmd_ring_reserved_trbs--;
+ if (stream_info->stream_ctx_array)
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+
+ if (stream_info)
+ kfree(stream_info->stream_rings);
+ kfree(stream_info);
+}
+
+
+/***************** Device context manipulation *************************/
+
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
@@ -328,9 +744,13 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (!dev)
return;
- for (i = 0; i < 31; ++i)
+ for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
+ if (dev->eps[i].stream_info)
+ xhci_free_stream_info(xhci,
+ dev->eps[i].stream_info);
+ }
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
@@ -344,7 +764,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
xhci_free_container_ctx(xhci, dev->out_ctx);
kfree(xhci->devs[slot_id]);
- xhci->devs[slot_id] = 0;
+ xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
@@ -590,9 +1010,9 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
+ if (udev->speed != USB_SPEED_SUPER)
return 0;
- return ep->ss_ep_comp->desc.bmAttributes;
+ return ep->ss_ep_comp.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
@@ -641,13 +1061,8 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
- if (udev->speed == USB_SPEED_SUPER) {
- if (ep->ss_ep_comp)
- return ep->ss_ep_comp->desc.wBytesPerInterval;
- xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
- /* Assume no bursts, no multiple opportunities to send. */
- return ep->desc.wMaxPacketSize;
- }
+ if (udev->speed == USB_SPEED_SUPER)
+ return ep->ss_ep_comp.wBytesPerInterval;
max_packet = ep->desc.wMaxPacketSize & 0x3ff;
max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
@@ -655,6 +1070,9 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
return max_packet * (max_burst + 1);
}
+/* Set up an endpoint with one ring segment. Do not allocate stream rings.
+ * Drivers will have to call usb_alloc_streams() to do that.
+ */
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
@@ -708,12 +1126,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
max_packet = ep->desc.wMaxPacketSize;
ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
/* dig out max burst from ep companion desc */
- if (!ep->ss_ep_comp) {
- xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
- max_packet = 0;
- } else {
- max_packet = ep->ss_ep_comp->desc.bMaxBurst;
- }
+ max_packet = ep->ss_ep_comp.bMaxBurst;
+ if (!max_packet)
+ xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
ep_ctx->ep_info2 |= MAX_BURST(max_packet);
break;
case USB_SPEED_HIGH:
@@ -1003,6 +1418,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci->device_pool = NULL;
xhci_dbg(xhci, "Freed device context pool\n");
+ if (xhci->small_streams_pool)
+ dma_pool_destroy(xhci->small_streams_pool);
+ xhci->small_streams_pool = NULL;
+ xhci_dbg(xhci, "Freed small stream array pool\n");
+
+ if (xhci->medium_streams_pool)
+ dma_pool_destroy(xhci->medium_streams_pool);
+ xhci->medium_streams_pool = NULL;
+ xhci_dbg(xhci, "Freed medium stream array pool\n");
+
xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
if (xhci->dcbaa)
pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
@@ -1239,6 +1664,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
+ /* Linear stream context arrays don't have any boundary restrictions,
+ * and only need to be 16-byte aligned.
+ */
+ xhci->small_streams_pool =
+ dma_pool_create("xHCI 256 byte stream ctx arrays",
+ dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+ xhci->medium_streams_pool =
+ dma_pool_create("xHCI 1KB stream ctx arrays",
+ dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+ /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
+ * will be allocated with pci_alloc_consistent()
+ */
+
+ if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+ goto fail;
+
/* Set up the command ring to have one segments for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
if (!xhci->cmd_ring)
@@ -1330,7 +1771,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
- xhci->devs[i] = 0;
+ xhci->devs[i] = NULL;
if (scratchpad_alloc(xhci, flags))
goto fail;
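
Two pieces of arithmetic in the xhci-mem.c additions above are easy to sanity-check. MaxPStreams is stored as fls(num_stream_ctxs) - 2, so a 256-entry context array encodes as 7 and decodes back as 2^(7+1) = 256, matching the "2^(MaxPstreams + 1)" comment. And the radix-tree key for a TRB is simply its DMA address shifted right by SEGMENT_SHIFT, as the long comment before xhci_alloc_stream_info() walks through for 1KB segments. A standalone check of that comment's example addresses; the shift of 10 is an assumption tied to the 1KB segment size used there, not necessarily the driver's actual SEGMENT_SHIFT.

#include <stdio.h>

int main(void)
{
	unsigned long long addrs[] = { 0x10c90fffULL, 0x10c912c0ULL, 0x10c91400ULL };
	unsigned int shift = 10;	/* log2 of a 1KB segment */
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%08llx >> %u = 0x%llx\n",
		       addrs[i], shift, addrs[i] >> shift);

	/*
	 * Prints 0x43243, 0x43244 and 0x43245: only the TRB inside the
	 * 0x10c91000..0x10c913ff segment shares that segment's key.
	 */
	return 0;
}
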
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 417d37aff8d..edffd81fc25 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
@@ -132,6 +132,8 @@ static const struct hc_driver xhci_pci_hc_driver = {
.urb_dequeue = xhci_urb_dequeue,
.alloc_dev = xhci_alloc_dev,
.free_dev = xhci_free_dev,
+ .alloc_streams = xhci_alloc_streams,
+ .free_streams = xhci_free_streams,
.add_endpoint = xhci_add_endpoint,
.drop_endpoint = xhci_drop_endpoint,
.endpoint_reset = xhci_endpoint_reset,
@@ -175,12 +177,12 @@ static struct pci_driver xhci_pci_driver = {
.shutdown = usb_hcd_pci_shutdown,
};
-int xhci_register_pci()
+int xhci_register_pci(void)
{
return pci_register_driver(&xhci_pci_driver);
}
-void xhci_unregister_pci()
+void xhci_unregister_pci(void)
{
pci_unregister_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 85d7e8f2085..36c858e5b52 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -112,6 +112,12 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
+static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+{
+ struct xhci_link_trb *link = &ring->enqueue->link;
+ return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -193,20 +199,15 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (!consumer) {
if (ring != xhci->event_ring) {
- /* If we're not dealing with 0.95 hardware,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_trb_quirk(xhci)) {
- next->link.control &= ~TRB_CHAIN;
- next->link.control |= chain;
+ if (chain) {
+ next->link.control |= TRB_CHAIN;
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= TRB_CYCLE;
+ } else {
+ break;
}
- /* Give this link TRB to the hardware */
- wmb();
- if (next->link.control & TRB_CYCLE)
- next->link.control &= (u32) ~TRB_CYCLE;
- else
- next->link.control |= (u32) TRB_CYCLE;
}
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -242,10 +243,34 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
int i;
union xhci_trb *enq = ring->enqueue;
struct xhci_segment *enq_seg = ring->enq_seg;
+ struct xhci_segment *cur_seg;
+ unsigned int left_on_ring;
+
+ /* If we are currently pointing to a link TRB, advance the
+ * enqueue pointer before checking for space */
+ while (last_trb(xhci, ring, enq_seg, enq)) {
+ enq_seg = enq_seg->next;
+ enq = enq_seg->trbs;
+ }
/* Check if ring is empty */
- if (enq == ring->dequeue)
+ if (enq == ring->dequeue) {
+ /* Can't use link trbs */
+ left_on_ring = TRBS_PER_SEGMENT - 1;
+ for (cur_seg = enq_seg->next; cur_seg != enq_seg;
+ cur_seg = cur_seg->next)
+ left_on_ring += TRBS_PER_SEGMENT - 1;
+
+ /* Always need one TRB free in the ring. */
+ left_on_ring -= 1;
+ if (num_trbs > left_on_ring) {
+ xhci_warn(xhci, "Not enough room on ring; "
+ "need %u TRBs, %u TRBs left\n",
+ num_trbs, left_on_ring);
+ return 0;
+ }
return 1;
+ }
/* Make sure there's an extra empty TRB available */
for (i = 0; i <= num_trbs; ++i) {
if (enq == ring->dequeue)
@@ -295,7 +320,8 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
static void ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
- unsigned int ep_index)
+ unsigned int ep_index,
+ unsigned int stream_id)
{
struct xhci_virt_ep *ep;
unsigned int ep_state;
@@ -306,11 +332,16 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
+ * We don't want to restart any stream rings if there's a set dequeue
+ * pointer command pending because the device can choose to start any
+ * stream once the endpoint is on the HW schedule.
+ * FIXME - check all the stream rings for pending cancellations.
*/
if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
- xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+ field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
+ xhci_writel(xhci, field, db_addr);
/* Flush PCI posted writes - FIXME Matthew Wilcox says this
* isn't time-critical and we shouldn't make the CPU wait for
* the flush.
@@ -319,6 +350,31 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
}
}
+/* Ring the doorbell for any rings with pending URBs */
+static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ unsigned int stream_id;
+ struct xhci_virt_ep *ep;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+ if (!(list_empty(&ep->ring->td_list)))
+ ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ struct xhci_stream_info *stream_info = ep->stream_info;
+ if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
+ ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
+ }
+}
+
/*
* Find the segment that trb is in. Start searching in start_seg.
* If we must move past a segment that has a link TRB with a toggle cycle state
@@ -334,13 +390,14 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+ if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
+ TRB_TYPE(TRB_LINK) &&
(generic_trb->field[3] & LINK_TOGGLE))
*cycle_state = ~(*cycle_state) & 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
- return 0;
+ return NULL;
}
return cur_seg;
}
@@ -361,14 +418,23 @@ static struct xhci_segment *find_trb_seg(
*/
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
- struct xhci_td *cur_td, struct xhci_dequeue_state *state)
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
- struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
+ struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN can't find new dequeue state "
+ "for invalid stream ID %u.\n",
+ stream_id);
+ return;
+ }
state->new_cycle_state = 0;
xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
@@ -390,7 +456,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
BUG();
trb = &state->new_deq_ptr->generic;
- if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+ if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -448,11 +514,13 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, struct xhci_segment *deq_seg,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
@@ -464,7 +532,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
deq_state->new_deq_ptr,
(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
deq_state->new_cycle_state);
- queue_set_tr_deq(xhci, slot_id, ep_index,
+ queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
deq_state->new_deq_seg,
deq_state->new_deq_ptr,
(u32) deq_state->new_cycle_state);
@@ -523,7 +591,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
- struct xhci_td *cur_td = 0;
+ struct xhci_td *cur_td = NULL;
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
@@ -532,11 +600,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
ep = &xhci->devs[slot_id]->eps[ep_index];
- ep_ring = ep->ring;
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -550,15 +617,36 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
cur_td->first_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+ if (!ep_ring) {
+ /* This shouldn't happen unless a driver is mucking
+ * with the stream ID after submission. This will
+ * leave the TD on the hardware ring, and the hardware
+ * will try to execute it, and may access a buffer
+ * that has already been freed. In the best case, the
+ * hardware will execute it, and the event handler will
+ * ignore the completion event for that TD, since it was
+ * removed from the td_list for that endpoint. In
+ * short, don't muck with the stream ID after
+ * submission.
+ */
+ xhci_warn(xhci, "WARN Cancelled URB %p "
+ "has invalid stream ID %u.\n",
+ cur_td->urb,
+ cur_td->urb->stream_id);
+ goto remove_finished_td;
+ }
/*
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
if (cur_td == ep->stopped_td)
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
- &deq_state);
+ xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
+ cur_td->urb->stream_id,
+ cur_td, &deq_state);
else
td_to_noop(xhci, ep_ring, cur_td);
+remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
* so remove it from the endpoint ring's TD list. Keep it in
@@ -572,12 +660,16 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
xhci_queue_new_dequeue_state(xhci,
- slot_id, ep_index, &deq_state);
+ slot_id, ep_index,
+ ep->stopped_td->urb->stream_id,
+ &deq_state);
xhci_ring_cmd_db(xhci);
} else {
- /* Otherwise just ring the doorbell to restart the ring */
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -734,6 +826,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
{
unsigned int slot_id;
unsigned int ep_index;
+ unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep_ctx;
@@ -741,8 +834,19 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+ stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
dev = xhci->devs[slot_id];
- ep_ring = dev->eps[ep_index].ring;
+
+ ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_warn(xhci, "WARN Set TR deq ptr command for "
+ "freed stream ID %u\n",
+ stream_id);
+ /* XXX: Harmless??? */
+ dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ return;
+ }
+
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
@@ -787,7 +891,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
}
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ /* Restart any rings with pending URBs */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
@@ -796,11 +901,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
{
int slot_id;
unsigned int ep_index;
- struct xhci_ring *ep_ring;
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
@@ -818,9 +921,9 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
false);
xhci_ring_cmd_db(xhci);
} else {
- /* Clear our internal halted state and restart the ring */
+ /* Clear our internal halted state and restart the ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
}
@@ -897,16 +1000,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
* needed an extra configure endpoint command after a reset
- * endpoint command. In the latter case, the xHCI driver is
- * not waiting on the configure endpoint command.
+ * endpoint command or streams were being configured.
+ * If the command was for a halted endpoint, the xHCI driver
+ * is not waiting on the configure endpoint command.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
/* A usb_set_interface() call directly after clearing a halted
- * condition may race on this quirky hardware.
- * Not worth worrying about, since this is prototype hardware.
+ * condition may race on this quirky hardware. Not worth
+ * worrying about, since this is prototype hardware. Not sure
+ * if this will work for streams, but streams support was
+ * untested on this prototype.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_index != (unsigned int) -1 &&
@@ -919,10 +1025,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Completed config ep cmd - "
"last ep index = %d, state = %d\n",
ep_index, ep_state);
- /* Clear our internal halted state and restart ring */
+ /* Clear internal halted state and restart ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
break;
}
bandwidth_change:
@@ -1018,7 +1124,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
do {
if (start_dma == 0)
- return 0;
+ return NULL;
/* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
@@ -1040,7 +1146,7 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
suspect_dma <= end_trb_dma))
return cur_seg;
}
- return 0;
+ return NULL;
} else {
/* Might still be somewhere in this segment */
if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
@@ -1050,19 +1156,27 @@ struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
} while (cur_seg != start_seg);
- return 0;
+ return NULL;
}
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_td *td, union xhci_trb *event_trb)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ ep->stopped_stream = stream_id;
+
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
+ ep->stopped_stream = 0;
+
xhci_ring_cmd_db(xhci);
}
@@ -1119,11 +1233,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
- struct xhci_td *td = 0;
+ struct xhci_td *td = NULL;
dma_addr_t event_dma;
struct xhci_segment *event_seg;
union xhci_trb *event_trb;
- struct urb *urb = 0;
+ struct urb *urb = NULL;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
@@ -1140,10 +1254,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_index = TRB_TO_EP_ID(event->flags) - 1;
xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
- ep_ring = ep->ring;
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
- xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+ xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
+ "or incorrect stream ring\n");
return -ENODEV;
}
@@ -1274,7 +1389,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td->urb->actual_length = 0;
xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, td, event_trb);
+ slot_id, ep_index, 0, td, event_trb);
goto td_cleanup;
}
/*
@@ -1390,8 +1505,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
- TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+ if ((cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
+ (cur_trb->generic.field[3] &
+ TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]);
}
@@ -1423,6 +1540,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
/* Other types of errors halt the endpoint, but the
@@ -1431,7 +1549,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* xHCI hardware manually.
*/
xhci_cleanup_halted_endpoint(xhci,
- slot_id, ep_index, td, event_trb);
+ slot_id, ep_index, ep_ring->stream_id, td, event_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1621,20 +1739,66 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
xhci_err(xhci, "ERROR no room on ep ring\n");
return -ENOMEM;
}
+
+ if (enqueue_is_link_trb(ep_ring)) {
+ struct xhci_ring *ring = ep_ring;
+ union xhci_trb *next;
+
+ xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
+ next = ring->enqueue;
+
+ while (last_trb(xhci, ring, ring->enq_seg, next)) {
+
+ /* If we're not dealing with 0.95 hardware,
+ * clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci))
+ next->link.control &= ~TRB_CHAIN;
+ else
+ next->link.control |= TRB_CHAIN;
+
+ wmb();
+ next->link.control ^= (u32) TRB_CYCLE;
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ if (!in_interrupt()) {
+ xhci_dbg(xhci, "queue_trb: Toggle cycle "
+ "state for ring %p = %i\n",
+ ring, (unsigned int)ring->cycle_state);
+ }
+ }
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+ }
+
return 0;
}
static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_virt_device *xdev,
unsigned int ep_index,
+ unsigned int stream_id,
unsigned int num_trbs,
struct urb *urb,
struct xhci_td **td,
gfp_t mem_flags)
{
int ret;
+ struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
+
+ ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+ if (!ep_ring) {
+ xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
+ stream_id);
+ return -EINVAL;
+ }
+
+ ret = prepare_ring(xhci, ep_ring,
ep_ctx->ep_info & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
@@ -1654,9 +1818,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
(*td)->urb = urb;
urb->hcpriv = (void *) (*td);
/* Add this TD to the tail of the endpoint ring's TD list */
- list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
- (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
- (*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
+ list_add_tail(&(*td)->td_list, &ep_ring->td_list);
+ (*td)->start_seg = ep_ring->enq_seg;
+ (*td)->first_trb = ep_ring->enqueue;
return 0;
}
@@ -1672,7 +1836,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
- for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+ for_each_sg(urb->sg, sg, num_sgs, i) {
unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
@@ -1722,7 +1886,7 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
}
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, int start_cycle,
+ unsigned int ep_index, unsigned int stream_id, int start_cycle,
struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
/*
@@ -1731,7 +1895,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
*/
wmb();
start_trb->field[3] |= start_cycle;
- ring_ep_doorbell(xhci, slot_id, ep_index);
+ ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
/*
@@ -1805,12 +1969,16 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
+
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs;
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, num_trbs, urb, &td, mem_flags);
+ ep_index, urb->stream_id,
+ num_trbs, urb, &td, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
/*
@@ -1831,7 +1999,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* the amount of memory allocated for this scatter-gather list.
* 3. TRBs buffers can't cross 64KB boundaries.
*/
- sg = urb->sg->sg;
+ sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
trb_buff_len = TRB_MAX_BUFF_SIZE -
@@ -1919,7 +2087,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb, td);
return 0;
}
@@ -1938,10 +2107,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int running_total, trb_buff_len, ret;
u64 addr;
- if (urb->sg)
+ if (urb->num_sgs)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
@@ -1968,7 +2139,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned long long)urb->transfer_dma,
num_trbs);
- ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
num_trbs, urb, &td, mem_flags);
if (ret < 0)
return ret;
@@ -2038,7 +2210,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} while (running_total < urb->transfer_buffer_length);
check_trb_math(urb, num_trbs, running_total);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+ start_cycle, start_trb, td);
return 0;
}
@@ -2055,7 +2228,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field;
struct xhci_td *td;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring)
+ return -EINVAL;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
@@ -2076,8 +2251,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
if (urb->transfer_buffer_length > 0)
num_trbs++;
- ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
- urb, &td, mem_flags);
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ num_trbs, urb, &td, mem_flags);
if (ret < 0)
return ret;
@@ -2132,7 +2308,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Event on completion */
field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
- giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+ giveback_first_trb(xhci, slot_id, ep_index, 0,
+ start_cycle, start_trb, td);
return 0;
}
@@ -2244,12 +2421,14 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
* This should not be used for endpoints that have streams enabled.
*/
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
- unsigned int ep_index, struct xhci_segment *deq_seg,
+ unsigned int ep_index, unsigned int stream_id,
+ struct xhci_segment *deq_seg,
union xhci_trb *deq_ptr, u32 cycle_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 type = TRB_TYPE(TRB_SET_DEQ);
addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
@@ -2260,7 +2439,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
return 0;
}
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
- upper_32_bits(addr), 0,
+ upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
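
For reference, the doorbell write in ring_ep_doorbell() above now encodes both the endpoint and the stream that should be scheduled. A standalone sketch of the value composition, using only the DB_MASK, EPI_TO_DB() and STREAM_ID_TO_DB() macros from xhci.h (the helper itself is illustrative):

static u32 doorbell_value_sketch(u32 old, unsigned int ep_index,
				 unsigned int stream_id)
{
	u32 field = old & DB_MASK;

	field |= EPI_TO_DB(ep_index);		/* doorbell target: the endpoint */
	field |= STREAM_ID_TO_DB(stream_id);	/* stream/task ID in bits 31:16 */
	return field;
}
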
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7e427727390..40e0a0c221b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
*/
#include <linux/irq.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
@@ -352,11 +353,7 @@ void xhci_event_ring_work(unsigned long arg)
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; ++j) {
- struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
- if (!ring)
- continue;
- xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
- xhci_debug_segment(xhci, ring->deq_seg);
+ xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
}
}
@@ -726,8 +723,21 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
spin_lock_irqsave(&xhci->lock, flags);
if (xhci->xhc_state & XHCI_STATE_DYING)
goto dying;
- ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
- slot_id, ep_index);
+ if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+ EP_GETTING_STREAMS) {
+ xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+ "is transitioning to using streams.\n");
+ ret = -EINVAL;
+ } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
+ EP_GETTING_NO_STREAMS) {
+ xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
+ "is transitioning to "
+ "not having streams.\n");
+ ret = -EINVAL;
+ } else {
+ ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
+ slot_id, ep_index);
+ }
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -825,7 +835,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
- ep_ring = ep->ring;
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring) {
+ ret = -EINVAL;
+ goto done;
+ }
+
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv;
@@ -1369,7 +1384,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
- ep_index, ep->stopped_td,
+ ep_index, ep->stopped_stream, ep->stopped_td,
&deq_state);
/* HW with the reset endpoint quirk will use the saved dequeue state to
@@ -1378,10 +1393,12 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, udev->slot_id,
- ep_index, &deq_state);
+ ep_index, ep->stopped_stream, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
* reset endpoint completion!
+ * XXX: No idea how this hardware will react when stream rings
+ * are enabled.
*/
xhci_dbg(xhci, "Setting up input context for "
"configure endpoint command\n");
@@ -1438,12 +1455,391 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
+ virt_ep->stopped_td = NULL;
+ virt_ep->stopped_trb = NULL;
+ virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
if (ret)
xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
+static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ unsigned int slot_id)
+{
+ int ret;
+ unsigned int ep_index;
+ unsigned int ep_state;
+
+ if (!ep)
+ return -EINVAL;
+ ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
+ if (ret <= 0)
+ return -EINVAL;
+ if (ep->ss_ep_comp.bmAttributes == 0) {
+ xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+ " descriptor for ep 0x%x does not support streams\n",
+ ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+
+ ep_index = xhci_get_endpoint_index(&ep->desc);
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ if (ep_state & EP_HAS_STREAMS ||
+ ep_state & EP_GETTING_STREAMS) {
+ xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
+ "already has streams set up.\n",
+ ep->desc.bEndpointAddress);
+ xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
+ "dynamic stream context array reallocation.\n");
+ return -EINVAL;
+ }
+ if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
+ xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
+ "endpoint 0x%x; URBs are pending.\n",
+ ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
+ unsigned int *num_streams, unsigned int *num_stream_ctxs)
+{
+ unsigned int max_streams;
+
+ /* The stream context array size must be a power of two */
+ *num_stream_ctxs = roundup_pow_of_two(*num_streams);
+ /*
+ * Find out how many primary stream array entries the host controller
+ * supports. Later we may use secondary stream arrays (similar to 2nd
+ * level page entries), but that's an optional feature for xHCI host
+ * controllers. xHCs must support at least 4 stream IDs.
+ */
+ max_streams = HCC_MAX_PSA(xhci->hcc_params);
+ if (*num_stream_ctxs > max_streams) {
+ xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
+ max_streams);
+ *num_stream_ctxs = max_streams;
+ *num_streams = max_streams;
+ }
+}
+
+/* Returns an error code if one of the endpoints already has streams.
+ * This does not change any data structures, it only checks and gathers
+ * information.
+ */
+static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int *num_streams, u32 *changed_ep_bitmask)
+{
+ unsigned int max_streams;
+ unsigned int endpoint_flag;
+ int i;
+ int ret;
+
+ for (i = 0; i < num_eps; i++) {
+ ret = xhci_check_streams_endpoint(xhci, udev,
+ eps[i], udev->slot_id);
+ if (ret < 0)
+ return ret;
+
+ max_streams = USB_SS_MAX_STREAMS(
+ eps[i]->ss_ep_comp.bmAttributes);
+ if (max_streams < (*num_streams - 1)) {
+ xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+ eps[i]->desc.bEndpointAddress,
+ max_streams);
+ *num_streams = max_streams+1;
+ }
+
+ endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
+ if (*changed_ep_bitmask & endpoint_flag)
+ return -EINVAL;
+ *changed_ep_bitmask |= endpoint_flag;
+ }
+ return 0;
+}
+
+static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps)
+{
+ u32 changed_ep_bitmask = 0;
+ unsigned int slot_id;
+ unsigned int ep_index;
+ unsigned int ep_state;
+ int i;
+
+ slot_id = udev->slot_id;
+ if (!xhci->devs[slot_id])
+ return 0;
+
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ /* Are streams already being freed for the endpoint? */
+ if (ep_state & EP_GETTING_NO_STREAMS) {
+ xhci_warn(xhci, "WARN Can't disable streams for "
+ "endpoint 0x%x\n, "
+ "streams are being disabled already.",
+ eps[i]->desc.bEndpointAddress);
+ return 0;
+ }
+ /* Are there actually any streams to free? */
+ if (!(ep_state & EP_HAS_STREAMS) &&
+ !(ep_state & EP_GETTING_STREAMS)) {
+ xhci_warn(xhci, "WARN Can't disable streams for "
+ "endpoint 0x%x\n, "
+ "streams are already disabled!",
+ eps[i]->desc.bEndpointAddress);
+ xhci_warn(xhci, "WARN xhci_free_streams() called "
+ "with non-streams endpoint\n");
+ return 0;
+ }
+ changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
+ }
+ return changed_ep_bitmask;
+}
+
+/*
+ * The USB device drivers use this function (through the HCD interface in USB
+ * core) to prepare a set of bulk endpoints to use streams. Streams are used to
+ * coordinate mass storage command queueing across multiple endpoints (basically
+ * a stream ID == a task ID).
+ *
+ * Setting up streams involves allocating the same size stream context array
+ * for each endpoint and issuing a configure endpoint command for all endpoints.
+ *
+ * Don't allow the call to succeed if one endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ *
+ * Drivers may get less stream IDs than they asked for, if the host controller
+ * hardware or endpoints claim they can't support the number of requested
+ * stream IDs.
+ */
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ int i, ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_command *config_cmd;
+ unsigned int ep_index;
+ unsigned int num_stream_ctxs;
+ unsigned long flags;
+ u32 changed_ep_bitmask = 0;
+
+ if (!eps)
+ return -EINVAL;
+
+ /* Add one to the number of streams requested to account for
+ * stream 0 that is reserved for xHCI usage.
+ */
+ num_streams += 1;
+ xhci = hcd_to_xhci(hcd);
+ xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
+ num_streams);
+
+ config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
+ if (!config_cmd) {
+ xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Check to make sure all endpoints are not already configured for
+ * streams. While we're at it, find the maximum number of streams that
+ * all the endpoints will support and check for duplicate endpoints.
+ */
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
+ num_eps, &num_streams, &changed_ep_bitmask);
+ if (ret < 0) {
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return ret;
+ }
+ if (num_streams <= 1) {
+ xhci_warn(xhci, "WARN: endpoints can't handle "
+ "more than one stream.\n");
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+ vdev = xhci->devs[udev->slot_id];
+ /* Mark each endpoint as being in transition, so
+ * xhci_urb_enqueue() will reject all URBs.
+ */
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Setup internal data structures and allocate HW data structures for
+ * streams (but don't install the HW structures in the input context
+ * until we're sure all memory allocation succeeded).
+ */
+ xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
+ xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
+ num_stream_ctxs, num_streams);
+
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
+ num_stream_ctxs,
+ num_streams, mem_flags);
+ if (!vdev->eps[ep_index].stream_info)
+ goto cleanup;
+ /* Set maxPstreams in endpoint context and update deq ptr to
+ * point to stream context array. FIXME
+ */
+ }
+
+ /* Set up the input context for a configure endpoint command. */
+ for (i = 0; i < num_eps; i++) {
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
+
+ xhci_endpoint_copy(xhci, config_cmd->in_ctx,
+ vdev->out_ctx, ep_index);
+ xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
+ vdev->eps[ep_index].stream_info);
+ }
+ /* Tell the HW to drop its old copy of the endpoint context info
+ * and add the updated copy from the input context.
+ */
+ xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
+ vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+
+ /* Issue and wait for the configure endpoint command */
+ ret = xhci_configure_endpoint(xhci, udev, config_cmd,
+ false, false);
+
+ /* xHC rejected the configure endpoint command for some reason, so we
+ * leave the old ring intact and free our internal streams data
+ * structure.
+ */
+ if (ret < 0)
+ goto cleanup;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+ xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
+ udev->slot_id, ep_index);
+ vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
+ }
+ xhci_free_command(xhci, config_cmd);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Subtract 1 for stream 0, which drivers can't use */
+ return num_streams - 1;
+
+cleanup:
+ /* If it didn't work, free the streams! */
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+ vdev->eps[ep_index].stream_info = NULL;
+ /* FIXME Unset maxPstreams in endpoint context and
+ * update deq ptr to point to normal string ring.
+ */
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
+ vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+ xhci_endpoint_zero(xhci, vdev, eps[i]);
+ }
+ xhci_free_command(xhci, config_cmd);
+ return -ENOMEM;
+}
+
+/* Transition the endpoint from using streams to being a "normal" endpoint
+ * without streams.
+ *
+ * Modify the endpoint context state, submit a configure endpoint command,
+ * and free all endpoint rings for streams if that completes successfully.
+ */
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ int i, ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_command *command;
+ unsigned int ep_index;
+ unsigned long flags;
+ u32 changed_ep_bitmask;
+
+ xhci = hcd_to_xhci(hcd);
+ vdev = xhci->devs[udev->slot_id];
+
+ /* Set up a configure endpoint command to remove the stream rings */
+ spin_lock_irqsave(&xhci->lock, flags);
+ changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
+ udev, eps, num_eps);
+ if (changed_ep_bitmask == 0) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Use the xhci_command structure from the first endpoint. We may have
+ * allocated too many, but the driver may call xhci_free_streams() for
+ * each endpoint it grouped into one call to xhci_alloc_streams().
+ */
+ ep_index = xhci_get_endpoint_index(&eps[0]->desc);
+ command = vdev->eps[ep_index].stream_info->free_streams_command;
+ for (i = 0; i < num_eps; i++) {
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
+ EP_GETTING_NO_STREAMS;
+
+ xhci_endpoint_copy(xhci, command->in_ctx,
+ vdev->out_ctx, ep_index);
+ xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
+ &vdev->eps[ep_index]);
+ }
+ xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
+ vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Issue and wait for the configure endpoint command,
+ * which must succeed.
+ */
+ ret = xhci_configure_endpoint(xhci, udev, command,
+ false, true);
+
+ /* xHC rejected the configure endpoint command for some reason, so we
+ * leave the stream rings intact.
+ */
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
+ vdev->eps[ep_index].stream_info = NULL;
+ /* FIXME Unset maxPstreams in endpoint context and
+ * update deq ptr to point to normal string ring.
+ */
+ vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
+ vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ return 0;
+}
+
/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
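
A worked example of the sizing logic in xhci_calculate_streams_entries() and xhci_alloc_streams() above, wrapped in an illustrative helper (the function is not part of the patch; roundup_pow_of_two() comes from <linux/log2.h>, which the patch now includes):

static unsigned int usable_streams_sketch(struct xhci_hcd *xhci,
					  unsigned int requested)
{
	unsigned int num_streams = requested + 1;	/* + stream 0, reserved */
	unsigned int num_stream_ctxs = roundup_pow_of_two(num_streams);
	unsigned int max = HCC_MAX_PSA(xhci->hcc_params);

	if (num_stream_ctxs > max) {
		num_stream_ctxs = max;
		num_streams = max;
	}
	/* e.g. requested = 13 -> 14 streams -> a 16-entry context array,
	 * unless the xHC caps the primary stream array at something smaller.
	 */
	return num_streams - 1;		/* drivers never see stream 0 */
}
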
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea389e9a493..dada2fb5926 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -26,8 +26,8 @@
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/kernel.h>
+#include <linux/usb/hcd.h>
-#include "../core/hcd.h"
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
@@ -117,7 +117,7 @@ struct xhci_cap_regs {
/* true: no secondary Stream ID Support */
#define HCC_NSS(p) ((p) & (1 << 7))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA (1 << ((((p) >> 12) & 0xf) + 1))
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
@@ -444,6 +444,7 @@ struct xhci_doorbell_array {
/* Endpoint Target - bits 0:7 */
#define EPI_TO_DB(p) (((p) + 1) & 0xff)
+#define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16)
/**
@@ -585,6 +586,10 @@ struct xhci_ep_ctx {
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
+#define EP_MAXPSTREAMS_MASK (0x1f << 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA (1 << 15)
/* ep_info2 bitmasks */
/*
@@ -648,8 +653,50 @@ struct xhci_command {
/* add context bitmasks */
#define ADD_EP(x) (0x1 << x)
+struct xhci_stream_ctx {
+ /* 64-bit stream ring address, cycle state, and stream type */
+ u64 stream_ring;
+ /* offset 0x14 - 0x1f reserved for HC internal use */
+ u32 reserved[2];
+};
+
+/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
+#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
+/* Secondary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring */
+#define SCT_PRI_TR 1
+/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
+#define SCT_SSA_8 2
+#define SCT_SSA_16 3
+#define SCT_SSA_32 4
+#define SCT_SSA_64 5
+#define SCT_SSA_128 6
+#define SCT_SSA_256 7
+
+/* Assume no secondary streams for now */
+struct xhci_stream_info {
+ struct xhci_ring **stream_rings;
+ /* Number of streams, including stream 0 (which drivers can't use) */
+ unsigned int num_streams;
+ /* The stream context array may be bigger than
+ * the number of streams the driver asked for
+ */
+ struct xhci_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ /* For mapping physical TRB addresses to segments in stream rings */
+ struct radix_tree_root trb_address_map;
+ struct xhci_command *free_streams_command;
+};
+
+#define SMALL_STREAM_ARRAY_SIZE 256
+#define MEDIUM_STREAM_ARRAY_SIZE 1024
+
struct xhci_virt_ep {
struct xhci_ring *ring;
+ /* Related to endpoints that are configured to use stream IDs only */
+ struct xhci_stream_info *stream_info;
/* Temporary storage in case the configure endpoint command fails and we
* have to restore the device state to the previous state
*/
@@ -658,11 +705,17 @@ struct xhci_virt_ep {
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1) /* For stall handling */
#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
+/* Transitioning the endpoint to using streams, don't enqueue URBs */
+#define EP_GETTING_STREAMS (1 << 3)
+#define EP_HAS_STREAMS (1 << 4)
+/* Transitioning the endpoint to not using streams, don't enqueue URBs */
+#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
+ unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
struct timer_list stop_cmd_timer;
int stop_cmds_pending;
@@ -710,14 +763,6 @@ struct xhci_device_context_array {
*/
-struct xhci_stream_ctx {
- /* 64-bit stream ring address, cycle state, and stream type */
- u64 stream_ring;
- /* offset 0x14 - 0x1f reserved for HC internal use */
- u32 reserved[2];
-};
-
-
struct xhci_transfer_event {
/* 64-bit buffer address, or immediate data */
u64 buffer;
@@ -828,6 +873,10 @@ struct xhci_event_cmd {
#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
+/* Set TR Dequeue Pointer command TRB fields */
+#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
+
/* Port Status Change Event TRB fields */
/* Port ID - bits 31:24 */
@@ -952,6 +1001,10 @@ union xhci_trb {
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
+/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
+ * Change this if you change TRBS_PER_SEGMENT!
+ */
+#define SEGMENT_SHIFT 10
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
@@ -993,6 +1046,7 @@ struct xhci_ring {
* if we own the TRB (if we are the consumer). See section 4.9.1.
*/
u32 cycle_state;
+ unsigned int stream_id;
};
struct xhci_erst_entry {
@@ -1088,6 +1142,8 @@ struct xhci_hcd {
/* DMA pools */
struct dma_pool *device_pool;
struct dma_pool *segment_pool;
+ struct dma_pool *small_streams_pool;
+ struct dma_pool *medium_streams_pool;
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/* Poll the rings - for debugging */
@@ -1216,6 +1272,9 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
+void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_virt_ep *ep);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1242,6 +1301,29 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index);
+struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams, gfp_t flags);
+void xhci_free_stream_info(struct xhci_hcd *xhci,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_stream_info *stream_info);
+void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ struct xhci_virt_ep *ep);
+struct xhci_ring *xhci_dma_to_transfer_ring(
+ struct xhci_virt_ep *ep,
+ u64 address);
+struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb);
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id);
+struct xhci_ring *xhci_stream_id_to_ring(
+ struct xhci_virt_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags);
@@ -1266,6 +1348,12 @@ int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
@@ -1308,9 +1396,11 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
- struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+ unsigned int stream_id, struct xhci_td *cur_td,
+ struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, unsigned int ep_index);
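
Tying the new definitions together, a sketch of how one entry of a linear (primary) stream context array would be filled in: bits 63:4 hold the ring's dequeue pointer, bits 3:1 the context type, bit 0 the cycle state. struct xhci_stream_ctx and the SCT_* macros are from this header; the helper itself is an illustration, not the driver's code.

static void set_stream_ctx_sketch(struct xhci_stream_ctx *ctx,
				  dma_addr_t ring_dma, u32 cycle_state)
{
	ctx->stream_ring = (ring_dma & ~0xfULL) |
			   SCT_FOR_CTX(SCT_PRI_TR) |
			   (cycle_state & 0x1);
	ctx->reserved[0] = 0;
	ctx->reserved[1] = 0;
}
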
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 094f91cbc57..1fa6ce3e4a2 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -259,7 +259,7 @@ static int appledisplay_probe(struct usb_interface *iface,
}
/* Allocate buffer for interrupt data */
- pdata->urbdata = usb_buffer_alloc(pdata->udev, ACD_URB_BUFFER_LEN,
+ pdata->urbdata = usb_alloc_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
GFP_KERNEL, &pdata->urb->transfer_dma);
if (!pdata->urbdata) {
retval = -ENOMEM;
@@ -316,7 +316,7 @@ error:
if (pdata->urb) {
usb_kill_urb(pdata->urb);
if (pdata->urbdata)
- usb_buffer_free(pdata->udev, ACD_URB_BUFFER_LEN,
+ usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
}
@@ -337,7 +337,7 @@ static void appledisplay_disconnect(struct usb_interface *iface)
usb_kill_urb(pdata->urb);
cancel_delayed_work(&pdata->work);
backlight_device_unregister(pdata->bd);
- usb_buffer_free(pdata->udev, ACD_URB_BUFFER_LEN,
+ usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
kfree(pdata->msgdata);
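
The appledisplay hunks above and the misc-driver hunks below are the same mechanical conversion: usb_buffer_alloc()/usb_buffer_free() become usb_alloc_coherent()/usb_free_coherent() with identical arguments. A minimal sketch of the paired calls (the surrounding function is a placeholder; the usb_*_coherent() signatures are the real API):

static int coherent_buffer_sketch(struct usb_device *udev, struct urb *urb,
				  size_t len)
{
	void *buf = usb_alloc_coherent(udev, len, GFP_KERNEL,
				       &urb->transfer_dma);

	if (!buf)
		return -ENOMEM;
	/* ... fill buf, point urb->transfer_buffer at it, submit ... */
	usb_free_coherent(udev, len, buf, urb->transfer_dma);
	return 0;
}
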
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 1edb6d36189..82e16630a78 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -73,7 +73,7 @@ static struct list_head ftdi_static_list;
*/
#include "usb_u132.h"
#include <asm/io.h>
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
* If you're going to try stuff like this, you need to split
@@ -734,7 +734,7 @@ static void ftdi_elan_write_bulk_callback(struct urb *urb)
dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %"
"d\n", urb, status);
}
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
}
@@ -795,7 +795,7 @@ static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
total_size);
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, total_size, GFP_KERNEL,
+ buf = usb_alloc_coherent(ftdi->udev, total_size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer to write %d c"
@@ -829,7 +829,7 @@ static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
dev_err(&ftdi->udev->dev, "failed %d to submit urb %p to write "
"%d commands totaling %d bytes to the Uxxx\n", retval,
urb, command_size, total_size);
- usb_buffer_free(ftdi->udev, total_size, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, total_size, buf, urb->transfer_dma);
usb_free_urb(urb);
return retval;
}
@@ -1167,7 +1167,7 @@ static ssize_t ftdi_elan_write(struct file *file,
retval = -ENOMEM;
goto error_1;
}
- buf = usb_buffer_alloc(ftdi->udev, count, GFP_KERNEL,
+ buf = usb_alloc_coherent(ftdi->udev, count, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
@@ -1192,7 +1192,7 @@ static ssize_t ftdi_elan_write(struct file *file,
exit:
return count;
error_3:
- usb_buffer_free(ftdi->udev, count, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, count, buf, urb->transfer_dma);
error_2:
usb_free_urb(urb);
error_1:
@@ -1968,7 +1968,7 @@ static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi)
"ence\n");
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for flush seq"
"uence\n");
@@ -1985,7 +1985,7 @@ static int ftdi_elan_synchronize_flush(struct usb_ftdi *ftdi)
if (retval) {
dev_err(&ftdi->udev->dev, "failed to submit urb containing the "
"flush sequence\n");
- usb_buffer_free(ftdi->udev, i, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma);
usb_free_urb(urb);
return -ENOMEM;
}
@@ -2011,7 +2011,7 @@ static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi)
"quence\n");
return -ENOMEM;
}
- buf = usb_buffer_alloc(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(ftdi->udev, I, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
dev_err(&ftdi->udev->dev, "could not get a buffer for the reset"
" sequence\n");
@@ -2030,7 +2030,7 @@ static int ftdi_elan_synchronize_reset(struct usb_ftdi *ftdi)
if (retval) {
dev_err(&ftdi->udev->dev, "failed to submit urb containing the "
"reset sequence\n");
- usb_buffer_free(ftdi->udev, i, buf, urb->transfer_dma);
+ usb_free_coherent(ftdi->udev, i, buf, urb->transfer_dma);
usb_free_urb(urb);
return -ENOMEM;
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index d3c85236388..7dc9d3c6998 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -239,8 +239,8 @@ static void iowarrior_write_callback(struct urb *urb)
__func__, status);
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
/* tell a waiting writer the interrupt-out-pipe is available again */
atomic_dec(&dev->write_busy);
wake_up_interruptible(&dev->write_wait);
@@ -421,8 +421,8 @@ static ssize_t iowarrior_write(struct file *file,
dbg("%s Unable to allocate urb ", __func__);
goto error_no_urb;
}
- buf = usb_buffer_alloc(dev->udev, dev->report_size,
- GFP_KERNEL, &int_out_urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, dev->report_size,
+ GFP_KERNEL, &int_out_urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
dbg("%s Unable to allocate buffer ", __func__);
@@ -459,8 +459,8 @@ static ssize_t iowarrior_write(struct file *file,
break;
}
error:
- usb_buffer_free(dev->udev, dev->report_size, buf,
- int_out_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->report_size, buf,
+ int_out_urb->transfer_dma);
error_no_buffer:
usb_free_urb(int_out_urb);
error_no_urb:
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index aae95a009bd..30d930386b6 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -47,7 +47,6 @@
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/usb.h>
-#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include "sisusb.h"
@@ -2416,14 +2415,11 @@ sisusb_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor = iminor(inode);
- lock_kernel();
if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
- unlock_kernel();
return -ENODEV;
}
if (!(sisusb = usb_get_intfdata(interface))) {
- unlock_kernel();
return -ENODEV;
}
@@ -2431,13 +2427,11 @@ sisusb_open(struct inode *inode, struct file *file)
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return -EBUSY;
}
@@ -2446,13 +2440,11 @@ sisusb_open(struct inode *inode, struct file *file)
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
- unlock_kernel();
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
- unlock_kernel();
return -EIO;
}
}
@@ -2465,7 +2457,6 @@ sisusb_open(struct inode *inode, struct file *file)
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return 0;
}
@@ -2974,13 +2965,12 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct sisusb_usb_data *sisusb;
struct sisusb_info x;
struct sisusb_command y;
- int retval = 0;
+ long retval = 0;
u32 __user *argp = (u32 __user *)arg;
if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
return -ENODEV;
- lock_kernel();
mutex_lock(&sisusb->lock);
/* Sanity check */
@@ -3039,7 +3029,6 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err_out:
mutex_unlock(&sisusb->lock);
- unlock_kernel();
return retval;
}
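
The sisusb_open()/sisusb_ioctl() changes drop lock_kernel()/unlock_kernel() because the per-device sisusb->lock mutex already covers the touched paths. The resulting shape of a BKL-free open path, sketched with hypothetical example_driver/example_dev names:

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/usb.h>

struct example_dev {			/* hypothetical per-device state */
	struct mutex lock;
	int isopen;
};

static struct usb_driver example_driver;	/* assumed registered elsewhere */

static int example_open(struct inode *inode, struct file *file)
{
	struct usb_interface *intf;
	struct example_dev *dev;

	intf = usb_find_interface(&example_driver, iminor(inode));
	if (!intf)
		return -ENODEV;

	dev = usb_get_intfdata(intf);
	if (!dev)
		return -ENODEV;

	/* the per-device mutex alone serializes open/ioctl/release */
	mutex_lock(&dev->lock);
	if (dev->isopen) {
		mutex_unlock(&dev->lock);
		return -EBUSY;
	}
	dev->isopen = 1;
	file->private_data = dev;
	mutex_unlock(&dev->lock);

	return 0;
}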
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index b271b0557a1..411e605f448 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -1187,9 +1187,9 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
* And so is the hi_font_mask.
*/
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *c = vc_cons[i].d;
- if (c && c->vc_sw == &sisusb_con)
- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+ struct vc_data *d = vc_cons[i].d;
+ if (d && d->vc_sw == &sisusb_con)
+ d->vc_hi_font_mask = ch512 ? 0x0800 : 0;
}
sisusb->current_font_512 = ch512;
@@ -1249,7 +1249,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
mutex_unlock(&sisusb->lock);
if (dorecalc && c) {
- int i, rows = c->vc_scan_lines / fh;
+ int rows = c->vc_scan_lines / fh;
/* Now adjust our consoles' size */
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 90aede90553..7828c764b32 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -205,8 +205,8 @@ static void lcd_write_bulk_callback(struct urb *urb)
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
@@ -234,7 +234,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
goto err_no_buf;
}
- buf = usb_buffer_alloc(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL, &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
@@ -268,7 +268,7 @@ exit:
error_unanchor:
usb_unanchor_urb(urb);
error:
- usb_buffer_free(dev->udev, count, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
err_no_buf:
up(&dev->limit_sem);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a21cce6f740..16dffe99d9f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,7 +202,7 @@ static struct urb *simple_alloc_urb (
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein (pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
- urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
+ urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb (urb);
@@ -272,8 +272,8 @@ static inline int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
static void simple_free_urb (struct urb *urb)
{
- usb_buffer_free (urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb (urb);
}
@@ -977,15 +977,13 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
if (!u)
goto cleanup;
- reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
- &u->setup_dma);
+ reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
reqp->number = i % NUM_SUBCASES;
reqp->expected = expected;
u->setup_packet = (char *) &reqp->setup;
- u->transfer_flags |= URB_NO_SETUP_DMA_MAP;
u->context = &context;
u->complete = ctrl_complete;
@@ -1017,10 +1015,7 @@ cleanup:
if (!urb [i])
continue;
urb [i]->dev = udev;
- if (urb [i]->setup_packet)
- usb_buffer_free (udev, sizeof (struct usb_ctrlrequest),
- urb [i]->setup_packet,
- urb [i]->setup_dma);
+ kfree(urb[i]->setup_packet);
simple_free_urb (urb [i]);
}
kfree (urb);
@@ -1421,7 +1416,7 @@ static struct urb *iso_alloc_urb (
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
- urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
+ urb->transfer_buffer = usb_alloc_coherent (udev, bytes, GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb (urb);
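
The test_ctrl_queue() change above stops allocating the setup packet from the coherent DMA pool and drops URB_NO_SETUP_DMA_MAP; a plain kmalloc()'d usb_ctrlrequest suffices because the USB core now handles the mapping (an assumption based on these hunks). A hedged sketch of that allocation pattern; the helper name and arguments are illustrative:

#include <linux/slab.h>
#include <linux/usb.h>

static struct urb *example_alloc_ctrl_urb(struct usb_device *udev,
					   const struct usb_ctrlrequest *tmpl,
					   void *buf, int len,
					   usb_complete_t complete, void *ctx)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return NULL;

	/* setup packet lives in ordinary kernel memory now */
	req = kmemdup(tmpl, sizeof(*req), GFP_KERNEL);
	if (!req) {
		usb_free_urb(urb);
		return NULL;
	}

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)req, buf, len, complete, ctx);

	/* cleanup mirrors the hunk above: kfree(urb->setup_packet), then
	 * usb_free_urb(urb) */
	return urb;
}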
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index ddf7f9a1b33..61c76b13f0f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -416,13 +416,13 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
} else {
/* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs) {
+ if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
*flag = 'D';
return length;
}
/* Copy up to the first non-addressable segment */
- for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_sgs, i) {
if (length == 0 || PageHighMem(sg_page(sg)))
break;
this_len = min_t(unsigned int, sg->length, length);
@@ -954,8 +954,7 @@ static int mon_bin_queued(struct mon_reader_bin *rp)
/*
*/
-static int mon_bin_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct mon_reader_bin *rp = file->private_data;
// struct mon_bus* mbus = rp->r.m_bus;
@@ -1095,6 +1094,19 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
return ret;
}
+static long mon_bin_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = mon_bin_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
@@ -1148,14 +1160,13 @@ static long mon_bin_compat_ioctl(struct file *file,
return 0;
case MON_IOCG_STATS:
- return mon_bin_ioctl(NULL, file, cmd,
- (unsigned long) compat_ptr(arg));
+ return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
case MON_IOCQ_URB_LEN:
case MON_IOCQ_RING_SIZE:
case MON_IOCT_RING_SIZE:
case MON_IOCH_MFLUSH:
- return mon_bin_ioctl(NULL, file, cmd, arg);
+ return mon_bin_ioctl(file, cmd, arg);
default:
;
@@ -1239,7 +1250,7 @@ static const struct file_operations mon_fops_binary = {
.read = mon_bin_read,
/* .write = mon_text_write, */
.poll = mon_bin_poll,
- .ioctl = mon_bin_ioctl,
+ .unlocked_ioctl = mon_bin_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mon_bin_compat_ioctl,
#endif
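
The mon_bin conversion above is the standard .ioctl to .unlocked_ioctl migration: the handler loses its inode argument, and a thin wrapper preserves the old BKL coverage until it can be audited away; the compat_ioctl path can then call the handler directly, as the MON_IOCG_STATS hunk does. The generic shape, with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* ... original command dispatch, unchanged ... */
	return 0;
}

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = example_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
};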
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index e4af18b93c7..812dc288bb8 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -9,12 +9,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include "usb_mon.h"
-#include "../core/hcd.h"
+
static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 1becdc3837e..8ec94f15a73 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/fs.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include "usb_mon.h"
@@ -63,6 +64,6 @@ const struct file_operations mon_fops_stat = {
.read = mon_stat_read,
/* .write = mon_stat_write, */
/* .poll = mon_stat_poll, */
- /* .ioctl = mon_stat_ioctl, */
+ /* .unlocked_ioctl = mon_stat_ioctl, */
.release = mon_stat_release,
};
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 4d0be130f49..a545d65f6e5 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -159,11 +159,9 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
if (src == NULL)
return 'Z'; /* '0' would be not as pretty. */
} else {
- struct scatterlist *sg = urb->sg->sg;
+ struct scatterlist *sg = urb->sg;
- /* If IOMMU coalescing occurred, we cannot trust sg_page */
- if (urb->sg->nents != urb->num_sgs ||
- PageHighMem(sg_page(sg)))
+ if (PageHighMem(sg_page(sg)))
return 'D';
/* For the text interface we copy only the first sg buffer */
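
Both usbmon hunks reflect the URB scatter-gather rework: urb->sg is now a plain struct scatterlist pointer rather than a usb_sg_request, so it is walked directly with for_each_sg(), and the old nents/num_sgs comparison is replaced by the URB_DMA_SG_COMBINED flag. A small sketch of the new iteration; the function name and copy policy are illustrative:

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/usb.h>

static size_t example_copy_sg(struct urb *urb, char *dst, size_t len)
{
	struct scatterlist *sg;
	size_t copied = 0;
	size_t this_len;
	int i;

	/* stop at the first non-addressable (highmem) segment */
	for_each_sg(urb->sg, sg, urb->num_sgs, i) {
		if (len == 0 || PageHighMem(sg_page(sg)))
			break;
		this_len = min_t(size_t, sg->length, len);
		memcpy(dst + copied, sg_virt(sg), this_len);
		copied += this_len;
		len -= this_len;
	}
	return copied;
}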
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07fe490b44d..cfd38edfcf9 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -38,6 +38,7 @@ config USB_MUSB_SOC
default y if ARCH_DAVINCI
default y if ARCH_OMAP2430
default y if ARCH_OMAP3
+ default y if ARCH_OMAP4
default y if (BF54x && !BF544)
default y if (BF52x && !BF522 && !BF523)
@@ -50,6 +51,9 @@ comment "OMAP 243x high speed USB support"
comment "OMAP 343x high speed USB support"
depends on USB_MUSB_HDRC && ARCH_OMAP3
+comment "OMAP 44xx high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP4
+
comment "Blackfin high speed USB Support"
depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
@@ -153,7 +157,7 @@ config MUSB_PIO_ONLY
config USB_INVENTRA_DMA
bool
depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN
+ default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN || ARCH_OMAP4
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 3a485dabebb..9705f716386 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -22,6 +22,10 @@ ifeq ($(CONFIG_ARCH_OMAP3430),y)
musb_hdrc-objs += omap2430.o
endif
+ifeq ($(CONFIG_ARCH_OMAP4),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
ifeq ($(CONFIG_BF54x),y)
musb_hdrc-objs += blackfin.o
endif
@@ -38,6 +42,10 @@ ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
musb_hdrc-objs += musb_virthub.o musb_host.o
endif
+ifeq ($(CONFIG_DEBUG_FS),y)
+ musb_hdrc-objs += musb_debugfs.o
+endif
+
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
# PIO only, or DMA (several potential schemes).
@@ -64,12 +72,6 @@ endif
################################################################################
-# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_*
-
-ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
- EXTRA_CFLAGS += -DMUSB_AHB_ID
-endif
-
# Debugging
ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ec8d324237f..b611420a805 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -170,6 +170,13 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
retval = musb_interrupt(musb);
}
+ /* Start sampling the ID pin when the plug is removed from MUSB */
+ if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
+ || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ musb->a_wait_bcon = TIMER_DELAY;
+ }
+
spin_unlock_irqrestore(&musb->lock, flags);
return retval;
@@ -180,6 +187,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
struct musb *musb = (void *)_musb;
unsigned long flags;
u16 val;
+ static u8 toggle;
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->state) {
@@ -187,10 +195,44 @@ static void musb_conn_timer_handler(unsigned long _musb)
case OTG_STATE_A_WAIT_BCON:
/* Start a new session */
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val &= ~MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
val |= MUSB_DEVCTL_SESSION;
musb_writew(musb->mregs, MUSB_DEVCTL, val);
+ /* Check if musb is host or peripheral. */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
+ if (!(val & MUSB_DEVCTL_BDEVICE)) {
+ gpio_set_value(musb->config->gpio_vrsel, 1);
+ musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+ } else {
+ gpio_set_value(musb->config->gpio_vrsel, 0);
+ /* Ignore VBUSERROR and SUSPEND IRQ */
+ val = musb_readb(musb->mregs, MUSB_INTRUSBE);
+ val &= ~MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
+ val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
+ musb_writeb(musb->mregs, MUSB_INTRUSB, val);
+ if (is_otg_enabled(musb))
+ musb->xceiv->state = OTG_STATE_B_IDLE;
+ else
+ musb_writeb(musb->mregs, MUSB_POWER, MUSB_POWER_HSENAB);
+ }
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ break;
+ case OTG_STATE_B_IDLE:
+
+ if (!is_peripheral_enabled(musb))
+ break;
+ /* Start a new session. It seems that MUSB needs some time
+ * to recognize the type of plug that was inserted.
+ */
+ val = musb_readw(musb->mregs, MUSB_DEVCTL);
+ val |= MUSB_DEVCTL_SESSION;
+ musb_writew(musb->mregs, MUSB_DEVCTL, val);
val = musb_readw(musb->mregs, MUSB_DEVCTL);
+
if (!(val & MUSB_DEVCTL_BDEVICE)) {
gpio_set_value(musb->config->gpio_vrsel, 1);
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
@@ -205,12 +247,27 @@ static void musb_conn_timer_handler(unsigned long _musb)
val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
- val = MUSB_POWER_HSENAB;
- musb_writeb(musb->mregs, MUSB_POWER, val);
+ /* Toggle the Soft Conn bit, so that we can respond to
+ * the insertion of either an A-plug or a B-plug.
+ */
+ if (toggle) {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val &= ~MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 0;
+ } else {
+ val = musb_readb(musb->mregs, MUSB_POWER);
+ val |= MUSB_POWER_SOFTCONN;
+ musb_writeb(musb->mregs, MUSB_POWER, val);
+ toggle = 1;
+ }
+ /* The delay time is set to 1/4 second by default;
+ * shorten it if faster A-plug detection is needed
+ * in OTG mode.
+ */
+ mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
}
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
break;
-
default:
DBG(1, "%s state not handled\n", otg_state_string(musb));
break;
@@ -222,7 +279,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
void musb_platform_enable(struct musb *musb)
{
- if (is_host_enabled(musb)) {
+ if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
musb->a_wait_bcon = TIMER_DELAY;
}
@@ -232,16 +289,12 @@ void musb_platform_disable(struct musb *musb)
{
}
-static void bfin_vbus_power(struct musb *musb, int is_on, int sleeping)
-{
-}
-
static void bfin_set_vbus(struct musb *musb, int is_on)
{
- if (is_on)
- gpio_set_value(musb->config->gpio_vrsel, 1);
- else
- gpio_set_value(musb->config->gpio_vrsel, 0);
+ int value = musb->config->gpio_vrsel_active;
+ if (!is_on)
+ value = !value;
+ gpio_set_value(musb->config->gpio_vrsel, value);
DBG(1, "VBUS %s, devctl %02x "
/* otg %3x conf %08x prcm %08x */ "\n",
@@ -256,7 +309,7 @@ static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
{
- if (is_host_enabled(musb))
+ if (!is_otg_enabled(musb) && is_host_enabled(musb))
mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
}
@@ -270,7 +323,7 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
/*
@@ -339,23 +392,10 @@ int __init musb_platform_init(struct musb *musb)
return 0;
}
-int musb_platform_suspend(struct musb *musb)
-{
- return 0;
-}
-
-int musb_platform_resume(struct musb *musb)
-{
- return 0;
-}
-
-
int musb_platform_exit(struct musb *musb)
{
- bfin_vbus_power(musb, 0 /*off*/, 1);
gpio_free(musb->config->gpio_vrsel);
- musb_platform_suspend(musb);
return 0;
}
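
The Blackfin changes repeatedly re-arm musb_conn_timer from its own handler to keep polling the connection state. A stripped-down version of that self-rearming pattern, with an illustrative delay value (not the driver's TIMER_DELAY):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define EXAMPLE_POLL_DELAY	msecs_to_jiffies(250)	/* illustrative */

static struct timer_list example_timer;

/* Self-rearming poll handler, the same shape as musb_conn_timer_handler(). */
static void example_timer_handler(unsigned long data)
{
	/* ... sample the hardware state here ... */

	mod_timer(&example_timer, jiffies + EXAMPLE_POLL_DELAY);
}

static void example_timer_start(void)
{
	setup_timer(&example_timer, example_timer_handler, 0);
	mod_timer(&example_timer, jiffies + EXAMPLE_POLL_DELAY);
}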
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index ce2e16fee0d..57624361c1d 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -376,7 +376,7 @@ int musb_platform_set_mode(struct musb *musb, u8 mode)
return -EIO;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
void __iomem *tibase = musb->ctrl_base;
u32 revision;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 705cc4ad873..fad70bc8355 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -149,6 +149,87 @@ static inline struct musb *dev_to_musb(struct device *dev)
/*-------------------------------------------------------------------------*/
+#ifndef CONFIG_BLACKFIN
+static int musb_ulpi_read(struct otg_transceiver *otg, u32 offset)
+{
+ void __iomem *addr = otg->io_priv;
+ int i = 0;
+ u8 r;
+ u8 power;
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
+ * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ */
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ DBG(3, "ULPI read timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ }
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ return musb_readb(addr, MUSB_ULPI_REG_DATA);
+}
+
+static int musb_ulpi_write(struct otg_transceiver *otg,
+ u32 offset, u32 data)
+{
+ void __iomem *addr = otg->io_priv;
+ int i = 0;
+ u8 r = 0;
+ u8 power;
+
+ /* Make sure the transceiver is not in low power mode */
+ power = musb_readb(addr, MUSB_POWER);
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(addr, MUSB_POWER, power);
+
+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+ musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+
+ while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+ & MUSB_ULPI_REG_CMPLT)) {
+ i++;
+ if (i == 10000) {
+ DBG(3, "ULPI write timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
+ r &= ~MUSB_ULPI_REG_CMPLT;
+ musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
+
+ return 0;
+}
+#else
+#define musb_ulpi_read(a, b) NULL
+#define musb_ulpi_write(a, b, c) NULL
+#endif
+
+static struct otg_io_access_ops musb_ulpi_access = {
+ .read = musb_ulpi_read,
+ .write = musb_ulpi_write,
+};
+
+/*-------------------------------------------------------------------------*/
+
#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
/*
@@ -353,8 +434,7 @@ void musb_hnp_stop(struct musb *musb)
* which cause occasional OPT A "Did not receive reset after connect"
* errors.
*/
- musb->port1_status &=
- ~(1 << USB_PORT_FEAT_C_CONNECTION);
+ musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
#endif
@@ -530,8 +610,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb_writeb(mbase, MUSB_DEVCTL, devctl);
} else {
musb->port1_status |=
- (1 << USB_PORT_FEAT_OVER_CURRENT)
- | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ USB_PORT_STAT_OVERCURRENT
+ | (USB_PORT_STAT_C_OVERCURRENT << 16);
}
break;
default:
@@ -986,7 +1066,8 @@ static void musb_shutdown(struct platform_device *pdev)
* more than selecting one of a bunch of predefined configurations.
*/
#if defined(CONFIG_USB_TUSB6010) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
+ || defined(CONFIG_ARCH_OMAP4)
static ushort __initdata fifo_mode = 4;
#else
static ushort __initdata fifo_mode = 2;
@@ -996,24 +1077,13 @@ static ushort __initdata fifo_mode = 2;
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
-
-enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
-enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
-
-struct fifo_cfg {
- u8 hw_ep_num;
- enum fifo_style style;
- enum buf_mode mode;
- u16 maxpacket;
-};
-
/*
* tables defining fifo_mode values. define more if you like.
* for host side, make sure both halves of ep1 are set up.
*/
/* mode 0 - fits in 2KB */
-static struct fifo_cfg __initdata mode_0_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
@@ -1022,7 +1092,7 @@ static struct fifo_cfg __initdata mode_0_cfg[] = {
};
/* mode 1 - fits in 4KB */
-static struct fifo_cfg __initdata mode_1_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
@@ -1031,7 +1101,7 @@ static struct fifo_cfg __initdata mode_1_cfg[] = {
};
/* mode 2 - fits in 4KB */
-static struct fifo_cfg __initdata mode_2_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1041,7 +1111,7 @@ static struct fifo_cfg __initdata mode_2_cfg[] = {
};
/* mode 3 - fits in 4KB */
-static struct fifo_cfg __initdata mode_3_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1051,7 +1121,7 @@ static struct fifo_cfg __initdata mode_3_cfg[] = {
};
/* mode 4 - fits in 16KB */
-static struct fifo_cfg __initdata mode_4_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1082,7 +1152,7 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
};
/* mode 5 - fits in 8KB */
-static struct fifo_cfg __initdata mode_5_cfg[] = {
+static struct musb_fifo_cfg __initdata mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1120,7 +1190,7 @@ static struct fifo_cfg __initdata mode_5_cfg[] = {
*/
static int __init
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
- const struct fifo_cfg *cfg, u16 offset)
+ const struct musb_fifo_cfg *cfg, u16 offset)
{
void __iomem *mbase = musb->mregs;
int size = 0;
@@ -1191,17 +1261,23 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
-static struct fifo_cfg __initdata ep0_cfg = {
+static struct musb_fifo_cfg __initdata ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
static int __init ep_config_from_table(struct musb *musb)
{
- const struct fifo_cfg *cfg;
+ const struct musb_fifo_cfg *cfg;
unsigned i, n;
int offset;
struct musb_hw_ep *hw_ep = musb->endpoints;
+ if (musb->config->fifo_cfg) {
+ cfg = musb->config->fifo_cfg;
+ n = musb->config->fifo_cfg_size;
+ goto done;
+ }
+
switch (fifo_mode) {
default:
fifo_mode = 0;
@@ -1236,6 +1312,7 @@ static int __init ep_config_from_table(struct musb *musb)
musb_driver_name, fifo_mode);
+done:
offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
/* assert(offset > 0) */
@@ -1461,7 +1538,8 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
+ defined(CONFIG_ARCH_OMAP4)
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
@@ -1948,7 +2026,7 @@ bad_config:
* isp1504, non-OTG, etc) mostly hooking up through ULPI.
*/
musb->isr = generic_interrupt;
- status = musb_platform_init(musb);
+ status = musb_platform_init(musb, plat->board_data);
if (status < 0)
goto fail2;
@@ -1957,6 +2035,11 @@ bad_config:
goto fail3;
}
+ if (!musb->xceiv->io_ops) {
+ musb->xceiv->io_priv = musb->mregs;
+ musb->xceiv->io_ops = &musb_ulpi_access;
+ }
+
#ifndef CONFIG_MUSB_PIO_ONLY
if (use_dma && dev->dma_mask) {
struct dma_controller *c;
@@ -2057,10 +2140,14 @@ bad_config:
if (status < 0)
goto fail3;
+ status = musb_init_debugfs(musb);
+ if (status < 0)
+ goto fail4;
+
#ifdef CONFIG_SYSFS
status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
if (status)
- goto fail4;
+ goto fail5;
#endif
dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
@@ -2077,6 +2164,9 @@ bad_config:
return 0;
+fail5:
+ musb_exit_debugfs(musb);
+
fail4:
if (!is_otg_enabled(musb) && is_host_enabled(musb))
usb_remove_hcd(musb_to_hcd(musb));
@@ -2153,6 +2243,7 @@ static int __exit musb_remove(struct platform_device *pdev)
* - Peripheral mode: peripheral is deactivated (or never-activated)
* - OTG mode: both roles are deactivated (or never-activated)
*/
+ musb_exit_debugfs(musb);
musb_shutdown(pdev);
#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (musb->board_mode == MUSB_HOST)
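
Once musb_platform_init() returns and the core installs musb_ulpi_access as the transceiver's io_ops (with io_priv pointing at the MUSB register base), other code can reach the PHY through the generic otg_io_read()/otg_io_write() helpers. A sketch of a caller, assuming the ULPI_VENDOR_ID_* constants from <linux/usb/ulpi.h>:

#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>

static int example_read_ulpi_vendor_id(struct otg_transceiver *otg)
{
	int lo, hi;

	lo = otg_io_read(otg, ULPI_VENDOR_ID_LOW);
	hi = otg_io_read(otg, ULPI_VENDOR_ID_HIGH);
	if (lo < 0 || hi < 0)
		return -EIO;	/* e.g. -ETIMEDOUT from musb_ulpi_read() */

	return (hi << 8) | lo;
}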
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ac17b004909..b22d02dea7d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -69,7 +69,7 @@ struct musb_ep;
#include "musb_regs.h"
#include "musb_gadget.h"
-#include "../core/hcd.h"
+#include <linux/usb/hcd.h>
#include "musb_host.h"
@@ -213,7 +213,8 @@ enum musb_g_ep0_state {
*/
#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
- || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN)
+ || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \
+ || defined(CONFIG_ARCH_OMAP4)
/* REVISIT indexed access seemed to
* misbehave (on DaVinci) for at least peripheral IN ...
*/
@@ -596,7 +597,8 @@ extern void musb_hnp_stop(struct musb *musb);
extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+ defined(CONFIG_ARCH_OMAP4)
extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
#else
#define musb_platform_try_idle(x, y) do {} while (0)
@@ -608,7 +610,7 @@ extern int musb_platform_get_vbus_status(struct musb *musb);
#define musb_platform_get_vbus_status(x) 0
#endif
-extern int __init musb_platform_init(struct musb *musb);
+extern int __init musb_platform_init(struct musb *musb, void *board_data);
extern int musb_platform_exit(struct musb *musb);
#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 9fc1db44c72..d73afdbde3e 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -59,4 +59,17 @@ static inline int _dbg_level(unsigned l)
extern const char *otg_state_string(struct musb *);
+#ifdef CONFIG_DEBUG_FS
+extern int musb_init_debugfs(struct musb *musb);
+extern void musb_exit_debugfs(struct musb *musb);
+#else
+static inline int musb_init_debugfs(struct musb *musb)
+{
+ return 0;
+}
+static inline void musb_exit_debugfs(struct musb *musb)
+{
+}
+#endif
+
#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
new file mode 100644
index 00000000000..bba76af0c0c
--- /dev/null
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -0,0 +1,294 @@
+/*
+ * MUSB OTG driver debugfs support
+ *
+ * Copyright 2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#ifdef CONFIG_ARM
+#include <mach/hardware.h>
+#include <mach/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include <asm/uaccess.h>
+
+#include "musb_core.h"
+#include "musb_debug.h"
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+struct musb_register_map {
+ char *name;
+ unsigned offset;
+ unsigned size;
+};
+
+static const struct musb_register_map musb_regmap[] = {
+ { "FAddr", 0x00, 8 },
+ { "Power", 0x01, 8 },
+ { "Frame", 0x0c, 16 },
+ { "Index", 0x0e, 8 },
+ { "Testmode", 0x0f, 8 },
+ { "TxMaxPp", 0x10, 16 },
+ { "TxCSRp", 0x12, 16 },
+ { "RxMaxPp", 0x14, 16 },
+ { "RxCSR", 0x16, 16 },
+ { "RxCount", 0x18, 16 },
+ { "ConfigData", 0x1f, 8 },
+ { "DevCtl", 0x60, 8 },
+ { "MISC", 0x61, 8 },
+ { "TxFIFOsz", 0x62, 8 },
+ { "RxFIFOsz", 0x63, 8 },
+ { "TxFIFOadd", 0x64, 16 },
+ { "RxFIFOadd", 0x66, 16 },
+ { "VControl", 0x68, 32 },
+ { "HWVers", 0x6C, 16 },
+ { "EPInfo", 0x78, 8 },
+ { "RAMInfo", 0x79, 8 },
+ { "LinkInfo", 0x7A, 8 },
+ { "VPLen", 0x7B, 8 },
+ { "HS_EOF1", 0x7C, 8 },
+ { "FS_EOF1", 0x7D, 8 },
+ { "LS_EOF1", 0x7E, 8 },
+ { "SOFT_RST", 0x7F, 8 },
+ { "DMA_CNTLch0", 0x204, 16 },
+ { "DMA_ADDRch0", 0x208, 16 },
+ { "DMA_COUNTch0", 0x20C, 16 },
+ { "DMA_CNTLch1", 0x214, 16 },
+ { "DMA_ADDRch1", 0x218, 16 },
+ { "DMA_COUNTch1", 0x21C, 16 },
+ { "DMA_CNTLch2", 0x224, 16 },
+ { "DMA_ADDRch2", 0x228, 16 },
+ { "DMA_COUNTch2", 0x22C, 16 },
+ { "DMA_CNTLch3", 0x234, 16 },
+ { "DMA_ADDRch3", 0x238, 16 },
+ { "DMA_COUNTch3", 0x23C, 16 },
+ { "DMA_CNTLch4", 0x244, 16 },
+ { "DMA_ADDRch4", 0x248, 16 },
+ { "DMA_COUNTch4", 0x24C, 16 },
+ { "DMA_CNTLch5", 0x254, 16 },
+ { "DMA_ADDRch5", 0x258, 16 },
+ { "DMA_COUNTch5", 0x25C, 16 },
+ { "DMA_CNTLch6", 0x264, 16 },
+ { "DMA_ADDRch6", 0x268, 16 },
+ { "DMA_COUNTch6", 0x26C, 16 },
+ { "DMA_CNTLch7", 0x274, 16 },
+ { "DMA_ADDRch7", 0x278, 16 },
+ { "DMA_COUNTch7", 0x27C, 16 },
+ { } /* Terminating Entry */
+};
+
+static struct dentry *musb_debugfs_root;
+
+static int musb_regdump_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned i;
+
+ seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+
+ for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
+ switch (musb_regmap[i].size) {
+ case 8:
+ seq_printf(s, "%-12s: %02x\n", musb_regmap[i].name,
+ musb_readb(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 16:
+ seq_printf(s, "%-12s: %04x\n", musb_regmap[i].name,
+ musb_readw(musb->mregs, musb_regmap[i].offset));
+ break;
+ case 32:
+ seq_printf(s, "%-12s: %08x\n", musb_regmap[i].name,
+ musb_readl(musb->mregs, musb_regmap[i].offset));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int musb_regdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, musb_regdump_show, inode->i_private);
+}
+
+static int musb_test_mode_show(struct seq_file *s, void *unused)
+{
+ struct musb *musb = s->private;
+ unsigned test;
+
+ test = musb_readb(musb->mregs, MUSB_TESTMODE);
+
+ if (test & MUSB_TEST_FORCE_HOST)
+ seq_printf(s, "force host\n");
+
+ if (test & MUSB_TEST_FIFO_ACCESS)
+ seq_printf(s, "fifo access\n");
+
+ if (test & MUSB_TEST_FORCE_FS)
+ seq_printf(s, "force full-speed\n");
+
+ if (test & MUSB_TEST_FORCE_HS)
+ seq_printf(s, "force high-speed\n");
+
+ if (test & MUSB_TEST_PACKET)
+ seq_printf(s, "test packet\n");
+
+ if (test & MUSB_TEST_K)
+ seq_printf(s, "test K\n");
+
+ if (test & MUSB_TEST_J)
+ seq_printf(s, "test J\n");
+
+ if (test & MUSB_TEST_SE0_NAK)
+ seq_printf(s, "test SE0 NAK\n");
+
+ return 0;
+}
+
+static const struct file_operations musb_regdump_fops = {
+ .open = musb_regdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int musb_test_mode_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return single_open(file, musb_test_mode_show, inode->i_private);
+}
+
+static ssize_t musb_test_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct musb *musb = file->private_data;
+ u8 test = 0;
+ char buf[18];
+
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "force host", 9))
+ test = MUSB_TEST_FORCE_HOST;
+
+ if (!strncmp(buf, "fifo access", 11))
+ test = MUSB_TEST_FIFO_ACCESS;
+
+ if (!strncmp(buf, "force full-speed", 15))
+ test = MUSB_TEST_FORCE_FS;
+
+ if (!strncmp(buf, "force high-speed", 15))
+ test = MUSB_TEST_FORCE_HS;
+
+ if (!strncmp(buf, "test packet", 10)) {
+ test = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ }
+
+ if (!strncmp(buf, "test K", 6))
+ test = MUSB_TEST_K;
+
+ if (!strncmp(buf, "test J", 6))
+ test = MUSB_TEST_J;
+
+ if (!strncmp(buf, "test SE0 NAK", 12))
+ test = MUSB_TEST_SE0_NAK;
+
+ musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+
+ return count;
+}
+
+static const struct file_operations musb_test_mode_fops = {
+ .open = musb_test_mode_open,
+ .write = musb_test_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int __init musb_init_debugfs(struct musb *musb)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir("musb", NULL);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ goto err0;
+ }
+
+ file = debugfs_create_file("regdump", S_IRUGO, root, musb,
+ &musb_regdump_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
+ file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR,
+ root, musb, &musb_test_mode_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
+ musb_debugfs_root = root;
+
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+
+void /* __init_or_exit */ musb_exit_debugfs(struct musb *musb)
+{
+ debugfs_remove_recursive(musb_debugfs_root);
+}
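
With debugfs mounted (mount -t debugfs none /sys/kernel/debug), the new entries appear as /sys/kernel/debug/musb/regdump and /sys/kernel/debug/musb/testmode: reading regdump prints the register map declared above, and writing one of the recognized strings (for example "test packet") to testmode programs MUSB_TESTMODE.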
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 53d06451f82..21b9788d024 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -351,6 +351,31 @@ __acquires(musb->lock)
musb->test_mode_nr =
MUSB_TEST_PACKET;
break;
+
+ case 0xc0:
+ /* TEST_FORCE_HS */
+ pr_debug("TEST_FORCE_HS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HS;
+ break;
+ case 0xc1:
+ /* TEST_FORCE_FS */
+ pr_debug("TEST_FORCE_FS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_FS;
+ break;
+ case 0xc2:
+ /* TEST_FIFO_ACCESS */
+ pr_debug("TEST_FIFO_ACCESS\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FIFO_ACCESS;
+ break;
+ case 0xc3:
+ /* TEST_FORCE_HOST */
+ pr_debug("TEST_FORCE_HOST\n");
+ musb->test_mode_nr =
+ MUSB_TEST_FORCE_HOST;
+ break;
default:
goto stall;
}
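
As with the standard test modes handled earlier in this switch, the four new selectors (0xc0 through 0xc3) are taken from the high byte of wIndex in a SET_FEATURE(TEST_MODE) request. A hedged host-side sketch of issuing such a request with usb_control_msg(); the helper name is illustrative:

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int example_set_test_mode(struct usb_device *udev, u8 selector)
{
	/* test selector travels in the upper byte of wIndex */
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			       USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
			       USB_DEVICE_TEST_MODE, selector << 8,
			       NULL, 0, USB_CTRL_SET_TIMEOUT);
}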
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index fa55aacc385..244267527a6 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -75,6 +75,10 @@
/* MUSB ULPI VBUSCONTROL */
#define MUSB_ULPI_USE_EXTVBUS 0x01
#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+/* ULPI_REG_CONTROL */
+#define MUSB_ULPI_REG_REQ (1 << 0)
+#define MUSB_ULPI_REG_CMPLT (1 << 1)
+#define MUSB_ULPI_RDN_WR (1 << 2)
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
@@ -251,6 +255,12 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
+#define MUSB_ULPI_INT_MASK 0x72 /* 8 bit */
+#define MUSB_ULPI_INT_SRC 0x73 /* 8 bit */
+#define MUSB_ULPI_REG_DATA 0x74 /* 8 bit */
+#define MUSB_ULPI_REG_ADDR 0x75 /* 8 bit */
+#define MUSB_ULPI_REG_CONTROL 0x76 /* 8 bit */
+#define MUSB_ULPI_RAW_DATA 0x77 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 7775e1c0a21..92e85e027cf 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -183,8 +183,8 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
void musb_root_disconnect(struct musb *musb)
{
- musb->port1_status = (1 << USB_PORT_FEAT_POWER)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
+ musb->port1_status = USB_PORT_STAT_POWER
+ | (USB_PORT_STAT_C_CONNECTION << 16);
usb_hcd_poll_rh_status(musb_to_hcd(musb));
musb->is_active = 0;
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 613f95a058f..f763d62f151 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -102,26 +102,16 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
{
- u32 count = musb_readw(mbase,
+ return musb_readl(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
-
- count = count << 16;
-
- count |= musb_readw(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
-
- return count;
}
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
- musb_writew(mbase,
- MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
- ((u16)((u32) len & 0xFFFF)));
- musb_writew(mbase,
+ musb_writel(mbase,
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
- ((u16)(((u32) len >> 16) & 0xFFFF)));
+ len);
}
#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 82592633502..e06d65e36bf 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -32,17 +32,11 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
#include <plat/mux.h>
#include "musb_core.h"
#include "omap2430.h"
-#ifdef CONFIG_ARCH_OMAP3430
-#define get_cpu_rev() 2
-#endif
-
static struct timer_list musb_idle_timer;
@@ -145,10 +139,6 @@ void musb_platform_enable(struct musb *musb)
void musb_platform_disable(struct musb *musb)
{
}
-static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
-{
-}
-
static void omap_set_vbus(struct musb *musb, int is_on)
{
u8 devctl;
@@ -199,9 +189,10 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
return 0;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
u32 l;
+ struct omap_musb_board_data *data = board_data;
#if defined(CONFIG_ARCH_OMAP2430)
omap_cfg_reg(AE5_2430_USB0HS_STP);
@@ -235,7 +226,15 @@ int __init musb_platform_init(struct musb *musb)
musb_writel(musb->mregs, OTG_SYSCONFIG, l);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
- l |= ULPI_12PIN;
+
+ if (data->interface_type == MUSB_INTERFACE_UTMI) {
+ /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
+ l &= ~ULPI_12PIN; /* Disable ULPI */
+ l |= UTMI_8BIT; /* Enable UTMI */
+ } else {
+ l |= ULPI_12PIN;
+ }
+
musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
@@ -246,8 +245,6 @@ int __init musb_platform_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
-
if (is_host_enabled(musb))
musb->board_set_vbus = omap_set_vbus;
@@ -272,7 +269,7 @@ void musb_platform_restore_context(struct musb *musb,
}
#endif
-int musb_platform_suspend(struct musb *musb)
+static int musb_platform_suspend(struct musb *musb)
{
u32 l;
@@ -327,8 +324,6 @@ static int musb_platform_resume(struct musb *musb)
int musb_platform_exit(struct musb *musb)
{
- omap_vbus_power(musb, 0 /*off*/, 1);
-
musb_platform_suspend(musb);
return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 60d3938cafc..05c077f8f9a 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1104,7 +1104,7 @@ err:
return -ENODEV;
}
-int __init musb_platform_init(struct musb *musb)
+int __init musb_platform_init(struct musb *musb, void *board_data)
{
struct platform_device *pdev;
struct resource *mem;
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 78a20970926..45696949241 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1654,7 +1654,7 @@ static int __init isp_init(void)
{
return i2c_add_driver(&isp1301_driver);
}
-module_init(isp_init);
+subsys_initcall(isp_init);
static void __exit isp_exit(void)
{
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 223cdf46ccb..0e8888588d4 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
@@ -41,81 +42,7 @@
/* Register defines */
-#define VENDOR_ID_LO 0x00
-#define VENDOR_ID_HI 0x01
-#define PRODUCT_ID_LO 0x02
-#define PRODUCT_ID_HI 0x03
-
-#define FUNC_CTRL 0x04
-#define FUNC_CTRL_SET 0x05
-#define FUNC_CTRL_CLR 0x06
-#define FUNC_CTRL_SUSPENDM (1 << 6)
-#define FUNC_CTRL_RESET (1 << 5)
-#define FUNC_CTRL_OPMODE_MASK (3 << 3) /* bits 3 and 4 */
-#define FUNC_CTRL_OPMODE_NORMAL (0 << 3)
-#define FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
-#define FUNC_CTRL_OPMODE_DISABLE_BIT_NRZI (2 << 3)
-#define FUNC_CTRL_TERMSELECT (1 << 2)
-#define FUNC_CTRL_XCVRSELECT_MASK (3 << 0) /* bits 0 and 1 */
-#define FUNC_CTRL_XCVRSELECT_HS (0 << 0)
-#define FUNC_CTRL_XCVRSELECT_FS (1 << 0)
-#define FUNC_CTRL_XCVRSELECT_LS (2 << 0)
-#define FUNC_CTRL_XCVRSELECT_FS4LS (3 << 0)
-
-#define IFC_CTRL 0x07
-#define IFC_CTRL_SET 0x08
-#define IFC_CTRL_CLR 0x09
-#define IFC_CTRL_INTERFACE_PROTECT_DISABLE (1 << 7)
-#define IFC_CTRL_AUTORESUME (1 << 4)
-#define IFC_CTRL_CLOCKSUSPENDM (1 << 3)
-#define IFC_CTRL_CARKITMODE (1 << 2)
-#define IFC_CTRL_FSLSSERIALMODE_3PIN (1 << 1)
-
-#define TWL4030_OTG_CTRL 0x0A
-#define TWL4030_OTG_CTRL_SET 0x0B
-#define TWL4030_OTG_CTRL_CLR 0x0C
-#define TWL4030_OTG_CTRL_DRVVBUS (1 << 5)
-#define TWL4030_OTG_CTRL_CHRGVBUS (1 << 4)
-#define TWL4030_OTG_CTRL_DISCHRGVBUS (1 << 3)
-#define TWL4030_OTG_CTRL_DMPULLDOWN (1 << 2)
-#define TWL4030_OTG_CTRL_DPPULLDOWN (1 << 1)
-#define TWL4030_OTG_CTRL_IDPULLUP (1 << 0)
-
-#define USB_INT_EN_RISE 0x0D
-#define USB_INT_EN_RISE_SET 0x0E
-#define USB_INT_EN_RISE_CLR 0x0F
-#define USB_INT_EN_FALL 0x10
-#define USB_INT_EN_FALL_SET 0x11
-#define USB_INT_EN_FALL_CLR 0x12
-#define USB_INT_STS 0x13
-#define USB_INT_LATCH 0x14
-#define USB_INT_IDGND (1 << 4)
-#define USB_INT_SESSEND (1 << 3)
-#define USB_INT_SESSVALID (1 << 2)
-#define USB_INT_VBUSVALID (1 << 1)
-#define USB_INT_HOSTDISCONNECT (1 << 0)
-
-#define CARKIT_CTRL 0x19
-#define CARKIT_CTRL_SET 0x1A
-#define CARKIT_CTRL_CLR 0x1B
-#define CARKIT_CTRL_MICEN (1 << 6)
-#define CARKIT_CTRL_SPKRIGHTEN (1 << 5)
-#define CARKIT_CTRL_SPKLEFTEN (1 << 4)
-#define CARKIT_CTRL_RXDEN (1 << 3)
-#define CARKIT_CTRL_TXDEN (1 << 2)
-#define CARKIT_CTRL_IDGNDDRV (1 << 1)
-#define CARKIT_CTRL_CARKITPWR (1 << 0)
-#define CARKIT_PLS_CTRL 0x22
-#define CARKIT_PLS_CTRL_SET 0x23
-#define CARKIT_PLS_CTRL_CLR 0x24
-#define CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
-#define CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
-#define CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
-#define CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
-
#define MCPC_CTRL 0x30
-#define MCPC_CTRL_SET 0x31
-#define MCPC_CTRL_CLR 0x32
#define MCPC_CTRL_RTSOL (1 << 7)
#define MCPC_CTRL_EXTSWR (1 << 6)
#define MCPC_CTRL_EXTSWC (1 << 5)
@@ -125,8 +52,6 @@
#define MCPC_CTRL_HS_UART (1 << 0)
#define MCPC_IO_CTRL 0x33
-#define MCPC_IO_CTRL_SET 0x34
-#define MCPC_IO_CTRL_CLR 0x35
#define MCPC_IO_CTRL_MICBIASEN (1 << 5)
#define MCPC_IO_CTRL_CTS_NPU (1 << 4)
#define MCPC_IO_CTRL_RXD_PU (1 << 3)
@@ -135,19 +60,13 @@
#define MCPC_IO_CTRL_RTSTYP (1 << 0)
#define MCPC_CTRL2 0x36
-#define MCPC_CTRL2_SET 0x37
-#define MCPC_CTRL2_CLR 0x38
#define MCPC_CTRL2_MCPC_CK_EN (1 << 0)
#define OTHER_FUNC_CTRL 0x80
-#define OTHER_FUNC_CTRL_SET 0x81
-#define OTHER_FUNC_CTRL_CLR 0x82
#define OTHER_FUNC_CTRL_BDIS_ACON_EN (1 << 4)
#define OTHER_FUNC_CTRL_FIVEWIRE_MODE (1 << 2)
#define OTHER_IFC_CTRL 0x83
-#define OTHER_IFC_CTRL_SET 0x84
-#define OTHER_IFC_CTRL_CLR 0x85
#define OTHER_IFC_CTRL_OE_INT_EN (1 << 6)
#define OTHER_IFC_CTRL_CEA2011_MODE (1 << 5)
#define OTHER_IFC_CTRL_FSLSSERIALMODE_4PIN (1 << 4)
@@ -156,11 +75,7 @@
#define OTHER_IFC_CTRL_ALT_INT_REROUTE (1 << 0)
#define OTHER_INT_EN_RISE 0x86
-#define OTHER_INT_EN_RISE_SET 0x87
-#define OTHER_INT_EN_RISE_CLR 0x88
#define OTHER_INT_EN_FALL 0x89
-#define OTHER_INT_EN_FALL_SET 0x8A
-#define OTHER_INT_EN_FALL_CLR 0x8B
#define OTHER_INT_STS 0x8C
#define OTHER_INT_LATCH 0x8D
#define OTHER_INT_VB_SESS_VLD (1 << 7)
@@ -178,13 +93,9 @@
#define ID_RES_GND (1 << 0)
#define POWER_CTRL 0xAC
-#define POWER_CTRL_SET 0xAD
-#define POWER_CTRL_CLR 0xAE
#define POWER_CTRL_OTG_ENAB (1 << 5)
#define OTHER_IFC_CTRL2 0xAF
-#define OTHER_IFC_CTRL2_SET 0xB0
-#define OTHER_IFC_CTRL2_CLR 0xB1
#define OTHER_IFC_CTRL2_ULPI_STP_LOW (1 << 4)
#define OTHER_IFC_CTRL2_ULPI_TXEN_POL (1 << 3)
#define OTHER_IFC_CTRL2_ULPI_4PIN_2430 (1 << 2)
@@ -193,14 +104,10 @@
#define OTHER_IFC_CTRL2_USB_INT_OUTSEL_INT2N (1 << 0)
#define REG_CTRL_EN 0xB2
-#define REG_CTRL_EN_SET 0xB3
-#define REG_CTRL_EN_CLR 0xB4
#define REG_CTRL_ERROR 0xB5
#define ULPI_I2C_CONFLICT_INTEN (1 << 0)
#define OTHER_FUNC_CTRL2 0xB8
-#define OTHER_FUNC_CTRL2_SET 0xB9
-#define OTHER_FUNC_CTRL2_CLR 0xBA
#define OTHER_FUNC_CTRL2_VBAT_TIMER_EN (1 << 0)
/* following registers do not have separate _clr and _set registers */
@@ -328,13 +235,13 @@ static inline int twl4030_usb_read(struct twl4030_usb *twl, u8 address)
static inline int
twl4030_usb_set_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
- return twl4030_usb_write(twl, reg + 1, bits);
+ return twl4030_usb_write(twl, ULPI_SET(reg), bits);
}
static inline int
twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
{
- return twl4030_usb_write(twl, reg + 2, bits);
+ return twl4030_usb_write(twl, ULPI_CLR(reg), bits);
}
/*-------------------------------------------------------------------------*/
@@ -393,11 +300,12 @@ static void twl4030_usb_set_mode(struct twl4030_usb *twl, int mode)
switch (mode) {
case T2_USB_MODE_ULPI:
- twl4030_usb_clear_bits(twl, IFC_CTRL, IFC_CTRL_CARKITMODE);
+ twl4030_usb_clear_bits(twl, ULPI_IFC_CTRL,
+ ULPI_IFC_CTRL_CARKITMODE);
twl4030_usb_set_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
- twl4030_usb_clear_bits(twl, FUNC_CTRL,
- FUNC_CTRL_XCVRSELECT_MASK |
- FUNC_CTRL_OPMODE_MASK);
+ twl4030_usb_clear_bits(twl, ULPI_FUNC_CTRL,
+ ULPI_FUNC_CTRL_XCVRSEL_MASK |
+ ULPI_FUNC_CTRL_OPMODE_MASK);
break;
case -1:
/* FIXME: power on defaults */
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 9010225e0d0..b1b34693294 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -29,28 +29,6 @@
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
-/* ULPI register addresses */
-#define ULPI_VID_LOW 0x00 /* Vendor ID low */
-#define ULPI_VID_HIGH 0x01 /* Vendor ID high */
-#define ULPI_PID_LOW 0x02 /* Product ID low */
-#define ULPI_PID_HIGH 0x03 /* Product ID high */
-#define ULPI_ITFCTL 0x07 /* Interface Control */
-#define ULPI_OTGCTL 0x0A /* OTG Control */
-
-/* add to above register address to access Set/Clear functions */
-#define ULPI_REG_SET 0x01
-#define ULPI_REG_CLEAR 0x02
-
-/* ULPI OTG Control Register bits */
-#define ID_PULL_UP (1 << 0) /* enable ID Pull Up */
-#define DP_PULL_DOWN (1 << 1) /* enable DP Pull Down */
-#define DM_PULL_DOWN (1 << 2) /* enable DM Pull Down */
-#define DISCHRG_VBUS (1 << 3) /* Discharge Vbus */
-#define CHRG_VBUS (1 << 4) /* Charge Vbus */
-#define DRV_VBUS (1 << 5) /* Drive Vbus */
-#define DRV_VBUS_EXT (1 << 6) /* Drive Vbus external */
-#define USE_EXT_VBUS_IND (1 << 7) /* Use ext. Vbus indicator */
-
#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0)
@@ -65,28 +43,28 @@ static int ulpi_set_flags(struct otg_transceiver *otg)
unsigned int flags = 0;
if (otg->flags & USB_OTG_PULLUP_ID)
- flags |= ID_PULL_UP;
+ flags |= ULPI_OTG_CTRL_ID_PULLUP;
if (otg->flags & USB_OTG_PULLDOWN_DM)
- flags |= DM_PULL_DOWN;
+ flags |= ULPI_OTG_CTRL_DM_PULLDOWN;
if (otg->flags & USB_OTG_PULLDOWN_DP)
- flags |= DP_PULL_DOWN;
+ flags |= ULPI_OTG_CTRL_DP_PULLDOWN;
if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
- flags |= USE_EXT_VBUS_IND;
+ flags |= ULPI_OTG_CTRL_EXTVBUSIND;
- return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+ return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
}
static int ulpi_init(struct otg_transceiver *otg)
{
int i, vid, pid;
- vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) |
- otg_io_read(otg, ULPI_VID_LOW);
- pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) |
- otg_io_read(otg, ULPI_PID_LOW);
+ vid = (otg_io_read(otg, ULPI_VENDOR_ID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_VENDOR_ID_LOW);
+ pid = (otg_io_read(otg, ULPI_PRODUCT_ID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_PRODUCT_ID_LOW);
pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
@@ -100,19 +78,19 @@ static int ulpi_init(struct otg_transceiver *otg)
static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
{
- unsigned int flags = otg_io_read(otg, ULPI_OTGCTL);
+ unsigned int flags = otg_io_read(otg, ULPI_OTG_CTRL);
- flags &= ~(DRV_VBUS | DRV_VBUS_EXT);
+ flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
if (on) {
if (otg->flags & USB_OTG_DRV_VBUS)
- flags |= DRV_VBUS;
+ flags |= ULPI_OTG_CTRL_DRVVBUS;
if (otg->flags & USB_OTG_DRV_VBUS_EXT)
- flags |= DRV_VBUS_EXT;
+ flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
}
- return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+ return otg_io_write(otg, flags, ULPI_SET(ULPI_OTG_CTRL));
}
struct otg_transceiver *
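
Both the twl4030 and generic ULPI hunks converge on the shared register map in <linux/usb/ulpi.h>, where each register has shadow set/clear addresses reached through ULPI_SET()/ULPI_CLR() (register + 1 and + 2, matching the offsets visible in the removed twl4030 defines). A sketch of the set/clear idiom using the constants referenced above:

#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>

/* Set, then clear, the ID pull-up through the shadow SET/CLR addresses. */
static int example_pulse_id_pullup(struct otg_transceiver *otg)
{
	int ret;

	ret = otg_io_write(otg, ULPI_OTG_CTRL_ID_PULLUP,
			   ULPI_SET(ULPI_OTG_CTRL));
	if (ret)
		return ret;

	return otg_io_write(otg, ULPI_OTG_CTRL_ID_PULLUP,
			    ULPI_CLR(ULPI_OTG_CTRL));
}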
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a0ecb42cb33..bd8aab0ef1c 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -425,6 +425,16 @@ config USB_SERIAL_MOS7720
To compile this driver as a module, choose M here: the
module will be called mos7720.
+config USB_SERIAL_MOS7715_PARPORT
+ bool "Support for parallel port on the Moschip 7715"
+ depends on USB_SERIAL_MOS7720
+ depends on PARPORT=y || PARPORT=USB_SERIAL_MOS7720
+ select PARPORT_NOT_PC
+ ---help---
+ Say Y if you have a Moschip 7715 device and would like to use
+ the parallel port it provides. The port will register with
+ the parport subsystem as a low-level driver.
+
config USB_SERIAL_MOS7840
tristate "USB Moschip 7840/7820 USB Serial Driver"
---help---
@@ -485,6 +495,7 @@ config USB_SERIAL_QCAUX
config USB_SERIAL_QUALCOMM
tristate "USB Qualcomm Serial modem"
+ select USB_SERIAL_WWAN
help
Say Y here if you have a Qualcomm USB modem device. These are
usually wireless cellular modems.
@@ -576,8 +587,12 @@ config USB_SERIAL_XIRCOM
To compile this driver as a module, choose M here: the
module will be called keyspan_pda.
+config USB_SERIAL_WWAN
+ tristate
+
config USB_SERIAL_OPTION
tristate "USB driver for GSM and CDMA modems"
+ select USB_SERIAL_WWAN
help
Say Y here if you have a GSM or CDMA modem that's connected to USB.
@@ -619,6 +634,14 @@ config USB_SERIAL_VIVOPAY_SERIAL
To compile this driver as a module, choose M here: the
module will be called vivopay-serial.
+config USB_SERIAL_ZIO
+ tristate "ZIO Motherboard USB serial interface driver"
+ help
+ Say Y here if you want to use ZIO Motherboard.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zio.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 83c9e431a56..e54c728c016 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -52,9 +52,11 @@ obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
+obj-$(CONFIG_USB_SERIAL_WWAN) += usb_wwan.o
obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
+obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 4fd7af98b1a..0db6ace16f7 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -1,7 +1,9 @@
/*
* AIRcable USB Bluetooth Dongle Driver.
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2006 Manuel Francisco Naranjo (naranjo.manuel@gmail.com)
+ *
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
@@ -42,10 +44,10 @@
*
*/
+#include <asm/unaligned.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
-#include <linux/circ_buf.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
@@ -55,16 +57,12 @@ static int debug;
#define AIRCABLE_VID 0x16CA
#define AIRCABLE_USB_PID 0x1502
-/* write buffer size defines */
-#define AIRCABLE_BUF_SIZE 2048
-
/* Protocol Stuff */
#define HCI_HEADER_LENGTH 0x4
#define TX_HEADER_0 0x20
#define TX_HEADER_1 0x29
#define RX_HEADER_0 0x00
#define RX_HEADER_1 0x20
-#define MAX_HCI_FRAMESIZE 60
#define HCI_COMPLETE_FRAME 64
/* rx_flags */
@@ -74,8 +72,8 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.0b2"
-#define DRIVER_AUTHOR "Naranjo, Manuel Francisco <naranjo.manuel@gmail.com>"
+#define DRIVER_VERSION "v2.0"
+#define DRIVER_AUTHOR "Naranjo, Manuel Francisco <naranjo.manuel@gmail.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
@@ -85,226 +83,21 @@ static const struct usb_device_id id_table[] = {
};
MODULE_DEVICE_TABLE(usb, id_table);
-
-/* Internal Structure */
-struct aircable_private {
- spinlock_t rx_lock; /* spinlock for the receive lines */
- struct circ_buf *tx_buf; /* write buffer */
- struct circ_buf *rx_buf; /* read buffer */
- int rx_flags; /* for throttilng */
- struct work_struct rx_work; /* work cue for the receiving line */
- struct usb_serial_port *port; /* USB port with which associated */
-};
-
-/* Private methods */
-
-/* Circular Buffer Methods, code from ti_usb_3410_5052 used */
-/*
- * serial_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void serial_buf_clear(struct circ_buf *cb)
-{
- cb->head = cb->tail = 0;
-}
-
-/*
- * serial_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct circ_buf *serial_buf_alloc(void)
-{
- struct circ_buf *cb;
- cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
- cb->buf = kmalloc(AIRCABLE_BUF_SIZE, GFP_KERNEL);
- if (cb->buf == NULL) {
- kfree(cb);
- return NULL;
- }
- serial_buf_clear(cb);
- return cb;
-}
-
-/*
- * serial_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void serial_buf_free(struct circ_buf *cb)
-{
- kfree(cb->buf);
- kfree(cb);
-}
-
-/*
- * serial_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static int serial_buf_data_avail(struct circ_buf *cb)
-{
- return CIRC_CNT(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
-}
-
-/*
- * serial_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static int serial_buf_put(struct circ_buf *cb, const char *buf, int count)
-{
- int c, ret = 0;
- while (1) {
- c = CIRC_SPACE_TO_END(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(cb->buf + cb->head, buf, c);
- cb->head = (cb->head + c) & (AIRCABLE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret = c;
- }
- return ret;
-}
-
-/*
- * serial_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static int serial_buf_get(struct circ_buf *cb, char *buf, int count)
-{
- int c, ret = 0;
- while (1) {
- c = CIRC_CNT_TO_END(cb->head, cb->tail, AIRCABLE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(buf, cb->buf + cb->tail, c);
- cb->tail = (cb->tail + c) & (AIRCABLE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret = c;
- }
- return ret;
-}
-
-/* End of circula buffer methods */
-
-static void aircable_send(struct usb_serial_port *port)
+static int aircable_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- int count, result;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- unsigned char *buf;
- __le16 *dbuf;
- dbg("%s - port %d", __func__, port->number);
- if (port->write_urb_busy)
- return;
-
- count = min(serial_buf_data_avail(priv->tx_buf), MAX_HCI_FRAMESIZE);
- if (count == 0)
- return;
-
- buf = kzalloc(count + HCI_HEADER_LENGTH, GFP_ATOMIC);
- if (!buf) {
- dev_err(&port->dev, "%s- kzalloc(%d) failed.\n",
- __func__, count + HCI_HEADER_LENGTH);
- return;
- }
+ int count;
+ unsigned char *buf = dest;
+ count = kfifo_out_locked(&port->write_fifo, buf + HCI_HEADER_LENGTH,
+ size - HCI_HEADER_LENGTH, &port->lock);
buf[0] = TX_HEADER_0;
buf[1] = TX_HEADER_1;
- dbuf = (__le16 *)&buf[2];
- *dbuf = cpu_to_le16((u16)count);
- serial_buf_get(priv->tx_buf, buf + HCI_HEADER_LENGTH,
- MAX_HCI_FRAMESIZE);
-
- memcpy(port->write_urb->transfer_buffer, buf,
- count + HCI_HEADER_LENGTH);
-
- kfree(buf);
- port->write_urb_busy = 1;
- usb_serial_debug_data(debug, &port->dev, __func__,
- count + HCI_HEADER_LENGTH,
- port->write_urb->transfer_buffer);
- port->write_urb->transfer_buffer_length = count + HCI_HEADER_LENGTH;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
-
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- port->write_urb_busy = 0;
- }
+ put_unaligned_le16(count, &buf[2]);
- schedule_work(&port->work);
+ return count + HCI_HEADER_LENGTH;
}
-static void aircable_read(struct work_struct *work)
-{
- struct aircable_private *priv =
- container_of(work, struct aircable_private, rx_work);
- struct usb_serial_port *port = priv->port;
- struct tty_struct *tty;
- unsigned char *data;
- int count;
- if (priv->rx_flags & THROTTLED) {
- if (priv->rx_flags & ACTUALLY_THROTTLED)
- schedule_work(&priv->rx_work);
- return;
- }
-
- /* By now I will flush data to the tty in packages of no more than
- * 64 bytes, to ensure I do not get throttled.
- * Ask USB mailing list for better aproach.
- */
- tty = tty_port_tty_get(&port->port);
-
- if (!tty) {
- schedule_work(&priv->rx_work);
- dev_err(&port->dev, "%s - No tty available\n", __func__);
- return ;
- }
-
- count = min(64, serial_buf_data_avail(priv->rx_buf));
-
- if (count <= 0)
- goto out; /* We have finished sending everything. */
-
- tty_prepare_flip_string(tty, &data, count);
- if (!data) {
- dev_err(&port->dev, "%s- kzalloc(%d) failed.",
- __func__, count);
- goto out;
- }
-
- serial_buf_get(priv->rx_buf, data, count);
-
- tty_flip_buffer_push(tty);
-
- if (serial_buf_data_avail(priv->rx_buf))
- schedule_work(&priv->rx_work);
-out:
- tty_kref_put(tty);
- return;
-}
-/* End of private methods */
-
static int aircable_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -330,247 +123,50 @@ static int aircable_probe(struct usb_serial *serial,
return 0;
}
-static int aircable_attach(struct usb_serial *serial)
-{
- struct usb_serial_port *port = serial->port[0];
- struct aircable_private *priv;
-
- priv = kzalloc(sizeof(struct aircable_private), GFP_KERNEL);
- if (!priv) {
- dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n", __func__,
- sizeof(struct aircable_private));
- return -ENOMEM;
- }
-
- /* Allocation of Circular Buffers */
- priv->tx_buf = serial_buf_alloc();
- if (priv->tx_buf == NULL) {
- kfree(priv);
- return -ENOMEM;
- }
-
- priv->rx_buf = serial_buf_alloc();
- if (priv->rx_buf == NULL) {
- kfree(priv->tx_buf);
- kfree(priv);
- return -ENOMEM;
- }
-
- priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- priv->port = port;
- INIT_WORK(&priv->rx_work, aircable_read);
-
- usb_set_serial_port_data(serial->port[0], priv);
-
- return 0;
-}
-
-static void aircable_release(struct usb_serial *serial)
+static int aircable_process_packet(struct tty_struct *tty,
+ struct usb_serial_port *port, int has_headers,
+ char *packet, int len)
{
-
- struct usb_serial_port *port = serial->port[0];
- struct aircable_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s", __func__);
-
- if (priv) {
- serial_buf_free(priv->tx_buf);
- serial_buf_free(priv->rx_buf);
- kfree(priv);
+ if (has_headers) {
+ len -= HCI_HEADER_LENGTH;
+ packet += HCI_HEADER_LENGTH;
}
-}
-
-static int aircable_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- return serial_buf_data_avail(priv->tx_buf);
-}
-
-static int aircable_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *source, int count)
-{
- struct aircable_private *priv = usb_get_serial_port_data(port);
- int temp;
-
- dbg("%s - port %d, %d bytes", __func__, port->number, count);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count, source);
-
- if (!count) {
- dbg("%s - write request of 0 bytes", __func__);
- return count;
+ if (len <= 0) {
+ dbg("%s - malformed packet", __func__);
+ return 0;
}
- temp = serial_buf_put(priv->tx_buf, source, count);
-
- aircable_send(port);
-
- if (count > AIRCABLE_BUF_SIZE)
- count = AIRCABLE_BUF_SIZE;
-
- return count;
+ tty_insert_flip_string(tty, packet, len);
+ return len;
}
-static void aircable_write_bulk_callback(struct urb *urb)
+static void aircable_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- int status = urb->status;
- int result;
-
- dbg("%s - urb status: %d", __func__ , status);
-
- /* This has been taken from cypress_m8.c cypress_write_int_callback */
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d",
- __func__, status);
- port->write_urb_busy = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting write urb, error %d\n",
- __func__, result);
- else
- return;
- }
-
- port->write_urb_busy = 0;
-
- aircable_send(port);
-}
-
-static void aircable_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct aircable_private *priv = usb_get_serial_port_data(port);
+ char *data = (char *)urb->transfer_buffer;
struct tty_struct *tty;
- unsigned long no_packages, remaining, package_length, i;
- int result, shift = 0;
- unsigned char *temp;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - urb status = %d", __func__, status);
- if (status == -EPROTO) {
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
- }
- dbg("%s - unable to handle the error, exiting.", __func__);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
+ int has_headers;
+ int count;
+ int len;
+ int i;
tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- if (urb->actual_length <= 2) {
- /* This is an incomplete package */
- serial_buf_put(priv->rx_buf, urb->transfer_buffer,
- urb->actual_length);
- } else {
- temp = urb->transfer_buffer;
- if (temp[0] == RX_HEADER_0)
- shift = HCI_HEADER_LENGTH;
-
- remaining = urb->actual_length;
- no_packages = urb->actual_length / (HCI_COMPLETE_FRAME);
-
- if (urb->actual_length % HCI_COMPLETE_FRAME != 0)
- no_packages++;
+ if (!tty)
+ return;
- for (i = 0; i < no_packages; i++) {
- if (remaining > (HCI_COMPLETE_FRAME))
- package_length = HCI_COMPLETE_FRAME;
- else
- package_length = remaining;
- remaining -= package_length;
+ has_headers = (urb->actual_length > 2 && data[0] == RX_HEADER_0);
- serial_buf_put(priv->rx_buf,
- urb->transfer_buffer + shift +
- (HCI_COMPLETE_FRAME) * (i),
- package_length - shift);
- }
- }
- aircable_read(&priv->rx_work);
+ count = 0;
+ for (i = 0; i < urb->actual_length; i += HCI_COMPLETE_FRAME) {
+ len = min_t(int, urb->actual_length - i, HCI_COMPLETE_FRAME);
+ count += aircable_process_packet(tty, port, has_headers,
+ &data[i], len);
}
- tty_kref_put(tty);
-
- /* Schedule the next read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result && result != -EPERM)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
-}
-
-/* Based on ftdi_sio.c throttle */
-static void aircable_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irq(&priv->rx_lock);
- priv->rx_flags |= THROTTLED;
- spin_unlock_irq(&priv->rx_lock);
-}
-
-/* Based on ftdi_sio.c unthrottle */
-static void aircable_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct aircable_private *priv = usb_get_serial_port_data(port);
- int actually_throttled;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irq(&priv->rx_lock);
- actually_throttled = priv->rx_flags & ACTUALLY_THROTTLED;
- priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
- spin_unlock_irq(&priv->rx_lock);
-
- if (actually_throttled)
- schedule_work(&priv->rx_work);
+ if (count)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static struct usb_driver aircable_driver = {
@@ -589,15 +185,12 @@ static struct usb_serial_driver aircable_device = {
.usb_driver = &aircable_driver,
.id_table = id_table,
.num_ports = 1,
- .attach = aircable_attach,
+ .bulk_out_size = HCI_COMPLETE_FRAME,
.probe = aircable_probe,
- .release = aircable_release,
- .write = aircable_write,
- .write_room = aircable_write_room,
- .write_bulk_callback = aircable_write_bulk_callback,
- .read_bulk_callback = aircable_read_bulk_callback,
- .throttle = aircable_throttle,
- .unthrottle = aircable_unthrottle,
+ .process_read_urb = aircable_process_read_urb,
+ .prepare_write_buffer = aircable_prepare_write_buffer,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
};
static int __init aircable_init(void)
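The rewrite above moves aircable onto the generic usb-serial write path: the core queues outgoing data in port->write_fifo and, whenever a write URB is free, calls the driver's prepare_write_buffer() callback with a transfer buffer of at most bulk_out_size bytes. A minimal sketch of how such a callback and the size field fit together, for a hypothetical device that needs no framing header (the example_ names are placeholders, not aircable code):

/* Drain the fifo straight into the URB buffer; the return value is the
 * number of bytes the core should send in this URB. */
static int example_prepare_write_buffer(struct usb_serial_port *port,
					void *dest, size_t size)
{
	return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.num_ports		= 1,
	.bulk_out_size		= 64,	/* buffer size handed to the callback */
	.prepare_write_buffer	= example_prepare_write_buffer,
	/* id_table, read side, open/close etc. omitted here; they fall back
	 * to the generic implementations */
};

aircable's own callback does the same thing but reserves HCI_HEADER_LENGTH bytes up front and stores the payload length with put_unaligned_le16(), which is why <asm/unaligned.h> replaces <linux/circ_buf.h> at the top of the file.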
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 9b66bf19f75..4e41a2a3942 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
* Version information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -355,14 +355,11 @@ static void ark3116_close(struct usb_serial_port *port)
/* deactivate interrupts */
ark3116_write_reg(serial, UART_IER, 0);
- /* shutdown any bulk reads that might be going on */
- if (serial->num_bulk_out)
- usb_kill_urb(port->write_urb);
- if (serial->num_bulk_in)
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
if (serial->num_interrupt_in)
usb_kill_urb(port->interrupt_in_urb);
}
+
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -675,87 +672,45 @@ static void ark3116_read_int_callback(struct urb *urb)
* error for the next block of data as well...
* For now, let's pretend this can't happen.
*/
-
-static void send_to_tty(struct tty_struct *tty,
- const unsigned char *chars,
- size_t size, char flag)
+static void ark3116_process_read_urb(struct urb *urb)
{
- if (size == 0)
- return;
- if (flag == TTY_NORMAL) {
- tty_insert_flip_string(tty, chars, size);
- } else {
- int i;
- for (i = 0; i < size; ++i)
- tty_insert_flip_char(tty, chars[i], flag);
- }
-}
-
-static void ark3116_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
+ struct usb_serial_port *port = urb->context;
struct ark3116_private *priv = usb_get_serial_port_data(port);
- const __u8 *data = urb->transfer_buffer;
- int status = urb->status;
struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ char tty_flag = TTY_NORMAL;
unsigned long flags;
- int result;
- char flag;
__u32 lsr;
- switch (status) {
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d",
- __func__, status);
+ /* update line status */
+ spin_lock_irqsave(&priv->status_lock, flags);
+ lsr = priv->lsr;
+ priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (!urb->actual_length)
return;
- default:
- dbg("%s - nonzero urb status received: %d",
- __func__, status);
- break;
- case 0: /* success */
- spin_lock_irqsave(&priv->status_lock, flags);
- lsr = priv->lsr;
- /* clear error bits */
- priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
- spin_unlock_irqrestore(&priv->status_lock, flags);
-
- if (unlikely(lsr & UART_LSR_BI))
- flag = TTY_BREAK;
- else if (unlikely(lsr & UART_LSR_PE))
- flag = TTY_PARITY;
- else if (unlikely(lsr & UART_LSR_FE))
- flag = TTY_FRAME;
- else
- flag = TTY_NORMAL;
-
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- /* overrun is special, not associated with a char */
- if (unlikely(lsr & UART_LSR_OE))
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- send_to_tty(tty, data, urb->actual_length, flag);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ if (lsr & UART_LSR_BRK_ERROR_BITS) {
+ if (lsr & UART_LSR_BI)
+ tty_flag = TTY_BREAK;
+ else if (lsr & UART_LSR_PE)
+ tty_flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ tty_flag = TTY_FRAME;
- /* Throttle the device if requested by tty */
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = port->throttle_req;
- if (port->throttled) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- } else
- spin_unlock_irqrestore(&port->lock, flags);
+ /* overrun is special, not associated with a char */
+ if (lsr & UART_LSR_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
- /* Continue reading from device */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static struct usb_driver ark3116_driver = {
@@ -785,7 +740,7 @@ static struct usb_serial_driver ark3116_device = {
.close = ark3116_close,
.break_ctl = ark3116_break_ctl,
.read_int_callback = ark3116_read_int_callback,
- .read_bulk_callback = ark3116_read_bulk_callback,
+ .process_read_urb = ark3116_process_read_urb,
};
static int __init ark3116_init(void)
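ark3116_process_read_urb() shows the receive-side half of the same conversion: with process_read_urb() the usb-serial core owns urb->status handling, URB resubmission and throttling, so the driver callback only turns the received bytes (plus any saved line-status bits) into tty input. A condensed sketch of that pattern; example_get_lsr() is a made-up helper standing in for the locked snapshot-and-clear of priv->lsr done above:

/* hypothetical helper: snapshot and clear the saved LSR under the driver lock */
static u8 example_get_lsr(struct usb_serial_port *port);

static void example_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	struct tty_struct *tty;
	char flag = TTY_NORMAL;
	u8 lsr = example_get_lsr(port);

	if (!urb->actual_length)
		return;

	tty = tty_port_tty_get(&port->port);
	if (!tty)
		return;

	/* break takes precedence over parity, parity over framing errors */
	if (lsr & UART_LSR_BI)
		flag = TTY_BREAK;
	else if (lsr & UART_LSR_PE)
		flag = TTY_PARITY;
	else if (lsr & UART_LSR_FE)
		flag = TTY_FRAME;
	/* overrun is not tied to a character, so report it separately */
	if (lsr & UART_LSR_OE)
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);

	tty_insert_flip_string_fixed_flag(tty, data, flag, urb->actual_length);
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}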
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 1295e44e3f1..36df35295db 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2000 William Greathouse (wgreathouse@smva.com)
* Copyright (C) 2000-2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
@@ -84,7 +85,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.2"
+#define DRIVER_VERSION "v1.3"
#define DRIVER_AUTHOR "William Greathouse <wgreathouse@smva.com>"
#define DRIVER_DESC "USB Belkin Serial converter driver"
@@ -95,6 +96,7 @@ static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
+static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios * old);
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
@@ -112,7 +114,6 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) },
{ } /* Terminating entry */
};
-
MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_driver belkin_driver = {
@@ -120,7 +121,7 @@ static struct usb_driver belkin_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table_combined,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
/* All of the device info needed for the serial converters */
@@ -136,7 +137,7 @@ static struct usb_serial_driver belkin_device = {
.open = belkin_sa_open,
.close = belkin_sa_close,
.read_int_callback = belkin_sa_read_int_callback,
- /* How we get the status info */
+ .process_read_urb = belkin_sa_process_read_urb,
.set_termios = belkin_sa_set_termios,
.break_ctl = belkin_sa_break_ctl,
.tiocmget = belkin_sa_tiocmget,
@@ -145,7 +146,6 @@ static struct usb_serial_driver belkin_device = {
.release = belkin_sa_release,
};
-
struct belkin_sa_private {
spinlock_t lock;
unsigned long control_state;
@@ -196,62 +196,43 @@ static int belkin_sa_startup(struct usb_serial *serial)
return 0;
}
-
static void belkin_sa_release(struct usb_serial *serial)
{
- struct belkin_sa_private *priv;
int i;
dbg("%s", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- /* My special items, the standard routines free my urbs */
- priv = usb_get_serial_port_data(serial->port[i]);
- kfree(priv);
- }
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
-
-static int belkin_sa_open(struct tty_struct *tty,
+static int belkin_sa_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
- int retval = 0;
+ int retval;
dbg("%s port %d", __func__, port->number);
- /*Start reading from the device*/
- /* TODO: Look at possibility of submitting multiple URBs to device to
- * enhance buffering. Win trace shows 16 initial read URBs.
- */
- port->read_urb->dev = port->serial->dev;
- retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (retval) {
- dev_err(&port->dev, "usb_submit_urb(read bulk) failed\n");
- goto exit;
- }
-
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
- usb_kill_urb(port->read_urb);
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
+ return retval;
}
-exit:
- return retval;
-} /* belkin_sa_open */
+ retval = usb_serial_generic_open(tty, port);
+ if (retval)
+ usb_kill_urb(port->interrupt_in_urb);
+ return retval;
+}
static void belkin_sa_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
- /* shutdown our bulk reads and writes */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
-} /* belkin_sa_close */
-
+}
static void belkin_sa_read_int_callback(struct urb *urb)
{
@@ -310,31 +291,7 @@ static void belkin_sa_read_int_callback(struct urb *urb)
else
priv->control_state &= ~TIOCM_CD;
- /* Now to report any errors */
priv->last_lsr = data[BELKIN_SA_LSR_INDEX];
-#if 0
- /*
- * fill in the flip buffer here, but I do not know the relation
- * to the current/next receive buffer or characters. I need
- * to look in to this before committing any code.
- */
- if (priv->last_lsr & BELKIN_SA_LSR_ERR) {
- tty = tty_port_tty_get(&port->port);
- /* Overrun Error */
- if (priv->last_lsr & BELKIN_SA_LSR_OE) {
- }
- /* Parity Error */
- if (priv->last_lsr & BELKIN_SA_LSR_PE) {
- }
- /* Framing Error */
- if (priv->last_lsr & BELKIN_SA_LSR_FE) {
- }
- /* Break Indicator */
- if (priv->last_lsr & BELKIN_SA_LSR_BI) {
- }
- tty_kref_put(tty);
- }
-#endif
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -343,6 +300,53 @@ exit:
"result %d\n", __func__, retval);
}
+static void belkin_sa_process_read_urb(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct belkin_sa_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
+ unsigned char status;
+ char tty_flag;
+
+ /* Update line status */
+ tty_flag = TTY_NORMAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->last_lsr;
+ priv->last_lsr &= ~BELKIN_SA_LSR_ERR;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
+ if (status & BELKIN_SA_LSR_ERR) {
+ /* Break takes precedence over parity, which takes precedence
+ * over framing errors. */
+ if (status & BELKIN_SA_LSR_BI)
+ tty_flag = TTY_BREAK;
+ else if (status & BELKIN_SA_LSR_PE)
+ tty_flag = TTY_PARITY;
+ else if (status & BELKIN_SA_LSR_FE)
+ tty_flag = TTY_FRAME;
+ dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
+
+ /* Overrun is special, not associated with a char. */
+ if (status & BELKIN_SA_LSR_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
+
static void belkin_sa_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -482,8 +486,7 @@ static void belkin_sa_set_termios(struct tty_struct *tty,
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
spin_unlock_irqrestore(&priv->lock, flags);
-} /* belkin_sa_set_termios */
-
+}
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
{
@@ -494,7 +497,6 @@ static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state)
dev_err(&port->dev, "Set break_ctl %d\n", break_state);
}
-
static int belkin_sa_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
@@ -511,7 +513,6 @@ static int belkin_sa_tiocmget(struct tty_struct *tty, struct file *file)
return control_state;
}
-
static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
@@ -583,7 +584,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit belkin_sa_exit (void)
{
usb_deregister(&belkin_driver);
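Like ch341 further below, belkin_sa now lets the generic implementation drive the bulk endpoints and only manages its own interrupt-in URB on top of it. A minimal sketch of that open/close shape for a driver whose modem and line status arrive on an interrupt endpoint (example_ names are placeholders):

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int retval;

	/* start the status interrupt URB first ... */
	retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (retval) {
		dev_err(&port->dev, "failed to submit interrupt urb\n");
		return retval;
	}

	/* ... then let the core start the bulk-in reads */
	retval = usb_serial_generic_open(tty, port);
	if (retval)
		usb_kill_urb(port->interrupt_in_urb);

	return retval;
}

static void example_close(struct usb_serial_port *port)
{
	usb_serial_generic_close(port);		/* stops the bulk URBs */
	usb_kill_urb(port->interrupt_in_urb);	/* the driver's own extra URB */
}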
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index c66a6730d38..c74b58ab56f 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -8,10 +8,10 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
@@ -66,7 +66,7 @@
#ifdef WHEN_I_LEARN_THIS
#define BELKIN_SA_SET_MAGIC_REQUEST 17 /* I don't know, possibly flush */
/* (always in Wininit sequence before flow control) */
-#define BELKIN_SA_RESET xx /* Reset the port */
+#define BELKIN_SA_RESET xx /* Reset the port */
#define BELKIN_SA_GET_MODEM_STATUS xx /* Force return of modem status register */
#endif
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 7e8e3981841..63f7cc45bca 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -305,10 +305,7 @@ static void ch341_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
}
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index f347da2ef00..1ee6b2ab0f8 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -66,7 +66,7 @@ static int usb_console_setup(struct console *co, char *options)
struct usb_serial_port *port;
int retval;
struct tty_struct *tty = NULL;
- struct ktermios *termios = NULL, dummy;
+ struct ktermios dummy;
dbg("%s", __func__);
@@ -141,15 +141,14 @@ static int usb_console_setup(struct console *co, char *options)
goto reset_open_count;
}
kref_init(&tty->kref);
- termios = kzalloc(sizeof(*termios), GFP_KERNEL);
- if (!termios) {
+ tty_port_tty_set(&port->port, tty);
+ tty->driver = usb_serial_tty_driver;
+ tty->index = co->index;
+ if (tty_init_termios(tty)) {
retval = -ENOMEM;
err("no more memory");
goto free_tty;
}
- memset(&dummy, 0, sizeof(struct ktermios));
- tty->termios = termios;
- tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
@@ -161,16 +160,16 @@ static int usb_console_setup(struct console *co, char *options)
if (retval) {
err("could not open USB console port");
- goto free_termios;
+ goto fail;
}
if (serial->type->set_termios) {
- termios->c_cflag = cflag;
- tty_termios_encode_baud_rate(termios, baud, baud);
+ tty->termios->c_cflag = cflag;
+ tty_termios_encode_baud_rate(tty->termios, baud, baud);
+ memset(&dummy, 0, sizeof(struct ktermios));
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
- kfree(termios);
kfree(tty);
}
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
@@ -180,14 +179,12 @@ static int usb_console_setup(struct console *co, char *options)
--port->port.count;
/* The console is special in terms of closing the device so
* indicate this port is now acting as a system console. */
- port->console = 1;
port->port.console = 1;
mutex_unlock(&serial->disc_mutex);
return retval;
- free_termios:
- kfree(termios);
+ fail:
tty_port_tty_set(&port->port, NULL);
free_tty:
kfree(tty);
@@ -217,7 +214,7 @@ static void usb_console_write(struct console *co,
dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
- if (!port->console) {
+ if (!port->port.console) {
dbg("%s - port not opened", __func__);
return;
}
@@ -313,7 +310,7 @@ void usb_serial_console_exit(void)
{
if (usbcons_info.port) {
unregister_console(&usbcons);
- usbcons_info.port->console = 0;
+ usbcons_info.port->port.console = 0;
usbcons_info.port = NULL;
}
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ec9b0449ccf..8b8c7976b4c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -34,7 +34,6 @@
* Function Prototypes
*/
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
-static void cp210x_cleanup(struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_get_termios(struct tty_struct *,
struct usb_serial_port *port);
@@ -49,7 +48,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
unsigned int, unsigned int);
static void cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_startup(struct usb_serial *);
-static void cp210x_disconnect(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
static int cp210x_carrier_raised(struct usb_serial_port *p);
@@ -61,6 +59,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
@@ -72,9 +72,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
{ USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
{ USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
+ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
+ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
+ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
@@ -82,12 +85,15 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
@@ -105,6 +111,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -115,6 +122,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
@@ -138,6 +147,8 @@ static struct usb_serial_driver cp210x_device = {
.usb_driver = &cp210x_driver,
.id_table = id_table,
.num_ports = 1,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = cp210x_open,
.close = cp210x_close,
.break_ctl = cp210x_break_ctl,
@@ -145,7 +156,6 @@ static struct usb_serial_driver cp210x_device = {
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
.attach = cp210x_startup,
- .disconnect = cp210x_disconnect,
.dtr_rts = cp210x_dtr_rts,
.carrier_raised = cp210x_carrier_raised
};
@@ -370,7 +380,6 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
int result;
dbg("%s - port %d", __func__, port->number);
@@ -381,49 +390,20 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
return -EPROTO;
}
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- serial->type->read_bulk_callback,
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev, "%s - failed resubmitting read urb, "
- "error %d\n", __func__, result);
+ result = usb_serial_generic_open(tty, port);
+ if (result)
return result;
- }
/* Configure the termios structure */
cp210x_get_termios(tty, port);
return 0;
}
-static void cp210x_cleanup(struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (serial->dev) {
- /* shutdown any bulk reads that might be going on */
- if (serial->num_bulk_out)
- usb_kill_urb(port->write_urb);
- if (serial->num_bulk_in)
- usb_kill_urb(port->read_urb);
- }
-}
-
static void cp210x_close(struct usb_serial_port *port)
{
dbg("%s - port %d", __func__, port->number);
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected)
@@ -807,17 +787,6 @@ static int cp210x_startup(struct usb_serial *serial)
return 0;
}
-static void cp210x_disconnect(struct usb_serial *serial)
-{
- int i;
-
- dbg("%s", __func__);
-
- /* Stop reads and writes on all ports */
- for (i = 0; i < serial->num_ports; ++i)
- cp210x_cleanup(serial->port[i]);
-}
-
static int __init cp210x_init(void)
{
int retval;
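Most of the cp210x hunks are new id_table entries: supporting another CP210x-based product normally needs nothing more than one line in that table, conventionally kept in ascending VID/PID order and ahead of the terminating entry. A sketch with a deliberately invented VID/PID pair (0x1234/0x5678 is not a real device):

static const struct usb_device_id id_table[] = {
	/* ... existing entries ... */
	{ USB_DEVICE(0x1234, 0x5678) }, /* hypothetical CP210x-based gadget */
	{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(usb, id_table);

The new .bulk_in_size and .bulk_out_size fields, set to 256 here, appear to size the per-port transfer buffers that the generic read and write paths allocate now that cp210x no longer submits its own bulk URBs.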
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e23c77925e7..f5d06746cc3 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -64,6 +64,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
@@ -79,13 +80,12 @@ static int unstable_bauds;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.09"
+#define DRIVER_VERSION "v1.10"
#define DRIVER_AUTHOR "Lonnie Mendez <dignome@gmail.com>, Neil Whelchel <koyama@firstlight.net>"
#define DRIVER_DESC "Cypress USB to Serial Driver"
/* write buffer size defines */
#define CYPRESS_BUF_SIZE 1024
-#define CYPRESS_CLOSING_WAIT (30*HZ)
static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
@@ -135,7 +135,7 @@ struct cypress_private {
int bytes_out; /* used for statistics */
int cmd_count; /* used for statistics */
int cmd_ctrl; /* always set this to 1 before issuing a command */
- struct cypress_buf *buf; /* write buffer */
+ struct kfifo write_fifo; /* write fifo */
int write_urb_in_use; /* write urb in use indicator */
int write_urb_interval; /* interval to use for write urb */
int read_urb_interval; /* interval to use for read urb */
@@ -157,14 +157,6 @@ struct cypress_private {
struct ktermios tmp_termios; /* stores the old termios settings */
};
-/* write buffer structure */
-struct cypress_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
/* function prototypes for the Cypress USB to serial device */
static int cypress_earthmate_startup(struct usb_serial *serial);
static int cypress_hidcom_startup(struct usb_serial *serial);
@@ -190,17 +182,6 @@ static void cypress_unthrottle(struct tty_struct *tty);
static void cypress_set_dead(struct usb_serial_port *port);
static void cypress_read_int_callback(struct urb *urb);
static void cypress_write_int_callback(struct urb *urb);
-/* write buffer functions */
-static struct cypress_buf *cypress_buf_alloc(unsigned int size);
-static void cypress_buf_free(struct cypress_buf *cb);
-static void cypress_buf_clear(struct cypress_buf *cb);
-static unsigned int cypress_buf_data_avail(struct cypress_buf *cb);
-static unsigned int cypress_buf_space_avail(struct cypress_buf *cb);
-static unsigned int cypress_buf_put(struct cypress_buf *cb,
- const char *buf, unsigned int count);
-static unsigned int cypress_buf_get(struct cypress_buf *cb,
- char *buf, unsigned int count);
-
static struct usb_serial_driver cypress_earthmate_device = {
.driver = {
@@ -503,8 +484,7 @@ static int generic_startup(struct usb_serial *serial)
priv->comm_is_ok = !0;
spin_lock_init(&priv->lock);
- priv->buf = cypress_buf_alloc(CYPRESS_BUF_SIZE);
- if (priv->buf == NULL) {
+ if (kfifo_alloc(&priv->write_fifo, CYPRESS_BUF_SIZE, GFP_KERNEL)) {
kfree(priv);
return -ENOMEM;
}
@@ -627,7 +607,7 @@ static void cypress_release(struct usb_serial *serial)
priv = usb_get_serial_port_data(serial->port[0]);
if (priv) {
- cypress_buf_free(priv->buf);
+ kfifo_free(&priv->write_fifo);
kfree(priv);
}
}
@@ -704,6 +684,7 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
static void cypress_close(struct usb_serial_port *port)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -713,12 +694,14 @@ static void cypress_close(struct usb_serial_port *port)
mutex_unlock(&port->serial->disc_mutex);
return;
}
- cypress_buf_clear(priv->buf);
+ spin_lock_irqsave(&priv->lock, flags);
+ kfifo_reset_out(&priv->write_fifo);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
dbg("%s - stopping urbs", __func__);
usb_kill_urb(port->interrupt_in_urb);
usb_kill_urb(port->interrupt_out_urb);
-
if (stats)
dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
priv->bytes_in, priv->bytes_out, priv->cmd_count);
@@ -730,7 +713,6 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d, %d bytes", __func__, port->number, count);
@@ -745,9 +727,7 @@ static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
if (!count)
return count;
- spin_lock_irqsave(&priv->lock, flags);
- count = cypress_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
+ count = kfifo_in_locked(&priv->write_fifo, buf, count, &priv->lock);
finish:
cypress_send(port);
@@ -807,9 +787,10 @@ static void cypress_send(struct usb_serial_port *port)
} else
spin_unlock_irqrestore(&priv->lock, flags);
- count = cypress_buf_get(priv->buf, &port->interrupt_out_buffer[offset],
- port->interrupt_out_size-offset);
-
+ count = kfifo_out_locked(&priv->write_fifo,
+ &port->interrupt_out_buffer[offset],
+ port->interrupt_out_size - offset,
+ &priv->lock);
if (count == 0)
return;
@@ -875,7 +856,7 @@ static int cypress_write_room(struct tty_struct *tty)
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
- room = cypress_buf_space_avail(priv->buf);
+ room = kfifo_avail(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -1143,7 +1124,7 @@ static int cypress_chars_in_buffer(struct tty_struct *tty)
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
- chars = cypress_buf_data_avail(priv->buf);
+ chars = kfifo_len(&priv->write_fifo);
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -1309,7 +1290,7 @@ static void cypress_read_int_callback(struct urb *urb)
/* process read if there is data other than line status */
if (tty && bytes > i) {
tty_insert_flip_string_fixed_flag(tty, data + i,
- bytes - i, tty_flag);
+ tty_flag, bytes - i);
tty_flip_buffer_push(tty);
}
@@ -1397,193 +1378,6 @@ static void cypress_write_int_callback(struct urb *urb)
/*****************************************************************************
- * Write buffer functions - buffering code from pl2303 used
- *****************************************************************************/
-
-/*
- * cypress_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct cypress_buf *cypress_buf_alloc(unsigned int size)
-{
-
- struct cypress_buf *cb;
-
-
- if (size == 0)
- return NULL;
-
- cb = kmalloc(sizeof(struct cypress_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
-
- cb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (cb->buf_buf == NULL) {
- kfree(cb);
- return NULL;
- }
-
- cb->buf_size = size;
- cb->buf_get = cb->buf_put = cb->buf_buf;
-
- return cb;
-
-}
-
-
-/*
- * cypress_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void cypress_buf_free(struct cypress_buf *cb)
-{
- if (cb) {
- kfree(cb->buf_buf);
- kfree(cb);
- }
-}
-
-
-/*
- * cypress_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void cypress_buf_clear(struct cypress_buf *cb)
-{
- if (cb != NULL)
- cb->buf_get = cb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-
-/*
- * cypress_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static unsigned int cypress_buf_data_avail(struct cypress_buf *cb)
-{
- if (cb != NULL)
- return (cb->buf_size + cb->buf_put - cb->buf_get)
- % cb->buf_size;
- else
- return 0;
-}
-
-
-/*
- * cypress_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static unsigned int cypress_buf_space_avail(struct cypress_buf *cb)
-{
- if (cb != NULL)
- return (cb->buf_size + cb->buf_get - cb->buf_put - 1)
- % cb->buf_size;
- else
- return 0;
-}
-
-
-/*
- * cypress_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int cypress_buf_put(struct cypress_buf *cb, const char *buf,
- unsigned int count)
-{
-
- unsigned int len;
-
-
- if (cb == NULL)
- return 0;
-
- len = cypress_buf_space_avail(cb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = cb->buf_buf + cb->buf_size - cb->buf_put;
- if (count > len) {
- memcpy(cb->buf_put, buf, len);
- memcpy(cb->buf_buf, buf+len, count - len);
- cb->buf_put = cb->buf_buf + count - len;
- } else {
- memcpy(cb->buf_put, buf, count);
- if (count < len)
- cb->buf_put += count;
- else /* count == len */
- cb->buf_put = cb->buf_buf;
- }
-
- return count;
-
-}
-
-
-/*
- * cypress_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int cypress_buf_get(struct cypress_buf *cb, char *buf,
- unsigned int count)
-{
-
- unsigned int len;
-
-
- if (cb == NULL)
- return 0;
-
- len = cypress_buf_data_avail(cb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = cb->buf_buf + cb->buf_size - cb->buf_get;
- if (count > len) {
- memcpy(buf, cb->buf_get, len);
- memcpy(buf+len, cb->buf_buf, count - len);
- cb->buf_get = cb->buf_buf + count - len;
- } else {
- memcpy(buf, cb->buf_get, count);
- if (count < len)
- cb->buf_get += count;
- else /* count == len */
- cb->buf_get = cb->buf_buf;
- }
-
- return count;
-
-}
-
-/*****************************************************************************
* Module functions
*****************************************************************************/
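The cypress_m8 changes drop the hand-rolled cypress_buf ring buffer in favour of a struct kfifo guarded by the existing priv->lock, and the kfifo calls above map one-to-one onto the old helpers. A minimal sketch of the lifecycle, assuming a private structure holding the lock and the fifo (example_ names are placeholders):

#include <linux/kfifo.h>
#include <linux/spinlock.h>

#define EXAMPLE_BUF_SIZE	1024

struct example_priv {
	spinlock_t	lock;
	struct kfifo	write_fifo;
};

static int example_setup(struct example_priv *priv)
{
	spin_lock_init(&priv->lock);
	/* was cypress_buf_alloc(); returns 0 on success */
	return kfifo_alloc(&priv->write_fifo, EXAMPLE_BUF_SIZE, GFP_KERNEL);
}

static void example_io(struct example_priv *priv, const unsigned char *src,
			int count, unsigned char *dst, int space)
{
	/* producer side: was cypress_buf_put() */
	kfifo_in_locked(&priv->write_fifo, src, count, &priv->lock);
	/* consumer side: was cypress_buf_get() */
	kfifo_out_locked(&priv->write_fifo, dst, space, &priv->lock);
	/* kfifo_len() and kfifo_avail() replace the data/space queries */
}

static void example_teardown(struct example_priv *priv)
{
	kfifo_reset_out(&priv->write_fifo);	/* was cypress_buf_clear() */
	kfifo_free(&priv->write_fifo);		/* was cypress_buf_free() */
}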
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 1fd360e0406..67cf6082688 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -1,27 +1,32 @@
#ifndef CYPRESS_M8_H
#define CYPRESS_M8_H
-/* definitions and function prototypes used for the cypress USB to Serial controller */
+/*
+ * definitions and function prototypes used for the cypress USB to Serial
+ * controller
+ */
-/* For sending our feature buffer - controlling serial communication states */
-/* Linux HID has no support for serial devices so we do this through the driver */
-#define HID_REQ_GET_REPORT 0x01
-#define HID_REQ_SET_REPORT 0x09
+/*
+ * For sending our feature buffer - controlling serial communication states.
+ * Linux HID has no support for serial devices so we do this through the driver
+ */
+#define HID_REQ_GET_REPORT 0x01
+#define HID_REQ_SET_REPORT 0x09
/* List other cypress USB to Serial devices here, and add them to the id_table */
/* DeLorme Earthmate USB - a GPS device */
-#define VENDOR_ID_DELORME 0x1163
-#define PRODUCT_ID_EARTHMATEUSB 0x0100
-#define PRODUCT_ID_EARTHMATEUSB_LT20 0x0200
+#define VENDOR_ID_DELORME 0x1163
+#define PRODUCT_ID_EARTHMATEUSB 0x0100
+#define PRODUCT_ID_EARTHMATEUSB_LT20 0x0200
/* Cypress HID->COM RS232 Adapter */
-#define VENDOR_ID_CYPRESS 0x04b4
-#define PRODUCT_ID_CYPHIDCOM 0x5500
+#define VENDOR_ID_CYPRESS 0x04b4
+#define PRODUCT_ID_CYPHIDCOM 0x5500
/* Powercom UPS, chip CY7C63723 */
-#define VENDOR_ID_POWERCOM 0x0d9f
-#define PRODUCT_ID_UPS 0x0002
+#define VENDOR_ID_POWERCOM 0x0d9f
+#define PRODUCT_ID_UPS 0x0002
/* Nokia CA-42 USB to serial cable */
#define VENDOR_ID_DAZZLE 0x07d0
@@ -29,17 +34,17 @@
/* End of device listing */
/* Used for setting / requesting serial line settings */
-#define CYPRESS_SET_CONFIG 0x01
-#define CYPRESS_GET_CONFIG 0x02
+#define CYPRESS_SET_CONFIG 0x01
+#define CYPRESS_GET_CONFIG 0x02
/* Used for throttle control */
-#define THROTTLED 0x1
-#define ACTUALLY_THROTTLED 0x2
+#define THROTTLED 0x1
+#define ACTUALLY_THROTTLED 0x2
-/* chiptypes - used in case firmware differs from the generic form ... offering
- * different baud speeds/etc.
+/*
+ * chiptypes - used in case firmware differs from the generic form ... offering
+ * different baud speeds/etc.
*/
-
#define CT_EARTHMATE 0x01
#define CT_CYPHIDCOM 0x02
#define CT_CA42V2 0x03
@@ -50,15 +55,15 @@
/* these are sent / read at byte 0 of the input/output hid reports */
/* You can find these values defined in the CY4601 USB to Serial design notes */
-#define CONTROL_DTR 0x20 /* data terminal ready - flow control - host to device */
+#define CONTROL_DTR 0x20 /* data terminal ready - flow control - host to device */
#define UART_DSR 0x20 /* data set ready - flow control - device to host */
-#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
+#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
#define UART_CTS 0x10 /* clear to send - flow control - device to host */
-#define UART_RI 0x10 /* ring indicator - modem - device to host */
+#define UART_RI 0x10 /* ring indicator - modem - device to host */
#define UART_CD 0x40 /* carrier detect - modem - device to host */
-#define CYP_ERROR 0x08 /* received from input report - device to host */
+#define CYP_ERROR 0x08 /* received from input report - device to host */
/* Note - the below has nothing to do with the "feature report" reset */
-#define CONTROL_RESET 0x08 /* sent with output report - host to device */
+#define CONTROL_RESET 0x08 /* sent with output report - host to device */
/* End of RS-232 protocol definitions */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68b0aa5e516..3edda3ed822 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
- tty_insert_flip_string_fixed_flag(tty, data, len,
- flag);
+ tty_insert_flip_string_fixed_flag(tty, data, flag,
+ len);
tty_flip_buffer_push(tty);
}
}
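The two-line digi_acceleport change is the same interface adjustment seen in cypress_m8.c and in the new belkin_sa and ark3116 code above: tty_insert_flip_string_fixed_flag() now takes the flag argument before the length. Updated call sites look like this (sketch; data, flag and len come from the surrounding callback):

	/* was: tty_insert_flip_string_fixed_flag(tty, data, len, flag); */
	tty_insert_flip_string_fixed_flag(tty, data, flag, len);
	tty_flip_buffer_push(tty);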
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 5f740a1eaca..504b5585ea4 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -13,44 +13,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (07/16/2001) gb
- * remove unused code in empeg_close() (thanks to Oliver Neukum for
- * pointing this out) and rewrote empeg_set_termios().
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (01/22/2001) gb
- * Added write_room() and chars_in_buffer() support.
- *
- * (12/21/2000) gb
- * Moved termio stuff inside the port->active check.
- * Moved MOD_DEC_USE_COUNT to end of empeg_close().
- *
- * (12/03/2000) gb
- * Added tty->ldisc.set_termios(port, tty, NULL) to empeg_open().
- * This notifies the tty driver that the termios have changed.
- *
- * (11/13/2000) gb
- * Moved tty->low_latency = 1 from empeg_read_bulk_callback() to
- * empeg_open() (It only needs to be set once - Doh!)
- *
- * (11/11/2000) gb
- * Updated to work with id_table structure.
- *
- * (11/04/2000) gb
- * Forked this from visor.c, and hacked it up to work with an
- * Empeg ltd. empeg-car player. Constructive criticism welcomed.
- * I would like to say, 'Thank You' to Greg Kroah-Hartman for the
- * use of his code, and for his guidance, advice and patience. :)
- * A 'Thank You' is in order for John Ripley of Empeg ltd for his
- * advice, and patience too.
- *
*/
#include <linux/kernel.h>
@@ -71,7 +33,7 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.2"
+#define DRIVER_VERSION "v1.3"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Gary Brubaker <xavyer@ix.netcom.com>"
#define DRIVER_DESC "USB Empeg Mark I/II Driver"
@@ -79,19 +41,8 @@ static int debug;
#define EMPEG_PRODUCT_ID 0x0001
/* function prototypes for an empeg-car player */
-static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void empeg_close(struct usb_serial_port *port);
-static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf,
- int count);
-static int empeg_write_room(struct tty_struct *tty);
-static int empeg_chars_in_buffer(struct tty_struct *tty);
-static void empeg_throttle(struct tty_struct *tty);
-static void empeg_unthrottle(struct tty_struct *tty);
static int empeg_startup(struct usb_serial *serial);
static void empeg_init_termios(struct tty_struct *tty);
-static void empeg_write_bulk_callback(struct urb *urb);
-static void empeg_read_bulk_callback(struct urb *urb);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
@@ -105,7 +56,7 @@ static struct usb_driver empeg_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
static struct usb_serial_driver empeg_device = {
@@ -114,291 +65,16 @@ static struct usb_serial_driver empeg_device = {
.name = "empeg",
},
.id_table = id_table,
- .usb_driver = &empeg_driver,
+ .usb_driver = &empeg_driver,
.num_ports = 1,
- .open = empeg_open,
- .close = empeg_close,
- .throttle = empeg_throttle,
- .unthrottle = empeg_unthrottle,
+ .bulk_out_size = 256,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = empeg_startup,
.init_termios = empeg_init_termios,
- .write = empeg_write,
- .write_room = empeg_write_room,
- .chars_in_buffer = empeg_chars_in_buffer,
- .write_bulk_callback = empeg_write_bulk_callback,
- .read_bulk_callback = empeg_read_bulk_callback,
};
-#define NUM_URBS 16
-#define URB_TRANSFER_BUFFER_SIZE 4096
-
-static struct urb *write_urb_pool[NUM_URBS];
-static spinlock_t write_urb_pool_lock;
-static int bytes_in;
-static int bytes_out;
-
-/******************************************************************************
- * Empeg specific driver functions
- ******************************************************************************/
-static int empeg_open(struct tty_struct *tty,struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- int result = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- bytes_in = 0;
- bytes_out = 0;
-
- /* Start reading from the device */
- usb_fill_bulk_urb(
- port->read_urb,
- serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- empeg_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
-
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-
- return result;
-}
-
-
-static void empeg_close(struct usb_serial_port *port)
-{
- dbg("%s - port %d", __func__, port->number);
-
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
- /* Uncomment the following line if you want to see some statistics in your syslog */
- /* dev_info (&port->dev, "Bytes In = %d Bytes Out = %d\n", bytes_in, bytes_out); */
-}
-
-
-static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct usb_serial *serial = port->serial;
- struct urb *urb;
- const unsigned char *current_position = buf;
- unsigned long flags;
- int status;
- int i;
- int bytes_sent = 0;
- int transfer_size;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- /* try to find a free urb in our list of them */
- urb = NULL;
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status != -EINPROGRESS) {
- urb = write_urb_pool[i];
- break;
- }
- }
-
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
-
- if (urb == NULL) {
- dbg("%s - no more free urbs", __func__);
- goto exit;
- }
-
- if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC);
- if (urb->transfer_buffer == NULL) {
- dev_err(&port->dev,
- "%s no more kernel memory...\n",
- __func__);
- goto exit;
- }
- }
-
- transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
-
- memcpy(urb->transfer_buffer, current_position, transfer_size);
-
- usb_serial_debug_data(debug, &port->dev, __func__, transfer_size, urb->transfer_buffer);
-
- /* build up our urb */
- usb_fill_bulk_urb(
- urb,
- serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- urb->transfer_buffer,
- transfer_size,
- empeg_write_bulk_callback,
- port);
-
- /* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status);
- bytes_sent = status;
- break;
- }
-
- current_position += transfer_size;
- bytes_sent += transfer_size;
- count -= transfer_size;
- bytes_out += transfer_size;
-
- }
-exit:
- return bytes_sent;
-}
-
-
-static int empeg_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int room = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
- /* tally up the number of bytes available */
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status != -EINPROGRESS)
- room += URB_TRANSFER_BUFFER_SIZE;
- }
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
- dbg("%s - returns %d", __func__, room);
- return room;
-
-}
-
-
-static int empeg_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int chars = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- /* tally up the number of bytes waiting */
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]->status == -EINPROGRESS)
- chars += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
- dbg("%s - returns %d", __func__, chars);
- return chars;
-}
-
-
-static void empeg_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_port_softint(port);
-}
-
-
-static void empeg_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
-
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- bytes_in += urb->actual_length;
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- empeg_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
-
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
-
- return;
-
-}
-
-
-static void empeg_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- dbg("%s - port %d", __func__, port->number);
- usb_kill_urb(port->read_urb);
-}
-
-
-static void empeg_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int result;
- dbg("%s - port %d", __func__, port->number);
-
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-}
-
-
-static int empeg_startup(struct usb_serial *serial)
+static int empeg_startup(struct usb_serial *serial)
{
int r;
@@ -414,10 +90,8 @@ static int empeg_startup(struct usb_serial *serial)
/* continue on with initialization */
return r;
-
}
-
static void empeg_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = tty->termios;
@@ -462,77 +136,28 @@ static void empeg_init_termios(struct tty_struct *tty)
tty_encode_baud_rate(tty, 115200, 115200);
}
-
static int __init empeg_init(void)
{
- struct urb *urb;
- int i, retval;
-
- /* create our write urb pool and transfer buffers */
- spin_lock_init(&write_urb_pool_lock);
- for (i = 0; i < NUM_URBS; ++i) {
- urb = usb_alloc_urb(0, GFP_KERNEL);
- write_urb_pool[i] = urb;
- if (urb == NULL) {
- printk(KERN_ERR "empeg: No more urbs???\n");
- continue;
- }
-
- urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
- if (!urb->transfer_buffer) {
- printk(KERN_ERR "empeg: %s - out of memory for urb "
- "buffers.", __func__);
- continue;
- }
- }
+ int retval;
retval = usb_serial_register(&empeg_device);
if (retval)
- goto failed_usb_serial_register;
+ return retval;
retval = usb_register(&empeg_driver);
- if (retval)
- goto failed_usb_register;
-
+ if (retval) {
+ usb_serial_deregister(&empeg_device);
+ return retval;
+ }
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
return 0;
-failed_usb_register:
- usb_serial_deregister(&empeg_device);
-failed_usb_serial_register:
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]) {
- kfree(write_urb_pool[i]->transfer_buffer);
- usb_free_urb(write_urb_pool[i]);
- }
- }
- return retval;
}
-
static void __exit empeg_exit(void)
{
- int i;
- unsigned long flags;
-
usb_deregister(&empeg_driver);
usb_serial_deregister(&empeg_device);
-
- spin_lock_irqsave(&write_urb_pool_lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (write_urb_pool[i]) {
- /* FIXME - uncomment the following usb_kill_urb call
- * when the host controllers get fixed to set urb->dev
- * = NULL after the urb is finished. Otherwise this
- * call oopses. */
- /* usb_kill_urb(write_urb_pool[i]); */
- kfree(write_urb_pool[i]->transfer_buffer);
- usb_free_urb(write_urb_pool[i]);
- }
- }
- spin_unlock_irqrestore(&write_urb_pool_lock, flags);
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1d7c4fac02e..050211afc07 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1,6 +1,8 @@
/*
* USB FTDI SIO driver
*
+ * Copyright (C) 2009 - 2010
+ * Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
* Bill Ryder (bryder@sgi.com)
@@ -49,8 +51,8 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
+#define DRIVER_VERSION "v1.6.0"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
static int debug;
@@ -59,7 +61,7 @@ static __u16 product;
struct ftdi_private {
struct kref kref;
- ftdi_chip_type_t chip_type;
+ enum ftdi_chip_type chip_type;
/* type of device, either SIO or FT8U232AM */
int baud_base; /* baud base clock for divisor setting */
int custom_divisor; /* custom_divisor kludge, this is for
@@ -69,10 +71,6 @@ struct ftdi_private {
/* the last data state set - needed for doing
* a break
*/
- int write_offset; /* This is the offset in the usb data block to
- * write the serial data - it varies between
- * devices
- */
int flags; /* some ASYNC_xxxx flags are supported */
unsigned long last_dtr_rts; /* saved modem control outputs */
wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
@@ -87,9 +85,6 @@ struct ftdi_private {
be enabled */
unsigned int latency; /* latency setting in use */
- spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_outstanding_bytes;
- unsigned long tx_outstanding_urbs;
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
};
@@ -768,9 +763,6 @@ static const char *ftdi_chip_name[] = {
};
-/* Constants for read urb and write urb */
-#define BUFSZ 512
-
/* Used for TIOCMIWAIT */
#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
@@ -787,13 +779,9 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port);
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void ftdi_close(struct usb_serial_port *port);
static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
-static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int ftdi_write_room(struct tty_struct *tty);
-static int ftdi_chars_in_buffer(struct tty_struct *tty);
-static void ftdi_write_bulk_callback(struct urb *urb);
-static void ftdi_read_bulk_callback(struct urb *urb);
-static void ftdi_process_read(struct usb_serial_port *port);
+static void ftdi_process_read_urb(struct urb *urb);
+static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int ftdi_tiocmget(struct tty_struct *tty, struct file *file);
@@ -802,8 +790,6 @@ static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static void ftdi_throttle(struct tty_struct *tty);
-static void ftdi_unthrottle(struct tty_struct *tty);
static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
static unsigned short int ftdi_232am_baud_to_divisor(int baud);
@@ -821,19 +807,18 @@ static struct usb_serial_driver ftdi_sio_device = {
.usb_driver = &ftdi_driver,
.id_table = id_table_combined,
.num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 256,
.probe = ftdi_sio_probe,
.port_probe = ftdi_sio_port_probe,
.port_remove = ftdi_sio_port_remove,
.open = ftdi_open,
.close = ftdi_close,
.dtr_rts = ftdi_dtr_rts,
- .throttle = ftdi_throttle,
- .unthrottle = ftdi_unthrottle,
- .write = ftdi_write,
- .write_room = ftdi_write_room,
- .chars_in_buffer = ftdi_chars_in_buffer,
- .read_bulk_callback = ftdi_read_bulk_callback,
- .write_bulk_callback = ftdi_write_bulk_callback,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = ftdi_process_read_urb,
+ .prepare_write_buffer = ftdi_prepare_write_buffer,
.tiocmget = ftdi_tiocmget,
.tiocmset = ftdi_tiocmset,
.ioctl = ftdi_ioctl,
@@ -849,9 +834,6 @@ static struct usb_serial_driver ftdi_sio_device = {
#define HIGH 1
#define LOW 0
-/* number of outstanding urbs to prevent userspace DoS from happening */
-#define URB_UPPER_LIMIT 42
-
/*
* ***************************************************************************
* Utility functions
@@ -987,7 +969,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
static __u32 get_ftdi_divisor(struct tty_struct *tty,
struct usb_serial_port *port)
-{ /* get_ftdi_divisor */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
__u32 div_value = 0;
int div_okay = 1;
@@ -1211,12 +1193,11 @@ static int get_serial_info(struct usb_serial_port *port,
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
-} /* get_serial_info */
-
+}
static int set_serial_info(struct tty_struct *tty,
struct usb_serial_port *port, struct serial_struct __user *newinfo)
-{ /* set_serial_info */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct serial_struct new_serial;
struct ftdi_private old_priv;
@@ -1279,8 +1260,7 @@ check_and_exit:
else
mutex_unlock(&priv->cfg_lock);
return 0;
-
-} /* set_serial_info */
+}
/* Determine type of FTDI chip based on USB config and descriptor. */
@@ -1294,7 +1274,6 @@ static void ftdi_determine_type(struct usb_serial_port *port)
/* Assume it is not the original SIO device for now. */
priv->baud_base = 48000000 / 2;
- priv->write_offset = 0;
version = le16_to_cpu(udev->descriptor.bcdDevice);
interfaces = udev->actconfig->desc.bNumInterfaces;
@@ -1336,7 +1315,6 @@ static void ftdi_determine_type(struct usb_serial_port *port)
/* Old device. Assume it's the original SIO. */
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
- priv->write_offset = 1;
} else if (version < 0x400) {
/* Assume it's an FT8U232AM (or FT8U245AM) */
/* (It might be a BM because of the iSerialNumber bug,
@@ -1543,7 +1521,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
}
kref_init(&priv->kref);
- spin_lock_init(&priv->tx_lock);
mutex_init(&priv->cfg_lock);
init_waitqueue_head(&priv->delta_msr_wait);
@@ -1552,28 +1529,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
- /* Increase the size of read buffers */
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = kmalloc(BUFSZ, GFP_KERNEL);
- if (!port->bulk_in_buffer) {
- kfree(priv);
- return -ENOMEM;
- }
- if (port->read_urb) {
- port->read_urb->transfer_buffer = port->bulk_in_buffer;
- port->read_urb->transfer_buffer_length = BUFSZ;
- }
-
priv->port = port;
-
- /* Free port's existing write urb and transfer buffer. */
- if (port->write_urb) {
- usb_free_urb(port->write_urb);
- port->write_urb = NULL;
- }
- kfree(port->bulk_out_buffer);
- port->bulk_out_buffer = NULL;
-
usb_set_serial_port_data(port, priv);
ftdi_determine_type(port);
@@ -1594,7 +1550,7 @@ static void ftdi_USB_UIRT_setup(struct ftdi_private *priv)
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 77;
priv->force_baud = 38400;
-} /* ftdi_USB_UIRT_setup */
+}
/* Setup for the HE-TIRA1 device, which requires hardwired
* baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */
@@ -1607,7 +1563,7 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
priv->custom_divisor = 240;
priv->force_baud = 38400;
priv->force_rtscts = 1;
-} /* ftdi_HE_TIRA1_setup */
+}
/*
* Module parameter to control latency timer for NDI FTDI-based USB devices.
@@ -1700,31 +1656,10 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
return 0;
}
-static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
-{
- struct urb *urb = port->read_urb;
- struct usb_serial *serial = port->serial;
- int result;
-
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- ftdi_read_bulk_callback, port);
- result = usb_submit_urb(urb, mem_flags);
- if (result && result != -EPERM)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- return result;
-}
-
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
-{ /* ftdi_open */
+{
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
int result;
dbg("%s", __func__);
@@ -1746,20 +1681,13 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
ftdi_set_termios(tty, port, tty->termios);
- /* Not throttled */
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = 0;
- port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
-
/* Start reading from the device */
- result = ftdi_submit_read_urb(port, GFP_KERNEL);
+ result = usb_serial_generic_open(tty, port);
if (!result)
kref_get(&priv->kref);
return result;
-} /* ftdi_open */
-
+}
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
@@ -1789,22 +1717,16 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
 * usbserial:__serial_close only calls ftdi_close if the port is open
*
* This only gets called when it is the last close
- *
- *
*/
-
static void ftdi_close(struct usb_serial_port *port)
-{ /* ftdi_close */
+{
struct ftdi_private *priv = usb_get_serial_port_data(port);
dbg("%s", __func__);
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
kref_put(&priv->kref, ftdi_sio_priv_release);
-} /* ftdi_close */
-
-
+}
/* The SIO requires the first byte to have:
* B0 1
@@ -1813,211 +1735,39 @@ static void ftdi_close(struct usb_serial_port *port)
*
* The new devices do not require this byte
*/
-static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{ /* ftdi_write */
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct urb *urb;
- unsigned char *buffer;
- int data_offset ; /* will be 1 for the SIO and 0 otherwise */
- int status;
- int transfer_size;
+static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
+{
+ struct ftdi_private *priv;
+ int count;
unsigned long flags;
- dbg("%s port %d, %d bytes", __func__, port->number, count);
-
- if (count == 0) {
- dbg("write request of 0 bytes");
- return 0;
- }
- spin_lock_irqsave(&priv->tx_lock, flags);
- if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- priv->tx_outstanding_urbs++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- data_offset = priv->write_offset;
- dbg("data_offset set to %d", data_offset);
-
- /* Determine total transfer size */
- transfer_size = count;
- if (data_offset > 0) {
- /* Original sio needs control bytes too... */
- transfer_size += (data_offset *
- ((count + (priv->max_packet_size - 1 - data_offset)) /
- (priv->max_packet_size - data_offset)));
- }
-
- buffer = kmalloc(transfer_size, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev,
- "%s ran out of kernel memory for urb ...\n", __func__);
- count = -ENOMEM;
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "%s - no more free urbs\n", __func__);
- count = -ENOMEM;
- goto error_no_urb;
- }
+ priv = usb_get_serial_port_data(port);
- /* Copy data */
- if (data_offset > 0) {
- /* Original sio requires control byte at start of
- each packet. */
- int user_pktsz = priv->max_packet_size - data_offset;
- int todo = count;
- unsigned char *first_byte = buffer;
- const unsigned char *current_position = buf;
-
- while (todo > 0) {
- if (user_pktsz > todo)
- user_pktsz = todo;
- /* Write the control byte at the front of the packet*/
- *first_byte = 1 | ((user_pktsz) << 2);
- /* Copy data for packet */
- memcpy(first_byte + data_offset,
- current_position, user_pktsz);
- first_byte += user_pktsz + data_offset;
- current_position += user_pktsz;
- todo -= user_pktsz;
+ if (priv->chip_type == SIO) {
+ unsigned char *buffer = dest;
+ int i, len, c;
+
+ count = 0;
+ spin_lock_irqsave(&port->lock, flags);
+ for (i = 0; i < size - 1; i += priv->max_packet_size) {
+ len = min_t(int, size - i, priv->max_packet_size) - 1;
+ c = kfifo_out(&port->write_fifo, &buffer[i + 1], len);
+ if (!c)
+ break;
+ buffer[i] = (c << 2) + 1;
+ count += c + 1;
}
+ spin_unlock_irqrestore(&port->lock, flags);
} else {
- /* No control byte required. */
- /* Copy in the data to send */
- memcpy(buffer, buf, count);
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- transfer_size, buffer);
-
- /* fill the buffer and send it */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- buffer, transfer_size,
- ftdi_write_bulk_callback, port);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, status);
- count = status;
- goto error;
- } else {
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_outstanding_bytes += count;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ count = kfifo_out_locked(&port->write_fifo, dest, size,
+ &port->lock);
}
- /* we are done with this urb, so let the host driver
- * really free it when it is finished with it */
- usb_free_urb(urb);
-
- dbg("%s write returning: %d", __func__, count);
- return count;
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_outstanding_urbs--;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
return count;
-} /* ftdi_write */
-
-
-/* This function may get called when the device is closed */
-
-static void ftdi_write_bulk_callback(struct urb *urb)
-{
- unsigned long flags;
- struct usb_serial_port *port = urb->context;
- struct ftdi_private *priv;
- int data_offset; /* will be 1 for the SIO and 0 otherwise */
- unsigned long countback;
- int status = urb->status;
-
- /* free up the transfer buffer, as usb_free_urb() does not do this */
- kfree(urb->transfer_buffer);
-
- dbg("%s - port %d", __func__, port->number);
-
- priv = usb_get_serial_port_data(port);
- if (!priv) {
- dbg("%s - bad port private data pointer - exiting", __func__);
- return;
- }
- /* account for transferred data */
- countback = urb->transfer_buffer_length;
- data_offset = priv->write_offset;
- if (data_offset > 0) {
- /* Subtract the control bytes */
- countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
- }
- spin_lock_irqsave(&priv->tx_lock, flags);
- --priv->tx_outstanding_urbs;
- priv->tx_outstanding_bytes -= countback;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
- if (status) {
- dbg("nonzero write bulk status received: %d", status);
- }
-
- usb_serial_port_softint(port);
-} /* ftdi_write_bulk_callback */
-
-
-static int ftdi_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int room;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->tx_lock, flags);
- if (priv->tx_outstanding_urbs < URB_UPPER_LIMIT) {
- /*
- * We really can take anything the user throws at us
- * but let's pick a nice big number to tell the tty
- * layer that we have lots of free space
- */
- room = 2048;
- } else {
- room = 0;
- }
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return room;
}
-static int ftdi_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int buffered;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->tx_lock, flags);
- buffered = (int)priv->tx_outstanding_bytes;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- if (buffered < 0) {
- dev_err(&port->dev, "%s outstanding tx bytes is negative!\n",
- __func__);
- buffered = 0;
- }
- return buffered;
-}
+#define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
static int ftdi_process_packet(struct tty_struct *tty,
struct usb_serial_port *port, struct ftdi_private *priv,
@@ -2045,28 +1795,21 @@ static int ftdi_process_packet(struct tty_struct *tty,
priv->prev_status = status;
}
- /*
- * Although the device uses a bitmask and hence can have multiple
- * errors on a packet - the order here sets the priority the error is
- * returned to the tty layer.
- */
flag = TTY_NORMAL;
- if (packet[1] & FTDI_RS_OE) {
- flag = TTY_OVERRUN;
- dbg("OVERRRUN error");
- }
- if (packet[1] & FTDI_RS_BI) {
- flag = TTY_BREAK;
- dbg("BREAK received");
- usb_serial_handle_break(port);
- }
- if (packet[1] & FTDI_RS_PE) {
- flag = TTY_PARITY;
- dbg("PARITY error");
- }
- if (packet[1] & FTDI_RS_FE) {
- flag = TTY_FRAME;
- dbg("FRAMING error");
+ if (packet[1] & FTDI_RS_ERR_MASK) {
+ /* Break takes precedence over parity, which takes precedence
+ * over framing errors */
+ if (packet[1] & FTDI_RS_BI) {
+ flag = TTY_BREAK;
+ usb_serial_handle_break(port);
+ } else if (packet[1] & FTDI_RS_PE) {
+ flag = TTY_PARITY;
+ } else if (packet[1] & FTDI_RS_FE) {
+ flag = TTY_FRAME;
+ }
+ /* Overrun is special, not associated with a char */
+ if (packet[1] & FTDI_RS_OE)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
len -= 2;
@@ -2074,20 +1817,21 @@ static int ftdi_process_packet(struct tty_struct *tty,
return 0; /* status only */
ch = packet + 2;
- if (!(port->console && port->sysrq) && flag == TTY_NORMAL)
- tty_insert_flip_string(tty, ch, len);
- else {
+ if (port->port.console && port->sysrq) {
for (i = 0; i < len; i++, ch++) {
if (!usb_serial_handle_sysrq_char(tty, port, *ch))
tty_insert_flip_char(tty, *ch, flag);
}
+ } else {
+ tty_insert_flip_string_fixed_flag(tty, ch, flag, len);
}
+
return len;
}
-static void ftdi_process_read(struct usb_serial_port *port)
+static void ftdi_process_read_urb(struct urb *urb)
{
- struct urb *urb = port->read_urb;
+ struct usb_serial_port *port = urb->context;
struct tty_struct *tty;
struct ftdi_private *priv = usb_get_serial_port_data(port);
char *data = (char *)urb->transfer_buffer;
@@ -2109,32 +1853,6 @@ static void ftdi_process_read(struct usb_serial_port *port)
tty_kref_put(tty);
}
-static void ftdi_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, urb->status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
- ftdi_process_read(port);
-
- spin_lock_irqsave(&port->lock, flags);
- port->throttled = port->throttle_req;
- if (!port->throttled) {
- spin_unlock_irqrestore(&port->lock, flags);
- ftdi_submit_read_urb(port, GFP_ATOMIC);
- } else
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
@@ -2165,15 +1883,13 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
}
-
/* old_termios contains the original termios settings and tty->termios contains
* the new setting to be used
* WARNING: set_termios calls this with old_termios in kernel space
*/
-
static void ftdi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
-{ /* ftdi_termios */
+{
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = tty->termios;
@@ -2401,7 +2117,6 @@ static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
return update_mctrl(port, set, clear);
}
-
static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -2470,35 +2185,6 @@ static int ftdi_ioctl(struct tty_struct *tty, struct file *file,
return -ENOIOCTLCMD;
}
-static void ftdi_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&port->lock, flags);
- port->throttle_req = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-void ftdi_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int was_throttled;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&port->lock, flags);
- was_throttled = port->throttled;
- port->throttled = port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (was_throttled)
- ftdi_submit_read_urb(port, GFP_KERNEL);
-}
-
static int __init ftdi_init(void)
{
int retval;
@@ -2529,15 +2215,12 @@ failed_sio_register:
return retval;
}
-
static void __exit ftdi_exit(void)
{
-
dbg("%s", __func__);
usb_deregister(&ftdi_driver);
usb_serial_deregister(&ftdi_sio_device);
-
}
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index ff9bf80327a..213fe3d6128 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -23,14 +23,16 @@
*/
/* Commands */
-#define FTDI_SIO_RESET 0 /* Reset the port */
-#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
-#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
-#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
-#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
-#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
-#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
-#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
+#define FTDI_SIO_RESET 0 /* Reset the port */
+#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
+#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
+#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
+#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of
+ the port */
+#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem
+ status register */
+#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
+#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
@@ -52,7 +54,7 @@
*/
/* Port Identifier Table */
-#define PIT_DEFAULT 0 /* SIOA */
+#define PIT_DEFAULT 0 /* SIOA */
#define PIT_SIOA 1 /* SIOA */
/* The device this driver was tested with has only one port */
#define PIT_SIOB 2 /* SIOB */
@@ -103,20 +105,21 @@
* wLength: 0
* Data: None
* The BaudDivisor values are calculated as follows:
- * - BaseClock is either 12000000 or 48000000 depending on the device. FIXME: I wish
- * I knew how to detect old chips to select proper base clock!
+ * - BaseClock is either 12000000 or 48000000 depending on the device.
+ * FIXME: I wish I knew how to detect old chips to select proper base clock!
* - BaudDivisor is a fixed point number encoded in a funny way.
* (--WRONG WAY OF THINKING--)
* BaudDivisor is a fixed point number encoded with following bit weighs:
* (-2)(-1)(13..0). It is a radical with a denominator of 4, so values
* end with 0.0 (00...), 0.25 (10...), 0.5 (01...), and 0.75 (11...).
* (--THE REALITY--)
- * The both-bits-set has quite different meaning from 0.75 - the chip designers
- * have decided it to mean 0.125 instead of 0.75.
+ * The both-bits-set case has a quite different meaning from 0.75 - the chip
+ * designers have decided it means 0.125 instead of 0.75.
 * This info was looked up in FTDI application note "FT8U232 DEVICES \ Data Rates
* and Flow Control Consideration for USB to RS232".
* - BaudDivisor = (BaseClock / 16) / BaudRate, where the (=) operation should
- * automagically re-encode the resulting value to take fractions into consideration.
+ * automagically re-encode the resulting value to take fractions into
+ * consideration.
* As all values are integers, some bit twiddling is in order:
* BaudDivisor = (BaseClock / 16 / BaudRate) |
* (((BaseClock / 2 / BaudRate) & 4) ? 0x4000 // 0.5
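A minimal sketch of the fractional divisor encoding described above, for illustration only: the helper name is made up, the rounding is simplistic, and the driver's real ftdi_232am_baud_base_to_divisor handles further special cases not shown here.

static unsigned short example_am_encode_divisor(int base, int baud)
{
	int div8 = base / 2 / baud;		/* divisor in 1/8 units */
	unsigned short divisor = div8 >> 3;	/* integer part */

	if (div8 & 4)		/* fraction 0.5 and up -> top bits "01" */
		divisor |= 0x4000;
	else if (div8 & 2)	/* fraction 0.25 -> top bits "10" */
		divisor |= 0x8000;
	else if (div8 & 1)	/* fraction 0.125 -> top bits "11" */
		divisor |= 0xc000;

	/* e.g. base 48000000, baud 9600: 48e6/16/9600 = 312.5 -> 0x4138 */
	return divisor;
}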
@@ -146,7 +149,7 @@
* not supported by the FT8U232AM).
*/
-typedef enum {
+enum ftdi_chip_type {
SIO = 1,
FT8U232AM = 2,
FT232BM = 3,
@@ -154,37 +157,36 @@ typedef enum {
FT232RL = 5,
FT2232H = 6,
FT4232H = 7
-} ftdi_chip_type_t;
-
-typedef enum {
- ftdi_sio_b300 = 0,
- ftdi_sio_b600 = 1,
- ftdi_sio_b1200 = 2,
- ftdi_sio_b2400 = 3,
- ftdi_sio_b4800 = 4,
- ftdi_sio_b9600 = 5,
- ftdi_sio_b19200 = 6,
- ftdi_sio_b38400 = 7,
- ftdi_sio_b57600 = 8,
- ftdi_sio_b115200 = 9
-} FTDI_SIO_baudrate_t;
+};
+
+enum ftdi_sio_baudrate {
+ ftdi_sio_b300 = 0,
+ ftdi_sio_b600 = 1,
+ ftdi_sio_b1200 = 2,
+ ftdi_sio_b2400 = 3,
+ ftdi_sio_b4800 = 4,
+ ftdi_sio_b9600 = 5,
+ ftdi_sio_b19200 = 6,
+ ftdi_sio_b38400 = 7,
+ ftdi_sio_b57600 = 8,
+ ftdi_sio_b115200 = 9
+};
/*
- * The ftdi_8U232AM_xxMHz_byyy constants have been removed. The encoded divisor values
- * are calculated internally.
+ * The ftdi_8U232AM_xxMHz_byyy constants have been removed. The encoded divisor
+ * values are calculated internally.
*/
-
-#define FTDI_SIO_SET_DATA_REQUEST FTDI_SIO_SET_DATA
-#define FTDI_SIO_SET_DATA_REQUEST_TYPE 0x40
-#define FTDI_SIO_SET_DATA_PARITY_NONE (0x0 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_ODD (0x1 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_EVEN (0x2 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_MARK (0x3 << 8)
-#define FTDI_SIO_SET_DATA_PARITY_SPACE (0x4 << 8)
-#define FTDI_SIO_SET_DATA_STOP_BITS_1 (0x0 << 11)
-#define FTDI_SIO_SET_DATA_STOP_BITS_15 (0x1 << 11)
-#define FTDI_SIO_SET_DATA_STOP_BITS_2 (0x2 << 11)
-#define FTDI_SIO_SET_BREAK (0x1 << 14)
+#define FTDI_SIO_SET_DATA_REQUEST FTDI_SIO_SET_DATA
+#define FTDI_SIO_SET_DATA_REQUEST_TYPE 0x40
+#define FTDI_SIO_SET_DATA_PARITY_NONE (0x0 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_ODD (0x1 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_EVEN (0x2 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_MARK (0x3 << 8)
+#define FTDI_SIO_SET_DATA_PARITY_SPACE (0x4 << 8)
+#define FTDI_SIO_SET_DATA_STOP_BITS_1 (0x0 << 11)
+#define FTDI_SIO_SET_DATA_STOP_BITS_15 (0x1 << 11)
+#define FTDI_SIO_SET_DATA_STOP_BITS_2 (0x2 << 11)
+#define FTDI_SIO_SET_BREAK (0x1 << 14)
/* FTDI_SIO_SET_DATA */
/*
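As a usage illustration of the SET_DATA masks above, a hedged sketch that composes the wValue for an 8N1 line setting; that the data-bit count sits in the low byte, the meaning of wIndex, and the 5000 ms timeout are assumptions for illustration, not taken from this header.

static int example_set_8n1(struct usb_device *dev, __u16 index)
{
	/* 8 data bits (assumed to occupy the low byte), no parity, 1 stop */
	__u16 value = 8 | FTDI_SIO_SET_DATA_PARITY_NONE |
		      FTDI_SIO_SET_DATA_STOP_BITS_1;

	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			       FTDI_SIO_SET_DATA_REQUEST,
			       FTDI_SIO_SET_DATA_REQUEST_TYPE,
			       value, index, NULL, 0, 5000);
}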
@@ -287,8 +289,8 @@ typedef enum {
*
* A value of zero in the hIndex field disables handshaking
*
- * If Xon/Xoff handshaking is specified, the hValue field should contain the XOFF character
- * and the lValue field contains the XON character.
+ * If Xon/Xoff handshaking is specified, the hValue field should contain the
+ * XOFF character and the lValue field contains the XON character.
*/
/*
@@ -373,7 +375,10 @@ typedef enum {
/* FTDI_SIO_SET_ERROR_CHAR */
-/* Set the parity error replacement character for the specified communications port */
+/*
+ * Set the parity error replacement character for the specified communications
+ * port
+ */
/*
* BmRequestType: 0100 0000b
@@ -496,9 +501,10 @@ typedef enum {
*
* IN Endpoint
*
- * The device reserves the first two bytes of data on this endpoint to contain the current
- * values of the modem and line status registers. In the absence of data, the device
- * generates a message consisting of these two status bytes every 40 ms
+ * The device reserves the first two bytes of data on this endpoint to contain
+ * the current values of the modem and line status registers. In the absence of
+ * data, the device generates a message consisting of these two status bytes
+ * every 40 ms
*
* Byte 0: Modem Status
*
@@ -530,21 +536,21 @@ typedef enum {
#define FTDI_RS0_RI (1 << 6)
#define FTDI_RS0_RLSD (1 << 7)
-#define FTDI_RS_DR 1
-#define FTDI_RS_OE (1<<1)
-#define FTDI_RS_PE (1<<2)
-#define FTDI_RS_FE (1<<3)
-#define FTDI_RS_BI (1<<4)
-#define FTDI_RS_THRE (1<<5)
-#define FTDI_RS_TEMT (1<<6)
-#define FTDI_RS_FIFO (1<<7)
+#define FTDI_RS_DR 1
+#define FTDI_RS_OE (1<<1)
+#define FTDI_RS_PE (1<<2)
+#define FTDI_RS_FE (1<<3)
+#define FTDI_RS_BI (1<<4)
+#define FTDI_RS_THRE (1<<5)
+#define FTDI_RS_TEMT (1<<6)
+#define FTDI_RS_FIFO (1<<7)
/*
* OUT Endpoint
*
- * This device reserves the first bytes of data on this endpoint contain the length
- * and port identifier of the message. For the FTDI USB Serial converter the port
- * identifier is always 1.
+ * This device reserves the first bytes of data on this endpoint to contain the
+ * length and port identifier of the message. For the FTDI USB Serial converter
+ * the port identifier is always 1.
*
* Byte 0: Line Status
*
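To make the status-byte layout concrete, a hedged sketch that classifies the second (line status) byte of an incoming packet; the priority order mirrors the FTDI_RS_ERR_MASK handling added to ftdi_process_packet() above, the helper itself is illustrative (not part of the driver), and it assumes the usual <linux/tty.h> flag constants.

static char example_line_status_to_flag(unsigned char lsr)
{
	if (lsr & FTDI_RS_BI)		/* break beats parity and framing */
		return TTY_BREAK;
	if (lsr & FTDI_RS_PE)
		return TTY_PARITY;
	if (lsr & FTDI_RS_FE)
		return TTY_FRAME;
	return TTY_NORMAL;		/* overrun is reported separately */
}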
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 75482cbc399..94d86c3febc 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -275,8 +275,8 @@
/*
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
-#define HAMEG_HO820_PID 0xed74
-#define HAMEG_HO870_PID 0xed71
+#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO870_PID 0xed71
/*
* MaxStream devices www.maxstream.net
@@ -289,14 +289,14 @@
* and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
* Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
*/
-#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
-#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
-#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
-#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
-#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
-#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
-#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
-#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
+#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
+#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
+#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
/* Domintell products http://www.domintell.com */
#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
@@ -483,9 +483,9 @@
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
*/
-#define ADI_VID 0x0456
-#define ADI_GNICE_PID 0xF000
-#define ADI_GNICEPLUS_PID 0xF001
+#define ADI_VID 0x0456
+#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
/*
* RATOC REX-USB60F
@@ -611,13 +611,13 @@
#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
-#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
-#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
-#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
-#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
-#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
-#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
-#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
+#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
+#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
+#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
+#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
+#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
+#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
+#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
/*
* JETI SPECTROMETER SPECBOS 1201
@@ -1013,7 +1013,7 @@
*/
#define EVOLUTION_VID 0xDEEE /* Vendor ID */
#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
-#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
+#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index f804acb138e..a817ced8283 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,6 +1,7 @@
/*
* USB Serial Converter Generic functions
*
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
@@ -12,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
@@ -117,7 +119,6 @@ void usb_serial_generic_deregister(void)
int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
int result = 0;
unsigned long flags;
@@ -130,23 +131,8 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
spin_unlock_irqrestore(&port->lock, flags);
/* if we have a bulk endpoint, start reading from it */
- if (port->bulk_in_size) {
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ((serial->type->read_bulk_callback) ?
- serial->type->read_bulk_callback :
- usb_serial_generic_read_bulk_callback),
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- }
+ if (port->bulk_in_size)
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
return result;
}
@@ -155,13 +141,22 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_open);
static void generic_cleanup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ unsigned long flags;
+ int i;
dbg("%s - port %d", __func__, port->number);
if (serial->dev) {
/* shutdown any bulk transfers that might be going on */
- if (port->bulk_out_size)
+ if (port->bulk_out_size) {
usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ usb_kill_urb(port->write_urbs[i]);
+
+ spin_lock_irqsave(&port->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
if (port->bulk_in_size)
usb_kill_urb(port->read_urb);
}
@@ -172,146 +167,68 @@ void usb_serial_generic_close(struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
generic_cleanup(port);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_close);
-static int usb_serial_multi_urb_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count)
+int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- unsigned long flags;
- struct urb *urb;
- unsigned char *buffer;
- int status;
- int towrite;
- int bwrite = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (count == 0)
- dbg("%s - write request of 0 bytes", __func__);
-
- while (count > 0) {
- towrite = (count > port->bulk_out_size) ?
- port->bulk_out_size : count;
- spin_lock_irqsave(&port->lock, flags);
- if (port->urbs_in_flight >
- port->serial->type->max_in_flight_urbs) {
- spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - write limit hit", __func__);
- return bwrite;
- }
- port->tx_bytes_flight += towrite;
- port->urbs_in_flight++;
- spin_unlock_irqrestore(&port->lock, flags);
-
- buffer = kmalloc(towrite, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev,
- "%s ran out of kernel memory for urb ...\n", __func__);
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "%s - no more free urbs\n",
- __func__);
- goto error_no_urb;
- }
-
- /* Copy data */
- memcpy(buffer, buf + bwrite, towrite);
- usb_serial_debug_data(debug, &port->dev, __func__,
- towrite, buffer);
- /* fill the buffer and send it */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- buffer, towrite,
- usb_serial_generic_write_bulk_callback, port);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, status);
- goto error;
- }
-
- /* This urb is the responsibility of the host driver now */
- usb_free_urb(urb);
- dbg("%s write: %d", __func__, towrite);
- count -= towrite;
- bwrite += towrite;
- }
- return bwrite;
-
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&port->lock, flags);
- port->urbs_in_flight--;
- port->tx_bytes_flight -= towrite;
- spin_unlock_irqrestore(&port->lock, flags);
- return bwrite;
+ return kfifo_out_locked(&port->write_fifo, dest, size, &port->lock);
}
/**
* usb_serial_generic_write_start - kick off an URB write
* @port: Pointer to the &struct usb_serial_port data
*
- * Returns the number of bytes queued on success. This will be zero if there
- * was nothing to send. Otherwise, it returns a negative errno value
+ * Returns zero on success, or a negative errno value
*/
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
- unsigned char *data;
- int result;
- int count;
+ struct urb *urb;
+ int count, result;
unsigned long flags;
- bool start_io;
+ int i;
- /* Atomically determine whether we can and need to start a USB
- * operation. */
+ if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
+ return 0;
+retry:
spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy)
- start_io = false;
- else {
- start_io = (kfifo_len(&port->write_fifo) != 0);
- port->write_urb_busy = start_io;
+ if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+ return 0;
}
+ i = (int)find_first_bit(&port->write_urbs_free,
+ ARRAY_SIZE(port->write_urbs));
spin_unlock_irqrestore(&port->lock, flags);
- if (!start_io)
- return 0;
-
- data = port->write_urb->transfer_buffer;
- count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock);
- usb_serial_debug_data(debug, &port->dev, __func__, count, data);
-
- /* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- usb_serial_generic_write_bulk_callback),
- port);
-
- /* send the data out the bulk port */
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ urb = port->write_urbs[i];
+ count = port->serial->type->prepare_write_buffer(port,
+ urb->transfer_buffer,
+ port->bulk_out_size);
+ urb->transfer_buffer_length = count;
+ usb_serial_debug_data(debug, &port->dev, __func__, count,
+ urb->transfer_buffer);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
+ dev_err(&port->dev, "%s - error submitting urb: %d\n",
__func__, result);
- /* don't have to grab the lock here, as we will
- retry if != 0 */
- port->write_urb_busy = 0;
- } else
- result = count;
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+ return result;
+ }
+ clear_bit(i, &port->write_urbs_free);
- return result;
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes += count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Try sending off another urb, unless in irq context (in which case
+ * there will be no free urb). */
+ if (!in_irq())
+ goto retry;
+
+ clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+
+ return 0;
}
/**
@@ -328,7 +245,6 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
int result;
dbg("%s - port %d", __func__, port->number);
@@ -337,31 +253,23 @@ int usb_serial_generic_write(struct tty_struct *tty,
if (!port->bulk_out_size)
return -ENODEV;
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
+ if (!count)
return 0;
- }
-
- if (serial->type->max_in_flight_urbs)
- return usb_serial_multi_urb_write(tty, port,
- buf, count);
count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port);
+ if (result)
+ return result;
- if (result >= 0)
- result = count;
-
- return result;
+ return count;
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write);
int usb_serial_generic_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
unsigned long flags;
- int room = 0;
+ int room;
dbg("%s - port %d", __func__, port->number);
@@ -369,14 +277,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&port->lock, flags);
- if (serial->type->max_in_flight_urbs) {
- if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
- room = port->bulk_out_size *
- (serial->type->max_in_flight_urbs -
- port->urbs_in_flight);
- } else {
- room = kfifo_avail(&port->write_fifo);
- }
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -386,7 +287,6 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
unsigned long flags;
int chars;
@@ -396,61 +296,47 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&port->lock, flags);
- if (serial->type->max_in_flight_urbs)
- chars = port->tx_bytes_flight;
- else
- chars = kfifo_len(&port->write_fifo);
+ chars = kfifo_len(&port->write_fifo) + port->tx_bytes;
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
}
-
-void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
- gfp_t mem_flags)
+int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+ gfp_t mem_flags)
{
- struct urb *urb = port->read_urb;
- struct usb_serial *serial = port->serial;
int result;
- /* Continue reading from device */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer,
- urb->transfer_buffer_length,
- ((serial->type->read_bulk_callback) ?
- serial->type->read_bulk_callback :
- usb_serial_generic_read_bulk_callback), port);
-
- result = usb_submit_urb(urb, mem_flags);
+ result = usb_submit_urb(port->read_urb, mem_flags);
if (result && result != -EPERM) {
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
+ dev_err(&port->dev, "%s - error submitting urb: %d\n",
__func__, result);
}
+ return result;
}
-EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
+EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb);
-/* Push data to tty layer and resubmit the bulk read URB */
-static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
+void usb_serial_generic_process_read_urb(struct urb *urb)
{
- struct urb *urb = port->read_urb;
- struct tty_struct *tty = tty_port_tty_get(&port->port);
+ struct usb_serial_port *port = urb->context;
+ struct tty_struct *tty;
char *ch = (char *)urb->transfer_buffer;
int i;
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
if (!tty)
- goto done;
+ return;
 /* The per character mucking around with the sysrq path is too slow for
 stuff like 3G modems, so short-circuit it in the 99.9999999% of cases
where the USB serial is not a console anyway */
- if (!port->console || !port->sysrq)
+ if (!port->port.console || !port->sysrq)
tty_insert_flip_string(tty, ch, urb->actual_length);
else {
- /* Push data to tty */
for (i = 0; i < urb->actual_length; i++, ch++) {
if (!usb_serial_handle_sysrq_char(tty, port, *ch))
tty_insert_flip_char(tty, *ch, TTY_NORMAL);
@@ -458,9 +344,8 @@ static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
}
tty_flip_buffer_push(tty);
tty_kref_put(tty);
-done:
- usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_process_read_urb);
void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
@@ -479,13 +364,14 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, data);
+ port->serial->type->process_read_urb(urb);
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
- flush_and_resubmit_read_urb(port);
+ usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
} else
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -496,30 +382,29 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
unsigned long flags;
struct usb_serial_port *port = urb->context;
int status = urb->status;
+ int i;
dbg("%s - port %d", __func__, port->number);
- if (port->serial->type->max_in_flight_urbs) {
- kfree(urb->transfer_buffer);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ if (port->write_urbs[i] == urb)
+ break;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes -= urb->transfer_buffer_length;
+ set_bit(i, &port->write_urbs_free);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (status) {
+ dbg("%s - non-zero urb status: %d", __func__, status);
spin_lock_irqsave(&port->lock, flags);
- --port->urbs_in_flight;
- port->tx_bytes_flight -= urb->transfer_buffer_length;
- if (port->urbs_in_flight < 0)
- port->urbs_in_flight = 0;
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
} else {
- port->write_urb_busy = 0;
-
- if (status)
- kfifo_reset_out(&port->write_fifo);
- else
- usb_serial_generic_write_start(port);
+ usb_serial_generic_write_start(port);
}
- if (status)
- dbg("%s - non-zero urb status: %d", __func__, status);
-
usb_serial_port_softint(port);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
@@ -537,31 +422,31 @@ void usb_serial_generic_throttle(struct tty_struct *tty)
port->throttle_req = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_throttle);
void usb_serial_generic_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int was_throttled;
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
/* Clear the throttle flags */
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irq(&port->lock);
was_throttled = port->throttled;
port->throttled = port->throttle_req = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irq(&port->lock);
- if (was_throttled) {
- /* Resume reading from device */
- flush_and_resubmit_read_urb(port);
- }
+ if (was_throttled)
+ usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
+#ifdef CONFIG_MAGIC_SYSRQ
int usb_serial_handle_sysrq_char(struct tty_struct *tty,
struct usb_serial_port *port, unsigned int ch)
{
- if (port->sysrq && port->console) {
+ if (port->sysrq && port->port.console) {
if (ch && time_before(jiffies, port->sysrq)) {
handle_sysrq(ch, tty);
port->sysrq = 0;
@@ -571,6 +456,13 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
}
return 0;
}
+#else
+int usb_serial_handle_sysrq_char(struct tty_struct *tty,
+ struct usb_serial_port *port, unsigned int ch)
+{
+ return 0;
+}
+#endif
EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
int usb_serial_handle_break(struct usb_serial_port *port)
@@ -600,7 +492,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
c++;
}
- if (port->write_urb) {
+ if (port->bulk_out_size) {
r = usb_serial_generic_write_start(port);
if (r < 0)
c++;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 3ef8df0ef88..76e6fb3aab7 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -3020,7 +3020,7 @@ static int edge_startup(struct usb_serial *serial)
/* set up our port private structures */
for (i = 0; i < serial->num_ports; ++i) {
- edge_port = kmalloc(sizeof(struct edgeport_port), GFP_KERNEL);
+ edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
if (edge_port == NULL) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
@@ -3033,7 +3033,6 @@ static int edge_startup(struct usb_serial *serial)
kfree(edge_serial);
return -ENOMEM;
}
- memset(edge_port, 0, sizeof(struct edgeport_port));
spin_lock_init(&edge_port->ep_lock);
edge_port->port = serial->port[i];
usb_set_serial_port_data(serial->port[i], edge_port);
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index cb201c1f67f..dced7ec6547 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -34,15 +34,15 @@
-/* The following table is used to map the USBx port number to
+/* The following table is used to map the USBx port number to
* the device serial number (or physical USB path), */
#define MAX_EDGEPORTS 64
struct comMapper {
char SerialNumber[MAX_SERIALNUMBER_LEN+1]; /* Serial number/usb path */
- int numPorts; /* Number of ports */
- int Original[MAX_RS232_PORTS]; /* Port numbers set by IOCTL */
- int Port[MAX_RS232_PORTS]; /* Actual used port numbers */
+ int numPorts; /* Number of ports */
+ int Original[MAX_RS232_PORTS]; /* Port numbers set by IOCTL */
+ int Port[MAX_RS232_PORTS]; /* Actual used port numbers */
};
@@ -51,7 +51,7 @@ struct comMapper {
/* /proc/edgeport Interface
* This interface uses read/write/lseek interface to talk to the edgeport driver
* the following read functions are supported: */
-#define PROC_GET_MAPPING_TO_PATH 1
+#define PROC_GET_MAPPING_TO_PATH 1
#define PROC_GET_COM_ENTRY 2
#define PROC_GET_EDGE_MANUF_DESCRIPTOR 3
#define PROC_GET_BOOT_DESCRIPTOR 4
@@ -64,7 +64,7 @@ struct comMapper {
/* the following write functions are supported: */
-#define PROC_SET_COM_MAPPING 1
+#define PROC_SET_COM_MAPPING 1
#define PROC_SET_COM_ENTRY 2
@@ -97,8 +97,8 @@ struct edgeport_product_info {
__u8 BoardRev; /* PCB revision level (chg only if s/w visible) */
__u8 BootMajorVersion; /* Boot Firmware version: xx. */
- __u8 BootMinorVersion; /* yy. */
- __le16 BootBuildNumber; /* zzzz (LE format) */
+ __u8 BootMinorVersion; /* yy. */
+ __le16 BootBuildNumber; /* zzzz (LE format) */
__u8 FirmwareMajorVersion; /* Operational Firmware version:xx. */
__u8 FirmwareMinorVersion; /* yy. */
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 092e03d2dfc..5cc591bae54 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -89,10 +89,10 @@ All 16-bit fields are sent in little-endian (Intel) format.
//
struct int_status_pkt {
- __u16 RxBytesAvail; // Additional bytes available to
- // be read from Bulk IN pipe
- __u16 TxCredits[ MAX_RS232_PORTS ]; // Additional space available in
- // given port's TxBuffer
+ __u16 RxBytesAvail; // Additional bytes available to
+ // be read from Bulk IN pipe
+ __u16 TxCredits[MAX_RS232_PORTS]; // Additional space available in
+ // given port's TxBuffer
};
@@ -115,24 +115,24 @@ struct int_status_pkt {
#define IOSP_CMD_STAT_BIT 0x80 // If set, this is command/status header
#define IS_CMD_STAT_HDR(Byte1) ((Byte1) & IOSP_CMD_STAT_BIT)
-#define IS_DATA_HDR(Byte1) (! IS_CMD_STAT_HDR(Byte1))
+#define IS_DATA_HDR(Byte1) (!IS_CMD_STAT_HDR(Byte1))
#define IOSP_GET_HDR_PORT(Byte1) ((__u8) ((Byte1) & IOSP_PORT_MASK))
-#define IOSP_GET_HDR_DATA_LEN(Byte1, Byte2) ((__u16) ( ((__u16)((Byte1) & 0x78)) << 5) | (Byte2))
+#define IOSP_GET_HDR_DATA_LEN(Byte1, Byte2) ((__u16) (((__u16)((Byte1) & 0x78)) << 5) | (Byte2))
#define IOSP_GET_STATUS_CODE(Byte1) ((__u8) (((Byte1) & 0x78) >> 3))
//
// These macros build the 1st and 2nd bytes for a data header
//
-#define IOSP_BUILD_DATA_HDR1(Port, Len) ((__u8) (((Port) | ((__u8) (((__u16) (Len)) >> 5) & 0x78 ))))
+#define IOSP_BUILD_DATA_HDR1(Port, Len) ((__u8) (((Port) | ((__u8) (((__u16) (Len)) >> 5) & 0x78))))
#define IOSP_BUILD_DATA_HDR2(Port, Len) ((__u8) (Len))
//
// These macros build the 1st and 2nd bytes for a command header
//
-#define IOSP_BUILD_CMD_HDR1(Port, Cmd) ((__u8) ( IOSP_CMD_STAT_BIT | (Port) | ((__u8) ((Cmd) << 3)) ))
+#define IOSP_BUILD_CMD_HDR1(Port, Cmd) ((__u8) (IOSP_CMD_STAT_BIT | (Port) | ((__u8) ((Cmd) << 3))))
//--------------------------------------------------------------
@@ -194,24 +194,25 @@ struct int_status_pkt {
// Define macros to simplify building of IOSP cmds
//
-#define MAKE_CMD_WRITE_REG(ppBuf, pLen, Port, Reg, Val) \
- do { \
- (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1( (Port), IOSP_WRITE_UART_REG(Reg) ); \
- (*(ppBuf))[1] = (Val); \
- \
- *ppBuf += 2; \
- *pLen += 2; \
- } while (0)
+#define MAKE_CMD_WRITE_REG(ppBuf, pLen, Port, Reg, Val) \
+do { \
+ (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1((Port), \
+ IOSP_WRITE_UART_REG(Reg)); \
+ (*(ppBuf))[1] = (Val); \
+ \
+ *ppBuf += 2; \
+ *pLen += 2; \
+} while (0)
-#define MAKE_CMD_EXT_CMD(ppBuf, pLen, Port, ExtCmd, Param) \
- do { \
- (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1( (Port), IOSP_EXT_CMD ); \
- (*(ppBuf))[1] = (ExtCmd); \
- (*(ppBuf))[2] = (Param); \
- \
- *ppBuf += 3; \
- *pLen += 3; \
- } while (0)
+#define MAKE_CMD_EXT_CMD(ppBuf, pLen, Port, ExtCmd, Param) \
+do { \
+ (*(ppBuf))[0] = IOSP_BUILD_CMD_HDR1((Port), IOSP_EXT_CMD); \
+ (*(ppBuf))[1] = (ExtCmd); \
+ (*(ppBuf))[2] = (Param); \
+ \
+ *ppBuf += 3; \
+ *pLen += 3; \
+} while (0)
@@ -310,16 +311,16 @@ struct int_status_pkt {
//
// IOSP_CMD_RX_CHECK_REQ
//
-// This command is used to assist in the implementation of the
-// IOCTL_SERIAL_PURGE Windows IOCTL.
-// This IOSP command tries to place a marker at the end of the RX
-// queue in the Edgeport. If the Edgeport RX queue is full then
-// the Check will be discarded.
-// It is up to the device driver to timeout waiting for the
-// RX_CHECK_RSP. If a RX_CHECK_RSP is received, the driver is
-// sure that all data has been received from the edgeport and
+// This command is used to assist in the implementation of the
+// IOCTL_SERIAL_PURGE Windows IOCTL.
+// This IOSP command tries to place a marker at the end of the RX
+// queue in the Edgeport. If the Edgeport RX queue is full then
+// the Check will be discarded.
+// It is up to the device driver to timeout waiting for the
+// RX_CHECK_RSP. If a RX_CHECK_RSP is received, the driver is
+// sure that all data has been received from the edgeport and
// may now purge any internal RX buffers.
-// Note tat the sequence numbers may be used to detect lost
+// Note that the sequence numbers may be used to detect lost
// CHECK_REQs.
// Example for Port 0
@@ -341,7 +342,7 @@ struct int_status_pkt {
//
// 1ssssPPP P1P1P1P1 [ P2P2P2P2P2 ]...
//
-// ssss: 00-07 2-byte status. ssss identifies which UART register
+// ssss: 00-07 2-byte status. ssss identifies which UART register
// has changed value, and the new value is in P1.
// Note that the ssss values do not correspond to the
// 16554 register numbers given in 16554.H. Instead,
@@ -383,14 +384,14 @@ struct int_status_pkt {
// returns this in order to report
// changes in modem status lines
// (CTS, DSR, RI, CD)
-//
+//
// 0x02 // Available for future expansion
-// 0x03 //
-// 0x04 //
-// 0x05 //
-// 0x06 //
-// 0x07 //
+// 0x03 //
+// 0x04 //
+// 0x05 //
+// 0x06 //
+// 0x07 //
/****************************************************
@@ -400,7 +401,7 @@ struct int_status_pkt {
#define IOSP_STATUS_LSR_DATA 0x08 // P1 is new value of LSR register (same as STATUS_LSR)
// P2 is errored character read from
-// RxFIFO after LSR reported an error.
+// RxFIFO after LSR reported an error.
#define IOSP_EXT_STATUS 0x09 // P1 is status/response code, param in P2.
@@ -408,7 +409,7 @@ struct int_status_pkt {
// Response Codes (P1 values) for 3-byte status messages
#define IOSP_EXT_STATUS_CHASE_RSP 0 // Reply to CHASE_PORT cmd. P2 is outcome:
-#define IOSP_EXT_STATUS_CHASE_PASS 0 // P2 = 0: All Tx data drained successfully
+#define IOSP_EXT_STATUS_CHASE_PASS 0 // P2 = 0: All Tx data drained successfully
#define IOSP_EXT_STATUS_CHASE_FAIL 1 // P2 = 1: Timed out (stuck due to flow
// control from remote device).
@@ -446,9 +447,9 @@ struct int_status_pkt {
// Macros to parse status messages
//
-#define IOSP_GET_STATUS_LEN(code) ( (code) < 8 ? 2 : ((code) < 0x0A ? 3 : 4) )
+#define IOSP_GET_STATUS_LEN(code) ((code) < 8 ? 2 : ((code) < 0x0A ? 3 : 4))
-#define IOSP_STATUS_IS_2BYTE(code) ( (code) < 0x08 )
-#define IOSP_STATUS_IS_3BYTE(code) ( ((code) >= 0x08) && ((code) <= 0x0B) )
-#define IOSP_STATUS_IS_4BYTE(code) ( ((code) >= 0x0C) && ((code) <= 0x0D) )
+#define IOSP_STATUS_IS_2BYTE(code) ((code) < 0x08)
+#define IOSP_STATUS_IS_3BYTE(code) (((code) >= 0x08) && ((code) <= 0x0B))
+#define IOSP_STATUS_IS_4BYTE(code) (((code) >= 0x0C) && ((code) <= 0x0D))
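The header macros above pack a data header into two bytes: the port number sits in the low bits of byte 1, bits 8-11 of the length land in the 0x78 field of byte 1, and the low 8 bits of the length go in byte 2, so lengths up to 4095 round-trip. A small self-check, assuming the macros from this header are in scope and that the port fits in the low port bits:

	#include <linux/bug.h>
	#include <linux/types.h>

	static void iosp_data_hdr_selftest(__u8 port, __u16 len)
	{
		__u8 b1 = IOSP_BUILD_DATA_HDR1(port, len);
		__u8 b2 = IOSP_BUILD_DATA_HDR2(port, len);

		WARN_ON(!IS_DATA_HDR(b1));			/* bit 7 clear => data */
		WARN_ON(IOSP_GET_HDR_PORT(b1) != port);
		WARN_ON(IOSP_GET_HDR_DATA_LEN(b1, b2) != len);	/* len <= 0xFFF */
	}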
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index aa876f71f22..0fca2659206 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
+#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
@@ -56,10 +57,6 @@
#define EPROM_PAGE_SIZE 64
-struct edgeport_uart_buf_desc {
- __u32 count; /* Number of bytes currently in buffer */
-};
-
/* different hardware types */
#define HARDWARE_TYPE_930 0
#define HARDWARE_TYPE_TIUMP 1
@@ -87,14 +84,6 @@ struct product_info {
__u8 hardware_type; /* Type of hardware */
} __attribute__((packed));
-/* circular buffer */
-struct edge_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
struct edgeport_port {
__u16 uart_base;
__u16 dma_address;
@@ -108,7 +97,6 @@ struct edgeport_port {
int baud_rate;
int close_pending;
int lsr_event;
- struct edgeport_uart_buf_desc tx;
struct async_icount icount;
wait_queue_head_t delta_msr_wait; /* for handling sleeping while
waiting for msr change to
@@ -119,7 +107,7 @@ struct edgeport_port {
spinlock_t ep_lock;
int ep_read_urb_state;
int ep_write_urb_in_use;
- struct edge_buf *ep_out_buf;
+ struct kfifo write_fifo;
};
struct edgeport_serial {
@@ -249,17 +237,6 @@ static void edge_send(struct tty_struct *tty);
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
-/* circular buffer */
-static struct edge_buf *edge_buf_alloc(unsigned int size);
-static void edge_buf_free(struct edge_buf *eb);
-static void edge_buf_clear(struct edge_buf *eb);
-static unsigned int edge_buf_data_avail(struct edge_buf *eb);
-static unsigned int edge_buf_space_avail(struct edge_buf *eb);
-static unsigned int edge_buf_put(struct edge_buf *eb, const char *buf,
- unsigned int count);
-static unsigned int edge_buf_get(struct edge_buf *eb, char *buf,
- unsigned int count);
-
static int ti_vread_sync(struct usb_device *dev, __u8 request,
__u16 value, __u16 index, u8 *data, int size)
@@ -590,7 +567,7 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (edge_buf_data_avail(port->ep_out_buf) == 0
+ if (kfifo_len(&port->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| !usb_get_intfdata(port->port->serial->interface))
/* disconnect */
@@ -602,7 +579,7 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (flush)
- edge_buf_clear(port->ep_out_buf);
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->ep_lock, flags);
tty_kref_put(tty);
@@ -2089,7 +2066,6 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -2103,10 +2079,8 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
if (edge_port->close_pending == 1)
return -ENODEV;
- spin_lock_irqsave(&edge_port->ep_lock, flags);
- count = edge_buf_put(edge_port->ep_out_buf, data, count);
- spin_unlock_irqrestore(&edge_port->ep_lock, flags);
-
+ count = kfifo_in_locked(&edge_port->write_fifo, data, count,
+ &edge_port->ep_lock);
edge_send(tty);
return count;
@@ -2129,7 +2103,7 @@ static void edge_send(struct tty_struct *tty)
return;
}
- count = edge_buf_get(edge_port->ep_out_buf,
+ count = kfifo_out(&edge_port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -2185,7 +2159,7 @@ static int edge_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- room = edge_buf_space_avail(edge_port->ep_out_buf);
+ room = kfifo_avail(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -2207,7 +2181,7 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- chars = edge_buf_data_avail(edge_port->ep_out_buf);
+ chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -2664,8 +2638,8 @@ static int edge_startup(struct usb_serial *serial)
goto cleanup;
}
spin_lock_init(&edge_port->ep_lock);
- edge_port->ep_out_buf = edge_buf_alloc(EDGE_OUT_BUF_SIZE);
- if (edge_port->ep_out_buf == NULL) {
+ if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
+ GFP_KERNEL)) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
kfree(edge_port);
@@ -2682,7 +2656,7 @@ static int edge_startup(struct usb_serial *serial)
cleanup:
for (--i; i >= 0; --i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
- edge_buf_free(edge_port->ep_out_buf);
+ kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -2713,7 +2687,7 @@ static void edge_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
- edge_buf_free(edge_port->ep_out_buf);
+ kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
}
kfree(usb_get_serial_data(serial));
@@ -2763,182 +2737,6 @@ static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
}
-/* Circular Buffer */
-
-/*
- * edge_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct edge_buf *edge_buf_alloc(unsigned int size)
-{
- struct edge_buf *eb;
-
-
- if (size == 0)
- return NULL;
-
- eb = kmalloc(sizeof(struct edge_buf), GFP_KERNEL);
- if (eb == NULL)
- return NULL;
-
- eb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (eb->buf_buf == NULL) {
- kfree(eb);
- return NULL;
- }
-
- eb->buf_size = size;
- eb->buf_get = eb->buf_put = eb->buf_buf;
-
- return eb;
-}
-
-
-/*
- * edge_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void edge_buf_free(struct edge_buf *eb)
-{
- if (eb) {
- kfree(eb->buf_buf);
- kfree(eb);
- }
-}
-
-
-/*
- * edge_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void edge_buf_clear(struct edge_buf *eb)
-{
- if (eb != NULL)
- eb->buf_get = eb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-
-/*
- * edge_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static unsigned int edge_buf_data_avail(struct edge_buf *eb)
-{
- if (eb == NULL)
- return 0;
- return ((eb->buf_size + eb->buf_put - eb->buf_get) % eb->buf_size);
-}
-
-
-/*
- * edge_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static unsigned int edge_buf_space_avail(struct edge_buf *eb)
-{
- if (eb == NULL)
- return 0;
- return ((eb->buf_size + eb->buf_get - eb->buf_put - 1) % eb->buf_size);
-}
-
-
-/*
- * edge_buf_put
- *
- * Copy data data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int edge_buf_put(struct edge_buf *eb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
-
- if (eb == NULL)
- return 0;
-
- len = edge_buf_space_avail(eb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = eb->buf_buf + eb->buf_size - eb->buf_put;
- if (count > len) {
- memcpy(eb->buf_put, buf, len);
- memcpy(eb->buf_buf, buf+len, count - len);
- eb->buf_put = eb->buf_buf + count - len;
- } else {
- memcpy(eb->buf_put, buf, count);
- if (count < len)
- eb->buf_put += count;
- else /* count == len */
- eb->buf_put = eb->buf_buf;
- }
-
- return count;
-}
-
-
-/*
- * edge_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static unsigned int edge_buf_get(struct edge_buf *eb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
-
- if (eb == NULL)
- return 0;
-
- len = edge_buf_data_avail(eb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = eb->buf_buf + eb->buf_size - eb->buf_get;
- if (count > len) {
- memcpy(buf, eb->buf_get, len);
- memcpy(buf+len, eb->buf_buf, count - len);
- eb->buf_get = eb->buf_buf + count - len;
- } else {
- memcpy(buf, eb->buf_get, count);
- if (count < len)
- eb->buf_get += count;
- else /* count == len */
- eb->buf_get = eb->buf_buf;
- }
-
- return count;
-}
-
-
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
.owner = THIS_MODULE,
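The io_ti.c changes above drop the open-coded edge_buf ring buffer in favour of the generic kfifo API: kfifo_alloc()/kfifo_free() replace edge_buf_alloc()/edge_buf_free(), kfifo_in_locked() takes over the producer side in edge_write(), kfifo_out() feeds the write URB in edge_send(), and kfifo_len()/kfifo_avail()/kfifo_reset_out() stand in for the old data/space/clear helpers. A minimal sketch of the same pattern with hypothetical names:

	#include <linux/kfifo.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	#define DEMO_OUT_BUF_SIZE	1024	/* kfifo_alloc() rounds up to a power of two */

	struct demo_port {
		struct kfifo	write_fifo;
		spinlock_t	lock;
	};

	static int demo_port_init(struct demo_port *p)
	{
		spin_lock_init(&p->lock);
		return kfifo_alloc(&p->write_fifo, DEMO_OUT_BUF_SIZE, GFP_KERNEL);
	}

	/* producer path, as in edge_write(): the lock serialises against the consumer */
	static int demo_queue(struct demo_port *p, const unsigned char *data, int count)
	{
		return kfifo_in_locked(&p->write_fifo, data, count, &p->lock);
	}

	/* consumer path, as in edge_send(): caller holds p->lock */
	static unsigned int demo_drain(struct demo_port *p, unsigned char *buf, int size)
	{
		return kfifo_out(&p->write_fifo, buf, size);
	}

	static void demo_port_exit(struct demo_port *p)
	{
		kfifo_reset_out(&p->write_fifo);	/* flush, as chase_port() does */
		kfifo_free(&p->write_fifo);
	}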
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index cab84f2256b..1bd67b24f91 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,4 +1,4 @@
-/*****************************************************************************
+/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
*
@@ -22,10 +22,10 @@
#define DTK_ADDR_SPACE_I2C_TYPE_II 0x82 /* Addr is placed in I2C area */
#define DTK_ADDR_SPACE_I2C_TYPE_III 0x83 /* Addr is placed in I2C area */
-// UART Defines
-#define UMPMEM_BASE_UART1 0xFFA0 /* UMP UART1 base address */
-#define UMPMEM_BASE_UART2 0xFFB0 /* UMP UART2 base address */
-#define UMPMEM_OFFS_UART_LSR 0x05 /* UMP UART LSR register offset */
+/* UART Defines */
+#define UMPMEM_BASE_UART1 0xFFA0 /* UMP UART1 base address */
+#define UMPMEM_BASE_UART2 0xFFB0 /* UMP UART2 base address */
+#define UMPMEM_OFFS_UART_LSR 0x05 /* UMP UART LSR register offset */
/* Bits per character */
#define UMP_UART_CHAR5BITS 0x00
@@ -54,7 +54,7 @@
#define UMP_UART_LSR_RX_MASK 0x10
#define UMP_UART_LSR_TX_MASK 0x20
-#define UMP_UART_LSR_DATA_MASK ( LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK )
+#define UMP_UART_LSR_DATA_MASK (LSR_PAR_ERR | LSR_FRM_ERR | LSR_BREAK)
/* Port Settings Constants) */
#define UMP_MASK_UART_FLAGS_RTS_FLOW 0x0001
@@ -79,50 +79,57 @@
#define UMP_PORT_DIR_OUT 0x01
#define UMP_PORT_DIR_IN 0x02
-// Address of Port 0
-#define UMPM_UART1_PORT 0x03
-
-// Commands
-#define UMPC_SET_CONFIG 0x05
-#define UMPC_OPEN_PORT 0x06
-#define UMPC_CLOSE_PORT 0x07
-#define UMPC_START_PORT 0x08
-#define UMPC_STOP_PORT 0x09
-#define UMPC_TEST_PORT 0x0A
-#define UMPC_PURGE_PORT 0x0B
-
-#define UMPC_COMPLETE_READ 0x80 // Force the Firmware to complete the current Read
-#define UMPC_HARDWARE_RESET 0x81 // Force UMP back into BOOT Mode
-#define UMPC_COPY_DNLD_TO_I2C 0x82 // Copy current download image to type 0xf2 record in 16k I2C
- // firmware will change 0xff record to type 2 record when complete
+/* Address of Port 0 */
+#define UMPM_UART1_PORT 0x03
+
+/* Commands */
+#define UMPC_SET_CONFIG 0x05
+#define UMPC_OPEN_PORT 0x06
+#define UMPC_CLOSE_PORT 0x07
+#define UMPC_START_PORT 0x08
+#define UMPC_STOP_PORT 0x09
+#define UMPC_TEST_PORT 0x0A
+#define UMPC_PURGE_PORT 0x0B
+
+/* Force the Firmware to complete the current Read */
+#define UMPC_COMPLETE_READ 0x80
+/* Force UMP back into BOOT Mode */
+#define UMPC_HARDWARE_RESET 0x81
+/*
+ * Copy current download image to type 0xf2 record in 16k I2C
+ * firmware will change 0xff record to type 2 record when complete
+ */
+#define UMPC_COPY_DNLD_TO_I2C 0x82
- // Special function register commands
- // wIndex is register address
- // wValue is MSB/LSB mask/data
-#define UMPC_WRITE_SFR 0x83 // Write SFR Register
+/*
+ * Special function register commands
+ * wIndex is register address
+ * wValue is MSB/LSB mask/data
+ */
+#define UMPC_WRITE_SFR 0x83 /* Write SFR Register */
- // wIndex is register address
-#define UMPC_READ_SFR 0x84 // Read SRF Register
+/* wIndex is register address */
+#define UMPC_READ_SFR 0x84 /* Read SFR Register */
- // Set or Clear DTR (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear DTR (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_DTR 0x85
- // Set or Clear RTS (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear RTS (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_RTS 0x86
- // Set or Clear LOOPBACK (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear LOOPBACK (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_LOOPBACK 0x87
- // Set or Clear BREAK (wValue bit 0 Set/Clear) wIndex ModuleID (port)
+/* Set or Clear BREAK (wValue bit 0 Set/Clear) wIndex ModuleID (port) */
#define UMPC_SET_CLR_BREAK 0x88
- // Read MSR wIndex ModuleID (port)
+/* Read MSR wIndex ModuleID (port) */
#define UMPC_READ_MSR 0x89
- /* Toolkit commands */
- /* Read-write group */
-#define UMPC_MEMORY_READ 0x92
-#define UMPC_MEMORY_WRITE 0x93
+/* Toolkit commands */
+/* Read-write group */
+#define UMPC_MEMORY_READ 0x92
+#define UMPC_MEMORY_WRITE 0x93
/*
* UMP DMA Definitions
@@ -130,8 +137,7 @@
#define UMPD_OEDB1_ADDRESS 0xFF08
#define UMPD_OEDB2_ADDRESS 0xFF10
-struct out_endpoint_desc_block
-{
+struct out_endpoint_desc_block {
__u8 Configuration;
__u8 XBufAddr;
__u8 XByteCount;
@@ -147,8 +153,8 @@ struct out_endpoint_desc_block
* TYPE DEFINITIONS
* Structures for Firmware commands
*/
-struct ump_uart_config /* UART settings */
-{
+/* UART settings */
+struct ump_uart_config {
__u16 wBaudRate; /* Baud rate */
__u16 wFlags; /* Bitmap mask of flags */
__u8 bDataBits; /* 5..8 - data bits per character */
@@ -165,8 +171,8 @@ struct ump_uart_config /* UART settings */
* TYPE DEFINITIONS
* Structures for USB interrupts
*/
-struct ump_interrupt /* Interrupt packet structure */
-{
+/* Interrupt packet structure */
+struct ump_interrupt {
__u8 bICode; /* Interrupt code (interrupt num) */
__u8 bIInfo; /* Interrupt information */
} __attribute__((packed));
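The UMPC_* commands above are vendor requests whose wValue/wIndex conventions are now spelled out next to each define (for the set/clear group, wValue bit 0 selects set or clear and wIndex carries the ModuleID/port). A hedged illustration of how that maps onto a control transfer; the request type and the helper itself are assumptions, not the driver's actual command helper:

	#include <linux/usb.h>

	static int demo_set_clr_dtr(struct usb_device *udev, u8 module_id, bool set)
	{
		return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				       UMPC_SET_CLR_DTR,
				       USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
				       set ? 1 : 0,	/* wValue bit 0: set/clear */
				       module_id,	/* wIndex: ModuleID (port) */
				       NULL, 0, 1000);
	}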
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 8e1a491e52a..51f83fbb73b 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -26,7 +26,7 @@
//
// Definitions of USB product IDs
-//
+//
#define USB_VENDOR_ID_ION 0x1608 // Our VID
#define USB_VENDOR_ID_TI 0x0451 // TI VID
@@ -54,7 +54,7 @@
// Product IDs - assigned to match middle digit of serial number (No longer true)
#define ION_DEVICE_ID_80251_NETCHIP 0x020 // This bit is set in the PID if this edgeport hardware$
- // is based on the 80251+Netchip.
+ // is based on the 80251+Netchip.
#define ION_DEVICE_ID_GENERATION_1 0x00 // Value for 930 based edgeports
#define ION_DEVICE_ID_GENERATION_2 0x01 // Value for 80251+Netchip.
@@ -134,7 +134,7 @@
#define ION_DEVICE_ID_TI_EDGEPORT_416 0x0212 // Edgeport/416
#define ION_DEVICE_ID_TI_EDGEPORT_1 0x0215 // Edgeport/1 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_42 0x0217 // Edgeport/42 4 hub 2 RS232
-#define ION_DEVICE_ID_TI_EDGEPORT_22I 0x021A // Edgeport/22I is an Edgeport/4 with ports 1&2 RS422 and ports 3&4 RS232
+#define ION_DEVICE_ID_TI_EDGEPORT_22I 0x021A // Edgeport/22I is an Edgeport/4 with ports 1&2 RS422 and ports 3&4 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_2C 0x021B // Edgeport/2c RS232
#define ION_DEVICE_ID_TI_EDGEPORT_221C 0x021C // Edgeport/221c is a TI based Edgeport/2 with lucent chip and
// 2 external hub ports - Large I2C
@@ -142,7 +142,7 @@
// 2 external hub ports - Large I2C
#define ION_DEVICE_ID_TI_EDGEPORT_21C 0x021E // Edgeport/21c is a TI based Edgeport/2 with lucent chip
-// Generation 3 devices -- 3410 based edgport/1 (256 byte I2C)
+// Generation 3 devices -- 3410 based edgeport/1 (256 byte I2C)
#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1 0x0240 // Edgeport/1 RS232
#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I 0x0241 // Edgeport/1i- RS422 model
@@ -176,7 +176,7 @@
// Default to /P function
#define ION_DEVICE_ID_PLUS_PWR_HP4CD 0x30C // 5052 Plus Power HubPort/4CD+ (for Dell)
-#define ION_DEVICE_ID_PLUS_PWR_HP4C 0x30D // 5052 Plus Power HubPort/4C+
+#define ION_DEVICE_ID_PLUS_PWR_HP4C 0x30D // 5052 Plus Power HubPort/4C+
#define ION_DEVICE_ID_PLUS_PWR_PCI 0x30E // 3410 Plus Power PCI Host Controller 4 port
@@ -217,17 +217,17 @@
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
-#define GENERATION_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) ((ProductId >> 8) & (ION_GENERATION_MASK)) )
+#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) ((ProductId >> 8) & (ION_GENERATION_MASK)))
-#define MAKE_USB_PRODUCT_ID( OemId, DeviceId ) \
- ( (__u16) (((OemId) << 10) || (DeviceId)) )
+#define MAKE_USB_PRODUCT_ID(OemId, DeviceId) \
+ ((__u16) (((OemId) << 10) || (DeviceId)))
-#define DEVICE_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) ((ProductId) & (EDGEPORT_DEVICE_ID_MASK)) )
+#define DEVICE_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) ((ProductId) & (EDGEPORT_DEVICE_ID_MASK)))
-#define OEM_ID_FROM_USB_PRODUCT_ID( ProductId ) \
- ( (__u16) (((ProductId) >> 10) & 0x3F) )
+#define OEM_ID_FROM_USB_PRODUCT_ID(ProductId) \
+ ((__u16) (((ProductId) >> 10) & 0x3F))
//
// Definitions of parameters for download code. Note that these are
@@ -237,7 +237,7 @@
// TxCredits value below which driver won't bother sending (to prevent too many small writes).
// Send only if above 25%
-#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit, MaxPacketSize) (max( ((InitialCredit) / 4), (MaxPacketSize) ))
+#define EDGE_FW_GET_TX_CREDITS_SEND_THRESHOLD(InitialCredit, MaxPacketSize) (max(((InitialCredit) / 4), (MaxPacketSize)))
#define EDGE_FW_BULK_MAX_PACKET_SIZE 64 // Max Packet Size for Bulk In Endpoint (EP1)
#define EDGE_FW_BULK_READ_BUFFER_SIZE 1024 // Size to use for Bulk reads
@@ -263,7 +263,7 @@
// wValue = 16-bit address
// wIndex = unused (though we could put segment 00: or FF: here)
// wLength = # bytes to read/write (max 64)
-//
+//
#define USB_REQUEST_ION_RESET_DEVICE 0 // Warm reboot Edgeport, retaining USB address
#define USB_REQUEST_ION_GET_EPIC_DESC 1 // Get Edgeport Compatibility Descriptor
@@ -278,7 +278,7 @@
#define USB_REQUEST_ION_ENABLE_SUSPEND 9 // Enable/Disable suspend feature
// (wValue != 0: Enable; wValue = 0: Disable)
-#define USB_REQUEST_ION_SEND_IOSP 10 // Send an IOSP command to the edgeport over the control pipe
+#define USB_REQUEST_ION_SEND_IOSP 10 // Send an IOSP command to the edgeport over the control pipe
#define USB_REQUEST_ION_RECV_IOSP 11 // Receive an IOSP command from the edgeport over the control pipe
@@ -301,8 +301,7 @@
// this is a "real" Edgeport.
//
-struct edge_compatibility_bits
-{
+struct edge_compatibility_bits {
// This __u32 defines which Vendor-specific commands/functionality
// the device supports on the default EP0 pipe.
@@ -334,24 +333,22 @@ struct edge_compatibility_bits
__u32 TrueEdgeport : 1; // 0001 Set if device is a 'real' Edgeport
// (Used only by driver, NEVER set by an EPiC device)
__u32 GenUnused : 31; // Available for future expansion, must be 0
-
};
#define EDGE_COMPATIBILITY_MASK0 0x0001
#define EDGE_COMPATIBILITY_MASK1 0x3FFF
#define EDGE_COMPATIBILITY_MASK2 0x0001
-struct edge_compatibility_descriptor
-{
+struct edge_compatibility_descriptor {
__u8 Length; // Descriptor Length (per USB spec)
__u8 DescType; // Descriptor Type (per USB spec, =DEVICE type)
__u8 EpicVer; // Version of EPiC spec supported
- // (Currently must be 1)
+ // (Currently must be 1)
__u8 NumPorts; // Number of serial ports supported
__u8 iDownloadFile; // Index of string containing download code filename
- // 0=no download, FF=download compiled into driver.
- __u8 Unused[ 3 ]; // Available for future expansion, must be 0
- // (Currently must be 0).
+ // 0=no download, FF=download compiled into driver.
+ __u8 Unused[3]; // Available for future expansion, must be 0
+ // (Currently must be 0).
__u8 MajorVersion; // Firmware version: xx.
__u8 MinorVersion; // yy.
__le16 BuildNumber; // zzzz (LE format)
@@ -359,9 +356,7 @@ struct edge_compatibility_descriptor
// The following structure contains __u32s, with each bit
// specifying whether the EPiC device supports the given
// command or functionality.
-
struct edge_compatibility_bits Supports;
-
};
// Values for iDownloadFile
@@ -391,8 +386,8 @@ struct edge_compatibility_descriptor
// Define the max block size that may be read or written
// in a read/write RAM/ROM command.
-#define MAX_SIZE_REQ_ION_READ_MEM ( (__u16) 64 )
-#define MAX_SIZE_REQ_ION_WRITE_MEM ( (__u16) 64 )
+#define MAX_SIZE_REQ_ION_READ_MEM ((__u16)64)
+#define MAX_SIZE_REQ_ION_WRITE_MEM ((__u16)64)
//
@@ -545,7 +540,7 @@ struct edge_boot_descriptor {
__u8 MajorVersion; // C6 Firmware version: xx.
__u8 MinorVersion; // C7 yy.
__le16 BuildNumber; // C8 zzzz (LE format)
-
+
__u16 EnumRootDescTable; // CA Root of ROM-based descriptor table
__u8 NumDescTypes; // CC Number of supported descriptor types
@@ -597,41 +592,36 @@ struct edge_boot_descriptor {
#define I2C_DESC_TYPE_ION 0 // Not defined by TI
-struct ti_i2c_desc
-{
+struct ti_i2c_desc {
__u8 Type; // Type of descriptor
__u16 Size; // Size of data only not including header
__u8 CheckSum; // Checksum (8 bit sum of data only)
__u8 Data[0]; // Data starts here
-}__attribute__((packed));
+} __attribute__((packed));
// for 5152 devices only (type 2 record)
// for 3410 the version is stored in the WATCHPORT_FIRMWARE_VERSION descriptor
-struct ti_i2c_firmware_rec
-{
+struct ti_i2c_firmware_rec {
__u8 Ver_Major; // Firmware Major version number
__u8 Ver_Minor; // Firmware Minor version number
__u8 Data[0]; // Download starts here
-}__attribute__((packed));
+} __attribute__((packed));
-struct watchport_firmware_version
-{
+struct watchport_firmware_version {
// Added 2 bytes for version number
__u8 Version_Major; // Download Version (for Watchport)
__u8 Version_Minor;
-}__attribute__((packed));
+} __attribute__((packed));
// Structure of header of download image in fw_down.h
-struct ti_i2c_image_header
-{
+struct ti_i2c_image_header {
__le16 Length;
__u8 CheckSum;
-}__attribute__((packed));
+} __attribute__((packed));
-struct ti_basic_descriptor
-{
+struct ti_basic_descriptor {
__u8 Power; // Self powered
// bit 7: 1 - power switching supported
// 0 - power switching not supported
@@ -663,9 +653,9 @@ struct ti_basic_descriptor
#define TI_I2C_SIZE_MASK 0x1f // 5 bits
#define TI_GET_I2C_SIZE(x) ((((x) & TI_I2C_SIZE_MASK)+1)*256)
-#define TI_MAX_I2C_SIZE ( 16 * 1024 )
+#define TI_MAX_I2C_SIZE (16 * 1024)
-#define TI_MANUF_VERSION_0 0
+#define TI_MANUF_VERSION_0 0
// IonConig2 flags
#define TI_CONFIG2_RS232 0x01
@@ -676,8 +666,7 @@ struct ti_basic_descriptor
#define TI_CONFIG2_WATCHPORT 0x10
-struct edge_ti_manuf_descriptor
-{
+struct edge_ti_manuf_descriptor {
__u8 IonConfig; // Config byte for ION manufacturing use
__u8 IonConfig2; // Expansion
__u8 Version; // Version
@@ -688,7 +677,7 @@ struct edge_ti_manuf_descriptor
__u8 HubConfig2; // Used to configure the Hub
__u8 TotalPorts; // Total Number of Com Ports for the entire device (All UMPs)
__u8 Reserved; // Reserved
-}__attribute__((packed));
+} __attribute__((packed));
#endif // if !defined(_USBVEND_H)
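The product-ID macros above carve a 16-bit PID into an OEM ID in bits 15:10, a generation field read via ION_GENERATION_MASK from bits 9:8, and a device ID selected by EDGEPORT_DEVICE_ID_MASK. Note that MAKE_USB_PRODUCT_ID, both before and after the reflow, combines the shifted OEM ID and the device ID with logical ||; the decomposition macros invert a bitwise layout, so the sketch below composes with | and treats the exact mask values (defined elsewhere in this header) as assumptions:

	#include <linux/types.h>

	static inline u16 demo_make_pid(u16 oem_id, u16 device_id)
	{
		return (u16)((oem_id << 10) | device_id);	/* bitwise compose */
	}

	static inline u16 demo_oem_id(u16 pid)
	{
		return (pid >> 10) & 0x3F;	/* mirrors OEM_ID_FROM_USB_PRODUCT_ID */
	}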
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 3fea9298eb1..28913fa95fb 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -56,7 +56,6 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include "ipaq.h"
#define KP_RETRIES 100
@@ -64,7 +63,7 @@
* Version Information
*/
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "Ganesh Varadarajan <ganesh@veritas.com>"
#define DRIVER_DESC "USB PocketPC PDA driver"
@@ -76,20 +75,8 @@ static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port);
-static void ipaq_close(struct usb_serial_port *port);
static int ipaq_calc_num_ports(struct usb_serial *serial);
static int ipaq_startup(struct usb_serial *serial);
-static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int ipaq_write_bulk(struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static void ipaq_write_gather(struct usb_serial_port *port);
-static void ipaq_read_bulk_callback(struct urb *urb);
-static void ipaq_write_bulk_callback(struct urb *urb);
-static int ipaq_write_room(struct tty_struct *tty);
-static int ipaq_chars_in_buffer(struct tty_struct *tty);
-static void ipaq_destroy_lists(struct usb_serial_port *port);
-
static struct usb_device_id ipaq_id_table [] = {
/* The first entry is a placeholder for the insmod-specified device */
@@ -558,7 +545,7 @@ static struct usb_driver ipaq_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = ipaq_id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
@@ -569,91 +556,24 @@ static struct usb_serial_driver ipaq_device = {
.name = "ipaq",
},
.description = "PocketPC PDA",
- .usb_driver = &ipaq_driver,
+ .usb_driver = &ipaq_driver,
.id_table = ipaq_id_table,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = ipaq_open,
- .close = ipaq_close,
.attach = ipaq_startup,
.calc_num_ports = ipaq_calc_num_ports,
- .write = ipaq_write,
- .write_room = ipaq_write_room,
- .chars_in_buffer = ipaq_chars_in_buffer,
- .read_bulk_callback = ipaq_read_bulk_callback,
- .write_bulk_callback = ipaq_write_bulk_callback,
};
-static spinlock_t write_list_lock;
-static int bytes_in;
-static int bytes_out;
-
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct ipaq_private *priv;
- struct ipaq_packet *pkt;
- int i, result = 0;
+ int result = 0;
int retries = connect_retries;
dbg("%s - port %d", __func__, port->number);
- bytes_in = 0;
- bytes_out = 0;
- priv = kmalloc(sizeof(struct ipaq_private), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
- return -ENOMEM;
- }
- usb_set_serial_port_data(port, priv);
- priv->active = 0;
- priv->queue_len = 0;
- priv->free_len = 0;
- INIT_LIST_HEAD(&priv->queue);
- INIT_LIST_HEAD(&priv->freelist);
-
- for (i = 0; i < URBDATA_QUEUE_MAX / PACKET_SIZE; i++) {
- pkt = kmalloc(sizeof(struct ipaq_packet), GFP_KERNEL);
- if (pkt == NULL)
- goto enomem;
-
- pkt->data = kmalloc(PACKET_SIZE, GFP_KERNEL);
- if (pkt->data == NULL) {
- kfree(pkt);
- goto enomem;
- }
- pkt->len = 0;
- pkt->written = 0;
- INIT_LIST_HEAD(&pkt->list);
- list_add(&pkt->list, &priv->freelist);
- priv->free_len += PACKET_SIZE;
- }
-
- /*
- * Lose the small buffers usbserial provides. Make larger ones.
- */
-
- kfree(port->bulk_in_buffer);
- kfree(port->bulk_out_buffer);
- /* make sure the generic serial code knows */
- port->bulk_out_buffer = NULL;
-
- port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
- if (port->bulk_in_buffer == NULL)
- goto enomem;
-
- port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
- if (port->bulk_out_buffer == NULL) {
- /* the buffer is useless, free it */
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = NULL;
- goto enomem;
- }
- port->read_urb->transfer_buffer = port->bulk_in_buffer;
- port->write_urb->transfer_buffer = port->bulk_out_buffer;
- port->read_urb->transfer_buffer_length = URBDATA_SIZE;
- port->bulk_out_size = port->write_urb->transfer_buffer_length
- = URBDATA_SIZE;
-
msleep(1000*initial_wait);
/*
@@ -663,7 +583,6 @@ static int ipaq_open(struct tty_struct *tty,
* through. Since this has a reasonably high failure rate, we retry
* several times.
*/
-
while (retries--) {
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
@@ -673,269 +592,15 @@ static int ipaq_open(struct tty_struct *tty,
msleep(1000);
}
-
if (!retries && result) {
- dev_err(&port->dev, "%s - failed doing control urb, error %d\n", __func__, result);
- goto error;
- }
-
- /* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipaq_read_bulk_callback, port);
-
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- goto error;
- }
-
- return 0;
-
-enomem:
- result = -ENOMEM;
- dev_err(&port->dev, "%s - Out of memory\n", __func__);
-error:
- ipaq_destroy_lists(port);
- kfree(priv);
- return result;
-}
-
-
-static void ipaq_close(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - port %d", __func__, port->number);
-
- /*
- * shut down bulk read and write
- */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
- ipaq_destroy_lists(port);
- kfree(priv);
- usb_set_serial_port_data(port, NULL);
-
- /* Uncomment the following line if you want to see some statistics
- * in your syslog */
- /* info ("Bytes In = %d Bytes Out = %d", bytes_in, bytes_out); */
-}
-
-static void ipaq_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- bytes_in += urb->actual_length;
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev, port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipaq_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
-}
-
-static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- const unsigned char *current_position = buf;
- int bytes_sent = 0;
- int transfer_size;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- transfer_size = min(count, PACKET_SIZE);
- if (ipaq_write_bulk(port, current_position, transfer_size))
- break;
- current_position += transfer_size;
- bytes_sent += transfer_size;
- count -= transfer_size;
- bytes_out += transfer_size;
+ dev_err(&port->dev, "%s - failed doing control urb, error %d\n",
+ __func__, result);
+ return result;
}
- return bytes_sent;
+ return usb_serial_generic_open(tty, port);
}
-static int ipaq_write_bulk(struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct ipaq_packet *pkt = NULL;
- int result = 0;
- unsigned long flags;
-
- if (priv->free_len <= 0) {
- dbg("%s - we're stuffed", __func__);
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&write_list_lock, flags);
- if (!list_empty(&priv->freelist)) {
- pkt = list_entry(priv->freelist.next, struct ipaq_packet, list);
- list_del(&pkt->list);
- priv->free_len -= PACKET_SIZE;
- }
- spin_unlock_irqrestore(&write_list_lock, flags);
- if (pkt == NULL) {
- dbg("%s - we're stuffed", __func__);
- return -EAGAIN;
- }
-
- memcpy(pkt->data, buf, count);
- usb_serial_debug_data(debug, &port->dev, __func__, count, pkt->data);
-
- pkt->len = count;
- pkt->written = 0;
- spin_lock_irqsave(&write_list_lock, flags);
- list_add_tail(&pkt->list, &priv->queue);
- priv->queue_len += count;
- if (priv->active == 0) {
- priv->active = 1;
- ipaq_write_gather(port);
- spin_unlock_irqrestore(&write_list_lock, flags);
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else {
- spin_unlock_irqrestore(&write_list_lock, flags);
- }
- return result;
-}
-
-static void ipaq_write_gather(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- int count, room;
- struct ipaq_packet *pkt, *tmp;
- struct urb *urb = port->write_urb;
-
- room = URBDATA_SIZE;
- list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
- count = min(room, (int)(pkt->len - pkt->written));
- memcpy(urb->transfer_buffer + (URBDATA_SIZE - room),
- pkt->data + pkt->written, count);
- room -= count;
- pkt->written += count;
- priv->queue_len -= count;
- if (pkt->written == pkt->len) {
- list_move(&pkt->list, &priv->freelist);
- priv->free_len += PACKET_SIZE;
- }
- if (room == 0)
- break;
- }
-
- count = URBDATA_SIZE - room;
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- ipaq_write_bulk_callback, port);
- return;
-}
-
-static void ipaq_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- return;
- }
-
- spin_lock_irqsave(&write_list_lock, flags);
- if (!list_empty(&priv->queue)) {
- ipaq_write_gather(port);
- spin_unlock_irqrestore(&write_list_lock, flags);
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else {
- priv->active = 0;
- spin_unlock_irqrestore(&write_list_lock, flags);
- }
-
- usb_serial_port_softint(port);
-}
-
-static int ipaq_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - freelen %d", __func__, priv->free_len);
- return priv->free_len;
-}
-
-static int ipaq_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ipaq_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - queuelen %d", __func__, priv->queue_len);
- return priv->queue_len;
-}
-
-static void ipaq_destroy_lists(struct usb_serial_port *port)
-{
- struct ipaq_private *priv = usb_get_serial_port_data(port);
- struct ipaq_packet *pkt, *tmp;
-
- list_for_each_entry_safe(pkt, tmp, &priv->queue, list) {
- kfree(pkt->data);
- kfree(pkt);
- }
- list_for_each_entry_safe(pkt, tmp, &priv->freelist, list) {
- kfree(pkt->data);
- kfree(pkt);
- }
-}
-
-
static int ipaq_calc_num_ports(struct usb_serial *serial)
{
/*
@@ -994,7 +659,6 @@ static int ipaq_startup(struct usb_serial *serial)
static int __init ipaq_init(void)
{
int retval;
- spin_lock_init(&write_list_lock);
retval = usb_serial_register(&ipaq_device);
if (retval)
goto failed_usb_serial_register;
@@ -1015,7 +679,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit ipaq_exit(void)
{
usb_deregister(&ipaq_driver);
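With its private packet queue and bulk callbacks removed, ipaq.c now relies on the usb-serial core: .bulk_in_size/.bulk_out_size tell the core how large to make the transfer buffers, and ipaq_open() only performs the device-specific control handshake before calling usb_serial_generic_open(). A hedged skeleton of that shape; everything named demo_* (including the VID/PID) is hypothetical:

	#include <linux/module.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static const struct usb_device_id demo_id_table[] = {
		{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
		{ }
	};

	static int demo_open(struct tty_struct *tty, struct usb_serial_port *port)
	{
		/*
		 * Device-specific bring-up (the vendor control request and
		 * retry loop in the hunk above) runs here first; the core
		 * then submits the read URB and services writes from the
		 * per-port FIFO.
		 */
		return usb_serial_generic_open(tty, port);
	}

	static struct usb_serial_driver demo_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "demo",
		},
		.description	= "demo PDA",
		.id_table	= demo_id_table,
		.bulk_in_size	= 256,	/* core allocates buffers of this size */
		.bulk_out_size	= 256,
		.open		= demo_open,
	};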
diff --git a/drivers/usb/serial/ipaq.h b/drivers/usb/serial/ipaq.h
deleted file mode 100644
index 2b9035918b8..00000000000
--- a/drivers/usb/serial/ipaq.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * USB Compaq iPAQ driver
- *
- * Copyright (C) 2001 - 2002
- * Ganesh Varadarajan <ganesh@veritas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-#ifndef __LINUX_USB_SERIAL_IPAQ_H
-#define __LINUX_USB_SERIAL_IPAQ_H
-
-/*
- * Since we can't queue our bulk write urbs (don't know why - it just
- * doesn't work), we can send down only one write urb at a time. The simplistic
- * approach taken by the generic usbserial driver will work, but it's not good
- * for performance. Therefore, we buffer upto URBDATA_QUEUE_MAX bytes of write
- * requests coming from the line discipline. This is done by chaining them
- * in lists of struct ipaq_packet, each packet holding a maximum of
- * PACKET_SIZE bytes.
- *
- * ipaq_write() can be called from bottom half context; hence we can't
- * allocate memory for packets there. So we initialize a pool of packets at
- * the first open and maintain a freelist.
- *
- * The value of PACKET_SIZE was empirically determined by
- * checking the maximum write sizes sent down by the ppp ldisc.
- * URBDATA_QUEUE_MAX is set to 64K, which is the maximum TCP window size.
- */
-
-struct ipaq_packet {
- char *data;
- size_t len;
- size_t written;
- struct list_head list;
-};
-
-struct ipaq_private {
- int active;
- int queue_len;
- int free_len;
- struct list_head queue;
- struct list_head freelist;
-};
-
-#define URBDATA_SIZE 4096
-#define URBDATA_QUEUE_MAX (64 * 1024)
-#define PACKET_SIZE 256
-
-#endif
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index e1d07840cee..ca77e88836b 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -34,7 +34,6 @@
* DCD, DTR, RTS, CTS which are currently faked.
* It's good enough for PPP at this point. It's based off all kinds of
* code found in usb/serial and usb/class
- *
*/
#include <linux/kernel.h>
@@ -52,7 +51,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.3"
+#define DRIVER_VERSION "v0.4"
#define DRIVER_AUTHOR "Roelf Diedericks"
#define DRIVER_DESC "IPWireless tty driver"
@@ -65,8 +64,6 @@
/* Message sizes */
#define EVENT_BUFFER_SIZE 0xFF
#define CHAR2INT16(c1, c0) (((u32)((c1) & 0xff) << 8) + (u32)((c0) & 0xff))
-#define NUM_BULK_URBS 24
-#define NUM_CONTROL_URBS 16
/* vendor/product pairs that are known work with this driver*/
#define IPW_VID 0x0bc3
@@ -151,47 +148,6 @@ static struct usb_driver usb_ipw_driver = {
static int debug;
-static void ipw_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- unsigned char *data = urb->transfer_buffer;
- struct tty_struct *tty;
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ipw_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- return;
-}
-
static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_device *dev = port->serial->dev;
@@ -229,15 +185,7 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
/*--2: Start reading from the device */
dbg("%s: setting up bulk read callback", __func__);
- usb_fill_bulk_urb(port->read_urb, dev,
- usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress),
- port->bulk_in_buffer,
- port->bulk_in_size,
- ipw_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result < 0)
- dbg("%s - usb_submit_urb(read bulk) failed with status %d",
- __func__, result);
+ usb_serial_generic_open(tty, port);
/*--3: Tell the modem to open the floodgates on the rx bulk channel */
dbg("%s:asking modem for RxRead (RXBULK_ON)", __func__);
@@ -267,35 +215,6 @@ static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"initial flowcontrol failed (error = %d)\n", result);
-
- /*--5: raise the dtr */
- dbg("%s:raising dtr", __func__);
- result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- IPW_SIO_SET_PIN,
- USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_SETDTR,
- 0,
- NULL,
- 0,
- 200000);
- if (result < 0)
- dev_err(&port->dev,
- "setting dtr failed (error = %d)\n", result);
-
- /*--6: raise the rts */
- dbg("%s:raising rts", __func__);
- result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- IPW_SIO_SET_PIN,
- USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
- IPW_PIN_SETRTS,
- 0,
- NULL,
- 0,
- 200000);
- if (result < 0)
- dev_err(&port->dev,
- "setting dtr failed (error = %d)\n", result);
-
kfree(buf_flow_init);
return 0;
}
@@ -305,8 +224,8 @@ static void ipw_dtr_rts(struct usb_serial_port *port, int on)
struct usb_device *dev = port->serial->dev;
int result;
- /*--1: drop the dtr */
- dbg("%s:dropping dtr", __func__);
+ dbg("%s: on = %d", __func__, on);
+
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN,
USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
@@ -316,22 +235,20 @@ static void ipw_dtr_rts(struct usb_serial_port *port, int on)
0,
200000);
if (result < 0)
- dev_err(&port->dev, "dropping dtr failed (error = %d)\n",
+ dev_err(&port->dev, "setting dtr failed (error = %d)\n",
result);
- /*--2: drop the rts */
- dbg("%s:dropping rts", __func__);
result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE | USB_DIR_OUT,
+ USB_RECIP_INTERFACE | USB_DIR_OUT,
on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
0,
NULL,
0,
200000);
if (result < 0)
- dev_err(&port->dev,
- "dropping rts failed (error = %d)\n", result);
+ dev_err(&port->dev, "setting rts failed (error = %d)\n",
+ result);
}
static void ipw_close(struct usb_serial_port *port)
@@ -368,83 +285,7 @@ static void ipw_close(struct usb_serial_port *port)
dev_err(&port->dev,
"Disabling bulk RxRead failed (error = %d)\n", result);
- /* shutdown any in-flight urbs that we know about */
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
-}
-
-static void ipw_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s", __func__);
-
- port->write_urb_busy = 0;
-
- if (status)
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
-
- usb_serial_port_softint(port);
-}
-
-static int ipw_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct usb_device *dev = port->serial->dev;
- int ret;
-
- dbg("%s: TOP: count=%d, in_interrupt=%ld", __func__,
- count, in_interrupt());
-
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
- return 0;
- }
-
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- count = min(count, port->bulk_out_size);
- memcpy(port->bulk_out_buffer, buf, count);
-
- dbg("%s count now:%d", __func__, count);
-
- usb_fill_bulk_urb(port->write_urb, dev,
- usb_sndbulkpipe(dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
- count,
- ipw_write_bulk_callback,
- port);
-
- ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (ret != 0) {
- port->write_urb_busy = 0;
- dbg("%s - usb_submit_urb(write bulk) failed with error = %d",
- __func__, ret);
- return ret;
- }
-
- dbg("%s returning %d", __func__, count);
- return count;
-}
-
-static int ipw_probe(struct usb_serial_port *port)
-{
- return 0;
-}
-
-static int ipw_disconnect(struct usb_serial_port *port)
-{
- usb_set_serial_port_data(port, NULL);
- return 0;
+ usb_serial_generic_close(port);
}
static struct usb_serial_driver ipw_device = {
@@ -453,17 +294,12 @@ static struct usb_serial_driver ipw_device = {
.name = "ipw",
},
.description = "IPWireless converter",
- .usb_driver = &usb_ipw_driver,
+ .usb_driver = &usb_ipw_driver,
.id_table = usb_ipw_ids,
.num_ports = 1,
.open = ipw_open,
.close = ipw_close,
.dtr_rts = ipw_dtr_rts,
- .port_probe = ipw_probe,
- .port_remove = ipw_disconnect,
- .write = ipw_write,
- .write_bulk_callback = ipw_write_bulk_callback,
- .read_bulk_callback = ipw_read_bulk_callback,
};
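ipw.c keeps only the device-specific pieces: the open/close paths defer to usb_serial_generic_open()/usb_serial_generic_close(), and the manual DTR/RTS raise/drop code collapses into the single .dtr_rts callback, which the usb-serial core invokes with on = 1 when the port is opened and on = 0 on hangup/close. A condensed sketch of that callback for one pin, using the IPW_* constants from the hunk above (the rest is illustrative):

	static void demo_dtr_rts(struct usb_serial_port *port, int on)
	{
		struct usb_device *dev = port->serial->dev;
		int result;

		result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
				IPW_SIO_SET_PIN,
				USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
				on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
				0, NULL, 0, 200000);
		if (result < 0)
			dev_err(&port->dev, "setting rts failed (error = %d)\n",
				result);
	}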
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4a0f5197423..ccbce4066d0 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2001-2002 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2002 Gary Brubaker (xavyer@ix.netcom.com)
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -72,8 +73,8 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.4"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB IR Dongle driver"
static int debug;
@@ -87,11 +88,9 @@ static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void ir_close(struct usb_serial_port *port);
-static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static void ir_write_bulk_callback (struct urb *urb);
-static void ir_read_bulk_callback (struct urb *urb);
+static int ir_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
+static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -130,10 +129,8 @@ static struct usb_serial_driver ir_device = {
.set_termios = ir_set_termios,
.attach = ir_startup,
.open = ir_open,
- .close = ir_close,
- .write = ir_write,
- .write_bulk_callback = ir_write_bulk_callback,
- .read_bulk_callback = ir_read_bulk_callback,
+ .prepare_write_buffer = ir_prepare_write_buffer,
+ .process_read_urb = ir_process_read_urb,
};
static inline void irda_usb_dump_class_desc(struct usb_irda_cs_descriptor *desc)
@@ -198,7 +195,6 @@ error:
return NULL;
}
-
static u8 ir_xbof_change(u8 xbof)
{
u8 result;
@@ -237,7 +233,6 @@ static u8 ir_xbof_change(u8 xbof)
return(result);
}
-
static int ir_startup(struct usb_serial *serial)
{
struct usb_irda_cs_descriptor *irda_desc;
@@ -297,83 +292,22 @@ static int ir_startup(struct usb_serial *serial)
static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- char *buffer;
- int result = 0;
+ int i;
dbg("%s - port %d", __func__, port->number);
- if (buffer_size) {
- /* override the default buffer sizes */
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
- return -ENOMEM;
- }
- kfree(port->read_urb->transfer_buffer);
- port->read_urb->transfer_buffer = buffer;
- port->read_urb->transfer_buffer_length = buffer_size;
-
- buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!buffer) {
- dev_err(&port->dev, "%s - out of memory.\n", __func__);
- return -ENOMEM;
- }
- kfree(port->write_urb->transfer_buffer);
- port->write_urb->transfer_buffer = buffer;
- port->write_urb->transfer_buffer_length = buffer_size;
- port->bulk_out_size = buffer_size;
- }
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
/* Start reading from the device */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ir_read_bulk_callback,
- port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-
- return result;
+ return usb_serial_generic_open(tty, port);
}
-static void ir_close(struct usb_serial_port *port)
+static int ir_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- dbg("%s - port %d", __func__, port->number);
-
- /* shutdown our bulk read */
- usb_kill_urb(port->read_urb);
-}
-
-static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- unsigned char *transfer_buffer;
- int result;
- int transfer_size;
-
- dbg("%s - port = %d, count = %d", __func__, port->number, count);
-
- if (count == 0)
- return 0;
-
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- transfer_buffer = port->write_urb->transfer_buffer;
- transfer_size = min(count, port->bulk_out_size - 1);
+ unsigned char *buf = dest;
+ int count;
/*
* The first byte of the packet we send to the device contains an
@@ -382,119 +316,57 @@ static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *transfer_buffer = ir_xbof | ir_baud;
- ++transfer_buffer;
-
- memcpy(transfer_buffer, buf, transfer_size);
+ *buf = ir_xbof | ir_baud;
- usb_fill_bulk_urb(
- port->write_urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
- transfer_size + 1,
- ir_write_bulk_callback,
- port);
-
- port->write_urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- port->write_urb_busy = 0;
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else
- result = transfer_size;
-
- return result;
+ count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
+ &port->lock);
+ return count + 1;
}
-static void ir_write_bulk_callback(struct urb *urb)
+static void ir_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
+ unsigned char *data = urb->transfer_buffer;
+ struct tty_struct *tty;
- port->write_urb_busy = 0;
- if (status) {
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
+ if (!urb->actual_length)
return;
- }
+ /*
+ * The first byte of the packet we get from the device
+ * contains a busy indicator and baud rate change.
+ * See section 5.4.1.2 of the USB IrDA spec.
+ */
+ if (*data & 0x0f)
+ ir_baud = *data & 0x0f;
- usb_serial_debug_data(
- debug,
- &port->dev,
- __func__,
- urb->actual_length,
- urb->transfer_buffer);
+ if (urb->actual_length == 1)
+ return;
- usb_serial_port_softint(port);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+ tty_insert_flip_string(tty, data + 1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
-static void ir_read_bulk_callback(struct urb *urb)
+static void ir_set_termios_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int result;
int status = urb->status;
dbg("%s - port %d", __func__, port->number);
- switch (status) {
- case 0: /* Successful */
- /*
- * The first byte of the packet we get from the device
- * contains a busy indicator and baud rate change.
- * See section 5.4.1.2 of the USB IrDA spec.
- */
- if ((*data & 0x0f) > 0)
- ir_baud = *data & 0x0f;
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- tty = tty_port_tty_get(&port->port);
- tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
-
- /*
- * No break here.
- * We want to resubmit the urb so we can read
- * again.
- */
-
- case -EPROTO: /* taking inspiration from pl2303.c */
- /* Continue trying to always read */
- usb_fill_bulk_urb(
- port->read_urb,
- port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- ir_read_bulk_callback,
- port);
-
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- break ;
- default:
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- break ;
- }
- return;
+ kfree(urb->transfer_buffer);
+
+ if (status)
+ dbg("%s - non-zero urb status: %d", __func__, status);
}
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
+ struct urb *urb;
unsigned char *transfer_buffer;
int result;
speed_t baud;
@@ -548,41 +420,63 @@ static void ir_set_termios(struct tty_struct *tty,
else
ir_xbof = ir_xbof_change(xbof) ;
- /* FIXME need to check to see if our write urb is busy right
- * now, or use a urb pool.
- *
+ /* Only speed changes are supported */
+ tty_termios_copy_hw(tty->termios, old_termios);
+ tty_encode_baud_rate(tty, baud, baud);
+
+ /*
* send the baud change out on an "empty" data packet
*/
- transfer_buffer = port->write_urb->transfer_buffer;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(&port->dev, "%s - no more urbs\n", __func__);
+ return;
+ }
+ transfer_buffer = kmalloc(1, GFP_KERNEL);
+ if (!transfer_buffer) {
+ dev_err(&port->dev, "%s - out of memory\n", __func__);
+ goto err_buf;
+ }
+
*transfer_buffer = ir_xbof | ir_baud;
usb_fill_bulk_urb(
- port->write_urb,
+ urb,
port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
+ transfer_buffer,
1,
- ir_write_bulk_callback,
+ ir_set_termios_callback,
port);
- port->write_urb->transfer_flags = URB_ZERO_PACKET;
+ urb->transfer_flags = URB_ZERO_PACKET;
- result = usb_submit_urb(port->write_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
+ result = usb_submit_urb(urb, GFP_KERNEL);
+ if (result) {
+ dev_err(&port->dev, "%s - failed to submit urb: %d\n",
+ __func__, result);
+ goto err_subm;
+ }
- /* Only speed changes are supported */
- tty_termios_copy_hw(tty->termios, old_termios);
- tty_encode_baud_rate(tty, baud, baud);
+ usb_free_urb(urb);
+
+ return;
+err_subm:
+ kfree(transfer_buffer);
+err_buf:
+ usb_free_urb(urb);
}
static int __init ir_init(void)
{
int retval;
+ if (buffer_size) {
+ ir_device.bulk_in_size = buffer_size;
+ ir_device.bulk_out_size = buffer_size;
+ }
+
retval = usb_serial_register(&ir_device);
if (retval)
goto failed_usb_serial_register;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 43f13cf2f01..74551cb2e8e 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1044,34 +1044,6 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
if (buf == NULL)
return -ENOMEM;
- /* fixup the endpoint buffer size */
- kfree(port->bulk_out_buffer);
- port->bulk_out_buffer = kmalloc(512, GFP_KERNEL);
- port->bulk_out_size = 512;
- kfree(port->bulk_in_buffer);
- port->bulk_in_buffer = kmalloc(512, GFP_KERNEL);
- port->bulk_in_size = 512;
-
- if (!port->bulk_out_buffer || !port->bulk_in_buffer) {
- kfree(port->bulk_out_buffer);
- kfree(port->bulk_in_buffer);
- kfree(buf);
- return -ENOMEM;
- }
-
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->bulk_out_buffer, 512,
- NULL, NULL);
-
-
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->bulk_in_buffer, 512,
- NULL, NULL);
-
priv->poll = 0;
/* initialize writebuf */
@@ -1277,6 +1249,8 @@ static struct usb_serial_driver iuu_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 512,
.port_probe = iuu_create_sysfs_attrs,
.port_remove = iuu_remove_sysfs_attrs,
.open = iuu_open,
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 8eef91ba4b1..cdbe8bf7f67 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -1,6 +1,7 @@
/*
* KLSI KL5KUSB105 chip RS232 converter driver
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Utz-Uwe Haus <haus@uuhaus.de>
*
* This program is free software; you can redistribute it and/or modify
@@ -34,17 +35,6 @@
* implement handshaking or decide that we do not support it
*/
-/* History:
- * 0.3a - implemented pools of write URBs
- * 0.3 - alpha version for public testing
- * 0.2 - TIOCMGET works, so autopilot(1) can be used!
- * 0.1 - can be used to do pilot-xfer -p /dev/ttyUSB0 -l
- *
- * The driver skeleton is mainly based on mct_u232.c and various other
- * pieces of code shamelessly copied from the drivers/usb/serial/ directory.
- */
-
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -64,8 +54,8 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.3a"
-#define DRIVER_AUTHOR "Utz-Uwe Haus <haus@uuhaus.de>"
+#define DRIVER_VERSION "v0.4"
+#define DRIVER_AUTHOR "Utz-Uwe Haus <haus@uuhaus.de>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "KLSI KL5KUSB105 chipset USB->Serial Converter driver"
@@ -73,23 +63,17 @@ static int debug;
* Function prototypes
*/
static int klsi_105_startup(struct usb_serial *serial);
-static void klsi_105_disconnect(struct usb_serial *serial);
static void klsi_105_release(struct usb_serial *serial);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
-static int klsi_105_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count);
-static void klsi_105_write_bulk_callback(struct urb *urb);
-static int klsi_105_chars_in_buffer(struct tty_struct *tty);
-static int klsi_105_write_room(struct tty_struct *tty);
-static void klsi_105_read_bulk_callback(struct urb *urb);
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
-static void klsi_105_throttle(struct tty_struct *tty);
-static void klsi_105_unthrottle(struct tty_struct *tty);
static int klsi_105_tiocmget(struct tty_struct *tty, struct file *file);
static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+static void klsi_105_process_read_urb(struct urb *urb);
+static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
/*
* All of the device info needed for the KLSI converters.
@@ -107,7 +91,7 @@ static struct usb_driver kl5kusb105d_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
static struct usb_serial_driver kl5kusb105d_device = {
@@ -115,26 +99,23 @@ static struct usb_serial_driver kl5kusb105d_device = {
.owner = THIS_MODULE,
.name = "kl5kusb105d",
},
- .description = "KL5KUSB105D / PalmConnect",
- .usb_driver = &kl5kusb105d_driver,
- .id_table = id_table,
- .num_ports = 1,
- .open = klsi_105_open,
- .close = klsi_105_close,
- .write = klsi_105_write,
- .write_bulk_callback = klsi_105_write_bulk_callback,
- .chars_in_buffer = klsi_105_chars_in_buffer,
- .write_room = klsi_105_write_room,
- .read_bulk_callback = klsi_105_read_bulk_callback,
- .set_termios = klsi_105_set_termios,
- /*.break_ctl = klsi_105_break_ctl,*/
- .tiocmget = klsi_105_tiocmget,
- .tiocmset = klsi_105_tiocmset,
- .attach = klsi_105_startup,
- .disconnect = klsi_105_disconnect,
- .release = klsi_105_release,
- .throttle = klsi_105_throttle,
- .unthrottle = klsi_105_unthrottle,
+ .description = "KL5KUSB105D / PalmConnect",
+ .usb_driver = &kl5kusb105d_driver,
+ .id_table = id_table,
+ .num_ports = 1,
+ .bulk_out_size = 64,
+ .open = klsi_105_open,
+ .close = klsi_105_close,
+ .set_termios = klsi_105_set_termios,
+ /*.break_ctl = klsi_105_break_ctl,*/
+ .tiocmget = klsi_105_tiocmget,
+ .tiocmset = klsi_105_tiocmset,
+ .attach = klsi_105_startup,
+ .release = klsi_105_release,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = klsi_105_process_read_urb,
+ .prepare_write_buffer = klsi_105_prepare_write_buffer,
};
struct klsi_105_port_settings {
@@ -145,18 +126,11 @@ struct klsi_105_port_settings {
__u8 unknown2;
} __attribute__ ((packed));
-/* we implement a pool of NUM_URBS urbs per usb_serial */
-#define NUM_URBS 1
-#define URB_TRANSFER_BUFFER_SIZE 64
struct klsi_105_private {
struct klsi_105_port_settings cfg;
struct ktermios termios;
unsigned long line_state; /* modem line settings */
- /* write pool */
- struct urb *write_urb_pool[NUM_URBS];
spinlock_t lock;
- unsigned long bytes_in;
- unsigned long bytes_out;
};
@@ -189,7 +163,7 @@ static int klsi_105_chg_port_settings(struct usb_serial_port *port,
settings->pktlen, settings->baudrate, settings->databits,
settings->unknown1, settings->unknown2);
return rc;
-} /* klsi_105_chg_port_settings */
+}
/* translate a 16-bit status value from the device to linux's TIO bits */
static unsigned long klsi_105_status2linestate(const __u16 status)
@@ -202,6 +176,7 @@ static unsigned long klsi_105_status2linestate(const __u16 status)
return res;
}
+
/*
* Read line control via vendor command and return result through
* *line_state_p
@@ -258,7 +233,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
static int klsi_105_startup(struct usb_serial *serial)
{
struct klsi_105_private *priv;
- int i, j;
+ int i;
/* check if we support the product id (see keyspan.c)
* FIXME
@@ -282,29 +257,9 @@ static int klsi_105_startup(struct usb_serial *serial)
priv->line_state = 0;
- priv->bytes_in = 0;
- priv->bytes_out = 0;
usb_set_serial_port_data(serial->port[i], priv);
spin_lock_init(&priv->lock);
- for (j = 0; j < NUM_URBS; j++) {
- struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
-
- priv->write_urb_pool[j] = urb;
- if (urb == NULL) {
- dev_err(&serial->dev->dev, "No more urbs???\n");
- goto err_cleanup;
- }
-
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
- if (!urb->transfer_buffer) {
- dev_err(&serial->dev->dev,
- "%s - out of memory for urb buffers.\n",
- __func__);
- goto err_cleanup;
- }
- }
/* priv->termios is left uninitalized until port opening */
init_waitqueue_head(&serial->port[i]->write_wait);
@@ -315,44 +270,11 @@ static int klsi_105_startup(struct usb_serial *serial)
err_cleanup:
for (; i >= 0; i--) {
priv = usb_get_serial_port_data(serial->port[i]);
- for (j = 0; j < NUM_URBS; j++) {
- if (priv->write_urb_pool[j]) {
- kfree(priv->write_urb_pool[j]->transfer_buffer);
- usb_free_urb(priv->write_urb_pool[j]);
- }
- }
+ kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
return -ENOMEM;
-} /* klsi_105_startup */
-
-
-static void klsi_105_disconnect(struct usb_serial *serial)
-{
- int i;
-
- dbg("%s", __func__);
-
- /* stop reads and writes on all ports */
- for (i = 0; i < serial->num_ports; ++i) {
- struct klsi_105_private *priv =
- usb_get_serial_port_data(serial->port[i]);
-
- if (priv) {
- /* kill our write urb pool */
- int j;
- struct urb **write_urbs = priv->write_urb_pool;
-
- for (j = 0; j < NUM_URBS; j++) {
- if (write_urbs[j]) {
- usb_kill_urb(write_urbs[j]);
- usb_free_urb(write_urbs[j]);
- }
- }
- }
- }
-} /* klsi_105_disconnect */
-
+}
static void klsi_105_release(struct usb_serial *serial)
{
@@ -360,13 +282,9 @@ static void klsi_105_release(struct usb_serial *serial)
dbg("%s", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- struct klsi_105_private *priv =
- usb_get_serial_port_data(serial->port[i]);
-
- kfree(priv);
- }
-} /* klsi_105_release */
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
+}
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
{
@@ -416,18 +334,8 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- klsi_105_read_bulk_callback,
- port);
-
- rc = usb_submit_urb(port->read_urb, GFP_KERNEL);
+ rc = usb_serial_generic_open(tty, port);
if (rc) {
- dev_err(&port->dev, "%s - failed submitting read urb, "
- "error %d\n", __func__, rc);
retval = rc;
goto exit;
}
@@ -460,12 +368,10 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
exit:
kfree(cfg);
return retval;
-} /* klsi_105_open */
-
+}
static void klsi_105_close(struct usb_serial_port *port)
{
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
int rc;
dbg("%s port %d", __func__, port->number);
@@ -488,239 +394,62 @@ static void klsi_105_close(struct usb_serial_port *port)
mutex_unlock(&port->serial->disc_mutex);
/* shutdown our bulk reads and writes */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
- /* unlink our write pool */
- /* FIXME */
+ usb_serial_generic_close(port);
+
/* wgg - do I need this? I think so. */
usb_kill_urb(port->interrupt_in_urb);
- dev_info(&port->serial->dev->dev,
- "port stats: %ld bytes in, %ld bytes out\n",
- priv->bytes_in, priv->bytes_out);
-} /* klsi_105_close */
-
+}
/* We need to write a complete 64-byte data block and encode the
* number actually sent in the first double-byte, LSB-order. That
* leaves at most 62 bytes of payload.
*/
-#define KLSI_105_DATA_OFFSET 2 /* in the bulk urb data block */
-
-
-static int klsi_105_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count)
+#define KLSI_HDR_LEN 2
+static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
- int result, size;
- int bytes_sent = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- while (count > 0) {
- /* try to find a free urb (write 0 bytes if none) */
- struct urb *urb = NULL;
- unsigned long flags;
- int i;
- /* since the pool is per-port we might not need
- the spin lock !? */
- spin_lock_irqsave(&priv->lock, flags);
- for (i = 0; i < NUM_URBS; i++) {
- if (priv->write_urb_pool[i]->status != -EINPROGRESS) {
- urb = priv->write_urb_pool[i];
- dbg("%s - using pool URB %d", __func__, i);
- break;
- }
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (urb == NULL) {
- dbg("%s - no more free urbs", __func__);
- goto exit;
- }
-
- if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC);
- if (urb->transfer_buffer == NULL) {
- dev_err(&port->dev,
- "%s - no more kernel memory...\n",
- __func__);
- goto exit;
- }
- }
-
- size = min(count, port->bulk_out_size - KLSI_105_DATA_OFFSET);
- size = min(size, URB_TRANSFER_BUFFER_SIZE -
- KLSI_105_DATA_OFFSET);
-
- memcpy(urb->transfer_buffer + KLSI_105_DATA_OFFSET, buf, size);
-
- /* write payload size into transfer buffer */
- ((__u8 *)urb->transfer_buffer)[0] = (__u8) (size & 0xFF);
- ((__u8 *)urb->transfer_buffer)[1] = (__u8) ((size & 0xFF00)>>8);
-
- /* set up our urb */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- urb->transfer_buffer,
- URB_TRANSFER_BUFFER_SIZE,
- klsi_105_write_bulk_callback,
- port);
-
- /* send the data out the bulk port */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- goto exit;
- }
- buf += size;
- bytes_sent += size;
- count -= size;
- }
-exit:
- /* lockless, but it's for debug info only... */
- priv->bytes_out += bytes_sent;
-
- return bytes_sent; /* that's how much we wrote */
-} /* klsi_105_write */
-
-static void klsi_105_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero write bulk status received: %d", __func__,
- status);
- return;
- }
+ unsigned char *buf = dest;
+ int count;
- usb_serial_port_softint(port);
-} /* klsi_105_write_bulk_completion_callback */
+ count = kfifo_out_locked(&port->write_fifo, buf + KLSI_HDR_LEN, size,
+ &port->lock);
+ put_unaligned_le16(count, buf);
-
-/* return number of characters currently in the writing process */
-static int klsi_105_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int chars = 0;
- int i;
- unsigned long flags;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < NUM_URBS; ++i) {
- if (priv->write_urb_pool[i]->status == -EINPROGRESS)
- chars += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, chars);
- return chars;
+ return count + KLSI_HDR_LEN;
}
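For illustration only, a standalone sketch (plain C, hypothetical helper name) of the KLSI framing the comment above describes: each block starts with a two-byte LSB-first payload length, leaving at most 62 bytes of data in a 64-byte block.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define KLSI_BLOCK_SIZE	64
#define KLSI_HDR_LEN	2

/* frame one block: LSB-first payload length, then at most 62 bytes of data */
static size_t klsi_frame_block(uint8_t block[KLSI_BLOCK_SIZE],
			       const uint8_t *data, size_t len)
{
	if (len > KLSI_BLOCK_SIZE - KLSI_HDR_LEN)
		len = KLSI_BLOCK_SIZE - KLSI_HDR_LEN;
	block[0] = len & 0xff;		/* length, low byte first */
	block[1] = (len >> 8) & 0xff;
	memcpy(block + KLSI_HDR_LEN, data, len);
	return len + KLSI_HDR_LEN;	/* bytes to submit on the bulk pipe */
}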
-static int klsi_105_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned long flags;
- int i;
- int room = 0;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
-
- spin_lock_irqsave(&priv->lock, flags);
- for (i = 0; i < NUM_URBS; ++i) {
- if (priv->write_urb_pool[i]->status != -EINPROGRESS)
- room += URB_TRANSFER_BUFFER_SIZE;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, room);
- return room;
-}
-
-
-
-static void klsi_105_read_bulk_callback(struct urb *urb)
+/* The data received is preceded by a length double-byte in LSB-first order.
+ */
+static void klsi_105_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
- struct klsi_105_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int rc;
- int status = urb->status;
+ struct tty_struct *tty;
+ unsigned len;
- dbg("%s - port %d", __func__, port->number);
+ /* empty urbs seem to happen, we ignore them */
+ if (!urb->actual_length)
+ return;
- /* The urb might have been killed. */
- if (status) {
- dbg("%s - nonzero read bulk status received: %d", __func__,
- status);
+ if (urb->actual_length <= KLSI_HDR_LEN) {
+ dbg("%s - malformed packet", __func__);
return;
}
- /* The data received is again preceded by a length double-byte in LSB-
- * first order (see klsi_105_write() )
- */
- if (urb->actual_length == 0) {
- /* empty urbs seem to happen, we ignore them */
- /* dbg("%s - emtpy URB", __func__); */
- ;
- } else if (urb->actual_length <= 2) {
- dbg("%s - size %d URB not understood", __func__,
- urb->actual_length);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
- } else {
- int bytes_sent = ((__u8 *) data)[0] +
- ((unsigned int) ((__u8 *) data)[1] << 8);
- tty = tty_port_tty_get(&port->port);
- /* we should immediately resubmit the URB, before attempting
- * to pass the data on to the tty layer. But that needs locking
- * against re-entry an then mixed-up data because of
- * intermixed tty_flip_buffer_push()s
- * FIXME
- */
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- if (bytes_sent + 2 > urb->actual_length) {
- dbg("%s - trying to read more data than available"
- " (%d vs. %d)", __func__,
- bytes_sent+2, urb->actual_length);
- /* cap at implied limit */
- bytes_sent = urb->actual_length - 2;
- }
-
- tty_insert_flip_string(tty, data + 2, bytes_sent);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
- /* again lockless, but debug info only */
- priv->bytes_in += bytes_sent;
+ len = get_unaligned_le16(data);
+ if (len > urb->actual_length - KLSI_HDR_LEN) {
+ dbg("%s - packet length mismatch", __func__);
+ len = urb->actual_length - KLSI_HDR_LEN;
}
- /* Continue trying to always read */
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- klsi_105_read_bulk_callback,
- port);
- rc = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (rc)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, rc);
-} /* klsi_105_read_bulk_callback */
+ tty_insert_flip_string(tty, data + KLSI_HDR_LEN, len);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+}
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
@@ -887,8 +616,7 @@ static void klsi_105_set_termios(struct tty_struct *tty,
klsi_105_chg_port_settings(port, cfg);
err:
kfree(cfg);
-} /* klsi_105_set_termios */
-
+}
#if 0
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
@@ -906,7 +634,7 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state)
lcr |= MCT_U232_SET_BREAK;
mct_u232_set_line_ctrl(serial, lcr);
-} /* mct_u232_break_ctl */
+}
#endif
static int klsi_105_tiocmget(struct tty_struct *tty, struct file *file)
@@ -962,29 +690,6 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
return retval;
}
-static void klsi_105_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- dbg("%s - port %d", __func__, port->number);
- usb_kill_urb(port->read_urb);
-}
-
-static void klsi_105_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- int result;
-
- dbg("%s - port %d", __func__, port->number);
-
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
-}
-
-
static int __init klsi_105_init(void)
{
@@ -1005,7 +710,6 @@ failed_usb_serial_register:
return retval;
}
-
static void __exit klsi_105_exit(void)
{
usb_deregister(&kl5kusb105d_driver);
@@ -1023,5 +727,3 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "enable extensive debugging messages");
-
-/* vim: set sts=8 ts=8 sw=8: */
diff --git a/drivers/usb/serial/kl5kusb105.h b/drivers/usb/serial/kl5kusb105.h
index 1231d9e7839..22a90badc86 100644
--- a/drivers/usb/serial/kl5kusb105.h
+++ b/drivers/usb/serial/kl5kusb105.h
@@ -17,16 +17,16 @@
/* baud rates */
enum {
- kl5kusb105a_sio_b115200 = 0,
- kl5kusb105a_sio_b57600 = 1,
- kl5kusb105a_sio_b38400 = 2,
- kl5kusb105a_sio_b19200 = 4,
- kl5kusb105a_sio_b14400 = 5,
- kl5kusb105a_sio_b9600 = 6,
- kl5kusb105a_sio_b4800 = 8, /* unchecked */
- kl5kusb105a_sio_b2400 = 9, /* unchecked */
- kl5kusb105a_sio_b1200 = 0xa, /* unchecked */
- kl5kusb105a_sio_b600 = 0xb /* unchecked */
+ kl5kusb105a_sio_b115200 = 0,
+ kl5kusb105a_sio_b57600 = 1,
+ kl5kusb105a_sio_b38400 = 2,
+ kl5kusb105a_sio_b19200 = 4,
+ kl5kusb105a_sio_b14400 = 5,
+ kl5kusb105a_sio_b9600 = 6,
+ kl5kusb105a_sio_b4800 = 8, /* unchecked */
+ kl5kusb105a_sio_b2400 = 9, /* unchecked */
+ kl5kusb105a_sio_b1200 = 0xa, /* unchecked */
+ kl5kusb105a_sio_b600 = 0xb /* unchecked */
};
/* data bits */
@@ -53,17 +53,16 @@ enum {
#define KL5KUSB105A_CTS ((1<<5) | (1<<4))
#define KL5KUSB105A_WANTS_TO_SEND 0x30
-//#define KL5KUSB105A_DTR /* Data Terminal Ready */
-//#define KL5KUSB105A_CTS /* Clear To Send */
-//#define KL5KUSB105A_CD /* Carrier Detect */
-//#define KL5KUSB105A_DSR /* Data Set Ready */
-//#define KL5KUSB105A_RxD /* Receive pin */
-
-//#define KL5KUSB105A_LE
-//#define KL5KUSB105A_RTS
-//#define KL5KUSB105A_ST
-//#define KL5KUSB105A_SR
-//#define KL5KUSB105A_RI /* Ring Indicator */
-
-/* vim: set ts=8 sts=8: */
-
+#if 0
+#define KL5KUSB105A_DTR /* Data Terminal Ready */
+#define KL5KUSB105A_CTS /* Clear To Send */
+#define KL5KUSB105A_CD /* Carrier Detect */
+#define KL5KUSB105A_DSR /* Data Set Ready */
+#define KL5KUSB105A_RxD /* Receive pin */
+
+#define KL5KUSB105A_LE
+#define KL5KUSB105A_RTS
+#define KL5KUSB105A_ST
+#define KL5KUSB105A_SR
+#define KL5KUSB105A_RI /* Ring Indicator */
+#endif
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c113a2a0e10..bd5bd8589e0 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)
/* FIXME: Add rts/dtr methods */
if (port->write_urb) {
- usb_kill_urb(port->write_urb);
+ usb_poison_urb(port->write_urb);
+ kfree(port->write_urb->transfer_buffer);
usb_free_urb(port->write_urb);
port->write_urb = NULL;
}
diff --git a/drivers/usb/serial/kobil_sct.h b/drivers/usb/serial/kobil_sct.h
index a51fbb5ae45..be207f7156f 100644
--- a/drivers/usb/serial/kobil_sct.h
+++ b/drivers/usb/serial/kobil_sct.h
@@ -23,38 +23,55 @@
#define SUSBCR_SSL_SETDTR 0x0004
#define SUSBCR_SSL_CLRDTR 0x0010
-#define SUSBCR_SSL_PURGE_TXABORT 0x0100 // Kill the pending/current writes to the comm port.
-#define SUSBCR_SSL_PURGE_RXABORT 0x0200 // Kill the pending/current reads to the comm port.
-#define SUSBCR_SSL_PURGE_TXCLEAR 0x0400 // Kill the transmit queue if there.
-#define SUSBCR_SSL_PURGE_RXCLEAR 0x0800 // Kill the typeahead buffer if there.
+/* Kill the pending/current writes to the comm port. */
+#define SUSBCR_SSL_PURGE_TXABORT 0x0100
+/* Kill the pending/current reads to the comm port. */
+#define SUSBCR_SSL_PURGE_RXABORT 0x0200
+/* Kill the transmit queue if there. */
+#define SUSBCR_SSL_PURGE_TXCLEAR 0x0400
+/* Kill the typeahead buffer if there. */
+#define SUSBCR_SSL_PURGE_RXCLEAR 0x0800
#define SUSBCRequest_GetStatusLineState 4
-#define SUSBCR_GSL_RXCHAR 0x0001 // Any Character received
-#define SUSBCR_GSL_TXEMPTY 0x0004 // Transmitt Queue Empty
-#define SUSBCR_GSL_CTS 0x0008 // CTS changed state
-#define SUSBCR_GSL_DSR 0x0010 // DSR changed state
-#define SUSBCR_GSL_RLSD 0x0020 // RLSD changed state
-#define SUSBCR_GSL_BREAK 0x0040 // BREAK received
-#define SUSBCR_GSL_ERR 0x0080 // Line status error occurred
-#define SUSBCR_GSL_RING 0x0100 // Ring signal detected
+/* Any Character received */
+#define SUSBCR_GSL_RXCHAR 0x0001
+/* Transmit Queue Empty */
+#define SUSBCR_GSL_TXEMPTY 0x0004
+/* CTS changed state */
+#define SUSBCR_GSL_CTS 0x0008
+/* DSR changed state */
+#define SUSBCR_GSL_DSR 0x0010
+/* RLSD changed state */
+#define SUSBCR_GSL_RLSD 0x0020
+/* BREAK received */
+#define SUSBCR_GSL_BREAK 0x0040
+/* Line status error occurred */
+#define SUSBCR_GSL_ERR 0x0080
+/* Ring signal detected */
+#define SUSBCR_GSL_RING 0x0100
#define SUSBCRequest_Misc 8
-#define SUSBCR_MSC_ResetReader 0x0001 // use a predefined reset sequence
-#define SUSBCR_MSC_ResetAllQueues 0x0002 // use a predefined sequence to reset the internal queues
+/* use a predefined reset sequence */
+#define SUSBCR_MSC_ResetReader 0x0001
+/* use a predefined sequence to reset the internal queues */
+#define SUSBCR_MSC_ResetAllQueues 0x0002
#define SUSBCRequest_GetMisc 0x10
-#define SUSBCR_MSC_GetFWVersion 0x0001 /* get the firmware version from device,
- coded like this 0xHHLLBBPP
- with HH = Firmware Version High Byte
- LL = Firmware Version Low Byte
- BB = Build Number
- PP = Further Attributes
- */
-
-#define SUSBCR_MSC_GetHWVersion 0x0002 /* get the hardware version from device
- coded like this 0xHHLLPPRR
- with HH = Software Version High Byte
- LL = Software Version Low Byte
- PP = Further Attributes
- RR = Reserved for the hardware ID
- */
+
+/*
+ * get the firmware version from device, coded like this 0xHHLLBBPP with
+ * HH = Firmware Version High Byte
+ * LL = Firmware Version Low Byte
+ * BB = Build Number
+ * PP = Further Attributes
+ */
+#define SUSBCR_MSC_GetFWVersion 0x0001
+
+/*
+ * get the hardware version from device coded like this 0xHHLLPPRR with
+ * HH = Software Version High Byte
+ * LL = Software Version Low Byte
+ * PP = Further Attributes
+ * RR = Reserved for the hardware ID
+ */
+#define SUSBCR_MSC_GetHWVersion 0x0002
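For illustration only, a standalone sketch (plain C, hypothetical helper name) of unpacking the 0xHHLLBBPP firmware-version word documented above:

#include <stdint.h>

/* split a 0xHHLLBBPP firmware version word into its documented fields */
static void kobil_decode_fw_version(uint32_t v, uint8_t *hh, uint8_t *ll,
				    uint8_t *bb, uint8_t *pp)
{
	*hh = (v >> 24) & 0xff;	/* Firmware Version High Byte */
	*ll = (v >> 16) & 0xff;	/* Firmware Version Low Byte */
	*bb = (v >> 8) & 0xff;	/* Build Number */
	*pp = v & 0xff;		/* Further Attributes */
}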
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 2849f8c3201..7aa01b95b1d 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -549,12 +549,9 @@ static void mct_u232_close(struct usb_serial_port *port)
{
dbg("%s port %d", __func__, port->number);
- if (port->serial->dev) {
- /* shutdown our urbs */
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
+ if (port->serial->dev)
usb_kill_urb(port->interrupt_in_urb);
- }
} /* mct_u232_close */
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 7417d5ce1e2..3a3f5e6b8f9 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -42,36 +42,44 @@
#define MCT_U232_SET_REQUEST_TYPE 0x40
#define MCT_U232_GET_REQUEST_TYPE 0xc0
-#define MCT_U232_GET_MODEM_STAT_REQUEST 2 /* Get Modem Status Register (MSR) */
-#define MCT_U232_GET_MODEM_STAT_SIZE 1
+/* Get Modem Status Register (MSR) */
+#define MCT_U232_GET_MODEM_STAT_REQUEST 2
+#define MCT_U232_GET_MODEM_STAT_SIZE 1
-#define MCT_U232_GET_LINE_CTRL_REQUEST 6 /* Get Line Control Register (LCR) */
-#define MCT_U232_GET_LINE_CTRL_SIZE 1 /* ... not used by this driver */
+/* Get Line Control Register (LCR) */
+/* ... not used by this driver */
+#define MCT_U232_GET_LINE_CTRL_REQUEST 6
+#define MCT_U232_GET_LINE_CTRL_SIZE 1
-#define MCT_U232_SET_BAUD_RATE_REQUEST 5 /* Set Baud Rate Divisor */
-#define MCT_U232_SET_BAUD_RATE_SIZE 4
+/* Set Baud Rate Divisor */
+#define MCT_U232_SET_BAUD_RATE_REQUEST 5
+#define MCT_U232_SET_BAUD_RATE_SIZE 4
-#define MCT_U232_SET_LINE_CTRL_REQUEST 7 /* Set Line Control Register (LCR) */
-#define MCT_U232_SET_LINE_CTRL_SIZE 1
+/* Set Line Control Register (LCR) */
+#define MCT_U232_SET_LINE_CTRL_REQUEST 7
+#define MCT_U232_SET_LINE_CTRL_SIZE 1
-#define MCT_U232_SET_MODEM_CTRL_REQUEST 10 /* Set Modem Control Register (MCR) */
-#define MCT_U232_SET_MODEM_CTRL_SIZE 1
+/* Set Modem Control Register (MCR) */
+#define MCT_U232_SET_MODEM_CTRL_REQUEST 10
+#define MCT_U232_SET_MODEM_CTRL_SIZE 1
-/* This USB device request code is not well understood. It is transmitted by
- the MCT-supplied Windows driver whenever the baud rate changes.
-*/
-#define MCT_U232_SET_UNKNOWN1_REQUEST 11 /* Unknown functionality */
-#define MCT_U232_SET_UNKNOWN1_SIZE 1
+/*
+ * This USB device request code is not well understood. It is transmitted by
+ * the MCT-supplied Windows driver whenever the baud rate changes.
+ */
+#define MCT_U232_SET_UNKNOWN1_REQUEST 11 /* Unknown functionality */
+#define MCT_U232_SET_UNKNOWN1_SIZE 1
-/* This USB device request code appears to control whether CTS is required
- during transmission.
-
- Sending a zero byte allows data transmission to a device which is not
- asserting CTS. Sending a '1' byte will cause transmission to be deferred
- until the device asserts CTS.
-*/
-#define MCT_U232_SET_CTS_REQUEST 12
-#define MCT_U232_SET_CTS_SIZE 1
+/*
+ * This USB device request code appears to control whether CTS is required
+ * during transmission.
+ *
+ * Sending a zero byte allows data transmission to a device which is not
+ * asserting CTS. Sending a '1' byte will cause transmission to be deferred
+ * until the device asserts CTS.
+ */
+#define MCT_U232_SET_CTS_REQUEST 12
+#define MCT_U232_SET_CTS_SIZE 1
#define MCT_U232_MAX_SIZE 4 /* of MCT_XXX_SIZE */
@@ -81,7 +89,8 @@
* and "Intel solution". They are the regular MCT and "Sitecom" for us.
* This is pointless to document in the header, see the code for the bits.
*/
-static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result);
+static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
+ speed_t value, speed_t *result);
/*
* Line Control Register (LCR)
@@ -125,16 +134,16 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
/*
* Line Status Register (LSR)
*/
-#define MCT_U232_LSR_INDEX 1 /* data[index] */
-#define MCT_U232_LSR_ERR 0x80 /* OE | PE | FE | BI */
-#define MCT_U232_LSR_TEMT 0x40 /* transmit register empty */
-#define MCT_U232_LSR_THRE 0x20 /* transmit holding register empty */
-#define MCT_U232_LSR_BI 0x10 /* break indicator */
-#define MCT_U232_LSR_FE 0x08 /* framing error */
-#define MCT_U232_LSR_OE 0x02 /* overrun error */
-#define MCT_U232_LSR_PE 0x04 /* parity error */
-#define MCT_U232_LSR_OE 0x02 /* overrun error */
-#define MCT_U232_LSR_DR 0x01 /* receive data ready */
+#define MCT_U232_LSR_INDEX 1 /* data[index] */
+#define MCT_U232_LSR_ERR 0x80 /* OE | PE | FE | BI */
+#define MCT_U232_LSR_TEMT 0x40 /* transmit register empty */
+#define MCT_U232_LSR_THRE 0x20 /* transmit holding register empty */
+#define MCT_U232_LSR_BI 0x10 /* break indicator */
+#define MCT_U232_LSR_FE 0x08 /* framing error */
+#define MCT_U232_LSR_OE 0x02 /* overrun error */
+#define MCT_U232_LSR_PE 0x04 /* parity error */
+#define MCT_U232_LSR_OE 0x02 /* overrun error */
+#define MCT_U232_LSR_DR 0x01 /* receive data ready */
/* -----------------------------------------------------------------------------
@@ -143,10 +152,10 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
*
* The technical details of the device have been acquired be using "SniffUSB"
* and the vendor-supplied device driver (version 2.3A) under Windows98. To
- * identify the USB vendor-specific requests and to assign them to terminal
+ * identify the USB vendor-specific requests and to assign them to terminal
* settings (flow control, baud rate, etc.) the program "SerialSettings" from
* William G. Greathouse has been proven to be very useful. I also used the
- * Win98 "HyperTerminal" and "usb-robot" on Linux for testing. The results and
+ * Win98 "HyperTerminal" and "usb-robot" on Linux for testing. The results and
* observations are summarized below:
*
* The USB requests seem to be directly mapped to the registers of a 8250,
@@ -186,33 +195,33 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Data: LCR (see below)
*
* Bit 7: Divisor Latch Access Bit (DLAB). When set, access to the data
- * transmit/receive register (THR/RBR) and the Interrupt Enable Register
- * (IER) is disabled. Any access to these ports is now redirected to the
- * Divisor Latch Registers. Setting this bit, loading the Divisor
- * Registers, and clearing DLAB should be done with interrupts disabled.
+ * transmit/receive register (THR/RBR) and the Interrupt Enable Register
+ * (IER) is disabled. Any access to these ports is now redirected to the
+ * Divisor Latch Registers. Setting this bit, loading the Divisor
+ * Registers, and clearing DLAB should be done with interrupts disabled.
* Bit 6: Set Break. When set to "1", the transmitter begins to transmit
- * continuous Spacing until this bit is set to "0". This overrides any
- * bits of characters that are being transmitted.
+ * continuous Spacing until this bit is set to "0". This overrides any
+ * bits of characters that are being transmitted.
* Bit 5: Stick Parity. When parity is enabled, setting this bit causes parity
- * to always be "1" or "0", based on the value of Bit 4.
+ * to always be "1" or "0", based on the value of Bit 4.
* Bit 4: Even Parity Select (EPS). When parity is enabled and Bit 5 is "0",
- * setting this bit causes even parity to be transmitted and expected.
- * Otherwise, odd parity is used.
+ * setting this bit causes even parity to be transmitted and expected.
+ * Otherwise, odd parity is used.
* Bit 3: Parity Enable (PEN). When set to "1", a parity bit is inserted
- * between the last bit of the data and the Stop Bit. The UART will also
- * expect parity to be present in the received data.
+ * between the last bit of the data and the Stop Bit. The UART will also
+ * expect parity to be present in the received data.
* Bit 2: Number of Stop Bits (STB). If set to "1" and using 5-bit data words,
- * 1.5 Stop Bits are transmitted and expected in each data word. For
- * 6, 7 and 8-bit data words, 2 Stop Bits are transmitted and expected.
- * When this bit is set to "0", one Stop Bit is used on each data word.
+ * 1.5 Stop Bits are transmitted and expected in each data word. For
+ * 6, 7 and 8-bit data words, 2 Stop Bits are transmitted and expected.
+ * When this bit is set to "0", one Stop Bit is used on each data word.
* Bit 1: Word Length Select Bit #1 (WLSB1)
* Bit 0: Word Length Select Bit #0 (WLSB0)
- * Together these bits specify the number of bits in each data word.
- * 1 0 Word Length
- * 0 0 5 Data Bits
- * 0 1 6 Data Bits
- * 1 0 7 Data Bits
- * 1 1 8 Data Bits
+ * Together these bits specify the number of bits in each data word.
+ * 1 0 Word Length
+ * 0 0 5 Data Bits
+ * 0 1 6 Data Bits
+ * 1 0 7 Data Bits
+ * 1 1 8 Data Bits
*
* SniffUSB observations: Bit 7 seems not to be used. There seem to be two bugs
* in the Win98 driver: the break does not work (bit 6 is not asserted) and the
@@ -234,20 +243,20 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Bit 6: Reserved, always 0.
* Bit 5: Reserved, always 0.
* Bit 4: Loop-Back Enable. When set to "1", the UART transmitter and receiver
- * are internally connected together to allow diagnostic operations. In
- * addition, the UART modem control outputs are connected to the UART
- * modem control inputs. CTS is connected to RTS, DTR is connected to
- * DSR, OUT1 is connected to RI, and OUT 2 is connected to DCD.
+ * are internally connected together to allow diagnostic operations. In
+ * addition, the UART modem control outputs are connected to the UART
+ * modem control inputs. CTS is connected to RTS, DTR is connected to
+ * DSR, OUT1 is connected to RI, and OUT 2 is connected to DCD.
* Bit 3: OUT 2. An auxiliary output that the host processor may set high or
- * low. In the IBM PC serial adapter (and most clones), OUT 2 is used
- * to tri-state (disable) the interrupt signal from the
- * 8250/16450/16550 UART.
+ * low. In the IBM PC serial adapter (and most clones), OUT 2 is used
+ * to tri-state (disable) the interrupt signal from the
+ * 8250/16450/16550 UART.
* Bit 2: OUT 1. An auxiliary output that the host processor may set high or
- * low. This output is not used on the IBM PC serial adapter.
+ * low. This output is not used on the IBM PC serial adapter.
* Bit 1: Request to Send (RTS). When set to "1", the output of the UART -RTS
- * line is Low (Active).
+ * line is Low (Active).
* Bit 0: Data Terminal Ready (DTR). When set to "1", the output of the UART
- * -DTR line is Low (Active).
+ * -DTR line is Low (Active).
*
* SniffUSB observations: Bit 2 and 4 seem not to be used but bit 3 has been
* seen _always_ set.
@@ -264,22 +273,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* Data: MSR (see below)
*
* Bit 7: Data Carrier Detect (CD). Reflects the state of the DCD line on the
- * UART.
+ * UART.
* Bit 6: Ring Indicator (RI). Reflects the state of the RI line on the UART.
* Bit 5: Data Set Ready (DSR). Reflects the state of the DSR line on the UART.
* Bit 4: Clear To Send (CTS). Reflects the state of the CTS line on the UART.
* Bit 3: Delta Data Carrier Detect (DDCD). Set to "1" if the -DCD line has
- * changed state one more more times since the last time the MSR was
- * read by the host.
+ * changed state one or more times since the last time the MSR was
+ * read by the host.
* Bit 2: Trailing Edge Ring Indicator (TERI). Set to "1" if the -RI line has
- * had a low to high transition since the last time the MSR was read by
- * the host.
+ * had a low to high transition since the last time the MSR was read by
+ * the host.
* Bit 1: Delta Data Set Ready (DDSR). Set to "1" if the -DSR line has changed
- * state one more more times since the last time the MSR was read by the
- * host.
+ * state one or more times since the last time the MSR was read by the
+ * host.
* Bit 0: Delta Clear To Send (DCTS). Set to "1" if the -CTS line has changed
- * state one more times since the last time the MSR was read by the
- * host.
+ * state one or more times since the last time the MSR was read by the
+ * host.
*
* SniffUSB observations: the MSR is also returned as first byte on the
* interrupt-in endpoint 0x83 to signal changes of modem status lines. The USB
@@ -290,31 +299,34 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* --------------------------
*
* Bit 7 Error in Receiver FIFO. On the 8250/16450 UART, this bit is zero.
- * This bit is set to "1" when any of the bytes in the FIFO have one or
- * more of the following error conditions: PE, FE, or BI.
+ * This bit is set to "1" when any of the bytes in the FIFO have one
+ * or more of the following error conditions: PE, FE, or BI.
* Bit 6 Transmitter Empty (TEMT). When set to "1", there are no words
- * remaining in the transmit FIFO or the transmit shift register. The
- * transmitter is completely idle.
- * Bit 5 Transmitter Holding Register Empty (THRE). When set to "1", the FIFO
- * (or holding register) now has room for at least one additional word
- * to transmit. The transmitter may still be transmitting when this bit
- * is set to "1".
+ * remaining in the transmit FIFO or the transmit shift register. The
+ * transmitter is completely idle.
+ * Bit 5 Transmitter Holding Register Empty (THRE). When set to "1", the
+ * FIFO (or holding register) now has room for at least one additional
+ * word to transmit. The transmitter may still be transmitting when
+ * this bit is set to "1".
* Bit 4 Break Interrupt (BI). The receiver has detected a Break signal.
- * Bit 3 Framing Error (FE). A Start Bit was detected but the Stop Bit did not
- * appear at the expected time. The received word is probably garbled.
- * Bit 2 Parity Error (PE). The parity bit was incorrect for the word received.
- * Bit 1 Overrun Error (OE). A new word was received and there was no room in
- * the receive buffer. The newly-arrived word in the shift register is
- * discarded. On 8250/16450 UARTs, the word in the holding register is
- * discarded and the newly- arrived word is put in the holding register.
+ * Bit 3 Framing Error (FE). A Start Bit was detected but the Stop Bit did
+ * not appear at the expected time. The received word is probably
+ * garbled.
+ * Bit 2 Parity Error (PE). The parity bit was incorrect for the word
+ * received.
+ * Bit 1 Overrun Error (OE). A new word was received and there was no room
+ * in the receive buffer. The newly-arrived word in the shift register
+ * is discarded. On 8250/16450 UARTs, the word in the holding register
+ * is discarded and the newly-arrived word is put in the holding
+ * register.
* Bit 0 Data Ready (DR). One or more words are in the receive FIFO that the
- * host may read. A word must be completely received and moved from the
- * shift register into the FIFO (or holding register for 8250/16450
- * designs) before this bit is set.
+ * host may read. A word must be completely received and moved from
+ * the shift register into the FIFO (or holding register for
+ * 8250/16450 designs) before this bit is set.
*
- * SniffUSB observations: the LSR is returned as second byte on the interrupt-in
- * endpoint 0x83 to signal error conditions. Such errors have been seen with
- * minicom/zmodem transfers (CRC errors).
+ * SniffUSB observations: the LSR is returned as second byte on the
+ * interrupt-in endpoint 0x83 to signal error conditions. Such errors have
+ * been seen with minicom/zmodem transfers (CRC errors).
*
*
* Unknown #1
@@ -364,16 +376,16 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* --------------
*
* SniffUSB observations: the bulk-out endpoint 0x1 and interrupt-in endpoint
- * 0x81 is used to transmit and receive characters. The second interrupt-in
- * endpoint 0x83 signals exceptional conditions like modem line changes and
+ * 0x81 are used to transmit and receive characters. The second interrupt-in
+ * endpoint 0x83 signals exceptional conditions like modem line changes and
* errors. The first byte returned is the MSR and the second byte the LSR.
*
*
* Other observations
* ------------------
*
- * Queued bulk transfers like used in visor.c did not work.
- *
+ * Queued bulk transfers as used in visor.c did not work.
+ *
*
* Properties of the USB device used (as found in /var/log/messages)
* -----------------------------------------------------------------
@@ -411,26 +423,26 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* bInterface Class:SubClass:Protocol = 00:00:00
* iInterface = 00
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 81 (in)
- * bmAttributes = 03 (Interrupt)
- * wMaxPacketSize = 0040
- * bInterval = 02
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 81 (in)
+ * bmAttributes = 03 (Interrupt)
+ * wMaxPacketSize = 0040
+ * bInterval = 02
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 01 (out)
- * bmAttributes = 02 (Bulk)
- * wMaxPacketSize = 0040
- * bInterval = 00
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 01 (out)
+ * bmAttributes = 02 (Bulk)
+ * wMaxPacketSize = 0040
+ * bInterval = 00
* Endpoint:
- * bLength = 7
- * bDescriptorType = 05
- * bEndpointAddress = 83 (in)
- * bmAttributes = 03 (Interrupt)
- * wMaxPacketSize = 0002
- * bInterval = 02
+ * bLength = 7
+ * bDescriptorType = 05
+ * bEndpointAddress = 83 (in)
+ * bmAttributes = 03 (Interrupt)
+ * wMaxPacketSize = 0002
+ * bInterval = 02
*
*
* Hardware details (added by Martin Hamilton, 2001/12/06)
@@ -440,7 +452,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* adaptor, which turns out to simply be a re-badged U232-P9. We
* know this because there is a sticky label on the circuit board
* which says "U232-P9" ;-)
- *
+ *
* The circuit board inside the adaptor contains a Philips PDIUSBD12
* USB endpoint chip and a Philips P87C52UBAA microcontroller with
* embedded UART. Exhaustive documentation for these is available at:
@@ -449,7 +461,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value
* http://www.semiconductors.philips.com/pip/pdiusbd12
*
* Thanks to Julian Highfield for the pointer to the Philips database.
- *
+ *
*/
#endif /* __LINUX_USB_SERIAL_MCT_U232_H */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 0d47f2c4d59..30922a7e334 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -34,21 +34,18 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-
+#include <linux/parport.h>
/*
* Version Information
*/
-#define DRIVER_VERSION "1.0.0.4F"
+#define DRIVER_VERSION "2.1"
#define DRIVER_AUTHOR "Aspire Communications pvt Ltd."
#define DRIVER_DESC "Moschip USB Serial Driver"
/* default urb timeout */
#define MOS_WDR_TIMEOUT (HZ * 5)
-#define MOS_PORT1 0x0200
-#define MOS_PORT2 0x0300
-#define MOS_VENREG 0x0000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
@@ -63,7 +60,7 @@
#define NUM_URBS 16 /* URB Count */
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
-/* This structure holds all of the local port information */
+/* This structure holds all of the local serial port information */
struct moschip_port {
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
@@ -74,11 +71,6 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-/* This structure holds all of the individual serial device information */
-struct moschip_serial {
- int interrupt_started;
-};
-
static int debug;
static struct usb_serial_driver moschip7720_2port_driver;
@@ -94,6 +86,658 @@ static const struct usb_device_id moschip_port_id_table[] = {
};
MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+
+/* initial values for parport regs */
+#define DCR_INIT_VAL 0x0c /* SLCTIN, nINIT */
+#define ECR_INIT_VAL 0x00 /* SPP mode */
+
+struct urbtracker {
+ struct mos7715_parport *mos_parport;
+ struct list_head urblist_entry;
+ struct kref ref_count;
+ struct urb *urb;
+};
+
+enum mos7715_pp_modes {
+ SPP = 0<<5,
+ PS2 = 1<<5, /* moschip calls this 'NIBBLE' mode */
+ PPF = 2<<5, /* moschip calls this 'CB-FIFO' mode */
+};
+
+struct mos7715_parport {
+ struct parport *pp; /* back to containing struct */
+ struct kref ref_count; /* to instance of this struct */
+ struct list_head deferred_urbs; /* list deferred async urbs */
+ struct list_head active_urbs; /* list async urbs in flight */
+ spinlock_t listlock; /* protects list access */
+ bool msg_pending; /* usb sync call pending */
+ struct completion syncmsg_compl; /* usb sync call completed */
+ struct tasklet_struct urb_tasklet; /* for sending deferred urbs */
+ struct usb_serial *serial; /* back to containing struct */
+ __u8 shadowECR; /* parallel port regs... */
+ __u8 shadowDCR;
+ atomic_t shadowDSR; /* updated in int-in callback */
+};
+
+/* lock guards against dereferencing NULL ptr in parport ops callbacks */
+static DEFINE_SPINLOCK(release_lock);
+
+#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
+
+static const unsigned int dummy; /* for clarity in register access fns */
+
+enum mos_regs {
+ THR, /* serial port regs */
+ RHR,
+ IER,
+ FCR,
+ ISR,
+ LCR,
+ MCR,
+ LSR,
+ MSR,
+ SPR,
+ DLL,
+ DLM,
+ DPR, /* parallel port regs */
+ DSR,
+ DCR,
+ ECR,
+ SP1_REG, /* device control regs */
+ SP2_REG, /* serial port 2 (7720 only) */
+ PP_REG,
+ SP_CONTROL_REG,
+};
+
+/*
+ * Return the correct value for the Windex field of the setup packet
+ * for a control endpoint message. See the 7715 datasheet.
+ */
+static inline __u16 get_reg_index(enum mos_regs reg)
+{
+ static const __u16 mos7715_index_lookup_table[] = {
+ 0x00, /* THR */
+ 0x00, /* RHR */
+ 0x01, /* IER */
+ 0x02, /* FCR */
+ 0x02, /* ISR */
+ 0x03, /* LCR */
+ 0x04, /* MCR */
+ 0x05, /* LSR */
+ 0x06, /* MSR */
+ 0x07, /* SPR */
+ 0x00, /* DLL */
+ 0x01, /* DLM */
+ 0x00, /* DPR */
+ 0x01, /* DSR */
+ 0x02, /* DCR */
+ 0x0a, /* ECR */
+ 0x01, /* SP1_REG */
+ 0x02, /* SP2_REG (7720 only) */
+ 0x04, /* PP_REG (7715 only) */
+ 0x08, /* SP_CONTROL_REG */
+ };
+ return mos7715_index_lookup_table[reg];
+}
+
+/*
+ * Return the correct value for the upper byte of the Wvalue field of
+ * the setup packet for a control endpoint message.
+ */
+static inline __u16 get_reg_value(enum mos_regs reg,
+ unsigned int serial_portnum)
+{
+ if (reg >= SP1_REG) /* control reg */
+ return 0x0000;
+
+ else if (reg >= DPR) /* parallel port reg (7715 only) */
+ return 0x0100;
+
+ else /* serial port reg */
+ return (serial_portnum + 2) << 8;
+}
+
+/*
+ * Write data byte to the specified device register. The data is embedded in
+ * the value field of the setup packet. serial_portnum is ignored for registers
+ * not specific to a particular serial port.
+ */
+static int write_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ enum mos_regs reg, __u8 data)
+{
+ struct usb_device *usbdev = serial->dev;
+ unsigned int pipe = usb_sndctrlpipe(usbdev, 0);
+ __u8 request = (__u8)0x0e;
+ __u8 requesttype = (__u8)0x40;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum) + data;
+ int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, NULL, 0, MOS_WDR_TIMEOUT);
+ if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
+ return status;
+}
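As a worked example of the composition above (taken directly from the lookup table and get_reg_value()): writing 0x03 to the LCR of serial port 0 uses get_reg_index(LCR) = 0x03 and get_reg_value(LCR, 0) = (0 + 2) << 8 = 0x0200, so the control message goes out with wIndex = 0x0003 and wValue = 0x0200 + 0x03 = 0x0203; a parallel-port register such as DCR would instead use wIndex = 0x0002 and the fixed 0x0100 upper byte in wValue.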
+
+/*
+ * Read data byte from the specified device register. The data returned by the
+ * device is embedded in the value field of the setup packet. serial_portnum is
+ * ignored for registers that are not specific to a particular serial port.
+ */
+static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ enum mos_regs reg, __u8 *data)
+{
+ struct usb_device *usbdev = serial->dev;
+ unsigned int pipe = usb_rcvctrlpipe(usbdev, 0);
+ __u8 request = (__u8)0x0d;
+ __u8 requesttype = (__u8)0xc0;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum);
+ int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ index, data, 1, MOS_WDR_TIMEOUT);
+ if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
+ return status;
+}
+
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+
+static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
+ enum mos7715_pp_modes mode)
+{
+ mos_parport->shadowECR = mode;
+ write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+ return 0;
+}
+
+static void destroy_mos_parport(struct kref *kref)
+{
+ struct mos7715_parport *mos_parport =
+ container_of(kref, struct mos7715_parport, ref_count);
+
+ dbg("%s called", __func__);
+ kfree(mos_parport);
+}
+
+static void destroy_urbtracker(struct kref *kref)
+{
+ struct urbtracker *urbtrack =
+ container_of(kref, struct urbtracker, ref_count);
+ struct mos7715_parport *mos_parport = urbtrack->mos_parport;
+ dbg("%s called", __func__);
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+}
+
+/*
+ * This runs as a tasklet when sending an urb in a non-blocking parallel
+ * port callback had to be deferred because the disconnect mutex could not be
+ * obtained at the time.
+ */
+static void send_deferred_urbs(unsigned long _mos_parport)
+{
+ int ret_val;
+ unsigned long flags;
+ struct mos7715_parport *mos_parport = (void *)_mos_parport;
+ struct urbtracker *urbtrack;
+ struct list_head *cursor, *next;
+
+ dbg("%s called", __func__);
+
+ /* if release function ran, game over */
+ if (unlikely(mos_parport->serial == NULL))
+ return;
+
+ /* try again to get the mutex */
+ if (!mutex_trylock(&mos_parport->serial->disc_mutex)) {
+ dbg("%s: rescheduling tasklet", __func__);
+ tasklet_schedule(&mos_parport->urb_tasklet);
+ return;
+ }
+
+ /* if device disconnected, game over */
+ if (unlikely(mos_parport->serial->disconnected)) {
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ if (list_empty(&mos_parport->deferred_urbs)) {
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ dbg("%s: deferred_urbs list empty", __func__);
+ return;
+ }
+
+ /* move contents of deferred_urbs list to active_urbs list and submit */
+ list_for_each_safe(cursor, next, &mos_parport->deferred_urbs)
+ list_move_tail(cursor, &mos_parport->active_urbs);
+ list_for_each_entry(urbtrack, &mos_parport->active_urbs,
+ urblist_entry) {
+ ret_val = usb_submit_urb(urbtrack->urb, GFP_ATOMIC);
+ dbg("%s: urb submitted", __func__);
+ if (ret_val) {
+ dev_err(&mos_parport->serial->dev->dev,
+ "usb_submit_urb() failed: %d", ret_val);
+ list_del(&urbtrack->urblist_entry);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ }
+ }
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+}
+
+/* callback for parallel port control urbs submitted asynchronously */
+static void async_complete(struct urb *urb)
+{
+ struct urbtracker *urbtrack = urb->context;
+ int status = urb->status;
+ dbg("%s called", __func__);
+ if (unlikely(status))
+ dbg("%s - nonzero urb status received: %d", __func__, status);
+
+ /* remove the urbtracker from the active_urbs list */
+ spin_lock(&urbtrack->mos_parport->listlock);
+ list_del(&urbtrack->urblist_entry);
+ spin_unlock(&urbtrack->mos_parport->listlock);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+}
+
+static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ enum mos_regs reg, __u8 data)
+{
+ struct urbtracker *urbtrack;
+ int ret_val;
+ unsigned long flags;
+ struct usb_ctrlrequest setup;
+ struct usb_serial *serial = mos_parport->serial;
+ struct usb_device *usbdev = serial->dev;
+ dbg("%s called", __func__);
+
+ /* create and initialize the control urb and containing urbtracker */
+ urbtrack = kmalloc(sizeof(struct urbtracker), GFP_ATOMIC);
+ if (urbtrack == NULL) {
+ dev_err(&usbdev->dev, "out of memory");
+ return -ENOMEM;
+ }
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
+ urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urbtrack->urb == NULL) {
+ dev_err(&usbdev->dev, "out of urbs");
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+ setup.bRequestType = (__u8)0x40;
+ setup.bRequest = (__u8)0x0e;
+ setup.wValue = get_reg_value(reg, dummy);
+ setup.wIndex = get_reg_index(reg);
+ setup.wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ (unsigned char *)&setup,
+ NULL, 0, async_complete, urbtrack);
+ kref_init(&urbtrack->ref_count);
+ INIT_LIST_HEAD(&urbtrack->urblist_entry);
+
+ /*
+ * get the disconnect mutex, or add tracker to the deferred_urbs list
+ * and schedule a tasklet to try again later
+ */
+ if (!mutex_trylock(&serial->disc_mutex)) {
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_add_tail(&urbtrack->urblist_entry,
+ &mos_parport->deferred_urbs);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ tasklet_schedule(&mos_parport->urb_tasklet);
+ dbg("tasklet scheduled");
+ return 0;
+ }
+
+ /* bail if device disconnected */
+ if (serial->disconnected) {
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ mutex_unlock(&serial->disc_mutex);
+ return -ENODEV;
+ }
+
+ /* add the tracker to the active_urbs list and submit */
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_add_tail(&urbtrack->urblist_entry, &mos_parport->active_urbs);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ ret_val = usb_submit_urb(urbtrack->urb, GFP_ATOMIC);
+ mutex_unlock(&serial->disc_mutex);
+ if (ret_val) {
+ dev_err(&usbdev->dev,
+ "%s: submit_urb() failed: %d", __func__, ret_val);
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_del(&urbtrack->urblist_entry);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+ kref_put(&urbtrack->ref_count, destroy_urbtracker);
+ return ret_val;
+ }
+ return 0;
+}
+
+/*
+ * This is the common top part of all parallel port callback operations that
+ * send synchronous messages to the device. This implements convoluted locking
+ * that avoids two scenarios: (1) a port operation is called after usbserial
+ * has called our release function, at which point struct mos7715_parport has
+ * been destroyed, and (2) the device has been disconnected, but usbserial has
+ * not called the release function yet because someone has a serial port open.
+ * The shared release_lock prevents the first, and the mutex and disconnected
+ * flag maintained by usbserial covers the second. We also use the msg_pending
+ * flag to ensure that all synchronous usb message calls have completed before
+ * our release function can return.
+ */
+static int parport_prologue(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport;
+
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) {
+ /* release fn called, port struct destroyed */
+ spin_unlock(&release_lock);
+ return -1;
+ }
+ mos_parport->msg_pending = true; /* synch usb call pending */
+ INIT_COMPLETION(mos_parport->syncmsg_compl);
+ spin_unlock(&release_lock);
+
+ mutex_lock(&mos_parport->serial->disc_mutex);
+ if (mos_parport->serial->disconnected) {
+ /* device disconnected */
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ mos_parport->msg_pending = false;
+ complete(&mos_parport->syncmsg_compl);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * This is the common bottom part of all parallel port functions that send
+ * synchronous messages to the device.
+ */
+static inline void parport_epilogue(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ mutex_unlock(&mos_parport->serial->disc_mutex);
+ mos_parport->msg_pending = false;
+ complete(&mos_parport->syncmsg_compl);
+}
+
+static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called: %2.2x", __func__, d);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, SPP);
+ write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d);
+ parport_epilogue(pp);
+}
+
+static unsigned char parport_mos7715_read_data(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ unsigned char d;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return 0;
+ read_mos_reg(mos_parport->serial, dummy, DPR, &d);
+ parport_epilogue(pp);
+ return d;
+}
+
+static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 data;
+ dbg("%s called: %2.2x", __func__, d);
+ if (parport_prologue(pp) < 0)
+ return;
+ data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
+ write_mos_reg(mos_parport->serial, dummy, DCR, data);
+ mos_parport->shadowDCR = data;
+ parport_epilogue(pp);
+}
+
+static unsigned char parport_mos7715_read_control(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 dcr;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) {
+ spin_unlock(&release_lock);
+ return 0;
+ }
+ dcr = mos_parport->shadowDCR & 0x0f;
+ spin_unlock(&release_lock);
+ return dcr;
+}
+
+static unsigned char parport_mos7715_frob_control(struct parport *pp,
+ unsigned char mask,
+ unsigned char val)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ __u8 dcr;
+ dbg("%s called", __func__);
+ mask &= 0x0f;
+ val &= 0x0f;
+ if (parport_prologue(pp) < 0)
+ return 0;
+ mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ dcr = mos_parport->shadowDCR & 0x0f;
+ parport_epilogue(pp);
+ return dcr;
+}
+
+static unsigned char parport_mos7715_read_status(struct parport *pp)
+{
+ unsigned char status;
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return 0;
+ }
+ status = atomic_read(&mos_parport->shadowDSR) & 0xf8;
+ spin_unlock(&release_lock);
+ return status;
+}
+
+static void parport_mos7715_enable_irq(struct parport *pp)
+{
+ dbg("%s called", __func__);
+}
+static void parport_mos7715_disable_irq(struct parport *pp)
+{
+ dbg("%s called", __func__);
+}
+
+static void parport_mos7715_data_forward(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, PS2);
+ mos_parport->shadowDCR &= ~0x20;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ parport_epilogue(pp);
+}
+
+static void parport_mos7715_data_reverse(struct parport *pp)
+{
+ struct mos7715_parport *mos_parport = pp->private_data;
+ dbg("%s called", __func__);
+ if (parport_prologue(pp) < 0)
+ return;
+ mos7715_change_mode(mos_parport, PS2);
+ mos_parport->shadowDCR |= 0x20;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ parport_epilogue(pp);
+}
+
+static void parport_mos7715_init_state(struct pardevice *dev,
+ struct parport_state *s)
+{
+ dbg("%s called", __func__);
+ s->u.pc.ctr = DCR_INIT_VAL;
+ s->u.pc.ecr = ECR_INIT_VAL;
+}
+
+/* N.B. Parport core code requires that this function not block */
+static void parport_mos7715_save_state(struct parport *pp,
+ struct parport_state *s)
+{
+ struct mos7715_parport *mos_parport;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return;
+ }
+ s->u.pc.ctr = mos_parport->shadowDCR;
+ s->u.pc.ecr = mos_parport->shadowECR;
+ spin_unlock(&release_lock);
+}
+
+/* N.B. Parport core code requires that this function not block */
+static void parport_mos7715_restore_state(struct parport *pp,
+ struct parport_state *s)
+{
+ struct mos7715_parport *mos_parport;
+ dbg("%s called", __func__);
+ spin_lock(&release_lock);
+ mos_parport = pp->private_data;
+ if (unlikely(mos_parport == NULL)) { /* release called */
+ spin_unlock(&release_lock);
+ return;
+ }
+ write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR);
+ write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR);
+ spin_unlock(&release_lock);
+}
+
+static size_t parport_mos7715_write_compat(struct parport *pp,
+ const void *buffer,
+ size_t len, int flags)
+{
+ int retval;
+ struct mos7715_parport *mos_parport = pp->private_data;
+ int actual_len;
+ dbg("%s called: %u chars", __func__, (unsigned int)len);
+ if (parport_prologue(pp) < 0)
+ return 0;
+ mos7715_change_mode(mos_parport, PPF);
+ retval = usb_bulk_msg(mos_parport->serial->dev,
+ usb_sndbulkpipe(mos_parport->serial->dev, 2),
+ (void *)buffer, len, &actual_len,
+ MOS_WDR_TIMEOUT);
+ parport_epilogue(pp);
+ if (retval) {
+ dev_err(&mos_parport->serial->dev->dev,
+ "mos7720: usb_bulk_msg() failed: %d", retval);
+ return 0;
+ }
+ return actual_len;
+}
+
+static struct parport_operations parport_mos7715_ops = {
+ .owner = THIS_MODULE,
+ .write_data = parport_mos7715_write_data,
+ .read_data = parport_mos7715_read_data,
+
+ .write_control = parport_mos7715_write_control,
+ .read_control = parport_mos7715_read_control,
+ .frob_control = parport_mos7715_frob_control,
+
+ .read_status = parport_mos7715_read_status,
+
+ .enable_irq = parport_mos7715_enable_irq,
+ .disable_irq = parport_mos7715_disable_irq,
+
+ .data_forward = parport_mos7715_data_forward,
+ .data_reverse = parport_mos7715_data_reverse,
+
+ .init_state = parport_mos7715_init_state,
+ .save_state = parport_mos7715_save_state,
+ .restore_state = parport_mos7715_restore_state,
+
+ .compat_write_data = parport_mos7715_write_compat,
+
+ .nibble_read_data = parport_ieee1284_read_nibble,
+ .byte_read_data = parport_ieee1284_read_byte,
+};
+
+/*
+ * Allocate and initialize parallel port control struct, initialize
+ * the parallel port hardware device, and register with the parport subsystem.
+ */
+static int mos7715_parport_init(struct usb_serial *serial)
+{
+ struct mos7715_parport *mos_parport;
+
+ /* allocate and initialize parallel port control struct */
+ mos_parport = kzalloc(sizeof(struct mos7715_parport), GFP_KERNEL);
+ if (mos_parport == NULL) {
+ dbg("mos7715_parport_init: kzalloc failed");
+ return -ENOMEM;
+ }
+ mos_parport->msg_pending = false;
+ kref_init(&mos_parport->ref_count);
+ spin_lock_init(&mos_parport->listlock);
+ INIT_LIST_HEAD(&mos_parport->active_urbs);
+ INIT_LIST_HEAD(&mos_parport->deferred_urbs);
+ usb_set_serial_data(serial, mos_parport); /* hijack private pointer */
+ mos_parport->serial = serial;
+ tasklet_init(&mos_parport->urb_tasklet, send_deferred_urbs,
+ (unsigned long) mos_parport);
+ init_completion(&mos_parport->syncmsg_compl);
+
+ /* cycle parallel port reset bit */
+ write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80);
+ write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00);
+
+ /* initialize device registers */
+ mos_parport->shadowDCR = DCR_INIT_VAL;
+ write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+ mos_parport->shadowECR = ECR_INIT_VAL;
+ write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+
+ /* register with parport core */
+ mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
+ PARPORT_DMA_NONE,
+ &parport_mos7715_ops);
+ if (mos_parport->pp == NULL) {
+ dev_err(&serial->interface->dev,
+ "Could not register parport\n");
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ return -EIO;
+ }
+ mos_parport->pp->private_data = mos_parport;
+ mos_parport->pp->modes = PARPORT_MODE_COMPAT | PARPORT_MODE_PCSPP;
+ mos_parport->pp->dev = &serial->interface->dev;
+ parport_announce_port(mos_parport->pp);
+
+ return 0;
+}
+#endif /* CONFIG_USB_SERIAL_MOS7715_PARPORT */
/*
* mos7720_interrupt_callback
@@ -109,8 +753,6 @@ static void mos7720_interrupt_callback(struct urb *urb)
__u8 sp1;
__u8 sp2;
- dbg(" : Entering");
-
switch (status) {
case 0:
/* success */
@@ -161,7 +803,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
dbg("Serial Port 1: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port 1: Modem status change");
+ /* dbg("Serial Port 1: Modem status change"); */
break;
}
@@ -174,7 +816,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
dbg("Serial Port 2: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port 2: Modem status change");
+ /* dbg("Serial Port 2: Modem status change"); */
break;
}
}
@@ -208,6 +850,7 @@ static void mos7715_interrupt_callback(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
+ case -ENODEV:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __func__,
status);
@@ -243,11 +886,21 @@ static void mos7715_interrupt_callback(struct urb *urb)
dbg("Serial Port: Receiver time out");
break;
case SERIAL_IIR_MS:
- dbg("Serial Port: Modem status change");
+ /* dbg("Serial Port: Modem status change"); */
break;
}
}
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ { /* update local copy of DSR reg */
+ struct usb_serial_port *port = urb->context;
+ struct mos7715_parport *mos_parport = port->serial->private;
+ if (unlikely(mos_parport == NULL))
+ return;
+ atomic_set(&mos_parport->shadowDSR, data[2]);
+ }
+#endif
+
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
@@ -267,7 +920,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
int retval;
unsigned char *data ;
struct usb_serial_port *port;
- struct moschip_port *mos7720_port;
struct tty_struct *tty;
int status = urb->status;
@@ -276,13 +928,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
return;
}
- mos7720_port = urb->context;
- if (!mos7720_port) {
- dbg("NULL mos7720_port pointer");
- return ;
- }
-
- port = mos7720_port->port;
+ port = urb->context;
dbg("Entering...%s", __func__);
@@ -332,8 +978,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
return ;
}
- dbg("Entering .........");
-
tty = tty_port_tty_get(&mos7720_port->port->port);
if (tty && mos7720_port->open)
@@ -342,56 +986,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
}
/*
- * send_mos_cmd
- * this function will be used for sending command to device
- */
-static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
- __u16 index, u8 *data)
-{
- int status;
- u8 *buf;
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
-
- if (value < MOS_MAX_PORT) {
- if (product == MOSCHIP_DEVICE_ID_7715)
- value = 0x0200; /* identifies the 7715's serial port */
- else
- value = value*0x100+0x200;
- } else {
- value = 0x0000;
- if ((product == MOSCHIP_DEVICE_ID_7715) &&
- (index != 0x08)) {
- dbg("serial->product== MOSCHIP_DEVICE_ID_7715");
- /* index = 0x01 ; */
- }
- }
-
- if (request == MOS_WRITE) {
- value = value + *data;
- status = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
- 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
- } else {
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto out;
- }
- status = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
- 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
- *data = *buf;
- kfree(buf);
- }
-out:
- if (status < 0)
- dbg("Command Write failed Value %x index %x", value, index);
-
- return status;
-}
-
-
-/*
* mos77xx_probe
* this function installs the appropriate read interrupt endpoint callback
* depending on whether the device is a 7720 or 7715, thus avoiding costly
@@ -424,11 +1018,10 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
struct usb_serial *serial;
struct usb_serial_port *port0;
struct urb *urb;
- struct moschip_serial *mos7720_serial;
struct moschip_port *mos7720_port;
int response;
int port_number;
- char data;
+ __u8 data;
int allocated_urbs = 0;
int j;
@@ -440,11 +1033,6 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
port0 = serial->port[0];
- mos7720_serial = usb_get_serial_data(serial);
-
- if (mos7720_serial == NULL || port0 == NULL)
- return -ENODEV;
-
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -489,103 +1077,36 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
* 0x08 : SP1/2 Control Reg
*/
port_number = port->number - port->serial->minor;
- send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
+ read_mos_reg(serial, port_number, LSR, &data);
+
dbg("SS::%p LSR:%x", mos7720_port, data);
dbg("Check:Sending Command ..........");
- data = 0x02;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x01, &data);
- data = 0x02;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x02, &data);
+ write_mos_reg(serial, dummy, SP1_REG, 0x02);
+ write_mos_reg(serial, dummy, SP2_REG, 0x02);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
- data = 0xCF;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
- data = 0x03;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x0b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
-/* data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, port_number + 1, &data);
- data = 0x03;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, port_number + 1, &data);
- data = 0x00;
- send_mos_cmd(port->serial, MOS_WRITE, MOS_MAX_PORT,
- port_number + 1, &data);
-*/
- data = 0x00;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
+ mos7720_port->shadowLCR = 0x03;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
+ read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
data = data | (port->number - port->serial->minor + 1);
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
+ mos7720_port->shadowLCR = 0x83;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, THR, 0x0c);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ mos7720_port->shadowLCR = 0x03;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, IER, 0x0c);
- data = 0x83;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x00, &data);
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x03;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7720_startup as the *
- * structures were not set up at that time.) */
- if (!mos7720_serial->interrupt_started) {
- dbg("Interrupt buffer NULL !!!");
-
- /* not set up yet, so do it now */
- mos7720_serial->interrupt_started = 1;
-
- dbg("To Submit URB !!!");
-
- /* set up our interrupt urb */
- usb_fill_int_urb(port0->interrupt_in_urb, serial->dev,
- usb_rcvintpipe(serial->dev,
- port->interrupt_in_endpointAddress),
- port0->interrupt_in_buffer,
- port0->interrupt_in_urb->transfer_buffer_length,
- mos7720_interrupt_callback, mos7720_port,
- port0->interrupt_in_urb->interval);
-
- /* start interrupt read for this mos7720 this interrupt *
- * will continue as long as the mos7720 is connected */
- dbg("Submit URB over !!!");
- response = usb_submit_urb(port0->interrupt_in_urb, GFP_KERNEL);
- if (response)
- dev_err(&port->dev,
- "%s - Error %d submitting control urb\n",
- __func__, response);
- }
-
- /* set up our bulk in urb */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->bulk_in_buffer,
- port->read_urb->transfer_buffer_length,
- mos7720_bulk_in_callback, mos7720_port);
response = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (response)
dev_err(&port->dev, "%s - Error %d submitting read urb\n",
@@ -640,7 +1161,6 @@ static void mos7720_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7720_port;
- char data;
int j;
dbg("mos7720_close:entering...");
@@ -673,13 +1193,10 @@ static void mos7720_close(struct usb_serial_port *port)
/* these commands must not be issued if the device has
* been disconnected */
if (!serial->disconnected) {
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE,
- port->number - port->serial->minor, 0x04, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE,
- port->number - port->serial->minor, 0x01, &data);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ MCR, 0x00);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ IER, 0x00);
}
mutex_unlock(&serial->disc_mutex);
mos7720_port->open = 0;
@@ -708,8 +1225,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port->number - port->serial->minor,
- 0x03, &data);
+ write_mos_reg(serial, port->number - port->serial->minor,
+ LCR, mos7720_port->shadowLCR);
return;
}
@@ -854,9 +1371,8 @@ static void mos7720_throttle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios->c_cflag & CRTSCTS) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
- status = send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor,
- UART_MCR, &mos7720_port->shadowMCR);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
if (status != 0)
return;
}
@@ -889,22 +1405,21 @@ static void mos7720_unthrottle(struct tty_struct *tty)
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios->c_cflag & CRTSCTS) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
- status = send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor,
- UART_MCR, &mos7720_port->shadowMCR);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
if (status != 0)
return;
}
}
+/* FIXME: this function does not work */
static int set_higher_rates(struct moschip_port *mos7720_port,
unsigned int baud)
{
- unsigned char data;
struct usb_serial_port *port;
struct usb_serial *serial;
int port_number;
-
+ enum mos_regs sp_reg;
if (mos7720_port == NULL)
return -EINVAL;
@@ -917,58 +1432,35 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
dbg("Sending Setting Commands ..........");
port_number = port->number - port->serial->minor;
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x02, &data);
- data = 0x0CF;
- send_mos_cmd(serial, MOS_WRITE, port->number, 0x02, &data);
- data = 0x00b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x00b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
-
- data = 0x000;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x000;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00);
/***********************************************
* Set for higher rates *
***********************************************/
-
- data = baud * 0x10;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, port_number + 1, &data);
-
- data = 0x003;
- send_mos_cmd(serial, MOS_READ, MOS_MAX_PORT, 0x08, &data);
- data = 0x003;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT, 0x08, &data);
-
- data = 0x02b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x02b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
+ /* writing baud rate verbatim into uart clock field is clearly not right */
+ if (port_number == 0)
+ sp_reg = SP1_REG;
+ else
+ sp_reg = SP2_REG;
+ write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03);
+ mos7720_port->shadowMCR = 0x2b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/***********************************************
* Set DLL/DLM
***********************************************/
-
- data = mos7720_port->shadowLCR | UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
-
- data = 0x001; /* DLL */
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x00, &data);
- data = 0x000; /* DLM */
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
-
- data = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x03, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ write_mos_reg(serial, port_number, DLL, 0x01);
+ write_mos_reg(serial, port_number, DLM, 0x00);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
return 0;
}
@@ -1056,7 +1548,6 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
struct usb_serial *serial;
int divisor;
int status;
- unsigned char data;
unsigned char number;
if (mos7720_port == NULL)
@@ -1078,21 +1569,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
}
/* Enable access to divisor latch */
- data = mos7720_port->shadowLCR | UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, number, UART_LCR, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
+ write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
/* Write the divisor */
- data = ((unsigned char)(divisor & 0xff));
- send_mos_cmd(serial, MOS_WRITE, number, 0x00, &data);
-
- data = ((unsigned char)((divisor & 0xff00) >> 8));
- send_mos_cmd(serial, MOS_WRITE, number, 0x01, &data);
+ write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff));
+ write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8));
/* Disable access to divisor latch */
- data = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
- mos7720_port->shadowLCR = data;
- send_mos_cmd(serial, MOS_WRITE, number, 0x03, &data);
+ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
+ write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
return status;
}
@@ -1117,7 +1603,6 @@ static void change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
int port_number;
- char data;
if (mos7720_port == NULL)
return ;
@@ -1196,30 +1681,19 @@ static void change_port_settings(struct tty_struct *tty,
/* Update the LCR with the correct value */
mos7720_port->shadowLCR &=
- ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
+ ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7720_port->shadowLCR |= (lData | lParity | lStop);
/* Disable Interrupts */
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port->number - port->serial->minor,
- UART_IER, &data);
-
- data = 0x00;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_FCR, &data);
-
- data = 0xcf;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_FCR, &data);
+ write_mos_reg(serial, port_number, IER, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0x00);
+ write_mos_reg(serial, port_number, FCR, 0xcf);
/* Send the updated LCR value to the mos7720 */
- data = mos7720_port->shadowLCR;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_LCR, &data);
-
- data = 0x00b;
- mos7720_port->shadowMCR = data;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
- data = 0x00b;
- send_mos_cmd(serial, MOS_WRITE, port_number, 0x04, &data);
+ write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+ mos7720_port->shadowMCR = 0x0b;
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/* set up the MCR register and send it to the mos7720 */
mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1230,21 +1704,15 @@ static void change_port_settings(struct tty_struct *tty,
mos7720_port->shadowMCR |= (UART_MCR_XONANY);
/* To set hardware flow control to the specified *
* serial port, in SP1/2_CONTROL_REG */
- if (port->number) {
- data = 0x001;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT,
- 0x08, &data);
- } else {
- data = 0x002;
- send_mos_cmd(serial, MOS_WRITE, MOS_MAX_PORT,
- 0x08, &data);
- }
- } else {
+ if (port->number)
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ else
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+
+ } else
mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
- }
- data = mos7720_port->shadowMCR;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_MCR, &data);
+ write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
@@ -1257,8 +1725,7 @@ static void change_port_settings(struct tty_struct *tty,
if (baud >= 230400) {
set_higher_rates(mos7720_port, baud);
/* Enable Interrupts */
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_IER, &data);
+ write_mos_reg(serial, port_number, IER, 0x0c);
return;
}
@@ -1269,8 +1736,7 @@ static void change_port_settings(struct tty_struct *tty,
if (cflag & CBAUD)
tty_encode_baud_rate(tty, baud, baud);
/* Enable Interrupts */
- data = 0x0c;
- send_mos_cmd(serial, MOS_WRITE, port_number, UART_IER, &data);
+ write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
port->read_urb->dev = serial->dev;
@@ -1308,7 +1774,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
return;
}
- dbg("setting termios - ASPIRE");
+ dbg("%s\n", "setting termios - ASPIRE");
cflag = tty->termios->c_cflag;
@@ -1326,7 +1792,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
change_port_settings(tty, mos7720_port, old_termios);
if (!port->read_urb) {
- dbg("URB KILLED !!!!!");
+ dbg("%s", "URB KILLED !!!!!");
return;
}
@@ -1361,8 +1827,7 @@ static int get_lsr_info(struct tty_struct *tty,
count = mos7720_chars_in_buffer(tty);
if (count == 0) {
- send_mos_cmd(port->serial, MOS_READ, port_number,
- UART_LSR, &data);
+ read_mos_reg(port->serial, port_number, LSR, &data);
if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
== (UART_LSR_TEMT | UART_LSR_THRE)) {
dbg("%s -- Empty", __func__);
@@ -1400,13 +1865,11 @@ static int mos7720_tiocmget(struct tty_struct *tty, struct file *file)
}
static int mos7720_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
+ unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
unsigned int mcr ;
- unsigned char lmcr;
-
dbg("%s - port %d", __func__, port->number);
dbg("he was at tiocmget");
@@ -1427,10 +1890,8 @@ static int mos7720_tiocmset(struct tty_struct *tty, struct file *file,
mcr &= ~UART_MCR_LOOP;
mos7720_port->shadowMCR = mcr;
- lmcr = mos7720_port->shadowMCR;
-
- send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor, UART_MCR, &lmcr);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
return 0;
}
@@ -1440,7 +1901,6 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
{
unsigned int mcr ;
unsigned int arg;
- unsigned char data;
struct usb_serial_port *port;
@@ -1475,10 +1935,8 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
}
mos7720_port->shadowMCR = mcr;
-
- data = mos7720_port->shadowMCR;
- send_mos_cmd(port->serial, MOS_WRITE,
- port->number - port->serial->minor, UART_MCR, &data);
+ write_mos_reg(port->serial, port->number - port->serial->minor,
+ MCR, mos7720_port->shadowMCR);
return 0;
}
@@ -1590,12 +2048,12 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
static int mos7720_startup(struct usb_serial *serial)
{
- struct moschip_serial *mos7720_serial;
struct moschip_port *mos7720_port;
struct usb_device *dev;
int i;
char data;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ int ret_val;
dbg("%s: Entering ..........", __func__);
@@ -1606,15 +2064,6 @@ static int mos7720_startup(struct usb_serial *serial)
dev = serial->dev;
- /* create our private serial structure */
- mos7720_serial = kzalloc(sizeof(struct moschip_serial), GFP_KERNEL);
- if (mos7720_serial == NULL) {
- dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- return -ENOMEM;
- }
-
- usb_set_serial_data(serial, mos7720_serial);
-
/*
* The 7715 uses the first bulk in/out endpoint pair for the parallel
* port, and the second for the serial port. Because the usbserial core
@@ -1638,16 +2087,12 @@ static int mos7720_startup(struct usb_serial *serial)
serial->port[1]->interrupt_in_buffer = NULL;
}
- /* we set up the pointers to the endpoints in the mos7720_open *
- * function, as the structures aren't created yet. */
- /* set up port private structures */
+ /* set up serial port private structures */
for (i = 0; i < serial->num_ports; ++i) {
mos7720_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
if (mos7720_port == NULL) {
dev_err(&dev->dev, "%s - Out of memory\n", __func__);
- usb_set_serial_data(serial, NULL);
- kfree(mos7720_serial);
return -ENOMEM;
}
@@ -1669,12 +2114,22 @@ static int mos7720_startup(struct usb_serial *serial)
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
- /* LSR For Port 1 */
- send_mos_cmd(serial, MOS_READ, 0x00, UART_LSR, &data);
- dbg("LSR:%x", data);
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+ if (ret_val)
+ dev_err(&dev->dev,
+ "%s - Error %d submitting control urb\n",
+ __func__, ret_val);
- /* LSR For Port 2 */
- send_mos_cmd(serial, MOS_READ, 0x01, UART_LSR, &data);
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ ret_val = mos7715_parport_init(serial);
+ if (ret_val < 0)
+ return ret_val;
+ }
+#endif
+ /* LSR For Port 1 */
+ read_mos_reg(serial, 0, LSR, &data);
dbg("LSR:%x", data);
return 0;
@@ -1684,12 +2139,47 @@ static void mos7720_release(struct usb_serial *serial)
{
int i;
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ /* close the parallel port */
+
+ if (le16_to_cpu(serial->dev->descriptor.idProduct)
+ == MOSCHIP_DEVICE_ID_7715) {
+ struct urbtracker *urbtrack;
+ unsigned long flags;
+ struct mos7715_parport *mos_parport =
+ usb_get_serial_data(serial);
+
+ /* prevent NULL ptr dereference in port callbacks */
+ spin_lock(&release_lock);
+ mos_parport->pp->private_data = NULL;
+ spin_unlock(&release_lock);
+
+ /* wait for synchronous usb calls to return */
+ if (mos_parport->msg_pending)
+ wait_for_completion_timeout(&mos_parport->syncmsg_compl,
+ MOS_WDR_TIMEOUT);
+
+ parport_remove_port(mos_parport->pp);
+ usb_set_serial_data(serial, NULL);
+ mos_parport->serial = NULL;
+
+ /* if tasklet currently scheduled, wait for it to complete */
+ tasklet_kill(&mos_parport->urb_tasklet);
+
+ /* unlink any urbs sent by the tasklet */
+ spin_lock_irqsave(&mos_parport->listlock, flags);
+ list_for_each_entry(urbtrack,
+ &mos_parport->active_urbs,
+ urblist_entry)
+ usb_unlink_urb(urbtrack->urb);
+ spin_unlock_irqrestore(&mos_parport->listlock, flags);
+
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ }
+#endif
/* free private structure allocated for serial port */
for (i = 0; i < serial->num_ports; ++i)
kfree(usb_get_serial_port_data(serial->port[i]));
-
- /* free private structure allocated for serial device */
- kfree(usb_get_serial_data(serial));
}
static struct usb_driver usb_driver = {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2fda1c0182b..f8424d1bfc1 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -26,7 +26,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 84d0edad8e4..e280ad8e12f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -42,35 +42,14 @@
#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include "usb-wwan.h"
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
-static int option_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void option_close(struct usb_serial_port *port);
-static void option_dtr_rts(struct usb_serial_port *port, int on);
-
-static int option_startup(struct usb_serial *serial);
-static void option_disconnect(struct usb_serial *serial);
-static void option_release(struct usb_serial *serial);
-static int option_write_room(struct tty_struct *tty);
-
+static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int option_chars_in_buffer(struct tty_struct *tty);
-static void option_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
-static int option_tiocmget(struct tty_struct *tty, struct file *file);
-static int option_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear);
-static int option_send_setup(struct usb_serial_port *port);
-#ifdef CONFIG_PM
-static int option_suspend(struct usb_serial *serial, pm_message_t message);
-static int option_resume(struct usb_serial *serial);
-#endif
-
/* Vendor and product IDs */
#define OPTION_VENDOR_ID 0x0AF0
#define OPTION_PRODUCT_COLT 0x5000
@@ -380,6 +359,10 @@ static int option_resume(struct usb_serial *serial);
#define CINTERION_VENDOR_ID 0x0681
+/* Olivetti products */
+#define OLIVETTI_VENDOR_ID 0x0b3c
+#define OLIVETTI_PRODUCT_OLICARD100 0xc000
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -675,6 +658,180 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
@@ -726,6 +883,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -757,22 +916,22 @@ static struct usb_serial_driver option_1port_device = {
.id_table = option_ids,
.num_ports = 1,
.probe = option_probe,
- .open = option_open,
- .close = option_close,
- .dtr_rts = option_dtr_rts,
- .write = option_write,
- .write_room = option_write_room,
- .chars_in_buffer = option_chars_in_buffer,
- .set_termios = option_set_termios,
- .tiocmget = option_tiocmget,
- .tiocmset = option_tiocmset,
- .attach = option_startup,
- .disconnect = option_disconnect,
- .release = option_release,
+ .open = usb_wwan_open,
+ .close = usb_wwan_close,
+ .dtr_rts = usb_wwan_dtr_rts,
+ .write = usb_wwan_write,
+ .write_room = usb_wwan_write_room,
+ .chars_in_buffer = usb_wwan_chars_in_buffer,
+ .set_termios = usb_wwan_set_termios,
+ .tiocmget = usb_wwan_tiocmget,
+ .tiocmset = usb_wwan_tiocmset,
+ .attach = usb_wwan_startup,
+ .disconnect = usb_wwan_disconnect,
+ .release = usb_wwan_release,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
- .suspend = option_suspend,
- .resume = option_resume,
+ .suspend = usb_wwan_suspend,
+ .resume = usb_wwan_resume,
#endif
};
@@ -785,13 +944,6 @@ static int debug;
#define IN_BUFLEN 4096
#define OUT_BUFLEN 4096
-struct option_intf_private {
- spinlock_t susp_lock;
- unsigned int suspended:1;
- int in_flight;
- struct option_blacklist_info *blacklist_info;
-};
-
struct option_port_private {
/* Input endpoints and buffer for this port */
struct urb *in_urbs[N_IN_URB];
@@ -848,8 +1000,7 @@ module_exit(option_exit);
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- struct option_intf_private *data;
-
+ struct usb_wwan_intf_private *data;
/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
@@ -862,11 +1013,13 @@ static int option_probe(struct usb_serial *serial,
serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
return -ENODEV;
- data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
+ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+
if (!data)
return -ENOMEM;
+ data->send_setup = option_send_setup;
spin_lock_init(&data->susp_lock);
- data->blacklist_info = (struct option_blacklist_info*) id->driver_info;
+ data->private = (void *)id->driver_info;
return 0;
}
@@ -887,194 +1040,6 @@ static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
return OPTION_BLACKLIST_NONE;
}
-static void option_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
-{
- dbg("%s", __func__);
- /* Doesn't support option setting */
- tty_termios_copy_hw(tty->termios, old_termios);
- option_send_setup(port);
-}
-
-static int option_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct usb_serial_port *port = tty->driver_data;
- unsigned int value;
- struct option_port_private *portdata;
-
- portdata = usb_get_serial_port_data(port);
-
- value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
- ((portdata->dtr_state) ? TIOCM_DTR : 0) |
- ((portdata->cts_state) ? TIOCM_CTS : 0) |
- ((portdata->dsr_state) ? TIOCM_DSR : 0) |
- ((portdata->dcd_state) ? TIOCM_CAR : 0) |
- ((portdata->ri_state) ? TIOCM_RNG : 0);
-
- return value;
-}
-
-static int option_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
-
- portdata = usb_get_serial_port_data(port);
-
- /* FIXME: what locks portdata fields ? */
- if (set & TIOCM_RTS)
- portdata->rts_state = 1;
- if (set & TIOCM_DTR)
- portdata->dtr_state = 1;
-
- if (clear & TIOCM_RTS)
- portdata->rts_state = 0;
- if (clear & TIOCM_DTR)
- portdata->dtr_state = 0;
- return option_send_setup(port);
-}
-
-/* Write */
-static int option_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- int i;
- int left, todo;
- struct urb *this_urb = NULL; /* spurious */
- int err;
- unsigned long flags;
-
- portdata = usb_get_serial_port_data(port);
- intfdata = port->serial->private;
-
- dbg("%s: write (%d chars)", __func__, count);
-
- i = 0;
- left = count;
- for (i = 0; left > 0 && i < N_OUT_URB; i++) {
- todo = left;
- if (todo > OUT_BUFLEN)
- todo = OUT_BUFLEN;
-
- this_urb = portdata->out_urbs[i];
- if (test_and_set_bit(i, &portdata->out_busy)) {
- if (time_before(jiffies,
- portdata->tx_start_time[i] + 10 * HZ))
- continue;
- usb_unlink_urb(this_urb);
- continue;
- }
- dbg("%s: endpoint %d buf %d", __func__,
- usb_pipeendpoint(this_urb->pipe), i);
-
- err = usb_autopm_get_interface_async(port->serial->interface);
- if (err < 0)
- break;
-
- /* send the data */
- memcpy(this_urb->transfer_buffer, buf, todo);
- this_urb->transfer_buffer_length = todo;
-
- spin_lock_irqsave(&intfdata->susp_lock, flags);
- if (intfdata->suspended) {
- usb_anchor_urb(this_urb, &portdata->delayed);
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- } else {
- intfdata->in_flight++;
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- err = usb_submit_urb(this_urb, GFP_ATOMIC);
- if (err) {
- dbg("usb_submit_urb %p (write bulk) failed "
- "(%d)", this_urb, err);
- clear_bit(i, &portdata->out_busy);
- spin_lock_irqsave(&intfdata->susp_lock, flags);
- intfdata->in_flight--;
- spin_unlock_irqrestore(&intfdata->susp_lock, flags);
- continue;
- }
- }
-
- portdata->tx_start_time[i] = jiffies;
- buf += todo;
- left -= todo;
- }
-
- count -= left;
- dbg("%s: wrote (did %d)", __func__, count);
- return count;
-}
-
-static void option_indat_callback(struct urb *urb)
-{
- int err;
- int endpoint;
- struct usb_serial_port *port;
- struct tty_struct *tty;
- unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
-
- dbg("%s: %p", __func__, urb);
-
- endpoint = usb_pipeendpoint(urb->pipe);
- port = urb->context;
-
- if (status) {
- dbg("%s: nonzero status: %d on endpoint %02x.",
- __func__, status, endpoint);
- } else {
- tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
-
- /* Resubmit urb so we continue receiving */
- if (status != -ESHUTDOWN) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err && err != -EPERM)
- printk(KERN_ERR "%s: resubmit read urb failed. "
- "(%d)", __func__, err);
- else
- usb_mark_last_busy(port->serial->dev);
- }
-
- }
- return;
-}
-
-static void option_outdat_callback(struct urb *urb)
-{
- struct usb_serial_port *port;
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- int i;
-
- dbg("%s", __func__);
-
- port = urb->context;
- intfdata = port->serial->private;
-
- usb_serial_port_softint(port);
- usb_autopm_put_interface_async(port->serial->interface);
- portdata = usb_get_serial_port_data(port);
- spin_lock(&intfdata->susp_lock);
- intfdata->in_flight--;
- spin_unlock(&intfdata->susp_lock);
-
- for (i = 0; i < N_OUT_URB; ++i) {
- if (portdata->out_urbs[i] == urb) {
- smp_mb__before_clear_bit();
- clear_bit(i, &portdata->out_busy);
- break;
- }
- }
-}
-
static void option_instat_callback(struct urb *urb)
{
int err;
@@ -1131,183 +1096,6 @@ static void option_instat_callback(struct urb *urb)
}
}
-static int option_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
-
- portdata = usb_get_serial_port_data(port);
-
- for (i = 0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- if (this_urb && !test_bit(i, &portdata->out_busy))
- data_len += OUT_BUFLEN;
- }
-
- dbg("%s: %d", __func__, data_len);
- return data_len;
-}
-
-static int option_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct option_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
-
- portdata = usb_get_serial_port_data(port);
-
- for (i = 0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- /* FIXME: This locking is insufficient as this_urb may
- go unused during the test */
- if (this_urb && test_bit(i, &portdata->out_busy))
- data_len += this_urb->transfer_buffer_length;
- }
- dbg("%s: %d", __func__, data_len);
- return data_len;
-}
-
-static int option_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct option_port_private *portdata;
- struct option_intf_private *intfdata;
- struct usb_serial *serial = port->serial;
- int i, err;
- struct urb *urb;
-
- portdata = usb_get_serial_port_data(port);
- intfdata = serial->private;
-
- dbg("%s", __func__);
-
- /* Start reading from the IN endpoint */
- for (i = 0; i < N_IN_URB; i++) {
- urb = portdata->in_urbs[i];
- if (!urb)
- continue;
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- dbg("%s: submit urb %d failed (%d) %d",
- __func__, i, err,
- urb->transfer_buffer_length);
- }
- }
-
- option_send_setup(port);
-
- serial->interface->needs_remote_wakeup = 1;
- spin_lock_irq(&intfdata->susp_lock);
- portdata->opened = 1;
- spin_unlock_irq(&intfdata->susp_lock);
- usb_autopm_put_interface(serial->interface);
-
- return 0;
-}
-
-static void option_dtr_rts(struct usb_serial_port *port, int on)
-{
- struct usb_serial *serial = port->serial;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
- portdata = usb_get_serial_port_data(port);
- mutex_lock(&serial->disc_mutex);
- portdata->rts_state = on;
- portdata->dtr_state = on;
- if (serial->dev)
- option_send_setup(port);
- mutex_unlock(&serial->disc_mutex);
-}
-
-
-static void option_close(struct usb_serial_port *port)
-{
- int i;
- struct usb_serial *serial = port->serial;
- struct option_port_private *portdata;
- struct option_intf_private *intfdata = port->serial->private;
-
- dbg("%s", __func__);
- portdata = usb_get_serial_port_data(port);
-
- if (serial->dev) {
- /* Stop reading/writing urbs */
- spin_lock_irq(&intfdata->susp_lock);
- portdata->opened = 0;
- spin_unlock_irq(&intfdata->susp_lock);
-
- for (i = 0; i < N_IN_URB; i++)
- usb_kill_urb(portdata->in_urbs[i]);
- for (i = 0; i < N_OUT_URB; i++)
- usb_kill_urb(portdata->out_urbs[i]);
- usb_autopm_get_interface(serial->interface);
- serial->interface->needs_remote_wakeup = 0;
- }
-}
-
-/* Helper functions used by option_setup_urbs */
-static struct urb *option_setup_urb(struct usb_serial *serial, int endpoint,
- int dir, void *ctx, char *buf, int len,
- void (*callback)(struct urb *))
-{
- struct urb *urb;
-
- if (endpoint == -1)
- return NULL; /* endpoint not needed */
-
- urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
- if (urb == NULL) {
- dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
- return NULL;
- }
-
- /* Fill URB using supplied data. */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev, endpoint) | dir,
- buf, len, callback, ctx);
-
- return urb;
-}
-
-/* Setup urbs */
-static void option_setup_urbs(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
-
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- /* Do indat endpoints first */
- for (j = 0; j < N_IN_URB; ++j) {
- portdata->in_urbs[j] = option_setup_urb(serial,
- port->bulk_in_endpointAddress,
- USB_DIR_IN, port,
- portdata->in_buffer[j],
- IN_BUFLEN, option_indat_callback);
- }
-
- /* outdat endpoints */
- for (j = 0; j < N_OUT_URB; ++j) {
- portdata->out_urbs[j] = option_setup_urb(serial,
- port->bulk_out_endpointAddress,
- USB_DIR_OUT, port,
- portdata->out_buffer[j],
- OUT_BUFLEN, option_outdat_callback);
- }
- }
-}
-
-
/** send RTS/DTR state to the port.
*
* This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
@@ -1316,15 +1104,16 @@ static void option_setup_urbs(struct usb_serial *serial)
static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct option_intf_private *intfdata =
- (struct option_intf_private *) serial->private;
+ struct usb_wwan_intf_private *intfdata =
+ (struct usb_wwan_intf_private *) serial->private;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
dbg("%s", __func__);
- if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
- OPTION_BLACKLIST_SENDSETUP) {
+ if (is_blacklisted(ifNum,
+ (struct option_blacklist_info *) intfdata->private)
+ == OPTION_BLACKLIST_SENDSETUP) {
dbg("No send_setup on blacklisted interface #%d\n", ifNum);
return -EIO;
}
@@ -1341,224 +1130,6 @@ static int option_send_setup(struct usb_serial_port *port)
0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
-static int option_startup(struct usb_serial *serial)
-{
- int i, j, err;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
- u8 *buffer;
-
- dbg("%s", __func__);
-
- /* Now setup per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
- if (!portdata) {
- dbg("%s: kmalloc for option_port_private (%d) failed!.",
- __func__, i);
- return 1;
- }
- init_usb_anchor(&portdata->delayed);
-
- for (j = 0; j < N_IN_URB; j++) {
- buffer = (u8 *)__get_free_page(GFP_KERNEL);
- if (!buffer)
- goto bail_out_error;
- portdata->in_buffer[j] = buffer;
- }
-
- for (j = 0; j < N_OUT_URB; j++) {
- buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
- if (!buffer)
- goto bail_out_error2;
- portdata->out_buffer[j] = buffer;
- }
-
- usb_set_serial_port_data(port, portdata);
-
- if (!port->interrupt_in_urb)
- continue;
- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (err)
- dbg("%s: submit irq_in urb failed %d",
- __func__, err);
- }
- option_setup_urbs(serial);
- return 0;
-
-bail_out_error2:
- for (j = 0; j < N_OUT_URB; j++)
- kfree(portdata->out_buffer[j]);
-bail_out_error:
- for (j = 0; j < N_IN_URB; j++)
- if (portdata->in_buffer[j])
- free_page((unsigned long)portdata->in_buffer[j]);
- kfree(portdata);
- return 1;
-}
-
-static void stop_read_write_urbs(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- /* Stop reading/writing urbs */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
- for (j = 0; j < N_IN_URB; j++)
- usb_kill_urb(portdata->in_urbs[j]);
- for (j = 0; j < N_OUT_URB; j++)
- usb_kill_urb(portdata->out_urbs[j]);
- }
-}
-
-static void option_disconnect(struct usb_serial *serial)
-{
- dbg("%s", __func__);
-
- stop_read_write_urbs(serial);
-}
-
-static void option_release(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_port_private *portdata;
-
- dbg("%s", __func__);
-
- /* Now free them */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- for (j = 0; j < N_IN_URB; j++) {
- if (portdata->in_urbs[j]) {
- usb_free_urb(portdata->in_urbs[j]);
- free_page((unsigned long)
- portdata->in_buffer[j]);
- portdata->in_urbs[j] = NULL;
- }
- }
- for (j = 0; j < N_OUT_URB; j++) {
- if (portdata->out_urbs[j]) {
- usb_free_urb(portdata->out_urbs[j]);
- kfree(portdata->out_buffer[j]);
- portdata->out_urbs[j] = NULL;
- }
- }
- }
-
- /* Now free per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- kfree(usb_get_serial_port_data(port));
- }
-}
-
-#ifdef CONFIG_PM
-static int option_suspend(struct usb_serial *serial, pm_message_t message)
-{
- struct option_intf_private *intfdata = serial->private;
- int b;
-
- dbg("%s entered", __func__);
-
- if (message.event & PM_EVENT_AUTO) {
- spin_lock_irq(&intfdata->susp_lock);
- b = intfdata->in_flight;
- spin_unlock_irq(&intfdata->susp_lock);
-
- if (b)
- return -EBUSY;
- }
-
- spin_lock_irq(&intfdata->susp_lock);
- intfdata->suspended = 1;
- spin_unlock_irq(&intfdata->susp_lock);
- stop_read_write_urbs(serial);
-
- return 0;
-}
-
-static void play_delayed(struct usb_serial_port *port)
-{
- struct option_intf_private *data;
- struct option_port_private *portdata;
- struct urb *urb;
- int err;
-
- portdata = usb_get_serial_port_data(port);
- data = port->serial->private;
- while ((urb = usb_get_from_anchor(&portdata->delayed))) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (!err)
- data->in_flight++;
- }
-}
-
-static int option_resume(struct usb_serial *serial)
-{
- int i, j;
- struct usb_serial_port *port;
- struct option_intf_private *intfdata = serial->private;
- struct option_port_private *portdata;
- struct urb *urb;
- int err = 0;
-
- dbg("%s entered", __func__);
- /* get the interrupt URBs resubmitted unconditionally */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- if (!port->interrupt_in_urb) {
- dbg("%s: No interrupt URB for port %d", __func__, i);
- continue;
- }
- err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
- dbg("Submitted interrupt URB for port %d (result %d)", i, err);
- if (err < 0) {
- err("%s: Error %d for interrupt URB of port%d",
- __func__, err, i);
- goto err_out;
- }
- }
-
- for (i = 0; i < serial->num_ports; i++) {
- /* walk all ports */
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- /* skip closed ports */
- spin_lock_irq(&intfdata->susp_lock);
- if (!portdata->opened) {
- spin_unlock_irq(&intfdata->susp_lock);
- continue;
- }
-
- for (j = 0; j < N_IN_URB; j++) {
- urb = portdata->in_urbs[j];
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0) {
- err("%s: Error %d for bulk URB %d",
- __func__, err, i);
- spin_unlock_irq(&intfdata->susp_lock);
- goto err_out;
- }
- }
- play_delayed(port);
- spin_unlock_irq(&intfdata->susp_lock);
- }
- spin_lock_irq(&intfdata->susp_lock);
- intfdata->suspended = 0;
- spin_unlock_irq(&intfdata->susp_lock);
-err_out:
- return err;
-}
-#endif
-
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index deeacdea05d..e199b0f4f99 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -51,12 +51,13 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
+#include <linux/kfifo.h>
#include "oti6858.h"
#define OTI6858_DESCRIPTION \
"Ours Technology Inc. OTi-6858 USB to serial adapter driver"
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
-#define OTI6858_VERSION "0.1"
+#define OTI6858_VERSION "0.2"
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
@@ -75,18 +76,6 @@ static struct usb_driver oti6858_driver = {
static int debug;
-
-/* buffering code, copied from pl2303 driver */
-#define PL2303_BUF_SIZE 1024
-#define PL2303_TMP_BUF_SIZE 1024
-
-struct oti6858_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
#define OTI6858_REQ_T_GET_STATUS 0x01
@@ -161,18 +150,6 @@ static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
static int oti6858_startup(struct usb_serial *serial);
static void oti6858_release(struct usb_serial *serial);
-/* functions operating on buffers */
-static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
-static void oti6858_buf_free(struct oti6858_buf *pb);
-static void oti6858_buf_clear(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_data_avail(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_space_avail(struct oti6858_buf *pb);
-static unsigned int oti6858_buf_put(struct oti6858_buf *pb, const char *buf,
- unsigned int count);
-static unsigned int oti6858_buf_get(struct oti6858_buf *pb, char *buf,
- unsigned int count);
-
-
/* device info */
static struct usb_serial_driver oti6858_device = {
.driver = {
@@ -201,7 +178,6 @@ static struct usb_serial_driver oti6858_device = {
struct oti6858_private {
spinlock_t lock;
- struct oti6858_buf *buf;
struct oti6858_control_pkt status;
struct {
@@ -295,7 +271,7 @@ static void setup_line(struct work_struct *work)
}
}
-void send_data(struct work_struct *work)
+static void send_data(struct work_struct *work)
{
struct oti6858_private *priv = container_of(work,
struct oti6858_private, delayed_write_work.work);
@@ -314,9 +290,12 @@ void send_data(struct work_struct *work)
return;
}
priv->flags.write_urb_in_use = 1;
-
- count = oti6858_buf_data_avail(priv->buf);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ spin_lock_irqsave(&port->lock, flags);
+ count = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
+
if (count > port->bulk_out_size)
count = port->bulk_out_size;
@@ -350,10 +329,9 @@ void send_data(struct work_struct *work)
return;
}
- spin_lock_irqsave(&priv->lock, flags);
- oti6858_buf_get(priv->buf, port->write_urb->transfer_buffer, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
+ count = kfifo_out_locked(&port->write_fifo,
+ port->write_urb->transfer_buffer,
+ count, &port->lock);
port->write_urb->transfer_buffer_length = count;
port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
@@ -376,11 +354,6 @@ static int oti6858_startup(struct usb_serial *serial)
priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
if (!priv)
break;
- priv->buf = oti6858_buf_alloc(PL2303_BUF_SIZE);
- if (priv->buf == NULL) {
- kfree(priv);
- break;
- }
spin_lock_init(&priv->lock);
init_waitqueue_head(&priv->intr_wait);
@@ -397,7 +370,6 @@ static int oti6858_startup(struct usb_serial *serial)
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- oti6858_buf_free(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -407,17 +379,12 @@ static int oti6858_startup(struct usb_serial *serial)
static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct oti6858_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
dbg("%s(port = %d, count = %d)", __func__, port->number, count);
if (!count)
return count;
- spin_lock_irqsave(&priv->lock, flags);
- count = oti6858_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
+ count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
return count;
}
@@ -425,15 +392,14 @@ static int oti6858_write(struct tty_struct *tty, struct usb_serial_port *port,
static int oti6858_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct oti6858_private *priv = usb_get_serial_port_data(port);
int room = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- room = oti6858_buf_space_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ room = kfifo_avail(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
return room;
}
@@ -441,15 +407,14 @@ static int oti6858_write_room(struct tty_struct *tty)
static int oti6858_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct oti6858_private *priv = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- chars = oti6858_buf_data_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ chars = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
return chars;
}
@@ -640,10 +605,10 @@ static void oti6858_close(struct usb_serial_port *port)
dbg("%s(port = %d)", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
/* clear out any remaining data in the buffer */
- oti6858_buf_clear(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
+ kfifo_reset_out(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
dbg("%s(): after buf_clear()", __func__);
@@ -785,18 +750,12 @@ static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
static void oti6858_release(struct usb_serial *serial)
{
- struct oti6858_private *priv;
int i;
dbg("%s()", __func__);
- for (i = 0; i < serial->num_ports; ++i) {
- priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- oti6858_buf_free(priv->buf);
- kfree(priv);
- }
- }
+ for (i = 0; i < serial->num_ports; ++i)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
static void oti6858_read_int_callback(struct urb *urb)
@@ -889,10 +848,14 @@ static void oti6858_read_int_callback(struct urb *urb)
}
} else if (!transient) {
unsigned long flags;
+ int count;
+
+ spin_lock_irqsave(&port->lock, flags);
+ count = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
spin_lock_irqsave(&priv->lock, flags);
- if (priv->flags.write_urb_in_use == 0
- && oti6858_buf_data_avail(priv->buf) != 0) {
+ if (priv->flags.write_urb_in_use == 0 && count != 0) {
schedule_delayed_work(&priv->delayed_write_work, 0);
resubmit = 0;
}
@@ -1014,165 +977,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
}
}
-
-/*
- * oti6858_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct oti6858_buf *oti6858_buf_alloc(unsigned int size)
-{
- struct oti6858_buf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(struct oti6858_buf), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/*
- * oti6858_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void oti6858_buf_free(struct oti6858_buf *pb)
-{
- if (pb) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/*
- * oti6858_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void oti6858_buf_clear(struct oti6858_buf *pb)
-{
- if (pb != NULL) {
- /* equivalent to a get of all data available */
- pb->buf_get = pb->buf_put;
- }
-}
-
-/*
- * oti6858_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static unsigned int oti6858_buf_data_avail(struct oti6858_buf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/*
- * oti6858_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-static unsigned int oti6858_buf_space_avail(struct oti6858_buf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/*
- * oti6858_buf_put
- *
- * Copy data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int oti6858_buf_put(struct oti6858_buf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = oti6858_buf_space_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
-
- return count;
-}
-
-/*
- * oti6858_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int oti6858_buf_get(struct oti6858_buf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = oti6858_buf_data_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
/* module description and (de)initialization */
static int __init oti6858_init(void)
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c28b1607eac..6b600182227 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -40,16 +40,6 @@ static int debug;
#define PL2303_CLOSING_WAIT (30*HZ)
-#define PL2303_BUF_SIZE 1024
-#define PL2303_TMP_BUF_SIZE 1024
-
-struct pl2303_buf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
@@ -157,173 +147,12 @@ enum pl2303_type {
struct pl2303_private {
spinlock_t lock;
- struct pl2303_buf *buf;
- int write_urb_in_use;
wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
enum pl2303_type type;
};
-/*
- * pl2303_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
-{
- struct pl2303_buf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/*
- * pl2303_buf_free
- *
- * Free the buffer and all associated memory.
- */
-static void pl2303_buf_free(struct pl2303_buf *pb)
-{
- if (pb) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/*
- * pl2303_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-static void pl2303_buf_clear(struct pl2303_buf *pb)
-{
- if (pb != NULL)
- pb->buf_get = pb->buf_put;
- /* equivalent to a get of all data available */
-}
-
-/*
- * pl2303_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb)
-{
- if (pb == NULL)
- return 0;
-
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/*
- * pl2303_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb)
-{
- if (pb == NULL)
- return 0;
-
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/*
- * pl2303_buf_put
- *
- * Copy data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = pl2303_buf_space_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
-
- return count;
-}
-
-/*
- * pl2303_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = pl2303_buf_data_avail(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
static int pl2303_vendor_read(__u16 value, __u16 index,
struct usb_serial *serial, unsigned char *buf)
{
@@ -372,11 +201,6 @@ static int pl2303_startup(struct usb_serial *serial)
if (!priv)
goto cleanup;
spin_lock_init(&priv->lock);
- priv->buf = pl2303_buf_alloc(PL2303_BUF_SIZE);
- if (priv->buf == NULL) {
- kfree(priv);
- goto cleanup;
- }
init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(serial->port[i], priv);
@@ -404,7 +228,6 @@ cleanup:
kfree(buf);
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- pl2303_buf_free(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -422,102 +245,6 @@ static int set_control_lines(struct usb_device *dev, u8 value)
return retval;
}
-static void pl2303_send(struct usb_serial_port *port)
-{
- int count, result;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->write_urb_in_use) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- count = pl2303_buf_get(priv->buf, port->write_urb->transfer_buffer,
- port->bulk_out_size);
-
- if (count == 0) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return;
- }
-
- priv->write_urb_in_use = 1;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count,
- port->write_urb->transfer_buffer);
-
- port->write_urb->transfer_buffer_length = count;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- dev_err(&port->dev, "%s - failed submitting write urb,"
- " error %d\n", __func__, result);
- priv->write_urb_in_use = 0;
- /* TODO: reschedule pl2303_send */
- }
-
- usb_serial_port_softint(port);
-}
-
-static int pl2303_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d, %d bytes", __func__, port->number, count);
-
- if (!count)
- return count;
-
- spin_lock_irqsave(&priv->lock, flags);
- count = pl2303_buf_put(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pl2303_send(port);
-
- return count;
-}
-
-static int pl2303_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int room = 0;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- room = pl2303_buf_space_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, room);
- return room;
-}
-
-static int pl2303_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int chars = 0;
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- chars = pl2303_buf_data_avail(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- dbg("%s - returns %d", __func__, chars);
- return chars;
-}
-
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -729,22 +456,10 @@ static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
static void pl2303_close(struct usb_serial_port *port)
{
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
dbg("%s - port %d", __func__, port->number);
- spin_lock_irqsave(&priv->lock, flags);
- /* clear out any remaining data in the buffer */
- pl2303_buf_clear(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* shutdown our urbs */
- dbg("%s - shutting down urbs", __func__);
- usb_kill_urb(port->write_urb);
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
-
}
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
@@ -770,10 +485,8 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
pl2303_set_termios(tty, port, &tmp_termios);
dbg("%s - submitting read urb", __func__);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
if (result) {
- dev_err(&port->dev, "%s - failed submitting read urb,"
- " error %d\n", __func__, result);
pl2303_close(port);
return -EPROTO;
}
@@ -953,10 +666,7 @@ static void pl2303_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- pl2303_buf_free(priv->buf);
- kfree(priv);
- }
+ kfree(priv);
}
}
@@ -1037,13 +747,31 @@ exit:
__func__, retval);
}
-static void pl2303_push_data(struct tty_struct *tty,
- struct usb_serial_port *port, struct urb *urb,
- u8 line_status)
+static void pl2303_process_read_urb(struct urb *urb)
{
+ struct usb_serial_port *port = urb->context;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- /* get tty_flag from status */
char tty_flag = TTY_NORMAL;
+ unsigned long flags;
+ u8 line_status;
+ int i;
+
+ /* update line status */
+ spin_lock_irqsave(&priv->lock, flags);
+ line_status = priv->line_status;
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ wake_up_interruptible(&priv->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+ return;
+
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (line_status & UART_BREAK_ERROR)
@@ -1058,107 +786,17 @@ static void pl2303_push_data(struct tty_struct *tty,
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- if (tty_flag == TTY_NORMAL && !(port->console && port->sysrq))
- tty_insert_flip_string(tty, data, urb->actual_length);
- else {
- int i;
+ if (port->port.console && port->sysrq) {
for (i = 0; i < urb->actual_length; ++i)
if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
tty_insert_flip_char(tty, data[i], tty_flag);
+ } else {
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
}
- tty_flip_buffer_push(tty);
-}
-
-static void pl2303_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty;
- unsigned long flags;
- int result;
- int status = urb->status;
- u8 line_status;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - urb status = %d", __func__, status);
- if (status == -EPROTO) {
- /* PL2303 mysteriously fails with -EPROTO reschedule
- * the read */
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed"
- " resubmitting read urb, error %d\n",
- __func__, result);
- return;
- }
- dbg("%s - unable to handle the error, exiting.", __func__);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, urb->transfer_buffer);
-
- spin_lock_irqsave(&priv->lock, flags);
- line_status = priv->line_status;
- priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
- spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&priv->delta_msr_wait);
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- pl2303_push_data(tty, port, urb, line_status);
- }
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result && result != -EPERM)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
-}
-
-static void pl2303_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct pl2303_private *priv = usb_get_serial_port_data(port);
- int result;
- int status = urb->status;
-
- dbg("%s - port %d", __func__, port->number);
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__,
- status);
- priv->write_urb_in_use = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d", __func__,
- status);
- port->write_urb->transfer_buffer_length = 1;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting write"
- " urb, error %d\n", __func__, result);
- else
- return;
- }
-
- priv->write_urb_in_use = 0;
-
- /* send any buffered data */
- pl2303_send(port);
}
/* All of the device info needed for the PL2303 SIO serial converter */
@@ -1170,21 +808,19 @@ static struct usb_serial_driver pl2303_device = {
.id_table = id_table,
.usb_driver = &pl2303_driver,
.num_ports = 1,
+ .bulk_in_size = 256,
+ .bulk_out_size = 256,
.open = pl2303_open,
.close = pl2303_close,
.dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
- .write = pl2303_write,
.ioctl = pl2303_ioctl,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
.tiocmset = pl2303_tiocmset,
- .read_bulk_callback = pl2303_read_bulk_callback,
+ .process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
- .write_bulk_callback = pl2303_write_bulk_callback,
- .write_room = pl2303_write_room,
- .chars_in_buffer = pl2303_chars_in_buffer,
.attach = pl2303_startup,
.release = pl2303_release,
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 23c09b38b9e..a871645389d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
*/
#define BENQ_VENDOR_ID 0x04a5
@@ -137,5 +137,5 @@
#define SANWA_PRODUCT_ID 0x0001
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
-#define ADLINK_VENDOR_ID 0x0b63
-#define ADLINK_ND6530_PRODUCT_ID 0x6530
+#define ADLINK_VENDOR_ID 0x0b63
+#define ADLINK_ND6530_PRODUCT_ID 0x6530
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 7e3bea23600..214a3e50429 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -50,6 +50,10 @@
#define SANYO_VENDOR_ID 0x0474
#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
+/* Samsung devices */
+#define SAMSUNG_VENDOR_ID 0x04e8
+#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
+
static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 53a2d5a935a..04bb759536b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -15,6 +15,8 @@
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/slab.h>
+#include "usb-wwan.h"
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
@@ -76,6 +78,8 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
+ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
+ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -92,6 +96,8 @@ static struct usb_driver qcdriver = {
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
+ struct usb_wwan_intf_private *data;
+ struct usb_host_interface *intf = serial->interface->cur_altsetting;
int retval = -ENODEV;
__u8 nintf;
__u8 ifnum;
@@ -100,33 +106,45 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
nintf = serial->dev->actconfig->desc.bNumInterfaces;
dbg("Num Interfaces = %d", nintf);
- ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ ifnum = intf->desc.bInterfaceNumber;
dbg("This Interface = %d", ifnum);
+ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->susp_lock);
+
switch (nintf) {
case 1:
/* QDL mode */
- if (serial->interface->num_altsetting == 2) {
- struct usb_host_interface *intf;
-
+ /* Gobi 2000 has a single altsetting, older ones have two */
+ if (serial->interface->num_altsetting == 2)
intf = &serial->interface->altsetting[1];
- if (intf->desc.bNumEndpoints == 2) {
- if (usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
- usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
- dbg("QDL port found");
- retval = usb_set_interface(serial->dev, ifnum, 1);
- if (retval < 0) {
- dev_err(&serial->dev->dev,
- "Could not set interface, error %d\n",
- retval);
- retval = -ENODEV;
- }
- return retval;
- }
+ else if (serial->interface->num_altsetting > 2)
+ break;
+
+ if (intf->desc.bNumEndpoints == 2 &&
+ usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
+ usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
+ dbg("QDL port found");
+
+ if (serial->interface->num_altsetting == 1)
+ return 0;
+
+ retval = usb_set_interface(serial->dev, ifnum, 1);
+ if (retval < 0) {
+ dev_err(&serial->dev->dev,
+ "Could not set interface, error %d\n",
+ retval);
+ retval = -ENODEV;
}
+ return retval;
}
break;
+ case 3:
case 4:
/* Composite mode */
if (ifnum == 2) {
@@ -161,6 +179,18 @@ static struct usb_serial_driver qcdevice = {
.usb_driver = &qcdriver,
.num_ports = 1,
.probe = qcprobe,
+ .open = usb_wwan_open,
+ .close = usb_wwan_close,
+ .write = usb_wwan_write,
+ .write_room = usb_wwan_write_room,
+ .chars_in_buffer = usb_wwan_chars_in_buffer,
+ .attach = usb_wwan_startup,
+ .disconnect = usb_wwan_disconnect,
+ .release = usb_wwan_release,
+#ifdef CONFIG_PM
+ .suspend = usb_wwan_suspend,
+ .resume = usb_wwan_resume,
+#endif
};
static int __init qcinit(void)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 43a0cadd578..a36e2313eed 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -1,6 +1,7 @@
/*
* Safe Encapsulated USB Serial Driver
*
+ * Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Lineo
* Copyright (C) 2001 Hewlett-Packard
*
@@ -84,8 +85,8 @@ static int debug;
static int safe = 1;
static int padded = CONFIG_USB_SERIAL_SAFE_PADDED;
-#define DRIVER_VERSION "v0.0b"
-#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com"
+#define DRIVER_VERSION "v0.1"
+#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB Safe Encapsulated Serial"
MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -212,191 +213,80 @@ static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs)
return fcs;
}
-static void safe_read_bulk_callback(struct urb *urb)
+static void safe_process_read_urb(struct urb *urb)
{
- struct usb_serial_port *port = urb->context;
+ struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned char length = urb->actual_length;
+ int actual_length;
struct tty_struct *tty;
- int result;
- int status = urb->status;
+ __u16 fcs;
- dbg("%s", __func__);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
+ if (!length)
return;
- }
- dbg("safe_read_bulk_callback length: %d",
- port->read_urb->actual_length);
-#ifdef ECHO_RCV
- {
- int i;
- unsigned char *cp = port->read_urb->transfer_buffer;
- for (i = 0; i < port->read_urb->actual_length; i++) {
- if ((i % 32) == 0)
- printk("\nru[%02x] ", i);
- printk("%02x ", *cp++);
- }
- printk("\n");
- }
-#endif
tty = tty_port_tty_get(&port->port);
- if (safe) {
- __u16 fcs;
- fcs = fcs_compute10(data, length, CRC10_INITFCS);
- if (!fcs) {
- int actual_length = data[length - 2] >> 2;
- if (actual_length <= (length - 2)) {
- dev_info(&urb->dev->dev, "%s - actual: %d\n",
- __func__, actual_length);
- tty_insert_flip_string(tty,
- data, actual_length);
- tty_flip_buffer_push(tty);
- } else {
- dev_err(&port->dev,
- "%s - inconsistent lengths %d:%d\n",
- __func__, actual_length, length);
- }
- } else {
- dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
- }
- } else {
- tty_insert_flip_string(tty, data, length);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Continue trying to always read */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer, urb->transfer_buffer_length,
- safe_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- /* FIXME: Need a mechanism to retry later if this happens */
-}
-
-static int safe_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- unsigned char *data;
- int result;
- int i;
- int packet_length;
-
- dbg("safe_write port: %p %d urb: %p count: %d",
- port, port->number, port->write_urb, count);
-
- if (!port->write_urb) {
- dbg("%s - write urb NULL", __func__);
- return 0;
- }
-
- dbg("safe_write write_urb: %d transfer_buffer_length",
- port->write_urb->transfer_buffer_length);
-
- if (!port->write_urb->transfer_buffer_length) {
- dbg("%s - write urb transfer_buffer_length zero", __func__);
- return 0;
- }
- if (count == 0) {
- dbg("%s - write request of 0 bytes", __func__);
- return 0;
- }
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
- dbg("%s - already writing", __func__);
- return 0;
- }
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
-
- packet_length = port->bulk_out_size; /* get max packetsize */
-
- i = packet_length - (safe ? 2 : 0); /* get bytes to send */
- count = (count > i) ? i : count;
-
-
- /* get the data into the transfer buffer */
- data = port->write_urb->transfer_buffer;
- memset(data, '0', packet_length);
-
- memcpy(data, buf, count);
-
- if (safe) {
- __u16 fcs;
-
- /* pad if necessary */
- if (!padded)
- packet_length = count + 2;
- /* set count */
- data[packet_length - 2] = count << 2;
- data[packet_length - 1] = 0;
+ if (!tty)
+ return;
- /* compute fcs and insert into trailer */
- fcs = fcs_compute10(data, packet_length, CRC10_INITFCS);
- data[packet_length - 2] |= fcs >> 8;
- data[packet_length - 1] |= fcs & 0xff;
+ if (!safe)
+ goto out;
- /* set length to send */
- port->write_urb->transfer_buffer_length = packet_length;
- } else {
- port->write_urb->transfer_buffer_length = count;
+ fcs = fcs_compute10(data, length, CRC10_INITFCS);
+ if (fcs) {
+ dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
+ goto err;
}
- usb_serial_debug_data(debug, &port->dev, __func__, count,
- port->write_urb->transfer_buffer);
-#ifdef ECHO_TX
- {
- int i;
- unsigned char *cp = port->write_urb->transfer_buffer;
- for (i = 0; i < port->write_urb->transfer_buffer_length; i++) {
- if ((i % 32) == 0)
- printk("\nsu[%02x] ", i);
- printk("%02x ", *cp++);
- }
- printk("\n");
- }
-#endif
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_KERNEL);
- if (result) {
- port->write_urb_busy = 0;
- dev_err(&port->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- return 0;
+ actual_length = data[length - 2] >> 2;
+ if (actual_length > (length - 2)) {
+ dev_err(&port->dev, "%s - inconsistent lengths %d:%d\n",
+ __func__, actual_length, length);
+ goto err;
}
- dbg("%s urb: %p submitted", __func__, port->write_urb);
-
- return count;
+ dev_info(&urb->dev->dev, "%s - actual: %d\n", __func__, actual_length);
+ length = actual_length;
+out:
+ tty_insert_flip_string(tty, data, length);
+ tty_flip_buffer_push(tty);
+err:
+ tty_kref_put(tty);
}
-static int safe_write_room(struct tty_struct *tty)
+static int safe_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size)
{
- struct usb_serial_port *port = tty->driver_data;
- int room = 0; /* Default: no room */
- unsigned long flags;
+ unsigned char *buf = dest;
+ int count;
+ int trailer_len;
+ int pkt_len;
+ __u16 fcs;
+
+ trailer_len = safe ? 2 : 0;
+
+ count = kfifo_out_locked(&port->write_fifo, buf, size - trailer_len,
+ &port->lock);
+ if (!safe)
+ return count;
+
+ /* pad if necessary */
+ if (padded) {
+ pkt_len = size;
+ memset(buf + count, '0', pkt_len - count - trailer_len);
+ } else {
+ pkt_len = count + trailer_len;
+ }
- dbg("%s", __func__);
+ /* set count */
+ buf[pkt_len - 2] = count << 2;
+ buf[pkt_len - 1] = 0;
- spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy)
- room = port->bulk_out_size - (safe ? 2 : 0);
- spin_unlock_irqrestore(&port->lock, flags);
+ /* compute fcs and insert into trailer */
+ fcs = fcs_compute10(buf, pkt_len, CRC10_INITFCS);
+ buf[pkt_len - 2] |= fcs >> 8;
+ buf[pkt_len - 1] |= fcs & 0xff;
- if (room)
- dbg("safe_write_room returns %d", room);
- return room;
+ return pkt_len;
}
static int safe_startup(struct usb_serial *serial)
@@ -421,9 +311,8 @@ static struct usb_serial_driver safe_device = {
.id_table = id_table,
.usb_driver = &safe_driver,
.num_ports = 1,
- .write = safe_write,
- .write_room = safe_write_room,
- .read_bulk_callback = safe_read_bulk_callback,
+ .process_read_urb = safe_process_read_urb,
+ .prepare_write_buffer = safe_prepare_write_buffer,
.attach = safe_startup,
};
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5d39191e724..329d311a35d 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -1,6 +1,7 @@
/*
* spcp8x5 USB to serial adaptor driver
*
+ * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 2006 Linxb (xubin.lin@worldplus.com.cn)
* Copyright (C) 2006 S1 Corp.
*
@@ -29,7 +30,7 @@
/* Version Information */
-#define DRIVER_VERSION "v0.04"
+#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver"
static int debug;
@@ -64,11 +65,6 @@ struct spcp8x5_usb_ctrl_arg {
u16 length;
};
-/* wait 30s before close */
-#define SPCP8x5_CLOSING_WAIT (30*HZ)
-
-#define SPCP8x5_BUF_SIZE 1024
-
/* spcp8x5 spec register define */
#define MCR_CONTROL_LINE_RTS 0x02
@@ -155,133 +151,6 @@ enum spcp8x5_type {
SPCP835_TYPE,
};
-/* 1st in 1st out buffer 4 driver */
-struct ringbuf {
- unsigned int buf_size;
- char *buf_buf;
- char *buf_get;
- char *buf_put;
-};
-
-/* alloc the ring buf and alloc the buffer itself */
-static inline struct ringbuf *alloc_ringbuf(unsigned int size)
-{
- struct ringbuf *pb;
-
- if (size == 0)
- return NULL;
-
- pb = kmalloc(sizeof(*pb), GFP_KERNEL);
- if (pb == NULL)
- return NULL;
-
- pb->buf_buf = kmalloc(size, GFP_KERNEL);
- if (pb->buf_buf == NULL) {
- kfree(pb);
- return NULL;
- }
-
- pb->buf_size = size;
- pb->buf_get = pb->buf_put = pb->buf_buf;
-
- return pb;
-}
-
-/* free the ring buf and the buffer itself */
-static inline void free_ringbuf(struct ringbuf *pb)
-{
- if (pb != NULL) {
- kfree(pb->buf_buf);
- kfree(pb);
- }
-}
-
-/* clear pipo, just repoint the pointer here */
-static inline void clear_ringbuf(struct ringbuf *pb)
-{
- if (pb != NULL)
- pb->buf_get = pb->buf_put;
-}
-
-/* get the number of data in the pipo */
-static inline unsigned int ringbuf_avail_data(struct ringbuf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size;
-}
-
-/* get the number of space in the pipo */
-static inline unsigned int ringbuf_avail_space(struct ringbuf *pb)
-{
- if (pb == NULL)
- return 0;
- return (pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size;
-}
-
-/* put count data into pipo */
-static unsigned int put_ringbuf(struct ringbuf *pb, const char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL)
- return 0;
-
- len = ringbuf_avail_space(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_put;
- if (count > len) {
- memcpy(pb->buf_put, buf, len);
- memcpy(pb->buf_buf, buf+len, count - len);
- pb->buf_put = pb->buf_buf + count - len;
- } else {
- memcpy(pb->buf_put, buf, count);
- if (count < len)
- pb->buf_put += count;
- else /* count == len */
- pb->buf_put = pb->buf_buf;
- }
- return count;
-}
-
-/* get count data from pipo */
-static unsigned int get_ringbuf(struct ringbuf *pb, char *buf,
- unsigned int count)
-{
- unsigned int len;
-
- if (pb == NULL || buf == NULL)
- return 0;
-
- len = ringbuf_avail_data(pb);
- if (count > len)
- count = len;
-
- if (count == 0)
- return 0;
-
- len = pb->buf_buf + pb->buf_size - pb->buf_get;
- if (count > len) {
- memcpy(buf, pb->buf_get, len);
- memcpy(buf+len, pb->buf_buf, count - len);
- pb->buf_get = pb->buf_buf + count - len;
- } else {
- memcpy(buf, pb->buf_get, count);
- if (count < len)
- pb->buf_get += count;
- else /* count == len */
- pb->buf_get = pb->buf_buf;
- }
-
- return count;
-}
-
static struct usb_driver spcp8x5_driver = {
.name = "spcp8x5",
.probe = usb_serial_probe,
@@ -293,8 +162,6 @@ static struct usb_driver spcp8x5_driver = {
struct spcp8x5_private {
spinlock_t lock;
- struct ringbuf *buf;
- int write_urb_in_use;
enum spcp8x5_type type;
wait_queue_head_t delta_msr_wait;
u8 line_control;
@@ -330,24 +197,15 @@ static int spcp8x5_startup(struct usb_serial *serial)
goto cleanup;
spin_lock_init(&priv->lock);
- priv->buf = alloc_ringbuf(SPCP8x5_BUF_SIZE);
- if (priv->buf == NULL)
- goto cleanup2;
-
init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(serial->port[i] , priv);
-
}
return 0;
-
-cleanup2:
- kfree(priv);
cleanup:
for (--i; i >= 0; --i) {
priv = usb_get_serial_port_data(serial->port[i]);
- free_ringbuf(priv->buf);
kfree(priv);
usb_set_serial_port_data(serial->port[i] , NULL);
}
@@ -358,15 +216,9 @@ cleanup:
static void spcp8x5_release(struct usb_serial *serial)
{
int i;
- struct spcp8x5_private *priv;
- for (i = 0; i < serial->num_ports; i++) {
- priv = usb_get_serial_port_data(serial->port[i]);
- if (priv) {
- free_ringbuf(priv->buf);
- kfree(priv);
- }
- }
+ for (i = 0; i < serial->num_ports; i++)
+ kfree(usb_get_serial_port_data(serial->port[i]));
}
/* set the modem control line of the device.
@@ -470,33 +322,6 @@ static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type);
}
-/* close the serial port. We should wait for data sending to device 1st and
- * then kill all urb. */
-static void spcp8x5_close(struct usb_serial_port *port)
-{
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
- int result;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- /* clear out any remaining data in the buffer */
- clear_ringbuf(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* kill urb */
- if (port->write_urb != NULL) {
- result = usb_unlink_urb(port->write_urb);
- if (result)
- dev_dbg(&port->dev,
- "usb_unlink_urb(write_urb) = %d\n", result);
- }
- result = usb_unlink_urb(port->read_urb);
- if (result)
- dev_dbg(&port->dev, "usb_unlink_urb(read_urb) = %d\n", result);
-}
-
static void spcp8x5_init_termios(struct tty_struct *tty)
{
/* for the 1st time call this function */
@@ -620,7 +445,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
}
/* open the serial port. do some usb system call. set termios and get the line
- * status of the device. then submit the read urb */
+ * status of the device. */
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ktermios tmp_termios;
@@ -655,52 +480,21 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->line_status = status & 0xf0 ;
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - submitting read urb", __func__);
- port->read_urb->dev = serial->dev;
- ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (ret) {
- spcp8x5_close(port);
- return -EPROTO;
- }
port->port.drain_delay = 256;
- return 0;
+
+ return usb_serial_generic_open(tty, port);
}
-/* Bulk read callback. Check the URB status; if the transfer failed, just
- * return. Otherwise update the line status, push the received data to the
- * tty layer and resubmit the URB.
- */
-static void spcp8x5_read_bulk_callback(struct urb *urb)
+static void spcp8x5_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
- int result = urb->status;
u8 status;
char tty_flag;
- dev_dbg(&port->dev, "start, result = %d, urb->actual_length = %d\n,",
- result, urb->actual_length);
-
- /* check the urb status */
- if (result) {
- if (result == -EPROTO) {
- /* spcp8x5 mysteriously fails with -EPROTO */
- /* reschedule the read */
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev,
- "failed submitting read urb %d\n",
- result);
- return;
- }
- dev_dbg(&port->dev, "unable to handle the error, exiting.\n");
- return;
- }
-
/* get tty_flag from status */
tty_flag = TTY_NORMAL;
@@ -711,141 +505,33 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
/* wake up the wait for termios */
wake_up_interruptible(&priv->delta_msr_wait);
- /* break takes precedence over parity, which takes precedence over
- * framing errors */
- if (status & UART_BREAK_ERROR)
- tty_flag = TTY_BREAK;
- else if (status & UART_PARITY_ERROR)
- tty_flag = TTY_PARITY;
- else if (status & UART_FRAME_ERROR)
- tty_flag = TTY_FRAME;
- dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
-
- tty = tty_port_tty_get(&port->port);
- if (tty && urb->actual_length) {
- /* overrun is special, not associated with a char */
- if (status & UART_OVERRUN_ERROR)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- tty_insert_flip_string_fixed_flag(tty, data,
- urb->actual_length, tty_flag);
- tty_flip_buffer_push(tty);
- }
- tty_kref_put(tty);
-
- /* Schedule the next read */
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
-}
-
-/* get data from ring buffer and then write to usb bus */
-static void spcp8x5_send(struct usb_serial_port *port)
-{
- int count, result;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->write_urb_in_use) {
- dev_dbg(&port->dev, "write urb still used\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (!urb->actual_length)
return;
- }
-
- /* send the first urb for writing */
- memset(port->write_urb->transfer_buffer , 0x00 , port->bulk_out_size);
- count = get_ringbuf(priv->buf, port->write_urb->transfer_buffer,
- port->bulk_out_size);
- if (count == 0) {
- spin_unlock_irqrestore(&priv->lock, flags);
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
return;
- }
-
- /* update the urb status */
- priv->write_urb_in_use = 1;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
-
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result) {
- dev_dbg(&port->dev, "failed submitting write urb, error %d\n",
- result);
- priv->write_urb_in_use = 0;
- /* TODO: reschedule spcp8x5_send */
- }
-
-
- schedule_work(&port->work);
-}
-/* this is the call back function for write urb. NOTE we should not sleep in
- * this routine. check the urb return code and then submit the write urb again
- * to hold the write loop */
-static void spcp8x5_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int result;
- int status = urb->status;
+ if (status & UART_STATE_TRANSIENT_MASK) {
+ /* break takes precedence over parity, which takes precedence
+ * over framing errors */
+ if (status & UART_BREAK_ERROR)
+ tty_flag = TTY_BREAK;
+ else if (status & UART_PARITY_ERROR)
+ tty_flag = TTY_PARITY;
+ else if (status & UART_FRAME_ERROR)
+ tty_flag = TTY_FRAME;
+ dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag);
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&port->dev, "urb shutting down with status: %d\n",
- status);
- priv->write_urb_in_use = 0;
- return;
- default:
- /* error in the urb, so we have to resubmit it */
- dbg("%s - Overflow in write", __func__);
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
- port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev,
- "failed resubmitting write urb %d\n", result);
- else
- return;
+ /* overrun is special, not associated with a char */
+ if (status & UART_OVERRUN_ERROR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
- priv->write_urb_in_use = 0;
-
- /* send any buffered data */
- spcp8x5_send(port);
-}
-
-/* write data to ring buffer. and then start the write transfer */
-static int spcp8x5_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dev_dbg(&port->dev, "%d bytes\n", count);
-
- if (!count)
- return count;
-
- spin_lock_irqsave(&priv->lock, flags);
- count = put_ringbuf(priv->buf, buf, count);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spcp8x5_send(port);
-
- return count;
+ tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
}
static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
@@ -953,36 +639,6 @@ static int spcp8x5_tiocmget(struct tty_struct *tty, struct file *file)
return result;
}
-/* get the available space in the ring buffer */
-static int spcp8x5_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int room = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- room = ringbuf_avail_space(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return room;
-}
-
-/* get the number of bytes of data in the write ring buffer */
-static int spcp8x5_chars_in_buffer(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct spcp8x5_private *priv = usb_get_serial_port_data(port);
- int chars = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- chars = ringbuf_avail_data(priv->buf);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return chars;
-}
-
/* All of the device info needed for the spcp8x5 SIO serial converter */
static struct usb_serial_driver spcp8x5_device = {
.driver = {
@@ -992,21 +648,16 @@ static struct usb_serial_driver spcp8x5_device = {
.id_table = id_table,
.num_ports = 1,
.open = spcp8x5_open,
- .close = spcp8x5_close,
.dtr_rts = spcp8x5_dtr_rts,
.carrier_raised = spcp8x5_carrier_raised,
- .write = spcp8x5_write,
.set_termios = spcp8x5_set_termios,
.init_termios = spcp8x5_init_termios,
.ioctl = spcp8x5_ioctl,
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
- .write_room = spcp8x5_write_room,
- .read_bulk_callback = spcp8x5_read_bulk_callback,
- .write_bulk_callback = spcp8x5_write_bulk_callback,
- .chars_in_buffer = spcp8x5_chars_in_buffer,
.attach = spcp8x5_startup,
.release = spcp8x5_release,
+ .process_read_urb = spcp8x5_process_read_urb,
};
static int __init spcp8x5_init(void)
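
Illustrative sketch, not part of this commit: once the private ring buffer and URB bookkeeping are gone, a converted driver of this shape only supplies a process_read_urb() hook and leaves URB submission, resubmission and the kfifo-backed write path to the usb-serial core. The demo_* names are invented and the id_table/usb_driver glue is omitted; the tty calls mirror the ones used in the hunk above.

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* push received bytes to the tty layer; the core resubmits the read URB */
static void demo_process_read_urb(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	struct tty_struct *tty;

	if (!urb->actual_length)
		return;

	tty = tty_port_tty_get(&port->port);
	if (!tty)
		return;

	/* a real driver would map its status byte to TTY_BREAK etc. here */
	tty_insert_flip_string(tty, data, urb->actual_length);
	tty_flip_buffer_push(tty);
	tty_kref_put(tty);
}

static struct usb_serial_driver demo_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "demo",
	},
	.num_ports		= 1,
	/* no .write/.write_room/.read_bulk_callback: the generic,
	 * kfifo-backed implementations are filled in by the core */
	.process_read_urb	= demo_process_read_urb,
};
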
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e1bfda33f5b..90979a1f531 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/serial.h>
-#include <linux/circ_buf.h>
+#include <linux/kfifo.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -40,7 +40,7 @@
/* Defines */
-#define TI_DRIVER_VERSION "v0.9"
+#define TI_DRIVER_VERSION "v0.10"
#define TI_DRIVER_AUTHOR "Al Borchers <alborchers@steinerpoint.com>"
#define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver"
@@ -82,7 +82,7 @@ struct ti_port {
spinlock_t tp_lock;
int tp_read_urb_state;
int tp_write_urb_in_use;
- struct circ_buf *tp_write_buf;
+ struct kfifo write_fifo;
};
struct ti_device {
@@ -144,15 +144,6 @@ static int ti_write_byte(struct ti_device *tdev, unsigned long addr,
static int ti_download_firmware(struct ti_device *tdev);
-/* circular buffer */
-static struct circ_buf *ti_buf_alloc(void);
-static void ti_buf_free(struct circ_buf *cb);
-static void ti_buf_clear(struct circ_buf *cb);
-static int ti_buf_data_avail(struct circ_buf *cb);
-static int ti_buf_space_avail(struct circ_buf *cb);
-static int ti_buf_put(struct circ_buf *cb, const char *buf, int count);
-static int ti_buf_get(struct circ_buf *cb, char *buf, int count);
-
/* Data */
@@ -450,8 +441,8 @@ static int ti_startup(struct usb_serial *serial)
tport->tp_closing_wait = closing_wait;
init_waitqueue_head(&tport->tp_msr_wait);
init_waitqueue_head(&tport->tp_write_wait);
- tport->tp_write_buf = ti_buf_alloc();
- if (tport->tp_write_buf == NULL) {
+ if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
+ GFP_KERNEL)) {
dev_err(&dev->dev, "%s - out of memory\n", __func__);
kfree(tport);
status = -ENOMEM;
@@ -468,7 +459,7 @@ static int ti_startup(struct usb_serial *serial)
free_tports:
for (--i; i >= 0; --i) {
tport = usb_get_serial_port_data(serial->port[i]);
- ti_buf_free(tport->tp_write_buf);
+ kfifo_free(&tport->write_fifo);
kfree(tport);
usb_set_serial_port_data(serial->port[i], NULL);
}
@@ -490,7 +481,7 @@ static void ti_release(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
tport = usb_get_serial_port_data(serial->port[i]);
if (tport) {
- ti_buf_free(tport->tp_write_buf);
+ kfifo_free(&tport->write_fifo);
kfree(tport);
}
}
@@ -701,7 +692,6 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct ti_port *tport = usb_get_serial_port_data(port);
- unsigned long flags;
dbg("%s - port %d", __func__, port->number);
@@ -713,10 +703,8 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
if (tport == NULL || !tport->tp_is_open)
return -ENODEV;
- spin_lock_irqsave(&tport->tp_lock, flags);
- count = ti_buf_put(tport->tp_write_buf, data, count);
- spin_unlock_irqrestore(&tport->tp_lock, flags);
-
+ count = kfifo_in_locked(&tport->write_fifo, data, count,
+ &tport->tp_lock);
ti_send(tport);
return count;
@@ -736,7 +724,7 @@ static int ti_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- room = ti_buf_space_avail(tport->tp_write_buf);
+ room = kfifo_avail(&tport->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -757,7 +745,7 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- chars = ti_buf_data_avail(tport->tp_write_buf);
+ chars = kfifo_len(&tport->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dbg("%s - returns %d", __func__, chars);
@@ -1309,7 +1297,7 @@ static void ti_send(struct ti_port *tport)
if (tport->tp_write_urb_in_use)
goto unlock;
- count = ti_buf_get(tport->tp_write_buf,
+ count = kfifo_out(&tport->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -1504,7 +1492,7 @@ static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush)
add_wait_queue(&tport->tp_write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ti_buf_data_avail(tport->tp_write_buf) == 0
+ if (kfifo_len(&tport->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| tdev->td_urb_error
|| port->serial->disconnected) /* disconnect */
@@ -1518,7 +1506,7 @@ static void ti_drain(struct ti_port *tport, unsigned long timeout, int flush)
/* flush any remaining data in the buffer */
if (flush)
- ti_buf_clear(tport->tp_write_buf);
+ kfifo_reset_out(&tport->write_fifo);
spin_unlock_irq(&tport->tp_lock);
@@ -1761,142 +1749,3 @@ static int ti_download_firmware(struct ti_device *tdev)
return 0;
}
-
-
-/* Circular Buffer Functions */
-
-/*
- * ti_buf_alloc
- *
- * Allocate a circular buffer and all associated memory.
- */
-
-static struct circ_buf *ti_buf_alloc(void)
-{
- struct circ_buf *cb;
-
- cb = kmalloc(sizeof(struct circ_buf), GFP_KERNEL);
- if (cb == NULL)
- return NULL;
-
- cb->buf = kmalloc(TI_WRITE_BUF_SIZE, GFP_KERNEL);
- if (cb->buf == NULL) {
- kfree(cb);
- return NULL;
- }
-
- ti_buf_clear(cb);
-
- return cb;
-}
-
-
-/*
- * ti_buf_free
- *
- * Free the buffer and all associated memory.
- */
-
-static void ti_buf_free(struct circ_buf *cb)
-{
- kfree(cb->buf);
- kfree(cb);
-}
-
-
-/*
- * ti_buf_clear
- *
- * Clear out all data in the circular buffer.
- */
-
-static void ti_buf_clear(struct circ_buf *cb)
-{
- cb->head = cb->tail = 0;
-}
-
-
-/*
- * ti_buf_data_avail
- *
- * Return the number of bytes of data available in the circular
- * buffer.
- */
-
-static int ti_buf_data_avail(struct circ_buf *cb)
-{
- return CIRC_CNT(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
-}
-
-
-/*
- * ti_buf_space_avail
- *
- * Return the number of bytes of space available in the circular
- * buffer.
- */
-
-static int ti_buf_space_avail(struct circ_buf *cb)
-{
- return CIRC_SPACE(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
-}
-
-
-/*
- * ti_buf_put
- *
- * Copy data from a user buffer and put it into the circular buffer.
- * Restrict to the amount of space available.
- *
- * Return the number of bytes copied.
- */
-
-static int ti_buf_put(struct circ_buf *cb, const char *buf, int count)
-{
- int c, ret = 0;
-
- while (1) {
- c = CIRC_SPACE_TO_END(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(cb->buf + cb->head, buf, c);
- cb->head = (cb->head + c) & (TI_WRITE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret += c;
- }
-
- return ret;
-}
-
-
-/*
- * ti_buf_get
- *
- * Get data from the circular buffer and copy to the given buffer.
- * Restrict to the amount of data available.
- *
- * Return the number of bytes copied.
- */
-
-static int ti_buf_get(struct circ_buf *cb, char *buf, int count)
-{
- int c, ret = 0;
-
- while (1) {
- c = CIRC_CNT_TO_END(cb->head, cb->tail, TI_WRITE_BUF_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(buf, cb->buf + cb->tail, c);
- cb->tail = (cb->tail + c) & (TI_WRITE_BUF_SIZE-1);
- buf += c;
- count -= c;
- ret += c;
- }
-
- return ret;
-}
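
The hand-rolled circ_buf helpers deleted above are exactly what the kfifo API already provides. A minimal sketch of the replacement pattern, assuming a spinlock-protected write FIFO; the demo_* names and the buffer size are invented, while the kfifo calls themselves are the ones used by the new ti_usb code.

#include <linux/kfifo.h>
#include <linux/spinlock.h>

#define DEMO_WRITE_BUF_SIZE 4096	/* illustrative size */

struct demo_port {
	spinlock_t lock;
	struct kfifo write_fifo;
};

/* allocate the FIFO once at attach time */
static int demo_port_init(struct demo_port *p)
{
	spin_lock_init(&p->lock);
	return kfifo_alloc(&p->write_fifo, DEMO_WRITE_BUF_SIZE, GFP_KERNEL);
}

/* queue data from the tty layer; kfifo_in_locked() takes the lock for us */
static int demo_write(struct demo_port *p, const unsigned char *buf, int count)
{
	return kfifo_in_locked(&p->write_fifo, buf, count, &p->lock);
}

/* pull up to len bytes into an URB buffer (caller holds p->lock) */
static int demo_fill_urb(struct demo_port *p, void *urb_buf, int len)
{
	return kfifo_out(&p->write_fifo, urb_buf, len);
}

/* room/backlog queries mirror ti_write_room()/ti_chars_in_buffer() above */
static int demo_room(struct demo_port *p)    { return kfifo_avail(&p->write_fifo); }
static int demo_backlog(struct demo_port *p) { return kfifo_len(&p->write_fifo); }

static void demo_port_free(struct demo_port *p)
{
	kfifo_reset_out(&p->write_fifo);	/* drop any unsent data */
	kfifo_free(&p->write_fifo);
}
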
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 3873660d821..941c2d409f8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -289,7 +289,7 @@ static void serial_down(struct tty_port *tport)
* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
- if (port->console)
+ if (port->port.console)
return;
if (drv->close)
drv->close(port);
@@ -328,7 +328,7 @@ static void serial_cleanup(struct tty_struct *tty)
/* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
- if (port->console)
+ if (port->port.console)
return;
dbg("%s - port %d", __func__, port->number);
@@ -548,8 +548,12 @@ static void usb_serial_port_work(struct work_struct *work)
static void kill_traffic(struct usb_serial_port *port)
{
+ int i;
+
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
+ usb_kill_urb(port->write_urbs[i]);
/*
* This is tricky.
* Some drivers submit the read_urb in the
@@ -568,6 +572,7 @@ static void kill_traffic(struct usb_serial_port *port)
static void port_release(struct device *dev)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
+ int i;
dbg ("%s - %s", __func__, dev_name(dev));
@@ -582,6 +587,10 @@ static void port_release(struct device *dev)
usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
+ usb_free_urb(port->write_urbs[i]);
+ kfree(port->bulk_out_buffers[i]);
+ }
kfifo_free(&port->write_fifo);
kfree(port->bulk_in_buffer);
kfree(port->bulk_out_buffer);
@@ -901,7 +910,9 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = serial->type->bulk_in_size;
+ if (!buffer_size)
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -918,6 +929,8 @@ int usb_serial_probe(struct usb_interface *interface,
}
for (i = 0; i < num_bulk_out; ++i) {
+ int j;
+
endpoint = bulk_out_endpoint[i];
port = serial->port[i];
port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -927,7 +940,9 @@ int usb_serial_probe(struct usb_interface *interface,
}
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = serial->type->bulk_out_size;
+ if (!buffer_size)
+ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -941,6 +956,28 @@ int usb_serial_probe(struct usb_interface *interface,
endpoint->bEndpointAddress),
port->bulk_out_buffer, buffer_size,
serial->type->write_bulk_callback, port);
+ for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
+ set_bit(j, &port->write_urbs_free);
+ port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->write_urbs[j]) {
+ dev_err(&interface->dev,
+ "No free urbs available\n");
+ goto probe_error;
+ }
+ port->bulk_out_buffers[j] = kmalloc(buffer_size,
+ GFP_KERNEL);
+ if (!port->bulk_out_buffers[j]) {
+ dev_err(&interface->dev,
+ "Couldn't allocate bulk_out_buffer\n");
+ goto probe_error;
+ }
+ usb_fill_bulk_urb(port->write_urbs[j], dev,
+ usb_sndbulkpipe(dev,
+ endpoint->bEndpointAddress),
+ port->bulk_out_buffers[j], buffer_size,
+ serial->type->write_bulk_callback,
+ port);
+ }
}
if (serial->type->read_int_callback) {
@@ -1294,6 +1331,8 @@ static void fixup_generic(struct usb_serial_driver *device)
set_to_generic_if_null(device, write_bulk_callback);
set_to_generic_if_null(device, disconnect);
set_to_generic_if_null(device, release);
+ set_to_generic_if_null(device, process_read_urb);
+ set_to_generic_if_null(device, prepare_write_buffer);
}
int usb_serial_register(struct usb_serial_driver *driver)
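
One plausible reading of the per-port write_urbs[]/write_urbs_free additions above — the consuming code lives in generic.c and is not shown here, so treat this purely as an assumption: the generic write path claims a free URB by clearing its bit and releases it from the completion handler by setting the bit again. A stand-alone sketch of that claim/release pattern with invented demo_* names:

#include <linux/bitops.h>

/* claim one of the preallocated write URBs; returns its index or -1 */
static int demo_claim_write_urb(unsigned long *free_map, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		if (test_and_clear_bit(i, free_map))
			return i;	/* bit was set: URB i is now ours */
	return -1;			/* every URB is in flight */
}

/* completion side: mark the URB free again so the next write can use it */
static void demo_release_write_urb(unsigned long *free_map, unsigned int i)
{
	set_bit(i, free_map);
}
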
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
new file mode 100644
index 00000000000..2be298a1305
--- /dev/null
+++ b/drivers/usb/serial/usb-wwan.h
@@ -0,0 +1,67 @@
+/*
+ * Definitions for USB serial mobile broadband cards
+ */
+
+#ifndef __LINUX_USB_USB_WWAN
+#define __LINUX_USB_USB_WWAN
+
+extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
+extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
+extern void usb_wwan_close(struct usb_serial_port *port);
+extern int usb_wwan_startup(struct usb_serial *serial);
+extern void usb_wwan_disconnect(struct usb_serial *serial);
+extern void usb_wwan_release(struct usb_serial *serial);
+extern int usb_wwan_write_room(struct tty_struct *tty);
+extern void usb_wwan_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old);
+extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file);
+extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear);
+extern int usb_wwan_send_setup(struct usb_serial_port *port);
+extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
+#ifdef CONFIG_PM
+extern int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message);
+extern int usb_wwan_resume(struct usb_serial *serial);
+#endif
+
+/* per port private data */
+
+#define N_IN_URB 4
+#define N_OUT_URB 4
+#define IN_BUFLEN 4096
+#define OUT_BUFLEN 4096
+
+struct usb_wwan_intf_private {
+ spinlock_t susp_lock;
+ unsigned int suspended:1;
+ int in_flight;
+ int (*send_setup) (struct usb_serial_port *port);
+ void *private;
+};
+
+struct usb_wwan_port_private {
+ /* Input endpoints and buffer for this port */
+ struct urb *in_urbs[N_IN_URB];
+ u8 *in_buffer[N_IN_URB];
+ /* Output endpoints and buffer for this port */
+ struct urb *out_urbs[N_OUT_URB];
+ u8 *out_buffer[N_OUT_URB];
+ unsigned long out_busy; /* Bit vector of URBs in use */
+ int opened;
+ struct usb_anchor delayed;
+
+ /* Settings for the port */
+ int rts_state; /* Handshaking pins (outputs) */
+ int dtr_state;
+ int cts_state; /* Handshaking pins (inputs) */
+ int dsr_state;
+ int dcd_state;
+ int ri_state;
+
+ unsigned long tx_start_time[N_OUT_URB];
+};
+
+#endif /* __LINUX_USB_USB_WWAN */
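
A hypothetical consumer of this interface, to show how the pieces fit together; the demo_* names are invented, option.c-style modem drivers are the intended users, and the id_table/usb_driver boilerplate is omitted. The interface-private data carries the susp_lock, the in-flight count and the vendor-specific send_setup hook that the helpers declared above call back into.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "usb-wwan.h"

static int demo_send_setup(struct usb_serial_port *port)
{
	/* a real modem driver sends its vendor-specific DTR/RTS control
	 * request here; nothing to do in this sketch */
	return 0;
}

static int demo_attach(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_init(&data->susp_lock);
	data->send_setup = demo_send_setup;
	serial->private = data;		/* read back by the usb_wwan_* helpers */

	return usb_wwan_startup(serial);
}

static struct usb_serial_driver demo_wwan_device = {
	.driver		 = { .owner = THIS_MODULE, .name = "demo-wwan" },
	.num_ports	 = 1,
	.attach		 = demo_attach,
	.open		 = usb_wwan_open,
	.close		 = usb_wwan_close,
	.dtr_rts	 = usb_wwan_dtr_rts,
	.write		 = usb_wwan_write,
	.write_room	 = usb_wwan_write_room,
	.chars_in_buffer = usb_wwan_chars_in_buffer,
	.tiocmget	 = usb_wwan_tiocmget,
	.tiocmset	 = usb_wwan_tiocmset,
	.disconnect	 = usb_wwan_disconnect,
	.release	 = usb_wwan_release,
};
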
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 28026b47344..f2ed6a31be7 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -16,7 +16,6 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#define URB_DEBUG_MAX_IN_FLIGHT_URBS 4000
#define USB_DEBUG_MAX_PACKET_SIZE 8
#define USB_DEBUG_BRK_SIZE 8
static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
@@ -44,12 +43,6 @@ static struct usb_driver debug_driver = {
.no_dynamic_id = 1,
};
-static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- port->bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE;
- return usb_serial_generic_open(tty, port);
-}
-
/* This HW really does not support a serial break, so one will be
 * emulated whenever the break state is set to true.
*/
@@ -69,7 +62,7 @@ static void usb_debug_read_bulk_callback(struct urb *urb)
memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
- usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
+ usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
return;
}
@@ -83,8 +76,7 @@ static struct usb_serial_driver debug_device = {
},
.id_table = id_table,
.num_ports = 1,
- .open = usb_debug_open,
- .max_in_flight_urbs = URB_DEBUG_MAX_IN_FLIGHT_URBS,
+ .bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
.read_bulk_callback = usb_debug_read_bulk_callback,
};
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
new file mode 100644
index 00000000000..0c70b4a621b
--- /dev/null
+++ b/drivers/usb/serial/usb_wwan.c
@@ -0,0 +1,665 @@
+/*
+ USB Driver layer for GSM modems
+
+ Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
+
+ This driver is free software; you can redistribute it and/or modify
+ it under the terms of Version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
+
+ History: see the git log.
+
+ Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
+
+ This driver exists because the "normal" serial driver doesn't work too well
+ with GSM modems. Issues:
+ - data loss -- one single Receive URB is not nearly enough
+ - controlling the baud rate doesn't make sense
+*/
+
+#define DRIVER_VERSION "v0.7.2"
+#define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>"
+#define DRIVER_DESC "USB Driver for GSM modems"
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include "usb-wwan.h"
+
+static int debug;
+
+void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+{
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+
+ struct usb_wwan_intf_private *intfdata;
+
+ dbg("%s", __func__);
+
+ intfdata = port->serial->private;
+
+ if (!intfdata->send_setup)
+ return;
+
+ portdata = usb_get_serial_port_data(port);
+ mutex_lock(&serial->disc_mutex);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+ if (serial->dev)
+ intfdata->send_setup(port);
+ mutex_unlock(&serial->disc_mutex);
+}
+EXPORT_SYMBOL(usb_wwan_dtr_rts);
+
+void usb_wwan_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
+
+ dbg("%s", __func__);
+
+ /* Doesn't support option setting */
+ tty_termios_copy_hw(tty->termios, old_termios);
+
+ if (intfdata->send_setup)
+ intfdata->send_setup(port);
+}
+EXPORT_SYMBOL(usb_wwan_set_termios);
+
+int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ unsigned int value;
+ struct usb_wwan_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+
+ value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
+ ((portdata->dtr_state) ? TIOCM_DTR : 0) |
+ ((portdata->cts_state) ? TIOCM_CTS : 0) |
+ ((portdata->dsr_state) ? TIOCM_DSR : 0) |
+ ((portdata->dcd_state) ? TIOCM_CAR : 0) |
+ ((portdata->ri_state) ? TIOCM_RNG : 0);
+
+ return value;
+}
+EXPORT_SYMBOL(usb_wwan_tiocmget);
+
+int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = port->serial->private;
+
+ if (!intfdata->send_setup)
+ return -EINVAL;
+
+ /* FIXME: what locks portdata fields ? */
+ if (set & TIOCM_RTS)
+ portdata->rts_state = 1;
+ if (set & TIOCM_DTR)
+ portdata->dtr_state = 1;
+
+ if (clear & TIOCM_RTS)
+ portdata->rts_state = 0;
+ if (clear & TIOCM_DTR)
+ portdata->dtr_state = 0;
+ return intfdata->send_setup(port);
+}
+EXPORT_SYMBOL(usb_wwan_tiocmset);
+
+/* Write */
+int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
+{
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ int i;
+ int left, todo;
+ struct urb *this_urb = NULL; /* spurious */
+ int err;
+ unsigned long flags;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = port->serial->private;
+
+ dbg("%s: write (%d chars)", __func__, count);
+
+ i = 0;
+ left = count;
+ for (i = 0; left > 0 && i < N_OUT_URB; i++) {
+ todo = left;
+ if (todo > OUT_BUFLEN)
+ todo = OUT_BUFLEN;
+
+ this_urb = portdata->out_urbs[i];
+ if (test_and_set_bit(i, &portdata->out_busy)) {
+ if (time_before(jiffies,
+ portdata->tx_start_time[i] + 10 * HZ))
+ continue;
+ usb_unlink_urb(this_urb);
+ continue;
+ }
+ dbg("%s: endpoint %d buf %d", __func__,
+ usb_pipeendpoint(this_urb->pipe), i);
+
+ err = usb_autopm_get_interface_async(port->serial->interface);
+ if (err < 0)
+ break;
+
+ /* send the data */
+ memcpy(this_urb->transfer_buffer, buf, todo);
+ this_urb->transfer_buffer_length = todo;
+
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ if (intfdata->suspended) {
+ usb_anchor_urb(this_urb, &portdata->delayed);
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ } else {
+ intfdata->in_flight++;
+ spin_unlock_irqrestore(&intfdata->susp_lock, flags);
+ err = usb_submit_urb(this_urb, GFP_ATOMIC);
+ if (err) {
+ dbg("usb_submit_urb %p (write bulk) failed "
+ "(%d)", this_urb, err);
+ clear_bit(i, &portdata->out_busy);
+ spin_lock_irqsave(&intfdata->susp_lock, flags);
+ intfdata->in_flight--;
+ spin_unlock_irqrestore(&intfdata->susp_lock,
+ flags);
+ continue;
+ }
+ }
+
+ portdata->tx_start_time[i] = jiffies;
+ buf += todo;
+ left -= todo;
+ }
+
+ count -= left;
+ dbg("%s: wrote (did %d)", __func__, count);
+ return count;
+}
+EXPORT_SYMBOL(usb_wwan_write);
+
+static void usb_wwan_indat_callback(struct urb *urb)
+{
+ int err;
+ int endpoint;
+ struct usb_serial_port *port;
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
+
+ dbg("%s: %p", __func__, urb);
+
+ endpoint = usb_pipeendpoint(urb->pipe);
+ port = urb->context;
+
+ if (status) {
+ dbg("%s: nonzero status: %d on endpoint %02x.",
+ __func__, status, endpoint);
+ } else {
+ tty = tty_port_tty_get(&port->port);
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data, urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+
+ /* Resubmit urb so we continue receiving */
+ if (status != -ESHUTDOWN) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err && err != -EPERM)
+ printk(KERN_ERR "%s: resubmit read urb failed. "
+ "(%d)", __func__, err);
+ else
+ usb_mark_last_busy(port->serial->dev);
+ }
+
+ }
+ return;
+}
+
+static void usb_wwan_outdat_callback(struct urb *urb)
+{
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ int i;
+
+ dbg("%s", __func__);
+
+ port = urb->context;
+ intfdata = port->serial->private;
+
+ usb_serial_port_softint(port);
+ usb_autopm_put_interface_async(port->serial->interface);
+ portdata = usb_get_serial_port_data(port);
+ spin_lock(&intfdata->susp_lock);
+ intfdata->in_flight--;
+ spin_unlock(&intfdata->susp_lock);
+
+ for (i = 0; i < N_OUT_URB; ++i) {
+ if (portdata->out_urbs[i] == urb) {
+ smp_mb__before_clear_bit();
+ clear_bit(i, &portdata->out_busy);
+ break;
+ }
+ }
+}
+
+int usb_wwan_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ int i;
+ int data_len = 0;
+ struct urb *this_urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ this_urb = portdata->out_urbs[i];
+ if (this_urb && !test_bit(i, &portdata->out_busy))
+ data_len += OUT_BUFLEN;
+ }
+
+ dbg("%s: %d", __func__, data_len);
+ return data_len;
+}
+EXPORT_SYMBOL(usb_wwan_write_room);
+
+int usb_wwan_chars_in_buffer(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct usb_wwan_port_private *portdata;
+ int i;
+ int data_len = 0;
+ struct urb *this_urb;
+
+ portdata = usb_get_serial_port_data(port);
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ this_urb = portdata->out_urbs[i];
+ /* FIXME: This locking is insufficient as this_urb may
+ go unused during the test */
+ if (this_urb && test_bit(i, &portdata->out_busy))
+ data_len += this_urb->transfer_buffer_length;
+ }
+ dbg("%s: %d", __func__, data_len);
+ return data_len;
+}
+EXPORT_SYMBOL(usb_wwan_chars_in_buffer);
+
+int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata;
+ struct usb_serial *serial = port->serial;
+ int i, err;
+ struct urb *urb;
+
+ portdata = usb_get_serial_port_data(port);
+ intfdata = serial->private;
+
+ dbg("%s", __func__);
+
+ /* Start reading from the IN endpoint */
+ for (i = 0; i < N_IN_URB; i++) {
+ urb = portdata->in_urbs[i];
+ if (!urb)
+ continue;
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ dbg("%s: submit urb %d failed (%d) %d",
+ __func__, i, err, urb->transfer_buffer_length);
+ }
+ }
+
+ if (intfdata->send_setup)
+ intfdata->send_setup(port);
+
+ serial->interface->needs_remote_wakeup = 1;
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ usb_autopm_put_interface(serial->interface);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_wwan_open);
+
+void usb_wwan_close(struct usb_serial_port *port)
+{
+ int i;
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+ struct usb_wwan_intf_private *intfdata = port->serial->private;
+
+ dbg("%s", __func__);
+ portdata = usb_get_serial_port_data(port);
+
+ if (serial->dev) {
+ /* Stop reading/writing urbs */
+ spin_lock_irq(&intfdata->susp_lock);
+ portdata->opened = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ for (i = 0; i < N_IN_URB; i++)
+ usb_kill_urb(portdata->in_urbs[i]);
+ for (i = 0; i < N_OUT_URB; i++)
+ usb_kill_urb(portdata->out_urbs[i]);
+ usb_autopm_get_interface(serial->interface);
+ serial->interface->needs_remote_wakeup = 0;
+ }
+}
+EXPORT_SYMBOL(usb_wwan_close);
+
+/* Helper functions used by usb_wwan_setup_urbs */
+static struct urb *usb_wwan_setup_urb(struct usb_serial *serial, int endpoint,
+ int dir, void *ctx, char *buf, int len,
+ void (*callback) (struct urb *))
+{
+ struct urb *urb;
+
+ if (endpoint == -1)
+ return NULL; /* endpoint not needed */
+
+ urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
+ if (urb == NULL) {
+ dbg("%s: alloc for endpoint %d failed.", __func__, endpoint);
+ return NULL;
+ }
+
+ /* Fill URB using supplied data. */
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_sndbulkpipe(serial->dev, endpoint) | dir,
+ buf, len, callback, ctx);
+
+ return urb;
+}
+
+/* Setup urbs */
+static void usb_wwan_setup_urbs(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ dbg("%s", __func__);
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* Do indat endpoints first */
+ for (j = 0; j < N_IN_URB; ++j) {
+			portdata->in_urbs[j] = usb_wwan_setup_urb(serial,
+					port->bulk_in_endpointAddress,
+					USB_DIR_IN, port,
+					portdata->in_buffer[j], IN_BUFLEN,
+					usb_wwan_indat_callback);
+ }
+
+ /* outdat endpoints */
+ for (j = 0; j < N_OUT_URB; ++j) {
+			portdata->out_urbs[j] = usb_wwan_setup_urb(serial,
+					port->bulk_out_endpointAddress,
+					USB_DIR_OUT, port,
+					portdata->out_buffer[j], OUT_BUFLEN,
+					usb_wwan_outdat_callback);
+ }
+ }
+}
+
+int usb_wwan_startup(struct usb_serial *serial)
+{
+ int i, j, err;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+ u8 *buffer;
+
+ dbg("%s", __func__);
+
+ /* Now setup per port private data */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
+ if (!portdata) {
+ dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.",
+ __func__, i);
+ return 1;
+ }
+ init_usb_anchor(&portdata->delayed);
+
+ for (j = 0; j < N_IN_URB; j++) {
+ buffer = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error;
+ portdata->in_buffer[j] = buffer;
+ }
+
+ for (j = 0; j < N_OUT_URB; j++) {
+ buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
+ if (!buffer)
+ goto bail_out_error2;
+ portdata->out_buffer[j] = buffer;
+ }
+
+ usb_set_serial_port_data(port, portdata);
+
+ if (!port->interrupt_in_urb)
+ continue;
+ err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (err)
+ dbg("%s: submit irq_in urb failed %d", __func__, err);
+ }
+ usb_wwan_setup_urbs(serial);
+ return 0;
+
+bail_out_error2:
+ for (j = 0; j < N_OUT_URB; j++)
+ kfree(portdata->out_buffer[j]);
+bail_out_error:
+ for (j = 0; j < N_IN_URB; j++)
+ if (portdata->in_buffer[j])
+ free_page((unsigned long)portdata->in_buffer[j]);
+ kfree(portdata);
+ return 1;
+}
+EXPORT_SYMBOL(usb_wwan_startup);
+
+static void stop_read_write_urbs(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ /* Stop reading/writing urbs */
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+ for (j = 0; j < N_IN_URB; j++)
+ usb_kill_urb(portdata->in_urbs[j]);
+ for (j = 0; j < N_OUT_URB; j++)
+ usb_kill_urb(portdata->out_urbs[j]);
+ }
+}
+
+void usb_wwan_disconnect(struct usb_serial *serial)
+{
+ dbg("%s", __func__);
+
+ stop_read_write_urbs(serial);
+}
+EXPORT_SYMBOL(usb_wwan_disconnect);
+
+void usb_wwan_release(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_port_private *portdata;
+
+ dbg("%s", __func__);
+
+ /* Now free them */
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ for (j = 0; j < N_IN_URB; j++) {
+ usb_free_urb(portdata->in_urbs[j]);
+ free_page((unsigned long)
+ portdata->in_buffer[j]);
+ portdata->in_urbs[j] = NULL;
+ }
+ for (j = 0; j < N_OUT_URB; j++) {
+ usb_free_urb(portdata->out_urbs[j]);
+ kfree(portdata->out_buffer[j]);
+ portdata->out_urbs[j] = NULL;
+ }
+ }
+
+ /* Now free per port private data */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ kfree(usb_get_serial_port_data(port));
+ }
+}
+EXPORT_SYMBOL(usb_wwan_release);
+
+#ifdef CONFIG_PM
+int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ int b;
+
+ dbg("%s entered", __func__);
+
+ if (message.event & PM_EVENT_AUTO) {
+ spin_lock_irq(&intfdata->susp_lock);
+ b = intfdata->in_flight;
+ spin_unlock_irq(&intfdata->susp_lock);
+
+ if (b)
+ return -EBUSY;
+ }
+
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 1;
+ spin_unlock_irq(&intfdata->susp_lock);
+ stop_read_write_urbs(serial);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_wwan_suspend);
+
+static void play_delayed(struct usb_serial_port *port)
+{
+ struct usb_wwan_intf_private *data;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ int err;
+
+ portdata = usb_get_serial_port_data(port);
+ data = port->serial->private;
+ while ((urb = usb_get_from_anchor(&portdata->delayed))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ data->in_flight++;
+ }
+}
+
+int usb_wwan_resume(struct usb_serial *serial)
+{
+ int i, j;
+ struct usb_serial_port *port;
+ struct usb_wwan_intf_private *intfdata = serial->private;
+ struct usb_wwan_port_private *portdata;
+ struct urb *urb;
+ int err = 0;
+
+ dbg("%s entered", __func__);
+ /* get the interrupt URBs resubmitted unconditionally */
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (!port->interrupt_in_urb) {
+ dbg("%s: No interrupt URB for port %d", __func__, i);
+ continue;
+ }
+ err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ dbg("Submitted interrupt URB for port %d (result %d)", i, err);
+ if (err < 0) {
+ err("%s: Error %d for interrupt URB of port%d",
+ __func__, err, i);
+ goto err_out;
+ }
+ }
+
+ for (i = 0; i < serial->num_ports; i++) {
+ /* walk all ports */
+ port = serial->port[i];
+ portdata = usb_get_serial_port_data(port);
+
+ /* skip closed ports */
+ spin_lock_irq(&intfdata->susp_lock);
+ if (!portdata->opened) {
+ spin_unlock_irq(&intfdata->susp_lock);
+ continue;
+ }
+
+ for (j = 0; j < N_IN_URB; j++) {
+ urb = portdata->in_urbs[j];
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ err("%s: Error %d for bulk URB %d",
+ __func__, err, i);
+ spin_unlock_irq(&intfdata->susp_lock);
+ goto err_out;
+ }
+ }
+ play_delayed(port);
+ spin_unlock_irq(&intfdata->susp_lock);
+ }
+ spin_lock_irq(&intfdata->susp_lock);
+ intfdata->suspended = 0;
+ spin_unlock_irq(&intfdata->susp_lock);
+err_out:
+ return err;
+}
+EXPORT_SYMBOL(usb_wwan_resume);
+#endif
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug messages");
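
The part of this new file most worth calling out is the autosuspend handling: writes issued while the interface is suspended are parked on an URB anchor and replayed by play_delayed() on resume. A condensed, stand-alone sketch of that pattern, with invented demo_* names and the locking and error handling reduced to what the file above actually does.

#include <linux/spinlock.h>
#include <linux/usb.h>

struct demo_ctx {
	spinlock_t lock;		/* spin_lock_init() at setup time */
	unsigned int suspended:1;
	int in_flight;
	struct usb_anchor delayed;	/* init_usb_anchor() at setup time */
};

/* submit now, or park the URB on the anchor while suspended */
static int demo_submit_or_delay(struct demo_ctx *ctx, struct urb *urb)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->suspended) {
		usb_anchor_urb(urb, &ctx->delayed);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return 0;
	}
	ctx->in_flight++;
	spin_unlock_irqrestore(&ctx->lock, flags);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		spin_lock_irqsave(&ctx->lock, flags);
		ctx->in_flight--;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return err;
}

/* resume side: replay everything that was parked while suspended */
static void demo_play_delayed(struct demo_ctx *ctx)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&ctx->delayed)))
		if (!usb_submit_urb(urb, GFP_ATOMIC))
			ctx->in_flight++;
}
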
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 094942707c7..eb76aaef426 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -38,17 +38,9 @@
/* function prototypes for a handspring visor */
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port);
static void visor_close(struct usb_serial_port *port);
-static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int visor_write_room(struct tty_struct *tty);
-static void visor_throttle(struct tty_struct *tty);
-static void visor_unthrottle(struct tty_struct *tty);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int visor_calc_num_ports(struct usb_serial *serial);
-static void visor_release(struct usb_serial *serial);
-static void visor_write_bulk_callback(struct urb *urb);
-static void visor_read_bulk_callback(struct urb *urb);
static void visor_read_int_callback(struct urb *urb);
static int clie_3_5_startup(struct usb_serial *serial);
static int treo_attach(struct usb_serial *serial);
@@ -194,18 +186,14 @@ static struct usb_serial_driver handspring_device = {
.usb_driver = &visor_driver,
.id_table = id_table,
.num_ports = 2,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = treo_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .release = visor_release,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
.read_int_callback = visor_read_int_callback,
};
@@ -219,18 +207,14 @@ static struct usb_serial_driver clie_5_device = {
.usb_driver = &visor_driver,
.id_table = clie_id_5_table,
.num_ports = 2,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = clie_5_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
- .release = visor_release,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
.read_int_callback = visor_read_int_callback,
};
@@ -244,39 +228,19 @@ static struct usb_serial_driver clie_3_5_device = {
.usb_driver = &visor_driver,
.id_table = clie_id_3_5_table,
.num_ports = 1,
+ .bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
- .throttle = visor_throttle,
- .unthrottle = visor_unthrottle,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
.attach = clie_3_5_startup,
- .write = visor_write,
- .write_room = visor_write_room,
- .write_bulk_callback = visor_write_bulk_callback,
- .read_bulk_callback = visor_read_bulk_callback,
};
-struct visor_private {
- spinlock_t lock;
- int bytes_in;
- int bytes_out;
- int outstanding_urbs;
- unsigned char throttled;
- unsigned char actually_throttled;
-};
-
-/* number of outstanding urbs to prevent userspace DoS from happening */
-#define URB_UPPER_LIMIT 42
-
-static int stats;
-
/******************************************************************************
* Handspring Visor specific driver functions
******************************************************************************/
static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
int result = 0;
dbg("%s - port %d", __func__, port->number);
@@ -287,26 +251,10 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port)
return -ENODEV;
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->bytes_in = 0;
- priv->bytes_out = 0;
- priv->throttled = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
/* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result) {
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
+ result = usb_serial_generic_open(tty, port);
+ if (result)
goto exit;
- }
if (port->interrupt_in_urb) {
dbg("%s - adding interrupt input for treo", __func__);
@@ -323,13 +271,12 @@ exit:
static void visor_close(struct usb_serial_port *port)
{
- struct visor_private *priv = usb_get_serial_port_data(port);
unsigned char *transfer_buffer;
dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
- usb_kill_urb(port->read_urb);
+ usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
mutex_lock(&port->serial->disc_mutex);
@@ -346,192 +293,6 @@ static void visor_close(struct usb_serial_port *port)
}
}
mutex_unlock(&port->serial->disc_mutex);
-
- if (stats)
- dev_info(&port->dev, "Bytes In = %d Bytes Out = %d\n",
- priv->bytes_in, priv->bytes_out);
-}
-
-
-static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
-{
- struct visor_private *priv = usb_get_serial_port_data(port);
- struct usb_serial *serial = port->serial;
- struct urb *urb;
- unsigned char *buffer;
- unsigned long flags;
- int status;
-
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
- spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- priv->outstanding_urbs++;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- buffer = kmalloc(count, GFP_ATOMIC);
- if (!buffer) {
- dev_err(&port->dev, "out of memory\n");
- count = -ENOMEM;
- goto error_no_buffer;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- dev_err(&port->dev, "no more free urbs\n");
- count = -ENOMEM;
- goto error_no_urb;
- }
-
- memcpy(buffer, buf, count);
-
- usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
-
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev,
- port->bulk_out_endpointAddress),
- buffer, count,
- visor_write_bulk_callback, port);
-
- /* send it down the pipe */
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- dev_err(&port->dev,
- "%s - usb_submit_urb(write bulk) failed with status = %d\n",
- __func__, status);
- count = status;
- goto error;
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->bytes_out += count;
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- /* we are done with this urb, so let the host driver
- * really free it when it is finished with it */
- usb_free_urb(urb);
-
- return count;
-error:
- usb_free_urb(urb);
-error_no_urb:
- kfree(buffer);
-error_no_buffer:
- spin_lock_irqsave(&priv->lock, flags);
- --priv->outstanding_urbs;
- spin_unlock_irqrestore(&priv->lock, flags);
- return count;
-}
-
-
-static int visor_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned long flags;
-
- dbg("%s - port %d", __func__, port->number);
-
- /*
- * We really can take anything the user throws at us
- * but let's pick a nice big number to tell the tty
- * layer that we have lots of free space, unless we don't.
- */
-
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
- spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit", __func__);
- return 0;
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 2048;
-}
-
-
-static void visor_write_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct visor_private *priv = usb_get_serial_port_data(port);
- int status = urb->status;
- unsigned long flags;
-
- /* free up the transfer buffer, as usb_free_urb() does not do this */
- kfree(urb->transfer_buffer);
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status)
- dbg("%s - nonzero write bulk status received: %d",
- __func__, status);
-
- spin_lock_irqsave(&priv->lock, flags);
- --priv->outstanding_urbs;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- usb_serial_port_softint(port);
-}
-
-
-static void visor_read_bulk_callback(struct urb *urb)
-{
- struct usb_serial_port *port = urb->context;
- struct visor_private *priv = usb_get_serial_port_data(port);
- unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
- struct tty_struct *tty;
- int result;
- int available_room = 0;
-
- dbg("%s - port %d", __func__, port->number);
-
- if (status) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
- return;
- }
-
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
-
- if (urb->actual_length) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_insert_flip_string(tty, data,
- urb->actual_length);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
- spin_lock(&priv->lock);
- if (tty)
- priv->bytes_in += available_room;
-
- } else {
- spin_lock(&priv->lock);
- }
-
- /* Continue trying to always read if we should */
- if (!priv->throttled) {
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- } else
- priv->actually_throttled = 1;
- spin_unlock(&priv->lock);
}
static void visor_read_int_callback(struct urb *urb)
@@ -575,41 +336,6 @@ exit:
__func__, result);
}
-static void visor_throttle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
-
- dbg("%s - port %d", __func__, port->number);
- spin_lock_irq(&priv->lock);
- priv->throttled = 1;
- spin_unlock_irq(&priv->lock);
-}
-
-
-static void visor_unthrottle(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct visor_private *priv = usb_get_serial_port_data(port);
- int result, was_throttled;
-
- dbg("%s - port %d", __func__, port->number);
- spin_lock_irq(&priv->lock);
- priv->throttled = 0;
- was_throttled = priv->actually_throttled;
- priv->actually_throttled = 0;
- spin_unlock_irq(&priv->lock);
-
- if (was_throttled) {
- port->read_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->read_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, result);
- }
-}
-
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
@@ -777,28 +503,6 @@ static int visor_calc_num_ports(struct usb_serial *serial)
return num_ports;
}
-static int generic_startup(struct usb_serial *serial)
-{
- struct usb_serial_port **ports = serial->port;
- struct visor_private *priv;
- int i;
-
- for (i = 0; i < serial->num_ports; ++i) {
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- while (i-- != 0) {
- priv = usb_get_serial_port_data(ports[i]);
- usb_set_serial_port_data(ports[i], NULL);
- kfree(priv);
- }
- return -ENOMEM;
- }
- spin_lock_init(&priv->lock);
- usb_set_serial_port_data(ports[i], priv);
- }
- return 0;
-}
-
static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
@@ -849,7 +553,7 @@ static int clie_3_5_startup(struct usb_serial *serial)
goto out;
}
- result = generic_startup(serial);
+ result = 0;
out:
kfree(data);
@@ -867,7 +571,7 @@ static int treo_attach(struct usb_serial *serial)
(le16_to_cpu(serial->dev->descriptor.idVendor)
== KYOCERA_VENDOR_ID)) ||
(serial->num_interrupt_in == 0))
- goto generic_startup;
+ return 0;
dbg("%s", __func__);
@@ -897,8 +601,7 @@ static int treo_attach(struct usb_serial *serial)
COPY_PORT(serial->port[1], swap_port);
kfree(swap_port);
-generic_startup:
- return generic_startup(serial);
+ return 0;
}
static int clie_5_attach(struct usb_serial *serial)
@@ -921,20 +624,7 @@ static int clie_5_attach(struct usb_serial *serial)
serial->port[0]->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
- return generic_startup(serial);
-}
-
-static void visor_release(struct usb_serial *serial)
-{
- struct visor_private *priv;
- int i;
-
- dbg("%s", __func__);
-
- for (i = 0; i < serial->num_ports; i++) {
- priv = usb_get_serial_port_data(serial->port[i]);
- kfree(priv);
- }
+ return 0;
}
static int __init visor_init(void)
@@ -1018,8 +708,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
-module_param(stats, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(stats, "Enables statistics or not");
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "User specified vendor ID");
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 57229cf6647..88db4d06aef 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -9,8 +9,9 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * See Documentation/usb/usb-serial.txt for more information on using this driver
- *
+ * See Documentation/usb/usb-serial.txt for more information on using this
+ * driver.
+ *
*/
#ifndef __LINUX_USB_SERIAL_VISOR_H
@@ -65,7 +66,7 @@
#define ACEECA_MEZ1000_ID 0x0001
#define KYOCERA_VENDOR_ID 0x0C88
-#define KYOCERA_7135_ID 0x0021
+#define KYOCERA_7135_ID 0x0021
#define FOSSIL_VENDOR_ID 0x0E67
#define FOSSIL_ABACUS_ID 0x0002
@@ -145,7 +146,7 @@ struct visor_connection_info {
* The maximum number of connections currently supported is 2
*/
struct palm_ext_connection_info {
- __u8 num_ports;
+ __u8 num_ports;
__u8 endpoint_numbers_different;
__le16 reserved1;
struct {
diff --git a/drivers/usb/serial/zio.c b/drivers/usb/serial/zio.c
new file mode 100644
index 00000000000..f5796727883
--- /dev/null
+++ b/drivers/usb/serial/zio.c
@@ -0,0 +1,64 @@
+/*
+ * ZIO Motherboard USB driver
+ *
+ * Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/uaccess.h>
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1CBE, 0x0103) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver zio_driver = {
+ .name = "zio",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver zio_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "zio",
+ },
+ .id_table = id_table,
+ .usb_driver = &zio_driver,
+ .num_ports = 1,
+};
+
+static int __init zio_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&zio_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&zio_driver);
+ if (retval)
+ usb_serial_deregister(&zio_device);
+ return retval;
+}
+
+static void __exit zio_exit(void)
+{
+ usb_deregister(&zio_driver);
+ usb_serial_deregister(&zio_device);
+}
+
+module_init(zio_init);
+module_exit(zio_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fdba2f69d4c..e9cbc1467f7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -490,13 +490,13 @@ static int isd200_action( struct us_data *us, int action,
void* pointer, int value )
{
union ata_cdb ata;
- struct scsi_device srb_dev;
+ /* static to keep this large struct off the limited kernel stack */
+ static struct scsi_device srb_dev;
struct isd200_info *info = (struct isd200_info *)us->extra;
struct scsi_cmnd *srb = &info->srb;
int status;
memset(&ata, 0, sizeof(ata));
- memset(&srb_dev, 0, sizeof(srb_dev));
srb->cmnd = info->cmnd;
srb->device = &srb_dev;
++srb->serial_number;
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 198bb3ed95b..1943be5a291 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -201,8 +201,8 @@ static int onetouch_connect_input(struct us_data *ss)
if (!onetouch || !input_dev)
goto fail1;
- onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
- GFP_KERNEL, &onetouch->data_dma);
+ onetouch->data = usb_alloc_coherent(udev, ONETOUCH_PKT_LEN,
+ GFP_KERNEL, &onetouch->data_dma);
if (!onetouch->data)
goto fail1;
@@ -264,8 +264,8 @@ static int onetouch_connect_input(struct us_data *ss)
return 0;
fail3: usb_free_urb(onetouch->irq);
- fail2: usb_buffer_free(udev, ONETOUCH_PKT_LEN,
- onetouch->data, onetouch->data_dma);
+ fail2: usb_free_coherent(udev, ONETOUCH_PKT_LEN,
+ onetouch->data, onetouch->data_dma);
fail1: kfree(onetouch);
input_free_device(input_dev);
return error;
@@ -279,8 +279,8 @@ static void onetouch_release_input(void *onetouch_)
usb_kill_urb(onetouch->irq);
input_unregister_device(onetouch->dev);
usb_free_urb(onetouch->irq);
- usb_buffer_free(onetouch->udev, ONETOUCH_PKT_LEN,
- onetouch->data, onetouch->data_dma);
+ usb_free_coherent(onetouch->udev, ONETOUCH_PKT_LEN,
+ onetouch->data, onetouch->data_dma);
}
}
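
The onetouch.c hunks above, together with the transport.c, usb.c and usb-skeleton.c hunks below, are part of the tree-wide rename of usb_buffer_alloc()/usb_buffer_free() to usb_alloc_coherent()/usb_free_coherent(); the control setup buffer in usb.c additionally becomes plain kmalloc() memory, apparently because usbcore now handles its DMA mapping. A minimal sketch of the allocate/free pairing these callers follow (placeholder example_* names, not code from any of these drivers):

#include <linux/gfp.h>
#include <linux/usb.h>

/* Sketch only: a DMA-consistent transfer buffer for an URB; the returned
 * dma handle is stored in urb->transfer_dma together with
 * URB_NO_TRANSFER_DMA_MAP, as in the hunks above. */
static void *example_alloc_xfer_buf(struct usb_device *udev, size_t len,
                                    dma_addr_t *dma)
{
        return usb_alloc_coherent(udev, len, GFP_KERNEL, dma); /* was usb_buffer_alloc() */
}

static void example_free_xfer_buf(struct usb_device *udev, size_t len,
                                  void *buf, dma_addr_t dma)
{
        usb_free_coherent(udev, len, buf, dma); /* was usb_buffer_free() */
}
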
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index f253edec3bb..44716427c51 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -147,11 +147,9 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
* transfer buffer has already been mapped. */
- us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
if (us->current_urb->transfer_buffer == us->iobuf)
us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
us->current_urb->transfer_dma = us->iobuf_dma;
- us->current_urb->setup_dma = us->cr_dma;
/* submit the URB */
status = usb_submit_urb(us->current_urb, GFP_NOIO);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccf1dbbb87e..2c897eefadd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -365,15 +365,6 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
"FinePix 1400Zoom",
US_SC_UFI, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
-/* Reported by Peter Wächtler <pwaechtler@loewe-komp.de>
- * The device needs the flags only.
- */
-UNUSUAL_DEV( 0x04ce, 0x0002, 0x0074, 0x0074,
- "ScanLogic",
- "SL11R-IDE",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY),
-
/* Reported by Ondrej Zary <linux@rainbow-software.org>
* The device reports one sector more and breaks when that sector is accessed
*/
@@ -1853,6 +1844,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These Appotech controllers are found in picture frames; they provide a
+ * (buggy) emulation of a cdrom drive which contains the Windows software.
+ * Uploading of pictures happens over the corresponding /dev/sg device. */
+UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
+ "BUILDWIN",
+ "Photo Frame",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bbeeb92a213..a7d0bf9d92a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -407,15 +407,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
/* Store our private data in the interface */
usb_set_intfdata(intf, us);
- /* Allocate the device-related DMA-mapped buffers */
- us->cr = usb_buffer_alloc(us->pusb_dev, sizeof(*us->cr),
- GFP_KERNEL, &us->cr_dma);
+ /* Allocate the control/setup and DMA-mapped buffers */
+ us->cr = kmalloc(sizeof(*us->cr), GFP_KERNEL);
if (!us->cr) {
US_DEBUGP("usb_ctrlrequest allocation failed\n");
return -ENOMEM;
}
- us->iobuf = usb_buffer_alloc(us->pusb_dev, US_IOBUF_SIZE,
+ us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE,
GFP_KERNEL, &us->iobuf_dma);
if (!us->iobuf) {
US_DEBUGP("I/O buffer allocation failed\n");
@@ -499,9 +498,6 @@ static void adjust_quirks(struct us_data *us)
}
}
us->fflags = (us->fflags & ~mask) | f;
- dev_info(&us->pusb_intf->dev, "Quirks match for "
- "vid %04x pid %04x: %x\n",
- vid, pid, f);
}
/* Get the unusual_devs entries and the string descriptors */
@@ -511,6 +507,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
struct usb_device *dev = us->pusb_dev;
struct usb_interface_descriptor *idesc =
&us->pusb_intf->cur_altsetting->desc;
+ struct device *pdev = &us->pusb_intf->dev;
/* Store the entries */
us->unusual_dev = unusual_dev;
@@ -524,7 +521,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
adjust_quirks(us);
if (us->fflags & US_FL_IGNORE_DEVICE) {
- printk(KERN_INFO USB_STORAGE "device ignored\n");
+ dev_info(pdev, "device ignored\n");
return -ENODEV;
}
@@ -535,6 +532,12 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
if (dev->speed != USB_SPEED_HIGH)
us->fflags &= ~US_FL_GO_SLOW;
+ if (us->fflags)
+ dev_info(pdev, "Quirks match for vid %04x pid %04x: %lx\n",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct),
+ us->fflags);
+
/* Log a message if a non-generic unusual_dev entry contains an
* unnecessary subclass or protocol override. This may stimulate
* reports from users that will help us remove unneeded entries
@@ -555,20 +558,20 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
us->protocol == idesc->bInterfaceProtocol)
msg += 2;
if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE))
- printk(KERN_NOTICE USB_STORAGE "This device "
- "(%04x,%04x,%04x S %02x P %02x)"
- " has %s in unusual_devs.h (kernel"
- " %s)\n"
- " Please send a copy of this message to "
- "<linux-usb@vger.kernel.org> and "
- "<usb-storage@lists.one-eyed-alien.net>\n",
- le16_to_cpu(ddesc->idVendor),
- le16_to_cpu(ddesc->idProduct),
- le16_to_cpu(ddesc->bcdDevice),
- idesc->bInterfaceSubClass,
- idesc->bInterfaceProtocol,
- msgs[msg],
- utsname()->release);
+ dev_notice(pdev, "This device "
+ "(%04x,%04x,%04x S %02x P %02x)"
+ " has %s in unusual_devs.h (kernel"
+ " %s)\n"
+ " Please send a copy of this message to "
+ "<linux-usb@vger.kernel.org> and "
+ "<usb-storage@lists.one-eyed-alien.net>\n",
+ le16_to_cpu(ddesc->idVendor),
+ le16_to_cpu(ddesc->idProduct),
+ le16_to_cpu(ddesc->bcdDevice),
+ idesc->bInterfaceSubClass,
+ idesc->bInterfaceProtocol,
+ msgs[msg],
+ utsname()->release);
}
return 0;
@@ -718,8 +721,8 @@ static int usb_stor_acquire_resources(struct us_data *us)
/* Start up our control thread */
th = kthread_run(usb_stor_control_thread, us, "usb-storage");
if (IS_ERR(th)) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to start control thread\n");
+ dev_warn(&us->pusb_intf->dev,
+ "Unable to start control thread\n");
return PTR_ERR(th);
}
us->ctl_thread = th;
@@ -757,13 +760,9 @@ static void dissociate_dev(struct us_data *us)
{
US_DEBUGP("-- %s\n", __func__);
- /* Free the device-related DMA-mapped buffers */
- if (us->cr)
- usb_buffer_free(us->pusb_dev, sizeof(*us->cr), us->cr,
- us->cr_dma);
- if (us->iobuf)
- usb_buffer_free(us->pusb_dev, US_IOBUF_SIZE, us->iobuf,
- us->iobuf_dma);
+ /* Free the buffers */
+ kfree(us->cr);
+ usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma);
/* Remove our private data from the interface */
usb_set_intfdata(us->pusb_intf, NULL);
@@ -816,13 +815,14 @@ static void release_everything(struct us_data *us)
static int usb_stor_scan_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
+ struct device *dev = &us->pusb_intf->dev;
- dev_dbg(&us->pusb_intf->dev, "device found\n");
+ dev_dbg(dev, "device found\n");
set_freezable();
/* Wait for the timeout to expire or for a disconnect */
if (delay_use > 0) {
- dev_dbg(&us->pusb_intf->dev, "waiting for device to settle "
+ dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
wait_event_freezable_timeout(us->delay_wait,
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
@@ -840,7 +840,7 @@ static int usb_stor_scan_thread(void * __us)
mutex_unlock(&us->dev_mutex);
}
scsi_scan_host(us_to_host(us));
- dev_dbg(&us->pusb_intf->dev, "scan complete\n");
+ dev_dbg(dev, "scan complete\n");
/* Should we unbind if no devices were detected? */
}
@@ -876,8 +876,8 @@ int usb_stor_probe1(struct us_data **pus,
*/
host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
if (!host) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to allocate the scsi host\n");
+ dev_warn(&intf->dev,
+ "Unable to allocate the scsi host\n");
return -ENOMEM;
}
@@ -925,6 +925,7 @@ int usb_stor_probe2(struct us_data *us)
{
struct task_struct *th;
int result;
+ struct device *dev = &us->pusb_intf->dev;
/* Make sure the transport and protocol have both been set */
if (!us->transport || !us->proto_handler) {
@@ -949,18 +950,18 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
dev_name(&us->pusb_intf->dev));
- result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev);
+ result = scsi_add_host(us_to_host(us), dev);
if (result) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to add the scsi host\n");
+ dev_warn(dev,
+ "Unable to add the scsi host\n");
goto BadDevice;
}
/* Start up the thread for delayed SCSI-device scanning */
th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
if (IS_ERR(th)) {
- printk(KERN_WARNING USB_STORAGE
- "Unable to start the device-scanning thread\n");
+ dev_warn(dev,
+ "Unable to start the device-scanning thread\n");
complete(&us->scanning_done);
quiesce_and_remove_host(us);
result = PTR_ERR(th);
@@ -1046,12 +1047,12 @@ static int __init usb_stor_init(void)
{
int retval;
- printk(KERN_INFO "Initializing USB Mass Storage driver...\n");
+ pr_info("Initializing USB Mass Storage driver...\n");
/* register the driver, return usb_register return code if error */
retval = usb_register(&usb_storage_driver);
if (retval == 0) {
- printk(KERN_INFO "USB Mass Storage support registered.\n");
+ pr_info("USB Mass Storage support registered.\n");
usb_usual_set_present(USB_US_TYPE_STOR);
}
return retval;
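
Several hunks in this file replace printk(KERN_* USB_STORAGE ...) with the dev_*() helpers, and plain printk(KERN_INFO ...) with pr_info() where no struct device is available, so that messages are automatically prefixed with the driver and device names. A minimal sketch of the two forms, assuming placeholder arguments:

#include <linux/device.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_log(struct device *dev, u16 vid, u16 pid)
{
        /* bound to a device: the driver and device name are added by the core */
        dev_warn(dev, "Unable to start control thread\n");
        dev_info(dev, "Quirks match for vid %04x pid %04x\n", vid, pid);

        /* no device context, e.g. module init/exit paths */
        pr_info("Initializing USB Mass Storage driver...\n");
}
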
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 69717134231..89d3bfff98d 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -139,8 +139,7 @@ struct us_data {
struct usb_ctrlrequest *cr; /* control requests */
struct usb_sg_request current_sg; /* scatter-gather req. */
unsigned char *iobuf; /* I/O buffer */
- dma_addr_t cr_dma; /* buffer DMA addresses */
- dma_addr_t iobuf_dma;
+ dma_addr_t iobuf_dma; /* buffer DMA addresses */
struct task_struct *ctl_thread; /* the control thread */
/* mutual exclusion and synchronization structures */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 61522787f39..d110588b56f 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -387,8 +387,8 @@ static void skel_write_bulk_callback(struct urb *urb)
}
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
@@ -442,8 +442,8 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
goto error;
}
- buf = usb_buffer_alloc(dev->udev, writesize, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
@@ -491,7 +491,7 @@ error_unanchor:
usb_unanchor_urb(urb);
error:
if (urb) {
- usb_buffer_free(dev->udev, writesize, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
usb_free_urb(urb);
}
up(&dev->limit_sem);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 112ef7e26f6..84b744c428a 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -76,7 +76,7 @@
* xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
* we are going to have to rebuild all this based on an scheduler,
* to where we have a list of transactions to do and based on the
- * availability of the different requried components (blocks,
+ * availability of the different required components (blocks,
* rpipes, segment slots, etc), we go scheduling them. Painful.
*/
#include <linux/init.h>
@@ -474,8 +474,6 @@ static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
struct wa_xfer_ctl *xfer_ctl =
container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
- BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
- && xfer->urb->setup_packet == NULL);
memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
sizeof(xfer_ctl->baSetupData));
break;
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 759cda55f7c..3d94c4247f4 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -58,9 +58,7 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
-/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
- * public */
-#include <linux/../../drivers/usb/core/hcd.h>
+#include <linux/usb/hcd.h>
#include <linux/uwb.h>
#include <linux/usb/wusb.h>
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9777583218f..0f41c9195e9 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
int r;
switch (ioctl) {
case VHOST_NET_SET_BACKEND:
- r = copy_from_user(&backend, argp, sizeof backend);
- if (r < 0)
- return r;
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
- return copy_to_user(featurep, &features, sizeof features);
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
case VHOST_SET_FEATURES:
- r = copy_from_user(&features, featurep, sizeof features);
- if (r < 0)
- return r;
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
if (features & ~VHOST_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
@@ -642,7 +642,7 @@ static struct miscdevice vhost_net_misc = {
&vhost_net_fops,
};
-int vhost_net_init(void)
+static int vhost_net_init(void)
{
int r = vhost_init();
if (r)
@@ -659,7 +659,7 @@ err_init:
}
module_init(vhost_net_init);
-void vhost_net_exit(void)
+static void vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
vhost_cleanup();
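
The conversions above rely on the fact that copy_from_user() and copy_to_user() return the number of bytes left uncopied (always zero or positive), never a negative errno, so the old checks for a negative return could never trigger. A minimal sketch of the idiom the ioctl paths switch to; struct example_req and the function name are placeholders:

#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_req {
        unsigned int index;
        int fd;
};

/* a non-zero return from copy_from_user() means a fault occurred:
 * report -EFAULT rather than propagating the leftover byte count */
static long example_fetch(struct example_req *req, void __user *argp)
{
        if (copy_from_user(req, argp, sizeof(*req)))
                return -EFAULT;
        return 0;
}
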
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 49fa953aaf6..3b83382e06e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
unsigned long size = offsetof(struct vhost_memory, regions);
- long r;
- r = copy_from_user(&mem, m, size);
- if (r)
- return r;
+ if (copy_from_user(&mem, m, size))
+ return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
@@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -ENOMEM;
memcpy(newmem, &mem, size);
- r = copy_from_user(newmem->regions, m->regions,
- mem.nregions * sizeof *m->regions);
- if (r) {
+ if (copy_from_user(newmem->regions, m->regions,
+ mem.nregions * sizeof *m->regions)) {
kfree(newmem);
- return r;
+ return -EFAULT;
}
- if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL)))
+ if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+ kfree(newmem);
return -EFAULT;
+ }
oldmem = d->memory;
rcu_assign_pointer(d->memory, newmem);
synchronize_rcu();
@@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = get_user(idx, idxp);
if (r < 0)
return r;
- if (idx > d->nvqs)
+ if (idx >= d->nvqs)
return -ENOBUFS;
vq = d->vqs + idx;
@@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EBUSY;
break;
}
- r = copy_from_user(&s, argp, sizeof s);
- if (r < 0)
+ if (copy_from_user(&s, argp, sizeof s)) {
+ r = -EFAULT;
break;
+ }
if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
r = -EINVAL;
break;
@@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EBUSY;
break;
}
- r = copy_from_user(&s, argp, sizeof s);
- if (r < 0)
+ if (copy_from_user(&s, argp, sizeof s)) {
+ r = -EFAULT;
break;
+ }
if (s.num > 0xffff) {
r = -EINVAL;
break;
@@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
case VHOST_GET_VRING_BASE:
s.index = idx;
s.num = vq->last_avail_idx;
- r = copy_to_user(argp, &s, sizeof s);
+ if (copy_to_user(argp, &s, sizeof s))
+ r = -EFAULT;
break;
case VHOST_SET_VRING_ADDR:
- r = copy_from_user(&a, argp, sizeof a);
- if (r < 0)
+ if (copy_from_user(&a, argp, sizeof a)) {
+ r = -EFAULT;
break;
+ }
if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
r = -EOPNOTSUPP;
break;
@@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
vq->used = (void __user *)(unsigned long)a.used_user_addr;
break;
case VHOST_SET_VRING_KICK:
- r = copy_from_user(&f, argp, sizeof f);
- if (r < 0)
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
break;
+ }
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
@@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
filep = eventfp;
break;
case VHOST_SET_VRING_CALL:
- r = copy_from_user(&f, argp, sizeof f);
- if (r < 0)
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
break;
+ }
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
@@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
filep = eventfp;
break;
case VHOST_SET_VRING_ERR:
- r = copy_from_user(&f, argp, sizeof f);
- if (r < 0)
+ if (copy_from_user(&f, argp, sizeof f)) {
+ r = -EFAULT;
break;
+ }
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
@@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
r = vhost_set_memory(d, argp);
break;
case VHOST_SET_LOG_BASE:
- r = copy_from_user(&p, argp, sizeof p);
- if (r < 0)
+ if (copy_from_user(&p, argp, sizeof p)) {
+ r = -EFAULT;
break;
+ }
if ((u64)(unsigned long)p != p) {
r = -EFAULT;
break;
@@ -715,8 +722,8 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
return 0;
}
-int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
@@ -741,7 +748,7 @@ int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len, size);
- _iov->iov_base = (void *)(unsigned long)
+ _iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
@@ -806,7 +813,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
count = indirect->len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
- if (count > USHORT_MAX + 1) {
+ if (count > USHRT_MAX + 1) {
vq_err(vq, "Indirect buffer length too big: %d\n",
indirect->len);
return -E2BIG;
@@ -995,7 +1002,7 @@ void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem *used;
+ struct vring_used_elem __user *used;
/* The virtqueue contains a ring of used buffers. Get a pointer to the
* next entry in that used ring. */
@@ -1019,7 +1026,8 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
smp_wmb();
/* Log used ring entry write. */
log_write(vq->log_base,
- vq->log_addr + ((void *)used - (void *)vq->used),
+ vq->log_addr +
+ ((void __user *)used - (void __user *)vq->used),
sizeof *used);
/* Log used index update. */
log_write(vq->log_base,
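
The (void __user *) casts and the __user annotation on the used-ring pointer above are sparse annotations ("make C=1"): __user marks pointers into the guest or user address space that must only be accessed through the uaccess helpers. A tiny sketch of the convention, with a placeholder name:

#include <linux/types.h>
#include <linux/uaccess.h>

/* dst lives in userspace, so it is written with put_user() rather than a
 * direct store; sparse warns when __user and kernel pointers are mixed */
static int example_store_index(u16 __user *dst, u16 val)
{
        return put_user(val, dst); /* 0 on success, -EFAULT on a fault */
}
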
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6e16244f3ed..1e6fec48797 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1511,6 +1511,7 @@ config FB_VIA
select FB_CFB_IMAGEBLIT
select I2C_ALGOBIT
select I2C
+ select GPIOLIB
help
This is the frame buffer device driver for Graphics chips of VIA
UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
@@ -1520,6 +1521,21 @@ config FB_VIA
To compile this driver as a module, choose M here: the
module will be called viafb.
+
+if FB_VIA
+
+config FB_VIA_DIRECT_PROCFS
+ bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
+ depends on FB_VIA
+ default n
+ help
+ Allow direct hardware access to some output registers via procfs.
+ This is dangerous but may provide the only chance to get the
+ correct output device configuration.
+ Its use is strongly discouraged.
+
+endif
+
config FB_NEOMAGIC
tristate "NeoMagic display support"
depends on FB && PCI
@@ -2186,7 +2202,6 @@ config FB_MSM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- default y
config FB_MX3
tristate "MX3 Framebuffer support"
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index dca48df9844..e5d6b56d444 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -50,8 +50,9 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
-
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
+
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
@@ -1135,7 +1136,7 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg
* Interface to the low level console driver
*/
-static void amifb_deinit(void);
+static void amifb_deinit(struct platform_device *pdev);
/*
* Internal routines
@@ -2246,7 +2247,7 @@ static inline void chipfree(void)
* Initialisation
*/
-static int __init amifb_init(void)
+static int __init amifb_probe(struct platform_device *pdev)
{
int tag, i, err = 0;
u_long chipptr;
@@ -2261,16 +2262,6 @@ static int __init amifb_init(void)
}
amifb_setup(option);
#endif
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_VIDEO))
- return -ENODEV;
-
- /*
- * We request all registers starting from bplpt[0]
- */
- if (!request_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120,
- "amifb [Denise/Lisa]"))
- return -EBUSY;
-
custom.dmacon = DMAF_ALL | DMAF_MASTER;
switch (amiga_chipset) {
@@ -2377,6 +2368,7 @@ default_chipset:
fb_info.fbops = &amifb_ops;
fb_info.par = &currentpar;
fb_info.flags = FBINFO_DEFAULT;
+ fb_info.device = &pdev->dev;
if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
@@ -2451,18 +2443,18 @@ default_chipset:
return 0;
amifb_error:
- amifb_deinit();
+ amifb_deinit(pdev);
return err;
}
-static void amifb_deinit(void)
+static void amifb_deinit(struct platform_device *pdev)
{
if (fb_info.cmap.len)
fb_dealloc_cmap(&fb_info.cmap);
+ fb_dealloc_cmap(&fb_info.cmap);
chipfree();
if (videomemory)
iounmap((void*)videomemory);
- release_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120);
custom.dmacon = DMAF_ALL | DMAF_MASTER;
}
@@ -3794,14 +3786,35 @@ static void ami_rebuild_copper(void)
}
}
-static void __exit amifb_exit(void)
+static int __exit amifb_remove(struct platform_device *pdev)
{
unregister_framebuffer(&fb_info);
- amifb_deinit();
+ amifb_deinit(pdev);
amifb_video_off();
+ return 0;
+}
+
+static struct platform_driver amifb_driver = {
+ .remove = __exit_p(amifb_remove),
+ .driver = {
+ .name = "amiga-video",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amifb_init(void)
+{
+ return platform_driver_probe(&amifb_driver, amifb_probe);
}
module_init(amifb_init);
+
+static void __exit amifb_exit(void)
+{
+ platform_driver_unregister(&amifb_driver);
+}
+
module_exit(amifb_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-video");
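
The amifb changes above follow the common pattern for converting a legacy, non-hotpluggable video driver to a platform driver: the probe routine stays __init and is passed to platform_driver_probe() instead of being stored in the platform_driver, and the remove routine is wrapped in __exit_p(). A stripped-down sketch of that skeleton, with placeholder names and the real resource handling omitted:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_fb_probe(struct platform_device *pdev)
{
        /* map registers, allocate video memory, register the framebuffer */
        return 0;
}

static int __exit example_fb_remove(struct platform_device *pdev)
{
        /* unregister the framebuffer and release resources */
        return 0;
}

static struct platform_driver example_fb_driver = {
        .remove = __exit_p(example_fb_remove),
        .driver = {
                .name  = "example-video",
                .owner = THIS_MODULE,
        },
};

static int __init example_fb_init(void)
{
        /* the probe routine is __init, so it is passed here, not in the struct */
        return platform_driver_probe(&example_fb_driver, example_fb_probe);
}
module_init(example_fb_init);

static void __exit example_fb_exit(void)
{
        platform_driver_unregister(&example_fb_driver);
}
module_exit(example_fb_exit);

MODULE_LICENSE("GPL");
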
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 8d406fb689c..f3d7440f007 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -80,7 +80,7 @@ struct arcfb_par {
spinlock_t lock;
};
-static struct fb_fix_screeninfo arcfb_fix __initdata = {
+static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
.id = "arcfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo arcfb_var __initdata = {
+static struct fb_var_screeninfo arcfb_var __devinitdata = {
.xres = 128,
.yres = 64,
.xres_virtual = 128,
@@ -588,7 +588,7 @@ err:
return retval;
}
-static int arcfb_remove(struct platform_device *dev)
+static int __devexit arcfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev)
static struct platform_driver arcfb_driver = {
.probe = arcfb_probe,
- .remove = arcfb_remove,
+ .remove = __devexit_p(arcfb_remove),
.driver = {
.name = "arcfb",
},
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 29d72851f85..f8d69ad3683 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1820,10 +1820,6 @@ struct atyclk {
#define ATYIO_FEATW 0x41545903 /* ATY\03 */
#endif
-#ifndef FBIO_WAITFORVSYNC
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-#endif
-
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 6c37e8ee5ef..3c1e13ed1cb 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2099,7 +2099,7 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
}
-static ssize_t radeon_show_edid1(struct kobject *kobj,
+static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -2112,7 +2112,7 @@ static ssize_t radeon_show_edid1(struct kobject *kobj,
}
-static ssize_t radeon_show_edid2(struct kobject *kobj,
+static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 68d2518fada..38ffc3fbcbe 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -222,6 +222,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
data->port = __check_device(pdata, name);
if (data->port < 0) {
dev_err(&pdev->dev, "wrong platform data is assigned");
+ kfree(data);
return -EINVAL;
}
@@ -266,6 +267,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
backlight_update_status(bl);
return 0;
out:
+ backlight_device_unregister(bl);
kfree(data);
return ret;
}
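
Both hunks above complete the probe error unwind: an early failure must free the driver data, and a failure after backlight_device_register() must also unregister the backlight device before freeing. A compact sketch of the resulting unwind order, using placeholder names rather than the real 88pm860x symbols:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_bl_data {
        int brightness;
};

static int example_bl_update_status(struct backlight_device *bl)
{
        return 0; /* a real driver would program the hardware here */
}

static const struct backlight_ops example_bl_ops = {
        .update_status = example_bl_update_status,
};

static int example_bl_probe(struct device *parent)
{
        struct backlight_properties props = { .max_brightness = 255 };
        struct example_bl_data *data;
        struct backlight_device *bl;
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        bl = backlight_device_register("example-bl", parent, data,
                                       &example_bl_ops, &props);
        if (IS_ERR(bl)) {
                ret = PTR_ERR(bl);
                goto err_free;          /* nothing registered yet */
        }

        ret = example_bl_update_status(bl); /* stand-in for later setup that may fail */
        if (ret)
                goto err_unregister;
        return 0;

err_unregister:
        backlight_device_unregister(bl);    /* undo the registration first */
err_free:
        kfree(data);                        /* then free the driver data */
        return ret;
}
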
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c025c84601b..e54a337227e 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -8,12 +8,13 @@ menuconfig BACKLIGHT_LCD_SUPPORT
Enable this to be able to choose the drivers for controlling the
backlight and the LCD panel on some platforms, for example on PDAs.
+if BACKLIGHT_LCD_SUPPORT
+
#
# LCD
#
config LCD_CLASS_DEVICE
tristate "Lowlevel LCD controls"
- depends on BACKLIGHT_LCD_SUPPORT
default m
help
This framework adds support for low-level control of LCD.
@@ -24,31 +25,32 @@ config LCD_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+if LCD_CLASS_DEVICE
+
config LCD_CORGI
tristate "LCD Panel support for SHARP corgi/spitz model"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && PXA_SHARPSL
+ depends on SPI_MASTER && PXA_SHARPSL
help
Say y here to support the LCD panels usually found on SHARP
corgi (C7x0) and spitz (Cxx00) models.
config LCD_L4F00242T03
tristate "Epson L4F00242T03 LCD"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GENERIC_GPIO
help
SPI driver for Epson L4F00242T03. This provides basic support
for init and powering the LCD up/down through a sysfs interface.
config LCD_LMS283GF05
tristate "Samsung LMS283GF05 LCD"
- depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+ depends on SPI_MASTER && GENERIC_GPIO
help
SPI driver for Samsung LMS283GF05. This provides basic support
for powering the LCD up/down through a sysfs interface.
config LCD_LTV350QV
tristate "Samsung LTV350QV LCD Panel"
- depends on LCD_CLASS_DEVICE && SPI_MASTER
- default n
+ depends on SPI_MASTER
help
If you have a Samsung LTV350QV LCD panel, say y to include a
power control driver for it. The panel starts up in power
@@ -59,60 +61,61 @@ config LCD_LTV350QV
config LCD_ILI9320
tristate
- depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT
- default n
help
If you have a panel based on the ILI9320 controller chip
then say y to include a power driver for it.
config LCD_TDO24M
tristate "Toppoly TDO24M and TDO35S LCD Panels support"
- depends on LCD_CLASS_DEVICE && SPI_MASTER
- default n
+ depends on SPI_MASTER
help
If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
include the support for it.
config LCD_VGG2432A4
tristate "VGG2432A4 LCM device support"
- depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER
+ depends on SPI_MASTER
select LCD_ILI9320
- default n
help
If you have a VGG2432A4 panel based on the ILI9320 controller chip
then say y to include a power driver for it.
config LCD_PLATFORM
tristate "Platform LCD controls"
- depends on LCD_CLASS_DEVICE
help
This driver provides a platform-device registered LCD power
control interface.
config LCD_TOSA
tristate "Sharp SL-6000 LCD Driver"
- depends on LCD_CLASS_DEVICE && SPI
- depends on MACH_TOSA
- default n
+ depends on SPI && MACH_TOSA
help
If you have an Sharp SL-6000 Zaurus say Y to enable a driver
for its LCD.
config LCD_HP700
tristate "HP Jornada 700 series LCD Driver"
- depends on LCD_CLASS_DEVICE
depends on SA1100_JORNADA720_SSP && !PREEMPT
default y
help
If you have an HP Jornada 700 series handheld (710/720/728)
say Y to enable LCD control driver.
+config LCD_S6E63M0
+ tristate "S6E63M0 AMOLED LCD Driver"
+ depends on SPI && BACKLIGHT_CLASS_DEVICE
+ default n
+ help
+ If you have an S6E63M0 LCD Panel, say Y to enable its
+ LCD control driver.
+
+endif # LCD_CLASS_DEVICE
+
#
# Backlight
#
config BACKLIGHT_CLASS_DEVICE
tristate "Lowlevel Backlight controls"
- depends on BACKLIGHT_LCD_SUPPORT
default m
help
This framework adds support for low-level control of the LCD
@@ -121,9 +124,11 @@ config BACKLIGHT_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+if BACKLIGHT_CLASS_DEVICE
+
config BACKLIGHT_ATMEL_LCDC
bool "Atmel LCDC Contrast-as-Backlight control"
- depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL
+ depends on FB_ATMEL
default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK
help
This provides a backlight control internal to the Atmel LCDC
@@ -136,8 +141,7 @@ config BACKLIGHT_ATMEL_LCDC
config BACKLIGHT_ATMEL_PWM
tristate "Atmel PWM backlight control"
- depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM
- default n
+ depends on ATMEL_PWM
help
Say Y here if you want to use the PWM peripheral in Atmel AT91 and
AVR32 devices. This driver will need additional platform data to know
@@ -146,9 +150,18 @@ config BACKLIGHT_ATMEL_PWM
To compile this driver as a module, choose M here: the module will be
called atmel-pwm-bl.
+config BACKLIGHT_EP93XX
+ tristate "Cirrus EP93xx Backlight Driver"
+ depends on FB_EP93XX
+ help
+ If you have a LCD backlight connected to the BRIGHT output of
+ the EP93xx, say Y here to enable this driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ep93xx_bl.
+
config BACKLIGHT_GENERIC
tristate "Generic (aka Sharp Corgi) Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE
default y
help
Say y to enable the generic platform backlight driver previously
@@ -157,7 +170,7 @@ config BACKLIGHT_GENERIC
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO
+ depends on SHARP_LOCOMO
default y
help
If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
@@ -165,7 +178,7 @@ config BACKLIGHT_LOCOMO
config BACKLIGHT_OMAP1
tristate "OMAP1 PWL-based LCD Backlight"
- depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1
+ depends on ARCH_OMAP1
default y
help
This driver controls the LCD backlight level and power for
@@ -174,7 +187,7 @@ config BACKLIGHT_OMAP1
config BACKLIGHT_HP680
tristate "HP Jornada 680 Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX
+ depends on SH_HP6XX
default y
help
If you have a HP Jornada 680, say y to enable the
@@ -182,7 +195,6 @@ config BACKLIGHT_HP680
config BACKLIGHT_HP700
tristate "HP Jornada 700 series Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE
depends on SA1100_JORNADA720_SSP && !PREEMPT
default y
help
@@ -191,76 +203,70 @@ config BACKLIGHT_HP700
config BACKLIGHT_PROGEAR
tristate "Frontpath ProGear Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && PCI && X86
- default n
+ depends on PCI && X86
help
If you have a Frontpath ProGear say Y to enable the
backlight driver.
config BACKLIGHT_CARILLO_RANCH
tristate "Intel Carillo Ranch Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
- default n
+ depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
help
If you have a Intel LE80578 (Carillo Ranch) say Y to enable the
backlight driver.
config BACKLIGHT_PWM
tristate "Generic PWM based Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM
+ depends on HAVE_PWM
help
If you have a LCD backlight adjustable by PWM, say Y to enable
this driver.
config BACKLIGHT_DA903X
tristate "Backlight Driver for DA9030/DA9034 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X
+ depends on PMIC_DA903X
help
If you have a LCD backlight connected to the WLED output of DA9030
or DA9034 WLED output, say Y here to enable this driver.
config BACKLIGHT_MAX8925
tristate "Backlight driver for MAX8925"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925
+ depends on MFD_MAX8925
help
If you have a LCD backlight connected to the WLED output of MAX8925
WLED output, say Y here to enable this driver.
config BACKLIGHT_MBP_NVIDIA
tristate "MacBook Pro Nvidia Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && X86
- default n
+ depends on X86
help
If you have an Apple Macbook Pro with Nvidia graphics hardware say Y
to enable a driver for its backlight
config BACKLIGHT_TOSA
tristate "Sharp SL-6000 Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && I2C
- depends on MACH_TOSA && LCD_TOSA
- default n
+ depends on I2C && MACH_TOSA && LCD_TOSA
help
If you have an Sharp SL-6000 Zaurus say Y to enable a driver
for its backlight
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && X86
- default n
+ depends on X86
help
If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
backlight driver.
config BACKLIGHT_WM831X
tristate "WM831x PMIC Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
+ depends on MFD_WM831X
help
If you have a backlight driven by the ISINK and DCDC of a
WM831x PMIC say y to enable the backlight driver for it.
config BACKLIGHT_ADX
tristate "Avionic Design Xanthos Backlight Driver"
- depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
+ depends on ARCH_PXA_ADX
default y
help
Say Y to enable the backlight driver on Avionic Design Xanthos-based
@@ -268,7 +274,7 @@ config BACKLIGHT_ADX
config BACKLIGHT_ADP5520
tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
+ depends on PMIC_ADP5520
help
If you have a LCD backlight connected to the BST/BL_SNK output of
ADP5520 or ADP5501, say Y here to enable this driver.
@@ -276,9 +282,31 @@ config BACKLIGHT_ADP5520
To compile this driver as a module, choose M here: the module will
be called adp5520_bl.
+config BACKLIGHT_ADP8860
+ tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED"
+ depends on BACKLIGHT_CLASS_DEVICE && I2C
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ If you have a LCD backlight connected to the ADP8860, ADP8861 or
+ ADP8863 say Y here to enable this driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp8860_bl.
+
config BACKLIGHT_88PM860X
tristate "Backlight Driver for 88PM8606 using WLED"
- depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X
+ depends on MFD_88PM860X
help
Say Y to enable the backlight driver for Marvell 88PM8606.
+config BACKLIGHT_PCF50633
+ tristate "Backlight driver for NXP PCF50633 MFD"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633
+ help
+ If you have a backlight driven by an NXP PCF50633 MFD, say Y here to
+ enable its driver.
+
+endif # BACKLIGHT_CLASS_DEVICE
+
+endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 09d1f14d625..44c0f81ad85 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -11,9 +11,11 @@ obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
+obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
+obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
@@ -30,5 +32,7 @@ obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
+obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
+obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
new file mode 100644
index 00000000000..921ca37398f
--- /dev/null
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -0,0 +1,838 @@
+/*
+ * Backlight driver for Analog Devices ADP8860 Backlight Devices
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/i2c/adp8860.h>
+#define ADP8860_EXT_FEATURES
+#define ADP8860_USE_LEDS
+
+#define ADP8860_MFDVID 0x00 /* Manufacturer and device ID */
+#define ADP8860_MDCR 0x01 /* Device mode and status */
+#define ADP8860_MDCR2 0x02 /* Device mode and Status Register 2 */
+#define ADP8860_INTR_EN 0x03 /* Interrupts enable */
+#define ADP8860_CFGR 0x04 /* Configuration register */
+#define ADP8860_BLSEN 0x05 /* Sink enable backlight or independent */
+#define ADP8860_BLOFF 0x06 /* Backlight off timeout */
+#define ADP8860_BLDIM 0x07 /* Backlight dim timeout */
+#define ADP8860_BLFR 0x08 /* Backlight fade in and out rates */
+#define ADP8860_BLMX1 0x09 /* Backlight (Brightness Level 1-daylight) maximum current */
+#define ADP8860_BLDM1 0x0A /* Backlight (Brightness Level 1-daylight) dim current */
+#define ADP8860_BLMX2 0x0B /* Backlight (Brightness Level 2-office) maximum current */
+#define ADP8860_BLDM2 0x0C /* Backlight (Brightness Level 2-office) dim current */
+#define ADP8860_BLMX3 0x0D /* Backlight (Brightness Level 3-dark) maximum current */
+#define ADP8860_BLDM3 0x0E /* Backlight (Brightness Level 3-dark) dim current */
+#define ADP8860_ISCFR 0x0F /* Independent sink current fade control register */
+#define ADP8860_ISCC 0x10 /* Independent sink current control register */
+#define ADP8860_ISCT1 0x11 /* Independent Sink Current Timer Register LED[7:5] */
+#define ADP8860_ISCT2 0x12 /* Independent Sink Current Timer Register LED[4:1] */
+#define ADP8860_ISCF 0x13 /* Independent sink current fade register */
+#define ADP8860_ISC7 0x14 /* Independent Sink Current LED7 */
+#define ADP8860_ISC6 0x15 /* Independent Sink Current LED6 */
+#define ADP8860_ISC5 0x16 /* Independent Sink Current LED5 */
+#define ADP8860_ISC4 0x17 /* Independent Sink Current LED4 */
+#define ADP8860_ISC3 0x18 /* Independent Sink Current LED3 */
+#define ADP8860_ISC2 0x19 /* Independent Sink Current LED2 */
+#define ADP8860_ISC1 0x1A /* Independent Sink Current LED1 */
+#define ADP8860_CCFG 0x1B /* Comparator configuration */
+#define ADP8860_CCFG2 0x1C /* Second comparator configuration */
+#define ADP8860_L2_TRP 0x1D /* L2 comparator reference */
+#define ADP8860_L2_HYS 0x1E /* L2 hysteresis */
+#define ADP8860_L3_TRP 0x1F /* L3 comparator reference */
+#define ADP8860_L3_HYS 0x20 /* L3 hysteresis */
+#define ADP8860_PH1LEVL 0x21 /* First phototransistor ambient light level-low byte register */
+#define ADP8860_PH1LEVH 0x22 /* First phototransistor ambient light level-high byte register */
+#define ADP8860_PH2LEVL 0x23 /* Second phototransistor ambient light level-low byte register */
+#define ADP8860_PH2LEVH 0x24 /* Second phototransistor ambient light level-high byte register */
+
+#define ADP8860_MANUFID 0x0 /* Analog Devices ADP8860 Manufacturer ID */
+#define ADP8861_MANUFID 0x4 /* Analog Devices ADP8861 Manufacturer ID */
+#define ADP8863_MANUFID 0x2 /* Analog Devices ADP8863 Manufacturer ID */
+
+#define ADP8860_DEVID(x) ((x) & 0xF)
+#define ADP8860_MANID(x) ((x) >> 4)
+
+/* MDCR Device mode and status */
+#define INT_CFG (1 << 6)
+#define NSTBY (1 << 5)
+#define DIM_EN (1 << 4)
+#define GDWN_DIS (1 << 3)
+#define SIS_EN (1 << 2)
+#define CMP_AUTOEN (1 << 1)
+#define BLEN (1 << 0)
+
+/* ADP8860_CCFG Main ALS comparator level enable */
+#define L3_EN (1 << 1)
+#define L2_EN (1 << 0)
+
+#define CFGR_BLV_SHIFT 3
+#define CFGR_BLV_MASK 0x3
+#define ADP8860_FLAG_LED_MASK 0xFF
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
+#define ALS_CCFG_VAL(filt) ((0x7 & filt) << 5)
+
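/*
 * (Illustrative example, not part of the driver.)  FADE_VAL() packs the
 * fade-in rate into bits 3:0 and the fade-out rate into bits 7:4, e.g.
 * FADE_VAL(0x3, 0x5) == 0x3 | (0x5 << 4) == 0x53.  BL_CFGR_VAL() places
 * the brightness-level zone in bits 4:3 and the fade law in bits 2:1 of
 * the CFGR register, and ALS_CCFG_VAL() puts the ALS filter setting in
 * bits 7:5 of CCFG.
 */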
+enum {
+ adp8860,
+ adp8861,
+ adp8863
+};
+
+struct adp8860_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct i2c_client *client;
+ enum led_brightness new_brightness;
+ int id;
+ int flags;
+};
+
+struct adp8860_bl {
+ struct i2c_client *client;
+ struct backlight_device *bl;
+ struct adp8860_led *led;
+ struct adp8860_backlight_platform_data *pdata;
+ struct mutex lock;
+ unsigned long cached_daylight_max;
+ int id;
+ int revid;
+ int current_brightness;
+ unsigned en_ambl_sens:1;
+ unsigned gdwn_dis:1;
+};
+
+static int adp8860_read(struct i2c_client *client, int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ return 0;
+}
+
+static int adp8860_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8860_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8860_read(client, reg, &reg_val);
+
+ if (!ret && ((reg_val & bit_mask) == 0)) {
+ reg_val |= bit_mask;
+ ret = adp8860_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
+{
+ struct adp8860_bl *data = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = adp8860_read(client, reg, &reg_val);
+
+ if (!ret && (reg_val & bit_mask)) {
+ reg_val &= ~bit_mask;
+ ret = adp8860_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/*
+ * Independent sink / LED
+ */
+#if defined(ADP8860_USE_LEDS)
+static void adp8860_led_work(struct work_struct *work)
+{
+ struct adp8860_led *led = container_of(work, struct adp8860_led, work);
+ adp8860_write(led->client, ADP8860_ISC1 - led->id + 1,
+ led->new_brightness >> 1);
+}
+
+static void adp8860_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct adp8860_led *led;
+
+ led = container_of(led_cdev, struct adp8860_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int adp8860_led_setup(struct adp8860_led *led)
+{
+ struct i2c_client *client = led->client;
+ int ret = 0;
+
+ ret = adp8860_write(client, ADP8860_ISC1 - led->id + 1, 0);
+ ret |= adp8860_set_bits(client, ADP8860_ISCC, 1 << (led->id - 1));
+
+ if (led->id > 4)
+ ret |= adp8860_set_bits(client, ADP8860_ISCT1,
+ (led->flags & 0x3) << ((led->id - 5) * 2));
+ else
+ ret |= adp8860_set_bits(client, ADP8860_ISCT2,
+ (led->flags & 0x3) << ((led->id - 1) * 2));
+
+ return ret;
+}
+
+static int __devinit adp8860_led_probe(struct i2c_client *client)
+{
+ struct adp8860_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8860_bl *data = i2c_get_clientdata(client);
+ struct adp8860_led *led, *led_dat;
+ struct led_info *cur_led;
+ int ret, i;
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law);
+ ret = adp8860_write(client, ADP8860_ISCT1,
+ (pdata->led_on_time & 0x3) << 6);
+ ret |= adp8860_write(client, ADP8860_ISCF,
+ FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
+
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ cur_led = &pdata->leds[i];
+ led_dat = &led[i];
+
+ led_dat->id = cur_led->flags & ADP8860_FLAG_LED_MASK;
+
+ if (led_dat->id > 7 || led_dat->id < 1) {
+ dev_err(&client->dev, "Invalid LED ID %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
+ dev_err(&client->dev, "LED %d used by Backlight\n",
+ led_dat->id);
+ goto err;
+ }
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->cdev.brightness_set = adp8860_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
+ led_dat->client = client;
+ led_dat->new_brightness = LED_OFF;
+ INIT_WORK(&led_dat->work, adp8860_led_work);
+
+ ret = led_classdev_register(&client->dev, &led_dat->cdev);
+ if (ret) {
+ dev_err(&client->dev, "failed to register LED %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ ret = adp8860_led_setup(led_dat);
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ i++;
+ goto err;
+ }
+ }
+
+ data->led = led;
+
+ return 0;
+
+ err:
+ for (i = i - 1; i >= 0; --i) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ err_free:
+ kfree(led);
+
+ return ret;
+}
+
+static int __devexit adp8860_led_remove(struct i2c_client *client)
+{
+ struct adp8860_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct adp8860_bl *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&data->led[i].cdev);
+ cancel_work_sync(&data->led[i].work);
+ }
+
+ kfree(data->led);
+ return 0;
+}
+#else
+static int __devinit adp8860_led_probe(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int __devexit adp8860_led_remove(struct i2c_client *client)
+{
+ return 0;
+}
+#endif
+
+static int adp8860_bl_set(struct backlight_device *bl, int brightness)
+{
+ struct adp8860_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (data->en_ambl_sens) {
+ if ((brightness > 0) && (brightness < ADP8860_MAX_BRIGHTNESS)) {
+ /* Disable Ambient Light auto adjust */
+ ret |= adp8860_clr_bits(client, ADP8860_MDCR,
+ CMP_AUTOEN);
+ ret |= adp8860_write(client, ADP8860_BLMX1, brightness);
+ } else {
+ /*
+ * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
+ * restore daylight l1 sysfs brightness
+ */
+ ret |= adp8860_write(client, ADP8860_BLMX1,
+ data->cached_daylight_max);
+ ret |= adp8860_set_bits(client, ADP8860_MDCR,
+ CMP_AUTOEN);
+ }
+ } else
+ ret |= adp8860_write(client, ADP8860_BLMX1, brightness);
+
+ if (data->current_brightness && brightness == 0)
+ ret |= adp8860_set_bits(client,
+ ADP8860_MDCR, DIM_EN);
+ else if (data->current_brightness == 0 && brightness)
+ ret |= adp8860_clr_bits(client,
+ ADP8860_MDCR, DIM_EN);
+
+ if (!ret)
+ data->current_brightness = brightness;
+
+ return ret;
+}
+
+static int adp8860_bl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return adp8860_bl_set(bl, brightness);
+}
+
+static int adp8860_bl_get_brightness(struct backlight_device *bl)
+{
+ struct adp8860_bl *data = bl_get_data(bl);
+
+ return data->current_brightness;
+}
+
+static const struct backlight_ops adp8860_bl_ops = {
+ .update_status = adp8860_bl_update_status,
+ .get_brightness = adp8860_bl_get_brightness,
+};
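/*
 * (Illustrative note, not part of the driver.)  The backlight core invokes
 * .update_status whenever the brightness, power or fb_blank properties
 * change; for example, a write to /sys/class/backlight/<name>/brightness
 * ends up in adp8860_bl_update_status() via backlight_update_status().
 */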
+
+static int adp8860_bl_setup(struct backlight_device *bl)
+{
+ struct adp8860_bl *data = bl_get_data(bl);
+ struct i2c_client *client = data->client;
+ struct adp8860_backlight_platform_data *pdata = data->pdata;
+ int ret = 0;
+
+ ret |= adp8860_write(client, ADP8860_BLSEN, ~pdata->bl_led_assign);
+ ret |= adp8860_write(client, ADP8860_BLMX1, pdata->l1_daylight_max);
+ ret |= adp8860_write(client, ADP8860_BLDM1, pdata->l1_daylight_dim);
+
+ if (data->en_ambl_sens) {
+ data->cached_daylight_max = pdata->l1_daylight_max;
+ ret |= adp8860_write(client, ADP8860_BLMX2,
+ pdata->l2_office_max);
+ ret |= adp8860_write(client, ADP8860_BLDM2,
+ pdata->l2_office_dim);
+ ret |= adp8860_write(client, ADP8860_BLMX3,
+ pdata->l3_dark_max);
+ ret |= adp8860_write(client, ADP8860_BLDM3,
+ pdata->l3_dark_dim);
+
+ ret |= adp8860_write(client, ADP8860_L2_TRP, pdata->l2_trip);
+ ret |= adp8860_write(client, ADP8860_L2_HYS, pdata->l2_hyst);
+ ret |= adp8860_write(client, ADP8860_L3_TRP, pdata->l3_trip);
+ ret |= adp8860_write(client, ADP8860_L3_HYS, pdata->l3_hyst);
+ ret |= adp8860_write(client, ADP8860_CCFG, L2_EN | L3_EN |
+ ALS_CCFG_VAL(pdata->abml_filt));
+ }
+
+ ret |= adp8860_write(client, ADP8860_CFGR,
+ BL_CFGR_VAL(pdata->bl_fade_law, 0));
+
+ ret |= adp8860_write(client, ADP8860_BLFR, FADE_VAL(pdata->bl_fade_in,
+ pdata->bl_fade_out));
+
+ ret |= adp8860_set_bits(client, ADP8860_MDCR, BLEN | DIM_EN | NSTBY |
+ (data->gdwn_dis ? GDWN_DIS : 0));
+
+ return ret;
+}
+
+static ssize_t adp8860_show(struct device *dev, char *buf, int reg)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8860_read(data->client, reg, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n", reg_val);
+}
+
+static ssize_t adp8860_store(struct device *dev, const char *buf,
+ size_t count, int reg)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->lock);
+ adp8860_write(data->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return count;
+}
+
+static ssize_t adp8860_bl_l3_dark_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLMX3);
+}
+
+static ssize_t adp8860_bl_l3_dark_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8860_store(dev, buf, count, ADP8860_BLMX3);
+}
+
+static DEVICE_ATTR(l3_dark_max, 0664, adp8860_bl_l3_dark_max_show,
+ adp8860_bl_l3_dark_max_store);
+
+static ssize_t adp8860_bl_l2_office_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLMX2);
+}
+
+static ssize_t adp8860_bl_l2_office_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return adp8860_store(dev, buf, count, ADP8860_BLMX2);
+}
+static DEVICE_ATTR(l2_office_max, 0664, adp8860_bl_l2_office_max_show,
+ adp8860_bl_l2_office_max_store);
+
+static ssize_t adp8860_bl_l1_daylight_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLMX1);
+}
+
+static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+
+ strict_strtoul(buf, 10, &data->cached_daylight_max);
+ return adp8860_store(dev, buf, count, ADP8860_BLMX1);
+}
+static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
+ adp8860_bl_l1_daylight_max_store);
+
+static ssize_t adp8860_bl_l3_dark_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLDM3);
+}
+
+static ssize_t adp8860_bl_l3_dark_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8860_store(dev, buf, count, ADP8860_BLDM3);
+}
+static DEVICE_ATTR(l3_dark_dim, 0664, adp8860_bl_l3_dark_dim_show,
+ adp8860_bl_l3_dark_dim_store);
+
+static ssize_t adp8860_bl_l2_office_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLDM2);
+}
+
+static ssize_t adp8860_bl_l2_office_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8860_store(dev, buf, count, ADP8860_BLDM2);
+}
+static DEVICE_ATTR(l2_office_dim, 0664, adp8860_bl_l2_office_dim_show,
+ adp8860_bl_l2_office_dim_store);
+
+static ssize_t adp8860_bl_l1_daylight_dim_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return adp8860_show(dev, buf, ADP8860_BLDM1);
+}
+
+static ssize_t adp8860_bl_l1_daylight_dim_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return adp8860_store(dev, buf, count, ADP8860_BLDM1);
+}
+static DEVICE_ATTR(l1_daylight_dim, 0664, adp8860_bl_l1_daylight_dim_show,
+ adp8860_bl_l1_daylight_dim_store);
+
+#ifdef ADP8860_EXT_FEATURES
+static ssize_t adp8860_bl_ambient_light_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+ uint16_t ret_val;
+
+ mutex_lock(&data->lock);
+ error = adp8860_read(data->client, ADP8860_PH1LEVL, &reg_val);
+ ret_val = reg_val;
+ error |= adp8860_read(data->client, ADP8860_PH1LEVH, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ /* Return 13-bit conversion value for the first light sensor */
+ ret_val += (reg_val & 0x1F) << 8;
+
+ return sprintf(buf, "%u\n", ret_val);
+}
+static DEVICE_ATTR(ambient_light_level, 0444,
+ adp8860_bl_ambient_light_level_show, NULL);
+
+static ssize_t adp8860_bl_ambient_light_zone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+ int error;
+ uint8_t reg_val;
+
+ mutex_lock(&data->lock);
+ error = adp8860_read(data->client, ADP8860_CFGR, &reg_val);
+ mutex_unlock(&data->lock);
+
+ if (error < 0)
+ return error;
+
+ return sprintf(buf, "%u\n",
+ ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
+}
+
+static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adp8860_bl *data = dev_get_drvdata(dev);
+ unsigned long val;
+ uint8_t reg_val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val == 0) {
+ /* Enable automatic ambient light sensing */
+ adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
+ } else if ((val > 0) && (val < 6)) {
+ /* Disable automatic ambient light sensing */
+ adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
+
+ /* Set user supplied ambient light zone */
+ mutex_lock(&data->lock);
+ adp8860_read(data->client, ADP8860_CFGR, &reg_val);
+ reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
+ reg_val |= val << CFGR_BLV_SHIFT;
+ adp8860_write(data->client, ADP8860_CFGR, reg_val);
+ mutex_unlock(&data->lock);
+ }
+
+ return count;
+}
+static DEVICE_ATTR(ambient_light_zone, 0664,
+ adp8860_bl_ambient_light_zone_show,
+ adp8860_bl_ambient_light_zone_store);
+#endif
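+
The ambient_light_zone attribute above accepts 0 to re-enable automatic ambient light sensing and 1-5 to force a specific zone. As a rough illustration of how userspace might drive it (the sysfs path below is hypothetical; the real path depends on the registered backlight device name):

#include <stdio.h>

/* Illustrative only: "0" re-enables automatic ambient light sensing,
 * "1".."5" forces a specific zone, mirroring
 * adp8860_bl_ambient_light_zone_store() above. The path is hypothetical. */
int main(void)
{
	FILE *f = fopen("/sys/class/backlight/adp8860/ambient_light_zone", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* back to automatic sensing */
	fclose(f);
	return 0;
}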
+
+static struct attribute *adp8860_bl_attributes[] = {
+ &dev_attr_l3_dark_max.attr,
+ &dev_attr_l3_dark_dim.attr,
+ &dev_attr_l2_office_max.attr,
+ &dev_attr_l2_office_dim.attr,
+ &dev_attr_l1_daylight_max.attr,
+ &dev_attr_l1_daylight_dim.attr,
+#ifdef ADP8860_EXT_FEATURES
+ &dev_attr_ambient_light_level.attr,
+ &dev_attr_ambient_light_zone.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group adp8860_bl_attr_group = {
+ .attrs = adp8860_bl_attributes,
+};
+
+static int __devinit adp8860_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct backlight_device *bl;
+ struct adp8860_bl *data;
+ struct adp8860_backlight_platform_data *pdata =
+ client->dev.platform_data;
+ struct backlight_properties props;
+ uint8_t reg_val;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
+ if (ret < 0)
+ goto out2;
+
+ switch (ADP8860_MANID(reg_val)) {
+ case ADP8863_MANUFID:
+ data->gdwn_dis = !!pdata->gdwn_dis;
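+		/* The ADP8863 also supports ambient light sensing: fall through */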
+ case ADP8860_MANUFID:
+ data->en_ambl_sens = !!pdata->en_ambl_sens;
+ break;
+ case ADP8861_MANUFID:
+ data->gdwn_dis = !!pdata->gdwn_dis;
+ break;
+ default:
+ dev_err(&client->dev, "failed to probe\n");
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ /* It's confirmed that the DEVID field is actually a REVID */
+
+ data->revid = ADP8860_DEVID(reg_val);
+ data->client = client;
+ data->pdata = pdata;
+ data->id = id->driver_data;
+ data->current_brightness = 0;
+ i2c_set_clientdata(client, data);
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = ADP8860_MAX_BRIGHTNESS;
+
+ mutex_init(&data->lock);
+
+ bl = backlight_device_register(dev_driver_string(&client->dev),
+ &client->dev, data, &adp8860_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ dev_err(&client->dev, "failed to register backlight\n");
+ ret = PTR_ERR(bl);
+ goto out2;
+ }
+
+ bl->props.max_brightness =
+ bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
+
+ data->bl = bl;
+
+ if (data->en_ambl_sens)
+ ret = sysfs_create_group(&bl->dev.kobj,
+ &adp8860_bl_attr_group);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to register sysfs\n");
+ goto out1;
+ }
+
+ ret = adp8860_bl_setup(bl);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ backlight_update_status(bl);
+
+ dev_info(&client->dev, "%s Rev.%d Backlight\n",
+ client->name, data->revid);
+
+ if (pdata->num_leds)
+ adp8860_led_probe(client);
+
+ return 0;
+
+out:
+ if (data->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8860_bl_attr_group);
+out1:
+ backlight_device_unregister(bl);
+out2:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit adp8860_remove(struct i2c_client *client)
+{
+ struct adp8860_bl *data = i2c_get_clientdata(client);
+
+ adp8860_clr_bits(client, ADP8860_MDCR, NSTBY);
+
+ if (data->led)
+ adp8860_led_remove(client);
+
+ if (data->en_ambl_sens)
+ sysfs_remove_group(&data->bl->dev.kobj,
+ &adp8860_bl_attr_group);
+
+ backlight_device_unregister(data->bl);
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ adp8860_clr_bits(client, ADP8860_MDCR, NSTBY);
+
+ return 0;
+}
+
+static int adp8860_i2c_resume(struct i2c_client *client)
+{
+ adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
+
+ return 0;
+}
+#else
+#define adp8860_i2c_suspend NULL
+#define adp8860_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id adp8860_id[] = {
+ { "adp8860", adp8860 },
+ { "adp8861", adp8861 },
+ { "adp8863", adp8863 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp8860_id);
+
+static struct i2c_driver adp8860_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = adp8860_probe,
+ .remove = __devexit_p(adp8860_remove),
+ .suspend = adp8860_i2c_suspend,
+ .resume = adp8860_i2c_resume,
+ .id_table = adp8860_id,
+};
+
+static int __init adp8860_init(void)
+{
+ return i2c_add_driver(&adp8860_driver);
+}
+module_init(adp8860_init);
+
+static void __exit adp8860_exit(void)
+{
+ i2c_del_driver(&adp8860_driver);
+}
+module_exit(adp8860_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP8860 Backlight driver");
+MODULE_ALIAS("i2c:adp8860-backlight");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 7f4a7c30a98..fe9af129c5d 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -107,8 +107,8 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
props.max_brightness = 0xff;
bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
bl, &adx_backlight_ops, &props);
- if (!bldev) {
- ret = -ENOMEM;
+ if (IS_ERR(bldev)) {
+ ret = PTR_ERR(bldev);
goto out;
}
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
new file mode 100644
index 00000000000..b0cc4918480
--- /dev/null
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -0,0 +1,160 @@
+/*
+ * Driver for the Cirrus EP93xx lcd backlight
+ *
+ * Copyright (c) 2010 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver controls the pulse width modulated brightness control output,
+ * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
+ */
+
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+#include <mach/hardware.h>
+
+#define EP93XX_RASTER_REG(x) (EP93XX_RASTER_BASE + (x))
+#define EP93XX_RASTER_BRIGHTNESS EP93XX_RASTER_REG(0x20)
+
+#define EP93XX_MAX_COUNT 255
+#define EP93XX_MAX_BRIGHT 255
+#define EP93XX_DEF_BRIGHT 128
+
+struct ep93xxbl {
+ void __iomem *mmio;
+ int brightness;
+};
+
+static int ep93xxbl_set(struct backlight_device *bl, int brightness)
+{
+ struct ep93xxbl *ep93xxbl = bl_get_data(bl);
+
+ __raw_writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio);
+
+ ep93xxbl->brightness = brightness;
+
+ return 0;
+}
+
+static int ep93xxbl_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return ep93xxbl_set(bl, brightness);
+}
+
+static int ep93xxbl_get_brightness(struct backlight_device *bl)
+{
+ struct ep93xxbl *ep93xxbl = bl_get_data(bl);
+
+ return ep93xxbl->brightness;
+}
+
+static const struct backlight_ops ep93xxbl_ops = {
+ .update_status = ep93xxbl_update_status,
+ .get_brightness = ep93xxbl_get_brightness,
+};
+
+static int __devinit ep93xxbl_probe(struct platform_device *dev)
+{
+ struct ep93xxbl *ep93xxbl;
+ struct backlight_device *bl;
+ struct backlight_properties props;
+
+ ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL);
+ if (!ep93xxbl)
+ return -ENOMEM;
+
+ /*
+ * This register is located in the range already ioremap'ed by
+	 * the framebuffer driver. An MFD driver seems like overkill
+	 * for this, so use the static I/O mapping; this address
+	 * is already virtual.
+ *
+ * NOTE: No locking is required; the framebuffer does not touch
+ * this register.
+ */
+ ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = EP93XX_MAX_BRIGHT;
+ bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
+ &ep93xxbl_ops, &props);
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
+
+ bl->props.brightness = EP93XX_DEF_BRIGHT;
+
+ platform_set_drvdata(dev, bl);
+
+ ep93xxbl_update_status(bl);
+
+ return 0;
+}
+
+static int __devexit ep93xxbl_remove(struct platform_device *dev)
+{
+ struct backlight_device *bl = platform_get_drvdata(dev);
+
+ backlight_device_unregister(bl);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ep93xxbl_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct backlight_device *bl = platform_get_drvdata(dev);
+
+ return ep93xxbl_set(bl, 0);
+}
+
+static int ep93xxbl_resume(struct platform_device *dev)
+{
+ struct backlight_device *bl = platform_get_drvdata(dev);
+
+ backlight_update_status(bl);
+ return 0;
+}
+#else
+#define ep93xxbl_suspend NULL
+#define ep93xxbl_resume NULL
+#endif
+
+static struct platform_driver ep93xxbl_driver = {
+ .driver = {
+ .name = "ep93xx-bl",
+ .owner = THIS_MODULE,
+ },
+ .probe = ep93xxbl_probe,
+ .remove = __devexit_p(ep93xxbl_remove),
+ .suspend = ep93xxbl_suspend,
+ .resume = ep93xxbl_resume,
+};
+
+static int __init ep93xxbl_init(void)
+{
+ return platform_driver_register(&ep93xxbl_driver);
+}
+module_init(ep93xxbl_init);
+
+static void __exit ep93xxbl_exit(void)
+{
+ platform_driver_unregister(&ep93xxbl_driver);
+}
+module_exit(ep93xxbl_exit);
+
+MODULE_DESCRIPTION("EP93xx Backlight Driver");
+MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-bl");
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index bcdb12c93ef..9093ef0fa86 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -125,8 +125,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
dev_set_drvdata(&spi->dev, priv);
@@ -139,7 +138,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- return ret;
+ goto err;
}
ret = gpio_direction_output(pdata->reset_gpio, 1);
@@ -151,7 +150,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- return ret;
+ goto err2;
}
ret = gpio_direction_output(pdata->data_enable_gpio, 0);
@@ -222,9 +221,9 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
gpio_free(pdata->reset_gpio);
if (priv->io_reg)
- regulator_put(priv->core_reg);
- if (priv->core_reg)
regulator_put(priv->io_reg);
+ if (priv->core_reg)
+ regulator_put(priv->core_reg);
kfree(priv);
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b5accc957ad..b2b2c7ba1f6 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -162,6 +162,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
backlight_update_status(bl);
return 0;
out:
+ backlight_device_unregister(bl);
kfree(data);
return ret;
}
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 1b5d3fe6bbb..9fb533f6373 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -141,7 +141,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
.callback = mbp_dmi_match,
.ident = "MacBook 1,1",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
},
.driver_data = (void *)&intel_chipset_data,
@@ -184,6 +184,42 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBookPro 1,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 1,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 2,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 2,2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
+ },
+ .driver_data = (void *)&intel_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 3,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -238,6 +274,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBook 6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookAir 2,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
new file mode 100644
index 00000000000..3c424f7efdc
--- /dev/null
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * PCF50633 backlight device driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/backlight.h>
+
+struct pcf50633_bl {
+ struct pcf50633 *pcf;
+ struct backlight_device *bl;
+
+ unsigned int brightness;
+ unsigned int brightness_limit;
+};
+
+/*
+ * pcf50633_bl_set_brightness_limit
+ *
+ * Update the brightness limit for the pcf50633 backlight. The actual brightness
+ * will not go above the limit. This is useful to limit power drain for example
+ * on low battery.
+ *
+ * @pcf: Pointer to a pcf50633 device
+ * @limit: The brightness limit. Valid values are 0-63
+ */
+int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit)
+{
+ struct pcf50633_bl *pcf_bl = platform_get_drvdata(pcf->bl_pdev);
+
+ if (!pcf_bl)
+ return -ENODEV;
+
+ pcf_bl->brightness_limit = limit & 0x3f;
+ backlight_update_status(pcf_bl->bl);
+
+ return 0;
+}
+
+static int pcf50633_bl_update_status(struct backlight_device *bl)
+{
+ struct pcf50633_bl *pcf_bl = bl_get_data(bl);
+ unsigned int new_brightness;
+
+ if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) ||
+ bl->props.power != FB_BLANK_UNBLANK)
+ new_brightness = 0;
+ else if (bl->props.brightness < pcf_bl->brightness_limit)
+ new_brightness = bl->props.brightness;
+ else
+ new_brightness = pcf_bl->brightness_limit;
+
+ if (pcf_bl->brightness == new_brightness)
+ return 0;
+
+ if (new_brightness) {
+ pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDOUT,
+ new_brightness);
+ if (!pcf_bl->brightness)
+ pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 1);
+ } else {
+ pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 0);
+ }
+
+ pcf_bl->brightness = new_brightness;
+
+ return 0;
+}
+
+static int pcf50633_bl_get_brightness(struct backlight_device *bl)
+{
+ struct pcf50633_bl *pcf_bl = bl_get_data(bl);
+ return pcf_bl->brightness;
+}
+
+static const struct backlight_ops pcf50633_bl_ops = {
+ .get_brightness = pcf50633_bl_get_brightness,
+ .update_status = pcf50633_bl_update_status,
+ .options = BL_CORE_SUSPENDRESUME,
+};
+
+static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct pcf50633_bl *pcf_bl;
+ struct device *parent = pdev->dev.parent;
+ struct pcf50633_platform_data *pcf50633_data = parent->platform_data;
+ struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data;
+ struct backlight_properties bl_props;
+
+ pcf_bl = kzalloc(sizeof(*pcf_bl), GFP_KERNEL);
+ if (!pcf_bl)
+ return -ENOMEM;
+
+ bl_props.max_brightness = 0x3f;
+ bl_props.power = FB_BLANK_UNBLANK;
+
+ if (pdata) {
+ bl_props.brightness = pdata->default_brightness;
+ pcf_bl->brightness_limit = pdata->default_brightness_limit;
+ } else {
+ bl_props.brightness = 0x3f;
+ pcf_bl->brightness_limit = 0x3f;
+ }
+
+ pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent);
+
+ pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl,
+ &pcf50633_bl_ops, &bl_props);
+
+ if (IS_ERR(pcf_bl->bl)) {
+ ret = PTR_ERR(pcf_bl->bl);
+ goto err_free;
+ }
+
+ platform_set_drvdata(pdev, pcf_bl);
+
+	if (pdata)
+		pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM,
+				   pdata->ramp_time);
+
+ /* Should be different from bl_props.brightness, so we do not exit
+ * update_status early the first time it's called */
+ pcf_bl->brightness = pcf_bl->bl->props.brightness + 1;
+
+ backlight_update_status(pcf_bl->bl);
+
+ return 0;
+
+err_free:
+ kfree(pcf_bl);
+
+ return ret;
+}
+
+static int __devexit pcf50633_bl_remove(struct platform_device *pdev)
+{
+ struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev);
+
+ backlight_device_unregister(pcf_bl->bl);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(pcf_bl);
+
+ return 0;
+}
+
+static struct platform_driver pcf50633_bl_driver = {
+ .probe = pcf50633_bl_probe,
+ .remove = __devexit_p(pcf50633_bl_remove),
+ .driver = {
+ .name = "pcf50633-backlight",
+ },
+};
+
+static int __init pcf50633_bl_init(void)
+{
+ return platform_driver_register(&pcf50633_bl_driver);
+}
+module_init(pcf50633_bl_init);
+
+static void __exit pcf50633_bl_exit(void)
+{
+ platform_driver_unregister(&pcf50633_bl_driver);
+}
+module_exit(pcf50633_bl_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("PCF50633 backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pcf50633-backlight");
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
new file mode 100644
index 00000000000..a3128c9cb7a
--- /dev/null
+++ b/drivers/video/backlight/s6e63m0.c
@@ -0,0 +1,920 @@
+/*
+ * S6E63M0 AMOLED LCD panel driver.
+ *
+ * Author: InKi Dae <inki.dae@samsung.com>
+ *
+ * Derived from drivers/video/omap/lcd-apollon.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/wait.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/lcd.h>
+#include <linux/backlight.h>
+
+#include "s6e63m0_gamma.h"
+
+#define SLEEPMSEC 0x1000
+#define ENDDEF 0x2000
+#define DEFMASK 0xFF00
+#define COMMAND_ONLY 0xFE
+#define DATA_ONLY 0xFF
+
+#define MIN_BRIGHTNESS 0
+#define MAX_BRIGHTNESS 10
+
+#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
+
+struct s6e63m0 {
+ struct device *dev;
+ struct spi_device *spi;
+ unsigned int power;
+ unsigned int current_brightness;
+ unsigned int gamma_mode;
+ unsigned int gamma_table_count;
+ struct lcd_device *ld;
+ struct backlight_device *bd;
+ struct lcd_platform_data *lcd_pd;
+};
+
+static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
+ 0xF8, 0x01,
+ DATA_ONLY, 0x27,
+ DATA_ONLY, 0x27,
+ DATA_ONLY, 0x07,
+ DATA_ONLY, 0x07,
+ DATA_ONLY, 0x54,
+ DATA_ONLY, 0x9f,
+ DATA_ONLY, 0x63,
+ DATA_ONLY, 0x86,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x33,
+ DATA_ONLY, 0x0d,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
+ 0xf2, 0x02,
+ DATA_ONLY, 0x03,
+ DATA_ONLY, 0x1c,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x10,
+
+ 0xf7, 0x03,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_GAMMA_SETTING[] = {
+ 0xfa, 0x00,
+ DATA_ONLY, 0x18,
+ DATA_ONLY, 0x08,
+ DATA_ONLY, 0x24,
+ DATA_ONLY, 0x64,
+ DATA_ONLY, 0x56,
+ DATA_ONLY, 0x33,
+ DATA_ONLY, 0xb6,
+ DATA_ONLY, 0xba,
+ DATA_ONLY, 0xa8,
+ DATA_ONLY, 0xac,
+ DATA_ONLY, 0xb1,
+ DATA_ONLY, 0x9d,
+ DATA_ONLY, 0xc1,
+ DATA_ONLY, 0xc1,
+ DATA_ONLY, 0xb7,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x9c,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x9f,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0xd6,
+
+ 0xfa, 0x01,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_ETC_CONDITION_SET[] = {
+ 0xf6, 0x00,
+ DATA_ONLY, 0x8c,
+ DATA_ONLY, 0x07,
+
+ 0xb3, 0xc,
+
+ 0xb5, 0x2c,
+ DATA_ONLY, 0x12,
+ DATA_ONLY, 0x0c,
+ DATA_ONLY, 0x0a,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x0e,
+ DATA_ONLY, 0x17,
+ DATA_ONLY, 0x13,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x2a,
+ DATA_ONLY, 0x24,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1b,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x17,
+
+ DATA_ONLY, 0x2b,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x3a,
+ DATA_ONLY, 0x34,
+ DATA_ONLY, 0x30,
+ DATA_ONLY, 0x2c,
+ DATA_ONLY, 0x29,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x25,
+ DATA_ONLY, 0x23,
+ DATA_ONLY, 0x21,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x1e,
+ DATA_ONLY, 0x1e,
+
+ 0xb6, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x11,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x33,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+
+ 0xb7, 0x2c,
+ DATA_ONLY, 0x12,
+ DATA_ONLY, 0x0c,
+ DATA_ONLY, 0x0a,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x0e,
+ DATA_ONLY, 0x17,
+ DATA_ONLY, 0x13,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x2a,
+ DATA_ONLY, 0x24,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1b,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x17,
+
+ DATA_ONLY, 0x2b,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x3a,
+ DATA_ONLY, 0x34,
+ DATA_ONLY, 0x30,
+ DATA_ONLY, 0x2c,
+ DATA_ONLY, 0x29,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x25,
+ DATA_ONLY, 0x23,
+ DATA_ONLY, 0x21,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x1e,
+ DATA_ONLY, 0x1e,
+
+ 0xb8, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x11,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x33,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+
+ 0xb9, 0x2c,
+ DATA_ONLY, 0x12,
+ DATA_ONLY, 0x0c,
+ DATA_ONLY, 0x0a,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x0e,
+ DATA_ONLY, 0x17,
+ DATA_ONLY, 0x13,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x2a,
+ DATA_ONLY, 0x24,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x1b,
+ DATA_ONLY, 0x1a,
+ DATA_ONLY, 0x17,
+
+ DATA_ONLY, 0x2b,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x3a,
+ DATA_ONLY, 0x34,
+ DATA_ONLY, 0x30,
+ DATA_ONLY, 0x2c,
+ DATA_ONLY, 0x29,
+ DATA_ONLY, 0x26,
+ DATA_ONLY, 0x25,
+ DATA_ONLY, 0x23,
+ DATA_ONLY, 0x21,
+ DATA_ONLY, 0x20,
+ DATA_ONLY, 0x1e,
+ DATA_ONLY, 0x1e,
+
+ 0xba, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x11,
+ DATA_ONLY, 0x22,
+ DATA_ONLY, 0x33,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+ DATA_ONLY, 0x44,
+
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x55,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+ DATA_ONLY, 0x66,
+
+ 0xc1, 0x4d,
+ DATA_ONLY, 0x96,
+ DATA_ONLY, 0x1d,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x01,
+ DATA_ONLY, 0xdf,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ DATA_ONLY, 0x1f,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x00,
+ DATA_ONLY, 0x03,
+ DATA_ONLY, 0x06,
+ DATA_ONLY, 0x09,
+ DATA_ONLY, 0x0d,
+ DATA_ONLY, 0x0f,
+ DATA_ONLY, 0x12,
+ DATA_ONLY, 0x15,
+ DATA_ONLY, 0x18,
+
+ 0xb2, 0x10,
+ DATA_ONLY, 0x10,
+ DATA_ONLY, 0x0b,
+ DATA_ONLY, 0x05,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_ACL_ON[] = {
+ /* ACL on */
+ 0xc0, 0x01,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_ACL_OFF[] = {
+ /* ACL off */
+ 0xc0, 0x00,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_ELVSS_ON[] = {
+ /* ELVSS on */
+ 0xb1, 0x0b,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_ELVSS_OFF[] = {
+ /* ELVSS off */
+ 0xb1, 0x0a,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_STAND_BY_OFF[] = {
+ 0x11, COMMAND_ONLY,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_STAND_BY_ON[] = {
+ 0x10, COMMAND_ONLY,
+
+ ENDDEF, 0x0000
+};
+
+static const unsigned short SEQ_DISPLAY_ON[] = {
+ 0x29, COMMAND_ONLY,
+
+ ENDDEF, 0x0000
+};
+
+
+static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data)
+{
+ u16 buf[1];
+ struct spi_message msg;
+
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr << 8) | data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address,
+ unsigned char command)
+{
+ int ret = 0;
+
+ if (address != DATA_ONLY)
+ ret = s6e63m0_spi_write_byte(lcd, 0x0, address);
+ if (command != COMMAND_ONLY)
+ ret = s6e63m0_spi_write_byte(lcd, 0x1, command);
+
+ return ret;
+}
+
+static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
+ const unsigned short *wbuf)
+{
+ int ret = 0, i = 0;
+
+ while ((wbuf[i] & DEFMASK) != ENDDEF) {
+ if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
+ ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
+ if (ret)
+ break;
+ } else
+			mdelay(wbuf[i+1]);
+ i += 2;
+ }
+
+ return ret;
+}
+
+static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma)
+{
+ unsigned int i = 0;
+ int ret = 0;
+
+ /* disable gamma table updating. */
+ ret = s6e63m0_spi_write(lcd, 0xfa, 0x00);
+ if (ret) {
+ dev_err(lcd->dev, "failed to disable gamma table updating.\n");
+ goto gamma_err;
+ }
+
+ for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
+ ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]);
+ if (ret) {
+ dev_err(lcd->dev, "failed to set gamma table.\n");
+ goto gamma_err;
+ }
+ }
+
+ /* update gamma table. */
+ ret = s6e63m0_spi_write(lcd, 0xfa, 0x01);
+ if (ret)
+ dev_err(lcd->dev, "failed to update gamma table.\n");
+
+gamma_err:
+ return ret;
+}
+
+static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma)
+{
+ int ret = 0;
+
+ ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
+
+ return ret;
+}
+
+
+static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
+{
+ int ret, i;
+ const unsigned short *init_seq[] = {
+ SEQ_PANEL_CONDITION_SET,
+ SEQ_DISPLAY_CONDITION_SET,
+ SEQ_GAMMA_SETTING,
+ SEQ_ETC_CONDITION_SET,
+ SEQ_ACL_ON,
+ SEQ_ELVSS_ON,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
+{
+ int ret = 0, i;
+ const unsigned short *enable_seq[] = {
+ SEQ_STAND_BY_OFF,
+ SEQ_DISPLAY_ON,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
+ ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
+{
+ int ret;
+
+ ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON);
+
+ return ret;
+}
+
+static int s6e63m0_power_on(struct s6e63m0 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+ struct backlight_device *bd = NULL;
+
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL.\n");
+ return -EFAULT;
+ }
+
+ bd = lcd->bd;
+ if (!bd) {
+ dev_err(lcd->dev, "backlight device is NULL.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->power_on(lcd->ld, 1);
+ mdelay(pd->power_on_delay);
+ }
+
+ if (!pd->reset) {
+ dev_err(lcd->dev, "reset is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->reset(lcd->ld);
+ mdelay(pd->reset_delay);
+ }
+
+ ret = s6e63m0_ldi_init(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to initialize ldi.\n");
+ return ret;
+ }
+
+ ret = s6e63m0_ldi_enable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to enable ldi.\n");
+ return ret;
+ }
+
+ /* set brightness to current value after power on or resume. */
+ ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
+ if (ret) {
+ dev_err(lcd->dev, "lcd gamma setting failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e63m0_power_off(struct s6e63m0 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL.\n");
+ return -EFAULT;
+ }
+
+ ret = s6e63m0_ldi_disable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "lcd setting failed.\n");
+ return -EIO;
+ }
+
+ mdelay(pd->power_off_delay);
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else
+ pd->power_on(lcd->ld, 0);
+
+ return 0;
+}
+
+static int s6e63m0_power(struct s6e63m0 *lcd, int power)
+{
+ int ret = 0;
+
+ if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
+ ret = s6e63m0_power_on(lcd);
+ else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
+ ret = s6e63m0_power_off(lcd);
+
+ if (!ret)
+ lcd->power = power;
+
+ return ret;
+}
+
+static int s6e63m0_set_power(struct lcd_device *ld, int power)
+{
+ struct s6e63m0 *lcd = lcd_get_data(ld);
+
+ if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
+ power != FB_BLANK_NORMAL) {
+ dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
+ return -EINVAL;
+ }
+
+ return s6e63m0_power(lcd, power);
+}
+
+static int s6e63m0_get_power(struct lcd_device *ld)
+{
+ struct s6e63m0 *lcd = lcd_get_data(ld);
+
+ return lcd->power;
+}
+
+static int s6e63m0_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int s6e63m0_set_brightness(struct backlight_device *bd)
+{
+ int ret = 0, brightness = bd->props.brightness;
+ struct s6e63m0 *lcd = bl_get_data(bd);
+
+ if (brightness < MIN_BRIGHTNESS ||
+ brightness > bd->props.max_brightness) {
+ dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
+ MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+ return -EINVAL;
+ }
+
+ ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
+ if (ret) {
+ dev_err(&bd->dev, "lcd brightness setting failed.\n");
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static struct lcd_ops s6e63m0_lcd_ops = {
+ .set_power = s6e63m0_set_power,
+ .get_power = s6e63m0_get_power,
+};
+
+static const struct backlight_ops s6e63m0_backlight_ops = {
+ .get_brightness = s6e63m0_get_brightness,
+ .update_status = s6e63m0_set_brightness,
+};
+
+static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s6e63m0 *lcd = dev_get_drvdata(dev);
+ char temp[10];
+
+ switch (lcd->gamma_mode) {
+ case 0:
+ sprintf(temp, "2.2 mode\n");
+ strcat(buf, temp);
+ break;
+ case 1:
+ sprintf(temp, "1.9 mode\n");
+ strcat(buf, temp);
+ break;
+ case 2:
+ sprintf(temp, "1.7 mode\n");
+ strcat(buf, temp);
+ break;
+ default:
+		dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
+ break;
+ }
+
+ return strlen(buf);
+}
+
+static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+	struct s6e63m0 *lcd = dev_get_drvdata(dev);
+	struct backlight_device *bd = NULL;
+	unsigned long gamma_mode;
+	int brightness, rc;
+
+	rc = strict_strtoul(buf, 0, &gamma_mode);
+	if (rc < 0)
+		return rc;
+
+	lcd->gamma_mode = gamma_mode;
+
+ bd = lcd->bd;
+
+ brightness = bd->props.brightness;
+
+ switch (lcd->gamma_mode) {
+ case 0:
+ _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
+ break;
+ case 1:
+ _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]);
+ break;
+ case 2:
+ _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]);
+ break;
+ default:
+ dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
+ _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
+ break;
+ }
+ return len;
+}
+
+static DEVICE_ATTR(gamma_mode, 0644,
+ s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode);
+
+static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s6e63m0 *lcd = dev_get_drvdata(dev);
+ char temp[3];
+
+ sprintf(temp, "%d\n", lcd->gamma_table_count);
+ strcpy(buf, temp);
+
+ return strlen(buf);
+}
+static DEVICE_ATTR(gamma_table, 0444,
+ s6e63m0_sysfs_show_gamma_table, NULL);
+
+static int __devinit s6e63m0_probe(struct spi_device *spi)
+{
+ int ret = 0;
+ struct s6e63m0 *lcd = NULL;
+ struct lcd_device *ld = NULL;
+ struct backlight_device *bd = NULL;
+
+ lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+	/* The s6e63m0 lcd panel uses 3-wire, 9-bit SPI mode. */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed.\n");
+ goto out_free_lcd;
+ }
+
+ lcd->spi = spi;
+ lcd->dev = &spi->dev;
+
+	lcd->lcd_pd = spi->dev.platform_data;
+	if (!lcd->lcd_pd) {
+		dev_err(&spi->dev, "platform data is NULL.\n");
+		ret = -EINVAL;
+		goto out_free_lcd;
+	}
+
+ ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
+ if (IS_ERR(ld)) {
+ ret = PTR_ERR(ld);
+ goto out_free_lcd;
+ }
+
+ lcd->ld = ld;
+
+ bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd,
+ &s6e63m0_backlight_ops, NULL);
+ if (IS_ERR(bd)) {
+ ret = PTR_ERR(bd);
+ goto out_lcd_unregister;
+ }
+
+ bd->props.max_brightness = MAX_BRIGHTNESS;
+ bd->props.brightness = MAX_BRIGHTNESS;
+ lcd->bd = bd;
+
+	/*
+	 * Export the number of available gamma tables so that
+	 * userspace can query it.
+	 */
+ lcd->gamma_table_count =
+ sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int));
+
+ ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
+ if (ret < 0)
+ dev_err(&(spi->dev), "failed to add sysfs entries\n");
+
+ ret = device_create_file(&(spi->dev), &dev_attr_gamma_table);
+ if (ret < 0)
+ dev_err(&(spi->dev), "failed to add sysfs entries\n");
+
+	/*
+	 * If the lcd panel was already enabled by the bootloader
+	 * (e.g. u-boot), do not power it on again.
+	 */
+ if (!lcd->lcd_pd->lcd_enabled) {
+		/*
+		 * The lcd panel was left off by the bootloader, so the
+		 * current status is powerdown; power the panel on now.
+		 */
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ s6e63m0_power(lcd, FB_BLANK_UNBLANK);
+ } else
+ lcd->power = FB_BLANK_UNBLANK;
+
+ dev_set_drvdata(&spi->dev, lcd);
+
+ dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
+
+ return 0;
+
+out_lcd_unregister:
+ lcd_device_unregister(ld);
+out_free_lcd:
+ kfree(lcd);
+ return ret;
+}
+
+static int __devexit s6e63m0_remove(struct spi_device *spi)
+{
+ struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+
+	s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+	backlight_device_unregister(lcd->bd);
+	lcd_device_unregister(lcd->ld);
+	kfree(lcd);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static unsigned int before_power;
+
+static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ int ret = 0;
+ struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
+
+ before_power = lcd->power;
+
+	/*
+	 * When the lcd panel is suspended, it is always powered off,
+	 * regardless of its current status.
+	 */
+ ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+
+ return ret;
+}
+
+static int s6e63m0_resume(struct spi_device *spi)
+{
+ int ret = 0;
+ struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+
+	/*
+	 * On resume, if the panel was unblanked before the suspend
+	 * (before_power is FB_BLANK_UNBLANK), reset the current status
+	 * to FB_BLANK_POWERDOWN so that the power-on path runs again.
+	 */
+ if (before_power == FB_BLANK_UNBLANK)
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+
+ ret = s6e63m0_power(lcd, before_power);
+
+ return ret;
+}
+#else
+#define s6e63m0_suspend NULL
+#define s6e63m0_resume NULL
+#endif
+
+/* Power down all displays on reboot, poweroff or halt. */
+static void s6e63m0_shutdown(struct spi_device *spi)
+{
+ struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
+
+ s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static struct spi_driver s6e63m0_driver = {
+ .driver = {
+ .name = "s6e63m0",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = s6e63m0_probe,
+ .remove = __devexit_p(s6e63m0_remove),
+ .shutdown = s6e63m0_shutdown,
+ .suspend = s6e63m0_suspend,
+ .resume = s6e63m0_resume,
+};
+
+static int __init s6e63m0_init(void)
+{
+ return spi_register_driver(&s6e63m0_driver);
+}
+
+static void __exit s6e63m0_exit(void)
+{
+ spi_unregister_driver(&s6e63m0_driver);
+}
+
+module_init(s6e63m0_init);
+module_exit(s6e63m0_exit);
+
+MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("S6E63M0 LCD Driver");
+MODULE_LICENSE("GPL");
+
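
s6e63m0_spi_write_byte() above frames each transfer as a 9-bit word with the D/C flag in bit 8 (0 for a command/address byte, 1 for data) and the payload in bits 7..0. A small sketch of that framing, kept separate from the driver:

#include <stdint.h>
#include <stdio.h>

/* Build the 9-bit word sent by s6e63m0_spi_write_byte(): the D/C flag
 * (0 = command/address, 1 = data) sits in bit 8, the payload in bits
 * 7..0. Illustrative only; the driver hands this to spi_sync(). */
static uint16_t s6e63m0_spi_word(int dc, uint8_t payload)
{
	return (uint16_t)(((dc & 1) << 8) | payload);
}

int main(void)
{
	printf("0x%03x\n", (unsigned int)s6e63m0_spi_word(0, 0xF8)); /* command 0x0f8 */
	printf("0x%03x\n", (unsigned int)s6e63m0_spi_word(1, 0x27)); /* data    0x127 */
	return 0;
}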
diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h
new file mode 100644
index 00000000000..2c44bdb0696
--- /dev/null
+++ b/drivers/video/backlight/s6e63m0_gamma.h
@@ -0,0 +1,266 @@
+/* linux/drivers/video/backlight/s6e63m0_gamma.h
+ *
+ * Gamma level definitions.
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * InKi Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _S6E63M0_BRIGHTNESS_H
+#define _S6E63M0_BRIGHTNESS_H
+
+#define MAX_GAMMA_LEVEL 11
+#define GAMMA_TABLE_COUNT 21
+
+/* gamma value: 2.2 */
+static const unsigned int s6e63m0_22_300[] = {
+ 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
+ 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
+ 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb
+};
+
+static const unsigned int s6e63m0_22_280[] = {
+ 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
+ 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
+ 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
+};
+
+static const unsigned int s6e63m0_22_260[] = {
+ 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
+ 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
+ 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
+
+};
+
+static const unsigned int s6e63m0_22_240[] = {
+ 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
+ 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
+ 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA
+
+};
+static const unsigned int s6e63m0_22_220[] = {
+ 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
+ 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
+ 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
+};
+
+static const unsigned int s6e63m0_22_200[] = {
+ 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
+ 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
+ 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
+};
+
+static const unsigned int s6e63m0_22_170[] = {
+ 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
+ 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
+ 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
+};
+
+static const unsigned int s6e63m0_22_140[] = {
+ 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
+ 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
+ 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
+};
+
+static const unsigned int s6e63m0_22_110[] = {
+ 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
+ 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
+ 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
+};
+
+static const unsigned int s6e63m0_22_90[] = {
+ 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
+ 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
+ 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
+};
+
+static const unsigned int s6e63m0_22_30[] = {
+ 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
+ 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
+ 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
+};
+
+/* gamma value: 1.9 */
+static const unsigned int s6e63m0_19_300[] = {
+ 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA,
+ 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5,
+ 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
+};
+
+static const unsigned int s6e63m0_19_280[] = {
+ 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB,
+ 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7,
+ 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5
+};
+
+static const unsigned int s6e63m0_19_260[] = {
+ 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA,
+ 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8,
+ 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF
+};
+
+static const unsigned int s6e63m0_19_240[] = {
+ 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB,
+ 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9,
+ 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8
+};
+
+static const unsigned int s6e63m0_19_220[] = {
+ 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC,
+ 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA,
+ 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0
+};
+
+static const unsigned int s6e63m0_19_200[] = {
+ 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE,
+ 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB,
+ 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8
+};
+
+static const unsigned int s6e63m0_19_170[] = {
+ 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF,
+ 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD,
+ 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA
+};
+
+static const unsigned int s6e63m0_19_140[] = {
+ 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0,
+ 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0,
+ 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C
+};
+
+static const unsigned int s6e63m0_19_110[] = {
+ 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2,
+ 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1,
+ 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D
+};
+
+static const unsigned int s6e63m0_19_90[] = {
+ 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3,
+ 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3,
+ 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81
+};
+
+static const unsigned int s6e63m0_19_30[] = {
+ 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA,
+ 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA,
+ 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E
+};
+
+/* gamma value: 1.7 */
+static const unsigned int s6e63m0_17_300[] = {
+ 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF,
+ 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD,
+ 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
+};
+
+static const unsigned int s6e63m0_17_280[] = {
+ 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF,
+ 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD,
+ 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
+};
+
+static const unsigned int s6e63m0_17_260[] = {
+ 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0,
+ 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF,
+ 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
+};
+
+static const unsigned int s6e63m0_17_240[] = {
+ 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2,
+ 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF,
+ 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA
+};
+
+static const unsigned int s6e63m0_17_220[] = {
+ 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2,
+ 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1,
+ 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
+};
+
+static const unsigned int s6e63m0_17_200[] = {
+ 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2,
+ 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1,
+ 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
+};
+
+static const unsigned int s6e63m0_17_170[] = {
+ 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3,
+ 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3,
+ 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
+};
+
+static const unsigned int s6e63m0_17_140[] = {
+ 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3,
+ 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4,
+ 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
+};
+
+static const unsigned int s6e63m0_17_110[] = {
+ 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6,
+ 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8,
+ 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
+};
+
+static const unsigned int s6e63m0_17_90[] = {
+ 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8,
+ 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8,
+ 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
+};
+
+static const unsigned int s6e63m0_17_30[] = {
+ 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1,
+ 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0,
+ 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
+};
+
+struct s6e63m0_gamma {
+ unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
+ unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
+ unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
+};
+
+static struct s6e63m0_gamma gamma_table = {
+ .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30,
+ .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90,
+ .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110,
+ .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140,
+ .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170,
+ .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200,
+ .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220,
+ .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240,
+ .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260,
+ .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280,
+ .gamma_22_table[10] = (unsigned int *)&s6e63m0_22_300,
+
+ .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30,
+ .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90,
+ .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110,
+ .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140,
+ .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170,
+ .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200,
+ .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220,
+ .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240,
+ .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260,
+ .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280,
+ .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300,
+
+ .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30,
+ .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90,
+ .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110,
+ .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140,
+ .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170,
+ .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200,
+ .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220,
+ .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240,
+ .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260,
+ .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280,
+ .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300,
+};
+
+#endif
+
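
The gamma header above provides three gamma curves (2.2, 1.9 and 1.7), each with MAX_GAMMA_LEVEL (11) brightness rows of GAMMA_TABLE_COUNT (21) register values; s6e63m0_gamma_ctl() indexes a row directly by the backlight brightness. A dummy-table sketch of that indexing (data values are placeholders, not the real tables):

#include <stdio.h>

#define MAX_GAMMA_LEVEL		11
#define GAMMA_TABLE_COUNT	21

/* Placeholder tables demonstrating the indexing s6e63m0_gamma_ctl()
 * performs on the real s6e63m0_22_* / _19_* / _17_* arrays: curve
 * (0: 2.2, 1: 1.9, 2: 1.7) by brightness row by register value. */
static unsigned int demo_gamma[3][MAX_GAMMA_LEVEL][GAMMA_TABLE_COUNT];

int main(void)
{
	int gamma_mode = 0;	/* 2.2 curve */
	int brightness = 5;	/* 0..MAX_GAMMA_LEVEL - 1 */
	const unsigned int *row = demo_gamma[gamma_mode][brightness];

	printf("curve %d, level %d: %d register values, first = 0x%02x\n",
	       gamma_mode, brightness, GAMMA_TABLE_COUNT, row[0]);
	return 0;
}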
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c0dbf..b020ba7f1cf 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
{
+#ifndef NO_BL_SUPPORT
struct backlight_properties props;
+#endif
struct bfin_bf54xfb_info *info;
struct fb_info *fbinfo;
int ret;
@@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
printk(KERN_ERR DRIVER_NAME
": unable to register backlight.\n");
ret = -EINVAL;
- goto out9;
+ unregister_framebuffer(fbinfo);
+ goto out8;
}
lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
return 0;
-out9:
- unregister_framebuffer(fbinfo);
out8:
free_irq(info->irq, info);
out7:
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 2baac7cc142..c8e1f04941b 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -61,47 +61,13 @@
#define LCD_X_RES 320 /* Horizontal Resolution */
#define LCD_Y_RES 240 /* Vertical Resolution */
#define DMA_BUS_SIZE 16
+#define U_LINE 4 /* Blanking Lines */
-#define USE_RGB565_16_BIT_PPI
-
-#ifdef USE_RGB565_16_BIT_PPI
-#define LCD_BPP 16 /* Bit Per Pixel */
-#define CLOCKS_PER_PIX 1
-#define CPLD_PIPELINE_DELAY_COR 0 /* NO CPLB */
-#endif
/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
* http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
*/
-#ifdef USE_RGB565_8_BIT_PPI
-#define LCD_BPP 16 /* Bit Per Pixel */
-#define CLOCKS_PER_PIX 2
-#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
-#endif
-
-#ifdef USE_RGB888_8_BIT_PPI
-#define LCD_BPP 24 /* Bit Per Pixel */
-#define CLOCKS_PER_PIX 3
-#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
-#endif
-
- /*
- * HS and VS timing parameters (all in number of PPI clk ticks)
- */
-
-#define U_LINE 4 /* Blanking Lines */
-
-#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
-#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
-#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
-#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
-
-#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
-#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
-#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
-
-#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
@@ -110,12 +76,6 @@
#define PPI_PORT_CFG_01 0x10
#define PPI_POLS_1 0x8000
-#if (CLOCKS_PER_PIX > 1)
-#define PPI_PMODE (DLEN_8 | PACK_EN)
-#else
-#define PPI_PMODE (DLEN_16)
-#endif
-
#define LQ035_INDEX 0x74
#define LQ035_DATA 0x76
@@ -139,6 +99,15 @@ struct bfin_lq035q1fb_info {
int irq;
spinlock_t lock; /* lock */
u32 pseudo_pal[16];
+
+ u32 lcd_bpp;
+ u32 h_actpix;
+ u32 h_period;
+ u32 h_pulse;
+ u32 h_start;
+ u32 v_lines;
+ u32 v_pulse;
+ u32 v_period;
};
static int nocursor;
@@ -234,16 +203,69 @@ static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
return 0;
}
+static int bfin_lq035q1_calc_timing(struct bfin_lq035q1fb_info *fbi)
+{
+ unsigned long clocks_per_pix, cpld_pipeline_delay_cor;
+
+ /*
+ * Interface 16/18-bit TFT over an 8-bit wide PPI using a small
+ * Programmable Logic Device (CPLD)
+ * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
+ */
+
+ switch (fbi->disp_info->ppi_mode) {
+ case USE_RGB565_16_BIT_PPI:
+ fbi->lcd_bpp = 16;
+ clocks_per_pix = 1;
+ cpld_pipeline_delay_cor = 0;
+ break;
+ case USE_RGB565_8_BIT_PPI:
+ fbi->lcd_bpp = 16;
+ clocks_per_pix = 2;
+ cpld_pipeline_delay_cor = 3;
+ break;
+ case USE_RGB888_8_BIT_PPI:
+ fbi->lcd_bpp = 24;
+ clocks_per_pix = 3;
+ cpld_pipeline_delay_cor = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * HS and VS timing parameters (all in number of PPI clk ticks)
+ */
+
+ fbi->h_actpix = (LCD_X_RES * clocks_per_pix); /* active horizontal pixel */
+ fbi->h_period = (336 * clocks_per_pix); /* HS period */
+ fbi->h_pulse = (2 * clocks_per_pix); /* HS pulse width */
+ fbi->h_start = (7 * clocks_per_pix + cpld_pipeline_delay_cor); /* first valid pixel */
+
+ fbi->v_lines = (LCD_Y_RES + U_LINE); /* total vertical lines */
+ fbi->v_pulse = (2 * clocks_per_pix); /* VS pulse width (1-5 H_PERIODs) */
+ fbi->v_period = (fbi->h_period * fbi->v_lines); /* VS period */
+
+ return 0;
+}
+
static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
{
- bfin_write_PPI_DELAY(H_START);
- bfin_write_PPI_COUNT(H_ACTPIX - 1);
- bfin_write_PPI_FRAME(V_LINES);
+ unsigned ppi_pmode;
+
+ if (fbi->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI)
+ ppi_pmode = DLEN_16;
+ else
+ ppi_pmode = (DLEN_8 | PACK_EN);
+
+ bfin_write_PPI_DELAY(fbi->h_start);
+ bfin_write_PPI_COUNT(fbi->h_actpix - 1);
+ bfin_write_PPI_FRAME(fbi->v_lines);
bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
- PPI_PMODE | /* 8/16 bit data length / PACK_EN? */
+ ppi_pmode | /* 8/16 bit data length / PACK_EN? */
PPI_POLS_1); /* faling edge syncs POLS */
}
@@ -272,19 +294,19 @@ static void bfin_lq035q1_stop_timers(void)
}
-static void bfin_lq035q1_init_timers(void)
+static void bfin_lq035q1_init_timers(struct bfin_lq035q1fb_info *fbi)
{
bfin_lq035q1_stop_timers();
- set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
- set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
+ set_gptimer_period(TIMER_HSYNC_id, fbi->h_period);
+ set_gptimer_pwidth(TIMER_HSYNC_id, fbi->h_pulse);
set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
TIMER_TIN_SEL | TIMER_CLK_SEL|
TIMER_EMU_RUN);
- set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
- set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
+ set_gptimer_period(TIMER_VSYNC_id, fbi->v_period);
+ set_gptimer_pwidth(TIMER_VSYNC_id, fbi->v_pulse);
set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
TIMER_TIN_SEL | TIMER_CLK_SEL |
TIMER_EMU_RUN);
@@ -294,21 +316,21 @@ static void bfin_lq035q1_init_timers(void)
static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
{
+
set_dma_config(CH_PPI,
set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
INTR_DISABLE, DIMENSION_2D,
DATA_SIZE_16,
DMA_NOSYNC_KEEP_DMA_BUF));
- set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
+ set_dma_x_count(CH_PPI, (LCD_X_RES * fbi->lcd_bpp) / DMA_BUS_SIZE);
set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
- set_dma_y_count(CH_PPI, V_LINES);
+ set_dma_y_count(CH_PPI, fbi->v_lines);
set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
}
-#if (CLOCKS_PER_PIX == 1)
static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
@@ -316,22 +338,27 @@ static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
P_PPI0_D15, 0};
-#else
-static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+
+static const u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
P_PPI0_D6, P_PPI0_D7, 0};
-#endif
-static inline void bfin_lq035q1_free_ports(void)
+static inline void bfin_lq035q1_free_ports(unsigned ppi16)
{
- peripheral_free_list(ppi0_req_16);
+ if (ppi16)
+ peripheral_free_list(ppi0_req_16);
+ else
+ peripheral_free_list(ppi0_req_8);
+
if (ANOMALY_05000400)
gpio_free(P_IDENT(P_PPI0_FS3));
}
-static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
+static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev,
+ unsigned ppi16)
{
+ int ret;
/* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
* Drive PPI_FS3 Low
*/
@@ -342,7 +369,12 @@ static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
}
- if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
+ if (ppi16)
+ ret = peripheral_request_list(ppi0_req_16, DRIVER_NAME);
+ else
+ ret = peripheral_request_list(ppi0_req_8, DRIVER_NAME);
+
+ if (ret) {
dev_err(&pdev->dev, "requesting peripherals failed\n");
return -EFAULT;
}
@@ -364,7 +396,7 @@ static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
bfin_lq035q1_config_dma(fbi);
bfin_lq035q1_config_ppi(fbi);
- bfin_lq035q1_init_timers();
+ bfin_lq035q1_init_timers(fbi);
/* start dma */
enable_dma(CH_PPI);
@@ -402,12 +434,9 @@ static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- switch (var->bits_per_pixel) {
-#if (LCD_BPP == 24)
- case 24:/* TRUECOLOUR, 16m */
-#else
- case 16:/* DIRECTCOLOUR, 64k */
-#endif
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ if (var->bits_per_pixel == fbi->lcd_bpp) {
var->red.offset = info->var.red.offset;
var->green.offset = info->var.green.offset;
var->blue.offset = info->var.blue.offset;
@@ -420,8 +449,7 @@ static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
- break;
- default:
+ } else {
pr_debug("%s: depth not supported: %u BPP\n", __func__,
var->bits_per_pixel);
return -EINVAL;
@@ -528,6 +556,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
{
struct bfin_lq035q1fb_info *info;
struct fb_info *fbinfo;
+ u32 active_video_mem_offset;
int ret;
ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
@@ -550,6 +579,12 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fbinfo);
+ ret = bfin_lq035q1_calc_timing(info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed PPI Mode\n");
+ goto out3;
+ }
+
strcpy(fbinfo->fix.id, DRIVER_NAME);
fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -571,46 +606,48 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
fbinfo->var.xres_virtual = LCD_X_RES;
fbinfo->var.yres = LCD_Y_RES;
fbinfo->var.yres_virtual = LCD_Y_RES;
- fbinfo->var.bits_per_pixel = LCD_BPP;
+ fbinfo->var.bits_per_pixel = info->lcd_bpp;
if (info->disp_info->mode & LQ035_BGR) {
-#if (LCD_BPP == 24)
- fbinfo->var.red.offset = 0;
- fbinfo->var.green.offset = 8;
- fbinfo->var.blue.offset = 16;
-#else
- fbinfo->var.red.offset = 0;
- fbinfo->var.green.offset = 5;
- fbinfo->var.blue.offset = 11;
-#endif
+ if (info->lcd_bpp == 24) {
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 16;
+ } else {
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 11;
+ }
} else {
-#if (LCD_BPP == 24)
- fbinfo->var.red.offset = 16;
- fbinfo->var.green.offset = 8;
- fbinfo->var.blue.offset = 0;
-#else
- fbinfo->var.red.offset = 11;
- fbinfo->var.green.offset = 5;
- fbinfo->var.blue.offset = 0;
-#endif
+ if (info->lcd_bpp == 24) {
+ fbinfo->var.red.offset = 16;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 0;
+ } else {
+ fbinfo->var.red.offset = 11;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 0;
+ }
}
fbinfo->var.transp.offset = 0;
-#if (LCD_BPP == 24)
- fbinfo->var.red.length = 8;
- fbinfo->var.green.length = 8;
- fbinfo->var.blue.length = 8;
-#else
- fbinfo->var.red.length = 5;
- fbinfo->var.green.length = 6;
- fbinfo->var.blue.length = 5;
-#endif
+ if (info->lcd_bpp == 24) {
+ fbinfo->var.red.length = 8;
+ fbinfo->var.green.length = 8;
+ fbinfo->var.blue.length = 8;
+ } else {
+ fbinfo->var.red.length = 5;
+ fbinfo->var.green.length = 6;
+ fbinfo->var.blue.length = 5;
+ }
fbinfo->var.transp.length = 0;
- fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
- + ACTIVE_VIDEO_MEM_OFFSET;
+ active_video_mem_offset = ((U_LINE / 2) * LCD_X_RES * (info->lcd_bpp / 8));
+
+ fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * info->lcd_bpp / 8
+ + active_video_mem_offset;
fbinfo->fix.line_length = fbinfo->var.xres_virtual *
fbinfo->var.bits_per_pixel / 8;
@@ -629,8 +666,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
goto out3;
}
- fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
- fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+ fbinfo->screen_base = (void *)info->fb_buffer + active_video_mem_offset;
+ fbinfo->fix.smem_start = (int)info->fb_buffer + active_video_mem_offset;
fbinfo->fbops = &bfin_lq035q1_fb_ops;
@@ -643,7 +680,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
goto out4;
}
- ret = bfin_lq035q1_request_ports(pdev);
+ ret = bfin_lq035q1_request_ports(pdev,
+ info->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI);
if (ret) {
dev_err(&pdev->dev, "couldn't request gpio port\n");
goto out6;
@@ -693,7 +731,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
- LCD_X_RES, LCD_Y_RES, LCD_BPP);
+ LCD_X_RES, LCD_Y_RES, info->lcd_bpp);
return 0;
@@ -705,7 +743,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
out8:
free_irq(info->irq, info);
out7:
- bfin_lq035q1_free_ports();
+ bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
+ USE_RGB565_16_BIT_PPI);
out6:
fb_dealloc_cmap(&fbinfo->cmap);
out4:
@@ -742,7 +781,8 @@ static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
fb_dealloc_cmap(&fbinfo->cmap);
- bfin_lq035q1_free_ports();
+ bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
+ USE_RGB565_16_BIT_PPI);
platform_set_drvdata(pdev, NULL);
framebuffer_release(fbinfo);
@@ -781,7 +821,7 @@ static int bfin_lq035q1_resume(struct device *dev)
bfin_lq035q1_config_dma(info);
bfin_lq035q1_config_ppi(info);
- bfin_lq035q1_init_timers();
+ bfin_lq035q1_init_timers(info);
/* start dma */
enable_dma(CH_PPI);
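
For reference: the hunks above drop the compile-time LCD_BPP/H_PERIOD/V_LINES constants in favour of per-device fields filled in by bfin_lq035q1_calc_timing(), so one binary can drive both the 16- and 24-bpp panel modes over the 8- or 16-bit PPI. A minimal sketch (not part of the patch; helper name is hypothetical, assumes <linux/fb.h>) of the bpp-to-bitfield mapping the probe code above performs:

	/*
	 * Sketch only: map the runtime bpp/BGR selection to fb bitfields,
	 * mirroring the probe logic above.
	 */
	static void lq035q1_fill_rgb(struct fb_var_screeninfo *var, int bpp, int bgr)
	{
		if (bpp == 24) {			/* RGB888 */
			var->red.length = var->green.length = var->blue.length = 8;
			var->red.offset = bgr ? 0 : 16;
			var->green.offset = 8;
			var->blue.offset = bgr ? 16 : 0;
		} else {				/* RGB565 */
			var->red.length = 5;
			var->green.length = 6;
			var->blue.length = 5;
			var->red.offset = bgr ? 0 : 11;
			var->green.offset = 5;
			var->blue.offset = bgr ? 11 : 0;
		}
		var->transp.offset = 0;
		var->transp.length = 0;
	}
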
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd4e9..7a50272eaab 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
{
+#ifndef NO_BL_SUPPORT
struct backlight_properties props;
+#endif
struct bfin_t350mcqbfb_info *info;
struct fb_info *fbinfo;
int ret;
@@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
printk(KERN_ERR DRIVER_NAME
": unable to register backlight.\n");
ret = -EINVAL;
- goto out9;
+ unregister_framebuffer(fbinfo);
+ goto out8;
}
lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
return 0;
-out9:
- unregister_framebuffer(fbinfo);
out8:
free_irq(info->irq, info);
out7:
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 43320925c4c..2c371c07f0d 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -376,8 +376,11 @@ static const struct of_device_id bw2_match[] = {
MODULE_DEVICE_TABLE(of, bw2_match);
static struct of_platform_driver bw2_driver = {
- .name = "bw2",
- .match_table = bw2_match,
+ .driver = {
+ .name = "bw2",
+ .owner = THIS_MODULE,
+ .of_match_table = bw2_match,
+ },
.probe = bw2_probe,
.remove = __devexit_p(bw2_remove),
};
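
This bw2 hunk and the matching cg3/cg6/cg14/leo/ffb/mb862xx/fsl-diu hunks below are the same mechanical conversion: the legacy .name/.match_table members of struct of_platform_driver move into the embedded struct device_driver (as .name/.of_match_table, plus .owner), and device nodes are reached through op->dev.of_node rather than op->node. The resulting shape, sketched with placeholder symbols (the examplefb_* names are hypothetical):

	static struct of_platform_driver examplefb_driver = {
		.driver = {
			.name = "examplefb",		/* placeholder */
			.owner = THIS_MODULE,
			.of_match_table = examplefb_match,
		},
		.probe = examplefb_probe,
		.remove = __devexit_p(examplefb_remove),
	};
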
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 77a040af20a..d12e05b6e63 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -596,8 +596,11 @@ static const struct of_device_id cg14_match[] = {
MODULE_DEVICE_TABLE(of, cg14_match);
static struct of_platform_driver cg14_driver = {
- .name = "cg14",
- .match_table = cg14_match,
+ .driver = {
+ .name = "cg14",
+ .owner = THIS_MODULE,
+ .of_match_table = cg14_match,
+ },
.probe = cg14_probe,
.remove = __devexit_p(cg14_remove),
};
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 30eedf79322..b98f93f7f66 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -463,8 +463,11 @@ static const struct of_device_id cg3_match[] = {
MODULE_DEVICE_TABLE(of, cg3_match);
static struct of_platform_driver cg3_driver = {
- .name = "cg3",
- .match_table = cg3_match,
+ .driver = {
+ .name = "cg3",
+ .owner = THIS_MODULE,
+ .of_match_table = cg3_match,
+ },
.probe = cg3_probe,
.remove = __devexit_p(cg3_remove),
};
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 6d0fcb43696..480d761a27a 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -740,7 +740,7 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
static int __devinit cg6_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct fb_info *info;
struct cg6_par *par;
int linebytes, err;
@@ -856,8 +856,11 @@ static const struct of_device_id cg6_match[] = {
MODULE_DEVICE_TABLE(of, cg6_match);
static struct of_platform_driver cg6_driver = {
- .name = "cg6",
- .match_table = cg6_match,
+ .driver = {
+ .name = "cg6",
+ .owner = THIS_MODULE,
+ .of_match_table = cg6_match,
+ },
.probe = cg6_probe,
.remove = __devexit_p(cg6_remove),
};
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 8d8dfda2f86..6df7c54db0a 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -299,6 +299,7 @@ static const struct zorro_device_id cirrusfb_zorro_table[] = {
},
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
static const struct {
zorro_id id2;
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 5eb61b5adfe..42fe155aba0 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -123,7 +123,7 @@ static void lcd_clear(struct fb_info *info)
lcd_write_control(info, LCD_RESET);
}
-static struct fb_fix_screeninfo cobalt_lcdfb_fix __initdata = {
+static struct fb_fix_screeninfo cobalt_lcdfb_fix __devinitdata = {
.id = "cobalt-lcd",
.type = FB_TYPE_TEXT,
.type_aux = FB_AUX_TEXT_MDA,
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 8d244ba0f60..cad7d45c8ba 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -36,7 +36,9 @@
#define DRIVER_NAME "da8xx_lcdc"
/* LCD Status Register */
+#define LCD_END_OF_FRAME1 BIT(9)
#define LCD_END_OF_FRAME0 BIT(8)
+#define LCD_PL_LOAD_DONE BIT(6)
#define LCD_FIFO_UNDERFLOW BIT(5)
#define LCD_SYNC_LOST BIT(2)
@@ -58,11 +60,13 @@
#define LCD_PALETTE_LOAD_MODE(x) ((x) << 20)
#define PALETTE_AND_DATA 0x00
#define PALETTE_ONLY 0x01
+#define DATA_ONLY 0x02
#define LCD_MONO_8BIT_MODE BIT(9)
#define LCD_RASTER_ORDER BIT(8)
#define LCD_TFT_MODE BIT(7)
#define LCD_UNDERFLOW_INT_ENA BIT(6)
+#define LCD_PL_ENABLE BIT(4)
#define LCD_MONOCHROME_MODE BIT(1)
#define LCD_RASTER_ENABLE BIT(0)
#define LCD_TFT_ALT_ENABLE BIT(23)
@@ -87,6 +91,10 @@
#define LCD_DMA_CTRL_REG 0x40
#define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44
#define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48
+#define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C
+#define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50
+
+#define LCD_NUM_BUFFERS 2
#define WSI_TIMEOUT 50
#define PALETTE_SIZE 256
@@ -111,13 +119,20 @@ static inline void lcdc_write(unsigned int val, unsigned int addr)
struct da8xx_fb_par {
resource_size_t p_palette_base;
unsigned char *v_palette_base;
+ dma_addr_t vram_phys;
+ unsigned long vram_size;
+ void *vram_virt;
+ unsigned int dma_start;
+ unsigned int dma_end;
struct clk *lcdc_clk;
int irq;
unsigned short pseudo_palette[16];
- unsigned int databuf_sz;
unsigned int palette_sz;
unsigned int pxl_clk;
int blank;
+ wait_queue_head_t vsync_wait;
+ int vsync_flag;
+ int vsync_timeout;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -148,9 +163,9 @@ static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
.type = FB_TYPE_PACKED_PIXELS,
.type_aux = 0,
.visual = FB_VISUAL_PSEUDOCOLOR,
- .xpanstep = 1,
+ .xpanstep = 0,
.ypanstep = 1,
- .ywrapstep = 1,
+ .ywrapstep = 0,
.accel = FB_ACCEL_NONE
};
@@ -221,22 +236,48 @@ static inline void lcd_disable_raster(void)
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
{
- u32 tmp = par->p_palette_base + par->databuf_sz - 4;
- u32 reg;
+ u32 start;
+ u32 end;
+ u32 reg_ras;
+ u32 reg_dma;
+
+ /* init reg to clear PLM (loading mode) fields */
+ reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
+ reg_ras &= ~(3 << 20);
+
+ reg_dma = lcdc_read(LCD_DMA_CTRL_REG);
+
+ if (load_mode == LOAD_DATA) {
+ start = par->dma_start;
+ end = par->dma_end;
+
+ reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY);
+ reg_dma |= LCD_END_OF_FRAME_INT_ENA;
+ reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
+
+ lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+ } else if (load_mode == LOAD_PALETTE) {
+ start = par->p_palette_base;
+ end = start + par->palette_sz - 1;
+
+ reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
+ reg_ras |= LCD_PL_ENABLE;
+
+ lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ }
- /* Update the databuf in the hw. */
- lcdc_write(par->p_palette_base, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
- lcdc_write(tmp, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ lcdc_write(reg_dma, LCD_DMA_CTRL_REG);
+ lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
- /* Start the DMA. */
- reg = lcdc_read(LCD_RASTER_CTRL_REG);
- reg &= ~(3 << 20);
- if (load_mode == LOAD_DATA)
- reg |= LCD_PALETTE_LOAD_MODE(PALETTE_AND_DATA);
- else if (load_mode == LOAD_PALETTE)
- reg |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
-
- lcdc_write(reg, LCD_RASTER_CTRL_REG);
+ /*
+ * The Raster enable bit must be set after all other control fields are
+ * set.
+ */
+ lcd_enable_raster();
}
/* Configure the Burst Size of DMA */
@@ -368,12 +409,8 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
u32 bpp, u32 raster_order)
{
- u32 bpl, reg;
+ u32 reg;
- /* Disable Dual Frame Buffer. */
- reg = lcdc_read(LCD_DMA_CTRL_REG);
- lcdc_write(reg & ~LCD_DUAL_FRAME_BUFFER_ENABLE,
- LCD_DMA_CTRL_REG);
/* Set the Panel Width */
/* Pixels per line = (PPL + 1)*16 */
	/* 0x3F in bits 4..9 gives max horizontal resolution = 1024 pixels */
@@ -410,9 +447,6 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
return -EINVAL;
}
- bpl = width * bpp / 8;
- par->databuf_sz = height * bpl + par->palette_sz;
-
return 0;
}
@@ -421,8 +455,9 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
struct fb_info *info)
{
struct da8xx_fb_par *par = info->par;
- unsigned short *palette = (unsigned short *)par->v_palette_base;
+ unsigned short *palette = (unsigned short *) par->v_palette_base;
u_short pal;
+ int update_hw = 0;
if (regno > 255)
return 1;
@@ -439,8 +474,10 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
pal |= (green & 0x00f0);
pal |= (blue & 0x000f);
- palette[regno] = pal;
-
+ if (palette[regno] != pal) {
+ update_hw = 1;
+ palette[regno] = pal;
+ }
} else if ((info->var.bits_per_pixel == 16) && regno < 16) {
red >>= (16 - info->var.red.length);
red <<= info->var.red.offset;
@@ -453,9 +490,16 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
par->pseudo_palette[regno] = red | green | blue;
- palette[0] = 0x4000;
+ if (palette[0] != 0x4000) {
+ update_hw = 1;
+ palette[0] = 0x4000;
+ }
}
+ /* Update the palette in the h/w as needed. */
+ if (update_hw)
+ lcd_blit(LOAD_PALETTE, par);
+
return 0;
}
@@ -541,15 +585,54 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
static irqreturn_t lcdc_irq_handler(int irq, void *arg)
{
+ struct da8xx_fb_par *par = arg;
u32 stat = lcdc_read(LCD_STAT_REG);
+ u32 reg_ras;
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
lcd_disable_raster();
lcdc_write(stat, LCD_STAT_REG);
lcd_enable_raster();
- } else
+ } else if (stat & LCD_PL_LOAD_DONE) {
+ /*
+ * The raster must be disabled before changing the state of any
+ * control bit, and also before clearing the PL loading interrupt
+ * via the following write to the status register; otherwise
+ * multiple PL done interrupts are raised.
+ */
+ lcd_disable_raster();
+
lcdc_write(stat, LCD_STAT_REG);
+ /* Disable PL completion interrupt */
+ reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
+ reg_ras &= ~LCD_PL_ENABLE;
+ lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
+
+ /* Setup and start data loading mode */
+ lcd_blit(LOAD_DATA, par);
+ } else {
+ lcdc_write(stat, LCD_STAT_REG);
+
+ if (stat & LCD_END_OF_FRAME0) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
+ }
+
+ if (stat & LCD_END_OF_FRAME1) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+ par->vsync_flag = 1;
+ wake_up_interruptible(&par->vsync_wait);
+ }
+ }
+
return IRQ_HANDLED;
}
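
The END_OF_FRAME0/1 branches above re-arm the ping-pong DMA registers and then signal the waiter added further below (fb_wait_for_vsync) through vsync_flag and a wait queue. A self-contained sketch of that wake/wait pairing, not part of the patch, with hypothetical names and the same 200 ms timeout the driver uses (HZ / 5):

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static DECLARE_WAIT_QUEUE_HEAD(frame_wait);
	static int frame_done;

	/* interrupt side: mark the frame complete and wake sleepers */
	static void signal_frame_done(void)
	{
		frame_done = 1;
		wake_up_interruptible(&frame_wait);
	}

	/* process side: sleep until the flag is set or the timeout expires */
	static int wait_frame_done(void)
	{
		long t;

		frame_done = 0;
		t = wait_event_interruptible_timeout(frame_wait,
						     frame_done != 0, HZ / 5);
		if (t < 0)
			return t;		/* interrupted by a signal */
		return t ? 0 : -ETIMEDOUT;	/* 0 jiffies left means timeout */
	}
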
@@ -654,9 +737,10 @@ static int __devexit fb_remove(struct platform_device *dev)
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
- dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
- info->screen_base - PAGE_SIZE,
- info->fix.smem_start);
+ dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+ par->p_palette_base);
+ dma_free_coherent(NULL, par->vram_size, par->vram_virt,
+ par->vram_phys);
free_irq(par->irq, par);
clk_disable(par->lcdc_clk);
clk_put(par->lcdc_clk);
@@ -668,6 +752,39 @@ static int __devexit fb_remove(struct platform_device *dev)
return 0;
}
+/*
+ * Wait for vertical sync, which for this LCD peripheral translates
+ * into waiting for the current raster frame to complete.
+ */
+static int fb_wait_for_vsync(struct fb_info *info)
+{
+ struct da8xx_fb_par *par = info->par;
+ int ret;
+
+ /*
+ * Set the flag to 0 and wait for the ISR to set it to 1. There is an
+ * apparent race here: the ISR could have fired just before or just
+ * after this store. But since we are only coarsely waiting for a
+ * frame to complete, that is fine: if the frame completed just
+ * before this code executed then we have to wait another full
+ * frame time but there is no way to avoid such a situation. On the
+ * other hand if the frame completed just after then we don't need
+ * to wait long at all. Either way we are guaranteed to return to the
+ * user immediately after a frame completion which is all that is
+ * required.
+ */
+ par->vsync_flag = 0;
+ ret = wait_event_interruptible_timeout(par->vsync_wait,
+ par->vsync_flag != 0,
+ par->vsync_timeout);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
@@ -697,6 +814,8 @@ static int fb_ioctl(struct fb_info *info, unsigned int cmd,
sync_arg.pulse_width,
sync_arg.front_porch);
break;
+ case FBIO_WAITFORVSYNC:
+ return fb_wait_for_vsync(info);
default:
return -EINVAL;
}
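
With the FBIO_WAITFORVSYNC case wired into fb_ioctl(), user space can block until the current frame has been scanned out; the ioctl number itself comes from <linux/fb.h> (the intelfb.h hunk further down drops its private fallback definition for that reason). A minimal user-space usage sketch, assuming the device node is /dev/fb0:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>

	int main(void)
	{
		__u32 screen = 0;	/* argument is not used by this driver */
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, FBIO_WAITFORVSYNC, &screen) < 0)
			perror("FBIO_WAITFORVSYNC");
		close(fd);
		return 0;
	}
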
@@ -732,10 +851,47 @@ static int cfb_blank(int blank, struct fb_info *info)
return ret;
}
+/*
+ * Set new x,y offsets in the virtual display for the visible area and switch
+ * to the new mode.
+ */
+static int da8xx_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ int ret = 0;
+ struct fb_var_screeninfo new_var;
+ struct da8xx_fb_par *par = fbi->par;
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ unsigned int end;
+ unsigned int start;
+
+ if (var->xoffset != fbi->var.xoffset ||
+ var->yoffset != fbi->var.yoffset) {
+ memcpy(&new_var, &fbi->var, sizeof(new_var));
+ new_var.xoffset = var->xoffset;
+ new_var.yoffset = var->yoffset;
+ if (fb_check_var(&new_var, fbi))
+ ret = -EINVAL;
+ else {
+ memcpy(&fbi->var, &new_var, sizeof(new_var));
+
+ start = fix->smem_start +
+ new_var.yoffset * fix->line_length +
+ new_var.xoffset * var->bits_per_pixel / 8;
+ end = start + var->yres * fix->line_length - 1;
+ par->dma_start = start;
+ par->dma_end = end;
+ }
+ }
+
+ return ret;
+}
+
static struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = fb_check_var,
.fb_setcolreg = fb_setcolreg,
+ .fb_pan_display = da8xx_pan_display,
.fb_ioctl = fb_ioctl,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
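
Combined with yres_virtual = height * LCD_NUM_BUFFERS in the probe hunk below, the new .fb_pan_display hook allows simple tear-free double buffering from user space: render into the off-screen half, pan to it, then wait for vsync before touching the other half again. A sketch under those assumptions (fd is an already-open /dev/fb0, as in the previous sketch):

	#include <sys/ioctl.h>
	#include <linux/fb.h>

	/* Flip between the two halves of the virtual y range. */
	static void flip_loop(int fd)
	{
		struct fb_var_screeninfo var;
		__u32 screen = 0;
		int frame = 0;

		if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
			return;
		for (;;) {
			/* ... render into the half starting at line frame * var.yres ... */
			var.yoffset = frame * var.yres;
			ioctl(fd, FBIOPAN_DISPLAY, &var);	/* ends up in da8xx_pan_display() */
			ioctl(fd, FBIO_WAITFORVSYNC, &screen);	/* new start address latched at EOF IRQ */
			frame = !frame;
		}
	}
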
@@ -829,40 +985,53 @@ static int __init fb_probe(struct platform_device *device)
}
/* allocate frame buffer */
- da8xx_fb_info->screen_base = dma_alloc_coherent(NULL,
- par->databuf_sz + PAGE_SIZE,
- (resource_size_t *)
- &da8xx_fb_info->fix.smem_start,
- GFP_KERNEL | GFP_DMA);
-
- if (!da8xx_fb_info->screen_base) {
+ par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
+ par->vram_size = PAGE_ALIGN(par->vram_size/8);
+ par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
+
+ par->vram_virt = dma_alloc_coherent(NULL,
+ par->vram_size,
+ (resource_size_t *) &par->vram_phys,
+ GFP_KERNEL | GFP_DMA);
+ if (!par->vram_virt) {
dev_err(&device->dev,
"GLCD: kmalloc for frame buffer failed\n");
ret = -EINVAL;
goto err_release_fb;
}
- /* move palette base pointer by (PAGE_SIZE - palette_sz) bytes */
- par->v_palette_base = da8xx_fb_info->screen_base +
- (PAGE_SIZE - par->palette_sz);
- par->p_palette_base = da8xx_fb_info->fix.smem_start +
- (PAGE_SIZE - par->palette_sz);
-
- /* the rest of the frame buffer is pixel data */
- da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz;
- da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz;
- da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
- da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+ da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
+ da8xx_fb_fix.smem_start = par->vram_phys;
+ da8xx_fb_fix.smem_len = par->vram_size;
+ da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
+
+ par->dma_start = par->vram_phys;
+ par->dma_end = par->dma_start + lcdc_info->height *
+ da8xx_fb_fix.line_length - 1;
+
+ /* allocate palette buffer */
+ par->v_palette_base = dma_alloc_coherent(NULL,
+ PALETTE_SIZE,
+ (resource_size_t *)
+ &par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
+ if (!par->v_palette_base) {
+ dev_err(&device->dev,
+ "GLCD: kmalloc for palette buffer failed\n");
+ ret = -EINVAL;
+ goto err_release_fb_mem;
+ }
+ memset(par->v_palette_base, 0, PALETTE_SIZE);
par->irq = platform_get_irq(device, 0);
if (par->irq < 0) {
ret = -ENOENT;
- goto err_release_fb_mem;
+ goto err_release_pl_mem;
}
ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
if (ret)
- goto err_release_fb_mem;
+ goto err_release_pl_mem;
/* Initialize par */
da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
@@ -870,8 +1039,8 @@ static int __init fb_probe(struct platform_device *device)
da8xx_fb_var.xres = lcdc_info->width;
da8xx_fb_var.xres_virtual = lcdc_info->width;
- da8xx_fb_var.yres = lcdc_info->height;
- da8xx_fb_var.yres_virtual = lcdc_info->height;
+ da8xx_fb_var.yres = lcdc_info->height;
+ da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS;
da8xx_fb_var.grayscale =
lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
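
For scale, the vram_size computation a few hunks above works out as follows for a hypothetical 800x480 panel at 16 bpp (geometry assumed purely for illustration):

	800 * 480 * 16 / 8                  =  768000 bytes per buffer
	PAGE_ALIGN(768000), 4 KiB pages     =  770048 bytes
	770048 * LCD_NUM_BUFFERS (2)        = 1540096 bytes of coherent VRAM
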
@@ -892,18 +1061,18 @@ static int __init fb_probe(struct platform_device *device)
ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
if (ret)
goto err_free_irq;
-
- /* First palette_sz byte of the frame buffer is the palette */
da8xx_fb_info->cmap.len = par->palette_sz;
- /* Flush the buffer to the screen. */
- lcd_blit(LOAD_DATA, par);
-
/* initialize var_screeninfo */
da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
fb_set_var(da8xx_fb_info, &da8xx_fb_var);
dev_set_drvdata(&device->dev, da8xx_fb_info);
+
+ /* initialize the vsync wait queue */
+ init_waitqueue_head(&par->vsync_wait);
+ par->vsync_timeout = HZ / 5;
+
/* Register the Frame Buffer */
if (register_framebuffer(da8xx_fb_info) < 0) {
dev_err(&device->dev,
@@ -919,10 +1088,6 @@ static int __init fb_probe(struct platform_device *device)
goto err_cpu_freq;
}
#endif
-
- /* enable raster engine */
- lcd_enable_raster();
-
return 0;
#ifdef CONFIG_CPU_FREQ
@@ -936,10 +1101,12 @@ err_dealloc_cmap:
err_free_irq:
free_irq(par->irq, par);
+err_release_pl_mem:
+ dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+ par->p_palette_base);
+
err_release_fb_mem:
- dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
- da8xx_fb_info->screen_base - PAGE_SIZE,
- da8xx_fb_info->fix.smem_start);
+ dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys);
err_release_fb:
framebuffer_release(da8xx_fb_info);
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index ecf405562f5..4a56f46af40 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -168,7 +168,7 @@ static void efifb_destroy(struct fb_info *info)
{
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->aperture_base, info->aperture_size);
+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
framebuffer_release(info);
}
@@ -292,8 +292,13 @@ static int __devinit efifb_probe(struct platform_device *dev)
info->pseudo_palette = info->par;
info->par = NULL;
- info->aperture_base = efifb_fix.smem_start;
- info->aperture_size = size_remap;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ err = -ENOMEM;
+ goto err_release_fb;
+ }
+ info->apertures->ranges[0].base = efifb_fix.smem_start;
+ info->apertures->ranges[0].size = size_remap;
info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6113c47e095..073c9b408cf 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
return 0;
}
-int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
+int fb_deferred_io_fsync(struct file *file, int datasync)
{
struct fb_info *info = file->private_data;
@@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work)
{
struct fb_info *info = container_of(work, struct fb_info,
deferred_work.work);
- struct list_head *node, *next;
- struct page *cur;
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page, *tmp_page;
+ struct list_head *node, *tmp_node;
+ struct list_head non_dirty;
+
+ INIT_LIST_HEAD(&non_dirty);
/* here we mkclean the pages, then do all deferred IO */
mutex_lock(&fbdefio->lock);
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
- lock_page(cur);
- page_mkclean(cur);
- unlock_page(cur);
+ list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
+ lock_page(page);
+ /*
+ * The workqueue callback can be triggered after a
+ * ->page_mkwrite() call but before the PTE has been marked
+ * dirty. In this case page_mkclean() won't "rearm" the page.
+ *
+ * To avoid this, remove those "non-dirty" pages from the
+ * pagelist before calling the driver's callback, then add
+ * them back to get processed on the next work iteration.
+ * At that time, their PTEs will hopefully be dirty for real.
+ */
+ if (!page_mkclean(page))
+ list_move_tail(&page->lru, &non_dirty);
+ unlock_page(page);
}
/* driver's callback with pagelist */
fbdefio->deferred_io(info, &fbdefio->pagelist);
- /* clear the list */
- list_for_each_safe(node, next, &fbdefio->pagelist) {
+ /* clear the list... */
+ list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
list_del(node);
}
+ /* ... and add back the "non-dirty" pages to the list */
+ list_splice_tail(&non_dirty, &fbdefio->pagelist);
mutex_unlock(&fbdefio->lock);
}
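
The fix above relies on page_mkclean() returning the number of PTEs it actually cleaned: pages reporting zero are parked on a local list and spliced back afterwards so they are retried on the next pass. The same park-and-splice idiom, reduced to a self-contained sketch with a hypothetical item type:

	#include <linux/list.h>

	struct work_item {			/* hypothetical */
		struct list_head lru;
		int ready;
	};

	static void process_ready(struct list_head *pending)
	{
		struct work_item *it, *tmp;
		struct list_head *node, *tmp_node;
		LIST_HEAD(not_ready);

		/* park items that are not ready yet */
		list_for_each_entry_safe(it, tmp, pending, lru)
			if (!it->ready)
				list_move_tail(&it->lru, &not_ready);

		/* ... handle everything still on 'pending' ... */

		/* drop the handled entries, then retry the rest next time */
		list_for_each_safe(node, tmp_node, pending)
			list_del(node);
		list_splice_tail(&not_ready, pending);
	}
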
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct list_head *node, *tmp_node;
struct page *page;
int i;
@@ -209,6 +226,13 @@ void fb_deferred_io_cleanup(struct fb_info *info)
cancel_delayed_work(&info->deferred_work);
flush_scheduled_work();
+ /* the list may have still some non-dirty pages at this point */
+ mutex_lock(&fbdefio->lock);
+ list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+ list_del(node);
+ }
+ mutex_unlock(&fbdefio->lock);
+
/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
page = fb_deferred_io_page(info, i);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a15b44e9c00..731fce64df9 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1468,16 +1468,70 @@ static int fb_check_foreignness(struct fb_info *fi)
return 0;
}
-static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
{
/* is the generic aperture base the same as the HW one */
- if (gen->aperture_base == hw->aperture_base)
+ if (gen->base == hw->base)
return true;
/* is the generic aperture base inside the hw base->hw base+size */
- if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+ if (gen->base > hw->base && gen->base <= hw->base + hw->size)
return true;
return false;
}
+
+static bool fb_do_apertures_overlap(struct apertures_struct *gena,
+ struct apertures_struct *hwa)
+{
+ int i, j;
+ if (!hwa || !gena)
+ return false;
+
+ for (i = 0; i < hwa->count; ++i) {
+ struct aperture *h = &hwa->ranges[i];
+ for (j = 0; j < gena->count; ++j) {
+ struct aperture *g = &gena->ranges[j];
+ printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
+ (unsigned long long)g->base,
+ (unsigned long long)g->size,
+ (unsigned long long)h->base,
+ (unsigned long long)h->size);
+ if (apertures_overlap(g, h))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#define VGA_FB_PHYS 0xA0000
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
+{
+ int i;
+
+ /* check all firmware fbs and kick off if the base addr overlaps */
+ for (i = 0 ; i < FB_MAX; i++) {
+ struct apertures_struct *gen_aper;
+ if (!registered_fb[i])
+ continue;
+
+ if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+ continue;
+
+ gen_aper = registered_fb[i]->apertures;
+ if (fb_do_apertures_overlap(gen_aper, a) ||
+ (primary && gen_aper && gen_aper->count &&
+ gen_aper->ranges[0].base == VGA_FB_PHYS)) {
+
+ printk(KERN_ERR "fb: conflicting fb hw usage "
+ "%s vs %s - removing generic driver\n",
+ name, registered_fb[i]->fix.id);
+ unregister_framebuffer(registered_fb[i]);
+ }
+ }
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
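
A native driver that takes over from a firmware framebuffer is expected to describe the memory it decodes and call the new helper before registering itself. A minimal sketch of that call sequence (driver name and BAR choice are placeholders; assumes <linux/fb.h>, <linux/pci.h> and <linux/slab.h>):

	/* Sketch: inside a hypothetical PCI fb driver's probe(). */
	static int examplefb_kick_firmware_fb(struct pci_dev *pdev)
	{
		struct apertures_struct *ap = alloc_apertures(1);

		if (!ap)
			return -ENOMEM;
		ap->ranges[0].base = pci_resource_start(pdev, 0);	/* placeholder: BAR 0 */
		ap->ranges[0].size = pci_resource_len(pdev, 0);
		remove_conflicting_framebuffers(ap, "examplefb", false);
		kfree(ap);
		return 0;
	}
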
@@ -1501,21 +1555,8 @@ register_framebuffer(struct fb_info *fb_info)
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- /* check all firmware fbs and kick off if the base addr overlaps */
- for (i = 0 ; i < FB_MAX; i++) {
- if (!registered_fb[i])
- continue;
-
- if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
- if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
- printk(KERN_ERR "fb: conflicting fb hw usage "
- "%s vs %s - removing generic driver\n",
- fb_info->fix.id,
- registered_fb[i]->fix.id);
- unregister_framebuffer(registered_fb[i]);
- }
- }
- }
+ remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+ fb_is_primary_device(fb_info));
num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 81aa3129c17..0a08f134122 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,7 @@ EXPORT_SYMBOL(framebuffer_alloc);
*/
void framebuffer_release(struct fb_info *info)
{
+ kfree(info->apertures);
kfree(info);
}
EXPORT_SYMBOL(framebuffer_release);
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index a42fabab69d..95c0227f47f 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -896,7 +896,7 @@ static void ffb_init_fix(struct fb_info *info)
static int __devinit ffb_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct ffb_fbc __iomem *fbc;
struct ffb_dac __iomem *dac;
struct fb_info *info;
@@ -1053,8 +1053,11 @@ static const struct of_device_id ffb_match[] = {
MODULE_DEVICE_TABLE(of, ffb_match);
static struct of_platform_driver ffb_driver = {
- .name = "ffb",
- .match_table = ffb_match,
+ .driver = {
+ .name = "ffb",
+ .owner = THIS_MODULE,
+ .of_match_table = ffb_match,
+ },
.probe = ffb_probe,
.remove = __devexit_p(ffb_remove),
};
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 6c91c61cdb6..1b0feb8e724 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -219,6 +219,7 @@ static struct zorro_device_id fm2fb_devices[] __devinitdata = {
{ ZORRO_PROD_HELFRICH_RAINBOW_II },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, fm2fb_devices);
static struct zorro_driver fm2fb_driver = {
.name = "fm2fb",
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 994358a4f30..27455ce298b 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1421,7 +1421,7 @@ static ssize_t show_monitor(struct device *device,
static int __devinit fsl_diu_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct mfb_info *mfbi;
phys_addr_t dummy_ad_addr;
int ret, i, error = 0;
@@ -1647,9 +1647,11 @@ static struct of_device_id fsl_diu_match[] = {
MODULE_DEVICE_TABLE(of, fsl_diu_match);
static struct of_platform_driver fsl_diu_driver = {
- .owner = THIS_MODULE,
- .name = "fsl_diu",
- .match_table = fsl_diu_match,
+ .driver = {
+ .name = "fsl_diu",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_diu_match,
+ },
.probe = fsl_diu_probe,
.remove = fsl_diu_remove,
.suspend = fsl_diu_suspend,
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 8bbf251f83d..af8f0f2cc78 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
/* Framebuffer driver structures */
-static struct fb_var_screeninfo __initdata hga_default_var = {
+static struct fb_var_screeninfo hga_default_var __devinitdata = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
@@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = {
.width = -1,
};
-static struct fb_fix_screeninfo __initdata hga_fix = {
+static struct fb_fix_screeninfo hga_fix __devinitdata = {
.id = "HGA",
.type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
.visual = FB_VISUAL_MONO10,
@@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
spin_unlock_irqrestore(&hga_reg_lock, flags);
}
-static int __init hga_card_detect(void)
+static int __devinit hga_card_detect(void)
{
int count = 0;
void __iomem *p, *q;
@@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
return 0;
}
-static int hgafb_remove(struct platform_device *pdev)
+static int __devexit hgafb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
@@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev)
static struct platform_driver hgafb_driver = {
.probe = hgafb_probe,
- .remove = hgafb_remove,
+ .remove = __devexit_p(hgafb_remove),
.driver = {
.name = "hgafb",
},
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 393f3f3d3df..cfb8d645101 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -30,14 +30,14 @@
#define WIDTH 640
-static struct fb_var_screeninfo hitfb_var __initdata = {
+static struct fb_var_screeninfo hitfb_var __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo hitfb_fix __initdata = {
+static struct fb_fix_screeninfo hitfb_fix __devinitdata = {
.id = "Hitachi HD64461",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
@@ -417,7 +417,7 @@ err_fb:
return ret;
}
-static int __exit hitfb_remove(struct platform_device *dev)
+static int __devexit hitfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -462,7 +462,7 @@ static const struct dev_pm_ops hitfb_dev_pm_ops = {
static struct platform_driver hitfb_driver = {
.probe = hitfb_probe,
- .remove = __exit_p(hitfb_remove),
+ .remove = __devexit_p(hitfb_remove),
.driver = {
.name = "hitfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 40984551c92..6b51175629c 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -371,10 +371,6 @@ struct intelfb_info {
((dinfo)->chipset == INTEL_965G) || \
((dinfo)->chipset == INTEL_965GM))
-#ifndef FBIO_WAITFORVSYNC
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
-#endif
-
/*** function prototypes ***/
extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var);
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 1db55f12849..3d7895316ea 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -663,8 +663,11 @@ static const struct of_device_id leo_match[] = {
MODULE_DEVICE_TABLE(of, leo_match);
static struct of_platform_driver leo_driver = {
- .name = "leo",
- .match_table = leo_match,
+ .driver = {
+ .name = "leo",
+ .owner = THIS_MODULE,
+ .of_match_table = leo_match,
+ },
.probe = leo_probe,
.remove = __devexit_p(leo_remove),
};
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 8280a58a0e5..0540de4f5cb 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -718,9 +718,11 @@ static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
};
static struct of_platform_driver of_platform_mb862xxfb_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .match_table = of_platform_mb862xx_tbl,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_platform_mb862xx_tbl,
+ },
.probe = of_platform_mb862xx_probe,
.remove = __devexit_p(of_platform_mb862xx_remove),
};
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 772ba3f45e6..7cfc170bce1 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
spin_unlock_irqrestore(&mx3fb->lock, flags);
- mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+ mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+ DMA_TERMINATE_ALL, 0);
mx3_fbi->txd = NULL;
mx3_fbi->cookie = -EINVAL;
}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 6bf0d460a73..d4cde79ea15 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -667,7 +667,7 @@ release_irq:
release_regs:
iounmap(fbi->io);
release_mem_region:
- release_mem_region((unsigned long)fbi->mem, size);
+ release_mem_region(res->start, size);
free_fb:
framebuffer_release(fbinfo);
return ret;
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 61f8b8f919b..46dda7d8aae 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -285,7 +285,7 @@ static void offb_destroy(struct fb_info *info)
{
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->aperture_base, info->aperture_size);
+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
framebuffer_release(info);
}
@@ -491,8 +491,11 @@ static void __init offb_init_fb(const char *name, const char *full_name,
var->vmode = FB_VMODE_NONINTERLACED;
/* set offb aperture size for generic probing */
- info->aperture_base = address;
- info->aperture_size = fix->smem_len;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures)
+ goto out_aper;
+ info->apertures->ranges[0].base = address;
+ info->apertures->ranges[0].size = fix->smem_len;
info->fbops = &offb_ops;
info->screen_base = ioremap(address, fix->smem_len);
@@ -501,17 +504,20 @@ static void __init offb_init_fb(const char *name, const char *full_name,
fb_alloc_cmap(&info->cmap, 256, 0);
- if (register_framebuffer(info) < 0) {
- iounmap(par->cmap_adr);
- par->cmap_adr = NULL;
- iounmap(info->screen_base);
- framebuffer_release(info);
- release_mem_region(res_start, res_size);
- return;
- }
+ if (register_framebuffer(info) < 0)
+ goto out_err;
printk(KERN_INFO "fb%d: Open Firmware frame buffer device on %s\n",
info->node, full_name);
+ return;
+
+out_err:
+ iounmap(info->screen_base);
+out_aper:
+ iounmap(par->cmap_adr);
+ par->cmap_adr = NULL;
+ framebuffer_release(info);
+ release_mem_region(res_start, res_size);
}
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index dfb57ee5086..881c9f77c75 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -10,6 +10,7 @@ config PANEL_GENERIC
config PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on OMAP2_DSS
+ select BACKLIGHT_CLASS_DEVICE
help
LCD Panel used in TI's SDP3430 and EVM boards
@@ -33,8 +34,14 @@ config PANEL_TOPPOLY_TDO35S
config PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
- depends on OMAP2_DSS && I2C
+ depends on OMAP2_DSS && SPI
help
LCD Panel used in OMAP3 Pandora
+config PANEL_ACX565AKM
+ tristate "ACX565AKM Panel"
+ depends on OMAP2_DSS_SDI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ This is the LCD panel used on Nokia N900
endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index e2bb32168de..aa386095d7c 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
new file mode 100644
index 00000000000..1f8eb70e293
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -0,0 +1,819 @@
+/*
+ * Support for ACX565AKM LCD Panel used on Nokia N900
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define MIPID_CMD_READ_DISP_ID 0x04
+#define MIPID_CMD_READ_RED 0x06
+#define MIPID_CMD_READ_GREEN 0x07
+#define MIPID_CMD_READ_BLUE 0x08
+#define MIPID_CMD_READ_DISP_STATUS 0x09
+#define MIPID_CMD_RDDSDR 0x0F
+#define MIPID_CMD_SLEEP_IN 0x10
+#define MIPID_CMD_SLEEP_OUT 0x11
+#define MIPID_CMD_DISP_OFF 0x28
+#define MIPID_CMD_DISP_ON 0x29
+#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
+#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
+#define MIPID_CMD_WRITE_CTRL_DISP 0x53
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
+#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
+
+#define MIPID_CMD_READ_CTRL_DISP 0x54
+#define MIPID_CMD_WRITE_CABC 0x55
+#define MIPID_CMD_READ_CABC 0x56
+
+#define MIPID_VER_LPH8923 3
+#define MIPID_VER_LS041Y3 4
+#define MIPID_VER_L4F00311 8
+#define MIPID_VER_ACX565AKM 9
+
+struct acx565akm_device {
+ char *name;
+ int enabled;
+ int model;
+ int revision;
+ u8 display_id[3];
+ unsigned has_bc:1;
+ unsigned has_cabc:1;
+ unsigned cabc_mode;
+ unsigned long hw_guard_end; /* next value of jiffies
+ when we can issue the
+ next sleep in/out command */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ struct spi_device *spi;
+ struct mutex mutex;
+
+ struct omap_dss_device *dssdev;
+ struct backlight_device *bl_dev;
+};
+
+static struct acx565akm_device acx_dev;
+static int acx565akm_bl_update_status(struct backlight_device *dev);
+
+/*--------------------MIPID interface-----------------------------*/
+
+static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[5];
+ int r;
+
+ BUG_ON(md->spi == NULL);
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ if (rlen > 1 && wlen == 0) {
+ /*
+ * Between the command and the response data there is a
+ * dummy clock cycle. Add an extra bit after the command
+ * word to account for this.
+ */
+ x->bits_per_word = 10;
+ cmd <<= 1;
+ }
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
+ x++;
+ x->rx_buf = rbuf;
+ x->len = rlen;
+ spi_message_add_tail(x, &m);
+ }
+
+ r = spi_sync(md->spi, &m);
+ if (r < 0)
+ dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
+}
+
+static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
+{
+ acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_device *md,
+ int reg, const u8 *buf, int len)
+{
+ acx565akm_transfer(md, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_device *md,
+ int reg, u8 *buf, int len)
+{
+ acx565akm_transfer(md, reg, NULL, 0, buf, len);
+}
+
+static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
+{
+ md->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ md->hw_guard_end = jiffies + md->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct acx565akm_device *md)
+{
+ unsigned long wait = md->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= md->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+/*----------------------MIPID wrappers----------------------------*/
+
+static void set_sleep_mode(struct acx565akm_device *md, int on)
+{
+ int cmd;
+
+ if (on)
+ cmd = MIPID_CMD_SLEEP_IN;
+ else
+ cmd = MIPID_CMD_SLEEP_OUT;
+ /*
+ * We have to keep 120msec between sleep in/out commands.
+ * (8.2.15, 8.2.16).
+ */
+ hw_guard_wait(md);
+ acx565akm_cmd(md, cmd);
+ hw_guard_start(md, 120);
+}
+
+static void set_display_state(struct acx565akm_device *md, int enabled)
+{
+ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
+
+ acx565akm_cmd(md, cmd);
+}
+
+static int panel_enabled(struct acx565akm_device *md)
+{
+ u32 disp_status;
+ int enabled;
+
+ acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
+ disp_status = __be32_to_cpu(disp_status);
+ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
+ dev_dbg(&md->spi->dev,
+ "LCD panel %senabled by bootloader (status 0x%04x)\n",
+ enabled ? "" : "not ", disp_status);
+ return enabled;
+}
+
+static int panel_detect(struct acx565akm_device *md)
+{
+ acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
+ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ md->display_id[0], md->display_id[1], md->display_id[2]);
+
+ switch (md->display_id[0]) {
+ case 0x10:
+ md->model = MIPID_VER_ACX565AKM;
+ md->name = "acx565akm";
+ md->has_bc = 1;
+ md->has_cabc = 1;
+ break;
+ case 0x29:
+ md->model = MIPID_VER_L4F00311;
+ md->name = "l4f00311";
+ break;
+ case 0x45:
+ md->model = MIPID_VER_LPH8923;
+ md->name = "lph8923";
+ break;
+ case 0x83:
+ md->model = MIPID_VER_LS041Y3;
+ md->name = "ls041y3";
+ break;
+ default:
+ md->name = "unknown";
+ dev_err(&md->spi->dev, "invalid display ID\n");
+ return -ENODEV;
+ }
+
+ md->revision = md->display_id[1];
+
+ dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
+ md->name, md->revision);
+
+ return 0;
+}
+
+/*----------------------Backlight Control-------------------------*/
+
+static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
+{
+ u16 ctrl;
+
+ acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
+ if (enable) {
+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON;
+ } else {
+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON);
+ }
+
+ ctrl |= 1 << 8;
+ acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
+}
+
+static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
+{
+ u16 cabc_ctrl;
+
+ md->cabc_mode = mode;
+ if (!md->enabled)
+ return;
+ cabc_ctrl = 0;
+ acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+ cabc_ctrl &= ~3;
+ cabc_ctrl |= (1 << 8) | (mode & 3);
+ acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned get_cabc_mode(struct acx565akm_device *md)
+{
+ return md->cabc_mode;
+}
+
+static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
+{
+ u8 cabc_ctrl;
+
+ acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+ return cabc_ctrl & 3;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
+{
+ int bv;
+
+ bv = level | (1 << 8);
+ acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
+
+ if (level)
+ enable_backlight_ctrl(md, 1);
+ else
+ enable_backlight_ctrl(md, 0);
+}
+
+static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
+{
+ u8 bv;
+
+ acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
+
+ return bv;
+}
+
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+ int r;
+ int level;
+
+ dev_dbg(&md->spi->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ r = 0;
+ if (md->has_bc)
+ acx565akm_set_brightness(md, level);
+ else if (md->dssdev->set_backlight)
+ r = md->dssdev->set_backlight(md->dssdev, level);
+ else
+ r = -ENODEV;
+
+ mutex_unlock(&md->mutex);
+
+ return r;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
+
+ dev_dbg(&dev->dev, "%s\n", __func__);
+
+ if (!md->has_bc && md->dssdev->set_backlight == NULL)
+ return -ENODEV;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK) {
+ if (md->has_bc)
+ return acx565akm_get_actual_brightness(md);
+ else
+ return dev->props.brightness;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+ .get_brightness = acx565akm_bl_get_intensity,
+ .update_status = acx565akm_bl_update_status,
+};
+
+/*--------------------Auto Brightness control via Sysfs---------------------*/
+
+static const char *cabc_modes[] = {
+ "off", /* always used when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ const char *mode_str;
+ int mode;
+ int len;
+
+ if (!md->has_cabc)
+ mode = 0;
+ else
+ mode = get_cabc_mode(md);
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+ mode_str = cabc_modes[mode];
+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+ const char *mode_str = cabc_modes[i];
+ int cmp_len = strlen(mode_str);
+
+ if (count > 0 && buf[count - 1] == '\n')
+ count--;
+ if (count != cmp_len)
+ continue;
+
+ if (strncmp(buf, mode_str, cmp_len) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cabc_modes))
+ return -EINVAL;
+
+ if (!md->has_cabc && i != 0)
+ return -EINVAL;
+
+ mutex_lock(&md->mutex);
+ set_cabc_mode(md, i);
+ mutex_unlock(&md->mutex);
+
+ return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_device *md = dev_get_drvdata(dev);
+ int len;
+ int i;
+
+ if (!md->has_cabc)
+ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
+
+ for (i = 0, len = 0;
+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+ i ? " " : "", cabc_modes[i],
+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+ show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+ show_cabc_available_modes, NULL);
+
+static struct attribute *bldev_attrs[] = {
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static struct attribute_group bldev_attr_group = {
+ .attrs = bldev_attrs,
+};
+
+
+/*---------------------------ACX Panel----------------------------*/
+
+static int acx_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+ return 16;
+}
+
+static struct omap_video_timings acx_panel_timings = {
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 24000,
+ .hfp = 28,
+ .hsw = 4,
+ .hbp = 24,
+ .vfp = 3,
+ .vsw = 3,
+ .vbp = 4,
+};
+
+static int acx_panel_probe(struct omap_dss_device *dssdev)
+{
+ int r;
+ struct acx565akm_device *md = &acx_dev;
+ struct backlight_device *bldev;
+ int max_brightness, brightness;
+ struct backlight_properties props;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ /* FIXME AC bias ? */
+ dssdev->panel.timings = acx_panel_timings;
+
+ if (dssdev->platform_enable)
+ dssdev->platform_enable(dssdev);
+ /*
+ * After reset we have to wait 5 msec before the first
+ * command can be sent.
+ */
+ msleep(5);
+
+ md->enabled = panel_enabled(md);
+
+ r = panel_detect(md);
+ if (r) {
+ dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
+ if (!md->enabled && dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+ return r;
+ }
+
+ mutex_lock(&acx_dev.mutex);
+ acx_dev.dssdev = dssdev;
+ mutex_unlock(&acx_dev.mutex);
+
+ if (!md->enabled) {
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+ }
+
+ /*------- Backlight control --------*/
+
+ props.fb_blank = FB_BLANK_UNBLANK;
+ props.power = FB_BLANK_UNBLANK;
+
+ bldev = backlight_device_register("acx565akm", &md->spi->dev,
+ md, &acx565akm_bl_ops, &props);
+ md->bl_dev = bldev;
+ if (md->has_cabc) {
+ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
+ if (r) {
+ dev_err(&bldev->dev,
+ "%s failed to create sysfs files\n", __func__);
+ backlight_device_unregister(bldev);
+ return r;
+ }
+ md->cabc_mode = get_hw_cabc_mode(md);
+ }
+
+ if (md->has_bc)
+ max_brightness = 255;
+ else
+ max_brightness = dssdev->max_backlight_level;
+
+ if (md->has_bc)
+ brightness = acx565akm_get_actual_brightness(md);
+ else if (dssdev->get_backlight)
+ brightness = dssdev->get_backlight(dssdev);
+ else
+ brightness = 0;
+
+ bldev->props.max_brightness = max_brightness;
+ bldev->props.brightness = brightness;
+
+ acx565akm_bl_update_status(bldev);
+ return 0;
+}
+
+static void acx_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
+ backlight_device_unregister(md->bl_dev);
+ mutex_lock(&acx_dev.mutex);
+ acx_dev.dssdev = NULL;
+ mutex_unlock(&acx_dev.mutex);
+}
+
+static int acx_panel_power_on(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ r = omapdss_sdi_display_enable(dssdev);
+ if (r) {
+ pr_err("%s sdi enable failed\n", __func__);
+ return r;
+ }
+
+ /*FIXME tweak me */
+ msleep(50);
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto fail;
+ }
+
+ if (md->enabled) {
+ dev_dbg(&md->spi->dev, "panel already enabled\n");
+ mutex_unlock(&md->mutex);
+ return 0;
+ }
+
+ /*
+ * We have to meet all the following delay requirements:
+ * 1. tRW: reset pulse width 10usec (7.12.1)
+ * 2. tRT: reset cancel time 5msec (7.12.1)
+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+ * case (7.6.2)
+ * 4. 120msec before the sleep out command (7.12.1)
+ */
+ msleep(120);
+
+ set_sleep_mode(md, 0);
+ md->enabled = 1;
+
+ /* 5msec between sleep out and the next command. (8.2.16) */
+ msleep(5);
+ set_display_state(md, 1);
+ set_cabc_mode(md, md->cabc_mode);
+
+ mutex_unlock(&md->mutex);
+
+ return acx565akm_bl_update_status(md->bl_dev);
+fail:
+ omapdss_sdi_display_disable(dssdev);
+ return r;
+}
+
+static void acx_panel_power_off(struct omap_dss_device *dssdev)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+
+ mutex_lock(&md->mutex);
+
+ if (!md->enabled) {
+ mutex_unlock(&md->mutex);
+ return;
+ }
+ set_display_state(md, 0);
+ set_sleep_mode(md, 1);
+ md->enabled = 0;
+ /*
+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+ * ~50msec) after sending the sleep in command and asserting the
+ * reset signal. We probably could assert the reset w/o the delay
+ * but we still delay to avoid possible artifacts. (7.6.1)
+ */
+ msleep(50);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ /* FIXME need to tweak this delay */
+ msleep(100);
+
+ omapdss_sdi_display_disable(dssdev);
+
+ mutex_unlock(&md->mutex);
+}
+
+static int acx_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ r = acx_panel_power_on(dssdev);
+
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return 0;
+}
+
+static void acx_panel_disable(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ acx_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int acx_panel_suspend(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ acx_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ return 0;
+}
+
+static int acx_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ dev_dbg(&dssdev->dev, "%s\n", __func__);
+ r = acx_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return 0;
+}
+
+static void acx_panel_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ int r;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ omapdss_sdi_display_disable(dssdev);
+
+ dssdev->panel.timings = *timings;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = omapdss_sdi_display_enable(dssdev);
+ if (r)
+ dev_err(&dssdev->dev, "%s enable failed\n", __func__);
+ }
+}
+
+static void acx_panel_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static int acx_panel_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return 0;
+}
+
+
+static struct omap_dss_driver acx_panel_driver = {
+ .probe = acx_panel_probe,
+ .remove = acx_panel_remove,
+
+ .enable = acx_panel_enable,
+ .disable = acx_panel_disable,
+ .suspend = acx_panel_suspend,
+ .resume = acx_panel_resume,
+
+ .set_timings = acx_panel_set_timings,
+ .get_timings = acx_panel_get_timings,
+ .check_timings = acx_panel_check_timings,
+
+ .get_recommended_bpp = acx_get_recommended_bpp,
+
+ .driver = {
+ .name = "panel-acx565akm",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*--------------------SPI probe-------------------------*/
+
+static int acx565akm_spi_probe(struct spi_device *spi)
+{
+ struct acx565akm_device *md = &acx_dev;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ spi->mode = SPI_MODE_3;
+ md->spi = spi;
+ mutex_init(&md->mutex);
+ dev_set_drvdata(&spi->dev, md);
+
+ omap_dss_register_driver(&acx_panel_driver);
+
+ return 0;
+}
+
+static int acx565akm_spi_remove(struct spi_device *spi)
+{
+ struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&md->spi->dev, "%s\n", __func__);
+ omap_dss_unregister_driver(&acx_panel_driver);
+
+ return 0;
+}
+
+static struct spi_driver acx565akm_spi_driver = {
+ .driver = {
+ .name = "acx565akm",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = acx565akm_spi_probe,
+ .remove = __devexit_p(acx565akm_spi_remove),
+};
+
+static int __init acx565akm_init(void)
+{
+ return spi_register_driver(&acx565akm_spi_driver);
+}
+
+static void __exit acx565akm_exit(void)
+{
+ spi_unregister_driver(&acx565akm_spi_driver);
+}
+
+module_init(acx565akm_init);
+module_exit(acx565akm_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("acx565akm LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 8d51a5e6341..7d9eb2b1f5a 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -20,10 +20,17 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <plat/display.h>
+struct sharp_data {
+ struct backlight_device *bl;
+};
+
static struct omap_video_timings sharp_ls_timings = {
.x_res = 480,
.y_res = 640,
@@ -39,18 +46,89 @@ static struct omap_video_timings sharp_ls_timings = {
.vbp = 1,
};
+static int sharp_ls_bl_update_status(struct backlight_device *bl)
+{
+ struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+ int level;
+
+ if (!dssdev->set_backlight)
+ return -EINVAL;
+
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ level = bl->props.brightness;
+ else
+ level = 0;
+
+ return dssdev->set_backlight(dssdev, level);
+}
+
+static int sharp_ls_bl_get_brightness(struct backlight_device *bl)
+{
+ if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+ bl->props.power == FB_BLANK_UNBLANK)
+ return bl->props.brightness;
+
+ return 0;
+}
+
+static const struct backlight_ops sharp_ls_bl_ops = {
+ .get_brightness = sharp_ls_bl_get_brightness,
+ .update_status = sharp_ls_bl_update_status,
+};
+
+
+
static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct sharp_data *sd;
+ int r;
+
dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
OMAP_DSS_LCD_IHS;
dssdev->panel.acb = 0x28;
dssdev->panel.timings = sharp_ls_timings;
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dssdev->dev, sd);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = dssdev->max_backlight_level;
+
+ bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
+ &sharp_ls_bl_ops, &props);
+ if (IS_ERR(bl)) {
+ r = PTR_ERR(bl);
+ kfree(sd);
+ return r;
+ }
+ sd->bl = bl;
+
+ bl->props.fb_blank = FB_BLANK_UNBLANK;
+ bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.brightness = dssdev->max_backlight_level;
+ r = sharp_ls_bl_update_status(bl);
+ if (r < 0)
+ dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
return 0;
}
static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
{
+ struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bl = sd->bl;
+
+ bl->props.power = FB_BLANK_POWERDOWN;
+ sharp_ls_bl_update_status(bl);
+ backlight_device_unregister(bl);
+
+ kfree(sd);
}
static int sharp_ls_power_on(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 4f3988a4108..aaf5d308a04 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -31,6 +31,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <plat/display.h>
@@ -67,6 +68,8 @@
static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
struct taal_data {
+ struct mutex lock;
+
struct backlight_device *bldev;
unsigned long hw_guard_end; /* next value of jiffies when we can
@@ -510,6 +513,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
}
td->dssdev = dssdev;
+ mutex_init(&td->lock);
+
td->esd_wq = create_singlethread_workqueue("taal_esd");
if (td->esd_wq == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -697,10 +702,9 @@ static int taal_power_on(struct omap_dss_device *dssdev)
return 0;
err:
- dsi_bus_unlock();
-
omapdss_dsi_display_disable(dssdev);
err0:
+ dsi_bus_unlock();
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -733,54 +737,96 @@ static void taal_power_off(struct omap_dss_device *dssdev)
static int taal_enable(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+
dev_dbg(&dssdev->dev, "enable\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
r = taal_power_on(dssdev);
if (r)
- return r;
+ goto err;
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ mutex_unlock(&td->lock);
+
+ return 0;
+err:
+ dev_dbg(&dssdev->dev, "enable failed\n");
+ mutex_unlock(&td->lock);
return r;
}
static void taal_disable(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
dev_dbg(&dssdev->dev, "disable\n");
+ mutex_lock(&td->lock);
+
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
taal_power_off(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ mutex_unlock(&td->lock);
}
static int taal_suspend(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+
dev_dbg(&dssdev->dev, "suspend\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
taal_power_off(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ mutex_unlock(&td->lock);
+
return 0;
+err:
+ mutex_unlock(&td->lock);
+ return r;
}
static int taal_resume(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+
dev_dbg(&dssdev->dev, "resume\n");
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
- return -EINVAL;
+ mutex_lock(&td->lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
r = taal_power_on(dssdev);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ mutex_unlock(&td->lock);
+
+ return r;
+err:
+ mutex_unlock(&td->lock);
return r;
}
@@ -799,6 +845,7 @@ static int taal_update(struct omap_dss_device *dssdev,
dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (!td->enabled) {
@@ -820,18 +867,24 @@ static int taal_update(struct omap_dss_device *dssdev,
goto err;
/* note: no bus_unlock here. unlock is in framedone_cb */
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static int taal_sync(struct omap_dss_device *dssdev)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
dev_dbg(&dssdev->dev, "sync\n");
+ mutex_lock(&td->lock);
dsi_bus_lock();
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
dev_dbg(&dssdev->dev, "sync done\n");
@@ -861,13 +914,16 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ mutex_lock(&td->lock);
dsi_bus_lock();
r = _taal_enable_te(dssdev, enable);
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
@@ -875,7 +931,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
static int taal_get_te(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->te_enabled;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->te_enabled;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
@@ -885,6 +947,7 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (td->enabled) {
@@ -896,16 +959,24 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
td->rotate = rotate;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static u8 taal_get_rotate(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->rotate;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->rotate;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
@@ -915,6 +986,7 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dev_dbg(&dssdev->dev, "mirror %d\n", enable);
+ mutex_lock(&td->lock);
dsi_bus_lock();
if (td->enabled) {
r = taal_set_addr_mode(td->rotate, enable);
@@ -925,23 +997,33 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
td->mirror = enable;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
static bool taal_get_mirror(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- return td->mirror;
+ int r;
+
+ mutex_lock(&td->lock);
+ r = td->mirror;
+ mutex_unlock(&td->lock);
+
+ return r;
}
static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 id1, id2, id3;
int r;
+ mutex_lock(&td->lock);
dsi_bus_lock();
r = taal_dcs_read_1(DCS_GET_ID1, &id1);
@@ -955,9 +1037,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
goto err;
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return 0;
err:
dsi_bus_unlock();
+ mutex_unlock(&td->lock);
return r;
}
@@ -971,12 +1055,16 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
unsigned buf_used = 0;
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- if (!td->enabled)
- return -ENODEV;
-
if (size < w * h * 3)
return -ENOMEM;
+ mutex_lock(&td->lock);
+
+ if (!td->enabled) {
+ r = -ENODEV;
+ goto err1;
+ }
+
size = min(w * h * 3,
dssdev->panel.timings.x_res *
dssdev->panel.timings.y_res * 3);
@@ -995,7 +1083,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
r = dsi_vc_set_max_rx_packet_size(TCH, plen);
if (r)
- goto err0;
+ goto err2;
while (buf_used < size) {
u8 dcs_cmd = first ? 0x2e : 0x3e;
@@ -1006,7 +1094,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
if (r < 0) {
dev_err(&dssdev->dev, "read error\n");
- goto err;
+ goto err3;
}
buf_used += r;
@@ -1020,16 +1108,18 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
dev_err(&dssdev->dev, "signal pending, "
"aborting memory read\n");
r = -ERESTARTSYS;
- goto err;
+ goto err3;
}
}
r = buf_used;
-err:
+err3:
dsi_vc_set_max_rx_packet_size(TCH, 1);
-err0:
+err2:
dsi_bus_unlock();
+err1:
+ mutex_unlock(&td->lock);
return r;
}
@@ -1041,8 +1131,12 @@ static void taal_esd_work(struct work_struct *work)
u8 state1, state2;
int r;
- if (!td->enabled)
+ mutex_lock(&td->lock);
+
+ if (!td->enabled) {
+ mutex_unlock(&td->lock);
return;
+ }
dsi_bus_lock();
@@ -1084,16 +1178,19 @@ static void taal_esd_work(struct work_struct *work)
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+ mutex_unlock(&td->lock);
return;
err:
dev_err(&dssdev->dev, "performing LCD reset\n");
- taal_disable(dssdev);
- taal_enable(dssdev);
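+ /* Use the power helpers directly: taal_enable()/taal_disable() take td->lock, which is already held here. */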
+ taal_power_off(dssdev);
+ taal_power_on(dssdev);
dsi_bus_unlock();
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+ mutex_unlock(&td->lock);
}
static int taal_set_update_mode(struct omap_dss_device *dssdev,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 87afb81b2c4..43b64403eaa 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -36,6 +36,12 @@ config OMAP2_DSS_COLLECT_IRQ_STATS
<debugfs>/omapdss/dispc_irq for DISPC interrupts, and
<debugfs>/omapdss/dsi_irq for DSI interrupts.
+config OMAP2_DSS_DPI
+ bool "DPI support"
+ default y
+ help
+ DPI Interface. This is the Parallel Display Interface.
+
config OMAP2_DSS_RFBI
bool "RFBI support"
default n
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 980c72c2db9..d71b5d9d71b 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 7ebe50b335e..b3a498f22d3 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -482,6 +482,14 @@ static void dss_uninitialize_debugfs(void)
if (dss_debugfs_dir)
debugfs_remove_recursive(dss_debugfs_dir);
}
+#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+static inline int dss_initialize_debugfs(void)
+{
+ return 0;
+}
+static inline void dss_uninitialize_debugfs(void)
+{
+}
#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* PLATFORM DEVICE */
@@ -499,7 +507,7 @@ static int omap_dss_probe(struct platform_device *pdev)
r = dss_get_clocks();
if (r)
- goto fail0;
+ goto err_clocks;
dss_clk_enable_all_no_ctx();
@@ -515,64 +523,64 @@ static int omap_dss_probe(struct platform_device *pdev)
r = dss_init(skip_init);
if (r) {
DSSERR("Failed to initialize DSS\n");
- goto fail0;
+ goto err_dss;
}
-#ifdef CONFIG_OMAP2_DSS_RFBI
r = rfbi_init();
if (r) {
DSSERR("Failed to initialize rfbi\n");
- goto fail0;
+ goto err_rfbi;
}
-#endif
r = dpi_init(pdev);
if (r) {
DSSERR("Failed to initialize dpi\n");
- goto fail0;
+ goto err_dpi;
}
r = dispc_init();
if (r) {
DSSERR("Failed to initialize dispc\n");
- goto fail0;
+ goto err_dispc;
}
-#ifdef CONFIG_OMAP2_DSS_VENC
+
r = venc_init(pdev);
if (r) {
DSSERR("Failed to initialize venc\n");
- goto fail0;
+ goto err_venc;
}
-#endif
+
if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_SDI
r = sdi_init(skip_init);
if (r) {
DSSERR("Failed to initialize SDI\n");
- goto fail0;
+ goto err_sdi;
}
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
+
r = dsi_init(pdev);
if (r) {
DSSERR("Failed to initialize DSI\n");
- goto fail0;
+ goto err_dsi;
}
-#endif
}
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
r = dss_initialize_debugfs();
if (r)
- goto fail0;
-#endif
+ goto err_debugfs;
for (i = 0; i < pdata->num_devices; ++i) {
struct omap_dss_device *dssdev = pdata->devices[i];
r = omap_dss_register_device(dssdev);
- if (r)
- DSSERR("device reg failed %d\n", i);
+ if (r) {
+ DSSERR("device %d %s register failed %d\n", i,
+ dssdev->name ?: "unnamed", r);
+
+ while (--i >= 0)
+ omap_dss_unregister_device(pdata->devices[i]);
+
+ goto err_register;
+ }
if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
pdata->default_device = dssdev;
@@ -582,8 +590,29 @@ static int omap_dss_probe(struct platform_device *pdev)
return 0;
- /* XXX fail correctly */
-fail0:
+err_register:
+ dss_uninitialize_debugfs();
+err_debugfs:
+ if (cpu_is_omap34xx())
+ dsi_exit();
+err_dsi:
+ if (cpu_is_omap34xx())
+ sdi_exit();
+err_sdi:
+ venc_exit();
+err_venc:
+ dispc_exit();
+err_dispc:
+ dpi_exit();
+err_dpi:
+ rfbi_exit();
+err_rfbi:
+ dss_exit();
+err_dss:
+ dss_clk_disable_all_no_ctx();
+ dss_put_clocks();
+err_clocks:
+
return r;
}
@@ -593,25 +622,15 @@ static int omap_dss_remove(struct platform_device *pdev)
int i;
int c;
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
dss_uninitialize_debugfs();
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
venc_exit();
-#endif
dispc_exit();
dpi_exit();
-#ifdef CONFIG_OMAP2_DSS_RFBI
rfbi_exit();
-#endif
if (cpu_is_omap34xx()) {
-#ifdef CONFIG_OMAP2_DSS_DSI
dsi_exit();
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
sdi_exit();
-#endif
}
dss_exit();
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 6a74ea116d2..ef8c8529dda 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -392,7 +392,9 @@ void dss_init_device(struct platform_device *pdev,
int r;
switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
+#endif
#ifdef CONFIG_OMAP2_DSS_RFBI
case OMAP_DISPLAY_TYPE_DBI:
#endif
@@ -413,9 +415,11 @@ void dss_init_device(struct platform_device *pdev,
}
switch (dssdev->type) {
+#ifdef CONFIG_OMAP2_DSS_DPI
case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_display(dssdev);
break;
+#endif
#ifdef CONFIG_OMAP2_DSS_RFBI
case OMAP_DISPLAY_TYPE_DBI:
r = rfbi_init_display(dssdev);
@@ -541,7 +545,10 @@ int dss_resume_all_devices(void)
static int dss_disable_device(struct device *dev, void *data)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- dssdev->driver->disable(dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ dssdev->driver->disable(dssdev);
+
return 0;
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 54344184dd7..24b18258654 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -223,7 +223,13 @@ void dss_dump_clocks(struct seq_file *s)
seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
- seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+ if (cpu_is_omap3630())
+ seq_printf(s, "dss1_alwon_fclk = %lu / %lu = %lu\n",
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ dss_clk_get_rate(DSS_CLK_FCK1));
+ else
+ seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
dpll4_ck_rate,
dpll4_ck_rate / dpll4_m4_ck_rate,
dss_clk_get_rate(DSS_CLK_FCK1));
@@ -293,7 +299,8 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
{
unsigned long prate;
- if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+ if (cinfo->fck_div > (cpu_is_omap3630() ? 32 : 16) ||
+ cinfo->fck_div == 0)
return -EINVAL;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -329,7 +336,10 @@ int dss_get_clock_div(struct dss_clock_info *cinfo)
if (cpu_is_omap34xx()) {
unsigned long prate;
prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
- cinfo->fck_div = prate / (cinfo->fck / 2);
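+ /* OMAP3630 has no x2 multiplier on the DSS1 functional clock */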
+ if (cpu_is_omap3630())
+ cinfo->fck_div = prate / (cinfo->fck);
+ else
+ cinfo->fck_div = prate / (cinfo->fck / 2);
} else {
cinfo->fck_div = 0;
}
@@ -402,10 +412,14 @@ retry:
goto found;
} else if (cpu_is_omap34xx()) {
- for (fck_div = 16; fck_div > 0; --fck_div) {
+ for (fck_div = (cpu_is_omap3630() ? 32 : 16);
+ fck_div > 0; --fck_div) {
struct dispc_clock_info cur_dispc;
- fck = prate / fck_div * 2;
+ if (cpu_is_omap3630())
+ fck = prate / fck_div;
+ else
+ fck = prate / fck_div * 2;
if (fck > DISPC_MAX_FCK)
continue;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 24326a5fd29..786f433fd57 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -242,11 +242,22 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dispc_clock_info *dispc_cinfo);
/* SDI */
+#ifdef CONFIG_OMAP2_DSS_SDI
int sdi_init(bool skip_init);
void sdi_exit(void);
int sdi_init_display(struct omap_dss_device *display);
+#else
+static inline int sdi_init(bool skip_init)
+{
+ return 0;
+}
+static inline void sdi_exit(void)
+{
+}
+#endif
/* DSI */
+#ifdef CONFIG_OMAP2_DSS_DSI
int dsi_init(struct platform_device *pdev);
void dsi_exit(void);
@@ -270,11 +281,30 @@ void dsi_pll_uninit(void);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
u32 *fifo_low, u32 *fifo_high);
+#else
+static inline int dsi_init(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void dsi_exit(void)
+{
+}
+#endif
/* DPI */
+#ifdef CONFIG_OMAP2_DSS_DPI
int dpi_init(struct platform_device *pdev);
void dpi_exit(void);
int dpi_init_display(struct omap_dss_device *dssdev);
+#else
+static inline int dpi_init(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void dpi_exit(void)
+{
+}
+#endif
/* DISPC */
int dispc_init(void);
@@ -362,12 +392,23 @@ int dispc_get_clock_div(struct dispc_clock_info *cinfo);
/* VENC */
+#ifdef CONFIG_OMAP2_DSS_VENC
int venc_init(struct platform_device *pdev);
void venc_exit(void);
void venc_dump_regs(struct seq_file *s);
int venc_init_display(struct omap_dss_device *display);
+#else
+static inline int venc_init(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline void venc_exit(void)
+{
+}
+#endif
/* RFBI */
+#ifdef CONFIG_OMAP2_DSS_RFBI
int rfbi_init(void);
void rfbi_exit(void);
void rfbi_dump_regs(struct seq_file *s);
@@ -379,6 +420,15 @@ void rfbi_transfer_area(u16 width, u16 height,
void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
+#else
+static inline int rfbi_init(void)
+{
+ return 0;
+}
+static inline void rfbi_exit(void)
+{
+}
+#endif
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 0820986d4a6..9e1fbe531bf 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -843,6 +843,7 @@ static void configure_manager(enum omap_channel channel)
c = &dss_cache.manager_cache[channel];
+ dispc_set_default_color(channel, c->default_color);
dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
dispc_enable_trans_key(channel, c->trans_enabled);
dispc_enable_alpha_blending(channel, c->alpha_enabled);
@@ -940,6 +941,22 @@ static int configure_dispc(void)
return r;
}
+/* Make the coordinates even. There are some strange problems with OMAP and
+ * partial DSI update when the update widths are odd. */
+static void make_even(u16 *x, u16 *w)
+{
+ u16 x1, x2;
+
+ x1 = *x;
+ x2 = *x + *w;
+
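+ /* Round x1 down and x2 up to even values */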
+ x1 &= ~1;
+ x2 = ALIGN(x2, 2);
+
+ *x = x1;
+ *w = x2 - x1;
+}
+
/* Configure dispc for partial update. Return possibly modified update
* area */
void dss_setup_partial_planes(struct omap_dss_device *dssdev,
@@ -968,6 +985,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
return;
}
+ make_even(&x, &w);
+
spin_lock_irqsave(&dss_cache.lock, flags);
/* We need to show the whole overlay if it is scaled. So look for
@@ -1029,6 +1048,8 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
w = x2 - x1;
h = y2 - y1;
+ make_even(&x, &w);
+
DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
i, x, y, w, h);
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 12eb4042dd8..ee07a3cc22e 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -23,13 +23,16 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/regulator/consumer.h>
#include <plat/display.h>
+#include <plat/cpu.h>
#include "dss.h"
static struct {
bool skip_init;
bool update_enabled;
+ struct regulator *vdds_sdi_reg;
} sdi;
static void sdi_basic_init(void)
@@ -57,6 +60,10 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
+ r = regulator_enable(sdi.vdds_sdi_reg);
+ if (r)
+ goto err1;
+
/* In case of skip_init sdi_init has already enabled the clocks */
if (!sdi.skip_init)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -115,19 +122,12 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
dssdev->manager->enable(dssdev->manager);
- if (dssdev->driver->enable) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- goto err3;
- }
-
sdi.skip_init = 0;
return 0;
-err3:
- dssdev->manager->disable(dssdev->manager);
err2:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ regulator_disable(sdi.vdds_sdi_reg);
err1:
omap_dss_stop_device(dssdev);
err0:
@@ -137,15 +137,14 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- if (dssdev->driver->disable)
- dssdev->driver->disable(dssdev);
-
dssdev->manager->disable(dssdev->manager);
dss_sdi_disable();
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ regulator_disable(sdi.vdds_sdi_reg);
+
omap_dss_stop_device(dssdev);
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);
@@ -162,6 +161,11 @@ int sdi_init(bool skip_init)
/* we store this for first display enable, then clear it */
sdi.skip_init = skip_init;
+ sdi.vdds_sdi_reg = dss_get_vdds_sdi();
+ if (IS_ERR(sdi.vdds_sdi_reg)) {
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(sdi.vdds_sdi_reg);
+ }
/*
* Enable clocks already here, otherwise there would be a toggle
* of them until sdi_display_enable is called.
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index f0ba5732d84..eff35050e28 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -479,12 +479,6 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
goto err1;
}
- if (dssdev->platform_enable) {
- r = dssdev->platform_enable(dssdev);
- if (r)
- goto err2;
- }
-
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -494,13 +488,9 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
- mutex_unlock(&venc.venc_lock);
-
- return r;
-err2:
- venc_power_off(dssdev);
err1:
mutex_unlock(&venc.venc_lock);
+
return r;
}
@@ -524,9 +514,6 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
/* wait at least 5 vsyncs after disabling the LCD */
msleep(100);
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
end:
mutex_unlock(&venc.venc_lock);
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 1ffa760b854..9c7361871d7 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -183,13 +183,14 @@ int omapfb_update_window(struct fb_info *fbi,
struct omapfb2_device *fbdev = ofbi->fbdev;
int r;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
r = omapfb_update_window_nolock(fbi, x, y, w, h);
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return r;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 62bb88f5c19..5179219128b 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -57,7 +57,8 @@ static ssize_t store_rotate_type(struct device *dev,
if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
return -EINVAL;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
r = 0;
if (rot_type == ofbi->rotation_type)
@@ -105,7 +106,8 @@ static ssize_t store_mirror(struct device *dev,
if (mirror != 0 && mirror != 1)
return -EINVAL;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
ofbi->mirror = mirror;
@@ -137,8 +139,9 @@ static ssize_t show_overlays(struct device *dev,
ssize_t l = 0;
int t;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
for (t = 0; t < ofbi->num_overlays; t++) {
struct omap_overlay *ovl = ofbi->overlays[t];
@@ -154,8 +157,8 @@ static ssize_t show_overlays(struct device *dev,
l += snprintf(buf + l, PAGE_SIZE - l, "\n");
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return l;
}
@@ -195,8 +198,9 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
if (buf[len - 1] == '\n')
len = len - 1;
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
omapfb_lock(fbdev);
- lock_fb_info(fbi);
if (len > 0) {
char *p = (char *)buf;
@@ -303,8 +307,8 @@ static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
r = count;
out:
- unlock_fb_info(fbi);
omapfb_unlock(fbdev);
+ unlock_fb_info(fbi);
return r;
}
@@ -317,7 +321,8 @@ static ssize_t show_overlays_rotate(struct device *dev,
ssize_t l = 0;
int t;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
for (t = 0; t < ofbi->num_overlays; t++) {
l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
@@ -345,7 +350,8 @@ static ssize_t store_overlays_rotate(struct device *dev,
if (buf[len - 1] == '\n')
len = len - 1;
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
if (len > 0) {
char *p = (char *)buf;
@@ -416,7 +422,8 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
- lock_fb_info(fbi);
+ if (!lock_fb_info(fbi))
+ return -ENODEV;
for (i = 0; i < ofbi->num_overlays; i++) {
if (ofbi->overlays[i]->info.enabled) {
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 81440f2b909..c85dd408a9b 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -353,8 +353,11 @@ static const struct of_device_id p9100_match[] = {
MODULE_DEVICE_TABLE(of, p9100_match);
static struct of_platform_driver p9100_driver = {
- .name = "p9100",
- .match_table = p9100_match,
+ .driver = {
+ .name = "p9100",
+ .owner = THIS_MODULE,
+ .of_match_table = p9100_match,
+ },
.probe = p9100_probe,
.remove = __devexit_p(p9100_remove),
};
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 8a204e7a5b5..72a1f4c0473 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -536,7 +536,7 @@ static int __init platinumfb_setup(char *options)
static int __devinit platinumfb_probe(struct of_device* odev,
const struct of_device_id *match)
{
- struct device_node *dp = odev->node;
+ struct device_node *dp = odev->dev.of_node;
struct fb_info *info;
struct fb_info_platinum *pinfo;
volatile __u8 *fbuffer;
@@ -679,8 +679,11 @@ static struct of_device_id platinumfb_match[] =
static struct of_platform_driver platinum_driver =
{
- .name = "platinumfb",
- .match_table = platinumfb_match,
+ .driver = {
+ .name = "platinumfb",
+ .owner = THIS_MODULE,
+ .of_match_table = platinumfb_match,
+ },
.probe = platinumfb_probe,
.remove = platinumfb_remove,
};
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 2b094dec4a5..46b430978bc 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -631,7 +631,7 @@ static struct fb_ops s3c2410fb_ops = {
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/
-static int __init s3c2410fb_map_video_memory(struct fb_info *info)
+static int __devinit s3c2410fb_map_video_memory(struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
dma_addr_t map_dma;
@@ -814,7 +814,7 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
static char driver_name[] = "s3c2410fb";
-static int __init s3c24xxfb_probe(struct platform_device *pdev,
+static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
enum s3c_drv_type drv_type)
{
struct s3c2410fb_info *info;
@@ -1018,7 +1018,7 @@ static int __devinit s3c2412fb_probe(struct platform_device *pdev)
/*
* Cleanup
*/
-static int s3c2410fb_remove(struct platform_device *pdev)
+static int __devexit s3c2410fb_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct s3c2410fb_info *info = fbinfo->par;
@@ -1096,7 +1096,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
static struct platform_driver s3c2410fb_driver = {
.probe = s3c2410fb_probe,
- .remove = s3c2410fb_remove,
+ .remove = __devexit_p(s3c2410fb_remove),
.suspend = s3c2410fb_suspend,
.resume = s3c2410fb_resume,
.driver = {
@@ -1107,7 +1107,7 @@ static struct platform_driver s3c2410fb_driver = {
static struct platform_driver s3c2412fb_driver = {
.probe = s3c2412fb_probe,
- .remove = s3c2410fb_remove,
+ .remove = __devexit_p(s3c2410fb_remove),
.suspend = s3c2410fb_suspend,
.resume = s3c2410fb_resume,
.driver = {
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4c037..dce8c97b433 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
"S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
"S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
"S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
- "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"};
+ "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
+ "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
#define CHIP_UNKNOWN 0x00
#define CHIP_732_TRIO32 0x01
@@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
#define CHIP_356_VIRGE_GX2 0x0D
#define CHIP_357_VIRGE_GX2P 0x0E
#define CHIP_359_VIRGE_GX2P 0x0F
+#define CHIP_360_TRIO3D_1X 0x10
+#define CHIP_362_TRIO3D_2X 0x11
+#define CHIP_368_TRIO3D_2X 0x12
#define CHIP_XXX_TRIO 0x80
#define CHIP_XXX_TRIO64V2_DXGX 0x81
#define CHIP_XXX_VIRGE_DXGX 0x82
+#define CHIP_36X_TRIO3D_1X_2X 0x83
#define CHIP_UNDECIDED_FLAG 0x80
#define CHIP_MASK 0xFF
@@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
{
+ struct s3fb_info *par = info->par;
u16 m, n, r;
u8 regval;
int rv;
@@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
/* Set S3 clock registers */
- vga_wseq(NULL, 0x12, ((n - 2) | (r << 5)));
+ if (par->chip == CHIP_360_TRIO3D_1X ||
+ par->chip == CHIP_362_TRIO3D_2X ||
+ par->chip == CHIP_368_TRIO3D_2X) {
+ vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
+ vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
+ } else
+ vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
vga_wseq(NULL, 0x13, m - 2);
udelay(1000);
@@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int s3fb_set_par(struct fb_info *info)
{
struct s3fb_info *par = info->par;
- u32 value, mode, hmul, offset_value, screen_size, multiplex;
+ u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
u32 bpp = info->var.bits_per_pixel;
if (bpp != 0) {
@@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info)
svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
- svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits
+ svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
/* svga_wcrt_mask(0x58, 0x03, 0x03); */
@@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info)
pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
svga_wcrt_multi(s3_offset_regs, offset_value);
- vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
- vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
- vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
- vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+ if (par->chip != CHIP_360_TRIO3D_1X &&
+ par->chip != CHIP_362_TRIO3D_2X &&
+ par->chip != CHIP_368_TRIO3D_2X) {
+ vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
+ vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
+ vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
+ vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+ }
vga_wcrt(NULL, 0x3A, 0x35);
svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info)
vga_wcrt(NULL, 0x66, 0x90);
}
+ if (par->chip == CHIP_360_TRIO3D_1X ||
+ par->chip == CHIP_362_TRIO3D_2X ||
+ par->chip == CHIP_368_TRIO3D_2X) {
+ dbytes = info->var.xres * ((bpp+7)/8);
+ vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
+ vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
+
+ vga_wcrt(NULL, 0x66, 0x81);
+ }
+
svga_wcrt_mask(0x31, 0x00, 0x40);
multiplex = 0;
hmul = 1;
@@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info)
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
- if (info->var.pixclock > 20000) {
- svga_wcrt_mask(0x50, 0x00, 0x30);
+ svga_wcrt_mask(0x50, 0x00, 0x30);
+ if (info->var.pixclock > 20000 ||
+ par->chip == CHIP_360_TRIO3D_1X ||
+ par->chip == CHIP_362_TRIO3D_2X ||
+ par->chip == CHIP_368_TRIO3D_2X)
svga_wcrt_mask(0x67, 0x00, 0xF0);
- } else {
- svga_wcrt_mask(0x50, 0x00, 0x30);
+ else {
svga_wcrt_mask(0x67, 0x10, 0xF0);
multiplex = 1;
}
@@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info)
} else {
svga_wcrt_mask(0x50, 0x10, 0x30);
svga_wcrt_mask(0x67, 0x30, 0xF0);
- hmul = 2;
+ if (par->chip != CHIP_360_TRIO3D_1X &&
+ par->chip != CHIP_362_TRIO3D_2X &&
+ par->chip != CHIP_368_TRIO3D_2X)
+ hmul = 2;
}
break;
case 5:
@@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info)
} else {
svga_wcrt_mask(0x50, 0x10, 0x30);
svga_wcrt_mask(0x67, 0x50, 0xF0);
- hmul = 2;
+ if (par->chip != CHIP_360_TRIO3D_1X &&
+ par->chip != CHIP_362_TRIO3D_2X &&
+ par->chip != CHIP_368_TRIO3D_2X)
+ hmul = 2;
}
break;
case 6:
@@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip)
return CHIP_385_VIRGE_GX;
}
+ if (chip == CHIP_36X_TRIO3D_1X_2X) {
+ switch (vga_rcrt(NULL, 0x2f)) {
+ case 0x00:
+ return CHIP_360_TRIO3D_1X;
+ case 0x01:
+ return CHIP_362_TRIO3D_2X;
+ case 0x02:
+ return CHIP_368_TRIO3D_2X;
+ }
+ }
+
return CHIP_UNKNOWN;
}
@@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
vga_wcrt(NULL, 0x38, 0x48);
vga_wcrt(NULL, 0x39, 0xA5);
- /* Find how many physical memory there is on card */
- /* 0x36 register is accessible even if other registers are locked */
- regval = vga_rcrt(NULL, 0x36);
- info->screen_size = s3_memsizes[regval >> 5] << 10;
- info->fix.smem_len = info->screen_size;
-
+ /* Identify chip type */
par->chip = id->driver_data & CHIP_MASK;
par->rev = vga_rcrt(NULL, 0x2f);
if (par->chip & CHIP_UNDECIDED_FLAG)
par->chip = s3_identification(par->chip);
+ /* Find how much physical memory there is on the card */
+ /* 0x36 register is accessible even if other registers are locked */
+ regval = vga_rcrt(NULL, 0x36);
+ if (par->chip == CHIP_360_TRIO3D_1X ||
+ par->chip == CHIP_362_TRIO3D_2X ||
+ par->chip == CHIP_368_TRIO3D_2X) {
+ switch ((regval & 0xE0) >> 5) {
+ case 0: /* 8MB -- only 4MB usable for display */
+ case 1: /* 4MB with 32-bit bus */
+ case 2: /* 4MB */
+ info->screen_size = 4 << 20;
+ break;
+ case 6: /* 2MB */
+ info->screen_size = 2 << 20;
+ break;
+ }
+ } else
+ info->screen_size = s3_memsizes[regval >> 5] << 10;
+ info->fix.smem_len = info->screen_size;
+
/* Find MCLK frequency */
regval = vga_rseq(NULL, 0x10);
par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
@@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
+ {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
{0, 0, 0, 0, 0, 0, 0}
};
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 7a3a5e28eca..53455f29551 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -47,7 +47,7 @@ static int ywrap = 0;
static int flatpanel_id = -1;
-static struct fb_fix_screeninfo sgivwfb_fix __initdata = {
+static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
.id = "SGI Vis WS FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -57,7 +57,7 @@ static struct fb_fix_screeninfo sgivwfb_fix __initdata = {
.line_length = 640,
};
-static struct fb_var_screeninfo sgivwfb_var __initdata = {
+static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
/* 640x480, 8 bpp */
.xres = 640,
.yres = 480,
@@ -79,7 +79,7 @@ static struct fb_var_screeninfo sgivwfb_var __initdata = {
.vmode = FB_VMODE_NONINTERLACED
};
-static struct fb_var_screeninfo sgivwfb_var1600sw __initdata = {
+static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = {
/* 1600x1024, 8 bpp */
.xres = 1600,
.yres = 1024,
@@ -825,7 +825,7 @@ fail_ioremap_regs:
return -ENXIO;
}
-static int sgivwfb_remove(struct platform_device *dev)
+static int __devexit sgivwfb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
@@ -845,7 +845,7 @@ static int sgivwfb_remove(struct platform_device *dev)
static struct platform_driver sgivwfb_driver = {
.probe = sgivwfb_probe,
- .remove = sgivwfb_remove,
+ .remove = __devexit_p(sgivwfb_remove),
.driver = {
.name = "sgivwfb",
},
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a531a0f7cdf..559bf1727a2 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1845,7 +1845,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, ivideo->myid);
+ strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index 23e69e834a1..489b44e8db8 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -114,7 +114,7 @@ static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
static int __devinit gfb_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct fb_info *info;
struct gfb_info *gp;
int err;
@@ -199,10 +199,13 @@ static const struct of_device_id gfb_match[] = {
MODULE_DEVICE_TABLE(of, ffb_match);
static struct of_platform_driver gfb_driver = {
- .name = "gfb",
- .match_table = gfb_match,
.probe = gfb_probe,
.remove = __devexit_p(gfb_remove),
+ .driver = {
+ .name = "gfb",
+ .owner = THIS_MODULE,
+ .of_match_table = gfb_match,
+ },
};
static int __init gfb_init(void)
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index c0c2b18fcdc..ef7a7bd8b50 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -512,8 +512,11 @@ static const struct of_device_id tcx_match[] = {
MODULE_DEVICE_TABLE(of, tcx_match);
static struct of_platform_driver tcx_driver = {
- .name = "tcx",
- .match_table = tcx_match,
+ .driver = {
+ .name = "tcx",
+ .owner = THIS_MODULE,
+ .of_match_table = tcx_match,
+ },
.probe = tcx_probe,
.remove = __devexit_p(tcx_remove),
};
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 0cadf7aee27..090aa1a9be6 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -177,7 +177,7 @@ static void vesafb_destroy(struct fb_info *info)
{
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->aperture_base, info->aperture_size);
+ release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
framebuffer_release(info);
}
@@ -295,8 +295,13 @@ static int __init vesafb_probe(struct platform_device *dev)
info->par = NULL;
/* set vesafb aperture size for generic probing */
- info->aperture_base = screen_info.lfb_base;
- info->aperture_size = size_total;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ err = -ENOMEM;
+ goto err;
+ }
+ info->apertures->ranges[0].base = screen_info.lfb_base;
+ info->apertures->ranges[0].size = size_total;
info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 9b5532b4de3..bc67251f1a2 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
vfree(mem);
}
-static struct fb_var_screeninfo vfb_default __initdata = {
+static struct fb_var_screeninfo vfb_default __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo vfb_fix __initdata = {
+static struct fb_fix_screeninfo vfb_fix __devinitdata = {
.id = "Virtual FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index bf638a47a5b..28ccab44a39 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -65,7 +65,7 @@ struct vga16fb_par {
/* --------------------------------------------------------------------- */
-static struct fb_var_screeninfo vga16fb_defined __initdata = {
+static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = {
};
/* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix __initdata = {
+static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
.id = "VGA16 VGA",
.smem_start = VGA_FB_PHYS,
.smem_len = VGA_FB_PHYS_LEN,
@@ -1263,10 +1263,19 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
vga_imageblit_color(info, image);
}
+static void vga16fb_destroy(struct fb_info *info)
+{
+ iounmap(info->screen_base);
+ fb_dealloc_cmap(&info->cmap);
+ /* XXX unshare VGA regions */
+ framebuffer_release(info);
+}
+
static struct fb_ops vga16fb_ops = {
.owner = THIS_MODULE,
.fb_open = vga16fb_open,
.fb_release = vga16fb_release,
+ .fb_destroy = vga16fb_destroy,
.fb_check_var = vga16fb_check_var,
.fb_set_par = vga16fb_set_par,
.fb_setcolreg = vga16fb_setcolreg,
@@ -1278,7 +1287,7 @@ static struct fb_ops vga16fb_ops = {
};
#ifndef MODULE
-static int vga16fb_setup(char *options)
+static int __init vga16fb_setup(char *options)
{
char *this_opt;
@@ -1306,6 +1315,11 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
ret = -ENOMEM;
goto err_fb_alloc;
}
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
/* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
@@ -1335,7 +1349,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
info->fix = vga16fb_fix;
/* supports rectangles with widths of multiples of 8 */
info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
- info->flags = FBINFO_FLAG_DEFAULT |
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
FBINFO_HWACCEL_YPAN;
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
@@ -1354,6 +1368,9 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
vga16fb_update_fix(info);
+ info->apertures->ranges[0].base = VGA_FB_PHYS;
+ info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
ret = -EINVAL;
@@ -1376,24 +1393,19 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
return ret;
}
-static int vga16fb_remove(struct platform_device *dev)
+static int __devexit vga16fb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
- if (info) {
+ if (info)
unregister_framebuffer(info);
- iounmap(info->screen_base);
- fb_dealloc_cmap(&info->cmap);
- /* XXX unshare VGA regions */
- framebuffer_release(info);
- }
return 0;
}
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
- .remove = vga16fb_remove,
+ .remove = __devexit_p(vga16fb_remove),
.driver = {
.name = "vga16fb",
},
diff --git a/drivers/video/via/Makefile b/drivers/video/via/Makefile
index eeed238ad6a..d496adb0f83 100644
--- a/drivers/video/via/Makefile
+++ b/drivers/video/via/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_FB_VIA) += viafb.o
-viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o
+viafb-y :=viafbdev.o hw.o via_i2c.o dvi.o lcd.o ioctl.o accel.o \
+ via_utility.o vt1636.o global.o tblDPASetting.o viamode.o tbl1636.o \
+ via-core.o via-gpio.o via_modesetting.o
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index d5077dfa9e0..e44893ea590 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -18,14 +18,45 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
+/*
+ * Figure out an appropriate bytes-per-pixel setting.
+ */
+static int viafb_set_bpp(void __iomem *engine, u8 bpp)
+{
+ u32 gemode;
+
+ /* Preserve the reserved bits */
+ /* Lowest 2 bits to zero gives us no rotation */
+ gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
+ switch (bpp) {
+ case 8:
+ gemode |= VIA_GEM_8bpp;
+ break;
+ case 16:
+ gemode |= VIA_GEM_16bpp;
+ break;
+ case 32:
+ gemode |= VIA_GEM_32bpp;
+ break;
+ default:
+ printk(KERN_WARNING "viafb_set_bpp: Unsupported bpp %d\n", bpp);
+ return -EINVAL;
+ }
+ writel(gemode, engine + VIA_REG_GEMODE);
+ return 0;
+}
+
+
static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
u8 dst_bpp, u32 dst_addr, u32 dst_pitch, u32 dst_x, u32 dst_y,
u32 *src_mem, u32 src_addr, u32 src_pitch, u32 src_x, u32 src_y,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_1: Invalid operation: %d\n", op);
@@ -59,22 +90,9 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_1: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op != VIA_BITBLT_FILL) {
if (src_x & (op == VIA_BITBLT_MONO ? 0xFFFF8000 : 0xFFFFF000)
@@ -171,6 +189,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
u32 fg_color, u32 bg_color, u8 fill_rop)
{
u32 ge_cmd = 0, tmp, i;
+ int ret;
if (!op || op > 3) {
printk(KERN_WARNING "hw_bitblt_2: Invalid operation: %d\n", op);
@@ -204,22 +223,9 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
}
}
- switch (dst_bpp) {
- case 8:
- tmp = 0x00000000;
- break;
- case 16:
- tmp = 0x00000100;
- break;
- case 32:
- tmp = 0x00000300;
- break;
- default:
- printk(KERN_WARNING "hw_bitblt_2: Unsupported bpp %d\n",
- dst_bpp);
- return -EINVAL;
- }
- writel(tmp, engine + 0x04);
+ ret = viafb_set_bpp(engine, dst_bpp);
+ if (ret)
+ return ret;
if (op == VIA_BITBLT_FILL)
tmp = 0;
@@ -312,17 +318,29 @@ int viafb_init_engine(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
void __iomem *engine;
+ int highest_reg, i;
u32 vq_start_addr, vq_end_addr, vq_start_low, vq_end_low, vq_high,
vq_len, chip_name = viapar->shared->chip_info.gfx_chip_name;
- engine = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
- viapar->shared->engine_mmio = engine;
+ engine = viapar->shared->vdev->engine_mmio;
if (!engine) {
printk(KERN_WARNING "viafb_init_accel: ioremap failed, "
"hardware acceleration disabled\n");
return -ENOMEM;
}
+ /* Initialize registers to reset the 2D engine */
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_M1:
+ highest_reg = 0x5c;
+ break;
+ default:
+ highest_reg = 0x40;
+ break;
+ }
+ for (i = 0; i <= highest_reg; i += 4)
+ writel(0x0, engine + i);
+
switch (chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
@@ -352,13 +370,28 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
- /* Init 2D engine reg to reset 2D engine */
- writel(0x0, engine + VIA_REG_KEYCONTROL);
+#if defined(CONFIG_FB_VIA_CAMERA) || defined(CONFIG_FB_VIA_CAMERA_MODULE)
+ /*
+ * Set aside a chunk of framebuffer memory for the camera
+ * driver. Someday this driver probably needs a proper allocator
+ * for fbmem; for now, we just have to do this before the
+ * framebuffer initializes itself.
+ *
+ * As for the size: the engine can handle three frames,
+ * 16 bits deep, up to VGA resolution.
+ */
+ viapar->shared->vdev->camera_fbmem_size = 3*VGA_HEIGHT*VGA_WIDTH*2;
+ viapar->fbmem_free -= viapar->shared->vdev->camera_fbmem_size;
+ viapar->fbmem_used += viapar->shared->vdev->camera_fbmem_size;
+ viapar->shared->vdev->camera_fbmem_offset = viapar->fbmem_free;
+#endif
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
writel(0x00100000, engine + VIA_REG_CR_TRANSET);
writel(0x680A0000, engine + VIA_REG_CR_TRANSPACE);
writel(0x02000000, engine + VIA_REG_CR_TRANSPACE);
@@ -393,6 +426,8 @@ int viafb_init_engine(struct fb_info *info)
switch (chip_name) {
case UNICHROME_K8M890:
case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
vq_start_low |= 0x20000000;
vq_end_low |= 0x20000000;
vq_high |= 0x20000000;
@@ -446,7 +481,7 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
struct viafb_par *viapar = info->par;
u32 temp, iga_path = viapar->iga_path;
- temp = readl(viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ temp = readl(viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
switch (Status) {
case HW_Cursor_ON:
temp |= 0x1;
@@ -463,23 +498,33 @@ void viafb_show_hw_cursor(struct fb_info *info, int Status)
default:
temp &= 0x7FFFFFFF;
}
- writel(temp, viapar->shared->engine_mmio + VIA_REG_CURSOR_MODE);
+ writel(temp, viapar->shared->vdev->engine_mmio + VIA_REG_CURSOR_MODE);
}
void viafb_wait_engine_idle(struct fb_info *info)
{
struct viafb_par *viapar = info->par;
int loop = 0;
-
- while (!(readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
- loop++;
- cpu_relax();
+ u32 mask;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
+
+ switch (viapar->shared->chip_info.twod_engine) {
+ case VIA_2D_ENG_H5:
+ case VIA_2D_ENG_M1:
+ mask = VIA_CMD_RGTR_BUSY_M1 | VIA_2D_ENG_BUSY_M1 |
+ VIA_3D_ENG_BUSY_M1;
+ break;
+ default:
+ while (!(readl(engine + VIA_REG_STATUS) &
+ VIA_VR_QUEUE_BUSY) && (loop < MAXLOOP)) {
+ loop++;
+ cpu_relax();
+ }
+ mask = VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY;
+ break;
}
- while ((readl(viapar->shared->engine_mmio + VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)) &&
- (loop < MAXLOOP)) {
+ while ((readl(engine + VIA_REG_STATUS) & mask) && (loop < MAXLOOP)) {
loop++;
cpu_relax();
}
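Both branches of the rewritten wait function end in the same bounded poll; only the busy mask differs between engine generations. A minimal sketch of that shared step, assuming the usual kernel headers (<linux/io.h>, <linux/errno.h>) are in scope (via_poll_idle is a hypothetical helper, not something this patch introduces):

static int via_poll_idle(void __iomem *engine, int reg, u32 busy_mask)
{
	int loop = 0;

	/* Spin, bounded by MAXLOOP, until the masked busy bits clear. */
	while ((readl(engine + reg) & busy_mask) && loop < MAXLOOP) {
		loop++;
		cpu_relax();
	}
	return loop < MAXLOOP ? 0 : -ETIMEDOUT;
}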
diff --git a/drivers/video/via/accel.h b/drivers/video/via/accel.h
index 615c84ad0f0..2c122d29236 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/via/accel.h
@@ -67,6 +67,34 @@
/* from 0x100 to 0x1ff */
#define VIA_REG_COLORPAT 0x100
+/* defines for the VIA 2D registers on VT3353/3409 (M1 engine) */
+#define VIA_REG_GECMD_M1 0x000
+#define VIA_REG_GEMODE_M1 0x004
+#define VIA_REG_GESTATUS_M1 0x004 /* same as VIA_REG_GEMODE_M1 */
+#define VIA_REG_PITCH_M1 0x008 /* pitch of src and dst */
+#define VIA_REG_DIMENSION_M1 0x00C /* width and height */
+#define VIA_REG_DSTPOS_M1 0x010
+#define VIA_REG_LINE_XY_M1 0x010
+#define VIA_REG_DSTBASE_M1 0x014
+#define VIA_REG_SRCPOS_M1 0x018
+#define VIA_REG_LINE_K1K2_M1 0x018
+#define VIA_REG_SRCBASE_M1 0x01C
+#define VIA_REG_PATADDR_M1 0x020
+#define VIA_REG_MONOPAT0_M1 0x024
+#define VIA_REG_MONOPAT1_M1 0x028
+#define VIA_REG_OFFSET_M1 0x02C
+#define VIA_REG_LINE_ERROR_M1 0x02C
+#define VIA_REG_CLIPTL_M1 0x040 /* top and left of clipping */
+#define VIA_REG_CLIPBR_M1 0x044 /* bottom and right of clipping */
+#define VIA_REG_KEYCONTROL_M1 0x048 /* color key control */
+#define VIA_REG_FGCOLOR_M1 0x04C
+#define VIA_REG_DSTCOLORKEY_M1 0x04C /* same as VIA_REG_FGCOLOR_M1 */
+#define VIA_REG_BGCOLOR_M1 0x050
+#define VIA_REG_SRCCOLORKEY_M1 0x050 /* same as VIA_REG_BGCOLOR_M1 */
+#define VIA_REG_MONOPATFGC_M1 0x058 /* FG color of the mono pattern */
+#define VIA_REG_MONOPATBGC_M1 0x05C /* BG color of the mono pattern */
+#define VIA_REG_COLORPAT_M1 0x100 /* from 0x100 to 0x1ff */
+
/* VIA_REG_PITCH(0x38): Pitch Setting */
#define VIA_PITCH_ENABLE 0x80000000
@@ -157,6 +185,18 @@
/* Virtual Queue is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000
+/* VIA_REG_STATUS(0x400): Engine Status for H5 */
+#define VIA_CMD_RGTR_BUSY_H5 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_H5 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_H5 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_H5 0x00000004 /* Virtual Queue is busy */
+
+/* VIA_REG_STATUS(0x400): Engine Status for VT3353/3409 */
+#define VIA_CMD_RGTR_BUSY_M1 0x00000010 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY_M1 0x00000002 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY_M1 0x00001FE1 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY_M1 0x00000004 /* Virtual Queue is busy */
+
#define MAXLOOP 0xFFFFFF
#define VIA_BITBLT_COLOR 1
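The _M1 names above track the existing register set at shifted offsets, and the new *_BUSY_M1 bits are what the reworked viafb_wait_engine_idle() masks against. A sketch of how other code might select an offset by engine generation (the helper is hypothetical; enum via_2d_engine is added in chip.h below):

static inline int via_reg_keycontrol(enum via_2d_engine eng)
{
	/* The M1 engine moved the color-key control register to 0x48. */
	return eng == VIA_2D_ENG_M1 ? VIA_REG_KEYCONTROL_M1
				    : VIA_REG_KEYCONTROL;
}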
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index 8c06bd3c0b4..d9b6e06e070 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -121,9 +121,17 @@ struct lvds_chip_information {
int i2c_port;
};
+/* The type of 2D engine */
+enum via_2d_engine {
+ VIA_2D_ENG_H2,
+ VIA_2D_ENG_H5,
+ VIA_2D_ENG_M1,
+};
+
struct chip_information {
int gfx_chip_name;
int gfx_chip_revision;
+ enum via_2d_engine twod_engine;
struct tmds_chip_information tmds_chip_info;
struct lvds_chip_information lvds_chip_info;
struct lvds_chip_information lvds_chip_info2;
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index abe59b8c7a0..39b040bb381 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -18,6 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
static void tmds_register_write(int index, u8 data);
@@ -96,7 +98,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.tmds_chip_name = VT1632_TMDS;
viaparinfo->chip_info->
tmds_chip_info.tmds_chip_slave_addr = VT1632_TMDS_I2C_ADDR;
- viaparinfo->chip_info->tmds_chip_info.i2c_port = I2CPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_31;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID) != FAIL) {
/*
* Currently only support 12bits,dual edge,add 24bits mode later
@@ -110,7 +112,7 @@ int viafb_tmds_trasmitter_identify(void)
viaparinfo->chip_info->tmds_chip_info.i2c_port);
return OK;
} else {
- viaparinfo->chip_info->tmds_chip_info.i2c_port = GPIOPORTINDEX;
+ viaparinfo->chip_info->tmds_chip_info.i2c_port = VIA_PORT_2C;
if (check_tmds_chip(VT1632_DEVICE_ID_REG, VT1632_DEVICE_ID)
!= FAIL) {
tmds_register_write(0x08, 0x3b);
@@ -160,32 +162,26 @@ int viafb_tmds_trasmitter_identify(void)
static void tmds_register_write(int index, u8 data)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
-
- viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, index,
- data);
+ viafb_i2c_writebyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ index, data);
}
static int tmds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- tmds_chip_info.tmds_chip_slave_addr,
- (u8) index, &data);
+ viafb_i2c_readbyte(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, &data);
return data;
}
static int tmds_register_read_bytes(int index, u8 *buff, int buff_len)
{
- viaparinfo->shared->i2c_stuff.i2c_port =
- viaparinfo->chip_info->tmds_chip_info.i2c_port;
- viafb_i2c_readbytes((u8) viaparinfo->chip_info->tmds_chip_info.
- tmds_chip_slave_addr, (u8) index, buff, buff_len);
+ viafb_i2c_readbytes(viaparinfo->chip_info->tmds_chip_info.i2c_port,
+ (u8) viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr,
+ (u8) index, buff, buff_len);
return 0;
}
@@ -541,9 +537,10 @@ void viafb_dvi_enable(void)
else
data = 0x37;
viafb_i2c_writebyte(viaparinfo->chip_info->
- tmds_chip_info.
- tmds_chip_slave_addr,
- 0x08, data);
+ tmds_chip_info.i2c_port,
+ viaparinfo->chip_info->
+ tmds_chip_info.tmds_chip_slave_addr,
+ 0x08, data);
}
}
}
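All of the dvi.c conversions follow one pattern: the i2c port index now travels as the first argument instead of through the shared i2c_stuff state. A sketch of a read-modify-write helper built on the new calls (tmds_set_bits is hypothetical; the fields it reads are the same ones used above):

static void tmds_set_bits(u8 index, u8 bits)
{
	int port = viaparinfo->chip_info->tmds_chip_info.i2c_port;
	u8 addr = viaparinfo->chip_info->tmds_chip_info.tmds_chip_slave_addr;
	u8 val;

	/* Read the current register value, then write it back with bits set. */
	if (viafb_i2c_readbyte(port, addr, index, &val) < 0)
		return;
	viafb_i2c_writebyte(port, addr, index, val | bits);
}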
diff --git a/drivers/video/via/global.h b/drivers/video/via/global.h
index 8d95d5fd138..28221a062dd 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/via/global.h
@@ -41,7 +41,6 @@
#include "share.h"
#include "dvi.h"
#include "viamode.h"
-#include "via_i2c.h"
#include "hw.h"
#include "lcd.h"
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index f2583b1b527..b996803ae2c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
static struct pll_map pll_value[] = {
@@ -62,6 +63,7 @@ static struct pll_map pll_value[] = {
CX700_52_977M, VX855_52_977M},
{CLK_56_250M, CLE266_PLL_56_250M, K800_PLL_56_250M,
CX700_56_250M, VX855_56_250M},
+ {CLK_57_275M, 0, 0, 0, VX855_57_275M},
{CLK_60_466M, CLE266_PLL_60_466M, K800_PLL_60_466M,
CX700_60_466M, VX855_60_466M},
{CLK_61_500M, CLE266_PLL_61_500M, K800_PLL_61_500M,
@@ -525,8 +527,7 @@ static void dvi_patch_skew_dvp_low(void);
static void set_dvi_output_path(int set_iga, int output_interface);
static void set_lcd_output_path(int set_iga, int output_interface);
static void load_fix_bit_crtc_reg(void);
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+static void init_gfx_chip_info(int chip_type);
static void init_tmds_chip_info(void);
static void init_lvds_chip_info(void);
static void device_screen_off(void);
@@ -537,18 +538,6 @@ static void device_on(void);
static void enable_second_display_channel(void);
static void disable_second_display_channel(void);
-void viafb_write_reg(u8 index, u16 io_port, u8 data)
-{
- outb(index, io_port);
- outb(data, io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, data); */
-}
-u8 viafb_read_reg(int io_port, u8 index)
-{
- outb(index, io_port);
- return inb(io_port + 1);
-}
-
void viafb_lock_crt(void)
{
viafb_write_reg_mask(CR11, VIACR, BIT7, BIT7);
@@ -560,16 +549,6 @@ void viafb_unlock_crt(void)
viafb_write_reg_mask(CR47, VIACR, 0, BIT0);
}
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask)
-{
- u8 tmp;
-
- outb(index, io_port);
- tmp = inb(io_port + 1);
- outb((data & mask) | (tmp & (~mask)), io_port + 1);
- /*DEBUG_MSG(KERN_INFO "\nIndex=%2d Value=%2d", index, tmp); */
-}
-
void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
{
outb(index, LUT_INDEX_WRITE);
@@ -646,102 +625,6 @@ void viafb_set_iga_path(void)
}
}
-void viafb_set_primary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_address(0x%08X)\n", addr);
- viafb_write_reg(CR0D, VIACR, addr & 0xFF);
- viafb_write_reg(CR0C, VIACR, (addr >> 8) & 0xFF);
- viafb_write_reg(CR34, VIACR, (addr >> 16) & 0xFF);
- viafb_write_reg_mask(CR48, VIACR, (addr >> 24) & 0x1F, 0x1F);
-}
-
-void viafb_set_secondary_address(u32 addr)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_address(0x%08X)\n", addr);
- /* secondary display supports only quadword aligned memory */
- viafb_write_reg_mask(CR62, VIACR, (addr >> 2) & 0xFE, 0xFE);
- viafb_write_reg(CR63, VIACR, (addr >> 10) & 0xFF);
- viafb_write_reg(CR64, VIACR, (addr >> 18) & 0xFF);
- viafb_write_reg_mask(CRA3, VIACR, (addr >> 26) & 0x07, 0x07);
-}
-
-void viafb_set_primary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_pitch(0x%08X)\n", pitch);
- /* spec does not say that first adapter skips 3 bits but old
- * code did it and seems to be reasonable in analogy to 2nd adapter
- */
- pitch = pitch >> 3;
- viafb_write_reg(0x13, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x35, VIACR, (pitch >> (8 - 5)) & 0xE0, 0xE0);
-}
-
-void viafb_set_secondary_pitch(u32 pitch)
-{
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_pitch(0x%08X)\n", pitch);
- pitch = pitch >> 3;
- viafb_write_reg(0x66, VIACR, pitch & 0xFF);
- viafb_write_reg_mask(0x67, VIACR, (pitch >> 8) & 0x03, 0x03);
- viafb_write_reg_mask(0x71, VIACR, (pitch >> (10 - 7)) & 0x80, 0x80);
-}
-
-void viafb_set_primary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_primary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 15:
- value = 0x04;
- break;
- case 16:
- value = 0x14;
- break;
- case 24:
- value = 0x0C;
- break;
- case 30:
- value = 0x08;
- break;
- default:
- printk(KERN_WARNING "viafb_set_primary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x15, VIASR, value, 0x1C);
-}
-
-void viafb_set_secondary_color_depth(u8 depth)
-{
- u8 value;
-
- DEBUG_MSG(KERN_DEBUG "viafb_set_secondary_color_depth(%d)\n", depth);
- switch (depth) {
- case 8:
- value = 0x00;
- break;
- case 16:
- value = 0x40;
- break;
- case 24:
- value = 0xC0;
- break;
- case 30:
- value = 0x80;
- break;
- default:
- printk(KERN_WARNING "viafb_set_secondary_color_depth: "
- "Unsupported depth: %d\n", depth);
- return;
- }
-
- viafb_write_reg_mask(0x67, VIACR, value, 0xC0);
-}
-
static void set_color_register(u8 index, u8 red, u8 green, u8 blue)
{
outb(0xFF, 0x3C6); /* bit mask of palette */
@@ -1126,16 +1009,12 @@ void viafb_load_reg(int timing_value, int viafb_load_reg_num,
void viafb_write_regx(struct io_reg RegTable[], int ItemNum)
{
int i;
- unsigned char RegTemp;
/*DEBUG_MSG(KERN_INFO "Table Size : %x!!\n",ItemNum ); */
- for (i = 0; i < ItemNum; i++) {
- outb(RegTable[i].index, RegTable[i].port);
- RegTemp = inb(RegTable[i].port + 1);
- RegTemp = (RegTemp & (~RegTable[i].mask)) | RegTable[i].value;
- outb(RegTemp, RegTable[i].port + 1);
- }
+ for (i = 0; i < ItemNum; i++)
+ via_write_reg_mask(RegTable[i].port, RegTable[i].index,
+ RegTable[i].value, RegTable[i].mask);
}
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga)
@@ -1516,8 +1395,6 @@ u32 viafb_get_clk_value(int clk)
/* Set VCLK*/
void viafb_set_vclock(u32 CLK, int set_iga)
{
- unsigned char RegTemp;
-
/* H.W. Reset : ON */
viafb_write_reg_mask(CR17, VIACR, 0x00, BIT7);
@@ -1590,8 +1467,7 @@ void viafb_set_vclock(u32 CLK, int set_iga)
}
/* Fire! */
- RegTemp = inb(VIARMisc);
- outb(RegTemp | (BIT2 + BIT3), VIAWMisc);
+ via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
}
void viafb_load_crtc_timing(struct display_timing device_timing,
@@ -1835,6 +1711,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int index = 0;
int h_addr, v_addr;
u32 pll_D_N;
+ u8 polarity = 0;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -1863,20 +1740,11 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
v_addr = crt_reg.ver_addr;
/* update polarity for CRT timing */
- if (crt_table[index].h_sync_polarity == NEGATIVE) {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) |
- (BIT6 + BIT7), VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT6),
- VIAWMisc);
- } else {
- if (crt_table[index].v_sync_polarity == NEGATIVE)
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))) | (BIT7),
- VIAWMisc);
- else
- outb((inb(VIARMisc) & (~(BIT6 + BIT7))), VIAWMisc);
- }
+ if (crt_table[index].h_sync_polarity == NEGATIVE)
+ polarity |= BIT6;
+ if (crt_table[index].v_sync_polarity == NEGATIVE)
+ polarity |= BIT7;
+ via_write_misc_reg_mask(polarity, BIT6 | BIT7);
if (set_iga == IGA1) {
viafb_unlock_crt();
@@ -1910,10 +1778,9 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
}
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+void viafb_init_chip_info(int chip_type)
{
- init_gfx_chip_info(pdev, pdi);
+ init_gfx_chip_info(chip_type);
init_tmds_chip_info();
init_lvds_chip_info();
@@ -1980,12 +1847,11 @@ void viafb_update_device_setting(int hres, int vres,
}
}
-static void init_gfx_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi)
+static void init_gfx_chip_info(int chip_type)
{
u8 tmp;
- viaparinfo->chip_info->gfx_chip_name = pdi->driver_data;
+ viaparinfo->chip_info->gfx_chip_name = chip_type;
/* Check revision of CLE266 Chip */
if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) {
@@ -2016,6 +1882,21 @@ static void init_gfx_chip_info(struct pci_dev *pdev,
CX700_REVISION_700;
}
}
+
+ /* Determine which 2D engine we have */
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_VX800:
+ case UNICHROME_VX855:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_M1;
+ break;
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M900:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H5;
+ break;
+ default:
+ viaparinfo->chip_info->twod_engine = VIA_2D_ENG_H2;
+ break;
+ }
}
static void init_tmds_chip_info(void)
@@ -2232,13 +2113,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
/* Fill VPIT Parameters */
/* Write Misc Register */
- outb(VPIT.Misc, VIAWMisc);
+ outb(VPIT.Misc, VIA_MISC_REG_WRITE);
/* Write Sequencer */
- for (i = 1; i <= StdSR; i++) {
- outb(i, VIASR);
- outb(VPIT.SR[i - 1], VIASR + 1);
- }
+ for (i = 1; i <= StdSR; i++)
+ via_write_reg(VIASR, i, VPIT.SR[i - 1]);
viafb_write_reg_mask(0x15, VIASR, 0xA2, 0xA2);
viafb_set_iga_path();
@@ -2247,10 +2126,8 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
viafb_fill_crtc_timing(crt_timing, vmode_tbl, video_bpp / 8, IGA1);
/* Write Graphic Controller */
- for (i = 0; i < StdGR; i++) {
- outb(i, VIAGR);
- outb(VPIT.GR[i], VIAGR + 1);
- }
+ for (i = 0; i < StdGR; i++)
+ via_write_reg(VIAGR, i, VPIT.GR[i]);
/* Write Attribute Controller */
for (i = 0; i < StdAR; i++) {
@@ -2277,11 +2154,11 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
}
}
- viafb_set_primary_pitch(viafbinfo->fix.line_length);
- viafb_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
+ via_set_primary_pitch(viafbinfo->fix.line_length);
+ via_set_secondary_pitch(viafb_dual_fb ? viafbinfo1->fix.line_length
: viafbinfo->fix.line_length);
- viafb_set_primary_color_depth(viaparinfo->depth);
- viafb_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
+ via_set_primary_color_depth(viaparinfo->depth);
+ via_set_secondary_color_depth(viafb_dual_fb ? viaparinfo1->depth
: viaparinfo->depth);
/* Update Refresh Rate Setting */
@@ -2473,108 +2350,6 @@ static void disable_second_display_channel(void)
viafb_write_reg_mask(CR6A, VIACR, BIT6, BIT6);
}
-int viafb_get_fb_size_from_pci(void)
-{
- unsigned long configid, deviceid, FBSize = 0;
- int VideoMemSize;
- int DeviceFound = false;
-
- for (configid = 0x80000000; configid < 0x80010800; configid += 0x100) {
- outl(configid, (unsigned long)0xCF8);
- deviceid = (inl((unsigned long)0xCFC) >> 16) & 0xffff;
-
- switch (deviceid) {
- case CLE266:
- case KM400:
- outl(configid + 0xE0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- case CN400_FUNCTION3:
- case CN700_FUNCTION3:
- case CX700_FUNCTION3:
- case KM800_FUNCTION3:
- case KM890_FUNCTION3:
- case P4M890_FUNCTION3:
- case P4M900_FUNCTION3:
- case VX800_FUNCTION3:
- case VX855_FUNCTION3:
- /*case CN750_FUNCTION3: */
- outl(configid + 0xA0, (unsigned long)0xCF8);
- FBSize = inl((unsigned long)0xCFC);
- DeviceFound = true; /* Found device id */
- break;
-
- default:
- break;
- }
-
- if (DeviceFound)
- break;
- }
-
- DEBUG_MSG(KERN_INFO "Device ID = %lx\n", deviceid);
-
- FBSize = FBSize & 0x00007000;
- DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
-
- if (viaparinfo->chip_info->gfx_chip_name < UNICHROME_CX700) {
- switch (FBSize) {
- case 0x00004000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00005000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00006000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- } else {
- switch (FBSize) {
- case 0x00001000:
- VideoMemSize = (8 << 20); /*8M */
- break;
-
- case 0x00002000:
- VideoMemSize = (16 << 20); /*16M */
- break;
-
- case 0x00003000:
- VideoMemSize = (32 << 20); /*32M */
- break;
-
- case 0x00004000:
- VideoMemSize = (64 << 20); /*64M */
- break;
-
- case 0x00005000:
- VideoMemSize = (128 << 20); /*128M */
- break;
-
- case 0x00006000:
- VideoMemSize = (256 << 20); /*256M */
- break;
-
- case 0x00007000: /* Only on VX855/875 */
- VideoMemSize = (512 << 20); /*512M */
- break;
-
- default:
- VideoMemSize = (32 << 20); /*32M */
- break;
- }
- }
-
- return VideoMemSize;
-}
void viafb_set_dpa_gfx(int output_interface, struct GFX_DPA_SETTING\
*p_gfx_dpa_setting)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 12ef32d334c..a109de37981 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -24,6 +24,11 @@
#include "viamode.h"
#include "global.h"
+#include "via_modesetting.h"
+
+#define viafb_read_reg(p, i) via_read_reg(p, i)
+#define viafb_write_reg(i, p, d) via_write_reg(p, i, d)
+#define viafb_write_reg_mask(i, p, d, m) via_write_reg_mask(p, i, d, m)
/***************************************************
* Definition IGA1 Design Method of CRTC Registers *
@@ -823,8 +828,8 @@ struct iga2_crtc_timing {
};
/* device ID */
-#define CLE266 0x3123
-#define KM400 0x3205
+#define CLE266_FUNCTION3 0x3123
+#define KM400_FUNCTION3 0x3205
#define CN400_FUNCTION2 0x2259
#define CN400_FUNCTION3 0x3259
/* support VT3314 chipset */
@@ -870,7 +875,6 @@ extern int viafb_LCD_ON;
extern int viafb_DVI_ON;
extern int viafb_hotplug;
-void viafb_write_reg_mask(u8 index, int io_port, u8 data, u8 mask);
void viafb_set_output_path(int device, int set_iga,
int output_interface);
@@ -885,8 +889,6 @@ void viafb_crt_disable(void);
void viafb_crt_enable(void);
void init_ad9389(void);
/* Access I/O Function */
-void viafb_write_reg(u8 index, u16 io_port, u8 data);
-u8 viafb_read_reg(int io_port, u8 index);
void viafb_lock_crt(void);
void viafb_unlock_crt(void);
void viafb_load_fetch_count_reg(int h_addr, int bpp_byte, int set_iga);
@@ -900,20 +902,14 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
struct VideoModeTable *vmode_tbl1, int video_bpp1);
void viafb_fill_var_timing_info(struct fb_var_screeninfo *var, int refresh,
struct VideoModeTable *vmode_tbl);
-void viafb_init_chip_info(struct pci_dev *pdev,
- const struct pci_device_id *pdi);
+void viafb_init_chip_info(int chip_type);
void viafb_init_dac(int set_iga);
int viafb_get_pixclock(int hres, int vres, int vmode_refresh);
int viafb_get_refresh(int hres, int vres, u32 float_refresh);
void viafb_update_device_setting(int hres, int vres, int bpp,
int vmode_refresh, int flag);
-int viafb_get_fb_size_from_pci(void);
void viafb_set_iga_path(void);
-void viafb_set_primary_address(u32 addr);
-void viafb_set_secondary_address(u32 addr);
-void viafb_set_primary_pitch(u32 pitch);
-void viafb_set_secondary_pitch(u32 pitch);
void viafb_set_primary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_set_secondary_color_register(u8 index, u8 red, u8 green, u8 blue);
void viafb_get_fb_info(unsigned int *fb_base, unsigned int *fb_len);
diff --git a/drivers/video/via/ioctl.h b/drivers/video/via/ioctl.h
index de899807ead..c430fa23008 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/via/ioctl.h
@@ -75,7 +75,7 @@
/*SAMM operation flag*/
#define OP_SAMM 0x80
-#define LCD_PANEL_ID_MAXIMUM 22
+#define LCD_PANEL_ID_MAXIMUM 23
#define STATE_ON 0x1
#define STATE_OFF 0x0
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 1b1ccdc2d83..2ab0f156439 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -18,7 +18,8 @@
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
#include "lcdtbl.h"
@@ -172,18 +173,16 @@ static bool lvds_identify_integratedlvds(void)
int viafb_lvds_trasmitter_identify(void)
{
- viaparinfo->shared->i2c_stuff.i2c_port = I2CPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
- viaparinfo->chip_info->lvds_chip_info.i2c_port = I2CPORTINDEX;
+ if (viafb_lvds_identify_vt1636(VIA_PORT_31)) {
+ viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port i2c 0x31 \n");
+ "Found VIA VT1636 LVDS on port i2c 0x31\n");
} else {
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- if (viafb_lvds_identify_vt1636()) {
+ if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) {
viaparinfo->chip_info->lvds_chip_info.i2c_port =
- GPIOPORTINDEX;
+ VIA_PORT_2C;
DEBUG_MSG(KERN_INFO
- "Found VIA VT1636 LVDS on port gpio 0x2c \n");
+ "Found VIA VT1636 LVDS on port gpio 0x2c\n");
}
}
@@ -398,6 +397,15 @@ static void fp_id_to_vindex(int panel_id)
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
+ case 0x17:
+ /* OLPC XO-1.5 panel */
+ viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
+ viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
+ viaparinfo->lvds_setting_info->lcd_panel_id =
+ LCD_PANEL_IDD_1200X900;
+ viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
+ viaparinfo->lvds_setting_info->LCDDithering = 0;
+ break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
@@ -412,9 +420,8 @@ static int lvds_register_read(int index)
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = GPIOPORTINDEX;
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->
- lvds_chip_info.lvds_chip_slave_addr,
+ viafb_i2c_readbyte(VIA_PORT_2C,
+ (u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr,
(u8) index, &data);
return data;
}
diff --git a/drivers/video/via/lcd.h b/drivers/video/via/lcd.h
index 071f47cf5be..9762ec62b49 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/via/lcd.h
@@ -60,6 +60,8 @@
#define LCD_PANEL_IDB_1360X768 0x0B
/* Resolution: 480x640, Channel: single, Dithering: Enable */
#define LCD_PANEL_IDC_480X640 0x0C
+/* Resolution: 1200x900, Channel: single, Dithering: Disable */
+#define LCD_PANEL_IDD_1200X900 0x0D
extern int viafb_LCD2_ON;
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index d55aaa7b912..7f0de7f006a 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -43,16 +43,9 @@
/* Video Memory Size */
#define VIDEO_MEMORY_SIZE_16M 0x1000000
-/* standard VGA IO port
-*/
-#define VIARMisc 0x3CC
-#define VIAWMisc 0x3C2
-#define VIAStatus 0x3DA
-#define VIACR 0x3D4
-#define VIASR 0x3C4
-#define VIAGR 0x3CE
-#define VIAAR 0x3C0
-
+/*
+ * Lengths of the VPIT structure arrays.
+ */
#define StdCR 0x19
#define StdSR 0x04
#define StdGR 0x09
@@ -570,6 +563,10 @@
#define M1200X720_R60_HSP NEGATIVE
#define M1200X720_R60_VSP POSITIVE
+/* 1200x900@60 Sync Polarity (DCON) */
+#define M1200X900_R60_HSP NEGATIVE
+#define M1200X900_R60_VSP NEGATIVE
+
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
#define M1280x600_R60_VSP POSITIVE
@@ -651,6 +648,7 @@
#define CLK_52_406M 52406000
#define CLK_52_977M 52977000
#define CLK_56_250M 56250000
+#define CLK_57_275M 57275000
#define CLK_60_466M 60466000
#define CLK_61_500M 61500000
#define CLK_65_000M 65000000
@@ -939,6 +937,7 @@
#define VX855_52_406M 0x00580C03
#define VX855_52_977M 0x00940C05
#define VX855_56_250M 0x009D0C05
+#define VX855_57_275M 0x009D8C85 /* Used by XO panel */
#define VX855_60_466M 0x00A90C05
#define VX855_61_500M 0x00AC0C05
#define VX855_65_000M 0x006D0C03
@@ -1065,6 +1064,7 @@
#define RES_1600X1200_60HZ_PIXCLOCK 6172
#define RES_1600X1200_75HZ_PIXCLOCK 4938
#define RES_1280X720_60HZ_PIXCLOCK 13426
+#define RES_1200X900_60HZ_PIXCLOCK 17459
#define RES_1920X1080_60HZ_PIXCLOCK 5787
#define RES_1400X1050_60HZ_PIXCLOCK 8214
#define RES_1400X1050_75HZ_PIXCLOCK 6410
diff --git a/drivers/video/via/via-core.c b/drivers/video/via/via-core.c
new file mode 100644
index 00000000000..e8cfe839211
--- /dev/null
+++ b/drivers/video/via/via-core.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ */
+
+/*
+ * Core code for the Via multifunction framebuffer device.
+ */
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+#include <linux/via-gpio.h>
+#include "global.h"
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+/*
+ * The default port config.
+ */
+static struct via_port_cfg adap_configs[] = {
+ [VIA_PORT_26] = { VIA_PORT_I2C, VIA_MODE_OFF, VIASR, 0x26 },
+ [VIA_PORT_31] = { VIA_PORT_I2C, VIA_MODE_I2C, VIASR, 0x31 },
+ [VIA_PORT_25] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
+ [VIA_PORT_2C] = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
+ [VIA_PORT_3D] = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
+ { 0, 0, 0, 0 }
+};
+
+/*
+ * We currently only support one viafb device (will there ever be
+ * more than one?), so just declare it globally here.
+ */
+static struct viafb_dev global_dev;
+
+
+/*
+ * Basic register access; spinlock required.
+ */
+static inline void viafb_mmio_write(int reg, u32 v)
+{
+ iowrite32(v, global_dev.engine_mmio + reg);
+}
+
+static inline int viafb_mmio_read(int reg)
+{
+ return ioread32(global_dev.engine_mmio + reg);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt management. We have a single IRQ line for a lot of
+ * different functions, so we need to share it. The design here
+ * is that we don't want to reimplement the shared IRQ code here;
+ * we also want to avoid having contention for a single handler thread.
+ * So each subdev driver which needs interrupts just requests
+ * them directly from the kernel. We just have what's needed for
+ * overall access to the interrupt control register.
+ */
+
+/*
+ * Which interrupts are enabled now?
+ */
+static u32 viafb_enabled_ints;
+
+static void viafb_int_init(void)
+{
+ viafb_enabled_ints = 0;
+
+ viafb_mmio_write(VDE_INTERRUPT, 0);
+}
+
+/*
+ * Allow subdevs to ask for specific interrupts to be enabled. These
+ * functions must be called with reg_lock held
+ */
+void viafb_irq_enable(u32 mask)
+{
+ viafb_enabled_ints |= mask;
+ viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_enable);
+
+void viafb_irq_disable(u32 mask)
+{
+ viafb_enabled_ints &= ~mask;
+ if (viafb_enabled_ints == 0)
+ viafb_mmio_write(VDE_INTERRUPT, 0); /* Disable entirely */
+ else
+ viafb_mmio_write(VDE_INTERRUPT,
+ viafb_enabled_ints | VDE_I_ENABLE);
+}
+EXPORT_SYMBOL_GPL(viafb_irq_disable);
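+/*
+ * Minimal usage sketch for a subdev that wants its interrupt unmasked
+ * (VDE_I_MYDEV stands in for a real interrupt-enable bit):
+ *
+ *	spin_lock_irqsave(&vdev->reg_lock, flags);
+ *	viafb_irq_enable(VDE_I_MYDEV);
+ *	spin_unlock_irqrestore(&vdev->reg_lock, flags);
+ *
+ * The subdev still registers its own handler with request_irq(); these
+ * calls only manage the shared interrupt control register.
+ */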
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Access to the DMA engine. This currently provides what the camera
+ * driver needs (i.e. outgoing only) but is easily expandable if need
+ * be.
+ */
+
+/*
+ * There are four DMA channels in the vx855. For now, we only
+ * use one of them, though. Most of the time, the DMA channel
+ * will be idle, so we keep the IRQ handler unregistered except
+ * when some subsystem has indicated an interest.
+ */
+static int viafb_dma_users;
+static DECLARE_COMPLETION(viafb_dma_completion);
+/*
+ * This mutex protects viafb_dma_users and our global interrupt
+ * registration state; it also serializes access to the DMA
+ * engine.
+ */
+static DEFINE_MUTEX(viafb_dma_lock);
+
+/*
+ * The VX855 DMA descriptor (used for s/g transfers) looks
+ * like this.
+ */
+struct viafb_vx855_dma_descr {
+ u32 addr_low; /* Low part of phys addr */
+ u32 addr_high; /* High 12 bits of addr */
+ u32 fb_offset; /* Offset into FB memory */
+ u32 seg_size; /* Size, 16-byte units */
+ u32 tile_mode; /* "tile mode" setting */
+ u32 next_desc_low; /* Next descriptor addr */
+ u32 next_desc_high;
+ u32 pad; /* Fill out to 64 bytes */
+};
+
+/*
+ * Flags added to the "next descriptor low" pointers
+ */
+#define VIAFB_DMA_MAGIC 0x01 /* ??? Just has to be there */
+#define VIAFB_DMA_FINAL_SEGMENT 0x02 /* Final segment */
+
+/*
+ * The completion IRQ handler.
+ */
+static irqreturn_t viafb_dma_irq(int irq, void *data)
+{
+ int csr;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&global_dev.reg_lock);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ if (csr & VDMA_C_DONE) {
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ complete(&viafb_dma_completion);
+ ret = IRQ_HANDLED;
+ }
+ spin_unlock(&global_dev.reg_lock);
+ return ret;
+}
+
+/*
+ * Indicate a need for DMA functionality.
+ */
+int viafb_request_dma(void)
+{
+ int ret = 0;
+
+ /*
+ * Only VX855 is supported currently.
+ */
+ if (global_dev.chip_type != UNICHROME_VX855)
+ return -ENODEV;
+ /*
+ * Note the new user and set up our interrupt handler
+ * if need be.
+ */
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users++;
+ if (viafb_dma_users == 1) {
+ ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
+ IRQF_SHARED, "via-dma", &viafb_dma_users);
+ if (ret)
+ viafb_dma_users--;
+ else
+ viafb_irq_enable(VDE_I_DMA0TDEN);
+ }
+ mutex_unlock(&viafb_dma_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(viafb_request_dma);
+
+void viafb_release_dma(void)
+{
+ mutex_lock(&viafb_dma_lock);
+ viafb_dma_users--;
+ if (viafb_dma_users == 0) {
+ viafb_irq_disable(VDE_I_DMA0TDEN);
+ free_irq(global_dev.pdev->irq, &viafb_dma_users);
+ }
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_release_dma);
+
+
+#if 0
+/*
+ * Copy a single buffer from FB memory, synchronously. This code works
+ * but is not currently used.
+ */
+void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
+{
+ unsigned long flags;
+ int csr;
+
+ mutex_lock(&viafb_dma_lock);
+ init_completion(&viafb_dma_completion);
+ /*
+ * Program the controller.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ /* Enable ints; must happen after CSR0 write! */
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
+ viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
+ viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
+ /* Data sheet suggests DAR0 should be <<4, but it lies */
+ viafb_mmio_write(VDMA_DAR0, offset);
+ viafb_mmio_write(VDMA_DQWCR0, len >> 4);
+ viafb_mmio_write(VDMA_TMR0, 0);
+ viafb_mmio_write(VDMA_DPRL0, 0);
+ viafb_mmio_write(VDMA_DPRH0, 0);
+ viafb_mmio_write(VDMA_PMR0, 0);
+ csr = viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done.
+ */
+ wait_for_completion_interruptible(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
+#endif
+
+/*
+ * Do a scatter/gather DMA copy from FB memory. You must have done
+ * a successful call to viafb_request_dma() first.
+ */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
+{
+ struct viafb_vx855_dma_descr *descr;
+ void *descrpages;
+ dma_addr_t descr_handle;
+ unsigned long flags;
+ int i;
+ struct scatterlist *sgentry;
+ dma_addr_t nextdesc;
+
+ /*
+ * Get a place to put the descriptors.
+ */
+ descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr),
+ &descr_handle, GFP_KERNEL);
+ if (descrpages == NULL) {
+ dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&viafb_dma_lock);
+ /*
+ * Fill them in.
+ */
+ descr = descrpages;
+ nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
+ for_each_sg(sg, sgentry, nsg, i) {
+ dma_addr_t paddr = sg_dma_address(sgentry);
+ descr->addr_low = paddr & 0xfffffff0;
+ descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
+ descr->fb_offset = offset;
+ descr->seg_size = sg_dma_len(sgentry) >> 4;
+ descr->tile_mode = 0;
+ descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
+ descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
+ descr->pad = 0xffffffff; /* VIA driver does this */
+ offset += sg_dma_len(sgentry);
+ nextdesc += sizeof(struct viafb_vx855_dma_descr);
+ descr++;
+ }
+ descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
+ /*
+ * Program the engine.
+ */
+ spin_lock_irqsave(&global_dev.reg_lock, flags);
+ init_completion(&viafb_dma_completion);
+ viafb_mmio_write(VDMA_DQWCR0, 0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
+ viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
+ viafb_mmio_write(VDMA_DPRH0,
+ (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
+ (void) viafb_mmio_read(VDMA_CSR0);
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
+ spin_unlock_irqrestore(&global_dev.reg_lock, flags);
+ /*
+ * Now we just wait until the interrupt handler says
+ * we're done. Except that, actually, we need to wait a little
+ * longer: the interrupts seem to jump the gun a little and we
+ * get corrupted frames sometimes.
+ */
+ wait_for_completion_timeout(&viafb_dma_completion, 1);
+ msleep(1);
+ if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
+ printk(KERN_ERR "VIA DMA timeout!\n");
+ /*
+ * Clean up and we're done.
+ */
+ viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
+ viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
+ mutex_unlock(&viafb_dma_lock);
+ dma_free_coherent(&global_dev.pdev->dev,
+ nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
+ descr_handle);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
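+/*
+ * Rough calling pattern for a user of this interface (sketch only;
+ * "dev" is whatever struct device the caller maps against, and error
+ * handling is elided):
+ *
+ *	if (viafb_request_dma() == 0) {
+ *		nsg = dma_map_sg(dev, sglist, count, DMA_FROM_DEVICE);
+ *		viafb_dma_copy_out_sg(fb_offset, sglist, nsg);
+ *		dma_unmap_sg(dev, sglist, count, DMA_FROM_DEVICE);
+ *		viafb_release_dma();
+ *	}
+ */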
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Figure out how big our framebuffer memory is. Kind of ugly,
+ * but evidently we can't trust the information found in the
+ * fbdev configuration area.
+ */
+static u16 via_function3[] = {
+ CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
+ CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
+ P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3,
+};
+
+/* Get the BIOS-configured framebuffer size from PCI configuration space
+ * of function 3 in the respective chipset */
+static int viafb_get_fb_size_from_pci(int chip_type)
+{
+ int i;
+ u8 offset = 0;
+ u32 FBSize;
+ u32 VideoMemSize;
+
+ /* search for the "FUNCTION3" device in this chipset */
+ for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
+ NULL);
+ if (!pdev)
+ continue;
+
+ DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
+
+ switch (pdev->device) {
+ case CLE266_FUNCTION3:
+ case KM400_FUNCTION3:
+ offset = 0xE0;
+ break;
+ case CN400_FUNCTION3:
+ case CN700_FUNCTION3:
+ case CX700_FUNCTION3:
+ case KM800_FUNCTION3:
+ case KM890_FUNCTION3:
+ case P4M890_FUNCTION3:
+ case P4M900_FUNCTION3:
+ case VX800_FUNCTION3:
+ case VX855_FUNCTION3:
+ /*case CN750_FUNCTION3: */
+ offset = 0xA0;
+ break;
+ }
+
+ if (!offset)
+ break;
+
+ pci_read_config_dword(pdev, offset, &FBSize);
+ pci_dev_put(pdev);
+ }
+
+ if (!offset) {
+ printk(KERN_ERR "cannot determine framebuffer size\n");
+ return -EIO;
+ }
+
+ FBSize = FBSize & 0x00007000;
+ DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);
+
+ if (chip_type < UNICHROME_CX700) {
+ switch (FBSize) {
+ case 0x00004000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ } else {
+ switch (FBSize) {
+ case 0x00001000:
+ VideoMemSize = (8 << 20); /*8M */
+ break;
+
+ case 0x00002000:
+ VideoMemSize = (16 << 20); /*16M */
+ break;
+
+ case 0x00003000:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+
+ case 0x00004000:
+ VideoMemSize = (64 << 20); /*64M */
+ break;
+
+ case 0x00005000:
+ VideoMemSize = (128 << 20); /*128M */
+ break;
+
+ case 0x00006000:
+ VideoMemSize = (256 << 20); /*256M */
+ break;
+
+ case 0x00007000: /* Only on VX855/875 */
+ VideoMemSize = (512 << 20); /*512M */
+ break;
+
+ default:
+ VideoMemSize = (32 << 20); /*32M */
+ break;
+ }
+ }
+
+ return VideoMemSize;
+}
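+/*
+ * Worked example: a VX855 (>= CX700 branch above) reporting FBSize
+ * bits of 0x00005000 in config space ends up with 128 << 20, i.e.
+ * 128 MB of framebuffer memory.
+ */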
+
+
+/*
+ * Figure out and map our MMIO regions.
+ */
+static int __devinit via_pci_setup_mmio(struct viafb_dev *vdev)
+{
+ int ret;
+ /*
+ * Hook up to the device registers. Note that we soldier
+ * on if it fails; the framebuffer can operate (without
+ * acceleration) without this region.
+ */
+ vdev->engine_start = pci_resource_start(vdev->pdev, 1);
+ vdev->engine_len = pci_resource_len(vdev->pdev, 1);
+ vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
+ vdev->engine_len);
+ if (vdev->engine_mmio == NULL)
+ dev_err(&vdev->pdev->dev,
+ "Unable to map engine MMIO; operation will be "
+ "slow and crippled.\n");
+ /*
+ * Map in framebuffer memory. For now, failure here is
+ * fatal. Unfortunately, in the absence of significant
+ * vmalloc space, failure here is also entirely plausible.
+ * Eventually we want to move away from mapping this
+ * entire region.
+ */
+ vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
+ ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
+ if (ret < 0)
+ goto out_unmap;
+ vdev->fbmem = ioremap_nocache(vdev->fbmem_start, vdev->fbmem_len);
+ if (vdev->fbmem == NULL) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ return 0;
+out_unmap:
+ iounmap(vdev->engine_mmio);
+ return ret;
+}
+
+static void __devexit via_pci_teardown_mmio(struct viafb_dev *vdev)
+{
+ iounmap(vdev->fbmem);
+ iounmap(vdev->engine_mmio);
+}
+
+/*
+ * Create our subsidiary devices.
+ */
+static struct viafb_subdev_info {
+ char *name;
+ struct platform_device *platdev;
+} viafb_subdevs[] = {
+ {
+ .name = "viafb-gpio",
+ },
+ {
+ .name = "viafb-i2c",
+ }
+};
+#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)
+
+static int __devinit via_create_subdev(struct viafb_dev *vdev,
+ struct viafb_subdev_info *info)
+{
+ int ret;
+
+ info->platdev = platform_device_alloc(info->name, -1);
+ if (!info->platdev) {
+ dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
+ info->name);
+ return -ENOMEM;
+ }
+ info->platdev->dev.parent = &vdev->pdev->dev;
+ info->platdev->dev.platform_data = vdev;
+ ret = platform_device_add(info->platdev);
+ if (ret) {
+ dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
+ info->name);
+ platform_device_put(info->platdev);
+ info->platdev = NULL;
+ }
+ return ret;
+}
+
+static int __devinit via_setup_subdevs(struct viafb_dev *vdev)
+{
+ int i;
+
+ /*
+ * Ignore return values. Even if some of the devices
+ * fail to be created, we'll still be able to use some
+ * of the rest.
+ */
+ for (i = 0; i < N_SUBDEVS; i++)
+ via_create_subdev(vdev, viafb_subdevs + i);
+ return 0;
+}
+
+static void __devexit via_teardown_subdevs(void)
+{
+ int i;
+
+ for (i = 0; i < N_SUBDEVS; i++)
+ if (viafb_subdevs[i].platdev) {
+ viafb_subdevs[i].platdev->dev.platform_data = NULL;
+ platform_device_unregister(viafb_subdevs[i].platdev);
+ }
+}
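+/*
+ * Each subdev driver later retrieves the shared viafb_dev from its
+ * platform data, roughly like this (sketch only; the probe function
+ * name is made up):
+ *
+ *	static int viafb_subdev_probe(struct platform_device *platdev)
+ *	{
+ *		struct viafb_dev *vdev = platdev->dev.platform_data;
+ *		...
+ *	}
+ */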
+
+
+static int __devinit via_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ /*
+ * Global device initialization.
+ */
+ memset(&global_dev, 0, sizeof(global_dev));
+ global_dev.pdev = pdev;
+ global_dev.chip_type = ent->driver_data;
+ global_dev.port_cfg = adap_configs;
+ spin_lock_init(&global_dev.reg_lock);
+ ret = via_pci_setup_mmio(&global_dev);
+ if (ret)
+ goto out_disable;
+ /*
+ * Set up interrupts and create our subdevices. Continue even if
+ * some things fail.
+ */
+ viafb_int_init();
+ via_setup_subdevs(&global_dev);
+ /*
+ * Set up the framebuffer device
+ */
+ ret = via_fb_pci_probe(&global_dev);
+ if (ret)
+ goto out_subdevs;
+ return 0;
+
+out_subdevs:
+ via_teardown_subdevs();
+ via_pci_teardown_mmio(&global_dev);
+out_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit via_pci_remove(struct pci_dev *pdev)
+{
+ via_teardown_subdevs();
+ via_fb_pci_remove(pdev);
+ via_pci_teardown_mmio(&global_dev);
+ pci_disable_device(pdev);
+}
+
+
+static struct pci_device_id via_pci_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
+ .driver_data = UNICHROME_CLE266 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
+ .driver_data = UNICHROME_PM800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
+ .driver_data = UNICHROME_K400 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
+ .driver_data = UNICHROME_K800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
+ .driver_data = UNICHROME_CN700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
+ .driver_data = UNICHROME_K8M890 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
+ .driver_data = UNICHROME_CX700 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
+ .driver_data = UNICHROME_P4M900 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
+ .driver_data = UNICHROME_CN750 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
+ .driver_data = UNICHROME_VX800 },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
+ .driver_data = UNICHROME_VX855 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, via_pci_table);
+
+static struct pci_driver via_driver = {
+ .name = "viafb",
+ .id_table = via_pci_table,
+ .probe = via_pci_probe,
+ .remove = __devexit_p(via_pci_remove),
+};
+
+static int __init via_core_init(void)
+{
+ int ret;
+
+ ret = viafb_init();
+ if (ret)
+ return ret;
+ viafb_i2c_init();
+ viafb_gpio_init();
+ return pci_register_driver(&via_driver);
+}
+
+static void __exit via_core_exit(void)
+{
+ pci_unregister_driver(&via_driver);
+ viafb_gpio_exit();
+ viafb_i2c_exit();
+ viafb_exit();
+}
+
+module_init(via_core_init);
+module_exit(via_core_exit);
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
new file mode 100644
index 00000000000..595516aea69
--- /dev/null
+++ b/drivers/video/via/via-gpio.c
@@ -0,0 +1,285 @@
+/*
+ * Support for viafb GPIO ports.
+ *
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under version 2 of the GNU General Public License.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/via-core.h>
+#include <linux/via-gpio.h>
+
+/*
+ * The ports we know about. Note that the port-25 gpios are not
+ * mentioned in the datasheet.
+ */
+
+struct viafb_gpio {
+ char *vg_name; /* Data sheet name */
+ u16 vg_io_port;
+ u8 vg_port_index;
+ int vg_mask_shift;
+};
+
+static struct viafb_gpio viafb_all_gpios[] = {
+ {
+ .vg_name = "VGPIO0", /* Guess - not in datasheet */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO1",
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x25,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO2", /* aka DISPCLKI0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO3", /* aka DISPCLKO0 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x2c,
+ .vg_mask_shift = 0
+ },
+ {
+ .vg_name = "VGPIO4", /* DISPCLKI1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 1
+ },
+ {
+ .vg_name = "VGPIO5", /* DISPCLKO1 */
+ .vg_io_port = VIASR,
+ .vg_port_index = 0x3d,
+ .vg_mask_shift = 0
+ },
+};
+
+#define VIAFB_NUM_GPIOS ARRAY_SIZE(viafb_all_gpios)
+
+/*
+ * This structure controls the active GPIOs, which may be a subset
+ * of those which are known.
+ */
+
+struct viafb_gpio_cfg {
+ struct gpio_chip gpio_chip;
+ struct viafb_dev *vdev;
+ struct viafb_gpio *active_gpios[VIAFB_NUM_GPIOS];
+ char *gpio_names[VIAFB_NUM_GPIOS];
+};
+
+/*
+ * GPIO access functions
+ */
+static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ reg |= 0x40 << gpio->vg_mask_shift; /* output enable */
+ if (value)
+ reg |= 0x10 << gpio->vg_mask_shift;
+ else
+ reg &= ~(0x10 << gpio->vg_mask_shift);
+ via_write_reg(VIASR, gpio->vg_port_index, reg);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+}
+
+static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
+ int value)
+{
+ via_gpio_set(chip, nr, value);
+ return 0;
+}
+
+/*
+ * Set the input direction. I'm not sure this is right; we should
+ * be able to do input without disabling output.
+ */
+static int via_gpio_dir_input(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0,
+ 0x40 << gpio->vg_mask_shift);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return 0;
+}
+
+static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
+{
+ struct viafb_gpio_cfg *cfg = container_of(chip,
+ struct viafb_gpio_cfg,
+ gpio_chip);
+ u8 reg;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cfg->vdev->reg_lock, flags);
+ gpio = cfg->active_gpios[nr];
+ reg = via_read_reg(VIASR, gpio->vg_port_index);
+ spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+ return reg & (0x04 << gpio->vg_mask_shift);
+}
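+/*
+ * Bit layout used by the accessors above, per GPIO within its SR
+ * register: 0x40 << shift is the output enable, 0x10 << shift the
+ * output value, 0x04 << shift the input value, and bit 0x02 is the
+ * shared enable for the pair (see viafb_gpio_enable() below).
+ */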
+
+
+static struct viafb_gpio_cfg gpio_config = {
+ .gpio_chip = {
+ .label = "VIAFB onboard GPIO",
+ .owner = THIS_MODULE,
+ .direction_output = via_gpio_dir_out,
+ .set = via_gpio_set,
+ .direction_input = via_gpio_dir_input,
+ .get = via_gpio_get,
+ .base = -1,
+ .ngpio = 0,
+ .can_sleep = 0
+ }
+};
+
+/*
+ * Manage the software enable bit.
+ */
+static void viafb_gpio_enable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0x02, 0x02);
+}
+
+static void viafb_gpio_disable(struct viafb_gpio *gpio)
+{
+ via_write_reg_mask(VIASR, gpio->vg_port_index, 0, 0x02);
+}
+
+/*
+ * Look up a specific gpio and return the number it was assigned.
+ */
+int viafb_gpio_lookup(const char *name)
+{
+ int i;
+
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i++)
+ if (!strcmp(name, gpio_config.active_gpios[i]->vg_name))
+ return gpio_config.gpio_chip.base + i;
+ return -1;
+}
+EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
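+/*
+ * Typical use from another viafb subdev (sketch; "VGPIO3" comes from
+ * the table above, the request label is made up):
+ *
+ *	int gpio = viafb_gpio_lookup("VGPIO3");
+ *
+ *	if (gpio >= 0 && gpio_request(gpio, "viafb-consumer") == 0)
+ *		gpio_direction_output(gpio, 1);
+ */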
+
+/*
+ * Platform device stuff.
+ */
+static __devinit int viafb_gpio_probe(struct platform_device *platdev)
+{
+ struct viafb_dev *vdev = platdev->dev.platform_data;
+ struct via_port_cfg *port_cfg = vdev->port_cfg;
+ int i, ngpio = 0, ret;
+ struct viafb_gpio *gpio;
+ unsigned long flags;
+
+ /*
+ * Set up entries for all GPIOs which have been configured to
+ * operate as such (as opposed to as i2c ports).
+ */
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ if (port_cfg[i].mode != VIA_MODE_GPIO)
+ continue;
+ for (gpio = viafb_all_gpios;
+ gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
+ if (gpio->vg_port_index == port_cfg[i].ioport_index) {
+ gpio_config.active_gpios[ngpio] = gpio;
+ gpio_config.gpio_names[ngpio] = gpio->vg_name;
+ ngpio++;
+ }
+ }
+ gpio_config.gpio_chip.ngpio = ngpio;
+ gpio_config.gpio_chip.names = gpio_config.gpio_names;
+ gpio_config.vdev = vdev;
+ if (ngpio == 0) {
+ printk(KERN_INFO "viafb: no GPIOs configured\n");
+ return 0;
+ }
+ /*
+ * Enable the ports. They come in pairs, with a single
+ * enable bit for both.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < ngpio; i += 2)
+ viafb_gpio_enable(gpio_config.active_gpios[i]);
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ /*
+ * Get registered.
+ */
+ gpio_config.gpio_chip.base = -1; /* Dynamic */
+ ret = gpiochip_add(&gpio_config.gpio_chip);
+ if (ret) {
+ printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
+ gpio_config.gpio_chip.ngpio = 0;
+ }
+ return ret;
+}
+
+
+static int viafb_gpio_remove(struct platform_device *platdev)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ /*
+ * Get unregistered.
+ */
+ if (gpio_config.gpio_chip.ngpio > 0) {
+ ret = gpiochip_remove(&gpio_config.gpio_chip);
+ if (ret) { /* Somebody still using it? */
+ printk(KERN_ERR "Viafb: GPIO remove failed\n");
+ return ret;
+ }
+ }
+ /*
+ * Disable the ports.
+ */
+ spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+ for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2)
+ viafb_gpio_disable(gpio_config.active_gpios[i]);
+ gpio_config.gpio_chip.ngpio = 0;
+ spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+ return ret;
+}
+
+static struct platform_driver via_gpio_driver = {
+ .driver = {
+ .name = "viafb-gpio",
+ },
+ .probe = viafb_gpio_probe,
+ .remove = viafb_gpio_remove,
+};
+
+int viafb_gpio_init(void)
+{
+ return platform_driver_register(&via_gpio_driver);
+}
+
+void viafb_gpio_exit(void)
+{
+ platform_driver_unregister(&via_gpio_driver);
+}
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 15543e96824..da9e4ca94b1 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -19,77 +19,106 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include "global.h"
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
+
+/*
+ * There can only be one set of these, so there's no point in having
+ * them be dynamically allocated...
+ */
+#define VIAFB_NUM_I2C 5
+static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C];
+struct viafb_dev *i2c_vdev; /* Passed in from core */
static void via_i2c_setscl(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x20;
else
val &= ~0x20;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x80;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: wrong i2c port type.\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
static int via_i2c_getscl(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x08)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x08)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static int via_i2c_getsda(void *data)
{
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
+ int ret = 0;
- if (viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0x04)
- return 1;
- return 0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ if (via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0x04)
+ ret = 1;
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
+ return ret;
}
static void via_i2c_setsda(void *data, int state)
{
u8 val;
- struct via_i2c_stuff *via_i2c_chan = (struct via_i2c_stuff *)data;
+ struct via_port_cfg *adap_data = data;
+ unsigned long flags;
- val = viafb_read_reg(VIASR, via_i2c_chan->i2c_port) & 0xF0;
+ spin_lock_irqsave(&i2c_vdev->reg_lock, flags);
+ val = via_read_reg(adap_data->io_port, adap_data->ioport_index) & 0xF0;
if (state)
val |= 0x10;
else
val &= ~0x10;
- switch (via_i2c_chan->i2c_port) {
- case I2CPORTINDEX:
+ switch (adap_data->type) {
+ case VIA_PORT_I2C:
val |= 0x01;
break;
- case GPIOPORTINDEX:
+ case VIA_PORT_GPIO:
val |= 0x40;
break;
default:
- DEBUG_MSG("via_i2c: specify wrong i2c port.\n");
+ printk(KERN_ERR "viafb_i2c: wrong i2c port type.\n");
}
- viafb_write_reg(via_i2c_chan->i2c_port, VIASR, val);
+ via_write_reg(adap_data->io_port, adap_data->ioport_index, val);
+ spin_unlock_irqrestore(&i2c_vdev->reg_lock, flags);
}
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
+int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
*pdata = 0;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
@@ -97,81 +126,144 @@ int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata)
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = 1;
msgs[0].buf = mm1; msgs[1].buf = pdata;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
-
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data)
+int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data)
{
u8 msg[2] = { index, data };
struct i2c_msg msgs;
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs.flags = 0;
msgs.addr = slave_addr / 2;
msgs.len = 2;
msgs.buf = msg;
- return i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, &msgs, 1);
+ return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1);
}
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len)
+int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len)
{
u8 mm1[] = {0x00};
struct i2c_msg msgs[2];
+ if (!via_i2c_par[adap].is_active)
+ return -ENODEV;
msgs[0].flags = 0;
msgs[1].flags = I2C_M_RD;
msgs[0].addr = msgs[1].addr = slave_addr / 2;
mm1[0] = index;
msgs[0].len = 1; msgs[1].len = buff_len;
msgs[0].buf = mm1; msgs[1].buf = buff;
- i2c_transfer(&viaparinfo->shared->i2c_stuff.adapter, msgs, 2);
- return 0;
+ return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2);
}
-int viafb_create_i2c_bus(void *viapar)
+/*
+ * Allow other viafb subdevices to look up a specific adapter
+ * by port name.
+ */
+struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which)
{
- int ret;
- struct via_i2c_stuff *i2c_stuff =
- &((struct viafb_par *)viapar)->shared->i2c_stuff;
-
- strcpy(i2c_stuff->adapter.name, "via_i2c");
- i2c_stuff->i2c_port = 0x0;
- i2c_stuff->adapter.owner = THIS_MODULE;
- i2c_stuff->adapter.id = 0x01FFFF;
- i2c_stuff->adapter.class = 0;
- i2c_stuff->adapter.algo_data = &i2c_stuff->algo;
- i2c_stuff->adapter.dev.parent = NULL;
- i2c_stuff->algo.setsda = via_i2c_setsda;
- i2c_stuff->algo.setscl = via_i2c_setscl;
- i2c_stuff->algo.getsda = via_i2c_getsda;
- i2c_stuff->algo.getscl = via_i2c_getscl;
- i2c_stuff->algo.udelay = 40;
- i2c_stuff->algo.timeout = 20;
- i2c_stuff->algo.data = i2c_stuff;
-
- i2c_set_adapdata(&i2c_stuff->adapter, i2c_stuff);
+ struct via_i2c_stuff *stuff = &via_i2c_par[which];
- /* Raise SCL and SDA */
- i2c_stuff->i2c_port = I2CPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
+ return &stuff->adapter;
+}
+EXPORT_SYMBOL_GPL(viafb_find_i2c_adapter);
- i2c_stuff->i2c_port = GPIOPORTINDEX;
- via_i2c_setsda(i2c_stuff, 1);
- via_i2c_setscl(i2c_stuff, 1);
- udelay(20);
- ret = i2c_bit_add_bus(&i2c_stuff->adapter);
- if (ret == 0)
- DEBUG_MSG("I2C bus %s registered.\n", i2c_stuff->adapter.name);
+static int create_i2c_bus(struct i2c_adapter *adapter,
+ struct i2c_algo_bit_data *algo,
+ struct via_port_cfg *adap_cfg,
+ struct pci_dev *pdev)
+{
+ algo->setsda = via_i2c_setsda;
+ algo->setscl = via_i2c_setscl;
+ algo->getsda = via_i2c_getsda;
+ algo->getscl = via_i2c_getscl;
+ algo->udelay = 40;
+ algo->timeout = 20;
+ algo->data = adap_cfg;
+
+ sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
+ adap_cfg->ioport_index);
+ adapter->owner = THIS_MODULE;
+ adapter->id = 0x01FFFF;
+ adapter->class = I2C_CLASS_DDC;
+ adapter->algo_data = algo;
+ if (pdev)
+ adapter->dev.parent = &pdev->dev;
else
- DEBUG_MSG("Failed to register I2C bus %s.\n",
- i2c_stuff->adapter.name);
- return ret;
+ adapter->dev.parent = NULL;
+ /* i2c_set_adapdata(adapter, adap_cfg); */
+
+ /* Raise SCL and SDA */
+ via_i2c_setsda(adap_cfg, 1);
+ via_i2c_setscl(adap_cfg, 1);
+ udelay(20);
+
+ return i2c_bit_add_bus(adapter);
+}
+
+static int viafb_i2c_probe(struct platform_device *platdev)
+{
+ int i, ret;
+ struct via_port_cfg *configs;
+
+ i2c_vdev = platdev->dev.platform_data;
+ configs = i2c_vdev->port_cfg;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_port_cfg *adap_cfg = configs++;
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+
+ i2c_stuff->is_active = 0;
+ if (adap_cfg->type == 0 || adap_cfg->mode != VIA_MODE_I2C)
+ continue;
+ ret = create_i2c_bus(&i2c_stuff->adapter,
+ &i2c_stuff->algo, adap_cfg,
+ NULL); /* FIXME: PCIDEV */
+ if (ret < 0) {
+ printk(KERN_ERR "viafb: cannot create i2c bus %u:%d\n",
+ i, ret);
+ continue; /* Still try to make the rest */
+ }
+ i2c_stuff->is_active = 1;
+ }
+
+ return 0;
+}
+
+static int viafb_i2c_remove(struct platform_device *platdev)
+{
+ int i;
+
+ for (i = 0; i < VIAFB_NUM_PORTS; i++) {
+ struct via_i2c_stuff *i2c_stuff = &via_i2c_par[i];
+ /*
+ * Only remove those entries in the array that we've
+ * actually used (and thus initialized algo_data)
+ */
+ if (i2c_stuff->is_active)
+ i2c_del_adapter(&i2c_stuff->adapter);
+ }
+ return 0;
+}
+
+static struct platform_driver via_i2c_driver = {
+ .driver = {
+ .name = "viafb-i2c",
+ },
+ .probe = viafb_i2c_probe,
+ .remove = viafb_i2c_remove,
+};
+
+int viafb_i2c_init(void)
+{
+ return platform_driver_register(&via_i2c_driver);
}
-void viafb_delete_i2c_buss(void *par)
+void viafb_i2c_exit(void)
{
- i2c_del_adapter(&((struct viafb_par *)par)->shared->i2c_stuff.adapter);
+ platform_driver_unregister(&via_i2c_driver);
}
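
The byte-level helpers above now take an adapter index as their first argument and return -ENODEV when that port was not configured for i2c, while viafb_find_i2c_adapter() hands the raw struct i2c_adapter to other viafb subdevices. A minimal caller sketch, assuming the slave_addr / 2 addressing convention used by viafb_i2c_readbyte() itself; the function name and the choice of the 0xA0 DDC address are illustrative only:

/* Illustration only: fetch the first EDID byte over adapter 'adap'. */
static int example_read_edid_byte0(u8 adap, u8 *byte0)
{
        int ret;

        ret = viafb_i2c_readbyte(adap, 0xA0, 0x00, byte0);
        if (ret < 0)
                return ret;     /* -ENODEV if the port is inactive */
        return 0;
}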
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
new file mode 100644
index 00000000000..3cddcff88ab
--- /dev/null
+++ b/drivers/video/via/via_modesetting.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE.See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * basic modesetting functions
+ */
+
+#include <linux/kernel.h>
+#include <linux/via-core.h>
+#include "via_modesetting.h"
+#include "share.h"
+#include "debug.h"
+
+void via_set_primary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_address(0x%08X)\n", addr);
+ via_write_reg(VIACR, 0x0D, addr & 0xFF);
+ via_write_reg(VIACR, 0x0C, (addr >> 8) & 0xFF);
+ via_write_reg(VIACR, 0x34, (addr >> 16) & 0xFF);
+ via_write_reg_mask(VIACR, 0x48, (addr >> 24) & 0x1F, 0x1F);
+}
+
+void via_set_secondary_address(u32 addr)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_address(0x%08X)\n", addr);
+ /* secondary display supports only quadword aligned memory */
+ via_write_reg_mask(VIACR, 0x62, (addr >> 2) & 0xFE, 0xFE);
+ via_write_reg(VIACR, 0x63, (addr >> 10) & 0xFF);
+ via_write_reg(VIACR, 0x64, (addr >> 18) & 0xFF);
+ via_write_reg_mask(VIACR, 0xA3, (addr >> 26) & 0x07, 0x07);
+}
+
+void via_set_primary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_pitch(0x%08X)\n", pitch);
+ /* The spec does not say that the first adapter skips 3 bits, but the
+ * old code did it and it seems reasonable by analogy with the 2nd adapter
+ */
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x13, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x35, (pitch >> (8 - 5)) & 0xE0, 0xE0);
+}
+
+void via_set_secondary_pitch(u32 pitch)
+{
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_pitch(0x%08X)\n", pitch);
+ pitch = pitch >> 3;
+ via_write_reg(VIACR, 0x66, pitch & 0xFF);
+ via_write_reg_mask(VIACR, 0x67, (pitch >> 8) & 0x03, 0x03);
+ via_write_reg_mask(VIACR, 0x71, (pitch >> (10 - 7)) & 0x80, 0x80);
+}
+
+void via_set_primary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_primary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 15:
+ value = 0x04;
+ break;
+ case 16:
+ value = 0x14;
+ break;
+ case 24:
+ value = 0x0C;
+ break;
+ case 30:
+ value = 0x08;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_primary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIASR, 0x15, value, 0x1C);
+}
+
+void via_set_secondary_color_depth(u8 depth)
+{
+ u8 value;
+
+ DEBUG_MSG(KERN_DEBUG "via_set_secondary_color_depth(%d)\n", depth);
+ switch (depth) {
+ case 8:
+ value = 0x00;
+ break;
+ case 16:
+ value = 0x40;
+ break;
+ case 24:
+ value = 0xC0;
+ break;
+ case 30:
+ value = 0x80;
+ break;
+ default:
+ printk(KERN_WARNING "via_set_secondary_color_depth: "
+ "Unsupported depth: %d\n", depth);
+ return;
+ }
+
+ via_write_reg_mask(VIACR, 0x67, value, 0xC0);
+}
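
The address helpers split a 32-bit framebuffer offset across several CRT controller registers. A worked decomposition for via_set_primary_address(0x01234567), derived purely from the shifts and masks in the function above (not measured on hardware):

/*
 * CR0D      = 0x67     addr         & 0xFF
 * CR0C      = 0x45     (addr >> 8)  & 0xFF
 * CR34      = 0x23     (addr >> 16) & 0xFF
 * CR48[4:0] = 0x01     (addr >> 24) & 0x1F
 */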
diff --git a/drivers/video/via/via_i2c.h b/drivers/video/via/via_modesetting.h
index 3a13242a315..ae35cfdeb37 100644
--- a/drivers/video/via/via_i2c.h
+++ b/drivers/video/via/via_modesetting.h
@@ -1,46 +1,38 @@
/*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
-
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
-
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
-
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#ifndef __VIA_I2C_H__
-#define __VIA_I2C_H__
+/*
+ * basic modesetting functions
+ */
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
+#ifndef __VIA_MODESETTING_H__
+#define __VIA_MODESETTING_H__
-struct via_i2c_stuff {
- u16 i2c_port; /* GPIO or I2C port */
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
+#include <linux/types.h>
-#define I2CPORT 0x3c4
-#define I2CPORTINDEX 0x31
-#define GPIOPORT 0x3C4
-#define GPIOPORTINDEX 0x2C
-#define I2C_BUS 1
-#define GPIO_BUS 2
-#define DELAYPORT 0x3C3
+void via_set_primary_address(u32 addr);
+void via_set_secondary_address(u32 addr);
+void via_set_primary_pitch(u32 pitch);
+void via_set_secondary_pitch(u32 pitch);
+void via_set_primary_color_depth(u8 depth);
+void via_set_secondary_color_depth(u8 depth);
-int viafb_i2c_readbyte(u8 slave_addr, u8 index, u8 *pdata);
-int viafb_i2c_writebyte(u8 slave_addr, u8 index, u8 data);
-int viafb_i2c_readbytes(u8 slave_addr, u8 index, u8 *buff, int buff_len);
-int viafb_create_i2c_bus(void *par);
-void viafb_delete_i2c_buss(void *par);
-#endif /* __VIA_I2C_H__ */
+#endif /* __VIA_MODESETTING_H__ */
diff --git a/drivers/video/via/via_utility.c b/drivers/video/via/via_utility.c
index aefdeeec89b..d05ccb62b55 100644
--- a/drivers/video/via/via_utility.c
+++ b/drivers/video/via/via_utility.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
void viafb_get_device_support_state(u32 *support_state)
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 777b38a06d4..1082541358f 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
@@ -23,8 +23,9 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#define _MASTER_FILE
+#include <linux/via-core.h>
+#define _MASTER_FILE
#include "global.h"
static char *viafb_name = "Via";
@@ -221,7 +222,7 @@ static int viafb_check_var(struct fb_var_screeninfo *var,
/* Adjust var according to our driver's own table */
viafb_fill_var_timing_info(var, viafb_refresh, vmode_entry);
if (info->var.accel_flags & FB_ACCELF_TEXT &&
- !ppar->shared->engine_mmio)
+ !ppar->shared->vdev->engine_mmio)
info->var.accel_flags = 0;
return 0;
@@ -317,12 +318,12 @@ static int viafb_pan_display(struct fb_var_screeninfo *var,
DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr);
if (!viafb_dual_fb) {
- viafb_set_primary_address(vram_addr);
- viafb_set_secondary_address(vram_addr);
+ via_set_primary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
} else if (viapar->iga_path == IGA1)
- viafb_set_primary_address(vram_addr);
+ via_set_primary_address(vram_addr);
else
- viafb_set_secondary_address(vram_addr);
+ via_set_secondary_address(vram_addr);
return 0;
}
@@ -577,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
break;
case VIAFB_SET_GAMMA_LUT:
- viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
- if (!viafb_gamma_table)
- return -ENOMEM;
- if (copy_from_user(viafb_gamma_table, argp,
- 256 * sizeof(u32))) {
- kfree(viafb_gamma_table);
- return -EFAULT;
- }
+ viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
+ if (IS_ERR(viafb_gamma_table))
+ return PTR_ERR(viafb_gamma_table);
viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
kfree(viafb_gamma_table);
break;
@@ -696,7 +692,7 @@ static void viafb_fillrect(struct fb_info *info,
rop = 0xF0;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_FILL,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL,
rect->width, rect->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy,
NULL, 0, 0, 0, 0, fg_color, 0, rop))
@@ -718,7 +714,7 @@ static void viafb_copyarea(struct fb_info *info,
return;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n");
- if (shared->hw_bitblt(shared->engine_mmio, VIA_BITBLT_COLOR,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR,
area->width, area->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, area->dx, area->dy,
NULL, viapar->vram_addr, info->fix.line_length,
@@ -755,7 +751,7 @@ static void viafb_imageblit(struct fb_info *info,
op = VIA_BITBLT_COLOR;
DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n");
- if (shared->hw_bitblt(shared->engine_mmio, op,
+ if (shared->hw_bitblt(shared->vdev->engine_mmio, op,
image->width, image->height, info->var.bits_per_pixel,
viapar->vram_addr, info->fix.line_length, image->dx, image->dy,
(u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0))
@@ -765,7 +761,7 @@ static void viafb_imageblit(struct fb_info *info,
static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct viafb_par *viapar = info->par;
- void __iomem *engine = viapar->shared->engine_mmio;
+ void __iomem *engine = viapar->shared->vdev->engine_mmio;
u32 temp, xx, yy, bg_color = 0, fg_color = 0,
chip_name = viapar->shared->chip_info.gfx_chip_name;
int i, j = 0, cur_size = 64;
@@ -1018,8 +1014,8 @@ static void viafb_set_device(struct device_t active_dev)
viafb_SAMM_ON = active_dev.samm;
viafb_primary_dev = active_dev.primary_dev;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
viafb_set_iga_path();
}
@@ -1165,8 +1161,9 @@ static int apply_device_setting(struct viafb_ioctl_setting setting_info,
if (viafb_SAMM_ON)
viafb_primary_dev = setting_info.primary_device;
- viafb_set_primary_address(0);
- viafb_set_secondary_address(viafb_SAMM_ON ? viafb_second_offset : 0);
+ via_set_primary_address(0);
+ via_set_secondary_address(viafb_SAMM_ON ?
+ viafb_second_offset : 0);
viafb_set_iga_path();
}
need_set_mode = 1;
@@ -1325,6 +1322,8 @@ static void parse_dvi_port(void)
output_interface);
}
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+
/*
* The proc filesystem read/write function, a simple proc implement to
* get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
@@ -1701,16 +1700,21 @@ static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
}
static void viafb_remove_proc(struct proc_dir_entry *viafb_entry)
{
- /* no problem if it was not registered */
+ struct chip_information *chip_info = &viaparinfo->shared->chip_info;
+
remove_proc_entry("dvp0", viafb_entry);/* parent dir */
remove_proc_entry("dvp1", viafb_entry);
remove_proc_entry("dfph", viafb_entry);
remove_proc_entry("dfpl", viafb_entry);
- remove_proc_entry("vt1636", viafb_entry);
- remove_proc_entry("vt1625", viafb_entry);
+ if (chip_info->lvds_chip_info.lvds_chip_name == VT1636_LVDS
+ || chip_info->lvds_chip_info2.lvds_chip_name == VT1636_LVDS)
+ remove_proc_entry("vt1636", viafb_entry);
+
remove_proc_entry("viafb", NULL);
}
+#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
+
static int parse_mode(const char *str, u32 *xres, u32 *yres)
{
char *ptr;
@@ -1732,12 +1736,13 @@ static int parse_mode(const char *str, u32 *xres, u32 *yres)
return 0;
}
-static int __devinit via_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+
+int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
u32 default_xres, default_yres;
struct VideoModeTable *vmode_entry;
struct fb_var_screeninfo default_var;
+ int rc;
u32 viafb_par_length;
DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n");
@@ -1749,14 +1754,15 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
*/
viafbinfo = framebuffer_alloc(viafb_par_length +
ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
- &pdev->dev);
+ &vdev->pdev->dev);
if (!viafbinfo) {
printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
- return -ENODEV;
+ return -ENOMEM;
}
viaparinfo = (struct viafb_par *)viafbinfo->par;
viaparinfo->shared = viafbinfo->par + viafb_par_length;
+ viaparinfo->shared->vdev = vdev;
viaparinfo->vram_addr = 0;
viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info;
viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
@@ -1774,23 +1780,20 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
if (!viafb_SAMM_ON)
viafb_dual_fb = 0;
- /* Set up I2C bus stuff */
- viafb_create_i2c_bus(viaparinfo);
-
- viafb_init_chip_info(pdev, ent);
- viaparinfo->fbmem = pci_resource_start(pdev, 0);
- viaparinfo->memsize = viafb_get_fb_size_from_pci();
+ viafb_init_chip_info(vdev->chip_type);
+ /*
+ * The framebuffer will have been successfully mapped by
+ * the core (or we'd not be here), but we still need to
+ * set up our own accounting.
+ */
+ viaparinfo->fbmem = vdev->fbmem_start;
+ viaparinfo->memsize = vdev->fbmem_len;
viaparinfo->fbmem_free = viaparinfo->memsize;
viaparinfo->fbmem_used = 0;
- viafbinfo->screen_base = ioremap_nocache(viaparinfo->fbmem,
- viaparinfo->memsize);
- if (!viafbinfo->screen_base) {
- printk(KERN_INFO "ioremap failed\n");
- return -ENOMEM;
- }
+ viafbinfo->screen_base = vdev->fbmem;
- viafbinfo->fix.mmio_start = pci_resource_start(pdev, 1);
- viafbinfo->fix.mmio_len = pci_resource_len(pdev, 1);
+ viafbinfo->fix.mmio_start = vdev->engine_start;
+ viafbinfo->fix.mmio_len = vdev->engine_len;
viafbinfo->node = 0;
viafbinfo->fbops = &viafb_ops;
viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
@@ -1858,12 +1861,13 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viafbinfo->var = default_var;
if (viafb_dual_fb) {
- viafbinfo1 = framebuffer_alloc(viafb_par_length, &pdev->dev);
+ viafbinfo1 = framebuffer_alloc(viafb_par_length,
+ &vdev->pdev->dev);
if (!viafbinfo1) {
printk(KERN_ERR
"allocate the second framebuffer struct error\n");
- framebuffer_release(viafbinfo);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_fb_release;
}
viaparinfo1 = viafbinfo1->par;
memcpy(viaparinfo1, viaparinfo, viafb_par_length);
@@ -1914,48 +1918,66 @@ static int __devinit via_pci_probe(struct pci_dev *pdev,
viaparinfo->depth = fb_get_color_depth(&viafbinfo->var,
&viafbinfo->fix);
default_var.activate = FB_ACTIVATE_NOW;
- fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
+ if (rc)
+ goto out_fb1_release;
if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
&& (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_dealloc_cmap;
}
- if (register_framebuffer(viafbinfo) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo);
+ if (rc)
+ goto out_fb1_unreg_lcd_cle266;
if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device)
|| (viaparinfo->chip_info->gfx_chip_name !=
UNICHROME_CLE266))) {
- if (register_framebuffer(viafbinfo1) < 0)
- return -EINVAL;
+ rc = register_framebuffer(viafbinfo1);
+ if (rc)
+ goto out_fb_unreg;
}
DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n",
viafbinfo->node, viafbinfo->fix.id, default_var.xres,
default_var.yres, default_var.bits_per_pixel);
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
viafb_init_proc(&viaparinfo->shared->proc_entry);
+#endif
viafb_init_dac(IGA2);
return 0;
+
+out_fb_unreg:
+ unregister_framebuffer(viafbinfo);
+out_fb1_unreg_lcd_cle266:
+ if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
+ && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266))
+ unregister_framebuffer(viafbinfo1);
+out_dealloc_cmap:
+ fb_dealloc_cmap(&viafbinfo->cmap);
+out_fb1_release:
+ if (viafbinfo1)
+ framebuffer_release(viafbinfo1);
+out_fb_release:
+ framebuffer_release(viafbinfo);
+ return rc;
}
-static void __devexit via_pci_remove(struct pci_dev *pdev)
+void __devexit via_fb_pci_remove(struct pci_dev *pdev)
{
DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
fb_dealloc_cmap(&viafbinfo->cmap);
unregister_framebuffer(viafbinfo);
if (viafb_dual_fb)
unregister_framebuffer(viafbinfo1);
- iounmap((void *)viafbinfo->screen_base);
- iounmap(viaparinfo->shared->engine_mmio);
-
- viafb_delete_i2c_buss(viaparinfo);
-
+#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
+ viafb_remove_proc(viaparinfo->shared->proc_entry);
+#endif
framebuffer_release(viafbinfo);
if (viafb_dual_fb)
framebuffer_release(viafbinfo1);
-
- viafb_remove_proc(viaparinfo->shared->proc_entry);
}
#ifndef MODULE
@@ -2031,41 +2053,10 @@ static int __init viafb_setup(char *options)
}
#endif
-static struct pci_device_id viafb_pci_table[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
- .driver_data = UNICHROME_CLE266 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
- .driver_data = UNICHROME_PM800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
- .driver_data = UNICHROME_K400 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
- .driver_data = UNICHROME_K800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
- .driver_data = UNICHROME_CN700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
- .driver_data = UNICHROME_K8M890 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
- .driver_data = UNICHROME_CX700 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
- .driver_data = UNICHROME_P4M900 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
- .driver_data = UNICHROME_CN750 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
- .driver_data = UNICHROME_VX800 },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
- .driver_data = UNICHROME_VX855 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, viafb_pci_table);
-
-static struct pci_driver viafb_driver = {
- .name = "viafb",
- .id_table = viafb_pci_table,
- .probe = via_pci_probe,
- .remove = __devexit_p(via_pci_remove),
-};
-
-static int __init viafb_init(void)
+/*
+ * These are called out of via-core for now.
+ */
+int __init viafb_init(void)
{
u32 dummy;
#ifndef MODULE
@@ -2084,13 +2075,12 @@ static int __init viafb_init(void)
printk(KERN_INFO
"VIA Graphics Intergration Chipset framebuffer %d.%d initializing\n",
VERSION_MAJOR, VERSION_MINOR);
- return pci_register_driver(&viafb_driver);
+ return 0;
}
-static void __exit viafb_exit(void)
+void __exit viafb_exit(void)
{
DEBUG_MSG(KERN_INFO "viafb_exit!\n");
- pci_unregister_driver(&viafb_driver);
}
static struct fb_ops viafb_ops = {
@@ -2110,8 +2100,6 @@ static struct fb_ops viafb_ops = {
.fb_sync = viafb_sync,
};
-module_init(viafb_init);
-module_exit(viafb_exit);
#ifdef MODULE
module_param(viafb_mode, charp, S_IRUSR);
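
With the PCI device table and module hooks removed, probing is expected to come from a shared via-core layer through viafb_init()/viafb_exit() and via_fb_pci_probe()/via_fb_pci_remove(). A hypothetical sketch of that glue; everything except the viafb_* entry points declared in viafbdev.h is an assumption, not part of this patch:

/* Hypothetical via-core glue, for illustration only. */
static int __devinit via_core_probe(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct viafb_dev *vdev;

        vdev = via_core_setup(pdev, ent);  /* assumed: maps fbmem/MMIO */
        if (IS_ERR(vdev))
                return PTR_ERR(vdev);
        return via_fb_pci_probe(vdev);     /* entry point added above */
}

static int __init via_core_init(void)
{
        int ret = viafb_init();            /* option parsing and banner */

        if (ret)
                return ret;
        return pci_register_driver(&via_core_pci_driver); /* assumed */
}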
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index 61b5953cd15..52a35fabba9 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -24,12 +24,12 @@
#include <linux/proc_fs.h>
#include <linux/fb.h>
+#include <linux/spinlock.h>
#include "ioctl.h"
#include "share.h"
#include "chip.h"
#include "hw.h"
-#include "via_i2c.h"
#define VERSION_MAJOR 2
#define VERSION_KERNEL 6 /* For kernel 2.6 */
@@ -37,11 +37,11 @@
#define VERSION_OS 0 /* 0: for 32 bits OS, 1: for 64 bits OS */
#define VERSION_MINOR 4
+#define VIAFB_NUM_I2C 5
+
struct viafb_shared {
struct proc_dir_entry *proc_entry; /*viafb proc entry */
-
- /* I2C stuff */
- struct via_i2c_stuff i2c_stuff;
+ struct viafb_dev *vdev; /* Global dev info */
/* All the information will be needed to set engine */
struct tmds_setting_information tmds_setting_info;
@@ -51,7 +51,6 @@ struct viafb_shared {
struct chip_information chip_info;
/* hardware acceleration stuff */
- void __iomem *engine_mmio;
u32 cursor_vram_addr;
u32 vq_vram_addr; /* virtual queue address in video ram */
int (*hw_bitblt)(void __iomem *engine, u8 op, u32 width, u32 height,
@@ -99,4 +98,9 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information
*plvds_chip_info, struct IODATA io_data);
+int via_fb_pci_probe(struct viafb_dev *vdev);
+void via_fb_pci_remove(struct pci_dev *pdev);
+/* Temporary */
+int viafb_init(void);
+void viafb_exit(void);
#endif /* __VIAFBDEV_H__ */
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index af50e244016..2dbad3c0f67 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
#include "global.h"
struct res_map_refresh res_map_refresh_tbl[] = {
/*hres, vres, vclock, vmode_refresh*/
@@ -66,6 +67,7 @@ struct res_map_refresh res_map_refresh_tbl[] = {
{1088, 612, RES_1088X612_60HZ_PIXCLOCK, 60},
{1152, 720, RES_1152X720_60HZ_PIXCLOCK, 60},
{1200, 720, RES_1200X720_60HZ_PIXCLOCK, 60},
+ {1200, 900, RES_1200X900_60HZ_PIXCLOCK, 60},
{1280, 600, RES_1280X600_60HZ_PIXCLOCK, 60},
{1280, 720, RES_1280X720_50HZ_PIXCLOCK, 50},
{1280, 768, RES_1280X768_50HZ_PIXCLOCK, 50},
@@ -759,6 +761,16 @@ struct crt_mode_table CRTM1200x720[] = {
{1568, 1200, 1200, 368, 1256, 128, 746, 720, 720, 26, 721, 3} }
};
+/* 1200x900 (DCON) */
+struct crt_mode_table DCON1200x900[] = {
+ /* r_rate, vclk, hsp, vsp */
+ {REFRESH_60, CLK_57_275M, M1200X900_R60_HSP, M1200X900_R60_VSP,
+ /* The correct htotal is 1240, but this doesn't raster on VX855. */
+ /* Via suggested changing to a multiple of 16, hence 1264. */
+ /* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
+ {1264, 1200, 1200, 64, 1211, 32, 912, 900, 900, 12, 901, 10} }
+};
+
/* 1280x600 (GTF) */
struct crt_mode_table CRTM1280x600[] = {
/* r_rate, vclk, hsp, vsp */
@@ -937,6 +949,9 @@ struct VideoModeTable viafb_modes[] = {
/* Display : 1200x720 (GTF) */
{CRTM1200x720, ARRAY_SIZE(CRTM1200x720)},
+ /* Display : 1200x900 (DCON) */
+ {DCON1200x900, ARRAY_SIZE(DCON1200x900)},
+
/* Display : 1280x600 (GTF) */
{CRTM1280x600, ARRAY_SIZE(CRTM1280x600)},
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index a6b37494e79..d65bf1aee87 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -19,6 +19,8 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/via-core.h>
+#include <linux/via_i2c.h>
#include "global.h"
u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
@@ -27,9 +29,8 @@ u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
{
u8 data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
- viafb_i2c_readbyte(plvds_chip_info->lvds_chip_slave_addr, index, &data);
-
+ viafb_i2c_readbyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, &data);
return data;
}
@@ -39,14 +40,13 @@ void viafb_gpio_i2c_write_mask_lvds(struct lvds_setting_information
{
int index, data;
- viaparinfo->shared->i2c_stuff.i2c_port = plvds_chip_info->i2c_port;
-
index = io_data.Index;
data = viafb_gpio_i2c_read_lvds(plvds_setting_info, plvds_chip_info,
index);
data = (data & (~io_data.Mask)) | io_data.Data;
- viafb_i2c_writebyte(plvds_chip_info->lvds_chip_slave_addr, index, data);
+ viafb_i2c_writebyte(plvds_chip_info->i2c_port,
+ plvds_chip_info->lvds_chip_slave_addr, index, data);
}
void viafb_init_lvds_vt1636(struct lvds_setting_information
@@ -159,7 +159,7 @@ void viafb_disable_lvds_vt1636(struct lvds_setting_information
}
}
-bool viafb_lvds_identify_vt1636(void)
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter)
{
u8 Buffer[2];
@@ -167,26 +167,20 @@ bool viafb_lvds_identify_vt1636(void)
/* Sense VT1636 LVDS Transmiter */
viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr =
- VT1636_LVDS_I2C_ADDR;
+ VT1636_LVDS_I2C_ADDR;
/* Check vendor ID first: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x00, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x01, &Buffer[1]);
+ if (viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR,
+ 0x00, &Buffer[0]))
+ return false;
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x01, &Buffer[1]);
if (!((Buffer[0] == 0x06) && (Buffer[1] == 0x11)))
return false;
/* Check Chip ID: */
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x02, &Buffer[0]);
- viafb_i2c_readbyte((u8) viaparinfo->chip_info->lvds_chip_info.
- lvds_chip_slave_addr,
- 0x03, &Buffer[1]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x02, &Buffer[0]);
+ viafb_i2c_readbyte(i2c_adapter, VT1636_LVDS_I2C_ADDR, 0x03, &Buffer[1]);
if ((Buffer[0] == 0x45) && (Buffer[1] == 0x33)) {
viaparinfo->chip_info->lvds_chip_info.lvds_chip_name =
VT1636_LVDS;
diff --git a/drivers/video/via/vt1636.h b/drivers/video/via/vt1636.h
index 2a150c58c7e..4c1314e5746 100644
--- a/drivers/video/via/vt1636.h
+++ b/drivers/video/via/vt1636.h
@@ -22,7 +22,7 @@
#ifndef _VT1636_H_
#define _VT1636_H_
#include "chip.h"
-bool viafb_lvds_identify_vt1636(void);
+bool viafb_lvds_identify_vt1636(u8 i2c_adapter);
void viafb_init_lvds_vt1636(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info);
void viafb_enable_lvds_vt1636(struct lvds_setting_information
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 31b0e17ed09..e66b8b19ce5 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -53,7 +53,7 @@ static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
static void w100_init_graphic_engine(struct w100fb_par *par);
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
+struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
/* Pseudo palette size */
#define MAX_PALETTES 16
@@ -782,7 +782,7 @@ out:
}
-static int w100fb_remove(struct platform_device *pdev)
+static int __devexit w100fb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
struct w100fb_par *par=info->par;
@@ -1020,7 +1020,7 @@ static struct pll_entries {
{ 0 },
};
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
+struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
{
struct pll_entries *pll_entry = w100_pll_tables;
@@ -1611,7 +1611,7 @@ static void w100_vsync(void)
static struct platform_driver w100fb_driver = {
.probe = w100fb_probe,
- .remove = w100fb_remove,
+ .remove = __devexit_p(w100fb_remove),
.suspend = w100fb_suspend,
.resume = w100fb_resume,
.driver = {
@@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = {
},
};
-int __devinit w100fb_init(void)
+int __init w100fb_init(void)
{
return platform_driver_register(&w100fb_driver);
}
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 3fcb83f0388..574dc54e12d 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -423,7 +423,7 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
* To check whether the core is connected directly to DCR or PLB
* interface and initialize the tft_access accordingly.
*/
- p = (u32 *)of_get_property(op->node, "xlnx,dcr-splb-slave-if", NULL);
+ p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL);
tft_access = p ? *p : 0;
/*
@@ -432,41 +432,41 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
*/
if (tft_access) {
drvdata->flags |= PLB_ACCESS_FLAG;
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
goto err;
}
} else {
res.start = 0;
- start = dcr_resource_start(op->node, 0);
- drvdata->dcr_len = dcr_resource_len(op->node, 0);
- drvdata->dcr_host = dcr_map(op->node, start, drvdata->dcr_len);
+ start = dcr_resource_start(op->dev.of_node, 0);
+ drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
+ drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
if (!DCR_MAP_OK(drvdata->dcr_host)) {
dev_err(&op->dev, "invalid DCR address\n");
goto err;
}
}
- prop = of_get_property(op->node, "phys-size", &size);
+ prop = of_get_property(op->dev.of_node, "phys-size", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.screen_width_mm = prop[0];
pdata.screen_height_mm = prop[1];
}
- prop = of_get_property(op->node, "resolution", &size);
+ prop = of_get_property(op->dev.of_node, "resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xres = prop[0];
pdata.yres = prop[1];
}
- prop = of_get_property(op->node, "virtual-resolution", &size);
+ prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xvirt = prop[0];
pdata.yvirt = prop[1];
}
- if (of_find_property(op->node, "rotate-display", NULL))
+ if (of_find_property(op->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
dev_set_drvdata(&op->dev, drvdata);
@@ -492,13 +492,12 @@ static struct of_device_id xilinxfb_of_match[] __devinitdata = {
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
static struct of_platform_driver xilinxfb_of_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = xilinxfb_of_match,
.probe = xilinxfb_of_probe,
.remove = __devexit_p(xilinxfb_of_remove),
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xilinxfb_of_match,
},
};
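
The xilinxfb hunk above (and the cpwd and gef_wdt watchdog hunks further down) all perform the same of_platform_driver conversion: device-tree lookups switch from op->node to op->dev.of_node, and the name, owner and match table move into the embedded struct device_driver. The resulting shape, with placeholder names standing in for any particular driver:

/* Generic pattern; the foo_* names are placeholders. */
static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo" },
        {}
};

static struct of_platform_driver foo_of_driver = {
        .probe  = foo_of_probe,                 /* defined elsewhere */
        .remove = __devexit_p(foo_of_remove),   /* defined elsewhere */
        .driver = {
                .name           = "foo",
                .owner          = THIS_MODULE,
                .of_match_table = foo_of_match,
        },
};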
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfec7c29486..0f1da45ba47 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (vb)
complete(&vb->acked);
}
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
struct virtio_balloon *vb;
unsigned int len;
- vb = vq->vq_ops->get_buf(vq, &len);
+ vb = virtqueue_get_buf(vq, &len);
if (!vb)
return;
vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
vq = vb->stats_vq;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
- &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
BUG();
- vb->stats_vq->vq_ops->kick(vb->stats_vq);
+ virtqueue_kick(vb->stats_vq);
}
vb->thread = kthread_run(balloon, vb, "vballoon");
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 24747aef195..95896f38792 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -655,7 +655,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
* virtio devices and to identify the particular virtio driver by
- * the subsytem ids */
+ * the subsystem ids */
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 0f90634bcb8..1ca88908723 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -110,13 +110,14 @@ struct vring_virtqueue
static int vring_add_indirect(struct vring_virtqueue *vq,
struct scatterlist sg[],
unsigned int out,
- unsigned int in)
+ unsigned int in,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned head;
int i;
- desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
if (!desc)
return vq->vring.num;
@@ -155,11 +156,12 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-static int vring_add_buf(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data)
+int virtqueue_add_buf_gfp(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, head, uninitialized_var(prev);
@@ -171,7 +173,7 @@ static int vring_add_buf(struct virtqueue *_vq,
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
- head = vring_add_indirect(vq, sg, out, in);
+ head = vring_add_indirect(vq, sg, out, in, gfp);
if (head != vq->vring.num)
goto add_head;
}
@@ -232,8 +234,9 @@ add_head:
return vq->num_free ? vq->vring.num : 0;
return vq->num_free;
}
+EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
-static void vring_kick(struct virtqueue *_vq)
+void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
START_USE(vq);
@@ -253,6 +256,7 @@ static void vring_kick(struct virtqueue *_vq)
END_USE(vq);
}
+EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
@@ -284,7 +288,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->last_used_idx != vq->vring.used->idx;
}
-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
@@ -325,15 +329,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
END_USE(vq);
return ret;
}
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
-static void vring_disable_cb(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
-static bool vring_enable_cb(struct virtqueue *_vq)
+bool virtqueue_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -351,8 +357,9 @@ static bool vring_enable_cb(struct virtqueue *_vq)
END_USE(vq);
return true;
}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
-static void *vring_detach_unused_buf(struct virtqueue *_vq)
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
@@ -375,6 +382,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq)
END_USE(vq);
return NULL;
}
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
@@ -396,15 +404,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-static struct virtqueue_ops vring_vq_ops = {
- .add_buf = vring_add_buf,
- .get_buf = vring_get_buf,
- .kick = vring_kick,
- .disable_cb = vring_disable_cb,
- .enable_cb = vring_enable_cb,
- .detach_unused_buf = vring_detach_unused_buf,
-};
-
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -429,7 +428,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
- vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->broken = false;
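
Callers such as virtio_balloon above keep using virtqueue_add_buf() without a gfp argument, so the exported virtqueue_add_buf_gfp() is presumably paired with a thin inline wrapper in linux/virtio.h. A sketch of what such a wrapper would look like, offered as an assumption rather than a quote from this patch:

/* Assumed convenience wrapper (not part of this hunk). */
static inline int virtqueue_add_buf(struct virtqueue *vq,
                                    struct scatterlist sg[],
                                    unsigned int out,
                                    unsigned int in,
                                    void *data)
{
        /* preserve the historical GFP_ATOMIC behaviour of vring_add_buf() */
        return virtqueue_add_buf_gfp(vq, sg, out, in, data, GFP_ATOMIC);
}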
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
index a9efb162532..d874b4f3413 100644
--- a/drivers/vlynq/Kconfig
+++ b/drivers/vlynq/Kconfig
@@ -1,8 +1,8 @@
menu "TI VLYNQ"
+ depends on AR7 && EXPERIMENTAL
config VLYNQ
bool "TI VLYNQ bus support"
- depends on AR7 && EXPERIMENTAL
help
Support for Texas Instruments(R) VLYNQ bus.
The VLYNQ bus is a high-speed, serial and packetized
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 2c6c0cf6a20..84e2410aec1 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -96,7 +96,7 @@ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
return -1;
}
-static ssize_t w1_f2d_read_bin(struct kobject *kobj,
+static ssize_t w1_f2d_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -202,7 +202,7 @@ retry:
return 0;
}
-static ssize_t w1_f2d_write_bin(struct kobject *kobj,
+static ssize_t w1_f2d_write_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index d2bf32118a9..0f7b8f9c509 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -92,7 +92,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-static ssize_t w1_f23_read_bin(struct kobject *kobj,
+static ssize_t w1_f23_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -206,7 +206,7 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
return 0;
}
-static ssize_t w1_f23_write_bin(struct kobject *kobj,
+static ssize_t w1_f23_write_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 6e153343e11..483d4518091 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -97,7 +97,7 @@ int w1_ds2760_recall_eeprom(struct device *dev, int addr)
return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
}
-static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
+static ssize_t w1_ds2760_read_bin(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index ad5897dc449..2839e281cd6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -120,7 +120,7 @@ static struct device_attribute w1_slave_attr_id =
/* Default family */
-static ssize_t w1_default_write(struct kobject *kobj,
+static ssize_t w1_default_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -139,7 +139,7 @@ out_up:
return count;
}
-static ssize_t w1_default_read(struct kobject *kobj,
+static ssize_t w1_default_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
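
The w1 hunks are one mechanical change repeated: sysfs binary-attribute callbacks gain a struct file * as their first parameter. A minimal sketch of the updated callback shape, with a placeholder name; real callbacks of course fill buf instead of returning 0:

/* Placeholder showing the new bin_attribute callback signature. */
static ssize_t foo_read_bin(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr,
                            char *buf, loff_t off, size_t count)
{
        /* filp is now available for context checks; the w1 callbacks
         * above simply ignore it. */
        return 0;
}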
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b87ba23442d..afcfacc9bbe 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -145,13 +145,19 @@ config KS8695_WATCHDOG
Watchdog timer embedded into KS8695 processor. This will reboot your
system when the timeout is reached.
+config HAVE_S3C2410_WATCHDOG
+ bool
+ help
+ This enables the watchdog timer support code for Samsung SoCs.
+ To get watchdog support on a particular machine, select this
+ symbol from the corresponding mach-XXXX/Kconfig file.
+
config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG
help
- Watchdog timer block in the Samsung S3C2410 chips. This will
- reboot the system when the timer expires with the watchdog
- enabled.
+ Watchdog timer block in the Samsung SoCs. This will reboot
+ the system when the timer expires with the watchdog enabled.
The driver is limited by the speed of the system's PCLK
signal, so with reasonably fast systems (PCLK around 50-66MHz)
@@ -306,6 +312,18 @@ config MAX63XX_WATCHDOG
help
Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
+config IMX2_WDT
+ tristate "IMX2+ Watchdog"
+ depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ help
+ This is the driver for the hardware watchdog
+ on the Freescale IMX2 and later processors.
+ If you have one of these processors and wish to have
+ watchdog support enabled, say Y, otherwise say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx2_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 5e3cb95bb0e..72f3e2073f8 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
+obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9c7ccd1e908..9042a95fc98 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/blackfin.h>
+#include <asm/bfin_watchdog.h>
#define stamp(fmt, args...) \
pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
@@ -49,24 +50,6 @@
# define bfin_write_WDOG_STAT(x) bfin_write_WDOGA_STAT(x)
#endif
-/* Bit in SWRST that indicates boot caused by watchdog */
-#define SWRST_RESET_WDOG 0x4000
-
-/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */
-#define WDOG_EXPIRED 0x8000
-
-/* Masks for WDEV field in WDOG_CTL register */
-#define ICTL_RESET 0x0
-#define ICTL_NMI 0x2
-#define ICTL_GPI 0x4
-#define ICTL_NONE 0x6
-#define ICTL_MASK 0x6
-
-/* Masks for WDEN field in WDOG_CTL register */
-#define WDEN_MASK 0x0FF0
-#define WDEN_ENABLE 0x0000
-#define WDEN_DISABLE 0x0AD0
-
/* some defaults */
#define WATCHDOG_TIMEOUT 20
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 801ead19149..3d49671cdf5 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -137,12 +137,12 @@ static long booke_wdt_ioctl(struct file *file,
if (copy_to_user((void *)arg, &ident, sizeof(ident)))
return -EFAULT;
case WDIOC_GETSTATUS:
- return put_user(ident.options, p);
+ return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
/* XXX: something is clearing TSR */
tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
- /* returns 1 if last reset was caused by the WDT */
- return (tmp ? 1 : 0);
+ /* returns CARDRESET if last reset was caused by the WDT */
+ return (tmp ? WDIOF_CARDRESET : 0);
case WDIOC_SETOPTIONS:
if (get_user(tmp, p))
return -EINVAL;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index ba2efce4b40..d62b9ce8f77 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -577,7 +577,7 @@ static int __devinit cpwd_probe(struct of_device *op,
* interrupt_mask register cannot be written, so no timer
* interrupts can be masked within the PLD.
*/
- str_prop = of_get_property(op->node, "model", NULL);
+ str_prop = of_get_property(op->dev.of_node, "model", NULL);
p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL));
if (!p->enabled)
@@ -677,8 +677,11 @@ static const struct of_device_id cpwd_match[] = {
MODULE_DEVICE_TABLE(of, cpwd_match);
static struct of_platform_driver cpwd_driver = {
- .name = DRIVER_NAME,
- .match_table = cpwd_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cpwd_match,
+ },
.probe = cpwd_probe,
.remove = __devexit_p(cpwd_remove),
};
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index d1c4e55b1db..3f3dc093ad6 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -68,7 +68,6 @@ static spinlock_t eurwdt_lock;
/*
* You must set these - there is no sane way to probe for this board.
- * You can use eurwdt=x,y to set these now.
*/
static int io = 0x3f0;
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index abdbad034a6..ca0f4c6cf5a 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,9 +303,11 @@ static const struct of_device_id gef_wdt_ids[] = {
};
static struct of_platform_driver gef_wdt_driver = {
- .owner = THIS_MODULE,
- .name = "gef_wdt",
- .match_table = gef_wdt_ids,
+ .driver = {
+ .name = "gef_wdt",
+ .owner = THIS_MODULE,
+ .of_match_table = gef_wdt_ids,
+ },
.probe = gef_wdt_probe,
};
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 5133bca5ccb..481d1ad4346 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -101,13 +101,6 @@ static void supermicro_old_pre_stop(unsigned long acpibase)
outl(val32, SMI_EN); /* Needed to deactivate watchdog */
}
-static void supermicro_old_pre_keepalive(unsigned long acpibase)
-{
- /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
- /* Clear "Expire Flag" (Bit 3 of TC01_STS register) */
- outb(0x08, TCO1_STS);
-}
-
/*
* Vendor Support: 2
* Board: Super Micro Computer Inc. P4SBx, P4DPx
@@ -337,9 +330,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
{
- if (vendorsupport == SUPERMICRO_OLD_BOARD)
- supermicro_old_pre_keepalive(acpibase);
- else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+ if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 8da88603537..69de8713b8e 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -40,7 +40,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
-#define DRV_VERSION "1.05"
+#define DRV_VERSION "1.06"
#define PFX DRV_NAME ": "
/* Includes */
@@ -391,8 +391,8 @@ static struct platform_device *iTCO_wdt_platform_device;
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
- "(2<heartbeat<39 (TCO v1) or 613 (TCO v2), default="
+MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds. "
+ "5..76 (TCO v1) or 3..614 (TCO v2), default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
@@ -523,8 +523,13 @@ static int iTCO_wdt_keepalive(void)
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
outw(0x01, TCO_RLD);
- else if (iTCO_wdt_private.iTCO_version == 1)
+ else if (iTCO_wdt_private.iTCO_version == 1) {
+ /* Reset the timeout status bit so that the timer
+ * needs to count down twice again before rebooting */
+ outw(0x0008, TCO1_STS); /* write 1 to clear bit */
+
outb(0x01, TCO_RLD);
+ }
spin_unlock(&iTCO_wdt_private.io_lock);
return 0;
@@ -537,6 +542,11 @@ static int iTCO_wdt_set_heartbeat(int t)
unsigned int tmrval;
tmrval = seconds_to_ticks(t);
+
+ /* For TCO v1 the timer counts down twice before rebooting */
+ if (iTCO_wdt_private.iTCO_version == 1)
+ tmrval /= 2;
+
/* from the specs: */
/* "Values of 0h-3h are ignored and should not be attempted" */
if (tmrval < 0x04)
@@ -593,6 +603,8 @@ static int iTCO_wdt_get_timeleft(int *time_left)
spin_lock(&iTCO_wdt_private.io_lock);
val8 = inb(TCO_RLD);
val8 &= 0x3f;
+ if (!(inw(TCO1_STS) & 0x0008))
+ val8 += (inb(TCOv1_TMR) & 0x3f);
spin_unlock(&iTCO_wdt_private.io_lock);
*time_left = (val8 * 6) / 10;
@@ -832,9 +844,9 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
TCOBASE);
/* Clear out the (probably old) status */
- outb(8, TCO1_STS); /* Clear the Time Out Status bit */
- outb(2, TCO2_STS); /* Clear SECOND_TO_STS bit */
- outb(4, TCO2_STS); /* Clear BOOT_STS bit */
+ outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
+ outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
+ outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
/* Make sure the watchdog is not running */
iTCO_wdt_stop();
@@ -844,8 +856,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
if (iTCO_wdt_set_heartbeat(heartbeat)) {
iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
printk(KERN_INFO PFX
- "heartbeat value must be 2 < heartbeat < 39 (TCO v1) "
- "or 613 (TCO v2), using %d\n", heartbeat);
+ "timeout value out of range, using %d\n", heartbeat);
}
ret = misc_register(&iTCO_wdt_miscdev);
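
The TCO v1 changes hinge on the v1 timer having to expire twice before the reboot fires: set_heartbeat halves the programmed value and get_timeleft adds a second pass while the first-timeout status bit is still clear. Rough numbers for the default 30 s heartbeat, assuming seconds_to_ticks() uses the same 0.6 s tick that get_timeleft() converts back with:

/*
 * requested heartbeat     30 s
 * ticks                   30 / 0.6 = 50
 * programmed value (v1)   50 / 2   = 25   (timer must lapse twice)
 * worst-case time left    (25 + 25) ticks * 0.6 = 30 s
 */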
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
new file mode 100644
index 00000000000..ea25885781b
--- /dev/null
+++ b/drivers/watchdog/imx2_wdt.c
@@ -0,0 +1,358 @@
+/*
+ * Watchdog driver for IMX2 and later processors
+ *
+ * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ *
+ * some parts adapted by similar drivers from Darius Augulis and Vladimir
+ * Zapolskiy, additional improvements by Wim Van Sebroeck.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * NOTE: MX1 has a slightly different Watchdog than MX2 and later:
+ *
+ * MX1: MX2+:
+ * ---- -----
+ * Registers: 32-bit 16-bit
+ * Stoppable timer: Yes No
+ * Need to enable clk: No Yes
+ * Halt on suspend: Manual Can be automatic
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <mach/hardware.h>
+
+#define DRIVER_NAME "imx2-wdt"
+
+#define IMX2_WDT_WCR 0x00 /* Control Register */
+#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */
+#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */
+
+#define IMX2_WDT_WSR 0x02 /* Service Register */
+#define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */
+#define IMX2_WDT_SEQ2 0xAAAA /* -> service sequence 2 */
+
+#define IMX2_WDT_MAX_TIME 128
+#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
+
+#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
+
+#define IMX2_WDT_STATUS_OPEN 0
+#define IMX2_WDT_STATUS_STARTED 1
+#define IMX2_WDT_EXPECT_CLOSE 2
+
+static struct {
+ struct clk *clk;
+ void __iomem *base;
+ unsigned timeout;
+ unsigned long status;
+ struct timer_list timer; /* Pings the watchdog when closed */
+} imx2_wdt;
+
+static struct miscdevice imx2_wdt_miscdev;
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+
+static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");
+
+static const struct watchdog_info imx2_wdt_info = {
+ .identity = "imx2+ watchdog",
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static inline void imx2_wdt_setup(void)
+{
+ u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+
+ /* Strip the old watchdog Time-Out value */
+ val &= ~IMX2_WDT_WCR_WT;
+ /* Generate reset if WDOG times out */
+ val &= ~IMX2_WDT_WCR_WRE;
+ /* Keep Watchdog Disabled */
+ val &= ~IMX2_WDT_WCR_WDE;
+ /* Set the watchdog's Time-Out value */
+ val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout);
+
+ __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+
+ /* enable the watchdog */
+ val |= IMX2_WDT_WCR_WDE;
+ __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+}
+
+static inline void imx2_wdt_ping(void)
+{
+ __raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR);
+ __raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR);
+}
+
+static void imx2_wdt_timer_ping(unsigned long arg)
+{
+ /* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */
+ imx2_wdt_ping();
+ mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2);
+}
+
+static void imx2_wdt_start(void)
+{
+ if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+ /* at our first start we enable clock and do initialisations */
+ clk_enable(imx2_wdt.clk);
+
+ imx2_wdt_setup();
+ } else /* delete the timer that pings the watchdog after close */
+ del_timer_sync(&imx2_wdt.timer);
+
+ /* Watchdog is enabled - time to reload the timeout value */
+ imx2_wdt_ping();
+}
+
+static void imx2_wdt_stop(void)
+{
+ /* we don't need a clk_disable, it cannot be disabled once started.
+ * We use a timer to ping the watchdog while /dev/watchdog is closed */
+ imx2_wdt_timer_ping(0);
+}
+
+static void imx2_wdt_set_timeout(int new_timeout)
+{
+ u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+
+ /* set the new timeout value in the WSR */
+ val &= ~IMX2_WDT_WCR_WT;
+ val |= WDOG_SEC_TO_COUNT(new_timeout);
+ __raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+}
+
+static int imx2_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status))
+ return -EBUSY;
+
+ imx2_wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+static int imx2_wdt_close(struct inode *inode, struct file *file)
+{
+ if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout)
+ imx2_wdt_stop();
+ else {
+ dev_crit(imx2_wdt_miscdev.parent,
+ "Unexpected close: Expect reboot!\n");
+ imx2_wdt_ping();
+ }
+
+ clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+ clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status);
+ return 0;
+}
+
+static long imx2_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_value;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &imx2_wdt_info,
+ sizeof(struct watchdog_info)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_KEEPALIVE:
+ imx2_wdt_ping();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_value, p))
+ return -EFAULT;
+ if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME))
+ return -EINVAL;
+ imx2_wdt_set_timeout(new_value);
+ imx2_wdt.timeout = new_value;
+ imx2_wdt_ping();
+
+ /* Fallthrough to return current value */
+ case WDIOC_GETTIMEOUT:
+ return put_user(imx2_wdt.timeout, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t imx2_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ size_t i;
+ char c;
+
+ if (len == 0) /* Can we even see this? */
+ return 0;
+
+ clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+ /* scan to see whether or not we got the magic character */
+ for (i = 0; i != len; i++) {
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+ }
+
+ imx2_wdt_ping();
+ return len;
+}
+
+static const struct file_operations imx2_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = imx2_wdt_ioctl,
+ .open = imx2_wdt_open,
+ .release = imx2_wdt_close,
+ .write = imx2_wdt_write,
+};
+
+static struct miscdevice imx2_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &imx2_wdt_fops,
+};
+
+static int __init imx2_wdt_probe(struct platform_device *pdev)
+{
+ int ret;
+ int res_size;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't get device resources\n");
+ return -ENODEV;
+ }
+
+ res_size = resource_size(res);
+ if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
+ res->name)) {
+ dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
+ res_size, res->start);
+ return -ENOMEM;
+ }
+
+ imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
+ if (!imx2_wdt.base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ imx2_wdt.clk = clk_get_sys("imx-wdt.0", NULL);
+ if (IS_ERR(imx2_wdt.clk)) {
+ dev_err(&pdev->dev, "can't get Watchdog clock\n");
+ return PTR_ERR(imx2_wdt.clk);
+ }
+
+ imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
+ if (imx2_wdt.timeout != timeout)
+ dev_warn(&pdev->dev, "Initial timeout out of range! "
+ "Clamped from %u to %u\n", timeout, imx2_wdt.timeout);
+
+ setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);
+
+ imx2_wdt_miscdev.parent = &pdev->dev;
+ ret = misc_register(&imx2_wdt_miscdev);
+ if (ret)
+ goto fail;
+
+ dev_info(&pdev->dev,
+ "IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
+ imx2_wdt.timeout, nowayout);
+ return 0;
+
+fail:
+ imx2_wdt_miscdev.parent = NULL;
+ clk_put(imx2_wdt.clk);
+ return ret;
+}
+
+static int __exit imx2_wdt_remove(struct platform_device *pdev)
+{
+ misc_deregister(&imx2_wdt_miscdev);
+
+ if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+ del_timer_sync(&imx2_wdt.timer);
+
+ dev_crit(imx2_wdt_miscdev.parent,
+ "Device removed: Expect reboot!\n");
+ } else
+ clk_put(imx2_wdt.clk);
+
+ imx2_wdt_miscdev.parent = NULL;
+ return 0;
+}
+
+static void imx2_wdt_shutdown(struct platform_device *pdev)
+{
+ if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+ /* we are running; delete the ping timer, but give the maximum
+ * timeout before the reboot takes place */
+ del_timer_sync(&imx2_wdt.timer);
+ imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping();
+
+ dev_crit(imx2_wdt_miscdev.parent,
+ "Device shutdown: Expect reboot!\n");
+ }
+}
+
+static struct platform_driver imx2_wdt_driver = {
+ .probe = imx2_wdt_probe,
+ .remove = __exit_p(imx2_wdt_remove),
+ .shutdown = imx2_wdt_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init imx2_wdt_init(void)
+{
+ return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe);
+}
+module_init(imx2_wdt_init);
+
+static void __exit imx2_wdt_exit(void)
+{
+ platform_driver_unregister(&imx2_wdt_driver);
+}
+module_exit(imx2_wdt_exit);
+
+MODULE_AUTHOR("Wolfram Sang");
+MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 4e3941c5e29..6622335773b 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -53,7 +53,7 @@ static int mpc8xxx_wdt_init_late(void);
static u16 timeout = 0xffff;
module_param(timeout, ushort, 0);
MODULE_PARM_DESC(timeout,
- "Watchdog timeout in ticks. (0<timeout<65536, default=65535");
+ "Watchdog timeout in ticks. (0<timeout<65536, default=65535)");
static int reset = 1;
module_param(reset, bool, 0);
@@ -273,12 +273,12 @@ static const struct of_device_id mpc8xxx_wdt_match[] = {
MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
static struct of_platform_driver mpc8xxx_wdt_driver = {
- .match_table = mpc8xxx_wdt_match,
.probe = mpc8xxx_wdt_probe,
.remove = __devexit_p(mpc8xxx_wdt_remove),
- .driver = {
- .name = "mpc8xxx_wdt",
- .owner = THIS_MODULE,
+ .driver = {
+ .name = "mpc8xxx_wdt",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc8xxx_wdt_match,
},
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index d3aa2f1fe61..3a56bc36092 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -53,7 +53,9 @@
#define WDTO 0x11 /* Watchdog timeout register */
#define WDCFG 0x12 /* Watchdog config register */
-static int io = 0x2E; /* Address used on Portwell Boards */
+#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */
+
+static int io = IO_DEFAULT;
static int timeout = DEFAULT_TIMEOUT; /* timeout value */
static unsigned long timer_enabled; /* is the timer enabled? */
@@ -583,12 +585,13 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(io, int, 0);
-MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ").");
+MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
+ __MODULE_STRING(IO_DEFAULT) ").");
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in minutes (default="
- __MODULE_STRING(timeout) ").");
+ __MODULE_STRING(DEFAULT_TIMEOUT) ").");
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 8e4eacc5bb5..748a74bd85e 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -600,8 +600,8 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
{
usb_free_urb(usb_pcwd->intr_urb);
if (usb_pcwd->intr_buffer != NULL)
- usb_buffer_free(usb_pcwd->udev, usb_pcwd->intr_size,
- usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
+ usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
+ usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
kfree(usb_pcwd);
}
@@ -671,7 +671,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
le16_to_cpu(endpoint->wMaxPacketSize) : 8);
/* set up the memory buffer's */
- usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size,
+ usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size,
GFP_ATOMIC, &usb_pcwd->intr_dma);
if (!usb_pcwd->intr_buffer) {
printk(KERN_ERR PFX "Out of memory\n");
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 09102f09e68..a7b5ad2a98b 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -33,6 +33,8 @@
#define PFX "pnx833x: "
#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */
+#define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY)
+#define PNX_TIMEOUT_VALUE 2040000000U
/** CONFIG block */
#define PNX833X_CONFIG (0x07000U)
@@ -47,20 +49,21 @@
static int pnx833x_wdt_alive;
/* Set default timeout in MHZ.*/
-static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY);
+static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT;
module_param(pnx833x_wdt_timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. (68Mhz clock), default="
- __MODULE_STRING(pnx833x_wdt_timeout) "(30 seconds).");
+ __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds).");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int start_enabled = 1;
+#define START_DEFAULT 1
+static int start_enabled = START_DEFAULT;
module_param(start_enabled, int, 0);
MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
- "(default=" __MODULE_STRING(start_enabled) ")");
+ "(default=" __MODULE_STRING(START_DEFAULT) ")");
static void pnx833x_wdt_start(void)
{
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 69c6adbd820..428f8a1583e 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -1,7 +1,7 @@
/*
* RDC321x watchdog driver
*
- * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
*
* This driver is highly inspired from the cpu5_wdt driver
*
@@ -36,8 +36,7 @@
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-
-#include <asm/rdc321x_defs.h>
+#include <linux/mfd/rdc321x.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -63,6 +62,8 @@ static struct {
int default_ticks;
unsigned long inuse;
spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ int base_reg;
} rdc321x_wdt_device;
/* generic helper functions */
@@ -70,14 +71,18 @@ static struct {
static void rdc321x_wdt_trigger(unsigned long unused)
{
unsigned long flags;
+ u32 val;
if (rdc321x_wdt_device.running)
ticks--;
/* keep watchdog alive */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA),
- RDC3210_CFGREG_DATA);
+ pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, &val);
+ val |= RDC_WDT_EN;
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, val);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
/* requeue?? */
@@ -105,10 +110,13 @@ static void rdc321x_wdt_start(void)
/* Clear the timer */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, RDC_CLS_TMR);
/* Enable watchdog and set the timeout to 81.92 us */
- outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg,
+ RDC_WDT_EN | RDC_WDT_CNT);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
mod_timer(&rdc321x_wdt_device.timer,
@@ -148,7 +156,7 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
- unsigned int value;
+ u32 value;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET,
.identity = "RDC321x WDT",
@@ -162,9 +170,10 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETSTATUS:
/* Read the value from the DATA register */
spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
- value = inl(RDC3210_CFGREG_DATA);
+ pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, &value);
spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
- if (copy_to_user(argp, &value, sizeof(int)))
+ if (copy_to_user(argp, &value, sizeof(u32)))
return -EFAULT;
break;
case WDIOC_GETSUPPORT:
@@ -219,17 +228,35 @@ static struct miscdevice rdc321x_wdt_misc = {
static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
{
int err;
+ struct resource *r;
+ struct rdc321x_wdt_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "wdt-reg");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get wdt-reg resource\n");
+ return -ENODEV;
+ }
+
+ rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
+ rdc321x_wdt_device.base_reg = r->start;
err = misc_register(&rdc321x_wdt_misc);
if (err < 0) {
- printk(KERN_ERR PFX "watchdog misc_register failed\n");
+ dev_err(&pdev->dev, "misc_register failed\n");
return err;
}
spin_lock_init(&rdc321x_wdt_device.lock);
/* Reset the watchdog */
- outl(RDC_WDT_RST, RDC3210_CFGREG_DATA);
+ pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
+ rdc321x_wdt_device.base_reg, RDC_WDT_RST);
init_completion(&rdc321x_wdt_device.stop);
rdc321x_wdt_device.queue = 0;
@@ -240,7 +267,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.default_ticks = ticks;
- printk(KERN_INFO PFX "watchdog init success\n");
+ dev_info(&pdev->dev, "watchdog init success\n");
return 0;
}
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index ea7f803f624..5dceeddc885 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -239,8 +239,11 @@ static const struct of_device_id riowd_match[] = {
MODULE_DEVICE_TABLE(of, riowd_match);
static struct of_platform_driver riowd_driver = {
- .name = DRIVER_NAME,
- .match_table = riowd_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = riowd_match,
+ },
.probe = riowd_probe,
.remove = __devexit_p(riowd_remove),
};
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e4cebef5517..300932580de 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -63,7 +63,7 @@ module_param(nowayout, int, 0);
module_param(soft_noboot, int, 0);
module_param(debug, int, 0);
-MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default="
+MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default="
__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")");
MODULE_PARM_DESC(tmr_atboot,
"Watchdog is started at boot time if set to 1, default="
@@ -71,8 +71,8 @@ MODULE_PARM_DESC(tmr_atboot,
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
- "0 to reboot (default depends on ONLY_TESTING)");
-MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
+ "0 to reboot (default 0)");
+MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
static unsigned long open_lock;
static struct device *wdt_dev; /* platform device attached to */
@@ -426,8 +426,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
dev_err(dev, "failed to get memory region\n");
- ret = -ENOENT;
- goto err_req;
+ return -EBUSY;
}
wdt_base = ioremap(res->start, size);
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index a03f84e5ee1..6fc74065abe 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -496,7 +496,7 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
MODULE_PARM_DESC(clock_division_ratio,
"Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
- "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
+ "to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index dcabe77ad14..b5045ca7e61 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -190,6 +190,8 @@ static int __devinit twl4030_wdt_probe(struct platform_device *pdev)
twl4030_wdt_dev = pdev;
+ twl4030_wdt_disable(wdt);
+
ret = misc_register(&wdt->miscdev);
if (ret) {
dev_err(wdt->miscdev.parent,
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index bfda2e99dd8..552a4381e78 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(tachometer,
static int type = 500;
module_param(type, int, 0);
MODULE_PARM_DESC(type,
- "WDT501-P Card type (500 or 501 , default=500)");
+ "WDT501-P Card type (500 or 501, default=500)");
/*
* Programming support
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 90ef70eb47d..5c2521fc836 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -63,7 +63,7 @@ static char expect_close;
static DEFINE_SPINLOCK(spinlock);
module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300), default="
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default="
__MODULE_STRING(DEFAULT_TIMEOUT) ")");
module_param(testmode, int, 0);
MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 2ac4440e7b0..07e857b0de1 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,12 +80,6 @@ static void do_suspend(void)
shutting_down = SHUTDOWN_SUSPEND;
- err = stop_machine_create();
- if (err) {
- printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err);
- goto out;
- }
-
#ifdef CONFIG_PREEMPT
/* If the kernel is preemptible, we need to freeze all the processes
to prevent them from being in the middle of a pagetable update
@@ -93,7 +87,7 @@ static void do_suspend(void)
err = freeze_processes();
if (err) {
printk(KERN_ERR "xen suspend: freeze failed %d\n", err);
- goto out_destroy_sm;
+ goto out;
}
#endif
@@ -136,12 +130,8 @@ out_resume:
out_thaw:
#ifdef CONFIG_PREEMPT
thaw_processes();
-
-out_destroy_sm:
-#endif
- stop_machine_destroy();
-
out:
+#endif
shutting_down = SHUTDOWN_INVALID;
}
#endif /* CONFIG_PM_SLEEP */
@@ -195,6 +185,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
kfree(str);
}
+#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
unsigned int len)
{
@@ -224,15 +215,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
handle_sysrq(sysrq_key, NULL);
}
-static struct xenbus_watch shutdown_watch = {
- .node = "control/shutdown",
- .callback = shutdown_handler
-};
-
static struct xenbus_watch sysrq_watch = {
.node = "control/sysrq",
.callback = sysrq_handler
};
+#endif
+
+static struct xenbus_watch shutdown_watch = {
+ .node = "control/shutdown",
+ .callback = shutdown_handler
+};
static int setup_shutdown_watcher(void)
{
@@ -244,11 +236,13 @@ static int setup_shutdown_watcher(void)
return err;
}
+#ifdef CONFIG_MAGIC_SYSRQ
err = register_xenbus_watch(&sysrq_watch);
if (err) {
printk(KERN_ERR "Failed to set sysrq watcher\n");
return err;
}
+#endif
return 0;
}
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index d47c47fc048..3c7046d7965 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -97,7 +97,7 @@ static void zorro_seq_stop(struct seq_file *m, void *v)
static int zorro_seq_show(struct seq_file *m, void *v)
{
- u_int slot = *(loff_t *)v;
+ unsigned int slot = *(loff_t *)v;
struct zorro_dev *z = &zorro_autocon[slot];
seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
@@ -129,7 +129,7 @@ static const struct file_operations zorro_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_zorro_dir;
-static int __init zorro_proc_attach_device(u_int slot)
+static int __init zorro_proc_attach_device(unsigned int slot)
{
struct proc_dir_entry *entry;
char name[4];
@@ -146,7 +146,7 @@ static int __init zorro_proc_attach_device(u_int slot)
static int __init zorro_proc_init(void)
{
- u_int slot;
+ unsigned int slot;
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 53180a37cc9..7ee2b6e7178 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -137,10 +137,34 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
+static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+#ifdef CONFIG_HOTPLUG
+ struct zorro_dev *z;
+
+ if (!dev)
+ return -ENODEV;
+
+ z = to_zorro_dev(dev);
+ if (!z)
+ return -ENODEV;
+
+ if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) ||
+ add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) ||
+ add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) ||
+ add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id))
+ return -ENOMEM;
+
+ return 0;
+#else /* !CONFIG_HOTPLUG */
+ return -ENODEV;
+#endif /* !CONFIG_HOTPLUG */
+}
struct bus_type zorro_bus_type = {
.name = "zorro",
.match = zorro_bus_match,
+ .uevent = zorro_uevent,
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
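Together with the modalias sysfs attribute added in zorro-sysfs.c below, the uevent callback above lets udev autoload Zorro drivers from their device ID tables. A hedged sketch of how a driver would advertise the IDs it handles (the ID shown is borrowed from this diff purely as an illustration):

#include <linux/module.h>
#include <linux/zorro.h>

/* Illustration only: export an ID table so the MODALIAS value emitted by
 * zorro_uevent() can be matched against the module's aliases at load time. */
static const struct zorro_device_id example_zorro_ids[] = {
	{ ZORRO_PROD_GVP_EPC_BASE },	/* example ID, for illustration */
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, example_zorro_ids);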
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 1d2a772ea14..26f7184ef9e 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -49,7 +49,7 @@ static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *
static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
-static ssize_t zorro_read_config(struct kobject *kobj,
+static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
@@ -77,6 +77,16 @@ static struct bin_attribute zorro_config_attr = {
.read = zorro_read_config,
};
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zorro_dev *z = to_zorro_dev(dev);
+
+ return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
+}
+
+static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
+
int zorro_create_sysfs_dev_files(struct zorro_dev *z)
{
struct device *dev = &z->dev;
@@ -89,6 +99,7 @@ int zorro_create_sysfs_dev_files(struct zorro_dev *z)
(error = device_create_file(dev, &dev_attr_slotaddr)) ||
(error = device_create_file(dev, &dev_attr_slotsize)) ||
(error = device_create_file(dev, &dev_attr_resource)) ||
+ (error = device_create_file(dev, &dev_attr_modalias)) ||
(error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
return error;
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d45fb34e2d2..6455f3a244c 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -15,6 +15,8 @@
#include <linux/zorro.h>
#include <linux/bitops.h>
#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -26,24 +28,17 @@
* Zorro Expansion Devices
*/
-u_int zorro_num_autocon = 0;
+unsigned int zorro_num_autocon;
struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
/*
- * Single Zorro bus
+ * Zorro bus
*/
-struct zorro_bus zorro_bus = {\
- .resources = {
- /* Zorro II regions (on Zorro II/III) */
- { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff },
- { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff },
- /* Zorro III regions (on Zorro III only) */
- { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff },
- { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff }
- },
- .name = "Zorro bus"
+struct zorro_bus {
+ struct list_head devices; /* list of devices on this bus */
+ struct device dev;
};
@@ -53,18 +48,19 @@ struct zorro_bus zorro_bus = {\
struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
{
- struct zorro_dev *z;
+ struct zorro_dev *z;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
- return NULL;
+ if (!zorro_num_autocon)
+ return NULL;
- for (z = from ? from+1 : &zorro_autocon[0];
- z < zorro_autocon+zorro_num_autocon;
- z++)
- if (id == ZORRO_WILDCARD || id == z->id)
- return z;
- return NULL;
+ for (z = from ? from+1 : &zorro_autocon[0];
+ z < zorro_autocon+zorro_num_autocon;
+ z++)
+ if (id == ZORRO_WILDCARD || id == z->id)
+ return z;
+ return NULL;
}
+EXPORT_SYMBOL(zorro_find_device);
/*
@@ -83,121 +79,138 @@ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
*/
DECLARE_BITMAP(zorro_unused_z2ram, 128);
+EXPORT_SYMBOL(zorro_unused_z2ram);
static void __init mark_region(unsigned long start, unsigned long end,
int flag)
{
- if (flag)
- start += Z2RAM_CHUNKMASK;
- else
- end += Z2RAM_CHUNKMASK;
- start &= ~Z2RAM_CHUNKMASK;
- end &= ~Z2RAM_CHUNKMASK;
-
- if (end <= Z2RAM_START || start >= Z2RAM_END)
- return;
- start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
- end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
- while (start < end) {
- u32 chunk = start>>Z2RAM_CHUNKSHIFT;
if (flag)
- set_bit(chunk, zorro_unused_z2ram);
+ start += Z2RAM_CHUNKMASK;
else
- clear_bit(chunk, zorro_unused_z2ram);
- start += Z2RAM_CHUNKSIZE;
- }
+ end += Z2RAM_CHUNKMASK;
+ start &= ~Z2RAM_CHUNKMASK;
+ end &= ~Z2RAM_CHUNKMASK;
+
+ if (end <= Z2RAM_START || start >= Z2RAM_END)
+ return;
+ start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
+ end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
+ while (start < end) {
+ u32 chunk = start>>Z2RAM_CHUNKSHIFT;
+ if (flag)
+ set_bit(chunk, zorro_unused_z2ram);
+ else
+ clear_bit(chunk, zorro_unused_z2ram);
+ start += Z2RAM_CHUNKSIZE;
+ }
}
-static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z)
+static struct resource __init *zorro_find_parent_resource(
+ struct platform_device *bridge, struct zorro_dev *z)
{
- int i;
+ int i;
- for (i = 0; i < zorro_bus.num_resources; i++)
- if (zorro_resource_start(z) >= zorro_bus.resources[i].start &&
- zorro_resource_end(z) <= zorro_bus.resources[i].end)
- return &zorro_bus.resources[i];
- return &iomem_resource;
+ for (i = 0; i < bridge->num_resources; i++) {
+ struct resource *r = &bridge->resource[i];
+ if (zorro_resource_start(z) >= r->start &&
+ zorro_resource_end(z) <= r->end)
+ return r;
+ }
+ return &iomem_resource;
}
- /*
- * Initialization
- */
-static int __init zorro_init(void)
+static int __init amiga_zorro_probe(struct platform_device *pdev)
{
- struct zorro_dev *z;
- unsigned int i;
- int error;
-
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
- return 0;
-
- pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n",
- zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
-
- /* Initialize the Zorro bus */
- INIT_LIST_HEAD(&zorro_bus.devices);
- dev_set_name(&zorro_bus.dev, "zorro");
- error = device_register(&zorro_bus.dev);
- if (error) {
- pr_err("Zorro: Error registering zorro_bus\n");
- return error;
- }
-
- /* Request the resources */
- zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
- for (i = 0; i < zorro_bus.num_resources; i++)
- request_resource(&iomem_resource, &zorro_bus.resources[i]);
-
- /* Register all devices */
- for (i = 0; i < zorro_num_autocon; i++) {
- z = &zorro_autocon[i];
- z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
- if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
- /* GVP quirk */
- unsigned long magic = zorro_resource_start(z)+0x8000;
- z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
- }
- sprintf(z->name, "Zorro device %08x", z->id);
- zorro_name_device(z);
- z->resource.name = z->name;
- if (request_resource(zorro_find_parent_resource(z), &z->resource))
- pr_err("Zorro: Address space collision on device %s %pR\n",
- z->name, &z->resource);
- dev_set_name(&z->dev, "%02x", i);
- z->dev.parent = &zorro_bus.dev;
- z->dev.bus = &zorro_bus_type;
- error = device_register(&z->dev);
+ struct zorro_bus *bus;
+ struct zorro_dev *z;
+ struct resource *r;
+ unsigned int i;
+ int error;
+
+ /* Initialize the Zorro bus */
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bus->devices);
+ bus->dev.parent = &pdev->dev;
+ dev_set_name(&bus->dev, "zorro");
+ error = device_register(&bus->dev);
if (error) {
- pr_err("Zorro: Error registering device %s\n", z->name);
- continue;
+ pr_err("Zorro: Error registering zorro_bus\n");
+ kfree(bus);
+ return error;
}
- error = zorro_create_sysfs_dev_files(z);
- if (error)
- dev_err(&z->dev, "Error creating sysfs files\n");
- }
-
- /* Mark all available Zorro II memory */
- zorro_for_each_dev(z) {
- if (z->rom.er_Type & ERTF_MEMLIST)
- mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1);
- }
-
- /* Unmark all used Zorro II memory */
- for (i = 0; i < m68k_num_memory; i++)
- if (m68k_memory[i].addr < 16*1024*1024)
- mark_region(m68k_memory[i].addr,
- m68k_memory[i].addr+m68k_memory[i].size, 0);
-
- return 0;
+ platform_set_drvdata(pdev, bus);
+
+ /* Register all devices */
+ pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
+ zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+
+ for (i = 0; i < zorro_num_autocon; i++) {
+ z = &zorro_autocon[i];
+ z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
+ if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
+ /* GVP quirk */
+ unsigned long magic = zorro_resource_start(z)+0x8000;
+ z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
+ }
+ sprintf(z->name, "Zorro device %08x", z->id);
+ zorro_name_device(z);
+ z->resource.name = z->name;
+ r = zorro_find_parent_resource(pdev, z);
+ error = request_resource(r, &z->resource);
+ if (error)
+ dev_err(&bus->dev,
+ "Address space collision on device %s %pR\n",
+ z->name, &z->resource);
+ dev_set_name(&z->dev, "%02x", i);
+ z->dev.parent = &bus->dev;
+ z->dev.bus = &zorro_bus_type;
+ error = device_register(&z->dev);
+ if (error) {
+ dev_err(&bus->dev, "Error registering device %s\n",
+ z->name);
+ continue;
+ }
+ error = zorro_create_sysfs_dev_files(z);
+ if (error)
+ dev_err(&z->dev, "Error creating sysfs files\n");
+ }
+
+ /* Mark all available Zorro II memory */
+ zorro_for_each_dev(z) {
+ if (z->rom.er_Type & ERTF_MEMLIST)
+ mark_region(zorro_resource_start(z),
+ zorro_resource_end(z)+1, 1);
+ }
+
+ /* Unmark all used Zorro II memory */
+ for (i = 0; i < m68k_num_memory; i++)
+ if (m68k_memory[i].addr < 16*1024*1024)
+ mark_region(m68k_memory[i].addr,
+ m68k_memory[i].addr+m68k_memory[i].size,
+ 0);
+
+ return 0;
}
-subsys_initcall(zorro_init);
+static struct platform_driver amiga_zorro_driver = {
+ .driver = {
+ .name = "amiga-zorro",
+ .owner = THIS_MODULE,
+ },
+};
-EXPORT_SYMBOL(zorro_find_device);
-EXPORT_SYMBOL(zorro_unused_z2ram);
+static int __init amiga_zorro_init(void)
+{
+ return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe);
+}
+
+module_init(amiga_zorro_init);
MODULE_LICENSE("GPL");
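Since the bus scan now runs from platform_driver_probe(), it only happens once a platform device named "amiga-zorro" has been registered, which is left to the Amiga platform setup code. A sketch of what that registration could look like (the function name and initcall placement are assumptions, not part of this diff):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/amigahw.h>
#include <asm/setup.h>

static int __init amiga_init_zorro_device(void)
{
	struct platform_device *pdev;

	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
		return -ENODEV;

	/* The name must match amiga_zorro_driver for the probe to run. */
	pdev = platform_device_register_simple("amiga-zorro", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
/* arch_initcall runs before the driver's module_init above, so the
 * device exists by the time platform_driver_probe() looks for it. */
arch_initcall(amiga_init_zorro_device);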